[automerger skipped] Add SafetyNet logging to JNI::NewStringUTF. am: 4b56bb8ce2 am: a575d6f7e4 am: 90825620b4 -s ours am: 340de1c79a -s ours am: dbb1a04dfd -s ours am: beae23cc57 -s ours

am skip reason: Change-Id I653db8be0c0a45302f0d1c54285c02d2d052a9f4 with SHA-1 69fc841b84 is in history

Original change: https://googleplex-android-review.googlesource.com/c/platform/art/+/13184710

MUST ONLY BE SUBMITTED BY AUTOMERGER

Change-Id: I7d8559478927a9653ab0a6355baa220d1cda7c15
diff --git a/.vpython b/.vpython
deleted file mode 100644
index ed00723..0000000
--- a/.vpython
+++ /dev/null
@@ -1,25 +0,0 @@
-# This is a vpython "spec" file.
-#
-# It describes patterns for python wheel dependencies of the python scripts in
-# the chromium repo, particularly for dependencies that have compiled components
-# (since pure-python dependencies can be easily vendored into third_party).
-#
-# When vpython is invoked, it finds this file and builds a python VirtualEnv,
-# containing all of the dependencies described in this file, fetching them from
-# CIPD (the "Chrome Infrastructure Package Deployer" service). Unlike `pip`,
-# this never requires the end-user machine to have a working python extension
-# compilation environment. All of these packages are built using:
-#   https://chromium.googlesource.com/infra/infra/+/master/infra/tools/dockerbuild/
-#
-# All python scripts in the repo share this same spec, to avoid dependency
-# fragmentation.
-#
-# If you have depot_tools installed in your $PATH, you can invoke python scripts
-# in this repo by running them as you normally would run them, except
-# substituting `vpython` instead of `python` on the command line, e.g.:
-#   vpython path/to/script.py some --arguments
-#
-# Read more about `vpython` and how to modify this file here:
-#   https://chromium.googlesource.com/infra/infra/+/master/doc/users/vpython.md
-
-python_version: "2.7"
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 0000000..818fcfb
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,5 @@
+// If you're looking for ART global stuff, please see build/Android.bp.
+
+package {
+    default_visibility: ["//art:__subpackages__"],
+}
diff --git a/Android.mk b/Android.mk
index bebe759..d4de2e5 100644
--- a/Android.mk
+++ b/Android.mk
@@ -30,7 +30,7 @@
 
 .PHONY: clean-oat-host
 clean-oat-host:
-	find $(OUT_DIR) -name "*.oat" -o -name "*.odex" -o -name "*.art" -o -name '*.vdex' | xargs rm -f
+	find $(OUT_DIR) '(' -name '*.oat' -o -name '*.odex' -o -name '*.art' -o -name '*.vdex' ')' -a -type f | xargs rm -f
 	rm -rf $(TMPDIR)/*/test-*/dalvik-cache/*
 	rm -rf $(TMPDIR)/android-data/dalvik-cache/*
 
@@ -58,9 +58,7 @@
 # product rules
 
 include $(art_path)/oatdump/Android.mk
-include $(art_path)/tools/Android.mk
 include $(art_path)/tools/ahat/Android.mk
-include $(art_path)/tools/amm/Android.mk
 include $(art_path)/tools/dexfuzz/Android.mk
 include $(art_path)/tools/veridex/Android.mk
 
@@ -74,13 +72,7 @@
 endif
 
 ART_TARGET_DEPENDENCIES := \
-  $(ART_TARGET_EXECUTABLES) \
-  $(ART_TARGET_DEX_DEPENDENCIES) \
-  $(ART_TARGET_SHARED_LIBRARY_DEPENDENCIES)
-
-ifeq ($(ART_BUILD_TARGET_DEBUG),true)
-ART_TARGET_DEPENDENCIES += $(ART_TARGET_SHARED_LIBRARY_DEBUG_DEPENDENCIES)
-endif
+  $(ART_TARGET_DEX_DEPENDENCIES)
 
 ########################################################################
 # test rules
@@ -105,40 +97,6 @@
        $(ADB) wait-for-device root && \
        $(ADB) wait-for-device remount)))
 
-# Sync test files to the target, depends upon all things that must be pushed to the target.
-.PHONY: test-art-target-sync
-# Check if we need to sync. In case ART_TEST_CHROOT or ART_TEST_ANDROID_ROOT
-# is not empty, the code below uses 'adb push' instead of 'adb sync',
-# which does not check if the files on the device have changed.
-# TODO: Remove support for ART_TEST_ANDROID_ROOT when it is no longer needed.
-ifneq ($(ART_TEST_NO_SYNC),true)
-# Sync system and data partitions.
-ifeq ($(ART_TEST_ANDROID_ROOT),)
-ifeq ($(ART_TEST_CHROOT),)
-test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
-	$(TEST_ART_ADB_ROOT_AND_REMOUNT)
-	$(ADB) sync system && $(ADB) sync data
-else
-# TEST_ART_ADB_ROOT_AND_REMOUNT is not needed here, as we are only
-# pushing things to the chroot dir, which is expected to be under
-# /data on the device.
-test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
-	$(ADB) wait-for-device
-	$(ADB) push $(PRODUCT_OUT)/system $(ART_TEST_CHROOT)/
-	$(ADB) push $(PRODUCT_OUT)/data $(ART_TEST_CHROOT)/
-endif
-else
-test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
-	$(TEST_ART_ADB_ROOT_AND_REMOUNT)
-	$(ADB) wait-for-device
-	$(ADB) push $(PRODUCT_OUT)/system $(ART_TEST_CHROOT)$(ART_TEST_ANDROID_ROOT)
-# Push the contents of the `data` dir into `$(ART_TEST_CHROOT)/data` on the device (note
-# that $(ART_TEST_CHROOT) can be empty).  If `$(ART_TEST_CHROOT)/data` already exists on
-# the device, it is not overwritten, but its content is updated.
-	$(ADB) push $(PRODUCT_OUT)/data $(ART_TEST_CHROOT)/
-endif
-endif
-
 # "mm test-art" to build and run all tests on host and device
 .PHONY: test-art
 test-art: test-art-host test-art-target
@@ -241,7 +199,7 @@
 
 # Dexdump/list regression test.
 .PHONY: test-art-host-dexdump
-test-art-host-dexdump: $(addprefix $(HOST_OUT_EXECUTABLES)/, dexdump2 dexlist)
+test-art-host-dexdump: $(addprefix $(HOST_OUT_EXECUTABLES)/, dexdump dexlist)
 	ANDROID_HOST_OUT=$(realpath $(HOST_OUT)) art/test/dexdump/run-all-tests
 
 ########################################################################
@@ -320,22 +278,24 @@
 
 
 #######################
-# Android Runtime APEX.
+# ART APEX.
 
 include $(CLEAR_VARS)
 
-# The Android Runtime APEX comes in two flavors:
-# - the release module (`com.android.runtime.release`), containing
+# The ART APEX comes in three flavors:
+# - the release module (`com.android.art.release`), containing
 #   only "release" artifacts;
-# - the debug module (`com.android.runtime.debug`), containing both
-#   "release" and "debug" artifacts, as well as additional tools.
+# - the debug module (`com.android.art.debug`), containing both
+#   "release" and "debug" artifacts, as well as additional tools;
+# - the testing module (`com.android.art.testing`), containing
+#   both "release" and "debug" artifacts, as well as additional tools
+#   and ART gtests).
 #
-# The Android Runtime APEX module (`com.android.runtime`) is an
-# "alias" for one of the previous modules. By default, "user" build
-# variants contain the release module, while "userdebug" and "eng"
-# build variant contain the debug module. However, if
-# `PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD` is defined, it overrides
-# the previous logic:
+# The ART APEX module (`com.android.art`) is an "alias" for either the
+# release or the debug module. By default, "user" build variants contain
+# the release module, while "userdebug" and "eng" build variants contain
+# the debug module. However, if `PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD`
+# is defined, it overrides the previous logic:
 # - if `PRODUCT_ART_TARGET_INCLUDE_DEBUG_BUILD` is set to `false`, the
 #   build will include the release module (whatever the build
 #   variant);
@@ -351,16 +311,16 @@
 ifeq (true,$(art_target_include_debug_build))
   # Module with both release and debug variants, as well as
   # additional tools.
-  TARGET_RUNTIME_APEX := com.android.runtime.debug
+  TARGET_ART_APEX := $(DEBUG_ART_APEX)
   APEX_TEST_MODULE := art-check-debug-apex-gen-fakebin
 else
   # Release module (without debug variants nor tools).
-  TARGET_RUNTIME_APEX := com.android.runtime.release
+  TARGET_ART_APEX := $(RELEASE_ART_APEX)
   APEX_TEST_MODULE := art-check-release-apex-gen-fakebin
 endif
 
-LOCAL_MODULE := com.android.runtime
-LOCAL_REQUIRED_MODULES := $(TARGET_RUNTIME_APEX)
+LOCAL_MODULE := com.android.art
+LOCAL_REQUIRED_MODULES := $(TARGET_ART_APEX)
 LOCAL_REQUIRED_MODULES += art_apex_boot_integrity
 
 # Clear locally used variable.
@@ -369,7 +329,7 @@
 include $(BUILD_PHONY_PACKAGE)
 
 include $(CLEAR_VARS)
-LOCAL_MODULE := com.android.runtime
+LOCAL_MODULE := com.android.art
 LOCAL_IS_HOST_MODULE := true
 ifneq ($(HOST_OS),darwin)
   LOCAL_REQUIRED_MODULES += $(APEX_TEST_MODULE)
@@ -378,25 +338,29 @@
 
 # Create canonical name -> file name symlink in the symbol directory
 # The symbol files for the debug or release variant are installed to
-# $(TARGET_OUT_UNSTRIPPED)/$(TARGET_RUNTIME_APEX) directory. However,
-# since they are available via /apex/com.android.runtime at runtime
+# $(TARGET_OUT_UNSTRIPPED)/$(TARGET_ART_APEX) directory. However,
+# since they are available via /apex/com.android.art at runtime
 # regardless of which variant is installed, create a symlink so that
-# $(TARGET_OUT_UNSTRIPPED)/apex/com.android.runtime is linked to
-# $(TARGET_OUT_UNSTRIPPED)/apex/$(TARGET_RUNTIME_APEX).
-# Note that installation of the symlink is triggered by the apex_manifest.json
+# $(TARGET_OUT_UNSTRIPPED)/apex/com.android.art is linked to
+# $(TARGET_OUT_UNSTRIPPED)/apex/$(TARGET_ART_APEX).
+# Note that installation of the symlink is triggered by the apex_manifest.pb
 # file which is the file that is guaranteed to be created regardless of the
 # value of TARGET_FLATTEN_APEX.
 ifeq ($(TARGET_FLATTEN_APEX),true)
-runtime_apex_manifest_file := $(PRODUCT_OUT)/system/apex/$(TARGET_RUNTIME_APEX)/apex_manifest.json
+art_apex_manifest_file := $(PRODUCT_OUT)/system/apex/$(TARGET_ART_APEX)/apex_manifest.pb
 else
-runtime_apex_manifest_file := $(PRODUCT_OUT)/apex/$(TARGET_RUNTIME_APEX)/apex_manifest.json
+art_apex_manifest_file := $(PRODUCT_OUT)/apex/$(TARGET_ART_APEX)/apex_manifest.pb
 endif
 
-$(runtime_apex_manifest_file): $(TARGET_OUT_UNSTRIPPED)/apex/com.android.runtime
-$(TARGET_OUT_UNSTRIPPED)/apex/com.android.runtime :
-	$(hide) ln -sf $(TARGET_RUNTIME_APEX) $@
+art_apex_symlink_timestamp := $(call intermediates-dir-for,FAKE,com.android.art)/symlink.timestamp
+$(art_apex_manifest_file): $(art_apex_symlink_timestamp)
+$(art_apex_manifest_file): PRIVATE_LINK_NAME := $(TARGET_OUT_UNSTRIPPED)/apex/com.android.art
+$(art_apex_symlink_timestamp):
+	$(hide) mkdir -p $(dir $(PRIVATE_LINK_NAME))
+	$(hide) ln -sf $(TARGET_ART_APEX) $(PRIVATE_LINK_NAME)
+	$(hide) touch $@
 
-runtime_apex_manifest_file :=
+art_apex_manifest_file :=
 
 #######################
 # Fake packages for ART
@@ -409,15 +373,16 @@
 
 # Base requirements.
 LOCAL_REQUIRED_MODULES := \
-    dalvikvm \
-    dex2oat \
-    dexoptanalyzer \
-    libart \
-    libart-compiler \
-    libopenjdkjvm \
-    libopenjdkjvmti \
-    profman \
-    libadbconnection \
+    dalvikvm.com.android.art.release \
+    dex2oat.com.android.art.release \
+    dexoptanalyzer.com.android.art.release \
+    libart.com.android.art.release \
+    libart-compiler.com.android.art.release \
+    libopenjdkjvm.com.android.art.release \
+    libopenjdkjvmti.com.android.art.release \
+    profman.com.android.art.release \
+    libadbconnection.com.android.art.release \
+    libperfetto_hprof.com.android.art.release \
 
 # Potentially add in debug variants:
 #
@@ -431,15 +396,16 @@
 endif
 ifeq (true,$(art_target_include_debug_build))
 LOCAL_REQUIRED_MODULES += \
-    dex2oatd \
-    dexoptanalyzerd \
-    libartd \
-    libartd-compiler \
-    libopenjdkd \
-    libopenjdkjvmd \
-    libopenjdkjvmtid \
-    profmand \
-    libadbconnectiond \
+    dex2oatd.com.android.art.debug \
+    dexoptanalyzerd.com.android.art.debug \
+    libartd.com.android.art.debug \
+    libartd-compiler.com.android.art.debug \
+    libopenjdkd.com.android.art.debug \
+    libopenjdkjvmd.com.android.art.debug \
+    libopenjdkjvmtid.com.android.art.debug \
+    profmand.com.android.art.debug \
+    libadbconnectiond.com.android.art.debug \
+    libperfetto_hprofd.com.android.art.debug \
 
 endif
 endif
@@ -521,54 +487,196 @@
   lib/bootstrap/libc.so \
   lib/bootstrap/libm.so \
   lib/bootstrap/libdl.so \
+  lib/bootstrap/libdl_android.so \
   lib64/bootstrap/libc.so \
   lib64/bootstrap/libm.so \
   lib64/bootstrap/libdl.so \
+  lib64/bootstrap/libdl_android.so \
 
-PRIVATE_RUNTIME_DEPENDENCY_LIBS := \
-  lib/libnativebridge.so \
-  lib64/libnativebridge.so \
-  lib/libnativehelper.so \
-  lib64/libnativehelper.so \
-  lib/libdexfile_external.so \
-  lib64/libdexfile_external.so \
-  lib/libnativeloader.so \
-  lib64/libnativeloader.so \
+PRIVATE_ART_APEX_DEPENDENCY_FILES := \
+  bin/dalvikvm32 \
+  bin/dalvikvm64 \
+  bin/dalvikvm \
+  bin/dex2oat \
+  bin/dex2oatd \
+  bin/dexdump \
+
+PRIVATE_ART_APEX_DEPENDENCY_LIBS := \
+  lib/libadbconnectiond.so \
+  lib/libadbconnection.so \
+  lib/libandroidicu.so \
   lib/libandroidio.so \
+  lib/libartbased.so \
+  lib/libartbase.so \
+  lib/libart-compiler.so \
+  lib/libartd-compiler.so \
+  lib/libartd-dexlayout.so \
+  lib/libartd-disassembler.so \
+  lib/libart-dexlayout.so \
+  lib/libart-disassembler.so \
+  lib/libartd.so \
+  lib/libartpalette.so \
+  lib/libart.so \
+  lib/libbacktrace.so \
+  lib/libbase.so \
+  lib/libcrypto.so \
+  lib/libdexfiled_external.so \
+  lib/libdexfiled.so \
+  lib/libdexfile_external.so \
+  lib/libdexfile.so \
+  lib/libdexfile_support.so \
+  lib/libdt_fd_forward.so \
+  lib/libdt_socket.so \
+  lib/libexpat.so \
+  lib/libicui18n.so \
+  lib/libicu_jni.so \
+  lib/libicuuc.so \
+  lib/libjavacore.so \
+  lib/libjdwp.so \
+  lib/liblzma.so \
+  lib/libmeminfo.so \
+  lib/libnativebridge.so \
+  lib/libnativehelper.so \
+  lib/libnativeloader.so \
+  lib/libnpt.so \
+  lib/libopenjdkd.so \
+  lib/libopenjdkjvmd.so \
+  lib/libopenjdkjvm.so \
+  lib/libopenjdkjvmtid.so \
+  lib/libopenjdkjvmti.so \
+  lib/libopenjdk.so \
+  lib/libpac.so \
+  lib/libprocinfo.so \
+  lib/libprofiled.so \
+  lib/libprofile.so \
+  lib/libsigchain.so \
+  lib/libunwindstack.so \
+  lib/libvixld.so \
+  lib/libvixl.so \
+  lib/libziparchive.so \
+  lib/libz.so \
+  lib64/libadbconnectiond.so \
+  lib64/libadbconnection.so \
+  lib64/libandroidicu.so \
   lib64/libandroidio.so \
+  lib64/libartbased.so \
+  lib64/libartbase.so \
+  lib64/libart-compiler.so \
+  lib64/libartd-compiler.so \
+  lib64/libartd-dexlayout.so \
+  lib64/libartd-disassembler.so \
+  lib64/libart-dexlayout.so \
+  lib64/libart-disassembler.so \
+  lib64/libartd.so \
+  lib64/libartpalette.so \
+  lib64/libart.so \
+  lib64/libbacktrace.so \
+  lib64/libbase.so \
+  lib64/libcrypto.so \
+  lib64/libdexfiled_external.so \
+  lib64/libdexfiled.so \
+  lib64/libdexfile_external.so \
+  lib64/libdexfile.so \
+  lib64/libdexfile_support.so \
+  lib64/libdt_fd_forward.so \
+  lib64/libdt_socket.so \
+  lib64/libexpat.so \
+  lib64/libicui18n.so \
+  lib64/libicu_jni.so \
+  lib64/libicuuc.so \
+  lib64/libjavacore.so \
+  lib64/libjdwp.so \
+  lib64/liblzma.so \
+  lib64/libmeminfo.so \
+  lib64/libnativebridge.so \
+  lib64/libnativehelper.so \
+  lib64/libnativeloader.so \
+  lib64/libnpt.so \
+  lib64/libopenjdkd.so \
+  lib64/libopenjdkjvmd.so \
+  lib64/libopenjdkjvm.so \
+  lib64/libopenjdkjvmtid.so \
+  lib64/libopenjdkjvmti.so \
+  lib64/libopenjdk.so \
+  lib64/libpac.so \
+  lib64/libprocinfo.so \
+  lib64/libprofiled.so \
+  lib64/libprofile.so \
+  lib64/libsigchain.so \
+  lib64/libunwindstack.so \
+  lib64/libvixld.so \
+  lib64/libvixl.so \
+  lib64/libziparchive.so \
+  lib64/libz.so \
 
+PRIVATE_CONSCRYPT_APEX_DEPENDENCY_LIBS := \
+  lib/libcrypto.so \
+  lib/libjavacrypto.so \
+  lib/libssl.so \
+  lib64/libcrypto.so \
+  lib64/libjavacrypto.so \
+  lib64/libssl.so \
+
+# Generate copies of Bionic bootstrap artifacts and ART APEX
+# libraries in the `system` (TARGET_OUT) directory. This is dangerous
+# as these files could inadvertently stay in this directory and be
+# included in a system image.
+#
 # Copy some libraries into `$(TARGET_OUT)/lib(64)` (the
 # `/system/lib(64)` directory to be sync'd to the target) for ART testing
 # purposes:
 # - Bionic bootstrap libraries, copied from
 #   `$(TARGET_OUT)/lib(64)/bootstrap` (the `/system/lib(64)/bootstrap`
 #   directory to be sync'd to the target);
-# - Some libraries which are part of the Runtime APEX; if the product
+# - Programs and libraries from the ART APEX; if the product
 #   to build uses flattened APEXes, these libraries are copied from
-#   `$(TARGET_OUT)/apex/com.android.runtime.debug` (the flattened
-#   (Debug) Runtime APEX directory to be sync'd to the target);
+#   `$(TARGET_OUT)/apex/com.android.art.debug` (the flattened
+#   (Debug) ART APEX directory to be sync'd to the target);
 #   otherwise, they are copied from
-#   `$(TARGET_OUT)/../apex/com.android.runtime.debug` (the local
-#   directory under the build tree containing the (Debug) Runtime APEX
+#   `$(TARGET_OUT)/../apex/com.android.art.debug` (the local
+#   directory under the build tree containing the (Debug) ART APEX
 #   artifacts, which is not sync'd to the target).
+# - Libraries from the Conscrypt APEX may be loaded during golem runs.
 #
-# TODO(b/121117762): Remove this when the ART Buildbot and Golem have
-# full support for the Runtime APEX.
+# This target is only used by Golem now.
+#
+# NB Android build does not use cp from:
+#  $ANDROID_BUILD_TOP/prebuilts/build-tools/path/{linux-x86,darwin-x86}
+# which has a non-standard set of command-line flags.
+#
+# TODO(b/129332183): Remove this when Golem has full support for the
+# ART APEX.
 .PHONY: standalone-apex-files
-standalone-apex-files: libc.bootstrap libdl.bootstrap libm.bootstrap linker com.android.runtime.debug
+standalone-apex-files: libc.bootstrap \
+                       libdl.bootstrap \
+                       libdl_android.bootstrap \
+                       libm.bootstrap \
+                       linker \
+                       $(DEBUG_ART_APEX) \
+                       $(CONSCRYPT_APEX)
 	for f in $(PRIVATE_BIONIC_FILES); do \
 	  tf=$(TARGET_OUT)/$$f; \
 	  if [ -f $$tf ]; then cp -f $$tf $$(echo $$tf | sed 's,bootstrap/,,'); fi; \
 	done
 	if [ "x$(TARGET_FLATTEN_APEX)" = xtrue ]; then \
-	  runtime_apex_orig_dir=$(TARGET_OUT)/apex/com.android.runtime.debug; \
+          apex_orig_dir=$(TARGET_OUT)/apex; \
 	else \
-	  runtime_apex_orig_dir=$(TARGET_OUT)/../apex/com.android.runtime.debug; \
+          apex_orig_dir=""; \
 	fi; \
-	for f in $(PRIVATE_RUNTIME_DEPENDENCY_LIBS); do \
-	  tf="$$runtime_apex_orig_dir/$$f"; \
+	art_apex_orig_dir=$$apex_orig_dir/$(DEBUG_ART_APEX); \
+	for f in $(PRIVATE_ART_APEX_DEPENDENCY_LIBS) $(PRIVATE_ART_APEX_DEPENDENCY_FILES); do \
+	  tf="$$art_apex_orig_dir/$$f"; \
+	  df="$(TARGET_OUT)/$$f"; \
+	  if [ -f $$tf ]; then \
+            if [ -h $$df ]; then rm $$df; fi; \
+            cp -fd $$tf $$df; \
+          fi; \
+	done; \
+	conscrypt_apex_orig_dir=$$apex_orig_dir/$(CONSCRYPT_APEX); \
+	for f in $(PRIVATE_CONSCRYPT_APEX_DEPENDENCY_LIBS); do \
+	  tf="$$conscrypt_apex_orig_dir/$$f"; \
 	  if [ -f $$tf ]; then cp -f $$tf $(TARGET_OUT)/$$f; fi; \
-	done
+	done; \
 
 ########################################################################
 # Phony target for only building what go/lem requires for pushing ART on /data.
@@ -576,33 +684,53 @@
 .PHONY: build-art-target-golem
 # Also include libartbenchmark, we always include it when running golem.
 # libstdc++ is needed when building for ART_TARGET_LINUX.
+
+# Also include the bootstrap Bionic libraries (libc, libdl, libdl_android,
+# libm). These are required as the "main" libc, libdl, libdl_android, and libm
+# have moved to the ART APEX. This is a temporary change needed until Golem
+# fully supports the ART APEX.
 #
-# Also include the bootstrap Bionic libraries (libc, libdl, libm).
-# These are required as the "main" libc, libdl, and libm have moved to
-# the Runtime APEX. This is a temporary change needed until Golem
-# fully supports the Runtime APEX.
-# TODO(b/121117762): Remove this when the ART Buildbot and Golem have
-# full support for the Runtime APEX.
+# TODO(b/129332183): Remove this when Golem has full support for the
+# ART APEX.
+
+# Also include:
+# - a copy of the ICU prebuilt .dat file in /system/etc/icu on target
+#   (see module `icu-data-art-test-i18n`); and
+# so that it can be found even if the ART APEX is not available, by setting the
+# environment variable `ART_TEST_ANDROID_ART_ROOT` to "/system" on device. This
+# is a temporary change needed until Golem fully supports the ART APEX.
 #
-# Also include a copy of the ICU .dat prebuilt files in
-# /system/etc/icu on target (see module `icu-data-art-test`), so that
-# it can found even if the Runtime APEX is not available, by setting
-# the environment variable `ART_TEST_ANDROID_RUNTIME_ROOT` to
-# "/system" on device. This is a temporary change needed until Golem
-# fully supports the Runtime APEX.
-# TODO(b/121117762): Remove this when the ART Buildbot and Golem have
-# full support for the Runtime APEX.
+# TODO(b/129332183): Remove this when Golem has full support for the
+# ART APEX.
+
+# Also include:
+# - a copy of the time zone data prebuilt files in
+#   /system/etc/tzdata_module/etc/tz and /system/etc/tzdata_module/etc/icu
+#   on target, (see modules `tzdata-art-test-tzdata`,
+#   `tzlookup.xml-art-test-tzdata`, and `tz_version-art-test-tzdata`, and
+#   `icu_overlay-art-test-tzdata`)
+# so that they can be found even if the Time Zone Data APEX is not available,
+# by setting the environment variable `ART_TEST_ANDROID_TZDATA_ROOT`
+# to "/system/etc/tzdata_module" on device. This is a temporary change needed
+# until Golem fully supports the Time Zone Data APEX.
+#
+# TODO(b/129332183): Remove this when Golem has full support for the
+# ART APEX (and TZ Data APEX).
+
 ART_TARGET_SHARED_LIBRARY_BENCHMARK := $(TARGET_OUT_SHARED_LIBRARIES)/libartbenchmark.so
 build-art-target-golem: dex2oat dalvikvm linker libstdc++ \
                         $(TARGET_OUT_EXECUTABLES)/art \
                         $(TARGET_OUT)/etc/public.libraries.txt \
                         $(ART_TARGET_DEX_DEPENDENCIES) \
-                        $(ART_TARGET_SHARED_LIBRARY_DEPENDENCIES) \
+                        $(ART_DEBUG_TARGET_SHARED_LIBRARY_DEPENDENCIES) \
                         $(ART_TARGET_SHARED_LIBRARY_BENCHMARK) \
                         $(TARGET_CORE_IMG_OUT_BASE).art \
                         $(TARGET_CORE_IMG_OUT_BASE)-interpreter.art \
-                        libc.bootstrap libdl.bootstrap libm.bootstrap \
-                        icu-data-art-test \
+                        libartpalette-system \
+                        libc.bootstrap libdl.bootstrap libdl_android.bootstrap libm.bootstrap \
+                        icu-data-art-test-i18n \
+                        tzdata-art-test-tzdata tzlookup.xml-art-test-tzdata \
+                        tz_version-art-test-tzdata icu_overlay-art-test-tzdata \
                         standalone-apex-files
 	# remove debug libraries from public.libraries.txt because golem builds
 	# won't have it.
@@ -622,7 +750,7 @@
 ########################################################################
 # Phony target for building what go/lem requires for syncing /system to target.
 .PHONY: build-art-unbundled-golem
-build-art-unbundled-golem: art-runtime linker oatdump $(TARGET_CORE_JARS) crash_dump
+build-art-unbundled-golem: art-runtime linker oatdump $(ART_APEX_JARS) conscrypt crash_dump
 
 ########################################################################
 # Rules for building all dependencies for tests.
diff --git a/CleanSpec.mk b/CleanSpec.mk
index b40f471..fe613b2 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -62,6 +62,49 @@
 $(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/nativetest*/)
 $(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/nativetest*/)
 
+# Clean up duplicate compiles between static and shared compiles of libart and libartd
+$(call add-clean-step, rm -rf $(OUT_DIR)/soong/.intermediates/art/runtime/libart/*shared*/obj)
+$(call add-clean-step, rm -rf $(OUT_DIR)/soong/.intermediates/art/runtime/libartd/*shared*/obj)
+
+# Force regeneration of .apex files after removal of time zone data files from the runtime APEX
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/apex/com.android.runtime.*)
+
+# Remove artifacts that used to be generated (as a workaround for
+# improper Runtime APEX support) by tools/buildbot-build.sh via the
+# `standalone-apex-files` Make rule.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/bin)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib*)
+# Remove artifacts that used to be generated (as a workaround for
+# improper Runtime APEX support) by tools/buildbot-build.sh via the
+# `icu-data-art-test` Make rule.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/etc/icu)
+
+# Remove ART test target artifacts.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/nativetest*/)
+
+# Remove all APEX artifacts after the change to use the Testing
+# Runtime APEX in lieu of the Debug Runtime APEX for ART testing.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/apex)
+
+# Remove the icu .dat file from /apex/com.android.runtime and the host equivalent.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/apex)
+$(call add-clean-step, rm -rf $(HOST_OUT)/com.android.runtime/etc/icu/*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/etc/icu)
+
+# Remove all APEX artifacts for the Runtime/ART APEX split.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/apex)
+$(call add-clean-step, rm -rf $(HOST_OUT)/apex)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/apex)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/symbols/apex)
+
+# Remove dex2oat artifacts for boot image extensions (workaround for broken dependencies).
+$(call add-clean-step, find $(OUT_DIR) -name "*.oat" -o -name "*.odex" -o -name "*.art" -o -name '*.vdex' | xargs rm -f)
+$(call add-clean-step, find $(OUT_DIR) -name "*.oat" -o -name "*.odex" -o -name "*.art" -o -name '*.vdex' | xargs rm -f)
+$(call add-clean-step, find $(OUT_DIR) -name "*.oat" -o -name "*.odex" -o -name "*.art" -o -name '*.vdex' | xargs rm -f)
+
+# Remove empty dir for art APEX because it will be created on demand while mounting release|debug
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/apex/com.android.art)
+
 # ************************************************
 # NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
 # ************************************************
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index 60ad35c..6ab01dc 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -3,6 +3,8 @@
 
 [Builtin Hooks]
 cpplint = true
+bpfmt = true
+gofmt = true
 
 [Builtin Hooks Options]
 # Cpplint prints nothing unless there were errors.
diff --git a/TEST_MAPPING b/TEST_MAPPING
new file mode 100644
index 0000000..28dab29
--- /dev/null
+++ b/TEST_MAPPING
@@ -0,0 +1,10 @@
+{
+  "presubmit": [
+    {
+      "name": "CtsJdwpTestCases"
+    },
+    {
+      "name": "BootImageProfileTest"
+    }
+  ]
+}
diff --git a/adbconnection/Android.bp b/adbconnection/Android.bp
index 5f78278..b03cd0d 100644
--- a/adbconnection/Android.bp
+++ b/adbconnection/Android.bp
@@ -28,6 +28,7 @@
 
     shared_libs: [
         "libbase",
+        "libadbconnection_client",
     ],
     target: {
         host: {
@@ -40,15 +41,6 @@
         "libnativehelper_header_only",
         "dt_fd_forward_export",
     ],
-    multilib: {
-        lib32: {
-            suffix: "32",
-        },
-        lib64: {
-            suffix: "64",
-        },
-    },
-    symlink_preferred_arch: true,
     required: [
         "libjdwp",
         "libdt_fd_forward",
@@ -62,6 +54,10 @@
         "libart",
         "libartbase",
     ],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_library {
@@ -74,4 +70,7 @@
         "libartd",
         "libartbased",
     ],
+    apex_available: [
+        "com.android.art.debug",
+    ],
 }
diff --git a/adbconnection/adbconnection.cc b/adbconnection/adbconnection.cc
index e1b5b62..7ac2edb 100644
--- a/adbconnection/adbconnection.cc
+++ b/adbconnection/adbconnection.cc
@@ -15,9 +15,12 @@
  */
 
 #include <array>
+#include <cstddef>
+#include <iterator>
 
 #include "adbconnection.h"
 
+#include "adbconnection/client.h"
 #include "android-base/endian.h"
 #include "android-base/stringprintf.h"
 #include "base/file_utils.h"
@@ -25,6 +28,7 @@
 #include "base/macros.h"
 #include "base/mutex.h"
 #include "base/socket_peer_is_trusted.h"
+#include "debugger.h"
 #include "jni/java_vm_ext.h"
 #include "jni/jni_env_ext.h"
 #include "mirror/throwable.h"
@@ -34,20 +38,24 @@
 #include "scoped_thread_state_change-inl.h"
 #include "well_known_classes.h"
 
-#include "jdwp/jdwp_priv.h"
-
 #include "fd_transport.h"
 
 #include "poll.h"
 
 #include <sys/ioctl.h>
 #include <sys/socket.h>
+#include <sys/uio.h>
 #include <sys/un.h>
 #include <sys/eventfd.h>
 #include <jni.h>
 
 namespace adbconnection {
 
+static constexpr size_t kJdwpHeaderLen = 11U;
+/* DDM support */
+static constexpr uint8_t kJdwpDdmCmdSet = 199U;  // 0xc7, or 'G'+128
+static constexpr uint8_t kJdwpDdmCmd = 1U;
+
 // Messages sent from the transport
 using dt_fd_forward::kListenStartMessage;
 using dt_fd_forward::kListenEndMessage;
@@ -66,7 +74,6 @@
 
 static constexpr int kEventfdLocked = 0;
 static constexpr int kEventfdUnlocked = 1;
-static constexpr int kControlSockSendTimeout = 10;
 
 static constexpr size_t kPacketHeaderLen = 11;
 static constexpr off_t kPacketSizeOff = 0;
@@ -77,7 +84,8 @@
 static constexpr uint8_t kDdmCommandSet = 199;
 static constexpr uint8_t kDdmChunkCommand = 1;
 
-static AdbConnectionState* gState;
+static std::optional<AdbConnectionState> gState;
+static std::optional<pthread_t> gPthread;
 
 static bool IsDebuggingPossible() {
   return art::Dbg::IsJdwpAllowed();
@@ -92,9 +100,23 @@
   }
 }
 
-// The debugger should begin shutting down since the runtime is ending. We don't actually do
-// anything here. The real shutdown has already happened as far as the agent is concerned.
-void AdbConnectionDebuggerController::StopDebugger() { }
+// The debugger should have already shut down since the runtime is ending. As far
+// as the agent is concerned shutdown already happened when we went to kDeath
+// state. We need to clean up our threads still though and this is a good time
+// to do it since the runtime is still able to handle all the normal state
+// transitions.
+void AdbConnectionDebuggerController::StopDebugger() {
+  // Stop our threads.
+  gState->StopDebuggerThreads();
+  // Wait for our threads to actually return and cleanup the pthread.
+  if (gPthread.has_value()) {
+    void* ret_unused;
+    if (TEMP_FAILURE_RETRY(pthread_join(gPthread.value(), &ret_unused)) != 0) {
+      PLOG(ERROR) << "Failed to join debugger threads!";
+    }
+    gPthread.reset();
+  }
+}
 
 bool AdbConnectionDebuggerController::IsDebuggerConfigured() {
   return IsDebuggingPossible() && !art::Runtime::Current()->GetJdwpOptions().empty();
@@ -125,7 +147,7 @@
     controller_(this),
     ddm_callback_(this),
     sleep_event_fd_(-1),
-    control_sock_(-1),
+    control_ctx_(nullptr, adbconnection_client_destroy),
     local_agent_control_sock_(-1),
     remote_agent_control_sock_(-1),
     adb_connection_socket_(-1),
@@ -149,6 +171,15 @@
   art::Runtime::Current()->GetRuntimeCallbacks()->AddDebuggerControlCallback(&controller_);
 }
 
+AdbConnectionState::~AdbConnectionState() {
+  // Remove the startup callback.
+  art::Thread* self = art::Thread::Current();
+  if (self != nullptr) {
+    art::ScopedObjectAccess soa(self);
+    art::Runtime::Current()->GetRuntimeCallbacks()->RemoveDebuggerControlCallback(&controller_);
+  }
+}
+
 static jobject CreateAdbConnectionThread(art::Thread* thr) {
   JNIEnv* env = thr->GetJniEnv();
   // Move to native state to talk with the jnienv api.
@@ -173,7 +204,6 @@
 
 static void* CallbackFunction(void* vdata) {
   std::unique_ptr<CallbackData> data(reinterpret_cast<CallbackData*>(vdata));
-  CHECK(data->this_ == gState);
   art::Thread* self = art::Thread::Attach(kAdbConnectionThreadName,
                                           true,
                                           data->thr_);
@@ -199,10 +229,6 @@
   int detach_result = art::Runtime::Current()->GetJavaVM()->DetachCurrentThread();
   CHECK_EQ(detach_result, 0);
 
-  // Get rid of the connection
-  gState = nullptr;
-  delete data->this_;
-
   return nullptr;
 }
 
@@ -251,14 +277,15 @@
   ScopedLocalRef<jobject> thr(soa.Env(), CreateAdbConnectionThread(soa.Self()));
   // Note: Using pthreads instead of std::thread to not abort when the thread cannot be
   //       created (exception support required).
-  pthread_t pthread;
   std::unique_ptr<CallbackData> data(new CallbackData { this, soa.Env()->NewGlobalRef(thr.get()) });
   started_debugger_threads_ = true;
-  int pthread_create_result = pthread_create(&pthread,
+  gPthread.emplace();
+  int pthread_create_result = pthread_create(&gPthread.value(),
                                              nullptr,
                                              &CallbackFunction,
                                              data.get());
   if (pthread_create_result != 0) {
+    gPthread.reset();
     started_debugger_threads_ = false;
     // If the create succeeded the other thread will call EndThreadBirth.
     art::Runtime* runtime = art::Runtime::Current();
@@ -333,7 +360,7 @@
   // the adb_write_event_fd_ will ensure that the adb_connection_socket_ will not go away until
   // after we have sent our data.
   static constexpr uint32_t kDdmPacketHeaderSize =
-      kJDWPHeaderLen       // jdwp command packet size
+      kJdwpHeaderLen       // jdwp command packet size
       + sizeof(uint32_t)   // Type
       + sizeof(uint32_t);  // length
   alignas(sizeof(uint32_t)) std::array<uint8_t, kDdmPacketHeaderSize> pkt;
@@ -352,9 +379,9 @@
   switch (packet_type) {
     case DdmPacketType::kCmd: {
       // Now the cmd-set
-      *(pkt_data++) = kJDWPDdmCmdSet;
+      *(pkt_data++) = kJdwpDdmCmdSet;
       // Now the command
-      *(pkt_data++) = kJDWPDdmCmd;
+      *(pkt_data++) = kJdwpDdmCmd;
       break;
     }
     case DdmPacketType::kReply: {
@@ -447,57 +474,18 @@
 }
 
 android::base::unique_fd AdbConnectionState::ReadFdFromAdb() {
-  // We don't actually care about the data that is sent. We do need to receive something though.
-  char dummy = '!';
-  union {
-    cmsghdr cm;
-    char buffer[CMSG_SPACE(sizeof(int))];
-  } cm_un;
-
-  iovec iov;
-  iov.iov_base       = &dummy;
-  iov.iov_len        = 1;
-
-  msghdr msg;
-  msg.msg_name       = nullptr;
-  msg.msg_namelen    = 0;
-  msg.msg_iov        = &iov;
-  msg.msg_iovlen     = 1;
-  msg.msg_flags      = 0;
-  msg.msg_control    = cm_un.buffer;
-  msg.msg_controllen = sizeof(cm_un.buffer);
-
-  cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
-  cmsg->cmsg_len   = msg.msg_controllen;
-  cmsg->cmsg_level = SOL_SOCKET;
-  cmsg->cmsg_type  = SCM_RIGHTS;
-  (reinterpret_cast<int*>(CMSG_DATA(cmsg)))[0] = -1;
-
-  int rc = TEMP_FAILURE_RETRY(recvmsg(control_sock_, &msg, 0));
-
-  if (rc <= 0) {
-    return android::base::unique_fd(-1);
-  } else {
-    VLOG(jdwp) << "Fds have been received from ADB!";
-  }
-
-  return android::base::unique_fd((reinterpret_cast<int*>(CMSG_DATA(cmsg)))[0]);
+  return android::base::unique_fd(adbconnection_client_receive_jdwp_fd(control_ctx_.get()));
 }
 
 bool AdbConnectionState::SetupAdbConnection() {
-  int        sleep_ms     = 500;
-  const int  sleep_max_ms = 2*1000;
+  int sleep_ms = 500;
+  const int sleep_max_ms = 2 * 1000;
 
-  android::base::unique_fd sock(socket(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0));
-  if (sock < 0) {
-    PLOG(ERROR) << "Could not create ADB control socket";
-    return false;
-  }
-  struct timeval timeout;
-  timeout.tv_sec = kControlSockSendTimeout;
-  timeout.tv_usec = 0;
-  setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, &timeout, sizeof(timeout));
-  int32_t pid = getpid();
+  const AdbConnectionClientInfo infos[] = {
+    {.type = AdbConnectionClientInfoType::pid, .data.pid = static_cast<uint64_t>(getpid())},
+    {.type = AdbConnectionClientInfoType::debuggable, .data.debuggable = true},
+  };
+  const AdbConnectionClientInfo* info_ptrs[] = {&infos[0], &infos[1]};
 
   while (!shutting_down_) {
     // If adbd isn't running, because USB debugging was disabled or
@@ -511,58 +499,36 @@
     // of battery life, we should consider timing out and giving
     // up after a few minutes in case somebody ships an app with
     // the debuggable flag set.
-    int ret = connect(sock, &control_addr_.controlAddrPlain, control_addr_len_);
-    if (ret == 0) {
-      bool trusted = sock >= 0 && art::SocketPeerIsTrusted(sock);
-      if (!trusted) {
-        LOG(ERROR) << "adb socket is not trusted. Aborting connection.";
-        if (sock >= 0 && shutdown(sock, SHUT_RDWR)) {
-          PLOG(ERROR) << "trouble shutting down socket";
-        }
-        return false;
-      }
-      /* now try to send our pid to the ADB daemon */
-      ret = TEMP_FAILURE_RETRY(send(sock, &pid, sizeof(pid), 0));
-      if (ret == sizeof(pid)) {
-        VLOG(jdwp) << "PID " << pid << " sent to adb";
-        control_sock_ = std::move(sock);
-        return true;
-      } else {
-        PLOG(ERROR) << "Weird, can't send JDWP process pid to ADB. Aborting connection.";
-        return false;
-      }
-    } else {
-      if (VLOG_IS_ON(jdwp)) {
-        PLOG(ERROR) << "Can't connect to ADB control socket. Will retry.";
-      }
+    control_ctx_.reset(adbconnection_client_new(info_ptrs, std::size(infos)));
+    if (control_ctx_) {
+      return true;
+    }
 
-      usleep(sleep_ms * 1000);
+    // We failed to connect.
+    usleep(sleep_ms * 1000);
 
-      sleep_ms += (sleep_ms >> 1);
-      if (sleep_ms > sleep_max_ms) {
-        sleep_ms = sleep_max_ms;
-      }
+    sleep_ms += (sleep_ms >> 1);
+    if (sleep_ms > sleep_max_ms) {
+      sleep_ms = sleep_max_ms;
     }
   }
+
   return false;
 }
 
 void AdbConnectionState::RunPollLoop(art::Thread* self) {
   CHECK_NE(agent_name_, "");
   CHECK_EQ(self->GetState(), art::kNative);
-  // TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
-  // exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
-  // http://b/71769596
-  // art::Locks::mutator_lock_->AssertNotHeld(self);
+  art::Locks::mutator_lock_->AssertNotHeld(self);
   self->SetState(art::kWaitingInMainDebuggerLoop);
   // shutting_down_ set by StopDebuggerThreads
   while (!shutting_down_) {
-    // First get the control_sock_ from adb if we don't have one. We only need to do this once.
-    if (control_sock_ == -1 && !SetupAdbConnection()) {
+    // First, connect to adbd if we haven't already.
+    if (!control_ctx_ && !SetupAdbConnection()) {
       LOG(ERROR) << "Failed to setup adb connection.";
       return;
     }
-    while (!shutting_down_ && control_sock_ != -1) {
+    while (!shutting_down_ && control_ctx_) {
       bool should_listen_on_connection = !agent_has_socket_ && !sent_agent_fds_;
       struct pollfd pollfds[4] = {
         { sleep_event_fd_, POLLIN, 0 },
@@ -570,7 +536,8 @@
         { (agent_loaded_ ? local_agent_control_sock_ : -1), POLLIN, 0 },
         // Check for the control_sock_ actually going away. Only do this if we don't have an active
         // connection.
-        { (adb_connection_socket_ == -1 ? control_sock_ : -1), POLLIN | POLLRDHUP, 0 },
+        { (adb_connection_socket_ == -1 ? adbconnection_client_pollfd(control_ctx_.get()) : -1),
+          POLLIN | POLLRDHUP, 0 },
         // if we have not loaded the agent either the adb_connection_socket_ is -1 meaning we don't
         // have a real connection yet or the socket through adb needs to be listened to for incoming
         // data that the agent or this plugin can handle.
@@ -620,10 +587,10 @@
         {
           // Hold onto this lock so that concurrent ddm publishes don't try to use an illegal fd.
           ScopedEventFdLock sefdl(adb_write_event_fd_);
-          android::base::unique_fd new_fd(ReadFdFromAdb());
+          android::base::unique_fd new_fd(adbconnection_client_receive_jdwp_fd(control_ctx_.get()));
           if (new_fd == -1) {
             // Something went wrong. We need to retry getting the control socket.
-            control_sock_.reset();
+            control_ctx_.reset();
             break;
           } else if (adb_connection_socket_ != -1) {
             // We already have a connection.
@@ -648,7 +615,7 @@
         // Reset the connection since we don't have an active socket through the adb server.
         DCHECK(!agent_has_socket_) << "We shouldn't be doing anything if there is already a "
                                    << "connection active";
-        control_sock_.reset();
+        control_ctx_.reset();
         break;
       } else if (FlagsSet(adb_socket_poll.revents, POLLIN)) {
         DCHECK(!agent_has_socket_);
@@ -881,20 +848,17 @@
 }
 
 // The plugin initialization function.
-extern "C" bool ArtPlugin_Initialize() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+extern "C" bool ArtPlugin_Initialize() {
   DCHECK(art::Runtime::Current()->GetJdwpProvider() == art::JdwpProvider::kAdbConnection);
   // TODO Provide some way for apps to set this maybe?
-  DCHECK(gState == nullptr);
-  gState = new AdbConnectionState(kDefaultJdwpAgentName);
+  gState.emplace(kDefaultJdwpAgentName);
   return ValidateJdwpOptions(art::Runtime::Current()->GetJdwpOptions());
 }
 
 extern "C" bool ArtPlugin_Deinitialize() {
-  gState->StopDebuggerThreads();
-  if (!gState->DebuggerThreadsStarted()) {
-    // If debugger threads were started then those threads will delete the state once they are done.
-    delete gState;
-  }
+  // We don't actually have to do anything here. The debugger (if one was
+  // attached) was shutdown by the move to the kDeath runtime phase and the
+  // adbconnection threads were shutdown by StopDebugger.
   return true;
 }
 
diff --git a/adbconnection/adbconnection.h b/adbconnection/adbconnection.h
index c51f981..32f42ba 100644
--- a/adbconnection/adbconnection.h
+++ b/adbconnection/adbconnection.h
@@ -18,10 +18,12 @@
 #define ART_ADBCONNECTION_ADBCONNECTION_H_
 
 #include <stdint.h>
+#include <memory>
 #include <vector>
 #include <limits>
 
 #include "android-base/unique_fd.h"
+#include "adbconnection/client.h"
 
 #include "base/mutex.h"
 #include "base/array_ref.h"
@@ -73,6 +75,7 @@
 class AdbConnectionState {
  public:
   explicit AdbConnectionState(const std::string& name);
+  ~AdbConnectionState();
 
   // Called on the listening thread to start dealing with new input. thr is used to attach the new
   // thread to the runtime.
@@ -127,8 +130,8 @@
   // Eventfd used to allow the StopDebuggerThreads function to wake up sleeping threads
   android::base::unique_fd sleep_event_fd_;
 
-  // Socket that we use to talk to adbd.
-  android::base::unique_fd control_sock_;
+  // Context which wraps the socket which we use to talk to adbd.
+  std::unique_ptr<AdbConnectionClientContext, void(*)(AdbConnectionClientContext*)> control_ctx_;
 
   // Socket that we use to talk to the agent (if it's loaded).
   android::base::unique_fd local_agent_control_sock_;
diff --git a/benchmark/stringbuilder-append/info.txt b/benchmark/stringbuilder-append/info.txt
new file mode 100644
index 0000000..ae58812
--- /dev/null
+++ b/benchmark/stringbuilder-append/info.txt
@@ -0,0 +1 @@
+Benchmarks for the StringBuilder append pattern.
diff --git a/benchmark/stringbuilder-append/src/StringBuilderAppendBenchmark.java b/benchmark/stringbuilder-append/src/StringBuilderAppendBenchmark.java
new file mode 100644
index 0000000..1550e81
--- /dev/null
+++ b/benchmark/stringbuilder-append/src/StringBuilderAppendBenchmark.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class StringBuilderAppendBenchmark {
+    public static String string1 = "s1";
+    public static String string2 = "s2";
+    public static String longString1 = "This is a long string 1";
+    public static String longString2 = "This is a long string 2";
+    public static int int1 = 42;
+
+    public void timeAppendStrings(int count) {
+        String s1 = string1;
+        String s2 = string2;
+        int sum = 0;
+        for (int i = 0; i < count; ++i) {
+            String result = s1 + s2;
+            sum += result.length();  // Make sure the append is not optimized away.
+        }
+        if (sum != count * (s1.length() + s2.length())) {
+            throw new AssertionError();
+        }
+    }
+
+    public void timeAppendLongStrings(int count) {
+        String s1 = longString1;
+        String s2 = longString2;
+        int sum = 0;
+        for (int i = 0; i < count; ++i) {
+            String result = s1 + s2;
+            sum += result.length();  // Make sure the append is not optimized away.
+        }
+        if (sum != count * (s1.length() + s2.length())) {
+            throw new AssertionError();
+        }
+    }
+
+    public void timeAppendStringAndInt(int count) {
+        String s1 = string1;
+        int i1 = int1;
+        int sum = 0;
+        for (int i = 0; i < count; ++i) {
+            String result = s1 + i1;
+            sum += result.length();  // Make sure the append is not optimized away.
+        }
+        if (sum != count * (s1.length() + Integer.toString(i1).length())) {
+            throw new AssertionError();
+        }
+    }
+}
diff --git a/build/Android.bp b/build/Android.bp
index 7b807d5..946e5a6 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -23,7 +23,7 @@
     "android-cloexec-open",
     "bugprone-argument-comment",
     "bugprone-lambda-function-name",
-    "bugprone-unused-raii",  // Protect scoped things like MutexLock.
+    "bugprone-unused-raii", // Protect scoped things like MutexLock.
     "bugprone-unused-return-value",
     "bugprone-virtual-near-miss",
     "modernize-use-bool-literals",
@@ -58,6 +58,12 @@
     // Additional flags are computed by art.go
 
     name: "art_defaults",
+
+    // This is the default visibility for the //art package, but we repeat it
+    // here so that it gets merged with other visibility rules in modules
+    // extending these defaults.
+    visibility: ["//art:__subpackages__"],
+
     cflags: [
         // Base set of cflags used by all things ART.
         "-fno-rtti",
@@ -75,7 +81,8 @@
 
         // Warn about thread safety violations with clang.
         "-Wthread-safety",
-        "-Wthread-safety-negative",
+        // TODO(b/144045034): turn on -Wthread-safety-negative
+        //"-Wthread-safety-negative",
 
         // Warn if switch fallthroughs aren't annotated.
         "-Wimplicit-fallthrough",
@@ -108,6 +115,25 @@
         "-D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS",
     ],
 
+    arch: {
+        x86: {
+            avx2: {
+                cflags: [
+                    "-mavx2",
+                    "-mfma",
+                ],
+            },
+        },
+        x86_64: {
+            avx2: {
+                cflags: [
+                    "-mavx2",
+                    "-mfma",
+                ],
+            },
+        },
+    },
+
     target: {
         android: {
             cflags: [
@@ -119,10 +145,10 @@
                 // "-marm",
                 // "-mapcs",
             ],
-            include_dirs: [
+            header_libs: [
                 // We optimize Thread::Current() with a direct TLS access. This requires access to a
-                //  private Bionic header.
-                "bionic/libc/private",
+                //  platform specific Bionic header.
+                "bionic_libc_platform_headers",
             ],
         },
         linux: {
@@ -132,6 +158,18 @@
                 "-Wmissing-noreturn",
             ],
         },
+        linux_bionic: {
+            header_libs: [
+                // We optimize Thread::Current() with a direct TLS access. This requires access to a
+                //  platform specific Bionic header.
+                "bionic_libc_platform_headers",
+            ],
+            strip: {
+                // Do not strip art libs when building for linux-bionic.
+                // Otherwise we can't get any symbols out of crashes.
+                none: true,
+            },
+        },
         darwin: {
             enabled: false,
         },
@@ -160,12 +198,6 @@
         arm64: {
             cflags: ["-DART_ENABLE_CODEGEN_arm64"],
         },
-        mips: {
-            cflags: ["-DART_ENABLE_CODEGEN_mips"],
-        },
-        mips64: {
-            cflags: ["-DART_ENABLE_CODEGEN_mips64"],
-        },
         x86: {
             cflags: ["-DART_ENABLE_CODEGEN_x86"],
         },
@@ -174,10 +206,6 @@
         },
     },
 
-    include_dirs: [
-        "external/vixl/src",
-    ],
-
     tidy_checks: art_clang_tidy_errors + art_clang_tidy_disabled,
     tidy_checks_as_errors: art_clang_tidy_errors,
 
@@ -199,6 +227,7 @@
 
 art_debug_defaults {
     name: "art_debug_defaults",
+    visibility: ["//art:__subpackages__"],
     cflags: [
         "-DDYNAMIC_ANNOTATIONS_ENABLED=1",
         "-DVIXL_DEBUG",
diff --git a/build/Android.common.mk b/build/Android.common.mk
index e96e3ed..4d702e4 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -17,7 +17,7 @@
 ifndef ART_ANDROID_COMMON_MK
 ART_ANDROID_COMMON_MK = true
 
-ART_TARGET_SUPPORTED_ARCH := arm arm64 mips mips64 x86 x86_64
+ART_TARGET_SUPPORTED_ARCH := arm arm64 x86 x86_64
 ART_HOST_SUPPORTED_ARCH := x86 x86_64
 ART_DEXPREOPT_BOOT_JAR_DIR := system/framework
 
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 625444f..3403f2d 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -74,7 +74,7 @@
 TARGET_CORE_IMG_LOCATION := $(ART_TARGET_TEST_OUT)/core.art
 
 # Modules to compile for core.art.
-CORE_IMG_JARS := core-oj core-libart okhttp bouncycastle apache-xml
+CORE_IMG_JARS := core-oj core-libart core-icu4j okhttp bouncycastle apache-xml
 HOST_CORE_IMG_JARS   := $(addsuffix -hostdex,$(CORE_IMG_JARS))
 TARGET_CORE_IMG_JARS := $(addsuffix -testdex,$(CORE_IMG_JARS))
 HOST_CORE_IMG_DEX_LOCATIONS   := $(foreach jar,$(HOST_CORE_IMG_JARS),  $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
@@ -102,7 +102,7 @@
 ART_HOST_DEX_DEPENDENCIES := $(foreach jar,$(HOST_TEST_CORE_JARS),$(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
 ART_TARGET_DEX_DEPENDENCIES := $(foreach jar,$(TARGET_TEST_CORE_JARS),$(TARGET_OUT_JAVA_LIBRARIES)/$(jar).jar)
 
-ART_CORE_SHARED_LIBRARIES := libjavacore libopenjdk libopenjdkjvm libopenjdkjvmti
+ART_CORE_SHARED_LIBRARIES := libicu_jni libjavacore libopenjdk libopenjdkjvm libopenjdkjvmti
 ART_CORE_SHARED_DEBUG_LIBRARIES := libopenjdkd libopenjdkjvmd libopenjdkjvmtid
 ART_HOST_SHARED_LIBRARY_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(ART_HOST_OUT_SHARED_LIBRARIES)/$(lib)$(ART_HOST_SHLIB_EXTENSION))
 ART_HOST_SHARED_LIBRARY_DEBUG_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(ART_HOST_OUT_SHARED_LIBRARIES)/$(lib)$(ART_HOST_SHLIB_EXTENSION))
@@ -111,10 +111,11 @@
 ART_HOST_SHARED_LIBRARY_DEBUG_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(2ND_HOST_OUT_SHARED_LIBRARIES)/$(lib).so)
 endif
 
-ART_TARGET_SHARED_LIBRARY_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(TARGET_OUT_SHARED_LIBRARIES)/$(lib).so)
+# Both the primary and the secondary arches of the libs are built by depending
+# on the module name.
+ART_DEBUG_TARGET_SHARED_LIBRARY_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(lib).com.android.art.debug)
 ART_TARGET_SHARED_LIBRARY_DEBUG_DEPENDENCIES := $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(TARGET_OUT_SHARED_LIBRARIES)/$(lib).so)
 ifdef TARGET_2ND_ARCH
-ART_TARGET_SHARED_LIBRARY_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(2ND_TARGET_OUT_SHARED_LIBRARIES)/$(lib).so)
 ART_TARGET_SHARED_LIBRARY_DEBUG_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_DEBUG_LIBRARIES), $(2ND_TARGET_OUT_SHARED_LIBRARIES)/$(lib).so)
 endif
 
@@ -147,4 +148,15 @@
 ART_HOST_EXECUTABLES += $(foreach name,$(ART_CORE_DEBUGGABLE_EXECUTABLES),$(name)d-host)
 endif
 
+# Release ART APEX, included by default in "user" builds.
+RELEASE_ART_APEX := com.android.art.release
+# Debug ART APEX, included by default in "userdebug" and "eng"
+# builds and used in ART device benchmarking.
+DEBUG_ART_APEX := com.android.art.debug
+# Testing ART APEX, used in ART device testing.
+TESTING_ART_APEX := com.android.art.testing
+
+# Conscrypt APEX
+CONSCRYPT_APEX := com.android.conscrypt
+
 endif # ART_ANDROID_COMMON_PATH_MK
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 03aae07..a9855cd 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -30,6 +30,8 @@
   ErroneousA \
   ErroneousB \
   ErroneousInit \
+  Extension1 \
+  Extension2 \
   ForClassLoaderA \
   ForClassLoaderB \
   ForClassLoaderC \
@@ -76,10 +78,10 @@
 ART_TEST_HOST_GTEST_MainStripped_DEX := $(basename $(ART_TEST_HOST_GTEST_Main_DEX))Stripped$(suffix $(ART_TEST_HOST_GTEST_Main_DEX))
 ART_TEST_TARGET_GTEST_MainStripped_DEX := $(basename $(ART_TEST_TARGET_GTEST_Main_DEX))Stripped$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX))
 
-# Create rules for MainUncompressed, a copy of Main with the classes.dex uncompressed
+# Create rules for MainUncompressedAligned, a copy of Main with the classes.dex uncompressed
 # for the dex2oat tests.
-ART_TEST_HOST_GTEST_MainUncompressed_DEX := $(basename $(ART_TEST_HOST_GTEST_Main_DEX))Uncompressed$(suffix $(ART_TEST_HOST_GTEST_Main_DEX))
-ART_TEST_TARGET_GTEST_MainUncompressed_DEX := $(basename $(ART_TEST_TARGET_GTEST_Main_DEX))Uncompressed$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX))
+ART_TEST_HOST_GTEST_MainUncompressedAligned_DEX := $(basename $(ART_TEST_HOST_GTEST_Main_DEX))UncompressedAligned$(suffix $(ART_TEST_HOST_GTEST_Main_DEX))
+ART_TEST_TARGET_GTEST_MainUncompressedAligned_DEX := $(basename $(ART_TEST_TARGET_GTEST_Main_DEX))UncompressedAligned$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX))
 
 # Create rules for UncompressedEmpty, a classes.dex that is empty and uncompressed
 # for the dex2oat tests.
@@ -91,10 +93,10 @@
 ART_TEST_HOST_GTEST_EmptyUncompressedAligned_DEX := $(basename $(ART_TEST_HOST_GTEST_Main_DEX))EmptyUncompressedAligned$(suffix $(ART_TEST_HOST_GTEST_Main_DEX))
 ART_TEST_TARGET_GTEST_EmptyUncompressedAligned_DEX := $(basename $(ART_TEST_TARGET_GTEST_Main_DEX))EmptyUncompressedAligned$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX))
 
-# Create rules for MultiDexUncompressed, a copy of MultiDex with the classes.dex uncompressed
+# Create rules for MultiDexUncompressedAligned, a copy of MultiDex with the classes.dex uncompressed
 # for the OatFile tests.
-ART_TEST_HOST_GTEST_MultiDexUncompressed_DEX := $(basename $(ART_TEST_HOST_GTEST_MultiDex_DEX))Uncompressed$(suffix $(ART_TEST_HOST_GTEST_MultiDex_DEX))
-ART_TEST_TARGET_GTEST_MultiDexUncompressed_DEX := $(basename $(ART_TEST_TARGET_GTEST_MultiDex_DEX))Uncompressed$(suffix $(ART_TEST_TARGET_GTEST_MultiDex_DEX))
+ART_TEST_HOST_GTEST_MultiDexUncompressedAligned_DEX := $(basename $(ART_TEST_HOST_GTEST_MultiDex_DEX))UncompressedAligned$(suffix $(ART_TEST_HOST_GTEST_MultiDex_DEX))
+ART_TEST_TARGET_GTEST_MultiDexUncompressedAligned_DEX := $(basename $(ART_TEST_TARGET_GTEST_MultiDex_DEX))UncompressedAligned$(suffix $(ART_TEST_TARGET_GTEST_MultiDex_DEX))
 
 ifdef ART_TEST_HOST_GTEST_Main_DEX
 $(ART_TEST_HOST_GTEST_MainStripped_DEX): $(ART_TEST_HOST_GTEST_Main_DEX)
@@ -109,14 +111,14 @@
 endif
 
 ifdef ART_TEST_HOST_GTEST_Main_DEX
-$(ART_TEST_HOST_GTEST_MainUncompressed_DEX): $(ART_TEST_HOST_GTEST_Main_DEX) $(ZIPALIGN)
+$(ART_TEST_HOST_GTEST_MainUncompressedAligned_DEX): $(ART_TEST_HOST_GTEST_Main_DEX) $(ZIPALIGN)
 	cp $< $@
 	$(call uncompress-dexs, $@)
 	$(call align-package, $@)
 endif
 
 ifdef ART_TEST_TARGET_GTEST_Main_DEX
-$(ART_TEST_TARGET_GTEST_MainUncompressed_DEX): $(ART_TEST_TARGET_GTEST_Main_DEX) $(ZIPALIGN)
+$(ART_TEST_TARGET_GTEST_MainUncompressedAligned_DEX): $(ART_TEST_TARGET_GTEST_Main_DEX) $(ZIPALIGN)
 	cp $< $@
 	$(call uncompress-dexs, $@)
 	$(call align-package, $@)
@@ -124,45 +126,45 @@
 
 ifdef ART_TEST_HOST_GTEST_Main_DEX
 $(ART_TEST_HOST_GTEST_EmptyUncompressed_DEX):
-	touch $(dir $@)classes.dex
-	zip -j -qD -X -0 $@ $(dir $@)classes.dex
-	rm $(dir $@)classes.dex
+	touch $@_classes.dex
+	zip -j -qD -X -0 $@ $@_classes.dex
+	rm $@_classes.dex
 endif
 
 ifdef ART_TEST_TARGET_GTEST_Main_DEX
 $(ART_TEST_TARGET_GTEST_EmptyUncompressed_DEX):
-	touch $(dir $@)classes.dex
-	zip -j -qD -X -0 $@ $(dir $@)classes.dex
-	rm $(dir $@)classes.dex
+	touch $@_classes.dex
+	zip -j -qD -X -0 $@ $@_classes.dex
+	rm $@_classes.dex
 endif
 
 ifdef ART_TEST_HOST_GTEST_Main_DEX
 $(ART_TEST_HOST_GTEST_EmptyUncompressedAligned_DEX): $(ZIPALIGN)
-	touch $(dir $@)classes.dex
-	zip -j -0 $(dir $@)temp.zip $(dir $@)classes.dex
-	$(ZIPALIGN) -f -v 4 $(dir $@)temp.zip $@
-	rm $(dir $@)classes.dex
-	rm $(dir $@)temp.zip
+	touch $@_classes.dex
+	zip -j -0 $@_temp.zip $@_classes.dex
+	$(ZIPALIGN) -f 4 $@_temp.zip $@
+	rm $@_classes.dex
+	rm $@_temp.zip
 endif
 
 ifdef ART_TEST_TARGET_GTEST_Main_DEX
 $(ART_TEST_TARGET_GTEST_EmptyUncompressedAligned_DEX): $(ZIPALIGN)
-	touch $(dir $@)classes.dex
-	zip -j -0 $(dir $@)temp.zip $(dir $@)classes.dex
-	$(ZIPALIGN) -f -v 4 $(dir $@)temp.zip $@
-	rm $(dir $@)classes.dex
-	rm $(dir $@)temp.zip
+	touch $@_classes.dex
+	zip -j -0 $@_temp.zip $@_classes.dex
+	$(ZIPALIGN) -f 4 $@_temp.zip $@
+	rm $@_classes.dex
+	rm $@_temp.zip
 endif
 
 ifdef ART_TEST_HOST_GTEST_MultiDex_DEX
-$(ART_TEST_HOST_GTEST_MultiDexUncompressed_DEX): $(ART_TEST_HOST_GTEST_MultiDex_DEX) $(ZIPALIGN)
+$(ART_TEST_HOST_GTEST_MultiDexUncompressedAligned_DEX): $(ART_TEST_HOST_GTEST_MultiDex_DEX) $(ZIPALIGN)
 	cp $< $@
 	$(call uncompress-dexs, $@)
 	$(call align-package, $@)
 endif
 
 ifdef ART_TEST_TARGET_GTEST_MultiDex_DEX
-$(ART_TEST_TARGET_GTEST_MultiDexUncompressed_DEX): $(ART_TEST_TARGET_GTEST_MultiDex_DEX) $(ZIPALIGN)
+$(ART_TEST_TARGET_GTEST_MultiDexUncompressedAligned_DEX): $(ART_TEST_TARGET_GTEST_MultiDex_DEX) $(ZIPALIGN)
 	cp $< $@
 	$(call uncompress-dexs, $@)
 	$(call align-package, $@)
@@ -209,7 +211,7 @@
 ART_GTEST_dex_cache_test_DEX_DEPS := Main Packages MethodTypes
 ART_GTEST_dexanalyze_test_DEX_DEPS := MultiDex
 ART_GTEST_dexlayout_test_DEX_DEPS := ManyMethods
-ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) ManyMethods Statics VerifierDeps MainUncompressed EmptyUncompressed EmptyUncompressedAligned StringLiterals
+ART_GTEST_dex2oat_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) ManyMethods Statics VerifierDeps MainUncompressedAligned EmptyUncompressed EmptyUncompressedAligned StringLiterals
 ART_GTEST_dex2oat_image_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Statics VerifierDeps
 ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
 ART_GTEST_hiddenapi_test_DEX_DEPS := HiddenApi HiddenApiStubs
@@ -221,8 +223,8 @@
 ART_GTEST_jni_internal_test_DEX_DEPS := AllFields StaticLeafMethods MyClassNatives
 ART_GTEST_oat_file_assistant_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS)
 ART_GTEST_dexoptanalyzer_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS)
-ART_GTEST_image_space_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS)
-ART_GTEST_oat_file_test_DEX_DEPS := Main MultiDex MainUncompressed MultiDexUncompressed MainStripped Nested MultiDexModifiedSecondary
+ART_GTEST_image_space_test_DEX_DEPS := $(ART_GTEST_dex2oat_environment_tests_DEX_DEPS) Extension1 Extension2
+ART_GTEST_oat_file_test_DEX_DEPS := Main MultiDex MainUncompressedAligned MultiDexUncompressedAligned MainStripped Nested MultiDexModifiedSecondary
 ART_GTEST_oat_test_DEX_DEPS := Main
 ART_GTEST_oat_writer_test_DEX_DEPS := Main
 # two_runtimes_test build off dex2oat_environment_test, which does sanity checks on the following dex files.
@@ -232,6 +234,7 @@
 ART_GTEST_reflection_test_DEX_DEPS := Main NonStaticLeafMethods StaticLeafMethods
 ART_GTEST_profile_assistant_test_DEX_DEPS := ProfileTestMultiDex
 ART_GTEST_profile_compilation_info_test_DEX_DEPS := ManyMethods ProfileTestMultiDex
+ART_GTEST_profile_boot_info_test_DEX_DEPS := ManyMethods ProfileTestMultiDex MultiDex
 ART_GTEST_profiling_info_test_DEX_DEPS := ProfileTestMultiDex
 ART_GTEST_runtime_callbacks_test_DEX_DEPS := XandY
 ART_GTEST_stub_test_DEX_DEPS := AllFields
@@ -243,6 +246,7 @@
 ART_GTEST_dex_to_dex_decompiler_test_DEX_DEPS := VerifierDeps DexToDexDecompiler
 ART_GTEST_oatdump_app_test_DEX_DEPS := ProfileTestMultiDex
 ART_GTEST_oatdump_test_DEX_DEPS := ProfileTestMultiDex
+ART_GTEST_reg_type_test_DEX_DEPS := Interfaces
 
 # The elf writer test has dependencies on core.oat.
 ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_IMAGE_DEFAULT_64) $(HOST_CORE_IMAGE_DEFAULT_32)
@@ -252,6 +256,10 @@
 ART_GTEST_two_runtimes_test_HOST_DEPS := $(HOST_CORE_IMAGE_DEFAULT_64) $(HOST_CORE_IMAGE_DEFAULT_32)
 ART_GTEST_two_runtimes_test_TARGET_DEPS := $(TARGET_CORE_IMAGE_DEFAULT_64) $(TARGET_CORE_IMAGE_DEFAULT_32)
 
+# The transaction test has dependencies on core.oat.
+ART_GTEST_transaction_test_HOST_DEPS := $(HOST_CORE_IMAGE_DEFAULT_64) $(HOST_CORE_IMAGE_DEFAULT_32)
+ART_GTEST_transaction_test_TARGET_DEPS := $(TARGET_CORE_IMAGE_DEFAULT_64) $(TARGET_CORE_IMAGE_DEFAULT_32)
+
 ART_GTEST_dex2oat_environment_tests_HOST_DEPS := \
   $(HOST_CORE_IMAGE_optimizing_64) \
   $(HOST_CORE_IMAGE_optimizing_32) \
@@ -268,7 +276,7 @@
   $(HOST_OUT_EXECUTABLES)/dex2oatd
 ART_GTEST_oat_file_test_TARGET_DEPS := \
   $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \
-  $(TARGET_OUT_EXECUTABLES)/dex2oatd
+  dex2oatd.com.android.art.debug
 
 ART_GTEST_oat_file_assistant_test_HOST_DEPS := \
   $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
@@ -280,7 +288,7 @@
   $(HOST_OUT_EXECUTABLES)/dexoptanalyzerd
 ART_GTEST_dexoptanalyzer_test_TARGET_DEPS := \
   $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \
-  $(TARGET_OUT_EXECUTABLES)/dexoptanalyzerd
+  $(TESTING_ART_APEX)  # For dexoptanalyzerd.
 
 ART_GTEST_image_space_test_HOST_DEPS := \
   $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS)
@@ -292,32 +300,37 @@
   $(HOST_OUT_EXECUTABLES)/dex2oatd
 ART_GTEST_dex2oat_test_TARGET_DEPS := \
   $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \
-  $(TARGET_OUT_EXECUTABLES)/dex2oatd
+  $(TESTING_ART_APEX)  # For dex2oatd.
 
 ART_GTEST_dex2oat_image_test_HOST_DEPS := \
   $(ART_GTEST_dex2oat_environment_tests_HOST_DEPS) \
   $(HOST_OUT_EXECUTABLES)/dex2oatd
 ART_GTEST_dex2oat_image_test_TARGET_DEPS := \
   $(ART_GTEST_dex2oat_environment_tests_TARGET_DEPS) \
-  $(TARGET_OUT_EXECUTABLES)/dex2oatd
+  $(TESTING_ART_APEX)  # For dex2oatd.
+
+ART_GTEST_module_exclusion_test_HOST_DEPS := \
+  $(ART_GTEST_dex2oat_image_test_HOST_DEPS)
+ART_GTEST_module_exclusion_test_TARGET_DEPS := \
+  $(ART_GTEST_dex2oat_image_test_TARGET_DEPS)
 
 # TODO: document why this is needed.
 ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_DEFAULT_64) $(HOST_CORE_IMAGE_DEFAULT_32)
 
 # The dexdiag test requires the dexdiag utility.
 ART_GTEST_dexdiag_test_HOST_DEPS := $(HOST_OUT_EXECUTABLES)/dexdiag
-ART_GTEST_dexdiag_test_TARGET_DEPS := $(TARGET_OUT_EXECUTABLES)/dexdiag
+ART_GTEST_dexdiag_test_TARGET_DEPS := $(TESTING_ART_APEX)  # For dexdiag.
 
 # The dexdump test requires an image and the dexdump utility.
 # TODO: rename into dexdump when migration completes
 ART_GTEST_dexdump_test_HOST_DEPS := \
   $(HOST_CORE_IMAGE_DEFAULT_64) \
   $(HOST_CORE_IMAGE_DEFAULT_32) \
-  $(HOST_OUT_EXECUTABLES)/dexdump2
+  $(HOST_OUT_EXECUTABLES)/dexdump
 ART_GTEST_dexdump_test_TARGET_DEPS := \
   $(TARGET_CORE_IMAGE_DEFAULT_64) \
   $(TARGET_CORE_IMAGE_DEFAULT_32) \
-  $(TARGET_OUT_EXECUTABLES)/dexdump2
+  dexdump.com.android.art.debug
 
 # The dexanalyze test requires an image and the dexanalyze utility.
 ART_GTEST_dexanalyze_test_HOST_DEPS := \
@@ -327,7 +340,7 @@
 ART_GTEST_dexanalyze_test_TARGET_DEPS := \
   $(TARGET_CORE_IMAGE_DEFAULT_64) \
   $(TARGET_CORE_IMAGE_DEFAULT_32) \
-  $(TARGET_OUT_EXECUTABLES)/dexanalyze
+  dexanalyze.com.android.art.debug
 
 # The dexlayout test requires an image and the dexlayout utility.
 # TODO: rename into dexdump when migration completes
@@ -335,12 +348,12 @@
   $(HOST_CORE_IMAGE_DEFAULT_64) \
   $(HOST_CORE_IMAGE_DEFAULT_32) \
   $(HOST_OUT_EXECUTABLES)/dexlayoutd \
-  $(HOST_OUT_EXECUTABLES)/dexdump2
+  $(HOST_OUT_EXECUTABLES)/dexdump
 ART_GTEST_dexlayout_test_TARGET_DEPS := \
   $(TARGET_CORE_IMAGE_DEFAULT_64) \
   $(TARGET_CORE_IMAGE_DEFAULT_32) \
-  $(TARGET_OUT_EXECUTABLES)/dexlayoutd \
-  $(TARGET_OUT_EXECUTABLES)/dexdump2
+  dexlayoutd.com.android.art.debug \
+  dexdump.com.android.art.debug
 
 # The dexlist test requires an image and the dexlist utility.
 ART_GTEST_dexlist_test_HOST_DEPS := \
@@ -350,7 +363,7 @@
 ART_GTEST_dexlist_test_TARGET_DEPS := \
   $(TARGET_CORE_IMAGE_DEFAULT_64) \
   $(TARGET_CORE_IMAGE_DEFAULT_32) \
-  $(TARGET_OUT_EXECUTABLES)/dexlist
+  $(TESTING_ART_APEX)   # For dexlist.
 
 # The imgdiag test has dependencies on core.oat since it needs to load it during the test.
 # For the host, also add the installed tool (in the base size, that should suffice). For the
@@ -362,13 +375,13 @@
 ART_GTEST_imgdiag_test_TARGET_DEPS := \
   $(TARGET_CORE_IMAGE_DEFAULT_64) \
   $(TARGET_CORE_IMAGE_DEFAULT_32) \
-  $(TARGET_OUT_EXECUTABLES)/imgdiagd
+  imgdiagd.com.android.art.debug
 
 # Dex analyze test requires dexanalyze.
 ART_GTEST_dexanalyze_test_HOST_DEPS := \
   $(HOST_OUT_EXECUTABLES)/dexanalyze
 ART_GTEST_dexanalyze_test_TARGET_DEPS := \
-  $(TARGET_OUT_EXECUTABLES)/dexanalyze
+  dexanalyze.com.android.art.debug
 
 # Oatdump test requires an image and oatfile to dump.
 ART_GTEST_oatdump_test_HOST_DEPS := \
@@ -376,23 +389,21 @@
   $(HOST_CORE_IMAGE_DEFAULT_32) \
   $(HOST_OUT_EXECUTABLES)/oatdumpd \
   $(HOST_OUT_EXECUTABLES)/oatdumpds \
-  $(HOST_OUT_EXECUTABLES)/dexdump2
+  $(HOST_OUT_EXECUTABLES)/dexdump \
+  $(HOST_OUT_EXECUTABLES)/dex2oatd \
+  $(HOST_OUT_EXECUTABLES)/dex2oatds
 ART_GTEST_oatdump_test_TARGET_DEPS := \
   $(TARGET_CORE_IMAGE_DEFAULT_64) \
   $(TARGET_CORE_IMAGE_DEFAULT_32) \
-  $(TARGET_OUT_EXECUTABLES)/oatdumpd \
-  $(TARGET_OUT_EXECUTABLES)/dexdump2
+  $(TESTING_ART_APEX)    # For oatdumpd, dexdump, dex2oatd.
 ART_GTEST_oatdump_image_test_HOST_DEPS := $(ART_GTEST_oatdump_test_HOST_DEPS)
 ART_GTEST_oatdump_image_test_TARGET_DEPS := $(ART_GTEST_oatdump_test_TARGET_DEPS)
-ART_GTEST_oatdump_app_test_HOST_DEPS := $(ART_GTEST_oatdump_test_HOST_DEPS) \
-  $(HOST_OUT_EXECUTABLES)/dex2oatd \
-  $(HOST_OUT_EXECUTABLES)/dex2oatds
-ART_GTEST_oatdump_app_test_TARGET_DEPS := $(ART_GTEST_oatdump_test_TARGET_DEPS) \
-  $(TARGET_OUT_EXECUTABLES)/dex2oatd
+ART_GTEST_oatdump_app_test_HOST_DEPS := $(ART_GTEST_oatdump_test_HOST_DEPS)
+ART_GTEST_oatdump_app_test_TARGET_DEPS := $(ART_GTEST_oatdump_test_TARGET_DEPS)
 
 # Profile assistant tests requires profman utility.
 ART_GTEST_profile_assistant_test_HOST_DEPS := $(HOST_OUT_EXECUTABLES)/profmand
-ART_GTEST_profile_assistant_test_TARGET_DEPS := $(TARGET_OUT_EXECUTABLES)/profmand
+ART_GTEST_profile_assistant_test_TARGET_DEPS := $(TESTING_ART_APEX)  # For profmand.
 
 ART_GTEST_hiddenapi_test_HOST_DEPS := \
   $(HOST_CORE_IMAGE_DEFAULT_64) \
@@ -428,13 +439,11 @@
     art_runtime_tests \
     art_sigchain_tests \
 
-ART_TARGET_GTEST_FILES := $(foreach m,$(ART_TEST_MODULES),\
-    $(ART_TEST_LIST_device_$(TARGET_ARCH)_$(m)))
-
-ifdef TARGET_2ND_ARCH
-2ND_ART_TARGET_GTEST_FILES := $(foreach m,$(ART_TEST_MODULES),\
-    $(ART_TEST_LIST_device_$(2ND_TARGET_ARCH)_$(m)))
-endif
+ART_TARGET_GTEST_NAMES := $(foreach tm,$(ART_TEST_MODULES),\
+  $(foreach path,$(ART_TEST_LIST_device_$(TARGET_ARCH)_$(tm)),\
+    $(notdir $(path))\
+   )\
+)
 
 ART_HOST_GTEST_FILES := $(foreach m,$(ART_TEST_MODULES),\
     $(ART_TEST_LIST_host_$(ART_HOST_ARCH)_$(m)))
@@ -459,9 +468,14 @@
   ART_GTEST_TARGET_ANDROID_ROOT := $(ART_TEST_ANDROID_ROOT)
 endif
 
-ART_GTEST_TARGET_ANDROID_RUNTIME_ROOT := '/apex/com.android.runtime'
-ifneq ($(ART_TEST_ANDROID_RUNTIME_ROOT),)
-  ART_GTEST_TARGET_ANDROID_RUNTIME_ROOT := $(ART_TEST_ANDROID_RUNTIME_ROOT)
+ART_GTEST_TARGET_ANDROID_I18N_ROOT := '/apex/com.android.i18n'
+ifneq ($(ART_TEST_ANDROID_I18N_ROOT),)
+  ART_GTEST_TARGET_ANDROID_I18N_ROOT := $(ART_TEST_ANDROID_I18N_ROOT)
+endif
+
+ART_GTEST_TARGET_ANDROID_ART_ROOT := '/apex/com.android.art'
+ifneq ($(ART_TEST_ANDROID_ART_ROOT),)
+  ART_GTEST_TARGET_ANDROID_ART_ROOT := $(ART_TEST_ANDROID_ART_ROOT)
 endif
 
 ART_GTEST_TARGET_ANDROID_TZDATA_ROOT := '/apex/com.android.tzdata'
@@ -469,77 +483,6 @@
   ART_GTEST_TARGET_ANDROID_TZDATA_ROOT := $(ART_TEST_ANDROID_TZDATA_ROOT)
 endif
 
-# Define a make rule for a target device gtest.
-# $(1): gtest name - the name of the test we're building such as leb128_test.
-# $(2): path relative to $OUT to the test binary
-# $(3): 2ND_ or undefined - used to differentiate between the primary and secondary architecture.
-# $(4): LD_LIBRARY_PATH or undefined - used in case libartd.so is not in /system/lib/
-define define-art-gtest-rule-target
-  ifeq ($(ART_TEST_CHROOT),)
-    # Non-chroot configuration.
-    maybe_art_test_chroot :=
-    maybe_chroot_command :=
-  else
-    # Chroot configuration.
-    maybe_art_test_chroot := $(ART_TEST_CHROOT)
-    maybe_chroot_command := chroot $(ART_TEST_CHROOT)
-  endif
-
-  gtest_rule := test-art-target-gtest-$(1)$$($(3)ART_PHONY_TEST_TARGET_SUFFIX)
-  gtest_exe := $(OUT_DIR)/$(2)
-  gtest_target_exe := $$(patsubst $(PRODUCT_OUT)/%,/%,$$(gtest_exe))
-
-  # Add the test dependencies to test-art-target-sync, which will be a prerequisite for the test
-  # to ensure files are pushed to the device.
-  gtest_deps := \
-    $$(ART_GTEST_$(1)_TARGET_DEPS) \
-    $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_TARGET_GTEST_$(file)_DEX)) \
-    $$(gtest_exe) \
-    $$($(3)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \
-    $$($(3)TARGET_OUT_SHARED_LIBRARIES)/libopenjdkd.so \
-    $$(foreach jar,$$(TARGET_TEST_CORE_JARS),$$(TARGET_OUT_JAVA_LIBRARIES)/$$(jar).jar)
-
-  ART_TEST_TARGET_GTEST_DEPENDENCIES += $$(gtest_deps)
-
-$$(gtest_rule): PRIVATE_TARGET_EXE := $$(gtest_target_exe)
-$$(gtest_rule): PRIVATE_MAYBE_CHROOT_COMMAND := $$(maybe_chroot_command)
-
-# File witnessing the success of the gtest, the presence of which means the gtest's success.
-gtest_witness := \
-  $$(maybe_art_test_chroot)$(ART_TARGET_TEST_DIR)/$(TARGET_$(3)ARCH)/$$(gtest_rule)-$$$$PPID
-
-$$(gtest_rule): PRIVATE_GTEST_WITNESS := $$(gtest_witness)
-
-.PHONY: $$(gtest_rule)
-$$(gtest_rule): test-art-target-sync
-	$(hide) $(ADB) shell touch $$(PRIVATE_GTEST_WITNESS)
-	$(hide) $(ADB) shell rm $$(PRIVATE_GTEST_WITNESS)
-	$(hide) $(ADB) shell $$(PRIVATE_MAYBE_CHROOT_COMMAND) chmod 755 $$(PRIVATE_TARGET_EXE)
-	$(hide) $$(call ART_TEST_SKIP,$$@) && \
-	  ($(ADB) shell "$$(PRIVATE_MAYBE_CHROOT_COMMAND) env $(GCOV_ENV) LD_LIBRARY_PATH=$(4) \
-	       ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \
-	       ANDROID_RUNTIME_ROOT=$(ART_GTEST_TARGET_ANDROID_RUNTIME_ROOT) \
-	       ANDROID_TZDATA_ROOT=$(ART_GTEST_TARGET_ANDROID_TZDATA_ROOT) \
-	       $$(PRIVATE_TARGET_EXE) \
-	     && touch $$(PRIVATE_GTEST_WITNESS)" \
-	   && ($(ADB) pull $$(PRIVATE_GTEST_WITNESS) /tmp/ && $$(call ART_TEST_PASSED,$$@)) \
-	   || $$(call ART_TEST_FAILED,$$@))
-	$(hide) rm -f /tmp/$$@-$$$$PPID
-
-  ART_TEST_TARGET_GTEST$($(3)ART_PHONY_TEST_TARGET_SUFFIX)_RULES += $$(gtest_rule)
-  ART_TEST_TARGET_GTEST_RULES += $$(gtest_rule)
-  ART_TEST_TARGET_GTEST_$(1)_RULES += $$(gtest_rule)
-
-  # Clear locally defined variables.
-  gtest_witness :=
-  maybe_chroot_command :=
-  maybe_art_test_chroot :=
-  gtest_target_exe :=
-  gtest_deps :=
-  gtest_exe :=
-  gtest_rule :=
-endef  # define-art-gtest-rule-target
-
 # Define make rules for a host gtests.
 # $(1): gtest name - the name of the test we're building such as leb128_test.
 # $(2): path relative to $OUT to the test binary
@@ -549,15 +492,16 @@
   gtest_rule := test-art-host-gtest-$$(gtest_suffix)
   gtest_output := $(call intermediates-dir-for,PACKAGING,art-host-gtest,HOST)/$$(gtest_suffix).xml
   $$(call dist-for-goals,$$(gtest_rule),$$(gtest_output):gtest/$$(gtest_suffix))
-  gtest_exe := $(OUT_DIR)/$(2)
+  gtest_exe := $(2)
   # Dependencies for all host gtests.
   gtest_deps := $$(HOST_CORE_DEX_LOCATIONS) \
+    $$($(3)ART_HOST_OUT_SHARED_LIBRARIES)/libicu_jni$$(ART_HOST_SHLIB_EXTENSION) \
     $$($(3)ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$$(ART_HOST_SHLIB_EXTENSION) \
     $$($(3)ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$$(ART_HOST_SHLIB_EXTENSION) \
     $$(gtest_exe) \
     $$(ART_GTEST_$(1)_HOST_DEPS) \
     $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX)) \
-    $(HOST_OUT_EXECUTABLES)/timeout_dumper
+    $(HOST_OUT_EXECUTABLES)/signal_dumper
 
   ART_TEST_HOST_GTEST_DEPENDENCIES += $$(gtest_deps)
 
@@ -571,7 +515,7 @@
 ifeq (,$(SANITIZE_HOST))
 $$(gtest_output): $$(gtest_exe) $$(gtest_deps)
 	$(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && \
-		timeout --foreground -k 120s -s SIGRTMIN+2 2400s $(HOST_OUT_EXECUTABLES)/timeout_dumper \
+		timeout --foreground -k 120s 2400s $(HOST_OUT_EXECUTABLES)/signal_dumper -s 15 \
 			$$< --gtest_output=xml:$$@ && \
 		$$(call ART_TEST_PASSED,$$(NAME))) || $$(call ART_TEST_FAILED,$$(NAME))
 else
@@ -584,8 +528,8 @@
 # under ASAN.
 $$(gtest_output): $$(gtest_exe) $$(gtest_deps)
 	$(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && set -o pipefail && \
-		ASAN_OPTIONS=detect_leaks=1 timeout --foreground -k 120s -s SIGRTMIN+2 3600s \
-			$(HOST_OUT_EXECUTABLES)/timeout_dumper \
+		ASAN_OPTIONS=detect_leaks=1 timeout --foreground -k 120s 3600s \
+			$(HOST_OUT_EXECUTABLES)/signal_dumper -s 15 \
 				$$< --gtest_output=xml:$$@ 2>&1 | tee $$<.tmp.out >&2 && \
 		{ $$(call ART_TEST_PASSED,$$(NAME)) ; rm $$<.tmp.out ; }) || \
 		( grep -q AddressSanitizer $$<.tmp.out && export ANDROID_BUILD_TOP=`pwd` && \
@@ -606,41 +550,20 @@
   gtest_suffix :=
 endef  # define-art-gtest-rule-host
 
-# Define the rules to build and run host and target gtests.
-# $(1): file name
-# $(2): 2ND_ or undefined - used to differentiate between the primary and secondary architecture.
-define define-art-gtest-target
-  art_gtest_filename := $(1)
+# Add the additional dependencies for the specified test
+# $(1): test name
+define add-art-gtest-dependencies
+  # Note that, both the primary and the secondary arches of the libs are built by depending
+  # on the module name.
+  gtest_deps := \
+    $$(ART_GTEST_$(1)_TARGET_DEPS) \
+    $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_TARGET_GTEST_$(file)_DEX)) \
 
-  include $$(CLEAR_VARS)
-  art_gtest_name := $$(notdir $$(basename $$(art_gtest_filename)))
-
-  library_path :=
-  2ND_library_path :=
-  ifneq ($$(ART_TEST_ANDROID_ROOT),)
-    ifdef TARGET_2ND_ARCH
-      2ND_library_path := $$(ART_TEST_ANDROID_ROOT)/lib
-      library_path := $$(ART_TEST_ANDROID_ROOT)/lib64
-    else
-      ifneq ($(filter %64,$(TARGET_ARCH)),)
-        library_path := $$(ART_TEST_ANDROID_ROOT)/lib64
-      else
-        library_path := $$(ART_TEST_ANDROID_ROOT)/lib
-      endif
-    endif
-  endif
-
-  ifndef ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES
-    ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES :=
-  endif
-  $$(eval $$(call define-art-gtest-rule-target,$$(art_gtest_name),$$(art_gtest_filename),$(2),$$($(2)library_path)))
+  ART_TEST_TARGET_GTEST_DEPENDENCIES += $$(gtest_deps)
 
   # Clear locally defined variables.
-  art_gtest_filename :=
-  art_gtest_name :=
-  library_path :=
-  2ND_library_path :=
-endef  # define-art-gtest-target
+  gtest_deps :=
+endef  # add-art-gtest-dependencies
 
 # $(1): file name
 # $(2): 2ND_ or undefined - used to differentiate between the primary and secondary architecture.
@@ -659,21 +582,6 @@
   art_gtest_name :=
 endef  # define-art-gtest-host
 
-# Define the rules to build and run gtests for both archs on target.
-# $(1): test name
-define define-art-gtest-target-both
-  art_gtest_name := $(1)
-
-    # A rule to run the different architecture versions of the gtest.
-.PHONY: test-art-target-gtest-$$(art_gtest_name)
-test-art-target-gtest-$$(art_gtest_name): $$(ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES)
-	$$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@)
-
-  # Clear now unused variables.
-  ART_TEST_TARGET_GTEST_$$(art_gtest_name)_RULES :=
-  art_gtest_name :=
-endef  # define-art-gtest-target-both
-
 # Define the rules to build and run gtests for both archs on host.
 # $(1): test name
 define define-art-gtest-host-both
@@ -689,12 +597,12 @@
 endef  # define-art-gtest-host-both
 
 ifeq ($(ART_BUILD_TARGET),true)
-  $(foreach file,$(ART_TARGET_GTEST_FILES), $(eval $(call define-art-gtest-target,$(file),)))
-  ifdef 2ND_ART_PHONY_TEST_TARGET_SUFFIX
-    $(foreach file,$(2ND_ART_TARGET_GTEST_FILES), $(eval $(call define-art-gtest-target,$(file),2ND_)))
-  endif
-  # Rules to run the different architecture versions of the gtest.
-  $(foreach file,$(ART_TARGET_GTEST_FILES), $(eval $(call define-art-gtest-target-both,$$(notdir $$(basename $$(file))))))
+  $(foreach name,$(ART_TARGET_GTEST_NAMES), $(eval $(call add-art-gtest-dependencies,$(name),)))
+  ART_TEST_TARGET_GTEST_DEPENDENCIES += \
+    libicu_jni.com.android.art.debug \
+    libjavacore.com.android.art.debug \
+    libopenjdkd.com.android.art.debug \
+    $(foreach jar,$(TARGET_TEST_CORE_JARS),$(TARGET_OUT_JAVA_LIBRARIES)/$(jar).jar)
 endif
 ifeq ($(ART_BUILD_HOST),true)
   $(foreach file,$(ART_HOST_GTEST_FILES), $(eval $(call define-art-gtest-host,$(file),)))
@@ -707,8 +615,18 @@
 
 # Used outside the art project to get a list of the current tests
 RUNTIME_TARGET_GTEST_MAKE_TARGETS :=
-$(foreach file, $(ART_TARGET_GTEST_FILES), $(eval RUNTIME_TARGET_GTEST_MAKE_TARGETS += $$(notdir $$(patsubst %/,%,$$(dir $$(file))))_$$(notdir $$(basename $$(file)))))
-COMPILER_TARGET_GTEST_MAKE_TARGETS :=
+art_target_gtest_files := $(foreach m,$(ART_TEST_MODULES),$(ART_TEST_LIST_device_$(TARGET_ARCH)_$(m)))
+# If testdir == testfile, assume this is not a test_per_src module
+$(foreach file,$(art_target_gtest_files),\
+  $(eval testdir := $$(notdir $$(patsubst %/,%,$$(dir $$(file)))))\
+  $(eval testfile := $$(notdir $$(basename $$(file))))\
+  $(if $(call streq,$(testdir),$(testfile)),,\
+    $(eval testfile := $(testdir)_$(testfile)))\
+  $(eval RUNTIME_TARGET_GTEST_MAKE_TARGETS += $(testfile))\
+)
+testdir :=
+testfile :=
+art_target_gtest_files :=
 
 # Define all the combinations of host/target and suffix such as:
 # test-art-host-gtest or test-art-host-gtest64
@@ -733,7 +651,7 @@
   dependencies := $$(ART_TEST_$(2)_GTEST$(3)_RULES)
 
 .PHONY: $$(rule_name)
-$$(rule_name): $$(dependencies) d8 d8-compat-dx
+$$(rule_name): $$(dependencies) d8
 	$(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@)
 
   # Clear locally defined variables.
@@ -770,7 +688,8 @@
 ART_TEST_TARGET_GTEST$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
 ART_TEST_TARGET_GTEST_RULES :=
 ART_GTEST_TARGET_ANDROID_ROOT :=
-ART_GTEST_TARGET_ANDROID_RUNTIME_ROOT :=
+ART_GTEST_TARGET_ANDROID_I18N_ROOT :=
+ART_GTEST_TARGET_ANDROID_ART_ROOT :=
 ART_GTEST_TARGET_ANDROID_TZDATA_ROOT :=
 ART_GTEST_class_linker_test_DEX_DEPS :=
 ART_GTEST_class_table_test_DEX_DEPS :=
@@ -798,6 +717,8 @@
 ART_GTEST_dex2oat_image_test_DEX_DEPS :=
 ART_GTEST_dex2oat_image_test_HOST_DEPS :=
 ART_GTEST_dex2oat_image_test_TARGET_DEPS :=
+ART_GTEST_module_exclusion_test_HOST_DEPS :=
+ART_GTEST_module_exclusion_test_TARGET_DEPS :=
 ART_GTEST_object_test_DEX_DEPS :=
 ART_GTEST_proxy_test_DEX_DEPS :=
 ART_GTEST_reflection_test_DEX_DEPS :=
@@ -810,8 +731,8 @@
 $(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval ART_TEST_HOST_GTEST_$(dir)_DEX :=))
 ART_TEST_HOST_GTEST_MainStripped_DEX :=
 ART_TEST_TARGET_GTEST_MainStripped_DEX :=
-ART_TEST_HOST_GTEST_MainUncompressed_DEX :=
-ART_TEST_TARGET_GTEST_MainUncompressed_DEX :=
+ART_TEST_HOST_GTEST_MainUncompressedAligned_DEX :=
+ART_TEST_TARGET_GTEST_MainUncompressedAligned_DEX :=
 ART_TEST_HOST_GTEST_EmptyUncompressed_DEX :=
 ART_TEST_TARGET_GTEST_EmptyUncompressed_DEX :=
 ART_TEST_GTEST_VerifierDeps_SRC :=
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 2ad1143..c6fe400 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -44,11 +44,10 @@
   core_image_name :=
   core_oat_name :=
   core_infix :=
-  core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY)
+  core_dex2oat_dependency := $(DEX2OAT)
 
   ifeq ($(1),optimizing)
     core_compile_options += --compiler-backend=Optimizing
-    core_dex2oat_dependency := $(DEX2OAT)
   endif
   ifeq ($(1),interpreter)
     core_compile_options += --compiler-filter=quicken
@@ -63,6 +62,7 @@
     $$(error found $(1) expected interpreter, interp-ac, or optimizing)
   endif
 
+  core_image_location := $(HOST_OUT_JAVA_LIBRARIES)/core$$(core_infix)$(CORE_IMG_SUFFIX)
   core_image_name := $($(2)HOST_CORE_IMG_OUT_BASE)$$(core_infix)$(CORE_IMG_SUFFIX)
   core_oat_name := $($(2)HOST_CORE_OAT_OUT_BASE)$$(core_infix)$(CORE_OAT_SUFFIX)
 
@@ -76,19 +76,46 @@
   HOST_CORE_OAT_OUTS += $$(core_oat_name)
 
 $$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
+$$(core_image_name): PRIVATE_CORE_IMAGE_LOCATION := $$(core_image_location)
 $$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
 $$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
-$$(core_image_name): $$(HOST_CORE_IMG_DEX_LOCATIONS) $$(core_dex2oat_dependency)
+# In addition to the primary core image containing HOST_CORE_IMG_DEX_FILES,
+# also build a boot image extension for the remaining HOST_CORE_DEX_FILES.
+$$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency)
 	@echo "host dex2oat: $$@"
 	@mkdir -p $$(dir $$@)
-	$$(hide) ANDROID_LOG_TAGS="*:e" $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
+	$$(hide) ANDROID_LOG_TAGS="*:e" $$(DEX2OAT) \
+	  --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
 	  --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
-	  --image-classes=$$(PRELOADED_CLASSES) \
 	  $$(addprefix --dex-file=,$$(HOST_CORE_IMG_DEX_FILES)) \
 	  $$(addprefix --dex-location=,$$(HOST_CORE_IMG_DEX_LOCATIONS)) \
 	  --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
-	  --oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
-	  --base=$$(LIBART_IMG_HOST_BASE_ADDRESS) --instruction-set=$$($(2)ART_HOST_ARCH) \
+	  --oat-location=$$(PRIVATE_CORE_OAT_NAME) \
+          --image=$$(PRIVATE_CORE_IMG_NAME) \
+	  --base=$$(LIBART_IMG_HOST_BASE_ADDRESS) \
+	  --instruction-set=$$($(2)ART_HOST_ARCH) \
+	  $$(LOCAL_$(2)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION) \
+	  --host --android-root=$$(HOST_OUT) \
+	  --generate-debug-info --generate-build-id \
+	  --runtime-arg -XX:SlowDebug=true \
+	  --no-inline-from=core-oj-hostdex.jar \
+	  $$(PRIVATE_CORE_COMPILE_OPTIONS) && \
+	ANDROID_LOG_TAGS="*:e" $$(DEX2OAT) \
+	  --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
+	  --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
+	  --runtime-arg -Xbootclasspath:$$(subst $$(space),:,$$(strip \
+	        $$(HOST_CORE_DEX_FILES))) \
+	  --runtime-arg -Xbootclasspath-locations:$$(subst $$(space),:,$$(strip \
+	        $$(HOST_CORE_DEX_LOCATIONS))) \
+	  $$(addprefix --dex-file=, \
+	      $$(filter-out $$(HOST_CORE_IMG_DEX_FILES),$$(HOST_CORE_DEX_FILES))) \
+	  $$(addprefix --dex-location=, \
+	      $$(filter-out $$(HOST_CORE_IMG_DEX_LOCATIONS),$$(HOST_CORE_DEX_LOCATIONS))) \
+	  --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
+	  --oat-location=$$(PRIVATE_CORE_OAT_NAME) \
+	  --boot-image=$$(PRIVATE_CORE_IMAGE_LOCATION) \
+	  --image=$$(PRIVATE_CORE_IMG_NAME) \
+	  --instruction-set=$$($(2)ART_HOST_ARCH) \
 	  $$(LOCAL_$(2)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION) \
 	  --host --android-root=$$(HOST_OUT) \
 	  --generate-debug-info --generate-build-id \
@@ -129,13 +156,10 @@
   core_image_name :=
   core_oat_name :=
   core_infix :=
-  core_dex2oat_dependency := $(DEX2OAT_DEPENDENCY)
+  core_dex2oat_dependency := $(DEX2OAT)
 
   ifeq ($(1),optimizing)
     core_compile_options += --compiler-backend=Optimizing
-    # With the optimizing compiler, we want to rerun dex2oat whenever there is
-    # a dex2oat change to catch regressions early.
-    core_dex2oat_dependency := $(DEX2OAT)
   endif
   ifeq ($(1),interpreter)
     core_compile_options += --compiler-filter=quicken
@@ -150,6 +174,7 @@
     $$(error found $(1) expected interpreter, interp-ac, or optimizing)
   endif
 
+  core_image_location := $(ART_TARGET_TEST_OUT)/core$$(core_infix)$(CORE_IMG_SUFFIX)
   core_image_name := $($(2)TARGET_CORE_IMG_OUT_BASE)$$(core_infix)$(CORE_IMG_SUFFIX)
   core_oat_name := $($(2)TARGET_CORE_OAT_OUT_BASE)$$(core_infix)$(CORE_OAT_SUFFIX)
 
@@ -167,25 +192,53 @@
   TARGET_CORE_OAT_OUTS += $$(core_oat_name)
 
 $$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
+$$(core_image_name): PRIVATE_CORE_IMAGE_LOCATION := $$(core_image_location)
 $$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
 $$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
-$$(core_image_name): $$(TARGET_CORE_IMG_DEX_FILES) $$(core_dex2oat_dependency)
+# In addition to the primary core image containing TARGET_CORE_IMG_DEX_FILES,
+# also build a boot image extension for the remaining TARGET_CORE_DEX_FILES.
+$$(core_image_name): $$(TARGET_CORE_DEX_FILES) $$(core_dex2oat_dependency)
 	@echo "target dex2oat: $$@"
 	@mkdir -p $$(dir $$@)
-	$$(hide) $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
+	$$(hide) $$(DEX2OAT) \
+	  --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
 	  --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
-	  --image-classes=$$(PRELOADED_CLASSES) \
 	  $$(addprefix --dex-file=,$$(TARGET_CORE_IMG_DEX_FILES)) \
 	  $$(addprefix --dex-location=,$$(TARGET_CORE_IMG_DEX_LOCATIONS)) \
 	  --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
-	  --oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
-	  --base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(2)TARGET_ARCH) \
+	  --oat-location=$$(PRIVATE_CORE_OAT_NAME) \
+	  --image=$$(PRIVATE_CORE_IMG_NAME) \
+	  --base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) \
+	  --instruction-set=$$($(2)TARGET_ARCH) \
 	  --instruction-set-variant=$$($(2)DEX2OAT_TARGET_CPU_VARIANT) \
 	  --instruction-set-features=$$($(2)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
 	  --android-root=$$(PRODUCT_OUT)/system \
 	  --generate-debug-info --generate-build-id \
 	  --runtime-arg -XX:SlowDebug=true \
-	  $$(PRIVATE_CORE_COMPILE_OPTIONS) || (rm $$(PRIVATE_CORE_OAT_NAME); exit 1)
+	  $$(PRIVATE_CORE_COMPILE_OPTIONS) && \
+	$$(DEX2OAT) \
+	  --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
+	  --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
+	  --runtime-arg -Xbootclasspath:$$(subst $$(space),:,$$(strip \
+	        $$(TARGET_CORE_DEX_FILES))) \
+	  --runtime-arg -Xbootclasspath-locations:$$(subst $$(space),:,$$(strip \
+	        $$(TARGET_CORE_DEX_LOCATIONS))) \
+	  $$(addprefix --dex-file=, \
+	       $$(filter-out $$(TARGET_CORE_IMG_DEX_FILES),$$(TARGET_CORE_DEX_FILES))) \
+	  $$(addprefix --dex-location=, \
+	       $$(filter-out $$(TARGET_CORE_IMG_DEX_LOCATIONS),$$(TARGET_CORE_DEX_LOCATIONS))) \
+	  --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
+	  --oat-location=$$(PRIVATE_CORE_OAT_NAME) \
+	  --boot-image=$$(PRIVATE_CORE_IMAGE_LOCATION) \
+	  --image=$$(PRIVATE_CORE_IMG_NAME) \
+	  --instruction-set=$$($(2)TARGET_ARCH) \
+	  --instruction-set-variant=$$($(2)DEX2OAT_TARGET_CPU_VARIANT) \
+	  --instruction-set-features=$$($(2)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
+	  --android-root=$$(PRODUCT_OUT)/system \
+	  --generate-debug-info --generate-build-id \
+	  --runtime-arg -XX:SlowDebug=true \
+	  $$(PRIVATE_CORE_COMPILE_OPTIONS) || \
+	(rm $$(PRIVATE_CORE_OAT_NAME); exit 1)
 
 $$(core_oat_name): $$(core_image_name)
 
diff --git a/build/apex/Android.bp b/build/apex/Android.bp
index 95aea3c..22510ef 100644
--- a/build/apex/Android.bp
+++ b/build/apex/Android.bp
@@ -1,27 +1,33 @@
-// Android Runtime APEX module.
+// ART APEX module
+//
+// Contains both the Android Managed Runtime (ART) and the Android Core Library
+// (Libcore).
 
 // Modules listed in LOCAL_REQUIRED_MODULES for module art-runtime in art/Android.mk.
 // - Base requirements (binaries for which both 32- and 64-bit versions are built, if relevant).
 art_runtime_base_binaries_both = [
     "dalvikvm",
 ]
-// - Base requirements (binaries for which a 32-bit version is preferred).
-art_runtime_base_binaries_prefer32 = [
+art_runtime_base_binaries_both_on_device_first_on_host = [
     "dex2oat",
+]
+// - Base requirements (binaries for which a 32-bit version is preferred on device, but for which
+//   only the "first" (likely 64-bit) version is required on host).
+art_runtime_base_binaries_prefer32_on_device_first_on_host = [
     "dexoptanalyzer",
     "profman",
 ]
 // - Base requirements (libraries).
 //
 // Note: ART on-device chroot-based testing and benchmarking is not yet using
-// the Runtime APEX, meaning that copies of some of these libraries have to be
+// the ART APEX, meaning that copies of some of these libraries have to be
 // installed in `/system` for the ART Buildbot set-up to work properly. This is
 // done by the `standalone-apex-files` Make phony target, used by the ART
 // Buildbot and Golem (see `art/Android.mk`). If you add libraries to this list,
-// you may have to also add them to `PRIVATE_RUNTIME_DEPENDENCY_LIBS` in
+// you may have to also add them to `PRIVATE_ART_APEX_DEPENDENCY_LIBS` in
 // `art/Android.mk`.
 // TODO(b/121117762): Remove this note when both the ART Buildbot and Golem use
-// the Runtime APEX.
+// the ART APEX.
 art_runtime_base_native_shared_libs = [
     // External API (having APEX stubs).
     "libdexfile_external",
@@ -42,6 +48,11 @@
     "libopenjdkjvm",
     "libopenjdkjvmti",
 ]
+
+art_runtime_base_native_device_only_shared_libs = [
+    "libperfetto_hprof",
+]
+
 bionic_native_shared_libs = [
     // External API (having APEX stubs).
     "libc",
@@ -49,25 +60,17 @@
     "libdl",
 ]
 
-bionic_native_shared_libs_device = [
-    // ... and their internal dependencies
-    // These are available only on device
-    "libc_malloc_debug",
-    "libc_malloc_hooks",
-]
-
 bionic_binaries_both = [
     "linker",
 ]
-// - Debug variants (binaries for which a 32-bit version is preferred).
-art_runtime_debug_binaries_prefer32 = [
+
+// - Debug variants (binaries for which a 32-bit version is preferred on device, but for which
+//   only the "first" (likely 64-bit) version is required on host).
+art_runtime_debug_binaries_prefer32_on_device_first_on_host = [
     "dexoptanalyzerd",
     "profmand",
 ]
-art_runtime_debug_binaries_prefer32_device = [
-    "dex2oatd",
-]
-art_runtime_debug_binaries_both_host = [
+art_runtime_debug_binaries_both_on_device_first_on_host = [
     "dex2oatd",
 ]
 
@@ -76,16 +79,21 @@
     "libadbconnectiond",
     "libartd",
     "libartd-compiler",
+    "libdexfiled_external",
     "libopenjdkjvmd",
     "libopenjdkjvmtid",
 ]
 
-// Data files associated with bionic / managed core library APIs.
-art_runtime_data_file_prebuilts = [
-    "apex_tz_version",
-    "apex_tzdata",
-    "apex_tzlookup.xml",
-    "apex_icu.dat",
+art_runtime_base_native_device_only_debug_shared_libs = [
+    "libperfetto_hprofd",
+]
+
+// Libraries needed to execute ART run-tests.
+// TODO(b/124476339): When bug 124476339 is fixed, add these libraries as `runtime_libs`
+// dependencies of `libartd-compiler`, and remove `art_runtime_run_test_libs`.
+art_runtime_run_test_libs = [
+    "libart-disassembler",
+    "libartd-disassembler",
 ]
 
 // Tools common to both device APEX and host APEX. Derived from art-tools in art/Android.mk.
@@ -96,7 +104,15 @@
 
 // Tools common to both device and host debug APEXes.
 art_tools_debug_binaries = [
+    "dexanalyze",
     "dexdiag",
+    "dexlayout",
+    "dexlayoutd",
+]
+
+art_tools_debug_binaries_both = [
+    "imgdiag",
+    "imgdiagd",
 ]
 
 // Tools exclusively for the device APEX derived from art-tools in art/Android.mk.
@@ -120,15 +136,11 @@
     "hprof-conv",
 ]
 
-// Libraries needed to use com.android.runtime.host for zipapex run-tests
-art_runtime_host_run_test_libs = [
-    "libartd-disassembler"
-]
-
 // Core Java libraries.
 libcore_java_libs = [
     "core-oj",
     "core-libart",
+    "core-icu4j",
     "okhttp",
     "bouncycastle",
     "apache-xml",
@@ -137,14 +149,14 @@
 // Native libraries that support the core Java libraries.
 //
 // Note: ART on-device chroot-based testing and benchmarking is not yet using
-// the Runtime APEX, meaning that copies of some of these libraries have to be
+// the ART APEX, meaning that copies of some of these libraries have to be
 // installed in `/system` for the ART Buildbot set-up to work properly. This is
 // done by the `standalone-apex-files` Make phony target, used by the ART
 // Buildbot and Golem (see `art/Android.mk`). If you add libraries to this list,
-// you may have to also add them to `PRIVATE_RUNTIME_DEPENDENCY_LIBS` in
+// you may have to also add them to `PRIVATE_ART_APEX_DEPENDENCY_LIBS` in
 // `art/Android.mk`.
 // TODO(b/121117762): Remove this note when both the ART Buildbot and Golem use
-// the Runtime APEX.
+// the ART APEX.
 libcore_native_shared_libs = [
     // External API (having APEX stubs).
     "libandroidicu",
@@ -154,6 +166,7 @@
     "libexpat",
     "libicui18n",
     "libicuuc",
+    "libicu_jni",
     "libjavacore",
     "libopenjdk",
 ]
@@ -175,130 +188,168 @@
     "libcutils",
 ]
 
+android_app_certificate {
+    name: "com.android.art.certificate",
+    certificate: "com.android.art",
+}
+
 apex_key {
-    name: "com.android.runtime.key",
-    public_key: "com.android.runtime.avbpubkey",
-    private_key: "com.android.runtime.pem",
-}
-
-android_app_certificate {
-    name: "com.android.runtime.debug.certificate",
-    certificate: "com.android.runtime.debug",
-}
-
-android_app_certificate {
-    name: "com.android.runtime.release.certificate",
-    certificate: "com.android.runtime.release",
+    name: "com.android.art.key",
+    public_key: "com.android.art.avbpubkey",
+    private_key: "com.android.art.pem",
 }
 
 prebuilt_etc {
-    name: "com.android.runtime.ld.config.txt",
+    name: "com.android.art.ld.config.txt",
     src: "ld.config.txt",
     filename: "ld.config.txt",
     installable: false,
 }
 
+// Default values shared by device ART APEXes.
 apex_defaults {
-    name: "com.android.runtime-defaults",
+    name: "com.android.art-defaults",
     compile_multilib: "both",
-    manifest: "manifest.json",
+    manifest: "manifest-art.json",
     java_libs: libcore_java_libs,
-    native_shared_libs: art_runtime_base_native_shared_libs
-        + bionic_native_shared_libs
-        + libcore_native_device_only_shared_libs
-        + libcore_native_shared_libs,
+    native_shared_libs: art_runtime_base_native_shared_libs +
+        art_runtime_base_native_device_only_shared_libs +
+        libcore_native_device_only_shared_libs +
+        libcore_native_shared_libs,
     multilib: {
         both: {
-            binaries: art_runtime_base_binaries_both
-                + bionic_binaries_both,
+            binaries: art_runtime_base_binaries_both +
+                art_runtime_base_binaries_both_on_device_first_on_host,
         },
         prefer32: {
-            binaries: art_runtime_base_binaries_prefer32,
+            binaries: art_runtime_base_binaries_prefer32_on_device_first_on_host,
         },
         first: {
-            binaries: art_tools_common_binaries
-                + art_tools_device_only_binaries,
-        }
+            binaries: art_tools_common_binaries +
+                art_tools_device_only_binaries,
+        },
     },
-    binaries: [
-        "art_postinstall_hook",
-        "art_preinstall_hook",
-        "art_preinstall_hook_boot",
-        "art_preinstall_hook_system_server",
-        "art_prepostinstall_utils",
+    prebuilts: ["com.android.art.ld.config.txt"],
+    key: "com.android.art.key",
+    required: [
+        "art_apex_boot_integrity",
+        "com.android.i18n",
     ],
-    prebuilts: art_runtime_data_file_prebuilts
-        + ["com.android.runtime.ld.config.txt"],
-    key: "com.android.runtime.key",
-    required: ["art_apex_boot_integrity"],
 }
 
-// Release version of the Runtime APEX module (not containing debug
+// Default values shared by (device) Debug and Testing ART APEXes.
+apex_defaults {
+    name: "com.android.art-dev-defaults",
+    defaults: ["com.android.art-defaults"],
+    native_shared_libs: art_runtime_base_native_device_only_debug_shared_libs +
+        art_runtime_run_test_libs +
+        art_runtime_debug_native_shared_libs +
+        libcore_debug_native_shared_libs,
+    multilib: {
+        both: {
+            binaries: art_tools_debug_binaries_both +
+                art_runtime_debug_binaries_both_on_device_first_on_host,
+        },
+        prefer32: {
+            binaries: art_runtime_debug_binaries_prefer32_on_device_first_on_host,
+        },
+        first: {
+            binaries: art_tools_debug_binaries +
+                art_tools_debug_device_only_binaries,
+        },
+    },
+}
+
+// Release version of the ART APEX module (not containing debug
 // variants nor tools), included in user builds. Also used for
 // storage-constrained devices in userdebug and eng builds.
-apex {
-    name: "com.android.runtime.release",
-    defaults: ["com.android.runtime-defaults"],
-    native_shared_libs: bionic_native_shared_libs_device,
-    certificate: ":com.android.runtime.release.certificate",
+art_apex {
+    name: "com.android.art.release",
+    defaults: ["com.android.art-defaults"],
+    certificate: ":com.android.art.certificate",
 }
 
-// "Debug" version of the Runtime APEX module (containing both release and
+// "Debug" version of the ART APEX module (containing both release and
 // debug variants, as well as additional tools), included in userdebug and
 // eng build.
-apex {
-    name: "com.android.runtime.debug",
-    defaults: ["com.android.runtime-defaults"],
-    native_shared_libs: art_runtime_debug_native_shared_libs
-        + libcore_debug_native_shared_libs
-        + bionic_native_shared_libs_device,
-    multilib: {
-        prefer32: {
-            binaries: art_runtime_debug_binaries_prefer32
-                + art_runtime_debug_binaries_prefer32_device,
-        },
-        first: {
-            binaries: art_tools_debug_binaries
-                + art_tools_debug_device_only_binaries,
-        }
-    },
-    certificate: ":com.android.runtime.debug.certificate",
+art_apex {
+    name: "com.android.art.debug",
+    defaults: ["com.android.art-dev-defaults"],
+    certificate: ":com.android.art.certificate",
 }
 
-// TODO: Do this better. art_apex will disable host builds when
-// HOST_PREFER_32_BIT is set. We cannot simply use com.android.runtime.debug
+// ART gtests with dependencies on internal ART APEX libraries.
+art_gtests = [
+    "art_cmdline_tests",
+    "art_compiler_tests",
+    "art_dex2oat_tests",
+    "art_dexanalyze_tests",
+    "art_dexdiag_tests",
+    "art_dexdump_tests",
+    "art_dexlayout_tests",
+    "art_dexlist_tests",
+    "art_dexoptanalyzer_tests",
+    "art_imgdiag_tests",
+    "art_libartbase_tests",
+    "art_libartpalette_tests",
+    "art_libdexfile_tests",
+    "art_libdexfile_support_tests",
+    "art_libprofile_tests",
+    "art_oatdump_tests",
+    "art_profman_tests",
+    "art_runtime_compiler_tests",
+    "art_runtime_tests",
+    "art_sigchain_tests",
+]
+
+// "Testing" version of the ART APEX module (containing both release
+// and debug variants, additional tools, and ART gtests), for testing
+// purposes only.
+art_apex_test {
+    name: "com.android.art.testing",
+    defaults: ["com.android.art-dev-defaults"],
+    file_contexts: ":com.android.art.debug-file_contexts",
+    certificate: ":com.android.art.certificate",
+    tests: art_gtests,
+    binaries: ["signal_dumper"], // Need signal_dumper for run-tests.
+}
+
+// TODO: Do this better. art_apex_test_host will disable host builds when
+// HOST_PREFER_32_BIT is set. We cannot simply use com.android.art.debug
 // because binaries have different multilib classes and 'multilib: {}' isn't
 // supported by target: { ... }.
 // See b/120617876 for more information.
-art_apex_test {
-    name: "com.android.runtime.host",
+art_apex_test_host {
+    name: "com.android.art.host",
     compile_multilib: "both",
     payload_type: "zip",
     host_supported: true,
     device_supported: false,
-    manifest: "manifest.json",
+    manifest: "manifest-art.json",
     java_libs: libcore_java_libs,
     ignore_system_library_special_case: true,
-    native_shared_libs: art_runtime_base_native_shared_libs
-        + art_runtime_debug_native_shared_libs
-        + libcore_native_shared_libs
-        + libcore_debug_native_shared_libs
-        + art_runtime_libraries_zipapex
-        + art_runtime_host_run_test_libs,
+    native_shared_libs: art_runtime_base_native_shared_libs +
+        art_runtime_debug_native_shared_libs +
+        libcore_native_shared_libs +
+        libcore_debug_native_shared_libs +
+        art_runtime_libraries_zipapex +
+        art_runtime_run_test_libs,
     multilib: {
         both: {
-            binaries: art_runtime_base_binaries_both
-                + art_runtime_debug_binaries_both_host,
+            binaries: art_runtime_base_binaries_both +
+                art_tools_debug_binaries_both,
         },
         first: {
-            binaries: art_tools_common_binaries
-                + art_tools_debug_binaries  // Host APEX is always debug.
-                + art_tools_host_only_binaries
-                + art_runtime_base_binaries_prefer32
-                + art_runtime_debug_binaries_prefer32,
-        }
+            binaries: art_runtime_base_binaries_prefer32_on_device_first_on_host +
+                art_runtime_base_binaries_both_on_device_first_on_host +
+                art_runtime_debug_binaries_prefer32_on_device_first_on_host +
+                art_runtime_debug_binaries_both_on_device_first_on_host +
+                art_tools_common_binaries +
+                art_tools_debug_binaries + // Host APEX is always debug.
+                art_tools_host_only_binaries,
+        },
     },
-    key: "com.android.runtime.key",
+    key: "com.android.art.key",
     target: {
         darwin: {
             enabled: false,
@@ -309,8 +360,8 @@
                 both: {
                     native_shared_libs: bionic_native_shared_libs,
                     binaries: bionic_binaries_both,
-                }
-            }
+                },
+            },
         },
     },
 }
@@ -321,94 +372,88 @@
     main: "art_apex_test.py",
     version: {
         py2: {
-            enabled: false,
+            enabled: true,
         },
         py3: {
-            enabled: true,
+            enabled: false,
         },
     },
 }
 
 // Genrules so we can run the checker, and empty Java library so that it gets executed.
 
-genrule {
-    name: "art-check-release-apex-gen",
-    srcs: [":com.android.runtime.release"],
+art_check_apex_gen_stem = "$(location art-apex-tester)" +
+    " --debugfs $(location debugfs)" +
+    " --tmpdir $(genDir)"
+
+// The non-flattened APEXes are always checked, as they are always generated
+// (even when APEX flattening is enabled).
+genrule_defaults {
+    name: "art-check-apex-gen-defaults",
     tools: [
         "art-apex-tester",
         "debugfs",
     ],
-    cmd: "$(location art-apex-tester)"
-              + " --debugfs $(location debugfs)"
-              + " --tmpdir $(genDir)"
-              + " $(in)"
-         + " && touch $(out)",
-    out: ["art-check-release-apex-gen.dummy"],
 }
-cc_prebuilt_binary {
-    name: "art-check-release-apex-gen-fakebin",
-    srcs: [":art-check-release-apex-gen"],
+
+cc_defaults {
+    name: "art-check-apex-gen-fakebin-defaults",
     host_supported: true,
     device_supported: false,
     target: {
         darwin: {
-            enabled: false,  // No python3.
+            enabled: false, // No python3.
         },
     },
 }
 
 genrule {
+    name: "art-check-release-apex-gen",
+    defaults: ["art-check-apex-gen-defaults"],
+    srcs: [":com.android.art.release"],
+    cmd: art_check_apex_gen_stem +
+        " --flavor release" +
+        " $(in)" +
+        " && touch $(out)",
+    out: ["art-check-release-apex-gen.dummy"],
+}
+
+cc_prebuilt_binary {
+    name: "art-check-release-apex-gen-fakebin",
+    defaults: ["art-check-apex-gen-fakebin-defaults"],
+    srcs: [":art-check-release-apex-gen"],
+}
+
+genrule {
     name: "art-check-debug-apex-gen",
-    srcs: [":com.android.runtime.debug"],
-    tools: [
-        "art-apex-tester",
-        "debugfs",
-    ],
-    cmd: "$(location art-apex-tester)"
-              + " --debugfs $(location debugfs)"
-              + " --tmpdir $(genDir)"
-              + " --debug"
-              + " $(in)"
-         + " && touch $(out)",
+    defaults: ["art-check-apex-gen-defaults"],
+    srcs: [":com.android.art.debug"],
+    cmd: art_check_apex_gen_stem +
+        " --flavor debug" +
+        " $(in)" +
+        " && touch $(out)",
     out: ["art-check-debug-apex-gen.dummy"],
 }
+
 cc_prebuilt_binary {
     name: "art-check-debug-apex-gen-fakebin",
+    defaults: ["art-check-apex-gen-fakebin-defaults"],
     srcs: [":art-check-debug-apex-gen"],
-    host_supported: true,
-    device_supported: false,
-    target: {
-        darwin: {
-            enabled: false,  // No python3.
-        },
-    },
 }
 
-// Pre-install scripts.
-
-sh_binary {
-    name: "art_preinstall_hook",
-    src: "art_preinstall_hook.sh",
-}
-sh_binary {
-    name: "art_preinstall_hook_boot",
-    src: "art_preinstall_hook_boot.sh",
-}
-sh_binary {
-    name: "art_preinstall_hook_system_server",
-    src: "art_preinstall_hook_system_server.sh",
-}
-sh_binary {
-    name: "art_prepostinstall_utils",
-    src: "art_prepostinstall_utils.sh",
-}
-sh_binary {
-    name: "art_postinstall_hook",
-    src: "art_postinstall_hook.sh",
+genrule {
+    name: "art-check-testing-apex-gen",
+    defaults: ["art-check-apex-gen-defaults"],
+    srcs: [":com.android.art.testing"],
+    cmd: art_check_apex_gen_stem +
+        " --flavor testing" +
+        " $(in)" +
+        " && touch $(out)",
+    out: ["art-check-testing-apex-gen.dummy"],
 }
 
-sh_binary {
-    name: "art_apex_boot_integrity",
-    src: "art_apex_boot_integrity.sh",
-    init_rc: ["art_apex_boot_integrity.rc"],
+cc_prebuilt_binary {
+    name: "art-check-testing-apex-gen-fakebin",
+    defaults: ["art-check-apex-gen-fakebin-defaults"],
+    srcs: [":art-check-testing-apex-gen"],
 }
diff --git a/build/apex/art_apex_test.py b/build/apex/art_apex_test.py
index 1415571..6bccdf5 100755
--- a/build/apex/art_apex_test.py
+++ b/build/apex/art_apex_test.py
@@ -1,4 +1,5 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
 
 # Copyright (C) 2019 The Android Open Source Project
 #
@@ -26,16 +27,47 @@
 
 logging.basicConfig(format='%(message)s')
 
+# Flavors of ART APEX package.
+FLAVOR_RELEASE = 'release'
+FLAVOR_DEBUG = 'debug'
+FLAVOR_TESTING = 'testing'
+FLAVOR_AUTO = 'auto'
+FLAVORS_ALL = [FLAVOR_RELEASE, FLAVOR_DEBUG, FLAVOR_TESTING, FLAVOR_AUTO]
+
+# Bitness options for APEX package
+BITNESS_32 = '32'
+BITNESS_64 = '64'
+BITNESS_MULTILIB = 'multilib'
+BITNESS_AUTO = 'auto'
+BITNESS_ALL = [BITNESS_32, BITNESS_64, BITNESS_MULTILIB, BITNESS_AUTO]
+
+# Architectures supported by APEX packages.
+ARCHS = ["arm", "arm64", "x86", "x86_64"]
+
+# Directory containing ART tests within an ART APEX (if the package includes
+# any). ART test executables are installed in `bin/art/<arch>`. Segregating
+# tests by architecture is useful on devices supporting more than one
+# architecture, as it permits testing all of them using a single ART APEX
+# package.
+ART_TEST_DIR = 'bin/art'
+
+
+# Test if a given variable is set to a string "true".
+def isEnvTrue(var):
+  return var in os.environ and os.environ[var] == 'true'
+
 
 class FSObject:
-  def __init__(self, name, is_dir, is_exec, is_symlink):
+  def __init__(self, name, is_dir, is_exec, is_symlink, size):
     self.name = name
     self.is_dir = is_dir
     self.is_exec = is_exec
     self.is_symlink = is_symlink
+    self.size = size
 
   def __str__(self):
-    return '%s(dir=%r,exec=%r,symlink=%r)' % (self.name, self.is_dir, self.is_exec, self.is_symlink)
+    return '%s(dir=%r,exec=%r,symlink=%r,size=%d)' \
+             % (self.name, self.is_dir, self.is_exec, self.is_symlink, self.size)
 
 
 class TargetApexProvider:
@@ -97,6 +129,9 @@
         continue
       bits = comps[2]
       name = comps[5]
+      size_str = comps[6]
+      # Use a negative value as an indicator of undefined/unknown size.
+      size = int(size_str) if size_str != '' else -1
       if len(bits) != 6:
         logging.warning('Dont understand bits \'%s\'', bits)
         continue
@@ -107,7 +142,40 @@
 
       is_exec = is_exec_bit(bits[3]) and is_exec_bit(bits[4]) and is_exec_bit(bits[5])
       is_symlink = bits[1] == '2'
-      apex_map[name] = FSObject(name, is_dir, is_exec, is_symlink)
+      apex_map[name] = FSObject(name, is_dir, is_exec, is_symlink, size)
+    self._folder_cache[apex_dir] = apex_map
+    return apex_map
+
+
+class TargetFlattenedApexProvider:
+  def __init__(self, apex):
+    self._folder_cache = {}
+    self._apex = apex
+
+  def get(self, path):
+    apex_dir, name = os.path.split(path)
+    if not apex_dir:
+      apex_dir = '.'
+    apex_map = self.read_dir(apex_dir)
+    return apex_map[name] if name in apex_map else None
+
+  def read_dir(self, apex_dir):
+    if apex_dir in self._folder_cache:
+      return self._folder_cache[apex_dir]
+    apex_map = {}
+    dirname = os.path.join(self._apex, apex_dir)
+    if os.path.exists(dirname):
+      for basename in os.listdir(dirname):
+        filepath = os.path.join(dirname, basename)
+        is_dir = os.path.isdir(filepath)
+        is_exec = os.access(filepath, os.X_OK)
+        is_symlink = os.path.islink(filepath)
+        if is_symlink:
+          # Report the length of the symlink's target's path as file size, like `ls`.
+          size = len(os.readlink(filepath))
+        else:
+          size = os.path.getsize(filepath)
+        apex_map[basename] = FSObject(basename, is_dir, is_exec, is_symlink, size)
     self._folder_cache[apex_dir] = apex_map
     return apex_map
 
@@ -115,7 +183,7 @@
 class HostApexProvider:
   def __init__(self, apex, tmpdir):
     self._tmpdir = tmpdir
-    self.folder_cache = {}
+    self._folder_cache = {}
     self._payload = os.path.join(self._tmpdir, 'apex_payload.zip')
     # Extract payload to tmpdir.
     apex_zip = zipfile.ZipFile(apex)
@@ -134,12 +202,12 @@
     return apex_map[name] if name in apex_map else None
 
   def read_dir(self, apex_dir):
-    if apex_dir in self.folder_cache:
-      return self.folder_cache[apex_dir]
-    if not self.folder_cache:
+    if apex_dir in self._folder_cache:
+      return self._folder_cache[apex_dir]
+    if not self._folder_cache:
       self.parse_zip()
-    if apex_dir in self.folder_cache:
-      return self.folder_cache[apex_dir]
+    if apex_dir in self._folder_cache:
+      return self._folder_cache[apex_dir]
     return {}
 
   def parse_zip(self):
@@ -163,20 +231,23 @@
         apex_dir, base = os.path.split(path)
         # TODO: If directories are stored, base will be empty.
 
-        if apex_dir not in self.folder_cache:
-          self.folder_cache[apex_dir] = {}
-        dir_map = self.folder_cache[apex_dir]
+        if apex_dir not in self._folder_cache:
+          self._folder_cache[apex_dir] = {}
+        dir_map = self._folder_cache[apex_dir]
         if base not in dir_map:
           if is_zipinfo:
             bits = (zipinfo.external_attr >> 16) & 0xFFFF
             is_dir = get_octal(bits, 4) == 4
             is_symlink = get_octal(bits, 4) == 2
             is_exec = bits_is_exec(bits)
+            size = zipinfo.file_size
           else:
             is_exec = False  # Seems we can't get this easily?
             is_symlink = False
             is_dir = True
-          dir_map[base] = FSObject(base, is_dir, is_exec, is_symlink)
+            # Use a negative value as an indicator of undefined/unknown size.
+            size = -1
+          dir_map[base] = FSObject(base, is_dir, is_exec, is_symlink, size)
         is_zipinfo = False
         path = apex_dir
 
@@ -206,6 +277,14 @@
       return False, '%s is a directory'
     return True, ''
 
+  def is_dir(self, path):
+    fs_object = self._provider.get(path)
+    if fs_object is None:
+      return False, 'Could not find %s'
+    if not fs_object.is_dir:
+      return False, '%s is not a directory'
+    return True, ''
+
   def check_file(self, path):
     ok, msg = self.is_file(path)
     if not ok:
@@ -233,6 +312,31 @@
       self.fail('%s is not a symlink', path)
     self._expected_file_globs.add(path)
 
+  def arch_dirs_for_path(self, path):
+    # Look for target-specific subdirectories for the given directory path.
+    # This is needed because the list of build targets is not propagated
+    # to this script.
+    #
+    # TODO(b/123602136): Pass build target information to this script and fix
+    # all places where this function in used (or similar workarounds).
+    dirs = []
+    for arch in ARCHS:
+      dir = '%s/%s' % (path, arch)
+      found, _ = self.is_dir(dir)
+      if found:
+        dirs.append(dir)
+    return dirs
+
+  def check_art_test_executable(self, filename):
+    dirs = self.arch_dirs_for_path(ART_TEST_DIR)
+    if not dirs:
+      self.fail('ART test binary missing: %s', filename)
+    for dir in dirs:
+      test_path = '%s/%s' % (dir, filename)
+      self._expected_file_globs.add(test_path)
+      if not self._provider.get(test_path).is_exec:
+        self.fail('%s is not executable', test_path)
+
   def check_single_library(self, filename):
     lib_path = 'lib/%s' % filename
     lib64_path = 'lib64/%s' % filename
@@ -245,12 +349,22 @@
     if not lib_is_file and not lib64_is_file:
       self.fail('Library missing: %s', filename)
 
+  def check_dexpreopt(self, basename):
+    dirs = self.arch_dirs_for_path('javalib')
+    for dir in dirs:
+      for ext in ['art', 'oat', 'vdex']:
+        self.check_file('%s/%s.%s' % (dir, basename, ext))
+
   def check_java_library(self, basename):
     return self.check_file('javalib/%s.jar' % basename)
 
   def ignore_path(self, path_glob):
     self._expected_file_globs.add(path_glob)
 
+  def check_optional_art_test_executable(self, filename):
+    for arch in ARCHS:
+      self.ignore_path('%s/%s/%s' % (ART_TEST_DIR, arch, filename))
+
   def check_no_superfluous_files(self, dir_path):
     paths = []
     for name in sorted(self._provider.read_dir(dir_path).keys()):
@@ -275,10 +389,18 @@
     """Check bin/filename32, and/or bin/filename64, with symlink bin/filename."""
     raise NotImplementedError
 
+  def check_symlinked_first_executable(self, filename):
+    """Check bin/filename32, and/or bin/filename64, with symlink bin/filename."""
+    raise NotImplementedError
+
   def check_multilib_executable(self, filename):
     """Check bin/filename for 32 bit, and/or bin/filename64."""
     raise NotImplementedError
 
+  def check_first_executable(self, filename):
+    """Check bin/filename for 32 bit, and/or bin/filename64."""
+    raise NotImplementedError
+
   def check_native_library(self, basename):
     """Check lib/basename.so, and/or lib64/basename.so."""
     raise NotImplementedError
@@ -297,8 +419,15 @@
     self.check_executable('%s32' % filename)
     self.check_executable_symlink(filename)
 
+  def check_symlinked_first_executable(self, filename):
+    self.check_executable('%s32' % filename)
+    self.check_executable_symlink(filename)
+
   def check_multilib_executable(self, filename):
-    self.check_executable(filename)
+    self.check_executable('%s32' % filename)
+
+  def check_first_executable(self, filename):
+    self.check_executable('%s32' % filename)
 
   def check_native_library(self, basename):
     # TODO: Use $TARGET_ARCH (e.g. check whether it is "arm" or "arm64") to improve
@@ -317,9 +446,16 @@
     self.check_executable('%s64' % filename)
     self.check_executable_symlink(filename)
 
+  def check_symlinked_first_executable(self, filename):
+    self.check_executable('%s64' % filename)
+    self.check_executable_symlink(filename)
+
   def check_multilib_executable(self, filename):
     self.check_executable('%s64' % filename)
 
+  def check_first_executable(self, filename):
+    self.check_executable('%s64' % filename)
+
   def check_native_library(self, basename):
     # TODO: Use $TARGET_ARCH (e.g. check whether it is "arm" or "arm64") to improve
     # the precision of this test?
@@ -338,9 +474,16 @@
     self.check_executable('%s64' % filename)
     self.check_executable_symlink(filename)
 
+  def check_symlinked_first_executable(self, filename):
+    self.check_executable('%s64' % filename)
+    self.check_executable_symlink(filename)
+
   def check_multilib_executable(self, filename):
     self.check_executable('%s64' % filename)
-    self.check_executable(filename)
+    self.check_executable('%s32' % filename)
+
+  def check_first_executable(self, filename):
+    self.check_executable('%s64' % filename)
 
   def check_native_library(self, basename):
     # TODO: Use $TARGET_ARCH (e.g. check whether it is "arm" or "arm64") to improve
@@ -364,11 +507,11 @@
     return 'Release Checker'
 
   def run(self):
-    # Check the APEX manifest.
-    self._checker.check_file('apex_manifest.json')
+    # Check the Protocol Buffers APEX manifest.
+    self._checker.check_file('apex_manifest.pb')
 
     # Check binaries for ART.
-    self._checker.check_executable('dex2oat')
+    self._checker.check_first_executable('dex2oat')
     self._checker.check_executable('dexdump')
     self._checker.check_executable('dexlist')
     self._checker.check_executable('dexoptanalyzer')
@@ -386,6 +529,7 @@
     self._checker.check_native_library('libart')
     self._checker.check_native_library('libart-compiler')
     self._checker.check_native_library('libart-dexlayout')
+    self._checker.check_native_library('libart-disassembler')
     self._checker.check_native_library('libartbase')
     self._checker.check_native_library('libartpalette')
     self._checker.check_native_library('libdexfile')
@@ -398,9 +542,13 @@
     # Check java libraries for Managed Core Library.
     self._checker.check_java_library('apache-xml')
     self._checker.check_java_library('bouncycastle')
+    self._checker.check_java_library('core-icu4j')
     self._checker.check_java_library('core-libart')
     self._checker.check_java_library('core-oj')
     self._checker.check_java_library('okhttp')
+    if isEnvTrue('EMMA_INSTRUMENT_FRAMEWORK'):
+      # In coverage builds jacoco is added to the list of ART apex jars.
+      self._checker.check_java_library('jacocoagent')
 
     # Check internal native libraries for Managed Core Library.
     self._checker.check_native_library('libjavacore')
@@ -434,6 +582,16 @@
     self._checker.check_optional_native_library('libclang_rt.hwasan*')
     self._checker.check_optional_native_library('libclang_rt.ubsan*')
 
+    # Check dexpreopt files for libcore bootclasspath jars.
+    self._checker.check_dexpreopt('boot')
+    self._checker.check_dexpreopt('boot-apache-xml')
+    self._checker.check_dexpreopt('boot-bouncycastle')
+    self._checker.check_dexpreopt('boot-core-icu4j')
+    self._checker.check_dexpreopt('boot-core-libart')
+    self._checker.check_dexpreopt('boot-okhttp')
+    if isEnvTrue('EMMA_INSTRUMENT_FRAMEWORK'):
+      # In coverage builds the ART boot image includes jacoco.
+      self._checker.check_dexpreopt('boot-jacocoagent')
 
 class ReleaseTargetChecker:
   def __init__(self, checker):
@@ -443,30 +601,18 @@
     return 'Release (Target) Checker'
 
   def run(self):
-    # Check the APEX package scripts.
-    self._checker.check_executable('art_postinstall_hook')
-    self._checker.check_executable('art_preinstall_hook')
-    self._checker.check_executable('art_preinstall_hook_boot')
-    self._checker.check_executable('art_preinstall_hook_system_server')
-    self._checker.check_executable('art_prepostinstall_utils')
+    # We don't check for the presence of the JSON APEX manifest (file
+    # `apex_manifest.json`, only present in target APEXes), as it is only
+    # included for compatibility reasons with Android Q and will likely be
+    # removed in Android R.
 
     # Check binaries for ART.
     self._checker.check_executable('oatdump')
+    self._checker.check_multilib_executable('dex2oat')
 
     # Check internal libraries for ART.
     self._checker.check_prefer64_library('libart-disassembler')
-
-    # Check binaries for Bionic.
-    self._checker.check_multilib_executable('linker')
-    self._checker.check_multilib_executable('linker_asan')
-
-    # Check libraries for Bionic.
-    self._checker.check_native_library('bionic/libc')
-    self._checker.check_native_library('bionic/libdl')
-    self._checker.check_native_library('bionic/libm')
-    # ... and its internal dependencies
-    self._checker.check_native_library('libc_malloc_hooks')
-    self._checker.check_native_library('libc_malloc_debug')
+    self._checker.check_native_library('libperfetto_hprof')
 
     # Check exported native libraries for Managed Core Library.
     self._checker.check_native_library('libandroidicu')
@@ -477,13 +623,15 @@
     self._checker.check_native_library('libexpat')
     self._checker.check_native_library('libicui18n')
     self._checker.check_native_library('libicuuc')
+    self._checker.check_native_library('libicu_jni')
     self._checker.check_native_library('libpac')
     self._checker.check_native_library('libz')
 
-    # TODO(b/124293228): Cuttlefish puts ARM libs in a lib/arm subdirectory.
-    # Check that properly on that arch, but for now just ignore the directory.
+    # TODO(b/139046641): Fix proper 2nd arch checks. For now, just ignore these
+    # directories.
+    self._checker.ignore_path('bin/arm')
     self._checker.ignore_path('lib/arm')
-    self._checker.ignore_path('lib/arm64')
+    self._checker.ignore_path('lib64/arm')
 
 
 class ReleaseHostChecker:
@@ -496,7 +644,8 @@
   def run(self):
     # Check binaries for ART.
     self._checker.check_executable('hprof-conv')
-    self._checker.check_symlinked_multilib_executable('dex2oatd')
+    self._checker.check_symlinked_first_executable('dex2oatd')
+    self._checker.check_symlinked_first_executable('dex2oat')
 
     # Check exported native libraries for Managed Core Library.
     self._checker.check_native_library('libandroidicu-host')
@@ -506,6 +655,7 @@
     self._checker.check_native_library('libexpat-host')
     self._checker.check_native_library('libicui18n-host')
     self._checker.check_native_library('libicuuc-host')
+    self._checker.check_native_library('libicu_jni')
     self._checker.check_native_library('libz-host')
 
 
@@ -519,17 +669,24 @@
   def run(self):
     # Check binaries for ART.
     self._checker.check_executable('dexdiag')
+    self._checker.check_executable('dexanalyze')
+    self._checker.check_executable('dexlayout')
+    self._checker.check_symlinked_multilib_executable('imgdiag')
 
     # Check debug binaries for ART.
+    self._checker.check_executable('dexlayoutd')
     self._checker.check_executable('dexoptanalyzerd')
+    self._checker.check_symlinked_multilib_executable('imgdiagd')
     self._checker.check_executable('profmand')
 
     # Check internal libraries for ART.
     self._checker.check_native_library('libadbconnectiond')
+    self._checker.check_native_library('libart-disassembler')
     self._checker.check_native_library('libartbased')
     self._checker.check_native_library('libartd')
     self._checker.check_native_library('libartd-compiler')
     self._checker.check_native_library('libartd-dexlayout')
+    self._checker.check_native_library('libartd-disassembler')
     self._checker.check_native_library('libdexfiled')
     self._checker.check_native_library('libopenjdkjvmd')
     self._checker.check_native_library('libopenjdkjvmtid')
@@ -548,11 +705,13 @@
 
   def run(self):
     # Check ART debug binaries.
-    self._checker.check_executable('dex2oatd')
+    self._checker.check_multilib_executable('dex2oatd')
+    self._checker.check_multilib_executable('dex2oat')
     self._checker.check_executable('oatdumpd')
 
     # Check ART internal libraries.
-    self._checker.check_prefer64_library('libartd-disassembler')
+    self._checker.check_native_library('libdexfiled_external')
+    self._checker.check_native_library('libperfetto_hprofd')
 
     # Check internal native library dependencies.
     #
@@ -571,6 +730,263 @@
     self._checker.check_prefer64_library('libprocinfo')
 
 
+class TestingTargetChecker:
+  def __init__(self, checker):
+    self._checker = checker
+
+  def __str__(self):
+    return 'Testing (Target) Checker'
+
+  def run(self):
+    # Check cmdline tests.
+    self._checker.check_optional_art_test_executable('cmdline_parser_test')
+
+    # Check compiler tests.
+    self._checker.check_art_test_executable('atomic_dex_ref_map_test')
+    self._checker.check_art_test_executable('bounds_check_elimination_test')
+    self._checker.check_art_test_executable('codegen_test')
+    self._checker.check_art_test_executable('compiled_method_storage_test')
+    self._checker.check_art_test_executable('data_type_test')
+    self._checker.check_art_test_executable('dedupe_set_test')
+    self._checker.check_art_test_executable('dominator_test')
+    self._checker.check_art_test_executable('dwarf_test')
+    self._checker.check_art_test_executable('exception_test')
+    self._checker.check_art_test_executable('find_loops_test')
+    self._checker.check_art_test_executable('graph_checker_test')
+    self._checker.check_art_test_executable('graph_test')
+    self._checker.check_art_test_executable('gvn_test')
+    self._checker.check_art_test_executable('induction_var_analysis_test')
+    self._checker.check_art_test_executable('induction_var_range_test')
+    self._checker.check_art_test_executable('jni_cfi_test')
+    self._checker.check_art_test_executable('jni_compiler_test')
+    self._checker.check_art_test_executable('licm_test')
+    self._checker.check_art_test_executable('linker_patch_test')
+    self._checker.check_art_test_executable('live_interval_test')
+    self._checker.check_art_test_executable('load_store_analysis_test')
+    self._checker.check_art_test_executable('load_store_elimination_test')
+    self._checker.check_art_test_executable('loop_optimization_test')
+    self._checker.check_art_test_executable('nodes_test')
+    self._checker.check_art_test_executable('nodes_vector_test')
+    self._checker.check_art_test_executable('optimizing_cfi_test')
+    self._checker.check_art_test_executable('output_stream_test')
+    self._checker.check_art_test_executable('parallel_move_test')
+    self._checker.check_art_test_executable('pretty_printer_test')
+    self._checker.check_art_test_executable('reference_type_propagation_test')
+    self._checker.check_art_test_executable('scheduler_test')
+    self._checker.check_art_test_executable('select_generator_test')
+    self._checker.check_art_test_executable('side_effects_test')
+    self._checker.check_art_test_executable('src_map_elem_test')
+    self._checker.check_art_test_executable('ssa_liveness_analysis_test')
+    self._checker.check_art_test_executable('ssa_test')
+    self._checker.check_art_test_executable('stack_map_test')
+    self._checker.check_art_test_executable('superblock_cloner_test')
+    self._checker.check_art_test_executable('suspend_check_test')
+    self._checker.check_art_test_executable('swap_space_test')
+    # These tests depend on a specific code generator and are conditionally included.
+    self._checker.check_optional_art_test_executable('constant_folding_test')
+    self._checker.check_optional_art_test_executable('dead_code_elimination_test')
+    self._checker.check_optional_art_test_executable('linearize_test')
+    self._checker.check_optional_art_test_executable('live_ranges_test')
+    self._checker.check_optional_art_test_executable('liveness_test')
+    self._checker.check_optional_art_test_executable('managed_register_arm64_test')
+    self._checker.check_optional_art_test_executable('managed_register_arm_test')
+    self._checker.check_optional_art_test_executable('managed_register_x86_64_test')
+    self._checker.check_optional_art_test_executable('managed_register_x86_test')
+    self._checker.check_optional_art_test_executable('register_allocator_test')
+
+    # Check dex2oat tests.
+    self._checker.check_art_test_executable('compiler_driver_test')
+    self._checker.check_art_test_executable('dex2oat_image_test')
+    self._checker.check_art_test_executable('dex2oat_test')
+    self._checker.check_art_test_executable('dex_to_dex_decompiler_test')
+    self._checker.check_art_test_executable('elf_writer_test')
+    self._checker.check_art_test_executable('image_test')
+    self._checker.check_art_test_executable('image_write_read_test')
+    self._checker.check_art_test_executable('index_bss_mapping_encoder_test')
+    self._checker.check_art_test_executable('multi_oat_relative_patcher_test')
+    self._checker.check_art_test_executable('oat_writer_test')
+    self._checker.check_art_test_executable('verifier_deps_test')
+    # These tests depend on a specific code generator and are conditionally included.
+    self._checker.check_optional_art_test_executable('relative_patcher_arm64_test')
+    self._checker.check_optional_art_test_executable('relative_patcher_thumb2_test')
+    self._checker.check_optional_art_test_executable('relative_patcher_x86_64_test')
+    self._checker.check_optional_art_test_executable('relative_patcher_x86_test')
+
+    # Check dexanalyze tests.
+    self._checker.check_optional_art_test_executable('dexanalyze_test')
+
+    # Check dexdiag tests.
+    self._checker.check_optional_art_test_executable('dexdiag_test')
+
+    # Check dexdump tests.
+    self._checker.check_art_test_executable('dexdump_test')
+
+    # Check dexlayout tests.
+    self._checker.check_optional_art_test_executable('dexlayout_test')
+
+    # Check dexlist tests.
+    self._checker.check_art_test_executable('dexlist_test')
+
+    # Check dexoptanalyzer tests.
+    self._checker.check_art_test_executable('dexoptanalyzer_test')
+
+    # Check imgdiag tests.
+    self._checker.check_art_test_executable('imgdiag_test')
+
+    # Check libartbase tests.
+    self._checker.check_art_test_executable('arena_allocator_test')
+    self._checker.check_art_test_executable('bit_field_test')
+    self._checker.check_art_test_executable('bit_memory_region_test')
+    self._checker.check_art_test_executable('bit_string_test')
+    self._checker.check_art_test_executable('bit_struct_test')
+    self._checker.check_art_test_executable('bit_table_test')
+    self._checker.check_art_test_executable('bit_utils_test')
+    self._checker.check_art_test_executable('bit_vector_test')
+    self._checker.check_art_test_executable('fd_file_test')
+    self._checker.check_art_test_executable('file_utils_test')
+    self._checker.check_art_test_executable('hash_set_test')
+    self._checker.check_art_test_executable('hex_dump_test')
+    self._checker.check_art_test_executable('histogram_test')
+    self._checker.check_art_test_executable('indenter_test')
+    self._checker.check_art_test_executable('instruction_set_test')
+    self._checker.check_art_test_executable('intrusive_forward_list_test')
+    self._checker.check_art_test_executable('leb128_test')
+    self._checker.check_art_test_executable('logging_test')
+    self._checker.check_art_test_executable('mem_map_test')
+    self._checker.check_art_test_executable('membarrier_test')
+    self._checker.check_art_test_executable('memfd_test')
+    self._checker.check_art_test_executable('memory_region_test')
+    self._checker.check_art_test_executable('safe_copy_test')
+    self._checker.check_art_test_executable('scoped_flock_test')
+    self._checker.check_art_test_executable('time_utils_test')
+    self._checker.check_art_test_executable('transform_array_ref_test')
+    self._checker.check_art_test_executable('transform_iterator_test')
+    self._checker.check_art_test_executable('utils_test')
+    self._checker.check_art_test_executable('variant_map_test')
+    self._checker.check_art_test_executable('zip_archive_test')
+
+    # Check libartpalette tests.
+    self._checker.check_art_test_executable('palette_test')
+
+    # Check libdexfile tests.
+    self._checker.check_art_test_executable('art_dex_file_loader_test')
+    self._checker.check_art_test_executable('art_libdexfile_support_tests')
+    self._checker.check_art_test_executable('class_accessor_test')
+    self._checker.check_art_test_executable('code_item_accessors_test')
+    self._checker.check_art_test_executable('compact_dex_file_test')
+    self._checker.check_art_test_executable('compact_offset_table_test')
+    self._checker.check_art_test_executable('descriptors_names_test')
+    self._checker.check_art_test_executable('dex_file_loader_test')
+    self._checker.check_art_test_executable('dex_file_verifier_test')
+    self._checker.check_art_test_executable('dex_instruction_test')
+    self._checker.check_art_test_executable('primitive_test')
+    self._checker.check_art_test_executable('string_reference_test')
+    self._checker.check_art_test_executable('test_dex_file_builder_test')
+    self._checker.check_art_test_executable('type_lookup_table_test')
+    self._checker.check_art_test_executable('utf_test')
+
+    # Check libprofile tests.
+    self._checker.check_optional_art_test_executable('profile_boot_info_test')
+    self._checker.check_optional_art_test_executable('profile_compilation_info_test')
+
+    # Check oatdump tests.
+    self._checker.check_art_test_executable('oatdump_app_test')
+    self._checker.check_art_test_executable('oatdump_image_test')
+    self._checker.check_art_test_executable('oatdump_test')
+
+    # Check profman tests.
+    self._checker.check_art_test_executable('profile_assistant_test')
+
+    # Check runtime compiler tests.
+    self._checker.check_art_test_executable('module_exclusion_test')
+    self._checker.check_art_test_executable('reflection_test')
+
+    # Check runtime tests.
+    self._checker.check_art_test_executable('arch_test')
+    self._checker.check_art_test_executable('barrier_test')
+    self._checker.check_art_test_executable('card_table_test')
+    self._checker.check_art_test_executable('cha_test')
+    self._checker.check_art_test_executable('class_linker_test')
+    self._checker.check_art_test_executable('class_loader_context_test')
+    self._checker.check_art_test_executable('class_table_test')
+    self._checker.check_art_test_executable('compiler_filter_test')
+    self._checker.check_art_test_executable('dex_cache_test')
+    self._checker.check_art_test_executable('dlmalloc_space_random_test')
+    self._checker.check_art_test_executable('dlmalloc_space_static_test')
+    self._checker.check_art_test_executable('entrypoints_order_test')
+    self._checker.check_art_test_executable('exec_utils_test')
+    self._checker.check_art_test_executable('gtest_test')
+    self._checker.check_art_test_executable('handle_scope_test')
+    self._checker.check_art_test_executable('heap_test')
+    self._checker.check_art_test_executable('heap_verification_test')
+    self._checker.check_art_test_executable('hidden_api_test')
+    self._checker.check_art_test_executable('image_space_test')
+    self._checker.check_art_test_executable('immune_spaces_test')
+    self._checker.check_art_test_executable('imtable_test')
+    self._checker.check_art_test_executable('indirect_reference_table_test')
+    self._checker.check_art_test_executable('instruction_set_features_arm64_test')
+    self._checker.check_art_test_executable('instruction_set_features_arm_test')
+    self._checker.check_art_test_executable('instruction_set_features_test')
+    self._checker.check_art_test_executable('instruction_set_features_x86_64_test')
+    self._checker.check_art_test_executable('instruction_set_features_x86_test')
+    self._checker.check_art_test_executable('instrumentation_test')
+    self._checker.check_art_test_executable('intern_table_test')
+    self._checker.check_art_test_executable('java_vm_ext_test')
+    self._checker.check_art_test_executable('jit_memory_region_test')
+    self._checker.check_art_test_executable('jni_internal_test')
+    self._checker.check_art_test_executable('large_object_space_test')
+    self._checker.check_art_test_executable('math_entrypoints_test')
+    self._checker.check_art_test_executable('memcmp16_test')
+    self._checker.check_art_test_executable('method_handles_test')
+    self._checker.check_art_test_executable('method_type_test')
+    self._checker.check_art_test_executable('method_verifier_test')
+    self._checker.check_art_test_executable('mod_union_table_test')
+    self._checker.check_art_test_executable('monitor_pool_test')
+    self._checker.check_art_test_executable('monitor_test')
+    self._checker.check_art_test_executable('mutex_test')
+    self._checker.check_art_test_executable('oat_file_assistant_test')
+    self._checker.check_art_test_executable('oat_file_test')
+    self._checker.check_art_test_executable('object_test')
+    self._checker.check_art_test_executable('parsed_options_test')
+    self._checker.check_art_test_executable('prebuilt_tools_test')
+    self._checker.check_art_test_executable('profiling_info_test')
+    self._checker.check_art_test_executable('profile_saver_test')
+    self._checker.check_art_test_executable('proxy_test')
+    self._checker.check_art_test_executable('quick_trampoline_entrypoints_test')
+    self._checker.check_art_test_executable('reference_queue_test')
+    self._checker.check_art_test_executable('reference_table_test')
+    self._checker.check_art_test_executable('reg_type_test')
+    self._checker.check_art_test_executable('rosalloc_space_random_test')
+    self._checker.check_art_test_executable('rosalloc_space_static_test')
+    self._checker.check_art_test_executable('runtime_callbacks_test')
+    self._checker.check_art_test_executable('runtime_test')
+    self._checker.check_art_test_executable('safe_math_test')
+    self._checker.check_art_test_executable('space_bitmap_test')
+    self._checker.check_art_test_executable('space_create_test')
+    self._checker.check_art_test_executable('stub_test')
+    self._checker.check_art_test_executable('subtype_check_info_test')
+    self._checker.check_art_test_executable('subtype_check_test')
+    self._checker.check_art_test_executable('system_weak_test')
+    self._checker.check_art_test_executable('task_processor_test')
+    self._checker.check_art_test_executable('thread_pool_test')
+    self._checker.check_art_test_executable('timing_logger_test')
+    self._checker.check_art_test_executable('transaction_test')
+    self._checker.check_art_test_executable('two_runtimes_test')
+    self._checker.check_art_test_executable('unstarted_runtime_test')
+    self._checker.check_art_test_executable('var_handle_test')
+    self._checker.check_art_test_executable('vdex_file_test')
+
+    # Check sigchainlib tests.
+    self._checker.check_art_test_executable('sigchain_test')
+
+    # Check ART test (internal) libraries.
+    self._checker.check_native_library('libart-gtest')
+    self._checker.check_native_library('libartd-simulator-container')
+
+    # Check ART test tools.
+    self._checker.check_executable('signal_dumper')
+
+
 class NoSuperfluousBinariesChecker:
   def __init__(self, checker):
     self._checker = checker
@@ -592,38 +1008,58 @@
   def run(self):
     self._checker.check_no_superfluous_files('javalib')
     self._checker.check_no_superfluous_files('lib')
-    self._checker.check_no_superfluous_files('lib/bionic')
     self._checker.check_no_superfluous_files('lib64')
-    self._checker.check_no_superfluous_files('lib64/bionic')
+
+
+class NoSuperfluousArtTestsChecker:
+  def __init__(self, checker):
+    self._checker = checker
+
+  def __str__(self):
+    return 'No superfluous ART tests checker'
+
+  def run(self):
+    for arch in ARCHS:
+      self._checker.check_no_superfluous_files('%s/%s' % (ART_TEST_DIR, arch))
 
 
 class List:
-  def __init__(self, provider):
+  def __init__(self, provider, print_size=False):
     self._provider = provider
-    self._path = ''
+    self._print_size = print_size
 
   def print_list(self):
-    apex_map = self._provider.read_dir(self._path)
-    if apex_map is None:
-      return
-    apex_map = dict(apex_map)
-    if '.' in apex_map:
-      del apex_map['.']
-    if '..' in apex_map:
-      del apex_map['..']
-    for (_, val) in sorted(apex_map.items()):
-      self._path = os.path.join(self._path, val.name)
-      print(self._path)
-      if val.is_dir:
-        self.print_list()
+
+    def print_list_rec(path):
+      apex_map = self._provider.read_dir(path)
+      if apex_map is None:
+        return
+      apex_map = dict(apex_map)
+      if '.' in apex_map:
+        del apex_map['.']
+      if '..' in apex_map:
+        del apex_map['..']
+      for (_, val) in sorted(apex_map.items()):
+        val_path = os.path.join(path, val.name)
+        if self._print_size:
+          if val.size < 0:
+            print('[    n/a    ]  %s' % val_path)
+          else:
+            print('[%11d]  %s' % (val.size, val_path))
+        else:
+          print(val_path)
+        if val.is_dir:
+          print_list_rec(val_path)
+
+    print_list_rec('')
 
 
 class Tree:
-  def __init__(self, provider, title):
+  def __init__(self, provider, title, print_size=False):
     print('%s' % title)
     self._provider = provider
-    self._path = ''
     self._has_next_list = []
+    self._print_size = print_size
 
   @staticmethod
   def get_vertical(has_next_list):
@@ -637,90 +1073,120 @@
     return '└── ' if last else '├── '
 
   def print_tree(self):
-    apex_map = self._provider.read_dir(self._path)
-    if apex_map is None:
-      return
-    apex_map = dict(apex_map)
-    if '.' in apex_map:
-      del apex_map['.']
-    if '..' in apex_map:
-      del apex_map['..']
-    key_list = list(sorted(apex_map.keys()))
-    for i, key in enumerate(key_list):
-      prev = self.get_vertical(self._has_next_list)
-      last = self.get_last_vertical(i == len(key_list) - 1)
-      val = apex_map[key]
-      print('%s%s%s' % (prev, last, val.name))
-      if val.is_dir:
-        self._has_next_list.append(i < len(key_list) - 1)
-        saved_dir = self._path
-        self._path = os.path.join(self._path, val.name)
-        self.print_tree()
-        self._path = saved_dir
-        self._has_next_list.pop()
+
+    def print_tree_rec(path):
+      apex_map = self._provider.read_dir(path)
+      if apex_map is None:
+        return
+      apex_map = dict(apex_map)
+      if '.' in apex_map:
+        del apex_map['.']
+      if '..' in apex_map:
+        del apex_map['..']
+      key_list = list(sorted(apex_map.keys()))
+      for i, key in enumerate(key_list):
+        prev = self.get_vertical(self._has_next_list)
+        last = self.get_last_vertical(i == len(key_list) - 1)
+        val = apex_map[key]
+        if self._print_size:
+          if val.size < 0:
+            print('%s%s[    n/a    ]  %s' % (prev, last, val.name))
+          else:
+            print('%s%s[%11d]  %s' % (prev, last, val.size, val.name))
+        else:
+          print('%s%s%s' % (prev, last, val.name))
+        if val.is_dir:
+          self._has_next_list.append(i < len(key_list) - 1)
+          val_path = os.path.join(path, val.name)
+          print_tree_rec(val_path)
+          self._has_next_list.pop()
+
+    print_tree_rec('')
 
 
 # Note: do not sys.exit early, for __del__ cleanup.
 def art_apex_test_main(test_args):
-  if test_args.tree and test_args.debug:
-    logging.error("Both of --tree and --debug set")
-    return 1
-  if test_args.list and test_args.debug:
-    logging.error("Both of --list and --debug set")
+  if test_args.host and test_args.flattened:
+    logging.error("Both of --host and --flattened set")
     return 1
   if test_args.list and test_args.tree:
     logging.error("Both of --list and --tree set")
     return 1
-  if not test_args.tmpdir:
+  if test_args.size and not (test_args.list or test_args.tree):
+    logging.error("--size set but neither --list nor --tree set")
+    return 1
+  if not test_args.flattened and not test_args.tmpdir:
     logging.error("Need a tmpdir.")
     return 1
-  if not test_args.host and not test_args.debugfs:
+  if not test_args.flattened and not test_args.host and not test_args.debugfs:
     logging.error("Need debugfs.")
     return 1
-  if test_args.bitness not in ['32', '64', 'multilib', 'auto']:
-    logging.error('--bitness needs to be one of 32|64|multilib|auto')
+
+  if test_args.host:
+    # Host APEX.
+    if test_args.flavor not in [FLAVOR_DEBUG, FLAVOR_AUTO]:
+      logging.error("Using option --host with non-Debug APEX")
+      return 1
+    # Host APEX is always a debug flavor (for now).
+    test_args.flavor = FLAVOR_DEBUG
+  else:
+    # Device APEX.
+    if test_args.flavor == FLAVOR_AUTO:
+      logging.warning('--flavor=auto, trying to autodetect. This may be incorrect!')
+      for flavor in [ FLAVOR_RELEASE, FLAVOR_DEBUG, FLAVOR_TESTING ]:
+        flavor_pattern = '*.%s*' % flavor
+        if fnmatch.fnmatch(test_args.apex, flavor_pattern):
+          test_args.flavor = flavor
+          break
+      if test_args.flavor == FLAVOR_AUTO:
+        logging.error('  Could not detect APEX flavor, neither \'%s\', \'%s\' nor \'%s\' in \'%s\'',
+                    FLAVOR_RELEASE, FLAVOR_DEBUG, FLAVOR_TESTING, test_args.apex)
+        return 1
 
   try:
     if test_args.host:
       apex_provider = HostApexProvider(test_args.apex, test_args.tmpdir)
     else:
-      apex_provider = TargetApexProvider(test_args.apex, test_args.tmpdir, test_args.debugfs)
+      if test_args.flattened:
+        apex_provider = TargetFlattenedApexProvider(test_args.apex)
+      else:
+        apex_provider = TargetApexProvider(test_args.apex, test_args.tmpdir, test_args.debugfs)
   except (zipfile.BadZipFile, zipfile.LargeZipFile) as e:
     logging.error('Failed to create provider: %s', e)
     return 1
 
   if test_args.tree:
-    Tree(apex_provider, test_args.apex).print_tree()
+    Tree(apex_provider, test_args.apex, test_args.size).print_tree()
     return 0
   if test_args.list:
-    List(apex_provider).print_list()
+    List(apex_provider, test_args.size).print_list()
     return 0
 
   checkers = []
-  if test_args.bitness == 'auto':
+  if test_args.bitness == BITNESS_AUTO:
     logging.warning('--bitness=auto, trying to autodetect. This may be incorrect!')
     has_32 = apex_provider.get('lib') is not None
     has_64 = apex_provider.get('lib64') is not None
     if has_32 and has_64:
       logging.warning('  Detected multilib')
-      test_args.bitness = 'multilib'
+      test_args.bitness = BITNESS_MULTILIB
     elif has_32:
       logging.warning('  Detected 32-only')
-      test_args.bitness = '32'
+      test_args.bitness = BITNESS_32
     elif has_64:
       logging.warning('  Detected 64-only')
-      test_args.bitness = '64'
+      test_args.bitness = BITNESS_64
     else:
       logging.error('  Could not detect bitness, neither lib nor lib64 contained.')
-      print('%s' % apex_provider.folder_cache)
+      List(apex_provider).print_list()
       return 1
 
-  if test_args.bitness == '32':
+  if test_args.bitness == BITNESS_32:
     base_checker = Arch32Checker(apex_provider)
-  elif test_args.bitness == '64':
+  elif test_args.bitness == BITNESS_64:
     base_checker = Arch64Checker(apex_provider)
   else:
-    assert test_args.bitness == 'multilib'
+    assert test_args.bitness == BITNESS_MULTILIB
     base_checker = MultilibChecker(apex_provider)
 
   checkers.append(ReleaseChecker(base_checker))
@@ -728,13 +1194,16 @@
     checkers.append(ReleaseHostChecker(base_checker))
   else:
     checkers.append(ReleaseTargetChecker(base_checker))
-  if test_args.debug:
+  if test_args.flavor == FLAVOR_DEBUG or test_args.flavor == FLAVOR_TESTING:
     checkers.append(DebugChecker(base_checker))
-  if test_args.debug and not test_args.host:
-    checkers.append(DebugTargetChecker(base_checker))
+    if not test_args.host:
+      checkers.append(DebugTargetChecker(base_checker))
+  if test_args.flavor == FLAVOR_TESTING:
+    checkers.append(TestingTargetChecker(base_checker))
 
   # These checkers must be last.
   checkers.append(NoSuperfluousBinariesChecker(base_checker))
+  checkers.append(NoSuperfluousArtTestsChecker(base_checker))
   if not test_args.host:
     # We only care about superfluous libraries on target, where their absence
     # can be vital to ensure they get picked up from the right package.
@@ -769,7 +1238,7 @@
   test_args.tmpdir = '.'
   test_args.tree = False
   test_args.list = False
-  test_args.bitness = 'auto'
+  test_args.bitness = BITNESS_AUTO
   failed = False
 
   if not os.path.exists(test_args.debugfs):
@@ -777,10 +1246,12 @@
                   test_args.debugfs)
     sys.exit(1)
 
-  # TODO: Add host support
+  # TODO: Add host support.
+  # TODO: Add support for flattened APEX packages.
   configs = [
-    {'name': 'com.android.runtime.release', 'debug': False, 'host': False},
-    {'name': 'com.android.runtime.debug', 'debug': True, 'host': False},
+    {'name': 'com.android.art.release', 'flavor': FLAVOR_RELEASE, 'host': False},
+    {'name': 'com.android.art.debug',   'flavor': FLAVOR_DEBUG,   'host': False},
+    {'name': 'com.android.art.testing', 'flavor': FLAVOR_TESTING, 'host': False},
   ]
 
   for config in configs:
@@ -791,7 +1262,7 @@
       failed = True
       logging.error("Cannot find APEX %s. Please build it first.", test_args.apex)
       continue
-    test_args.debug = config['debug']
+    test_args.flavor = config['flavor']
     test_args.host = config['host']
     failed = art_apex_test_main(test_args) != 0
 
@@ -800,21 +1271,26 @@
 
 
 if __name__ == "__main__":
-  parser = argparse.ArgumentParser(description='Check integrity of a Runtime APEX.')
+  parser = argparse.ArgumentParser(description='Check integrity of an ART APEX.')
 
-  parser.add_argument('apex', help='apex file input')
+  parser.add_argument('apex', help='APEX file input')
 
-  parser.add_argument('--host', help='Check as host apex', action='store_true')
+  parser.add_argument('--host', help='Check as host APEX', action='store_true')
 
-  parser.add_argument('--debug', help='Check as debug apex', action='store_true')
+  parser.add_argument('--flattened', help='Check as flattened (target) APEX', action='store_true')
+
+  parser.add_argument('--flavor', help='Check as FLAVOR APEX', choices=FLAVORS_ALL,
+                      default=FLAVOR_AUTO)
 
   parser.add_argument('--list', help='List all files', action='store_true')
   parser.add_argument('--tree', help='Print directory tree', action='store_true')
+  parser.add_argument('--size', help='Print file sizes', action='store_true')
 
   parser.add_argument('--tmpdir', help='Directory for temp files')
   parser.add_argument('--debugfs', help='Path to debugfs')
 
-  parser.add_argument('--bitness', help='Bitness to check, 32|64|multilib|auto', default='auto')
+  parser.add_argument('--bitness', help='Bitness to check', choices=BITNESS_ALL,
+                      default=BITNESS_AUTO)
 
   if len(sys.argv) == 1:
     art_apex_test_default(parser)
diff --git a/build/apex/art_postinstall_hook.sh b/build/apex/art_postinstall_hook.sh
deleted file mode 100644
index cb3b887..0000000
--- a/build/apex/art_postinstall_hook.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/system/bin/sh
-
-# Copyright (C) 2019 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-. `dirname $0`/art_prepostinstall_utils || exit 100
-
-log_info "=== ART runtime post-install ==="
-
-# Check for OTA base folder.
-if [ ! -d /data/ota/dalvik-cache ] ; then
-  log_error "Postinstall dalvik-cache does not exist or is not a directory"
-  exit 101
-fi
-
-log_info "Checking fsverity"
-
-# Measure (and enable) fsverity to see if things are installed. Enable is not
-# idempotent, and we'd need to parse the error string to see whether it says
-# data was installed. Rather do a two-step.
-FILES=`find /data/ota/dalvik-cache -type f`
-for FILE in $FILES ; do
-  fsverity measure $FILE && continue
-  ENABLE_MSG=`fsverity enable $FILE 2>&1` && continue
-
-  # No installed data, can't enable. Clean up and fail.
-  log_error "Enable failed: $ENABLE_MSG"
-  rm -rf /data/ota/dalvik-cache
-  exit 200
-done
-
-log_info "Moving dalvik-cache"
-
-rm -rf /data/dalvik-cache/* || exit 102
-mv /data/ota/dalvik-cache/* /data/dalvik-cache/ || exit 103
-restorecon -R -F /data/dalvik-cache/* || exit 104
diff --git a/build/apex/art_preinstall_hook.sh b/build/apex/art_preinstall_hook.sh
deleted file mode 100644
index 94a1b21..0000000
--- a/build/apex/art_preinstall_hook.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/system/bin/sh
-
-# Copyright (C) 2019 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-. `dirname $0`/art_prepostinstall_utils || exit 100
-
-log_info "=== ART runtime pre-install ==="
-
-set_arches || exit 101
-log_info "Arches = `echo $ARCHES`"
-
-# The runtime update uses /data/ota as a staging directory, similar to
-# A/B OTA. (There is no overlap, as A/B uses slot prefixes.)
-
-# Create OTA base folder.
-mkdir -p /data/ota/dalvik-cache || exit 102
-# Bind-mount to perceive as normal structure.
-mount -o bind /data/ota/dalvik-cache /data/dalvik-cache || exit 103
-
-for ARCH in $ARCHES ; do
-  log_info "Preparing compilation output directories for $ARCH"
-
-  # Create OTA folders.
-  mkdir -p /data/ota/dalvik-cache/$ARCH || exit 104
-  rm -rf /data/ota/dalvik-cache/$ARCH/* || exit 105
-
-  `dirname $0`/art_preinstall_hook_boot $ARCH || exit 200
-done
-
-PRIMARY_ARCH=`echo $ARCHES | sed -e 's/ .*//'`
-`dirname $0`/art_preinstall_hook_system_server $PRIMARY_ARCH || exit 300
-
-FILES=`find /data/dalvik-cache -type f`
-for FILE in $FILES ; do
-  setup_fsverity $FILE || exit 400
-done
diff --git a/build/apex/art_preinstall_hook_boot.sh b/build/apex/art_preinstall_hook_boot.sh
deleted file mode 100644
index cdad144..0000000
--- a/build/apex/art_preinstall_hook_boot.sh
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/system/bin/sh
-
-# Copyright (C) 2019 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-. `dirname $0`/art_prepostinstall_utils || exit 100
-
-log_info "Preparing boot image compilation parameters"
-
-# Prefer DEX2OATBOOTCLASSPATH, then BOOTCLASSPATH.
-USED_CLASSPATH=$DEX2OATBOOTCLASSPATH
-if [ -z "$USED_CLASSPATH" ] ; then
-  USED_CLASSPATH=$BOOTCLASSPATH
-  if [ -z "$USED_CLASSPATH" ] ; then
-    log_error "Could not find boot class-path to compile"
-    exit 101
-  fi
-fi
-BOOTCP=`echo $USED_CLASSPATH | tr ":" "\n"`
-
-DEX_FILES=
-DEX_LOCATIONS=
-for component in $BOOTCP ; do
-  DEX_FILES="$DEX_FILES --dex-file=$component"
-  DEX_LOCATIONS="$DEX_LOCATIONS --dex-location=$component"
-done
-
-PROFILING=
-if [ -f "/system/etc/boot-image.prof" ] ; then
-  PROFILING="--compiler-filter=speed-profile --profile-file=/system/etc/boot-image.prof"
-elif [ -f "/system/etc/preloaded-classes" ]; then
-  PROFILING="--image-classes=/system/etc/preloaded-classes"
-fi
-if [ -f "/system/etc/dirty-image-objects" ] ; then
-  PROFILING="$PROFILING --dirty-image-objects=/system/etc/dirty-image-objects"
-fi
-
-DEX2OAT_IMAGE_XMX=`getprop dalvik.vm.image-dex2oat-Xmx`
-
-DEX2OAT_TARGET_ARCH=$1
-DEX2OAT_TARGET_CPU_VARIANT=`getprop dalvik.vm.isa.${DEX2OAT_TARGET_ARCH}.variant`
-DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES=`getprop dalvik.vm.isa.${DEX2OAT_TARGET_ARCH}.features`
-
-log_info "Compiling boot image for $DEX2OAT_TARGET_ARCH"
-
-dex2oat \
-  --avoid-storing-invocation \
-  --runtime-arg -Xmx$DEX2OAT_IMAGE_XMX \
-  $PROFILING \
-  $DEX_FILES \
-  $DEX_LOCATIONS \
-  --generate-mini-debug-info \
-  --strip \
-  --oat-file=/data/dalvik-cache/$DEX2OAT_TARGET_ARCH/system@framework@boot.oat \
-  --oat-location=/data/dalvik-cache/$DEX2OAT_TARGET_ARCH/system@framework@boot.oat \
-  --image=/data/dalvik-cache/$DEX2OAT_TARGET_ARCH/system@framework@boot.art --base=0x70000000 \
-  --instruction-set=$DEX2OAT_TARGET_ARCH \
-  --instruction-set-variant=$DEX2OAT_TARGET_CPU_VARIANT \
-  --instruction-set-features=$DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES \
-  --android-root=/system \
-  --no-inline-from=core-oj.jar \
-  --abort-on-hard-verifier-error \
-  --force-determinism || { log_error "Dex2oat failed" ; exit 102 ; }
diff --git a/build/apex/art_preinstall_hook_system_server.sh b/build/apex/art_preinstall_hook_system_server.sh
deleted file mode 100644
index 9462c3b..0000000
--- a/build/apex/art_preinstall_hook_system_server.sh
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/system/bin/sh
-
-# Copyright (C) 2019 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-. `dirname $0`/art_prepostinstall_utils || exit 100
-
-function dalvik_cache_name {
-  local input=$1
-  # Strip first /, replace rest with @.
-  DALVIK_CACHE_NAME=`echo $input | sed -e 's,^/,,' -e 's,/,@,g'`
-  # Append @classes.dex.
-  DALVIK_CACHE_NAME="${DALVIK_CACHE_NAME}@classes.dex"
-}
-
-log_info "Preparing system server compilation parameters"
-
-if [ "x$SYSTEMSERVERCLASSPATH" = "x" ] ; then
-  log_info "SYSTEMSERVERCLASSPATH is not set! Trying to retrieve from init.environ.rc."
-  SYSTEMSERVERCLASSPATH=`grep "export SYSTEMSERVERCLASSPATH" init.environ.rc | sed -e "s/.* //"`
-  if [ "x$SYSTEMSERVERCLASSPATH" = "x" ] ; then
-    log_error "Could not find SYSTEMSERVERCLASSPATH"
-    exit 101
-  fi
-fi
-SYSCP=`echo $SYSTEMSERVERCLASSPATH | tr ":" "\n"`
-
-BOOTCPPARAM=
-if [ ! -z "$DEX2OATBOOTCLASSPATH" ] ; then
-  BOOTCPPARAM="--runtime-arg -Xbootclasspath:$DEX2OATBOOTCLASSPATH"
-fi
-
-DEX2OAT_IMAGE_XMX=`getprop dalvik.vm.dex2oat-Xmx`
-
-DEX2OAT_TARGET_ARCH=$1
-DEX2OAT_TARGET_CPU_VARIANT=`getprop dalvik.vm.isa.${DEX2OAT_TARGET_ARCH}.variant`
-DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES=`getprop dalvik.vm.isa.${DEX2OAT_TARGET_ARCH}.features`
-
-# Do this like preopt: speed compile, no classpath, possibly pick up profiles.
-
-# TODO: App image? Would have to scan /system for an existing image.
-
-for COMPONENT in $SYSCP ; do
-  log_info "Compiling $COMPONENT"
-  dalvik_cache_name $COMPONENT
-  PROFILING=
-  if [ -f "${COMPONENT}.prof" ] ; then
-    PROFILING="--profile-file=${COMPONENT}.prof"
-  fi
-  dex2oat \
-    --avoid-storing-invocation \
-    --runtime-arg -Xmx$DEX2OAT_IMAGE_XMX \
-    $BOOTCPPARAM \
-    --class-loader-context=\& \
-    --boot-image=/data/dalvik-cache/system@framework@boot.art \
-    --dex-file=$COMPONENT \
-    --dex-location=$COMPONENT \
-    --oat-file=/data/dalvik-cache/$DEX2OAT_TARGET_ARCH/$DALVIK_CACHE_NAME \
-    --android-root=/system \
-    --instruction-set=$DEX2OAT_TARGET_ARCH \
-    --instruction-set-variant=$DEX2OAT_TARGET_CPU_VARIANT \
-    --instruction-set-features=$DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES \
-    --no-generate-debug-info \
-    --abort-on-hard-verifier-error \
-    --force-determinism \
-    --no-inline-from=core-oj.jar \
-    --copy-dex-files=false \
-    --compiler-filter=speed \
-    --generate-mini-debug-info \
-    $PROFILING \
-      || { log_error "Dex2oat failed" ; exit 102 ; }
-done
diff --git a/build/apex/art_prepostinstall_utils.sh b/build/apex/art_prepostinstall_utils.sh
deleted file mode 100644
index f5a94d1..0000000
--- a/build/apex/art_prepostinstall_utils.sh
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/system/bin/sh
-
-# Copyright (C) 2019 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-alias log_info="log -t art_apex -p i"
-alias log_error="log -t art_apex -p f"
-
-# Set |ARCHES| to a string containing the architectures of the device.
-function set_arches {
-  # Derive architectures. For now, stop at two.
-  local abilist_prop=`getprop ro.product.cpu.abilist`
-  local abilist=`echo $abilist_prop | tr "," "\n"`
-  ARCHES=""
-  for abi in $abilist ; do
-    case "$abi" in
-      arm64-v8a)
-        ARCHES="$ARCHES\narm64"
-        ;;
-      armeabi-v7a|armeabi)
-        ARCHES="$ARCHES\narm"
-        ;;
-      x86)
-        ARCHES="$ARCHES\nx86"
-        ;;
-      x86_64)
-        ARCHES="$ARCHES\nx86_64"
-        ;;
-      *)
-        log_error "Unsupported ABI $abi"
-        return 1
-        ;;
-    esac
-  done
-  ARCHES=`echo $ARCHES | uniq`
-  return 0
-}
-
-function setup_fsverity {
-  local full_shell_path=`readlink -f $0`
-  local bin_dir=`dirname $full_shell_path`
-  local apex_dir=`dirname $bin_dir`
-  local sig_dir="${apex_dir}.signatures"
-  local file=$1
-  local signature_file="$sig_dir/$file.sig"
-  # Setup.
-  log_info "fsverity setup for $file"
-  SETUP_MSG=`fsverity setup $file --signature=$signature_file --hash=sha256 2>&1` || \
-    { log_error "Setup failed: $SETUP_MSG" ; return 300 ; }
-  # Enable.
-  log_info "fsverity enable for $file"
-  ENABLE_MSG=`fsverity enable $file 2>&1` || \
-    { log_error "Enable failed: $ENABLE_MSG" ; return 301 ; }
-  # Test integrity.
-  INTEGRITY_MSG=`dd if=$file of=/dev/null bs=4k 2>&1` || \
-    { log_error "Integrity failed: $INTEGRITY_MSG" ; return 302 ; }
-  return 0
-}
diff --git a/build/apex/com.android.art.avbpubkey b/build/apex/com.android.art.avbpubkey
new file mode 100644
index 0000000..0d9cb49
--- /dev/null
+++ b/build/apex/com.android.art.avbpubkey
Binary files differ
diff --git a/build/apex/com.android.art.pem b/build/apex/com.android.art.pem
new file mode 100644
index 0000000..5380b7a
--- /dev/null
+++ b/build/apex/com.android.art.pem
@@ -0,0 +1,51 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIJKAIBAAKCAgEArRrjkOjOEDk4/ZEdo6z7llFu8jNSkXvOWl95jRQ6ztgfM00y
+VaqtI0xCdh/C3XI4b7xbhHq4rdZXHid+dpRzlq1BvQu3fPVjHeUItY9MOT0vfnHS
+VrUIAHJxHtzMS+krtlNkqzhKtpvXBx4DbDP7B5KIDD276E6tn0ifqLBjsdJPkbso
++IkTMbykE/VBA7qe+Wx4v8Ay4iO1cfNREAlC6enXkxRXjCFz/MHz7CJjNjYmygmB
+k5AGZMylpVU16OcRCMHJM+zjQ/DuktnGScRgZWw3xlgpR+13Ej6ayrAETqI7oAwP
+2eDylVoccYdM/1wRFrYS7Ag6kk8xuvQr8015X3trVC5sM7dE/zEqqXo1yGGnUNdV
+pGdexJ4f8fkHNCDmCKnPUSfkcZmJh3VuWk8w7mf5rvm9BzX3S9p6yEcoFWNT9Yxd
+0TYU/Hl5E73+hvHWYkDfVxFK6GlCjPznkWKkOsA5M3HPsUAxcylfcW18uUcgx3sV
+DCsBCjLXCxbOhu4RDkD5L7fA622kFiuhnLbVjyXDxF6Ip1jCqu8lm/ftDb3wnIDL
+e5hP5JNxpC9mdu1dTojlM6ve5skOqjEPxJqOqclWHg6JGPA8oaJaqbVLNO2gd36U
+siTZkHwxyOv8zqjwAKNY3jNGjELXgry+CBdV3URY1SDhRJ30X8ZXVJZW/acCAwEA
+AQKCAgALboMoxrcVCzJgTH0QmhPjUW1qQUlqoip2fWehkXxwvIUS9j4kuijE8/xP
+oLlVtn1To7THgvM/R7BpJWKMojEf+kElIujzL6FkEAQLOXNnNEs2pn2ljD8DCIu/
+5gT33mYsnEVBqW4FsTT6G1lOhABH971UUZ9fMBL3OeyRT1TGIYVvslR6VVMXLcYI
+K2InxONKxYcT4rV5ibIp3E+2J2Zr3C2IYQeHEY5/Wq+pIHw80EavgQE1pYVGkt09
+lesBfoD5exK2gyZfDkIzIH6f0IAtMoBccOYJAf2jDs9aI1Wle8FESIejc9+RTWoj
+dTP4iTP3s/575+82SlSWbBma77rcH4+gzcOnxoeJdzmpJYDqcK4tPeM62wKsD72q
+LDTvbb9dF19g4yysCdeJwmTkYIGjYzWRsA6gWVT5MbMqqJPprYVQRQClpKjIFtaS
+I1qjrgEqksbh7ZJd4018LappOcEMDtagWsz9CAmp0Tu1HlvFOMnTk2hCKE+QtDe6
+COkIvz4sASS4XXxFRzyfF5l/LrMlAXCcX0YGvujZK/yxmVtZ7H6Mm8FZiRsPSboi
+s45QqgzYttwdjJqYly+JyEFgyVrnFi1bVGSu3pwv9JF5wc/sJYV6cqptApco5LCI
+M2xymtJD6MqYAJ9KpryaFb0HxEKP8NZykMz39nRqGotTP/HlwQKCAQEA2ng5JBym
+dWIOXJ9BmkjidzkZRC+JtYaEEMT5+UvULRzQ0h+hjaiSz4P58+QEj8bSPL49njif
+DzEwFQkxC+LL+FtcueGh4XTpB9hZiXIhakp1iL5iSJA211FjqQVwKb51Nx7qo1J7
+gKg8yawID7vl8aU2ujwQpbAN1aZcY+rZBABV5YVSTw08a0O1w/5kaUms3FHCEETo
+i3+Hzfra691dBTbCeD2fYNecNQnlv4S9VPId126c74Mu7GAI/yf8ti4weEDEhLB7
+aMyYEkAf3uQiKxW7tSQvhOcJXTdBYfoFDXP5ysJob7RThzmbm4L8GixybVEY8A81
+wBrSZHhZI9F8DQKCAQEAytejebi/SX+RymwnGJZOZeYPHNJN53Za6JSV77IrRMY2
+BkS6VcO5fpJaSU9CBvwZCv+HvPo1hZJhaOmGcrgj80/LymLy66sm3hHDjixlBKhI
+SVB4ivWjgxLKB08H5Xr1VdaqM2/4aIt3+hwV9hIA9uZAAAd/+4OO1OI8JbHlQxcn
+IEKQJbTwk4kroVU23ydYydKrt6VXWkyYq3eODpuQFH+VSI5+YNbmIjmU7qN8iAEH
+x2r4pSm5pkG8pxJOl4Am6pwfO0tozy7SJ3E7kL5DLWN1vr2pIVtfJQu/nZw4YP47
+MLDpgdakQ/EjFmZBYknRy8TQHAcC1YvqMM9/Rq7PgwKCAQBEpOHPZvEmkNjSYXfL
+cns12ssFkapDTzDP1BR3MExKoHM8kpPAXudCLMWszEhipKYKT/wsar4Pl/Tzpx+y
+DGDqeEp6XVrv7DwMKv53IVU+gIbNoIRhKG8S2I/n272SYDWUTDKNfq0vj60J8PPX
+fcKSWscHXTgd12OBbfQ5sODfUPusUme6Tv5c9bl3C8ehDXUzBL1lP0GSE1AoeFmx
+IqzHpp0UCsi5NQXv5Fw8AQk8V5boyeilmSJ1QveQtI/C7mBsaG1XA9zC4QYnNd7N
+ugDCaOHB2MzVhYJ7t5DjqBtOTtJ90vfdoVtdccxi4JU15CFQF9suEplg8wyIZQgd
+KQVBAoIBAQCQmvRG9WchPZmwxjOE+rp/OGhzspWpOh4LCsptLAZerDOdemegdr0J
+t8o30xIOKrCrv4mENpfrVnStNzYLGK1AaxWsfagSTFyUGfPgqlOF43ZNFdoprn5Y
+FhAC79uARI0cGcISk3NzDGKG4njhiOo5GeJsYuxhYON1bqdUdCMuFhZlkC51Qy3y
+7+ozxK0unz1T/CVA8dV5YXvBWaTjUxF/G4lQRY1g+jLsGULMca54wstJ5j/Gdx2L
+ofec35c5uDmGLbCyM8tPUGCvj7DYOltnwy0QwuMNDbehkGOVN+aVdwi5aJW/Y0aN
+zR2nfVSFfnzbGL8IDBOAK5PUIkjpGfyDAoIBAFxG99balQ7+dVhCdDlbE96zZO6u
+ckgqIiU0F4nke53Svqy3kMIwcq975V2g0TKF+GPsLYDu2u1oHpBp0cEYErMolgcf
+xWLjTc+7JgoeYJ9fhb1eGvet6L1znjInCFoRFnSGR6ctBZIg5ixIRsay6dBBFQVq
+Fu1B/XJuc0ZdaPDmulOzyarAvBnsmZ8tkYg+O9RH7mpFfmjdWwNYpj86lUtOi9z7
+qXFOZorylW2FhEwNUBeA6uvu/ThxtDCw5k3DMgaWdRwLUMxD0UwxjoFbIeyxGnH2
+d4LbSNXp4vRDkfPiYW8GTep/pwcfFVtLX41IWkQke1i5PQ8uF2f3kCn2Eq0=
+-----END RSA PRIVATE KEY-----
diff --git a/build/apex/com.android.art.pk8 b/build/apex/com.android.art.pk8
new file mode 100644
index 0000000..e92f342
--- /dev/null
+++ b/build/apex/com.android.art.pk8
Binary files differ
diff --git a/build/apex/com.android.art.x509.pem b/build/apex/com.android.art.x509.pem
new file mode 100644
index 0000000..0310f20
--- /dev/null
+++ b/build/apex/com.android.art.x509.pem
@@ -0,0 +1,35 @@
+-----BEGIN CERTIFICATE-----
+MIIGHTCCBAWgAwIBAgIUGUj74fy+MBFtWuvs4ia1V8xBzfUwDQYJKoZIhvcNAQEL
+BQAwgZwxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQH
+DA1Nb3VudGFpbiBWaWV3MRAwDgYDVQQKDAdBbmRyb2lkMRAwDgYDVQQLDAdBbmRy
+b2lkMRgwFgYDVQQDDA9jb20uYW5kcm9pZC5hcnQxIjAgBgkqhkiG9w0BCQEWE2Fu
+ZHJvaWRAYW5kcm9pZC5jb20wIBcNMTkwODE1MTM0MDA3WhgPNDc1NzA3MTExMzQw
+MDdaMIGcMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UE
+BwwNTW91bnRhaW4gVmlldzEQMA4GA1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5k
+cm9pZDEYMBYGA1UEAwwPY29tLmFuZHJvaWQuYXJ0MSIwIAYJKoZIhvcNAQkBFhNh
+bmRyb2lkQGFuZHJvaWQuY29tMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC
+AgEAwa44WcQo0IKt5866YHNcsMOhCExtrC6/zL+WP0aX3CEnEIGocdXijyA/dyDZ
+Cgrdh5FpPIUQaprhHYIsoklxYh5JocpbfH0wnkZhhyrjH+Lcks6GTlGNPxUa4c8e
+vECwSM0RyO2oyxuwmVObmaz3poBXkikcFtE0rfNeRPD6j/SNaVf7lByrc7G+Ckz8
+CoxTxhrKcvu9TEsp37fM+3lrXnAz49FsguCqEEAoH+JHHWE7OoCCaBIyCMk8ZCK3
+39uAJSXeaoqW5WVDKw+ZKNJ3WL2xwB1QcYqG7ISp1rPY2mDUD0X8UpX1XljTToeQ
+e8oTpl3z+jOX9Y9W7M0Pq16U7Ru7kFfLr6Xj14G74yrHgrcw6hCOKUjF0WJW4H27
+ey6Lsn3ndj3IZYUERabB51q02yYu1b00X+ioa0uapucb4LU4663eSmHuWtpRj5Bq
+waLLkWfFjmR4ct5ykKcULndB8VzZZgVc5PvGQDHNEB2OMoXj0DNH3Ey9V2Mtf1W6
+Lp6lqjDUw0Ke36SvxXkMDf4PIIQJ355JWVAKkOGO4oIo3t2D3fLgrz8diwGi3dO8
+EhOLJzaKh4qE3qHWp1x9aFfpYRF2qOWYQAi76CQ+8e/CzZDBxw+QGF8m4dRf7dxl
+5I//5DBlcYPfeJ5iHpHrIcZtQjF2Gt7SDvJWrV7604jgwdMCAwEAAaNTMFEwHQYD
+VR0OBBYEFFsPbt17BGjTSJ9ZR4RY3+lm19tfMB8GA1UdIwQYMBaAFFsPbt17BGjT
+SJ9ZR4RY3+lm19tfMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIB
+AD17YQ0AaRxZZGCFVUh/EOUIFCNP2j/4iYWeep/HYdQZ7xLWju6YQjoSLZ7ijbWw
+IarkJiDgE9tT8C9KCNsQYR1ghG4OMppCypv2ZcfPdJZDyMQ3IfxbdPDywa1a1qZ/
+QbvjPXei6KRlM/H0dFScM9TZqhKITGxih2eraZbvVyaPz/AHhbEn3BvN4l45IsAn
++K46uqBK8i6xm93nnPUp6yF3YppjC8TBfY0nSviNvce+XJ2izeGHrQ/IdLXucEby
+TD7kcfFz1G5p8sSeqT6gc/SFJzjwuuwEUuKgq4HPiQOrQbFSvJ1kjSUdXlfs+o05
+Ho4Fw+xrC6VFzmwWjkW4smpFT0MqpGE8buc36XOQFm5jCPF3sqjxwDxSXzpUeLt0
+kmOp40rpeGMJ3AqWPr4vUmGVn6TPHLVKE3inETJLdO7Y6R0z39ccjKgomjEi8Mx9
+oHKMGca96orY4kg+DOpnQ25LOwymJrBjrHnHIFgkRb4LrWibuqdMTwPAeHYxnnUe
+Nv9rp2Usl8K1B9hSzm4pQwvsdVEE59SCYROU9qaymXBxR3TVlIOXqfIdOlTMf/0v
+mR/02JvSHYbQiuqCYZkburQiPRxAH8DlJJFOVMMfuPmITXGNEsOonT2nqe36sfYz
+ZIrC7hm5VeaEO8vExe6GV8gX5eIvFN+xuLxUf0FV7C7R
+-----END CERTIFICATE-----
diff --git a/build/apex/com.android.runtime.avbpubkey b/build/apex/com.android.runtime.avbpubkey
deleted file mode 100644
index b0ffc9b..0000000
--- a/build/apex/com.android.runtime.avbpubkey
+++ /dev/null
Binary files differ
diff --git a/build/apex/com.android.runtime.debug.pk8 b/build/apex/com.android.runtime.debug.pk8
deleted file mode 100644
index 5eec5d8..0000000
--- a/build/apex/com.android.runtime.debug.pk8
+++ /dev/null
Binary files differ
diff --git a/build/apex/com.android.runtime.debug.x509.pem b/build/apex/com.android.runtime.debug.x509.pem
deleted file mode 100644
index 73402f5..0000000
--- a/build/apex/com.android.runtime.debug.x509.pem
+++ /dev/null
@@ -1,34 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIF0DCCA7igAwIBAgIJALbaJLNNAiRDMA0GCSqGSIb3DQEBCwUAMHwxCzAJBgNV
-BAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1Nb3VudGFpbiBW
-aWV3MRAwDgYDVQQKDAdBbmRyb2lkMRAwDgYDVQQLDAdBbmRyb2lkMRwwGgYDVQQD
-DBNjb20uYW5kcm9pZC5ydW50aW1lMCAXDTE5MDEyNTE3MTQ1NloYDzQ3NTYxMjIx
-MTcxNDU2WjB8MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQG
-A1UEBwwNTW91bnRhaW4gVmlldzEQMA4GA1UECgwHQW5kcm9pZDEQMA4GA1UECwwH
-QW5kcm9pZDEcMBoGA1UEAwwTY29tLmFuZHJvaWQucnVudGltZTCCAiIwDQYJKoZI
-hvcNAQEBBQADggIPADCCAgoCggIBAN1Ns75c0ZdLKnUvEuEotzJ0xyOLzOHYP3y6
-RzcwFyPf84aADc7rQDtjbmEuf9g9QpJhZAxe7G2Jg/wxqaxMW6wCfkpuElW5CAcj
-XQ0i12hRVtqePs5Z5bjzJ/8C7luWh82Vb/s2YoRPoKNXVWFT16CB4RMnw2nW5Uyo
-RHZ98N4MgFSGilafIc6Z0DImreTTwlEvcyKihVUSuzeyPG8CRjshw0C1Hqxt4a8J
-rxAgfPpd84Xo4etYePpVr2K5+vNAVwLpUdD48Y7q9peOJ0tbL8DSohudkzvZsQUo
-CfEfrVBfZv7aPnt6ZJYhcFo1WRBMYczKP4jWb0KgmF963ee3zliU1pXtIYsNBNth
-Mdvy3ml301tI7CKE5A3Yevm40VVqo+IDt7FNxoV3rKPhnO9vi/YqzX/1xMvAto8E
-9A5NvMTqHmS2P0wt1pt9KSuXXjoIAWaHZOATDkVI+jLjDrYFNdhqXVgbAaVtI60j
-lRaSWxzBr4o+g2W8ks/JgM2mwJ6qaTNoDMzg823MKzy/t3935sxm5GhFs9AY9Qz/
-4B3exqYUEFJLN6dJLCVppCmFCdCONSxN7bXPo+3b9LlZuKAOP17N04+eKcwXVeYz
-Z3a7SfyMzq+DtLhAn/TSliSbbCAVUxiOZnVX1nM0Gs3/BYCs0TUh2tSqO48pwDrx
-Pw7z9+m5AgMBAAGjUzBRMB0GA1UdDgQWBBRT9s/tu4uqtrglUFjQbwY5p+17DjAf
-BgNVHSMEGDAWgBRT9s/tu4uqtrglUFjQbwY5p+17DjAPBgNVHRMBAf8EBTADAQH/
-MA0GCSqGSIb3DQEBCwUAA4ICAQBNY5giwZCM0sE93Dj2zEe8qCRwIaV4bvSe744X
-Y1+405vdrNEHKPUfFn1xLSnhiGU3loZrP15lexmWvxycLNEy0UxZgq3eR7HuW6xp
-GIm9ttYDZEP+pL9hwew3jiR38NRRR1Ur1MsBNkZnCELC1W8RFWIi77Fsb4fj2mGn
-2R+2voBvVS5kjkytW079CEIsZN9RVYfERiKPCfJDa87kk0xduqyh7sDegQl0B2Ot
-R9KnD1dJZjbii2GRkhpJ/Ig17CQH3J8PY/SIt9L+QAchnIEF051sjbBRUJuPK9gL
-eBEkZkwD1JLqGO6fxkcjNx7MIevTnIBjX2Epr8luyRy7eR3TdBT3aRQcCUqBCi3i
-WxAVR5sOZ90INTXftFbztoklitpQ9mxKXgFr+xggL6u3BdJk1Nt9BsYmRzh5Bg+6
-1eMDBumy3JEA7whE8p75X9cSnKTNrDQU3DA5XzpIhmI91XJArBhBfxgqGxaTf0uq
-SfZRDfnaO456ZsZdKUy62mry6Vg/hvzX52x/HxDlSQWbpYp5t03hshaWxtNE376q
-GdqOoGRRWCvyWi/UOYzabp6czTjwV1JH9IU379CsqIO5UNJ2MM2re4TDXofefU1C
-6eiYihy28xDfIiCdretLRlvFYFF/5X5xby/XWsDA9sGlL5OOiXC6o0Pl9vbek2+T
-Ibx3Nw==
------END CERTIFICATE-----
diff --git a/build/apex/com.android.runtime.pem b/build/apex/com.android.runtime.pem
deleted file mode 100644
index 4c7ce4b..0000000
--- a/build/apex/com.android.runtime.pem
+++ /dev/null
@@ -1,51 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIJKgIBAAKCAgEAx/VRn+TOZ4Hah9tHkb2Jvw7aQcqurnNamGa1Ta3x09HEV45s
-KTAqeTIPpbagx7aj6LNimiqoJaClV8pFhYfC6y7GLnXBk6PRGb2kPmrWy2aQFRkh
-Z2LBQwu15Rqr3SVbzMPbC5qoXOpUopzZnjRwniR32fnpJedUwpSMxaJwXDxfgBku
-Wm3EHBuTQ33L/z3VGwbVp1Rh/QhI/RfcwT1u6o9XUl0LqiQu/8DLTLNmsjAsQkbA
-8O1ToIBu2l71HaMqXOKRKtVuAYPyAMml5hXSH0dva2ebKkUM8E7FhcsK71QJ5iCs
-L2uC+OmG9f4aSqiIJld7/nDU7nrkiUxjs5bGp2PIxHmuv725XdBMJ+IuqnZGGkA8
-4tF14bY1YX4Tq6ojzATBnbFcZEAU4epJvX13Wu11ktYndMbppUtnVCdhO2vnA/tP
-MpBSOCHMk2Y2Q96LcIN9ANJrcrkrSIGBTQdvCRJ9LtofXlk/ytGIUceCzRtFhmwL
-zWFwJVT7cQX04Pw/EX/zrZyOq7SUYCGDsBwZsUtlZ30Cx92dergtKlZyJFqKnwMv
-hajr55mqRCv4M1dumCgiQaml29ftXWE6wQxqI0jQN8seSVz/HUazjSb3QFXgX16z
-w4VkxqSKu4subqesMcxiyev5McGXUUthkRGDSSFbJwX0L5jNEPyYPUu2nJ0CAwEA
-AQKCAgEAxGKuDin8hjBE3tWAjyTmWp1Nwvw7X96vhaqqOmayceU9vviERlel/24p
-bAnYEw3QIcW8+8kVaA9FFNn2OdVCnRVNU2gX/NcRkQRugVcRKqfKrs4FvrKBOUYR
-Gbh+Py5n4M4jHlyBKvCCu0rteLHsQYVzqMQINk/jMVAQijKlxBEPgpI4slvIFgsH
-MWwlpMOnv2mRAUyhCJDQjrKW/7tEal7p1lzIDgyHlGxXvzcbj7o8XcN7z6RnU+WP
-+iz09GzCOIPVK4p/BkH+tsNVioq32jygs44IGRXERWg4GtV2IeQZ7Mj+E3y2H53M
-DWHJlLW9MlsNzrImjypntmkuKr3Uz+ipg/oXD1tv/XJkBkJUsWSQHzGw4DfxRfq7
-eJ9LlIMzrQn8ZJAJTSsckmGuakSyD9amSbtn1kl+fEZge9SvAoZVZelwB1qfGgyS
-qQVAN9x1SP0DCeX33syxT2rxZVOUZgRT8yt01jVcIU3dD66McYRjiUY6uG1aZ3Xb
-p8TD3xKMqPPc7dIN3xcN58S+sIejydmm636LE1ykA0dYPczqxDfIfhbqE/42B5LZ
-grjZdXN1pd97IeEFQLd+DfP8iq80D6k6ojmXxANXCz1ilJXyr2defWUwSSiwsD5v
-HacFeOQ6+KQyYrkdhbpa5XlO6luDIZmxN3B6rx7kqg6UZW9EzYkCggEBAPDNOZ6X
-TIKBIdV5zkr2rvjor/WvPnoWUOBCmxh8zaAZhReE3RitNjtEVz/ns/x8vyyMRdPA
-JDszBrawYlMjoEW9NQe6BYKfwKRl+QzsWEIsdBfYB70vmguwS/VdMbVaU/jWFbS+
-IFB9F88iEJiI8IeH+IomGXinCDxAkXqJztFZRzonaX5+QHC4s8geRyYn9gs6SxHy
-MqOOzifnebZg4dXLCL7jMDGsEa/Fu188FFae407BsOEt4bday37n91xysdilkPg3
-b3mIB3EFrsbnqXypayM/QUfQ/d48Xfa/l+74i1Qpd1MIeHYNndLDxtRes9Oc7Rnv
-oCdI9Lkc+KuR8AcCggEBANSUKb2jz0VfSZSZsgL5gj34Kcfoe5peQvP+pUuJmZhy
-8QkGUUNtq2l86PMJSfJknbUhVLPe0wzT8NG08HTMkVHlw7lve//avugfpnrR7hsZ
-BTWDjW44x+Y8Q8dwTUl3nYtEYn81ycUzmFBmYDEVXjlvyMlXe0HLEz90v2wwtZlp
-IxEXgEgMnLj36JH5iKh7YuLf9c8laok7Jed6u+h5nlXUcbfaSVN6U3K+6UdQKUrr
-TaSQLw2pEsZ6CEt0yGJDkoID7mfTfc1/olNWWGUz0RE9G5eqQYjgEoAiTBZZeSlm
-3Kaun8gydN7wwJ6AjPCPFOwtgV7dUoN4YbWgfsAgnTsCggEBALHOWCWKgqw6vcjr
-0C/6Ruj0qDk51WBA6icuB2flf9AgB+595OQ7wjexFtvRM03UrzUtvsHEtvwfiW2M
-gI3zWH0mYOn7qeXyIEVEJsosGl+Cg5a3pb9ETvMiknPzBKlssWSkcBKt8R59v/7q
-oGaBd1ocRKF90IEOlT4oT0O0Tkq3Kaj/QR5uCxcwy0+RS+gYyc0wlg4CUPIEmKVO
-fsj0cM10xlhtWUDUVZr83oZLzpjHagDVdM5RGsJRAMIMdtKEvl3Co3ElPeL3VsdV
-8uBcXwH1925nXsSwxUQ8PwXcI0wJqpfSppFhR9Gj7E2c0kwuQYqX7VuhXRik/k9R
-3SyS7jECggEBAL7q7m4GL8IjKSdPvgNT6TgUqBmFX3UtkT4nhnbH9u1m1bmANf20
-Ak20RFb6EbKj0Mv7SmJdDfkoY9FDiu2rSBxgmZ7yVFBeOjSpMFCAODOYDgiYxK2o
-S0go+cqlvpPr3M9WNIwBV9xHUVVsDJookb5N+etyKR3W78t+4+ib+oz0Uu0nySts
-QFkTNYncrXJ7lj0iXVaUSRFE0O8LWLYafCyjpxoy7sYNR+L3OPW2Nc+2cr4ITGod
-XeJpeQejs9Ak1fD07OnMlOC576SfGLaTigHMevqEi2UNsS/pHaK46stXOXZtwM0B
-G9uaJ7RyyaHHL0hKOjVj2pZ+yGph4VRWNj8CggEAQlp/QytXhKZtM9OqRy/th+XO
-ctoVEl8codUydwwxMCsKqGYiCXazeyDZQimOjaxSNFXo8hWuf694WGsQJ6TyXCEs
-0JAJbCooI+DI9Z4LbqHtLDg1/S6a1558Nyrc6j6amevvbB5xKS2mKhGl5JgzBsJO
-H3yE0DD1DHaSM3V1rTfdyGoaxNESw45bnpxkAooMrw62OIO/9f502FLUx+sq+koT
-aajw4qQ6rBll3/+PKCORKzncHDMkIbeD6c6sX+ONUz7vxg3pV4eZG7NClWvA24Td
-1sANz3m6EmqG41lBzeUGConWxWRwkEXJgbxmPwMariRKR8aNVOlDVVbDp9Hhxg==
------END RSA PRIVATE KEY-----
diff --git a/build/apex/com.android.runtime.release.pk8 b/build/apex/com.android.runtime.release.pk8
deleted file mode 100644
index c63efb8..0000000
--- a/build/apex/com.android.runtime.release.pk8
+++ /dev/null
Binary files differ
diff --git a/build/apex/com.android.runtime.release.x509.pem b/build/apex/com.android.runtime.release.x509.pem
deleted file mode 100644
index 4a7607a..0000000
--- a/build/apex/com.android.runtime.release.x509.pem
+++ /dev/null
@@ -1,34 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIF0DCCA7igAwIBAgIJAMtsu/wrkZurMA0GCSqGSIb3DQEBCwUAMHwxCzAJBgNV
-BAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1Nb3VudGFpbiBW
-aWV3MRAwDgYDVQQKDAdBbmRyb2lkMRAwDgYDVQQLDAdBbmRyb2lkMRwwGgYDVQQD
-DBNjb20uYW5kcm9pZC5ydW50aW1lMCAXDTE5MDEyNTE3MTU0MFoYDzQ3NTYxMjIx
-MTcxNTQwWjB8MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQG
-A1UEBwwNTW91bnRhaW4gVmlldzEQMA4GA1UECgwHQW5kcm9pZDEQMA4GA1UECwwH
-QW5kcm9pZDEcMBoGA1UEAwwTY29tLmFuZHJvaWQucnVudGltZTCCAiIwDQYJKoZI
-hvcNAQEBBQADggIPADCCAgoCggIBAL+aGSc+HU69vV1VbZb6WjXMDrE2Jo+JjXLU
-yVS3o8qlQeqN0RFbsbwnihnwg2xBnM6JiskAcuocz87dDuEt1zUEInC3Hpt/C2eg
-GUZepbq8po+v+b04YlX3aTaYTFqMjU0aQkjOqhnmVxg+KHpvudlvKB3VhH3D61al
-RReQzgM/Q6aUxcr4Z8XwvzV3i0K5NjiSuSt14K2yIaheh2OTbbwtvm3d+0sQDco6
-1gl0l4rM4e+GjxgWVqx8mfKhd4HTS3YIBDWXR6DFPBARzVBIpZu2QK4U6Jdsy2wI
-xg8+d5KWAoNQb7IQK6LQy7Fbw3PNJDo4Ph39G2wNgeMemz8uSQ9FZujc0AgxBom6
-J+ad7zlJBhYFC4UIKBYrRfJCHTN3GLuLvhu0p0jNMfdQXF6Tv/iG9g8JdZ0QjeWm
-/K+h1p6LUAIUV0UP7j8nIdp0j6NqMywkoeRDYlVQV/XdI7BiQe9Z8yNbF5Y3CxWT
-hMfN9iby11ImPilzpgv39ORVjDQdxxcwhJg2Xuu1752cBxcHu3ZcR8AiB7PCksXu
-EpUrjjOH8eVxoG1JJ/na5elUg/H35Or+JYYd8I8Ad1/GRkPrnIBAGzuyntOsNs4t
-2CEnhmV6EkEH8KP8miTdaa5NdPIwFRIHVBHcrqsqdmrINvoJhaVRH7YwmFjv48ak
-N4OyW3oLAgMBAAGjUzBRMB0GA1UdDgQWBBRqVJ0tsEOyqhKiZOrOfRD1+jQFMDAf
-BgNVHSMEGDAWgBRqVJ0tsEOyqhKiZOrOfRD1+jQFMDAPBgNVHRMBAf8EBTADAQH/
-MA0GCSqGSIb3DQEBCwUAA4ICAQAs+I1tRWRPmhA+FqcRdlAcY2Vy7NO12hjWXCT9
-hqenGk1/VnhH8aZT5lXZNaWeKonT5W7XydyrjiF09ndZBGew0rEZh6sMXH+G/drT
-9JxvfHsCQGrmX32V1XgAoRjV1VpUYIb2747fFWHHbl5frowNj955pkfseeKilSnF
-orUl5uGNxr6iNaVEUDfXBWkHwipYVyejAqdHkCQDhLtDBWsiskKpLWmmNKuy2QXQ
-uoyUyfeSR1Y+pT83qgmGb1LFLiOqL9ZKPrsIP+tG4lYB8L4SrYJf4MgfoJaKQ8ed
-2jsd42MegvOB2vdMyLgkf7EM/9DpE4BLpAy2mNd1AccL9YQ+0ezruxh6sYklJWGe
-2bHEbZk0duoNPsA87ZNKfFVV2cNVwSg/22HHjGieMUyPIwyGIzsHId8XiwXpZhLX
-VyacOVRd0SjTWK5Pxj6g21NrrcMXvFeCbveucf2ljKVxBVSbQKt67YlXxd9nLZjN
-zHnJWzDwlWXbyvxheLVVGEo0cqRbhxYMxXd9dM01EXJmIWqS8t0+aw90KKPFITNv
-qpxXnF5JJm1CzeBDtpmfepDupUR1bWansOd0sUuiDF/H1UcDiuBUC643RET1vjhv
-MllsShSeC6KGm3WwE0bhcvA9IdZC8CA3Btzw2J9aJO1gbVZ6vRkH+21cfR07so4N
-yXgprQ==
------END CERTIFICATE-----
diff --git a/build/apex/ld.config.txt b/build/apex/ld.config.txt
index ad1f9e4..0d80f3b 100644
--- a/build/apex/ld.config.txt
+++ b/build/apex/ld.config.txt
@@ -1,45 +1,97 @@
 # Copyright (C) 2018 The Android Open Source Project
 #
-# Bionic loader config file for the Runtime APEX.
+# Bionic loader config file for the ART APEX.
 #
 # There are no versioned APEX paths here - this APEX module does not support
 # having several versions mounted.
 
-dir.runtime = /apex/com.android.runtime/bin/
+dir.art = /apex/com.android.art/bin/
 
-[runtime]
-additional.namespaces = platform,conscrypt,runtime
+[art]
+additional.namespaces = system,conscrypt,art,neuralnetworks,adbd
 
-# Keep in sync with the runtime namespace in /system/etc/ld.config.txt.
+# The default namespace here only links to other namespaces, in particular "art"
+# where the real library loading takes place. Any outgoing links from "art" also
+# need to be present here.
 namespace.default.isolated = true
-# Visible because some libraries are dlopen'ed, e.g. libopenjdk is dlopen'ed by
-# libart.
-namespace.default.visible = true
-namespace.default.search.paths = /apex/com.android.runtime/${LIB}
-namespace.default.asan.search.paths = /apex/com.android.runtime/${LIB}
-# odex files are in /system/framework. dalvikvm has to be able to dlopen the
-# files for CTS.
-namespace.default.permitted.paths = /system/framework
-namespace.default.links = platform
-# TODO(b/119867084): Restrict fallback to platform namespace to PALette library.
-namespace.default.link.platform.allow_all_shared_libs = true
+namespace.default.links = art,system,adbd
+namespace.default.link.art.allow_all_shared_libs = true
+namespace.default.link.system.allow_all_shared_libs = true
+namespace.default.link.adbd.shared_libs = libadbconnection_client.so
 
-# Keep in sync with the default namespace in /system/etc/ld.config.txt.
-namespace.platform.isolated = true
-namespace.platform.search.paths = /system/${LIB}
-namespace.platform.asan.search.paths = /data/asan/system/${LIB}
-namespace.platform.links = default
-namespace.platform.link.default.shared_libs  = libdexfile_external.so
-# libicuuc.so and libicui18n.so are kept for app compat reason. http://b/130788466
-namespace.platform.link.default.shared_libs += libicui18n.so
-namespace.platform.link.default.shared_libs += libicuuc.so
-namespace.platform.link.default.shared_libs += libnativebridge.so
-namespace.platform.link.default.shared_libs += libnativehelper.so
-namespace.platform.link.default.shared_libs += libnativeloader.so
-namespace.platform.link.default.shared_libs += libandroidicu.so
+###############################################################################
+# "art" APEX namespace
+#
+# This is the local namespace of this APEX, with the proper name "art" to make
+# links created e.g. through android_link_namespace work consistently with the
+# system linker config.
+###############################################################################
+namespace.art.isolated = true
+# Visible to allow links to be created at runtime, e.g. through
+# android_link_namespaces in libnativeloader.
+namespace.art.visible = true
+
+# Keep in sync with the "art" namespace in system/core/rootdir/etc/ld.config*.txt.
+namespace.art.search.paths = /apex/com.android.art/${LIB}
+namespace.art.asan.search.paths = /apex/com.android.art/${LIB}
+# JVMTI libraries used in ART testing are located under /data; dalvikvm
+# has to be able to dlopen them.
+# TODO(b/129534335): Move this to the linker configuration of the Test
+# ART APEX when it is available.
+namespace.art.permitted.paths = /data
+namespace.art.asan.permitted.paths = /data
+# odex files are in /system/framework and /apex/com.android.art/javalib.
+# dalvikvm has to be able to dlopen the files for CTS.
+namespace.art.permitted.paths += /system/framework
+namespace.art.permitted.paths += /apex/com.android.art/javalib
+namespace.art.asan.permitted.paths += /system/framework
+namespace.art.asan.permitted.paths += /apex/com.android.art/javalib
+# TODO(b/144533348): to allow symlinks pointing the libs under /system/lib
+# Note that this however does not open all libs in the system partition to
+# the APEX namespaces, because searching of the libs are NOT done in
+# /system/lib, but in /apex/<module>/lib directory.
+namespace.art.permitted.paths += /system/${LIB}
+namespace.art.asan.permitted.paths += /system/${LIB}
+namespace.art.links = system,neuralnetworks,adbd
+# Need allow_all_shared_libs because libart.so can dlopen oat files in
+# /system/framework and /data.
+# TODO(b/130340935): Use a dynamically created linker namespace similar to
+# classloader-namespace for oat files, and tighten this up.
+namespace.art.link.system.allow_all_shared_libs = true
+namespace.art.link.neuralnetworks.shared_libs = libneuralnetworks.so
+namespace.art.link.adbd.shared_libs = libadbconnection_client.so
+
+###############################################################################
+# "system" namespace
+#
+# Corresponds to the default namespace in /system/etc/ld.config.txt. Please keep
+# in sync with linker config files in system/core/rootdir/etc.
+###############################################################################
+namespace.system.isolated = true
+# Visible to allow links to be created at runtime, e.g. through
+# android_link_namespaces in libnativeloader.
+namespace.system.visible = true
+
+namespace.system.search.paths = /system/${LIB}
+namespace.system.asan.search.paths = /data/asan/system/${LIB}
+
+namespace.system.links = art,adbd
+namespace.system.link.art.shared_libs  = libandroidicu.so
+namespace.system.link.art.shared_libs += libdexfile_external.so
+namespace.system.link.art.shared_libs += libdexfiled_external.so
+# TODO(b/120786417 or b/134659294): libicuuc.so and libicui18n.so are kept for app compat.
+namespace.system.link.art.shared_libs += libicui18n.so
+namespace.system.link.art.shared_libs += libicuuc.so
+namespace.system.link.art.shared_libs += libnativebridge.so
+namespace.system.link.art.shared_libs += libnativehelper.so
+namespace.system.link.art.shared_libs += libnativeloader.so
 
 # TODO(b/122876336): Remove libpac.so once it's migrated to Webview
-namespace.platform.link.default.shared_libs += libpac.so
+namespace.system.link.art.shared_libs += libpac.so
+
+namespace.system.link.adbd.shared_libs = libadb_pairing_auth.so
+namespace.system.link.adbd.shared_libs += libadb_pairing_connection.so
+namespace.system.link.adbd.shared_libs += libadb_pairing_server.so
 
 # /system/lib/libc.so, etc are symlinks to
 # /apex/com.android.runtime/lib/bionic/libc.so, etc. Add the path to the
@@ -50,10 +102,10 @@
 # then the latter is never tried because libc.so is always found in
 # /system/lib but fails to pass the accessibility test because of its realpath.
 # It's better to not depend on the ordering if possible.
-namespace.platform.permitted.paths = /apex/com.android.runtime/${LIB}/bionic
-namespace.platform.asan.permitted.paths = /apex/com.android.runtime/${LIB}/bionic
+namespace.system.permitted.paths = /apex/com.android.runtime/${LIB}/bionic
+namespace.system.asan.permitted.paths = /apex/com.android.runtime/${LIB}/bionic
 
-# Note that we don't need to link the default namespace with conscrypt:
+# Note that we don't need to link the art namespace with conscrypt:
 # the runtime Java code and binaries do not explicitly load native libraries
 # from it.
 
@@ -63,28 +115,65 @@
 # This namespace is for libraries within the conscrypt APEX.
 ###############################################################################
 
-# Keep in sync with conscrypt namespace in /system/etc/ld.config.txt.
+# Keep in sync with the "conscrypt" namespace in system/core/rootdir/etc/ld.config*.txt.
 namespace.conscrypt.isolated = true
 namespace.conscrypt.visible = true
 
 namespace.conscrypt.search.paths = /apex/com.android.conscrypt/${LIB}
 namespace.conscrypt.asan.search.paths = /apex/com.android.conscrypt/${LIB}
-namespace.conscrypt.links = runtime,platform
-namespace.conscrypt.link.runtime.shared_libs   = libandroidio.so
-namespace.conscrypt.link.platform.shared_libs  = libc.so
-namespace.conscrypt.link.platform.shared_libs += libm.so
-namespace.conscrypt.link.platform.shared_libs += libdl.so
-namespace.conscrypt.link.platform.shared_libs += liblog.so
+# TODO(b/144533348): to allow symlinks pointing to the libs under /system/lib.
+# Note that this does not open up all libs in the system partition to the
+# APEX namespaces, because the search for libs is NOT done in /system/lib
+# but in the /apex/<module>/lib directory.
+namespace.conscrypt.permitted.paths = /system/${LIB}
+namespace.conscrypt.asan.permitted.paths = /system/${LIB}
+namespace.conscrypt.links = art,system
+namespace.conscrypt.link.art.shared_libs = libandroidio.so
+namespace.conscrypt.link.system.shared_libs  = libc.so
+namespace.conscrypt.link.system.shared_libs += libm.so
+namespace.conscrypt.link.system.shared_libs += libdl.so
+namespace.conscrypt.link.system.shared_libs += liblog.so
 
 ###############################################################################
-# "runtime" APEX namespace
+# "neuralnetworks" APEX namespace
 #
-# This namespace is an alias for the default namespace.
+# This namespace is for libraries within the NNAPI APEX.
 ###############################################################################
-namespace.runtime.isolated = true
-namespace.runtime.visible = true
-namespace.runtime.links = default
-namespace.runtime.link.default.allow_all_shared_libs = true
-namespace.runtime.links += platform
-# TODO(b/119867084): Restrict fallback to platform namespace to PALette library.
-namespace.runtime.link.platform.allow_all_shared_libs = true
+namespace.neuralnetworks.isolated = true
+namespace.neuralnetworks.visible = true
+
+namespace.neuralnetworks.search.paths = /apex/com.android.neuralnetworks/${LIB}
+namespace.neuralnetworks.asan.search.paths = /apex/com.android.neuralnetworks/${LIB}
+# TODO(b/144533348): to allow symlinks pointing to the libs under /system/lib.
+# Note that this does not open up all libs in the system partition to the
+# APEX namespaces, because the search for libs is NOT done in /system/lib
+# but in the /apex/<module>/lib directory.
+namespace.neuralnetworks.permitted.paths = /system/${LIB}
+namespace.neuralnetworks.asan.permitted.paths = /system/${LIB}
+namespace.neuralnetworks.links = system
+namespace.neuralnetworks.link.system.shared_libs  = libc.so
+namespace.neuralnetworks.link.system.shared_libs += libcgrouprc.so
+namespace.neuralnetworks.link.system.shared_libs += libdl.so
+namespace.neuralnetworks.link.system.shared_libs += liblog.so
+namespace.neuralnetworks.link.system.shared_libs += libm.so
+namespace.neuralnetworks.link.system.shared_libs += libnativewindow.so
+namespace.neuralnetworks.link.system.shared_libs += libneuralnetworks_packageinfo.so
+namespace.neuralnetworks.link.system.shared_libs += libsync.so
+namespace.neuralnetworks.link.system.shared_libs += libvndksupport.so
+
+###############################################################################
+# "adbd" APEX namespace
+#
+# This namespace is for libraries within the adbd APEX.
+###############################################################################
+
+namespace.adbd.isolated = true
+namespace.adbd.visible = true
+
+namespace.adbd.search.paths = /apex/com.android.adbd/${LIB}
+namespace.adbd.asan.search.paths = /apex/com.android.adbd/${LIB}
+namespace.adbd.links = system
+namespace.adbd.link.system.shared_libs  = libc.so
+namespace.adbd.link.system.shared_libs += libm.so
+namespace.adbd.link.system.shared_libs += libdl.so
+namespace.adbd.link.system.shared_libs += liblog.so
diff --git a/build/apex/manifest-art.json b/build/apex/manifest-art.json
new file mode 100644
index 0000000..59cbfac
--- /dev/null
+++ b/build/apex/manifest-art.json
@@ -0,0 +1,4 @@
+{
+  "name": "com.android.art",
+  "version": 1
+}
diff --git a/build/apex/manifest.json b/build/apex/manifest.json
deleted file mode 100644
index 587c199..0000000
--- a/build/apex/manifest.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
-  "name": "com.android.runtime",
-  "version": 1,
-  "preInstallHook": "bin/art_preinstall_hook",
-  "postInstallHook": "bin/art_postinstall_hook"
-}
diff --git a/build/apex/runtests.sh b/build/apex/runtests.sh
index 2d9d21b..72bf74b 100755
--- a/build/apex/runtests.sh
+++ b/build/apex/runtests.sh
@@ -15,7 +15,7 @@
 # limitations under the License.
 #
 
-# Run Android Runtime APEX tests.
+# Run ART APEX tests.
 
 SCRIPT_DIR=$(dirname $0)
 
@@ -33,15 +33,24 @@
   exit 1
 }
 
-[[ -n "$ANDROID_PRODUCT_OUT" ]] \
-  || die "You need to source and lunch before you can use this script."
+function setup_die {
+  die "You need to source and lunch before you can use this script."
+}
 
-[[ -n "$ANDROID_HOST_OUT" ]] \
-  || die "You need to source and lunch before you can use this script."
+[[ -n "$ANDROID_BUILD_TOP" ]] || setup_die
+[[ -n "$ANDROID_PRODUCT_OUT" ]] || setup_die
+[[ -n "$ANDROID_HOST_OUT" ]] || setup_die
 
-if [ ! -e "$ANDROID_HOST_OUT/bin/debugfs" ] ; then
-  say "Could not find debugfs, building now."
-  make debugfs-host || die "Cannot build debugfs"
+flattened_apex_p=$($ANDROID_BUILD_TOP/build/soong/soong_ui.bash --dumpvar-mode TARGET_FLATTEN_APEX)\
+  || setup_die
+
+have_debugfs_p=false
+if $flattened_apex_p; then :; else
+  if [ ! -e "$ANDROID_HOST_OUT/bin/debugfs" ] ; then
+    say "Could not find debugfs, building now."
+    build/soong/soong_ui.bash --make-mode debugfs-host || die "Cannot build debugfs"
+  fi
+  have_debugfs_p=true
 fi
 
 # Fail early.
@@ -50,15 +59,17 @@
 build_apex_p=true
 list_image_files_p=false
 print_image_tree_p=false
+print_file_sizes_p=false
 
 function usage {
   cat <<EOF
 Usage: $0 [OPTION]
-Build (optional) and run tests on Android Runtime APEX package (on host).
+Build (optional) and run tests on ART APEX package (on host).
 
-  -s, --skip-build    skip the build step
+  -B, --skip-build    skip the build step
   -l, --list-files    list the contents of the ext4 image (\`find\`-like style)
   -t, --print-tree    list the contents of the ext4 image (\`tree\`-like style)
+  -s, --print-sizes   print the size in bytes of each file when listing contents
   -h, --help          display this help and exit
 
 EOF
@@ -67,9 +78,10 @@
 
 while [[ $# -gt 0 ]]; do
   case "$1" in
-    (-s|--skip-build) build_apex_p=false;;
-    (-l|--list-files) list_image_files_p=true;;
-    (-t|--print-tree) print_image_tree_p=true;;
+    (-B|--skip-build)  build_apex_p=false;;
+    (-l|--list-files)  list_image_files_p=true;;
+    (-t|--print-tree)  print_image_tree_p=true;;
+    (-s|--print-sizes) print_file_sizes_p=true;;
     (-h|--help) usage;;
     (*) die "Unknown option: '$1'
 Try '$0 --help' for more information.";;
@@ -82,22 +94,27 @@
 # Build APEX packages APEX_MODULES.
 function build_apex {
   if $build_apex_p; then
-    say "Building $@" && make "$@" || die "Cannot build $@"
+    say "Building $@" && build/soong/soong_ui.bash --make-mode "$@" || die "Cannot build $@"
   fi
 }
 
 # maybe_list_apex_contents_apex APEX TMPDIR [other]
 function maybe_list_apex_contents_apex {
+  local print_options=()
+  if $print_file_sizes_p; then
+    print_options+=(--size)
+  fi
+
   # List the contents of the apex in list form.
   if $list_image_files_p; then
     say "Listing image files"
-    $SCRIPT_DIR/art_apex_test.py --list $@
+    $SCRIPT_DIR/art_apex_test.py --list ${print_options[@]} $@
   fi
 
   # List the contents of the apex in tree form.
   if $print_image_tree_p; then
     say "Printing image tree"
-    $SCRIPT_DIR/art_apex_test.py --tree $@
+    $SCRIPT_DIR/art_apex_test.py --tree ${print_options[@]} $@
   fi
 }
 
@@ -107,13 +124,18 @@
   exit_status=1
 }
 
-# Test all modules
+# Test all modules, if possible.
 
 apex_modules=(
-  "com.android.runtime.release"
-  "com.android.runtime.debug"
-  "com.android.runtime.host"
+  "com.android.art.release"
+  "com.android.art.debug"
+  "com.android.art.testing"
 )
+if [[ "$HOST_PREFER_32_BIT" = true ]]; then
+  say "Skipping com.android.art.host, as \`HOST_PREFER_32_BIT\` equals \`true\`"
+else
+  apex_modules+=("com.android.art.host")
+fi
 
 # Build the APEX packages (optional).
 build_apex ${apex_modules[@]}
@@ -141,12 +163,24 @@
   if [[ $apex_module = *.host ]]; then
     apex_path="$ANDROID_HOST_OUT/apex/${apex_module}.zipapex"
     art_apex_test_args="$art_apex_test_args --host"
-    test_only_args="--debug"
+    test_only_args="--flavor debug"
   else
-    apex_path="$ANDROID_PRODUCT_OUT/system/apex/${apex_module}.apex"
-    art_apex_test_args="$art_apex_test_args --debugfs $ANDROID_HOST_OUT/bin/debugfs"
-    [[ $apex_module = *.debug ]] && test_only_args="--debug"
+    if $flattened_apex_p; then
+      apex_path="$ANDROID_PRODUCT_OUT/system/apex/${apex_module}"
+      art_apex_test_args="$art_apex_test_args --flattened"
+    else
+      apex_path="$ANDROID_PRODUCT_OUT/system/apex/${apex_module}.apex"
+    fi
+    if $have_debugfs_p; then
+      art_apex_test_args="$art_apex_test_args --debugfs $ANDROID_HOST_OUT/bin/debugfs"
+    fi
+    case $apex_module in
+      (*.release) test_only_args="--flavor release";;
+      (*.debug)   test_only_args="--flavor debug";;
+      (*.testing) test_only_args="--flavor testing";;
+    esac
   fi
+  say "APEX package path: $apex_path"
 
   # List the contents of the APEX image (optional).
   maybe_list_apex_contents_apex $art_apex_test_args $apex_path
@@ -163,6 +197,6 @@
   echo
 done
 
-[[ "$exit_status" = 0 ]] && say "All Android Runtime APEX tests passed"
+[[ "$exit_status" = 0 ]] && say "All ART APEX tests passed"
 
 exit $exit_status
diff --git a/build/art.go b/build/art.go
index 4db8da2..5a09be0 100644
--- a/build/art.go
+++ b/build/art.go
@@ -15,30 +15,31 @@
 package art
 
 import (
-	"android/soong/android"
-	"android/soong/apex"
-	"android/soong/cc"
 	"fmt"
 	"log"
 	"sync"
 
 	"github.com/google/blueprint/proptools"
+
+	"android/soong/android"
+	"android/soong/apex"
+	"android/soong/cc"
 )
 
-var supportedArches = []string{"arm", "arm64", "mips", "mips64", "x86", "x86_64"}
+var supportedArches = []string{"arm", "arm64", "x86", "x86_64"}
 
-func globalFlags(ctx android.BaseContext) ([]string, []string) {
+func globalFlags(ctx android.LoadHookContext) ([]string, []string) {
 	var cflags []string
 	var asflags []string
 
-	opt := envDefault(ctx, "ART_NDEBUG_OPT_FLAG", "-O3")
+	opt := ctx.Config().GetenvWithDefault("ART_NDEBUG_OPT_FLAG", "-O3")
 	cflags = append(cflags, opt)
 
 	tlab := false
 
-	gcType := envDefault(ctx, "ART_DEFAULT_GC_TYPE", "CMS")
+	gcType := ctx.Config().GetenvWithDefault("ART_DEFAULT_GC_TYPE", "CMS")
 
-	if envTrue(ctx, "ART_TEST_DEBUG_GC") {
+	if ctx.Config().IsEnvTrue("ART_TEST_DEBUG_GC") {
 		gcType = "SS"
 		tlab = true
 	}
@@ -48,21 +49,21 @@
 		cflags = append(cflags, "-DART_USE_TLAB=1")
 	}
 
-	imtSize := envDefault(ctx, "ART_IMT_SIZE", "43")
+	imtSize := ctx.Config().GetenvWithDefault("ART_IMT_SIZE", "43")
 	cflags = append(cflags, "-DIMT_SIZE="+imtSize)
 
-	if envTrue(ctx, "ART_HEAP_POISONING") {
+	if ctx.Config().IsEnvTrue("ART_HEAP_POISONING") {
 		cflags = append(cflags, "-DART_HEAP_POISONING=1")
 		asflags = append(asflags, "-DART_HEAP_POISONING=1")
 	}
-	if envTrue(ctx, "ART_USE_CXX_INTERPRETER") {
+	if ctx.Config().IsEnvTrue("ART_USE_CXX_INTERPRETER") {
 		cflags = append(cflags, "-DART_USE_CXX_INTERPRETER=1")
 	}
 
-	if !envFalse(ctx, "ART_USE_READ_BARRIER") && ctx.AConfig().ArtUseReadBarrier() {
+	if !ctx.Config().IsEnvFalse("ART_USE_READ_BARRIER") && ctx.Config().ArtUseReadBarrier() {
 		// Used to change the read barrier type. Valid values are BAKER, BROOKS,
 		// TABLELOOKUP. The default is BAKER.
-		barrierType := envDefault(ctx, "ART_READ_BARRIER_TYPE", "BAKER")
+		barrierType := ctx.Config().GetenvWithDefault("ART_READ_BARRIER_TYPE", "BAKER")
 		cflags = append(cflags,
 			"-DART_USE_READ_BARRIER=1",
 			"-DART_READ_BARRIER_TYPE_IS_"+barrierType+"=1")
@@ -71,11 +72,11 @@
 			"-DART_READ_BARRIER_TYPE_IS_"+barrierType+"=1")
 	}
 
-	if !envFalse(ctx, "ART_USE_GENERATIONAL_CC") {
+	if !ctx.Config().IsEnvFalse("ART_USE_GENERATIONAL_CC") {
 		cflags = append(cflags, "-DART_USE_GENERATIONAL_CC=1")
 	}
 
-	cdexLevel := envDefault(ctx, "ART_DEFAULT_COMPACT_DEX_LEVEL", "fast")
+	cdexLevel := ctx.Config().GetenvWithDefault("ART_DEFAULT_COMPACT_DEX_LEVEL", "fast")
 	cflags = append(cflags, "-DART_DEFAULT_COMPACT_DEX_LEVEL="+cdexLevel)
 
 	// We need larger stack overflow guards for ASAN, as the compiled code will have
@@ -83,55 +84,46 @@
 	// Note: We increase this for both debug and non-debug, as the overflow gap will
 	//       be compiled into managed code. We always preopt (and build core images) with
 	//       the debug version. So make the gap consistent (and adjust for the worst).
-	if len(ctx.AConfig().SanitizeDevice()) > 0 || len(ctx.AConfig().SanitizeHost()) > 0 {
+	if len(ctx.Config().SanitizeDevice()) > 0 || len(ctx.Config().SanitizeHost()) > 0 {
 		cflags = append(cflags,
 			"-DART_STACK_OVERFLOW_GAP_arm=8192",
-			"-DART_STACK_OVERFLOW_GAP_arm64=8192",
-			"-DART_STACK_OVERFLOW_GAP_mips=16384",
-			"-DART_STACK_OVERFLOW_GAP_mips64=16384",
+			"-DART_STACK_OVERFLOW_GAP_arm64=16384",
 			"-DART_STACK_OVERFLOW_GAP_x86=16384",
 			"-DART_STACK_OVERFLOW_GAP_x86_64=20480")
 	} else {
 		cflags = append(cflags,
 			"-DART_STACK_OVERFLOW_GAP_arm=8192",
 			"-DART_STACK_OVERFLOW_GAP_arm64=8192",
-			"-DART_STACK_OVERFLOW_GAP_mips=16384",
-			"-DART_STACK_OVERFLOW_GAP_mips64=16384",
 			"-DART_STACK_OVERFLOW_GAP_x86=8192",
 			"-DART_STACK_OVERFLOW_GAP_x86_64=8192")
 	}
 
-	if envTrue(ctx, "ART_ENABLE_ADDRESS_SANITIZER") {
+	if ctx.Config().IsEnvTrue("ART_ENABLE_ADDRESS_SANITIZER") {
 		// Used to enable full sanitization, i.e., user poisoning, under ASAN.
 		cflags = append(cflags, "-DART_ENABLE_ADDRESS_SANITIZER=1")
 		asflags = append(asflags, "-DART_ENABLE_ADDRESS_SANITIZER=1")
 	}
 
-	if envTrue(ctx, "ART_MIPS32_CHECK_ALIGNMENT") {
-		// Enable the use of MIPS32 CHECK_ALIGNMENT macro for debugging purposes
-		asflags = append(asflags, "-DART_MIPS32_CHECK_ALIGNMENT")
-	}
-
-	if envTrueOrDefault(ctx, "USE_D8_DESUGAR") {
+	if !ctx.Config().IsEnvFalse("USE_D8_DESUGAR") {
 		cflags = append(cflags, "-DUSE_D8_DESUGAR=1")
 	}
 
 	return cflags, asflags
 }
 
-func debugFlags(ctx android.BaseContext) []string {
+func debugFlags(ctx android.LoadHookContext) []string {
 	var cflags []string
 
-	opt := envDefault(ctx, "ART_DEBUG_OPT_FLAG", "-O2")
+	opt := ctx.Config().GetenvWithDefault("ART_DEBUG_OPT_FLAG", "-O2")
 	cflags = append(cflags, opt)
 
 	return cflags
 }
 
-func deviceFlags(ctx android.BaseContext) []string {
+func deviceFlags(ctx android.LoadHookContext) []string {
 	var cflags []string
 	deviceFrameSizeLimit := 1736
-	if len(ctx.AConfig().SanitizeDevice()) > 0 {
+	if len(ctx.Config().SanitizeDevice()) > 0 {
 		deviceFrameSizeLimit = 7400
 	}
 	cflags = append(cflags,
@@ -139,24 +131,24 @@
 		fmt.Sprintf("-DART_FRAME_SIZE_LIMIT=%d", deviceFrameSizeLimit),
 	)
 
-	cflags = append(cflags, "-DART_BASE_ADDRESS="+ctx.AConfig().LibartImgDeviceBaseAddress())
-	if envTrue(ctx, "ART_TARGET_LINUX") {
+	cflags = append(cflags, "-DART_BASE_ADDRESS="+ctx.Config().LibartImgDeviceBaseAddress())
+	if ctx.Config().IsEnvTrue("ART_TARGET_LINUX") {
 		cflags = append(cflags, "-DART_TARGET_LINUX")
 	} else {
 		cflags = append(cflags, "-DART_TARGET_ANDROID")
 	}
-	minDelta := envDefault(ctx, "LIBART_IMG_TARGET_MIN_BASE_ADDRESS_DELTA", "-0x1000000")
-	maxDelta := envDefault(ctx, "LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA", "0x1000000")
+	minDelta := ctx.Config().GetenvWithDefault("LIBART_IMG_TARGET_MIN_BASE_ADDRESS_DELTA", "-0x1000000")
+	maxDelta := ctx.Config().GetenvWithDefault("LIBART_IMG_TARGET_MAX_BASE_ADDRESS_DELTA", "0x1000000")
 	cflags = append(cflags, "-DART_BASE_ADDRESS_MIN_DELTA="+minDelta)
 	cflags = append(cflags, "-DART_BASE_ADDRESS_MAX_DELTA="+maxDelta)
 
 	return cflags
 }
 
-func hostFlags(ctx android.BaseContext) []string {
+func hostFlags(ctx android.LoadHookContext) []string {
 	var cflags []string
 	hostFrameSizeLimit := 1736
-	if len(ctx.AConfig().SanitizeHost()) > 0 {
+	if len(ctx.Config().SanitizeHost()) > 0 {
 		// art/test/137-cfi/cfi.cc
 		// error: stack frame size of 1944 bytes in function 'Java_Main_unwindInProcess'
 		hostFrameSizeLimit = 6400
@@ -166,13 +158,13 @@
 		fmt.Sprintf("-DART_FRAME_SIZE_LIMIT=%d", hostFrameSizeLimit),
 	)
 
-	cflags = append(cflags, "-DART_BASE_ADDRESS="+ctx.AConfig().LibartImgHostBaseAddress())
-	minDelta := envDefault(ctx, "LIBART_IMG_HOST_MIN_BASE_ADDRESS_DELTA", "-0x1000000")
-	maxDelta := envDefault(ctx, "LIBART_IMG_HOST_MAX_BASE_ADDRESS_DELTA", "0x1000000")
+	cflags = append(cflags, "-DART_BASE_ADDRESS="+ctx.Config().LibartImgHostBaseAddress())
+	minDelta := ctx.Config().GetenvWithDefault("LIBART_IMG_HOST_MIN_BASE_ADDRESS_DELTA", "-0x1000000")
+	maxDelta := ctx.Config().GetenvWithDefault("LIBART_IMG_HOST_MAX_BASE_ADDRESS_DELTA", "0x1000000")
 	cflags = append(cflags, "-DART_BASE_ADDRESS_MIN_DELTA="+minDelta)
 	cflags = append(cflags, "-DART_BASE_ADDRESS_MAX_DELTA="+maxDelta)
 
-	if len(ctx.AConfig().SanitizeHost()) > 0 && !envFalse(ctx, "ART_ENABLE_ADDRESS_SANITIZER") {
+	if len(ctx.Config().SanitizeHost()) > 0 && !ctx.Config().IsEnvFalse("ART_ENABLE_ADDRESS_SANITIZER") {
 		// We enable full sanitization on the host by default.
 		cflags = append(cflags, "-DART_ENABLE_ADDRESS_SANITIZER=1")
 	}
@@ -202,7 +194,7 @@
 	p.Target.Android.Cflags = deviceFlags(ctx)
 	p.Target.Host.Cflags = hostFlags(ctx)
 
-	if envTrue(ctx, "ART_DEX_FILE_ACCESS_TRACKING") {
+	if ctx.Config().IsEnvTrue("ART_DEX_FILE_ACCESS_TRACKING") {
 		p.Cflags = append(p.Cflags, "-DART_DEX_FILE_ACCESS_TRACKING")
 		p.Sanitize.Recover = []string{
 			"address",
@@ -223,7 +215,7 @@
 }
 
 func customLinker(ctx android.LoadHookContext) {
-	linker := envDefault(ctx, "CUSTOM_TARGET_LINKER", "")
+	linker := ctx.Config().Getenv("CUSTOM_TARGET_LINKER")
 	type props struct {
 		DynamicLinker string
 	}
@@ -246,11 +238,13 @@
 	}
 
 	p := &props{}
-	if envTrue(ctx, "HOST_PREFER_32_BIT") {
+	if ctx.Config().IsEnvTrue("HOST_PREFER_32_BIT") {
 		p.Target.Host.Compile_multilib = proptools.StringPtr("prefer32")
 	}
 
-	ctx.AppendProperties(p)
+	// Prepend to make it overridable in the blueprints. Note that it doesn't work
+	// to override the property in a cc_defaults module.
+	ctx.PrependProperties(p)
 }
 
 var testMapKey = android.NewOnceKey("artTests")
@@ -262,7 +256,7 @@
 }
 
 func testInstall(ctx android.InstallHookContext) {
-	testMap := testMap(ctx.AConfig())
+	testMap := testMap(ctx.Config())
 
 	var name string
 	if ctx.Host() {
@@ -276,13 +270,31 @@
 	defer artTestMutex.Unlock()
 
 	tests := testMap[name]
-	tests = append(tests, ctx.Path().RelPathString())
+	tests = append(tests, ctx.Path().ToMakePath().String())
 	testMap[name] = tests
 }
 
 var artTestMutex sync.Mutex
 
 func init() {
+	artModuleTypes := []string{
+		"art_cc_library",
+		"art_cc_library_static",
+		"art_cc_binary",
+		"art_cc_test",
+		"art_cc_test_library",
+		"art_cc_defaults",
+		"libart_cc_defaults",
+		"libart_static_cc_defaults",
+		"art_global_defaults",
+		"art_debug_defaults",
+		"art_apex_test_host",
+	}
+	android.AddNeverAllowRules(
+		android.NeverAllow().
+			NotIn("art", "external/vixl").
+			ModuleType(artModuleTypes...))
+
 	android.RegisterModuleType("art_cc_library", artLibrary)
 	android.RegisterModuleType("art_cc_library_static", artStaticLibrary)
 	android.RegisterModuleType("art_cc_binary", artBinary)
@@ -294,19 +306,31 @@
 	android.RegisterModuleType("art_global_defaults", artGlobalDefaultsFactory)
 	android.RegisterModuleType("art_debug_defaults", artDebugDefaultsFactory)
 
+	// ART apex is special because it must include dexpreopt files for bootclasspath jars.
+	android.RegisterModuleType("art_apex", artApexBundleFactory)
+	android.RegisterModuleType("art_apex_test", artTestApexBundleFactory)
+
 	// TODO: This makes the module disable itself for host if HOST_PREFER_32_BIT is
 	// set. We need this because the multilib types of binaries listed in the apex
 	// rule must match the declared type. This is normally not difficult but HOST_PREFER_32_BIT
 	// changes this to 'prefer32' on all host binaries. Since HOST_PREFER_32_BIT is
 	// only used for testing we can just disable the module.
 	// See b/120617876 for more information.
-	android.RegisterModuleType("art_apex_test", artTestApexBundleFactory)
+	android.RegisterModuleType("art_apex_test_host", artHostTestApexBundleFactory)
+}
+
+func artApexBundleFactory() android.Module {
+	return apex.ApexBundleFactory(false /*testApex*/, true /*artApex*/)
 }
 
 func artTestApexBundleFactory() android.Module {
-	module := apex.ApexBundleFactory( /*testApex*/ true)
+	return apex.ApexBundleFactory(true /*testApex*/, true /*artApex*/)
+}
+
+func artHostTestApexBundleFactory() android.Module {
+	module := apex.ApexBundleFactory(true /*testApex*/, true /*artApex*/)
 	android.AddLoadHook(module, func(ctx android.LoadHookContext) {
-		if envTrue(ctx, "HOST_PREFER_32_BIT") {
+		if ctx.Config().IsEnvTrue("HOST_PREFER_32_BIT") {
 			type props struct {
 				Target struct {
 					Host struct {
@@ -342,7 +366,7 @@
 func artDefaultsFactory() android.Module {
 	c := &codegenProperties{}
 	module := cc.DefaultsFactory(c)
-	android.AddLoadHook(module, func(ctx android.LoadHookContext) { codegen(ctx, c, true) })
+	android.AddLoadHook(module, func(ctx android.LoadHookContext) { codegen(ctx, c, staticAndSharedLibrary) })
 
 	return module
 }
@@ -350,7 +374,7 @@
 func libartDefaultsFactory() android.Module {
 	c := &codegenProperties{}
 	module := cc.DefaultsFactory(c)
-	android.AddLoadHook(module, func(ctx android.LoadHookContext) { codegen(ctx, c, true) })
+	android.AddLoadHook(module, func(ctx android.LoadHookContext) { codegen(ctx, c, staticAndSharedLibrary) })
 
 	return module
 }
@@ -358,33 +382,29 @@
 func libartStaticDefaultsFactory() android.Module {
 	c := &codegenProperties{}
 	module := cc.DefaultsFactory(c)
-	android.AddLoadHook(module, func(ctx android.LoadHookContext) { codegen(ctx, c, true) })
+	android.AddLoadHook(module, func(ctx android.LoadHookContext) { codegen(ctx, c, staticLibrary) })
 
 	return module
 }
 
 func artLibrary() android.Module {
-	m, _ := cc.NewLibrary(android.HostAndDeviceSupported)
-	module := m.Init()
+	module := cc.LibraryFactory()
 
-	installCodegenCustomizer(module, true)
+	installCodegenCustomizer(module, staticAndSharedLibrary)
 
 	return module
 }
 
 func artStaticLibrary() android.Module {
-	m, library := cc.NewLibrary(android.HostAndDeviceSupported)
-	library.BuildOnlyStatic()
-	module := m.Init()
+	module := cc.LibraryStaticFactory()
 
-	installCodegenCustomizer(module, true)
+	installCodegenCustomizer(module, staticLibrary)
 
 	return module
 }
 
 func artBinary() android.Module {
-	binary, _ := cc.NewBinary(android.HostAndDeviceSupported)
-	module := binary.Init()
+	module := cc.BinaryFactory()
 
 	android.AddLoadHook(module, customLinker)
 	android.AddLoadHook(module, prefer32Bit)
@@ -392,10 +412,9 @@
 }
 
 func artTest() android.Module {
-	test := cc.NewTest(android.HostAndDeviceSupported)
-	module := test.Init()
+	module := cc.TestFactory()
 
-	installCodegenCustomizer(module, false)
+	installCodegenCustomizer(module, binary)
 
 	android.AddLoadHook(module, customLinker)
 	android.AddLoadHook(module, prefer32Bit)
@@ -404,32 +423,11 @@
 }
 
 func artTestLibrary() android.Module {
-	test := cc.NewTestLibrary(android.HostAndDeviceSupported)
-	module := test.Init()
+	module := cc.TestLibraryFactory()
 
-	installCodegenCustomizer(module, false)
+	installCodegenCustomizer(module, staticAndSharedLibrary)
 
 	android.AddLoadHook(module, prefer32Bit)
 	android.AddInstallHook(module, testInstall)
 	return module
 }
-
-func envDefault(ctx android.BaseContext, key string, defaultValue string) string {
-	ret := ctx.AConfig().Getenv(key)
-	if ret == "" {
-		return defaultValue
-	}
-	return ret
-}
-
-func envTrue(ctx android.BaseContext, key string) bool {
-	return ctx.AConfig().Getenv(key) == "true"
-}
-
-func envFalse(ctx android.BaseContext, key string) bool {
-	return ctx.AConfig().Getenv(key) == "false"
-}
-
-func envTrueOrDefault(ctx android.BaseContext, key string) bool {
-	return ctx.AConfig().Getenv(key) != "false"
-}
diff --git a/build/codegen.go b/build/codegen.go
index d0db78e..bc7dc42 100644
--- a/build/codegen.go
+++ b/build/codegen.go
@@ -19,124 +19,179 @@
 // arches on the device.
 
 import (
-	"android/soong/android"
 	"sort"
 	"strings"
+
+	"android/soong/android"
 )
 
-func codegen(ctx android.LoadHookContext, c *codegenProperties, library bool) {
+type moduleType struct {
+	library bool
+	static  bool
+	shared  bool
+}
+
+var (
+	staticLibrary          = moduleType{true, true, false}
+	sharedLibrary          = moduleType{true, false, true}
+	staticAndSharedLibrary = moduleType{true, true, true}
+	binary                 = moduleType{false, false, false}
+)
+
+func codegen(ctx android.LoadHookContext, c *codegenProperties, t moduleType) {
 	var hostArches, deviceArches []string
 
-	e := envDefault(ctx, "ART_HOST_CODEGEN_ARCHS", "")
+	e := ctx.Config().Getenv("ART_HOST_CODEGEN_ARCHS")
 	if e == "" {
 		hostArches = supportedArches
 	} else {
 		hostArches = strings.Split(e, " ")
 	}
 
-	e = envDefault(ctx, "ART_TARGET_CODEGEN_ARCHS", "")
+	e = ctx.Config().Getenv("ART_TARGET_CODEGEN_ARCHS")
 	if e == "" {
 		deviceArches = defaultDeviceCodegenArches(ctx)
 	} else {
 		deviceArches = strings.Split(e, " ")
 	}
 
-	addCodegenArchProperties := func(host bool, archName string) {
-		type props struct {
-			Target struct {
-				Android *CodegenCommonArchProperties
-				Host    *CodegenCommonArchProperties
-			}
-		}
-
-		type libraryProps struct {
-			Target struct {
-				Android *CodegenLibraryArchProperties
-				Host    *CodegenLibraryArchProperties
-			}
-		}
-
+	getCodegenArchProperties := func(archName string) *codegenArchProperties {
 		var arch *codegenArchProperties
 		switch archName {
 		case "arm":
 			arch = &c.Codegen.Arm
 		case "arm64":
 			arch = &c.Codegen.Arm64
-		case "mips":
-			arch = &c.Codegen.Mips
-		case "mips64":
-			arch = &c.Codegen.Mips64
 		case "x86":
 			arch = &c.Codegen.X86
 		case "x86_64":
 			arch = &c.Codegen.X86_64
 		default:
 			ctx.ModuleErrorf("Unknown codegen architecture %q", archName)
-			return
+		}
+		return arch
+	}
+
+	appendCodegenSourceArchProperties := func(p *CodegenSourceArchProperties, archName string) {
+		arch := getCodegenArchProperties(archName)
+		p.Srcs = append(p.Srcs, arch.CodegenSourceArchProperties.Srcs...)
+	}
+
+	addCodegenSourceArchProperties := func(host bool, p *CodegenSourceArchProperties) {
+		type sourceProps struct {
+			Target struct {
+				Android *CodegenSourceArchProperties
+				Host    *CodegenSourceArchProperties
+			}
 		}
 
-		p := &props{}
-		l := &libraryProps{}
+		sp := &sourceProps{}
 		if host {
-			p.Target.Host = &arch.CodegenCommonArchProperties
-			l.Target.Host = &arch.CodegenLibraryArchProperties
+			sp.Target.Host = p
 		} else {
-			p.Target.Android = &arch.CodegenCommonArchProperties
-			l.Target.Android = &arch.CodegenLibraryArchProperties
+			sp.Target.Android = p
+		}
+		ctx.AppendProperties(sp)
+	}
+
+	addCodegenArchProperties := func(host bool, archName string) {
+		type commonProps struct {
+			Target struct {
+				Android *CodegenCommonArchProperties
+				Host    *CodegenCommonArchProperties
+			}
 		}
 
-		ctx.AppendProperties(p)
-		if library {
-			ctx.AppendProperties(l)
+		type sharedLibraryProps struct {
+			Target struct {
+				Android *CodegenLibraryArchSharedProperties
+				Host    *CodegenLibraryArchSharedProperties
+			}
+		}
+
+		type staticLibraryProps struct {
+			Target struct {
+				Android *CodegenLibraryArchStaticProperties
+				Host    *CodegenLibraryArchStaticProperties
+			}
+		}
+
+		arch := getCodegenArchProperties(archName)
+
+		cp := &commonProps{}
+		sharedLP := &sharedLibraryProps{}
+		staticLP := &staticLibraryProps{}
+		if host {
+			cp.Target.Host = &arch.CodegenCommonArchProperties
+			sharedLP.Target.Host = &arch.CodegenLibraryArchSharedProperties
+			staticLP.Target.Host = &arch.CodegenLibraryArchStaticProperties
+		} else {
+			cp.Target.Android = &arch.CodegenCommonArchProperties
+			sharedLP.Target.Android = &arch.CodegenLibraryArchSharedProperties
+			staticLP.Target.Android = &arch.CodegenLibraryArchStaticProperties
+		}
+
+		ctx.AppendProperties(cp)
+		if t.library {
+			if t.static {
+				ctx.AppendProperties(staticLP)
+			}
+			if t.shared {
+				ctx.AppendProperties(sharedLP)
+			}
 		}
 	}
 
-	for _, arch := range deviceArches {
-		addCodegenArchProperties(false, arch)
-		if ctx.Failed() {
-			return
+	addCodegenProperties := func(host bool, arches []string) {
+		sourceProps := &CodegenSourceArchProperties{}
+		for _, arch := range arches {
+			appendCodegenSourceArchProperties(sourceProps, arch)
+			addCodegenArchProperties(host, arch)
 		}
+		sourceProps.Srcs = android.FirstUniqueStrings(sourceProps.Srcs)
+		addCodegenSourceArchProperties(host, sourceProps)
 	}
 
-	for _, arch := range hostArches {
-		addCodegenArchProperties(true, arch)
-		if ctx.Failed() {
-			return
-		}
-	}
+	addCodegenProperties(false /* host */, deviceArches)
+	addCodegenProperties(true /* host */, hostArches)
+}
+
+// These properties are allowed to contain the same source file name in different architectures.
+// They will be deduplicated automatically.
+type CodegenSourceArchProperties struct {
+	Srcs []string
 }
 
 type CodegenCommonArchProperties struct {
-	Srcs     []string
 	Cflags   []string
 	Cppflags []string
 }
 
-type CodegenLibraryArchProperties struct {
+type CodegenLibraryArchStaticProperties struct {
 	Static struct {
 		Whole_static_libs []string
 	}
+}
+type CodegenLibraryArchSharedProperties struct {
 	Shared struct {
-		Shared_libs []string
+		Shared_libs               []string
+		Export_shared_lib_headers []string
 	}
 }
 
 type codegenArchProperties struct {
+	CodegenSourceArchProperties
 	CodegenCommonArchProperties
-	CodegenLibraryArchProperties
+	CodegenLibraryArchStaticProperties
+	CodegenLibraryArchSharedProperties
 }
 
 type codegenProperties struct {
 	Codegen struct {
-		Arm, Arm64, Mips, Mips64, X86, X86_64 codegenArchProperties
+		Arm, Arm64, X86, X86_64 codegenArchProperties
 	}
 }
 
-type codegenCustomizer struct {
-	library           bool
-	codegenProperties codegenProperties
-}
-
 func defaultDeviceCodegenArches(ctx android.LoadHookContext) []string {
 	arches := make(map[string]bool)
 	for _, a := range ctx.DeviceConfig().Arches() {
@@ -144,8 +199,6 @@
 		arches[s] = true
 		if s == "arm64" {
 			arches["arm"] = true
-		} else if s == "mips64" {
-			arches["mips"] = true
 		} else if s == "x86_64" {
 			arches["x86"] = true
 		}
@@ -158,8 +211,8 @@
 	return ret
 }
 
-func installCodegenCustomizer(module android.Module, library bool) {
+func installCodegenCustomizer(module android.Module, t moduleType) {
 	c := &codegenProperties{}
-	android.AddLoadHook(module, func(ctx android.LoadHookContext) { codegen(ctx, c, library) })
+	android.AddLoadHook(module, func(ctx android.LoadHookContext) { codegen(ctx, c, t) })
 	module.AddProperties(c)
 }
diff --git a/build/sdk/Android.bp b/build/sdk/Android.bp
new file mode 100644
index 0000000..ed9a4fe
--- /dev/null
+++ b/build/sdk/Android.bp
@@ -0,0 +1,53 @@
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The SDK for the art module apex.
+sdk {
+    name: "art-module-sdk",
+    java_system_modules: [
+        "art-module-public-api-stubs-system-modules",
+        "art-module-intra-core-api-stubs-system-modules",
+        "art-module-platform-api-stubs-system-modules",
+    ],
+    native_static_libs: [
+        "libartimagevalues",
+    ],
+}
+
+// Exported host tools and libraries.
+module_exports {
+    name: "art-module-host-exports",
+    host_supported: true,
+    device_supported: false,
+    java_libs: [
+        "timezone-host",
+    ],
+}
+
+// Exported tests and supporting libraries
+module_exports {
+    name: "art-module-test-exports",
+    java_libs: [
+        "core-compat-test-rules",
+        "core-test-rules",
+        "core-tests-support",
+        "okhttp-tests-nojarjar",
+    ],
+    java_tests: [
+        "libcore-crypto-tests",
+    ],
+    native_shared_libs: [
+        "libjavacoretests",
+    ],
+}
diff --git a/cmdline/Android.bp b/cmdline/Android.bp
index b46e987..3eac0ed 100644
--- a/cmdline/Android.bp
+++ b/cmdline/Android.bp
@@ -19,6 +19,11 @@
     name: "art_cmdlineparser_headers",
     host_supported: true,
     export_include_dirs: ["."],
+
+    apex_available: [
+        "com.android.art.debug",
+        "com.android.art.release",
+    ],
 }
 
 art_cc_test {
diff --git a/cmdline/cmdline.h b/cmdline/cmdline.h
index 90be30b..5821496 100644
--- a/cmdline/cmdline.h
+++ b/cmdline/cmdline.h
@@ -221,7 +221,7 @@
         "               (specifies /system/framework/<arch>/boot.art as the image file)\n"
         "\n";
     usage += android::base::StringPrintf(  // Optional.
-        "  --instruction-set=(arm|arm64|mips|mips64|x86|x86_64): for locating the image\n"
+        "  --instruction-set=(arm|arm64|x86|x86_64): for locating the image\n"
         "      file based on the image location set.\n"
         "      Example: --instruction-set=x86\n"
         "      Default: %s\n"
@@ -269,6 +269,10 @@
     // Checks for --boot-image location.
     {
       std::string boot_image_location = boot_image_location_;
+      size_t separator_pos = boot_image_location.find(':');
+      if (separator_pos != std::string::npos) {
+        boot_image_location = boot_image_location.substr(/*pos*/ 0u, /*size*/ separator_pos);
+      }
       size_t file_name_idx = boot_image_location.rfind('/');
       if (file_name_idx == std::string::npos) {  // Prevent a InsertIsaDirectory check failure.
         *error_msg = "Boot image location must have a / in it";
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index a70b34d..37dcd16 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -256,7 +256,7 @@
 TEST_F(CmdlineParserTest, TestLogVerbosity) {
   {
     const char* log_args = "-verbose:"
-        "class,compiler,gc,heap,jdwp,jni,monitor,profiler,signals,simulator,startup,"
+        "class,compiler,gc,heap,interpreter,jdwp,jni,monitor,profiler,signals,simulator,startup,"
         "third-party-jni,threads,verifier,verifier-debug";
 
     LogVerbosity log_verbosity = LogVerbosity();
@@ -264,6 +264,7 @@
     log_verbosity.compiler = true;
     log_verbosity.gc = true;
     log_verbosity.heap = true;
+    log_verbosity.interpreter = true;
     log_verbosity.jdwp = true;
     log_verbosity.jni = true;
     log_verbosity.monitor = true;
@@ -390,11 +391,6 @@
   EXPECT_SINGLE_PARSE_VALUE(JdwpProvider::kDefaultJdwpProvider, opt_args, M::JdwpProvider);
 }  // TEST_F
 
-TEST_F(CmdlineParserTest, TestJdwpProviderInternal) {
-  const char* opt_args = "-XjdwpProvider:internal";
-  EXPECT_SINGLE_PARSE_VALUE(JdwpProvider::kInternal, opt_args, M::JdwpProvider);
-}  // TEST_F
-
 TEST_F(CmdlineParserTest, TestJdwpProviderNone) {
   const char* opt_args = "-XjdwpProvider:none";
   EXPECT_SINGLE_PARSE_VALUE(JdwpProvider::kNone, opt_args, M::JdwpProvider);
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 6f784b3..25902f1 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -30,10 +30,10 @@
 // Includes for the types that are being specialized
 #include <string>
 #include "base/time_utils.h"
+#include "base/logging.h"
 #include "experimental_flags.h"
 #include "gc/collector_type.h"
 #include "gc/space/large_object_space.h"
-#include "jdwp/jdwp.h"
 #include "jdwp_provider.h"
 #include "jit/profile_saver_options.h"
 #include "plugin.h"
@@ -74,13 +74,10 @@
     if (option == "help") {
       return Result::Usage(
           "Example: -XjdwpProvider:none to disable JDWP\n"
-          "Example: -XjdwpProvider:internal for internal jdwp implementation\n"
           "Example: -XjdwpProvider:adbconnection for adb connection mediated jdwp implementation\n"
           "Example: -XjdwpProvider:default for the default jdwp implementation\n");
     } else if (option == "default") {
       return Result::Success(JdwpProvider::kDefaultJdwpProvider);
-    } else if (option == "internal") {
-      return Result::Success(JdwpProvider::kInternal);
     } else if (option == "adbconnection") {
       return Result::Success(JdwpProvider::kAdbConnection);
     } else if (option == "none") {
@@ -445,8 +442,6 @@
     return gc::kCollectorTypeCMS;
   } else if (option == "SS") {
     return gc::kCollectorTypeSS;
-  } else if (option == "GSS") {
-    return gc::kCollectorTypeGSS;
   } else if (option == "CC") {
     return gc::kCollectorTypeCC;
   } else {
@@ -600,6 +595,8 @@
         log_verbosity.gc = true;
       } else if (verbose_options[j] == "heap") {
         log_verbosity.heap = true;
+      } else if (verbose_options[j] == "interpreter") {
+        log_verbosity.interpreter = true;
       } else if (verbose_options[j] == "jdwp") {
         log_verbosity.jdwp = true;
       } else if (verbose_options[j] == "jit") {
@@ -630,6 +627,8 @@
         log_verbosity.image = true;
       } else if (verbose_options[j] == "systrace-locks") {
         log_verbosity.systrace_lock_logging = true;
+      } else if (verbose_options[j] == "plugin") {
+        log_verbosity.plugin = true;
       } else if (verbose_options[j] == "agents") {
         log_verbosity.agents = true;
       } else if (verbose_options[j] == "dex") {
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 52bd89f..cbfff89 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -121,28 +121,6 @@
                 "utils/arm64/managed_register_arm64.cc",
             ],
         },
-        mips: {
-            srcs: [
-                "jni/quick/mips/calling_convention_mips.cc",
-                "optimizing/code_generator_mips.cc",
-                "optimizing/code_generator_vector_mips.cc",
-                "optimizing/instruction_simplifier_mips.cc",
-                "optimizing/intrinsics_mips.cc",
-                "optimizing/pc_relative_fixups_mips.cc",
-                "utils/mips/assembler_mips.cc",
-                "utils/mips/managed_register_mips.cc",
-            ],
-        },
-        mips64: {
-            srcs: [
-                "jni/quick/mips64/calling_convention_mips64.cc",
-                "optimizing/code_generator_mips64.cc",
-                "optimizing/code_generator_vector_mips64.cc",
-                "optimizing/intrinsics_mips64.cc",
-                "utils/mips64/assembler_mips64.cc",
-                "utils/mips64/managed_register_mips64.cc",
-            ],
-        },
         x86: {
             srcs: [
                 "jni/quick/x86/calling_convention_x86.cc",
@@ -175,9 +153,9 @@
     shared_libs: [
         "libbase",
     ],
-    include_dirs: ["art/disassembler"],
     header_libs: [
-        "art_cmdlineparser_headers",  // For compiler_options.
+        "art_cmdlineparser_headers", // For compiler_options.
+        "art_disassembler_headers",
         "libnativehelper_header_only",
     ],
 
@@ -202,8 +180,6 @@
         "optimizing/optimizing_compiler_stats.h",
 
         "utils/arm/constants_arm.h",
-        "utils/mips/assembler_mips.h",
-        "utils/mips64/assembler_mips64.h",
     ],
     output_extension: "operator_out.cc",
 }
@@ -226,6 +202,10 @@
                 shared_libs: [
                     "libvixl",
                 ],
+                // Export vixl headers as they are included in this library's exported headers.
+                export_shared_lib_headers: [
+                    "libvixl",
+                ],
             },
         },
         arm64: {
@@ -239,6 +219,10 @@
                 shared_libs: [
                     "libvixl",
                 ],
+                // Export vixl headers as they are included in this library's exported headers.
+                export_shared_lib_headers: [
+                    "libvixl",
+                ],
             },
         },
     },
@@ -250,14 +234,23 @@
         "libdexfile",
     ],
     whole_static_libs: ["libelffile"],
+    runtime_libs: [
+        // `art::HGraphVisualizerDisassembler::HGraphVisualizerDisassembler` may dynamically load
+        // `libart-disassembler.so`.
+        "libart-disassembler",
+    ],
 
     target: {
         android: {
             lto: {
-                 thin: true,
+                thin: true,
             },
         },
     },
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 cc_defaults {
@@ -290,6 +283,10 @@
                 shared_libs: [
                     "libvixld",
                 ],
+                // Export vixl headers as they are included in this library's exported headers.
+                export_shared_lib_headers: [
+                    "libvixld",
+                ],
             },
         },
         arm64: {
@@ -303,6 +300,10 @@
                 shared_libs: [
                     "libvixld",
                 ],
+                // Export vixl headers as they are included in this library's exported headers.
+                export_shared_lib_headers: [
+                    "libvixld",
+                ],
             },
         },
     },
@@ -314,6 +315,15 @@
         "libdexfiled",
     ],
     whole_static_libs: ["libelffiled"],
+    runtime_libs: [
+        // `art::HGraphVisualizerDisassembler::HGraphVisualizerDisassembler` may dynamically load
+        // `libartd-disassembler.so`.
+        "libartd-disassembler",
+    ],
+
+    apex_available: [
+        "com.android.art.debug",
+    ],
 }
 
 cc_defaults {
@@ -382,12 +392,12 @@
         "optimizing/suspend_check_test.cc",
         "utils/atomic_dex_ref_map_test.cc",
         "utils/dedupe_set_test.cc",
-        "utils/intrusive_forward_list_test.cc",
         "utils/swap_space_test.cc",
 
         "jni/jni_cfi_test.cc",
         "optimizing/codegen_test.cc",
         "optimizing/load_store_analysis_test.cc",
+        "optimizing/load_store_elimination_test.cc",
         "optimizing/optimizing_cfi_test.cc",
         "optimizing/scheduler_test.cc",
     ],
@@ -403,15 +413,6 @@
                 "utils/arm64/managed_register_arm64_test.cc",
             ],
         },
-        mips: {
-            srcs: [
-            ],
-        },
-        mips64: {
-            srcs: [
-                "utils/mips64/managed_register_mips64_test.cc",
-            ],
-        },
         x86: {
             srcs: [
                 "utils/x86/managed_register_x86_test.cc",
@@ -450,10 +451,6 @@
         "libnativeloader",
     ],
 
-    include_dirs: [
-        "external/zlib",
-    ],
-
     target: {
         host: {
             shared_libs: [
@@ -475,19 +472,6 @@
                 "utils/assembler_thumb_test.cc",
             ],
         },
-        mips: {
-            srcs: [
-                "optimizing/emit_swap_mips_test.cc",
-                "utils/mips/assembler_mips_test.cc",
-                "utils/mips/assembler_mips32r5_test.cc",
-                "utils/mips/assembler_mips32r6_test.cc",
-            ],
-        },
-        mips64: {
-            srcs: [
-                "utils/mips64/assembler_mips64_test.cc",
-            ],
-        },
         x86: {
             srcs: [
                 "utils/x86/assembler_x86_test.cc",
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 18f00e2..aec6646 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -96,7 +96,9 @@
   uintptr_t base = RoundDown(data, kPageSize);
   uintptr_t limit = RoundUp(data + code_length, kPageSize);
   uintptr_t len = limit - base;
-  int result = mprotect(reinterpret_cast<void*>(base), len, PROT_READ | PROT_WRITE | PROT_EXEC);
+  // Remove hwasan tag.  This is done in kernel in newer versions.  This supports older kernels.
+  void* base_ptr = HWASanUntag(reinterpret_cast<void*>(base));
+  int result = mprotect(base_ptr, len, PROT_READ | PROT_WRITE | PROT_EXEC);
   CHECK_EQ(result, 0);
 
   CHECK(FlushCpuCaches(reinterpret_cast<void*>(base), reinterpret_cast<void*>(base + len)));
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 58f7e4f..03b87ef 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -62,8 +62,6 @@
   switch (instruction_set) {
     case InstructionSet::kArm:
     case InstructionSet::kArm64:
-    case InstructionSet::kMips:
-    case InstructionSet::kMips64:
     case InstructionSet::kX86:
     case InstructionSet::kX86_64:
       return 0;
@@ -81,8 +79,6 @@
   switch (instruction_set) {
     case InstructionSet::kArm:
     case InstructionSet::kArm64:
-    case InstructionSet::kMips:
-    case InstructionSet::kMips64:
     case InstructionSet::kX86:
     case InstructionSet::kX86_64:
       return code_pointer;
diff --git a/compiler/compiler.h b/compiler/compiler.h
index a496c6c..e363e70 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -29,6 +29,7 @@
 namespace jit {
 class JitCodeCache;
 class JitLogger;
+class JitMemoryRegion;
 }  // namespace jit
 namespace mirror {
 class ClassLoader;
@@ -41,7 +42,6 @@
 class CompilerOptions;
 class DexFile;
 template<class T> class Handle;
-class OatWriter;
 class Thread;
 
 class Compiler {
@@ -73,6 +73,7 @@
 
   virtual bool JitCompile(Thread* self ATTRIBUTE_UNUSED,
                           jit::JitCodeCache* code_cache ATTRIBUTE_UNUSED,
+                          jit::JitMemoryRegion* region ATTRIBUTE_UNUSED,
                           ArtMethod* method ATTRIBUTE_UNUSED,
                           bool baseline ATTRIBUTE_UNUSED,
                           bool osr ATTRIBUTE_UNUSED,
diff --git a/compiler/debug/elf_debug_frame_writer.h b/compiler/debug/elf_debug_frame_writer.h
index 31bfed6..f41db07 100644
--- a/compiler/debug/elf_debug_frame_writer.h
+++ b/compiler/debug/elf_debug_frame_writer.h
@@ -31,6 +31,10 @@
 
 static constexpr bool kWriteDebugFrameHdr = false;
 
+// Binary search table is not useful if the number of entries is small.
+// In particular, this avoids it for the in-memory JIT mini-debug-info.
+static constexpr size_t kMinDebugFrameHdrEntries = 100;
+
 static void WriteCIE(InstructionSet isa, /*inout*/ std::vector<uint8_t>* buffer) {
   using Reg = dwarf::Reg;
   // Scratch registers should be marked as undefined.  This tells the
@@ -84,30 +88,6 @@
       WriteCIE(is64bit, return_reg, opcodes, buffer);
       return;
     }
-    case InstructionSet::kMips:
-    case InstructionSet::kMips64: {
-      dwarf::DebugFrameOpCodeWriter<> opcodes;
-      opcodes.DefCFA(Reg::MipsCore(29), 0);  // R29(SP).
-      // core registers.
-      for (int reg = 1; reg < 26; reg++) {
-        if (reg < 16 || reg == 24 || reg == 25) {  // AT, V*, A*, T*.
-          opcodes.Undefined(Reg::MipsCore(reg));
-        } else {
-          opcodes.SameValue(Reg::MipsCore(reg));
-        }
-      }
-      // fp registers.
-      for (int reg = 0; reg < 32; reg++) {
-        if (reg < 24) {
-          opcodes.Undefined(Reg::Mips64Fp(reg));
-        } else {
-          opcodes.SameValue(Reg::Mips64Fp(reg));
-        }
-      }
-      auto return_reg = Reg::MipsCore(31);  // R31(RA).
-      WriteCIE(is64bit, return_reg, opcodes, buffer);
-      return;
-    }
     case InstructionSet::kX86: {
       // FIXME: Add fp registers once libunwind adds support for them. Bug: 20491296
       constexpr bool generate_opcodes_for_x86_fp = false;
@@ -230,7 +210,7 @@
     cfi_section->End();
   }
 
-  if (kWriteDebugFrameHdr) {
+  if (kWriteDebugFrameHdr && method_infos.size() > kMinDebugFrameHdrEntries) {
     std::sort(binary_search_table.begin(), binary_search_table.end());
 
     // Custom Android section. It is very similar to the official .eh_frame_hdr format.
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index 479725b..e7b2a1b 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -70,8 +70,6 @@
         dwarf_isa = 1;  // DW_ISA_ARM_thumb.
         break;
       case InstructionSet::kArm64:
-      case InstructionSet::kMips:
-      case InstructionSet::kMips64:
         code_factor_bits_ = 2;  // 32-bit instructions
         break;
       case InstructionSet::kNone:
diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h
index a5a84bb..37ab948 100644
--- a/compiler/debug/elf_debug_loc_writer.h
+++ b/compiler/debug/elf_debug_loc_writer.h
@@ -42,10 +42,6 @@
       return Reg::X86Core(machine_reg);
     case InstructionSet::kX86_64:
       return Reg::X86_64Core(machine_reg);
-    case InstructionSet::kMips:
-      return Reg::MipsCore(machine_reg);
-    case InstructionSet::kMips64:
-      return Reg::Mips64Core(machine_reg);
     case InstructionSet::kNone:
       LOG(FATAL) << "No instruction set";
   }
@@ -63,10 +59,6 @@
       return Reg::X86Fp(machine_reg);
     case InstructionSet::kX86_64:
       return Reg::X86_64Fp(machine_reg);
-    case InstructionSet::kMips:
-      return Reg::MipsFp(machine_reg);
-    case InstructionSet::kMips64:
-      return Reg::Mips64Fp(machine_reg);
     case InstructionSet::kNone:
       LOG(FATAL) << "No instruction set";
   }
diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc
index 10f673b..765a81d 100644
--- a/compiler/debug/elf_debug_writer.cc
+++ b/compiler/debug/elf_debug_writer.cc
@@ -34,6 +34,7 @@
 #include "elf/elf_debug_reader.h"
 #include "elf/elf_utils.h"
 #include "elf/xz_utils.h"
+#include "jit/debugger_interface.h"
 #include "oat.h"
 #include "stream/vector_output_stream.h"
 
@@ -227,17 +228,21 @@
 
 // Combine several mini-debug-info ELF files into one, while filtering some symbols.
 std::vector<uint8_t> PackElfFileForJIT(
-    InstructionSet isa,
-    const InstructionSetFeatures* features ATTRIBUTE_UNUSED,
-    std::vector<ArrayRef<const uint8_t>>& added_elf_files,
-    std::vector<const void*>& removed_symbols,
+    ArrayRef<const JITCodeEntry*> jit_entries,
+    ArrayRef<const void*> removed_symbols,
+    bool compress,
     /*out*/ size_t* num_symbols) {
   using ElfTypes = ElfRuntimeTypes;
   using Elf_Addr = typename ElfTypes::Addr;
   using Elf_Sym = typename ElfTypes::Sym;
+  const InstructionSet isa = kRuntimeISA;
   CHECK_EQ(sizeof(Elf_Addr), static_cast<size_t>(GetInstructionSetPointerSize(isa)));
+  const uint32_t kPcAlign = GetInstructionSetInstructionAlignment(isa);
+  auto is_pc_aligned = [](const void* pc) { return IsAligned<kPcAlign>(pc); };
+  DCHECK(std::all_of(removed_symbols.begin(), removed_symbols.end(), is_pc_aligned));
   auto is_removed_symbol = [&removed_symbols](Elf_Addr addr) {
-    const void* code_ptr = reinterpret_cast<const void*>(addr);
+    // Remove thumb-bit, if any (using the fact that address is instruction aligned).
+    const void* code_ptr = AlignDown(reinterpret_cast<const void*>(addr), kPcAlign);
     return std::binary_search(removed_symbols.begin(), removed_symbols.end(), code_ptr);
   };
   uint64_t min_address = std::numeric_limits<uint64_t>::max();
@@ -259,8 +264,8 @@
 
     using Reader = ElfDebugReader<ElfTypes>;
     std::deque<Reader> readers;
-    for (ArrayRef<const uint8_t> added_elf_file : added_elf_files) {
-      readers.emplace_back(added_elf_file);
+    for (const JITCodeEntry* it : jit_entries) {
+      readers.emplace_back(GetJITCodeEntrySymFile(it));
     }
 
     // Write symbols names. All other data is buffered.
@@ -318,8 +323,8 @@
   // Produce the outer ELF file.
   // It contains only the inner ELF file compressed as .gnu_debugdata section.
   // This extra wrapping is not necessary but the compression saves space.
-  std::vector<uint8_t> outer_elf_file;
-  {
+  if (compress) {
+    std::vector<uint8_t> outer_elf_file;
     std::vector<uint8_t> gnu_debugdata;
     gnu_debugdata.reserve(inner_elf_file.size() / 4);
     XzCompress(ArrayRef<const uint8_t>(inner_elf_file), &gnu_debugdata);
@@ -334,9 +339,10 @@
     builder->WriteSection(".gnu_debugdata", &gnu_debugdata);
     builder->End();
     CHECK(builder->Good());
+    return outer_elf_file;
+  } else {
+    return inner_elf_file;
   }
-
-  return outer_elf_file;
 }
 
 std::vector<uint8_t> WriteDebugElfFileForClasses(
diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h
index 14a5edb..1ce3c6f 100644
--- a/compiler/debug/elf_debug_writer.h
+++ b/compiler/debug/elf_debug_writer.h
@@ -29,6 +29,7 @@
 
 namespace art {
 class OatHeader;
+struct JITCodeEntry;
 namespace mirror {
 class Class;
 }  // namespace mirror
@@ -56,10 +57,9 @@
     const MethodDebugInfo& method_info);
 
 std::vector<uint8_t> PackElfFileForJIT(
-    InstructionSet isa,
-    const InstructionSetFeatures* features,
-    std::vector<ArrayRef<const uint8_t>>& added_elf_files,
-    std::vector<const void*>& removed_symbols,
+    ArrayRef<const JITCodeEntry*> jit_entries,
+    ArrayRef<const void*> removed_symbols,
+    bool compress,
     /*out*/ size_t* num_symbols);
 
 std::vector<uint8_t> WriteDebugElfFileForClasses(
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 54f216a..172ec6b 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -76,6 +76,7 @@
         continue;
       }
       const verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc);
+      DCHECK(line != nullptr) << "Did not have line for dex pc 0x" << std::hex << dex_pc;
       const verifier::RegType& reg_type(line->GetRegisterType(method_verifier,
                                                               inst.VRegA_21c()));
       const verifier::RegType& cast_type =
diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc
index 31062fb..03c906b 100644
--- a/compiler/driver/compiled_method_storage.cc
+++ b/compiler/driver/compiled_method_storage.cc
@@ -216,6 +216,9 @@
   uint32_t custom_value1 = 0u;
   uint32_t custom_value2 = 0u;
   switch (linker_patch.GetType()) {
+    case linker::LinkerPatch::Type::kCallEntrypoint:
+      custom_value1 = linker_patch.EntrypointOffset();
+      break;
     case linker::LinkerPatch::Type::kBakerReadBarrierBranch:
       custom_value1 = linker_patch.GetBakerCustomValue1();
       custom_value2 = linker_patch.GetBakerCustomValue2();
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 6f39488..cde6ae9 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -20,11 +20,11 @@
 #include <string_view>
 
 #include "android-base/stringprintf.h"
-#include "android-base/strings.h"
 
 #include "arch/instruction_set.h"
 #include "arch/instruction_set_features.h"
 #include "base/runtime_debug.h"
+#include "base/string_view_cpp20.h"
 #include "base/variant_map.h"
 #include "class_linker.h"
 #include "cmdline_parser.h"
@@ -42,8 +42,6 @@
     : compiler_filter_(CompilerFilter::kDefaultCompilerFilter),
       huge_method_threshold_(kDefaultHugeMethodThreshold),
       large_method_threshold_(kDefaultLargeMethodThreshold),
-      small_method_threshold_(kDefaultSmallMethodThreshold),
-      tiny_method_threshold_(kDefaultTinyMethodThreshold),
       num_dex_methods_threshold_(kDefaultNumDexMethodsThreshold),
       inline_max_code_units_(kUnsetInlineMaxCodeUnits),
       instruction_set_(kRuntimeISA == InstructionSet::kArm ? InstructionSet::kThumb2 : kRuntimeISA),
@@ -78,6 +76,7 @@
       deduplicate_code_(true),
       count_hotness_in_compiled_code_(false),
       resolve_startup_const_strings_(false),
+      initialize_app_image_classes_(false),
       check_profiled_methods_(ProfileMethodsCheck::kNone),
       max_image_block_size_(std::numeric_limits<uint32_t>::max()),
       register_allocation_strategy_(RegisterAllocator::kRegisterAllocatorDefault),
@@ -186,18 +185,23 @@
 }
 
 bool CompilerOptions::IsCoreImageFilename(const std::string& boot_image_filename) {
+  std::string_view filename(boot_image_filename);
+  size_t colon_pos = filename.find(':');
+  if (colon_pos != std::string_view::npos) {
+    filename = filename.substr(0u, colon_pos);
+  }
   // Look for "core.art" or "core-*.art".
-  if (android::base::EndsWith(boot_image_filename, "core.art")) {
+  if (EndsWith(filename, "core.art")) {
     return true;
   }
-  if (!android::base::EndsWith(boot_image_filename, ".art")) {
+  if (!EndsWith(filename, ".art")) {
     return false;
   }
-  size_t slash_pos = boot_image_filename.rfind('/');
+  size_t slash_pos = filename.rfind('/');
   if (slash_pos == std::string::npos) {
-    return android::base::StartsWith(boot_image_filename, "core-");
+    return StartsWith(filename, "core-");
   }
-  return boot_image_filename.compare(slash_pos + 1, 5u, "core-") == 0;
+  return filename.compare(slash_pos + 1, 5u, "core-") == 0;
 }
 
 }  // namespace art
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 0ab5ff1..79ba1c2 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -62,8 +62,6 @@
   // Guide heuristics to determine whether to compile method if profile data not available.
   static const size_t kDefaultHugeMethodThreshold = 10000;
   static const size_t kDefaultLargeMethodThreshold = 600;
-  static const size_t kDefaultSmallMethodThreshold = 60;
-  static const size_t kDefaultTinyMethodThreshold = 20;
   static const size_t kDefaultNumDexMethodsThreshold = 900;
   static constexpr double kDefaultTopKProfileThreshold = 90.0;
   static const bool kDefaultGenerateDebugInfo = false;
@@ -72,10 +70,10 @@
   static constexpr size_t kUnsetInlineMaxCodeUnits = -1;
 
   enum class ImageType : uint8_t {
-    kNone,                // JIT or AOT app compilation producing only an oat file but no image.
-    kBootImage,           // Creating boot image.
-    kAppImage,            // Creating app image.
-    kApexBootImage,       // Creating the apex image for jit/zygote experiment b/119800099.
+    kNone,                    // JIT or AOT app compilation producing only an oat file but no image.
+    kBootImage,               // Creating boot image.
+    kBootImageExtension,      // Creating boot image extension.
+    kAppImage,                // Creating app image.
   };
 
   CompilerOptions();
@@ -105,6 +103,10 @@
     return CompilerFilter::IsVerificationEnabled(compiler_filter_);
   }
 
+  bool AssumeDexFilesAreVerified() const {
+    return compiler_filter_ == CompilerFilter::kAssumeVerified;
+  }
+
   bool AssumeClassesAreVerified() const {
     return compiler_filter_ == CompilerFilter::kAssumeVerified;
   }
@@ -125,14 +127,6 @@
     return large_method_threshold_;
   }
 
-  size_t GetSmallMethodThreshold() const {
-    return small_method_threshold_;
-  }
-
-  size_t GetTinyMethodThreshold() const {
-    return tiny_method_threshold_;
-  }
-
   bool IsHugeMethod(size_t num_dalvik_instructions) const {
     return num_dalvik_instructions > huge_method_threshold_;
   }
@@ -141,14 +135,6 @@
     return num_dalvik_instructions > large_method_threshold_;
   }
 
-  bool IsSmallMethod(size_t num_dalvik_instructions) const {
-    return num_dalvik_instructions > small_method_threshold_;
-  }
-
-  bool IsTinyMethod(size_t num_dalvik_instructions) const {
-    return num_dalvik_instructions > tiny_method_threshold_;
-  }
-
   size_t GetNumDexMethodsThreshold() const {
     return num_dex_methods_threshold_;
   }
@@ -209,13 +195,18 @@
     return implicit_suspend_checks_;
   }
 
-  // Are we compiling a boot image?
-  bool IsBootImage() const {
-    return image_type_ == ImageType::kBootImage || image_type_ == ImageType::kApexBootImage;
+  bool IsGeneratingImage() const {
+    return IsBootImage() || IsBootImageExtension() || IsAppImage();
   }
 
-  bool IsApexBootImage() const {
-    return image_type_ == ImageType::kApexBootImage;
+  // Are we compiling a boot image?
+  bool IsBootImage() const {
+    return image_type_ == ImageType::kBootImage;
+  }
+
+  // Are we compiling a boot image extension?
+  bool IsBootImageExtension() const {
+    return image_type_ == ImageType::kBootImageExtension;
   }
 
   bool IsBaseline() const {
@@ -364,6 +355,10 @@
     max_image_block_size_ = size;
   }
 
+  bool InitializeAppImageClasses() const {
+    return initialize_app_image_classes_;
+  }
+
   // Is `boot_image_filename` the name of a core image (small boot
   // image used for ART testing only)?
   static bool IsCoreImageFilename(const std::string& boot_image_filename);
@@ -375,8 +370,6 @@
   CompilerFilter::Filter compiler_filter_;
   size_t huge_method_threshold_;
   size_t large_method_threshold_;
-  size_t small_method_threshold_;
-  size_t tiny_method_threshold_;
   size_t num_dex_methods_threshold_;
   size_t inline_max_code_units_;
 
@@ -449,6 +442,9 @@
   // profile.
   bool resolve_startup_const_strings_;
 
+  // Whether we attempt to run class initializers for app image classes.
+  bool initialize_app_image_classes_;
+
   // When running profile-guided compilation, check that methods intended to be compiled end
   // up compiled and are not punted.
   ProfileMethodsCheck check_profiled_methods_;
diff --git a/compiler/driver/compiler_options_map-inl.h b/compiler/driver/compiler_options_map-inl.h
index 7e2a64b..e8a425d 100644
--- a/compiler/driver/compiler_options_map-inl.h
+++ b/compiler/driver/compiler_options_map-inl.h
@@ -45,8 +45,6 @@
   }
   map.AssignIfExists(Base::HugeMethodMaxThreshold, &options->huge_method_threshold_);
   map.AssignIfExists(Base::LargeMethodMaxThreshold, &options->large_method_threshold_);
-  map.AssignIfExists(Base::SmallMethodMaxThreshold, &options->small_method_threshold_);
-  map.AssignIfExists(Base::TinyMethodMaxThreshold, &options->tiny_method_threshold_);
   map.AssignIfExists(Base::NumDexMethodsThreshold, &options->num_dex_methods_threshold_);
   map.AssignIfExists(Base::InlineMaxCodeUnitsThreshold, &options->inline_max_code_units_);
   map.AssignIfExists(Base::GenerateDebugInfo, &options->generate_debug_info_);
@@ -81,6 +79,7 @@
     options->count_hotness_in_compiled_code_ = true;
   }
   map.AssignIfExists(Base::ResolveStartupConstStrings, &options->resolve_startup_const_strings_);
+  map.AssignIfExists(Base::InitializeAppImageClasses, &options->initialize_app_image_classes_);
   if (map.Exists(Base::CheckProfiledMethods)) {
     options->check_profiled_methods_ = *map.Get(Base::CheckProfiledMethods);
   }
@@ -117,12 +116,6 @@
       .Define("--large-method-max=_")
           .template WithType<unsigned int>()
           .IntoKey(Map::LargeMethodMaxThreshold)
-      .Define("--small-method-max=_")
-          .template WithType<unsigned int>()
-          .IntoKey(Map::SmallMethodMaxThreshold)
-      .Define("--tiny-method-max=_")
-          .template WithType<unsigned int>()
-          .IntoKey(Map::TinyMethodMaxThreshold)
       .Define("--num-dex-methods=_")
           .template WithType<unsigned int>()
           .IntoKey(Map::NumDexMethodsThreshold)
@@ -200,6 +193,11 @@
           .WithValueMap({{"false", false}, {"true", true}})
           .IntoKey(Map::ResolveStartupConstStrings)
 
+      .Define("--initialize-app-image-classes=_")
+          .template WithType<bool>()
+          .WithValueMap({{"false", false}, {"true", true}})
+          .IntoKey(Map::InitializeAppImageClasses)
+
       .Define("--verbose-methods=_")
           .template WithType<ParseStringList<','>>()
           .IntoKey(Map::VerboseMethods)
diff --git a/compiler/driver/compiler_options_map.def b/compiler/driver/compiler_options_map.def
index 0a9c873..df06bd8 100644
--- a/compiler/driver/compiler_options_map.def
+++ b/compiler/driver/compiler_options_map.def
@@ -40,8 +40,6 @@
 COMPILER_OPTIONS_KEY (Unit,                        PIC)
 COMPILER_OPTIONS_KEY (unsigned int,                HugeMethodMaxThreshold)
 COMPILER_OPTIONS_KEY (unsigned int,                LargeMethodMaxThreshold)
-COMPILER_OPTIONS_KEY (unsigned int,                SmallMethodMaxThreshold)
-COMPILER_OPTIONS_KEY (unsigned int,                TinyMethodMaxThreshold)
 COMPILER_OPTIONS_KEY (unsigned int,                NumDexMethodsThreshold)
 COMPILER_OPTIONS_KEY (unsigned int,                InlineMaxCodeUnitsThreshold)
 COMPILER_OPTIONS_KEY (bool,                        GenerateDebugInfo)
@@ -53,6 +51,7 @@
 COMPILER_OPTIONS_KEY (bool,                        AbortOnHardVerifierFailure)
 COMPILER_OPTIONS_KEY (bool,                        AbortOnSoftVerifierFailure)
 COMPILER_OPTIONS_KEY (bool,                        ResolveStartupConstStrings, false)
+COMPILER_OPTIONS_KEY (bool,                        InitializeAppImageClasses, false)
 COMPILER_OPTIONS_KEY (std::string,                 DumpInitFailures)
 COMPILER_OPTIONS_KEY (std::string,                 DumpCFG)
 COMPILER_OPTIONS_KEY (Unit,                        DumpCFGAppend)
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 1957c82..0f12457 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -115,7 +115,7 @@
   }
 }
 
-extern "C" void* jit_load() {
+extern "C" JitCompilerInterface* jit_load() {
   VLOG(jit) << "Create jit compiler";
   auto* const jit_compiler = JitCompiler::Create();
   CHECK(jit_compiler != nullptr);
@@ -123,49 +123,30 @@
   return jit_compiler;
 }
 
-extern "C" void jit_unload(void* handle) {
-  DCHECK(handle != nullptr);
-  delete reinterpret_cast<JitCompiler*>(handle);
-}
-
-extern "C" bool jit_compile_method(
-    void* handle, ArtMethod* method, Thread* self, bool baseline, bool osr)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
-  DCHECK(jit_compiler != nullptr);
-  return jit_compiler->CompileMethod(self, method, baseline, osr);
-}
-
-extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
-  DCHECK(jit_compiler != nullptr);
-  const CompilerOptions& compiler_options = jit_compiler->GetCompilerOptions();
+void JitCompiler::TypesLoaded(mirror::Class** types, size_t count) {
+  const CompilerOptions& compiler_options = GetCompilerOptions();
   if (compiler_options.GetGenerateDebugInfo()) {
+    InstructionSet isa = compiler_options.GetInstructionSet();
+    const InstructionSetFeatures* features = compiler_options.GetInstructionSetFeatures();
     const ArrayRef<mirror::Class*> types_array(types, count);
-    std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
-        kRuntimeISA, compiler_options.GetInstructionSetFeatures(), types_array);
-    // We never free debug info for types, so we don't need to provide a handle
-    // (which would have been otherwise used as identifier to remove it later).
-    AddNativeDebugInfoForJit(Thread::Current(),
-                             /*code_ptr=*/ nullptr,
-                             elf_file,
-                             debug::PackElfFileForJIT,
-                             compiler_options.GetInstructionSet(),
-                             compiler_options.GetInstructionSetFeatures());
+    std::vector<uint8_t> elf_file =
+        debug::WriteDebugElfFileForClasses(isa, features, types_array);
+
+    // NB: Don't allow packing since it would remove non-backtrace data.
+    MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+    AddNativeDebugInfoForJit(/*code_ptr=*/ nullptr, elf_file, /*allow_packing=*/ false);
   }
 }
 
-extern "C" void jit_update_options(void* handle) {
-  JitCompiler* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
-  DCHECK(jit_compiler != nullptr);
-  jit_compiler->ParseCompilerOptions();
+bool JitCompiler::GenerateDebugInfo() {
+  return GetCompilerOptions().GetGenerateDebugInfo();
 }
 
-extern "C" bool jit_generate_debug_info(void* handle) {
-  JitCompiler* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
-  DCHECK(jit_compiler != nullptr);
-  return jit_compiler->GetCompilerOptions().GetGenerateDebugInfo();
+std::vector<uint8_t> JitCompiler::PackElfFileForJIT(ArrayRef<const JITCodeEntry*> elf_files,
+                                                    ArrayRef<const void*> removed_symbols,
+                                                    bool compress,
+                                                    /*out*/ size_t* num_symbols) {
+  return debug::PackElfFileForJIT(elf_files, removed_symbols, compress, num_symbols);
 }
 
 JitCompiler::JitCompiler() {
@@ -181,8 +162,11 @@
   }
 }
 
-bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool baseline, bool osr) {
-  SCOPED_TRACE << "JIT compiling " << method->PrettyMethod();
+bool JitCompiler::CompileMethod(
+    Thread* self, JitMemoryRegion* region, ArtMethod* method, bool baseline, bool osr) {
+  SCOPED_TRACE << "JIT compiling "
+               << method->PrettyMethod()
+               << " (baseline=" << baseline << ", osr=" << osr << ")";
 
   DCHECK(!method->IsProxyMethod());
   DCHECK(method->GetDeclaringClass()->IsResolved());
@@ -198,7 +182,8 @@
     TimingLogger::ScopedTiming t2("Compiling", &logger);
     JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
     uint64_t start_ns = NanoTime();
-    success = compiler_->JitCompile(self, code_cache, method, baseline, osr, jit_logger_.get());
+    success = compiler_->JitCompile(
+        self, code_cache, region, method, baseline, osr, jit_logger_.get());
     uint64_t duration_ns = NanoTime() - start_ns;
     VLOG(jit) << "Compilation of "
               << method->PrettyMethod()
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index d008de4..09de1f8 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -19,10 +19,11 @@
 
 #include "base/mutex.h"
 
+#include "jit/jit.h"
+
 namespace art {
 
 class ArtMethod;
-class CompiledMethod;
 class Compiler;
 class CompilerOptions;
 class Thread;
@@ -30,21 +31,32 @@
 namespace jit {
 
 class JitLogger;
+class JitMemoryRegion;
 
-class JitCompiler {
+class JitCompiler : public JitCompilerInterface {
  public:
   static JitCompiler* Create();
   virtual ~JitCompiler();
 
   // Compilation entrypoint. Returns whether the compilation succeeded.
-  bool CompileMethod(Thread* self, ArtMethod* method, bool baseline, bool osr)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+  bool CompileMethod(
+      Thread* self, JitMemoryRegion* region, ArtMethod* method, bool baseline, bool osr)
+      REQUIRES_SHARED(Locks::mutator_lock_) override;
 
   const CompilerOptions& GetCompilerOptions() const {
     return *compiler_options_.get();
   }
 
-  void ParseCompilerOptions();
+  bool GenerateDebugInfo() override;
+
+  void ParseCompilerOptions() override;
+
+  void TypesLoaded(mirror::Class**, size_t count) REQUIRES_SHARED(Locks::mutator_lock_) override;
+
+  std::vector<uint8_t> PackElfFileForJIT(ArrayRef<const JITCodeEntry*> elf_files,
+                                         ArrayRef<const void*> removed_symbols,
+                                         bool compress,
+                                         /*out*/ size_t* num_symbols) override;
 
  private:
   std::unique_ptr<CompilerOptions> compiler_options_;
diff --git a/compiler/jni/jni_cfi_test.cc b/compiler/jni/jni_cfi_test.cc
index b19a2b8..cec94c9 100644
--- a/compiler/jni/jni_cfi_test.cc
+++ b/compiler/jni/jni_cfi_test.cc
@@ -142,14 +142,6 @@
 TEST_ISA(kX86_64)
 #endif
 
-#ifdef ART_ENABLE_CODEGEN_mips
-TEST_ISA(kMips)
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_mips64
-TEST_ISA(kMips64)
-#endif
-
 #endif  // ART_TARGET_ANDROID
 
 }  // namespace art
diff --git a/compiler/jni/jni_cfi_test_expected.inc b/compiler/jni/jni_cfi_test_expected.inc
index d641fe4..489ae00 100644
--- a/compiler/jni/jni_cfi_test_expected.inc
+++ b/compiler/jni/jni_cfi_test_expected.inc
@@ -328,146 +328,3 @@
 // 0x0000007f: .cfi_restore_state
 // 0x0000007f: .cfi_def_cfa_offset: 128
 
-static constexpr uint8_t expected_asm_kMips[] = {
-    0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xBE, 0xAF,
-    0x34, 0x00, 0xB7, 0xAF, 0x30, 0x00, 0xB6, 0xAF, 0x2C, 0x00, 0xB5, 0xAF,
-    0x28, 0x00, 0xB4, 0xAF, 0x24, 0x00, 0xB3, 0xAF, 0x20, 0x00, 0xB2, 0xAF,
-    0x00, 0x00, 0xA4, 0xAF, 0x44, 0x00, 0xA5, 0xAF, 0x48, 0x00, 0xA8, 0xE7,
-    0x4C, 0x00, 0xA6, 0xAF, 0x50, 0x00, 0xA7, 0xAF, 0xE0, 0xFF, 0xBD, 0x27,
-    0x20, 0x00, 0xBD, 0x27, 0x20, 0x00, 0xB2, 0x8F, 0x24, 0x00, 0xB3, 0x8F,
-    0x28, 0x00, 0xB4, 0x8F, 0x2C, 0x00, 0xB5, 0x8F, 0x30, 0x00, 0xB6, 0x8F,
-    0x34, 0x00, 0xB7, 0x8F, 0x38, 0x00, 0xBE, 0x8F, 0x3C, 0x00, 0xBF, 0x8F,
-    0x09, 0x00, 0xE0, 0x03, 0x40, 0x00, 0xBD, 0x27,
-};
-static constexpr uint8_t expected_cfi_kMips[] = {
-    0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x9E, 0x02, 0x44, 0x97, 0x03,
-    0x44, 0x96, 0x04, 0x44, 0x95, 0x05, 0x44, 0x94, 0x06, 0x44, 0x93, 0x07,
-    0x44, 0x92, 0x08, 0x58, 0x0E, 0x60, 0x44, 0x0E, 0x40, 0x0A, 0x44, 0xD2,
-    0x44, 0xD3, 0x44, 0xD4, 0x44, 0xD5, 0x44, 0xD6, 0x44, 0xD7, 0x44, 0xDE,
-    0x44, 0xDF, 0x48, 0x0E, 0x00, 0x0B, 0x0E, 0x40,
-};
-// 0x00000000: addiu r29, r29, -64
-// 0x00000004: .cfi_def_cfa_offset: 64
-// 0x00000004: sw r31, +60(r29)
-// 0x00000008: .cfi_offset: r31 at cfa-4
-// 0x00000008: sw r30, +56(r29)
-// 0x0000000c: .cfi_offset: r30 at cfa-8
-// 0x0000000c: sw r23, +52(r29)
-// 0x00000010: .cfi_offset: r23 at cfa-12
-// 0x00000010: sw r22, +48(r29)
-// 0x00000014: .cfi_offset: r22 at cfa-16
-// 0x00000014: sw r21, +44(r29)
-// 0x00000018: .cfi_offset: r21 at cfa-20
-// 0x00000018: sw r20, +40(r29)
-// 0x0000001c: .cfi_offset: r20 at cfa-24
-// 0x0000001c: sw r19, +36(r29)
-// 0x00000020: .cfi_offset: r19 at cfa-28
-// 0x00000020: sw r18, +32(r29)
-// 0x00000024: .cfi_offset: r18 at cfa-32
-// 0x00000024: sw r4, +0(r29)
-// 0x00000028: sw r5, +68(r29)
-// 0x0000002c: swc1 f8, +72(r29)
-// 0x00000030: sw r6, +76(r29)
-// 0x00000034: sw r7, +80(r29)
-// 0x00000038: addiu r29, r29, -32
-// 0x0000003c: .cfi_def_cfa_offset: 96
-// 0x0000003c: addiu r29, r29, 32
-// 0x00000040: .cfi_def_cfa_offset: 64
-// 0x00000040: .cfi_remember_state
-// 0x00000040: lw r18, +32(r29)
-// 0x00000044: .cfi_restore: r18
-// 0x00000044: lw r19, +36(r29)
-// 0x00000048: .cfi_restore: r19
-// 0x00000048: lw r20, +40(r29)
-// 0x0000004c: .cfi_restore: r20
-// 0x0000004c: lw r21, +44(r29)
-// 0x00000050: .cfi_restore: r21
-// 0x00000050: lw r22, +48(r29)
-// 0x00000054: .cfi_restore: r22
-// 0x00000054: lw r23, +52(r29)
-// 0x00000058: .cfi_restore: r23
-// 0x00000058: lw r30, +56(r29)
-// 0x0000005c: .cfi_restore: r30
-// 0x0000005c: lw r31, +60(r29)
-// 0x00000060: .cfi_restore: r31
-// 0x00000060: jr r31
-// 0x00000064: addiu r29, r29, 64
-// 0x00000068: .cfi_def_cfa_offset: 0
-// 0x00000068: .cfi_restore_state
-// 0x00000068: .cfi_def_cfa_offset: 64
-
-static constexpr uint8_t expected_asm_kMips64[] = {
-    0x90, 0xFF, 0xBD, 0x67, 0x68, 0x00, 0xBF, 0xFF, 0x60, 0x00, 0xBE, 0xFF,
-    0x58, 0x00, 0xBC, 0xFF, 0x50, 0x00, 0xB7, 0xFF, 0x48, 0x00, 0xB6, 0xFF,
-    0x40, 0x00, 0xB5, 0xFF, 0x38, 0x00, 0xB4, 0xFF, 0x30, 0x00, 0xB3, 0xFF,
-    0x28, 0x00, 0xB2, 0xFF, 0x00, 0x00, 0xA4, 0xFF, 0x78, 0x00, 0xA5, 0xAF,
-    0x7C, 0x00, 0xAE, 0xE7, 0x80, 0x00, 0xA7, 0xAF, 0x84, 0x00, 0xA8, 0xAF,
-    0xE0, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBD, 0x67, 0x28, 0x00, 0xB2, 0xDF,
-    0x30, 0x00, 0xB3, 0xDF, 0x38, 0x00, 0xB4, 0xDF, 0x40, 0x00, 0xB5, 0xDF,
-    0x48, 0x00, 0xB6, 0xDF, 0x50, 0x00, 0xB7, 0xDF, 0x58, 0x00, 0xBC, 0xDF,
-    0x60, 0x00, 0xBE, 0xDF, 0x68, 0x00, 0xBF, 0xDF, 0x70, 0x00, 0xBD, 0x67,
-    0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00,
-};
-static constexpr uint8_t expected_cfi_kMips64[] = {
-    0x44, 0x0E, 0x70, 0x44, 0x9F, 0x02, 0x44, 0x9E, 0x04, 0x44, 0x9C, 0x06,
-    0x44, 0x97, 0x08, 0x44, 0x96, 0x0A, 0x44, 0x95, 0x0C, 0x44, 0x94, 0x0E,
-    0x44, 0x93, 0x10, 0x44, 0x92, 0x12, 0x58, 0x0E, 0x90, 0x01, 0x44, 0x0E,
-    0x70, 0x0A, 0x44, 0xD2, 0x44, 0xD3, 0x44, 0xD4, 0x44, 0xD5, 0x44, 0xD6,
-    0x44, 0xD7, 0x44, 0xDC, 0x44, 0xDE, 0x44, 0xDF, 0x44, 0x0E, 0x00, 0x48,
-    0x0B, 0x0E, 0x70,
-};
-// 0x00000000: daddiu r29, r29, -112
-// 0x00000004: .cfi_def_cfa_offset: 112
-// 0x00000004: sd r31, +104(r29)
-// 0x00000008: .cfi_offset: r31 at cfa-8
-// 0x00000008: sd r30, +96(r29)
-// 0x0000000c: .cfi_offset: r30 at cfa-16
-// 0x0000000c: sd r28, +88(r29)
-// 0x00000010: .cfi_offset: r28 at cfa-24
-// 0x00000010: sd r23, +80(r29)
-// 0x00000014: .cfi_offset: r23 at cfa-32
-// 0x00000014: sd r22, +72(r29)
-// 0x00000018: .cfi_offset: r22 at cfa-40
-// 0x00000018: sd r21, +64(r29)
-// 0x0000001c: .cfi_offset: r21 at cfa-48
-// 0x0000001c: sd r20, +56(r29)
-// 0x00000020: .cfi_offset: r20 at cfa-56
-// 0x00000020: sd r19, +48(r29)
-// 0x00000024: .cfi_offset: r19 at cfa-64
-// 0x00000024: sd r18, +40(r29)
-// 0x00000028: .cfi_offset: r18 at cfa-72
-// 0x00000028: sd r4, +0(r29)
-// 0x0000002c: sw r5, +120(r29)
-// 0x00000030: swc1 f14, +124(r29)
-// 0x00000034: sw r7, +128(r29)
-// 0x00000038: sw r8, +132(r29)
-// 0x0000003c: daddiu r29, r29, -32
-// 0x00000040: .cfi_def_cfa_offset: 144
-// 0x00000040: daddiu r29, r29, 32
-// 0x00000044: .cfi_def_cfa_offset: 112
-// 0x00000044: .cfi_remember_state
-// 0x00000044: ld r18, +40(r29)
-// 0x00000048: .cfi_restore: r18
-// 0x00000048: ld r19, +48(r29)
-// 0x0000004c: .cfi_restore: r19
-// 0x0000004c: ld r20, +56(r29)
-// 0x00000050: .cfi_restore: r20
-// 0x00000050: ld r21, +64(r29)
-// 0x00000054: .cfi_restore: r21
-// 0x00000054: ld r22, +72(r29)
-// 0x00000058: .cfi_restore: r22
-// 0x00000058: ld r23, +80(r29)
-// 0x0000005c: .cfi_restore: r23
-// 0x0000005c: ld r28, +88(r29)
-// 0x00000060: .cfi_restore: r28
-// 0x00000060: ld r30, +96(r29)
-// 0x00000064: .cfi_restore: r30
-// 0x00000064: ld r31, +104(r29)
-// 0x00000068: .cfi_restore: r31
-// 0x00000068: daddiu r29, r29, 112
-// 0x0000006c: .cfi_def_cfa_offset: 0
-// 0x0000006c: jr r31
-// 0x00000070: nop
-// 0x00000074: .cfi_restore_state
-// 0x00000074: .cfi_def_cfa_offset: 112
-
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 0d0f8a0..405c9ec 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -45,10 +45,25 @@
   return count + 1;
 }
 
+// Note: JNI name mangling "_" -> "_1".
+extern "C" JNIEXPORT jint JNICALL Java_MyClassNatives_bar_1Fast(JNIEnv*, jobject, jint count) {
+  return count + 1;
+}
+
 extern "C" JNIEXPORT jint JNICALL Java_MyClassNatives_sbar(JNIEnv*, jclass, jint count) {
   return count + 1;
 }
 
+// Note: JNI name mangling "_" -> "_1".
+extern "C" JNIEXPORT jint JNICALL Java_MyClassNatives_sbar_1Fast(JNIEnv*, jclass, jint count) {
+  return count + 1;
+}
+
+// Note: JNI name mangling "_" -> "_1".
+extern "C" JNIEXPORT jint JNICALL Java_MyClassNatives_sbar_1Critical(jint count) {
+  return count + 1;
+}
+
 // TODO: In the Baker read barrier configuration, add checks to ensure
 // the Marking Register's value is correct.
 
@@ -71,6 +86,11 @@
   return gCurrentJni == static_cast<uint32_t>(JniKind::kCritical);
 }
 
+// Is the current native method under test @FastNative?
+static bool IsCurrentJniFast() {
+  return gCurrentJni == static_cast<uint32_t>(JniKind::kFast);
+}
+
 // Is the current native method a plain-old non-annotated native?
 static bool IsCurrentJniNormal() {
   return gCurrentJni == static_cast<uint32_t>(JniKind::kNormal);
@@ -147,21 +167,6 @@
       (jni_type_traits<Arg>::is_ref ? 1 : 0) + count_refs_helper<Args ...>::value;
 };
 
-template <typename T, T fn>
-struct count_refs_fn_helper;
-
-template <typename R, typename ... Args, R fn(Args...)>
-struct count_refs_fn_helper<R(Args...), fn> : public count_refs_helper<Args...> {};
-
-// Given a function type 'T' figure out how many of the parameter types are a reference.
-// -- The implicit jclass and thisObject also count as 1 reference.
-//
-// Fields:
-// * value - the result counting # of refs
-// * value_type - the type of value (size_t)
-template <typename T, T fn>
-struct count_refs : public count_refs_fn_helper<T, fn> {};
-
 // Base case: No parameters = 0 refs.
 size_t count_nonnull_refs_helper() {
   return 0;
@@ -200,10 +205,10 @@
   return count_nonnull_refs_helper(args...);
 }
 
-template <typename T, T fn>
+template <typename T, T* fn>
 struct remove_extra_parameters_helper;
 
-template <typename R, typename Arg1, typename Arg2, typename ... Args, R fn(Arg1, Arg2, Args...)>
+template <typename R, typename Arg1, typename Arg2, typename ... Args, R (*fn)(Arg1, Arg2, Args...)>
 struct remove_extra_parameters_helper<R(Arg1, Arg2, Args...), fn> {
   // Note: Do not use Args&& here to maintain C-style parameter types.
   static R apply(Args... args) {
@@ -216,7 +221,7 @@
 // Given a function 'fn' create a function 'apply' which will omit the JNIEnv/jklass parameters
 //
 // i.e. if fn(JNIEnv*,jklass,a,b,c,d,e...) then apply(a,b,c,d,e,...)
-template <typename T, T fn>
+template <typename T, T* fn>
 struct jni_remove_extra_parameters : public remove_extra_parameters_helper<T, fn> {};
 
 class JniCompilerTest : public CommonCompilerTest {
@@ -367,12 +372,10 @@
   void MaxParamNumberImpl();
   void WithoutImplementationImpl();
   void WithoutImplementationRefReturnImpl();
+  void StaticWithoutImplementationImpl();
   void StackArgsIntsFirstImpl();
   void StackArgsFloatsFirstImpl();
   void StackArgsMixedImpl();
-#if defined(__mips__) && defined(__LP64__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
-  void StackArgsSignExtendedMips64Impl();
-#endif
 
   void NormalNativeImpl();
   void FastNativeImpl();
@@ -391,9 +394,7 @@
 
 // Test the normal compiler and normal generic JNI only.
 // The following features are unsupported in @FastNative:
-// 1) JNI stubs (lookup via dlsym) when methods aren't explicitly registered
-// 2) synchronized keyword
-// -- TODO: We can support (1) if we remove the mutator lock assert during stub lookup.
+// 1) synchronized keyword
 # define JNI_TEST_NORMAL_ONLY(TestName)          \
   TEST_F(JniCompilerTest, TestName ## NormalCompiler) { \
     ScopedCheckHandleScope top_handle_scope_check;  \
@@ -575,11 +576,11 @@
 
 #define EXPECT_NUM_STACK_REFERENCES(val1, val2) expectNumStackReferences(val1, val2)
 
-template <typename T, T fn>
+template <typename T, T* fn>
 struct make_jni_test_decorator;
 
 // Decorator for "static" JNI callbacks.
-template <typename R, typename ... Args, R fn(JNIEnv*, jclass, Args...)>
+template <typename R, typename ... Args, R (*fn)(JNIEnv*, jclass, Args...)>
 struct make_jni_test_decorator<R(JNIEnv*, jclass kls, Args...), fn> {
   static R apply(JNIEnv* env, jclass kls, Args ... args) {
     EXPECT_THREAD_STATE_FOR_CURRENT_JNI();
@@ -594,7 +595,7 @@
 };
 
 // Decorator for instance JNI callbacks.
-template <typename R, typename ... Args, R fn(JNIEnv*, jobject, Args...)>
+template <typename R, typename ... Args, R (*fn)(JNIEnv*, jobject, Args...)>
 struct make_jni_test_decorator<R(JNIEnv*, jobject, Args...), fn> {
   static R apply(JNIEnv* env, jobject thisObj, Args ... args) {
     EXPECT_THREAD_STATE_FOR_CURRENT_JNI();
@@ -630,8 +631,8 @@
 #define NORMAL_JNI_ONLY_NOWRAP(func) \
     ({ ASSERT_TRUE(IsCurrentJniNormal()); reinterpret_cast<void*>(&(func)); })
 // Same as above, but with nullptr. When we want to test the stub functionality.
-#define NORMAL_JNI_ONLY_NULLPTR \
-    ({ ASSERT_TRUE(IsCurrentJniNormal()); nullptr; })
+#define NORMAL_OR_FAST_JNI_ONLY_NULLPTR \
+    ({ ASSERT_TRUE(IsCurrentJniNormal() || IsCurrentJniFast()); nullptr; })
 
 
 int gJava_MyClassNatives_foo_calls[kJniKindCount] = {};
@@ -654,8 +655,8 @@
 JNI_TEST(CompileAndRunNoArgMethod)
 
 void JniCompilerTest::CompileAndRunIntMethodThroughStubImpl() {
-  SetUpForTest(false, "bar", "(I)I", NORMAL_JNI_ONLY_NULLPTR);
-  // calling through stub will link with &Java_MyClassNatives_bar
+  SetUpForTest(false, "bar", "(I)I", NORMAL_OR_FAST_JNI_ONLY_NULLPTR);
+  // calling through stub will link with &Java_MyClassNatives_bar{,_1Fast}
 
   std::string reason;
   ASSERT_TRUE(Runtime::Current()->GetJavaVM()->
@@ -666,12 +667,12 @@
   EXPECT_EQ(25, result);
 }
 
-// TODO: Support @FastNative and @CriticalNative through stubs.
-JNI_TEST_NORMAL_ONLY(CompileAndRunIntMethodThroughStub)
+// Note: @CriticalNative is only for static methods.
+JNI_TEST(CompileAndRunIntMethodThroughStub)
 
 void JniCompilerTest::CompileAndRunStaticIntMethodThroughStubImpl() {
-  SetUpForTest(true, "sbar", "(I)I", NORMAL_JNI_ONLY_NULLPTR);
-  // calling through stub will link with &Java_MyClassNatives_sbar
+  SetUpForTest(true, "sbar", "(I)I", nullptr);
+  // calling through stub will link with &Java_MyClassNatives_sbar{,_1Fast,_1Critical}
 
   std::string reason;
   ASSERT_TRUE(Runtime::Current()->GetJavaVM()->
@@ -682,8 +683,7 @@
   EXPECT_EQ(43, result);
 }
 
-// TODO: Support @FastNative and @CriticalNative through stubs.
-JNI_TEST_NORMAL_ONLY(CompileAndRunStaticIntMethodThroughStub)
+JNI_TEST_CRITICAL(CompileAndRunStaticIntMethodThroughStub)
 
 int gJava_MyClassNatives_fooI_calls[kJniKindCount] = {};
 jint Java_MyClassNatives_fooI(JNIEnv*, jobject, jint x) {
@@ -1912,7 +1912,7 @@
   // This will lead to error messages in the log.
   ScopedLogSeverity sls(LogSeverity::FATAL);
 
-  SetUpForTest(false, "withoutImplementation", "()V", NORMAL_JNI_ONLY_NULLPTR);
+  SetUpForTest(false, "withoutImplementation", "()V", NORMAL_OR_FAST_JNI_ONLY_NULLPTR);
 
   env_->CallVoidMethod(jobj_, jmethod_);
 
@@ -1920,9 +1920,7 @@
   EXPECT_TRUE(env_->ExceptionCheck() == JNI_TRUE);
 }
 
-// TODO: Don't test @FastNative here since it goes through a stub lookup (unsupported) which would
-// normally fail with an exception, but fails with an assert.
-JNI_TEST_NORMAL_ONLY(WithoutImplementation)
+JNI_TEST(WithoutImplementation)
 
 void JniCompilerTest::WithoutImplementationRefReturnImpl() {
   // This will lead to error messages in the log.
@@ -1931,7 +1929,7 @@
   SetUpForTest(false,
                "withoutImplementationRefReturn",
                "()Ljava/lang/Object;",
-               NORMAL_JNI_ONLY_NULLPTR);
+               NORMAL_OR_FAST_JNI_ONLY_NULLPTR);
 
   env_->CallObjectMethod(jobj_, jmethod_);
 
@@ -1939,8 +1937,21 @@
   EXPECT_TRUE(env_->ExceptionCheck() == JNI_TRUE);
 }
 
-// TODO: Should work for @FastNative too.
-JNI_TEST_NORMAL_ONLY(WithoutImplementationRefReturn)
+JNI_TEST(WithoutImplementationRefReturn)
+
+void JniCompilerTest::StaticWithoutImplementationImpl() {
+  // This will lead to error messages in the log.
+  ScopedLogSeverity sls(LogSeverity::FATAL);
+
+  SetUpForTest(true, "staticWithoutImplementation", "()V", nullptr);
+
+  env_->CallStaticVoidMethod(jklass_, jmethod_);
+
+  EXPECT_TRUE(Thread::Current()->IsExceptionPending());
+  EXPECT_TRUE(env_->ExceptionCheck() == JNI_TRUE);
+}
+
+JNI_TEST_CRITICAL(StaticWithoutImplementation)
 
 void Java_MyClassNatives_stackArgsIntsFirst(JNIEnv*, jclass, jint i1, jint i2, jint i3,
                                             jint i4, jint i5, jint i6, jint i7, jint i8, jint i9,
@@ -2151,44 +2162,6 @@
 
 JNI_TEST_CRITICAL(StackArgsMixed)
 
-#if defined(__mips__) && defined(__LP64__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
-// Function will fetch the last argument passed from caller that is now on top of the stack and
-// return it as a 8B long. That way we can test if the caller has properly sign-extended the
-// value when placing it on the stack.
-__attribute__((naked))
-jlong Java_MyClassNatives_getStackArgSignExtendedMips64(
-    JNIEnv*, jclass,                      // Arguments passed from caller
-    jint, jint, jint, jint, jint, jint,   // through regs a0 to a7.
-    jint) {                               // The last argument will be passed on the stack.
-  __asm__(
-      ".set noreorder\n\t"                // Just return and store 8 bytes from the top of the stack
-      "jr  $ra\n\t"                       // in v0 (in branch delay slot). This should be the last
-      "ld  $v0, 0($sp)\n\t");             // argument. It is a 32-bit int, but it should be sign
-                                          // extended and it occupies 64-bit location.
-}
-
-void JniCompilerTest::StackArgsSignExtendedMips64Impl() {
-  uint64_t ret;
-  SetUpForTest(true,
-               "getStackArgSignExtendedMips64",
-               "(IIIIIII)J",
-               // Don't use wrapper because this is raw assembly function.
-               reinterpret_cast<void*>(&Java_MyClassNatives_getStackArgSignExtendedMips64));
-
-  // Mips64 ABI requires that arguments passed through stack be sign-extended 8B slots.
-  // First 8 arguments are passed through registers.
-  // Final argument's value is 7. When sign-extended, higher stack bits should be 0.
-  ret = env_->CallStaticLongMethod(jklass_, jmethod_, 1, 2, 3, 4, 5, 6, 7);
-  EXPECT_EQ(High32Bits(ret), static_cast<uint32_t>(0));
-
-  // Final argument's value is -8.  When sign-extended, higher stack bits should be 0xffffffff.
-  ret = env_->CallStaticLongMethod(jklass_, jmethod_, 1, 2, 3, 4, 5, 6, -8);
-  EXPECT_EQ(High32Bits(ret), static_cast<uint32_t>(0xffffffff));
-}
-
-JNI_TEST(StackArgsSignExtendedMips64)
-#endif
-
 void Java_MyClassNatives_normalNative(JNIEnv*, jclass) {
   // Intentionally left empty.
 }
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index 42a4603..e06c914 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -18,6 +18,7 @@
 
 #include <android-base/logging.h>
 
+#include "arch/arm/jni_frame_arm.h"
 #include "arch/instruction_set.h"
 #include "base/macros.h"
 #include "handle_scope-inl.h"
@@ -38,7 +39,7 @@
   R0, R1, R2, R3
 };
 
-static const size_t kJniArgumentRegisterCount = arraysize(kJniArgumentRegisters);
+static_assert(kJniArgumentRegisterCount == arraysize(kJniArgumentRegisters));
 
 //
 // Managed calling convention constants.
@@ -74,6 +75,7 @@
     ArmManagedRegister::FromCoreRegister(R8),
     ArmManagedRegister::FromCoreRegister(R10),
     ArmManagedRegister::FromCoreRegister(R11),
+    ArmManagedRegister::FromCoreRegister(LR),
     // Hard float registers.
     ArmManagedRegister::FromSRegister(S16),
     ArmManagedRegister::FromSRegister(S17),
@@ -93,37 +95,75 @@
     ArmManagedRegister::FromSRegister(S31)
 };
 
-static constexpr uint32_t CalculateCoreCalleeSpillMask() {
+template <size_t size>
+static constexpr uint32_t CalculateCoreCalleeSpillMask(
+    const ManagedRegister (&callee_saves)[size]) {
   // LR is a special callee save which is not reported by CalleeSaveRegisters().
-  uint32_t result = 1 << LR;
-  for (auto&& r : kCalleeSaveRegisters) {
+  uint32_t result = 0u;
+  for (auto&& r : callee_saves) {
     if (r.AsArm().IsCoreRegister()) {
-      result |= (1 << r.AsArm().AsCoreRegister());
+      result |= (1u << r.AsArm().AsCoreRegister());
     }
   }
   return result;
 }
 
-static constexpr uint32_t CalculateFpCalleeSpillMask() {
-  uint32_t result = 0;
-  for (auto&& r : kCalleeSaveRegisters) {
+template <size_t size>
+static constexpr uint32_t CalculateFpCalleeSpillMask(const ManagedRegister (&callee_saves)[size]) {
+  uint32_t result = 0u;
+  for (auto&& r : callee_saves) {
     if (r.AsArm().IsSRegister()) {
-      result |= (1 << r.AsArm().AsSRegister());
+      result |= (1u << r.AsArm().AsSRegister());
     }
   }
   return result;
 }
 
-static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
-static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask();
+static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(kCalleeSaveRegisters);
+static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask(kCalleeSaveRegisters);
+
+static constexpr ManagedRegister kAapcsCalleeSaveRegisters[] = {
+    // Core registers.
+    ArmManagedRegister::FromCoreRegister(R4),
+    ArmManagedRegister::FromCoreRegister(R5),
+    ArmManagedRegister::FromCoreRegister(R6),
+    ArmManagedRegister::FromCoreRegister(R7),
+    ArmManagedRegister::FromCoreRegister(R8),
+    ArmManagedRegister::FromCoreRegister(R9),  // The platform register is callee-save on Android.
+    ArmManagedRegister::FromCoreRegister(R10),
+    ArmManagedRegister::FromCoreRegister(R11),
+    ArmManagedRegister::FromCoreRegister(LR),
+    // Hard float registers.
+    ArmManagedRegister::FromSRegister(S16),
+    ArmManagedRegister::FromSRegister(S17),
+    ArmManagedRegister::FromSRegister(S18),
+    ArmManagedRegister::FromSRegister(S19),
+    ArmManagedRegister::FromSRegister(S20),
+    ArmManagedRegister::FromSRegister(S21),
+    ArmManagedRegister::FromSRegister(S22),
+    ArmManagedRegister::FromSRegister(S23),
+    ArmManagedRegister::FromSRegister(S24),
+    ArmManagedRegister::FromSRegister(S25),
+    ArmManagedRegister::FromSRegister(S26),
+    ArmManagedRegister::FromSRegister(S27),
+    ArmManagedRegister::FromSRegister(S28),
+    ArmManagedRegister::FromSRegister(S29),
+    ArmManagedRegister::FromSRegister(S30),
+    ArmManagedRegister::FromSRegister(S31)
+};
+
+static constexpr uint32_t kAapcsCoreCalleeSpillMask =
+    CalculateCoreCalleeSpillMask(kAapcsCalleeSaveRegisters);
+static constexpr uint32_t kAapcsFpCalleeSpillMask =
+    CalculateFpCalleeSpillMask(kAapcsCalleeSaveRegisters);
 
 // Calling convention
 
-ManagedRegister ArmManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+ManagedRegister ArmManagedRuntimeCallingConvention::InterproceduralScratchRegister() const {
   return ArmManagedRegister::FromCoreRegister(IP);  // R12
 }
 
-ManagedRegister ArmJniCallingConvention::InterproceduralScratchRegister() {
+ManagedRegister ArmJniCallingConvention::InterproceduralScratchRegister() const {
   return ArmManagedRegister::FromCoreRegister(IP);  // R12
 }
 
@@ -179,11 +219,9 @@
 
 FrameOffset ArmManagedRuntimeCallingConvention::CurrentParamStackOffset() {
   CHECK(IsCurrentParamOnStack());
-  FrameOffset result =
-      FrameOffset(displacement_.Int32Value() +        // displacement
-                  kFramePointerSize +                 // Method*
-                  (itr_slots_ * kFramePointerSize));  // offset into in args
-  return result;
+  return FrameOffset(displacement_.Int32Value() +        // displacement
+                     kFramePointerSize +                 // Method*
+                     (itr_slots_ * kFramePointerSize));  // offset into in args
 }
 
 const ManagedRegisterEntrySpills& ArmManagedRuntimeCallingConvention::EntrySpills() {
@@ -252,6 +290,7 @@
   }
   return entry_spills_;
 }
+
 // JNI calling convention
 
 ArmJniCallingConvention::ArmJniCallingConvention(bool is_static,
@@ -321,7 +360,7 @@
     }
   }
 
-  if (cur_reg < kJniArgumentRegisterCount) {
+  if (cur_reg <= kJniArgumentRegisterCount) {
     // As a special case when, as a result of shifting (or not) there are no arguments on the stack,
     // we actually have 0 stack padding.
     //
@@ -347,53 +386,88 @@
 
 uint32_t ArmJniCallingConvention::CoreSpillMask() const {
   // Compute spill mask to agree with callee saves initialized in the constructor
-  return kCoreCalleeSpillMask;
+  return is_critical_native_ ? 0u : kCoreCalleeSpillMask;
 }
 
 uint32_t ArmJniCallingConvention::FpSpillMask() const {
-  return kFpCalleeSpillMask;
+  return is_critical_native_ ? 0u : kFpCalleeSpillMask;
 }
 
 ManagedRegister ArmJniCallingConvention::ReturnScratchRegister() const {
   return ArmManagedRegister::FromCoreRegister(R2);
 }
 
-size_t ArmJniCallingConvention::FrameSize() {
-  // Method*, LR and callee save area size, local reference segment state
+size_t ArmJniCallingConvention::FrameSize() const {
+  if (UNLIKELY(is_critical_native_)) {
+    CHECK(!SpillsMethod());
+    CHECK(!HasLocalReferenceSegmentState());
+    CHECK(!HasHandleScope());
+    CHECK(!SpillsReturnValue());
+    return 0u;  // There is no managed frame for @CriticalNative.
+  }
+
+  // Method*, callee save area size, local reference segment state
+  CHECK(SpillsMethod());
   const size_t method_ptr_size = static_cast<size_t>(kArmPointerSize);
-  const size_t lr_return_addr_size = kFramePointerSize;
   const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
-  size_t frame_data_size = method_ptr_size + lr_return_addr_size + callee_save_area_size;
+  size_t total_size = method_ptr_size + callee_save_area_size;
 
-  if (LIKELY(HasLocalReferenceSegmentState())) {
-    // local reference segment state
-    frame_data_size += kFramePointerSize;
-    // TODO: Probably better to use sizeof(IRTSegmentState) here...
-  }
+  CHECK(HasLocalReferenceSegmentState());
+  // local reference segment state
+  total_size += kFramePointerSize;
+  // TODO: Probably better to use sizeof(IRTSegmentState) here...
 
-  // References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header
-  const size_t handle_scope_size = HandleScope::SizeOf(kArmPointerSize, ReferenceCount());
-
-  size_t total_size = frame_data_size;
-  if (LIKELY(HasHandleScope())) {
-    // HandleScope is sometimes excluded.
-    total_size += handle_scope_size;                                 // handle scope size
-  }
+  CHECK(HasHandleScope());
+  total_size += HandleScope::SizeOf(kArmPointerSize, ReferenceCount());
 
   // Plus return value spill area size
+  CHECK(SpillsReturnValue());
   total_size += SizeOfReturnValue();
 
   return RoundUp(total_size, kStackAlignment);
 }
 
-size_t ArmJniCallingConvention::OutArgSize() {
-  // TODO: Identical to x86_64 except for also adding additional padding.
-  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize + padding_,
-                 kStackAlignment);
+size_t ArmJniCallingConvention::OutArgSize() const {
+  // Count param args, including JNIEnv* and jclass*; count 8-byte args twice.
+  size_t all_args = NumberOfExtraArgumentsForJni() + NumArgs() + NumLongOrDoubleArgs();
+  // Account for arguments passed through r0-r3. (No FP args, AAPCS32 is soft-float.)
+  size_t stack_args = all_args - std::min(kJniArgumentRegisterCount, all_args);
+  // The size of outgoing arguments.
+  size_t size = stack_args * kFramePointerSize + padding_;
+
+  // @CriticalNative can use tail call as all managed callee saves are preserved by AAPCS.
+  static_assert((kCoreCalleeSpillMask & ~kAapcsCoreCalleeSpillMask) == 0u);
+  static_assert((kFpCalleeSpillMask & ~kAapcsFpCalleeSpillMask) == 0u);
+
+  // For @CriticalNative, we can make a tail call if there are no stack args and the
+  // return type is not an FP type (otherwise we need to move the result to FP register).
+  DCHECK(!RequiresSmallResultTypeExtension());
+  if (is_critical_native_ && (size != 0u || GetShorty()[0] == 'F' || GetShorty()[0] == 'D')) {
+    size += kFramePointerSize;  // We need to spill LR with the args.
+  }
+  size_t out_args_size = RoundUp(size, kAapcsStackAlignment);
+  if (UNLIKELY(IsCriticalNative())) {
+    DCHECK_EQ(out_args_size, GetCriticalNativeOutArgsSize(GetShorty(), NumArgs() + 1u));
+  }
+  return out_args_size;
 }
 
 ArrayRef<const ManagedRegister> ArmJniCallingConvention::CalleeSaveRegisters() const {
-  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
+  if (UNLIKELY(IsCriticalNative())) {
+    if (UseTailCall()) {
+      return ArrayRef<const ManagedRegister>();  // Do not spill anything.
+    } else {
+      // Spill LR with out args.
+      static_assert((kCoreCalleeSpillMask >> LR) == 1u);  // Contains LR as the highest bit.
+      constexpr size_t lr_index = POPCOUNT(kCoreCalleeSpillMask) - 1u;
+      static_assert(kCalleeSaveRegisters[lr_index].Equals(
+                        ArmManagedRegister::FromCoreRegister(LR)));
+      return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters).SubArray(
+          /*pos*/ lr_index, /*length=*/ 1u);
+    }
+  } else {
+    return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
+  }
 }
 
 // JniCallingConvention ABI follows AAPCS where longs and doubles must occur
@@ -451,18 +525,27 @@
   return FrameOffset(offset);
 }
 
-size_t ArmJniCallingConvention::NumberOfOutgoingStackArgs() {
-  size_t static_args = HasSelfClass() ? 1 : 0;  // count jclass
-  // regular argument parameters and this
-  size_t param_args = NumArgs() + NumLongOrDoubleArgs();  // twice count 8-byte args
-  // XX: Why is the long/ordouble counted twice but not JNIEnv* ???
-  // count JNIEnv* less arguments in registers
-  size_t internal_args = (HasJniEnv() ? 1 : 0 /* jni env */);
-  size_t total_args = static_args + param_args + internal_args;
+ManagedRegister ArmJniCallingConvention::HiddenArgumentRegister() const {
+  CHECK(IsCriticalNative());
+  // R4 is neither managed callee-save, nor argument register, nor scratch register.
+  // (It is native callee-save but the value coming from managed code can be clobbered.)
+  // TODO: Change to static_assert; std::none_of should be constexpr since C++20.
+  DCHECK(std::none_of(kCalleeSaveRegisters,
+                      kCalleeSaveRegisters + std::size(kCalleeSaveRegisters),
+                      [](ManagedRegister callee_save) constexpr {
+                        return callee_save.Equals(ArmManagedRegister::FromCoreRegister(R4));
+                      }));
+  DCHECK(std::none_of(kJniArgumentRegisters,
+                      kJniArgumentRegisters + std::size(kJniArgumentRegisters),
+                      [](Register reg) { return reg == R4; }));
+  DCHECK(!InterproceduralScratchRegister().Equals(ArmManagedRegister::FromCoreRegister(R4)));
+  return ArmManagedRegister::FromCoreRegister(R4);
+}
 
-  return total_args - std::min(kJniArgumentRegisterCount, static_cast<size_t>(total_args));
-
-  // TODO: Very similar to x86_64 except for the return pc.
+// Whether to use tail call (used only for @CriticalNative).
+bool ArmJniCallingConvention::UseTailCall() const {
+  CHECK(IsCriticalNative());
+  return OutArgSize() == 0u;
 }
 
 }  // namespace arm
diff --git a/compiler/jni/quick/arm/calling_convention_arm.h b/compiler/jni/quick/arm/calling_convention_arm.h
index b327898..e4b86fa 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.h
+++ b/compiler/jni/quick/arm/calling_convention_arm.h
@@ -23,8 +23,6 @@
 namespace art {
 namespace arm {
 
-constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
-
 class ArmManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
  public:
   ArmManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
@@ -35,7 +33,7 @@
   ~ArmManagedRuntimeCallingConvention() override {}
   // Calling convention
   ManagedRegister ReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() override;
+  ManagedRegister InterproceduralScratchRegister() const override;
   // Managed runtime calling convention
   ManagedRegister MethodRegister() override;
   bool IsCurrentParamInRegister() override;
@@ -60,11 +58,11 @@
   // Calling convention
   ManagedRegister ReturnRegister() override;
   ManagedRegister IntReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() override;
+  ManagedRegister InterproceduralScratchRegister() const override;
   // JNI calling convention
   void Next() override;  // Override default behavior for AAPCS
-  size_t FrameSize() override;
-  size_t OutArgSize() override;
+  size_t FrameSize() const override;
+  size_t OutArgSize() const override;
   ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
   ManagedRegister ReturnScratchRegister() const override;
   uint32_t CoreSpillMask() const override;
@@ -79,8 +77,11 @@
     return false;
   }
 
- protected:
-  size_t NumberOfOutgoingStackArgs() override;
+  // Hidden argument register, used to pass the method pointer for @CriticalNative call.
+  ManagedRegister HiddenArgumentRegister() const override;
+
+  // Whether to use tail call (used only for @CriticalNative).
+  bool UseTailCall() const override;
 
  private:
   // Padding to ensure longs and doubles are not split in AAPCS
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index 4a6a754..231e140 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -18,6 +18,7 @@
 
 #include <android-base/logging.h>
 
+#include "arch/arm64/jni_frame_arm64.h"
 #include "arch/instruction_set.h"
 #include "handle_scope-inl.h"
 #include "utils/arm64/managed_register_arm64.h"
@@ -27,28 +28,25 @@
 
 static_assert(kArm64PointerSize == PointerSize::k64, "Unexpected ARM64 pointer size");
 
-// Up to how many float-like (float, double) args can be enregistered.
-// The rest of the args must go on the stack.
-constexpr size_t kMaxFloatOrDoubleRegisterArguments = 8u;
-// Up to how many integer-like (pointers, objects, longs, int, short, bool, etc) args can be
-// enregistered. The rest of the args must go on the stack.
-constexpr size_t kMaxIntLikeRegisterArguments = 8u;
-
 static const XRegister kXArgumentRegisters[] = {
   X0, X1, X2, X3, X4, X5, X6, X7
 };
+static_assert(kMaxIntLikeRegisterArguments == arraysize(kXArgumentRegisters));
 
 static const WRegister kWArgumentRegisters[] = {
   W0, W1, W2, W3, W4, W5, W6, W7
 };
+static_assert(kMaxIntLikeRegisterArguments == arraysize(kWArgumentRegisters));
 
 static const DRegister kDArgumentRegisters[] = {
   D0, D1, D2, D3, D4, D5, D6, D7
 };
+static_assert(kMaxFloatOrDoubleRegisterArguments == arraysize(kDArgumentRegisters));
 
 static const SRegister kSArgumentRegisters[] = {
   S0, S1, S2, S3, S4, S5, S6, S7
 };
+static_assert(kMaxFloatOrDoubleRegisterArguments == arraysize(kSArgumentRegisters));
 
 static constexpr ManagedRegister kCalleeSaveRegisters[] = {
     // Core registers.
@@ -59,6 +57,8 @@
     // Jni function is the native function which the java code wants to call.
     // Jni method is the method that is compiled by jni compiler.
     // Call chain: managed code(java) --> jni method --> jni function.
+    // This does not apply to the @CriticalNative.
+
     // Thread register(X19) is saved on stack.
     Arm64ManagedRegister::FromXRegister(X19),
     Arm64ManagedRegister::FromXRegister(X20),
@@ -86,58 +86,69 @@
     Arm64ManagedRegister::FromDRegister(D15),
 };
 
-static constexpr uint32_t CalculateCoreCalleeSpillMask() {
+template <size_t size>
+static constexpr uint32_t CalculateCoreCalleeSpillMask(
+    const ManagedRegister (&callee_saves)[size]) {
   uint32_t result = 0u;
-  for (auto&& r : kCalleeSaveRegisters) {
+  for (auto&& r : callee_saves) {
     if (r.AsArm64().IsXRegister()) {
-      result |= (1 << r.AsArm64().AsXRegister());
+      result |= (1u << r.AsArm64().AsXRegister());
     }
   }
   return result;
 }
 
-static constexpr uint32_t CalculateFpCalleeSpillMask() {
-  uint32_t result = 0;
-  for (auto&& r : kCalleeSaveRegisters) {
+template <size_t size>
+static constexpr uint32_t CalculateFpCalleeSpillMask(const ManagedRegister (&callee_saves)[size]) {
+  uint32_t result = 0u;
+  for (auto&& r : callee_saves) {
     if (r.AsArm64().IsDRegister()) {
-      result |= (1 << r.AsArm64().AsDRegister());
+      result |= (1u << r.AsArm64().AsDRegister());
     }
   }
   return result;
 }
 
-static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
-static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask();
+static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(kCalleeSaveRegisters);
+static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask(kCalleeSaveRegisters);
+
+static constexpr ManagedRegister kAapcs64CalleeSaveRegisters[] = {
+    // Core registers.
+    Arm64ManagedRegister::FromXRegister(X19),
+    Arm64ManagedRegister::FromXRegister(X20),
+    Arm64ManagedRegister::FromXRegister(X21),
+    Arm64ManagedRegister::FromXRegister(X22),
+    Arm64ManagedRegister::FromXRegister(X23),
+    Arm64ManagedRegister::FromXRegister(X24),
+    Arm64ManagedRegister::FromXRegister(X25),
+    Arm64ManagedRegister::FromXRegister(X26),
+    Arm64ManagedRegister::FromXRegister(X27),
+    Arm64ManagedRegister::FromXRegister(X28),
+    Arm64ManagedRegister::FromXRegister(X29),
+    Arm64ManagedRegister::FromXRegister(LR),
+    // Hard float registers.
+    Arm64ManagedRegister::FromDRegister(D8),
+    Arm64ManagedRegister::FromDRegister(D9),
+    Arm64ManagedRegister::FromDRegister(D10),
+    Arm64ManagedRegister::FromDRegister(D11),
+    Arm64ManagedRegister::FromDRegister(D12),
+    Arm64ManagedRegister::FromDRegister(D13),
+    Arm64ManagedRegister::FromDRegister(D14),
+    Arm64ManagedRegister::FromDRegister(D15),
+};
+
+static constexpr uint32_t kAapcs64CoreCalleeSpillMask =
+    CalculateCoreCalleeSpillMask(kAapcs64CalleeSaveRegisters);
+static constexpr uint32_t kAapcs64FpCalleeSpillMask =
+    CalculateFpCalleeSpillMask(kAapcs64CalleeSaveRegisters);
 
 // Calling convention
-ManagedRegister Arm64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
-  // X20 is safe to use as a scratch register:
-  // - with Baker read barriers (in the case of a non-critical native
-  //   method), it is reserved as Marking Register, and thus does not
-  //   actually need to be saved/restored; it is refreshed on exit
-  //   (see Arm64JNIMacroAssembler::RemoveFrame);
-  // - in other cases, it is saved on entry (in
-  //   Arm64JNIMacroAssembler::BuildFrame) and restored on exit (in
-  //   Arm64JNIMacroAssembler::RemoveFrame). This is also expected in
-  //   the case of a critical native method in the Baker read barrier
-  //   configuration, where the value of MR must be preserved across
-  //   the JNI call (as there is no MR refresh in that case).
-  return Arm64ManagedRegister::FromXRegister(X20);
+ManagedRegister Arm64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() const {
+  return Arm64ManagedRegister::FromXRegister(IP0);  // X16
 }
 
-ManagedRegister Arm64JniCallingConvention::InterproceduralScratchRegister() {
-  // X20 is safe to use as a scratch register:
-  // - with Baker read barriers (in the case of a non-critical native
-  //   method), it is reserved as Marking Register, and thus does not
-  //   actually need to be saved/restored; it is refreshed on exit
-  //   (see Arm64JNIMacroAssembler::RemoveFrame);
-  // - in other cases, it is saved on entry (in
-  //   Arm64JNIMacroAssembler::BuildFrame) and restored on exit (in
-  //   Arm64JNIMacroAssembler::RemoveFrame). This is also expected in
-  //   the case of a critical native method in the Baker read barrier
-  //   configuration, where the value of MR must be preserved across
-  //   the JNI call (as there is no MR refresh in that case).
-  return Arm64ManagedRegister::FromXRegister(X20);
+ManagedRegister Arm64JniCallingConvention::InterproceduralScratchRegister() const {
+  return Arm64ManagedRegister::FromXRegister(IP0);  // X16
 }
 
 static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
@@ -187,11 +198,9 @@
 
 FrameOffset Arm64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
   CHECK(IsCurrentParamOnStack());
-  FrameOffset result =
-      FrameOffset(displacement_.Int32Value() +  // displacement
-                  kFramePointerSize +  // Method ref
-                  (itr_slots_ * sizeof(uint32_t)));  // offset into in args
-  return result;
+  return FrameOffset(displacement_.Int32Value() +  // displacement
+                     kFramePointerSize +  // Method ref
+                     (itr_slots_ * sizeof(uint32_t)));  // offset into in args
 }
 
 const ManagedRegisterEntrySpills& Arm64ManagedRuntimeCallingConvention::EntrySpills() {
@@ -243,6 +252,7 @@
 }
 
 // JNI calling convention
+
 Arm64JniCallingConvention::Arm64JniCallingConvention(bool is_static,
                                                      bool is_synchronized,
                                                      bool is_critical_native,
@@ -255,52 +265,92 @@
 }
 
 uint32_t Arm64JniCallingConvention::CoreSpillMask() const {
-  return kCoreCalleeSpillMask;
+  return is_critical_native_ ? 0u : kCoreCalleeSpillMask;
 }
 
 uint32_t Arm64JniCallingConvention::FpSpillMask() const {
-  return kFpCalleeSpillMask;
+  return is_critical_native_ ? 0u : kFpCalleeSpillMask;
 }
 
 ManagedRegister Arm64JniCallingConvention::ReturnScratchRegister() const {
   return ManagedRegister::NoRegister();
 }
 
-size_t Arm64JniCallingConvention::FrameSize() {
+size_t Arm64JniCallingConvention::FrameSize() const {
+  if (is_critical_native_) {
+    CHECK(!SpillsMethod());
+    CHECK(!HasLocalReferenceSegmentState());
+    CHECK(!HasHandleScope());
+    CHECK(!SpillsReturnValue());
+    return 0u;  // There is no managed frame for @CriticalNative.
+  }
+
   // Method*, callee save area size, local reference segment state
-  //
-  // (Unlike x86_64, do not include return address, and the segment state is uint32
-  // instead of pointer).
+  CHECK(SpillsMethod());
   size_t method_ptr_size = static_cast<size_t>(kFramePointerSize);
   size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
+  size_t total_size = method_ptr_size + callee_save_area_size;
 
-  size_t frame_data_size = method_ptr_size + callee_save_area_size;
-  if (LIKELY(HasLocalReferenceSegmentState())) {
-    frame_data_size += sizeof(uint32_t);
-  }
-  // References plus 2 words for HandleScope header
-  size_t handle_scope_size = HandleScope::SizeOf(kArm64PointerSize, ReferenceCount());
+  CHECK(HasLocalReferenceSegmentState());
+  total_size += sizeof(uint32_t);
 
-  size_t total_size = frame_data_size;
-  if (LIKELY(HasHandleScope())) {
-    // HandleScope is sometimes excluded.
-    total_size += handle_scope_size;                                 // handle scope size
-  }
+  CHECK(HasHandleScope());
+  total_size += HandleScope::SizeOf(kArm64PointerSize, ReferenceCount());
 
   // Plus return value spill area size
+  CHECK(SpillsReturnValue());
   total_size += SizeOfReturnValue();
 
   return RoundUp(total_size, kStackAlignment);
 }
 
-size_t Arm64JniCallingConvention::OutArgSize() {
-  // Same as X86_64
-  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
+size_t Arm64JniCallingConvention::OutArgSize() const {
+  // Count param args, including JNIEnv* and jclass*.
+  size_t all_args = NumberOfExtraArgumentsForJni() + NumArgs();
+  size_t num_fp_args = NumFloatOrDoubleArgs();
+  DCHECK_GE(all_args, num_fp_args);
+  size_t num_non_fp_args = all_args - num_fp_args;
+  // Account for FP arguments passed through v0-v7.
+  size_t num_stack_fp_args =
+      num_fp_args - std::min(kMaxFloatOrDoubleRegisterArguments, num_fp_args);
+  // Account for other (integer and pointer) arguments passed through GPR (x0-x7).
+  size_t num_stack_non_fp_args =
+      num_non_fp_args - std::min(kMaxIntLikeRegisterArguments, num_non_fp_args);
+  // The size of outgoing arguments.
+  size_t size = (num_stack_fp_args + num_stack_non_fp_args) * kFramePointerSize;
+
+  // @CriticalNative can use tail call as all managed callee saves are preserved by AAPCS64.
+  static_assert((kCoreCalleeSpillMask & ~kAapcs64CoreCalleeSpillMask) == 0u);
+  static_assert((kFpCalleeSpillMask & ~kAapcs64FpCalleeSpillMask) == 0u);
+
+  // For @CriticalNative, we can make a tail call if there are no stack args and
+  // we do not need to extend the result. Otherwise, add space for return PC.
+  if (is_critical_native_ && (size != 0u || RequiresSmallResultTypeExtension())) {
+    size += kFramePointerSize;  // We need to spill LR with the args.
+  }
+  size_t out_args_size = RoundUp(size, kAapcs64StackAlignment);
+  if (UNLIKELY(IsCriticalNative())) {
+    DCHECK_EQ(out_args_size, GetCriticalNativeOutArgsSize(GetShorty(), NumArgs() + 1u));
+  }
+  return out_args_size;
 }
 
 ArrayRef<const ManagedRegister> Arm64JniCallingConvention::CalleeSaveRegisters() const {
-  // Same as X86_64
-  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
+  if (UNLIKELY(IsCriticalNative())) {
+    if (UseTailCall()) {
+      return ArrayRef<const ManagedRegister>();  // Do not spill anything.
+    } else {
+      // Spill LR with out args.
+      static_assert((kCoreCalleeSpillMask >> LR) == 1u);  // Contains LR as the highest bit.
+      constexpr size_t lr_index = POPCOUNT(kCoreCalleeSpillMask) - 1u;
+      static_assert(kCalleeSaveRegisters[lr_index].Equals(
+                        Arm64ManagedRegister::FromXRegister(LR)));
+      return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters).SubArray(
+          /*pos*/ lr_index, /*length=*/ 1u);
+    }
+  } else {
+    return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
+  }
 }
 
 bool Arm64JniCallingConvention::IsCurrentParamInRegister() {
@@ -347,25 +397,28 @@
   size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize);
   CHECK_LT(offset, OutArgSize());
   return FrameOffset(offset);
-  // TODO: Seems identical to X86_64 code.
 }
 
-size_t Arm64JniCallingConvention::NumberOfOutgoingStackArgs() {
-  // all arguments including JNI args
-  size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();
+ManagedRegister Arm64JniCallingConvention::HiddenArgumentRegister() const {
+  CHECK(IsCriticalNative());
+  // X15 is neither managed callee-save, nor argument register, nor scratch register.
+  // TODO: Change to static_assert; std::none_of should be constexpr since C++20.
+  DCHECK(std::none_of(kCalleeSaveRegisters,
+                      kCalleeSaveRegisters + std::size(kCalleeSaveRegisters),
+                      [](ManagedRegister callee_save) constexpr {
+                        return callee_save.Equals(Arm64ManagedRegister::FromXRegister(X15));
+                      }));
+  DCHECK(std::none_of(kXArgumentRegisters,
+                      kXArgumentRegisters + std::size(kXArgumentRegisters),
+                      [](XRegister reg) { return reg == X15; }));
+  DCHECK(!InterproceduralScratchRegister().Equals(Arm64ManagedRegister::FromXRegister(X15)));
+  return Arm64ManagedRegister::FromXRegister(X15);
+}
 
-  DCHECK_GE(all_args, NumFloatOrDoubleArgs());
-
-  size_t all_stack_args =
-      all_args
-      - std::min(kMaxFloatOrDoubleRegisterArguments,
-                 static_cast<size_t>(NumFloatOrDoubleArgs()))
-      - std::min(kMaxIntLikeRegisterArguments,
-                 static_cast<size_t>((all_args - NumFloatOrDoubleArgs())));
-
-  // TODO: Seems similar to X86_64 code except it doesn't count return pc.
-
-  return all_stack_args;
+// Whether to use tail call (used only for @CriticalNative).
+bool Arm64JniCallingConvention::UseTailCall() const {
+  CHECK(IsCriticalNative());
+  return OutArgSize() == 0u;
 }
 
 }  // namespace arm64
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.h b/compiler/jni/quick/arm64/calling_convention_arm64.h
index ed0ddeb..64b29f1 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.h
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.h
@@ -23,8 +23,6 @@
 namespace art {
 namespace arm64 {
 
-constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);
-
 class Arm64ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
  public:
   Arm64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
@@ -35,7 +33,7 @@
   ~Arm64ManagedRuntimeCallingConvention() override {}
   // Calling convention
   ManagedRegister ReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() override;
+  ManagedRegister InterproceduralScratchRegister() const override;
   // Managed runtime calling convention
   ManagedRegister MethodRegister() override;
   bool IsCurrentParamInRegister() override;
@@ -60,10 +58,10 @@
   // Calling convention
   ManagedRegister ReturnRegister() override;
   ManagedRegister IntReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() override;
+  ManagedRegister InterproceduralScratchRegister() const override;
   // JNI calling convention
-  size_t FrameSize() override;
-  size_t OutArgSize() override;
+  size_t FrameSize() const override;
+  size_t OutArgSize() const override;
   ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
   ManagedRegister ReturnScratchRegister() const override;
   uint32_t CoreSpillMask() const override;
@@ -75,11 +73,14 @@
 
   // aarch64 calling convention leaves upper bits undefined.
   bool RequiresSmallResultTypeExtension() const override {
-    return true;
+    return HasSmallReturnType();
   }
 
- protected:
-  size_t NumberOfOutgoingStackArgs() override;
+  // Hidden argument register, used to pass the method pointer for @CriticalNative call.
+  ManagedRegister HiddenArgumentRegister() const override;
+
+  // Whether to use tail call (used only for @CriticalNative).
+  bool UseTailCall() const override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(Arm64JniCallingConvention);
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index f031b9b..1943756 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -28,14 +28,6 @@
 #include "jni/quick/arm64/calling_convention_arm64.h"
 #endif
 
-#ifdef ART_ENABLE_CODEGEN_mips
-#include "jni/quick/mips/calling_convention_mips.h"
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_mips64
-#include "jni/quick/mips64/calling_convention_mips64.h"
-#endif
-
 #ifdef ART_ENABLE_CODEGEN_x86
 #include "jni/quick/x86/calling_convention_x86.h"
 #endif
@@ -68,18 +60,6 @@
           new (allocator) arm64::Arm64ManagedRuntimeCallingConvention(
               is_static, is_synchronized, shorty));
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips
-    case InstructionSet::kMips:
-      return std::unique_ptr<ManagedRuntimeCallingConvention>(
-          new (allocator) mips::MipsManagedRuntimeCallingConvention(
-              is_static, is_synchronized, shorty));
-#endif
-#ifdef ART_ENABLE_CODEGEN_mips64
-    case InstructionSet::kMips64:
-      return std::unique_ptr<ManagedRuntimeCallingConvention>(
-          new (allocator) mips64::Mips64ManagedRuntimeCallingConvention(
-              is_static, is_synchronized, shorty));
-#endif
 #ifdef ART_ENABLE_CODEGEN_x86
     case InstructionSet::kX86:
       return std::unique_ptr<ManagedRuntimeCallingConvention>(
@@ -170,18 +150,6 @@
           new (allocator) arm64::Arm64JniCallingConvention(
               is_static, is_synchronized, is_critical_native, shorty));
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips
-    case InstructionSet::kMips:
-      return std::unique_ptr<JniCallingConvention>(
-          new (allocator) mips::MipsJniCallingConvention(
-              is_static, is_synchronized, is_critical_native, shorty));
-#endif
-#ifdef ART_ENABLE_CODEGEN_mips64
-    case InstructionSet::kMips64:
-      return std::unique_ptr<JniCallingConvention>(
-          new (allocator) mips64::Mips64JniCallingConvention(
-              is_static, is_synchronized, is_critical_native, shorty));
-#endif
 #ifdef ART_ENABLE_CODEGEN_x86
     case InstructionSet::kX86:
       return std::unique_ptr<JniCallingConvention>(
@@ -347,21 +315,6 @@
   }
 }
 
-bool JniCallingConvention::HasHandleScope() const {
-  // Exclude HandleScope for @CriticalNative methods for optimization speed.
-  return is_critical_native_ == false;
-}
-
-bool JniCallingConvention::HasLocalReferenceSegmentState() const {
-  // Exclude local reference segment states for @CriticalNative methods for optimization speed.
-  return is_critical_native_ == false;
-}
-
-bool JniCallingConvention::HasJniEnv() const {
-  // Exclude "JNIEnv*" parameter for @CriticalNative methods.
-  return HasExtraArgumentsForJni();
-}
-
 bool JniCallingConvention::HasSelfClass() const {
   if (!IsStatic()) {
     // Virtual functions: There is never an implicit jclass parameter.
@@ -372,11 +325,6 @@
   }
 }
 
-bool JniCallingConvention::HasExtraArgumentsForJni() const {
-  // @CriticalNative jni implementations exclude both JNIEnv* and the jclass/jobject parameters.
-  return is_critical_native_ == false;
-}
-
 unsigned int JniCallingConvention::GetIteratorPositionWithinShorty() const {
   // We need to subtract out the extra JNI arguments if we want to use this iterator position
   // with the inherited CallingConvention member functions, which rely on scanning the shorty.
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index 77a5d59..3d4cefe 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -49,12 +49,7 @@
   // Register that holds result of this method invocation.
   virtual ManagedRegister ReturnRegister() = 0;
   // Register reserved for scratch usage during procedure calls.
-  virtual ManagedRegister InterproceduralScratchRegister() = 0;
-
-  // Offset of Method within the frame.
-  FrameOffset MethodStackOffset() {
-    return displacement_;
-  }
+  virtual ManagedRegister InterproceduralScratchRegister() const = 0;
 
   // Iterator interface
 
@@ -70,6 +65,14 @@
     itr_float_and_doubles_ = 0;
   }
 
+  FrameOffset GetDisplacement() const {
+    return displacement_;
+  }
+
+  PointerSize GetFramePointerSize() const {
+    return frame_pointer_size_;
+  }
+
   virtual ~CallingConvention() {}
 
  protected:
@@ -239,6 +242,11 @@
                                                                  const char* shorty,
                                                                  InstructionSet instruction_set);
 
+  // Offset of Method within the managed frame.
+  FrameOffset MethodStackOffset() {
+    return FrameOffset(0u);
+  }
+
   // Register that holds the incoming method argument
   virtual ManagedRegister MethodRegister() = 0;
 
@@ -296,10 +304,10 @@
   // Size of frame excluding space for outgoing args (its assumed Method* is
   // always at the bottom of a frame, but this doesn't work for outgoing
   // native args). Includes alignment.
-  virtual size_t FrameSize() = 0;
+  virtual size_t FrameSize() const = 0;
   // Size of outgoing arguments (stack portion), including alignment.
   // -- Arguments that are passed via registers are excluded from this size.
-  virtual size_t OutArgSize() = 0;
+  virtual size_t OutArgSize() const = 0;
   // Number of references in stack indirect reference table
   size_t ReferenceCount() const;
   // Location where the segment state of the local indirect reference table is saved
@@ -365,6 +373,32 @@
 
   virtual ~JniCallingConvention() {}
 
+  bool IsCriticalNative() const {
+    return is_critical_native_;
+  }
+
+  // Does the transition have a method pointer in the stack frame?
+  bool SpillsMethod() const {
+    // Exclude method pointer for @CriticalNative methods for optimization speed.
+    return !IsCriticalNative();
+  }
+
+  // Hidden argument register, used to pass the method pointer for @CriticalNative call.
+  virtual ManagedRegister HiddenArgumentRegister() const = 0;
+
+  // Whether to use tail call (used only for @CriticalNative).
+  virtual bool UseTailCall() const = 0;
+
+  // Whether the return type is small. Used for RequiresSmallResultTypeExtension()
+  // on architectures that require the sign/zero extension.
+  bool HasSmallReturnType() const {
+    Primitive::Type return_type = GetReturnType();
+    return return_type == Primitive::kPrimByte ||
+           return_type == Primitive::kPrimShort ||
+           return_type == Primitive::kPrimBoolean ||
+           return_type == Primitive::kPrimChar;
+  }
+
  protected:
   // Named iterator positions
   enum IteratorPos {
@@ -380,24 +414,41 @@
       : CallingConvention(is_static, is_synchronized, shorty, frame_pointer_size),
         is_critical_native_(is_critical_native) {}
 
-  // Number of stack slots for outgoing arguments, above which the handle scope is
-  // located
-  virtual size_t NumberOfOutgoingStackArgs() = 0;
-
  protected:
   size_t NumberOfExtraArgumentsForJni() const;
 
   // Does the transition have a StackHandleScope?
-  bool HasHandleScope() const;
+  bool HasHandleScope() const {
+    // Exclude HandleScope for @CriticalNative methods for optimization speed.
+    return !IsCriticalNative();
+  }
+
   // Does the transition have a local reference segment state?
-  bool HasLocalReferenceSegmentState() const;
-  // Has a JNIEnv* parameter implicitly?
-  bool HasJniEnv() const;
-  // Has a 'jclass' parameter implicitly?
-  bool HasSelfClass() const;
+  bool HasLocalReferenceSegmentState() const {
+    // Exclude local reference segment states for @CriticalNative methods for optimization speed.
+    return !IsCriticalNative();
+  }
+
+  // Does the transition back spill the return value in the stack frame?
+  bool SpillsReturnValue() const {
+    // Exclude return value for @CriticalNative methods for optimization speed.
+    return !IsCriticalNative();
+  }
 
   // Are there extra JNI arguments (JNIEnv* and maybe jclass)?
-  bool HasExtraArgumentsForJni() const;
+  bool HasExtraArgumentsForJni() const {
+    // @CriticalNative jni implementations exclude both JNIEnv* and the jclass/jobject parameters.
+    return !IsCriticalNative();
+  }
+
+  // Has a JNIEnv* parameter implicitly?
+  bool HasJniEnv() const {
+    // Exclude "JNIEnv*" parameter for @CriticalNative methods.
+    return HasExtraArgumentsForJni();
+  }
+
+  // Has a 'jclass' parameter implicitly?
+  bool HasSelfClass() const;
 
   // Returns the position of itr_args_, fixed up by removing the offset of extra JNI arguments.
   unsigned int GetIteratorPositionWithinShorty() const;
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 7054078..c2db73a 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -43,8 +43,6 @@
 #include "utils/assembler.h"
 #include "utils/jni_macro_assembler.h"
 #include "utils/managed_register.h"
-#include "utils/mips/managed_register_mips.h"
-#include "utils/mips64/managed_register_mips64.h"
 #include "utils/x86/managed_register_x86.h"
 
 #define __ jni_asm->
@@ -54,8 +52,7 @@
 template <PointerSize kPointerSize>
 static void CopyParameter(JNIMacroAssembler<kPointerSize>* jni_asm,
                           ManagedRuntimeCallingConvention* mr_conv,
-                          JniCallingConvention* jni_conv,
-                          size_t frame_size, size_t out_arg_size);
+                          JniCallingConvention* jni_conv);
 template <PointerSize kPointerSize>
 static void SetNativeParameter(JNIMacroAssembler<kPointerSize>* jni_asm,
                                JniCallingConvention* jni_conv,
@@ -131,7 +128,7 @@
   const bool is_fast_native = (access_flags & kAccFastNative) != 0u;
 
   // i.e. if the method was annotated with @CriticalNative
-  bool is_critical_native = (access_flags & kAccCriticalNative) != 0u;
+  const bool is_critical_native = (access_flags & kAccCriticalNative) != 0u;
 
   VLOG(jni) << "JniCompile: Method :: "
               << dex_file.PrettyMethod(method_idx, /* with signature */ true)
@@ -220,17 +217,22 @@
   jni_asm->SetEmitRunTimeChecksInDebugMode(compiler_options.EmitRunTimeChecksInDebugMode());
 
   // 1. Build the frame saving all callee saves, Method*, and PC return address.
-  const size_t frame_size(main_jni_conv->FrameSize());  // Excludes outgoing args.
+  //    For @CriticalNative, this includes space for out args, otherwise just the managed frame.
+  const size_t managed_frame_size = main_jni_conv->FrameSize();
+  const size_t main_out_arg_size = main_jni_conv->OutArgSize();
+  size_t current_frame_size = is_critical_native ? main_out_arg_size : managed_frame_size;
+  ManagedRegister method_register =
+      is_critical_native ? ManagedRegister::NoRegister() : mr_conv->MethodRegister();
   ArrayRef<const ManagedRegister> callee_save_regs = main_jni_conv->CalleeSaveRegisters();
-  __ BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, mr_conv->EntrySpills());
-  DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size));
+  __ BuildFrame(current_frame_size, method_register, callee_save_regs, mr_conv->EntrySpills());
+  DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(current_frame_size));
 
   if (LIKELY(!is_critical_native)) {
     // NOTE: @CriticalNative methods don't have a HandleScope
     //       because they can't have any reference parameters or return values.
 
     // 2. Set up the HandleScope
-    mr_conv->ResetIterator(FrameOffset(frame_size));
+    mr_conv->ResetIterator(FrameOffset(current_frame_size));
     main_jni_conv->ResetIterator(FrameOffset(0));
     __ StoreImmediateToFrame(main_jni_conv->HandleScopeNumRefsOffset(),
                              main_jni_conv->ReferenceCount(),
@@ -249,7 +251,7 @@
     if (is_static) {
       FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
       // Check handle scope offset is within frame
-      CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
+      CHECK_LT(handle_scope_offset.Uint32Value(), current_frame_size);
       // Note this LoadRef() doesn't need heap unpoisoning since it's from the ArtMethod.
       // Note this LoadRef() does not include read barrier. It will be handled below.
       //
@@ -272,7 +274,7 @@
         // must be null.
         FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
         // Check handle scope offset is within frame and doesn't run into the saved segment state.
-        CHECK_LT(handle_scope_offset.Uint32Value(), frame_size);
+        CHECK_LT(handle_scope_offset.Uint32Value(), current_frame_size);
         CHECK_NE(handle_scope_offset.Uint32Value(),
                  main_jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
         bool input_in_reg = mr_conv->IsCurrentParamInRegister();
@@ -304,16 +306,23 @@
   }  // if (!is_critical_native)
 
   // 5. Move frame down to allow space for out going args.
-  const size_t main_out_arg_size = main_jni_conv->OutArgSize();
   size_t current_out_arg_size = main_out_arg_size;
-  __ IncreaseFrameSize(main_out_arg_size);
+  if (UNLIKELY(is_critical_native)) {
+    DCHECK_EQ(main_out_arg_size, current_frame_size);
+    // Move the method pointer to the hidden argument register.
+    __ Move(main_jni_conv->HiddenArgumentRegister(),
+            mr_conv->MethodRegister(),
+            static_cast<size_t>(main_jni_conv->GetFramePointerSize()));
+  } else {
+    __ IncreaseFrameSize(main_out_arg_size);
+    current_frame_size += main_out_arg_size;
+  }
 
   // Call the read barrier for the declaring class loaded from the method for a static call.
   // Skip this for @CriticalNative because we didn't build a HandleScope to begin with.
   // Note that we always have outgoing param space available for at least two params.
   if (kUseReadBarrier && is_static && !is_critical_native) {
-    const bool kReadBarrierFastPath =
-        (instruction_set != InstructionSet::kMips) && (instruction_set != InstructionSet::kMips64);
+    const bool kReadBarrierFastPath = true;  // Always true after Mips codegen was removed.
     std::unique_ptr<JNIMacroLabel> skip_cold_path_label;
     if (kReadBarrierFastPath) {
       skip_cold_path_label = __ CreateLabel();
@@ -376,6 +385,8 @@
   //    abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer
   //    arguments.
   FrameOffset locked_object_handle_scope_offset(0xBEEFDEAD);
+  FrameOffset saved_cookie_offset(
+      FrameOffset(0xDEADBEEFu));  // @CriticalNative - use obviously bad value for debugging
   if (LIKELY(!is_critical_native)) {
     // Skip this for @CriticalNative methods. They do not call JniMethodStart.
     ThreadOffset<kPointerSize> jni_start(
@@ -414,12 +425,8 @@
     if (is_synchronized) {  // Check for exceptions from monitor enter.
       __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), main_out_arg_size);
     }
-  }
 
-  // Store into stack_frame[saved_cookie_offset] the return value of JniMethodStart.
-  FrameOffset saved_cookie_offset(
-      FrameOffset(0xDEADBEEFu));  // @CriticalNative - use obviously bad value for debugging
-  if (LIKELY(!is_critical_native)) {
+    // Store into stack_frame[saved_cookie_offset] the return value of JniMethodStart.
     saved_cookie_offset = main_jni_conv->SavedLocalReferenceCookieOffset();
     __ Store(saved_cookie_offset, main_jni_conv->IntReturnRegister(), 4 /* sizeof cookie */);
   }
@@ -430,7 +437,7 @@
   //    null (which must be encoded as null).
   //    Note: we do this prior to materializing the JNIEnv* and static's jclass to
   //    give as many free registers for the shuffle as possible.
-  mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size));
+  mr_conv->ResetIterator(FrameOffset(current_frame_size));
   uint32_t args_count = 0;
   while (mr_conv->HasNext()) {
     args_count++;
@@ -440,8 +447,12 @@
   // Do a backward pass over arguments, so that the generated code will be "mov
   // R2, R3; mov R1, R2" instead of "mov R1, R2; mov R2, R3."
   // TODO: A reverse iterator to improve readability.
+  // TODO: This is currently useless as all archs spill args when building the frame.
+  //       To avoid the full spilling, we would have to do one pass before the BuildFrame()
+  //       to determine which arg registers are clobbered before they are needed.
+  // TODO: For @CriticalNative, do a forward pass because there are no JNIEnv* and jclass* args.
   for (uint32_t i = 0; i < args_count; ++i) {
-    mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size));
+    mr_conv->ResetIterator(FrameOffset(current_frame_size));
     main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
 
     // Skip the extra JNI parameters for now.
@@ -456,11 +467,11 @@
       mr_conv->Next();
       main_jni_conv->Next();
     }
-    CopyParameter(jni_asm.get(), mr_conv.get(), main_jni_conv.get(), frame_size, main_out_arg_size);
+    CopyParameter(jni_asm.get(), mr_conv.get(), main_jni_conv.get());
   }
   if (is_static && !is_critical_native) {
     // Create argument for Class
-    mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size));
+    mr_conv->ResetIterator(FrameOffset(current_frame_size));
     main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
     main_jni_conv->Next();  // Skip JNIEnv*
     FrameOffset handle_scope_offset = main_jni_conv->CurrentParamHandleScopeEntryOffset();
@@ -496,20 +507,33 @@
   // 9. Plant call to native code associated with method.
   MemberOffset jni_entrypoint_offset =
       ArtMethod::EntryPointFromJniOffset(InstructionSetPointerSize(instruction_set));
-  // FIXME: Not sure if MethodStackOffset will work here. What does it even do?
-  __ Call(main_jni_conv->MethodStackOffset(),
-          jni_entrypoint_offset,
-          // XX: Why not the jni conv scratch register?
-          mr_conv->InterproceduralScratchRegister());
+  if (UNLIKELY(is_critical_native)) {
+    if (main_jni_conv->UseTailCall()) {
+      __ Jump(main_jni_conv->HiddenArgumentRegister(),
+              jni_entrypoint_offset,
+              main_jni_conv->InterproceduralScratchRegister());
+    } else {
+      __ Call(main_jni_conv->HiddenArgumentRegister(),
+              jni_entrypoint_offset,
+              main_jni_conv->InterproceduralScratchRegister());
+    }
+  } else {
+    __ Call(FrameOffset(main_out_arg_size + mr_conv->MethodStackOffset().SizeValue()),
+            jni_entrypoint_offset,
+            main_jni_conv->InterproceduralScratchRegister());
+  }
 
   // 10. Fix differences in result widths.
   if (main_jni_conv->RequiresSmallResultTypeExtension()) {
+    DCHECK(main_jni_conv->HasSmallReturnType());
+    CHECK(!is_critical_native || !main_jni_conv->UseTailCall());
     if (main_jni_conv->GetReturnType() == Primitive::kPrimByte ||
         main_jni_conv->GetReturnType() == Primitive::kPrimShort) {
       __ SignExtend(main_jni_conv->ReturnRegister(),
                     Primitive::ComponentSize(main_jni_conv->GetReturnType()));
-    } else if (main_jni_conv->GetReturnType() == Primitive::kPrimBoolean ||
-               main_jni_conv->GetReturnType() == Primitive::kPrimChar) {
+    } else {
+      CHECK(main_jni_conv->GetReturnType() == Primitive::kPrimBoolean ||
+            main_jni_conv->GetReturnType() == Primitive::kPrimChar);
       __ ZeroExtend(main_jni_conv->ReturnRegister(),
                     Primitive::ComponentSize(main_jni_conv->GetReturnType()));
     }
@@ -521,17 +545,7 @@
     if (LIKELY(!is_critical_native)) {
       // For normal JNI, store the return value on the stack because the call to
       // JniMethodEnd will clobber the return value. It will be restored in (13).
-      if ((instruction_set == InstructionSet::kMips ||
-           instruction_set == InstructionSet::kMips64) &&
-          main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
-          return_save_location.Uint32Value() % 8 != 0) {
-        // Ensure doubles are 8-byte aligned for MIPS
-        return_save_location = FrameOffset(return_save_location.Uint32Value()
-                                               + static_cast<size_t>(kMipsPointerSize));
-        // TODO: refactor this into the JniCallingConvention code
-        // as a return value alignment requirement.
-      }
-      CHECK_LT(return_save_location.Uint32Value(), frame_size + main_out_arg_size);
+      CHECK_LT(return_save_location.Uint32Value(), current_frame_size);
       __ Store(return_save_location,
                main_jni_conv->ReturnRegister(),
                main_jni_conv->SizeOfReturnValue());
@@ -545,6 +559,7 @@
       // If they differ, only then do we have to do anything about it.
       // Otherwise the return value is already in the right place when we return.
       if (!jni_return_reg.Equals(mr_return_reg)) {
+        CHECK(!main_jni_conv->UseTailCall());
         // This is typically only necessary on ARM32 due to native being softfloat
         // while managed is hardfloat.
         // -- For example VMOV {r0, r1} -> D0; VMOV r0 -> S0.
@@ -557,23 +572,21 @@
     }
   }
 
-  // Increase frame size for out args if needed by the end_jni_conv.
-  const size_t end_out_arg_size = end_jni_conv->OutArgSize();
-  if (end_out_arg_size > current_out_arg_size) {
-    size_t out_arg_size_diff = end_out_arg_size - current_out_arg_size;
-    current_out_arg_size = end_out_arg_size;
-    // TODO: This is redundant for @CriticalNative but we need to
-    // conditionally do __DecreaseFrameSize below.
-    __ IncreaseFrameSize(out_arg_size_diff);
-    saved_cookie_offset = FrameOffset(saved_cookie_offset.SizeValue() + out_arg_size_diff);
-    locked_object_handle_scope_offset =
-        FrameOffset(locked_object_handle_scope_offset.SizeValue() + out_arg_size_diff);
-    return_save_location = FrameOffset(return_save_location.SizeValue() + out_arg_size_diff);
-  }
-  //     thread.
-  end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size));
-
   if (LIKELY(!is_critical_native)) {
+    // Increase frame size for out args if needed by the end_jni_conv.
+    const size_t end_out_arg_size = end_jni_conv->OutArgSize();
+    if (end_out_arg_size > current_out_arg_size) {
+      size_t out_arg_size_diff = end_out_arg_size - current_out_arg_size;
+      current_out_arg_size = end_out_arg_size;
+      __ IncreaseFrameSize(out_arg_size_diff);
+      current_frame_size += out_arg_size_diff;
+      saved_cookie_offset = FrameOffset(saved_cookie_offset.SizeValue() + out_arg_size_diff);
+      locked_object_handle_scope_offset =
+          FrameOffset(locked_object_handle_scope_offset.SizeValue() + out_arg_size_diff);
+      return_save_location = FrameOffset(return_save_location.SizeValue() + out_arg_size_diff);
+    }
+    end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size));
+
     // 12. Call JniMethodEnd
     ThreadOffset<kPointerSize> jni_end(
         GetJniEntrypointThreadOffset<kPointerSize>(JniEntrypoint::kEnd,
@@ -629,19 +642,28 @@
   }  // if (!is_critical_native)
 
   // 14. Move frame up now we're done with the out arg space.
-  __ DecreaseFrameSize(current_out_arg_size);
+  //     @CriticalNative remove out args together with the frame in RemoveFrame().
+  if (LIKELY(!is_critical_native)) {
+    __ DecreaseFrameSize(current_out_arg_size);
+    current_frame_size -= current_out_arg_size;
+  }
 
   // 15. Process pending exceptions from JNI call or monitor exit.
-  __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), 0 /* stack_adjust= */);
+  //     @CriticalNative methods do not need exception poll in the stub.
+  if (LIKELY(!is_critical_native)) {
+    __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), 0 /* stack_adjust= */);
+  }
 
   // 16. Remove activation - need to restore callee save registers since the GC may have changed
   //     them.
-  DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size));
-  // We expect the compiled method to possibly be suspended during its
-  // execution, except in the case of a CriticalNative method.
-  bool may_suspend = !is_critical_native;
-  __ RemoveFrame(frame_size, callee_save_regs, may_suspend);
-  DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size));
+  DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(current_frame_size));
+  if (LIKELY(!is_critical_native) || !main_jni_conv->UseTailCall()) {
+    // We expect the compiled method to possibly be suspended during its
+    // execution, except in the case of a CriticalNative method.
+    bool may_suspend = !is_critical_native;
+    __ RemoveFrame(current_frame_size, callee_save_regs, may_suspend);
+    DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(current_frame_size));
+  }
 
   // 17. Finalize code generation
   __ FinalizeCode();
@@ -652,7 +674,7 @@
 
   return JniCompiledMethod(instruction_set,
                            std::move(managed_code),
-                           frame_size,
+                           managed_frame_size,
                            main_jni_conv->CoreSpillMask(),
                            main_jni_conv->FpSpillMask(),
                            ArrayRef<const uint8_t>(*jni_asm->cfi().data()));
@@ -662,9 +684,7 @@
 template <PointerSize kPointerSize>
 static void CopyParameter(JNIMacroAssembler<kPointerSize>* jni_asm,
                           ManagedRuntimeCallingConvention* mr_conv,
-                          JniCallingConvention* jni_conv,
-                          size_t frame_size,
-                          size_t out_arg_size) {
+                          JniCallingConvention* jni_conv) {
   bool input_in_reg = mr_conv->IsCurrentParamInRegister();
   bool output_in_reg = jni_conv->IsCurrentParamInRegister();
   FrameOffset handle_scope_offset(0);
@@ -686,7 +706,7 @@
     // as with regular references).
     handle_scope_offset = jni_conv->CurrentParamHandleScopeEntryOffset();
     // Check handle scope offset is within frame.
-    CHECK_LT(handle_scope_offset.Uint32Value(), (frame_size + out_arg_size));
+    CHECK_LT(handle_scope_offset.Uint32Value(), mr_conv->GetDisplacement().Uint32Value());
   }
   if (input_in_reg && output_in_reg) {
     ManagedRegister in_reg = mr_conv->CurrentParamRegister();
@@ -716,7 +736,7 @@
     FrameOffset in_off = mr_conv->CurrentParamStackOffset();
     ManagedRegister out_reg = jni_conv->CurrentParamRegister();
     // Check that incoming stack arguments are above the current stack frame.
-    CHECK_GT(in_off.Uint32Value(), frame_size);
+    CHECK_GT(in_off.Uint32Value(), mr_conv->GetDisplacement().Uint32Value());
     if (ref_param) {
       __ CreateHandleScopeEntry(out_reg, handle_scope_offset, ManagedRegister::NoRegister(), null_allowed);
     } else {
@@ -728,8 +748,8 @@
     CHECK(input_in_reg && !output_in_reg);
     ManagedRegister in_reg = mr_conv->CurrentParamRegister();
     FrameOffset out_off = jni_conv->CurrentParamStackOffset();
-    // Check outgoing argument is within frame
-    CHECK_LT(out_off.Uint32Value(), frame_size);
+    // Check outgoing argument is within frame part dedicated to out args.
+    CHECK_LT(out_off.Uint32Value(), jni_conv->GetDisplacement().Uint32Value());
     if (ref_param) {
       // TODO: recycle value in in_reg rather than reload from handle scope
       __ CreateHandleScopeEntry(out_off, handle_scope_offset, mr_conv->InterproceduralScratchRegister(),
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
deleted file mode 100644
index c69854d..0000000
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ /dev/null
@@ -1,461 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "calling_convention_mips.h"
-
-#include <android-base/logging.h>
-
-#include "arch/instruction_set.h"
-#include "handle_scope-inl.h"
-#include "utils/mips/managed_register_mips.h"
-
-namespace art {
-namespace mips {
-
-//
-// JNI calling convention constants.
-//
-
-// Up to how many float-like (float, double) args can be enregistered in floating-point registers.
-// The rest of the args must go in integer registers or on the stack.
-constexpr size_t kMaxFloatOrDoubleRegisterArguments = 2u;
-// Up to how many integer-like (pointers, objects, longs, int, short, bool, etc) args can be
-// enregistered. The rest of the args must go on the stack.
-constexpr size_t kMaxIntLikeRegisterArguments = 4u;
-
-static const Register kJniCoreArgumentRegisters[] = { A0, A1, A2, A3 };
-static const FRegister kJniFArgumentRegisters[] = { F12, F14 };
-static const DRegister kJniDArgumentRegisters[] = { D6, D7 };
-
-//
-// Managed calling convention constants.
-//
-
-static const Register kManagedCoreArgumentRegisters[] = { A0, A1, A2, A3, T0, T1 };
-static const FRegister kManagedFArgumentRegisters[] = { F8, F10, F12, F14, F16, F18 };
-static const DRegister kManagedDArgumentRegisters[] = { D4, D5, D6, D7, D8, D9 };
-
-static constexpr ManagedRegister kCalleeSaveRegisters[] = {
-    // Core registers.
-    MipsManagedRegister::FromCoreRegister(S2),
-    MipsManagedRegister::FromCoreRegister(S3),
-    MipsManagedRegister::FromCoreRegister(S4),
-    MipsManagedRegister::FromCoreRegister(S5),
-    MipsManagedRegister::FromCoreRegister(S6),
-    MipsManagedRegister::FromCoreRegister(S7),
-    MipsManagedRegister::FromCoreRegister(FP),
-    // No hard float callee saves.
-};
-
-static constexpr uint32_t CalculateCoreCalleeSpillMask() {
-  // RA is a special callee save which is not reported by CalleeSaveRegisters().
-  uint32_t result = 1 << RA;
-  for (auto&& r : kCalleeSaveRegisters) {
-    if (r.AsMips().IsCoreRegister()) {
-      result |= (1 << r.AsMips().AsCoreRegister());
-    }
-  }
-  return result;
-}
-
-static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
-static constexpr uint32_t kFpCalleeSpillMask = 0u;
-
-// Calling convention
-ManagedRegister MipsManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
-  return MipsManagedRegister::FromCoreRegister(T9);
-}
-
-ManagedRegister MipsJniCallingConvention::InterproceduralScratchRegister() {
-  return MipsManagedRegister::FromCoreRegister(T9);
-}
-
-static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
-  if (shorty[0] == 'F') {
-    return MipsManagedRegister::FromFRegister(F0);
-  } else if (shorty[0] == 'D') {
-    return MipsManagedRegister::FromDRegister(D0);
-  } else if (shorty[0] == 'J') {
-    return MipsManagedRegister::FromRegisterPair(V0_V1);
-  } else if (shorty[0] == 'V') {
-    return MipsManagedRegister::NoRegister();
-  } else {
-    return MipsManagedRegister::FromCoreRegister(V0);
-  }
-}
-
-ManagedRegister MipsManagedRuntimeCallingConvention::ReturnRegister() {
-  return ReturnRegisterForShorty(GetShorty());
-}
-
-ManagedRegister MipsJniCallingConvention::ReturnRegister() {
-  return ReturnRegisterForShorty(GetShorty());
-}
-
-ManagedRegister MipsJniCallingConvention::IntReturnRegister() {
-  return MipsManagedRegister::FromCoreRegister(V0);
-}
-
-// Managed runtime calling convention
-
-ManagedRegister MipsManagedRuntimeCallingConvention::MethodRegister() {
-  return MipsManagedRegister::FromCoreRegister(A0);
-}
-
-bool MipsManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
-  return false;  // Everything moved to stack on entry.
-}
-
-bool MipsManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
-  return true;
-}
-
-ManagedRegister MipsManagedRuntimeCallingConvention::CurrentParamRegister() {
-  LOG(FATAL) << "Should not reach here";
-  UNREACHABLE();
-}
-
-FrameOffset MipsManagedRuntimeCallingConvention::CurrentParamStackOffset() {
-  CHECK(IsCurrentParamOnStack());
-  FrameOffset result =
-      FrameOffset(displacement_.Int32Value() +        // displacement
-                  kFramePointerSize +                 // Method*
-                  (itr_slots_ * kFramePointerSize));  // offset into in args
-  return result;
-}
-
-const ManagedRegisterEntrySpills& MipsManagedRuntimeCallingConvention::EntrySpills() {
-  // We spill the argument registers on MIPS to free them up for scratch use, we then assume
-  // all arguments are on the stack.
-  if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
-    uint32_t gpr_index = 1;  // Skip A0, it is used for ArtMethod*.
-    uint32_t fpr_index = 0;
-
-    for (ResetIterator(FrameOffset(0)); HasNext(); Next()) {
-      if (IsCurrentParamAFloatOrDouble()) {
-        if (IsCurrentParamADouble()) {
-          if (fpr_index < arraysize(kManagedDArgumentRegisters)) {
-            entry_spills_.push_back(
-                MipsManagedRegister::FromDRegister(kManagedDArgumentRegisters[fpr_index++]));
-          } else {
-            entry_spills_.push_back(ManagedRegister::NoRegister(), 8);
-          }
-        } else {
-          if (fpr_index < arraysize(kManagedFArgumentRegisters)) {
-            entry_spills_.push_back(
-                MipsManagedRegister::FromFRegister(kManagedFArgumentRegisters[fpr_index++]));
-          } else {
-            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
-          }
-        }
-      } else {
-        if (IsCurrentParamALong() && !IsCurrentParamAReference()) {
-          if (gpr_index == 1 || gpr_index == 3) {
-            // Don't use A1-A2(A3-T0) as a register pair, move to A2-A3(T0-T1) instead.
-            gpr_index++;
-          }
-          if (gpr_index < arraysize(kManagedCoreArgumentRegisters) - 1) {
-            entry_spills_.push_back(
-                MipsManagedRegister::FromCoreRegister(kManagedCoreArgumentRegisters[gpr_index++]));
-          } else if (gpr_index == arraysize(kManagedCoreArgumentRegisters) - 1) {
-            gpr_index++;
-            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
-          } else {
-            entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
-          }
-        }
-
-        if (gpr_index < arraysize(kManagedCoreArgumentRegisters)) {
-          entry_spills_.push_back(
-              MipsManagedRegister::FromCoreRegister(kManagedCoreArgumentRegisters[gpr_index++]));
-        } else {
-          entry_spills_.push_back(ManagedRegister::NoRegister(), 4);
-        }
-      }
-    }
-  }
-  return entry_spills_;
-}
-
-// JNI calling convention
-
-MipsJniCallingConvention::MipsJniCallingConvention(bool is_static,
-                                                   bool is_synchronized,
-                                                   bool is_critical_native,
-                                                   const char* shorty)
-    : JniCallingConvention(is_static,
-                           is_synchronized,
-                           is_critical_native,
-                           shorty,
-                           kMipsPointerSize) {
-  // SYSTEM V - Application Binary Interface (MIPS RISC Processor):
-  // Data Representation - Fundamental Types (3-4) specifies fundamental alignments for each type.
-  //   "Each member is assigned to the lowest available offset with the appropriate alignment. This
-  // may require internal padding, depending on the previous member."
-  //
-  // All of our stack arguments are usually 4-byte aligned, however longs and doubles must be 8
-  // bytes aligned. Add padding to maintain 8-byte alignment invariant.
-  //
-  // Compute padding to ensure longs and doubles are not split in o32.
-  size_t padding = 0;
-  size_t cur_arg, cur_reg;
-  if (LIKELY(HasExtraArgumentsForJni())) {
-    // Ignore the 'this' jobject or jclass for static methods and the JNIEnv.
-    // We start at the aligned register A2.
-    //
-    // Ignore the first 2 parameters because they are guaranteed to be aligned.
-    cur_arg = NumImplicitArgs();  // Skip the "this" argument.
-    cur_reg = 2;  // Skip {A0=JNIEnv, A1=jobject} / {A0=JNIEnv, A1=jclass} parameters (start at A2).
-  } else {
-    // Check every parameter.
-    cur_arg = 0;
-    cur_reg = 0;
-  }
-
-  // Shift across a logical register mapping that looks like:
-  //
-  //   | A0 | A1 | A2 | A3 | SP+16 | SP+20 | SP+24 | ... | SP+n | SP+n+4 |
-  //
-  //   or some of variants with floating-point registers (F12 and F14), for example
-  //
-  //   | F12     | F14 | A3 | SP+16 | SP+20 | SP+24 | ... | SP+n | SP+n+4 |
-  //
-  //   (where SP is the stack pointer at the start of called function).
-  //
-  // Any time there would normally be a long/double in an odd logical register,
-  // we have to push out the rest of the mappings by 4 bytes to maintain an 8-byte alignment.
-  //
-  // This works for both physical register pairs {A0, A1}, {A2, A3},
-  // floating-point registers F12, F14 and for when the value is on the stack.
-  //
-  // For example:
-  // (a) long would normally go into A1, but we shift it into A2
-  //  | INT | (PAD) | LONG    |
-  //  | A0  |  A1   | A2 | A3 |
-  //
-  // (b) long would normally go into A3, but we shift it into SP
-  //  | INT | INT | INT | (PAD) | LONG        |
-  //  | A0  | A1  | A2  |  A3   | SP+16 SP+20 |
-  //
-  // where INT is any <=4 byte arg, and LONG is any 8-byte arg.
-  for (; cur_arg < NumArgs(); cur_arg++) {
-    if (IsParamALongOrDouble(cur_arg)) {
-      if ((cur_reg & 1) != 0) {
-        padding += 4;
-        cur_reg++;   // Additional bump to ensure alignment.
-      }
-      cur_reg += 2;  // Bump the iterator twice for every long argument.
-    } else {
-      cur_reg++;     // Bump the iterator for every argument.
-    }
-  }
-  if (cur_reg < kMaxIntLikeRegisterArguments) {
-    // As a special case when, as a result of shifting (or not) there are no arguments on the stack,
-    // we actually have 0 stack padding.
-    //
-    // For example with @CriticalNative and:
-    // (int, long) -> shifts the long but doesn't need to pad the stack
-    //
-    //          shift
-    //           \/
-    //  | INT | (PAD) | LONG      | (EMPTY) ...
-    //  | r0  |  r1   |  r2  | r3 |   SP    ...
-    //                                /\
-    //                          no stack padding
-    padding_ = 0;
-  } else {
-    padding_ = padding;
-  }
-
-  // Argument Passing (3-17):
-  //   "When the first argument is integral, the remaining arguments are passed in the integer
-  // registers."
-  //
-  //   "The rules that determine which arguments go into registers and which ones must be passed on
-  // the stack are most easily explained by considering the list of arguments as a structure,
-  // aligned according to normal structure rules. Mapping of this structure into the combination of
-  // stack and registers is as follows: up to two leading floating-point arguments can be passed in
-  // $f12 and $f14; everything else with a structure offset greater than or equal to 16 is passed on
-  // the stack. The remainder of the arguments are passed in $4..$7 based on their structure offset.
-  // Holes left in the structure for alignment are unused, whether in registers or in the stack."
-  //
-  // For example with @CriticalNative and:
-  // (a) first argument is not floating-point, so all go into integer registers
-  //  | INT | FLOAT | DOUBLE  |
-  //  | A0  |  A1   | A2 | A3 |
-  // (b) first argument is floating-point, but 2nd is integer
-  //  | FLOAT | INT | DOUBLE  |
-  //  |  F12  | A1  | A2 | A3 |
-  // (c) first two arguments are floating-point (float, double)
-  //  | FLOAT | (PAD) | DOUBLE |  INT  |
-  //  |  F12  |       |  F14   | SP+16 |
-  // (d) first two arguments are floating-point (double, float)
-  //  | DOUBLE | FLOAT | INT |
-  //  |  F12   |  F14  | A3  |
-  // (e) first three arguments are floating-point, but just first two will go into fp registers
-  //  | DOUBLE | FLOAT | FLOAT |
-  //  |  F12   |  F14  |  A3   |
-  //
-  // Find out if the first argument is a floating-point. In that case, floating-point registers will
-  // be used for up to two leading floating-point arguments. Otherwise, all arguments will be passed
-  // using integer registers.
-  use_fp_arg_registers_ = false;
-  if (is_critical_native) {
-    if (NumArgs() > 0) {
-      if (IsParamAFloatOrDouble(0)) {
-        use_fp_arg_registers_ = true;
-      }
-    }
-  }
-}
-
-uint32_t MipsJniCallingConvention::CoreSpillMask() const {
-  return kCoreCalleeSpillMask;
-}
-
-uint32_t MipsJniCallingConvention::FpSpillMask() const {
-  return kFpCalleeSpillMask;
-}
-
-ManagedRegister MipsJniCallingConvention::ReturnScratchRegister() const {
-  return MipsManagedRegister::FromCoreRegister(AT);
-}
-
-size_t MipsJniCallingConvention::FrameSize() {
-  // ArtMethod*, RA and callee save area size, local reference segment state.
-  const size_t method_ptr_size = static_cast<size_t>(kMipsPointerSize);
-  const size_t ra_return_addr_size = kFramePointerSize;
-  const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
-
-  size_t frame_data_size = method_ptr_size + ra_return_addr_size + callee_save_area_size;
-
-  if (LIKELY(HasLocalReferenceSegmentState())) {
-    // Local reference segment state.
-    frame_data_size += kFramePointerSize;
-  }
-
-  // References plus 2 words for HandleScope header.
-  const size_t handle_scope_size = HandleScope::SizeOf(kMipsPointerSize, ReferenceCount());
-
-  size_t total_size = frame_data_size;
-  if (LIKELY(HasHandleScope())) {
-    // HandleScope is sometimes excluded.
-    total_size += handle_scope_size;    // Handle scope size.
-  }
-
-  // Plus return value spill area size.
-  total_size += SizeOfReturnValue();
-
-  return RoundUp(total_size, kStackAlignment);
-}
-
-size_t MipsJniCallingConvention::OutArgSize() {
-  // Argument Passing (3-17):
-  //   "Despite the fact that some or all of the arguments to a function are passed in registers,
-  // always allocate space on the stack for all arguments. This stack space should be a structure
-  // large enough to contain all the arguments, aligned according to normal structure rules (after
-  // promotion and structure return pointer insertion). The locations within the stack frame used
-  // for arguments are called the home locations."
-  //
-  // Allocate 16 bytes for home locations + space needed for stack arguments.
-  return RoundUp(
-      (kMaxIntLikeRegisterArguments + NumberOfOutgoingStackArgs()) * kFramePointerSize + padding_,
-      kStackAlignment);
-}
-
-ArrayRef<const ManagedRegister> MipsJniCallingConvention::CalleeSaveRegisters() const {
-  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
-}
-
-// JniCallingConvention ABI follows o32 where longs and doubles must occur
-// in even register numbers and stack slots.
-void MipsJniCallingConvention::Next() {
-  JniCallingConvention::Next();
-
-  if (LIKELY(HasNext())) {  // Avoid CHECK failure for IsCurrentParam
-    // Ensure slot is 8-byte aligned for longs/doubles (o32).
-    if (IsCurrentParamALongOrDouble() && ((itr_slots_ & 0x1u) != 0)) {
-      // itr_slots_ needs to be an even number, according to o32.
-      itr_slots_++;
-    }
-  }
-}
-
-bool MipsJniCallingConvention::IsCurrentParamInRegister() {
-  // Argument Passing (3-17):
-  //   "The rules that determine which arguments go into registers and which ones must be passed on
-  // the stack are most easily explained by considering the list of arguments as a structure,
-  // aligned according to normal structure rules. Mapping of this structure into the combination of
-  // stack and registers is as follows: up to two leading floating-point arguments can be passed in
-  // $f12 and $f14; everything else with a structure offset greater than or equal to 16 is passed on
-  // the stack. The remainder of the arguments are passed in $4..$7 based on their structure offset.
-  // Holes left in the structure for alignment are unused, whether in registers or in the stack."
-  //
-  // Even when floating-point registers are used, there can be up to 4 arguments passed in
-  // registers.
-  return itr_slots_ < kMaxIntLikeRegisterArguments;
-}
-
-bool MipsJniCallingConvention::IsCurrentParamOnStack() {
-  return !IsCurrentParamInRegister();
-}
-
-ManagedRegister MipsJniCallingConvention::CurrentParamRegister() {
-  CHECK_LT(itr_slots_, kMaxIntLikeRegisterArguments);
-  // Up to two leading floating-point arguments can be passed in floating-point registers.
-  if (use_fp_arg_registers_ && (itr_args_ < kMaxFloatOrDoubleRegisterArguments)) {
-    if (IsCurrentParamAFloatOrDouble()) {
-      if (IsCurrentParamADouble()) {
-        return MipsManagedRegister::FromDRegister(kJniDArgumentRegisters[itr_args_]);
-      } else {
-        return MipsManagedRegister::FromFRegister(kJniFArgumentRegisters[itr_args_]);
-      }
-    }
-  }
-  // All other arguments (including other floating-point arguments) will be passed in integer
-  // registers.
-  if (IsCurrentParamALongOrDouble()) {
-    if (itr_slots_ == 0u) {
-      return MipsManagedRegister::FromRegisterPair(A0_A1);
-    } else {
-      CHECK_EQ(itr_slots_, 2u);
-      return MipsManagedRegister::FromRegisterPair(A2_A3);
-    }
-  } else {
-    return MipsManagedRegister::FromCoreRegister(kJniCoreArgumentRegisters[itr_slots_]);
-  }
-}
-
-FrameOffset MipsJniCallingConvention::CurrentParamStackOffset() {
-  CHECK_GE(itr_slots_, kMaxIntLikeRegisterArguments);
-  size_t offset = displacement_.Int32Value() - OutArgSize() + (itr_slots_ * kFramePointerSize);
-  CHECK_LT(offset, OutArgSize());
-  return FrameOffset(offset);
-}
-
-size_t MipsJniCallingConvention::NumberOfOutgoingStackArgs() {
-  size_t static_args = HasSelfClass() ? 1 : 0;            // Count jclass.
-  // Regular argument parameters and this.
-  size_t param_args = NumArgs() + NumLongOrDoubleArgs();  // Twice count 8-byte args.
-  // Count JNIEnv* less arguments in registers.
-  size_t internal_args = (HasJniEnv() ? 1 : 0);
-  size_t total_args = static_args + param_args + internal_args;
-
-  return total_args - std::min(kMaxIntLikeRegisterArguments, static_cast<size_t>(total_args));
-}
-
-}  // namespace mips
-}  // namespace art
diff --git a/compiler/jni/quick/mips/calling_convention_mips.h b/compiler/jni/quick/mips/calling_convention_mips.h
deleted file mode 100644
index 8b395a0..0000000
--- a/compiler/jni/quick/mips/calling_convention_mips.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_JNI_QUICK_MIPS_CALLING_CONVENTION_MIPS_H_
-#define ART_COMPILER_JNI_QUICK_MIPS_CALLING_CONVENTION_MIPS_H_
-
-#include "base/enums.h"
-#include "jni/quick/calling_convention.h"
-
-namespace art {
-namespace mips {
-
-constexpr size_t kFramePointerSize = 4;
-static_assert(kFramePointerSize == static_cast<size_t>(PointerSize::k32),
-              "Invalid frame pointer size");
-
-class MipsManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
- public:
-  MipsManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
-      : ManagedRuntimeCallingConvention(is_static,
-                                        is_synchronized,
-                                        shorty,
-                                        PointerSize::k32) {}
-  ~MipsManagedRuntimeCallingConvention() override {}
-  // Calling convention
-  ManagedRegister ReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() override;
-  // Managed runtime calling convention
-  ManagedRegister MethodRegister() override;
-  bool IsCurrentParamInRegister() override;
-  bool IsCurrentParamOnStack() override;
-  ManagedRegister CurrentParamRegister() override;
-  FrameOffset CurrentParamStackOffset() override;
-  const ManagedRegisterEntrySpills& EntrySpills() override;
-
- private:
-  ManagedRegisterEntrySpills entry_spills_;
-
-  DISALLOW_COPY_AND_ASSIGN(MipsManagedRuntimeCallingConvention);
-};
-
-class MipsJniCallingConvention final : public JniCallingConvention {
- public:
-  MipsJniCallingConvention(bool is_static,
-                           bool is_synchronized,
-                           bool is_critical_native,
-                           const char* shorty);
-  ~MipsJniCallingConvention() override {}
-  // Calling convention
-  ManagedRegister ReturnRegister() override;
-  ManagedRegister IntReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() override;
-  // JNI calling convention
-  void Next() override;  // Override default behavior for o32.
-  size_t FrameSize() override;
-  size_t OutArgSize() override;
-  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
-  ManagedRegister ReturnScratchRegister() const override;
-  uint32_t CoreSpillMask() const override;
-  uint32_t FpSpillMask() const override;
-  bool IsCurrentParamInRegister() override;
-  bool IsCurrentParamOnStack() override;
-  ManagedRegister CurrentParamRegister() override;
-  FrameOffset CurrentParamStackOffset() override;
-
-  // Mips does not need to extend small return types.
-  bool RequiresSmallResultTypeExtension() const override {
-    return false;
-  }
-
- protected:
-  size_t NumberOfOutgoingStackArgs() override;
-
- private:
-  // Padding to ensure longs and doubles are not split in o32.
-  size_t padding_;
-  bool use_fp_arg_registers_;
-
-  DISALLOW_COPY_AND_ASSIGN(MipsJniCallingConvention);
-};
-
-}  // namespace mips
-}  // namespace art
-
-#endif  // ART_COMPILER_JNI_QUICK_MIPS_CALLING_CONVENTION_MIPS_H_
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc
deleted file mode 100644
index 2c297b3..0000000
--- a/compiler/jni/quick/mips64/calling_convention_mips64.cc
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "calling_convention_mips64.h"
-
-#include <android-base/logging.h>
-
-#include "arch/instruction_set.h"
-#include "handle_scope-inl.h"
-#include "utils/mips64/managed_register_mips64.h"
-
-namespace art {
-namespace mips64 {
-
-// Up to kow many args can be enregistered. The rest of the args must go on the stack.
-constexpr size_t kMaxRegisterArguments = 8u;
-
-static const GpuRegister kGpuArgumentRegisters[] = {
-  A0, A1, A2, A3, A4, A5, A6, A7
-};
-
-static const FpuRegister kFpuArgumentRegisters[] = {
-  F12, F13, F14, F15, F16, F17, F18, F19
-};
-
-static constexpr ManagedRegister kCalleeSaveRegisters[] = {
-    // Core registers.
-    Mips64ManagedRegister::FromGpuRegister(S2),
-    Mips64ManagedRegister::FromGpuRegister(S3),
-    Mips64ManagedRegister::FromGpuRegister(S4),
-    Mips64ManagedRegister::FromGpuRegister(S5),
-    Mips64ManagedRegister::FromGpuRegister(S6),
-    Mips64ManagedRegister::FromGpuRegister(S7),
-    Mips64ManagedRegister::FromGpuRegister(GP),
-    Mips64ManagedRegister::FromGpuRegister(S8),
-    // No hard float callee saves.
-};
-
-static constexpr uint32_t CalculateCoreCalleeSpillMask() {
-  // RA is a special callee save which is not reported by CalleeSaveRegisters().
-  uint32_t result = 1 << RA;
-  for (auto&& r : kCalleeSaveRegisters) {
-    if (r.AsMips64().IsGpuRegister()) {
-      result |= (1 << r.AsMips64().AsGpuRegister());
-    }
-  }
-  return result;
-}
-
-static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
-static constexpr uint32_t kFpCalleeSpillMask = 0u;
-
-// Calling convention
-ManagedRegister Mips64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
-  return Mips64ManagedRegister::FromGpuRegister(T9);
-}
-
-ManagedRegister Mips64JniCallingConvention::InterproceduralScratchRegister() {
-  return Mips64ManagedRegister::FromGpuRegister(T9);
-}
-
-static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
-  if (shorty[0] == 'F' || shorty[0] == 'D') {
-    return Mips64ManagedRegister::FromFpuRegister(F0);
-  } else if (shorty[0] == 'V') {
-    return Mips64ManagedRegister::NoRegister();
-  } else {
-    return Mips64ManagedRegister::FromGpuRegister(V0);
-  }
-}
-
-ManagedRegister Mips64ManagedRuntimeCallingConvention::ReturnRegister() {
-  return ReturnRegisterForShorty(GetShorty());
-}
-
-ManagedRegister Mips64JniCallingConvention::ReturnRegister() {
-  return ReturnRegisterForShorty(GetShorty());
-}
-
-ManagedRegister Mips64JniCallingConvention::IntReturnRegister() {
-  return Mips64ManagedRegister::FromGpuRegister(V0);
-}
-
-// Managed runtime calling convention
-
-ManagedRegister Mips64ManagedRuntimeCallingConvention::MethodRegister() {
-  return Mips64ManagedRegister::FromGpuRegister(A0);
-}
-
-bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
-  return false;  // Everything moved to stack on entry.
-}
-
-bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
-  return true;
-}
-
-ManagedRegister Mips64ManagedRuntimeCallingConvention::CurrentParamRegister() {
-  LOG(FATAL) << "Should not reach here";
-  UNREACHABLE();
-}
-
-FrameOffset Mips64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
-  CHECK(IsCurrentParamOnStack());
-  FrameOffset result =
-      FrameOffset(displacement_.Int32Value() +  // displacement
-                  kFramePointerSize +  // Method ref
-                  (itr_slots_ * sizeof(uint32_t)));  // offset into in args
-  return result;
-}
-
-const ManagedRegisterEntrySpills& Mips64ManagedRuntimeCallingConvention::EntrySpills() {
-  // We spill the argument registers on MIPS64 to free them up for scratch use,
-  // we then assume all arguments are on the stack.
-  if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
-    int reg_index = 1;   // we start from A1, A0 holds ArtMethod*.
-
-    // We need to choose the correct register size since the managed
-    // stack uses 32bit stack slots.
-    ResetIterator(FrameOffset(0));
-    while (HasNext()) {
-      if (reg_index < 8) {
-        if (IsCurrentParamAFloatOrDouble()) {  // FP regs.
-          FpuRegister arg = kFpuArgumentRegisters[reg_index];
-          Mips64ManagedRegister reg = Mips64ManagedRegister::FromFpuRegister(arg);
-          entry_spills_.push_back(reg, IsCurrentParamADouble() ? 8 : 4);
-        } else {  // GP regs.
-          GpuRegister arg = kGpuArgumentRegisters[reg_index];
-          Mips64ManagedRegister reg = Mips64ManagedRegister::FromGpuRegister(arg);
-          entry_spills_.push_back(reg,
-                                  (IsCurrentParamALong() && (!IsCurrentParamAReference())) ? 8 : 4);
-        }
-        // e.g. A1, A2, F3, A4, F5, F6, A7
-        reg_index++;
-      }
-
-      Next();
-    }
-  }
-  return entry_spills_;
-}
-
-// JNI calling convention
-
-Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static,
-                                                       bool is_synchronized,
-                                                       bool is_critical_native,
-                                                       const char* shorty)
-    : JniCallingConvention(is_static,
-                           is_synchronized,
-                           is_critical_native,
-                           shorty,
-                           kMips64PointerSize) {
-}
-
-uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
-  return kCoreCalleeSpillMask;
-}
-
-uint32_t Mips64JniCallingConvention::FpSpillMask() const {
-  return kFpCalleeSpillMask;
-}
-
-ManagedRegister Mips64JniCallingConvention::ReturnScratchRegister() const {
-  return Mips64ManagedRegister::FromGpuRegister(AT);
-}
-
-size_t Mips64JniCallingConvention::FrameSize() {
-  // ArtMethod*, RA and callee save area size, local reference segment state.
-  size_t method_ptr_size = static_cast<size_t>(kFramePointerSize);
-  size_t ra_and_callee_save_area_size = (CalleeSaveRegisters().size() + 1) * kFramePointerSize;
-
-  size_t frame_data_size = method_ptr_size + ra_and_callee_save_area_size;
-  if (LIKELY(HasLocalReferenceSegmentState())) {                     // Local ref. segment state.
-    // Local reference segment state is sometimes excluded.
-    frame_data_size += sizeof(uint32_t);
-  }
-  // References plus 2 words for HandleScope header.
-  size_t handle_scope_size = HandleScope::SizeOf(kMips64PointerSize, ReferenceCount());
-
-  size_t total_size = frame_data_size;
-  if (LIKELY(HasHandleScope())) {
-    // HandleScope is sometimes excluded.
-    total_size += handle_scope_size;                                 // Handle scope size.
-  }
-
-  // Plus return value spill area size.
-  total_size += SizeOfReturnValue();
-
-  return RoundUp(total_size, kStackAlignment);
-}
-
-size_t Mips64JniCallingConvention::OutArgSize() {
-  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
-}
-
-ArrayRef<const ManagedRegister> Mips64JniCallingConvention::CalleeSaveRegisters() const {
-  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
-}
-
-bool Mips64JniCallingConvention::IsCurrentParamInRegister() {
-  return itr_args_ < kMaxRegisterArguments;
-}
-
-bool Mips64JniCallingConvention::IsCurrentParamOnStack() {
-  return !IsCurrentParamInRegister();
-}
-
-ManagedRegister Mips64JniCallingConvention::CurrentParamRegister() {
-  CHECK(IsCurrentParamInRegister());
-  if (IsCurrentParamAFloatOrDouble()) {
-    return Mips64ManagedRegister::FromFpuRegister(kFpuArgumentRegisters[itr_args_]);
-  } else {
-    return Mips64ManagedRegister::FromGpuRegister(kGpuArgumentRegisters[itr_args_]);
-  }
-}
-
-FrameOffset Mips64JniCallingConvention::CurrentParamStackOffset() {
-  CHECK(IsCurrentParamOnStack());
-  size_t args_on_stack = itr_args_ - kMaxRegisterArguments;
-  size_t offset = displacement_.Int32Value() - OutArgSize() + (args_on_stack * kFramePointerSize);
-  CHECK_LT(offset, OutArgSize());
-  return FrameOffset(offset);
-}
-
-size_t Mips64JniCallingConvention::NumberOfOutgoingStackArgs() {
-  // all arguments including JNI args
-  size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();
-
-  // Nothing on the stack unless there are more than 8 arguments
-  return (all_args > kMaxRegisterArguments) ? all_args - kMaxRegisterArguments : 0;
-}
-}  // namespace mips64
-}  // namespace art
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.h b/compiler/jni/quick/mips64/calling_convention_mips64.h
deleted file mode 100644
index d87f73a..0000000
--- a/compiler/jni/quick/mips64/calling_convention_mips64.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
-#define ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
-
-#include "base/enums.h"
-#include "jni/quick/calling_convention.h"
-
-namespace art {
-namespace mips64 {
-
-constexpr size_t kFramePointerSize = 8;
-static_assert(kFramePointerSize == static_cast<size_t>(PointerSize::k64),
-              "Invalid frame pointer size");
-
-class Mips64ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
- public:
-  Mips64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
-      : ManagedRuntimeCallingConvention(is_static,
-                                        is_synchronized,
-                                        shorty,
-                                        PointerSize::k64) {}
-  ~Mips64ManagedRuntimeCallingConvention() override {}
-  // Calling convention
-  ManagedRegister ReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() override;
-  // Managed runtime calling convention
-  ManagedRegister MethodRegister() override;
-  bool IsCurrentParamInRegister() override;
-  bool IsCurrentParamOnStack() override;
-  ManagedRegister CurrentParamRegister() override;
-  FrameOffset CurrentParamStackOffset() override;
-  const ManagedRegisterEntrySpills& EntrySpills() override;
-
- private:
-  ManagedRegisterEntrySpills entry_spills_;
-
-  DISALLOW_COPY_AND_ASSIGN(Mips64ManagedRuntimeCallingConvention);
-};
-
-class Mips64JniCallingConvention final : public JniCallingConvention {
- public:
-  Mips64JniCallingConvention(bool is_static,
-                             bool is_synchronized,
-                             bool is_critical_native,
-                             const char* shorty);
-  ~Mips64JniCallingConvention() override {}
-  // Calling convention
-  ManagedRegister ReturnRegister() override;
-  ManagedRegister IntReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() override;
-  // JNI calling convention
-  size_t FrameSize() override;
-  size_t OutArgSize() override;
-  ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
-  ManagedRegister ReturnScratchRegister() const override;
-  uint32_t CoreSpillMask() const override;
-  uint32_t FpSpillMask() const override;
-  bool IsCurrentParamInRegister() override;
-  bool IsCurrentParamOnStack() override;
-  ManagedRegister CurrentParamRegister() override;
-  FrameOffset CurrentParamStackOffset() override;
-
-  // Mips64 does not need to extend small return types.
-  bool RequiresSmallResultTypeExtension() const override {
-    return false;
-  }
-
- protected:
-  size_t NumberOfOutgoingStackArgs() override;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(Mips64JniCallingConvention);
-};
-
-}  // namespace mips64
-}  // namespace art
-
-#endif  // ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index 1f255e2..4e643ba 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -19,6 +19,7 @@
 #include <android-base/logging.h>
 
 #include "arch/instruction_set.h"
+#include "arch/x86/jni_frame_x86.h"
 #include "handle_scope-inl.h"
 #include "utils/x86/managed_register_x86.h"
 
@@ -26,7 +27,6 @@
 namespace x86 {
 
 static_assert(kX86PointerSize == PointerSize::k32, "Unexpected x86 pointer size");
-static_assert(kStackAlignment >= 16u, "IA-32 cdecl requires at least 16 byte stack alignment");
 
 static constexpr ManagedRegister kCalleeSaveRegisters[] = {
     // Core registers.
@@ -36,10 +36,12 @@
     // No hard float callee saves.
 };
 
-static constexpr uint32_t CalculateCoreCalleeSpillMask() {
+template <size_t size>
+static constexpr uint32_t CalculateCoreCalleeSpillMask(
+    const ManagedRegister (&callee_saves)[size]) {
   // The spilled PC gets a special marker.
   uint32_t result = 1 << kNumberOfCpuRegisters;
-  for (auto&& r : kCalleeSaveRegisters) {
+  for (auto&& r : callee_saves) {
     if (r.AsX86().IsCpuRegister()) {
       result |= (1 << r.AsX86().AsCpuRegister());
     }
@@ -47,16 +49,29 @@
   return result;
 }
 
-static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
+static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(kCalleeSaveRegisters);
 static constexpr uint32_t kFpCalleeSpillMask = 0u;
 
+static constexpr ManagedRegister kNativeCalleeSaveRegisters[] = {
+    // Core registers.
+    X86ManagedRegister::FromCpuRegister(EBX),
+    X86ManagedRegister::FromCpuRegister(EBP),
+    X86ManagedRegister::FromCpuRegister(ESI),
+    X86ManagedRegister::FromCpuRegister(EDI),
+    // No hard float callee saves.
+};
+
+static constexpr uint32_t kNativeCoreCalleeSpillMask =
+    CalculateCoreCalleeSpillMask(kNativeCalleeSaveRegisters);
+static constexpr uint32_t kNativeFpCalleeSpillMask = 0u;
+
 // Calling convention
 
-ManagedRegister X86ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+ManagedRegister X86ManagedRuntimeCallingConvention::InterproceduralScratchRegister() const {
   return X86ManagedRegister::FromCpuRegister(ECX);
 }
 
-ManagedRegister X86JniCallingConvention::InterproceduralScratchRegister() {
+ManagedRegister X86JniCallingConvention::InterproceduralScratchRegister() const {
   return X86ManagedRegister::FromCpuRegister(ECX);
 }
 
@@ -205,47 +220,85 @@
 }
 
 uint32_t X86JniCallingConvention::CoreSpillMask() const {
-  return kCoreCalleeSpillMask;
+  return is_critical_native_ ? 0u : kCoreCalleeSpillMask;
 }
 
 uint32_t X86JniCallingConvention::FpSpillMask() const {
-  return kFpCalleeSpillMask;
+  return is_critical_native_ ? 0u : kFpCalleeSpillMask;
 }
 
-size_t X86JniCallingConvention::FrameSize() {
+size_t X86JniCallingConvention::FrameSize() const {
+  if (is_critical_native_) {
+    CHECK(!SpillsMethod());
+    CHECK(!HasLocalReferenceSegmentState());
+    CHECK(!HasHandleScope());
+    CHECK(!SpillsReturnValue());
+    return 0u;  // There is no managed frame for @CriticalNative.
+  }
+
   // Method*, PC return address and callee save area size, local reference segment state
+  CHECK(SpillsMethod());
   const size_t method_ptr_size = static_cast<size_t>(kX86PointerSize);
   const size_t pc_return_addr_size = kFramePointerSize;
   const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
-  size_t frame_data_size = method_ptr_size + pc_return_addr_size + callee_save_area_size;
+  size_t total_size = method_ptr_size + pc_return_addr_size + callee_save_area_size;
 
-  if (LIKELY(HasLocalReferenceSegmentState())) {                     // local ref. segment state
-    // Local reference segment state is sometimes excluded.
-    frame_data_size += kFramePointerSize;
-  }
+  CHECK(HasLocalReferenceSegmentState());
+  total_size += kFramePointerSize;
 
-  // References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header
-  const size_t handle_scope_size = HandleScope::SizeOf(kX86PointerSize, ReferenceCount());
-
-  size_t total_size = frame_data_size;
-  if (LIKELY(HasHandleScope())) {
-    // HandleScope is sometimes excluded.
-    total_size += handle_scope_size;                                 // handle scope size
-  }
+  CHECK(HasHandleScope());
+  total_size += HandleScope::SizeOf(kX86_64PointerSize, ReferenceCount());
 
   // Plus return value spill area size
+  CHECK(SpillsReturnValue());
   total_size += SizeOfReturnValue();
 
   return RoundUp(total_size, kStackAlignment);
-  // TODO: Same thing as x64 except using different pointer size. Refactor?
 }
 
-size_t X86JniCallingConvention::OutArgSize() {
-  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
+size_t X86JniCallingConvention::OutArgSize() const {
+  // Count param args, including JNIEnv* and jclass*; count 8-byte args twice.
+  size_t all_args = NumberOfExtraArgumentsForJni() + NumArgs() + NumLongOrDoubleArgs();
+  // The size of outgoiong arguments.
+  size_t size = all_args * kFramePointerSize;
+
+  // @CriticalNative can use tail call as all managed callee saves are preserved by AAPCS.
+  static_assert((kCoreCalleeSpillMask & ~kNativeCoreCalleeSpillMask) == 0u);
+  static_assert((kFpCalleeSpillMask & ~kNativeFpCalleeSpillMask) == 0u);
+
+  if (UNLIKELY(IsCriticalNative())) {
+    // Add return address size for @CriticalNative.
+    // For normal native the return PC is part of the managed stack frame instead of out args.
+    size += kFramePointerSize;
+    // For @CriticalNative, we can make a tail call if there are no stack args
+    // and the return type is not FP type (needs moving from ST0 to MMX0) and
+    // we do not need to extend the result.
+    bool return_type_ok = GetShorty()[0] == 'I' || GetShorty()[0] == 'J' || GetShorty()[0] == 'V';
+    DCHECK_EQ(
+        return_type_ok,
+        GetShorty()[0] != 'F' && GetShorty()[0] != 'D' && !RequiresSmallResultTypeExtension());
+    if (return_type_ok && size == kFramePointerSize) {
+      // Note: This is not aligned to kNativeStackAlignment but that's OK for tail call.
+      static_assert(kFramePointerSize < kNativeStackAlignment);
+      DCHECK_EQ(kFramePointerSize, GetCriticalNativeOutArgsSize(GetShorty(), NumArgs() + 1u));
+      return kFramePointerSize;
+    }
+  }
+
+  size_t out_args_size = RoundUp(size, kNativeStackAlignment);
+  if (UNLIKELY(IsCriticalNative())) {
+    DCHECK_EQ(out_args_size, GetCriticalNativeOutArgsSize(GetShorty(), NumArgs() + 1u));
+  }
+  return out_args_size;
 }
 
 ArrayRef<const ManagedRegister> X86JniCallingConvention::CalleeSaveRegisters() const {
-  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
+  if (UNLIKELY(IsCriticalNative())) {
+    // Do not spill anything, whether tail call or not (return PC is already on the stack).
+    return ArrayRef<const ManagedRegister>();
+  } else {
+    return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
+  }
 }
 
 bool X86JniCallingConvention::IsCurrentParamInRegister() {
@@ -265,15 +318,21 @@
   return FrameOffset(displacement_.Int32Value() - OutArgSize() + (itr_slots_ * kFramePointerSize));
 }
 
-size_t X86JniCallingConvention::NumberOfOutgoingStackArgs() {
-  size_t static_args = HasSelfClass() ? 1 : 0;  // count jclass
-  // regular argument parameters and this
-  size_t param_args = NumArgs() + NumLongOrDoubleArgs();
-  // count JNIEnv* and return pc (pushed after Method*)
-  size_t internal_args = 1 /* return pc */ + (HasJniEnv() ? 1 : 0 /* jni env */);
-  // No register args.
-  size_t total_args = static_args + param_args + internal_args;
-  return total_args;
+ManagedRegister X86JniCallingConvention::HiddenArgumentRegister() const {
+  CHECK(IsCriticalNative());
+  // EAX is neither managed callee-save, nor argument register, nor scratch register.
+  DCHECK(std::none_of(kCalleeSaveRegisters,
+                      kCalleeSaveRegisters + std::size(kCalleeSaveRegisters),
+                      [](ManagedRegister callee_save) constexpr {
+                        return callee_save.Equals(X86ManagedRegister::FromCpuRegister(EAX));
+                      }));
+  DCHECK(!InterproceduralScratchRegister().Equals(X86ManagedRegister::FromCpuRegister(EAX)));
+  return X86ManagedRegister::FromCpuRegister(EAX);
+}
+
+bool X86JniCallingConvention::UseTailCall() const {
+  CHECK(IsCriticalNative());
+  return OutArgSize() == kFramePointerSize;
 }
 
 }  // namespace x86
diff --git a/compiler/jni/quick/x86/calling_convention_x86.h b/compiler/jni/quick/x86/calling_convention_x86.h
index d0c6198..1273e8d 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.h
+++ b/compiler/jni/quick/x86/calling_convention_x86.h
@@ -23,8 +23,6 @@
 namespace art {
 namespace x86 {
 
-constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
-
 class X86ManagedRuntimeCallingConvention final : public ManagedRuntimeCallingConvention {
  public:
   X86ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
@@ -36,7 +34,7 @@
   ~X86ManagedRuntimeCallingConvention() override {}
   // Calling convention
   ManagedRegister ReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() override;
+  ManagedRegister InterproceduralScratchRegister() const override;
   // Managed runtime calling convention
   ManagedRegister MethodRegister() override;
   bool IsCurrentParamInRegister() override;
@@ -63,10 +61,10 @@
   // Calling convention
   ManagedRegister ReturnRegister() override;
   ManagedRegister IntReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() override;
+  ManagedRegister InterproceduralScratchRegister() const override;
   // JNI calling convention
-  size_t FrameSize() override;
-  size_t OutArgSize() override;
+  size_t FrameSize() const override;
+  size_t OutArgSize() const override;
   ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
   ManagedRegister ReturnScratchRegister() const override;
   uint32_t CoreSpillMask() const override;
@@ -78,11 +76,14 @@
 
   // x86 needs to extend small return types.
   bool RequiresSmallResultTypeExtension() const override {
-    return true;
+    return HasSmallReturnType();
   }
 
- protected:
-  size_t NumberOfOutgoingStackArgs() override;
+  // Hidden argument register, used to pass the method pointer for @CriticalNative call.
+  ManagedRegister HiddenArgumentRegister() const override;
+
+  // Whether to use tail call (used only for @CriticalNative).
+  bool UseTailCall() const override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(X86JniCallingConvention);
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 9e77d6b..9013b02 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -19,6 +19,7 @@
 #include <android-base/logging.h>
 
 #include "arch/instruction_set.h"
+#include "arch/x86_64/jni_frame_x86_64.h"
 #include "base/bit_utils.h"
 #include "handle_scope-inl.h"
 #include "utils/x86_64/managed_register_x86_64.h"
@@ -26,18 +27,6 @@
 namespace art {
 namespace x86_64 {
 
-constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);
-static_assert(kX86_64PointerSize == PointerSize::k64, "Unexpected x86_64 pointer size");
-static_assert(kStackAlignment >= 16u, "System V AMD64 ABI requires at least 16 byte stack alignment");
-
-// XMM0..XMM7 can be used to pass the first 8 floating args. The rest must go on the stack.
-// -- Managed and JNI calling conventions.
-constexpr size_t kMaxFloatOrDoubleRegisterArguments = 8u;
-// Up to how many integer-like (pointers, objects, longs, int, short, bool, etc) args can be
-// enregistered. The rest of the args must go on the stack.
-// -- JNI calling convention only (Managed excludes RDI, so it's actually 5).
-constexpr size_t kMaxIntLikeRegisterArguments = 6u;
-
 static constexpr ManagedRegister kCalleeSaveRegisters[] = {
     // Core registers.
     X86_64ManagedRegister::FromCpuRegister(RBX),
@@ -53,37 +42,56 @@
     X86_64ManagedRegister::FromXmmRegister(XMM15),
 };
 
-static constexpr uint32_t CalculateCoreCalleeSpillMask() {
+template <size_t size>
+static constexpr uint32_t CalculateCoreCalleeSpillMask(
+    const ManagedRegister (&callee_saves)[size]) {
   // The spilled PC gets a special marker.
-  uint32_t result = 1 << kNumberOfCpuRegisters;
-  for (auto&& r : kCalleeSaveRegisters) {
+  uint32_t result = 1u << kNumberOfCpuRegisters;
+  for (auto&& r : callee_saves) {
     if (r.AsX86_64().IsCpuRegister()) {
-      result |= (1 << r.AsX86_64().AsCpuRegister().AsRegister());
+      result |= (1u << r.AsX86_64().AsCpuRegister().AsRegister());
     }
   }
   return result;
 }
 
-static constexpr uint32_t CalculateFpCalleeSpillMask() {
-  uint32_t result = 0;
-  for (auto&& r : kCalleeSaveRegisters) {
+template <size_t size>
+static constexpr uint32_t CalculateFpCalleeSpillMask(const ManagedRegister (&callee_saves)[size]) {
+  uint32_t result = 0u;
+  for (auto&& r : callee_saves) {
     if (r.AsX86_64().IsXmmRegister()) {
-      result |= (1 << r.AsX86_64().AsXmmRegister().AsFloatRegister());
+      result |= (1u << r.AsX86_64().AsXmmRegister().AsFloatRegister());
     }
   }
   return result;
 }
 
-static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask();
-static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask();
+static constexpr uint32_t kCoreCalleeSpillMask = CalculateCoreCalleeSpillMask(kCalleeSaveRegisters);
+static constexpr uint32_t kFpCalleeSpillMask = CalculateFpCalleeSpillMask(kCalleeSaveRegisters);
+
+static constexpr ManagedRegister kNativeCalleeSaveRegisters[] = {
+    // Core registers.
+    X86_64ManagedRegister::FromCpuRegister(RBX),
+    X86_64ManagedRegister::FromCpuRegister(RBP),
+    X86_64ManagedRegister::FromCpuRegister(R12),
+    X86_64ManagedRegister::FromCpuRegister(R13),
+    X86_64ManagedRegister::FromCpuRegister(R14),
+    X86_64ManagedRegister::FromCpuRegister(R15),
+    // No callee-save float registers.
+};
+
+static constexpr uint32_t kNativeCoreCalleeSpillMask =
+    CalculateCoreCalleeSpillMask(kNativeCalleeSaveRegisters);
+static constexpr uint32_t kNativeFpCalleeSpillMask =
+    CalculateFpCalleeSpillMask(kNativeCalleeSaveRegisters);
 
 // Calling convention
 
-ManagedRegister X86_64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+ManagedRegister X86_64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() const {
   return X86_64ManagedRegister::FromCpuRegister(RAX);
 }
 
-ManagedRegister X86_64JniCallingConvention::InterproceduralScratchRegister() {
+ManagedRegister X86_64JniCallingConvention::InterproceduralScratchRegister() const {
   return X86_64ManagedRegister::FromCpuRegister(RAX);
 }
 
@@ -149,6 +157,7 @@
 }
 
 FrameOffset X86_64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+  CHECK(IsCurrentParamOnStack());
   return FrameOffset(displacement_.Int32Value() +  // displacement
                      static_cast<size_t>(kX86_64PointerSize) +  // Method ref
                      itr_slots_ * sizeof(uint32_t));  // offset into in args
@@ -187,46 +196,92 @@
 }
 
 uint32_t X86_64JniCallingConvention::CoreSpillMask() const {
-  return kCoreCalleeSpillMask;
+  return is_critical_native_ ? 0u : kCoreCalleeSpillMask;
 }
 
 uint32_t X86_64JniCallingConvention::FpSpillMask() const {
-  return kFpCalleeSpillMask;
+  return is_critical_native_ ? 0u : kFpCalleeSpillMask;
 }
 
-size_t X86_64JniCallingConvention::FrameSize() {
+size_t X86_64JniCallingConvention::FrameSize() const {
+  if (is_critical_native_) {
+    CHECK(!SpillsMethod());
+    CHECK(!HasLocalReferenceSegmentState());
+    CHECK(!HasHandleScope());
+    CHECK(!SpillsReturnValue());
+    return 0u;  // There is no managed frame for @CriticalNative.
+  }
+
   // Method*, PC return address and callee save area size, local reference segment state
+  CHECK(SpillsMethod());
   const size_t method_ptr_size = static_cast<size_t>(kX86_64PointerSize);
   const size_t pc_return_addr_size = kFramePointerSize;
   const size_t callee_save_area_size = CalleeSaveRegisters().size() * kFramePointerSize;
-  size_t frame_data_size = method_ptr_size + pc_return_addr_size + callee_save_area_size;
+  size_t total_size = method_ptr_size + pc_return_addr_size + callee_save_area_size;
 
-  if (LIKELY(HasLocalReferenceSegmentState())) {                     // local ref. segment state
-    // Local reference segment state is sometimes excluded.
-    frame_data_size += kFramePointerSize;
-  }
+  CHECK(HasLocalReferenceSegmentState());
+  total_size += kFramePointerSize;
 
-  // References plus link_ (pointer) and number_of_references_ (uint32_t) for HandleScope header
-  const size_t handle_scope_size = HandleScope::SizeOf(kX86_64PointerSize, ReferenceCount());
-
-  size_t total_size = frame_data_size;
-  if (LIKELY(HasHandleScope())) {
-    // HandleScope is sometimes excluded.
-    total_size += handle_scope_size;                                 // handle scope size
-  }
+  CHECK(HasHandleScope());
+  total_size += HandleScope::SizeOf(kX86_64PointerSize, ReferenceCount());
 
   // Plus return value spill area size
+  CHECK(SpillsReturnValue());
   total_size += SizeOfReturnValue();
 
   return RoundUp(total_size, kStackAlignment);
 }
 
-size_t X86_64JniCallingConvention::OutArgSize() {
-  return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
+size_t X86_64JniCallingConvention::OutArgSize() const {
+  // Count param args, including JNIEnv* and jclass*.
+  size_t all_args = NumberOfExtraArgumentsForJni() + NumArgs();
+  size_t num_fp_args = NumFloatOrDoubleArgs();
+  DCHECK_GE(all_args, num_fp_args);
+  size_t num_non_fp_args = all_args - num_fp_args;
+  // Account for FP arguments passed through Xmm0..Xmm7.
+  size_t num_stack_fp_args =
+      num_fp_args - std::min(kMaxFloatOrDoubleRegisterArguments, num_fp_args);
+  // Account for other (integer) arguments passed through GPR (RDI, RSI, RDX, RCX, R8, R9).
+  size_t num_stack_non_fp_args =
+      num_non_fp_args - std::min(kMaxIntLikeRegisterArguments, num_non_fp_args);
+  // The size of outgoing arguments.
+  static_assert(kFramePointerSize == kMmxSpillSize);
+  size_t size = (num_stack_fp_args + num_stack_non_fp_args) * kFramePointerSize;
+
+  if (UNLIKELY(IsCriticalNative())) {
+    // We always need to spill xmm12-xmm15 as they are managed callee-saves
+    // but not native callee-saves.
+    static_assert((kCoreCalleeSpillMask & ~kNativeCoreCalleeSpillMask) == 0u);
+    static_assert((kFpCalleeSpillMask & ~kNativeFpCalleeSpillMask) != 0u);
+    static_assert(
+        kAlwaysSpilledMmxRegisters == POPCOUNT(kFpCalleeSpillMask & ~kNativeFpCalleeSpillMask));
+    size += kAlwaysSpilledMmxRegisters * kMmxSpillSize;
+    // Add return address size for @CriticalNative
+    // For normal native the return PC is part of the managed stack frame instead of out args.
+    size += kFramePointerSize;
+  }
+
+  size_t out_args_size = RoundUp(size, kNativeStackAlignment);
+  if (UNLIKELY(IsCriticalNative())) {
+    DCHECK_EQ(out_args_size, GetCriticalNativeOutArgsSize(GetShorty(), NumArgs() + 1u));
+  }
+  return out_args_size;
 }
 
 ArrayRef<const ManagedRegister> X86_64JniCallingConvention::CalleeSaveRegisters() const {
-  return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
+  if (UNLIKELY(IsCriticalNative())) {
+    DCHECK(!UseTailCall());
+    static_assert(std::size(kCalleeSaveRegisters) > std::size(kNativeCalleeSaveRegisters));
+    // TODO: Change to static_assert; std::equal should be constexpr since C++20.
+    DCHECK(std::equal(kCalleeSaveRegisters,
+                      kCalleeSaveRegisters + std::size(kNativeCalleeSaveRegisters),
+                      kNativeCalleeSaveRegisters,
+                      [](ManagedRegister lhs, ManagedRegister rhs) { return lhs.Equals(rhs); }));
+    return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters).SubArray(
+        /*pos=*/ std::size(kNativeCalleeSaveRegisters));
+  } else {
+    return ArrayRef<const ManagedRegister>(kCalleeSaveRegisters);
+  }
 }
 
 bool X86_64JniCallingConvention::IsCurrentParamInRegister() {
@@ -271,24 +326,24 @@
   return FrameOffset(offset);
 }
 
-// TODO: Calling this "NumberArgs" is misleading.
-// It's really more like NumberSlots (like itr_slots_)
-// because doubles/longs get counted twice.
-size_t X86_64JniCallingConvention::NumberOfOutgoingStackArgs() {
-  size_t static_args = HasSelfClass() ? 1 : 0;  // count jclass
-  // regular argument parameters and this
-  size_t param_args = NumArgs() + NumLongOrDoubleArgs();
-  // count JNIEnv* and return pc (pushed after Method*)
-  size_t internal_args = 1 /* return pc */ + (HasJniEnv() ? 1 : 0 /* jni env */);
-  size_t total_args = static_args + param_args + internal_args;
+ManagedRegister X86_64JniCallingConvention::HiddenArgumentRegister() const {
+  CHECK(IsCriticalNative());
+  // R11 is neither managed callee-save, nor argument register, nor scratch register.
+  DCHECK(std::none_of(kCalleeSaveRegisters,
+                      kCalleeSaveRegisters + std::size(kCalleeSaveRegisters),
+                      [](ManagedRegister callee_save) constexpr {
+                        return callee_save.Equals(X86_64ManagedRegister::FromCpuRegister(R11));
+                      }));
+  DCHECK(!InterproceduralScratchRegister().Equals(X86_64ManagedRegister::FromCpuRegister(R11)));
+  return X86_64ManagedRegister::FromCpuRegister(R11);
+}
 
-  // Float arguments passed through Xmm0..Xmm7
-  // Other (integer) arguments passed through GPR (RDI, RSI, RDX, RCX, R8, R9)
-  size_t total_stack_args = total_args
-                            - std::min(kMaxFloatOrDoubleRegisterArguments, static_cast<size_t>(NumFloatOrDoubleArgs()))
-                            - std::min(kMaxIntLikeRegisterArguments, static_cast<size_t>(NumArgs() - NumFloatOrDoubleArgs()));
-
-  return total_stack_args;
+// Whether to use tail call (used only for @CriticalNative).
+bool X86_64JniCallingConvention::UseTailCall() const {
+  CHECK(IsCriticalNative());
+  // We always need to spill xmm12-xmm15 as they are managed callee-saves
+  // but not native callee-saves, so we can never use a tail call.
+  return false;
 }
 
 }  // namespace x86_64
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.h b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
index dfab41b..37b5978 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.h
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.h
@@ -33,7 +33,7 @@
   ~X86_64ManagedRuntimeCallingConvention() override {}
   // Calling convention
   ManagedRegister ReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() override;
+  ManagedRegister InterproceduralScratchRegister() const override;
   // Managed runtime calling convention
   ManagedRegister MethodRegister() override;
   bool IsCurrentParamInRegister() override;
@@ -56,10 +56,10 @@
   // Calling convention
   ManagedRegister ReturnRegister() override;
   ManagedRegister IntReturnRegister() override;
-  ManagedRegister InterproceduralScratchRegister() override;
+  ManagedRegister InterproceduralScratchRegister() const override;
   // JNI calling convention
-  size_t FrameSize() override;
-  size_t OutArgSize() override;
+  size_t FrameSize() const override;
+  size_t OutArgSize() const override;
   ArrayRef<const ManagedRegister> CalleeSaveRegisters() const override;
   ManagedRegister ReturnScratchRegister() const override;
   uint32_t CoreSpillMask() const override;
@@ -71,11 +71,14 @@
 
   // x86-64 needs to extend small return types.
   bool RequiresSmallResultTypeExtension() const override {
-    return true;
+    return HasSmallReturnType();
   }
 
- protected:
-  size_t NumberOfOutgoingStackArgs() override;
+  // Hidden argument register, used to pass the method pointer for @CriticalNative call.
+  ManagedRegister HiddenArgumentRegister() const override;
+
+  // Whether to use tail call (used only for @CriticalNative).
+  bool UseTailCall() const override;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(X86_64JniCallingConvention);
diff --git a/compiler/linker/linker_patch.h b/compiler/linker/linker_patch.h
index f9e3930..1c523de 100644
--- a/compiler/linker/linker_patch.h
+++ b/compiler/linker/linker_patch.h
@@ -52,6 +52,7 @@
     kTypeBssEntry,
     kStringRelative,
     kStringBssEntry,
+    kCallEntrypoint,
     kBakerReadBarrierBranch,
   };
 
@@ -141,6 +142,15 @@
     return patch;
   }
 
+  static LinkerPatch CallEntrypointPatch(size_t literal_offset,
+                                         uint32_t entrypoint_offset) {
+    LinkerPatch patch(literal_offset,
+                      Type::kCallEntrypoint,
+                      /* target_dex_file= */ nullptr);
+    patch.entrypoint_offset_ = entrypoint_offset;
+    return patch;
+  }
+
   static LinkerPatch BakerReadBarrierBranchPatch(size_t literal_offset,
                                                  uint32_t custom_value1 = 0u,
                                                  uint32_t custom_value2 = 0u) {
@@ -216,6 +226,11 @@
     return pc_insn_offset_;
   }
 
+  uint32_t EntrypointOffset() const {
+    DCHECK(patch_type_ == Type::kCallEntrypoint);
+    return entrypoint_offset_;
+  }
+
   uint32_t GetBakerCustomValue1() const {
     DCHECK(patch_type_ == Type::kBakerReadBarrierBranch);
     return baker_custom_value1_;
@@ -249,6 +264,7 @@
     uint32_t type_idx_;           // Type index for Type patches.
     uint32_t string_idx_;         // String index for String patches.
     uint32_t intrinsic_data_;     // Data for IntrinsicObjects.
+    uint32_t entrypoint_offset_;  // Entrypoint offset in the Thread object.
     uint32_t baker_custom_value1_;
     static_assert(sizeof(method_idx_) == sizeof(cmp1_), "needed by relational operators");
     static_assert(sizeof(type_idx_) == sizeof(cmp1_), "needed by relational operators");
diff --git a/compiler/optimizing/block_builder.cc b/compiler/optimizing/block_builder.cc
index a5f78ca..e1f061a 100644
--- a/compiler/optimizing/block_builder.cc
+++ b/compiler/optimizing/block_builder.cc
@@ -398,6 +398,48 @@
   }
 }
 
+void HBasicBlockBuilder::InsertSynthesizedLoopsForOsr() {
+  ArenaSet<uint32_t> targets(allocator_->Adapter(kArenaAllocGraphBuilder));
+  // Collect basic blocks that are targets of a negative branch.
+  for (const DexInstructionPcPair& pair : code_item_accessor_) {
+    const uint32_t dex_pc = pair.DexPc();
+    const Instruction& instruction = pair.Inst();
+    if (instruction.IsBranch()) {
+      uint32_t target_dex_pc = dex_pc + instruction.GetTargetOffset();
+      if (target_dex_pc < dex_pc) {
+        HBasicBlock* block = GetBlockAt(target_dex_pc);
+        CHECK_NE(kNoDexPc, block->GetDexPc());
+        targets.insert(block->GetBlockId());
+      }
+    } else if (instruction.IsSwitch()) {
+      DexSwitchTable table(instruction, dex_pc);
+      for (DexSwitchTableIterator s_it(table); !s_it.Done(); s_it.Advance()) {
+        uint32_t target_dex_pc = dex_pc + s_it.CurrentTargetOffset();
+        if (target_dex_pc < dex_pc) {
+          HBasicBlock* block = GetBlockAt(target_dex_pc);
+          CHECK_NE(kNoDexPc, block->GetDexPc());
+          targets.insert(block->GetBlockId());
+        }
+      }
+    }
+  }
+
+  // Insert synthesized loops before the collected blocks.
+  for (uint32_t block_id : targets) {
+    HBasicBlock* block = graph_->GetBlocks()[block_id];
+    HBasicBlock* loop_block = new (allocator_) HBasicBlock(graph_, block->GetDexPc());
+    graph_->AddBlock(loop_block);
+    while (!block->GetPredecessors().empty()) {
+      block->GetPredecessors()[0]->ReplaceSuccessor(block, loop_block);
+    }
+    loop_block->AddSuccessor(loop_block);
+    loop_block->AddSuccessor(block);
+    // We loop on false - we know this won't be optimized later on as the loop
+    // is marked irreducible, which disables loop optimizations.
+    loop_block->AddInstruction(new (allocator_) HIf(graph_->GetIntConstant(0), kNoDexPc));
+  }
+}
+
 bool HBasicBlockBuilder::Build() {
   DCHECK(code_item_accessor_.HasCodeItem());
   DCHECK(graph_->GetBlocks().empty());
@@ -413,6 +455,10 @@
   ConnectBasicBlocks();
   InsertTryBoundaryBlocks();
 
+  if (graph_->IsCompilingOsr()) {
+    InsertSynthesizedLoopsForOsr();
+  }
+
   return true;
 }
 
diff --git a/compiler/optimizing/block_builder.h b/compiler/optimizing/block_builder.h
index 2c1f034..42a3f32 100644
--- a/compiler/optimizing/block_builder.h
+++ b/compiler/optimizing/block_builder.h
@@ -59,6 +59,11 @@
   void ConnectBasicBlocks();
   void InsertTryBoundaryBlocks();
 
+  // To ensure branches with negative offsets can always OSR jump to compiled
+  // code, we insert synthesized loops before each block that is the target of a
+  // negative branch.
+  void InsertSynthesizedLoopsForOsr();
+
   // Helper method which decides whether `catch_block` may have live normal
   // predecessors and thus whether a synthetic catch block needs to be created
   // to avoid mixing normal and exceptional predecessors.
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 2bbb570..cfd9ea6 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -32,14 +32,6 @@
 #include "code_generator_x86_64.h"
 #endif
 
-#ifdef ART_ENABLE_CODEGEN_mips
-#include "code_generator_mips.h"
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_mips64
-#include "code_generator_mips64.h"
-#endif
-
 #include "base/bit_utils.h"
 #include "base/bit_utils_iterator.h"
 #include "base/casts.h"
@@ -64,6 +56,7 @@
 #include "ssa_liveness_analysis.h"
 #include "stack_map.h"
 #include "stack_map_stream.h"
+#include "string_builder_append.h"
 #include "thread-current-inl.h"
 #include "utils/assembler.h"
 
@@ -394,7 +387,8 @@
   GetStackMapStream()->BeginMethod(HasEmptyFrame() ? 0 : frame_size_,
                                    core_spill_mask_,
                                    fpu_spill_mask_,
-                                   GetGraph()->GetNumberOfVRegs());
+                                   GetGraph()->GetNumberOfVRegs(),
+                                   GetGraph()->IsCompilingBaseline());
 
   size_t frame_start = GetAssembler()->CodeSize();
   GenerateFrameEntry();
@@ -599,6 +593,57 @@
   InvokeRuntime(entrypoint, invoke, invoke->GetDexPc(), nullptr);
 }
 
+void CodeGenerator::CreateStringBuilderAppendLocations(HStringBuilderAppend* instruction,
+                                                       Location out) {
+  ArenaAllocator* allocator = GetGraph()->GetAllocator();
+  LocationSummary* locations =
+      new (allocator) LocationSummary(instruction, LocationSummary::kCallOnMainOnly);
+  locations->SetOut(out);
+  instruction->GetLocations()->SetInAt(instruction->FormatIndex(),
+                                       Location::ConstantLocation(instruction->GetFormat()));
+
+  uint32_t format = static_cast<uint32_t>(instruction->GetFormat()->GetValue());
+  uint32_t f = format;
+  PointerSize pointer_size = InstructionSetPointerSize(GetInstructionSet());
+  size_t stack_offset = static_cast<size_t>(pointer_size);  // Start after the ArtMethod*.
+  for (size_t i = 0, num_args = instruction->GetNumberOfArguments(); i != num_args; ++i) {
+    StringBuilderAppend::Argument arg_type =
+        static_cast<StringBuilderAppend::Argument>(f & StringBuilderAppend::kArgMask);
+    switch (arg_type) {
+      case StringBuilderAppend::Argument::kStringBuilder:
+      case StringBuilderAppend::Argument::kString:
+      case StringBuilderAppend::Argument::kCharArray:
+        static_assert(sizeof(StackReference<mirror::Object>) == sizeof(uint32_t), "Size check.");
+        FALLTHROUGH_INTENDED;
+      case StringBuilderAppend::Argument::kBoolean:
+      case StringBuilderAppend::Argument::kChar:
+      case StringBuilderAppend::Argument::kInt:
+      case StringBuilderAppend::Argument::kFloat:
+        locations->SetInAt(i, Location::StackSlot(stack_offset));
+        break;
+      case StringBuilderAppend::Argument::kLong:
+      case StringBuilderAppend::Argument::kDouble:
+        stack_offset = RoundUp(stack_offset, sizeof(uint64_t));
+        locations->SetInAt(i, Location::DoubleStackSlot(stack_offset));
+        // Skip the low word, let the common code skip the high word.
+        stack_offset += sizeof(uint32_t);
+        break;
+      default:
+        LOG(FATAL) << "Unexpected arg format: 0x" << std::hex
+            << (f & StringBuilderAppend::kArgMask) << " full format: 0x" << format;
+        UNREACHABLE();
+    }
+    f >>= StringBuilderAppend::kBitsPerArg;
+    stack_offset += sizeof(uint32_t);
+  }
+  DCHECK_EQ(f, 0u);
+
+  size_t param_size = stack_offset - static_cast<size_t>(pointer_size);
+  DCHECK_ALIGNED(param_size, kVRegSize);
+  size_t num_vregs = param_size / kVRegSize;
+  graph_->UpdateMaximumNumberOfOutVRegs(num_vregs);
+}
+
 void CodeGenerator::CreateUnresolvedFieldLocationSummary(
     HInstruction* field_access,
     DataType::Type field_type,
@@ -897,18 +942,6 @@
           new (allocator) arm64::CodeGeneratorARM64(graph, compiler_options, stats));
     }
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips
-    case InstructionSet::kMips: {
-      return std::unique_ptr<CodeGenerator>(
-          new (allocator) mips::CodeGeneratorMIPS(graph, compiler_options, stats));
-    }
-#endif
-#ifdef ART_ENABLE_CODEGEN_mips64
-    case InstructionSet::kMips64: {
-      return std::unique_ptr<CodeGenerator>(
-          new (allocator) mips64::CodeGeneratorMIPS64(graph, compiler_options, stats));
-    }
-#endif
 #ifdef ART_ENABLE_CODEGEN_x86
     case InstructionSet::kX86: {
       return std::unique_ptr<CodeGenerator>(
@@ -958,6 +991,20 @@
       is_leaf_(true),
       requires_current_method_(false),
       code_generation_data_() {
+  if (GetGraph()->IsCompilingOsr()) {
+    // Make OSR methods have all registers spilled, this simplifies the logic of
+    // jumping to the compiled code directly.
+    for (size_t i = 0; i < number_of_core_registers_; ++i) {
+      if (IsCoreCalleeSaveRegister(i)) {
+        AddAllocatedRegister(Location::RegisterLocation(i));
+      }
+    }
+    for (size_t i = 0; i < number_of_fpu_registers_; ++i) {
+      if (IsFloatingPointCalleeSaveRegister(i)) {
+        AddAllocatedRegister(Location::FpuRegisterLocation(i));
+      }
+    }
+  }
 }
 
 CodeGenerator::~CodeGenerator() {}
@@ -1036,10 +1083,42 @@
   return stack_map;
 }
 
+// Returns whether stackmap dex register info is needed for the instruction.
+//
+// The following cases mandate having a dex register map:
+//  * Deoptimization
+//    when we need to obtain the values to restore actual vregisters for interpreter.
+//  * Debuggability
+//    when we want to observe the values / asynchronously deoptimize.
+//  * Monitor operations
+//    to allow dumping in a stack trace locked dex registers for non-debuggable code.
+//  * On-stack-replacement (OSR)
+//    when entering compiled for OSR code from the interpreter we need to initialize the compiled
+//    code values with the values from the vregisters.
+//  * Method local catch blocks
+//    a catch block must see the environment of the instruction from the same method that can
+//    throw to this block.
+static bool NeedsVregInfo(HInstruction* instruction, bool osr) {
+  HGraph* graph = instruction->GetBlock()->GetGraph();
+  return instruction->IsDeoptimize() ||
+         graph->IsDebuggable() ||
+         graph->HasMonitorOperations() ||
+         osr ||
+         instruction->CanThrowIntoCatchBlock();
+}
+
 void CodeGenerator::RecordPcInfo(HInstruction* instruction,
                                  uint32_t dex_pc,
                                  SlowPathCode* slow_path,
                                  bool native_debug_info) {
+  RecordPcInfo(instruction, dex_pc, GetAssembler()->CodePosition(), slow_path, native_debug_info);
+}
+
+void CodeGenerator::RecordPcInfo(HInstruction* instruction,
+                                 uint32_t dex_pc,
+                                 uint32_t native_pc,
+                                 SlowPathCode* slow_path,
+                                 bool native_debug_info) {
   if (instruction != nullptr) {
     // The code generated for some type conversions
     // may call the runtime, thus normally requiring a subsequent
@@ -1063,9 +1142,6 @@
     }
   }
 
-  // Collect PC infos for the mapping table.
-  uint32_t native_pc = GetAssembler()->CodePosition();
-
   StackMapStream* stack_map_stream = GetStackMapStream();
   if (instruction == nullptr) {
     // For stack overflow checks and native-debug-info entries without dex register
@@ -1114,12 +1190,15 @@
   StackMap::Kind kind = native_debug_info
       ? StackMap::Kind::Debug
       : (osr ? StackMap::Kind::OSR : StackMap::Kind::Default);
+  bool needs_vreg_info = NeedsVregInfo(instruction, osr);
   stack_map_stream->BeginStackMapEntry(outer_dex_pc,
                                        native_pc,
                                        register_mask,
                                        locations->GetStackMask(),
-                                       kind);
-  EmitEnvironment(environment, slow_path);
+                                       kind,
+                                       needs_vreg_info);
+
+  EmitEnvironment(environment, slow_path, needs_vreg_info);
   stack_map_stream->EndStackMapEntry();
 
   if (osr) {
@@ -1232,19 +1311,8 @@
   code_generation_data_->AddSlowPath(slow_path);
 }
 
-void CodeGenerator::EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path) {
-  if (environment == nullptr) return;
-
+void CodeGenerator::EmitVRegInfo(HEnvironment* environment, SlowPathCode* slow_path) {
   StackMapStream* stack_map_stream = GetStackMapStream();
-  if (environment->GetParent() != nullptr) {
-    // We emit the parent environment first.
-    EmitEnvironment(environment->GetParent(), slow_path);
-    stack_map_stream->BeginInlineInfoEntry(environment->GetMethod(),
-                                           environment->GetDexPc(),
-                                           environment->Size(),
-                                           &graph_->GetDexFile());
-  }
-
   // Walk over the environment, and record the location of dex registers.
   for (size_t i = 0, environment_size = environment->Size(); i < environment_size; ++i) {
     HInstruction* current = environment->GetInstructionAt(i);
@@ -1389,8 +1457,31 @@
         LOG(FATAL) << "Unexpected kind " << location.GetKind();
     }
   }
+}
 
-  if (environment->GetParent() != nullptr) {
+void CodeGenerator::EmitEnvironment(HEnvironment* environment,
+                                    SlowPathCode* slow_path,
+                                    bool needs_vreg_info) {
+  if (environment == nullptr) return;
+
+  StackMapStream* stack_map_stream = GetStackMapStream();
+  bool emit_inline_info = environment->GetParent() != nullptr;
+
+  if (emit_inline_info) {
+    // We emit the parent environment first.
+    EmitEnvironment(environment->GetParent(), slow_path, needs_vreg_info);
+    stack_map_stream->BeginInlineInfoEntry(environment->GetMethod(),
+                                           environment->GetDexPc(),
+                                           needs_vreg_info ? environment->Size() : 0,
+                                           &graph_->GetDexFile());
+  }
+
+  if (needs_vreg_info) {
+    // If a dex register map is not required we just won't emit it.
+    EmitVRegInfo(environment, slow_path);
+  }
+
+  if (emit_inline_info) {
     stack_map_stream->EndInlineInfoEntry();
   }
 }
@@ -1402,7 +1493,7 @@
 void CodeGenerator::MaybeRecordImplicitNullCheck(HInstruction* instr) {
   HNullCheck* null_check = instr->GetImplicitNullCheck();
   if (null_check != nullptr) {
-    RecordPcInfo(null_check, null_check->GetDexPc());
+    RecordPcInfo(null_check, null_check->GetDexPc(), GetAssembler()->CodePosition());
   }
 }
 
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index f70ecb6..9e3e454 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -222,7 +222,19 @@
   virtual Assembler* GetAssembler() = 0;
   virtual const Assembler& GetAssembler() const = 0;
   virtual size_t GetWordSize() const = 0;
-  virtual size_t GetFloatingPointSpillSlotSize() const = 0;
+
+  // Get FP register width in bytes for spilling/restoring in the slow paths.
+  //
+  // Note: In SIMD graphs this should return SIMD register width as all FP and SIMD registers
+  // alias and live SIMD registers are forced to be spilled in full size in the slow paths.
+  virtual size_t GetSlowPathFPWidth() const {
+    // Default implementation.
+    return GetCalleePreservedFPWidth();
+  }
+
+  // Get FP register width required to be preserved by the target ABI.
+  virtual size_t GetCalleePreservedFPWidth() const  = 0;
+
   virtual uintptr_t GetAddressOf(HBasicBlock* block) = 0;
   void InitializeCodeGeneration(size_t number_of_spill_slots,
                                 size_t maximum_safepoint_spill_size,
@@ -319,20 +331,36 @@
     return GetFrameSize() - FrameEntrySpillSize() - kShouldDeoptimizeFlagSize;
   }
 
-  // Record native to dex mapping for a suspend point.  Required by runtime.
+  // Record native to dex mapping for a suspend point. Required by runtime.
+  void RecordPcInfo(HInstruction* instruction,
+                    uint32_t dex_pc,
+                    uint32_t native_pc,
+                    SlowPathCode* slow_path = nullptr,
+                    bool native_debug_info = false);
+
+  // Record native to dex mapping for a suspend point.
+  // The native_pc is used from Assembler::CodePosition.
+  //
+  // Note: As Assembler::CodePosition is target dependent, it does not guarantee the exact native_pc
+  // for the instruction. If the exact native_pc is required it must be provided explicitly.
   void RecordPcInfo(HInstruction* instruction,
                     uint32_t dex_pc,
                     SlowPathCode* slow_path = nullptr,
                     bool native_debug_info = false);
+
   // Check whether we have already recorded mapping at this PC.
   bool HasStackMapAtCurrentPc();
+
   // Record extra stack maps if we support native debugging.
+  //
+  // ARM specific behaviour: The recorded native PC might be a branch over pools to instructions
+  // corresponding the dex PC.
   void MaybeRecordNativeDebugInfo(HInstruction* instruction,
                                   uint32_t dex_pc,
                                   SlowPathCode* slow_path = nullptr);
 
   bool CanMoveNullCheckToUser(HNullCheck* null_check);
-  void MaybeRecordImplicitNullCheck(HInstruction* instruction);
+  virtual void MaybeRecordImplicitNullCheck(HInstruction* instruction);
   LocationSummary* CreateThrowingSlowPathLocations(
       HInstruction* instruction, RegisterSet caller_saves = RegisterSet::Empty());
   void GenerateNullCheck(HNullCheck* null_check);
@@ -546,6 +574,8 @@
 
   void GenerateInvokeCustomCall(HInvokeCustom* invoke);
 
+  void CreateStringBuilderAppendLocations(HStringBuilderAppend* instruction, Location out);
+
   void CreateUnresolvedFieldLocationSummary(
       HInstruction* field_access,
       DataType::Type field_type,
@@ -673,7 +703,7 @@
   }
 
   uint32_t GetFpuSpillSize() const {
-    return POPCOUNT(fpu_spill_mask_) * GetFloatingPointSpillSlotSize();
+    return POPCOUNT(fpu_spill_mask_) * GetCalleePreservedFPWidth();
   }
 
   uint32_t GetCoreSpillSize() const {
@@ -759,7 +789,10 @@
   size_t GetStackOffsetOfSavedRegister(size_t index);
   void GenerateSlowPaths();
   void BlockIfInRegister(Location location, bool is_out = false) const;
-  void EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path);
+  void EmitEnvironment(HEnvironment* environment,
+                       SlowPathCode* slow_path,
+                       bool needs_vreg_info = true);
+  void EmitVRegInfo(HEnvironment* environment, SlowPathCode* slow_path);
 
   OptimizingCompilerStats* stats_;
 
@@ -788,6 +821,8 @@
   std::unique_ptr<CodeGenerationData> code_generation_data_;
 
   friend class OptimizingCFITest;
+  ART_FRIEND_TEST(CodegenTest, ARM64FrameSizeSIMD);
+  ART_FRIEND_TEST(CodegenTest, ARM64FrameSizeNoSIMD);
 
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a436b98..7d1b0ea 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -18,7 +18,7 @@
 
 #include "arch/arm64/asm_support_arm64.h"
 #include "arch/arm64/instruction_set_features_arm64.h"
-#include "art_method.h"
+#include "art_method-inl.h"
 #include "base/bit_utils.h"
 #include "base/bit_utils_iterator.h"
 #include "class_table.h"
@@ -224,12 +224,13 @@
     stack_offset += kXRegSizeInBytes;
   }
 
+  const size_t fp_reg_size = codegen->GetGraph()->HasSIMD() ? kQRegSizeInBytes : kDRegSizeInBytes;
   const uint32_t fp_spills = codegen->GetSlowPathSpills(locations, /* core_registers= */ false);
   for (uint32_t i : LowToHighBits(fp_spills)) {
     DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
     DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
     saved_fpu_stack_offsets_[i] = stack_offset;
-    stack_offset += kDRegSizeInBytes;
+    stack_offset += fp_reg_size;
   }
 
   SaveRestoreLiveRegistersHelper(codegen,
@@ -887,18 +888,19 @@
       move_resolver_(graph->GetAllocator(), this),
       assembler_(graph->GetAllocator(),
                  compiler_options.GetInstructionSetFeatures()->AsArm64InstructionSetFeatures()),
-      uint32_literals_(std::less<uint32_t>(),
-                       graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      uint64_literals_(std::less<uint64_t>(),
-                       graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       boot_image_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       boot_image_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      boot_image_intrinsic_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+      boot_image_other_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+      call_entrypoint_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       baker_read_barrier_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+      uint32_literals_(std::less<uint32_t>(),
+                       graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+      uint64_literals_(std::less<uint64_t>(),
+                       graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       jit_string_patches_(StringReferenceValueComparator(),
                           graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       jit_class_patches_(TypeReferenceValueComparator(),
@@ -1059,18 +1061,69 @@
   codegen_->MoveLocation(move->GetDestination(), move->GetSource(), DataType::Type::kVoid);
 }
 
+void CodeGeneratorARM64::MaybeIncrementHotness(bool is_frame_entry) {
+  MacroAssembler* masm = GetVIXLAssembler();
+  if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+    UseScratchRegisterScope temps(masm);
+    Register counter = temps.AcquireX();
+    Register method = is_frame_entry ? kArtMethodRegister : temps.AcquireX();
+    if (!is_frame_entry) {
+      __ Ldr(method, MemOperand(sp, 0));
+    }
+    __ Ldrh(counter, MemOperand(method, ArtMethod::HotnessCountOffset().Int32Value()));
+    __ Add(counter, counter, 1);
+    // Subtract one if the counter would overflow.
+    __ Sub(counter, counter, Operand(counter, LSR, 16));
+    __ Strh(counter, MemOperand(method, ArtMethod::HotnessCountOffset().Int32Value()));
+  }
+
+  if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+    ScopedObjectAccess soa(Thread::Current());
+    ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
+    if (info != nullptr) {
+      uint64_t address = reinterpret_cast64<uint64_t>(info);
+      vixl::aarch64::Label done;
+      UseScratchRegisterScope temps(masm);
+      Register temp = temps.AcquireX();
+      Register counter = temps.AcquireW();
+      __ Mov(temp, address);
+      __ Ldrh(counter, MemOperand(temp, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()));
+      __ Add(counter, counter, 1);
+      __ Strh(counter, MemOperand(temp, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()));
+      __ Tst(counter, 0xffff);
+      __ B(ne, &done);
+      if (is_frame_entry) {
+        if (HasEmptyFrame()) {
+          // The entyrpoint expects the method at the bottom of the stack. We
+          // claim stack space necessary for alignment.
+          __ Claim(kStackAlignment);
+          __ Stp(kArtMethodRegister, lr, MemOperand(sp, 0));
+        } else if (!RequiresCurrentMethod()) {
+          __ Str(kArtMethodRegister, MemOperand(sp, 0));
+        }
+      } else {
+        CHECK(RequiresCurrentMethod());
+      }
+      uint32_t entrypoint_offset =
+          GetThreadOffset<kArm64PointerSize>(kQuickCompileOptimized).Int32Value();
+      __ Ldr(lr, MemOperand(tr, entrypoint_offset));
+      // Note: we don't record the call here (and therefore don't generate a stack
+      // map), as the entrypoint should never be suspended.
+      __ Blr(lr);
+      if (HasEmptyFrame()) {
+        CHECK(is_frame_entry);
+        __ Ldr(lr, MemOperand(sp, 8));
+        __ Drop(kStackAlignment);
+      }
+      __ Bind(&done);
+    }
+  }
+}
+
 void CodeGeneratorARM64::GenerateFrameEntry() {
   MacroAssembler* masm = GetVIXLAssembler();
   __ Bind(&frame_entry_label_);
 
-  if (GetCompilerOptions().CountHotnessInCompiledCode()) {
-    UseScratchRegisterScope temps(masm);
-    Register temp = temps.AcquireX();
-    __ Ldrh(temp, MemOperand(kArtMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
-    __ Add(temp, temp, 1);
-    __ Strh(temp, MemOperand(kArtMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
-  }
-
   bool do_overflow_check =
       FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm64) || !IsLeafMethod();
   if (do_overflow_check) {
@@ -1089,27 +1142,42 @@
   }
 
   if (!HasEmptyFrame()) {
-    int frame_size = GetFrameSize();
     // Stack layout:
     //      sp[frame_size - 8]        : lr.
     //      ...                       : other preserved core registers.
     //      ...                       : other preserved fp registers.
     //      ...                       : reserved frame space.
     //      sp[0]                     : current method.
+    int32_t frame_size = dchecked_integral_cast<int32_t>(GetFrameSize());
+    uint32_t core_spills_offset = frame_size - GetCoreSpillSize();
+    CPURegList preserved_core_registers = GetFramePreservedCoreRegisters();
+    DCHECK(!preserved_core_registers.IsEmpty());
+    uint32_t fp_spills_offset = frame_size - FrameEntrySpillSize();
+    CPURegList preserved_fp_registers = GetFramePreservedFPRegisters();
 
-    // Save the current method if we need it. Note that we do not
-    // do this in HCurrentMethod, as the instruction might have been removed
-    // in the SSA graph.
-    if (RequiresCurrentMethod()) {
+    // Save the current method if we need it, or if using STP reduces code
+    // size. Note that we do not do this in HCurrentMethod, as the
+    // instruction might have been removed in the SSA graph.
+    CPURegister lowest_spill;
+    if (core_spills_offset == kXRegSizeInBytes) {
+      // If there is no gap between the method and the lowest core spill, use
+      // aligned STP pre-index to store both. Max difference is 512. We do
+      // that to reduce code size even if we do not have to save the method.
+      DCHECK_LE(frame_size, 512);  // 32 core registers are only 256 bytes.
+      lowest_spill = preserved_core_registers.PopLowestIndex();
+      __ Stp(kArtMethodRegister, lowest_spill, MemOperand(sp, -frame_size, PreIndex));
+    } else if (RequiresCurrentMethod()) {
       __ Str(kArtMethodRegister, MemOperand(sp, -frame_size, PreIndex));
     } else {
       __ Claim(frame_size);
     }
     GetAssembler()->cfi().AdjustCFAOffset(frame_size);
-    GetAssembler()->SpillRegisters(GetFramePreservedCoreRegisters(),
-        frame_size - GetCoreSpillSize());
-    GetAssembler()->SpillRegisters(GetFramePreservedFPRegisters(),
-        frame_size - FrameEntrySpillSize());
+    if (lowest_spill.IsValid()) {
+      GetAssembler()->cfi().RelOffset(DWARFReg(lowest_spill), core_spills_offset);
+      core_spills_offset += kXRegSizeInBytes;
+    }
+    GetAssembler()->SpillRegisters(preserved_core_registers, core_spills_offset);
+    GetAssembler()->SpillRegisters(preserved_fp_registers, fp_spills_offset);
 
     if (GetGraph()->HasShouldDeoptimizeFlag()) {
       // Initialize should_deoptimize flag to 0.
@@ -1117,19 +1185,37 @@
       __ Str(wzr, MemOperand(sp, GetStackOffsetOfShouldDeoptimizeFlag()));
     }
   }
-
+  MaybeIncrementHotness(/* is_frame_entry= */ true);
   MaybeGenerateMarkingRegisterCheck(/* code= */ __LINE__);
 }
 
 void CodeGeneratorARM64::GenerateFrameExit() {
   GetAssembler()->cfi().RememberState();
   if (!HasEmptyFrame()) {
-    int frame_size = GetFrameSize();
-    GetAssembler()->UnspillRegisters(GetFramePreservedFPRegisters(),
-        frame_size - FrameEntrySpillSize());
-    GetAssembler()->UnspillRegisters(GetFramePreservedCoreRegisters(),
-        frame_size - GetCoreSpillSize());
-    __ Drop(frame_size);
+    int32_t frame_size = dchecked_integral_cast<int32_t>(GetFrameSize());
+    uint32_t core_spills_offset = frame_size - GetCoreSpillSize();
+    CPURegList preserved_core_registers = GetFramePreservedCoreRegisters();
+    DCHECK(!preserved_core_registers.IsEmpty());
+    uint32_t fp_spills_offset = frame_size - FrameEntrySpillSize();
+    CPURegList preserved_fp_registers = GetFramePreservedFPRegisters();
+
+    CPURegister lowest_spill;
+    if (core_spills_offset == kXRegSizeInBytes) {
+      // If there is no gap between the method and the lowest core spill, use
+      // aligned LDP pre-index to pop both. Max difference is 504. We do
+      // that to reduce code size even though the loaded method is unused.
+      DCHECK_LE(frame_size, 504);  // 32 core registers are only 256 bytes.
+      lowest_spill = preserved_core_registers.PopLowestIndex();
+      core_spills_offset += kXRegSizeInBytes;
+    }
+    GetAssembler()->UnspillRegisters(preserved_fp_registers, fp_spills_offset);
+    GetAssembler()->UnspillRegisters(preserved_core_registers, core_spills_offset);
+    if (lowest_spill.IsValid()) {
+      __ Ldp(xzr, lowest_spill, MemOperand(sp, frame_size, PostIndex));
+      GetAssembler()->cfi().Restore(DWARFReg(lowest_spill));
+    } else {
+      __ Drop(frame_size);
+    }
     GetAssembler()->cfi().AdjustCFAOffset(-frame_size);
   }
   __ Ret();
@@ -1146,7 +1232,7 @@
 CPURegList CodeGeneratorARM64::GetFramePreservedFPRegisters() const {
   DCHECK(ArtVixlRegCodeCoherentForRegSet(0, 0, fpu_spill_mask_,
                                          GetNumberOfFloatingPointRegisters()));
-  return CPURegList(CPURegister::kFPRegister, kDRegSize,
+  return CPURegList(CPURegister::kVRegister, kDRegSize,
                     fpu_spill_mask_);
 }
 
@@ -1245,16 +1331,18 @@
   return kArm64WordSize;
 }
 
-size_t CodeGeneratorARM64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
-  FPRegister reg = FPRegister(reg_id, kDRegSize);
-  __ Str(reg, MemOperand(sp, stack_index));
-  return kArm64WordSize;
+size_t CodeGeneratorARM64::SaveFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED,
+                                                     uint32_t reg_id ATTRIBUTE_UNUSED) {
+  LOG(FATAL) << "FP registers shouldn't be saved/restored individually, "
+             << "use SaveRestoreLiveRegistersHelper";
+  UNREACHABLE();
 }
 
-size_t CodeGeneratorARM64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
-  FPRegister reg = FPRegister(reg_id, kDRegSize);
-  __ Ldr(reg, MemOperand(sp, stack_index));
-  return kArm64WordSize;
+size_t CodeGeneratorARM64::RestoreFloatingPointRegister(size_t stack_index ATTRIBUTE_UNUSED,
+                                                        uint32_t reg_id ATTRIBUTE_UNUSED) {
+  LOG(FATAL) << "FP registers shouldn't be saved/restored individually, "
+             << "use SaveRestoreLiveRegistersHelper";
+  UNREACHABLE();
 }
 
 void CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const {
@@ -1277,10 +1365,10 @@
   } else if (constant->IsNullConstant()) {
     __ Mov(Register(destination), 0);
   } else if (constant->IsFloatConstant()) {
-    __ Fmov(FPRegister(destination), constant->AsFloatConstant()->GetValue());
+    __ Fmov(VRegister(destination), constant->AsFloatConstant()->GetValue());
   } else {
     DCHECK(constant->IsDoubleConstant());
-    __ Fmov(FPRegister(destination), constant->AsDoubleConstant()->GetValue());
+    __ Fmov(VRegister(destination), constant->AsDoubleConstant()->GetValue());
   }
 }
 
@@ -1304,7 +1392,7 @@
 static CPURegister AcquireFPOrCoreCPURegisterOfSize(vixl::aarch64::MacroAssembler* masm,
                                                     vixl::aarch64::UseScratchRegisterScope* temps,
                                                     int size_in_bits) {
-  return masm->GetScratchFPRegisterList()->IsEmpty()
+  return masm->GetScratchVRegisterList()->IsEmpty()
       ? CPURegister(temps->AcquireRegisterOfSize(size_in_bits))
       : CPURegister(temps->AcquireVRegisterOfSize(size_in_bits));
 }
@@ -1372,7 +1460,7 @@
         if (GetGraph()->HasSIMD()) {
           __ Mov(QRegisterFrom(destination), QRegisterFrom(source));
         } else {
-          __ Fmov(FPRegister(dst), FPRegisterFrom(source, dst_type));
+          __ Fmov(VRegister(dst), FPRegisterFrom(source, dst_type));
         }
       }
     }
@@ -1382,14 +1470,14 @@
     } else {
       DCHECK(source.IsSIMDStackSlot());
       UseScratchRegisterScope temps(GetVIXLAssembler());
-      if (GetVIXLAssembler()->GetScratchFPRegisterList()->IsEmpty()) {
+      if (GetVIXLAssembler()->GetScratchVRegisterList()->IsEmpty()) {
         Register temp = temps.AcquireX();
         __ Ldr(temp, MemOperand(sp, source.GetStackIndex()));
         __ Str(temp, MemOperand(sp, destination.GetStackIndex()));
         __ Ldr(temp, MemOperand(sp, source.GetStackIndex() + kArm64WordSize));
         __ Str(temp, MemOperand(sp, destination.GetStackIndex() + kArm64WordSize));
       } else {
-        FPRegister temp = temps.AcquireVRegisterOfSize(kQRegSize);
+        VRegister temp = temps.AcquireVRegisterOfSize(kQRegSize);
         __ Ldr(temp, StackOperandFrom(source));
         __ Str(temp, StackOperandFrom(destination));
       }
@@ -1563,7 +1651,7 @@
             MaybeRecordImplicitNullCheck(instruction);
           }
         }
-        __ Fmov(FPRegister(dst), temp);
+        __ Fmov(VRegister(dst), temp);
         break;
       }
       case DataType::Type::kUint32:
@@ -1663,7 +1751,7 @@
       } else {
         DCHECK(src.IsFPRegister());
         temp_src = src.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
-        __ Fmov(temp_src, FPRegister(src));
+        __ Fmov(temp_src, VRegister(src));
       }
       {
         ExactAssemblyScope eas(masm, kInstructionSize, CodeBufferCheckScope::kExactSize);
@@ -1687,14 +1775,25 @@
                                        SlowPathCode* slow_path) {
   ValidateInvokeRuntime(entrypoint, instruction, slow_path);
 
-  __ Ldr(lr, MemOperand(tr, GetThreadOffset<kArm64PointerSize>(entrypoint).Int32Value()));
-  {
+  ThreadOffset64 entrypoint_offset = GetThreadOffset<kArm64PointerSize>(entrypoint);
+  // Reduce code size for AOT by using shared trampolines for slow path runtime calls across the
+  // entire oat file. This adds an extra branch and we do not want to slow down the main path.
+  // For JIT, thunk sharing is per-method, so the gains would be smaller or even negative.
+  if (slow_path == nullptr || Runtime::Current()->UseJitCompilation()) {
+    __ Ldr(lr, MemOperand(tr, entrypoint_offset.Int32Value()));
     // Ensure the pc position is recorded immediately after the `blr` instruction.
     ExactAssemblyScope eas(GetVIXLAssembler(), kInstructionSize, CodeBufferCheckScope::kExactSize);
     __ blr(lr);
     if (EntrypointRequiresStackMap(entrypoint)) {
       RecordPcInfo(instruction, dex_pc, slow_path);
     }
+  } else {
+    // Ensure the pc position is recorded immediately after the `bl` instruction.
+    ExactAssemblyScope eas(GetVIXLAssembler(), kInstructionSize, CodeBufferCheckScope::kExactSize);
+    EmitEntrypointThunkCall(entrypoint_offset);
+    if (EntrypointRequiresStackMap(entrypoint)) {
+      RecordPcInfo(instruction, dex_pc, slow_path);
+    }
   }
 }
 
@@ -1713,14 +1812,16 @@
   constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
   const size_t status_byte_offset =
       mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
-  constexpr uint32_t shifted_initialized_value =
-      enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
+  constexpr uint32_t shifted_visibly_initialized_value =
+      enum_cast<uint32_t>(ClassStatus::kVisiblyInitialized) << (status_lsb_position % kBitsPerByte);
 
-  // Even if the initialized flag is set, we need to ensure consistent memory ordering.
-  // TODO(vixl): Let the MacroAssembler handle MemOperand.
-  __ Add(temp, class_reg, status_byte_offset);
-  __ Ldarb(temp, HeapOperand(temp));
-  __ Cmp(temp, shifted_initialized_value);
+  // CMP (immediate) is limited to imm12 or imm12<<12, so we would need to materialize
+  // the constant 0xf0000000 for comparison with the full 32-bit field. To reduce the code
+  // size, load only the high byte of the field and compare with 0xf0.
+  // Note: The same code size could be achieved with LDR+MNV(asr #24)+CBNZ but benchmarks
+  // show that this pattern is slower (tested on little cores).
+  __ Ldrb(temp, HeapOperand(class_reg, status_byte_offset));
+  __ Cmp(temp, shifted_visibly_initialized_value);
   __ B(lo, slow_path->GetEntryLabel());
   __ Bind(slow_path->GetExitLabel());
 }
@@ -2005,9 +2106,9 @@
     }
     case DataType::Type::kFloat32:
     case DataType::Type::kFloat64: {
-      FPRegister dst = OutputFPRegister(instr);
-      FPRegister lhs = InputFPRegisterAt(instr, 0);
-      FPRegister rhs = InputFPRegisterAt(instr, 1);
+      VRegister dst = OutputFPRegister(instr);
+      VRegister lhs = InputFPRegisterAt(instr, 0);
+      VRegister rhs = InputFPRegisterAt(instr, 1);
       if (instr->IsAdd()) {
         __ Fadd(dst, lhs, rhs);
       } else if (instr->IsSub()) {
@@ -2497,12 +2598,10 @@
 void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
   DataType::Type value_type = instruction->GetComponentType();
 
-  bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
+  bool needs_type_check = instruction->NeedsTypeCheck();
   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
       instruction,
-      may_need_runtime_call_for_type_check ?
-          LocationSummary::kCallOnSlowPath :
-          LocationSummary::kNoCall);
+      needs_type_check ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
   if (IsConstantZeroBitPattern(instruction->InputAt(2))) {
@@ -2517,7 +2616,7 @@
 void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
   DataType::Type value_type = instruction->GetComponentType();
   LocationSummary* locations = instruction->GetLocations();
-  bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
+  bool needs_type_check = instruction->NeedsTypeCheck();
   bool needs_write_barrier =
       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
 
@@ -2530,7 +2629,7 @@
   MacroAssembler* masm = GetVIXLAssembler();
 
   if (!needs_write_barrier) {
-    DCHECK(!may_need_runtime_call_for_type_check);
+    DCHECK(!needs_type_check);
     if (index.IsConstant()) {
       offset += Int64FromLocation(index) << DataType::SizeShift(value_type);
       destination = HeapOperand(array, offset);
@@ -2562,123 +2661,105 @@
     }
   } else {
     DCHECK(!instruction->GetArray()->IsIntermediateAddress());
-    vixl::aarch64::Label done;
+
+    bool can_value_be_null = instruction->GetValueCanBeNull();
+    vixl::aarch64::Label do_store;
+    if (can_value_be_null) {
+      __ Cbz(Register(value), &do_store);
+    }
+
     SlowPathCodeARM64* slow_path = nullptr;
-    {
-      // We use a block to end the scratch scope before the write barrier, thus
-      // freeing the temporary registers so they can be used in `MarkGCCard`.
+    if (needs_type_check) {
+      slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathARM64(instruction);
+      codegen_->AddSlowPath(slow_path);
+
+      const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+      const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+      const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+
       UseScratchRegisterScope temps(masm);
       Register temp = temps.AcquireSameSizeAs(array);
-      if (index.IsConstant()) {
-        offset += Int64FromLocation(index) << DataType::SizeShift(value_type);
-        destination = HeapOperand(array, offset);
-      } else {
-        destination = HeapOperand(temp,
-                                  XRegisterFrom(index),
-                                  LSL,
-                                  DataType::SizeShift(value_type));
+      Register temp2 = temps.AcquireSameSizeAs(array);
+
+      // Note that when Baker read barriers are enabled, the type
+      // checks are performed without read barriers.  This is fine,
+      // even in the case where a class object is in the from-space
+      // after the flip, as a comparison involving such a type would
+      // not produce a false positive; it may of course produce a
+      // false negative, in which case we would take the ArraySet
+      // slow path.
+
+      // /* HeapReference<Class> */ temp = array->klass_
+      {
+        // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+        EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+        __ Ldr(temp, HeapOperand(array, class_offset));
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
       }
+      GetAssembler()->MaybeUnpoisonHeapReference(temp);
 
-      uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-      uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
-      uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
+      // /* HeapReference<Class> */ temp = temp->component_type_
+      __ Ldr(temp, HeapOperand(temp, component_offset));
+      // /* HeapReference<Class> */ temp2 = value->klass_
+      __ Ldr(temp2, HeapOperand(Register(value), class_offset));
+      // If heap poisoning is enabled, no need to unpoison `temp`
+      // nor `temp2`, as we are comparing two poisoned references.
+      __ Cmp(temp, temp2);
 
-      if (may_need_runtime_call_for_type_check) {
-        slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathARM64(instruction);
-        codegen_->AddSlowPath(slow_path);
-        if (instruction->GetValueCanBeNull()) {
-          vixl::aarch64::Label non_zero;
-          __ Cbnz(Register(value), &non_zero);
-          if (!index.IsConstant()) {
-            __ Add(temp, array, offset);
-          }
-          {
-            // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools
-            // emitted.
-            EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
-            __ Str(wzr, destination);
-            codegen_->MaybeRecordImplicitNullCheck(instruction);
-          }
-          __ B(&done);
-          __ Bind(&non_zero);
-        }
-
-        // Note that when Baker read barriers are enabled, the type
-        // checks are performed without read barriers.  This is fine,
-        // even in the case where a class object is in the from-space
-        // after the flip, as a comparison involving such a type would
-        // not produce a false positive; it may of course produce a
-        // false negative, in which case we would take the ArraySet
-        // slow path.
-
-        Register temp2 = temps.AcquireSameSizeAs(array);
-        // /* HeapReference<Class> */ temp = array->klass_
-        {
-          // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
-          EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
-          __ Ldr(temp, HeapOperand(array, class_offset));
-          codegen_->MaybeRecordImplicitNullCheck(instruction);
-        }
+      if (instruction->StaticTypeOfArrayIsObjectArray()) {
+        vixl::aarch64::Label do_put;
+        __ B(eq, &do_put);
+        // If heap poisoning is enabled, the `temp` reference has
+        // not been unpoisoned yet; unpoison it now.
         GetAssembler()->MaybeUnpoisonHeapReference(temp);
 
-        // /* HeapReference<Class> */ temp = temp->component_type_
-        __ Ldr(temp, HeapOperand(temp, component_offset));
-        // /* HeapReference<Class> */ temp2 = value->klass_
-        __ Ldr(temp2, HeapOperand(Register(value), class_offset));
-        // If heap poisoning is enabled, no need to unpoison `temp`
-        // nor `temp2`, as we are comparing two poisoned references.
-        __ Cmp(temp, temp2);
-        temps.Release(temp2);
-
-        if (instruction->StaticTypeOfArrayIsObjectArray()) {
-          vixl::aarch64::Label do_put;
-          __ B(eq, &do_put);
-          // If heap poisoning is enabled, the `temp` reference has
-          // not been unpoisoned yet; unpoison it now.
-          GetAssembler()->MaybeUnpoisonHeapReference(temp);
-
-          // /* HeapReference<Class> */ temp = temp->super_class_
-          __ Ldr(temp, HeapOperand(temp, super_offset));
-          // If heap poisoning is enabled, no need to unpoison
-          // `temp`, as we are comparing against null below.
-          __ Cbnz(temp, slow_path->GetEntryLabel());
-          __ Bind(&do_put);
-        } else {
-          __ B(ne, slow_path->GetEntryLabel());
-        }
-      }
-
-      if (kPoisonHeapReferences) {
-        Register temp2 = temps.AcquireSameSizeAs(array);
-          DCHECK(value.IsW());
-        __ Mov(temp2, value.W());
-        GetAssembler()->PoisonHeapReference(temp2);
-        source = temp2;
-      }
-
-      if (!index.IsConstant()) {
-        __ Add(temp, array, offset);
+        // /* HeapReference<Class> */ temp = temp->super_class_
+        __ Ldr(temp, HeapOperand(temp, super_offset));
+        // If heap poisoning is enabled, no need to unpoison
+        // `temp`, as we are comparing against null below.
+        __ Cbnz(temp, slow_path->GetEntryLabel());
+        __ Bind(&do_put);
       } else {
-        // We no longer need the `temp` here so release it as the store below may
-        // need a scratch register (if the constant index makes the offset too large)
-        // and the poisoned `source` could be using the other scratch register.
-        temps.Release(temp);
-      }
-      {
-        // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
-        EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
-        __ Str(source, destination);
-
-        if (!may_need_runtime_call_for_type_check) {
-          codegen_->MaybeRecordImplicitNullCheck(instruction);
-        }
+        __ B(ne, slow_path->GetEntryLabel());
       }
     }
 
-    codegen_->MarkGCCard(array, value.W(), instruction->GetValueCanBeNull());
+    codegen_->MarkGCCard(array, value.W(), /* value_can_be_null= */ false);
 
-    if (done.IsLinked()) {
-      __ Bind(&done);
+    if (can_value_be_null) {
+      DCHECK(do_store.IsLinked());
+      __ Bind(&do_store);
+    }
+
+    UseScratchRegisterScope temps(masm);
+    if (kPoisonHeapReferences) {
+      Register temp_source = temps.AcquireSameSizeAs(array);
+        DCHECK(value.IsW());
+      __ Mov(temp_source, value.W());
+      GetAssembler()->PoisonHeapReference(temp_source);
+      source = temp_source;
+    }
+
+    if (index.IsConstant()) {
+      offset += Int64FromLocation(index) << DataType::SizeShift(value_type);
+      destination = HeapOperand(array, offset);
+    } else {
+      Register temp_base = temps.AcquireSameSizeAs(array);
+      __ Add(temp_base, array, offset);
+      destination = HeapOperand(temp_base,
+                                XRegisterFrom(index),
+                                LSL,
+                                DataType::SizeShift(value_type));
+    }
+
+    {
+      // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+      EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+      __ Str(source, destination);
+
+      if (can_value_be_null || !needs_type_check) {
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+      }
     }
 
     if (slow_path != nullptr) {
@@ -2693,16 +2774,59 @@
   caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
   caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1).GetCode()));
   LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
+
+  // If both index and length are constant, we can check the bounds statically and
+  // generate code accordingly. We want to make sure we generate constant locations
+  // in that case, regardless of whether they are encodable in the comparison or not.
+  HInstruction* index = instruction->InputAt(0);
+  HInstruction* length = instruction->InputAt(1);
+  bool both_const = index->IsConstant() && length->IsConstant();
+  locations->SetInAt(0, both_const
+      ? Location::ConstantLocation(index->AsConstant())
+      : ARM64EncodableConstantOrRegister(index, instruction));
+  locations->SetInAt(1, both_const
+      ? Location::ConstantLocation(length->AsConstant())
+      : ARM64EncodableConstantOrRegister(length, instruction));
 }
 
 void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
+  LocationSummary* locations = instruction->GetLocations();
+  Location index_loc = locations->InAt(0);
+  Location length_loc = locations->InAt(1);
+
+  int cmp_first_input = 0;
+  int cmp_second_input = 1;
+  Condition cond = hs;
+
+  if (index_loc.IsConstant()) {
+    int64_t index = Int64FromLocation(index_loc);
+    if (length_loc.IsConstant()) {
+      int64_t length = Int64FromLocation(length_loc);
+      if (index < 0 || index >= length) {
+        BoundsCheckSlowPathARM64* slow_path =
+            new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARM64(instruction);
+        codegen_->AddSlowPath(slow_path);
+        __ B(slow_path->GetEntryLabel());
+      } else {
+        // BCE will remove the bounds check if we are guaranteed to pass.
+        // However, some optimization after BCE may have generated this, and we should not
+        // generate a bounds check if it is a valid range.
+      }
+      return;
+    }
+    // Only the index is constant: change the order of the operands and commute the condition
+    // so we can use an immediate constant for the index (only the second input to a cmp
+    // instruction can be an immediate).
+    cmp_first_input = 1;
+    cmp_second_input = 0;
+    cond = ls;
+  }
   BoundsCheckSlowPathARM64* slow_path =
       new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathARM64(instruction);
+  __ Cmp(InputRegisterAt(instruction, cmp_first_input),
+         InputOperandAt(instruction, cmp_second_input));
   codegen_->AddSlowPath(slow_path);
-  __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
-  __ B(slow_path->GetEntryLabel(), hs);
+  __ B(slow_path->GetEntryLabel(), cond);
 }
 
 void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
@@ -2730,7 +2854,7 @@
 }
 
 void InstructionCodeGeneratorARM64::GenerateFcmp(HInstruction* instruction) {
-  FPRegister lhs_reg = InputFPRegisterAt(instruction, 0);
+  VRegister lhs_reg = InputFPRegisterAt(instruction, 0);
   Location rhs_loc = instruction->GetLocations()->InAt(1);
   if (rhs_loc.IsConstant()) {
     // 0.0 is the only immediate that can be encoded directly in
@@ -3102,15 +3226,7 @@
   HLoopInformation* info = block->GetLoopInformation();
 
   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
-    if (codegen_->GetCompilerOptions().CountHotnessInCompiledCode()) {
-      UseScratchRegisterScope temps(GetVIXLAssembler());
-      Register temp1 = temps.AcquireX();
-      Register temp2 = temps.AcquireX();
-      __ Ldr(temp1, MemOperand(sp, 0));
-      __ Ldrh(temp2, MemOperand(temp1, ArtMethod::HotnessCountOffset().Int32Value()));
-      __ Add(temp2, temp2, 1);
-      __ Strh(temp2, MemOperand(temp1, ArtMethod::HotnessCountOffset().Int32Value()));
-    }
+    codegen_->MaybeIncrementHotness(/* is_frame_entry= */ false);
     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
     return;
   }
@@ -3964,6 +4080,32 @@
   HandleInvoke(invoke);
 }
 
+void CodeGeneratorARM64::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
+                                                       Register klass) {
+  DCHECK_EQ(klass.GetCode(), 0u);
+  // We know the destination of an intrinsic, so no need to record inline
+  // caches.
+  if (!instruction->GetLocations()->Intrinsified() &&
+      GetGraph()->IsCompilingBaseline() &&
+      !Runtime::Current()->IsAotCompiler()) {
+    DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
+    ScopedObjectAccess soa(Thread::Current());
+    ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
+    if (info != nullptr) {
+      InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
+      uint64_t address = reinterpret_cast64<uint64_t>(cache);
+      vixl::aarch64::Label done;
+      __ Mov(x8, address);
+      __ Ldr(x9, MemOperand(x8, InlineCache::ClassesOffset().Int32Value()));
+      // Fast path for a monomorphic cache.
+      __ Cmp(klass, x9);
+      __ B(eq, &done);
+      InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc());
+      __ Bind(&done);
+    }
+  }
+}
+
 void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   LocationSummary* locations = invoke->GetLocations();
@@ -3972,13 +4114,6 @@
   Offset class_offset = mirror::Object::ClassOffset();
   Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
 
-  // The register ip1 is required to be used for the hidden argument in
-  // art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
-  MacroAssembler* masm = GetVIXLAssembler();
-  UseScratchRegisterScope scratch_scope(masm);
-  scratch_scope.Exclude(ip1);
-  __ Mov(ip1, invoke->GetDexMethodIndex());
-
   // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
   if (receiver.IsStackSlot()) {
     __ Ldr(temp.W(), StackOperandFrom(receiver));
@@ -4003,6 +4138,17 @@
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   GetAssembler()->MaybeUnpoisonHeapReference(temp.W());
+
+  // If we're compiling baseline, update the inline cache.
+  codegen_->MaybeGenerateInlineCacheCheck(invoke, temp);
+
+  // The register ip1 is required to be used for the hidden argument in
+  // art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
+  MacroAssembler* masm = GetVIXLAssembler();
+  UseScratchRegisterScope scratch_scope(masm);
+  scratch_scope.Exclude(ip1);
+  __ Mov(ip1, invoke->GetDexMethodIndex());
+
   __ Ldr(temp,
       MemOperand(temp, mirror::Class::ImtPtrOffset(kArm64PointerSize).Uint32Value()));
   uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
@@ -4079,7 +4225,7 @@
       callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
       break;
     case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative: {
-      DCHECK(GetCompilerOptions().IsBootImage());
+      DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension());
       // Add ADRP with its PC-relative method patch.
       vixl::aarch64::Label* adrp_label = NewBootImageMethodPatch(invoke->GetTargetMethod());
       EmitAdrpPlaceholder(adrp_label, XRegisterFrom(temp));
@@ -4108,6 +4254,7 @@
       // Add LDR with its PC-relative .bss entry patch.
       vixl::aarch64::Label* ldr_label =
           NewMethodBssEntryPatch(target_method, adrp_label);
+      // All aligned loads are implicitly atomic consume operations on ARM64.
       EmitLdrOffsetPlaceholder(ldr_label, XRegisterFrom(temp), XRegisterFrom(temp));
       break;
     }
@@ -4182,6 +4329,10 @@
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   GetAssembler()->MaybeUnpoisonHeapReference(temp.W());
+
+  // If we're compiling baseline, update the inline cache.
+  MaybeGenerateInlineCacheCheck(invoke, temp);
+
   // temp = temp->GetMethodAt(method_offset);
   __ Ldr(temp, MemOperand(temp, method_offset));
   // lr = temp->GetEntryPoint();
@@ -4217,14 +4368,14 @@
     uint32_t intrinsic_data,
     vixl::aarch64::Label* adrp_label) {
   return NewPcRelativePatch(
-      /* dex_file= */ nullptr, intrinsic_data, adrp_label, &boot_image_intrinsic_patches_);
+      /* dex_file= */ nullptr, intrinsic_data, adrp_label, &boot_image_other_patches_);
 }
 
 vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageRelRoPatch(
     uint32_t boot_image_offset,
     vixl::aarch64::Label* adrp_label) {
   return NewPcRelativePatch(
-      /* dex_file= */ nullptr, boot_image_offset, adrp_label, &boot_image_method_patches_);
+      /* dex_file= */ nullptr, boot_image_offset, adrp_label, &boot_image_other_patches_);
 }
 
 vixl::aarch64::Label* CodeGeneratorARM64::NewBootImageMethodPatch(
@@ -4270,6 +4421,15 @@
   return NewPcRelativePatch(&dex_file, string_index.index_, adrp_label, &string_bss_entry_patches_);
 }
 
+void CodeGeneratorARM64::EmitEntrypointThunkCall(ThreadOffset64 entrypoint_offset) {
+  DCHECK(!__ AllowMacroInstructions());  // In ExactAssemblyScope.
+  DCHECK(!Runtime::Current()->UseJitCompilation());
+  call_entrypoint_patches_.emplace_back(/*dex_file*/ nullptr, entrypoint_offset.Uint32Value());
+  vixl::aarch64::Label* bl_label = &call_entrypoint_patches_.back().label;
+  __ bind(bl_label);
+  __ bl(static_cast<int64_t>(0));  // Placeholder, patched at link-time.
+}
+
 void CodeGeneratorARM64::EmitBakerReadBarrierCbnz(uint32_t custom_data) {
   DCHECK(!__ AllowMacroInstructions());  // In ExactAssemblyScope.
   if (Runtime::Current()->UseJitCompilation()) {
@@ -4425,24 +4585,28 @@
       type_bss_entry_patches_.size() +
       boot_image_string_patches_.size() +
       string_bss_entry_patches_.size() +
-      boot_image_intrinsic_patches_.size() +
+      boot_image_other_patches_.size() +
+      call_entrypoint_patches_.size() +
       baker_read_barrier_patches_.size();
   linker_patches->reserve(size);
-  if (GetCompilerOptions().IsBootImage()) {
+  if (GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension()) {
     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeMethodPatch>(
         boot_image_method_patches_, linker_patches);
     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeTypePatch>(
         boot_image_type_patches_, linker_patches);
     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
         boot_image_string_patches_, linker_patches);
-    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::IntrinsicReferencePatch>>(
-        boot_image_intrinsic_patches_, linker_patches);
   } else {
-    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::DataBimgRelRoPatch>>(
-        boot_image_method_patches_, linker_patches);
+    DCHECK(boot_image_method_patches_.empty());
     DCHECK(boot_image_type_patches_.empty());
     DCHECK(boot_image_string_patches_.empty());
-    DCHECK(boot_image_intrinsic_patches_.empty());
+  }
+  if (GetCompilerOptions().IsBootImage()) {
+    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::IntrinsicReferencePatch>>(
+        boot_image_other_patches_, linker_patches);
+  } else {
+    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::DataBimgRelRoPatch>>(
+        boot_image_other_patches_, linker_patches);
   }
   EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
       method_bss_entry_patches_, linker_patches);
@@ -4450,6 +4614,11 @@
       type_bss_entry_patches_, linker_patches);
   EmitPcRelativeLinkerPatches<linker::LinkerPatch::StringBssEntryPatch>(
       string_bss_entry_patches_, linker_patches);
+  for (const PatchInfo<vixl::aarch64::Label>& info : call_entrypoint_patches_) {
+    DCHECK(info.target_dex_file == nullptr);
+    linker_patches->push_back(linker::LinkerPatch::CallEntrypointPatch(
+        info.label.GetLocation(), info.offset_or_index));
+  }
   for (const BakerReadBarrierPatchInfo& info : baker_read_barrier_patches_) {
     linker_patches->push_back(linker::LinkerPatch::BakerReadBarrierBranchPatch(
         info.label.GetLocation(), info.custom_data));
@@ -4458,7 +4627,8 @@
 }
 
 bool CodeGeneratorARM64::NeedsThunkCode(const linker::LinkerPatch& patch) const {
-  return patch.GetType() == linker::LinkerPatch::Type::kBakerReadBarrierBranch ||
+  return patch.GetType() == linker::LinkerPatch::Type::kCallEntrypoint ||
+         patch.GetType() == linker::LinkerPatch::Type::kBakerReadBarrierBranch ||
          patch.GetType() == linker::LinkerPatch::Type::kCallRelative;
 }
 
@@ -4478,6 +4648,14 @@
       }
       break;
     }
+    case linker::LinkerPatch::Type::kCallEntrypoint: {
+      Offset offset(patch.EntrypointOffset());
+      assembler.JumpTo(ManagedRegister(arm64::TR), offset, ManagedRegister(arm64::IP0));
+      if (GetCompilerOptions().GenerateAnyDebugInfo()) {
+        *debug_name = "EntrypointCallThunk_" + std::to_string(offset.Uint32Value());
+      }
+      break;
+    }
     case linker::LinkerPatch::Type::kBakerReadBarrierBranch: {
       DCHECK_EQ(patch.GetBakerCustomValue2(), 0u);
       CompileBakerReadBarrierThunk(assembler, patch.GetBakerCustomValue1(), debug_name);
@@ -4638,6 +4816,8 @@
       break;
     }
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
+      DCHECK(codegen_->GetCompilerOptions().IsBootImage() ||
+             codegen_->GetCompilerOptions().IsBootImageExtension());
       DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
       // Add ADRP with its PC-relative type patch.
       const DexFile& dex_file = cls->GetDexFile();
@@ -4673,6 +4853,7 @@
       vixl::aarch64::Label* ldr_label =
           codegen_->NewBssEntryTypePatch(dex_file, type_index, adrp_label);
       // /* GcRoot<mirror::Class> */ out = *(base_address + offset)  /* PC-relative */
+      // All aligned loads are implicitly atomic consume operations on ARM64.
       codegen_->GenerateGcRootFieldLoad(cls,
                                         out_loc,
                                         temp,
@@ -4812,7 +4993,8 @@
 
   switch (load->GetLoadKind()) {
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
-      DCHECK(codegen_->GetCompilerOptions().IsBootImage());
+      DCHECK(codegen_->GetCompilerOptions().IsBootImage() ||
+             codegen_->GetCompilerOptions().IsBootImageExtension());
       // Add ADRP with its PC-relative String patch.
       const DexFile& dex_file = load->GetDexFile();
       const dex::StringIndex string_index = load->GetStringIndex();
@@ -4847,6 +5029,7 @@
       vixl::aarch64::Label* ldr_label =
           codegen_->NewStringBssEntryPatch(dex_file, string_index, adrp_label);
       // /* GcRoot<mirror::String> */ out = *(base_address + offset)  /* PC-relative */
+      // All aligned loads are implicitly atomic consume operations on ARM64.
       codegen_->GenerateGcRootFieldLoad(load,
                                         out_loc,
                                         temp,
@@ -5318,8 +5501,8 @@
     }
     case DataType::Type::kFloat32:
     case DataType::Type::kFloat64: {
-      FPRegister in_reg = InputFPRegisterAt(abs, 0);
-      FPRegister out_reg = OutputFPRegister(abs);
+      VRegister in_reg = InputFPRegisterAt(abs, 0);
+      VRegister out_reg = OutputFPRegister(abs);
       __ Fabs(out_reg, in_reg);
       break;
     }
@@ -5351,7 +5534,21 @@
   locations->SetInAt(0, ARM64ReturnLocation(return_type));
 }
 
-void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction ATTRIBUTE_UNUSED) {
+void InstructionCodeGeneratorARM64::VisitReturn(HReturn* ret) {
+  if (GetGraph()->IsCompilingOsr()) {
+    // To simplify callers of an OSR method, we put the return value in both
+    // floating point and core register.
+    switch (ret->InputAt(0)->GetType()) {
+      case DataType::Type::kFloat32:
+        __ Fmov(w0, s0);
+        break;
+      case DataType::Type::kFloat64:
+        __ Fmov(x0, d0);
+        break;
+      default:
+        break;
+    }
+  }
   codegen_->GenerateFrameExit();
 }
 
@@ -5411,6 +5608,15 @@
   HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
 }
 
+void LocationsBuilderARM64::VisitStringBuilderAppend(HStringBuilderAppend* instruction) {
+  codegen_->CreateStringBuilderAppendLocations(instruction, LocationFrom(x0));
+}
+
+void InstructionCodeGeneratorARM64::VisitStringBuilderAppend(HStringBuilderAppend* instruction) {
+  __ Mov(w0, instruction->GetFormat()->GetValue());
+  codegen_->InvokeRuntime(kQuickStringBuilderAppend, instruction, instruction->GetDexPc());
+}
+
 void LocationsBuilderARM64::VisitUnresolvedInstanceFieldGet(
     HUnresolvedInstanceFieldGet* instruction) {
   FieldAccessCallingConventionARM64 calling_convention;
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index ada5742..6b2c805 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -63,7 +63,7 @@
   vixl::aarch64::x7
 };
 static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
-static const vixl::aarch64::FPRegister kParameterFPRegisters[] = {
+static const vixl::aarch64::VRegister kParameterFPRegisters[] = {
   vixl::aarch64::d0,
   vixl::aarch64::d1,
   vixl::aarch64::d2,
@@ -111,7 +111,7 @@
          ? vixl::aarch64::x21.GetCode()
          : vixl::aarch64::x20.GetCode()),
      vixl::aarch64::x30.GetCode());
-const vixl::aarch64::CPURegList callee_saved_fp_registers(vixl::aarch64::CPURegister::kFPRegister,
+const vixl::aarch64::CPURegList callee_saved_fp_registers(vixl::aarch64::CPURegister::kVRegister,
                                                           vixl::aarch64::kDRegSize,
                                                           vixl::aarch64::d8.GetCode(),
                                                           vixl::aarch64::d15.GetCode());
@@ -162,7 +162,7 @@
       vixl::aarch64::x7 };
 static constexpr size_t kRuntimeParameterCoreRegistersLength =
     arraysize(kRuntimeParameterCoreRegisters);
-static const vixl::aarch64::FPRegister kRuntimeParameterFpuRegisters[] =
+static const vixl::aarch64::VRegister kRuntimeParameterFpuRegisters[] =
     { vixl::aarch64::d0,
       vixl::aarch64::d1,
       vixl::aarch64::d2,
@@ -175,7 +175,7 @@
     arraysize(kRuntimeParameterCoreRegisters);
 
 class InvokeRuntimeCallingConvention : public CallingConvention<vixl::aarch64::Register,
-                                                                vixl::aarch64::FPRegister> {
+                                                                vixl::aarch64::VRegister> {
  public:
   static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
 
@@ -193,7 +193,7 @@
 };
 
 class InvokeDexCallingConvention : public CallingConvention<vixl::aarch64::Register,
-                                                            vixl::aarch64::FPRegister> {
+                                                            vixl::aarch64::VRegister> {
  public:
   InvokeDexCallingConvention()
       : CallingConvention(kParameterCoreRegisters,
@@ -435,10 +435,14 @@
     return kArm64WordSize;
   }
 
-  size_t GetFloatingPointSpillSlotSize() const override {
+  size_t GetSlowPathFPWidth() const override {
     return GetGraph()->HasSIMD()
-        ? 2 * kArm64WordSize   // 16 bytes == 2 arm64 words for each spill
-        : 1 * kArm64WordSize;  //  8 bytes == 1 arm64 words for each spill
+        ? vixl::aarch64::kQRegSizeInBytes
+        : vixl::aarch64::kDRegSizeInBytes;
+  }
+
+  size_t GetCalleePreservedFPWidth() const override {
+    return vixl::aarch64::kDRegSizeInBytes;
   }
 
   uintptr_t GetAddressOf(HBasicBlock* block) override {
@@ -476,7 +480,7 @@
   // requirements, etc.). This also facilitates our task as all other registers
   // can easily be mapped via to or from their type and index or code.
   static const int kNumberOfAllocatableRegisters = vixl::aarch64::kNumberOfRegisters - 1;
-  static const int kNumberOfAllocatableFPRegisters = vixl::aarch64::kNumberOfFPRegisters;
+  static const int kNumberOfAllocatableFPRegisters = vixl::aarch64::kNumberOfVRegisters;
   static constexpr int kNumberOfAllocatableRegisterPairs = 0;
 
   void DumpCoreRegister(std::ostream& stream, int reg) const override;
@@ -629,6 +633,9 @@
                                                dex::StringIndex string_index,
                                                vixl::aarch64::Label* adrp_label = nullptr);
 
+  // Emit the BL instruction for entrypoint thunk call and record the associated patch for AOT.
+  void EmitEntrypointThunkCall(ThreadOffset64 entrypoint_offset);
+
   // Emit the CBNZ instruction for baker read barrier and record
   // the associated patch for AOT or slow path for JIT.
   void EmitBakerReadBarrierCbnz(uint32_t custom_data);
@@ -770,6 +777,18 @@
   void GenerateImplicitNullCheck(HNullCheck* instruction) override;
   void GenerateExplicitNullCheck(HNullCheck* instruction) override;
 
+  void MaybeRecordImplicitNullCheck(HInstruction* instr) final {
+    // The function must be only called within special scopes
+    // (EmissionCheckScope, ExactAssemblyScope) which prevent generation of
+    // veneer/literal pools by VIXL assembler.
+    CHECK_EQ(GetVIXLAssembler()->ArePoolsBlocked(), true)
+        << "The function must only be called within EmissionCheckScope or ExactAssemblyScope";
+    CodeGenerator::MaybeRecordImplicitNullCheck(instr);
+  }
+
+  void MaybeGenerateInlineCacheCheck(HInstruction* instruction, vixl::aarch64::Register klass);
+  void MaybeIncrementHotness(bool is_frame_entry);
+
  private:
   // Encoding of thunk type and data for link-time generated thunks for Baker read barriers.
 
@@ -887,12 +906,7 @@
   ParallelMoveResolverARM64 move_resolver_;
   Arm64Assembler assembler_;
 
-  // Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
-  Uint32ToLiteralMap uint32_literals_;
-  // Deduplication map for 64-bit literals, used for non-patchable method address or method code.
-  Uint64ToLiteralMap uint64_literals_;
-  // PC-relative method patch info for kBootImageLinkTimePcRelative/BootImageRelRo.
-  // Also used for type/string patches for kBootImageRelRo (same linker patch as for methods).
+  // PC-relative method patch info for kBootImageLinkTimePcRelative.
   ArenaDeque<PcRelativePatchInfo> boot_image_method_patches_;
   // PC-relative method patch info for kBssEntry.
   ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
@@ -904,11 +918,18 @@
   ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
   // PC-relative String patch info for kBssEntry.
   ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
-  // PC-relative patch info for IntrinsicObjects.
-  ArenaDeque<PcRelativePatchInfo> boot_image_intrinsic_patches_;
+  // PC-relative patch info for IntrinsicObjects for the boot image,
+  // and for method/type/string patches for kBootImageRelRo otherwise.
+  ArenaDeque<PcRelativePatchInfo> boot_image_other_patches_;
+  // Patch info for calls to entrypoint dispatch thunks. Used for slow paths.
+  ArenaDeque<PatchInfo<vixl::aarch64::Label>> call_entrypoint_patches_;
   // Baker read barrier patch info.
   ArenaDeque<BakerReadBarrierPatchInfo> baker_read_barrier_patches_;
 
+  // Deduplication map for 32-bit literals, used for JIT for boot image addresses.
+  Uint32ToLiteralMap uint32_literals_;
+  // Deduplication map for 64-bit literals, used for JIT for method address or method code.
+  Uint64ToLiteralMap uint64_literals_;
   // Patches for string literals in JIT compiled code.
   StringToLiteralMap jit_string_patches_;
   // Patches for class literals in JIT compiled code.
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 507c453..3a2cf40 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -18,7 +18,7 @@
 
 #include "arch/arm/asm_support_arm.h"
 #include "arch/arm/instruction_set_features_arm.h"
-#include "art_method.h"
+#include "art_method-inl.h"
 #include "base/bit_utils.h"
 #include "base/bit_utils_iterator.h"
 #include "class_table.h"
@@ -34,6 +34,7 @@
 #include "linker/linker_patch.h"
 #include "mirror/array-inl.h"
 #include "mirror/class-inl.h"
+#include "scoped_thread_state_change-inl.h"
 #include "thread.h"
 #include "utils/arm/assembler_arm_vixl.h"
 #include "utils/arm/managed_register_arm.h"
@@ -47,7 +48,6 @@
 using namespace vixl32;  // NOLINT(build/namespaces)
 
 using helpers::DRegisterFrom;
-using helpers::DWARFReg;
 using helpers::HighRegisterFrom;
 using helpers::InputDRegisterAt;
 using helpers::InputOperandAt;
@@ -69,6 +69,7 @@
 using helpers::SRegisterFrom;
 using helpers::Uint64ConstantFrom;
 
+using vixl::EmissionCheckScope;
 using vixl::ExactAssemblyScope;
 using vixl::CodeBufferCheckScope;
 
@@ -1856,16 +1857,17 @@
       instruction_visitor_(graph, this),
       move_resolver_(graph->GetAllocator(), this),
       assembler_(graph->GetAllocator()),
-      uint32_literals_(std::less<uint32_t>(),
-                       graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       boot_image_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       boot_image_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      boot_image_intrinsic_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+      boot_image_other_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+      call_entrypoint_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       baker_read_barrier_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+      uint32_literals_(std::less<uint32_t>(),
+                       graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       jit_string_patches_(StringReferenceValueComparator(),
                           graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       jit_class_patches_(TypeReferenceValueComparator(),
@@ -2060,10 +2062,10 @@
 
 void CodeGeneratorARMVIXL::ComputeSpillMask() {
   core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
-  DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
-  // There is no easy instruction to restore just the PC on thumb2. We spill and
-  // restore another arbitrary register.
-  core_spill_mask_ |= (1 << kCoreAlwaysSpillRegister.GetCode());
+  DCHECK_NE(core_spill_mask_ & (1u << kLrCode), 0u)
+      << "At least the return address register must be saved";
+  // 16-bit PUSH/POP (T1) can save/restore just the LR/PC.
+  DCHECK(GetVIXLAssembler()->IsUsingT32());
   fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
   // We use vpush and vpop for saving and restoring floating point registers, which take
   // a SRegister and the number of registers to save/restore after that SRegister. We
@@ -2078,23 +2080,81 @@
   }
 }
 
+void CodeGeneratorARMVIXL::MaybeIncrementHotness(bool is_frame_entry) {
+  if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+    UseScratchRegisterScope temps(GetVIXLAssembler());
+    vixl32::Register temp = temps.Acquire();
+    static_assert(ArtMethod::MaxCounter() == 0xFFFF, "asm is probably wrong");
+    if (!is_frame_entry) {
+      __ Push(vixl32::Register(kMethodRegister));
+      GetAssembler()->LoadFromOffset(kLoadWord, kMethodRegister, sp, kArmWordSize);
+    }
+    // Load with zero extend to clear the high bits for integer overflow check.
+    __ Ldrh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+    __ Add(temp, temp, 1);
+    // Subtract one if the counter would overflow.
+    __ Sub(temp, temp, Operand(temp, ShiftType::LSR, 16));
+    __ Strh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
+    if (!is_frame_entry) {
+      __ Pop(vixl32::Register(kMethodRegister));
+    }
+  }
+
+  if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+    ScopedObjectAccess soa(Thread::Current());
+    ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
+    if (info != nullptr) {
+      uint32_t address = reinterpret_cast32<uint32_t>(info);
+      vixl::aarch32::Label done;
+      UseScratchRegisterScope temps(GetVIXLAssembler());
+      temps.Exclude(ip);
+      if (!is_frame_entry) {
+        __ Push(r4);  // Will be used as temporary. For frame entry, r4 is always available.
+      }
+      __ Mov(r4, address);
+      __ Ldrh(ip, MemOperand(r4, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()));
+      __ Add(ip, ip, 1);
+      __ Strh(ip, MemOperand(r4, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()));
+      if (!is_frame_entry) {
+        __ Pop(r4);
+      }
+      __ Lsls(ip, ip, 16);
+      __ B(ne, &done);
+      uint32_t entry_point_offset =
+          GetThreadOffset<kArmPointerSize>(kQuickCompileOptimized).Int32Value();
+      if (HasEmptyFrame()) {
+        CHECK(is_frame_entry);
+        // For leaf methods, we need to spill lr and r0. Also spill r1 and r2 for
+        // alignment.
+        uint32_t core_spill_mask =
+            (1 << lr.GetCode()) | (1 << r0.GetCode()) | (1 << r1.GetCode()) | (1 << r2.GetCode());
+        __ Push(RegisterList(core_spill_mask));
+        __ Ldr(lr, MemOperand(tr, entry_point_offset));
+        __ Blx(lr);
+        __ Pop(RegisterList(core_spill_mask));
+      } else {
+        if (!RequiresCurrentMethod()) {
+          CHECK(is_frame_entry);
+          GetAssembler()->StoreToOffset(kStoreWord, kMethodRegister, sp, 0);
+        }
+      __ Ldr(lr, MemOperand(tr, entry_point_offset));
+      __ Blx(lr);
+      }
+      __ Bind(&done);
+    }
+  }
+}
+
 void CodeGeneratorARMVIXL::GenerateFrameEntry() {
   bool skip_overflow_check =
       IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
   DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
   __ Bind(&frame_entry_label_);
 
-  if (GetCompilerOptions().CountHotnessInCompiledCode()) {
-    UseScratchRegisterScope temps(GetVIXLAssembler());
-    vixl32::Register temp = temps.Acquire();
-    __ Ldrh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
-    __ Add(temp, temp, 1);
-    __ Strh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
-  }
-
   if (HasEmptyFrame()) {
     // Ensure that the CFI opcode list is not empty.
     GetAssembler()->cfi().Nop();
+    MaybeIncrementHotness(/* is_frame_entry= */ true);
     return;
   }
 
@@ -2125,32 +2185,66 @@
     RecordPcInfo(nullptr, 0);
   }
 
-  __ Push(RegisterList(core_spill_mask_));
-  GetAssembler()->cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(core_spill_mask_));
-  GetAssembler()->cfi().RelOffsetForMany(DWARFReg(kMethodRegister),
-                                         0,
-                                         core_spill_mask_,
-                                         kArmWordSize);
-  if (fpu_spill_mask_ != 0) {
-    uint32_t first = LeastSignificantBit(fpu_spill_mask_);
+  uint32_t frame_size = GetFrameSize();
+  uint32_t core_spills_offset = frame_size - GetCoreSpillSize();
+  uint32_t fp_spills_offset = frame_size - FrameEntrySpillSize();
+  if ((fpu_spill_mask_ == 0u || IsPowerOfTwo(fpu_spill_mask_)) &&
+      core_spills_offset <= 3u * kArmWordSize) {
+    // Do a single PUSH for core registers including the method and up to two
+    // filler registers. Then store the single FP spill if any.
+    // (The worst case is when the method is not required and we actually
+    // store 3 extra registers but they are stored in the same properly
+    // aligned 16-byte chunk where we're already writing anyway.)
+    DCHECK_EQ(kMethodRegister.GetCode(), 0u);
+    uint32_t extra_regs = MaxInt<uint32_t>(core_spills_offset / kArmWordSize);
+    DCHECK_LT(MostSignificantBit(extra_regs), LeastSignificantBit(core_spill_mask_));
+    __ Push(RegisterList(core_spill_mask_ | extra_regs));
+    GetAssembler()->cfi().AdjustCFAOffset(frame_size);
+    GetAssembler()->cfi().RelOffsetForMany(DWARFReg(kMethodRegister),
+                                           core_spills_offset,
+                                           core_spill_mask_,
+                                           kArmWordSize);
+    if (fpu_spill_mask_ != 0u) {
+      DCHECK(IsPowerOfTwo(fpu_spill_mask_));
+      vixl::aarch32::SRegister sreg(LeastSignificantBit(fpu_spill_mask_));
+      GetAssembler()->StoreSToOffset(sreg, sp, fp_spills_offset);
+      GetAssembler()->cfi().RelOffset(DWARFReg(sreg), /*offset=*/ fp_spills_offset);
+    }
+  } else {
+    __ Push(RegisterList(core_spill_mask_));
+    GetAssembler()->cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(core_spill_mask_));
+    GetAssembler()->cfi().RelOffsetForMany(DWARFReg(kMethodRegister),
+                                           /*offset=*/ 0,
+                                           core_spill_mask_,
+                                           kArmWordSize);
+    if (fpu_spill_mask_ != 0) {
+      uint32_t first = LeastSignificantBit(fpu_spill_mask_);
 
-    // Check that list is contiguous.
-    DCHECK_EQ(fpu_spill_mask_ >> CTZ(fpu_spill_mask_), ~0u >> (32 - POPCOUNT(fpu_spill_mask_)));
+      // Check that list is contiguous.
+      DCHECK_EQ(fpu_spill_mask_ >> CTZ(fpu_spill_mask_), ~0u >> (32 - POPCOUNT(fpu_spill_mask_)));
 
-    __ Vpush(SRegisterList(vixl32::SRegister(first), POPCOUNT(fpu_spill_mask_)));
-    GetAssembler()->cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
-    GetAssembler()->cfi().RelOffsetForMany(DWARFReg(s0), 0, fpu_spill_mask_, kArmWordSize);
-  }
+      __ Vpush(SRegisterList(vixl32::SRegister(first), POPCOUNT(fpu_spill_mask_)));
+      GetAssembler()->cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
+      GetAssembler()->cfi().RelOffsetForMany(DWARFReg(s0),
+                                             /*offset=*/ 0,
+                                             fpu_spill_mask_,
+                                             kArmWordSize);
+    }
 
-  int adjust = GetFrameSize() - FrameEntrySpillSize();
-  __ Sub(sp, sp, adjust);
-  GetAssembler()->cfi().AdjustCFAOffset(adjust);
-
-  // Save the current method if we need it. Note that we do not
-  // do this in HCurrentMethod, as the instruction might have been removed
-  // in the SSA graph.
-  if (RequiresCurrentMethod()) {
-    GetAssembler()->StoreToOffset(kStoreWord, kMethodRegister, sp, 0);
+    // Adjust SP and save the current method if we need it. Note that we do
+    // not save the method in HCurrentMethod, as the instruction might have
+    // been removed in the SSA graph.
+    if (RequiresCurrentMethod() && fp_spills_offset <= 3 * kArmWordSize) {
+      DCHECK_EQ(kMethodRegister.GetCode(), 0u);
+      __ Push(RegisterList(MaxInt<uint32_t>(fp_spills_offset / kArmWordSize)));
+      GetAssembler()->cfi().AdjustCFAOffset(fp_spills_offset);
+    } else {
+      __ Sub(sp, sp, dchecked_integral_cast<int32_t>(fp_spills_offset));
+      GetAssembler()->cfi().AdjustCFAOffset(fp_spills_offset);
+      if (RequiresCurrentMethod()) {
+        GetAssembler()->StoreToOffset(kStoreWord, kMethodRegister, sp, 0);
+      }
+    }
   }
 
   if (GetGraph()->HasShouldDeoptimizeFlag()) {
@@ -2161,6 +2255,7 @@
     GetAssembler()->StoreToOffset(kStoreWord, temp, sp, GetStackOffsetOfShouldDeoptimizeFlag());
   }
 
+  MaybeIncrementHotness(/* is_frame_entry= */ true);
   MaybeGenerateMarkingRegisterCheck(/* code= */ 1);
 }
 
@@ -2169,27 +2264,55 @@
     __ Bx(lr);
     return;
   }
-  GetAssembler()->cfi().RememberState();
-  int adjust = GetFrameSize() - FrameEntrySpillSize();
-  __ Add(sp, sp, adjust);
-  GetAssembler()->cfi().AdjustCFAOffset(-adjust);
-  if (fpu_spill_mask_ != 0) {
-    uint32_t first = LeastSignificantBit(fpu_spill_mask_);
 
-    // Check that list is contiguous.
-    DCHECK_EQ(fpu_spill_mask_ >> CTZ(fpu_spill_mask_), ~0u >> (32 - POPCOUNT(fpu_spill_mask_)));
-
-    __ Vpop(SRegisterList(vixl32::SRegister(first), POPCOUNT(fpu_spill_mask_)));
-    GetAssembler()->cfi().AdjustCFAOffset(
-        -static_cast<int>(kArmWordSize) * POPCOUNT(fpu_spill_mask_));
-    GetAssembler()->cfi().RestoreMany(DWARFReg(vixl32::SRegister(0)), fpu_spill_mask_);
-  }
   // Pop LR into PC to return.
   DCHECK_NE(core_spill_mask_ & (1 << kLrCode), 0U);
   uint32_t pop_mask = (core_spill_mask_ & (~(1 << kLrCode))) | 1 << kPcCode;
-  __ Pop(RegisterList(pop_mask));
-  GetAssembler()->cfi().RestoreState();
-  GetAssembler()->cfi().DefCFAOffset(GetFrameSize());
+
+  uint32_t frame_size = GetFrameSize();
+  uint32_t core_spills_offset = frame_size - GetCoreSpillSize();
+  uint32_t fp_spills_offset = frame_size - FrameEntrySpillSize();
+  if ((fpu_spill_mask_ == 0u || IsPowerOfTwo(fpu_spill_mask_)) &&
+      // r4 is blocked by TestCodeGeneratorARMVIXL used by some tests.
+      core_spills_offset <= (blocked_core_registers_[r4.GetCode()] ? 2u : 3u) * kArmWordSize) {
+    // Load the FP spill if any and then do a single POP including the method
+    // and up to two filler registers. If we have no FP spills, this also has
+    // the advantage that we do not need to emit CFI directives.
+    if (fpu_spill_mask_ != 0u) {
+      DCHECK(IsPowerOfTwo(fpu_spill_mask_));
+      vixl::aarch32::SRegister sreg(LeastSignificantBit(fpu_spill_mask_));
+      GetAssembler()->cfi().RememberState();
+      GetAssembler()->LoadSFromOffset(sreg, sp, fp_spills_offset);
+      GetAssembler()->cfi().Restore(DWARFReg(sreg));
+    }
+    // Clobber registers r2-r4 as they are caller-save in ART managed ABI and
+    // never hold the return value.
+    uint32_t extra_regs = MaxInt<uint32_t>(core_spills_offset / kArmWordSize) << r2.GetCode();
+    DCHECK_EQ(extra_regs & kCoreCalleeSaves.GetList(), 0u);
+    DCHECK_LT(MostSignificantBit(extra_regs), LeastSignificantBit(pop_mask));
+    __ Pop(RegisterList(pop_mask | extra_regs));
+    if (fpu_spill_mask_ != 0u) {
+      GetAssembler()->cfi().RestoreState();
+    }
+  } else {
+    GetAssembler()->cfi().RememberState();
+    __ Add(sp, sp, fp_spills_offset);
+    GetAssembler()->cfi().AdjustCFAOffset(-dchecked_integral_cast<int32_t>(fp_spills_offset));
+    if (fpu_spill_mask_ != 0) {
+      uint32_t first = LeastSignificantBit(fpu_spill_mask_);
+
+      // Check that list is contiguous.
+      DCHECK_EQ(fpu_spill_mask_ >> CTZ(fpu_spill_mask_), ~0u >> (32 - POPCOUNT(fpu_spill_mask_)));
+
+      __ Vpop(SRegisterList(vixl32::SRegister(first), POPCOUNT(fpu_spill_mask_)));
+      GetAssembler()->cfi().AdjustCFAOffset(
+          -static_cast<int>(kArmWordSize) * POPCOUNT(fpu_spill_mask_));
+      GetAssembler()->cfi().RestoreMany(DWARFReg(vixl32::SRegister(0)), fpu_spill_mask_);
+    }
+    __ Pop(RegisterList(pop_mask));
+    GetAssembler()->cfi().RestoreState();
+    GetAssembler()->cfi().DefCFAOffset(GetFrameSize());
+  }
 }
 
 void CodeGeneratorARMVIXL::Bind(HBasicBlock* block) {
@@ -2383,15 +2506,31 @@
                                          uint32_t dex_pc,
                                          SlowPathCode* slow_path) {
   ValidateInvokeRuntime(entrypoint, instruction, slow_path);
-  __ Ldr(lr, MemOperand(tr, GetThreadOffset<kArmPointerSize>(entrypoint).Int32Value()));
-  // Ensure the pc position is recorded immediately after the `blx` instruction.
-  // blx in T32 has only 16bit encoding that's why a stricter check for the scope is used.
-  ExactAssemblyScope aas(GetVIXLAssembler(),
-                         vixl32::k16BitT32InstructionSizeInBytes,
-                         CodeBufferCheckScope::kExactSize);
-  __ blx(lr);
-  if (EntrypointRequiresStackMap(entrypoint)) {
-    RecordPcInfo(instruction, dex_pc, slow_path);
+
+  ThreadOffset32 entrypoint_offset = GetThreadOffset<kArmPointerSize>(entrypoint);
+  // Reduce code size for AOT by using shared trampolines for slow path runtime calls across the
+  // entire oat file. This adds an extra branch and we do not want to slow down the main path.
+  // For JIT, thunk sharing is per-method, so the gains would be smaller or even negative.
+  if (slow_path == nullptr || Runtime::Current()->UseJitCompilation()) {
+    __ Ldr(lr, MemOperand(tr, entrypoint_offset.Int32Value()));
+    // Ensure the pc position is recorded immediately after the `blx` instruction.
+    // blx in T32 has only 16bit encoding that's why a stricter check for the scope is used.
+    ExactAssemblyScope aas(GetVIXLAssembler(),
+                           vixl32::k16BitT32InstructionSizeInBytes,
+                           CodeBufferCheckScope::kExactSize);
+    __ blx(lr);
+    if (EntrypointRequiresStackMap(entrypoint)) {
+      RecordPcInfo(instruction, dex_pc, slow_path);
+    }
+  } else {
+    // Ensure the pc position is recorded immediately after the `bl` instruction.
+    ExactAssemblyScope aas(GetVIXLAssembler(),
+                           vixl32::k32BitT32InstructionSizeInBytes,
+                           CodeBufferCheckScope::kExactSize);
+    EmitEntrypointThunkCall(entrypoint_offset);
+    if (EntrypointRequiresStackMap(entrypoint)) {
+      RecordPcInfo(instruction, dex_pc, slow_path);
+    }
   }
 }
 
@@ -2414,16 +2553,7 @@
   HLoopInformation* info = block->GetLoopInformation();
 
   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
-    if (codegen_->GetCompilerOptions().CountHotnessInCompiledCode()) {
-      UseScratchRegisterScope temps(GetVIXLAssembler());
-      vixl32::Register temp = temps.Acquire();
-      __ Push(vixl32::Register(kMethodRegister));
-      GetAssembler()->LoadFromOffset(kLoadWord, kMethodRegister, sp, kArmWordSize);
-      __ Ldrh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
-      __ Add(temp, temp, 1);
-      __ Strh(temp, MemOperand(kMethodRegister, ArtMethod::HotnessCountOffset().Int32Value()));
-      __ Pop(vixl32::Register(kMethodRegister));
-    }
+    codegen_->MaybeIncrementHotness(/* is_frame_entry= */ false);
     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
     return;
   }
@@ -3124,7 +3254,21 @@
   locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
 }
 
-void InstructionCodeGeneratorARMVIXL::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
+void InstructionCodeGeneratorARMVIXL::VisitReturn(HReturn* ret) {
+  if (GetGraph()->IsCompilingOsr()) {
+    // To simplify callers of an OSR method, we put the return value in both
+    // floating point and core registers.
+    switch (ret->InputAt(0)->GetType()) {
+      case DataType::Type::kFloat32:
+        __ Vmov(r0, s0);
+        break;
+      case DataType::Type::kFloat64:
+        __ Vmov(r0, r1, d0);
+        break;
+      default:
+        break;
+    }
+  }
   codegen_->GenerateFrameExit();
 }
 
@@ -3211,6 +3355,34 @@
   invoke->GetLocations()->AddTemp(LocationFrom(r12));
 }
 
+void CodeGeneratorARMVIXL::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
+                                                         vixl32::Register klass) {
+  DCHECK_EQ(r0.GetCode(), klass.GetCode());
+  // We know the destination of an intrinsic, so no need to record inline
+  // caches.
+  if (!instruction->GetLocations()->Intrinsified() &&
+      GetGraph()->IsCompilingBaseline() &&
+      !Runtime::Current()->IsAotCompiler()) {
+    DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
+    ScopedObjectAccess soa(Thread::Current());
+    ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
+    if (info != nullptr) {
+      InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
+      uint32_t address = reinterpret_cast32<uint32_t>(cache);
+      vixl32::Label done;
+      UseScratchRegisterScope temps(GetVIXLAssembler());
+      temps.Exclude(ip);
+      __ Mov(r4, address);
+      __ Ldr(ip, MemOperand(r4, InlineCache::ClassesOffset().Int32Value()));
+      // Fast path for a monomorphic cache.
+      __ Cmp(klass, ip);
+      __ B(eq, &done, /* is_far_target= */ false);
+      InvokeRuntime(kQuickUpdateInlineCache, instruction, instruction->GetDexPc());
+      __ Bind(&done);
+    }
+  }
+}
+
 void InstructionCodeGeneratorARMVIXL::VisitInvokeInterface(HInvokeInterface* invoke) {
   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   LocationSummary* locations = invoke->GetLocations();
@@ -3238,10 +3410,15 @@
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   GetAssembler()->MaybeUnpoisonHeapReference(temp);
+
+  // If we're compiling baseline, update the inline cache.
+  codegen_->MaybeGenerateInlineCacheCheck(invoke, temp);
+
   GetAssembler()->LoadFromOffset(kLoadWord,
                                  temp,
                                  temp,
                                  mirror::Class::ImtPtrOffset(kArmPointerSize).Uint32Value());
+
   uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
       invoke->GetImtIndex(), kArmPointerSize));
   // temp = temp->GetImtEntryAt(method_offset);
@@ -5358,24 +5535,29 @@
     case DataType::Type::kUint16:
     case DataType::Type::kInt16:
     case DataType::Type::kInt32: {
+      // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+      EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
       StoreOperandType operand_type = GetStoreOperandType(field_type);
       GetAssembler()->StoreToOffset(operand_type, RegisterFrom(value), base, offset);
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
     case DataType::Type::kReference: {
+      vixl32::Register value_reg = RegisterFrom(value);
       if (kPoisonHeapReferences && needs_write_barrier) {
         // Note that in the case where `value` is a null reference,
         // we do not enter this block, as a null reference does not
         // need poisoning.
         DCHECK_EQ(field_type, DataType::Type::kReference);
-        vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
-        __ Mov(temp, RegisterFrom(value));
-        GetAssembler()->PoisonHeapReference(temp);
-        GetAssembler()->StoreToOffset(kStoreWord, temp, base, offset);
-      } else {
-        GetAssembler()->StoreToOffset(kStoreWord, RegisterFrom(value), base, offset);
+        value_reg = RegisterFrom(locations->GetTemp(0));
+        __ Mov(value_reg, RegisterFrom(value));
+        GetAssembler()->PoisonHeapReference(value_reg);
       }
+      // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+      EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+      GetAssembler()->StoreToOffset(kStoreWord, value_reg, base, offset);
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
@@ -5389,6 +5571,8 @@
                                 RegisterFrom(locations->GetTemp(1)),
                                 instruction);
       } else {
+        // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+        EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
         GetAssembler()->StoreToOffset(kStoreWordPair, LowRegisterFrom(value), base, offset);
         codegen_->MaybeRecordImplicitNullCheck(instruction);
       }
@@ -5396,7 +5580,10 @@
     }
 
     case DataType::Type::kFloat32: {
+      // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+      EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
       GetAssembler()->StoreSToOffset(SRegisterFrom(value), base, offset);
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
@@ -5416,6 +5603,8 @@
                                 RegisterFrom(locations->GetTemp(3)),
                                 instruction);
       } else {
+        // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+        EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
         GetAssembler()->StoreDToOffset(value_reg, base, offset);
         codegen_->MaybeRecordImplicitNullCheck(instruction);
       }
@@ -5429,16 +5618,6 @@
       UNREACHABLE();
   }
 
-  // Longs and doubles are handled in the switch.
-  if (field_type != DataType::Type::kInt64 && field_type != DataType::Type::kFloat64) {
-    // TODO(VIXL): Here and for other calls to `MaybeRecordImplicitNullCheck` in this method, we
-    // should use a scope and the assembler to emit the store instruction to guarantee that we
-    // record the pc at the correct position. But the `Assembler` does not automatically handle
-    // unencodable offsets. Practically, everything is fine because the helper and VIXL, at the time
-    // of writing, do generate the store instruction last.
-    codegen_->MaybeRecordImplicitNullCheck(instruction);
-  }
-
   if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
     vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
     vixl32::Register card = RegisterFrom(locations->GetTemp(1));
@@ -5601,8 +5780,11 @@
     case DataType::Type::kUint16:
     case DataType::Type::kInt16:
     case DataType::Type::kInt32: {
+      // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+      EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
       LoadOperandType operand_type = GetLoadOperandType(load_type);
       GetAssembler()->LoadFromOffset(operand_type, RegisterFrom(out), base, offset);
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
@@ -5618,8 +5800,12 @@
           codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
         }
       } else {
-        GetAssembler()->LoadFromOffset(kLoadWord, RegisterFrom(out), base, offset);
-        codegen_->MaybeRecordImplicitNullCheck(instruction);
+        {
+          // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+          EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+          GetAssembler()->LoadFromOffset(kLoadWord, RegisterFrom(out), base, offset);
+          codegen_->MaybeRecordImplicitNullCheck(instruction);
+        }
         if (is_volatile) {
           codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
         }
@@ -5631,26 +5817,34 @@
       break;
     }
 
-    case DataType::Type::kInt64:
+    case DataType::Type::kInt64: {
+      // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+      EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
       if (is_volatile && !atomic_ldrd_strd) {
         GenerateWideAtomicLoad(base, offset, LowRegisterFrom(out), HighRegisterFrom(out));
       } else {
         GetAssembler()->LoadFromOffset(kLoadWordPair, LowRegisterFrom(out), base, offset);
       }
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
+    }
 
-    case DataType::Type::kFloat32:
+    case DataType::Type::kFloat32: {
+      // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+      EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
       GetAssembler()->LoadSFromOffset(SRegisterFrom(out), base, offset);
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
+    }
 
     case DataType::Type::kFloat64: {
+      // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+      EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
       vixl32::DRegister out_dreg = DRegisterFrom(out);
       if (is_volatile && !atomic_ldrd_strd) {
         vixl32::Register lo = RegisterFrom(locations->GetTemp(0));
         vixl32::Register hi = RegisterFrom(locations->GetTemp(1));
         GenerateWideAtomicLoad(base, offset, lo, hi);
-        // TODO(VIXL): Do we need to be immediately after the ldrexd instruction? If so we need a
-        // scope.
         codegen_->MaybeRecordImplicitNullCheck(instruction);
         __ Vmov(out_dreg, lo, hi);
       } else {
@@ -5667,19 +5861,6 @@
       UNREACHABLE();
   }
 
-  if (load_type == DataType::Type::kReference || load_type == DataType::Type::kFloat64) {
-    // Potential implicit null checks, in the case of reference or
-    // double fields, are handled in the previous switch statement.
-  } else {
-    // Address cases other than reference and double that may require an implicit null check.
-    // TODO(VIXL): Here and for other calls to `MaybeRecordImplicitNullCheck` in this method, we
-    // should use a scope and the assembler to emit the load instruction to guarantee that we
-    // record the pc at the correct position. But the `Assembler` does not automatically handle
-    // unencodable offsets. Practically, everything is fine because the helper and VIXL, at the time
-    // of writing, do generate the store instruction last.
-    codegen_->MaybeRecordImplicitNullCheck(instruction);
-  }
-
   if (is_volatile) {
     if (load_type == DataType::Type::kReference) {
       // Memory barriers, in the case of references, are also handled
@@ -5722,6 +5903,15 @@
   HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
 }
 
+void LocationsBuilderARMVIXL::VisitStringBuilderAppend(HStringBuilderAppend* instruction) {
+  codegen_->CreateStringBuilderAppendLocations(instruction, LocationFrom(r0));
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitStringBuilderAppend(HStringBuilderAppend* instruction) {
+  __ Mov(r0, instruction->GetFormat()->GetValue());
+  codegen_->InvokeRuntime(kQuickStringBuilderAppend, instruction, instruction->GetDexPc());
+}
+
 void LocationsBuilderARMVIXL::VisitUnresolvedInstanceFieldGet(
     HUnresolvedInstanceFieldGet* instruction) {
   FieldAccessCallingConventionARMVIXL calling_convention;
@@ -5958,6 +6148,8 @@
       if (maybe_compressed_char_at) {
         length = RegisterFrom(locations->GetTemp(0));
         uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
+        // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+        EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
         GetAssembler()->LoadFromOffset(kLoadWord, length, obj, count_offset);
         codegen_->MaybeRecordImplicitNullCheck(instruction);
       }
@@ -5986,8 +6178,11 @@
         } else {
           uint32_t full_offset = data_offset + (const_index << DataType::SizeShift(type));
 
+          // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+          EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
           LoadOperandType load_type = GetLoadOperandType(type);
           GetAssembler()->LoadFromOffset(load_type, RegisterFrom(out_loc), obj, full_offset);
+          codegen_->MaybeRecordImplicitNullCheck(instruction);
         }
       } else {
         UseScratchRegisterScope temps(GetVIXLAssembler());
@@ -6020,7 +6215,10 @@
             __ Bind(&done);
           }
         } else {
+          // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+          EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
           codegen_->LoadFromShiftedRegOffset(type, out_loc, temp, RegisterFrom(index));
+          codegen_->MaybeRecordImplicitNullCheck(instruction);
         }
       }
       break;
@@ -6060,15 +6258,13 @@
       } else {
         vixl32::Register out = OutputRegister(instruction);
         if (index.IsConstant()) {
-          size_t offset =
-              (Int32ConstantFrom(index) << TIMES_4) + data_offset;
-          GetAssembler()->LoadFromOffset(kLoadWord, out, obj, offset);
-          // TODO(VIXL): Here and for other calls to `MaybeRecordImplicitNullCheck` in this method,
-          // we should use a scope and the assembler to emit the load instruction to guarantee that
-          // we record the pc at the correct position. But the `Assembler` does not automatically
-          // handle unencodable offsets. Practically, everything is fine because the helper and
-          // VIXL, at the time of writing, do generate the store instruction last.
-          codegen_->MaybeRecordImplicitNullCheck(instruction);
+          size_t offset = (Int32ConstantFrom(index) << TIMES_4) + data_offset;
+          {
+            // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+            EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+            GetAssembler()->LoadFromOffset(kLoadWord, out, obj, offset);
+            codegen_->MaybeRecordImplicitNullCheck(instruction);
+          }
           // If read barriers are enabled, emit read barriers other than
           // Baker's using a slow path (and also unpoison the loaded
           // reference, if heap poisoning is enabled).
@@ -6089,12 +6285,13 @@
           } else {
             __ Add(temp, obj, data_offset);
           }
-          codegen_->LoadFromShiftedRegOffset(type, out_loc, temp, RegisterFrom(index));
-          temps.Close();
-          // TODO(VIXL): Use a scope to ensure that we record the pc position immediately after the
-          // load instruction. Practically, everything is fine because the helper and VIXL, at the
-          // time of writing, do generate the store instruction last.
-          codegen_->MaybeRecordImplicitNullCheck(instruction);
+          {
+            // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+            EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
+            codegen_->LoadFromShiftedRegOffset(type, out_loc, temp, RegisterFrom(index));
+            temps.Close();
+            codegen_->MaybeRecordImplicitNullCheck(instruction);
+          }
           // If read barriers are enabled, emit read barriers other than
           // Baker's using a slow path (and also unpoison the loaded
           // reference, if heap poisoning is enabled).
@@ -6106,6 +6303,9 @@
     }
 
     case DataType::Type::kInt64: {
+      // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+      // As two macro instructions can be emitted the max size is doubled.
+      EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes);
       if (index.IsConstant()) {
         size_t offset =
             (Int32ConstantFrom(index) << TIMES_8) + data_offset;
@@ -6116,10 +6316,14 @@
         __ Add(temp, obj, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
         GetAssembler()->LoadFromOffset(kLoadWordPair, LowRegisterFrom(out_loc), temp, data_offset);
       }
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
     case DataType::Type::kFloat32: {
+      // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+      // As two macro instructions can be emitted the max size is doubled.
+      EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes);
       vixl32::SRegister out = SRegisterFrom(out_loc);
       if (index.IsConstant()) {
         size_t offset = (Int32ConstantFrom(index) << TIMES_4) + data_offset;
@@ -6130,10 +6334,14 @@
         __ Add(temp, obj, Operand(RegisterFrom(index), vixl32::LSL, TIMES_4));
         GetAssembler()->LoadSFromOffset(out, temp, data_offset);
       }
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
     case DataType::Type::kFloat64: {
+      // Ensure that between load and MaybeRecordImplicitNullCheck there are no pools emitted.
+      // As two macro instructions can be emitted the max size is doubled.
+      EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes);
       if (index.IsConstant()) {
         size_t offset = (Int32ConstantFrom(index) << TIMES_8) + data_offset;
         GetAssembler()->LoadDFromOffset(DRegisterFrom(out_loc), obj, offset);
@@ -6143,6 +6351,7 @@
         __ Add(temp, obj, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
         GetAssembler()->LoadDFromOffset(DRegisterFrom(out_loc), temp, data_offset);
       }
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
@@ -6152,15 +6361,6 @@
       LOG(FATAL) << "Unreachable type " << type;
       UNREACHABLE();
   }
-
-  if (type == DataType::Type::kReference) {
-    // Potential implicit null checks, in the case of reference
-    // arrays, are handled in the previous switch statement.
-  } else if (!maybe_compressed_char_at) {
-    // TODO(VIXL): Use a scope to ensure we record the pc info immediately after
-    // the preceding load instruction.
-    codegen_->MaybeRecordImplicitNullCheck(instruction);
-  }
 }
 
 void LocationsBuilderARMVIXL::VisitArraySet(HArraySet* instruction) {
@@ -6168,13 +6368,11 @@
 
   bool needs_write_barrier =
       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
-  bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
+  bool needs_type_check = instruction->NeedsTypeCheck();
 
   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
       instruction,
-      may_need_runtime_call_for_type_check ?
-          LocationSummary::kCallOnSlowPath :
-          LocationSummary::kNoCall);
+      needs_type_check ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
 
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
@@ -6195,7 +6393,7 @@
   vixl32::Register array = InputRegisterAt(instruction, 0);
   Location index = locations->InAt(1);
   DataType::Type value_type = instruction->GetComponentType();
-  bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
+  bool needs_type_check = instruction->NeedsTypeCheck();
   bool needs_write_barrier =
       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
   uint32_t data_offset =
@@ -6216,7 +6414,10 @@
         uint32_t full_offset =
             data_offset + (const_index << DataType::SizeShift(value_type));
         StoreOperandType store_type = GetStoreOperandType(value_type);
+        // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+        EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
         GetAssembler()->StoreToOffset(store_type, RegisterFrom(value_loc), array, full_offset);
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
       } else {
         UseScratchRegisterScope temps(GetVIXLAssembler());
         vixl32::Register temp = temps.Acquire();
@@ -6233,7 +6434,10 @@
         } else {
           __ Add(temp, array, data_offset);
         }
+        // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+        EmissionCheckScope guard(GetVIXLAssembler(), kMaxMacroInstructionSizeInBytes);
         codegen_->StoreToShiftedRegOffset(value_type, value_loc, temp, RegisterFrom(index));
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
       }
       break;
     }
@@ -6245,10 +6449,12 @@
       DCHECK(!has_intermediate_address);
 
       if (instruction->InputAt(2)->IsNullConstant()) {
+        // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+        // As two macro instructions can be emitted the max size is doubled.
+        EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes);
         // Just setting null.
         if (index.IsConstant()) {
-          size_t offset =
-              (Int32ConstantFrom(index) << TIMES_4) + data_offset;
+          size_t offset = (Int32ConstantFrom(index) << TIMES_4) + data_offset;
           GetAssembler()->StoreToOffset(kStoreWord, value, array, offset);
         } else {
           DCHECK(index.IsRegister()) << index;
@@ -6257,11 +6463,9 @@
           __ Add(temp, array, data_offset);
           codegen_->StoreToShiftedRegOffset(value_type, value_loc, temp, RegisterFrom(index));
         }
-        // TODO(VIXL): Use a scope to ensure we record the pc info immediately after the preceding
-        // store instruction.
         codegen_->MaybeRecordImplicitNullCheck(instruction);
         DCHECK(!needs_write_barrier);
-        DCHECK(!may_need_runtime_call_for_type_check);
+        DCHECK(!needs_type_check);
         break;
       }
 
@@ -6270,36 +6474,21 @@
       vixl32::Register temp1 = RegisterFrom(temp1_loc);
       Location temp2_loc = locations->GetTemp(1);
       vixl32::Register temp2 = RegisterFrom(temp2_loc);
-      uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-      uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
-      uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
-      vixl32::Label done;
-      vixl32::Label* final_label = codegen_->GetFinalLabel(instruction, &done);
-      SlowPathCodeARMVIXL* slow_path = nullptr;
 
-      if (may_need_runtime_call_for_type_check) {
+      bool can_value_be_null = instruction->GetValueCanBeNull();
+      vixl32::Label do_store;
+      if (can_value_be_null) {
+        __ CompareAndBranchIfZero(value, &do_store, /* is_far_target= */ false);
+      }
+
+      SlowPathCodeARMVIXL* slow_path = nullptr;
+      if (needs_type_check) {
         slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathARMVIXL(instruction);
         codegen_->AddSlowPath(slow_path);
-        if (instruction->GetValueCanBeNull()) {
-          vixl32::Label non_zero;
-          __ CompareAndBranchIfNonZero(value, &non_zero);
-          if (index.IsConstant()) {
-            size_t offset =
-               (Int32ConstantFrom(index) << TIMES_4) + data_offset;
-            GetAssembler()->StoreToOffset(kStoreWord, value, array, offset);
-          } else {
-            DCHECK(index.IsRegister()) << index;
-            UseScratchRegisterScope temps(GetVIXLAssembler());
-            vixl32::Register temp = temps.Acquire();
-            __ Add(temp, array, data_offset);
-            codegen_->StoreToShiftedRegOffset(value_type, value_loc, temp, RegisterFrom(index));
-          }
-          // TODO(VIXL): Use a scope to ensure we record the pc info immediately after the preceding
-          // store instruction.
-          codegen_->MaybeRecordImplicitNullCheck(instruction);
-          __ B(final_label);
-          __ Bind(&non_zero);
-        }
+
+        const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+        const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+        const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
 
         // Note that when read barriers are enabled, the type checks
         // are performed without read barriers.  This is fine, even in
@@ -6346,6 +6535,13 @@
         }
       }
 
+      codegen_->MarkGCCard(temp1, temp2, array, value, /* can_be_null= */ false);
+
+      if (can_value_be_null) {
+        DCHECK(do_store.IsReferenced());
+        __ Bind(&do_store);
+      }
+
       vixl32::Register source = value;
       if (kPoisonHeapReferences) {
         // Note that in the case where `value` is a null reference,
@@ -6357,32 +6553,28 @@
         source = temp1;
       }
 
-      if (index.IsConstant()) {
-        size_t offset =
-            (Int32ConstantFrom(index) << TIMES_4) + data_offset;
-        GetAssembler()->StoreToOffset(kStoreWord, source, array, offset);
-      } else {
-        DCHECK(index.IsRegister()) << index;
+      {
+        // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+        // As two macro instructions can be emitted the max size is doubled.
+        EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes);
+        if (index.IsConstant()) {
+          size_t offset = (Int32ConstantFrom(index) << TIMES_4) + data_offset;
+          GetAssembler()->StoreToOffset(kStoreWord, source, array, offset);
+        } else {
+          DCHECK(index.IsRegister()) << index;
 
-        UseScratchRegisterScope temps(GetVIXLAssembler());
-        vixl32::Register temp = temps.Acquire();
-        __ Add(temp, array, data_offset);
-        codegen_->StoreToShiftedRegOffset(value_type,
-                                          LocationFrom(source),
-                                          temp,
-                                          RegisterFrom(index));
-      }
+          UseScratchRegisterScope temps(GetVIXLAssembler());
+          vixl32::Register temp = temps.Acquire();
+          __ Add(temp, array, data_offset);
+          codegen_->StoreToShiftedRegOffset(value_type,
+                                            LocationFrom(source),
+                                            temp,
+                                            RegisterFrom(index));
+        }
 
-      if (!may_need_runtime_call_for_type_check) {
-        // TODO(VIXL): Ensure we record the pc position immediately after the preceding store
-        // instruction.
-        codegen_->MaybeRecordImplicitNullCheck(instruction);
-      }
-
-      codegen_->MarkGCCard(temp1, temp2, array, value, instruction->GetValueCanBeNull());
-
-      if (done.IsReferenced()) {
-        __ Bind(&done);
+        if (can_value_be_null || !needs_type_check) {
+          codegen_->MaybeRecordImplicitNullCheck(instruction);
+        }
       }
 
       if (slow_path != nullptr) {
@@ -6393,6 +6585,9 @@
     }
 
     case DataType::Type::kInt64: {
+      // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+      // As two macro instructions can be emitted the max size is doubled.
+      EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes);
       Location value = locations->InAt(2);
       if (index.IsConstant()) {
         size_t offset =
@@ -6404,10 +6599,14 @@
         __ Add(temp, array, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
         GetAssembler()->StoreToOffset(kStoreWordPair, LowRegisterFrom(value), temp, data_offset);
       }
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
     case DataType::Type::kFloat32: {
+      // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+      // As two macro instructions can be emitted the max size is doubled.
+      EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes);
       Location value = locations->InAt(2);
       DCHECK(value.IsFpuRegister());
       if (index.IsConstant()) {
@@ -6419,10 +6618,14 @@
         __ Add(temp, array, Operand(RegisterFrom(index), vixl32::LSL, TIMES_4));
         GetAssembler()->StoreSToOffset(SRegisterFrom(value), temp, data_offset);
       }
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
     case DataType::Type::kFloat64: {
+      // Ensure that between store and MaybeRecordImplicitNullCheck there are no pools emitted.
+      // As two macro instructions can be emitted the max size is doubled.
+      EmissionCheckScope guard(GetVIXLAssembler(), 2 * kMaxMacroInstructionSizeInBytes);
       Location value = locations->InAt(2);
       DCHECK(value.IsFpuRegisterPair());
       if (index.IsConstant()) {
@@ -6434,6 +6637,7 @@
         __ Add(temp, array, Operand(RegisterFrom(index), vixl32::LSL, TIMES_8));
         GetAssembler()->StoreDToOffset(DRegisterFrom(value), temp, data_offset);
       }
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
       break;
     }
 
@@ -6443,13 +6647,6 @@
       LOG(FATAL) << "Unreachable type " << value_type;
       UNREACHABLE();
   }
-
-  // Objects are handled in the switch.
-  if (value_type != DataType::Type::kReference) {
-    // TODO(VIXL): Ensure we record the pc position immediately after the preceding store
-    // instruction.
-    codegen_->MaybeRecordImplicitNullCheck(instruction);
-  }
 }
 
 void LocationsBuilderARMVIXL::VisitArrayLength(HArrayLength* instruction) {
@@ -7007,7 +7204,8 @@
       break;
     }
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
-      DCHECK(codegen_->GetCompilerOptions().IsBootImage());
+      DCHECK(codegen_->GetCompilerOptions().IsBootImage() ||
+             codegen_->GetCompilerOptions().IsBootImageExtension());
       DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
       CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
           codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
@@ -7026,6 +7224,7 @@
       CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
           codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
       codegen_->EmitMovwMovtPlaceholder(labels, out);
+      // All aligned loads are implicitly atomic consume operations on ARM.
       codegen_->GenerateGcRootFieldLoad(cls, out_loc, out, /* offset= */ 0, read_barrier_option);
       generate_null_check = true;
       break;
@@ -7112,17 +7311,13 @@
   UseScratchRegisterScope temps(GetVIXLAssembler());
   vixl32::Register temp = temps.Acquire();
   constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
-  const size_t status_byte_offset =
-      mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
-  constexpr uint32_t shifted_initialized_value =
-      enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
+  constexpr uint32_t shifted_visibly_initialized_value =
+      enum_cast<uint32_t>(ClassStatus::kVisiblyInitialized) << status_lsb_position;
 
-  GetAssembler()->LoadFromOffset(kLoadUnsignedByte, temp, class_reg, status_byte_offset);
-  __ Cmp(temp, shifted_initialized_value);
+  const size_t status_offset = mirror::Class::StatusOffset().SizeValue();
+  GetAssembler()->LoadFromOffset(kLoadWord, temp, class_reg, status_offset);
+  __ Cmp(temp, shifted_visibly_initialized_value);
   __ B(lo, slow_path->GetEntryLabel());
-  // Even if the initialized flag is set, we may be in a situation where caches are not synced
-  // properly. Therefore, we do a memory fence.
-  __ Dmb(ISH);
   __ Bind(slow_path->GetExitLabel());
 }
 
@@ -7234,7 +7429,8 @@
 
   switch (load_kind) {
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
-      DCHECK(codegen_->GetCompilerOptions().IsBootImage());
+      DCHECK(codegen_->GetCompilerOptions().IsBootImage() ||
+             codegen_->GetCompilerOptions().IsBootImageExtension());
       CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
           codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex());
       codegen_->EmitMovwMovtPlaceholder(labels, out);
@@ -7252,6 +7448,7 @@
       CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
           codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
       codegen_->EmitMovwMovtPlaceholder(labels, out);
+      // All aligned loads are implicitly atomic consume operations on ARM.
       codegen_->GenerateGcRootFieldLoad(
           load, out_loc, out, /* offset= */ 0, kCompilerReadBarrierOption);
       LoadStringSlowPathARMVIXL* slow_path =
@@ -8703,7 +8900,7 @@
       callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
       break;
     case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative: {
-      DCHECK(GetCompilerOptions().IsBootImage());
+      DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension());
       PcRelativePatchInfo* labels = NewBootImageMethodPatch(invoke->GetTargetMethod());
       vixl32::Register temp_reg = RegisterFrom(temp);
       EmitMovwMovtPlaceholder(labels, temp_reg);
@@ -8722,6 +8919,7 @@
           MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()));
       vixl32::Register temp_reg = RegisterFrom(temp);
       EmitMovwMovtPlaceholder(labels, temp_reg);
+      // All aligned loads are implicitly atomic consume operations on ARM.
       GetAssembler()->LoadFromOffset(kLoadWord, temp_reg, temp_reg, /* offset*/ 0);
       break;
     }
@@ -8799,6 +8997,9 @@
   // concurrent copying collector may not in the future).
   GetAssembler()->MaybeUnpoisonHeapReference(temp);
 
+  // If we're compiling baseline, update the inline cache.
+  MaybeGenerateInlineCacheCheck(invoke, temp);
+
   // temp = temp->GetMethodAt(method_offset);
   uint32_t entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
       kArmPointerSize).Int32Value();
@@ -8819,14 +9020,14 @@
 
 CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageIntrinsicPatch(
     uint32_t intrinsic_data) {
-  return NewPcRelativePatch(/* dex_file= */ nullptr, intrinsic_data, &boot_image_intrinsic_patches_);
+  return NewPcRelativePatch(/* dex_file= */ nullptr, intrinsic_data, &boot_image_other_patches_);
 }
 
 CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageRelRoPatch(
     uint32_t boot_image_offset) {
   return NewPcRelativePatch(/* dex_file= */ nullptr,
                             boot_image_offset,
-                            &boot_image_method_patches_);
+                            &boot_image_other_patches_);
 }
 
 CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewBootImageMethodPatch(
@@ -8867,6 +9068,17 @@
   return &patches->back();
 }
 
+void CodeGeneratorARMVIXL::EmitEntrypointThunkCall(ThreadOffset32 entrypoint_offset) {
+  DCHECK(!__ AllowMacroInstructions());  // In ExactAssemblyScope.
+  DCHECK(!Runtime::Current()->UseJitCompilation());
+  call_entrypoint_patches_.emplace_back(/*dex_file*/ nullptr, entrypoint_offset.Uint32Value());
+  vixl::aarch32::Label* bl_label = &call_entrypoint_patches_.back().label;
+  __ bind(bl_label);
+  vixl32::Label placeholder_label;
+  __ bl(&placeholder_label);  // Placeholder, patched at link-time.
+  __ bind(&placeholder_label);
+}
+
 void CodeGeneratorARMVIXL::EmitBakerReadBarrierBne(uint32_t custom_data) {
   DCHECK(!__ AllowMacroInstructions());  // In ExactAssemblyScope.
   if (Runtime::Current()->UseJitCompilation()) {
@@ -8988,24 +9200,28 @@
       /* MOVW+MOVT for each entry */ 2u * type_bss_entry_patches_.size() +
       /* MOVW+MOVT for each entry */ 2u * boot_image_string_patches_.size() +
       /* MOVW+MOVT for each entry */ 2u * string_bss_entry_patches_.size() +
-      /* MOVW+MOVT for each entry */ 2u * boot_image_intrinsic_patches_.size() +
+      /* MOVW+MOVT for each entry */ 2u * boot_image_other_patches_.size() +
+      call_entrypoint_patches_.size() +
       baker_read_barrier_patches_.size();
   linker_patches->reserve(size);
-  if (GetCompilerOptions().IsBootImage()) {
+  if (GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension()) {
     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeMethodPatch>(
         boot_image_method_patches_, linker_patches);
     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeTypePatch>(
         boot_image_type_patches_, linker_patches);
     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
         boot_image_string_patches_, linker_patches);
-    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::IntrinsicReferencePatch>>(
-        boot_image_intrinsic_patches_, linker_patches);
   } else {
-    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::DataBimgRelRoPatch>>(
-        boot_image_method_patches_, linker_patches);
+    DCHECK(boot_image_method_patches_.empty());
     DCHECK(boot_image_type_patches_.empty());
     DCHECK(boot_image_string_patches_.empty());
-    DCHECK(boot_image_intrinsic_patches_.empty());
+  }
+  if (GetCompilerOptions().IsBootImage()) {
+    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::IntrinsicReferencePatch>>(
+        boot_image_other_patches_, linker_patches);
+  } else {
+    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::DataBimgRelRoPatch>>(
+        boot_image_other_patches_, linker_patches);
   }
   EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
       method_bss_entry_patches_, linker_patches);
@@ -9013,6 +9229,11 @@
       type_bss_entry_patches_, linker_patches);
   EmitPcRelativeLinkerPatches<linker::LinkerPatch::StringBssEntryPatch>(
       string_bss_entry_patches_, linker_patches);
+  for (const PatchInfo<vixl32::Label>& info : call_entrypoint_patches_) {
+    DCHECK(info.target_dex_file == nullptr);
+    linker_patches->push_back(linker::LinkerPatch::CallEntrypointPatch(
+        info.label.GetLocation(), info.offset_or_index));
+  }
   for (const BakerReadBarrierPatchInfo& info : baker_read_barrier_patches_) {
     linker_patches->push_back(linker::LinkerPatch::BakerReadBarrierBranchPatch(
         info.label.GetLocation(), info.custom_data));
@@ -9021,7 +9242,8 @@
 }
 
 bool CodeGeneratorARMVIXL::NeedsThunkCode(const linker::LinkerPatch& patch) const {
-  return patch.GetType() == linker::LinkerPatch::Type::kBakerReadBarrierBranch ||
+  return patch.GetType() == linker::LinkerPatch::Type::kCallEntrypoint ||
+         patch.GetType() == linker::LinkerPatch::Type::kBakerReadBarrierBranch ||
          patch.GetType() == linker::LinkerPatch::Type::kCallRelative;
 }
 
@@ -9030,23 +9252,30 @@
                                          /*out*/ std::string* debug_name) {
   arm::ArmVIXLAssembler assembler(GetGraph()->GetAllocator());
   switch (patch.GetType()) {
-    case linker::LinkerPatch::Type::kCallRelative:
+    case linker::LinkerPatch::Type::kCallRelative: {
       // The thunk just uses the entry point in the ArtMethod. This works even for calls
       // to the generic JNI and interpreter trampolines.
-      assembler.LoadFromOffset(
-          arm::kLoadWord,
-          vixl32::pc,
-          vixl32::r0,
-          ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize).Int32Value());
+      MemberOffset offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArmPointerSize);
+      assembler.LoadFromOffset(arm::kLoadWord, vixl32::pc, vixl32::r0, offset.Int32Value());
       assembler.GetVIXLAssembler()->Bkpt(0);
       if (GetCompilerOptions().GenerateAnyDebugInfo()) {
         *debug_name = "MethodCallThunk";
       }
       break;
-    case linker::LinkerPatch::Type::kBakerReadBarrierBranch:
+    }
+    case linker::LinkerPatch::Type::kCallEntrypoint: {
+      assembler.LoadFromOffset(arm::kLoadWord, vixl32::pc, tr, patch.EntrypointOffset());
+      assembler.GetVIXLAssembler()->Bkpt(0);
+      if (GetCompilerOptions().GenerateAnyDebugInfo()) {
+        *debug_name = "EntrypointCallThunk_" + std::to_string(patch.EntrypointOffset());
+      }
+      break;
+    }
+    case linker::LinkerPatch::Type::kBakerReadBarrierBranch: {
       DCHECK_EQ(patch.GetBakerCustomValue2(), 0u);
       CompileBakerReadBarrierThunk(assembler, patch.GetBakerCustomValue1(), debug_name);
       break;
+    }
     default:
       LOG(FATAL) << "Unexpected patch type " << patch.GetType();
       UNREACHABLE();
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index 5edca87..48fb082 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -76,8 +76,6 @@
 
 static const vixl::aarch32::Register kMethodRegister = vixl::aarch32::r0;
 
-static const vixl::aarch32::Register kCoreAlwaysSpillRegister = vixl::aarch32::r5;
-
 // Callee saves core registers r5, r6, r7, r8 (except when emitting Baker
 // read barriers, where it is used as Marking Register), r10, r11, and lr.
 static const vixl::aarch32::RegisterList kCoreCalleeSaves = vixl::aarch32::RegisterList::Union(
@@ -448,7 +446,9 @@
     return static_cast<size_t>(kArmPointerSize);
   }
 
-  size_t GetFloatingPointSpillSlotSize() const override { return vixl::aarch32::kRegSizeInBytes; }
+  size_t GetCalleePreservedFPWidth() const override {
+    return vixl::aarch32::kSRegSizeInBytes;
+  }
 
   HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
 
@@ -589,6 +589,9 @@
   PcRelativePatchInfo* NewStringBssEntryPatch(const DexFile& dex_file,
                                               dex::StringIndex string_index);
 
+  // Emit the BL instruction for entrypoint thunk call and record the associated patch for AOT.
+  void EmitEntrypointThunkCall(ThreadOffset32 entrypoint_offset);
+
   // Emit the BNE instruction for baker read barrier and record
   // the associated patch for AOT or slow path for JIT.
   void EmitBakerReadBarrierBne(uint32_t custom_data);
@@ -744,6 +747,18 @@
                                  vixl::aarch32::Register in,
                                  vixl::aarch32::Register temp = vixl32::Register());
 
+  void MaybeRecordImplicitNullCheck(HInstruction* instr) final {
+    // The function must be only be called within special scopes
+    // (EmissionCheckScope, ExactAssemblyScope) which prevent generation of
+    // veneer/literal pools by VIXL assembler.
+    CHECK_EQ(GetVIXLAssembler()->ArePoolsBlocked(), true)
+        << "The function must only be called within EmissionCheckScope or ExactAssemblyScope";
+    CodeGenerator::MaybeRecordImplicitNullCheck(instr);
+  }
+
+  void MaybeGenerateInlineCacheCheck(HInstruction* instruction, vixl32::Register klass);
+  void MaybeIncrementHotness(bool is_frame_entry);
+
  private:
   // Encoding of thunk type and data for link-time generated thunks for Baker read barriers.
 
@@ -869,10 +884,7 @@
 
   ArmVIXLAssembler assembler_;
 
-  // Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
-  Uint32ToLiteralMap uint32_literals_;
-  // PC-relative method patch info for kBootImageLinkTimePcRelative/kBootImageRelRo.
-  // Also used for type/string patches for kBootImageRelRo (same linker patch as for methods).
+  // PC-relative method patch info for kBootImageLinkTimePcRelative.
   ArenaDeque<PcRelativePatchInfo> boot_image_method_patches_;
   // PC-relative method patch info for kBssEntry.
   ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
@@ -884,11 +896,16 @@
   ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
   // PC-relative String patch info for kBssEntry.
   ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
-  // PC-relative patch info for IntrinsicObjects.
-  ArenaDeque<PcRelativePatchInfo> boot_image_intrinsic_patches_;
+  // PC-relative patch info for IntrinsicObjects for the boot image,
+  // and for method/type/string patches for kBootImageRelRo otherwise.
+  ArenaDeque<PcRelativePatchInfo> boot_image_other_patches_;
+  // Patch info for calls to entrypoint dispatch thunks. Used for slow paths.
+  ArenaDeque<PatchInfo<vixl::aarch32::Label>> call_entrypoint_patches_;
   // Baker read barrier patch info.
   ArenaDeque<BakerReadBarrierPatchInfo> baker_read_barrier_patches_;
 
+  // Deduplication map for 32-bit literals, used for JIT for boot image addresses.
+  Uint32ToLiteralMap uint32_literals_;
   // Patches for string literals in JIT compiled code.
   StringToLiteralMap jit_string_patches_;
   // Patches for class literals in JIT compiled code.
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
deleted file mode 100644
index 72334af..0000000
--- a/compiler/optimizing/code_generator_mips.cc
+++ /dev/null
@@ -1,10224 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "code_generator_mips.h"
-
-#include "arch/mips/asm_support_mips.h"
-#include "arch/mips/entrypoints_direct_mips.h"
-#include "arch/mips/instruction_set_features_mips.h"
-#include "art_method.h"
-#include "class_table.h"
-#include "code_generator_utils.h"
-#include "compiled_method.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "entrypoints/quick/quick_entrypoints_enum.h"
-#include "gc/accounting/card_table.h"
-#include "gc/space/image_space.h"
-#include "heap_poisoning.h"
-#include "intrinsics.h"
-#include "intrinsics_mips.h"
-#include "linker/linker_patch.h"
-#include "mirror/array-inl.h"
-#include "mirror/class-inl.h"
-#include "offsets.h"
-#include "stack_map_stream.h"
-#include "thread.h"
-#include "utils/assembler.h"
-#include "utils/mips/assembler_mips.h"
-#include "utils/stack_checks.h"
-
-namespace art {
-namespace mips {
-
-static constexpr int kCurrentMethodStackOffset = 0;
-static constexpr Register kMethodRegisterArgument = A0;
-
-// Flags controlling the use of thunks for Baker read barriers.
-constexpr bool kBakerReadBarrierThunksEnableForFields = true;
-constexpr bool kBakerReadBarrierThunksEnableForArrays = true;
-constexpr bool kBakerReadBarrierThunksEnableForGcRoots = true;
-
-Location MipsReturnLocation(DataType::Type return_type) {
-  switch (return_type) {
-    case DataType::Type::kReference:
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kUint32:
-    case DataType::Type::kInt32:
-      return Location::RegisterLocation(V0);
-
-    case DataType::Type::kUint64:
-    case DataType::Type::kInt64:
-      return Location::RegisterPairLocation(V0, V1);
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      return Location::FpuRegisterLocation(F0);
-
-    case DataType::Type::kVoid:
-      return Location();
-  }
-  UNREACHABLE();
-}
-
-Location InvokeDexCallingConventionVisitorMIPS::GetReturnLocation(DataType::Type type) const {
-  return MipsReturnLocation(type);
-}
-
-Location InvokeDexCallingConventionVisitorMIPS::GetMethodLocation() const {
-  return Location::RegisterLocation(kMethodRegisterArgument);
-}
-
-Location InvokeDexCallingConventionVisitorMIPS::GetNextLocation(DataType::Type type) {
-  Location next_location;
-
-  switch (type) {
-    case DataType::Type::kReference:
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32: {
-      uint32_t gp_index = gp_index_++;
-      if (gp_index < calling_convention.GetNumberOfRegisters()) {
-        next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index));
-      } else {
-        size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
-        next_location = Location::StackSlot(stack_offset);
-      }
-      break;
-    }
-
-    case DataType::Type::kInt64: {
-      uint32_t gp_index = gp_index_;
-      gp_index_ += 2;
-      if (gp_index + 1 < calling_convention.GetNumberOfRegisters()) {
-        Register reg = calling_convention.GetRegisterAt(gp_index);
-        if (reg == A1 || reg == A3) {
-          gp_index_++;  // Skip A1(A3), and use A2_A3(T0_T1) instead.
-          gp_index++;
-        }
-        Register low_even = calling_convention.GetRegisterAt(gp_index);
-        Register high_odd = calling_convention.GetRegisterAt(gp_index + 1);
-        DCHECK_EQ(low_even + 1, high_odd);
-        next_location = Location::RegisterPairLocation(low_even, high_odd);
-      } else {
-        size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
-        next_location = Location::DoubleStackSlot(stack_offset);
-      }
-      break;
-    }
-
-    // Note: both float and double types are stored in even FPU registers. On 32 bit FPU, double
-    // will take up the even/odd pair, while floats are stored in even regs only.
-    // On 64 bit FPU, both double and float are stored in even registers only.
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64: {
-      uint32_t float_index = float_index_++;
-      if (float_index < calling_convention.GetNumberOfFpuRegisters()) {
-        next_location = Location::FpuRegisterLocation(
-            calling_convention.GetFpuRegisterAt(float_index));
-      } else {
-        size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
-        next_location = DataType::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
-                                                    : Location::StackSlot(stack_offset);
-      }
-      break;
-    }
-
-    case DataType::Type::kUint32:
-    case DataType::Type::kUint64:
-    case DataType::Type::kVoid:
-      LOG(FATAL) << "Unexpected parameter type " << type;
-      UNREACHABLE();
-  }
-
-  // Space on the stack is reserved for all arguments.
-  stack_index_ += DataType::Is64BitType(type) ? 2 : 1;
-
-  return next_location;
-}
-
-Location InvokeRuntimeCallingConvention::GetReturnLocation(DataType::Type type) {
-  return MipsReturnLocation(type);
-}
-
-static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() {
-  InvokeRuntimeCallingConvention calling_convention;
-  RegisterSet caller_saves = RegisterSet::Empty();
-  caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  // The reference is returned in the same register. This differs from the standard return location.
-  return caller_saves;
-}
-
-// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
-#define __ down_cast<CodeGeneratorMIPS*>(codegen)->GetAssembler()->  // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, x).Int32Value()
-
-class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS {
- public:
-  explicit BoundsCheckSlowPathMIPS(HBoundsCheck* instruction) : SlowPathCodeMIPS(instruction) {}
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    LocationSummary* locations = instruction_->GetLocations();
-    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
-    __ Bind(GetEntryLabel());
-    if (instruction_->CanThrowIntoCatchBlock()) {
-      // Live registers will be restored in the catch block if caught.
-      SaveLiveRegisters(codegen, instruction_->GetLocations());
-    }
-    // We're moving two locations to locations that could overlap, so we need a parallel
-    // move resolver.
-    InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(locations->InAt(0),
-                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-                               DataType::Type::kInt32,
-                               locations->InAt(1),
-                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
-                               DataType::Type::kInt32);
-    QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt()
-        ? kQuickThrowStringBounds
-        : kQuickThrowArrayBounds;
-    mips_codegen->InvokeRuntime(entrypoint, instruction_, instruction_->GetDexPc(), this);
-    CheckEntrypointTypes<kQuickThrowStringBounds, void, int32_t, int32_t>();
-    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
-  }
-
-  bool IsFatal() const override { return true; }
-
-  const char* GetDescription() const override { return "BoundsCheckSlowPathMIPS"; }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS);
-};
-
-class DivZeroCheckSlowPathMIPS : public SlowPathCodeMIPS {
- public:
-  explicit DivZeroCheckSlowPathMIPS(HDivZeroCheck* instruction) : SlowPathCodeMIPS(instruction) {}
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
-    __ Bind(GetEntryLabel());
-    mips_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
-    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
-  }
-
-  bool IsFatal() const override { return true; }
-
-  const char* GetDescription() const override { return "DivZeroCheckSlowPathMIPS"; }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS);
-};
-
-class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
- public:
-  LoadClassSlowPathMIPS(HLoadClass* cls, HInstruction* at)
-      : SlowPathCodeMIPS(at), cls_(cls) {
-    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
-    DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
-  }
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    LocationSummary* locations = instruction_->GetLocations();
-    Location out = locations->Out();
-    const uint32_t dex_pc = instruction_->GetDexPc();
-    bool must_resolve_type = instruction_->IsLoadClass() && cls_->MustResolveTypeOnSlowPath();
-    bool must_do_clinit = instruction_->IsClinitCheck() || cls_->MustGenerateClinitCheck();
-
-    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
-    __ Bind(GetEntryLabel());
-    SaveLiveRegisters(codegen, locations);
-
-    InvokeRuntimeCallingConvention calling_convention;
-    if (must_resolve_type) {
-      DCHECK(IsSameDexFile(cls_->GetDexFile(), mips_codegen->GetGraph()->GetDexFile()));
-      dex::TypeIndex type_index = cls_->GetTypeIndex();
-      __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
-      mips_codegen->InvokeRuntime(kQuickResolveType, instruction_, dex_pc, this);
-      CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
-      // If we also must_do_clinit, the resolved type is now in the correct register.
-    } else {
-      DCHECK(must_do_clinit);
-      Location source = instruction_->IsLoadClass() ? out : locations->InAt(0);
-      mips_codegen->MoveLocation(Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-                                 source,
-                                 cls_->GetType());
-    }
-    if (must_do_clinit) {
-      mips_codegen->InvokeRuntime(kQuickInitializeStaticStorage, instruction_, dex_pc, this);
-      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, mirror::Class*>();
-    }
-
-    // Move the class to the desired location.
-    if (out.IsValid()) {
-      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
-      DataType::Type type = instruction_->GetType();
-      mips_codegen->MoveLocation(out,
-                                 Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-                                 type);
-    }
-    RestoreLiveRegisters(codegen, locations);
-
-    __ B(GetExitLabel());
-  }
-
-  const char* GetDescription() const override { return "LoadClassSlowPathMIPS"; }
-
- private:
-  // The class this slow path will load.
-  HLoadClass* const cls_;
-
-  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS);
-};
-
-class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
- public:
-  explicit LoadStringSlowPathMIPS(HLoadString* instruction)
-      : SlowPathCodeMIPS(instruction) {}
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    DCHECK(instruction_->IsLoadString());
-    DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
-    LocationSummary* locations = instruction_->GetLocations();
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
-    const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
-    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
-    InvokeRuntimeCallingConvention calling_convention;
-    __ Bind(GetEntryLabel());
-    SaveLiveRegisters(codegen, locations);
-
-    __ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
-    mips_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
-    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
-
-    DataType::Type type = instruction_->GetType();
-    mips_codegen->MoveLocation(locations->Out(),
-                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-                               type);
-    RestoreLiveRegisters(codegen, locations);
-
-    __ B(GetExitLabel());
-  }
-
-  const char* GetDescription() const override { return "LoadStringSlowPathMIPS"; }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS);
-};
-
-class NullCheckSlowPathMIPS : public SlowPathCodeMIPS {
- public:
-  explicit NullCheckSlowPathMIPS(HNullCheck* instr) : SlowPathCodeMIPS(instr) {}
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
-    __ Bind(GetEntryLabel());
-    if (instruction_->CanThrowIntoCatchBlock()) {
-      // Live registers will be restored in the catch block if caught.
-      SaveLiveRegisters(codegen, instruction_->GetLocations());
-    }
-    mips_codegen->InvokeRuntime(kQuickThrowNullPointer,
-                                instruction_,
-                                instruction_->GetDexPc(),
-                                this);
-    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
-  }
-
-  bool IsFatal() const override { return true; }
-
-  const char* GetDescription() const override { return "NullCheckSlowPathMIPS"; }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS);
-};
-
-class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS {
- public:
-  SuspendCheckSlowPathMIPS(HSuspendCheck* instruction, HBasicBlock* successor)
-      : SlowPathCodeMIPS(instruction), successor_(successor) {}
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    LocationSummary* locations = instruction_->GetLocations();
-    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
-    __ Bind(GetEntryLabel());
-    SaveLiveRegisters(codegen, locations);     // Only saves live vector registers for SIMD.
-    mips_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
-    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
-    RestoreLiveRegisters(codegen, locations);  // Only restores live vector registers for SIMD.
-    if (successor_ == nullptr) {
-      __ B(GetReturnLabel());
-    } else {
-      __ B(mips_codegen->GetLabelOf(successor_));
-    }
-  }
-
-  MipsLabel* GetReturnLabel() {
-    DCHECK(successor_ == nullptr);
-    return &return_label_;
-  }
-
-  const char* GetDescription() const override { return "SuspendCheckSlowPathMIPS"; }
-
-  HBasicBlock* GetSuccessor() const {
-    return successor_;
-  }
-
- private:
-  // If not null, the block to branch to after the suspend check.
-  HBasicBlock* const successor_;
-
-  // If `successor_` is null, the label to branch to after the suspend check.
-  MipsLabel return_label_;
-
-  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS);
-};
-
-class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
- public:
-  explicit TypeCheckSlowPathMIPS(HInstruction* instruction, bool is_fatal)
-      : SlowPathCodeMIPS(instruction), is_fatal_(is_fatal) {}
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    LocationSummary* locations = instruction_->GetLocations();
-    uint32_t dex_pc = instruction_->GetDexPc();
-    DCHECK(instruction_->IsCheckCast()
-           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
-    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
-
-    __ Bind(GetEntryLabel());
-    if (!is_fatal_ || instruction_->CanThrowIntoCatchBlock()) {
-      SaveLiveRegisters(codegen, locations);
-    }
-
-    // We're moving two locations to locations that could overlap, so we need a parallel
-    // move resolver.
-    InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(locations->InAt(0),
-                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-                               DataType::Type::kReference,
-                               locations->InAt(1),
-                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
-                               DataType::Type::kReference);
-    if (instruction_->IsInstanceOf()) {
-      mips_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
-      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
-      DataType::Type ret_type = instruction_->GetType();
-      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
-      mips_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
-    } else {
-      DCHECK(instruction_->IsCheckCast());
-      mips_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
-      CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
-    }
-
-    if (!is_fatal_) {
-      RestoreLiveRegisters(codegen, locations);
-      __ B(GetExitLabel());
-    }
-  }
-
-  const char* GetDescription() const override { return "TypeCheckSlowPathMIPS"; }
-
-  bool IsFatal() const override { return is_fatal_; }
-
- private:
-  const bool is_fatal_;
-
-  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS);
-};
-
-class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
- public:
-  explicit DeoptimizationSlowPathMIPS(HDeoptimize* instruction)
-    : SlowPathCodeMIPS(instruction) {}
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
-    __ Bind(GetEntryLabel());
-    LocationSummary* locations = instruction_->GetLocations();
-    SaveLiveRegisters(codegen, locations);
-    InvokeRuntimeCallingConvention calling_convention;
-    __ LoadConst32(calling_convention.GetRegisterAt(0),
-                   static_cast<uint32_t>(instruction_->AsDeoptimize()->GetDeoptimizationKind()));
-    mips_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
-    CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
-  }
-
-  const char* GetDescription() const override { return "DeoptimizationSlowPathMIPS"; }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS);
-};
-
-class ArraySetSlowPathMIPS : public SlowPathCodeMIPS {
- public:
-  explicit ArraySetSlowPathMIPS(HInstruction* instruction) : SlowPathCodeMIPS(instruction) {}
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    LocationSummary* locations = instruction_->GetLocations();
-    __ Bind(GetEntryLabel());
-    SaveLiveRegisters(codegen, locations);
-
-    InvokeRuntimeCallingConvention calling_convention;
-    HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
-    parallel_move.AddMove(
-        locations->InAt(0),
-        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-        DataType::Type::kReference,
-        nullptr);
-    parallel_move.AddMove(
-        locations->InAt(1),
-        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
-        DataType::Type::kInt32,
-        nullptr);
-    parallel_move.AddMove(
-        locations->InAt(2),
-        Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
-        DataType::Type::kReference,
-        nullptr);
-    codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
-
-    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
-    mips_codegen->InvokeRuntime(kQuickAputObject, instruction_, instruction_->GetDexPc(), this);
-    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
-    RestoreLiveRegisters(codegen, locations);
-    __ B(GetExitLabel());
-  }
-
-  const char* GetDescription() const override { return "ArraySetSlowPathMIPS"; }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathMIPS);
-};
-
-// Slow path marking an object reference `ref` during a read
-// barrier. The field `obj.field` in the object `obj` holding this
-// reference does not get updated by this slow path after marking (see
-// ReadBarrierMarkAndUpdateFieldSlowPathMIPS below for that).
-//
-// This means that after the execution of this slow path, `ref` will
-// always be up-to-date, but `obj.field` may not; i.e., after the
-// flip, `ref` will be a to-space reference, but `obj.field` will
-// probably still be a from-space reference (unless it gets updated by
-// another thread, or if another thread installed another object
-// reference (different from `ref`) in `obj.field`).
-//
-// If `entrypoint` is a valid location it is assumed to already be
-// holding the entrypoint. The case where the entrypoint is passed in
-// is for the GcRoot read barrier.
-class ReadBarrierMarkSlowPathMIPS : public SlowPathCodeMIPS {
- public:
-  ReadBarrierMarkSlowPathMIPS(HInstruction* instruction,
-                              Location ref,
-                              Location entrypoint = Location::NoLocation())
-      : SlowPathCodeMIPS(instruction), ref_(ref), entrypoint_(entrypoint) {
-    DCHECK(kEmitCompilerReadBarrier);
-  }
-
-  const char* GetDescription() const override { return "ReadBarrierMarkSlowPathMIPS"; }
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    LocationSummary* locations = instruction_->GetLocations();
-    Register ref_reg = ref_.AsRegister<Register>();
-    DCHECK(locations->CanCall());
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
-    DCHECK(instruction_->IsInstanceFieldGet() ||
-           instruction_->IsStaticFieldGet() ||
-           instruction_->IsArrayGet() ||
-           instruction_->IsArraySet() ||
-           instruction_->IsLoadClass() ||
-           instruction_->IsLoadString() ||
-           instruction_->IsInstanceOf() ||
-           instruction_->IsCheckCast() ||
-           (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()) ||
-           (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified()))
-        << "Unexpected instruction in read barrier marking slow path: "
-        << instruction_->DebugName();
-
-    __ Bind(GetEntryLabel());
-    // No need to save live registers; it's taken care of by the
-    // entrypoint. Also, there is no need to update the stack mask,
-    // as this runtime call will not trigger a garbage collection.
-    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
-    DCHECK((V0 <= ref_reg && ref_reg <= T7) ||
-           (S2 <= ref_reg && ref_reg <= S7) ||
-           (ref_reg == FP)) << ref_reg;
-    // "Compact" slow path, saving two moves.
-    //
-    // Instead of using the standard runtime calling convention (input
-    // and output in A0 and V0 respectively):
-    //
-    //   A0 <- ref
-    //   V0 <- ReadBarrierMark(A0)
-    //   ref <- V0
-    //
-    // we just use rX (the register containing `ref`) as input and output
-    // of a dedicated entrypoint:
-    //
-    //   rX <- ReadBarrierMarkRegX(rX)
-    //
-    if (entrypoint_.IsValid()) {
-      mips_codegen->ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction_, this);
-      DCHECK_EQ(entrypoint_.AsRegister<Register>(), T9);
-      __ Jalr(entrypoint_.AsRegister<Register>());
-      __ NopIfNoReordering();
-    } else {
-      int32_t entry_point_offset =
-          Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(ref_reg - 1);
-      // This runtime call does not require a stack map.
-      mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
-                                                        instruction_,
-                                                        this,
-                                                        /* direct= */ false);
-    }
-    __ B(GetExitLabel());
-  }
-
- private:
-  // The location (register) of the marked object reference.
-  const Location ref_;
-
-  // The location of the entrypoint if already loaded.
-  const Location entrypoint_;
-
-  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathMIPS);
-};
-
-// Slow path marking an object reference `ref` during a read barrier,
-// and if needed, atomically updating the field `obj.field` in the
-// object `obj` holding this reference after marking (contrary to
-// ReadBarrierMarkSlowPathMIPS above, which never tries to update
-// `obj.field`).
-//
-// This means that after the execution of this slow path, both `ref`
-// and `obj.field` will be up-to-date; i.e., after the flip, both will
-// hold the same to-space reference (unless another thread installed
-// another object reference (different from `ref`) in `obj.field`).
-class ReadBarrierMarkAndUpdateFieldSlowPathMIPS : public SlowPathCodeMIPS {
- public:
-  ReadBarrierMarkAndUpdateFieldSlowPathMIPS(HInstruction* instruction,
-                                            Location ref,
-                                            Register obj,
-                                            Location field_offset,
-                                            Register temp1)
-      : SlowPathCodeMIPS(instruction),
-        ref_(ref),
-        obj_(obj),
-        field_offset_(field_offset),
-        temp1_(temp1) {
-    DCHECK(kEmitCompilerReadBarrier);
-  }
-
-  const char* GetDescription() const override {
-    return "ReadBarrierMarkAndUpdateFieldSlowPathMIPS";
-  }
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    LocationSummary* locations = instruction_->GetLocations();
-    Register ref_reg = ref_.AsRegister<Register>();
-    DCHECK(locations->CanCall());
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
-    // This slow path is only used by the UnsafeCASObject intrinsic.
-    DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
-        << "Unexpected instruction in read barrier marking and field updating slow path: "
-        << instruction_->DebugName();
-    DCHECK(instruction_->GetLocations()->Intrinsified());
-    DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject);
-    DCHECK(field_offset_.IsRegisterPair()) << field_offset_;
-
-    __ Bind(GetEntryLabel());
-
-    // Save the old reference.
-    // Note that we cannot use AT or TMP to save the old reference, as those
-    // are used by the code that follows, but we need the old reference after
-    // the call to the ReadBarrierMarkRegX entry point.
-    DCHECK_NE(temp1_, AT);
-    DCHECK_NE(temp1_, TMP);
-    __ Move(temp1_, ref_reg);
-
-    // No need to save live registers; it's taken care of by the
-    // entrypoint. Also, there is no need to update the stack mask,
-    // as this runtime call will not trigger a garbage collection.
-    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
-    DCHECK((V0 <= ref_reg && ref_reg <= T7) ||
-           (S2 <= ref_reg && ref_reg <= S7) ||
-           (ref_reg == FP)) << ref_reg;
-    // "Compact" slow path, saving two moves.
-    //
-    // Instead of using the standard runtime calling convention (input
-    // and output in A0 and V0 respectively):
-    //
-    //   A0 <- ref
-    //   V0 <- ReadBarrierMark(A0)
-    //   ref <- V0
-    //
-    // we just use rX (the register containing `ref`) as input and output
-    // of a dedicated entrypoint:
-    //
-    //   rX <- ReadBarrierMarkRegX(rX)
-    //
-    int32_t entry_point_offset =
-        Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(ref_reg - 1);
-    // This runtime call does not require a stack map.
-    mips_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
-                                                      instruction_,
-                                                      this,
-                                                      /* direct= */ false);
-
-    // If the new reference is different from the old reference,
-    // update the field in the holder (`*(obj_ + field_offset_)`).
-    //
-    // Note that this field could also hold a different object, if
-    // another thread had concurrently changed it. In that case, the
-    // the compare-and-set (CAS) loop below would abort, leaving the
-    // field as-is.
-    MipsLabel done;
-    __ Beq(temp1_, ref_reg, &done);
-
-    // Update the the holder's field atomically.  This may fail if
-    // mutator updates before us, but it's OK.  This is achieved
-    // using a strong compare-and-set (CAS) operation with relaxed
-    // memory synchronization ordering, where the expected value is
-    // the old reference and the desired value is the new reference.
-
-    // Convenience aliases.
-    Register base = obj_;
-    // The UnsafeCASObject intrinsic uses a register pair as field
-    // offset ("long offset"), of which only the low part contains
-    // data.
-    Register offset = field_offset_.AsRegisterPairLow<Register>();
-    Register expected = temp1_;
-    Register value = ref_reg;
-    Register tmp_ptr = TMP;      // Pointer to actual memory.
-    Register tmp = AT;           // Value in memory.
-
-    __ Addu(tmp_ptr, base, offset);
-
-    if (kPoisonHeapReferences) {
-      __ PoisonHeapReference(expected);
-      // Do not poison `value` if it is the same register as
-      // `expected`, which has just been poisoned.
-      if (value != expected) {
-        __ PoisonHeapReference(value);
-      }
-    }
-
-    // do {
-    //   tmp = [r_ptr] - expected;
-    // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
-
-    bool is_r6 = mips_codegen->GetInstructionSetFeatures().IsR6();
-    MipsLabel loop_head, exit_loop;
-    __ Bind(&loop_head);
-    if (is_r6) {
-      __ LlR6(tmp, tmp_ptr);
-    } else {
-      __ LlR2(tmp, tmp_ptr);
-    }
-    __ Bne(tmp, expected, &exit_loop);
-    __ Move(tmp, value);
-    if (is_r6) {
-      __ ScR6(tmp, tmp_ptr);
-    } else {
-      __ ScR2(tmp, tmp_ptr);
-    }
-    __ Beqz(tmp, &loop_head);
-    __ Bind(&exit_loop);
-
-    if (kPoisonHeapReferences) {
-      __ UnpoisonHeapReference(expected);
-      // Do not unpoison `value` if it is the same register as
-      // `expected`, which has just been unpoisoned.
-      if (value != expected) {
-        __ UnpoisonHeapReference(value);
-      }
-    }
-
-    __ Bind(&done);
-    __ B(GetExitLabel());
-  }
-
- private:
-  // The location (register) of the marked object reference.
-  const Location ref_;
-  // The register containing the object holding the marked object reference field.
-  const Register obj_;
-  // The location of the offset of the marked reference field within `obj_`.
-  Location field_offset_;
-
-  const Register temp1_;
-
-  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkAndUpdateFieldSlowPathMIPS);
-};
-
-// Slow path generating a read barrier for a heap reference.
-class ReadBarrierForHeapReferenceSlowPathMIPS : public SlowPathCodeMIPS {
- public:
-  ReadBarrierForHeapReferenceSlowPathMIPS(HInstruction* instruction,
-                                          Location out,
-                                          Location ref,
-                                          Location obj,
-                                          uint32_t offset,
-                                          Location index)
-      : SlowPathCodeMIPS(instruction),
-        out_(out),
-        ref_(ref),
-        obj_(obj),
-        offset_(offset),
-        index_(index) {
-    DCHECK(kEmitCompilerReadBarrier);
-    // If `obj` is equal to `out` or `ref`, it means the initial object
-    // has been overwritten by (or after) the heap object reference load
-    // to be instrumented, e.g.:
-    //
-    //   __ LoadFromOffset(kLoadWord, out, out, offset);
-    //   codegen_->GenerateReadBarrierSlow(instruction, out_loc, out_loc, out_loc, offset);
-    //
-    // In that case, we have lost the information about the original
-    // object, and the emitted read barrier cannot work properly.
-    DCHECK(!obj.Equals(out)) << "obj=" << obj << " out=" << out;
-    DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
-  }
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
-    LocationSummary* locations = instruction_->GetLocations();
-    Register reg_out = out_.AsRegister<Register>();
-    DCHECK(locations->CanCall());
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
-    DCHECK(instruction_->IsInstanceFieldGet() ||
-           instruction_->IsStaticFieldGet() ||
-           instruction_->IsArrayGet() ||
-           instruction_->IsInstanceOf() ||
-           instruction_->IsCheckCast() ||
-           (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
-        << "Unexpected instruction in read barrier for heap reference slow path: "
-        << instruction_->DebugName();
-
-    __ Bind(GetEntryLabel());
-    SaveLiveRegisters(codegen, locations);
-
-    // We may have to change the index's value, but as `index_` is a
-    // constant member (like other "inputs" of this slow path),
-    // introduce a copy of it, `index`.
-    Location index = index_;
-    if (index_.IsValid()) {
-      // Handle `index_` for HArrayGet and UnsafeGetObject/UnsafeGetObjectVolatile intrinsics.
-      if (instruction_->IsArrayGet()) {
-        // Compute the actual memory offset and store it in `index`.
-        Register index_reg = index_.AsRegister<Register>();
-        DCHECK(locations->GetLiveRegisters()->ContainsCoreRegister(index_reg));
-        if (codegen->IsCoreCalleeSaveRegister(index_reg)) {
-          // We are about to change the value of `index_reg` (see the
-          // calls to art::mips::MipsAssembler::Sll and
-          // art::mips::MipsAssembler::Addiu32 below), but it has
-          // not been saved by the previous call to
-          // art::SlowPathCode::SaveLiveRegisters, as it is a
-          // callee-save register --
-          // art::SlowPathCode::SaveLiveRegisters does not consider
-          // callee-save registers, as it has been designed with the
-          // assumption that callee-save registers are supposed to be
-          // handled by the called function.  So, as a callee-save
-          // register, `index_reg` _would_ eventually be saved onto
-          // the stack, but it would be too late: we would have
-          // changed its value earlier.  Therefore, we manually save
-          // it here into another freely available register,
-          // `free_reg`, chosen of course among the caller-save
-          // registers (as a callee-save `free_reg` register would
-          // exhibit the same problem).
-          //
-          // Note we could have requested a temporary register from
-          // the register allocator instead; but we prefer not to, as
-          // this is a slow path, and we know we can find a
-          // caller-save register that is available.
-          Register free_reg = FindAvailableCallerSaveRegister(codegen);
-          __ Move(free_reg, index_reg);
-          index_reg = free_reg;
-          index = Location::RegisterLocation(index_reg);
-        } else {
-          // The initial register stored in `index_` has already been
-          // saved in the call to art::SlowPathCode::SaveLiveRegisters
-          // (as it is not a callee-save register), so we can freely
-          // use it.
-        }
-        // Shifting the index value contained in `index_reg` by the scale
-        // factor (2) cannot overflow in practice, as the runtime is
-        // unable to allocate object arrays with a size larger than
-        // 2^26 - 1 (that is, 2^28 - 4 bytes).
-        __ Sll(index_reg, index_reg, TIMES_4);
-        static_assert(
-            sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
-            "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
-        __ Addiu32(index_reg, index_reg, offset_);
-      } else {
-        // In the case of the UnsafeGetObject/UnsafeGetObjectVolatile
-        // intrinsics, `index_` is not shifted by a scale factor of 2
-        // (as in the case of ArrayGet), as it is actually an offset
-        // to an object field within an object.
-        DCHECK(instruction_->IsInvoke()) << instruction_->DebugName();
-        DCHECK(instruction_->GetLocations()->Intrinsified());
-        DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) ||
-               (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile))
-            << instruction_->AsInvoke()->GetIntrinsic();
-        DCHECK_EQ(offset_, 0U);
-        DCHECK(index_.IsRegisterPair());
-        // UnsafeGet's offset location is a register pair, the low
-        // part contains the correct offset.
-        index = index_.ToLow();
-      }
-    }
-
-    // We're moving two or three locations to locations that could
-    // overlap, so we need a parallel move resolver.
-    InvokeRuntimeCallingConvention calling_convention;
-    HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
-    parallel_move.AddMove(ref_,
-                          Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-                          DataType::Type::kReference,
-                          nullptr);
-    parallel_move.AddMove(obj_,
-                          Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
-                          DataType::Type::kReference,
-                          nullptr);
-    if (index.IsValid()) {
-      parallel_move.AddMove(index,
-                            Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
-                            DataType::Type::kInt32,
-                            nullptr);
-      codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
-    } else {
-      codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
-      __ LoadConst32(calling_convention.GetRegisterAt(2), offset_);
-    }
-    mips_codegen->InvokeRuntime(kQuickReadBarrierSlow,
-                                instruction_,
-                                instruction_->GetDexPc(),
-                                this);
-    CheckEntrypointTypes<
-        kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>();
-    mips_codegen->MoveLocation(out_,
-                               calling_convention.GetReturnLocation(DataType::Type::kReference),
-                               DataType::Type::kReference);
-
-    RestoreLiveRegisters(codegen, locations);
-    __ B(GetExitLabel());
-  }
-
-  const char* GetDescription() const override { return "ReadBarrierForHeapReferenceSlowPathMIPS"; }
-
- private:
-  Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
-    size_t ref = static_cast<int>(ref_.AsRegister<Register>());
-    size_t obj = static_cast<int>(obj_.AsRegister<Register>());
-    for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
-      if (i != ref &&
-          i != obj &&
-          !codegen->IsCoreCalleeSaveRegister(i) &&
-          !codegen->IsBlockedCoreRegister(i)) {
-        return static_cast<Register>(i);
-      }
-    }
-    // We shall never fail to find a free caller-save register, as
-    // there are more than two core caller-save registers on MIPS
-    // (meaning it is possible to find one which is different from
-    // `ref` and `obj`).
-    DCHECK_GT(codegen->GetNumberOfCoreCallerSaveRegisters(), 2u);
-    LOG(FATAL) << "Could not find a free caller-save register";
-    UNREACHABLE();
-  }
-
-  const Location out_;
-  const Location ref_;
-  const Location obj_;
-  const uint32_t offset_;
-  // An additional location containing an index to an array.
-  // Only used for HArrayGet and the UnsafeGetObject &
-  // UnsafeGetObjectVolatile intrinsics.
-  const Location index_;
-
-  DISALLOW_COPY_AND_ASSIGN(ReadBarrierForHeapReferenceSlowPathMIPS);
-};
-
-// Slow path generating a read barrier for a GC root.
-class ReadBarrierForRootSlowPathMIPS : public SlowPathCodeMIPS {
- public:
-  ReadBarrierForRootSlowPathMIPS(HInstruction* instruction, Location out, Location root)
-      : SlowPathCodeMIPS(instruction), out_(out), root_(root) {
-    DCHECK(kEmitCompilerReadBarrier);
-  }
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    LocationSummary* locations = instruction_->GetLocations();
-    Register reg_out = out_.AsRegister<Register>();
-    DCHECK(locations->CanCall());
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
-    DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString())
-        << "Unexpected instruction in read barrier for GC root slow path: "
-        << instruction_->DebugName();
-
-    __ Bind(GetEntryLabel());
-    SaveLiveRegisters(codegen, locations);
-
-    InvokeRuntimeCallingConvention calling_convention;
-    CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
-    mips_codegen->MoveLocation(Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-                               root_,
-                               DataType::Type::kReference);
-    mips_codegen->InvokeRuntime(kQuickReadBarrierForRootSlow,
-                                instruction_,
-                                instruction_->GetDexPc(),
-                                this);
-    CheckEntrypointTypes<kQuickReadBarrierForRootSlow, mirror::Object*, GcRoot<mirror::Object>*>();
-    mips_codegen->MoveLocation(out_,
-                               calling_convention.GetReturnLocation(DataType::Type::kReference),
-                               DataType::Type::kReference);
-
-    RestoreLiveRegisters(codegen, locations);
-    __ B(GetExitLabel());
-  }
-
-  const char* GetDescription() const override { return "ReadBarrierForRootSlowPathMIPS"; }
-
- private:
-  const Location out_;
-  const Location root_;
-
-  DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathMIPS);
-};
-
-CodeGeneratorMIPS::CodeGeneratorMIPS(HGraph* graph,
-                                     const CompilerOptions& compiler_options,
-                                     OptimizingCompilerStats* stats)
-    : CodeGenerator(graph,
-                    kNumberOfCoreRegisters,
-                    kNumberOfFRegisters,
-                    kNumberOfRegisterPairs,
-                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
-                                        arraysize(kCoreCalleeSaves)),
-                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
-                                        arraysize(kFpuCalleeSaves)),
-                    compiler_options,
-                    stats),
-      block_labels_(nullptr),
-      location_builder_(graph, this),
-      instruction_visitor_(graph, this),
-      move_resolver_(graph->GetAllocator(), this),
-      assembler_(graph->GetAllocator(),
-                 compiler_options.GetInstructionSetFeatures()->AsMipsInstructionSetFeatures()),
-      uint32_literals_(std::less<uint32_t>(),
-                       graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      boot_image_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      boot_image_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      boot_image_intrinsic_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      clobbered_ra_(false) {
-  // Save RA (containing the return address) to mimic Quick.
-  AddAllocatedRegister(Location::RegisterLocation(RA));
-}
-
-#undef __
-// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
-#define __ down_cast<MipsAssembler*>(GetAssembler())->  // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, x).Int32Value()
-
-void CodeGeneratorMIPS::Finalize(CodeAllocator* allocator) {
-  // Ensure that we fix up branches.
-  __ FinalizeCode();
-
-  // Adjust native pc offsets in stack maps.
-  StackMapStream* stack_map_stream = GetStackMapStream();
-  for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
-    uint32_t old_position = stack_map_stream->GetStackMapNativePcOffset(i);
-    uint32_t new_position = __ GetAdjustedPosition(old_position);
-    DCHECK_GE(new_position, old_position);
-    stack_map_stream->SetStackMapNativePcOffset(i, new_position);
-  }
-
-  // Adjust pc offsets for the disassembly information.
-  if (disasm_info_ != nullptr) {
-    GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval();
-    frame_entry_interval->start = __ GetAdjustedPosition(frame_entry_interval->start);
-    frame_entry_interval->end = __ GetAdjustedPosition(frame_entry_interval->end);
-    for (auto& it : *disasm_info_->GetInstructionIntervals()) {
-      it.second.start = __ GetAdjustedPosition(it.second.start);
-      it.second.end = __ GetAdjustedPosition(it.second.end);
-    }
-    for (auto& it : *disasm_info_->GetSlowPathIntervals()) {
-      it.code_interval.start = __ GetAdjustedPosition(it.code_interval.start);
-      it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end);
-    }
-  }
-
-  CodeGenerator::Finalize(allocator);
-}
-
-MipsAssembler* ParallelMoveResolverMIPS::GetAssembler() const {
-  return codegen_->GetAssembler();
-}
-
-void ParallelMoveResolverMIPS::EmitMove(size_t index) {
-  DCHECK_LT(index, moves_.size());
-  MoveOperands* move = moves_[index];
-  codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
-}
-
-void ParallelMoveResolverMIPS::EmitSwap(size_t index) {
-  DCHECK_LT(index, moves_.size());
-  MoveOperands* move = moves_[index];
-  DataType::Type type = move->GetType();
-  Location loc1 = move->GetDestination();
-  Location loc2 = move->GetSource();
-
-  DCHECK(!loc1.IsConstant());
-  DCHECK(!loc2.IsConstant());
-
-  if (loc1.Equals(loc2)) {
-    return;
-  }
-
-  if (loc1.IsRegister() && loc2.IsRegister()) {
-    // Swap 2 GPRs.
-    Register r1 = loc1.AsRegister<Register>();
-    Register r2 = loc2.AsRegister<Register>();
-    __ Move(TMP, r2);
-    __ Move(r2, r1);
-    __ Move(r1, TMP);
-  } else if (loc1.IsFpuRegister() && loc2.IsFpuRegister()) {
-    if (codegen_->GetGraph()->HasSIMD()) {
-      __ MoveV(static_cast<VectorRegister>(FTMP), VectorRegisterFrom(loc1));
-      __ MoveV(VectorRegisterFrom(loc1), VectorRegisterFrom(loc2));
-      __ MoveV(VectorRegisterFrom(loc2), static_cast<VectorRegister>(FTMP));
-    } else {
-      FRegister f1 = loc1.AsFpuRegister<FRegister>();
-      FRegister f2 = loc2.AsFpuRegister<FRegister>();
-      if (type == DataType::Type::kFloat32) {
-        __ MovS(FTMP, f2);
-        __ MovS(f2, f1);
-        __ MovS(f1, FTMP);
-      } else {
-        DCHECK_EQ(type, DataType::Type::kFloat64);
-        __ MovD(FTMP, f2);
-        __ MovD(f2, f1);
-        __ MovD(f1, FTMP);
-      }
-    }
-  } else if ((loc1.IsRegister() && loc2.IsFpuRegister()) ||
-             (loc1.IsFpuRegister() && loc2.IsRegister())) {
-    // Swap FPR and GPR.
-    DCHECK_EQ(type, DataType::Type::kFloat32);  // Can only swap a float.
-    FRegister f1 = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
-                                        : loc2.AsFpuRegister<FRegister>();
-    Register r2 = loc1.IsRegister() ? loc1.AsRegister<Register>() : loc2.AsRegister<Register>();
-    __ Move(TMP, r2);
-    __ Mfc1(r2, f1);
-    __ Mtc1(TMP, f1);
-  } else if (loc1.IsRegisterPair() && loc2.IsRegisterPair()) {
-    // Swap 2 GPR register pairs.
-    Register r1 = loc1.AsRegisterPairLow<Register>();
-    Register r2 = loc2.AsRegisterPairLow<Register>();
-    __ Move(TMP, r2);
-    __ Move(r2, r1);
-    __ Move(r1, TMP);
-    r1 = loc1.AsRegisterPairHigh<Register>();
-    r2 = loc2.AsRegisterPairHigh<Register>();
-    __ Move(TMP, r2);
-    __ Move(r2, r1);
-    __ Move(r1, TMP);
-  } else if ((loc1.IsRegisterPair() && loc2.IsFpuRegister()) ||
-             (loc1.IsFpuRegister() && loc2.IsRegisterPair())) {
-    // Swap FPR and GPR register pair.
-    DCHECK_EQ(type, DataType::Type::kFloat64);
-    FRegister f1 = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
-                                        : loc2.AsFpuRegister<FRegister>();
-    Register r2_l = loc1.IsRegisterPair() ? loc1.AsRegisterPairLow<Register>()
-                                          : loc2.AsRegisterPairLow<Register>();
-    Register r2_h = loc1.IsRegisterPair() ? loc1.AsRegisterPairHigh<Register>()
-                                          : loc2.AsRegisterPairHigh<Register>();
-    // Use 2 temporary registers because we can't first swap the low 32 bits of an FPR and
-    // then swap the high 32 bits of the same FPR. mtc1 makes the high 32 bits of an FPR
-    // unpredictable and the following mfch1 will fail.
-    __ Mfc1(TMP, f1);
-    __ MoveFromFpuHigh(AT, f1);
-    __ Mtc1(r2_l, f1);
-    __ MoveToFpuHigh(r2_h, f1);
-    __ Move(r2_l, TMP);
-    __ Move(r2_h, AT);
-  } else if (loc1.IsStackSlot() && loc2.IsStackSlot()) {
-    Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot= */ false);
-  } else if (loc1.IsDoubleStackSlot() && loc2.IsDoubleStackSlot()) {
-    Exchange(loc1.GetStackIndex(), loc2.GetStackIndex(), /* double_slot= */ true);
-  } else if (loc1.IsSIMDStackSlot() && loc2.IsSIMDStackSlot()) {
-    ExchangeQuadSlots(loc1.GetStackIndex(), loc2.GetStackIndex());
-  } else if ((loc1.IsRegister() && loc2.IsStackSlot()) ||
-             (loc1.IsStackSlot() && loc2.IsRegister())) {
-    Register reg = loc1.IsRegister() ? loc1.AsRegister<Register>() : loc2.AsRegister<Register>();
-    intptr_t offset = loc1.IsStackSlot() ? loc1.GetStackIndex() : loc2.GetStackIndex();
-    __ Move(TMP, reg);
-    __ LoadFromOffset(kLoadWord, reg, SP, offset);
-    __ StoreToOffset(kStoreWord, TMP, SP, offset);
-  } else if ((loc1.IsRegisterPair() && loc2.IsDoubleStackSlot()) ||
-             (loc1.IsDoubleStackSlot() && loc2.IsRegisterPair())) {
-    Register reg_l = loc1.IsRegisterPair() ? loc1.AsRegisterPairLow<Register>()
-                                           : loc2.AsRegisterPairLow<Register>();
-    Register reg_h = loc1.IsRegisterPair() ? loc1.AsRegisterPairHigh<Register>()
-                                           : loc2.AsRegisterPairHigh<Register>();
-    intptr_t offset_l = loc1.IsDoubleStackSlot() ? loc1.GetStackIndex() : loc2.GetStackIndex();
-    intptr_t offset_h = loc1.IsDoubleStackSlot() ? loc1.GetHighStackIndex(kMipsWordSize)
-                                                 : loc2.GetHighStackIndex(kMipsWordSize);
-    __ Move(TMP, reg_l);
-    __ LoadFromOffset(kLoadWord, reg_l, SP, offset_l);
-    __ StoreToOffset(kStoreWord, TMP, SP, offset_l);
-    __ Move(TMP, reg_h);
-    __ LoadFromOffset(kLoadWord, reg_h, SP, offset_h);
-    __ StoreToOffset(kStoreWord, TMP, SP, offset_h);
-  } else if ((loc1.IsFpuRegister() && loc2.IsSIMDStackSlot()) ||
-             (loc1.IsSIMDStackSlot() && loc2.IsFpuRegister())) {
-    Location fp_loc = loc1.IsFpuRegister() ? loc1 : loc2;
-    intptr_t offset = loc1.IsFpuRegister() ? loc2.GetStackIndex() : loc1.GetStackIndex();
-    __ MoveV(static_cast<VectorRegister>(FTMP), VectorRegisterFrom(fp_loc));
-    __ LoadQFromOffset(fp_loc.AsFpuRegister<FRegister>(), SP, offset);
-    __ StoreQToOffset(FTMP, SP, offset);
-  } else if (loc1.IsFpuRegister() || loc2.IsFpuRegister()) {
-    FRegister reg = loc1.IsFpuRegister() ? loc1.AsFpuRegister<FRegister>()
-                                         : loc2.AsFpuRegister<FRegister>();
-    intptr_t offset = loc1.IsFpuRegister() ? loc2.GetStackIndex() : loc1.GetStackIndex();
-    if (type == DataType::Type::kFloat32) {
-      __ MovS(FTMP, reg);
-      __ LoadSFromOffset(reg, SP, offset);
-      __ StoreSToOffset(FTMP, SP, offset);
-    } else {
-      DCHECK_EQ(type, DataType::Type::kFloat64);
-      __ MovD(FTMP, reg);
-      __ LoadDFromOffset(reg, SP, offset);
-      __ StoreDToOffset(FTMP, SP, offset);
-    }
-  } else {
-    LOG(FATAL) << "Swap between " << loc1 << " and " << loc2 << " is unsupported";
-  }
-}
-
-void ParallelMoveResolverMIPS::RestoreScratch(int reg) {
-  __ Pop(static_cast<Register>(reg));
-}
-
-void ParallelMoveResolverMIPS::SpillScratch(int reg) {
-  __ Push(static_cast<Register>(reg));
-}
-
-void ParallelMoveResolverMIPS::Exchange(int index1, int index2, bool double_slot) {
-  // Allocate a scratch register other than TMP, if available.
-  // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
-  // automatically unspilled when the scratch scope object is destroyed).
-  ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
-  // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
-  int stack_offset = ensure_scratch.IsSpilled() ? kStackAlignment : 0;
-  for (int i = 0; i <= (double_slot ? 1 : 0); i++, stack_offset += kMipsWordSize) {
-    __ LoadFromOffset(kLoadWord,
-                      Register(ensure_scratch.GetRegister()),
-                      SP,
-                      index1 + stack_offset);
-    __ LoadFromOffset(kLoadWord,
-                      TMP,
-                      SP,
-                      index2 + stack_offset);
-    __ StoreToOffset(kStoreWord,
-                     Register(ensure_scratch.GetRegister()),
-                     SP,
-                     index2 + stack_offset);
-    __ StoreToOffset(kStoreWord, TMP, SP, index1 + stack_offset);
-  }
-}
-
-void ParallelMoveResolverMIPS::ExchangeQuadSlots(int index1, int index2) {
-  __ LoadQFromOffset(FTMP, SP, index1);
-  __ LoadQFromOffset(FTMP2, SP, index2);
-  __ StoreQToOffset(FTMP, SP, index2);
-  __ StoreQToOffset(FTMP2, SP, index1);
-}
-
-void CodeGeneratorMIPS::ComputeSpillMask() {
-  core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
-  fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
-  DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
-  // If there're FPU callee-saved registers and there's an odd number of GPR callee-saved
-  // registers, include the ZERO register to force alignment of FPU callee-saved registers
-  // within the stack frame.
-  if ((fpu_spill_mask_ != 0) && (POPCOUNT(core_spill_mask_) % 2 != 0)) {
-    core_spill_mask_ |= (1 << ZERO);
-  }
-}
-
-bool CodeGeneratorMIPS::HasAllocatedCalleeSaveRegisters() const {
-  // If RA is clobbered by PC-relative operations on R2 and it's the only spilled register
-  // (this can happen in leaf methods), force CodeGenerator::InitializeCodeGeneration()
-  // into the path that creates a stack frame so that RA can be explicitly saved and restored.
-  // RA can't otherwise be saved/restored when it's the only spilled register.
-  return CodeGenerator::HasAllocatedCalleeSaveRegisters() || clobbered_ra_;
-}
-
-static dwarf::Reg DWARFReg(Register reg) {
-  return dwarf::Reg::MipsCore(static_cast<int>(reg));
-}
-
-// TODO: mapping of floating-point registers to DWARF.
-
-void CodeGeneratorMIPS::GenerateFrameEntry() {
-  __ Bind(&frame_entry_label_);
-
-  if (GetCompilerOptions().CountHotnessInCompiledCode()) {
-    __ Lhu(TMP, kMethodRegisterArgument, ArtMethod::HotnessCountOffset().Int32Value());
-    __ Addiu(TMP, TMP, 1);
-    __ Sh(TMP, kMethodRegisterArgument, ArtMethod::HotnessCountOffset().Int32Value());
-  }
-
-  bool do_overflow_check =
-      FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips) || !IsLeafMethod();
-
-  if (do_overflow_check) {
-    __ LoadFromOffset(kLoadWord,
-                      ZERO,
-                      SP,
-                      -static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kMips)));
-    RecordPcInfo(nullptr, 0);
-  }
-
-  if (HasEmptyFrame()) {
-    CHECK_EQ(fpu_spill_mask_, 0u);
-    CHECK_EQ(core_spill_mask_, 1u << RA);
-    CHECK(!clobbered_ra_);
-    return;
-  }
-
-  // Make sure the frame size isn't unreasonably large.
-  if (GetFrameSize() > GetStackOverflowReservedBytes(InstructionSet::kMips)) {
-    LOG(FATAL) << "Stack frame larger than "
-        << GetStackOverflowReservedBytes(InstructionSet::kMips) << " bytes";
-  }
-
-  // Spill callee-saved registers.
-
-  uint32_t ofs = GetFrameSize();
-  __ IncreaseFrameSize(ofs);
-
-  for (uint32_t mask = core_spill_mask_; mask != 0; ) {
-    Register reg = static_cast<Register>(MostSignificantBit(mask));
-    mask ^= 1u << reg;
-    ofs -= kMipsWordSize;
-    // The ZERO register is only included for alignment.
-    if (reg != ZERO) {
-      __ StoreToOffset(kStoreWord, reg, SP, ofs);
-      __ cfi().RelOffset(DWARFReg(reg), ofs);
-    }
-  }
-
-  for (uint32_t mask = fpu_spill_mask_; mask != 0; ) {
-    FRegister reg = static_cast<FRegister>(MostSignificantBit(mask));
-    mask ^= 1u << reg;
-    ofs -= kMipsDoublewordSize;
-    __ StoreDToOffset(reg, SP, ofs);
-    // TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
-  }
-
-  // Save the current method if we need it. Note that we do not
-  // do this in HCurrentMethod, as the instruction might have been removed
-  // in the SSA graph.
-  if (RequiresCurrentMethod()) {
-    __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
-  }
-
-  if (GetGraph()->HasShouldDeoptimizeFlag()) {
-    // Initialize should deoptimize flag to 0.
-    __ StoreToOffset(kStoreWord, ZERO, SP, GetStackOffsetOfShouldDeoptimizeFlag());
-  }
-}
-
-void CodeGeneratorMIPS::GenerateFrameExit() {
-  __ cfi().RememberState();
-
-  if (!HasEmptyFrame()) {
-    // Restore callee-saved registers.
-
-    // For better instruction scheduling restore RA before other registers.
-    uint32_t ofs = GetFrameSize();
-    for (uint32_t mask = core_spill_mask_; mask != 0; ) {
-      Register reg = static_cast<Register>(MostSignificantBit(mask));
-      mask ^= 1u << reg;
-      ofs -= kMipsWordSize;
-      // The ZERO register is only included for alignment.
-      if (reg != ZERO) {
-        __ LoadFromOffset(kLoadWord, reg, SP, ofs);
-        __ cfi().Restore(DWARFReg(reg));
-      }
-    }
-
-    for (uint32_t mask = fpu_spill_mask_; mask != 0; ) {
-      FRegister reg = static_cast<FRegister>(MostSignificantBit(mask));
-      mask ^= 1u << reg;
-      ofs -= kMipsDoublewordSize;
-      __ LoadDFromOffset(reg, SP, ofs);
-      // TODO: __ cfi().Restore(DWARFReg(reg));
-    }
-
-    size_t frame_size = GetFrameSize();
-    // Adjust the stack pointer in the delay slot if doing so doesn't break CFI.
-    bool exchange = IsInt<16>(static_cast<int32_t>(frame_size));
-    bool reordering = __ SetReorder(false);
-    if (exchange) {
-      __ Jr(RA);
-      __ DecreaseFrameSize(frame_size);  // Single instruction in delay slot.
-    } else {
-      __ DecreaseFrameSize(frame_size);
-      __ Jr(RA);
-      __ Nop();  // In delay slot.
-    }
-    __ SetReorder(reordering);
-  } else {
-    __ Jr(RA);
-    __ NopIfNoReordering();
-  }
-
-  __ cfi().RestoreState();
-  __ cfi().DefCFAOffset(GetFrameSize());
-}
-
-void CodeGeneratorMIPS::Bind(HBasicBlock* block) {
-  __ Bind(GetLabelOf(block));
-}
-
-VectorRegister VectorRegisterFrom(Location location) {
-  DCHECK(location.IsFpuRegister());
-  return static_cast<VectorRegister>(location.AsFpuRegister<FRegister>());
-}
-
-void CodeGeneratorMIPS::MoveLocation(Location destination,
-                                     Location source,
-                                     DataType::Type dst_type) {
-  if (source.Equals(destination)) {
-    return;
-  }
-
-  if (source.IsConstant()) {
-    MoveConstant(destination, source.GetConstant());
-  } else {
-    if (destination.IsRegister()) {
-      if (source.IsRegister()) {
-        __ Move(destination.AsRegister<Register>(), source.AsRegister<Register>());
-      } else if (source.IsFpuRegister()) {
-        __ Mfc1(destination.AsRegister<Register>(), source.AsFpuRegister<FRegister>());
-      } else {
-        DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
-      __ LoadFromOffset(kLoadWord, destination.AsRegister<Register>(), SP, source.GetStackIndex());
-      }
-    } else if (destination.IsRegisterPair()) {
-      if (source.IsRegisterPair()) {
-        __ Move(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
-        __ Move(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
-      } else if (source.IsFpuRegister()) {
-        Register dst_high = destination.AsRegisterPairHigh<Register>();
-        Register dst_low =  destination.AsRegisterPairLow<Register>();
-        FRegister src = source.AsFpuRegister<FRegister>();
-        __ Mfc1(dst_low, src);
-        __ MoveFromFpuHigh(dst_high, src);
-      } else {
-        DCHECK(source.IsDoubleStackSlot())
-            << "Cannot move from " << source << " to " << destination;
-        int32_t off = source.GetStackIndex();
-        Register r = destination.AsRegisterPairLow<Register>();
-        __ LoadFromOffset(kLoadDoubleword, r, SP, off);
-      }
-    } else if (destination.IsFpuRegister()) {
-      if (source.IsRegister()) {
-        DCHECK(!DataType::Is64BitType(dst_type));
-        __ Mtc1(source.AsRegister<Register>(), destination.AsFpuRegister<FRegister>());
-      } else if (source.IsRegisterPair()) {
-        DCHECK(DataType::Is64BitType(dst_type));
-        FRegister dst = destination.AsFpuRegister<FRegister>();
-        Register src_high = source.AsRegisterPairHigh<Register>();
-        Register src_low = source.AsRegisterPairLow<Register>();
-        __ Mtc1(src_low, dst);
-        __ MoveToFpuHigh(src_high, dst);
-      } else if (source.IsFpuRegister()) {
-        if (GetGraph()->HasSIMD()) {
-          __ MoveV(VectorRegisterFrom(destination),
-                   VectorRegisterFrom(source));
-        } else {
-          if (DataType::Is64BitType(dst_type)) {
-            __ MovD(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
-          } else {
-            DCHECK_EQ(dst_type, DataType::Type::kFloat32);
-            __ MovS(destination.AsFpuRegister<FRegister>(), source.AsFpuRegister<FRegister>());
-          }
-        }
-      } else if (source.IsSIMDStackSlot()) {
-        __ LoadQFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
-      } else if (source.IsDoubleStackSlot()) {
-        DCHECK(DataType::Is64BitType(dst_type));
-        __ LoadDFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
-      } else {
-        DCHECK(!DataType::Is64BitType(dst_type));
-        DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
-        __ LoadSFromOffset(destination.AsFpuRegister<FRegister>(), SP, source.GetStackIndex());
-      }
-    } else if (destination.IsSIMDStackSlot()) {
-      if (source.IsFpuRegister()) {
-        __ StoreQToOffset(source.AsFpuRegister<FRegister>(), SP, destination.GetStackIndex());
-      } else {
-        DCHECK(source.IsSIMDStackSlot());
-        __ LoadQFromOffset(FTMP, SP, source.GetStackIndex());
-        __ StoreQToOffset(FTMP, SP, destination.GetStackIndex());
-      }
-    } else if (destination.IsDoubleStackSlot()) {
-      int32_t dst_offset = destination.GetStackIndex();
-      if (source.IsRegisterPair()) {
-        __ StoreToOffset(kStoreDoubleword, source.AsRegisterPairLow<Register>(), SP, dst_offset);
-      } else if (source.IsFpuRegister()) {
-        __ StoreDToOffset(source.AsFpuRegister<FRegister>(), SP, dst_offset);
-      } else {
-        DCHECK(source.IsDoubleStackSlot())
-            << "Cannot move from " << source << " to " << destination;
-        __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
-        __ StoreToOffset(kStoreWord, TMP, SP, dst_offset);
-        __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex() + 4);
-        __ StoreToOffset(kStoreWord, TMP, SP, dst_offset + 4);
-      }
-    } else {
-      DCHECK(destination.IsStackSlot()) << destination;
-      int32_t dst_offset = destination.GetStackIndex();
-      if (source.IsRegister()) {
-        __ StoreToOffset(kStoreWord, source.AsRegister<Register>(), SP, dst_offset);
-      } else if (source.IsFpuRegister()) {
-        __ StoreSToOffset(source.AsFpuRegister<FRegister>(), SP, dst_offset);
-      } else {
-        DCHECK(source.IsStackSlot()) << "Cannot move from " << source << " to " << destination;
-        __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
-        __ StoreToOffset(kStoreWord, TMP, SP, dst_offset);
-      }
-    }
-  }
-}
-
-void CodeGeneratorMIPS::MoveConstant(Location destination, HConstant* c) {
-  if (c->IsIntConstant() || c->IsNullConstant()) {
-    // Move 32 bit constant.
-    int32_t value = GetInt32ValueOf(c);
-    if (destination.IsRegister()) {
-      Register dst = destination.AsRegister<Register>();
-      __ LoadConst32(dst, value);
-    } else {
-      DCHECK(destination.IsStackSlot())
-          << "Cannot move " << c->DebugName() << " to " << destination;
-      __ StoreConstToOffset(kStoreWord, value, SP, destination.GetStackIndex(), TMP);
-    }
-  } else if (c->IsLongConstant()) {
-    // Move 64 bit constant.
-    int64_t value = GetInt64ValueOf(c);
-    if (destination.IsRegisterPair()) {
-      Register r_h = destination.AsRegisterPairHigh<Register>();
-      Register r_l = destination.AsRegisterPairLow<Register>();
-      __ LoadConst64(r_h, r_l, value);
-    } else {
-      DCHECK(destination.IsDoubleStackSlot())
-          << "Cannot move " << c->DebugName() << " to " << destination;
-      __ StoreConstToOffset(kStoreDoubleword, value, SP, destination.GetStackIndex(), TMP);
-    }
-  } else if (c->IsFloatConstant()) {
-    // Move 32 bit float constant.
-    int32_t value = GetInt32ValueOf(c);
-    if (destination.IsFpuRegister()) {
-      __ LoadSConst32(destination.AsFpuRegister<FRegister>(), value, TMP);
-    } else {
-      DCHECK(destination.IsStackSlot())
-          << "Cannot move " << c->DebugName() << " to " << destination;
-      __ StoreConstToOffset(kStoreWord, value, SP, destination.GetStackIndex(), TMP);
-    }
-  } else {
-    // Move 64 bit double constant.
-    DCHECK(c->IsDoubleConstant()) << c->DebugName();
-    int64_t value = GetInt64ValueOf(c);
-    if (destination.IsFpuRegister()) {
-      FRegister fd = destination.AsFpuRegister<FRegister>();
-      __ LoadDConst64(fd, value, TMP);
-    } else {
-      DCHECK(destination.IsDoubleStackSlot())
-          << "Cannot move " << c->DebugName() << " to " << destination;
-      __ StoreConstToOffset(kStoreDoubleword, value, SP, destination.GetStackIndex(), TMP);
-    }
-  }
-}
-
-void CodeGeneratorMIPS::MoveConstant(Location destination, int32_t value) {
-  DCHECK(destination.IsRegister());
-  Register dst = destination.AsRegister<Register>();
-  __ LoadConst32(dst, value);
-}
-
-void CodeGeneratorMIPS::AddLocationAsTemp(Location location, LocationSummary* locations) {
-  if (location.IsRegister()) {
-    locations->AddTemp(location);
-  } else if (location.IsRegisterPair()) {
-    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
-    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
-  } else {
-    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
-  }
-}
-
-template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
-inline void CodeGeneratorMIPS::EmitPcRelativeLinkerPatches(
-    const ArenaDeque<PcRelativePatchInfo>& infos,
-    ArenaVector<linker::LinkerPatch>* linker_patches) {
-  for (const PcRelativePatchInfo& info : infos) {
-    const DexFile* dex_file = info.target_dex_file;
-    size_t offset_or_index = info.offset_or_index;
-    DCHECK(info.label.IsBound());
-    uint32_t literal_offset = __ GetLabelLocation(&info.label);
-    // On R2 we use HMipsComputeBaseMethodAddress and patch relative to
-    // the assembler's base label used for PC-relative addressing.
-    const PcRelativePatchInfo& info_high = info.patch_info_high ? *info.patch_info_high : info;
-    uint32_t pc_rel_offset = info_high.pc_rel_label.IsBound()
-        ? __ GetLabelLocation(&info_high.pc_rel_label)
-        : __ GetPcRelBaseLabelLocation();
-    linker_patches->push_back(Factory(literal_offset, dex_file, pc_rel_offset, offset_or_index));
-  }
-}
-
-template <linker::LinkerPatch (*Factory)(size_t, uint32_t, uint32_t)>
-linker::LinkerPatch NoDexFileAdapter(size_t literal_offset,
-                                     const DexFile* target_dex_file,
-                                     uint32_t pc_insn_offset,
-                                     uint32_t boot_image_offset) {
-  DCHECK(target_dex_file == nullptr);  // Unused for these patches, should be null.
-  return Factory(literal_offset, pc_insn_offset, boot_image_offset);
-}
-
-void CodeGeneratorMIPS::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) {
-  DCHECK(linker_patches->empty());
-  size_t size =
-      boot_image_method_patches_.size() +
-      method_bss_entry_patches_.size() +
-      boot_image_type_patches_.size() +
-      type_bss_entry_patches_.size() +
-      boot_image_string_patches_.size() +
-      string_bss_entry_patches_.size() +
-      boot_image_intrinsic_patches_.size();
-  linker_patches->reserve(size);
-  if (GetCompilerOptions().IsBootImage()) {
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeMethodPatch>(
-        boot_image_method_patches_, linker_patches);
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeTypePatch>(
-        boot_image_type_patches_, linker_patches);
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
-        boot_image_string_patches_, linker_patches);
-    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::IntrinsicReferencePatch>>(
-        boot_image_intrinsic_patches_, linker_patches);
-  } else {
-    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::DataBimgRelRoPatch>>(
-        boot_image_method_patches_, linker_patches);
-    DCHECK(boot_image_type_patches_.empty());
-    DCHECK(boot_image_string_patches_.empty());
-    DCHECK(boot_image_intrinsic_patches_.empty());
-  }
-  EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
-      method_bss_entry_patches_, linker_patches);
-  EmitPcRelativeLinkerPatches<linker::LinkerPatch::TypeBssEntryPatch>(
-      type_bss_entry_patches_, linker_patches);
-  EmitPcRelativeLinkerPatches<linker::LinkerPatch::StringBssEntryPatch>(
-      string_bss_entry_patches_, linker_patches);
-  DCHECK_EQ(size, linker_patches->size());
-}
-
-CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageIntrinsicPatch(
-    uint32_t intrinsic_data,
-    const PcRelativePatchInfo* info_high) {
-  return NewPcRelativePatch(
-      /* dex_file= */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
-}
-
-CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageRelRoPatch(
-    uint32_t boot_image_offset,
-    const PcRelativePatchInfo* info_high) {
-  return NewPcRelativePatch(
-      /* dex_file= */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
-}
-
-CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageMethodPatch(
-    MethodReference target_method,
-    const PcRelativePatchInfo* info_high) {
-  return NewPcRelativePatch(
-      target_method.dex_file, target_method.index, info_high, &boot_image_method_patches_);
-}
-
-CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewMethodBssEntryPatch(
-    MethodReference target_method,
-    const PcRelativePatchInfo* info_high) {
-  return NewPcRelativePatch(
-      target_method.dex_file, target_method.index, info_high, &method_bss_entry_patches_);
-}
-
-CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageTypePatch(
-    const DexFile& dex_file,
-    dex::TypeIndex type_index,
-    const PcRelativePatchInfo* info_high) {
-  return NewPcRelativePatch(&dex_file, type_index.index_, info_high, &boot_image_type_patches_);
-}
-
-CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewTypeBssEntryPatch(
-    const DexFile& dex_file,
-    dex::TypeIndex type_index,
-    const PcRelativePatchInfo* info_high) {
-  return NewPcRelativePatch(&dex_file, type_index.index_, info_high, &type_bss_entry_patches_);
-}
-
-CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewBootImageStringPatch(
-    const DexFile& dex_file,
-    dex::StringIndex string_index,
-    const PcRelativePatchInfo* info_high) {
-  return NewPcRelativePatch(
-      &dex_file, string_index.index_, info_high, &boot_image_string_patches_);
-}
-
-CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewStringBssEntryPatch(
-    const DexFile& dex_file,
-    dex::StringIndex string_index,
-    const PcRelativePatchInfo* info_high) {
-  return NewPcRelativePatch(&dex_file, string_index.index_, info_high, &string_bss_entry_patches_);
-}
-
-CodeGeneratorMIPS::PcRelativePatchInfo* CodeGeneratorMIPS::NewPcRelativePatch(
-    const DexFile* dex_file,
-    uint32_t offset_or_index,
-    const PcRelativePatchInfo* info_high,
-    ArenaDeque<PcRelativePatchInfo>* patches) {
-  patches->emplace_back(dex_file, offset_or_index, info_high);
-  return &patches->back();
-}
-
-Literal* CodeGeneratorMIPS::DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map) {
-  return map->GetOrCreate(
-      value,
-      [this, value]() { return __ NewLiteral<uint32_t>(value); });
-}
-
-Literal* CodeGeneratorMIPS::DeduplicateBootImageAddressLiteral(uint32_t address) {
-  return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), &uint32_literals_);
-}
-
-void CodeGeneratorMIPS::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info_high,
-                                                             Register out,
-                                                             Register base) {
-  DCHECK(!info_high->patch_info_high);
-  DCHECK_NE(out, base);
-  bool reordering = __ SetReorder(false);
-  if (GetInstructionSetFeatures().IsR6()) {
-    DCHECK_EQ(base, ZERO);
-    __ Bind(&info_high->label);
-    __ Bind(&info_high->pc_rel_label);
-    // Add the high half of a 32-bit offset to PC.
-    __ Auipc(out, /* imm16= */ 0x1234);
-    __ SetReorder(reordering);
-  } else {
-    // If base is ZERO, emit NAL to obtain the actual base.
-    if (base == ZERO) {
-      // Generate a dummy PC-relative call to obtain PC.
-      __ Nal();
-    }
-    __ Bind(&info_high->label);
-    __ Lui(out, /* imm16= */ 0x1234);
-    // If we emitted the NAL, bind the pc_rel_label, otherwise base is a register holding
-    // the HMipsComputeBaseMethodAddress which has its own label stored in MipsAssembler.
-    if (base == ZERO) {
-      __ Bind(&info_high->pc_rel_label);
-    }
-    __ SetReorder(reordering);
-    // Add the high half of a 32-bit offset to PC.
-    __ Addu(out, out, (base == ZERO) ? RA : base);
-  }
-  // A following instruction will add the sign-extended low half of the 32-bit
-  // offset to `out` (e.g. lw, jialc, addiu).
-}
-
-void CodeGeneratorMIPS::LoadBootImageAddress(Register reg, uint32_t boot_image_reference) {
-  if (GetCompilerOptions().IsBootImage()) {
-    PcRelativePatchInfo* info_high = NewBootImageIntrinsicPatch(boot_image_reference);
-    PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high);
-    EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, /* base= */ ZERO);
-    __ Addiu(reg, TMP, /* imm16= */ 0x5678, &info_low->label);
-  } else if (GetCompilerOptions().GetCompilePic()) {
-    PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference);
-    PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high);
-    EmitPcRelativeAddressPlaceholderHigh(info_high, reg, /* base= */ ZERO);
-    __ Lw(reg, reg, /* imm16= */ 0x5678, &info_low->label);
-  } else {
-    DCHECK(Runtime::Current()->UseJitCompilation());
-    gc::Heap* heap = Runtime::Current()->GetHeap();
-    DCHECK(!heap->GetBootImageSpaces().empty());
-    const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference;
-    __ LoadConst32(reg, dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(address)));
-  }
-}
-
-void CodeGeneratorMIPS::AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke,
-                                                     uint32_t boot_image_offset) {
-  DCHECK(invoke->IsStatic());
-  InvokeRuntimeCallingConvention calling_convention;
-  Register argument = calling_convention.GetRegisterAt(0);
-  if (GetCompilerOptions().IsBootImage()) {
-    DCHECK_EQ(boot_image_offset, IntrinsicVisitor::IntegerValueOfInfo::kInvalidReference);
-    // Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
-    MethodReference target_method = invoke->GetTargetMethod();
-    dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_;
-    PcRelativePatchInfo* info_high = NewBootImageTypePatch(*target_method.dex_file, type_idx);
-    PcRelativePatchInfo* info_low =
-        NewBootImageTypePatch(*target_method.dex_file, type_idx, info_high);
-    EmitPcRelativeAddressPlaceholderHigh(info_high, argument, /* base= */ ZERO);
-    __ Addiu(argument, argument, /* imm16= */ 0x5678, &info_low->label);
-  } else {
-    LoadBootImageAddress(argument, boot_image_offset);
-  }
-  InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
-  CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
-}
-
-CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootStringPatch(
-    const DexFile& dex_file,
-    dex::StringIndex string_index,
-    Handle<mirror::String> handle) {
-  ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
-  jit_string_patches_.emplace_back(dex_file, string_index.index_);
-  return &jit_string_patches_.back();
-}
-
-CodeGeneratorMIPS::JitPatchInfo* CodeGeneratorMIPS::NewJitRootClassPatch(
-    const DexFile& dex_file,
-    dex::TypeIndex type_index,
-    Handle<mirror::Class> handle) {
-  ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
-  jit_class_patches_.emplace_back(dex_file, type_index.index_);
-  return &jit_class_patches_.back();
-}
-
-void CodeGeneratorMIPS::PatchJitRootUse(uint8_t* code,
-                                        const uint8_t* roots_data,
-                                        const CodeGeneratorMIPS::JitPatchInfo& info,
-                                        uint64_t index_in_table) const {
-  uint32_t high_literal_offset = GetAssembler().GetLabelLocation(&info.high_label);
-  uint32_t low_literal_offset = GetAssembler().GetLabelLocation(&info.low_label);
-  uintptr_t address =
-      reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
-  uint32_t addr32 = dchecked_integral_cast<uint32_t>(address);
-  // lui reg, addr32_high
-  DCHECK_EQ(code[high_literal_offset + 0], 0x34);
-  DCHECK_EQ(code[high_literal_offset + 1], 0x12);
-  DCHECK_EQ((code[high_literal_offset + 2] & 0xE0), 0x00);
-  DCHECK_EQ(code[high_literal_offset + 3], 0x3C);
-  // instr reg, reg, addr32_low
-  DCHECK_EQ(code[low_literal_offset + 0], 0x78);
-  DCHECK_EQ(code[low_literal_offset + 1], 0x56);
-  addr32 += (addr32 & 0x8000) << 1;  // Account for sign extension in "instr reg, reg, addr32_low".
-  // lui reg, addr32_high
-  code[high_literal_offset + 0] = static_cast<uint8_t>(addr32 >> 16);
-  code[high_literal_offset + 1] = static_cast<uint8_t>(addr32 >> 24);
-  // instr reg, reg, addr32_low
-  code[low_literal_offset + 0] = static_cast<uint8_t>(addr32 >> 0);
-  code[low_literal_offset + 1] = static_cast<uint8_t>(addr32 >> 8);
-}
-
-void CodeGeneratorMIPS::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
-  for (const JitPatchInfo& info : jit_string_patches_) {
-    StringReference string_reference(&info.target_dex_file, dex::StringIndex(info.index));
-    uint64_t index_in_table = GetJitStringRootIndex(string_reference);
-    PatchJitRootUse(code, roots_data, info, index_in_table);
-  }
-  for (const JitPatchInfo& info : jit_class_patches_) {
-    TypeReference type_reference(&info.target_dex_file, dex::TypeIndex(info.index));
-    uint64_t index_in_table = GetJitClassRootIndex(type_reference);
-    PatchJitRootUse(code, roots_data, info, index_in_table);
-  }
-}
-
-void CodeGeneratorMIPS::MarkGCCard(Register object,
-                                   Register value,
-                                   bool value_can_be_null) {
-  MipsLabel done;
-  Register card = AT;
-  Register temp = TMP;
-  if (value_can_be_null) {
-    __ Beqz(value, &done);
-  }
-  // Load the address of the card table into `card`.
-  __ LoadFromOffset(kLoadWord,
-                    card,
-                    TR,
-                    Thread::CardTableOffset<kMipsPointerSize>().Int32Value());
-  // Calculate the address of the card corresponding to `object`.
-  __ Srl(temp, object, gc::accounting::CardTable::kCardShift);
-  __ Addu(temp, card, temp);
-  // Write the `art::gc::accounting::CardTable::kCardDirty` value into the
-  // `object`'s card.
-  //
-  // Register `card` contains the address of the card table. Note that the card
-  // table's base is biased during its creation so that it always starts at an
-  // address whose least-significant byte is equal to `kCardDirty` (see
-  // art::gc::accounting::CardTable::Create). Therefore the SB instruction
-  // below writes the `kCardDirty` (byte) value into the `object`'s card
-  // (located at `card + object >> kCardShift`).
-  //
-  // This dual use of the value in register `card` (1. to calculate the location
-  // of the card to mark; and 2. to load the `kCardDirty` value) saves a load
-  // (no need to explicitly load `kCardDirty` as an immediate value).
-  __ Sb(card, temp, 0);
-  if (value_can_be_null) {
-    __ Bind(&done);
-  }
-}
-
-void CodeGeneratorMIPS::SetupBlockedRegisters() const {
-  // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
-  blocked_core_registers_[ZERO] = true;
-  blocked_core_registers_[K0] = true;
-  blocked_core_registers_[K1] = true;
-  blocked_core_registers_[GP] = true;
-  blocked_core_registers_[SP] = true;
-  blocked_core_registers_[RA] = true;
-
-  // AT and TMP(T8) are used as temporary/scratch registers
-  // (similar to how AT is used by MIPS assemblers).
-  blocked_core_registers_[AT] = true;
-  blocked_core_registers_[TMP] = true;
-  blocked_fpu_registers_[FTMP] = true;
-
-  if (GetInstructionSetFeatures().HasMsa()) {
-    // To be used just for MSA instructions.
-    blocked_fpu_registers_[FTMP2] = true;
-  }
-
-  // Reserve suspend and thread registers.
-  blocked_core_registers_[S0] = true;
-  blocked_core_registers_[TR] = true;
-
-  // Reserve T9 for function calls
-  blocked_core_registers_[T9] = true;
-
-  // Reserve odd-numbered FPU registers.
-  for (size_t i = 1; i < kNumberOfFRegisters; i += 2) {
-    blocked_fpu_registers_[i] = true;
-  }
-
-  if (GetGraph()->IsDebuggable()) {
-    // Stubs do not save callee-save floating point registers. If the graph
-    // is debuggable, we need to deal with these registers differently. For
-    // now, just block them.
-    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
-      blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
-    }
-  }
-}
-
-size_t CodeGeneratorMIPS::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
-  __ StoreToOffset(kStoreWord, Register(reg_id), SP, stack_index);
-  return kMipsWordSize;
-}
-
-size_t CodeGeneratorMIPS::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
-  __ LoadFromOffset(kLoadWord, Register(reg_id), SP, stack_index);
-  return kMipsWordSize;
-}
-
-size_t CodeGeneratorMIPS::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
-  if (GetGraph()->HasSIMD()) {
-    __ StoreQToOffset(FRegister(reg_id), SP, stack_index);
-  } else {
-    __ StoreDToOffset(FRegister(reg_id), SP, stack_index);
-  }
-  return GetFloatingPointSpillSlotSize();
-}
-
-size_t CodeGeneratorMIPS::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
-  if (GetGraph()->HasSIMD()) {
-    __ LoadQFromOffset(FRegister(reg_id), SP, stack_index);
-  } else {
-    __ LoadDFromOffset(FRegister(reg_id), SP, stack_index);
-  }
-  return GetFloatingPointSpillSlotSize();
-}
-
-void CodeGeneratorMIPS::DumpCoreRegister(std::ostream& stream, int reg) const {
-  stream << Register(reg);
-}
-
-void CodeGeneratorMIPS::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
-  stream << FRegister(reg);
-}
-
-const MipsInstructionSetFeatures& CodeGeneratorMIPS::GetInstructionSetFeatures() const {
-  return *GetCompilerOptions().GetInstructionSetFeatures()->AsMipsInstructionSetFeatures();
-}
-
-constexpr size_t kMipsDirectEntrypointRuntimeOffset = 16;
-
-void CodeGeneratorMIPS::InvokeRuntime(QuickEntrypointEnum entrypoint,
-                                      HInstruction* instruction,
-                                      uint32_t dex_pc,
-                                      SlowPathCode* slow_path) {
-  ValidateInvokeRuntime(entrypoint, instruction, slow_path);
-  GenerateInvokeRuntime(GetThreadOffset<kMipsPointerSize>(entrypoint).Int32Value(),
-                        IsDirectEntrypoint(entrypoint));
-  if (EntrypointRequiresStackMap(entrypoint)) {
-    RecordPcInfo(instruction, dex_pc, slow_path);
-  }
-}
-
-void CodeGeneratorMIPS::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
-                                                            HInstruction* instruction,
-                                                            SlowPathCode* slow_path,
-                                                            bool direct) {
-  ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
-  GenerateInvokeRuntime(entry_point_offset, direct);
-}
-
-void CodeGeneratorMIPS::GenerateInvokeRuntime(int32_t entry_point_offset, bool direct) {
-  bool reordering = __ SetReorder(false);
-  __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
-  __ Jalr(T9);
-  if (direct) {
-    // Reserve argument space on stack (for $a0-$a3) for
-    // entrypoints that directly reference native implementations.
-    // Called function may use this space to store $a0-$a3 regs.
-    __ IncreaseFrameSize(kMipsDirectEntrypointRuntimeOffset);  // Single instruction in delay slot.
-    __ DecreaseFrameSize(kMipsDirectEntrypointRuntimeOffset);
-  } else {
-    __ Nop();  // In delay slot.
-  }
-  __ SetReorder(reordering);
-}
-
-void InstructionCodeGeneratorMIPS::GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path,
-                                                                    Register class_reg) {
-  constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
-  const size_t status_byte_offset =
-      mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
-  constexpr uint32_t shifted_initialized_value =
-      enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
-
-  __ LoadFromOffset(kLoadUnsignedByte, TMP, class_reg, status_byte_offset);
-  __ Sltiu(TMP, TMP, shifted_initialized_value);
-  __ Bnez(TMP, slow_path->GetEntryLabel());
-  // Even if the initialized flag is set, we need to ensure consistent memory ordering.
-  __ Sync(0);
-  __ Bind(slow_path->GetExitLabel());
-}
-
-void InstructionCodeGeneratorMIPS::GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check,
-                                                                     Register temp) {
-  uint32_t path_to_root = check->GetBitstringPathToRoot();
-  uint32_t mask = check->GetBitstringMask();
-  DCHECK(IsPowerOfTwo(mask + 1));
-  size_t mask_bits = WhichPowerOf2(mask + 1);
-
-  if (mask_bits == 16u) {
-    // Load only the bitstring part of the status word.
-    __ LoadFromOffset(
-        kLoadUnsignedHalfword, temp, temp, mirror::Class::StatusOffset().Int32Value());
-    // Compare the bitstring bits using XOR.
-    __ Xori(temp, temp, dchecked_integral_cast<uint16_t>(path_to_root));
-  } else {
-    // /* uint32_t */ temp = temp->status_
-    __ LoadFromOffset(kLoadWord, temp, temp, mirror::Class::StatusOffset().Int32Value());
-    // Compare the bitstring bits using XOR.
-    if (IsUint<16>(path_to_root)) {
-      __ Xori(temp, temp, dchecked_integral_cast<uint16_t>(path_to_root));
-    } else {
-      __ LoadConst32(TMP, path_to_root);
-      __ Xor(temp, temp, TMP);
-    }
-    // Shift out bits that do not contribute to the comparison.
-    __ Sll(temp, temp, 32 - mask_bits);
-  }
-}
-
-void InstructionCodeGeneratorMIPS::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
-  __ Sync(0);  // Only stype 0 is supported.
-}
-
-void InstructionCodeGeneratorMIPS::GenerateSuspendCheck(HSuspendCheck* instruction,
-                                                        HBasicBlock* successor) {
-  SuspendCheckSlowPathMIPS* slow_path =
-      down_cast<SuspendCheckSlowPathMIPS*>(instruction->GetSlowPath());
-
-  if (slow_path == nullptr) {
-    slow_path =
-        new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathMIPS(instruction, successor);
-    instruction->SetSlowPath(slow_path);
-    codegen_->AddSlowPath(slow_path);
-    if (successor != nullptr) {
-      DCHECK(successor->IsLoopHeader());
-    }
-  } else {
-    DCHECK_EQ(slow_path->GetSuccessor(), successor);
-  }
-
-  __ LoadFromOffset(kLoadUnsignedHalfword,
-                    TMP,
-                    TR,
-                    Thread::ThreadFlagsOffset<kMipsPointerSize>().Int32Value());
-  if (successor == nullptr) {
-    __ Bnez(TMP, slow_path->GetEntryLabel());
-    __ Bind(slow_path->GetReturnLabel());
-  } else {
-    __ Beqz(TMP, codegen_->GetLabelOf(successor));
-    __ B(slow_path->GetEntryLabel());
-    // slow_path will return to GetLabelOf(successor).
-  }
-}
-
-InstructionCodeGeneratorMIPS::InstructionCodeGeneratorMIPS(HGraph* graph,
-                                                           CodeGeneratorMIPS* codegen)
-      : InstructionCodeGenerator(graph, codegen),
-        assembler_(codegen->GetAssembler()),
-        codegen_(codegen) {}
-
-void LocationsBuilderMIPS::HandleBinaryOp(HBinaryOperation* instruction) {
-  DCHECK_EQ(instruction->InputCount(), 2U);
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-  DataType::Type type = instruction->GetResultType();
-  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-  switch (type) {
-    case DataType::Type::kInt32: {
-      locations->SetInAt(0, Location::RequiresRegister());
-      HInstruction* right = instruction->InputAt(1);
-      bool can_use_imm = false;
-      if (right->IsConstant()) {
-        int32_t imm = CodeGenerator::GetInt32ValueOf(right->AsConstant());
-        if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
-          can_use_imm = IsUint<16>(imm);
-        } else {
-          DCHECK(instruction->IsSub() || instruction->IsAdd());
-          if (instruction->IsSub()) {
-            imm = -imm;
-          }
-          if (isR6) {
-            bool single_use = right->GetUses().HasExactlyOneElement();
-            int16_t imm_high = High16Bits(imm);
-            int16_t imm_low = Low16Bits(imm);
-            if (imm_low < 0) {
-              imm_high += 1;
-            }
-            can_use_imm = !((imm_high != 0) && (imm_low != 0)) || single_use;
-          } else {
-            can_use_imm = IsInt<16>(imm);
-          }
-        }
-      }
-      if (can_use_imm)
-        locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
-      else
-        locations->SetInAt(1, Location::RequiresRegister());
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-    }
-
-    case DataType::Type::kInt64: {
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-    }
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      DCHECK(instruction->IsAdd() || instruction->IsSub());
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-      break;
-
-    default:
-      LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
-  }
-}
-
-void InstructionCodeGeneratorMIPS::HandleBinaryOp(HBinaryOperation* instruction) {
-  DataType::Type type = instruction->GetType();
-  LocationSummary* locations = instruction->GetLocations();
-  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-
-  switch (type) {
-    case DataType::Type::kInt32: {
-      Register dst = locations->Out().AsRegister<Register>();
-      Register lhs = locations->InAt(0).AsRegister<Register>();
-      Location rhs_location = locations->InAt(1);
-
-      Register rhs_reg = ZERO;
-      int32_t rhs_imm = 0;
-      bool use_imm = rhs_location.IsConstant();
-      if (use_imm) {
-        rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
-      } else {
-        rhs_reg = rhs_location.AsRegister<Register>();
-      }
-
-      if (instruction->IsAnd()) {
-        if (use_imm)
-          __ Andi(dst, lhs, rhs_imm);
-        else
-          __ And(dst, lhs, rhs_reg);
-      } else if (instruction->IsOr()) {
-        if (use_imm)
-          __ Ori(dst, lhs, rhs_imm);
-        else
-          __ Or(dst, lhs, rhs_reg);
-      } else if (instruction->IsXor()) {
-        if (use_imm)
-          __ Xori(dst, lhs, rhs_imm);
-        else
-          __ Xor(dst, lhs, rhs_reg);
-      } else {
-        DCHECK(instruction->IsAdd() || instruction->IsSub());
-        if (use_imm) {
-          if (instruction->IsSub()) {
-            rhs_imm = -rhs_imm;
-          }
-          if (IsInt<16>(rhs_imm)) {
-            __ Addiu(dst, lhs, rhs_imm);
-          } else {
-            DCHECK(isR6);
-            int16_t rhs_imm_high = High16Bits(rhs_imm);
-            int16_t rhs_imm_low = Low16Bits(rhs_imm);
-            if (rhs_imm_low < 0) {
-              rhs_imm_high += 1;
-            }
-            __ Aui(dst, lhs, rhs_imm_high);
-            if (rhs_imm_low != 0) {
-              __ Addiu(dst, dst, rhs_imm_low);
-            }
-          }
-        } else if (instruction->IsAdd()) {
-          __ Addu(dst, lhs, rhs_reg);
-        } else {
-          DCHECK(instruction->IsSub());
-          __ Subu(dst, lhs, rhs_reg);
-        }
-      }
-      break;
-    }
-
-    case DataType::Type::kInt64: {
-      Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
-      Register dst_low = locations->Out().AsRegisterPairLow<Register>();
-      Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
-      Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
-      Location rhs_location = locations->InAt(1);
-      bool use_imm = rhs_location.IsConstant();
-      if (!use_imm) {
-        Register rhs_high = rhs_location.AsRegisterPairHigh<Register>();
-        Register rhs_low = rhs_location.AsRegisterPairLow<Register>();
-        if (instruction->IsAnd()) {
-          __ And(dst_low, lhs_low, rhs_low);
-          __ And(dst_high, lhs_high, rhs_high);
-        } else if (instruction->IsOr()) {
-          __ Or(dst_low, lhs_low, rhs_low);
-          __ Or(dst_high, lhs_high, rhs_high);
-        } else if (instruction->IsXor()) {
-          __ Xor(dst_low, lhs_low, rhs_low);
-          __ Xor(dst_high, lhs_high, rhs_high);
-        } else if (instruction->IsAdd()) {
-          if (lhs_low == rhs_low) {
-            // Special case for lhs = rhs and the sum potentially overwriting both lhs and rhs.
-            __ Slt(TMP, lhs_low, ZERO);
-            __ Addu(dst_low, lhs_low, rhs_low);
-          } else {
-            __ Addu(dst_low, lhs_low, rhs_low);
-            // If the sum overwrites rhs, lhs remains unchanged, otherwise rhs remains unchanged.
-            __ Sltu(TMP, dst_low, (dst_low == rhs_low) ? lhs_low : rhs_low);
-          }
-          __ Addu(dst_high, lhs_high, rhs_high);
-          __ Addu(dst_high, dst_high, TMP);
-        } else {
-          DCHECK(instruction->IsSub());
-          __ Sltu(TMP, lhs_low, rhs_low);
-          __ Subu(dst_low, lhs_low, rhs_low);
-          __ Subu(dst_high, lhs_high, rhs_high);
-          __ Subu(dst_high, dst_high, TMP);
-        }
-      } else {
-        int64_t value = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()->AsConstant());
-        if (instruction->IsOr()) {
-          uint32_t low = Low32Bits(value);
-          uint32_t high = High32Bits(value);
-          if (IsUint<16>(low)) {
-            if (dst_low != lhs_low || low != 0) {
-              __ Ori(dst_low, lhs_low, low);
-            }
-          } else {
-            __ LoadConst32(TMP, low);
-            __ Or(dst_low, lhs_low, TMP);
-          }
-          if (IsUint<16>(high)) {
-            if (dst_high != lhs_high || high != 0) {
-              __ Ori(dst_high, lhs_high, high);
-            }
-          } else {
-            if (high != low) {
-              __ LoadConst32(TMP, high);
-            }
-            __ Or(dst_high, lhs_high, TMP);
-          }
-        } else if (instruction->IsXor()) {
-          uint32_t low = Low32Bits(value);
-          uint32_t high = High32Bits(value);
-          if (IsUint<16>(low)) {
-            if (dst_low != lhs_low || low != 0) {
-              __ Xori(dst_low, lhs_low, low);
-            }
-          } else {
-            __ LoadConst32(TMP, low);
-            __ Xor(dst_low, lhs_low, TMP);
-          }
-          if (IsUint<16>(high)) {
-            if (dst_high != lhs_high || high != 0) {
-              __ Xori(dst_high, lhs_high, high);
-            }
-          } else {
-            if (high != low) {
-              __ LoadConst32(TMP, high);
-            }
-            __ Xor(dst_high, lhs_high, TMP);
-          }
-        } else if (instruction->IsAnd()) {
-          uint32_t low = Low32Bits(value);
-          uint32_t high = High32Bits(value);
-          if (IsUint<16>(low)) {
-            __ Andi(dst_low, lhs_low, low);
-          } else if (low != 0xFFFFFFFF) {
-            __ LoadConst32(TMP, low);
-            __ And(dst_low, lhs_low, TMP);
-          } else if (dst_low != lhs_low) {
-            __ Move(dst_low, lhs_low);
-          }
-          if (IsUint<16>(high)) {
-            __ Andi(dst_high, lhs_high, high);
-          } else if (high != 0xFFFFFFFF) {
-            if (high != low) {
-              __ LoadConst32(TMP, high);
-            }
-            __ And(dst_high, lhs_high, TMP);
-          } else if (dst_high != lhs_high) {
-            __ Move(dst_high, lhs_high);
-          }
-        } else {
-          if (instruction->IsSub()) {
-            value = -value;
-          } else {
-            DCHECK(instruction->IsAdd());
-          }
-          int32_t low = Low32Bits(value);
-          int32_t high = High32Bits(value);
-          if (IsInt<16>(low)) {
-            if (dst_low != lhs_low || low != 0) {
-              __ Addiu(dst_low, lhs_low, low);
-            }
-            if (low != 0) {
-              __ Sltiu(AT, dst_low, low);
-            }
-          } else {
-            __ LoadConst32(TMP, low);
-            __ Addu(dst_low, lhs_low, TMP);
-            __ Sltu(AT, dst_low, TMP);
-          }
-          if (IsInt<16>(high)) {
-            if (dst_high != lhs_high || high != 0) {
-              __ Addiu(dst_high, lhs_high, high);
-            }
-          } else {
-            if (high != low) {
-              __ LoadConst32(TMP, high);
-            }
-            __ Addu(dst_high, lhs_high, TMP);
-          }
-          if (low != 0) {
-            __ Addu(dst_high, dst_high, AT);
-          }
-        }
-      }
-      break;
-    }
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64: {
-      FRegister dst = locations->Out().AsFpuRegister<FRegister>();
-      FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
-      FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
-      if (instruction->IsAdd()) {
-        if (type == DataType::Type::kFloat32) {
-          __ AddS(dst, lhs, rhs);
-        } else {
-          __ AddD(dst, lhs, rhs);
-        }
-      } else {
-        DCHECK(instruction->IsSub());
-        if (type == DataType::Type::kFloat32) {
-          __ SubS(dst, lhs, rhs);
-        } else {
-          __ SubD(dst, lhs, rhs);
-        }
-      }
-      break;
-    }
-
-    default:
-      LOG(FATAL) << "Unexpected binary operation type " << type;
-  }
-}
-
-void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) {
-  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
-
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
-  DataType::Type type = instr->GetResultType();
-  switch (type) {
-    case DataType::Type::kInt32:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
-      locations->SetOut(Location::RequiresRegister());
-      break;
-    default:
-      LOG(FATAL) << "Unexpected shift type " << type;
-  }
-}
-
-static constexpr size_t kMipsBitsPerWord = kMipsWordSize * kBitsPerByte;
-
-void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) {
-  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
-  LocationSummary* locations = instr->GetLocations();
-  DataType::Type type = instr->GetType();
-
-  Location rhs_location = locations->InAt(1);
-  bool use_imm = rhs_location.IsConstant();
-  Register rhs_reg = use_imm ? ZERO : rhs_location.AsRegister<Register>();
-  int64_t rhs_imm = use_imm ? CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()) : 0;
-  const uint32_t shift_mask =
-      (type == DataType::Type::kInt32) ? kMaxIntShiftDistance : kMaxLongShiftDistance;
-  const uint32_t shift_value = rhs_imm & shift_mask;
-  // Are the INS (Insert Bit Field) and ROTR instructions supported?
-  bool has_ins_rotr = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
-
-  switch (type) {
-    case DataType::Type::kInt32: {
-      Register dst = locations->Out().AsRegister<Register>();
-      Register lhs = locations->InAt(0).AsRegister<Register>();
-      if (use_imm) {
-        if (shift_value == 0) {
-          if (dst != lhs) {
-            __ Move(dst, lhs);
-          }
-        } else if (instr->IsShl()) {
-          __ Sll(dst, lhs, shift_value);
-        } else if (instr->IsShr()) {
-          __ Sra(dst, lhs, shift_value);
-        } else if (instr->IsUShr()) {
-          __ Srl(dst, lhs, shift_value);
-        } else {
-          if (has_ins_rotr) {
-            __ Rotr(dst, lhs, shift_value);
-          } else {
-            __ Sll(TMP, lhs, (kMipsBitsPerWord - shift_value) & shift_mask);
-            __ Srl(dst, lhs, shift_value);
-            __ Or(dst, dst, TMP);
-          }
-        }
-      } else {
-        if (instr->IsShl()) {
-          __ Sllv(dst, lhs, rhs_reg);
-        } else if (instr->IsShr()) {
-          __ Srav(dst, lhs, rhs_reg);
-        } else if (instr->IsUShr()) {
-          __ Srlv(dst, lhs, rhs_reg);
-        } else {
-          if (has_ins_rotr) {
-            __ Rotrv(dst, lhs, rhs_reg);
-          } else {
-            __ Subu(TMP, ZERO, rhs_reg);
-            // 32-bit shift instructions use the 5 least significant bits of the shift count, so
-            // shifting by `-rhs_reg` is equivalent to shifting by `(32 - rhs_reg) & 31`. The case
-            // when `rhs_reg & 31 == 0` is OK even though we don't shift `lhs` left all the way out
-            // by 32, because the result in this case is computed as `(lhs >> 0) | (lhs << 0)`,
-            // IOW, the OR'd values are equal.
-            __ Sllv(TMP, lhs, TMP);
-            __ Srlv(dst, lhs, rhs_reg);
-            __ Or(dst, dst, TMP);
-          }
-        }
-      }
-      break;
-    }
-
-    case DataType::Type::kInt64: {
-      Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
-      Register dst_low = locations->Out().AsRegisterPairLow<Register>();
-      Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
-      Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
-      if (use_imm) {
-          if (shift_value == 0) {
-            codegen_->MoveLocation(locations->Out(), locations->InAt(0), type);
-          } else if (shift_value < kMipsBitsPerWord) {
-            if (has_ins_rotr) {
-              if (instr->IsShl()) {
-                __ Srl(dst_high, lhs_low, kMipsBitsPerWord - shift_value);
-                __ Ins(dst_high, lhs_high, shift_value, kMipsBitsPerWord - shift_value);
-                __ Sll(dst_low, lhs_low, shift_value);
-              } else if (instr->IsShr()) {
-                __ Srl(dst_low, lhs_low, shift_value);
-                __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
-                __ Sra(dst_high, lhs_high, shift_value);
-              } else if (instr->IsUShr()) {
-                __ Srl(dst_low, lhs_low, shift_value);
-                __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
-                __ Srl(dst_high, lhs_high, shift_value);
-              } else {
-                __ Srl(dst_low, lhs_low, shift_value);
-                __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value);
-                __ Srl(dst_high, lhs_high, shift_value);
-                __ Ins(dst_high, lhs_low, kMipsBitsPerWord - shift_value, shift_value);
-              }
-            } else {
-              if (instr->IsShl()) {
-                __ Sll(dst_low, lhs_low, shift_value);
-                __ Srl(TMP, lhs_low, kMipsBitsPerWord - shift_value);
-                __ Sll(dst_high, lhs_high, shift_value);
-                __ Or(dst_high, dst_high, TMP);
-              } else if (instr->IsShr()) {
-                __ Sra(dst_high, lhs_high, shift_value);
-                __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
-                __ Srl(dst_low, lhs_low, shift_value);
-                __ Or(dst_low, dst_low, TMP);
-              } else if (instr->IsUShr()) {
-                __ Srl(dst_high, lhs_high, shift_value);
-                __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value);
-                __ Srl(dst_low, lhs_low, shift_value);
-                __ Or(dst_low, dst_low, TMP);
-              } else {
-                __ Srl(TMP, lhs_low, shift_value);
-                __ Sll(dst_low, lhs_high, kMipsBitsPerWord - shift_value);
-                __ Or(dst_low, dst_low, TMP);
-                __ Srl(TMP, lhs_high, shift_value);
-                __ Sll(dst_high, lhs_low, kMipsBitsPerWord - shift_value);
-                __ Or(dst_high, dst_high, TMP);
-              }
-            }
-          } else {
-            const uint32_t shift_value_high = shift_value - kMipsBitsPerWord;
-            if (instr->IsShl()) {
-              __ Sll(dst_high, lhs_low, shift_value_high);
-              __ Move(dst_low, ZERO);
-            } else if (instr->IsShr()) {
-              __ Sra(dst_low, lhs_high, shift_value_high);
-              __ Sra(dst_high, dst_low, kMipsBitsPerWord - 1);
-            } else if (instr->IsUShr()) {
-              __ Srl(dst_low, lhs_high, shift_value_high);
-              __ Move(dst_high, ZERO);
-            } else {
-              if (shift_value == kMipsBitsPerWord) {
-                // 64-bit rotation by 32 is just a swap.
-                __ Move(dst_low, lhs_high);
-                __ Move(dst_high, lhs_low);
-              } else {
-                if (has_ins_rotr) {
-                  __ Srl(dst_low, lhs_high, shift_value_high);
-                  __ Ins(dst_low, lhs_low, kMipsBitsPerWord - shift_value_high, shift_value_high);
-                  __ Srl(dst_high, lhs_low, shift_value_high);
-                  __ Ins(dst_high, lhs_high, kMipsBitsPerWord - shift_value_high, shift_value_high);
-                } else {
-                  __ Sll(TMP, lhs_low, kMipsBitsPerWord - shift_value_high);
-                  __ Srl(dst_low, lhs_high, shift_value_high);
-                  __ Or(dst_low, dst_low, TMP);
-                  __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value_high);
-                  __ Srl(dst_high, lhs_low, shift_value_high);
-                  __ Or(dst_high, dst_high, TMP);
-                }
-              }
-            }
-          }
-      } else {
-        const bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-        MipsLabel done;
-        if (instr->IsShl()) {
-          __ Sllv(dst_low, lhs_low, rhs_reg);
-          __ Nor(AT, ZERO, rhs_reg);
-          __ Srl(TMP, lhs_low, 1);
-          __ Srlv(TMP, TMP, AT);
-          __ Sllv(dst_high, lhs_high, rhs_reg);
-          __ Or(dst_high, dst_high, TMP);
-          __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
-          if (isR6) {
-            __ Beqzc(TMP, &done, /* is_bare= */ true);
-            __ Move(dst_high, dst_low);
-            __ Move(dst_low, ZERO);
-          } else {
-            __ Movn(dst_high, dst_low, TMP);
-            __ Movn(dst_low, ZERO, TMP);
-          }
-        } else if (instr->IsShr()) {
-          __ Srav(dst_high, lhs_high, rhs_reg);
-          __ Nor(AT, ZERO, rhs_reg);
-          __ Sll(TMP, lhs_high, 1);
-          __ Sllv(TMP, TMP, AT);
-          __ Srlv(dst_low, lhs_low, rhs_reg);
-          __ Or(dst_low, dst_low, TMP);
-          __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
-          if (isR6) {
-            __ Beqzc(TMP, &done, /* is_bare= */ true);
-            __ Move(dst_low, dst_high);
-            __ Sra(dst_high, dst_high, 31);
-          } else {
-            __ Sra(AT, dst_high, 31);
-            __ Movn(dst_low, dst_high, TMP);
-            __ Movn(dst_high, AT, TMP);
-          }
-        } else if (instr->IsUShr()) {
-          __ Srlv(dst_high, lhs_high, rhs_reg);
-          __ Nor(AT, ZERO, rhs_reg);
-          __ Sll(TMP, lhs_high, 1);
-          __ Sllv(TMP, TMP, AT);
-          __ Srlv(dst_low, lhs_low, rhs_reg);
-          __ Or(dst_low, dst_low, TMP);
-          __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
-          if (isR6) {
-            __ Beqzc(TMP, &done, /* is_bare= */ true);
-            __ Move(dst_low, dst_high);
-            __ Move(dst_high, ZERO);
-          } else {
-            __ Movn(dst_low, dst_high, TMP);
-            __ Movn(dst_high, ZERO, TMP);
-          }
-        } else {  // Rotate.
-          __ Nor(AT, ZERO, rhs_reg);
-          __ Srlv(TMP, lhs_low, rhs_reg);
-          __ Sll(dst_low, lhs_high, 1);
-          __ Sllv(dst_low, dst_low, AT);
-          __ Or(dst_low, dst_low, TMP);
-          __ Srlv(TMP, lhs_high, rhs_reg);
-          __ Sll(dst_high, lhs_low, 1);
-          __ Sllv(dst_high, dst_high, AT);
-          __ Or(dst_high, dst_high, TMP);
-          __ Andi(TMP, rhs_reg, kMipsBitsPerWord);
-          if (isR6) {
-            __ Beqzc(TMP, &done, /* is_bare= */ true);
-            __ Move(TMP, dst_high);
-            __ Move(dst_high, dst_low);
-            __ Move(dst_low, TMP);
-          } else {
-            __ Movn(AT, dst_high, TMP);
-            __ Movn(dst_high, dst_low, TMP);
-            __ Movn(dst_low, AT, TMP);
-          }
-        }
-        __ Bind(&done);
-      }
-      break;
-    }
-
-    default:
-      LOG(FATAL) << "Unexpected shift operation type " << type;
-  }
-}
-
-void LocationsBuilderMIPS::VisitAdd(HAdd* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitAdd(HAdd* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void LocationsBuilderMIPS::VisitAnd(HAnd* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitAnd(HAnd* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void LocationsBuilderMIPS::VisitArrayGet(HArrayGet* instruction) {
-  DataType::Type type = instruction->GetType();
-  bool object_array_get_with_read_barrier =
-      kEmitCompilerReadBarrier && (type == DataType::Type::kReference);
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(instruction,
-                                                       object_array_get_with_read_barrier
-                                                           ? LocationSummary::kCallOnSlowPath
-                                                           : LocationSummary::kNoCall);
-  if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
-    locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
-  }
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
-  if (DataType::IsFloatingPointType(type)) {
-    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-  } else {
-    // The output overlaps in the case of an object array get with
-    // read barriers enabled: we do not want the move to overwrite the
-    // array's location, as we need it to emit the read barrier.
-    locations->SetOut(Location::RequiresRegister(),
-                      object_array_get_with_read_barrier
-                          ? Location::kOutputOverlap
-                          : Location::kNoOutputOverlap);
-  }
-  // We need a temporary register for the read barrier marking slow
-  // path in CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier.
-  if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
-    bool temp_needed = instruction->GetIndex()->IsConstant()
-        ? !kBakerReadBarrierThunksEnableForFields
-        : !kBakerReadBarrierThunksEnableForArrays;
-    if (temp_needed) {
-      locations->AddTemp(Location::RequiresRegister());
-    }
-  }
-}
-
-static auto GetImplicitNullChecker(HInstruction* instruction, CodeGeneratorMIPS* codegen) {
-  auto null_checker = [codegen, instruction]() {
-    codegen->MaybeRecordImplicitNullCheck(instruction);
-  };
-  return null_checker;
-}
-
-void InstructionCodeGeneratorMIPS::VisitArrayGet(HArrayGet* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  Location obj_loc = locations->InAt(0);
-  Register obj = obj_loc.AsRegister<Register>();
-  Location out_loc = locations->Out();
-  Location index = locations->InAt(1);
-  uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
-  auto null_checker = GetImplicitNullChecker(instruction, codegen_);
-
-  DataType::Type type = instruction->GetType();
-  const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
-                                        instruction->IsStringCharAt();
-  switch (type) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8: {
-      Register out = out_loc.AsRegister<Register>();
-      if (index.IsConstant()) {
-        size_t offset =
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
-        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset, null_checker);
-      } else {
-        __ Addu(TMP, obj, index.AsRegister<Register>());
-        __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kInt8: {
-      Register out = out_loc.AsRegister<Register>();
-      if (index.IsConstant()) {
-        size_t offset =
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
-        __ LoadFromOffset(kLoadSignedByte, out, obj, offset, null_checker);
-      } else {
-        __ Addu(TMP, obj, index.AsRegister<Register>());
-        __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kUint16: {
-      Register out = out_loc.AsRegister<Register>();
-      if (maybe_compressed_char_at) {
-        uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
-        __ LoadFromOffset(kLoadWord, TMP, obj, count_offset, null_checker);
-        __ Sll(TMP, TMP, 31);    // Extract compression flag into the most significant bit of TMP.
-        static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
-                      "Expecting 0=compressed, 1=uncompressed");
-      }
-      if (index.IsConstant()) {
-        int32_t const_index = index.GetConstant()->AsIntConstant()->GetValue();
-        if (maybe_compressed_char_at) {
-          MipsLabel uncompressed_load, done;
-          __ Bnez(TMP, &uncompressed_load);
-          __ LoadFromOffset(kLoadUnsignedByte,
-                            out,
-                            obj,
-                            data_offset + (const_index << TIMES_1));
-          __ B(&done);
-          __ Bind(&uncompressed_load);
-          __ LoadFromOffset(kLoadUnsignedHalfword,
-                            out,
-                            obj,
-                            data_offset + (const_index << TIMES_2));
-          __ Bind(&done);
-        } else {
-          __ LoadFromOffset(kLoadUnsignedHalfword,
-                            out,
-                            obj,
-                            data_offset + (const_index << TIMES_2),
-                            null_checker);
-        }
-      } else {
-        Register index_reg = index.AsRegister<Register>();
-        if (maybe_compressed_char_at) {
-          MipsLabel uncompressed_load, done;
-          __ Bnez(TMP, &uncompressed_load);
-          __ Addu(TMP, obj, index_reg);
-          __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
-          __ B(&done);
-          __ Bind(&uncompressed_load);
-          __ ShiftAndAdd(TMP, index_reg, obj, TIMES_2, TMP);
-          __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
-          __ Bind(&done);
-        } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
-          __ Addu(TMP, index_reg, obj);
-          __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset, null_checker);
-        } else {
-          __ ShiftAndAdd(TMP, index_reg, obj, TIMES_2, TMP);
-          __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset, null_checker);
-        }
-      }
-      break;
-    }
-
-    case DataType::Type::kInt16: {
-      Register out = out_loc.AsRegister<Register>();
-      if (index.IsConstant()) {
-        size_t offset =
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
-        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset, null_checker);
-      } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
-        __ Addu(TMP, index.AsRegister<Register>(), obj);
-        __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset, null_checker);
-      } else {
-        __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_2, TMP);
-        __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kInt32: {
-      DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
-      Register out = out_loc.AsRegister<Register>();
-      if (index.IsConstant()) {
-        size_t offset =
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
-        __ LoadFromOffset(kLoadWord, out, obj, offset, null_checker);
-      } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
-        __ Addu(TMP, index.AsRegister<Register>(), obj);
-        __ LoadFromOffset(kLoadWord, out, TMP, data_offset, null_checker);
-      } else {
-        __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_4, TMP);
-        __ LoadFromOffset(kLoadWord, out, TMP, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kReference: {
-      static_assert(
-          sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
-          "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
-      // /* HeapReference<Object> */ out =
-      //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
-        bool temp_needed = index.IsConstant()
-            ? !kBakerReadBarrierThunksEnableForFields
-            : !kBakerReadBarrierThunksEnableForArrays;
-        Location temp = temp_needed ? locations->GetTemp(0) : Location::NoLocation();
-        // Note that a potential implicit null check is handled in this
-        // CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier call.
-        DCHECK(!instruction->CanDoImplicitNullCheckOn(instruction->InputAt(0)));
-        if (index.IsConstant()) {
-          // Array load with a constant index can be treated as a field load.
-          size_t offset =
-              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
-          codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
-                                                          out_loc,
-                                                          obj,
-                                                          offset,
-                                                          temp,
-                                                          /* needs_null_check= */ false);
-        } else {
-          codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
-                                                          out_loc,
-                                                          obj,
-                                                          data_offset,
-                                                          index,
-                                                          temp,
-                                                          /* needs_null_check= */ false);
-        }
-      } else {
-        Register out = out_loc.AsRegister<Register>();
-        if (index.IsConstant()) {
-          size_t offset =
-              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
-          __ LoadFromOffset(kLoadWord, out, obj, offset, null_checker);
-          // If read barriers are enabled, emit read barriers other than
-          // Baker's using a slow path (and also unpoison the loaded
-          // reference, if heap poisoning is enabled).
-          codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
-        } else {
-          __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_4, TMP);
-          __ LoadFromOffset(kLoadWord, out, TMP, data_offset, null_checker);
-          // If read barriers are enabled, emit read barriers other than
-          // Baker's using a slow path (and also unpoison the loaded
-          // reference, if heap poisoning is enabled).
-          codegen_->MaybeGenerateReadBarrierSlow(instruction,
-                                                 out_loc,
-                                                 out_loc,
-                                                 obj_loc,
-                                                 data_offset,
-                                                 index);
-        }
-      }
-      break;
-    }
-
-    case DataType::Type::kInt64: {
-      Register out = out_loc.AsRegisterPairLow<Register>();
-      if (index.IsConstant()) {
-        size_t offset =
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
-        __ LoadFromOffset(kLoadDoubleword, out, obj, offset, null_checker);
-      } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
-        __ Addu(TMP, index.AsRegister<Register>(), obj);
-        __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset, null_checker);
-      } else {
-        __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_8, TMP);
-        __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kFloat32: {
-      FRegister out = out_loc.AsFpuRegister<FRegister>();
-      if (index.IsConstant()) {
-        size_t offset =
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
-        __ LoadSFromOffset(out, obj, offset, null_checker);
-      } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
-        __ Addu(TMP, index.AsRegister<Register>(), obj);
-        __ LoadSFromOffset(out, TMP, data_offset, null_checker);
-      } else {
-        __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_4, TMP);
-        __ LoadSFromOffset(out, TMP, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kFloat64: {
-      FRegister out = out_loc.AsFpuRegister<FRegister>();
-      if (index.IsConstant()) {
-        size_t offset =
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
-        __ LoadDFromOffset(out, obj, offset, null_checker);
-      } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
-        __ Addu(TMP, index.AsRegister<Register>(), obj);
-        __ LoadDFromOffset(out, TMP, data_offset, null_checker);
-      } else {
-        __ ShiftAndAdd(TMP, index.AsRegister<Register>(), obj, TIMES_8, TMP);
-        __ LoadDFromOffset(out, TMP, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kUint32:
-    case DataType::Type::kUint64:
-    case DataType::Type::kVoid:
-      LOG(FATAL) << "Unreachable type " << instruction->GetType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitArrayLength(HArrayLength* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void InstructionCodeGeneratorMIPS::VisitArrayLength(HArrayLength* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction);
-  Register obj = locations->InAt(0).AsRegister<Register>();
-  Register out = locations->Out().AsRegister<Register>();
-  __ LoadFromOffset(kLoadWord, out, obj, offset);
-  codegen_->MaybeRecordImplicitNullCheck(instruction);
-  // Mask out compression flag from String's array length.
-  if (mirror::kUseStringCompression && instruction->IsStringLength()) {
-    __ Srl(out, out, 1u);
-  }
-}
-
-Location LocationsBuilderMIPS::RegisterOrZeroConstant(HInstruction* instruction) {
-  return (instruction->IsConstant() && instruction->AsConstant()->IsZeroBitPattern())
-      ? Location::ConstantLocation(instruction->AsConstant())
-      : Location::RequiresRegister();
-}
-
-Location LocationsBuilderMIPS::FpuRegisterOrConstantForStore(HInstruction* instruction) {
-  // We can store 0.0 directly (from the ZERO register) without loading it into an FPU register.
-  // We can store a non-zero float or double constant without first loading it into the FPU,
-  // but we should only prefer this if the constant has a single use.
-  if (instruction->IsConstant() &&
-      (instruction->AsConstant()->IsZeroBitPattern() ||
-       instruction->GetUses().HasExactlyOneElement())) {
-    return Location::ConstantLocation(instruction->AsConstant());
-    // Otherwise fall through and require an FPU register for the constant.
-  }
-  return Location::RequiresFpuRegister();
-}
-
-void LocationsBuilderMIPS::VisitArraySet(HArraySet* instruction) {
-  DataType::Type value_type = instruction->GetComponentType();
-
-  bool needs_write_barrier =
-      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
-  bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
-
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
-      instruction,
-      may_need_runtime_call_for_type_check ?
-          LocationSummary::kCallOnSlowPath :
-          LocationSummary::kNoCall);
-
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
-  if (DataType::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
-    locations->SetInAt(2, FpuRegisterOrConstantForStore(instruction->InputAt(2)));
-  } else {
-    locations->SetInAt(2, RegisterOrZeroConstant(instruction->InputAt(2)));
-  }
-  if (needs_write_barrier) {
-    // Temporary register for the write barrier.
-    locations->AddTemp(Location::RequiresRegister());  // Possibly used for ref. poisoning too.
-  }
-}
-
-void InstructionCodeGeneratorMIPS::VisitArraySet(HArraySet* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  Register obj = locations->InAt(0).AsRegister<Register>();
-  Location index = locations->InAt(1);
-  Location value_location = locations->InAt(2);
-  DataType::Type value_type = instruction->GetComponentType();
-  bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
-  bool needs_write_barrier =
-      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
-  auto null_checker = GetImplicitNullChecker(instruction, codegen_);
-  Register base_reg = index.IsConstant() ? obj : TMP;
-
-  switch (value_type) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
-      if (index.IsConstant()) {
-        data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1;
-      } else {
-        __ Addu(base_reg, obj, index.AsRegister<Register>());
-      }
-      if (value_location.IsConstant()) {
-        int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
-        __ StoreConstToOffset(kStoreByte, value, base_reg, data_offset, TMP, null_checker);
-      } else {
-        Register value = value_location.AsRegister<Register>();
-        __ StoreToOffset(kStoreByte, value, base_reg, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
-      if (index.IsConstant()) {
-        data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2;
-      } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
-        __ Addu(base_reg, index.AsRegister<Register>(), obj);
-      } else {
-        __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_2, base_reg);
-      }
-      if (value_location.IsConstant()) {
-        int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
-        __ StoreConstToOffset(kStoreHalfword, value, base_reg, data_offset, TMP, null_checker);
-      } else {
-        Register value = value_location.AsRegister<Register>();
-        __ StoreToOffset(kStoreHalfword, value, base_reg, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kInt32: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
-      if (index.IsConstant()) {
-        data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
-      } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
-        __ Addu(base_reg, index.AsRegister<Register>(), obj);
-      } else {
-        __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
-      }
-      if (value_location.IsConstant()) {
-        int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
-        __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
-      } else {
-        Register value = value_location.AsRegister<Register>();
-        __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kReference: {
-      if (value_location.IsConstant()) {
-        // Just setting null.
-        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
-        if (index.IsConstant()) {
-          data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
-        } else {
-          __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
-        }
-        int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
-        DCHECK_EQ(value, 0);
-        __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
-        DCHECK(!needs_write_barrier);
-        DCHECK(!may_need_runtime_call_for_type_check);
-        break;
-      }
-
-      DCHECK(needs_write_barrier);
-      Register value = value_location.AsRegister<Register>();
-      Register temp1 = locations->GetTemp(0).AsRegister<Register>();
-      Register temp2 = TMP;  // Doesn't need to survive slow path.
-      uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-      uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
-      uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
-      MipsLabel done;
-      SlowPathCodeMIPS* slow_path = nullptr;
-
-      if (may_need_runtime_call_for_type_check) {
-        slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathMIPS(instruction);
-        codegen_->AddSlowPath(slow_path);
-        if (instruction->GetValueCanBeNull()) {
-          MipsLabel non_zero;
-          __ Bnez(value, &non_zero);
-          uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
-          if (index.IsConstant()) {
-            data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
-          } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
-            __ Addu(base_reg, index.AsRegister<Register>(), obj);
-          } else {
-            __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
-          }
-          __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
-          __ B(&done);
-          __ Bind(&non_zero);
-        }
-
-        // Note that when read barriers are enabled, the type checks
-        // are performed without read barriers.  This is fine, even in
-        // the case where a class object is in the from-space after
-        // the flip, as a comparison involving such a type would not
-        // produce a false positive; it may of course produce a false
-        // negative, in which case we would take the ArraySet slow
-        // path.
-
-        // /* HeapReference<Class> */ temp1 = obj->klass_
-        __ LoadFromOffset(kLoadWord, temp1, obj, class_offset, null_checker);
-        __ MaybeUnpoisonHeapReference(temp1);
-
-        // /* HeapReference<Class> */ temp1 = temp1->component_type_
-        __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset);
-        // /* HeapReference<Class> */ temp2 = value->klass_
-        __ LoadFromOffset(kLoadWord, temp2, value, class_offset);
-        // If heap poisoning is enabled, no need to unpoison `temp1`
-        // nor `temp2`, as we are comparing two poisoned references.
-
-        if (instruction->StaticTypeOfArrayIsObjectArray()) {
-          MipsLabel do_put;
-          __ Beq(temp1, temp2, &do_put);
-          // If heap poisoning is enabled, the `temp1` reference has
-          // not been unpoisoned yet; unpoison it now.
-          __ MaybeUnpoisonHeapReference(temp1);
-
-          // /* HeapReference<Class> */ temp1 = temp1->super_class_
-          __ LoadFromOffset(kLoadWord, temp1, temp1, super_offset);
-          // If heap poisoning is enabled, no need to unpoison
-          // `temp1`, as we are comparing against null below.
-          __ Bnez(temp1, slow_path->GetEntryLabel());
-          __ Bind(&do_put);
-        } else {
-          __ Bne(temp1, temp2, slow_path->GetEntryLabel());
-        }
-      }
-
-      Register source = value;
-      if (kPoisonHeapReferences) {
-        // Note that in the case where `value` is a null reference,
-        // we do not enter this block, as a null reference does not
-        // need poisoning.
-        __ Move(temp1, value);
-        __ PoisonHeapReference(temp1);
-        source = temp1;
-      }
-
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
-      if (index.IsConstant()) {
-        data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
-      } else {
-        __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
-      }
-      __ StoreToOffset(kStoreWord, source, base_reg, data_offset);
-
-      if (!may_need_runtime_call_for_type_check) {
-        codegen_->MaybeRecordImplicitNullCheck(instruction);
-      }
-
-      codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull());
-
-      if (done.IsLinked()) {
-        __ Bind(&done);
-      }
-
-      if (slow_path != nullptr) {
-        __ Bind(slow_path->GetExitLabel());
-      }
-      break;
-    }
-
-    case DataType::Type::kInt64: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
-      if (index.IsConstant()) {
-        data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
-      } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
-        __ Addu(base_reg, index.AsRegister<Register>(), obj);
-      } else {
-        __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_8, base_reg);
-      }
-      if (value_location.IsConstant()) {
-        int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
-        __ StoreConstToOffset(kStoreDoubleword, value, base_reg, data_offset, TMP, null_checker);
-      } else {
-        Register value = value_location.AsRegisterPairLow<Register>();
-        __ StoreToOffset(kStoreDoubleword, value, base_reg, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kFloat32: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
-      if (index.IsConstant()) {
-        data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
-      } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
-        __ Addu(base_reg, index.AsRegister<Register>(), obj);
-      } else {
-        __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_4, base_reg);
-      }
-      if (value_location.IsConstant()) {
-        int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
-        __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
-      } else {
-        FRegister value = value_location.AsFpuRegister<FRegister>();
-        __ StoreSToOffset(value, base_reg, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kFloat64: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
-      if (index.IsConstant()) {
-        data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
-      } else if (instruction->InputAt(1)->IsIntermediateArrayAddressIndex()) {
-        __ Addu(base_reg, index.AsRegister<Register>(), obj);
-      } else {
-        __ ShiftAndAdd(base_reg, index.AsRegister<Register>(), obj, TIMES_8, base_reg);
-      }
-      if (value_location.IsConstant()) {
-        int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
-        __ StoreConstToOffset(kStoreDoubleword, value, base_reg, data_offset, TMP, null_checker);
-      } else {
-        FRegister value = value_location.AsFpuRegister<FRegister>();
-        __ StoreDToOffset(value, base_reg, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kUint32:
-    case DataType::Type::kUint64:
-    case DataType::Type::kVoid:
-      LOG(FATAL) << "Unreachable type " << instruction->GetType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitIntermediateArrayAddressIndex(
-    HIntermediateArrayAddressIndex* instruction) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
-
-  HIntConstant* shift = instruction->GetShift()->AsIntConstant();
-
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::ConstantLocation(shift));
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void InstructionCodeGeneratorMIPS::VisitIntermediateArrayAddressIndex(
-    HIntermediateArrayAddressIndex* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  Register index_reg = locations->InAt(0).AsRegister<Register>();
-  uint32_t shift = instruction->GetShift()->AsIntConstant()->GetValue();
-  __ Sll(locations->Out().AsRegister<Register>(), index_reg, shift);
-}
-
-void LocationsBuilderMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
-  RegisterSet caller_saves = RegisterSet::Empty();
-  InvokeRuntimeCallingConvention calling_convention;
-  caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-  LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
-
-  HInstruction* index = instruction->InputAt(0);
-  HInstruction* length = instruction->InputAt(1);
-
-  bool const_index = false;
-  bool const_length = false;
-
-  if (index->IsConstant()) {
-    if (length->IsConstant()) {
-      const_index = true;
-      const_length = true;
-    } else {
-      int32_t index_value = index->AsIntConstant()->GetValue();
-      if (index_value < 0 || IsInt<16>(index_value + 1)) {
-        const_index = true;
-      }
-    }
-  } else if (length->IsConstant()) {
-    int32_t length_value = length->AsIntConstant()->GetValue();
-    if (IsUint<15>(length_value)) {
-      const_length = true;
-    }
-  }
-
-  locations->SetInAt(0, const_index
-      ? Location::ConstantLocation(index->AsConstant())
-      : Location::RequiresRegister());
-  locations->SetInAt(1, const_length
-      ? Location::ConstantLocation(length->AsConstant())
-      : Location::RequiresRegister());
-}
-
-void InstructionCodeGeneratorMIPS::VisitBoundsCheck(HBoundsCheck* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  Location index_loc = locations->InAt(0);
-  Location length_loc = locations->InAt(1);
-
-  if (length_loc.IsConstant()) {
-    int32_t length = length_loc.GetConstant()->AsIntConstant()->GetValue();
-    if (index_loc.IsConstant()) {
-      int32_t index = index_loc.GetConstant()->AsIntConstant()->GetValue();
-      if (index < 0 || index >= length) {
-        BoundsCheckSlowPathMIPS* slow_path =
-            new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
-        codegen_->AddSlowPath(slow_path);
-        __ B(slow_path->GetEntryLabel());
-      } else {
-        // Nothing to be done.
-      }
-      return;
-    }
-
-    BoundsCheckSlowPathMIPS* slow_path =
-        new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
-    codegen_->AddSlowPath(slow_path);
-    Register index = index_loc.AsRegister<Register>();
-    if (length == 0) {
-      __ B(slow_path->GetEntryLabel());
-    } else if (length == 1) {
-      __ Bnez(index, slow_path->GetEntryLabel());
-    } else {
-      DCHECK(IsUint<15>(length)) << length;
-      __ Sltiu(TMP, index, length);
-      __ Beqz(TMP, slow_path->GetEntryLabel());
-    }
-  } else {
-    Register length = length_loc.AsRegister<Register>();
-    BoundsCheckSlowPathMIPS* slow_path =
-        new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS(instruction);
-    codegen_->AddSlowPath(slow_path);
-    if (index_loc.IsConstant()) {
-      int32_t index = index_loc.GetConstant()->AsIntConstant()->GetValue();
-      if (index < 0) {
-        __ B(slow_path->GetEntryLabel());
-      } else if (index == 0) {
-        __ Blez(length, slow_path->GetEntryLabel());
-      } else {
-        DCHECK(IsInt<16>(index + 1)) << index;
-        __ Sltiu(TMP, length, index + 1);
-        __ Bnez(TMP, slow_path->GetEntryLabel());
-      }
-    } else {
-      Register index = index_loc.AsRegister<Register>();
-      __ Bgeu(index, length, slow_path->GetEntryLabel());
-    }
-  }
-}
-
-// Temp is used for read barrier.
-static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
-  if (kEmitCompilerReadBarrier &&
-      !(kUseBakerReadBarrier && kBakerReadBarrierThunksEnableForFields) &&
-      (kUseBakerReadBarrier ||
-       type_check_kind == TypeCheckKind::kAbstractClassCheck ||
-       type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
-       type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
-    return 1;
-  }
-  return 0;
-}
-
-// Extra temp is used for read barrier.
-static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
-  return 1 + NumberOfInstanceOfTemps(type_check_kind);
-}
-
-void LocationsBuilderMIPS::VisitCheckCast(HCheckCast* instruction) {
-  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
-  LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction);
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
-  locations->SetInAt(0, Location::RequiresRegister());
-  if (type_check_kind == TypeCheckKind::kBitstringCheck) {
-    locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant()));
-    locations->SetInAt(2, Location::ConstantLocation(instruction->InputAt(2)->AsConstant()));
-    locations->SetInAt(3, Location::ConstantLocation(instruction->InputAt(3)->AsConstant()));
-  } else {
-    locations->SetInAt(1, Location::RequiresRegister());
-  }
-  locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
-}
-
-void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
-  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
-  LocationSummary* locations = instruction->GetLocations();
-  Location obj_loc = locations->InAt(0);
-  Register obj = obj_loc.AsRegister<Register>();
-  Location cls = locations->InAt(1);
-  Location temp_loc = locations->GetTemp(0);
-  Register temp = temp_loc.AsRegister<Register>();
-  const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
-  DCHECK_LE(num_temps, 2u);
-  Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation();
-  const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
-  const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
-  const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
-  const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value();
-  const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value();
-  const uint32_t object_array_data_offset =
-      mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
-  MipsLabel done;
-
-  bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction);
-  SlowPathCodeMIPS* slow_path =
-      new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
-          instruction, is_type_check_slow_path_fatal);
-  codegen_->AddSlowPath(slow_path);
-
-  // Avoid this check if we know `obj` is not null.
-  if (instruction->MustDoNullCheck()) {
-    __ Beqz(obj, &done);
-  }
-
-  switch (type_check_kind) {
-    case TypeCheckKind::kExactCheck:
-    case TypeCheckKind::kArrayCheck: {
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        temp_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp2_loc,
-                                        kWithoutReadBarrier);
-      // Jump to slow path for throwing the exception or doing a
-      // more involved array check.
-      __ Bne(temp, cls.AsRegister<Register>(), slow_path->GetEntryLabel());
-      break;
-    }
-
-    case TypeCheckKind::kAbstractClassCheck: {
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        temp_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp2_loc,
-                                        kWithoutReadBarrier);
-      // If the class is abstract, we eagerly fetch the super class of the
-      // object to avoid doing a comparison we know will fail.
-      MipsLabel loop;
-      __ Bind(&loop);
-      // /* HeapReference<Class> */ temp = temp->super_class_
-      GenerateReferenceLoadOneRegister(instruction,
-                                       temp_loc,
-                                       super_offset,
-                                       maybe_temp2_loc,
-                                       kWithoutReadBarrier);
-      // If the class reference currently in `temp` is null, jump to the slow path to throw the
-      // exception.
-      __ Beqz(temp, slow_path->GetEntryLabel());
-      // Otherwise, compare the classes.
-      __ Bne(temp, cls.AsRegister<Register>(), &loop);
-      break;
-    }
-
-    case TypeCheckKind::kClassHierarchyCheck: {
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        temp_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp2_loc,
-                                        kWithoutReadBarrier);
-      // Walk over the class hierarchy to find a match.
-      MipsLabel loop;
-      __ Bind(&loop);
-      __ Beq(temp, cls.AsRegister<Register>(), &done);
-      // /* HeapReference<Class> */ temp = temp->super_class_
-      GenerateReferenceLoadOneRegister(instruction,
-                                       temp_loc,
-                                       super_offset,
-                                       maybe_temp2_loc,
-                                       kWithoutReadBarrier);
-      // If the class reference currently in `temp` is null, jump to the slow path to throw the
-      // exception. Otherwise, jump to the beginning of the loop.
-      __ Bnez(temp, &loop);
-      __ B(slow_path->GetEntryLabel());
-      break;
-    }
-
-    case TypeCheckKind::kArrayObjectCheck: {
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        temp_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp2_loc,
-                                        kWithoutReadBarrier);
-      // Do an exact check.
-      __ Beq(temp, cls.AsRegister<Register>(), &done);
-      // Otherwise, we need to check that the object's class is a non-primitive array.
-      // /* HeapReference<Class> */ temp = temp->component_type_
-      GenerateReferenceLoadOneRegister(instruction,
-                                       temp_loc,
-                                       component_offset,
-                                       maybe_temp2_loc,
-                                       kWithoutReadBarrier);
-      // If the component type is null, jump to the slow path to throw the exception.
-      __ Beqz(temp, slow_path->GetEntryLabel());
-      // Otherwise, the object is indeed an array, further check that this component
-      // type is not a primitive type.
-      __ LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset);
-      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
-      __ Bnez(temp, slow_path->GetEntryLabel());
-      break;
-    }
-
-    case TypeCheckKind::kUnresolvedCheck:
-      // We always go into the type check slow path for the unresolved check case.
-      // We cannot directly call the CheckCast runtime entry point
-      // without resorting to a type checking slow path here (i.e. by
-      // calling InvokeRuntime directly), as it would require to
-      // assign fixed registers for the inputs of this HInstanceOf
-      // instruction (following the runtime calling convention), which
-      // might be cluttered by the potential first read barrier
-      // emission at the beginning of this method.
-      __ B(slow_path->GetEntryLabel());
-      break;
-
-    case TypeCheckKind::kInterfaceCheck: {
-      // Avoid read barriers to improve performance of the fast path. We can not get false
-      // positives by doing this.
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        temp_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp2_loc,
-                                        kWithoutReadBarrier);
-      // /* HeapReference<Class> */ temp = temp->iftable_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        temp_loc,
-                                        temp_loc,
-                                        iftable_offset,
-                                        maybe_temp2_loc,
-                                        kWithoutReadBarrier);
-      // Iftable is never null.
-      __ Lw(TMP, temp, array_length_offset);
-      // Loop through the iftable and check if any class matches.
-      MipsLabel loop;
-      __ Bind(&loop);
-      __ Addiu(temp, temp, 2 * kHeapReferenceSize);  // Possibly in delay slot on R2.
-      __ Beqz(TMP, slow_path->GetEntryLabel());
-      __ Lw(AT, temp, object_array_data_offset - 2 * kHeapReferenceSize);
-      __ MaybeUnpoisonHeapReference(AT);
-      // Go to next interface.
-      __ Addiu(TMP, TMP, -2);
-      // Compare the classes and continue the loop if they do not match.
-      __ Bne(AT, cls.AsRegister<Register>(), &loop);
-      break;
-    }
-
-    case TypeCheckKind::kBitstringCheck: {
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        temp_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp2_loc,
-                                        kWithoutReadBarrier);
-
-      GenerateBitstringTypeCheckCompare(instruction, temp);
-      __ Bnez(temp, slow_path->GetEntryLabel());
-      break;
-    }
-  }
-
-  __ Bind(&done);
-  __ Bind(slow_path->GetExitLabel());
-}
-
-void LocationsBuilderMIPS::VisitClinitCheck(HClinitCheck* check) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
-  locations->SetInAt(0, Location::RequiresRegister());
-  if (check->HasUses()) {
-    locations->SetOut(Location::SameAsFirstInput());
-  }
-  // Rely on the type initialization to save everything we need.
-  locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
-}
-
-void InstructionCodeGeneratorMIPS::VisitClinitCheck(HClinitCheck* check) {
-  // We assume the class is not null.
-  SlowPathCodeMIPS* slow_path =
-      new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(check->GetLoadClass(), check);
-  codegen_->AddSlowPath(slow_path);
-  GenerateClassInitializationCheck(slow_path,
-                                   check->GetLocations()->InAt(0).AsRegister<Register>());
-}
-
-void LocationsBuilderMIPS::VisitCompare(HCompare* compare) {
-  DataType::Type in_type = compare->InputAt(0)->GetType();
-
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(compare, LocationSummary::kNoCall);
-
-  switch (in_type) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RequiresRegister());
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RequiresRegister());
-      // Output overlaps because it is written before doing the low comparison.
-      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
-      break;
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-
-    default:
-      LOG(FATAL) << "Unexpected type for compare operation " << in_type;
-  }
-}
-
-void InstructionCodeGeneratorMIPS::VisitCompare(HCompare* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  Register res = locations->Out().AsRegister<Register>();
-  DataType::Type in_type = instruction->InputAt(0)->GetType();
-  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-
-  //  0 if: left == right
-  //  1 if: left  > right
-  // -1 if: left  < right
-  switch (in_type) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32: {
-      Register lhs = locations->InAt(0).AsRegister<Register>();
-      Register rhs = locations->InAt(1).AsRegister<Register>();
-      __ Slt(TMP, lhs, rhs);
-      __ Slt(res, rhs, lhs);
-      __ Subu(res, res, TMP);
-      break;
-    }
-    case DataType::Type::kInt64: {
-      MipsLabel done;
-      Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
-      Register lhs_low  = locations->InAt(0).AsRegisterPairLow<Register>();
-      Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
-      Register rhs_low  = locations->InAt(1).AsRegisterPairLow<Register>();
-      // TODO: more efficient (direct) comparison with a constant.
-      __ Slt(TMP, lhs_high, rhs_high);
-      __ Slt(AT, rhs_high, lhs_high);  // Inverted: is actually gt.
-      __ Subu(res, AT, TMP);           // Result -1:1:0 for [ <, >, == ].
-      __ Bnez(res, &done);             // If we compared ==, check if lower bits are also equal.
-      __ Sltu(TMP, lhs_low, rhs_low);
-      __ Sltu(AT, rhs_low, lhs_low);   // Inverted: is actually gt.
-      __ Subu(res, AT, TMP);           // Result -1:1:0 for [ <, >, == ].
-      __ Bind(&done);
-      break;
-    }
-
-    case DataType::Type::kFloat32: {
-      bool gt_bias = instruction->IsGtBias();
-      FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
-      FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
-      MipsLabel done;
-      if (isR6) {
-        __ CmpEqS(FTMP, lhs, rhs);
-        __ LoadConst32(res, 0);
-        __ Bc1nez(FTMP, &done);
-        if (gt_bias) {
-          __ CmpLtS(FTMP, lhs, rhs);
-          __ LoadConst32(res, -1);
-          __ Bc1nez(FTMP, &done);
-          __ LoadConst32(res, 1);
-        } else {
-          __ CmpLtS(FTMP, rhs, lhs);
-          __ LoadConst32(res, 1);
-          __ Bc1nez(FTMP, &done);
-          __ LoadConst32(res, -1);
-        }
-      } else {
-        if (gt_bias) {
-          __ ColtS(0, lhs, rhs);
-          __ LoadConst32(res, -1);
-          __ Bc1t(0, &done);
-          __ CeqS(0, lhs, rhs);
-          __ LoadConst32(res, 1);
-          __ Movt(res, ZERO, 0);
-        } else {
-          __ ColtS(0, rhs, lhs);
-          __ LoadConst32(res, 1);
-          __ Bc1t(0, &done);
-          __ CeqS(0, lhs, rhs);
-          __ LoadConst32(res, -1);
-          __ Movt(res, ZERO, 0);
-        }
-      }
-      __ Bind(&done);
-      break;
-    }
-    case DataType::Type::kFloat64: {
-      bool gt_bias = instruction->IsGtBias();
-      FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
-      FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
-      MipsLabel done;
-      if (isR6) {
-        __ CmpEqD(FTMP, lhs, rhs);
-        __ LoadConst32(res, 0);
-        __ Bc1nez(FTMP, &done);
-        if (gt_bias) {
-          __ CmpLtD(FTMP, lhs, rhs);
-          __ LoadConst32(res, -1);
-          __ Bc1nez(FTMP, &done);
-          __ LoadConst32(res, 1);
-        } else {
-          __ CmpLtD(FTMP, rhs, lhs);
-          __ LoadConst32(res, 1);
-          __ Bc1nez(FTMP, &done);
-          __ LoadConst32(res, -1);
-        }
-      } else {
-        if (gt_bias) {
-          __ ColtD(0, lhs, rhs);
-          __ LoadConst32(res, -1);
-          __ Bc1t(0, &done);
-          __ CeqD(0, lhs, rhs);
-          __ LoadConst32(res, 1);
-          __ Movt(res, ZERO, 0);
-        } else {
-          __ ColtD(0, rhs, lhs);
-          __ LoadConst32(res, 1);
-          __ Bc1t(0, &done);
-          __ CeqD(0, lhs, rhs);
-          __ LoadConst32(res, -1);
-          __ Movt(res, ZERO, 0);
-        }
-      }
-      __ Bind(&done);
-      break;
-    }
-
-    default:
-      LOG(FATAL) << "Unimplemented compare type " << in_type;
-  }
-}
-
-void LocationsBuilderMIPS::HandleCondition(HCondition* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-  switch (instruction->InputAt(0)->GetType()) {
-    default:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
-      break;
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
-      break;
-  }
-  if (!instruction->IsEmittedAtUseSite()) {
-    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-  }
-}
-
-void InstructionCodeGeneratorMIPS::HandleCondition(HCondition* instruction) {
-  if (instruction->IsEmittedAtUseSite()) {
-    return;
-  }
-
-  DataType::Type type = instruction->InputAt(0)->GetType();
-  LocationSummary* locations = instruction->GetLocations();
-
-  switch (type) {
-    default:
-      // Integer case.
-      GenerateIntCompare(instruction->GetCondition(), locations);
-      return;
-
-    case DataType::Type::kInt64:
-      GenerateLongCompare(instruction->GetCondition(), locations);
-      return;
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      GenerateFpCompare(instruction->GetCondition(), instruction->IsGtBias(), type, locations);
-      return;
-  }
-}
-
-void InstructionCodeGeneratorMIPS::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
-  DCHECK(instruction->IsDiv() || instruction->IsRem());
-
-  LocationSummary* locations = instruction->GetLocations();
-  Location second = locations->InAt(1);
-  DCHECK(second.IsConstant());
-  int64_t imm = Int64FromConstant(second.GetConstant());
-  DCHECK(imm == 1 || imm == -1);
-
-  if (instruction->GetResultType() == DataType::Type::kInt32) {
-    Register out = locations->Out().AsRegister<Register>();
-    Register dividend = locations->InAt(0).AsRegister<Register>();
-
-    if (instruction->IsRem()) {
-      __ Move(out, ZERO);
-    } else {
-      if (imm == -1) {
-        __ Subu(out, ZERO, dividend);
-      } else if (out != dividend) {
-        __ Move(out, dividend);
-      }
-    }
-  } else {
-    DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64);
-    Register out_high = locations->Out().AsRegisterPairHigh<Register>();
-    Register out_low = locations->Out().AsRegisterPairLow<Register>();
-    Register in_high = locations->InAt(0).AsRegisterPairHigh<Register>();
-    Register in_low = locations->InAt(0).AsRegisterPairLow<Register>();
-
-    if (instruction->IsRem()) {
-      __ Move(out_high, ZERO);
-      __ Move(out_low, ZERO);
-    } else {
-      if (imm == -1) {
-        __ Subu(out_low, ZERO, in_low);
-        __ Sltu(AT, ZERO, out_low);
-        __ Subu(out_high, ZERO, in_high);
-        __ Subu(out_high, out_high, AT);
-      } else {
-        __ Move(out_low, in_low);
-        __ Move(out_high, in_high);
-      }
-    }
-  }
-}
-
-void InstructionCodeGeneratorMIPS::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
-  DCHECK(instruction->IsDiv() || instruction->IsRem());
-
-  LocationSummary* locations = instruction->GetLocations();
-  Location second = locations->InAt(1);
-  const bool is_r2_or_newer = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
-  const bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6();
-  DCHECK(second.IsConstant());
-
-  if (instruction->GetResultType() == DataType::Type::kInt32) {
-    Register out = locations->Out().AsRegister<Register>();
-    Register dividend = locations->InAt(0).AsRegister<Register>();
-    int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
-    uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm));
-    int ctz_imm = CTZ(abs_imm);
-
-    if (instruction->IsDiv()) {
-      if (ctz_imm == 1) {
-        // Fast path for division by +/-2, which is very common.
-        __ Srl(TMP, dividend, 31);
-      } else {
-        __ Sra(TMP, dividend, 31);
-        __ Srl(TMP, TMP, 32 - ctz_imm);
-      }
-      __ Addu(out, dividend, TMP);
-      __ Sra(out, out, ctz_imm);
-      if (imm < 0) {
-        __ Subu(out, ZERO, out);
-      }
-    } else {
-      if (ctz_imm == 1) {
-        // Fast path for modulo +/-2, which is very common.
-        __ Sra(TMP, dividend, 31);
-        __ Subu(out, dividend, TMP);
-        __ Andi(out, out, 1);
-        __ Addu(out, out, TMP);
-      } else {
-        __ Sra(TMP, dividend, 31);
-        __ Srl(TMP, TMP, 32 - ctz_imm);
-        __ Addu(out, dividend, TMP);
-        if (IsUint<16>(abs_imm - 1)) {
-          __ Andi(out, out, abs_imm - 1);
-        } else {
-          if (is_r2_or_newer) {
-            __ Ins(out, ZERO, ctz_imm, 32 - ctz_imm);
-          } else {
-            __ Sll(out, out, 32 - ctz_imm);
-            __ Srl(out, out, 32 - ctz_imm);
-          }
-        }
-        __ Subu(out, out, TMP);
-      }
-    }
-  } else {
-    DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt64);
-    Register out_high = locations->Out().AsRegisterPairHigh<Register>();
-    Register out_low = locations->Out().AsRegisterPairLow<Register>();
-    Register in_high = locations->InAt(0).AsRegisterPairHigh<Register>();
-    Register in_low = locations->InAt(0).AsRegisterPairLow<Register>();
-    int64_t imm = Int64FromConstant(second.GetConstant());
-    uint64_t abs_imm = static_cast<uint64_t>(AbsOrMin(imm));
-    int ctz_imm = CTZ(abs_imm);
-
-    if (instruction->IsDiv()) {
-      if (ctz_imm < 32) {
-        if (ctz_imm == 1) {
-          __ Srl(AT, in_high, 31);
-        } else {
-          __ Sra(AT, in_high, 31);
-          __ Srl(AT, AT, 32 - ctz_imm);
-        }
-        __ Addu(AT, AT, in_low);
-        __ Sltu(TMP, AT, in_low);
-        __ Addu(out_high, in_high, TMP);
-        __ Srl(out_low, AT, ctz_imm);
-        if (is_r2_or_newer) {
-          __ Ins(out_low, out_high, 32 - ctz_imm, ctz_imm);
-          __ Sra(out_high, out_high, ctz_imm);
-        } else {
-          __ Sll(AT, out_high, 32 - ctz_imm);
-          __ Sra(out_high, out_high, ctz_imm);
-          __ Or(out_low, out_low, AT);
-        }
-        if (imm < 0) {
-          __ Subu(out_low, ZERO, out_low);
-          __ Sltu(AT, ZERO, out_low);
-          __ Subu(out_high, ZERO, out_high);
-          __ Subu(out_high, out_high, AT);
-        }
-      } else if (ctz_imm == 32) {
-        __ Sra(AT, in_high, 31);
-        __ Addu(AT, AT, in_low);
-        __ Sltu(AT, AT, in_low);
-        __ Addu(out_low, in_high, AT);
-        if (imm < 0) {
-          __ Srl(TMP, out_low, 31);
-          __ Subu(out_low, ZERO, out_low);
-          __ Sltu(AT, ZERO, out_low);
-          __ Subu(out_high, TMP, AT);
-        } else {
-          __ Sra(out_high, out_low, 31);
-        }
-      } else if (ctz_imm < 63) {
-        __ Sra(AT, in_high, 31);
-        __ Srl(TMP, AT, 64 - ctz_imm);
-        __ Addu(AT, AT, in_low);
-        __ Sltu(AT, AT, in_low);
-        __ Addu(out_low, in_high, AT);
-        __ Addu(out_low, out_low, TMP);
-        __ Sra(out_low, out_low, ctz_imm - 32);
-        if (imm < 0) {
-          __ Subu(out_low, ZERO, out_low);
-        }
-        __ Sra(out_high, out_low, 31);
-      } else {
-        DCHECK_LT(imm, 0);
-        if (is_r6) {
-          __ Aui(AT, in_high, 0x8000);
-        } else {
-          __ Lui(AT, 0x8000);
-          __ Xor(AT, AT, in_high);
-        }
-        __ Or(AT, AT, in_low);
-        __ Sltiu(out_low, AT, 1);
-        __ Move(out_high, ZERO);
-      }
-    } else {
-      if ((ctz_imm == 1) && !is_r6) {
-        __ Andi(AT, in_low, 1);
-        __ Sll(TMP, in_low, 31);
-        __ And(TMP, in_high, TMP);
-        __ Sra(out_high, TMP, 31);
-        __ Or(out_low, out_high, AT);
-      } else if (ctz_imm < 32) {
-        __ Sra(AT, in_high, 31);
-        if (ctz_imm <= 16) {
-          __ Andi(out_low, in_low, abs_imm - 1);
-        } else if (is_r2_or_newer) {
-          __ Ext(out_low, in_low, 0, ctz_imm);
-        } else {
-          __ Sll(out_low, in_low, 32 - ctz_imm);
-          __ Srl(out_low, out_low, 32 - ctz_imm);
-        }
-        if (is_r6) {
-          __ Selnez(out_high, AT, out_low);
-        } else {
-          __ Movz(AT, ZERO, out_low);
-          __ Move(out_high, AT);
-        }
-        if (is_r2_or_newer) {
-          __ Ins(out_low, out_high, ctz_imm, 32 - ctz_imm);
-        } else {
-          __ Sll(AT, out_high, ctz_imm);
-          __ Or(out_low, out_low, AT);
-        }
-      } else if (ctz_imm == 32) {
-        __ Sra(AT, in_high, 31);
-        __ Move(out_low, in_low);
-        if (is_r6) {
-          __ Selnez(out_high, AT, out_low);
-        } else {
-          __ Movz(AT, ZERO, out_low);
-          __ Move(out_high, AT);
-        }
-      } else if (ctz_imm < 63) {
-        __ Sra(AT, in_high, 31);
-        __ Move(TMP, in_low);
-        if (ctz_imm - 32 <= 16) {
-          __ Andi(out_high, in_high, (1 << (ctz_imm - 32)) - 1);
-        } else if (is_r2_or_newer) {
-          __ Ext(out_high, in_high, 0, ctz_imm - 32);
-        } else {
-          __ Sll(out_high, in_high, 64 - ctz_imm);
-          __ Srl(out_high, out_high, 64 - ctz_imm);
-        }
-        __ Move(out_low, TMP);
-        __ Or(TMP, TMP, out_high);
-        if (is_r6) {
-          __ Selnez(AT, AT, TMP);
-        } else {
-          __ Movz(AT, ZERO, TMP);
-        }
-        if (is_r2_or_newer) {
-          __ Ins(out_high, AT, ctz_imm - 32, 64 - ctz_imm);
-        } else {
-          __ Sll(AT, AT, ctz_imm - 32);
-          __ Or(out_high, out_high, AT);
-        }
-      } else {
-        if (is_r6) {
-          __ Aui(AT, in_high, 0x8000);
-        } else {
-          __ Lui(AT, 0x8000);
-          __ Xor(AT, AT, in_high);
-        }
-        __ Or(AT, AT, in_low);
-        __ Sltiu(AT, AT, 1);
-        __ Sll(AT, AT, 31);
-        __ Move(out_low, in_low);
-        __ Xor(out_high, in_high, AT);
-      }
-    }
-  }
-}
-
-void InstructionCodeGeneratorMIPS::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
-  DCHECK(instruction->IsDiv() || instruction->IsRem());
-  DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt32);
-
-  LocationSummary* locations = instruction->GetLocations();
-  Location second = locations->InAt(1);
-  DCHECK(second.IsConstant());
-
-  Register out = locations->Out().AsRegister<Register>();
-  Register dividend = locations->InAt(0).AsRegister<Register>();
-  int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
-
-  int64_t magic;
-  int shift;
-  CalculateMagicAndShiftForDivRem(imm, false /* is_long= */, &magic, &shift);
-
-  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-
-  __ LoadConst32(TMP, magic);
-  if (isR6) {
-    __ MuhR6(TMP, dividend, TMP);
-  } else {
-    __ MultR2(dividend, TMP);
-    __ Mfhi(TMP);
-  }
-  if (imm > 0 && magic < 0) {
-    __ Addu(TMP, TMP, dividend);
-  } else if (imm < 0 && magic > 0) {
-    __ Subu(TMP, TMP, dividend);
-  }
-
-  if (shift != 0) {
-    __ Sra(TMP, TMP, shift);
-  }
-
-  if (instruction->IsDiv()) {
-    __ Sra(out, TMP, 31);
-    __ Subu(out, TMP, out);
-  } else {
-    __ Sra(AT, TMP, 31);
-    __ Subu(AT, TMP, AT);
-    __ LoadConst32(TMP, imm);
-    if (isR6) {
-      __ MulR6(TMP, AT, TMP);
-    } else {
-      __ MulR2(TMP, AT, TMP);
-    }
-    __ Subu(out, dividend, TMP);
-  }
-}
-
-void InstructionCodeGeneratorMIPS::GenerateDivRemIntegral(HBinaryOperation* instruction) {
-  DCHECK(instruction->IsDiv() || instruction->IsRem());
-  DCHECK_EQ(instruction->GetResultType(), DataType::Type::kInt32);
-
-  LocationSummary* locations = instruction->GetLocations();
-  Register out = locations->Out().AsRegister<Register>();
-  Location second = locations->InAt(1);
-
-  if (second.IsConstant()) {
-    int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
-    if (imm == 0) {
-      // Do not generate anything. DivZeroCheck would prevent any code to be executed.
-    } else if (imm == 1 || imm == -1) {
-      DivRemOneOrMinusOne(instruction);
-    } else if (IsPowerOfTwo(AbsOrMin(imm))) {
-      DivRemByPowerOfTwo(instruction);
-    } else {
-      DCHECK(imm <= -2 || imm >= 2);
-      GenerateDivRemWithAnyConstant(instruction);
-    }
-  } else {
-    Register dividend = locations->InAt(0).AsRegister<Register>();
-    Register divisor = second.AsRegister<Register>();
-    bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-    if (instruction->IsDiv()) {
-      if (isR6) {
-        __ DivR6(out, dividend, divisor);
-      } else {
-        __ DivR2(out, dividend, divisor);
-      }
-    } else {
-      if (isR6) {
-        __ ModR6(out, dividend, divisor);
-      } else {
-        __ ModR2(out, dividend, divisor);
-      }
-    }
-  }
-}
-
-void LocationsBuilderMIPS::VisitDiv(HDiv* div) {
-  DataType::Type type = div->GetResultType();
-  bool call_long_div = false;
-  if (type == DataType::Type::kInt64) {
-    if (div->InputAt(1)->IsConstant()) {
-      int64_t imm = CodeGenerator::GetInt64ValueOf(div->InputAt(1)->AsConstant());
-      call_long_div = (imm != 0) && !IsPowerOfTwo(static_cast<uint64_t>(AbsOrMin(imm)));
-    } else {
-      call_long_div = true;
-    }
-  }
-  LocationSummary::CallKind call_kind = call_long_div
-      ? LocationSummary::kCallOnMainOnly
-      : LocationSummary::kNoCall;
-
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(div, call_kind);
-
-  switch (type) {
-    case DataType::Type::kInt32:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-
-    case DataType::Type::kInt64: {
-      if (call_long_div) {
-        InvokeRuntimeCallingConvention calling_convention;
-        locations->SetInAt(0, Location::RegisterPairLocation(
-            calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
-        locations->SetInAt(1, Location::RegisterPairLocation(
-            calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
-        locations->SetOut(calling_convention.GetReturnLocation(type));
-      } else {
-        locations->SetInAt(0, Location::RequiresRegister());
-        locations->SetInAt(1, Location::ConstantLocation(div->InputAt(1)->AsConstant()));
-        locations->SetOut(Location::RequiresRegister());
-      }
-      break;
-    }
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-      break;
-
-    default:
-      LOG(FATAL) << "Unexpected div type " << type;
-  }
-}
-
-void InstructionCodeGeneratorMIPS::VisitDiv(HDiv* instruction) {
-  DataType::Type type = instruction->GetType();
-  LocationSummary* locations = instruction->GetLocations();
-
-  switch (type) {
-    case DataType::Type::kInt32:
-      GenerateDivRemIntegral(instruction);
-      break;
-    case DataType::Type::kInt64: {
-      if (locations->InAt(1).IsConstant()) {
-        int64_t imm = locations->InAt(1).GetConstant()->AsLongConstant()->GetValue();
-        if (imm == 0) {
-          // Do not generate anything. DivZeroCheck would prevent any code to be executed.
-        } else if (imm == 1 || imm == -1) {
-          DivRemOneOrMinusOne(instruction);
-        } else {
-          DCHECK(IsPowerOfTwo(static_cast<uint64_t>(AbsOrMin(imm))));
-          DivRemByPowerOfTwo(instruction);
-        }
-      } else {
-        codegen_->InvokeRuntime(kQuickLdiv, instruction, instruction->GetDexPc());
-        CheckEntrypointTypes<kQuickLdiv, int64_t, int64_t, int64_t>();
-      }
-      break;
-    }
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64: {
-      FRegister dst = locations->Out().AsFpuRegister<FRegister>();
-      FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
-      FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
-      if (type == DataType::Type::kFloat32) {
-        __ DivS(dst, lhs, rhs);
-      } else {
-        __ DivD(dst, lhs, rhs);
-      }
-      break;
-    }
-    default:
-      LOG(FATAL) << "Unexpected div type " << type;
-  }
-}
-
-void LocationsBuilderMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
-  LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
-  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
-}
-
-void InstructionCodeGeneratorMIPS::VisitDivZeroCheck(HDivZeroCheck* instruction) {
-  SlowPathCodeMIPS* slow_path =
-      new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathMIPS(instruction);
-  codegen_->AddSlowPath(slow_path);
-  Location value = instruction->GetLocations()->InAt(0);
-  DataType::Type type = instruction->GetType();
-
-  switch (type) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32: {
-      if (value.IsConstant()) {
-        if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
-          __ B(slow_path->GetEntryLabel());
-        } else {
-          // A division by a non-null constant is valid. We don't need to perform
-          // any check, so simply fall through.
-        }
-      } else {
-        DCHECK(value.IsRegister()) << value;
-        __ Beqz(value.AsRegister<Register>(), slow_path->GetEntryLabel());
-      }
-      break;
-    }
-    case DataType::Type::kInt64: {
-      if (value.IsConstant()) {
-        if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
-          __ B(slow_path->GetEntryLabel());
-        } else {
-          // A division by a non-null constant is valid. We don't need to perform
-          // any check, so simply fall through.
-        }
-      } else {
-        DCHECK(value.IsRegisterPair()) << value;
-        __ Or(TMP, value.AsRegisterPairHigh<Register>(), value.AsRegisterPairLow<Register>());
-        __ Beqz(TMP, slow_path->GetEntryLabel());
-      }
-      break;
-    }
-    default:
-      LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
-  }
-}
-
-void LocationsBuilderMIPS::VisitDoubleConstant(HDoubleConstant* constant) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
-  locations->SetOut(Location::ConstantLocation(constant));
-}
-
-void InstructionCodeGeneratorMIPS::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
-  // Will be generated at use site.
-}
-
-void LocationsBuilderMIPS::VisitExit(HExit* exit) {
-  exit->SetLocations(nullptr);
-}
-
-void InstructionCodeGeneratorMIPS::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
-}
-
-void LocationsBuilderMIPS::VisitFloatConstant(HFloatConstant* constant) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
-  locations->SetOut(Location::ConstantLocation(constant));
-}
-
-void InstructionCodeGeneratorMIPS::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
-  // Will be generated at use site.
-}
-
-void LocationsBuilderMIPS::VisitGoto(HGoto* got) {
-  got->SetLocations(nullptr);
-}
-
-void InstructionCodeGeneratorMIPS::HandleGoto(HInstruction* got, HBasicBlock* successor) {
-  if (successor->IsExitBlock()) {
-    DCHECK(got->GetPrevious()->AlwaysThrows());
-    return;  // no code needed
-  }
-
-  HBasicBlock* block = got->GetBlock();
-  HInstruction* previous = got->GetPrevious();
-  HLoopInformation* info = block->GetLoopInformation();
-
-  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
-    if (codegen_->GetCompilerOptions().CountHotnessInCompiledCode()) {
-      __ Lw(AT, SP, kCurrentMethodStackOffset);
-      __ Lhu(TMP, AT, ArtMethod::HotnessCountOffset().Int32Value());
-      __ Addiu(TMP, TMP, 1);
-      __ Sh(TMP, AT, ArtMethod::HotnessCountOffset().Int32Value());
-    }
-    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
-    return;
-  }
-  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
-    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
-  }
-  if (!codegen_->GoesToNextBlock(block, successor)) {
-    __ B(codegen_->GetLabelOf(successor));
-  }
-}
-
-void InstructionCodeGeneratorMIPS::VisitGoto(HGoto* got) {
-  HandleGoto(got, got->GetSuccessor());
-}
-
-void LocationsBuilderMIPS::VisitTryBoundary(HTryBoundary* try_boundary) {
-  try_boundary->SetLocations(nullptr);
-}
-
-void InstructionCodeGeneratorMIPS::VisitTryBoundary(HTryBoundary* try_boundary) {
-  HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
-  if (!successor->IsExitBlock()) {
-    HandleGoto(try_boundary, successor);
-  }
-}
-
-void InstructionCodeGeneratorMIPS::GenerateIntCompare(IfCondition cond,
-                                                      LocationSummary* locations) {
-  Register dst = locations->Out().AsRegister<Register>();
-  Register lhs = locations->InAt(0).AsRegister<Register>();
-  Location rhs_location = locations->InAt(1);
-  Register rhs_reg = ZERO;
-  int64_t rhs_imm = 0;
-  bool use_imm = rhs_location.IsConstant();
-  if (use_imm) {
-    rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
-  } else {
-    rhs_reg = rhs_location.AsRegister<Register>();
-  }
-
-  switch (cond) {
-    case kCondEQ:
-    case kCondNE:
-      if (use_imm && IsInt<16>(-rhs_imm)) {
-        if (rhs_imm == 0) {
-          if (cond == kCondEQ) {
-            __ Sltiu(dst, lhs, 1);
-          } else {
-            __ Sltu(dst, ZERO, lhs);
-          }
-        } else {
-          __ Addiu(dst, lhs, -rhs_imm);
-          if (cond == kCondEQ) {
-            __ Sltiu(dst, dst, 1);
-          } else {
-            __ Sltu(dst, ZERO, dst);
-          }
-        }
-      } else {
-        if (use_imm && IsUint<16>(rhs_imm)) {
-          __ Xori(dst, lhs, rhs_imm);
-        } else {
-          if (use_imm) {
-            rhs_reg = TMP;
-            __ LoadConst32(rhs_reg, rhs_imm);
-          }
-          __ Xor(dst, lhs, rhs_reg);
-        }
-        if (cond == kCondEQ) {
-          __ Sltiu(dst, dst, 1);
-        } else {
-          __ Sltu(dst, ZERO, dst);
-        }
-      }
-      break;
-
-    case kCondLT:
-    case kCondGE:
-      if (use_imm && IsInt<16>(rhs_imm)) {
-        __ Slti(dst, lhs, rhs_imm);
-      } else {
-        if (use_imm) {
-          rhs_reg = TMP;
-          __ LoadConst32(rhs_reg, rhs_imm);
-        }
-        __ Slt(dst, lhs, rhs_reg);
-      }
-      if (cond == kCondGE) {
-        // Simulate lhs >= rhs via !(lhs < rhs) since there's
-        // only the slt instruction but no sge.
-        __ Xori(dst, dst, 1);
-      }
-      break;
-
-    case kCondLE:
-    case kCondGT:
-      if (use_imm && IsInt<16>(rhs_imm + 1)) {
-        // Simulate lhs <= rhs via lhs < rhs + 1.
-        __ Slti(dst, lhs, rhs_imm + 1);
-        if (cond == kCondGT) {
-          // Simulate lhs > rhs via !(lhs <= rhs) since there's
-          // only the slti instruction but no sgti.
-          __ Xori(dst, dst, 1);
-        }
-      } else {
-        if (use_imm) {
-          rhs_reg = TMP;
-          __ LoadConst32(rhs_reg, rhs_imm);
-        }
-        __ Slt(dst, rhs_reg, lhs);
-        if (cond == kCondLE) {
-          // Simulate lhs <= rhs via !(rhs < lhs) since there's
-          // only the slt instruction but no sle.
-          __ Xori(dst, dst, 1);
-        }
-      }
-      break;
-
-    case kCondB:
-    case kCondAE:
-      if (use_imm && IsInt<16>(rhs_imm)) {
-        // Sltiu sign-extends its 16-bit immediate operand before
-        // the comparison and thus lets us compare directly with
-        // unsigned values in the ranges [0, 0x7fff] and
-        // [0xffff8000, 0xffffffff].
-        __ Sltiu(dst, lhs, rhs_imm);
-      } else {
-        if (use_imm) {
-          rhs_reg = TMP;
-          __ LoadConst32(rhs_reg, rhs_imm);
-        }
-        __ Sltu(dst, lhs, rhs_reg);
-      }
-      if (cond == kCondAE) {
-        // Simulate lhs >= rhs via !(lhs < rhs) since there's
-        // only the sltu instruction but no sgeu.
-        __ Xori(dst, dst, 1);
-      }
-      break;
-
-    case kCondBE:
-    case kCondA:
-      if (use_imm && (rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
-        // Simulate lhs <= rhs via lhs < rhs + 1.
-        // Note that this only works if rhs + 1 does not overflow
-        // to 0, hence the check above.
-        // Sltiu sign-extends its 16-bit immediate operand before
-        // the comparison and thus lets us compare directly with
-        // unsigned values in the ranges [0, 0x7fff] and
-        // [0xffff8000, 0xffffffff].
-        __ Sltiu(dst, lhs, rhs_imm + 1);
-        if (cond == kCondA) {
-          // Simulate lhs > rhs via !(lhs <= rhs) since there's
-          // only the sltiu instruction but no sgtiu.
-          __ Xori(dst, dst, 1);
-        }
-      } else {
-        if (use_imm) {
-          rhs_reg = TMP;
-          __ LoadConst32(rhs_reg, rhs_imm);
-        }
-        __ Sltu(dst, rhs_reg, lhs);
-        if (cond == kCondBE) {
-          // Simulate lhs <= rhs via !(rhs < lhs) since there's
-          // only the sltu instruction but no sleu.
-          __ Xori(dst, dst, 1);
-        }
-      }
-      break;
-  }
-}
-
-bool InstructionCodeGeneratorMIPS::MaterializeIntCompare(IfCondition cond,
-                                                         LocationSummary* input_locations,
-                                                         Register dst) {
-  Register lhs = input_locations->InAt(0).AsRegister<Register>();
-  Location rhs_location = input_locations->InAt(1);
-  Register rhs_reg = ZERO;
-  int64_t rhs_imm = 0;
-  bool use_imm = rhs_location.IsConstant();
-  if (use_imm) {
-    rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
-  } else {
-    rhs_reg = rhs_location.AsRegister<Register>();
-  }
-
-  switch (cond) {
-    case kCondEQ:
-    case kCondNE:
-      if (use_imm && IsInt<16>(-rhs_imm)) {
-        __ Addiu(dst, lhs, -rhs_imm);
-      } else if (use_imm && IsUint<16>(rhs_imm)) {
-        __ Xori(dst, lhs, rhs_imm);
-      } else {
-        if (use_imm) {
-          rhs_reg = TMP;
-          __ LoadConst32(rhs_reg, rhs_imm);
-        }
-        __ Xor(dst, lhs, rhs_reg);
-      }
-      return (cond == kCondEQ);
-
-    case kCondLT:
-    case kCondGE:
-      if (use_imm && IsInt<16>(rhs_imm)) {
-        __ Slti(dst, lhs, rhs_imm);
-      } else {
-        if (use_imm) {
-          rhs_reg = TMP;
-          __ LoadConst32(rhs_reg, rhs_imm);
-        }
-        __ Slt(dst, lhs, rhs_reg);
-      }
-      return (cond == kCondGE);
-
-    case kCondLE:
-    case kCondGT:
-      if (use_imm && IsInt<16>(rhs_imm + 1)) {
-        // Simulate lhs <= rhs via lhs < rhs + 1.
-        __ Slti(dst, lhs, rhs_imm + 1);
-        return (cond == kCondGT);
-      } else {
-        if (use_imm) {
-          rhs_reg = TMP;
-          __ LoadConst32(rhs_reg, rhs_imm);
-        }
-        __ Slt(dst, rhs_reg, lhs);
-        return (cond == kCondLE);
-      }
-
-    case kCondB:
-    case kCondAE:
-      if (use_imm && IsInt<16>(rhs_imm)) {
-        // Sltiu sign-extends its 16-bit immediate operand before
-        // the comparison and thus lets us compare directly with
-        // unsigned values in the ranges [0, 0x7fff] and
-        // [0xffff8000, 0xffffffff].
-        __ Sltiu(dst, lhs, rhs_imm);
-      } else {
-        if (use_imm) {
-          rhs_reg = TMP;
-          __ LoadConst32(rhs_reg, rhs_imm);
-        }
-        __ Sltu(dst, lhs, rhs_reg);
-      }
-      return (cond == kCondAE);
-
-    case kCondBE:
-    case kCondA:
-      if (use_imm && (rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
-        // Simulate lhs <= rhs via lhs < rhs + 1.
-        // Note that this only works if rhs + 1 does not overflow
-        // to 0, hence the check above.
-        // Sltiu sign-extends its 16-bit immediate operand before
-        // the comparison and thus lets us compare directly with
-        // unsigned values in the ranges [0, 0x7fff] and
-        // [0xffff8000, 0xffffffff].
-        __ Sltiu(dst, lhs, rhs_imm + 1);
-        return (cond == kCondA);
-      } else {
-        if (use_imm) {
-          rhs_reg = TMP;
-          __ LoadConst32(rhs_reg, rhs_imm);
-        }
-        __ Sltu(dst, rhs_reg, lhs);
-        return (cond == kCondBE);
-      }
-  }
-}
-
-void InstructionCodeGeneratorMIPS::GenerateIntCompareAndBranch(IfCondition cond,
-                                                               LocationSummary* locations,
-                                                               MipsLabel* label) {
-  Register lhs = locations->InAt(0).AsRegister<Register>();
-  Location rhs_location = locations->InAt(1);
-  Register rhs_reg = ZERO;
-  int64_t rhs_imm = 0;
-  bool use_imm = rhs_location.IsConstant();
-  if (use_imm) {
-    rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
-  } else {
-    rhs_reg = rhs_location.AsRegister<Register>();
-  }
-
-  if (use_imm && rhs_imm == 0) {
-    switch (cond) {
-      case kCondEQ:
-      case kCondBE:  // <= 0 if zero
-        __ Beqz(lhs, label);
-        break;
-      case kCondNE:
-      case kCondA:  // > 0 if non-zero
-        __ Bnez(lhs, label);
-        break;
-      case kCondLT:
-        __ Bltz(lhs, label);
-        break;
-      case kCondGE:
-        __ Bgez(lhs, label);
-        break;
-      case kCondLE:
-        __ Blez(lhs, label);
-        break;
-      case kCondGT:
-        __ Bgtz(lhs, label);
-        break;
-      case kCondB:  // always false
-        break;
-      case kCondAE:  // always true
-        __ B(label);
-        break;
-    }
-  } else {
-    bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-    if (isR6 || !use_imm) {
-      if (use_imm) {
-        rhs_reg = TMP;
-        __ LoadConst32(rhs_reg, rhs_imm);
-      }
-      switch (cond) {
-        case kCondEQ:
-          __ Beq(lhs, rhs_reg, label);
-          break;
-        case kCondNE:
-          __ Bne(lhs, rhs_reg, label);
-          break;
-        case kCondLT:
-          __ Blt(lhs, rhs_reg, label);
-          break;
-        case kCondGE:
-          __ Bge(lhs, rhs_reg, label);
-          break;
-        case kCondLE:
-          __ Bge(rhs_reg, lhs, label);
-          break;
-        case kCondGT:
-          __ Blt(rhs_reg, lhs, label);
-          break;
-        case kCondB:
-          __ Bltu(lhs, rhs_reg, label);
-          break;
-        case kCondAE:
-          __ Bgeu(lhs, rhs_reg, label);
-          break;
-        case kCondBE:
-          __ Bgeu(rhs_reg, lhs, label);
-          break;
-        case kCondA:
-          __ Bltu(rhs_reg, lhs, label);
-          break;
-      }
-    } else {
-      // Special cases for more efficient comparison with constants on R2.
-      switch (cond) {
-        case kCondEQ:
-          __ LoadConst32(TMP, rhs_imm);
-          __ Beq(lhs, TMP, label);
-          break;
-        case kCondNE:
-          __ LoadConst32(TMP, rhs_imm);
-          __ Bne(lhs, TMP, label);
-          break;
-        case kCondLT:
-          if (IsInt<16>(rhs_imm)) {
-            __ Slti(TMP, lhs, rhs_imm);
-            __ Bnez(TMP, label);
-          } else {
-            __ LoadConst32(TMP, rhs_imm);
-            __ Blt(lhs, TMP, label);
-          }
-          break;
-        case kCondGE:
-          if (IsInt<16>(rhs_imm)) {
-            __ Slti(TMP, lhs, rhs_imm);
-            __ Beqz(TMP, label);
-          } else {
-            __ LoadConst32(TMP, rhs_imm);
-            __ Bge(lhs, TMP, label);
-          }
-          break;
-        case kCondLE:
-          if (IsInt<16>(rhs_imm + 1)) {
-            // Simulate lhs <= rhs via lhs < rhs + 1.
-            __ Slti(TMP, lhs, rhs_imm + 1);
-            __ Bnez(TMP, label);
-          } else {
-            __ LoadConst32(TMP, rhs_imm);
-            __ Bge(TMP, lhs, label);
-          }
-          break;
-        case kCondGT:
-          if (IsInt<16>(rhs_imm + 1)) {
-            // Simulate lhs > rhs via !(lhs < rhs + 1).
-            __ Slti(TMP, lhs, rhs_imm + 1);
-            __ Beqz(TMP, label);
-          } else {
-            __ LoadConst32(TMP, rhs_imm);
-            __ Blt(TMP, lhs, label);
-          }
-          break;
-        case kCondB:
-          if (IsInt<16>(rhs_imm)) {
-            __ Sltiu(TMP, lhs, rhs_imm);
-            __ Bnez(TMP, label);
-          } else {
-            __ LoadConst32(TMP, rhs_imm);
-            __ Bltu(lhs, TMP, label);
-          }
-          break;
-        case kCondAE:
-          if (IsInt<16>(rhs_imm)) {
-            __ Sltiu(TMP, lhs, rhs_imm);
-            __ Beqz(TMP, label);
-          } else {
-            __ LoadConst32(TMP, rhs_imm);
-            __ Bgeu(lhs, TMP, label);
-          }
-          break;
-        case kCondBE:
-          if ((rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
-            // Simulate lhs <= rhs via lhs < rhs + 1.
-            // Note that this only works if rhs + 1 does not overflow
-            // to 0, hence the check above.
-            __ Sltiu(TMP, lhs, rhs_imm + 1);
-            __ Bnez(TMP, label);
-          } else {
-            __ LoadConst32(TMP, rhs_imm);
-            __ Bgeu(TMP, lhs, label);
-          }
-          break;
-        case kCondA:
-          if ((rhs_imm != -1) && IsInt<16>(rhs_imm + 1)) {
-            // Simulate lhs > rhs via !(lhs < rhs + 1).
-            // Note that this only works if rhs + 1 does not overflow
-            // to 0, hence the check above.
-            __ Sltiu(TMP, lhs, rhs_imm + 1);
-            __ Beqz(TMP, label);
-          } else {
-            __ LoadConst32(TMP, rhs_imm);
-            __ Bltu(TMP, lhs, label);
-          }
-          break;
-      }
-    }
-  }
-}
-
-void InstructionCodeGeneratorMIPS::GenerateLongCompare(IfCondition cond,
-                                                       LocationSummary* locations) {
-  Register dst = locations->Out().AsRegister<Register>();
-  Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
-  Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
-  Location rhs_location = locations->InAt(1);
-  Register rhs_high = ZERO;
-  Register rhs_low = ZERO;
-  int64_t imm = 0;
-  uint32_t imm_high = 0;
-  uint32_t imm_low = 0;
-  bool use_imm = rhs_location.IsConstant();
-  if (use_imm) {
-    imm = rhs_location.GetConstant()->AsLongConstant()->GetValue();
-    imm_high = High32Bits(imm);
-    imm_low = Low32Bits(imm);
-  } else {
-    rhs_high = rhs_location.AsRegisterPairHigh<Register>();
-    rhs_low = rhs_location.AsRegisterPairLow<Register>();
-  }
-  if (use_imm && imm == 0) {
-    switch (cond) {
-      case kCondEQ:
-      case kCondBE:  // <= 0 if zero
-        __ Or(dst, lhs_high, lhs_low);
-        __ Sltiu(dst, dst, 1);
-        break;
-      case kCondNE:
-      case kCondA:  // > 0 if non-zero
-        __ Or(dst, lhs_high, lhs_low);
-        __ Sltu(dst, ZERO, dst);
-        break;
-      case kCondLT:
-        __ Slt(dst, lhs_high, ZERO);
-        break;
-      case kCondGE:
-        __ Slt(dst, lhs_high, ZERO);
-        __ Xori(dst, dst, 1);
-        break;
-      case kCondLE:
-        __ Or(TMP, lhs_high, lhs_low);
-        __ Sra(AT, lhs_high, 31);
-        __ Sltu(dst, AT, TMP);
-        __ Xori(dst, dst, 1);
-        break;
-      case kCondGT:
-        __ Or(TMP, lhs_high, lhs_low);
-        __ Sra(AT, lhs_high, 31);
-        __ Sltu(dst, AT, TMP);
-        break;
-      case kCondB:  // always false
-        __ Andi(dst, dst, 0);
-        break;
-      case kCondAE:  // always true
-        __ Ori(dst, ZERO, 1);
-        break;
-    }
-  } else if (use_imm) {
-    // TODO: more efficient comparison with constants without loading them into TMP/AT.
-    switch (cond) {
-      case kCondEQ:
-        __ LoadConst32(TMP, imm_high);
-        __ Xor(TMP, TMP, lhs_high);
-        __ LoadConst32(AT, imm_low);
-        __ Xor(AT, AT, lhs_low);
-        __ Or(dst, TMP, AT);
-        __ Sltiu(dst, dst, 1);
-        break;
-      case kCondNE:
-        __ LoadConst32(TMP, imm_high);
-        __ Xor(TMP, TMP, lhs_high);
-        __ LoadConst32(AT, imm_low);
-        __ Xor(AT, AT, lhs_low);
-        __ Or(dst, TMP, AT);
-        __ Sltu(dst, ZERO, dst);
-        break;
-      case kCondLT:
-      case kCondGE:
-        if (dst == lhs_low) {
-          __ LoadConst32(TMP, imm_low);
-          __ Sltu(dst, lhs_low, TMP);
-        }
-        __ LoadConst32(TMP, imm_high);
-        __ Slt(AT, lhs_high, TMP);
-        __ Slt(TMP, TMP, lhs_high);
-        if (dst != lhs_low) {
-          __ LoadConst32(dst, imm_low);
-          __ Sltu(dst, lhs_low, dst);
-        }
-        __ Slt(dst, TMP, dst);
-        __ Or(dst, dst, AT);
-        if (cond == kCondGE) {
-          __ Xori(dst, dst, 1);
-        }
-        break;
-      case kCondGT:
-      case kCondLE:
-        if (dst == lhs_low) {
-          __ LoadConst32(TMP, imm_low);
-          __ Sltu(dst, TMP, lhs_low);
-        }
-        __ LoadConst32(TMP, imm_high);
-        __ Slt(AT, TMP, lhs_high);
-        __ Slt(TMP, lhs_high, TMP);
-        if (dst != lhs_low) {
-          __ LoadConst32(dst, imm_low);
-          __ Sltu(dst, dst, lhs_low);
-        }
-        __ Slt(dst, TMP, dst);
-        __ Or(dst, dst, AT);
-        if (cond == kCondLE) {
-          __ Xori(dst, dst, 1);
-        }
-        break;
-      case kCondB:
-      case kCondAE:
-        if (dst == lhs_low) {
-          __ LoadConst32(TMP, imm_low);
-          __ Sltu(dst, lhs_low, TMP);
-        }
-        __ LoadConst32(TMP, imm_high);
-        __ Sltu(AT, lhs_high, TMP);
-        __ Sltu(TMP, TMP, lhs_high);
-        if (dst != lhs_low) {
-          __ LoadConst32(dst, imm_low);
-          __ Sltu(dst, lhs_low, dst);
-        }
-        __ Slt(dst, TMP, dst);
-        __ Or(dst, dst, AT);
-        if (cond == kCondAE) {
-          __ Xori(dst, dst, 1);
-        }
-        break;
-      case kCondA:
-      case kCondBE:
-        if (dst == lhs_low) {
-          __ LoadConst32(TMP, imm_low);
-          __ Sltu(dst, TMP, lhs_low);
-        }
-        __ LoadConst32(TMP, imm_high);
-        __ Sltu(AT, TMP, lhs_high);
-        __ Sltu(TMP, lhs_high, TMP);
-        if (dst != lhs_low) {
-          __ LoadConst32(dst, imm_low);
-          __ Sltu(dst, dst, lhs_low);
-        }
-        __ Slt(dst, TMP, dst);
-        __ Or(dst, dst, AT);
-        if (cond == kCondBE) {
-          __ Xori(dst, dst, 1);
-        }
-        break;
-    }
-  } else {
-    switch (cond) {
-      case kCondEQ:
-        __ Xor(TMP, lhs_high, rhs_high);
-        __ Xor(AT, lhs_low, rhs_low);
-        __ Or(dst, TMP, AT);
-        __ Sltiu(dst, dst, 1);
-        break;
-      case kCondNE:
-        __ Xor(TMP, lhs_high, rhs_high);
-        __ Xor(AT, lhs_low, rhs_low);
-        __ Or(dst, TMP, AT);
-        __ Sltu(dst, ZERO, dst);
-        break;
-      case kCondLT:
-      case kCondGE:
-        __ Slt(TMP, rhs_high, lhs_high);
-        __ Sltu(AT, lhs_low, rhs_low);
-        __ Slt(TMP, TMP, AT);
-        __ Slt(AT, lhs_high, rhs_high);
-        __ Or(dst, AT, TMP);
-        if (cond == kCondGE) {
-          __ Xori(dst, dst, 1);
-        }
-        break;
-      case kCondGT:
-      case kCondLE:
-        __ Slt(TMP, lhs_high, rhs_high);
-        __ Sltu(AT, rhs_low, lhs_low);
-        __ Slt(TMP, TMP, AT);
-        __ Slt(AT, rhs_high, lhs_high);
-        __ Or(dst, AT, TMP);
-        if (cond == kCondLE) {
-          __ Xori(dst, dst, 1);
-        }
-        break;
-      case kCondB:
-      case kCondAE:
-        __ Sltu(TMP, rhs_high, lhs_high);
-        __ Sltu(AT, lhs_low, rhs_low);
-        __ Slt(TMP, TMP, AT);
-        __ Sltu(AT, lhs_high, rhs_high);
-        __ Or(dst, AT, TMP);
-        if (cond == kCondAE) {
-          __ Xori(dst, dst, 1);
-        }
-        break;
-      case kCondA:
-      case kCondBE:
-        __ Sltu(TMP, lhs_high, rhs_high);
-        __ Sltu(AT, rhs_low, lhs_low);
-        __ Slt(TMP, TMP, AT);
-        __ Sltu(AT, rhs_high, lhs_high);
-        __ Or(dst, AT, TMP);
-        if (cond == kCondBE) {
-          __ Xori(dst, dst, 1);
-        }
-        break;
-    }
-  }
-}
-
-void InstructionCodeGeneratorMIPS::GenerateLongCompareAndBranch(IfCondition cond,
-                                                                LocationSummary* locations,
-                                                                MipsLabel* label) {
-  Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
-  Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
-  Location rhs_location = locations->InAt(1);
-  Register rhs_high = ZERO;
-  Register rhs_low = ZERO;
-  int64_t imm = 0;
-  uint32_t imm_high = 0;
-  uint32_t imm_low = 0;
-  bool use_imm = rhs_location.IsConstant();
-  if (use_imm) {
-    imm = rhs_location.GetConstant()->AsLongConstant()->GetValue();
-    imm_high = High32Bits(imm);
-    imm_low = Low32Bits(imm);
-  } else {
-    rhs_high = rhs_location.AsRegisterPairHigh<Register>();
-    rhs_low = rhs_location.AsRegisterPairLow<Register>();
-  }
-
-  if (use_imm && imm == 0) {
-    switch (cond) {
-      case kCondEQ:
-      case kCondBE:  // <= 0 if zero
-        __ Or(TMP, lhs_high, lhs_low);
-        __ Beqz(TMP, label);
-        break;
-      case kCondNE:
-      case kCondA:  // > 0 if non-zero
-        __ Or(TMP, lhs_high, lhs_low);
-        __ Bnez(TMP, label);
-        break;
-      case kCondLT:
-        __ Bltz(lhs_high, label);
-        break;
-      case kCondGE:
-        __ Bgez(lhs_high, label);
-        break;
-      case kCondLE:
-        __ Or(TMP, lhs_high, lhs_low);
-        __ Sra(AT, lhs_high, 31);
-        __ Bgeu(AT, TMP, label);
-        break;
-      case kCondGT:
-        __ Or(TMP, lhs_high, lhs_low);
-        __ Sra(AT, lhs_high, 31);
-        __ Bltu(AT, TMP, label);
-        break;
-      case kCondB:  // always false
-        break;
-      case kCondAE:  // always true
-        __ B(label);
-        break;
-    }
-  } else if (use_imm) {
-    // TODO: more efficient comparison with constants without loading them into TMP/AT.
-    switch (cond) {
-      case kCondEQ:
-        __ LoadConst32(TMP, imm_high);
-        __ Xor(TMP, TMP, lhs_high);
-        __ LoadConst32(AT, imm_low);
-        __ Xor(AT, AT, lhs_low);
-        __ Or(TMP, TMP, AT);
-        __ Beqz(TMP, label);
-        break;
-      case kCondNE:
-        __ LoadConst32(TMP, imm_high);
-        __ Xor(TMP, TMP, lhs_high);
-        __ LoadConst32(AT, imm_low);
-        __ Xor(AT, AT, lhs_low);
-        __ Or(TMP, TMP, AT);
-        __ Bnez(TMP, label);
-        break;
-      case kCondLT:
-        __ LoadConst32(TMP, imm_high);
-        __ Blt(lhs_high, TMP, label);
-        __ Slt(TMP, TMP, lhs_high);
-        __ LoadConst32(AT, imm_low);
-        __ Sltu(AT, lhs_low, AT);
-        __ Blt(TMP, AT, label);
-        break;
-      case kCondGE:
-        __ LoadConst32(TMP, imm_high);
-        __ Blt(TMP, lhs_high, label);
-        __ Slt(TMP, lhs_high, TMP);
-        __ LoadConst32(AT, imm_low);
-        __ Sltu(AT, lhs_low, AT);
-        __ Or(TMP, TMP, AT);
-        __ Beqz(TMP, label);
-        break;
-      case kCondLE:
-        __ LoadConst32(TMP, imm_high);
-        __ Blt(lhs_high, TMP, label);
-        __ Slt(TMP, TMP, lhs_high);
-        __ LoadConst32(AT, imm_low);
-        __ Sltu(AT, AT, lhs_low);
-        __ Or(TMP, TMP, AT);
-        __ Beqz(TMP, label);
-        break;
-      case kCondGT:
-        __ LoadConst32(TMP, imm_high);
-        __ Blt(TMP, lhs_high, label);
-        __ Slt(TMP, lhs_high, TMP);
-        __ LoadConst32(AT, imm_low);
-        __ Sltu(AT, AT, lhs_low);
-        __ Blt(TMP, AT, label);
-        break;
-      case kCondB:
-        __ LoadConst32(TMP, imm_high);
-        __ Bltu(lhs_high, TMP, label);
-        __ Sltu(TMP, TMP, lhs_high);
-        __ LoadConst32(AT, imm_low);
-        __ Sltu(AT, lhs_low, AT);
-        __ Blt(TMP, AT, label);
-        break;
-      case kCondAE:
-        __ LoadConst32(TMP, imm_high);
-        __ Bltu(TMP, lhs_high, label);
-        __ Sltu(TMP, lhs_high, TMP);
-        __ LoadConst32(AT, imm_low);
-        __ Sltu(AT, lhs_low, AT);
-        __ Or(TMP, TMP, AT);
-        __ Beqz(TMP, label);
-        break;
-      case kCondBE:
-        __ LoadConst32(TMP, imm_high);
-        __ Bltu(lhs_high, TMP, label);
-        __ Sltu(TMP, TMP, lhs_high);
-        __ LoadConst32(AT, imm_low);
-        __ Sltu(AT, AT, lhs_low);
-        __ Or(TMP, TMP, AT);
-        __ Beqz(TMP, label);
-        break;
-      case kCondA:
-        __ LoadConst32(TMP, imm_high);
-        __ Bltu(TMP, lhs_high, label);
-        __ Sltu(TMP, lhs_high, TMP);
-        __ LoadConst32(AT, imm_low);
-        __ Sltu(AT, AT, lhs_low);
-        __ Blt(TMP, AT, label);
-        break;
-    }
-  } else {
-    switch (cond) {
-      case kCondEQ:
-        __ Xor(TMP, lhs_high, rhs_high);
-        __ Xor(AT, lhs_low, rhs_low);
-        __ Or(TMP, TMP, AT);
-        __ Beqz(TMP, label);
-        break;
-      case kCondNE:
-        __ Xor(TMP, lhs_high, rhs_high);
-        __ Xor(AT, lhs_low, rhs_low);
-        __ Or(TMP, TMP, AT);
-        __ Bnez(TMP, label);
-        break;
-      case kCondLT:
-        __ Blt(lhs_high, rhs_high, label);
-        __ Slt(TMP, rhs_high, lhs_high);
-        __ Sltu(AT, lhs_low, rhs_low);
-        __ Blt(TMP, AT, label);
-        break;
-      case kCondGE:
-        __ Blt(rhs_high, lhs_high, label);
-        __ Slt(TMP, lhs_high, rhs_high);
-        __ Sltu(AT, lhs_low, rhs_low);
-        __ Or(TMP, TMP, AT);
-        __ Beqz(TMP, label);
-        break;
-      case kCondLE:
-        __ Blt(lhs_high, rhs_high, label);
-        __ Slt(TMP, rhs_high, lhs_high);
-        __ Sltu(AT, rhs_low, lhs_low);
-        __ Or(TMP, TMP, AT);
-        __ Beqz(TMP, label);
-        break;
-      case kCondGT:
-        __ Blt(rhs_high, lhs_high, label);
-        __ Slt(TMP, lhs_high, rhs_high);
-        __ Sltu(AT, rhs_low, lhs_low);
-        __ Blt(TMP, AT, label);
-        break;
-      case kCondB:
-        __ Bltu(lhs_high, rhs_high, label);
-        __ Sltu(TMP, rhs_high, lhs_high);
-        __ Sltu(AT, lhs_low, rhs_low);
-        __ Blt(TMP, AT, label);
-        break;
-      case kCondAE:
-        __ Bltu(rhs_high, lhs_high, label);
-        __ Sltu(TMP, lhs_high, rhs_high);
-        __ Sltu(AT, lhs_low, rhs_low);
-        __ Or(TMP, TMP, AT);
-        __ Beqz(TMP, label);
-        break;
-      case kCondBE:
-        __ Bltu(lhs_high, rhs_high, label);
-        __ Sltu(TMP, rhs_high, lhs_high);
-        __ Sltu(AT, rhs_low, lhs_low);
-        __ Or(TMP, TMP, AT);
-        __ Beqz(TMP, label);
-        break;
-      case kCondA:
-        __ Bltu(rhs_high, lhs_high, label);
-        __ Sltu(TMP, lhs_high, rhs_high);
-        __ Sltu(AT, rhs_low, lhs_low);
-        __ Blt(TMP, AT, label);
-        break;
-    }
-  }
-}
-
-void InstructionCodeGeneratorMIPS::GenerateFpCompare(IfCondition cond,
-                                                     bool gt_bias,
-                                                     DataType::Type type,
-                                                     LocationSummary* locations) {
-  Register dst = locations->Out().AsRegister<Register>();
-  FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
-  FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
-  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-  if (type == DataType::Type::kFloat32) {
-    if (isR6) {
-      switch (cond) {
-        case kCondEQ:
-          __ CmpEqS(FTMP, lhs, rhs);
-          __ Mfc1(dst, FTMP);
-          __ Andi(dst, dst, 1);
-          break;
-        case kCondNE:
-          __ CmpEqS(FTMP, lhs, rhs);
-          __ Mfc1(dst, FTMP);
-          __ Addiu(dst, dst, 1);
-          break;
-        case kCondLT:
-          if (gt_bias) {
-            __ CmpLtS(FTMP, lhs, rhs);
-          } else {
-            __ CmpUltS(FTMP, lhs, rhs);
-          }
-          __ Mfc1(dst, FTMP);
-          __ Andi(dst, dst, 1);
-          break;
-        case kCondLE:
-          if (gt_bias) {
-            __ CmpLeS(FTMP, lhs, rhs);
-          } else {
-            __ CmpUleS(FTMP, lhs, rhs);
-          }
-          __ Mfc1(dst, FTMP);
-          __ Andi(dst, dst, 1);
-          break;
-        case kCondGT:
-          if (gt_bias) {
-            __ CmpUltS(FTMP, rhs, lhs);
-          } else {
-            __ CmpLtS(FTMP, rhs, lhs);
-          }
-          __ Mfc1(dst, FTMP);
-          __ Andi(dst, dst, 1);
-          break;
-        case kCondGE:
-          if (gt_bias) {
-            __ CmpUleS(FTMP, rhs, lhs);
-          } else {
-            __ CmpLeS(FTMP, rhs, lhs);
-          }
-          __ Mfc1(dst, FTMP);
-          __ Andi(dst, dst, 1);
-          break;
-        default:
-          LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
-          UNREACHABLE();
-      }
-    } else {
-      switch (cond) {
-        case kCondEQ:
-          __ CeqS(0, lhs, rhs);
-          __ LoadConst32(dst, 1);
-          __ Movf(dst, ZERO, 0);
-          break;
-        case kCondNE:
-          __ CeqS(0, lhs, rhs);
-          __ LoadConst32(dst, 1);
-          __ Movt(dst, ZERO, 0);
-          break;
-        case kCondLT:
-          if (gt_bias) {
-            __ ColtS(0, lhs, rhs);
-          } else {
-            __ CultS(0, lhs, rhs);
-          }
-          __ LoadConst32(dst, 1);
-          __ Movf(dst, ZERO, 0);
-          break;
-        case kCondLE:
-          if (gt_bias) {
-            __ ColeS(0, lhs, rhs);
-          } else {
-            __ CuleS(0, lhs, rhs);
-          }
-          __ LoadConst32(dst, 1);
-          __ Movf(dst, ZERO, 0);
-          break;
-        case kCondGT:
-          if (gt_bias) {
-            __ CultS(0, rhs, lhs);
-          } else {
-            __ ColtS(0, rhs, lhs);
-          }
-          __ LoadConst32(dst, 1);
-          __ Movf(dst, ZERO, 0);
-          break;
-        case kCondGE:
-          if (gt_bias) {
-            __ CuleS(0, rhs, lhs);
-          } else {
-            __ ColeS(0, rhs, lhs);
-          }
-          __ LoadConst32(dst, 1);
-          __ Movf(dst, ZERO, 0);
-          break;
-        default:
-          LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
-          UNREACHABLE();
-      }
-    }
-  } else {
-    DCHECK_EQ(type, DataType::Type::kFloat64);
-    if (isR6) {
-      switch (cond) {
-        case kCondEQ:
-          __ CmpEqD(FTMP, lhs, rhs);
-          __ Mfc1(dst, FTMP);
-          __ Andi(dst, dst, 1);
-          break;
-        case kCondNE:
-          __ CmpEqD(FTMP, lhs, rhs);
-          __ Mfc1(dst, FTMP);
-          __ Addiu(dst, dst, 1);
-          break;
-        case kCondLT:
-          if (gt_bias) {
-            __ CmpLtD(FTMP, lhs, rhs);
-          } else {
-            __ CmpUltD(FTMP, lhs, rhs);
-          }
-          __ Mfc1(dst, FTMP);
-          __ Andi(dst, dst, 1);
-          break;
-        case kCondLE:
-          if (gt_bias) {
-            __ CmpLeD(FTMP, lhs, rhs);
-          } else {
-            __ CmpUleD(FTMP, lhs, rhs);
-          }
-          __ Mfc1(dst, FTMP);
-          __ Andi(dst, dst, 1);
-          break;
-        case kCondGT:
-          if (gt_bias) {
-            __ CmpUltD(FTMP, rhs, lhs);
-          } else {
-            __ CmpLtD(FTMP, rhs, lhs);
-          }
-          __ Mfc1(dst, FTMP);
-          __ Andi(dst, dst, 1);
-          break;
-        case kCondGE:
-          if (gt_bias) {
-            __ CmpUleD(FTMP, rhs, lhs);
-          } else {
-            __ CmpLeD(FTMP, rhs, lhs);
-          }
-          __ Mfc1(dst, FTMP);
-          __ Andi(dst, dst, 1);
-          break;
-        default:
-          LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
-          UNREACHABLE();
-      }
-    } else {
-      switch (cond) {
-        case kCondEQ:
-          __ CeqD(0, lhs, rhs);
-          __ LoadConst32(dst, 1);
-          __ Movf(dst, ZERO, 0);
-          break;
-        case kCondNE:
-          __ CeqD(0, lhs, rhs);
-          __ LoadConst32(dst, 1);
-          __ Movt(dst, ZERO, 0);
-          break;
-        case kCondLT:
-          if (gt_bias) {
-            __ ColtD(0, lhs, rhs);
-          } else {
-            __ CultD(0, lhs, rhs);
-          }
-          __ LoadConst32(dst, 1);
-          __ Movf(dst, ZERO, 0);
-          break;
-        case kCondLE:
-          if (gt_bias) {
-            __ ColeD(0, lhs, rhs);
-          } else {
-            __ CuleD(0, lhs, rhs);
-          }
-          __ LoadConst32(dst, 1);
-          __ Movf(dst, ZERO, 0);
-          break;
-        case kCondGT:
-          if (gt_bias) {
-            __ CultD(0, rhs, lhs);
-          } else {
-            __ ColtD(0, rhs, lhs);
-          }
-          __ LoadConst32(dst, 1);
-          __ Movf(dst, ZERO, 0);
-          break;
-        case kCondGE:
-          if (gt_bias) {
-            __ CuleD(0, rhs, lhs);
-          } else {
-            __ ColeD(0, rhs, lhs);
-          }
-          __ LoadConst32(dst, 1);
-          __ Movf(dst, ZERO, 0);
-          break;
-        default:
-          LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
-          UNREACHABLE();
-      }
-    }
-  }
-}
-
-bool InstructionCodeGeneratorMIPS::MaterializeFpCompareR2(IfCondition cond,
-                                                          bool gt_bias,
-                                                          DataType::Type type,
-                                                          LocationSummary* input_locations,
-                                                          int cc) {
-  FRegister lhs = input_locations->InAt(0).AsFpuRegister<FRegister>();
-  FRegister rhs = input_locations->InAt(1).AsFpuRegister<FRegister>();
-  CHECK(!codegen_->GetInstructionSetFeatures().IsR6());
-  if (type == DataType::Type::kFloat32) {
-    switch (cond) {
-      case kCondEQ:
-        __ CeqS(cc, lhs, rhs);
-        return false;
-      case kCondNE:
-        __ CeqS(cc, lhs, rhs);
-        return true;
-      case kCondLT:
-        if (gt_bias) {
-          __ ColtS(cc, lhs, rhs);
-        } else {
-          __ CultS(cc, lhs, rhs);
-        }
-        return false;
-      case kCondLE:
-        if (gt_bias) {
-          __ ColeS(cc, lhs, rhs);
-        } else {
-          __ CuleS(cc, lhs, rhs);
-        }
-        return false;
-      case kCondGT:
-        if (gt_bias) {
-          __ CultS(cc, rhs, lhs);
-        } else {
-          __ ColtS(cc, rhs, lhs);
-        }
-        return false;
-      case kCondGE:
-        if (gt_bias) {
-          __ CuleS(cc, rhs, lhs);
-        } else {
-          __ ColeS(cc, rhs, lhs);
-        }
-        return false;
-      default:
-        LOG(FATAL) << "Unexpected non-floating-point condition";
-        UNREACHABLE();
-    }
-  } else {
-    DCHECK_EQ(type, DataType::Type::kFloat64);
-    switch (cond) {
-      case kCondEQ:
-        __ CeqD(cc, lhs, rhs);
-        return false;
-      case kCondNE:
-        __ CeqD(cc, lhs, rhs);
-        return true;
-      case kCondLT:
-        if (gt_bias) {
-          __ ColtD(cc, lhs, rhs);
-        } else {
-          __ CultD(cc, lhs, rhs);
-        }
-        return false;
-      case kCondLE:
-        if (gt_bias) {
-          __ ColeD(cc, lhs, rhs);
-        } else {
-          __ CuleD(cc, lhs, rhs);
-        }
-        return false;
-      case kCondGT:
-        if (gt_bias) {
-          __ CultD(cc, rhs, lhs);
-        } else {
-          __ ColtD(cc, rhs, lhs);
-        }
-        return false;
-      case kCondGE:
-        if (gt_bias) {
-          __ CuleD(cc, rhs, lhs);
-        } else {
-          __ ColeD(cc, rhs, lhs);
-        }
-        return false;
-      default:
-        LOG(FATAL) << "Unexpected non-floating-point condition";
-        UNREACHABLE();
-    }
-  }
-}
-
-bool InstructionCodeGeneratorMIPS::MaterializeFpCompareR6(IfCondition cond,
-                                                          bool gt_bias,
-                                                          DataType::Type type,
-                                                          LocationSummary* input_locations,
-                                                          FRegister dst) {
-  FRegister lhs = input_locations->InAt(0).AsFpuRegister<FRegister>();
-  FRegister rhs = input_locations->InAt(1).AsFpuRegister<FRegister>();
-  CHECK(codegen_->GetInstructionSetFeatures().IsR6());
-  if (type == DataType::Type::kFloat32) {
-    switch (cond) {
-      case kCondEQ:
-        __ CmpEqS(dst, lhs, rhs);
-        return false;
-      case kCondNE:
-        __ CmpEqS(dst, lhs, rhs);
-        return true;
-      case kCondLT:
-        if (gt_bias) {
-          __ CmpLtS(dst, lhs, rhs);
-        } else {
-          __ CmpUltS(dst, lhs, rhs);
-        }
-        return false;
-      case kCondLE:
-        if (gt_bias) {
-          __ CmpLeS(dst, lhs, rhs);
-        } else {
-          __ CmpUleS(dst, lhs, rhs);
-        }
-        return false;
-      case kCondGT:
-        if (gt_bias) {
-          __ CmpUltS(dst, rhs, lhs);
-        } else {
-          __ CmpLtS(dst, rhs, lhs);
-        }
-        return false;
-      case kCondGE:
-        if (gt_bias) {
-          __ CmpUleS(dst, rhs, lhs);
-        } else {
-          __ CmpLeS(dst, rhs, lhs);
-        }
-        return false;
-      default:
-        LOG(FATAL) << "Unexpected non-floating-point condition";
-        UNREACHABLE();
-    }
-  } else {
-    DCHECK_EQ(type, DataType::Type::kFloat64);
-    switch (cond) {
-      case kCondEQ:
-        __ CmpEqD(dst, lhs, rhs);
-        return false;
-      case kCondNE:
-        __ CmpEqD(dst, lhs, rhs);
-        return true;
-      case kCondLT:
-        if (gt_bias) {
-          __ CmpLtD(dst, lhs, rhs);
-        } else {
-          __ CmpUltD(dst, lhs, rhs);
-        }
-        return false;
-      case kCondLE:
-        if (gt_bias) {
-          __ CmpLeD(dst, lhs, rhs);
-        } else {
-          __ CmpUleD(dst, lhs, rhs);
-        }
-        return false;
-      case kCondGT:
-        if (gt_bias) {
-          __ CmpUltD(dst, rhs, lhs);
-        } else {
-          __ CmpLtD(dst, rhs, lhs);
-        }
-        return false;
-      case kCondGE:
-        if (gt_bias) {
-          __ CmpUleD(dst, rhs, lhs);
-        } else {
-          __ CmpLeD(dst, rhs, lhs);
-        }
-        return false;
-      default:
-        LOG(FATAL) << "Unexpected non-floating-point condition";
-        UNREACHABLE();
-    }
-  }
-}
-
-void InstructionCodeGeneratorMIPS::GenerateFpCompareAndBranch(IfCondition cond,
-                                                              bool gt_bias,
-                                                              DataType::Type type,
-                                                              LocationSummary* locations,
-                                                              MipsLabel* label) {
-  FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
-  FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
-  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-  if (type == DataType::Type::kFloat32) {
-    if (isR6) {
-      switch (cond) {
-        case kCondEQ:
-          __ CmpEqS(FTMP, lhs, rhs);
-          __ Bc1nez(FTMP, label);
-          break;
-        case kCondNE:
-          __ CmpEqS(FTMP, lhs, rhs);
-          __ Bc1eqz(FTMP, label);
-          break;
-        case kCondLT:
-          if (gt_bias) {
-            __ CmpLtS(FTMP, lhs, rhs);
-          } else {
-            __ CmpUltS(FTMP, lhs, rhs);
-          }
-          __ Bc1nez(FTMP, label);
-          break;
-        case kCondLE:
-          if (gt_bias) {
-            __ CmpLeS(FTMP, lhs, rhs);
-          } else {
-            __ CmpUleS(FTMP, lhs, rhs);
-          }
-          __ Bc1nez(FTMP, label);
-          break;
-        case kCondGT:
-          if (gt_bias) {
-            __ CmpUltS(FTMP, rhs, lhs);
-          } else {
-            __ CmpLtS(FTMP, rhs, lhs);
-          }
-          __ Bc1nez(FTMP, label);
-          break;
-        case kCondGE:
-          if (gt_bias) {
-            __ CmpUleS(FTMP, rhs, lhs);
-          } else {
-            __ CmpLeS(FTMP, rhs, lhs);
-          }
-          __ Bc1nez(FTMP, label);
-          break;
-        default:
-          LOG(FATAL) << "Unexpected non-floating-point condition";
-          UNREACHABLE();
-      }
-    } else {
-      switch (cond) {
-        case kCondEQ:
-          __ CeqS(0, lhs, rhs);
-          __ Bc1t(0, label);
-          break;
-        case kCondNE:
-          __ CeqS(0, lhs, rhs);
-          __ Bc1f(0, label);
-          break;
-        case kCondLT:
-          if (gt_bias) {
-            __ ColtS(0, lhs, rhs);
-          } else {
-            __ CultS(0, lhs, rhs);
-          }
-          __ Bc1t(0, label);
-          break;
-        case kCondLE:
-          if (gt_bias) {
-            __ ColeS(0, lhs, rhs);
-          } else {
-            __ CuleS(0, lhs, rhs);
-          }
-          __ Bc1t(0, label);
-          break;
-        case kCondGT:
-          if (gt_bias) {
-            __ CultS(0, rhs, lhs);
-          } else {
-            __ ColtS(0, rhs, lhs);
-          }
-          __ Bc1t(0, label);
-          break;
-        case kCondGE:
-          if (gt_bias) {
-            __ CuleS(0, rhs, lhs);
-          } else {
-            __ ColeS(0, rhs, lhs);
-          }
-          __ Bc1t(0, label);
-          break;
-        default:
-          LOG(FATAL) << "Unexpected non-floating-point condition";
-          UNREACHABLE();
-      }
-    }
-  } else {
-    DCHECK_EQ(type, DataType::Type::kFloat64);
-    if (isR6) {
-      switch (cond) {
-        case kCondEQ:
-          __ CmpEqD(FTMP, lhs, rhs);
-          __ Bc1nez(FTMP, label);
-          break;
-        case kCondNE:
-          __ CmpEqD(FTMP, lhs, rhs);
-          __ Bc1eqz(FTMP, label);
-          break;
-        case kCondLT:
-          if (gt_bias) {
-            __ CmpLtD(FTMP, lhs, rhs);
-          } else {
-            __ CmpUltD(FTMP, lhs, rhs);
-          }
-          __ Bc1nez(FTMP, label);
-          break;
-        case kCondLE:
-          if (gt_bias) {
-            __ CmpLeD(FTMP, lhs, rhs);
-          } else {
-            __ CmpUleD(FTMP, lhs, rhs);
-          }
-          __ Bc1nez(FTMP, label);
-          break;
-        case kCondGT:
-          if (gt_bias) {
-            __ CmpUltD(FTMP, rhs, lhs);
-          } else {
-            __ CmpLtD(FTMP, rhs, lhs);
-          }
-          __ Bc1nez(FTMP, label);
-          break;
-        case kCondGE:
-          if (gt_bias) {
-            __ CmpUleD(FTMP, rhs, lhs);
-          } else {
-            __ CmpLeD(FTMP, rhs, lhs);
-          }
-          __ Bc1nez(FTMP, label);
-          break;
-        default:
-          LOG(FATAL) << "Unexpected non-floating-point condition";
-          UNREACHABLE();
-      }
-    } else {
-      switch (cond) {
-        case kCondEQ:
-          __ CeqD(0, lhs, rhs);
-          __ Bc1t(0, label);
-          break;
-        case kCondNE:
-          __ CeqD(0, lhs, rhs);
-          __ Bc1f(0, label);
-          break;
-        case kCondLT:
-          if (gt_bias) {
-            __ ColtD(0, lhs, rhs);
-          } else {
-            __ CultD(0, lhs, rhs);
-          }
-          __ Bc1t(0, label);
-          break;
-        case kCondLE:
-          if (gt_bias) {
-            __ ColeD(0, lhs, rhs);
-          } else {
-            __ CuleD(0, lhs, rhs);
-          }
-          __ Bc1t(0, label);
-          break;
-        case kCondGT:
-          if (gt_bias) {
-            __ CultD(0, rhs, lhs);
-          } else {
-            __ ColtD(0, rhs, lhs);
-          }
-          __ Bc1t(0, label);
-          break;
-        case kCondGE:
-          if (gt_bias) {
-            __ CuleD(0, rhs, lhs);
-          } else {
-            __ ColeD(0, rhs, lhs);
-          }
-          __ Bc1t(0, label);
-          break;
-        default:
-          LOG(FATAL) << "Unexpected non-floating-point condition";
-          UNREACHABLE();
-      }
-    }
-  }
-}
-
-void InstructionCodeGeneratorMIPS::GenerateTestAndBranch(HInstruction* instruction,
-                                                         size_t condition_input_index,
-                                                         MipsLabel* true_target,
-                                                         MipsLabel* false_target) {
-  HInstruction* cond = instruction->InputAt(condition_input_index);
-
-  if (true_target == nullptr && false_target == nullptr) {
-    // Nothing to do. The code always falls through.
-    return;
-  } else if (cond->IsIntConstant()) {
-    // Constant condition, statically compared against "true" (integer value 1).
-    if (cond->AsIntConstant()->IsTrue()) {
-      if (true_target != nullptr) {
-        __ B(true_target);
-      }
-    } else {
-      DCHECK(cond->AsIntConstant()->IsFalse()) << cond->AsIntConstant()->GetValue();
-      if (false_target != nullptr) {
-        __ B(false_target);
-      }
-    }
-    return;
-  }
-
-  // The following code generates these patterns:
-  //  (1) true_target == nullptr && false_target != nullptr
-  //        - opposite condition true => branch to false_target
-  //  (2) true_target != nullptr && false_target == nullptr
-  //        - condition true => branch to true_target
-  //  (3) true_target != nullptr && false_target != nullptr
-  //        - condition true => branch to true_target
-  //        - branch to false_target
-  if (IsBooleanValueOrMaterializedCondition(cond)) {
-    // The condition instruction has been materialized, compare the output to 0.
-    Location cond_val = instruction->GetLocations()->InAt(condition_input_index);
-    DCHECK(cond_val.IsRegister());
-    if (true_target == nullptr) {
-      __ Beqz(cond_val.AsRegister<Register>(), false_target);
-    } else {
-      __ Bnez(cond_val.AsRegister<Register>(), true_target);
-    }
-  } else {
-    // The condition instruction has not been materialized, use its inputs as
-    // the comparison and its condition as the branch condition.
-    HCondition* condition = cond->AsCondition();
-    DataType::Type type = condition->InputAt(0)->GetType();
-    LocationSummary* locations = cond->GetLocations();
-    IfCondition if_cond = condition->GetCondition();
-    MipsLabel* branch_target = true_target;
-
-    if (true_target == nullptr) {
-      if_cond = condition->GetOppositeCondition();
-      branch_target = false_target;
-    }
-
-    switch (type) {
-      default:
-        GenerateIntCompareAndBranch(if_cond, locations, branch_target);
-        break;
-      case DataType::Type::kInt64:
-        GenerateLongCompareAndBranch(if_cond, locations, branch_target);
-        break;
-      case DataType::Type::kFloat32:
-      case DataType::Type::kFloat64:
-        GenerateFpCompareAndBranch(if_cond, condition->IsGtBias(), type, locations, branch_target);
-        break;
-    }
-  }
-
-  // If neither branch falls through (case 3), the conditional branch to `true_target`
-  // was already emitted (case 2) and we need to emit a jump to `false_target`.
-  if (true_target != nullptr && false_target != nullptr) {
-    __ B(false_target);
-  }
-}
-
-void LocationsBuilderMIPS::VisitIf(HIf* if_instr) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
-  if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
-    locations->SetInAt(0, Location::RequiresRegister());
-  }
-}
-
-void InstructionCodeGeneratorMIPS::VisitIf(HIf* if_instr) {
-  HBasicBlock* true_successor = if_instr->IfTrueSuccessor();
-  HBasicBlock* false_successor = if_instr->IfFalseSuccessor();
-  MipsLabel* true_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor) ?
-      nullptr : codegen_->GetLabelOf(true_successor);
-  MipsLabel* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
-      nullptr : codegen_->GetLabelOf(false_successor);
-  GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
-}
-
-void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator())
-      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
-  InvokeRuntimeCallingConvention calling_convention;
-  RegisterSet caller_saves = RegisterSet::Empty();
-  caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetCustomSlowPathCallerSaves(caller_saves);
-  if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
-    locations->SetInAt(0, Location::RequiresRegister());
-  }
-}
-
-void InstructionCodeGeneratorMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
-  SlowPathCodeMIPS* slow_path =
-      deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS>(deoptimize);
-  GenerateTestAndBranch(deoptimize,
-                        /* condition_input_index= */ 0,
-                        slow_path->GetEntryLabel(),
-                        /* false_target= */ nullptr);
-}
-
-// This function returns true if a conditional move can be generated for HSelect.
-// Otherwise it returns false and HSelect must be implemented in terms of conditonal
-// branches and regular moves.
-//
-// If `locations_to_set` isn't nullptr, its inputs and outputs are set for HSelect.
-//
-// While determining feasibility of a conditional move and setting inputs/outputs
-// are two distinct tasks, this function does both because they share quite a bit
-// of common logic.
-static bool CanMoveConditionally(HSelect* select, bool is_r6, LocationSummary* locations_to_set) {
-  bool materialized = IsBooleanValueOrMaterializedCondition(select->GetCondition());
-  HInstruction* cond = select->InputAt(/* i= */ 2);
-  HCondition* condition = cond->AsCondition();
-
-  DataType::Type cond_type =
-      materialized ? DataType::Type::kInt32 : condition->InputAt(0)->GetType();
-  DataType::Type dst_type = select->GetType();
-
-  HConstant* cst_true_value = select->GetTrueValue()->AsConstant();
-  HConstant* cst_false_value = select->GetFalseValue()->AsConstant();
-  bool is_true_value_zero_constant =
-      (cst_true_value != nullptr && cst_true_value->IsZeroBitPattern());
-  bool is_false_value_zero_constant =
-      (cst_false_value != nullptr && cst_false_value->IsZeroBitPattern());
-
-  bool can_move_conditionally = false;
-  bool use_const_for_false_in = false;
-  bool use_const_for_true_in = false;
-
-  if (!cond->IsConstant()) {
-    switch (cond_type) {
-      default:
-        switch (dst_type) {
-          default:
-            // Moving int on int condition.
-            if (is_r6) {
-              if (is_true_value_zero_constant) {
-                // seleqz out_reg, false_reg, cond_reg
-                can_move_conditionally = true;
-                use_const_for_true_in = true;
-              } else if (is_false_value_zero_constant) {
-                // selnez out_reg, true_reg, cond_reg
-                can_move_conditionally = true;
-                use_const_for_false_in = true;
-              } else if (materialized) {
-                // Not materializing unmaterialized int conditions
-                // to keep the instruction count low.
-                // selnez AT, true_reg, cond_reg
-                // seleqz TMP, false_reg, cond_reg
-                // or out_reg, AT, TMP
-                can_move_conditionally = true;
-              }
-            } else {
-              // movn out_reg, true_reg/ZERO, cond_reg
-              can_move_conditionally = true;
-              use_const_for_true_in = is_true_value_zero_constant;
-            }
-            break;
-          case DataType::Type::kInt64:
-            // Moving long on int condition.
-            if (is_r6) {
-              if (is_true_value_zero_constant) {
-                // seleqz out_reg_lo, false_reg_lo, cond_reg
-                // seleqz out_reg_hi, false_reg_hi, cond_reg
-                can_move_conditionally = true;
-                use_const_for_true_in = true;
-              } else if (is_false_value_zero_constant) {
-                // selnez out_reg_lo, true_reg_lo, cond_reg
-                // selnez out_reg_hi, true_reg_hi, cond_reg
-                can_move_conditionally = true;
-                use_const_for_false_in = true;
-              }
-              // Other long conditional moves would generate 6+ instructions,
-              // which is too many.
-            } else {
-              // movn out_reg_lo, true_reg_lo/ZERO, cond_reg
-              // movn out_reg_hi, true_reg_hi/ZERO, cond_reg
-              can_move_conditionally = true;
-              use_const_for_true_in = is_true_value_zero_constant;
-            }
-            break;
-          case DataType::Type::kFloat32:
-          case DataType::Type::kFloat64:
-            // Moving float/double on int condition.
-            if (is_r6) {
-              if (materialized) {
-                // Not materializing unmaterialized int conditions
-                // to keep the instruction count low.
-                can_move_conditionally = true;
-                if (is_true_value_zero_constant) {
-                  // sltu TMP, ZERO, cond_reg
-                  // mtc1 TMP, temp_cond_reg
-                  // seleqz.fmt out_reg, false_reg, temp_cond_reg
-                  use_const_for_true_in = true;
-                } else if (is_false_value_zero_constant) {
-                  // sltu TMP, ZERO, cond_reg
-                  // mtc1 TMP, temp_cond_reg
-                  // selnez.fmt out_reg, true_reg, temp_cond_reg
-                  use_const_for_false_in = true;
-                } else {
-                  // sltu TMP, ZERO, cond_reg
-                  // mtc1 TMP, temp_cond_reg
-                  // sel.fmt temp_cond_reg, false_reg, true_reg
-                  // mov.fmt out_reg, temp_cond_reg
-                }
-              }
-            } else {
-              // movn.fmt out_reg, true_reg, cond_reg
-              can_move_conditionally = true;
-            }
-            break;
-        }
-        break;
-      case DataType::Type::kInt64:
-        // We don't materialize long comparison now
-        // and use conditional branches instead.
-        break;
-      case DataType::Type::kFloat32:
-      case DataType::Type::kFloat64:
-        switch (dst_type) {
-          default:
-            // Moving int on float/double condition.
-            if (is_r6) {
-              if (is_true_value_zero_constant) {
-                // mfc1 TMP, temp_cond_reg
-                // seleqz out_reg, false_reg, TMP
-                can_move_conditionally = true;
-                use_const_for_true_in = true;
-              } else if (is_false_value_zero_constant) {
-                // mfc1 TMP, temp_cond_reg
-                // selnez out_reg, true_reg, TMP
-                can_move_conditionally = true;
-                use_const_for_false_in = true;
-              } else {
-                // mfc1 TMP, temp_cond_reg
-                // selnez AT, true_reg, TMP
-                // seleqz TMP, false_reg, TMP
-                // or out_reg, AT, TMP
-                can_move_conditionally = true;
-              }
-            } else {
-              // movt out_reg, true_reg/ZERO, cc
-              can_move_conditionally = true;
-              use_const_for_true_in = is_true_value_zero_constant;
-            }
-            break;
-          case DataType::Type::kInt64:
-            // Moving long on float/double condition.
-            if (is_r6) {
-              if (is_true_value_zero_constant) {
-                // mfc1 TMP, temp_cond_reg
-                // seleqz out_reg_lo, false_reg_lo, TMP
-                // seleqz out_reg_hi, false_reg_hi, TMP
-                can_move_conditionally = true;
-                use_const_for_true_in = true;
-              } else if (is_false_value_zero_constant) {
-                // mfc1 TMP, temp_cond_reg
-                // selnez out_reg_lo, true_reg_lo, TMP
-                // selnez out_reg_hi, true_reg_hi, TMP
-                can_move_conditionally = true;
-                use_const_for_false_in = true;
-              }
-              // Other long conditional moves would generate 6+ instructions,
-              // which is too many.
-            } else {
-              // movt out_reg_lo, true_reg_lo/ZERO, cc
-              // movt out_reg_hi, true_reg_hi/ZERO, cc
-              can_move_conditionally = true;
-              use_const_for_true_in = is_true_value_zero_constant;
-            }
-            break;
-          case DataType::Type::kFloat32:
-          case DataType::Type::kFloat64:
-            // Moving float/double on float/double condition.
-            if (is_r6) {
-              can_move_conditionally = true;
-              if (is_true_value_zero_constant) {
-                // seleqz.fmt out_reg, false_reg, temp_cond_reg
-                use_const_for_true_in = true;
-              } else if (is_false_value_zero_constant) {
-                // selnez.fmt out_reg, true_reg, temp_cond_reg
-                use_const_for_false_in = true;
-              } else {
-                // sel.fmt temp_cond_reg, false_reg, true_reg
-                // mov.fmt out_reg, temp_cond_reg
-              }
-            } else {
-              // movt.fmt out_reg, true_reg, cc
-              can_move_conditionally = true;
-            }
-            break;
-        }
-        break;
-    }
-  }
-
-  if (can_move_conditionally) {
-    DCHECK(!use_const_for_false_in || !use_const_for_true_in);
-  } else {
-    DCHECK(!use_const_for_false_in);
-    DCHECK(!use_const_for_true_in);
-  }
-
-  if (locations_to_set != nullptr) {
-    if (use_const_for_false_in) {
-      locations_to_set->SetInAt(0, Location::ConstantLocation(cst_false_value));
-    } else {
-      locations_to_set->SetInAt(0,
-                                DataType::IsFloatingPointType(dst_type)
-                                    ? Location::RequiresFpuRegister()
-                                    : Location::RequiresRegister());
-    }
-    if (use_const_for_true_in) {
-      locations_to_set->SetInAt(1, Location::ConstantLocation(cst_true_value));
-    } else {
-      locations_to_set->SetInAt(1,
-                                DataType::IsFloatingPointType(dst_type)
-                                    ? Location::RequiresFpuRegister()
-                                    : Location::RequiresRegister());
-    }
-    if (materialized) {
-      locations_to_set->SetInAt(2, Location::RequiresRegister());
-    }
-    // On R6 we don't require the output to be the same as the
-    // first input for conditional moves unlike on R2.
-    bool is_out_same_as_first_in = !can_move_conditionally || !is_r6;
-    if (is_out_same_as_first_in) {
-      locations_to_set->SetOut(Location::SameAsFirstInput());
-    } else {
-      locations_to_set->SetOut(DataType::IsFloatingPointType(dst_type)
-                                   ? Location::RequiresFpuRegister()
-                                   : Location::RequiresRegister());
-    }
-  }
-
-  return can_move_conditionally;
-}
-
-void InstructionCodeGeneratorMIPS::GenConditionalMoveR2(HSelect* select) {
-  LocationSummary* locations = select->GetLocations();
-  Location dst = locations->Out();
-  Location src = locations->InAt(1);
-  Register src_reg = ZERO;
-  Register src_reg_high = ZERO;
-  HInstruction* cond = select->InputAt(/* i= */ 2);
-  Register cond_reg = TMP;
-  int cond_cc = 0;
-  DataType::Type cond_type = DataType::Type::kInt32;
-  bool cond_inverted = false;
-  DataType::Type dst_type = select->GetType();
-
-  if (IsBooleanValueOrMaterializedCondition(cond)) {
-    cond_reg = locations->InAt(/* at= */ 2).AsRegister<Register>();
-  } else {
-    HCondition* condition = cond->AsCondition();
-    LocationSummary* cond_locations = cond->GetLocations();
-    IfCondition if_cond = condition->GetCondition();
-    cond_type = condition->InputAt(0)->GetType();
-    switch (cond_type) {
-      default:
-        DCHECK_NE(cond_type, DataType::Type::kInt64);
-        cond_inverted = MaterializeIntCompare(if_cond, cond_locations, cond_reg);
-        break;
-      case DataType::Type::kFloat32:
-      case DataType::Type::kFloat64:
-        cond_inverted = MaterializeFpCompareR2(if_cond,
-                                               condition->IsGtBias(),
-                                               cond_type,
-                                               cond_locations,
-                                               cond_cc);
-        break;
-    }
-  }
-
-  DCHECK(dst.Equals(locations->InAt(0)));
-  if (src.IsRegister()) {
-    src_reg = src.AsRegister<Register>();
-  } else if (src.IsRegisterPair()) {
-    src_reg = src.AsRegisterPairLow<Register>();
-    src_reg_high = src.AsRegisterPairHigh<Register>();
-  } else if (src.IsConstant()) {
-    DCHECK(src.GetConstant()->IsZeroBitPattern());
-  }
-
-  switch (cond_type) {
-    default:
-      switch (dst_type) {
-        default:
-          if (cond_inverted) {
-            __ Movz(dst.AsRegister<Register>(), src_reg, cond_reg);
-          } else {
-            __ Movn(dst.AsRegister<Register>(), src_reg, cond_reg);
-          }
-          break;
-        case DataType::Type::kInt64:
-          if (cond_inverted) {
-            __ Movz(dst.AsRegisterPairLow<Register>(), src_reg, cond_reg);
-            __ Movz(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_reg);
-          } else {
-            __ Movn(dst.AsRegisterPairLow<Register>(), src_reg, cond_reg);
-            __ Movn(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_reg);
-          }
-          break;
-        case DataType::Type::kFloat32:
-          if (cond_inverted) {
-            __ MovzS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
-          } else {
-            __ MovnS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
-          }
-          break;
-        case DataType::Type::kFloat64:
-          if (cond_inverted) {
-            __ MovzD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
-          } else {
-            __ MovnD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_reg);
-          }
-          break;
-      }
-      break;
-    case DataType::Type::kInt64:
-      LOG(FATAL) << "Unreachable";
-      UNREACHABLE();
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      switch (dst_type) {
-        default:
-          if (cond_inverted) {
-            __ Movf(dst.AsRegister<Register>(), src_reg, cond_cc);
-          } else {
-            __ Movt(dst.AsRegister<Register>(), src_reg, cond_cc);
-          }
-          break;
-        case DataType::Type::kInt64:
-          if (cond_inverted) {
-            __ Movf(dst.AsRegisterPairLow<Register>(), src_reg, cond_cc);
-            __ Movf(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_cc);
-          } else {
-            __ Movt(dst.AsRegisterPairLow<Register>(), src_reg, cond_cc);
-            __ Movt(dst.AsRegisterPairHigh<Register>(), src_reg_high, cond_cc);
-          }
-          break;
-        case DataType::Type::kFloat32:
-          if (cond_inverted) {
-            __ MovfS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
-          } else {
-            __ MovtS(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
-          }
-          break;
-        case DataType::Type::kFloat64:
-          if (cond_inverted) {
-            __ MovfD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
-          } else {
-            __ MovtD(dst.AsFpuRegister<FRegister>(), src.AsFpuRegister<FRegister>(), cond_cc);
-          }
-          break;
-      }
-      break;
-  }
-}
-
-void InstructionCodeGeneratorMIPS::GenConditionalMoveR6(HSelect* select) {
-  LocationSummary* locations = select->GetLocations();
-  Location dst = locations->Out();
-  Location false_src = locations->InAt(0);
-  Location true_src = locations->InAt(1);
-  HInstruction* cond = select->InputAt(/* i= */ 2);
-  Register cond_reg = TMP;
-  FRegister fcond_reg = FTMP;
-  DataType::Type cond_type = DataType::Type::kInt32;
-  bool cond_inverted = false;
-  DataType::Type dst_type = select->GetType();
-
-  if (IsBooleanValueOrMaterializedCondition(cond)) {
-    cond_reg = locations->InAt(/* at= */ 2).AsRegister<Register>();
-  } else {
-    HCondition* condition = cond->AsCondition();
-    LocationSummary* cond_locations = cond->GetLocations();
-    IfCondition if_cond = condition->GetCondition();
-    cond_type = condition->InputAt(0)->GetType();
-    switch (cond_type) {
-      default:
-        DCHECK_NE(cond_type, DataType::Type::kInt64);
-        cond_inverted = MaterializeIntCompare(if_cond, cond_locations, cond_reg);
-        break;
-      case DataType::Type::kFloat32:
-      case DataType::Type::kFloat64:
-        cond_inverted = MaterializeFpCompareR6(if_cond,
-                                               condition->IsGtBias(),
-                                               cond_type,
-                                               cond_locations,
-                                               fcond_reg);
-        break;
-    }
-  }
-
-  if (true_src.IsConstant()) {
-    DCHECK(true_src.GetConstant()->IsZeroBitPattern());
-  }
-  if (false_src.IsConstant()) {
-    DCHECK(false_src.GetConstant()->IsZeroBitPattern());
-  }
-
-  switch (dst_type) {
-    default:
-      if (DataType::IsFloatingPointType(cond_type)) {
-        __ Mfc1(cond_reg, fcond_reg);
-      }
-      if (true_src.IsConstant()) {
-        if (cond_inverted) {
-          __ Selnez(dst.AsRegister<Register>(), false_src.AsRegister<Register>(), cond_reg);
-        } else {
-          __ Seleqz(dst.AsRegister<Register>(), false_src.AsRegister<Register>(), cond_reg);
-        }
-      } else if (false_src.IsConstant()) {
-        if (cond_inverted) {
-          __ Seleqz(dst.AsRegister<Register>(), true_src.AsRegister<Register>(), cond_reg);
-        } else {
-          __ Selnez(dst.AsRegister<Register>(), true_src.AsRegister<Register>(), cond_reg);
-        }
-      } else {
-        DCHECK_NE(cond_reg, AT);
-        if (cond_inverted) {
-          __ Seleqz(AT, true_src.AsRegister<Register>(), cond_reg);
-          __ Selnez(TMP, false_src.AsRegister<Register>(), cond_reg);
-        } else {
-          __ Selnez(AT, true_src.AsRegister<Register>(), cond_reg);
-          __ Seleqz(TMP, false_src.AsRegister<Register>(), cond_reg);
-        }
-        __ Or(dst.AsRegister<Register>(), AT, TMP);
-      }
-      break;
-    case DataType::Type::kInt64: {
-      if (DataType::IsFloatingPointType(cond_type)) {
-        __ Mfc1(cond_reg, fcond_reg);
-      }
-      Register dst_lo = dst.AsRegisterPairLow<Register>();
-      Register dst_hi = dst.AsRegisterPairHigh<Register>();
-      if (true_src.IsConstant()) {
-        Register src_lo = false_src.AsRegisterPairLow<Register>();
-        Register src_hi = false_src.AsRegisterPairHigh<Register>();
-        if (cond_inverted) {
-          __ Selnez(dst_lo, src_lo, cond_reg);
-          __ Selnez(dst_hi, src_hi, cond_reg);
-        } else {
-          __ Seleqz(dst_lo, src_lo, cond_reg);
-          __ Seleqz(dst_hi, src_hi, cond_reg);
-        }
-      } else {
-        DCHECK(false_src.IsConstant());
-        Register src_lo = true_src.AsRegisterPairLow<Register>();
-        Register src_hi = true_src.AsRegisterPairHigh<Register>();
-        if (cond_inverted) {
-          __ Seleqz(dst_lo, src_lo, cond_reg);
-          __ Seleqz(dst_hi, src_hi, cond_reg);
-        } else {
-          __ Selnez(dst_lo, src_lo, cond_reg);
-          __ Selnez(dst_hi, src_hi, cond_reg);
-        }
-      }
-      break;
-    }
-    case DataType::Type::kFloat32: {
-      if (!DataType::IsFloatingPointType(cond_type)) {
-        // sel*.fmt tests bit 0 of the condition register, account for that.
-        __ Sltu(TMP, ZERO, cond_reg);
-        __ Mtc1(TMP, fcond_reg);
-      }
-      FRegister dst_reg = dst.AsFpuRegister<FRegister>();
-      if (true_src.IsConstant()) {
-        FRegister src_reg = false_src.AsFpuRegister<FRegister>();
-        if (cond_inverted) {
-          __ SelnezS(dst_reg, src_reg, fcond_reg);
-        } else {
-          __ SeleqzS(dst_reg, src_reg, fcond_reg);
-        }
-      } else if (false_src.IsConstant()) {
-        FRegister src_reg = true_src.AsFpuRegister<FRegister>();
-        if (cond_inverted) {
-          __ SeleqzS(dst_reg, src_reg, fcond_reg);
-        } else {
-          __ SelnezS(dst_reg, src_reg, fcond_reg);
-        }
-      } else {
-        if (cond_inverted) {
-          __ SelS(fcond_reg,
-                  true_src.AsFpuRegister<FRegister>(),
-                  false_src.AsFpuRegister<FRegister>());
-        } else {
-          __ SelS(fcond_reg,
-                  false_src.AsFpuRegister<FRegister>(),
-                  true_src.AsFpuRegister<FRegister>());
-        }
-        __ MovS(dst_reg, fcond_reg);
-      }
-      break;
-    }
-    case DataType::Type::kFloat64: {
-      if (!DataType::IsFloatingPointType(cond_type)) {
-        // sel*.fmt tests bit 0 of the condition register, account for that.
-        __ Sltu(TMP, ZERO, cond_reg);
-        __ Mtc1(TMP, fcond_reg);
-      }
-      FRegister dst_reg = dst.AsFpuRegister<FRegister>();
-      if (true_src.IsConstant()) {
-        FRegister src_reg = false_src.AsFpuRegister<FRegister>();
-        if (cond_inverted) {
-          __ SelnezD(dst_reg, src_reg, fcond_reg);
-        } else {
-          __ SeleqzD(dst_reg, src_reg, fcond_reg);
-        }
-      } else if (false_src.IsConstant()) {
-        FRegister src_reg = true_src.AsFpuRegister<FRegister>();
-        if (cond_inverted) {
-          __ SeleqzD(dst_reg, src_reg, fcond_reg);
-        } else {
-          __ SelnezD(dst_reg, src_reg, fcond_reg);
-        }
-      } else {
-        if (cond_inverted) {
-          __ SelD(fcond_reg,
-                  true_src.AsFpuRegister<FRegister>(),
-                  false_src.AsFpuRegister<FRegister>());
-        } else {
-          __ SelD(fcond_reg,
-                  false_src.AsFpuRegister<FRegister>(),
-                  true_src.AsFpuRegister<FRegister>());
-        }
-        __ MovD(dst_reg, fcond_reg);
-      }
-      break;
-    }
-  }
-}
-
-void LocationsBuilderMIPS::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator())
-      LocationSummary(flag, LocationSummary::kNoCall);
-  locations->SetOut(Location::RequiresRegister());
-}
-
-void InstructionCodeGeneratorMIPS::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
-  __ LoadFromOffset(kLoadWord,
-                    flag->GetLocations()->Out().AsRegister<Register>(),
-                    SP,
-                    codegen_->GetStackOffsetOfShouldDeoptimizeFlag());
-}
-
-void LocationsBuilderMIPS::VisitSelect(HSelect* select) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
-  CanMoveConditionally(select, codegen_->GetInstructionSetFeatures().IsR6(), locations);
-}
-
-void InstructionCodeGeneratorMIPS::VisitSelect(HSelect* select) {
-  bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6();
-  if (CanMoveConditionally(select, is_r6, /* locations_to_set= */ nullptr)) {
-    if (is_r6) {
-      GenConditionalMoveR6(select);
-    } else {
-      GenConditionalMoveR2(select);
-    }
-  } else {
-    LocationSummary* locations = select->GetLocations();
-    MipsLabel false_target;
-    GenerateTestAndBranch(select,
-                          /* condition_input_index= */ 2,
-                          /* true_target= */ nullptr,
-                          &false_target);
-    codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
-    __ Bind(&false_target);
-  }
-}
-
-void LocationsBuilderMIPS::VisitNativeDebugInfo(HNativeDebugInfo* info) {
-  new (GetGraph()->GetAllocator()) LocationSummary(info);
-}
-
-void InstructionCodeGeneratorMIPS::VisitNativeDebugInfo(HNativeDebugInfo*) {
-  // MaybeRecordNativeDebugInfo is already called implicitly in CodeGenerator::Compile.
-}
-
-void CodeGeneratorMIPS::GenerateNop() {
-  __ Nop();
-}
-
-void LocationsBuilderMIPS::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
-  DataType::Type field_type = field_info.GetFieldType();
-  bool is_wide = (field_type == DataType::Type::kInt64) || (field_type == DataType::Type::kFloat64);
-  bool generate_volatile = field_info.IsVolatile() && is_wide;
-  bool object_field_get_with_read_barrier =
-      kEmitCompilerReadBarrier && (field_type == DataType::Type::kReference);
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
-      instruction,
-      generate_volatile
-          ? LocationSummary::kCallOnMainOnly
-          : (object_field_get_with_read_barrier
-              ? LocationSummary::kCallOnSlowPath
-              : LocationSummary::kNoCall));
-
-  if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
-    locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
-  }
-  locations->SetInAt(0, Location::RequiresRegister());
-  if (generate_volatile) {
-    InvokeRuntimeCallingConvention calling_convention;
-    // need A0 to hold base + offset
-    locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-    if (field_type == DataType::Type::kInt64) {
-      locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kInt64));
-    } else {
-      // Use Location::Any() to prevent situations when running out of available fp registers.
-      locations->SetOut(Location::Any());
-      // Need some temp core regs since FP results are returned in core registers
-      Location reg = calling_convention.GetReturnLocation(DataType::Type::kInt64);
-      locations->AddTemp(Location::RegisterLocation(reg.AsRegisterPairLow<Register>()));
-      locations->AddTemp(Location::RegisterLocation(reg.AsRegisterPairHigh<Register>()));
-    }
-  } else {
-    if (DataType::IsFloatingPointType(instruction->GetType())) {
-      locations->SetOut(Location::RequiresFpuRegister());
-    } else {
-      // The output overlaps in the case of an object field get with
-      // read barriers enabled: we do not want the move to overwrite the
-      // object's location, as we need it to emit the read barrier.
-      locations->SetOut(Location::RequiresRegister(),
-                        object_field_get_with_read_barrier
-                            ? Location::kOutputOverlap
-                            : Location::kNoOutputOverlap);
-    }
-    if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
-      // We need a temporary register for the read barrier marking slow
-      // path in CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier.
-      if (!kBakerReadBarrierThunksEnableForFields) {
-        locations->AddTemp(Location::RequiresRegister());
-      }
-    }
-  }
-}
-
-void InstructionCodeGeneratorMIPS::HandleFieldGet(HInstruction* instruction,
-                                                  const FieldInfo& field_info,
-                                                  uint32_t dex_pc) {
-  DCHECK_EQ(DataType::Size(field_info.GetFieldType()), DataType::Size(instruction->GetType()));
-  DataType::Type type = instruction->GetType();
-  LocationSummary* locations = instruction->GetLocations();
-  Location obj_loc = locations->InAt(0);
-  Register obj = obj_loc.AsRegister<Register>();
-  Location dst_loc = locations->Out();
-  LoadOperandType load_type = kLoadUnsignedByte;
-  bool is_volatile = field_info.IsVolatile();
-  uint32_t offset = field_info.GetFieldOffset().Uint32Value();
-  auto null_checker = GetImplicitNullChecker(instruction, codegen_);
-
-  switch (type) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-      load_type = kLoadUnsignedByte;
-      break;
-    case DataType::Type::kInt8:
-      load_type = kLoadSignedByte;
-      break;
-    case DataType::Type::kUint16:
-      load_type = kLoadUnsignedHalfword;
-      break;
-    case DataType::Type::kInt16:
-      load_type = kLoadSignedHalfword;
-      break;
-    case DataType::Type::kInt32:
-    case DataType::Type::kFloat32:
-    case DataType::Type::kReference:
-      load_type = kLoadWord;
-      break;
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat64:
-      load_type = kLoadDoubleword;
-      break;
-    case DataType::Type::kUint32:
-    case DataType::Type::kUint64:
-    case DataType::Type::kVoid:
-      LOG(FATAL) << "Unreachable type " << type;
-      UNREACHABLE();
-  }
-
-  if (is_volatile && load_type == kLoadDoubleword) {
-    InvokeRuntimeCallingConvention calling_convention;
-    __ Addiu32(locations->GetTemp(0).AsRegister<Register>(), obj, offset);
-    // Do implicit Null check
-    __ LoadFromOffset(kLoadWord,
-                      ZERO,
-                      locations->GetTemp(0).AsRegister<Register>(),
-                      0,
-                      null_checker);
-    codegen_->InvokeRuntime(kQuickA64Load, instruction, dex_pc);
-    CheckEntrypointTypes<kQuickA64Load, int64_t, volatile const int64_t*>();
-    if (type == DataType::Type::kFloat64) {
-      // FP results are returned in core registers. Need to move them.
-      if (dst_loc.IsFpuRegister()) {
-        __ Mtc1(locations->GetTemp(1).AsRegister<Register>(), dst_loc.AsFpuRegister<FRegister>());
-        __ MoveToFpuHigh(locations->GetTemp(2).AsRegister<Register>(),
-                         dst_loc.AsFpuRegister<FRegister>());
-      } else {
-        DCHECK(dst_loc.IsDoubleStackSlot());
-        __ StoreToOffset(kStoreWord,
-                         locations->GetTemp(1).AsRegister<Register>(),
-                         SP,
-                         dst_loc.GetStackIndex());
-        __ StoreToOffset(kStoreWord,
-                         locations->GetTemp(2).AsRegister<Register>(),
-                         SP,
-                         dst_loc.GetStackIndex() + 4);
-      }
-    }
-  } else {
-    if (type == DataType::Type::kReference) {
-      // /* HeapReference<Object> */ dst = *(obj + offset)
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
-        Location temp_loc =
-            kBakerReadBarrierThunksEnableForFields ? Location::NoLocation() : locations->GetTemp(0);
-        // Note that a potential implicit null check is handled in this
-        // CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier call.
-        codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
-                                                        dst_loc,
-                                                        obj,
-                                                        offset,
-                                                        temp_loc,
-                                                        /* needs_null_check= */ true);
-        if (is_volatile) {
-          GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
-        }
-      } else {
-        __ LoadFromOffset(kLoadWord, dst_loc.AsRegister<Register>(), obj, offset, null_checker);
-        if (is_volatile) {
-          GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
-        }
-        // If read barriers are enabled, emit read barriers other than
-        // Baker's using a slow path (and also unpoison the loaded
-        // reference, if heap poisoning is enabled).
-        codegen_->MaybeGenerateReadBarrierSlow(instruction, dst_loc, dst_loc, obj_loc, offset);
-      }
-    } else if (!DataType::IsFloatingPointType(type)) {
-      Register dst;
-      if (type == DataType::Type::kInt64) {
-        DCHECK(dst_loc.IsRegisterPair());
-        dst = dst_loc.AsRegisterPairLow<Register>();
-      } else {
-        DCHECK(dst_loc.IsRegister());
-        dst = dst_loc.AsRegister<Register>();
-      }
-      __ LoadFromOffset(load_type, dst, obj, offset, null_checker);
-    } else {
-      DCHECK(dst_loc.IsFpuRegister());
-      FRegister dst = dst_loc.AsFpuRegister<FRegister>();
-      if (type == DataType::Type::kFloat32) {
-        __ LoadSFromOffset(dst, obj, offset, null_checker);
-      } else {
-        __ LoadDFromOffset(dst, obj, offset, null_checker);
-      }
-    }
-  }
-
-  // Memory barriers, in the case of references, are handled in the
-  // previous switch statement.
-  if (is_volatile && (type != DataType::Type::kReference)) {
-    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
-  }
-}
-
-void LocationsBuilderMIPS::HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info) {
-  DataType::Type field_type = field_info.GetFieldType();
-  bool is_wide = (field_type == DataType::Type::kInt64) || (field_type == DataType::Type::kFloat64);
-  bool generate_volatile = field_info.IsVolatile() && is_wide;
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
-      instruction, generate_volatile ? LocationSummary::kCallOnMainOnly : LocationSummary::kNoCall);
-
-  locations->SetInAt(0, Location::RequiresRegister());
-  if (generate_volatile) {
-    InvokeRuntimeCallingConvention calling_convention;
-    // need A0 to hold base + offset
-    locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-    if (field_type == DataType::Type::kInt64) {
-      locations->SetInAt(1, Location::RegisterPairLocation(
-          calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
-    } else {
-      // Use Location::Any() to prevent situations when running out of available fp registers.
-      locations->SetInAt(1, Location::Any());
-      // Pass FP parameters in core registers.
-      locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
-      locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
-    }
-  } else {
-    if (DataType::IsFloatingPointType(field_type)) {
-      locations->SetInAt(1, FpuRegisterOrConstantForStore(instruction->InputAt(1)));
-    } else {
-      locations->SetInAt(1, RegisterOrZeroConstant(instruction->InputAt(1)));
-    }
-  }
-}
-
-void InstructionCodeGeneratorMIPS::HandleFieldSet(HInstruction* instruction,
-                                                  const FieldInfo& field_info,
-                                                  uint32_t dex_pc,
-                                                  bool value_can_be_null) {
-  DataType::Type type = field_info.GetFieldType();
-  LocationSummary* locations = instruction->GetLocations();
-  Register obj = locations->InAt(0).AsRegister<Register>();
-  Location value_location = locations->InAt(1);
-  StoreOperandType store_type = kStoreByte;
-  bool is_volatile = field_info.IsVolatile();
-  uint32_t offset = field_info.GetFieldOffset().Uint32Value();
-  bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1));
-  auto null_checker = GetImplicitNullChecker(instruction, codegen_);
-
-  switch (type) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      store_type = kStoreByte;
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      store_type = kStoreHalfword;
-      break;
-    case DataType::Type::kInt32:
-    case DataType::Type::kFloat32:
-    case DataType::Type::kReference:
-      store_type = kStoreWord;
-      break;
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat64:
-      store_type = kStoreDoubleword;
-      break;
-    case DataType::Type::kUint32:
-    case DataType::Type::kUint64:
-    case DataType::Type::kVoid:
-      LOG(FATAL) << "Unreachable type " << type;
-      UNREACHABLE();
-  }
-
-  if (is_volatile) {
-    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
-  }
-
-  if (is_volatile && store_type == kStoreDoubleword) {
-    InvokeRuntimeCallingConvention calling_convention;
-    __ Addiu32(locations->GetTemp(0).AsRegister<Register>(), obj, offset);
-    // Do implicit Null check.
-    __ LoadFromOffset(kLoadWord,
-                      ZERO,
-                      locations->GetTemp(0).AsRegister<Register>(),
-                      0,
-                      null_checker);
-    if (type == DataType::Type::kFloat64) {
-      // Pass FP parameters in core registers.
-      if (value_location.IsFpuRegister()) {
-        __ Mfc1(locations->GetTemp(1).AsRegister<Register>(),
-                value_location.AsFpuRegister<FRegister>());
-        __ MoveFromFpuHigh(locations->GetTemp(2).AsRegister<Register>(),
-                           value_location.AsFpuRegister<FRegister>());
-      } else if (value_location.IsDoubleStackSlot()) {
-        __ LoadFromOffset(kLoadWord,
-                          locations->GetTemp(1).AsRegister<Register>(),
-                          SP,
-                          value_location.GetStackIndex());
-        __ LoadFromOffset(kLoadWord,
-                          locations->GetTemp(2).AsRegister<Register>(),
-                          SP,
-                          value_location.GetStackIndex() + 4);
-      } else {
-        DCHECK(value_location.IsConstant());
-        DCHECK(value_location.GetConstant()->IsDoubleConstant());
-        int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
-        __ LoadConst64(locations->GetTemp(2).AsRegister<Register>(),
-                       locations->GetTemp(1).AsRegister<Register>(),
-                       value);
-      }
-    }
-    codegen_->InvokeRuntime(kQuickA64Store, instruction, dex_pc);
-    CheckEntrypointTypes<kQuickA64Store, void, volatile int64_t *, int64_t>();
-  } else {
-    if (value_location.IsConstant()) {
-      int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
-      __ StoreConstToOffset(store_type, value, obj, offset, TMP, null_checker);
-    } else if (!DataType::IsFloatingPointType(type)) {
-      Register src;
-      if (type == DataType::Type::kInt64) {
-        src = value_location.AsRegisterPairLow<Register>();
-      } else {
-        src = value_location.AsRegister<Register>();
-      }
-      if (kPoisonHeapReferences && needs_write_barrier) {
-        // Note that in the case where `value` is a null reference,
-        // we do not enter this block, as a null reference does not
-        // need poisoning.
-        DCHECK_EQ(type, DataType::Type::kReference);
-        __ PoisonHeapReference(TMP, src);
-        __ StoreToOffset(store_type, TMP, obj, offset, null_checker);
-      } else {
-        __ StoreToOffset(store_type, src, obj, offset, null_checker);
-      }
-    } else {
-      FRegister src = value_location.AsFpuRegister<FRegister>();
-      if (type == DataType::Type::kFloat32) {
-        __ StoreSToOffset(src, obj, offset, null_checker);
-      } else {
-        __ StoreDToOffset(src, obj, offset, null_checker);
-      }
-    }
-  }
-
-  if (needs_write_barrier) {
-    Register src = value_location.AsRegister<Register>();
-    codegen_->MarkGCCard(obj, src, value_can_be_null);
-  }
-
-  if (is_volatile) {
-    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
-  }
-}
-
-void LocationsBuilderMIPS::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
-  HandleFieldGet(instruction, instruction->GetFieldInfo());
-}
-
-void InstructionCodeGeneratorMIPS::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
-  HandleFieldGet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
-}
-
-void LocationsBuilderMIPS::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
-  HandleFieldSet(instruction, instruction->GetFieldInfo());
-}
-
-void InstructionCodeGeneratorMIPS::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
-  HandleFieldSet(instruction,
-                 instruction->GetFieldInfo(),
-                 instruction->GetDexPc(),
-                 instruction->GetValueCanBeNull());
-}
-
-void InstructionCodeGeneratorMIPS::GenerateReferenceLoadOneRegister(
-    HInstruction* instruction,
-    Location out,
-    uint32_t offset,
-    Location maybe_temp,
-    ReadBarrierOption read_barrier_option) {
-  Register out_reg = out.AsRegister<Register>();
-  if (read_barrier_option == kWithReadBarrier) {
-    CHECK(kEmitCompilerReadBarrier);
-    if (!kUseBakerReadBarrier || !kBakerReadBarrierThunksEnableForFields) {
-      DCHECK(maybe_temp.IsRegister()) << maybe_temp;
-    }
-    if (kUseBakerReadBarrier) {
-      // Load with fast path based Baker's read barrier.
-      // /* HeapReference<Object> */ out = *(out + offset)
-      codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
-                                                      out,
-                                                      out_reg,
-                                                      offset,
-                                                      maybe_temp,
-                                                      /* needs_null_check= */ false);
-    } else {
-      // Load with slow path based read barrier.
-      // Save the value of `out` into `maybe_temp` before overwriting it
-      // in the following move operation, as we will need it for the
-      // read barrier below.
-      __ Move(maybe_temp.AsRegister<Register>(), out_reg);
-      // /* HeapReference<Object> */ out = *(out + offset)
-      __ LoadFromOffset(kLoadWord, out_reg, out_reg, offset);
-      codegen_->GenerateReadBarrierSlow(instruction, out, out, maybe_temp, offset);
-    }
-  } else {
-    // Plain load with no read barrier.
-    // /* HeapReference<Object> */ out = *(out + offset)
-    __ LoadFromOffset(kLoadWord, out_reg, out_reg, offset);
-    __ MaybeUnpoisonHeapReference(out_reg);
-  }
-}
-
-void InstructionCodeGeneratorMIPS::GenerateReferenceLoadTwoRegisters(
-    HInstruction* instruction,
-    Location out,
-    Location obj,
-    uint32_t offset,
-    Location maybe_temp,
-    ReadBarrierOption read_barrier_option) {
-  Register out_reg = out.AsRegister<Register>();
-  Register obj_reg = obj.AsRegister<Register>();
-  if (read_barrier_option == kWithReadBarrier) {
-    CHECK(kEmitCompilerReadBarrier);
-    if (kUseBakerReadBarrier) {
-      if (!kBakerReadBarrierThunksEnableForFields) {
-        DCHECK(maybe_temp.IsRegister()) << maybe_temp;
-      }
-      // Load with fast path based Baker's read barrier.
-      // /* HeapReference<Object> */ out = *(obj + offset)
-      codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
-                                                      out,
-                                                      obj_reg,
-                                                      offset,
-                                                      maybe_temp,
-                                                      /* needs_null_check= */ false);
-    } else {
-      // Load with slow path based read barrier.
-      // /* HeapReference<Object> */ out = *(obj + offset)
-      __ LoadFromOffset(kLoadWord, out_reg, obj_reg, offset);
-      codegen_->GenerateReadBarrierSlow(instruction, out, out, obj, offset);
-    }
-  } else {
-    // Plain load with no read barrier.
-    // /* HeapReference<Object> */ out = *(obj + offset)
-    __ LoadFromOffset(kLoadWord, out_reg, obj_reg, offset);
-    __ MaybeUnpoisonHeapReference(out_reg);
-  }
-}
-
-static inline int GetBakerMarkThunkNumber(Register reg) {
-  static_assert(BAKER_MARK_INTROSPECTION_REGISTER_COUNT == 21, "Expecting equal");
-  if (reg >= V0 && reg <= T7) {  // 14 consequtive regs.
-    return reg - V0;
-  } else if (reg >= S2 && reg <= S7) {  // 6 consequtive regs.
-    return 14 + (reg - S2);
-  } else if (reg == FP) {  // One more.
-    return 20;
-  }
-  LOG(FATAL) << "Unexpected register " << reg;
-  UNREACHABLE();
-}
-
-static inline int GetBakerMarkFieldArrayThunkDisplacement(Register reg, bool short_offset) {
-  int num = GetBakerMarkThunkNumber(reg) +
-      (short_offset ? BAKER_MARK_INTROSPECTION_REGISTER_COUNT : 0);
-  return num * BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE;
-}
-
-static inline int GetBakerMarkGcRootThunkDisplacement(Register reg) {
-  return GetBakerMarkThunkNumber(reg) * BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE +
-      BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET;
-}
-
-void InstructionCodeGeneratorMIPS::GenerateGcRootFieldLoad(HInstruction* instruction,
-                                                           Location root,
-                                                           Register obj,
-                                                           uint32_t offset,
-                                                           ReadBarrierOption read_barrier_option,
-                                                           MipsLabel* label_low) {
-  bool reordering;
-  if (label_low != nullptr) {
-    DCHECK_EQ(offset, 0x5678u);
-  }
-  Register root_reg = root.AsRegister<Register>();
-  if (read_barrier_option == kWithReadBarrier) {
-    DCHECK(kEmitCompilerReadBarrier);
-    if (kUseBakerReadBarrier) {
-      // Fast path implementation of art::ReadBarrier::BarrierForRoot when
-      // Baker's read barrier are used:
-      if (kBakerReadBarrierThunksEnableForGcRoots) {
-        // Note that we do not actually check the value of `GetIsGcMarking()`
-        // to decide whether to mark the loaded GC root or not.  Instead, we
-        // load into `temp` (T9) the read barrier mark introspection entrypoint.
-        // If `temp` is null, it means that `GetIsGcMarking()` is false, and
-        // vice versa.
-        //
-        // We use thunks for the slow path. That thunk checks the reference
-        // and jumps to the entrypoint if needed.
-        //
-        //     temp = Thread::Current()->pReadBarrierMarkReg00
-        //     // AKA &art_quick_read_barrier_mark_introspection.
-        //     GcRoot<mirror::Object> root = *(obj+offset);  // Original reference load.
-        //     if (temp != nullptr) {
-        //        temp = &gc_root_thunk<root_reg>
-        //        root = temp(root)
-        //     }
-
-        bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-        const int32_t entry_point_offset =
-            Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(0);
-        const int thunk_disp = GetBakerMarkGcRootThunkDisplacement(root_reg);
-        int16_t offset_low = Low16Bits(offset);
-        int16_t offset_high = High16Bits(offset - offset_low);  // Accounts for sign
-                                                                // extension in lw.
-        bool short_offset = IsInt<16>(static_cast<int32_t>(offset));
-        Register base = short_offset ? obj : TMP;
-        // Loading the entrypoint does not require a load acquire since it is only changed when
-        // threads are suspended or running a checkpoint.
-        __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
-        reordering = __ SetReorder(false);
-        if (!short_offset) {
-          DCHECK(!label_low);
-          __ AddUpper(base, obj, offset_high);
-        }
-        MipsLabel skip_call;
-        __ Beqz(T9, &skip_call, /* is_bare= */ true);
-        if (label_low != nullptr) {
-          DCHECK(short_offset);
-          __ Bind(label_low);
-        }
-        // /* GcRoot<mirror::Object> */ root = *(obj + offset)
-        __ LoadFromOffset(kLoadWord, root_reg, base, offset_low);  // Single instruction
-                                                                   // in delay slot.
-        if (isR6) {
-          __ Jialc(T9, thunk_disp);
-        } else {
-          __ Addiu(T9, T9, thunk_disp);
-          __ Jalr(T9);
-          __ Nop();
-        }
-        __ Bind(&skip_call);
-        __ SetReorder(reordering);
-      } else {
-        // Note that we do not actually check the value of `GetIsGcMarking()`
-        // to decide whether to mark the loaded GC root or not.  Instead, we
-        // load into `temp` (T9) the read barrier mark entry point corresponding
-        // to register `root`. If `temp` is null, it means that `GetIsGcMarking()`
-        // is false, and vice versa.
-        //
-        //     GcRoot<mirror::Object> root = *(obj+offset);  // Original reference load.
-        //     temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
-        //     if (temp != null) {
-        //       root = temp(root)
-        //     }
-
-        if (label_low != nullptr) {
-          reordering = __ SetReorder(false);
-          __ Bind(label_low);
-        }
-        // /* GcRoot<mirror::Object> */ root = *(obj + offset)
-        __ LoadFromOffset(kLoadWord, root_reg, obj, offset);
-        if (label_low != nullptr) {
-          __ SetReorder(reordering);
-        }
-        static_assert(
-            sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
-            "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
-            "have different sizes.");
-        static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
-                      "art::mirror::CompressedReference<mirror::Object> and int32_t "
-                      "have different sizes.");
-
-        // Slow path marking the GC root `root`.
-        Location temp = Location::RegisterLocation(T9);
-        SlowPathCodeMIPS* slow_path =
-            new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS(
-                instruction,
-                root,
-                /*entrypoint*/ temp);
-        codegen_->AddSlowPath(slow_path);
-
-        const int32_t entry_point_offset =
-            Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(root.reg() - 1);
-        // Loading the entrypoint does not require a load acquire since it is only changed when
-        // threads are suspended or running a checkpoint.
-        __ LoadFromOffset(kLoadWord, temp.AsRegister<Register>(), TR, entry_point_offset);
-        __ Bnez(temp.AsRegister<Register>(), slow_path->GetEntryLabel());
-        __ Bind(slow_path->GetExitLabel());
-      }
-    } else {
-      if (label_low != nullptr) {
-        reordering = __ SetReorder(false);
-        __ Bind(label_low);
-      }
-      // GC root loaded through a slow path for read barriers other
-      // than Baker's.
-      // /* GcRoot<mirror::Object>* */ root = obj + offset
-      __ Addiu32(root_reg, obj, offset);
-      if (label_low != nullptr) {
-        __ SetReorder(reordering);
-      }
-      // /* mirror::Object* */ root = root->Read()
-      codegen_->GenerateReadBarrierForRootSlow(instruction, root, root);
-    }
-  } else {
-    if (label_low != nullptr) {
-      reordering = __ SetReorder(false);
-      __ Bind(label_low);
-    }
-    // Plain GC root load with no read barrier.
-    // /* GcRoot<mirror::Object> */ root = *(obj + offset)
-    __ LoadFromOffset(kLoadWord, root_reg, obj, offset);
-    // Note that GC roots are not affected by heap poisoning, thus we
-    // do not have to unpoison `root_reg` here.
-    if (label_low != nullptr) {
-      __ SetReorder(reordering);
-    }
-  }
-}
-
-void CodeGeneratorMIPS::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
-                                                              Location ref,
-                                                              Register obj,
-                                                              uint32_t offset,
-                                                              Location temp,
-                                                              bool needs_null_check) {
-  DCHECK(kEmitCompilerReadBarrier);
-  DCHECK(kUseBakerReadBarrier);
-
-  if (kBakerReadBarrierThunksEnableForFields) {
-    // Note that we do not actually check the value of `GetIsGcMarking()`
-    // to decide whether to mark the loaded reference or not.  Instead, we
-    // load into `temp` (T9) the read barrier mark introspection entrypoint.
-    // If `temp` is null, it means that `GetIsGcMarking()` is false, and
-    // vice versa.
-    //
-    // We use thunks for the slow path. That thunk checks the reference
-    // and jumps to the entrypoint if needed. If the holder is not gray,
-    // it issues a load-load memory barrier and returns to the original
-    // reference load.
-    //
-    //     temp = Thread::Current()->pReadBarrierMarkReg00
-    //     // AKA &art_quick_read_barrier_mark_introspection.
-    //     if (temp != nullptr) {
-    //        temp = &field_array_thunk<holder_reg>
-    //        temp()
-    //     }
-    //   not_gray_return_address:
-    //     // If the offset is too large to fit into the lw instruction, we
-    //     // use an adjusted base register (TMP) here. This register
-    //     // receives bits 16 ... 31 of the offset before the thunk invocation
-    //     // and the thunk benefits from it.
-    //     HeapReference<mirror::Object> reference = *(obj+offset);  // Original reference load.
-    //   gray_return_address:
-
-    DCHECK(temp.IsInvalid());
-    bool isR6 = GetInstructionSetFeatures().IsR6();
-    int16_t offset_low = Low16Bits(offset);
-    int16_t offset_high = High16Bits(offset - offset_low);  // Accounts for sign extension in lw.
-    bool short_offset = IsInt<16>(static_cast<int32_t>(offset));
-    bool reordering = __ SetReorder(false);
-    const int32_t entry_point_offset =
-        Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(0);
-    // There may have or may have not been a null check if the field offset is smaller than
-    // the page size.
-    // There must've been a null check in case it's actually a load from an array.
-    // We will, however, perform an explicit null check in the thunk as it's easier to
-    // do it than not.
-    if (instruction->IsArrayGet()) {
-      DCHECK(!needs_null_check);
-    }
-    const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, short_offset);
-    // Loading the entrypoint does not require a load acquire since it is only changed when
-    // threads are suspended or running a checkpoint.
-    __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
-    Register ref_reg = ref.AsRegister<Register>();
-    Register base = short_offset ? obj : TMP;
-    MipsLabel skip_call;
-    if (short_offset) {
-      if (isR6) {
-        __ Beqzc(T9, &skip_call, /* is_bare= */ true);
-        __ Nop();  // In forbidden slot.
-        __ Jialc(T9, thunk_disp);
-      } else {
-        __ Beqz(T9, &skip_call, /* is_bare= */ true);
-        __ Addiu(T9, T9, thunk_disp);  // In delay slot.
-        __ Jalr(T9);
-        __ Nop();  // In delay slot.
-      }
-      __ Bind(&skip_call);
-    } else {
-      if (isR6) {
-        __ Beqz(T9, &skip_call, /* is_bare= */ true);
-        __ Aui(base, obj, offset_high);  // In delay slot.
-        __ Jialc(T9, thunk_disp);
-        __ Bind(&skip_call);
-      } else {
-        __ Lui(base, offset_high);
-        __ Beqz(T9, &skip_call, /* is_bare= */ true);
-        __ Addiu(T9, T9, thunk_disp);  // In delay slot.
-        __ Jalr(T9);
-        __ Bind(&skip_call);
-        __ Addu(base, base, obj);  // In delay slot.
-      }
-    }
-    // /* HeapReference<Object> */ ref = *(obj + offset)
-    __ LoadFromOffset(kLoadWord, ref_reg, base, offset_low);  // Single instruction.
-    if (needs_null_check) {
-      MaybeRecordImplicitNullCheck(instruction);
-    }
-    __ MaybeUnpoisonHeapReference(ref_reg);
-    __ SetReorder(reordering);
-    return;
-  }
-
-  // /* HeapReference<Object> */ ref = *(obj + offset)
-  Location no_index = Location::NoLocation();
-  ScaleFactor no_scale_factor = TIMES_1;
-  GenerateReferenceLoadWithBakerReadBarrier(instruction,
-                                            ref,
-                                            obj,
-                                            offset,
-                                            no_index,
-                                            no_scale_factor,
-                                            temp,
-                                            needs_null_check);
-}
-
-void CodeGeneratorMIPS::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
-                                                              Location ref,
-                                                              Register obj,
-                                                              uint32_t data_offset,
-                                                              Location index,
-                                                              Location temp,
-                                                              bool needs_null_check) {
-  DCHECK(kEmitCompilerReadBarrier);
-  DCHECK(kUseBakerReadBarrier);
-
-  static_assert(
-      sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
-      "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
-  ScaleFactor scale_factor = TIMES_4;
-
-  if (kBakerReadBarrierThunksEnableForArrays) {
-    // Note that we do not actually check the value of `GetIsGcMarking()`
-    // to decide whether to mark the loaded reference or not.  Instead, we
-    // load into `temp` (T9) the read barrier mark introspection entrypoint.
-    // If `temp` is null, it means that `GetIsGcMarking()` is false, and
-    // vice versa.
-    //
-    // We use thunks for the slow path. That thunk checks the reference
-    // and jumps to the entrypoint if needed. If the holder is not gray,
-    // it issues a load-load memory barrier and returns to the original
-    // reference load.
-    //
-    //     temp = Thread::Current()->pReadBarrierMarkReg00
-    //     // AKA &art_quick_read_barrier_mark_introspection.
-    //     if (temp != nullptr) {
-    //        temp = &field_array_thunk<holder_reg>
-    //        temp()
-    //     }
-    //   not_gray_return_address:
-    //     // The element address is pre-calculated in the TMP register before the
-    //     // thunk invocation and the thunk benefits from it.
-    //     HeapReference<mirror::Object> reference = data[index];  // Original reference load.
-    //   gray_return_address:
-
-    DCHECK(temp.IsInvalid());
-    DCHECK(index.IsValid());
-    bool reordering = __ SetReorder(false);
-    const int32_t entry_point_offset =
-        Thread::ReadBarrierMarkEntryPointsOffset<kMipsPointerSize>(0);
-    // We will not do the explicit null check in the thunk as some form of a null check
-    // must've been done earlier.
-    DCHECK(!needs_null_check);
-    const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset= */ false);
-    // Loading the entrypoint does not require a load acquire since it is only changed when
-    // threads are suspended or running a checkpoint.
-    __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset);
-    Register ref_reg = ref.AsRegister<Register>();
-    Register index_reg = index.IsRegisterPair()
-        ? index.AsRegisterPairLow<Register>()
-        : index.AsRegister<Register>();
-    MipsLabel skip_call;
-    if (GetInstructionSetFeatures().IsR6()) {
-      __ Beqz(T9, &skip_call, /* is_bare= */ true);
-      __ Lsa(TMP, index_reg, obj, scale_factor);  // In delay slot.
-      __ Jialc(T9, thunk_disp);
-      __ Bind(&skip_call);
-    } else {
-      __ Sll(TMP, index_reg, scale_factor);
-      __ Beqz(T9, &skip_call, /* is_bare= */ true);
-      __ Addiu(T9, T9, thunk_disp);  // In delay slot.
-      __ Jalr(T9);
-      __ Bind(&skip_call);
-      __ Addu(TMP, TMP, obj);  // In delay slot.
-    }
-    // /* HeapReference<Object> */ ref = *(obj + data_offset + (index << scale_factor))
-    DCHECK(IsInt<16>(static_cast<int32_t>(data_offset))) << data_offset;
-    __ LoadFromOffset(kLoadWord, ref_reg, TMP, data_offset);  // Single instruction.
-    __ MaybeUnpoisonHeapReference(ref_reg);
-    __ SetReorder(reordering);
-    return;
-  }
-
-  // /* HeapReference<Object> */ ref =
-  //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
-  GenerateReferenceLoadWithBakerReadBarrier(instruction,
-                                            ref,
-                                            obj,
-                                            data_offset,
-                                            index,
-                                            scale_factor,
-                                            temp,
-                                            needs_null_check);
-}
-
-void CodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
-                                                                  Location ref,
-                                                                  Register obj,
-                                                                  uint32_t offset,
-                                                                  Location index,
-                                                                  ScaleFactor scale_factor,
-                                                                  Location temp,
-                                                                  bool needs_null_check,
-                                                                  bool always_update_field) {
-  DCHECK(kEmitCompilerReadBarrier);
-  DCHECK(kUseBakerReadBarrier);
-
-  // In slow path based read barriers, the read barrier call is
-  // inserted after the original load. However, in fast path based
-  // Baker's read barriers, we need to perform the load of
-  // mirror::Object::monitor_ *before* the original reference load.
-  // This load-load ordering is required by the read barrier.
-  // The fast path/slow path (for Baker's algorithm) should look like:
-  //
-  //   uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
-  //   lfence;  // Load fence or artificial data dependency to prevent load-load reordering
-  //   HeapReference<Object> ref = *src;  // Original reference load.
-  //   bool is_gray = (rb_state == ReadBarrier::GrayState());
-  //   if (is_gray) {
-  //     ref = ReadBarrier::Mark(ref);  // Performed by runtime entrypoint slow path.
-  //   }
-  //
-  // Note: the original implementation in ReadBarrier::Barrier is
-  // slightly more complex as it performs additional checks that we do
-  // not do here for performance reasons.
-
-  Register ref_reg = ref.AsRegister<Register>();
-  Register temp_reg = temp.AsRegister<Register>();
-  uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
-
-  // /* int32_t */ monitor = obj->monitor_
-  __ LoadFromOffset(kLoadWord, temp_reg, obj, monitor_offset);
-  if (needs_null_check) {
-    MaybeRecordImplicitNullCheck(instruction);
-  }
-  // /* LockWord */ lock_word = LockWord(monitor)
-  static_assert(sizeof(LockWord) == sizeof(int32_t),
-                "art::LockWord and int32_t have different sizes.");
-
-  __ Sync(0);  // Barrier to prevent load-load reordering.
-
-  // The actual reference load.
-  if (index.IsValid()) {
-    // Load types involving an "index": ArrayGet,
-    // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
-    // intrinsics.
-    // /* HeapReference<Object> */ ref = *(obj + offset + (index << scale_factor))
-    if (index.IsConstant()) {
-      size_t computed_offset =
-          (index.GetConstant()->AsIntConstant()->GetValue() << scale_factor) + offset;
-      __ LoadFromOffset(kLoadWord, ref_reg, obj, computed_offset);
-    } else {
-      // Handle the special case of the
-      // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
-      // intrinsics, which use a register pair as index ("long
-      // offset"), of which only the low part contains data.
-      Register index_reg = index.IsRegisterPair()
-          ? index.AsRegisterPairLow<Register>()
-          : index.AsRegister<Register>();
-      __ ShiftAndAdd(TMP, index_reg, obj, scale_factor, TMP);
-      __ LoadFromOffset(kLoadWord, ref_reg, TMP, offset);
-    }
-  } else {
-    // /* HeapReference<Object> */ ref = *(obj + offset)
-    __ LoadFromOffset(kLoadWord, ref_reg, obj, offset);
-  }
-
-  // Object* ref = ref_addr->AsMirrorPtr()
-  __ MaybeUnpoisonHeapReference(ref_reg);
-
-  // Slow path marking the object `ref` when it is gray.
-  SlowPathCodeMIPS* slow_path;
-  if (always_update_field) {
-    // ReadBarrierMarkAndUpdateFieldSlowPathMIPS only supports address
-    // of the form `obj + field_offset`, where `obj` is a register and
-    // `field_offset` is a register pair (of which only the lower half
-    // is used). Thus `offset` and `scale_factor` above are expected
-    // to be null in this code path.
-    DCHECK_EQ(offset, 0u);
-    DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
-    slow_path = new (GetScopedAllocator())
-        ReadBarrierMarkAndUpdateFieldSlowPathMIPS(instruction,
-                                                  ref,
-                                                  obj,
-                                                  /* field_offset= */ index,
-                                                  temp_reg);
-  } else {
-    slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS(instruction, ref);
-  }
-  AddSlowPath(slow_path);
-
-  // if (rb_state == ReadBarrier::GrayState())
-  //   ref = ReadBarrier::Mark(ref);
-  // Given the numeric representation, it's enough to check the low bit of the
-  // rb_state. We do that by shifting the bit into the sign bit (31) and
-  // performing a branch on less than zero.
-  static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0");
-  static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
-  static_assert(LockWord::kReadBarrierStateSize == 1, "Expecting 1-bit read barrier state size");
-  __ Sll(temp_reg, temp_reg, 31 - LockWord::kReadBarrierStateShift);
-  __ Bltz(temp_reg, slow_path->GetEntryLabel());
-  __ Bind(slow_path->GetExitLabel());
-}
-
-void CodeGeneratorMIPS::GenerateReadBarrierSlow(HInstruction* instruction,
-                                                Location out,
-                                                Location ref,
-                                                Location obj,
-                                                uint32_t offset,
-                                                Location index) {
-  DCHECK(kEmitCompilerReadBarrier);
-
-  // Insert a slow path based read barrier *after* the reference load.
-  //
-  // If heap poisoning is enabled, the unpoisoning of the loaded
-  // reference will be carried out by the runtime within the slow
-  // path.
-  //
-  // Note that `ref` currently does not get unpoisoned (when heap
-  // poisoning is enabled), which is alright as the `ref` argument is
-  // not used by the artReadBarrierSlow entry point.
-  //
-  // TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
-  SlowPathCodeMIPS* slow_path = new (GetScopedAllocator())
-      ReadBarrierForHeapReferenceSlowPathMIPS(instruction, out, ref, obj, offset, index);
-  AddSlowPath(slow_path);
-
-  __ B(slow_path->GetEntryLabel());
-  __ Bind(slow_path->GetExitLabel());
-}
-
-void CodeGeneratorMIPS::MaybeGenerateReadBarrierSlow(HInstruction* instruction,
-                                                     Location out,
-                                                     Location ref,
-                                                     Location obj,
-                                                     uint32_t offset,
-                                                     Location index) {
-  if (kEmitCompilerReadBarrier) {
-    // Baker's read barriers shall be handled by the fast path
-    // (CodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier).
-    DCHECK(!kUseBakerReadBarrier);
-    // If heap poisoning is enabled, unpoisoning will be taken care of
-    // by the runtime within the slow path.
-    GenerateReadBarrierSlow(instruction, out, ref, obj, offset, index);
-  } else if (kPoisonHeapReferences) {
-    __ UnpoisonHeapReference(out.AsRegister<Register>());
-  }
-}
-
-void CodeGeneratorMIPS::GenerateReadBarrierForRootSlow(HInstruction* instruction,
-                                                       Location out,
-                                                       Location root) {
-  DCHECK(kEmitCompilerReadBarrier);
-
-  // Insert a slow path based read barrier *after* the GC root load.
-  //
-  // Note that GC roots are not affected by heap poisoning, so we do
-  // not need to do anything special for this here.
-  SlowPathCodeMIPS* slow_path =
-      new (GetScopedAllocator()) ReadBarrierForRootSlowPathMIPS(instruction, out, root);
-  AddSlowPath(slow_path);
-
-  __ B(slow_path->GetEntryLabel());
-  __ Bind(slow_path->GetExitLabel());
-}
-
-void LocationsBuilderMIPS::VisitInstanceOf(HInstanceOf* instruction) {
-  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
-  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
-  bool baker_read_barrier_slow_path = false;
-  switch (type_check_kind) {
-    case TypeCheckKind::kExactCheck:
-    case TypeCheckKind::kAbstractClassCheck:
-    case TypeCheckKind::kClassHierarchyCheck:
-    case TypeCheckKind::kArrayObjectCheck: {
-      bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction);
-      call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
-      baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier;
-      break;
-    }
-    case TypeCheckKind::kArrayCheck:
-    case TypeCheckKind::kUnresolvedCheck:
-    case TypeCheckKind::kInterfaceCheck:
-      call_kind = LocationSummary::kCallOnSlowPath;
-      break;
-    case TypeCheckKind::kBitstringCheck:
-      break;
-  }
-
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
-  if (baker_read_barrier_slow_path) {
-    locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
-  }
-  locations->SetInAt(0, Location::RequiresRegister());
-  if (type_check_kind == TypeCheckKind::kBitstringCheck) {
-    locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant()));
-    locations->SetInAt(2, Location::ConstantLocation(instruction->InputAt(2)->AsConstant()));
-    locations->SetInAt(3, Location::ConstantLocation(instruction->InputAt(3)->AsConstant()));
-  } else {
-    locations->SetInAt(1, Location::RequiresRegister());
-  }
-  // The output does overlap inputs.
-  // Note that TypeCheckSlowPathMIPS uses this register too.
-  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
-  locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind));
-}
-
-void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
-  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
-  LocationSummary* locations = instruction->GetLocations();
-  Location obj_loc = locations->InAt(0);
-  Register obj = obj_loc.AsRegister<Register>();
-  Location cls = locations->InAt(1);
-  Location out_loc = locations->Out();
-  Register out = out_loc.AsRegister<Register>();
-  const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
-  DCHECK_LE(num_temps, 1u);
-  Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation();
-  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
-  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
-  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
-  MipsLabel done;
-  SlowPathCodeMIPS* slow_path = nullptr;
-
-  // Return 0 if `obj` is null.
-  // Avoid this check if we know `obj` is not null.
-  if (instruction->MustDoNullCheck()) {
-    __ Move(out, ZERO);
-    __ Beqz(obj, &done);
-  }
-
-  switch (type_check_kind) {
-    case TypeCheckKind::kExactCheck: {
-      ReadBarrierOption read_barrier_option =
-          CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
-      // /* HeapReference<Class> */ out = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        out_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp_loc,
-                                        read_barrier_option);
-      // Classes must be equal for the instanceof to succeed.
-      __ Xor(out, out, cls.AsRegister<Register>());
-      __ Sltiu(out, out, 1);
-      break;
-    }
-
-    case TypeCheckKind::kAbstractClassCheck: {
-      ReadBarrierOption read_barrier_option =
-          CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
-      // /* HeapReference<Class> */ out = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        out_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp_loc,
-                                        read_barrier_option);
-      // If the class is abstract, we eagerly fetch the super class of the
-      // object to avoid doing a comparison we know will fail.
-      MipsLabel loop;
-      __ Bind(&loop);
-      // /* HeapReference<Class> */ out = out->super_class_
-      GenerateReferenceLoadOneRegister(instruction,
-                                       out_loc,
-                                       super_offset,
-                                       maybe_temp_loc,
-                                       read_barrier_option);
-      // If `out` is null, we use it for the result, and jump to `done`.
-      __ Beqz(out, &done);
-      __ Bne(out, cls.AsRegister<Register>(), &loop);
-      __ LoadConst32(out, 1);
-      break;
-    }
-
-    case TypeCheckKind::kClassHierarchyCheck: {
-      ReadBarrierOption read_barrier_option =
-          CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
-      // /* HeapReference<Class> */ out = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        out_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp_loc,
-                                        read_barrier_option);
-      // Walk over the class hierarchy to find a match.
-      MipsLabel loop, success;
-      __ Bind(&loop);
-      __ Beq(out, cls.AsRegister<Register>(), &success);
-      // /* HeapReference<Class> */ out = out->super_class_
-      GenerateReferenceLoadOneRegister(instruction,
-                                       out_loc,
-                                       super_offset,
-                                       maybe_temp_loc,
-                                       read_barrier_option);
-      __ Bnez(out, &loop);
-      // If `out` is null, we use it for the result, and jump to `done`.
-      __ B(&done);
-      __ Bind(&success);
-      __ LoadConst32(out, 1);
-      break;
-    }
-
-    case TypeCheckKind::kArrayObjectCheck: {
-      ReadBarrierOption read_barrier_option =
-          CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
-      // /* HeapReference<Class> */ out = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        out_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp_loc,
-                                        read_barrier_option);
-      // Do an exact check.
-      MipsLabel success;
-      __ Beq(out, cls.AsRegister<Register>(), &success);
-      // Otherwise, we need to check that the object's class is a non-primitive array.
-      // /* HeapReference<Class> */ out = out->component_type_
-      GenerateReferenceLoadOneRegister(instruction,
-                                       out_loc,
-                                       component_offset,
-                                       maybe_temp_loc,
-                                       read_barrier_option);
-      // If `out` is null, we use it for the result, and jump to `done`.
-      __ Beqz(out, &done);
-      __ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
-      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
-      __ Sltiu(out, out, 1);
-      __ B(&done);
-      __ Bind(&success);
-      __ LoadConst32(out, 1);
-      break;
-    }
-
-    case TypeCheckKind::kArrayCheck: {
-      // No read barrier since the slow path will retry upon failure.
-      // /* HeapReference<Class> */ out = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        out_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp_loc,
-                                        kWithoutReadBarrier);
-      DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
-          instruction, /* is_fatal= */ false);
-      codegen_->AddSlowPath(slow_path);
-      __ Bne(out, cls.AsRegister<Register>(), slow_path->GetEntryLabel());
-      __ LoadConst32(out, 1);
-      break;
-    }
-
-    case TypeCheckKind::kUnresolvedCheck:
-    case TypeCheckKind::kInterfaceCheck: {
-      // Note that we indeed only call on slow path, but we always go
-      // into the slow path for the unresolved and interface check
-      // cases.
-      //
-      // We cannot directly call the InstanceofNonTrivial runtime
-      // entry point without resorting to a type checking slow path
-      // here (i.e. by calling InvokeRuntime directly), as it would
-      // require to assign fixed registers for the inputs of this
-      // HInstanceOf instruction (following the runtime calling
-      // convention), which might be cluttered by the potential first
-      // read barrier emission at the beginning of this method.
-      //
-      // TODO: Introduce a new runtime entry point taking the object
-      // to test (instead of its class) as argument, and let it deal
-      // with the read barrier issues. This will let us refactor this
-      // case of the `switch` code as it was previously (with a direct
-      // call to the runtime not using a type checking slow path).
-      // This should also be beneficial for the other cases above.
-      DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
-          instruction, /* is_fatal= */ false);
-      codegen_->AddSlowPath(slow_path);
-      __ B(slow_path->GetEntryLabel());
-      break;
-    }
-
-    case TypeCheckKind::kBitstringCheck: {
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        out_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp_loc,
-                                        kWithoutReadBarrier);
-
-      GenerateBitstringTypeCheckCompare(instruction, out);
-      __ Sltiu(out, out, 1);
-      break;
-    }
-  }
-
-  __ Bind(&done);
-
-  if (slow_path != nullptr) {
-    __ Bind(slow_path->GetExitLabel());
-  }
-}
-
-void LocationsBuilderMIPS::VisitIntConstant(HIntConstant* constant) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
-  locations->SetOut(Location::ConstantLocation(constant));
-}
-
-void InstructionCodeGeneratorMIPS::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
-  // Will be generated at use site.
-}
-
-void LocationsBuilderMIPS::VisitNullConstant(HNullConstant* constant) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
-  locations->SetOut(Location::ConstantLocation(constant));
-}
-
-void InstructionCodeGeneratorMIPS::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
-  // Will be generated at use site.
-}
-
-void LocationsBuilderMIPS::HandleInvoke(HInvoke* invoke) {
-  InvokeDexCallingConventionVisitorMIPS calling_convention_visitor;
-  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
-}
-
-void LocationsBuilderMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
-  HandleInvoke(invoke);
-  // The register T7 is required to be used for the hidden argument in
-  // art_quick_imt_conflict_trampoline, so add the hidden argument.
-  invoke->GetLocations()->AddTemp(Location::RegisterLocation(T7));
-}
-
-void InstructionCodeGeneratorMIPS::VisitInvokeInterface(HInvokeInterface* invoke) {
-  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
-  Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
-  Location receiver = invoke->GetLocations()->InAt(0);
-  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
-
-  // temp = object->GetClass();
-  if (receiver.IsStackSlot()) {
-    __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
-    __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
-  } else {
-    __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
-  }
-  codegen_->MaybeRecordImplicitNullCheck(invoke);
-  // Instead of simply (possibly) unpoisoning `temp` here, we should
-  // emit a read barrier for the previous class reference load.
-  // However this is not required in practice, as this is an
-  // intermediate/temporary reference and because the current
-  // concurrent copying collector keeps the from-space memory
-  // intact/accessible until the end of the marking phase (the
-  // concurrent copying collector may not in the future).
-  __ MaybeUnpoisonHeapReference(temp);
-  __ LoadFromOffset(kLoadWord, temp, temp,
-      mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
-  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-      invoke->GetImtIndex(), kMipsPointerSize));
-  // temp = temp->GetImtEntryAt(method_offset);
-  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
-  // T9 = temp->GetEntryPoint();
-  __ LoadFromOffset(kLoadWord, T9, temp, entry_point.Int32Value());
-  // Set the hidden argument.
-  __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<Register>(),
-                 invoke->GetDexMethodIndex());
-  // T9();
-  __ Jalr(T9);
-  __ NopIfNoReordering();
-  DCHECK(!codegen_->IsLeafMethod());
-  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
-}
-
-void LocationsBuilderMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
-  IntrinsicLocationsBuilderMIPS intrinsic(codegen_);
-  if (intrinsic.TryDispatch(invoke)) {
-    return;
-  }
-
-  HandleInvoke(invoke);
-}
-
-void LocationsBuilderMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
-  // Explicit clinit checks triggered by static invokes must have been pruned by
-  // art::PrepareForRegisterAllocation.
-  DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
-
-  bool is_r6 = codegen_->GetInstructionSetFeatures().IsR6();
-  bool has_irreducible_loops = codegen_->GetGraph()->HasIrreducibleLoops();
-  bool has_extra_input = invoke->HasPcRelativeMethodLoadKind() && !is_r6 && !has_irreducible_loops;
-
-  IntrinsicLocationsBuilderMIPS intrinsic(codegen_);
-  if (intrinsic.TryDispatch(invoke)) {
-    if (invoke->GetLocations()->CanCall() && has_extra_input) {
-      invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::Any());
-    }
-    return;
-  }
-
-  HandleInvoke(invoke);
-
-  // Add the extra input register if either the dex cache array base register
-  // or the PC-relative base register for accessing literals is needed.
-  if (has_extra_input) {
-    invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::RequiresRegister());
-  }
-}
-
-void LocationsBuilderMIPS::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
-  HandleInvoke(invoke);
-}
-
-void InstructionCodeGeneratorMIPS::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
-  codegen_->GenerateInvokePolymorphicCall(invoke);
-}
-
-void LocationsBuilderMIPS::VisitInvokeCustom(HInvokeCustom* invoke) {
-  HandleInvoke(invoke);
-}
-
-void InstructionCodeGeneratorMIPS::VisitInvokeCustom(HInvokeCustom* invoke) {
-  codegen_->GenerateInvokeCustomCall(invoke);
-}
-
-static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS* codegen) {
-  if (invoke->GetLocations()->Intrinsified()) {
-    IntrinsicCodeGeneratorMIPS intrinsic(codegen);
-    intrinsic.Dispatch(invoke);
-    return true;
-  }
-  return false;
-}
-
-HLoadString::LoadKind CodeGeneratorMIPS::GetSupportedLoadStringKind(
-    HLoadString::LoadKind desired_string_load_kind) {
-  switch (desired_string_load_kind) {
-    case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadString::LoadKind::kBootImageRelRo:
-    case HLoadString::LoadKind::kBssEntry:
-      DCHECK(!Runtime::Current()->UseJitCompilation());
-      break;
-    case HLoadString::LoadKind::kJitBootImageAddress:
-    case HLoadString::LoadKind::kJitTableAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
-      break;
-    case HLoadString::LoadKind::kRuntimeCall:
-      break;
-  }
-  return desired_string_load_kind;
-}
-
-HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
-    HLoadClass::LoadKind desired_class_load_kind) {
-  switch (desired_class_load_kind) {
-    case HLoadClass::LoadKind::kInvalid:
-      LOG(FATAL) << "UNREACHABLE";
-      UNREACHABLE();
-    case HLoadClass::LoadKind::kReferrersClass:
-      break;
-    case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadClass::LoadKind::kBootImageRelRo:
-    case HLoadClass::LoadKind::kBssEntry:
-      DCHECK(!Runtime::Current()->UseJitCompilation());
-      break;
-    case HLoadClass::LoadKind::kJitBootImageAddress:
-    case HLoadClass::LoadKind::kJitTableAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
-      break;
-    case HLoadClass::LoadKind::kRuntimeCall:
-      break;
-  }
-  return desired_class_load_kind;
-}
-
-Register CodeGeneratorMIPS::GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke,
-                                                                  Register temp) {
-  CHECK(!GetInstructionSetFeatures().IsR6());
-  CHECK(!GetGraph()->HasIrreducibleLoops());
-  CHECK_EQ(invoke->InputCount(), invoke->GetNumberOfArguments() + 1u);
-  Location location = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
-  if (!invoke->GetLocations()->Intrinsified()) {
-    return location.AsRegister<Register>();
-  }
-  // For intrinsics we allow any location, so it may be on the stack.
-  if (!location.IsRegister()) {
-    __ LoadFromOffset(kLoadWord, temp, SP, location.GetStackIndex());
-    return temp;
-  }
-  // For register locations, check if the register was saved. If so, get it from the stack.
-  // Note: There is a chance that the register was saved but not overwritten, so we could
-  // save one load. However, since this is just an intrinsic slow path we prefer this
-  // simple and more robust approach rather that trying to determine if that's the case.
-  SlowPathCode* slow_path = GetCurrentSlowPath();
-  DCHECK(slow_path != nullptr);  // For intrinsified invokes the call is emitted on the slow path.
-  if (slow_path->IsCoreRegisterSaved(location.AsRegister<Register>())) {
-    int stack_offset = slow_path->GetStackOffsetOfCoreRegister(location.AsRegister<Register>());
-    __ LoadFromOffset(kLoadWord, temp, SP, stack_offset);
-    return temp;
-  }
-  return location.AsRegister<Register>();
-}
-
-HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS::GetSupportedInvokeStaticOrDirectDispatch(
-      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      ArtMethod* method ATTRIBUTE_UNUSED) {
-  return desired_dispatch_info;
-}
-
-void CodeGeneratorMIPS::GenerateStaticOrDirectCall(
-    HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
-  // All registers are assumed to be correctly set up per the calling convention.
-  Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
-  HInvokeStaticOrDirect::MethodLoadKind method_load_kind = invoke->GetMethodLoadKind();
-  HInvokeStaticOrDirect::CodePtrLocation code_ptr_location = invoke->GetCodePtrLocation();
-  bool is_r6 = GetInstructionSetFeatures().IsR6();
-  bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
-  Register base_reg = (invoke->HasPcRelativeMethodLoadKind() && !is_r6 && !has_irreducible_loops)
-      ? GetInvokeStaticOrDirectExtraParameter(invoke, temp.AsRegister<Register>())
-      : ZERO;
-
-  switch (method_load_kind) {
-    case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
-      // temp = thread->string_init_entrypoint
-      uint32_t offset =
-          GetThreadOffset<kMipsPointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
-      __ LoadFromOffset(kLoadWord,
-                        temp.AsRegister<Register>(),
-                        TR,
-                        offset);
-      break;
-    }
-    case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
-      callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
-      break;
-    case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative: {
-      DCHECK(GetCompilerOptions().IsBootImage());
-      PcRelativePatchInfo* info_high = NewBootImageMethodPatch(invoke->GetTargetMethod());
-      PcRelativePatchInfo* info_low =
-          NewBootImageMethodPatch(invoke->GetTargetMethod(), info_high);
-      Register temp_reg = temp.AsRegister<Register>();
-      EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
-      __ Addiu(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label);
-      break;
-    }
-    case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
-      uint32_t boot_image_offset = GetBootImageOffset(invoke);
-      PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_offset);
-      PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
-      Register temp_reg = temp.AsRegister<Register>();
-      EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
-      __ Lw(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label);
-      break;
-    }
-    case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
-      PcRelativePatchInfo* info_high = NewMethodBssEntryPatch(
-          MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()));
-      PcRelativePatchInfo* info_low = NewMethodBssEntryPatch(
-          MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()), info_high);
-      Register temp_reg = temp.AsRegister<Register>();
-      EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base_reg);
-      __ Lw(temp_reg, TMP, /* imm16= */ 0x5678, &info_low->label);
-      break;
-    }
-    case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
-      __ LoadConst32(temp.AsRegister<Register>(), invoke->GetMethodAddress());
-      break;
-    case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: {
-      GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
-      return;  // No code pointer retrieval; the runtime performs the call directly.
-    }
-  }
-
-  switch (code_ptr_location) {
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
-      __ Bal(&frame_entry_label_);
-      break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
-      // T9 = callee_method->entry_point_from_quick_compiled_code_;
-      __ LoadFromOffset(kLoadWord,
-                        T9,
-                        callee_method.AsRegister<Register>(),
-                        ArtMethod::EntryPointFromQuickCompiledCodeOffset(
-                            kMipsPointerSize).Int32Value());
-      // T9()
-      __ Jalr(T9);
-      __ NopIfNoReordering();
-      break;
-  }
-  RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
-
-  DCHECK(!IsLeafMethod());
-}
-
-void InstructionCodeGeneratorMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
-  // Explicit clinit checks triggered by static invokes must have been pruned by
-  // art::PrepareForRegisterAllocation.
-  DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
-
-  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
-    return;
-  }
-
-  LocationSummary* locations = invoke->GetLocations();
-  codegen_->GenerateStaticOrDirectCall(invoke,
-                                       locations->HasTemps()
-                                           ? locations->GetTemp(0)
-                                           : Location::NoLocation());
-}
-
-void CodeGeneratorMIPS::GenerateVirtualCall(
-    HInvokeVirtual* invoke, Location temp_location, SlowPathCode* slow_path) {
-  // Use the calling convention instead of the location of the receiver, as
-  // intrinsics may have put the receiver in a different register. In the intrinsics
-  // slow path, the arguments have been moved to the right place, so here we are
-  // guaranteed that the receiver is the first register of the calling convention.
-  InvokeDexCallingConvention calling_convention;
-  Register receiver = calling_convention.GetRegisterAt(0);
-
-  Register temp = temp_location.AsRegister<Register>();
-  size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
-      invoke->GetVTableIndex(), kMipsPointerSize).SizeValue();
-  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsPointerSize);
-
-  // temp = object->GetClass();
-  __ LoadFromOffset(kLoadWord, temp, receiver, class_offset);
-  MaybeRecordImplicitNullCheck(invoke);
-  // Instead of simply (possibly) unpoisoning `temp` here, we should
-  // emit a read barrier for the previous class reference load.
-  // However this is not required in practice, as this is an
-  // intermediate/temporary reference and because the current
-  // concurrent copying collector keeps the from-space memory
-  // intact/accessible until the end of the marking phase (the
-  // concurrent copying collector may not in the future).
-  __ MaybeUnpoisonHeapReference(temp);
-  // temp = temp->GetMethodAt(method_offset);
-  __ LoadFromOffset(kLoadWord, temp, temp, method_offset);
-  // T9 = temp->GetEntryPoint();
-  __ LoadFromOffset(kLoadWord, T9, temp, entry_point.Int32Value());
-  // T9();
-  __ Jalr(T9);
-  __ NopIfNoReordering();
-  RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
-}
-
-void InstructionCodeGeneratorMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
-  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
-    return;
-  }
-
-  codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
-  DCHECK(!codegen_->IsLeafMethod());
-}
-
-void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
-  HLoadClass::LoadKind load_kind = cls->GetLoadKind();
-  if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
-    InvokeRuntimeCallingConvention calling_convention;
-    Location loc = Location::RegisterLocation(calling_convention.GetRegisterAt(0));
-    CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(cls, loc, loc);
-    return;
-  }
-  DCHECK(!cls->NeedsAccessCheck());
-  const bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-  const bool has_irreducible_loops = codegen_->GetGraph()->HasIrreducibleLoops();
-  const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
-  LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
-      ? LocationSummary::kCallOnSlowPath
-      : LocationSummary::kNoCall;
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
-  if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
-    locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
-  }
-  switch (load_kind) {
-    // We need an extra register for PC-relative literals on R2.
-    case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadClass::LoadKind::kBootImageRelRo:
-    case HLoadClass::LoadKind::kBssEntry:
-    case HLoadClass::LoadKind::kJitBootImageAddress:
-      if (isR6) {
-        break;
-      }
-      if (has_irreducible_loops) {
-        if (load_kind != HLoadClass::LoadKind::kJitBootImageAddress) {
-          codegen_->ClobberRA();
-        }
-        break;
-      }
-      FALLTHROUGH_INTENDED;
-    case HLoadClass::LoadKind::kReferrersClass:
-      locations->SetInAt(0, Location::RequiresRegister());
-      break;
-    default:
-      break;
-  }
-  locations->SetOut(Location::RequiresRegister());
-  if (load_kind == HLoadClass::LoadKind::kBssEntry) {
-    if (!kUseReadBarrier || kUseBakerReadBarrier) {
-      // Rely on the type resolution or initialization and marking to save everything we need.
-      locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
-    } else {
-      // For non-Baker read barriers we have a temp-clobbering call.
-    }
-  }
-}
-
-// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
-// move.
-void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
-  HLoadClass::LoadKind load_kind = cls->GetLoadKind();
-  if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
-    codegen_->GenerateLoadClassRuntimeCall(cls);
-    return;
-  }
-  DCHECK(!cls->NeedsAccessCheck());
-
-  LocationSummary* locations = cls->GetLocations();
-  Location out_loc = locations->Out();
-  Register out = out_loc.AsRegister<Register>();
-  Register base_or_current_method_reg;
-  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-  bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
-  switch (load_kind) {
-    // We need an extra register for PC-relative literals on R2.
-    case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadClass::LoadKind::kBootImageRelRo:
-    case HLoadClass::LoadKind::kBssEntry:
-    case HLoadClass::LoadKind::kJitBootImageAddress:
-      base_or_current_method_reg =
-          (isR6 || has_irreducible_loops) ? ZERO : locations->InAt(0).AsRegister<Register>();
-      break;
-    case HLoadClass::LoadKind::kReferrersClass:
-    case HLoadClass::LoadKind::kRuntimeCall:
-      base_or_current_method_reg = locations->InAt(0).AsRegister<Register>();
-      break;
-    default:
-      base_or_current_method_reg = ZERO;
-      break;
-  }
-
-  const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
-      ? kWithoutReadBarrier
-      : kCompilerReadBarrierOption;
-  bool generate_null_check = false;
-  switch (load_kind) {
-    case HLoadClass::LoadKind::kReferrersClass: {
-      DCHECK(!cls->CanCallRuntime());
-      DCHECK(!cls->MustGenerateClinitCheck());
-      // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
-      GenerateGcRootFieldLoad(cls,
-                              out_loc,
-                              base_or_current_method_reg,
-                              ArtMethod::DeclaringClassOffset().Int32Value(),
-                              read_barrier_option);
-      break;
-    }
-    case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
-      DCHECK(codegen_->GetCompilerOptions().IsBootImage());
-      DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
-      CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
-          codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
-      CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
-          codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex(), info_high);
-      codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
-                                                     out,
-                                                     base_or_current_method_reg);
-      __ Addiu(out, out, /* imm16= */ 0x5678, &info_low->label);
-      break;
-    }
-    case HLoadClass::LoadKind::kBootImageRelRo: {
-      DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
-      uint32_t boot_image_offset = codegen_->GetBootImageOffset(cls);
-      CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
-          codegen_->NewBootImageRelRoPatch(boot_image_offset);
-      CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
-          codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
-      codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
-                                                     out,
-                                                     base_or_current_method_reg);
-      __ Lw(out, out, /* imm16= */ 0x5678, &info_low->label);
-      break;
-    }
-    case HLoadClass::LoadKind::kBssEntry: {
-      CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high =
-          codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
-      CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
-          codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex(), bss_info_high);
-      codegen_->EmitPcRelativeAddressPlaceholderHigh(bss_info_high,
-                                                     out,
-                                                     base_or_current_method_reg);
-      GenerateGcRootFieldLoad(cls,
-                              out_loc,
-                              out,
-                              /* offset= */ 0x5678,
-                              read_barrier_option,
-                              &info_low->label);
-      generate_null_check = true;
-      break;
-    }
-    case HLoadClass::LoadKind::kJitBootImageAddress: {
-      DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
-      uint32_t address = reinterpret_cast32<uint32_t>(cls->GetClass().Get());
-      DCHECK_NE(address, 0u);
-      if (isR6 || !has_irreducible_loops) {
-        __ LoadLiteral(out,
-                       base_or_current_method_reg,
-                       codegen_->DeduplicateBootImageAddressLiteral(address));
-      } else {
-        __ LoadConst32(out, address);
-      }
-      break;
-    }
-    case HLoadClass::LoadKind::kJitTableAddress: {
-      CodeGeneratorMIPS::JitPatchInfo* info = codegen_->NewJitRootClassPatch(cls->GetDexFile(),
-                                                                             cls->GetTypeIndex(),
-                                                                             cls->GetClass());
-      bool reordering = __ SetReorder(false);
-      __ Bind(&info->high_label);
-      __ Lui(out, /* imm16= */ 0x1234);
-      __ SetReorder(reordering);
-      GenerateGcRootFieldLoad(cls,
-                              out_loc,
-                              out,
-                              /* offset= */ 0x5678,
-                              read_barrier_option,
-                              &info->low_label);
-      break;
-    }
-    case HLoadClass::LoadKind::kRuntimeCall:
-    case HLoadClass::LoadKind::kInvalid:
-      LOG(FATAL) << "UNREACHABLE";
-      UNREACHABLE();
-  }
-
-  if (generate_null_check || cls->MustGenerateClinitCheck()) {
-    DCHECK(cls->CanCallRuntime());
-    SlowPathCodeMIPS* slow_path =
-        new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(cls, cls);
-    codegen_->AddSlowPath(slow_path);
-    if (generate_null_check) {
-      __ Beqz(out, slow_path->GetEntryLabel());
-    }
-    if (cls->MustGenerateClinitCheck()) {
-      GenerateClassInitializationCheck(slow_path, out);
-    } else {
-      __ Bind(slow_path->GetExitLabel());
-    }
-  }
-}
-
-void LocationsBuilderMIPS::VisitLoadMethodHandle(HLoadMethodHandle* load) {
-  InvokeRuntimeCallingConvention calling_convention;
-  Location loc = Location::RegisterLocation(calling_convention.GetRegisterAt(0));
-  CodeGenerator::CreateLoadMethodHandleRuntimeCallLocationSummary(load, loc, loc);
-}
-
-void InstructionCodeGeneratorMIPS::VisitLoadMethodHandle(HLoadMethodHandle* load) {
-  codegen_->GenerateLoadMethodHandleRuntimeCall(load);
-}
-
-void LocationsBuilderMIPS::VisitLoadMethodType(HLoadMethodType* load) {
-  InvokeRuntimeCallingConvention calling_convention;
-  Location loc = Location::RegisterLocation(calling_convention.GetRegisterAt(0));
-  CodeGenerator::CreateLoadMethodTypeRuntimeCallLocationSummary(load, loc, loc);
-}
-
-void InstructionCodeGeneratorMIPS::VisitLoadMethodType(HLoadMethodType* load) {
-  codegen_->GenerateLoadMethodTypeRuntimeCall(load);
-}
-
-static int32_t GetExceptionTlsOffset() {
-  return Thread::ExceptionOffset<kMipsPointerSize>().Int32Value();
-}
-
-void LocationsBuilderMIPS::VisitLoadException(HLoadException* load) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
-  locations->SetOut(Location::RequiresRegister());
-}
-
-void InstructionCodeGeneratorMIPS::VisitLoadException(HLoadException* load) {
-  Register out = load->GetLocations()->Out().AsRegister<Register>();
-  __ LoadFromOffset(kLoadWord, out, TR, GetExceptionTlsOffset());
-}
-
-void LocationsBuilderMIPS::VisitClearException(HClearException* clear) {
-  new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
-}
-
-void InstructionCodeGeneratorMIPS::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
-  __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
-}
-
-void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
-  LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
-  HLoadString::LoadKind load_kind = load->GetLoadKind();
-  const bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-  const bool has_irreducible_loops = codegen_->GetGraph()->HasIrreducibleLoops();
-  switch (load_kind) {
-    // We need an extra register for PC-relative literals on R2.
-    case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadString::LoadKind::kBootImageRelRo:
-    case HLoadString::LoadKind::kBssEntry:
-    case HLoadString::LoadKind::kJitBootImageAddress:
-      if (isR6) {
-        break;
-      }
-      if (has_irreducible_loops) {
-        if (load_kind != HLoadString::LoadKind::kJitBootImageAddress) {
-          codegen_->ClobberRA();
-        }
-        break;
-      }
-      FALLTHROUGH_INTENDED;
-    // We need an extra register for PC-relative dex cache accesses.
-    case HLoadString::LoadKind::kRuntimeCall:
-      locations->SetInAt(0, Location::RequiresRegister());
-      break;
-    default:
-      break;
-  }
-  if (load_kind == HLoadString::LoadKind::kRuntimeCall) {
-    InvokeRuntimeCallingConvention calling_convention;
-    locations->SetOut(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  } else {
-    locations->SetOut(Location::RequiresRegister());
-    if (load_kind == HLoadString::LoadKind::kBssEntry) {
-      if (!kUseReadBarrier || kUseBakerReadBarrier) {
-        // Rely on the pResolveString and marking to save everything we need.
-        locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
-      } else {
-        // For non-Baker read barriers we have a temp-clobbering call.
-      }
-    }
-  }
-}
-
-// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
-// move.
-void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
-  HLoadString::LoadKind load_kind = load->GetLoadKind();
-  LocationSummary* locations = load->GetLocations();
-  Location out_loc = locations->Out();
-  Register out = out_loc.AsRegister<Register>();
-  Register base_or_current_method_reg;
-  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-  bool has_irreducible_loops = GetGraph()->HasIrreducibleLoops();
-  switch (load_kind) {
-    // We need an extra register for PC-relative literals on R2.
-    case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadString::LoadKind::kBootImageRelRo:
-    case HLoadString::LoadKind::kBssEntry:
-    case HLoadString::LoadKind::kJitBootImageAddress:
-      base_or_current_method_reg =
-          (isR6 || has_irreducible_loops) ? ZERO : locations->InAt(0).AsRegister<Register>();
-      break;
-    default:
-      base_or_current_method_reg = ZERO;
-      break;
-  }
-
-  switch (load_kind) {
-    case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
-      DCHECK(codegen_->GetCompilerOptions().IsBootImage());
-      CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
-          codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex());
-      CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
-          codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
-      codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
-                                                     out,
-                                                     base_or_current_method_reg);
-      __ Addiu(out, out, /* imm16= */ 0x5678, &info_low->label);
-      return;
-    }
-    case HLoadString::LoadKind::kBootImageRelRo: {
-      DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
-      uint32_t boot_image_offset = codegen_->GetBootImageOffset(load);
-      CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
-          codegen_->NewBootImageRelRoPatch(boot_image_offset);
-      CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
-          codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
-      codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
-                                                     out,
-                                                     base_or_current_method_reg);
-      __ Lw(out, out, /* imm16= */ 0x5678, &info_low->label);
-      return;
-    }
-    case HLoadString::LoadKind::kBssEntry: {
-      CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
-          codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
-      CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
-          codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
-      codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
-                                                     out,
-                                                     base_or_current_method_reg);
-      GenerateGcRootFieldLoad(load,
-                              out_loc,
-                              out,
-                              /* offset= */ 0x5678,
-                              kCompilerReadBarrierOption,
-                              &info_low->label);
-      SlowPathCodeMIPS* slow_path =
-          new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS(load);
-      codegen_->AddSlowPath(slow_path);
-      __ Beqz(out, slow_path->GetEntryLabel());
-      __ Bind(slow_path->GetExitLabel());
-      return;
-    }
-    case HLoadString::LoadKind::kJitBootImageAddress: {
-      uint32_t address = reinterpret_cast32<uint32_t>(load->GetString().Get());
-      DCHECK_NE(address, 0u);
-      if (isR6 || !has_irreducible_loops) {
-        __ LoadLiteral(out,
-                       base_or_current_method_reg,
-                       codegen_->DeduplicateBootImageAddressLiteral(address));
-      } else {
-        __ LoadConst32(out, address);
-      }
-      return;
-    }
-    case HLoadString::LoadKind::kJitTableAddress: {
-      CodeGeneratorMIPS::JitPatchInfo* info =
-          codegen_->NewJitRootStringPatch(load->GetDexFile(),
-                                          load->GetStringIndex(),
-                                          load->GetString());
-      bool reordering = __ SetReorder(false);
-      __ Bind(&info->high_label);
-      __ Lui(out, /* imm16= */ 0x1234);
-      __ SetReorder(reordering);
-      GenerateGcRootFieldLoad(load,
-                              out_loc,
-                              out,
-                              /* offset= */ 0x5678,
-                              kCompilerReadBarrierOption,
-                              &info->low_label);
-      return;
-    }
-    default:
-      break;
-  }
-
-  // TODO: Re-add the compiler code to do string dex cache lookup again.
-  DCHECK(load_kind == HLoadString::LoadKind::kRuntimeCall);
-  InvokeRuntimeCallingConvention calling_convention;
-  DCHECK_EQ(calling_convention.GetRegisterAt(0), out);
-  __ LoadConst32(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
-  codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
-  CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
-}
-
-void LocationsBuilderMIPS::VisitLongConstant(HLongConstant* constant) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
-  locations->SetOut(Location::ConstantLocation(constant));
-}
-
-void InstructionCodeGeneratorMIPS::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
-  // Will be generated at use site.
-}
-
-void LocationsBuilderMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
-      instruction, LocationSummary::kCallOnMainOnly);
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-}
-
-void InstructionCodeGeneratorMIPS::VisitMonitorOperation(HMonitorOperation* instruction) {
-  if (instruction->IsEnter()) {
-    codegen_->InvokeRuntime(kQuickLockObject, instruction, instruction->GetDexPc());
-    CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
-  } else {
-    codegen_->InvokeRuntime(kQuickUnlockObject, instruction, instruction->GetDexPc());
-  }
-  CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
-}
-
-void LocationsBuilderMIPS::VisitMul(HMul* mul) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
-  switch (mul->GetResultType()) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RequiresRegister());
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-      break;
-
-    default:
-      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
-  }
-}
-
-void InstructionCodeGeneratorMIPS::VisitMul(HMul* instruction) {
-  DataType::Type type = instruction->GetType();
-  LocationSummary* locations = instruction->GetLocations();
-  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-
-  switch (type) {
-    case DataType::Type::kInt32: {
-      Register dst = locations->Out().AsRegister<Register>();
-      Register lhs = locations->InAt(0).AsRegister<Register>();
-      Register rhs = locations->InAt(1).AsRegister<Register>();
-
-      if (isR6) {
-        __ MulR6(dst, lhs, rhs);
-      } else {
-        __ MulR2(dst, lhs, rhs);
-      }
-      break;
-    }
-    case DataType::Type::kInt64: {
-      Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
-      Register dst_low = locations->Out().AsRegisterPairLow<Register>();
-      Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>();
-      Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>();
-      Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>();
-      Register rhs_low = locations->InAt(1).AsRegisterPairLow<Register>();
-
-      // Extra checks to protect caused by the existance of A1_A2.
-      // The algorithm is wrong if dst_high is either lhs_lo or rhs_lo:
-      // (e.g. lhs=a0_a1, rhs=a2_a3 and dst=a1_a2).
-      DCHECK_NE(dst_high, lhs_low);
-      DCHECK_NE(dst_high, rhs_low);
-
-      // A_B * C_D
-      // dst_hi:  [ low(A*D) + low(B*C) + hi(B*D) ]
-      // dst_lo:  [ low(B*D) ]
-      // Note: R2 and R6 MUL produce the low 32 bit of the multiplication result.
-
-      if (isR6) {
-        __ MulR6(TMP, lhs_high, rhs_low);
-        __ MulR6(dst_high, lhs_low, rhs_high);
-        __ Addu(dst_high, dst_high, TMP);
-        __ MuhuR6(TMP, lhs_low, rhs_low);
-        __ Addu(dst_high, dst_high, TMP);
-        __ MulR6(dst_low, lhs_low, rhs_low);
-      } else {
-        __ MulR2(TMP, lhs_high, rhs_low);
-        __ MulR2(dst_high, lhs_low, rhs_high);
-        __ Addu(dst_high, dst_high, TMP);
-        __ MultuR2(lhs_low, rhs_low);
-        __ Mfhi(TMP);
-        __ Addu(dst_high, dst_high, TMP);
-        __ Mflo(dst_low);
-      }
-      break;
-    }
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64: {
-      FRegister dst = locations->Out().AsFpuRegister<FRegister>();
-      FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
-      FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
-      if (type == DataType::Type::kFloat32) {
-        __ MulS(dst, lhs, rhs);
-      } else {
-        __ MulD(dst, lhs, rhs);
-      }
-      break;
-    }
-    default:
-      LOG(FATAL) << "Unexpected mul type " << type;
-  }
-}
-
-void LocationsBuilderMIPS::VisitNeg(HNeg* neg) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
-  switch (neg->GetResultType()) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-      break;
-
-    default:
-      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
-  }
-}
-
-void InstructionCodeGeneratorMIPS::VisitNeg(HNeg* instruction) {
-  DataType::Type type = instruction->GetType();
-  LocationSummary* locations = instruction->GetLocations();
-
-  switch (type) {
-    case DataType::Type::kInt32: {
-      Register dst = locations->Out().AsRegister<Register>();
-      Register src = locations->InAt(0).AsRegister<Register>();
-      __ Subu(dst, ZERO, src);
-      break;
-    }
-    case DataType::Type::kInt64: {
-      Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
-      Register dst_low = locations->Out().AsRegisterPairLow<Register>();
-      Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
-      Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
-      __ Subu(dst_low, ZERO, src_low);
-      __ Sltu(TMP, ZERO, dst_low);
-      __ Subu(dst_high, ZERO, src_high);
-      __ Subu(dst_high, dst_high, TMP);
-      break;
-    }
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64: {
-      FRegister dst = locations->Out().AsFpuRegister<FRegister>();
-      FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
-      if (type == DataType::Type::kFloat32) {
-        __ NegS(dst, src);
-      } else {
-        __ NegD(dst, src);
-      }
-      break;
-    }
-    default:
-      LOG(FATAL) << "Unexpected neg type " << type;
-  }
-}
-
-void LocationsBuilderMIPS::VisitNewArray(HNewArray* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
-      instruction, LocationSummary::kCallOnMainOnly);
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference));
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-}
-
-void InstructionCodeGeneratorMIPS::VisitNewArray(HNewArray* instruction) {
-  // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
-  QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
-  codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
-  CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
-  DCHECK(!codegen_->IsLeafMethod());
-}
-
-void LocationsBuilderMIPS::VisitNewInstance(HNewInstance* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
-      instruction, LocationSummary::kCallOnMainOnly);
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference));
-}
-
-void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) {
-  codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
-  CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
-}
-
-void LocationsBuilderMIPS::VisitNot(HNot* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void InstructionCodeGeneratorMIPS::VisitNot(HNot* instruction) {
-  DataType::Type type = instruction->GetType();
-  LocationSummary* locations = instruction->GetLocations();
-
-  switch (type) {
-    case DataType::Type::kInt32: {
-      Register dst = locations->Out().AsRegister<Register>();
-      Register src = locations->InAt(0).AsRegister<Register>();
-      __ Nor(dst, src, ZERO);
-      break;
-    }
-
-    case DataType::Type::kInt64: {
-      Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
-      Register dst_low = locations->Out().AsRegisterPairLow<Register>();
-      Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
-      Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
-      __ Nor(dst_high, src_high, ZERO);
-      __ Nor(dst_low, src_low, ZERO);
-      break;
-    }
-
-    default:
-      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
-  }
-}
-
-void LocationsBuilderMIPS::VisitBooleanNot(HBooleanNot* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void InstructionCodeGeneratorMIPS::VisitBooleanNot(HBooleanNot* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  __ Xori(locations->Out().AsRegister<Register>(),
-          locations->InAt(0).AsRegister<Register>(),
-          1);
-}
-
-void LocationsBuilderMIPS::VisitNullCheck(HNullCheck* instruction) {
-  LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
-  locations->SetInAt(0, Location::RequiresRegister());
-}
-
-void CodeGeneratorMIPS::GenerateImplicitNullCheck(HNullCheck* instruction) {
-  if (CanMoveNullCheckToUser(instruction)) {
-    return;
-  }
-  Location obj = instruction->GetLocations()->InAt(0);
-
-  __ Lw(ZERO, obj.AsRegister<Register>(), 0);
-  RecordPcInfo(instruction, instruction->GetDexPc());
-}
-
-void CodeGeneratorMIPS::GenerateExplicitNullCheck(HNullCheck* instruction) {
-  SlowPathCodeMIPS* slow_path = new (GetScopedAllocator()) NullCheckSlowPathMIPS(instruction);
-  AddSlowPath(slow_path);
-
-  Location obj = instruction->GetLocations()->InAt(0);
-
-  __ Beqz(obj.AsRegister<Register>(), slow_path->GetEntryLabel());
-}
-
-void InstructionCodeGeneratorMIPS::VisitNullCheck(HNullCheck* instruction) {
-  codegen_->GenerateNullCheck(instruction);
-}
-
-void LocationsBuilderMIPS::VisitOr(HOr* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitOr(HOr* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void LocationsBuilderMIPS::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unreachable";
-}
-
-void InstructionCodeGeneratorMIPS::VisitParallelMove(HParallelMove* instruction) {
-  if (instruction->GetNext()->IsSuspendCheck() &&
-      instruction->GetBlock()->GetLoopInformation() != nullptr) {
-    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
-    // The back edge will generate the suspend check.
-    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
-  }
-
-  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
-}
-
-void LocationsBuilderMIPS::VisitParameterValue(HParameterValue* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
-  if (location.IsStackSlot()) {
-    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
-  } else if (location.IsDoubleStackSlot()) {
-    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
-  }
-  locations->SetOut(location);
-}
-
-void InstructionCodeGeneratorMIPS::VisitParameterValue(HParameterValue* instruction
-                                                         ATTRIBUTE_UNUSED) {
-  // Nothing to do, the parameter is already at its location.
-}
-
-void LocationsBuilderMIPS::VisitCurrentMethod(HCurrentMethod* instruction) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
-  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
-}
-
-void InstructionCodeGeneratorMIPS::VisitCurrentMethod(HCurrentMethod* instruction
-                                                        ATTRIBUTE_UNUSED) {
-  // Nothing to do, the method is already at its location.
-}
-
-void LocationsBuilderMIPS::VisitPhi(HPhi* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-  for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
-    locations->SetInAt(i, Location::Any());
-  }
-  locations->SetOut(Location::Any());
-}
-
-void InstructionCodeGeneratorMIPS::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unreachable";
-}
-
-void LocationsBuilderMIPS::VisitRem(HRem* rem) {
-  DataType::Type type = rem->GetResultType();
-  bool call_rem;
-  if ((type == DataType::Type::kInt64) && rem->InputAt(1)->IsConstant()) {
-    int64_t imm = CodeGenerator::GetInt64ValueOf(rem->InputAt(1)->AsConstant());
-    call_rem = (imm != 0) && !IsPowerOfTwo(static_cast<uint64_t>(AbsOrMin(imm)));
-  } else {
-    call_rem = (type != DataType::Type::kInt32);
-  }
-  LocationSummary::CallKind call_kind = call_rem
-      ? LocationSummary::kCallOnMainOnly
-      : LocationSummary::kNoCall;
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind);
-
-  switch (type) {
-    case DataType::Type::kInt32:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-
-    case DataType::Type::kInt64: {
-      if (call_rem) {
-        InvokeRuntimeCallingConvention calling_convention;
-        locations->SetInAt(0, Location::RegisterPairLocation(
-            calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
-        locations->SetInAt(1, Location::RegisterPairLocation(
-            calling_convention.GetRegisterAt(2), calling_convention.GetRegisterAt(3)));
-        locations->SetOut(calling_convention.GetReturnLocation(type));
-      } else {
-        locations->SetInAt(0, Location::RequiresRegister());
-        locations->SetInAt(1, Location::ConstantLocation(rem->InputAt(1)->AsConstant()));
-        locations->SetOut(Location::RequiresRegister());
-      }
-      break;
-    }
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64: {
-      InvokeRuntimeCallingConvention calling_convention;
-      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
-      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
-      locations->SetOut(calling_convention.GetReturnLocation(type));
-      break;
-    }
-
-    default:
-      LOG(FATAL) << "Unexpected rem type " << type;
-  }
-}
-
-void InstructionCodeGeneratorMIPS::VisitRem(HRem* instruction) {
-  DataType::Type type = instruction->GetType();
-  LocationSummary* locations = instruction->GetLocations();
-
-  switch (type) {
-    case DataType::Type::kInt32:
-      GenerateDivRemIntegral(instruction);
-      break;
-    case DataType::Type::kInt64: {
-      if (locations->InAt(1).IsConstant()) {
-        int64_t imm = locations->InAt(1).GetConstant()->AsLongConstant()->GetValue();
-        if (imm == 0) {
-          // Do not generate anything. DivZeroCheck would prevent any code to be executed.
-        } else if (imm == 1 || imm == -1) {
-          DivRemOneOrMinusOne(instruction);
-        } else {
-          DCHECK(IsPowerOfTwo(static_cast<uint64_t>(AbsOrMin(imm))));
-          DivRemByPowerOfTwo(instruction);
-        }
-      } else {
-        codegen_->InvokeRuntime(kQuickLmod, instruction, instruction->GetDexPc());
-        CheckEntrypointTypes<kQuickLmod, int64_t, int64_t, int64_t>();
-      }
-      break;
-    }
-    case DataType::Type::kFloat32: {
-      codegen_->InvokeRuntime(kQuickFmodf, instruction, instruction->GetDexPc());
-      CheckEntrypointTypes<kQuickFmodf, float, float, float>();
-      break;
-    }
-    case DataType::Type::kFloat64: {
-      codegen_->InvokeRuntime(kQuickFmod, instruction, instruction->GetDexPc());
-      CheckEntrypointTypes<kQuickFmod, double, double, double>();
-      break;
-    }
-    default:
-      LOG(FATAL) << "Unexpected rem type " << type;
-  }
-}
-
-static void CreateMinMaxLocations(ArenaAllocator* allocator, HBinaryOperation* minmax) {
-  LocationSummary* locations = new (allocator) LocationSummary(minmax);
-  switch (minmax->GetResultType()) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RequiresRegister());
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kOutputOverlap);
-      break;
-    default:
-      LOG(FATAL) << "Unexpected type for HMinMax " << minmax->GetResultType();
-  }
-}
-
-void InstructionCodeGeneratorMIPS::GenerateMinMaxInt(LocationSummary* locations,
-                                                     bool is_min,
-                                                     bool isR6,
-                                                     DataType::Type type) {
-  if (isR6) {
-    // Some architectures, such as ARM and MIPS (prior to r6), have a
-    // conditional move instruction which only changes the target
-    // (output) register if the condition is true (MIPS prior to r6 had
-    // MOVF, MOVT, MOVN, and MOVZ). The SELEQZ and SELNEZ instructions
-    // always change the target (output) register.  If the condition is
-    // true the output register gets the contents of the "rs" register;
-    // otherwise, the output register is set to zero. One consequence
-    // of this is that to implement something like "rd = c==0 ? rs : rt"
-    // MIPS64r6 needs to use a pair of SELEQZ/SELNEZ instructions.
-    // After executing this pair of instructions one of the output
-    // registers from the pair will necessarily contain zero. Then the
-    // code ORs the output registers from the SELEQZ/SELNEZ instructions
-    // to get the final result.
-    //
-    // The initial test to see if the output register is same as the
-    // first input register is needed to make sure that value in the
-    // first input register isn't clobbered before we've finished
-    // computing the output value. The logic in the corresponding else
-    // clause performs the same task but makes sure the second input
-    // register isn't clobbered in the event that it's the same register
-    // as the output register; the else clause also handles the case
-    // where the output register is distinct from both the first, and the
-    // second input registers.
-    if (type == DataType::Type::kInt64) {
-      Register a_lo = locations->InAt(0).AsRegisterPairLow<Register>();
-      Register a_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
-      Register b_lo = locations->InAt(1).AsRegisterPairLow<Register>();
-      Register b_hi = locations->InAt(1).AsRegisterPairHigh<Register>();
-      Register out_lo = locations->Out().AsRegisterPairLow<Register>();
-      Register out_hi = locations->Out().AsRegisterPairHigh<Register>();
-
-      MipsLabel compare_done;
-
-      if (a_lo == b_lo) {
-        if (out_lo != a_lo) {
-          __ Move(out_lo, a_lo);
-          __ Move(out_hi, a_hi);
-        }
-      } else {
-        __ Slt(TMP, b_hi, a_hi);
-        __ Bne(b_hi, a_hi, &compare_done);
-
-        __ Sltu(TMP, b_lo, a_lo);
-
-        __ Bind(&compare_done);
-
-        if (is_min) {
-          __ Seleqz(AT, a_lo, TMP);
-          __ Selnez(out_lo, b_lo, TMP);  // Safe even if out_lo == a_lo/b_lo
-                                         // because at this point we're
-                                         // done using a_lo/b_lo.
-        } else {
-          __ Selnez(AT, a_lo, TMP);
-          __ Seleqz(out_lo, b_lo, TMP);  // ditto
-        }
-        __ Or(out_lo, out_lo, AT);
-        if (is_min) {
-          __ Seleqz(AT, a_hi, TMP);
-          __ Selnez(out_hi, b_hi, TMP);  // ditto but for out_hi & a_hi/b_hi
-        } else {
-          __ Selnez(AT, a_hi, TMP);
-          __ Seleqz(out_hi, b_hi, TMP);  // ditto but for out_hi & a_hi/b_hi
-        }
-        __ Or(out_hi, out_hi, AT);
-      }
-    } else {
-      DCHECK_EQ(type, DataType::Type::kInt32);
-      Register a = locations->InAt(0).AsRegister<Register>();
-      Register b = locations->InAt(1).AsRegister<Register>();
-      Register out = locations->Out().AsRegister<Register>();
-
-      if (a == b) {
-        if (out != a) {
-          __ Move(out, a);
-        }
-      } else {
-        __ Slt(AT, b, a);
-        if (is_min) {
-          __ Seleqz(TMP, a, AT);
-          __ Selnez(AT, b, AT);
-        } else {
-          __ Selnez(TMP, a, AT);
-          __ Seleqz(AT, b, AT);
-        }
-        __ Or(out, TMP, AT);
-      }
-    }
-  } else {  // !isR6
-    if (type == DataType::Type::kInt64) {
-      Register a_lo = locations->InAt(0).AsRegisterPairLow<Register>();
-      Register a_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
-      Register b_lo = locations->InAt(1).AsRegisterPairLow<Register>();
-      Register b_hi = locations->InAt(1).AsRegisterPairHigh<Register>();
-      Register out_lo = locations->Out().AsRegisterPairLow<Register>();
-      Register out_hi = locations->Out().AsRegisterPairHigh<Register>();
-
-      MipsLabel compare_done;
-
-      if (a_lo == b_lo) {
-        if (out_lo != a_lo) {
-          __ Move(out_lo, a_lo);
-          __ Move(out_hi, a_hi);
-        }
-      } else {
-        __ Slt(TMP, a_hi, b_hi);
-        __ Bne(a_hi, b_hi, &compare_done);
-
-        __ Sltu(TMP, a_lo, b_lo);
-
-        __ Bind(&compare_done);
-
-        if (is_min) {
-          if (out_lo != a_lo) {
-            __ Movn(out_hi, a_hi, TMP);
-            __ Movn(out_lo, a_lo, TMP);
-          }
-          if (out_lo != b_lo) {
-            __ Movz(out_hi, b_hi, TMP);
-            __ Movz(out_lo, b_lo, TMP);
-          }
-        } else {
-          if (out_lo != a_lo) {
-            __ Movz(out_hi, a_hi, TMP);
-            __ Movz(out_lo, a_lo, TMP);
-          }
-          if (out_lo != b_lo) {
-            __ Movn(out_hi, b_hi, TMP);
-            __ Movn(out_lo, b_lo, TMP);
-          }
-        }
-      }
-    } else {
-      DCHECK_EQ(type, DataType::Type::kInt32);
-      Register a = locations->InAt(0).AsRegister<Register>();
-      Register b = locations->InAt(1).AsRegister<Register>();
-      Register out = locations->Out().AsRegister<Register>();
-
-      if (a == b) {
-        if (out != a) {
-          __ Move(out, a);
-        }
-      } else {
-        __ Slt(AT, a, b);
-        if (is_min) {
-          if (out != a) {
-            __ Movn(out, a, AT);
-          }
-          if (out != b) {
-            __ Movz(out, b, AT);
-          }
-        } else {
-          if (out != a) {
-            __ Movz(out, a, AT);
-          }
-          if (out != b) {
-            __ Movn(out, b, AT);
-          }
-        }
-      }
-    }
-  }
-}
-
-void InstructionCodeGeneratorMIPS::GenerateMinMaxFP(LocationSummary* locations,
-                                                    bool is_min,
-                                                    bool isR6,
-                                                    DataType::Type type) {
-  FRegister out = locations->Out().AsFpuRegister<FRegister>();
-  FRegister a = locations->InAt(0).AsFpuRegister<FRegister>();
-  FRegister b = locations->InAt(1).AsFpuRegister<FRegister>();
-
-  if (isR6) {
-    MipsLabel noNaNs;
-    MipsLabel done;
-    FRegister ftmp = ((out != a) && (out != b)) ? out : FTMP;
-
-    // When Java computes min/max it prefers a NaN to a number; the
-    // behavior of MIPSR6 is to prefer numbers to NaNs, i.e., if one of
-    // the inputs is a NaN and the other is a valid number, the MIPS
-    // instruction will return the number; Java wants the NaN value
-    // returned. This is why there is extra logic preceding the use of
-    // the MIPS min.fmt/max.fmt instructions. If either a, or b holds a
-    // NaN, return the NaN, otherwise return the min/max.
-    if (type == DataType::Type::kFloat64) {
-      __ CmpUnD(FTMP, a, b);
-      __ Bc1eqz(FTMP, &noNaNs);
-
-      // One of the inputs is a NaN
-      __ CmpEqD(ftmp, a, a);
-      // If a == a then b is the NaN, otherwise a is the NaN.
-      __ SelD(ftmp, a, b);
-
-      if (ftmp != out) {
-        __ MovD(out, ftmp);
-      }
-
-      __ B(&done);
-
-      __ Bind(&noNaNs);
-
-      if (is_min) {
-        __ MinD(out, a, b);
-      } else {
-        __ MaxD(out, a, b);
-      }
-    } else {
-      DCHECK_EQ(type, DataType::Type::kFloat32);
-      __ CmpUnS(FTMP, a, b);
-      __ Bc1eqz(FTMP, &noNaNs);
-
-      // One of the inputs is a NaN
-      __ CmpEqS(ftmp, a, a);
-      // If a == a then b is the NaN, otherwise a is the NaN.
-      __ SelS(ftmp, a, b);
-
-      if (ftmp != out) {
-        __ MovS(out, ftmp);
-      }
-
-      __ B(&done);
-
-      __ Bind(&noNaNs);
-
-      if (is_min) {
-        __ MinS(out, a, b);
-      } else {
-        __ MaxS(out, a, b);
-      }
-    }
-
-    __ Bind(&done);
-
-  } else {  // !isR6
-    MipsLabel ordered;
-    MipsLabel compare;
-    MipsLabel select;
-    MipsLabel done;
-
-    if (type == DataType::Type::kFloat64) {
-      __ CunD(a, b);
-    } else {
-      DCHECK_EQ(type, DataType::Type::kFloat32);
-      __ CunS(a, b);
-    }
-    __ Bc1f(&ordered);
-
-    // a or b (or both) is a NaN. Return one, which is a NaN.
-    if (type == DataType::Type::kFloat64) {
-      __ CeqD(b, b);
-    } else {
-      __ CeqS(b, b);
-    }
-    __ B(&select);
-
-    __ Bind(&ordered);
-
-    // Neither is a NaN.
-    // a == b? (-0.0 compares equal with +0.0)
-    // If equal, handle zeroes, else compare further.
-    if (type == DataType::Type::kFloat64) {
-      __ CeqD(a, b);
-    } else {
-      __ CeqS(a, b);
-    }
-    __ Bc1f(&compare);
-
-    // a == b either bit for bit or one is -0.0 and the other is +0.0.
-    if (type == DataType::Type::kFloat64) {
-      __ MoveFromFpuHigh(TMP, a);
-      __ MoveFromFpuHigh(AT, b);
-    } else {
-      __ Mfc1(TMP, a);
-      __ Mfc1(AT, b);
-    }
-
-    if (is_min) {
-      // -0.0 prevails over +0.0.
-      __ Or(TMP, TMP, AT);
-    } else {
-      // +0.0 prevails over -0.0.
-      __ And(TMP, TMP, AT);
-    }
-
-    if (type == DataType::Type::kFloat64) {
-      __ Mfc1(AT, a);
-      __ Mtc1(AT, out);
-      __ MoveToFpuHigh(TMP, out);
-    } else {
-      __ Mtc1(TMP, out);
-    }
-    __ B(&done);
-
-    __ Bind(&compare);
-
-    if (type == DataType::Type::kFloat64) {
-      if (is_min) {
-        // return (a <= b) ? a : b;
-        __ ColeD(a, b);
-      } else {
-        // return (a >= b) ? a : b;
-        __ ColeD(b, a);  // b <= a
-      }
-    } else {
-      if (is_min) {
-        // return (a <= b) ? a : b;
-        __ ColeS(a, b);
-      } else {
-        // return (a >= b) ? a : b;
-        __ ColeS(b, a);  // b <= a
-      }
-    }
-
-    __ Bind(&select);
-
-    if (type == DataType::Type::kFloat64) {
-      __ MovtD(out, a);
-      __ MovfD(out, b);
-    } else {
-      __ MovtS(out, a);
-      __ MovfS(out, b);
-    }
-
-    __ Bind(&done);
-  }
-}
-
-void InstructionCodeGeneratorMIPS::GenerateMinMax(HBinaryOperation* minmax, bool is_min) {
-  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-  DataType::Type type = minmax->GetResultType();
-  switch (type) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      GenerateMinMaxInt(minmax->GetLocations(), is_min, isR6, type);
-      break;
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      GenerateMinMaxFP(minmax->GetLocations(), is_min, isR6, type);
-      break;
-    default:
-      LOG(FATAL) << "Unexpected type for HMinMax " << type;
-  }
-}
-
-void LocationsBuilderMIPS::VisitMin(HMin* min) {
-  CreateMinMaxLocations(GetGraph()->GetAllocator(), min);
-}
-
-void InstructionCodeGeneratorMIPS::VisitMin(HMin* min) {
-  GenerateMinMax(min, /*is_min*/ true);
-}
-
-void LocationsBuilderMIPS::VisitMax(HMax* max) {
-  CreateMinMaxLocations(GetGraph()->GetAllocator(), max);
-}
-
-void InstructionCodeGeneratorMIPS::VisitMax(HMax* max) {
-  GenerateMinMax(max, /*is_min*/ false);
-}
-
-void LocationsBuilderMIPS::VisitAbs(HAbs* abs) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(abs);
-  switch (abs->GetResultType()) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-      break;
-    default:
-      LOG(FATAL) << "Unexpected abs type " << abs->GetResultType();
-  }
-}
-
-void InstructionCodeGeneratorMIPS::GenerateAbsFP(LocationSummary* locations,
-                                                 DataType::Type type,
-                                                 bool isR2OrNewer,
-                                                 bool isR6) {
-  FRegister in = locations->InAt(0).AsFpuRegister<FRegister>();
-  FRegister out = locations->Out().AsFpuRegister<FRegister>();
-
-  // Note, as a "quality of implementation", rather than pure "spec compliance", we require that
-  // Math.abs() clears the sign bit (but changes nothing else) for all numbers, including NaN
-  // (signaling NaN may become quiet though).
-  //
-  // The ABS.fmt instructions (abs.s and abs.d) do exactly that when NAN2008=1 (R6). For this case,
-  // both regular floating point numbers and NAN values are treated alike, only the sign bit is
-  // affected by this instruction.
-  // But when NAN2008=0 (R2 and before), the ABS.fmt instructions can't be used. For this case, any
-  // NaN operand signals invalid operation. This means that other bits (not just sign bit) might be
-  // changed when doing abs(NaN). Because of that, we clear sign bit in a different way.
-  if (isR6) {
-    if (type == DataType::Type::kFloat64) {
-      __ AbsD(out, in);
-    } else {
-      DCHECK_EQ(type, DataType::Type::kFloat32);
-      __ AbsS(out, in);
-    }
-  } else {
-    if (type == DataType::Type::kFloat64) {
-      if (in != out) {
-        __ MovD(out, in);
-      }
-      __ MoveFromFpuHigh(TMP, in);
-      // ins instruction is not available for R1.
-      if (isR2OrNewer) {
-        __ Ins(TMP, ZERO, 31, 1);
-      } else {
-        __ Sll(TMP, TMP, 1);
-        __ Srl(TMP, TMP, 1);
-      }
-      __ MoveToFpuHigh(TMP, out);
-    } else {
-      DCHECK_EQ(type, DataType::Type::kFloat32);
-      __ Mfc1(TMP, in);
-      // ins instruction is not available for R1.
-      if (isR2OrNewer) {
-        __ Ins(TMP, ZERO, 31, 1);
-      } else {
-        __ Sll(TMP, TMP, 1);
-        __ Srl(TMP, TMP, 1);
-      }
-      __ Mtc1(TMP, out);
-    }
-  }
-}
-
-void InstructionCodeGeneratorMIPS::VisitAbs(HAbs* abs) {
-  LocationSummary* locations = abs->GetLocations();
-  bool isR2OrNewer = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
-  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-  switch (abs->GetResultType()) {
-    case DataType::Type::kInt32: {
-      Register in = locations->InAt(0).AsRegister<Register>();
-      Register out = locations->Out().AsRegister<Register>();
-      __ Sra(AT, in, 31);
-      __ Xor(out, in, AT);
-      __ Subu(out, out, AT);
-      break;
-    }
-    case DataType::Type::kInt64: {
-      Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
-      Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
-      Register out_lo = locations->Out().AsRegisterPairLow<Register>();
-      Register out_hi = locations->Out().AsRegisterPairHigh<Register>();
-      // The comments in this section show the analogous operations which would
-      // be performed if we had 64-bit registers "in", and "out".
-      // __ Dsra32(AT, in, 31);
-      __ Sra(AT, in_hi, 31);
-      // __ Xor(out, in, AT);
-      __ Xor(TMP, in_lo, AT);
-      __ Xor(out_hi, in_hi, AT);
-      // __ Dsubu(out, out, AT);
-      __ Subu(out_lo, TMP, AT);
-      __ Sltu(TMP, out_lo, TMP);
-      __ Addu(out_hi, out_hi, TMP);
-      break;
-    }
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      GenerateAbsFP(locations, abs->GetResultType(), isR2OrNewer, isR6);
-      break;
-    default:
-      LOG(FATAL) << "Unexpected abs type " << abs->GetResultType();
-  }
-}
-
-void LocationsBuilderMIPS::VisitConstructorFence(HConstructorFence* constructor_fence) {
-  constructor_fence->SetLocations(nullptr);
-}
-
-void InstructionCodeGeneratorMIPS::VisitConstructorFence(
-    HConstructorFence* constructor_fence ATTRIBUTE_UNUSED) {
-  GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
-}
-
-void LocationsBuilderMIPS::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
-  memory_barrier->SetLocations(nullptr);
-}
-
-void InstructionCodeGeneratorMIPS::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
-  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
-}
-
-void LocationsBuilderMIPS::VisitReturn(HReturn* ret) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(ret);
-  DataType::Type return_type = ret->InputAt(0)->GetType();
-  locations->SetInAt(0, MipsReturnLocation(return_type));
-}
-
-void InstructionCodeGeneratorMIPS::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
-  codegen_->GenerateFrameExit();
-}
-
-void LocationsBuilderMIPS::VisitReturnVoid(HReturnVoid* ret) {
-  ret->SetLocations(nullptr);
-}
-
-void InstructionCodeGeneratorMIPS::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
-  codegen_->GenerateFrameExit();
-}
-
-void LocationsBuilderMIPS::VisitRor(HRor* ror) {
-  HandleShift(ror);
-}
-
-void InstructionCodeGeneratorMIPS::VisitRor(HRor* ror) {
-  HandleShift(ror);
-}
-
-void LocationsBuilderMIPS::VisitShl(HShl* shl) {
-  HandleShift(shl);
-}
-
-void InstructionCodeGeneratorMIPS::VisitShl(HShl* shl) {
-  HandleShift(shl);
-}
-
-void LocationsBuilderMIPS::VisitShr(HShr* shr) {
-  HandleShift(shr);
-}
-
-void InstructionCodeGeneratorMIPS::VisitShr(HShr* shr) {
-  HandleShift(shr);
-}
-
-void LocationsBuilderMIPS::VisitSub(HSub* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitSub(HSub* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void LocationsBuilderMIPS::VisitStaticFieldGet(HStaticFieldGet* instruction) {
-  HandleFieldGet(instruction, instruction->GetFieldInfo());
-}
-
-void InstructionCodeGeneratorMIPS::VisitStaticFieldGet(HStaticFieldGet* instruction) {
-  HandleFieldGet(instruction, instruction->GetFieldInfo(), instruction->GetDexPc());
-}
-
-void LocationsBuilderMIPS::VisitStaticFieldSet(HStaticFieldSet* instruction) {
-  HandleFieldSet(instruction, instruction->GetFieldInfo());
-}
-
-void InstructionCodeGeneratorMIPS::VisitStaticFieldSet(HStaticFieldSet* instruction) {
-  HandleFieldSet(instruction,
-                 instruction->GetFieldInfo(),
-                 instruction->GetDexPc(),
-                 instruction->GetValueCanBeNull());
-}
-
-void LocationsBuilderMIPS::VisitUnresolvedInstanceFieldGet(
-    HUnresolvedInstanceFieldGet* instruction) {
-  FieldAccessCallingConventionMIPS calling_convention;
-  codegen_->CreateUnresolvedFieldLocationSummary(instruction,
-                                                 instruction->GetFieldType(),
-                                                 calling_convention);
-}
-
-void InstructionCodeGeneratorMIPS::VisitUnresolvedInstanceFieldGet(
-    HUnresolvedInstanceFieldGet* instruction) {
-  FieldAccessCallingConventionMIPS calling_convention;
-  codegen_->GenerateUnresolvedFieldAccess(instruction,
-                                          instruction->GetFieldType(),
-                                          instruction->GetFieldIndex(),
-                                          instruction->GetDexPc(),
-                                          calling_convention);
-}
-
-void LocationsBuilderMIPS::VisitUnresolvedInstanceFieldSet(
-    HUnresolvedInstanceFieldSet* instruction) {
-  FieldAccessCallingConventionMIPS calling_convention;
-  codegen_->CreateUnresolvedFieldLocationSummary(instruction,
-                                                 instruction->GetFieldType(),
-                                                 calling_convention);
-}
-
-void InstructionCodeGeneratorMIPS::VisitUnresolvedInstanceFieldSet(
-    HUnresolvedInstanceFieldSet* instruction) {
-  FieldAccessCallingConventionMIPS calling_convention;
-  codegen_->GenerateUnresolvedFieldAccess(instruction,
-                                          instruction->GetFieldType(),
-                                          instruction->GetFieldIndex(),
-                                          instruction->GetDexPc(),
-                                          calling_convention);
-}
-
-void LocationsBuilderMIPS::VisitUnresolvedStaticFieldGet(
-    HUnresolvedStaticFieldGet* instruction) {
-  FieldAccessCallingConventionMIPS calling_convention;
-  codegen_->CreateUnresolvedFieldLocationSummary(instruction,
-                                                 instruction->GetFieldType(),
-                                                 calling_convention);
-}
-
-void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldGet(
-    HUnresolvedStaticFieldGet* instruction) {
-  FieldAccessCallingConventionMIPS calling_convention;
-  codegen_->GenerateUnresolvedFieldAccess(instruction,
-                                          instruction->GetFieldType(),
-                                          instruction->GetFieldIndex(),
-                                          instruction->GetDexPc(),
-                                          calling_convention);
-}
-
-void LocationsBuilderMIPS::VisitUnresolvedStaticFieldSet(
-    HUnresolvedStaticFieldSet* instruction) {
-  FieldAccessCallingConventionMIPS calling_convention;
-  codegen_->CreateUnresolvedFieldLocationSummary(instruction,
-                                                 instruction->GetFieldType(),
-                                                 calling_convention);
-}
-
-void InstructionCodeGeneratorMIPS::VisitUnresolvedStaticFieldSet(
-    HUnresolvedStaticFieldSet* instruction) {
-  FieldAccessCallingConventionMIPS calling_convention;
-  codegen_->GenerateUnresolvedFieldAccess(instruction,
-                                          instruction->GetFieldType(),
-                                          instruction->GetFieldIndex(),
-                                          instruction->GetDexPc(),
-                                          calling_convention);
-}
-
-void LocationsBuilderMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
-      instruction, LocationSummary::kCallOnSlowPath);
-  // In suspend check slow path, usually there are no caller-save registers at all.
-  // If SIMD instructions are present, however, we force spilling all live SIMD
-  // registers in full width (since the runtime only saves/restores lower part).
-  locations->SetCustomSlowPathCallerSaves(
-      GetGraph()->HasSIMD() ? RegisterSet::AllFpu() : RegisterSet::Empty());
-}
-
-void InstructionCodeGeneratorMIPS::VisitSuspendCheck(HSuspendCheck* instruction) {
-  HBasicBlock* block = instruction->GetBlock();
-  if (block->GetLoopInformation() != nullptr) {
-    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
-    // The back edge will generate the suspend check.
-    return;
-  }
-  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
-    // The goto will generate the suspend check.
-    return;
-  }
-  GenerateSuspendCheck(instruction, nullptr);
-}
-
-void LocationsBuilderMIPS::VisitThrow(HThrow* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
-      instruction, LocationSummary::kCallOnMainOnly);
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-}
-
-void InstructionCodeGeneratorMIPS::VisitThrow(HThrow* instruction) {
-  codegen_->InvokeRuntime(kQuickDeliverException, instruction, instruction->GetDexPc());
-  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
-}
-
-void LocationsBuilderMIPS::VisitTypeConversion(HTypeConversion* conversion) {
-  DataType::Type input_type = conversion->GetInputType();
-  DataType::Type result_type = conversion->GetResultType();
-  DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
-      << input_type << " -> " << result_type;
-  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-
-  if ((input_type == DataType::Type::kReference) || (input_type == DataType::Type::kVoid) ||
-      (result_type == DataType::Type::kReference) || (result_type == DataType::Type::kVoid)) {
-    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
-  }
-
-  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
-  if (!isR6 &&
-      ((DataType::IsFloatingPointType(result_type) && input_type == DataType::Type::kInt64) ||
-       (result_type == DataType::Type::kInt64 && DataType::IsFloatingPointType(input_type)))) {
-    call_kind = LocationSummary::kCallOnMainOnly;
-  }
-
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(conversion, call_kind);
-
-  if (call_kind == LocationSummary::kNoCall) {
-    if (DataType::IsFloatingPointType(input_type)) {
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-    } else {
-      locations->SetInAt(0, Location::RequiresRegister());
-    }
-
-    if (DataType::IsFloatingPointType(result_type)) {
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-    } else {
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-    }
-  } else {
-    InvokeRuntimeCallingConvention calling_convention;
-
-    if (DataType::IsFloatingPointType(input_type)) {
-      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
-    } else {
-      DCHECK_EQ(input_type, DataType::Type::kInt64);
-      locations->SetInAt(0, Location::RegisterPairLocation(
-                 calling_convention.GetRegisterAt(0), calling_convention.GetRegisterAt(1)));
-    }
-
-    locations->SetOut(calling_convention.GetReturnLocation(result_type));
-  }
-}
-
-void InstructionCodeGeneratorMIPS::VisitTypeConversion(HTypeConversion* conversion) {
-  LocationSummary* locations = conversion->GetLocations();
-  DataType::Type result_type = conversion->GetResultType();
-  DataType::Type input_type = conversion->GetInputType();
-  bool has_sign_extension = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
-  bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
-
-  DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
-      << input_type << " -> " << result_type;
-
-  if (result_type == DataType::Type::kInt64 && DataType::IsIntegralType(input_type)) {
-    Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
-    Register dst_low = locations->Out().AsRegisterPairLow<Register>();
-    Register src = locations->InAt(0).AsRegister<Register>();
-
-    if (dst_low != src) {
-      __ Move(dst_low, src);
-    }
-    __ Sra(dst_high, src, 31);
-  } else if (DataType::IsIntegralType(result_type) && DataType::IsIntegralType(input_type)) {
-    Register dst = locations->Out().AsRegister<Register>();
-    Register src = (input_type == DataType::Type::kInt64)
-        ? locations->InAt(0).AsRegisterPairLow<Register>()
-        : locations->InAt(0).AsRegister<Register>();
-
-    switch (result_type) {
-      case DataType::Type::kUint8:
-        __ Andi(dst, src, 0xFF);
-        break;
-      case DataType::Type::kInt8:
-        if (has_sign_extension) {
-          __ Seb(dst, src);
-        } else {
-          __ Sll(dst, src, 24);
-          __ Sra(dst, dst, 24);
-        }
-        break;
-      case DataType::Type::kUint16:
-        __ Andi(dst, src, 0xFFFF);
-        break;
-      case DataType::Type::kInt16:
-        if (has_sign_extension) {
-          __ Seh(dst, src);
-        } else {
-          __ Sll(dst, src, 16);
-          __ Sra(dst, dst, 16);
-        }
-        break;
-      case DataType::Type::kInt32:
-        if (dst != src) {
-          __ Move(dst, src);
-        }
-        break;
-
-      default:
-        LOG(FATAL) << "Unexpected type conversion from " << input_type
-                   << " to " << result_type;
-    }
-  } else if (DataType::IsFloatingPointType(result_type) && DataType::IsIntegralType(input_type)) {
-    if (input_type == DataType::Type::kInt64) {
-      if (isR6) {
-        // cvt.s.l/cvt.d.l requires MIPSR2+ with FR=1. MIPS32R6 is implemented as a secondary
-        // architecture on top of MIPS64R6, which has FR=1, and therefore can use the instruction.
-        Register src_high = locations->InAt(0).AsRegisterPairHigh<Register>();
-        Register src_low = locations->InAt(0).AsRegisterPairLow<Register>();
-        FRegister dst = locations->Out().AsFpuRegister<FRegister>();
-        __ Mtc1(src_low, FTMP);
-        __ Mthc1(src_high, FTMP);
-        if (result_type == DataType::Type::kFloat32) {
-          __ Cvtsl(dst, FTMP);
-        } else {
-          __ Cvtdl(dst, FTMP);
-        }
-      } else {
-        QuickEntrypointEnum entrypoint =
-            (result_type == DataType::Type::kFloat32) ? kQuickL2f : kQuickL2d;
-        codegen_->InvokeRuntime(entrypoint, conversion, conversion->GetDexPc());
-        if (result_type == DataType::Type::kFloat32) {
-          CheckEntrypointTypes<kQuickL2f, float, int64_t>();
-        } else {
-          CheckEntrypointTypes<kQuickL2d, double, int64_t>();
-        }
-      }
-    } else {
-      Register src = locations->InAt(0).AsRegister<Register>();
-      FRegister dst = locations->Out().AsFpuRegister<FRegister>();
-      __ Mtc1(src, FTMP);
-      if (result_type == DataType::Type::kFloat32) {
-        __ Cvtsw(dst, FTMP);
-      } else {
-        __ Cvtdw(dst, FTMP);
-      }
-    }
-  } else if (DataType::IsIntegralType(result_type) && DataType::IsFloatingPointType(input_type)) {
-    CHECK(result_type == DataType::Type::kInt32 || result_type == DataType::Type::kInt64);
-
-    // When NAN2008=1 (R6), the truncate instruction caps the output at the minimum/maximum
-    // value of the output type if the input is outside of the range after the truncation or
-    // produces 0 when the input is a NaN. IOW, the three special cases produce three distinct
-    // results. This matches the desired float/double-to-int/long conversion exactly.
-    //
-    // When NAN2008=0 (R2 and before), the truncate instruction produces the maximum positive
-    // value when the input is either a NaN or is outside of the range of the output type
-    // after the truncation. IOW, the three special cases (NaN, too small, too big) produce
-    // the same result.
-    //
-    // The code takes care of the different behaviors by first comparing the input to the
-    // minimum output value (-2**-63 for truncating to long, -2**-31 for truncating to int).
-    // If the input is greater than or equal to the minimum, it procedes to the truncate
-    // instruction, which will handle such an input the same way irrespective of NAN2008.
-    // Otherwise the input is compared to itself to determine whether it is a NaN or not
-    // in order to return either zero or the minimum value.
-    if (result_type == DataType::Type::kInt64) {
-      if (isR6) {
-        // trunc.l.s/trunc.l.d requires MIPSR2+ with FR=1. MIPS32R6 is implemented as a secondary
-        // architecture on top of MIPS64R6, which has FR=1, and therefore can use the instruction.
-        FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
-        Register dst_high = locations->Out().AsRegisterPairHigh<Register>();
-        Register dst_low = locations->Out().AsRegisterPairLow<Register>();
-
-        if (input_type == DataType::Type::kFloat32) {
-          __ TruncLS(FTMP, src);
-        } else {
-          __ TruncLD(FTMP, src);
-        }
-        __ Mfc1(dst_low, FTMP);
-        __ Mfhc1(dst_high, FTMP);
-      } else {
-        QuickEntrypointEnum entrypoint =
-            (input_type == DataType::Type::kFloat32) ? kQuickF2l : kQuickD2l;
-        codegen_->InvokeRuntime(entrypoint, conversion, conversion->GetDexPc());
-        if (input_type == DataType::Type::kFloat32) {
-          CheckEntrypointTypes<kQuickF2l, int64_t, float>();
-        } else {
-          CheckEntrypointTypes<kQuickD2l, int64_t, double>();
-        }
-      }
-    } else {
-      FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
-      Register dst = locations->Out().AsRegister<Register>();
-      MipsLabel truncate;
-      MipsLabel done;
-
-      if (!isR6) {
-        if (input_type == DataType::Type::kFloat32) {
-          uint32_t min_val = bit_cast<uint32_t, float>(std::numeric_limits<int32_t>::min());
-          __ LoadConst32(TMP, min_val);
-          __ Mtc1(TMP, FTMP);
-        } else {
-          uint64_t min_val = bit_cast<uint64_t, double>(std::numeric_limits<int32_t>::min());
-          __ LoadConst32(TMP, High32Bits(min_val));
-          __ Mtc1(ZERO, FTMP);
-          __ MoveToFpuHigh(TMP, FTMP);
-        }
-
-        if (input_type == DataType::Type::kFloat32) {
-          __ ColeS(0, FTMP, src);
-        } else {
-          __ ColeD(0, FTMP, src);
-        }
-        __ Bc1t(0, &truncate);
-
-        if (input_type == DataType::Type::kFloat32) {
-          __ CeqS(0, src, src);
-        } else {
-          __ CeqD(0, src, src);
-        }
-        __ LoadConst32(dst, std::numeric_limits<int32_t>::min());
-        __ Movf(dst, ZERO, 0);
-
-        __ B(&done);
-
-        __ Bind(&truncate);
-      }
-
-      if (input_type == DataType::Type::kFloat32) {
-        __ TruncWS(FTMP, src);
-      } else {
-        __ TruncWD(FTMP, src);
-      }
-      __ Mfc1(dst, FTMP);
-
-      if (!isR6) {
-        __ Bind(&done);
-      }
-    }
-  } else if (DataType::IsFloatingPointType(result_type) &&
-             DataType::IsFloatingPointType(input_type)) {
-    FRegister dst = locations->Out().AsFpuRegister<FRegister>();
-    FRegister src = locations->InAt(0).AsFpuRegister<FRegister>();
-    if (result_type == DataType::Type::kFloat32) {
-      __ Cvtsd(dst, src);
-    } else {
-      __ Cvtds(dst, src);
-    }
-  } else {
-    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
-                << " to " << result_type;
-  }
-}
-
-void LocationsBuilderMIPS::VisitUShr(HUShr* ushr) {
-  HandleShift(ushr);
-}
-
-void InstructionCodeGeneratorMIPS::VisitUShr(HUShr* ushr) {
-  HandleShift(ushr);
-}
-
-void LocationsBuilderMIPS::VisitXor(HXor* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitXor(HXor* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void LocationsBuilderMIPS::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
-  // Nothing to do, this should be removed during prepare for register allocator.
-  LOG(FATAL) << "Unreachable";
-}
-
-void InstructionCodeGeneratorMIPS::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
-  // Nothing to do, this should be removed during prepare for register allocator.
-  LOG(FATAL) << "Unreachable";
-}
-
-void LocationsBuilderMIPS::VisitEqual(HEqual* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS::VisitEqual(HEqual* comp) {
-  HandleCondition(comp);
-}
-
-void LocationsBuilderMIPS::VisitNotEqual(HNotEqual* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS::VisitNotEqual(HNotEqual* comp) {
-  HandleCondition(comp);
-}
-
-void LocationsBuilderMIPS::VisitLessThan(HLessThan* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS::VisitLessThan(HLessThan* comp) {
-  HandleCondition(comp);
-}
-
-void LocationsBuilderMIPS::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
-  HandleCondition(comp);
-}
-
-void LocationsBuilderMIPS::VisitGreaterThan(HGreaterThan* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS::VisitGreaterThan(HGreaterThan* comp) {
-  HandleCondition(comp);
-}
-
-void LocationsBuilderMIPS::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
-  HandleCondition(comp);
-}
-
-void LocationsBuilderMIPS::VisitBelow(HBelow* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS::VisitBelow(HBelow* comp) {
-  HandleCondition(comp);
-}
-
-void LocationsBuilderMIPS::VisitBelowOrEqual(HBelowOrEqual* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS::VisitBelowOrEqual(HBelowOrEqual* comp) {
-  HandleCondition(comp);
-}
-
-void LocationsBuilderMIPS::VisitAbove(HAbove* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS::VisitAbove(HAbove* comp) {
-  HandleCondition(comp);
-}
-
-void LocationsBuilderMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS::VisitAboveOrEqual(HAboveOrEqual* comp) {
-  HandleCondition(comp);
-}
-
-void LocationsBuilderMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
-  locations->SetInAt(0, Location::RequiresRegister());
-  if (!codegen_->GetInstructionSetFeatures().IsR6()) {
-    uint32_t num_entries = switch_instr->GetNumEntries();
-    if (num_entries > InstructionCodeGeneratorMIPS::kPackedSwitchJumpTableThreshold) {
-      // When there's no HMipsComputeBaseMethodAddress input, R2 uses the NAL
-      // instruction to simulate PC-relative addressing when accessing the jump table.
-      // NAL clobbers RA. Make sure RA is preserved.
-      codegen_->ClobberRA();
-    }
-  }
-}
-
-void InstructionCodeGeneratorMIPS::GenPackedSwitchWithCompares(Register value_reg,
-                                                               int32_t lower_bound,
-                                                               uint32_t num_entries,
-                                                               HBasicBlock* switch_block,
-                                                               HBasicBlock* default_block) {
-  // Create a set of compare/jumps.
-  Register temp_reg = TMP;
-  __ Addiu32(temp_reg, value_reg, -lower_bound);
-  // Jump to default if index is negative
-  // Note: We don't check the case that index is positive while value < lower_bound, because in
-  // this case, index >= num_entries must be true. So that we can save one branch instruction.
-  __ Bltz(temp_reg, codegen_->GetLabelOf(default_block));
-
-  const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
-  // Jump to successors[0] if value == lower_bound.
-  __ Beqz(temp_reg, codegen_->GetLabelOf(successors[0]));
-  int32_t last_index = 0;
-  for (; num_entries - last_index > 2; last_index += 2) {
-    __ Addiu(temp_reg, temp_reg, -2);
-    // Jump to successors[last_index + 1] if value < case_value[last_index + 2].
-    __ Bltz(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
-    // Jump to successors[last_index + 2] if value == case_value[last_index + 2].
-    __ Beqz(temp_reg, codegen_->GetLabelOf(successors[last_index + 2]));
-  }
-  if (num_entries - last_index == 2) {
-    // The last missing case_value.
-    __ Addiu(temp_reg, temp_reg, -1);
-    __ Beqz(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
-  }
-
-  // And the default for any other value.
-  if (!codegen_->GoesToNextBlock(switch_block, default_block)) {
-    __ B(codegen_->GetLabelOf(default_block));
-  }
-}
-
-void InstructionCodeGeneratorMIPS::GenTableBasedPackedSwitch(Register value_reg,
-                                                             Register constant_area,
-                                                             int32_t lower_bound,
-                                                             uint32_t num_entries,
-                                                             HBasicBlock* switch_block,
-                                                             HBasicBlock* default_block) {
-  // Create a jump table.
-  std::vector<MipsLabel*> labels(num_entries);
-  const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
-  for (uint32_t i = 0; i < num_entries; i++) {
-    labels[i] = codegen_->GetLabelOf(successors[i]);
-  }
-  JumpTable* table = __ CreateJumpTable(std::move(labels));
-
-  // Is the value in range?
-  __ Addiu32(TMP, value_reg, -lower_bound);
-  if (IsInt<16>(static_cast<int32_t>(num_entries))) {
-    __ Sltiu(AT, TMP, num_entries);
-    __ Beqz(AT, codegen_->GetLabelOf(default_block));
-  } else {
-    __ LoadConst32(AT, num_entries);
-    __ Bgeu(TMP, AT, codegen_->GetLabelOf(default_block));
-  }
-
-  // We are in the range of the table.
-  // Load the target address from the jump table, indexing by the value.
-  __ LoadLabelAddress(AT, constant_area, table->GetLabel());
-  __ ShiftAndAdd(TMP, TMP, AT, 2, TMP);
-  __ Lw(TMP, TMP, 0);
-  // Compute the absolute target address by adding the table start address
-  // (the table contains offsets to targets relative to its start).
-  __ Addu(TMP, TMP, AT);
-  // And jump.
-  __ Jr(TMP);
-  __ NopIfNoReordering();
-}
-
-void InstructionCodeGeneratorMIPS::VisitPackedSwitch(HPackedSwitch* switch_instr) {
-  int32_t lower_bound = switch_instr->GetStartValue();
-  uint32_t num_entries = switch_instr->GetNumEntries();
-  LocationSummary* locations = switch_instr->GetLocations();
-  Register value_reg = locations->InAt(0).AsRegister<Register>();
-  HBasicBlock* switch_block = switch_instr->GetBlock();
-  HBasicBlock* default_block = switch_instr->GetDefaultBlock();
-
-  if (num_entries > kPackedSwitchJumpTableThreshold) {
-    // R6 uses PC-relative addressing to access the jump table.
-    //
-    // R2, OTOH, uses an HMipsComputeBaseMethodAddress input (when available)
-    // to access the jump table and it is implemented by changing HPackedSwitch to
-    // HMipsPackedSwitch, which bears HMipsComputeBaseMethodAddress (see
-    // VisitMipsPackedSwitch()).
-    //
-    // When there's no HMipsComputeBaseMethodAddress input (e.g. in presence of
-    // irreducible loops), R2 uses the NAL instruction to simulate PC-relative
-    // addressing.
-    GenTableBasedPackedSwitch(value_reg,
-                              ZERO,
-                              lower_bound,
-                              num_entries,
-                              switch_block,
-                              default_block);
-  } else {
-    GenPackedSwitchWithCompares(value_reg,
-                                lower_bound,
-                                num_entries,
-                                switch_block,
-                                default_block);
-  }
-}
-
-void LocationsBuilderMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* switch_instr) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
-  locations->SetInAt(0, Location::RequiresRegister());
-  // Constant area pointer (HMipsComputeBaseMethodAddress).
-  locations->SetInAt(1, Location::RequiresRegister());
-}
-
-void InstructionCodeGeneratorMIPS::VisitMipsPackedSwitch(HMipsPackedSwitch* switch_instr) {
-  int32_t lower_bound = switch_instr->GetStartValue();
-  uint32_t num_entries = switch_instr->GetNumEntries();
-  LocationSummary* locations = switch_instr->GetLocations();
-  Register value_reg = locations->InAt(0).AsRegister<Register>();
-  Register constant_area = locations->InAt(1).AsRegister<Register>();
-  HBasicBlock* switch_block = switch_instr->GetBlock();
-  HBasicBlock* default_block = switch_instr->GetDefaultBlock();
-
-  // This is an R2-only path. HPackedSwitch has been changed to
-  // HMipsPackedSwitch, which bears HMipsComputeBaseMethodAddress
-  // required to address the jump table relative to PC.
-  GenTableBasedPackedSwitch(value_reg,
-                            constant_area,
-                            lower_bound,
-                            num_entries,
-                            switch_block,
-                            default_block);
-}
-
-void LocationsBuilderMIPS::VisitMipsComputeBaseMethodAddress(
-    HMipsComputeBaseMethodAddress* insn) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(insn, LocationSummary::kNoCall);
-  locations->SetOut(Location::RequiresRegister());
-}
-
-void InstructionCodeGeneratorMIPS::VisitMipsComputeBaseMethodAddress(
-    HMipsComputeBaseMethodAddress* insn) {
-  LocationSummary* locations = insn->GetLocations();
-  Register reg = locations->Out().AsRegister<Register>();
-
-  CHECK(!codegen_->GetInstructionSetFeatures().IsR6());
-
-  // Generate a dummy PC-relative call to obtain PC.
-  __ Nal();
-  // Grab the return address off RA.
-  __ Move(reg, RA);
-
-  // Remember this offset (the obtained PC value) for later use with constant area.
-  __ BindPcRelBaseLabel();
-}
-
-void LocationsBuilderMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
-  // The trampoline uses the same calling convention as dex calling conventions,
-  // except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
-  // the method_idx.
-  HandleInvoke(invoke);
-}
-
-void InstructionCodeGeneratorMIPS::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
-  codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
-}
-
-void LocationsBuilderMIPS::VisitClassTableGet(HClassTableGet* instruction) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister());
-}
-
-void InstructionCodeGeneratorMIPS::VisitClassTableGet(HClassTableGet* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) {
-    uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
-        instruction->GetIndex(), kMipsPointerSize).SizeValue();
-    __ LoadFromOffset(kLoadWord,
-                      locations->Out().AsRegister<Register>(),
-                      locations->InAt(0).AsRegister<Register>(),
-                      method_offset);
-  } else {
-    uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-        instruction->GetIndex(), kMipsPointerSize));
-    __ LoadFromOffset(kLoadWord,
-                      locations->Out().AsRegister<Register>(),
-                      locations->InAt(0).AsRegister<Register>(),
-                      mirror::Class::ImtPtrOffset(kMipsPointerSize).Uint32Value());
-    __ LoadFromOffset(kLoadWord,
-                      locations->Out().AsRegister<Register>(),
-                      locations->Out().AsRegister<Register>(),
-                      method_offset);
-  }
-}
-
-void LocationsBuilderMIPS::VisitIntermediateAddress(HIntermediateAddress* instruction
-                                                    ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unreachable";
-}
-
-void InstructionCodeGeneratorMIPS::VisitIntermediateAddress(HIntermediateAddress* instruction
-                                                            ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unreachable";
-}
-
-#undef __
-#undef QUICK_ENTRY_POINT
-
-}  // namespace mips
-}  // namespace art
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
deleted file mode 100644
index 5080731..0000000
--- a/compiler/optimizing/code_generator_mips.h
+++ /dev/null
@@ -1,732 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
-#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
-
-#include "code_generator.h"
-#include "dex/dex_file_types.h"
-#include "dex/string_reference.h"
-#include "dex/type_reference.h"
-#include "driver/compiler_options.h"
-#include "nodes.h"
-#include "parallel_move_resolver.h"
-#include "utils/mips/assembler_mips.h"
-
-namespace art {
-namespace mips {
-
-// InvokeDexCallingConvention registers
-
-static constexpr Register kParameterCoreRegisters[] =
-    { A1, A2, A3, T0, T1 };
-static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
-
-static constexpr FRegister kParameterFpuRegisters[] =
-    { F8, F10, F12, F14, F16, F18 };
-static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);
-
-
-// InvokeRuntimeCallingConvention registers
-
-static constexpr Register kRuntimeParameterCoreRegisters[] =
-    { A0, A1, A2, A3 };
-static constexpr size_t kRuntimeParameterCoreRegistersLength =
-    arraysize(kRuntimeParameterCoreRegisters);
-
-static constexpr FRegister kRuntimeParameterFpuRegisters[] =
-    { F12, F14 };
-static constexpr size_t kRuntimeParameterFpuRegistersLength =
-    arraysize(kRuntimeParameterFpuRegisters);
-
-
-static constexpr Register kCoreCalleeSaves[] =
-    { S0, S1, S2, S3, S4, S5, S6, S7, FP, RA };
-static constexpr FRegister kFpuCalleeSaves[] =
-    { F20, F22, F24, F26, F28, F30 };
-
-
-class CodeGeneratorMIPS;
-
-VectorRegister VectorRegisterFrom(Location location);
-
-class InvokeDexCallingConvention : public CallingConvention<Register, FRegister> {
- public:
-  InvokeDexCallingConvention()
-      : CallingConvention(kParameterCoreRegisters,
-                          kParameterCoreRegistersLength,
-                          kParameterFpuRegisters,
-                          kParameterFpuRegistersLength,
-                          kMipsPointerSize) {}
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
-};
-
-class InvokeDexCallingConventionVisitorMIPS : public InvokeDexCallingConventionVisitor {
- public:
-  InvokeDexCallingConventionVisitorMIPS() {}
-  virtual ~InvokeDexCallingConventionVisitorMIPS() {}
-
-  Location GetNextLocation(DataType::Type type) override;
-  Location GetReturnLocation(DataType::Type type) const override;
-  Location GetMethodLocation() const override;
-
- private:
-  InvokeDexCallingConvention calling_convention;
-
-  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorMIPS);
-};
-
-class InvokeRuntimeCallingConvention : public CallingConvention<Register, FRegister> {
- public:
-  InvokeRuntimeCallingConvention()
-      : CallingConvention(kRuntimeParameterCoreRegisters,
-                          kRuntimeParameterCoreRegistersLength,
-                          kRuntimeParameterFpuRegisters,
-                          kRuntimeParameterFpuRegistersLength,
-                          kMipsPointerSize) {}
-
-  Location GetReturnLocation(DataType::Type return_type);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
-};
-
-class FieldAccessCallingConventionMIPS : public FieldAccessCallingConvention {
- public:
-  FieldAccessCallingConventionMIPS() {}
-
-  Location GetObjectLocation() const override {
-    return Location::RegisterLocation(A1);
-  }
-  Location GetFieldIndexLocation() const override {
-    return Location::RegisterLocation(A0);
-  }
-  Location GetReturnLocation(DataType::Type type) const override {
-    return DataType::Is64BitType(type)
-        ? Location::RegisterPairLocation(V0, V1)
-        : Location::RegisterLocation(V0);
-  }
-  Location GetSetValueLocation(DataType::Type type, bool is_instance) const override {
-    return DataType::Is64BitType(type)
-        ? Location::RegisterPairLocation(A2, A3)
-        : (is_instance ? Location::RegisterLocation(A2) : Location::RegisterLocation(A1));
-  }
-  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
-    return Location::FpuRegisterLocation(F0);
-  }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionMIPS);
-};
-
-class ParallelMoveResolverMIPS : public ParallelMoveResolverWithSwap {
- public:
-  ParallelMoveResolverMIPS(ArenaAllocator* allocator, CodeGeneratorMIPS* codegen)
-      : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
-
-  void EmitMove(size_t index) override;
-  void EmitSwap(size_t index) override;
-  void SpillScratch(int reg) override;
-  void RestoreScratch(int reg) override;
-
-  void Exchange(int index1, int index2, bool double_slot);
-  void ExchangeQuadSlots(int index1, int index2);
-
-  MipsAssembler* GetAssembler() const;
-
- private:
-  CodeGeneratorMIPS* const codegen_;
-
-  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverMIPS);
-};
-
-class SlowPathCodeMIPS : public SlowPathCode {
- public:
-  explicit SlowPathCodeMIPS(HInstruction* instruction)
-      : SlowPathCode(instruction), entry_label_(), exit_label_() {}
-
-  MipsLabel* GetEntryLabel() { return &entry_label_; }
-  MipsLabel* GetExitLabel() { return &exit_label_; }
-
- private:
-  MipsLabel entry_label_;
-  MipsLabel exit_label_;
-
-  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeMIPS);
-};
-
-class LocationsBuilderMIPS : public HGraphVisitor {
- public:
-  LocationsBuilderMIPS(HGraph* graph, CodeGeneratorMIPS* codegen)
-      : HGraphVisitor(graph), codegen_(codegen) {}
-
-#define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) override;
-
-  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
-  FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)
-
-#undef DECLARE_VISIT_INSTRUCTION
-
-  void VisitInstruction(HInstruction* instruction) override {
-    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
-               << " (id " << instruction->GetId() << ")";
-  }
-
- private:
-  void HandleInvoke(HInvoke* invoke);
-  void HandleBinaryOp(HBinaryOperation* operation);
-  void HandleCondition(HCondition* instruction);
-  void HandleShift(HBinaryOperation* operation);
-  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
-  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
-  Location RegisterOrZeroConstant(HInstruction* instruction);
-  Location FpuRegisterOrConstantForStore(HInstruction* instruction);
-
-  InvokeDexCallingConventionVisitorMIPS parameter_visitor_;
-
-  CodeGeneratorMIPS* const codegen_;
-
-  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderMIPS);
-};
-
-class InstructionCodeGeneratorMIPS : public InstructionCodeGenerator {
- public:
-  InstructionCodeGeneratorMIPS(HGraph* graph, CodeGeneratorMIPS* codegen);
-
-#define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) override;
-
-  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
-  FOR_EACH_CONCRETE_INSTRUCTION_MIPS(DECLARE_VISIT_INSTRUCTION)
-
-#undef DECLARE_VISIT_INSTRUCTION
-
-  void VisitInstruction(HInstruction* instruction) override {
-    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
-               << " (id " << instruction->GetId() << ")";
-  }
-
-  MipsAssembler* GetAssembler() const { return assembler_; }
-
-  // Compare-and-jump packed switch generates approx. 3 + 2.5 * N 32-bit
-  // instructions for N cases.
-  // Table-based packed switch generates approx. 11 32-bit instructions
-  // and N 32-bit data words for N cases.
-  // At N = 6 they come out as 18 and 17 32-bit words respectively.
-  // We switch to the table-based method starting with 7 cases.
-  static constexpr uint32_t kPackedSwitchJumpTableThreshold = 6;
-
-  void GenerateMemoryBarrier(MemBarrierKind kind);
-
- private:
-  void GenerateClassInitializationCheck(SlowPathCodeMIPS* slow_path, Register class_reg);
-  void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
-  void GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check, Register temp);
-  void HandleBinaryOp(HBinaryOperation* operation);
-  void HandleCondition(HCondition* instruction);
-  void HandleShift(HBinaryOperation* operation);
-  void HandleFieldSet(HInstruction* instruction,
-                      const FieldInfo& field_info,
-                      uint32_t dex_pc,
-                      bool value_can_be_null);
-  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info, uint32_t dex_pc);
-
-  void GenerateMinMaxInt(LocationSummary* locations, bool is_min, bool isR6, DataType::Type type);
-  void GenerateMinMaxFP(LocationSummary* locations, bool is_min, bool isR6, DataType::Type type);
-  void GenerateMinMax(HBinaryOperation*, bool is_min);
-  void GenerateAbsFP(LocationSummary* locations, DataType::Type type, bool isR2OrNewer, bool isR6);
-
-  // Generate a heap reference load using one register `out`:
-  //
-  //   out <- *(out + offset)
-  //
-  // while honoring heap poisoning and/or read barriers (if any).
-  //
-  // Location `maybe_temp` is used when generating a read barrier and
-  // shall be a register in that case; it may be an invalid location
-  // otherwise.
-  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
-                                        Location out,
-                                        uint32_t offset,
-                                        Location maybe_temp,
-                                        ReadBarrierOption read_barrier_option);
-  // Generate a heap reference load using two different registers
-  // `out` and `obj`:
-  //
-  //   out <- *(obj + offset)
-  //
-  // while honoring heap poisoning and/or read barriers (if any).
-  //
-  // Location `maybe_temp` is used when generating a Baker's (fast
-  // path) read barrier and shall be a register in that case; it may
-  // be an invalid location otherwise.
-  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
-                                         Location out,
-                                         Location obj,
-                                         uint32_t offset,
-                                         Location maybe_temp,
-                                         ReadBarrierOption read_barrier_option);
-
-  // Generate a GC root reference load:
-  //
-  //   root <- *(obj + offset)
-  //
-  // while honoring read barriers (if any).
-  void GenerateGcRootFieldLoad(HInstruction* instruction,
-                               Location root,
-                               Register obj,
-                               uint32_t offset,
-                               ReadBarrierOption read_barrier_option,
-                               MipsLabel* label_low = nullptr);
-
-  void GenerateIntCompare(IfCondition cond, LocationSummary* locations);
-  // When the function returns `false` it means that the condition holds if `dst` is non-zero
-  // and doesn't hold if `dst` is zero. If it returns `true`, the roles of zero and non-zero
-  // `dst` are exchanged.
-  bool MaterializeIntCompare(IfCondition cond,
-                             LocationSummary* input_locations,
-                             Register dst);
-  void GenerateIntCompareAndBranch(IfCondition cond,
-                                   LocationSummary* locations,
-                                   MipsLabel* label);
-  void GenerateLongCompare(IfCondition cond, LocationSummary* locations);
-  void GenerateLongCompareAndBranch(IfCondition cond,
-                                    LocationSummary* locations,
-                                    MipsLabel* label);
-  void GenerateFpCompare(IfCondition cond,
-                         bool gt_bias,
-                         DataType::Type type,
-                         LocationSummary* locations);
-  // When the function returns `false` it means that the condition holds if the condition
-  // code flag `cc` is non-zero and doesn't hold if `cc` is zero. If it returns `true`,
-  // the roles of zero and non-zero values of the `cc` flag are exchanged.
-  bool MaterializeFpCompareR2(IfCondition cond,
-                              bool gt_bias,
-                              DataType::Type type,
-                              LocationSummary* input_locations,
-                              int cc);
-  // When the function returns `false` it means that the condition holds if `dst` is non-zero
-  // and doesn't hold if `dst` is zero. If it returns `true`, the roles of zero and non-zero
-  // `dst` are exchanged.
-  bool MaterializeFpCompareR6(IfCondition cond,
-                              bool gt_bias,
-                              DataType::Type type,
-                              LocationSummary* input_locations,
-                              FRegister dst);
-  void GenerateFpCompareAndBranch(IfCondition cond,
-                                  bool gt_bias,
-                                  DataType::Type type,
-                                  LocationSummary* locations,
-                                  MipsLabel* label);
-  void GenerateTestAndBranch(HInstruction* instruction,
-                             size_t condition_input_index,
-                             MipsLabel* true_target,
-                             MipsLabel* false_target);
-  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
-  void DivRemByPowerOfTwo(HBinaryOperation* instruction);
-  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
-  void GenerateDivRemIntegral(HBinaryOperation* instruction);
-  void HandleGoto(HInstruction* got, HBasicBlock* successor);
-  void GenPackedSwitchWithCompares(Register value_reg,
-                                   int32_t lower_bound,
-                                   uint32_t num_entries,
-                                   HBasicBlock* switch_block,
-                                   HBasicBlock* default_block);
-  void GenTableBasedPackedSwitch(Register value_reg,
-                                 Register constant_area,
-                                 int32_t lower_bound,
-                                 uint32_t num_entries,
-                                 HBasicBlock* switch_block,
-                                 HBasicBlock* default_block);
-
-  int32_t VecAddress(LocationSummary* locations,
-                     size_t size,
-                     /* out */ Register* adjusted_base);
-  void GenConditionalMoveR2(HSelect* select);
-  void GenConditionalMoveR6(HSelect* select);
-
-  MipsAssembler* const assembler_;
-  CodeGeneratorMIPS* const codegen_;
-
-  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorMIPS);
-};
-
-class CodeGeneratorMIPS : public CodeGenerator {
- public:
-  CodeGeneratorMIPS(HGraph* graph,
-                    const CompilerOptions& compiler_options,
-                    OptimizingCompilerStats* stats = nullptr);
-  virtual ~CodeGeneratorMIPS() {}
-
-  void ComputeSpillMask() override;
-  bool HasAllocatedCalleeSaveRegisters() const override;
-  void GenerateFrameEntry() override;
-  void GenerateFrameExit() override;
-
-  void Bind(HBasicBlock* block) override;
-
-  void MoveConstant(Location location, HConstant* c);
-
-  size_t GetWordSize() const override { return kMipsWordSize; }
-
-  size_t GetFloatingPointSpillSlotSize() const override {
-    return GetGraph()->HasSIMD()
-        ? 2 * kMipsDoublewordSize   // 16 bytes for each spill.
-        : 1 * kMipsDoublewordSize;  //  8 bytes for each spill.
-  }
-
-  uintptr_t GetAddressOf(HBasicBlock* block) override {
-    return assembler_.GetLabelLocation(GetLabelOf(block));
-  }
-
-  HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
-  HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
-  MipsAssembler* GetAssembler() override { return &assembler_; }
-  const MipsAssembler& GetAssembler() const override { return assembler_; }
-
-  // Emit linker patches.
-  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
-  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
-
-  // Fast path implementation of ReadBarrier::Barrier for a heap
-  // reference field load when Baker's read barriers are used.
-  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
-                                             Location ref,
-                                             Register obj,
-                                             uint32_t offset,
-                                             Location temp,
-                                             bool needs_null_check);
-  // Fast path implementation of ReadBarrier::Barrier for a heap
-  // reference array load when Baker's read barriers are used.
-  void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
-                                             Location ref,
-                                             Register obj,
-                                             uint32_t data_offset,
-                                             Location index,
-                                             Location temp,
-                                             bool needs_null_check);
-
-  // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
-  // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
-  //
-  // Load the object reference located at the address
-  // `obj + offset + (index << scale_factor)`, held by object `obj`, into
-  // `ref`, and mark it if needed.
-  //
-  // If `always_update_field` is true, the value of the reference is
-  // atomically updated in the holder (`obj`).
-  void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
-                                                 Location ref,
-                                                 Register obj,
-                                                 uint32_t offset,
-                                                 Location index,
-                                                 ScaleFactor scale_factor,
-                                                 Location temp,
-                                                 bool needs_null_check,
-                                                 bool always_update_field = false);
-
-  // Generate a read barrier for a heap reference within `instruction`
-  // using a slow path.
-  //
-  // A read barrier for an object reference read from the heap is
-  // implemented as a call to the artReadBarrierSlow runtime entry
-  // point, which is passed the values in locations `ref`, `obj`, and
-  // `offset`:
-  //
-  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
-  //                                      mirror::Object* obj,
-  //                                      uint32_t offset);
-  //
-  // The `out` location contains the value returned by
-  // artReadBarrierSlow.
-  //
-  // When `index` is provided (i.e. for array accesses), the offset
-  // value passed to artReadBarrierSlow is adjusted to take `index`
-  // into account.
-  void GenerateReadBarrierSlow(HInstruction* instruction,
-                               Location out,
-                               Location ref,
-                               Location obj,
-                               uint32_t offset,
-                               Location index = Location::NoLocation());
-
-  // If read barriers are enabled, generate a read barrier for a heap
-  // reference using a slow path. If heap poisoning is enabled, also
-  // unpoison the reference in `out`.
-  void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
-                                    Location out,
-                                    Location ref,
-                                    Location obj,
-                                    uint32_t offset,
-                                    Location index = Location::NoLocation());
-
-  // Generate a read barrier for a GC root within `instruction` using
-  // a slow path.
-  //
-  // A read barrier for an object reference GC root is implemented as
-  // a call to the artReadBarrierForRootSlow runtime entry point,
-  // which is passed the value in location `root`:
-  //
-  //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
-  //
-  // The `out` location contains the value returned by
-  // artReadBarrierForRootSlow.
-  void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
-
-  void MarkGCCard(Register object, Register value, bool value_can_be_null);
-
-  // Register allocation.
-
-  void SetupBlockedRegisters() const override;
-
-  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
-  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
-  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
-  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
-  void ClobberRA() {
-    clobbered_ra_ = true;
-  }
-
-  void DumpCoreRegister(std::ostream& stream, int reg) const override;
-  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
-
-  InstructionSet GetInstructionSet() const override { return InstructionSet::kMips; }
-
-  const MipsInstructionSetFeatures& GetInstructionSetFeatures() const;
-
-  MipsLabel* GetLabelOf(HBasicBlock* block) const {
-    return CommonGetLabelOf<MipsLabel>(block_labels_, block);
-  }
-
-  void Initialize() override {
-    block_labels_ = CommonInitializeLabels<MipsLabel>();
-  }
-
-  void Finalize(CodeAllocator* allocator) override;
-
-  // Code generation helpers.
-
-  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
-
-  void MoveConstant(Location destination, int32_t value) override;
-
-  void AddLocationAsTemp(Location location, LocationSummary* locations) override;
-
-  // Generate code to invoke a runtime entry point.
-  void InvokeRuntime(QuickEntrypointEnum entrypoint,
-                     HInstruction* instruction,
-                     uint32_t dex_pc,
-                     SlowPathCode* slow_path = nullptr) override;
-
-  // Generate code to invoke a runtime entry point, but do not record
-  // PC-related information in a stack map.
-  void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
-                                           HInstruction* instruction,
-                                           SlowPathCode* slow_path,
-                                           bool direct);
-
-  void GenerateInvokeRuntime(int32_t entry_point_offset, bool direct);
-
-  ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }
-
-  bool NeedsTwoRegisters(DataType::Type type) const override {
-    return type == DataType::Type::kInt64;
-  }
-
-  // Check if the desired_string_load_kind is supported. If it is, return it,
-  // otherwise return a fall-back kind that should be used instead.
-  HLoadString::LoadKind GetSupportedLoadStringKind(
-      HLoadString::LoadKind desired_string_load_kind) override;
-
-  // Check if the desired_class_load_kind is supported. If it is, return it,
-  // otherwise return a fall-back kind that should be used instead.
-  HLoadClass::LoadKind GetSupportedLoadClassKind(
-      HLoadClass::LoadKind desired_class_load_kind) override;
-
-  // Check if the desired_dispatch_info is supported. If it is, return it,
-  // otherwise return a fall-back info that should be used instead.
-  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
-      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      ArtMethod* method) override;
-
-  void GenerateStaticOrDirectCall(
-      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
-  void GenerateVirtualCall(
-      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
-
-  void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
-                              DataType::Type type ATTRIBUTE_UNUSED) override {
-    UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
-  }
-
-  void GenerateNop() override;
-  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
-  void GenerateExplicitNullCheck(HNullCheck* instruction) override;
-
-  // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
-  // whether through .data.bimg.rel.ro, .bss, or directly in the boot image.
-  //
-  // The 16-bit halves of the 32-bit PC-relative offset are patched separately, necessitating
-  // two patches/infos. There can be more than two patches/infos if the instruction supplying
-  // the high half is shared with e.g. a slow path, while the low half is supplied by separate
-  // instructions, e.g.:
-  //     lui   r1, high       // patch
-  //     addu  r1, r1, rbase
-  //     lw    r2, low(r1)    // patch
-  //     beqz  r2, slow_path
-  //   back:
-  //     ...
-  //   slow_path:
-  //     ...
-  //     sw    r2, low(r1)    // patch
-  //     b     back
-  struct PcRelativePatchInfo : PatchInfo<MipsLabel> {
-    PcRelativePatchInfo(const DexFile* dex_file,
-                        uint32_t off_or_idx,
-                        const PcRelativePatchInfo* info_high)
-        : PatchInfo<MipsLabel>(dex_file, off_or_idx),
-          pc_rel_label(),
-          patch_info_high(info_high) { }
-
-    // Label for the instruction corresponding to PC+0. Not bound or used in low half patches.
-    // Not bound in high half patches on R2 when using HMipsComputeBaseMethodAddress.
-    // Bound in high half patches on R2 when using the NAL instruction instead of
-    // HMipsComputeBaseMethodAddress.
-    // Bound in high half patches on R6.
-    MipsLabel pc_rel_label;
-    // Pointer to the info for the high half patch or nullptr if this is the high half patch info.
-    const PcRelativePatchInfo* patch_info_high;
-
-   private:
-    PcRelativePatchInfo(PcRelativePatchInfo&& other) = delete;
-    DISALLOW_COPY_AND_ASSIGN(PcRelativePatchInfo);
-  };
-
-  PcRelativePatchInfo* NewBootImageIntrinsicPatch(uint32_t intrinsic_data,
-                                                  const PcRelativePatchInfo* info_high = nullptr);
-  PcRelativePatchInfo* NewBootImageRelRoPatch(uint32_t boot_image_offset,
-                                              const PcRelativePatchInfo* info_high = nullptr);
-  PcRelativePatchInfo* NewBootImageMethodPatch(MethodReference target_method,
-                                               const PcRelativePatchInfo* info_high = nullptr);
-  PcRelativePatchInfo* NewMethodBssEntryPatch(MethodReference target_method,
-                                              const PcRelativePatchInfo* info_high = nullptr);
-  PcRelativePatchInfo* NewBootImageTypePatch(const DexFile& dex_file,
-                                             dex::TypeIndex type_index,
-                                             const PcRelativePatchInfo* info_high = nullptr);
-  PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file,
-                                            dex::TypeIndex type_index,
-                                            const PcRelativePatchInfo* info_high = nullptr);
-  PcRelativePatchInfo* NewBootImageStringPatch(const DexFile& dex_file,
-                                               dex::StringIndex string_index,
-                                               const PcRelativePatchInfo* info_high = nullptr);
-  PcRelativePatchInfo* NewStringBssEntryPatch(const DexFile& dex_file,
-                                              dex::StringIndex string_index,
-                                              const PcRelativePatchInfo* info_high = nullptr);
-  Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
-
-  void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info_high,
-                                            Register out,
-                                            Register base);
-
-  void LoadBootImageAddress(Register reg, uint32_t boot_image_reference);
-  void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
-
-  // The JitPatchInfo is used for JIT string and class loads.
-  struct JitPatchInfo {
-    JitPatchInfo(const DexFile& dex_file, uint64_t idx)
-        : target_dex_file(dex_file), index(idx) { }
-    JitPatchInfo(JitPatchInfo&& other) = default;
-
-    const DexFile& target_dex_file;
-    // String/type index.
-    uint64_t index;
-    // Label for the instruction loading the most significant half of the address.
-    MipsLabel high_label;
-    // Label for the instruction supplying the least significant half of the address.
-    MipsLabel low_label;
-  };
-
-  void PatchJitRootUse(uint8_t* code,
-                       const uint8_t* roots_data,
-                       const JitPatchInfo& info,
-                       uint64_t index_in_table) const;
-  JitPatchInfo* NewJitRootStringPatch(const DexFile& dex_file,
-                                      dex::StringIndex string_index,
-                                      Handle<mirror::String> handle);
-  JitPatchInfo* NewJitRootClassPatch(const DexFile& dex_file,
-                                     dex::TypeIndex type_index,
-                                     Handle<mirror::Class> handle);
-
- private:
-  Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp);
-
-  using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, Literal*>;
-
-  Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
-  PcRelativePatchInfo* NewPcRelativePatch(const DexFile* dex_file,
-                                          uint32_t offset_or_index,
-                                          const PcRelativePatchInfo* info_high,
-                                          ArenaDeque<PcRelativePatchInfo>* patches);
-
-  template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
-  void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
-                                   ArenaVector<linker::LinkerPatch>* linker_patches);
-
-  // Labels for each block that will be compiled.
-  MipsLabel* block_labels_;
-  MipsLabel frame_entry_label_;
-  LocationsBuilderMIPS location_builder_;
-  InstructionCodeGeneratorMIPS instruction_visitor_;
-  ParallelMoveResolverMIPS move_resolver_;
-  MipsAssembler assembler_;
-
-  // Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
-  Uint32ToLiteralMap uint32_literals_;
-  // PC-relative method patch info for kBootImageLinkTimePcRelative/kBootImageRelRo.
-  // Also used for type/string patches for kBootImageRelRo (same linker patch as for methods).
-  ArenaDeque<PcRelativePatchInfo> boot_image_method_patches_;
-  // PC-relative method patch info for kBssEntry.
-  ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
-  // PC-relative type patch info for kBootImageLinkTimePcRelative.
-  ArenaDeque<PcRelativePatchInfo> boot_image_type_patches_;
-  // PC-relative type patch info for kBssEntry.
-  ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
-  // PC-relative String patch info for kBootImageLinkTimePcRelative.
-  ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
-  // PC-relative String patch info for kBssEntry.
-  ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
-  // PC-relative patch info for IntrinsicObjects.
-  ArenaDeque<PcRelativePatchInfo> boot_image_intrinsic_patches_;
-
-  // Patches for string root accesses in JIT compiled code.
-  ArenaDeque<JitPatchInfo> jit_string_patches_;
-  // Patches for class root accesses in JIT compiled code.
-  ArenaDeque<JitPatchInfo> jit_class_patches_;
-
-  // PC-relative loads on R2 clobber RA, which may need to be preserved explicitly in leaf methods.
-  // This is a flag set by pc_relative_fixups_mips and dex_cache_array_fixups_mips optimizations.
-  bool clobbered_ra_;
-
-  DISALLOW_COPY_AND_ASSIGN(CodeGeneratorMIPS);
-};
-
-}  // namespace mips
-}  // namespace art
-
-#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS_H_
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
deleted file mode 100644
index 0d3cb3b..0000000
--- a/compiler/optimizing/code_generator_mips64.cc
+++ /dev/null
@@ -1,7633 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "code_generator_mips64.h"
-
-#include "arch/mips64/asm_support_mips64.h"
-#include "art_method.h"
-#include "class_table.h"
-#include "code_generator_utils.h"
-#include "compiled_method.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "entrypoints/quick/quick_entrypoints_enum.h"
-#include "gc/accounting/card_table.h"
-#include "gc/space/image_space.h"
-#include "heap_poisoning.h"
-#include "intrinsics.h"
-#include "intrinsics_mips64.h"
-#include "linker/linker_patch.h"
-#include "mirror/array-inl.h"
-#include "mirror/class-inl.h"
-#include "offsets.h"
-#include "stack_map_stream.h"
-#include "thread.h"
-#include "utils/assembler.h"
-#include "utils/mips64/assembler_mips64.h"
-#include "utils/stack_checks.h"
-
-namespace art {
-namespace mips64 {
-
-static constexpr int kCurrentMethodStackOffset = 0;
-static constexpr GpuRegister kMethodRegisterArgument = A0;
-
-// Flags controlling the use of thunks for Baker read barriers.
-constexpr bool kBakerReadBarrierThunksEnableForFields = true;
-constexpr bool kBakerReadBarrierThunksEnableForArrays = true;
-constexpr bool kBakerReadBarrierThunksEnableForGcRoots = true;
-
-Location Mips64ReturnLocation(DataType::Type return_type) {
-  switch (return_type) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kUint32:
-    case DataType::Type::kInt32:
-    case DataType::Type::kReference:
-    case DataType::Type::kUint64:
-    case DataType::Type::kInt64:
-      return Location::RegisterLocation(V0);
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      return Location::FpuRegisterLocation(F0);
-
-    case DataType::Type::kVoid:
-      return Location();
-  }
-  UNREACHABLE();
-}
-
-Location InvokeDexCallingConventionVisitorMIPS64::GetReturnLocation(DataType::Type type) const {
-  return Mips64ReturnLocation(type);
-}
-
-Location InvokeDexCallingConventionVisitorMIPS64::GetMethodLocation() const {
-  return Location::RegisterLocation(kMethodRegisterArgument);
-}
-
-Location InvokeDexCallingConventionVisitorMIPS64::GetNextLocation(DataType::Type type) {
-  Location next_location;
-  if (type == DataType::Type::kVoid) {
-    LOG(FATAL) << "Unexpected parameter type " << type;
-  }
-
-  if (DataType::IsFloatingPointType(type) &&
-      (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
-    next_location = Location::FpuRegisterLocation(
-        calling_convention.GetFpuRegisterAt(float_index_++));
-    gp_index_++;
-  } else if (!DataType::IsFloatingPointType(type) &&
-             (gp_index_ < calling_convention.GetNumberOfRegisters())) {
-    next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index_++));
-    float_index_++;
-  } else {
-    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
-    next_location = DataType::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
-                                                : Location::StackSlot(stack_offset);
-  }
-
-  // Space on the stack is reserved for all arguments.
-  stack_index_ += DataType::Is64BitType(type) ? 2 : 1;
-
-  return next_location;
-}
-
-Location InvokeRuntimeCallingConvention::GetReturnLocation(DataType::Type type) {
-  return Mips64ReturnLocation(type);
-}
-
-static RegisterSet OneRegInReferenceOutSaveEverythingCallerSaves() {
-  InvokeRuntimeCallingConvention calling_convention;
-  RegisterSet caller_saves = RegisterSet::Empty();
-  caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  // The reference is returned in the same register. This differs from the standard return location.
-  return caller_saves;
-}
-
-// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
-#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()->  // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, x).Int32Value()
-
-class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
- public:
-  explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : SlowPathCodeMIPS64(instruction) {}
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    LocationSummary* locations = instruction_->GetLocations();
-    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
-    __ Bind(GetEntryLabel());
-    if (instruction_->CanThrowIntoCatchBlock()) {
-      // Live registers will be restored in the catch block if caught.
-      SaveLiveRegisters(codegen, instruction_->GetLocations());
-    }
-    // We're moving two locations to locations that could overlap, so we need a parallel
-    // move resolver.
-    InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(locations->InAt(0),
-                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-                               DataType::Type::kInt32,
-                               locations->InAt(1),
-                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
-                               DataType::Type::kInt32);
-    QuickEntrypointEnum entrypoint = instruction_->AsBoundsCheck()->IsStringCharAt()
-        ? kQuickThrowStringBounds
-        : kQuickThrowArrayBounds;
-    mips64_codegen->InvokeRuntime(entrypoint, instruction_, instruction_->GetDexPc(), this);
-    CheckEntrypointTypes<kQuickThrowStringBounds, void, int32_t, int32_t>();
-    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
-  }
-
-  bool IsFatal() const override { return true; }
-
-  const char* GetDescription() const override { return "BoundsCheckSlowPathMIPS64"; }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
-};
-
-class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
- public:
-  explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction)
-      : SlowPathCodeMIPS64(instruction) {}
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
-    __ Bind(GetEntryLabel());
-    mips64_codegen->InvokeRuntime(kQuickThrowDivZero, instruction_, instruction_->GetDexPc(), this);
-    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
-  }
-
-  bool IsFatal() const override { return true; }
-
-  const char* GetDescription() const override { return "DivZeroCheckSlowPathMIPS64"; }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
-};
-
-class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
- public:
-  LoadClassSlowPathMIPS64(HLoadClass* cls, HInstruction* at)
-      : SlowPathCodeMIPS64(at), cls_(cls) {
-    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
-    DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
-  }
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    LocationSummary* locations = instruction_->GetLocations();
-    Location out = locations->Out();
-    const uint32_t dex_pc = instruction_->GetDexPc();
-    bool must_resolve_type = instruction_->IsLoadClass() && cls_->MustResolveTypeOnSlowPath();
-    bool must_do_clinit = instruction_->IsClinitCheck() || cls_->MustGenerateClinitCheck();
-
-    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
-    __ Bind(GetEntryLabel());
-    SaveLiveRegisters(codegen, locations);
-
-    InvokeRuntimeCallingConvention calling_convention;
-    if (must_resolve_type) {
-      DCHECK(IsSameDexFile(cls_->GetDexFile(), mips64_codegen->GetGraph()->GetDexFile()));
-      dex::TypeIndex type_index = cls_->GetTypeIndex();
-      __ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
-      mips64_codegen->InvokeRuntime(kQuickResolveType, instruction_, dex_pc, this);
-      CheckEntrypointTypes<kQuickResolveType, void*, uint32_t>();
-      // If we also must_do_clinit, the resolved type is now in the correct register.
-    } else {
-      DCHECK(must_do_clinit);
-      Location source = instruction_->IsLoadClass() ? out : locations->InAt(0);
-      mips64_codegen->MoveLocation(Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-                                   source,
-                                   cls_->GetType());
-    }
-    if (must_do_clinit) {
-      mips64_codegen->InvokeRuntime(kQuickInitializeStaticStorage, instruction_, dex_pc, this);
-      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, mirror::Class*>();
-    }
-
-    // Move the class to the desired location.
-    if (out.IsValid()) {
-      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
-      DataType::Type type = instruction_->GetType();
-      mips64_codegen->MoveLocation(out,
-                                   Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-                                   type);
-    }
-    RestoreLiveRegisters(codegen, locations);
-
-    __ Bc(GetExitLabel());
-  }
-
-  const char* GetDescription() const override { return "LoadClassSlowPathMIPS64"; }
-
- private:
-  // The class this slow path will load.
-  HLoadClass* const cls_;
-
-  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
-};
-
-class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
- public:
-  explicit LoadStringSlowPathMIPS64(HLoadString* instruction)
-      : SlowPathCodeMIPS64(instruction) {}
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    DCHECK(instruction_->IsLoadString());
-    DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
-    LocationSummary* locations = instruction_->GetLocations();
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
-    const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
-    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
-    InvokeRuntimeCallingConvention calling_convention;
-    __ Bind(GetEntryLabel());
-    SaveLiveRegisters(codegen, locations);
-
-    __ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
-    mips64_codegen->InvokeRuntime(kQuickResolveString,
-                                  instruction_,
-                                  instruction_->GetDexPc(),
-                                  this);
-    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
-
-    DataType::Type type = instruction_->GetType();
-    mips64_codegen->MoveLocation(locations->Out(),
-                                 Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-                                 type);
-    RestoreLiveRegisters(codegen, locations);
-
-    __ Bc(GetExitLabel());
-  }
-
-  const char* GetDescription() const override { return "LoadStringSlowPathMIPS64"; }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
-};
-
-class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
- public:
-  explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : SlowPathCodeMIPS64(instr) {}
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
-    __ Bind(GetEntryLabel());
-    if (instruction_->CanThrowIntoCatchBlock()) {
-      // Live registers will be restored in the catch block if caught.
-      SaveLiveRegisters(codegen, instruction_->GetLocations());
-    }
-    mips64_codegen->InvokeRuntime(kQuickThrowNullPointer,
-                                  instruction_,
-                                  instruction_->GetDexPc(),
-                                  this);
-    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
-  }
-
-  bool IsFatal() const override { return true; }
-
-  const char* GetDescription() const override { return "NullCheckSlowPathMIPS64"; }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
-};
-
-class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
- public:
-  SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, HBasicBlock* successor)
-      : SlowPathCodeMIPS64(instruction), successor_(successor) {}
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    LocationSummary* locations = instruction_->GetLocations();
-    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
-    __ Bind(GetEntryLabel());
-    SaveLiveRegisters(codegen, locations);     // Only saves live vector registers for SIMD.
-    mips64_codegen->InvokeRuntime(kQuickTestSuspend, instruction_, instruction_->GetDexPc(), this);
-    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
-    RestoreLiveRegisters(codegen, locations);  // Only restores live vector registers for SIMD.
-    if (successor_ == nullptr) {
-      __ Bc(GetReturnLabel());
-    } else {
-      __ Bc(mips64_codegen->GetLabelOf(successor_));
-    }
-  }
-
-  Mips64Label* GetReturnLabel() {
-    DCHECK(successor_ == nullptr);
-    return &return_label_;
-  }
-
-  const char* GetDescription() const override { return "SuspendCheckSlowPathMIPS64"; }
-
-  HBasicBlock* GetSuccessor() const {
-    return successor_;
-  }
-
- private:
-  // If not null, the block to branch to after the suspend check.
-  HBasicBlock* const successor_;
-
-  // If `successor_` is null, the label to branch to after the suspend check.
-  Mips64Label return_label_;
-
-  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS64);
-};
-
-class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
- public:
-  explicit TypeCheckSlowPathMIPS64(HInstruction* instruction, bool is_fatal)
-      : SlowPathCodeMIPS64(instruction), is_fatal_(is_fatal) {}
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    LocationSummary* locations = instruction_->GetLocations();
-
-    uint32_t dex_pc = instruction_->GetDexPc();
-    DCHECK(instruction_->IsCheckCast()
-           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
-    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
-
-    __ Bind(GetEntryLabel());
-    if (!is_fatal_ || instruction_->CanThrowIntoCatchBlock()) {
-      SaveLiveRegisters(codegen, locations);
-    }
-
-    // We're moving two locations to locations that could overlap, so we need a parallel
-    // move resolver.
-    InvokeRuntimeCallingConvention calling_convention;
-    codegen->EmitParallelMoves(locations->InAt(0),
-                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-                               DataType::Type::kReference,
-                               locations->InAt(1),
-                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
-                               DataType::Type::kReference);
-    if (instruction_->IsInstanceOf()) {
-      mips64_codegen->InvokeRuntime(kQuickInstanceofNonTrivial, instruction_, dex_pc, this);
-      CheckEntrypointTypes<kQuickInstanceofNonTrivial, size_t, mirror::Object*, mirror::Class*>();
-      DataType::Type ret_type = instruction_->GetType();
-      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
-      mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
-    } else {
-      DCHECK(instruction_->IsCheckCast());
-      mips64_codegen->InvokeRuntime(kQuickCheckInstanceOf, instruction_, dex_pc, this);
-      CheckEntrypointTypes<kQuickCheckInstanceOf, void, mirror::Object*, mirror::Class*>();
-    }
-
-    if (!is_fatal_) {
-      RestoreLiveRegisters(codegen, locations);
-      __ Bc(GetExitLabel());
-    }
-  }
-
-  const char* GetDescription() const override { return "TypeCheckSlowPathMIPS64"; }
-
-  bool IsFatal() const override { return is_fatal_; }
-
- private:
-  const bool is_fatal_;
-
-  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
-};
-
-class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
- public:
-  explicit DeoptimizationSlowPathMIPS64(HDeoptimize* instruction)
-    : SlowPathCodeMIPS64(instruction) {}
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
-    __ Bind(GetEntryLabel());
-      LocationSummary* locations = instruction_->GetLocations();
-    SaveLiveRegisters(codegen, locations);
-    InvokeRuntimeCallingConvention calling_convention;
-    __ LoadConst32(calling_convention.GetRegisterAt(0),
-                   static_cast<uint32_t>(instruction_->AsDeoptimize()->GetDeoptimizationKind()));
-    mips64_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
-    CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
-  }
-
-  const char* GetDescription() const override { return "DeoptimizationSlowPathMIPS64"; }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
-};
-
-class ArraySetSlowPathMIPS64 : public SlowPathCodeMIPS64 {
- public:
-  explicit ArraySetSlowPathMIPS64(HInstruction* instruction) : SlowPathCodeMIPS64(instruction) {}
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    LocationSummary* locations = instruction_->GetLocations();
-    __ Bind(GetEntryLabel());
-    SaveLiveRegisters(codegen, locations);
-
-    InvokeRuntimeCallingConvention calling_convention;
-    HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
-    parallel_move.AddMove(
-        locations->InAt(0),
-        Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-        DataType::Type::kReference,
-        nullptr);
-    parallel_move.AddMove(
-        locations->InAt(1),
-        Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
-        DataType::Type::kInt32,
-        nullptr);
-    parallel_move.AddMove(
-        locations->InAt(2),
-        Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
-        DataType::Type::kReference,
-        nullptr);
-    codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
-
-    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
-    mips64_codegen->InvokeRuntime(kQuickAputObject, instruction_, instruction_->GetDexPc(), this);
-    CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
-    RestoreLiveRegisters(codegen, locations);
-    __ Bc(GetExitLabel());
-  }
-
-  const char* GetDescription() const override { return "ArraySetSlowPathMIPS64"; }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathMIPS64);
-};
-
-// Slow path marking an object reference `ref` during a read
-// barrier. The field `obj.field` in the object `obj` holding this
-// reference does not get updated by this slow path after marking (see
-// ReadBarrierMarkAndUpdateFieldSlowPathMIPS64 below for that).
-//
-// This means that after the execution of this slow path, `ref` will
-// always be up-to-date, but `obj.field` may not; i.e., after the
-// flip, `ref` will be a to-space reference, but `obj.field` will
-// probably still be a from-space reference (unless it gets updated by
-// another thread, or if another thread installed another object
-// reference (different from `ref`) in `obj.field`).
-//
-// If `entrypoint` is a valid location it is assumed to already be
-// holding the entrypoint. The case where the entrypoint is passed in
-// is for the GcRoot read barrier.
-class ReadBarrierMarkSlowPathMIPS64 : public SlowPathCodeMIPS64 {
- public:
-  ReadBarrierMarkSlowPathMIPS64(HInstruction* instruction,
-                                Location ref,
-                                Location entrypoint = Location::NoLocation())
-      : SlowPathCodeMIPS64(instruction), ref_(ref), entrypoint_(entrypoint) {
-    DCHECK(kEmitCompilerReadBarrier);
-  }
-
-  const char* GetDescription() const override { return "ReadBarrierMarkSlowPathMIPS"; }
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    LocationSummary* locations = instruction_->GetLocations();
-    GpuRegister ref_reg = ref_.AsRegister<GpuRegister>();
-    DCHECK(locations->CanCall());
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
-    DCHECK(instruction_->IsInstanceFieldGet() ||
-           instruction_->IsStaticFieldGet() ||
-           instruction_->IsArrayGet() ||
-           instruction_->IsArraySet() ||
-           instruction_->IsLoadClass() ||
-           instruction_->IsLoadString() ||
-           instruction_->IsInstanceOf() ||
-           instruction_->IsCheckCast() ||
-           (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()) ||
-           (instruction_->IsInvokeStaticOrDirect() && instruction_->GetLocations()->Intrinsified()))
-        << "Unexpected instruction in read barrier marking slow path: "
-        << instruction_->DebugName();
-
-    __ Bind(GetEntryLabel());
-    // No need to save live registers; it's taken care of by the
-    // entrypoint. Also, there is no need to update the stack mask,
-    // as this runtime call will not trigger a garbage collection.
-    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
-    DCHECK((V0 <= ref_reg && ref_reg <= T2) ||
-           (S2 <= ref_reg && ref_reg <= S7) ||
-           (ref_reg == S8)) << ref_reg;
-    // "Compact" slow path, saving two moves.
-    //
-    // Instead of using the standard runtime calling convention (input
-    // and output in A0 and V0 respectively):
-    //
-    //   A0 <- ref
-    //   V0 <- ReadBarrierMark(A0)
-    //   ref <- V0
-    //
-    // we just use rX (the register containing `ref`) as input and output
-    // of a dedicated entrypoint:
-    //
-    //   rX <- ReadBarrierMarkRegX(rX)
-    //
-    if (entrypoint_.IsValid()) {
-      mips64_codegen->ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction_, this);
-      DCHECK_EQ(entrypoint_.AsRegister<GpuRegister>(), T9);
-      __ Jalr(entrypoint_.AsRegister<GpuRegister>());
-      __ Nop();
-    } else {
-      int32_t entry_point_offset =
-          Thread::ReadBarrierMarkEntryPointsOffset<kMips64PointerSize>(ref_reg - 1);
-      // This runtime call does not require a stack map.
-      mips64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
-                                                          instruction_,
-                                                          this);
-    }
-    __ Bc(GetExitLabel());
-  }
-
- private:
-  // The location (register) of the marked object reference.
-  const Location ref_;
-
-  // The location of the entrypoint if already loaded.
-  const Location entrypoint_;
-
-  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathMIPS64);
-};
-
-// Slow path marking an object reference `ref` during a read barrier,
-// and if needed, atomically updating the field `obj.field` in the
-// object `obj` holding this reference after marking (contrary to
-// ReadBarrierMarkSlowPathMIPS64 above, which never tries to update
-// `obj.field`).
-//
-// This means that after the execution of this slow path, both `ref`
-// and `obj.field` will be up-to-date; i.e., after the flip, both will
-// hold the same to-space reference (unless another thread installed
-// another object reference (different from `ref`) in `obj.field`).
-class ReadBarrierMarkAndUpdateFieldSlowPathMIPS64 : public SlowPathCodeMIPS64 {
- public:
-  ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(HInstruction* instruction,
-                                              Location ref,
-                                              GpuRegister obj,
-                                              Location field_offset,
-                                              GpuRegister temp1)
-      : SlowPathCodeMIPS64(instruction),
-        ref_(ref),
-        obj_(obj),
-        field_offset_(field_offset),
-        temp1_(temp1) {
-    DCHECK(kEmitCompilerReadBarrier);
-  }
-
-  const char* GetDescription() const override {
-    return "ReadBarrierMarkAndUpdateFieldSlowPathMIPS64";
-  }
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    LocationSummary* locations = instruction_->GetLocations();
-    GpuRegister ref_reg = ref_.AsRegister<GpuRegister>();
-    DCHECK(locations->CanCall());
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(ref_reg)) << ref_reg;
-    // This slow path is only used by the UnsafeCASObject intrinsic.
-    DCHECK((instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
-        << "Unexpected instruction in read barrier marking and field updating slow path: "
-        << instruction_->DebugName();
-    DCHECK(instruction_->GetLocations()->Intrinsified());
-    DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kUnsafeCASObject);
-    DCHECK(field_offset_.IsRegister()) << field_offset_;
-
-    __ Bind(GetEntryLabel());
-
-    // Save the old reference.
-    // Note that we cannot use AT or TMP to save the old reference, as those
-    // are used by the code that follows, but we need the old reference after
-    // the call to the ReadBarrierMarkRegX entry point.
-    DCHECK_NE(temp1_, AT);
-    DCHECK_NE(temp1_, TMP);
-    __ Move(temp1_, ref_reg);
-
-    // No need to save live registers; it's taken care of by the
-    // entrypoint. Also, there is no need to update the stack mask,
-    // as this runtime call will not trigger a garbage collection.
-    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
-    DCHECK((V0 <= ref_reg && ref_reg <= T2) ||
-           (S2 <= ref_reg && ref_reg <= S7) ||
-           (ref_reg == S8)) << ref_reg;
-    // "Compact" slow path, saving two moves.
-    //
-    // Instead of using the standard runtime calling convention (input
-    // and output in A0 and V0 respectively):
-    //
-    //   A0 <- ref
-    //   V0 <- ReadBarrierMark(A0)
-    //   ref <- V0
-    //
-    // we just use rX (the register containing `ref`) as input and output
-    // of a dedicated entrypoint:
-    //
-    //   rX <- ReadBarrierMarkRegX(rX)
-    //
-    int32_t entry_point_offset =
-        Thread::ReadBarrierMarkEntryPointsOffset<kMips64PointerSize>(ref_reg - 1);
-    // This runtime call does not require a stack map.
-    mips64_codegen->InvokeRuntimeWithoutRecordingPcInfo(entry_point_offset,
-                                                        instruction_,
-                                                        this);
-
-    // If the new reference is different from the old reference,
-    // update the field in the holder (`*(obj_ + field_offset_)`).
-    //
-    // Note that this field could also hold a different object, if
-    // another thread had concurrently changed it. In that case, the
-    // the compare-and-set (CAS) loop below would abort, leaving the
-    // field as-is.
-    Mips64Label done;
-    __ Beqc(temp1_, ref_reg, &done);
-
-    // Update the the holder's field atomically.  This may fail if
-    // mutator updates before us, but it's OK.  This is achieved
-    // using a strong compare-and-set (CAS) operation with relaxed
-    // memory synchronization ordering, where the expected value is
-    // the old reference and the desired value is the new reference.
-
-    // Convenience aliases.
-    GpuRegister base = obj_;
-    GpuRegister offset = field_offset_.AsRegister<GpuRegister>();
-    GpuRegister expected = temp1_;
-    GpuRegister value = ref_reg;
-    GpuRegister tmp_ptr = TMP;      // Pointer to actual memory.
-    GpuRegister tmp = AT;           // Value in memory.
-
-    __ Daddu(tmp_ptr, base, offset);
-
-    if (kPoisonHeapReferences) {
-      __ PoisonHeapReference(expected);
-      // Do not poison `value` if it is the same register as
-      // `expected`, which has just been poisoned.
-      if (value != expected) {
-        __ PoisonHeapReference(value);
-      }
-    }
-
-    // do {
-    //   tmp = [r_ptr] - expected;
-    // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
-
-    Mips64Label loop_head, exit_loop;
-    __ Bind(&loop_head);
-    __ Ll(tmp, tmp_ptr);
-    // The LL instruction sign-extends the 32-bit value, but
-    // 32-bit references must be zero-extended. Zero-extend `tmp`.
-    __ Dext(tmp, tmp, 0, 32);
-    __ Bnec(tmp, expected, &exit_loop);
-    __ Move(tmp, value);
-    __ Sc(tmp, tmp_ptr);
-    __ Beqzc(tmp, &loop_head);
-    __ Bind(&exit_loop);
-
-    if (kPoisonHeapReferences) {
-      __ UnpoisonHeapReference(expected);
-      // Do not unpoison `value` if it is the same register as
-      // `expected`, which has just been unpoisoned.
-      if (value != expected) {
-        __ UnpoisonHeapReference(value);
-      }
-    }
-
-    __ Bind(&done);
-    __ Bc(GetExitLabel());
-  }
-
- private:
-  // The location (register) of the marked object reference.
-  const Location ref_;
-  // The register containing the object holding the marked object reference field.
-  const GpuRegister obj_;
-  // The location of the offset of the marked reference field within `obj_`.
-  Location field_offset_;
-
-  const GpuRegister temp1_;
-
-  DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkAndUpdateFieldSlowPathMIPS64);
-};
-
-// Slow path generating a read barrier for a heap reference.
-class ReadBarrierForHeapReferenceSlowPathMIPS64 : public SlowPathCodeMIPS64 {
- public:
-  ReadBarrierForHeapReferenceSlowPathMIPS64(HInstruction* instruction,
-                                            Location out,
-                                            Location ref,
-                                            Location obj,
-                                            uint32_t offset,
-                                            Location index)
-      : SlowPathCodeMIPS64(instruction),
-        out_(out),
-        ref_(ref),
-        obj_(obj),
-        offset_(offset),
-        index_(index) {
-    DCHECK(kEmitCompilerReadBarrier);
-    // If `obj` is equal to `out` or `ref`, it means the initial object
-    // has been overwritten by (or after) the heap object reference load
-    // to be instrumented, e.g.:
-    //
-    //   __ LoadFromOffset(kLoadWord, out, out, offset);
-    //   codegen_->GenerateReadBarrierSlow(instruction, out_loc, out_loc, out_loc, offset);
-    //
-    // In that case, we have lost the information about the original
-    // object, and the emitted read barrier cannot work properly.
-    DCHECK(!obj.Equals(out)) << "obj=" << obj << " out=" << out;
-    DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
-  }
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
-    LocationSummary* locations = instruction_->GetLocations();
-    DataType::Type type = DataType::Type::kReference;
-    GpuRegister reg_out = out_.AsRegister<GpuRegister>();
-    DCHECK(locations->CanCall());
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
-    DCHECK(instruction_->IsInstanceFieldGet() ||
-           instruction_->IsStaticFieldGet() ||
-           instruction_->IsArrayGet() ||
-           instruction_->IsInstanceOf() ||
-           instruction_->IsCheckCast() ||
-           (instruction_->IsInvokeVirtual() && instruction_->GetLocations()->Intrinsified()))
-        << "Unexpected instruction in read barrier for heap reference slow path: "
-        << instruction_->DebugName();
-
-    __ Bind(GetEntryLabel());
-    SaveLiveRegisters(codegen, locations);
-
-    // We may have to change the index's value, but as `index_` is a
-    // constant member (like other "inputs" of this slow path),
-    // introduce a copy of it, `index`.
-    Location index = index_;
-    if (index_.IsValid()) {
-      // Handle `index_` for HArrayGet and UnsafeGetObject/UnsafeGetObjectVolatile intrinsics.
-      if (instruction_->IsArrayGet()) {
-        // Compute the actual memory offset and store it in `index`.
-        GpuRegister index_reg = index_.AsRegister<GpuRegister>();
-        DCHECK(locations->GetLiveRegisters()->ContainsCoreRegister(index_reg));
-        if (codegen->IsCoreCalleeSaveRegister(index_reg)) {
-          // We are about to change the value of `index_reg` (see the
-          // calls to art::mips64::Mips64Assembler::Sll and
-          // art::mips64::MipsAssembler::Addiu32 below), but it has
-          // not been saved by the previous call to
-          // art::SlowPathCode::SaveLiveRegisters, as it is a
-          // callee-save register --
-          // art::SlowPathCode::SaveLiveRegisters does not consider
-          // callee-save registers, as it has been designed with the
-          // assumption that callee-save registers are supposed to be
-          // handled by the called function.  So, as a callee-save
-          // register, `index_reg` _would_ eventually be saved onto
-          // the stack, but it would be too late: we would have
-          // changed its value earlier.  Therefore, we manually save
-          // it here into another freely available register,
-          // `free_reg`, chosen of course among the caller-save
-          // registers (as a callee-save `free_reg` register would
-          // exhibit the same problem).
-          //
-          // Note we could have requested a temporary register from
-          // the register allocator instead; but we prefer not to, as
-          // this is a slow path, and we know we can find a
-          // caller-save register that is available.
-          GpuRegister free_reg = FindAvailableCallerSaveRegister(codegen);
-          __ Move(free_reg, index_reg);
-          index_reg = free_reg;
-          index = Location::RegisterLocation(index_reg);
-        } else {
-          // The initial register stored in `index_` has already been
-          // saved in the call to art::SlowPathCode::SaveLiveRegisters
-          // (as it is not a callee-save register), so we can freely
-          // use it.
-        }
-        // Shifting the index value contained in `index_reg` by the scale
-        // factor (2) cannot overflow in practice, as the runtime is
-        // unable to allocate object arrays with a size larger than
-        // 2^26 - 1 (that is, 2^28 - 4 bytes).
-        __ Sll(index_reg, index_reg, TIMES_4);
-        static_assert(
-            sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
-            "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
-        __ Addiu32(index_reg, index_reg, offset_);
-      } else {
-        // In the case of the UnsafeGetObject/UnsafeGetObjectVolatile
-        // intrinsics, `index_` is not shifted by a scale factor of 2
-        // (as in the case of ArrayGet), as it is actually an offset
-        // to an object field within an object.
-        DCHECK(instruction_->IsInvoke()) << instruction_->DebugName();
-        DCHECK(instruction_->GetLocations()->Intrinsified());
-        DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) ||
-               (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile))
-            << instruction_->AsInvoke()->GetIntrinsic();
-        DCHECK_EQ(offset_, 0U);
-        DCHECK(index_.IsRegister());
-      }
-    }
-
-    // We're moving two or three locations to locations that could
-    // overlap, so we need a parallel move resolver.
-    InvokeRuntimeCallingConvention calling_convention;
-    HParallelMove parallel_move(codegen->GetGraph()->GetAllocator());
-    parallel_move.AddMove(ref_,
-                          Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-                          DataType::Type::kReference,
-                          nullptr);
-    parallel_move.AddMove(obj_,
-                          Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
-                          DataType::Type::kReference,
-                          nullptr);
-    if (index.IsValid()) {
-      parallel_move.AddMove(index,
-                            Location::RegisterLocation(calling_convention.GetRegisterAt(2)),
-                            DataType::Type::kInt32,
-                            nullptr);
-      codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
-    } else {
-      codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
-      __ LoadConst32(calling_convention.GetRegisterAt(2), offset_);
-    }
-    mips64_codegen->InvokeRuntime(kQuickReadBarrierSlow,
-                                  instruction_,
-                                  instruction_->GetDexPc(),
-                                  this);
-    CheckEntrypointTypes<
-        kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>();
-    mips64_codegen->MoveLocation(out_, calling_convention.GetReturnLocation(type), type);
-
-    RestoreLiveRegisters(codegen, locations);
-    __ Bc(GetExitLabel());
-  }
-
-  const char* GetDescription() const override {
-    return "ReadBarrierForHeapReferenceSlowPathMIPS64";
-  }
-
- private:
-  GpuRegister FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
-    size_t ref = static_cast<int>(ref_.AsRegister<GpuRegister>());
-    size_t obj = static_cast<int>(obj_.AsRegister<GpuRegister>());
-    for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
-      if (i != ref &&
-          i != obj &&
-          !codegen->IsCoreCalleeSaveRegister(i) &&
-          !codegen->IsBlockedCoreRegister(i)) {
-        return static_cast<GpuRegister>(i);
-      }
-    }
-    // We shall never fail to find a free caller-save register, as
-    // there are more than two core caller-save registers on MIPS64
-    // (meaning it is possible to find one which is different from
-    // `ref` and `obj`).
-    DCHECK_GT(codegen->GetNumberOfCoreCallerSaveRegisters(), 2u);
-    LOG(FATAL) << "Could not find a free caller-save register";
-    UNREACHABLE();
-  }
-
-  const Location out_;
-  const Location ref_;
-  const Location obj_;
-  const uint32_t offset_;
-  // An additional location containing an index to an array.
-  // Only used for HArrayGet and the UnsafeGetObject &
-  // UnsafeGetObjectVolatile intrinsics.
-  const Location index_;
-
-  DISALLOW_COPY_AND_ASSIGN(ReadBarrierForHeapReferenceSlowPathMIPS64);
-};
-
-// Slow path generating a read barrier for a GC root.
-class ReadBarrierForRootSlowPathMIPS64 : public SlowPathCodeMIPS64 {
- public:
-  ReadBarrierForRootSlowPathMIPS64(HInstruction* instruction, Location out, Location root)
-      : SlowPathCodeMIPS64(instruction), out_(out), root_(root) {
-    DCHECK(kEmitCompilerReadBarrier);
-  }
-
-  void EmitNativeCode(CodeGenerator* codegen) override {
-    LocationSummary* locations = instruction_->GetLocations();
-    DataType::Type type = DataType::Type::kReference;
-    GpuRegister reg_out = out_.AsRegister<GpuRegister>();
-    DCHECK(locations->CanCall());
-    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out));
-    DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString())
-        << "Unexpected instruction in read barrier for GC root slow path: "
-        << instruction_->DebugName();
-
-    __ Bind(GetEntryLabel());
-    SaveLiveRegisters(codegen, locations);
-
-    InvokeRuntimeCallingConvention calling_convention;
-    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
-    mips64_codegen->MoveLocation(Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
-                                 root_,
-                                 DataType::Type::kReference);
-    mips64_codegen->InvokeRuntime(kQuickReadBarrierForRootSlow,
-                                  instruction_,
-                                  instruction_->GetDexPc(),
-                                  this);
-    CheckEntrypointTypes<kQuickReadBarrierForRootSlow, mirror::Object*, GcRoot<mirror::Object>*>();
-    mips64_codegen->MoveLocation(out_, calling_convention.GetReturnLocation(type), type);
-
-    RestoreLiveRegisters(codegen, locations);
-    __ Bc(GetExitLabel());
-  }
-
-  const char* GetDescription() const override { return "ReadBarrierForRootSlowPathMIPS64"; }
-
- private:
-  const Location out_;
-  const Location root_;
-
-  DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathMIPS64);
-};
-
-CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
-                                         const CompilerOptions& compiler_options,
-                                         OptimizingCompilerStats* stats)
-    : CodeGenerator(graph,
-                    kNumberOfGpuRegisters,
-                    kNumberOfFpuRegisters,
-                    /* number_of_register_pairs= */ 0,
-                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
-                                        arraysize(kCoreCalleeSaves)),
-                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
-                                        arraysize(kFpuCalleeSaves)),
-                    compiler_options,
-                    stats),
-      block_labels_(nullptr),
-      location_builder_(graph, this),
-      instruction_visitor_(graph, this),
-      move_resolver_(graph->GetAllocator(), this),
-      assembler_(graph->GetAllocator(),
-                 compiler_options.GetInstructionSetFeatures()->AsMips64InstructionSetFeatures()),
-      uint32_literals_(std::less<uint32_t>(),
-                       graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      uint64_literals_(std::less<uint64_t>(),
-                       graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      boot_image_method_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      method_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      boot_image_type_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      boot_image_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      boot_image_intrinsic_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      jit_string_patches_(StringReferenceValueComparator(),
-                          graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      jit_class_patches_(TypeReferenceValueComparator(),
-                         graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
-  // Save RA (containing the return address) to mimic Quick.
-  AddAllocatedRegister(Location::RegisterLocation(RA));
-}
-
-#undef __
-// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
-#define __ down_cast<Mips64Assembler*>(GetAssembler())->  // NOLINT
-#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, x).Int32Value()
-
-void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
-  // Ensure that we fix up branches.
-  __ FinalizeCode();
-
-  // Adjust native pc offsets in stack maps.
-  StackMapStream* stack_map_stream = GetStackMapStream();
-  for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
-    uint32_t old_position = stack_map_stream->GetStackMapNativePcOffset(i);
-    uint32_t new_position = __ GetAdjustedPosition(old_position);
-    DCHECK_GE(new_position, old_position);
-    stack_map_stream->SetStackMapNativePcOffset(i, new_position);
-  }
-
-  // Adjust pc offsets for the disassembly information.
-  if (disasm_info_ != nullptr) {
-    GeneratedCodeInterval* frame_entry_interval = disasm_info_->GetFrameEntryInterval();
-    frame_entry_interval->start = __ GetAdjustedPosition(frame_entry_interval->start);
-    frame_entry_interval->end = __ GetAdjustedPosition(frame_entry_interval->end);
-    for (auto& it : *disasm_info_->GetInstructionIntervals()) {
-      it.second.start = __ GetAdjustedPosition(it.second.start);
-      it.second.end = __ GetAdjustedPosition(it.second.end);
-    }
-    for (auto& it : *disasm_info_->GetSlowPathIntervals()) {
-      it.code_interval.start = __ GetAdjustedPosition(it.code_interval.start);
-      it.code_interval.end = __ GetAdjustedPosition(it.code_interval.end);
-    }
-  }
-
-  CodeGenerator::Finalize(allocator);
-}
-
-Mips64Assembler* ParallelMoveResolverMIPS64::GetAssembler() const {
-  return codegen_->GetAssembler();
-}
-
-void ParallelMoveResolverMIPS64::EmitMove(size_t index) {
-  MoveOperands* move = moves_[index];
-  codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
-}
-
-void ParallelMoveResolverMIPS64::EmitSwap(size_t index) {
-  MoveOperands* move = moves_[index];
-  codegen_->SwapLocations(move->GetDestination(), move->GetSource(), move->GetType());
-}
-
-void ParallelMoveResolverMIPS64::RestoreScratch(int reg) {
-  // Pop reg
-  __ Ld(GpuRegister(reg), SP, 0);
-  __ DecreaseFrameSize(kMips64DoublewordSize);
-}
-
-void ParallelMoveResolverMIPS64::SpillScratch(int reg) {
-  // Push reg
-  __ IncreaseFrameSize(kMips64DoublewordSize);
-  __ Sd(GpuRegister(reg), SP, 0);
-}
-
-void ParallelMoveResolverMIPS64::Exchange(int index1, int index2, bool double_slot) {
-  LoadOperandType load_type = double_slot ? kLoadDoubleword : kLoadWord;
-  StoreOperandType store_type = double_slot ? kStoreDoubleword : kStoreWord;
-  // Allocate a scratch register other than TMP, if available.
-  // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
-  // automatically unspilled when the scratch scope object is destroyed).
-  ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
-  // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
-  int stack_offset = ensure_scratch.IsSpilled() ? kMips64DoublewordSize : 0;
-  __ LoadFromOffset(load_type,
-                    GpuRegister(ensure_scratch.GetRegister()),
-                    SP,
-                    index1 + stack_offset);
-  __ LoadFromOffset(load_type,
-                    TMP,
-                    SP,
-                    index2 + stack_offset);
-  __ StoreToOffset(store_type,
-                   GpuRegister(ensure_scratch.GetRegister()),
-                   SP,
-                   index2 + stack_offset);
-  __ StoreToOffset(store_type, TMP, SP, index1 + stack_offset);
-}
-
-void ParallelMoveResolverMIPS64::ExchangeQuadSlots(int index1, int index2) {
-  __ LoadFpuFromOffset(kLoadQuadword, FTMP, SP, index1);
-  __ LoadFpuFromOffset(kLoadQuadword, FTMP2, SP, index2);
-  __ StoreFpuToOffset(kStoreQuadword, FTMP, SP, index2);
-  __ StoreFpuToOffset(kStoreQuadword, FTMP2, SP, index1);
-}
-
-static dwarf::Reg DWARFReg(GpuRegister reg) {
-  return dwarf::Reg::Mips64Core(static_cast<int>(reg));
-}
-
-static dwarf::Reg DWARFReg(FpuRegister reg) {
-  return dwarf::Reg::Mips64Fp(static_cast<int>(reg));
-}
-
-void CodeGeneratorMIPS64::GenerateFrameEntry() {
-  __ Bind(&frame_entry_label_);
-
-  if (GetCompilerOptions().CountHotnessInCompiledCode()) {
-    __ Lhu(TMP, kMethodRegisterArgument, ArtMethod::HotnessCountOffset().Int32Value());
-    __ Addiu(TMP, TMP, 1);
-    __ Sh(TMP, kMethodRegisterArgument, ArtMethod::HotnessCountOffset().Int32Value());
-  }
-
-  bool do_overflow_check =
-      FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kMips64) || !IsLeafMethod();
-
-  if (do_overflow_check) {
-    __ LoadFromOffset(
-        kLoadWord,
-        ZERO,
-        SP,
-        -static_cast<int32_t>(GetStackOverflowReservedBytes(InstructionSet::kMips64)));
-    RecordPcInfo(nullptr, 0);
-  }
-
-  if (HasEmptyFrame()) {
-    return;
-  }
-
-  // Make sure the frame size isn't unreasonably large.
-  if (GetFrameSize() > GetStackOverflowReservedBytes(InstructionSet::kMips64)) {
-    LOG(FATAL) << "Stack frame larger than "
-        << GetStackOverflowReservedBytes(InstructionSet::kMips64) << " bytes";
-  }
-
-  // Spill callee-saved registers.
-
-  uint32_t ofs = GetFrameSize();
-  __ IncreaseFrameSize(ofs);
-
-  for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
-    GpuRegister reg = kCoreCalleeSaves[i];
-    if (allocated_registers_.ContainsCoreRegister(reg)) {
-      ofs -= kMips64DoublewordSize;
-      __ StoreToOffset(kStoreDoubleword, reg, SP, ofs);
-      __ cfi().RelOffset(DWARFReg(reg), ofs);
-    }
-  }
-
-  for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
-    FpuRegister reg = kFpuCalleeSaves[i];
-    if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
-      ofs -= kMips64DoublewordSize;
-      __ StoreFpuToOffset(kStoreDoubleword, reg, SP, ofs);
-      __ cfi().RelOffset(DWARFReg(reg), ofs);
-    }
-  }
-
-  // Save the current method if we need it. Note that we do not
-  // do this in HCurrentMethod, as the instruction might have been removed
-  // in the SSA graph.
-  if (RequiresCurrentMethod()) {
-    __ StoreToOffset(kStoreDoubleword, kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
-  }
-
-  if (GetGraph()->HasShouldDeoptimizeFlag()) {
-    // Initialize should_deoptimize flag to 0.
-    __ StoreToOffset(kStoreWord, ZERO, SP, GetStackOffsetOfShouldDeoptimizeFlag());
-  }
-}
-
-void CodeGeneratorMIPS64::GenerateFrameExit() {
-  __ cfi().RememberState();
-
-  if (!HasEmptyFrame()) {
-    // Restore callee-saved registers.
-
-    // For better instruction scheduling restore RA before other registers.
-    uint32_t ofs = GetFrameSize();
-    for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
-      GpuRegister reg = kCoreCalleeSaves[i];
-      if (allocated_registers_.ContainsCoreRegister(reg)) {
-        ofs -= kMips64DoublewordSize;
-        __ LoadFromOffset(kLoadDoubleword, reg, SP, ofs);
-        __ cfi().Restore(DWARFReg(reg));
-      }
-    }
-
-    for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
-      FpuRegister reg = kFpuCalleeSaves[i];
-      if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
-        ofs -= kMips64DoublewordSize;
-        __ LoadFpuFromOffset(kLoadDoubleword, reg, SP, ofs);
-        __ cfi().Restore(DWARFReg(reg));
-      }
-    }
-
-    __ DecreaseFrameSize(GetFrameSize());
-  }
-
-  __ Jic(RA, 0);
-
-  __ cfi().RestoreState();
-  __ cfi().DefCFAOffset(GetFrameSize());
-}
-
-void CodeGeneratorMIPS64::Bind(HBasicBlock* block) {
-  __ Bind(GetLabelOf(block));
-}
-
-void CodeGeneratorMIPS64::MoveLocation(Location destination,
-                                       Location source,
-                                       DataType::Type dst_type) {
-  if (source.Equals(destination)) {
-    return;
-  }
-
-  // A valid move can always be inferred from the destination and source
-  // locations. When moving from and to a register, the argument type can be
-  // used to generate 32bit instead of 64bit moves.
-  bool unspecified_type = (dst_type == DataType::Type::kVoid);
-  DCHECK_EQ(unspecified_type, false);
-
-  if (destination.IsRegister() || destination.IsFpuRegister()) {
-    if (unspecified_type) {
-      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
-      if (source.IsStackSlot() ||
-          (src_cst != nullptr && (src_cst->IsIntConstant()
-                                  || src_cst->IsFloatConstant()
-                                  || src_cst->IsNullConstant()))) {
-        // For stack slots and 32bit constants, a 64bit type is appropriate.
-        dst_type = destination.IsRegister() ? DataType::Type::kInt32 : DataType::Type::kFloat32;
-      } else {
-        // If the source is a double stack slot or a 64bit constant, a 64bit
-        // type is appropriate. Else the source is a register, and since the
-        // type has not been specified, we chose a 64bit type to force a 64bit
-        // move.
-        dst_type = destination.IsRegister() ? DataType::Type::kInt64 : DataType::Type::kFloat64;
-      }
-    }
-    DCHECK((destination.IsFpuRegister() && DataType::IsFloatingPointType(dst_type)) ||
-           (destination.IsRegister() && !DataType::IsFloatingPointType(dst_type)));
-    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
-      // Move to GPR/FPR from stack
-      LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword;
-      if (DataType::IsFloatingPointType(dst_type)) {
-        __ LoadFpuFromOffset(load_type,
-                             destination.AsFpuRegister<FpuRegister>(),
-                             SP,
-                             source.GetStackIndex());
-      } else {
-        // TODO: use load_type = kLoadUnsignedWord when type == DataType::Type::kReference.
-        __ LoadFromOffset(load_type,
-                          destination.AsRegister<GpuRegister>(),
-                          SP,
-                          source.GetStackIndex());
-      }
-    } else if (source.IsSIMDStackSlot()) {
-      __ LoadFpuFromOffset(kLoadQuadword,
-                           destination.AsFpuRegister<FpuRegister>(),
-                           SP,
-                           source.GetStackIndex());
-    } else if (source.IsConstant()) {
-      // Move to GPR/FPR from constant
-      GpuRegister gpr = AT;
-      if (!DataType::IsFloatingPointType(dst_type)) {
-        gpr = destination.AsRegister<GpuRegister>();
-      }
-      if (dst_type == DataType::Type::kInt32 || dst_type == DataType::Type::kFloat32) {
-        int32_t value = GetInt32ValueOf(source.GetConstant()->AsConstant());
-        if (DataType::IsFloatingPointType(dst_type) && value == 0) {
-          gpr = ZERO;
-        } else {
-          __ LoadConst32(gpr, value);
-        }
-      } else {
-        int64_t value = GetInt64ValueOf(source.GetConstant()->AsConstant());
-        if (DataType::IsFloatingPointType(dst_type) && value == 0) {
-          gpr = ZERO;
-        } else {
-          __ LoadConst64(gpr, value);
-        }
-      }
-      if (dst_type == DataType::Type::kFloat32) {
-        __ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>());
-      } else if (dst_type == DataType::Type::kFloat64) {
-        __ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>());
-      }
-    } else if (source.IsRegister()) {
-      if (destination.IsRegister()) {
-        // Move to GPR from GPR
-        __ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>());
-      } else {
-        DCHECK(destination.IsFpuRegister());
-        if (DataType::Is64BitType(dst_type)) {
-          __ Dmtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
-        } else {
-          __ Mtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
-        }
-      }
-    } else if (source.IsFpuRegister()) {
-      if (destination.IsFpuRegister()) {
-        if (GetGraph()->HasSIMD()) {
-          __ MoveV(VectorRegisterFrom(destination),
-                   VectorRegisterFrom(source));
-        } else {
-          // Move to FPR from FPR
-          if (dst_type == DataType::Type::kFloat32) {
-            __ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
-          } else {
-            DCHECK_EQ(dst_type, DataType::Type::kFloat64);
-            __ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
-          }
-        }
-      } else {
-        DCHECK(destination.IsRegister());
-        if (DataType::Is64BitType(dst_type)) {
-          __ Dmfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
-        } else {
-          __ Mfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
-        }
-      }
-    }
-  } else if (destination.IsSIMDStackSlot()) {
-    if (source.IsFpuRegister()) {
-      __ StoreFpuToOffset(kStoreQuadword,
-                          source.AsFpuRegister<FpuRegister>(),
-                          SP,
-                          destination.GetStackIndex());
-    } else {
-      DCHECK(source.IsSIMDStackSlot());
-      __ LoadFpuFromOffset(kLoadQuadword,
-                           FTMP,
-                           SP,
-                           source.GetStackIndex());
-      __ StoreFpuToOffset(kStoreQuadword,
-                          FTMP,
-                          SP,
-                          destination.GetStackIndex());
-    }
-  } else {  // The destination is not a register. It must be a stack slot.
-    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
-    if (source.IsRegister() || source.IsFpuRegister()) {
-      if (unspecified_type) {
-        if (source.IsRegister()) {
-          dst_type = destination.IsStackSlot() ? DataType::Type::kInt32 : DataType::Type::kInt64;
-        } else {
-          dst_type =
-              destination.IsStackSlot() ? DataType::Type::kFloat32 : DataType::Type::kFloat64;
-        }
-      }
-      DCHECK((destination.IsDoubleStackSlot() == DataType::Is64BitType(dst_type)) &&
-             (source.IsFpuRegister() == DataType::IsFloatingPointType(dst_type)));
-      // Move to stack from GPR/FPR
-      StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
-      if (source.IsRegister()) {
-        __ StoreToOffset(store_type,
-                         source.AsRegister<GpuRegister>(),
-                         SP,
-                         destination.GetStackIndex());
-      } else {
-        __ StoreFpuToOffset(store_type,
-                            source.AsFpuRegister<FpuRegister>(),
-                            SP,
-                            destination.GetStackIndex());
-      }
-    } else if (source.IsConstant()) {
-      // Move to stack from constant
-      HConstant* src_cst = source.GetConstant();
-      StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
-      GpuRegister gpr = ZERO;
-      if (destination.IsStackSlot()) {
-        int32_t value = GetInt32ValueOf(src_cst->AsConstant());
-        if (value != 0) {
-          gpr = TMP;
-          __ LoadConst32(gpr, value);
-        }
-      } else {
-        DCHECK(destination.IsDoubleStackSlot());
-        int64_t value = GetInt64ValueOf(src_cst->AsConstant());
-        if (value != 0) {
-          gpr = TMP;
-          __ LoadConst64(gpr, value);
-        }
-      }
-      __ StoreToOffset(store_type, gpr, SP, destination.GetStackIndex());
-    } else {
-      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
-      DCHECK_EQ(source.IsDoubleStackSlot(), destination.IsDoubleStackSlot());
-      // Move to stack from stack
-      if (destination.IsStackSlot()) {
-        __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
-        __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
-      } else {
-        __ LoadFromOffset(kLoadDoubleword, TMP, SP, source.GetStackIndex());
-        __ StoreToOffset(kStoreDoubleword, TMP, SP, destination.GetStackIndex());
-      }
-    }
-  }
-}
-
-void CodeGeneratorMIPS64::SwapLocations(Location loc1, Location loc2, DataType::Type type) {
-  DCHECK(!loc1.IsConstant());
-  DCHECK(!loc2.IsConstant());
-
-  if (loc1.Equals(loc2)) {
-    return;
-  }
-
-  bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
-  bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
-  bool is_simd1 = loc1.IsSIMDStackSlot();
-  bool is_simd2 = loc2.IsSIMDStackSlot();
-  bool is_fp_reg1 = loc1.IsFpuRegister();
-  bool is_fp_reg2 = loc2.IsFpuRegister();
-
-  if (loc2.IsRegister() && loc1.IsRegister()) {
-    // Swap 2 GPRs
-    GpuRegister r1 = loc1.AsRegister<GpuRegister>();
-    GpuRegister r2 = loc2.AsRegister<GpuRegister>();
-    __ Move(TMP, r2);
-    __ Move(r2, r1);
-    __ Move(r1, TMP);
-  } else if (is_fp_reg2 && is_fp_reg1) {
-    // Swap 2 FPRs
-    if (GetGraph()->HasSIMD()) {
-      __ MoveV(static_cast<VectorRegister>(FTMP), VectorRegisterFrom(loc1));
-      __ MoveV(VectorRegisterFrom(loc1), VectorRegisterFrom(loc2));
-      __ MoveV(VectorRegisterFrom(loc2), static_cast<VectorRegister>(FTMP));
-    } else {
-      FpuRegister r1 = loc1.AsFpuRegister<FpuRegister>();
-      FpuRegister r2 = loc2.AsFpuRegister<FpuRegister>();
-      if (type == DataType::Type::kFloat32) {
-        __ MovS(FTMP, r1);
-        __ MovS(r1, r2);
-        __ MovS(r2, FTMP);
-      } else {
-        DCHECK_EQ(type, DataType::Type::kFloat64);
-        __ MovD(FTMP, r1);
-        __ MovD(r1, r2);
-        __ MovD(r2, FTMP);
-      }
-    }
-  } else if (is_slot1 != is_slot2) {
-    // Swap GPR/FPR and stack slot
-    Location reg_loc = is_slot1 ? loc2 : loc1;
-    Location mem_loc = is_slot1 ? loc1 : loc2;
-    LoadOperandType load_type = mem_loc.IsStackSlot() ? kLoadWord : kLoadDoubleword;
-    StoreOperandType store_type = mem_loc.IsStackSlot() ? kStoreWord : kStoreDoubleword;
-    // TODO: use load_type = kLoadUnsignedWord when type == DataType::Type::kReference.
-    __ LoadFromOffset(load_type, TMP, SP, mem_loc.GetStackIndex());
-    if (reg_loc.IsFpuRegister()) {
-      __ StoreFpuToOffset(store_type,
-                          reg_loc.AsFpuRegister<FpuRegister>(),
-                          SP,
-                          mem_loc.GetStackIndex());
-      if (mem_loc.IsStackSlot()) {
-        __ Mtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
-      } else {
-        DCHECK(mem_loc.IsDoubleStackSlot());
-        __ Dmtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
-      }
-    } else {
-      __ StoreToOffset(store_type, reg_loc.AsRegister<GpuRegister>(), SP, mem_loc.GetStackIndex());
-      __ Move(reg_loc.AsRegister<GpuRegister>(), TMP);
-    }
-  } else if (is_slot1 && is_slot2) {
-    move_resolver_.Exchange(loc1.GetStackIndex(),
-                            loc2.GetStackIndex(),
-                            loc1.IsDoubleStackSlot());
-  } else if (is_simd1 && is_simd2) {
-    move_resolver_.ExchangeQuadSlots(loc1.GetStackIndex(), loc2.GetStackIndex());
-  } else if ((is_fp_reg1 && is_simd2) || (is_fp_reg2 && is_simd1)) {
-    Location fp_reg_loc = is_fp_reg1 ? loc1 : loc2;
-    Location mem_loc = is_fp_reg1 ? loc2 : loc1;
-    __ LoadFpuFromOffset(kLoadQuadword, FTMP, SP, mem_loc.GetStackIndex());
-    __ StoreFpuToOffset(kStoreQuadword,
-                        fp_reg_loc.AsFpuRegister<FpuRegister>(),
-                        SP,
-                        mem_loc.GetStackIndex());
-    __ MoveV(VectorRegisterFrom(fp_reg_loc), static_cast<VectorRegister>(FTMP));
-  } else {
-    LOG(FATAL) << "Unimplemented swap between locations " << loc1 << " and " << loc2;
-  }
-}
-
-void CodeGeneratorMIPS64::MoveConstant(Location location, int32_t value) {
-  DCHECK(location.IsRegister());
-  __ LoadConst32(location.AsRegister<GpuRegister>(), value);
-}
-
-void CodeGeneratorMIPS64::AddLocationAsTemp(Location location, LocationSummary* locations) {
-  if (location.IsRegister()) {
-    locations->AddTemp(location);
-  } else {
-    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
-  }
-}
-
-void CodeGeneratorMIPS64::MarkGCCard(GpuRegister object,
-                                     GpuRegister value,
-                                     bool value_can_be_null) {
-  Mips64Label done;
-  GpuRegister card = AT;
-  GpuRegister temp = TMP;
-  if (value_can_be_null) {
-    __ Beqzc(value, &done);
-  }
-  // Load the address of the card table into `card`.
-  __ LoadFromOffset(kLoadDoubleword,
-                    card,
-                    TR,
-                    Thread::CardTableOffset<kMips64PointerSize>().Int32Value());
-  // Calculate the address of the card corresponding to `object`.
-  __ Dsrl(temp, object, gc::accounting::CardTable::kCardShift);
-  __ Daddu(temp, card, temp);
-  // Write the `art::gc::accounting::CardTable::kCardDirty` value into the
-  // `object`'s card.
-  //
-  // Register `card` contains the address of the card table. Note that the card
-  // table's base is biased during its creation so that it always starts at an
-  // address whose least-significant byte is equal to `kCardDirty` (see
-  // art::gc::accounting::CardTable::Create). Therefore the SB instruction
-  // below writes the `kCardDirty` (byte) value into the `object`'s card
-  // (located at `card + object >> kCardShift`).
-  //
-  // This dual use of the value in register `card` (1. to calculate the location
-  // of the card to mark; and 2. to load the `kCardDirty` value) saves a load
-  // (no need to explicitly load `kCardDirty` as an immediate value).
-  __ Sb(card, temp, 0);
-  if (value_can_be_null) {
-    __ Bind(&done);
-  }
-}
-
-template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
-inline void CodeGeneratorMIPS64::EmitPcRelativeLinkerPatches(
-    const ArenaDeque<PcRelativePatchInfo>& infos,
-    ArenaVector<linker::LinkerPatch>* linker_patches) {
-  for (const PcRelativePatchInfo& info : infos) {
-    const DexFile* dex_file = info.target_dex_file;
-    size_t offset_or_index = info.offset_or_index;
-    DCHECK(info.label.IsBound());
-    uint32_t literal_offset = __ GetLabelLocation(&info.label);
-    const PcRelativePatchInfo& info_high = info.patch_info_high ? *info.patch_info_high : info;
-    uint32_t pc_rel_offset = __ GetLabelLocation(&info_high.label);
-    linker_patches->push_back(Factory(literal_offset, dex_file, pc_rel_offset, offset_or_index));
-  }
-}
-
-template <linker::LinkerPatch (*Factory)(size_t, uint32_t, uint32_t)>
-linker::LinkerPatch NoDexFileAdapter(size_t literal_offset,
-                                     const DexFile* target_dex_file,
-                                     uint32_t pc_insn_offset,
-                                     uint32_t boot_image_offset) {
-  DCHECK(target_dex_file == nullptr);  // Unused for these patches, should be null.
-  return Factory(literal_offset, pc_insn_offset, boot_image_offset);
-}
-
-void CodeGeneratorMIPS64::EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) {
-  DCHECK(linker_patches->empty());
-  size_t size =
-      boot_image_method_patches_.size() +
-      method_bss_entry_patches_.size() +
-      boot_image_type_patches_.size() +
-      type_bss_entry_patches_.size() +
-      boot_image_string_patches_.size() +
-      string_bss_entry_patches_.size() +
-      boot_image_intrinsic_patches_.size();
-  linker_patches->reserve(size);
-  if (GetCompilerOptions().IsBootImage()) {
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeMethodPatch>(
-        boot_image_method_patches_, linker_patches);
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeTypePatch>(
-        boot_image_type_patches_, linker_patches);
-    EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
-        boot_image_string_patches_, linker_patches);
-    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::IntrinsicReferencePatch>>(
-        boot_image_intrinsic_patches_, linker_patches);
-  } else {
-    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::DataBimgRelRoPatch>>(
-        boot_image_method_patches_, linker_patches);
-    DCHECK(boot_image_type_patches_.empty());
-    DCHECK(boot_image_string_patches_.empty());
-    DCHECK(boot_image_intrinsic_patches_.empty());
-  }
-  EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
-      method_bss_entry_patches_, linker_patches);
-  EmitPcRelativeLinkerPatches<linker::LinkerPatch::TypeBssEntryPatch>(
-      type_bss_entry_patches_, linker_patches);
-  EmitPcRelativeLinkerPatches<linker::LinkerPatch::StringBssEntryPatch>(
-      string_bss_entry_patches_, linker_patches);
-  DCHECK_EQ(size, linker_patches->size());
-}
-
-CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageIntrinsicPatch(
-    uint32_t intrinsic_data,
-    const PcRelativePatchInfo* info_high) {
-  return NewPcRelativePatch(
-      /* dex_file= */ nullptr, intrinsic_data, info_high, &boot_image_intrinsic_patches_);
-}
-
-CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageRelRoPatch(
-    uint32_t boot_image_offset,
-    const PcRelativePatchInfo* info_high) {
-  return NewPcRelativePatch(
-      /* dex_file= */ nullptr, boot_image_offset, info_high, &boot_image_method_patches_);
-}
-
-CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageMethodPatch(
-    MethodReference target_method,
-    const PcRelativePatchInfo* info_high) {
-  return NewPcRelativePatch(
-      target_method.dex_file, target_method.index, info_high, &boot_image_method_patches_);
-}
-
-CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewMethodBssEntryPatch(
-    MethodReference target_method,
-    const PcRelativePatchInfo* info_high) {
-  return NewPcRelativePatch(
-      target_method.dex_file, target_method.index, info_high, &method_bss_entry_patches_);
-}
-
-CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageTypePatch(
-    const DexFile& dex_file,
-    dex::TypeIndex type_index,
-    const PcRelativePatchInfo* info_high) {
-  return NewPcRelativePatch(&dex_file, type_index.index_, info_high, &boot_image_type_patches_);
-}
-
-CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewTypeBssEntryPatch(
-    const DexFile& dex_file,
-    dex::TypeIndex type_index,
-    const PcRelativePatchInfo* info_high) {
-  return NewPcRelativePatch(&dex_file, type_index.index_, info_high, &type_bss_entry_patches_);
-}
-
-CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewBootImageStringPatch(
-    const DexFile& dex_file,
-    dex::StringIndex string_index,
-    const PcRelativePatchInfo* info_high) {
-  return NewPcRelativePatch(
-      &dex_file, string_index.index_, info_high, &boot_image_string_patches_);
-}
-
-CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewStringBssEntryPatch(
-    const DexFile& dex_file,
-    dex::StringIndex string_index,
-    const PcRelativePatchInfo* info_high) {
-  return NewPcRelativePatch(&dex_file, string_index.index_, info_high, &string_bss_entry_patches_);
-}
-
-CodeGeneratorMIPS64::PcRelativePatchInfo* CodeGeneratorMIPS64::NewPcRelativePatch(
-    const DexFile* dex_file,
-    uint32_t offset_or_index,
-    const PcRelativePatchInfo* info_high,
-    ArenaDeque<PcRelativePatchInfo>* patches) {
-  patches->emplace_back(dex_file, offset_or_index, info_high);
-  return &patches->back();
-}
-
-Literal* CodeGeneratorMIPS64::DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map) {
-  return map->GetOrCreate(
-      value,
-      [this, value]() { return __ NewLiteral<uint32_t>(value); });
-}
-
-Literal* CodeGeneratorMIPS64::DeduplicateUint64Literal(uint64_t value) {
-  return uint64_literals_.GetOrCreate(
-      value,
-      [this, value]() { return __ NewLiteral<uint64_t>(value); });
-}
-
-Literal* CodeGeneratorMIPS64::DeduplicateBootImageAddressLiteral(uint64_t address) {
-  return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), &uint32_literals_);
-}
-
-void CodeGeneratorMIPS64::EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info_high,
-                                                               GpuRegister out,
-                                                               PcRelativePatchInfo* info_low) {
-  DCHECK(!info_high->patch_info_high);
-  __ Bind(&info_high->label);
-  // Add the high half of a 32-bit offset to PC.
-  __ Auipc(out, /* imm16= */ 0x1234);
-  // A following instruction will add the sign-extended low half of the 32-bit
-  // offset to `out` (e.g. ld, jialc, daddiu).
-  if (info_low != nullptr) {
-    DCHECK_EQ(info_low->patch_info_high, info_high);
-    __ Bind(&info_low->label);
-  }
-}
-
-void CodeGeneratorMIPS64::LoadBootImageAddress(GpuRegister reg, uint32_t boot_image_reference) {
-  if (GetCompilerOptions().IsBootImage()) {
-    PcRelativePatchInfo* info_high = NewBootImageIntrinsicPatch(boot_image_reference);
-    PcRelativePatchInfo* info_low = NewBootImageIntrinsicPatch(boot_image_reference, info_high);
-    EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-    __ Daddiu(reg, AT, /* imm16= */ 0x5678);
-  } else if (GetCompilerOptions().GetCompilePic()) {
-    PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_reference);
-    PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_reference, info_high);
-    EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-    // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
-    __ Lwu(reg, AT, /* imm16= */ 0x5678);
-  } else {
-    DCHECK(Runtime::Current()->UseJitCompilation());
-    gc::Heap* heap = Runtime::Current()->GetHeap();
-    DCHECK(!heap->GetBootImageSpaces().empty());
-    uintptr_t address =
-        reinterpret_cast<uintptr_t>(heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference);
-    __ LoadLiteral(reg, kLoadDoubleword, DeduplicateBootImageAddressLiteral(address));
-  }
-}
-
-void CodeGeneratorMIPS64::AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke,
-                                                       uint32_t boot_image_offset) {
-  DCHECK(invoke->IsStatic());
-  InvokeRuntimeCallingConvention calling_convention;
-  GpuRegister argument = calling_convention.GetRegisterAt(0);
-  if (GetCompilerOptions().IsBootImage()) {
-    DCHECK_EQ(boot_image_offset, IntrinsicVisitor::IntegerValueOfInfo::kInvalidReference);
-    // Load the class the same way as for HLoadClass::LoadKind::kBootImageLinkTimePcRelative.
-    MethodReference target_method = invoke->GetTargetMethod();
-    dex::TypeIndex type_idx = target_method.dex_file->GetMethodId(target_method.index).class_idx_;
-    PcRelativePatchInfo* info_high = NewBootImageTypePatch(*target_method.dex_file, type_idx);
-    PcRelativePatchInfo* info_low =
-        NewBootImageTypePatch(*target_method.dex_file, type_idx, info_high);
-    EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-    __ Daddiu(argument, AT, /* imm16= */ 0x5678);
-  } else {
-    LoadBootImageAddress(argument, boot_image_offset);
-  }
-  InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc());
-  CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
-}
-
-Literal* CodeGeneratorMIPS64::DeduplicateJitStringLiteral(const DexFile& dex_file,
-                                                          dex::StringIndex string_index,
-                                                          Handle<mirror::String> handle) {
-  ReserveJitStringRoot(StringReference(&dex_file, string_index), handle);
-  return jit_string_patches_.GetOrCreate(
-      StringReference(&dex_file, string_index),
-      [this]() { return __ NewLiteral<uint32_t>(/* value= */ 0u); });
-}
-
-Literal* CodeGeneratorMIPS64::DeduplicateJitClassLiteral(const DexFile& dex_file,
-                                                         dex::TypeIndex type_index,
-                                                         Handle<mirror::Class> handle) {
-  ReserveJitClassRoot(TypeReference(&dex_file, type_index), handle);
-  return jit_class_patches_.GetOrCreate(
-      TypeReference(&dex_file, type_index),
-      [this]() { return __ NewLiteral<uint32_t>(/* value= */ 0u); });
-}
-
-void CodeGeneratorMIPS64::PatchJitRootUse(uint8_t* code,
-                                          const uint8_t* roots_data,
-                                          const Literal* literal,
-                                          uint64_t index_in_table) const {
-  uint32_t literal_offset = GetAssembler().GetLabelLocation(literal->GetLabel());
-  uintptr_t address =
-      reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
-  reinterpret_cast<uint32_t*>(code + literal_offset)[0] = dchecked_integral_cast<uint32_t>(address);
-}
-
-void CodeGeneratorMIPS64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
-  for (const auto& entry : jit_string_patches_) {
-    const StringReference& string_reference = entry.first;
-    Literal* table_entry_literal = entry.second;
-    uint64_t index_in_table = GetJitStringRootIndex(string_reference);
-    PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
-  }
-  for (const auto& entry : jit_class_patches_) {
-    const TypeReference& type_reference = entry.first;
-    Literal* table_entry_literal = entry.second;
-    uint64_t index_in_table = GetJitClassRootIndex(type_reference);
-    PatchJitRootUse(code, roots_data, table_entry_literal, index_in_table);
-  }
-}
-
-void CodeGeneratorMIPS64::SetupBlockedRegisters() const {
-  // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
-  blocked_core_registers_[ZERO] = true;
-  blocked_core_registers_[K0] = true;
-  blocked_core_registers_[K1] = true;
-  blocked_core_registers_[GP] = true;
-  blocked_core_registers_[SP] = true;
-  blocked_core_registers_[RA] = true;
-
-  // AT, TMP(T8) and TMP2(T3) are used as temporary/scratch
-  // registers (similar to how AT is used by MIPS assemblers).
-  blocked_core_registers_[AT] = true;
-  blocked_core_registers_[TMP] = true;
-  blocked_core_registers_[TMP2] = true;
-  blocked_fpu_registers_[FTMP] = true;
-
-  if (GetInstructionSetFeatures().HasMsa()) {
-    // To be used just for MSA instructions.
-    blocked_fpu_registers_[FTMP2] = true;
-  }
-
-  // Reserve suspend and thread registers.
-  blocked_core_registers_[S0] = true;
-  blocked_core_registers_[TR] = true;
-
-  // Reserve T9 for function calls
-  blocked_core_registers_[T9] = true;
-
-  if (GetGraph()->IsDebuggable()) {
-    // Stubs do not save callee-save floating point registers. If the graph
-    // is debuggable, we need to deal with these registers differently. For
-    // now, just block them.
-    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
-      blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
-    }
-  }
-}
-
-size_t CodeGeneratorMIPS64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
-  __ StoreToOffset(kStoreDoubleword, GpuRegister(reg_id), SP, stack_index);
-  return kMips64DoublewordSize;
-}
-
-size_t CodeGeneratorMIPS64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
-  __ LoadFromOffset(kLoadDoubleword, GpuRegister(reg_id), SP, stack_index);
-  return kMips64DoublewordSize;
-}
-
-size_t CodeGeneratorMIPS64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
-  __ StoreFpuToOffset(GetGraph()->HasSIMD() ? kStoreQuadword : kStoreDoubleword,
-                      FpuRegister(reg_id),
-                      SP,
-                      stack_index);
-  return GetFloatingPointSpillSlotSize();
-}
-
-size_t CodeGeneratorMIPS64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
-  __ LoadFpuFromOffset(GetGraph()->HasSIMD() ? kLoadQuadword : kLoadDoubleword,
-                       FpuRegister(reg_id),
-                       SP,
-                       stack_index);
-  return GetFloatingPointSpillSlotSize();
-}
-
-void CodeGeneratorMIPS64::DumpCoreRegister(std::ostream& stream, int reg) const {
-  stream << GpuRegister(reg);
-}
-
-void CodeGeneratorMIPS64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
-  stream << FpuRegister(reg);
-}
-
-const Mips64InstructionSetFeatures& CodeGeneratorMIPS64::GetInstructionSetFeatures() const {
-  return *GetCompilerOptions().GetInstructionSetFeatures()->AsMips64InstructionSetFeatures();
-}
-
-void CodeGeneratorMIPS64::InvokeRuntime(QuickEntrypointEnum entrypoint,
-                                        HInstruction* instruction,
-                                        uint32_t dex_pc,
-                                        SlowPathCode* slow_path) {
-  ValidateInvokeRuntime(entrypoint, instruction, slow_path);
-  GenerateInvokeRuntime(GetThreadOffset<kMips64PointerSize>(entrypoint).Int32Value());
-  if (EntrypointRequiresStackMap(entrypoint)) {
-    RecordPcInfo(instruction, dex_pc, slow_path);
-  }
-}
-
-void CodeGeneratorMIPS64::InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
-                                                              HInstruction* instruction,
-                                                              SlowPathCode* slow_path) {
-  ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction, slow_path);
-  GenerateInvokeRuntime(entry_point_offset);
-}
-
-void CodeGeneratorMIPS64::GenerateInvokeRuntime(int32_t entry_point_offset) {
-  __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
-  __ Jalr(T9);
-  __ Nop();
-}
-
-void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
-                                                                      GpuRegister class_reg) {
-  constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
-  const size_t status_byte_offset =
-      mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
-  constexpr uint32_t shifted_initialized_value =
-      enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
-
-  __ LoadFromOffset(kLoadUnsignedByte, TMP, class_reg, status_byte_offset);
-  __ Sltiu(TMP, TMP, shifted_initialized_value);
-  __ Bnezc(TMP, slow_path->GetEntryLabel());
-  // Even if the initialized flag is set, we need to ensure consistent memory ordering.
-  __ Sync(0);
-  __ Bind(slow_path->GetExitLabel());
-}
-
-void InstructionCodeGeneratorMIPS64::GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check,
-                                                                       GpuRegister temp) {
-  uint32_t path_to_root = check->GetBitstringPathToRoot();
-  uint32_t mask = check->GetBitstringMask();
-  DCHECK(IsPowerOfTwo(mask + 1));
-  size_t mask_bits = WhichPowerOf2(mask + 1);
-
-  if (mask_bits == 16u) {
-    // Load only the bitstring part of the status word.
-    __ LoadFromOffset(
-        kLoadUnsignedHalfword, temp, temp, mirror::Class::StatusOffset().Int32Value());
-    // Compare the bitstring bits using XOR.
-    __ Xori(temp, temp, dchecked_integral_cast<uint16_t>(path_to_root));
-  } else {
-    // /* uint32_t */ temp = temp->status_
-    __ LoadFromOffset(kLoadWord, temp, temp, mirror::Class::StatusOffset().Int32Value());
-    // Compare the bitstring bits using XOR.
-    if (IsUint<16>(path_to_root)) {
-      __ Xori(temp, temp, dchecked_integral_cast<uint16_t>(path_to_root));
-    } else {
-      __ LoadConst32(TMP, path_to_root);
-      __ Xor(temp, temp, TMP);
-    }
-    // Shift out bits that do not contribute to the comparison.
-    __ Sll(temp, temp, 32 - mask_bits);
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
-  __ Sync(0);  // only stype 0 is supported
-}
-
-void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
-                                                          HBasicBlock* successor) {
-  SuspendCheckSlowPathMIPS64* slow_path =
-      down_cast<SuspendCheckSlowPathMIPS64*>(instruction->GetSlowPath());
-
-  if (slow_path == nullptr) {
-    slow_path =
-        new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathMIPS64(instruction, successor);
-    instruction->SetSlowPath(slow_path);
-    codegen_->AddSlowPath(slow_path);
-    if (successor != nullptr) {
-      DCHECK(successor->IsLoopHeader());
-    }
-  } else {
-    DCHECK_EQ(slow_path->GetSuccessor(), successor);
-  }
-
-  __ LoadFromOffset(kLoadUnsignedHalfword,
-                    TMP,
-                    TR,
-                    Thread::ThreadFlagsOffset<kMips64PointerSize>().Int32Value());
-  if (successor == nullptr) {
-    __ Bnezc(TMP, slow_path->GetEntryLabel());
-    __ Bind(slow_path->GetReturnLabel());
-  } else {
-    __ Beqzc(TMP, codegen_->GetLabelOf(successor));
-    __ Bc(slow_path->GetEntryLabel());
-    // slow_path will return to GetLabelOf(successor).
-  }
-}
-
-InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph,
-                                                               CodeGeneratorMIPS64* codegen)
-      : InstructionCodeGenerator(graph, codegen),
-        assembler_(codegen->GetAssembler()),
-        codegen_(codegen) {}
-
-void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
-  DCHECK_EQ(instruction->InputCount(), 2U);
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-  DataType::Type type = instruction->GetResultType();
-  switch (type) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64: {
-      locations->SetInAt(0, Location::RequiresRegister());
-      HInstruction* right = instruction->InputAt(1);
-      bool can_use_imm = false;
-      if (right->IsConstant()) {
-        int64_t imm = CodeGenerator::GetInt64ValueOf(right->AsConstant());
-        if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
-          can_use_imm = IsUint<16>(imm);
-        } else {
-          DCHECK(instruction->IsAdd() || instruction->IsSub());
-          bool single_use = right->GetUses().HasExactlyOneElement();
-          if (instruction->IsSub()) {
-            if (!(type == DataType::Type::kInt32 && imm == INT32_MIN)) {
-              imm = -imm;
-            }
-          }
-          if (type == DataType::Type::kInt32) {
-            can_use_imm = IsInt<16>(imm) || (Low16Bits(imm) == 0) || single_use;
-          } else {
-            can_use_imm = IsInt<16>(imm) || (IsInt<32>(imm) && (Low16Bits(imm) == 0)) || single_use;
-          }
-        }
-      }
-      if (can_use_imm)
-        locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
-      else
-        locations->SetInAt(1, Location::RequiresRegister());
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      }
-      break;
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-      break;
-
-    default:
-      LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
-  DataType::Type type = instruction->GetType();
-  LocationSummary* locations = instruction->GetLocations();
-
-  switch (type) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64: {
-      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
-      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
-      Location rhs_location = locations->InAt(1);
-
-      GpuRegister rhs_reg = ZERO;
-      int64_t rhs_imm = 0;
-      bool use_imm = rhs_location.IsConstant();
-      if (use_imm) {
-        rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
-      } else {
-        rhs_reg = rhs_location.AsRegister<GpuRegister>();
-      }
-
-      if (instruction->IsAnd()) {
-        if (use_imm)
-          __ Andi(dst, lhs, rhs_imm);
-        else
-          __ And(dst, lhs, rhs_reg);
-      } else if (instruction->IsOr()) {
-        if (use_imm)
-          __ Ori(dst, lhs, rhs_imm);
-        else
-          __ Or(dst, lhs, rhs_reg);
-      } else if (instruction->IsXor()) {
-        if (use_imm)
-          __ Xori(dst, lhs, rhs_imm);
-        else
-          __ Xor(dst, lhs, rhs_reg);
-      } else if (instruction->IsAdd() || instruction->IsSub()) {
-        if (instruction->IsSub()) {
-          rhs_imm = -rhs_imm;
-        }
-        if (type == DataType::Type::kInt32) {
-          if (use_imm) {
-            if (IsInt<16>(rhs_imm)) {
-              __ Addiu(dst, lhs, rhs_imm);
-            } else {
-              int16_t rhs_imm_high = High16Bits(rhs_imm);
-              int16_t rhs_imm_low = Low16Bits(rhs_imm);
-              if (rhs_imm_low < 0) {
-                rhs_imm_high += 1;
-              }
-              __ Aui(dst, lhs, rhs_imm_high);
-              if (rhs_imm_low != 0) {
-                __ Addiu(dst, dst, rhs_imm_low);
-              }
-            }
-          } else {
-            if (instruction->IsAdd()) {
-              __ Addu(dst, lhs, rhs_reg);
-            } else {
-              DCHECK(instruction->IsSub());
-              __ Subu(dst, lhs, rhs_reg);
-            }
-          }
-        } else {
-          if (use_imm) {
-            if (IsInt<16>(rhs_imm)) {
-              __ Daddiu(dst, lhs, rhs_imm);
-            } else if (IsInt<32>(rhs_imm)) {
-              int16_t rhs_imm_high = High16Bits(rhs_imm);
-              int16_t rhs_imm_low = Low16Bits(rhs_imm);
-              bool overflow_hi16 = false;
-              if (rhs_imm_low < 0) {
-                rhs_imm_high += 1;
-                overflow_hi16 = (rhs_imm_high == -32768);
-              }
-              __ Daui(dst, lhs, rhs_imm_high);
-              if (rhs_imm_low != 0) {
-                __ Daddiu(dst, dst, rhs_imm_low);
-              }
-              if (overflow_hi16) {
-                __ Dahi(dst, 1);
-              }
-            } else {
-              int16_t rhs_imm_low = Low16Bits(Low32Bits(rhs_imm));
-              if (rhs_imm_low < 0) {
-                rhs_imm += (INT64_C(1) << 16);
-              }
-              int16_t rhs_imm_upper = High16Bits(Low32Bits(rhs_imm));
-              if (rhs_imm_upper < 0) {
-                rhs_imm += (INT64_C(1) << 32);
-              }
-              int16_t rhs_imm_high = Low16Bits(High32Bits(rhs_imm));
-              if (rhs_imm_high < 0) {
-                rhs_imm += (INT64_C(1) << 48);
-              }
-              int16_t rhs_imm_top = High16Bits(High32Bits(rhs_imm));
-              GpuRegister tmp = lhs;
-              if (rhs_imm_low != 0) {
-                __ Daddiu(dst, tmp, rhs_imm_low);
-                tmp = dst;
-              }
-              // Dahi and Dati must use the same input and output register, so we have to initialize
-              // the dst register using Daddiu or Daui, even when the intermediate value is zero:
-              // Daui(dst, lhs, 0).
-              if ((rhs_imm_upper != 0) || (rhs_imm_low == 0)) {
-                __ Daui(dst, tmp, rhs_imm_upper);
-              }
-              if (rhs_imm_high != 0) {
-                __ Dahi(dst, rhs_imm_high);
-              }
-              if (rhs_imm_top != 0) {
-                __ Dati(dst, rhs_imm_top);
-              }
-            }
-          } else if (instruction->IsAdd()) {
-            __ Daddu(dst, lhs, rhs_reg);
-          } else {
-            DCHECK(instruction->IsSub());
-            __ Dsubu(dst, lhs, rhs_reg);
-          }
-        }
-      }
-      break;
-    }
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64: {
-      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
-      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
-      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
-      if (instruction->IsAdd()) {
-        if (type == DataType::Type::kFloat32)
-          __ AddS(dst, lhs, rhs);
-        else
-          __ AddD(dst, lhs, rhs);
-      } else if (instruction->IsSub()) {
-        if (type == DataType::Type::kFloat32)
-          __ SubS(dst, lhs, rhs);
-        else
-          __ SubD(dst, lhs, rhs);
-      } else {
-        LOG(FATAL) << "Unexpected floating-point binary operation";
-      }
-      break;
-    }
-    default:
-      LOG(FATAL) << "Unexpected binary operation type " << type;
-  }
-}
-
-void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
-  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
-
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instr);
-  DataType::Type type = instr->GetResultType();
-  switch (type) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64: {
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-    }
-    default:
-      LOG(FATAL) << "Unexpected shift type " << type;
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
-  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr() || instr->IsRor());
-  LocationSummary* locations = instr->GetLocations();
-  DataType::Type type = instr->GetType();
-
-  switch (type) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64: {
-      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
-      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
-      Location rhs_location = locations->InAt(1);
-
-      GpuRegister rhs_reg = ZERO;
-      int64_t rhs_imm = 0;
-      bool use_imm = rhs_location.IsConstant();
-      if (use_imm) {
-        rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
-      } else {
-        rhs_reg = rhs_location.AsRegister<GpuRegister>();
-      }
-
-      if (use_imm) {
-        uint32_t shift_value = rhs_imm &
-            (type == DataType::Type::kInt32 ? kMaxIntShiftDistance : kMaxLongShiftDistance);
-
-        if (shift_value == 0) {
-          if (dst != lhs) {
-            __ Move(dst, lhs);
-          }
-        } else if (type == DataType::Type::kInt32) {
-          if (instr->IsShl()) {
-            __ Sll(dst, lhs, shift_value);
-          } else if (instr->IsShr()) {
-            __ Sra(dst, lhs, shift_value);
-          } else if (instr->IsUShr()) {
-            __ Srl(dst, lhs, shift_value);
-          } else {
-            __ Rotr(dst, lhs, shift_value);
-          }
-        } else {
-          if (shift_value < 32) {
-            if (instr->IsShl()) {
-              __ Dsll(dst, lhs, shift_value);
-            } else if (instr->IsShr()) {
-              __ Dsra(dst, lhs, shift_value);
-            } else if (instr->IsUShr()) {
-              __ Dsrl(dst, lhs, shift_value);
-            } else {
-              __ Drotr(dst, lhs, shift_value);
-            }
-          } else {
-            shift_value -= 32;
-            if (instr->IsShl()) {
-              __ Dsll32(dst, lhs, shift_value);
-            } else if (instr->IsShr()) {
-              __ Dsra32(dst, lhs, shift_value);
-            } else if (instr->IsUShr()) {
-              __ Dsrl32(dst, lhs, shift_value);
-            } else {
-              __ Drotr32(dst, lhs, shift_value);
-            }
-          }
-        }
-      } else {
-        if (type == DataType::Type::kInt32) {
-          if (instr->IsShl()) {
-            __ Sllv(dst, lhs, rhs_reg);
-          } else if (instr->IsShr()) {
-            __ Srav(dst, lhs, rhs_reg);
-          } else if (instr->IsUShr()) {
-            __ Srlv(dst, lhs, rhs_reg);
-          } else {
-            __ Rotrv(dst, lhs, rhs_reg);
-          }
-        } else {
-          if (instr->IsShl()) {
-            __ Dsllv(dst, lhs, rhs_reg);
-          } else if (instr->IsShr()) {
-            __ Dsrav(dst, lhs, rhs_reg);
-          } else if (instr->IsUShr()) {
-            __ Dsrlv(dst, lhs, rhs_reg);
-          } else {
-            __ Drotrv(dst, lhs, rhs_reg);
-          }
-        }
-      }
-      break;
-    }
-    default:
-      LOG(FATAL) << "Unexpected shift operation type " << type;
-  }
-}
-
-void LocationsBuilderMIPS64::VisitAdd(HAdd* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitAdd(HAdd* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void LocationsBuilderMIPS64::VisitAnd(HAnd* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitAnd(HAnd* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) {
-  DataType::Type type = instruction->GetType();
-  bool object_array_get_with_read_barrier =
-      kEmitCompilerReadBarrier && (type == DataType::Type::kReference);
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(instruction,
-                                                       object_array_get_with_read_barrier
-                                                           ? LocationSummary::kCallOnSlowPath
-                                                           : LocationSummary::kNoCall);
-  if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
-    locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
-  }
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
-  if (DataType::IsFloatingPointType(type)) {
-    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-  } else {
-    // The output overlaps in the case of an object array get with
-    // read barriers enabled: we do not want the move to overwrite the
-    // array's location, as we need it to emit the read barrier.
-    locations->SetOut(Location::RequiresRegister(),
-                      object_array_get_with_read_barrier
-                          ? Location::kOutputOverlap
-                          : Location::kNoOutputOverlap);
-  }
-  // We need a temporary register for the read barrier marking slow
-  // path in CodeGeneratorMIPS64::GenerateArrayLoadWithBakerReadBarrier.
-  if (object_array_get_with_read_barrier && kUseBakerReadBarrier) {
-    bool temp_needed = instruction->GetIndex()->IsConstant()
-        ? !kBakerReadBarrierThunksEnableForFields
-        : !kBakerReadBarrierThunksEnableForArrays;
-    if (temp_needed) {
-      locations->AddTemp(Location::RequiresRegister());
-    }
-  }
-}
-
-static auto GetImplicitNullChecker(HInstruction* instruction, CodeGeneratorMIPS64* codegen) {
-  auto null_checker = [codegen, instruction]() {
-    codegen->MaybeRecordImplicitNullCheck(instruction);
-  };
-  return null_checker;
-}
-
-void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  Location obj_loc = locations->InAt(0);
-  GpuRegister obj = obj_loc.AsRegister<GpuRegister>();
-  Location out_loc = locations->Out();
-  Location index = locations->InAt(1);
-  uint32_t data_offset = CodeGenerator::GetArrayDataOffset(instruction);
-  auto null_checker = GetImplicitNullChecker(instruction, codegen_);
-
-  DataType::Type type = instruction->GetType();
-  const bool maybe_compressed_char_at = mirror::kUseStringCompression &&
-                                        instruction->IsStringCharAt();
-  switch (type) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8: {
-      GpuRegister out = out_loc.AsRegister<GpuRegister>();
-      if (index.IsConstant()) {
-        size_t offset =
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
-        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset, null_checker);
-      } else {
-        __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
-        __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kInt8: {
-      GpuRegister out = out_loc.AsRegister<GpuRegister>();
-      if (index.IsConstant()) {
-        size_t offset =
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
-        __ LoadFromOffset(kLoadSignedByte, out, obj, offset, null_checker);
-      } else {
-        __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
-        __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kUint16: {
-      GpuRegister out = out_loc.AsRegister<GpuRegister>();
-      if (maybe_compressed_char_at) {
-        uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
-        __ LoadFromOffset(kLoadWord, TMP, obj, count_offset, null_checker);
-        __ Dext(TMP, TMP, 0, 1);
-        static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
-                      "Expecting 0=compressed, 1=uncompressed");
-      }
-      if (index.IsConstant()) {
-        int32_t const_index = index.GetConstant()->AsIntConstant()->GetValue();
-        if (maybe_compressed_char_at) {
-          Mips64Label uncompressed_load, done;
-          __ Bnezc(TMP, &uncompressed_load);
-          __ LoadFromOffset(kLoadUnsignedByte,
-                            out,
-                            obj,
-                            data_offset + (const_index << TIMES_1));
-          __ Bc(&done);
-          __ Bind(&uncompressed_load);
-          __ LoadFromOffset(kLoadUnsignedHalfword,
-                            out,
-                            obj,
-                            data_offset + (const_index << TIMES_2));
-          __ Bind(&done);
-        } else {
-          __ LoadFromOffset(kLoadUnsignedHalfword,
-                            out,
-                            obj,
-                            data_offset + (const_index << TIMES_2),
-                            null_checker);
-        }
-      } else {
-        GpuRegister index_reg = index.AsRegister<GpuRegister>();
-        if (maybe_compressed_char_at) {
-          Mips64Label uncompressed_load, done;
-          __ Bnezc(TMP, &uncompressed_load);
-          __ Daddu(TMP, obj, index_reg);
-          __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
-          __ Bc(&done);
-          __ Bind(&uncompressed_load);
-          __ Dlsa(TMP, index_reg, obj, TIMES_2);
-          __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
-          __ Bind(&done);
-        } else {
-          __ Dlsa(TMP, index_reg, obj, TIMES_2);
-          __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset, null_checker);
-        }
-      }
-      break;
-    }
-
-    case DataType::Type::kInt16: {
-      GpuRegister out = out_loc.AsRegister<GpuRegister>();
-      if (index.IsConstant()) {
-        size_t offset =
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
-        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset, null_checker);
-      } else {
-        __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_2);
-        __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kInt32: {
-      DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
-      GpuRegister out = out_loc.AsRegister<GpuRegister>();
-      LoadOperandType load_type =
-          (type == DataType::Type::kReference) ? kLoadUnsignedWord : kLoadWord;
-      if (index.IsConstant()) {
-        size_t offset =
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
-        __ LoadFromOffset(load_type, out, obj, offset, null_checker);
-      } else {
-        __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_4);
-        __ LoadFromOffset(load_type, out, TMP, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kReference: {
-      static_assert(
-          sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
-          "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
-      // /* HeapReference<Object> */ out =
-      //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
-        bool temp_needed = index.IsConstant()
-            ? !kBakerReadBarrierThunksEnableForFields
-            : !kBakerReadBarrierThunksEnableForArrays;
-        Location temp = temp_needed ? locations->GetTemp(0) : Location::NoLocation();
-        // Note that a potential implicit null check is handled in this
-        // CodeGeneratorMIPS64::GenerateArrayLoadWithBakerReadBarrier call.
-        DCHECK(!instruction->CanDoImplicitNullCheckOn(instruction->InputAt(0)));
-        if (index.IsConstant()) {
-          // Array load with a constant index can be treated as a field load.
-          size_t offset =
-              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
-          codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
-                                                          out_loc,
-                                                          obj,
-                                                          offset,
-                                                          temp,
-                                                          /* needs_null_check= */ false);
-        } else {
-          codegen_->GenerateArrayLoadWithBakerReadBarrier(instruction,
-                                                          out_loc,
-                                                          obj,
-                                                          data_offset,
-                                                          index,
-                                                          temp,
-                                                          /* needs_null_check= */ false);
-        }
-      } else {
-        GpuRegister out = out_loc.AsRegister<GpuRegister>();
-        if (index.IsConstant()) {
-          size_t offset =
-              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
-          __ LoadFromOffset(kLoadUnsignedWord, out, obj, offset, null_checker);
-          // If read barriers are enabled, emit read barriers other than
-          // Baker's using a slow path (and also unpoison the loaded
-          // reference, if heap poisoning is enabled).
-          codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
-        } else {
-          __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_4);
-          __ LoadFromOffset(kLoadUnsignedWord, out, TMP, data_offset, null_checker);
-          // If read barriers are enabled, emit read barriers other than
-          // Baker's using a slow path (and also unpoison the loaded
-          // reference, if heap poisoning is enabled).
-          codegen_->MaybeGenerateReadBarrierSlow(instruction,
-                                                 out_loc,
-                                                 out_loc,
-                                                 obj_loc,
-                                                 data_offset,
-                                                 index);
-        }
-      }
-      break;
-    }
-
-    case DataType::Type::kInt64: {
-      GpuRegister out = out_loc.AsRegister<GpuRegister>();
-      if (index.IsConstant()) {
-        size_t offset =
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
-        __ LoadFromOffset(kLoadDoubleword, out, obj, offset, null_checker);
-      } else {
-        __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_8);
-        __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kFloat32: {
-      FpuRegister out = out_loc.AsFpuRegister<FpuRegister>();
-      if (index.IsConstant()) {
-        size_t offset =
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
-        __ LoadFpuFromOffset(kLoadWord, out, obj, offset, null_checker);
-      } else {
-        __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_4);
-        __ LoadFpuFromOffset(kLoadWord, out, TMP, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kFloat64: {
-      FpuRegister out = out_loc.AsFpuRegister<FpuRegister>();
-      if (index.IsConstant()) {
-        size_t offset =
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
-        __ LoadFpuFromOffset(kLoadDoubleword, out, obj, offset, null_checker);
-      } else {
-        __ Dlsa(TMP, index.AsRegister<GpuRegister>(), obj, TIMES_8);
-        __ LoadFpuFromOffset(kLoadDoubleword, out, TMP, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kUint32:
-    case DataType::Type::kUint64:
-    case DataType::Type::kVoid:
-      LOG(FATAL) << "Unreachable type " << instruction->GetType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitArrayLength(HArrayLength* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  uint32_t offset = CodeGenerator::GetArrayLengthOffset(instruction);
-  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-  __ LoadFromOffset(kLoadWord, out, obj, offset);
-  codegen_->MaybeRecordImplicitNullCheck(instruction);
-  // Mask out compression flag from String's array length.
-  if (mirror::kUseStringCompression && instruction->IsStringLength()) {
-    __ Srl(out, out, 1u);
-  }
-}
-
-Location LocationsBuilderMIPS64::RegisterOrZeroConstant(HInstruction* instruction) {
-  return (instruction->IsConstant() && instruction->AsConstant()->IsZeroBitPattern())
-      ? Location::ConstantLocation(instruction->AsConstant())
-      : Location::RequiresRegister();
-}
-
-Location LocationsBuilderMIPS64::FpuRegisterOrConstantForStore(HInstruction* instruction) {
-  // We can store 0.0 directly (from the ZERO register) without loading it into an FPU register.
-  // We can store a non-zero float or double constant without first loading it into the FPU,
-  // but we should only prefer this if the constant has a single use.
-  if (instruction->IsConstant() &&
-      (instruction->AsConstant()->IsZeroBitPattern() ||
-       instruction->GetUses().HasExactlyOneElement())) {
-    return Location::ConstantLocation(instruction->AsConstant());
-    // Otherwise fall through and require an FPU register for the constant.
-  }
-  return Location::RequiresFpuRegister();
-}
-
-void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) {
-  DataType::Type value_type = instruction->GetComponentType();
-
-  bool needs_write_barrier =
-      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
-  bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
-
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
-      instruction,
-      may_need_runtime_call_for_type_check ?
-          LocationSummary::kCallOnSlowPath :
-          LocationSummary::kNoCall);
-
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
-  if (DataType::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
-    locations->SetInAt(2, FpuRegisterOrConstantForStore(instruction->InputAt(2)));
-  } else {
-    locations->SetInAt(2, RegisterOrZeroConstant(instruction->InputAt(2)));
-  }
-  if (needs_write_barrier) {
-    // Temporary register for the write barrier.
-    locations->AddTemp(Location::RequiresRegister());  // Possibly used for ref. poisoning too.
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
-  Location index = locations->InAt(1);
-  Location value_location = locations->InAt(2);
-  DataType::Type value_type = instruction->GetComponentType();
-  bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
-  bool needs_write_barrier =
-      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
-  auto null_checker = GetImplicitNullChecker(instruction, codegen_);
-  GpuRegister base_reg = index.IsConstant() ? obj : TMP;
-
-  switch (value_type) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
-      if (index.IsConstant()) {
-        data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1;
-      } else {
-        __ Daddu(base_reg, obj, index.AsRegister<GpuRegister>());
-      }
-      if (value_location.IsConstant()) {
-        int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
-        __ StoreConstToOffset(kStoreByte, value, base_reg, data_offset, TMP, null_checker);
-      } else {
-        GpuRegister value = value_location.AsRegister<GpuRegister>();
-        __ StoreToOffset(kStoreByte, value, base_reg, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
-      if (index.IsConstant()) {
-        data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2;
-      } else {
-        __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_2);
-      }
-      if (value_location.IsConstant()) {
-        int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
-        __ StoreConstToOffset(kStoreHalfword, value, base_reg, data_offset, TMP, null_checker);
-      } else {
-        GpuRegister value = value_location.AsRegister<GpuRegister>();
-        __ StoreToOffset(kStoreHalfword, value, base_reg, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kInt32: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
-      if (index.IsConstant()) {
-        data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
-      } else {
-        __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_4);
-      }
-      if (value_location.IsConstant()) {
-        int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
-        __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
-      } else {
-        GpuRegister value = value_location.AsRegister<GpuRegister>();
-        __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kReference: {
-      if (value_location.IsConstant()) {
-        // Just setting null.
-        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
-        if (index.IsConstant()) {
-          data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
-        } else {
-          __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_4);
-        }
-        int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
-        DCHECK_EQ(value, 0);
-        __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
-        DCHECK(!needs_write_barrier);
-        DCHECK(!may_need_runtime_call_for_type_check);
-        break;
-      }
-
-      DCHECK(needs_write_barrier);
-      GpuRegister value = value_location.AsRegister<GpuRegister>();
-      GpuRegister temp1 = locations->GetTemp(0).AsRegister<GpuRegister>();
-      GpuRegister temp2 = TMP;  // Doesn't need to survive slow path.
-      uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-      uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
-      uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
-      Mips64Label done;
-      SlowPathCodeMIPS64* slow_path = nullptr;
-
-      if (may_need_runtime_call_for_type_check) {
-        slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathMIPS64(instruction);
-        codegen_->AddSlowPath(slow_path);
-        if (instruction->GetValueCanBeNull()) {
-          Mips64Label non_zero;
-          __ Bnezc(value, &non_zero);
-          uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
-          if (index.IsConstant()) {
-            data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
-          } else {
-            __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_4);
-          }
-          __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
-          __ Bc(&done);
-          __ Bind(&non_zero);
-        }
-
-        // Note that when read barriers are enabled, the type checks
-        // are performed without read barriers.  This is fine, even in
-        // the case where a class object is in the from-space after
-        // the flip, as a comparison involving such a type would not
-        // produce a false positive; it may of course produce a false
-        // negative, in which case we would take the ArraySet slow
-        // path.
-
-        // /* HeapReference<Class> */ temp1 = obj->klass_
-        __ LoadFromOffset(kLoadUnsignedWord, temp1, obj, class_offset, null_checker);
-        __ MaybeUnpoisonHeapReference(temp1);
-
-        // /* HeapReference<Class> */ temp1 = temp1->component_type_
-        __ LoadFromOffset(kLoadUnsignedWord, temp1, temp1, component_offset);
-        // /* HeapReference<Class> */ temp2 = value->klass_
-        __ LoadFromOffset(kLoadUnsignedWord, temp2, value, class_offset);
-        // If heap poisoning is enabled, no need to unpoison `temp1`
-        // nor `temp2`, as we are comparing two poisoned references.
-
-        if (instruction->StaticTypeOfArrayIsObjectArray()) {
-          Mips64Label do_put;
-          __ Beqc(temp1, temp2, &do_put);
-          // If heap poisoning is enabled, the `temp1` reference has
-          // not been unpoisoned yet; unpoison it now.
-          __ MaybeUnpoisonHeapReference(temp1);
-
-          // /* HeapReference<Class> */ temp1 = temp1->super_class_
-          __ LoadFromOffset(kLoadUnsignedWord, temp1, temp1, super_offset);
-          // If heap poisoning is enabled, no need to unpoison
-          // `temp1`, as we are comparing against null below.
-          __ Bnezc(temp1, slow_path->GetEntryLabel());
-          __ Bind(&do_put);
-        } else {
-          __ Bnec(temp1, temp2, slow_path->GetEntryLabel());
-        }
-      }
-
-      GpuRegister source = value;
-      if (kPoisonHeapReferences) {
-        // Note that in the case where `value` is a null reference,
-        // we do not enter this block, as a null reference does not
-        // need poisoning.
-        __ Move(temp1, value);
-        __ PoisonHeapReference(temp1);
-        source = temp1;
-      }
-
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
-      if (index.IsConstant()) {
-        data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
-      } else {
-        __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_4);
-      }
-      __ StoreToOffset(kStoreWord, source, base_reg, data_offset);
-
-      if (!may_need_runtime_call_for_type_check) {
-        codegen_->MaybeRecordImplicitNullCheck(instruction);
-      }
-
-      codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull());
-
-      if (done.IsLinked()) {
-        __ Bind(&done);
-      }
-
-      if (slow_path != nullptr) {
-        __ Bind(slow_path->GetExitLabel());
-      }
-      break;
-    }
-
-    case DataType::Type::kInt64: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
-      if (index.IsConstant()) {
-        data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
-      } else {
-        __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_8);
-      }
-      if (value_location.IsConstant()) {
-        int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
-        __ StoreConstToOffset(kStoreDoubleword, value, base_reg, data_offset, TMP, null_checker);
-      } else {
-        GpuRegister value = value_location.AsRegister<GpuRegister>();
-        __ StoreToOffset(kStoreDoubleword, value, base_reg, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kFloat32: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
-      if (index.IsConstant()) {
-        data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4;
-      } else {
-        __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_4);
-      }
-      if (value_location.IsConstant()) {
-        int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant());
-        __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker);
-      } else {
-        FpuRegister value = value_location.AsFpuRegister<FpuRegister>();
-        __ StoreFpuToOffset(kStoreWord, value, base_reg, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kFloat64: {
-      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
-      if (index.IsConstant()) {
-        data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8;
-      } else {
-        __ Dlsa(base_reg, index.AsRegister<GpuRegister>(), obj, TIMES_8);
-      }
-      if (value_location.IsConstant()) {
-        int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
-        __ StoreConstToOffset(kStoreDoubleword, value, base_reg, data_offset, TMP, null_checker);
-      } else {
-        FpuRegister value = value_location.AsFpuRegister<FpuRegister>();
-        __ StoreFpuToOffset(kStoreDoubleword, value, base_reg, data_offset, null_checker);
-      }
-      break;
-    }
-
-    case DataType::Type::kUint32:
-    case DataType::Type::kUint64:
-    case DataType::Type::kVoid:
-      LOG(FATAL) << "Unreachable type " << instruction->GetType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
-  RegisterSet caller_saves = RegisterSet::Empty();
-  InvokeRuntimeCallingConvention calling_convention;
-  caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-  LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction, caller_saves);
-
-  HInstruction* index = instruction->InputAt(0);
-  HInstruction* length = instruction->InputAt(1);
-
-  bool const_index = false;
-  bool const_length = false;
-
-  if (index->IsConstant()) {
-    if (length->IsConstant()) {
-      const_index = true;
-      const_length = true;
-    } else {
-      int32_t index_value = index->AsIntConstant()->GetValue();
-      if (index_value < 0 || IsInt<16>(index_value + 1)) {
-        const_index = true;
-      }
-    }
-  } else if (length->IsConstant()) {
-    int32_t length_value = length->AsIntConstant()->GetValue();
-    if (IsUint<15>(length_value)) {
-      const_length = true;
-    }
-  }
-
-  locations->SetInAt(0, const_index
-      ? Location::ConstantLocation(index->AsConstant())
-      : Location::RequiresRegister());
-  locations->SetInAt(1, const_length
-      ? Location::ConstantLocation(length->AsConstant())
-      : Location::RequiresRegister());
-}
-
-void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  Location index_loc = locations->InAt(0);
-  Location length_loc = locations->InAt(1);
-
-  if (length_loc.IsConstant()) {
-    int32_t length = length_loc.GetConstant()->AsIntConstant()->GetValue();
-    if (index_loc.IsConstant()) {
-      int32_t index = index_loc.GetConstant()->AsIntConstant()->GetValue();
-      if (index < 0 || index >= length) {
-        BoundsCheckSlowPathMIPS64* slow_path =
-            new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS64(instruction);
-        codegen_->AddSlowPath(slow_path);
-        __ Bc(slow_path->GetEntryLabel());
-      } else {
-        // Nothing to be done.
-      }
-      return;
-    }
-
-    BoundsCheckSlowPathMIPS64* slow_path =
-        new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS64(instruction);
-    codegen_->AddSlowPath(slow_path);
-    GpuRegister index = index_loc.AsRegister<GpuRegister>();
-    if (length == 0) {
-      __ Bc(slow_path->GetEntryLabel());
-    } else if (length == 1) {
-      __ Bnezc(index, slow_path->GetEntryLabel());
-    } else {
-      DCHECK(IsUint<15>(length)) << length;
-      __ Sltiu(TMP, index, length);
-      __ Beqzc(TMP, slow_path->GetEntryLabel());
-    }
-  } else {
-    GpuRegister length = length_loc.AsRegister<GpuRegister>();
-    BoundsCheckSlowPathMIPS64* slow_path =
-        new (codegen_->GetScopedAllocator()) BoundsCheckSlowPathMIPS64(instruction);
-    codegen_->AddSlowPath(slow_path);
-    if (index_loc.IsConstant()) {
-      int32_t index = index_loc.GetConstant()->AsIntConstant()->GetValue();
-      if (index < 0) {
-        __ Bc(slow_path->GetEntryLabel());
-      } else if (index == 0) {
-        __ Blezc(length, slow_path->GetEntryLabel());
-      } else {
-        DCHECK(IsInt<16>(index + 1)) << index;
-        __ Sltiu(TMP, length, index + 1);
-        __ Bnezc(TMP, slow_path->GetEntryLabel());
-      }
-    } else {
-      GpuRegister index = index_loc.AsRegister<GpuRegister>();
-      __ Bgeuc(index, length, slow_path->GetEntryLabel());
-    }
-  }
-}
-
-// Temp is used for read barrier.
-static size_t NumberOfInstanceOfTemps(TypeCheckKind type_check_kind) {
-  if (kEmitCompilerReadBarrier &&
-      !(kUseBakerReadBarrier && kBakerReadBarrierThunksEnableForFields) &&
-      (kUseBakerReadBarrier ||
-       type_check_kind == TypeCheckKind::kAbstractClassCheck ||
-       type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
-       type_check_kind == TypeCheckKind::kArrayObjectCheck)) {
-    return 1;
-  }
-  return 0;
-}
-
-// Extra temp is used for read barrier.
-static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
-  return 1 + NumberOfInstanceOfTemps(type_check_kind);
-}
-
-void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
-  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
-  LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction);
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
-  locations->SetInAt(0, Location::RequiresRegister());
-  if (type_check_kind == TypeCheckKind::kBitstringCheck) {
-    locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant()));
-    locations->SetInAt(2, Location::ConstantLocation(instruction->InputAt(2)->AsConstant()));
-    locations->SetInAt(3, Location::ConstantLocation(instruction->InputAt(3)->AsConstant()));
-  } else {
-    locations->SetInAt(1, Location::RequiresRegister());
-  }
-  locations->AddRegisterTemps(NumberOfCheckCastTemps(type_check_kind));
-}
-
-void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
-  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
-  LocationSummary* locations = instruction->GetLocations();
-  Location obj_loc = locations->InAt(0);
-  GpuRegister obj = obj_loc.AsRegister<GpuRegister>();
-  Location cls = locations->InAt(1);
-  Location temp_loc = locations->GetTemp(0);
-  GpuRegister temp = temp_loc.AsRegister<GpuRegister>();
-  const size_t num_temps = NumberOfCheckCastTemps(type_check_kind);
-  DCHECK_LE(num_temps, 2u);
-  Location maybe_temp2_loc = (num_temps >= 2) ? locations->GetTemp(1) : Location::NoLocation();
-  const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
-  const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
-  const uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
-  const uint32_t iftable_offset = mirror::Class::IfTableOffset().Uint32Value();
-  const uint32_t array_length_offset = mirror::Array::LengthOffset().Uint32Value();
-  const uint32_t object_array_data_offset =
-      mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
-  Mips64Label done;
-
-  bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction);
-  SlowPathCodeMIPS64* slow_path =
-      new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
-          instruction, is_type_check_slow_path_fatal);
-  codegen_->AddSlowPath(slow_path);
-
-  // Avoid this check if we know `obj` is not null.
-  if (instruction->MustDoNullCheck()) {
-    __ Beqzc(obj, &done);
-  }
-
-  switch (type_check_kind) {
-    case TypeCheckKind::kExactCheck:
-    case TypeCheckKind::kArrayCheck: {
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        temp_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp2_loc,
-                                        kWithoutReadBarrier);
-      // Jump to slow path for throwing the exception or doing a
-      // more involved array check.
-      __ Bnec(temp, cls.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
-      break;
-    }
-
-    case TypeCheckKind::kAbstractClassCheck: {
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        temp_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp2_loc,
-                                        kWithoutReadBarrier);
-      // If the class is abstract, we eagerly fetch the super class of the
-      // object to avoid doing a comparison we know will fail.
-      Mips64Label loop;
-      __ Bind(&loop);
-      // /* HeapReference<Class> */ temp = temp->super_class_
-      GenerateReferenceLoadOneRegister(instruction,
-                                       temp_loc,
-                                       super_offset,
-                                       maybe_temp2_loc,
-                                       kWithoutReadBarrier);
-      // If the class reference currently in `temp` is null, jump to the slow path to throw the
-      // exception.
-      __ Beqzc(temp, slow_path->GetEntryLabel());
-      // Otherwise, compare the classes.
-      __ Bnec(temp, cls.AsRegister<GpuRegister>(), &loop);
-      break;
-    }
-
-    case TypeCheckKind::kClassHierarchyCheck: {
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        temp_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp2_loc,
-                                        kWithoutReadBarrier);
-      // Walk over the class hierarchy to find a match.
-      Mips64Label loop;
-      __ Bind(&loop);
-      __ Beqc(temp, cls.AsRegister<GpuRegister>(), &done);
-      // /* HeapReference<Class> */ temp = temp->super_class_
-      GenerateReferenceLoadOneRegister(instruction,
-                                       temp_loc,
-                                       super_offset,
-                                       maybe_temp2_loc,
-                                       kWithoutReadBarrier);
-      // If the class reference currently in `temp` is null, jump to the slow path to throw the
-      // exception. Otherwise, jump to the beginning of the loop.
-      __ Bnezc(temp, &loop);
-      __ Bc(slow_path->GetEntryLabel());
-      break;
-    }
-
-    case TypeCheckKind::kArrayObjectCheck: {
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        temp_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp2_loc,
-                                        kWithoutReadBarrier);
-      // Do an exact check.
-      __ Beqc(temp, cls.AsRegister<GpuRegister>(), &done);
-      // Otherwise, we need to check that the object's class is a non-primitive array.
-      // /* HeapReference<Class> */ temp = temp->component_type_
-      GenerateReferenceLoadOneRegister(instruction,
-                                       temp_loc,
-                                       component_offset,
-                                       maybe_temp2_loc,
-                                       kWithoutReadBarrier);
-      // If the component type is null, jump to the slow path to throw the exception.
-      __ Beqzc(temp, slow_path->GetEntryLabel());
-      // Otherwise, the object is indeed an array, further check that this component
-      // type is not a primitive type.
-      __ LoadFromOffset(kLoadUnsignedHalfword, temp, temp, primitive_offset);
-      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
-      __ Bnezc(temp, slow_path->GetEntryLabel());
-      break;
-    }
-
-    case TypeCheckKind::kUnresolvedCheck:
-      // We always go into the type check slow path for the unresolved check case.
-      // We cannot directly call the CheckCast runtime entry point
-      // without resorting to a type checking slow path here (i.e. by
-      // calling InvokeRuntime directly), as it would require to
-      // assign fixed registers for the inputs of this HInstanceOf
-      // instruction (following the runtime calling convention), which
-      // might be cluttered by the potential first read barrier
-      // emission at the beginning of this method.
-      __ Bc(slow_path->GetEntryLabel());
-      break;
-
-    case TypeCheckKind::kInterfaceCheck: {
-      // Avoid read barriers to improve performance of the fast path. We can not get false
-      // positives by doing this.
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        temp_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp2_loc,
-                                        kWithoutReadBarrier);
-      // /* HeapReference<Class> */ temp = temp->iftable_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        temp_loc,
-                                        temp_loc,
-                                        iftable_offset,
-                                        maybe_temp2_loc,
-                                        kWithoutReadBarrier);
-      // Iftable is never null.
-      __ Lw(TMP, temp, array_length_offset);
-      // Loop through the iftable and check if any class matches.
-      Mips64Label loop;
-      __ Bind(&loop);
-      __ Beqzc(TMP, slow_path->GetEntryLabel());
-      __ Lwu(AT, temp, object_array_data_offset);
-      __ MaybeUnpoisonHeapReference(AT);
-      // Go to next interface.
-      __ Daddiu(temp, temp, 2 * kHeapReferenceSize);
-      __ Addiu(TMP, TMP, -2);
-      // Compare the classes and continue the loop if they do not match.
-      __ Bnec(AT, cls.AsRegister<GpuRegister>(), &loop);
-      break;
-    }
-
-    case TypeCheckKind::kBitstringCheck: {
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        temp_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp2_loc,
-                                        kWithoutReadBarrier);
-
-      GenerateBitstringTypeCheckCompare(instruction, temp);
-      __ Bnezc(temp, slow_path->GetEntryLabel());
-      break;
-    }
-  }
-
-  __ Bind(&done);
-  __ Bind(slow_path->GetExitLabel());
-}
-
-void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
-  locations->SetInAt(0, Location::RequiresRegister());
-  if (check->HasUses()) {
-    locations->SetOut(Location::SameAsFirstInput());
-  }
-  // Rely on the type initialization to save everything we need.
-  locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
-}
-
-void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
-  // We assume the class is not null.
-  SlowPathCodeMIPS64* slow_path =
-      new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(check->GetLoadClass(), check);
-  codegen_->AddSlowPath(slow_path);
-  GenerateClassInitializationCheck(slow_path,
-                                   check->GetLocations()->InAt(0).AsRegister<GpuRegister>());
-}
-
-void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) {
-  DataType::Type in_type = compare->InputAt(0)->GetType();
-
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(compare);
-
-  switch (in_type) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RegisterOrConstant(compare->InputAt(1)));
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-
-    default:
-      LOG(FATAL) << "Unexpected type for compare operation " << in_type;
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  GpuRegister res = locations->Out().AsRegister<GpuRegister>();
-  DataType::Type in_type = instruction->InputAt(0)->GetType();
-
-  //  0 if: left == right
-  //  1 if: left  > right
-  // -1 if: left  < right
-  switch (in_type) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64: {
-      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
-      Location rhs_location = locations->InAt(1);
-      bool use_imm = rhs_location.IsConstant();
-      GpuRegister rhs = ZERO;
-      if (use_imm) {
-        if (in_type == DataType::Type::kInt64) {
-          int64_t value = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()->AsConstant());
-          if (value != 0) {
-            rhs = AT;
-            __ LoadConst64(rhs, value);
-          }
-        } else {
-          int32_t value = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant()->AsConstant());
-          if (value != 0) {
-            rhs = AT;
-            __ LoadConst32(rhs, value);
-          }
-        }
-      } else {
-        rhs = rhs_location.AsRegister<GpuRegister>();
-      }
-      __ Slt(TMP, lhs, rhs);
-      __ Slt(res, rhs, lhs);
-      __ Subu(res, res, TMP);
-      break;
-    }
-
-    case DataType::Type::kFloat32: {
-      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
-      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
-      Mips64Label done;
-      __ CmpEqS(FTMP, lhs, rhs);
-      __ LoadConst32(res, 0);
-      __ Bc1nez(FTMP, &done);
-      if (instruction->IsGtBias()) {
-        __ CmpLtS(FTMP, lhs, rhs);
-        __ LoadConst32(res, -1);
-        __ Bc1nez(FTMP, &done);
-        __ LoadConst32(res, 1);
-      } else {
-        __ CmpLtS(FTMP, rhs, lhs);
-        __ LoadConst32(res, 1);
-        __ Bc1nez(FTMP, &done);
-        __ LoadConst32(res, -1);
-      }
-      __ Bind(&done);
-      break;
-    }
-
-    case DataType::Type::kFloat64: {
-      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
-      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
-      Mips64Label done;
-      __ CmpEqD(FTMP, lhs, rhs);
-      __ LoadConst32(res, 0);
-      __ Bc1nez(FTMP, &done);
-      if (instruction->IsGtBias()) {
-        __ CmpLtD(FTMP, lhs, rhs);
-        __ LoadConst32(res, -1);
-        __ Bc1nez(FTMP, &done);
-        __ LoadConst32(res, 1);
-      } else {
-        __ CmpLtD(FTMP, rhs, lhs);
-        __ LoadConst32(res, 1);
-        __ Bc1nez(FTMP, &done);
-        __ LoadConst32(res, -1);
-      }
-      __ Bind(&done);
-      break;
-    }
-
-    default:
-      LOG(FATAL) << "Unimplemented compare type " << in_type;
-  }
-}
-
-void LocationsBuilderMIPS64::HandleCondition(HCondition* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-  switch (instruction->InputAt(0)->GetType()) {
-    default:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
-      break;
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
-      break;
-  }
-  if (!instruction->IsEmittedAtUseSite()) {
-    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::HandleCondition(HCondition* instruction) {
-  if (instruction->IsEmittedAtUseSite()) {
-    return;
-  }
-
-  DataType::Type type = instruction->InputAt(0)->GetType();
-  LocationSummary* locations = instruction->GetLocations();
-  switch (type) {
-    default:
-      // Integer case.
-      GenerateIntLongCompare(instruction->GetCondition(), /* is64bit= */ false, locations);
-      return;
-    case DataType::Type::kInt64:
-      GenerateIntLongCompare(instruction->GetCondition(), /* is64bit= */ true, locations);
-      return;
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      GenerateFpCompare(instruction->GetCondition(), instruction->IsGtBias(), type, locations);
-     return;
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
-  DCHECK(instruction->IsDiv() || instruction->IsRem());
-  DataType::Type type = instruction->GetResultType();
-
-  LocationSummary* locations = instruction->GetLocations();
-  Location second = locations->InAt(1);
-  DCHECK(second.IsConstant());
-
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-  GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
-  int64_t imm = Int64FromConstant(second.GetConstant());
-  DCHECK(imm == 1 || imm == -1);
-
-  if (instruction->IsRem()) {
-    __ Move(out, ZERO);
-  } else {
-    if (imm == -1) {
-      if (type == DataType::Type::kInt32) {
-        __ Subu(out, ZERO, dividend);
-      } else {
-        DCHECK_EQ(type, DataType::Type::kInt64);
-        __ Dsubu(out, ZERO, dividend);
-      }
-    } else if (out != dividend) {
-      __ Move(out, dividend);
-    }
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
-  DCHECK(instruction->IsDiv() || instruction->IsRem());
-  DataType::Type type = instruction->GetResultType();
-
-  LocationSummary* locations = instruction->GetLocations();
-  Location second = locations->InAt(1);
-  DCHECK(second.IsConstant());
-
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-  GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
-  int64_t imm = Int64FromConstant(second.GetConstant());
-  uint64_t abs_imm = static_cast<uint64_t>(AbsOrMin(imm));
-  int ctz_imm = CTZ(abs_imm);
-
-  if (instruction->IsDiv()) {
-    if (type == DataType::Type::kInt32) {
-      if (ctz_imm == 1) {
-        // Fast path for division by +/-2, which is very common.
-        __ Srl(TMP, dividend, 31);
-      } else {
-        __ Sra(TMP, dividend, 31);
-        __ Srl(TMP, TMP, 32 - ctz_imm);
-      }
-      __ Addu(out, dividend, TMP);
-      __ Sra(out, out, ctz_imm);
-      if (imm < 0) {
-        __ Subu(out, ZERO, out);
-      }
-    } else {
-      DCHECK_EQ(type, DataType::Type::kInt64);
-      if (ctz_imm == 1) {
-        // Fast path for division by +/-2, which is very common.
-        __ Dsrl32(TMP, dividend, 31);
-      } else {
-        __ Dsra32(TMP, dividend, 31);
-        if (ctz_imm > 32) {
-          __ Dsrl(TMP, TMP, 64 - ctz_imm);
-        } else {
-          __ Dsrl32(TMP, TMP, 32 - ctz_imm);
-        }
-      }
-      __ Daddu(out, dividend, TMP);
-      if (ctz_imm < 32) {
-        __ Dsra(out, out, ctz_imm);
-      } else {
-        __ Dsra32(out, out, ctz_imm - 32);
-      }
-      if (imm < 0) {
-        __ Dsubu(out, ZERO, out);
-      }
-    }
-  } else {
-    if (type == DataType::Type::kInt32) {
-      if (ctz_imm == 1) {
-        // Fast path for modulo +/-2, which is very common.
-        __ Sra(TMP, dividend, 31);
-        __ Subu(out, dividend, TMP);
-        __ Andi(out, out, 1);
-        __ Addu(out, out, TMP);
-      } else {
-        __ Sra(TMP, dividend, 31);
-        __ Srl(TMP, TMP, 32 - ctz_imm);
-        __ Addu(out, dividend, TMP);
-        __ Ins(out, ZERO, ctz_imm, 32 - ctz_imm);
-        __ Subu(out, out, TMP);
-      }
-    } else {
-      DCHECK_EQ(type, DataType::Type::kInt64);
-      if (ctz_imm == 1) {
-        // Fast path for modulo +/-2, which is very common.
-        __ Dsra32(TMP, dividend, 31);
-        __ Dsubu(out, dividend, TMP);
-        __ Andi(out, out, 1);
-        __ Daddu(out, out, TMP);
-      } else {
-        __ Dsra32(TMP, dividend, 31);
-        if (ctz_imm > 32) {
-          __ Dsrl(TMP, TMP, 64 - ctz_imm);
-        } else {
-          __ Dsrl32(TMP, TMP, 32 - ctz_imm);
-        }
-        __ Daddu(out, dividend, TMP);
-        __ DblIns(out, ZERO, ctz_imm, 64 - ctz_imm);
-        __ Dsubu(out, out, TMP);
-      }
-    }
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
-  DCHECK(instruction->IsDiv() || instruction->IsRem());
-
-  LocationSummary* locations = instruction->GetLocations();
-  Location second = locations->InAt(1);
-  DCHECK(second.IsConstant());
-
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-  GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
-  int64_t imm = Int64FromConstant(second.GetConstant());
-
-  DataType::Type type = instruction->GetResultType();
-  DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64) << type;
-
-  int64_t magic;
-  int shift;
-  CalculateMagicAndShiftForDivRem(imm,
-                                  (type == DataType::Type::kInt64),
-                                  &magic,
-                                  &shift);
-
-  if (type == DataType::Type::kInt32) {
-    __ LoadConst32(TMP, magic);
-    __ MuhR6(TMP, dividend, TMP);
-
-    if (imm > 0 && magic < 0) {
-      __ Addu(TMP, TMP, dividend);
-    } else if (imm < 0 && magic > 0) {
-      __ Subu(TMP, TMP, dividend);
-    }
-
-    if (shift != 0) {
-      __ Sra(TMP, TMP, shift);
-    }
-
-    if (instruction->IsDiv()) {
-      __ Sra(out, TMP, 31);
-      __ Subu(out, TMP, out);
-    } else {
-      __ Sra(AT, TMP, 31);
-      __ Subu(AT, TMP, AT);
-      __ LoadConst32(TMP, imm);
-      __ MulR6(TMP, AT, TMP);
-      __ Subu(out, dividend, TMP);
-    }
-  } else {
-    __ LoadConst64(TMP, magic);
-    __ Dmuh(TMP, dividend, TMP);
-
-    if (imm > 0 && magic < 0) {
-      __ Daddu(TMP, TMP, dividend);
-    } else if (imm < 0 && magic > 0) {
-      __ Dsubu(TMP, TMP, dividend);
-    }
-
-    if (shift >= 32) {
-      __ Dsra32(TMP, TMP, shift - 32);
-    } else if (shift > 0) {
-      __ Dsra(TMP, TMP, shift);
-    }
-
-    if (instruction->IsDiv()) {
-      __ Dsra32(out, TMP, 31);
-      __ Dsubu(out, TMP, out);
-    } else {
-      __ Dsra32(AT, TMP, 31);
-      __ Dsubu(AT, TMP, AT);
-      __ LoadConst64(TMP, imm);
-      __ Dmul(TMP, AT, TMP);
-      __ Dsubu(out, dividend, TMP);
-    }
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::GenerateDivRemIntegral(HBinaryOperation* instruction) {
-  DCHECK(instruction->IsDiv() || instruction->IsRem());
-  DataType::Type type = instruction->GetResultType();
-  DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64) << type;
-
-  LocationSummary* locations = instruction->GetLocations();
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-  Location second = locations->InAt(1);
-
-  if (second.IsConstant()) {
-    int64_t imm = Int64FromConstant(second.GetConstant());
-    if (imm == 0) {
-      // Do not generate anything. DivZeroCheck would prevent any code to be executed.
-    } else if (imm == 1 || imm == -1) {
-      DivRemOneOrMinusOne(instruction);
-    } else if (IsPowerOfTwo(AbsOrMin(imm))) {
-      DivRemByPowerOfTwo(instruction);
-    } else {
-      DCHECK(imm <= -2 || imm >= 2);
-      GenerateDivRemWithAnyConstant(instruction);
-    }
-  } else {
-    GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
-    GpuRegister divisor = second.AsRegister<GpuRegister>();
-    if (instruction->IsDiv()) {
-      if (type == DataType::Type::kInt32)
-        __ DivR6(out, dividend, divisor);
-      else
-        __ Ddiv(out, dividend, divisor);
-    } else {
-      if (type == DataType::Type::kInt32)
-        __ ModR6(out, dividend, divisor);
-      else
-        __ Dmod(out, dividend, divisor);
-    }
-  }
-}
-
-void LocationsBuilderMIPS64::VisitDiv(HDiv* div) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(div, LocationSummary::kNoCall);
-  switch (div->GetResultType()) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-      break;
-
-    default:
-      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::VisitDiv(HDiv* instruction) {
-  DataType::Type type = instruction->GetType();
-  LocationSummary* locations = instruction->GetLocations();
-
-  switch (type) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      GenerateDivRemIntegral(instruction);
-      break;
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64: {
-      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
-      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
-      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
-      if (type == DataType::Type::kFloat32)
-        __ DivS(dst, lhs, rhs);
-      else
-        __ DivD(dst, lhs, rhs);
-      break;
-    }
-    default:
-      LOG(FATAL) << "Unexpected div type " << type;
-  }
-}
-
-void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
-  LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
-  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
-}
-
-void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
-  SlowPathCodeMIPS64* slow_path =
-      new (codegen_->GetScopedAllocator()) DivZeroCheckSlowPathMIPS64(instruction);
-  codegen_->AddSlowPath(slow_path);
-  Location value = instruction->GetLocations()->InAt(0);
-
-  DataType::Type type = instruction->GetType();
-
-  if (!DataType::IsIntegralType(type)) {
-    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
-    UNREACHABLE();
-  }
-
-  if (value.IsConstant()) {
-    int64_t divisor = codegen_->GetInt64ValueOf(value.GetConstant()->AsConstant());
-    if (divisor == 0) {
-      __ Bc(slow_path->GetEntryLabel());
-    } else {
-      // A division by a non-null constant is valid. We don't need to perform
-      // any check, so simply fall through.
-    }
-  } else {
-    __ Beqzc(value.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
-  }
-}
-
-void LocationsBuilderMIPS64::VisitDoubleConstant(HDoubleConstant* constant) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
-  locations->SetOut(Location::ConstantLocation(constant));
-}
-
-void InstructionCodeGeneratorMIPS64::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
-  // Will be generated at use site.
-}
-
-void LocationsBuilderMIPS64::VisitExit(HExit* exit) {
-  exit->SetLocations(nullptr);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
-}
-
-void LocationsBuilderMIPS64::VisitFloatConstant(HFloatConstant* constant) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(constant, LocationSummary::kNoCall);
-  locations->SetOut(Location::ConstantLocation(constant));
-}
-
-void InstructionCodeGeneratorMIPS64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
-  // Will be generated at use site.
-}
-
-void InstructionCodeGeneratorMIPS64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
-  if (successor->IsExitBlock()) {
-    DCHECK(got->GetPrevious()->AlwaysThrows());
-    return;  // no code needed
-  }
-
-  HBasicBlock* block = got->GetBlock();
-  HInstruction* previous = got->GetPrevious();
-  HLoopInformation* info = block->GetLoopInformation();
-
-  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
-    if (codegen_->GetCompilerOptions().CountHotnessInCompiledCode()) {
-      __ Ld(AT, SP, kCurrentMethodStackOffset);
-      __ Lhu(TMP, AT, ArtMethod::HotnessCountOffset().Int32Value());
-      __ Addiu(TMP, TMP, 1);
-      __ Sh(TMP, AT, ArtMethod::HotnessCountOffset().Int32Value());
-    }
-    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
-    return;
-  }
-  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
-    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
-  }
-  if (!codegen_->GoesToNextBlock(block, successor)) {
-    __ Bc(codegen_->GetLabelOf(successor));
-  }
-}
-
-void LocationsBuilderMIPS64::VisitGoto(HGoto* got) {
-  got->SetLocations(nullptr);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitGoto(HGoto* got) {
-  HandleGoto(got, got->GetSuccessor());
-}
-
-void LocationsBuilderMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
-  try_boundary->SetLocations(nullptr);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
-  HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
-  if (!successor->IsExitBlock()) {
-    HandleGoto(try_boundary, successor);
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::GenerateIntLongCompare(IfCondition cond,
-                                                            bool is64bit,
-                                                            LocationSummary* locations) {
-  GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
-  GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
-  Location rhs_location = locations->InAt(1);
-  GpuRegister rhs_reg = ZERO;
-  int64_t rhs_imm = 0;
-  bool use_imm = rhs_location.IsConstant();
-  if (use_imm) {
-    if (is64bit) {
-      rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
-    } else {
-      rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
-    }
-  } else {
-    rhs_reg = rhs_location.AsRegister<GpuRegister>();
-  }
-  int64_t rhs_imm_plus_one = rhs_imm + UINT64_C(1);
-
-  switch (cond) {
-    case kCondEQ:
-    case kCondNE:
-      if (use_imm && IsInt<16>(-rhs_imm)) {
-        if (rhs_imm == 0) {
-          if (cond == kCondEQ) {
-            __ Sltiu(dst, lhs, 1);
-          } else {
-            __ Sltu(dst, ZERO, lhs);
-          }
-        } else {
-          if (is64bit) {
-            __ Daddiu(dst, lhs, -rhs_imm);
-          } else {
-            __ Addiu(dst, lhs, -rhs_imm);
-          }
-          if (cond == kCondEQ) {
-            __ Sltiu(dst, dst, 1);
-          } else {
-            __ Sltu(dst, ZERO, dst);
-          }
-        }
-      } else {
-        if (use_imm && IsUint<16>(rhs_imm)) {
-          __ Xori(dst, lhs, rhs_imm);
-        } else {
-          if (use_imm) {
-            rhs_reg = TMP;
-            __ LoadConst64(rhs_reg, rhs_imm);
-          }
-          __ Xor(dst, lhs, rhs_reg);
-        }
-        if (cond == kCondEQ) {
-          __ Sltiu(dst, dst, 1);
-        } else {
-          __ Sltu(dst, ZERO, dst);
-        }
-      }
-      break;
-
-    case kCondLT:
-    case kCondGE:
-      if (use_imm && IsInt<16>(rhs_imm)) {
-        __ Slti(dst, lhs, rhs_imm);
-      } else {
-        if (use_imm) {
-          rhs_reg = TMP;
-          __ LoadConst64(rhs_reg, rhs_imm);
-        }
-        __ Slt(dst, lhs, rhs_reg);
-      }
-      if (cond == kCondGE) {
-        // Simulate lhs >= rhs via !(lhs < rhs) since there's
-        // only the slt instruction but no sge.
-        __ Xori(dst, dst, 1);
-      }
-      break;
-
-    case kCondLE:
-    case kCondGT:
-      if (use_imm && IsInt<16>(rhs_imm_plus_one)) {
-        // Simulate lhs <= rhs via lhs < rhs + 1.
-        __ Slti(dst, lhs, rhs_imm_plus_one);
-        if (cond == kCondGT) {
-          // Simulate lhs > rhs via !(lhs <= rhs) since there's
-          // only the slti instruction but no sgti.
-          __ Xori(dst, dst, 1);
-        }
-      } else {
-        if (use_imm) {
-          rhs_reg = TMP;
-          __ LoadConst64(rhs_reg, rhs_imm);
-        }
-        __ Slt(dst, rhs_reg, lhs);
-        if (cond == kCondLE) {
-          // Simulate lhs <= rhs via !(rhs < lhs) since there's
-          // only the slt instruction but no sle.
-          __ Xori(dst, dst, 1);
-        }
-      }
-      break;
-
-    case kCondB:
-    case kCondAE:
-      if (use_imm && IsInt<16>(rhs_imm)) {
-        // Sltiu sign-extends its 16-bit immediate operand before
-        // the comparison and thus lets us compare directly with
-        // unsigned values in the ranges [0, 0x7fff] and
-        // [0x[ffffffff]ffff8000, 0x[ffffffff]ffffffff].
-        __ Sltiu(dst, lhs, rhs_imm);
-      } else {
-        if (use_imm) {
-          rhs_reg = TMP;
-          __ LoadConst64(rhs_reg, rhs_imm);
-        }
-        __ Sltu(dst, lhs, rhs_reg);
-      }
-      if (cond == kCondAE) {
-        // Simulate lhs >= rhs via !(lhs < rhs) since there's
-        // only the sltu instruction but no sgeu.
-        __ Xori(dst, dst, 1);
-      }
-      break;
-
-    case kCondBE:
-    case kCondA:
-      if (use_imm && (rhs_imm_plus_one != 0) && IsInt<16>(rhs_imm_plus_one)) {
-        // Simulate lhs <= rhs via lhs < rhs + 1.
-        // Note that this only works if rhs + 1 does not overflow
-        // to 0, hence the check above.
-        // Sltiu sign-extends its 16-bit immediate operand before
-        // the comparison and thus lets us compare directly with
-        // unsigned values in the ranges [0, 0x7fff] and
-        // [0x[ffffffff]ffff8000, 0x[ffffffff]ffffffff].
-        __ Sltiu(dst, lhs, rhs_imm_plus_one);
-        if (cond == kCondA) {
-          // Simulate lhs > rhs via !(lhs <= rhs) since there's
-          // only the sltiu instruction but no sgtiu.
-          __ Xori(dst, dst, 1);
-        }
-      } else {
-        if (use_imm) {
-          rhs_reg = TMP;
-          __ LoadConst64(rhs_reg, rhs_imm);
-        }
-        __ Sltu(dst, rhs_reg, lhs);
-        if (cond == kCondBE) {
-          // Simulate lhs <= rhs via !(rhs < lhs) since there's
-          // only the sltu instruction but no sleu.
-          __ Xori(dst, dst, 1);
-        }
-      }
-      break;
-  }
-}
-
-bool InstructionCodeGeneratorMIPS64::MaterializeIntLongCompare(IfCondition cond,
-                                                               bool is64bit,
-                                                               LocationSummary* input_locations,
-                                                               GpuRegister dst) {
-  GpuRegister lhs = input_locations->InAt(0).AsRegister<GpuRegister>();
-  Location rhs_location = input_locations->InAt(1);
-  GpuRegister rhs_reg = ZERO;
-  int64_t rhs_imm = 0;
-  bool use_imm = rhs_location.IsConstant();
-  if (use_imm) {
-    if (is64bit) {
-      rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
-    } else {
-      rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
-    }
-  } else {
-    rhs_reg = rhs_location.AsRegister<GpuRegister>();
-  }
-  int64_t rhs_imm_plus_one = rhs_imm + UINT64_C(1);
-
-  switch (cond) {
-    case kCondEQ:
-    case kCondNE:
-      if (use_imm && IsInt<16>(-rhs_imm)) {
-        if (is64bit) {
-          __ Daddiu(dst, lhs, -rhs_imm);
-        } else {
-          __ Addiu(dst, lhs, -rhs_imm);
-        }
-      } else if (use_imm && IsUint<16>(rhs_imm)) {
-        __ Xori(dst, lhs, rhs_imm);
-      } else {
-        if (use_imm) {
-          rhs_reg = TMP;
-          __ LoadConst64(rhs_reg, rhs_imm);
-        }
-        __ Xor(dst, lhs, rhs_reg);
-      }
-      return (cond == kCondEQ);
-
-    case kCondLT:
-    case kCondGE:
-      if (use_imm && IsInt<16>(rhs_imm)) {
-        __ Slti(dst, lhs, rhs_imm);
-      } else {
-        if (use_imm) {
-          rhs_reg = TMP;
-          __ LoadConst64(rhs_reg, rhs_imm);
-        }
-        __ Slt(dst, lhs, rhs_reg);
-      }
-      return (cond == kCondGE);
-
-    case kCondLE:
-    case kCondGT:
-      if (use_imm && IsInt<16>(rhs_imm_plus_one)) {
-        // Simulate lhs <= rhs via lhs < rhs + 1.
-        __ Slti(dst, lhs, rhs_imm_plus_one);
-        return (cond == kCondGT);
-      } else {
-        if (use_imm) {
-          rhs_reg = TMP;
-          __ LoadConst64(rhs_reg, rhs_imm);
-        }
-        __ Slt(dst, rhs_reg, lhs);
-        return (cond == kCondLE);
-      }
-
-    case kCondB:
-    case kCondAE:
-      if (use_imm && IsInt<16>(rhs_imm)) {
-        // Sltiu sign-extends its 16-bit immediate operand before
-        // the comparison and thus lets us compare directly with
-        // unsigned values in the ranges [0, 0x7fff] and
-        // [0x[ffffffff]ffff8000, 0x[ffffffff]ffffffff].
-        __ Sltiu(dst, lhs, rhs_imm);
-      } else {
-        if (use_imm) {
-          rhs_reg = TMP;
-          __ LoadConst64(rhs_reg, rhs_imm);
-        }
-        __ Sltu(dst, lhs, rhs_reg);
-      }
-      return (cond == kCondAE);
-
-    case kCondBE:
-    case kCondA:
-      if (use_imm && (rhs_imm_plus_one != 0) && IsInt<16>(rhs_imm_plus_one)) {
-        // Simulate lhs <= rhs via lhs < rhs + 1.
-        // Note that this only works if rhs + 1 does not overflow
-        // to 0, hence the check above.
-        // Sltiu sign-extends its 16-bit immediate operand before
-        // the comparison and thus lets us compare directly with
-        // unsigned values in the ranges [0, 0x7fff] and
-        // [0x[ffffffff]ffff8000, 0x[ffffffff]ffffffff].
-        __ Sltiu(dst, lhs, rhs_imm_plus_one);
-        return (cond == kCondA);
-      } else {
-        if (use_imm) {
-          rhs_reg = TMP;
-          __ LoadConst64(rhs_reg, rhs_imm);
-        }
-        __ Sltu(dst, rhs_reg, lhs);
-        return (cond == kCondBE);
-      }
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::GenerateIntLongCompareAndBranch(IfCondition cond,
-                                                                     bool is64bit,
-                                                                     LocationSummary* locations,
-                                                                     Mips64Label* label) {
-  GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
-  Location rhs_location = locations->InAt(1);
-  GpuRegister rhs_reg = ZERO;
-  int64_t rhs_imm = 0;
-  bool use_imm = rhs_location.IsConstant();
-  if (use_imm) {
-    if (is64bit) {
-      rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
-    } else {
-      rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
-    }
-  } else {
-    rhs_reg = rhs_location.AsRegister<GpuRegister>();
-  }
-
-  if (use_imm && rhs_imm == 0) {
-    switch (cond) {
-      case kCondEQ:
-      case kCondBE:  // <= 0 if zero
-        __ Beqzc(lhs, label);
-        break;
-      case kCondNE:
-      case kCondA:  // > 0 if non-zero
-        __ Bnezc(lhs, label);
-        break;
-      case kCondLT:
-        __ Bltzc(lhs, label);
-        break;
-      case kCondGE:
-        __ Bgezc(lhs, label);
-        break;
-      case kCondLE:
-        __ Blezc(lhs, label);
-        break;
-      case kCondGT:
-        __ Bgtzc(lhs, label);
-        break;
-      case kCondB:  // always false
-        break;
-      case kCondAE:  // always true
-        __ Bc(label);
-        break;
-    }
-  } else {
-    if (use_imm) {
-      rhs_reg = TMP;
-      __ LoadConst64(rhs_reg, rhs_imm);
-    }
-    switch (cond) {
-      case kCondEQ:
-        __ Beqc(lhs, rhs_reg, label);
-        break;
-      case kCondNE:
-        __ Bnec(lhs, rhs_reg, label);
-        break;
-      case kCondLT:
-        __ Bltc(lhs, rhs_reg, label);
-        break;
-      case kCondGE:
-        __ Bgec(lhs, rhs_reg, label);
-        break;
-      case kCondLE:
-        __ Bgec(rhs_reg, lhs, label);
-        break;
-      case kCondGT:
-        __ Bltc(rhs_reg, lhs, label);
-        break;
-      case kCondB:
-        __ Bltuc(lhs, rhs_reg, label);
-        break;
-      case kCondAE:
-        __ Bgeuc(lhs, rhs_reg, label);
-        break;
-      case kCondBE:
-        __ Bgeuc(rhs_reg, lhs, label);
-        break;
-      case kCondA:
-        __ Bltuc(rhs_reg, lhs, label);
-        break;
-    }
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::GenerateFpCompare(IfCondition cond,
-                                                       bool gt_bias,
-                                                       DataType::Type type,
-                                                       LocationSummary* locations) {
-  GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
-  FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
-  FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
-  if (type == DataType::Type::kFloat32) {
-    switch (cond) {
-      case kCondEQ:
-        __ CmpEqS(FTMP, lhs, rhs);
-        __ Mfc1(dst, FTMP);
-        __ Andi(dst, dst, 1);
-        break;
-      case kCondNE:
-        __ CmpEqS(FTMP, lhs, rhs);
-        __ Mfc1(dst, FTMP);
-        __ Addiu(dst, dst, 1);
-        break;
-      case kCondLT:
-        if (gt_bias) {
-          __ CmpLtS(FTMP, lhs, rhs);
-        } else {
-          __ CmpUltS(FTMP, lhs, rhs);
-        }
-        __ Mfc1(dst, FTMP);
-        __ Andi(dst, dst, 1);
-        break;
-      case kCondLE:
-        if (gt_bias) {
-          __ CmpLeS(FTMP, lhs, rhs);
-        } else {
-          __ CmpUleS(FTMP, lhs, rhs);
-        }
-        __ Mfc1(dst, FTMP);
-        __ Andi(dst, dst, 1);
-        break;
-      case kCondGT:
-        if (gt_bias) {
-          __ CmpUltS(FTMP, rhs, lhs);
-        } else {
-          __ CmpLtS(FTMP, rhs, lhs);
-        }
-        __ Mfc1(dst, FTMP);
-        __ Andi(dst, dst, 1);
-        break;
-      case kCondGE:
-        if (gt_bias) {
-          __ CmpUleS(FTMP, rhs, lhs);
-        } else {
-          __ CmpLeS(FTMP, rhs, lhs);
-        }
-        __ Mfc1(dst, FTMP);
-        __ Andi(dst, dst, 1);
-        break;
-      default:
-        LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
-        UNREACHABLE();
-    }
-  } else {
-    DCHECK_EQ(type, DataType::Type::kFloat64);
-    switch (cond) {
-      case kCondEQ:
-        __ CmpEqD(FTMP, lhs, rhs);
-        __ Mfc1(dst, FTMP);
-        __ Andi(dst, dst, 1);
-        break;
-      case kCondNE:
-        __ CmpEqD(FTMP, lhs, rhs);
-        __ Mfc1(dst, FTMP);
-        __ Addiu(dst, dst, 1);
-        break;
-      case kCondLT:
-        if (gt_bias) {
-          __ CmpLtD(FTMP, lhs, rhs);
-        } else {
-          __ CmpUltD(FTMP, lhs, rhs);
-        }
-        __ Mfc1(dst, FTMP);
-        __ Andi(dst, dst, 1);
-        break;
-      case kCondLE:
-        if (gt_bias) {
-          __ CmpLeD(FTMP, lhs, rhs);
-        } else {
-          __ CmpUleD(FTMP, lhs, rhs);
-        }
-        __ Mfc1(dst, FTMP);
-        __ Andi(dst, dst, 1);
-        break;
-      case kCondGT:
-        if (gt_bias) {
-          __ CmpUltD(FTMP, rhs, lhs);
-        } else {
-          __ CmpLtD(FTMP, rhs, lhs);
-        }
-        __ Mfc1(dst, FTMP);
-        __ Andi(dst, dst, 1);
-        break;
-      case kCondGE:
-        if (gt_bias) {
-          __ CmpUleD(FTMP, rhs, lhs);
-        } else {
-          __ CmpLeD(FTMP, rhs, lhs);
-        }
-        __ Mfc1(dst, FTMP);
-        __ Andi(dst, dst, 1);
-        break;
-      default:
-        LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
-        UNREACHABLE();
-    }
-  }
-}
-
-bool InstructionCodeGeneratorMIPS64::MaterializeFpCompare(IfCondition cond,
-                                                          bool gt_bias,
-                                                          DataType::Type type,
-                                                          LocationSummary* input_locations,
-                                                          FpuRegister dst) {
-  FpuRegister lhs = input_locations->InAt(0).AsFpuRegister<FpuRegister>();
-  FpuRegister rhs = input_locations->InAt(1).AsFpuRegister<FpuRegister>();
-  if (type == DataType::Type::kFloat32) {
-    switch (cond) {
-      case kCondEQ:
-        __ CmpEqS(dst, lhs, rhs);
-        return false;
-      case kCondNE:
-        __ CmpEqS(dst, lhs, rhs);
-        return true;
-      case kCondLT:
-        if (gt_bias) {
-          __ CmpLtS(dst, lhs, rhs);
-        } else {
-          __ CmpUltS(dst, lhs, rhs);
-        }
-        return false;
-      case kCondLE:
-        if (gt_bias) {
-          __ CmpLeS(dst, lhs, rhs);
-        } else {
-          __ CmpUleS(dst, lhs, rhs);
-        }
-        return false;
-      case kCondGT:
-        if (gt_bias) {
-          __ CmpUltS(dst, rhs, lhs);
-        } else {
-          __ CmpLtS(dst, rhs, lhs);
-        }
-        return false;
-      case kCondGE:
-        if (gt_bias) {
-          __ CmpUleS(dst, rhs, lhs);
-        } else {
-          __ CmpLeS(dst, rhs, lhs);
-        }
-        return false;
-      default:
-        LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
-        UNREACHABLE();
-    }
-  } else {
-    DCHECK_EQ(type, DataType::Type::kFloat64);
-    switch (cond) {
-      case kCondEQ:
-        __ CmpEqD(dst, lhs, rhs);
-        return false;
-      case kCondNE:
-        __ CmpEqD(dst, lhs, rhs);
-        return true;
-      case kCondLT:
-        if (gt_bias) {
-          __ CmpLtD(dst, lhs, rhs);
-        } else {
-          __ CmpUltD(dst, lhs, rhs);
-        }
-        return false;
-      case kCondLE:
-        if (gt_bias) {
-          __ CmpLeD(dst, lhs, rhs);
-        } else {
-          __ CmpUleD(dst, lhs, rhs);
-        }
-        return false;
-      case kCondGT:
-        if (gt_bias) {
-          __ CmpUltD(dst, rhs, lhs);
-        } else {
-          __ CmpLtD(dst, rhs, lhs);
-        }
-        return false;
-      case kCondGE:
-        if (gt_bias) {
-          __ CmpUleD(dst, rhs, lhs);
-        } else {
-          __ CmpLeD(dst, rhs, lhs);
-        }
-        return false;
-      default:
-        LOG(FATAL) << "Unexpected non-floating-point condition " << cond;
-        UNREACHABLE();
-    }
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::GenerateFpCompareAndBranch(IfCondition cond,
-                                                                bool gt_bias,
-                                                                DataType::Type type,
-                                                                LocationSummary* locations,
-                                                                Mips64Label* label) {
-  FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
-  FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
-  if (type == DataType::Type::kFloat32) {
-    switch (cond) {
-      case kCondEQ:
-        __ CmpEqS(FTMP, lhs, rhs);
-        __ Bc1nez(FTMP, label);
-        break;
-      case kCondNE:
-        __ CmpEqS(FTMP, lhs, rhs);
-        __ Bc1eqz(FTMP, label);
-        break;
-      case kCondLT:
-        if (gt_bias) {
-          __ CmpLtS(FTMP, lhs, rhs);
-        } else {
-          __ CmpUltS(FTMP, lhs, rhs);
-        }
-        __ Bc1nez(FTMP, label);
-        break;
-      case kCondLE:
-        if (gt_bias) {
-          __ CmpLeS(FTMP, lhs, rhs);
-        } else {
-          __ CmpUleS(FTMP, lhs, rhs);
-        }
-        __ Bc1nez(FTMP, label);
-        break;
-      case kCondGT:
-        if (gt_bias) {
-          __ CmpUltS(FTMP, rhs, lhs);
-        } else {
-          __ CmpLtS(FTMP, rhs, lhs);
-        }
-        __ Bc1nez(FTMP, label);
-        break;
-      case kCondGE:
-        if (gt_bias) {
-          __ CmpUleS(FTMP, rhs, lhs);
-        } else {
-          __ CmpLeS(FTMP, rhs, lhs);
-        }
-        __ Bc1nez(FTMP, label);
-        break;
-      default:
-        LOG(FATAL) << "Unexpected non-floating-point condition";
-        UNREACHABLE();
-    }
-  } else {
-    DCHECK_EQ(type, DataType::Type::kFloat64);
-    switch (cond) {
-      case kCondEQ:
-        __ CmpEqD(FTMP, lhs, rhs);
-        __ Bc1nez(FTMP, label);
-        break;
-      case kCondNE:
-        __ CmpEqD(FTMP, lhs, rhs);
-        __ Bc1eqz(FTMP, label);
-        break;
-      case kCondLT:
-        if (gt_bias) {
-          __ CmpLtD(FTMP, lhs, rhs);
-        } else {
-          __ CmpUltD(FTMP, lhs, rhs);
-        }
-        __ Bc1nez(FTMP, label);
-        break;
-      case kCondLE:
-        if (gt_bias) {
-          __ CmpLeD(FTMP, lhs, rhs);
-        } else {
-          __ CmpUleD(FTMP, lhs, rhs);
-        }
-        __ Bc1nez(FTMP, label);
-        break;
-      case kCondGT:
-        if (gt_bias) {
-          __ CmpUltD(FTMP, rhs, lhs);
-        } else {
-          __ CmpLtD(FTMP, rhs, lhs);
-        }
-        __ Bc1nez(FTMP, label);
-        break;
-      case kCondGE:
-        if (gt_bias) {
-          __ CmpUleD(FTMP, rhs, lhs);
-        } else {
-          __ CmpLeD(FTMP, rhs, lhs);
-        }
-        __ Bc1nez(FTMP, label);
-        break;
-      default:
-        LOG(FATAL) << "Unexpected non-floating-point condition";
-        UNREACHABLE();
-    }
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruction,
-                                                           size_t condition_input_index,
-                                                           Mips64Label* true_target,
-                                                           Mips64Label* false_target) {
-  HInstruction* cond = instruction->InputAt(condition_input_index);
-
-  if (true_target == nullptr && false_target == nullptr) {
-    // Nothing to do. The code always falls through.
-    return;
-  } else if (cond->IsIntConstant()) {
-    // Constant condition, statically compared against "true" (integer value 1).
-    if (cond->AsIntConstant()->IsTrue()) {
-      if (true_target != nullptr) {
-        __ Bc(true_target);
-      }
-    } else {
-      DCHECK(cond->AsIntConstant()->IsFalse()) << cond->AsIntConstant()->GetValue();
-      if (false_target != nullptr) {
-        __ Bc(false_target);
-      }
-    }
-    return;
-  }
-
-  // The following code generates these patterns:
-  //  (1) true_target == nullptr && false_target != nullptr
-  //        - opposite condition true => branch to false_target
-  //  (2) true_target != nullptr && false_target == nullptr
-  //        - condition true => branch to true_target
-  //  (3) true_target != nullptr && false_target != nullptr
-  //        - condition true => branch to true_target
-  //        - branch to false_target
-  if (IsBooleanValueOrMaterializedCondition(cond)) {
-    // The condition instruction has been materialized, compare the output to 0.
-    Location cond_val = instruction->GetLocations()->InAt(condition_input_index);
-    DCHECK(cond_val.IsRegister());
-    if (true_target == nullptr) {
-      __ Beqzc(cond_val.AsRegister<GpuRegister>(), false_target);
-    } else {
-      __ Bnezc(cond_val.AsRegister<GpuRegister>(), true_target);
-    }
-  } else {
-    // The condition instruction has not been materialized, use its inputs as
-    // the comparison and its condition as the branch condition.
-    HCondition* condition = cond->AsCondition();
-    DataType::Type type = condition->InputAt(0)->GetType();
-    LocationSummary* locations = cond->GetLocations();
-    IfCondition if_cond = condition->GetCondition();
-    Mips64Label* branch_target = true_target;
-
-    if (true_target == nullptr) {
-      if_cond = condition->GetOppositeCondition();
-      branch_target = false_target;
-    }
-
-    switch (type) {
-      default:
-        GenerateIntLongCompareAndBranch(if_cond, /* is64bit= */ false, locations, branch_target);
-        break;
-      case DataType::Type::kInt64:
-        GenerateIntLongCompareAndBranch(if_cond, /* is64bit= */ true, locations, branch_target);
-        break;
-      case DataType::Type::kFloat32:
-      case DataType::Type::kFloat64:
-        GenerateFpCompareAndBranch(if_cond, condition->IsGtBias(), type, locations, branch_target);
-        break;
-    }
-  }
-
-  // If neither branch falls through (case 3), the conditional branch to `true_target`
-  // was already emitted (case 2) and we need to emit a jump to `false_target`.
-  if (true_target != nullptr && false_target != nullptr) {
-    __ Bc(false_target);
-  }
-}
-
-void LocationsBuilderMIPS64::VisitIf(HIf* if_instr) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(if_instr);
-  if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
-    locations->SetInAt(0, Location::RequiresRegister());
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) {
-  HBasicBlock* true_successor = if_instr->IfTrueSuccessor();
-  HBasicBlock* false_successor = if_instr->IfFalseSuccessor();
-  Mips64Label* true_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor) ?
-      nullptr : codegen_->GetLabelOf(true_successor);
-  Mips64Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
-      nullptr : codegen_->GetLabelOf(false_successor);
-  GenerateTestAndBranch(if_instr, /* condition_input_index= */ 0, true_target, false_target);
-}
-
-void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator())
-      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
-  InvokeRuntimeCallingConvention calling_convention;
-  RegisterSet caller_saves = RegisterSet::Empty();
-  caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetCustomSlowPathCallerSaves(caller_saves);
-  if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
-    locations->SetInAt(0, Location::RequiresRegister());
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
-  SlowPathCodeMIPS64* slow_path =
-      deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathMIPS64>(deoptimize);
-  GenerateTestAndBranch(deoptimize,
-                        /* condition_input_index= */ 0,
-                        slow_path->GetEntryLabel(),
-                        /* false_target= */ nullptr);
-}
-
-// This function returns true if a conditional move can be generated for HSelect.
-// Otherwise it returns false and HSelect must be implemented in terms of conditonal
-// branches and regular moves.
-//
-// If `locations_to_set` isn't nullptr, its inputs and outputs are set for HSelect.
-//
-// While determining feasibility of a conditional move and setting inputs/outputs
-// are two distinct tasks, this function does both because they share quite a bit
-// of common logic.
-static bool CanMoveConditionally(HSelect* select, LocationSummary* locations_to_set) {
-  bool materialized = IsBooleanValueOrMaterializedCondition(select->GetCondition());
-  HInstruction* cond = select->InputAt(/* i= */ 2);
-  HCondition* condition = cond->AsCondition();
-
-  DataType::Type cond_type =
-      materialized ? DataType::Type::kInt32 : condition->InputAt(0)->GetType();
-  DataType::Type dst_type = select->GetType();
-
-  HConstant* cst_true_value = select->GetTrueValue()->AsConstant();
-  HConstant* cst_false_value = select->GetFalseValue()->AsConstant();
-  bool is_true_value_zero_constant =
-      (cst_true_value != nullptr && cst_true_value->IsZeroBitPattern());
-  bool is_false_value_zero_constant =
-      (cst_false_value != nullptr && cst_false_value->IsZeroBitPattern());
-
-  bool can_move_conditionally = false;
-  bool use_const_for_false_in = false;
-  bool use_const_for_true_in = false;
-
-  if (!cond->IsConstant()) {
-    if (!DataType::IsFloatingPointType(cond_type)) {
-      if (!DataType::IsFloatingPointType(dst_type)) {
-        // Moving int/long on int/long condition.
-        if (is_true_value_zero_constant) {
-          // seleqz out_reg, false_reg, cond_reg
-          can_move_conditionally = true;
-          use_const_for_true_in = true;
-        } else if (is_false_value_zero_constant) {
-          // selnez out_reg, true_reg, cond_reg
-          can_move_conditionally = true;
-          use_const_for_false_in = true;
-        } else if (materialized) {
-          // Not materializing unmaterialized int conditions
-          // to keep the instruction count low.
-          // selnez AT, true_reg, cond_reg
-          // seleqz TMP, false_reg, cond_reg
-          // or out_reg, AT, TMP
-          can_move_conditionally = true;
-        }
-      } else {
-        // Moving float/double on int/long condition.
-        if (materialized) {
-          // Not materializing unmaterialized int conditions
-          // to keep the instruction count low.
-          can_move_conditionally = true;
-          if (is_true_value_zero_constant) {
-            // sltu TMP, ZERO, cond_reg
-            // mtc1 TMP, temp_cond_reg
-            // seleqz.fmt out_reg, false_reg, temp_cond_reg
-            use_const_for_true_in = true;
-          } else if (is_false_value_zero_constant) {
-            // sltu TMP, ZERO, cond_reg
-            // mtc1 TMP, temp_cond_reg
-            // selnez.fmt out_reg, true_reg, temp_cond_reg
-            use_const_for_false_in = true;
-          } else {
-            // sltu TMP, ZERO, cond_reg
-            // mtc1 TMP, temp_cond_reg
-            // sel.fmt temp_cond_reg, false_reg, true_reg
-            // mov.fmt out_reg, temp_cond_reg
-          }
-        }
-      }
-    } else {
-      if (!DataType::IsFloatingPointType(dst_type)) {
-        // Moving int/long on float/double condition.
-        can_move_conditionally = true;
-        if (is_true_value_zero_constant) {
-          // mfc1 TMP, temp_cond_reg
-          // seleqz out_reg, false_reg, TMP
-          use_const_for_true_in = true;
-        } else if (is_false_value_zero_constant) {
-          // mfc1 TMP, temp_cond_reg
-          // selnez out_reg, true_reg, TMP
-          use_const_for_false_in = true;
-        } else {
-          // mfc1 TMP, temp_cond_reg
-          // selnez AT, true_reg, TMP
-          // seleqz TMP, false_reg, TMP
-          // or out_reg, AT, TMP
-        }
-      } else {
-        // Moving float/double on float/double condition.
-        can_move_conditionally = true;
-        if (is_true_value_zero_constant) {
-          // seleqz.fmt out_reg, false_reg, temp_cond_reg
-          use_const_for_true_in = true;
-        } else if (is_false_value_zero_constant) {
-          // selnez.fmt out_reg, true_reg, temp_cond_reg
-          use_const_for_false_in = true;
-        } else {
-          // sel.fmt temp_cond_reg, false_reg, true_reg
-          // mov.fmt out_reg, temp_cond_reg
-        }
-      }
-    }
-  }
-
-  if (can_move_conditionally) {
-    DCHECK(!use_const_for_false_in || !use_const_for_true_in);
-  } else {
-    DCHECK(!use_const_for_false_in);
-    DCHECK(!use_const_for_true_in);
-  }
-
-  if (locations_to_set != nullptr) {
-    if (use_const_for_false_in) {
-      locations_to_set->SetInAt(0, Location::ConstantLocation(cst_false_value));
-    } else {
-      locations_to_set->SetInAt(0,
-                                DataType::IsFloatingPointType(dst_type)
-                                    ? Location::RequiresFpuRegister()
-                                    : Location::RequiresRegister());
-    }
-    if (use_const_for_true_in) {
-      locations_to_set->SetInAt(1, Location::ConstantLocation(cst_true_value));
-    } else {
-      locations_to_set->SetInAt(1,
-                                DataType::IsFloatingPointType(dst_type)
-                                    ? Location::RequiresFpuRegister()
-                                    : Location::RequiresRegister());
-    }
-    if (materialized) {
-      locations_to_set->SetInAt(2, Location::RequiresRegister());
-    }
-
-    if (can_move_conditionally) {
-      locations_to_set->SetOut(DataType::IsFloatingPointType(dst_type)
-                                   ? Location::RequiresFpuRegister()
-                                   : Location::RequiresRegister());
-    } else {
-      locations_to_set->SetOut(Location::SameAsFirstInput());
-    }
-  }
-
-  return can_move_conditionally;
-}
-
-
-void InstructionCodeGeneratorMIPS64::GenConditionalMove(HSelect* select) {
-  LocationSummary* locations = select->GetLocations();
-  Location dst = locations->Out();
-  Location false_src = locations->InAt(0);
-  Location true_src = locations->InAt(1);
-  HInstruction* cond = select->InputAt(/* i= */ 2);
-  GpuRegister cond_reg = TMP;
-  FpuRegister fcond_reg = FTMP;
-  DataType::Type cond_type = DataType::Type::kInt32;
-  bool cond_inverted = false;
-  DataType::Type dst_type = select->GetType();
-
-  if (IsBooleanValueOrMaterializedCondition(cond)) {
-    cond_reg = locations->InAt(/* at= */ 2).AsRegister<GpuRegister>();
-  } else {
-    HCondition* condition = cond->AsCondition();
-    LocationSummary* cond_locations = cond->GetLocations();
-    IfCondition if_cond = condition->GetCondition();
-    cond_type = condition->InputAt(0)->GetType();
-    switch (cond_type) {
-      default:
-        cond_inverted = MaterializeIntLongCompare(if_cond,
-                                                  /* is64bit= */ false,
-                                                  cond_locations,
-                                                  cond_reg);
-        break;
-      case DataType::Type::kInt64:
-        cond_inverted = MaterializeIntLongCompare(if_cond,
-                                                  /* is64bit= */ true,
-                                                  cond_locations,
-                                                  cond_reg);
-        break;
-      case DataType::Type::kFloat32:
-      case DataType::Type::kFloat64:
-        cond_inverted = MaterializeFpCompare(if_cond,
-                                             condition->IsGtBias(),
-                                             cond_type,
-                                             cond_locations,
-                                             fcond_reg);
-        break;
-    }
-  }
-
-  if (true_src.IsConstant()) {
-    DCHECK(true_src.GetConstant()->IsZeroBitPattern());
-  }
-  if (false_src.IsConstant()) {
-    DCHECK(false_src.GetConstant()->IsZeroBitPattern());
-  }
-
-  switch (dst_type) {
-    default:
-      if (DataType::IsFloatingPointType(cond_type)) {
-        __ Mfc1(cond_reg, fcond_reg);
-      }
-      if (true_src.IsConstant()) {
-        if (cond_inverted) {
-          __ Selnez(dst.AsRegister<GpuRegister>(), false_src.AsRegister<GpuRegister>(), cond_reg);
-        } else {
-          __ Seleqz(dst.AsRegister<GpuRegister>(), false_src.AsRegister<GpuRegister>(), cond_reg);
-        }
-      } else if (false_src.IsConstant()) {
-        if (cond_inverted) {
-          __ Seleqz(dst.AsRegister<GpuRegister>(), true_src.AsRegister<GpuRegister>(), cond_reg);
-        } else {
-          __ Selnez(dst.AsRegister<GpuRegister>(), true_src.AsRegister<GpuRegister>(), cond_reg);
-        }
-      } else {
-        DCHECK_NE(cond_reg, AT);
-        if (cond_inverted) {
-          __ Seleqz(AT, true_src.AsRegister<GpuRegister>(), cond_reg);
-          __ Selnez(TMP, false_src.AsRegister<GpuRegister>(), cond_reg);
-        } else {
-          __ Selnez(AT, true_src.AsRegister<GpuRegister>(), cond_reg);
-          __ Seleqz(TMP, false_src.AsRegister<GpuRegister>(), cond_reg);
-        }
-        __ Or(dst.AsRegister<GpuRegister>(), AT, TMP);
-      }
-      break;
-    case DataType::Type::kFloat32: {
-      if (!DataType::IsFloatingPointType(cond_type)) {
-        // sel*.fmt tests bit 0 of the condition register, account for that.
-        __ Sltu(TMP, ZERO, cond_reg);
-        __ Mtc1(TMP, fcond_reg);
-      }
-      FpuRegister dst_reg = dst.AsFpuRegister<FpuRegister>();
-      if (true_src.IsConstant()) {
-        FpuRegister src_reg = false_src.AsFpuRegister<FpuRegister>();
-        if (cond_inverted) {
-          __ SelnezS(dst_reg, src_reg, fcond_reg);
-        } else {
-          __ SeleqzS(dst_reg, src_reg, fcond_reg);
-        }
-      } else if (false_src.IsConstant()) {
-        FpuRegister src_reg = true_src.AsFpuRegister<FpuRegister>();
-        if (cond_inverted) {
-          __ SeleqzS(dst_reg, src_reg, fcond_reg);
-        } else {
-          __ SelnezS(dst_reg, src_reg, fcond_reg);
-        }
-      } else {
-        if (cond_inverted) {
-          __ SelS(fcond_reg,
-                  true_src.AsFpuRegister<FpuRegister>(),
-                  false_src.AsFpuRegister<FpuRegister>());
-        } else {
-          __ SelS(fcond_reg,
-                  false_src.AsFpuRegister<FpuRegister>(),
-                  true_src.AsFpuRegister<FpuRegister>());
-        }
-        __ MovS(dst_reg, fcond_reg);
-      }
-      break;
-    }
-    case DataType::Type::kFloat64: {
-      if (!DataType::IsFloatingPointType(cond_type)) {
-        // sel*.fmt tests bit 0 of the condition register, account for that.
-        __ Sltu(TMP, ZERO, cond_reg);
-        __ Mtc1(TMP, fcond_reg);
-      }
-      FpuRegister dst_reg = dst.AsFpuRegister<FpuRegister>();
-      if (true_src.IsConstant()) {
-        FpuRegister src_reg = false_src.AsFpuRegister<FpuRegister>();
-        if (cond_inverted) {
-          __ SelnezD(dst_reg, src_reg, fcond_reg);
-        } else {
-          __ SeleqzD(dst_reg, src_reg, fcond_reg);
-        }
-      } else if (false_src.IsConstant()) {
-        FpuRegister src_reg = true_src.AsFpuRegister<FpuRegister>();
-        if (cond_inverted) {
-          __ SeleqzD(dst_reg, src_reg, fcond_reg);
-        } else {
-          __ SelnezD(dst_reg, src_reg, fcond_reg);
-        }
-      } else {
-        if (cond_inverted) {
-          __ SelD(fcond_reg,
-                  true_src.AsFpuRegister<FpuRegister>(),
-                  false_src.AsFpuRegister<FpuRegister>());
-        } else {
-          __ SelD(fcond_reg,
-                  false_src.AsFpuRegister<FpuRegister>(),
-                  true_src.AsFpuRegister<FpuRegister>());
-        }
-        __ MovD(dst_reg, fcond_reg);
-      }
-      break;
-    }
-  }
-}
-
-void LocationsBuilderMIPS64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator())
-      LocationSummary(flag, LocationSummary::kNoCall);
-  locations->SetOut(Location::RequiresRegister());
-}
-
-void InstructionCodeGeneratorMIPS64::VisitShouldDeoptimizeFlag(HShouldDeoptimizeFlag* flag) {
-  __ LoadFromOffset(kLoadWord,
-                    flag->GetLocations()->Out().AsRegister<GpuRegister>(),
-                    SP,
-                    codegen_->GetStackOffsetOfShouldDeoptimizeFlag());
-}
-
-void LocationsBuilderMIPS64::VisitSelect(HSelect* select) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(select);
-  CanMoveConditionally(select, locations);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitSelect(HSelect* select) {
-  if (CanMoveConditionally(select, /* locations_to_set= */ nullptr)) {
-    GenConditionalMove(select);
-  } else {
-    LocationSummary* locations = select->GetLocations();
-    Mips64Label false_target;
-    GenerateTestAndBranch(select,
-                          /* condition_input_index= */ 2,
-                          /* true_target= */ nullptr,
-                          &false_target);
-    codegen_->MoveLocation(locations->Out(), locations->InAt(1), select->GetType());
-    __ Bind(&false_target);
-  }
-}
-
-void LocationsBuilderMIPS64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
-  new (GetGraph()->GetAllocator()) LocationSummary(info);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitNativeDebugInfo(HNativeDebugInfo*) {
-  // MaybeRecordNativeDebugInfo is already called implicitly in CodeGenerator::Compile.
-}
-
-void CodeGeneratorMIPS64::GenerateNop() {
-  __ Nop();
-}
-
-void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
-                                            const FieldInfo& field_info) {
-  DataType::Type field_type = field_info.GetFieldType();
-  bool object_field_get_with_read_barrier =
-      kEmitCompilerReadBarrier && (field_type == DataType::Type::kReference);
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
-      instruction,
-      object_field_get_with_read_barrier
-          ? LocationSummary::kCallOnSlowPath
-          : LocationSummary::kNoCall);
-  if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
-    locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
-  }
-  locations->SetInAt(0, Location::RequiresRegister());
-  if (DataType::IsFloatingPointType(instruction->GetType())) {
-    locations->SetOut(Location::RequiresFpuRegister());
-  } else {
-    // The output overlaps in the case of an object field get with
-    // read barriers enabled: we do not want the move to overwrite the
-    // object's location, as we need it to emit the read barrier.
-    locations->SetOut(Location::RequiresRegister(),
-                      object_field_get_with_read_barrier
-                          ? Location::kOutputOverlap
-                          : Location::kNoOutputOverlap);
-  }
-  if (object_field_get_with_read_barrier && kUseBakerReadBarrier) {
-    // We need a temporary register for the read barrier marking slow
-    // path in CodeGeneratorMIPS64::GenerateFieldLoadWithBakerReadBarrier.
-    if (!kBakerReadBarrierThunksEnableForFields) {
-      locations->AddTemp(Location::RequiresRegister());
-    }
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
-                                                    const FieldInfo& field_info) {
-  DCHECK_EQ(DataType::Size(field_info.GetFieldType()), DataType::Size(instruction->GetType()));
-  DataType::Type type = instruction->GetType();
-  LocationSummary* locations = instruction->GetLocations();
-  Location obj_loc = locations->InAt(0);
-  GpuRegister obj = obj_loc.AsRegister<GpuRegister>();
-  Location dst_loc = locations->Out();
-  LoadOperandType load_type = kLoadUnsignedByte;
-  bool is_volatile = field_info.IsVolatile();
-  uint32_t offset = field_info.GetFieldOffset().Uint32Value();
-  auto null_checker = GetImplicitNullChecker(instruction, codegen_);
-
-  switch (type) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-      load_type = kLoadUnsignedByte;
-      break;
-    case DataType::Type::kInt8:
-      load_type = kLoadSignedByte;
-      break;
-    case DataType::Type::kUint16:
-      load_type = kLoadUnsignedHalfword;
-      break;
-    case DataType::Type::kInt16:
-      load_type = kLoadSignedHalfword;
-      break;
-    case DataType::Type::kInt32:
-    case DataType::Type::kFloat32:
-      load_type = kLoadWord;
-      break;
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat64:
-      load_type = kLoadDoubleword;
-      break;
-    case DataType::Type::kReference:
-      load_type = kLoadUnsignedWord;
-      break;
-    case DataType::Type::kUint32:
-    case DataType::Type::kUint64:
-    case DataType::Type::kVoid:
-      LOG(FATAL) << "Unreachable type " << type;
-      UNREACHABLE();
-  }
-  if (!DataType::IsFloatingPointType(type)) {
-    DCHECK(dst_loc.IsRegister());
-    GpuRegister dst = dst_loc.AsRegister<GpuRegister>();
-    if (type == DataType::Type::kReference) {
-      // /* HeapReference<Object> */ dst = *(obj + offset)
-      if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
-        Location temp_loc =
-            kBakerReadBarrierThunksEnableForFields ? Location::NoLocation() : locations->GetTemp(0);
-        // Note that a potential implicit null check is handled in this
-        // CodeGeneratorMIPS64::GenerateFieldLoadWithBakerReadBarrier call.
-        codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
-                                                        dst_loc,
-                                                        obj,
-                                                        offset,
-                                                        temp_loc,
-                                                        /* needs_null_check= */ true);
-        if (is_volatile) {
-          GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
-        }
-      } else {
-        __ LoadFromOffset(kLoadUnsignedWord, dst, obj, offset, null_checker);
-        if (is_volatile) {
-          GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
-        }
-        // If read barriers are enabled, emit read barriers other than
-        // Baker's using a slow path (and also unpoison the loaded
-        // reference, if heap poisoning is enabled).
-        codegen_->MaybeGenerateReadBarrierSlow(instruction, dst_loc, dst_loc, obj_loc, offset);
-      }
-    } else {
-      __ LoadFromOffset(load_type, dst, obj, offset, null_checker);
-    }
-  } else {
-    DCHECK(dst_loc.IsFpuRegister());
-    FpuRegister dst = dst_loc.AsFpuRegister<FpuRegister>();
-    __ LoadFpuFromOffset(load_type, dst, obj, offset, null_checker);
-  }
-
-  // Memory barriers, in the case of references, are handled in the
-  // previous switch statement.
-  if (is_volatile && (type != DataType::Type::kReference)) {
-    GenerateMemoryBarrier(MemBarrierKind::kLoadAny);
-  }
-}
-
-void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction,
-                                            const FieldInfo& field_info ATTRIBUTE_UNUSED) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
-  locations->SetInAt(0, Location::RequiresRegister());
-  if (DataType::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
-    locations->SetInAt(1, FpuRegisterOrConstantForStore(instruction->InputAt(1)));
-  } else {
-    locations->SetInAt(1, RegisterOrZeroConstant(instruction->InputAt(1)));
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
-                                                    const FieldInfo& field_info,
-                                                    bool value_can_be_null) {
-  DataType::Type type = field_info.GetFieldType();
-  LocationSummary* locations = instruction->GetLocations();
-  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
-  Location value_location = locations->InAt(1);
-  StoreOperandType store_type = kStoreByte;
-  bool is_volatile = field_info.IsVolatile();
-  uint32_t offset = field_info.GetFieldOffset().Uint32Value();
-  bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1));
-  auto null_checker = GetImplicitNullChecker(instruction, codegen_);
-
-  switch (type) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      store_type = kStoreByte;
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      store_type = kStoreHalfword;
-      break;
-    case DataType::Type::kInt32:
-    case DataType::Type::kFloat32:
-    case DataType::Type::kReference:
-      store_type = kStoreWord;
-      break;
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat64:
-      store_type = kStoreDoubleword;
-      break;
-    case DataType::Type::kUint32:
-    case DataType::Type::kUint64:
-    case DataType::Type::kVoid:
-      LOG(FATAL) << "Unreachable type " << type;
-      UNREACHABLE();
-  }
-
-  if (is_volatile) {
-    GenerateMemoryBarrier(MemBarrierKind::kAnyStore);
-  }
-
-  if (value_location.IsConstant()) {
-    int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant());
-    __ StoreConstToOffset(store_type, value, obj, offset, TMP, null_checker);
-  } else {
-    if (!DataType::IsFloatingPointType(type)) {
-      DCHECK(value_location.IsRegister());
-      GpuRegister src = value_location.AsRegister<GpuRegister>();
-      if (kPoisonHeapReferences && needs_write_barrier) {
-        // Note that in the case where `value` is a null reference,
-        // we do not enter this block, as a null reference does not
-        // need poisoning.
-        DCHECK_EQ(type, DataType::Type::kReference);
-        __ PoisonHeapReference(TMP, src);
-        __ StoreToOffset(store_type, TMP, obj, offset, null_checker);
-      } else {
-        __ StoreToOffset(store_type, src, obj, offset, null_checker);
-      }
-    } else {
-      DCHECK(value_location.IsFpuRegister());
-      FpuRegister src = value_location.AsFpuRegister<FpuRegister>();
-      __ StoreFpuToOffset(store_type, src, obj, offset, null_checker);
-    }
-  }
-
-  if (needs_write_barrier) {
-    DCHECK(value_location.IsRegister());
-    GpuRegister src = value_location.AsRegister<GpuRegister>();
-    codegen_->MarkGCCard(obj, src, value_can_be_null);
-  }
-
-  if (is_volatile) {
-    GenerateMemoryBarrier(MemBarrierKind::kAnyAny);
-  }
-}
-
-void LocationsBuilderMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
-  HandleFieldGet(instruction, instruction->GetFieldInfo());
-}
-
-void InstructionCodeGeneratorMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
-  HandleFieldGet(instruction, instruction->GetFieldInfo());
-}
-
-void LocationsBuilderMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
-  HandleFieldSet(instruction, instruction->GetFieldInfo());
-}
-
-void InstructionCodeGeneratorMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
-  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
-}
-
-void InstructionCodeGeneratorMIPS64::GenerateReferenceLoadOneRegister(
-    HInstruction* instruction,
-    Location out,
-    uint32_t offset,
-    Location maybe_temp,
-    ReadBarrierOption read_barrier_option) {
-  GpuRegister out_reg = out.AsRegister<GpuRegister>();
-  if (read_barrier_option == kWithReadBarrier) {
-    CHECK(kEmitCompilerReadBarrier);
-    if (!kUseBakerReadBarrier || !kBakerReadBarrierThunksEnableForFields) {
-      DCHECK(maybe_temp.IsRegister()) << maybe_temp;
-    }
-    if (kUseBakerReadBarrier) {
-      // Load with fast path based Baker's read barrier.
-      // /* HeapReference<Object> */ out = *(out + offset)
-      codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
-                                                      out,
-                                                      out_reg,
-                                                      offset,
-                                                      maybe_temp,
-                                                      /* needs_null_check= */ false);
-    } else {
-      // Load with slow path based read barrier.
-      // Save the value of `out` into `maybe_temp` before overwriting it
-      // in the following move operation, as we will need it for the
-      // read barrier below.
-      __ Move(maybe_temp.AsRegister<GpuRegister>(), out_reg);
-      // /* HeapReference<Object> */ out = *(out + offset)
-      __ LoadFromOffset(kLoadUnsignedWord, out_reg, out_reg, offset);
-      codegen_->GenerateReadBarrierSlow(instruction, out, out, maybe_temp, offset);
-    }
-  } else {
-    // Plain load with no read barrier.
-    // /* HeapReference<Object> */ out = *(out + offset)
-    __ LoadFromOffset(kLoadUnsignedWord, out_reg, out_reg, offset);
-    __ MaybeUnpoisonHeapReference(out_reg);
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::GenerateReferenceLoadTwoRegisters(
-    HInstruction* instruction,
-    Location out,
-    Location obj,
-    uint32_t offset,
-    Location maybe_temp,
-    ReadBarrierOption read_barrier_option) {
-  GpuRegister out_reg = out.AsRegister<GpuRegister>();
-  GpuRegister obj_reg = obj.AsRegister<GpuRegister>();
-  if (read_barrier_option == kWithReadBarrier) {
-    CHECK(kEmitCompilerReadBarrier);
-    if (kUseBakerReadBarrier) {
-      if (!kBakerReadBarrierThunksEnableForFields) {
-        DCHECK(maybe_temp.IsRegister()) << maybe_temp;
-      }
-      // Load with fast path based Baker's read barrier.
-      // /* HeapReference<Object> */ out = *(obj + offset)
-      codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
-                                                      out,
-                                                      obj_reg,
-                                                      offset,
-                                                      maybe_temp,
-                                                      /* needs_null_check= */ false);
-    } else {
-      // Load with slow path based read barrier.
-      // /* HeapReference<Object> */ out = *(obj + offset)
-      __ LoadFromOffset(kLoadUnsignedWord, out_reg, obj_reg, offset);
-      codegen_->GenerateReadBarrierSlow(instruction, out, out, obj, offset);
-    }
-  } else {
-    // Plain load with no read barrier.
-    // /* HeapReference<Object> */ out = *(obj + offset)
-    __ LoadFromOffset(kLoadUnsignedWord, out_reg, obj_reg, offset);
-    __ MaybeUnpoisonHeapReference(out_reg);
-  }
-}
-
-static inline int GetBakerMarkThunkNumber(GpuRegister reg) {
-  static_assert(BAKER_MARK_INTROSPECTION_REGISTER_COUNT == 20, "Expecting equal");
-  if (reg >= V0 && reg <= T2) {  // 13 consequtive regs.
-    return reg - V0;
-  } else if (reg >= S2 && reg <= S7) {  // 6 consequtive regs.
-    return 13 + (reg - S2);
-  } else if (reg == S8) {  // One more.
-    return 19;
-  }
-  LOG(FATAL) << "Unexpected register " << reg;
-  UNREACHABLE();
-}
-
-static inline int GetBakerMarkFieldArrayThunkDisplacement(GpuRegister reg, bool short_offset) {
-  int num = GetBakerMarkThunkNumber(reg) +
-      (short_offset ? BAKER_MARK_INTROSPECTION_REGISTER_COUNT : 0);
-  return num * BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE;
-}
-
-static inline int GetBakerMarkGcRootThunkDisplacement(GpuRegister reg) {
-  return GetBakerMarkThunkNumber(reg) * BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE +
-      BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET;
-}
-
-void InstructionCodeGeneratorMIPS64::GenerateGcRootFieldLoad(HInstruction* instruction,
-                                                             Location root,
-                                                             GpuRegister obj,
-                                                             uint32_t offset,
-                                                             ReadBarrierOption read_barrier_option,
-                                                             Mips64Label* label_low) {
-  if (label_low != nullptr) {
-    DCHECK_EQ(offset, 0x5678u);
-  }
-  GpuRegister root_reg = root.AsRegister<GpuRegister>();
-  if (read_barrier_option == kWithReadBarrier) {
-    DCHECK(kEmitCompilerReadBarrier);
-    if (kUseBakerReadBarrier) {
-      // Fast path implementation of art::ReadBarrier::BarrierForRoot when
-      // Baker's read barrier are used:
-      if (kBakerReadBarrierThunksEnableForGcRoots) {
-        // Note that we do not actually check the value of `GetIsGcMarking()`
-        // to decide whether to mark the loaded GC root or not.  Instead, we
-        // load into `temp` (T9) the read barrier mark introspection entrypoint.
-        // If `temp` is null, it means that `GetIsGcMarking()` is false, and
-        // vice versa.
-        //
-        // We use thunks for the slow path. That thunk checks the reference
-        // and jumps to the entrypoint if needed.
-        //
-        //     temp = Thread::Current()->pReadBarrierMarkReg00
-        //     // AKA &art_quick_read_barrier_mark_introspection.
-        //     GcRoot<mirror::Object> root = *(obj+offset);  // Original reference load.
-        //     if (temp != nullptr) {
-        //        temp = &gc_root_thunk<root_reg>
-        //        root = temp(root)
-        //     }
-
-        const int32_t entry_point_offset =
-            Thread::ReadBarrierMarkEntryPointsOffset<kMips64PointerSize>(0);
-        const int thunk_disp = GetBakerMarkGcRootThunkDisplacement(root_reg);
-        int16_t offset_low = Low16Bits(offset);
-        int16_t offset_high = High16Bits(offset - offset_low);  // Accounts for sign
-                                                                // extension in lwu.
-        bool short_offset = IsInt<16>(static_cast<int32_t>(offset));
-        GpuRegister base = short_offset ? obj : TMP;
-        // Loading the entrypoint does not require a load acquire since it is only changed when
-        // threads are suspended or running a checkpoint.
-        __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
-        if (!short_offset) {
-          DCHECK(!label_low);
-          __ Daui(base, obj, offset_high);
-        }
-        Mips64Label skip_call;
-        __ Beqz(T9, &skip_call, /* is_bare= */ true);
-        if (label_low != nullptr) {
-          DCHECK(short_offset);
-          __ Bind(label_low);
-        }
-        // /* GcRoot<mirror::Object> */ root = *(obj + offset)
-        __ LoadFromOffset(kLoadUnsignedWord, root_reg, base, offset_low);  // Single instruction
-                                                                           // in delay slot.
-        __ Jialc(T9, thunk_disp);
-        __ Bind(&skip_call);
-      } else {
-        // Note that we do not actually check the value of `GetIsGcMarking()`
-        // to decide whether to mark the loaded GC root or not.  Instead, we
-        // load into `temp` (T9) the read barrier mark entry point corresponding
-        // to register `root`. If `temp` is null, it means that `GetIsGcMarking()`
-        // is false, and vice versa.
-        //
-        //     GcRoot<mirror::Object> root = *(obj+offset);  // Original reference load.
-        //     temp = Thread::Current()->pReadBarrierMarkReg ## root.reg()
-        //     if (temp != null) {
-        //       root = temp(root)
-        //     }
-
-        if (label_low != nullptr) {
-          __ Bind(label_low);
-        }
-        // /* GcRoot<mirror::Object> */ root = *(obj + offset)
-        __ LoadFromOffset(kLoadUnsignedWord, root_reg, obj, offset);
-        static_assert(
-            sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
-            "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
-            "have different sizes.");
-        static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
-                      "art::mirror::CompressedReference<mirror::Object> and int32_t "
-                      "have different sizes.");
-
-        // Slow path marking the GC root `root`.
-        Location temp = Location::RegisterLocation(T9);
-        SlowPathCodeMIPS64* slow_path =
-            new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS64(
-                instruction,
-                root,
-                /*entrypoint*/ temp);
-        codegen_->AddSlowPath(slow_path);
-
-        const int32_t entry_point_offset =
-            Thread::ReadBarrierMarkEntryPointsOffset<kMips64PointerSize>(root.reg() - 1);
-        // Loading the entrypoint does not require a load acquire since it is only changed when
-        // threads are suspended or running a checkpoint.
-        __ LoadFromOffset(kLoadDoubleword, temp.AsRegister<GpuRegister>(), TR, entry_point_offset);
-        __ Bnezc(temp.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
-        __ Bind(slow_path->GetExitLabel());
-      }
-    } else {
-      if (label_low != nullptr) {
-        __ Bind(label_low);
-      }
-      // GC root loaded through a slow path for read barriers other
-      // than Baker's.
-      // /* GcRoot<mirror::Object>* */ root = obj + offset
-      __ Daddiu64(root_reg, obj, static_cast<int32_t>(offset));
-      // /* mirror::Object* */ root = root->Read()
-      codegen_->GenerateReadBarrierForRootSlow(instruction, root, root);
-    }
-  } else {
-    if (label_low != nullptr) {
-      __ Bind(label_low);
-    }
-    // Plain GC root load with no read barrier.
-    // /* GcRoot<mirror::Object> */ root = *(obj + offset)
-    __ LoadFromOffset(kLoadUnsignedWord, root_reg, obj, offset);
-    // Note that GC roots are not affected by heap poisoning, thus we
-    // do not have to unpoison `root_reg` here.
-  }
-}
-
-void CodeGeneratorMIPS64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
-                                                                Location ref,
-                                                                GpuRegister obj,
-                                                                uint32_t offset,
-                                                                Location temp,
-                                                                bool needs_null_check) {
-  DCHECK(kEmitCompilerReadBarrier);
-  DCHECK(kUseBakerReadBarrier);
-
-  if (kBakerReadBarrierThunksEnableForFields) {
-    // Note that we do not actually check the value of `GetIsGcMarking()`
-    // to decide whether to mark the loaded reference or not.  Instead, we
-    // load into `temp` (T9) the read barrier mark introspection entrypoint.
-    // If `temp` is null, it means that `GetIsGcMarking()` is false, and
-    // vice versa.
-    //
-    // We use thunks for the slow path. That thunk checks the reference
-    // and jumps to the entrypoint if needed. If the holder is not gray,
-    // it issues a load-load memory barrier and returns to the original
-    // reference load.
-    //
-    //     temp = Thread::Current()->pReadBarrierMarkReg00
-    //     // AKA &art_quick_read_barrier_mark_introspection.
-    //     if (temp != nullptr) {
-    //        temp = &field_array_thunk<holder_reg>
-    //        temp()
-    //     }
-    //   not_gray_return_address:
-    //     // If the offset is too large to fit into the lw instruction, we
-    //     // use an adjusted base register (TMP) here. This register
-    //     // receives bits 16 ... 31 of the offset before the thunk invocation
-    //     // and the thunk benefits from it.
-    //     HeapReference<mirror::Object> reference = *(obj+offset);  // Original reference load.
-    //   gray_return_address:
-
-    DCHECK(temp.IsInvalid());
-    bool short_offset = IsInt<16>(static_cast<int32_t>(offset));
-    const int32_t entry_point_offset =
-        Thread::ReadBarrierMarkEntryPointsOffset<kMips64PointerSize>(0);
-    // There may have or may have not been a null check if the field offset is smaller than
-    // the page size.
-    // There must've been a null check in case it's actually a load from an array.
-    // We will, however, perform an explicit null check in the thunk as it's easier to
-    // do it than not.
-    if (instruction->IsArrayGet()) {
-      DCHECK(!needs_null_check);
-    }
-    const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, short_offset);
-    // Loading the entrypoint does not require a load acquire since it is only changed when
-    // threads are suspended or running a checkpoint.
-    __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
-    GpuRegister ref_reg = ref.AsRegister<GpuRegister>();
-    Mips64Label skip_call;
-    if (short_offset) {
-      __ Beqzc(T9, &skip_call, /* is_bare= */ true);
-      __ Nop();  // In forbidden slot.
-      __ Jialc(T9, thunk_disp);
-      __ Bind(&skip_call);
-      // /* HeapReference<Object> */ ref = *(obj + offset)
-      __ LoadFromOffset(kLoadUnsignedWord, ref_reg, obj, offset);  // Single instruction.
-    } else {
-      int16_t offset_low = Low16Bits(offset);
-      int16_t offset_high = High16Bits(offset - offset_low);  // Accounts for sign extension in lwu.
-      __ Beqz(T9, &skip_call, /* is_bare= */ true);
-      __ Daui(TMP, obj, offset_high);  // In delay slot.
-      __ Jialc(T9, thunk_disp);
-      __ Bind(&skip_call);
-      // /* HeapReference<Object> */ ref = *(obj + offset)
-      __ LoadFromOffset(kLoadUnsignedWord, ref_reg, TMP, offset_low);  // Single instruction.
-    }
-    if (needs_null_check) {
-      MaybeRecordImplicitNullCheck(instruction);
-    }
-    __ MaybeUnpoisonHeapReference(ref_reg);
-    return;
-  }
-
-  // /* HeapReference<Object> */ ref = *(obj + offset)
-  Location no_index = Location::NoLocation();
-  ScaleFactor no_scale_factor = TIMES_1;
-  GenerateReferenceLoadWithBakerReadBarrier(instruction,
-                                            ref,
-                                            obj,
-                                            offset,
-                                            no_index,
-                                            no_scale_factor,
-                                            temp,
-                                            needs_null_check);
-}
-
-void CodeGeneratorMIPS64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
-                                                                Location ref,
-                                                                GpuRegister obj,
-                                                                uint32_t data_offset,
-                                                                Location index,
-                                                                Location temp,
-                                                                bool needs_null_check) {
-  DCHECK(kEmitCompilerReadBarrier);
-  DCHECK(kUseBakerReadBarrier);
-
-  static_assert(
-      sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
-      "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
-  ScaleFactor scale_factor = TIMES_4;
-
-  if (kBakerReadBarrierThunksEnableForArrays) {
-    // Note that we do not actually check the value of `GetIsGcMarking()`
-    // to decide whether to mark the loaded reference or not.  Instead, we
-    // load into `temp` (T9) the read barrier mark introspection entrypoint.
-    // If `temp` is null, it means that `GetIsGcMarking()` is false, and
-    // vice versa.
-    //
-    // We use thunks for the slow path. That thunk checks the reference
-    // and jumps to the entrypoint if needed. If the holder is not gray,
-    // it issues a load-load memory barrier and returns to the original
-    // reference load.
-    //
-    //     temp = Thread::Current()->pReadBarrierMarkReg00
-    //     // AKA &art_quick_read_barrier_mark_introspection.
-    //     if (temp != nullptr) {
-    //        temp = &field_array_thunk<holder_reg>
-    //        temp()
-    //     }
-    //   not_gray_return_address:
-    //     // The element address is pre-calculated in the TMP register before the
-    //     // thunk invocation and the thunk benefits from it.
-    //     HeapReference<mirror::Object> reference = data[index];  // Original reference load.
-    //   gray_return_address:
-
-    DCHECK(temp.IsInvalid());
-    DCHECK(index.IsValid());
-    const int32_t entry_point_offset =
-        Thread::ReadBarrierMarkEntryPointsOffset<kMips64PointerSize>(0);
-    // We will not do the explicit null check in the thunk as some form of a null check
-    // must've been done earlier.
-    DCHECK(!needs_null_check);
-    const int thunk_disp = GetBakerMarkFieldArrayThunkDisplacement(obj, /* short_offset= */ false);
-    // Loading the entrypoint does not require a load acquire since it is only changed when
-    // threads are suspended or running a checkpoint.
-    __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
-    Mips64Label skip_call;
-    __ Beqz(T9, &skip_call, /* is_bare= */ true);
-    GpuRegister ref_reg = ref.AsRegister<GpuRegister>();
-    GpuRegister index_reg = index.AsRegister<GpuRegister>();
-    __ Dlsa(TMP, index_reg, obj, scale_factor);  // In delay slot.
-    __ Jialc(T9, thunk_disp);
-    __ Bind(&skip_call);
-    // /* HeapReference<Object> */ ref = *(obj + data_offset + (index << scale_factor))
-    DCHECK(IsInt<16>(static_cast<int32_t>(data_offset))) << data_offset;
-    __ LoadFromOffset(kLoadUnsignedWord, ref_reg, TMP, data_offset);  // Single instruction.
-    __ MaybeUnpoisonHeapReference(ref_reg);
-    return;
-  }
-
-  // /* HeapReference<Object> */ ref =
-  //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
-  GenerateReferenceLoadWithBakerReadBarrier(instruction,
-                                            ref,
-                                            obj,
-                                            data_offset,
-                                            index,
-                                            scale_factor,
-                                            temp,
-                                            needs_null_check);
-}
-
-void CodeGeneratorMIPS64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
-                                                                    Location ref,
-                                                                    GpuRegister obj,
-                                                                    uint32_t offset,
-                                                                    Location index,
-                                                                    ScaleFactor scale_factor,
-                                                                    Location temp,
-                                                                    bool needs_null_check,
-                                                                    bool always_update_field) {
-  DCHECK(kEmitCompilerReadBarrier);
-  DCHECK(kUseBakerReadBarrier);
-
-  // In slow path based read barriers, the read barrier call is
-  // inserted after the original load. However, in fast path based
-  // Baker's read barriers, we need to perform the load of
-  // mirror::Object::monitor_ *before* the original reference load.
-  // This load-load ordering is required by the read barrier.
-  // The fast path/slow path (for Baker's algorithm) should look like:
-  //
-  //   uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
-  //   lfence;  // Load fence or artificial data dependency to prevent load-load reordering
-  //   HeapReference<Object> ref = *src;  // Original reference load.
-  //   bool is_gray = (rb_state == ReadBarrier::GrayState());
-  //   if (is_gray) {
-  //     ref = ReadBarrier::Mark(ref);  // Performed by runtime entrypoint slow path.
-  //   }
-  //
-  // Note: the original implementation in ReadBarrier::Barrier is
-  // slightly more complex as it performs additional checks that we do
-  // not do here for performance reasons.
-
-  GpuRegister ref_reg = ref.AsRegister<GpuRegister>();
-  GpuRegister temp_reg = temp.AsRegister<GpuRegister>();
-  uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
-
-  // /* int32_t */ monitor = obj->monitor_
-  __ LoadFromOffset(kLoadWord, temp_reg, obj, monitor_offset);
-  if (needs_null_check) {
-    MaybeRecordImplicitNullCheck(instruction);
-  }
-  // /* LockWord */ lock_word = LockWord(monitor)
-  static_assert(sizeof(LockWord) == sizeof(int32_t),
-                "art::LockWord and int32_t have different sizes.");
-
-  __ Sync(0);  // Barrier to prevent load-load reordering.
-
-  // The actual reference load.
-  if (index.IsValid()) {
-    // Load types involving an "index": ArrayGet,
-    // UnsafeGetObject/UnsafeGetObjectVolatile and UnsafeCASObject
-    // intrinsics.
-    // /* HeapReference<Object> */ ref = *(obj + offset + (index << scale_factor))
-    if (index.IsConstant()) {
-      size_t computed_offset =
-          (index.GetConstant()->AsIntConstant()->GetValue() << scale_factor) + offset;
-      __ LoadFromOffset(kLoadUnsignedWord, ref_reg, obj, computed_offset);
-    } else {
-      GpuRegister index_reg = index.AsRegister<GpuRegister>();
-      if (scale_factor == TIMES_1) {
-        __ Daddu(TMP, index_reg, obj);
-      } else {
-        __ Dlsa(TMP, index_reg, obj, scale_factor);
-      }
-      __ LoadFromOffset(kLoadUnsignedWord, ref_reg, TMP, offset);
-    }
-  } else {
-    // /* HeapReference<Object> */ ref = *(obj + offset)
-    __ LoadFromOffset(kLoadUnsignedWord, ref_reg, obj, offset);
-  }
-
-  // Object* ref = ref_addr->AsMirrorPtr()
-  __ MaybeUnpoisonHeapReference(ref_reg);
-
-  // Slow path marking the object `ref` when it is gray.
-  SlowPathCodeMIPS64* slow_path;
-  if (always_update_field) {
-    // ReadBarrierMarkAndUpdateFieldSlowPathMIPS64 only supports address
-    // of the form `obj + field_offset`, where `obj` is a register and
-    // `field_offset` is a register. Thus `offset` and `scale_factor`
-    // above are expected to be null in this code path.
-    DCHECK_EQ(offset, 0u);
-    DCHECK_EQ(scale_factor, ScaleFactor::TIMES_1);
-    slow_path = new (GetScopedAllocator())
-        ReadBarrierMarkAndUpdateFieldSlowPathMIPS64(instruction,
-                                                    ref,
-                                                    obj,
-                                                    /* field_offset= */ index,
-                                                    temp_reg);
-  } else {
-    slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathMIPS64(instruction, ref);
-  }
-  AddSlowPath(slow_path);
-
-  // if (rb_state == ReadBarrier::GrayState())
-  //   ref = ReadBarrier::Mark(ref);
-  // Given the numeric representation, it's enough to check the low bit of the
-  // rb_state. We do that by shifting the bit into the sign bit (31) and
-  // performing a branch on less than zero.
-  static_assert(ReadBarrier::NonGrayState() == 0, "Expecting non-gray to have value 0");
-  static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1");
-  static_assert(LockWord::kReadBarrierStateSize == 1, "Expecting 1-bit read barrier state size");
-  __ Sll(temp_reg, temp_reg, 31 - LockWord::kReadBarrierStateShift);
-  __ Bltzc(temp_reg, slow_path->GetEntryLabel());
-  __ Bind(slow_path->GetExitLabel());
-}
-
-void CodeGeneratorMIPS64::GenerateReadBarrierSlow(HInstruction* instruction,
-                                                  Location out,
-                                                  Location ref,
-                                                  Location obj,
-                                                  uint32_t offset,
-                                                  Location index) {
-  DCHECK(kEmitCompilerReadBarrier);
-
-  // Insert a slow path based read barrier *after* the reference load.
-  //
-  // If heap poisoning is enabled, the unpoisoning of the loaded
-  // reference will be carried out by the runtime within the slow
-  // path.
-  //
-  // Note that `ref` currently does not get unpoisoned (when heap
-  // poisoning is enabled), which is alright as the `ref` argument is
-  // not used by the artReadBarrierSlow entry point.
-  //
-  // TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
-  SlowPathCodeMIPS64* slow_path = new (GetScopedAllocator())
-      ReadBarrierForHeapReferenceSlowPathMIPS64(instruction, out, ref, obj, offset, index);
-  AddSlowPath(slow_path);
-
-  __ Bc(slow_path->GetEntryLabel());
-  __ Bind(slow_path->GetExitLabel());
-}
-
-void CodeGeneratorMIPS64::MaybeGenerateReadBarrierSlow(HInstruction* instruction,
-                                                       Location out,
-                                                       Location ref,
-                                                       Location obj,
-                                                       uint32_t offset,
-                                                       Location index) {
-  if (kEmitCompilerReadBarrier) {
-    // Baker's read barriers shall be handled by the fast path
-    // (CodeGeneratorMIPS64::GenerateReferenceLoadWithBakerReadBarrier).
-    DCHECK(!kUseBakerReadBarrier);
-    // If heap poisoning is enabled, unpoisoning will be taken care of
-    // by the runtime within the slow path.
-    GenerateReadBarrierSlow(instruction, out, ref, obj, offset, index);
-  } else if (kPoisonHeapReferences) {
-    __ UnpoisonHeapReference(out.AsRegister<GpuRegister>());
-  }
-}
-
-void CodeGeneratorMIPS64::GenerateReadBarrierForRootSlow(HInstruction* instruction,
-                                                         Location out,
-                                                         Location root) {
-  DCHECK(kEmitCompilerReadBarrier);
-
-  // Insert a slow path based read barrier *after* the GC root load.
-  //
-  // Note that GC roots are not affected by heap poisoning, so we do
-  // not need to do anything special for this here.
-  SlowPathCodeMIPS64* slow_path =
-      new (GetScopedAllocator()) ReadBarrierForRootSlowPathMIPS64(instruction, out, root);
-  AddSlowPath(slow_path);
-
-  __ Bc(slow_path->GetEntryLabel());
-  __ Bind(slow_path->GetExitLabel());
-}
-
-void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
-  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
-  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
-  bool baker_read_barrier_slow_path = false;
-  switch (type_check_kind) {
-    case TypeCheckKind::kExactCheck:
-    case TypeCheckKind::kAbstractClassCheck:
-    case TypeCheckKind::kClassHierarchyCheck:
-    case TypeCheckKind::kArrayObjectCheck: {
-      bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction);
-      call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
-      baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier;
-      break;
-    }
-    case TypeCheckKind::kArrayCheck:
-    case TypeCheckKind::kUnresolvedCheck:
-    case TypeCheckKind::kInterfaceCheck:
-      call_kind = LocationSummary::kCallOnSlowPath;
-      break;
-    case TypeCheckKind::kBitstringCheck:
-      break;
-  }
-
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
-  if (baker_read_barrier_slow_path) {
-    locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
-  }
-  locations->SetInAt(0, Location::RequiresRegister());
-  if (type_check_kind == TypeCheckKind::kBitstringCheck) {
-    locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant()));
-    locations->SetInAt(2, Location::ConstantLocation(instruction->InputAt(2)->AsConstant()));
-    locations->SetInAt(3, Location::ConstantLocation(instruction->InputAt(3)->AsConstant()));
-  } else {
-    locations->SetInAt(1, Location::RequiresRegister());
-  }
-  // The output does overlap inputs.
-  // Note that TypeCheckSlowPathMIPS64 uses this register too.
-  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
-  locations->AddRegisterTemps(NumberOfInstanceOfTemps(type_check_kind));
-}
-
-void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
-  TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
-  LocationSummary* locations = instruction->GetLocations();
-  Location obj_loc = locations->InAt(0);
-  GpuRegister obj = obj_loc.AsRegister<GpuRegister>();
-  Location cls = locations->InAt(1);
-  Location out_loc = locations->Out();
-  GpuRegister out = out_loc.AsRegister<GpuRegister>();
-  const size_t num_temps = NumberOfInstanceOfTemps(type_check_kind);
-  DCHECK_LE(num_temps, 1u);
-  Location maybe_temp_loc = (num_temps >= 1) ? locations->GetTemp(0) : Location::NoLocation();
-  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
-  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
-  uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
-  Mips64Label done;
-  SlowPathCodeMIPS64* slow_path = nullptr;
-
-  // Return 0 if `obj` is null.
-  // Avoid this check if we know `obj` is not null.
-  if (instruction->MustDoNullCheck()) {
-    __ Move(out, ZERO);
-    __ Beqzc(obj, &done);
-  }
-
-  switch (type_check_kind) {
-    case TypeCheckKind::kExactCheck: {
-      ReadBarrierOption read_barrier_option =
-          CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
-      // /* HeapReference<Class> */ out = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        out_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp_loc,
-                                        read_barrier_option);
-      // Classes must be equal for the instanceof to succeed.
-      __ Xor(out, out, cls.AsRegister<GpuRegister>());
-      __ Sltiu(out, out, 1);
-      break;
-    }
-
-    case TypeCheckKind::kAbstractClassCheck: {
-      ReadBarrierOption read_barrier_option =
-          CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
-      // /* HeapReference<Class> */ out = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        out_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp_loc,
-                                        read_barrier_option);
-      // If the class is abstract, we eagerly fetch the super class of the
-      // object to avoid doing a comparison we know will fail.
-      Mips64Label loop;
-      __ Bind(&loop);
-      // /* HeapReference<Class> */ out = out->super_class_
-      GenerateReferenceLoadOneRegister(instruction,
-                                       out_loc,
-                                       super_offset,
-                                       maybe_temp_loc,
-                                       read_barrier_option);
-      // If `out` is null, we use it for the result, and jump to `done`.
-      __ Beqzc(out, &done);
-      __ Bnec(out, cls.AsRegister<GpuRegister>(), &loop);
-      __ LoadConst32(out, 1);
-      break;
-    }
-
-    case TypeCheckKind::kClassHierarchyCheck: {
-      ReadBarrierOption read_barrier_option =
-          CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
-      // /* HeapReference<Class> */ out = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        out_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp_loc,
-                                        read_barrier_option);
-      // Walk over the class hierarchy to find a match.
-      Mips64Label loop, success;
-      __ Bind(&loop);
-      __ Beqc(out, cls.AsRegister<GpuRegister>(), &success);
-      // /* HeapReference<Class> */ out = out->super_class_
-      GenerateReferenceLoadOneRegister(instruction,
-                                       out_loc,
-                                       super_offset,
-                                       maybe_temp_loc,
-                                       read_barrier_option);
-      __ Bnezc(out, &loop);
-      // If `out` is null, we use it for the result, and jump to `done`.
-      __ Bc(&done);
-      __ Bind(&success);
-      __ LoadConst32(out, 1);
-      break;
-    }
-
-    case TypeCheckKind::kArrayObjectCheck: {
-      ReadBarrierOption read_barrier_option =
-          CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
-      // /* HeapReference<Class> */ out = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        out_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp_loc,
-                                        read_barrier_option);
-      // Do an exact check.
-      Mips64Label success;
-      __ Beqc(out, cls.AsRegister<GpuRegister>(), &success);
-      // Otherwise, we need to check that the object's class is a non-primitive array.
-      // /* HeapReference<Class> */ out = out->component_type_
-      GenerateReferenceLoadOneRegister(instruction,
-                                       out_loc,
-                                       component_offset,
-                                       maybe_temp_loc,
-                                       read_barrier_option);
-      // If `out` is null, we use it for the result, and jump to `done`.
-      __ Beqzc(out, &done);
-      __ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
-      static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
-      __ Sltiu(out, out, 1);
-      __ Bc(&done);
-      __ Bind(&success);
-      __ LoadConst32(out, 1);
-      break;
-    }
-
-    case TypeCheckKind::kArrayCheck: {
-      // No read barrier since the slow path will retry upon failure.
-      // /* HeapReference<Class> */ out = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        out_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp_loc,
-                                        kWithoutReadBarrier);
-      DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
-          instruction, /* is_fatal= */ false);
-      codegen_->AddSlowPath(slow_path);
-      __ Bnec(out, cls.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
-      __ LoadConst32(out, 1);
-      break;
-    }
-
-    case TypeCheckKind::kUnresolvedCheck:
-    case TypeCheckKind::kInterfaceCheck: {
-      // Note that we indeed only call on slow path, but we always go
-      // into the slow path for the unresolved and interface check
-      // cases.
-      //
-      // We cannot directly call the InstanceofNonTrivial runtime
-      // entry point without resorting to a type checking slow path
-      // here (i.e. by calling InvokeRuntime directly), as it would
-      // require to assign fixed registers for the inputs of this
-      // HInstanceOf instruction (following the runtime calling
-      // convention), which might be cluttered by the potential first
-      // read barrier emission at the beginning of this method.
-      //
-      // TODO: Introduce a new runtime entry point taking the object
-      // to test (instead of its class) as argument, and let it deal
-      // with the read barrier issues. This will let us refactor this
-      // case of the `switch` code as it was previously (with a direct
-      // call to the runtime not using a type checking slow path).
-      // This should also be beneficial for the other cases above.
-      DCHECK(locations->OnlyCallsOnSlowPath());
-      slow_path = new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
-          instruction, /* is_fatal= */ false);
-      codegen_->AddSlowPath(slow_path);
-      __ Bc(slow_path->GetEntryLabel());
-      break;
-    }
-
-    case TypeCheckKind::kBitstringCheck: {
-      // /* HeapReference<Class> */ temp = obj->klass_
-      GenerateReferenceLoadTwoRegisters(instruction,
-                                        out_loc,
-                                        obj_loc,
-                                        class_offset,
-                                        maybe_temp_loc,
-                                        kWithoutReadBarrier);
-
-      GenerateBitstringTypeCheckCompare(instruction, out);
-      __ Sltiu(out, out, 1);
-      break;
-    }
-  }
-
-  __ Bind(&done);
-
-  if (slow_path != nullptr) {
-    __ Bind(slow_path->GetExitLabel());
-  }
-}
-
-void LocationsBuilderMIPS64::VisitIntConstant(HIntConstant* constant) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
-  locations->SetOut(Location::ConstantLocation(constant));
-}
-
-void InstructionCodeGeneratorMIPS64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
-  // Will be generated at use site.
-}
-
-void LocationsBuilderMIPS64::VisitNullConstant(HNullConstant* constant) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
-  locations->SetOut(Location::ConstantLocation(constant));
-}
-
-void InstructionCodeGeneratorMIPS64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
-  // Will be generated at use site.
-}
-
-void LocationsBuilderMIPS64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
-  // The trampoline uses the same calling convention as dex calling conventions,
-  // except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
-  // the method_idx.
-  HandleInvoke(invoke);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
-  codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
-}
-
-void LocationsBuilderMIPS64::HandleInvoke(HInvoke* invoke) {
-  InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
-  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
-}
-
-void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
-  HandleInvoke(invoke);
-  // The register T0 is required to be used for the hidden argument in
-  // art_quick_imt_conflict_trampoline, so add the hidden argument.
-  invoke->GetLocations()->AddTemp(Location::RegisterLocation(T0));
-}
-
-void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
-  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
-  GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
-  Location receiver = invoke->GetLocations()->InAt(0);
-  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64PointerSize);
-
-  // Set the hidden argument.
-  __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<GpuRegister>(),
-                 invoke->GetDexMethodIndex());
-
-  // temp = object->GetClass();
-  if (receiver.IsStackSlot()) {
-    __ LoadFromOffset(kLoadUnsignedWord, temp, SP, receiver.GetStackIndex());
-    __ LoadFromOffset(kLoadUnsignedWord, temp, temp, class_offset);
-  } else {
-    __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
-  }
-  codegen_->MaybeRecordImplicitNullCheck(invoke);
-  // Instead of simply (possibly) unpoisoning `temp` here, we should
-  // emit a read barrier for the previous class reference load.
-  // However this is not required in practice, as this is an
-  // intermediate/temporary reference and because the current
-  // concurrent copying collector keeps the from-space memory
-  // intact/accessible until the end of the marking phase (the
-  // concurrent copying collector may not in the future).
-  __ MaybeUnpoisonHeapReference(temp);
-  __ LoadFromOffset(kLoadDoubleword, temp, temp,
-      mirror::Class::ImtPtrOffset(kMips64PointerSize).Uint32Value());
-  uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-      invoke->GetImtIndex(), kMips64PointerSize));
-  // temp = temp->GetImtEntryAt(method_offset);
-  __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
-  // T9 = temp->GetEntryPoint();
-  __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
-  // T9();
-  __ Jalr(T9);
-  __ Nop();
-  DCHECK(!codegen_->IsLeafMethod());
-  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
-}
-
-void LocationsBuilderMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
-  IntrinsicLocationsBuilderMIPS64 intrinsic(codegen_);
-  if (intrinsic.TryDispatch(invoke)) {
-    return;
-  }
-
-  HandleInvoke(invoke);
-}
-
-void LocationsBuilderMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
-  // Explicit clinit checks triggered by static invokes must have been pruned by
-  // art::PrepareForRegisterAllocation.
-  DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
-
-  IntrinsicLocationsBuilderMIPS64 intrinsic(codegen_);
-  if (intrinsic.TryDispatch(invoke)) {
-    return;
-  }
-
-  HandleInvoke(invoke);
-}
-
-void LocationsBuilderMIPS64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
-  HandleInvoke(invoke);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitInvokePolymorphic(HInvokePolymorphic* invoke) {
-  codegen_->GenerateInvokePolymorphicCall(invoke);
-}
-
-void LocationsBuilderMIPS64::VisitInvokeCustom(HInvokeCustom* invoke) {
-  HandleInvoke(invoke);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitInvokeCustom(HInvokeCustom* invoke) {
-  codegen_->GenerateInvokeCustomCall(invoke);
-}
-
-static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS64* codegen) {
-  if (invoke->GetLocations()->Intrinsified()) {
-    IntrinsicCodeGeneratorMIPS64 intrinsic(codegen);
-    intrinsic.Dispatch(invoke);
-    return true;
-  }
-  return false;
-}
-
-HLoadString::LoadKind CodeGeneratorMIPS64::GetSupportedLoadStringKind(
-    HLoadString::LoadKind desired_string_load_kind) {
-  bool fallback_load = false;
-  switch (desired_string_load_kind) {
-    case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadString::LoadKind::kBootImageRelRo:
-    case HLoadString::LoadKind::kBssEntry:
-      DCHECK(!Runtime::Current()->UseJitCompilation());
-      break;
-    case HLoadString::LoadKind::kJitBootImageAddress:
-    case HLoadString::LoadKind::kJitTableAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
-      break;
-    case HLoadString::LoadKind::kRuntimeCall:
-      break;
-  }
-  if (fallback_load) {
-    desired_string_load_kind = HLoadString::LoadKind::kRuntimeCall;
-  }
-  return desired_string_load_kind;
-}
-
-HLoadClass::LoadKind CodeGeneratorMIPS64::GetSupportedLoadClassKind(
-    HLoadClass::LoadKind desired_class_load_kind) {
-  bool fallback_load = false;
-  switch (desired_class_load_kind) {
-    case HLoadClass::LoadKind::kInvalid:
-      LOG(FATAL) << "UNREACHABLE";
-      UNREACHABLE();
-    case HLoadClass::LoadKind::kReferrersClass:
-      break;
-    case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
-    case HLoadClass::LoadKind::kBootImageRelRo:
-    case HLoadClass::LoadKind::kBssEntry:
-      DCHECK(!Runtime::Current()->UseJitCompilation());
-      break;
-    case HLoadClass::LoadKind::kJitBootImageAddress:
-    case HLoadClass::LoadKind::kJitTableAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
-      break;
-    case HLoadClass::LoadKind::kRuntimeCall:
-      break;
-  }
-  if (fallback_load) {
-    desired_class_load_kind = HLoadClass::LoadKind::kRuntimeCall;
-  }
-  return desired_class_load_kind;
-}
-
-HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS64::GetSupportedInvokeStaticOrDirectDispatch(
-      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      ArtMethod* method ATTRIBUTE_UNUSED) {
-  // On MIPS64 we support all dispatch types.
-  return desired_dispatch_info;
-}
-
-void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(
-    HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path) {
-  // All registers are assumed to be correctly set up per the calling convention.
-  Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
-  HInvokeStaticOrDirect::MethodLoadKind method_load_kind = invoke->GetMethodLoadKind();
-  HInvokeStaticOrDirect::CodePtrLocation code_ptr_location = invoke->GetCodePtrLocation();
-
-  switch (method_load_kind) {
-    case HInvokeStaticOrDirect::MethodLoadKind::kStringInit: {
-      // temp = thread->string_init_entrypoint
-      uint32_t offset =
-          GetThreadOffset<kMips64PointerSize>(invoke->GetStringInitEntryPoint()).Int32Value();
-      __ LoadFromOffset(kLoadDoubleword,
-                        temp.AsRegister<GpuRegister>(),
-                        TR,
-                        offset);
-      break;
-    }
-    case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
-      callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
-      break;
-    case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative: {
-      DCHECK(GetCompilerOptions().IsBootImage());
-      CodeGeneratorMIPS64::PcRelativePatchInfo* info_high =
-          NewBootImageMethodPatch(invoke->GetTargetMethod());
-      CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
-          NewBootImageMethodPatch(invoke->GetTargetMethod(), info_high);
-      EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-      __ Daddiu(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678);
-      break;
-    }
-    case HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo: {
-      uint32_t boot_image_offset = GetBootImageOffset(invoke);
-      PcRelativePatchInfo* info_high = NewBootImageRelRoPatch(boot_image_offset);
-      PcRelativePatchInfo* info_low = NewBootImageRelRoPatch(boot_image_offset, info_high);
-      EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-      // Note: Boot image is in the low 4GiB and the entry is 32-bit, so emit a 32-bit load.
-      __ Lwu(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678);
-      break;
-    }
-    case HInvokeStaticOrDirect::MethodLoadKind::kBssEntry: {
-      PcRelativePatchInfo* info_high = NewMethodBssEntryPatch(
-          MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()));
-      PcRelativePatchInfo* info_low = NewMethodBssEntryPatch(
-          MethodReference(&GetGraph()->GetDexFile(), invoke->GetDexMethodIndex()), info_high);
-      EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-      __ Ld(temp.AsRegister<GpuRegister>(), AT, /* imm16= */ 0x5678);
-      break;
-    }
-    case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
-      __ LoadLiteral(temp.AsRegister<GpuRegister>(),
-                     kLoadDoubleword,
-                     DeduplicateUint64Literal(invoke->GetMethodAddress()));
-      break;
-    case HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall: {
-      GenerateInvokeStaticOrDirectRuntimeCall(invoke, temp, slow_path);
-      return;  // No code pointer retrieval; the runtime performs the call directly.
-    }
-  }
-
-  switch (code_ptr_location) {
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
-      __ Balc(&frame_entry_label_);
-      break;
-    case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
-      // T9 = callee_method->entry_point_from_quick_compiled_code_;
-      __ LoadFromOffset(kLoadDoubleword,
-                        T9,
-                        callee_method.AsRegister<GpuRegister>(),
-                        ArtMethod::EntryPointFromQuickCompiledCodeOffset(
-                            kMips64PointerSize).Int32Value());
-      // T9()
-      __ Jalr(T9);
-      __ Nop();
-      break;
-  }
-  RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
-
-  DCHECK(!IsLeafMethod());
-}
-
-void InstructionCodeGeneratorMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
-  // Explicit clinit checks triggered by static invokes must have been pruned by
-  // art::PrepareForRegisterAllocation.
-  DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
-
-  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
-    return;
-  }
-
-  LocationSummary* locations = invoke->GetLocations();
-  codegen_->GenerateStaticOrDirectCall(invoke,
-                                       locations->HasTemps()
-                                           ? locations->GetTemp(0)
-                                           : Location::NoLocation());
-}
-
-void CodeGeneratorMIPS64::GenerateVirtualCall(
-    HInvokeVirtual* invoke, Location temp_location, SlowPathCode* slow_path) {
-  // Use the calling convention instead of the location of the receiver, as
-  // intrinsics may have put the receiver in a different register. In the intrinsics
-  // slow path, the arguments have been moved to the right place, so here we are
-  // guaranteed that the receiver is the first register of the calling convention.
-  InvokeDexCallingConvention calling_convention;
-  GpuRegister receiver = calling_convention.GetRegisterAt(0);
-
-  GpuRegister temp = temp_location.AsRegister<GpuRegister>();
-  size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
-      invoke->GetVTableIndex(), kMips64PointerSize).SizeValue();
-  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64PointerSize);
-
-  // temp = object->GetClass();
-  __ LoadFromOffset(kLoadUnsignedWord, temp, receiver, class_offset);
-  MaybeRecordImplicitNullCheck(invoke);
-  // Instead of simply (possibly) unpoisoning `temp` here, we should
-  // emit a read barrier for the previous class reference load.
-  // However this is not required in practice, as this is an
-  // intermediate/temporary reference and because the current
-  // concurrent copying collector keeps the from-space memory
-  // intact/accessible until the end of the marking phase (the
-  // concurrent copying collector may not in the future).
-  __ MaybeUnpoisonHeapReference(temp);
-  // temp = temp->GetMethodAt(method_offset);
-  __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
-  // T9 = temp->GetEntryPoint();
-  __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
-  // T9();
-  __ Jalr(T9);
-  __ Nop();
-  RecordPcInfo(invoke, invoke->GetDexPc(), slow_path);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
-  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
-    return;
-  }
-
-  codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
-  DCHECK(!codegen_->IsLeafMethod());
-}
-
-void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
-  HLoadClass::LoadKind load_kind = cls->GetLoadKind();
-  if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
-    InvokeRuntimeCallingConvention calling_convention;
-    Location loc = Location::RegisterLocation(calling_convention.GetRegisterAt(0));
-    CodeGenerator::CreateLoadClassRuntimeCallLocationSummary(cls, loc, loc);
-    return;
-  }
-  DCHECK(!cls->NeedsAccessCheck());
-
-  const bool requires_read_barrier = kEmitCompilerReadBarrier && !cls->IsInBootImage();
-  LocationSummary::CallKind call_kind = (cls->NeedsEnvironment() || requires_read_barrier)
-      ? LocationSummary::kCallOnSlowPath
-      : LocationSummary::kNoCall;
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(cls, call_kind);
-  if (kUseBakerReadBarrier && requires_read_barrier && !cls->NeedsEnvironment()) {
-    locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
-  }
-  if (load_kind == HLoadClass::LoadKind::kReferrersClass) {
-    locations->SetInAt(0, Location::RequiresRegister());
-  }
-  locations->SetOut(Location::RequiresRegister());
-  if (load_kind == HLoadClass::LoadKind::kBssEntry) {
-    if (!kUseReadBarrier || kUseBakerReadBarrier) {
-      // Rely on the type resolution or initialization and marking to save everything we need.
-      locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
-    } else {
-      // For non-Baker read barriers we have a temp-clobbering call.
-    }
-  }
-}
-
-// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
-// move.
-void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAFETY_ANALYSIS {
-  HLoadClass::LoadKind load_kind = cls->GetLoadKind();
-  if (load_kind == HLoadClass::LoadKind::kRuntimeCall) {
-    codegen_->GenerateLoadClassRuntimeCall(cls);
-    return;
-  }
-  DCHECK(!cls->NeedsAccessCheck());
-
-  LocationSummary* locations = cls->GetLocations();
-  Location out_loc = locations->Out();
-  GpuRegister out = out_loc.AsRegister<GpuRegister>();
-  GpuRegister current_method_reg = ZERO;
-  if (load_kind == HLoadClass::LoadKind::kReferrersClass ||
-      load_kind == HLoadClass::LoadKind::kRuntimeCall) {
-      current_method_reg = locations->InAt(0).AsRegister<GpuRegister>();
-  }
-
-  const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
-      ? kWithoutReadBarrier
-      : kCompilerReadBarrierOption;
-  bool generate_null_check = false;
-  switch (load_kind) {
-    case HLoadClass::LoadKind::kReferrersClass:
-      DCHECK(!cls->CanCallRuntime());
-      DCHECK(!cls->MustGenerateClinitCheck());
-      // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
-      GenerateGcRootFieldLoad(cls,
-                              out_loc,
-                              current_method_reg,
-                              ArtMethod::DeclaringClassOffset().Int32Value(),
-                              read_barrier_option);
-      break;
-    case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
-      DCHECK(codegen_->GetCompilerOptions().IsBootImage());
-      DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
-      CodeGeneratorMIPS64::PcRelativePatchInfo* info_high =
-          codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex());
-      CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
-          codegen_->NewBootImageTypePatch(cls->GetDexFile(), cls->GetTypeIndex(), info_high);
-      codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-      __ Daddiu(out, AT, /* imm16= */ 0x5678);
-      break;
-    }
-    case HLoadClass::LoadKind::kBootImageRelRo: {
-      DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
-      uint32_t boot_image_offset = codegen_->GetBootImageOffset(cls);
-      CodeGeneratorMIPS64::PcRelativePatchInfo* info_high =
-          codegen_->NewBootImageRelRoPatch(boot_image_offset);
-      CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
-          codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
-      codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-      __ Lwu(out, AT, /* imm16= */ 0x5678);
-      break;
-    }
-    case HLoadClass::LoadKind::kBssEntry: {
-      CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high =
-          codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
-      CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
-          codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex(), bss_info_high);
-      codegen_->EmitPcRelativeAddressPlaceholderHigh(bss_info_high, out);
-      GenerateGcRootFieldLoad(cls,
-                              out_loc,
-                              out,
-                              /* offset= */ 0x5678,
-                              read_barrier_option,
-                              &info_low->label);
-      generate_null_check = true;
-      break;
-    }
-    case HLoadClass::LoadKind::kJitBootImageAddress: {
-      DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
-      uint32_t address = reinterpret_cast32<uint32_t>(cls->GetClass().Get());
-      DCHECK_NE(address, 0u);
-      __ LoadLiteral(out,
-                     kLoadUnsignedWord,
-                     codegen_->DeduplicateBootImageAddressLiteral(address));
-      break;
-    }
-    case HLoadClass::LoadKind::kJitTableAddress:
-      __ LoadLiteral(out,
-                     kLoadUnsignedWord,
-                     codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
-                                                          cls->GetTypeIndex(),
-                                                          cls->GetClass()));
-      GenerateGcRootFieldLoad(cls, out_loc, out, 0, read_barrier_option);
-      break;
-    case HLoadClass::LoadKind::kRuntimeCall:
-    case HLoadClass::LoadKind::kInvalid:
-      LOG(FATAL) << "UNREACHABLE";
-      UNREACHABLE();
-  }
-
-  if (generate_null_check || cls->MustGenerateClinitCheck()) {
-    DCHECK(cls->CanCallRuntime());
-    SlowPathCodeMIPS64* slow_path =
-        new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(cls, cls);
-    codegen_->AddSlowPath(slow_path);
-    if (generate_null_check) {
-      __ Beqzc(out, slow_path->GetEntryLabel());
-    }
-    if (cls->MustGenerateClinitCheck()) {
-      GenerateClassInitializationCheck(slow_path, out);
-    } else {
-      __ Bind(slow_path->GetExitLabel());
-    }
-  }
-}
-
-void LocationsBuilderMIPS64::VisitLoadMethodHandle(HLoadMethodHandle* load) {
-  InvokeRuntimeCallingConvention calling_convention;
-  Location loc = Location::RegisterLocation(calling_convention.GetRegisterAt(0));
-  CodeGenerator::CreateLoadMethodHandleRuntimeCallLocationSummary(load, loc, loc);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitLoadMethodHandle(HLoadMethodHandle* load) {
-  codegen_->GenerateLoadMethodHandleRuntimeCall(load);
-}
-
-void LocationsBuilderMIPS64::VisitLoadMethodType(HLoadMethodType* load) {
-  InvokeRuntimeCallingConvention calling_convention;
-  Location loc = Location::RegisterLocation(calling_convention.GetRegisterAt(0));
-  CodeGenerator::CreateLoadMethodTypeRuntimeCallLocationSummary(load, loc, loc);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitLoadMethodType(HLoadMethodType* load) {
-  codegen_->GenerateLoadMethodTypeRuntimeCall(load);
-}
-
-static int32_t GetExceptionTlsOffset() {
-  return Thread::ExceptionOffset<kMips64PointerSize>().Int32Value();
-}
-
-void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(load, LocationSummary::kNoCall);
-  locations->SetOut(Location::RequiresRegister());
-}
-
-void InstructionCodeGeneratorMIPS64::VisitLoadException(HLoadException* load) {
-  GpuRegister out = load->GetLocations()->Out().AsRegister<GpuRegister>();
-  __ LoadFromOffset(kLoadUnsignedWord, out, TR, GetExceptionTlsOffset());
-}
-
-void LocationsBuilderMIPS64::VisitClearException(HClearException* clear) {
-  new (GetGraph()->GetAllocator()) LocationSummary(clear, LocationSummary::kNoCall);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
-  __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
-}
-
-void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
-  HLoadString::LoadKind load_kind = load->GetLoadKind();
-  LocationSummary::CallKind call_kind = CodeGenerator::GetLoadStringCallKind(load);
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(load, call_kind);
-  if (load_kind == HLoadString::LoadKind::kRuntimeCall) {
-    InvokeRuntimeCallingConvention calling_convention;
-    locations->SetOut(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  } else {
-    locations->SetOut(Location::RequiresRegister());
-    if (load_kind == HLoadString::LoadKind::kBssEntry) {
-      if (!kUseReadBarrier || kUseBakerReadBarrier) {
-        // Rely on the pResolveString and marking to save everything we need.
-        locations->SetCustomSlowPathCallerSaves(OneRegInReferenceOutSaveEverythingCallerSaves());
-      } else {
-        // For non-Baker read barriers we have a temp-clobbering call.
-      }
-    }
-  }
-}
-
-// NO_THREAD_SAFETY_ANALYSIS as we manipulate handles whose internal object we know does not
-// move.
-void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREAD_SAFETY_ANALYSIS {
-  HLoadString::LoadKind load_kind = load->GetLoadKind();
-  LocationSummary* locations = load->GetLocations();
-  Location out_loc = locations->Out();
-  GpuRegister out = out_loc.AsRegister<GpuRegister>();
-
-  switch (load_kind) {
-    case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
-      DCHECK(codegen_->GetCompilerOptions().IsBootImage());
-      CodeGeneratorMIPS64::PcRelativePatchInfo* info_high =
-          codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex());
-      CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
-          codegen_->NewBootImageStringPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
-      codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-      __ Daddiu(out, AT, /* imm16= */ 0x5678);
-      return;
-    }
-    case HLoadString::LoadKind::kBootImageRelRo: {
-      DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
-      uint32_t boot_image_offset = codegen_->GetBootImageOffset(load);
-      CodeGeneratorMIPS64::PcRelativePatchInfo* info_high =
-          codegen_->NewBootImageRelRoPatch(boot_image_offset);
-      CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
-          codegen_->NewBootImageRelRoPatch(boot_image_offset, info_high);
-      codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, AT, info_low);
-      __ Lwu(out, AT, /* imm16= */ 0x5678);
-      return;
-    }
-    case HLoadString::LoadKind::kBssEntry: {
-      CodeGeneratorMIPS64::PcRelativePatchInfo* info_high =
-          codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
-      CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
-          codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
-      codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, out);
-      GenerateGcRootFieldLoad(load,
-                              out_loc,
-                              out,
-                              /* offset= */ 0x5678,
-                              kCompilerReadBarrierOption,
-                              &info_low->label);
-      SlowPathCodeMIPS64* slow_path =
-          new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS64(load);
-      codegen_->AddSlowPath(slow_path);
-      __ Beqzc(out, slow_path->GetEntryLabel());
-      __ Bind(slow_path->GetExitLabel());
-      return;
-    }
-    case HLoadString::LoadKind::kJitBootImageAddress: {
-      uint32_t address = reinterpret_cast32<uint32_t>(load->GetString().Get());
-      DCHECK_NE(address, 0u);
-      __ LoadLiteral(out,
-                     kLoadUnsignedWord,
-                     codegen_->DeduplicateBootImageAddressLiteral(address));
-      return;
-    }
-    case HLoadString::LoadKind::kJitTableAddress:
-      __ LoadLiteral(out,
-                     kLoadUnsignedWord,
-                     codegen_->DeduplicateJitStringLiteral(load->GetDexFile(),
-                                                           load->GetStringIndex(),
-                                                           load->GetString()));
-      GenerateGcRootFieldLoad(load, out_loc, out, 0, kCompilerReadBarrierOption);
-      return;
-    default:
-      break;
-  }
-
-  // TODO: Re-add the compiler code to do string dex cache lookup again.
-  DCHECK(load_kind == HLoadString::LoadKind::kRuntimeCall);
-  InvokeRuntimeCallingConvention calling_convention;
-  DCHECK_EQ(calling_convention.GetRegisterAt(0), out);
-  __ LoadConst32(calling_convention.GetRegisterAt(0), load->GetStringIndex().index_);
-  codegen_->InvokeRuntime(kQuickResolveString, load, load->GetDexPc());
-  CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
-}
-
-void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(constant);
-  locations->SetOut(Location::ConstantLocation(constant));
-}
-
-void InstructionCodeGeneratorMIPS64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
-  // Will be generated at use site.
-}
-
-void LocationsBuilderMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
-      instruction, LocationSummary::kCallOnMainOnly);
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-}
-
-void InstructionCodeGeneratorMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
-  codegen_->InvokeRuntime(instruction->IsEnter() ? kQuickLockObject : kQuickUnlockObject,
-                          instruction,
-                          instruction->GetDexPc());
-  if (instruction->IsEnter()) {
-    CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
-  } else {
-    CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitMul(HMul* mul) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(mul, LocationSummary::kNoCall);
-  switch (mul->GetResultType()) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RequiresRegister());
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-      break;
-
-    default:
-      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::VisitMul(HMul* instruction) {
-  DataType::Type type = instruction->GetType();
-  LocationSummary* locations = instruction->GetLocations();
-
-  switch (type) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64: {
-      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
-      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
-      GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
-      if (type == DataType::Type::kInt32)
-        __ MulR6(dst, lhs, rhs);
-      else
-        __ Dmul(dst, lhs, rhs);
-      break;
-    }
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64: {
-      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
-      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
-      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
-      if (type == DataType::Type::kFloat32)
-        __ MulS(dst, lhs, rhs);
-      else
-        __ MulD(dst, lhs, rhs);
-      break;
-    }
-    default:
-      LOG(FATAL) << "Unexpected mul type " << type;
-  }
-}
-
-void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(neg, LocationSummary::kNoCall);
-  switch (neg->GetResultType()) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-      break;
-
-    default:
-      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::VisitNeg(HNeg* instruction) {
-  DataType::Type type = instruction->GetType();
-  LocationSummary* locations = instruction->GetLocations();
-
-  switch (type) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64: {
-      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
-      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
-      if (type == DataType::Type::kInt32)
-        __ Subu(dst, ZERO, src);
-      else
-        __ Dsubu(dst, ZERO, src);
-      break;
-    }
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64: {
-      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
-      FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
-      if (type == DataType::Type::kFloat32)
-        __ NegS(dst, src);
-      else
-        __ NegD(dst, src);
-      break;
-    }
-    default:
-      LOG(FATAL) << "Unexpected neg type " << type;
-  }
-}
-
-void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
-      instruction, LocationSummary::kCallOnMainOnly);
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference));
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-}
-
-void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
-  // Note: if heap poisoning is enabled, the entry point takes care of poisoning the reference.
-  QuickEntrypointEnum entrypoint = CodeGenerator::GetArrayAllocationEntrypoint(instruction);
-  codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
-  CheckEntrypointTypes<kQuickAllocArrayResolved, void*, mirror::Class*, int32_t>();
-  DCHECK(!codegen_->IsLeafMethod());
-}
-
-void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
-      instruction, LocationSummary::kCallOnMainOnly);
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kReference));
-}
-
-void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) {
-  codegen_->InvokeRuntime(instruction->GetEntrypoint(), instruction, instruction->GetDexPc());
-  CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>();
-}
-
-void LocationsBuilderMIPS64::VisitNot(HNot* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitNot(HNot* instruction) {
-  DataType::Type type = instruction->GetType();
-  LocationSummary* locations = instruction->GetLocations();
-
-  switch (type) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64: {
-      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
-      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
-      __ Nor(dst, src, ZERO);
-      break;
-    }
-
-    default:
-      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  __ Xori(locations->Out().AsRegister<GpuRegister>(),
-          locations->InAt(0).AsRegister<GpuRegister>(),
-          1);
-}
-
-void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) {
-  LocationSummary* locations = codegen_->CreateThrowingSlowPathLocations(instruction);
-  locations->SetInAt(0, Location::RequiresRegister());
-}
-
-void CodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
-  if (CanMoveNullCheckToUser(instruction)) {
-    return;
-  }
-  Location obj = instruction->GetLocations()->InAt(0);
-
-  __ Lw(ZERO, obj.AsRegister<GpuRegister>(), 0);
-  RecordPcInfo(instruction, instruction->GetDexPc());
-}
-
-void CodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
-  SlowPathCodeMIPS64* slow_path =
-      new (GetScopedAllocator()) NullCheckSlowPathMIPS64(instruction);
-  AddSlowPath(slow_path);
-
-  Location obj = instruction->GetLocations()->InAt(0);
-
-  __ Beqzc(obj.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
-}
-
-void InstructionCodeGeneratorMIPS64::VisitNullCheck(HNullCheck* instruction) {
-  codegen_->GenerateNullCheck(instruction);
-}
-
-void LocationsBuilderMIPS64::VisitOr(HOr* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitOr(HOr* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void LocationsBuilderMIPS64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unreachable";
-}
-
-void InstructionCodeGeneratorMIPS64::VisitParallelMove(HParallelMove* instruction) {
-  if (instruction->GetNext()->IsSuspendCheck() &&
-      instruction->GetBlock()->GetLoopInformation() != nullptr) {
-    HSuspendCheck* suspend_check = instruction->GetNext()->AsSuspendCheck();
-    // The back edge will generate the suspend check.
-    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(suspend_check, instruction);
-  }
-
-  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
-}
-
-void LocationsBuilderMIPS64::VisitParameterValue(HParameterValue* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
-  if (location.IsStackSlot()) {
-    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
-  } else if (location.IsDoubleStackSlot()) {
-    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
-  }
-  locations->SetOut(location);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitParameterValue(HParameterValue* instruction
-                                                         ATTRIBUTE_UNUSED) {
-  // Nothing to do, the parameter is already at its location.
-}
-
-void LocationsBuilderMIPS64::VisitCurrentMethod(HCurrentMethod* instruction) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
-  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
-}
-
-void InstructionCodeGeneratorMIPS64::VisitCurrentMethod(HCurrentMethod* instruction
-                                                        ATTRIBUTE_UNUSED) {
-  // Nothing to do, the method is already at its location.
-}
-
-void LocationsBuilderMIPS64::VisitPhi(HPhi* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-  for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
-    locations->SetInAt(i, Location::Any());
-  }
-  locations->SetOut(Location::Any());
-}
-
-void InstructionCodeGeneratorMIPS64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unreachable";
-}
-
-void LocationsBuilderMIPS64::VisitRem(HRem* rem) {
-  DataType::Type type = rem->GetResultType();
-  LocationSummary::CallKind call_kind =
-      DataType::IsFloatingPointType(type) ? LocationSummary::kCallOnMainOnly
-                                          : LocationSummary::kNoCall;
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(rem, call_kind);
-
-  switch (type) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64: {
-      InvokeRuntimeCallingConvention calling_convention;
-      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
-      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
-      locations->SetOut(calling_convention.GetReturnLocation(type));
-      break;
-    }
-
-    default:
-      LOG(FATAL) << "Unexpected rem type " << type;
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::VisitRem(HRem* instruction) {
-  DataType::Type type = instruction->GetType();
-
-  switch (type) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      GenerateDivRemIntegral(instruction);
-      break;
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64: {
-      QuickEntrypointEnum entrypoint =
-          (type == DataType::Type::kFloat32) ? kQuickFmodf : kQuickFmod;
-      codegen_->InvokeRuntime(entrypoint, instruction, instruction->GetDexPc());
-      if (type == DataType::Type::kFloat32) {
-        CheckEntrypointTypes<kQuickFmodf, float, float, float>();
-      } else {
-        CheckEntrypointTypes<kQuickFmod, double, double, double>();
-      }
-      break;
-    }
-    default:
-      LOG(FATAL) << "Unexpected rem type " << type;
-  }
-}
-
-static void CreateMinMaxLocations(ArenaAllocator* allocator, HBinaryOperation* minmax) {
-  LocationSummary* locations = new (allocator) LocationSummary(minmax);
-  switch (minmax->GetResultType()) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RequiresRegister());
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-      break;
-    default:
-      LOG(FATAL) << "Unexpected type for HMinMax " << minmax->GetResultType();
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::GenerateMinMaxInt(LocationSummary* locations, bool is_min) {
-  GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-
-  if (lhs == rhs) {
-    if (out != lhs) {
-      __ Move(out, lhs);
-    }
-  } else {
-    // Some architectures, such as ARM and MIPS (prior to r6), have a
-    // conditional move instruction which only changes the target
-    // (output) register if the condition is true (MIPS prior to r6 had
-    // MOVF, MOVT, and MOVZ). The SELEQZ and SELNEZ instructions always
-    // change the target (output) register.  If the condition is true the
-    // output register gets the contents of the "rs" register; otherwise,
-    // the output register is set to zero. One consequence of this is
-    // that to implement something like "rd = c==0 ? rs : rt" MIPS64r6
-    // needs to use a pair of SELEQZ/SELNEZ instructions.  After
-    // executing this pair of instructions one of the output registers
-    // from the pair will necessarily contain zero. Then the code ORs the
-    // output registers from the SELEQZ/SELNEZ instructions to get the
-    // final result.
-    //
-    // The initial test to see if the output register is same as the
-    // first input register is needed to make sure that value in the
-    // first input register isn't clobbered before we've finished
-    // computing the output value. The logic in the corresponding else
-    // clause performs the same task but makes sure the second input
-    // register isn't clobbered in the event that it's the same register
-    // as the output register; the else clause also handles the case
-    // where the output register is distinct from both the first, and the
-    // second input registers.
-    if (out == lhs) {
-      __ Slt(AT, rhs, lhs);
-      if (is_min) {
-        __ Seleqz(out, lhs, AT);
-        __ Selnez(AT, rhs, AT);
-      } else {
-        __ Selnez(out, lhs, AT);
-        __ Seleqz(AT, rhs, AT);
-      }
-    } else {
-      __ Slt(AT, lhs, rhs);
-      if (is_min) {
-        __ Seleqz(out, rhs, AT);
-        __ Selnez(AT, lhs, AT);
-      } else {
-        __ Selnez(out, rhs, AT);
-        __ Seleqz(AT, lhs, AT);
-      }
-    }
-    __ Or(out, out, AT);
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::GenerateMinMaxFP(LocationSummary* locations,
-                                                      bool is_min,
-                                                      DataType::Type type) {
-  FpuRegister a = locations->InAt(0).AsFpuRegister<FpuRegister>();
-  FpuRegister b = locations->InAt(1).AsFpuRegister<FpuRegister>();
-  FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
-
-  Mips64Label noNaNs;
-  Mips64Label done;
-  FpuRegister ftmp = ((out != a) && (out != b)) ? out : FTMP;
-
-  // When Java computes min/max it prefers a NaN to a number; the
-  // behavior of MIPSR6 is to prefer numbers to NaNs, i.e., if one of
-  // the inputs is a NaN and the other is a valid number, the MIPS
-  // instruction will return the number; Java wants the NaN value
-  // returned. This is why there is extra logic preceding the use of
-  // the MIPS min.fmt/max.fmt instructions. If either a, or b holds a
-  // NaN, return the NaN, otherwise return the min/max.
-  if (type == DataType::Type::kFloat64) {
-    __ CmpUnD(FTMP, a, b);
-    __ Bc1eqz(FTMP, &noNaNs);
-
-    // One of the inputs is a NaN
-    __ CmpEqD(ftmp, a, a);
-    // If a == a then b is the NaN, otherwise a is the NaN.
-    __ SelD(ftmp, a, b);
-
-    if (ftmp != out) {
-      __ MovD(out, ftmp);
-    }
-
-    __ Bc(&done);
-
-    __ Bind(&noNaNs);
-
-    if (is_min) {
-      __ MinD(out, a, b);
-    } else {
-      __ MaxD(out, a, b);
-    }
-  } else {
-    DCHECK_EQ(type, DataType::Type::kFloat32);
-    __ CmpUnS(FTMP, a, b);
-    __ Bc1eqz(FTMP, &noNaNs);
-
-    // One of the inputs is a NaN
-    __ CmpEqS(ftmp, a, a);
-    // If a == a then b is the NaN, otherwise a is the NaN.
-    __ SelS(ftmp, a, b);
-
-    if (ftmp != out) {
-      __ MovS(out, ftmp);
-    }
-
-    __ Bc(&done);
-
-    __ Bind(&noNaNs);
-
-    if (is_min) {
-      __ MinS(out, a, b);
-    } else {
-      __ MaxS(out, a, b);
-    }
-  }
-
-  __ Bind(&done);
-}
-
-void InstructionCodeGeneratorMIPS64::GenerateMinMax(HBinaryOperation* minmax, bool is_min) {
-  DataType::Type type = minmax->GetResultType();
-  switch (type) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      GenerateMinMaxInt(minmax->GetLocations(), is_min);
-      break;
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      GenerateMinMaxFP(minmax->GetLocations(), is_min, type);
-      break;
-    default:
-      LOG(FATAL) << "Unexpected type for HMinMax " << type;
-  }
-}
-
-void LocationsBuilderMIPS64::VisitMin(HMin* min) {
-  CreateMinMaxLocations(GetGraph()->GetAllocator(), min);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitMin(HMin* min) {
-  GenerateMinMax(min, /*is_min*/ true);
-}
-
-void LocationsBuilderMIPS64::VisitMax(HMax* max) {
-  CreateMinMaxLocations(GetGraph()->GetAllocator(), max);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitMax(HMax* max) {
-  GenerateMinMax(max, /*is_min*/ false);
-}
-
-void LocationsBuilderMIPS64::VisitAbs(HAbs* abs) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(abs);
-  switch (abs->GetResultType()) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-      break;
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-      break;
-    default:
-      LOG(FATAL) << "Unexpected abs type " << abs->GetResultType();
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::VisitAbs(HAbs* abs) {
-  LocationSummary* locations = abs->GetLocations();
-  switch (abs->GetResultType()) {
-    case DataType::Type::kInt32: {
-      GpuRegister in  = locations->InAt(0).AsRegister<GpuRegister>();
-      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-      __ Sra(AT, in, 31);
-      __ Xor(out, in, AT);
-      __ Subu(out, out, AT);
-      break;
-    }
-    case DataType::Type::kInt64: {
-      GpuRegister in  = locations->InAt(0).AsRegister<GpuRegister>();
-      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-      __ Dsra32(AT, in, 31);
-      __ Xor(out, in, AT);
-      __ Dsubu(out, out, AT);
-      break;
-    }
-    case DataType::Type::kFloat32: {
-      FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
-      FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
-      __ AbsS(out, in);
-      break;
-    }
-    case DataType::Type::kFloat64: {
-      FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
-      FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
-      __ AbsD(out, in);
-      break;
-    }
-    default:
-      LOG(FATAL) << "Unexpected abs type " << abs->GetResultType();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitConstructorFence(HConstructorFence* constructor_fence) {
-  constructor_fence->SetLocations(nullptr);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitConstructorFence(
-    HConstructorFence* constructor_fence ATTRIBUTE_UNUSED) {
-  GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
-}
-
-void LocationsBuilderMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
-  memory_barrier->SetLocations(nullptr);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
-  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
-}
-
-void LocationsBuilderMIPS64::VisitReturn(HReturn* ret) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(ret);
-  DataType::Type return_type = ret->InputAt(0)->GetType();
-  locations->SetInAt(0, Mips64ReturnLocation(return_type));
-}
-
-void InstructionCodeGeneratorMIPS64::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
-  codegen_->GenerateFrameExit();
-}
-
-void LocationsBuilderMIPS64::VisitReturnVoid(HReturnVoid* ret) {
-  ret->SetLocations(nullptr);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
-  codegen_->GenerateFrameExit();
-}
-
-void LocationsBuilderMIPS64::VisitRor(HRor* ror) {
-  HandleShift(ror);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitRor(HRor* ror) {
-  HandleShift(ror);
-}
-
-void LocationsBuilderMIPS64::VisitShl(HShl* shl) {
-  HandleShift(shl);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitShl(HShl* shl) {
-  HandleShift(shl);
-}
-
-void LocationsBuilderMIPS64::VisitShr(HShr* shr) {
-  HandleShift(shr);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitShr(HShr* shr) {
-  HandleShift(shr);
-}
-
-void LocationsBuilderMIPS64::VisitSub(HSub* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitSub(HSub* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void LocationsBuilderMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
-  HandleFieldGet(instruction, instruction->GetFieldInfo());
-}
-
-void InstructionCodeGeneratorMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
-  HandleFieldGet(instruction, instruction->GetFieldInfo());
-}
-
-void LocationsBuilderMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
-  HandleFieldSet(instruction, instruction->GetFieldInfo());
-}
-
-void InstructionCodeGeneratorMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
-  HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
-}
-
-void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldGet(
-    HUnresolvedInstanceFieldGet* instruction) {
-  FieldAccessCallingConventionMIPS64 calling_convention;
-  codegen_->CreateUnresolvedFieldLocationSummary(
-      instruction, instruction->GetFieldType(), calling_convention);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldGet(
-    HUnresolvedInstanceFieldGet* instruction) {
-  FieldAccessCallingConventionMIPS64 calling_convention;
-  codegen_->GenerateUnresolvedFieldAccess(instruction,
-                                          instruction->GetFieldType(),
-                                          instruction->GetFieldIndex(),
-                                          instruction->GetDexPc(),
-                                          calling_convention);
-}
-
-void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldSet(
-    HUnresolvedInstanceFieldSet* instruction) {
-  FieldAccessCallingConventionMIPS64 calling_convention;
-  codegen_->CreateUnresolvedFieldLocationSummary(
-      instruction, instruction->GetFieldType(), calling_convention);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldSet(
-    HUnresolvedInstanceFieldSet* instruction) {
-  FieldAccessCallingConventionMIPS64 calling_convention;
-  codegen_->GenerateUnresolvedFieldAccess(instruction,
-                                          instruction->GetFieldType(),
-                                          instruction->GetFieldIndex(),
-                                          instruction->GetDexPc(),
-                                          calling_convention);
-}
-
-void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldGet(
-    HUnresolvedStaticFieldGet* instruction) {
-  FieldAccessCallingConventionMIPS64 calling_convention;
-  codegen_->CreateUnresolvedFieldLocationSummary(
-      instruction, instruction->GetFieldType(), calling_convention);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldGet(
-    HUnresolvedStaticFieldGet* instruction) {
-  FieldAccessCallingConventionMIPS64 calling_convention;
-  codegen_->GenerateUnresolvedFieldAccess(instruction,
-                                          instruction->GetFieldType(),
-                                          instruction->GetFieldIndex(),
-                                          instruction->GetDexPc(),
-                                          calling_convention);
-}
-
-void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldSet(
-    HUnresolvedStaticFieldSet* instruction) {
-  FieldAccessCallingConventionMIPS64 calling_convention;
-  codegen_->CreateUnresolvedFieldLocationSummary(
-      instruction, instruction->GetFieldType(), calling_convention);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldSet(
-    HUnresolvedStaticFieldSet* instruction) {
-  FieldAccessCallingConventionMIPS64 calling_convention;
-  codegen_->GenerateUnresolvedFieldAccess(instruction,
-                                          instruction->GetFieldType(),
-                                          instruction->GetFieldIndex(),
-                                          instruction->GetDexPc(),
-                                          calling_convention);
-}
-
-void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
-      instruction, LocationSummary::kCallOnSlowPath);
-  // In suspend check slow path, usually there are no caller-save registers at all.
-  // If SIMD instructions are present, however, we force spilling all live SIMD
-  // registers in full width (since the runtime only saves/restores lower part).
-  locations->SetCustomSlowPathCallerSaves(
-      GetGraph()->HasSIMD() ? RegisterSet::AllFpu() : RegisterSet::Empty());
-}
-
-void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
-  HBasicBlock* block = instruction->GetBlock();
-  if (block->GetLoopInformation() != nullptr) {
-    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
-    // The back edge will generate the suspend check.
-    return;
-  }
-  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
-    // The goto will generate the suspend check.
-    return;
-  }
-  GenerateSuspendCheck(instruction, nullptr);
-}
-
-void LocationsBuilderMIPS64::VisitThrow(HThrow* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
-      instruction, LocationSummary::kCallOnMainOnly);
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-}
-
-void InstructionCodeGeneratorMIPS64::VisitThrow(HThrow* instruction) {
-  codegen_->InvokeRuntime(kQuickDeliverException, instruction, instruction->GetDexPc());
-  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
-}
-
-void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
-  DataType::Type input_type = conversion->GetInputType();
-  DataType::Type result_type = conversion->GetResultType();
-  DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
-      << input_type << " -> " << result_type;
-
-  if ((input_type == DataType::Type::kReference) || (input_type == DataType::Type::kVoid) ||
-      (result_type == DataType::Type::kReference) || (result_type == DataType::Type::kVoid)) {
-    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
-  }
-
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(conversion);
-
-  if (DataType::IsFloatingPointType(input_type)) {
-    locations->SetInAt(0, Location::RequiresFpuRegister());
-  } else {
-    locations->SetInAt(0, Location::RequiresRegister());
-  }
-
-  if (DataType::IsFloatingPointType(result_type)) {
-    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-  } else {
-    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
-  LocationSummary* locations = conversion->GetLocations();
-  DataType::Type result_type = conversion->GetResultType();
-  DataType::Type input_type = conversion->GetInputType();
-
-  DCHECK(!DataType::IsTypeConversionImplicit(input_type, result_type))
-      << input_type << " -> " << result_type;
-
-  if (DataType::IsIntegralType(result_type) && DataType::IsIntegralType(input_type)) {
-    GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
-    GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
-
-    switch (result_type) {
-      case DataType::Type::kUint8:
-        __ Andi(dst, src, 0xFF);
-        break;
-      case DataType::Type::kInt8:
-        if (input_type == DataType::Type::kInt64) {
-          // Type conversion from long to types narrower than int is a result of code
-          // transformations. To avoid unpredictable results for SEB and SEH, we first
-          // need to sign-extend the low 32-bit value into bits 32 through 63.
-          __ Sll(dst, src, 0);
-          __ Seb(dst, dst);
-        } else {
-          __ Seb(dst, src);
-        }
-        break;
-      case DataType::Type::kUint16:
-        __ Andi(dst, src, 0xFFFF);
-        break;
-      case DataType::Type::kInt16:
-        if (input_type == DataType::Type::kInt64) {
-          // Type conversion from long to types narrower than int is a result of code
-          // transformations. To avoid unpredictable results for SEB and SEH, we first
-          // need to sign-extend the low 32-bit value into bits 32 through 63.
-          __ Sll(dst, src, 0);
-          __ Seh(dst, dst);
-        } else {
-          __ Seh(dst, src);
-        }
-        break;
-      case DataType::Type::kInt32:
-      case DataType::Type::kInt64:
-        // Sign-extend 32-bit int into bits 32 through 63 for int-to-long and long-to-int
-        // conversions, except when the input and output registers are the same and we are not
-        // converting longs to shorter types. In these cases, do nothing.
-        if ((input_type == DataType::Type::kInt64) || (dst != src)) {
-          __ Sll(dst, src, 0);
-        }
-        break;
-
-      default:
-        LOG(FATAL) << "Unexpected type conversion from " << input_type
-                   << " to " << result_type;
-    }
-  } else if (DataType::IsFloatingPointType(result_type) && DataType::IsIntegralType(input_type)) {
-    FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
-    GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
-    if (input_type == DataType::Type::kInt64) {
-      __ Dmtc1(src, FTMP);
-      if (result_type == DataType::Type::kFloat32) {
-        __ Cvtsl(dst, FTMP);
-      } else {
-        __ Cvtdl(dst, FTMP);
-      }
-    } else {
-      __ Mtc1(src, FTMP);
-      if (result_type == DataType::Type::kFloat32) {
-        __ Cvtsw(dst, FTMP);
-      } else {
-        __ Cvtdw(dst, FTMP);
-      }
-    }
-  } else if (DataType::IsIntegralType(result_type) && DataType::IsFloatingPointType(input_type)) {
-    CHECK(result_type == DataType::Type::kInt32 || result_type == DataType::Type::kInt64);
-    GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
-    FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
-
-    if (result_type == DataType::Type::kInt64) {
-      if (input_type == DataType::Type::kFloat32) {
-        __ TruncLS(FTMP, src);
-      } else {
-        __ TruncLD(FTMP, src);
-      }
-      __ Dmfc1(dst, FTMP);
-    } else {
-      if (input_type == DataType::Type::kFloat32) {
-        __ TruncWS(FTMP, src);
-      } else {
-        __ TruncWD(FTMP, src);
-      }
-      __ Mfc1(dst, FTMP);
-    }
-  } else if (DataType::IsFloatingPointType(result_type) &&
-             DataType::IsFloatingPointType(input_type)) {
-    FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
-    FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
-    if (result_type == DataType::Type::kFloat32) {
-      __ Cvtsd(dst, src);
-    } else {
-      __ Cvtds(dst, src);
-    }
-  } else {
-    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
-                << " to " << result_type;
-  }
-}
-
-void LocationsBuilderMIPS64::VisitUShr(HUShr* ushr) {
-  HandleShift(ushr);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitUShr(HUShr* ushr) {
-  HandleShift(ushr);
-}
-
-void LocationsBuilderMIPS64::VisitXor(HXor* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitXor(HXor* instruction) {
-  HandleBinaryOp(instruction);
-}
-
-void LocationsBuilderMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
-  // Nothing to do, this should be removed during prepare for register allocator.
-  LOG(FATAL) << "Unreachable";
-}
-
-void InstructionCodeGeneratorMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
-  // Nothing to do, this should be removed during prepare for register allocator.
-  LOG(FATAL) << "Unreachable";
-}
-
-void LocationsBuilderMIPS64::VisitEqual(HEqual* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitEqual(HEqual* comp) {
-  HandleCondition(comp);
-}
-
-void LocationsBuilderMIPS64::VisitNotEqual(HNotEqual* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitNotEqual(HNotEqual* comp) {
-  HandleCondition(comp);
-}
-
-void LocationsBuilderMIPS64::VisitLessThan(HLessThan* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitLessThan(HLessThan* comp) {
-  HandleCondition(comp);
-}
-
-void LocationsBuilderMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
-  HandleCondition(comp);
-}
-
-void LocationsBuilderMIPS64::VisitGreaterThan(HGreaterThan* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitGreaterThan(HGreaterThan* comp) {
-  HandleCondition(comp);
-}
-
-void LocationsBuilderMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
-  HandleCondition(comp);
-}
-
-void LocationsBuilderMIPS64::VisitBelow(HBelow* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitBelow(HBelow* comp) {
-  HandleCondition(comp);
-}
-
-void LocationsBuilderMIPS64::VisitBelowOrEqual(HBelowOrEqual* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitBelowOrEqual(HBelowOrEqual* comp) {
-  HandleCondition(comp);
-}
-
-void LocationsBuilderMIPS64::VisitAbove(HAbove* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitAbove(HAbove* comp) {
-  HandleCondition(comp);
-}
-
-void LocationsBuilderMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) {
-  HandleCondition(comp);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) {
-  HandleCondition(comp);
-}
-
-// Simple implementation of packed switch - generate cascaded compare/jumps.
-void LocationsBuilderMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(switch_instr, LocationSummary::kNoCall);
-  locations->SetInAt(0, Location::RequiresRegister());
-}
-
-void InstructionCodeGeneratorMIPS64::GenPackedSwitchWithCompares(GpuRegister value_reg,
-                                                                 int32_t lower_bound,
-                                                                 uint32_t num_entries,
-                                                                 HBasicBlock* switch_block,
-                                                                 HBasicBlock* default_block) {
-  // Create a set of compare/jumps.
-  GpuRegister temp_reg = TMP;
-  __ Addiu32(temp_reg, value_reg, -lower_bound);
-  // Jump to default if index is negative
-  // Note: We don't check the case that index is positive while value < lower_bound, because in
-  // this case, index >= num_entries must be true. So that we can save one branch instruction.
-  __ Bltzc(temp_reg, codegen_->GetLabelOf(default_block));
-
-  const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
-  // Jump to successors[0] if value == lower_bound.
-  __ Beqzc(temp_reg, codegen_->GetLabelOf(successors[0]));
-  int32_t last_index = 0;
-  for (; num_entries - last_index > 2; last_index += 2) {
-    __ Addiu(temp_reg, temp_reg, -2);
-    // Jump to successors[last_index + 1] if value < case_value[last_index + 2].
-    __ Bltzc(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
-    // Jump to successors[last_index + 2] if value == case_value[last_index + 2].
-    __ Beqzc(temp_reg, codegen_->GetLabelOf(successors[last_index + 2]));
-  }
-  if (num_entries - last_index == 2) {
-    // The last missing case_value.
-    __ Addiu(temp_reg, temp_reg, -1);
-    __ Beqzc(temp_reg, codegen_->GetLabelOf(successors[last_index + 1]));
-  }
-
-  // And the default for any other value.
-  if (!codegen_->GoesToNextBlock(switch_block, default_block)) {
-    __ Bc(codegen_->GetLabelOf(default_block));
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::GenTableBasedPackedSwitch(GpuRegister value_reg,
-                                                               int32_t lower_bound,
-                                                               uint32_t num_entries,
-                                                               HBasicBlock* switch_block,
-                                                               HBasicBlock* default_block) {
-  // Create a jump table.
-  std::vector<Mips64Label*> labels(num_entries);
-  const ArenaVector<HBasicBlock*>& successors = switch_block->GetSuccessors();
-  for (uint32_t i = 0; i < num_entries; i++) {
-    labels[i] = codegen_->GetLabelOf(successors[i]);
-  }
-  JumpTable* table = __ CreateJumpTable(std::move(labels));
-
-  // Is the value in range?
-  __ Addiu32(TMP, value_reg, -lower_bound);
-  __ LoadConst32(AT, num_entries);
-  __ Bgeuc(TMP, AT, codegen_->GetLabelOf(default_block));
-
-  // We are in the range of the table.
-  // Load the target address from the jump table, indexing by the value.
-  __ LoadLabelAddress(AT, table->GetLabel());
-  __ Dlsa(TMP, TMP, AT, 2);
-  __ Lw(TMP, TMP, 0);
-  // Compute the absolute target address by adding the table start address
-  // (the table contains offsets to targets relative to its start).
-  __ Daddu(TMP, TMP, AT);
-  // And jump.
-  __ Jr(TMP);
-  __ Nop();
-}
-
-void InstructionCodeGeneratorMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
-  int32_t lower_bound = switch_instr->GetStartValue();
-  uint32_t num_entries = switch_instr->GetNumEntries();
-  LocationSummary* locations = switch_instr->GetLocations();
-  GpuRegister value_reg = locations->InAt(0).AsRegister<GpuRegister>();
-  HBasicBlock* switch_block = switch_instr->GetBlock();
-  HBasicBlock* default_block = switch_instr->GetDefaultBlock();
-
-  if (num_entries > kPackedSwitchJumpTableThreshold) {
-    GenTableBasedPackedSwitch(value_reg,
-                              lower_bound,
-                              num_entries,
-                              switch_block,
-                              default_block);
-  } else {
-    GenPackedSwitchWithCompares(value_reg,
-                                lower_bound,
-                                num_entries,
-                                switch_block,
-                                default_block);
-  }
-}
-
-void LocationsBuilderMIPS64::VisitClassTableGet(HClassTableGet* instruction) {
-  LocationSummary* locations =
-      new (GetGraph()->GetAllocator()) LocationSummary(instruction, LocationSummary::kNoCall);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister());
-}
-
-void InstructionCodeGeneratorMIPS64::VisitClassTableGet(HClassTableGet* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) {
-    uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
-        instruction->GetIndex(), kMips64PointerSize).SizeValue();
-    __ LoadFromOffset(kLoadDoubleword,
-                      locations->Out().AsRegister<GpuRegister>(),
-                      locations->InAt(0).AsRegister<GpuRegister>(),
-                      method_offset);
-  } else {
-    uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement(
-        instruction->GetIndex(), kMips64PointerSize));
-    __ LoadFromOffset(kLoadDoubleword,
-                      locations->Out().AsRegister<GpuRegister>(),
-                      locations->InAt(0).AsRegister<GpuRegister>(),
-                      mirror::Class::ImtPtrOffset(kMips64PointerSize).Uint32Value());
-    __ LoadFromOffset(kLoadDoubleword,
-                      locations->Out().AsRegister<GpuRegister>(),
-                      locations->Out().AsRegister<GpuRegister>(),
-                      method_offset);
-  }
-}
-
-void LocationsBuilderMIPS64::VisitIntermediateAddress(HIntermediateAddress* instruction
-                                                      ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unreachable";
-}
-
-void InstructionCodeGeneratorMIPS64::VisitIntermediateAddress(HIntermediateAddress* instruction
-                                                              ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "Unreachable";
-}
-
-}  // namespace mips64
-}  // namespace art
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
deleted file mode 100644
index 52f3a62..0000000
--- a/compiler/optimizing/code_generator_mips64.h
+++ /dev/null
@@ -1,693 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS64_H_
-#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS64_H_
-
-#include "code_generator.h"
-#include "dex/type_reference.h"
-#include "driver/compiler_options.h"
-#include "nodes.h"
-#include "parallel_move_resolver.h"
-#include "utils/mips64/assembler_mips64.h"
-
-namespace art {
-namespace mips64 {
-
-// InvokeDexCallingConvention registers
-
-static constexpr GpuRegister kParameterCoreRegisters[] =
-    { A1, A2, A3, A4, A5, A6, A7 };
-static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
-
-static constexpr FpuRegister kParameterFpuRegisters[] =
-    { F13, F14, F15, F16, F17, F18, F19 };
-static constexpr size_t kParameterFpuRegistersLength = arraysize(kParameterFpuRegisters);
-
-
-// InvokeRuntimeCallingConvention registers
-
-static constexpr GpuRegister kRuntimeParameterCoreRegisters[] =
-    { A0, A1, A2, A3, A4, A5, A6, A7 };
-static constexpr size_t kRuntimeParameterCoreRegistersLength =
-    arraysize(kRuntimeParameterCoreRegisters);
-
-static constexpr FpuRegister kRuntimeParameterFpuRegisters[] =
-    { F12, F13, F14, F15, F16, F17, F18, F19 };
-static constexpr size_t kRuntimeParameterFpuRegistersLength =
-    arraysize(kRuntimeParameterFpuRegisters);
-
-
-static constexpr GpuRegister kCoreCalleeSaves[] =
-    { S0, S1, S2, S3, S4, S5, S6, S7, GP, S8, RA };
-static constexpr FpuRegister kFpuCalleeSaves[] =
-    { F24, F25, F26, F27, F28, F29, F30, F31 };
-
-
-class CodeGeneratorMIPS64;
-
-VectorRegister VectorRegisterFrom(Location location);
-
-class InvokeDexCallingConvention : public CallingConvention<GpuRegister, FpuRegister> {
- public:
-  InvokeDexCallingConvention()
-      : CallingConvention(kParameterCoreRegisters,
-                          kParameterCoreRegistersLength,
-                          kParameterFpuRegisters,
-                          kParameterFpuRegistersLength,
-                          kMips64PointerSize) {}
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConvention);
-};
-
-class InvokeDexCallingConventionVisitorMIPS64 : public InvokeDexCallingConventionVisitor {
- public:
-  InvokeDexCallingConventionVisitorMIPS64() {}
-  virtual ~InvokeDexCallingConventionVisitorMIPS64() {}
-
-  Location GetNextLocation(DataType::Type type) override;
-  Location GetReturnLocation(DataType::Type type) const override;
-  Location GetMethodLocation() const override;
-
- private:
-  InvokeDexCallingConvention calling_convention;
-
-  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitorMIPS64);
-};
-
-class InvokeRuntimeCallingConvention : public CallingConvention<GpuRegister, FpuRegister> {
- public:
-  InvokeRuntimeCallingConvention()
-      : CallingConvention(kRuntimeParameterCoreRegisters,
-                          kRuntimeParameterCoreRegistersLength,
-                          kRuntimeParameterFpuRegisters,
-                          kRuntimeParameterFpuRegistersLength,
-                          kMips64PointerSize) {}
-
-  Location GetReturnLocation(DataType::Type return_type);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
-};
-
-class FieldAccessCallingConventionMIPS64 : public FieldAccessCallingConvention {
- public:
-  FieldAccessCallingConventionMIPS64() {}
-
-  Location GetObjectLocation() const override {
-    return Location::RegisterLocation(A1);
-  }
-  Location GetFieldIndexLocation() const override {
-    return Location::RegisterLocation(A0);
-  }
-  Location GetReturnLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
-    return Location::RegisterLocation(V0);
-  }
-  Location GetSetValueLocation(DataType::Type type ATTRIBUTE_UNUSED,
-                               bool is_instance) const override {
-    return is_instance
-        ? Location::RegisterLocation(A2)
-        : Location::RegisterLocation(A1);
-  }
-  Location GetFpuLocation(DataType::Type type ATTRIBUTE_UNUSED) const override {
-    return Location::FpuRegisterLocation(F0);
-  }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConventionMIPS64);
-};
-
-class ParallelMoveResolverMIPS64 : public ParallelMoveResolverWithSwap {
- public:
-  ParallelMoveResolverMIPS64(ArenaAllocator* allocator, CodeGeneratorMIPS64* codegen)
-      : ParallelMoveResolverWithSwap(allocator), codegen_(codegen) {}
-
-  void EmitMove(size_t index) override;
-  void EmitSwap(size_t index) override;
-  void SpillScratch(int reg) override;
-  void RestoreScratch(int reg) override;
-
-  void Exchange(int index1, int index2, bool double_slot);
-  void ExchangeQuadSlots(int index1, int index2);
-
-  Mips64Assembler* GetAssembler() const;
-
- private:
-  CodeGeneratorMIPS64* const codegen_;
-
-  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverMIPS64);
-};
-
-class SlowPathCodeMIPS64 : public SlowPathCode {
- public:
-  explicit SlowPathCodeMIPS64(HInstruction* instruction)
-      : SlowPathCode(instruction), entry_label_(), exit_label_() {}
-
-  Mips64Label* GetEntryLabel() { return &entry_label_; }
-  Mips64Label* GetExitLabel() { return &exit_label_; }
-
- private:
-  Mips64Label entry_label_;
-  Mips64Label exit_label_;
-
-  DISALLOW_COPY_AND_ASSIGN(SlowPathCodeMIPS64);
-};
-
-class LocationsBuilderMIPS64 : public HGraphVisitor {
- public:
-  LocationsBuilderMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen)
-      : HGraphVisitor(graph), codegen_(codegen) {}
-
-#define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) override;
-
-  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
-  FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION)
-
-#undef DECLARE_VISIT_INSTRUCTION
-
-  void VisitInstruction(HInstruction* instruction) override {
-    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
-               << " (id " << instruction->GetId() << ")";
-  }
-
- private:
-  void HandleInvoke(HInvoke* invoke);
-  void HandleBinaryOp(HBinaryOperation* operation);
-  void HandleCondition(HCondition* instruction);
-  void HandleShift(HBinaryOperation* operation);
-  void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
-  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
-  Location RegisterOrZeroConstant(HInstruction* instruction);
-  Location FpuRegisterOrConstantForStore(HInstruction* instruction);
-
-  InvokeDexCallingConventionVisitorMIPS64 parameter_visitor_;
-
-  CodeGeneratorMIPS64* const codegen_;
-
-  DISALLOW_COPY_AND_ASSIGN(LocationsBuilderMIPS64);
-};
-
-class InstructionCodeGeneratorMIPS64 : public InstructionCodeGenerator {
- public:
-  InstructionCodeGeneratorMIPS64(HGraph* graph, CodeGeneratorMIPS64* codegen);
-
-#define DECLARE_VISIT_INSTRUCTION(name, super)     \
-  void Visit##name(H##name* instr) override;
-
-  FOR_EACH_CONCRETE_INSTRUCTION_COMMON(DECLARE_VISIT_INSTRUCTION)
-  FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(DECLARE_VISIT_INSTRUCTION)
-
-#undef DECLARE_VISIT_INSTRUCTION
-
-  void VisitInstruction(HInstruction* instruction) override {
-    LOG(FATAL) << "Unreachable instruction " << instruction->DebugName()
-               << " (id " << instruction->GetId() << ")";
-  }
-
-  Mips64Assembler* GetAssembler() const { return assembler_; }
-
-  // Compare-and-jump packed switch generates approx. 3 + 2.5 * N 32-bit
-  // instructions for N cases.
-  // Table-based packed switch generates approx. 11 32-bit instructions
-  // and N 32-bit data words for N cases.
-  // At N = 6 they come out as 18 and 17 32-bit words respectively.
-  // We switch to the table-based method starting with 7 cases.
-  static constexpr uint32_t kPackedSwitchJumpTableThreshold = 6;
-
-  void GenerateMemoryBarrier(MemBarrierKind kind);
-
- private:
-  void GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path, GpuRegister class_reg);
-  void GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check, GpuRegister temp);
-  void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
-  void HandleBinaryOp(HBinaryOperation* operation);
-  void HandleCondition(HCondition* instruction);
-  void HandleShift(HBinaryOperation* operation);
-  void HandleFieldSet(HInstruction* instruction,
-                      const FieldInfo& field_info,
-                      bool value_can_be_null);
-  void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
-
-  void GenerateMinMaxInt(LocationSummary* locations, bool is_min);
-  void GenerateMinMaxFP(LocationSummary* locations, bool is_min, DataType::Type type);
-  void GenerateMinMax(HBinaryOperation* minmax, bool is_min);
-
-  // Generate a heap reference load using one register `out`:
-  //
-  //   out <- *(out + offset)
-  //
-  // while honoring heap poisoning and/or read barriers (if any).
-  //
-  // Location `maybe_temp` is used when generating a read barrier and
-  // shall be a register in that case; it may be an invalid location
-  // otherwise.
-  void GenerateReferenceLoadOneRegister(HInstruction* instruction,
-                                        Location out,
-                                        uint32_t offset,
-                                        Location maybe_temp,
-                                        ReadBarrierOption read_barrier_option);
-  // Generate a heap reference load using two different registers
-  // `out` and `obj`:
-  //
-  //   out <- *(obj + offset)
-  //
-  // while honoring heap poisoning and/or read barriers (if any).
-  //
-  // Location `maybe_temp` is used when generating a Baker's (fast
-  // path) read barrier and shall be a register in that case; it may
-  // be an invalid location otherwise.
-  void GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
-                                         Location out,
-                                         Location obj,
-                                         uint32_t offset,
-                                         Location maybe_temp,
-                                         ReadBarrierOption read_barrier_option);
-
-  // Generate a GC root reference load:
-  //
-  //   root <- *(obj + offset)
-  //
-  // while honoring read barriers (if any).
-  void GenerateGcRootFieldLoad(HInstruction* instruction,
-                               Location root,
-                               GpuRegister obj,
-                               uint32_t offset,
-                               ReadBarrierOption read_barrier_option,
-                               Mips64Label* label_low = nullptr);
-
-  void GenerateTestAndBranch(HInstruction* instruction,
-                             size_t condition_input_index,
-                             Mips64Label* true_target,
-                             Mips64Label* false_target);
-  void DivRemOneOrMinusOne(HBinaryOperation* instruction);
-  void DivRemByPowerOfTwo(HBinaryOperation* instruction);
-  void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
-  void GenerateDivRemIntegral(HBinaryOperation* instruction);
-  void GenerateIntLongCompare(IfCondition cond, bool is64bit, LocationSummary* locations);
-  // When the function returns `false` it means that the condition holds if `dst` is non-zero
-  // and doesn't hold if `dst` is zero. If it returns `true`, the roles of zero and non-zero
-  // `dst` are exchanged.
-  bool MaterializeIntLongCompare(IfCondition cond,
-                                 bool is64bit,
-                                 LocationSummary* input_locations,
-                                 GpuRegister dst);
-  void GenerateIntLongCompareAndBranch(IfCondition cond,
-                                       bool is64bit,
-                                       LocationSummary* locations,
-                                       Mips64Label* label);
-  void GenerateFpCompare(IfCondition cond,
-                         bool gt_bias,
-                         DataType::Type type,
-                         LocationSummary* locations);
-  // When the function returns `false` it means that the condition holds if `dst` is non-zero
-  // and doesn't hold if `dst` is zero. If it returns `true`, the roles of zero and non-zero
-  // `dst` are exchanged.
-  bool MaterializeFpCompare(IfCondition cond,
-                            bool gt_bias,
-                            DataType::Type type,
-                            LocationSummary* input_locations,
-                            FpuRegister dst);
-  void GenerateFpCompareAndBranch(IfCondition cond,
-                                  bool gt_bias,
-                                  DataType::Type type,
-                                  LocationSummary* locations,
-                                  Mips64Label* label);
-  void HandleGoto(HInstruction* got, HBasicBlock* successor);
-  void GenPackedSwitchWithCompares(GpuRegister value_reg,
-                                   int32_t lower_bound,
-                                   uint32_t num_entries,
-                                   HBasicBlock* switch_block,
-                                   HBasicBlock* default_block);
-  void GenTableBasedPackedSwitch(GpuRegister value_reg,
-                                 int32_t lower_bound,
-                                 uint32_t num_entries,
-                                 HBasicBlock* switch_block,
-                                 HBasicBlock* default_block);
-  int32_t VecAddress(LocationSummary* locations,
-                     size_t size,
-                     /* out */ GpuRegister* adjusted_base);
-  void GenConditionalMove(HSelect* select);
-
-  Mips64Assembler* const assembler_;
-  CodeGeneratorMIPS64* const codegen_;
-
-  DISALLOW_COPY_AND_ASSIGN(InstructionCodeGeneratorMIPS64);
-};
-
-class CodeGeneratorMIPS64 : public CodeGenerator {
- public:
-  CodeGeneratorMIPS64(HGraph* graph,
-                      const CompilerOptions& compiler_options,
-                      OptimizingCompilerStats* stats = nullptr);
-  virtual ~CodeGeneratorMIPS64() {}
-
-  void GenerateFrameEntry() override;
-  void GenerateFrameExit() override;
-
-  void Bind(HBasicBlock* block) override;
-
-  size_t GetWordSize() const override { return kMips64DoublewordSize; }
-
-  size_t GetFloatingPointSpillSlotSize() const override {
-    return GetGraph()->HasSIMD()
-        ? 2 * kMips64DoublewordSize   // 16 bytes for each spill.
-        : 1 * kMips64DoublewordSize;  //  8 bytes for each spill.
-  }
-
-  uintptr_t GetAddressOf(HBasicBlock* block) override {
-    return assembler_.GetLabelLocation(GetLabelOf(block));
-  }
-
-  HGraphVisitor* GetLocationBuilder() override { return &location_builder_; }
-  HGraphVisitor* GetInstructionVisitor() override { return &instruction_visitor_; }
-  Mips64Assembler* GetAssembler() override { return &assembler_; }
-  const Mips64Assembler& GetAssembler() const override { return assembler_; }
-
-  // Emit linker patches.
-  void EmitLinkerPatches(ArenaVector<linker::LinkerPatch>* linker_patches) override;
-  void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) override;
-
-  // Fast path implementation of ReadBarrier::Barrier for a heap
-  // reference field load when Baker's read barriers are used.
-  void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
-                                             Location ref,
-                                             GpuRegister obj,
-                                             uint32_t offset,
-                                             Location temp,
-                                             bool needs_null_check);
-  // Fast path implementation of ReadBarrier::Barrier for a heap
-  // reference array load when Baker's read barriers are used.
-  void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
-                                             Location ref,
-                                             GpuRegister obj,
-                                             uint32_t data_offset,
-                                             Location index,
-                                             Location temp,
-                                             bool needs_null_check);
-
-  // Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier,
-  // GenerateArrayLoadWithBakerReadBarrier and some intrinsics.
-  //
-  // Load the object reference located at the address
-  // `obj + offset + (index << scale_factor)`, held by object `obj`, into
-  // `ref`, and mark it if needed.
-  //
-  // If `always_update_field` is true, the value of the reference is
-  // atomically updated in the holder (`obj`).
-  void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
-                                                 Location ref,
-                                                 GpuRegister obj,
-                                                 uint32_t offset,
-                                                 Location index,
-                                                 ScaleFactor scale_factor,
-                                                 Location temp,
-                                                 bool needs_null_check,
-                                                 bool always_update_field = false);
-
-  // Generate a read barrier for a heap reference within `instruction`
-  // using a slow path.
-  //
-  // A read barrier for an object reference read from the heap is
-  // implemented as a call to the artReadBarrierSlow runtime entry
-  // point, which is passed the values in locations `ref`, `obj`, and
-  // `offset`:
-  //
-  //   mirror::Object* artReadBarrierSlow(mirror::Object* ref,
-  //                                      mirror::Object* obj,
-  //                                      uint32_t offset);
-  //
-  // The `out` location contains the value returned by
-  // artReadBarrierSlow.
-  //
-  // When `index` is provided (i.e. for array accesses), the offset
-  // value passed to artReadBarrierSlow is adjusted to take `index`
-  // into account.
-  void GenerateReadBarrierSlow(HInstruction* instruction,
-                               Location out,
-                               Location ref,
-                               Location obj,
-                               uint32_t offset,
-                               Location index = Location::NoLocation());
-
-  // If read barriers are enabled, generate a read barrier for a heap
-  // reference using a slow path. If heap poisoning is enabled, also
-  // unpoison the reference in `out`.
-  void MaybeGenerateReadBarrierSlow(HInstruction* instruction,
-                                    Location out,
-                                    Location ref,
-                                    Location obj,
-                                    uint32_t offset,
-                                    Location index = Location::NoLocation());
-
-  // Generate a read barrier for a GC root within `instruction` using
-  // a slow path.
-  //
-  // A read barrier for an object reference GC root is implemented as
-  // a call to the artReadBarrierForRootSlow runtime entry point,
-  // which is passed the value in location `root`:
-  //
-  //   mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);
-  //
-  // The `out` location contains the value returned by
-  // artReadBarrierForRootSlow.
-  void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
-
-  void MarkGCCard(GpuRegister object, GpuRegister value, bool value_can_be_null);
-
-  // Register allocation.
-
-  void SetupBlockedRegisters() const override;
-
-  size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) override;
-  size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) override;
-  size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
-  size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) override;
-
-  void DumpCoreRegister(std::ostream& stream, int reg) const override;
-  void DumpFloatingPointRegister(std::ostream& stream, int reg) const override;
-
-  InstructionSet GetInstructionSet() const override { return InstructionSet::kMips64; }
-
-  const Mips64InstructionSetFeatures& GetInstructionSetFeatures() const;
-
-  Mips64Label* GetLabelOf(HBasicBlock* block) const {
-    return CommonGetLabelOf<Mips64Label>(block_labels_, block);
-  }
-
-  void Initialize() override {
-    block_labels_ = CommonInitializeLabels<Mips64Label>();
-  }
-
-  // We prefer aligned loads and stores (less code), so spill and restore registers in slow paths
-  // at aligned locations.
-  uint32_t GetPreferredSlotsAlignment() const override { return kMips64DoublewordSize; }
-
-  void Finalize(CodeAllocator* allocator) override;
-
-  // Code generation helpers.
-  void MoveLocation(Location dst, Location src, DataType::Type dst_type) override;
-
-  void MoveConstant(Location destination, int32_t value) override;
-
-  void AddLocationAsTemp(Location location, LocationSummary* locations) override;
-
-
-  void SwapLocations(Location loc1, Location loc2, DataType::Type type);
-
-  // Generate code to invoke a runtime entry point.
-  void InvokeRuntime(QuickEntrypointEnum entrypoint,
-                     HInstruction* instruction,
-                     uint32_t dex_pc,
-                     SlowPathCode* slow_path = nullptr) override;
-
-  // Generate code to invoke a runtime entry point, but do not record
-  // PC-related information in a stack map.
-  void InvokeRuntimeWithoutRecordingPcInfo(int32_t entry_point_offset,
-                                           HInstruction* instruction,
-                                           SlowPathCode* slow_path);
-
-  void GenerateInvokeRuntime(int32_t entry_point_offset);
-
-  ParallelMoveResolver* GetMoveResolver() override { return &move_resolver_; }
-
-  bool NeedsTwoRegisters(DataType::Type type ATTRIBUTE_UNUSED) const override { return false; }
-
-  // Check if the desired_string_load_kind is supported. If it is, return it,
-  // otherwise return a fall-back kind that should be used instead.
-  HLoadString::LoadKind GetSupportedLoadStringKind(
-      HLoadString::LoadKind desired_string_load_kind) override;
-
-  // Check if the desired_class_load_kind is supported. If it is, return it,
-  // otherwise return a fall-back kind that should be used instead.
-  HLoadClass::LoadKind GetSupportedLoadClassKind(
-      HLoadClass::LoadKind desired_class_load_kind) override;
-
-  // Check if the desired_dispatch_info is supported. If it is, return it,
-  // otherwise return a fall-back info that should be used instead.
-  HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
-      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
-      ArtMethod* method) override;
-
-  void GenerateStaticOrDirectCall(
-      HInvokeStaticOrDirect* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
-  void GenerateVirtualCall(
-      HInvokeVirtual* invoke, Location temp, SlowPathCode* slow_path = nullptr) override;
-
-  void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
-                              DataType::Type type ATTRIBUTE_UNUSED) override {
-    UNIMPLEMENTED(FATAL) << "Not implemented on MIPS64";
-  }
-
-  void GenerateNop() override;
-  void GenerateImplicitNullCheck(HNullCheck* instruction) override;
-  void GenerateExplicitNullCheck(HNullCheck* instruction) override;
-
-  // The PcRelativePatchInfo is used for PC-relative addressing of methods/strings/types,
-  // whether through .data.bimg.rel.ro, .bss, or directly in the boot image.
-  //
-  // The 16-bit halves of the 32-bit PC-relative offset are patched separately, necessitating
-  // two patches/infos. There can be more than two patches/infos if the instruction supplying
-  // the high half is shared with e.g. a slow path, while the low half is supplied by separate
-  // instructions, e.g.:
-  //     auipc r1, high       // patch
-  //     lwu   r2, low(r1)    // patch
-  //     beqzc r2, slow_path
-  //   back:
-  //     ...
-  //   slow_path:
-  //     ...
-  //     sw    r2, low(r1)    // patch
-  //     bc    back
-  struct PcRelativePatchInfo : PatchInfo<Mips64Label> {
-    PcRelativePatchInfo(const DexFile* dex_file,
-                        uint32_t off_or_idx,
-                        const PcRelativePatchInfo* info_high)
-        : PatchInfo<Mips64Label>(dex_file, off_or_idx),
-          patch_info_high(info_high) { }
-
-    // Pointer to the info for the high half patch or nullptr if this is the high half patch info.
-    const PcRelativePatchInfo* patch_info_high;
-
-   private:
-    PcRelativePatchInfo(PcRelativePatchInfo&& other) = delete;
-    DISALLOW_COPY_AND_ASSIGN(PcRelativePatchInfo);
-  };
-
-  PcRelativePatchInfo* NewBootImageIntrinsicPatch(uint32_t intrinsic_data,
-                                                  const PcRelativePatchInfo* info_high = nullptr);
-  PcRelativePatchInfo* NewBootImageRelRoPatch(uint32_t boot_image_offset,
-                                              const PcRelativePatchInfo* info_high = nullptr);
-  PcRelativePatchInfo* NewBootImageMethodPatch(MethodReference target_method,
-                                               const PcRelativePatchInfo* info_high = nullptr);
-  PcRelativePatchInfo* NewMethodBssEntryPatch(MethodReference target_method,
-                                              const PcRelativePatchInfo* info_high = nullptr);
-  PcRelativePatchInfo* NewBootImageTypePatch(const DexFile& dex_file,
-                                             dex::TypeIndex type_index,
-                                             const PcRelativePatchInfo* info_high = nullptr);
-  PcRelativePatchInfo* NewTypeBssEntryPatch(const DexFile& dex_file,
-                                            dex::TypeIndex type_index,
-                                            const PcRelativePatchInfo* info_high = nullptr);
-  PcRelativePatchInfo* NewBootImageStringPatch(const DexFile& dex_file,
-                                               dex::StringIndex string_index,
-                                               const PcRelativePatchInfo* info_high = nullptr);
-  PcRelativePatchInfo* NewStringBssEntryPatch(const DexFile& dex_file,
-                                              dex::StringIndex string_index,
-                                              const PcRelativePatchInfo* info_high = nullptr);
-  Literal* DeduplicateBootImageAddressLiteral(uint64_t address);
-
-  void EmitPcRelativeAddressPlaceholderHigh(PcRelativePatchInfo* info_high,
-                                            GpuRegister out,
-                                            PcRelativePatchInfo* info_low = nullptr);
-
-  void LoadBootImageAddress(GpuRegister reg, uint32_t boot_image_reference);
-  void AllocateInstanceForIntrinsic(HInvokeStaticOrDirect* invoke, uint32_t boot_image_offset);
-
-  void PatchJitRootUse(uint8_t* code,
-                       const uint8_t* roots_data,
-                       const Literal* literal,
-                       uint64_t index_in_table) const;
-  Literal* DeduplicateJitStringLiteral(const DexFile& dex_file,
-                                       dex::StringIndex string_index,
-                                       Handle<mirror::String> handle);
-  Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
-                                      dex::TypeIndex type_index,
-                                      Handle<mirror::Class> handle);
-
- private:
-  using Uint32ToLiteralMap = ArenaSafeMap<uint32_t, Literal*>;
-  using Uint64ToLiteralMap = ArenaSafeMap<uint64_t, Literal*>;
-  using StringToLiteralMap = ArenaSafeMap<StringReference,
-                                          Literal*,
-                                          StringReferenceValueComparator>;
-  using TypeToLiteralMap = ArenaSafeMap<TypeReference,
-                                        Literal*,
-                                        TypeReferenceValueComparator>;
-
-  Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
-  Literal* DeduplicateUint64Literal(uint64_t value);
-
-  PcRelativePatchInfo* NewPcRelativePatch(const DexFile* dex_file,
-                                          uint32_t offset_or_index,
-                                          const PcRelativePatchInfo* info_high,
-                                          ArenaDeque<PcRelativePatchInfo>* patches);
-
-  template <linker::LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
-  void EmitPcRelativeLinkerPatches(const ArenaDeque<PcRelativePatchInfo>& infos,
-                                   ArenaVector<linker::LinkerPatch>* linker_patches);
-
-  // Labels for each block that will be compiled.
-  Mips64Label* block_labels_;  // Indexed by block id.
-  Mips64Label frame_entry_label_;
-  LocationsBuilderMIPS64 location_builder_;
-  InstructionCodeGeneratorMIPS64 instruction_visitor_;
-  ParallelMoveResolverMIPS64 move_resolver_;
-  Mips64Assembler assembler_;
-
-  // Deduplication map for 32-bit literals, used for non-patchable boot image addresses.
-  Uint32ToLiteralMap uint32_literals_;
-  // Deduplication map for 64-bit literals, used for non-patchable method address or method code
-  // address.
-  Uint64ToLiteralMap uint64_literals_;
-  // PC-relative method patch info for kBootImageLinkTimePcRelative/kBootImageRelRo.
-  // Also used for type/string patches for kBootImageRelRo (same linker patch as for methods).
-  ArenaDeque<PcRelativePatchInfo> boot_image_method_patches_;
-  // PC-relative method patch info for kBssEntry.
-  ArenaDeque<PcRelativePatchInfo> method_bss_entry_patches_;
-  // PC-relative type patch info for kBootImageLinkTimePcRelative.
-  ArenaDeque<PcRelativePatchInfo> boot_image_type_patches_;
-  // PC-relative type patch info for kBssEntry.
-  ArenaDeque<PcRelativePatchInfo> type_bss_entry_patches_;
-  // PC-relative String patch info for kBootImageLinkTimePcRelative.
-  ArenaDeque<PcRelativePatchInfo> boot_image_string_patches_;
-  // PC-relative type patch info for kBssEntry.
-  ArenaDeque<PcRelativePatchInfo> string_bss_entry_patches_;
-  // PC-relative patch info for IntrinsicObjects.
-  ArenaDeque<PcRelativePatchInfo> boot_image_intrinsic_patches_;
-
-  // Patches for string root accesses in JIT compiled code.
-  StringToLiteralMap jit_string_patches_;
-  // Patches for class root accesses in JIT compiled code.
-  TypeToLiteralMap jit_class_patches_;
-
-  DISALLOW_COPY_AND_ASSIGN(CodeGeneratorMIPS64);
-};
-
-}  // namespace mips64
-}  // namespace art
-
-#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_MIPS64_H_
diff --git a/compiler/optimizing/code_generator_vector_mips.cc b/compiler/optimizing/code_generator_vector_mips.cc
deleted file mode 100644
index 4e9ba0d..0000000
--- a/compiler/optimizing/code_generator_vector_mips.cc
+++ /dev/null
@@ -1,1430 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "code_generator_mips.h"
-#include "mirror/array-inl.h"
-
-namespace art {
-namespace mips {
-
-// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
-#define __ down_cast<MipsAssembler*>(GetAssembler())->  // NOLINT
-
-void LocationsBuilderMIPS::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetOut(Location::RequiresFpuRegister());
-      break;
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ FillB(dst, locations->InAt(0).AsRegister<Register>());
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ FillH(dst, locations->InAt(0).AsRegister<Register>());
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FillW(dst, locations->InAt(0).AsRegister<Register>());
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ InsertW(static_cast<VectorRegister>(FTMP),
-                 locations->InAt(0).AsRegisterPairLow<Register>(),
-                 0);
-      __ InsertW(static_cast<VectorRegister>(FTMP),
-                 locations->InAt(0).AsRegisterPairHigh<Register>(),
-                 1);
-      __ ReplicateFPToVectorRegister(dst, FTMP, /* is_double= */ true);
-      break;
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ ReplicateFPToVectorRegister(dst,
-                                     locations->InAt(0).AsFpuRegister<FRegister>(),
-                                     /* is_double= */ false);
-      break;
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ ReplicateFPToVectorRegister(dst,
-                                     locations->InAt(0).AsFpuRegister<FRegister>(),
-                                     /* is_double= */ true);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecExtractScalar(HVecExtractScalar* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresRegister());
-      break;
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetOut(Location::SameAsFirstInput());
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecExtractScalar(HVecExtractScalar* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister src = VectorRegisterFrom(locations->InAt(0));
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ Copy_sW(locations->Out().AsRegister<Register>(), src, 0);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ Copy_sW(locations->Out().AsRegisterPairLow<Register>(), src, 0);
-      __ Copy_sW(locations->Out().AsRegisterPairHigh<Register>(), src, 1);
-      break;
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      DCHECK_LE(2u, instruction->GetVectorLength());
-      DCHECK_LE(instruction->GetVectorLength(), 4u);
-      DCHECK(locations->InAt(0).Equals(locations->Out()));  // no code required
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
-  LocationSummary* locations = new (allocator) LocationSummary(instruction);
-  DataType::Type type = instruction->GetPackedType();
-  switch (type) {
-    case DataType::Type::kBool:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(),
-                        instruction->IsVecNot() ? Location::kOutputOverlap
-                                                : Location::kNoOutputOverlap);
-      break;
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(),
-                        (instruction->IsVecNeg() || instruction->IsVecAbs() ||
-                            (instruction->IsVecReduce() && type == DataType::Type::kInt64))
-                            ? Location::kOutputOverlap
-                            : Location::kNoOutputOverlap);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecReduce(HVecReduce* instruction) {
-  CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecReduce(HVecReduce* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister src = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  VectorRegister tmp = static_cast<VectorRegister>(FTMP);
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      switch (instruction->GetReductionKind()) {
-        case HVecReduce::kSum:
-          __ Hadd_sD(tmp, src, src);
-          __ IlvlD(dst, tmp, tmp);
-          __ AddvW(dst, dst, tmp);
-          break;
-        case HVecReduce::kMin:
-          __ IlvodW(tmp, src, src);
-          __ Min_sW(tmp, src, tmp);
-          __ IlvlW(dst, tmp, tmp);
-          __ Min_sW(dst, dst, tmp);
-          break;
-        case HVecReduce::kMax:
-          __ IlvodW(tmp, src, src);
-          __ Max_sW(tmp, src, tmp);
-          __ IlvlW(dst, tmp, tmp);
-          __ Max_sW(dst, dst, tmp);
-          break;
-      }
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      switch (instruction->GetReductionKind()) {
-        case HVecReduce::kSum:
-          __ IlvlD(dst, src, src);
-          __ AddvD(dst, dst, src);
-          break;
-        case HVecReduce::kMin:
-          __ IlvlD(dst, src, src);
-          __ Min_sD(dst, dst, src);
-          break;
-        case HVecReduce::kMax:
-          __ IlvlD(dst, src, src);
-          __ Max_sD(dst, dst, src);
-          break;
-      }
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecCnv(HVecCnv* instruction) {
-  CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecCnv(HVecCnv* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister src = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  DataType::Type from = instruction->GetInputType();
-  DataType::Type to = instruction->GetResultType();
-  if (from == DataType::Type::kInt32 && to == DataType::Type::kFloat32) {
-    DCHECK_EQ(4u, instruction->GetVectorLength());
-    __ Ffint_sW(dst, src);
-  } else {
-    LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecNeg(HVecNeg* instruction) {
-  CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecNeg(HVecNeg* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister src = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ FillB(dst, ZERO);
-      __ SubvB(dst, dst, src);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ FillH(dst, ZERO);
-      __ SubvH(dst, dst, src);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FillW(dst, ZERO);
-      __ SubvW(dst, dst, src);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ FillW(dst, ZERO);
-      __ SubvD(dst, dst, src);
-      break;
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FillW(dst, ZERO);
-      __ FsubW(dst, dst, src);
-      break;
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ FillW(dst, ZERO);
-      __ FsubD(dst, dst, src);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecAbs(HVecAbs* instruction) {
-  CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecAbs(HVecAbs* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister src = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ FillB(dst, ZERO);       // all zeroes
-      __ Add_aB(dst, dst, src);  // dst = abs(0) + abs(src)
-      break;
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ FillH(dst, ZERO);       // all zeroes
-      __ Add_aH(dst, dst, src);  // dst = abs(0) + abs(src)
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FillW(dst, ZERO);       // all zeroes
-      __ Add_aW(dst, dst, src);  // dst = abs(0) + abs(src)
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ FillW(dst, ZERO);       // all zeroes
-      __ Add_aD(dst, dst, src);  // dst = abs(0) + abs(src)
-      break;
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ LdiW(dst, -1);          // all ones
-      __ SrliW(dst, dst, 1);
-      __ AndV(dst, dst, src);
-      break;
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ LdiD(dst, -1);          // all ones
-      __ SrliD(dst, dst, 1);
-      __ AndV(dst, dst, src);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecNot(HVecNot* instruction) {
-  CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecNot(HVecNot* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister src = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:  // special case boolean-not
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ LdiB(dst, 1);
-      __ XorV(dst, dst, src);
-      break;
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      DCHECK_LE(2u, instruction->GetVectorLength());
-      DCHECK_LE(instruction->GetVectorLength(), 16u);
-      __ NorV(dst, src, src);  // lanes do not matter
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
-  LocationSummary* locations = new (allocator) LocationSummary(instruction);
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecAdd(HVecAdd* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecAdd(HVecAdd* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ AddvB(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ AddvH(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ AddvW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ AddvD(dst, lhs, rhs);
-      break;
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FaddW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ FaddD(dst, lhs, rhs);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecSaturationAdd(HVecSaturationAdd* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecSaturationAdd(HVecSaturationAdd* instruction) {
-  LOG(FATAL) << "Unsupported SIMD " << instruction->GetId();
-}
-
-void LocationsBuilderMIPS::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      instruction->IsRounded()
-          ? __ Aver_uB(dst, lhs, rhs)
-          : __ Ave_uB(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      instruction->IsRounded()
-          ? __ Aver_sB(dst, lhs, rhs)
-          : __ Ave_sB(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      instruction->IsRounded()
-          ? __ Aver_uH(dst, lhs, rhs)
-          : __ Ave_uH(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      instruction->IsRounded()
-          ? __ Aver_sH(dst, lhs, rhs)
-          : __ Ave_sH(dst, lhs, rhs);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecSub(HVecSub* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecSub(HVecSub* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ SubvB(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ SubvH(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ SubvW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ SubvD(dst, lhs, rhs);
-      break;
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FsubW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ FsubD(dst, lhs, rhs);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecSaturationSub(HVecSaturationSub* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecSaturationSub(HVecSaturationSub* instruction) {
-  LOG(FATAL) << "Unsupported SIMD " << instruction->GetId();
-}
-
-void LocationsBuilderMIPS::VisitVecMul(HVecMul* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecMul(HVecMul* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ MulvB(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ MulvH(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ MulvW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ MulvD(dst, lhs, rhs);
-      break;
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FmulW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ FmulD(dst, lhs, rhs);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecDiv(HVecDiv* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecDiv(HVecDiv* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FdivW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ FdivD(dst, lhs, rhs);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecMin(HVecMin* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecMin(HVecMin* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ Min_uB(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ Min_sB(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ Min_uH(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ Min_sH(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ Min_uW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ Min_sW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ Min_uD(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ Min_sD(dst, lhs, rhs);
-      break;
-    // When one of arguments is NaN, fmin.df returns other argument, but Java expects a NaN value.
-    // TODO: Fix min(x, NaN) cases for float and double.
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FminW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ FminD(dst, lhs, rhs);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecMax(HVecMax* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecMax(HVecMax* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ Max_uB(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ Max_sB(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ Max_uH(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ Max_sH(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ Max_uW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ Max_sW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ Max_uD(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ Max_sD(dst, lhs, rhs);
-      break;
-    // When one of arguments is NaN, fmax.df returns other argument, but Java expects a NaN value.
-    // TODO: Fix max(x, NaN) cases for float and double.
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FmaxW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ FmaxD(dst, lhs, rhs);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecAnd(HVecAnd* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecAnd(HVecAnd* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      DCHECK_LE(2u, instruction->GetVectorLength());
-      DCHECK_LE(instruction->GetVectorLength(), 16u);
-      __ AndV(dst, lhs, rhs);  // lanes do not matter
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecAndNot(HVecAndNot* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecAndNot(HVecAndNot* instruction) {
-  LOG(FATAL) << "No SIMD for " << instruction->GetId();
-}
-
-void LocationsBuilderMIPS::VisitVecOr(HVecOr* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecOr(HVecOr* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      DCHECK_LE(2u, instruction->GetVectorLength());
-      DCHECK_LE(instruction->GetVectorLength(), 16u);
-      __ OrV(dst, lhs, rhs);  // lanes do not matter
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecXor(HVecXor* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecXor(HVecXor* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      DCHECK_LE(2u, instruction->GetVectorLength());
-      DCHECK_LE(instruction->GetVectorLength(), 16u);
-      __ XorV(dst, lhs, rhs);  // lanes do not matter
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
-  LocationSummary* locations = new (allocator) LocationSummary(instruction);
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant()));
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecShl(HVecShl* instruction) {
-  CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecShl(HVecShl* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ SlliB(dst, lhs, value);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ SlliH(dst, lhs, value);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ SlliW(dst, lhs, value);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ SlliD(dst, lhs, value);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecShr(HVecShr* instruction) {
-  CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecShr(HVecShr* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ SraiB(dst, lhs, value);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ SraiH(dst, lhs, value);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ SraiW(dst, lhs, value);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ SraiD(dst, lhs, value);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecUShr(HVecUShr* instruction) {
-  CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecUShr(HVecUShr* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ SrliB(dst, lhs, value);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ SrliH(dst, lhs, value);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ SrliW(dst, lhs, value);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ SrliD(dst, lhs, value);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecSetScalars(HVecSetScalars* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-
-  DCHECK_EQ(1u, instruction->InputCount());  // only one input currently implemented
-
-  HInstruction* input = instruction->InputAt(0);
-  bool is_zero = IsZeroBitPattern(input);
-
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant())
-                                    : Location::RequiresRegister());
-      locations->SetOut(Location::RequiresFpuRegister());
-      break;
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant())
-                                    : Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister());
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecSetScalars(HVecSetScalars* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-
-  DCHECK_EQ(1u, instruction->InputCount());  // only one input currently implemented
-
-  // Zero out all other elements first.
-  __ FillW(dst, ZERO);
-
-  // Shorthand for any type of zero.
-  if (IsZeroBitPattern(instruction->InputAt(0))) {
-    return;
-  }
-
-  // Set required elements.
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ InsertB(dst, locations->InAt(0).AsRegister<Register>(), 0);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ InsertH(dst, locations->InAt(0).AsRegister<Register>(), 0);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ InsertW(dst, locations->InAt(0).AsRegister<Register>(), 0);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ InsertW(dst, locations->InAt(0).AsRegisterPairLow<Register>(), 0);
-      __ InsertW(dst, locations->InAt(0).AsRegisterPairHigh<Register>(), 1);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
-  LocationSummary* locations = new (allocator) LocationSummary(instruction);
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
-      locations->SetInAt(2, Location::RequiresFpuRegister());
-      locations->SetOut(Location::SameAsFirstInput());
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
-  CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister acc = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister left = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister right = VectorRegisterFrom(locations->InAt(2));
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      if (instruction->GetOpKind() == HInstruction::kAdd) {
-        __ MaddvB(acc, left, right);
-      } else {
-        __ MsubvB(acc, left, right);
-      }
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      if (instruction->GetOpKind() == HInstruction::kAdd) {
-        __ MaddvH(acc, left, right);
-      } else {
-        __ MsubvH(acc, left, right);
-      }
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      if (instruction->GetOpKind() == HInstruction::kAdd) {
-        __ MaddvW(acc, left, right);
-      } else {
-        __ MsubvW(acc, left, right);
-      }
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      if (instruction->GetOpKind() == HInstruction::kAdd) {
-        __ MaddvD(acc, left, right);
-      } else {
-        __ MsubvD(acc, left, right);
-      }
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
-  CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
-  LocationSummary* locations = instruction->GetLocations();
-  // All conversions require at least one temporary register.
-  locations->AddTemp(Location::RequiresFpuRegister());
-  // Some conversions require a second temporary register.
-  HVecOperation* a = instruction->InputAt(1)->AsVecOperation();
-  HVecOperation* b = instruction->InputAt(2)->AsVecOperation();
-  DCHECK_EQ(HVecOperation::ToSignedType(a->GetPackedType()),
-            HVecOperation::ToSignedType(b->GetPackedType()));
-  switch (a->GetPackedType()) {
-    case DataType::Type::kInt32:
-      if (instruction->GetPackedType() == DataType::Type::kInt32) {
-        break;
-      }
-      FALLTHROUGH_INTENDED;
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      locations->AddTemp(Location::RequiresFpuRegister());
-      break;
-    default:
-      break;
-  }
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister acc = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister left = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister right = VectorRegisterFrom(locations->InAt(2));
-  VectorRegister tmp = static_cast<VectorRegister>(FTMP);
-  VectorRegister tmp1 = VectorRegisterFrom(locations->GetTemp(0));
-
-  DCHECK(locations->InAt(0).Equals(locations->Out()));
-
-  // Handle all feasible acc_T += sad(a_S, b_S) type combinations (T x S).
-  HVecOperation* a = instruction->InputAt(1)->AsVecOperation();
-  HVecOperation* b = instruction->InputAt(2)->AsVecOperation();
-  DCHECK_EQ(HVecOperation::ToSignedType(a->GetPackedType()),
-            HVecOperation::ToSignedType(b->GetPackedType()));
-  switch (a->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, a->GetVectorLength());
-      switch (instruction->GetPackedType()) {
-        case DataType::Type::kUint16:
-        case DataType::Type::kInt16: {
-          DCHECK_EQ(8u, instruction->GetVectorLength());
-          VectorRegister tmp2 = VectorRegisterFrom(locations->GetTemp(1));
-          __ FillB(tmp, ZERO);
-          __ Hadd_sH(tmp1, left, tmp);
-          __ Hadd_sH(tmp2, right, tmp);
-          __ Asub_sH(tmp1, tmp1, tmp2);
-          __ AddvH(acc, acc, tmp1);
-          __ Hadd_sH(tmp1, tmp, left);
-          __ Hadd_sH(tmp2, tmp, right);
-          __ Asub_sH(tmp1, tmp1, tmp2);
-          __ AddvH(acc, acc, tmp1);
-          break;
-        }
-        case DataType::Type::kInt32: {
-          DCHECK_EQ(4u, instruction->GetVectorLength());
-          VectorRegister tmp2 = VectorRegisterFrom(locations->GetTemp(1));
-          __ FillB(tmp, ZERO);
-          __ Hadd_sH(tmp1, left, tmp);
-          __ Hadd_sH(tmp2, right, tmp);
-          __ Asub_sH(tmp1, tmp1, tmp2);
-          __ Hadd_sW(tmp1, tmp1, tmp1);
-          __ AddvW(acc, acc, tmp1);
-          __ Hadd_sH(tmp1, tmp, left);
-          __ Hadd_sH(tmp2, tmp, right);
-          __ Asub_sH(tmp1, tmp1, tmp2);
-          __ Hadd_sW(tmp1, tmp1, tmp1);
-          __ AddvW(acc, acc, tmp1);
-          break;
-        }
-        case DataType::Type::kInt64: {
-          DCHECK_EQ(2u, instruction->GetVectorLength());
-          VectorRegister tmp2 = VectorRegisterFrom(locations->GetTemp(1));
-          __ FillB(tmp, ZERO);
-          __ Hadd_sH(tmp1, left, tmp);
-          __ Hadd_sH(tmp2, right, tmp);
-          __ Asub_sH(tmp1, tmp1, tmp2);
-          __ Hadd_sW(tmp1, tmp1, tmp1);
-          __ Hadd_sD(tmp1, tmp1, tmp1);
-          __ AddvD(acc, acc, tmp1);
-          __ Hadd_sH(tmp1, tmp, left);
-          __ Hadd_sH(tmp2, tmp, right);
-          __ Asub_sH(tmp1, tmp1, tmp2);
-          __ Hadd_sW(tmp1, tmp1, tmp1);
-          __ Hadd_sD(tmp1, tmp1, tmp1);
-          __ AddvD(acc, acc, tmp1);
-          break;
-        }
-        default:
-          LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-          UNREACHABLE();
-      }
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, a->GetVectorLength());
-      switch (instruction->GetPackedType()) {
-        case DataType::Type::kInt32: {
-          DCHECK_EQ(4u, instruction->GetVectorLength());
-          VectorRegister tmp2 = VectorRegisterFrom(locations->GetTemp(1));
-          __ FillH(tmp, ZERO);
-          __ Hadd_sW(tmp1, left, tmp);
-          __ Hadd_sW(tmp2, right, tmp);
-          __ Asub_sW(tmp1, tmp1, tmp2);
-          __ AddvW(acc, acc, tmp1);
-          __ Hadd_sW(tmp1, tmp, left);
-          __ Hadd_sW(tmp2, tmp, right);
-          __ Asub_sW(tmp1, tmp1, tmp2);
-          __ AddvW(acc, acc, tmp1);
-          break;
-        }
-        case DataType::Type::kInt64: {
-          DCHECK_EQ(2u, instruction->GetVectorLength());
-          VectorRegister tmp2 = VectorRegisterFrom(locations->GetTemp(1));
-          __ FillH(tmp, ZERO);
-          __ Hadd_sW(tmp1, left, tmp);
-          __ Hadd_sW(tmp2, right, tmp);
-          __ Asub_sW(tmp1, tmp1, tmp2);
-          __ Hadd_sD(tmp1, tmp1, tmp1);
-          __ AddvD(acc, acc, tmp1);
-          __ Hadd_sW(tmp1, tmp, left);
-          __ Hadd_sW(tmp2, tmp, right);
-          __ Asub_sW(tmp1, tmp1, tmp2);
-          __ Hadd_sD(tmp1, tmp1, tmp1);
-          __ AddvD(acc, acc, tmp1);
-          break;
-        }
-        default:
-          LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-          UNREACHABLE();
-      }
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, a->GetVectorLength());
-      switch (instruction->GetPackedType()) {
-        case DataType::Type::kInt32: {
-          DCHECK_EQ(4u, instruction->GetVectorLength());
-          __ FillW(tmp, ZERO);
-          __ SubvW(tmp1, left, right);
-          __ Add_aW(tmp1, tmp1, tmp);
-          __ AddvW(acc, acc, tmp1);
-          break;
-        }
-        case DataType::Type::kInt64: {
-          DCHECK_EQ(2u, instruction->GetVectorLength());
-          VectorRegister tmp2 = VectorRegisterFrom(locations->GetTemp(1));
-          __ FillW(tmp, ZERO);
-          __ Hadd_sD(tmp1, left, tmp);
-          __ Hadd_sD(tmp2, right, tmp);
-          __ Asub_sD(tmp1, tmp1, tmp2);
-          __ AddvD(acc, acc, tmp1);
-          __ Hadd_sD(tmp1, tmp, left);
-          __ Hadd_sD(tmp2, tmp, right);
-          __ Asub_sD(tmp1, tmp1, tmp2);
-          __ AddvD(acc, acc, tmp1);
-          break;
-        }
-        default:
-          LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-          UNREACHABLE();
-      }
-      break;
-    case DataType::Type::kInt64: {
-      DCHECK_EQ(2u, a->GetVectorLength());
-      switch (instruction->GetPackedType()) {
-        case DataType::Type::kInt64: {
-          DCHECK_EQ(2u, instruction->GetVectorLength());
-          __ FillW(tmp, ZERO);
-          __ SubvD(tmp1, left, right);
-          __ Add_aD(tmp1, tmp1, tmp);
-          __ AddvD(acc, acc, tmp1);
-          break;
-        }
-        default:
-          LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-          UNREACHABLE();
-      }
-      break;
-    }
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecDotProd(HVecDotProd* instruction) {
-  LOG(FATAL) << "No SIMD for " << instruction->GetId();
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecDotProd(HVecDotProd* instruction) {
-  LOG(FATAL) << "No SIMD for " << instruction->GetId();
-}
-
-// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* allocator,
-                                  HVecMemoryOperation* instruction,
-                                  bool is_load) {
-  LocationSummary* locations = new (allocator) LocationSummary(instruction);
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
-      if (is_load) {
-        locations->SetOut(Location::RequiresFpuRegister());
-      } else {
-        locations->SetInAt(2, Location::RequiresFpuRegister());
-      }
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-// Helper to prepare register and offset for vector memory operations. Returns the offset and sets
-// the output parameter adjusted_base to the original base or to a reserved temporary register (AT).
-int32_t InstructionCodeGeneratorMIPS::VecAddress(LocationSummary* locations,
-                                                 size_t size,
-                                                 /* out */ Register* adjusted_base) {
-  Register base = locations->InAt(0).AsRegister<Register>();
-  Location index = locations->InAt(1);
-  int scale = TIMES_1;
-  switch (size) {
-    case 2: scale = TIMES_2; break;
-    case 4: scale = TIMES_4; break;
-    case 8: scale = TIMES_8; break;
-    default: break;
-  }
-  int32_t offset = mirror::Array::DataOffset(size).Int32Value();
-
-  if (index.IsConstant()) {
-    offset += index.GetConstant()->AsIntConstant()->GetValue() << scale;
-    __ AdjustBaseOffsetAndElementSizeShift(base, offset, scale);
-    *adjusted_base = base;
-  } else {
-    Register index_reg = index.AsRegister<Register>();
-    if (scale != TIMES_1) {
-      __ Lsa(AT, index_reg, base, scale);
-    } else {
-      __ Addu(AT, base, index_reg);
-    }
-    *adjusted_base = AT;
-  }
-  return offset;
-}
-
-void LocationsBuilderMIPS::VisitVecLoad(HVecLoad* instruction) {
-  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ true);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecLoad(HVecLoad* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  size_t size = DataType::Size(instruction->GetPackedType());
-  VectorRegister reg = VectorRegisterFrom(locations->Out());
-  Register base;
-  int32_t offset = VecAddress(locations, size, &base);
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ LdB(reg, base, offset);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      // Loading 8-bytes (needed if dealing with compressed strings in StringCharAt) from unaligned
-      // memory address may cause a trap to the kernel if the CPU doesn't directly support unaligned
-      // loads and stores.
-      // TODO: Implement support for StringCharAt.
-      DCHECK(!instruction->IsStringCharAt());
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ LdH(reg, base, offset);
-      break;
-    case DataType::Type::kInt32:
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ LdW(reg, base, offset);
-      break;
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ LdD(reg, base, offset);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS::VisitVecStore(HVecStore* instruction) {
-  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ false);
-}
-
-void InstructionCodeGeneratorMIPS::VisitVecStore(HVecStore* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  size_t size = DataType::Size(instruction->GetPackedType());
-  VectorRegister reg = VectorRegisterFrom(locations->InAt(2));
-  Register base;
-  int32_t offset = VecAddress(locations, size, &base);
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ StB(reg, base, offset);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ StH(reg, base, offset);
-      break;
-    case DataType::Type::kInt32:
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ StW(reg, base, offset);
-      break;
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ StD(reg, base, offset);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-#undef __
-
-}  // namespace mips
-}  // namespace art
diff --git a/compiler/optimizing/code_generator_vector_mips64.cc b/compiler/optimizing/code_generator_vector_mips64.cc
deleted file mode 100644
index 6467d3e..0000000
--- a/compiler/optimizing/code_generator_vector_mips64.cc
+++ /dev/null
@@ -1,1428 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "code_generator_mips64.h"
-#include "mirror/array-inl.h"
-
-namespace art {
-namespace mips64 {
-
-// NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
-#define __ down_cast<Mips64Assembler*>(GetAssembler())->  // NOLINT
-
-VectorRegister VectorRegisterFrom(Location location) {
-  DCHECK(location.IsFpuRegister());
-  return static_cast<VectorRegister>(location.AsFpuRegister<FpuRegister>());
-}
-
-void LocationsBuilderMIPS64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetOut(Location::RequiresFpuRegister());
-      break;
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecReplicateScalar(HVecReplicateScalar* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ FillB(dst, locations->InAt(0).AsRegister<GpuRegister>());
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ FillH(dst, locations->InAt(0).AsRegister<GpuRegister>());
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FillW(dst, locations->InAt(0).AsRegister<GpuRegister>());
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ FillD(dst, locations->InAt(0).AsRegister<GpuRegister>());
-      break;
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ ReplicateFPToVectorRegister(dst,
-                                     locations->InAt(0).AsFpuRegister<FpuRegister>(),
-                                     /* is_double= */ false);
-      break;
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ ReplicateFPToVectorRegister(dst,
-                                     locations->InAt(0).AsFpuRegister<FpuRegister>(),
-                                     /* is_double= */ true);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecExtractScalar(HVecExtractScalar* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresRegister());
-      break;
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetOut(Location::SameAsFirstInput());
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecExtractScalar(HVecExtractScalar* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister src = VectorRegisterFrom(locations->InAt(0));
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ Copy_sW(locations->Out().AsRegister<GpuRegister>(), src, 0);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ Copy_sD(locations->Out().AsRegister<GpuRegister>(), src, 0);
-      break;
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      DCHECK_LE(2u, instruction->GetVectorLength());
-      DCHECK_LE(instruction->GetVectorLength(), 4u);
-      DCHECK(locations->InAt(0).Equals(locations->Out()));  // no code required
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-// Helper to set up locations for vector unary operations.
-static void CreateVecUnOpLocations(ArenaAllocator* allocator, HVecUnaryOperation* instruction) {
-  LocationSummary* locations = new (allocator) LocationSummary(instruction);
-  DataType::Type type = instruction->GetPackedType();
-  switch (type) {
-    case DataType::Type::kBool:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(),
-                        instruction->IsVecNot() ? Location::kOutputOverlap
-                                                : Location::kNoOutputOverlap);
-      break;
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(),
-                        (instruction->IsVecNeg() || instruction->IsVecAbs() ||
-                            (instruction->IsVecReduce() && type == DataType::Type::kInt64))
-                            ? Location::kOutputOverlap
-                            : Location::kNoOutputOverlap);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecReduce(HVecReduce* instruction) {
-  CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecReduce(HVecReduce* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister src = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  VectorRegister tmp = static_cast<VectorRegister>(FTMP);
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      switch (instruction->GetReductionKind()) {
-        case HVecReduce::kSum:
-          __ Hadd_sD(tmp, src, src);
-          __ IlvlD(dst, tmp, tmp);
-          __ AddvW(dst, dst, tmp);
-          break;
-        case HVecReduce::kMin:
-          __ IlvodW(tmp, src, src);
-          __ Min_sW(tmp, src, tmp);
-          __ IlvlW(dst, tmp, tmp);
-          __ Min_sW(dst, dst, tmp);
-          break;
-        case HVecReduce::kMax:
-          __ IlvodW(tmp, src, src);
-          __ Max_sW(tmp, src, tmp);
-          __ IlvlW(dst, tmp, tmp);
-          __ Max_sW(dst, dst, tmp);
-          break;
-      }
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      switch (instruction->GetReductionKind()) {
-        case HVecReduce::kSum:
-          __ IlvlD(dst, src, src);
-          __ AddvD(dst, dst, src);
-          break;
-        case HVecReduce::kMin:
-          __ IlvlD(dst, src, src);
-          __ Min_sD(dst, dst, src);
-          break;
-        case HVecReduce::kMax:
-          __ IlvlD(dst, src, src);
-          __ Max_sD(dst, dst, src);
-          break;
-      }
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecCnv(HVecCnv* instruction) {
-  CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecCnv(HVecCnv* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister src = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  DataType::Type from = instruction->GetInputType();
-  DataType::Type to = instruction->GetResultType();
-  if (from == DataType::Type::kInt32 && to == DataType::Type::kFloat32) {
-    DCHECK_EQ(4u, instruction->GetVectorLength());
-    __ Ffint_sW(dst, src);
-  } else {
-    LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-    UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecNeg(HVecNeg* instruction) {
-  CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecNeg(HVecNeg* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister src = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ FillB(dst, ZERO);
-      __ SubvB(dst, dst, src);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ FillH(dst, ZERO);
-      __ SubvH(dst, dst, src);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FillW(dst, ZERO);
-      __ SubvW(dst, dst, src);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ FillD(dst, ZERO);
-      __ SubvD(dst, dst, src);
-      break;
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FillW(dst, ZERO);
-      __ FsubW(dst, dst, src);
-      break;
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ FillD(dst, ZERO);
-      __ FsubD(dst, dst, src);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecAbs(HVecAbs* instruction) {
-  CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecAbs(HVecAbs* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister src = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ FillB(dst, ZERO);       // all zeroes
-      __ Add_aB(dst, dst, src);  // dst = abs(0) + abs(src)
-      break;
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ FillH(dst, ZERO);       // all zeroes
-      __ Add_aH(dst, dst, src);  // dst = abs(0) + abs(src)
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FillW(dst, ZERO);       // all zeroes
-      __ Add_aW(dst, dst, src);  // dst = abs(0) + abs(src)
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ FillD(dst, ZERO);       // all zeroes
-      __ Add_aD(dst, dst, src);  // dst = abs(0) + abs(src)
-      break;
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ LdiW(dst, -1);          // all ones
-      __ SrliW(dst, dst, 1);
-      __ AndV(dst, dst, src);
-      break;
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ LdiD(dst, -1);          // all ones
-      __ SrliD(dst, dst, 1);
-      __ AndV(dst, dst, src);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecNot(HVecNot* instruction) {
-  CreateVecUnOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecNot(HVecNot* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister src = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:  // special case boolean-not
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ LdiB(dst, 1);
-      __ XorV(dst, dst, src);
-      break;
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      DCHECK_LE(2u, instruction->GetVectorLength());
-      DCHECK_LE(instruction->GetVectorLength(), 16u);
-      __ NorV(dst, src, src);  // lanes do not matter
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-// Helper to set up locations for vector binary operations.
-static void CreateVecBinOpLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
-  LocationSummary* locations = new (allocator) LocationSummary(instruction);
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecAdd(HVecAdd* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecAdd(HVecAdd* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ AddvB(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ AddvH(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ AddvW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ AddvD(dst, lhs, rhs);
-      break;
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FaddW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ FaddD(dst, lhs, rhs);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecSaturationAdd(HVecSaturationAdd* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecSaturationAdd(HVecSaturationAdd* instruction) {
-  LOG(FATAL) << "Unsupported SIMD " << instruction->GetId();
-}
-
-void LocationsBuilderMIPS64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecHalvingAdd(HVecHalvingAdd* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      instruction->IsRounded()
-          ? __ Aver_uB(dst, lhs, rhs)
-          : __ Ave_uB(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      instruction->IsRounded()
-          ? __ Aver_sB(dst, lhs, rhs)
-          : __ Ave_sB(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      instruction->IsRounded()
-          ? __ Aver_uH(dst, lhs, rhs)
-          : __ Ave_uH(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      instruction->IsRounded()
-          ? __ Aver_sH(dst, lhs, rhs)
-          : __ Ave_sH(dst, lhs, rhs);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecSub(HVecSub* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecSub(HVecSub* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ SubvB(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ SubvH(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ SubvW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ SubvD(dst, lhs, rhs);
-      break;
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FsubW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ FsubD(dst, lhs, rhs);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecSaturationSub(HVecSaturationSub* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecSaturationSub(HVecSaturationSub* instruction) {
-  LOG(FATAL) << "Unsupported SIMD " << instruction->GetId();
-}
-
-void LocationsBuilderMIPS64::VisitVecMul(HVecMul* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecMul(HVecMul* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ MulvB(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ MulvH(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ MulvW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ MulvD(dst, lhs, rhs);
-      break;
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FmulW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ FmulD(dst, lhs, rhs);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecDiv(HVecDiv* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecDiv(HVecDiv* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FdivW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ FdivD(dst, lhs, rhs);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecMin(HVecMin* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecMin(HVecMin* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ Min_uB(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ Min_sB(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ Min_uH(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ Min_sH(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ Min_uW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ Min_sW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ Min_uD(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ Min_sD(dst, lhs, rhs);
-      break;
-    // When one of arguments is NaN, fmin.df returns other argument, but Java expects a NaN value.
-    // TODO: Fix min(x, NaN) cases for float and double.
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FminW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ FminD(dst, lhs, rhs);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecMax(HVecMax* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecMax(HVecMax* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ Max_uB(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ Max_sB(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ Max_uH(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ Max_sH(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ Max_uW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ Max_sW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kUint64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ Max_uD(dst, lhs, rhs);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ Max_sD(dst, lhs, rhs);
-      break;
-    // When one of arguments is NaN, fmax.df returns other argument, but Java expects a NaN value.
-    // TODO: Fix max(x, NaN) cases for float and double.
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ FmaxW(dst, lhs, rhs);
-      break;
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ FmaxD(dst, lhs, rhs);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecAnd(HVecAnd* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecAnd(HVecAnd* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      DCHECK_LE(2u, instruction->GetVectorLength());
-      DCHECK_LE(instruction->GetVectorLength(), 16u);
-      __ AndV(dst, lhs, rhs);  // lanes do not matter
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecAndNot(HVecAndNot* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecAndNot(HVecAndNot* instruction) {
-  LOG(FATAL) << "No SIMD for " << instruction->GetId();
-}
-
-void LocationsBuilderMIPS64::VisitVecOr(HVecOr* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecOr(HVecOr* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      DCHECK_LE(2u, instruction->GetVectorLength());
-      DCHECK_LE(instruction->GetVectorLength(), 16u);
-      __ OrV(dst, lhs, rhs);  // lanes do not matter
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecXor(HVecXor* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecXor(HVecXor* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister rhs = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      DCHECK_LE(2u, instruction->GetVectorLength());
-      DCHECK_LE(instruction->GetVectorLength(), 16u);
-      __ XorV(dst, lhs, rhs);  // lanes do not matter
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-// Helper to set up locations for vector shift operations.
-static void CreateVecShiftLocations(ArenaAllocator* allocator, HVecBinaryOperation* instruction) {
-  LocationSummary* locations = new (allocator) LocationSummary(instruction);
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::ConstantLocation(instruction->InputAt(1)->AsConstant()));
-      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecShl(HVecShl* instruction) {
-  CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecShl(HVecShl* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ SlliB(dst, lhs, value);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ SlliH(dst, lhs, value);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ SlliW(dst, lhs, value);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ SlliD(dst, lhs, value);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecShr(HVecShr* instruction) {
-  CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecShr(HVecShr* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ SraiB(dst, lhs, value);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ SraiH(dst, lhs, value);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ SraiW(dst, lhs, value);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ SraiD(dst, lhs, value);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecUShr(HVecUShr* instruction) {
-  CreateVecShiftLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecUShr(HVecUShr* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister lhs = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-  int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ SrliB(dst, lhs, value);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ SrliH(dst, lhs, value);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ SrliW(dst, lhs, value);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ SrliD(dst, lhs, value);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecSetScalars(HVecSetScalars* instruction) {
-  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
-
-  DCHECK_EQ(1u, instruction->InputCount());  // only one input currently implemented
-
-  HInstruction* input = instruction->InputAt(0);
-  bool is_zero = IsZeroBitPattern(input);
-
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant())
-                                    : Location::RequiresRegister());
-      locations->SetOut(Location::RequiresFpuRegister());
-      break;
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, is_zero ? Location::ConstantLocation(input->AsConstant())
-                                    : Location::RequiresFpuRegister());
-      locations->SetOut(Location::RequiresFpuRegister());
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecSetScalars(HVecSetScalars* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister dst = VectorRegisterFrom(locations->Out());
-
-  DCHECK_EQ(1u, instruction->InputCount());  // only one input currently implemented
-
-  // Zero out all other elements first.
-  __ FillW(dst, ZERO);
-
-  // Shorthand for any type of zero.
-  if (IsZeroBitPattern(instruction->InputAt(0))) {
-    return;
-  }
-
-  // Set required elements.
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ InsertB(dst, locations->InAt(0).AsRegister<GpuRegister>(), 0);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ InsertH(dst, locations->InAt(0).AsRegister<GpuRegister>(), 0);
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ InsertW(dst, locations->InAt(0).AsRegister<GpuRegister>(), 0);
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ InsertD(dst, locations->InAt(0).AsRegister<GpuRegister>(), 0);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-// Helper to set up locations for vector accumulations.
-static void CreateVecAccumLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
-  LocationSummary* locations = new (allocator) LocationSummary(instruction);
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
-      locations->SetInAt(2, Location::RequiresFpuRegister());
-      locations->SetOut(Location::SameAsFirstInput());
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
-  CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecMultiplyAccumulate(HVecMultiplyAccumulate* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister acc = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister left = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister right = VectorRegisterFrom(locations->InAt(2));
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      if (instruction->GetOpKind() == HInstruction::kAdd) {
-        __ MaddvB(acc, left, right);
-      } else {
-        __ MsubvB(acc, left, right);
-      }
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      if (instruction->GetOpKind() == HInstruction::kAdd) {
-        __ MaddvH(acc, left, right);
-      } else {
-        __ MsubvH(acc, left, right);
-      }
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      if (instruction->GetOpKind() == HInstruction::kAdd) {
-        __ MaddvW(acc, left, right);
-      } else {
-        __ MsubvW(acc, left, right);
-      }
-      break;
-    case DataType::Type::kInt64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      if (instruction->GetOpKind() == HInstruction::kAdd) {
-        __ MaddvD(acc, left, right);
-      } else {
-        __ MsubvD(acc, left, right);
-      }
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
-  CreateVecAccumLocations(GetGraph()->GetAllocator(), instruction);
-  LocationSummary* locations = instruction->GetLocations();
-  // All conversions require at least one temporary register.
-  locations->AddTemp(Location::RequiresFpuRegister());
-  // Some conversions require a second temporary register.
-  HVecOperation* a = instruction->InputAt(1)->AsVecOperation();
-  HVecOperation* b = instruction->InputAt(2)->AsVecOperation();
-  DCHECK_EQ(HVecOperation::ToSignedType(a->GetPackedType()),
-            HVecOperation::ToSignedType(b->GetPackedType()));
-  switch (a->GetPackedType()) {
-    case DataType::Type::kInt32:
-      if (instruction->GetPackedType() == DataType::Type::kInt32) {
-        break;
-      }
-      FALLTHROUGH_INTENDED;
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      locations->AddTemp(Location::RequiresFpuRegister());
-      break;
-    default:
-      break;
-  }
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecSADAccumulate(HVecSADAccumulate* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  VectorRegister acc = VectorRegisterFrom(locations->InAt(0));
-  VectorRegister left = VectorRegisterFrom(locations->InAt(1));
-  VectorRegister right = VectorRegisterFrom(locations->InAt(2));
-  VectorRegister tmp = static_cast<VectorRegister>(FTMP);
-  VectorRegister tmp1 = VectorRegisterFrom(locations->GetTemp(0));
-
-  DCHECK(locations->InAt(0).Equals(locations->Out()));
-
-  // Handle all feasible acc_T += sad(a_S, b_S) type combinations (T x S).
-  HVecOperation* a = instruction->InputAt(1)->AsVecOperation();
-  HVecOperation* b = instruction->InputAt(2)->AsVecOperation();
-  DCHECK_EQ(HVecOperation::ToSignedType(a->GetPackedType()),
-            HVecOperation::ToSignedType(b->GetPackedType()));
-  switch (a->GetPackedType()) {
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, a->GetVectorLength());
-      switch (instruction->GetPackedType()) {
-        case DataType::Type::kUint16:
-        case DataType::Type::kInt16: {
-          DCHECK_EQ(8u, instruction->GetVectorLength());
-          VectorRegister tmp2 = VectorRegisterFrom(locations->GetTemp(1));
-          __ FillB(tmp, ZERO);
-          __ Hadd_sH(tmp1, left, tmp);
-          __ Hadd_sH(tmp2, right, tmp);
-          __ Asub_sH(tmp1, tmp1, tmp2);
-          __ AddvH(acc, acc, tmp1);
-          __ Hadd_sH(tmp1, tmp, left);
-          __ Hadd_sH(tmp2, tmp, right);
-          __ Asub_sH(tmp1, tmp1, tmp2);
-          __ AddvH(acc, acc, tmp1);
-          break;
-        }
-        case DataType::Type::kInt32: {
-          DCHECK_EQ(4u, instruction->GetVectorLength());
-          VectorRegister tmp2 = VectorRegisterFrom(locations->GetTemp(1));
-          __ FillB(tmp, ZERO);
-          __ Hadd_sH(tmp1, left, tmp);
-          __ Hadd_sH(tmp2, right, tmp);
-          __ Asub_sH(tmp1, tmp1, tmp2);
-          __ Hadd_sW(tmp1, tmp1, tmp1);
-          __ AddvW(acc, acc, tmp1);
-          __ Hadd_sH(tmp1, tmp, left);
-          __ Hadd_sH(tmp2, tmp, right);
-          __ Asub_sH(tmp1, tmp1, tmp2);
-          __ Hadd_sW(tmp1, tmp1, tmp1);
-          __ AddvW(acc, acc, tmp1);
-          break;
-        }
-        case DataType::Type::kInt64: {
-          DCHECK_EQ(2u, instruction->GetVectorLength());
-          VectorRegister tmp2 = VectorRegisterFrom(locations->GetTemp(1));
-          __ FillB(tmp, ZERO);
-          __ Hadd_sH(tmp1, left, tmp);
-          __ Hadd_sH(tmp2, right, tmp);
-          __ Asub_sH(tmp1, tmp1, tmp2);
-          __ Hadd_sW(tmp1, tmp1, tmp1);
-          __ Hadd_sD(tmp1, tmp1, tmp1);
-          __ AddvD(acc, acc, tmp1);
-          __ Hadd_sH(tmp1, tmp, left);
-          __ Hadd_sH(tmp2, tmp, right);
-          __ Asub_sH(tmp1, tmp1, tmp2);
-          __ Hadd_sW(tmp1, tmp1, tmp1);
-          __ Hadd_sD(tmp1, tmp1, tmp1);
-          __ AddvD(acc, acc, tmp1);
-          break;
-        }
-        default:
-          LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-          UNREACHABLE();
-      }
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, a->GetVectorLength());
-      switch (instruction->GetPackedType()) {
-        case DataType::Type::kInt32: {
-          DCHECK_EQ(4u, instruction->GetVectorLength());
-          VectorRegister tmp2 = VectorRegisterFrom(locations->GetTemp(1));
-          __ FillH(tmp, ZERO);
-          __ Hadd_sW(tmp1, left, tmp);
-          __ Hadd_sW(tmp2, right, tmp);
-          __ Asub_sW(tmp1, tmp1, tmp2);
-          __ AddvW(acc, acc, tmp1);
-          __ Hadd_sW(tmp1, tmp, left);
-          __ Hadd_sW(tmp2, tmp, right);
-          __ Asub_sW(tmp1, tmp1, tmp2);
-          __ AddvW(acc, acc, tmp1);
-          break;
-        }
-        case DataType::Type::kInt64: {
-          DCHECK_EQ(2u, instruction->GetVectorLength());
-          VectorRegister tmp2 = VectorRegisterFrom(locations->GetTemp(1));
-          __ FillH(tmp, ZERO);
-          __ Hadd_sW(tmp1, left, tmp);
-          __ Hadd_sW(tmp2, right, tmp);
-          __ Asub_sW(tmp1, tmp1, tmp2);
-          __ Hadd_sD(tmp1, tmp1, tmp1);
-          __ AddvD(acc, acc, tmp1);
-          __ Hadd_sW(tmp1, tmp, left);
-          __ Hadd_sW(tmp2, tmp, right);
-          __ Asub_sW(tmp1, tmp1, tmp2);
-          __ Hadd_sD(tmp1, tmp1, tmp1);
-          __ AddvD(acc, acc, tmp1);
-          break;
-        }
-        default:
-          LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-          UNREACHABLE();
-      }
-      break;
-    case DataType::Type::kInt32:
-      DCHECK_EQ(4u, a->GetVectorLength());
-      switch (instruction->GetPackedType()) {
-        case DataType::Type::kInt32: {
-          DCHECK_EQ(4u, instruction->GetVectorLength());
-          __ FillW(tmp, ZERO);
-          __ SubvW(tmp1, left, right);
-          __ Add_aW(tmp1, tmp1, tmp);
-          __ AddvW(acc, acc, tmp1);
-          break;
-        }
-        case DataType::Type::kInt64: {
-          DCHECK_EQ(2u, instruction->GetVectorLength());
-          VectorRegister tmp2 = VectorRegisterFrom(locations->GetTemp(1));
-          __ FillW(tmp, ZERO);
-          __ Hadd_sD(tmp1, left, tmp);
-          __ Hadd_sD(tmp2, right, tmp);
-          __ Asub_sD(tmp1, tmp1, tmp2);
-          __ AddvD(acc, acc, tmp1);
-          __ Hadd_sD(tmp1, tmp, left);
-          __ Hadd_sD(tmp2, tmp, right);
-          __ Asub_sD(tmp1, tmp1, tmp2);
-          __ AddvD(acc, acc, tmp1);
-          break;
-        }
-        default:
-          LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-          UNREACHABLE();
-      }
-      break;
-    case DataType::Type::kInt64: {
-      DCHECK_EQ(2u, a->GetVectorLength());
-      switch (instruction->GetPackedType()) {
-        case DataType::Type::kInt64: {
-          DCHECK_EQ(2u, instruction->GetVectorLength());
-          __ FillD(tmp, ZERO);
-          __ SubvD(tmp1, left, right);
-          __ Add_aD(tmp1, tmp1, tmp);
-          __ AddvD(acc, acc, tmp1);
-          break;
-        }
-        default:
-          LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-          UNREACHABLE();
-      }
-      break;
-    }
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecDotProd(HVecDotProd* instruction) {
-  LOG(FATAL) << "No SIMD for " << instruction->GetId();
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecDotProd(HVecDotProd* instruction) {
-  LOG(FATAL) << "No SIMD for " << instruction->GetId();
-}
-
-// Helper to set up locations for vector memory operations.
-static void CreateVecMemLocations(ArenaAllocator* allocator,
-                                  HVecMemoryOperation* instruction,
-                                  bool is_load) {
-  LocationSummary* locations = new (allocator) LocationSummary(instruction);
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::RequiresRegister());
-      locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
-      if (is_load) {
-        locations->SetOut(Location::RequiresFpuRegister());
-      } else {
-        locations->SetInAt(2, Location::RequiresFpuRegister());
-      }
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-// Helper to prepare register and offset for vector memory operations. Returns the offset and sets
-// the output parameter adjusted_base to the original base or to a reserved temporary register (AT).
-int32_t InstructionCodeGeneratorMIPS64::VecAddress(LocationSummary* locations,
-                                                   size_t size,
-                                                   /* out */ GpuRegister* adjusted_base) {
-  GpuRegister base = locations->InAt(0).AsRegister<GpuRegister>();
-  Location index = locations->InAt(1);
-  int scale = TIMES_1;
-  switch (size) {
-    case 2: scale = TIMES_2; break;
-    case 4: scale = TIMES_4; break;
-    case 8: scale = TIMES_8; break;
-    default: break;
-  }
-  int32_t offset = mirror::Array::DataOffset(size).Int32Value();
-
-  if (index.IsConstant()) {
-    offset += index.GetConstant()->AsIntConstant()->GetValue() << scale;
-    __ AdjustBaseOffsetAndElementSizeShift(base, offset, scale);
-    *adjusted_base = base;
-  } else {
-    GpuRegister index_reg = index.AsRegister<GpuRegister>();
-    if (scale != TIMES_1) {
-      __ Dlsa(AT, index_reg, base, scale);
-    } else {
-      __ Daddu(AT, base, index_reg);
-    }
-    *adjusted_base = AT;
-  }
-  return offset;
-}
-
-void LocationsBuilderMIPS64::VisitVecLoad(HVecLoad* instruction) {
-  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ true);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecLoad(HVecLoad* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  size_t size = DataType::Size(instruction->GetPackedType());
-  VectorRegister reg = VectorRegisterFrom(locations->Out());
-  GpuRegister base;
-  int32_t offset = VecAddress(locations, size, &base);
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ LdB(reg, base, offset);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      // Loading 8-bytes (needed if dealing with compressed strings in StringCharAt) from unaligned
-      // memory address may cause a trap to the kernel if the CPU doesn't directly support unaligned
-      // loads and stores.
-      // TODO: Implement support for StringCharAt.
-      DCHECK(!instruction->IsStringCharAt());
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ LdH(reg, base, offset);
-      break;
-    case DataType::Type::kInt32:
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ LdW(reg, base, offset);
-      break;
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ LdD(reg, base, offset);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-void LocationsBuilderMIPS64::VisitVecStore(HVecStore* instruction) {
-  CreateVecMemLocations(GetGraph()->GetAllocator(), instruction, /* is_load= */ false);
-}
-
-void InstructionCodeGeneratorMIPS64::VisitVecStore(HVecStore* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  size_t size = DataType::Size(instruction->GetPackedType());
-  VectorRegister reg = VectorRegisterFrom(locations->InAt(2));
-  GpuRegister base;
-  int32_t offset = VecAddress(locations, size, &base);
-  switch (instruction->GetPackedType()) {
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-      DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ StB(reg, base, offset);
-      break;
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-      DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ StH(reg, base, offset);
-      break;
-    case DataType::Type::kInt32:
-    case DataType::Type::kFloat32:
-      DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ StW(reg, base, offset);
-      break;
-    case DataType::Type::kInt64:
-    case DataType::Type::kFloat64:
-      DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ StD(reg, base, offset);
-      break;
-    default:
-      LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
-      UNREACHABLE();
-  }
-}
-
-#undef __
-
-}  // namespace mips64
-}  // namespace art
diff --git a/compiler/optimizing/code_generator_vector_x86.cc b/compiler/optimizing/code_generator_vector_x86.cc
index 0ee0035..1390af2 100644
--- a/compiler/optimizing/code_generator_vector_x86.cc
+++ b/compiler/optimizing/code_generator_vector_x86.cc
@@ -63,9 +63,10 @@
   LocationSummary* locations = instruction->GetLocations();
   XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
 
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
   // Shorthand for any type of zero.
   if (IsZeroBitPattern(instruction->InputAt(0))) {
-    __ xorps(dst, dst);
+    cpu_has_avx ? __ vxorps(dst, dst, dst) : __ xorps(dst, dst);
     return;
   }
 
@@ -431,41 +432,69 @@
   }
 }
 
+static void CreateVecTerOpLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+  LocationSummary* locations = new (allocator) LocationSummary(instruction);
+  switch (instruction->GetPackedType()) {
+    case DataType::Type::kBool:
+    case DataType::Type::kUint8:
+    case DataType::Type::kInt8:
+    case DataType::Type::kUint16:
+    case DataType::Type::kInt16:
+    case DataType::Type::kInt32:
+    case DataType::Type::kInt64:
+    case DataType::Type::kFloat32:
+    case DataType::Type::kFloat64:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetOut(Location::RequiresFpuRegister());
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
 void LocationsBuilderX86::VisitVecAdd(HVecAdd* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  if (CpuHasAvxFeatureFlag()) {
+    CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction);
+  } else {
+    CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  }
 }
 
 void InstructionCodeGeneratorX86::VisitVecAdd(HVecAdd* instruction) {
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
   LocationSummary* locations = instruction->GetLocations();
-  DCHECK(locations->InAt(0).Equals(locations->Out()));
   XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+  XmmRegister other_src = locations->InAt(0).AsFpuRegister<XmmRegister>();
   XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  DCHECK(cpu_has_avx || other_src == dst);
   switch (instruction->GetPackedType()) {
     case DataType::Type::kUint8:
     case DataType::Type::kInt8:
       DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ paddb(dst, src);
+      cpu_has_avx ? __ vpaddb(dst, other_src, src) : __ paddb(dst, src);
       break;
     case DataType::Type::kUint16:
     case DataType::Type::kInt16:
       DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ paddw(dst, src);
+      cpu_has_avx ? __ vpaddw(dst, other_src, src) : __ paddw(dst, src);
       break;
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ paddd(dst, src);
+      cpu_has_avx ?  __ vpaddd(dst, other_src, src) : __ paddd(dst, src);
       break;
     case DataType::Type::kInt64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ paddq(dst, src);
+      cpu_has_avx ? __ vpaddq(dst, other_src, src) : __ paddq(dst, src);
       break;
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ addps(dst, src);
+      cpu_has_avx ? __ vaddps(dst, other_src, src) : __ addps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ addpd(dst, src);
+      cpu_has_avx ? __ vaddpd(dst, other_src, src) : __ addpd(dst, src);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -533,40 +562,46 @@
 }
 
 void LocationsBuilderX86::VisitVecSub(HVecSub* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  if (CpuHasAvxFeatureFlag()) {
+    CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction);
+  } else {
+    CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  }
 }
 
 void InstructionCodeGeneratorX86::VisitVecSub(HVecSub* instruction) {
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
   LocationSummary* locations = instruction->GetLocations();
-  DCHECK(locations->InAt(0).Equals(locations->Out()));
   XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+  XmmRegister other_src = locations->InAt(0).AsFpuRegister<XmmRegister>();
   XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  DCHECK(cpu_has_avx || other_src == dst);
   switch (instruction->GetPackedType()) {
     case DataType::Type::kUint8:
     case DataType::Type::kInt8:
       DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ psubb(dst, src);
+      cpu_has_avx ? __ vpsubb(dst, other_src, src) : __ psubb(dst, src);
       break;
     case DataType::Type::kUint16:
     case DataType::Type::kInt16:
       DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ psubw(dst, src);
+      cpu_has_avx ? __ vpsubw(dst, other_src, src) : __ psubw(dst, src);
       break;
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ psubd(dst, src);
+      cpu_has_avx ?  __ vpsubd(dst, other_src, src) : __ psubd(dst, src);
       break;
     case DataType::Type::kInt64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ psubq(dst, src);
+      cpu_has_avx ? __ vpsubq(dst, other_src, src) : __ psubq(dst, src);
       break;
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ subps(dst, src);
+      cpu_has_avx ? __ vsubps(dst, other_src, src) : __ subps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ subpd(dst, src);
+      cpu_has_avx ? __ vsubpd(dst, other_src, src) : __ subpd(dst, src);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -607,31 +642,37 @@
 }
 
 void LocationsBuilderX86::VisitVecMul(HVecMul* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  if (CpuHasAvxFeatureFlag()) {
+    CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction);
+  } else {
+    CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  }
 }
 
 void InstructionCodeGeneratorX86::VisitVecMul(HVecMul* instruction) {
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
   LocationSummary* locations = instruction->GetLocations();
-  DCHECK(locations->InAt(0).Equals(locations->Out()));
   XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+  XmmRegister other_src = locations->InAt(0).AsFpuRegister<XmmRegister>();
   XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  DCHECK(cpu_has_avx || other_src == dst);
   switch (instruction->GetPackedType()) {
     case DataType::Type::kUint16:
     case DataType::Type::kInt16:
       DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ pmullw(dst, src);
+      cpu_has_avx ? __ vpmullw(dst, other_src, src) : __ pmullw(dst, src);
       break;
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ pmulld(dst, src);
+      cpu_has_avx ? __ vpmulld(dst, other_src, src) : __ pmulld(dst, src);
       break;
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ mulps(dst, src);
+      cpu_has_avx ? __ vmulps(dst, other_src, src) : __ mulps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ mulpd(dst, src);
+      cpu_has_avx ? __ vmulpd(dst, other_src, src) : __ mulpd(dst, src);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -640,22 +681,28 @@
 }
 
 void LocationsBuilderX86::VisitVecDiv(HVecDiv* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  if (CpuHasAvxFeatureFlag()) {
+    CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction);
+  } else {
+    CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  }
 }
 
 void InstructionCodeGeneratorX86::VisitVecDiv(HVecDiv* instruction) {
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
   LocationSummary* locations = instruction->GetLocations();
-  DCHECK(locations->InAt(0).Equals(locations->Out()));
   XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+  XmmRegister other_src = locations->InAt(0).AsFpuRegister<XmmRegister>();
   XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  DCHECK(cpu_has_avx || other_src == dst);
   switch (instruction->GetPackedType()) {
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ divps(dst, src);
+      cpu_has_avx ? __ vdivps(dst, other_src, src) : __ divps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ divpd(dst, src);
+      cpu_has_avx ?  __ vdivpd(dst, other_src, src) : __ divpd(dst, src);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -762,14 +809,20 @@
 }
 
 void LocationsBuilderX86::VisitVecAnd(HVecAnd* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  if (CpuHasAvxFeatureFlag()) {
+    CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction);
+  } else {
+    CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  }
 }
 
 void InstructionCodeGeneratorX86::VisitVecAnd(HVecAnd* instruction) {
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
   LocationSummary* locations = instruction->GetLocations();
-  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  XmmRegister other_src = locations->InAt(0).AsFpuRegister<XmmRegister>();
   XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
   XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  DCHECK(cpu_has_avx || other_src == dst);
   switch (instruction->GetPackedType()) {
     case DataType::Type::kBool:
     case DataType::Type::kUint8:
@@ -780,15 +833,15 @@
     case DataType::Type::kInt64:
       DCHECK_LE(2u, instruction->GetVectorLength());
       DCHECK_LE(instruction->GetVectorLength(), 16u);
-      __ pand(dst, src);
+      cpu_has_avx ? __ vpand(dst, other_src, src) : __ pand(dst, src);
       break;
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ andps(dst, src);
+      cpu_has_avx ? __ vandps(dst, other_src, src) : __ andps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ andpd(dst, src);
+      cpu_has_avx ? __ vandpd(dst, other_src, src) : __ andpd(dst, src);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -797,14 +850,20 @@
 }
 
 void LocationsBuilderX86::VisitVecAndNot(HVecAndNot* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  if (CpuHasAvxFeatureFlag()) {
+    CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction);
+  } else {
+    CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  }
 }
 
 void InstructionCodeGeneratorX86::VisitVecAndNot(HVecAndNot* instruction) {
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
   LocationSummary* locations = instruction->GetLocations();
-  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  XmmRegister other_src = locations->InAt(0).AsFpuRegister<XmmRegister>();
   XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
   XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  DCHECK(cpu_has_avx || other_src == dst);
   switch (instruction->GetPackedType()) {
     case DataType::Type::kBool:
     case DataType::Type::kUint8:
@@ -815,15 +874,15 @@
     case DataType::Type::kInt64:
       DCHECK_LE(2u, instruction->GetVectorLength());
       DCHECK_LE(instruction->GetVectorLength(), 16u);
-      __ pandn(dst, src);
+      cpu_has_avx ? __ vpandn(dst, other_src, src) : __ pandn(dst, src);
       break;
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ andnps(dst, src);
+      cpu_has_avx ? __ vandnps(dst, other_src, src) : __ andnps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ andnpd(dst, src);
+      cpu_has_avx ? __ vandnpd(dst, other_src, src) : __ andnpd(dst, src);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -832,14 +891,20 @@
 }
 
 void LocationsBuilderX86::VisitVecOr(HVecOr* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  if (CpuHasAvxFeatureFlag()) {
+    CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction);
+  } else {
+    CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  }
 }
 
 void InstructionCodeGeneratorX86::VisitVecOr(HVecOr* instruction) {
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
   LocationSummary* locations = instruction->GetLocations();
-  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  XmmRegister other_src = locations->InAt(0).AsFpuRegister<XmmRegister>();
   XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
   XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  DCHECK(cpu_has_avx || other_src == dst);
   switch (instruction->GetPackedType()) {
     case DataType::Type::kBool:
     case DataType::Type::kUint8:
@@ -850,15 +915,15 @@
     case DataType::Type::kInt64:
       DCHECK_LE(2u, instruction->GetVectorLength());
       DCHECK_LE(instruction->GetVectorLength(), 16u);
-      __ por(dst, src);
+      cpu_has_avx ? __ vpor(dst, other_src, src) : __ por(dst, src);
       break;
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ orps(dst, src);
+      cpu_has_avx ? __ vorps(dst, other_src, src) : __ orps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ orpd(dst, src);
+      cpu_has_avx ? __ vorpd(dst, other_src, src) : __ orpd(dst, src);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -867,14 +932,20 @@
 }
 
 void LocationsBuilderX86::VisitVecXor(HVecXor* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  if (CpuHasAvxFeatureFlag()) {
+    CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction);
+  } else {
+    CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  }
 }
 
 void InstructionCodeGeneratorX86::VisitVecXor(HVecXor* instruction) {
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
   LocationSummary* locations = instruction->GetLocations();
-  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  XmmRegister other_src = locations->InAt(0).AsFpuRegister<XmmRegister>();
   XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
   XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  DCHECK(cpu_has_avx || other_src == dst);
   switch (instruction->GetPackedType()) {
     case DataType::Type::kBool:
     case DataType::Type::kUint8:
@@ -885,15 +956,15 @@
     case DataType::Type::kInt64:
       DCHECK_LE(2u, instruction->GetVectorLength());
       DCHECK_LE(instruction->GetVectorLength(), 16u);
-      __ pxor(dst, src);
+      cpu_has_avx ? __ vpxor(dst, other_src, src) : __ pxor(dst, src);
       break;
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ xorps(dst, src);
+      cpu_has_avx ? __ vxorps(dst, other_src, src) : __ xorps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ xorpd(dst, src);
+      cpu_has_avx ? __ vxorpd(dst, other_src, src) : __ xorpd(dst, src);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -1046,7 +1117,8 @@
   DCHECK_EQ(1u, instruction->InputCount());  // only one input currently implemented
 
   // Zero out all other elements first.
-  __ xorps(dst, dst);
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
+  cpu_has_avx ? __ vxorps(dst, dst, dst) : __ xorps(dst, dst);
 
   // Shorthand for any type of zero.
   if (IsZeroBitPattern(instruction->InputAt(0))) {
@@ -1129,11 +1201,38 @@
 }
 
 void LocationsBuilderX86::VisitVecDotProd(HVecDotProd* instruction) {
-  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+  locations->SetInAt(0, Location::RequiresFpuRegister());
+  locations->SetInAt(1, Location::RequiresFpuRegister());
+  locations->SetInAt(2, Location::RequiresFpuRegister());
+  locations->SetOut(Location::SameAsFirstInput());
+  locations->AddTemp(Location::RequiresFpuRegister());
 }
 
 void InstructionCodeGeneratorX86::VisitVecDotProd(HVecDotProd* instruction) {
-  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
+  LocationSummary* locations = instruction->GetLocations();
+  XmmRegister acc = locations->InAt(0).AsFpuRegister<XmmRegister>();
+  XmmRegister left = locations->InAt(1).AsFpuRegister<XmmRegister>();
+  XmmRegister right = locations->InAt(2).AsFpuRegister<XmmRegister>();
+  switch (instruction->GetPackedType()) {
+    case DataType::Type::kInt32: {
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+      if (!cpu_has_avx) {
+        __ movaps(tmp, right);
+        __ pmaddwd(tmp, left);
+        __ paddd(acc, tmp);
+      } else {
+        __ vpmaddwd(tmp, left, right);
+        __ vpaddd(acc, acc, tmp);
+      }
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unsupported SIMD Type" << instruction->GetPackedType();
+      UNREACHABLE();
+  }
 }
 
 // Helper to set up locations for vector memory operations.
diff --git a/compiler/optimizing/code_generator_vector_x86_64.cc b/compiler/optimizing/code_generator_vector_x86_64.cc
index 9c28827..7fac44d 100644
--- a/compiler/optimizing/code_generator_vector_x86_64.cc
+++ b/compiler/optimizing/code_generator_vector_x86_64.cc
@@ -58,9 +58,10 @@
   LocationSummary* locations = instruction->GetLocations();
   XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
 
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
   // Shorthand for any type of zero.
   if (IsZeroBitPattern(instruction->InputAt(0))) {
-    __ xorps(dst, dst);
+    cpu_has_avx ? __ vxorps(dst, dst, dst) : __ xorps(dst, dst);
     return;
   }
 
@@ -414,41 +415,69 @@
   }
 }
 
+static void CreateVecTerOpLocations(ArenaAllocator* allocator, HVecOperation* instruction) {
+  LocationSummary* locations = new (allocator) LocationSummary(instruction);
+  switch (instruction->GetPackedType()) {
+    case DataType::Type::kBool:
+    case DataType::Type::kUint8:
+    case DataType::Type::kInt8:
+    case DataType::Type::kUint16:
+    case DataType::Type::kInt16:
+    case DataType::Type::kInt32:
+    case DataType::Type::kInt64:
+    case DataType::Type::kFloat32:
+    case DataType::Type::kFloat64:
+      locations->SetInAt(0, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetOut(Location::RequiresFpuRegister());
+      break;
+    default:
+      LOG(FATAL) << "Unsupported SIMD type";
+      UNREACHABLE();
+  }
+}
+
 void LocationsBuilderX86_64::VisitVecAdd(HVecAdd* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  if (CpuHasAvxFeatureFlag()) {
+    CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction);
+  } else {
+    CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  }
 }
 
 void InstructionCodeGeneratorX86_64::VisitVecAdd(HVecAdd* instruction) {
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
   LocationSummary* locations = instruction->GetLocations();
-  DCHECK(locations->InAt(0).Equals(locations->Out()));
   XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+  XmmRegister other_src = locations->InAt(0).AsFpuRegister<XmmRegister>();
   XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  DCHECK(cpu_has_avx || other_src == dst);
   switch (instruction->GetPackedType()) {
     case DataType::Type::kUint8:
     case DataType::Type::kInt8:
       DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ paddb(dst, src);
+      cpu_has_avx ? __ vpaddb(dst, other_src, src) : __ paddb(dst, src);
       break;
     case DataType::Type::kUint16:
     case DataType::Type::kInt16:
       DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ paddw(dst, src);
+      cpu_has_avx ? __ vpaddw(dst, other_src, src) : __ paddw(dst, src);
       break;
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ paddd(dst, src);
+      cpu_has_avx ? __ vpaddd(dst, other_src, src) : __ paddd(dst, src);
       break;
     case DataType::Type::kInt64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ paddq(dst, src);
+      cpu_has_avx ? __ vpaddq(dst, other_src, src) : __ paddq(dst, src);
       break;
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ addps(dst, src);
+      cpu_has_avx ? __ vaddps(dst, other_src, src) : __ addps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ addpd(dst, src);
+      cpu_has_avx ? __ vaddpd(dst, other_src, src) : __ addpd(dst, src);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -516,40 +545,46 @@
 }
 
 void LocationsBuilderX86_64::VisitVecSub(HVecSub* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  if (CpuHasAvxFeatureFlag()) {
+    CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction);
+  } else {
+    CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  }
 }
 
 void InstructionCodeGeneratorX86_64::VisitVecSub(HVecSub* instruction) {
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
   LocationSummary* locations = instruction->GetLocations();
-  DCHECK(locations->InAt(0).Equals(locations->Out()));
   XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+  XmmRegister other_src = locations->InAt(0).AsFpuRegister<XmmRegister>();
   XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  DCHECK(cpu_has_avx || other_src == dst);
   switch (instruction->GetPackedType()) {
     case DataType::Type::kUint8:
     case DataType::Type::kInt8:
       DCHECK_EQ(16u, instruction->GetVectorLength());
-      __ psubb(dst, src);
+      cpu_has_avx ? __ vpsubb(dst, other_src, src) : __ psubb(dst, src);
       break;
     case DataType::Type::kUint16:
     case DataType::Type::kInt16:
       DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ psubw(dst, src);
+      cpu_has_avx ? __ vpsubw(dst, other_src, src) : __ psubw(dst, src);
       break;
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ psubd(dst, src);
+      cpu_has_avx ? __ vpsubd(dst, other_src, src) : __ psubd(dst, src);
       break;
     case DataType::Type::kInt64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ psubq(dst, src);
+      cpu_has_avx ? __ vpsubq(dst, other_src, src) : __ psubq(dst, src);
       break;
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ subps(dst, src);
+      cpu_has_avx ? __ vsubps(dst, other_src, src) : __ subps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ subpd(dst, src);
+      cpu_has_avx ? __ vsubpd(dst, other_src, src) : __ subpd(dst, src);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -590,31 +625,37 @@
 }
 
 void LocationsBuilderX86_64::VisitVecMul(HVecMul* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  if (CpuHasAvxFeatureFlag()) {
+    CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction);
+  } else {
+    CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  }
 }
 
 void InstructionCodeGeneratorX86_64::VisitVecMul(HVecMul* instruction) {
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
   LocationSummary* locations = instruction->GetLocations();
-  DCHECK(locations->InAt(0).Equals(locations->Out()));
   XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+  XmmRegister other_src = locations->InAt(0).AsFpuRegister<XmmRegister>();
   XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  DCHECK(cpu_has_avx || other_src == dst);
   switch (instruction->GetPackedType()) {
     case DataType::Type::kUint16:
     case DataType::Type::kInt16:
       DCHECK_EQ(8u, instruction->GetVectorLength());
-      __ pmullw(dst, src);
+      cpu_has_avx ? __ vpmullw(dst, other_src, src) : __ pmullw(dst, src);
       break;
     case DataType::Type::kInt32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ pmulld(dst, src);
+      cpu_has_avx ? __ vpmulld(dst, other_src, src): __ pmulld(dst, src);
       break;
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ mulps(dst, src);
+      cpu_has_avx ? __ vmulps(dst, other_src, src) : __ mulps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ mulpd(dst, src);
+      cpu_has_avx ? __ vmulpd(dst, other_src, src) : __ mulpd(dst, src);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -623,22 +664,28 @@
 }
 
 void LocationsBuilderX86_64::VisitVecDiv(HVecDiv* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  if (CpuHasAvxFeatureFlag()) {
+    CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction);
+  } else {
+    CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  }
 }
 
 void InstructionCodeGeneratorX86_64::VisitVecDiv(HVecDiv* instruction) {
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
   LocationSummary* locations = instruction->GetLocations();
-  DCHECK(locations->InAt(0).Equals(locations->Out()));
   XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
+  XmmRegister other_src = locations->InAt(0).AsFpuRegister<XmmRegister>();
   XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  DCHECK(cpu_has_avx || other_src == dst);
   switch (instruction->GetPackedType()) {
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ divps(dst, src);
+      cpu_has_avx ? __ vdivps(dst, other_src, src) : __ divps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ divpd(dst, src);
+      cpu_has_avx ? __ vdivpd(dst, other_src, src) : __ divpd(dst, src);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -745,14 +792,20 @@
 }
 
 void LocationsBuilderX86_64::VisitVecAnd(HVecAnd* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  if (CpuHasAvxFeatureFlag()) {
+    CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction);
+  } else {
+    CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  }
 }
 
 void InstructionCodeGeneratorX86_64::VisitVecAnd(HVecAnd* instruction) {
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
   LocationSummary* locations = instruction->GetLocations();
-  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  XmmRegister other_src = locations->InAt(0).AsFpuRegister<XmmRegister>();
   XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
   XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  DCHECK(cpu_has_avx || other_src == dst);
   switch (instruction->GetPackedType()) {
     case DataType::Type::kBool:
     case DataType::Type::kUint8:
@@ -763,15 +816,15 @@
     case DataType::Type::kInt64:
       DCHECK_LE(2u, instruction->GetVectorLength());
       DCHECK_LE(instruction->GetVectorLength(), 16u);
-      __ pand(dst, src);
+      cpu_has_avx ? __ vpand(dst, other_src, src) : __ pand(dst, src);
       break;
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ andps(dst, src);
+      cpu_has_avx ? __ vandps(dst, other_src, src) : __ andps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ andpd(dst, src);
+      cpu_has_avx ? __ vandpd(dst, other_src, src) : __ andpd(dst, src);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -780,14 +833,20 @@
 }
 
 void LocationsBuilderX86_64::VisitVecAndNot(HVecAndNot* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  if (CpuHasAvxFeatureFlag()) {
+    CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction);
+  } else {
+    CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  }
 }
 
 void InstructionCodeGeneratorX86_64::VisitVecAndNot(HVecAndNot* instruction) {
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
   LocationSummary* locations = instruction->GetLocations();
-  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  XmmRegister other_src = locations->InAt(0).AsFpuRegister<XmmRegister>();
   XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
   XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  DCHECK(cpu_has_avx || other_src == dst);
   switch (instruction->GetPackedType()) {
     case DataType::Type::kBool:
     case DataType::Type::kUint8:
@@ -798,15 +857,15 @@
     case DataType::Type::kInt64:
       DCHECK_LE(2u, instruction->GetVectorLength());
       DCHECK_LE(instruction->GetVectorLength(), 16u);
-      __ pandn(dst, src);
+      cpu_has_avx ? __ vpandn(dst, other_src, src) : __ pandn(dst, src);
       break;
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ andnps(dst, src);
+      cpu_has_avx ? __ vandnps(dst, other_src, src) : __ andnps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ andnpd(dst, src);
+      cpu_has_avx ? __ vandnpd(dst, other_src, src) : __ andnpd(dst, src);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -815,14 +874,20 @@
 }
 
 void LocationsBuilderX86_64::VisitVecOr(HVecOr* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  if (CpuHasAvxFeatureFlag()) {
+    CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction);
+  } else {
+    CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  }
 }
 
 void InstructionCodeGeneratorX86_64::VisitVecOr(HVecOr* instruction) {
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
   LocationSummary* locations = instruction->GetLocations();
-  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  XmmRegister other_src = locations->InAt(0).AsFpuRegister<XmmRegister>();
   XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
   XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  DCHECK(cpu_has_avx || other_src == dst);
   switch (instruction->GetPackedType()) {
     case DataType::Type::kBool:
     case DataType::Type::kUint8:
@@ -833,15 +898,15 @@
     case DataType::Type::kInt64:
       DCHECK_LE(2u, instruction->GetVectorLength());
       DCHECK_LE(instruction->GetVectorLength(), 16u);
-      __ por(dst, src);
+      cpu_has_avx ? __ vpor(dst, other_src, src) : __ por(dst, src);
       break;
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ orps(dst, src);
+      cpu_has_avx ? __ vorps(dst, other_src, src) : __ orps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ orpd(dst, src);
+      cpu_has_avx ? __ vorpd(dst, other_src, src) : __ orpd(dst, src);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -850,14 +915,20 @@
 }
 
 void LocationsBuilderX86_64::VisitVecXor(HVecXor* instruction) {
-  CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  if (CpuHasAvxFeatureFlag()) {
+    CreateVecTerOpLocations(GetGraph()->GetAllocator(), instruction);
+  } else {
+    CreateVecBinOpLocations(GetGraph()->GetAllocator(), instruction);
+  }
 }
 
 void InstructionCodeGeneratorX86_64::VisitVecXor(HVecXor* instruction) {
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
   LocationSummary* locations = instruction->GetLocations();
-  DCHECK(locations->InAt(0).Equals(locations->Out()));
+  XmmRegister other_src = locations->InAt(0).AsFpuRegister<XmmRegister>();
   XmmRegister src = locations->InAt(1).AsFpuRegister<XmmRegister>();
   XmmRegister dst = locations->Out().AsFpuRegister<XmmRegister>();
+  DCHECK(cpu_has_avx || other_src == dst);
   switch (instruction->GetPackedType()) {
     case DataType::Type::kBool:
     case DataType::Type::kUint8:
@@ -868,15 +939,15 @@
     case DataType::Type::kInt64:
       DCHECK_LE(2u, instruction->GetVectorLength());
       DCHECK_LE(instruction->GetVectorLength(), 16u);
-      __ pxor(dst, src);
+      cpu_has_avx ? __ vpxor(dst, other_src, src) : __ pxor(dst, src);
       break;
     case DataType::Type::kFloat32:
       DCHECK_EQ(4u, instruction->GetVectorLength());
-      __ xorps(dst, src);
+      cpu_has_avx ? __ vxorps(dst, other_src, src) : __ xorps(dst, src);
       break;
     case DataType::Type::kFloat64:
       DCHECK_EQ(2u, instruction->GetVectorLength());
-      __ xorpd(dst, src);
+      cpu_has_avx ? __ vxorpd(dst, other_src, src) : __ xorpd(dst, src);
       break;
     default:
       LOG(FATAL) << "Unsupported SIMD type: " << instruction->GetPackedType();
@@ -1024,7 +1095,8 @@
   DCHECK_EQ(1u, instruction->InputCount());  // only one input currently implemented
 
   // Zero out all other elements first.
-  __ xorps(dst, dst);
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
+  cpu_has_avx ? __ vxorps(dst, dst, dst) : __ xorps(dst, dst);
 
   // Shorthand for any type of zero.
   if (IsZeroBitPattern(instruction->InputAt(0))) {
@@ -1102,11 +1174,38 @@
 }
 
 void LocationsBuilderX86_64::VisitVecDotProd(HVecDotProd* instruction) {
-  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+  LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(instruction);
+  locations->SetInAt(0, Location::RequiresFpuRegister());
+  locations->SetInAt(1, Location::RequiresFpuRegister());
+  locations->SetInAt(2, Location::RequiresFpuRegister());
+  locations->SetOut(Location::SameAsFirstInput());
+  locations->AddTemp(Location::RequiresFpuRegister());
 }
 
 void InstructionCodeGeneratorX86_64::VisitVecDotProd(HVecDotProd* instruction) {
-  LOG(FATAL) << "No SIMD for " << instruction->GetId();
+  bool cpu_has_avx = CpuHasAvxFeatureFlag();
+  LocationSummary* locations = instruction->GetLocations();
+  XmmRegister acc = locations->InAt(0).AsFpuRegister<XmmRegister>();
+  XmmRegister left = locations->InAt(1).AsFpuRegister<XmmRegister>();
+  XmmRegister right = locations->InAt(2).AsFpuRegister<XmmRegister>();
+  switch (instruction->GetPackedType()) {
+    case DataType::Type::kInt32: {
+      DCHECK_EQ(4u, instruction->GetVectorLength());
+      XmmRegister tmp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
+      if (!cpu_has_avx) {
+        __ movaps(tmp, right);
+        __ pmaddwd(tmp, left);
+        __ paddd(acc, tmp);
+      } else {
+        __ vpmaddwd(tmp, left, right);
+        __ vpaddd(acc, acc, tmp);
+      }
+      break;
+    }
+    default:
+      LOG(FATAL) << "Unsupported SIMD Type" << instruction->GetPackedType();
+      UNREACHABLE();
+  }
 }
 
 // Helper to set up locations for vector memory operations.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 95118b0..ed1a536 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -16,7 +16,7 @@
 
 #include "code_generator_x86.h"
 
-#include "art_method.h"
+#include "art_method-inl.h"
 #include "class_table.h"
 #include "code_generator_utils.h"
 #include "compiled_method.h"
@@ -27,10 +27,12 @@
 #include "heap_poisoning.h"
 #include "intrinsics.h"
 #include "intrinsics_x86.h"
+#include "jit/profiling_info.h"
 #include "linker/linker_patch.h"
 #include "lock_word.h"
 #include "mirror/array-inl.h"
 #include "mirror/class-inl.h"
+#include "scoped_thread_state_change-inl.h"
 #include "thread.h"
 #include "utils/assembler.h"
 #include "utils/stack_checks.h"
@@ -987,7 +989,7 @@
   } else {
     __ movsd(Address(ESP, stack_index), XmmRegister(reg_id));
   }
-  return GetFloatingPointSpillSlotSize();
+  return GetSlowPathFPWidth();
 }
 
 size_t CodeGeneratorX86::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
@@ -996,7 +998,7 @@
   } else {
     __ movsd(XmmRegister(reg_id), Address(ESP, stack_index));
   }
-  return GetFloatingPointSpillSlotSize();
+  return GetSlowPathFPWidth();
 }
 
 void CodeGeneratorX86::InvokeRuntime(QuickEntrypointEnum entrypoint,
@@ -1045,7 +1047,7 @@
       type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       boot_image_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-      boot_image_intrinsic_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+      boot_image_other_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
       constant_area_start_(-1),
@@ -1070,6 +1072,76 @@
   return dwarf::Reg::X86Core(static_cast<int>(reg));
 }
 
+void CodeGeneratorX86::MaybeIncrementHotness(bool is_frame_entry) {
+  if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+    Register reg = EAX;
+    if (is_frame_entry) {
+      reg = kMethodRegisterArgument;
+    } else {
+      __ pushl(EAX);
+      __ movl(EAX, Address(ESP, kX86WordSize));
+    }
+    NearLabel overflow;
+    __ cmpw(Address(reg, ArtMethod::HotnessCountOffset().Int32Value()),
+            Immediate(ArtMethod::MaxCounter()));
+    __ j(kEqual, &overflow);
+    __ addw(Address(reg, ArtMethod::HotnessCountOffset().Int32Value()),
+            Immediate(1));
+    __ Bind(&overflow);
+    if (!is_frame_entry) {
+      __ popl(EAX);
+    }
+  }
+
+  if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+    ScopedObjectAccess soa(Thread::Current());
+    ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
+    if (info != nullptr) {
+      uint32_t address = reinterpret_cast32<uint32_t>(info);
+      NearLabel done;
+      if (HasEmptyFrame()) {
+        CHECK(is_frame_entry);
+        // Alignment
+        __ subl(ESP, Immediate(8));
+        __ cfi().AdjustCFAOffset(8);
+        // We need a temporary. The stub also expects the method at bottom of stack.
+        __ pushl(EAX);
+        __ cfi().AdjustCFAOffset(4);
+        __ movl(EAX, Immediate(address));
+        __ addw(Address(EAX, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()),
+                Immediate(1));
+        __ j(kCarryClear, &done);
+        GenerateInvokeRuntime(
+            GetThreadOffset<kX86PointerSize>(kQuickCompileOptimized).Int32Value());
+        __ Bind(&done);
+        // We don't strictly require to restore EAX, but this makes the generated
+        // code easier to reason about.
+        __ popl(EAX);
+        __ cfi().AdjustCFAOffset(-4);
+        __ addl(ESP, Immediate(8));
+        __ cfi().AdjustCFAOffset(-8);
+      } else {
+        if (!RequiresCurrentMethod()) {
+          CHECK(is_frame_entry);
+          __ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument);
+        }
+        // We need a temporary.
+        __ pushl(EAX);
+        __ cfi().AdjustCFAOffset(4);
+        __ movl(EAX, Immediate(address));
+        __ addw(Address(EAX, ProfilingInfo::BaselineHotnessCountOffset().Int32Value()),
+                Immediate(1));
+        __ popl(EAX);  // Put stack as expected before exiting or calling stub.
+        __ cfi().AdjustCFAOffset(-4);
+        __ j(kCarryClear, &done);
+        GenerateInvokeRuntime(
+            GetThreadOffset<kX86PointerSize>(kQuickCompileOptimized).Int32Value());
+        __ Bind(&done);
+      }
+    }
+  }
+}
+
 void CodeGeneratorX86::GenerateFrameEntry() {
   __ cfi().SetCurrentCFAOffset(kX86WordSize);  // return address
   __ Bind(&frame_entry_label_);
@@ -1077,44 +1149,39 @@
       IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86);
   DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
 
-  if (GetCompilerOptions().CountHotnessInCompiledCode()) {
-    __ addw(Address(kMethodRegisterArgument, ArtMethod::HotnessCountOffset().Int32Value()),
-            Immediate(1));
-  }
-
   if (!skip_overflow_check) {
     size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86);
     __ testl(EAX, Address(ESP, -static_cast<int32_t>(reserved_bytes)));
     RecordPcInfo(nullptr, 0);
   }
 
-  if (HasEmptyFrame()) {
-    return;
-  }
+  if (!HasEmptyFrame()) {
+    for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
+      Register reg = kCoreCalleeSaves[i];
+      if (allocated_registers_.ContainsCoreRegister(reg)) {
+        __ pushl(reg);
+        __ cfi().AdjustCFAOffset(kX86WordSize);
+        __ cfi().RelOffset(DWARFReg(reg), 0);
+      }
+    }
 
-  for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
-    Register reg = kCoreCalleeSaves[i];
-    if (allocated_registers_.ContainsCoreRegister(reg)) {
-      __ pushl(reg);
-      __ cfi().AdjustCFAOffset(kX86WordSize);
-      __ cfi().RelOffset(DWARFReg(reg), 0);
+    int adjust = GetFrameSize() - FrameEntrySpillSize();
+    __ subl(ESP, Immediate(adjust));
+    __ cfi().AdjustCFAOffset(adjust);
+    // Save the current method if we need it. Note that we do not
+    // do this in HCurrentMethod, as the instruction might have been removed
+    // in the SSA graph.
+    if (RequiresCurrentMethod()) {
+      __ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument);
+    }
+
+    if (GetGraph()->HasShouldDeoptimizeFlag()) {
+      // Initialize should_deoptimize flag to 0.
+      __ movl(Address(ESP, GetStackOffsetOfShouldDeoptimizeFlag()), Immediate(0));
     }
   }
 
-  int adjust = GetFrameSize() - FrameEntrySpillSize();
-  __ subl(ESP, Immediate(adjust));
-  __ cfi().AdjustCFAOffset(adjust);
-  // Save the current method if we need it. Note that we do not
-  // do this in HCurrentMethod, as the instruction might have been removed
-  // in the SSA graph.
-  if (RequiresCurrentMethod()) {
-    __ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument);
-  }
-
-  if (GetGraph()->HasShouldDeoptimizeFlag()) {
-    // Initialize should_deoptimize flag to 0.
-    __ movl(Address(ESP, GetStackOffsetOfShouldDeoptimizeFlag()), Immediate(0));
-  }
+  MaybeIncrementHotness(/* is_frame_entry= */ true);
 }
 
 void CodeGeneratorX86::GenerateFrameExit() {
@@ -1382,12 +1449,7 @@
 
   HLoopInformation* info = block->GetLoopInformation();
   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
-    if (codegen_->GetCompilerOptions().CountHotnessInCompiledCode()) {
-      __ pushl(EAX);
-      __ movl(EAX, Address(ESP, kX86WordSize));
-      __ addw(Address(EAX, ArtMethod::HotnessCountOffset().Int32Value()), Immediate(1));
-      __ popl(EAX);
-    }
+    codegen_->MaybeIncrementHotness(/* is_frame_entry= */ false);
     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
     return;
   }
@@ -2154,31 +2216,46 @@
 }
 
 void InstructionCodeGeneratorX86::VisitReturn(HReturn* ret) {
-  if (kIsDebugBuild) {
-    switch (ret->InputAt(0)->GetType()) {
-      case DataType::Type::kReference:
-      case DataType::Type::kBool:
-      case DataType::Type::kUint8:
-      case DataType::Type::kInt8:
-      case DataType::Type::kUint16:
-      case DataType::Type::kInt16:
-      case DataType::Type::kInt32:
-        DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister<Register>(), EAX);
-        break;
+  switch (ret->InputAt(0)->GetType()) {
+    case DataType::Type::kReference:
+    case DataType::Type::kBool:
+    case DataType::Type::kUint8:
+    case DataType::Type::kInt8:
+    case DataType::Type::kUint16:
+    case DataType::Type::kInt16:
+    case DataType::Type::kInt32:
+      DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister<Register>(), EAX);
+      break;
 
-      case DataType::Type::kInt64:
-        DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairLow<Register>(), EAX);
-        DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairHigh<Register>(), EDX);
-        break;
+    case DataType::Type::kInt64:
+      DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairLow<Register>(), EAX);
+      DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairHigh<Register>(), EDX);
+      break;
 
-      case DataType::Type::kFloat32:
-      case DataType::Type::kFloat64:
-        DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>(), XMM0);
-        break;
+    case DataType::Type::kFloat32:
+      DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>(), XMM0);
+      if (GetGraph()->IsCompilingOsr()) {
+        // To simplify callers of an OSR method, we put the return value in both
+        // floating point and core registers.
+        __ movd(EAX, XMM0);
+      }
+      break;
 
-      default:
-        LOG(FATAL) << "Unknown return type " << ret->InputAt(0)->GetType();
-    }
+    case DataType::Type::kFloat64:
+      DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>(), XMM0);
+      if (GetGraph()->IsCompilingOsr()) {
+        // To simplify callers of an OSR method, we put the return value in both
+        // floating point and core registers.
+        __ movd(EAX, XMM0);
+        // Use XMM1 as temporary register to not clobber XMM0.
+        __ movaps(XMM1, XMM0);
+        __ psrlq(XMM1, Immediate(32));
+        __ movd(EDX, XMM1);
+      }
+      break;
+
+    default:
+      LOG(FATAL) << "Unknown return type " << ret->InputAt(0)->GetType();
   }
   codegen_->GenerateFrameExit();
 }
@@ -2247,6 +2324,11 @@
   }
 
   HandleInvoke(invoke);
+
+  if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+    // Add one temporary for inline cache update.
+    invoke->GetLocations()->AddTemp(Location::RegisterLocation(EBP));
+  }
 }
 
 void LocationsBuilderX86::HandleInvoke(HInvoke* invoke) {
@@ -2270,6 +2352,41 @@
   HandleInvoke(invoke);
   // Add the hidden argument.
   invoke->GetLocations()->AddTemp(Location::FpuRegisterLocation(XMM7));
+
+  if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+    // Add one temporary for inline cache update.
+    invoke->GetLocations()->AddTemp(Location::RegisterLocation(EBP));
+  }
+}
+
+void CodeGeneratorX86::MaybeGenerateInlineCacheCheck(HInstruction* instruction, Register klass) {
+  DCHECK_EQ(EAX, klass);
+  // We know the destination of an intrinsic, so no need to record inline
+  // caches (also the intrinsic location builder doesn't request an additional
+  // temporary).
+  if (!instruction->GetLocations()->Intrinsified() &&
+      GetGraph()->IsCompilingBaseline() &&
+      !Runtime::Current()->IsAotCompiler()) {
+    DCHECK(!instruction->GetEnvironment()->IsFromInlinedInvoke());
+    ScopedObjectAccess soa(Thread::Current());
+    ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
+    if (info != nullptr) {
+      InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
+      uint32_t address = reinterpret_cast32<uint32_t>(cache);
+      if (kIsDebugBuild) {
+        uint32_t temp_index = instruction->GetLocations()->GetTempCount() - 1u;
+        CHECK_EQ(EBP, instruction->GetLocations()->GetTemp(temp_index).AsRegister<Register>());
+      }
+      Register temp = EBP;
+      NearLabel done;
+      __ movl(temp, Immediate(address));
+      // Fast path for a monomorphic cache.
+      __ cmpl(klass, Address(temp, InlineCache::ClassesOffset().Int32Value()));
+      __ j(kEqual, &done);
+      GenerateInvokeRuntime(GetThreadOffset<kX86PointerSize>(kQuickUpdateInlineCache).Int32Value());
+      __ Bind(&done);
+    }
+  }
 }
 
 void InstructionCodeGeneratorX86::VisitInvokeInterface(HInvokeInterface* invoke) {
@@ -2303,6 +2420,9 @@
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   __ MaybeUnpoisonHeapReference(temp);
+
+  codegen_->MaybeGenerateInlineCacheCheck(invoke, temp);
+
   // temp = temp->GetAddressOfIMT()
   __ movl(temp,
       Address(temp, mirror::Class::ImtPtrOffset(kX86PointerSize).Uint32Value()));
@@ -4853,7 +4973,7 @@
       callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
       break;
     case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative: {
-      DCHECK(GetCompilerOptions().IsBootImage());
+      DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension());
       Register base_reg = GetInvokeStaticOrDirectExtraParameter(invoke,
                                                                 temp.AsRegister<Register>());
       __ leal(temp.AsRegister<Register>(), Address(base_reg, CodeGeneratorX86::kDummy32BitOffset));
@@ -4874,6 +4994,7 @@
                                                                 temp.AsRegister<Register>());
       __ movl(temp.AsRegister<Register>(), Address(base_reg, kDummy32BitOffset));
       RecordMethodBssEntryPatch(invoke);
+      // No need for memory fence, thanks to the x86 memory model.
       break;
     }
     case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
@@ -4925,6 +5046,9 @@
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   __ MaybeUnpoisonHeapReference(temp);
+
+  MaybeGenerateInlineCacheCheck(invoke, temp);
+
   // temp = temp->GetMethodAt(method_offset);
   __ movl(temp, Address(temp, method_offset));
   // call temp->GetEntryPoint();
@@ -4935,16 +5059,16 @@
 
 void CodeGeneratorX86::RecordBootImageIntrinsicPatch(HX86ComputeBaseMethodAddress* method_address,
                                                      uint32_t intrinsic_data) {
-  boot_image_intrinsic_patches_.emplace_back(
+  boot_image_other_patches_.emplace_back(
       method_address, /* target_dex_file= */ nullptr, intrinsic_data);
-  __ Bind(&boot_image_intrinsic_patches_.back().label);
+  __ Bind(&boot_image_other_patches_.back().label);
 }
 
 void CodeGeneratorX86::RecordBootImageRelRoPatch(HX86ComputeBaseMethodAddress* method_address,
                                                  uint32_t boot_image_offset) {
-  boot_image_method_patches_.emplace_back(
+  boot_image_other_patches_.emplace_back(
       method_address, /* target_dex_file= */ nullptr, boot_image_offset);
-  __ Bind(&boot_image_method_patches_.back().label);
+  __ Bind(&boot_image_other_patches_.back().label);
 }
 
 void CodeGeneratorX86::RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke) {
@@ -5089,23 +5213,26 @@
       type_bss_entry_patches_.size() +
       boot_image_string_patches_.size() +
       string_bss_entry_patches_.size() +
-      boot_image_intrinsic_patches_.size();
+      boot_image_other_patches_.size();
   linker_patches->reserve(size);
-  if (GetCompilerOptions().IsBootImage()) {
+  if (GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension()) {
     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeMethodPatch>(
         boot_image_method_patches_, linker_patches);
     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeTypePatch>(
         boot_image_type_patches_, linker_patches);
     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
         boot_image_string_patches_, linker_patches);
-    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::IntrinsicReferencePatch>>(
-        boot_image_intrinsic_patches_, linker_patches);
   } else {
-    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::DataBimgRelRoPatch>>(
-        boot_image_method_patches_, linker_patches);
+    DCHECK(boot_image_method_patches_.empty());
     DCHECK(boot_image_type_patches_.empty());
     DCHECK(boot_image_string_patches_.empty());
-    DCHECK(boot_image_intrinsic_patches_.empty());
+  }
+  if (GetCompilerOptions().IsBootImage()) {
+    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::IntrinsicReferencePatch>>(
+        boot_image_other_patches_, linker_patches);
+  } else {
+    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::DataBimgRelRoPatch>>(
+        boot_image_other_patches_, linker_patches);
   }
   EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
       method_bss_entry_patches_, linker_patches);
@@ -5510,6 +5637,15 @@
   HandleFieldGet(instruction, instruction->GetFieldInfo());
 }
 
+void LocationsBuilderX86::VisitStringBuilderAppend(HStringBuilderAppend* instruction) {
+  codegen_->CreateStringBuilderAppendLocations(instruction, Location::RegisterLocation(EAX));
+}
+
+void InstructionCodeGeneratorX86::VisitStringBuilderAppend(HStringBuilderAppend* instruction) {
+  __ movl(EAX, Immediate(instruction->GetFormat()->GetValue()));
+  codegen_->InvokeRuntime(kQuickStringBuilderAppend, instruction, instruction->GetDexPc());
+}
+
 void LocationsBuilderX86::VisitUnresolvedInstanceFieldGet(
     HUnresolvedInstanceFieldGet* instruction) {
   FieldAccessCallingConventionX86 calling_convention;
@@ -5781,13 +5917,11 @@
 
   bool needs_write_barrier =
       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
-  bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
+  bool needs_type_check = instruction->NeedsTypeCheck();
 
   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
       instruction,
-      may_need_runtime_call_for_type_check ?
-          LocationSummary::kCallOnSlowPath :
-          LocationSummary::kNoCall);
+      needs_type_check ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
 
   bool is_byte_type = DataType::Size(value_type) == 1u;
   // We need the inputs to be different than the output in case of long operation.
@@ -5818,10 +5952,7 @@
   Location index = locations->InAt(1);
   Location value = locations->InAt(2);
   DataType::Type value_type = instruction->GetComponentType();
-  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
-  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
-  bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
+  bool needs_type_check = instruction->NeedsTypeCheck();
   bool needs_write_barrier =
       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
 
@@ -5864,30 +5995,30 @@
         __ movl(address, Immediate(0));
         codegen_->MaybeRecordImplicitNullCheck(instruction);
         DCHECK(!needs_write_barrier);
-        DCHECK(!may_need_runtime_call_for_type_check);
+        DCHECK(!needs_type_check);
         break;
       }
 
       DCHECK(needs_write_barrier);
       Register register_value = value.AsRegister<Register>();
-      // We cannot use a NearLabel for `done`, as its range may be too
-      // short when Baker read barriers are enabled.
-      Label done;
-      NearLabel not_null, do_put;
-      SlowPathCode* slow_path = nullptr;
       Location temp_loc = locations->GetTemp(0);
       Register temp = temp_loc.AsRegister<Register>();
-      if (may_need_runtime_call_for_type_check) {
+
+      bool can_value_be_null = instruction->GetValueCanBeNull();
+      NearLabel do_store;
+      if (can_value_be_null) {
+        __ testl(register_value, register_value);
+        __ j(kEqual, &do_store);
+      }
+
+      SlowPathCode* slow_path = nullptr;
+      if (needs_type_check) {
         slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathX86(instruction);
         codegen_->AddSlowPath(slow_path);
-        if (instruction->GetValueCanBeNull()) {
-          __ testl(register_value, register_value);
-          __ j(kNotEqual, &not_null);
-          __ movl(address, Immediate(0));
-          codegen_->MaybeRecordImplicitNullCheck(instruction);
-          __ jmp(&done);
-          __ Bind(&not_null);
-        }
+
+        const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+        const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+        const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
 
         // Note that when Baker read barriers are enabled, the type
         // checks are performed without read barriers.  This is fine,
@@ -5910,6 +6041,7 @@
         __ cmpl(temp, Address(register_value, class_offset));
 
         if (instruction->StaticTypeOfArrayIsObjectArray()) {
+          NearLabel do_put;
           __ j(kEqual, &do_put);
           // If heap poisoning is enabled, the `temp` reference has
           // not been unpoisoned yet; unpoison it now.
@@ -5926,21 +6058,27 @@
         }
       }
 
+      Register card = locations->GetTemp(1).AsRegister<Register>();
+      codegen_->MarkGCCard(
+          temp, card, array, value.AsRegister<Register>(), /* value_can_be_null= */ false);
+
+      if (can_value_be_null) {
+        DCHECK(do_store.IsLinked());
+        __ Bind(&do_store);
+      }
+
+      Register source = register_value;
       if (kPoisonHeapReferences) {
         __ movl(temp, register_value);
         __ PoisonHeapReference(temp);
-        __ movl(address, temp);
-      } else {
-        __ movl(address, register_value);
-      }
-      if (!may_need_runtime_call_for_type_check) {
-        codegen_->MaybeRecordImplicitNullCheck(instruction);
+        source = temp;
       }
 
-      Register card = locations->GetTemp(1).AsRegister<Register>();
-      codegen_->MarkGCCard(
-          temp, card, array, value.AsRegister<Register>(), instruction->GetValueCanBeNull());
-      __ Bind(&done);
+      __ movl(address, source);
+
+      if (can_value_be_null || !needs_type_check) {
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+      }
 
       if (slow_path != nullptr) {
         __ Bind(slow_path->GetExitLabel());
@@ -6586,7 +6724,8 @@
       break;
     }
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative: {
-      DCHECK(codegen_->GetCompilerOptions().IsBootImage());
+      DCHECK(codegen_->GetCompilerOptions().IsBootImage() ||
+             codegen_->GetCompilerOptions().IsBootImageExtension());
       DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
       Register method_address = locations->InAt(0).AsRegister<Register>();
       __ leal(out, Address(method_address, CodeGeneratorX86::kDummy32BitOffset));
@@ -6606,6 +6745,7 @@
       Address address(method_address, CodeGeneratorX86::kDummy32BitOffset);
       Label* fixup_label = codegen_->NewTypeBssEntryPatch(cls);
       GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
+      // No need for memory fence, thanks to the x86 memory model.
       generate_null_check = true;
       break;
     }
@@ -6693,13 +6833,12 @@
   constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
   const size_t status_byte_offset =
       mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
-  constexpr uint32_t shifted_initialized_value =
-      enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
+  constexpr uint32_t shifted_visibly_initialized_value =
+      enum_cast<uint32_t>(ClassStatus::kVisiblyInitialized) << (status_lsb_position % kBitsPerByte);
 
-  __ cmpb(Address(class_reg,  status_byte_offset), Immediate(shifted_initialized_value));
+  __ cmpb(Address(class_reg,  status_byte_offset), Immediate(shifted_visibly_initialized_value));
   __ j(kBelow, slow_path->GetEntryLabel());
   __ Bind(slow_path->GetExitLabel());
-  // No need for memory fence, thanks to the X86 memory model.
 }
 
 void InstructionCodeGeneratorX86::GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check,
@@ -6783,7 +6922,8 @@
 
   switch (load->GetLoadKind()) {
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
-      DCHECK(codegen_->GetCompilerOptions().IsBootImage());
+      DCHECK(codegen_->GetCompilerOptions().IsBootImage() ||
+             codegen_->GetCompilerOptions().IsBootImageExtension());
       Register method_address = locations->InAt(0).AsRegister<Register>();
       __ leal(out, Address(method_address, CodeGeneratorX86::kDummy32BitOffset));
       codegen_->RecordBootImageStringPatch(load);
@@ -6803,6 +6943,7 @@
       Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
       // /* GcRoot<mirror::String> */ out = *address  /* PC-relative */
       GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
+      // No need for memory fence, thanks to the x86 memory model.
       SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathX86(load);
       codegen_->AddSlowPath(slow_path);
       __ testl(out, out);
@@ -8236,6 +8377,7 @@
 void CodeGeneratorX86::Finalize(CodeAllocator* allocator) {
   // Generate the constant area if needed.
   X86Assembler* assembler = GetAssembler();
+
   if (!assembler->IsConstantAreaEmpty() || !fixups_to_jump_tables_.empty()) {
     // Align to 4 byte boundary to reduce cache misses, as the data is 4 and 8
     // byte values.
@@ -8407,6 +8549,19 @@
   LOG(FATAL) << "Unreachable";
 }
 
+bool LocationsBuilderX86::CpuHasAvxFeatureFlag() {
+  return codegen_->GetInstructionSetFeatures().HasAVX();
+}
+bool LocationsBuilderX86::CpuHasAvx2FeatureFlag() {
+  return codegen_->GetInstructionSetFeatures().HasAVX2();
+}
+bool InstructionCodeGeneratorX86::CpuHasAvxFeatureFlag() {
+  return codegen_->GetInstructionSetFeatures().HasAVX();
+}
+bool InstructionCodeGeneratorX86::CpuHasAvx2FeatureFlag() {
+  return codegen_->GetInstructionSetFeatures().HasAVX2();
+}
+
 #undef __
 
 }  // namespace x86
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index deeef88..16446ce 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -175,6 +175,8 @@
   void HandleShift(HBinaryOperation* instruction);
   void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
   void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info);
+  bool CpuHasAvxFeatureFlag();
+  bool CpuHasAvx2FeatureFlag();
 
   CodeGeneratorX86* const codegen_;
   InvokeDexCallingConventionVisitorX86 parameter_visitor_;
@@ -307,6 +309,8 @@
                                    HBasicBlock* default_block);
 
   void GenerateFPCompare(Location lhs, Location rhs, HInstruction* insn, bool is_double);
+  bool CpuHasAvxFeatureFlag();
+  bool CpuHasAvx2FeatureFlag();
 
   X86Assembler* const assembler_;
   CodeGeneratorX86* const codegen_;
@@ -353,12 +357,16 @@
     return kX86WordSize;
   }
 
-  size_t GetFloatingPointSpillSlotSize() const override {
+  size_t GetSlowPathFPWidth() const override {
     return GetGraph()->HasSIMD()
         ? 4 * kX86WordSize   // 16 bytes == 4 words for each spill
         : 2 * kX86WordSize;  //  8 bytes == 2 words for each spill
   }
 
+  size_t GetCalleePreservedFPWidth() const override {
+    return 2 * kX86WordSize;
+  }
+
   HGraphVisitor* GetLocationBuilder() override {
     return &location_builder_;
   }
@@ -616,6 +624,9 @@
   void GenerateImplicitNullCheck(HNullCheck* instruction) override;
   void GenerateExplicitNullCheck(HNullCheck* instruction) override;
 
+  void MaybeGenerateInlineCacheCheck(HInstruction* instruction, Register klass);
+  void MaybeIncrementHotness(bool is_frame_entry);
+
   // When we don't know the proper offset for the value, we use kDummy32BitOffset.
   // The correct value will be inserted when processing Assembler fixups.
   static constexpr int32_t kDummy32BitOffset = 256;
@@ -644,8 +655,7 @@
   ParallelMoveResolverX86 move_resolver_;
   X86Assembler assembler_;
 
-  // PC-relative method patch info for kBootImageLinkTimePcRelative/kBootImageRelRo.
-  // Also used for type/string patches for kBootImageRelRo (same linker patch as for methods).
+  // PC-relative method patch info for kBootImageLinkTimePcRelative.
   ArenaDeque<X86PcRelativePatchInfo> boot_image_method_patches_;
   // PC-relative method patch info for kBssEntry.
   ArenaDeque<X86PcRelativePatchInfo> method_bss_entry_patches_;
@@ -657,8 +667,9 @@
   ArenaDeque<X86PcRelativePatchInfo> boot_image_string_patches_;
   // PC-relative String patch info for kBssEntry.
   ArenaDeque<X86PcRelativePatchInfo> string_bss_entry_patches_;
-  // PC-relative patch info for IntrinsicObjects.
-  ArenaDeque<X86PcRelativePatchInfo> boot_image_intrinsic_patches_;
+  // PC-relative patch info for IntrinsicObjects for the boot image,
+  // and for method/type/string patches for kBootImageRelRo otherwise.
+  ArenaDeque<X86PcRelativePatchInfo> boot_image_other_patches_;
 
   // Patches for string root accesses in JIT compiled code.
   ArenaDeque<PatchInfo<Label>> jit_string_patches_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 7c293b86..8518b6d 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -16,7 +16,7 @@
 
 #include "code_generator_x86_64.h"
 
-#include "art_method.h"
+#include "art_method-inl.h"
 #include "class_table.h"
 #include "code_generator_utils.h"
 #include "compiled_method.h"
@@ -26,11 +26,13 @@
 #include "heap_poisoning.h"
 #include "intrinsics.h"
 #include "intrinsics_x86_64.h"
+#include "jit/profiling_info.h"
 #include "linker/linker_patch.h"
 #include "lock_word.h"
 #include "mirror/array-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/object_reference.h"
+#include "scoped_thread_state_change-inl.h"
 #include "thread.h"
 #include "utils/assembler.h"
 #include "utils/stack_checks.h"
@@ -999,7 +1001,7 @@
       callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
       break;
     case HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative:
-      DCHECK(GetCompilerOptions().IsBootImage());
+      DCHECK(GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension());
       __ leal(temp.AsRegister<CpuRegister>(),
               Address::Absolute(kDummy32BitOffset, /* no_rip= */ false));
       RecordBootImageMethodPatch(invoke);
@@ -1015,6 +1017,7 @@
       __ movq(temp.AsRegister<CpuRegister>(),
               Address::Absolute(kDummy32BitOffset, /* no_rip= */ false));
       RecordMethodBssEntryPatch(invoke);
+      // No need for memory fence, thanks to the x86-64 memory model.
       break;
     }
     case HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress:
@@ -1067,6 +1070,9 @@
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   __ MaybeUnpoisonHeapReference(temp);
+
+  MaybeGenerateInlineCacheCheck(invoke, temp);
+
   // temp = temp->GetMethodAt(method_offset);
   __ movq(temp, Address(temp, method_offset));
   // call temp->GetEntryPoint();
@@ -1076,13 +1082,13 @@
 }
 
 void CodeGeneratorX86_64::RecordBootImageIntrinsicPatch(uint32_t intrinsic_data) {
-  boot_image_intrinsic_patches_.emplace_back(/* target_dex_file= */ nullptr, intrinsic_data);
-  __ Bind(&boot_image_intrinsic_patches_.back().label);
+  boot_image_other_patches_.emplace_back(/* target_dex_file= */ nullptr, intrinsic_data);
+  __ Bind(&boot_image_other_patches_.back().label);
 }
 
 void CodeGeneratorX86_64::RecordBootImageRelRoPatch(uint32_t boot_image_offset) {
-  boot_image_method_patches_.emplace_back(/* target_dex_file= */ nullptr, boot_image_offset);
-  __ Bind(&boot_image_method_patches_.back().label);
+  boot_image_other_patches_.emplace_back(/* target_dex_file= */ nullptr, boot_image_offset);
+  __ Bind(&boot_image_other_patches_.back().label);
 }
 
 void CodeGeneratorX86_64::RecordBootImageMethodPatch(HInvokeStaticOrDirect* invoke) {
@@ -1190,23 +1196,26 @@
       type_bss_entry_patches_.size() +
       boot_image_string_patches_.size() +
       string_bss_entry_patches_.size() +
-      boot_image_intrinsic_patches_.size();
+      boot_image_other_patches_.size();
   linker_patches->reserve(size);
-  if (GetCompilerOptions().IsBootImage()) {
+  if (GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension()) {
     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeMethodPatch>(
         boot_image_method_patches_, linker_patches);
     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeTypePatch>(
         boot_image_type_patches_, linker_patches);
     EmitPcRelativeLinkerPatches<linker::LinkerPatch::RelativeStringPatch>(
         boot_image_string_patches_, linker_patches);
-    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::IntrinsicReferencePatch>>(
-        boot_image_intrinsic_patches_, linker_patches);
   } else {
-    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::DataBimgRelRoPatch>>(
-        boot_image_method_patches_, linker_patches);
+    DCHECK(boot_image_method_patches_.empty());
     DCHECK(boot_image_type_patches_.empty());
     DCHECK(boot_image_string_patches_.empty());
-    DCHECK(boot_image_intrinsic_patches_.empty());
+  }
+  if (GetCompilerOptions().IsBootImage()) {
+    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::IntrinsicReferencePatch>>(
+        boot_image_other_patches_, linker_patches);
+  } else {
+    EmitPcRelativeLinkerPatches<NoDexFileAdapter<linker::LinkerPatch::DataBimgRelRoPatch>>(
+        boot_image_other_patches_, linker_patches);
   }
   EmitPcRelativeLinkerPatches<linker::LinkerPatch::MethodBssEntryPatch>(
       method_bss_entry_patches_, linker_patches);
@@ -1245,7 +1254,7 @@
   } else {
     __ movsd(Address(CpuRegister(RSP), stack_index), XmmRegister(reg_id));
   }
-  return GetFloatingPointSpillSlotSize();
+  return GetSlowPathFPWidth();
 }
 
 size_t CodeGeneratorX86_64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
@@ -1254,7 +1263,7 @@
   } else {
     __ movsd(XmmRegister(reg_id), Address(CpuRegister(RSP), stack_index));
   }
-  return GetFloatingPointSpillSlotSize();
+  return GetSlowPathFPWidth();
 }
 
 void CodeGeneratorX86_64::InvokeRuntime(QuickEntrypointEnum entrypoint,
@@ -1308,7 +1317,7 @@
         type_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
         boot_image_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
         string_bss_entry_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
-        boot_image_intrinsic_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
+        boot_image_other_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
         jit_string_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
         jit_class_patches_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)),
         fixups_to_jump_tables_(graph->GetAllocator()->Adapter(kArenaAllocCodeGenerator)) {
@@ -1337,6 +1346,55 @@
   return dwarf::Reg::X86_64Fp(static_cast<int>(reg));
 }
 
+void CodeGeneratorX86_64::MaybeIncrementHotness(bool is_frame_entry) {
+  if (GetCompilerOptions().CountHotnessInCompiledCode()) {
+    NearLabel overflow;
+    Register method = kMethodRegisterArgument;
+    if (!is_frame_entry) {
+      CHECK(RequiresCurrentMethod());
+      method = TMP;
+      __ movq(CpuRegister(method), Address(CpuRegister(RSP), kCurrentMethodStackOffset));
+    }
+    __ cmpw(Address(CpuRegister(method), ArtMethod::HotnessCountOffset().Int32Value()),
+            Immediate(ArtMethod::MaxCounter()));
+    __ j(kEqual, &overflow);
+    __ addw(Address(CpuRegister(method), ArtMethod::HotnessCountOffset().Int32Value()),
+            Immediate(1));
+    __ Bind(&overflow);
+  }
+
+  if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+    ScopedObjectAccess soa(Thread::Current());
+    ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
+    if (info != nullptr) {
+      uint64_t address = reinterpret_cast64<uint64_t>(info);
+      NearLabel done;
+      __ movq(CpuRegister(TMP), Immediate(address));
+      __ addw(Address(CpuRegister(TMP), ProfilingInfo::BaselineHotnessCountOffset().Int32Value()),
+              Immediate(1));
+      __ j(kCarryClear, &done);
+      if (HasEmptyFrame()) {
+        CHECK(is_frame_entry);
+        // Frame alignment, and the stub expects the method on the stack.
+        __ pushq(CpuRegister(RDI));
+        __ cfi().AdjustCFAOffset(kX86_64WordSize);
+        __ cfi().RelOffset(DWARFReg(RDI), 0);
+      } else if (!RequiresCurrentMethod()) {
+        CHECK(is_frame_entry);
+        __ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset), CpuRegister(RDI));
+      }
+      GenerateInvokeRuntime(
+          GetThreadOffset<kX86_64PointerSize>(kQuickCompileOptimized).Int32Value());
+      if (HasEmptyFrame()) {
+        __ popq(CpuRegister(RDI));
+        __ cfi().AdjustCFAOffset(-static_cast<int>(kX86_64WordSize));
+        __ cfi().Restore(DWARFReg(RDI));
+      }
+      __ Bind(&done);
+    }
+  }
+}
+
 void CodeGeneratorX86_64::GenerateFrameEntry() {
   __ cfi().SetCurrentCFAOffset(kX86_64WordSize);  // return address
   __ Bind(&frame_entry_label_);
@@ -1344,11 +1402,6 @@
       && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86_64);
   DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
 
-  if (GetCompilerOptions().CountHotnessInCompiledCode()) {
-    __ addw(Address(CpuRegister(kMethodRegisterArgument),
-                    ArtMethod::HotnessCountOffset().Int32Value()),
-            Immediate(1));
-  }
 
   if (!skip_overflow_check) {
     size_t reserved_bytes = GetStackOverflowReservedBytes(InstructionSet::kX86_64);
@@ -1356,52 +1409,54 @@
     RecordPcInfo(nullptr, 0);
   }
 
-  if (HasEmptyFrame()) {
-    return;
-  }
+  if (!HasEmptyFrame()) {
+    for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
+      Register reg = kCoreCalleeSaves[i];
+      if (allocated_registers_.ContainsCoreRegister(reg)) {
+        __ pushq(CpuRegister(reg));
+        __ cfi().AdjustCFAOffset(kX86_64WordSize);
+        __ cfi().RelOffset(DWARFReg(reg), 0);
+      }
+    }
 
-  for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
-    Register reg = kCoreCalleeSaves[i];
-    if (allocated_registers_.ContainsCoreRegister(reg)) {
-      __ pushq(CpuRegister(reg));
-      __ cfi().AdjustCFAOffset(kX86_64WordSize);
-      __ cfi().RelOffset(DWARFReg(reg), 0);
+    int adjust = GetFrameSize() - GetCoreSpillSize();
+    __ subq(CpuRegister(RSP), Immediate(adjust));
+    __ cfi().AdjustCFAOffset(adjust);
+    uint32_t xmm_spill_location = GetFpuSpillStart();
+    size_t xmm_spill_slot_size = GetCalleePreservedFPWidth();
+
+    for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
+      if (allocated_registers_.ContainsFloatingPointRegister(kFpuCalleeSaves[i])) {
+        int offset = xmm_spill_location + (xmm_spill_slot_size * i);
+        __ movsd(Address(CpuRegister(RSP), offset), XmmRegister(kFpuCalleeSaves[i]));
+        __ cfi().RelOffset(DWARFReg(kFpuCalleeSaves[i]), offset);
+      }
+    }
+
+    // Save the current method if we need it. Note that we do not
+    // do this in HCurrentMethod, as the instruction might have been removed
+    // in the SSA graph.
+    if (RequiresCurrentMethod()) {
+      CHECK(!HasEmptyFrame());
+      __ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset),
+              CpuRegister(kMethodRegisterArgument));
+    }
+
+    if (GetGraph()->HasShouldDeoptimizeFlag()) {
+      CHECK(!HasEmptyFrame());
+      // Initialize should_deoptimize flag to 0.
+      __ movl(Address(CpuRegister(RSP), GetStackOffsetOfShouldDeoptimizeFlag()), Immediate(0));
     }
   }
 
-  int adjust = GetFrameSize() - GetCoreSpillSize();
-  __ subq(CpuRegister(RSP), Immediate(adjust));
-  __ cfi().AdjustCFAOffset(adjust);
-  uint32_t xmm_spill_location = GetFpuSpillStart();
-  size_t xmm_spill_slot_size = GetFloatingPointSpillSlotSize();
-
-  for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
-    if (allocated_registers_.ContainsFloatingPointRegister(kFpuCalleeSaves[i])) {
-      int offset = xmm_spill_location + (xmm_spill_slot_size * i);
-      __ movsd(Address(CpuRegister(RSP), offset), XmmRegister(kFpuCalleeSaves[i]));
-      __ cfi().RelOffset(DWARFReg(kFpuCalleeSaves[i]), offset);
-    }
-  }
-
-  // Save the current method if we need it. Note that we do not
-  // do this in HCurrentMethod, as the instruction might have been removed
-  // in the SSA graph.
-  if (RequiresCurrentMethod()) {
-    __ movq(Address(CpuRegister(RSP), kCurrentMethodStackOffset),
-            CpuRegister(kMethodRegisterArgument));
-  }
-
-  if (GetGraph()->HasShouldDeoptimizeFlag()) {
-    // Initialize should_deoptimize flag to 0.
-    __ movl(Address(CpuRegister(RSP), GetStackOffsetOfShouldDeoptimizeFlag()), Immediate(0));
-  }
+  MaybeIncrementHotness(/* is_frame_entry= */ true);
 }
 
 void CodeGeneratorX86_64::GenerateFrameExit() {
   __ cfi().RememberState();
   if (!HasEmptyFrame()) {
     uint32_t xmm_spill_location = GetFpuSpillStart();
-    size_t xmm_spill_slot_size = GetFloatingPointSpillSlotSize();
+    size_t xmm_spill_slot_size = GetCalleePreservedFPWidth();
     for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
       if (allocated_registers_.ContainsFloatingPointRegister(kFpuCalleeSaves[i])) {
         int offset = xmm_spill_location + (xmm_spill_slot_size * i);
@@ -1541,11 +1596,7 @@
 
   HLoopInformation* info = block->GetLoopInformation();
   if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
-    if (codegen_->GetCompilerOptions().CountHotnessInCompiledCode()) {
-      __ movq(CpuRegister(TMP), Address(CpuRegister(RSP), 0));
-      __ addw(Address(CpuRegister(TMP), ArtMethod::HotnessCountOffset().Int32Value()),
-              Immediate(1));
-    }
+    codegen_->MaybeIncrementHotness(/* is_frame_entry= */ false);
     GenerateSuspendCheck(info->GetSuspendCheck(), successor);
     return;
   }
@@ -2315,28 +2366,41 @@
 }
 
 void InstructionCodeGeneratorX86_64::VisitReturn(HReturn* ret) {
-  if (kIsDebugBuild) {
-    switch (ret->InputAt(0)->GetType()) {
-      case DataType::Type::kReference:
-      case DataType::Type::kBool:
-      case DataType::Type::kUint8:
-      case DataType::Type::kInt8:
-      case DataType::Type::kUint16:
-      case DataType::Type::kInt16:
-      case DataType::Type::kInt32:
-      case DataType::Type::kInt64:
-        DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister<CpuRegister>().AsRegister(), RAX);
-        break;
+  switch (ret->InputAt(0)->GetType()) {
+    case DataType::Type::kReference:
+    case DataType::Type::kBool:
+    case DataType::Type::kUint8:
+    case DataType::Type::kInt8:
+    case DataType::Type::kUint16:
+    case DataType::Type::kInt16:
+    case DataType::Type::kInt32:
+    case DataType::Type::kInt64:
+      DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister<CpuRegister>().AsRegister(), RAX);
+      break;
 
-      case DataType::Type::kFloat32:
-      case DataType::Type::kFloat64:
-        DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>().AsFloatRegister(),
-                  XMM0);
-        break;
-
-      default:
-        LOG(FATAL) << "Unexpected return type " << ret->InputAt(0)->GetType();
+    case DataType::Type::kFloat32: {
+      DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>().AsFloatRegister(),
+                XMM0);
+      // To simplify callers of an OSR method, we put the return value in both
+      // floating point and core register.
+      if (GetGraph()->IsCompilingOsr()) {
+        __ movd(CpuRegister(RAX), XmmRegister(XMM0), /* is64bit= */ false);
+      }
+      break;
     }
+    case DataType::Type::kFloat64: {
+      DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>().AsFloatRegister(),
+                XMM0);
+      // To simplify callers of an OSR method, we put the return value in both
+      // floating point and core register.
+      if (GetGraph()->IsCompilingOsr()) {
+        __ movd(CpuRegister(RAX), XmmRegister(XMM0), /* is64bit= */ true);
+      }
+      break;
+    }
+
+    default:
+      LOG(FATAL) << "Unexpected return type " << ret->InputAt(0)->GetType();
   }
   codegen_->GenerateFrameExit();
 }
@@ -2505,6 +2569,31 @@
   invoke->GetLocations()->AddTemp(Location::RegisterLocation(RAX));
 }
 
+void CodeGeneratorX86_64::MaybeGenerateInlineCacheCheck(HInstruction* instruction,
+                                                        CpuRegister klass) {
+  DCHECK_EQ(RDI, klass.AsRegister());
+  // We know the destination of an intrinsic, so no need to record inline
+  // caches.
+  if (!instruction->GetLocations()->Intrinsified() &&
+      GetGraph()->IsCompilingBaseline() &&
+      !Runtime::Current()->IsAotCompiler()) {
+    ScopedObjectAccess soa(Thread::Current());
+    ProfilingInfo* info = GetGraph()->GetArtMethod()->GetProfilingInfo(kRuntimePointerSize);
+    if (info != nullptr) {
+      InlineCache* cache = info->GetInlineCache(instruction->GetDexPc());
+      uint64_t address = reinterpret_cast64<uint64_t>(cache);
+      NearLabel done;
+      __ movq(CpuRegister(TMP), Immediate(address));
+      // Fast path for a monomorphic cache.
+      __ cmpl(Address(CpuRegister(TMP), InlineCache::ClassesOffset().Int32Value()), klass);
+      __ j(kEqual, &done);
+      GenerateInvokeRuntime(
+          GetThreadOffset<kX86_64PointerSize>(kQuickUpdateInlineCache).Int32Value());
+      __ Bind(&done);
+    }
+  }
+}
+
 void InstructionCodeGeneratorX86_64::VisitInvokeInterface(HInvokeInterface* invoke) {
   // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
   LocationSummary* locations = invoke->GetLocations();
@@ -2513,11 +2602,6 @@
   Location receiver = locations->InAt(0);
   size_t class_offset = mirror::Object::ClassOffset().SizeValue();
 
-  // Set the hidden argument. This is safe to do this here, as RAX
-  // won't be modified thereafter, before the `call` instruction.
-  DCHECK_EQ(RAX, hidden_reg.AsRegister());
-  codegen_->Load64BitValue(hidden_reg, invoke->GetDexMethodIndex());
-
   if (receiver.IsStackSlot()) {
     __ movl(temp, Address(CpuRegister(RSP), receiver.GetStackIndex()));
     // /* HeapReference<Class> */ temp = temp->klass_
@@ -2535,6 +2619,15 @@
   // intact/accessible until the end of the marking phase (the
   // concurrent copying collector may not in the future).
   __ MaybeUnpoisonHeapReference(temp);
+
+  codegen_->MaybeGenerateInlineCacheCheck(invoke, temp);
+
+  // Set the hidden argument. This is safe to do this here, as RAX
+  // won't be modified thereafter, before the `call` instruction.
+  // We also do it after MaybeGenerateInlineCacheCheck, which may use RAX.
+  DCHECK_EQ(RAX, hidden_reg.AsRegister());
+  codegen_->Load64BitValue(hidden_reg, invoke->GetDexMethodIndex());
+
   // temp = temp->GetAddressOfIMT()
   __ movq(temp,
       Address(temp, mirror::Class::ImtPtrOffset(kX86_64PointerSize).Uint32Value()));
@@ -2891,7 +2984,7 @@
 
           __ movl(output, Immediate(kPrimIntMax));
           // if input >= (float)INT_MAX goto done
-          __ comiss(input, codegen_->LiteralFloatAddress(kPrimIntMax));
+          __ comiss(input, codegen_->LiteralFloatAddress(static_cast<float>(kPrimIntMax)));
           __ j(kAboveEqual, &done);
           // if input == NaN goto nan
           __ j(kUnordered, &nan);
@@ -2952,7 +3045,7 @@
 
           codegen_->Load64BitValue(output, kPrimLongMax);
           // if input >= (float)LONG_MAX goto done
-          __ comiss(input, codegen_->LiteralFloatAddress(kPrimLongMax));
+          __ comiss(input, codegen_->LiteralFloatAddress(static_cast<float>(kPrimLongMax)));
           __ j(kAboveEqual, &done);
           // if input == NaN goto nan
           __ j(kUnordered, &nan);
@@ -2973,7 +3066,8 @@
 
           codegen_->Load64BitValue(output, kPrimLongMax);
           // if input >= (double)LONG_MAX goto done
-          __ comisd(input, codegen_->LiteralDoubleAddress(kPrimLongMax));
+          __ comisd(input, codegen_->LiteralDoubleAddress(
+                static_cast<double>(kPrimLongMax)));
           __ j(kAboveEqual, &done);
           // if input == NaN goto nan
           __ j(kUnordered, &nan);
@@ -4882,6 +4976,15 @@
   HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
 }
 
+void LocationsBuilderX86_64::VisitStringBuilderAppend(HStringBuilderAppend* instruction) {
+  codegen_->CreateStringBuilderAppendLocations(instruction, Location::RegisterLocation(RAX));
+}
+
+void InstructionCodeGeneratorX86_64::VisitStringBuilderAppend(HStringBuilderAppend* instruction) {
+  __ movl(CpuRegister(RDI), Immediate(instruction->GetFormat()->GetValue()));
+  codegen_->InvokeRuntime(kQuickStringBuilderAppend, instruction, instruction->GetDexPc());
+}
+
 void LocationsBuilderX86_64::VisitUnresolvedInstanceFieldGet(
     HUnresolvedInstanceFieldGet* instruction) {
   FieldAccessCallingConventionX86_64 calling_convention;
@@ -5143,13 +5246,11 @@
 
   bool needs_write_barrier =
       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
-  bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
+  bool needs_type_check = instruction->NeedsTypeCheck();
 
   LocationSummary* locations = new (GetGraph()->GetAllocator()) LocationSummary(
       instruction,
-      may_need_runtime_call_for_type_check ?
-          LocationSummary::kCallOnSlowPath :
-          LocationSummary::kNoCall);
+      needs_type_check ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall);
 
   locations->SetInAt(0, Location::RequiresRegister());
   locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
@@ -5173,12 +5274,9 @@
   Location index = locations->InAt(1);
   Location value = locations->InAt(2);
   DataType::Type value_type = instruction->GetComponentType();
-  bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
+  bool needs_type_check = instruction->NeedsTypeCheck();
   bool needs_write_barrier =
       CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
-  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-  uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
-  uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
 
   switch (value_type) {
     case DataType::Type::kBool:
@@ -5220,30 +5318,30 @@
         __ movl(address, Immediate(0));
         codegen_->MaybeRecordImplicitNullCheck(instruction);
         DCHECK(!needs_write_barrier);
-        DCHECK(!may_need_runtime_call_for_type_check);
+        DCHECK(!needs_type_check);
         break;
       }
 
       DCHECK(needs_write_barrier);
       CpuRegister register_value = value.AsRegister<CpuRegister>();
-      // We cannot use a NearLabel for `done`, as its range may be too
-      // short when Baker read barriers are enabled.
-      Label done;
-      NearLabel not_null, do_put;
-      SlowPathCode* slow_path = nullptr;
       Location temp_loc = locations->GetTemp(0);
       CpuRegister temp = temp_loc.AsRegister<CpuRegister>();
-      if (may_need_runtime_call_for_type_check) {
+
+      bool can_value_be_null = instruction->GetValueCanBeNull();
+      NearLabel do_store;
+      if (can_value_be_null) {
+        __ testl(register_value, register_value);
+        __ j(kEqual, &do_store);
+      }
+
+      SlowPathCode* slow_path = nullptr;
+      if (needs_type_check) {
         slow_path = new (codegen_->GetScopedAllocator()) ArraySetSlowPathX86_64(instruction);
         codegen_->AddSlowPath(slow_path);
-        if (instruction->GetValueCanBeNull()) {
-          __ testl(register_value, register_value);
-          __ j(kNotEqual, &not_null);
-          __ movl(address, Immediate(0));
-          codegen_->MaybeRecordImplicitNullCheck(instruction);
-          __ jmp(&done);
-          __ Bind(&not_null);
-        }
+
+        const uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+        const uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
+        const uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
 
         // Note that when Baker read barriers are enabled, the type
         // checks are performed without read barriers.  This is fine,
@@ -5266,6 +5364,7 @@
         __ cmpl(temp, Address(register_value, class_offset));
 
         if (instruction->StaticTypeOfArrayIsObjectArray()) {
+          NearLabel do_put;
           __ j(kEqual, &do_put);
           // If heap poisoning is enabled, the `temp` reference has
           // not been unpoisoned yet; unpoison it now.
@@ -5282,21 +5381,27 @@
         }
       }
 
+      CpuRegister card = locations->GetTemp(1).AsRegister<CpuRegister>();
+      codegen_->MarkGCCard(
+          temp, card, array, value.AsRegister<CpuRegister>(), /* value_can_be_null= */ false);
+
+      if (can_value_be_null) {
+        DCHECK(do_store.IsLinked());
+        __ Bind(&do_store);
+      }
+
+      Location source = value;
       if (kPoisonHeapReferences) {
         __ movl(temp, register_value);
         __ PoisonHeapReference(temp);
-        __ movl(address, temp);
-      } else {
-        __ movl(address, register_value);
-      }
-      if (!may_need_runtime_call_for_type_check) {
-        codegen_->MaybeRecordImplicitNullCheck(instruction);
+        source = temp_loc;
       }
 
-      CpuRegister card = locations->GetTemp(1).AsRegister<CpuRegister>();
-      codegen_->MarkGCCard(
-          temp, card, array, value.AsRegister<CpuRegister>(), instruction->GetValueCanBeNull());
-      __ Bind(&done);
+      __ movl(address, source.AsRegister<CpuRegister>());
+
+      if (can_value_be_null || !needs_type_check) {
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+      }
 
       if (slow_path != nullptr) {
         __ Bind(slow_path->GetExitLabel());
@@ -5822,13 +5927,12 @@
   constexpr size_t status_lsb_position = SubtypeCheckBits::BitStructSizeOf();
   const size_t status_byte_offset =
       mirror::Class::StatusOffset().SizeValue() + (status_lsb_position / kBitsPerByte);
-  constexpr uint32_t shifted_initialized_value =
-      enum_cast<uint32_t>(ClassStatus::kInitialized) << (status_lsb_position % kBitsPerByte);
+  constexpr uint32_t shifted_visibly_initialized_value =
+      enum_cast<uint32_t>(ClassStatus::kVisiblyInitialized) << (status_lsb_position % kBitsPerByte);
 
-  __ cmpb(Address(class_reg,  status_byte_offset), Immediate(shifted_initialized_value));
+  __ cmpb(Address(class_reg,  status_byte_offset), Immediate(shifted_visibly_initialized_value));
   __ j(kBelow, slow_path->GetEntryLabel());
   __ Bind(slow_path->GetExitLabel());
-  // No need for memory fence, thanks to the x86-64 memory model.
 }
 
 void InstructionCodeGeneratorX86_64::GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check,
@@ -5952,7 +6056,8 @@
       break;
     }
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
-      DCHECK(codegen_->GetCompilerOptions().IsBootImage());
+      DCHECK(codegen_->GetCompilerOptions().IsBootImage() ||
+             codegen_->GetCompilerOptions().IsBootImageExtension());
       DCHECK_EQ(read_barrier_option, kWithoutReadBarrier);
       __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
       codegen_->RecordBootImageTypePatch(cls);
@@ -5969,6 +6074,7 @@
       Label* fixup_label = codegen_->NewTypeBssEntryPatch(cls);
       // /* GcRoot<mirror::Class> */ out = *address  /* PC-relative */
       GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, read_barrier_option);
+      // No need for memory fence, thanks to the x86-64 memory model.
       generate_null_check = true;
       break;
     }
@@ -6105,7 +6211,8 @@
 
   switch (load->GetLoadKind()) {
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
-      DCHECK(codegen_->GetCompilerOptions().IsBootImage());
+      DCHECK(codegen_->GetCompilerOptions().IsBootImage() ||
+             codegen_->GetCompilerOptions().IsBootImageExtension());
       __ leal(out, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
       codegen_->RecordBootImageStringPatch(load);
       return;
@@ -6122,6 +6229,7 @@
       Label* fixup_label = codegen_->NewStringBssEntryPatch(load);
       // /* GcRoot<mirror::Class> */ out = *address  /* PC-relative */
       GenerateGcRootFieldLoad(load, out_loc, address, fixup_label, kCompilerReadBarrierOption);
+      // No need for memory fence, thanks to the x86-64 memory model.
       SlowPathCode* slow_path = new (codegen_->GetScopedAllocator()) LoadStringSlowPathX86_64(load);
       codegen_->AddSlowPath(slow_path);
       __ testl(out, out);
@@ -7650,6 +7758,22 @@
   }
 }
 
+bool LocationsBuilderX86_64::CpuHasAvxFeatureFlag() {
+  return codegen_->GetInstructionSetFeatures().HasAVX();
+}
+
+bool LocationsBuilderX86_64::CpuHasAvx2FeatureFlag() {
+  return codegen_->GetInstructionSetFeatures().HasAVX2();
+}
+
+bool InstructionCodeGeneratorX86_64::CpuHasAvxFeatureFlag() {
+  return codegen_->GetInstructionSetFeatures().HasAVX();
+}
+
+bool InstructionCodeGeneratorX86_64::CpuHasAvx2FeatureFlag() {
+  return codegen_->GetInstructionSetFeatures().HasAVX2();
+}
+
 #undef __
 
 }  // namespace x86_64
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index f74e130..2e8d9b3 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -177,6 +177,8 @@
   void HandleShift(HBinaryOperation* operation);
   void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info);
   void HandleFieldGet(HInstruction* instruction);
+  bool CpuHasAvxFeatureFlag();
+  bool CpuHasAvx2FeatureFlag();
 
   CodeGeneratorX86_64* const codegen_;
   InvokeDexCallingConventionVisitorX86_64 parameter_visitor_;
@@ -287,6 +289,9 @@
 
   void HandleGoto(HInstruction* got, HBasicBlock* successor);
 
+  bool CpuHasAvxFeatureFlag();
+  bool CpuHasAvx2FeatureFlag();
+
   X86_64Assembler* const assembler_;
   CodeGeneratorX86_64* const codegen_;
 
@@ -333,12 +338,16 @@
     return kX86_64WordSize;
   }
 
-  size_t GetFloatingPointSpillSlotSize() const override {
+  size_t GetSlowPathFPWidth() const override {
     return GetGraph()->HasSIMD()
         ? 2 * kX86_64WordSize   // 16 bytes == 2 x86_64 words for each spill
         : 1 * kX86_64WordSize;  //  8 bytes == 1 x86_64 words for each spill
   }
 
+  size_t GetCalleePreservedFPWidth() const override {
+    return 1 * kX86_64WordSize;
+  }
+
   HGraphVisitor* GetLocationBuilder() override {
     return &location_builder_;
   }
@@ -591,6 +600,10 @@
   void GenerateNop() override;
   void GenerateImplicitNullCheck(HNullCheck* instruction) override;
   void GenerateExplicitNullCheck(HNullCheck* instruction) override;
+  void MaybeGenerateInlineCacheCheck(HInstruction* instruction, CpuRegister cls);
+
+
+  void MaybeIncrementHotness(bool is_frame_entry);
 
   // When we don't know the proper offset for the value, we use kDummy32BitOffset.
   // We will fix this up in the linker later to have the right value.
@@ -613,8 +626,7 @@
   // Used for fixups to the constant area.
   int constant_area_start_;
 
-  // PC-relative method patch info for kBootImageLinkTimePcRelative/kBootImageRelRo.
-  // Also used for type/string patches for kBootImageRelRo (same linker patch as for methods).
+  // PC-relative method patch info for kBootImageLinkTimePcRelative.
   ArenaDeque<PatchInfo<Label>> boot_image_method_patches_;
   // PC-relative method patch info for kBssEntry.
   ArenaDeque<PatchInfo<Label>> method_bss_entry_patches_;
@@ -626,8 +638,9 @@
   ArenaDeque<PatchInfo<Label>> boot_image_string_patches_;
   // PC-relative String patch info for kBssEntry.
   ArenaDeque<PatchInfo<Label>> string_bss_entry_patches_;
-  // PC-relative patch info for IntrinsicObjects.
-  ArenaDeque<PatchInfo<Label>> boot_image_intrinsic_patches_;
+  // PC-relative patch info for IntrinsicObjects for the boot image,
+  // and for method/type/string patches for kBootImageRelRo otherwise.
+  ArenaDeque<PatchInfo<Label>> boot_image_other_patches_;
 
   // Patches for string literals in JIT compiled code.
   ArenaDeque<PatchInfo<Label>> jit_string_patches_;
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index b5a7c13..d9b4f79 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -29,8 +29,6 @@
 #include "register_allocator_linear_scan.h"
 #include "utils/arm/assembler_arm_vixl.h"
 #include "utils/arm/managed_register_arm.h"
-#include "utils/mips/managed_register_mips.h"
-#include "utils/mips64/managed_register_mips64.h"
 #include "utils/x86/managed_register_x86.h"
 
 #include "gtest/gtest.h"
@@ -55,12 +53,6 @@
 #ifdef ART_ENABLE_CODEGEN_x86_64
     CodegenTargetConfig(InstructionSet::kX86_64, create_codegen_x86_64),
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips
-    CodegenTargetConfig(InstructionSet::kMips, create_codegen_mips),
-#endif
-#ifdef ART_ENABLE_CODEGEN_mips64
-    CodegenTargetConfig(InstructionSet::kMips64, create_codegen_mips64)
-#endif
   };
 
   for (const CodegenTargetConfig& test_config : test_config_candidates) {
@@ -834,6 +826,7 @@
   EXPECT_TRUE(features->Has(vixl::CPUFeatures::kCRC32));
   EXPECT_TRUE(features->Has(vixl::CPUFeatures::kDotProduct));
   EXPECT_TRUE(features->Has(vixl::CPUFeatures::kFPHalf));
+  EXPECT_TRUE(features->Has(vixl::CPUFeatures::kNEONHalf));
   EXPECT_TRUE(features->Has(vixl::CPUFeatures::kAtomics));
 }
 
@@ -847,70 +840,53 @@
   EXPECT_TRUE(features->Has(vixl::CPUFeatures::kCRC32));
   EXPECT_FALSE(features->Has(vixl::CPUFeatures::kDotProduct));
   EXPECT_FALSE(features->Has(vixl::CPUFeatures::kFPHalf));
+  EXPECT_FALSE(features->Has(vixl::CPUFeatures::kNEONHalf));
   EXPECT_FALSE(features->Has(vixl::CPUFeatures::kAtomics));
 }
 
-#endif
+constexpr static size_t kExpectedFPSpillSize = 8 * vixl::aarch64::kDRegSizeInBytes;
 
-#ifdef ART_ENABLE_CODEGEN_mips
-TEST_F(CodegenTest, MipsClobberRA) {
-  OverrideInstructionSetFeatures(InstructionSet::kMips, "mips32r");
-  CHECK(!instruction_set_features_->AsMipsInstructionSetFeatures()->IsR6());
-  if (!CanExecute(InstructionSet::kMips)) {
-    // HMipsComputeBaseMethodAddress and the NAL instruction behind it
-    // should only be generated on non-R6.
-    return;
-  }
-
+// The following two tests check that for both SIMD and non-SIMD graphs exactly 64-bit is
+// allocated on stack per callee-saved FP register to be preserved in the frame entry as
+// ABI states.
+TEST_F(CodegenTest, ARM64FrameSizeSIMD) {
+  OverrideInstructionSetFeatures(InstructionSet::kArm64, "default");
   HGraph* graph = CreateGraph();
+  arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
 
-  HBasicBlock* entry_block = new (GetAllocator()) HBasicBlock(graph);
-  graph->AddBlock(entry_block);
-  graph->SetEntryBlock(entry_block);
-  entry_block->AddInstruction(new (GetAllocator()) HGoto());
+  codegen.Initialize();
+  graph->SetHasSIMD(true);
 
-  HBasicBlock* block = new (GetAllocator()) HBasicBlock(graph);
-  graph->AddBlock(block);
+  DCHECK_EQ(arm64::callee_saved_fp_registers.GetCount(), 8);
+  vixl::aarch64::CPURegList reg_list = arm64::callee_saved_fp_registers;
+  while (!reg_list.IsEmpty()) {
+    uint32_t reg_code = reg_list.PopLowestIndex().GetCode();
+    codegen.AddAllocatedRegister(Location::FpuRegisterLocation(reg_code));
+  }
+  codegen.ComputeSpillMask();
 
-  HBasicBlock* exit_block = new (GetAllocator()) HBasicBlock(graph);
-  graph->AddBlock(exit_block);
-  graph->SetExitBlock(exit_block);
-  exit_block->AddInstruction(new (GetAllocator()) HExit());
-
-  entry_block->AddSuccessor(block);
-  block->AddSuccessor(exit_block);
-
-  // To simplify matters, don't create PC-relative HLoadClass or HLoadString.
-  // Instead, generate HMipsComputeBaseMethodAddress directly.
-  HMipsComputeBaseMethodAddress* base = new (GetAllocator()) HMipsComputeBaseMethodAddress();
-  block->AddInstruction(base);
-  // HMipsComputeBaseMethodAddress is defined as int, so just make the
-  // compiled method return it.
-  block->AddInstruction(new (GetAllocator()) HReturn(base));
-
-  graph->BuildDominatorTree();
-
-  mips::CodeGeneratorMIPS codegenMIPS(graph, *compiler_options_);
-  // Since there isn't HLoadClass or HLoadString, we need to manually indicate
-  // that RA is clobbered and the method entry code should generate a stack frame
-  // and preserve RA in it. And this is what we're testing here.
-  codegenMIPS.ClobberRA();
-  // Without ClobberRA() the code would be:
-  //   nal              # Sets RA to point to the jr instruction below
-  //   move  v0, ra     # and the CPU falls into an infinite loop.
-  //   jr    ra
-  //   nop
-  // The expected code is:
-  //   addiu sp, sp, -16
-  //   sw    ra, 12(sp)
-  //   sw    a0, 0(sp)
-  //   nal              # Sets RA to point to the lw instruction below.
-  //   move  v0, ra
-  //   lw    ra, 12(sp)
-  //   jr    ra
-  //   addiu sp, sp, 16
-  RunCode(&codegenMIPS, graph, [](HGraph*) {}, false, 0);
+  EXPECT_EQ(codegen.GetFpuSpillSize(), kExpectedFPSpillSize);
 }
+
+TEST_F(CodegenTest, ARM64FrameSizeNoSIMD) {
+  OverrideInstructionSetFeatures(InstructionSet::kArm64, "default");
+  HGraph* graph = CreateGraph();
+  arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+
+  codegen.Initialize();
+  graph->SetHasSIMD(false);
+
+  DCHECK_EQ(arm64::callee_saved_fp_registers.GetCount(), 8);
+  vixl::aarch64::CPURegList reg_list = arm64::callee_saved_fp_registers;
+  while (!reg_list.IsEmpty()) {
+    uint32_t reg_code = reg_list.PopLowestIndex().GetCode();
+    codegen.AddAllocatedRegister(Location::FpuRegisterLocation(reg_code));
+  }
+  codegen.ComputeSpillMask();
+
+  EXPECT_EQ(codegen.GetFpuSpillSize(), kExpectedFPSpillSize);
+}
+
 #endif
 
 }  // namespace art
diff --git a/compiler/optimizing/codegen_test_utils.h b/compiler/optimizing/codegen_test_utils.h
index dde39d4..9fbd7d6 100644
--- a/compiler/optimizing/codegen_test_utils.h
+++ b/compiler/optimizing/codegen_test_utils.h
@@ -19,8 +19,6 @@
 
 #include "arch/arm/registers_arm.h"
 #include "arch/instruction_set.h"
-#include "arch/mips/registers_mips.h"
-#include "arch/mips64/registers_mips64.h"
 #include "arch/x86/registers_x86.h"
 #include "code_simulator.h"
 #include "code_simulator_container.h"
@@ -45,14 +43,6 @@
 #include "code_generator_x86_64.h"
 #endif
 
-#ifdef ART_ENABLE_CODEGEN_mips
-#include "code_generator_mips.h"
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_mips64
-#include "code_generator_mips64.h"
-#endif
-
 namespace art {
 
 typedef CodeGenerator* (*CreateCodegenFn)(HGraph*, const CompilerOptions&);
@@ -346,18 +336,6 @@
 }
 #endif
 
-#ifdef ART_ENABLE_CODEGEN_mips
-CodeGenerator* create_codegen_mips(HGraph* graph, const CompilerOptions& compiler_options) {
-  return new (graph->GetAllocator()) mips::CodeGeneratorMIPS(graph, compiler_options);
-}
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_mips64
-CodeGenerator* create_codegen_mips64(HGraph* graph, const CompilerOptions& compiler_options) {
-  return new (graph->GetAllocator()) mips64::CodeGeneratorMIPS64(graph, compiler_options);
-}
-#endif
-
 }  // namespace art
 
 #endif  // ART_COMPILER_OPTIMIZING_CODEGEN_TEST_UTILS_H_
diff --git a/compiler/optimizing/common_arm.h b/compiler/optimizing/common_arm.h
index 7d3af95..320915e 100644
--- a/compiler/optimizing/common_arm.h
+++ b/compiler/optimizing/common_arm.h
@@ -17,7 +17,6 @@
 #ifndef ART_COMPILER_OPTIMIZING_COMMON_ARM_H_
 #define ART_COMPILER_OPTIMIZING_COMMON_ARM_H_
 
-#include "dwarf/register.h"
 #include "instruction_simplifier_shared.h"
 #include "locations.h"
 #include "nodes.h"
@@ -38,14 +37,6 @@
 
 static_assert(vixl::aarch32::kSpCode == SP, "vixl::aarch32::kSpCode must equal ART's SP");
 
-inline dwarf::Reg DWARFReg(vixl::aarch32::Register reg) {
-  return dwarf::Reg::ArmCore(static_cast<int>(reg.GetCode()));
-}
-
-inline dwarf::Reg DWARFReg(vixl::aarch32::SRegister reg) {
-  return dwarf::Reg::ArmFp(static_cast<int>(reg.GetCode()));
-}
-
 inline vixl::aarch32::Register HighRegisterFrom(Location location) {
   DCHECK(location.IsRegisterPair()) << location;
   return vixl::aarch32::Register(location.AsRegisterPairHigh<vixl::aarch32::Register>());
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 5556f16..41f284f 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -87,36 +87,41 @@
                       instr->InputAt(input_index)->GetType());
 }
 
-inline vixl::aarch64::FPRegister DRegisterFrom(Location location) {
+inline vixl::aarch64::VRegister DRegisterFrom(Location location) {
   DCHECK(location.IsFpuRegister()) << location;
-  return vixl::aarch64::FPRegister::GetDRegFromCode(location.reg());
+  return vixl::aarch64::VRegister::GetDRegFromCode(location.reg());
 }
 
-inline vixl::aarch64::FPRegister QRegisterFrom(Location location) {
+inline vixl::aarch64::VRegister QRegisterFrom(Location location) {
   DCHECK(location.IsFpuRegister()) << location;
-  return vixl::aarch64::FPRegister::GetQRegFromCode(location.reg());
+  return vixl::aarch64::VRegister::GetQRegFromCode(location.reg());
 }
 
-inline vixl::aarch64::FPRegister VRegisterFrom(Location location) {
+inline vixl::aarch64::VRegister VRegisterFrom(Location location) {
   DCHECK(location.IsFpuRegister()) << location;
-  return vixl::aarch64::FPRegister::GetVRegFromCode(location.reg());
+  return vixl::aarch64::VRegister::GetVRegFromCode(location.reg());
 }
 
-inline vixl::aarch64::FPRegister SRegisterFrom(Location location) {
+inline vixl::aarch64::VRegister SRegisterFrom(Location location) {
   DCHECK(location.IsFpuRegister()) << location;
-  return vixl::aarch64::FPRegister::GetSRegFromCode(location.reg());
+  return vixl::aarch64::VRegister::GetSRegFromCode(location.reg());
 }
 
-inline vixl::aarch64::FPRegister FPRegisterFrom(Location location, DataType::Type type) {
+inline vixl::aarch64::VRegister HRegisterFrom(Location location) {
+  DCHECK(location.IsFpuRegister()) << location;
+  return vixl::aarch64::VRegister::GetHRegFromCode(location.reg());
+}
+
+inline vixl::aarch64::VRegister FPRegisterFrom(Location location, DataType::Type type) {
   DCHECK(DataType::IsFloatingPointType(type)) << type;
   return type == DataType::Type::kFloat64 ? DRegisterFrom(location) : SRegisterFrom(location);
 }
 
-inline vixl::aarch64::FPRegister OutputFPRegister(HInstruction* instr) {
+inline vixl::aarch64::VRegister OutputFPRegister(HInstruction* instr) {
   return FPRegisterFrom(instr->GetLocations()->Out(), instr->GetType());
 }
 
-inline vixl::aarch64::FPRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
+inline vixl::aarch64::VRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
   return FPRegisterFrom(instr->GetLocations()->InAt(input_index),
                         instr->InputAt(input_index)->GetType());
 }
@@ -201,7 +206,7 @@
   return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.GetCode()));
 }
 
-inline Location LocationFrom(const vixl::aarch64::FPRegister& fpreg) {
+inline Location LocationFrom(const vixl::aarch64::VRegister& fpreg) {
   return Location::FpuRegisterLocation(fpreg.GetCode());
 }
 
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index 09e7cab..2031707 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -217,6 +217,7 @@
 }
 
 void InstructionWithAbsorbingInputSimplifier::VisitAnd(HAnd* instruction) {
+  DataType::Type type = instruction->GetType();
   HConstant* input_cst = instruction->GetConstantRight();
   if ((input_cst != nullptr) && input_cst->IsZeroBitPattern()) {
     // Replace code looking like
@@ -226,6 +227,25 @@
     instruction->ReplaceWith(input_cst);
     instruction->GetBlock()->RemoveInstruction(instruction);
   }
+
+  HInstruction* left = instruction->GetLeft();
+  HInstruction* right = instruction->GetRight();
+
+  if (left->IsNot() ^ right->IsNot()) {
+    // Replace code looking like
+    //    NOT notsrc, src
+    //    AND dst, notsrc, src
+    // with
+    //    CONSTANT 0
+    HInstruction* hnot = (left->IsNot() ? left : right);
+    HInstruction* hother = (left->IsNot() ? right : left);
+    HInstruction* src = hnot->AsNot()->GetInput();
+
+    if (src == hother) {
+      instruction->ReplaceWith(GetGraph()->GetConstant(type, 0));
+      instruction->GetBlock()->RemoveInstruction(instruction);
+    }
+  }
 }
 
 void InstructionWithAbsorbingInputSimplifier::VisitCompare(HCompare* instruction) {
diff --git a/compiler/optimizing/emit_swap_mips_test.cc b/compiler/optimizing/emit_swap_mips_test.cc
deleted file mode 100644
index 63a370a..0000000
--- a/compiler/optimizing/emit_swap_mips_test.cc
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/arena_allocator.h"
-#include "code_generator_mips.h"
-#include "optimizing_unit_test.h"
-#include "parallel_move_resolver.h"
-#include "utils/assembler_test_base.h"
-#include "utils/mips/assembler_mips.h"
-
-#include "gtest/gtest.h"
-
-namespace art {
-
-class EmitSwapMipsTest : public OptimizingUnitTest {
- public:
-  void SetUp() override {
-    instruction_set_ = InstructionSet::kMips;
-    instruction_set_features_ = MipsInstructionSetFeatures::FromCppDefines();
-    OptimizingUnitTest::SetUp();
-    graph_ = CreateGraph();
-    codegen_.reset(
-        new (graph_->GetAllocator()) mips::CodeGeneratorMIPS(graph_, *compiler_options_));
-    moves_ = new (GetAllocator()) HParallelMove(GetAllocator());
-    test_helper_.reset(
-        new AssemblerTestInfrastructure(GetArchitectureString(),
-                                        GetAssemblerCmdName(),
-                                        GetAssemblerParameters(),
-                                        GetObjdumpCmdName(),
-                                        GetObjdumpParameters(),
-                                        GetDisassembleCmdName(),
-                                        GetDisassembleParameters(),
-                                        GetAssemblyHeader()));
-  }
-
-  void TearDown() override {
-    test_helper_.reset();
-    codegen_.reset();
-    graph_ = nullptr;
-    ResetPoolAndAllocator();
-    OptimizingUnitTest::TearDown();
-  }
-
-  // Get the typically used name for this architecture.
-  std::string GetArchitectureString() {
-    return "mips";
-  }
-
-  // Get the name of the assembler.
-  std::string GetAssemblerCmdName() {
-    return "as";
-  }
-
-  // Switches to the assembler command.
-  std::string GetAssemblerParameters() {
-    return " --no-warn -32 -march=mips32r2";
-  }
-
-  // Get the name of the objdump.
-  std::string GetObjdumpCmdName() {
-    return "objdump";
-  }
-
-  // Switches to the objdump command.
-  std::string GetObjdumpParameters() {
-    return " -h";
-  }
-
-  // Get the name of the objdump.
-  std::string GetDisassembleCmdName() {
-    return "objdump";
-  }
-
-  // Switches to the objdump command.
-  std::string GetDisassembleParameters() {
-    return " -D -bbinary -mmips:isa32r2";
-  }
-
-  // No need for assembly header here.
-  const char* GetAssemblyHeader() {
-    return nullptr;
-  }
-
-  void DriverWrapper(HParallelMove* move,
-                     const std::string& assembly_text,
-                     const std::string& test_name) {
-    codegen_->GetMoveResolver()->EmitNativeCode(move);
-    assembler_ = codegen_->GetAssembler();
-    assembler_->FinalizeCode();
-    std::unique_ptr<std::vector<uint8_t>> data(new std::vector<uint8_t>(assembler_->CodeSize()));
-    MemoryRegion code(&(*data)[0], data->size());
-    assembler_->FinalizeInstructions(code);
-    test_helper_->Driver(*data, assembly_text, test_name);
-  }
-
- protected:
-  HGraph* graph_;
-  HParallelMove* moves_;
-  std::unique_ptr<mips::CodeGeneratorMIPS> codegen_;
-  mips::MipsAssembler* assembler_;
-  std::unique_ptr<AssemblerTestInfrastructure> test_helper_;
-};
-
-TEST_F(EmitSwapMipsTest, TwoRegisters) {
-  moves_->AddMove(
-      Location::RegisterLocation(4),
-      Location::RegisterLocation(5),
-      DataType::Type::kInt32,
-      nullptr);
-  moves_->AddMove(
-      Location::RegisterLocation(5),
-      Location::RegisterLocation(4),
-      DataType::Type::kInt32,
-      nullptr);
-  const char* expected =
-      "or $t8, $a1, $zero\n"
-      "or $a1, $a0, $zero\n"
-      "or $a0, $t8, $zero\n";
-  DriverWrapper(moves_, expected, "TwoRegisters");
-}
-
-TEST_F(EmitSwapMipsTest, TwoRegisterPairs) {
-  moves_->AddMove(
-      Location::RegisterPairLocation(4, 5),
-      Location::RegisterPairLocation(6, 7),
-      DataType::Type::kInt64,
-      nullptr);
-  moves_->AddMove(
-      Location::RegisterPairLocation(6, 7),
-      Location::RegisterPairLocation(4, 5),
-      DataType::Type::kInt64,
-      nullptr);
-  const char* expected =
-      "or $t8, $a2, $zero\n"
-      "or $a2, $a0, $zero\n"
-      "or $a0, $t8, $zero\n"
-      "or $t8, $a3, $zero\n"
-      "or $a3, $a1, $zero\n"
-      "or $a1, $t8, $zero\n";
-  DriverWrapper(moves_, expected, "TwoRegisterPairs");
-}
-
-TEST_F(EmitSwapMipsTest, TwoFpuRegistersFloat) {
-  moves_->AddMove(
-      Location::FpuRegisterLocation(4),
-      Location::FpuRegisterLocation(2),
-      DataType::Type::kFloat32,
-      nullptr);
-  moves_->AddMove(
-      Location::FpuRegisterLocation(2),
-      Location::FpuRegisterLocation(4),
-      DataType::Type::kFloat32,
-      nullptr);
-  const char* expected =
-      "mov.s $f6, $f2\n"
-      "mov.s $f2, $f4\n"
-      "mov.s $f4, $f6\n";
-  DriverWrapper(moves_, expected, "TwoFpuRegistersFloat");
-}
-
-TEST_F(EmitSwapMipsTest, TwoFpuRegistersDouble) {
-  moves_->AddMove(
-      Location::FpuRegisterLocation(4),
-      Location::FpuRegisterLocation(2),
-      DataType::Type::kFloat64,
-      nullptr);
-  moves_->AddMove(
-      Location::FpuRegisterLocation(2),
-      Location::FpuRegisterLocation(4),
-      DataType::Type::kFloat64,
-      nullptr);
-  const char* expected =
-      "mov.d $f6, $f2\n"
-      "mov.d $f2, $f4\n"
-      "mov.d $f4, $f6\n";
-  DriverWrapper(moves_, expected, "TwoFpuRegistersDouble");
-}
-
-TEST_F(EmitSwapMipsTest, RegisterAndFpuRegister) {
-  moves_->AddMove(
-      Location::RegisterLocation(4),
-      Location::FpuRegisterLocation(2),
-      DataType::Type::kFloat32,
-      nullptr);
-  moves_->AddMove(
-      Location::FpuRegisterLocation(2),
-      Location::RegisterLocation(4),
-      DataType::Type::kFloat32,
-      nullptr);
-  const char* expected =
-      "or $t8, $a0, $zero\n"
-      "mfc1 $a0, $f2\n"
-      "mtc1 $t8, $f2\n";
-  DriverWrapper(moves_, expected, "RegisterAndFpuRegister");
-}
-
-TEST_F(EmitSwapMipsTest, RegisterPairAndFpuRegister) {
-  moves_->AddMove(
-      Location::RegisterPairLocation(4, 5),
-      Location::FpuRegisterLocation(4),
-      DataType::Type::kFloat64,
-      nullptr);
-  moves_->AddMove(
-      Location::FpuRegisterLocation(4),
-      Location::RegisterPairLocation(4, 5),
-      DataType::Type::kFloat64,
-      nullptr);
-  const char* expected =
-      "mfc1 $t8, $f4\n"
-      "mfc1 $at, $f5\n"
-      "mtc1 $a0, $f4\n"
-      "mtc1 $a1, $f5\n"
-      "or $a0, $t8, $zero\n"
-      "or $a1, $at, $zero\n";
-  DriverWrapper(moves_, expected, "RegisterPairAndFpuRegister");
-}
-
-TEST_F(EmitSwapMipsTest, TwoStackSlots) {
-  moves_->AddMove(
-      Location::StackSlot(52),
-      Location::StackSlot(48),
-      DataType::Type::kInt32,
-      nullptr);
-  moves_->AddMove(
-      Location::StackSlot(48),
-      Location::StackSlot(52),
-      DataType::Type::kInt32,
-      nullptr);
-  const char* expected =
-      "addiu $sp, $sp, -16\n"
-      "sw $v0, 0($sp)\n"
-      "lw $v0, 68($sp)\n"
-      "lw $t8, 64($sp)\n"
-      "sw $v0, 64($sp)\n"
-      "sw $t8, 68($sp)\n"
-      "lw $v0, 0($sp)\n"
-      "addiu $sp, $sp, 16\n";
-  DriverWrapper(moves_, expected, "TwoStackSlots");
-}
-
-TEST_F(EmitSwapMipsTest, TwoDoubleStackSlots) {
-  moves_->AddMove(
-      Location::DoubleStackSlot(56),
-      Location::DoubleStackSlot(48),
-      DataType::Type::kInt64,
-      nullptr);
-  moves_->AddMove(
-      Location::DoubleStackSlot(48),
-      Location::DoubleStackSlot(56),
-      DataType::Type::kInt64,
-      nullptr);
-  const char* expected =
-      "addiu $sp, $sp, -16\n"
-      "sw $v0, 0($sp)\n"
-      "lw $v0, 72($sp)\n"
-      "lw $t8, 64($sp)\n"
-      "sw $v0, 64($sp)\n"
-      "sw $t8, 72($sp)\n"
-      "lw $v0, 76($sp)\n"
-      "lw $t8, 68($sp)\n"
-      "sw $v0, 68($sp)\n"
-      "sw $t8, 76($sp)\n"
-      "lw $v0, 0($sp)\n"
-      "addiu $sp, $sp, 16\n";
-  DriverWrapper(moves_, expected, "TwoDoubleStackSlots");
-}
-
-TEST_F(EmitSwapMipsTest, RegisterAndStackSlot) {
-  moves_->AddMove(
-      Location::RegisterLocation(4),
-      Location::StackSlot(48),
-      DataType::Type::kInt32,
-      nullptr);
-  moves_->AddMove(
-      Location::StackSlot(48),
-      Location::RegisterLocation(4),
-      DataType::Type::kInt32,
-      nullptr);
-  const char* expected =
-      "or $t8, $a0, $zero\n"
-      "lw $a0, 48($sp)\n"
-      "sw $t8, 48($sp)\n";
-  DriverWrapper(moves_, expected, "RegisterAndStackSlot");
-}
-
-TEST_F(EmitSwapMipsTest, RegisterPairAndDoubleStackSlot) {
-  moves_->AddMove(
-      Location::RegisterPairLocation(4, 5),
-      Location::DoubleStackSlot(32),
-      DataType::Type::kInt64,
-      nullptr);
-  moves_->AddMove(
-      Location::DoubleStackSlot(32),
-      Location::RegisterPairLocation(4, 5),
-      DataType::Type::kInt64,
-      nullptr);
-  const char* expected =
-      "or $t8, $a0, $zero\n"
-      "lw $a0, 32($sp)\n"
-      "sw $t8, 32($sp)\n"
-      "or $t8, $a1, $zero\n"
-      "lw $a1, 36($sp)\n"
-      "sw $t8, 36($sp)\n";
-  DriverWrapper(moves_, expected, "RegisterPairAndDoubleStackSlot");
-}
-
-TEST_F(EmitSwapMipsTest, FpuRegisterAndStackSlot) {
-  moves_->AddMove(
-      Location::FpuRegisterLocation(4),
-      Location::StackSlot(48),
-      DataType::Type::kFloat32,
-      nullptr);
-  moves_->AddMove(
-      Location::StackSlot(48),
-      Location::FpuRegisterLocation(4),
-      DataType::Type::kFloat32,
-      nullptr);
-  const char* expected =
-      "mov.s $f6, $f4\n"
-      "lwc1 $f4, 48($sp)\n"
-      "swc1 $f6, 48($sp)\n";
-  DriverWrapper(moves_, expected, "FpuRegisterAndStackSlot");
-}
-
-TEST_F(EmitSwapMipsTest, FpuRegisterAndDoubleStackSlot) {
-  moves_->AddMove(
-      Location::FpuRegisterLocation(4),
-      Location::DoubleStackSlot(48),
-      DataType::Type::kFloat64,
-      nullptr);
-  moves_->AddMove(
-      Location::DoubleStackSlot(48),
-      Location::FpuRegisterLocation(4),
-      DataType::Type::kFloat64,
-      nullptr);
-  const char* expected =
-      "mov.d $f6, $f4\n"
-      "ldc1 $f4, 48($sp)\n"
-      "sdc1 $f6, 48($sp)\n";
-  DriverWrapper(moves_, expected, "FpuRegisterAndDoubleStackSlot");
-}
-
-}  // namespace art
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 01d9603..95cfe3e 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -497,7 +497,7 @@
   }
 
   // Ensure that reference type instructions have reference type info.
-  if (instruction->GetType() == DataType::Type::kReference) {
+  if (check_reference_type_info_ && instruction->GetType() == DataType::Type::kReference) {
     if (!instruction->GetReferenceTypeInfo().IsValid()) {
       AddError(StringPrintf("Reference type instruction %s:%d does not have "
                             "valid reference type information.",
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index d085609..564b137 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -95,6 +95,15 @@
     }
   }
 
+  // Enable/Disable the reference type info check.
+  //
+  // Return: the previous status of the check.
+  bool SetRefTypeInfoCheckEnabled(bool value = true) {
+    bool old_value = check_reference_type_info_;
+    check_reference_type_info_ = value;
+    return old_value;
+  }
+
  protected:
   // Report a new error.
   void AddError(const std::string& error) {
@@ -111,6 +120,10 @@
   const char* const dump_prefix_;
   ScopedArenaAllocator allocator_;
   ArenaBitVector seen_ids_;
+  // Whether to perform the reference type info check for instructions which use or produce
+  // object references, e.g. HNewInstance, HLoadClass.
+  // The default value is true.
+  bool check_reference_type_info_ = true;
 
   DISALLOW_COPY_AND_ASSIGN(GraphChecker);
 };
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 2a7bbcb..d94c1fa 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -22,6 +22,7 @@
 #include <sstream>
 
 #include "art_method.h"
+#include "base/intrusive_forward_list.h"
 #include "bounds_check_elimination.h"
 #include "builder.h"
 #include "code_generator.h"
@@ -38,7 +39,6 @@
 #include "scoped_thread_state_change-inl.h"
 #include "ssa_liveness_analysis.h"
 #include "utils/assembler.h"
-#include "utils/intrusive_forward_list.h"
 
 namespace art {
 
@@ -113,16 +113,19 @@
                                const uint8_t* base_address,
                                const uint8_t* end_address)
       : instruction_set_(instruction_set), disassembler_(nullptr) {
-    libart_disassembler_handle_ =
-        dlopen(kIsDebugBuild ? "libartd-disassembler.so" : "libart-disassembler.so", RTLD_NOW);
+    constexpr const char* libart_disassembler_so_name =
+        kIsDebugBuild ? "libartd-disassembler.so" : "libart-disassembler.so";
+    libart_disassembler_handle_ = dlopen(libart_disassembler_so_name, RTLD_NOW);
     if (libart_disassembler_handle_ == nullptr) {
-      LOG(WARNING) << "Failed to dlopen libart-disassembler: " << dlerror();
+      LOG(ERROR) << "Failed to dlopen " << libart_disassembler_so_name << ": " << dlerror();
       return;
     }
+    constexpr const char* create_disassembler_symbol = "create_disassembler";
     create_disasm_prototype* create_disassembler = reinterpret_cast<create_disasm_prototype*>(
-        dlsym(libart_disassembler_handle_, "create_disassembler"));
+        dlsym(libart_disassembler_handle_, create_disassembler_symbol));
     if (create_disassembler == nullptr) {
-      LOG(WARNING) << "Could not find create_disassembler entry: " << dlerror();
+      LOG(ERROR) << "Could not find " << create_disassembler_symbol << " entry in "
+                 << libart_disassembler_so_name << ": " << dlerror();
       return;
     }
     // Reading the disassembly from 0x0 is easier, so we print relative
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 205077f..24d6e65 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -465,6 +465,30 @@
   return throw_seen;
 }
 
+ArtMethod* HInliner::FindActualCallTarget(HInvoke* invoke_instruction, bool* cha_devirtualize) {
+  ArtMethod* resolved_method = invoke_instruction->GetResolvedMethod();
+  DCHECK(resolved_method != nullptr);
+
+  ArtMethod* actual_method = nullptr;
+  if (invoke_instruction->IsInvokeStaticOrDirect()) {
+    actual_method = resolved_method;
+  } else {
+    // Check if we can statically find the method.
+    actual_method = FindVirtualOrInterfaceTarget(invoke_instruction, resolved_method);
+  }
+
+  if (actual_method == nullptr) {
+    ArtMethod* method = TryCHADevirtualization(resolved_method);
+    if (method != nullptr) {
+      *cha_devirtualize = true;
+      actual_method = method;
+      LOG_NOTE() << "Try CHA-based inlining of " << actual_method->PrettyMethod();
+    }
+  }
+
+  return actual_method;
+}
+
 bool HInliner::TryInline(HInvoke* invoke_instruction) {
   if (invoke_instruction->IsInvokeUnresolved() ||
       invoke_instruction->IsInvokePolymorphic() ||
@@ -485,56 +509,42 @@
     LOG_FAIL_NO_STAT() << "Not inlining a String.<init> method";
     return false;
   }
-  ArtMethod* actual_method = nullptr;
-
-  if (invoke_instruction->IsInvokeStaticOrDirect()) {
-    actual_method = resolved_method;
-  } else {
-    // Check if we can statically find the method.
-    actual_method = FindVirtualOrInterfaceTarget(invoke_instruction, resolved_method);
-  }
 
   bool cha_devirtualize = false;
+  ArtMethod* actual_method = FindActualCallTarget(invoke_instruction, &cha_devirtualize);
+
+  // If we didn't find a method, see if we can inline from the inline caches.
   if (actual_method == nullptr) {
-    ArtMethod* method = TryCHADevirtualization(resolved_method);
-    if (method != nullptr) {
-      cha_devirtualize = true;
-      actual_method = method;
-      LOG_NOTE() << "Try CHA-based inlining of " << actual_method->PrettyMethod();
-    }
+    DCHECK(!invoke_instruction->IsInvokeStaticOrDirect());
+
+    return TryInlineFromInlineCache(caller_dex_file, invoke_instruction, resolved_method);
   }
 
-  if (actual_method != nullptr) {
-    // Single target.
-    bool result = TryInlineAndReplace(invoke_instruction,
-                                      actual_method,
-                                      ReferenceTypeInfo::CreateInvalid(),
-                                      /* do_rtp= */ true,
-                                      cha_devirtualize);
-    if (result) {
-      // Successfully inlined.
-      if (!invoke_instruction->IsInvokeStaticOrDirect()) {
-        if (cha_devirtualize) {
-          // Add dependency due to devirtualization. We've assumed resolved_method
-          // has single implementation.
-          outermost_graph_->AddCHASingleImplementationDependency(resolved_method);
-          MaybeRecordStat(stats_, MethodCompilationStat::kCHAInline);
-        } else {
-          MaybeRecordStat(stats_, MethodCompilationStat::kInlinedInvokeVirtualOrInterface);
-        }
+  // Single target.
+  bool result = TryInlineAndReplace(invoke_instruction,
+                                    actual_method,
+                                    ReferenceTypeInfo::CreateInvalid(),
+                                    /* do_rtp= */ true,
+                                    cha_devirtualize);
+  if (result) {
+    // Successfully inlined.
+    if (!invoke_instruction->IsInvokeStaticOrDirect()) {
+      if (cha_devirtualize) {
+        // Add dependency due to devirtualization. We've assumed resolved_method
+        // has single implementation.
+        outermost_graph_->AddCHASingleImplementationDependency(resolved_method);
+        MaybeRecordStat(stats_, MethodCompilationStat::kCHAInline);
+      } else {
+        MaybeRecordStat(stats_, MethodCompilationStat::kInlinedInvokeVirtualOrInterface);
       }
-    } else if (!cha_devirtualize && AlwaysThrows(codegen_->GetCompilerOptions(), actual_method)) {
-      // Set always throws property for non-inlined method call with single target
-      // (unless it was obtained through CHA, because that would imply we have
-      // to add the CHA dependency, which seems not worth it).
-      invoke_instruction->SetAlwaysThrows(true);
     }
-    return result;
+  } else if (!cha_devirtualize && AlwaysThrows(codegen_->GetCompilerOptions(), actual_method)) {
+    // Set always throws property for non-inlined method call with single target
+    // (unless it was obtained through CHA, because that would imply we have
+    // to add the CHA dependency, which seems not worth it).
+    invoke_instruction->SetAlwaysThrows(true);
   }
-  DCHECK(!invoke_instruction->IsInvokeStaticOrDirect());
-
-  // Try using inline caches.
-  return TryInlineFromInlineCache(caller_dex_file, invoke_instruction, resolved_method);
+  return result;
 }
 
 static Handle<mirror::ObjectArray<mirror::Class>> AllocateInlineCacheHolder(
@@ -604,9 +614,8 @@
   switch (inline_cache_type) {
     case kInlineCacheNoData: {
       LOG_FAIL_NO_STAT()
-          << "Interface or virtual call to "
-          << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
-          << " could not be statically determined";
+          << "No inline cache information for call to "
+          << caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex());
       return false;
     }
 
@@ -693,9 +702,8 @@
   }
 
   std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> offline_profile =
-      pci->GetMethod(caller_dex_file.GetLocation(),
-                     caller_dex_file.GetLocationChecksum(),
-                     caller_compilation_unit_.GetDexMethodIndex());
+      pci->GetHotMethodInfo(MethodReference(
+          &caller_dex_file, caller_compilation_unit_.GetDexMethodIndex()));
   if (offline_profile == nullptr) {
     return kInlineCacheNoData;  // no profile information for this invocation.
   }
@@ -748,8 +756,7 @@
       }
     }
     if (!found) {
-      VLOG(compiler) << "Could not find profiled dex file: "
-          << offline_profile.dex_references[i].dex_location;
+      VLOG(compiler) << "Could not find profiled dex file: " << offline_profile.dex_references[i];
       return kInlineCacheMissingTypes;
     }
   }
@@ -1435,71 +1442,14 @@
   return true;
 }
 
-bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
-                                 ArtMethod* method,
-                                 ReferenceTypeInfo receiver_type,
-                                 HInstruction** return_replacement) {
-  if (method->IsProxyMethod()) {
-    LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedProxy)
-        << "Method " << method->PrettyMethod()
-        << " is not inlined because of unimplemented inline support for proxy methods.";
-    return false;
-  }
-
-  if (CountRecursiveCallsOf(method) > kMaximumNumberOfRecursiveCalls) {
-    LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedRecursiveBudget)
-        << "Method "
-        << method->PrettyMethod()
-        << " is not inlined because it has reached its recursive call budget.";
-    return false;
-  }
-
-  // Check whether we're allowed to inline. The outermost compilation unit is the relevant
-  // dex file here (though the transitivity of an inline chain would allow checking the calller).
-  if (!MayInline(codegen_->GetCompilerOptions(),
-                 *method->GetDexFile(),
-                 *outer_compilation_unit_.GetDexFile())) {
-    if (TryPatternSubstitution(invoke_instruction, method, return_replacement)) {
-      LOG_SUCCESS() << "Successfully replaced pattern of invoke "
-                    << method->PrettyMethod();
-      MaybeRecordStat(stats_, MethodCompilationStat::kReplacedInvokeWithSimplePattern);
-      return true;
-    }
-    LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedWont)
-        << "Won't inline " << method->PrettyMethod() << " in "
-        << outer_compilation_unit_.GetDexFile()->GetLocation() << " ("
-        << caller_compilation_unit_.GetDexFile()->GetLocation() << ") from "
-        << method->GetDexFile()->GetLocation();
-    return false;
-  }
-
-  bool same_dex_file = IsSameDexFile(*outer_compilation_unit_.GetDexFile(), *method->GetDexFile());
-
-  CodeItemDataAccessor accessor(method->DexInstructionData());
-
+// Returns whether inlining is allowed based on ART semantics.
+bool HInliner::IsInliningAllowed(ArtMethod* method, const CodeItemDataAccessor& accessor) const {
   if (!accessor.HasCodeItem()) {
     LOG_FAIL_NO_STAT()
         << "Method " << method->PrettyMethod() << " is not inlined because it is native";
     return false;
   }
 
-  size_t inline_max_code_units = codegen_->GetCompilerOptions().GetInlineMaxCodeUnits();
-  if (accessor.InsnsSizeInCodeUnits() > inline_max_code_units) {
-    LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedCodeItem)
-        << "Method " << method->PrettyMethod()
-        << " is not inlined because its code item is too big: "
-        << accessor.InsnsSizeInCodeUnits()
-        << " > "
-        << inline_max_code_units;
-    return false;
-  }
-
-  if (accessor.TriesSize() != 0) {
-    LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedTryCatch)
-        << "Method " << method->PrettyMethod() << " is not inlined because of try block";
-    return false;
-  }
-
   if (!method->IsCompilable()) {
     LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedNotVerified)
         << "Method " << method->PrettyMethod()
@@ -1514,6 +1464,30 @@
     return false;
   }
 
+  return true;
+}
+
+// Returns whether ART supports inlining this method.
+//
+// Some methods are not supported because they have features for which inlining
+// is not implemented. For example, we do not currently support inlining throw
+// instructions into a try block.
+bool HInliner::IsInliningSupported(const HInvoke* invoke_instruction,
+                                   ArtMethod* method,
+                                   const CodeItemDataAccessor& accessor) const {
+  if (method->IsProxyMethod()) {
+    LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedProxy)
+        << "Method " << method->PrettyMethod()
+        << " is not inlined because of unimplemented inline support for proxy methods.";
+    return false;
+  }
+
+  if (accessor.TriesSize() != 0) {
+    LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedTryCatch)
+        << "Method " << method->PrettyMethod() << " is not inlined because of try block";
+    return false;
+  }
+
   if (invoke_instruction->IsInvokeStaticOrDirect() &&
       invoke_instruction->AsInvokeStaticOrDirect()->IsStaticWithImplicitClinitCheck()) {
     // Case of a static method that cannot be inlined because it implicitly
@@ -1525,8 +1499,73 @@
     return false;
   }
 
+  return true;
+}
+
+// Returns whether our resource limits allow inlining this method.
+bool HInliner::IsInliningBudgetAvailable(ArtMethod* method,
+                                         const CodeItemDataAccessor& accessor) const {
+  if (CountRecursiveCallsOf(method) > kMaximumNumberOfRecursiveCalls) {
+    LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedRecursiveBudget)
+        << "Method "
+        << method->PrettyMethod()
+        << " is not inlined because it has reached its recursive call budget.";
+    return false;
+  }
+
+  size_t inline_max_code_units = codegen_->GetCompilerOptions().GetInlineMaxCodeUnits();
+  if (accessor.InsnsSizeInCodeUnits() > inline_max_code_units) {
+    LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedCodeItem)
+        << "Method " << method->PrettyMethod()
+        << " is not inlined because its code item is too big: "
+        << accessor.InsnsSizeInCodeUnits()
+        << " > "
+        << inline_max_code_units;
+    return false;
+  }
+
+  return true;
+}
+
+bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
+                                 ArtMethod* method,
+                                 ReferenceTypeInfo receiver_type,
+                                 HInstruction** return_replacement) {
+  // Check whether we're allowed to inline. The outermost compilation unit is the relevant
+  // dex file here (though the transitivity of an inline chain would allow checking the caller).
+  if (!MayInline(codegen_->GetCompilerOptions(),
+                 *method->GetDexFile(),
+                 *outer_compilation_unit_.GetDexFile())) {
+    if (TryPatternSubstitution(invoke_instruction, method, return_replacement)) {
+      LOG_SUCCESS() << "Successfully replaced pattern of invoke "
+                    << method->PrettyMethod();
+      MaybeRecordStat(stats_, MethodCompilationStat::kReplacedInvokeWithSimplePattern);
+      return true;
+    }
+    LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedWont)
+        << "Won't inline " << method->PrettyMethod() << " in "
+        << outer_compilation_unit_.GetDexFile()->GetLocation() << " ("
+        << caller_compilation_unit_.GetDexFile()->GetLocation() << ") from "
+        << method->GetDexFile()->GetLocation();
+    return false;
+  }
+
+  CodeItemDataAccessor accessor(method->DexInstructionData());
+
+  if (!IsInliningAllowed(method, accessor)) {
+    return false;
+  }
+
+  if (!IsInliningSupported(invoke_instruction, method, accessor)) {
+    return false;
+  }
+
+  if (!IsInliningBudgetAvailable(method, accessor)) {
+    return false;
+  }
+
   if (!TryBuildAndInlineHelper(
-          invoke_instruction, method, receiver_type, same_dex_file, return_replacement)) {
+          invoke_instruction, method, receiver_type, return_replacement)) {
     return false;
   }
 
@@ -1756,105 +1795,12 @@
   return false;
 }
 
-bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
-                                       ArtMethod* resolved_method,
-                                       ReferenceTypeInfo receiver_type,
-                                       bool same_dex_file,
-                                       HInstruction** return_replacement) {
-  DCHECK(!(resolved_method->IsStatic() && receiver_type.IsValid()));
-  ScopedObjectAccess soa(Thread::Current());
-  const dex::CodeItem* code_item = resolved_method->GetCodeItem();
-  const DexFile& callee_dex_file = *resolved_method->GetDexFile();
-  uint32_t method_index = resolved_method->GetDexMethodIndex();
-  CodeItemDebugInfoAccessor code_item_accessor(resolved_method->DexInstructionDebugInfo());
-  ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
-  Handle<mirror::DexCache> dex_cache = NewHandleIfDifferent(resolved_method->GetDexCache(),
-                                                            caller_compilation_unit_.GetDexCache(),
-                                                            handles_);
-  Handle<mirror::ClassLoader> class_loader =
-      NewHandleIfDifferent(resolved_method->GetDeclaringClass()->GetClassLoader(),
-                           caller_compilation_unit_.GetClassLoader(),
-                           handles_);
-
-  Handle<mirror::Class> compiling_class = handles_->NewHandle(resolved_method->GetDeclaringClass());
-  DexCompilationUnit dex_compilation_unit(
-      class_loader,
-      class_linker,
-      callee_dex_file,
-      code_item,
-      resolved_method->GetDeclaringClass()->GetDexClassDefIndex(),
-      method_index,
-      resolved_method->GetAccessFlags(),
-      /* verified_method= */ nullptr,
-      dex_cache,
-      compiling_class);
-
-  InvokeType invoke_type = invoke_instruction->GetInvokeType();
-  if (invoke_type == kInterface) {
-    // We have statically resolved the dispatch. To please the class linker
-    // at runtime, we change this call as if it was a virtual call.
-    invoke_type = kVirtual;
-  }
-
-  bool caller_dead_reference_safe = graph_->IsDeadReferenceSafe();
-  const dex::ClassDef& callee_class = resolved_method->GetClassDef();
-  // MethodContainsRSensitiveAccess is currently slow, but HasDeadReferenceSafeAnnotation()
-  // is currently rarely true.
-  bool callee_dead_reference_safe =
-      annotations::HasDeadReferenceSafeAnnotation(callee_dex_file, callee_class)
-      && !annotations::MethodContainsRSensitiveAccess(callee_dex_file, callee_class, method_index);
-
-  const int32_t caller_instruction_counter = graph_->GetCurrentInstructionId();
-  HGraph* callee_graph = new (graph_->GetAllocator()) HGraph(
-      graph_->GetAllocator(),
-      graph_->GetArenaStack(),
-      callee_dex_file,
-      method_index,
-      codegen_->GetCompilerOptions().GetInstructionSet(),
-      invoke_type,
-      callee_dead_reference_safe,
-      graph_->IsDebuggable(),
-      /* osr= */ false,
-      caller_instruction_counter);
-  callee_graph->SetArtMethod(resolved_method);
-
-  // When they are needed, allocate `inline_stats_` on the Arena instead
-  // of on the stack, as Clang might produce a stack frame too large
-  // for this function, that would not fit the requirements of the
-  // `-Wframe-larger-than` option.
-  if (stats_ != nullptr) {
-    // Reuse one object for all inline attempts from this caller to keep Arena memory usage low.
-    if (inline_stats_ == nullptr) {
-      void* storage = graph_->GetAllocator()->Alloc<OptimizingCompilerStats>(kArenaAllocMisc);
-      inline_stats_ = new (storage) OptimizingCompilerStats;
-    } else {
-      inline_stats_->Reset();
-    }
-  }
-  HGraphBuilder builder(callee_graph,
-                        code_item_accessor,
-                        &dex_compilation_unit,
-                        &outer_compilation_unit_,
-                        codegen_,
-                        inline_stats_,
-                        resolved_method->GetQuickenedInfo(),
-                        handles_);
-
-  if (builder.BuildGraph() != kAnalysisSuccess) {
-    LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedCannotBuild)
-        << "Method " << callee_dex_file.PrettyMethod(method_index)
-        << " could not be built, so cannot be inlined";
-    return false;
-  }
-
-  if (!RegisterAllocator::CanAllocateRegistersFor(
-          *callee_graph, codegen_->GetCompilerOptions().GetInstructionSet())) {
-    LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedRegisterAllocator)
-        << "Method " << callee_dex_file.PrettyMethod(method_index)
-        << " cannot be inlined because of the register allocator";
-    return false;
-  }
-
+// Substitutes parameters in the callee graph with their values from the caller.
+void HInliner::SubstituteArguments(HGraph* callee_graph,
+                                   HInvoke* invoke_instruction,
+                                   ReferenceTypeInfo receiver_type,
+                                   const DexCompilationUnit& dex_compilation_unit) {
+  ArtMethod* const resolved_method = callee_graph->GetArtMethod();
   size_t parameter_index = 0;
   bool run_rtp = false;
   for (HInstructionIterator instructions(callee_graph->GetEntryBlock()->GetInstructions());
@@ -1897,8 +1843,23 @@
                              handles_,
                              /* is_first_run= */ false).Run();
   }
+}
 
-  RunOptimizations(callee_graph, code_item, dex_compilation_unit);
+// Returns whether we can inline the callee_graph into the target_block.
+//
+// This performs a combination of semantics checks, compiler support checks, and
+// resource limit checks.
+//
+// If this function returns true, it will also set out_number_of_instructions to
+// the number of instructions in the inlined body.
+bool HInliner::CanInlineBody(const HGraph* callee_graph,
+                             const HBasicBlock* target_block,
+                             size_t* out_number_of_instructions) const {
+  const DexFile& callee_dex_file = callee_graph->GetDexFile();
+  ArtMethod* const resolved_method = callee_graph->GetArtMethod();
+  const uint32_t method_index = resolved_method->GetMethodIndex();
+  const bool same_dex_file =
+      IsSameDexFile(*outer_compilation_unit_.GetDexFile(), *resolved_method->GetDexFile());
 
   HBasicBlock* exit_block = callee_graph->GetExitBlock();
   if (exit_block == nullptr) {
@@ -1911,7 +1872,7 @@
   bool has_one_return = false;
   for (HBasicBlock* predecessor : exit_block->GetPredecessors()) {
     if (predecessor->GetLastInstruction()->IsThrow()) {
-      if (invoke_instruction->GetBlock()->IsTryBlock()) {
+      if (target_block->IsTryBlock()) {
         // TODO(ngeoffray): Support adding HTryBoundary in Hgraph::InlineInto.
         LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedTryCatch)
             << "Method " << callee_dex_file.PrettyMethod(method_index)
@@ -2020,6 +1981,111 @@
       }
     }
   }
+
+  *out_number_of_instructions = number_of_instructions;
+  return true;
+}
+
+bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
+                                       ArtMethod* resolved_method,
+                                       ReferenceTypeInfo receiver_type,
+                                       HInstruction** return_replacement) {
+  DCHECK(!(resolved_method->IsStatic() && receiver_type.IsValid()));
+  const dex::CodeItem* code_item = resolved_method->GetCodeItem();
+  const DexFile& callee_dex_file = *resolved_method->GetDexFile();
+  uint32_t method_index = resolved_method->GetDexMethodIndex();
+  CodeItemDebugInfoAccessor code_item_accessor(resolved_method->DexInstructionDebugInfo());
+  ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker();
+  Handle<mirror::DexCache> dex_cache = NewHandleIfDifferent(resolved_method->GetDexCache(),
+                                                            caller_compilation_unit_.GetDexCache(),
+                                                            handles_);
+  Handle<mirror::ClassLoader> class_loader =
+      NewHandleIfDifferent(resolved_method->GetDeclaringClass()->GetClassLoader(),
+                           caller_compilation_unit_.GetClassLoader(),
+                           handles_);
+
+  Handle<mirror::Class> compiling_class = handles_->NewHandle(resolved_method->GetDeclaringClass());
+  DexCompilationUnit dex_compilation_unit(
+      class_loader,
+      class_linker,
+      callee_dex_file,
+      code_item,
+      resolved_method->GetDeclaringClass()->GetDexClassDefIndex(),
+      method_index,
+      resolved_method->GetAccessFlags(),
+      /* verified_method= */ nullptr,
+      dex_cache,
+      compiling_class);
+
+  InvokeType invoke_type = invoke_instruction->GetInvokeType();
+  if (invoke_type == kInterface) {
+    // We have statically resolved the dispatch. To please the class linker
+    // at runtime, we change this call as if it was a virtual call.
+    invoke_type = kVirtual;
+  }
+
+  bool caller_dead_reference_safe = graph_->IsDeadReferenceSafe();
+  const dex::ClassDef& callee_class = resolved_method->GetClassDef();
+  // MethodContainsRSensitiveAccess is currently slow, but HasDeadReferenceSafeAnnotation()
+  // is currently rarely true.
+  bool callee_dead_reference_safe =
+      annotations::HasDeadReferenceSafeAnnotation(callee_dex_file, callee_class)
+      && !annotations::MethodContainsRSensitiveAccess(callee_dex_file, callee_class, method_index);
+
+  const int32_t caller_instruction_counter = graph_->GetCurrentInstructionId();
+  HGraph* callee_graph = new (graph_->GetAllocator()) HGraph(
+      graph_->GetAllocator(),
+      graph_->GetArenaStack(),
+      callee_dex_file,
+      method_index,
+      codegen_->GetCompilerOptions().GetInstructionSet(),
+      invoke_type,
+      callee_dead_reference_safe,
+      graph_->IsDebuggable(),
+      /* osr= */ false,
+      /* is_shared_jit_code= */ graph_->IsCompilingForSharedJitCode(),
+      /* baseline= */ graph_->IsCompilingBaseline(),
+      /* start_instruction_id= */ caller_instruction_counter);
+  callee_graph->SetArtMethod(resolved_method);
+
+  // When they are needed, allocate `inline_stats_` on the Arena instead
+  // of on the stack, as Clang might produce a stack frame too large
+  // for this function, that would not fit the requirements of the
+  // `-Wframe-larger-than` option.
+  if (stats_ != nullptr) {
+    // Reuse one object for all inline attempts from this caller to keep Arena memory usage low.
+    if (inline_stats_ == nullptr) {
+      void* storage = graph_->GetAllocator()->Alloc<OptimizingCompilerStats>(kArenaAllocMisc);
+      inline_stats_ = new (storage) OptimizingCompilerStats;
+    } else {
+      inline_stats_->Reset();
+    }
+  }
+  HGraphBuilder builder(callee_graph,
+                        code_item_accessor,
+                        &dex_compilation_unit,
+                        &outer_compilation_unit_,
+                        codegen_,
+                        inline_stats_,
+                        resolved_method->GetQuickenedInfo(),
+                        handles_);
+
+  if (builder.BuildGraph() != kAnalysisSuccess) {
+    LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedCannotBuild)
+        << "Method " << callee_dex_file.PrettyMethod(method_index)
+        << " could not be built, so cannot be inlined";
+    return false;
+  }
+
+  SubstituteArguments(callee_graph, invoke_instruction, receiver_type, dex_compilation_unit);
+
+  RunOptimizations(callee_graph, code_item, dex_compilation_unit);
+
+  size_t number_of_instructions = 0;
+  if (!CanInlineBody(callee_graph, invoke_instruction->GetBlock(), &number_of_instructions)) {
+    return false;
+  }
+
   DCHECK_EQ(caller_instruction_counter, graph_->GetCurrentInstructionId())
       << "No instructions can be added to the outer graph while inner graph is being built";
 
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 15d7349..882ba4e 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -73,6 +73,15 @@
 
   bool TryInline(HInvoke* invoke_instruction);
 
+  // Attempt to resolve the target of the invoke instruction to an actual call
+  // target.
+  //
+  // Returns the target directly in the case of static or direct invokes.
+  // Otherwise, uses CHA devirtualization or other methods to try to find the
+  // call target.
+  ArtMethod* FindActualCallTarget(HInvoke* invoke_instruction, bool* cha_devirtualize)
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Try to inline `resolved_method` in place of `invoke_instruction`. `do_rtp` is whether
   // reference type propagation can run after the inlining. If the inlining is successful, this
   // method will replace and remove the `invoke_instruction`. If `cha_devirtualize` is true,
@@ -93,8 +102,15 @@
   bool TryBuildAndInlineHelper(HInvoke* invoke_instruction,
                                ArtMethod* resolved_method,
                                ReferenceTypeInfo receiver_type,
-                               bool same_dex_file,
-                               HInstruction** return_replacement);
+                               HInstruction** return_replacement)
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Substitutes parameters in the callee graph with their values from the caller.
+  void SubstituteArguments(HGraph* callee_graph,
+                           HInvoke* invoke_instruction,
+                           ReferenceTypeInfo receiver_type,
+                           const DexCompilationUnit& dex_compilation_unit)
+    REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Run simple optimizations on `callee_graph`.
   void RunOptimizations(HGraph* callee_graph,
@@ -108,6 +124,38 @@
                               HInstruction** return_replacement)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Returns whether inlining is allowed based on ART semantics.
+  bool IsInliningAllowed(art::ArtMethod* method, const CodeItemDataAccessor& accessor) const
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
+
+  // Returns whether ART supports inlining this method.
+  //
+  // Some methods are not supported because they have features for which inlining
+  // is not implemented. For example, we do not currently support inlining throw
+  // instructions into a try block.
+  bool IsInliningSupported(const HInvoke* invoke_instruction,
+                           art::ArtMethod* method,
+                           const CodeItemDataAccessor& accessor) const
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Returns whether the inlining budget allows inlining this method.
+  //
+  // For example, this checks whether the function has grown too large and
+  // inlining should be prevented.
+  bool IsInliningBudgetAvailable(art::ArtMethod* method, const CodeItemDataAccessor& accessor) const
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Inspects the body of a method (callee_graph) and returns whether it can be
+  // inlined.
+  //
+  // This checks for instructions and constructs that we do not support
+  // inlining, such as inlining a throw instruction into a try block.
+  bool CanInlineBody(const HGraph* callee_graph,
+                     const HBasicBlock* target_block,
+                     size_t* out_number_of_instructions) const
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Create a new HInstanceFieldGet.
   HInstanceFieldGet* CreateInstanceFieldGet(uint32_t field_index,
                                             ArtMethod* referrer,
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 5e7b575..1e7b48e 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -29,6 +29,7 @@
 #include "driver/dex_compilation_unit.h"
 #include "driver/compiler_options.h"
 #include "imtable-inl.h"
+#include "jit/jit.h"
 #include "mirror/dex_cache.h"
 #include "oat_file.h"
 #include "optimizing_compiler_stats.h"
@@ -1001,14 +1002,27 @@
                                              resolved_method->GetMethodIndex());
   } else {
     DCHECK_EQ(invoke_type, kInterface);
-    ScopedObjectAccess soa(Thread::Current());  // Needed for the IMT index.
-    invoke = new (allocator_) HInvokeInterface(allocator_,
+    ScopedObjectAccess soa(Thread::Current());  // Needed for the IMT index and class check below.
+    if (resolved_method->GetDeclaringClass()->IsObjectClass()) {
+      // If the resolved method is from j.l.Object, emit a virtual call instead.
+      // The IMT conflict stub only handles interface methods.
+      invoke = new (allocator_) HInvokeVirtual(allocator_,
                                                number_of_arguments,
                                                return_type,
                                                dex_pc,
                                                method_idx,
                                                resolved_method,
-                                               ImTable::GetImtIndex(resolved_method));
+                                               resolved_method->GetMethodIndex());
+    } else {
+      DCHECK(resolved_method->GetDeclaringClass()->IsInterface());
+      invoke = new (allocator_) HInvokeInterface(allocator_,
+                                                 number_of_arguments,
+                                                 return_type,
+                                                 dex_pc,
+                                                 method_idx,
+                                                 resolved_method,
+                                                 ImTable::GetImtIndex(resolved_method));
+    }
   }
   return HandleInvoke(invoke, operands, shorty, /* is_unresolved= */ false, clinit_check);
 }
@@ -1139,12 +1153,15 @@
 
 static bool IsInBootImage(ObjPtr<mirror::Class> cls, const CompilerOptions& compiler_options)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  if (compiler_options.IsBootImage()) {
+  if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(cls)) {
+    return true;
+  }
+  if (compiler_options.IsBootImage() || compiler_options.IsBootImageExtension()) {
     std::string temp;
     const char* descriptor = cls->GetDescriptor(&temp);
     return compiler_options.IsImageClass(descriptor);
   } else {
-    return Runtime::Current()->GetHeap()->FindSpaceFromObject(cls, false)->IsImageSpace();
+    return false;
   }
 }
 
@@ -1290,15 +1307,20 @@
   // Check if the class will be initialized at runtime.
   if (cls->IsInitialized()) {
     Runtime* runtime = Runtime::Current();
-    if (!runtime->IsAotCompiler()) {
+    if (runtime->IsAotCompiler()) {
+      // Assume loaded only if klass is in the boot image. App classes cannot be assumed
+      // loaded because we don't even know what class loader will be used to load them.
+      if (IsInBootImage(cls.Get(), code_generator_->GetCompilerOptions())) {
+        return true;
+      }
+    } else {
       DCHECK(runtime->UseJitCompilation());
-      // For JIT, the class cannot revert to an uninitialized state.
-      return true;
-    }
-    // Assume loaded only if klass is in the boot image. App classes cannot be assumed
-    // loaded because we don't even know what class loader will be used to load them.
-    if (IsInBootImage(cls.Get(), code_generator_->GetCompilerOptions())) {
-      return true;
+      if (Runtime::Current()->GetJit()->CanAssumeInitialized(
+              cls.Get(),
+              graph_->IsCompilingForSharedJitCode())) {
+        // For JIT, the class cannot revert to an uninitialized state.
+        return true;
+      }
     }
   }
 
@@ -3090,6 +3112,7 @@
           LoadLocal(instruction.VRegA_11x(), DataType::Type::kReference),
           HMonitorOperation::OperationKind::kEnter,
           dex_pc));
+      graph_->SetHasMonitorOperations(true);
       break;
     }
 
@@ -3098,6 +3121,7 @@
           LoadLocal(instruction.VRegA_11x(), DataType::Type::kReference),
           HMonitorOperation::OperationKind::kExit,
           dex_pc));
+      graph_->SetHasMonitorOperations(true);
       break;
     }
 
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index ce62495..84297ec 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -25,6 +25,7 @@
 #include "mirror/class-inl.h"
 #include "scoped_thread_state_change-inl.h"
 #include "sharpening.h"
+#include "string_builder_append.h"
 
 namespace art {
 
@@ -275,17 +276,6 @@
         return false;
       }
       break;
-    case InstructionSet::kMips:
-    case InstructionSet::kMips64:
-      if (!(type == DataType::Type::kUint8 ||
-            type == DataType::Type::kInt8 ||
-            type == DataType::Type::kUint16 ||
-            type == DataType::Type::kInt16 ||
-            type == DataType::Type::kInt32 ||
-            type == DataType::Type::kInt64)) {
-        return false;
-      }
-      break;
     default:
       return false;
   }
@@ -2467,6 +2457,192 @@
   return false;
 }
 
+static bool TryReplaceStringBuilderAppend(HInvoke* invoke) {
+  DCHECK_EQ(invoke->GetIntrinsic(), Intrinsics::kStringBuilderToString);
+  if (invoke->CanThrowIntoCatchBlock()) {
+    return false;
+  }
+
+  HBasicBlock* block = invoke->GetBlock();
+  HInstruction* sb = invoke->InputAt(0);
+
+  // We support only a new StringBuilder, otherwise we cannot ensure that
+  // the StringBuilder data does not need to be populated for other users.
+  if (!sb->IsNewInstance()) {
+    return false;
+  }
+
+  // For now, we support only single-block recognition.
+  // (Ternary operators feeding the append could be implemented.)
+  for (const HUseListNode<HInstruction*>& use : sb->GetUses()) {
+    if (use.GetUser()->GetBlock() != block) {
+      return false;
+    }
+    // The append pattern uses the StringBuilder only as the first argument.
+    if (use.GetIndex() != 0u) {
+      return false;
+    }
+  }
+
+  // Collect args and check for unexpected uses.
+  // We expect one call to a constructor with no arguments, one constructor fence (unless
+  // eliminated), some number of append calls and one call to StringBuilder.toString().
+  bool seen_constructor = false;
+  bool seen_constructor_fence = false;
+  bool seen_to_string = false;
+  uint32_t format = 0u;
+  uint32_t num_args = 0u;
+  HInstruction* args[StringBuilderAppend::kMaxArgs];  // Added in reverse order.
+  for (HBackwardInstructionIterator iter(block->GetInstructions()); !iter.Done(); iter.Advance()) {
+    HInstruction* user = iter.Current();
+    // Instructions of interest apply to `sb`, skip those that do not involve `sb`.
+    if (user->InputCount() == 0u || user->InputAt(0u) != sb) {
+      continue;
+    }
+    // We visit the uses in reverse order, so the StringBuilder.toString() must come first.
+    if (!seen_to_string) {
+      if (user == invoke) {
+        seen_to_string = true;
+        continue;
+      } else {
+        return false;
+      }
+    }
+    // Then we should see the arguments.
+    if (user->IsInvokeVirtual()) {
+      HInvokeVirtual* as_invoke_virtual = user->AsInvokeVirtual();
+      DCHECK(!seen_constructor);
+      DCHECK(!seen_constructor_fence);
+      StringBuilderAppend::Argument arg;
+      switch (as_invoke_virtual->GetIntrinsic()) {
+        case Intrinsics::kStringBuilderAppendObject:
+          // TODO: Unimplemented, needs to call String.valueOf().
+          return false;
+        case Intrinsics::kStringBuilderAppendString:
+          arg = StringBuilderAppend::Argument::kString;
+          break;
+        case Intrinsics::kStringBuilderAppendCharArray:
+          // TODO: Unimplemented, StringBuilder.append(char[]) can throw NPE and we would
+          // not have the correct stack trace for it.
+          return false;
+        case Intrinsics::kStringBuilderAppendBoolean:
+          arg = StringBuilderAppend::Argument::kBoolean;
+          break;
+        case Intrinsics::kStringBuilderAppendChar:
+          arg = StringBuilderAppend::Argument::kChar;
+          break;
+        case Intrinsics::kStringBuilderAppendInt:
+          arg = StringBuilderAppend::Argument::kInt;
+          break;
+        case Intrinsics::kStringBuilderAppendLong:
+          arg = StringBuilderAppend::Argument::kLong;
+          break;
+        case Intrinsics::kStringBuilderAppendCharSequence: {
+          ReferenceTypeInfo rti = user->AsInvokeVirtual()->InputAt(1)->GetReferenceTypeInfo();
+          if (!rti.IsValid()) {
+            return false;
+          }
+          ScopedObjectAccess soa(Thread::Current());
+          Handle<mirror::Class> input_type = rti.GetTypeHandle();
+          DCHECK(input_type != nullptr);
+          if (input_type.Get() == GetClassRoot<mirror::String>()) {
+            arg = StringBuilderAppend::Argument::kString;
+          } else {
+            // TODO: Check and implement for StringBuilder. We could find the StringBuilder's
+            // internal char[] inconsistent with the length, or the string compression
+            // of the result could be compromised with a concurrent modification, and
+            // we would need to throw appropriate exceptions.
+            return false;
+          }
+          break;
+        }
+        case Intrinsics::kStringBuilderAppendFloat:
+        case Intrinsics::kStringBuilderAppendDouble:
+          // TODO: Unimplemented, needs to call FloatingDecimal.getBinaryToASCIIConverter().
+          return false;
+        default: {
+          return false;
+        }
+      }
+      // Uses of the append return value should have been replaced with the first input.
+      DCHECK(!as_invoke_virtual->HasUses());
+      DCHECK(!as_invoke_virtual->HasEnvironmentUses());
+      if (num_args == StringBuilderAppend::kMaxArgs) {
+        return false;
+      }
+      format = (format << StringBuilderAppend::kBitsPerArg) | static_cast<uint32_t>(arg);
+      args[num_args] = as_invoke_virtual->InputAt(1u);
+      ++num_args;
+    } else if (user->IsInvokeStaticOrDirect() &&
+               user->AsInvokeStaticOrDirect()->GetResolvedMethod() != nullptr &&
+               user->AsInvokeStaticOrDirect()->GetResolvedMethod()->IsConstructor() &&
+               user->AsInvokeStaticOrDirect()->GetNumberOfArguments() == 1u) {
+      // After arguments, we should see the constructor.
+      // We accept only the constructor with no extra arguments.
+      DCHECK(!seen_constructor);
+      DCHECK(!seen_constructor_fence);
+      seen_constructor = true;
+    } else if (user->IsConstructorFence()) {
+      // The last use we see is the constructor fence.
+      DCHECK(seen_constructor);
+      DCHECK(!seen_constructor_fence);
+      seen_constructor_fence = true;
+    } else {
+      return false;
+    }
+  }
+
+  if (num_args == 0u) {
+    return false;
+  }
+
+  // Check environment uses.
+  for (const HUseListNode<HEnvironment*>& use : sb->GetEnvUses()) {
+    HInstruction* holder = use.GetUser()->GetHolder();
+    if (holder->GetBlock() != block) {
+      return false;
+    }
+    // Accept only calls on the StringBuilder (which shall all be removed).
+    // TODO: Carve-out for const-string? Or rely on environment pruning (to be implemented)?
+    if (holder->InputCount() == 0 || holder->InputAt(0) != sb) {
+      return false;
+    }
+  }
+
+  // Create replacement instruction.
+  HIntConstant* fmt = block->GetGraph()->GetIntConstant(static_cast<int32_t>(format));
+  ArenaAllocator* allocator = block->GetGraph()->GetAllocator();
+  HStringBuilderAppend* append =
+      new (allocator) HStringBuilderAppend(fmt, num_args, allocator, invoke->GetDexPc());
+  append->SetReferenceTypeInfo(invoke->GetReferenceTypeInfo());
+  for (size_t i = 0; i != num_args; ++i) {
+    append->SetArgumentAt(i, args[num_args - 1u - i]);
+  }
+  block->InsertInstructionBefore(append, invoke);
+  DCHECK(!invoke->CanBeNull());
+  DCHECK(!append->CanBeNull());
+  invoke->ReplaceWith(append);
+  // Copy environment, except for the StringBuilder uses.
+  for (HEnvironment* env = invoke->GetEnvironment(); env != nullptr; env = env->GetParent()) {
+    for (size_t i = 0, size = env->Size(); i != size; ++i) {
+      if (env->GetInstructionAt(i) == sb) {
+        env->RemoveAsUserOfInput(i);
+        env->SetRawEnvAt(i, /*instruction=*/ nullptr);
+      }
+    }
+  }
+  append->CopyEnvironmentFrom(invoke->GetEnvironment());
+  // Remove the old instruction.
+  block->RemoveInstruction(invoke);
+  // Remove the StringBuilder's uses and StringBuilder.
+  while (sb->HasNonEnvironmentUses()) {
+    block->RemoveInstruction(sb->GetUses().front().GetUser());
+  }
+  DCHECK(!sb->HasEnvironmentUses());
+  block->RemoveInstruction(sb);
+  return true;
+}
+
 // Certain allocation intrinsics are not removed by dead code elimination
 // because of potentially throwing an OOM exception or other side effects.
 // This method removes such intrinsics when special circumstances allow.
@@ -2481,6 +2657,9 @@
       invoke->GetBlock()->RemoveInstruction(invoke);
       RecordSimplification();
     }
+  } else if (invoke->GetIntrinsic() == Intrinsics::kStringBuilderToString &&
+             TryReplaceStringBuilderAppend(invoke)) {
+    RecordSimplification();
   }
 }
 
@@ -2569,7 +2748,16 @@
       SimplifyNPEOnArgN(instruction, 1);  // 0th has own NullCheck
       break;
     case Intrinsics::kStringBufferAppend:
-    case Intrinsics::kStringBuilderAppend:
+    case Intrinsics::kStringBuilderAppendObject:
+    case Intrinsics::kStringBuilderAppendString:
+    case Intrinsics::kStringBuilderAppendCharSequence:
+    case Intrinsics::kStringBuilderAppendCharArray:
+    case Intrinsics::kStringBuilderAppendBoolean:
+    case Intrinsics::kStringBuilderAppendChar:
+    case Intrinsics::kStringBuilderAppendInt:
+    case Intrinsics::kStringBuilderAppendLong:
+    case Intrinsics::kStringBuilderAppendFloat:
+    case Intrinsics::kStringBuilderAppendDouble:
       SimplifyReturnThis(instruction);
       break;
     case Intrinsics::kStringBufferToString:
diff --git a/compiler/optimizing/instruction_simplifier_mips.cc b/compiler/optimizing/instruction_simplifier_mips.cc
deleted file mode 100644
index 5d0c63b..0000000
--- a/compiler/optimizing/instruction_simplifier_mips.cc
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "instruction_simplifier_mips.h"
-
-#include "arch/mips/instruction_set_features_mips.h"
-#include "mirror/array-inl.h"
-
-namespace art {
-namespace mips {
-
-class InstructionSimplifierMipsVisitor : public HGraphVisitor {
- public:
-  InstructionSimplifierMipsVisitor(HGraph* graph,
-                                   CodeGenerator* codegen,
-                                   OptimizingCompilerStats* stats)
-      : HGraphVisitor(graph),
-        stats_(stats),
-        codegen_(down_cast<CodeGeneratorMIPS*>(codegen)) {}
-
- private:
-  void RecordSimplification() {
-    MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSimplificationsArch);
-  }
-
-  bool TryExtractArrayAccessIndex(HInstruction* access,
-                                  HInstruction* index,
-                                  DataType::Type packed_type);
-  void VisitArrayGet(HArrayGet* instruction) override;
-  void VisitArraySet(HArraySet* instruction) override;
-
-  OptimizingCompilerStats* stats_;
-  CodeGeneratorMIPS* codegen_;
-};
-
-bool InstructionSimplifierMipsVisitor::TryExtractArrayAccessIndex(HInstruction* access,
-                                                                  HInstruction* index,
-                                                                  DataType::Type packed_type) {
-  if (codegen_->GetInstructionSetFeatures().IsR6() ||
-      codegen_->GetInstructionSetFeatures().HasMsa()) {
-    return false;
-  }
-  if (index->IsConstant() ||
-      (index->IsBoundsCheck() && index->AsBoundsCheck()->GetIndex()->IsConstant())) {
-    // If index is constant the whole address calculation often can be done by load/store
-    // instructions themselves.
-    // TODO: Treat the case with non-embeddable constants.
-    return false;
-  }
-
-  if (packed_type != DataType::Type::kInt16 && packed_type != DataType::Type::kUint16 &&
-      packed_type != DataType::Type::kInt32 && packed_type != DataType::Type::kInt64 &&
-      packed_type != DataType::Type::kFloat32 && packed_type != DataType::Type::kFloat64) {
-    return false;
-  }
-
-  if (access->IsArrayGet() && access->AsArrayGet()->IsStringCharAt()) {
-    return false;
-  }
-
-  HGraph* graph = access->GetBlock()->GetGraph();
-  ArenaAllocator* allocator = graph->GetAllocator();
-  size_t component_shift = DataType::SizeShift(packed_type);
-
-  bool is_extracting_beneficial = false;
-  // It is beneficial to extract index intermediate address only if there are at least 2 users.
-  for (const HUseListNode<HInstruction*>& use : index->GetUses()) {
-    HInstruction* user = use.GetUser();
-    if (user->IsArrayGet() && user != access && !user->AsArrayGet()->IsStringCharAt()) {
-      HArrayGet* another_access = user->AsArrayGet();
-      DataType::Type another_packed_type = another_access->GetType();
-      size_t another_component_shift = DataType::SizeShift(another_packed_type);
-      if (another_component_shift == component_shift) {
-        is_extracting_beneficial = true;
-        break;
-      }
-    } else if (user->IsArraySet() && user != access) {
-      HArraySet* another_access = user->AsArraySet();
-      DataType::Type another_packed_type = another_access->GetType();
-      size_t another_component_shift = DataType::SizeShift(another_packed_type);
-      if (another_component_shift == component_shift) {
-        is_extracting_beneficial = true;
-        break;
-      }
-    } else if (user->IsIntermediateArrayAddressIndex()) {
-      HIntermediateArrayAddressIndex* another_access = user->AsIntermediateArrayAddressIndex();
-      size_t another_component_shift = another_access->GetShift()->AsIntConstant()->GetValue();
-      if (another_component_shift == component_shift) {
-        is_extracting_beneficial = true;
-        break;
-      }
-    }
-  }
-
-  if (!is_extracting_beneficial) {
-    return false;
-  }
-
-  HIntConstant* shift = graph->GetIntConstant(component_shift);
-  HIntermediateArrayAddressIndex* address =
-      new (allocator) HIntermediateArrayAddressIndex(index, shift, kNoDexPc);
-  access->GetBlock()->InsertInstructionBefore(address, access);
-  access->ReplaceInput(address, 1);
-  return true;
-}
-
-void InstructionSimplifierMipsVisitor::VisitArrayGet(HArrayGet* instruction) {
-  DataType::Type packed_type = instruction->GetType();
-  if (TryExtractArrayAccessIndex(instruction, instruction->GetIndex(), packed_type)) {
-    RecordSimplification();
-  }
-}
-
-void InstructionSimplifierMipsVisitor::VisitArraySet(HArraySet* instruction) {
-  DataType::Type packed_type = instruction->GetComponentType();
-  if (TryExtractArrayAccessIndex(instruction, instruction->GetIndex(), packed_type)) {
-    RecordSimplification();
-  }
-}
-
-bool InstructionSimplifierMips::Run() {
-  InstructionSimplifierMipsVisitor visitor(graph_, codegen_, stats_);
-  visitor.VisitReversePostOrder();
-  return true;
-}
-
-}  // namespace mips
-}  // namespace art
diff --git a/compiler/optimizing/instruction_simplifier_mips.h b/compiler/optimizing/instruction_simplifier_mips.h
deleted file mode 100644
index b431334..0000000
--- a/compiler/optimizing/instruction_simplifier_mips.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_MIPS_H_
-#define ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_MIPS_H_
-
-#include "nodes.h"
-#include "optimization.h"
-#include "code_generator_mips.h"
-
-namespace art {
-
-class CodeGenerator;
-
-namespace mips {
-
-class InstructionSimplifierMips : public HOptimization {
- public:
-  InstructionSimplifierMips(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
-      : HOptimization(graph, kInstructionSimplifierMipsPassName, stats),
-        codegen_(down_cast<CodeGeneratorMIPS*>(codegen)) {}
-
-  static constexpr const char* kInstructionSimplifierMipsPassName = "instruction_simplifier_mips";
-
-  bool Run() override;
-
- private:
-  CodeGeneratorMIPS* codegen_;
-};
-
-}  // namespace mips
-}  // namespace art
-
-#endif  // ART_COMPILER_OPTIMIZING_INSTRUCTION_SIMPLIFIER_MIPS_H_
diff --git a/compiler/optimizing/intrinsic_objects.cc b/compiler/optimizing/intrinsic_objects.cc
index c345624..5f6f562 100644
--- a/compiler/optimizing/intrinsic_objects.cc
+++ b/compiler/optimizing/intrinsic_objects.cc
@@ -17,18 +17,18 @@
 #include "intrinsic_objects.h"
 
 #include "art_field-inl.h"
+#include "base/casts.h"
 #include "base/logging.h"
-#include "class_root.h"
-#include "handle.h"
+#include "image.h"
 #include "obj_ptr-inl.h"
-#include "mirror/object_array-alloc-inl.h"
-#include "mirror/object_array-inl.h"
 
 namespace art {
 
-static ObjPtr<mirror::ObjectArray<mirror::Object>> LookupIntegerCache(Thread* self,
-                                                                      ClassLinker* class_linker)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
+static constexpr size_t kIntrinsicObjectsOffset =
+    enum_cast<size_t>(ImageHeader::kIntrinsicObjectsStart);
+
+ObjPtr<mirror::ObjectArray<mirror::Object>> IntrinsicObjects::LookupIntegerCache(
+    Thread* self, ClassLinker* class_linker) {
   ObjPtr<mirror::Class> integer_cache_class = class_linker->LookupClass(
       self, "Ljava/lang/Integer$IntegerCache;", /* class_loader= */ nullptr);
   if (integer_cache_class == nullptr || !integer_cache_class->IsInitialized()) {
@@ -44,47 +44,24 @@
   return integer_cache;
 }
 
-ObjPtr<mirror::ObjectArray<mirror::Object>> IntrinsicObjects::AllocateBootImageLiveObjects(
-    Thread* self,
-    ClassLinker* class_linker) REQUIRES_SHARED(Locks::mutator_lock_) {
-  // The objects used for the Integer.valueOf() intrinsic must remain live even if references
-  // to them are removed using reflection. Image roots are not accessible through reflection,
-  // so the array we construct here shall keep them alive.
-  StackHandleScope<1> hs(self);
-  Handle<mirror::ObjectArray<mirror::Object>> integer_cache =
-      hs.NewHandle(LookupIntegerCache(self, class_linker));
-  size_t live_objects_size =
-      (integer_cache != nullptr) ? (/* cache */ 1u + integer_cache->GetLength()) : 0u;
-  ObjPtr<mirror::ObjectArray<mirror::Object>> live_objects =
-      mirror::ObjectArray<mirror::Object>::Alloc(
-          self, GetClassRoot<mirror::ObjectArray<mirror::Object>>(class_linker), live_objects_size);
-  int32_t index = 0;
-  if (integer_cache != nullptr) {
-    live_objects->Set(index++, integer_cache.Get());
-    for (int32_t i = 0, length = integer_cache->GetLength(); i != length; ++i) {
-      live_objects->Set(index++, integer_cache->Get(i));
-    }
-  }
-  CHECK_EQ(index, live_objects->GetLength());
-
-  if (kIsDebugBuild && integer_cache != nullptr) {
-    CHECK_EQ(integer_cache.Get(), GetIntegerValueOfCache(live_objects));
-    for (int32_t i = 0, len = integer_cache->GetLength(); i != len; ++i) {
-      CHECK_EQ(integer_cache->GetWithoutChecks(i), GetIntegerValueOfObject(live_objects, i));
-    }
-  }
-  return live_objects;
+static bool HasIntrinsicObjects(
+    ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  DCHECK(boot_image_live_objects != nullptr);
+  uint32_t length = static_cast<uint32_t>(boot_image_live_objects->GetLength());
+  DCHECK_GE(length, kIntrinsicObjectsOffset);
+  return length != kIntrinsicObjectsOffset;
 }
 
 ObjPtr<mirror::ObjectArray<mirror::Object>> IntrinsicObjects::GetIntegerValueOfCache(
     ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects) {
-  DCHECK(boot_image_live_objects != nullptr);
-  if (boot_image_live_objects->GetLength() == 0u) {
+  if (!HasIntrinsicObjects(boot_image_live_objects)) {
     return nullptr;  // No intrinsic objects.
   }
   // No need for read barrier for boot image object or for verifying the value that was just stored.
   ObjPtr<mirror::Object> result =
-      boot_image_live_objects->GetWithoutChecks<kVerifyNone, kWithoutReadBarrier>(0);
+      boot_image_live_objects->GetWithoutChecks<kVerifyNone, kWithoutReadBarrier>(
+          kIntrinsicObjectsOffset);
   DCHECK(result != nullptr);
   DCHECK(result->IsObjectArray());
   DCHECK(result->GetClass()->DescriptorEquals("[Ljava/lang/Integer;"));
@@ -94,15 +71,14 @@
 ObjPtr<mirror::Object> IntrinsicObjects::GetIntegerValueOfObject(
     ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects,
     uint32_t index) {
-  DCHECK(boot_image_live_objects != nullptr);
-  DCHECK_NE(boot_image_live_objects->GetLength(), 0);
+  DCHECK(HasIntrinsicObjects(boot_image_live_objects));
   DCHECK_LT(index,
             static_cast<uint32_t>(GetIntegerValueOfCache(boot_image_live_objects)->GetLength()));
 
   // No need for read barrier for boot image object or for verifying the value that was just stored.
   ObjPtr<mirror::Object> result =
       boot_image_live_objects->GetWithoutChecks<kVerifyNone, kWithoutReadBarrier>(
-          /* skip the IntegerCache.cache */ 1u + index);
+          kIntrinsicObjectsOffset + /* skip the IntegerCache.cache */ 1u + index);
   DCHECK(result != nullptr);
   DCHECK(result->GetClass()->DescriptorEquals("Ljava/lang/Integer;"));
   return result;
@@ -110,8 +86,9 @@
 
 MemberOffset IntrinsicObjects::GetIntegerValueOfArrayDataOffset(
     ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects) {
-  DCHECK_NE(boot_image_live_objects->GetLength(), 0);
-  MemberOffset result = mirror::ObjectArray<mirror::Object>::OffsetOfElement(1u);
+  DCHECK(HasIntrinsicObjects(boot_image_live_objects));
+  MemberOffset result =
+      mirror::ObjectArray<mirror::Object>::OffsetOfElement(kIntrinsicObjectsOffset + 1u);
   DCHECK_EQ(GetIntegerValueOfObject(boot_image_live_objects, 0u),
             (boot_image_live_objects
                  ->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(result)));
diff --git a/compiler/optimizing/intrinsic_objects.h b/compiler/optimizing/intrinsic_objects.h
index 863017b..ed764bd 100644
--- a/compiler/optimizing/intrinsic_objects.h
+++ b/compiler/optimizing/intrinsic_objects.h
@@ -55,11 +55,9 @@
     return IndexField::Decode(intrinsic_data);
   }
 
-  static ObjPtr<mirror::ObjectArray<mirror::Object>> AllocateBootImageLiveObjects(
-      Thread* self,
-      ClassLinker* class_linker) REQUIRES_SHARED(Locks::mutator_lock_);
-
   // Functions for retrieving data for Integer.valueOf().
+  static ObjPtr<mirror::ObjectArray<mirror::Object>> LookupIntegerCache(
+      Thread* self, ClassLinker* class_linker) REQUIRES_SHARED(Locks::mutator_lock_);
   static ObjPtr<mirror::ObjectArray<mirror::Object>> GetIntegerValueOfCache(
       ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects)
       REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index ec5d17a..d88e034 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -54,6 +54,7 @@
 using helpers::SRegisterFrom;
 using helpers::WRegisterFrom;
 using helpers::XRegisterFrom;
+using helpers::HRegisterFrom;
 using helpers::InputRegisterAt;
 using helpers::OutputRegister;
 
@@ -90,8 +91,8 @@
     Register res_reg = RegisterFrom(ARM64ReturnLocation(type), type);
     __ Mov(trg_reg, res_reg, kDiscardForSameWReg);
   } else {
-    FPRegister trg_reg = FPRegisterFrom(trg, type);
-    FPRegister res_reg = FPRegisterFrom(ARM64ReturnLocation(type), type);
+    VRegister trg_reg = FPRegisterFrom(trg, type);
+    VRegister res_reg = FPRegisterFrom(ARM64ReturnLocation(type), type);
     __ Fmov(trg_reg, res_reg);
   }
 }
@@ -299,6 +300,14 @@
   locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
 }
 
+static void CreateIntIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
+  LocationSummary* locations =
+      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
 static void GenReverseBytes(LocationSummary* locations,
                             DataType::Type type,
                             MacroAssembler* masm) {
@@ -435,7 +444,7 @@
 
   Register src = InputRegisterAt(instr, 0);
   Register dst = RegisterFrom(instr->GetLocations()->Out(), type);
-  FPRegister fpr = (type == DataType::Type::kInt64) ? temps.AcquireD() : temps.AcquireS();
+  VRegister fpr = (type == DataType::Type::kInt64) ? temps.AcquireD() : temps.AcquireS();
 
   __ Fmov(fpr, src);
   __ Cnt(fpr.V8B(), fpr.V8B());
@@ -591,8 +600,8 @@
   // For example, FCVTPS(-1.9) = -1 and FCVTPS(1.1) = 2.
   // If we were using this instruction, for most inputs, more handling code would be needed.
   LocationSummary* l = invoke->GetLocations();
-  FPRegister in_reg = is_double ? DRegisterFrom(l->InAt(0)) : SRegisterFrom(l->InAt(0));
-  FPRegister tmp_fp = is_double ? DRegisterFrom(l->GetTemp(0)) : SRegisterFrom(l->GetTemp(0));
+  VRegister in_reg = is_double ? DRegisterFrom(l->InAt(0)) : SRegisterFrom(l->InAt(0));
+  VRegister tmp_fp = is_double ? DRegisterFrom(l->GetTemp(0)) : SRegisterFrom(l->GetTemp(0));
   Register out_reg = is_double ? XRegisterFrom(l->Out()) : WRegisterFrom(l->Out());
   vixl::aarch64::Label done;
 
@@ -1960,7 +1969,8 @@
   Register tmp2 = temps.AcquireX();
 
   vixl::aarch64::Label done;
-  vixl::aarch64::Label compressed_string_loop;
+  vixl::aarch64::Label compressed_string_vector_loop;
+  vixl::aarch64::Label compressed_string_remainder;
   __ Sub(num_chr, srcEnd, srcBegin);
   // Early out for valid zero-length retrievals.
   __ Cbz(num_chr, &done);
@@ -2013,16 +2023,39 @@
   __ B(&done);
 
   if (mirror::kUseStringCompression) {
+    // For compressed strings, acquire a SIMD temporary register.
+    VRegister vtmp1 = temps.AcquireVRegisterOfSize(kQRegSize);
     const size_t c_char_size = DataType::Size(DataType::Type::kInt8);
     DCHECK_EQ(c_char_size, 1u);
     __ Bind(&compressed_string_preloop);
     __ Add(src_ptr, src_ptr, Operand(srcBegin));
-    // Copy loop for compressed src, copying 1 character (8-bit) to (16-bit) at a time.
-    __ Bind(&compressed_string_loop);
+
+    // Save repairing the value of num_chr on the < 8 character path.
+    __ Subs(tmp1, num_chr, 8);
+    __ B(lt, &compressed_string_remainder);
+
+    // Keep the result of the earlier subs, we are going to fetch at least 8 characters.
+    __ Mov(num_chr, tmp1);
+
+    // Main loop for compressed src, copying 8 characters (8-bit) to (16-bit) at a time.
+    // Uses SIMD instructions.
+    __ Bind(&compressed_string_vector_loop);
+    __ Ld1(vtmp1.V8B(), MemOperand(src_ptr, c_char_size * 8, PostIndex));
+    __ Subs(num_chr, num_chr, 8);
+    __ Uxtl(vtmp1.V8H(), vtmp1.V8B());
+    __ St1(vtmp1.V8H(), MemOperand(dst_ptr, char_size * 8, PostIndex));
+    __ B(ge, &compressed_string_vector_loop);
+
+    __ Adds(num_chr, num_chr, 8);
+    __ B(eq, &done);
+
+    // Loop for < 8 character case and remainder handling with a compressed src.
+    // Copies 1 character (8-bit) to (16-bit) at a time.
+    __ Bind(&compressed_string_remainder);
     __ Ldrb(tmp1, MemOperand(src_ptr, c_char_size, PostIndex));
     __ Strh(tmp1, MemOperand(dst_ptr, char_size, PostIndex));
     __ Subs(num_chr, num_chr, Operand(1));
-    __ B(gt, &compressed_string_loop);
+    __ B(gt, &compressed_string_remainder);
   }
 
   __ Bind(&done);
@@ -2796,22 +2829,25 @@
                           bool is64bit,
                           MacroAssembler* masm) {
   Operand infinity;
+  Operand tst_mask;
   Register out;
 
   if (is64bit) {
     infinity = kPositiveInfinityDouble;
+    tst_mask = MaskLeastSignificant<uint64_t>(63);
     out = XRegisterFrom(locations->Out());
   } else {
     infinity = kPositiveInfinityFloat;
+    tst_mask = MaskLeastSignificant<uint32_t>(31);
     out = WRegisterFrom(locations->Out());
   }
 
-  const Register zero = vixl::aarch64::Assembler::AppropriateZeroRegFor(out);
-
   MoveFPToInt(locations, is64bit, masm);
+  // Checks whether exponent bits are all 1 and fraction bits are all 0.
   __ Eor(out, out, infinity);
-  // We don't care about the sign bit, so shift left.
-  __ Cmp(zero, Operand(out, LSL, 1));
+  // TST bitmask is used to mask out the sign bit: either 0x7fffffff or 0x7fffffffffffffff
+  // depending on is64bit.
+  __ Tst(out, tst_mask);
   __ Cset(out, eq);
 }
 
@@ -3169,6 +3205,203 @@
   GenerateCodeForCalculationCRC32ValueOfBytes(masm, crc, ptr, length, out);
 }
 
+void IntrinsicLocationsBuilderARM64::VisitFP16ToFloat(HInvoke* invoke) {
+  if (!codegen_->GetInstructionSetFeatures().HasFP16()) {
+    return;
+  }
+
+  LocationSummary* locations = new (allocator_) LocationSummary(invoke,
+                                                                LocationSummary::kNoCall,
+                                                                kIntrinsified);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresFpuRegister());
+}
+
+void IntrinsicCodeGeneratorARM64::VisitFP16ToFloat(HInvoke* invoke) {
+  DCHECK(codegen_->GetInstructionSetFeatures().HasFP16());
+  MacroAssembler* masm = GetVIXLAssembler();
+  UseScratchRegisterScope scratch_scope(masm);
+  Register bits = InputRegisterAt(invoke, 0);
+  VRegister out = SRegisterFrom(invoke->GetLocations()->Out());
+  VRegister half = scratch_scope.AcquireH();
+  __ Fmov(half, bits);  // ARMv8.2
+  __ Fcvt(out, half);
+}
+
+void IntrinsicLocationsBuilderARM64::VisitFP16ToHalf(HInvoke* invoke) {
+  if (!codegen_->GetInstructionSetFeatures().HasFP16()) {
+    return;
+  }
+
+  LocationSummary* locations = new (allocator_) LocationSummary(invoke,
+                                                                LocationSummary::kNoCall,
+                                                                kIntrinsified);
+  locations->SetInAt(0, Location::RequiresFpuRegister());
+  locations->SetOut(Location::RequiresRegister());
+}
+
+void IntrinsicCodeGeneratorARM64::VisitFP16ToHalf(HInvoke* invoke) {
+  DCHECK(codegen_->GetInstructionSetFeatures().HasFP16());
+  MacroAssembler* masm = GetVIXLAssembler();
+  UseScratchRegisterScope scratch_scope(masm);
+  VRegister in = SRegisterFrom(invoke->GetLocations()->InAt(0));
+  VRegister half = scratch_scope.AcquireH();
+  Register out = WRegisterFrom(invoke->GetLocations()->Out());
+  __ Fcvt(half, in);
+  __ Fmov(out, half);
+  __ Sxth(out, out);  // Sign-extend, since the intrinsic returns a (signed) short.
+}
+
+template<typename OP>
+void GenerateFP16Round(HInvoke* invoke,
+                       CodeGeneratorARM64* const codegen_,
+                       MacroAssembler* masm,
+                       const OP roundOp) {
+  DCHECK(codegen_->GetInstructionSetFeatures().HasFP16());
+  LocationSummary* locations = invoke->GetLocations();
+  UseScratchRegisterScope scratch_scope(masm);
+  Register out = WRegisterFrom(locations->Out());
+  VRegister half = scratch_scope.AcquireH();
+  __ Fmov(half, WRegisterFrom(locations->InAt(0)));
+  roundOp(half, half);
+  __ Fmov(out, half);
+  __ Sxth(out, out);
+}
+
+void IntrinsicLocationsBuilderARM64::VisitFP16Floor(HInvoke* invoke) {
+  if (!codegen_->GetInstructionSetFeatures().HasFP16()) {
+    return;
+  }
+
+  CreateIntToIntLocations(allocator_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitFP16Floor(HInvoke* invoke) {
+  MacroAssembler* masm = GetVIXLAssembler();
+  auto roundOp = [masm](const VRegister& out, const VRegister& in) {
+    __ Frintm(out, in);  // Round towards minus infinity (floor).
+  };
+  GenerateFP16Round(invoke, codegen_, masm, roundOp);
+}
+
+void IntrinsicLocationsBuilderARM64::VisitFP16Ceil(HInvoke* invoke) {
+  if (!codegen_->GetInstructionSetFeatures().HasFP16()) {
+    return;
+  }
+
+  CreateIntToIntLocations(allocator_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitFP16Ceil(HInvoke* invoke) {
+  MacroAssembler* masm = GetVIXLAssembler();
+  auto roundOp = [masm](const VRegister& out, const VRegister& in) {
+    __ Frintp(out, in);  // Round towards plus infinity (ceil).
+  };
+  GenerateFP16Round(invoke, codegen_, masm, roundOp);
+}
+
+void IntrinsicLocationsBuilderARM64::VisitFP16Rint(HInvoke* invoke) {
+  if (!codegen_->GetInstructionSetFeatures().HasFP16()) {
+    return;
+  }
+
+  CreateIntToIntLocations(allocator_, invoke);
+}
+
+void IntrinsicCodeGeneratorARM64::VisitFP16Rint(HInvoke* invoke) {
+  MacroAssembler* masm = GetVIXLAssembler();
+  auto roundOp = [masm](const VRegister& out, const VRegister& in) {
+    __ Frintn(out, in);  // Round to nearest, with ties to even.
+  };
+  GenerateFP16Round(invoke, codegen_, masm, roundOp);
+}
+
+template<typename OP>
+void GenerateFP16Compare(HInvoke* invoke,
+                         CodeGeneratorARM64* codegen,
+                         MacroAssembler* masm,
+                         const OP compareOp) {
+  DCHECK(codegen->GetInstructionSetFeatures().HasFP16());
+  LocationSummary* locations = invoke->GetLocations();
+  Register out = WRegisterFrom(locations->Out());
+  VRegister half0 = HRegisterFrom(locations->GetTemp(0));
+  VRegister half1 = HRegisterFrom(locations->GetTemp(1));
+  __ Fmov(half0, WRegisterFrom(locations->InAt(0)));
+  __ Fmov(half1, WRegisterFrom(locations->InAt(1)));
+  compareOp(out, half0, half1);
+}
+
+static inline void GenerateFP16Compare(HInvoke* invoke,
+                                       CodeGeneratorARM64* codegen,
+                                       MacroAssembler* masm,
+                                       vixl::aarch64::Condition cond) {
+  auto compareOp = [masm, cond](const Register out, const VRegister& in0, const VRegister& in1) {
+    __ Fcmp(in0, in1);
+    __ Cset(out, cond);
+  };
+  GenerateFP16Compare(invoke, codegen, masm, compareOp);
+}
+
+void IntrinsicLocationsBuilderARM64::VisitFP16Greater(HInvoke* invoke) {
+  if (!codegen_->GetInstructionSetFeatures().HasFP16()) {
+    return;
+  }
+
+  CreateIntIntToIntLocations(allocator_, invoke);
+  invoke->GetLocations()->AddTemp(Location::RequiresFpuRegister());
+  invoke->GetLocations()->AddTemp(Location::RequiresFpuRegister());
+}
+
+void IntrinsicCodeGeneratorARM64::VisitFP16Greater(HInvoke* invoke) {
+  MacroAssembler* masm = GetVIXLAssembler();
+  GenerateFP16Compare(invoke, codegen_, masm, gt);
+}
+
+void IntrinsicLocationsBuilderARM64::VisitFP16GreaterEquals(HInvoke* invoke) {
+  if (!codegen_->GetInstructionSetFeatures().HasFP16()) {
+    return;
+  }
+
+  CreateIntIntToIntLocations(allocator_, invoke);
+  invoke->GetLocations()->AddTemp(Location::RequiresFpuRegister());
+  invoke->GetLocations()->AddTemp(Location::RequiresFpuRegister());
+}
+
+void IntrinsicCodeGeneratorARM64::VisitFP16GreaterEquals(HInvoke* invoke) {
+  MacroAssembler* masm = GetVIXLAssembler();
+  GenerateFP16Compare(invoke, codegen_, masm, ge);
+}
+
+void IntrinsicLocationsBuilderARM64::VisitFP16Less(HInvoke* invoke) {
+  if (!codegen_->GetInstructionSetFeatures().HasFP16()) {
+    return;
+  }
+
+  CreateIntIntToIntLocations(allocator_, invoke);
+  invoke->GetLocations()->AddTemp(Location::RequiresFpuRegister());
+  invoke->GetLocations()->AddTemp(Location::RequiresFpuRegister());
+}
+
+void IntrinsicCodeGeneratorARM64::VisitFP16Less(HInvoke* invoke) {
+  MacroAssembler* masm = GetVIXLAssembler();
+  GenerateFP16Compare(invoke, codegen_, masm, mi);
+}
+
+void IntrinsicLocationsBuilderARM64::VisitFP16LessEquals(HInvoke* invoke) {
+  if (!codegen_->GetInstructionSetFeatures().HasFP16()) {
+    return;
+  }
+
+  CreateIntIntToIntLocations(allocator_, invoke);
+  invoke->GetLocations()->AddTemp(Location::RequiresFpuRegister());
+  invoke->GetLocations()->AddTemp(Location::RequiresFpuRegister());
+}
+
+void IntrinsicCodeGeneratorARM64::VisitFP16LessEquals(HInvoke* invoke) {
+  MacroAssembler* masm = GetVIXLAssembler();
+  GenerateFP16Compare(invoke, codegen_, masm, ls);
+}
+
 UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent)
 
 UNIMPLEMENTED_INTRINSIC(ARM64, StringStringIndexOf);
@@ -3176,7 +3409,16 @@
 UNIMPLEMENTED_INTRINSIC(ARM64, StringBufferAppend);
 UNIMPLEMENTED_INTRINSIC(ARM64, StringBufferLength);
 UNIMPLEMENTED_INTRINSIC(ARM64, StringBufferToString);
-UNIMPLEMENTED_INTRINSIC(ARM64, StringBuilderAppend);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBuilderAppendObject);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBuilderAppendString);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBuilderAppendCharSequence);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBuilderAppendCharArray);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBuilderAppendBoolean);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBuilderAppendChar);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBuilderAppendInt);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBuilderAppendLong);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBuilderAppendFloat);
+UNIMPLEMENTED_INTRINSIC(ARM64, StringBuilderAppendDouble);
 UNIMPLEMENTED_INTRINSIC(ARM64, StringBuilderLength);
 UNIMPLEMENTED_INTRINSIC(ARM64, StringBuilderToString);
 
diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc
index f0aa92e..89e5203 100644
--- a/compiler/optimizing/intrinsics_arm_vixl.cc
+++ b/compiler/optimizing/intrinsics_arm_vixl.cc
@@ -3070,13 +3070,31 @@
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32Update)
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32UpdateBytes)
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, CRC32UpdateByteBuffer)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, FP16ToFloat)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, FP16ToHalf)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, FP16Floor)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, FP16Ceil)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, FP16Rint)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, FP16Greater)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, FP16GreaterEquals)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, FP16Less)
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, FP16LessEquals)
 
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringStringIndexOfAfter);
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBufferAppend);
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBufferLength);
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBufferToString);
-UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderAppend);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderAppendObject);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderAppendString);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderAppendCharSequence);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderAppendCharArray);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderAppendBoolean);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderAppendChar);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderAppendInt);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderAppendLong);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderAppendFloat);
+UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderAppendDouble);
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderLength);
 UNIMPLEMENTED_INTRINSIC(ARMVIXL, StringBuilderToString);
 
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
deleted file mode 100644
index 3da0e57..0000000
--- a/compiler/optimizing/intrinsics_mips.cc
+++ /dev/null
@@ -1,2732 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "intrinsics_mips.h"
-
-#include "arch/mips/instruction_set_features_mips.h"
-#include "art_method.h"
-#include "code_generator_mips.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "heap_poisoning.h"
-#include "intrinsics.h"
-#include "mirror/array-inl.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/string.h"
-#include "scoped_thread_state_change-inl.h"
-#include "thread.h"
-#include "utils/mips/assembler_mips.h"
-#include "utils/mips/constants_mips.h"
-
-namespace art {
-
-namespace mips {
-
-IntrinsicLocationsBuilderMIPS::IntrinsicLocationsBuilderMIPS(CodeGeneratorMIPS* codegen)
-  : codegen_(codegen), allocator_(codegen->GetGraph()->GetAllocator()) {
-}
-
-MipsAssembler* IntrinsicCodeGeneratorMIPS::GetAssembler() {
-  return reinterpret_cast<MipsAssembler*>(codegen_->GetAssembler());
-}
-
-ArenaAllocator* IntrinsicCodeGeneratorMIPS::GetAllocator() {
-  return codegen_->GetGraph()->GetAllocator();
-}
-
-inline bool IntrinsicCodeGeneratorMIPS::IsR2OrNewer() const {
-  return codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2();
-}
-
-inline bool IntrinsicCodeGeneratorMIPS::IsR6() const {
-  return codegen_->GetInstructionSetFeatures().IsR6();
-}
-
-inline bool IntrinsicCodeGeneratorMIPS::Is32BitFPU() const {
-  return codegen_->GetInstructionSetFeatures().Is32BitFloatingPoint();
-}
-
-inline bool IntrinsicCodeGeneratorMIPS::HasMsa() const {
-  return codegen_->GetInstructionSetFeatures().HasMsa();
-}
-
-#define __ codegen->GetAssembler()->
-
-static void MoveFromReturnRegister(Location trg,
-                                   DataType::Type type,
-                                   CodeGeneratorMIPS* codegen) {
-  if (!trg.IsValid()) {
-    DCHECK_EQ(type, DataType::Type::kVoid);
-    return;
-  }
-
-  DCHECK_NE(type, DataType::Type::kVoid);
-
-  if (DataType::IsIntegralType(type) || type == DataType::Type::kReference) {
-    Register trg_reg = trg.AsRegister<Register>();
-    if (trg_reg != V0) {
-      __ Move(V0, trg_reg);
-    }
-  } else {
-    FRegister trg_reg = trg.AsFpuRegister<FRegister>();
-    if (trg_reg != F0) {
-      if (type == DataType::Type::kFloat32) {
-        __ MovS(F0, trg_reg);
-      } else {
-        __ MovD(F0, trg_reg);
-      }
-    }
-  }
-}
-
-static void MoveArguments(HInvoke* invoke, CodeGeneratorMIPS* codegen) {
-  InvokeDexCallingConventionVisitorMIPS calling_convention_visitor;
-  IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
-}
-
-// Slow-path for fallback (calling the managed code to handle the
-// intrinsic) in an intrinsified call. This will copy the arguments
-// into the positions for a regular call.
-//
-// Note: The actual parameters are required to be in the locations
-//       given by the invoke's location summary. If an intrinsic
-//       modifies those locations before a slowpath call, they must be
-//       restored!
-class IntrinsicSlowPathMIPS : public SlowPathCodeMIPS {
- public:
-  explicit IntrinsicSlowPathMIPS(HInvoke* invoke) : SlowPathCodeMIPS(invoke), invoke_(invoke) { }
-
-  void EmitNativeCode(CodeGenerator* codegen_in) override {
-    CodeGeneratorMIPS* codegen = down_cast<CodeGeneratorMIPS*>(codegen_in);
-
-    __ Bind(GetEntryLabel());
-
-    SaveLiveRegisters(codegen, invoke_->GetLocations());
-
-    MoveArguments(invoke_, codegen);
-
-    if (invoke_->IsInvokeStaticOrDirect()) {
-      codegen->GenerateStaticOrDirectCall(
-          invoke_->AsInvokeStaticOrDirect(), Location::RegisterLocation(A0), this);
-    } else {
-      codegen->GenerateVirtualCall(
-          invoke_->AsInvokeVirtual(), Location::RegisterLocation(A0), this);
-    }
-
-    // Copy the result back to the expected output.
-    Location out = invoke_->GetLocations()->Out();
-    if (out.IsValid()) {
-      DCHECK(out.IsRegister());  // TODO: Replace this when we support output in memory.
-      DCHECK(!invoke_->GetLocations()->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
-      MoveFromReturnRegister(out, invoke_->GetType(), codegen);
-    }
-
-    RestoreLiveRegisters(codegen, invoke_->GetLocations());
-    __ B(GetExitLabel());
-  }
-
-  const char* GetDescription() const override { return "IntrinsicSlowPathMIPS"; }
-
- private:
-  // The instruction where this slow path is happening.
-  HInvoke* const invoke_;
-
-  DISALLOW_COPY_AND_ASSIGN(IntrinsicSlowPathMIPS);
-};
-
-#undef __
-
-bool IntrinsicLocationsBuilderMIPS::TryDispatch(HInvoke* invoke) {
-  Dispatch(invoke);
-  LocationSummary* res = invoke->GetLocations();
-  return res != nullptr && res->Intrinsified();
-}
-
-#define __ assembler->
-
-static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresFpuRegister());
-  locations->SetOut(Location::RequiresRegister());
-}
-
-static void MoveFPToInt(LocationSummary* locations, bool is64bit, MipsAssembler* assembler) {
-  FRegister in = locations->InAt(0).AsFpuRegister<FRegister>();
-
-  if (is64bit) {
-    Register out_lo = locations->Out().AsRegisterPairLow<Register>();
-    Register out_hi = locations->Out().AsRegisterPairHigh<Register>();
-
-    __ Mfc1(out_lo, in);
-    __ MoveFromFpuHigh(out_hi, in);
-  } else {
-    Register out = locations->Out().AsRegister<Register>();
-
-    __ Mfc1(out, in);
-  }
-}
-
-// long java.lang.Double.doubleToRawLongBits(double)
-void IntrinsicLocationsBuilderMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
-  CreateFPToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
-}
-
-// int java.lang.Float.floatToRawIntBits(float)
-void IntrinsicLocationsBuilderMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
-  CreateFPToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
-}
-
-static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresFpuRegister());
-}
-
-static void MoveIntToFP(LocationSummary* locations, bool is64bit, MipsAssembler* assembler) {
-  FRegister out = locations->Out().AsFpuRegister<FRegister>();
-
-  if (is64bit) {
-    Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
-    Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
-
-    __ Mtc1(in_lo, out);
-    __ MoveToFpuHigh(in_hi, out);
-  } else {
-    Register in = locations->InAt(0).AsRegister<Register>();
-
-    __ Mtc1(in, out);
-  }
-}
-
-// double java.lang.Double.longBitsToDouble(long)
-void IntrinsicLocationsBuilderMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
-  CreateIntToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
-}
-
-// float java.lang.Float.intBitsToFloat(int)
-void IntrinsicLocationsBuilderMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) {
-  CreateIntToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitFloatIntBitsToFloat(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
-}
-
-static void CreateIntToIntLocations(ArenaAllocator* allocator,
-                                    HInvoke* invoke,
-                                    Location::OutputOverlap overlaps = Location::kNoOutputOverlap) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(), overlaps);
-}
-
-static void GenReverse(LocationSummary* locations,
-                       DataType::Type type,
-                       bool isR2OrNewer,
-                       bool isR6,
-                       bool reverseBits,
-                       MipsAssembler* assembler) {
-  DCHECK(type == DataType::Type::kInt16 ||
-         type == DataType::Type::kInt32 ||
-         type == DataType::Type::kInt64);
-  DCHECK(type != DataType::Type::kInt16 || !reverseBits);
-
-  if (type == DataType::Type::kInt16) {
-    Register in = locations->InAt(0).AsRegister<Register>();
-    Register out = locations->Out().AsRegister<Register>();
-
-    if (isR2OrNewer) {
-      __ Wsbh(out, in);
-      __ Seh(out, out);
-    } else {
-      __ Sll(TMP, in, 24);
-      __ Sra(TMP, TMP, 16);
-      __ Sll(out, in, 16);
-      __ Srl(out, out, 24);
-      __ Or(out, out, TMP);
-    }
-  } else if (type == DataType::Type::kInt32) {
-    Register in = locations->InAt(0).AsRegister<Register>();
-    Register out = locations->Out().AsRegister<Register>();
-
-    if (isR2OrNewer) {
-      __ Rotr(out, in, 16);
-      __ Wsbh(out, out);
-    } else {
-      // MIPS32r1
-      // __ Rotr(out, in, 16);
-      __ Sll(TMP, in, 16);
-      __ Srl(out, in, 16);
-      __ Or(out, out, TMP);
-      // __ Wsbh(out, out);
-      __ LoadConst32(AT, 0x00FF00FF);
-      __ And(TMP, out, AT);
-      __ Sll(TMP, TMP, 8);
-      __ Srl(out, out, 8);
-      __ And(out, out, AT);
-      __ Or(out, out, TMP);
-    }
-    if (reverseBits) {
-      if (isR6) {
-        __ Bitswap(out, out);
-      } else {
-        __ LoadConst32(AT, 0x0F0F0F0F);
-        __ And(TMP, out, AT);
-        __ Sll(TMP, TMP, 4);
-        __ Srl(out, out, 4);
-        __ And(out, out, AT);
-        __ Or(out, TMP, out);
-        __ LoadConst32(AT, 0x33333333);
-        __ And(TMP, out, AT);
-        __ Sll(TMP, TMP, 2);
-        __ Srl(out, out, 2);
-        __ And(out, out, AT);
-        __ Or(out, TMP, out);
-        __ LoadConst32(AT, 0x55555555);
-        __ And(TMP, out, AT);
-        __ Sll(TMP, TMP, 1);
-        __ Srl(out, out, 1);
-        __ And(out, out, AT);
-        __ Or(out, TMP, out);
-      }
-    }
-  } else if (type == DataType::Type::kInt64) {
-    Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
-    Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
-    Register out_lo = locations->Out().AsRegisterPairLow<Register>();
-    Register out_hi = locations->Out().AsRegisterPairHigh<Register>();
-
-    if (isR2OrNewer) {
-      __ Rotr(AT, in_hi, 16);
-      __ Rotr(TMP, in_lo, 16);
-      __ Wsbh(out_lo, AT);
-      __ Wsbh(out_hi, TMP);
-    } else {
-      // When calling CreateIntToIntLocations() we promised that the
-      // use of the out_lo/out_hi wouldn't overlap with the use of
-      // in_lo/in_hi. Be very careful not to write to out_lo/out_hi
-      // until we're completely done reading from in_lo/in_hi.
-      // __ Rotr(TMP, in_lo, 16);
-      __ Sll(TMP, in_lo, 16);
-      __ Srl(AT, in_lo, 16);
-      __ Or(TMP, TMP, AT);             // Hold in TMP until it's safe
-                                       // to write to out_hi.
-      // __ Rotr(out_lo, in_hi, 16);
-      __ Sll(AT, in_hi, 16);
-      __ Srl(out_lo, in_hi, 16);        // Here we are finally done reading
-                                        // from in_lo/in_hi so it's okay to
-                                        // write to out_lo/out_hi.
-      __ Or(out_lo, out_lo, AT);
-      // __ Wsbh(out_hi, out_hi);
-      __ LoadConst32(AT, 0x00FF00FF);
-      __ And(out_hi, TMP, AT);
-      __ Sll(out_hi, out_hi, 8);
-      __ Srl(TMP, TMP, 8);
-      __ And(TMP, TMP, AT);
-      __ Or(out_hi, out_hi, TMP);
-      // __ Wsbh(out_lo, out_lo);
-      __ And(TMP, out_lo, AT);  // AT already holds the correct mask value
-      __ Sll(TMP, TMP, 8);
-      __ Srl(out_lo, out_lo, 8);
-      __ And(out_lo, out_lo, AT);
-      __ Or(out_lo, out_lo, TMP);
-    }
-    if (reverseBits) {
-      if (isR6) {
-        __ Bitswap(out_hi, out_hi);
-        __ Bitswap(out_lo, out_lo);
-      } else {
-        __ LoadConst32(AT, 0x0F0F0F0F);
-        __ And(TMP, out_hi, AT);
-        __ Sll(TMP, TMP, 4);
-        __ Srl(out_hi, out_hi, 4);
-        __ And(out_hi, out_hi, AT);
-        __ Or(out_hi, TMP, out_hi);
-        __ And(TMP, out_lo, AT);
-        __ Sll(TMP, TMP, 4);
-        __ Srl(out_lo, out_lo, 4);
-        __ And(out_lo, out_lo, AT);
-        __ Or(out_lo, TMP, out_lo);
-        __ LoadConst32(AT, 0x33333333);
-        __ And(TMP, out_hi, AT);
-        __ Sll(TMP, TMP, 2);
-        __ Srl(out_hi, out_hi, 2);
-        __ And(out_hi, out_hi, AT);
-        __ Or(out_hi, TMP, out_hi);
-        __ And(TMP, out_lo, AT);
-        __ Sll(TMP, TMP, 2);
-        __ Srl(out_lo, out_lo, 2);
-        __ And(out_lo, out_lo, AT);
-        __ Or(out_lo, TMP, out_lo);
-        __ LoadConst32(AT, 0x55555555);
-        __ And(TMP, out_hi, AT);
-        __ Sll(TMP, TMP, 1);
-        __ Srl(out_hi, out_hi, 1);
-        __ And(out_hi, out_hi, AT);
-        __ Or(out_hi, TMP, out_hi);
-        __ And(TMP, out_lo, AT);
-        __ Sll(TMP, TMP, 1);
-        __ Srl(out_lo, out_lo, 1);
-        __ And(out_lo, out_lo, AT);
-        __ Or(out_lo, TMP, out_lo);
-      }
-    }
-  }
-}
-
-// int java.lang.Integer.reverseBytes(int)
-void IntrinsicLocationsBuilderMIPS::VisitIntegerReverseBytes(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitIntegerReverseBytes(HInvoke* invoke) {
-  GenReverse(invoke->GetLocations(),
-             DataType::Type::kInt32,
-             IsR2OrNewer(),
-             IsR6(),
-             /* reverseBits= */ false,
-             GetAssembler());
-}
-
-// long java.lang.Long.reverseBytes(long)
-void IntrinsicLocationsBuilderMIPS::VisitLongReverseBytes(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitLongReverseBytes(HInvoke* invoke) {
-  GenReverse(invoke->GetLocations(),
-             DataType::Type::kInt64,
-             IsR2OrNewer(),
-             IsR6(),
-             /* reverseBits= */ false,
-             GetAssembler());
-}
-
-// short java.lang.Short.reverseBytes(short)
-void IntrinsicLocationsBuilderMIPS::VisitShortReverseBytes(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitShortReverseBytes(HInvoke* invoke) {
-  GenReverse(invoke->GetLocations(),
-             DataType::Type::kInt16,
-             IsR2OrNewer(),
-             IsR6(),
-             /* reverseBits= */ false,
-             GetAssembler());
-}
-
-static void GenNumberOfLeadingZeroes(LocationSummary* locations,
-                                     bool is64bit,
-                                     bool isR6,
-                                     MipsAssembler* assembler) {
-  Register out = locations->Out().AsRegister<Register>();
-  if (is64bit) {
-    Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
-    Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
-
-    if (isR6) {
-      __ ClzR6(AT, in_hi);
-      __ ClzR6(TMP, in_lo);
-      __ Seleqz(TMP, TMP, in_hi);
-    } else {
-      __ ClzR2(AT, in_hi);
-      __ ClzR2(TMP, in_lo);
-      __ Movn(TMP, ZERO, in_hi);
-    }
-    __ Addu(out, AT, TMP);
-  } else {
-    Register in = locations->InAt(0).AsRegister<Register>();
-
-    if (isR6) {
-      __ ClzR6(out, in);
-    } else {
-      __ ClzR2(out, in);
-    }
-  }
-}
-
-// int java.lang.Integer.numberOfLeadingZeros(int i)
-void IntrinsicLocationsBuilderMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
-  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ false, IsR6(), GetAssembler());
-}
-
-// int java.lang.Long.numberOfLeadingZeros(long i)
-void IntrinsicLocationsBuilderMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
-  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ true, IsR6(), GetAssembler());
-}
-
-static void GenNumberOfTrailingZeroes(LocationSummary* locations,
-                                      bool is64bit,
-                                      bool isR6,
-                                      MipsAssembler* assembler) {
-  Register out = locations->Out().AsRegister<Register>();
-  Register in_lo;
-  Register in;
-
-  if (is64bit) {
-    Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
-
-    in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
-
-    // If in_lo is zero then count the number of trailing zeroes in in_hi;
-    // otherwise count the number of trailing zeroes in in_lo.
-    // out = in_lo ? in_lo : in_hi;
-    if (isR6) {
-      __ Seleqz(out, in_hi, in_lo);
-      __ Selnez(TMP, in_lo, in_lo);
-      __ Or(out, out, TMP);
-    } else {
-      __ Movz(out, in_hi, in_lo);
-      __ Movn(out, in_lo, in_lo);
-    }
-
-    in = out;
-  } else {
-    in = locations->InAt(0).AsRegister<Register>();
-    // Give in_lo a dummy value to keep the compiler from complaining.
-    // Since we only get here in the 32-bit case, this value will never
-    // be used.
-    in_lo = in;
-  }
-
-  if (isR6) {
-    // We don't have an instruction to count the number of trailing zeroes.
-    // Start by flipping the bits end-for-end so we can count the number of
-    // leading zeroes instead.
-    __ Rotr(out, in, 16);
-    __ Wsbh(out, out);
-    __ Bitswap(out, out);
-    __ ClzR6(out, out);
-  } else {
-    // Convert trailing zeroes to trailing ones, and bits to their left
-    // to zeroes.
-    __ Addiu(TMP, in, -1);
-    __ Xor(out, TMP, in);
-    __ And(out, out, TMP);
-    // Count number of leading zeroes.
-    __ ClzR2(out, out);
-    // Subtract number of leading zeroes from 32 to get number of trailing ones.
-    // Remember that the trailing ones were formerly trailing zeroes.
-    __ LoadConst32(TMP, 32);
-    __ Subu(out, TMP, out);
-  }
-
-  if (is64bit) {
-    // If in_lo is zero, then we counted the number of trailing zeroes in in_hi so we must add the
-    // number of trailing zeroes in in_lo (32) to get the correct final count
-    __ LoadConst32(TMP, 32);
-    if (isR6) {
-      __ Seleqz(TMP, TMP, in_lo);
-    } else {
-      __ Movn(TMP, ZERO, in_lo);
-    }
-    __ Addu(out, out, TMP);
-  }
-}
-
-// int java.lang.Integer.numberOfTrailingZeros(int i)
-void IntrinsicLocationsBuilderMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
-  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ false, IsR6(), GetAssembler());
-}
-
-// int java.lang.Long.numberOfTrailingZeros(long i)
-void IntrinsicLocationsBuilderMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
-  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ true, IsR6(), GetAssembler());
-}
-
-// int java.lang.Integer.reverse(int)
-void IntrinsicLocationsBuilderMIPS::VisitIntegerReverse(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitIntegerReverse(HInvoke* invoke) {
-  GenReverse(invoke->GetLocations(),
-             DataType::Type::kInt32,
-             IsR2OrNewer(),
-             IsR6(),
-             /* reverseBits= */ true,
-             GetAssembler());
-}
-
-// long java.lang.Long.reverse(long)
-void IntrinsicLocationsBuilderMIPS::VisitLongReverse(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitLongReverse(HInvoke* invoke) {
-  GenReverse(invoke->GetLocations(),
-             DataType::Type::kInt64,
-             IsR2OrNewer(),
-             IsR6(),
-             /* reverseBits= */ true,
-             GetAssembler());
-}
-
-static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresFpuRegister());
-  locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-}
-
-static void GenBitCount(LocationSummary* locations,
-                        DataType::Type type,
-                        bool isR6,
-                        bool hasMsa,
-                        MipsAssembler* assembler) {
-  Register out = locations->Out().AsRegister<Register>();
-
-  // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
-  //
-  // A generalization of the best bit counting method to integers of
-  // bit-widths up to 128 (parameterized by type T) is this:
-  //
-  // v = v - ((v >> 1) & (T)~(T)0/3);                           // temp
-  // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3);      // temp
-  // v = (v + (v >> 4)) & (T)~(T)0/255*15;                      // temp
-  // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; // count
-  //
-  // For comparison, for 32-bit quantities, this algorithm can be executed
-  // using 20 MIPS instructions (the calls to LoadConst32() generate two
-  // machine instructions each for the values being used in this algorithm).
-  // A(n unrolled) loop-based algorithm required 25 instructions.
-  //
-  // For 64-bit quantities, this algorithm gets executed twice, (once
-  // for in_lo, and again for in_hi), but saves a few instructions
-  // because the mask values only have to be loaded once.  Using this
-  // algorithm the count for a 64-bit operand can be performed in 29
-  // instructions compared to a loop-based algorithm which required 47
-  // instructions.
-
-  if (hasMsa) {
-    if (type == DataType::Type::kInt32) {
-      Register in = locations->InAt(0).AsRegister<Register>();
-      __ Mtc1(in, FTMP);
-      __ PcntW(static_cast<VectorRegister>(FTMP), static_cast<VectorRegister>(FTMP));
-      __ Mfc1(out, FTMP);
-    } else {
-      DCHECK_EQ(type, DataType::Type::kInt64);
-      Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
-      Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
-      __ Mtc1(in_lo, FTMP);
-      __ Mthc1(in_hi, FTMP);
-      __ PcntD(static_cast<VectorRegister>(FTMP), static_cast<VectorRegister>(FTMP));
-      __ Mfc1(out, FTMP);
-    }
-  } else {
-    if (type == DataType::Type::kInt32) {
-      Register in = locations->InAt(0).AsRegister<Register>();
-
-      __ Srl(TMP, in, 1);
-      __ LoadConst32(AT, 0x55555555);
-      __ And(TMP, TMP, AT);
-      __ Subu(TMP, in, TMP);
-      __ LoadConst32(AT, 0x33333333);
-      __ And(out, TMP, AT);
-      __ Srl(TMP, TMP, 2);
-      __ And(TMP, TMP, AT);
-      __ Addu(TMP, out, TMP);
-      __ Srl(out, TMP, 4);
-      __ Addu(out, out, TMP);
-      __ LoadConst32(AT, 0x0F0F0F0F);
-      __ And(out, out, AT);
-      __ LoadConst32(TMP, 0x01010101);
-      if (isR6) {
-        __ MulR6(out, out, TMP);
-      } else {
-        __ MulR2(out, out, TMP);
-      }
-      __ Srl(out, out, 24);
-    } else {
-      DCHECK_EQ(type, DataType::Type::kInt64);
-      Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
-      Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
-      Register tmp_hi = locations->GetTemp(0).AsRegister<Register>();
-      Register out_hi = locations->GetTemp(1).AsRegister<Register>();
-      Register tmp_lo = TMP;
-      Register out_lo = out;
-
-      __ Srl(tmp_lo, in_lo, 1);
-      __ Srl(tmp_hi, in_hi, 1);
-
-      __ LoadConst32(AT, 0x55555555);
-
-      __ And(tmp_lo, tmp_lo, AT);
-      __ Subu(tmp_lo, in_lo, tmp_lo);
-
-      __ And(tmp_hi, tmp_hi, AT);
-      __ Subu(tmp_hi, in_hi, tmp_hi);
-
-      __ LoadConst32(AT, 0x33333333);
-
-      __ And(out_lo, tmp_lo, AT);
-      __ Srl(tmp_lo, tmp_lo, 2);
-      __ And(tmp_lo, tmp_lo, AT);
-      __ Addu(tmp_lo, out_lo, tmp_lo);
-
-      __ And(out_hi, tmp_hi, AT);
-      __ Srl(tmp_hi, tmp_hi, 2);
-      __ And(tmp_hi, tmp_hi, AT);
-      __ Addu(tmp_hi, out_hi, tmp_hi);
-
-      // Here we deviate from the original algorithm a bit. We've reached
-      // the stage where the bitfields holding the subtotals are large
-      // enough to hold the combined subtotals for both the low word, and
-      // the high word. This means that we can add the subtotals for the
-      // the high, and low words into a single word, and compute the final
-      // result for both the high, and low words using fewer instructions.
-      __ LoadConst32(AT, 0x0F0F0F0F);
-
-      __ Addu(TMP, tmp_hi, tmp_lo);
-
-      __ Srl(out, TMP, 4);
-      __ And(out, out, AT);
-      __ And(TMP, TMP, AT);
-      __ Addu(out, out, TMP);
-
-      __ LoadConst32(AT, 0x01010101);
-
-      if (isR6) {
-        __ MulR6(out, out, AT);
-      } else {
-        __ MulR2(out, out, AT);
-      }
-
-      __ Srl(out, out, 24);
-    }
-  }
-}
-
-// int java.lang.Integer.bitCount(int)
-void IntrinsicLocationsBuilderMIPS::VisitIntegerBitCount(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitIntegerBitCount(HInvoke* invoke) {
-  GenBitCount(invoke->GetLocations(), DataType::Type::kInt32, IsR6(), HasMsa(), GetAssembler());
-}
-
-// int java.lang.Long.bitCount(int)
-void IntrinsicLocationsBuilderMIPS::VisitLongBitCount(HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister());
-  locations->AddTemp(Location::RequiresRegister());
-  locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitLongBitCount(HInvoke* invoke) {
-  GenBitCount(invoke->GetLocations(), DataType::Type::kInt64, IsR6(), HasMsa(), GetAssembler());
-}
-
-// double java.lang.Math.sqrt(double)
-void IntrinsicLocationsBuilderMIPS::VisitMathSqrt(HInvoke* invoke) {
-  CreateFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathSqrt(HInvoke* invoke) {
-  LocationSummary* locations = invoke->GetLocations();
-  MipsAssembler* assembler = GetAssembler();
-  FRegister in = locations->InAt(0).AsFpuRegister<FRegister>();
-  FRegister out = locations->Out().AsFpuRegister<FRegister>();
-
-  __ SqrtD(out, in);
-}
-
-// byte libcore.io.Memory.peekByte(long address)
-void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekByte(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekByte(HInvoke* invoke) {
-  MipsAssembler* assembler = GetAssembler();
-  Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
-  Register out = invoke->GetLocations()->Out().AsRegister<Register>();
-
-  __ Lb(out, adr, 0);
-}
-
-// short libcore.io.Memory.peekShort(long address)
-void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekShortNative(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekShortNative(HInvoke* invoke) {
-  MipsAssembler* assembler = GetAssembler();
-  Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
-  Register out = invoke->GetLocations()->Out().AsRegister<Register>();
-
-  if (IsR6()) {
-    __ Lh(out, adr, 0);
-  } else if (IsR2OrNewer()) {
-    // Unlike for words, there are no lhl/lhr instructions to load
-    // unaligned halfwords so the code loads individual bytes, in case
-    // the address isn't halfword-aligned, and assembles them into a
-    // signed halfword.
-    __ Lb(AT, adr, 1);   // This byte must be sign-extended.
-    __ Lb(out, adr, 0);  // This byte can be either sign-extended, or
-                         // zero-extended because the following
-                         // instruction overwrites the sign bits.
-    __ Ins(out, AT, 8, 24);
-  } else {
-    __ Lbu(AT, adr, 0);  // This byte must be zero-extended.  If it's not
-                         // the "or" instruction below will destroy the upper
-                         // 24 bits of the final result.
-    __ Lb(out, adr, 1);  // This byte must be sign-extended.
-    __ Sll(out, out, 8);
-    __ Or(out, out, AT);
-  }
-}
-
-// int libcore.io.Memory.peekInt(long address)
-void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekIntNative(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekIntNative(HInvoke* invoke) {
-  MipsAssembler* assembler = GetAssembler();
-  Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
-  Register out = invoke->GetLocations()->Out().AsRegister<Register>();
-
-  if (IsR6()) {
-    __ Lw(out, adr, 0);
-  } else {
-    __ Lwr(out, adr, 0);
-    __ Lwl(out, adr, 3);
-  }
-}
-
-// long libcore.io.Memory.peekLong(long address)
-void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekLongNative(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekLongNative(HInvoke* invoke) {
-  MipsAssembler* assembler = GetAssembler();
-  Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
-  Register out_lo = invoke->GetLocations()->Out().AsRegisterPairLow<Register>();
-  Register out_hi = invoke->GetLocations()->Out().AsRegisterPairHigh<Register>();
-
-  if (IsR6()) {
-    __ Lw(out_lo, adr, 0);
-    __ Lw(out_hi, adr, 4);
-  } else {
-    __ Lwr(out_lo, adr, 0);
-    __ Lwl(out_lo, adr, 3);
-    __ Lwr(out_hi, adr, 4);
-    __ Lwl(out_hi, adr, 7);
-  }
-}
-
-static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
-}
-
-// void libcore.io.Memory.pokeByte(long address, byte value)
-void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeByte(HInvoke* invoke) {
-  CreateIntIntToVoidLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeByte(HInvoke* invoke) {
-  MipsAssembler* assembler = GetAssembler();
-  Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
-  Register val = invoke->GetLocations()->InAt(1).AsRegister<Register>();
-
-  __ Sb(val, adr, 0);
-}
-
-// void libcore.io.Memory.pokeShort(long address, short value)
-void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeShortNative(HInvoke* invoke) {
-  CreateIntIntToVoidLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeShortNative(HInvoke* invoke) {
-  MipsAssembler* assembler = GetAssembler();
-  Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
-  Register val = invoke->GetLocations()->InAt(1).AsRegister<Register>();
-
-  if (IsR6()) {
-    __ Sh(val, adr, 0);
-  } else {
-    // Unlike for words, there are no shl/shr instructions to store
-    // unaligned halfwords so the code stores individual bytes, in case
-    // the address isn't halfword-aligned.
-    __ Sb(val, adr, 0);
-    __ Srl(AT, val, 8);
-    __ Sb(AT, adr, 1);
-  }
-}
-
-// void libcore.io.Memory.pokeInt(long address, int value)
-void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeIntNative(HInvoke* invoke) {
-  CreateIntIntToVoidLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeIntNative(HInvoke* invoke) {
-  MipsAssembler* assembler = GetAssembler();
-  Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
-  Register val = invoke->GetLocations()->InAt(1).AsRegister<Register>();
-
-  if (IsR6()) {
-    __ Sw(val, adr, 0);
-  } else {
-    __ Swr(val, adr, 0);
-    __ Swl(val, adr, 3);
-  }
-}
-
-// void libcore.io.Memory.pokeLong(long address, long value)
-void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeLongNative(HInvoke* invoke) {
-  CreateIntIntToVoidLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeLongNative(HInvoke* invoke) {
-  MipsAssembler* assembler = GetAssembler();
-  Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
-  Register val_lo = invoke->GetLocations()->InAt(1).AsRegisterPairLow<Register>();
-  Register val_hi = invoke->GetLocations()->InAt(1).AsRegisterPairHigh<Register>();
-
-  if (IsR6()) {
-    __ Sw(val_lo, adr, 0);
-    __ Sw(val_hi, adr, 4);
-  } else {
-    __ Swr(val_lo, adr, 0);
-    __ Swl(val_lo, adr, 3);
-    __ Swr(val_hi, adr, 4);
-    __ Swl(val_hi, adr, 7);
-  }
-}
-
-// Thread java.lang.Thread.currentThread()
-void IntrinsicLocationsBuilderMIPS::VisitThreadCurrentThread(HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetOut(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitThreadCurrentThread(HInvoke* invoke) {
-  MipsAssembler* assembler = GetAssembler();
-  Register out = invoke->GetLocations()->Out().AsRegister<Register>();
-
-  __ LoadFromOffset(kLoadWord,
-                    out,
-                    TR,
-                    Thread::PeerOffset<kMipsPointerSize>().Int32Value());
-}
-
-static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
-                                          HInvoke* invoke,
-                                          DataType::Type type) {
-  bool can_call = kEmitCompilerReadBarrier &&
-      (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
-       invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke,
-                                      can_call
-                                          ? LocationSummary::kCallOnSlowPath
-                                          : LocationSummary::kNoCall,
-                                      kIntrinsified);
-  if (can_call && kUseBakerReadBarrier) {
-    locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
-  }
-  locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetInAt(2, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(),
-                    (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
-  if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
-    // We need a temporary register for the read barrier marking slow
-    // path in InstructionCodeGeneratorMIPS::GenerateReferenceLoadWithBakerReadBarrier.
-    locations->AddTemp(Location::RequiresRegister());
-  }
-}
-
-// Note that the caller must supply a properly aligned memory address.
-// If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur).
-static void GenUnsafeGet(HInvoke* invoke,
-                         DataType::Type type,
-                         bool is_volatile,
-                         bool is_R6,
-                         CodeGeneratorMIPS* codegen) {
-  LocationSummary* locations = invoke->GetLocations();
-  DCHECK((type == DataType::Type::kInt32) ||
-         (type == DataType::Type::kInt64) ||
-         (type == DataType::Type::kReference)) << type;
-  MipsAssembler* assembler = codegen->GetAssembler();
-  // Target register.
-  Location trg_loc = locations->Out();
-  // Object pointer.
-  Location base_loc = locations->InAt(1);
-  Register base = base_loc.AsRegister<Register>();
-  // The "offset" argument is passed as a "long". Since this code is for
-  // a 32-bit processor, we can only use 32-bit addresses, so we only
-  // need the low 32-bits of offset.
-  Location offset_loc = locations->InAt(2);
-  Register offset_lo = offset_loc.AsRegisterPairLow<Register>();
-
-  if (!(kEmitCompilerReadBarrier && kUseBakerReadBarrier && (type == DataType::Type::kReference))) {
-    __ Addu(TMP, base, offset_lo);
-  }
-
-  switch (type) {
-    case DataType::Type::kInt64: {
-      Register trg_lo = trg_loc.AsRegisterPairLow<Register>();
-      Register trg_hi = trg_loc.AsRegisterPairHigh<Register>();
-      CHECK(!is_volatile);  // TODO: support atomic 8-byte volatile loads.
-      if (is_R6) {
-        __ Lw(trg_lo, TMP, 0);
-        __ Lw(trg_hi, TMP, 4);
-      } else {
-        __ Lwr(trg_lo, TMP, 0);
-        __ Lwl(trg_lo, TMP, 3);
-        __ Lwr(trg_hi, TMP, 4);
-        __ Lwl(trg_hi, TMP, 7);
-      }
-      break;
-    }
-
-    case DataType::Type::kInt32: {
-      Register trg = trg_loc.AsRegister<Register>();
-      if (is_R6) {
-        __ Lw(trg, TMP, 0);
-      } else {
-        __ Lwr(trg, TMP, 0);
-        __ Lwl(trg, TMP, 3);
-      }
-      if (is_volatile) {
-        __ Sync(0);
-      }
-      break;
-    }
-
-    case DataType::Type::kReference: {
-      Register trg = trg_loc.AsRegister<Register>();
-      if (kEmitCompilerReadBarrier) {
-        if (kUseBakerReadBarrier) {
-          Location temp = locations->GetTemp(0);
-          codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
-                                                             trg_loc,
-                                                             base,
-                                                             /* offset= */ 0U,
-                                                             /* index= */ offset_loc,
-                                                             TIMES_1,
-                                                             temp,
-                                                             /* needs_null_check= */ false);
-          if (is_volatile) {
-            __ Sync(0);
-          }
-        } else {
-          if (is_R6) {
-            __ Lw(trg, TMP, 0);
-          } else {
-            __ Lwr(trg, TMP, 0);
-            __ Lwl(trg, TMP, 3);
-          }
-          if (is_volatile) {
-            __ Sync(0);
-          }
-          codegen->GenerateReadBarrierSlow(invoke,
-                                           trg_loc,
-                                           trg_loc,
-                                           base_loc,
-                                           /* offset= */ 0U,
-                                           /* index= */ offset_loc);
-        }
-      } else {
-        if (is_R6) {
-          __ Lw(trg, TMP, 0);
-        } else {
-          __ Lwr(trg, TMP, 0);
-          __ Lwl(trg, TMP, 3);
-        }
-        if (is_volatile) {
-          __ Sync(0);
-        }
-        __ MaybeUnpoisonHeapReference(trg);
-      }
-      break;
-    }
-
-    default:
-      LOG(FATAL) << "Unexpected type " << type;
-      UNREACHABLE();
-  }
-}
-
-// int sun.misc.Unsafe.getInt(Object o, long offset)
-void IntrinsicLocationsBuilderMIPS::VisitUnsafeGet(HInvoke* invoke) {
-  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitUnsafeGet(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, IsR6(), codegen_);
-}
-
-// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
-void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
-  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, IsR6(), codegen_);
-}
-
-// long sun.misc.Unsafe.getLong(Object o, long offset)
-void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
-  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetLong(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, IsR6(), codegen_);
-}
-
-// Object sun.misc.Unsafe.getObject(Object o, long offset)
-void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
-  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObject(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, IsR6(), codegen_);
-}
-
-// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
-void IntrinsicLocationsBuilderMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
-  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, IsR6(), codegen_);
-}
-
-static void CreateIntIntIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetInAt(2, Location::RequiresRegister());
-  locations->SetInAt(3, Location::RequiresRegister());
-}
-
-// Note that the caller must supply a properly aligned memory address.
-// If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur).
-static void GenUnsafePut(LocationSummary* locations,
-                         DataType::Type type,
-                         bool is_volatile,
-                         bool is_ordered,
-                         bool is_R6,
-                         CodeGeneratorMIPS* codegen) {
-  DCHECK((type == DataType::Type::kInt32) ||
-         (type == DataType::Type::kInt64) ||
-         (type == DataType::Type::kReference)) << type;
-  MipsAssembler* assembler = codegen->GetAssembler();
-  // Object pointer.
-  Register base = locations->InAt(1).AsRegister<Register>();
-  // The "offset" argument is passed as a "long", i.e., it's 64-bits in
-  // size. Since this code is for a 32-bit processor, we can only use
-  // 32-bit addresses, so we only need the low 32-bits of offset.
-  Register offset_lo = locations->InAt(2).AsRegisterPairLow<Register>();
-
-  __ Addu(TMP, base, offset_lo);
-  if (is_volatile || is_ordered) {
-    __ Sync(0);
-  }
-  if ((type == DataType::Type::kInt32) || (type == DataType::Type::kReference)) {
-    Register value = locations->InAt(3).AsRegister<Register>();
-
-    if (kPoisonHeapReferences && type == DataType::Type::kReference) {
-      __ PoisonHeapReference(AT, value);
-      value = AT;
-    }
-
-    if (is_R6) {
-      __ Sw(value, TMP, 0);
-    } else {
-      __ Swr(value, TMP, 0);
-      __ Swl(value, TMP, 3);
-    }
-  } else {
-    Register value_lo = locations->InAt(3).AsRegisterPairLow<Register>();
-    Register value_hi = locations->InAt(3).AsRegisterPairHigh<Register>();
-    CHECK(!is_volatile);  // TODO: support atomic 8-byte volatile stores.
-    if (is_R6) {
-      __ Sw(value_lo, TMP, 0);
-      __ Sw(value_hi, TMP, 4);
-    } else {
-      __ Swr(value_lo, TMP, 0);
-      __ Swl(value_lo, TMP, 3);
-      __ Swr(value_hi, TMP, 4);
-      __ Swl(value_hi, TMP, 7);
-    }
-  }
-
-  if (is_volatile) {
-    __ Sync(0);
-  }
-
-  if (type == DataType::Type::kReference) {
-    bool value_can_be_null = true;  // TODO: Worth finding out this information?
-    codegen->MarkGCCard(base, locations->InAt(3).AsRegister<Register>(), value_can_be_null);
-  }
-}
-
-// void sun.misc.Unsafe.putInt(Object o, long offset, int x)
-void IntrinsicLocationsBuilderMIPS::VisitUnsafePut(HInvoke* invoke) {
-  CreateIntIntIntIntToVoidLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitUnsafePut(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(),
-               DataType::Type::kInt32,
-               /* is_volatile= */ false,
-               /* is_ordered= */ false,
-               IsR6(),
-               codegen_);
-}
-
-// void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x)
-void IntrinsicLocationsBuilderMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
-  CreateIntIntIntIntToVoidLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitUnsafePutOrdered(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(),
-               DataType::Type::kInt32,
-               /* is_volatile= */ false,
-               /* is_ordered= */ true,
-               IsR6(),
-               codegen_);
-}
-
-// void sun.misc.Unsafe.putIntVolatile(Object o, long offset, int x)
-void IntrinsicLocationsBuilderMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
-  CreateIntIntIntIntToVoidLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitUnsafePutVolatile(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(),
-               DataType::Type::kInt32,
-               /* is_volatile= */ true,
-               /* is_ordered= */ false,
-               IsR6(),
-               codegen_);
-}
-
-// void sun.misc.Unsafe.putObject(Object o, long offset, Object x)
-void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObject(HInvoke* invoke) {
-  CreateIntIntIntIntToVoidLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObject(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(),
-               DataType::Type::kReference,
-               /* is_volatile= */ false,
-               /* is_ordered= */ false,
-               IsR6(),
-               codegen_);
-}
-
-// void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x)
-void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
-  CreateIntIntIntIntToVoidLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(),
-               DataType::Type::kReference,
-               /* is_volatile= */ false,
-               /* is_ordered= */ true,
-               IsR6(),
-               codegen_);
-}
-
-// void sun.misc.Unsafe.putObjectVolatile(Object o, long offset, Object x)
-void IntrinsicLocationsBuilderMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
-  CreateIntIntIntIntToVoidLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(),
-               DataType::Type::kReference,
-               /* is_volatile= */ true,
-               /* is_ordered= */ false,
-               IsR6(),
-               codegen_);
-}
-
-// void sun.misc.Unsafe.putLong(Object o, long offset, long x)
-void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLong(HInvoke* invoke) {
-  CreateIntIntIntIntToVoidLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLong(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(),
-               DataType::Type::kInt64,
-               /* is_volatile= */ false,
-               /* is_ordered= */ false,
-               IsR6(),
-               codegen_);
-}
-
-// void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x)
-void IntrinsicLocationsBuilderMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
-  CreateIntIntIntIntToVoidLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitUnsafePutLongOrdered(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(),
-               DataType::Type::kInt64,
-               /* is_volatile= */ false,
-               /* is_ordered= */ true,
-               IsR6(),
-               codegen_);
-}
-
-static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator, HInvoke* invoke) {
-  bool can_call = kEmitCompilerReadBarrier &&
-      kUseBakerReadBarrier &&
-      (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke,
-                                      can_call
-                                          ? LocationSummary::kCallOnSlowPath
-                                          : LocationSummary::kNoCall,
-                                      kIntrinsified);
-  locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetInAt(2, Location::RequiresRegister());
-  locations->SetInAt(3, Location::RequiresRegister());
-  locations->SetInAt(4, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister());
-
-  // Temporary register used in CAS by (Baker) read barrier.
-  if (can_call) {
-    locations->AddTemp(Location::RequiresRegister());
-  }
-}
-
-// Note that the caller must supply a properly aligned memory address.
-// If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur).
-static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorMIPS* codegen) {
-  MipsAssembler* assembler = codegen->GetAssembler();
-  LocationSummary* locations = invoke->GetLocations();
-  bool isR6 = codegen->GetInstructionSetFeatures().IsR6();
-  Register base = locations->InAt(1).AsRegister<Register>();
-  Location offset_loc = locations->InAt(2);
-  Register offset_lo = offset_loc.AsRegisterPairLow<Register>();
-  Register expected = locations->InAt(3).AsRegister<Register>();
-  Register value = locations->InAt(4).AsRegister<Register>();
-  Location out_loc = locations->Out();
-  Register out = out_loc.AsRegister<Register>();
-
-  DCHECK_NE(base, out);
-  DCHECK_NE(offset_lo, out);
-  DCHECK_NE(expected, out);
-
-  if (type == DataType::Type::kReference) {
-    // The only read barrier implementation supporting the
-    // UnsafeCASObject intrinsic is the Baker-style read barriers.
-    DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
-
-    // Mark card for object assuming new value is stored. Worst case we will mark an unchanged
-    // object and scan the receiver at the next GC for nothing.
-    bool value_can_be_null = true;  // TODO: Worth finding out this information?
-    codegen->MarkGCCard(base, value, value_can_be_null);
-
-    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
-      Location temp = locations->GetTemp(0);
-      // Need to make sure the reference stored in the field is a to-space
-      // one before attempting the CAS or the CAS could fail incorrectly.
-      codegen->GenerateReferenceLoadWithBakerReadBarrier(
-          invoke,
-          out_loc,  // Unused, used only as a "temporary" within the read barrier.
-          base,
-          /* offset= */ 0u,
-          /* index= */ offset_loc,
-          ScaleFactor::TIMES_1,
-          temp,
-          /* needs_null_check= */ false,
-          /* always_update_field= */ true);
-    }
-  }
-
-  MipsLabel loop_head, exit_loop;
-  __ Addu(TMP, base, offset_lo);
-
-  if (kPoisonHeapReferences && type == DataType::Type::kReference) {
-    __ PoisonHeapReference(expected);
-    // Do not poison `value`, if it is the same register as
-    // `expected`, which has just been poisoned.
-    if (value != expected) {
-      __ PoisonHeapReference(value);
-    }
-  }
-
-  // do {
-  //   tmp_value = [tmp_ptr] - expected;
-  // } while (tmp_value == 0 && failure([tmp_ptr] <- r_new_value));
-  // result = tmp_value != 0;
-
-  __ Sync(0);
-  __ Bind(&loop_head);
-  if ((type == DataType::Type::kInt32) || (type == DataType::Type::kReference)) {
-    if (isR6) {
-      __ LlR6(out, TMP);
-    } else {
-      __ LlR2(out, TMP);
-    }
-  } else {
-    LOG(FATAL) << "Unsupported op size " << type;
-    UNREACHABLE();
-  }
-  __ Subu(out, out, expected);          // If we didn't get the 'expected'
-  __ Sltiu(out, out, 1);                // value, set 'out' to false, and
-  __ Beqz(out, &exit_loop);             // return.
-  __ Move(out, value);  // Use 'out' for the 'store conditional' instruction.
-                        // If we use 'value' directly, we would lose 'value'
-                        // in the case that the store fails.  Whether the
-                        // store succeeds, or fails, it will load the
-                        // correct Boolean value into the 'out' register.
-  // This test isn't really necessary. We only support DataType::Type::kInt,
-  // DataType::Type::kReference, and we already verified that we're working on one
-  // of those two types. It's left here in case the code needs to support
-  // other types in the future.
-  if ((type == DataType::Type::kInt32) || (type == DataType::Type::kReference)) {
-    if (isR6) {
-      __ ScR6(out, TMP);
-    } else {
-      __ ScR2(out, TMP);
-    }
-  }
-  __ Beqz(out, &loop_head);     // If we couldn't do the read-modify-write
-                                // cycle atomically then retry.
-  __ Bind(&exit_loop);
-  __ Sync(0);
-
-  if (kPoisonHeapReferences && type == DataType::Type::kReference) {
-    __ UnpoisonHeapReference(expected);
-    // Do not unpoison `value`, if it is the same register as
-    // `expected`, which has just been unpoisoned.
-    if (value != expected) {
-      __ UnpoisonHeapReference(value);
-    }
-  }
-}
-
-// boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x)
-void IntrinsicLocationsBuilderMIPS::VisitUnsafeCASInt(HInvoke* invoke) {
-  CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASInt(HInvoke* invoke) {
-  GenCas(invoke, DataType::Type::kInt32, codegen_);
-}
-
-// boolean sun.misc.Unsafe.compareAndSwapObject(Object o, long offset, Object expected, Object x)
-void IntrinsicLocationsBuilderMIPS::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The only read barrier implementation supporting the
-  // UnsafeCASObject intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
-    return;
-  }
-
-  CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The only read barrier implementation supporting the
-  // UnsafeCASObject intrinsic is the Baker-style read barriers.
-  DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
-
-  GenCas(invoke, DataType::Type::kReference, codegen_);
-}
-
-// int java.lang.String.compareTo(String anotherString)
-void IntrinsicLocationsBuilderMIPS::VisitStringCompareTo(HInvoke* invoke) {
-  LocationSummary* locations = new (allocator_) LocationSummary(
-      invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-  Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32);
-  locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>()));
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitStringCompareTo(HInvoke* invoke) {
-  MipsAssembler* assembler = GetAssembler();
-  LocationSummary* locations = invoke->GetLocations();
-
-  // Note that the null check must have been done earlier.
-  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
-
-  Register argument = locations->InAt(1).AsRegister<Register>();
-  SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
-  codegen_->AddSlowPath(slow_path);
-  __ Beqz(argument, slow_path->GetEntryLabel());
-  codegen_->InvokeRuntime(kQuickStringCompareTo, invoke, invoke->GetDexPc(), slow_path);
-  __ Bind(slow_path->GetExitLabel());
-}
-
-// boolean java.lang.String.equals(Object anObject)
-void IntrinsicLocationsBuilderMIPS::VisitStringEquals(HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister());
-
-  // Temporary registers to store lengths of strings and for calculations.
-  locations->AddTemp(Location::RequiresRegister());
-  locations->AddTemp(Location::RequiresRegister());
-  locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitStringEquals(HInvoke* invoke) {
-  MipsAssembler* assembler = GetAssembler();
-  LocationSummary* locations = invoke->GetLocations();
-
-  Register str = locations->InAt(0).AsRegister<Register>();
-  Register arg = locations->InAt(1).AsRegister<Register>();
-  Register out = locations->Out().AsRegister<Register>();
-
-  Register temp1 = locations->GetTemp(0).AsRegister<Register>();
-  Register temp2 = locations->GetTemp(1).AsRegister<Register>();
-  Register temp3 = locations->GetTemp(2).AsRegister<Register>();
-
-  MipsLabel loop;
-  MipsLabel end;
-  MipsLabel return_true;
-  MipsLabel return_false;
-
-  // Get offsets of count, value, and class fields within a string object.
-  const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
-  const uint32_t value_offset = mirror::String::ValueOffset().Uint32Value();
-  const uint32_t class_offset = mirror::Object::ClassOffset().Uint32Value();
-
-  // Note that the null check must have been done earlier.
-  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
-
-  // If the register containing the pointer to "this", and the register
-  // containing the pointer to "anObject" are the same register then
-  // "this", and "anObject" are the same object and we can
-  // short-circuit the logic to a true result.
-  if (str == arg) {
-    __ LoadConst32(out, 1);
-    return;
-  }
-  StringEqualsOptimizations optimizations(invoke);
-  if (!optimizations.GetArgumentNotNull()) {
-    // Check if input is null, return false if it is.
-    __ Beqz(arg, &return_false);
-  }
-
-  // Reference equality check, return true if same reference.
-  __ Beq(str, arg, &return_true);
-
-  if (!optimizations.GetArgumentIsString()) {
-    // Instanceof check for the argument by comparing class fields.
-    // All string objects must have the same type since String cannot be subclassed.
-    // Receiver must be a string object, so its class field is equal to all strings' class fields.
-    // If the argument is a string object, its class field must be equal to receiver's class field.
-    //
-    // As the String class is expected to be non-movable, we can read the class
-    // field from String.equals' arguments without read barriers.
-    AssertNonMovableStringClass();
-    // /* HeapReference<Class> */ temp1 = str->klass_
-    __ Lw(temp1, str, class_offset);
-    // /* HeapReference<Class> */ temp2 = arg->klass_
-    __ Lw(temp2, arg, class_offset);
-    // Also, because we use the previously loaded class references only in the
-    // following comparison, we don't need to unpoison them.
-    __ Bne(temp1, temp2, &return_false);
-  }
-
-  // Load `count` fields of this and argument strings.
-  __ Lw(temp1, str, count_offset);
-  __ Lw(temp2, arg, count_offset);
-  // Check if `count` fields are equal, return false if they're not.
-  // Also compares the compression style, if differs return false.
-  __ Bne(temp1, temp2, &return_false);
-  // Return true if both strings are empty. Even with string compression `count == 0` means empty.
-  static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
-                "Expecting 0=compressed, 1=uncompressed");
-  __ Beqz(temp1, &return_true);
-
-  // Don't overwrite input registers
-  __ Move(TMP, str);
-  __ Move(temp3, arg);
-
-  // Assertions that must hold in order to compare strings 4 bytes at a time.
-  DCHECK_ALIGNED(value_offset, 4);
-  static_assert(IsAligned<4>(kObjectAlignment), "String of odd length is not zero padded");
-
-  // For string compression, calculate the number of bytes to compare (not chars).
-  if (mirror::kUseStringCompression) {
-    // Extract compression flag.
-    if (IsR2OrNewer()) {
-      __ Ext(temp2, temp1, 0, 1);
-    } else {
-      __ Sll(temp2, temp1, 31);
-      __ Srl(temp2, temp2, 31);
-    }
-    __ Srl(temp1, temp1, 1);             // Extract length.
-    __ Sllv(temp1, temp1, temp2);        // Double the byte count if uncompressed.
-  }
-
-  // Loop to compare strings 4 bytes at a time starting at the beginning of the string.
-  // Ok to do this because strings are zero-padded to kObjectAlignment.
-  __ Bind(&loop);
-  __ Lw(out, TMP, value_offset);
-  __ Lw(temp2, temp3, value_offset);
-  __ Bne(out, temp2, &return_false);
-  __ Addiu(TMP, TMP, 4);
-  __ Addiu(temp3, temp3, 4);
-  // With string compression, we have compared 4 bytes, otherwise 2 chars.
-  __ Addiu(temp1, temp1, mirror::kUseStringCompression ? -4 : -2);
-  __ Bgtz(temp1, &loop);
-
-  // Return true and exit the function.
-  // If loop does not result in returning false, we return true.
-  __ Bind(&return_true);
-  __ LoadConst32(out, 1);
-  __ B(&end);
-
-  // Return false and exit the function.
-  __ Bind(&return_false);
-  __ LoadConst32(out, 0);
-  __ Bind(&end);
-}
-
-static void GenerateStringIndexOf(HInvoke* invoke,
-                                  bool start_at_zero,
-                                  MipsAssembler* assembler,
-                                  CodeGeneratorMIPS* codegen) {
-  LocationSummary* locations = invoke->GetLocations();
-  Register tmp_reg = start_at_zero ? locations->GetTemp(0).AsRegister<Register>() : TMP;
-
-  // Note that the null check must have been done earlier.
-  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
-
-  // Check for code points > 0xFFFF. Either a slow-path check when we don't know statically,
-  // or directly dispatch for a large constant, or omit slow-path for a small constant or a char.
-  SlowPathCodeMIPS* slow_path = nullptr;
-  HInstruction* code_point = invoke->InputAt(1);
-  if (code_point->IsIntConstant()) {
-    if (!IsUint<16>(code_point->AsIntConstant()->GetValue())) {
-      // Always needs the slow-path. We could directly dispatch to it,
-      // but this case should be rare, so for simplicity just put the
-      // full slow-path down and branch unconditionally.
-      slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
-      codegen->AddSlowPath(slow_path);
-      __ B(slow_path->GetEntryLabel());
-      __ Bind(slow_path->GetExitLabel());
-      return;
-    }
-  } else if (code_point->GetType() != DataType::Type::kUint16) {
-    Register char_reg = locations->InAt(1).AsRegister<Register>();
-    // The "bltu" conditional branch tests to see if the character value
-    // fits in a valid 16-bit (MIPS halfword) value. If it doesn't then
-    // the character being searched for, if it exists in the string, is
-    // encoded using UTF-16 and stored in the string as two (16-bit)
-    // halfwords. Currently the assembly code used to implement this
-    // intrinsic doesn't support searching for a character stored as
-    // two halfwords so we fallback to using the generic implementation
-    // of indexOf().
-    __ LoadConst32(tmp_reg, std::numeric_limits<uint16_t>::max());
-    slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
-    codegen->AddSlowPath(slow_path);
-    __ Bltu(tmp_reg, char_reg, slow_path->GetEntryLabel());
-  }
-
-  if (start_at_zero) {
-    DCHECK_EQ(tmp_reg, A2);
-    // Start-index = 0.
-    __ Clear(tmp_reg);
-  }
-
-  codegen->InvokeRuntime(kQuickIndexOf, invoke, invoke->GetDexPc(), slow_path);
-  if (slow_path != nullptr) {
-    __ Bind(slow_path->GetExitLabel());
-  }
-}
-
-// int java.lang.String.indexOf(int ch)
-void IntrinsicLocationsBuilderMIPS::VisitStringIndexOf(HInvoke* invoke) {
-  LocationSummary* locations = new (allocator_) LocationSummary(
-      invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
-  // We have a hand-crafted assembly stub that follows the runtime
-  // calling convention. So it's best to align the inputs accordingly.
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-  Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32);
-  locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>()));
-
-  // Need a temp for slow-path codepoint compare, and need to send start-index=0.
-  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, /* start_at_zero= */ true, GetAssembler(), codegen_);
-}
-
-// int java.lang.String.indexOf(int ch, int fromIndex)
-void IntrinsicLocationsBuilderMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
-  LocationSummary* locations = new (allocator_) LocationSummary(
-      invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
-  // We have a hand-crafted assembly stub that follows the runtime
-  // calling convention. So it's best to align the inputs accordingly.
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-  locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
-  Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32);
-  locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>()));
-
-  // Need a temp for slow-path codepoint compare.
-  locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, /* start_at_zero= */ false, GetAssembler(), codegen_);
-}
-
-// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
-void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromBytes(HInvoke* invoke) {
-  LocationSummary* locations = new (allocator_) LocationSummary(
-      invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-  locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
-  locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
-  Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32);
-  locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>()));
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromBytes(HInvoke* invoke) {
-  MipsAssembler* assembler = GetAssembler();
-  LocationSummary* locations = invoke->GetLocations();
-
-  Register byte_array = locations->InAt(0).AsRegister<Register>();
-  SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
-  codegen_->AddSlowPath(slow_path);
-  __ Beqz(byte_array, slow_path->GetEntryLabel());
-  codegen_->InvokeRuntime(kQuickAllocStringFromBytes, invoke, invoke->GetDexPc(), slow_path);
-  __ Bind(slow_path->GetExitLabel());
-}
-
-// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
-void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromChars(HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-  locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
-  Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32);
-  locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>()));
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromChars(HInvoke* invoke) {
-  // No need to emit code checking whether `locations->InAt(2)` is a null
-  // pointer, as callers of the native method
-  //
-  //   java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
-  //
-  // all include a null check on `data` before calling that method.
-  codegen_->InvokeRuntime(kQuickAllocStringFromChars, invoke, invoke->GetDexPc());
-}
-
-// java.lang.StringFactory.newStringFromString(String toCopy)
-void IntrinsicLocationsBuilderMIPS::VisitStringNewStringFromString(HInvoke* invoke) {
-  LocationSummary* locations = new (allocator_) LocationSummary(
-      invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32);
-  locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<Register>()));
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitStringNewStringFromString(HInvoke* invoke) {
-  MipsAssembler* assembler = GetAssembler();
-  LocationSummary* locations = invoke->GetLocations();
-
-  Register string_to_copy = locations->InAt(0).AsRegister<Register>();
-  SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
-  codegen_->AddSlowPath(slow_path);
-  __ Beqz(string_to_copy, slow_path->GetEntryLabel());
-  codegen_->InvokeRuntime(kQuickAllocStringFromString, invoke, invoke->GetDexPc());
-  __ Bind(slow_path->GetExitLabel());
-}
-
-static void GenIsInfinite(LocationSummary* locations,
-                          const DataType::Type type,
-                          const bool isR6,
-                          MipsAssembler* assembler) {
-  FRegister in = locations->InAt(0).AsFpuRegister<FRegister>();
-  Register out = locations->Out().AsRegister<Register>();
-
-  DCHECK(type == DataType::Type::kFloat32 || type == DataType::Type::kFloat64);
-
-  if (isR6) {
-    if (type == DataType::Type::kFloat64) {
-        __ ClassD(FTMP, in);
-    } else {
-        __ ClassS(FTMP, in);
-    }
-    __ Mfc1(out, FTMP);
-    __ Andi(out, out, kPositiveInfinity | kNegativeInfinity);
-    __ Sltu(out, ZERO, out);
-  } else {
-    // If one, or more, of the exponent bits is zero, then the number can't be infinite.
-    if (type == DataType::Type::kFloat64) {
-      __ MoveFromFpuHigh(TMP, in);
-      __ LoadConst32(AT, High32Bits(kPositiveInfinityDouble));
-    } else {
-      __ Mfc1(TMP, in);
-      __ LoadConst32(AT, kPositiveInfinityFloat);
-    }
-    __ Xor(TMP, TMP, AT);
-
-    __ Sll(TMP, TMP, 1);
-
-    if (type == DataType::Type::kFloat64) {
-      __ Mfc1(AT, in);
-      __ Or(TMP, TMP, AT);
-    }
-    // If any of the significand bits are one, then the number is not infinite.
-    __ Sltiu(out, TMP, 1);
-  }
-}
-
-// boolean java.lang.Float.isInfinite(float)
-void IntrinsicLocationsBuilderMIPS::VisitFloatIsInfinite(HInvoke* invoke) {
-  CreateFPToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitFloatIsInfinite(HInvoke* invoke) {
-  GenIsInfinite(invoke->GetLocations(), DataType::Type::kFloat32, IsR6(), GetAssembler());
-}
-
-// boolean java.lang.Double.isInfinite(double)
-void IntrinsicLocationsBuilderMIPS::VisitDoubleIsInfinite(HInvoke* invoke) {
-  CreateFPToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitDoubleIsInfinite(HInvoke* invoke) {
-  GenIsInfinite(invoke->GetLocations(), DataType::Type::kFloat64, IsR6(), GetAssembler());
-}
-
-static void GenHighestOneBit(LocationSummary* locations,
-                             const DataType::Type type,
-                             bool isR6,
-                             MipsAssembler* assembler) {
-  DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64);
-
-  if (type == DataType::Type::kInt64) {
-    Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
-    Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
-    Register out_lo = locations->Out().AsRegisterPairLow<Register>();
-    Register out_hi = locations->Out().AsRegisterPairHigh<Register>();
-
-    if (isR6) {
-      __ ClzR6(TMP, in_hi);
-    } else {
-      __ ClzR2(TMP, in_hi);
-    }
-    __ LoadConst32(AT, 0x80000000);
-    __ Srlv(out_hi, AT, TMP);
-    __ And(out_hi, out_hi, in_hi);
-    if (isR6) {
-      __ ClzR6(TMP, in_lo);
-    } else {
-      __ ClzR2(TMP, in_lo);
-    }
-    __ Srlv(out_lo, AT, TMP);
-    __ And(out_lo, out_lo, in_lo);
-    if (isR6) {
-      __ Seleqz(out_lo, out_lo, out_hi);
-    } else {
-      __ Movn(out_lo, ZERO, out_hi);
-    }
-  } else {
-    Register in = locations->InAt(0).AsRegister<Register>();
-    Register out = locations->Out().AsRegister<Register>();
-
-    if (isR6) {
-      __ ClzR6(TMP, in);
-    } else {
-      __ ClzR2(TMP, in);
-    }
-    __ LoadConst32(AT, 0x80000000);
-    __ Srlv(AT, AT, TMP);  // Srlv shifts in the range of [0;31] bits (lower 5 bits of arg).
-    __ And(out, AT, in);   // So this is required for 0 (=shift by 32).
-  }
-}
-
-// int java.lang.Integer.highestOneBit(int)
-void IntrinsicLocationsBuilderMIPS::VisitIntegerHighestOneBit(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitIntegerHighestOneBit(HInvoke* invoke) {
-  GenHighestOneBit(invoke->GetLocations(), DataType::Type::kInt32, IsR6(), GetAssembler());
-}
-
-// long java.lang.Long.highestOneBit(long)
-void IntrinsicLocationsBuilderMIPS::VisitLongHighestOneBit(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke, Location::kOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitLongHighestOneBit(HInvoke* invoke) {
-  GenHighestOneBit(invoke->GetLocations(), DataType::Type::kInt64, IsR6(), GetAssembler());
-}
-
-static void GenLowestOneBit(LocationSummary* locations,
-                            const DataType::Type type,
-                            bool isR6,
-                            MipsAssembler* assembler) {
-  DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64);
-
-  if (type == DataType::Type::kInt64) {
-    Register in_lo = locations->InAt(0).AsRegisterPairLow<Register>();
-    Register in_hi = locations->InAt(0).AsRegisterPairHigh<Register>();
-    Register out_lo = locations->Out().AsRegisterPairLow<Register>();
-    Register out_hi = locations->Out().AsRegisterPairHigh<Register>();
-
-    __ Subu(TMP, ZERO, in_lo);
-    __ And(out_lo, TMP, in_lo);
-    __ Subu(TMP, ZERO, in_hi);
-    __ And(out_hi, TMP, in_hi);
-    if (isR6) {
-      __ Seleqz(out_hi, out_hi, out_lo);
-    } else {
-      __ Movn(out_hi, ZERO, out_lo);
-    }
-  } else {
-    Register in = locations->InAt(0).AsRegister<Register>();
-    Register out = locations->Out().AsRegister<Register>();
-
-    __ Subu(TMP, ZERO, in);
-    __ And(out, TMP, in);
-  }
-}
-
-// int java.lang.Integer.lowestOneBit(int)
-void IntrinsicLocationsBuilderMIPS::VisitIntegerLowestOneBit(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitIntegerLowestOneBit(HInvoke* invoke) {
-  GenLowestOneBit(invoke->GetLocations(), DataType::Type::kInt32, IsR6(), GetAssembler());
-}
-
-// long java.lang.Long.lowestOneBit(long)
-void IntrinsicLocationsBuilderMIPS::VisitLongLowestOneBit(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitLongLowestOneBit(HInvoke* invoke) {
-  GenLowestOneBit(invoke->GetLocations(), DataType::Type::kInt64, IsR6(), GetAssembler());
-}
-
-// int java.lang.Math.round(float)
-void IntrinsicLocationsBuilderMIPS::VisitMathRoundFloat(HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresFpuRegister());
-  locations->AddTemp(Location::RequiresFpuRegister());
-  locations->SetOut(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathRoundFloat(HInvoke* invoke) {
-  LocationSummary* locations = invoke->GetLocations();
-  MipsAssembler* assembler = GetAssembler();
-  FRegister in = locations->InAt(0).AsFpuRegister<FRegister>();
-  FRegister half = locations->GetTemp(0).AsFpuRegister<FRegister>();
-  Register out = locations->Out().AsRegister<Register>();
-
-  MipsLabel done;
-
-  if (IsR6()) {
-    // out = floor(in);
-    //
-    // if (out != MAX_VALUE && out != MIN_VALUE) {
-    //     TMP = ((in - out) >= 0.5) ? 1 : 0;
-    //     return out += TMP;
-    // }
-    // return out;
-
-    // out = floor(in);
-    __ FloorWS(FTMP, in);
-    __ Mfc1(out, FTMP);
-
-    // if (out != MAX_VALUE && out != MIN_VALUE)
-    __ Addiu(TMP, out, 1);
-    __ Aui(TMP, TMP, 0x8000);  // TMP = out + 0x8000 0001
-                               // or    out - 0x7FFF FFFF.
-                               // IOW, TMP = 1 if out = Int.MIN_VALUE
-                               // or   TMP = 0 if out = Int.MAX_VALUE.
-    __ Srl(TMP, TMP, 1);       // TMP = 0 if out = Int.MIN_VALUE
-                               //         or out = Int.MAX_VALUE.
-    __ Beqz(TMP, &done);
-
-    // TMP = (0.5f <= (in - out)) ? -1 : 0;
-    __ Cvtsw(FTMP, FTMP);      // Convert output of floor.w.s back to "float".
-    __ LoadConst32(AT, bit_cast<int32_t, float>(0.5f));
-    __ SubS(FTMP, in, FTMP);
-    __ Mtc1(AT, half);
-
-    __ CmpLeS(FTMP, half, FTMP);
-    __ Mfc1(TMP, FTMP);
-
-    // Return out -= TMP.
-    __ Subu(out, out, TMP);
-  } else {
-    // if (in.isNaN) {
-    //   return 0;
-    // }
-    //
-    // out = floor.w.s(in);
-    //
-    // /*
-    //  * This "if" statement is only needed for the pre-R6 version of floor.w.s
-    //  * which outputs Integer.MAX_VALUE for negative numbers with magnitudes
-    //  * too large to fit in a 32-bit integer.
-    //  */
-    // if (out == Integer.MAX_VALUE) {
-    //   TMP = (in < 0.0f) ? 1 : 0;
-    //   /*
-    //    * If TMP is 1, then adding it to out will wrap its value from
-    //    * Integer.MAX_VALUE to Integer.MIN_VALUE.
-    //    */
-    //   return out += TMP;
-    // }
-    //
-    // /*
-    //  * For negative values not handled by the previous "if" statement the
-    //  * test here will correctly set the value of TMP.
-    //  */
-    // TMP = ((in - out) >= 0.5f) ? 1 : 0;
-    // return out += TMP;
-
-    MipsLabel finite;
-    MipsLabel add;
-
-    // Test for NaN.
-    __ CunS(in, in);
-
-    // Return zero for NaN.
-    __ Move(out, ZERO);
-    __ Bc1t(&done);
-
-    // out = floor(in);
-    __ FloorWS(FTMP, in);
-    __ Mfc1(out, FTMP);
-
-    __ LoadConst32(TMP, -1);
-
-    // TMP = (out = java.lang.Integer.MAX_VALUE) ? -1 : 0;
-    __ LoadConst32(AT, std::numeric_limits<int32_t>::max());
-    __ Bne(AT, out, &finite);
-
-    __ Mtc1(ZERO, FTMP);
-    __ ColtS(in, FTMP);
-
-    __ B(&add);
-
-    __ Bind(&finite);
-
-    // TMP = (0.5f <= (in - out)) ? -1 : 0;
-    __ Cvtsw(FTMP, FTMP);  // Convert output of floor.w.s back to "float".
-    __ LoadConst32(AT, bit_cast<int32_t, float>(0.5f));
-    __ SubS(FTMP, in, FTMP);
-    __ Mtc1(AT, half);
-    __ ColeS(half, FTMP);
-
-    __ Bind(&add);
-
-    __ Movf(TMP, ZERO);
-
-    // Return out -= TMP.
-    __ Subu(out, out, TMP);
-  }
-  __ Bind(&done);
-}
-
-// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
-void IntrinsicLocationsBuilderMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetInAt(2, Location::RequiresRegister());
-  locations->SetInAt(3, Location::RequiresRegister());
-  locations->SetInAt(4, Location::RequiresRegister());
-
-  locations->AddTemp(Location::RequiresRegister());
-  locations->AddTemp(Location::RequiresRegister());
-  locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitStringGetCharsNoCheck(HInvoke* invoke) {
-  MipsAssembler* assembler = GetAssembler();
-  LocationSummary* locations = invoke->GetLocations();
-
-  // Check assumption that sizeof(Char) is 2 (used in scaling below).
-  const size_t char_size = DataType::Size(DataType::Type::kUint16);
-  DCHECK_EQ(char_size, 2u);
-  const size_t char_shift = DataType::SizeShift(DataType::Type::kUint16);
-
-  Register srcObj = locations->InAt(0).AsRegister<Register>();
-  Register srcBegin = locations->InAt(1).AsRegister<Register>();
-  Register srcEnd = locations->InAt(2).AsRegister<Register>();
-  Register dstObj = locations->InAt(3).AsRegister<Register>();
-  Register dstBegin = locations->InAt(4).AsRegister<Register>();
-
-  Register dstPtr = locations->GetTemp(0).AsRegister<Register>();
-  Register srcPtr = locations->GetTemp(1).AsRegister<Register>();
-  Register numChrs = locations->GetTemp(2).AsRegister<Register>();
-
-  MipsLabel done;
-  MipsLabel loop;
-
-  // Location of data in char array buffer.
-  const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
-
-  // Get offset of value field within a string object.
-  const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
-
-  __ Beq(srcEnd, srcBegin, &done);  // No characters to move.
-
-  // Calculate number of characters to be copied.
-  __ Subu(numChrs, srcEnd, srcBegin);
-
-  // Calculate destination address.
-  __ Addiu(dstPtr, dstObj, data_offset);
-  __ ShiftAndAdd(dstPtr, dstBegin, dstPtr, char_shift);
-
-  if (mirror::kUseStringCompression) {
-    MipsLabel uncompressed_copy, compressed_loop;
-    const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
-    // Load count field and extract compression flag.
-    __ LoadFromOffset(kLoadWord, TMP, srcObj, count_offset);
-    __ Sll(TMP, TMP, 31);
-
-    // If string is uncompressed, use uncompressed path.
-    __ Bnez(TMP, &uncompressed_copy);
-
-    // Copy loop for compressed src, copying 1 character (8-bit) to (16-bit) at a time.
-    __ Addu(srcPtr, srcObj, srcBegin);
-    __ Bind(&compressed_loop);
-    __ LoadFromOffset(kLoadUnsignedByte, TMP, srcPtr, value_offset);
-    __ StoreToOffset(kStoreHalfword, TMP, dstPtr, 0);
-    __ Addiu(numChrs, numChrs, -1);
-    __ Addiu(srcPtr, srcPtr, 1);
-    __ Addiu(dstPtr, dstPtr, 2);
-    __ Bnez(numChrs, &compressed_loop);
-
-    __ B(&done);
-    __ Bind(&uncompressed_copy);
-  }
-
-  // Calculate source address.
-  __ Addiu(srcPtr, srcObj, value_offset);
-  __ ShiftAndAdd(srcPtr, srcBegin, srcPtr, char_shift);
-
-  __ Bind(&loop);
-  __ Lh(AT, srcPtr, 0);
-  __ Addiu(numChrs, numChrs, -1);
-  __ Addiu(srcPtr, srcPtr, char_size);
-  __ Sh(AT, dstPtr, 0);
-  __ Addiu(dstPtr, dstPtr, char_size);
-  __ Bnez(numChrs, &loop);
-
-  __ Bind(&done);
-}
-
-static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
-  InvokeRuntimeCallingConvention calling_convention;
-
-  locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
-  locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kFloat64));
-}
-
-static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
-  InvokeRuntimeCallingConvention calling_convention;
-
-  locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
-  locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
-  locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kFloat64));
-}
-
-static void GenFPToFPCall(HInvoke* invoke, CodeGeneratorMIPS* codegen, QuickEntrypointEnum entry) {
-  LocationSummary* locations = invoke->GetLocations();
-  FRegister in = locations->InAt(0).AsFpuRegister<FRegister>();
-  DCHECK_EQ(in, F12);
-  FRegister out = locations->Out().AsFpuRegister<FRegister>();
-  DCHECK_EQ(out, F0);
-
-  codegen->InvokeRuntime(entry, invoke, invoke->GetDexPc());
-}
-
-static void GenFPFPToFPCall(HInvoke* invoke,
-                            CodeGeneratorMIPS* codegen,
-                            QuickEntrypointEnum entry) {
-  LocationSummary* locations = invoke->GetLocations();
-  FRegister in0 = locations->InAt(0).AsFpuRegister<FRegister>();
-  DCHECK_EQ(in0, F12);
-  FRegister in1 = locations->InAt(1).AsFpuRegister<FRegister>();
-  DCHECK_EQ(in1, F14);
-  FRegister out = locations->Out().AsFpuRegister<FRegister>();
-  DCHECK_EQ(out, F0);
-
-  codegen->InvokeRuntime(entry, invoke, invoke->GetDexPc());
-}
-
-// static double java.lang.Math.cos(double a)
-void IntrinsicLocationsBuilderMIPS::VisitMathCos(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathCos(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickCos);
-}
-
-// static double java.lang.Math.sin(double a)
-void IntrinsicLocationsBuilderMIPS::VisitMathSin(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathSin(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickSin);
-}
-
-// static double java.lang.Math.acos(double a)
-void IntrinsicLocationsBuilderMIPS::VisitMathAcos(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathAcos(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickAcos);
-}
-
-// static double java.lang.Math.asin(double a)
-void IntrinsicLocationsBuilderMIPS::VisitMathAsin(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathAsin(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickAsin);
-}
-
-// static double java.lang.Math.atan(double a)
-void IntrinsicLocationsBuilderMIPS::VisitMathAtan(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathAtan(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickAtan);
-}
-
-// static double java.lang.Math.atan2(double y, double x)
-void IntrinsicLocationsBuilderMIPS::VisitMathAtan2(HInvoke* invoke) {
-  CreateFPFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathAtan2(HInvoke* invoke) {
-  GenFPFPToFPCall(invoke, codegen_, kQuickAtan2);
-}
-
-// static double java.lang.Math.pow(double y, double x)
-void IntrinsicLocationsBuilderMIPS::VisitMathPow(HInvoke* invoke) {
-  CreateFPFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathPow(HInvoke* invoke) {
-  GenFPFPToFPCall(invoke, codegen_, kQuickPow);
-}
-
-// static double java.lang.Math.cbrt(double a)
-void IntrinsicLocationsBuilderMIPS::VisitMathCbrt(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathCbrt(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickCbrt);
-}
-
-// static double java.lang.Math.cosh(double x)
-void IntrinsicLocationsBuilderMIPS::VisitMathCosh(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathCosh(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickCosh);
-}
-
-// static double java.lang.Math.exp(double a)
-void IntrinsicLocationsBuilderMIPS::VisitMathExp(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathExp(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickExp);
-}
-
-// static double java.lang.Math.expm1(double x)
-void IntrinsicLocationsBuilderMIPS::VisitMathExpm1(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathExpm1(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickExpm1);
-}
-
-// static double java.lang.Math.hypot(double x, double y)
-void IntrinsicLocationsBuilderMIPS::VisitMathHypot(HInvoke* invoke) {
-  CreateFPFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathHypot(HInvoke* invoke) {
-  GenFPFPToFPCall(invoke, codegen_, kQuickHypot);
-}
-
-// static double java.lang.Math.log(double a)
-void IntrinsicLocationsBuilderMIPS::VisitMathLog(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathLog(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickLog);
-}
-
-// static double java.lang.Math.log10(double x)
-void IntrinsicLocationsBuilderMIPS::VisitMathLog10(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathLog10(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickLog10);
-}
-
-// static double java.lang.Math.nextAfter(double start, double direction)
-void IntrinsicLocationsBuilderMIPS::VisitMathNextAfter(HInvoke* invoke) {
-  CreateFPFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathNextAfter(HInvoke* invoke) {
-  GenFPFPToFPCall(invoke, codegen_, kQuickNextAfter);
-}
-
-// static double java.lang.Math.sinh(double x)
-void IntrinsicLocationsBuilderMIPS::VisitMathSinh(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathSinh(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickSinh);
-}
-
-// static double java.lang.Math.tan(double a)
-void IntrinsicLocationsBuilderMIPS::VisitMathTan(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathTan(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickTan);
-}
-
-// static double java.lang.Math.tanh(double x)
-void IntrinsicLocationsBuilderMIPS::VisitMathTanh(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitMathTanh(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickTanh);
-}
-
-// static void java.lang.System.arraycopy(Object src, int srcPos,
-//                                        Object dest, int destPos,
-//                                        int length)
-void IntrinsicLocationsBuilderMIPS::VisitSystemArrayCopyChar(HInvoke* invoke) {
-  HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
-  HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();
-  HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
-
-  // As long as we are checking, we might as well check to see if the src and dest
-  // positions are >= 0.
-  if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
-      (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
-    // We will have to fail anyways.
-    return;
-  }
-
-  // And since we are already checking, check the length too.
-  if (length != nullptr) {
-    int32_t len = length->GetValue();
-    if (len < 0) {
-      // Just call as normal.
-      return;
-    }
-  }
-
-  // Okay, it is safe to generate inline code.
-  LocationSummary* locations =
-      new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
-  // arraycopy(Object src, int srcPos, Object dest, int destPos, int length).
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
-  locations->SetInAt(2, Location::RequiresRegister());
-  locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
-  locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));
-
-  locations->AddTemp(Location::RequiresRegister());
-  locations->AddTemp(Location::RequiresRegister());
-  locations->AddTemp(Location::RequiresRegister());
-}
-
-// Utility routine to verify that "length(input) - pos >= length"
-static void EnoughItems(MipsAssembler* assembler,
-                        Register length_input_minus_pos,
-                        Location length,
-                        SlowPathCodeMIPS* slow_path) {
-  if (length.IsConstant()) {
-    int32_t length_constant = length.GetConstant()->AsIntConstant()->GetValue();
-
-    if (IsInt<16>(length_constant)) {
-      __ Slti(TMP, length_input_minus_pos, length_constant);
-      __ Bnez(TMP, slow_path->GetEntryLabel());
-    } else {
-      __ LoadConst32(TMP, length_constant);
-      __ Blt(length_input_minus_pos, TMP, slow_path->GetEntryLabel());
-    }
-  } else {
-    __ Blt(length_input_minus_pos, length.AsRegister<Register>(), slow_path->GetEntryLabel());
-  }
-}
-
-static void CheckPosition(MipsAssembler* assembler,
-                          Location pos,
-                          Register input,
-                          Location length,
-                          SlowPathCodeMIPS* slow_path,
-                          bool length_is_input_length = false) {
-  // Where is the length in the Array?
-  const uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value();
-
-  // Calculate length(input) - pos.
-  if (pos.IsConstant()) {
-    int32_t pos_const = pos.GetConstant()->AsIntConstant()->GetValue();
-    if (pos_const == 0) {
-      if (!length_is_input_length) {
-        // Check that length(input) >= length.
-        __ LoadFromOffset(kLoadWord, AT, input, length_offset);
-        EnoughItems(assembler, AT, length, slow_path);
-      }
-    } else {
-      // Check that (length(input) - pos) >= zero.
-      __ LoadFromOffset(kLoadWord, AT, input, length_offset);
-      DCHECK_GT(pos_const, 0);
-      __ Addiu32(AT, AT, -pos_const, TMP);
-      __ Bltz(AT, slow_path->GetEntryLabel());
-
-      // Verify that (length(input) - pos) >= length.
-      EnoughItems(assembler, AT, length, slow_path);
-    }
-  } else if (length_is_input_length) {
-    // The only way the copy can succeed is if pos is zero.
-    Register pos_reg = pos.AsRegister<Register>();
-    __ Bnez(pos_reg, slow_path->GetEntryLabel());
-  } else {
-    // Verify that pos >= 0.
-    Register pos_reg = pos.AsRegister<Register>();
-    __ Bltz(pos_reg, slow_path->GetEntryLabel());
-
-    // Check that (length(input) - pos) >= zero.
-    __ LoadFromOffset(kLoadWord, AT, input, length_offset);
-    __ Subu(AT, AT, pos_reg);
-    __ Bltz(AT, slow_path->GetEntryLabel());
-
-    // Verify that (length(input) - pos) >= length.
-    EnoughItems(assembler, AT, length, slow_path);
-  }
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitSystemArrayCopyChar(HInvoke* invoke) {
-  MipsAssembler* assembler = GetAssembler();
-  LocationSummary* locations = invoke->GetLocations();
-
-  Register src = locations->InAt(0).AsRegister<Register>();
-  Location src_pos = locations->InAt(1);
-  Register dest = locations->InAt(2).AsRegister<Register>();
-  Location dest_pos = locations->InAt(3);
-  Location length = locations->InAt(4);
-
-  MipsLabel loop;
-
-  Register dest_base = locations->GetTemp(0).AsRegister<Register>();
-  Register src_base = locations->GetTemp(1).AsRegister<Register>();
-  Register count = locations->GetTemp(2).AsRegister<Register>();
-
-  SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS(invoke);
-  codegen_->AddSlowPath(slow_path);
-
-  // Bail out if the source and destination are the same (to handle overlap).
-  __ Beq(src, dest, slow_path->GetEntryLabel());
-
-  // Bail out if the source is null.
-  __ Beqz(src, slow_path->GetEntryLabel());
-
-  // Bail out if the destination is null.
-  __ Beqz(dest, slow_path->GetEntryLabel());
-
-  // Load length into register for count.
-  if (length.IsConstant()) {
-    __ LoadConst32(count, length.GetConstant()->AsIntConstant()->GetValue());
-  } else {
-    // If the length is negative, bail out.
-    // We have already checked in the LocationsBuilder for the constant case.
-    __ Bltz(length.AsRegister<Register>(), slow_path->GetEntryLabel());
-
-    __ Move(count, length.AsRegister<Register>());
-  }
-
-  // Validity checks: source.
-  CheckPosition(assembler, src_pos, src, Location::RegisterLocation(count), slow_path);
-
-  // Validity checks: dest.
-  CheckPosition(assembler, dest_pos, dest, Location::RegisterLocation(count), slow_path);
-
-  // If count is zero, we're done.
-  __ Beqz(count, slow_path->GetExitLabel());
-
-  // Okay, everything checks out.  Finally time to do the copy.
-  // Check assumption that sizeof(Char) is 2 (used in scaling below).
-  const size_t char_size = DataType::Size(DataType::Type::kUint16);
-  DCHECK_EQ(char_size, 2u);
-
-  const size_t char_shift = DataType::SizeShift(DataType::Type::kUint16);
-
-  const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
-
-  // Calculate source and destination addresses.
-  if (src_pos.IsConstant()) {
-    int32_t src_pos_const = src_pos.GetConstant()->AsIntConstant()->GetValue();
-
-    __ Addiu32(src_base, src, data_offset + char_size * src_pos_const, TMP);
-  } else {
-    __ Addiu32(src_base, src, data_offset, TMP);
-    __ ShiftAndAdd(src_base, src_pos.AsRegister<Register>(), src_base, char_shift);
-  }
-  if (dest_pos.IsConstant()) {
-    int32_t dest_pos_const = dest_pos.GetConstant()->AsIntConstant()->GetValue();
-
-    __ Addiu32(dest_base, dest, data_offset + char_size * dest_pos_const, TMP);
-  } else {
-    __ Addiu32(dest_base, dest, data_offset, TMP);
-    __ ShiftAndAdd(dest_base, dest_pos.AsRegister<Register>(), dest_base, char_shift);
-  }
-
-  __ Bind(&loop);
-  __ Lh(TMP, src_base, 0);
-  __ Addiu(src_base, src_base, char_size);
-  __ Addiu(count, count, -1);
-  __ Sh(TMP, dest_base, 0);
-  __ Addiu(dest_base, dest_base, char_size);
-  __ Bnez(count, &loop);
-
-  __ Bind(slow_path->GetExitLabel());
-}
-
-// long java.lang.Integer.valueOf(long)
-void IntrinsicLocationsBuilderMIPS::VisitIntegerValueOf(HInvoke* invoke) {
-  InvokeRuntimeCallingConvention calling_convention;
-  IntrinsicVisitor::ComputeIntegerValueOfLocations(
-      invoke,
-      codegen_,
-      calling_convention.GetReturnLocation(DataType::Type::kReference),
-      Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitIntegerValueOf(HInvoke* invoke) {
-  IntrinsicVisitor::IntegerValueOfInfo info =
-      IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke, codegen_->GetCompilerOptions());
-  LocationSummary* locations = invoke->GetLocations();
-  MipsAssembler* assembler = GetAssembler();
-  InstructionCodeGeneratorMIPS* icodegen =
-      down_cast<InstructionCodeGeneratorMIPS*>(codegen_->GetInstructionVisitor());
-
-  Register out = locations->Out().AsRegister<Register>();
-  if (invoke->InputAt(0)->IsConstant()) {
-    int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
-    if (static_cast<uint32_t>(value - info.low) < info.length) {
-      // Just embed the j.l.Integer in the code.
-      DCHECK_NE(info.value_boot_image_reference, IntegerValueOfInfo::kInvalidReference);
-      codegen_->LoadBootImageAddress(out, info.value_boot_image_reference);
-    } else {
-      DCHECK(locations->CanCall());
-      // Allocate and initialize a new j.l.Integer.
-      // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
-      // JIT object table.
-      codegen_->AllocateInstanceForIntrinsic(invoke->AsInvokeStaticOrDirect(),
-                                             info.integer_boot_image_offset);
-      __ StoreConstToOffset(kStoreWord, value, out, info.value_offset, TMP);
-      // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
-      // one.
-      icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
-    }
-  } else {
-    DCHECK(locations->CanCall());
-    Register in = locations->InAt(0).AsRegister<Register>();
-    MipsLabel allocate, done;
-
-    __ Addiu32(out, in, -info.low);
-    // As unsigned quantities is out < info.length ?
-    if (IsUint<15>(info.length)) {
-      __ Sltiu(AT, out, info.length);
-    } else {
-      __ LoadConst32(AT, info.length);
-      __ Sltu(AT, out, AT);
-    }
-    // Branch if out >= info.length. This means that "in" is outside of the valid range.
-    __ Beqz(AT, &allocate);
-
-    // If the value is within the bounds, load the j.l.Integer directly from the array.
-    codegen_->LoadBootImageAddress(TMP, info.array_data_boot_image_reference);
-    __ ShiftAndAdd(out, out, TMP, TIMES_4);
-    __ Lw(out, out, 0);
-    __ MaybeUnpoisonHeapReference(out);
-    __ B(&done);
-
-    __ Bind(&allocate);
-    // Otherwise allocate and initialize a new j.l.Integer.
-    codegen_->AllocateInstanceForIntrinsic(invoke->AsInvokeStaticOrDirect(),
-                                           info.integer_boot_image_offset);
-    __ StoreToOffset(kStoreWord, in, out, info.value_offset);
-    // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
-    // one.
-    icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
-    __ Bind(&done);
-  }
-}
-
-// static boolean java.lang.Thread.interrupted()
-void IntrinsicLocationsBuilderMIPS::VisitThreadInterrupted(HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetOut(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitThreadInterrupted(HInvoke* invoke) {
-  MipsAssembler* assembler = GetAssembler();
-  Register out = invoke->GetLocations()->Out().AsRegister<Register>();
-  int32_t offset = Thread::InterruptedOffset<kMipsPointerSize>().Int32Value();
-  __ LoadFromOffset(kLoadWord, out, TR, offset);
-  MipsLabel done;
-  __ Beqz(out, &done);
-  __ Sync(0);
-  __ StoreToOffset(kStoreWord, ZERO, TR, offset);
-  __ Sync(0);
-  __ Bind(&done);
-}
-
-void IntrinsicLocationsBuilderMIPS::VisitReachabilityFence(HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::Any());
-}
-
-void IntrinsicCodeGeneratorMIPS::VisitReachabilityFence(HInvoke* invoke ATTRIBUTE_UNUSED) { }
-
-// Unimplemented intrinsics.
-
-UNIMPLEMENTED_INTRINSIC(MIPS, MathCeil)
-UNIMPLEMENTED_INTRINSIC(MIPS, MathFloor)
-UNIMPLEMENTED_INTRINSIC(MIPS, MathRint)
-UNIMPLEMENTED_INTRINSIC(MIPS, MathRoundDouble)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetLongVolatile);
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafePutLongVolatile);
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeCASLong)
-
-UNIMPLEMENTED_INTRINSIC(MIPS, ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(MIPS, SystemArrayCopy)
-
-UNIMPLEMENTED_INTRINSIC(MIPS, CRC32Update)
-UNIMPLEMENTED_INTRINSIC(MIPS, CRC32UpdateBytes)
-UNIMPLEMENTED_INTRINSIC(MIPS, CRC32UpdateByteBuffer)
-
-UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOf);
-UNIMPLEMENTED_INTRINSIC(MIPS, StringStringIndexOfAfter);
-UNIMPLEMENTED_INTRINSIC(MIPS, StringBufferAppend);
-UNIMPLEMENTED_INTRINSIC(MIPS, StringBufferLength);
-UNIMPLEMENTED_INTRINSIC(MIPS, StringBufferToString);
-UNIMPLEMENTED_INTRINSIC(MIPS, StringBuilderAppend);
-UNIMPLEMENTED_INTRINSIC(MIPS, StringBuilderLength);
-UNIMPLEMENTED_INTRINSIC(MIPS, StringBuilderToString);
-
-// 1.8.
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndAddInt)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndAddLong)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetInt)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetLong)
-UNIMPLEMENTED_INTRINSIC(MIPS, UnsafeGetAndSetObject)
-
-UNREACHABLE_INTRINSICS(MIPS)
-
-#undef __
-
-}  // namespace mips
-}  // namespace art
diff --git a/compiler/optimizing/intrinsics_mips.h b/compiler/optimizing/intrinsics_mips.h
deleted file mode 100644
index 08d4e82..0000000
--- a/compiler/optimizing/intrinsics_mips.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_OPTIMIZING_INTRINSICS_MIPS_H_
-#define ART_COMPILER_OPTIMIZING_INTRINSICS_MIPS_H_
-
-#include "intrinsics.h"
-
-namespace art {
-
-class ArenaAllocator;
-class HInvokeStaticOrDirect;
-class HInvokeVirtual;
-
-namespace mips {
-
-class CodeGeneratorMIPS;
-class MipsAssembler;
-
-class IntrinsicLocationsBuilderMIPS final : public IntrinsicVisitor {
- public:
-  explicit IntrinsicLocationsBuilderMIPS(CodeGeneratorMIPS* codegen);
-
-  // Define visitor methods.
-
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) override;
-#include "intrinsics_list.h"
-  INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
-
-  // Check whether an invoke is an intrinsic, and if so, create a location summary. Returns whether
-  // a corresponding LocationSummary with the intrinsified_ flag set was generated and attached to
-  // the invoke.
-  bool TryDispatch(HInvoke* invoke);
-
- private:
-  CodeGeneratorMIPS* const codegen_;
-  ArenaAllocator* const allocator_;
-
-  DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS);
-};
-
-class IntrinsicCodeGeneratorMIPS final : public IntrinsicVisitor {
- public:
-  explicit IntrinsicCodeGeneratorMIPS(CodeGeneratorMIPS* codegen) : codegen_(codegen) {}
-
-  // Define visitor methods.
-
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) override;
-#include "intrinsics_list.h"
-  INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
-
-  bool IsR2OrNewer() const;
-  bool IsR6() const;
-  bool Is32BitFPU() const;
-  bool HasMsa() const;
-
- private:
-  MipsAssembler* GetAssembler();
-
-  ArenaAllocator* GetAllocator();
-
-  CodeGeneratorMIPS* const codegen_;
-
-  DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorMIPS);
-};
-
-}  // namespace mips
-}  // namespace art
-
-#endif  // ART_COMPILER_OPTIMIZING_INTRINSICS_MIPS_H_
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
deleted file mode 100644
index 3e68765..0000000
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ /dev/null
@@ -1,2382 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "intrinsics_mips64.h"
-
-#include "arch/mips64/instruction_set_features_mips64.h"
-#include "art_method.h"
-#include "code_generator_mips64.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "heap_poisoning.h"
-#include "intrinsics.h"
-#include "mirror/array-inl.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/string.h"
-#include "scoped_thread_state_change-inl.h"
-#include "thread.h"
-#include "utils/mips64/assembler_mips64.h"
-#include "utils/mips64/constants_mips64.h"
-
-namespace art {
-
-namespace mips64 {
-
-IntrinsicLocationsBuilderMIPS64::IntrinsicLocationsBuilderMIPS64(CodeGeneratorMIPS64* codegen)
-  : codegen_(codegen), allocator_(codegen->GetGraph()->GetAllocator()) {
-}
-
-Mips64Assembler* IntrinsicCodeGeneratorMIPS64::GetAssembler() {
-  return reinterpret_cast<Mips64Assembler*>(codegen_->GetAssembler());
-}
-
-ArenaAllocator* IntrinsicCodeGeneratorMIPS64::GetAllocator() {
-  return codegen_->GetGraph()->GetAllocator();
-}
-
-inline bool IntrinsicCodeGeneratorMIPS64::HasMsa() const {
-  return codegen_->GetInstructionSetFeatures().HasMsa();
-}
-
-#define __ codegen->GetAssembler()->
-
-static void MoveFromReturnRegister(Location trg,
-                                   DataType::Type type,
-                                   CodeGeneratorMIPS64* codegen) {
-  if (!trg.IsValid()) {
-    DCHECK_EQ(type, DataType::Type::kVoid);
-    return;
-  }
-
-  DCHECK_NE(type, DataType::Type::kVoid);
-
-  if (DataType::IsIntegralType(type) || type == DataType::Type::kReference) {
-    GpuRegister trg_reg = trg.AsRegister<GpuRegister>();
-    if (trg_reg != V0) {
-      __ Move(V0, trg_reg);
-    }
-  } else {
-    FpuRegister trg_reg = trg.AsFpuRegister<FpuRegister>();
-    if (trg_reg != F0) {
-      if (type == DataType::Type::kFloat32) {
-        __ MovS(F0, trg_reg);
-      } else {
-        __ MovD(F0, trg_reg);
-      }
-    }
-  }
-}
-
-static void MoveArguments(HInvoke* invoke, CodeGeneratorMIPS64* codegen) {
-  InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
-  IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
-}
-
-// Slow-path for fallback (calling the managed code to handle the
-// intrinsic) in an intrinsified call. This will copy the arguments
-// into the positions for a regular call.
-//
-// Note: The actual parameters are required to be in the locations
-//       given by the invoke's location summary. If an intrinsic
-//       modifies those locations before a slowpath call, they must be
-//       restored!
-class IntrinsicSlowPathMIPS64 : public SlowPathCodeMIPS64 {
- public:
-  explicit IntrinsicSlowPathMIPS64(HInvoke* invoke)
-     : SlowPathCodeMIPS64(invoke), invoke_(invoke) { }
-
-  void EmitNativeCode(CodeGenerator* codegen_in) override {
-    CodeGeneratorMIPS64* codegen = down_cast<CodeGeneratorMIPS64*>(codegen_in);
-
-    __ Bind(GetEntryLabel());
-
-    SaveLiveRegisters(codegen, invoke_->GetLocations());
-
-    MoveArguments(invoke_, codegen);
-
-    if (invoke_->IsInvokeStaticOrDirect()) {
-      codegen->GenerateStaticOrDirectCall(
-          invoke_->AsInvokeStaticOrDirect(), Location::RegisterLocation(A0), this);
-    } else {
-      codegen->GenerateVirtualCall(
-          invoke_->AsInvokeVirtual(), Location::RegisterLocation(A0), this);
-    }
-
-    // Copy the result back to the expected output.
-    Location out = invoke_->GetLocations()->Out();
-    if (out.IsValid()) {
-      DCHECK(out.IsRegister());  // TODO: Replace this when we support output in memory.
-      DCHECK(!invoke_->GetLocations()->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
-      MoveFromReturnRegister(out, invoke_->GetType(), codegen);
-    }
-
-    RestoreLiveRegisters(codegen, invoke_->GetLocations());
-    __ Bc(GetExitLabel());
-  }
-
-  const char* GetDescription() const override { return "IntrinsicSlowPathMIPS64"; }
-
- private:
-  // The instruction where this slow path is happening.
-  HInvoke* const invoke_;
-
-  DISALLOW_COPY_AND_ASSIGN(IntrinsicSlowPathMIPS64);
-};
-
-#undef __
-
-bool IntrinsicLocationsBuilderMIPS64::TryDispatch(HInvoke* invoke) {
-  Dispatch(invoke);
-  LocationSummary* res = invoke->GetLocations();
-  return res != nullptr && res->Intrinsified();
-}
-
-#define __ assembler->
-
-static void CreateFPToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresFpuRegister());
-  locations->SetOut(Location::RequiresRegister());
-}
-
-static void MoveFPToInt(LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) {
-  FpuRegister in  = locations->InAt(0).AsFpuRegister<FpuRegister>();
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-
-  if (is64bit) {
-    __ Dmfc1(out, in);
-  } else {
-    __ Mfc1(out, in);
-  }
-}
-
-// long java.lang.Double.doubleToRawLongBits(double)
-void IntrinsicLocationsBuilderMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
-  CreateFPToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
-}
-
-// int java.lang.Float.floatToRawIntBits(float)
-void IntrinsicLocationsBuilderMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
-  CreateFPToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
-  MoveFPToInt(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
-}
-
-static void CreateIntToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresFpuRegister());
-}
-
-static void MoveIntToFP(LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) {
-  GpuRegister in  = locations->InAt(0).AsRegister<GpuRegister>();
-  FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
-
-  if (is64bit) {
-    __ Dmtc1(in, out);
-  } else {
-    __ Mtc1(in, out);
-  }
-}
-
-// double java.lang.Double.longBitsToDouble(long)
-void IntrinsicLocationsBuilderMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
-  CreateIntToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
-}
-
-// float java.lang.Float.intBitsToFloat(int)
-void IntrinsicLocationsBuilderMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
-  CreateIntToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitFloatIntBitsToFloat(HInvoke* invoke) {
-  MoveIntToFP(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
-}
-
-static void CreateIntToIntLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
-}
-
-static void GenReverseBytes(LocationSummary* locations,
-                            DataType::Type type,
-                            Mips64Assembler* assembler) {
-  GpuRegister in  = locations->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-
-  switch (type) {
-    case DataType::Type::kInt16:
-      __ Dsbh(out, in);
-      __ Seh(out, out);
-      break;
-    case DataType::Type::kInt32:
-      __ Rotr(out, in, 16);
-      __ Wsbh(out, out);
-      break;
-    case DataType::Type::kInt64:
-      __ Dsbh(out, in);
-      __ Dshd(out, out);
-      break;
-    default:
-      LOG(FATAL) << "Unexpected size for reverse-bytes: " << type;
-      UNREACHABLE();
-  }
-}
-
-// int java.lang.Integer.reverseBytes(int)
-void IntrinsicLocationsBuilderMIPS64::VisitIntegerReverseBytes(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitIntegerReverseBytes(HInvoke* invoke) {
-  GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt32, GetAssembler());
-}
-
-// long java.lang.Long.reverseBytes(long)
-void IntrinsicLocationsBuilderMIPS64::VisitLongReverseBytes(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitLongReverseBytes(HInvoke* invoke) {
-  GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler());
-}
-
-// short java.lang.Short.reverseBytes(short)
-void IntrinsicLocationsBuilderMIPS64::VisitShortReverseBytes(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitShortReverseBytes(HInvoke* invoke) {
-  GenReverseBytes(invoke->GetLocations(), DataType::Type::kInt16, GetAssembler());
-}
-
-static void GenNumberOfLeadingZeroes(LocationSummary* locations,
-                                     bool is64bit,
-                                     Mips64Assembler* assembler) {
-  GpuRegister in  = locations->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-
-  if (is64bit) {
-    __ Dclz(out, in);
-  } else {
-    __ Clz(out, in);
-  }
-}
-
-// int java.lang.Integer.numberOfLeadingZeros(int i)
-void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
-  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
-}
-
-// int java.lang.Long.numberOfLeadingZeros(long i)
-void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
-  GenNumberOfLeadingZeroes(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
-}
-
-static void GenNumberOfTrailingZeroes(LocationSummary* locations,
-                                      bool is64bit,
-                                      Mips64Assembler* assembler) {
-  Location in = locations->InAt(0);
-  Location out = locations->Out();
-
-  if (is64bit) {
-    __ Dsbh(out.AsRegister<GpuRegister>(), in.AsRegister<GpuRegister>());
-    __ Dshd(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
-    __ Dbitswap(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
-    __ Dclz(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
-  } else {
-    __ Rotr(out.AsRegister<GpuRegister>(), in.AsRegister<GpuRegister>(), 16);
-    __ Wsbh(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
-    __ Bitswap(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
-    __ Clz(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
-  }
-}
-
-// int java.lang.Integer.numberOfTrailingZeros(int i)
-void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
-  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
-}
-
-// int java.lang.Long.numberOfTrailingZeros(long i)
-void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
-  GenNumberOfTrailingZeroes(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
-}
-
-static void GenReverse(LocationSummary* locations,
-                       DataType::Type type,
-                       Mips64Assembler* assembler) {
-  DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64);
-
-  GpuRegister in  = locations->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-
-  if (type == DataType::Type::kInt32) {
-    __ Rotr(out, in, 16);
-    __ Wsbh(out, out);
-    __ Bitswap(out, out);
-  } else {
-    __ Dsbh(out, in);
-    __ Dshd(out, out);
-    __ Dbitswap(out, out);
-  }
-}
-
-// int java.lang.Integer.reverse(int)
-void IntrinsicLocationsBuilderMIPS64::VisitIntegerReverse(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitIntegerReverse(HInvoke* invoke) {
-  GenReverse(invoke->GetLocations(), DataType::Type::kInt32, GetAssembler());
-}
-
-// long java.lang.Long.reverse(long)
-void IntrinsicLocationsBuilderMIPS64::VisitLongReverse(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitLongReverse(HInvoke* invoke) {
-  GenReverse(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler());
-}
-
-static void CreateFPToFPLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresFpuRegister());
-  locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
-}
-
-static void GenBitCount(LocationSummary* locations,
-                        const DataType::Type type,
-                        const bool hasMsa,
-                        Mips64Assembler* assembler) {
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-  GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
-
-  DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64);
-
-  // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
-  //
-  // A generalization of the best bit counting method to integers of
-  // bit-widths up to 128 (parameterized by type T) is this:
-  //
-  // v = v - ((v >> 1) & (T)~(T)0/3);                           // temp
-  // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3);      // temp
-  // v = (v + (v >> 4)) & (T)~(T)0/255*15;                      // temp
-  // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; // count
-  //
-  // For comparison, for 32-bit quantities, this algorithm can be executed
-  // using 20 MIPS instructions (the calls to LoadConst32() generate two
-  // machine instructions each for the values being used in this algorithm).
-  // A(n unrolled) loop-based algorithm requires 25 instructions.
-  //
-  // For a 64-bit operand this can be performed in 24 instructions compared
-  // to a(n unrolled) loop based algorithm which requires 38 instructions.
-  //
-  // There are algorithms which are faster in the cases where very few
-  // bits are set but the algorithm here attempts to minimize the total
-  // number of instructions executed even when a large number of bits
-  // are set.
-  if (hasMsa) {
-    if (type == DataType::Type::kInt32) {
-      __ Mtc1(in, FTMP);
-      __ PcntW(static_cast<VectorRegister>(FTMP), static_cast<VectorRegister>(FTMP));
-      __ Mfc1(out, FTMP);
-    } else {
-      __ Dmtc1(in, FTMP);
-      __ PcntD(static_cast<VectorRegister>(FTMP), static_cast<VectorRegister>(FTMP));
-      __ Dmfc1(out, FTMP);
-    }
-  } else {
-    if (type == DataType::Type::kInt32) {
-      __ Srl(TMP, in, 1);
-      __ LoadConst32(AT, 0x55555555);
-      __ And(TMP, TMP, AT);
-      __ Subu(TMP, in, TMP);
-      __ LoadConst32(AT, 0x33333333);
-      __ And(out, TMP, AT);
-      __ Srl(TMP, TMP, 2);
-      __ And(TMP, TMP, AT);
-      __ Addu(TMP, out, TMP);
-      __ Srl(out, TMP, 4);
-      __ Addu(out, out, TMP);
-      __ LoadConst32(AT, 0x0F0F0F0F);
-      __ And(out, out, AT);
-      __ LoadConst32(TMP, 0x01010101);
-      __ MulR6(out, out, TMP);
-      __ Srl(out, out, 24);
-    } else {
-      __ Dsrl(TMP, in, 1);
-      __ LoadConst64(AT, 0x5555555555555555L);
-      __ And(TMP, TMP, AT);
-      __ Dsubu(TMP, in, TMP);
-      __ LoadConst64(AT, 0x3333333333333333L);
-      __ And(out, TMP, AT);
-      __ Dsrl(TMP, TMP, 2);
-      __ And(TMP, TMP, AT);
-      __ Daddu(TMP, out, TMP);
-      __ Dsrl(out, TMP, 4);
-      __ Daddu(out, out, TMP);
-      __ LoadConst64(AT, 0x0F0F0F0F0F0F0F0FL);
-      __ And(out, out, AT);
-      __ LoadConst64(TMP, 0x0101010101010101L);
-      __ Dmul(out, out, TMP);
-      __ Dsrl32(out, out, 24);
-    }
-  }
-}
-
-// int java.lang.Integer.bitCount(int)
-void IntrinsicLocationsBuilderMIPS64::VisitIntegerBitCount(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitIntegerBitCount(HInvoke* invoke) {
-  GenBitCount(invoke->GetLocations(), DataType::Type::kInt32, HasMsa(), GetAssembler());
-}
-
-// int java.lang.Long.bitCount(long)
-void IntrinsicLocationsBuilderMIPS64::VisitLongBitCount(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitLongBitCount(HInvoke* invoke) {
-  GenBitCount(invoke->GetLocations(), DataType::Type::kInt64, HasMsa(), GetAssembler());
-}
-
-// double java.lang.Math.sqrt(double)
-void IntrinsicLocationsBuilderMIPS64::VisitMathSqrt(HInvoke* invoke) {
-  CreateFPToFPLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathSqrt(HInvoke* invoke) {
-  LocationSummary* locations = invoke->GetLocations();
-  Mips64Assembler* assembler = GetAssembler();
-  FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
-  FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
-
-  __ SqrtD(out, in);
-}
-
-static void CreateFPToFP(ArenaAllocator* allocator,
-                         HInvoke* invoke,
-                         Location::OutputOverlap overlaps = Location::kOutputOverlap) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresFpuRegister());
-  locations->SetOut(Location::RequiresFpuRegister(), overlaps);
-}
-
-// double java.lang.Math.rint(double)
-void IntrinsicLocationsBuilderMIPS64::VisitMathRint(HInvoke* invoke) {
-  CreateFPToFP(allocator_, invoke, Location::kNoOutputOverlap);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathRint(HInvoke* invoke) {
-  LocationSummary* locations = invoke->GetLocations();
-  Mips64Assembler* assembler = GetAssembler();
-  FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
-  FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
-
-  __ RintD(out, in);
-}
-
-// double java.lang.Math.floor(double)
-void IntrinsicLocationsBuilderMIPS64::VisitMathFloor(HInvoke* invoke) {
-  CreateFPToFP(allocator_, invoke);
-}
-
-const constexpr uint16_t kFPLeaveUnchanged = kPositiveZero |
-                                             kPositiveInfinity |
-                                             kNegativeZero |
-                                             kNegativeInfinity |
-                                             kQuietNaN |
-                                             kSignalingNaN;
-
-enum FloatRoundingMode {
-  kFloor,
-  kCeil,
-};
-
-static void GenRoundingMode(LocationSummary* locations,
-                            FloatRoundingMode mode,
-                            Mips64Assembler* assembler) {
-  FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
-  FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
-
-  DCHECK_NE(in, out);
-
-  Mips64Label done;
-
-  // double floor/ceil(double in) {
-  //     if in.isNaN || in.isInfinite || in.isZero {
-  //         return in;
-  //     }
-  __ ClassD(out, in);
-  __ Dmfc1(AT, out);
-  __ Andi(AT, AT, kFPLeaveUnchanged);   // +0.0 | +Inf | -0.0 | -Inf | qNaN | sNaN
-  __ MovD(out, in);
-  __ Bnezc(AT, &done);
-
-  //     Long outLong = floor/ceil(in);
-  //     if (outLong == Long.MAX_VALUE) || (outLong == Long.MIN_VALUE) {
-  //         // floor()/ceil() has almost certainly returned a value
-  //         // which can't be successfully represented as a signed
-  //         // 64-bit number.  Java expects that the input value will
-  //         // be returned in these cases.
-  //         // There is also a small probability that floor(in)/ceil(in)
-  //         // correctly truncates/rounds up the input value to
-  //         // Long.MAX_VALUE or Long.MIN_VALUE. In these cases, this
-  //         // exception handling code still does the correct thing.
-  //         return in;
-  //     }
-  if (mode == kFloor) {
-    __ FloorLD(out, in);
-  } else  if (mode == kCeil) {
-    __ CeilLD(out, in);
-  }
-  __ Dmfc1(AT, out);
-  __ MovD(out, in);
-  __ Daddiu(TMP, AT, 1);
-  __ Dati(TMP, 0x8000);  // TMP = AT + 0x8000 0000 0000 0001
-                         // or    AT - 0x7FFF FFFF FFFF FFFF.
-                         // IOW, TMP = 1 if AT = Long.MIN_VALUE
-                         // or   TMP = 0 if AT = Long.MAX_VALUE.
-  __ Dsrl(TMP, TMP, 1);  // TMP = 0 if AT = Long.MIN_VALUE
-                         //         or AT = Long.MAX_VALUE.
-  __ Beqzc(TMP, &done);
-
-  //     double out = outLong;
-  //     return out;
-  __ Dmtc1(AT, out);
-  __ Cvtdl(out, out);
-  __ Bind(&done);
-  // }
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathFloor(HInvoke* invoke) {
-  GenRoundingMode(invoke->GetLocations(), kFloor, GetAssembler());
-}
-
-// double java.lang.Math.ceil(double)
-void IntrinsicLocationsBuilderMIPS64::VisitMathCeil(HInvoke* invoke) {
-  CreateFPToFP(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathCeil(HInvoke* invoke) {
-  GenRoundingMode(invoke->GetLocations(), kCeil, GetAssembler());
-}
-
-static void GenRound(LocationSummary* locations, Mips64Assembler* assembler, DataType::Type type) {
-  FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
-  FpuRegister half = locations->GetTemp(0).AsFpuRegister<FpuRegister>();
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-
-  DCHECK(type == DataType::Type::kFloat32 || type == DataType::Type::kFloat64);
-
-  Mips64Label done;
-
-  // out = floor(in);
-  //
-  // if (out != MAX_VALUE && out != MIN_VALUE) {
-  //   TMP = ((in - out) >= 0.5) ? 1 : 0;
-  //   return out += TMP;
-  // }
-  // return out;
-
-  // out = floor(in);
-  if (type == DataType::Type::kFloat64) {
-    __ FloorLD(FTMP, in);
-    __ Dmfc1(out, FTMP);
-  } else {
-    __ FloorWS(FTMP, in);
-    __ Mfc1(out, FTMP);
-  }
-
-  // if (out != MAX_VALUE && out != MIN_VALUE)
-  if (type == DataType::Type::kFloat64) {
-    __ Daddiu(TMP, out, 1);
-    __ Dati(TMP, 0x8000);  // TMP = out + 0x8000 0000 0000 0001
-                           // or    out - 0x7FFF FFFF FFFF FFFF.
-                           // IOW, TMP = 1 if out = Long.MIN_VALUE
-                           // or   TMP = 0 if out = Long.MAX_VALUE.
-    __ Dsrl(TMP, TMP, 1);  // TMP = 0 if out = Long.MIN_VALUE
-                           //         or out = Long.MAX_VALUE.
-    __ Beqzc(TMP, &done);
-  } else {
-    __ Addiu(TMP, out, 1);
-    __ Aui(TMP, TMP, 0x8000);  // TMP = out + 0x8000 0001
-                               // or    out - 0x7FFF FFFF.
-                               // IOW, TMP = 1 if out = Int.MIN_VALUE
-                               // or   TMP = 0 if out = Int.MAX_VALUE.
-    __ Srl(TMP, TMP, 1);       // TMP = 0 if out = Int.MIN_VALUE
-                               //         or out = Int.MAX_VALUE.
-    __ Beqzc(TMP, &done);
-  }
-
-  // TMP = (0.5 <= (in - out)) ? -1 : 0;
-  if (type == DataType::Type::kFloat64) {
-    __ Cvtdl(FTMP, FTMP);  // Convert output of floor.l.d back to "double".
-    __ LoadConst64(AT, bit_cast<int64_t, double>(0.5));
-    __ SubD(FTMP, in, FTMP);
-    __ Dmtc1(AT, half);
-    __ CmpLeD(FTMP, half, FTMP);
-    __ Dmfc1(TMP, FTMP);
-  } else {
-    __ Cvtsw(FTMP, FTMP);  // Convert output of floor.w.s back to "float".
-    __ LoadConst32(AT, bit_cast<int32_t, float>(0.5f));
-    __ SubS(FTMP, in, FTMP);
-    __ Mtc1(AT, half);
-    __ CmpLeS(FTMP, half, FTMP);
-    __ Mfc1(TMP, FTMP);
-  }
-
-  // Return out -= TMP.
-  if (type == DataType::Type::kFloat64) {
-    __ Dsubu(out, out, TMP);
-  } else {
-    __ Subu(out, out, TMP);
-  }
-
-  __ Bind(&done);
-}
-
-// int java.lang.Math.round(float)
-void IntrinsicLocationsBuilderMIPS64::VisitMathRoundFloat(HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresFpuRegister());
-  locations->AddTemp(Location::RequiresFpuRegister());
-  locations->SetOut(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathRoundFloat(HInvoke* invoke) {
-  GenRound(invoke->GetLocations(), GetAssembler(), DataType::Type::kFloat32);
-}
-
-// long java.lang.Math.round(double)
-void IntrinsicLocationsBuilderMIPS64::VisitMathRoundDouble(HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresFpuRegister());
-  locations->AddTemp(Location::RequiresFpuRegister());
-  locations->SetOut(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathRoundDouble(HInvoke* invoke) {
-  GenRound(invoke->GetLocations(), GetAssembler(), DataType::Type::kFloat64);
-}
-
-// byte libcore.io.Memory.peekByte(long address)
-void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekByte(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekByte(HInvoke* invoke) {
-  Mips64Assembler* assembler = GetAssembler();
-  GpuRegister adr = invoke->GetLocations()->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister out = invoke->GetLocations()->Out().AsRegister<GpuRegister>();
-
-  __ Lb(out, adr, 0);
-}
-
-// short libcore.io.Memory.peekShort(long address)
-void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekShortNative(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekShortNative(HInvoke* invoke) {
-  Mips64Assembler* assembler = GetAssembler();
-  GpuRegister adr = invoke->GetLocations()->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister out = invoke->GetLocations()->Out().AsRegister<GpuRegister>();
-
-  __ Lh(out, adr, 0);
-}
-
-// int libcore.io.Memory.peekInt(long address)
-void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekIntNative(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekIntNative(HInvoke* invoke) {
-  Mips64Assembler* assembler = GetAssembler();
-  GpuRegister adr = invoke->GetLocations()->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister out = invoke->GetLocations()->Out().AsRegister<GpuRegister>();
-
-  __ Lw(out, adr, 0);
-}
-
-// long libcore.io.Memory.peekLong(long address)
-void IntrinsicLocationsBuilderMIPS64::VisitMemoryPeekLongNative(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMemoryPeekLongNative(HInvoke* invoke) {
-  Mips64Assembler* assembler = GetAssembler();
-  GpuRegister adr = invoke->GetLocations()->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister out = invoke->GetLocations()->Out().AsRegister<GpuRegister>();
-
-  __ Ld(out, adr, 0);
-}
-
-static void CreateIntIntToVoidLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
-}
-
-// void libcore.io.Memory.pokeByte(long address, byte value)
-void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeByte(HInvoke* invoke) {
-  CreateIntIntToVoidLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeByte(HInvoke* invoke) {
-  Mips64Assembler* assembler = GetAssembler();
-  GpuRegister adr = invoke->GetLocations()->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister val = invoke->GetLocations()->InAt(1).AsRegister<GpuRegister>();
-
-  __ Sb(val, adr, 0);
-}
-
-// void libcore.io.Memory.pokeShort(long address, short value)
-void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeShortNative(HInvoke* invoke) {
-  CreateIntIntToVoidLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeShortNative(HInvoke* invoke) {
-  Mips64Assembler* assembler = GetAssembler();
-  GpuRegister adr = invoke->GetLocations()->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister val = invoke->GetLocations()->InAt(1).AsRegister<GpuRegister>();
-
-  __ Sh(val, adr, 0);
-}
-
-// void libcore.io.Memory.pokeInt(long address, int value)
-void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeIntNative(HInvoke* invoke) {
-  CreateIntIntToVoidLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeIntNative(HInvoke* invoke) {
-  Mips64Assembler* assembler = GetAssembler();
-  GpuRegister adr = invoke->GetLocations()->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister val = invoke->GetLocations()->InAt(1).AsRegister<GpuRegister>();
-
-  __ Sw(val, adr, 00);
-}
-
-// void libcore.io.Memory.pokeLong(long address, long value)
-void IntrinsicLocationsBuilderMIPS64::VisitMemoryPokeLongNative(HInvoke* invoke) {
-  CreateIntIntToVoidLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMemoryPokeLongNative(HInvoke* invoke) {
-  Mips64Assembler* assembler = GetAssembler();
-  GpuRegister adr = invoke->GetLocations()->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister val = invoke->GetLocations()->InAt(1).AsRegister<GpuRegister>();
-
-  __ Sd(val, adr, 0);
-}
-
-// Thread java.lang.Thread.currentThread()
-void IntrinsicLocationsBuilderMIPS64::VisitThreadCurrentThread(HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetOut(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitThreadCurrentThread(HInvoke* invoke) {
-  Mips64Assembler* assembler = GetAssembler();
-  GpuRegister out = invoke->GetLocations()->Out().AsRegister<GpuRegister>();
-
-  __ LoadFromOffset(kLoadUnsignedWord,
-                    out,
-                    TR,
-                    Thread::PeerOffset<kMips64PointerSize>().Int32Value());
-}
-
-static void CreateIntIntIntToIntLocations(ArenaAllocator* allocator,
-                                          HInvoke* invoke,
-                                          DataType::Type type) {
-  bool can_call = kEmitCompilerReadBarrier &&
-      (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject ||
-       invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile);
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke,
-                                      can_call
-                                          ? LocationSummary::kCallOnSlowPath
-                                          : LocationSummary::kNoCall,
-                                      kIntrinsified);
-  if (can_call && kUseBakerReadBarrier) {
-    locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty());  // No caller-save registers.
-  }
-  locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetInAt(2, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister(),
-                    (can_call ? Location::kOutputOverlap : Location::kNoOutputOverlap));
-  if (type == DataType::Type::kReference && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
-    // We need a temporary register for the read barrier marking slow
-    // path in InstructionCodeGeneratorMIPS64::GenerateReferenceLoadWithBakerReadBarrier.
-    locations->AddTemp(Location::RequiresRegister());
-  }
-}
-
-// Note that the caller must supply a properly aligned memory address.
-// If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur).
-static void GenUnsafeGet(HInvoke* invoke,
-                         DataType::Type type,
-                         bool is_volatile,
-                         CodeGeneratorMIPS64* codegen) {
-  LocationSummary* locations = invoke->GetLocations();
-  DCHECK((type == DataType::Type::kInt32) ||
-         (type == DataType::Type::kInt64) ||
-         (type == DataType::Type::kReference)) << type;
-  Mips64Assembler* assembler = codegen->GetAssembler();
-  // Target register.
-  Location trg_loc = locations->Out();
-  GpuRegister trg = trg_loc.AsRegister<GpuRegister>();
-  // Object pointer.
-  Location base_loc = locations->InAt(1);
-  GpuRegister base = base_loc.AsRegister<GpuRegister>();
-  // Long offset.
-  Location offset_loc = locations->InAt(2);
-  GpuRegister offset = offset_loc.AsRegister<GpuRegister>();
-
-  if (!(kEmitCompilerReadBarrier && kUseBakerReadBarrier && (type == DataType::Type::kReference))) {
-    __ Daddu(TMP, base, offset);
-  }
-
-  switch (type) {
-    case DataType::Type::kInt64:
-      __ Ld(trg, TMP, 0);
-      if (is_volatile) {
-        __ Sync(0);
-      }
-      break;
-
-    case DataType::Type::kInt32:
-      __ Lw(trg, TMP, 0);
-      if (is_volatile) {
-        __ Sync(0);
-      }
-      break;
-
-    case DataType::Type::kReference:
-      if (kEmitCompilerReadBarrier) {
-        if (kUseBakerReadBarrier) {
-          Location temp = locations->GetTemp(0);
-          codegen->GenerateReferenceLoadWithBakerReadBarrier(invoke,
-                                                             trg_loc,
-                                                             base,
-                                                             /* offset= */ 0U,
-                                                             /* index= */ offset_loc,
-                                                             TIMES_1,
-                                                             temp,
-                                                             /* needs_null_check= */ false);
-          if (is_volatile) {
-            __ Sync(0);
-          }
-        } else {
-          __ Lwu(trg, TMP, 0);
-          if (is_volatile) {
-            __ Sync(0);
-          }
-          codegen->GenerateReadBarrierSlow(invoke,
-                                           trg_loc,
-                                           trg_loc,
-                                           base_loc,
-                                           /* offset= */ 0U,
-                                           /* index= */ offset_loc);
-        }
-      } else {
-        __ Lwu(trg, TMP, 0);
-        if (is_volatile) {
-          __ Sync(0);
-        }
-        __ MaybeUnpoisonHeapReference(trg);
-      }
-      break;
-
-    default:
-      LOG(FATAL) << "Unsupported op size " << type;
-      UNREACHABLE();
-  }
-}
-
-// int sun.misc.Unsafe.getInt(Object o, long offset)
-void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGet(HInvoke* invoke) {
-  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ false, codegen_);
-}
-
-// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
-void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
-  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt32);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt32, /* is_volatile= */ true, codegen_);
-}
-
-// long sun.misc.Unsafe.getLong(Object o, long offset)
-void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
-  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ false, codegen_);
-}
-
-// long sun.misc.Unsafe.getLongVolatile(Object o, long offset)
-void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
-  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kInt64);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kInt64, /* is_volatile= */ true, codegen_);
-}
-
-// Object sun.misc.Unsafe.getObject(Object o, long offset)
-void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
-  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ false, codegen_);
-}
-
-// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
-void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
-  CreateIntIntIntToIntLocations(allocator_, invoke, DataType::Type::kReference);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
-  GenUnsafeGet(invoke, DataType::Type::kReference, /* is_volatile= */ true, codegen_);
-}
-
-static void CreateIntIntIntIntToVoid(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetInAt(2, Location::RequiresRegister());
-  locations->SetInAt(3, Location::RequiresRegister());
-}
-
-// Note that the caller must supply a properly aligned memory address.
-// If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur).
-static void GenUnsafePut(LocationSummary* locations,
-                         DataType::Type type,
-                         bool is_volatile,
-                         bool is_ordered,
-                         CodeGeneratorMIPS64* codegen) {
-  DCHECK((type == DataType::Type::kInt32) ||
-         (type == DataType::Type::kInt64) ||
-         (type == DataType::Type::kReference));
-  Mips64Assembler* assembler = codegen->GetAssembler();
-  // Object pointer.
-  GpuRegister base = locations->InAt(1).AsRegister<GpuRegister>();
-  // Long offset.
-  GpuRegister offset = locations->InAt(2).AsRegister<GpuRegister>();
-  GpuRegister value = locations->InAt(3).AsRegister<GpuRegister>();
-
-  __ Daddu(TMP, base, offset);
-  if (is_volatile || is_ordered) {
-    __ Sync(0);
-  }
-  switch (type) {
-    case DataType::Type::kInt32:
-    case DataType::Type::kReference:
-      if (kPoisonHeapReferences && type == DataType::Type::kReference) {
-        __ PoisonHeapReference(AT, value);
-        __ Sw(AT, TMP, 0);
-      } else {
-        __ Sw(value, TMP, 0);
-      }
-      break;
-
-    case DataType::Type::kInt64:
-      __ Sd(value, TMP, 0);
-      break;
-
-    default:
-      LOG(FATAL) << "Unsupported op size " << type;
-      UNREACHABLE();
-  }
-  if (is_volatile) {
-    __ Sync(0);
-  }
-
-  if (type == DataType::Type::kReference) {
-    bool value_can_be_null = true;  // TODO: Worth finding out this information?
-    codegen->MarkGCCard(base, value, value_can_be_null);
-  }
-}
-
-// void sun.misc.Unsafe.putInt(Object o, long offset, int x)
-void IntrinsicLocationsBuilderMIPS64::VisitUnsafePut(HInvoke* invoke) {
-  CreateIntIntIntIntToVoid(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitUnsafePut(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(),
-               DataType::Type::kInt32,
-               /* is_volatile= */ false,
-               /* is_ordered= */ false,
-               codegen_);
-}
-
-// void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x)
-void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
-  CreateIntIntIntIntToVoid(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(),
-               DataType::Type::kInt32,
-               /* is_volatile= */ false,
-               /* is_ordered= */ true,
-               codegen_);
-}
-
-// void sun.misc.Unsafe.putIntVolatile(Object o, long offset, int x)
-void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
-  CreateIntIntIntIntToVoid(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(),
-               DataType::Type::kInt32,
-               /* is_volatile= */ true,
-               /* is_ordered= */ false,
-               codegen_);
-}
-
-// void sun.misc.Unsafe.putObject(Object o, long offset, Object x)
-void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
-  CreateIntIntIntIntToVoid(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(),
-               DataType::Type::kReference,
-               /* is_volatile= */ false,
-               /* is_ordered= */ false,
-               codegen_);
-}
-
-// void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x)
-void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
-  CreateIntIntIntIntToVoid(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(),
-               DataType::Type::kReference,
-               /* is_volatile= */ false,
-               /* is_ordered= */ true,
-               codegen_);
-}
-
-// void sun.misc.Unsafe.putObjectVolatile(Object o, long offset, Object x)
-void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
-  CreateIntIntIntIntToVoid(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(),
-               DataType::Type::kReference,
-               /* is_volatile= */ true,
-               /* is_ordered= */ false,
-               codegen_);
-}
-
-// void sun.misc.Unsafe.putLong(Object o, long offset, long x)
-void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
-  CreateIntIntIntIntToVoid(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(),
-               DataType::Type::kInt64,
-               /* is_volatile= */ false,
-               /* is_ordered= */ false,
-               codegen_);
-}
-
-// void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x)
-void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
-  CreateIntIntIntIntToVoid(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(),
-               DataType::Type::kInt64,
-               /* is_volatile= */ false,
-               /* is_ordered= */ true,
-               codegen_);
-}
-
-// void sun.misc.Unsafe.putLongVolatile(Object o, long offset, long x)
-void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
-  CreateIntIntIntIntToVoid(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
-  GenUnsafePut(invoke->GetLocations(),
-               DataType::Type::kInt64,
-               /* is_volatile= */ true,
-               /* is_ordered= */ false,
-               codegen_);
-}
-
-static void CreateIntIntIntIntIntToIntPlusTemps(ArenaAllocator* allocator, HInvoke* invoke) {
-  bool can_call = kEmitCompilerReadBarrier &&
-      kUseBakerReadBarrier &&
-      (invoke->GetIntrinsic() == Intrinsics::kUnsafeCASObject);
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke,
-                                      can_call
-                                          ? LocationSummary::kCallOnSlowPath
-                                          : LocationSummary::kNoCall,
-                                      kIntrinsified);
-  locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetInAt(2, Location::RequiresRegister());
-  locations->SetInAt(3, Location::RequiresRegister());
-  locations->SetInAt(4, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister());
-
-  // Temporary register used in CAS by (Baker) read barrier.
-  if (can_call) {
-    locations->AddTemp(Location::RequiresRegister());
-  }
-}
-
-// Note that the caller must supply a properly aligned memory address.
-// If they do not, the behavior is undefined (atomicity not guaranteed, exception may occur).
-static void GenCas(HInvoke* invoke, DataType::Type type, CodeGeneratorMIPS64* codegen) {
-  Mips64Assembler* assembler = codegen->GetAssembler();
-  LocationSummary* locations = invoke->GetLocations();
-  GpuRegister base = locations->InAt(1).AsRegister<GpuRegister>();
-  Location offset_loc = locations->InAt(2);
-  GpuRegister offset = offset_loc.AsRegister<GpuRegister>();
-  GpuRegister expected = locations->InAt(3).AsRegister<GpuRegister>();
-  GpuRegister value = locations->InAt(4).AsRegister<GpuRegister>();
-  Location out_loc = locations->Out();
-  GpuRegister out = out_loc.AsRegister<GpuRegister>();
-
-  DCHECK_NE(base, out);
-  DCHECK_NE(offset, out);
-  DCHECK_NE(expected, out);
-
-  if (type == DataType::Type::kReference) {
-    // The only read barrier implementation supporting the
-    // UnsafeCASObject intrinsic is the Baker-style read barriers.
-    DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
-
-    // Mark card for object assuming new value is stored. Worst case we will mark an unchanged
-    // object and scan the receiver at the next GC for nothing.
-    bool value_can_be_null = true;  // TODO: Worth finding out this information?
-    codegen->MarkGCCard(base, value, value_can_be_null);
-
-    if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
-      Location temp = locations->GetTemp(0);
-      // Need to make sure the reference stored in the field is a to-space
-      // one before attempting the CAS or the CAS could fail incorrectly.
-      codegen->GenerateReferenceLoadWithBakerReadBarrier(
-          invoke,
-          out_loc,  // Unused, used only as a "temporary" within the read barrier.
-          base,
-          /* offset= */ 0u,
-          /* index= */ offset_loc,
-          ScaleFactor::TIMES_1,
-          temp,
-          /* needs_null_check= */ false,
-          /* always_update_field= */ true);
-    }
-  }
-
-  Mips64Label loop_head, exit_loop;
-  __ Daddu(TMP, base, offset);
-
-  if (kPoisonHeapReferences && type == DataType::Type::kReference) {
-    __ PoisonHeapReference(expected);
-    // Do not poison `value`, if it is the same register as
-    // `expected`, which has just been poisoned.
-    if (value != expected) {
-      __ PoisonHeapReference(value);
-    }
-  }
-
-  // do {
-  //   tmp_value = [tmp_ptr] - expected;
-  // } while (tmp_value == 0 && failure([tmp_ptr] <- r_new_value));
-  // result = tmp_value != 0;
-
-  __ Sync(0);
-  __ Bind(&loop_head);
-  if (type == DataType::Type::kInt64) {
-    __ Lld(out, TMP);
-  } else {
-    // Note: We will need a read barrier here, when read barrier
-    // support is added to the MIPS64 back end.
-    __ Ll(out, TMP);
-    if (type == DataType::Type::kReference) {
-      // The LL instruction sign-extends the 32-bit value, but
-      // 32-bit references must be zero-extended. Zero-extend `out`.
-      __ Dext(out, out, 0, 32);
-    }
-  }
-  __ Dsubu(out, out, expected);         // If we didn't get the 'expected'
-  __ Sltiu(out, out, 1);                // value, set 'out' to false, and
-  __ Beqzc(out, &exit_loop);            // return.
-  __ Move(out, value);  // Use 'out' for the 'store conditional' instruction.
-                        // If we use 'value' directly, we would lose 'value'
-                        // in the case that the store fails.  Whether the
-                        // store succeeds, or fails, it will load the
-                        // correct Boolean value into the 'out' register.
-  if (type == DataType::Type::kInt64) {
-    __ Scd(out, TMP);
-  } else {
-    __ Sc(out, TMP);
-  }
-  __ Beqzc(out, &loop_head);    // If we couldn't do the read-modify-write
-                                // cycle atomically then retry.
-  __ Bind(&exit_loop);
-  __ Sync(0);
-
-  if (kPoisonHeapReferences && type == DataType::Type::kReference) {
-    __ UnpoisonHeapReference(expected);
-    // Do not unpoison `value`, if it is the same register as
-    // `expected`, which has just been unpoisoned.
-    if (value != expected) {
-      __ UnpoisonHeapReference(value);
-    }
-  }
-}
-
-// boolean sun.misc.Unsafe.compareAndSwapInt(Object o, long offset, int expected, int x)
-void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASInt(HInvoke* invoke) {
-  CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASInt(HInvoke* invoke) {
-  GenCas(invoke, DataType::Type::kInt32, codegen_);
-}
-
-// boolean sun.misc.Unsafe.compareAndSwapLong(Object o, long offset, long expected, long x)
-void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASLong(HInvoke* invoke) {
-  CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASLong(HInvoke* invoke) {
-  GenCas(invoke, DataType::Type::kInt64, codegen_);
-}
-
-// boolean sun.misc.Unsafe.compareAndSwapObject(Object o, long offset, Object expected, Object x)
-void IntrinsicLocationsBuilderMIPS64::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The only read barrier implementation supporting the
-  // UnsafeCASObject intrinsic is the Baker-style read barriers.
-  if (kEmitCompilerReadBarrier && !kUseBakerReadBarrier) {
-    return;
-  }
-
-  CreateIntIntIntIntIntToIntPlusTemps(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The only read barrier implementation supporting the
-  // UnsafeCASObject intrinsic is the Baker-style read barriers.
-  DCHECK(!kEmitCompilerReadBarrier || kUseBakerReadBarrier);
-
-  GenCas(invoke, DataType::Type::kReference, codegen_);
-}
-
-// int java.lang.String.compareTo(String anotherString)
-void IntrinsicLocationsBuilderMIPS64::VisitStringCompareTo(HInvoke* invoke) {
-  LocationSummary* locations = new (allocator_) LocationSummary(
-      invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-  Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32);
-  locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitStringCompareTo(HInvoke* invoke) {
-  Mips64Assembler* assembler = GetAssembler();
-  LocationSummary* locations = invoke->GetLocations();
-
-  // Note that the null check must have been done earlier.
-  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
-
-  GpuRegister argument = locations->InAt(1).AsRegister<GpuRegister>();
-  SlowPathCodeMIPS64* slow_path =
-      new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
-  codegen_->AddSlowPath(slow_path);
-  __ Beqzc(argument, slow_path->GetEntryLabel());
-
-  codegen_->InvokeRuntime(kQuickStringCompareTo, invoke, invoke->GetDexPc(), slow_path);
-  __ Bind(slow_path->GetExitLabel());
-}
-
-// boolean java.lang.String.equals(Object anObject)
-void IntrinsicLocationsBuilderMIPS64::VisitStringEquals(HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetOut(Location::RequiresRegister());
-
-  // Temporary registers to store lengths of strings and for calculations.
-  locations->AddTemp(Location::RequiresRegister());
-  locations->AddTemp(Location::RequiresRegister());
-  locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitStringEquals(HInvoke* invoke) {
-  Mips64Assembler* assembler = GetAssembler();
-  LocationSummary* locations = invoke->GetLocations();
-
-  GpuRegister str = locations->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister arg = locations->InAt(1).AsRegister<GpuRegister>();
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-
-  GpuRegister temp1 = locations->GetTemp(0).AsRegister<GpuRegister>();
-  GpuRegister temp2 = locations->GetTemp(1).AsRegister<GpuRegister>();
-  GpuRegister temp3 = locations->GetTemp(2).AsRegister<GpuRegister>();
-
-  Mips64Label loop;
-  Mips64Label end;
-  Mips64Label return_true;
-  Mips64Label return_false;
-
-  // Get offsets of count, value, and class fields within a string object.
-  const int32_t count_offset = mirror::String::CountOffset().Int32Value();
-  const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
-  const int32_t class_offset = mirror::Object::ClassOffset().Int32Value();
-
-  // Note that the null check must have been done earlier.
-  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
-
-  // If the register containing the pointer to "this", and the register
-  // containing the pointer to "anObject" are the same register then
-  // "this", and "anObject" are the same object and we can
-  // short-circuit the logic to a true result.
-  if (str == arg) {
-    __ LoadConst64(out, 1);
-    return;
-  }
-
-  StringEqualsOptimizations optimizations(invoke);
-  if (!optimizations.GetArgumentNotNull()) {
-    // Check if input is null, return false if it is.
-    __ Beqzc(arg, &return_false);
-  }
-
-  // Reference equality check, return true if same reference.
-  __ Beqc(str, arg, &return_true);
-
-  if (!optimizations.GetArgumentIsString()) {
-    // Instanceof check for the argument by comparing class fields.
-    // All string objects must have the same type since String cannot be subclassed.
-    // Receiver must be a string object, so its class field is equal to all strings' class fields.
-    // If the argument is a string object, its class field must be equal to receiver's class field.
-    //
-    // As the String class is expected to be non-movable, we can read the class
-    // field from String.equals' arguments without read barriers.
-    AssertNonMovableStringClass();
-    // /* HeapReference<Class> */ temp1 = str->klass_
-    __ Lw(temp1, str, class_offset);
-    // /* HeapReference<Class> */ temp2 = arg->klass_
-    __ Lw(temp2, arg, class_offset);
-    // Also, because we use the previously loaded class references only in the
-    // following comparison, we don't need to unpoison them.
-    __ Bnec(temp1, temp2, &return_false);
-  }
-
-  // Load `count` fields of this and argument strings.
-  __ Lw(temp1, str, count_offset);
-  __ Lw(temp2, arg, count_offset);
-  // Check if `count` fields are equal, return false if they're not.
-  // Also compares the compression style, if differs return false.
-  __ Bnec(temp1, temp2, &return_false);
-  // Return true if both strings are empty. Even with string compression `count == 0` means empty.
-  static_assert(static_cast<uint32_t>(mirror::StringCompressionFlag::kCompressed) == 0u,
-                "Expecting 0=compressed, 1=uncompressed");
-  __ Beqzc(temp1, &return_true);
-
-  // Don't overwrite input registers
-  __ Move(TMP, str);
-  __ Move(temp3, arg);
-
-  // Assertions that must hold in order to compare strings 8 bytes at a time.
-  DCHECK_ALIGNED(value_offset, 8);
-  static_assert(IsAligned<8>(kObjectAlignment), "String of odd length is not zero padded");
-
-  if (mirror::kUseStringCompression) {
-    // For string compression, calculate the number of bytes to compare (not chars).
-    __ Dext(temp2, temp1, 0, 1);         // Extract compression flag.
-    __ Srl(temp1, temp1, 1);             // Extract length.
-    __ Sllv(temp1, temp1, temp2);        // Double the byte count if uncompressed.
-  }
-
-  // Loop to compare strings 8 bytes at a time starting at the beginning of the string.
-  // Ok to do this because strings are zero-padded to kObjectAlignment.
-  __ Bind(&loop);
-  __ Ld(out, TMP, value_offset);
-  __ Ld(temp2, temp3, value_offset);
-  __ Bnec(out, temp2, &return_false);
-  __ Daddiu(TMP, TMP, 8);
-  __ Daddiu(temp3, temp3, 8);
-  // With string compression, we have compared 8 bytes, otherwise 4 chars.
-  __ Addiu(temp1, temp1, mirror::kUseStringCompression ? -8 : -4);
-  __ Bgtzc(temp1, &loop);
-
-  // Return true and exit the function.
-  // If loop does not result in returning false, we return true.
-  __ Bind(&return_true);
-  __ LoadConst64(out, 1);
-  __ Bc(&end);
-
-  // Return false and exit the function.
-  __ Bind(&return_false);
-  __ LoadConst64(out, 0);
-  __ Bind(&end);
-}
-
-static void GenerateStringIndexOf(HInvoke* invoke,
-                                  Mips64Assembler* assembler,
-                                  CodeGeneratorMIPS64* codegen,
-                                  bool start_at_zero) {
-  LocationSummary* locations = invoke->GetLocations();
-  GpuRegister tmp_reg = start_at_zero ? locations->GetTemp(0).AsRegister<GpuRegister>() : TMP;
-
-  // Note that the null check must have been done earlier.
-  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
-
-  // Check for code points > 0xFFFF. Either a slow-path check when we don't know statically,
-  // or directly dispatch for a large constant, or omit slow-path for a small constant or a char.
-  SlowPathCodeMIPS64* slow_path = nullptr;
-  HInstruction* code_point = invoke->InputAt(1);
-  if (code_point->IsIntConstant()) {
-    if (!IsUint<16>(code_point->AsIntConstant()->GetValue())) {
-      // Always needs the slow-path. We could directly dispatch to it,
-      // but this case should be rare, so for simplicity just put the
-      // full slow-path down and branch unconditionally.
-      slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
-      codegen->AddSlowPath(slow_path);
-      __ Bc(slow_path->GetEntryLabel());
-      __ Bind(slow_path->GetExitLabel());
-      return;
-    }
-  } else if (code_point->GetType() != DataType::Type::kUint16) {
-    GpuRegister char_reg = locations->InAt(1).AsRegister<GpuRegister>();
-    __ LoadConst32(tmp_reg, std::numeric_limits<uint16_t>::max());
-    slow_path = new (codegen->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
-    codegen->AddSlowPath(slow_path);
-    __ Bltuc(tmp_reg, char_reg, slow_path->GetEntryLabel());    // UTF-16 required
-  }
-
-  if (start_at_zero) {
-    DCHECK_EQ(tmp_reg, A2);
-    // Start-index = 0.
-    __ Clear(tmp_reg);
-  }
-
-  codegen->InvokeRuntime(kQuickIndexOf, invoke, invoke->GetDexPc(), slow_path);
-  CheckEntrypointTypes<kQuickIndexOf, int32_t, void*, uint32_t, uint32_t>();
-
-  if (slow_path != nullptr) {
-    __ Bind(slow_path->GetExitLabel());
-  }
-}
-
-// int java.lang.String.indexOf(int ch)
-void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOf(HInvoke* invoke) {
-  LocationSummary* locations = new (allocator_) LocationSummary(
-      invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
-  // We have a hand-crafted assembly stub that follows the runtime
-  // calling convention. So it's best to align the inputs accordingly.
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-  Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32);
-  locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
-
-  // Need a temp for slow-path codepoint compare, and need to send start-index=0.
-  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOf(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ true);
-}
-
-// int java.lang.String.indexOf(int ch, int fromIndex)
-void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
-  LocationSummary* locations = new (allocator_) LocationSummary(
-      invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
-  // We have a hand-crafted assembly stub that follows the runtime
-  // calling convention. So it's best to align the inputs accordingly.
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-  locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
-  Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32);
-  locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
-  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, /* start_at_zero= */ false);
-}
-
-// java.lang.StringFactory.newStringFromBytes(byte[] data, int high, int offset, int byteCount)
-void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) {
-  LocationSummary* locations = new (allocator_) LocationSummary(
-      invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-  locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
-  locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
-  Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32);
-  locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) {
-  Mips64Assembler* assembler = GetAssembler();
-  LocationSummary* locations = invoke->GetLocations();
-
-  GpuRegister byte_array = locations->InAt(0).AsRegister<GpuRegister>();
-  SlowPathCodeMIPS64* slow_path =
-      new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
-  codegen_->AddSlowPath(slow_path);
-  __ Beqzc(byte_array, slow_path->GetEntryLabel());
-
-  codegen_->InvokeRuntime(kQuickAllocStringFromBytes, invoke, invoke->GetDexPc(), slow_path);
-  CheckEntrypointTypes<kQuickAllocStringFromBytes, void*, void*, int32_t, int32_t, int32_t>();
-  __ Bind(slow_path->GetExitLabel());
-}
-
-// java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
-void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-  locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
-  Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32);
-  locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
-  // No need to emit code checking whether `locations->InAt(2)` is a null
-  // pointer, as callers of the native method
-  //
-  //   java.lang.StringFactory.newStringFromChars(int offset, int charCount, char[] data)
-  //
-  // all include a null check on `data` before calling that method.
-  codegen_->InvokeRuntime(kQuickAllocStringFromChars, invoke, invoke->GetDexPc());
-  CheckEntrypointTypes<kQuickAllocStringFromChars, void*, int32_t, int32_t, void*>();
-}
-
-// java.lang.StringFactory.newStringFromString(String toCopy)
-void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromString(HInvoke* invoke) {
-  LocationSummary* locations = new (allocator_) LocationSummary(
-      invoke, LocationSummary::kCallOnMainAndSlowPath, kIntrinsified);
-  InvokeRuntimeCallingConvention calling_convention;
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  Location outLocation = calling_convention.GetReturnLocation(DataType::Type::kInt32);
-  locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromString(HInvoke* invoke) {
-  Mips64Assembler* assembler = GetAssembler();
-  LocationSummary* locations = invoke->GetLocations();
-
-  GpuRegister string_to_copy = locations->InAt(0).AsRegister<GpuRegister>();
-  SlowPathCodeMIPS64* slow_path =
-      new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
-  codegen_->AddSlowPath(slow_path);
-  __ Beqzc(string_to_copy, slow_path->GetEntryLabel());
-
-  codegen_->InvokeRuntime(kQuickAllocStringFromString, invoke, invoke->GetDexPc(), slow_path);
-  CheckEntrypointTypes<kQuickAllocStringFromString, void*, void*>();
-  __ Bind(slow_path->GetExitLabel());
-}
-
-static void GenIsInfinite(LocationSummary* locations,
-                          bool is64bit,
-                          Mips64Assembler* assembler) {
-  FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-
-  if (is64bit) {
-    __ ClassD(FTMP, in);
-  } else {
-    __ ClassS(FTMP, in);
-  }
-  __ Mfc1(out, FTMP);
-  __ Andi(out, out, kPositiveInfinity | kNegativeInfinity);
-  __ Sltu(out, ZERO, out);
-}
-
-// boolean java.lang.Float.isInfinite(float)
-void IntrinsicLocationsBuilderMIPS64::VisitFloatIsInfinite(HInvoke* invoke) {
-  CreateFPToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitFloatIsInfinite(HInvoke* invoke) {
-  GenIsInfinite(invoke->GetLocations(), /* is64bit= */ false, GetAssembler());
-}
-
-// boolean java.lang.Double.isInfinite(double)
-void IntrinsicLocationsBuilderMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) {
-  CreateFPToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitDoubleIsInfinite(HInvoke* invoke) {
-  GenIsInfinite(invoke->GetLocations(), /* is64bit= */ true, GetAssembler());
-}
-
-// void java.lang.String.getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin)
-void IntrinsicLocationsBuilderMIPS64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RequiresRegister());
-  locations->SetInAt(2, Location::RequiresRegister());
-  locations->SetInAt(3, Location::RequiresRegister());
-  locations->SetInAt(4, Location::RequiresRegister());
-
-  locations->AddTemp(Location::RequiresRegister());
-  locations->AddTemp(Location::RequiresRegister());
-  locations->AddTemp(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitStringGetCharsNoCheck(HInvoke* invoke) {
-  Mips64Assembler* assembler = GetAssembler();
-  LocationSummary* locations = invoke->GetLocations();
-
-  // Check assumption that sizeof(Char) is 2 (used in scaling below).
-  const size_t char_size = DataType::Size(DataType::Type::kUint16);
-  DCHECK_EQ(char_size, 2u);
-  const size_t char_shift = DataType::SizeShift(DataType::Type::kUint16);
-
-  GpuRegister srcObj = locations->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister srcBegin = locations->InAt(1).AsRegister<GpuRegister>();
-  GpuRegister srcEnd = locations->InAt(2).AsRegister<GpuRegister>();
-  GpuRegister dstObj = locations->InAt(3).AsRegister<GpuRegister>();
-  GpuRegister dstBegin = locations->InAt(4).AsRegister<GpuRegister>();
-
-  GpuRegister dstPtr = locations->GetTemp(0).AsRegister<GpuRegister>();
-  GpuRegister srcPtr = locations->GetTemp(1).AsRegister<GpuRegister>();
-  GpuRegister numChrs = locations->GetTemp(2).AsRegister<GpuRegister>();
-
-  Mips64Label done;
-  Mips64Label loop;
-
-  // Location of data in char array buffer.
-  const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
-
-  // Get offset of value field within a string object.
-  const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
-
-  __ Beqc(srcEnd, srcBegin, &done);  // No characters to move.
-
-  // Calculate number of characters to be copied.
-  __ Dsubu(numChrs, srcEnd, srcBegin);
-
-  // Calculate destination address.
-  __ Daddiu(dstPtr, dstObj, data_offset);
-  __ Dlsa(dstPtr, dstBegin, dstPtr, char_shift);
-
-  if (mirror::kUseStringCompression) {
-    Mips64Label uncompressed_copy, compressed_loop;
-    const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
-    // Load count field and extract compression flag.
-    __ LoadFromOffset(kLoadWord, TMP, srcObj, count_offset);
-    __ Dext(TMP, TMP, 0, 1);
-
-    // If string is uncompressed, use uncompressed path.
-    __ Bnezc(TMP, &uncompressed_copy);
-
-    // Copy loop for compressed src, copying 1 character (8-bit) to (16-bit) at a time.
-    __ Daddu(srcPtr, srcObj, srcBegin);
-    __ Bind(&compressed_loop);
-    __ LoadFromOffset(kLoadUnsignedByte, TMP, srcPtr, value_offset);
-    __ StoreToOffset(kStoreHalfword, TMP, dstPtr, 0);
-    __ Daddiu(numChrs, numChrs, -1);
-    __ Daddiu(srcPtr, srcPtr, 1);
-    __ Daddiu(dstPtr, dstPtr, 2);
-    __ Bnezc(numChrs, &compressed_loop);
-
-    __ Bc(&done);
-    __ Bind(&uncompressed_copy);
-  }
-
-  // Calculate source address.
-  __ Daddiu(srcPtr, srcObj, value_offset);
-  __ Dlsa(srcPtr, srcBegin, srcPtr, char_shift);
-
-  __ Bind(&loop);
-  __ Lh(AT, srcPtr, 0);
-  __ Daddiu(numChrs, numChrs, -1);
-  __ Daddiu(srcPtr, srcPtr, char_size);
-  __ Sh(AT, dstPtr, 0);
-  __ Daddiu(dstPtr, dstPtr, char_size);
-  __ Bnezc(numChrs, &loop);
-
-  __ Bind(&done);
-}
-
-// static void java.lang.System.arraycopy(Object src, int srcPos,
-//                                        Object dest, int destPos,
-//                                        int length)
-void IntrinsicLocationsBuilderMIPS64::VisitSystemArrayCopyChar(HInvoke* invoke) {
-  HIntConstant* src_pos = invoke->InputAt(1)->AsIntConstant();
-  HIntConstant* dest_pos = invoke->InputAt(3)->AsIntConstant();
-  HIntConstant* length = invoke->InputAt(4)->AsIntConstant();
-
-  // As long as we are checking, we might as well check to see if the src and dest
-  // positions are >= 0.
-  if ((src_pos != nullptr && src_pos->GetValue() < 0) ||
-      (dest_pos != nullptr && dest_pos->GetValue() < 0)) {
-    // We will have to fail anyways.
-    return;
-  }
-
-  // And since we are already checking, check the length too.
-  if (length != nullptr) {
-    int32_t len = length->GetValue();
-    if (len < 0) {
-      // Just call as normal.
-      return;
-    }
-  }
-
-  // Okay, it is safe to generate inline code.
-  LocationSummary* locations =
-      new (allocator_) LocationSummary(invoke, LocationSummary::kCallOnSlowPath, kIntrinsified);
-  // arraycopy(Object src, int srcPos, Object dest, int destPos, int length).
-  locations->SetInAt(0, Location::RequiresRegister());
-  locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
-  locations->SetInAt(2, Location::RequiresRegister());
-  locations->SetInAt(3, Location::RegisterOrConstant(invoke->InputAt(3)));
-  locations->SetInAt(4, Location::RegisterOrConstant(invoke->InputAt(4)));
-
-  locations->AddTemp(Location::RequiresRegister());
-  locations->AddTemp(Location::RequiresRegister());
-  locations->AddTemp(Location::RequiresRegister());
-}
-
-// Utility routine to verify that "length(input) - pos >= length"
-static void EnoughItems(Mips64Assembler* assembler,
-                        GpuRegister length_input_minus_pos,
-                        Location length,
-                        SlowPathCodeMIPS64* slow_path) {
-  if (length.IsConstant()) {
-    int32_t length_constant = length.GetConstant()->AsIntConstant()->GetValue();
-
-    if (IsInt<16>(length_constant)) {
-      __ Slti(TMP, length_input_minus_pos, length_constant);
-      __ Bnezc(TMP, slow_path->GetEntryLabel());
-    } else {
-      __ LoadConst32(TMP, length_constant);
-      __ Bltc(length_input_minus_pos, TMP, slow_path->GetEntryLabel());
-    }
-  } else {
-    __ Bltc(length_input_minus_pos, length.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
-  }
-}
-
-static void CheckPosition(Mips64Assembler* assembler,
-                          Location pos,
-                          GpuRegister input,
-                          Location length,
-                          SlowPathCodeMIPS64* slow_path,
-                          bool length_is_input_length = false) {
-  // Where is the length in the Array?
-  const uint32_t length_offset = mirror::Array::LengthOffset().Uint32Value();
-
-  // Calculate length(input) - pos.
-  if (pos.IsConstant()) {
-    int32_t pos_const = pos.GetConstant()->AsIntConstant()->GetValue();
-    if (pos_const == 0) {
-      if (!length_is_input_length) {
-        // Check that length(input) >= length.
-        __ LoadFromOffset(kLoadWord, AT, input, length_offset);
-        EnoughItems(assembler, AT, length, slow_path);
-      }
-    } else {
-      // Check that (length(input) - pos) >= zero.
-      __ LoadFromOffset(kLoadWord, AT, input, length_offset);
-      DCHECK_GT(pos_const, 0);
-      __ Addiu32(AT, AT, -pos_const);
-      __ Bltzc(AT, slow_path->GetEntryLabel());
-
-      // Verify that (length(input) - pos) >= length.
-      EnoughItems(assembler, AT, length, slow_path);
-    }
-  } else if (length_is_input_length) {
-    // The only way the copy can succeed is if pos is zero.
-    GpuRegister pos_reg = pos.AsRegister<GpuRegister>();
-    __ Bnezc(pos_reg, slow_path->GetEntryLabel());
-  } else {
-    // Verify that pos >= 0.
-    GpuRegister pos_reg = pos.AsRegister<GpuRegister>();
-    __ Bltzc(pos_reg, slow_path->GetEntryLabel());
-
-    // Check that (length(input) - pos) >= zero.
-    __ LoadFromOffset(kLoadWord, AT, input, length_offset);
-    __ Subu(AT, AT, pos_reg);
-    __ Bltzc(AT, slow_path->GetEntryLabel());
-
-    // Verify that (length(input) - pos) >= length.
-    EnoughItems(assembler, AT, length, slow_path);
-  }
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitSystemArrayCopyChar(HInvoke* invoke) {
-  Mips64Assembler* assembler = GetAssembler();
-  LocationSummary* locations = invoke->GetLocations();
-
-  GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
-  Location src_pos = locations->InAt(1);
-  GpuRegister dest = locations->InAt(2).AsRegister<GpuRegister>();
-  Location dest_pos = locations->InAt(3);
-  Location length = locations->InAt(4);
-
-  Mips64Label loop;
-
-  GpuRegister dest_base = locations->GetTemp(0).AsRegister<GpuRegister>();
-  GpuRegister src_base = locations->GetTemp(1).AsRegister<GpuRegister>();
-  GpuRegister count = locations->GetTemp(2).AsRegister<GpuRegister>();
-
-  SlowPathCodeMIPS64* slow_path =
-      new (codegen_->GetScopedAllocator()) IntrinsicSlowPathMIPS64(invoke);
-  codegen_->AddSlowPath(slow_path);
-
-  // Bail out if the source and destination are the same (to handle overlap).
-  __ Beqc(src, dest, slow_path->GetEntryLabel());
-
-  // Bail out if the source is null.
-  __ Beqzc(src, slow_path->GetEntryLabel());
-
-  // Bail out if the destination is null.
-  __ Beqzc(dest, slow_path->GetEntryLabel());
-
-  // Load length into register for count.
-  if (length.IsConstant()) {
-    __ LoadConst32(count, length.GetConstant()->AsIntConstant()->GetValue());
-  } else {
-    // If the length is negative, bail out.
-    // We have already checked in the LocationsBuilder for the constant case.
-    __ Bltzc(length.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
-
-    __ Move(count, length.AsRegister<GpuRegister>());
-  }
-
-  // Validity checks: source.
-  CheckPosition(assembler, src_pos, src, Location::RegisterLocation(count), slow_path);
-
-  // Validity checks: dest.
-  CheckPosition(assembler, dest_pos, dest, Location::RegisterLocation(count), slow_path);
-
-  // If count is zero, we're done.
-  __ Beqzc(count, slow_path->GetExitLabel());
-
-  // Okay, everything checks out.  Finally time to do the copy.
-  // Check assumption that sizeof(Char) is 2 (used in scaling below).
-  const size_t char_size = DataType::Size(DataType::Type::kUint16);
-  DCHECK_EQ(char_size, 2u);
-
-  const size_t char_shift = DataType::SizeShift(DataType::Type::kUint16);
-
-  const uint32_t data_offset = mirror::Array::DataOffset(char_size).Uint32Value();
-
-  // Calculate source and destination addresses.
-  if (src_pos.IsConstant()) {
-    int32_t src_pos_const = src_pos.GetConstant()->AsIntConstant()->GetValue();
-
-    __ Daddiu64(src_base, src, data_offset + char_size * src_pos_const, TMP);
-  } else {
-    __ Daddiu64(src_base, src, data_offset, TMP);
-    __ Dlsa(src_base, src_pos.AsRegister<GpuRegister>(), src_base, char_shift);
-  }
-  if (dest_pos.IsConstant()) {
-    int32_t dest_pos_const = dest_pos.GetConstant()->AsIntConstant()->GetValue();
-
-    __ Daddiu64(dest_base, dest, data_offset + char_size * dest_pos_const, TMP);
-  } else {
-    __ Daddiu64(dest_base, dest, data_offset, TMP);
-    __ Dlsa(dest_base, dest_pos.AsRegister<GpuRegister>(), dest_base, char_shift);
-  }
-
-  __ Bind(&loop);
-  __ Lh(TMP, src_base, 0);
-  __ Daddiu(src_base, src_base, char_size);
-  __ Daddiu(count, count, -1);
-  __ Sh(TMP, dest_base, 0);
-  __ Daddiu(dest_base, dest_base, char_size);
-  __ Bnezc(count, &loop);
-
-  __ Bind(slow_path->GetExitLabel());
-}
-
-static void GenHighestOneBit(LocationSummary* locations,
-                             DataType::Type type,
-                             Mips64Assembler* assembler) {
-  DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64) << type;
-
-  GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-
-  if (type == DataType::Type::kInt64) {
-    __ Dclz(TMP, in);
-    __ LoadConst64(AT, INT64_C(0x8000000000000000));
-    __ Dsrlv(AT, AT, TMP);
-  } else {
-    __ Clz(TMP, in);
-    __ LoadConst32(AT, 0x80000000);
-    __ Srlv(AT, AT, TMP);
-  }
-  // For either value of "type", when "in" is zero, "out" should also
-  // be zero. Without this extra "and" operation, when "in" is zero,
-  // "out" would be either Integer.MIN_VALUE, or Long.MIN_VALUE because
-  // the MIPS logical shift operations "dsrlv", and "srlv" don't use
-  // the shift amount (TMP) directly; they use either (TMP % 64) or
-  // (TMP % 32), respectively.
-  __ And(out, AT, in);
-}
-
-// int java.lang.Integer.highestOneBit(int)
-void IntrinsicLocationsBuilderMIPS64::VisitIntegerHighestOneBit(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitIntegerHighestOneBit(HInvoke* invoke) {
-  GenHighestOneBit(invoke->GetLocations(), DataType::Type::kInt32, GetAssembler());
-}
-
-// long java.lang.Long.highestOneBit(long)
-void IntrinsicLocationsBuilderMIPS64::VisitLongHighestOneBit(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitLongHighestOneBit(HInvoke* invoke) {
-  GenHighestOneBit(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler());
-}
-
-static void GenLowestOneBit(LocationSummary* locations,
-                            DataType::Type type,
-                            Mips64Assembler* assembler) {
-  DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64) << type;
-
-  GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-
-  if (type == DataType::Type::kInt64) {
-    __ Dsubu(TMP, ZERO, in);
-  } else {
-    __ Subu(TMP, ZERO, in);
-  }
-  __ And(out, TMP, in);
-}
-
-// int java.lang.Integer.lowestOneBit(int)
-void IntrinsicLocationsBuilderMIPS64::VisitIntegerLowestOneBit(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitIntegerLowestOneBit(HInvoke* invoke) {
-  GenLowestOneBit(invoke->GetLocations(), DataType::Type::kInt32, GetAssembler());
-}
-
-// long java.lang.Long.lowestOneBit(long)
-void IntrinsicLocationsBuilderMIPS64::VisitLongLowestOneBit(HInvoke* invoke) {
-  CreateIntToIntLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitLongLowestOneBit(HInvoke* invoke) {
-  GenLowestOneBit(invoke->GetLocations(), DataType::Type::kInt64, GetAssembler());
-}
-
-static void CreateFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
-  InvokeRuntimeCallingConvention calling_convention;
-
-  locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
-  locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kFloat64));
-}
-
-static void CreateFPFPToFPCallLocations(ArenaAllocator* allocator, HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator) LocationSummary(invoke, LocationSummary::kCallOnMainOnly, kIntrinsified);
-  InvokeRuntimeCallingConvention calling_convention;
-
-  locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
-  locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
-  locations->SetOut(calling_convention.GetReturnLocation(DataType::Type::kFloat64));
-}
-
-static void GenFPToFPCall(HInvoke* invoke,
-                          CodeGeneratorMIPS64* codegen,
-                          QuickEntrypointEnum entry) {
-  LocationSummary* locations = invoke->GetLocations();
-  FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
-  DCHECK_EQ(in, F12);
-  FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
-  DCHECK_EQ(out, F0);
-
-  codegen->InvokeRuntime(entry, invoke, invoke->GetDexPc());
-}
-
-static void GenFPFPToFPCall(HInvoke* invoke,
-                            CodeGeneratorMIPS64* codegen,
-                            QuickEntrypointEnum entry) {
-  LocationSummary* locations = invoke->GetLocations();
-  FpuRegister in0 = locations->InAt(0).AsFpuRegister<FpuRegister>();
-  DCHECK_EQ(in0, F12);
-  FpuRegister in1 = locations->InAt(1).AsFpuRegister<FpuRegister>();
-  DCHECK_EQ(in1, F13);
-  FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
-  DCHECK_EQ(out, F0);
-
-  codegen->InvokeRuntime(entry, invoke, invoke->GetDexPc());
-}
-
-// static double java.lang.Math.cos(double a)
-void IntrinsicLocationsBuilderMIPS64::VisitMathCos(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathCos(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickCos);
-}
-
-// static double java.lang.Math.sin(double a)
-void IntrinsicLocationsBuilderMIPS64::VisitMathSin(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathSin(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickSin);
-}
-
-// static double java.lang.Math.acos(double a)
-void IntrinsicLocationsBuilderMIPS64::VisitMathAcos(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathAcos(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickAcos);
-}
-
-// static double java.lang.Math.asin(double a)
-void IntrinsicLocationsBuilderMIPS64::VisitMathAsin(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathAsin(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickAsin);
-}
-
-// static double java.lang.Math.atan(double a)
-void IntrinsicLocationsBuilderMIPS64::VisitMathAtan(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathAtan(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickAtan);
-}
-
-// static double java.lang.Math.atan2(double y, double x)
-void IntrinsicLocationsBuilderMIPS64::VisitMathAtan2(HInvoke* invoke) {
-  CreateFPFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathAtan2(HInvoke* invoke) {
-  GenFPFPToFPCall(invoke, codegen_, kQuickAtan2);
-}
-
-// static double java.lang.Math.pow(double y, double x)
-void IntrinsicLocationsBuilderMIPS64::VisitMathPow(HInvoke* invoke) {
-  CreateFPFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathPow(HInvoke* invoke) {
-  GenFPFPToFPCall(invoke, codegen_, kQuickPow);
-}
-
-// static double java.lang.Math.cbrt(double a)
-void IntrinsicLocationsBuilderMIPS64::VisitMathCbrt(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathCbrt(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickCbrt);
-}
-
-// static double java.lang.Math.cosh(double x)
-void IntrinsicLocationsBuilderMIPS64::VisitMathCosh(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathCosh(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickCosh);
-}
-
-// static double java.lang.Math.exp(double a)
-void IntrinsicLocationsBuilderMIPS64::VisitMathExp(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathExp(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickExp);
-}
-
-// static double java.lang.Math.expm1(double x)
-void IntrinsicLocationsBuilderMIPS64::VisitMathExpm1(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathExpm1(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickExpm1);
-}
-
-// static double java.lang.Math.hypot(double x, double y)
-void IntrinsicLocationsBuilderMIPS64::VisitMathHypot(HInvoke* invoke) {
-  CreateFPFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathHypot(HInvoke* invoke) {
-  GenFPFPToFPCall(invoke, codegen_, kQuickHypot);
-}
-
-// static double java.lang.Math.log(double a)
-void IntrinsicLocationsBuilderMIPS64::VisitMathLog(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathLog(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickLog);
-}
-
-// static double java.lang.Math.log10(double x)
-void IntrinsicLocationsBuilderMIPS64::VisitMathLog10(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathLog10(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickLog10);
-}
-
-// static double java.lang.Math.nextAfter(double start, double direction)
-void IntrinsicLocationsBuilderMIPS64::VisitMathNextAfter(HInvoke* invoke) {
-  CreateFPFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathNextAfter(HInvoke* invoke) {
-  GenFPFPToFPCall(invoke, codegen_, kQuickNextAfter);
-}
-
-// static double java.lang.Math.sinh(double x)
-void IntrinsicLocationsBuilderMIPS64::VisitMathSinh(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathSinh(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickSinh);
-}
-
-// static double java.lang.Math.tan(double a)
-void IntrinsicLocationsBuilderMIPS64::VisitMathTan(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathTan(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickTan);
-}
-
-// static double java.lang.Math.tanh(double x)
-void IntrinsicLocationsBuilderMIPS64::VisitMathTanh(HInvoke* invoke) {
-  CreateFPToFPCallLocations(allocator_, invoke);
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitMathTanh(HInvoke* invoke) {
-  GenFPToFPCall(invoke, codegen_, kQuickTanh);
-}
-
-// long java.lang.Integer.valueOf(long)
-void IntrinsicLocationsBuilderMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
-  InvokeRuntimeCallingConvention calling_convention;
-  IntrinsicVisitor::ComputeIntegerValueOfLocations(
-      invoke,
-      codegen_,
-      calling_convention.GetReturnLocation(DataType::Type::kReference),
-      Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitIntegerValueOf(HInvoke* invoke) {
-  IntrinsicVisitor::IntegerValueOfInfo info =
-      IntrinsicVisitor::ComputeIntegerValueOfInfo(invoke, codegen_->GetCompilerOptions());
-  LocationSummary* locations = invoke->GetLocations();
-  Mips64Assembler* assembler = GetAssembler();
-  InstructionCodeGeneratorMIPS64* icodegen =
-      down_cast<InstructionCodeGeneratorMIPS64*>(codegen_->GetInstructionVisitor());
-
-  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
-  if (invoke->InputAt(0)->IsConstant()) {
-    int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
-    if (static_cast<uint32_t>(value - info.low) < info.length) {
-      // Just embed the j.l.Integer in the code.
-      DCHECK_NE(info.value_boot_image_reference, IntegerValueOfInfo::kInvalidReference);
-      codegen_->LoadBootImageAddress(out, info.value_boot_image_reference);
-    } else {
-      DCHECK(locations->CanCall());
-      // Allocate and initialize a new j.l.Integer.
-      // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the
-      // JIT object table.
-      codegen_->AllocateInstanceForIntrinsic(invoke->AsInvokeStaticOrDirect(),
-                                             info.integer_boot_image_offset);
-      __ StoreConstToOffset(kStoreWord, value, out, info.value_offset, TMP);
-      // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
-      // one.
-      icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
-    }
-  } else {
-    DCHECK(locations->CanCall());
-    GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
-    Mips64Label allocate, done;
-
-    __ Addiu32(out, in, -info.low);
-    // As unsigned quantities is out < info.length ?
-    __ LoadConst32(AT, info.length);
-    // Branch if out >= info.length . This means that "in" is outside of the valid range.
-    __ Bgeuc(out, AT, &allocate);
-
-    // If the value is within the bounds, load the j.l.Integer directly from the array.
-    codegen_->LoadBootImageAddress(TMP, info.array_data_boot_image_reference);
-    __ Dlsa(out, out, TMP, TIMES_4);
-    __ Lwu(out, out, 0);
-    __ MaybeUnpoisonHeapReference(out);
-    __ Bc(&done);
-
-    __ Bind(&allocate);
-    // Otherwise allocate and initialize a new j.l.Integer.
-    codegen_->AllocateInstanceForIntrinsic(invoke->AsInvokeStaticOrDirect(),
-                                           info.integer_boot_image_offset);
-    __ StoreToOffset(kStoreWord, in, out, info.value_offset);
-    // `value` is a final field :-( Ideally, we'd merge this memory barrier with the allocation
-    // one.
-    icodegen->GenerateMemoryBarrier(MemBarrierKind::kStoreStore);
-    __ Bind(&done);
-  }
-}
-
-// static boolean java.lang.Thread.interrupted()
-void IntrinsicLocationsBuilderMIPS64::VisitThreadInterrupted(HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetOut(Location::RequiresRegister());
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitThreadInterrupted(HInvoke* invoke) {
-  Mips64Assembler* assembler = GetAssembler();
-  GpuRegister out = invoke->GetLocations()->Out().AsRegister<GpuRegister>();
-  int32_t offset = Thread::InterruptedOffset<kMips64PointerSize>().Int32Value();
-  __ LoadFromOffset(kLoadWord, out, TR, offset);
-  Mips64Label done;
-  __ Beqzc(out, &done);
-  __ Sync(0);
-  __ StoreToOffset(kStoreWord, ZERO, TR, offset);
-  __ Sync(0);
-  __ Bind(&done);
-}
-
-void IntrinsicLocationsBuilderMIPS64::VisitReachabilityFence(HInvoke* invoke) {
-  LocationSummary* locations =
-      new (allocator_) LocationSummary(invoke, LocationSummary::kNoCall, kIntrinsified);
-  locations->SetInAt(0, Location::Any());
-}
-
-void IntrinsicCodeGeneratorMIPS64::VisitReachabilityFence(HInvoke* invoke ATTRIBUTE_UNUSED) { }
-
-UNIMPLEMENTED_INTRINSIC(MIPS64, ReferenceGetReferent)
-UNIMPLEMENTED_INTRINSIC(MIPS64, SystemArrayCopy)
-UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32Update)
-UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32UpdateBytes)
-UNIMPLEMENTED_INTRINSIC(MIPS64, CRC32UpdateByteBuffer)
-
-UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOf);
-UNIMPLEMENTED_INTRINSIC(MIPS64, StringStringIndexOfAfter);
-UNIMPLEMENTED_INTRINSIC(MIPS64, StringBufferAppend);
-UNIMPLEMENTED_INTRINSIC(MIPS64, StringBufferLength);
-UNIMPLEMENTED_INTRINSIC(MIPS64, StringBufferToString);
-UNIMPLEMENTED_INTRINSIC(MIPS64, StringBuilderAppend);
-UNIMPLEMENTED_INTRINSIC(MIPS64, StringBuilderLength);
-UNIMPLEMENTED_INTRINSIC(MIPS64, StringBuilderToString);
-
-// 1.8.
-UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndAddInt)
-UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndAddLong)
-UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetInt)
-UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetLong)
-UNIMPLEMENTED_INTRINSIC(MIPS64, UnsafeGetAndSetObject)
-
-UNREACHABLE_INTRINSICS(MIPS64)
-
-#undef __
-
-}  // namespace mips64
-}  // namespace art
diff --git a/compiler/optimizing/intrinsics_mips64.h b/compiler/optimizing/intrinsics_mips64.h
deleted file mode 100644
index ca8bc8f..0000000
--- a/compiler/optimizing/intrinsics_mips64.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_OPTIMIZING_INTRINSICS_MIPS64_H_
-#define ART_COMPILER_OPTIMIZING_INTRINSICS_MIPS64_H_
-
-#include "intrinsics.h"
-
-namespace art {
-
-class ArenaAllocator;
-class HInvokeStaticOrDirect;
-class HInvokeVirtual;
-
-namespace mips64 {
-
-class CodeGeneratorMIPS64;
-class Mips64Assembler;
-
-class IntrinsicLocationsBuilderMIPS64 final : public IntrinsicVisitor {
- public:
-  explicit IntrinsicLocationsBuilderMIPS64(CodeGeneratorMIPS64* codegen);
-
-  // Define visitor methods.
-
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) override;
-#include "intrinsics_list.h"
-  INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
-
-  // Check whether an invoke is an intrinsic, and if so, create a location summary. Returns whether
-  // a corresponding LocationSummary with the intrinsified_ flag set was generated and attached to
-  // the invoke.
-  bool TryDispatch(HInvoke* invoke);
-
- private:
-  CodeGeneratorMIPS64* const codegen_;
-  ArenaAllocator* const allocator_;
-
-  DISALLOW_COPY_AND_ASSIGN(IntrinsicLocationsBuilderMIPS64);
-};
-
-class IntrinsicCodeGeneratorMIPS64 final : public IntrinsicVisitor {
- public:
-  explicit IntrinsicCodeGeneratorMIPS64(CodeGeneratorMIPS64* codegen) : codegen_(codegen) {}
-
-  // Define visitor methods.
-
-#define OPTIMIZING_INTRINSICS(Name, IsStatic, NeedsEnvironmentOrCache, SideEffects, Exceptions, ...) \
-  void Visit ## Name(HInvoke* invoke) override;
-#include "intrinsics_list.h"
-  INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
-#undef INTRINSICS_LIST
-#undef OPTIMIZING_INTRINSICS
-
-  bool HasMsa() const;
-
- private:
-  Mips64Assembler* GetAssembler();
-
-  ArenaAllocator* GetAllocator();
-
-  CodeGeneratorMIPS64* const codegen_;
-
-  DISALLOW_COPY_AND_ASSIGN(IntrinsicCodeGeneratorMIPS64);
-};
-
-}  // namespace mips64
-}  // namespace art
-
-#endif  // ART_COMPILER_OPTIMIZING_INTRINSICS_MIPS64_H_
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index de697f0..6d7462e 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -3081,13 +3081,31 @@
 UNIMPLEMENTED_INTRINSIC(X86, CRC32Update)
 UNIMPLEMENTED_INTRINSIC(X86, CRC32UpdateBytes)
 UNIMPLEMENTED_INTRINSIC(X86, CRC32UpdateByteBuffer)
+UNIMPLEMENTED_INTRINSIC(X86, FP16ToFloat)
+UNIMPLEMENTED_INTRINSIC(X86, FP16ToHalf)
+UNIMPLEMENTED_INTRINSIC(X86, FP16Floor)
+UNIMPLEMENTED_INTRINSIC(X86, FP16Ceil)
+UNIMPLEMENTED_INTRINSIC(X86, FP16Rint)
+UNIMPLEMENTED_INTRINSIC(X86, FP16Greater)
+UNIMPLEMENTED_INTRINSIC(X86, FP16GreaterEquals)
+UNIMPLEMENTED_INTRINSIC(X86, FP16Less)
+UNIMPLEMENTED_INTRINSIC(X86, FP16LessEquals)
 
 UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(X86, StringStringIndexOfAfter);
 UNIMPLEMENTED_INTRINSIC(X86, StringBufferAppend);
 UNIMPLEMENTED_INTRINSIC(X86, StringBufferLength);
 UNIMPLEMENTED_INTRINSIC(X86, StringBufferToString);
-UNIMPLEMENTED_INTRINSIC(X86, StringBuilderAppend);
+UNIMPLEMENTED_INTRINSIC(X86, StringBuilderAppendObject);
+UNIMPLEMENTED_INTRINSIC(X86, StringBuilderAppendString);
+UNIMPLEMENTED_INTRINSIC(X86, StringBuilderAppendCharSequence);
+UNIMPLEMENTED_INTRINSIC(X86, StringBuilderAppendCharArray);
+UNIMPLEMENTED_INTRINSIC(X86, StringBuilderAppendBoolean);
+UNIMPLEMENTED_INTRINSIC(X86, StringBuilderAppendChar);
+UNIMPLEMENTED_INTRINSIC(X86, StringBuilderAppendInt);
+UNIMPLEMENTED_INTRINSIC(X86, StringBuilderAppendLong);
+UNIMPLEMENTED_INTRINSIC(X86, StringBuilderAppendFloat);
+UNIMPLEMENTED_INTRINSIC(X86, StringBuilderAppendDouble);
 UNIMPLEMENTED_INTRINSIC(X86, StringBuilderLength);
 UNIMPLEMENTED_INTRINSIC(X86, StringBuilderToString);
 
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index e79c0c9..0f6b006 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2748,13 +2748,31 @@
 UNIMPLEMENTED_INTRINSIC(X86_64, CRC32Update)
 UNIMPLEMENTED_INTRINSIC(X86_64, CRC32UpdateBytes)
 UNIMPLEMENTED_INTRINSIC(X86_64, CRC32UpdateByteBuffer)
+UNIMPLEMENTED_INTRINSIC(X86_64, FP16ToFloat)
+UNIMPLEMENTED_INTRINSIC(X86_64, FP16ToHalf)
+UNIMPLEMENTED_INTRINSIC(X86_64, FP16Floor)
+UNIMPLEMENTED_INTRINSIC(X86_64, FP16Ceil)
+UNIMPLEMENTED_INTRINSIC(X86_64, FP16Rint)
+UNIMPLEMENTED_INTRINSIC(X86_64, FP16Greater)
+UNIMPLEMENTED_INTRINSIC(X86_64, FP16GreaterEquals)
+UNIMPLEMENTED_INTRINSIC(X86_64, FP16Less)
+UNIMPLEMENTED_INTRINSIC(X86_64, FP16LessEquals)
 
 UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOf);
 UNIMPLEMENTED_INTRINSIC(X86_64, StringStringIndexOfAfter);
 UNIMPLEMENTED_INTRINSIC(X86_64, StringBufferAppend);
 UNIMPLEMENTED_INTRINSIC(X86_64, StringBufferLength);
 UNIMPLEMENTED_INTRINSIC(X86_64, StringBufferToString);
-UNIMPLEMENTED_INTRINSIC(X86_64, StringBuilderAppend);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBuilderAppendObject);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBuilderAppendString);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBuilderAppendCharSequence);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBuilderAppendCharArray);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBuilderAppendBoolean);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBuilderAppendChar);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBuilderAppendInt);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBuilderAppendLong);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBuilderAppendFloat);
+UNIMPLEMENTED_INTRINSIC(X86_64, StringBuilderAppendDouble);
 UNIMPLEMENTED_INTRINSIC(X86_64, StringBuilderLength);
 UNIMPLEMENTED_INTRINSIC(X86_64, StringBuilderToString);
 
diff --git a/compiler/optimizing/load_store_analysis_test.cc b/compiler/optimizing/load_store_analysis_test.cc
index bfe7a4f..d725aba 100644
--- a/compiler/optimizing/load_store_analysis_test.cc
+++ b/compiler/optimizing/load_store_analysis_test.cc
@@ -106,6 +106,8 @@
   ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
   ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc3));
   ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc3));
+
+  EXPECT_TRUE(CheckGraph(graph_));
 }
 
 TEST_F(LoadStoreAnalysisTest, FieldHeapLocations) {
@@ -183,6 +185,8 @@
   ASSERT_TRUE(loc1 != loc2);
   // accesses to different fields of the same object should not alias.
   ASSERT_FALSE(heap_location_collector.MayAlias(loc1, loc2));
+
+  EXPECT_TRUE(CheckGraph(graph_));
 }
 
 TEST_F(LoadStoreAnalysisTest, ArrayIndexAliasingTest) {
@@ -273,6 +277,8 @@
   loc1 = heap_location_collector.GetArrayHeapLocation(arr_set4);
   loc2 = heap_location_collector.GetArrayHeapLocation(arr_set8);
   ASSERT_TRUE(heap_location_collector.MayAlias(loc1, loc2));
+
+  EXPECT_TRUE(CheckGraphSkipRefTypeInfoChecks(graph_));
 }
 
 TEST_F(LoadStoreAnalysisTest, ArrayAliasingTest) {
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index b33d0f4..4c150da 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -23,8 +23,6 @@
 #include "load_store_analysis.h"
 #include "side_effects_analysis.h"
 
-#include <iostream>
-
 /**
  * The general algorithm of load-store elimination (LSE).
  * Load-store analysis in the previous pass collects a list of heap locations
@@ -64,8 +62,9 @@
  *   all the heap values, depending on the instruction's side effects.
  * - Finalizable objects are considered as persisting at method
  *   return/deoptimization.
- * - Currently this LSE algorithm doesn't handle SIMD graph, e.g. with VecLoad
- *   and VecStore instructions.
+ * - SIMD graphs (with VecLoad and VecStore instructions) are also handled. Any
+ *   partial overlap access among ArrayGet/ArraySet/VecLoad/Store is seen as
+ *   alias and no load/store is eliminated in such case.
  * - Currently this LSE algorithm doesn't handle graph with try-catch, due to
  *   the special block merging structure.
  */
@@ -172,9 +171,7 @@
         DCHECK(substitute2->IsTypeConversion());
         continue;
       }
-      DCHECK(load2->IsInstanceFieldGet() ||
-             load2->IsStaticFieldGet() ||
-             load2->IsArrayGet());
+      DCHECK(IsLoad(load2));
       DCHECK(substitute2 != nullptr);
       if (substitute2 == substitute &&
           load2->GetType() == load->GetType() &&
@@ -204,9 +201,7 @@
         DCHECK(substitute_instructions_for_loads_[i]->IsTypeConversion());
         continue;
       }
-      DCHECK(load->IsInstanceFieldGet() ||
-             load->IsStaticFieldGet() ||
-             load->IsArrayGet());
+      DCHECK(IsLoad(load));
       HInstruction* substitute = substitute_instructions_for_loads_[i];
       DCHECK(substitute != nullptr);
       // We proactively retrieve the substitute for a removed load, so
@@ -224,7 +219,7 @@
       // We guarantee that type A stored as type B and then fetched out as
       // type C is the same as casting from type A to type C directly, since
       // type B and type C will have the same size which is guarenteed in
-      // HInstanceFieldGet/HStaticFieldGet/HArrayGet's SetType().
+      // HInstanceFieldGet/HStaticFieldGet/HArrayGet/HVecLoad's SetType().
       // So we only need one type conversion from type A to type C.
       HTypeConversion* type_conversion = AddTypeConversionIfNecessary(
           load, substitute, load->GetType());
@@ -240,7 +235,7 @@
 
     // At this point, stores in possibly_removed_stores_ can be safely removed.
     for (HInstruction* store : possibly_removed_stores_) {
-      DCHECK(store->IsInstanceFieldSet() || store->IsStaticFieldSet() || store->IsArraySet());
+      DCHECK(IsStore(store));
       store->GetBlock()->RemoveInstruction(store);
     }
 
@@ -261,26 +256,37 @@
   }
 
  private:
-  static bool IsLoad(HInstruction* instruction) {
+  static bool IsLoad(const HInstruction* instruction) {
     if (instruction == kUnknownHeapValue || instruction == kDefaultHeapValue) {
       return false;
     }
     // Unresolved load is not treated as a load.
     return instruction->IsInstanceFieldGet() ||
         instruction->IsStaticFieldGet() ||
+        instruction->IsVecLoad() ||
         instruction->IsArrayGet();
   }
 
-  static bool IsStore(HInstruction* instruction) {
+  static bool IsStore(const HInstruction* instruction) {
     if (instruction == kUnknownHeapValue || instruction == kDefaultHeapValue) {
       return false;
     }
     // Unresolved store is not treated as a store.
     return instruction->IsInstanceFieldSet() ||
         instruction->IsArraySet() ||
+        instruction->IsVecStore() ||
         instruction->IsStaticFieldSet();
   }
 
+  // Check if it is allowed to use default values for the specified load.
+  static bool IsDefaultAllowedForLoad(const HInstruction* load) {
+    DCHECK(IsLoad(load));
+    // Using defaults for VecLoads requires to create additional vector operations.
+    // As there are some issues with scheduling vector operations it is better to avoid creating
+    // them.
+    return !load->IsVecOperation();
+  }
+
   // Returns the real heap value by finding its substitute or by "peeling"
   // a store instruction.
   HInstruction* GetRealHeapValue(HInstruction* heap_value) {
@@ -298,6 +304,8 @@
       heap_value = heap_value->AsInstanceFieldSet()->GetValue();
     } else if (heap_value->IsStaticFieldSet()) {
       heap_value = heap_value->AsStaticFieldSet()->GetValue();
+    } else if (heap_value->IsVecStore()) {
+      heap_value = heap_value->AsVecStore()->GetValue();
     } else {
       DCHECK(heap_value->IsArraySet());
       heap_value = heap_value->AsArraySet()->GetValue();
@@ -553,10 +561,15 @@
         heap_values_for_[instruction->GetBlock()->GetBlockId()];
     HInstruction* heap_value = heap_values[idx];
     if (heap_value == kDefaultHeapValue) {
-      HInstruction* constant = GetDefaultValue(instruction->GetType());
-      AddRemovedLoad(instruction, constant);
-      heap_values[idx] = constant;
-      return;
+      if (IsDefaultAllowedForLoad(instruction)) {
+        HInstruction* constant = GetDefaultValue(instruction->GetType());
+        AddRemovedLoad(instruction, constant);
+        heap_values[idx] = constant;
+        return;
+      } else {
+        heap_values[idx] = kUnknownHeapValue;
+        heap_value = kUnknownHeapValue;
+      }
     }
     heap_value = GetRealHeapValue(heap_value);
     if (heap_value == kUnknownHeapValue) {
@@ -590,6 +603,35 @@
     return false;
   }
 
+  bool CanValueBeKeptIfSameAsNew(HInstruction* value,
+                                 HInstruction* new_value,
+                                 HInstruction* new_value_set_instr) {
+    // For field/array set location operations, if the value is the same as the new_value
+    // it can be kept even if aliasing happens. All aliased operations will access the same memory
+    // range.
+    // For vector values, this is not true. For example:
+    //  packed_data = [0xA, 0xB, 0xC, 0xD];            <-- Different values in each lane.
+    //  VecStore array[i  ,i+1,i+2,i+3] = packed_data;
+    //  VecStore array[i+1,i+2,i+3,i+4] = packed_data; <-- We are here (partial overlap).
+    //  VecLoad  vx = array[i,i+1,i+2,i+3];            <-- Cannot be eliminated because the value
+    //                                                     here is not packed_data anymore.
+    //
+    // TODO: to allow such 'same value' optimization on vector data,
+    // LSA needs to report more fine-grain MAY alias information:
+    // (1) May alias due to two vector data partial overlap.
+    //     e.g. a[i..i+3] and a[i+1,..,i+4].
+    // (2) May alias due to two vector data may complete overlap each other.
+    //     e.g. a[i..i+3] and b[i..i+3].
+    // (3) May alias but the exact relationship between two locations is unknown.
+    //     e.g. a[i..i+3] and b[j..j+3], where values of a,b,i,j are all unknown.
+    // This 'same value' optimization can apply only on case (2).
+    if (new_value_set_instr->IsVecOperation()) {
+      return false;
+    }
+
+    return Equal(value, new_value);
+  }
+
   void VisitSetLocation(HInstruction* instruction, size_t idx, HInstruction* value) {
     DCHECK_NE(idx, HeapLocationCollector::kHeapLocationNotFound);
     DCHECK(!IsStore(value)) << value->DebugName();
@@ -636,23 +678,16 @@
 
     // This store may kill values in other heap locations due to aliasing.
     for (size_t i = 0; i < heap_values.size(); i++) {
-      if (i == idx) {
+      if (i == idx ||
+          heap_values[i] == kUnknownHeapValue ||
+          CanValueBeKeptIfSameAsNew(heap_values[i], value, instruction) ||
+          !heap_location_collector_.MayAlias(i, idx)) {
         continue;
       }
-      if (Equal(heap_values[i], value)) {
-        // Same value should be kept even if aliasing happens.
-        continue;
-      }
-      if (heap_values[i] == kUnknownHeapValue) {
-        // Value is already unknown, no need for aliasing check.
-        continue;
-      }
-      if (heap_location_collector_.MayAlias(i, idx)) {
-        // Kill heap locations that may alias and as a result if the heap value
-        // is a store, the store needs to be kept.
-        KeepIfIsStore(heap_values[i]);
-        heap_values[i] = kUnknownHeapValue;
-      }
+      // Kill heap locations that may alias and as a result if the heap value
+      // is a store, the store needs to be kept.
+      KeepIfIsStore(heap_values[i]);
+      heap_values[i] = kUnknownHeapValue;
     }
   }
 
@@ -689,7 +724,16 @@
 
   void VisitArraySet(HArraySet* instruction) override {
     size_t idx = heap_location_collector_.GetArrayHeapLocation(instruction);
-    VisitSetLocation(instruction, idx, instruction->InputAt(2));
+    VisitSetLocation(instruction, idx, instruction->GetValue());
+  }
+
+  void VisitVecLoad(HVecLoad* instruction) override {
+    VisitGetLocation(instruction, heap_location_collector_.GetArrayHeapLocation(instruction));
+  }
+
+  void VisitVecStore(HVecStore* instruction) override {
+    size_t idx = heap_location_collector_.GetArrayHeapLocation(instruction);
+    VisitSetLocation(instruction, idx, instruction->GetValue());
   }
 
   void VisitDeoptimize(HDeoptimize* instruction) override {
@@ -892,11 +936,6 @@
     return false;
   }
 
-  // TODO: analyze VecLoad/VecStore better.
-  if (graph_->HasSIMD()) {
-    return false;
-  }
-
   LSEVisitor lse_visitor(graph_, heap_location_collector, side_effects_, stats_);
   for (HBasicBlock* block : graph_->GetReversePostOrder()) {
     lse_visitor.VisitBasicBlock(block);
diff --git a/compiler/optimizing/load_store_elimination_test.cc b/compiler/optimizing/load_store_elimination_test.cc
new file mode 100644
index 0000000..7380378
--- /dev/null
+++ b/compiler/optimizing/load_store_elimination_test.cc
@@ -0,0 +1,893 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <tuple>
+
+#include "load_store_analysis.h"
+#include "load_store_elimination.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+#include "side_effects_analysis.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+class LoadStoreEliminationTest : public ImprovedOptimizingUnitTest {
+ public:
+  void PerformLSE() {
+    graph_->BuildDominatorTree();
+    SideEffectsAnalysis side_effects(graph_);
+    side_effects.Run();
+    LoadStoreAnalysis lsa(graph_);
+    lsa.Run();
+    LoadStoreElimination lse(graph_, side_effects, lsa, nullptr);
+    lse.Run();
+    EXPECT_TRUE(CheckGraphSkipRefTypeInfoChecks());
+  }
+
+  // Create instructions shared among tests.
+  void CreateEntryBlockInstructions() {
+    HInstruction* c1 = graph_->GetIntConstant(1);
+    HInstruction* c4 = graph_->GetIntConstant(4);
+    i_add1_ = new (GetAllocator()) HAdd(DataType::Type::kInt32, i_, c1);
+    i_add4_ = new (GetAllocator()) HAdd(DataType::Type::kInt32, i_, c4);
+    entry_block_->AddInstruction(i_add1_);
+    entry_block_->AddInstruction(i_add4_);
+    entry_block_->AddInstruction(new (GetAllocator()) HGoto());
+  }
+
+  // Create the major CFG used by tests:
+  //    entry
+  //      |
+  //  pre_header
+  //      |
+  //    loop[]
+  //      |
+  //   return
+  //      |
+  //     exit
+  void CreateTestControlFlowGraph() {
+    pre_header_ = new (GetAllocator()) HBasicBlock(graph_);
+    loop_ = new (GetAllocator()) HBasicBlock(graph_);
+
+    graph_->AddBlock(pre_header_);
+    graph_->AddBlock(loop_);
+
+    entry_block_->ReplaceSuccessor(return_block_, pre_header_);
+    pre_header_->AddSuccessor(loop_);
+    loop_->AddSuccessor(loop_);
+    loop_->AddSuccessor(return_block_);
+
+    HInstruction* c0 = graph_->GetIntConstant(0);
+    HInstruction* c1 = graph_->GetIntConstant(1);
+    HInstruction* c128 = graph_->GetIntConstant(128);
+
+    CreateEntryBlockInstructions();
+
+    // pre_header block
+    //   phi = 0;
+    phi_ = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
+    loop_->AddPhi(phi_);
+    pre_header_->AddInstruction(new (GetAllocator()) HGoto());
+    phi_->AddInput(c0);
+
+    // loop block:
+    //   suspend_check
+    //   phi++;
+    //   if (phi >= 128)
+    suspend_check_ = new (GetAllocator()) HSuspendCheck();
+    HInstruction* inc_phi = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi_, c1);
+    HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(phi_, c128);
+    HInstruction* hif = new (GetAllocator()) HIf(cmp);
+    loop_->AddInstruction(suspend_check_);
+    loop_->AddInstruction(inc_phi);
+    loop_->AddInstruction(cmp);
+    loop_->AddInstruction(hif);
+    phi_->AddInput(inc_phi);
+
+    CreateEnvForSuspendCheck();
+  }
+
+  void CreateEnvForSuspendCheck() {
+    ArenaVector<HInstruction*> current_locals({array_, i_, j_},
+                                              GetAllocator()->Adapter(kArenaAllocInstruction));
+    ManuallyBuildEnvFor(suspend_check_, &current_locals);
+  }
+
+  // Create the diamond-shaped CFG:
+  //      upper
+  //      /   \
+  //    left  right
+  //      \   /
+  //      down
+  //
+  // Return: the basic blocks forming the CFG in the following order {upper, left, right, down}.
+  std::tuple<HBasicBlock*, HBasicBlock*, HBasicBlock*, HBasicBlock*> CreateDiamondShapedCFG() {
+    CreateEntryBlockInstructions();
+
+    HBasicBlock* upper = new (GetAllocator()) HBasicBlock(graph_);
+    HBasicBlock* left = new (GetAllocator()) HBasicBlock(graph_);
+    HBasicBlock* right = new (GetAllocator()) HBasicBlock(graph_);
+
+    graph_->AddBlock(upper);
+    graph_->AddBlock(left);
+    graph_->AddBlock(right);
+
+    entry_block_->ReplaceSuccessor(return_block_, upper);
+    upper->AddSuccessor(left);
+    upper->AddSuccessor(right);
+    left->AddSuccessor(return_block_);
+    right->AddSuccessor(return_block_);
+
+    HInstruction* cmp = new (GetAllocator()) HGreaterThanOrEqual(i_, j_);
+    HInstruction* hif = new (GetAllocator()) HIf(cmp);
+    upper->AddInstruction(cmp);
+    upper->AddInstruction(hif);
+
+    left->AddInstruction(new (GetAllocator()) HGoto());
+    right->AddInstruction(new (GetAllocator()) HGoto());
+
+    return std::make_tuple(upper, left, right, return_block_);
+  }
+
+  // Add a HVecLoad instruction to the end of the provided basic block.
+  //
+  // Return: the created HVecLoad instruction.
+  HInstruction* AddVecLoad(HBasicBlock* block, HInstruction* array, HInstruction* index) {
+    DCHECK(block != nullptr);
+    DCHECK(array != nullptr);
+    DCHECK(index != nullptr);
+    HInstruction* vload = new (GetAllocator()) HVecLoad(
+        GetAllocator(),
+        array,
+        index,
+        DataType::Type::kInt32,
+        SideEffects::ArrayReadOfType(DataType::Type::kInt32),
+        4,
+        /*is_string_char_at*/ false,
+        kNoDexPc);
+    block->InsertInstructionBefore(vload, block->GetLastInstruction());
+    return vload;
+  }
+
+  // Add a HVecStore instruction to the end of the provided basic block.
+  // If no vdata is specified, generate HVecStore: array[index] = [1,1,1,1].
+  //
+  // Return: the created HVecStore instruction.
+  HInstruction* AddVecStore(HBasicBlock* block,
+                            HInstruction* array,
+                            HInstruction* index,
+                            HInstruction* vdata = nullptr) {
+    DCHECK(block != nullptr);
+    DCHECK(array != nullptr);
+    DCHECK(index != nullptr);
+    if (vdata == nullptr) {
+      HInstruction* c1 = graph_->GetIntConstant(1);
+      vdata = new (GetAllocator()) HVecReplicateScalar(GetAllocator(),
+                                                       c1,
+                                                       DataType::Type::kInt32,
+                                                       4,
+                                                       kNoDexPc);
+      block->InsertInstructionBefore(vdata, block->GetLastInstruction());
+    }
+    HInstruction* vstore = new (GetAllocator()) HVecStore(
+        GetAllocator(),
+        array,
+        index,
+        vdata,
+        DataType::Type::kInt32,
+        SideEffects::ArrayWriteOfType(DataType::Type::kInt32),
+        4,
+        kNoDexPc);
+    block->InsertInstructionBefore(vstore, block->GetLastInstruction());
+    return vstore;
+  }
+
+  // Add a HArrayGet instruction to the end of the provided basic block.
+  //
+  // Return: the created HArrayGet instruction.
+  HInstruction* AddArrayGet(HBasicBlock* block, HInstruction* array, HInstruction* index) {
+    DCHECK(block != nullptr);
+    DCHECK(array != nullptr);
+    DCHECK(index != nullptr);
+    HInstruction* get = new (GetAllocator()) HArrayGet(array, index, DataType::Type::kInt32, 0);
+    block->InsertInstructionBefore(get, block->GetLastInstruction());
+    return get;
+  }
+
+  // Add a HArraySet instruction to the end of the provided basic block.
+  // If no data is specified, generate HArraySet: array[index] = 1.
+  //
+  // Return: the created HArraySet instruction.
+  HInstruction* AddArraySet(HBasicBlock* block,
+                            HInstruction* array,
+                            HInstruction* index,
+                            HInstruction* data = nullptr) {
+    DCHECK(block != nullptr);
+    DCHECK(array != nullptr);
+    DCHECK(index != nullptr);
+    if (data == nullptr) {
+      data = graph_->GetIntConstant(1);
+    }
+    HInstruction* store = new (GetAllocator()) HArraySet(array,
+                                                         index,
+                                                         data,
+                                                         DataType::Type::kInt32,
+                                                         0);
+    block->InsertInstructionBefore(store, block->GetLastInstruction());
+    return store;
+  }
+
+  void CreateParameters() override {
+    parameters_.push_back(new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+                                                               dex::TypeIndex(0),
+                                                               0,
+                                                               DataType::Type::kInt32));
+    array_ = parameters_.back();
+    parameters_.push_back(new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+                                                               dex::TypeIndex(1),
+                                                               1,
+                                                               DataType::Type::kInt32));
+    i_ = parameters_.back();
+    parameters_.push_back(new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+                                                               dex::TypeIndex(1),
+                                                               2,
+                                                               DataType::Type::kInt32));
+    j_ = parameters_.back();
+  }
+
+  HBasicBlock* pre_header_;
+  HBasicBlock* loop_;
+
+  HInstruction* array_;
+  HInstruction* i_;
+  HInstruction* j_;
+  HInstruction* i_add1_;
+  HInstruction* i_add4_;
+  HInstruction* suspend_check_;
+
+  HPhi* phi_;
+};
+
+TEST_F(LoadStoreEliminationTest, ArrayGetSetElimination) {
+  InitGraph();
+  CreateTestControlFlowGraph();
+
+  HInstruction* c1 = graph_->GetIntConstant(1);
+  HInstruction* c2 = graph_->GetIntConstant(2);
+  HInstruction* c3 = graph_->GetIntConstant(3);
+
+  // array[1] = 1;
+  // x = array[1];  <--- Remove.
+  // y = array[2];
+  // array[1] = 1;  <--- Remove, since it stores same value.
+  // array[i] = 3;  <--- MAY alias.
+  // array[1] = 1;  <--- Cannot remove, even if it stores the same value.
+  AddArraySet(entry_block_, array_, c1, c1);
+  HInstruction* load1 = AddArrayGet(entry_block_, array_, c1);
+  HInstruction* load2 = AddArrayGet(entry_block_, array_, c2);
+  HInstruction* store1 = AddArraySet(entry_block_, array_, c1, c1);
+  AddArraySet(entry_block_, array_, i_, c3);
+  HInstruction* store2 = AddArraySet(entry_block_, array_, c1, c1);
+
+  PerformLSE();
+
+  ASSERT_TRUE(IsRemoved(load1));
+  ASSERT_FALSE(IsRemoved(load2));
+  ASSERT_TRUE(IsRemoved(store1));
+  ASSERT_FALSE(IsRemoved(store2));
+}
+
+TEST_F(LoadStoreEliminationTest, SameHeapValue1) {
+  InitGraph();
+  CreateTestControlFlowGraph();
+
+  HInstruction* c1 = graph_->GetIntConstant(1);
+  HInstruction* c2 = graph_->GetIntConstant(2);
+
+  // Test LSE handling same value stores on array.
+  // array[1] = 1;
+  // array[2] = 1;
+  // array[1] = 1;  <--- Can remove.
+  // array[1] = 2;  <--- Can NOT remove.
+  AddArraySet(entry_block_, array_, c1, c1);
+  AddArraySet(entry_block_, array_, c2, c1);
+  HInstruction* store1 = AddArraySet(entry_block_, array_, c1, c1);
+  HInstruction* store2 = AddArraySet(entry_block_, array_, c1, c2);
+
+  PerformLSE();
+
+  ASSERT_TRUE(IsRemoved(store1));
+  ASSERT_FALSE(IsRemoved(store2));
+}
+
+TEST_F(LoadStoreEliminationTest, SameHeapValue2) {
+  InitGraph();
+  CreateTestControlFlowGraph();
+
+  // Test LSE handling same value stores on vector.
+  // vdata = [0x1, 0x2, 0x3, 0x4, ...]
+  // VecStore array[i...] = vdata;
+  // VecStore array[j...] = vdata;  <--- MAY ALIAS.
+  // VecStore array[i...] = vdata;  <--- Cannot Remove, even if it's same value.
+  AddVecStore(entry_block_, array_, i_);
+  AddVecStore(entry_block_, array_, j_);
+  HInstruction* vstore = AddVecStore(entry_block_, array_, i_);
+
+  PerformLSE();
+
+  ASSERT_FALSE(IsRemoved(vstore));
+}
+
+TEST_F(LoadStoreEliminationTest, SameHeapValue3) {
+  InitGraph();
+  CreateTestControlFlowGraph();
+
+  // VecStore array[i...] = vdata;
+  // VecStore array[i+1...] = vdata;  <--- MAY alias due to partial overlap.
+  // VecStore array[i...] = vdata;    <--- Cannot remove, even if it's same value.
+  AddVecStore(entry_block_, array_, i_);
+  AddVecStore(entry_block_, array_, i_add1_);
+  HInstruction* vstore = AddVecStore(entry_block_, array_, i_);
+
+  PerformLSE();
+
+  ASSERT_FALSE(IsRemoved(vstore));
+}
+
+TEST_F(LoadStoreEliminationTest, OverlappingLoadStore) {
+  InitGraph();
+  CreateTestControlFlowGraph();
+
+  HInstruction* c1 = graph_->GetIntConstant(1);
+
+  // Test LSE handling array LSE when there is vector store in between.
+  // a[i] = 1;
+  // .. = a[i];                <-- Remove.
+  // a[i,i+1,i+2,i+3] = data;  <-- PARTIAL OVERLAP !
+  // .. = a[i];                <-- Cannot remove.
+  AddArraySet(entry_block_, array_, i_, c1);
+  HInstruction* load1 = AddArrayGet(entry_block_, array_, i_);
+  AddVecStore(entry_block_, array_, i_);
+  HInstruction* load2 = AddArrayGet(entry_block_, array_, i_);
+
+  // Test LSE handling vector load/store partial overlap.
+  // a[i,i+1,i+2,i+3] = data;
+  // a[i+4,i+5,i+6,i+7] = data;
+  // .. = a[i,i+1,i+2,i+3];
+  // .. = a[i+4,i+5,i+6,i+7];
+  // a[i+1,i+2,i+3,i+4] = data;  <-- PARTIAL OVERLAP !
+  // .. = a[i,i+1,i+2,i+3];
+  // .. = a[i+4,i+5,i+6,i+7];
+  AddVecStore(entry_block_, array_, i_);
+  AddVecStore(entry_block_, array_, i_add4_);
+  HInstruction* vload1 = AddVecLoad(entry_block_, array_, i_);
+  HInstruction* vload2 = AddVecLoad(entry_block_, array_, i_add4_);
+  AddVecStore(entry_block_, array_, i_add1_);
+  HInstruction* vload3 = AddVecLoad(entry_block_, array_, i_);
+  HInstruction* vload4 = AddVecLoad(entry_block_, array_, i_add4_);
+
+  // Test LSE handling vector LSE when there is array store in between.
+  // a[i,i+1,i+2,i+3] = data;
+  // a[i+1] = 1;                 <-- PARTIAL OVERLAP !
+  // .. = a[i,i+1,i+2,i+3];
+  AddVecStore(entry_block_, array_, i_);
+  AddArraySet(entry_block_, array_, i_, c1);
+  HInstruction* vload5 = AddVecLoad(entry_block_, array_, i_);
+
+  PerformLSE();
+
+  ASSERT_TRUE(IsRemoved(load1));
+  ASSERT_FALSE(IsRemoved(load2));
+
+  ASSERT_TRUE(IsRemoved(vload1));
+  ASSERT_TRUE(IsRemoved(vload2));
+  ASSERT_FALSE(IsRemoved(vload3));
+  ASSERT_FALSE(IsRemoved(vload4));
+
+  ASSERT_FALSE(IsRemoved(vload5));
+}
+// function (int[] a, int j) {
+// a[j] = 1;
+// for (int i=0; i<128; i++) {
+//    /* doesn't do any write */
+// }
+// a[j] = 1;
+TEST_F(LoadStoreEliminationTest, StoreAfterLoopWithoutSideEffects) {
+  InitGraph();
+  CreateTestControlFlowGraph();
+
+  HInstruction* c1 = graph_->GetIntConstant(1);
+
+  // a[j] = 1
+  AddArraySet(pre_header_, array_, j_, c1);
+
+  // LOOP BODY:
+  // .. = a[i,i+1,i+2,i+3];
+  AddVecLoad(loop_, array_, phi_);
+
+  // a[j] = 1;
+  HInstruction* array_set = AddArraySet(return_block_, array_, j_, c1);
+
+  PerformLSE();
+
+  ASSERT_TRUE(IsRemoved(array_set));
+}
+
+// function (int[] a, int j) {
+//   int[] b = new int[128];
+//   a[j] = 0;
+//   for (int phi=0; phi<128; phi++) {
+//     a[phi,phi+1,phi+2,phi+3] = [1,1,1,1];
+//     b[phi,phi+1,phi+2,phi+3] = a[phi,phi+1,phi+2,phi+3];
+//   }
+//   a[j] = 0;
+// }
+TEST_F(LoadStoreEliminationTest, StoreAfterSIMDLoopWithSideEffects) {
+  InitGraph();
+  CreateTestControlFlowGraph();
+
+  HInstruction* c0 = graph_->GetIntConstant(0);
+  HInstruction* c128 = graph_->GetIntConstant(128);
+
+  HInstruction* array_b = new (GetAllocator()) HNewArray(c0, c128, 0, 0);
+  pre_header_->InsertInstructionBefore(array_b, pre_header_->GetLastInstruction());
+  array_b->CopyEnvironmentFrom(suspend_check_->GetEnvironment());
+
+  // a[j] = 0;
+  AddArraySet(pre_header_, array_, j_, c0);
+
+  // LOOP BODY:
+  // a[phi,phi+1,phi+2,phi+3] = [1,1,1,1];
+  // b[phi,phi+1,phi+2,phi+3] = a[phi,phi+1,phi+2,phi+3];
+  AddVecStore(loop_, array_, phi_);
+  HInstruction* vload = AddVecLoad(loop_, array_, phi_);
+  AddVecStore(loop_, array_b, phi_, vload->AsVecLoad());
+
+  // a[j] = 0;
+  HInstruction* a_set = AddArraySet(return_block_, array_, j_, c0);
+
+  PerformLSE();
+
+  ASSERT_TRUE(IsRemoved(vload));
+  ASSERT_FALSE(IsRemoved(a_set));  // Cannot remove due to write side-effect in the loop.
+}
+
+// function (int[] a, int j) {
+//   int[] b = new int[128];
+//   a[j] = 0;
+//   for (int phi=0; phi<128; phi++) {
+//     a[phi,phi+1,phi+2,phi+3] = [1,1,1,1];
+//     b[phi,phi+1,phi+2,phi+3] = a[phi,phi+1,phi+2,phi+3];
+//   }
+//   x = a[j];
+// }
+TEST_F(LoadStoreEliminationTest, LoadAfterSIMDLoopWithSideEffects) {
+  InitGraph();
+  CreateTestControlFlowGraph();
+
+  HInstruction* c0 = graph_->GetIntConstant(0);
+  HInstruction* c128 = graph_->GetIntConstant(128);
+
+  HInstruction* array_b = new (GetAllocator()) HNewArray(c0, c128, 0, 0);
+  pre_header_->InsertInstructionBefore(array_b, pre_header_->GetLastInstruction());
+  array_b->CopyEnvironmentFrom(suspend_check_->GetEnvironment());
+
+  // a[j] = 0;
+  AddArraySet(pre_header_, array_, j_, c0);
+
+  // LOOP BODY:
+  // a[phi,phi+1,phi+2,phi+3] = [1,1,1,1];
+  // b[phi,phi+1,phi+2,phi+3] = a[phi,phi+1,phi+2,phi+3];
+  AddVecStore(loop_, array_, phi_);
+  HInstruction* vload = AddVecLoad(loop_, array_, phi_);
+  AddVecStore(loop_, array_b, phi_, vload->AsVecLoad());
+
+  // x = a[j];
+  HInstruction* load = AddArrayGet(return_block_, array_, j_);
+
+  PerformLSE();
+
+  ASSERT_TRUE(IsRemoved(vload));
+  ASSERT_FALSE(IsRemoved(load));  // Cannot remove due to write side-effect in the loop.
+}
+
+// Check that merging works correctly when there are VecStors in predecessors.
+//
+//                  vstore1: a[i,... i + 3] = [1,...1]
+//                       /          \
+//                      /            \
+// vstore2: a[i,... i + 3] = [1,...1]  vstore3: a[i+1, ... i + 4] = [1, ... 1]
+//                     \              /
+//                      \            /
+//                  vstore4: a[i,... i + 3] = [1,...1]
+//
+// Expected:
+//   'vstore2' is removed.
+//   'vstore3' is not removed.
+//   'vstore4' is not removed. Such cases are not supported at the moment.
+TEST_F(LoadStoreEliminationTest, MergePredecessorVecStores) {
+  InitGraph();
+
+  HBasicBlock* upper;
+  HBasicBlock* left;
+  HBasicBlock* right;
+  HBasicBlock* down;
+  std::tie(upper, left, right, down) = CreateDiamondShapedCFG();
+
+  // upper: a[i,... i + 3] = [1,...1]
+  HInstruction* vstore1 = AddVecStore(upper, array_, i_);
+  HInstruction* vdata = vstore1->InputAt(2);
+
+  // left: a[i,... i + 3] = [1,...1]
+  HInstruction* vstore2 = AddVecStore(left, array_, i_, vdata);
+
+  // right: a[i+1, ... i + 4] = [1, ... 1]
+  HInstruction* vstore3 = AddVecStore(right, array_, i_add1_, vdata);
+
+  // down: a[i,... i + 3] = [1,...1]
+  HInstruction* vstore4 = AddVecStore(down, array_, i_, vdata);
+
+  PerformLSE();
+
+  ASSERT_TRUE(IsRemoved(vstore2));
+  ASSERT_FALSE(IsRemoved(vstore3));
+  ASSERT_FALSE(IsRemoved(vstore4));
+}
+
+// Check that merging works correctly when there are ArraySets in predecessors.
+//
+//          a[i] = 1
+//        /          \
+//       /            \
+// store1: a[i] = 1  store2: a[i+1] = 1
+//       \            /
+//        \          /
+//          store3: a[i] = 1
+//
+// Expected:
+//   'store1' is removed.
+//   'store2' is not removed.
+//   'store3' is removed.
+TEST_F(LoadStoreEliminationTest, MergePredecessorStores) {
+  InitGraph();
+
+  HBasicBlock* upper;
+  HBasicBlock* left;
+  HBasicBlock* right;
+  HBasicBlock* down;
+  std::tie(upper, left, right, down) = CreateDiamondShapedCFG();
+
+  // upper: a[i,... i + 3] = [1,...1]
+  AddArraySet(upper, array_, i_);
+
+  // left: a[i,... i + 3] = [1,...1]
+  HInstruction* store1 = AddArraySet(left, array_, i_);
+
+  // right: a[i+1, ... i + 4] = [1, ... 1]
+  HInstruction* store2 = AddArraySet(right, array_, i_add1_);
+
+  // down: a[i,... i + 3] = [1,...1]
+  HInstruction* store3 = AddArraySet(down, array_, i_);
+
+  PerformLSE();
+
+  ASSERT_TRUE(IsRemoved(store1));
+  ASSERT_FALSE(IsRemoved(store2));
+  ASSERT_TRUE(IsRemoved(store3));
+}
+
+// Check that redundant VStore/VLoad are removed from a SIMD loop.
+//
+//  LOOP BODY
+//     vstore1: a[i,... i + 3] = [1,...1]
+//     vload:   x = a[i,... i + 3]
+//     vstore2: b[i,... i + 3] = x
+//     vstore3: a[i,... i + 3] = [1,...1]
+//
+// Expected:
+//   'vstore1' is not removed.
+//   'vload' is removed.
+//   'vstore3' is removed.
+TEST_F(LoadStoreEliminationTest, RedundantVStoreVLoadInLoop) {
+  InitGraph();
+  CreateTestControlFlowGraph();
+
+  HInstruction* c0 = graph_->GetIntConstant(0);
+  HInstruction* c128 = graph_->GetIntConstant(128);
+
+  HInstruction* array_a = new (GetAllocator()) HNewArray(c0, c128, 0, 0);
+  pre_header_->InsertInstructionBefore(array_a, pre_header_->GetLastInstruction());
+  array_a->CopyEnvironmentFrom(suspend_check_->GetEnvironment());
+
+  HInstruction* array_b = new (GetAllocator()) HNewArray(c0, c128, 0, 0);
+  pre_header_->InsertInstructionBefore(array_b, pre_header_->GetLastInstruction());
+  array_b->CopyEnvironmentFrom(suspend_check_->GetEnvironment());
+
+  // LOOP BODY:
+  //    a[i,... i + 3] = [1,...1]
+  //    x = a[i,... i + 3]
+  //    b[i,... i + 3] = x
+  //    a[i,... i + 3] = [1,...1]
+  HInstruction* vstore1 = AddVecStore(loop_, array_a, phi_);
+  HInstruction* vload = AddVecLoad(loop_, array_a, phi_);
+  AddVecStore(loop_, array_b, phi_, vload->AsVecLoad());
+  HInstruction* vstore3 = AddVecStore(loop_, array_a, phi_, vstore1->InputAt(2));
+
+  PerformLSE();
+
+  ASSERT_FALSE(IsRemoved(vstore1));
+  ASSERT_TRUE(IsRemoved(vload));
+  ASSERT_TRUE(IsRemoved(vstore3));
+}
+
+// Loop write side effects invalidate all stores.
+// This causes stores after such loops not to be removed, even
+// their values are known.
+TEST_F(LoadStoreEliminationTest, StoreAfterLoopWithSideEffects) {
+  InitGraph();
+  CreateTestControlFlowGraph();
+
+  HInstruction* c0 = graph_->GetIntConstant(0);
+  HInstruction* c2 = graph_->GetIntConstant(2);
+  HInstruction* c128 = graph_->GetIntConstant(128);
+
+  // array[0] = 2;
+  // loop:
+  //   b[i] = array[i]
+  // array[0] = 2
+  AddArraySet(entry_block_, array_, c0, c2);
+
+  HInstruction* array_b = new (GetAllocator()) HNewArray(c0, c128, 0, 0);
+  pre_header_->InsertInstructionBefore(array_b, pre_header_->GetLastInstruction());
+  array_b->CopyEnvironmentFrom(suspend_check_->GetEnvironment());
+
+  HInstruction* load = AddArrayGet(loop_, array_, phi_);
+  AddArraySet(loop_, array_b, phi_, load);
+
+  HInstruction* store = AddArraySet(return_block_, array_, c0, c2);
+
+  PerformLSE();
+
+  ASSERT_FALSE(IsRemoved(store));
+}
+
+// As it is not allowed to use defaults for VecLoads, check if there is a new created array
+// a VecLoad used in a loop and after it is not replaced with a default.
+TEST_F(LoadStoreEliminationTest, VLoadDefaultValueInLoopWithoutWriteSideEffects) {
+  InitGraph();
+  CreateTestControlFlowGraph();
+
+  HInstruction* c0 = graph_->GetIntConstant(0);
+  HInstruction* c128 = graph_->GetIntConstant(128);
+
+  HInstruction* array_a = new (GetAllocator()) HNewArray(c0, c128, 0, 0);
+  pre_header_->InsertInstructionBefore(array_a, pre_header_->GetLastInstruction());
+  array_a->CopyEnvironmentFrom(suspend_check_->GetEnvironment());
+
+  // LOOP BODY:
+  //    v = a[i,... i + 3]
+  // array[0,... 3] = v
+  HInstruction* vload = AddVecLoad(loop_, array_a, phi_);
+  HInstruction* vstore = AddVecStore(return_block_, array_, c0, vload->AsVecLoad());
+
+  PerformLSE();
+
+  ASSERT_FALSE(IsRemoved(vload));
+  ASSERT_FALSE(IsRemoved(vstore));
+}
+
+// As it is not allowed to use defaults for VecLoads, check if there is a new created array
+// a VecLoad is not replaced with a default.
+TEST_F(LoadStoreEliminationTest, VLoadDefaultValue) {
+  InitGraph();
+  CreateTestControlFlowGraph();
+
+  HInstruction* c0 = graph_->GetIntConstant(0);
+  HInstruction* c128 = graph_->GetIntConstant(128);
+
+  HInstruction* array_a = new (GetAllocator()) HNewArray(c0, c128, 0, 0);
+  pre_header_->InsertInstructionBefore(array_a, pre_header_->GetLastInstruction());
+  array_a->CopyEnvironmentFrom(suspend_check_->GetEnvironment());
+
+  // v = a[0,... 3]
+  // array[0,... 3] = v
+  HInstruction* vload = AddVecLoad(pre_header_, array_a, c0);
+  HInstruction* vstore = AddVecStore(return_block_, array_, c0, vload->AsVecLoad());
+
+  PerformLSE();
+
+  ASSERT_FALSE(IsRemoved(vload));
+  ASSERT_FALSE(IsRemoved(vstore));
+}
+
+// As it is allowed to use defaults for ordinary loads, check that for a newly created array
+// a load used in a loop and after it is replaced with a default.
+TEST_F(LoadStoreEliminationTest, LoadDefaultValueInLoopWithoutWriteSideEffects) {
+  InitGraph();
+  CreateTestControlFlowGraph();
+
+  HInstruction* c0 = graph_->GetIntConstant(0);
+  HInstruction* c128 = graph_->GetIntConstant(128);
+
+  HInstruction* array_a = new (GetAllocator()) HNewArray(c0, c128, 0, 0);
+  pre_header_->InsertInstructionBefore(array_a, pre_header_->GetLastInstruction());
+  array_a->CopyEnvironmentFrom(suspend_check_->GetEnvironment());
+
+  // LOOP BODY:
+  //    v = a[i]
+  // array[0] = v
+  HInstruction* load = AddArrayGet(loop_, array_a, phi_);
+  HInstruction* store = AddArraySet(return_block_, array_, c0, load);
+
+  PerformLSE();
+
+  ASSERT_TRUE(IsRemoved(load));
+  ASSERT_FALSE(IsRemoved(store));
+}
+
+// As it is allowed to use defaults for ordinary loads, check that for a newly created array
+// a load is replaced with a default.
+TEST_F(LoadStoreEliminationTest, LoadDefaultValue) {
+  InitGraph();
+  CreateTestControlFlowGraph();
+
+  HInstruction* c0 = graph_->GetIntConstant(0);
+  HInstruction* c128 = graph_->GetIntConstant(128);
+
+  HInstruction* array_a = new (GetAllocator()) HNewArray(c0, c128, 0, 0);
+  pre_header_->InsertInstructionBefore(array_a, pre_header_->GetLastInstruction());
+  array_a->CopyEnvironmentFrom(suspend_check_->GetEnvironment());
+
+  // v = a[0]
+  // array[0] = v
+  HInstruction* load = AddArrayGet(pre_header_, array_a, c0);
+  HInstruction* store = AddArraySet(return_block_, array_, c0, load);
+
+  PerformLSE();
+
+  ASSERT_TRUE(IsRemoved(load));
+  ASSERT_FALSE(IsRemoved(store));
+}
+
+// As it is not allowed to use defaults for VecLoads but allowed for regular loads,
+// check that for a newly created array with a VecLoad and a load used in a loop and after it,
+// the VecLoad is not replaced with a default but the load is.
+TEST_F(LoadStoreEliminationTest, VLoadAndLoadDefaultValueInLoopWithoutWriteSideEffects) {
+  InitGraph();
+  CreateTestControlFlowGraph();
+
+  HInstruction* c0 = graph_->GetIntConstant(0);
+  HInstruction* c128 = graph_->GetIntConstant(128);
+
+  HInstruction* array_a = new (GetAllocator()) HNewArray(c0, c128, 0, 0);
+  pre_header_->InsertInstructionBefore(array_a, pre_header_->GetLastInstruction());
+  array_a->CopyEnvironmentFrom(suspend_check_->GetEnvironment());
+
+  // LOOP BODY:
+  //    v = a[i,... i + 3]
+  //    v1 = a[i]
+  // array[0,... 3] = v
+  // array[0] = v1
+  HInstruction* vload = AddVecLoad(loop_, array_a, phi_);
+  HInstruction* load = AddArrayGet(loop_, array_a, phi_);
+  HInstruction* vstore = AddVecStore(return_block_, array_, c0, vload->AsVecLoad());
+  HInstruction* store = AddArraySet(return_block_, array_, c0, load);
+
+  PerformLSE();
+
+  ASSERT_FALSE(IsRemoved(vload));
+  ASSERT_TRUE(IsRemoved(load));
+  ASSERT_FALSE(IsRemoved(vstore));
+  ASSERT_FALSE(IsRemoved(store));
+}
+
+// As it is not allowed to use defaults for VecLoads but allowed for regular loads,
+// check that for a newly created array with a VecLoad and a load,
+// the VecLoad is not replaced with a default but the load is.
+TEST_F(LoadStoreEliminationTest, VLoadAndLoadDefaultValue) {
+  InitGraph();
+  CreateTestControlFlowGraph();
+
+  HInstruction* c0 = graph_->GetIntConstant(0);
+  HInstruction* c128 = graph_->GetIntConstant(128);
+
+  HInstruction* array_a = new (GetAllocator()) HNewArray(c0, c128, 0, 0);
+  pre_header_->InsertInstructionBefore(array_a, pre_header_->GetLastInstruction());
+  array_a->CopyEnvironmentFrom(suspend_check_->GetEnvironment());
+
+  // v = a[0,... 3]
+  // v1 = a[0]
+  // array[0,... 3] = v
+  // array[0] = v1
+  HInstruction* vload = AddVecLoad(pre_header_, array_a, c0);
+  HInstruction* load = AddArrayGet(pre_header_, array_a, c0);
+  HInstruction* vstore = AddVecStore(return_block_, array_, c0, vload->AsVecLoad());
+  HInstruction* store = AddArraySet(return_block_, array_, c0, load);
+
+  PerformLSE();
+
+  ASSERT_FALSE(IsRemoved(vload));
+  ASSERT_TRUE(IsRemoved(load));
+  ASSERT_FALSE(IsRemoved(vstore));
+  ASSERT_FALSE(IsRemoved(store));
+}
+
+// It is not allowed to use defaults for VecLoads. However, this should not prevent removing
+// loads that get the same value.
+// Check that a load getting a known value is eliminated (a loop test case).
+TEST_F(LoadStoreEliminationTest, VLoadDefaultValueAndVLoadInLoopWithoutWriteSideEffects) {
+  InitGraph();
+  CreateTestControlFlowGraph();
+
+  HInstruction* c0 = graph_->GetIntConstant(0);
+  HInstruction* c128 = graph_->GetIntConstant(128);
+
+  HInstruction* array_a = new (GetAllocator()) HNewArray(c0, c128, 0, 0);
+  pre_header_->InsertInstructionBefore(array_a, pre_header_->GetLastInstruction());
+  array_a->CopyEnvironmentFrom(suspend_check_->GetEnvironment());
+
+  // LOOP BODY:
+  //    v = a[i,... i + 3]
+  //    v1 = a[i,... i + 3]
+  // array[0,... 3] = v
+  // array[128,... 131] = v1
+  HInstruction* vload1 = AddVecLoad(loop_, array_a, phi_);
+  HInstruction* vload2 = AddVecLoad(loop_, array_a, phi_);
+  HInstruction* vstore1 = AddVecStore(return_block_, array_, c0, vload1->AsVecLoad());
+  HInstruction* vstore2 = AddVecStore(return_block_, array_, c128, vload2->AsVecLoad());
+
+  PerformLSE();
+
+  ASSERT_FALSE(IsRemoved(vload1));
+  ASSERT_TRUE(IsRemoved(vload2));
+  ASSERT_FALSE(IsRemoved(vstore1));
+  ASSERT_FALSE(IsRemoved(vstore2));
+}
+
+// It is not allowed to use defaults for VecLoads. However, this should not prevent removing
+// loads that get the same value.
+// Check that a load getting a known value is eliminated.
+TEST_F(LoadStoreEliminationTest, VLoadDefaultValueAndVLoad) {
+  InitGraph();
+  CreateTestControlFlowGraph();
+
+  HInstruction* c0 = graph_->GetIntConstant(0);
+  HInstruction* c128 = graph_->GetIntConstant(128);
+
+  HInstruction* array_a = new (GetAllocator()) HNewArray(c0, c128, 0, 0);
+  pre_header_->InsertInstructionBefore(array_a, pre_header_->GetLastInstruction());
+  array_a->CopyEnvironmentFrom(suspend_check_->GetEnvironment());
+
+  // v = a[0,... 3]
+  // v1 = a[0,... 3]
+  // array[0,... 3] = v
+  // array[128,... 131] = v1
+  HInstruction* vload1 = AddVecLoad(pre_header_, array_a, c0);
+  HInstruction* vload2 = AddVecLoad(pre_header_, array_a, c0);
+  HInstruction* vstore1 = AddVecStore(return_block_, array_, c0, vload1->AsVecLoad());
+  HInstruction* vstore2 = AddVecStore(return_block_, array_, c128, vload2->AsVecLoad());
+
+  PerformLSE();
+
+  ASSERT_FALSE(IsRemoved(vload1));
+  ASSERT_TRUE(IsRemoved(vload2));
+  ASSERT_FALSE(IsRemoved(vstore1));
+  ASSERT_FALSE(IsRemoved(vstore2));
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/loop_analysis.cc b/compiler/optimizing/loop_analysis.cc
index 2ae3683..7850517 100644
--- a/compiler/optimizing/loop_analysis.cc
+++ b/compiler/optimizing/loop_analysis.cc
@@ -178,12 +178,232 @@
   }
 };
 
+// Custom implementation of loop helper for X86_64 target. Enables heuristics for scalar loop
+// peeling and unrolling and supports SIMD loop unrolling.
+class X86_64LoopHelper : public ArchDefaultLoopHelper {
+  // Mapping of machine instruction counts for the most used IR instructions.
+  // A few IRs generate a different number of instructions based on input and result type.
+  // We checked top Java apps and benchmarks and used the most frequently generated count.
+  uint32_t GetMachineInstructionCount(HInstruction* inst) const {
+    switch (inst->GetKind()) {
+      case HInstruction::InstructionKind::kAbs:
+        return 3;
+      case HInstruction::InstructionKind::kAdd:
+        return 1;
+      case HInstruction::InstructionKind::kAnd:
+        return 1;
+      case HInstruction::InstructionKind::kArrayLength:
+        return 1;
+      case HInstruction::InstructionKind::kArrayGet:
+        return 1;
+      case HInstruction::InstructionKind::kArraySet:
+        return 1;
+      case HInstruction::InstructionKind::kBoundsCheck:
+        return 2;
+      case HInstruction::InstructionKind::kCheckCast:
+        return 9;
+      case HInstruction::InstructionKind::kDiv:
+        return 8;
+      case HInstruction::InstructionKind::kDivZeroCheck:
+        return 2;
+      case HInstruction::InstructionKind::kEqual:
+        return 3;
+      case HInstruction::InstructionKind::kGreaterThan:
+        return 3;
+      case HInstruction::InstructionKind::kGreaterThanOrEqual:
+        return 3;
+      case HInstruction::InstructionKind::kIf:
+        return 2;
+      case HInstruction::InstructionKind::kInstanceFieldGet:
+        return 2;
+      case HInstruction::InstructionKind::kInstanceFieldSet:
+        return 1;
+      case HInstruction::InstructionKind::kLessThan:
+        return 3;
+      case HInstruction::InstructionKind::kLessThanOrEqual:
+        return 3;
+      case HInstruction::InstructionKind::kMax:
+        return 2;
+      case HInstruction::InstructionKind::kMin:
+        return 2;
+      case HInstruction::InstructionKind::kMul:
+        return 1;
+      case HInstruction::InstructionKind::kNotEqual:
+        return 3;
+      case HInstruction::InstructionKind::kOr:
+        return 1;
+      case HInstruction::InstructionKind::kRem:
+        return 11;
+      case HInstruction::InstructionKind::kSelect:
+        return 2;
+      case HInstruction::InstructionKind::kShl:
+        return 1;
+      case HInstruction::InstructionKind::kShr:
+        return 1;
+      case HInstruction::InstructionKind::kSub:
+        return 1;
+      case HInstruction::InstructionKind::kTypeConversion:
+        return 1;
+      case HInstruction::InstructionKind::kUShr:
+        return 1;
+      case HInstruction::InstructionKind::kVecReplicateScalar:
+        return 2;
+      case HInstruction::InstructionKind::kVecExtractScalar:
+       return 1;
+      case HInstruction::InstructionKind::kVecReduce:
+        return 4;
+      case HInstruction::InstructionKind::kVecNeg:
+        return 2;
+      case HInstruction::InstructionKind::kVecAbs:
+        return 4;
+      case HInstruction::InstructionKind::kVecNot:
+        return 3;
+      case HInstruction::InstructionKind::kVecAdd:
+        return 1;
+      case HInstruction::InstructionKind::kVecSub:
+        return 1;
+      case HInstruction::InstructionKind::kVecMul:
+        return 1;
+      case HInstruction::InstructionKind::kVecDiv:
+        return 1;
+      case HInstruction::InstructionKind::kVecMax:
+        return 1;
+      case HInstruction::InstructionKind::kVecMin:
+        return 1;
+      case HInstruction::InstructionKind::kVecOr:
+        return 1;
+      case HInstruction::InstructionKind::kVecXor:
+        return 1;
+      case HInstruction::InstructionKind::kVecShl:
+        return 1;
+      case HInstruction::InstructionKind::kVecShr:
+        return 1;
+      case HInstruction::InstructionKind::kVecLoad:
+        return 1;
+      case HInstruction::InstructionKind::kVecStore:
+        return 1;
+      case HInstruction::InstructionKind::kXor:
+        return 1;
+      default:
+        return 1;
+    }
+  }
+
+  // Maximum possible unrolling factor.
+  static constexpr uint32_t kX86_64MaxUnrollFactor = 2;  // pow(2,2) = 4
+
+  // According to Intel® 64 and IA-32 Architectures Optimization Reference Manual,
+  // avoid excessive loop unrolling to ensure LSD (loop stream decoder) is operating efficiently.
+  // This variable takes care that unrolled loop instructions should not exceed LSD size.
+  // For Intel Atom processors (Silvermont & Goldmont), the LSD size is 28.
+  // TODO: identify the architecture and LSD size at runtime.
+  static constexpr uint32_t kX86_64UnrolledMaxBodySizeInstr = 28;
+
+  // Loop's maximum basic block count. Loops with a higher count will not be partially
+  // unrolled (unknown iterations).
+  static constexpr uint32_t kX86_64UnknownIterMaxBodySizeBlocks = 2;
+
+  uint32_t GetUnrollingFactor(HLoopInformation* loop_info, HBasicBlock* header) const;
+
+ public:
+  uint32_t GetSIMDUnrollingFactor(HBasicBlock* block,
+                                  int64_t trip_count,
+                                  uint32_t max_peel,
+                                  uint32_t vector_length) const override {
+    DCHECK_NE(vector_length, 0u);
+    HLoopInformation* loop_info = block->GetLoopInformation();
+    DCHECK(loop_info);
+    HBasicBlock* header = loop_info->GetHeader();
+    DCHECK(header);
+    uint32_t unroll_factor = 0;
+
+    if ((trip_count == 0) || (trip_count == LoopAnalysisInfo::kUnknownTripCount)) {
+      // Don't unroll for large loop body size.
+      unroll_factor = GetUnrollingFactor(loop_info, header);
+      if (unroll_factor <= 1) {
+        return LoopAnalysisInfo::kNoUnrollingFactor;
+      }
+    } else {
+      // Don't unroll with insufficient iterations.
+      if (trip_count < (2 * vector_length + max_peel)) {
+        return LoopAnalysisInfo::kNoUnrollingFactor;
+      }
+
+      // Don't unroll for large loop body size.
+      uint32_t unroll_cnt = GetUnrollingFactor(loop_info, header);
+      if (unroll_cnt <= 1) {
+        return LoopAnalysisInfo::kNoUnrollingFactor;
+      }
+
+      // Find a beneficial unroll factor with the following restrictions:
+      //  - At least one iteration of the transformed loop should be executed.
+      //  - The loop body shouldn't be "too big" (heuristic).
+      uint32_t uf2 = (trip_count - max_peel) / vector_length;
+      unroll_factor = TruncToPowerOfTwo(std::min(uf2, unroll_cnt));
+      DCHECK_GE(unroll_factor, 1u);
+    }
+
+    return unroll_factor;
+  }
+};
+
+uint32_t X86_64LoopHelper::GetUnrollingFactor(HLoopInformation* loop_info,
+                                              HBasicBlock* header) const {
+  uint32_t num_inst = 0, num_inst_header = 0, num_inst_loop_body = 0;
+  for (HBlocksInLoopIterator it(*loop_info); !it.Done(); it.Advance()) {
+    HBasicBlock* block = it.Current();
+    DCHECK(block);
+    num_inst = 0;
+
+    for (HInstructionIterator it1(block->GetInstructions()); !it1.Done(); it1.Advance()) {
+      HInstruction* inst = it1.Current();
+      DCHECK(inst);
+
+      // SuspendCheck inside loop is handled with Goto.
+      // Ignoring SuspendCheck & Goto as partially unrolled loop body will have only one Goto.
+      // Instruction count for Goto is being handled during unroll factor calculation below.
+      if (inst->IsSuspendCheck() || inst->IsGoto()) {
+        continue;
+      }
+
+      num_inst += GetMachineInstructionCount(inst);
+    }
+
+    if (block == header) {
+      num_inst_header = num_inst;
+    } else {
+      num_inst_loop_body += num_inst;
+    }
+  }
+
+  // Calculate actual unroll factor.
+  uint32_t unrolling_factor = kX86_64MaxUnrollFactor;
+  uint32_t unrolling_inst = kX86_64UnrolledMaxBodySizeInstr;
+  // "-3" for one Goto instruction.
+  uint32_t desired_size = unrolling_inst - num_inst_header - 3;
+  if (desired_size < (2 * num_inst_loop_body)) {
+    return 1;
+  }
+
+  while (unrolling_factor > 0) {
+    if ((desired_size >> unrolling_factor) >= num_inst_loop_body) {
+      break;
+    }
+    unrolling_factor--;
+  }
+
+  return (1 << unrolling_factor);
+}
+
 ArchNoOptsLoopHelper* ArchNoOptsLoopHelper::Create(InstructionSet isa,
                                                    ArenaAllocator* allocator) {
   switch (isa) {
     case InstructionSet::kArm64: {
       return new (allocator) Arm64LoopHelper;
     }
+    case InstructionSet::kX86_64: {
+      return new (allocator) X86_64LoopHelper;
+    }
     default: {
       return new (allocator) ArchDefaultLoopHelper;
     }
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index 6c76ab8..5784707 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -19,8 +19,6 @@
 #include "arch/arm/instruction_set_features_arm.h"
 #include "arch/arm64/instruction_set_features_arm64.h"
 #include "arch/instruction_set.h"
-#include "arch/mips/instruction_set_features_mips.h"
-#include "arch/mips64/instruction_set_features_mips64.h"
 #include "arch/x86/instruction_set_features_x86.h"
 #include "arch/x86_64/instruction_set_features_x86_64.h"
 #include "driver/compiler_options.h"
@@ -351,7 +349,7 @@
 
 // Translates vector operation to reduction kind.
 static HVecReduce::ReductionKind GetReductionKind(HVecOperation* reduction) {
-  if (reduction->IsVecAdd() ||
+  if (reduction->IsVecAdd()  ||
       reduction->IsVecSub() ||
       reduction->IsVecSADAccumulate() ||
       reduction->IsVecDotProd()) {
@@ -763,6 +761,11 @@
   }
   // Vectorize loop, if possible and valid.
   if (kEnableVectorization &&
+      // Disable vectorization for debuggable graphs: this is a workaround for the bug
+      // in 'GenerateNewLoop' which caused the SuspendCheck environment to be invalid.
+      // TODO: b/138601207, investigate other possible cases with wrong environment values and
+      // possibly switch back vectorization on for debuggable graphs.
+      !graph_->IsDebuggable() &&
       TrySetSimpleLoopHeader(header, &main_phi) &&
       ShouldVectorize(node, body, trip_count) &&
       TryAssignLastValue(node->loop_info, main_phi, preheader, /*collect_loop_uses*/ true)) {
@@ -1278,6 +1281,10 @@
   // (3) unit stride index,
   // (4) vectorizable right-hand-side value.
   uint64_t restrictions = kNone;
+  // Don't accept expressions that can throw.
+  if (instruction->CanThrow()) {
+    return false;
+  }
   if (instruction->IsArraySet()) {
     DataType::Type type = instruction->AsArraySet()->GetComponentType();
     HInstruction* base = instruction->InputAt(0);
@@ -1329,7 +1336,8 @@
   }
   // Otherwise accept only expressions with no effects outside the immediate loop-body.
   // Note that actual uses are inspected during right-hand-side tree traversal.
-  return !IsUsedOutsideLoop(node->loop_info, instruction) && !instruction->DoesAnyWrite();
+  return !IsUsedOutsideLoop(node->loop_info, instruction)
+         && !instruction->DoesAnyWrite();
 }
 
 bool HLoopOptimization::VectorizeUse(LoopNode* node,
@@ -1613,13 +1621,19 @@
                              kNoDotProd;
             return TrySetVectorLength(16);
           case DataType::Type::kUint16:
+            *restrictions |= kNoDiv |
+                             kNoAbs |
+                             kNoSignedHAdd |
+                             kNoUnroundedHAdd |
+                             kNoSAD |
+                             kNoDotProd;
+            return TrySetVectorLength(8);
           case DataType::Type::kInt16:
             *restrictions |= kNoDiv |
                              kNoAbs |
                              kNoSignedHAdd |
                              kNoUnroundedHAdd |
-                             kNoSAD|
-                             kNoDotProd;
+                             kNoSAD;
             return TrySetVectorLength(8);
           case DataType::Type::kInt32:
             *restrictions |= kNoDiv | kNoSAD;
@@ -1638,64 +1652,6 @@
         }  // switch type
       }
       return false;
-    case InstructionSet::kMips:
-      if (features->AsMipsInstructionSetFeatures()->HasMsa()) {
-        switch (type) {
-          case DataType::Type::kBool:
-          case DataType::Type::kUint8:
-          case DataType::Type::kInt8:
-            *restrictions |= kNoDiv | kNoDotProd;
-            return TrySetVectorLength(16);
-          case DataType::Type::kUint16:
-          case DataType::Type::kInt16:
-            *restrictions |= kNoDiv | kNoStringCharAt | kNoDotProd;
-            return TrySetVectorLength(8);
-          case DataType::Type::kInt32:
-            *restrictions |= kNoDiv;
-            return TrySetVectorLength(4);
-          case DataType::Type::kInt64:
-            *restrictions |= kNoDiv;
-            return TrySetVectorLength(2);
-          case DataType::Type::kFloat32:
-            *restrictions |= kNoReduction;
-            return TrySetVectorLength(4);
-          case DataType::Type::kFloat64:
-            *restrictions |= kNoReduction;
-            return TrySetVectorLength(2);
-          default:
-            break;
-        }  // switch type
-      }
-      return false;
-    case InstructionSet::kMips64:
-      if (features->AsMips64InstructionSetFeatures()->HasMsa()) {
-        switch (type) {
-          case DataType::Type::kBool:
-          case DataType::Type::kUint8:
-          case DataType::Type::kInt8:
-            *restrictions |= kNoDiv | kNoDotProd;
-            return TrySetVectorLength(16);
-          case DataType::Type::kUint16:
-          case DataType::Type::kInt16:
-            *restrictions |= kNoDiv | kNoStringCharAt | kNoDotProd;
-            return TrySetVectorLength(8);
-          case DataType::Type::kInt32:
-            *restrictions |= kNoDiv;
-            return TrySetVectorLength(4);
-          case DataType::Type::kInt64:
-            *restrictions |= kNoDiv;
-            return TrySetVectorLength(2);
-          case DataType::Type::kFloat32:
-            *restrictions |= kNoReduction;
-            return TrySetVectorLength(4);
-          case DataType::Type::kFloat64:
-            *restrictions |= kNoReduction;
-            return TrySetVectorLength(2);
-          default:
-            break;
-        }  // switch type
-      }
-      return false;
     default:
       return false;
   }  // switch instruction set
@@ -2156,7 +2112,7 @@
                                               bool generate_code,
                                               DataType::Type reduction_type,
                                               uint64_t restrictions) {
-  if (!instruction->IsAdd() || (reduction_type != DataType::Type::kInt32)) {
+  if (!instruction->IsAdd() || reduction_type != DataType::Type::kInt32) {
     return false;
   }
 
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 1940d55..810871c 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1594,7 +1594,7 @@
       case DataType::Type::kInt32:
         if (std::isnan(value))
           return graph->GetIntConstant(0, GetDexPc());
-        if (value >= kPrimIntMax)
+        if (value >= static_cast<float>(kPrimIntMax))
           return graph->GetIntConstant(kPrimIntMax, GetDexPc());
         if (value <= kPrimIntMin)
           return graph->GetIntConstant(kPrimIntMin, GetDexPc());
@@ -1602,7 +1602,7 @@
       case DataType::Type::kInt64:
         if (std::isnan(value))
           return graph->GetLongConstant(0, GetDexPc());
-        if (value >= kPrimLongMax)
+        if (value >= static_cast<float>(kPrimLongMax))
           return graph->GetLongConstant(kPrimLongMax, GetDexPc());
         if (value <= kPrimLongMin)
           return graph->GetLongConstant(kPrimLongMin, GetDexPc());
@@ -1626,7 +1626,7 @@
       case DataType::Type::kInt64:
         if (std::isnan(value))
           return graph->GetLongConstant(0, GetDexPc());
-        if (value >= kPrimLongMax)
+        if (value >= static_cast<double>(kPrimLongMax))
           return graph->GetLongConstant(kPrimLongMax, GetDexPc());
         if (value <= kPrimLongMin)
           return graph->GetLongConstant(kPrimLongMin, GetDexPc());
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index fedad0c..7ed5bca 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -25,6 +25,7 @@
 #include "base/arena_containers.h"
 #include "base/arena_object.h"
 #include "base/array_ref.h"
+#include "base/intrusive_forward_list.h"
 #include "base/iteration_range.h"
 #include "base/mutex.h"
 #include "base/quasi_atomic.h"
@@ -45,7 +46,6 @@
 #include "mirror/class.h"
 #include "mirror/method_type.h"
 #include "offsets.h"
-#include "utils/intrusive_forward_list.h"
 
 namespace art {
 
@@ -131,6 +131,7 @@
   kAnalysisFailThrowCatchLoop,
   kAnalysisFailAmbiguousArrayOp,
   kAnalysisFailIrreducibleLoopAndStringInit,
+  kAnalysisFailPhiEquivalentInOsr,
   kAnalysisSuccess,
 };
 
@@ -320,6 +321,8 @@
          bool dead_reference_safe = false,
          bool debuggable = false,
          bool osr = false,
+         bool is_shared_jit_code = false,
+         bool baseline = false,
          int start_instruction_id = 0)
       : allocator_(allocator),
         arena_stack_(arena_stack),
@@ -334,6 +337,7 @@
         temporaries_vreg_slots_(0),
         has_bounds_checks_(false),
         has_try_catch_(false),
+        has_monitor_operations_(false),
         has_simd_(false),
         has_loops_(false),
         has_irreducible_loops_(false),
@@ -355,7 +359,9 @@
         art_method_(nullptr),
         inexact_object_rti_(ReferenceTypeInfo::CreateInvalid()),
         osr_(osr),
-        cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)) {
+        baseline_(baseline),
+        cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)),
+        is_shared_jit_code_(is_shared_jit_code) {
     blocks_.reserve(kDefaultNumberOfBlocks);
   }
 
@@ -503,7 +509,7 @@
     return reverse_post_order_;
   }
 
-  ArrayRef<HBasicBlock* const> GetReversePostOrderSkipEntryBlock() {
+  ArrayRef<HBasicBlock* const> GetReversePostOrderSkipEntryBlock() const {
     DCHECK(GetReversePostOrder()[0] == entry_block_);
     return ArrayRef<HBasicBlock* const>(GetReversePostOrder()).SubArray(1);
   }
@@ -585,6 +591,12 @@
 
   bool IsCompilingOsr() const { return osr_; }
 
+  bool IsCompilingBaseline() const { return baseline_; }
+
+  bool IsCompilingForSharedJitCode() const {
+    return is_shared_jit_code_;
+  }
+
   ArenaSet<ArtMethod*>& GetCHASingleImplementationList() {
     return cha_single_implementation_list_;
   }
@@ -600,6 +612,9 @@
   bool HasTryCatch() const { return has_try_catch_; }
   void SetHasTryCatch(bool value) { has_try_catch_ = value; }
 
+  bool HasMonitorOperations() const { return has_monitor_operations_; }
+  void SetHasMonitorOperations(bool value) { has_monitor_operations_ = value; }
+
   bool HasSIMD() const { return has_simd_; }
   void SetHasSIMD(bool value) { has_simd_ = value; }
 
@@ -696,6 +711,10 @@
   // false positives.
   bool has_try_catch_;
 
+  // Flag whether there are any HMonitorOperation in the graph. If yes this will mandate
+  // DexRegisterMap to be present to allow deadlock analysis for non-debuggable code.
+  bool has_monitor_operations_;
+
   // Flag whether SIMD instructions appear in the graph. If true, the
   // code generators may have to be more careful spilling the wider
   // contents of SIMD registers.
@@ -771,9 +790,17 @@
   // compiled code entries which the interpreter can directly jump to.
   const bool osr_;
 
+  // Whether we are compiling baseline (not running optimizations). This affects
+  // the code being generated.
+  const bool baseline_;
+
   // List of methods that are assumed to have single implementation.
   ArenaSet<ArtMethod*> cha_single_implementation_list_;
 
+  // Whether we are JIT compiling in the shared region area, putting
+  // restrictions on, for example, how literals are being generated.
+  bool is_shared_jit_code_;
+
   friend class SsaBuilder;           // For caching constants.
   friend class SsaLivenessAnalysis;  // For the linear order.
   friend class HInliner;             // For the reverse post order.
@@ -1099,7 +1126,7 @@
   }
 
   // Insert `this` between `predecessor` and `successor. This method
-  // preserves the indicies, and will update the first edge found between
+  // preserves the indices, and will update the first edge found between
   // `predecessor` and `successor`.
   void InsertBetween(HBasicBlock* predecessor, HBasicBlock* successor) {
     size_t predecessor_index = successor->GetPredecessorIndexOf(predecessor);
@@ -1438,6 +1465,7 @@
   M(Shr, BinaryOperation)                                               \
   M(StaticFieldGet, Instruction)                                        \
   M(StaticFieldSet, Instruction)                                        \
+  M(StringBuilderAppend, Instruction)                                   \
   M(UnresolvedInstanceFieldGet, Instruction)                            \
   M(UnresolvedInstanceFieldSet, Instruction)                            \
   M(UnresolvedStaticFieldGet, Instruction)                              \
@@ -1497,17 +1525,6 @@
 
 #define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)
 
-#ifndef ART_ENABLE_CODEGEN_mips
-#define FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M)
-#else
-#define FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M)                           \
-  M(MipsComputeBaseMethodAddress, Instruction)                          \
-  M(MipsPackedSwitch, Instruction)                                      \
-  M(IntermediateArrayAddressIndex, Instruction)
-#endif
-
-#define FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M)
-
 #ifndef ART_ENABLE_CODEGEN_x86
 #define FOR_EACH_CONCRETE_INSTRUCTION_X86(M)
 #else
@@ -1520,7 +1537,7 @@
 
 #if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
 #define FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(M)                     \
-  M(X86AndNot, Instruction)                                                \
+  M(X86AndNot, Instruction)                                             \
   M(X86MaskOrResetLeastSetBit, Instruction)
 #else
 #define FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(M)
@@ -1533,8 +1550,6 @@
   FOR_EACH_CONCRETE_INSTRUCTION_SHARED(M)                               \
   FOR_EACH_CONCRETE_INSTRUCTION_ARM(M)                                  \
   FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)                                \
-  FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M)                                 \
-  FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M)                               \
   FOR_EACH_CONCRETE_INSTRUCTION_X86(M)                                  \
   FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)                               \
   FOR_EACH_CONCRETE_INSTRUCTION_X86_COMMON(M)
@@ -2137,12 +2152,13 @@
   // If this instruction will do an implicit null check, return the `HNullCheck` associated
   // with it. Otherwise return null.
   HNullCheck* GetImplicitNullCheck() const {
-    // Find the first previous instruction which is not a move.
-    HInstruction* first_prev_not_move = GetPreviousDisregardingMoves();
-    if (first_prev_not_move != nullptr &&
-        first_prev_not_move->IsNullCheck() &&
-        first_prev_not_move->IsEmittedAtUseSite()) {
-      return first_prev_not_move->AsNullCheck();
+    // Go over previous non-move instructions that are emitted at use site.
+    HInstruction* prev_not_move = GetPreviousDisregardingMoves();
+    while (prev_not_move != nullptr && prev_not_move->IsEmittedAtUseSite()) {
+      if (prev_not_move->IsNullCheck()) {
+        return prev_not_move->AsNullCheck();
+      }
+      prev_not_move = prev_not_move->GetPreviousDisregardingMoves();
     }
     return nullptr;
   }
@@ -4775,7 +4791,16 @@
       case Intrinsics::kThreadCurrentThread:
       case Intrinsics::kStringBufferAppend:
       case Intrinsics::kStringBufferToString:
-      case Intrinsics::kStringBuilderAppend:
+      case Intrinsics::kStringBuilderAppendObject:
+      case Intrinsics::kStringBuilderAppendString:
+      case Intrinsics::kStringBuilderAppendCharSequence:
+      case Intrinsics::kStringBuilderAppendCharArray:
+      case Intrinsics::kStringBuilderAppendBoolean:
+      case Intrinsics::kStringBuilderAppendChar:
+      case Intrinsics::kStringBuilderAppendInt:
+      case Intrinsics::kStringBuilderAppendLong:
+      case Intrinsics::kStringBuilderAppendFloat:
+      case Intrinsics::kStringBuilderAppendDouble:
       case Intrinsics::kStringBuilderToString:
         return false;
       default:
@@ -6880,6 +6905,57 @@
   const FieldInfo field_info_;
 };
 
+class HStringBuilderAppend final : public HVariableInputSizeInstruction {
+ public:
+  HStringBuilderAppend(HIntConstant* format,
+                       uint32_t number_of_arguments,
+                       ArenaAllocator* allocator,
+                       uint32_t dex_pc)
+      : HVariableInputSizeInstruction(
+            kStringBuilderAppend,
+            DataType::Type::kReference,
+            // The runtime call may read memory from inputs. It never writes outside
+            // of the newly allocated result object (or newly allocated helper objects).
+            SideEffects::AllReads().Union(SideEffects::CanTriggerGC()),
+            dex_pc,
+            allocator,
+            number_of_arguments + /* format */ 1u,
+            kArenaAllocInvokeInputs) {
+    DCHECK_GE(number_of_arguments, 1u);  // There must be something to append.
+    SetRawInputAt(FormatIndex(), format);
+  }
+
+  void SetArgumentAt(size_t index, HInstruction* argument) {
+    DCHECK_LE(index, GetNumberOfArguments());
+    SetRawInputAt(index, argument);
+  }
+
+  // Return the number of arguments, excluding the format.
+  size_t GetNumberOfArguments() const {
+    DCHECK_GE(InputCount(), 1u);
+    return InputCount() - 1u;
+  }
+
+  size_t FormatIndex() const {
+    return GetNumberOfArguments();
+  }
+
+  HIntConstant* GetFormat() {
+    return InputAt(FormatIndex())->AsIntConstant();
+  }
+
+  bool NeedsEnvironment() const override { return true; }
+
+  bool CanThrow() const override { return true; }
+
+  bool CanBeNull() const override { return false; }
+
+  DECLARE_INSTRUCTION(StringBuilderAppend);
+
+ protected:
+  DEFAULT_COPY_CONSTRUCTOR(StringBuilderAppend);
+};
+
 class HUnresolvedInstanceFieldGet final : public HExpression<1> {
  public:
   HUnresolvedInstanceFieldGet(HInstruction* obj,
@@ -7222,7 +7298,7 @@
   }
 
   static bool CanCallRuntime(TypeCheckKind check_kind) {
-    // Mips currently does runtime calls for any other checks.
+    // TODO: Re-evaluate now that mips codegen has been removed.
     return check_kind != TypeCheckKind::kExactCheck;
   }
 
@@ -7789,9 +7865,6 @@
 #if defined(ART_ENABLE_CODEGEN_arm) || defined(ART_ENABLE_CODEGEN_arm64)
 #include "nodes_shared.h"
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips
-#include "nodes_mips.h"
-#endif
 #if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
 #include "nodes_x86.h"
 #endif
diff --git a/compiler/optimizing/nodes_mips.h b/compiler/optimizing/nodes_mips.h
deleted file mode 100644
index 4993f57..0000000
--- a/compiler/optimizing/nodes_mips.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_OPTIMIZING_NODES_MIPS_H_
-#define ART_COMPILER_OPTIMIZING_NODES_MIPS_H_
-
-namespace art {
-
-// Compute the address of the method for MIPS Constant area support.
-class HMipsComputeBaseMethodAddress : public HExpression<0> {
- public:
-  // Treat the value as an int32_t, but it is really a 32 bit native pointer.
-  HMipsComputeBaseMethodAddress()
-      : HExpression(kMipsComputeBaseMethodAddress,
-                    DataType::Type::kInt32,
-                    SideEffects::None(),
-                    kNoDexPc) {
-  }
-
-  bool CanBeMoved() const override { return true; }
-
-  DECLARE_INSTRUCTION(MipsComputeBaseMethodAddress);
-
- protected:
-  DEFAULT_COPY_CONSTRUCTOR(MipsComputeBaseMethodAddress);
-};
-
-// Mips version of HPackedSwitch that holds a pointer to the base method address.
-class HMipsPackedSwitch final : public HExpression<2> {
- public:
-  HMipsPackedSwitch(int32_t start_value,
-                    int32_t num_entries,
-                    HInstruction* input,
-                    HMipsComputeBaseMethodAddress* method_base,
-                    uint32_t dex_pc)
-    : HExpression(kMipsPackedSwitch, SideEffects::None(), dex_pc),
-      start_value_(start_value),
-      num_entries_(num_entries) {
-    SetRawInputAt(0, input);
-    SetRawInputAt(1, method_base);
-  }
-
-  bool IsControlFlow() const override { return true; }
-
-  int32_t GetStartValue() const { return start_value_; }
-
-  int32_t GetNumEntries() const { return num_entries_; }
-
-  HBasicBlock* GetDefaultBlock() const {
-    // Last entry is the default block.
-    return GetBlock()->GetSuccessors()[num_entries_];
-  }
-
-  DECLARE_INSTRUCTION(MipsPackedSwitch);
-
- protected:
-  DEFAULT_COPY_CONSTRUCTOR(MipsPackedSwitch);
-
- private:
-  const int32_t start_value_;
-  const int32_t num_entries_;
-};
-
-// This instruction computes part of the array access offset (index offset).
-//
-// For array accesses the element address has the following structure:
-// Address = CONST_OFFSET + base_addr + index << ELEM_SHIFT. The address part
-// (index << ELEM_SHIFT) can be shared across array accesses with
-// the same data type and index. For example, in the following loop 5 accesses can share address
-// computation:
-//
-// void foo(int[] a, int[] b, int[] c) {
-//   for (i...) {
-//     a[i] = a[i] + 5;
-//     b[i] = b[i] + c[i];
-//   }
-// }
-//
-// Note: as the instruction doesn't involve base array address into computations it has no side
-// effects.
-class HIntermediateArrayAddressIndex final : public HExpression<2> {
- public:
-  HIntermediateArrayAddressIndex(HInstruction* index, HInstruction* shift, uint32_t dex_pc)
-      : HExpression(kIntermediateArrayAddressIndex,
-                    DataType::Type::kInt32,
-                    SideEffects::None(),
-                    dex_pc) {
-    SetRawInputAt(0, index);
-    SetRawInputAt(1, shift);
-  }
-
-  bool CanBeMoved() const override { return true; }
-  bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const override {
-    return true;
-  }
-  bool IsActualObject() const override { return false; }
-
-  HInstruction* GetIndex() const { return InputAt(0); }
-  HInstruction* GetShift() const { return InputAt(1); }
-
-  DECLARE_INSTRUCTION(IntermediateArrayAddressIndex);
-
- protected:
-  DEFAULT_COPY_CONSTRUCTOR(IntermediateArrayAddressIndex);
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_OPTIMIZING_NODES_MIPS_H_
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index efe4d6b..e817048 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -1155,6 +1155,8 @@
   // A store needs to stay in place.
   bool CanBeMoved() const override { return false; }
 
+  HInstruction* GetValue() const { return InputAt(2); }
+
   DECLARE_INSTRUCTION(VecStore);
 
  protected:
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index 8864a12..7024660 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -22,10 +22,6 @@
 #ifdef ART_ENABLE_CODEGEN_arm64
 #include "instruction_simplifier_arm64.h"
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips
-#include "instruction_simplifier_mips.h"
-#include "pc_relative_fixups_mips.h"
-#endif
 #ifdef ART_ENABLE_CODEGEN_x86
 #include "pc_relative_fixups_x86.h"
 #include "instruction_simplifier_x86.h"
@@ -108,12 +104,6 @@
     case OptimizationPass::kInstructionSimplifierArm64:
       return arm64::InstructionSimplifierArm64::kInstructionSimplifierArm64PassName;
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips
-    case OptimizationPass::kPcRelativeFixupsMips:
-      return mips::PcRelativeFixups::kPcRelativeFixupsMipsPassName;
-    case OptimizationPass::kInstructionSimplifierMips:
-      return mips::InstructionSimplifierMips::kInstructionSimplifierMipsPassName;
-#endif
 #ifdef ART_ENABLE_CODEGEN_x86
     case OptimizationPass::kPcRelativeFixupsX86:
       return x86::PcRelativeFixups::kPcRelativeFixupsX86PassName;
@@ -160,10 +150,6 @@
 #ifdef ART_ENABLE_CODEGEN_arm64
   X(OptimizationPass::kInstructionSimplifierArm64);
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips
-  X(OptimizationPass::kPcRelativeFixupsMips);
-  X(OptimizationPass::kInstructionSimplifierMips);
-#endif
 #ifdef ART_ENABLE_CODEGEN_x86
   X(OptimizationPass::kPcRelativeFixupsX86);
   X(OptimizationPass::kX86MemoryOperandGeneration);
@@ -300,16 +286,6 @@
         opt = new (allocator) arm64::InstructionSimplifierArm64(graph, stats);
         break;
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips
-      case OptimizationPass::kPcRelativeFixupsMips:
-        DCHECK(alt_name == nullptr) << "arch-specific pass does not support alternative name";
-        opt = new (allocator) mips::PcRelativeFixups(graph, codegen, stats);
-        break;
-      case OptimizationPass::kInstructionSimplifierMips:
-        DCHECK(alt_name == nullptr) << "arch-specific pass does not support alternative name";
-        opt = new (allocator) mips::InstructionSimplifierMips(graph, codegen, stats);
-        break;
-#endif
 #ifdef ART_ENABLE_CODEGEN_x86
       case OptimizationPass::kPcRelativeFixupsX86:
         DCHECK(alt_name == nullptr) << "arch-specific pass does not support alternative name";
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index b84e038..f4777ad 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -89,10 +89,6 @@
 #ifdef ART_ENABLE_CODEGEN_arm64
   kInstructionSimplifierArm64,
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips
-  kPcRelativeFixupsMips,
-  kInstructionSimplifierMips,
-#endif
 #ifdef ART_ENABLE_CODEGEN_x86
   kPcRelativeFixupsX86,
   kInstructionSimplifierX86,
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index a52031c..bad540e 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -27,8 +27,6 @@
 #include "read_barrier_config.h"
 #include "utils/arm/assembler_arm_vixl.h"
 #include "utils/assembler.h"
-#include "utils/mips/assembler_mips.h"
-#include "utils/mips64/assembler_mips64.h"
 
 #include "optimizing/optimizing_cfi_test_expected.inc"
 
@@ -182,14 +180,6 @@
 TEST_ISA(kX86_64)
 #endif
 
-#ifdef ART_ENABLE_CODEGEN_mips
-TEST_ISA(kMips)
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_mips64
-TEST_ISA(kMips64)
-#endif
-
 #ifdef ART_ENABLE_CODEGEN_arm
 TEST_F(OptimizingCFITest, kThumb2Adjust) {
   using vixl32::r0;
@@ -215,66 +205,6 @@
 }
 #endif
 
-#ifdef ART_ENABLE_CODEGEN_mips
-TEST_F(OptimizingCFITest, kMipsAdjust) {
-  // One NOP in delay slot, 1 << 15 NOPS have size 1 << 17 which exceeds 18-bit signed maximum.
-  static constexpr size_t kNumNops = 1u + (1u << 15);
-  std::vector<uint8_t> expected_asm(
-      expected_asm_kMips_adjust_head,
-      expected_asm_kMips_adjust_head + arraysize(expected_asm_kMips_adjust_head));
-  expected_asm.resize(expected_asm.size() + kNumNops * 4u, 0u);
-  expected_asm.insert(
-      expected_asm.end(),
-      expected_asm_kMips_adjust_tail,
-      expected_asm_kMips_adjust_tail + arraysize(expected_asm_kMips_adjust_tail));
-  std::vector<uint8_t> expected_cfi(
-      expected_cfi_kMips_adjust,
-      expected_cfi_kMips_adjust + arraysize(expected_cfi_kMips_adjust));
-  SetUpFrame(InstructionSet::kMips);
-#define __ down_cast<mips::MipsAssembler*>(GetCodeGenerator()->GetAssembler())->
-  mips::MipsLabel target;
-  __ Beqz(mips::A0, &target);
-  // Push the target out of range of BEQZ.
-  for (size_t i = 0; i != kNumNops; ++i) {
-    __ Nop();
-  }
-  __ Bind(&target);
-#undef __
-  Finish();
-  Check(InstructionSet::kMips, "kMips_adjust", expected_asm, expected_cfi);
-}
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_mips64
-TEST_F(OptimizingCFITest, kMips64Adjust) {
-  // One NOP in forbidden slot, 1 << 15 NOPS have size 1 << 17 which exceeds 18-bit signed maximum.
-  static constexpr size_t kNumNops = 1u + (1u << 15);
-  std::vector<uint8_t> expected_asm(
-      expected_asm_kMips64_adjust_head,
-      expected_asm_kMips64_adjust_head + arraysize(expected_asm_kMips64_adjust_head));
-  expected_asm.resize(expected_asm.size() + kNumNops * 4u, 0u);
-  expected_asm.insert(
-      expected_asm.end(),
-      expected_asm_kMips64_adjust_tail,
-      expected_asm_kMips64_adjust_tail + arraysize(expected_asm_kMips64_adjust_tail));
-  std::vector<uint8_t> expected_cfi(
-      expected_cfi_kMips64_adjust,
-      expected_cfi_kMips64_adjust + arraysize(expected_cfi_kMips64_adjust));
-  SetUpFrame(InstructionSet::kMips64);
-#define __ down_cast<mips64::Mips64Assembler*>(GetCodeGenerator()->GetAssembler())->
-  mips64::Mips64Label target;
-  __ Beqc(mips64::A1, mips64::A2, &target);
-  // Push the target out of range of BEQC.
-  for (size_t i = 0; i != kNumNops; ++i) {
-    __ Nop();
-  }
-  __ Bind(&target);
-#undef __
-  Finish();
-  Check(InstructionSet::kMips64, "kMips64_adjust", expected_asm, expected_cfi);
-}
-#endif
-
 #endif  // ART_TARGET_ANDROID
 
 }  // namespace art
diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc
index 1e82c4b0..4c99700 100644
--- a/compiler/optimizing/optimizing_cfi_test_expected.inc
+++ b/compiler/optimizing/optimizing_cfi_test_expected.inc
@@ -136,84 +136,6 @@
 // 0x00000029: .cfi_restore_state
 // 0x00000029: .cfi_def_cfa_offset: 64
 
-static constexpr uint8_t expected_asm_kMips[] = {
-    0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xB1, 0xAF,
-    0x34, 0x00, 0xB0, 0xAF, 0x28, 0x00, 0xB6, 0xF7, 0x20, 0x00, 0xB4, 0xF7,
-    0x3C, 0x00, 0xBF, 0x8F, 0x38, 0x00, 0xB1, 0x8F,
-    0x34, 0x00, 0xB0, 0x8F, 0x28, 0x00, 0xB6, 0xD7, 0x20, 0x00, 0xB4, 0xD7,
-    0x09, 0x00, 0xE0, 0x03, 0x40, 0x00, 0xBD, 0x27,
-};
-static constexpr uint8_t expected_cfi_kMips[] = {
-    0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x91, 0x02, 0x44, 0x90, 0x03,
-    0x48, 0x0A, 0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x50, 0x0E, 0x00, 0x0B,
-    0x0E, 0x40,
-};
-// 0x00000000: addiu sp, sp, -64
-// 0x00000004: .cfi_def_cfa_offset: 64
-// 0x00000004: sw ra, +60(sp)
-// 0x00000008: .cfi_offset: r31 at cfa-4
-// 0x00000008: sw s1, +56(sp)
-// 0x0000000c: .cfi_offset: r17 at cfa-8
-// 0x0000000c: sw s0, +52(sp)
-// 0x00000010: .cfi_offset: r16 at cfa-12
-// 0x00000010: sdc1 f22, +40(sp)
-// 0x00000014: sdc1 f20, +32(sp)
-// 0x00000018: .cfi_remember_state
-// 0x00000018: lw ra, +60(sp)
-// 0x0000001c: .cfi_restore: r31
-// 0x0000001c: lw s1, +56(sp)
-// 0x00000020: .cfi_restore: r17
-// 0x00000020: lw s0, +52(sp)
-// 0x00000024: .cfi_restore: r16
-// 0x00000024: ldc1 f22, +40(sp)
-// 0x00000028: ldc1 f20, +32(sp)
-// 0x0000002c: jr ra
-// 0x00000030: addiu sp, sp, 64
-// 0x00000034: .cfi_def_cfa_offset: 0
-// 0x00000034: .cfi_restore_state
-// 0x00000034: .cfi_def_cfa_offset: 64
-
-static constexpr uint8_t expected_asm_kMips64[] = {
-    0xC0, 0xFF, 0xBD, 0x67, 0x38, 0x00, 0xBF, 0xFF, 0x30, 0x00, 0xB1, 0xFF,
-    0x28, 0x00, 0xB0, 0xFF, 0x20, 0x00, 0xB9, 0xF7, 0x18, 0x00, 0xB8, 0xF7,
-    0x38, 0x00, 0xBF, 0xDF, 0x30, 0x00, 0xB1, 0xDF, 0x28, 0x00, 0xB0, 0xDF,
-    0x20, 0x00, 0xB9, 0xD7, 0x18, 0x00, 0xB8, 0xD7, 0x40, 0x00, 0xBD, 0x67,
-    0x00, 0x00, 0x1F, 0xD8,
-};
-static constexpr uint8_t expected_cfi_kMips64[] = {
-    0x44, 0x0E, 0x40, 0x44, 0x9F, 0x02, 0x44, 0x91, 0x04, 0x44, 0x90, 0x06,
-    0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x0A, 0x44, 0xDF, 0x44, 0xD1, 0x44,
-    0xD0, 0x44, 0xF9, 0x44, 0xF8, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0x40,
-};
-// 0x00000000: daddiu sp, sp, -64
-// 0x00000004: .cfi_def_cfa_offset: 64
-// 0x00000004: sd ra, +56(sp)
-// 0x00000008: .cfi_offset: r31 at cfa-8
-// 0x00000008: sd s1, +48(sp)
-// 0x0000000c: .cfi_offset: r17 at cfa-16
-// 0x0000000c: sd s0, +40(sp)
-// 0x00000010: .cfi_offset: r16 at cfa-24
-// 0x00000010: sdc1 f25, +32(sp)
-// 0x00000014: .cfi_offset: r57 at cfa-32
-// 0x00000014: sdc1 f24, +24(sp)
-// 0x00000018: .cfi_offset: r56 at cfa-40
-// 0x00000018: .cfi_remember_state
-// 0x00000018: ld ra, +56(sp)
-// 0x0000001c: .cfi_restore: r31
-// 0x0000001c: ld s1, +48(sp)
-// 0x00000020: .cfi_restore: r17
-// 0x00000020: ld s0, +40(sp)
-// 0x00000024: .cfi_restore: r16
-// 0x00000024: ldc1 f25, +32(sp)
-// 0x00000028: .cfi_restore: r57
-// 0x00000028: ldc1 f24, +24(sp)
-// 0x0000002c: .cfi_restore: r56
-// 0x0000002c: daddiu sp, sp, 64
-// 0x00000030: .cfi_def_cfa_offset: 0
-// 0x00000030: jic ra, 0
-// 0x00000034: .cfi_restore_state
-// 0x00000034: .cfi_def_cfa_offset: 64
-
 static constexpr uint8_t expected_asm_kThumb2_adjust[] = {
     // VIXL emits an extra 2 bytes here for a 32-bit beq as there is no
     // optimistic 16-bit emit and subsequent fixup for out of reach targets
@@ -326,112 +248,3 @@
 // 0x00000094: pop {r5, r6, pc}
 // 0x00000096: .cfi_restore_state
 // 0x00000096: .cfi_def_cfa_offset: 64
-
-static constexpr uint8_t expected_asm_kMips_adjust_head[] = {
-    0xC0, 0xFF, 0xBD, 0x27, 0x3C, 0x00, 0xBF, 0xAF, 0x38, 0x00, 0xB1, 0xAF,
-    0x34, 0x00, 0xB0, 0xAF, 0x28, 0x00, 0xB6, 0xF7, 0x20, 0x00, 0xB4, 0xF7,
-    0x08, 0x00, 0x80, 0x14, 0xF0, 0xFF, 0xBD, 0x27,
-    0x00, 0x00, 0xBF, 0xAF, 0x00, 0x00, 0x10, 0x04, 0x02, 0x00, 0x01, 0x3C,
-    0x18, 0x00, 0x21, 0x34, 0x21, 0x08, 0x3F, 0x00, 0x00, 0x00, 0xBF, 0x8F,
-    0x09, 0x00, 0x20, 0x00, 0x10, 0x00, 0xBD, 0x27,
-};
-static constexpr uint8_t expected_asm_kMips_adjust_tail[] = {
-    0x3C, 0x00, 0xBF, 0x8F, 0x38, 0x00, 0xB1, 0x8F, 0x34, 0x00, 0xB0, 0x8F,
-    0x28, 0x00, 0xB6, 0xD7, 0x20, 0x00, 0xB4, 0xD7, 0x09, 0x00, 0xE0, 0x03,
-    0x40, 0x00, 0xBD, 0x27,
-};
-static constexpr uint8_t expected_cfi_kMips_adjust[] = {
-    0x44, 0x0E, 0x40, 0x44, 0x9F, 0x01, 0x44, 0x91, 0x02, 0x44, 0x90, 0x03,
-    0x50, 0x0E, 0x50, 0x60, 0x0E, 0x40, 0x04, 0x04, 0x00, 0x02, 0x00, 0x0A,
-    0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x50, 0x0E, 0x00, 0x0B, 0x0E, 0x40,
-};
-// 0x00000000: addiu sp, sp, -64
-// 0x00000004: .cfi_def_cfa_offset: 64
-// 0x00000004: sw ra, +60(sp)
-// 0x00000008: .cfi_offset: r31 at cfa-4
-// 0x00000008: sw s1, +56(sp)
-// 0x0000000c: .cfi_offset: r17 at cfa-8
-// 0x0000000c: sw s0, +52(sp)
-// 0x00000010: .cfi_offset: r16 at cfa-12
-// 0x00000010: sdc1 f22, +40(sp)
-// 0x00000014: sdc1 f20, +32(sp)
-// 0x00000018: bnez a0, 0x0000003c ; +36
-// 0x0000001c: addiu sp, sp, -16
-// 0x00000020: .cfi_def_cfa_offset: 80
-// 0x00000020: sw ra, +0(sp)
-// 0x00000024: nal
-// 0x00000028: lui at, 2
-// 0x0000002c: ori at, at, 24
-// 0x00000030: addu at, at, ra
-// 0x00000034: lw ra, +0(sp)
-// 0x00000038: jr at
-// 0x0000003c: addiu sp, sp, 16
-// 0x00000040: .cfi_def_cfa_offset: 64
-// 0x00000040: nop
-//             ...
-// 0x00020040: nop
-// 0x00020044: .cfi_remember_state
-// 0x00020044: lw ra, +60(sp)
-// 0x00020048: .cfi_restore: r31
-// 0x00020048: lw s1, +56(sp)
-// 0x0002004c: .cfi_restore: r17
-// 0x0002004c: lw s0, +52(sp)
-// 0x00020050: .cfi_restore: r16
-// 0x00020050: ldc1 f22, +40(sp)
-// 0x00020054: ldc1 f20, +32(sp)
-// 0x00020058: jr ra
-// 0x0002005c: addiu sp, sp, 64
-// 0x00020060: .cfi_def_cfa_offset: 0
-// 0x00020060: .cfi_restore_state
-// 0x00020060: .cfi_def_cfa_offset: 64
-
-static constexpr uint8_t expected_asm_kMips64_adjust_head[] = {
-    0xC0, 0xFF, 0xBD, 0x67, 0x38, 0x00, 0xBF, 0xFF, 0x30, 0x00, 0xB1, 0xFF,
-    0x28, 0x00, 0xB0, 0xFF, 0x20, 0x00, 0xB9, 0xF7, 0x18, 0x00, 0xB8, 0xF7,
-    0x02, 0x00, 0xA6, 0x60, 0x02, 0x00, 0x3E, 0xEC, 0x0C, 0x00, 0x01, 0xD8,
-};
-static constexpr uint8_t expected_asm_kMips64_adjust_tail[] = {
-    0x38, 0x00, 0xBF, 0xDF, 0x30, 0x00, 0xB1, 0xDF, 0x28, 0x00, 0xB0, 0xDF,
-    0x20, 0x00, 0xB9, 0xD7, 0x18, 0x00, 0xB8, 0xD7, 0x40, 0x00, 0xBD, 0x67,
-    0x00, 0x00, 0x1F, 0xD8,
-};
-static constexpr uint8_t expected_cfi_kMips64_adjust[] = {
-    0x44, 0x0E, 0x40, 0x44, 0x9F, 0x02, 0x44, 0x91, 0x04, 0x44, 0x90, 0x06,
-    0x44, 0xB9, 0x08, 0x44, 0xB8, 0x0A, 0x04, 0x10, 0x00, 0x02, 0x00, 0x0A,
-    0x44, 0xDF, 0x44, 0xD1, 0x44, 0xD0, 0x44, 0xF9, 0x44, 0xF8, 0x44, 0x0E,
-    0x00, 0x44, 0x0B, 0x0E, 0x40,
-};
-// 0x00000000: daddiu sp, sp, -64
-// 0x00000004: .cfi_def_cfa_offset: 64
-// 0x00000004: sd ra, +56(sp)
-// 0x00000008: .cfi_offset: r31 at cfa-8
-// 0x00000008: sd s1, +48(sp)
-// 0x0000000c: .cfi_offset: r17 at cfa-16
-// 0x0000000c: sd s0, +40(sp)
-// 0x00000010: .cfi_offset: r16 at cfa-24
-// 0x00000010: sdc1 f25, +32(sp)
-// 0x00000014: .cfi_offset: r57 at cfa-32
-// 0x00000014: sdc1 f24, +24(sp)
-// 0x00000018: .cfi_offset: r56 at cfa-40
-// 0x00000018: bnec a1, a2, 0x00000024 ; +12
-// 0x0000001c: auipc at, 2
-// 0x00000020: jic at, 12 ; bc 0x00020028 ; +131080
-// 0x00000024: nop
-//             ...
-// 0x00020024: nop
-// 0x00020028: .cfi_remember_state
-// 0x00020028: ld ra, +56(sp)
-// 0x0002002c: .cfi_restore: r31
-// 0x0002002c: ld s1, +48(sp)
-// 0x00020030: .cfi_restore: r17
-// 0x00020030: ld s0, +40(sp)
-// 0x00020034: .cfi_restore: r16
-// 0x00020034: ldc1 f25, +32(sp)
-// 0x00020038: .cfi_restore: r57
-// 0x00020038: ldc1 f24, +24(sp)
-// 0x0002003c: .cfi_restore: r56
-// 0x0002003c: daddiu sp, sp, 64
-// 0x00020040: .cfi_def_cfa_offset: 0
-// 0x00020040: jic ra, 0
-// 0x00020044: .cfi_restore_state
-// 0x00020044: .cfi_def_cfa_offset: 64
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index f4bf11d..9978a6f 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -137,13 +137,15 @@
       LOG(INFO) << "TIMINGS " << GetMethodName();
       LOG(INFO) << Dumpable<TimingLogger>(timing_logger_);
     }
+    if (visualizer_enabled_) {
+      FlushVisualizer();
+    }
     DCHECK(visualizer_oss_.str().empty());
   }
 
-  void DumpDisassembly() REQUIRES(!visualizer_dump_mutex_) {
+  void DumpDisassembly() {
     if (visualizer_enabled_) {
       visualizer_.DumpGraphWithDisassembly();
-      FlushVisualizer();
     }
   }
 
@@ -158,12 +160,11 @@
   }
 
  private:
-  void StartPass(const char* pass_name) REQUIRES(!visualizer_dump_mutex_) {
+  void StartPass(const char* pass_name) {
     VLOG(compiler) << "Starting pass: " << pass_name;
     // Dump graph first, then start timer.
     if (visualizer_enabled_) {
       visualizer_.DumpGraph(pass_name, /* is_after_pass= */ false, graph_in_bad_state_);
-      FlushVisualizer();
     }
     if (timing_logger_enabled_) {
       timing_logger_.StartTiming(pass_name);
@@ -178,14 +179,13 @@
     visualizer_oss_.clear();
   }
 
-  void EndPass(const char* pass_name, bool pass_change) REQUIRES(!visualizer_dump_mutex_) {
+  void EndPass(const char* pass_name, bool pass_change) {
     // Pause timer first, then dump graph.
     if (timing_logger_enabled_) {
       timing_logger_.EndTiming();
     }
     if (visualizer_enabled_) {
       visualizer_.DumpGraph(pass_name, /* is_after_pass= */ true, graph_in_bad_state_);
-      FlushVisualizer();
     }
 
     // Validate the HGraph if running in debug mode.
@@ -295,6 +295,7 @@
 
   bool JitCompile(Thread* self,
                   jit::JitCodeCache* code_cache,
+                  jit::JitMemoryRegion* region,
                   ArtMethod* method,
                   bool baseline,
                   bool osr,
@@ -383,6 +384,7 @@
                             ArtMethod* method,
                             bool baseline,
                             bool osr,
+                            bool is_shared_jit_code,
                             VariableSizedHandleScope* handles) const;
 
   CodeGenerator* TryCompileIntrinsic(ArenaAllocator* allocator,
@@ -404,9 +406,7 @@
                                 PassObserver* pass_observer,
                                 VariableSizedHandleScope* handles) const;
 
-  void GenerateJitDebugInfo(ArtMethod* method,
-                            const debug::MethodDebugInfo& method_debug_info)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+  void GenerateJitDebugInfo(const debug::MethodDebugInfo& method_debug_info);
 
   std::unique_ptr<OptimizingCompilerStats> compilation_stats_;
 
@@ -450,8 +450,6 @@
   return instruction_set == InstructionSet::kArm
       || instruction_set == InstructionSet::kArm64
       || instruction_set == InstructionSet::kThumb2
-      || instruction_set == InstructionSet::kMips
-      || instruction_set == InstructionSet::kMips64
       || instruction_set == InstructionSet::kX86
       || instruction_set == InstructionSet::kX86_64;
 }
@@ -462,19 +460,6 @@
                                                   PassObserver* pass_observer,
                                                   VariableSizedHandleScope* handles) const {
   switch (codegen->GetCompilerOptions().GetInstructionSet()) {
-#ifdef ART_ENABLE_CODEGEN_mips
-    case InstructionSet::kMips: {
-      OptimizationDef mips_optimizations[] = {
-        OptDef(OptimizationPass::kPcRelativeFixupsMips)
-      };
-      return RunOptimizations(graph,
-                              codegen,
-                              dex_compilation_unit,
-                              pass_observer,
-                              handles,
-                              mips_optimizations);
-    }
-#endif
 #ifdef ART_ENABLE_CODEGEN_x86
     case InstructionSet::kX86: {
       OptimizationDef x86_optimizations[] = {
@@ -537,36 +522,6 @@
                               arm64_optimizations);
     }
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips
-    case InstructionSet::kMips: {
-      OptimizationDef mips_optimizations[] = {
-        OptDef(OptimizationPass::kInstructionSimplifierMips),
-        OptDef(OptimizationPass::kSideEffectsAnalysis),
-        OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
-        OptDef(OptimizationPass::kPcRelativeFixupsMips)
-      };
-      return RunOptimizations(graph,
-                              codegen,
-                              dex_compilation_unit,
-                              pass_observer,
-                              handles,
-                              mips_optimizations);
-    }
-#endif
-#ifdef ART_ENABLE_CODEGEN_mips64
-    case InstructionSet::kMips64: {
-      OptimizationDef mips64_optimizations[] = {
-        OptDef(OptimizationPass::kSideEffectsAnalysis),
-        OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch")
-      };
-      return RunOptimizations(graph,
-                              codegen,
-                              dex_compilation_unit,
-                              pass_observer,
-                              handles,
-                              mips64_optimizations);
-    }
-#endif
 #ifdef ART_ENABLE_CODEGEN_x86
     case InstructionSet::kX86: {
       OptimizationDef x86_optimizations[] = {
@@ -782,6 +737,7 @@
                                               ArtMethod* method,
                                               bool baseline,
                                               bool osr,
+                                              bool is_shared_jit_code,
                                               VariableSizedHandleScope* handles) const {
   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
   const CompilerOptions& compiler_options = GetCompilerOptions();
@@ -849,7 +805,9 @@
       kInvalidInvokeType,
       dead_reference_safe,
       compiler_options.GetDebuggable(),
-      /* osr= */ osr);
+      /* osr= */ osr,
+      /* is_shared_jit_code= */ is_shared_jit_code,
+      /* baseline= */ baseline);
 
   if (method != nullptr) {
     graph->SetArtMethod(method);
@@ -910,6 +868,11 @@
                           MethodCompilationStat::kNotCompiledIrreducibleLoopAndStringInit);
           break;
         }
+        case kAnalysisFailPhiEquivalentInOsr: {
+          MaybeRecordStat(compilation_stats_.get(),
+                          MethodCompilationStat::kNotCompiledPhiEquivalentInOsr);
+          break;
+        }
         case kAnalysisSuccess:
           UNREACHABLE();
       }
@@ -1106,6 +1069,7 @@
                        method,
                        compiler_options.IsBaseline(),
                        /* osr= */ false,
+                       /* is_shared_jit_code= */ false,
                        &handles));
       }
     }
@@ -1166,7 +1130,8 @@
       jni_compiled_method.GetFrameSize(),
       jni_compiled_method.GetCoreSpillMask(),
       jni_compiled_method.GetFpSpillMask(),
-      /* num_dex_registers= */ 0);
+      /* num_dex_registers= */ 0,
+      /* baseline= */ false);
   stack_map_stream->EndMethod();
   return stack_map_stream->Encode();
 }
@@ -1248,6 +1213,7 @@
 
 bool OptimizingCompiler::JitCompile(Thread* self,
                                     jit::JitCodeCache* code_cache,
+                                    jit::JitMemoryRegion* region,
                                     ArtMethod* method,
                                     bool baseline,
                                     bool osr,
@@ -1279,39 +1245,24 @@
     ScopedArenaAllocator stack_map_allocator(&arena_stack);  // Will hold the stack map.
     ScopedArenaVector<uint8_t> stack_map = CreateJniStackMap(&stack_map_allocator,
                                                              jni_compiled_method);
-    uint8_t* stack_map_data = nullptr;
-    uint8_t* roots_data = nullptr;
-    uint32_t data_size = code_cache->ReserveData(self,
-                                                 stack_map.size(),
-                                                 /* number_of_roots= */ 0,
-                                                 method,
-                                                 &stack_map_data,
-                                                 &roots_data);
-    if (stack_map_data == nullptr || roots_data == nullptr) {
+
+    ArrayRef<const uint8_t> reserved_code;
+    ArrayRef<const uint8_t> reserved_data;
+    if (!code_cache->Reserve(self,
+                             region,
+                             jni_compiled_method.GetCode().size(),
+                             stack_map.size(),
+                             /* number_of_roots= */ 0,
+                             method,
+                             /*out*/ &reserved_code,
+                             /*out*/ &reserved_data)) {
       MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
       return false;
     }
-    memcpy(stack_map_data, stack_map.data(), stack_map.size());
+    const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
 
-    const void* code = code_cache->CommitCode(
-        self,
-        method,
-        stack_map_data,
-        roots_data,
-        jni_compiled_method.GetCode().data(),
-        jni_compiled_method.GetCode().size(),
-        data_size,
-        osr,
-        roots,
-        /* has_should_deoptimize_flag= */ false,
-        cha_single_implementation_list);
-    if (code == nullptr) {
-      return false;
-    }
-
+    // Add debug info after we know the code location but before we update entry-point.
     if (compiler_options.GenerateAnyDebugInfo()) {
-      const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
-      const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
       debug::MethodDebugInfo info = {};
       info.custom_name = "art_jni_trampoline";
       info.dex_file = dex_file;
@@ -1324,12 +1275,27 @@
       info.is_native_debuggable = compiler_options.GetNativeDebuggable();
       info.is_optimized = true;
       info.is_code_address_text_relative = false;
-      info.code_address = code_address;
+      info.code_address = reinterpret_cast<uintptr_t>(code);
       info.code_size = jni_compiled_method.GetCode().size();
-      info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
+      info.frame_size_in_bytes = jni_compiled_method.GetFrameSize();
       info.code_info = nullptr;
       info.cfi = jni_compiled_method.GetCfi();
-      GenerateJitDebugInfo(method, info);
+      GenerateJitDebugInfo(info);
+    }
+
+    if (!code_cache->Commit(self,
+                            region,
+                            method,
+                            reserved_code,
+                            jni_compiled_method.GetCode(),
+                            reserved_data,
+                            roots,
+                            ArrayRef<const uint8_t>(stack_map),
+                            osr,
+                            /* has_should_deoptimize_flag= */ false,
+                            cha_single_implementation_list)) {
+      code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
+      return false;
     }
 
     Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
@@ -1366,8 +1332,9 @@
                    &code_allocator,
                    dex_compilation_unit,
                    method,
-                   baseline,
+                   baseline || GetCompilerOptions().IsBaseline(),
                    osr,
+                   /* is_shared_jit_code= */ code_cache->IsSharedRegion(*region),
                    &handles));
     if (codegen.get() == nullptr) {
       return false;
@@ -1375,20 +1342,23 @@
   }
 
   ScopedArenaVector<uint8_t> stack_map = codegen->BuildStackMaps(code_item);
-  size_t number_of_roots = codegen->GetNumberOfJitRoots();
-  uint8_t* stack_map_data = nullptr;
-  uint8_t* roots_data = nullptr;
-  uint32_t data_size = code_cache->ReserveData(self,
-                                               stack_map.size(),
-                                               number_of_roots,
-                                               method,
-                                               &stack_map_data,
-                                               &roots_data);
-  if (stack_map_data == nullptr || roots_data == nullptr) {
+
+  ArrayRef<const uint8_t> reserved_code;
+  ArrayRef<const uint8_t> reserved_data;
+  if (!code_cache->Reserve(self,
+                           region,
+                           code_allocator.GetMemory().size(),
+                           stack_map.size(),
+                           /*number_of_roots=*/codegen->GetNumberOfJitRoots(),
+                           method,
+                           /*out*/ &reserved_code,
+                           /*out*/ &reserved_data)) {
     MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
     return false;
   }
-  memcpy(stack_map_data, stack_map.data(), stack_map.size());
+  const uint8_t* code = reserved_code.data() + OatQuickMethodHeader::InstructionAlignedSize();
+  const uint8_t* roots_data = reserved_data.data();
+
   std::vector<Handle<mirror::Object>> roots;
   codegen->EmitJitRoots(code_allocator.GetData(), roots_data, &roots);
   // The root Handle<>s filled by the codegen reference entries in the VariableSizedHandleScope.
@@ -1398,29 +1368,9 @@
                        return handles.Contains(root.GetReference());
                      }));
 
-  const void* code = code_cache->CommitCode(
-      self,
-      method,
-      stack_map_data,
-      roots_data,
-      code_allocator.GetMemory().data(),
-      code_allocator.GetMemory().size(),
-      data_size,
-      osr,
-      roots,
-      codegen->GetGraph()->HasShouldDeoptimizeFlag(),
-      codegen->GetGraph()->GetCHASingleImplementationList());
-
-  if (code == nullptr) {
-    MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
-    code_cache->ClearData(self, stack_map_data, roots_data);
-    return false;
-  }
-
+  // Add debug info after we know the code location but before we update entry-point.
   const CompilerOptions& compiler_options = GetCompilerOptions();
   if (compiler_options.GenerateAnyDebugInfo()) {
-    const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
-    const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
     debug::MethodDebugInfo info = {};
     DCHECK(info.custom_name.empty());
     info.dex_file = dex_file;
@@ -1433,12 +1383,27 @@
     info.is_native_debuggable = compiler_options.GetNativeDebuggable();
     info.is_optimized = true;
     info.is_code_address_text_relative = false;
-    info.code_address = code_address;
+    info.code_address = reinterpret_cast<uintptr_t>(code);
     info.code_size = code_allocator.GetMemory().size();
-    info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
-    info.code_info = stack_map.size() == 0 ? nullptr : stack_map_data;
+    info.frame_size_in_bytes = codegen->GetFrameSize();
+    info.code_info = stack_map.size() == 0 ? nullptr : stack_map.data();
     info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data());
-    GenerateJitDebugInfo(method, info);
+    GenerateJitDebugInfo(info);
+  }
+
+  if (!code_cache->Commit(self,
+                          region,
+                          method,
+                          reserved_code,
+                          code_allocator.GetMemory(),
+                          reserved_data,
+                          roots,
+                          ArrayRef<const uint8_t>(stack_map),
+                          osr,
+                          codegen->GetGraph()->HasShouldDeoptimizeFlag(),
+                          codegen->GetGraph()->GetCHASingleImplementationList())) {
+    code_cache->Free(self, region, reserved_code.data(), reserved_data.data());
+    return false;
   }
 
   Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
@@ -1462,31 +1427,22 @@
   return true;
 }
 
-void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method ATTRIBUTE_UNUSED,
-                                              const debug::MethodDebugInfo& info) {
+void OptimizingCompiler::GenerateJitDebugInfo(const debug::MethodDebugInfo& info) {
   const CompilerOptions& compiler_options = GetCompilerOptions();
-  DCHECK(compiler_options.GenerateAnyDebugInfo());
-  TimingLogger logger("Generate JIT debug info logger", true, VLOG_IS_ON(jit));
-  {
-    TimingLogger::ScopedTiming st("Generate JIT debug info", &logger);
-
+  if (compiler_options.GenerateAnyDebugInfo()) {
     // If both flags are passed, generate full debug info.
     const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
 
     // Create entry for the single method that we just compiled.
-    std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT(
-        compiler_options.GetInstructionSet(),
-        compiler_options.GetInstructionSetFeatures(),
-        mini_debug_info,
-        info);
-    AddNativeDebugInfoForJit(Thread::Current(),
-                             reinterpret_cast<const void*>(info.code_address),
-                             elf_file,
-                             debug::PackElfFileForJIT,
-                             compiler_options.GetInstructionSet(),
-                             compiler_options.GetInstructionSetFeatures());
+    InstructionSet isa = compiler_options.GetInstructionSet();
+    const InstructionSetFeatures* features = compiler_options.GetInstructionSetFeatures();
+    std::vector<uint8_t> elf = debug::MakeElfFileForJIT(isa, features, mini_debug_info, info);
+
+    // NB: Don't allow packing of full info since it would remove non-backtrace data.
+    MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+    const void* code_ptr = reinterpret_cast<const void*>(info.code_address);
+    AddNativeDebugInfoForJit(code_ptr, elf, /*allow_packing=*/ mini_debug_info);
   }
-  Runtime::Current()->GetJit()->AddTimingLogger(logger);
 }
 
 }  // namespace art
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index ddd57f5..83dbef7 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -61,6 +61,7 @@
   kNotCompiledVerificationError,
   kNotCompiledVerifyAtRuntime,
   kNotCompiledIrreducibleLoopAndStringInit,
+  kNotCompiledPhiEquivalentInOsr,
   kInlinedMonomorphicCall,
   kInlinedPolymorphicCall,
   kMonomorphicCall,
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index e5f6941..eb262bc 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -180,7 +180,29 @@
     }
   }
 
+  // Run GraphChecker with all checks.
+  //
+  // Return: the status whether the run is successful.
+  bool CheckGraph(HGraph* graph) {
+    return CheckGraph(graph, /*check_ref_type_info=*/true);
+  }
+
+  // Run GraphChecker with all checks except reference type information checks.
+  //
+  // Return: the status whether the run is successful.
+  bool CheckGraphSkipRefTypeInfoChecks(HGraph* graph) {
+    return CheckGraph(graph, /*check_ref_type_info=*/false);
+  }
+
  private:
+  bool CheckGraph(HGraph* graph, bool check_ref_type_info) {
+    GraphChecker checker(graph);
+    checker.SetRefTypeInfoCheckEnabled(check_ref_type_info);
+    checker.Run();
+    checker.Dump(std::cerr);
+    return checker.IsValid();
+  }
+
   std::vector<std::unique_ptr<const StandardDexFile>> dex_files_;
   std::unique_ptr<ArenaPoolAndAllocator> pool_and_allocator_;
   std::unique_ptr<VariableSizedHandleScope> handles_;
@@ -194,8 +216,7 @@
   ImprovedOptimizingUnitTest() : graph_(CreateGraph()),
                                  entry_block_(nullptr),
                                  return_block_(nullptr),
-                                 exit_block_(nullptr),
-                                 parameter_(nullptr) {}
+                                 exit_block_(nullptr) {}
 
   virtual ~ImprovedOptimizingUnitTest() {}
 
@@ -214,25 +235,21 @@
     entry_block_->AddSuccessor(return_block_);
     return_block_->AddSuccessor(exit_block_);
 
-    parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
-                                                      dex::TypeIndex(0),
-                                                      0,
-                                                      DataType::Type::kInt32);
-    entry_block_->AddInstruction(parameter_);
+    CreateParameters();
+    for (HInstruction* parameter : parameters_) {
+      entry_block_->AddInstruction(parameter);
+    }
+
     return_block_->AddInstruction(new (GetAllocator()) HReturnVoid());
     exit_block_->AddInstruction(new (GetAllocator()) HExit());
   }
 
   bool CheckGraph() {
-    GraphChecker checker(graph_);
-    checker.Run();
-    if (!checker.IsValid()) {
-      for (const std::string& error : checker.GetErrors()) {
-        std::cout << error << std::endl;
-      }
-      return false;
-    }
-    return true;
+    return OptimizingUnitTestHelper::CheckGraph(graph_);
+  }
+
+  bool CheckGraphSkipRefTypeInfoChecks() {
+    return OptimizingUnitTestHelper::CheckGraphSkipRefTypeInfoChecks(graph_);
   }
 
   HEnvironment* ManuallyBuildEnvFor(HInstruction* instruction,
@@ -250,13 +267,17 @@
   }
 
  protected:
+  // Create parameters to be added to the graph entry block.
+  // Subclasses can override it to create parameters they need.
+  virtual void CreateParameters() { /* do nothing */ }
+
   HGraph* graph_;
 
   HBasicBlock* entry_block_;
   HBasicBlock* return_block_;
   HBasicBlock* exit_block_;
 
-  HInstruction* parameter_;
+  std::vector<HInstruction*> parameters_;
 };
 
 // Naive string diff data type.
diff --git a/compiler/optimizing/pc_relative_fixups_mips.cc b/compiler/optimizing/pc_relative_fixups_mips.cc
deleted file mode 100644
index 05208ff..0000000
--- a/compiler/optimizing/pc_relative_fixups_mips.cc
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "pc_relative_fixups_mips.h"
-#include "code_generator_mips.h"
-#include "intrinsics_mips.h"
-
-namespace art {
-namespace mips {
-
-/**
- * Finds instructions that need the constant area base as an input.
- */
-class PCRelativeHandlerVisitor : public HGraphVisitor {
- public:
-  PCRelativeHandlerVisitor(HGraph* graph, CodeGenerator* codegen)
-      : HGraphVisitor(graph),
-        codegen_(down_cast<CodeGeneratorMIPS*>(codegen)),
-        base_(nullptr) {}
-
-  void MoveBaseIfNeeded() {
-    if (base_ != nullptr) {
-      // Bring the base closer to the first use (previously, it was in the
-      // entry block) and relieve some pressure on the register allocator
-      // while avoiding recalculation of the base in a loop.
-      base_->MoveBeforeFirstUserAndOutOfLoops();
-      // Computing the base for PC-relative literals will clobber RA with
-      // the NAL instruction on R2. Take a note of this before generating
-      // the method entry.
-      codegen_->ClobberRA();
-    }
-  }
-
- private:
-  void InitializePCRelativeBasePointer() {
-    // Ensure we only initialize the pointer once.
-    if (base_ != nullptr) {
-      return;
-    }
-    // Insert the base at the start of the entry block, move it to a better
-    // position later in MoveBaseIfNeeded().
-    base_ = new (GetGraph()->GetAllocator()) HMipsComputeBaseMethodAddress();
-    HBasicBlock* entry_block = GetGraph()->GetEntryBlock();
-    entry_block->InsertInstructionBefore(base_, entry_block->GetFirstInstruction());
-    DCHECK(base_ != nullptr);
-  }
-
-  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) override {
-    // If this is an invoke with PC-relative load kind,
-    // we need to add the base as the special input.
-    if (invoke->HasPcRelativeMethodLoadKind() &&
-        !IsCallFreeIntrinsic<IntrinsicLocationsBuilderMIPS>(invoke, codegen_)) {
-      InitializePCRelativeBasePointer();
-      // Add the special argument base to the method.
-      DCHECK(!invoke->HasCurrentMethodInput());
-      invoke->AddSpecialInput(base_);
-    }
-  }
-
-  void VisitLoadClass(HLoadClass* load_class) override {
-    HLoadClass::LoadKind load_kind = load_class->GetLoadKind();
-    switch (load_kind) {
-      case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
-      case HLoadClass::LoadKind::kBootImageRelRo:
-      case HLoadClass::LoadKind::kBssEntry:
-      case HLoadClass::LoadKind::kJitBootImageAddress:
-        // Add a base register for PC-relative literals on R2.
-        InitializePCRelativeBasePointer();
-        load_class->AddSpecialInput(base_);
-        break;
-      default:
-        break;
-    }
-  }
-
-  void VisitLoadString(HLoadString* load_string) override {
-    HLoadString::LoadKind load_kind = load_string->GetLoadKind();
-    switch (load_kind) {
-      case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
-      case HLoadString::LoadKind::kBootImageRelRo:
-      case HLoadString::LoadKind::kBssEntry:
-      case HLoadString::LoadKind::kJitBootImageAddress:
-        // Add a base register for PC-relative literals on R2.
-        InitializePCRelativeBasePointer();
-        load_string->AddSpecialInput(base_);
-        break;
-      default:
-        break;
-    }
-  }
-
-  void VisitPackedSwitch(HPackedSwitch* switch_insn) override {
-    if (switch_insn->GetNumEntries() <=
-        InstructionCodeGeneratorMIPS::kPackedSwitchJumpTableThreshold) {
-      return;
-    }
-    // We need to replace the HPackedSwitch with a HMipsPackedSwitch in order to
-    // address the constant area.
-    InitializePCRelativeBasePointer();
-    HGraph* graph = GetGraph();
-    HBasicBlock* block = switch_insn->GetBlock();
-    HMipsPackedSwitch* mips_switch = new (graph->GetAllocator()) HMipsPackedSwitch(
-        switch_insn->GetStartValue(),
-        switch_insn->GetNumEntries(),
-        switch_insn->InputAt(0),
-        base_,
-        switch_insn->GetDexPc());
-    block->ReplaceAndRemoveInstructionWith(switch_insn, mips_switch);
-  }
-
-  CodeGeneratorMIPS* codegen_;
-
-  // The generated HMipsComputeBaseMethodAddress in the entry block needed as an
-  // input to the HMipsLoadFromConstantTable instructions.
-  HMipsComputeBaseMethodAddress* base_;
-};
-
-bool PcRelativeFixups::Run() {
-  CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen_);
-  if (mips_codegen->GetInstructionSetFeatures().IsR6()) {
-    // Do nothing for R6 because it has PC-relative addressing.
-    return false;
-  }
-  if (graph_->HasIrreducibleLoops()) {
-    // Do not run this optimization, as irreducible loops do not work with an instruction
-    // that can be live-in at the irreducible loop header.
-    return false;
-  }
-  PCRelativeHandlerVisitor visitor(graph_, codegen_);
-  visitor.VisitInsertionOrder();
-  visitor.MoveBaseIfNeeded();
-  return true;
-}
-
-}  // namespace mips
-}  // namespace art
diff --git a/compiler/optimizing/pc_relative_fixups_mips.h b/compiler/optimizing/pc_relative_fixups_mips.h
deleted file mode 100644
index 872370b..0000000
--- a/compiler/optimizing/pc_relative_fixups_mips.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_OPTIMIZING_PC_RELATIVE_FIXUPS_MIPS_H_
-#define ART_COMPILER_OPTIMIZING_PC_RELATIVE_FIXUPS_MIPS_H_
-
-#include "nodes.h"
-#include "optimization.h"
-
-namespace art {
-
-class CodeGenerator;
-
-namespace mips {
-
-class PcRelativeFixups : public HOptimization {
- public:
-  PcRelativeFixups(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
-      : HOptimization(graph, kPcRelativeFixupsMipsPassName, stats),
-        codegen_(codegen) {}
-
-  static constexpr const char* kPcRelativeFixupsMipsPassName = "pc_relative_fixups_mips";
-
-  bool Run() override;
-
- private:
-  CodeGenerator* codegen_;
-};
-
-}  // namespace mips
-}  // namespace art
-
-#endif  // ART_COMPILER_OPTIMIZING_PC_RELATIVE_FIXUPS_MIPS_H_
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index fbdbf9d..8c4615d 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -160,8 +160,8 @@
   if (implicit_clinit != nullptr) {
     // Remove the check from the graph. It has been merged into the invoke or new-instance.
     check->GetBlock()->RemoveInstruction(check);
-    // Check if we can merge the load class as well.
-    if (can_merge_with_load_class && !load_class->HasUses()) {
+    // Check if we can merge the load class as well, or whether the LoadClass is now dead.
+    if ((can_merge_with_load_class || !load_class->CanThrow()) && !load_class->HasUses()) {
       load_class->GetBlock()->RemoveInstruction(load_class);
     }
   } else if (can_merge_with_load_class &&
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
index b1f0a1a..1786048 100644
--- a/compiler/optimizing/register_allocation_resolver.cc
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -274,7 +274,7 @@
 size_t RegisterAllocationResolver::CalculateMaximumSafepointSpillSize(
     ArrayRef<HInstruction* const> safepoints) {
   size_t core_register_spill_size = codegen_->GetWordSize();
-  size_t fp_register_spill_size = codegen_->GetFloatingPointSpillSlotSize();
+  size_t fp_register_spill_size = codegen_->GetSlowPathFPWidth();
   size_t maximum_safepoint_spill_size = 0u;
   for (HInstruction* instruction : safepoints) {
     LocationSummary* locations = instruction->GetLocations();
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index bad73e1..a9c217f 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -68,17 +68,6 @@
   }
 }
 
-bool RegisterAllocator::CanAllocateRegistersFor(const HGraph& graph ATTRIBUTE_UNUSED,
-                                                InstructionSet instruction_set) {
-  return instruction_set == InstructionSet::kArm
-      || instruction_set == InstructionSet::kArm64
-      || instruction_set == InstructionSet::kMips
-      || instruction_set == InstructionSet::kMips64
-      || instruction_set == InstructionSet::kThumb2
-      || instruction_set == InstructionSet::kX86
-      || instruction_set == InstructionSet::kX86_64;
-}
-
 class AllRangesIterator : public ValueObject {
  public:
   explicit AllRangesIterator(LiveInterval* interval)
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index 18ef69f..4d22687 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -60,9 +60,6 @@
   // intervals that intersect each other. Returns false if it failed.
   virtual bool Validate(bool log_fatal_on_failure) = 0;
 
-  static bool CanAllocateRegistersFor(const HGraph& graph,
-                                      InstructionSet instruction_set);
-
   // Verifies that live intervals do not conflict. Used by unit testing.
   static bool ValidateIntervals(ArrayRef<LiveInterval* const> intervals,
                                 size_t number_of_spill_slots,
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index fdef45e..f722cf9 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -43,34 +43,37 @@
   }
 
   if (is_data_dependency) {
-    if (!HasImmediateDataDependency(node, dependency)) {
-      node->AddDataPredecessor(dependency);
-    }
-  } else if (!HasImmediateOtherDependency(node, dependency)) {
+    node->AddDataPredecessor(dependency);
+  } else {
     node->AddOtherPredecessor(dependency);
   }
 }
 
-static bool MayHaveReorderingDependency(SideEffects node, SideEffects other) {
+bool SideEffectDependencyAnalysis::HasReorderingDependency(const HInstruction* instr1,
+                                                           const HInstruction* instr2) {
+  SideEffects instr1_side_effects = instr1->GetSideEffects();
+  SideEffects instr2_side_effects = instr2->GetSideEffects();
+
   // Read after write.
-  if (node.MayDependOn(other)) {
+  if (instr1_side_effects.MayDependOn(instr2_side_effects)) {
     return true;
   }
 
   // Write after read.
-  if (other.MayDependOn(node)) {
+  if (instr2_side_effects.MayDependOn(instr1_side_effects)) {
     return true;
   }
 
   // Memory write after write.
-  if (node.DoesAnyWrite() && other.DoesAnyWrite()) {
+  if (instr1_side_effects.DoesAnyWrite() && instr2_side_effects.DoesAnyWrite()) {
     return true;
   }
 
   return false;
 }
 
-size_t SchedulingGraph::ArrayAccessHeapLocation(HInstruction* instruction) const {
+size_t SideEffectDependencyAnalysis::MemoryDependencyAnalysis::ArrayAccessHeapLocation(
+    HInstruction* instruction) const {
   DCHECK(heap_location_collector_ != nullptr);
   size_t heap_loc = heap_location_collector_->GetArrayHeapLocation(instruction);
   // This array access should be analyzed and added to HeapLocationCollector before.
@@ -78,19 +81,19 @@
   return heap_loc;
 }
 
-bool SchedulingGraph::ArrayAccessMayAlias(HInstruction* node,
-                                          HInstruction* other) const {
+bool SideEffectDependencyAnalysis::MemoryDependencyAnalysis::ArrayAccessMayAlias(
+    HInstruction* instr1, HInstruction* instr2) const {
   DCHECK(heap_location_collector_ != nullptr);
-  size_t node_heap_loc = ArrayAccessHeapLocation(node);
-  size_t other_heap_loc = ArrayAccessHeapLocation(other);
+  size_t instr1_heap_loc = ArrayAccessHeapLocation(instr1);
+  size_t instr2_heap_loc = ArrayAccessHeapLocation(instr2);
 
   // For example: arr[0] and arr[0]
-  if (node_heap_loc == other_heap_loc) {
+  if (instr1_heap_loc == instr2_heap_loc) {
     return true;
   }
 
   // For example: arr[0] and arr[i]
-  if (heap_location_collector_->MayAlias(node_heap_loc, other_heap_loc)) {
+  if (heap_location_collector_->MayAlias(instr1_heap_loc, instr2_heap_loc)) {
     return true;
   }
 
@@ -148,55 +151,55 @@
   }
 }
 
-size_t SchedulingGraph::FieldAccessHeapLocation(HInstruction* obj, const FieldInfo* field) const {
-  DCHECK(obj != nullptr);
-  DCHECK(field != nullptr);
+size_t SideEffectDependencyAnalysis::MemoryDependencyAnalysis::FieldAccessHeapLocation(
+    const HInstruction* instr) const {
+  DCHECK(instr != nullptr);
+  DCHECK(GetFieldInfo(instr) != nullptr);
   DCHECK(heap_location_collector_ != nullptr);
 
-  size_t heap_loc = heap_location_collector_->GetFieldHeapLocation(obj, field);
+  size_t heap_loc = heap_location_collector_->GetFieldHeapLocation(instr->InputAt(0),
+                                                                   GetFieldInfo(instr));
   // This field access should be analyzed and added to HeapLocationCollector before.
   DCHECK(heap_loc != HeapLocationCollector::kHeapLocationNotFound);
 
   return heap_loc;
 }
 
-bool SchedulingGraph::FieldAccessMayAlias(const HInstruction* node,
-                                          const HInstruction* other) const {
+bool SideEffectDependencyAnalysis::MemoryDependencyAnalysis::FieldAccessMayAlias(
+    const HInstruction* instr1, const HInstruction* instr2) const {
   DCHECK(heap_location_collector_ != nullptr);
 
   // Static and instance field accesses should not alias.
-  if ((IsInstanceFieldAccess(node) && IsStaticFieldAccess(other)) ||
-      (IsStaticFieldAccess(node) && IsInstanceFieldAccess(other))) {
+  if ((IsInstanceFieldAccess(instr1) && IsStaticFieldAccess(instr2)) ||
+      (IsStaticFieldAccess(instr1) && IsInstanceFieldAccess(instr2))) {
     return false;
   }
 
   // If either of the field accesses is unresolved.
-  if (IsUnresolvedFieldAccess(node) || IsUnresolvedFieldAccess(other)) {
+  if (IsUnresolvedFieldAccess(instr1) || IsUnresolvedFieldAccess(instr2)) {
     // Conservatively treat these two accesses may alias.
     return true;
   }
 
   // If both fields accesses are resolved.
-  const FieldInfo* node_field = GetFieldInfo(node);
-  const FieldInfo* other_field = GetFieldInfo(other);
+  size_t instr1_field_access_heap_loc = FieldAccessHeapLocation(instr1);
+  size_t instr2_field_access_heap_loc = FieldAccessHeapLocation(instr2);
 
-  size_t node_loc = FieldAccessHeapLocation(node->InputAt(0), node_field);
-  size_t other_loc = FieldAccessHeapLocation(other->InputAt(0), other_field);
-
-  if (node_loc == other_loc) {
+  if (instr1_field_access_heap_loc == instr2_field_access_heap_loc) {
     return true;
   }
 
-  if (!heap_location_collector_->MayAlias(node_loc, other_loc)) {
+  if (!heap_location_collector_->MayAlias(instr1_field_access_heap_loc,
+                                          instr2_field_access_heap_loc)) {
     return false;
   }
 
   return true;
 }
 
-bool SchedulingGraph::HasMemoryDependency(HInstruction* node,
-                                          HInstruction* other) const {
-  if (!MayHaveReorderingDependency(node->GetSideEffects(), other->GetSideEffects())) {
+bool SideEffectDependencyAnalysis::MemoryDependencyAnalysis::HasMemoryDependency(
+    HInstruction* instr1, HInstruction* instr2) const {
+  if (!HasReorderingDependency(instr1, instr2)) {
     return false;
   }
 
@@ -208,35 +211,35 @@
     return true;
   }
 
-  if (IsArrayAccess(node) && IsArrayAccess(other)) {
-    return ArrayAccessMayAlias(node, other);
+  if (IsArrayAccess(instr1) && IsArrayAccess(instr2)) {
+    return ArrayAccessMayAlias(instr1, instr2);
   }
-  if (IsFieldAccess(node) && IsFieldAccess(other)) {
-    return FieldAccessMayAlias(node, other);
+  if (IsFieldAccess(instr1) && IsFieldAccess(instr2)) {
+    return FieldAccessMayAlias(instr1, instr2);
   }
 
   // TODO(xueliang): LSA to support alias analysis among HVecLoad, HVecStore and ArrayAccess
-  if (node->IsVecMemoryOperation() && other->IsVecMemoryOperation()) {
+  if (instr1->IsVecMemoryOperation() && instr2->IsVecMemoryOperation()) {
     return true;
   }
-  if (node->IsVecMemoryOperation() && IsArrayAccess(other)) {
+  if (instr1->IsVecMemoryOperation() && IsArrayAccess(instr2)) {
     return true;
   }
-  if (IsArrayAccess(node) && other->IsVecMemoryOperation()) {
+  if (IsArrayAccess(instr1) && instr2->IsVecMemoryOperation()) {
     return true;
   }
 
   // Heap accesses of different kinds should not alias.
-  if (IsArrayAccess(node) && IsFieldAccess(other)) {
+  if (IsArrayAccess(instr1) && IsFieldAccess(instr2)) {
     return false;
   }
-  if (IsFieldAccess(node) && IsArrayAccess(other)) {
+  if (IsFieldAccess(instr1) && IsArrayAccess(instr2)) {
     return false;
   }
-  if (node->IsVecMemoryOperation() && IsFieldAccess(other)) {
+  if (instr1->IsVecMemoryOperation() && IsFieldAccess(instr2)) {
     return false;
   }
-  if (IsFieldAccess(node) && other->IsVecMemoryOperation()) {
+  if (IsFieldAccess(instr1) && instr2->IsVecMemoryOperation()) {
     return false;
   }
 
@@ -245,15 +248,15 @@
   return true;
 }
 
-bool SchedulingGraph::HasExceptionDependency(const HInstruction* node,
-                                             const HInstruction* other) const {
-  if (other->CanThrow() && node->GetSideEffects().DoesAnyWrite()) {
+bool SideEffectDependencyAnalysis::HasExceptionDependency(const HInstruction* instr1,
+                                                          const HInstruction* instr2) {
+  if (instr2->CanThrow() && instr1->GetSideEffects().DoesAnyWrite()) {
     return true;
   }
-  if (other->GetSideEffects().DoesAnyWrite() && node->CanThrow()) {
+  if (instr2->GetSideEffects().DoesAnyWrite() && instr1->CanThrow()) {
     return true;
   }
-  if (other->CanThrow() && node->CanThrow()) {
+  if (instr2->CanThrow() && instr1->CanThrow()) {
     return true;
   }
 
@@ -262,24 +265,6 @@
   return false;
 }
 
-// Check whether `node` depends on `other`, taking into account `SideEffect`
-// information and `CanThrow` information.
-bool SchedulingGraph::HasSideEffectDependency(HInstruction* node,
-                                              HInstruction* other) const {
-  if (HasMemoryDependency(node, other)) {
-    return true;
-  }
-
-  // Even if above memory dependency check has passed, it is still necessary to
-  // check dependencies between instructions that can throw and instructions
-  // that write to memory.
-  if (HasExceptionDependency(node, other)) {
-    return true;
-  }
-
-  return false;
-}
-
 // Check if the specified instruction is a better candidate which more likely will
 // have other instructions depending on it.
 static bool IsBetterCandidateWithMoreLikelyDependencies(HInstruction* new_candidate,
@@ -297,8 +282,39 @@
   }
 }
 
-void SchedulingGraph::AddDependencies(HInstruction* instruction, bool is_scheduling_barrier) {
-  SchedulingNode* instruction_node = GetNode(instruction);
+void SchedulingGraph::AddCrossIterationDependencies(SchedulingNode* node) {
+  for (HInstruction* instruction : node->GetInstruction()->GetInputs()) {
+    // Having a phi-function from a loop header as an input means the current node of the
+    // scheduling graph has a cross-iteration dependency because such phi-functions bring values
+    // from the previous iteration to the current iteration.
+    if (!instruction->IsLoopHeaderPhi()) {
+      continue;
+    }
+    for (HInstruction* phi_input : instruction->GetInputs()) {
+      // As a scheduling graph of the current basic block is built by
+      // processing instructions bottom-up, nullptr returned by GetNode means
+      // an instruction defining a value for the phi is either before the
+      // instruction represented by node or it is in a different basic block.
+      SchedulingNode* def_node = GetNode(phi_input);
+
+      // We don't create a dependency if there are uses besides the use in phi.
+      // In such cases a register to hold phi_input is usually allocated and
+      // a MOV instruction is generated. In cases with multiple uses and no MOV
+      // instruction, a reordering that creates a MOV instruction can improve
+      // performance more than an attempt to avoid a MOV instruction.
+      if (def_node != nullptr && def_node != node && phi_input->GetUses().HasExactlyOneElement()) {
+        // We have an implicit data dependency between node and def_node.
+        // AddDataDependency cannot be used because it is for explicit data dependencies.
+        // So AddOtherDependency is used.
+        AddOtherDependency(def_node, node);
+      }
+    }
+  }
+}
+
+void SchedulingGraph::AddDependencies(SchedulingNode* instruction_node,
+                                      bool is_scheduling_barrier) {
+  HInstruction* instruction = instruction_node->GetInstruction();
 
   // Define-use dependencies.
   for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
@@ -354,12 +370,16 @@
       if (other_node->IsSchedulingBarrier()) {
         // We have reached a scheduling barrier so we can stop further
         // processing.
-        DCHECK(HasImmediateOtherDependency(other_node, instruction_node));
+        //
+        // As an "other" dependency is not set up if a data dependency exists, we need to check that
+        // one of them must exist.
+        DCHECK(other_node->HasOtherDependency(instruction_node)
+               || other_node->HasDataDependency(instruction_node));
         break;
       }
-      if (HasSideEffectDependency(other, instruction)) {
+      if (side_effect_dependency_analysis_.HasSideEffectDependency(other, instruction)) {
         if (dep_chain_candidate != nullptr &&
-            HasSideEffectDependency(other, dep_chain_candidate)) {
+            side_effect_dependency_analysis_.HasSideEffectDependency(other, dep_chain_candidate)) {
           // Skip an explicit dependency to reduce memory usage, rely on the transitive dependency.
         } else {
           AddOtherDependency(other_node, instruction_node);
@@ -386,44 +406,8 @@
       AddOtherDependency(GetNode(use.GetUser()->GetHolder()), instruction_node);
     }
   }
-}
 
-bool SchedulingGraph::HasImmediateDataDependency(const SchedulingNode* node,
-                                                 const SchedulingNode* other) const {
-  return ContainsElement(node->GetDataPredecessors(), other);
-}
-
-bool SchedulingGraph::HasImmediateDataDependency(const HInstruction* instruction,
-                                                 const HInstruction* other_instruction) const {
-  const SchedulingNode* node = GetNode(instruction);
-  const SchedulingNode* other = GetNode(other_instruction);
-  if (node == nullptr || other == nullptr) {
-    // Both instructions must be in current basic block, i.e. the SchedulingGraph can see their
-    // corresponding SchedulingNode in the graph, and tell whether there is a dependency.
-    // Otherwise there is no dependency from SchedulingGraph's perspective, for example,
-    // instruction and other_instruction are in different basic blocks.
-    return false;
-  }
-  return HasImmediateDataDependency(node, other);
-}
-
-bool SchedulingGraph::HasImmediateOtherDependency(const SchedulingNode* node,
-                                                  const SchedulingNode* other) const {
-  return ContainsElement(node->GetOtherPredecessors(), other);
-}
-
-bool SchedulingGraph::HasImmediateOtherDependency(const HInstruction* instruction,
-                                                  const HInstruction* other_instruction) const {
-  const SchedulingNode* node = GetNode(instruction);
-  const SchedulingNode* other = GetNode(other_instruction);
-  if (node == nullptr || other == nullptr) {
-    // Both instructions must be in current basic block, i.e. the SchedulingGraph can see their
-    // corresponding SchedulingNode in the graph, and tell whether there is a dependency.
-    // Otherwise there is no dependency from SchedulingGraph's perspective, for example,
-    // instruction and other_instruction are in different basic blocks.
-    return false;
-  }
-  return HasImmediateOtherDependency(node, other);
+  AddCrossIterationDependencies(instruction_node);
 }
 
 static const std::string InstructionTypeId(const HInstruction* instruction) {
@@ -594,7 +578,7 @@
   ScopedArenaVector<SchedulingNode*> scheduling_nodes(allocator.Adapter(kArenaAllocScheduler));
 
   // Build the scheduling graph.
-  SchedulingGraph scheduling_graph(this, &allocator, heap_location_collector);
+  SchedulingGraph scheduling_graph(&allocator, heap_location_collector);
   for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
     HInstruction* instruction = it.Current();
     CHECK_EQ(instruction->GetBlock(), block)
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index d2dbeca..f7180a0 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -21,6 +21,7 @@
 
 #include "base/scoped_arena_allocator.h"
 #include "base/scoped_arena_containers.h"
+#include "base/stl_util.h"
 #include "base/time_utils.h"
 #include "code_generator.h"
 #include "load_store_analysis.h"
@@ -168,6 +169,10 @@
   }
 
   void AddDataPredecessor(SchedulingNode* predecessor) {
+    // Check whether the predecessor has been added earlier.
+    if (HasDataDependency(predecessor)) {
+      return;
+    }
     data_predecessors_.push_back(predecessor);
     predecessor->num_unscheduled_successors_++;
   }
@@ -177,6 +182,12 @@
   }
 
   void AddOtherPredecessor(SchedulingNode* predecessor) {
+    // Check whether the predecessor has been added earlier.
+    // As an optimization of the scheduling graph, we don't need to create another dependency if
+    // there is a data dependency between scheduling nodes.
+    if (HasOtherDependency(predecessor) || HasDataDependency(predecessor)) {
+      return;
+    }
     other_predecessors_.push_back(predecessor);
     predecessor->num_unscheduled_successors_++;
   }
@@ -205,6 +216,14 @@
   uint32_t GetCriticalPath() const { return critical_path_; }
   bool IsSchedulingBarrier() const { return is_scheduling_barrier_; }
 
+  bool HasDataDependency(const SchedulingNode* node) const {
+    return ContainsElement(data_predecessors_, node);
+  }
+
+  bool HasOtherDependency(const SchedulingNode* node) const {
+    return ContainsElement(other_predecessors_, node);
+  }
+
  private:
   // The latency of this node. It represents the latency between the moment the
   // last instruction for this node has executed to the moment the result
@@ -246,18 +265,67 @@
 };
 
 /*
+ * Provide analysis of instruction dependencies (side effects) which are not in a form of explicit
+ * def-use data dependencies.
+ */
+class SideEffectDependencyAnalysis {
+ public:
+  explicit SideEffectDependencyAnalysis(const HeapLocationCollector* heap_location_collector)
+      : memory_dependency_analysis_(heap_location_collector) {}
+
+  bool HasSideEffectDependency(HInstruction* instr1, HInstruction* instr2) const {
+    if (memory_dependency_analysis_.HasMemoryDependency(instr1, instr2)) {
+      return true;
+    }
+
+    // Even if above memory dependency check has passed, it is still necessary to
+    // check dependencies between instructions that can throw and instructions
+    // that write to memory.
+    if (HasExceptionDependency(instr1, instr2)) {
+      return true;
+    }
+
+    return false;
+  }
+
+ private:
+  static bool HasExceptionDependency(const HInstruction* instr1, const HInstruction* instr2);
+  static bool HasReorderingDependency(const HInstruction* instr1, const HInstruction* instr2);
+
+  /*
+   * Memory dependency analysis of instructions based on their memory side effects
+   * and heap location information from the LCA pass if it is provided.
+   */
+  class MemoryDependencyAnalysis {
+   public:
+    explicit MemoryDependencyAnalysis(const HeapLocationCollector* heap_location_collector)
+        : heap_location_collector_(heap_location_collector) {}
+
+    bool HasMemoryDependency(HInstruction* instr1, HInstruction* instr2) const;
+
+   private:
+    bool ArrayAccessMayAlias(HInstruction* instr1, HInstruction* instr2) const;
+    bool FieldAccessMayAlias(const HInstruction* instr1, const HInstruction* instr2) const;
+    size_t ArrayAccessHeapLocation(HInstruction* instruction) const;
+    size_t FieldAccessHeapLocation(const HInstruction* instruction) const;
+
+    const HeapLocationCollector* const heap_location_collector_;
+  };
+
+  MemoryDependencyAnalysis memory_dependency_analysis_;
+};
+
+/*
  * Directed acyclic graph for scheduling.
  */
 class SchedulingGraph : public ValueObject {
  public:
-  SchedulingGraph(const HScheduler* scheduler,
-                  ScopedArenaAllocator* allocator,
+  SchedulingGraph(ScopedArenaAllocator* allocator,
                   const HeapLocationCollector* heap_location_collector)
-      : scheduler_(scheduler),
-        allocator_(allocator),
+      : allocator_(allocator),
         contains_scheduling_barrier_(false),
         nodes_map_(allocator_->Adapter(kArenaAllocScheduler)),
-        heap_location_collector_(heap_location_collector) {}
+        side_effect_dependency_analysis_(heap_location_collector) {}
 
   SchedulingNode* AddNode(HInstruction* instr, bool is_scheduling_barrier = false) {
     std::unique_ptr<SchedulingNode> node(
@@ -265,7 +333,7 @@
     SchedulingNode* result = node.get();
     nodes_map_.insert(std::make_pair(instr, std::move(node)));
     contains_scheduling_barrier_ |= is_scheduling_barrier;
-    AddDependencies(instr, is_scheduling_barrier);
+    AddDependencies(result, is_scheduling_barrier);
     return result;
   }
 
@@ -278,13 +346,6 @@
     }
   }
 
-  bool IsSchedulingBarrier(const HInstruction* instruction) const;
-
-  bool HasImmediateDataDependency(const SchedulingNode* node, const SchedulingNode* other) const;
-  bool HasImmediateDataDependency(const HInstruction* node, const HInstruction* other) const;
-  bool HasImmediateOtherDependency(const SchedulingNode* node, const SchedulingNode* other) const;
-  bool HasImmediateOtherDependency(const HInstruction* node, const HInstruction* other) const;
-
   size_t Size() const {
     return nodes_map_.size();
   }
@@ -302,26 +363,33 @@
   void AddOtherDependency(SchedulingNode* node, SchedulingNode* dependency) {
     AddDependency(node, dependency, /*is_data_dependency*/false);
   }
-  bool HasMemoryDependency(HInstruction* node, HInstruction* other) const;
-  bool HasExceptionDependency(const HInstruction* node, const HInstruction* other) const;
-  bool HasSideEffectDependency(HInstruction* node, HInstruction* other) const;
-  bool ArrayAccessMayAlias(HInstruction* node, HInstruction* other) const;
-  bool FieldAccessMayAlias(const HInstruction* node, const HInstruction* other) const;
-  size_t ArrayAccessHeapLocation(HInstruction* instruction) const;
-  size_t FieldAccessHeapLocation(HInstruction* obj, const FieldInfo* field) const;
 
-  // Add dependencies nodes for the given `HInstruction`: inputs, environments, and side-effects.
-  void AddDependencies(HInstruction* instruction, bool is_scheduling_barrier = false);
+  // Analyze whether the scheduling node has cross-iteration dependencies which mean it uses
+  // values defined on the previous iteration.
+  //
+  // Supported cases:
+  //
+  //   L:
+  //     v2 = loop_head_phi(v1)
+  //     instr1(v2)
+  //     v1 = instr2
+  //     goto L
+  //
+  // In such cases moving instr2 before instr1 creates intersecting live ranges
+  // of v1 and v2. As a result a separate register is needed to keep the value
+  // defined by instr2 which is only used on the next iteration.
+  // If instr2 is not moved, no additional register is needed. The register
+  // used by instr1 is reused.
+  // To prevent such a situation an "other" dependency between instr1 and instr2 must be set.
+  void AddCrossIterationDependencies(SchedulingNode* node);
 
-  const HScheduler* const scheduler_;
+  // Add dependency nodes for the given `SchedulingNode`: inputs, environments, and side-effects.
+  void AddDependencies(SchedulingNode* node, bool is_scheduling_barrier = false);
 
   ScopedArenaAllocator* const allocator_;
-
   bool contains_scheduling_barrier_;
-
   ScopedArenaHashMap<const HInstruction*, std::unique_ptr<SchedulingNode>> nodes_map_;
-
-  const HeapLocationCollector* const heap_location_collector_;
+  SideEffectDependencyAnalysis side_effect_dependency_analysis_;
 };
 
 /*
@@ -477,10 +545,6 @@
   DISALLOW_COPY_AND_ASSIGN(HScheduler);
 };
 
-inline bool SchedulingGraph::IsSchedulingBarrier(const HInstruction* instruction) const {
-  return scheduler_->IsSchedulingBarrier(instruction);
-}
-
 class HInstructionScheduling : public HOptimization {
  public:
   HInstructionScheduling(HGraph* graph,
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index e0e265a..7835b1d 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -54,12 +54,6 @@
 #ifdef ART_ENABLE_CODEGEN_x86_64
     CodegenTargetConfig(InstructionSet::kX86_64, create_codegen_x86_64),
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips
-    CodegenTargetConfig(InstructionSet::kMips, create_codegen_mips),
-#endif
-#ifdef ART_ENABLE_CODEGEN_mips64
-    CodegenTargetConfig(InstructionSet::kMips64, create_codegen_mips64)
-#endif
   };
 
   for (const CodegenTargetConfig& test_config : test_config_candidates) {
@@ -146,9 +140,7 @@
     environment->SetRawEnvAt(1, mul);
     mul->AddEnvUseAt(div_check->GetEnvironment(), 1);
 
-    SchedulingGraph scheduling_graph(scheduler,
-                                     GetScopedAllocator(),
-                                     /* heap_location_collector= */ nullptr);
+    TestSchedulingGraph scheduling_graph(GetScopedAllocator());
     // Instructions must be inserted in reverse order into the scheduling graph.
     for (HInstruction* instr : ReverseRange(block_instructions)) {
       scheduling_graph.AddNode(instr);
@@ -283,7 +275,7 @@
     HeapLocationCollector heap_location_collector(graph_);
     heap_location_collector.VisitBasicBlock(entry);
     heap_location_collector.BuildAliasingMatrix();
-    SchedulingGraph scheduling_graph(scheduler, GetScopedAllocator(), &heap_location_collector);
+    TestSchedulingGraph scheduling_graph(GetScopedAllocator(), &heap_location_collector);
 
     for (HInstruction* instr : ReverseRange(block_instructions)) {
       // Build scheduling graph with memory access aliasing information
@@ -357,6 +349,41 @@
     scheduler->Schedule(graph_);
   }
 
+  class TestSchedulingGraph : public SchedulingGraph {
+   public:
+    explicit TestSchedulingGraph(ScopedArenaAllocator* allocator,
+                                 const HeapLocationCollector *heap_location_collector = nullptr)
+        : SchedulingGraph(allocator, heap_location_collector) {}
+
+    bool HasImmediateDataDependency(const HInstruction* instruction,
+                                    const HInstruction* other_instruction) const {
+      const SchedulingNode* node = GetNode(instruction);
+      const SchedulingNode* other = GetNode(other_instruction);
+      if (node == nullptr || other == nullptr) {
+        // Both instructions must be in current basic block, i.e. the SchedulingGraph can see their
+        // corresponding SchedulingNode in the graph, and tell whether there is a dependency.
+        // Otherwise there is no dependency from SchedulingGraph's perspective, for example,
+        // instruction and other_instruction are in different basic blocks.
+        return false;
+      }
+      return node->HasDataDependency(other);
+    }
+
+    bool HasImmediateOtherDependency(const HInstruction* instruction,
+                                     const HInstruction* other_instruction) const {
+      const SchedulingNode* node = GetNode(instruction);
+      const SchedulingNode* other = GetNode(other_instruction);
+      if (node == nullptr || other == nullptr) {
+        // Both instructions must be in current basic block, i.e. the SchedulingGraph can see their
+        // corresponding SchedulingNode in the graph, and tell whether there is a dependency.
+        // Otherwise there is no dependency from SchedulingGraph's perspective, for example,
+        // instruction and other_instruction are in different basic blocks.
+        return false;
+      }
+      return node->HasOtherDependency(other);
+    }
+  };
+
   HGraph* graph_;
 };
 
diff --git a/compiler/optimizing/select_generator_test.cc b/compiler/optimizing/select_generator_test.cc
index 6e65497..6e68c6c 100644
--- a/compiler/optimizing/select_generator_test.cc
+++ b/compiler/optimizing/select_generator_test.cc
@@ -25,6 +25,14 @@
 namespace art {
 
 class SelectGeneratorTest : public ImprovedOptimizingUnitTest {
+ private:
+  void CreateParameters() override {
+    parameters_.push_back(new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+                                                               dex::TypeIndex(0),
+                                                               0,
+                                                               DataType::Type::kInt32));
+  }
+
  public:
   void ConstructBasicGraphForSelect(HInstruction* instr) {
     HBasicBlock* if_block = new (GetAllocator()) HBasicBlock(graph_);
@@ -75,10 +83,10 @@
 // HDivZeroCheck might throw and should not be hoisted from the conditional to an unconditional.
 TEST_F(SelectGeneratorTest, testZeroCheck) {
   InitGraph();
-  HDivZeroCheck* instr = new (GetAllocator()) HDivZeroCheck(parameter_, 0);
+  HDivZeroCheck* instr = new (GetAllocator()) HDivZeroCheck(parameters_[0], 0);
   ConstructBasicGraphForSelect(instr);
 
-  ArenaVector<HInstruction*> current_locals({parameter_, graph_->GetIntConstant(1)},
+  ArenaVector<HInstruction*> current_locals({parameters_[0], graph_->GetIntConstant(1)},
                                             GetAllocator()->Adapter(kArenaAllocInstruction));
   ManuallyBuildEnvFor(instr, &current_locals);
 
@@ -88,7 +96,9 @@
 // Test that SelectGenerator succeeds with HAdd.
 TEST_F(SelectGeneratorTest, testAdd) {
   InitGraph();
-  HAdd* instr = new (GetAllocator()) HAdd(DataType::Type::kInt32, parameter_, parameter_, 0);
+  HAdd* instr = new (GetAllocator()) HAdd(DataType::Type::kInt32,
+                                          parameters_[0],
+                                          parameters_[0], 0);
   ConstructBasicGraphForSelect(instr);
   EXPECT_TRUE(CheckGraphAndTrySelectGenerator());
 }
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 8637db1..b8471e3 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -19,6 +19,7 @@
 #include "art_method-inl.h"
 #include "base/casts.h"
 #include "base/enums.h"
+#include "base/logging.h"
 #include "class_linker.h"
 #include "code_generator.h"
 #include "driver/compiler_options.h"
@@ -26,29 +27,29 @@
 #include "gc/heap.h"
 #include "gc/space/image_space.h"
 #include "handle_scope-inl.h"
+#include "jit/jit.h"
 #include "mirror/dex_cache.h"
 #include "mirror/string.h"
 #include "nodes.h"
 #include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
-#include "utils/dex_cache_arrays_layout-inl.h"
 
 namespace art {
 
 static bool IsInBootImage(ArtMethod* method) {
-  const std::vector<gc::space::ImageSpace*>& image_spaces =
-      Runtime::Current()->GetHeap()->GetBootImageSpaces();
-  for (gc::space::ImageSpace* image_space : image_spaces) {
-    const ImageSection& method_section = image_space->GetImageHeader().GetMethodsSection();
-    if (method_section.Contains(reinterpret_cast<uint8_t*>(method) - image_space->Begin())) {
-      return true;
-    }
-  }
-  return false;
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  DCHECK_EQ(heap->IsBootImageAddress(method),
+            std::any_of(heap->GetBootImageSpaces().begin(),
+                        heap->GetBootImageSpaces().end(),
+                        [=](gc::space::ImageSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
+                          return space->GetImageHeader().GetMethodsSection().Contains(
+                              reinterpret_cast<uint8_t*>(method) - space->Begin());
+                        }));
+  return heap->IsBootImageAddress(method);
 }
 
 static bool BootImageAOTCanEmbedMethod(ArtMethod* method, const CompilerOptions& compiler_options) {
-  DCHECK(compiler_options.IsBootImage());
+  DCHECK(compiler_options.IsBootImage() || compiler_options.IsBootImageExtension());
   ScopedObjectAccess soa(Thread::Current());
   ObjPtr<mirror::Class> klass = method->GetDeclaringClass();
   DCHECK(klass != nullptr);
@@ -86,10 +87,13 @@
     // Recursive call.
     method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRecursive;
     code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallSelf;
-  } else if (compiler_options.IsBootImage()) {
+  } else if (compiler_options.IsBootImage() || compiler_options.IsBootImageExtension()) {
     if (!compiler_options.GetCompilePic()) {
       // Test configuration, do not sharpen.
       method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall;
+    } else if (IsInBootImage(callee)) {
+      DCHECK(compiler_options.IsBootImageExtension());
+      method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo;
     } else if (BootImageAOTCanEmbedMethod(callee, compiler_options)) {
       method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kBootImageLinkTimePcRelative;
     } else {
@@ -98,11 +102,18 @@
     }
     code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
   } else if (Runtime::Current()->UseJitCompilation()) {
-    // JIT or on-device AOT compilation referencing a boot image method.
-    // Use the method address directly.
-    method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress;
-    method_load_data = reinterpret_cast<uintptr_t>(callee);
-    code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+    ScopedObjectAccess soa(Thread::Current());
+    if (Runtime::Current()->GetJit()->CanEncodeMethod(
+            callee,
+            codegen->GetGraph()->IsCompilingForSharedJitCode())) {
+      method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress;
+      method_load_data = reinterpret_cast<uintptr_t>(callee);
+      code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+    } else {
+      // Do not sharpen.
+      method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRuntimeCall;
+      code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+    }
   } else if (IsInBootImage(callee)) {
     // Use PC-relative access to the .data.bimg.rel.ro methods array.
     method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kBootImageRelRo;
@@ -152,19 +163,22 @@
     HLoadClass::LoadKind desired_load_kind = HLoadClass::LoadKind::kInvalid;
     Runtime* runtime = Runtime::Current();
     const CompilerOptions& compiler_options = codegen->GetCompilerOptions();
-    if (compiler_options.IsBootImage()) {
-      // Compiling boot image. Check if the class is a boot image class.
+    if (compiler_options.IsBootImage() || compiler_options.IsBootImageExtension()) {
+      // Compiling boot image or boot image extension. Check if the class is a boot image class.
       DCHECK(!runtime->UseJitCompilation());
       if (!compiler_options.GetCompilePic()) {
         // Test configuration, do not sharpen.
         desired_load_kind = HLoadClass::LoadKind::kRuntimeCall;
+      } else if (klass != nullptr && runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get())) {
+        DCHECK(compiler_options.IsBootImageExtension());
+        is_in_boot_image = true;
+        desired_load_kind = HLoadClass::LoadKind::kBootImageRelRo;
       } else if ((klass != nullptr) &&
                  compiler_options.IsImageClass(dex_file.StringByTypeIdx(type_index))) {
         is_in_boot_image = true;
         desired_load_kind = HLoadClass::LoadKind::kBootImageLinkTimePcRelative;
       } else {
         // Not a boot image class.
-        DCHECK(ContainsElement(compiler_options.GetDexFilesForOatFile(), &dex_file));
         desired_load_kind = HLoadClass::LoadKind::kBssEntry;
       }
     } else {
@@ -175,7 +189,16 @@
         if (is_in_boot_image) {
           desired_load_kind = HLoadClass::LoadKind::kJitBootImageAddress;
         } else if (klass != nullptr) {
-          desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
+          if (runtime->GetJit()->CanEncodeClass(
+                  klass.Get(),
+                  codegen->GetGraph()->IsCompilingForSharedJitCode())) {
+            desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
+          } else {
+            // Shared JIT code cannot encode a literal that the GC can move.
+            VLOG(jit) << "Unable to encode in shared region class literal: "
+                      << klass->PrettyClass();
+            desired_load_kind = HLoadClass::LoadKind::kRuntimeCall;
+          }
         } else {
           // Class not loaded yet. This happens when the dex code requesting
           // this `HLoadClass` hasn't been executed in the interpreter.
@@ -299,12 +322,11 @@
     ObjPtr<mirror::String> string = nullptr;
 
     const CompilerOptions& compiler_options = codegen->GetCompilerOptions();
-    if (compiler_options.IsBootImage()) {
-      // Compiling boot image. Resolve the string and allocate it if needed, to ensure
-      // the string will be added to the boot image.
+    if (compiler_options.IsBootImage() || compiler_options.IsBootImageExtension()) {
+      // Compiling boot image or boot image extension. Resolve the string and allocate it
+      // if needed, to ensure the string will be added to the boot image.
       DCHECK(!runtime->UseJitCompilation());
       if (compiler_options.GetCompilePic()) {
-        DCHECK(ContainsElement(compiler_options.GetDexFilesForOatFile(), &dex_file));
         if (compiler_options.IsForceDeterminism()) {
           // Strings for methods we're compiling should be pre-resolved but Strings in inlined
           // methods may not be if these inlined methods are not in the boot image profile.
@@ -319,7 +341,12 @@
           CHECK(string != nullptr);
         }
         if (string != nullptr) {
-          desired_load_kind = HLoadString::LoadKind::kBootImageLinkTimePcRelative;
+          if (runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
+            DCHECK(compiler_options.IsBootImageExtension());
+            desired_load_kind = HLoadString::LoadKind::kBootImageRelRo;
+          } else {
+            desired_load_kind = HLoadString::LoadKind::kBootImageLinkTimePcRelative;
+          }
         } else {
           desired_load_kind = HLoadString::LoadKind::kBssEntry;
         }
@@ -331,10 +358,18 @@
       DCHECK(!codegen->GetCompilerOptions().GetCompilePic());
       string = class_linker->LookupString(string_index, dex_cache.Get());
       if (string != nullptr) {
-        if (runtime->GetHeap()->ObjectIsInBootImageSpace(string)) {
+        gc::Heap* heap = runtime->GetHeap();
+        if (heap->ObjectIsInBootImageSpace(string)) {
           desired_load_kind = HLoadString::LoadKind::kJitBootImageAddress;
-        } else {
+        } else if (runtime->GetJit()->CanEncodeString(
+                  string,
+                  codegen->GetGraph()->IsCompilingForSharedJitCode())) {
           desired_load_kind = HLoadString::LoadKind::kJitTableAddress;
+        } else {
+          // Shared JIT code cannot encode a literal that the GC can move.
+          VLOG(jit) << "Unable to encode in shared region string literal: "
+                    << string->ToModifiedUtf8();
+          desired_load_kind = HLoadString::LoadKind::kRuntimeCall;
         }
       } else {
         desired_load_kind = HLoadString::LoadKind::kRuntimeCall;
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 0d0e1ec..a5e8ff6 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -496,6 +496,22 @@
   }
 }
 
+static bool HasPhiEquivalentAtLoopEntry(HGraph* graph) {
+  // Phi equivalents for a dex register do not work with OSR, as the phis will
+  // receive two different stack slots but only one is recorded in the stack
+  // map.
+  for (HBasicBlock* block : graph->GetReversePostOrder()) {
+    if (block->IsLoopHeader()) {
+      for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+        if (it.Current()->AsPhi()->HasEquivalentPhi()) {
+          return true;
+        }
+      }
+    }
+  }
+  return false;
+}
+
 GraphAnalysisResult SsaBuilder::BuildSsa() {
   DCHECK(!graph_->IsInSsaForm());
 
@@ -574,6 +590,10 @@
   // other optimizations.
   RemoveRedundantUninitializedStrings();
 
+  if (graph_->IsCompilingOsr() && HasPhiEquivalentAtLoopEntry(graph_)) {
+    return kAnalysisFailPhiEquivalentInOsr;
+  }
+
   graph_->SetInSsaForm();
   return kAnalysisSuccess;
 }
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index c883907..3ea2815 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -19,11 +19,11 @@
 
 #include <iostream>
 
+#include "base/intrusive_forward_list.h"
 #include "base/iteration_range.h"
 #include "base/scoped_arena_allocator.h"
 #include "base/scoped_arena_containers.h"
 #include "nodes.h"
-#include "utils/intrusive_forward_list.h"
 
 namespace art {
 
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 60ca61c..dd6d1a2 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -42,7 +42,8 @@
 void StackMapStream::BeginMethod(size_t frame_size_in_bytes,
                                  size_t core_spill_mask,
                                  size_t fp_spill_mask,
-                                 uint32_t num_dex_registers) {
+                                 uint32_t num_dex_registers,
+                                 bool baseline) {
   DCHECK(!in_method_) << "Mismatched Begin/End calls";
   in_method_ = true;
   DCHECK_EQ(packed_frame_size_, 0u) << "BeginMethod was already called";
@@ -52,6 +53,16 @@
   core_spill_mask_ = core_spill_mask;
   fp_spill_mask_ = fp_spill_mask;
   num_dex_registers_ = num_dex_registers;
+  baseline_ = baseline;
+
+  if (kVerifyStackMaps) {
+    dchecks_.emplace_back([=](const CodeInfo& code_info) {
+      DCHECK_EQ(code_info.packed_frame_size_, frame_size_in_bytes / kStackAlignment);
+      DCHECK_EQ(code_info.core_spill_mask_, core_spill_mask);
+      DCHECK_EQ(code_info.fp_spill_mask_, fp_spill_mask);
+      DCHECK_EQ(code_info.number_of_dex_registers_, num_dex_registers);
+    });
+  }
 }
 
 void StackMapStream::EndMethod() {
@@ -72,7 +83,8 @@
                                         uint32_t native_pc_offset,
                                         uint32_t register_mask,
                                         BitVector* stack_mask,
-                                        StackMap::Kind kind) {
+                                        StackMap::Kind kind,
+                                        bool needs_vreg_info) {
   DCHECK(in_method_) << "Call BeginMethod first";
   DCHECK(!in_stack_map_) << "Mismatched Begin/End calls";
   in_stack_map_ = true;
@@ -105,7 +117,7 @@
   lazy_stack_masks_.push_back(stack_mask);
   current_inline_infos_.clear();
   current_dex_registers_.clear();
-  expected_num_dex_registers_ = num_dex_registers_;
+  expected_num_dex_registers_ = needs_vreg_info  ? num_dex_registers_ : 0u;
 
   if (kVerifyStackMaps) {
     size_t stack_map_index = stack_maps_.size();
@@ -284,34 +296,39 @@
   }
 }
 
-template<typename Writer, typename Builder>
-ALWAYS_INLINE static void EncodeTable(Writer& out, const Builder& bit_table) {
-  out.WriteBit(false);  // Is not deduped.
-  bit_table.Encode(out);
-}
-
 ScopedArenaVector<uint8_t> StackMapStream::Encode() {
   DCHECK(in_stack_map_ == false) << "Mismatched Begin/End calls";
   DCHECK(in_inline_info_ == false) << "Mismatched Begin/End calls";
 
+  uint32_t flags = (inline_infos_.size() > 0) ? CodeInfo::kHasInlineInfo : 0;
+  flags |= baseline_ ? CodeInfo::kIsBaseline : 0;
+  uint32_t bit_table_flags = 0;
+  ForEachBitTable([&bit_table_flags](size_t i, auto bit_table) {
+    if (bit_table->size() != 0) {  // Record which bit-tables are stored.
+      bit_table_flags |= 1 << i;
+    }
+  });
+
   ScopedArenaVector<uint8_t> buffer(allocator_->Adapter(kArenaAllocStackMapStream));
   BitMemoryWriter<ScopedArenaVector<uint8_t>> out(&buffer);
-  out.WriteVarint(packed_frame_size_);
-  out.WriteVarint(core_spill_mask_);
-  out.WriteVarint(fp_spill_mask_);
-  out.WriteVarint(num_dex_registers_);
-  EncodeTable(out, stack_maps_);
-  EncodeTable(out, register_masks_);
-  EncodeTable(out, stack_masks_);
-  EncodeTable(out, inline_infos_);
-  EncodeTable(out, method_infos_);
-  EncodeTable(out, dex_register_masks_);
-  EncodeTable(out, dex_register_maps_);
-  EncodeTable(out, dex_register_catalog_);
+  out.WriteInterleavedVarints(std::array<uint32_t, CodeInfo::kNumHeaders>{
+    flags,
+    packed_frame_size_,
+    core_spill_mask_,
+    fp_spill_mask_,
+    num_dex_registers_,
+    bit_table_flags,
+  });
+  ForEachBitTable([&out](size_t, auto bit_table) {
+    if (bit_table->size() != 0) {  // Skip empty bit-tables.
+      bit_table->Encode(out);
+    }
+  });
 
   // Verify that we can load the CodeInfo and check some essentials.
-  CodeInfo code_info(buffer.data());
-  CHECK_EQ(code_info.Size(), buffer.size());
+  size_t number_of_read_bits;
+  CodeInfo code_info(buffer.data(), &number_of_read_bits);
+  CHECK_EQ(number_of_read_bits, out.NumberOfWrittenBits());
   CHECK_EQ(code_info.GetNumberOfStackMaps(), stack_maps_.size());
 
   // Verify all written data (usually only in debug builds).
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 01c6bf9..67f716c 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -40,10 +40,10 @@
       : allocator_(allocator),
         instruction_set_(instruction_set),
         stack_maps_(allocator),
-        inline_infos_(allocator),
-        method_infos_(allocator),
         register_masks_(allocator),
         stack_masks_(allocator),
+        inline_infos_(allocator),
+        method_infos_(allocator),
         dex_register_masks_(allocator),
         dex_register_maps_(allocator),
         dex_register_catalog_(allocator),
@@ -61,14 +61,16 @@
   void BeginMethod(size_t frame_size_in_bytes,
                    size_t core_spill_mask,
                    size_t fp_spill_mask,
-                   uint32_t num_dex_registers);
+                   uint32_t num_dex_registers,
+                   bool baseline = false);
   void EndMethod();
 
   void BeginStackMapEntry(uint32_t dex_pc,
                           uint32_t native_pc_offset,
                           uint32_t register_mask = 0,
                           BitVector* sp_mask = nullptr,
-                          StackMap::Kind kind = StackMap::Kind::Default);
+                          StackMap::Kind kind = StackMap::Kind::Default,
+                          bool needs_vreg_info = true);
   void EndStackMapEntry();
 
   void AddDexRegisterEntry(DexRegisterLocation::Kind kind, int32_t value) {
@@ -97,17 +99,33 @@
 
   void CreateDexRegisterMap();
 
+  // Invokes the callback with pointer of each BitTableBuilder field.
+  template<typename Callback>
+  void ForEachBitTable(Callback callback) {
+    size_t index = 0;
+    callback(index++, &stack_maps_);
+    callback(index++, &register_masks_);
+    callback(index++, &stack_masks_);
+    callback(index++, &inline_infos_);
+    callback(index++, &method_infos_);
+    callback(index++, &dex_register_masks_);
+    callback(index++, &dex_register_maps_);
+    callback(index++, &dex_register_catalog_);
+    CHECK_EQ(index, CodeInfo::kNumBitTables);
+  }
+
   ScopedArenaAllocator* allocator_;
   const InstructionSet instruction_set_;
   uint32_t packed_frame_size_ = 0;
   uint32_t core_spill_mask_ = 0;
   uint32_t fp_spill_mask_ = 0;
   uint32_t num_dex_registers_ = 0;
+  bool baseline_;
   BitTableBuilder<StackMap> stack_maps_;
-  BitTableBuilder<InlineInfo> inline_infos_;
-  BitTableBuilder<MethodInfo> method_infos_;
   BitTableBuilder<RegisterMask> register_masks_;
   BitmapTableBuilder stack_masks_;
+  BitTableBuilder<InlineInfo> inline_infos_;
+  BitTableBuilder<MethodInfo> method_infos_;
   BitmapTableBuilder dex_register_masks_;
   BitTableBuilder<DexRegisterMapInfo> dex_register_maps_;
   BitTableBuilder<DexRegisterInfo> dex_register_catalog_;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index d28f09f..0dd5773 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -689,10 +689,6 @@
       StackMap::PackNativePc(kX86InstructionAlignment, InstructionSet::kX86);
   uint32_t packed_x86_64 =
       StackMap::PackNativePc(kX86_64InstructionAlignment, InstructionSet::kX86_64);
-  uint32_t packed_mips =
-      StackMap::PackNativePc(kMipsInstructionAlignment, InstructionSet::kMips);
-  uint32_t packed_mips64 =
-      StackMap::PackNativePc(kMips64InstructionAlignment, InstructionSet::kMips64);
   EXPECT_EQ(StackMap::UnpackNativePc(packed_thumb2, InstructionSet::kThumb2),
             kThumb2InstructionAlignment);
   EXPECT_EQ(StackMap::UnpackNativePc(packed_arm64, InstructionSet::kArm64),
@@ -701,10 +697,6 @@
             kX86InstructionAlignment);
   EXPECT_EQ(StackMap::UnpackNativePc(packed_x86_64, InstructionSet::kX86_64),
             kX86_64InstructionAlignment);
-  EXPECT_EQ(StackMap::UnpackNativePc(packed_mips, InstructionSet::kMips),
-            kMipsInstructionAlignment);
-  EXPECT_EQ(StackMap::UnpackNativePc(packed_mips64, InstructionSet::kMips64),
-            kMips64InstructionAlignment);
 }
 
 TEST(StackMapTest, TestDeduplicateStackMask) {
diff --git a/compiler/optimizing/superblock_cloner_test.cc b/compiler/optimizing/superblock_cloner_test.cc
index aa19de6..ddcf154 100644
--- a/compiler/optimizing/superblock_cloner_test.cc
+++ b/compiler/optimizing/superblock_cloner_test.cc
@@ -31,6 +31,14 @@
 // This class provides methods and helpers for testing various cloning and copying routines:
 // individual instruction cloning and cloning of the more coarse-grain structures.
 class SuperblockClonerTest : public ImprovedOptimizingUnitTest {
+ private:
+  void CreateParameters() override {
+    parameters_.push_back(new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+                                                               dex::TypeIndex(0),
+                                                               0,
+                                                               DataType::Type::kInt32));
+  }
+
  public:
   void CreateBasicLoopControlFlow(HBasicBlock* position,
                                   HBasicBlock* successor,
@@ -75,7 +83,7 @@
     loop_header->AddInstruction(new (GetAllocator()) HIf(loop_check));
 
     // Loop body block.
-    HInstruction* null_check = new (GetAllocator()) HNullCheck(parameter_, dex_pc);
+    HInstruction* null_check = new (GetAllocator()) HNullCheck(parameters_[0], dex_pc);
     HInstruction* array_length = new (GetAllocator()) HArrayLength(null_check, dex_pc);
     HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(phi, array_length, dex_pc);
     HInstruction* array_get =
@@ -100,7 +108,7 @@
     graph_->SetHasBoundsChecks(true);
 
     // Adjust HEnvironment for each instruction which require that.
-    ArenaVector<HInstruction*> current_locals({phi, const_128, parameter_},
+    ArenaVector<HInstruction*> current_locals({phi, const_128, parameters_[0]},
                                               GetAllocator()->Adapter(kArenaAllocInstruction));
 
     HEnvironment* env = ManuallyBuildEnvFor(suspend_check, &current_locals);
@@ -421,7 +429,7 @@
   if_block->AddSuccessor(temp1);
   temp1->AddSuccessor(header);
 
-  if_block->AddInstruction(new (GetAllocator()) HIf(parameter_));
+  if_block->AddInstruction(new (GetAllocator()) HIf(parameters_[0]));
 
   HInstructionIterator it(header->GetPhis());
   DCHECK(!it.Done());
@@ -586,7 +594,7 @@
   // Change the loop3 - insert an exit which leads to loop1.
   HBasicBlock* loop3_extra_if_block = new (GetAllocator()) HBasicBlock(graph_);
   graph_->AddBlock(loop3_extra_if_block);
-  loop3_extra_if_block->AddInstruction(new (GetAllocator()) HIf(parameter_));
+  loop3_extra_if_block->AddInstruction(new (GetAllocator()) HIf(parameters_[0]));
 
   loop3_header->ReplaceSuccessor(loop_body3, loop3_extra_if_block);
   loop3_extra_if_block->AddSuccessor(loop_body1);  // Long exit.
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index 26aa434..0aaeaa5 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -28,14 +28,6 @@
 #include "utils/arm64/assembler_arm64.h"
 #endif
 
-#ifdef ART_ENABLE_CODEGEN_mips
-#include "utils/mips/assembler_mips.h"
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_mips64
-#include "utils/mips64/assembler_mips64.h"
-#endif
-
 #ifdef ART_ENABLE_CODEGEN_x86
 #include "utils/x86/assembler_x86.h"
 #endif
@@ -135,70 +127,6 @@
 }  // namespace arm64
 #endif  // ART_ENABLE_CODEGEN_arm64
 
-#ifdef ART_ENABLE_CODEGEN_mips
-namespace mips {
-static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
-    ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset32 offset) {
-  MipsAssembler assembler(allocator);
-
-  switch (abi) {
-    case kInterpreterAbi:  // Thread* is first argument (A0) in interpreter ABI.
-      __ LoadFromOffset(kLoadWord, T9, A0, offset.Int32Value());
-      break;
-    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (A0).
-      __ LoadFromOffset(kLoadWord, T9, A0, JNIEnvExt::SelfOffset(4).Int32Value());
-      __ LoadFromOffset(kLoadWord, T9, T9, offset.Int32Value());
-      break;
-    case kQuickAbi:  // S1 holds Thread*.
-      __ LoadFromOffset(kLoadWord, T9, S1, offset.Int32Value());
-  }
-  __ Jr(T9);
-  __ NopIfNoReordering();
-  __ Break();
-
-  __ FinalizeCode();
-  size_t cs = __ CodeSize();
-  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
-  MemoryRegion code(entry_stub->data(), entry_stub->size());
-  __ FinalizeInstructions(code);
-
-  return std::move(entry_stub);
-}
-}  // namespace mips
-#endif  // ART_ENABLE_CODEGEN_mips
-
-#ifdef ART_ENABLE_CODEGEN_mips64
-namespace mips64 {
-static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
-    ArenaAllocator* allocator, EntryPointCallingConvention abi, ThreadOffset64 offset) {
-  Mips64Assembler assembler(allocator);
-
-  switch (abi) {
-    case kInterpreterAbi:  // Thread* is first argument (A0) in interpreter ABI.
-      __ LoadFromOffset(kLoadDoubleword, T9, A0, offset.Int32Value());
-      break;
-    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (A0).
-      __ LoadFromOffset(kLoadDoubleword, T9, A0, JNIEnvExt::SelfOffset(8).Int32Value());
-      __ LoadFromOffset(kLoadDoubleword, T9, T9, offset.Int32Value());
-      break;
-    case kQuickAbi:  // Fall-through.
-      __ LoadFromOffset(kLoadDoubleword, T9, S1, offset.Int32Value());
-  }
-  __ Jr(T9);
-  __ Nop();
-  __ Break();
-
-  __ FinalizeCode();
-  size_t cs = __ CodeSize();
-  std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
-  MemoryRegion code(entry_stub->data(), entry_stub->size());
-  __ FinalizeInstructions(code);
-
-  return std::move(entry_stub);
-}
-}  // namespace mips64
-#endif  // ART_ENABLE_CODEGEN_mips
-
 #ifdef ART_ENABLE_CODEGEN_x86
 namespace x86 {
 static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocator* allocator,
@@ -251,10 +179,6 @@
     case InstructionSet::kArm64:
       return arm64::CreateTrampoline(&allocator, abi, offset);
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips64
-    case InstructionSet::kMips64:
-      return mips64::CreateTrampoline(&allocator, abi, offset);
-#endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
     case InstructionSet::kX86_64:
       return x86_64::CreateTrampoline(&allocator, offset);
@@ -278,10 +202,6 @@
     case InstructionSet::kThumb2:
       return arm::CreateTrampoline(&allocator, abi, offset);
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips
-    case InstructionSet::kMips:
-      return mips::CreateTrampoline(&allocator, abi, offset);
-#endif
 #ifdef ART_ENABLE_CODEGEN_x86
     case InstructionSet::kX86:
       UNUSED(abi);
diff --git a/compiler/utils/arm/assembler_arm_vixl.h b/compiler/utils/arm/assembler_arm_vixl.h
index 98c0191..59d7edd 100644
--- a/compiler/utils/arm/assembler_arm_vixl.h
+++ b/compiler/utils/arm/assembler_arm_vixl.h
@@ -22,6 +22,7 @@
 #include "base/arena_containers.h"
 #include "base/macros.h"
 #include "constants_arm.h"
+#include "dwarf/register.h"
 #include "offsets.h"
 #include "utils/arm/assembler_arm_shared.h"
 #include "utils/arm/managed_register_arm.h"
@@ -39,6 +40,14 @@
 namespace art {
 namespace arm {
 
+inline dwarf::Reg DWARFReg(vixl32::Register reg) {
+  return dwarf::Reg::ArmCore(static_cast<int>(reg.GetCode()));
+}
+
+inline dwarf::Reg DWARFReg(vixl32::SRegister reg) {
+  return dwarf::Reg::ArmFp(static_cast<int>(reg.GetCode()));
+}
+
 class ArmVIXLMacroAssembler final : public vixl32::MacroAssembler {
  public:
   // Most methods fit in a 1KB code buffer, which results in more optimal alloc/realloc and
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index c6c764e..ffb58ac 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -37,6 +37,10 @@
 #define ___   asm_.GetVIXLAssembler()->
 #endif
 
+// The AAPCS requires 8-byte alignement. This is not as strict as the Managed ABI stack alignment.
+static constexpr size_t kAapcsStackAlignment = 8u;
+static_assert(kAapcsStackAlignment < kStackAlignment);
+
 vixl::aarch32::Register AsVIXLRegister(ArmManagedRegister reg) {
   CHECK(reg.IsCoreRegister());
   return vixl::aarch32::Register(reg.RegId());
@@ -68,25 +72,22 @@
   asm_.FinalizeCode();
 }
 
-static dwarf::Reg DWARFReg(vixl32::Register reg) {
-  return dwarf::Reg::ArmCore(static_cast<int>(reg.GetCode()));
-}
-
-static dwarf::Reg DWARFReg(vixl32::SRegister reg) {
-  return dwarf::Reg::ArmFp(static_cast<int>(reg.GetCode()));
-}
-
 static constexpr size_t kFramePointerSize = static_cast<size_t>(kArmPointerSize);
 
 void ArmVIXLJNIMacroAssembler::BuildFrame(size_t frame_size,
                                           ManagedRegister method_reg,
                                           ArrayRef<const ManagedRegister> callee_save_regs,
                                           const ManagedRegisterEntrySpills& entry_spills) {
-  CHECK_ALIGNED(frame_size, kStackAlignment);
-  CHECK(r0.Is(AsVIXLRegister(method_reg.AsArm())));
+  // If we're creating an actual frame with the method, enforce managed stack alignment,
+  // otherwise only the native stack alignment.
+  if (method_reg.IsNoRegister()) {
+    CHECK_ALIGNED_PARAM(frame_size, kAapcsStackAlignment);
+  } else {
+    CHECK_ALIGNED_PARAM(frame_size, kStackAlignment);
+  }
 
   // Push callee saves and link register.
-  RegList core_spill_mask = 1 << LR;
+  RegList core_spill_mask = 0;
   uint32_t fp_spill_mask = 0;
   for (const ManagedRegister& reg : callee_save_regs) {
     if (reg.AsArm().IsCoreRegister()) {
@@ -95,9 +96,11 @@
       fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
     }
   }
-  ___ Push(RegisterList(core_spill_mask));
-  cfi().AdjustCFAOffset(POPCOUNT(core_spill_mask) * kFramePointerSize);
-  cfi().RelOffsetForMany(DWARFReg(r0), 0, core_spill_mask, kFramePointerSize);
+  if (core_spill_mask != 0u) {
+    ___ Push(RegisterList(core_spill_mask));
+    cfi().AdjustCFAOffset(POPCOUNT(core_spill_mask) * kFramePointerSize);
+    cfi().RelOffsetForMany(DWARFReg(r0), 0, core_spill_mask, kFramePointerSize);
+  }
   if (fp_spill_mask != 0) {
     uint32_t first = CTZ(fp_spill_mask);
 
@@ -111,12 +114,15 @@
 
   // Increase frame to required size.
   int pushed_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
-  // Must at least have space for Method*.
-  CHECK_GT(frame_size, pushed_values * kFramePointerSize);
+  // Must at least have space for Method* if we're going to spill it.
+  CHECK_GE(frame_size, (pushed_values + (method_reg.IsRegister() ? 1u : 0u)) * kFramePointerSize);
   IncreaseFrameSize(frame_size - pushed_values * kFramePointerSize);  // handles CFI as well.
 
-  // Write out Method*.
-  asm_.StoreToOffset(kStoreWord, r0, sp, 0);
+  if (method_reg.IsRegister()) {
+    // Write out Method*.
+    CHECK(r0.Is(AsVIXLRegister(method_reg.AsArm())));
+    asm_.StoreToOffset(kStoreWord, r0, sp, 0);
+  }
 
   // Write out entry spills.
   int32_t offset = frame_size + kFramePointerSize;
@@ -141,27 +147,27 @@
 void ArmVIXLJNIMacroAssembler::RemoveFrame(size_t frame_size,
                                            ArrayRef<const ManagedRegister> callee_save_regs,
                                            bool may_suspend) {
-  CHECK_ALIGNED(frame_size, kStackAlignment);
+  CHECK_ALIGNED(frame_size, kAapcsStackAlignment);
   cfi().RememberState();
 
-  // Compute callee saves to pop and LR.
-  RegList core_spill_mask = 1 << LR;
-  uint32_t fp_spill_mask = 0;
+  // Compute callee saves to pop.
+  RegList core_spill_mask = 0u;
+  uint32_t fp_spill_mask = 0u;
   for (const ManagedRegister& reg : callee_save_regs) {
     if (reg.AsArm().IsCoreRegister()) {
-      core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
+      core_spill_mask |= 1u << reg.AsArm().AsCoreRegister();
     } else {
-      fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
+      fp_spill_mask |= 1u << reg.AsArm().AsSRegister();
     }
   }
 
   // Decrease frame to start of callee saves.
-  int pop_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
-  CHECK_GT(frame_size, pop_values * kFramePointerSize);
+  size_t pop_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
+  CHECK_GE(frame_size, pop_values * kFramePointerSize);
   DecreaseFrameSize(frame_size - (pop_values * kFramePointerSize));  // handles CFI as well.
 
   // Pop FP callee saves.
-  if (fp_spill_mask != 0) {
+  if (fp_spill_mask != 0u) {
     uint32_t first = CTZ(fp_spill_mask);
     // Check that list is contiguous.
      DCHECK_EQ(fp_spill_mask >> CTZ(fp_spill_mask), ~0u >> (32 - POPCOUNT(fp_spill_mask)));
@@ -172,7 +178,9 @@
   }
 
   // Pop core callee saves and LR.
-  ___ Pop(RegisterList(core_spill_mask));
+  if (core_spill_mask != 0u) {
+    ___ Pop(RegisterList(core_spill_mask));
+  }
 
   if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
     if (may_suspend) {
@@ -181,11 +189,8 @@
     } else {
       // The method shall not be suspended; no need to refresh the Marking Register.
 
-      // Check that the Marking Register is a callee-save register,
-      // and thus has been preserved by native code following the
-      // AAPCS calling convention.
-      DCHECK_NE(core_spill_mask & (1 << MR), 0)
-          << "core_spill_mask should contain Marking Register R" << MR;
+      // The Marking Register is a callee-save register, and thus has been
+      // preserved by native code following the AAPCS calling convention.
 
       // The following condition is a compile-time one, so it does not have a run-time cost.
       if (kIsDebugBuild) {
@@ -214,13 +219,17 @@
 
 
 void ArmVIXLJNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
-  asm_.AddConstant(sp, -adjust);
-  cfi().AdjustCFAOffset(adjust);
+  if (adjust != 0u) {
+    asm_.AddConstant(sp, -adjust);
+    cfi().AdjustCFAOffset(adjust);
+  }
 }
 
 void ArmVIXLJNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
-  asm_.AddConstant(sp, adjust);
-  cfi().AdjustCFAOffset(-adjust);
+  if (adjust != 0u) {
+    asm_.AddConstant(sp, adjust);
+    cfi().AdjustCFAOffset(-adjust);
+  }
 }
 
 void ArmVIXLJNIMacroAssembler::Store(FrameOffset dest, ManagedRegister m_src, size_t size) {
@@ -570,6 +579,17 @@
   // TODO: not validating references.
 }
 
+void ArmVIXLJNIMacroAssembler::Jump(ManagedRegister mbase,
+                                    Offset offset,
+                                    ManagedRegister mscratch) {
+  vixl::aarch32::Register base = AsVIXLRegister(mbase.AsArm());
+  vixl::aarch32::Register scratch = AsVIXLRegister(mscratch.AsArm());
+  UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
+  temps.Exclude(scratch);
+  asm_.LoadFromOffset(kLoadWord, scratch, base, offset.Int32Value());
+  ___ Bx(scratch);
+}
+
 void ArmVIXLJNIMacroAssembler::Call(ManagedRegister mbase,
                                     Offset offset,
                                     ManagedRegister mscratch) {
@@ -610,7 +630,7 @@
 }
 
 void ArmVIXLJNIMacroAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
-  CHECK_ALIGNED(stack_adjust, kStackAlignment);
+  CHECK_ALIGNED(stack_adjust, kAapcsStackAlignment);
   vixl::aarch32::Register scratch = AsVIXLRegister(mscratch.AsArm());
   UseScratchRegisterScope temps(asm_.GetVIXLAssembler());
   temps.Exclude(scratch);
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index 0b1b6d2..1724671 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -181,6 +181,9 @@
   void VerifyObject(ManagedRegister src, bool could_be_null) override;
   void VerifyObject(FrameOffset src, bool could_be_null) override;
 
+  // Jump to address held at [base+offset] (used for tail calls).
+  void Jump(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+
   // Call to address held at [base+offset].
   void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
   void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index d7ade05..d722e00 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -49,6 +49,7 @@
   }
   if (art_features->HasFP16()) {
     features->Combine(vixl::CPUFeatures::kFPHalf);
+    features->Combine(vixl::CPUFeatures::kNEONHalf);
   }
   if (art_features->HasLSE()) {
     features->Combine(vixl::CPUFeatures::kAtomics);
@@ -103,15 +104,6 @@
   ___ Br(reg_x(scratch.AsXRegister()));
 }
 
-static inline dwarf::Reg DWARFReg(CPURegister reg) {
-  if (reg.IsFPRegister()) {
-    return dwarf::Reg::Arm64Fp(reg.GetCode());
-  } else {
-    DCHECK_LT(reg.GetCode(), 31u);  // X0 - X30.
-    return dwarf::Reg::Arm64Core(reg.GetCode());
-  }
-}
-
 void Arm64Assembler::SpillRegisters(CPURegList registers, int offset) {
   int size = registers.GetRegisterSizeInBytes();
   const Register sp = vixl_masm_.StackPointer();
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 9e01a70..fe2f176 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -25,6 +25,7 @@
 
 #include "base/arena_containers.h"
 #include "base/macros.h"
+#include "dwarf/register.h"
 #include "offsets.h"
 #include "utils/arm64/managed_register_arm64.h"
 #include "utils/assembler.h"
@@ -42,6 +43,15 @@
 
 namespace arm64 {
 
+static inline dwarf::Reg DWARFReg(vixl::aarch64::CPURegister reg) {
+  if (reg.IsFPRegister()) {
+    return dwarf::Reg::Arm64Fp(reg.GetCode());
+  } else {
+    DCHECK_LT(reg.GetCode(), 31u);  // X0 - X30.
+    return dwarf::Reg::Arm64Core(reg.GetCode());
+  }
+}
+
 #define MEM_OP(...)      vixl::aarch64::MemOperand(__VA_ARGS__)
 
 enum LoadOperandType {
@@ -140,12 +150,12 @@
     return vixl::aarch64::Register::GetWRegFromCode(code);
   }
 
-  static vixl::aarch64::FPRegister reg_d(int code) {
-    return vixl::aarch64::FPRegister::GetDRegFromCode(code);
+  static vixl::aarch64::VRegister reg_d(int code) {
+    return vixl::aarch64::VRegister::GetDRegFromCode(code);
   }
 
-  static vixl::aarch64::FPRegister reg_s(int code) {
-    return vixl::aarch64::FPRegister::GetSRegFromCode(code);
+  static vixl::aarch64::VRegister reg_s(int code) {
+    return vixl::aarch64::VRegister::GetSRegFromCode(code);
   }
 
  private:
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
index d6ce033..5b46971 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.cc
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -37,6 +37,10 @@
 #define reg_d(D) Arm64Assembler::reg_d(D)
 #define reg_s(S) Arm64Assembler::reg_s(S)
 
+// The AAPCS64 requires 16-byte alignement. This is the same as the Managed ABI stack alignment.
+static constexpr size_t kAapcs64StackAlignment = 16u;
+static_assert(kAapcs64StackAlignment == kStackAlignment);
+
 Arm64JNIMacroAssembler::~Arm64JNIMacroAssembler() {
 }
 
@@ -57,16 +61,20 @@
 
 // See Arm64 PCS Section 5.2.2.1.
 void Arm64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
-  CHECK_ALIGNED(adjust, kStackAlignment);
-  AddConstant(SP, -adjust);
-  cfi().AdjustCFAOffset(adjust);
+  if (adjust != 0u) {
+    CHECK_ALIGNED(adjust, kStackAlignment);
+    AddConstant(SP, -adjust);
+    cfi().AdjustCFAOffset(adjust);
+  }
 }
 
 // See Arm64 PCS Section 5.2.2.1.
 void Arm64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
-  CHECK_ALIGNED(adjust, kStackAlignment);
-  AddConstant(SP, adjust);
-  cfi().AdjustCFAOffset(-adjust);
+  if (adjust != 0u) {
+    CHECK_ALIGNED(adjust, kStackAlignment);
+    AddConstant(SP, adjust);
+    cfi().AdjustCFAOffset(-adjust);
+  }
 }
 
 void Arm64JNIMacroAssembler::AddConstant(XRegister rd, int32_t value, Condition cond) {
@@ -531,6 +539,15 @@
   // TODO: not validating references.
 }
 
+void Arm64JNIMacroAssembler::Jump(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
+  Arm64ManagedRegister base = m_base.AsArm64();
+  Arm64ManagedRegister scratch = m_scratch.AsArm64();
+  CHECK(base.IsXRegister()) << base;
+  CHECK(scratch.IsXRegister()) << scratch;
+  LoadFromOffset(scratch.AsXRegister(), base.AsXRegister(), offs.Int32Value());
+  ___ Br(reg_x(scratch.AsXRegister()));
+}
+
 void Arm64JNIMacroAssembler::Call(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
   Arm64ManagedRegister base = m_base.AsArm64();
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
@@ -689,7 +706,7 @@
                                         const ManagedRegisterEntrySpills& entry_spills) {
   // Setup VIXL CPURegList for callee-saves.
   CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
-  CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
+  CPURegList fp_reg_list(CPURegister::kVRegister, kDRegSize, 0);
   for (auto r : callee_save_regs) {
     Arm64ManagedRegister reg = r.AsArm64();
     if (reg.IsXRegister()) {
@@ -704,18 +721,20 @@
 
   // Increase frame to required size.
   DCHECK_ALIGNED(frame_size, kStackAlignment);
-  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
+  // Must at least have space for Method* if we're going to spill it.
+  DCHECK_GE(frame_size,
+            core_reg_size + fp_reg_size + (method_reg.IsRegister() ? kXRegSizeInBytes : 0u));
   IncreaseFrameSize(frame_size);
 
   // Save callee-saves.
   asm_.SpillRegisters(core_reg_list, frame_size - core_reg_size);
   asm_.SpillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
 
-  DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
-
-  // Write ArtMethod*
-  DCHECK(X0 == method_reg.AsArm64().AsXRegister());
-  StoreToOffset(X0, SP, 0);
+  if (method_reg.IsRegister()) {
+    // Write ArtMethod*
+    DCHECK(X0 == method_reg.AsArm64().AsXRegister());
+    StoreToOffset(X0, SP, 0);
+  }
 
   // Write out entry spills
   int32_t offset = frame_size + static_cast<size_t>(kArm64PointerSize);
@@ -745,7 +764,7 @@
                                          bool may_suspend) {
   // Setup VIXL CPURegList for callee-saves.
   CPURegList core_reg_list(CPURegister::kRegister, kXRegSize, 0);
-  CPURegList fp_reg_list(CPURegister::kFPRegister, kDRegSize, 0);
+  CPURegList fp_reg_list(CPURegister::kVRegister, kDRegSize, 0);
   for (auto r : callee_save_regs) {
     Arm64ManagedRegister reg = r.AsArm64();
     if (reg.IsXRegister()) {
@@ -760,10 +779,8 @@
 
   // For now we only check that the size of the frame is large enough to hold spills and method
   // reference.
-  DCHECK_GE(frame_size, core_reg_size + fp_reg_size + static_cast<size_t>(kArm64PointerSize));
-  DCHECK_ALIGNED(frame_size, kStackAlignment);
-
-  DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
+  DCHECK_GE(frame_size, core_reg_size + fp_reg_size);
+  DCHECK_ALIGNED(frame_size, kAapcs64StackAlignment);
 
   cfi().RememberState();
 
@@ -781,11 +798,8 @@
     } else {
       // The method shall not be suspended; no need to refresh the Marking Register.
 
-      // Check that the Marking Register is a callee-save register,
-      // and thus has been preserved by native code following the
-      // AAPCS64 calling convention.
-      DCHECK(core_reg_list.IncludesAliasOf(mr))
-          << "core_reg_list should contain Marking Register X" << mr.GetCode();
+      // The Marking Register is a callee-save register and thus has been
+      // preserved by native code following the AAPCS64 calling convention.
 
       // The following condition is a compile-time one, so it does not have a run-time cost.
       if (kIsDebugBuild) {
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index 45316ed..54592a3 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -162,6 +162,9 @@
   void VerifyObject(ManagedRegister src, bool could_be_null) override;
   void VerifyObject(FrameOffset src, bool could_be_null) override;
 
+  // Jump to address held at [base+offset] (used for tail calls).
+  void Jump(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+
   // Call to address held at [base+offset].
   void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
   void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index aa21f86..0744aec2 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -33,7 +33,6 @@
 #include "dwarf/debug_frame_opcode_writer.h"
 #include "label.h"
 #include "managed_register.h"
-#include "mips/constants_mips.h"
 #include "offsets.h"
 #include "x86/constants_x86.h"
 #include "x86_64/constants_x86_64.h"
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index 842716f..6475607 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -76,7 +76,7 @@
   "  f0:	f1bc 0f00 	cmp.w	ip, #0\n",
   "  f4:	bf18      	it	ne\n",
   "  f6:	f20d 4c01 	addwne	ip, sp, #1025	; 0x401\n",
-  "  fa:	f8d9 c09c 	ldr.w	ip, [r9, #156]	; 0x9c\n",
+  "  fa:	f8d9 c0a4 	ldr.w	ip, [r9, #164]	; 0xa4\n",
   "  fe:	f1bc 0f00 	cmp.w	ip, #0\n",
   " 102:	d171      	bne.n	1e8 <VixlJniHelpers+0x1e8>\n",
   " 104:	f8cd c7ff 	str.w	ip, [sp, #2047]	; 0x7ff\n",
@@ -153,7 +153,7 @@
   " 21c:	f8d9 8034 	ldr.w	r8, [r9, #52]	; 0x34\n",
   " 220:	4770      	bx	lr\n",
   " 222:	4660      	mov	r0, ip\n",
-  " 224:	f8d9 c2e4 	ldr.w	ip, [r9, #740]	; 0x2e4\n",
+  " 224:	f8d9 c2e8 	ldr.w	ip, [r9, #744]	; 0x2e8\n",
   " 228:	47e0      	blx	ip\n",
   nullptr
 };
diff --git a/compiler/utils/intrusive_forward_list.h b/compiler/utils/intrusive_forward_list.h
deleted file mode 100644
index ccdd32a..0000000
--- a/compiler/utils/intrusive_forward_list.h
+++ /dev/null
@@ -1,477 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_INTRUSIVE_FORWARD_LIST_H_
-#define ART_COMPILER_UTILS_INTRUSIVE_FORWARD_LIST_H_
-
-#include <stdint.h>
-#include <functional>
-#include <iterator>
-#include <memory>
-#include <type_traits>
-
-#include <android-base/logging.h>
-
-#include "base/casts.h"
-#include "base/macros.h"
-
-namespace art {
-
-struct IntrusiveForwardListHook {
-  IntrusiveForwardListHook() : next_hook(nullptr) { }
-  explicit IntrusiveForwardListHook(const IntrusiveForwardListHook* hook) : next_hook(hook) { }
-
-  // Allow copyable values but do not copy the hook, it is not part of the value.
-  IntrusiveForwardListHook(const IntrusiveForwardListHook& other ATTRIBUTE_UNUSED)
-      : next_hook(nullptr) { }
-  IntrusiveForwardListHook& operator=(const IntrusiveForwardListHook& src ATTRIBUTE_UNUSED) {
-    return *this;
-  }
-
-  mutable const IntrusiveForwardListHook* next_hook;
-};
-
-template <typename Derived, typename Tag = void>
-struct IntrusiveForwardListNode : public IntrusiveForwardListHook {
-};
-
-template <typename T, IntrusiveForwardListHook T::* NextPtr = &T::hook>
-class IntrusiveForwardListMemberHookTraits;
-
-template <typename T, typename Tag = void>
-class IntrusiveForwardListBaseHookTraits;
-
-template <typename T,
-          typename HookTraits =
-              IntrusiveForwardListBaseHookTraits<typename std::remove_const<T>::type>>
-class IntrusiveForwardList;
-
-template <typename T, typename HookTraits>
-class IntrusiveForwardListIterator : public std::iterator<std::forward_iterator_tag, T> {
- public:
-  // Construct/copy/destroy (except the private constructor used by IntrusiveForwardList<>).
-  IntrusiveForwardListIterator() : hook_(nullptr) { }
-  IntrusiveForwardListIterator(const IntrusiveForwardListIterator& src) = default;
-  IntrusiveForwardListIterator& operator=(const IntrusiveForwardListIterator& src) = default;
-
-  // Conversion from iterator to const_iterator.
-  template <typename OtherT,
-            typename = typename std::enable_if<std::is_same<T, const OtherT>::value>::type>
-  IntrusiveForwardListIterator(const IntrusiveForwardListIterator<OtherT, HookTraits>& src)  // NOLINT, implicit
-      : hook_(src.hook_) { }
-
-  // Iteration.
-  IntrusiveForwardListIterator& operator++() {
-    DCHECK(hook_ != nullptr);
-    hook_ = hook_->next_hook;
-    return *this;
-  }
-  IntrusiveForwardListIterator operator++(int) {
-    IntrusiveForwardListIterator tmp(*this);
-    ++*this;
-    return tmp;
-  }
-
-  // Dereference
-  T& operator*() const {
-    DCHECK(hook_ != nullptr);
-    return *HookTraits::GetValue(hook_);
-  }
-  T* operator->() const {
-    return &**this;
-  }
-
- private:
-  explicit IntrusiveForwardListIterator(const IntrusiveForwardListHook* hook) : hook_(hook) { }
-
-  const IntrusiveForwardListHook* hook_;
-
-  template <typename OtherT, typename OtherTraits>
-  friend class IntrusiveForwardListIterator;
-
-  template <typename OtherT, typename OtherTraits>
-  friend class IntrusiveForwardList;
-
-  template <typename OtherT1, typename OtherT2, typename OtherTraits>
-  friend typename std::enable_if<std::is_same<const OtherT1, const OtherT2>::value, bool>::type
-  operator==(const IntrusiveForwardListIterator<OtherT1, OtherTraits>& lhs,
-             const IntrusiveForwardListIterator<OtherT2, OtherTraits>& rhs);
-};
-
-template <typename T, typename OtherT, typename HookTraits>
-typename std::enable_if<std::is_same<const T, const OtherT>::value, bool>::type operator==(
-    const IntrusiveForwardListIterator<T, HookTraits>& lhs,
-    const IntrusiveForwardListIterator<OtherT, HookTraits>& rhs) {
-  return lhs.hook_ == rhs.hook_;
-}
-
-template <typename T, typename OtherT, typename HookTraits>
-typename std::enable_if<std::is_same<const T, const OtherT>::value, bool>::type operator!=(
-    const IntrusiveForwardListIterator<T, HookTraits>& lhs,
-    const IntrusiveForwardListIterator<OtherT, HookTraits>& rhs) {
-  return !(lhs == rhs);
-}
-
-// Intrusive version of std::forward_list<>. See also slist<> in Boost.Intrusive.
-//
-// This class template provides the same interface as std::forward_list<> as long
-// as the functions are meaningful for an intrusive container; this excludes emplace
-// functions and functions taking an std::initializer_list<> as the container does
-// not construct elements.
-template <typename T, typename HookTraits>
-class IntrusiveForwardList {
- public:
-  typedef HookTraits hook_traits;
-  typedef       T  value_type;
-  typedef       T& reference;
-  typedef const T& const_reference;
-  typedef       T* pointer;
-  typedef const T* const_pointer;
-  typedef IntrusiveForwardListIterator<      T, hook_traits> iterator;
-  typedef IntrusiveForwardListIterator<const T, hook_traits> const_iterator;
-
-  // Construct/copy/destroy.
-  IntrusiveForwardList() = default;
-  template <typename InputIterator>
-  IntrusiveForwardList(InputIterator first, InputIterator last) : IntrusiveForwardList() {
-    insert_after(before_begin(), first, last);
-  }
-  IntrusiveForwardList(IntrusiveForwardList&& src) : first_(src.first_.next_hook) {
-    src.first_.next_hook = nullptr;
-  }
-  IntrusiveForwardList& operator=(const IntrusiveForwardList& src) = delete;
-  IntrusiveForwardList& operator=(IntrusiveForwardList&& src) {
-    IntrusiveForwardList tmp(std::move(src));
-    tmp.swap(*this);
-    return *this;
-  }
-  ~IntrusiveForwardList() = default;
-
-  // Iterators.
-  iterator before_begin() { return iterator(&first_); }
-  const_iterator before_begin() const { return const_iterator(&first_); }
-  iterator begin() { return iterator(first_.next_hook); }
-  const_iterator begin() const { return const_iterator(first_.next_hook); }
-  iterator end() { return iterator(nullptr); }
-  const_iterator end() const { return const_iterator(nullptr); }
-  const_iterator cbefore_begin() const { return const_iterator(&first_); }
-  const_iterator cbegin() const { return const_iterator(first_.next_hook); }
-  const_iterator cend() const { return const_iterator(nullptr); }
-
-  // Capacity.
-  bool empty() const { return begin() == end(); }
-  size_t max_size() { return static_cast<size_t>(-1); }
-
-  // Element access.
-  reference front() { return *begin(); }
-  const_reference front() const { return *begin(); }
-
-  // Modifiers.
-  template <typename InputIterator>
-  void assign(InputIterator first, InputIterator last) {
-    IntrusiveForwardList tmp(first, last);
-    tmp.swap(*this);
-  }
-  void push_front(value_type& value) {
-    insert_after(before_begin(), value);
-  }
-  void pop_front() {
-    DCHECK(!empty());
-    erase_after(before_begin());
-  }
-  iterator insert_after(const_iterator position, value_type& value) {
-    const IntrusiveForwardListHook* new_hook = hook_traits::GetHook(&value);
-    new_hook->next_hook = position.hook_->next_hook;
-    position.hook_->next_hook = new_hook;
-    return iterator(new_hook);
-  }
-  template <typename InputIterator>
-  iterator insert_after(const_iterator position, InputIterator first, InputIterator last) {
-    while (first != last) {
-      position = insert_after(position, *first++);
-    }
-    return iterator(position.hook_);
-  }
-  iterator erase_after(const_iterator position) {
-    const_iterator last = position;
-    std::advance(last, 2);
-    return erase_after(position, last);
-  }
-  iterator erase_after(const_iterator position, const_iterator last) {
-    DCHECK(position != last);
-    position.hook_->next_hook = last.hook_;
-    return iterator(last.hook_);
-  }
-  void swap(IntrusiveForwardList& other) {
-    std::swap(first_.next_hook, other.first_.next_hook);
-  }
-  void clear() {
-    first_.next_hook = nullptr;
-  }
-
-  // Operations.
-  void splice_after(const_iterator position, IntrusiveForwardList& src) {
-    DCHECK(position != end());
-    splice_after(position, src, src.before_begin(), src.end());
-  }
-  void splice_after(const_iterator position, IntrusiveForwardList&& src) {
-    splice_after(position, src);  // Use l-value overload.
-  }
-  // Splice the element after `i`.
-  void splice_after(const_iterator position, IntrusiveForwardList& src, const_iterator i) {
-    // The standard specifies that this version does nothing if `position == i`
-    // or `position == ++i`. We must handle the latter here because the overload
-    // `splice_after(position, src, first, last)` does not allow `position` inside
-    // the range `(first, last)`.
-    if (++const_iterator(i) == position) {
-      return;
-    }
-    const_iterator last = i;
-    std::advance(last, 2);
-    splice_after(position, src, i, last);
-  }
-  // Splice the element after `i`.
-  void splice_after(const_iterator position, IntrusiveForwardList&& src, const_iterator i) {
-    splice_after(position, src, i);  // Use l-value overload.
-  }
-  // Splice elements between `first` and `last`, i.e. open range `(first, last)`.
-  void splice_after(const_iterator position,
-                    IntrusiveForwardList& src,
-                    const_iterator first,
-                    const_iterator last) {
-    DCHECK(position != end());
-    DCHECK(first != last);
-    if (++const_iterator(first) == last) {
-      // Nothing to do.
-      return;
-    }
-    // If position is just before end() and last is src.end(), we can finish this quickly.
-    if (++const_iterator(position) == end() && last == src.end()) {
-      position.hook_->next_hook = first.hook_->next_hook;
-      first.hook_->next_hook = nullptr;
-      return;
-    }
-    // Otherwise we need to find the position before last to fix up the hook.
-    const_iterator before_last = first;
-    while (++const_iterator(before_last) != last) {
-      ++before_last;
-    }
-    // Detach (first, last).
-    const IntrusiveForwardListHook* first_taken = first.hook_->next_hook;
-    first.hook_->next_hook = last.hook_;
-    // Attach the sequence to the new position.
-    before_last.hook_->next_hook = position.hook_->next_hook;
-    position.hook_->next_hook = first_taken;
-  }
-  // Splice elements between `first` and `last`, i.e. open range `(first, last)`.
-  void splice_after(const_iterator position,
-                    IntrusiveForwardList&& src,
-                    const_iterator first,
-                    const_iterator last) {
-    splice_after(position, src, first, last);  // Use l-value overload.
-  }
-  void remove(const value_type& value) {
-    remove_if([value](const value_type& v) { return value == v; });
-  }
-  template <typename Predicate>
-  void remove_if(Predicate pred) {
-    iterator prev = before_begin();
-    for (iterator current = begin(); current != end(); ++current) {
-      if (pred(*current)) {
-        erase_after(prev);
-        current = prev;
-      } else {
-        prev = current;
-      }
-    }
-  }
-  void unique() {
-    unique(std::equal_to<value_type>());
-  }
-  template <typename BinaryPredicate>
-  void unique(BinaryPredicate pred) {
-    if (!empty()) {
-      iterator prev = begin();
-      iterator current = prev;
-      ++current;
-      for (; current != end(); ++current) {
-        if (pred(*prev, *current)) {
-          erase_after(prev);
-          current = prev;
-        } else {
-          prev = current;
-        }
-      }
-    }
-  }
-  void merge(IntrusiveForwardList& other) {
-    merge(other, std::less<value_type>());
-  }
-  void merge(IntrusiveForwardList&& other) {
-    merge(other);  // Use l-value overload.
-  }
-  template <typename Compare>
-  void merge(IntrusiveForwardList& other, Compare cmp) {
-    iterator prev = before_begin();
-    iterator current = begin();
-    iterator other_prev = other.before_begin();
-    iterator other_current = other.begin();
-    while (current != end() && other_current != other.end()) {
-      if (cmp(*other_current, *current)) {
-        ++other_current;
-        splice_after(prev, other, other_prev);
-        ++prev;
-      } else {
-        prev = current;
-        ++current;
-      }
-      DCHECK(++const_iterator(prev) == current);
-      DCHECK(++const_iterator(other_prev) == other_current);
-    }
-    splice_after(prev, other);
-  }
-  template <typename Compare>
-  void merge(IntrusiveForwardList&& other, Compare cmp) {
-    merge(other, cmp);  // Use l-value overload.
-  }
-  void sort() {
-    sort(std::less<value_type>());
-  }
-  template <typename Compare>
-  void sort(Compare cmp) {
-    size_t n = std::distance(begin(), end());
-    if (n >= 2u) {
-      const_iterator middle = before_begin();
-      std::advance(middle, n / 2u);
-      IntrusiveForwardList second_half;
-      second_half.splice_after(second_half.before_begin(), *this, middle, end());
-      sort(cmp);
-      second_half.sort(cmp);
-      merge(second_half, cmp);
-    }
-  }
-  void reverse() {
-    IntrusiveForwardList reversed;
-    while (!empty()) {
-      value_type& value = front();
-      erase_after(before_begin());
-      reversed.insert_after(reversed.before_begin(), value);
-    }
-    reversed.swap(*this);
-  }
-
-  // Extensions.
-  bool HasExactlyOneElement() const {
-    return !empty() && ++begin() == end();
-  }
-  size_t SizeSlow() const {
-    return std::distance(begin(), end());
-  }
-  bool ContainsNode(const_reference node) const {
-    for (auto&& n : *this) {
-      if (std::addressof(n) == std::addressof(node)) {
-        return true;
-      }
-    }
-    return false;
-  }
-
- private:
-  static IntrusiveForwardListHook* ModifiableHook(const IntrusiveForwardListHook* hook) {
-    return const_cast<IntrusiveForwardListHook*>(hook);
-  }
-
-  IntrusiveForwardListHook first_;
-};
-
-template <typename T, typename HookTraits>
-void swap(IntrusiveForwardList<T, HookTraits>& lhs, IntrusiveForwardList<T, HookTraits>& rhs) {
-  lhs.swap(rhs);
-}
-
-template <typename T, typename HookTraits>
-bool operator==(const IntrusiveForwardList<T, HookTraits>& lhs,
-                const IntrusiveForwardList<T, HookTraits>& rhs) {
-  auto lit = lhs.begin();
-  auto rit = rhs.begin();
-  for (; lit != lhs.end() && rit != rhs.end(); ++lit, ++rit) {
-    if (*lit != *rit) {
-      return false;
-    }
-  }
-  return lit == lhs.end() && rit == rhs.end();
-}
-
-template <typename T, typename HookTraits>
-bool operator!=(const IntrusiveForwardList<T, HookTraits>& lhs,
-                const IntrusiveForwardList<T, HookTraits>& rhs) {
-  return !(lhs == rhs);
-}
-
-template <typename T, typename HookTraits>
-bool operator<(const IntrusiveForwardList<T, HookTraits>& lhs,
-               const IntrusiveForwardList<T, HookTraits>& rhs) {
-  return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
-}
-
-template <typename T, typename HookTraits>
-bool operator>(const IntrusiveForwardList<T, HookTraits>& lhs,
-               const IntrusiveForwardList<T, HookTraits>& rhs) {
-  return rhs < lhs;
-}
-
-template <typename T, typename HookTraits>
-bool operator<=(const IntrusiveForwardList<T, HookTraits>& lhs,
-                const IntrusiveForwardList<T, HookTraits>& rhs) {
-  return !(rhs < lhs);
-}
-
-template <typename T, typename HookTraits>
-bool operator>=(const IntrusiveForwardList<T, HookTraits>& lhs,
-                const IntrusiveForwardList<T, HookTraits>& rhs) {
-  return !(lhs < rhs);
-}
-
-template <typename T, IntrusiveForwardListHook T::* NextPtr>
-class IntrusiveForwardListMemberHookTraits {
- public:
-  static const IntrusiveForwardListHook* GetHook(const T* value) {
-    return &(value->*NextPtr);
-  }
-
-  static T* GetValue(const IntrusiveForwardListHook* hook) {
-    return reinterpret_cast<T*>(
-        reinterpret_cast<uintptr_t>(hook) - OFFSETOF_MEMBERPTR(T, NextPtr));
-  }
-};
-
-template <typename T, typename Tag>
-class IntrusiveForwardListBaseHookTraits {
- public:
-  static const IntrusiveForwardListHook* GetHook(const T* value) {
-    // Explicit conversion to the "node" followed by implicit conversion to the "hook".
-    return static_cast<const IntrusiveForwardListNode<T, Tag>*>(value);
-  }
-
-  static T* GetValue(const IntrusiveForwardListHook* hook) {
-    return down_cast<T*>(down_cast<IntrusiveForwardListNode<T, Tag>*>(
-        const_cast<IntrusiveForwardListHook*>(hook)));
-  }
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_UTILS_INTRUSIVE_FORWARD_LIST_H_
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc
index 5f405f3..d6d49f8 100644
--- a/compiler/utils/jni_macro_assembler.cc
+++ b/compiler/utils/jni_macro_assembler.cc
@@ -25,12 +25,6 @@
 #ifdef ART_ENABLE_CODEGEN_arm64
 #include "arm64/jni_macro_assembler_arm64.h"
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips
-#include "mips/assembler_mips.h"
-#endif
-#ifdef ART_ENABLE_CODEGEN_mips64
-#include "mips64/assembler_mips64.h"
-#endif
 #ifdef ART_ENABLE_CODEGEN_x86
 #include "x86/jni_macro_assembler_x86.h"
 #endif
@@ -50,9 +44,8 @@
     ArenaAllocator* allocator,
     InstructionSet instruction_set,
     const InstructionSetFeatures* instruction_set_features) {
-#ifndef ART_ENABLE_CODEGEN_mips
+  // TODO: Remove the parameter from API (not needed after Mips target was removed).
   UNUSED(instruction_set_features);
-#endif
 
   switch (instruction_set) {
 #ifdef ART_ENABLE_CODEGEN_arm
@@ -60,14 +53,6 @@
     case InstructionSet::kThumb2:
       return MacroAsm32UniquePtr(new (allocator) arm::ArmVIXLJNIMacroAssembler(allocator));
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips
-    case InstructionSet::kMips:
-      return MacroAsm32UniquePtr(new (allocator) mips::MipsAssembler(
-          allocator,
-          instruction_set_features != nullptr
-              ? instruction_set_features->AsMipsInstructionSetFeatures()
-              : nullptr));
-#endif
 #ifdef ART_ENABLE_CODEGEN_x86
     case InstructionSet::kX86:
       return MacroAsm32UniquePtr(new (allocator) x86::X86JNIMacroAssembler(allocator));
@@ -85,23 +70,14 @@
     ArenaAllocator* allocator,
     InstructionSet instruction_set,
     const InstructionSetFeatures* instruction_set_features) {
-#ifndef ART_ENABLE_CODEGEN_mips64
+  // TODO: Remove the parameter from API (not needed after Mips64 target was removed).
   UNUSED(instruction_set_features);
-#endif
 
   switch (instruction_set) {
 #ifdef ART_ENABLE_CODEGEN_arm64
     case InstructionSet::kArm64:
       return MacroAsm64UniquePtr(new (allocator) arm64::Arm64JNIMacroAssembler(allocator));
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips64
-    case InstructionSet::kMips64:
-      return MacroAsm64UniquePtr(new (allocator) mips64::Mips64Assembler(
-          allocator,
-          instruction_set_features != nullptr
-              ? instruction_set_features->AsMips64InstructionSetFeatures()
-              : nullptr));
-#endif
 #ifdef ART_ENABLE_CODEGEN_x86_64
     case InstructionSet::kX86_64:
       return MacroAsm64UniquePtr(new (allocator) x86_64::X86_64JNIMacroAssembler(allocator));
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
index e6130cf..bbe0f73 100644
--- a/compiler/utils/jni_macro_assembler.h
+++ b/compiler/utils/jni_macro_assembler.h
@@ -197,6 +197,9 @@
   virtual void VerifyObject(ManagedRegister src, bool could_be_null) = 0;
   virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0;
 
+  // Jump to address held at [base+offset] (used for tail calls).
+  virtual void Jump(ManagedRegister base, Offset offset, ManagedRegister scratch) = 0;
+
   // Call to address held at [base+offset]
   virtual void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) = 0;
   virtual void Call(FrameOffset base, Offset offset, ManagedRegister scratch) = 0;
diff --git a/compiler/utils/label.h b/compiler/utils/label.h
index 3c91b2f..9586a19 100644
--- a/compiler/utils/label.h
+++ b/compiler/utils/label.h
@@ -29,14 +29,6 @@
 namespace arm64 {
 class Arm64Assembler;
 }  // namespace arm64
-namespace mips {
-class MipsAssembler;
-class MipsLabel;
-}  // namespace mips
-namespace mips64 {
-class Mips64Assembler;
-class Mips64Label;
-}  // namespace mips64
 namespace x86 {
 class X86Assembler;
 class NearLabel;
@@ -115,10 +107,6 @@
   }
 
   friend class arm64::Arm64Assembler;
-  friend class mips::MipsAssembler;
-  friend class mips::MipsLabel;
-  friend class mips64::Mips64Assembler;
-  friend class mips64::Mips64Label;
   friend class x86::X86Assembler;
   friend class x86::NearLabel;
   friend class x86_64::X86_64Assembler;
diff --git a/compiler/utils/managed_register.h b/compiler/utils/managed_register.h
index db9c36c..f20750b 100644
--- a/compiler/utils/managed_register.h
+++ b/compiler/utils/managed_register.h
@@ -30,12 +30,6 @@
 namespace arm64 {
 class Arm64ManagedRegister;
 }  // namespace arm64
-namespace mips {
-class MipsManagedRegister;
-}  // namespace mips
-namespace mips64 {
-class Mips64ManagedRegister;
-}  // namespace mips64
 
 namespace x86 {
 class X86ManagedRegister;
@@ -56,8 +50,6 @@
 
   constexpr arm::ArmManagedRegister AsArm() const;
   constexpr arm64::Arm64ManagedRegister AsArm64() const;
-  constexpr mips::MipsManagedRegister AsMips() const;
-  constexpr mips64::Mips64ManagedRegister AsMips64() const;
   constexpr x86::X86ManagedRegister AsX86() const;
   constexpr x86_64::X86_64ManagedRegister AsX86_64() const;
 
@@ -66,6 +58,10 @@
     return id_ == other.id_;
   }
 
+  constexpr bool IsRegister() const {
+    return id_ != kNoRegister;
+  }
+
   constexpr bool IsNoRegister() const {
     return id_ == kNoRegister;
   }
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
deleted file mode 100644
index a9d1a25..0000000
--- a/compiler/utils/mips/assembler_mips.cc
+++ /dev/null
@@ -1,5260 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "assembler_mips.h"
-
-#include "base/bit_utils.h"
-#include "base/casts.h"
-#include "base/memory_region.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "entrypoints/quick/quick_entrypoints_enum.h"
-#include "thread.h"
-
-namespace art {
-namespace mips {
-
-static_assert(static_cast<size_t>(kMipsPointerSize) == kMipsWordSize,
-              "Unexpected Mips pointer size.");
-static_assert(kMipsPointerSize == PointerSize::k32, "Unexpected Mips pointer size.");
-
-
-std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
-  if (rhs >= D0 && rhs < kNumberOfDRegisters) {
-    os << "d" << static_cast<int>(rhs);
-  } else {
-    os << "DRegister[" << static_cast<int>(rhs) << "]";
-  }
-  return os;
-}
-
-MipsAssembler::DelaySlot::DelaySlot()
-    : instruction_(0),
-      patcher_label_(nullptr) {}
-
-InOutRegMasks& MipsAssembler::DsFsmInstr(uint32_t instruction, MipsLabel* patcher_label) {
-  if (!reordering_) {
-    CHECK_EQ(ds_fsm_state_, kExpectingLabel);
-    CHECK_EQ(delay_slot_.instruction_, 0u);
-    return delay_slot_.masks_;
-  }
-  switch (ds_fsm_state_) {
-    case kExpectingLabel:
-      break;
-    case kExpectingInstruction:
-      CHECK_EQ(ds_fsm_target_pc_ + sizeof(uint32_t), buffer_.Size());
-      // If the last instruction is not suitable for delay slots, drop
-      // the PC of the label preceding it so that no unconditional branch
-      // uses this instruction to fill its delay slot.
-      if (instruction == 0) {
-        DsFsmDropLabel();  // Sets ds_fsm_state_ = kExpectingLabel.
-      } else {
-        // Otherwise wait for another instruction or label before we can
-        // commit the label PC. The label PC will be dropped if instead
-        // of another instruction or label there's a call from the code
-        // generator to CodePosition() to record the buffer size.
-        // Instructions after which the buffer size is recorded cannot
-        // be moved into delay slots or anywhere else because they may
-        // trigger signals and the signal handlers expect these signals
-        // to be coming from the instructions immediately preceding the
-        // recorded buffer locations.
-        ds_fsm_state_ = kExpectingCommit;
-      }
-      break;
-    case kExpectingCommit:
-      CHECK_EQ(ds_fsm_target_pc_ + 2 * sizeof(uint32_t), buffer_.Size());
-      DsFsmCommitLabel();  // Sets ds_fsm_state_ = kExpectingLabel.
-      break;
-  }
-  delay_slot_.instruction_ = instruction;
-  delay_slot_.masks_ = InOutRegMasks();
-  delay_slot_.patcher_label_ = patcher_label;
-  return delay_slot_.masks_;
-}
-
-void MipsAssembler::DsFsmLabel() {
-  if (!reordering_) {
-    CHECK_EQ(ds_fsm_state_, kExpectingLabel);
-    CHECK_EQ(delay_slot_.instruction_, 0u);
-    return;
-  }
-  switch (ds_fsm_state_) {
-    case kExpectingLabel:
-      ds_fsm_target_pc_ = buffer_.Size();
-      ds_fsm_state_ = kExpectingInstruction;
-      break;
-    case kExpectingInstruction:
-      // Allow consecutive labels.
-      CHECK_EQ(ds_fsm_target_pc_, buffer_.Size());
-      break;
-    case kExpectingCommit:
-      CHECK_EQ(ds_fsm_target_pc_ + sizeof(uint32_t), buffer_.Size());
-      DsFsmCommitLabel();
-      ds_fsm_target_pc_ = buffer_.Size();
-      ds_fsm_state_ = kExpectingInstruction;
-      break;
-  }
-  // We cannot move instructions into delay slots across labels.
-  delay_slot_.instruction_ = 0;
-}
-
-void MipsAssembler::DsFsmCommitLabel() {
-  if (ds_fsm_state_ == kExpectingCommit) {
-    ds_fsm_target_pcs_.emplace_back(ds_fsm_target_pc_);
-  }
-  ds_fsm_state_ = kExpectingLabel;
-}
-
-void MipsAssembler::DsFsmDropLabel() {
-  ds_fsm_state_ = kExpectingLabel;
-}
-
-bool MipsAssembler::SetReorder(bool enable) {
-  bool last_state = reordering_;
-  if (last_state != enable) {
-    DsFsmCommitLabel();
-    DsFsmInstrNop(0);
-  }
-  reordering_ = enable;
-  return last_state;
-}
-
-size_t MipsAssembler::CodePosition() {
-  // The last instruction cannot be used in a delay slot, do not commit
-  // the label before it (if any) and clear the delay slot.
-  DsFsmDropLabel();
-  DsFsmInstrNop(0);
-  size_t size = buffer_.Size();
-  // In theory we can get the following sequence:
-  //   label1:
-  //     instr
-  //   label2: # label1 gets committed when label2 is seen
-  //     CodePosition() call
-  // and we need to uncommit label1.
-  if (ds_fsm_target_pcs_.size() != 0 && ds_fsm_target_pcs_.back() + sizeof(uint32_t) == size) {
-    ds_fsm_target_pcs_.pop_back();
-  }
-  return size;
-}
-
-void MipsAssembler::DsFsmInstrNop(uint32_t instruction ATTRIBUTE_UNUSED) {
-  DsFsmInstr(0);
-}
-
-void MipsAssembler::FinalizeCode() {
-  for (auto& exception_block : exception_blocks_) {
-    EmitExceptionPoll(&exception_block);
-  }
-  // Commit the last branch target label (if any) and disable instruction reordering.
-  DsFsmCommitLabel();
-  SetReorder(false);
-  EmitLiterals();
-  ReserveJumpTableSpace();
-  PromoteBranches();
-}
-
-void MipsAssembler::FinalizeInstructions(const MemoryRegion& region) {
-  size_t number_of_delayed_adjust_pcs = cfi().NumberOfDelayedAdvancePCs();
-  EmitBranches();
-  EmitJumpTables();
-  Assembler::FinalizeInstructions(region);
-  PatchCFI(number_of_delayed_adjust_pcs);
-}
-
-void MipsAssembler::PatchCFI(size_t number_of_delayed_adjust_pcs) {
-  if (cfi().NumberOfDelayedAdvancePCs() == 0u) {
-    DCHECK_EQ(number_of_delayed_adjust_pcs, 0u);
-    return;
-  }
-
-  using DelayedAdvancePC = DebugFrameOpCodeWriterForAssembler::DelayedAdvancePC;
-  const auto data = cfi().ReleaseStreamAndPrepareForDelayedAdvancePC();
-  const std::vector<uint8_t>& old_stream = data.first;
-  const std::vector<DelayedAdvancePC>& advances = data.second;
-
-  // PCs recorded before EmitBranches() need to be adjusted.
-  // PCs recorded during EmitBranches() are already adjusted.
-  // Both ranges are separately sorted but they may overlap.
-  if (kIsDebugBuild) {
-    auto cmp = [](const DelayedAdvancePC& lhs, const DelayedAdvancePC& rhs) {
-      return lhs.pc < rhs.pc;
-    };
-    CHECK(std::is_sorted(advances.begin(), advances.begin() + number_of_delayed_adjust_pcs, cmp));
-    CHECK(std::is_sorted(advances.begin() + number_of_delayed_adjust_pcs, advances.end(), cmp));
-  }
-
-  // Append initial CFI data if any.
-  size_t size = advances.size();
-  DCHECK_NE(size, 0u);
-  cfi().AppendRawData(old_stream, 0u, advances[0].stream_pos);
-  // Emit PC adjustments interleaved with the old CFI stream.
-  size_t adjust_pos = 0u;
-  size_t late_emit_pos = number_of_delayed_adjust_pcs;
-  while (adjust_pos != number_of_delayed_adjust_pcs || late_emit_pos != size) {
-    size_t adjusted_pc = (adjust_pos != number_of_delayed_adjust_pcs)
-        ? GetAdjustedPosition(advances[adjust_pos].pc)
-        : static_cast<size_t>(-1);
-    size_t late_emit_pc = (late_emit_pos != size)
-        ? advances[late_emit_pos].pc
-        : static_cast<size_t>(-1);
-    size_t advance_pc = std::min(adjusted_pc, late_emit_pc);
-    DCHECK_NE(advance_pc, static_cast<size_t>(-1));
-    size_t entry = (adjusted_pc <= late_emit_pc) ? adjust_pos : late_emit_pos;
-    if (adjusted_pc <= late_emit_pc) {
-      ++adjust_pos;
-    } else {
-      ++late_emit_pos;
-    }
-    cfi().AdvancePC(advance_pc);
-    size_t end_pos = (entry + 1u == size) ? old_stream.size() : advances[entry + 1u].stream_pos;
-    cfi().AppendRawData(old_stream, advances[entry].stream_pos, end_pos);
-  }
-}
-
-void MipsAssembler::EmitBranches() {
-  CHECK(!overwriting_);
-  CHECK(!reordering_);
-  // Now that everything has its final position in the buffer (the branches have
-  // been promoted), adjust the target label PCs.
-  for (size_t cnt = ds_fsm_target_pcs_.size(), i = 0; i < cnt; i++) {
-    ds_fsm_target_pcs_[i] = GetAdjustedPosition(ds_fsm_target_pcs_[i]);
-  }
-  // Switch from appending instructions at the end of the buffer to overwriting
-  // existing instructions (branch placeholders) in the buffer.
-  overwriting_ = true;
-  for (size_t id = 0; id < branches_.size(); id++) {
-    EmitBranch(id);
-  }
-  overwriting_ = false;
-}
-
-void MipsAssembler::Emit(uint32_t value) {
-  if (overwriting_) {
-    // Branches to labels are emitted into their placeholders here.
-    buffer_.Store<uint32_t>(overwrite_location_, value);
-    overwrite_location_ += sizeof(uint32_t);
-  } else {
-    // Other instructions are simply appended at the end here.
-    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
-    buffer_.Emit<uint32_t>(value);
-  }
-}
-
-uint32_t MipsAssembler::EmitR(int opcode,
-                              Register rs,
-                              Register rt,
-                              Register rd,
-                              int shamt,
-                              int funct) {
-  CHECK_NE(rs, kNoRegister);
-  CHECK_NE(rt, kNoRegister);
-  CHECK_NE(rd, kNoRegister);
-  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
-                      static_cast<uint32_t>(rs) << kRsShift |
-                      static_cast<uint32_t>(rt) << kRtShift |
-                      static_cast<uint32_t>(rd) << kRdShift |
-                      shamt << kShamtShift |
-                      funct;
-  Emit(encoding);
-  return encoding;
-}
-
-uint32_t MipsAssembler::EmitI(int opcode, Register rs, Register rt, uint16_t imm) {
-  CHECK_NE(rs, kNoRegister);
-  CHECK_NE(rt, kNoRegister);
-  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
-                      static_cast<uint32_t>(rs) << kRsShift |
-                      static_cast<uint32_t>(rt) << kRtShift |
-                      imm;
-  Emit(encoding);
-  return encoding;
-}
-
-uint32_t MipsAssembler::EmitI21(int opcode, Register rs, uint32_t imm21) {
-  CHECK_NE(rs, kNoRegister);
-  CHECK(IsUint<21>(imm21)) << imm21;
-  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
-                      static_cast<uint32_t>(rs) << kRsShift |
-                      imm21;
-  Emit(encoding);
-  return encoding;
-}
-
-uint32_t MipsAssembler::EmitI26(int opcode, uint32_t imm26) {
-  CHECK(IsUint<26>(imm26)) << imm26;
-  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift | imm26;
-  Emit(encoding);
-  return encoding;
-}
-
-uint32_t MipsAssembler::EmitFR(int opcode,
-                               int fmt,
-                               FRegister ft,
-                               FRegister fs,
-                               FRegister fd,
-                               int funct) {
-  CHECK_NE(ft, kNoFRegister);
-  CHECK_NE(fs, kNoFRegister);
-  CHECK_NE(fd, kNoFRegister);
-  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
-                      fmt << kFmtShift |
-                      static_cast<uint32_t>(ft) << kFtShift |
-                      static_cast<uint32_t>(fs) << kFsShift |
-                      static_cast<uint32_t>(fd) << kFdShift |
-                      funct;
-  Emit(encoding);
-  return encoding;
-}
-
-uint32_t MipsAssembler::EmitFI(int opcode, int fmt, FRegister ft, uint16_t imm) {
-  CHECK_NE(ft, kNoFRegister);
-  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
-                      fmt << kFmtShift |
-                      static_cast<uint32_t>(ft) << kFtShift |
-                      imm;
-  Emit(encoding);
-  return encoding;
-}
-
-uint32_t MipsAssembler::EmitMsa3R(int operation,
-                                  int df,
-                                  VectorRegister wt,
-                                  VectorRegister ws,
-                                  VectorRegister wd,
-                                  int minor_opcode) {
-  CHECK_NE(wt, kNoVectorRegister);
-  CHECK_NE(ws, kNoVectorRegister);
-  CHECK_NE(wd, kNoVectorRegister);
-  uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
-                      operation << kMsaOperationShift |
-                      df << kDfShift |
-                      static_cast<uint32_t>(wt) << kWtShift |
-                      static_cast<uint32_t>(ws) << kWsShift |
-                      static_cast<uint32_t>(wd) << kWdShift |
-                      minor_opcode;
-  Emit(encoding);
-  return encoding;
-}
-
-uint32_t MipsAssembler::EmitMsaBIT(int operation,
-                                   int df_m,
-                                   VectorRegister ws,
-                                   VectorRegister wd,
-                                   int minor_opcode) {
-  CHECK_NE(ws, kNoVectorRegister);
-  CHECK_NE(wd, kNoVectorRegister);
-  uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
-                      operation << kMsaOperationShift |
-                      df_m << kDfMShift |
-                      static_cast<uint32_t>(ws) << kWsShift |
-                      static_cast<uint32_t>(wd) << kWdShift |
-                      minor_opcode;
-  Emit(encoding);
-  return encoding;
-}
-
-uint32_t MipsAssembler::EmitMsaELM(int operation,
-                                   int df_n,
-                                   VectorRegister ws,
-                                   VectorRegister wd,
-                                   int minor_opcode) {
-  CHECK_NE(ws, kNoVectorRegister);
-  CHECK_NE(wd, kNoVectorRegister);
-  uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
-                      operation << kMsaELMOperationShift |
-                      df_n << kDfNShift |
-                      static_cast<uint32_t>(ws) << kWsShift |
-                      static_cast<uint32_t>(wd) << kWdShift |
-                      minor_opcode;
-  Emit(encoding);
-  return encoding;
-}
-
-uint32_t MipsAssembler::EmitMsaMI10(int s10,
-                                    Register rs,
-                                    VectorRegister wd,
-                                    int minor_opcode,
-                                    int df) {
-  CHECK_NE(rs, kNoRegister);
-  CHECK_NE(wd, kNoVectorRegister);
-  CHECK(IsUint<10>(s10)) << s10;
-  uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
-                      s10 << kS10Shift |
-                      static_cast<uint32_t>(rs) << kWsShift |
-                      static_cast<uint32_t>(wd) << kWdShift |
-                      minor_opcode << kS10MinorShift |
-                      df;
-  Emit(encoding);
-  return encoding;
-}
-
-uint32_t MipsAssembler::EmitMsaI10(int operation,
-                                   int df,
-                                   int i10,
-                                   VectorRegister wd,
-                                   int minor_opcode) {
-  CHECK_NE(wd, kNoVectorRegister);
-  CHECK(IsUint<10>(i10)) << i10;
-  uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
-                      operation << kMsaOperationShift |
-                      df << kDfShift |
-                      i10 << kI10Shift |
-                      static_cast<uint32_t>(wd) << kWdShift |
-                      minor_opcode;
-  Emit(encoding);
-  return encoding;
-}
-
-uint32_t MipsAssembler::EmitMsa2R(int operation,
-                                  int df,
-                                  VectorRegister ws,
-                                  VectorRegister wd,
-                                  int minor_opcode) {
-  CHECK_NE(ws, kNoVectorRegister);
-  CHECK_NE(wd, kNoVectorRegister);
-  uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
-                      operation << kMsa2ROperationShift |
-                      df << kDf2RShift |
-                      static_cast<uint32_t>(ws) << kWsShift |
-                      static_cast<uint32_t>(wd) << kWdShift |
-                      minor_opcode;
-  Emit(encoding);
-  return encoding;
-}
-
-uint32_t MipsAssembler::EmitMsa2RF(int operation,
-                                   int df,
-                                   VectorRegister ws,
-                                   VectorRegister wd,
-                                   int minor_opcode) {
-  CHECK_NE(ws, kNoVectorRegister);
-  CHECK_NE(wd, kNoVectorRegister);
-  uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
-                      operation << kMsa2RFOperationShift |
-                      df << kDf2RShift |
-                      static_cast<uint32_t>(ws) << kWsShift |
-                      static_cast<uint32_t>(wd) << kWdShift |
-                      minor_opcode;
-  Emit(encoding);
-  return encoding;
-}
-
-void MipsAssembler::Addu(Register rd, Register rs, Register rt) {
-  DsFsmInstr(EmitR(0, rs, rt, rd, 0, 0x21)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::Addiu(Register rt, Register rs, uint16_t imm16, MipsLabel* patcher_label) {
-  if (patcher_label != nullptr) {
-    Bind(patcher_label);
-  }
-  DsFsmInstr(EmitI(0x9, rs, rt, imm16), patcher_label).GprOuts(rt).GprIns(rs);
-}
-
-void MipsAssembler::Addiu(Register rt, Register rs, uint16_t imm16) {
-  Addiu(rt, rs, imm16, /* patcher_label= */ nullptr);
-}
-
-void MipsAssembler::Subu(Register rd, Register rs, Register rt) {
-  DsFsmInstr(EmitR(0, rs, rt, rd, 0, 0x23)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::MultR2(Register rs, Register rt) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x18)).GprIns(rs, rt);
-}
-
-void MipsAssembler::MultuR2(Register rs, Register rt) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x19)).GprIns(rs, rt);
-}
-
-void MipsAssembler::DivR2(Register rs, Register rt) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1a)).GprIns(rs, rt);
-}
-
-void MipsAssembler::DivuR2(Register rs, Register rt) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitR(0, rs, rt, static_cast<Register>(0), 0, 0x1b)).GprIns(rs, rt);
-}
-
-void MipsAssembler::MulR2(Register rd, Register rs, Register rt) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitR(0x1c, rs, rt, rd, 0, 2)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::DivR2(Register rd, Register rs, Register rt) {
-  CHECK(!IsR6());
-  DivR2(rs, rt);
-  Mflo(rd);
-}
-
-void MipsAssembler::ModR2(Register rd, Register rs, Register rt) {
-  CHECK(!IsR6());
-  DivR2(rs, rt);
-  Mfhi(rd);
-}
-
-void MipsAssembler::DivuR2(Register rd, Register rs, Register rt) {
-  CHECK(!IsR6());
-  DivuR2(rs, rt);
-  Mflo(rd);
-}
-
-void MipsAssembler::ModuR2(Register rd, Register rs, Register rt) {
-  CHECK(!IsR6());
-  DivuR2(rs, rt);
-  Mfhi(rd);
-}
-
-void MipsAssembler::MulR6(Register rd, Register rs, Register rt) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitR(0, rs, rt, rd, 2, 0x18)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::MuhR6(Register rd, Register rs, Register rt) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitR(0, rs, rt, rd, 3, 0x18)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::MuhuR6(Register rd, Register rs, Register rt) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitR(0, rs, rt, rd, 3, 0x19)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::DivR6(Register rd, Register rs, Register rt) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitR(0, rs, rt, rd, 2, 0x1a)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::ModR6(Register rd, Register rs, Register rt) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitR(0, rs, rt, rd, 3, 0x1a)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::DivuR6(Register rd, Register rs, Register rt) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitR(0, rs, rt, rd, 2, 0x1b)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::ModuR6(Register rd, Register rs, Register rt) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitR(0, rs, rt, rd, 3, 0x1b)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::And(Register rd, Register rs, Register rt) {
-  DsFsmInstr(EmitR(0, rs, rt, rd, 0, 0x24)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::Andi(Register rt, Register rs, uint16_t imm16) {
-  DsFsmInstr(EmitI(0xc, rs, rt, imm16)).GprOuts(rt).GprIns(rs);
-}
-
-void MipsAssembler::Or(Register rd, Register rs, Register rt) {
-  DsFsmInstr(EmitR(0, rs, rt, rd, 0, 0x25)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::Ori(Register rt, Register rs, uint16_t imm16) {
-  DsFsmInstr(EmitI(0xd, rs, rt, imm16)).GprOuts(rt).GprIns(rs);
-}
-
-void MipsAssembler::Xor(Register rd, Register rs, Register rt) {
-  DsFsmInstr(EmitR(0, rs, rt, rd, 0, 0x26)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::Xori(Register rt, Register rs, uint16_t imm16) {
-  DsFsmInstr(EmitI(0xe, rs, rt, imm16)).GprOuts(rt).GprIns(rs);
-}
-
-void MipsAssembler::Nor(Register rd, Register rs, Register rt) {
-  DsFsmInstr(EmitR(0, rs, rt, rd, 0, 0x27)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::Movz(Register rd, Register rs, Register rt) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitR(0, rs, rt, rd, 0, 0x0A)).GprInOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::Movn(Register rd, Register rs, Register rt) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitR(0, rs, rt, rd, 0, 0x0B)).GprInOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::Seleqz(Register rd, Register rs, Register rt) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitR(0, rs, rt, rd, 0, 0x35)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::Selnez(Register rd, Register rs, Register rt) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitR(0, rs, rt, rd, 0, 0x37)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::ClzR6(Register rd, Register rs) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitR(0, rs, static_cast<Register>(0), rd, 0x01, 0x10)).GprOuts(rd).GprIns(rs);
-}
-
-void MipsAssembler::ClzR2(Register rd, Register rs) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitR(0x1C, rs, rd, rd, 0, 0x20)).GprOuts(rd).GprIns(rs);
-}
-
-void MipsAssembler::CloR6(Register rd, Register rs) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitR(0, rs, static_cast<Register>(0), rd, 0x01, 0x11)).GprOuts(rd).GprIns(rs);
-}
-
-void MipsAssembler::CloR2(Register rd, Register rs) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitR(0x1C, rs, rd, rd, 0, 0x21)).GprOuts(rd).GprIns(rs);
-}
-
-void MipsAssembler::Seb(Register rd, Register rt) {
-  DsFsmInstr(EmitR(0x1f, static_cast<Register>(0), rt, rd, 0x10, 0x20)).GprOuts(rd).GprIns(rt);
-}
-
-void MipsAssembler::Seh(Register rd, Register rt) {
-  DsFsmInstr(EmitR(0x1f, static_cast<Register>(0), rt, rd, 0x18, 0x20)).GprOuts(rd).GprIns(rt);
-}
-
-void MipsAssembler::Wsbh(Register rd, Register rt) {
-  DsFsmInstr(EmitR(0x1f, static_cast<Register>(0), rt, rd, 2, 0x20)).GprOuts(rd).GprIns(rt);
-}
-
-void MipsAssembler::Bitswap(Register rd, Register rt) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitR(0x1f, static_cast<Register>(0), rt, rd, 0x0, 0x20)).GprOuts(rd).GprIns(rt);
-}
-
-void MipsAssembler::Sll(Register rd, Register rt, int shamt) {
-  CHECK(IsUint<5>(shamt)) << shamt;
-  DsFsmInstr(EmitR(0, static_cast<Register>(0), rt, rd, shamt, 0x00)).GprOuts(rd).GprIns(rt);
-}
-
-void MipsAssembler::Srl(Register rd, Register rt, int shamt) {
-  CHECK(IsUint<5>(shamt)) << shamt;
-  DsFsmInstr(EmitR(0, static_cast<Register>(0), rt, rd, shamt, 0x02)).GprOuts(rd).GprIns(rt);
-}
-
-void MipsAssembler::Rotr(Register rd, Register rt, int shamt) {
-  CHECK(IsUint<5>(shamt)) << shamt;
-  DsFsmInstr(EmitR(0, static_cast<Register>(1), rt, rd, shamt, 0x02)).GprOuts(rd).GprIns(rt);
-}
-
-void MipsAssembler::Sra(Register rd, Register rt, int shamt) {
-  CHECK(IsUint<5>(shamt)) << shamt;
-  DsFsmInstr(EmitR(0, static_cast<Register>(0), rt, rd, shamt, 0x03)).GprOuts(rd).GprIns(rt);
-}
-
-void MipsAssembler::Sllv(Register rd, Register rt, Register rs) {
-  DsFsmInstr(EmitR(0, rs, rt, rd, 0, 0x04)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::Srlv(Register rd, Register rt, Register rs) {
-  DsFsmInstr(EmitR(0, rs, rt, rd, 0, 0x06)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::Rotrv(Register rd, Register rt, Register rs) {
-  DsFsmInstr(EmitR(0, rs, rt, rd, 1, 0x06)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::Srav(Register rd, Register rt, Register rs) {
-  DsFsmInstr(EmitR(0, rs, rt, rd, 0, 0x07)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::Ext(Register rd, Register rt, int pos, int size) {
-  CHECK(IsUint<5>(pos)) << pos;
-  CHECK(0 < size && size <= 32) << size;
-  CHECK(0 < pos + size && pos + size <= 32) << pos << " + " << size;
-  DsFsmInstr(EmitR(0x1f, rt, rd, static_cast<Register>(size - 1), pos, 0x00))
-      .GprOuts(rd).GprIns(rt);
-}
-
-void MipsAssembler::Ins(Register rd, Register rt, int pos, int size) {
-  CHECK(IsUint<5>(pos)) << pos;
-  CHECK(0 < size && size <= 32) << size;
-  CHECK(0 < pos + size && pos + size <= 32) << pos << " + " << size;
-  DsFsmInstr(EmitR(0x1f, rt, rd, static_cast<Register>(pos + size - 1), pos, 0x04))
-      .GprInOuts(rd).GprIns(rt);
-}
-
-void MipsAssembler::Lsa(Register rd, Register rs, Register rt, int saPlusOne) {
-  CHECK(IsR6() || HasMsa());
-  CHECK(1 <= saPlusOne && saPlusOne <= 4) << saPlusOne;
-  int sa = saPlusOne - 1;
-  DsFsmInstr(EmitR(0x0, rs, rt, rd, sa, 0x05)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::ShiftAndAdd(Register dst,
-                                Register src_idx,
-                                Register src_base,
-                                int shamt,
-                                Register tmp) {
-  CHECK(0 <= shamt && shamt <= 4) << shamt;
-  CHECK_NE(src_base, tmp);
-  if (shamt == TIMES_1) {
-    // Catch the special case where the shift amount is zero (0).
-    Addu(dst, src_base, src_idx);
-  } else if (IsR6() || HasMsa()) {
-    Lsa(dst, src_idx, src_base, shamt);
-  } else {
-    Sll(tmp, src_idx, shamt);
-    Addu(dst, src_base, tmp);
-  }
-}
-
-void MipsAssembler::Lb(Register rt, Register rs, uint16_t imm16) {
-  DsFsmInstr(EmitI(0x20, rs, rt, imm16)).GprOuts(rt).GprIns(rs);
-}
-
-void MipsAssembler::Lh(Register rt, Register rs, uint16_t imm16) {
-  DsFsmInstr(EmitI(0x21, rs, rt, imm16)).GprOuts(rt).GprIns(rs);
-}
-
-void MipsAssembler::Lw(Register rt, Register rs, uint16_t imm16, MipsLabel* patcher_label) {
-  if (patcher_label != nullptr) {
-    Bind(patcher_label);
-  }
-  DsFsmInstr(EmitI(0x23, rs, rt, imm16), patcher_label).GprOuts(rt).GprIns(rs);
-}
-
-void MipsAssembler::Lw(Register rt, Register rs, uint16_t imm16) {
-  Lw(rt, rs, imm16, /* patcher_label= */ nullptr);
-}
-
-void MipsAssembler::Lwl(Register rt, Register rs, uint16_t imm16) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitI(0x22, rs, rt, imm16)).GprInOuts(rt).GprIns(rs);
-}
-
-void MipsAssembler::Lwr(Register rt, Register rs, uint16_t imm16) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitI(0x26, rs, rt, imm16)).GprInOuts(rt).GprIns(rs);
-}
-
-void MipsAssembler::Lbu(Register rt, Register rs, uint16_t imm16) {
-  DsFsmInstr(EmitI(0x24, rs, rt, imm16)).GprOuts(rt).GprIns(rs);
-}
-
-void MipsAssembler::Lhu(Register rt, Register rs, uint16_t imm16) {
-  DsFsmInstr(EmitI(0x25, rs, rt, imm16)).GprOuts(rt).GprIns(rs);
-}
-
-void MipsAssembler::Lwpc(Register rs, uint32_t imm19) {
-  CHECK(IsR6());
-  CHECK(IsUint<19>(imm19)) << imm19;
-  DsFsmInstrNop(EmitI21(0x3B, rs, (0x01 << 19) | imm19));
-}
-
-void MipsAssembler::Lui(Register rt, uint16_t imm16) {
-  DsFsmInstr(EmitI(0xf, static_cast<Register>(0), rt, imm16)).GprOuts(rt);
-}
-
-void MipsAssembler::Aui(Register rt, Register rs, uint16_t imm16) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitI(0xf, rs, rt, imm16)).GprOuts(rt).GprIns(rs);
-}
-
-void MipsAssembler::AddUpper(Register rt, Register rs, uint16_t imm16, Register tmp) {
-  bool increment = (rs == rt);
-  if (increment) {
-    CHECK_NE(rs, tmp);
-  }
-  if (IsR6()) {
-    Aui(rt, rs, imm16);
-  } else if (increment) {
-    Lui(tmp, imm16);
-    Addu(rt, rs, tmp);
-  } else {
-    Lui(rt, imm16);
-    Addu(rt, rs, rt);
-  }
-}
-
-void MipsAssembler::Sync(uint32_t stype) {
-  DsFsmInstrNop(EmitR(0, ZERO, ZERO, ZERO, stype & 0x1f, 0xf));
-}
-
-void MipsAssembler::Mfhi(Register rd) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitR(0, ZERO, ZERO, rd, 0, 0x10)).GprOuts(rd);
-}
-
-void MipsAssembler::Mflo(Register rd) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitR(0, ZERO, ZERO, rd, 0, 0x12)).GprOuts(rd);
-}
-
-void MipsAssembler::Sb(Register rt, Register rs, uint16_t imm16) {
-  DsFsmInstr(EmitI(0x28, rs, rt, imm16)).GprIns(rt, rs);
-}
-
-void MipsAssembler::Sh(Register rt, Register rs, uint16_t imm16) {
-  DsFsmInstr(EmitI(0x29, rs, rt, imm16)).GprIns(rt, rs);
-}
-
-void MipsAssembler::Sw(Register rt, Register rs, uint16_t imm16, MipsLabel* patcher_label) {
-  if (patcher_label != nullptr) {
-    Bind(patcher_label);
-  }
-  DsFsmInstr(EmitI(0x2b, rs, rt, imm16), patcher_label).GprIns(rt, rs);
-}
-
-void MipsAssembler::Sw(Register rt, Register rs, uint16_t imm16) {
-  Sw(rt, rs, imm16, /* patcher_label= */ nullptr);
-}
-
-void MipsAssembler::Swl(Register rt, Register rs, uint16_t imm16) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitI(0x2a, rs, rt, imm16)).GprIns(rt, rs);
-}
-
-void MipsAssembler::Swr(Register rt, Register rs, uint16_t imm16) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitI(0x2e, rs, rt, imm16)).GprIns(rt, rs);
-}
-
-void MipsAssembler::LlR2(Register rt, Register base, int16_t imm16) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitI(0x30, base, rt, imm16)).GprOuts(rt).GprIns(base);
-}
-
-void MipsAssembler::ScR2(Register rt, Register base, int16_t imm16) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitI(0x38, base, rt, imm16)).GprInOuts(rt).GprIns(base);
-}
-
-void MipsAssembler::LlR6(Register rt, Register base, int16_t imm9) {
-  CHECK(IsR6());
-  CHECK(IsInt<9>(imm9));
-  DsFsmInstr(EmitI(0x1f, base, rt, ((imm9 & 0x1ff) << 7) | 0x36)).GprOuts(rt).GprIns(base);
-}
-
-void MipsAssembler::ScR6(Register rt, Register base, int16_t imm9) {
-  CHECK(IsR6());
-  CHECK(IsInt<9>(imm9));
-  DsFsmInstr(EmitI(0x1f, base, rt, ((imm9 & 0x1ff) << 7) | 0x26)).GprInOuts(rt).GprIns(base);
-}
-
-void MipsAssembler::Slt(Register rd, Register rs, Register rt) {
-  DsFsmInstr(EmitR(0, rs, rt, rd, 0, 0x2a)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::Sltu(Register rd, Register rs, Register rt) {
-  DsFsmInstr(EmitR(0, rs, rt, rd, 0, 0x2b)).GprOuts(rd).GprIns(rs, rt);
-}
-
-void MipsAssembler::Slti(Register rt, Register rs, uint16_t imm16) {
-  DsFsmInstr(EmitI(0xa, rs, rt, imm16)).GprOuts(rt).GprIns(rs);
-}
-
-void MipsAssembler::Sltiu(Register rt, Register rs, uint16_t imm16) {
-  DsFsmInstr(EmitI(0xb, rs, rt, imm16)).GprOuts(rt).GprIns(rs);
-}
-
-void MipsAssembler::B(uint16_t imm16) {
-  DsFsmInstrNop(EmitI(0x4, static_cast<Register>(0), static_cast<Register>(0), imm16));
-}
-
-void MipsAssembler::Bal(uint16_t imm16) {
-  DsFsmInstrNop(EmitI(0x1, static_cast<Register>(0), static_cast<Register>(0x11), imm16));
-}
-
-void MipsAssembler::Beq(Register rs, Register rt, uint16_t imm16) {
-  DsFsmInstrNop(EmitI(0x4, rs, rt, imm16));
-}
-
-void MipsAssembler::Bne(Register rs, Register rt, uint16_t imm16) {
-  DsFsmInstrNop(EmitI(0x5, rs, rt, imm16));
-}
-
-void MipsAssembler::Beqz(Register rt, uint16_t imm16) {
-  Beq(rt, ZERO, imm16);
-}
-
-void MipsAssembler::Bnez(Register rt, uint16_t imm16) {
-  Bne(rt, ZERO, imm16);
-}
-
-void MipsAssembler::Bltz(Register rt, uint16_t imm16) {
-  DsFsmInstrNop(EmitI(0x1, rt, static_cast<Register>(0), imm16));
-}
-
-void MipsAssembler::Bgez(Register rt, uint16_t imm16) {
-  DsFsmInstrNop(EmitI(0x1, rt, static_cast<Register>(0x1), imm16));
-}
-
-void MipsAssembler::Blez(Register rt, uint16_t imm16) {
-  DsFsmInstrNop(EmitI(0x6, rt, static_cast<Register>(0), imm16));
-}
-
-void MipsAssembler::Bgtz(Register rt, uint16_t imm16) {
-  DsFsmInstrNop(EmitI(0x7, rt, static_cast<Register>(0), imm16));
-}
-
-void MipsAssembler::Bc1f(uint16_t imm16) {
-  Bc1f(0, imm16);
-}
-
-void MipsAssembler::Bc1f(int cc, uint16_t imm16) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstrNop(EmitI(0x11, static_cast<Register>(0x8), static_cast<Register>(cc << 2), imm16));
-}
-
-void MipsAssembler::Bc1t(uint16_t imm16) {
-  Bc1t(0, imm16);
-}
-
-void MipsAssembler::Bc1t(int cc, uint16_t imm16) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstrNop(EmitI(0x11,
-                      static_cast<Register>(0x8),
-                      static_cast<Register>((cc << 2) | 1),
-                      imm16));
-}
-
-void MipsAssembler::J(uint32_t addr26) {
-  DsFsmInstrNop(EmitI26(0x2, addr26));
-}
-
-void MipsAssembler::Jal(uint32_t addr26) {
-  DsFsmInstrNop(EmitI26(0x3, addr26));
-}
-
-void MipsAssembler::Jalr(Register rd, Register rs) {
-  uint32_t last_instruction = delay_slot_.instruction_;
-  MipsLabel* patcher_label = delay_slot_.patcher_label_;
-  bool exchange = (last_instruction != 0 &&
-      (delay_slot_.masks_.gpr_outs_ & (1u << rs)) == 0 &&
-      ((delay_slot_.masks_.gpr_ins_ | delay_slot_.masks_.gpr_outs_) & (1u << rd)) == 0);
-  if (exchange) {
-    // The last instruction cannot be used in a different delay slot,
-    // do not commit the label before it (if any).
-    DsFsmDropLabel();
-  }
-  DsFsmInstrNop(EmitR(0, rs, static_cast<Register>(0), rd, 0, 0x09));
-  if (exchange) {
-    // Exchange the last two instructions in the assembler buffer.
-    size_t size = buffer_.Size();
-    CHECK_GE(size, 2 * sizeof(uint32_t));
-    size_t pos1 = size - 2 * sizeof(uint32_t);
-    size_t pos2 = size - sizeof(uint32_t);
-    uint32_t instr1 = buffer_.Load<uint32_t>(pos1);
-    uint32_t instr2 = buffer_.Load<uint32_t>(pos2);
-    CHECK_EQ(instr1, last_instruction);
-    buffer_.Store<uint32_t>(pos1, instr2);
-    buffer_.Store<uint32_t>(pos2, instr1);
-    // Move the patcher label along with the patched instruction.
-    if (patcher_label != nullptr) {
-      patcher_label->AdjustBoundPosition(sizeof(uint32_t));
-    }
-  } else if (reordering_) {
-    Nop();
-  }
-}
-
-void MipsAssembler::Jalr(Register rs) {
-  Jalr(RA, rs);
-}
-
-void MipsAssembler::Jr(Register rs) {
-  Jalr(ZERO, rs);
-}
-
-void MipsAssembler::Nal() {
-  DsFsmInstrNop(EmitI(0x1, static_cast<Register>(0), static_cast<Register>(0x10), 0));
-}
-
-void MipsAssembler::Auipc(Register rs, uint16_t imm16) {
-  CHECK(IsR6());
-  DsFsmInstrNop(EmitI(0x3B, rs, static_cast<Register>(0x1E), imm16));
-}
-
-void MipsAssembler::Addiupc(Register rs, uint32_t imm19) {
-  CHECK(IsR6());
-  CHECK(IsUint<19>(imm19)) << imm19;
-  DsFsmInstrNop(EmitI21(0x3B, rs, imm19));
-}
-
-void MipsAssembler::Bc(uint32_t imm26) {
-  CHECK(IsR6());
-  DsFsmInstrNop(EmitI26(0x32, imm26));
-}
-
-void MipsAssembler::Balc(uint32_t imm26) {
-  CHECK(IsR6());
-  DsFsmInstrNop(EmitI26(0x3A, imm26));
-}
-
-void MipsAssembler::Jic(Register rt, uint16_t imm16) {
-  CHECK(IsR6());
-  DsFsmInstrNop(EmitI(0x36, static_cast<Register>(0), rt, imm16));
-}
-
-void MipsAssembler::Jialc(Register rt, uint16_t imm16) {
-  CHECK(IsR6());
-  DsFsmInstrNop(EmitI(0x3E, static_cast<Register>(0), rt, imm16));
-}
-
-void MipsAssembler::Bltc(Register rs, Register rt, uint16_t imm16) {
-  CHECK(IsR6());
-  CHECK_NE(rs, ZERO);
-  CHECK_NE(rt, ZERO);
-  CHECK_NE(rs, rt);
-  DsFsmInstrNop(EmitI(0x17, rs, rt, imm16));
-}
-
-void MipsAssembler::Bltzc(Register rt, uint16_t imm16) {
-  CHECK(IsR6());
-  CHECK_NE(rt, ZERO);
-  DsFsmInstrNop(EmitI(0x17, rt, rt, imm16));
-}
-
-void MipsAssembler::Bgtzc(Register rt, uint16_t imm16) {
-  CHECK(IsR6());
-  CHECK_NE(rt, ZERO);
-  DsFsmInstrNop(EmitI(0x17, static_cast<Register>(0), rt, imm16));
-}
-
-void MipsAssembler::Bgec(Register rs, Register rt, uint16_t imm16) {
-  CHECK(IsR6());
-  CHECK_NE(rs, ZERO);
-  CHECK_NE(rt, ZERO);
-  CHECK_NE(rs, rt);
-  DsFsmInstrNop(EmitI(0x16, rs, rt, imm16));
-}
-
-void MipsAssembler::Bgezc(Register rt, uint16_t imm16) {
-  CHECK(IsR6());
-  CHECK_NE(rt, ZERO);
-  DsFsmInstrNop(EmitI(0x16, rt, rt, imm16));
-}
-
-void MipsAssembler::Blezc(Register rt, uint16_t imm16) {
-  CHECK(IsR6());
-  CHECK_NE(rt, ZERO);
-  DsFsmInstrNop(EmitI(0x16, static_cast<Register>(0), rt, imm16));
-}
-
-void MipsAssembler::Bltuc(Register rs, Register rt, uint16_t imm16) {
-  CHECK(IsR6());
-  CHECK_NE(rs, ZERO);
-  CHECK_NE(rt, ZERO);
-  CHECK_NE(rs, rt);
-  DsFsmInstrNop(EmitI(0x7, rs, rt, imm16));
-}
-
-void MipsAssembler::Bgeuc(Register rs, Register rt, uint16_t imm16) {
-  CHECK(IsR6());
-  CHECK_NE(rs, ZERO);
-  CHECK_NE(rt, ZERO);
-  CHECK_NE(rs, rt);
-  DsFsmInstrNop(EmitI(0x6, rs, rt, imm16));
-}
-
-void MipsAssembler::Beqc(Register rs, Register rt, uint16_t imm16) {
-  CHECK(IsR6());
-  CHECK_NE(rs, ZERO);
-  CHECK_NE(rt, ZERO);
-  CHECK_NE(rs, rt);
-  DsFsmInstrNop(EmitI(0x8, std::min(rs, rt), std::max(rs, rt), imm16));
-}
-
-void MipsAssembler::Bnec(Register rs, Register rt, uint16_t imm16) {
-  CHECK(IsR6());
-  CHECK_NE(rs, ZERO);
-  CHECK_NE(rt, ZERO);
-  CHECK_NE(rs, rt);
-  DsFsmInstrNop(EmitI(0x18, std::min(rs, rt), std::max(rs, rt), imm16));
-}
-
-void MipsAssembler::Beqzc(Register rs, uint32_t imm21) {
-  CHECK(IsR6());
-  CHECK_NE(rs, ZERO);
-  DsFsmInstrNop(EmitI21(0x36, rs, imm21));
-}
-
-void MipsAssembler::Bnezc(Register rs, uint32_t imm21) {
-  CHECK(IsR6());
-  CHECK_NE(rs, ZERO);
-  DsFsmInstrNop(EmitI21(0x3E, rs, imm21));
-}
-
-void MipsAssembler::Bc1eqz(FRegister ft, uint16_t imm16) {
-  CHECK(IsR6());
-  DsFsmInstrNop(EmitFI(0x11, 0x9, ft, imm16));
-}
-
-void MipsAssembler::Bc1nez(FRegister ft, uint16_t imm16) {
-  CHECK(IsR6());
-  DsFsmInstrNop(EmitFI(0x11, 0xD, ft, imm16));
-}
-
-void MipsAssembler::EmitBcondR2(BranchCondition cond, Register rs, Register rt, uint16_t imm16) {
-  switch (cond) {
-    case kCondLTZ:
-      CHECK_EQ(rt, ZERO);
-      Bltz(rs, imm16);
-      break;
-    case kCondGEZ:
-      CHECK_EQ(rt, ZERO);
-      Bgez(rs, imm16);
-      break;
-    case kCondLEZ:
-      CHECK_EQ(rt, ZERO);
-      Blez(rs, imm16);
-      break;
-    case kCondGTZ:
-      CHECK_EQ(rt, ZERO);
-      Bgtz(rs, imm16);
-      break;
-    case kCondEQ:
-      Beq(rs, rt, imm16);
-      break;
-    case kCondNE:
-      Bne(rs, rt, imm16);
-      break;
-    case kCondEQZ:
-      CHECK_EQ(rt, ZERO);
-      Beqz(rs, imm16);
-      break;
-    case kCondNEZ:
-      CHECK_EQ(rt, ZERO);
-      Bnez(rs, imm16);
-      break;
-    case kCondF:
-      CHECK_EQ(rt, ZERO);
-      Bc1f(static_cast<int>(rs), imm16);
-      break;
-    case kCondT:
-      CHECK_EQ(rt, ZERO);
-      Bc1t(static_cast<int>(rs), imm16);
-      break;
-    case kCondLT:
-    case kCondGE:
-    case kCondLE:
-    case kCondGT:
-    case kCondLTU:
-    case kCondGEU:
-    case kUncond:
-      // We don't support synthetic R2 branches (preceded with slt[u]) at this level
-      // (R2 doesn't have branches to compare 2 registers using <, <=, >=, >).
-      LOG(FATAL) << "Unexpected branch condition " << cond;
-      UNREACHABLE();
-  }
-}
-
-void MipsAssembler::EmitBcondR6(BranchCondition cond, Register rs, Register rt, uint32_t imm16_21) {
-  switch (cond) {
-    case kCondLT:
-      Bltc(rs, rt, imm16_21);
-      break;
-    case kCondGE:
-      Bgec(rs, rt, imm16_21);
-      break;
-    case kCondLE:
-      Bgec(rt, rs, imm16_21);
-      break;
-    case kCondGT:
-      Bltc(rt, rs, imm16_21);
-      break;
-    case kCondLTZ:
-      CHECK_EQ(rt, ZERO);
-      Bltzc(rs, imm16_21);
-      break;
-    case kCondGEZ:
-      CHECK_EQ(rt, ZERO);
-      Bgezc(rs, imm16_21);
-      break;
-    case kCondLEZ:
-      CHECK_EQ(rt, ZERO);
-      Blezc(rs, imm16_21);
-      break;
-    case kCondGTZ:
-      CHECK_EQ(rt, ZERO);
-      Bgtzc(rs, imm16_21);
-      break;
-    case kCondEQ:
-      Beqc(rs, rt, imm16_21);
-      break;
-    case kCondNE:
-      Bnec(rs, rt, imm16_21);
-      break;
-    case kCondEQZ:
-      CHECK_EQ(rt, ZERO);
-      Beqzc(rs, imm16_21);
-      break;
-    case kCondNEZ:
-      CHECK_EQ(rt, ZERO);
-      Bnezc(rs, imm16_21);
-      break;
-    case kCondLTU:
-      Bltuc(rs, rt, imm16_21);
-      break;
-    case kCondGEU:
-      Bgeuc(rs, rt, imm16_21);
-      break;
-    case kCondF:
-      CHECK_EQ(rt, ZERO);
-      Bc1eqz(static_cast<FRegister>(rs), imm16_21);
-      break;
-    case kCondT:
-      CHECK_EQ(rt, ZERO);
-      Bc1nez(static_cast<FRegister>(rs), imm16_21);
-      break;
-    case kUncond:
-      LOG(FATAL) << "Unexpected branch condition " << cond;
-      UNREACHABLE();
-  }
-}
-
-void MipsAssembler::AddS(FRegister fd, FRegister fs, FRegister ft) {
-  DsFsmInstr(EmitFR(0x11, 0x10, ft, fs, fd, 0x0)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::SubS(FRegister fd, FRegister fs, FRegister ft) {
-  DsFsmInstr(EmitFR(0x11, 0x10, ft, fs, fd, 0x1)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::MulS(FRegister fd, FRegister fs, FRegister ft) {
-  DsFsmInstr(EmitFR(0x11, 0x10, ft, fs, fd, 0x2)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::DivS(FRegister fd, FRegister fs, FRegister ft) {
-  DsFsmInstr(EmitFR(0x11, 0x10, ft, fs, fd, 0x3)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::AddD(FRegister fd, FRegister fs, FRegister ft) {
-  DsFsmInstr(EmitFR(0x11, 0x11, ft, fs, fd, 0x0)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::SubD(FRegister fd, FRegister fs, FRegister ft) {
-  DsFsmInstr(EmitFR(0x11, 0x11, ft, fs, fd, 0x1)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::MulD(FRegister fd, FRegister fs, FRegister ft) {
-  DsFsmInstr(EmitFR(0x11, 0x11, ft, fs, fd, 0x2)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::DivD(FRegister fd, FRegister fs, FRegister ft) {
-  DsFsmInstr(EmitFR(0x11, 0x11, ft, fs, fd, 0x3)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::SqrtS(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x4)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::SqrtD(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x4)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::AbsS(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x5)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::AbsD(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x5)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::MovS(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x6)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::MovD(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x6)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::NegS(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x7)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::NegD(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x7)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::CunS(FRegister fs, FRegister ft) {
-  CunS(0, fs, ft);
-}
-
-void MipsAssembler::CunS(int cc, FRegister fs, FRegister ft) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x31))
-      .CcOuts(cc).FprIns(fs, ft);
-}
-
-void MipsAssembler::CeqS(FRegister fs, FRegister ft) {
-  CeqS(0, fs, ft);
-}
-
-void MipsAssembler::CeqS(int cc, FRegister fs, FRegister ft) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x32))
-      .CcOuts(cc).FprIns(fs, ft);
-}
-
-void MipsAssembler::CueqS(FRegister fs, FRegister ft) {
-  CueqS(0, fs, ft);
-}
-
-void MipsAssembler::CueqS(int cc, FRegister fs, FRegister ft) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x33))
-      .CcOuts(cc).FprIns(fs, ft);
-}
-
-void MipsAssembler::ColtS(FRegister fs, FRegister ft) {
-  ColtS(0, fs, ft);
-}
-
-void MipsAssembler::ColtS(int cc, FRegister fs, FRegister ft) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x34))
-      .CcOuts(cc).FprIns(fs, ft);
-}
-
-void MipsAssembler::CultS(FRegister fs, FRegister ft) {
-  CultS(0, fs, ft);
-}
-
-void MipsAssembler::CultS(int cc, FRegister fs, FRegister ft) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x35))
-      .CcOuts(cc).FprIns(fs, ft);
-}
-
-void MipsAssembler::ColeS(FRegister fs, FRegister ft) {
-  ColeS(0, fs, ft);
-}
-
-void MipsAssembler::ColeS(int cc, FRegister fs, FRegister ft) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x36))
-      .CcOuts(cc).FprIns(fs, ft);
-}
-
-void MipsAssembler::CuleS(FRegister fs, FRegister ft) {
-  CuleS(0, fs, ft);
-}
-
-void MipsAssembler::CuleS(int cc, FRegister fs, FRegister ft) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitFR(0x11, 0x10, ft, fs, static_cast<FRegister>(cc << 2), 0x37))
-      .CcOuts(cc).FprIns(fs, ft);
-}
-
-void MipsAssembler::CunD(FRegister fs, FRegister ft) {
-  CunD(0, fs, ft);
-}
-
-void MipsAssembler::CunD(int cc, FRegister fs, FRegister ft) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x31))
-      .CcOuts(cc).FprIns(fs, ft);
-}
-
-void MipsAssembler::CeqD(FRegister fs, FRegister ft) {
-  CeqD(0, fs, ft);
-}
-
-void MipsAssembler::CeqD(int cc, FRegister fs, FRegister ft) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x32))
-      .CcOuts(cc).FprIns(fs, ft);
-}
-
-void MipsAssembler::CueqD(FRegister fs, FRegister ft) {
-  CueqD(0, fs, ft);
-}
-
-void MipsAssembler::CueqD(int cc, FRegister fs, FRegister ft) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x33))
-      .CcOuts(cc).FprIns(fs, ft);
-}
-
-void MipsAssembler::ColtD(FRegister fs, FRegister ft) {
-  ColtD(0, fs, ft);
-}
-
-void MipsAssembler::ColtD(int cc, FRegister fs, FRegister ft) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x34))
-      .CcOuts(cc).FprIns(fs, ft);
-}
-
-void MipsAssembler::CultD(FRegister fs, FRegister ft) {
-  CultD(0, fs, ft);
-}
-
-void MipsAssembler::CultD(int cc, FRegister fs, FRegister ft) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x35))
-      .CcOuts(cc).FprIns(fs, ft);
-}
-
-void MipsAssembler::ColeD(FRegister fs, FRegister ft) {
-  ColeD(0, fs, ft);
-}
-
-void MipsAssembler::ColeD(int cc, FRegister fs, FRegister ft) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x36))
-      .CcOuts(cc).FprIns(fs, ft);
-}
-
-void MipsAssembler::CuleD(FRegister fs, FRegister ft) {
-  CuleD(0, fs, ft);
-}
-
-void MipsAssembler::CuleD(int cc, FRegister fs, FRegister ft) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitFR(0x11, 0x11, ft, fs, static_cast<FRegister>(cc << 2), 0x37))
-      .CcOuts(cc).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpUnS(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x14, ft, fs, fd, 0x01)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpEqS(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x14, ft, fs, fd, 0x02)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpUeqS(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x14, ft, fs, fd, 0x03)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpLtS(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x14, ft, fs, fd, 0x04)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpUltS(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x14, ft, fs, fd, 0x05)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpLeS(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x14, ft, fs, fd, 0x06)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpUleS(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x14, ft, fs, fd, 0x07)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpOrS(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x14, ft, fs, fd, 0x11)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpUneS(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x14, ft, fs, fd, 0x12)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpNeS(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x14, ft, fs, fd, 0x13)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpUnD(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x15, ft, fs, fd, 0x01)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpEqD(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x15, ft, fs, fd, 0x02)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpUeqD(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x15, ft, fs, fd, 0x03)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpLtD(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x15, ft, fs, fd, 0x04)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpUltD(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x15, ft, fs, fd, 0x05)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpLeD(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x15, ft, fs, fd, 0x06)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpUleD(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x15, ft, fs, fd, 0x07)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpOrD(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x15, ft, fs, fd, 0x11)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpUneD(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x15, ft, fs, fd, 0x12)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::CmpNeD(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x15, ft, fs, fd, 0x13)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::Movf(Register rd, Register rs, int cc) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitR(0, rs, static_cast<Register>(cc << 2), rd, 0, 0x01))
-      .GprInOuts(rd).GprIns(rs).CcIns(cc);
-}
-
-void MipsAssembler::Movt(Register rd, Register rs, int cc) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitR(0, rs, static_cast<Register>((cc << 2) | 1), rd, 0, 0x01))
-      .GprInOuts(rd).GprIns(rs).CcIns(cc);
-}
-
-void MipsAssembler::MovfS(FRegister fd, FRegister fs, int cc) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitFR(0x11, 0x10, static_cast<FRegister>(cc << 2), fs, fd, 0x11))
-      .FprInOuts(fd).FprIns(fs).CcIns(cc);
-}
-
-void MipsAssembler::MovfD(FRegister fd, FRegister fs, int cc) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitFR(0x11, 0x11, static_cast<FRegister>(cc << 2), fs, fd, 0x11))
-      .FprInOuts(fd).FprIns(fs).CcIns(cc);
-}
-
-void MipsAssembler::MovtS(FRegister fd, FRegister fs, int cc) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitFR(0x11, 0x10, static_cast<FRegister>((cc << 2) | 1), fs, fd, 0x11))
-      .FprInOuts(fd).FprIns(fs).CcIns(cc);
-}
-
-void MipsAssembler::MovtD(FRegister fd, FRegister fs, int cc) {
-  CHECK(!IsR6());
-  CHECK(IsUint<3>(cc)) << cc;
-  DsFsmInstr(EmitFR(0x11, 0x11, static_cast<FRegister>((cc << 2) | 1), fs, fd, 0x11))
-      .FprInOuts(fd).FprIns(fs).CcIns(cc);
-}
-
-void MipsAssembler::MovzS(FRegister fd, FRegister fs, Register rt) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x10, static_cast<FRegister>(rt), fs, fd, 0x12))
-      .FprInOuts(fd).FprIns(fs).GprIns(rt);
-}
-
-void MipsAssembler::MovzD(FRegister fd, FRegister fs, Register rt) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x11, static_cast<FRegister>(rt), fs, fd, 0x12))
-      .FprInOuts(fd).FprIns(fs).GprIns(rt);
-}
-
-void MipsAssembler::MovnS(FRegister fd, FRegister fs, Register rt) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x10, static_cast<FRegister>(rt), fs, fd, 0x13))
-      .FprInOuts(fd).FprIns(fs).GprIns(rt);
-}
-
-void MipsAssembler::MovnD(FRegister fd, FRegister fs, Register rt) {
-  CHECK(!IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x11, static_cast<FRegister>(rt), fs, fd, 0x13))
-      .FprInOuts(fd).FprIns(fs).GprIns(rt);
-}
-
-void MipsAssembler::SelS(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x10, ft, fs, fd, 0x10)).FprInOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::SelD(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x11, ft, fs, fd, 0x10)).FprInOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::SeleqzS(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x10, ft, fs, fd, 0x14)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::SeleqzD(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x11, ft, fs, fd, 0x14)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::SelnezS(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x10, ft, fs, fd, 0x17)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::SelnezD(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x11, ft, fs, fd, 0x17)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::ClassS(FRegister fd, FRegister fs) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x1b)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::ClassD(FRegister fd, FRegister fs) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x1b)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::MinS(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x10, ft, fs, fd, 0x1c)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::MinD(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x11, ft, fs, fd, 0x1c)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::MaxS(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x10, ft, fs, fd, 0x1e)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::MaxD(FRegister fd, FRegister fs, FRegister ft) {
-  CHECK(IsR6());
-  DsFsmInstr(EmitFR(0x11, 0x11, ft, fs, fd, 0x1e)).FprOuts(fd).FprIns(fs, ft);
-}
-
-void MipsAssembler::TruncLS(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x09)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::TruncLD(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x09)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::TruncWS(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x0D)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::TruncWD(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x0D)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::Cvtsw(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x14, static_cast<FRegister>(0), fs, fd, 0x20)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::Cvtdw(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x14, static_cast<FRegister>(0), fs, fd, 0x21)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::Cvtsd(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0x20)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::Cvtds(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0x21)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::Cvtsl(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x15, static_cast<FRegister>(0), fs, fd, 0x20)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::Cvtdl(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x15, static_cast<FRegister>(0), fs, fd, 0x21)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::FloorWS(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x10, static_cast<FRegister>(0), fs, fd, 0xf)).FprOuts(fd).FprIns(fs);
-}
-
-void MipsAssembler::FloorWD(FRegister fd, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x11, static_cast<FRegister>(0), fs, fd, 0xf)).FprOuts(fd).FprIns(fs);
-}
-
-FRegister MipsAssembler::GetFpuRegLow(FRegister reg) {
-  // If FPRs are 32-bit (and get paired to hold 64-bit values), accesses to
-  // odd-numbered FPRs are reattributed to even-numbered FPRs. This lets us
-  // use only even-numbered FPRs irrespective of whether we're doing single-
-  // or double-precision arithmetic. (We don't use odd-numbered 32-bit FPRs
-  // to hold single-precision values).
-  return Is32BitFPU() ? static_cast<FRegister>(reg & ~1u) : reg;
-}
-
-void MipsAssembler::Mfc1(Register rt, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x00, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0))
-      .GprOuts(rt).FprIns(GetFpuRegLow(fs));
-}
-
-// Note, the 32 LSBs of a 64-bit value must be loaded into an FPR before the 32 MSBs
-// when loading the value as 32-bit halves.
-void MipsAssembler::Mtc1(Register rt, FRegister fs) {
-  uint32_t encoding =
-      EmitFR(0x11, 0x04, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0);
-  if (Is32BitFPU() && (fs % 2 != 0)) {
-    // If mtc1 is used to simulate mthc1 by writing to the odd-numbered FPR in
-    // a pair of 32-bit FPRs, the associated even-numbered FPR is an in/out.
-    DsFsmInstr(encoding).FprInOuts(GetFpuRegLow(fs)).GprIns(rt);
-  } else {
-    // Otherwise (the FPR is 64-bit or even-numbered), the FPR is an out.
-    DsFsmInstr(encoding).FprOuts(fs).GprIns(rt);
-  }
-}
-
-void MipsAssembler::Mfhc1(Register rt, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x03, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0))
-      .GprOuts(rt).FprIns(fs);
-}
-
-// Note, the 32 LSBs of a 64-bit value must be loaded into an FPR before the 32 MSBs
-// when loading the value as 32-bit halves.
-void MipsAssembler::Mthc1(Register rt, FRegister fs) {
-  DsFsmInstr(EmitFR(0x11, 0x07, static_cast<FRegister>(rt), fs, static_cast<FRegister>(0), 0x0))
-      .FprInOuts(fs).GprIns(rt);
-}
-
-void MipsAssembler::MoveFromFpuHigh(Register rt, FRegister fs) {
-  if (Is32BitFPU()) {
-    CHECK_EQ(fs % 2, 0) << fs;
-    Mfc1(rt, static_cast<FRegister>(fs + 1));
-  } else {
-    Mfhc1(rt, fs);
-  }
-}
-
-void MipsAssembler::MoveToFpuHigh(Register rt, FRegister fs) {
-  if (Is32BitFPU()) {
-    CHECK_EQ(fs % 2, 0) << fs;
-    Mtc1(rt, static_cast<FRegister>(fs + 1));
-  } else {
-    Mthc1(rt, fs);
-  }
-}
-
-// Note, the 32 LSBs of a 64-bit value must be loaded into an FPR before the 32 MSBs
-// when loading the value as 32-bit halves.
-void MipsAssembler::Lwc1(FRegister ft, Register rs, uint16_t imm16) {
-  uint32_t encoding = EmitI(0x31, rs, static_cast<Register>(ft), imm16);
-  if (Is32BitFPU() && (ft % 2 != 0)) {
-    // If lwc1 is used to load the odd-numbered FPR in a pair of 32-bit FPRs,
-    // the associated even-numbered FPR is an in/out.
-    DsFsmInstr(encoding).FprInOuts(GetFpuRegLow(ft)).GprIns(rs);
-  } else {
-    // Otherwise (the FPR is 64-bit or even-numbered), the FPR is an out.
-    DsFsmInstr(encoding).FprOuts(ft).GprIns(rs);
-  }
-}
-
-void MipsAssembler::Ldc1(FRegister ft, Register rs, uint16_t imm16) {
-  DsFsmInstr(EmitI(0x35, rs, static_cast<Register>(ft), imm16)).FprOuts(ft).GprIns(rs);
-}
-
-void MipsAssembler::Swc1(FRegister ft, Register rs, uint16_t imm16) {
-  DsFsmInstr(EmitI(0x39, rs, static_cast<Register>(ft), imm16)).FprIns(GetFpuRegLow(ft)).GprIns(rs);
-}
-
-void MipsAssembler::Sdc1(FRegister ft, Register rs, uint16_t imm16) {
-  DsFsmInstr(EmitI(0x3d, rs, static_cast<Register>(ft), imm16)).FprIns(ft).GprIns(rs);
-}
-
-void MipsAssembler::Break() {
-  DsFsmInstrNop(EmitR(0, ZERO, ZERO, ZERO, 0, 0xD));
-}
-
-void MipsAssembler::Nop() {
-  DsFsmInstrNop(EmitR(0x0, ZERO, ZERO, ZERO, 0, 0x0));
-}
-
-void MipsAssembler::NopIfNoReordering() {
-  if (!reordering_) {
-    Nop();
-  }
-}
-
-void MipsAssembler::Move(Register rd, Register rs) {
-  Or(rd, rs, ZERO);
-}
-
-void MipsAssembler::Clear(Register rd) {
-  Move(rd, ZERO);
-}
-
-void MipsAssembler::Not(Register rd, Register rs) {
-  Nor(rd, rs, ZERO);
-}
-
-void MipsAssembler::Push(Register rs) {
-  IncreaseFrameSize(kStackAlignment);
-  Sw(rs, SP, 0);
-}
-
-void MipsAssembler::Pop(Register rd) {
-  Lw(rd, SP, 0);
-  DecreaseFrameSize(kStackAlignment);
-}
-
-void MipsAssembler::PopAndReturn(Register rd, Register rt) {
-  bool reordering = SetReorder(false);
-  Lw(rd, SP, 0);
-  Jr(rt);
-  DecreaseFrameSize(kStackAlignment);  // Single instruction in delay slot.
-  SetReorder(reordering);
-}
-
-void MipsAssembler::AndV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x1e)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::OrV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x1e)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::NorV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x1e)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::XorV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x1e)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::AddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x0, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::AddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x1, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::AddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x2, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::AddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x3, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::SubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x1, 0x0, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::SubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x1, 0x1, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::SubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x1, 0x2, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::SubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x1, 0x3, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::MulvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::MulvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::MulvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::MulvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Div_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Div_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Div_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Div_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Div_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Div_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Div_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Div_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Mod_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Mod_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Mod_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x6, 0x2, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Mod_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x6, 0x3, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Mod_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Mod_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Mod_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x7, 0x2, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Mod_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x12)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Add_aB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Add_aH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Add_aW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Add_aD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Ave_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Ave_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Ave_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Ave_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Ave_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Ave_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Ave_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Ave_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Aver_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Aver_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Aver_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x6, 0x2, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Aver_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x6, 0x3, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Aver_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Aver_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Aver_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x7, 0x2, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Aver_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x10)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Max_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x2, 0x0, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Max_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x2, 0x1, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Max_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x2, 0x2, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Max_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x2, 0x3, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Max_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x3, 0x0, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Max_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x3, 0x1, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Max_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x3, 0x2, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Max_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x3, 0x3, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Min_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x0, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Min_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x1, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Min_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x2, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Min_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x3, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Min_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x0, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Min_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x1, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Min_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x2, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Min_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x3, wt, ws, wd, 0xe)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::FaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x1b)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::FaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x1b)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::FsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x1b)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::FsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x1b)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::FmulW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x1, 0x0, wt, ws, wd, 0x1b)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::FmulD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x1, 0x1, wt, ws, wd, 0x1b)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::FdivW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x1, 0x2, wt, ws, wd, 0x1b)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::FdivD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x1, 0x3, wt, ws, wd, 0x1b)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::FmaxW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x1b)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::FmaxD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x1b)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::FminW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x1b)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::FminD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x1b)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Ffint_sW(VectorRegister wd, VectorRegister ws) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa2RF(0x19e, 0x0, ws, wd, 0x1e)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::Ffint_sD(VectorRegister wd, VectorRegister ws) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa2RF(0x19e, 0x1, ws, wd, 0x1e)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::Ftint_sW(VectorRegister wd, VectorRegister ws) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa2RF(0x19c, 0x0, ws, wd, 0x1e)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::Ftint_sD(VectorRegister wd, VectorRegister ws) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa2RF(0x19c, 0x1, ws, wd, 0x1e)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::SllB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x0, wt, ws, wd, 0xd)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::SllH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x1, wt, ws, wd, 0xd)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::SllW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x2, wt, ws, wd, 0xd)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::SllD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x0, 0x3, wt, ws, wd, 0xd)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::SraB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x1, 0x0, wt, ws, wd, 0xd)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::SraH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x1, 0x1, wt, ws, wd, 0xd)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::SraW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x1, 0x2, wt, ws, wd, 0xd)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::SraD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x1, 0x3, wt, ws, wd, 0xd)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::SrlB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x2, 0x0, wt, ws, wd, 0xd)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::SrlH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x2, 0x1, wt, ws, wd, 0xd)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::SrlW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x2, 0x2, wt, ws, wd, 0xd)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::SrlD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x2, 0x3, wt, ws, wd, 0xd)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::SlliB(VectorRegister wd, VectorRegister ws, int shamt3) {
-  CHECK(HasMsa());
-  CHECK(IsUint<3>(shamt3)) << shamt3;
-  DsFsmInstr(EmitMsaBIT(0x0, shamt3 | kMsaDfMByteMask, ws, wd, 0x9)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::SlliH(VectorRegister wd, VectorRegister ws, int shamt4) {
-  CHECK(HasMsa());
-  CHECK(IsUint<4>(shamt4)) << shamt4;
-  DsFsmInstr(EmitMsaBIT(0x0, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::SlliW(VectorRegister wd, VectorRegister ws, int shamt5) {
-  CHECK(HasMsa());
-  CHECK(IsUint<5>(shamt5)) << shamt5;
-  DsFsmInstr(EmitMsaBIT(0x0, shamt5 | kMsaDfMWordMask, ws, wd, 0x9)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::SlliD(VectorRegister wd, VectorRegister ws, int shamt6) {
-  CHECK(HasMsa());
-  CHECK(IsUint<6>(shamt6)) << shamt6;
-  DsFsmInstr(EmitMsaBIT(0x0, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::SraiB(VectorRegister wd, VectorRegister ws, int shamt3) {
-  CHECK(HasMsa());
-  CHECK(IsUint<3>(shamt3)) << shamt3;
-  DsFsmInstr(EmitMsaBIT(0x1, shamt3 | kMsaDfMByteMask, ws, wd, 0x9)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::SraiH(VectorRegister wd, VectorRegister ws, int shamt4) {
-  CHECK(HasMsa());
-  CHECK(IsUint<4>(shamt4)) << shamt4;
-  DsFsmInstr(EmitMsaBIT(0x1, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::SraiW(VectorRegister wd, VectorRegister ws, int shamt5) {
-  CHECK(HasMsa());
-  CHECK(IsUint<5>(shamt5)) << shamt5;
-  DsFsmInstr(EmitMsaBIT(0x1, shamt5 | kMsaDfMWordMask, ws, wd, 0x9)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::SraiD(VectorRegister wd, VectorRegister ws, int shamt6) {
-  CHECK(HasMsa());
-  CHECK(IsUint<6>(shamt6)) << shamt6;
-  DsFsmInstr(EmitMsaBIT(0x1, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::SrliB(VectorRegister wd, VectorRegister ws, int shamt3) {
-  CHECK(HasMsa());
-  CHECK(IsUint<3>(shamt3)) << shamt3;
-  DsFsmInstr(EmitMsaBIT(0x2, shamt3 | kMsaDfMByteMask, ws, wd, 0x9)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::SrliH(VectorRegister wd, VectorRegister ws, int shamt4) {
-  CHECK(HasMsa());
-  CHECK(IsUint<4>(shamt4)) << shamt4;
-  DsFsmInstr(EmitMsaBIT(0x2, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::SrliW(VectorRegister wd, VectorRegister ws, int shamt5) {
-  CHECK(HasMsa());
-  CHECK(IsUint<5>(shamt5)) << shamt5;
-  DsFsmInstr(EmitMsaBIT(0x2, shamt5 | kMsaDfMWordMask, ws, wd, 0x9)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::SrliD(VectorRegister wd, VectorRegister ws, int shamt6) {
-  CHECK(HasMsa());
-  CHECK(IsUint<6>(shamt6)) << shamt6;
-  DsFsmInstr(EmitMsaBIT(0x2, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::MoveV(VectorRegister wd, VectorRegister ws) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsaBIT(0x1, 0x3e, ws, wd, 0x19)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::SplatiB(VectorRegister wd, VectorRegister ws, int n4) {
-  CHECK(HasMsa());
-  CHECK(IsUint<4>(n4)) << n4;
-  DsFsmInstr(EmitMsaELM(0x1, n4 | kMsaDfNByteMask, ws, wd, 0x19)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::SplatiH(VectorRegister wd, VectorRegister ws, int n3) {
-  CHECK(HasMsa());
-  CHECK(IsUint<3>(n3)) << n3;
-  DsFsmInstr(EmitMsaELM(0x1, n3 | kMsaDfNHalfwordMask, ws, wd, 0x19)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::SplatiW(VectorRegister wd, VectorRegister ws, int n2) {
-  CHECK(HasMsa());
-  CHECK(IsUint<2>(n2)) << n2;
-  DsFsmInstr(EmitMsaELM(0x1, n2 | kMsaDfNWordMask, ws, wd, 0x19)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::SplatiD(VectorRegister wd, VectorRegister ws, int n1) {
-  CHECK(HasMsa());
-  CHECK(IsUint<1>(n1)) << n1;
-  DsFsmInstr(EmitMsaELM(0x1, n1 | kMsaDfNDoublewordMask, ws, wd, 0x19)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::Copy_sB(Register rd, VectorRegister ws, int n4) {
-  CHECK(HasMsa());
-  CHECK(IsUint<4>(n4)) << n4;
-  DsFsmInstr(EmitMsaELM(0x2, n4 | kMsaDfNByteMask, ws, static_cast<VectorRegister>(rd), 0x19))
-      .GprOuts(rd).FprIns(ws);
-}
-
-void MipsAssembler::Copy_sH(Register rd, VectorRegister ws, int n3) {
-  CHECK(HasMsa());
-  CHECK(IsUint<3>(n3)) << n3;
-  DsFsmInstr(EmitMsaELM(0x2, n3 | kMsaDfNHalfwordMask, ws, static_cast<VectorRegister>(rd), 0x19))
-      .GprOuts(rd).FprIns(ws);
-}
-
-void MipsAssembler::Copy_sW(Register rd, VectorRegister ws, int n2) {
-  CHECK(HasMsa());
-  CHECK(IsUint<2>(n2)) << n2;
-  DsFsmInstr(EmitMsaELM(0x2, n2 | kMsaDfNWordMask, ws, static_cast<VectorRegister>(rd), 0x19))
-      .GprOuts(rd).FprIns(ws);
-}
-
-void MipsAssembler::Copy_uB(Register rd, VectorRegister ws, int n4) {
-  CHECK(HasMsa());
-  CHECK(IsUint<4>(n4)) << n4;
-  DsFsmInstr(EmitMsaELM(0x3, n4 | kMsaDfNByteMask, ws, static_cast<VectorRegister>(rd), 0x19))
-      .GprOuts(rd).FprIns(ws);
-}
-
-void MipsAssembler::Copy_uH(Register rd, VectorRegister ws, int n3) {
-  CHECK(HasMsa());
-  CHECK(IsUint<3>(n3)) << n3;
-  DsFsmInstr(EmitMsaELM(0x3, n3 | kMsaDfNHalfwordMask, ws, static_cast<VectorRegister>(rd), 0x19))
-      .GprOuts(rd).FprIns(ws);
-}
-
-void MipsAssembler::InsertB(VectorRegister wd, Register rs, int n4) {
-  CHECK(HasMsa());
-  CHECK(IsUint<4>(n4)) << n4;
-  DsFsmInstr(EmitMsaELM(0x4, n4 | kMsaDfNByteMask, static_cast<VectorRegister>(rs), wd, 0x19))
-      .FprInOuts(wd).GprIns(rs);
-}
-
-void MipsAssembler::InsertH(VectorRegister wd, Register rs, int n3) {
-  CHECK(HasMsa());
-  CHECK(IsUint<3>(n3)) << n3;
-  DsFsmInstr(EmitMsaELM(0x4, n3 | kMsaDfNHalfwordMask, static_cast<VectorRegister>(rs), wd, 0x19))
-      .FprInOuts(wd).GprIns(rs);
-}
-
-void MipsAssembler::InsertW(VectorRegister wd, Register rs, int n2) {
-  CHECK(HasMsa());
-  CHECK(IsUint<2>(n2)) << n2;
-  DsFsmInstr(EmitMsaELM(0x4, n2 | kMsaDfNWordMask, static_cast<VectorRegister>(rs), wd, 0x19))
-      .FprInOuts(wd).GprIns(rs);
-}
-
-void MipsAssembler::FillB(VectorRegister wd, Register rs) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa2R(0xc0, 0x0, static_cast<VectorRegister>(rs), wd, 0x1e))
-      .FprOuts(wd).GprIns(rs);
-}
-
-void MipsAssembler::FillH(VectorRegister wd, Register rs) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa2R(0xc0, 0x1, static_cast<VectorRegister>(rs), wd, 0x1e))
-      .FprOuts(wd).GprIns(rs);
-}
-
-void MipsAssembler::FillW(VectorRegister wd, Register rs) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa2R(0xc0, 0x2, static_cast<VectorRegister>(rs), wd, 0x1e))
-      .FprOuts(wd).GprIns(rs);
-}
-
-void MipsAssembler::LdiB(VectorRegister wd, int imm8) {
-  CHECK(HasMsa());
-  CHECK(IsInt<8>(imm8)) << imm8;
-  DsFsmInstr(EmitMsaI10(0x6, 0x0, imm8 & kMsaS10Mask, wd, 0x7)).FprOuts(wd);
-}
-
-void MipsAssembler::LdiH(VectorRegister wd, int imm10) {
-  CHECK(HasMsa());
-  CHECK(IsInt<10>(imm10)) << imm10;
-  DsFsmInstr(EmitMsaI10(0x6, 0x1, imm10 & kMsaS10Mask, wd, 0x7)).FprOuts(wd);
-}
-
-void MipsAssembler::LdiW(VectorRegister wd, int imm10) {
-  CHECK(HasMsa());
-  CHECK(IsInt<10>(imm10)) << imm10;
-  DsFsmInstr(EmitMsaI10(0x6, 0x2, imm10 & kMsaS10Mask, wd, 0x7)).FprOuts(wd);
-}
-
-void MipsAssembler::LdiD(VectorRegister wd, int imm10) {
-  CHECK(HasMsa());
-  CHECK(IsInt<10>(imm10)) << imm10;
-  DsFsmInstr(EmitMsaI10(0x6, 0x3, imm10 & kMsaS10Mask, wd, 0x7)).FprOuts(wd);
-}
-
-void MipsAssembler::LdB(VectorRegister wd, Register rs, int offset) {
-  CHECK(HasMsa());
-  CHECK(IsInt<10>(offset)) << offset;
-  DsFsmInstr(EmitMsaMI10(offset & kMsaS10Mask, rs, wd, 0x8, 0x0)).FprOuts(wd).GprIns(rs);
-}
-
-void MipsAssembler::LdH(VectorRegister wd, Register rs, int offset) {
-  CHECK(HasMsa());
-  CHECK(IsInt<11>(offset)) << offset;
-  CHECK_ALIGNED(offset, kMipsHalfwordSize);
-  DsFsmInstr(EmitMsaMI10((offset >> TIMES_2) & kMsaS10Mask, rs, wd, 0x8, 0x1))
-      .FprOuts(wd).GprIns(rs);
-}
-
-void MipsAssembler::LdW(VectorRegister wd, Register rs, int offset) {
-  CHECK(HasMsa());
-  CHECK(IsInt<12>(offset)) << offset;
-  CHECK_ALIGNED(offset, kMipsWordSize);
-  DsFsmInstr(EmitMsaMI10((offset >> TIMES_4) & kMsaS10Mask, rs, wd, 0x8, 0x2))
-      .FprOuts(wd).GprIns(rs);
-}
-
-void MipsAssembler::LdD(VectorRegister wd, Register rs, int offset) {
-  CHECK(HasMsa());
-  CHECK(IsInt<13>(offset)) << offset;
-  CHECK_ALIGNED(offset, kMipsDoublewordSize);
-  DsFsmInstr(EmitMsaMI10((offset >> TIMES_8) & kMsaS10Mask, rs, wd, 0x8, 0x3))
-      .FprOuts(wd).GprIns(rs);
-}
-
-void MipsAssembler::StB(VectorRegister wd, Register rs, int offset) {
-  CHECK(HasMsa());
-  CHECK(IsInt<10>(offset)) << offset;
-  DsFsmInstr(EmitMsaMI10(offset & kMsaS10Mask, rs, wd, 0x9, 0x0)).FprIns(wd).GprIns(rs);
-}
-
-void MipsAssembler::StH(VectorRegister wd, Register rs, int offset) {
-  CHECK(HasMsa());
-  CHECK(IsInt<11>(offset)) << offset;
-  CHECK_ALIGNED(offset, kMipsHalfwordSize);
-  DsFsmInstr(EmitMsaMI10((offset >> TIMES_2) & kMsaS10Mask, rs, wd, 0x9, 0x1))
-      .FprIns(wd).GprIns(rs);
-}
-
-void MipsAssembler::StW(VectorRegister wd, Register rs, int offset) {
-  CHECK(HasMsa());
-  CHECK(IsInt<12>(offset)) << offset;
-  CHECK_ALIGNED(offset, kMipsWordSize);
-  DsFsmInstr(EmitMsaMI10((offset >> TIMES_4) & kMsaS10Mask, rs, wd, 0x9, 0x2))
-      .FprIns(wd).GprIns(rs);
-}
-
-void MipsAssembler::StD(VectorRegister wd, Register rs, int offset) {
-  CHECK(HasMsa());
-  CHECK(IsInt<13>(offset)) << offset;
-  CHECK_ALIGNED(offset, kMipsDoublewordSize);
-  DsFsmInstr(EmitMsaMI10((offset >> TIMES_8) & kMsaS10Mask, rs, wd, 0x9, 0x3))
-      .FprIns(wd).GprIns(rs);
-}
-
-void MipsAssembler::IlvlB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x14)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::IlvlH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x14)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::IlvlW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x14)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::IlvlD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x14)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::IlvrB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x14)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::IlvrH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x14)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::IlvrW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x14)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::IlvrD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x14)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::IlvevB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x14)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::IlvevH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x14)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::IlvevW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x6, 0x2, wt, ws, wd, 0x14)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::IlvevD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x6, 0x3, wt, ws, wd, 0x14)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::IlvodB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x14)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::IlvodH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x14)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::IlvodW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x7, 0x2, wt, ws, wd, 0x14)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::IlvodD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x14)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::MaddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x1, 0x0, wt, ws, wd, 0x12)).FprInOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::MaddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x1, 0x1, wt, ws, wd, 0x12)).FprInOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::MaddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x1, 0x2, wt, ws, wd, 0x12)).FprInOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::MaddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x1, 0x3, wt, ws, wd, 0x12)).FprInOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::MsubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x2, 0x0, wt, ws, wd, 0x12)).FprInOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::MsubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x2, 0x1, wt, ws, wd, 0x12)).FprInOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::MsubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x2, 0x2, wt, ws, wd, 0x12)).FprInOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::MsubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x2, 0x3, wt, ws, wd, 0x12)).FprInOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Asub_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x11)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Asub_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x11)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Asub_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x11)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Asub_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x11)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Asub_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x11)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Asub_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x11)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Asub_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x11)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Asub_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x11)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::FmaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x2, 0x0, wt, ws, wd, 0x1b)).FprInOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::FmaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x2, 0x1, wt, ws, wd, 0x1b)).FprInOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::FmsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x2, 0x2, wt, ws, wd, 0x1b)).FprInOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::FmsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x2, 0x3, wt, ws, wd, 0x1b)).FprInOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Hadd_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x15)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Hadd_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x15)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Hadd_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x15)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Hadd_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x15)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Hadd_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x15)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::Hadd_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x15)).FprOuts(wd).FprIns(ws, wt);
-}
-
-void MipsAssembler::PcntB(VectorRegister wd, VectorRegister ws) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa2R(0xc1, 0x0, ws, wd, 0x1e)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::PcntH(VectorRegister wd, VectorRegister ws) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa2R(0xc1, 0x1, ws, wd, 0x1e)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::PcntW(VectorRegister wd, VectorRegister ws) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa2R(0xc1, 0x2, ws, wd, 0x1e)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::PcntD(VectorRegister wd, VectorRegister ws) {
-  CHECK(HasMsa());
-  DsFsmInstr(EmitMsa2R(0xc1, 0x3, ws, wd, 0x1e)).FprOuts(wd).FprIns(ws);
-}
-
-void MipsAssembler::ReplicateFPToVectorRegister(VectorRegister dst,
-                                                FRegister src,
-                                                bool is_double) {
-  // Float or double in FPU register Fx can be considered as 0th element in vector register Wx.
-  if (is_double) {
-    SplatiD(dst, static_cast<VectorRegister>(src), 0);
-  } else {
-    SplatiW(dst, static_cast<VectorRegister>(src), 0);
-  }
-}
-
-void MipsAssembler::LoadConst32(Register rd, int32_t value) {
-  if (IsUint<16>(value)) {
-    // Use OR with (unsigned) immediate to encode 16b unsigned int.
-    Ori(rd, ZERO, value);
-  } else if (IsInt<16>(value)) {
-    // Use ADD with (signed) immediate to encode 16b signed int.
-    Addiu(rd, ZERO, value);
-  } else {
-    Lui(rd, High16Bits(value));
-    if (value & 0xFFFF)
-      Ori(rd, rd, Low16Bits(value));
-  }
-}
-
-void MipsAssembler::LoadConst64(Register reg_hi, Register reg_lo, int64_t value) {
-  uint32_t low = Low32Bits(value);
-  uint32_t high = High32Bits(value);
-  LoadConst32(reg_lo, low);
-  if (high != low) {
-    LoadConst32(reg_hi, high);
-  } else {
-    Move(reg_hi, reg_lo);
-  }
-}
-
-void MipsAssembler::LoadSConst32(FRegister r, int32_t value, Register temp) {
-  if (value == 0) {
-    temp = ZERO;
-  } else {
-    LoadConst32(temp, value);
-  }
-  Mtc1(temp, r);
-}
-
-void MipsAssembler::LoadDConst64(FRegister rd, int64_t value, Register temp) {
-  uint32_t low = Low32Bits(value);
-  uint32_t high = High32Bits(value);
-  if (low == 0) {
-    Mtc1(ZERO, rd);
-  } else {
-    LoadConst32(temp, low);
-    Mtc1(temp, rd);
-  }
-  if (high == 0) {
-    MoveToFpuHigh(ZERO, rd);
-  } else {
-    LoadConst32(temp, high);
-    MoveToFpuHigh(temp, rd);
-  }
-}
-
-void MipsAssembler::Addiu32(Register rt, Register rs, int32_t value, Register temp) {
-  CHECK_NE(rs, temp);  // Must not overwrite the register `rs` while loading `value`.
-  if (IsInt<16>(value)) {
-    Addiu(rt, rs, value);
-  } else if (IsR6()) {
-    int16_t high = High16Bits(value);
-    int16_t low = Low16Bits(value);
-    high += (low < 0) ? 1 : 0;  // Account for sign extension in addiu.
-    if (low != 0) {
-      Aui(temp, rs, high);
-      Addiu(rt, temp, low);
-    } else {
-      Aui(rt, rs, high);
-    }
-  } else {
-    // Do not load the whole 32-bit `value` if it can be represented as
-    // a sum of two 16-bit signed values. This can save an instruction.
-    constexpr int32_t kMinValueForSimpleAdjustment = std::numeric_limits<int16_t>::min() * 2;
-    constexpr int32_t kMaxValueForSimpleAdjustment = std::numeric_limits<int16_t>::max() * 2;
-    if (0 <= value && value <= kMaxValueForSimpleAdjustment) {
-      Addiu(temp, rs, kMaxValueForSimpleAdjustment / 2);
-      Addiu(rt, temp, value - kMaxValueForSimpleAdjustment / 2);
-    } else if (kMinValueForSimpleAdjustment <= value && value < 0) {
-      Addiu(temp, rs, kMinValueForSimpleAdjustment / 2);
-      Addiu(rt, temp, value - kMinValueForSimpleAdjustment / 2);
-    } else {
-      // Now that all shorter options have been exhausted, load the full 32-bit value.
-      LoadConst32(temp, value);
-      Addu(rt, rs, temp);
-    }
-  }
-}
-
-void MipsAssembler::Branch::InitShortOrLong(MipsAssembler::Branch::OffsetBits offset_size,
-                                            MipsAssembler::Branch::Type short_type,
-                                            MipsAssembler::Branch::Type long_type) {
-  type_ = (offset_size <= branch_info_[short_type].offset_size) ? short_type : long_type;
-}
-
-void MipsAssembler::Branch::InitializeType(Type initial_type, bool is_r6) {
-  OffsetBits offset_size_needed = GetOffsetSizeNeeded(location_, target_);
-  if (is_r6) {
-    // R6
-    switch (initial_type) {
-      case kLabel:
-        CHECK(!IsResolved());
-        type_ = kR6Label;
-        break;
-      case kLiteral:
-        CHECK(!IsResolved());
-        type_ = kR6Literal;
-        break;
-      case kCall:
-        InitShortOrLong(offset_size_needed, kR6Call, kR6LongCall);
-        break;
-      case kCondBranch:
-        switch (condition_) {
-          case kUncond:
-            InitShortOrLong(offset_size_needed, kR6UncondBranch, kR6LongUncondBranch);
-            break;
-          case kCondEQZ:
-          case kCondNEZ:
-            // Special case for beqzc/bnezc with longer offset than in other b<cond>c instructions.
-            type_ = (offset_size_needed <= kOffset23) ? kR6CondBranch : kR6LongCondBranch;
-            break;
-          default:
-            InitShortOrLong(offset_size_needed, kR6CondBranch, kR6LongCondBranch);
-            break;
-        }
-        break;
-      case kBareCall:
-        type_ = kR6BareCall;
-        CHECK_LE(offset_size_needed, GetOffsetSize());
-        break;
-      case kBareCondBranch:
-        type_ = (condition_ == kUncond) ? kR6BareUncondBranch : kR6BareCondBranch;
-        CHECK_LE(offset_size_needed, GetOffsetSize());
-        break;
-      default:
-        LOG(FATAL) << "Unexpected branch type " << initial_type;
-        UNREACHABLE();
-    }
-  } else {
-    // R2
-    switch (initial_type) {
-      case kLabel:
-        CHECK(!IsResolved());
-        type_ = kLabel;
-        break;
-      case kLiteral:
-        CHECK(!IsResolved());
-        type_ = kLiteral;
-        break;
-      case kCall:
-        InitShortOrLong(offset_size_needed, kCall, kLongCall);
-        break;
-      case kCondBranch:
-        switch (condition_) {
-          case kUncond:
-            InitShortOrLong(offset_size_needed, kUncondBranch, kLongUncondBranch);
-            break;
-          default:
-            InitShortOrLong(offset_size_needed, kCondBranch, kLongCondBranch);
-            break;
-        }
-        break;
-      case kBareCall:
-        type_ = kBareCall;
-        CHECK_LE(offset_size_needed, GetOffsetSize());
-        break;
-      case kBareCondBranch:
-        type_ = (condition_ == kUncond) ? kBareUncondBranch : kBareCondBranch;
-        CHECK_LE(offset_size_needed, GetOffsetSize());
-        break;
-      default:
-        LOG(FATAL) << "Unexpected branch type " << initial_type;
-        UNREACHABLE();
-    }
-  }
-  old_type_ = type_;
-}
-
-bool MipsAssembler::Branch::IsNop(BranchCondition condition, Register lhs, Register rhs) {
-  switch (condition) {
-    case kCondLT:
-    case kCondGT:
-    case kCondNE:
-    case kCondLTU:
-      return lhs == rhs;
-    default:
-      return false;
-  }
-}
-
-bool MipsAssembler::Branch::IsUncond(BranchCondition condition, Register lhs, Register rhs) {
-  switch (condition) {
-    case kUncond:
-      return true;
-    case kCondGE:
-    case kCondLE:
-    case kCondEQ:
-    case kCondGEU:
-      return lhs == rhs;
-    default:
-      return false;
-  }
-}
-
-MipsAssembler::Branch::Branch(bool is_r6,
-                              uint32_t location,
-                              uint32_t target,
-                              bool is_call,
-                              bool is_bare)
-    : old_location_(location),
-      location_(location),
-      target_(target),
-      lhs_reg_(0),
-      rhs_reg_(0),
-      condition_(kUncond),
-      delayed_instruction_(kUnfilledDelaySlot),
-      patcher_label_(nullptr) {
-  InitializeType(
-      (is_call ? (is_bare ? kBareCall : kCall) : (is_bare ? kBareCondBranch : kCondBranch)),
-      is_r6);
-}
-
-MipsAssembler::Branch::Branch(bool is_r6,
-                              uint32_t location,
-                              uint32_t target,
-                              MipsAssembler::BranchCondition condition,
-                              Register lhs_reg,
-                              Register rhs_reg,
-                              bool is_bare)
-    : old_location_(location),
-      location_(location),
-      target_(target),
-      lhs_reg_(lhs_reg),
-      rhs_reg_(rhs_reg),
-      condition_(condition),
-      delayed_instruction_(kUnfilledDelaySlot),
-      patcher_label_(nullptr) {
-  CHECK_NE(condition, kUncond);
-  switch (condition) {
-    case kCondLT:
-    case kCondGE:
-    case kCondLE:
-    case kCondGT:
-    case kCondLTU:
-    case kCondGEU:
-      // We don't support synthetic R2 branches (preceded with slt[u]) at this level
-      // (R2 doesn't have branches to compare 2 registers using <, <=, >=, >).
-      // We leave this up to the caller.
-      CHECK(is_r6);
-      FALLTHROUGH_INTENDED;
-    case kCondEQ:
-    case kCondNE:
-      // Require registers other than 0 not only for R6, but also for R2 to catch errors.
-      // To compare with 0, use dedicated kCond*Z conditions.
-      CHECK_NE(lhs_reg, ZERO);
-      CHECK_NE(rhs_reg, ZERO);
-      break;
-    case kCondLTZ:
-    case kCondGEZ:
-    case kCondLEZ:
-    case kCondGTZ:
-    case kCondEQZ:
-    case kCondNEZ:
-      // Require registers other than 0 not only for R6, but also for R2 to catch errors.
-      CHECK_NE(lhs_reg, ZERO);
-      CHECK_EQ(rhs_reg, ZERO);
-      break;
-    case kCondF:
-    case kCondT:
-      CHECK_EQ(rhs_reg, ZERO);
-      break;
-    case kUncond:
-      UNREACHABLE();
-  }
-  CHECK(!IsNop(condition, lhs_reg, rhs_reg));
-  if (IsUncond(condition, lhs_reg, rhs_reg)) {
-    // Branch condition is always true, make the branch unconditional.
-    condition_ = kUncond;
-  }
-  InitializeType((is_bare ? kBareCondBranch : kCondBranch), is_r6);
-}
-
-MipsAssembler::Branch::Branch(bool is_r6,
-                              uint32_t location,
-                              Register dest_reg,
-                              Register base_reg,
-                              Type label_or_literal_type)
-    : old_location_(location),
-      location_(location),
-      target_(kUnresolved),
-      lhs_reg_(dest_reg),
-      rhs_reg_(base_reg),
-      condition_(kUncond),
-      delayed_instruction_(kUnfilledDelaySlot),
-      patcher_label_(nullptr) {
-  CHECK_NE(dest_reg, ZERO);
-  if (is_r6) {
-    CHECK_EQ(base_reg, ZERO);
-  }
-  InitializeType(label_or_literal_type, is_r6);
-}
-
-MipsAssembler::BranchCondition MipsAssembler::Branch::OppositeCondition(
-    MipsAssembler::BranchCondition cond) {
-  switch (cond) {
-    case kCondLT:
-      return kCondGE;
-    case kCondGE:
-      return kCondLT;
-    case kCondLE:
-      return kCondGT;
-    case kCondGT:
-      return kCondLE;
-    case kCondLTZ:
-      return kCondGEZ;
-    case kCondGEZ:
-      return kCondLTZ;
-    case kCondLEZ:
-      return kCondGTZ;
-    case kCondGTZ:
-      return kCondLEZ;
-    case kCondEQ:
-      return kCondNE;
-    case kCondNE:
-      return kCondEQ;
-    case kCondEQZ:
-      return kCondNEZ;
-    case kCondNEZ:
-      return kCondEQZ;
-    case kCondLTU:
-      return kCondGEU;
-    case kCondGEU:
-      return kCondLTU;
-    case kCondF:
-      return kCondT;
-    case kCondT:
-      return kCondF;
-    case kUncond:
-      LOG(FATAL) << "Unexpected branch condition " << cond;
-  }
-  UNREACHABLE();
-}
-
-MipsAssembler::Branch::Type MipsAssembler::Branch::GetType() const {
-  return type_;
-}
-
-MipsAssembler::BranchCondition MipsAssembler::Branch::GetCondition() const {
-  return condition_;
-}
-
-Register MipsAssembler::Branch::GetLeftRegister() const {
-  return static_cast<Register>(lhs_reg_);
-}
-
-Register MipsAssembler::Branch::GetRightRegister() const {
-  return static_cast<Register>(rhs_reg_);
-}
-
-uint32_t MipsAssembler::Branch::GetTarget() const {
-  return target_;
-}
-
-uint32_t MipsAssembler::Branch::GetLocation() const {
-  return location_;
-}
-
-uint32_t MipsAssembler::Branch::GetOldLocation() const {
-  return old_location_;
-}
-
-uint32_t MipsAssembler::Branch::GetPrecedingInstructionLength(Type type) const {
-  // Short branches with delay slots always consist of two instructions, the branch
-  // and the delay slot, irrespective of whether the delay slot is filled with a
-  // useful instruction or not.
-  // Long composite branches may have a length longer by one instruction than
-  // specified in branch_info_[].length. This happens when an instruction is taken
-  // to fill the short branch delay slot, but the branch eventually becomes long
-  // and formally has no delay slot to fill. This instruction is placed at the
-  // beginning of the long composite branch and this needs to be accounted for in
-  // the branch length and the location of the offset encoded in the branch.
-  switch (type) {
-    case kLongUncondBranch:
-    case kLongCondBranch:
-    case kLongCall:
-    case kR6LongCondBranch:
-      return (delayed_instruction_ != kUnfilledDelaySlot &&
-          delayed_instruction_ != kUnfillableDelaySlot) ? 1 : 0;
-    default:
-      return 0;
-  }
-}
-
-uint32_t MipsAssembler::Branch::GetPrecedingInstructionSize(Type type) const {
-  return GetPrecedingInstructionLength(type) * sizeof(uint32_t);
-}
-
-uint32_t MipsAssembler::Branch::GetLength() const {
-  return GetPrecedingInstructionLength(type_) + branch_info_[type_].length;
-}
-
-uint32_t MipsAssembler::Branch::GetOldLength() const {
-  return GetPrecedingInstructionLength(old_type_) + branch_info_[old_type_].length;
-}
-
-uint32_t MipsAssembler::Branch::GetSize() const {
-  return GetLength() * sizeof(uint32_t);
-}
-
-uint32_t MipsAssembler::Branch::GetOldSize() const {
-  return GetOldLength() * sizeof(uint32_t);
-}
-
-uint32_t MipsAssembler::Branch::GetEndLocation() const {
-  return GetLocation() + GetSize();
-}
-
-uint32_t MipsAssembler::Branch::GetOldEndLocation() const {
-  return GetOldLocation() + GetOldSize();
-}
-
-bool MipsAssembler::Branch::IsBare() const {
-  switch (type_) {
-    // R2 short branches (can't be promoted to long), delay slots filled manually.
-    case kBareUncondBranch:
-    case kBareCondBranch:
-    case kBareCall:
-    // R6 short branches (can't be promoted to long), forbidden/delay slots filled manually.
-    case kR6BareUncondBranch:
-    case kR6BareCondBranch:
-    case kR6BareCall:
-      return true;
-    default:
-      return false;
-  }
-}
-
-bool MipsAssembler::Branch::IsLong() const {
-  switch (type_) {
-    // R2 short branches (can be promoted to long).
-    case kUncondBranch:
-    case kCondBranch:
-    case kCall:
-    // R2 short branches (can't be promoted to long), delay slots filled manually.
-    case kBareUncondBranch:
-    case kBareCondBranch:
-    case kBareCall:
-    // R2 near label.
-    case kLabel:
-    // R2 near literal.
-    case kLiteral:
-    // R6 short branches (can be promoted to long).
-    case kR6UncondBranch:
-    case kR6CondBranch:
-    case kR6Call:
-    // R6 short branches (can't be promoted to long), forbidden/delay slots filled manually.
-    case kR6BareUncondBranch:
-    case kR6BareCondBranch:
-    case kR6BareCall:
-    // R6 near label.
-    case kR6Label:
-    // R6 near literal.
-    case kR6Literal:
-      return false;
-    // R2 long branches.
-    case kLongUncondBranch:
-    case kLongCondBranch:
-    case kLongCall:
-    // R2 far label.
-    case kFarLabel:
-    // R2 far literal.
-    case kFarLiteral:
-    // R6 long branches.
-    case kR6LongUncondBranch:
-    case kR6LongCondBranch:
-    case kR6LongCall:
-    // R6 far label.
-    case kR6FarLabel:
-    // R6 far literal.
-    case kR6FarLiteral:
-      return true;
-  }
-  UNREACHABLE();
-}
-
-bool MipsAssembler::Branch::IsResolved() const {
-  return target_ != kUnresolved;
-}
-
-MipsAssembler::Branch::OffsetBits MipsAssembler::Branch::GetOffsetSize() const {
-  bool r6_cond_branch = (type_ == kR6CondBranch || type_ == kR6BareCondBranch);
-  OffsetBits offset_size =
-      (r6_cond_branch && (condition_ == kCondEQZ || condition_ == kCondNEZ))
-          ? kOffset23
-          : branch_info_[type_].offset_size;
-  return offset_size;
-}
-
-MipsAssembler::Branch::OffsetBits MipsAssembler::Branch::GetOffsetSizeNeeded(uint32_t location,
-                                                                             uint32_t target) {
-  // For unresolved targets assume the shortest encoding
-  // (later it will be made longer if needed).
-  if (target == kUnresolved)
-    return kOffset16;
-  int64_t distance = static_cast<int64_t>(target) - location;
-  // To simplify calculations in composite branches consisting of multiple instructions
-  // bump up the distance by a value larger than the max byte size of a composite branch.
-  distance += (distance >= 0) ? kMaxBranchSize : -kMaxBranchSize;
-  if (IsInt<kOffset16>(distance))
-    return kOffset16;
-  else if (IsInt<kOffset18>(distance))
-    return kOffset18;
-  else if (IsInt<kOffset21>(distance))
-    return kOffset21;
-  else if (IsInt<kOffset23>(distance))
-    return kOffset23;
-  else if (IsInt<kOffset28>(distance))
-    return kOffset28;
-  return kOffset32;
-}
-
-void MipsAssembler::Branch::Resolve(uint32_t target) {
-  target_ = target;
-}
-
-void MipsAssembler::Branch::Relocate(uint32_t expand_location, uint32_t delta) {
-  if (location_ > expand_location) {
-    location_ += delta;
-  }
-  if (!IsResolved()) {
-    return;  // Don't know the target yet.
-  }
-  if (target_ > expand_location) {
-    target_ += delta;
-  }
-}
-
-void MipsAssembler::Branch::PromoteToLong() {
-  CHECK(!IsBare());  // Bare branches do not promote.
-  switch (type_) {
-    // R2 short branches (can be promoted to long).
-    case kUncondBranch:
-      type_ = kLongUncondBranch;
-      break;
-    case kCondBranch:
-      type_ = kLongCondBranch;
-      break;
-    case kCall:
-      type_ = kLongCall;
-      break;
-    // R2 near label.
-    case kLabel:
-      type_ = kFarLabel;
-      break;
-    // R2 near literal.
-    case kLiteral:
-      type_ = kFarLiteral;
-      break;
-    // R6 short branches (can be promoted to long).
-    case kR6UncondBranch:
-      type_ = kR6LongUncondBranch;
-      break;
-    case kR6CondBranch:
-      type_ = kR6LongCondBranch;
-      break;
-    case kR6Call:
-      type_ = kR6LongCall;
-      break;
-    // R6 near label.
-    case kR6Label:
-      type_ = kR6FarLabel;
-      break;
-    // R6 near literal.
-    case kR6Literal:
-      type_ = kR6FarLiteral;
-      break;
-    default:
-      // Note: 'type_' is already long.
-      break;
-  }
-  CHECK(IsLong());
-}
-
-uint32_t MipsAssembler::GetBranchLocationOrPcRelBase(const MipsAssembler::Branch* branch) const {
-  switch (branch->GetType()) {
-    case Branch::kLabel:
-    case Branch::kFarLabel:
-    case Branch::kLiteral:
-    case Branch::kFarLiteral:
-      if (branch->GetRightRegister() != ZERO) {
-        return GetLabelLocation(&pc_rel_base_label_);
-      }
-      // For those label/literal loads which come with their own NAL instruction
-      // and don't depend on `pc_rel_base_label_` we can simply use the location
-      // of the "branch" (the NAL precedes the "branch" immediately). The location
-      // is close enough for the user of the returned location, PromoteIfNeeded(),
-      // to not miss needed promotion to a far load.
-      // (GetOffsetSizeNeeded() provides a little leeway by means of kMaxBranchSize,
-      // which is larger than all composite branches and label/literal loads: it's
-      // OK to promote a bit earlier than strictly necessary, it makes things
-      // simpler.)
-      FALLTHROUGH_INTENDED;
-    default:
-      return branch->GetLocation();
-  }
-}
-
-uint32_t MipsAssembler::Branch::PromoteIfNeeded(uint32_t location, uint32_t max_short_distance) {
-  // `location` comes from GetBranchLocationOrPcRelBase() and is either the location
-  // of the PC-relative branch or (for some R2 label and literal loads) the location
-  // of `pc_rel_base_label_`. The PC-relative offset of the branch/load is relative
-  // to this location.
-  // If the branch is still unresolved or already long, nothing to do.
-  if (IsLong() || !IsResolved()) {
-    return 0;
-  }
-  // Promote the short branch to long if the offset size is too small
-  // to hold the distance between location and target_.
-  if (GetOffsetSizeNeeded(location, target_) > GetOffsetSize()) {
-    PromoteToLong();
-    uint32_t old_size = GetOldSize();
-    uint32_t new_size = GetSize();
-    CHECK_GT(new_size, old_size);
-    return new_size - old_size;
-  }
-  // The following logic is for debugging/testing purposes.
-  // Promote some short branches to long when it's not really required.
-  if (UNLIKELY(max_short_distance != std::numeric_limits<uint32_t>::max() && !IsBare())) {
-    int64_t distance = static_cast<int64_t>(target_) - location;
-    distance = (distance >= 0) ? distance : -distance;
-    if (distance >= max_short_distance) {
-      PromoteToLong();
-      uint32_t old_size = GetOldSize();
-      uint32_t new_size = GetSize();
-      CHECK_GT(new_size, old_size);
-      return new_size - old_size;
-    }
-  }
-  return 0;
-}
-
-uint32_t MipsAssembler::Branch::GetOffsetLocation() const {
-  return location_ + GetPrecedingInstructionSize(type_) +
-      branch_info_[type_].instr_offset * sizeof(uint32_t);
-}
-
-uint32_t MipsAssembler::GetBranchOrPcRelBaseForEncoding(const MipsAssembler::Branch* branch) const {
-  switch (branch->GetType()) {
-    case Branch::kLabel:
-    case Branch::kFarLabel:
-    case Branch::kLiteral:
-    case Branch::kFarLiteral:
-      if (branch->GetRightRegister() == ZERO) {
-        // These loads don't use `pc_rel_base_label_` and instead rely on their own
-        // NAL instruction (it immediately precedes the "branch"). Therefore the
-        // effective PC-relative base register is RA and it corresponds to the 2nd
-        // instruction after the NAL.
-        return branch->GetLocation() + sizeof(uint32_t);
-      } else {
-        return GetLabelLocation(&pc_rel_base_label_);
-      }
-    default:
-      return branch->GetOffsetLocation() +
-          Branch::branch_info_[branch->GetType()].pc_org * sizeof(uint32_t);
-  }
-}
-
-uint32_t MipsAssembler::Branch::GetOffset(uint32_t location) const {
-  // `location` comes from GetBranchOrPcRelBaseForEncoding() and is either a location
-  // within/near the PC-relative branch or (for some R2 label and literal loads) the
-  // location of `pc_rel_base_label_`. The PC-relative offset of the branch/load is
-  // relative to this location.
-  CHECK(IsResolved());
-  uint32_t ofs_mask = 0xFFFFFFFF >> (32 - GetOffsetSize());
-  // Calculate the byte distance between instructions and also account for
-  // different PC-relative origins.
-  uint32_t offset = target_ - location;
-  // Prepare the offset for encoding into the instruction(s).
-  offset = (offset & ofs_mask) >> branch_info_[type_].offset_shift;
-  return offset;
-}
-
-MipsAssembler::Branch* MipsAssembler::GetBranch(uint32_t branch_id) {
-  CHECK_LT(branch_id, branches_.size());
-  return &branches_[branch_id];
-}
-
-const MipsAssembler::Branch* MipsAssembler::GetBranch(uint32_t branch_id) const {
-  CHECK_LT(branch_id, branches_.size());
-  return &branches_[branch_id];
-}
-
-void MipsAssembler::BindRelativeToPrecedingBranch(MipsLabel* label,
-                                                  uint32_t prev_branch_id_plus_one,
-                                                  uint32_t position) {
-  if (prev_branch_id_plus_one != 0) {
-    const Branch* branch = GetBranch(prev_branch_id_plus_one - 1);
-    position -= branch->GetEndLocation();
-  }
-  label->prev_branch_id_plus_one_ = prev_branch_id_plus_one;
-  label->BindTo(position);
-}
-
-void MipsAssembler::Bind(MipsLabel* label) {
-  CHECK(!label->IsBound());
-  uint32_t bound_pc = buffer_.Size();
-
-  // Make the delay slot FSM aware of the new label.
-  DsFsmLabel();
-
-  // Walk the list of branches referring to and preceding this label.
-  // Store the previously unknown target addresses in them.
-  while (label->IsLinked()) {
-    uint32_t branch_id = label->Position();
-    Branch* branch = GetBranch(branch_id);
-    branch->Resolve(bound_pc);
-
-    uint32_t branch_location = branch->GetLocation();
-    // Extract the location of the previous branch in the list (walking the list backwards;
-    // the previous branch ID was stored in the space reserved for this branch).
-    uint32_t prev = buffer_.Load<uint32_t>(branch_location);
-
-    // On to the previous branch in the list...
-    label->position_ = prev;
-  }
-
-  // Now make the label object contain its own location (relative to the end of the preceding
-  // branch, if any; it will be used by the branches referring to and following this label).
-  BindRelativeToPrecedingBranch(label, branches_.size(), bound_pc);
-}
-
-uint32_t MipsAssembler::GetLabelLocation(const MipsLabel* label) const {
-  CHECK(label->IsBound());
-  uint32_t target = label->Position();
-  if (label->prev_branch_id_plus_one_ != 0) {
-    // Get label location based on the branch preceding it.
-    const Branch* branch = GetBranch(label->prev_branch_id_plus_one_ - 1);
-    target += branch->GetEndLocation();
-  }
-  return target;
-}
-
-uint32_t MipsAssembler::GetAdjustedPosition(uint32_t old_position) {
-  // We can reconstruct the adjustment by going through all the branches from the beginning
-  // up to the old_position. Since we expect AdjustedPosition() to be called in a loop
-  // with increasing old_position, we can use the data from last AdjustedPosition() to
-  // continue where we left off and the whole loop should be O(m+n) where m is the number
-  // of positions to adjust and n is the number of branches.
-  if (old_position < last_old_position_) {
-    last_position_adjustment_ = 0;
-    last_old_position_ = 0;
-    last_branch_id_ = 0;
-  }
-  while (last_branch_id_ != branches_.size()) {
-    const Branch* branch = GetBranch(last_branch_id_);
-    if (branch->GetLocation() >= old_position + last_position_adjustment_) {
-      break;
-    }
-    last_position_adjustment_ += branch->GetSize() - branch->GetOldSize();
-    ++last_branch_id_;
-  }
-  last_old_position_ = old_position;
-  return old_position + last_position_adjustment_;
-}
-
-void MipsAssembler::BindPcRelBaseLabel() {
-  Bind(&pc_rel_base_label_);
-}
-
-uint32_t MipsAssembler::GetPcRelBaseLabelLocation() const {
-  return GetLabelLocation(&pc_rel_base_label_);
-}
-
-void MipsAssembler::FinalizeLabeledBranch(MipsLabel* label) {
-  uint32_t length = branches_.back().GetLength();
-  // Commit the last branch target label (if any).
-  DsFsmCommitLabel();
-  if (!label->IsBound()) {
-    // Branch forward (to a following label), distance is unknown.
-    // The first branch forward will contain 0, serving as the terminator of
-    // the list of forward-reaching branches.
-    Emit(label->position_);
-    // Nothing for the delay slot (yet).
-    DsFsmInstrNop(0);
-    length--;
-    // Now make the label object point to this branch
-    // (this forms a linked list of branches preceding this label).
-    uint32_t branch_id = branches_.size() - 1;
-    label->LinkTo(branch_id);
-  }
-  // Reserve space for the branch.
-  for (; length != 0u; --length) {
-    Nop();
-  }
-}
-
-bool MipsAssembler::Branch::CanHaveDelayedInstruction(const DelaySlot& delay_slot) const {
-  if (delay_slot.instruction_ == 0) {
-    // NOP or no instruction for the delay slot.
-    return false;
-  }
-  switch (type_) {
-    // R2 unconditional branches.
-    case kUncondBranch:
-    case kLongUncondBranch:
-      // There are no register interdependencies.
-      return true;
-
-    // R2 calls.
-    case kCall:
-    case kLongCall:
-      // Instructions depending on or modifying RA should not be moved into delay slots
-      // of branches modifying RA.
-      return ((delay_slot.masks_.gpr_ins_ | delay_slot.masks_.gpr_outs_) & (1u << RA)) == 0;
-
-    // R2 conditional branches.
-    case kCondBranch:
-    case kLongCondBranch:
-      switch (condition_) {
-        // Branches with one GPR source.
-        case kCondLTZ:
-        case kCondGEZ:
-        case kCondLEZ:
-        case kCondGTZ:
-        case kCondEQZ:
-        case kCondNEZ:
-          return (delay_slot.masks_.gpr_outs_ & (1u << lhs_reg_)) == 0;
-
-        // Branches with two GPR sources.
-        case kCondEQ:
-        case kCondNE:
-          return (delay_slot.masks_.gpr_outs_ & ((1u << lhs_reg_) | (1u << rhs_reg_))) == 0;
-
-        // Branches with one FPU condition code source.
-        case kCondF:
-        case kCondT:
-          return (delay_slot.masks_.cc_outs_ & (1u << lhs_reg_)) == 0;
-
-        default:
-          // We don't support synthetic R2 branches (preceded with slt[u]) at this level
-          // (R2 doesn't have branches to compare 2 registers using <, <=, >=, >).
-          LOG(FATAL) << "Unexpected branch condition " << condition_;
-          UNREACHABLE();
-      }
-
-    // R6 unconditional branches.
-    case kR6UncondBranch:
-    case kR6LongUncondBranch:
-    // R6 calls.
-    case kR6Call:
-    case kR6LongCall:
-      // There are no delay slots.
-      return false;
-
-    // R6 conditional branches.
-    case kR6CondBranch:
-    case kR6LongCondBranch:
-      switch (condition_) {
-        // Branches with one FPU register source.
-        case kCondF:
-        case kCondT:
-          return (delay_slot.masks_.fpr_outs_ & (1u << lhs_reg_)) == 0;
-        // Others have a forbidden slot instead of a delay slot.
-        default:
-          return false;
-      }
-
-    // Literals.
-    default:
-      LOG(FATAL) << "Unexpected branch type " << type_;
-      UNREACHABLE();
-  }
-}
-
-uint32_t MipsAssembler::Branch::GetDelayedInstruction() const {
-  return delayed_instruction_;
-}
-
-MipsLabel* MipsAssembler::Branch::GetPatcherLabel() const {
-  return patcher_label_;
-}
-
-void MipsAssembler::Branch::SetDelayedInstruction(uint32_t instruction, MipsLabel* patcher_label) {
-  CHECK_NE(instruction, kUnfilledDelaySlot);
-  CHECK_EQ(delayed_instruction_, kUnfilledDelaySlot);
-  delayed_instruction_ = instruction;
-  patcher_label_ = patcher_label;
-}
-
-void MipsAssembler::Branch::DecrementLocations() {
-  // We first create a branch object, which gets its type and locations initialized,
-  // and then we check if the branch can actually have the preceding instruction moved
-  // into its delay slot. If it can, the branch locations need to be decremented.
-  //
-  // We could make the check before creating the branch object and avoid the location
-  // adjustment, but the check is cleaner when performed on an initialized branch
-  // object.
-  //
-  // If the branch is backwards (to a previously bound label), reducing the locations
-  // cannot cause a short branch to exceed its offset range because the offset reduces.
-  // And this is not at all a problem for a long branch backwards.
-  //
-  // If the branch is forward (not linked to any label yet), reducing the locations
-  // is harmless. The branch will be promoted to long if needed when the target is known.
-  CHECK_EQ(location_, old_location_);
-  CHECK_GE(old_location_, sizeof(uint32_t));
-  old_location_ -= sizeof(uint32_t);
-  location_ = old_location_;
-}
-
-void MipsAssembler::MoveInstructionToDelaySlot(Branch& branch) {
-  if (branch.IsBare()) {
-    // Delay slots are filled manually in bare branches.
-    return;
-  }
-  if (branch.CanHaveDelayedInstruction(delay_slot_)) {
-    // The last instruction cannot be used in a different delay slot,
-    // do not commit the label before it (if any).
-    DsFsmDropLabel();
-    // Remove the last emitted instruction.
-    size_t size = buffer_.Size();
-    CHECK_GE(size, sizeof(uint32_t));
-    size -= sizeof(uint32_t);
-    CHECK_EQ(buffer_.Load<uint32_t>(size), delay_slot_.instruction_);
-    buffer_.Resize(size);
-    // Attach it to the branch and adjust the branch locations.
-    branch.DecrementLocations();
-    branch.SetDelayedInstruction(delay_slot_.instruction_, delay_slot_.patcher_label_);
-  } else if (!reordering_ && branch.GetType() == Branch::kUncondBranch) {
-    // If reordefing is disabled, prevent absorption of the target instruction.
-    branch.SetDelayedInstruction(Branch::kUnfillableDelaySlot);
-  }
-}
-
-void MipsAssembler::Buncond(MipsLabel* label, bool is_r6, bool is_bare) {
-  uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
-  branches_.emplace_back(is_r6, buffer_.Size(), target, /* is_call= */ false, is_bare);
-  MoveInstructionToDelaySlot(branches_.back());
-  FinalizeLabeledBranch(label);
-}
-
-void MipsAssembler::Bcond(MipsLabel* label,
-                          bool is_r6,
-                          bool is_bare,
-                          BranchCondition condition,
-                          Register lhs,
-                          Register rhs) {
-  // If lhs = rhs, this can be a NOP.
-  if (Branch::IsNop(condition, lhs, rhs)) {
-    return;
-  }
-  uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
-  branches_.emplace_back(is_r6, buffer_.Size(), target, condition, lhs, rhs, is_bare);
-  MoveInstructionToDelaySlot(branches_.back());
-  FinalizeLabeledBranch(label);
-}
-
-void MipsAssembler::Call(MipsLabel* label, bool is_r6, bool is_bare) {
-  uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
-  branches_.emplace_back(is_r6, buffer_.Size(), target, /* is_call= */ true, is_bare);
-  MoveInstructionToDelaySlot(branches_.back());
-  FinalizeLabeledBranch(label);
-}
-
-void MipsAssembler::LoadLabelAddress(Register dest_reg, Register base_reg, MipsLabel* label) {
-  // Label address loads are treated as pseudo branches since they require very similar handling.
-  DCHECK(!label->IsBound());
-  // If `pc_rel_base_label_` isn't bound or none of registers contains its address, we
-  // may generate an individual NAL instruction to simulate PC-relative addressing on R2
-  // by specifying `base_reg` of `ZERO`. Check for it.
-  if (base_reg == ZERO && !IsR6()) {
-    Nal();
-  }
-  branches_.emplace_back(IsR6(), buffer_.Size(), dest_reg, base_reg, Branch::kLabel);
-  FinalizeLabeledBranch(label);
-}
-
-Literal* MipsAssembler::NewLiteral(size_t size, const uint8_t* data) {
-  DCHECK(size == 4u || size == 8u) << size;
-  literals_.emplace_back(size, data);
-  return &literals_.back();
-}
-
-void MipsAssembler::LoadLiteral(Register dest_reg, Register base_reg, Literal* literal) {
-  // Literal loads are treated as pseudo branches since they require very similar handling.
-  DCHECK_EQ(literal->GetSize(), 4u);
-  MipsLabel* label = literal->GetLabel();
-  DCHECK(!label->IsBound());
-  // If `pc_rel_base_label_` isn't bound or none of registers contains its address, we
-  // may generate an individual NAL instruction to simulate PC-relative addressing on R2
-  // by specifying `base_reg` of `ZERO`. Check for it.
-  if (base_reg == ZERO && !IsR6()) {
-    Nal();
-  }
-  branches_.emplace_back(IsR6(), buffer_.Size(), dest_reg, base_reg, Branch::kLiteral);
-  FinalizeLabeledBranch(label);
-}
-
-JumpTable* MipsAssembler::CreateJumpTable(std::vector<MipsLabel*>&& labels) {
-  jump_tables_.emplace_back(std::move(labels));
-  JumpTable* table = &jump_tables_.back();
-  DCHECK(!table->GetLabel()->IsBound());
-  return table;
-}
-
-void MipsAssembler::EmitLiterals() {
-  if (!literals_.empty()) {
-    // We don't support byte and half-word literals.
-    // TODO: proper alignment for 64-bit literals when they're implemented.
-    for (Literal& literal : literals_) {
-      MipsLabel* label = literal.GetLabel();
-      Bind(label);
-      AssemblerBuffer::EnsureCapacity ensured(&buffer_);
-      DCHECK(literal.GetSize() == 4u || literal.GetSize() == 8u);
-      for (size_t i = 0, size = literal.GetSize(); i != size; ++i) {
-        buffer_.Emit<uint8_t>(literal.GetData()[i]);
-      }
-    }
-  }
-}
-
-void MipsAssembler::ReserveJumpTableSpace() {
-  if (!jump_tables_.empty()) {
-    for (JumpTable& table : jump_tables_) {
-      MipsLabel* label = table.GetLabel();
-      Bind(label);
-
-      // Bulk ensure capacity, as this may be large.
-      size_t orig_size = buffer_.Size();
-      size_t required_capacity = orig_size + table.GetSize();
-      if (required_capacity > buffer_.Capacity()) {
-        buffer_.ExtendCapacity(required_capacity);
-      }
-#ifndef NDEBUG
-      buffer_.has_ensured_capacity_ = true;
-#endif
-
-      // Fill the space with dummy data as the data is not final
-      // until the branches have been promoted. And we shouldn't
-      // be moving uninitialized data during branch promotion.
-      for (size_t cnt = table.GetData().size(), i = 0; i < cnt; i++) {
-        buffer_.Emit<uint32_t>(0x1abe1234u);
-      }
-
-#ifndef NDEBUG
-      buffer_.has_ensured_capacity_ = false;
-#endif
-    }
-  }
-}
-
-void MipsAssembler::EmitJumpTables() {
-  if (!jump_tables_.empty()) {
-    CHECK(!overwriting_);
-    // Switch from appending instructions at the end of the buffer to overwriting
-    // existing instructions (here, jump tables) in the buffer.
-    overwriting_ = true;
-
-    for (JumpTable& table : jump_tables_) {
-      MipsLabel* table_label = table.GetLabel();
-      uint32_t start = GetLabelLocation(table_label);
-      overwrite_location_ = start;
-
-      for (MipsLabel* target : table.GetData()) {
-        CHECK_EQ(buffer_.Load<uint32_t>(overwrite_location_), 0x1abe1234u);
-        // The table will contain target addresses relative to the table start.
-        uint32_t offset = GetLabelLocation(target) - start;
-        Emit(offset);
-      }
-    }
-
-    overwriting_ = false;
-  }
-}
-
-void MipsAssembler::PromoteBranches() {
-  // Promote short branches to long as necessary.
-  bool changed;
-  do {
-    changed = false;
-    for (auto& branch : branches_) {
-      CHECK(branch.IsResolved());
-      uint32_t base = GetBranchLocationOrPcRelBase(&branch);
-      uint32_t delta = branch.PromoteIfNeeded(base);
-      // If this branch has been promoted and needs to expand in size,
-      // relocate all branches by the expansion size.
-      if (delta) {
-        changed = true;
-        uint32_t expand_location = branch.GetLocation();
-        for (auto& branch2 : branches_) {
-          branch2.Relocate(expand_location, delta);
-        }
-      }
-    }
-  } while (changed);
-
-  // Account for branch expansion by resizing the code buffer
-  // and moving the code in it to its final location.
-  size_t branch_count = branches_.size();
-  if (branch_count > 0) {
-    // Resize.
-    Branch& last_branch = branches_[branch_count - 1];
-    uint32_t size_delta = last_branch.GetEndLocation() - last_branch.GetOldEndLocation();
-    uint32_t old_size = buffer_.Size();
-    buffer_.Resize(old_size + size_delta);
-    // Move the code residing between branch placeholders.
-    uint32_t end = old_size;
-    for (size_t i = branch_count; i > 0; ) {
-      Branch& branch = branches_[--i];
-      CHECK_GE(end, branch.GetOldEndLocation());
-      uint32_t size = end - branch.GetOldEndLocation();
-      buffer_.Move(branch.GetEndLocation(), branch.GetOldEndLocation(), size);
-      end = branch.GetOldLocation();
-    }
-  }
-}
-
-// Note: make sure branch_info_[] and EmitBranch() are kept synchronized.
-const MipsAssembler::Branch::BranchInfo MipsAssembler::Branch::branch_info_[] = {
-  // R2 short branches (can be promoted to long).
-  {  2, 0, 1, MipsAssembler::Branch::kOffset18, 2 },  // kUncondBranch
-  {  2, 0, 1, MipsAssembler::Branch::kOffset18, 2 },  // kCondBranch
-  {  2, 0, 1, MipsAssembler::Branch::kOffset18, 2 },  // kCall
-  // R2 short branches (can't be promoted to long), delay slots filled manually.
-  {  1, 0, 1, MipsAssembler::Branch::kOffset18, 2 },  // kBareUncondBranch
-  {  1, 0, 1, MipsAssembler::Branch::kOffset18, 2 },  // kBareCondBranch
-  {  1, 0, 1, MipsAssembler::Branch::kOffset18, 2 },  // kBareCall
-  // R2 near label.
-  {  1, 0, 0, MipsAssembler::Branch::kOffset16, 0 },  // kLabel
-  // R2 near literal.
-  {  1, 0, 0, MipsAssembler::Branch::kOffset16, 0 },  // kLiteral
-  // R2 long branches.
-  {  9, 3, 1, MipsAssembler::Branch::kOffset32, 0 },  // kLongUncondBranch
-  { 10, 4, 1, MipsAssembler::Branch::kOffset32, 0 },  // kLongCondBranch
-  {  6, 1, 1, MipsAssembler::Branch::kOffset32, 0 },  // kLongCall
-  // R2 far label.
-  {  3, 0, 0, MipsAssembler::Branch::kOffset32, 0 },  // kFarLabel
-  // R2 far literal.
-  {  3, 0, 0, MipsAssembler::Branch::kOffset32, 0 },  // kFarLiteral
-  // R6 short branches (can be promoted to long).
-  {  1, 0, 1, MipsAssembler::Branch::kOffset28, 2 },  // kR6UncondBranch
-  {  2, 0, 1, MipsAssembler::Branch::kOffset18, 2 },  // kR6CondBranch
-                                                      // Exception: kOffset23 for beqzc/bnezc.
-  {  1, 0, 1, MipsAssembler::Branch::kOffset28, 2 },  // kR6Call
-  // R6 short branches (can't be promoted to long), forbidden/delay slots filled manually.
-  {  1, 0, 1, MipsAssembler::Branch::kOffset28, 2 },  // kR6BareUncondBranch
-  {  1, 0, 1, MipsAssembler::Branch::kOffset18, 2 },  // kR6BareCondBranch
-                                                      // Exception: kOffset23 for beqzc/bnezc.
-  {  1, 0, 1, MipsAssembler::Branch::kOffset28, 2 },  // kR6BareCall
-  // R6 near label.
-  {  1, 0, 0, MipsAssembler::Branch::kOffset21, 2 },  // kR6Label
-  // R6 near literal.
-  {  1, 0, 0, MipsAssembler::Branch::kOffset21, 2 },  // kR6Literal
-  // R6 long branches.
-  {  2, 0, 0, MipsAssembler::Branch::kOffset32, 0 },  // kR6LongUncondBranch
-  {  3, 1, 0, MipsAssembler::Branch::kOffset32, 0 },  // kR6LongCondBranch
-  {  2, 0, 0, MipsAssembler::Branch::kOffset32, 0 },  // kR6LongCall
-  // R6 far label.
-  {  2, 0, 0, MipsAssembler::Branch::kOffset32, 0 },  // kR6FarLabel
-  // R6 far literal.
-  {  2, 0, 0, MipsAssembler::Branch::kOffset32, 0 },  // kR6FarLiteral
-};
-
-static inline bool IsAbsorbableInstruction(uint32_t instruction) {
-  // The relative patcher patches addiu, lw and sw with an immediate operand of 0x5678.
-  // We want to make sure that these instructions do not get absorbed into delay slots
-  // of unconditional branches on R2. Absorption would otherwise make copies of
-  // unpatched instructions.
-  if ((instruction & 0xFFFF) != 0x5678) {
-    return true;
-  }
-  switch (instruction >> kOpcodeShift) {
-    case 0x09:  // Addiu.
-    case 0x23:  // Lw.
-    case 0x2B:  // Sw.
-      return false;
-    default:
-      return true;
-  }
-}
-
-static inline Register GetR2PcRelBaseRegister(Register reg) {
-  // LoadLabelAddress() and LoadLiteral() generate individual NAL
-  // instructions on R2 when the specified base register is ZERO
-  // and so the effective PC-relative base register is RA, not ZERO.
-  return (reg == ZERO) ? RA : reg;
-}
-
-// Note: make sure branch_info_[] and EmitBranch() are kept synchronized.
-void MipsAssembler::EmitBranch(uint32_t branch_id) {
-  CHECK_EQ(overwriting_, true);
-  Branch* branch = GetBranch(branch_id);
-  overwrite_location_ = branch->GetLocation();
-  uint32_t offset = branch->GetOffset(GetBranchOrPcRelBaseForEncoding(branch));
-  BranchCondition condition = branch->GetCondition();
-  Register lhs = branch->GetLeftRegister();
-  Register rhs = branch->GetRightRegister();
-  uint32_t delayed_instruction = branch->GetDelayedInstruction();
-  MipsLabel* patcher_label = branch->GetPatcherLabel();
-  if (patcher_label != nullptr) {
-    // Update the patcher label location to account for branch promotion and
-    // delay slot filling.
-    CHECK(patcher_label->IsBound());
-    uint32_t bound_pc = branch->GetLocation();
-    if (!branch->IsLong()) {
-      // Short branches precede delay slots.
-      // Long branches follow "delay slots".
-      bound_pc += sizeof(uint32_t);
-    }
-    // Rebind the label.
-    patcher_label->Reinitialize();
-    BindRelativeToPrecedingBranch(patcher_label, branch_id, bound_pc);
-  }
-  switch (branch->GetType()) {
-    // R2 short branches.
-    case Branch::kUncondBranch:
-      if (delayed_instruction == Branch::kUnfillableDelaySlot) {
-        // The branch was created when reordering was disabled, do not absorb the target
-        // instruction.
-        delayed_instruction = 0;  // NOP.
-      } else if (delayed_instruction == Branch::kUnfilledDelaySlot) {
-        // Try to absorb the target instruction into the delay slot.
-        delayed_instruction = 0;  // NOP.
-        // Incrementing the signed 16-bit offset past the target instruction must not
-        // cause overflow into the negative subrange, check for the max offset.
-        if (offset != 0x7FFF) {
-          uint32_t target = branch->GetTarget();
-          if (std::binary_search(ds_fsm_target_pcs_.begin(), ds_fsm_target_pcs_.end(), target)) {
-            uint32_t target_instruction = buffer_.Load<uint32_t>(target);
-            if (IsAbsorbableInstruction(target_instruction)) {
-              delayed_instruction = target_instruction;
-              offset++;
-            }
-          }
-        }
-      }
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      B(offset);
-      Emit(delayed_instruction);
-      break;
-    case Branch::kCondBranch:
-      DCHECK_NE(delayed_instruction, Branch::kUnfillableDelaySlot);
-      if (delayed_instruction == Branch::kUnfilledDelaySlot) {
-        delayed_instruction = 0;  // NOP.
-      }
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      EmitBcondR2(condition, lhs, rhs, offset);
-      Emit(delayed_instruction);
-      break;
-    case Branch::kCall:
-      DCHECK_NE(delayed_instruction, Branch::kUnfillableDelaySlot);
-      if (delayed_instruction == Branch::kUnfilledDelaySlot) {
-        delayed_instruction = 0;  // NOP.
-      }
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Bal(offset);
-      Emit(delayed_instruction);
-      break;
-    case Branch::kBareUncondBranch:
-      DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      B(offset);
-      break;
-    case Branch::kBareCondBranch:
-      DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      EmitBcondR2(condition, lhs, rhs, offset);
-      break;
-    case Branch::kBareCall:
-      DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Bal(offset);
-      break;
-
-    // R2 near label.
-    case Branch::kLabel:
-      DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Addiu(lhs, GetR2PcRelBaseRegister(rhs), offset);
-      break;
-    // R2 near literal.
-    case Branch::kLiteral:
-      DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Lw(lhs, GetR2PcRelBaseRegister(rhs), offset);
-      break;
-
-    // R2 long branches.
-    case Branch::kLongUncondBranch:
-      // To get the value of the PC register we need to use the NAL instruction.
-      // NAL clobbers the RA register. However, RA must be preserved if the
-      // method is compiled without the entry/exit sequences that would take care
-      // of preserving RA (typically, leaf methods don't preserve RA explicitly).
-      // So, we need to preserve RA in some temporary storage ourselves. The AT
-      // register can't be used for this because we need it to load a constant
-      // which will be added to the value that NAL stores in RA. And we can't
-      // use T9 for this in the context of the JNI compiler, which uses it
-      // as a scratch register (see InterproceduralScratchRegister()).
-      // If we were to add a 32-bit constant to RA using two ADDIU instructions,
-      // we'd also need to use the ROTR instruction, which requires no less than
-      // MIPSR2.
-      // Perhaps, we could use T8 or one of R2's multiplier/divider registers
-      // (LO or HI) or even a floating-point register, but that doesn't seem
-      // like a nice solution. We may want this to work on both R6 and pre-R6.
-      // For now simply use the stack for RA. This should be OK since for the
-      // vast majority of code a short PC-relative branch is sufficient.
-      // TODO: can this be improved?
-      // TODO: consider generation of a shorter sequence when we know that RA
-      // is explicitly preserved by the method entry/exit code.
-      if (delayed_instruction != Branch::kUnfilledDelaySlot &&
-          delayed_instruction != Branch::kUnfillableDelaySlot) {
-        Emit(delayed_instruction);
-      }
-      Push(RA);
-      Nal();
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Lui(AT, High16Bits(offset));
-      Ori(AT, AT, Low16Bits(offset));
-      Addu(AT, AT, RA);
-      Lw(RA, SP, 0);
-      Jr(AT);
-      DecreaseFrameSize(kStackAlignment);
-      break;
-    case Branch::kLongCondBranch:
-      // The comment on case 'Branch::kLongUncondBranch' applies here as well.
-      DCHECK_NE(delayed_instruction, Branch::kUnfillableDelaySlot);
-      if (delayed_instruction != Branch::kUnfilledDelaySlot) {
-        Emit(delayed_instruction);
-      }
-      // Note: the opposite condition branch encodes 8 as the distance, which is equal to the
-      // number of instructions skipped:
-      // (PUSH(IncreaseFrameSize(ADDIU) + SW) + NAL + LUI + ORI + ADDU + LW + JR).
-      EmitBcondR2(Branch::OppositeCondition(condition), lhs, rhs, 8);
-      Push(RA);
-      Nal();
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Lui(AT, High16Bits(offset));
-      Ori(AT, AT, Low16Bits(offset));
-      Addu(AT, AT, RA);
-      Lw(RA, SP, 0);
-      Jr(AT);
-      DecreaseFrameSize(kStackAlignment);
-      break;
-    case Branch::kLongCall:
-      DCHECK_NE(delayed_instruction, Branch::kUnfillableDelaySlot);
-      if (delayed_instruction != Branch::kUnfilledDelaySlot) {
-        Emit(delayed_instruction);
-      }
-      Nal();
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Lui(AT, High16Bits(offset));
-      Ori(AT, AT, Low16Bits(offset));
-      Addu(AT, AT, RA);
-      Jalr(AT);
-      Nop();
-      break;
-
-    // R2 far label.
-    case Branch::kFarLabel:
-      DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Lui(AT, High16Bits(offset));
-      Ori(AT, AT, Low16Bits(offset));
-      Addu(lhs, AT, GetR2PcRelBaseRegister(rhs));
-      break;
-    // R2 far literal.
-    case Branch::kFarLiteral:
-      DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
-      offset += (offset & 0x8000) << 1;  // Account for sign extension in lw.
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Lui(AT, High16Bits(offset));
-      Addu(AT, AT, GetR2PcRelBaseRegister(rhs));
-      Lw(lhs, AT, Low16Bits(offset));
-      break;
-
-    // R6 short branches.
-    case Branch::kR6UncondBranch:
-      DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Bc(offset);
-      break;
-    case Branch::kR6CondBranch:
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      EmitBcondR6(condition, lhs, rhs, offset);
-      DCHECK_NE(delayed_instruction, Branch::kUnfillableDelaySlot);
-      if (delayed_instruction != Branch::kUnfilledDelaySlot) {
-        Emit(delayed_instruction);
-      } else {
-        // TODO: improve by filling the forbidden slot (IFF this is
-        // a forbidden and not a delay slot).
-        Nop();
-      }
-      break;
-    case Branch::kR6Call:
-      DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Balc(offset);
-      break;
-    case Branch::kR6BareUncondBranch:
-      DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Bc(offset);
-      break;
-    case Branch::kR6BareCondBranch:
-      DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      EmitBcondR6(condition, lhs, rhs, offset);
-      break;
-    case Branch::kR6BareCall:
-      DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Balc(offset);
-      break;
-
-    // R6 near label.
-    case Branch::kR6Label:
-      DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Addiupc(lhs, offset);
-      break;
-    // R6 near literal.
-    case Branch::kR6Literal:
-      DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Lwpc(lhs, offset);
-      break;
-
-    // R6 long branches.
-    case Branch::kR6LongUncondBranch:
-      DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
-      offset += (offset & 0x8000) << 1;  // Account for sign extension in jic.
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Auipc(AT, High16Bits(offset));
-      Jic(AT, Low16Bits(offset));
-      break;
-    case Branch::kR6LongCondBranch:
-      DCHECK_NE(delayed_instruction, Branch::kUnfillableDelaySlot);
-      if (delayed_instruction != Branch::kUnfilledDelaySlot) {
-        Emit(delayed_instruction);
-      }
-      EmitBcondR6(Branch::OppositeCondition(condition), lhs, rhs, 2);
-      offset += (offset & 0x8000) << 1;  // Account for sign extension in jic.
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Auipc(AT, High16Bits(offset));
-      Jic(AT, Low16Bits(offset));
-      break;
-    case Branch::kR6LongCall:
-      DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
-      offset += (offset & 0x8000) << 1;  // Account for sign extension in jialc.
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Auipc(AT, High16Bits(offset));
-      Jialc(AT, Low16Bits(offset));
-      break;
-
-    // R6 far label.
-    case Branch::kR6FarLabel:
-      DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
-      offset += (offset & 0x8000) << 1;  // Account for sign extension in addiu.
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Auipc(AT, High16Bits(offset));
-      Addiu(lhs, AT, Low16Bits(offset));
-      break;
-    // R6 far literal.
-    case Branch::kR6FarLiteral:
-      DCHECK_EQ(delayed_instruction, Branch::kUnfilledDelaySlot);
-      offset += (offset & 0x8000) << 1;  // Account for sign extension in lw.
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Auipc(AT, High16Bits(offset));
-      Lw(lhs, AT, Low16Bits(offset));
-      break;
-  }
-  CHECK_EQ(overwrite_location_, branch->GetEndLocation());
-  CHECK_LT(branch->GetSize(), static_cast<uint32_t>(Branch::kMaxBranchSize));
-  if (patcher_label != nullptr) {
-    // The patched instruction should look like one.
-    uint32_t patched_instruction = buffer_.Load<uint32_t>(GetLabelLocation(patcher_label));
-    CHECK(!IsAbsorbableInstruction(patched_instruction));
-  }
-}
-
-void MipsAssembler::B(MipsLabel* label, bool is_bare) {
-  Buncond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare);
-}
-
-void MipsAssembler::Bal(MipsLabel* label, bool is_bare) {
-  Call(label, /* is_r6= */ (IsR6() && !is_bare), is_bare);
-}
-
-void MipsAssembler::Beq(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondEQ, rs, rt);
-}
-
-void MipsAssembler::Bne(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondNE, rs, rt);
-}
-
-void MipsAssembler::Beqz(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondEQZ, rt);
-}
-
-void MipsAssembler::Bnez(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondNEZ, rt);
-}
-
-void MipsAssembler::Bltz(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondLTZ, rt);
-}
-
-void MipsAssembler::Bgez(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondGEZ, rt);
-}
-
-void MipsAssembler::Blez(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondLEZ, rt);
-}
-
-void MipsAssembler::Bgtz(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ (IsR6() && !is_bare), is_bare, kCondGTZ, rt);
-}
-
-bool MipsAssembler::CanExchangeWithSlt(Register rs, Register rt) const {
-  // If the instruction modifies AT, `rs` or `rt`, it can't be exchanged with the slt[u]
-  // instruction because either slt[u] depends on `rs` or `rt` or the following
-  // conditional branch depends on AT set by slt[u].
-  // Likewise, if the instruction depends on AT, it can't be exchanged with slt[u]
-  // because slt[u] changes AT.
-  return (delay_slot_.instruction_ != 0 &&
-      (delay_slot_.masks_.gpr_outs_ & ((1u << AT) | (1u << rs) | (1u << rt))) == 0 &&
-      (delay_slot_.masks_.gpr_ins_ & (1u << AT)) == 0);
-}
-
-void MipsAssembler::ExchangeWithSlt(const DelaySlot& forwarded_slot) {
-  // Exchange the last two instructions in the assembler buffer.
-  size_t size = buffer_.Size();
-  CHECK_GE(size, 2 * sizeof(uint32_t));
-  size_t pos1 = size - 2 * sizeof(uint32_t);
-  size_t pos2 = size - sizeof(uint32_t);
-  uint32_t instr1 = buffer_.Load<uint32_t>(pos1);
-  uint32_t instr2 = buffer_.Load<uint32_t>(pos2);
-  CHECK_EQ(instr1, forwarded_slot.instruction_);
-  CHECK_EQ(instr2, delay_slot_.instruction_);
-  buffer_.Store<uint32_t>(pos1, instr2);
-  buffer_.Store<uint32_t>(pos2, instr1);
-  // Set the current delay slot information to that of the last instruction
-  // in the buffer.
-  delay_slot_ = forwarded_slot;
-}
-
-void MipsAssembler::GenerateSltForCondBranch(bool unsigned_slt, Register rs, Register rt) {
-  // If possible, exchange the slt[u] instruction with the preceding instruction,
-  // so it can fill the delay slot.
-  DelaySlot forwarded_slot = delay_slot_;
-  bool exchange = CanExchangeWithSlt(rs, rt);
-  if (exchange) {
-    // The last instruction cannot be used in a different delay slot,
-    // do not commit the label before it (if any).
-    DsFsmDropLabel();
-  }
-  if (unsigned_slt) {
-    Sltu(AT, rs, rt);
-  } else {
-    Slt(AT, rs, rt);
-  }
-  if (exchange) {
-    ExchangeWithSlt(forwarded_slot);
-  }
-}
-
-void MipsAssembler::Blt(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  if (IsR6() && !is_bare) {
-    Bcond(label, IsR6(), is_bare, kCondLT, rs, rt);
-  } else if (!Branch::IsNop(kCondLT, rs, rt)) {
-    // Synthesize the instruction (not available on R2).
-    GenerateSltForCondBranch(/* unsigned_slt= */ false, rs, rt);
-    Bnez(AT, label, is_bare);
-  }
-}
-
-void MipsAssembler::Bge(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  if (IsR6() && !is_bare) {
-    Bcond(label, IsR6(), is_bare, kCondGE, rs, rt);
-  } else if (Branch::IsUncond(kCondGE, rs, rt)) {
-    B(label, is_bare);
-  } else {
-    // Synthesize the instruction (not available on R2).
-    GenerateSltForCondBranch(/* unsigned_slt= */ false, rs, rt);
-    Beqz(AT, label, is_bare);
-  }
-}
-
-void MipsAssembler::Bltu(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  if (IsR6() && !is_bare) {
-    Bcond(label, IsR6(), is_bare, kCondLTU, rs, rt);
-  } else if (!Branch::IsNop(kCondLTU, rs, rt)) {
-    // Synthesize the instruction (not available on R2).
-    GenerateSltForCondBranch(/* unsigned_slt= */ true, rs, rt);
-    Bnez(AT, label, is_bare);
-  }
-}
-
-void MipsAssembler::Bgeu(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  if (IsR6() && !is_bare) {
-    Bcond(label, IsR6(), is_bare, kCondGEU, rs, rt);
-  } else if (Branch::IsUncond(kCondGEU, rs, rt)) {
-    B(label, is_bare);
-  } else {
-    // Synthesize the instruction (not available on R2).
-    GenerateSltForCondBranch(/* unsigned_slt= */ true, rs, rt);
-    Beqz(AT, label, is_bare);
-  }
-}
-
-void MipsAssembler::Bc1f(MipsLabel* label, bool is_bare) {
-  Bc1f(0, label, is_bare);
-}
-
-void MipsAssembler::Bc1f(int cc, MipsLabel* label, bool is_bare) {
-  CHECK(IsUint<3>(cc)) << cc;
-  Bcond(label, /* is_r6= */ false, is_bare, kCondF, static_cast<Register>(cc), ZERO);
-}
-
-void MipsAssembler::Bc1t(MipsLabel* label, bool is_bare) {
-  Bc1t(0, label, is_bare);
-}
-
-void MipsAssembler::Bc1t(int cc, MipsLabel* label, bool is_bare) {
-  CHECK(IsUint<3>(cc)) << cc;
-  Bcond(label, /* is_r6= */ false, is_bare, kCondT, static_cast<Register>(cc), ZERO);
-}
-
-void MipsAssembler::Bc(MipsLabel* label, bool is_bare) {
-  Buncond(label, /* is_r6= */ true, is_bare);
-}
-
-void MipsAssembler::Balc(MipsLabel* label, bool is_bare) {
-  Call(label, /* is_r6= */ true, is_bare);
-}
-
-void MipsAssembler::Beqc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondEQ, rs, rt);
-}
-
-void MipsAssembler::Bnec(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondNE, rs, rt);
-}
-
-void MipsAssembler::Beqzc(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondEQZ, rt);
-}
-
-void MipsAssembler::Bnezc(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondNEZ, rt);
-}
-
-void MipsAssembler::Bltzc(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondLTZ, rt);
-}
-
-void MipsAssembler::Bgezc(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondGEZ, rt);
-}
-
-void MipsAssembler::Blezc(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondLEZ, rt);
-}
-
-void MipsAssembler::Bgtzc(Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondGTZ, rt);
-}
-
-void MipsAssembler::Bltc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondLT, rs, rt);
-}
-
-void MipsAssembler::Bgec(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondGE, rs, rt);
-}
-
-void MipsAssembler::Bltuc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondLTU, rs, rt);
-}
-
-void MipsAssembler::Bgeuc(Register rs, Register rt, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondGEU, rs, rt);
-}
-
-void MipsAssembler::Bc1eqz(FRegister ft, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondF, static_cast<Register>(ft), ZERO);
-}
-
-void MipsAssembler::Bc1nez(FRegister ft, MipsLabel* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondT, static_cast<Register>(ft), ZERO);
-}
-
-void MipsAssembler::AdjustBaseAndOffset(Register& base,
-                                        int32_t& offset,
-                                        bool is_doubleword,
-                                        bool is_float) {
-  // This method is used to adjust the base register and offset pair
-  // for a load/store when the offset doesn't fit into int16_t.
-  // It is assumed that `base + offset` is sufficiently aligned for memory
-  // operands that are machine word in size or smaller. For doubleword-sized
-  // operands it's assumed that `base` is a multiple of 8, while `offset`
-  // may be a multiple of 4 (e.g. 4-byte-aligned long and double arguments
-  // and spilled variables on the stack accessed relative to the stack
-  // pointer register).
-  // We preserve the "alignment" of `offset` by adjusting it by a multiple of 8.
-  CHECK_NE(base, AT);  // Must not overwrite the register `base` while loading `offset`.
-
-  bool doubleword_aligned = IsAligned<kMipsDoublewordSize>(offset);
-  bool two_accesses = is_doubleword && (!is_float || !doubleword_aligned);
-
-  // IsInt<16> must be passed a signed value, hence the static cast below.
-  if (IsInt<16>(offset) &&
-      (!two_accesses || IsInt<16>(static_cast<int32_t>(offset + kMipsWordSize)))) {
-    // Nothing to do: `offset` (and, if needed, `offset + 4`) fits into int16_t.
-    return;
-  }
-
-  // Remember the "(mis)alignment" of `offset`, it will be checked at the end.
-  uint32_t misalignment = offset & (kMipsDoublewordSize - 1);
-
-  // Do not load the whole 32-bit `offset` if it can be represented as
-  // a sum of two 16-bit signed offsets. This can save an instruction or two.
-  // To simplify matters, only do this for a symmetric range of offsets from
-  // about -64KB to about +64KB, allowing further addition of 4 when accessing
-  // 64-bit variables with two 32-bit accesses.
-  constexpr int32_t kMinOffsetForSimpleAdjustment = 0x7ff8;  // Max int16_t that's a multiple of 8.
-  constexpr int32_t kMaxOffsetForSimpleAdjustment = 2 * kMinOffsetForSimpleAdjustment;
-  if (0 <= offset && offset <= kMaxOffsetForSimpleAdjustment) {
-    Addiu(AT, base, kMinOffsetForSimpleAdjustment);
-    offset -= kMinOffsetForSimpleAdjustment;
-  } else if (-kMaxOffsetForSimpleAdjustment <= offset && offset < 0) {
-    Addiu(AT, base, -kMinOffsetForSimpleAdjustment);
-    offset += kMinOffsetForSimpleAdjustment;
-  } else if (IsR6()) {
-    // On R6 take advantage of the aui instruction, e.g.:
-    //   aui   AT, base, offset_high
-    //   lw    reg_lo, offset_low(AT)
-    //   lw    reg_hi, (offset_low+4)(AT)
-    // or when offset_low+4 overflows int16_t:
-    //   aui   AT, base, offset_high
-    //   addiu AT, AT, 8
-    //   lw    reg_lo, (offset_low-8)(AT)
-    //   lw    reg_hi, (offset_low-4)(AT)
-    int16_t offset_high = High16Bits(offset);
-    int16_t offset_low = Low16Bits(offset);
-    offset_high += (offset_low < 0) ? 1 : 0;  // Account for offset sign extension in load/store.
-    Aui(AT, base, offset_high);
-    if (two_accesses && !IsInt<16>(static_cast<int32_t>(offset_low + kMipsWordSize))) {
-      // Avoid overflow in the 16-bit offset of the load/store instruction when adding 4.
-      Addiu(AT, AT, kMipsDoublewordSize);
-      offset_low -= kMipsDoublewordSize;
-    }
-    offset = offset_low;
-  } else {
-    // Do not load the whole 32-bit `offset` if it can be represented as
-    // a sum of three 16-bit signed offsets. This can save an instruction.
-    // To simplify matters, only do this for a symmetric range of offsets from
-    // about -96KB to about +96KB, allowing further addition of 4 when accessing
-    // 64-bit variables with two 32-bit accesses.
-    constexpr int32_t kMinOffsetForMediumAdjustment = 2 * kMinOffsetForSimpleAdjustment;
-    constexpr int32_t kMaxOffsetForMediumAdjustment = 3 * kMinOffsetForSimpleAdjustment;
-    if (0 <= offset && offset <= kMaxOffsetForMediumAdjustment) {
-      Addiu(AT, base, kMinOffsetForMediumAdjustment / 2);
-      Addiu(AT, AT, kMinOffsetForMediumAdjustment / 2);
-      offset -= kMinOffsetForMediumAdjustment;
-    } else if (-kMaxOffsetForMediumAdjustment <= offset && offset < 0) {
-      Addiu(AT, base, -kMinOffsetForMediumAdjustment / 2);
-      Addiu(AT, AT, -kMinOffsetForMediumAdjustment / 2);
-      offset += kMinOffsetForMediumAdjustment;
-    } else {
-      // Now that all shorter options have been exhausted, load the full 32-bit offset.
-      int32_t loaded_offset = RoundDown(offset, kMipsDoublewordSize);
-      LoadConst32(AT, loaded_offset);
-      Addu(AT, AT, base);
-      offset -= loaded_offset;
-    }
-  }
-  base = AT;
-
-  CHECK(IsInt<16>(offset));
-  if (two_accesses) {
-    CHECK(IsInt<16>(static_cast<int32_t>(offset + kMipsWordSize)));
-  }
-  CHECK_EQ(misalignment, offset & (kMipsDoublewordSize - 1));
-}
-
-void MipsAssembler::AdjustBaseOffsetAndElementSizeShift(Register& base,
-                                                        int32_t& offset,
-                                                        int& element_size_shift) {
-  // This method is used to adjust the base register, offset and element_size_shift
-  // for a vector load/store when the offset doesn't fit into allowed number of bits.
-  // MSA ld.df and st.df instructions take signed offsets as arguments, but maximum
-  // offset is dependant on the size of the data format df (10-bit offsets for ld.b,
-  // 11-bit for ld.h, 12-bit for ld.w and 13-bit for ld.d).
-  // If element_size_shift is non-negative at entry, it won't be changed, but offset
-  // will be checked for appropriate alignment. If negative at entry, it will be
-  // adjusted based on offset for maximum fit.
-  // It's assumed that `base` is a multiple of 8.
-  CHECK_NE(base, AT);  // Must not overwrite the register `base` while loading `offset`.
-
-  if (element_size_shift >= 0) {
-    CHECK_LE(element_size_shift, TIMES_8);
-    CHECK_GE(JAVASTYLE_CTZ(offset), element_size_shift);
-  } else if (IsAligned<kMipsDoublewordSize>(offset)) {
-    element_size_shift = TIMES_8;
-  } else if (IsAligned<kMipsWordSize>(offset)) {
-    element_size_shift = TIMES_4;
-  } else if (IsAligned<kMipsHalfwordSize>(offset)) {
-    element_size_shift = TIMES_2;
-  } else {
-    element_size_shift = TIMES_1;
-  }
-
-  const int low_len = 10 + element_size_shift;  // How many low bits of `offset` ld.df/st.df
-                                                // will take.
-  int16_t low = offset & ((1 << low_len) - 1);  // Isolate these bits.
-  low -= (low & (1 << (low_len - 1))) << 1;     // Sign-extend these bits.
-  if (low == offset) {
-    return;  // `offset` fits into ld.df/st.df.
-  }
-
-  // First, see if `offset` can be represented as a sum of two or three signed offsets.
-  // This can save an instruction or two.
-
-  // Max int16_t that's a multiple of element size.
-  const int32_t kMaxDeltaForSimpleAdjustment = 0x8000 - (1 << element_size_shift);
-  // Max ld.df/st.df offset that's a multiple of element size.
-  const int32_t kMaxLoadStoreOffset = 0x1ff << element_size_shift;
-  const int32_t kMaxOffsetForSimpleAdjustment = kMaxDeltaForSimpleAdjustment + kMaxLoadStoreOffset;
-  const int32_t kMinOffsetForMediumAdjustment = 2 * kMaxDeltaForSimpleAdjustment;
-  const int32_t kMaxOffsetForMediumAdjustment = kMinOffsetForMediumAdjustment + kMaxLoadStoreOffset;
-
-  if (IsInt<16>(offset)) {
-    Addiu(AT, base, offset);
-    offset = 0;
-  } else if (0 <= offset && offset <= kMaxOffsetForSimpleAdjustment) {
-    Addiu(AT, base, kMaxDeltaForSimpleAdjustment);
-    offset -= kMaxDeltaForSimpleAdjustment;
-  } else if (-kMaxOffsetForSimpleAdjustment <= offset && offset < 0) {
-    Addiu(AT, base, -kMaxDeltaForSimpleAdjustment);
-    offset += kMaxDeltaForSimpleAdjustment;
-  } else if (!IsR6() && 0 <= offset && offset <= kMaxOffsetForMediumAdjustment) {
-    Addiu(AT, base, kMaxDeltaForSimpleAdjustment);
-    if (offset <= kMinOffsetForMediumAdjustment) {
-      Addiu(AT, AT, offset - kMaxDeltaForSimpleAdjustment);
-      offset = 0;
-    } else {
-      Addiu(AT, AT, kMaxDeltaForSimpleAdjustment);
-      offset -= kMinOffsetForMediumAdjustment;
-    }
-  } else if (!IsR6() && -kMaxOffsetForMediumAdjustment <= offset && offset < 0) {
-    Addiu(AT, base, -kMaxDeltaForSimpleAdjustment);
-    if (-kMinOffsetForMediumAdjustment <= offset) {
-      Addiu(AT, AT, offset + kMaxDeltaForSimpleAdjustment);
-      offset = 0;
-    } else {
-      Addiu(AT, AT, -kMaxDeltaForSimpleAdjustment);
-      offset += kMinOffsetForMediumAdjustment;
-    }
-  } else {
-    // 16-bit or smaller parts of `offset`:
-    // |31  hi  16|15  mid  13-10|12-9  low  0|
-    //
-    // Instructions that supply each part as a signed integer addend:
-    // |aui       |addiu         |ld.df/st.df |
-    uint32_t tmp = static_cast<uint32_t>(offset) - low;  // Exclude `low` from the rest of `offset`
-                                                         // (accounts for sign of `low`).
-    tmp += (tmp & (UINT32_C(1) << 15)) << 1;  // Account for sign extension in addiu.
-    int16_t mid = Low16Bits(tmp);
-    int16_t hi = High16Bits(tmp);
-    if (IsR6()) {
-      Aui(AT, base, hi);
-    } else {
-      Lui(AT, hi);
-      Addu(AT, AT, base);
-    }
-    if (mid != 0) {
-      Addiu(AT, AT, mid);
-    }
-    offset = low;
-  }
-  base = AT;
-  CHECK_GE(JAVASTYLE_CTZ(offset), element_size_shift);
-  CHECK(IsInt<10>(offset >> element_size_shift));
-}
-
-void MipsAssembler::LoadFromOffset(LoadOperandType type,
-                                   Register reg,
-                                   Register base,
-                                   int32_t offset) {
-  LoadFromOffset<>(type, reg, base, offset);
-}
-
-void MipsAssembler::LoadSFromOffset(FRegister reg, Register base, int32_t offset) {
-  LoadSFromOffset<>(reg, base, offset);
-}
-
-void MipsAssembler::LoadDFromOffset(FRegister reg, Register base, int32_t offset) {
-  LoadDFromOffset<>(reg, base, offset);
-}
-
-void MipsAssembler::LoadQFromOffset(FRegister reg, Register base, int32_t offset) {
-  LoadQFromOffset<>(reg, base, offset);
-}
-
-void MipsAssembler::EmitLoad(ManagedRegister m_dst, Register src_register, int32_t src_offset,
-                             size_t size) {
-  MipsManagedRegister dst = m_dst.AsMips();
-  if (dst.IsNoRegister()) {
-    CHECK_EQ(0u, size) << dst;
-  } else if (dst.IsCoreRegister()) {
-    CHECK_EQ(kMipsWordSize, size) << dst;
-    LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
-  } else if (dst.IsRegisterPair()) {
-    CHECK_EQ(kMipsDoublewordSize, size) << dst;
-    LoadFromOffset(kLoadDoubleword, dst.AsRegisterPairLow(), src_register, src_offset);
-  } else if (dst.IsFRegister()) {
-    if (size == kMipsWordSize) {
-      LoadSFromOffset(dst.AsFRegister(), src_register, src_offset);
-    } else {
-      CHECK_EQ(kMipsDoublewordSize, size) << dst;
-      LoadDFromOffset(dst.AsFRegister(), src_register, src_offset);
-    }
-  } else if (dst.IsDRegister()) {
-    CHECK_EQ(kMipsDoublewordSize, size) << dst;
-    LoadDFromOffset(dst.AsOverlappingDRegisterLow(), src_register, src_offset);
-  }
-}
-
-void MipsAssembler::StoreToOffset(StoreOperandType type,
-                                  Register reg,
-                                  Register base,
-                                  int32_t offset) {
-  StoreToOffset<>(type, reg, base, offset);
-}
-
-void MipsAssembler::StoreSToOffset(FRegister reg, Register base, int32_t offset) {
-  StoreSToOffset<>(reg, base, offset);
-}
-
-void MipsAssembler::StoreDToOffset(FRegister reg, Register base, int32_t offset) {
-  StoreDToOffset<>(reg, base, offset);
-}
-
-void MipsAssembler::StoreQToOffset(FRegister reg, Register base, int32_t offset) {
-  StoreQToOffset<>(reg, base, offset);
-}
-
-static dwarf::Reg DWARFReg(Register reg) {
-  return dwarf::Reg::MipsCore(static_cast<int>(reg));
-}
-
-constexpr size_t kFramePointerSize = 4;
-
-void MipsAssembler::BuildFrame(size_t frame_size,
-                               ManagedRegister method_reg,
-                               ArrayRef<const ManagedRegister> callee_save_regs,
-                               const ManagedRegisterEntrySpills& entry_spills) {
-  CHECK_ALIGNED(frame_size, kStackAlignment);
-  DCHECK(!overwriting_);
-
-  // Increase frame to required size.
-  IncreaseFrameSize(frame_size);
-
-  // Push callee saves and return address.
-  int stack_offset = frame_size - kFramePointerSize;
-  StoreToOffset(kStoreWord, RA, SP, stack_offset);
-  cfi_.RelOffset(DWARFReg(RA), stack_offset);
-  for (int i = callee_save_regs.size() - 1; i >= 0; --i) {
-    stack_offset -= kFramePointerSize;
-    Register reg = callee_save_regs[i].AsMips().AsCoreRegister();
-    StoreToOffset(kStoreWord, reg, SP, stack_offset);
-    cfi_.RelOffset(DWARFReg(reg), stack_offset);
-  }
-
-  // Write out Method*.
-  StoreToOffset(kStoreWord, method_reg.AsMips().AsCoreRegister(), SP, 0);
-
-  // Write out entry spills.
-  int32_t offset = frame_size + kFramePointerSize;
-  for (const ManagedRegisterSpill& spill : entry_spills) {
-    MipsManagedRegister reg = spill.AsMips();
-    if (reg.IsNoRegister()) {
-      offset += spill.getSize();
-    } else if (reg.IsCoreRegister()) {
-      StoreToOffset(kStoreWord, reg.AsCoreRegister(), SP, offset);
-      offset += kMipsWordSize;
-    } else if (reg.IsFRegister()) {
-      StoreSToOffset(reg.AsFRegister(), SP, offset);
-      offset += kMipsWordSize;
-    } else if (reg.IsDRegister()) {
-      StoreDToOffset(reg.AsOverlappingDRegisterLow(), SP, offset);
-      offset += kMipsDoublewordSize;
-    }
-  }
-}
-
-void MipsAssembler::RemoveFrame(size_t frame_size,
-                                ArrayRef<const ManagedRegister> callee_save_regs,
-                                bool may_suspend ATTRIBUTE_UNUSED) {
-  CHECK_ALIGNED(frame_size, kStackAlignment);
-  DCHECK(!overwriting_);
-  cfi_.RememberState();
-
-  // Pop callee saves and return address.
-  int stack_offset = frame_size - (callee_save_regs.size() * kFramePointerSize) - kFramePointerSize;
-  for (size_t i = 0; i < callee_save_regs.size(); ++i) {
-    Register reg = callee_save_regs[i].AsMips().AsCoreRegister();
-    LoadFromOffset(kLoadWord, reg, SP, stack_offset);
-    cfi_.Restore(DWARFReg(reg));
-    stack_offset += kFramePointerSize;
-  }
-  LoadFromOffset(kLoadWord, RA, SP, stack_offset);
-  cfi_.Restore(DWARFReg(RA));
-
-  // Adjust the stack pointer in the delay slot if doing so doesn't break CFI.
-  bool exchange = IsInt<16>(static_cast<int32_t>(frame_size));
-  bool reordering = SetReorder(false);
-  if (exchange) {
-    // Jump to the return address.
-    Jr(RA);
-    // Decrease frame to required size.
-    DecreaseFrameSize(frame_size);  // Single instruction in delay slot.
-  } else {
-    // Decrease frame to required size.
-    DecreaseFrameSize(frame_size);
-    // Jump to the return address.
-    Jr(RA);
-    Nop();  // In delay slot.
-  }
-  SetReorder(reordering);
-
-  // The CFI should be restored for any code that follows the exit block.
-  cfi_.RestoreState();
-  cfi_.DefCFAOffset(frame_size);
-}
-
-void MipsAssembler::IncreaseFrameSize(size_t adjust) {
-  CHECK_ALIGNED(adjust, kFramePointerSize);
-  Addiu32(SP, SP, -adjust);
-  cfi_.AdjustCFAOffset(adjust);
-  if (overwriting_) {
-    cfi_.OverrideDelayedPC(overwrite_location_);
-  }
-}
-
-void MipsAssembler::DecreaseFrameSize(size_t adjust) {
-  CHECK_ALIGNED(adjust, kFramePointerSize);
-  Addiu32(SP, SP, adjust);
-  cfi_.AdjustCFAOffset(-adjust);
-  if (overwriting_) {
-    cfi_.OverrideDelayedPC(overwrite_location_);
-  }
-}
-
-void MipsAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
-  MipsManagedRegister src = msrc.AsMips();
-  if (src.IsNoRegister()) {
-    CHECK_EQ(0u, size);
-  } else if (src.IsCoreRegister()) {
-    CHECK_EQ(kMipsWordSize, size);
-    StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
-  } else if (src.IsRegisterPair()) {
-    CHECK_EQ(kMipsDoublewordSize, size);
-    StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
-    StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
-                  SP, dest.Int32Value() + kMipsWordSize);
-  } else if (src.IsFRegister()) {
-    if (size == kMipsWordSize) {
-      StoreSToOffset(src.AsFRegister(), SP, dest.Int32Value());
-    } else {
-      CHECK_EQ(kMipsDoublewordSize, size);
-      StoreDToOffset(src.AsFRegister(), SP, dest.Int32Value());
-    }
-  } else if (src.IsDRegister()) {
-    CHECK_EQ(kMipsDoublewordSize, size);
-    StoreDToOffset(src.AsOverlappingDRegisterLow(), SP, dest.Int32Value());
-  }
-}
-
-void MipsAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
-  MipsManagedRegister src = msrc.AsMips();
-  CHECK(src.IsCoreRegister());
-  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
-}
-
-void MipsAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
-  MipsManagedRegister src = msrc.AsMips();
-  CHECK(src.IsCoreRegister());
-  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
-}
-
-void MipsAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
-                                          ManagedRegister mscratch) {
-  MipsManagedRegister scratch = mscratch.AsMips();
-  CHECK(scratch.IsCoreRegister()) << scratch;
-  LoadConst32(scratch.AsCoreRegister(), imm);
-  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
-}
-
-void MipsAssembler::StoreStackOffsetToThread(ThreadOffset32 thr_offs,
-                                             FrameOffset fr_offs,
-                                             ManagedRegister mscratch) {
-  MipsManagedRegister scratch = mscratch.AsMips();
-  CHECK(scratch.IsCoreRegister()) << scratch;
-  Addiu32(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
-  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
-                S1, thr_offs.Int32Value());
-}
-
-void MipsAssembler::StoreStackPointerToThread(ThreadOffset32 thr_offs) {
-  StoreToOffset(kStoreWord, SP, S1, thr_offs.Int32Value());
-}
-
-void MipsAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
-                                  FrameOffset in_off, ManagedRegister mscratch) {
-  MipsManagedRegister src = msrc.AsMips();
-  MipsManagedRegister scratch = mscratch.AsMips();
-  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
-  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
-  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + kMipsWordSize);
-}
-
-void MipsAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
-  return EmitLoad(mdest, SP, src.Int32Value(), size);
-}
-
-void MipsAssembler::LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) {
-  return EmitLoad(mdest, S1, src.Int32Value(), size);
-}
-
-void MipsAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
-  MipsManagedRegister dest = mdest.AsMips();
-  CHECK(dest.IsCoreRegister());
-  LoadFromOffset(kLoadWord, dest.AsCoreRegister(), SP, src.Int32Value());
-}
-
-void MipsAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
-                            bool unpoison_reference) {
-  MipsManagedRegister dest = mdest.AsMips();
-  CHECK(dest.IsCoreRegister() && base.AsMips().IsCoreRegister());
-  LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
-                 base.AsMips().AsCoreRegister(), offs.Int32Value());
-  if (unpoison_reference) {
-    MaybeUnpoisonHeapReference(dest.AsCoreRegister());
-  }
-}
-
-void MipsAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) {
-  MipsManagedRegister dest = mdest.AsMips();
-  CHECK(dest.IsCoreRegister() && base.AsMips().IsCoreRegister());
-  LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
-                 base.AsMips().AsCoreRegister(), offs.Int32Value());
-}
-
-void MipsAssembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) {
-  MipsManagedRegister dest = mdest.AsMips();
-  CHECK(dest.IsCoreRegister());
-  LoadFromOffset(kLoadWord, dest.AsCoreRegister(), S1, offs.Int32Value());
-}
-
-void MipsAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
-  UNIMPLEMENTED(FATAL) << "no sign extension necessary for mips";
-}
-
-void MipsAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
-  UNIMPLEMENTED(FATAL) << "no zero extension necessary for mips";
-}
-
-void MipsAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
-  MipsManagedRegister dest = mdest.AsMips();
-  MipsManagedRegister src = msrc.AsMips();
-  if (!dest.Equals(src)) {
-    if (dest.IsCoreRegister()) {
-      CHECK(src.IsCoreRegister()) << src;
-      Move(dest.AsCoreRegister(), src.AsCoreRegister());
-    } else if (dest.IsFRegister()) {
-      CHECK(src.IsFRegister()) << src;
-      if (size == kMipsWordSize) {
-        MovS(dest.AsFRegister(), src.AsFRegister());
-      } else {
-        CHECK_EQ(kMipsDoublewordSize, size);
-        MovD(dest.AsFRegister(), src.AsFRegister());
-      }
-    } else if (dest.IsDRegister()) {
-      CHECK(src.IsDRegister()) << src;
-      MovD(dest.AsOverlappingDRegisterLow(), src.AsOverlappingDRegisterLow());
-    } else {
-      CHECK(dest.IsRegisterPair()) << dest;
-      CHECK(src.IsRegisterPair()) << src;
-      // Ensure that the first move doesn't clobber the input of the second.
-      if (src.AsRegisterPairHigh() != dest.AsRegisterPairLow()) {
-        Move(dest.AsRegisterPairLow(), src.AsRegisterPairLow());
-        Move(dest.AsRegisterPairHigh(), src.AsRegisterPairHigh());
-      } else {
-        Move(dest.AsRegisterPairHigh(), src.AsRegisterPairHigh());
-        Move(dest.AsRegisterPairLow(), src.AsRegisterPairLow());
-      }
-    }
-  }
-}
-
-void MipsAssembler::CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) {
-  MipsManagedRegister scratch = mscratch.AsMips();
-  CHECK(scratch.IsCoreRegister()) << scratch;
-  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
-  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
-}
-
-void MipsAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
-                                         ThreadOffset32 thr_offs,
-                                         ManagedRegister mscratch) {
-  MipsManagedRegister scratch = mscratch.AsMips();
-  CHECK(scratch.IsCoreRegister()) << scratch;
-  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
-                 S1, thr_offs.Int32Value());
-  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
-                SP, fr_offs.Int32Value());
-}
-
-void MipsAssembler::CopyRawPtrToThread(ThreadOffset32 thr_offs,
-                                       FrameOffset fr_offs,
-                                       ManagedRegister mscratch) {
-  MipsManagedRegister scratch = mscratch.AsMips();
-  CHECK(scratch.IsCoreRegister()) << scratch;
-  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
-                 SP, fr_offs.Int32Value());
-  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
-                S1, thr_offs.Int32Value());
-}
-
-void MipsAssembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) {
-  MipsManagedRegister scratch = mscratch.AsMips();
-  CHECK(scratch.IsCoreRegister()) << scratch;
-  CHECK(size == kMipsWordSize || size == kMipsDoublewordSize) << size;
-  if (size == kMipsWordSize) {
-    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
-    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
-  } else if (size == kMipsDoublewordSize) {
-    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
-    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
-    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + kMipsWordSize);
-    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + kMipsWordSize);
-  }
-}
-
-void MipsAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
-                         ManagedRegister mscratch, size_t size) {
-  Register scratch = mscratch.AsMips().AsCoreRegister();
-  CHECK_EQ(size, kMipsWordSize);
-  LoadFromOffset(kLoadWord, scratch, src_base.AsMips().AsCoreRegister(), src_offset.Int32Value());
-  StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
-}
-
-void MipsAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
-                         ManagedRegister mscratch, size_t size) {
-  Register scratch = mscratch.AsMips().AsCoreRegister();
-  CHECK_EQ(size, kMipsWordSize);
-  LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
-  StoreToOffset(kStoreWord, scratch, dest_base.AsMips().AsCoreRegister(), dest_offset.Int32Value());
-}
-
-void MipsAssembler::Copy(FrameOffset dest ATTRIBUTE_UNUSED,
-                         FrameOffset src_base ATTRIBUTE_UNUSED,
-                         Offset src_offset ATTRIBUTE_UNUSED,
-                         ManagedRegister mscratch ATTRIBUTE_UNUSED,
-                         size_t size ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "no MIPS implementation";
-}
-
-void MipsAssembler::Copy(ManagedRegister dest, Offset dest_offset,
-                         ManagedRegister src, Offset src_offset,
-                         ManagedRegister mscratch, size_t size) {
-  CHECK_EQ(size, kMipsWordSize);
-  Register scratch = mscratch.AsMips().AsCoreRegister();
-  LoadFromOffset(kLoadWord, scratch, src.AsMips().AsCoreRegister(), src_offset.Int32Value());
-  StoreToOffset(kStoreWord, scratch, dest.AsMips().AsCoreRegister(), dest_offset.Int32Value());
-}
-
-void MipsAssembler::Copy(FrameOffset dest ATTRIBUTE_UNUSED,
-                         Offset dest_offset ATTRIBUTE_UNUSED,
-                         FrameOffset src ATTRIBUTE_UNUSED,
-                         Offset src_offset ATTRIBUTE_UNUSED,
-                         ManagedRegister mscratch ATTRIBUTE_UNUSED,
-                         size_t size ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "no MIPS implementation";
-}
-
-void MipsAssembler::MemoryBarrier(ManagedRegister) {
-  // TODO: sync?
-  UNIMPLEMENTED(FATAL) << "no MIPS implementation";
-}
-
-void MipsAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
-                                           FrameOffset handle_scope_offset,
-                                           ManagedRegister min_reg,
-                                           bool null_allowed) {
-  MipsManagedRegister out_reg = mout_reg.AsMips();
-  MipsManagedRegister in_reg = min_reg.AsMips();
-  CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
-  CHECK(out_reg.IsCoreRegister()) << out_reg;
-  if (null_allowed) {
-    MipsLabel null_arg;
-    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
-    // the address in the handle scope holding the reference.
-    // E.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset).
-    if (in_reg.IsNoRegister()) {
-      LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
-                     SP, handle_scope_offset.Int32Value());
-      in_reg = out_reg;
-    }
-    if (!out_reg.Equals(in_reg)) {
-      LoadConst32(out_reg.AsCoreRegister(), 0);
-    }
-    Beqz(in_reg.AsCoreRegister(), &null_arg);
-    Addiu32(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
-    Bind(&null_arg);
-  } else {
-    Addiu32(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
-  }
-}
-
-void MipsAssembler::CreateHandleScopeEntry(FrameOffset out_off,
-                                           FrameOffset handle_scope_offset,
-                                           ManagedRegister mscratch,
-                                           bool null_allowed) {
-  MipsManagedRegister scratch = mscratch.AsMips();
-  CHECK(scratch.IsCoreRegister()) << scratch;
-  if (null_allowed) {
-    MipsLabel null_arg;
-    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
-    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
-    // the address in the handle scope holding the reference.
-    // E.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset).
-    Beqz(scratch.AsCoreRegister(), &null_arg);
-    Addiu32(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
-    Bind(&null_arg);
-  } else {
-    Addiu32(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value());
-  }
-  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
-}
-
-// Given a handle scope entry, load the associated reference.
-void MipsAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
-                                                 ManagedRegister min_reg) {
-  MipsManagedRegister out_reg = mout_reg.AsMips();
-  MipsManagedRegister in_reg = min_reg.AsMips();
-  CHECK(out_reg.IsCoreRegister()) << out_reg;
-  CHECK(in_reg.IsCoreRegister()) << in_reg;
-  MipsLabel null_arg;
-  if (!out_reg.Equals(in_reg)) {
-    LoadConst32(out_reg.AsCoreRegister(), 0);
-  }
-  Beqz(in_reg.AsCoreRegister(), &null_arg);
-  LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
-                 in_reg.AsCoreRegister(), 0);
-  Bind(&null_arg);
-}
-
-void MipsAssembler::VerifyObject(ManagedRegister src ATTRIBUTE_UNUSED,
-                                 bool could_be_null ATTRIBUTE_UNUSED) {
-  // TODO: not validating references.
-}
-
-void MipsAssembler::VerifyObject(FrameOffset src ATTRIBUTE_UNUSED,
-                                 bool could_be_null ATTRIBUTE_UNUSED) {
-  // TODO: not validating references.
-}
-
-void MipsAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister mscratch) {
-  MipsManagedRegister base = mbase.AsMips();
-  MipsManagedRegister scratch = mscratch.AsMips();
-  CHECK(base.IsCoreRegister()) << base;
-  CHECK(scratch.IsCoreRegister()) << scratch;
-  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
-                 base.AsCoreRegister(), offset.Int32Value());
-  Jalr(scratch.AsCoreRegister());
-  NopIfNoReordering();
-  // TODO: place reference map on call.
-}
-
-void MipsAssembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
-  MipsManagedRegister scratch = mscratch.AsMips();
-  CHECK(scratch.IsCoreRegister()) << scratch;
-  // Call *(*(SP + base) + offset)
-  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, base.Int32Value());
-  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
-                 scratch.AsCoreRegister(), offset.Int32Value());
-  Jalr(scratch.AsCoreRegister());
-  NopIfNoReordering();
-  // TODO: place reference map on call.
-}
-
-void MipsAssembler::CallFromThread(ThreadOffset32 offset ATTRIBUTE_UNUSED,
-                                   ManagedRegister mscratch ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "no mips implementation";
-}
-
-void MipsAssembler::GetCurrentThread(ManagedRegister tr) {
-  Move(tr.AsMips().AsCoreRegister(), S1);
-}
-
-void MipsAssembler::GetCurrentThread(FrameOffset offset,
-                                     ManagedRegister mscratch ATTRIBUTE_UNUSED) {
-  StoreToOffset(kStoreWord, S1, SP, offset.Int32Value());
-}
-
-void MipsAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
-  MipsManagedRegister scratch = mscratch.AsMips();
-  exception_blocks_.emplace_back(scratch, stack_adjust);
-  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
-                 S1, Thread::ExceptionOffset<kMipsPointerSize>().Int32Value());
-  Bnez(scratch.AsCoreRegister(), exception_blocks_.back().Entry());
-}
-
-void MipsAssembler::EmitExceptionPoll(MipsExceptionSlowPath* exception) {
-  Bind(exception->Entry());
-  if (exception->stack_adjust_ != 0) {  // Fix up the frame.
-    DecreaseFrameSize(exception->stack_adjust_);
-  }
-  // Pass exception object as argument.
-  // Don't care about preserving A0 as this call won't return.
-  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
-  Move(A0, exception->scratch_.AsCoreRegister());
-  // Set up call to Thread::Current()->pDeliverException.
-  LoadFromOffset(kLoadWord, T9, S1,
-    QUICK_ENTRYPOINT_OFFSET(kMipsPointerSize, pDeliverException).Int32Value());
-  Jr(T9);
-  NopIfNoReordering();
-
-  // Call never returns.
-  Break();
-}
-
-}  // namespace mips
-}  // namespace art
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
deleted file mode 100644
index a24071d..0000000
--- a/compiler/utils/mips/assembler_mips.h
+++ /dev/null
@@ -1,1826 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_MIPS_ASSEMBLER_MIPS_H_
-#define ART_COMPILER_UTILS_MIPS_ASSEMBLER_MIPS_H_
-
-#include <deque>
-#include <utility>
-#include <vector>
-
-#include "arch/mips/instruction_set_features_mips.h"
-#include "base/arena_containers.h"
-#include "base/enums.h"
-#include "base/globals.h"
-#include "base/macros.h"
-#include "base/stl_util_identity.h"
-#include "constants_mips.h"
-#include "heap_poisoning.h"
-#include "managed_register_mips.h"
-#include "offsets.h"
-#include "utils/assembler.h"
-#include "utils/jni_macro_assembler.h"
-#include "utils/label.h"
-
-namespace art {
-namespace mips {
-
-static constexpr size_t kMipsHalfwordSize = 2;
-static constexpr size_t kMipsWordSize = 4;
-static constexpr size_t kMipsDoublewordSize = 8;
-
-enum LoadOperandType {
-  kLoadSignedByte,
-  kLoadUnsignedByte,
-  kLoadSignedHalfword,
-  kLoadUnsignedHalfword,
-  kLoadWord,
-  kLoadDoubleword,
-  kLoadQuadword
-};
-
-enum StoreOperandType {
-  kStoreByte,
-  kStoreHalfword,
-  kStoreWord,
-  kStoreDoubleword,
-  kStoreQuadword
-};
-
-// Used to test the values returned by ClassS/ClassD.
-enum FPClassMaskType {
-  kSignalingNaN      = 0x001,
-  kQuietNaN          = 0x002,
-  kNegativeInfinity  = 0x004,
-  kNegativeNormal    = 0x008,
-  kNegativeSubnormal = 0x010,
-  kNegativeZero      = 0x020,
-  kPositiveInfinity  = 0x040,
-  kPositiveNormal    = 0x080,
-  kPositiveSubnormal = 0x100,
-  kPositiveZero      = 0x200,
-};
-
-// Instruction description in terms of input and output registers.
-// Used for instruction reordering.
-struct InOutRegMasks {
-  InOutRegMasks()
-      : gpr_outs_(0), gpr_ins_(0), fpr_outs_(0), fpr_ins_(0), cc_outs_(0), cc_ins_(0) {}
-
-  inline InOutRegMasks& GprOuts(Register reg) {
-    gpr_outs_ |= (1u << reg);
-    gpr_outs_ &= ~1u;  // Ignore register ZERO.
-    return *this;
-  }
-  template<typename T, typename... Ts>
-  inline InOutRegMasks& GprOuts(T one, Ts... more) { GprOuts(one); GprOuts(more...); return *this; }
-
-  inline InOutRegMasks& GprIns(Register reg) {
-    gpr_ins_ |= (1u << reg);
-    gpr_ins_ &= ~1u;  // Ignore register ZERO.
-    return *this;
-  }
-  template<typename T, typename... Ts>
-  inline InOutRegMasks& GprIns(T one, Ts... more) { GprIns(one); GprIns(more...); return *this; }
-
-  inline InOutRegMasks& GprInOuts(Register reg) { GprIns(reg); GprOuts(reg); return *this; }
-  template<typename T, typename... Ts>
-  inline InOutRegMasks& GprInOuts(T one, Ts... more) {
-    GprInOuts(one);
-    GprInOuts(more...);
-    return *this;
-  }
-
-  inline InOutRegMasks& FprOuts(FRegister reg) { fpr_outs_ |= (1u << reg); return *this; }
-  inline InOutRegMasks& FprOuts(VectorRegister reg) { return FprOuts(static_cast<FRegister>(reg)); }
-  template<typename T, typename... Ts>
-  inline InOutRegMasks& FprOuts(T one, Ts... more) { FprOuts(one); FprOuts(more...); return *this; }
-
-  inline InOutRegMasks& FprIns(FRegister reg) { fpr_ins_ |= (1u << reg); return *this; }
-  inline InOutRegMasks& FprIns(VectorRegister reg) { return FprIns(static_cast<FRegister>(reg)); }
-  template<typename T, typename... Ts>
-  inline InOutRegMasks& FprIns(T one, Ts... more) { FprIns(one); FprIns(more...); return *this; }
-
-  inline InOutRegMasks& FprInOuts(FRegister reg) { FprIns(reg); FprOuts(reg); return *this; }
-  inline InOutRegMasks& FprInOuts(VectorRegister reg) {
-    return FprInOuts(static_cast<FRegister>(reg));
-  }
-  template<typename T, typename... Ts>
-  inline InOutRegMasks& FprInOuts(T one, Ts... more) {
-    FprInOuts(one);
-    FprInOuts(more...);
-    return *this;
-  }
-
-  inline InOutRegMasks& CcOuts(int cc) { cc_outs_ |= (1u << cc); return *this; }
-  template<typename T, typename... Ts>
-  inline InOutRegMasks& CcOuts(T one, Ts... more) { CcOuts(one); CcOuts(more...); return *this; }
-
-  inline InOutRegMasks& CcIns(int cc) { cc_ins_ |= (1u << cc); return *this; }
-  template<typename T, typename... Ts>
-  inline InOutRegMasks& CcIns(T one, Ts... more) { CcIns(one); CcIns(more...); return *this; }
-
-  // Mask of output GPRs for the instruction.
-  uint32_t gpr_outs_;
-  // Mask of input GPRs for the instruction.
-  uint32_t gpr_ins_;
-  // Mask of output FPRs for the instruction.
-  uint32_t fpr_outs_;
-  // Mask of input FPRs for the instruction.
-  uint32_t fpr_ins_;
-  // Mask of output FPU condition code flags for the instruction.
-  uint32_t cc_outs_;
-  // Mask of input FPU condition code flags for the instruction.
-  uint32_t cc_ins_;
-
-  // TODO: add LO and HI.
-};
-
-class MipsLabel : public Label {
- public:
-  MipsLabel() : prev_branch_id_plus_one_(0) {}
-
-  MipsLabel(MipsLabel&& src)
-      : Label(std::move(src)), prev_branch_id_plus_one_(src.prev_branch_id_plus_one_) {}
-
-  void AdjustBoundPosition(int delta) {
-    CHECK(IsBound());
-    // Bound label's position is negative, hence decrementing it.
-    position_ -= delta;
-  }
-
- private:
-  uint32_t prev_branch_id_plus_one_;  // To get distance from preceding branch, if any.
-
-  friend class MipsAssembler;
-  DISALLOW_COPY_AND_ASSIGN(MipsLabel);
-};
-
-// Assembler literal is a value embedded in code, retrieved using a PC-relative load.
-class Literal {
- public:
-  static constexpr size_t kMaxSize = 8;
-
-  Literal(uint32_t size, const uint8_t* data)
-      : label_(), size_(size) {
-    DCHECK_LE(size, Literal::kMaxSize);
-    memcpy(data_, data, size);
-  }
-
-  template <typename T>
-  T GetValue() const {
-    DCHECK_EQ(size_, sizeof(T));
-    T value;
-    memcpy(&value, data_, sizeof(T));
-    return value;
-  }
-
-  uint32_t GetSize() const {
-    return size_;
-  }
-
-  const uint8_t* GetData() const {
-    return data_;
-  }
-
-  MipsLabel* GetLabel() {
-    return &label_;
-  }
-
-  const MipsLabel* GetLabel() const {
-    return &label_;
-  }
-
- private:
-  MipsLabel label_;
-  const uint32_t size_;
-  uint8_t data_[kMaxSize];
-
-  DISALLOW_COPY_AND_ASSIGN(Literal);
-};
-
-// Jump table: table of labels emitted after the literals. Similar to literals.
-class JumpTable {
- public:
-  explicit JumpTable(std::vector<MipsLabel*>&& labels)
-      : label_(), labels_(std::move(labels)) {
-  }
-
-  uint32_t GetSize() const {
-    return static_cast<uint32_t>(labels_.size()) * sizeof(uint32_t);
-  }
-
-  const std::vector<MipsLabel*>& GetData() const {
-    return labels_;
-  }
-
-  MipsLabel* GetLabel() {
-    return &label_;
-  }
-
-  const MipsLabel* GetLabel() const {
-    return &label_;
-  }
-
- private:
-  MipsLabel label_;
-  std::vector<MipsLabel*> labels_;
-
-  DISALLOW_COPY_AND_ASSIGN(JumpTable);
-};
-
-// Slowpath entered when Thread::Current()->_exception is non-null.
-class MipsExceptionSlowPath {
- public:
-  explicit MipsExceptionSlowPath(MipsManagedRegister scratch, size_t stack_adjust)
-      : scratch_(scratch), stack_adjust_(stack_adjust) {}
-
-  MipsExceptionSlowPath(MipsExceptionSlowPath&& src)
-      : scratch_(src.scratch_),
-        stack_adjust_(src.stack_adjust_),
-        exception_entry_(std::move(src.exception_entry_)) {}
-
- private:
-  MipsLabel* Entry() { return &exception_entry_; }
-  const MipsManagedRegister scratch_;
-  const size_t stack_adjust_;
-  MipsLabel exception_entry_;
-
-  friend class MipsAssembler;
-  DISALLOW_COPY_AND_ASSIGN(MipsExceptionSlowPath);
-};
-
-class MipsAssembler final : public Assembler, public JNIMacroAssembler<PointerSize::k32> {
- public:
-  using JNIBase = JNIMacroAssembler<PointerSize::k32>;
-
-  explicit MipsAssembler(ArenaAllocator* allocator,
-                         const MipsInstructionSetFeatures* instruction_set_features = nullptr)
-      : Assembler(allocator),
-        overwriting_(false),
-        overwrite_location_(0),
-        reordering_(true),
-        ds_fsm_state_(kExpectingLabel),
-        ds_fsm_target_pc_(0),
-        literals_(allocator->Adapter(kArenaAllocAssembler)),
-        jump_tables_(allocator->Adapter(kArenaAllocAssembler)),
-        last_position_adjustment_(0),
-        last_old_position_(0),
-        last_branch_id_(0),
-        has_msa_(instruction_set_features != nullptr ? instruction_set_features->HasMsa() : false),
-        isa_features_(instruction_set_features) {
-    cfi().DelayEmittingAdvancePCs();
-  }
-
-  size_t CodeSize() const override { return Assembler::CodeSize(); }
-  size_t CodePosition() override;
-  DebugFrameOpCodeWriterForAssembler& cfi() override { return Assembler::cfi(); }
-
-  virtual ~MipsAssembler() {
-    for (auto& branch : branches_) {
-      CHECK(branch.IsResolved());
-    }
-  }
-
-  // Emit Machine Instructions.
-  void Addu(Register rd, Register rs, Register rt);
-  void Addiu(Register rt, Register rs, uint16_t imm16, MipsLabel* patcher_label);
-  void Addiu(Register rt, Register rs, uint16_t imm16);
-  void Subu(Register rd, Register rs, Register rt);
-
-  void MultR2(Register rs, Register rt);  // R2
-  void MultuR2(Register rs, Register rt);  // R2
-  void DivR2(Register rs, Register rt);  // R2
-  void DivuR2(Register rs, Register rt);  // R2
-  void MulR2(Register rd, Register rs, Register rt);  // R2
-  void DivR2(Register rd, Register rs, Register rt);  // R2
-  void ModR2(Register rd, Register rs, Register rt);  // R2
-  void DivuR2(Register rd, Register rs, Register rt);  // R2
-  void ModuR2(Register rd, Register rs, Register rt);  // R2
-  void MulR6(Register rd, Register rs, Register rt);  // R6
-  void MuhR6(Register rd, Register rs, Register rt);  // R6
-  void MuhuR6(Register rd, Register rs, Register rt);  // R6
-  void DivR6(Register rd, Register rs, Register rt);  // R6
-  void ModR6(Register rd, Register rs, Register rt);  // R6
-  void DivuR6(Register rd, Register rs, Register rt);  // R6
-  void ModuR6(Register rd, Register rs, Register rt);  // R6
-
-  void And(Register rd, Register rs, Register rt);
-  void Andi(Register rt, Register rs, uint16_t imm16);
-  void Or(Register rd, Register rs, Register rt);
-  void Ori(Register rt, Register rs, uint16_t imm16);
-  void Xor(Register rd, Register rs, Register rt);
-  void Xori(Register rt, Register rs, uint16_t imm16);
-  void Nor(Register rd, Register rs, Register rt);
-
-  void Movz(Register rd, Register rs, Register rt);  // R2
-  void Movn(Register rd, Register rs, Register rt);  // R2
-  void Seleqz(Register rd, Register rs, Register rt);  // R6
-  void Selnez(Register rd, Register rs, Register rt);  // R6
-  void ClzR6(Register rd, Register rs);
-  void ClzR2(Register rd, Register rs);
-  void CloR6(Register rd, Register rs);
-  void CloR2(Register rd, Register rs);
-
-  void Seb(Register rd, Register rt);  // R2+
-  void Seh(Register rd, Register rt);  // R2+
-  void Wsbh(Register rd, Register rt);  // R2+
-  void Bitswap(Register rd, Register rt);  // R6
-
-  void Sll(Register rd, Register rt, int shamt);
-  void Srl(Register rd, Register rt, int shamt);
-  void Rotr(Register rd, Register rt, int shamt);  // R2+
-  void Sra(Register rd, Register rt, int shamt);
-  void Sllv(Register rd, Register rt, Register rs);
-  void Srlv(Register rd, Register rt, Register rs);
-  void Rotrv(Register rd, Register rt, Register rs);  // R2+
-  void Srav(Register rd, Register rt, Register rs);
-  void Ext(Register rd, Register rt, int pos, int size);  // R2+
-  void Ins(Register rd, Register rt, int pos, int size);  // R2+
-  void Lsa(Register rd, Register rs, Register rt, int saPlusOne);  // R6
-  void ShiftAndAdd(Register dst, Register src_idx, Register src_base, int shamt, Register tmp = AT);
-
-  void Lb(Register rt, Register rs, uint16_t imm16);
-  void Lh(Register rt, Register rs, uint16_t imm16);
-  void Lw(Register rt, Register rs, uint16_t imm16, MipsLabel* patcher_label);
-  void Lw(Register rt, Register rs, uint16_t imm16);
-  void Lwl(Register rt, Register rs, uint16_t imm16);
-  void Lwr(Register rt, Register rs, uint16_t imm16);
-  void Lbu(Register rt, Register rs, uint16_t imm16);
-  void Lhu(Register rt, Register rs, uint16_t imm16);
-  void Lwpc(Register rs, uint32_t imm19);  // R6
-  void Lui(Register rt, uint16_t imm16);
-  void Aui(Register rt, Register rs, uint16_t imm16);  // R6
-  void AddUpper(Register rt, Register rs, uint16_t imm16, Register tmp = AT);
-  void Sync(uint32_t stype);
-  void Mfhi(Register rd);  // R2
-  void Mflo(Register rd);  // R2
-
-  void Sb(Register rt, Register rs, uint16_t imm16);
-  void Sh(Register rt, Register rs, uint16_t imm16);
-  void Sw(Register rt, Register rs, uint16_t imm16, MipsLabel* patcher_label);
-  void Sw(Register rt, Register rs, uint16_t imm16);
-  void Swl(Register rt, Register rs, uint16_t imm16);
-  void Swr(Register rt, Register rs, uint16_t imm16);
-
-  void LlR2(Register rt, Register base, int16_t imm16 = 0);
-  void ScR2(Register rt, Register base, int16_t imm16 = 0);
-  void LlR6(Register rt, Register base, int16_t imm9 = 0);
-  void ScR6(Register rt, Register base, int16_t imm9 = 0);
-
-  void Slt(Register rd, Register rs, Register rt);
-  void Sltu(Register rd, Register rs, Register rt);
-  void Slti(Register rt, Register rs, uint16_t imm16);
-  void Sltiu(Register rt, Register rs, uint16_t imm16);
-
-  // Branches and jumps to immediate offsets/addresses do not take care of their
-  // delay/forbidden slots and generally should not be used directly. This applies
-  // to the following R2 and R6 branch/jump instructions with imm16, imm21, addr26
-  // offsets/addresses.
-  // Use branches/jumps to labels instead.
-  void B(uint16_t imm16);
-  void Bal(uint16_t imm16);
-  void Beq(Register rs, Register rt, uint16_t imm16);
-  void Bne(Register rs, Register rt, uint16_t imm16);
-  void Beqz(Register rt, uint16_t imm16);
-  void Bnez(Register rt, uint16_t imm16);
-  void Bltz(Register rt, uint16_t imm16);
-  void Bgez(Register rt, uint16_t imm16);
-  void Blez(Register rt, uint16_t imm16);
-  void Bgtz(Register rt, uint16_t imm16);
-  void Bc1f(uint16_t imm16);  // R2
-  void Bc1f(int cc, uint16_t imm16);  // R2
-  void Bc1t(uint16_t imm16);  // R2
-  void Bc1t(int cc, uint16_t imm16);  // R2
-  void J(uint32_t addr26);
-  void Jal(uint32_t addr26);
-  // Jalr() and Jr() fill their delay slots when reordering is enabled.
-  // When reordering is disabled, the delay slots must be filled manually.
-  // You may use NopIfNoReordering() to fill them when reordering is disabled.
-  void Jalr(Register rd, Register rs);
-  void Jalr(Register rs);
-  void Jr(Register rs);
-  // Nal() does not fill its delay slot. It must be filled manually.
-  void Nal();
-  void Auipc(Register rs, uint16_t imm16);  // R6
-  void Addiupc(Register rs, uint32_t imm19);  // R6
-  void Bc(uint32_t imm26);  // R6
-  void Balc(uint32_t imm26);  // R6
-  void Jic(Register rt, uint16_t imm16);  // R6
-  void Jialc(Register rt, uint16_t imm16);  // R6
-  void Bltc(Register rs, Register rt, uint16_t imm16);  // R6
-  void Bltzc(Register rt, uint16_t imm16);  // R6
-  void Bgtzc(Register rt, uint16_t imm16);  // R6
-  void Bgec(Register rs, Register rt, uint16_t imm16);  // R6
-  void Bgezc(Register rt, uint16_t imm16);  // R6
-  void Blezc(Register rt, uint16_t imm16);  // R6
-  void Bltuc(Register rs, Register rt, uint16_t imm16);  // R6
-  void Bgeuc(Register rs, Register rt, uint16_t imm16);  // R6
-  void Beqc(Register rs, Register rt, uint16_t imm16);  // R6
-  void Bnec(Register rs, Register rt, uint16_t imm16);  // R6
-  void Beqzc(Register rs, uint32_t imm21);  // R6
-  void Bnezc(Register rs, uint32_t imm21);  // R6
-  void Bc1eqz(FRegister ft, uint16_t imm16);  // R6
-  void Bc1nez(FRegister ft, uint16_t imm16);  // R6
-
-  void AddS(FRegister fd, FRegister fs, FRegister ft);
-  void SubS(FRegister fd, FRegister fs, FRegister ft);
-  void MulS(FRegister fd, FRegister fs, FRegister ft);
-  void DivS(FRegister fd, FRegister fs, FRegister ft);
-  void AddD(FRegister fd, FRegister fs, FRegister ft);
-  void SubD(FRegister fd, FRegister fs, FRegister ft);
-  void MulD(FRegister fd, FRegister fs, FRegister ft);
-  void DivD(FRegister fd, FRegister fs, FRegister ft);
-  void SqrtS(FRegister fd, FRegister fs);
-  void SqrtD(FRegister fd, FRegister fs);
-  void AbsS(FRegister fd, FRegister fs);
-  void AbsD(FRegister fd, FRegister fs);
-  void MovS(FRegister fd, FRegister fs);
-  void MovD(FRegister fd, FRegister fs);
-  void NegS(FRegister fd, FRegister fs);
-  void NegD(FRegister fd, FRegister fs);
-
-  void CunS(FRegister fs, FRegister ft);  // R2
-  void CunS(int cc, FRegister fs, FRegister ft);  // R2
-  void CeqS(FRegister fs, FRegister ft);  // R2
-  void CeqS(int cc, FRegister fs, FRegister ft);  // R2
-  void CueqS(FRegister fs, FRegister ft);  // R2
-  void CueqS(int cc, FRegister fs, FRegister ft);  // R2
-  void ColtS(FRegister fs, FRegister ft);  // R2
-  void ColtS(int cc, FRegister fs, FRegister ft);  // R2
-  void CultS(FRegister fs, FRegister ft);  // R2
-  void CultS(int cc, FRegister fs, FRegister ft);  // R2
-  void ColeS(FRegister fs, FRegister ft);  // R2
-  void ColeS(int cc, FRegister fs, FRegister ft);  // R2
-  void CuleS(FRegister fs, FRegister ft);  // R2
-  void CuleS(int cc, FRegister fs, FRegister ft);  // R2
-  void CunD(FRegister fs, FRegister ft);  // R2
-  void CunD(int cc, FRegister fs, FRegister ft);  // R2
-  void CeqD(FRegister fs, FRegister ft);  // R2
-  void CeqD(int cc, FRegister fs, FRegister ft);  // R2
-  void CueqD(FRegister fs, FRegister ft);  // R2
-  void CueqD(int cc, FRegister fs, FRegister ft);  // R2
-  void ColtD(FRegister fs, FRegister ft);  // R2
-  void ColtD(int cc, FRegister fs, FRegister ft);  // R2
-  void CultD(FRegister fs, FRegister ft);  // R2
-  void CultD(int cc, FRegister fs, FRegister ft);  // R2
-  void ColeD(FRegister fs, FRegister ft);  // R2
-  void ColeD(int cc, FRegister fs, FRegister ft);  // R2
-  void CuleD(FRegister fs, FRegister ft);  // R2
-  void CuleD(int cc, FRegister fs, FRegister ft);  // R2
-  void CmpUnS(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void CmpEqS(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void CmpUeqS(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void CmpLtS(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void CmpUltS(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void CmpLeS(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void CmpUleS(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void CmpOrS(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void CmpUneS(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void CmpNeS(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void CmpUnD(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void CmpEqD(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void CmpUeqD(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void CmpLtD(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void CmpUltD(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void CmpLeD(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void CmpUleD(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void CmpOrD(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void CmpUneD(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void CmpNeD(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void Movf(Register rd, Register rs, int cc = 0);  // R2
-  void Movt(Register rd, Register rs, int cc = 0);  // R2
-  void MovfS(FRegister fd, FRegister fs, int cc = 0);  // R2
-  void MovfD(FRegister fd, FRegister fs, int cc = 0);  // R2
-  void MovtS(FRegister fd, FRegister fs, int cc = 0);  // R2
-  void MovtD(FRegister fd, FRegister fs, int cc = 0);  // R2
-  void MovzS(FRegister fd, FRegister fs, Register rt);  // R2
-  void MovzD(FRegister fd, FRegister fs, Register rt);  // R2
-  void MovnS(FRegister fd, FRegister fs, Register rt);  // R2
-  void MovnD(FRegister fd, FRegister fs, Register rt);  // R2
-  void SelS(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void SelD(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void SeleqzS(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void SeleqzD(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void SelnezS(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void SelnezD(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void ClassS(FRegister fd, FRegister fs);  // R6
-  void ClassD(FRegister fd, FRegister fs);  // R6
-  void MinS(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void MinD(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void MaxS(FRegister fd, FRegister fs, FRegister ft);  // R6
-  void MaxD(FRegister fd, FRegister fs, FRegister ft);  // R6
-
-  void TruncLS(FRegister fd, FRegister fs);  // R2+, FR=1
-  void TruncLD(FRegister fd, FRegister fs);  // R2+, FR=1
-  void TruncWS(FRegister fd, FRegister fs);
-  void TruncWD(FRegister fd, FRegister fs);
-  void Cvtsw(FRegister fd, FRegister fs);
-  void Cvtdw(FRegister fd, FRegister fs);
-  void Cvtsd(FRegister fd, FRegister fs);
-  void Cvtds(FRegister fd, FRegister fs);
-  void Cvtsl(FRegister fd, FRegister fs);  // R2+, FR=1
-  void Cvtdl(FRegister fd, FRegister fs);  // R2+, FR=1
-  void FloorWS(FRegister fd, FRegister fs);
-  void FloorWD(FRegister fd, FRegister fs);
-
-  // Note, the 32 LSBs of a 64-bit value must be loaded into an FPR before the 32 MSBs
-  // when loading the value as 32-bit halves. This applies to all 32-bit FPR loads:
-  // Mtc1(), Mthc1(), MoveToFpuHigh(), Lwc1(). Even if you need two Mtc1()'s or two
-  // Lwc1()'s to load a pair of 32-bit FPRs and these loads do not interfere with one
-  // another (unlike Mtc1() and Mthc1() with 64-bit FPRs), maintain the order:
-  // low then high.
-  //
-  // Also, prefer MoveFromFpuHigh()/MoveToFpuHigh() over Mfhc1()/Mthc1() and Mfc1()/Mtc1().
-  // This will save you some if statements.
-  FRegister GetFpuRegLow(FRegister reg);
-  void Mfc1(Register rt, FRegister fs);
-  void Mtc1(Register rt, FRegister fs);
-  void Mfhc1(Register rt, FRegister fs);
-  void Mthc1(Register rt, FRegister fs);
-  void MoveFromFpuHigh(Register rt, FRegister fs);
-  void MoveToFpuHigh(Register rt, FRegister fs);
-  void Lwc1(FRegister ft, Register rs, uint16_t imm16);
-  void Ldc1(FRegister ft, Register rs, uint16_t imm16);
-  void Swc1(FRegister ft, Register rs, uint16_t imm16);
-  void Sdc1(FRegister ft, Register rs, uint16_t imm16);
-
-  void Break();
-  void Nop();
-  void NopIfNoReordering();
-  void Move(Register rd, Register rs);
-  void Clear(Register rd);
-  void Not(Register rd, Register rs);
-
-  // MSA instructions.
-  void AndV(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void OrV(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void NorV(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void XorV(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-
-  void AddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void AddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void AddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void AddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Asub_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Asub_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Asub_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Asub_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Asub_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Asub_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Asub_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Asub_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MulvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MulvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MulvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MulvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Div_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Div_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Div_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Div_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Div_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Div_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Div_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Div_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Mod_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Mod_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Mod_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Mod_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Mod_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Mod_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Mod_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Mod_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Add_aB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Add_aH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Add_aW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Add_aD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Ave_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Ave_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Ave_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Ave_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Ave_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Ave_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Ave_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Ave_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Aver_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Aver_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Aver_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Aver_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Aver_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Aver_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Aver_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Aver_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Max_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Max_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Max_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Max_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Max_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Max_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Max_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Max_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Min_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Min_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Min_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Min_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Min_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Min_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Min_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Min_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-
-  void FaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FmulW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FmulD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FdivW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FdivD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FmaxW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FmaxD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FminW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FminD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-
-  void Ffint_sW(VectorRegister wd, VectorRegister ws);
-  void Ffint_sD(VectorRegister wd, VectorRegister ws);
-  void Ftint_sW(VectorRegister wd, VectorRegister ws);
-  void Ftint_sD(VectorRegister wd, VectorRegister ws);
-
-  void SllB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SllH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SllW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SllD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SraB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SraH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SraW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SraD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SrlB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SrlH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SrlW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SrlD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-
-  // Immediate shift instructions, where shamtN denotes shift amount (must be between 0 and 2^N-1).
-  void SlliB(VectorRegister wd, VectorRegister ws, int shamt3);
-  void SlliH(VectorRegister wd, VectorRegister ws, int shamt4);
-  void SlliW(VectorRegister wd, VectorRegister ws, int shamt5);
-  void SlliD(VectorRegister wd, VectorRegister ws, int shamt6);
-  void SraiB(VectorRegister wd, VectorRegister ws, int shamt3);
-  void SraiH(VectorRegister wd, VectorRegister ws, int shamt4);
-  void SraiW(VectorRegister wd, VectorRegister ws, int shamt5);
-  void SraiD(VectorRegister wd, VectorRegister ws, int shamt6);
-  void SrliB(VectorRegister wd, VectorRegister ws, int shamt3);
-  void SrliH(VectorRegister wd, VectorRegister ws, int shamt4);
-  void SrliW(VectorRegister wd, VectorRegister ws, int shamt5);
-  void SrliD(VectorRegister wd, VectorRegister ws, int shamt6);
-
-  void MoveV(VectorRegister wd, VectorRegister ws);
-  void SplatiB(VectorRegister wd, VectorRegister ws, int n4);
-  void SplatiH(VectorRegister wd, VectorRegister ws, int n3);
-  void SplatiW(VectorRegister wd, VectorRegister ws, int n2);
-  void SplatiD(VectorRegister wd, VectorRegister ws, int n1);
-  void Copy_sB(Register rd, VectorRegister ws, int n4);
-  void Copy_sH(Register rd, VectorRegister ws, int n3);
-  void Copy_sW(Register rd, VectorRegister ws, int n2);
-  void Copy_uB(Register rd, VectorRegister ws, int n4);
-  void Copy_uH(Register rd, VectorRegister ws, int n3);
-  void InsertB(VectorRegister wd, Register rs, int n4);
-  void InsertH(VectorRegister wd, Register rs, int n3);
-  void InsertW(VectorRegister wd, Register rs, int n2);
-  void FillB(VectorRegister wd, Register rs);
-  void FillH(VectorRegister wd, Register rs);
-  void FillW(VectorRegister wd, Register rs);
-
-  void LdiB(VectorRegister wd, int imm8);
-  void LdiH(VectorRegister wd, int imm10);
-  void LdiW(VectorRegister wd, int imm10);
-  void LdiD(VectorRegister wd, int imm10);
-  void LdB(VectorRegister wd, Register rs, int offset);
-  void LdH(VectorRegister wd, Register rs, int offset);
-  void LdW(VectorRegister wd, Register rs, int offset);
-  void LdD(VectorRegister wd, Register rs, int offset);
-  void StB(VectorRegister wd, Register rs, int offset);
-  void StH(VectorRegister wd, Register rs, int offset);
-  void StW(VectorRegister wd, Register rs, int offset);
-  void StD(VectorRegister wd, Register rs, int offset);
-
-  void IlvlB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvlH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvlW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvlD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvrB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvrH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvrW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvrD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvevB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvevH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvevW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvevD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvodB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvodH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvodW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvodD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-
-  void MaddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MaddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MaddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MaddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MsubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MsubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MsubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MsubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FmaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FmaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FmsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FmsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-
-  void Hadd_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Hadd_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Hadd_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Hadd_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Hadd_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Hadd_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-
-  void PcntB(VectorRegister wd, VectorRegister ws);
-  void PcntH(VectorRegister wd, VectorRegister ws);
-  void PcntW(VectorRegister wd, VectorRegister ws);
-  void PcntD(VectorRegister wd, VectorRegister ws);
-
-  // Helper for replicating floating point value in all destination elements.
-  void ReplicateFPToVectorRegister(VectorRegister dst, FRegister src, bool is_double);
-
-  // Higher level composite instructions.
-  void LoadConst32(Register rd, int32_t value);
-  void LoadConst64(Register reg_hi, Register reg_lo, int64_t value);
-  void LoadDConst64(FRegister rd, int64_t value, Register temp);
-  void LoadSConst32(FRegister r, int32_t value, Register temp);
-  void Addiu32(Register rt, Register rs, int32_t value, Register rtmp = AT);
-
-  void Bind(MipsLabel* label);
-  // When `is_bare` is false, the branches will promote to long (if the range
-  // of the individual branch instruction is insufficient) and the delay/
-  // forbidden slots will be taken care of.
-  // Use `is_bare = false` when the branch target may be out of reach of the
-  // individual branch instruction. IOW, this is for general purpose use.
-  //
-  // When `is_bare` is true, just the branch instructions will be generated
-  // leaving delay/forbidden slot filling up to the caller and the branches
-  // won't promote to long if the range is insufficient (you'll get a
-  // compilation error when the range is exceeded).
-  // Use `is_bare = true` when the branch target is known to be within reach
-  // of the individual branch instruction. This is intended for small local
-  // optimizations around delay/forbidden slots.
-  // Also prefer using `is_bare = true` if the code near the branch is to be
-  // patched or analyzed at run time (e.g. introspection) to
-  // - show the intent and
-  // - fail during compilation rather than during patching/execution if the
-  //   bare branch range is insufficent but the code size and layout are
-  //   expected to remain unchanged
-  //
-  // R2 branches with delay slots that are also available on R6.
-  // On R6 when `is_bare` is false these convert to equivalent R6 compact
-  // branches (to reduce code size). On R2 or when `is_bare` is true they
-  // remain R2 branches with delay slots.
-  void B(MipsLabel* label, bool is_bare = false);
-  void Bal(MipsLabel* label, bool is_bare = false);
-  void Beq(Register rs, Register rt, MipsLabel* label, bool is_bare = false);
-  void Bne(Register rs, Register rt, MipsLabel* label, bool is_bare = false);
-  void Beqz(Register rt, MipsLabel* label, bool is_bare = false);
-  void Bnez(Register rt, MipsLabel* label, bool is_bare = false);
-  void Bltz(Register rt, MipsLabel* label, bool is_bare = false);
-  void Bgez(Register rt, MipsLabel* label, bool is_bare = false);
-  void Blez(Register rt, MipsLabel* label, bool is_bare = false);
-  void Bgtz(Register rt, MipsLabel* label, bool is_bare = false);
-  void Blt(Register rs, Register rt, MipsLabel* label, bool is_bare = false);
-  void Bge(Register rs, Register rt, MipsLabel* label, bool is_bare = false);
-  void Bltu(Register rs, Register rt, MipsLabel* label, bool is_bare = false);
-  void Bgeu(Register rs, Register rt, MipsLabel* label, bool is_bare = false);
-  // R2-only branches with delay slots.
-  void Bc1f(MipsLabel* label, bool is_bare = false);  // R2
-  void Bc1f(int cc, MipsLabel* label, bool is_bare = false);  // R2
-  void Bc1t(MipsLabel* label, bool is_bare = false);  // R2
-  void Bc1t(int cc, MipsLabel* label, bool is_bare = false);  // R2
-  // R6-only compact branches without delay/forbidden slots.
-  void Bc(MipsLabel* label, bool is_bare = false);  // R6
-  void Balc(MipsLabel* label, bool is_bare = false);  // R6
-  // R6-only compact branches with forbidden slots.
-  void Beqc(Register rs, Register rt, MipsLabel* label, bool is_bare = false);  // R6
-  void Bnec(Register rs, Register rt, MipsLabel* label, bool is_bare = false);  // R6
-  void Beqzc(Register rt, MipsLabel* label, bool is_bare = false);  // R6
-  void Bnezc(Register rt, MipsLabel* label, bool is_bare = false);  // R6
-  void Bltzc(Register rt, MipsLabel* label, bool is_bare = false);  // R6
-  void Bgezc(Register rt, MipsLabel* label, bool is_bare = false);  // R6
-  void Blezc(Register rt, MipsLabel* label, bool is_bare = false);  // R6
-  void Bgtzc(Register rt, MipsLabel* label, bool is_bare = false);  // R6
-  void Bltc(Register rs, Register rt, MipsLabel* label, bool is_bare = false);  // R6
-  void Bgec(Register rs, Register rt, MipsLabel* label, bool is_bare = false);  // R6
-  void Bltuc(Register rs, Register rt, MipsLabel* label, bool is_bare = false);  // R6
-  void Bgeuc(Register rs, Register rt, MipsLabel* label, bool is_bare = false);  // R6
-  // R6-only branches with delay slots.
-  void Bc1eqz(FRegister ft, MipsLabel* label, bool is_bare = false);  // R6
-  void Bc1nez(FRegister ft, MipsLabel* label, bool is_bare = false);  // R6
-
-  void EmitLoad(ManagedRegister m_dst, Register src_register, int32_t src_offset, size_t size);
-  void AdjustBaseAndOffset(Register& base,
-                           int32_t& offset,
-                           bool is_doubleword,
-                           bool is_float = false);
-  void AdjustBaseOffsetAndElementSizeShift(Register& base,
-                                           int32_t& offset,
-                                           int& element_size_shift);
-
- private:
-  // This will be used as an argument for loads/stores
-  // when there is no need for implicit null checks.
-  struct NoImplicitNullChecker {
-    void operator()() const {}
-  };
-
- public:
-  template <typename ImplicitNullChecker = NoImplicitNullChecker>
-  void StoreConstToOffset(StoreOperandType type,
-                          int64_t value,
-                          Register base,
-                          int32_t offset,
-                          Register temp,
-                          ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    // We permit `base` and `temp` to coincide (however, we check that neither is AT),
-    // in which case the `base` register may be overwritten in the process.
-    CHECK_NE(temp, AT);  // Must not use AT as temp, so as not to overwrite the adjusted base.
-    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
-    uint32_t low = Low32Bits(value);
-    uint32_t high = High32Bits(value);
-    Register reg;
-    // If the adjustment left `base` unchanged and equal to `temp`, we can't use `temp`
-    // to load and hold the value but we can use AT instead as AT hasn't been used yet.
-    // Otherwise, `temp` can be used for the value. And if `temp` is the same as the
-    // original `base` (that is, `base` prior to the adjustment), the original `base`
-    // register will be overwritten.
-    if (base == temp) {
-      temp = AT;
-    }
-    if (low == 0) {
-      reg = ZERO;
-    } else {
-      reg = temp;
-      LoadConst32(reg, low);
-    }
-    switch (type) {
-      case kStoreByte:
-        Sb(reg, base, offset);
-        break;
-      case kStoreHalfword:
-        Sh(reg, base, offset);
-        break;
-      case kStoreWord:
-        Sw(reg, base, offset);
-        break;
-      case kStoreDoubleword:
-        Sw(reg, base, offset);
-        null_checker();
-        if (high == 0) {
-          reg = ZERO;
-        } else {
-          reg = temp;
-          if (high != low) {
-            LoadConst32(reg, high);
-          }
-        }
-        Sw(reg, base, offset + kMipsWordSize);
-        break;
-      default:
-        LOG(FATAL) << "UNREACHABLE";
-    }
-    if (type != kStoreDoubleword) {
-      null_checker();
-    }
-  }
-
-  template <typename ImplicitNullChecker = NoImplicitNullChecker>
-  void LoadFromOffset(LoadOperandType type,
-                      Register reg,
-                      Register base,
-                      int32_t offset,
-                      ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kLoadDoubleword));
-    switch (type) {
-      case kLoadSignedByte:
-        Lb(reg, base, offset);
-        break;
-      case kLoadUnsignedByte:
-        Lbu(reg, base, offset);
-        break;
-      case kLoadSignedHalfword:
-        Lh(reg, base, offset);
-        break;
-      case kLoadUnsignedHalfword:
-        Lhu(reg, base, offset);
-        break;
-      case kLoadWord:
-        Lw(reg, base, offset);
-        break;
-      case kLoadDoubleword:
-        if (reg == base) {
-          // This will clobber the base when loading the lower register. Since we have to load the
-          // higher register as well, this will fail. Solution: reverse the order.
-          Lw(static_cast<Register>(reg + 1), base, offset + kMipsWordSize);
-          null_checker();
-          Lw(reg, base, offset);
-        } else {
-          Lw(reg, base, offset);
-          null_checker();
-          Lw(static_cast<Register>(reg + 1), base, offset + kMipsWordSize);
-        }
-        break;
-      default:
-        LOG(FATAL) << "UNREACHABLE";
-    }
-    if (type != kLoadDoubleword) {
-      null_checker();
-    }
-  }
-
-  template <typename ImplicitNullChecker = NoImplicitNullChecker>
-  void LoadSFromOffset(FRegister reg,
-                       Register base,
-                       int32_t offset,
-                       ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ false, /* is_float= */ true);
-    Lwc1(reg, base, offset);
-    null_checker();
-  }
-
-  template <typename ImplicitNullChecker = NoImplicitNullChecker>
-  void LoadDFromOffset(FRegister reg,
-                       Register base,
-                       int32_t offset,
-                       ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ true, /* is_float= */ true);
-    if (IsAligned<kMipsDoublewordSize>(offset)) {
-      Ldc1(reg, base, offset);
-      null_checker();
-    } else {
-      if (Is32BitFPU()) {
-        Lwc1(reg, base, offset);
-        null_checker();
-        Lwc1(static_cast<FRegister>(reg + 1), base, offset + kMipsWordSize);
-      } else {
-        // 64-bit FPU.
-        Lwc1(reg, base, offset);
-        null_checker();
-        Lw(T8, base, offset + kMipsWordSize);
-        Mthc1(T8, reg);
-      }
-    }
-  }
-
-  template <typename ImplicitNullChecker = NoImplicitNullChecker>
-  void LoadQFromOffset(FRegister reg,
-                       Register base,
-                       int32_t offset,
-                       ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    int element_size_shift = -1;
-    AdjustBaseOffsetAndElementSizeShift(base, offset, element_size_shift);
-    switch (element_size_shift) {
-      case TIMES_1: LdB(static_cast<VectorRegister>(reg), base, offset); break;
-      case TIMES_2: LdH(static_cast<VectorRegister>(reg), base, offset); break;
-      case TIMES_4: LdW(static_cast<VectorRegister>(reg), base, offset); break;
-      case TIMES_8: LdD(static_cast<VectorRegister>(reg), base, offset); break;
-      default:
-        LOG(FATAL) << "UNREACHABLE";
-    }
-    null_checker();
-  }
-
-  template <typename ImplicitNullChecker = NoImplicitNullChecker>
-  void StoreToOffset(StoreOperandType type,
-                     Register reg,
-                     Register base,
-                     int32_t offset,
-                     ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    // Must not use AT as `reg`, so as not to overwrite the value being stored
-    // with the adjusted `base`.
-    CHECK_NE(reg, AT);
-    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
-    switch (type) {
-      case kStoreByte:
-        Sb(reg, base, offset);
-        break;
-      case kStoreHalfword:
-        Sh(reg, base, offset);
-        break;
-      case kStoreWord:
-        Sw(reg, base, offset);
-        break;
-      case kStoreDoubleword:
-        CHECK_NE(reg, base);
-        CHECK_NE(static_cast<Register>(reg + 1), base);
-        Sw(reg, base, offset);
-        null_checker();
-        Sw(static_cast<Register>(reg + 1), base, offset + kMipsWordSize);
-        break;
-      default:
-        LOG(FATAL) << "UNREACHABLE";
-    }
-    if (type != kStoreDoubleword) {
-      null_checker();
-    }
-  }
-
-  template <typename ImplicitNullChecker = NoImplicitNullChecker>
-  void StoreSToOffset(FRegister reg,
-                      Register base,
-                      int32_t offset,
-                      ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ false, /* is_float= */ true);
-    Swc1(reg, base, offset);
-    null_checker();
-  }
-
-  template <typename ImplicitNullChecker = NoImplicitNullChecker>
-  void StoreDToOffset(FRegister reg,
-                      Register base,
-                      int32_t offset,
-                      ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ true, /* is_float= */ true);
-    if (IsAligned<kMipsDoublewordSize>(offset)) {
-      Sdc1(reg, base, offset);
-      null_checker();
-    } else {
-      if (Is32BitFPU()) {
-        Swc1(reg, base, offset);
-        null_checker();
-        Swc1(static_cast<FRegister>(reg + 1), base, offset + kMipsWordSize);
-      } else {
-        // 64-bit FPU.
-        Mfhc1(T8, reg);
-        Swc1(reg, base, offset);
-        null_checker();
-        Sw(T8, base, offset + kMipsWordSize);
-      }
-    }
-  }
-
-  template <typename ImplicitNullChecker = NoImplicitNullChecker>
-  void StoreQToOffset(FRegister reg,
-                      Register base,
-                      int32_t offset,
-                      ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    int element_size_shift = -1;
-    AdjustBaseOffsetAndElementSizeShift(base, offset, element_size_shift);
-    switch (element_size_shift) {
-      case TIMES_1: StB(static_cast<VectorRegister>(reg), base, offset); break;
-      case TIMES_2: StH(static_cast<VectorRegister>(reg), base, offset); break;
-      case TIMES_4: StW(static_cast<VectorRegister>(reg), base, offset); break;
-      case TIMES_8: StD(static_cast<VectorRegister>(reg), base, offset); break;
-      default:
-        LOG(FATAL) << "UNREACHABLE";
-    }
-    null_checker();
-  }
-
-  void LoadFromOffset(LoadOperandType type, Register reg, Register base, int32_t offset);
-  void LoadSFromOffset(FRegister reg, Register base, int32_t offset);
-  void LoadDFromOffset(FRegister reg, Register base, int32_t offset);
-  void LoadQFromOffset(FRegister reg, Register base, int32_t offset);
-  void StoreToOffset(StoreOperandType type, Register reg, Register base, int32_t offset);
-  void StoreSToOffset(FRegister reg, Register base, int32_t offset);
-  void StoreDToOffset(FRegister reg, Register base, int32_t offset);
-  void StoreQToOffset(FRegister reg, Register base, int32_t offset);
-
-  // Emit data (e.g. encoded instruction or immediate) to the instruction stream.
-  void Emit(uint32_t value);
-
-  // Push/pop composite routines.
-  void Push(Register rs);
-  void Pop(Register rd);
-  void PopAndReturn(Register rd, Register rt);
-
-  //
-  // Heap poisoning.
-  //
-
-  // Poison a heap reference contained in `src` and store it in `dst`.
-  void PoisonHeapReference(Register dst, Register src) {
-    // dst = -src.
-    Subu(dst, ZERO, src);
-  }
-  // Poison a heap reference contained in `reg`.
-  void PoisonHeapReference(Register reg) {
-    // reg = -reg.
-    PoisonHeapReference(reg, reg);
-  }
-  // Unpoison a heap reference contained in `reg`.
-  void UnpoisonHeapReference(Register reg) {
-    // reg = -reg.
-    Subu(reg, ZERO, reg);
-  }
-  // Poison a heap reference contained in `reg` if heap poisoning is enabled.
-  void MaybePoisonHeapReference(Register reg) {
-    if (kPoisonHeapReferences) {
-      PoisonHeapReference(reg);
-    }
-  }
-  // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
-  void MaybeUnpoisonHeapReference(Register reg) {
-    if (kPoisonHeapReferences) {
-      UnpoisonHeapReference(reg);
-    }
-  }
-
-  void Bind(Label* label) override {
-    Bind(down_cast<MipsLabel*>(label));
-  }
-  void Jump(Label* label ATTRIBUTE_UNUSED) override {
-    UNIMPLEMENTED(FATAL) << "Do not use Jump for MIPS";
-  }
-
-  // Don't warn about a different virtual Bind/Jump in the base class.
-  using JNIBase::Bind;
-  using JNIBase::Jump;
-
-  // Create a new label that can be used with Jump/Bind calls.
-  std::unique_ptr<JNIMacroLabel> CreateLabel() override {
-    LOG(FATAL) << "Not implemented on MIPS32";
-    UNREACHABLE();
-  }
-  // Emit an unconditional jump to the label.
-  void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
-    LOG(FATAL) << "Not implemented on MIPS32";
-    UNREACHABLE();
-  }
-  // Emit a conditional jump to the label by applying a unary condition test to the register.
-  void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED,
-            JNIMacroUnaryCondition cond ATTRIBUTE_UNUSED,
-            ManagedRegister test ATTRIBUTE_UNUSED) override {
-    LOG(FATAL) << "Not implemented on MIPS32";
-    UNREACHABLE();
-  }
-
-  // Code at this offset will serve as the target for the Jump call.
-  void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
-    LOG(FATAL) << "Not implemented on MIPS32";
-    UNREACHABLE();
-  }
-
-  // Create a new literal with a given value.
-  // NOTE: Force the template parameter to be explicitly specified.
-  template <typename T>
-  Literal* NewLiteral(typename Identity<T>::type value) {
-    static_assert(std::is_integral<T>::value, "T must be an integral type.");
-    return NewLiteral(sizeof(value), reinterpret_cast<const uint8_t*>(&value));
-  }
-
-  // Load label address using PC-relative addressing.
-  // To be used with data labels in the literal / jump table area only and not
-  // with regular code labels.
-  //
-  // For R6 base_reg must be ZERO.
-  //
-  // On R2 there are two possible uses w.r.t. base_reg:
-  //
-  // - base_reg = ZERO:
-  //   The NAL instruction will be generated as part of the load and it will
-  //   clobber the RA register.
-  //
-  // - base_reg != ZERO:
-  //   The RA-clobbering NAL instruction won't be generated as part of the load.
-  //   The label pc_rel_base_label_ must be bound (with BindPcRelBaseLabel())
-  //   and base_reg must hold the address of the label. Example:
-  //     __ Nal();
-  //     __ Move(S3, RA);
-  //     __ BindPcRelBaseLabel();  // S3 holds the address of pc_rel_base_label_.
-  //     __ LoadLabelAddress(A0, S3, label1);
-  //     __ LoadLabelAddress(A1, S3, label2);
-  //     __ LoadLiteral(V0, S3, literal1);
-  //     __ LoadLiteral(V1, S3, literal2);
-  void LoadLabelAddress(Register dest_reg, Register base_reg, MipsLabel* label);
-
-  // Create a new literal with the given data.
-  Literal* NewLiteral(size_t size, const uint8_t* data);
-
-  // Load literal using PC-relative addressing.
-  // See the above comments for LoadLabelAddress() on the value of base_reg.
-  void LoadLiteral(Register dest_reg, Register base_reg, Literal* literal);
-
-  // Create a jump table for the given labels that will be emitted when finalizing.
-  // When the table is emitted, offsets will be relative to the location of the table.
-  // The table location is determined by the location of its label (the label precedes
-  // the table data) and should be loaded using LoadLabelAddress().
-  JumpTable* CreateJumpTable(std::vector<MipsLabel*>&& labels);
-
-  //
-  // Overridden common assembler high-level functionality.
-  //
-
-  // Emit code that will create an activation on the stack.
-  void BuildFrame(size_t frame_size,
-                  ManagedRegister method_reg,
-                  ArrayRef<const ManagedRegister> callee_save_regs,
-                  const ManagedRegisterEntrySpills& entry_spills) override;
-
-  // Emit code that will remove an activation from the stack.
-  void RemoveFrame(size_t frame_size,
-                   ArrayRef<const ManagedRegister> callee_save_regs,
-                   bool may_suspend) override;
-
-  void IncreaseFrameSize(size_t adjust) override;
-  void DecreaseFrameSize(size_t adjust) override;
-
-  // Store routines.
-  void Store(FrameOffset offs, ManagedRegister msrc, size_t size) override;
-  void StoreRef(FrameOffset dest, ManagedRegister msrc) override;
-  void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) override;
-
-  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) override;
-
-  void StoreStackOffsetToThread(ThreadOffset32 thr_offs,
-                                FrameOffset fr_offs,
-                                ManagedRegister mscratch) override;
-
-  void StoreStackPointerToThread(ThreadOffset32 thr_offs) override;
-
-  void StoreSpanning(FrameOffset dest,
-                     ManagedRegister msrc,
-                     FrameOffset in_off,
-                     ManagedRegister mscratch) override;
-
-  // Load routines.
-  void Load(ManagedRegister mdest, FrameOffset src, size_t size) override;
-
-  void LoadFromThread(ManagedRegister mdest, ThreadOffset32 src, size_t size) override;
-
-  void LoadRef(ManagedRegister dest, FrameOffset src) override;
-
-  void LoadRef(ManagedRegister mdest,
-               ManagedRegister base,
-               MemberOffset offs,
-               bool unpoison_reference) override;
-
-  void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) override;
-
-  void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset32 offs) override;
-
-  // Copying routines.
-  void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) override;
-
-  void CopyRawPtrFromThread(FrameOffset fr_offs,
-                            ThreadOffset32 thr_offs,
-                            ManagedRegister mscratch) override;
-
-  void CopyRawPtrToThread(ThreadOffset32 thr_offs,
-                          FrameOffset fr_offs,
-                          ManagedRegister mscratch) override;
-
-  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) override;
-
-  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) override;
-
-  void Copy(FrameOffset dest,
-            ManagedRegister src_base,
-            Offset src_offset,
-            ManagedRegister mscratch,
-            size_t size) override;
-
-  void Copy(ManagedRegister dest_base,
-            Offset dest_offset,
-            FrameOffset src,
-            ManagedRegister mscratch,
-            size_t size) override;
-
-  void Copy(FrameOffset dest,
-            FrameOffset src_base,
-            Offset src_offset,
-            ManagedRegister mscratch,
-            size_t size) override;
-
-  void Copy(ManagedRegister dest,
-            Offset dest_offset,
-            ManagedRegister src,
-            Offset src_offset,
-            ManagedRegister mscratch,
-            size_t size) override;
-
-  void Copy(FrameOffset dest,
-            Offset dest_offset,
-            FrameOffset src,
-            Offset src_offset,
-            ManagedRegister mscratch,
-            size_t size) override;
-
-  void MemoryBarrier(ManagedRegister) override;
-
-  // Sign extension.
-  void SignExtend(ManagedRegister mreg, size_t size) override;
-
-  // Zero extension.
-  void ZeroExtend(ManagedRegister mreg, size_t size) override;
-
-  // Exploit fast access in managed code to Thread::Current().
-  void GetCurrentThread(ManagedRegister tr) override;
-  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) override;
-
-  // Set up out_reg to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed. in_reg holds a possibly stale reference
-  // that can be used to avoid loading the handle scope entry to see if the value is
-  // null.
-  void CreateHandleScopeEntry(ManagedRegister out_reg,
-                              FrameOffset handlescope_offset,
-                              ManagedRegister in_reg,
-                              bool null_allowed) override;
-
-  // Set up out_off to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed.
-  void CreateHandleScopeEntry(FrameOffset out_off,
-                              FrameOffset handlescope_offset,
-                              ManagedRegister mscratch,
-                              bool null_allowed) override;
-
-  // src holds a handle scope entry (Object**) load this into dst.
-  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
-
-  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
-  // know that src may not be null.
-  void VerifyObject(ManagedRegister src, bool could_be_null) override;
-  void VerifyObject(FrameOffset src, bool could_be_null) override;
-
-  // Call to address held at [base+offset].
-  void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) override;
-  void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) override;
-  void CallFromThread(ThreadOffset32 offset, ManagedRegister mscratch) override;
-
-  // Generate code to check if Thread::Current()->exception_ is non-null
-  // and branch to a ExceptionSlowPath if it is.
-  void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) override;
-
-  // Emit slow paths queued during assembly and promote short branches to long if needed.
-  void FinalizeCode() override;
-
-  // Emit branches and finalize all instructions.
-  void FinalizeInstructions(const MemoryRegion& region) override;
-
-  // Returns the (always-)current location of a label (can be used in class CodeGeneratorMIPS,
-  // must be used instead of MipsLabel::GetPosition()).
-  uint32_t GetLabelLocation(const MipsLabel* label) const;
-
-  // Get the final position of a label after local fixup based on the old position
-  // recorded before FinalizeCode().
-  uint32_t GetAdjustedPosition(uint32_t old_position);
-
-  // R2 doesn't have PC-relative addressing, which we need to access literals. We simulate it by
-  // reading the PC value into a general-purpose register with the NAL instruction and then loading
-  // literals through this base register. The code generator calls this method (at most once per
-  // method being compiled) to bind a label to the location for which the PC value is acquired.
-  // The assembler then computes literal offsets relative to this label.
-  void BindPcRelBaseLabel();
-
-  // Returns the location of the label bound with BindPcRelBaseLabel().
-  uint32_t GetPcRelBaseLabelLocation() const;
-
-  // Note that PC-relative literal loads are handled as pseudo branches because they need very
-  // similar relocation and may similarly expand in size to accomodate for larger offsets relative
-  // to PC.
-  enum BranchCondition {
-    kCondLT,
-    kCondGE,
-    kCondLE,
-    kCondGT,
-    kCondLTZ,
-    kCondGEZ,
-    kCondLEZ,
-    kCondGTZ,
-    kCondEQ,
-    kCondNE,
-    kCondEQZ,
-    kCondNEZ,
-    kCondLTU,
-    kCondGEU,
-    kCondF,    // Floating-point predicate false.
-    kCondT,    // Floating-point predicate true.
-    kUncond,
-  };
-  friend std::ostream& operator<<(std::ostream& os, const BranchCondition& rhs);
-
-  // Enables or disables instruction reordering (IOW, automatic filling of delay slots)
-  // similarly to ".set reorder" / ".set noreorder" in traditional MIPS assembly.
-  // Returns the last state, which may be useful for temporary enabling/disabling of
-  // reordering.
-  bool SetReorder(bool enable);
-
- private:
-  // Description of the last instruction in terms of input and output registers.
-  // Used to make the decision of moving the instruction into a delay slot.
-  struct DelaySlot {
-    DelaySlot();
-
-    // Encoded instruction that may be used to fill the delay slot or 0
-    // (0 conveniently represents NOP).
-    uint32_t instruction_;
-
-    // Input/output register masks.
-    InOutRegMasks masks_;
-
-    // Label for patchable instructions to allow moving them into delay slots.
-    MipsLabel* patcher_label_;
-  };
-
-  // Delay slot finite state machine's (DS FSM's) state. The FSM state is updated
-  // upon every new instruction and label generated. The FSM detects instructions
-  // suitable for delay slots and immediately preceded with labels. These are target
-  // instructions for branches. If an unconditional R2 branch does not get its delay
-  // slot filled with the immediately preceding instruction, it may instead get the
-  // slot filled with the target instruction (the branch will need its offset
-  // incremented past the target instruction). We call this "absorption". The FSM
-  // records PCs of the target instructions suitable for this optimization.
-  enum DsFsmState {
-    kExpectingLabel,
-    kExpectingInstruction,
-    kExpectingCommit
-  };
-  friend std::ostream& operator<<(std::ostream& os, const DsFsmState& rhs);
-
-  class Branch {
-   public:
-    enum Type {
-      // R2 short branches (can be promoted to long).
-      kUncondBranch,
-      kCondBranch,
-      kCall,
-      // R2 short branches (can't be promoted to long), delay slots filled manually.
-      kBareUncondBranch,
-      kBareCondBranch,
-      kBareCall,
-      // R2 near label.
-      kLabel,
-      // R2 near literal.
-      kLiteral,
-      // R2 long branches.
-      kLongUncondBranch,
-      kLongCondBranch,
-      kLongCall,
-      // R2 far label.
-      kFarLabel,
-      // R2 far literal.
-      kFarLiteral,
-      // R6 short branches (can be promoted to long).
-      kR6UncondBranch,
-      kR6CondBranch,
-      kR6Call,
-      // R6 short branches (can't be promoted to long), forbidden/delay slots filled manually.
-      kR6BareUncondBranch,
-      kR6BareCondBranch,
-      kR6BareCall,
-      // R6 near label.
-      kR6Label,
-      // R6 near literal.
-      kR6Literal,
-      // R6 long branches.
-      kR6LongUncondBranch,
-      kR6LongCondBranch,
-      kR6LongCall,
-      // R6 far label.
-      kR6FarLabel,
-      // R6 far literal.
-      kR6FarLiteral,
-    };
-    // Bit sizes of offsets defined as enums to minimize chance of typos.
-    enum OffsetBits {
-      kOffset16 = 16,
-      kOffset18 = 18,
-      kOffset21 = 21,
-      kOffset23 = 23,
-      kOffset28 = 28,
-      kOffset32 = 32,
-    };
-
-    static constexpr uint32_t kUnresolved = 0xffffffff;  // Unresolved target_
-    static constexpr int32_t kMaxBranchLength = 32;
-    static constexpr int32_t kMaxBranchSize = kMaxBranchLength * sizeof(uint32_t);
-    // The following two instruction encodings can never legally occur in branch delay
-    // slots and are used as markers.
-    //
-    // kUnfilledDelaySlot means that the branch may use either the preceding or the target
-    // instruction to fill its delay slot (the latter is only possible with unconditional
-    // R2 branches and is termed here as "absorption").
-    static constexpr uint32_t kUnfilledDelaySlot = 0x10000000;  // beq zero, zero, 0.
-    // kUnfillableDelaySlot means that the branch cannot use an instruction (other than NOP)
-    // to fill its delay slot. This is only used for unconditional R2 branches to prevent
-    // absorption of the target instruction when reordering is disabled.
-    static constexpr uint32_t kUnfillableDelaySlot = 0x13FF0000;  // beq ra, ra, 0.
-
-    struct BranchInfo {
-      // Branch length as a number of 4-byte-long instructions.
-      uint32_t length;
-      // Ordinal number (0-based) of the first (or the only) instruction that contains the branch's
-      // PC-relative offset (or its most significant 16-bit half, which goes first).
-      uint32_t instr_offset;
-      // Different MIPS instructions with PC-relative offsets apply said offsets to slightly
-      // different origins, e.g. to PC or PC+4. Encode the origin distance (as a number of 4-byte
-      // instructions) from the instruction containing the offset.
-      uint32_t pc_org;
-      // How large (in bits) a PC-relative offset can be for a given type of branch (kR6CondBranch
-      // and kR6BareCondBranch are an exception: use kOffset23 for beqzc/bnezc).
-      OffsetBits offset_size;
-      // Some MIPS instructions with PC-relative offsets shift the offset by 2. Encode the shift
-      // count.
-      int offset_shift;
-    };
-    static const BranchInfo branch_info_[/* Type */];
-
-    // Unconditional branch or call.
-    Branch(bool is_r6, uint32_t location, uint32_t target, bool is_call, bool is_bare);
-    // Conditional branch.
-    Branch(bool is_r6,
-           uint32_t location,
-           uint32_t target,
-           BranchCondition condition,
-           Register lhs_reg,
-           Register rhs_reg,
-           bool is_bare);
-    // Label address (in literal area) or literal.
-    Branch(bool is_r6,
-           uint32_t location,
-           Register dest_reg,
-           Register base_reg,
-           Type label_or_literal_type);
-
-    // Some conditional branches with lhs = rhs are effectively NOPs, while some
-    // others are effectively unconditional. MIPSR6 conditional branches require lhs != rhs.
-    // So, we need a way to identify such branches in order to emit no instructions for them
-    // or change them to unconditional.
-    static bool IsNop(BranchCondition condition, Register lhs, Register rhs);
-    static bool IsUncond(BranchCondition condition, Register lhs, Register rhs);
-
-    static BranchCondition OppositeCondition(BranchCondition cond);
-
-    Type GetType() const;
-    BranchCondition GetCondition() const;
-    Register GetLeftRegister() const;
-    Register GetRightRegister() const;
-    uint32_t GetTarget() const;
-    uint32_t GetLocation() const;
-    uint32_t GetOldLocation() const;
-    uint32_t GetPrecedingInstructionLength(Type type) const;
-    uint32_t GetPrecedingInstructionSize(Type type) const;
-    uint32_t GetLength() const;
-    uint32_t GetOldLength() const;
-    uint32_t GetSize() const;
-    uint32_t GetOldSize() const;
-    uint32_t GetEndLocation() const;
-    uint32_t GetOldEndLocation() const;
-    bool IsBare() const;
-    bool IsLong() const;
-    bool IsResolved() const;
-
-    // Various helpers for branch delay slot management.
-    bool CanHaveDelayedInstruction(const DelaySlot& delay_slot) const;
-    void SetDelayedInstruction(uint32_t instruction, MipsLabel* patcher_label = nullptr);
-    uint32_t GetDelayedInstruction() const;
-    MipsLabel* GetPatcherLabel() const;
-    void DecrementLocations();
-
-    // Returns the bit size of the signed offset that the branch instruction can handle.
-    OffsetBits GetOffsetSize() const;
-
-    // Calculates the distance between two byte locations in the assembler buffer and
-    // returns the number of bits needed to represent the distance as a signed integer.
-    //
-    // Branch instructions have signed offsets of 16, 19 (addiupc), 21 (beqzc/bnezc),
-    // and 26 (bc) bits, which are additionally shifted left 2 positions at run time.
-    //
-    // Composite branches (made of several instructions) with longer reach have 32-bit
-    // offsets encoded as 2 16-bit "halves" in two instructions (high half goes first).
-    // The composite branches cover the range of PC + +/-2GB on MIPS32 CPUs. However,
-    // the range is not end-to-end on MIPS64 (unless addresses are forced to zero- or
-    // sign-extend from 32 to 64 bits by the appropriate CPU configuration).
-    // Consider the following implementation of a long unconditional branch, for
-    // example:
-    //
-    //   auipc at, offset_31_16  // at = pc + sign_extend(offset_31_16) << 16
-    //   jic   at, offset_15_0   // pc = at + sign_extend(offset_15_0)
-    //
-    // Both of the above instructions take 16-bit signed offsets as immediate operands.
-    // When bit 15 of offset_15_0 is 1, it effectively causes subtraction of 0x10000
-    // due to sign extension. This must be compensated for by incrementing offset_31_16
-    // by 1. offset_31_16 can only be incremented by 1 if it's not 0x7FFF. If it is
-    // 0x7FFF, adding 1 will overflow the positive offset into the negative range.
-    // Therefore, the long branch range is something like from PC - 0x80000000 to
-    // PC + 0x7FFF7FFF, IOW, shorter by 32KB on one side.
-    //
-    // The returned values are therefore: 18, 21, 23, 28 and 32. There's also a special
-    // case with the addiu instruction and a 16 bit offset.
-    static OffsetBits GetOffsetSizeNeeded(uint32_t location, uint32_t target);
-
-    // Resolve a branch when the target is known.
-    void Resolve(uint32_t target);
-
-    // Relocate a branch by a given delta if needed due to expansion of this or another
-    // branch at a given location by this delta (just changes location_ and target_).
-    void Relocate(uint32_t expand_location, uint32_t delta);
-
-    // If the branch is short, changes its type to long.
-    void PromoteToLong();
-
-    // If necessary, updates the type by promoting a short branch to a long branch
-    // based on the branch location and target. Returns the amount (in bytes) by
-    // which the branch size has increased.
-    // max_short_distance caps the maximum distance between location_ and target_
-    // that is allowed for short branches. This is for debugging/testing purposes.
-    // max_short_distance = 0 forces all short branches to become long.
-    // Use the implicit default argument when not debugging/testing.
-    uint32_t PromoteIfNeeded(uint32_t location,
-                             uint32_t max_short_distance = std::numeric_limits<uint32_t>::max());
-
-    // Returns the location of the instruction(s) containing the offset.
-    uint32_t GetOffsetLocation() const;
-
-    // Calculates and returns the offset ready for encoding in the branch instruction(s).
-    uint32_t GetOffset(uint32_t location) const;
-
-   private:
-    // Completes branch construction by determining and recording its type.
-    void InitializeType(Type initial_type, bool is_r6);
-    // Helper for the above.
-    void InitShortOrLong(OffsetBits ofs_size, Type short_type, Type long_type);
-
-    uint32_t old_location_;         // Offset into assembler buffer in bytes.
-    uint32_t location_;             // Offset into assembler buffer in bytes.
-    uint32_t target_;               // Offset into assembler buffer in bytes.
-
-    uint32_t lhs_reg_;              // Left-hand side register in conditional branches or
-                                    // FPU condition code. Destination register in literals.
-    uint32_t rhs_reg_;              // Right-hand side register in conditional branches.
-                                    // Base register in literals (ZERO on R6).
-    BranchCondition condition_;     // Condition for conditional branches.
-
-    Type type_;                     // Current type of the branch.
-    Type old_type_;                 // Initial type of the branch.
-
-    uint32_t delayed_instruction_;  // Encoded instruction for the delay slot or
-                                    // kUnfilledDelaySlot if none but fillable or
-                                    // kUnfillableDelaySlot if none and unfillable
-                                    // (the latter is only used for unconditional R2
-                                    // branches).
-
-    MipsLabel* patcher_label_;      // Patcher label for the instruction in the delay slot.
-  };
-  friend std::ostream& operator<<(std::ostream& os, const Branch::Type& rhs);
-  friend std::ostream& operator<<(std::ostream& os, const Branch::OffsetBits& rhs);
-
-  uint32_t EmitR(int opcode, Register rs, Register rt, Register rd, int shamt, int funct);
-  uint32_t EmitI(int opcode, Register rs, Register rt, uint16_t imm);
-  uint32_t EmitI21(int opcode, Register rs, uint32_t imm21);
-  uint32_t EmitI26(int opcode, uint32_t imm26);
-  uint32_t EmitFR(int opcode, int fmt, FRegister ft, FRegister fs, FRegister fd, int funct);
-  uint32_t EmitFI(int opcode, int fmt, FRegister rt, uint16_t imm);
-  void EmitBcondR2(BranchCondition cond, Register rs, Register rt, uint16_t imm16);
-  void EmitBcondR6(BranchCondition cond, Register rs, Register rt, uint32_t imm16_21);
-  uint32_t EmitMsa3R(int operation,
-                     int df,
-                     VectorRegister wt,
-                     VectorRegister ws,
-                     VectorRegister wd,
-                     int minor_opcode);
-  uint32_t EmitMsaBIT(int operation,
-                      int df_m,
-                      VectorRegister ws,
-                      VectorRegister wd,
-                      int minor_opcode);
-  uint32_t EmitMsaELM(int operation,
-                      int df_n,
-                      VectorRegister ws,
-                      VectorRegister wd,
-                      int minor_opcode);
-  uint32_t EmitMsaMI10(int s10, Register rs, VectorRegister wd, int minor_opcode, int df);
-  uint32_t EmitMsaI10(int operation, int df, int i10, VectorRegister wd, int minor_opcode);
-  uint32_t EmitMsa2R(int operation, int df, VectorRegister ws, VectorRegister wd, int minor_opcode);
-  uint32_t EmitMsa2RF(int operation,
-                      int df,
-                      VectorRegister ws,
-                      VectorRegister wd,
-                      int minor_opcode);
-
-  void Buncond(MipsLabel* label, bool is_r6, bool is_bare);
-  void Bcond(MipsLabel* label,
-             bool is_r6,
-             bool is_bare,
-             BranchCondition condition,
-             Register lhs,
-             Register rhs = ZERO);
-  void Call(MipsLabel* label, bool is_r6, bool is_bare);
-  void FinalizeLabeledBranch(MipsLabel* label);
-
-  // Various helpers for branch delay slot management.
-  InOutRegMasks& DsFsmInstr(uint32_t instruction, MipsLabel* patcher_label = nullptr);
-  void DsFsmInstrNop(uint32_t instruction);
-  void DsFsmLabel();
-  void DsFsmCommitLabel();
-  void DsFsmDropLabel();
-  void MoveInstructionToDelaySlot(Branch& branch);
-  bool CanExchangeWithSlt(Register rs, Register rt) const;
-  void ExchangeWithSlt(const DelaySlot& forwarded_slot);
-  void GenerateSltForCondBranch(bool unsigned_slt, Register rs, Register rt);
-
-  Branch* GetBranch(uint32_t branch_id);
-  const Branch* GetBranch(uint32_t branch_id) const;
-  uint32_t GetBranchLocationOrPcRelBase(const MipsAssembler::Branch* branch) const;
-  uint32_t GetBranchOrPcRelBaseForEncoding(const MipsAssembler::Branch* branch) const;
-  void BindRelativeToPrecedingBranch(MipsLabel* label,
-                                     uint32_t prev_branch_id_plus_one,
-                                     uint32_t position);
-
-  void EmitLiterals();
-  void ReserveJumpTableSpace();
-  void EmitJumpTables();
-  void PromoteBranches();
-  void EmitBranch(uint32_t branch_id);
-  void EmitBranches();
-  void PatchCFI(size_t number_of_delayed_adjust_pcs);
-
-  // Emits exception block.
-  void EmitExceptionPoll(MipsExceptionSlowPath* exception);
-
-  bool HasMsa() const {
-    return has_msa_;
-  }
-
-  bool IsR6() const {
-    if (isa_features_ != nullptr) {
-      return isa_features_->IsR6();
-    } else {
-      return false;
-    }
-  }
-
-  bool Is32BitFPU() const {
-    if (isa_features_ != nullptr) {
-      return isa_features_->Is32BitFloatingPoint();
-    } else {
-      return true;
-    }
-  }
-
-  // List of exception blocks to generate at the end of the code cache.
-  std::vector<MipsExceptionSlowPath> exception_blocks_;
-
-  std::vector<Branch> branches_;
-
-  // Whether appending instructions at the end of the buffer or overwriting the existing ones.
-  bool overwriting_;
-  // The current overwrite location.
-  uint32_t overwrite_location_;
-
-  // Whether instruction reordering (IOW, automatic filling of delay slots) is enabled.
-  bool reordering_;
-  // Information about the last instruction that may be used to fill a branch delay slot.
-  DelaySlot delay_slot_;
-  // Delay slot FSM state.
-  DsFsmState ds_fsm_state_;
-  // PC of the current labeled target instruction.
-  uint32_t ds_fsm_target_pc_;
-  // PCs of labeled target instructions.
-  std::vector<uint32_t> ds_fsm_target_pcs_;
-
-  // Use std::deque<> for literal labels to allow insertions at the end
-  // without invalidating pointers and references to existing elements.
-  ArenaDeque<Literal> literals_;
-
-  // Jump table list.
-  ArenaDeque<JumpTable> jump_tables_;
-
-  // There's no PC-relative addressing on MIPS32R2. So, in order to access literals relative to PC
-  // we get PC using the NAL instruction. This label marks the position within the assembler buffer
-  // that PC (from NAL) points to.
-  MipsLabel pc_rel_base_label_;
-
-  // Data for GetAdjustedPosition(), see the description there.
-  uint32_t last_position_adjustment_;
-  uint32_t last_old_position_;
-  uint32_t last_branch_id_;
-
-  const bool has_msa_;
-
-  const MipsInstructionSetFeatures* isa_features_;
-
-  DISALLOW_COPY_AND_ASSIGN(MipsAssembler);
-};
-
-}  // namespace mips
-}  // namespace art
-
-#endif  // ART_COMPILER_UTILS_MIPS_ASSEMBLER_MIPS_H_
diff --git a/compiler/utils/mips/assembler_mips32r5_test.cc b/compiler/utils/mips/assembler_mips32r5_test.cc
deleted file mode 100644
index 98fc44b..0000000
--- a/compiler/utils/mips/assembler_mips32r5_test.cc
+++ /dev/null
@@ -1,558 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "assembler_mips.h"
-
-#include <map>
-
-#include "base/stl_util.h"
-#include "utils/assembler_test.h"
-
-#define __ GetAssembler()->
-
-namespace art {
-
-struct MIPSCpuRegisterCompare {
-  bool operator()(const mips::Register& a, const mips::Register& b) const {
-    return a < b;
-  }
-};
-
-class AssemblerMIPS32r5Test : public AssemblerTest<mips::MipsAssembler,
-                                                   mips::MipsLabel,
-                                                   mips::Register,
-                                                   mips::FRegister,
-                                                   uint32_t,
-                                                   mips::VectorRegister> {
- public:
-  using Base = AssemblerTest<mips::MipsAssembler,
-                             mips::MipsLabel,
-                             mips::Register,
-                             mips::FRegister,
-                             uint32_t,
-                             mips::VectorRegister>;
-
-  // These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
-  // and reimplement it without the verification against `assembly_string`. b/73903608
-  void DriverStr(const std::string& assembly_string ATTRIBUTE_UNUSED,
-                 const std::string& test_name ATTRIBUTE_UNUSED) {
-    GetAssembler()->FinalizeCode();
-    std::vector<uint8_t> data(GetAssembler()->CodeSize());
-    MemoryRegion code(data.data(), data.size());
-    GetAssembler()->FinalizeInstructions(code);
-  }
-
-  AssemblerMIPS32r5Test() :
-    instruction_set_features_(MipsInstructionSetFeatures::FromVariant("mips32r5", nullptr)) {
-  }
-
- protected:
-  // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
-  std::string GetArchitectureString() override {
-    return "mips";
-  }
-
-  std::string GetAssemblerParameters() override {
-    return " --no-warn -32 -march=mips32r5 -mmsa";
-  }
-
-  void Pad(std::vector<uint8_t>& data) override {
-    // The GNU linker unconditionally pads the code segment with NOPs to a size that is a multiple
-    // of 16 and there doesn't appear to be a way to suppress this padding. Our assembler doesn't
-    // pad, so, in order for two assembler outputs to match, we need to match the padding as well.
-    // NOP is encoded as four zero bytes on MIPS.
-    size_t pad_size = RoundUp(data.size(), 16u) - data.size();
-    data.insert(data.end(), pad_size, 0);
-  }
-
-  std::string GetDisassembleParameters() override {
-    return " -D -bbinary -mmips:isa32r5";
-  }
-
-  mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) override {
-    return new (allocator) mips::MipsAssembler(allocator, instruction_set_features_.get());
-  }
-
-  void SetUpHelpers() override {
-    if (registers_.size() == 0) {
-      registers_.push_back(new mips::Register(mips::ZERO));
-      registers_.push_back(new mips::Register(mips::AT));
-      registers_.push_back(new mips::Register(mips::V0));
-      registers_.push_back(new mips::Register(mips::V1));
-      registers_.push_back(new mips::Register(mips::A0));
-      registers_.push_back(new mips::Register(mips::A1));
-      registers_.push_back(new mips::Register(mips::A2));
-      registers_.push_back(new mips::Register(mips::A3));
-      registers_.push_back(new mips::Register(mips::T0));
-      registers_.push_back(new mips::Register(mips::T1));
-      registers_.push_back(new mips::Register(mips::T2));
-      registers_.push_back(new mips::Register(mips::T3));
-      registers_.push_back(new mips::Register(mips::T4));
-      registers_.push_back(new mips::Register(mips::T5));
-      registers_.push_back(new mips::Register(mips::T6));
-      registers_.push_back(new mips::Register(mips::T7));
-      registers_.push_back(new mips::Register(mips::S0));
-      registers_.push_back(new mips::Register(mips::S1));
-      registers_.push_back(new mips::Register(mips::S2));
-      registers_.push_back(new mips::Register(mips::S3));
-      registers_.push_back(new mips::Register(mips::S4));
-      registers_.push_back(new mips::Register(mips::S5));
-      registers_.push_back(new mips::Register(mips::S6));
-      registers_.push_back(new mips::Register(mips::S7));
-      registers_.push_back(new mips::Register(mips::T8));
-      registers_.push_back(new mips::Register(mips::T9));
-      registers_.push_back(new mips::Register(mips::K0));
-      registers_.push_back(new mips::Register(mips::K1));
-      registers_.push_back(new mips::Register(mips::GP));
-      registers_.push_back(new mips::Register(mips::SP));
-      registers_.push_back(new mips::Register(mips::FP));
-      registers_.push_back(new mips::Register(mips::RA));
-
-      secondary_register_names_.emplace(mips::Register(mips::ZERO), "zero");
-      secondary_register_names_.emplace(mips::Register(mips::AT), "at");
-      secondary_register_names_.emplace(mips::Register(mips::V0), "v0");
-      secondary_register_names_.emplace(mips::Register(mips::V1), "v1");
-      secondary_register_names_.emplace(mips::Register(mips::A0), "a0");
-      secondary_register_names_.emplace(mips::Register(mips::A1), "a1");
-      secondary_register_names_.emplace(mips::Register(mips::A2), "a2");
-      secondary_register_names_.emplace(mips::Register(mips::A3), "a3");
-      secondary_register_names_.emplace(mips::Register(mips::T0), "t0");
-      secondary_register_names_.emplace(mips::Register(mips::T1), "t1");
-      secondary_register_names_.emplace(mips::Register(mips::T2), "t2");
-      secondary_register_names_.emplace(mips::Register(mips::T3), "t3");
-      secondary_register_names_.emplace(mips::Register(mips::T4), "t4");
-      secondary_register_names_.emplace(mips::Register(mips::T5), "t5");
-      secondary_register_names_.emplace(mips::Register(mips::T6), "t6");
-      secondary_register_names_.emplace(mips::Register(mips::T7), "t7");
-      secondary_register_names_.emplace(mips::Register(mips::S0), "s0");
-      secondary_register_names_.emplace(mips::Register(mips::S1), "s1");
-      secondary_register_names_.emplace(mips::Register(mips::S2), "s2");
-      secondary_register_names_.emplace(mips::Register(mips::S3), "s3");
-      secondary_register_names_.emplace(mips::Register(mips::S4), "s4");
-      secondary_register_names_.emplace(mips::Register(mips::S5), "s5");
-      secondary_register_names_.emplace(mips::Register(mips::S6), "s6");
-      secondary_register_names_.emplace(mips::Register(mips::S7), "s7");
-      secondary_register_names_.emplace(mips::Register(mips::T8), "t8");
-      secondary_register_names_.emplace(mips::Register(mips::T9), "t9");
-      secondary_register_names_.emplace(mips::Register(mips::K0), "k0");
-      secondary_register_names_.emplace(mips::Register(mips::K1), "k1");
-      secondary_register_names_.emplace(mips::Register(mips::GP), "gp");
-      secondary_register_names_.emplace(mips::Register(mips::SP), "sp");
-      secondary_register_names_.emplace(mips::Register(mips::FP), "fp");
-      secondary_register_names_.emplace(mips::Register(mips::RA), "ra");
-
-      fp_registers_.push_back(new mips::FRegister(mips::F0));
-      fp_registers_.push_back(new mips::FRegister(mips::F1));
-      fp_registers_.push_back(new mips::FRegister(mips::F2));
-      fp_registers_.push_back(new mips::FRegister(mips::F3));
-      fp_registers_.push_back(new mips::FRegister(mips::F4));
-      fp_registers_.push_back(new mips::FRegister(mips::F5));
-      fp_registers_.push_back(new mips::FRegister(mips::F6));
-      fp_registers_.push_back(new mips::FRegister(mips::F7));
-      fp_registers_.push_back(new mips::FRegister(mips::F8));
-      fp_registers_.push_back(new mips::FRegister(mips::F9));
-      fp_registers_.push_back(new mips::FRegister(mips::F10));
-      fp_registers_.push_back(new mips::FRegister(mips::F11));
-      fp_registers_.push_back(new mips::FRegister(mips::F12));
-      fp_registers_.push_back(new mips::FRegister(mips::F13));
-      fp_registers_.push_back(new mips::FRegister(mips::F14));
-      fp_registers_.push_back(new mips::FRegister(mips::F15));
-      fp_registers_.push_back(new mips::FRegister(mips::F16));
-      fp_registers_.push_back(new mips::FRegister(mips::F17));
-      fp_registers_.push_back(new mips::FRegister(mips::F18));
-      fp_registers_.push_back(new mips::FRegister(mips::F19));
-      fp_registers_.push_back(new mips::FRegister(mips::F20));
-      fp_registers_.push_back(new mips::FRegister(mips::F21));
-      fp_registers_.push_back(new mips::FRegister(mips::F22));
-      fp_registers_.push_back(new mips::FRegister(mips::F23));
-      fp_registers_.push_back(new mips::FRegister(mips::F24));
-      fp_registers_.push_back(new mips::FRegister(mips::F25));
-      fp_registers_.push_back(new mips::FRegister(mips::F26));
-      fp_registers_.push_back(new mips::FRegister(mips::F27));
-      fp_registers_.push_back(new mips::FRegister(mips::F28));
-      fp_registers_.push_back(new mips::FRegister(mips::F29));
-      fp_registers_.push_back(new mips::FRegister(mips::F30));
-      fp_registers_.push_back(new mips::FRegister(mips::F31));
-
-      vec_registers_.push_back(new mips::VectorRegister(mips::W0));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W1));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W2));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W3));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W4));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W5));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W6));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W7));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W8));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W9));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W10));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W11));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W12));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W13));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W14));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W15));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W16));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W17));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W18));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W19));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W20));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W21));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W22));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W23));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W24));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W25));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W26));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W27));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W28));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W29));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W30));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W31));
-    }
-  }
-
-  void TearDown() override {
-    AssemblerTest::TearDown();
-    STLDeleteElements(&registers_);
-    STLDeleteElements(&fp_registers_);
-    STLDeleteElements(&vec_registers_);
-  }
-
-  std::vector<mips::MipsLabel> GetAddresses() override {
-    UNIMPLEMENTED(FATAL) << "Feature not implemented yet";
-    UNREACHABLE();
-  }
-
-  std::vector<mips::Register*> GetRegisters() override {
-    return registers_;
-  }
-
-  std::vector<mips::FRegister*> GetFPRegisters() override {
-    return fp_registers_;
-  }
-
-  std::vector<mips::VectorRegister*> GetVectorRegisters() override {
-    return vec_registers_;
-  }
-
-  uint32_t CreateImmediate(int64_t imm_value) override {
-    return imm_value;
-  }
-
-  std::string GetSecondaryRegisterName(const mips::Register& reg) override {
-    CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
-    return secondary_register_names_[reg];
-  }
-
-  std::string RepeatInsn(size_t count, const std::string& insn) {
-    std::string result;
-    for (; count != 0u; --count) {
-      result += insn;
-    }
-    return result;
-  }
-
- private:
-  std::vector<mips::Register*> registers_;
-  std::map<mips::Register, std::string, MIPSCpuRegisterCompare> secondary_register_names_;
-
-  std::vector<mips::FRegister*> fp_registers_;
-  std::vector<mips::VectorRegister*> vec_registers_;
-  std::unique_ptr<const MipsInstructionSetFeatures> instruction_set_features_;
-};
-
-TEST_F(AssemblerMIPS32r5Test, Toolchain) {
-  EXPECT_TRUE(CheckTools());
-}
-
-TEST_F(AssemblerMIPS32r5Test, LoadQFromOffset) {
-  __ LoadQFromOffset(mips::F0, mips::A0, 0);
-  __ LoadQFromOffset(mips::F0, mips::A0, 1);
-  __ LoadQFromOffset(mips::F0, mips::A0, 2);
-  __ LoadQFromOffset(mips::F0, mips::A0, 4);
-  __ LoadQFromOffset(mips::F0, mips::A0, 8);
-  __ LoadQFromOffset(mips::F0, mips::A0, 511);
-  __ LoadQFromOffset(mips::F0, mips::A0, 512);
-  __ LoadQFromOffset(mips::F0, mips::A0, 513);
-  __ LoadQFromOffset(mips::F0, mips::A0, 514);
-  __ LoadQFromOffset(mips::F0, mips::A0, 516);
-  __ LoadQFromOffset(mips::F0, mips::A0, 1022);
-  __ LoadQFromOffset(mips::F0, mips::A0, 1024);
-  __ LoadQFromOffset(mips::F0, mips::A0, 1025);
-  __ LoadQFromOffset(mips::F0, mips::A0, 1026);
-  __ LoadQFromOffset(mips::F0, mips::A0, 1028);
-  __ LoadQFromOffset(mips::F0, mips::A0, 2044);
-  __ LoadQFromOffset(mips::F0, mips::A0, 2048);
-  __ LoadQFromOffset(mips::F0, mips::A0, 2049);
-  __ LoadQFromOffset(mips::F0, mips::A0, 2050);
-  __ LoadQFromOffset(mips::F0, mips::A0, 2052);
-  __ LoadQFromOffset(mips::F0, mips::A0, 4088);
-  __ LoadQFromOffset(mips::F0, mips::A0, 4096);
-  __ LoadQFromOffset(mips::F0, mips::A0, 4097);
-  __ LoadQFromOffset(mips::F0, mips::A0, 4098);
-  __ LoadQFromOffset(mips::F0, mips::A0, 4100);
-  __ LoadQFromOffset(mips::F0, mips::A0, 4104);
-  __ LoadQFromOffset(mips::F0, mips::A0, 0x7FFC);
-  __ LoadQFromOffset(mips::F0, mips::A0, 0x8000);
-  __ LoadQFromOffset(mips::F0, mips::A0, 0x10000);
-  __ LoadQFromOffset(mips::F0, mips::A0, 0x12345678);
-  __ LoadQFromOffset(mips::F0, mips::A0, 0x12350078);
-  __ LoadQFromOffset(mips::F0, mips::A0, -256);
-  __ LoadQFromOffset(mips::F0, mips::A0, -511);
-  __ LoadQFromOffset(mips::F0, mips::A0, -513);
-  __ LoadQFromOffset(mips::F0, mips::A0, -1022);
-  __ LoadQFromOffset(mips::F0, mips::A0, -1026);
-  __ LoadQFromOffset(mips::F0, mips::A0, -2044);
-  __ LoadQFromOffset(mips::F0, mips::A0, -2052);
-  __ LoadQFromOffset(mips::F0, mips::A0, -4096);
-  __ LoadQFromOffset(mips::F0, mips::A0, -4104);
-  __ LoadQFromOffset(mips::F0, mips::A0, -32768);
-  __ LoadQFromOffset(mips::F0, mips::A0, -36856);
-  __ LoadQFromOffset(mips::F0, mips::A0, 36856);
-  __ LoadQFromOffset(mips::F0, mips::A0, -69608);
-  __ LoadQFromOffset(mips::F0, mips::A0, 69608);
-  __ LoadQFromOffset(mips::F0, mips::A0, 0xABCDEF00);
-  __ LoadQFromOffset(mips::F0, mips::A0, 0x7FFFABCD);
-
-  const char* expected =
-      "ld.d $w0, 0($a0)\n"
-      "ld.b $w0, 1($a0)\n"
-      "ld.h $w0, 2($a0)\n"
-      "ld.w $w0, 4($a0)\n"
-      "ld.d $w0, 8($a0)\n"
-      "ld.b $w0, 511($a0)\n"
-      "ld.d $w0, 512($a0)\n"
-      "addiu $at, $a0, 513\n"
-      "ld.b $w0, 0($at)\n"
-      "ld.h $w0, 514($a0)\n"
-      "ld.w $w0, 516($a0)\n"
-      "ld.h $w0, 1022($a0)\n"
-      "ld.d $w0, 1024($a0)\n"
-      "addiu $at, $a0, 1025\n"
-      "ld.b $w0, 0($at)\n"
-      "addiu $at, $a0, 1026\n"
-      "ld.h $w0, 0($at)\n"
-      "ld.w $w0, 1028($a0)\n"
-      "ld.w $w0, 2044($a0)\n"
-      "ld.d $w0, 2048($a0)\n"
-      "addiu $at, $a0, 2049\n"
-      "ld.b $w0, 0($at)\n"
-      "addiu $at, $a0, 2050\n"
-      "ld.h $w0, 0($at)\n"
-      "addiu $at, $a0, 2052\n"
-      "ld.w $w0, 0($at)\n"
-      "ld.d $w0, 4088($a0)\n"
-      "addiu $at, $a0, 4096\n"
-      "ld.d $w0, 0($at)\n"
-      "addiu $at, $a0, 4097\n"
-      "ld.b $w0, 0($at)\n"
-      "addiu $at, $a0, 4098\n"
-      "ld.h $w0, 0($at)\n"
-      "addiu $at, $a0, 4100\n"
-      "ld.w $w0, 0($at)\n"
-      "addiu $at, $a0, 4104\n"
-      "ld.d $w0, 0($at)\n"
-      "addiu $at, $a0, 0x7FFC\n"
-      "ld.w $w0, 0($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "ld.d $w0, 8($at)\n"
-      "addiu $at, $a0, 32760\n"
-      "addiu $at, $at, 32760\n"
-      "ld.d $w0, 16($at)\n"
-      "lui $at, 4660\n"
-      "addu $at, $at, $a0\n"
-      "addiu $at, $at, 24576\n"
-      "ld.d $w0, -2440($at) # 0xF678\n"
-      "lui $at, 4661\n"
-      "addu $at, $at, $a0\n"
-      "ld.d $w0, 120($at)\n"
-      "ld.d $w0, -256($a0)\n"
-      "ld.b $w0, -511($a0)\n"
-      "addiu $at, $a0, -513\n"
-      "ld.b $w0, 0($at)\n"
-      "ld.h $w0, -1022($a0)\n"
-      "addiu $at, $a0, -1026\n"
-      "ld.h $w0, 0($at)\n"
-      "ld.w $w0, -2044($a0)\n"
-      "addiu $at, $a0, -2052\n"
-      "ld.w $w0, 0($at)\n"
-      "ld.d $w0, -4096($a0)\n"
-      "addiu $at, $a0, -4104\n"
-      "ld.d $w0, 0($at)\n"
-      "addiu $at, $a0, -32768\n"
-      "ld.d $w0, 0($at)\n"
-      "addiu $at, $a0, -32760\n"
-      "addiu $at, $at, -4096\n"
-      "ld.d $w0, 0($at)\n"
-      "addiu $at, $a0, 32760\n"
-      "addiu $at, $at, 4096\n"
-      "ld.d $w0, 0($at)\n"
-      "addiu $at, $a0, -32760\n"
-      "addiu $at, $at, -32760\n"
-      "ld.d $w0, -4088($at)\n"
-      "addiu $at, $a0, 32760\n"
-      "addiu $at, $at, 32760\n"
-      "ld.d $w0, 4088($at)\n"
-      "lui $at, 0xABCE\n"
-      "addu $at, $at, $a0\n"
-      "addiu $at, $at, -8192 # 0xE000\n"
-      "ld.d $w0, 0xF00($at)\n"
-      "lui $at, 0x8000\n"
-      "addu $at, $at, $a0\n"
-      "addiu $at, $at, -21504 # 0xAC00\n"
-      "ld.b $w0, -51($at) # 0xFFCD\n";
-  DriverStr(expected, "LoadQFromOffset");
-}
-
-TEST_F(AssemblerMIPS32r5Test, StoreQToOffset) {
-  __ StoreQToOffset(mips::F0, mips::A0, 0);
-  __ StoreQToOffset(mips::F0, mips::A0, 1);
-  __ StoreQToOffset(mips::F0, mips::A0, 2);
-  __ StoreQToOffset(mips::F0, mips::A0, 4);
-  __ StoreQToOffset(mips::F0, mips::A0, 8);
-  __ StoreQToOffset(mips::F0, mips::A0, 511);
-  __ StoreQToOffset(mips::F0, mips::A0, 512);
-  __ StoreQToOffset(mips::F0, mips::A0, 513);
-  __ StoreQToOffset(mips::F0, mips::A0, 514);
-  __ StoreQToOffset(mips::F0, mips::A0, 516);
-  __ StoreQToOffset(mips::F0, mips::A0, 1022);
-  __ StoreQToOffset(mips::F0, mips::A0, 1024);
-  __ StoreQToOffset(mips::F0, mips::A0, 1025);
-  __ StoreQToOffset(mips::F0, mips::A0, 1026);
-  __ StoreQToOffset(mips::F0, mips::A0, 1028);
-  __ StoreQToOffset(mips::F0, mips::A0, 2044);
-  __ StoreQToOffset(mips::F0, mips::A0, 2048);
-  __ StoreQToOffset(mips::F0, mips::A0, 2049);
-  __ StoreQToOffset(mips::F0, mips::A0, 2050);
-  __ StoreQToOffset(mips::F0, mips::A0, 2052);
-  __ StoreQToOffset(mips::F0, mips::A0, 4088);
-  __ StoreQToOffset(mips::F0, mips::A0, 4096);
-  __ StoreQToOffset(mips::F0, mips::A0, 4097);
-  __ StoreQToOffset(mips::F0, mips::A0, 4098);
-  __ StoreQToOffset(mips::F0, mips::A0, 4100);
-  __ StoreQToOffset(mips::F0, mips::A0, 4104);
-  __ StoreQToOffset(mips::F0, mips::A0, 0x7FFC);
-  __ StoreQToOffset(mips::F0, mips::A0, 0x8000);
-  __ StoreQToOffset(mips::F0, mips::A0, 0x10000);
-  __ StoreQToOffset(mips::F0, mips::A0, 0x12345678);
-  __ StoreQToOffset(mips::F0, mips::A0, 0x12350078);
-  __ StoreQToOffset(mips::F0, mips::A0, -256);
-  __ StoreQToOffset(mips::F0, mips::A0, -511);
-  __ StoreQToOffset(mips::F0, mips::A0, -513);
-  __ StoreQToOffset(mips::F0, mips::A0, -1022);
-  __ StoreQToOffset(mips::F0, mips::A0, -1026);
-  __ StoreQToOffset(mips::F0, mips::A0, -2044);
-  __ StoreQToOffset(mips::F0, mips::A0, -2052);
-  __ StoreQToOffset(mips::F0, mips::A0, -4096);
-  __ StoreQToOffset(mips::F0, mips::A0, -4104);
-  __ StoreQToOffset(mips::F0, mips::A0, -32768);
-  __ StoreQToOffset(mips::F0, mips::A0, -36856);
-  __ StoreQToOffset(mips::F0, mips::A0, 36856);
-  __ StoreQToOffset(mips::F0, mips::A0, -69608);
-  __ StoreQToOffset(mips::F0, mips::A0, 69608);
-  __ StoreQToOffset(mips::F0, mips::A0, 0xABCDEF00);
-  __ StoreQToOffset(mips::F0, mips::A0, 0x7FFFABCD);
-
-  const char* expected =
-      "st.d $w0, 0($a0)\n"
-      "st.b $w0, 1($a0)\n"
-      "st.h $w0, 2($a0)\n"
-      "st.w $w0, 4($a0)\n"
-      "st.d $w0, 8($a0)\n"
-      "st.b $w0, 511($a0)\n"
-      "st.d $w0, 512($a0)\n"
-      "addiu $at, $a0, 513\n"
-      "st.b $w0, 0($at)\n"
-      "st.h $w0, 514($a0)\n"
-      "st.w $w0, 516($a0)\n"
-      "st.h $w0, 1022($a0)\n"
-      "st.d $w0, 1024($a0)\n"
-      "addiu $at, $a0, 1025\n"
-      "st.b $w0, 0($at)\n"
-      "addiu $at, $a0, 1026\n"
-      "st.h $w0, 0($at)\n"
-      "st.w $w0, 1028($a0)\n"
-      "st.w $w0, 2044($a0)\n"
-      "st.d $w0, 2048($a0)\n"
-      "addiu $at, $a0, 2049\n"
-      "st.b $w0, 0($at)\n"
-      "addiu $at, $a0, 2050\n"
-      "st.h $w0, 0($at)\n"
-      "addiu $at, $a0, 2052\n"
-      "st.w $w0, 0($at)\n"
-      "st.d $w0, 4088($a0)\n"
-      "addiu $at, $a0, 4096\n"
-      "st.d $w0, 0($at)\n"
-      "addiu $at, $a0, 4097\n"
-      "st.b $w0, 0($at)\n"
-      "addiu $at, $a0, 4098\n"
-      "st.h $w0, 0($at)\n"
-      "addiu $at, $a0, 4100\n"
-      "st.w $w0, 0($at)\n"
-      "addiu $at, $a0, 4104\n"
-      "st.d $w0, 0($at)\n"
-      "addiu $at, $a0, 0x7FFC\n"
-      "st.w $w0, 0($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "st.d $w0, 8($at)\n"
-      "addiu $at, $a0, 32760\n"
-      "addiu $at, $at, 32760\n"
-      "st.d $w0, 16($at)\n"
-      "lui $at, 4660\n"
-      "addu $at, $at, $a0\n"
-      "addiu $at, $at, 24576\n"
-      "st.d $w0, -2440($at) # 0xF678\n"
-      "lui $at, 4661\n"
-      "addu $at, $at, $a0\n"
-      "st.d $w0, 120($at)\n"
-      "st.d $w0, -256($a0)\n"
-      "st.b $w0, -511($a0)\n"
-      "addiu $at, $a0, -513\n"
-      "st.b $w0, 0($at)\n"
-      "st.h $w0, -1022($a0)\n"
-      "addiu $at, $a0, -1026\n"
-      "st.h $w0, 0($at)\n"
-      "st.w $w0, -2044($a0)\n"
-      "addiu $at, $a0, -2052\n"
-      "st.w $w0, 0($at)\n"
-      "st.d $w0, -4096($a0)\n"
-      "addiu $at, $a0, -4104\n"
-      "st.d $w0, 0($at)\n"
-      "addiu $at, $a0, -32768\n"
-      "st.d $w0, 0($at)\n"
-      "addiu $at, $a0, -32760\n"
-      "addiu $at, $at, -4096\n"
-      "st.d $w0, 0($at)\n"
-      "addiu $at, $a0, 32760\n"
-      "addiu $at, $at, 4096\n"
-      "st.d $w0, 0($at)\n"
-      "addiu $at, $a0, -32760\n"
-      "addiu $at, $at, -32760\n"
-      "st.d $w0, -4088($at)\n"
-      "addiu $at, $a0, 32760\n"
-      "addiu $at, $at, 32760\n"
-      "st.d $w0, 4088($at)\n"
-      "lui $at, 0xABCE\n"
-      "addu $at, $at, $a0\n"
-      "addiu $at, $at, -8192 # 0xE000\n"
-      "st.d $w0, 0xF00($at)\n"
-      "lui $at, 0x8000\n"
-      "addu $at, $at, $a0\n"
-      "addiu $at, $at, -21504 # 0xAC00\n"
-      "st.b $w0, -51($at) # 0xFFCD\n";
-  DriverStr(expected, "StoreQToOffset");
-}
-
-#undef __
-}  // namespace art
diff --git a/compiler/utils/mips/assembler_mips32r6_test.cc b/compiler/utils/mips/assembler_mips32r6_test.cc
deleted file mode 100644
index 4e27bbf..0000000
--- a/compiler/utils/mips/assembler_mips32r6_test.cc
+++ /dev/null
@@ -1,2524 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "assembler_mips.h"
-
-#include <map>
-
-#include "base/stl_util.h"
-#include "utils/assembler_test.h"
-
-#define __ GetAssembler()->
-
-namespace art {
-
-struct MIPSCpuRegisterCompare {
-  bool operator()(const mips::Register& a, const mips::Register& b) const {
-    return a < b;
-  }
-};
-
-class AssemblerMIPS32r6Test : public AssemblerTest<mips::MipsAssembler,
-                                                   mips::MipsLabel,
-                                                   mips::Register,
-                                                   mips::FRegister,
-                                                   uint32_t,
-                                                   mips::VectorRegister> {
- public:
-  using Base = AssemblerTest<mips::MipsAssembler,
-                             mips::MipsLabel,
-                             mips::Register,
-                             mips::FRegister,
-                             uint32_t,
-                             mips::VectorRegister>;
-
-  // These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
-  // and reimplement it without the verification against `assembly_string`. b/73903608
-  void DriverStr(const std::string& assembly_string ATTRIBUTE_UNUSED,
-                 const std::string& test_name ATTRIBUTE_UNUSED) {
-    GetAssembler()->FinalizeCode();
-    std::vector<uint8_t> data(GetAssembler()->CodeSize());
-    MemoryRegion code(data.data(), data.size());
-    GetAssembler()->FinalizeInstructions(code);
-  }
-
-  AssemblerMIPS32r6Test() :
-    instruction_set_features_(MipsInstructionSetFeatures::FromVariant("mips32r6", nullptr)) {
-  }
-
- protected:
-  // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
-  std::string GetArchitectureString() override {
-    return "mips";
-  }
-
-  std::string GetAssemblerCmdName() override {
-    // We assemble and link for MIPS32R6. See GetAssemblerParameters() for details.
-    return "gcc";
-  }
-
-  std::string GetAssemblerParameters() override {
-    // We assemble and link for MIPS32R6. The reason is that object files produced for MIPS32R6
-    // (and MIPS64R6) with the GNU assembler don't have correct final offsets in PC-relative
-    // branches in the .text section and so they require a relocation pass (there's a relocation
-    // section, .rela.text, that has the needed info to fix up the branches).
-    // We use "-modd-spreg" so we can use odd-numbered single precision FPU registers.
-    // We put the code at address 0x1000000 (instead of 0) to avoid overlapping with the
-    // .MIPS.abiflags section (there doesn't seem to be a way to suppress its generation easily).
-    return " -march=mips32r6 -mmsa -modd-spreg -Wa,--no-warn"
-        " -Wl,-Ttext=0x1000000 -Wl,-e0x1000000 -nostdlib";
-  }
-
-  void Pad(std::vector<uint8_t>& data) override {
-    // The GNU linker unconditionally pads the code segment with NOPs to a size that is a multiple
-    // of 16 and there doesn't appear to be a way to suppress this padding. Our assembler doesn't
-    // pad, so, in order for two assembler outputs to match, we need to match the padding as well.
-    // NOP is encoded as four zero bytes on MIPS.
-    size_t pad_size = RoundUp(data.size(), 16u) - data.size();
-    data.insert(data.end(), pad_size, 0);
-  }
-
-  std::string GetDisassembleParameters() override {
-    return " -D -bbinary -mmips:isa32r6";
-  }
-
-  mips::MipsAssembler* CreateAssembler(ArenaAllocator* allocator) override {
-    return new (allocator) mips::MipsAssembler(allocator, instruction_set_features_.get());
-  }
-
-  void SetUpHelpers() override {
-    if (registers_.size() == 0) {
-      registers_.push_back(new mips::Register(mips::ZERO));
-      registers_.push_back(new mips::Register(mips::AT));
-      registers_.push_back(new mips::Register(mips::V0));
-      registers_.push_back(new mips::Register(mips::V1));
-      registers_.push_back(new mips::Register(mips::A0));
-      registers_.push_back(new mips::Register(mips::A1));
-      registers_.push_back(new mips::Register(mips::A2));
-      registers_.push_back(new mips::Register(mips::A3));
-      registers_.push_back(new mips::Register(mips::T0));
-      registers_.push_back(new mips::Register(mips::T1));
-      registers_.push_back(new mips::Register(mips::T2));
-      registers_.push_back(new mips::Register(mips::T3));
-      registers_.push_back(new mips::Register(mips::T4));
-      registers_.push_back(new mips::Register(mips::T5));
-      registers_.push_back(new mips::Register(mips::T6));
-      registers_.push_back(new mips::Register(mips::T7));
-      registers_.push_back(new mips::Register(mips::S0));
-      registers_.push_back(new mips::Register(mips::S1));
-      registers_.push_back(new mips::Register(mips::S2));
-      registers_.push_back(new mips::Register(mips::S3));
-      registers_.push_back(new mips::Register(mips::S4));
-      registers_.push_back(new mips::Register(mips::S5));
-      registers_.push_back(new mips::Register(mips::S6));
-      registers_.push_back(new mips::Register(mips::S7));
-      registers_.push_back(new mips::Register(mips::T8));
-      registers_.push_back(new mips::Register(mips::T9));
-      registers_.push_back(new mips::Register(mips::K0));
-      registers_.push_back(new mips::Register(mips::K1));
-      registers_.push_back(new mips::Register(mips::GP));
-      registers_.push_back(new mips::Register(mips::SP));
-      registers_.push_back(new mips::Register(mips::FP));
-      registers_.push_back(new mips::Register(mips::RA));
-
-      secondary_register_names_.emplace(mips::Register(mips::ZERO), "zero");
-      secondary_register_names_.emplace(mips::Register(mips::AT), "at");
-      secondary_register_names_.emplace(mips::Register(mips::V0), "v0");
-      secondary_register_names_.emplace(mips::Register(mips::V1), "v1");
-      secondary_register_names_.emplace(mips::Register(mips::A0), "a0");
-      secondary_register_names_.emplace(mips::Register(mips::A1), "a1");
-      secondary_register_names_.emplace(mips::Register(mips::A2), "a2");
-      secondary_register_names_.emplace(mips::Register(mips::A3), "a3");
-      secondary_register_names_.emplace(mips::Register(mips::T0), "t0");
-      secondary_register_names_.emplace(mips::Register(mips::T1), "t1");
-      secondary_register_names_.emplace(mips::Register(mips::T2), "t2");
-      secondary_register_names_.emplace(mips::Register(mips::T3), "t3");
-      secondary_register_names_.emplace(mips::Register(mips::T4), "t4");
-      secondary_register_names_.emplace(mips::Register(mips::T5), "t5");
-      secondary_register_names_.emplace(mips::Register(mips::T6), "t6");
-      secondary_register_names_.emplace(mips::Register(mips::T7), "t7");
-      secondary_register_names_.emplace(mips::Register(mips::S0), "s0");
-      secondary_register_names_.emplace(mips::Register(mips::S1), "s1");
-      secondary_register_names_.emplace(mips::Register(mips::S2), "s2");
-      secondary_register_names_.emplace(mips::Register(mips::S3), "s3");
-      secondary_register_names_.emplace(mips::Register(mips::S4), "s4");
-      secondary_register_names_.emplace(mips::Register(mips::S5), "s5");
-      secondary_register_names_.emplace(mips::Register(mips::S6), "s6");
-      secondary_register_names_.emplace(mips::Register(mips::S7), "s7");
-      secondary_register_names_.emplace(mips::Register(mips::T8), "t8");
-      secondary_register_names_.emplace(mips::Register(mips::T9), "t9");
-      secondary_register_names_.emplace(mips::Register(mips::K0), "k0");
-      secondary_register_names_.emplace(mips::Register(mips::K1), "k1");
-      secondary_register_names_.emplace(mips::Register(mips::GP), "gp");
-      secondary_register_names_.emplace(mips::Register(mips::SP), "sp");
-      secondary_register_names_.emplace(mips::Register(mips::FP), "fp");
-      secondary_register_names_.emplace(mips::Register(mips::RA), "ra");
-
-      fp_registers_.push_back(new mips::FRegister(mips::F0));
-      fp_registers_.push_back(new mips::FRegister(mips::F1));
-      fp_registers_.push_back(new mips::FRegister(mips::F2));
-      fp_registers_.push_back(new mips::FRegister(mips::F3));
-      fp_registers_.push_back(new mips::FRegister(mips::F4));
-      fp_registers_.push_back(new mips::FRegister(mips::F5));
-      fp_registers_.push_back(new mips::FRegister(mips::F6));
-      fp_registers_.push_back(new mips::FRegister(mips::F7));
-      fp_registers_.push_back(new mips::FRegister(mips::F8));
-      fp_registers_.push_back(new mips::FRegister(mips::F9));
-      fp_registers_.push_back(new mips::FRegister(mips::F10));
-      fp_registers_.push_back(new mips::FRegister(mips::F11));
-      fp_registers_.push_back(new mips::FRegister(mips::F12));
-      fp_registers_.push_back(new mips::FRegister(mips::F13));
-      fp_registers_.push_back(new mips::FRegister(mips::F14));
-      fp_registers_.push_back(new mips::FRegister(mips::F15));
-      fp_registers_.push_back(new mips::FRegister(mips::F16));
-      fp_registers_.push_back(new mips::FRegister(mips::F17));
-      fp_registers_.push_back(new mips::FRegister(mips::F18));
-      fp_registers_.push_back(new mips::FRegister(mips::F19));
-      fp_registers_.push_back(new mips::FRegister(mips::F20));
-      fp_registers_.push_back(new mips::FRegister(mips::F21));
-      fp_registers_.push_back(new mips::FRegister(mips::F22));
-      fp_registers_.push_back(new mips::FRegister(mips::F23));
-      fp_registers_.push_back(new mips::FRegister(mips::F24));
-      fp_registers_.push_back(new mips::FRegister(mips::F25));
-      fp_registers_.push_back(new mips::FRegister(mips::F26));
-      fp_registers_.push_back(new mips::FRegister(mips::F27));
-      fp_registers_.push_back(new mips::FRegister(mips::F28));
-      fp_registers_.push_back(new mips::FRegister(mips::F29));
-      fp_registers_.push_back(new mips::FRegister(mips::F30));
-      fp_registers_.push_back(new mips::FRegister(mips::F31));
-
-      vec_registers_.push_back(new mips::VectorRegister(mips::W0));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W1));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W2));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W3));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W4));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W5));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W6));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W7));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W8));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W9));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W10));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W11));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W12));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W13));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W14));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W15));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W16));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W17));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W18));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W19));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W20));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W21));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W22));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W23));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W24));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W25));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W26));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W27));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W28));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W29));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W30));
-      vec_registers_.push_back(new mips::VectorRegister(mips::W31));
-    }
-  }
-
-  void TearDown() override {
-    AssemblerTest::TearDown();
-    STLDeleteElements(&registers_);
-    STLDeleteElements(&fp_registers_);
-    STLDeleteElements(&vec_registers_);
-  }
-
-  std::vector<mips::MipsLabel> GetAddresses() override {
-    UNIMPLEMENTED(FATAL) << "Feature not implemented yet";
-    UNREACHABLE();
-  }
-
-  std::vector<mips::Register*> GetRegisters() override {
-    return registers_;
-  }
-
-  std::vector<mips::FRegister*> GetFPRegisters() override {
-    return fp_registers_;
-  }
-
-  std::vector<mips::VectorRegister*> GetVectorRegisters() override {
-    return vec_registers_;
-  }
-
-  uint32_t CreateImmediate(int64_t imm_value) override {
-    return imm_value;
-  }
-
-  std::string GetSecondaryRegisterName(const mips::Register& reg) override {
-    CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
-    return secondary_register_names_[reg];
-  }
-
-  std::string RepeatInsn(size_t count, const std::string& insn) {
-    std::string result;
-    for (; count != 0u; --count) {
-      result += insn;
-    }
-    return result;
-  }
-
-  void BranchHelper(void (mips::MipsAssembler::*f)(mips::MipsLabel*,
-                                                   bool),
-                    const std::string& instr_name,
-                    bool has_slot,
-                    bool is_bare = false) {
-    __ SetReorder(false);
-    mips::MipsLabel label1, label2;
-    (Base::GetAssembler()->*f)(&label1, is_bare);
-    constexpr size_t kAdduCount1 = 63;
-    for (size_t i = 0; i != kAdduCount1; ++i) {
-      __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-    }
-    __ Bind(&label1);
-    (Base::GetAssembler()->*f)(&label2, is_bare);
-    constexpr size_t kAdduCount2 = 64;
-    for (size_t i = 0; i != kAdduCount2; ++i) {
-      __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-    }
-    __ Bind(&label2);
-    (Base::GetAssembler()->*f)(&label1, is_bare);
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-
-    std::string expected =
-        ".set noreorder\n" +
-        instr_name + " 1f\n" +
-        ((is_bare || !has_slot) ? "" : "nop\n") +
-        RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
-        "1:\n" +
-        instr_name + " 2f\n" +
-        ((is_bare || !has_slot) ? "" : "nop\n") +
-        RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
-        "2:\n" +
-        instr_name + " 1b\n" +
-        ((is_bare || !has_slot) ? "" : "nop\n") +
-        "addu $zero, $zero, $zero\n";
-    DriverStr(expected, instr_name);
-  }
-
-  void BranchCondOneRegHelper(void (mips::MipsAssembler::*f)(mips::Register,
-                                                             mips::MipsLabel*,
-                                                             bool),
-                              const std::string& instr_name,
-                              bool is_bare = false) {
-    __ SetReorder(false);
-    mips::MipsLabel label;
-    (Base::GetAssembler()->*f)(mips::A0, &label, is_bare);
-    constexpr size_t kAdduCount1 = 63;
-    for (size_t i = 0; i != kAdduCount1; ++i) {
-      __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-    }
-    __ Bind(&label);
-    constexpr size_t kAdduCount2 = 64;
-    for (size_t i = 0; i != kAdduCount2; ++i) {
-      __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-    }
-    (Base::GetAssembler()->*f)(mips::A1, &label, is_bare);
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-
-    std::string expected =
-        ".set noreorder\n" +
-        instr_name + " $a0, 1f\n" +
-        (is_bare ? "" : "nop\n") +
-        RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
-        "1:\n" +
-        RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
-        instr_name + " $a1, 1b\n" +
-        (is_bare ? "" : "nop\n") +
-        "addu $zero, $zero, $zero\n";
-    DriverStr(expected, instr_name);
-  }
-
-  void BranchCondTwoRegsHelper(void (mips::MipsAssembler::*f)(mips::Register,
-                                                              mips::Register,
-                                                              mips::MipsLabel*,
-                                                              bool),
-                               const std::string& instr_name,
-                               bool is_bare = false) {
-    __ SetReorder(false);
-    mips::MipsLabel label;
-    (Base::GetAssembler()->*f)(mips::A0, mips::A1, &label, is_bare);
-    constexpr size_t kAdduCount1 = 63;
-    for (size_t i = 0; i != kAdduCount1; ++i) {
-      __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-    }
-    __ Bind(&label);
-    constexpr size_t kAdduCount2 = 64;
-    for (size_t i = 0; i != kAdduCount2; ++i) {
-      __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-    }
-    (Base::GetAssembler()->*f)(mips::A2, mips::A3, &label, is_bare);
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-
-    std::string expected =
-        ".set noreorder\n" +
-        instr_name + " $a0, $a1, 1f\n" +
-        (is_bare ? "" : "nop\n") +
-        RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
-        "1:\n" +
-        RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
-        instr_name + " $a2, $a3, 1b\n" +
-        (is_bare ? "" : "nop\n") +
-        "addu $zero, $zero, $zero\n";
-    DriverStr(expected, instr_name);
-  }
-
-  void BranchFpuCondHelper(void (mips::MipsAssembler::*f)(mips::FRegister,
-                                                          mips::MipsLabel*,
-                                                          bool),
-                           const std::string& instr_name,
-                           bool is_bare = false) {
-    __ SetReorder(false);
-    mips::MipsLabel label;
-    (Base::GetAssembler()->*f)(mips::F0, &label, is_bare);
-    constexpr size_t kAdduCount1 = 63;
-    for (size_t i = 0; i != kAdduCount1; ++i) {
-      __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-    }
-    __ Bind(&label);
-    constexpr size_t kAdduCount2 = 64;
-    for (size_t i = 0; i != kAdduCount2; ++i) {
-      __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-    }
-    (Base::GetAssembler()->*f)(mips::F30, &label, is_bare);
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-
-    std::string expected =
-        ".set noreorder\n" +
-        instr_name + " $f0, 1f\n" +
-        (is_bare ? "" : "nop\n") +
-        RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
-        "1:\n" +
-        RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
-        instr_name + " $f30, 1b\n" +
-        (is_bare ? "" : "nop\n") +
-        "addu $zero, $zero, $zero\n";
-    DriverStr(expected, instr_name);
-  }
-
- private:
-  std::vector<mips::Register*> registers_;
-  std::map<mips::Register, std::string, MIPSCpuRegisterCompare> secondary_register_names_;
-
-  std::vector<mips::FRegister*> fp_registers_;
-  std::vector<mips::VectorRegister*> vec_registers_;
-  std::unique_ptr<const MipsInstructionSetFeatures> instruction_set_features_;
-};
-
-
-TEST_F(AssemblerMIPS32r6Test, Toolchain) {
-  EXPECT_TRUE(CheckTools());
-}
-
-TEST_F(AssemblerMIPS32r6Test, MulR6) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::MulR6, "mul ${reg1}, ${reg2}, ${reg3}"), "MulR6");
-}
-
-TEST_F(AssemblerMIPS32r6Test, MuhR6) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::MuhR6, "muh ${reg1}, ${reg2}, ${reg3}"), "MuhR6");
-}
-
-TEST_F(AssemblerMIPS32r6Test, MuhuR6) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::MuhuR6, "muhu ${reg1}, ${reg2}, ${reg3}"), "MuhuR6");
-}
-
-TEST_F(AssemblerMIPS32r6Test, DivR6) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::DivR6, "div ${reg1}, ${reg2}, ${reg3}"), "DivR6");
-}
-
-TEST_F(AssemblerMIPS32r6Test, ModR6) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::ModR6, "mod ${reg1}, ${reg2}, ${reg3}"), "ModR6");
-}
-
-TEST_F(AssemblerMIPS32r6Test, DivuR6) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::DivuR6, "divu ${reg1}, ${reg2}, ${reg3}"), "DivuR6");
-}
-
-TEST_F(AssemblerMIPS32r6Test, ModuR6) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::ModuR6, "modu ${reg1}, ${reg2}, ${reg3}"), "ModuR6");
-}
-
-//////////
-// MISC //
-//////////
-
-TEST_F(AssemblerMIPS32r6Test, Aui) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Aui, 16, "aui ${reg1}, ${reg2}, {imm}"), "Aui");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Auipc) {
-  DriverStr(RepeatRIb(&mips::MipsAssembler::Auipc, 16, "auipc ${reg}, {imm}"), "Auipc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Lwpc) {
-  // Lwpc() takes an unsigned 19-bit immediate, while the GNU assembler needs a signed offset,
-  // hence the sign extension from bit 18 with `imm - ((imm & 0x40000) << 1)`.
-  // The GNU assembler also wants the offset to be a multiple of 4, which it will shift right
-  // by 2 positions when encoding, hence `<< 2` to compensate for that shift.
-  // We capture the value of the immediate with `.set imm, {imm}` because the value is needed
-  // twice for the sign extension, but `{imm}` is substituted only once.
-  const char* code = ".set imm, {imm}\nlw ${reg}, ((imm - ((imm & 0x40000) << 1)) << 2)($pc)";
-  DriverStr(RepeatRIb(&mips::MipsAssembler::Lwpc, 19, code), "Lwpc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Addiupc) {
-  // The comment from the Lwpc() test applies to this Addiupc() test as well.
-  const char* code = ".set imm, {imm}\naddiupc ${reg}, (imm - ((imm & 0x40000) << 1)) << 2";
-  DriverStr(RepeatRIb(&mips::MipsAssembler::Addiupc, 19, code), "Addiupc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bitswap) {
-  DriverStr(RepeatRR(&mips::MipsAssembler::Bitswap, "bitswap ${reg1}, ${reg2}"), "bitswap");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Lsa) {
-  DriverStr(RepeatRRRIb(&mips::MipsAssembler::Lsa,
-                        2,
-                        "lsa ${reg1}, ${reg2}, ${reg3}, {imm}",
-                        1),
-            "lsa");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Seleqz) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::Seleqz, "seleqz ${reg1}, ${reg2}, ${reg3}"), "seleqz");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Selnez) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::Selnez, "selnez ${reg1}, ${reg2}, ${reg3}"), "selnez");
-}
-
-TEST_F(AssemblerMIPS32r6Test, ClzR6) {
-  DriverStr(RepeatRR(&mips::MipsAssembler::ClzR6, "clz ${reg1}, ${reg2}"), "clzR6");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CloR6) {
-  DriverStr(RepeatRR(&mips::MipsAssembler::CloR6, "clo ${reg1}, ${reg2}"), "cloR6");
-}
-
-////////////////////
-// FLOATING POINT //
-////////////////////
-
-TEST_F(AssemblerMIPS32r6Test, SelS) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::SelS, "sel.s ${reg1}, ${reg2}, ${reg3}"), "sel.s");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SelD) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::SelD, "sel.d ${reg1}, ${reg2}, ${reg3}"), "sel.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SeleqzS) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::SeleqzS, "seleqz.s ${reg1}, ${reg2}, ${reg3}"),
-            "seleqz.s");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SeleqzD) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::SeleqzD, "seleqz.d ${reg1}, ${reg2}, ${reg3}"),
-            "seleqz.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SelnezS) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::SelnezS, "selnez.s ${reg1}, ${reg2}, ${reg3}"),
-            "selnez.s");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SelnezD) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::SelnezD, "selnez.d ${reg1}, ${reg2}, ${reg3}"),
-            "selnez.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, ClassS) {
-  DriverStr(RepeatFF(&mips::MipsAssembler::ClassS, "class.s ${reg1}, ${reg2}"), "class.s");
-}
-
-TEST_F(AssemblerMIPS32r6Test, ClassD) {
-  DriverStr(RepeatFF(&mips::MipsAssembler::ClassD, "class.d ${reg1}, ${reg2}"), "class.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, MinS) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::MinS, "min.s ${reg1}, ${reg2}, ${reg3}"), "min.s");
-}
-
-TEST_F(AssemblerMIPS32r6Test, MinD) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::MinD, "min.d ${reg1}, ${reg2}, ${reg3}"), "min.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, MaxS) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::MaxS, "max.s ${reg1}, ${reg2}, ${reg3}"), "max.s");
-}
-
-TEST_F(AssemblerMIPS32r6Test, MaxD) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::MaxD, "max.d ${reg1}, ${reg2}, ${reg3}"), "max.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpUnS) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUnS, "cmp.un.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.un.s");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpEqS) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpEqS, "cmp.eq.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.eq.s");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpUeqS) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUeqS, "cmp.ueq.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.ueq.s");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpLtS) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpLtS, "cmp.lt.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.lt.s");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpUltS) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUltS, "cmp.ult.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.ult.s");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpLeS) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpLeS, "cmp.le.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.le.s");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpUleS) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUleS, "cmp.ule.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.ule.s");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpOrS) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpOrS, "cmp.or.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.or.s");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpUneS) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUneS, "cmp.une.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.une.s");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpNeS) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpNeS, "cmp.ne.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.ne.s");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpUnD) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUnD, "cmp.un.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.un.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpEqD) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpEqD, "cmp.eq.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.eq.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpUeqD) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUeqD, "cmp.ueq.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.ueq.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpLtD) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpLtD, "cmp.lt.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.lt.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpUltD) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUltD, "cmp.ult.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.ult.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpLeD) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpLeD, "cmp.le.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.le.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpUleD) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUleD, "cmp.ule.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.ule.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpOrD) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpOrD, "cmp.or.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.or.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpUneD) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpUneD, "cmp.une.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.une.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, CmpNeD) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::CmpNeD, "cmp.ne.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.ne.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, LoadDFromOffset) {
-  __ LoadDFromOffset(mips::F0, mips::A0, -0x8000);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x7FF8);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x7FFB);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x7FFC);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x7FFF);
-  __ LoadDFromOffset(mips::F0, mips::A0, -0xFFF0);
-  __ LoadDFromOffset(mips::F0, mips::A0, -0x8008);
-  __ LoadDFromOffset(mips::F0, mips::A0, -0x8001);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x8000);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0xFFF0);
-  __ LoadDFromOffset(mips::F0, mips::A0, -0x17FE8);
-  __ LoadDFromOffset(mips::F0, mips::A0, -0x0FFF8);
-  __ LoadDFromOffset(mips::F0, mips::A0, -0x0FFF1);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x0FFF1);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x0FFF8);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x17FE8);
-  __ LoadDFromOffset(mips::F0, mips::A0, -0x17FF0);
-  __ LoadDFromOffset(mips::F0, mips::A0, -0x17FE9);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x17FE9);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x17FF0);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x12345678);
-
-  const char* expected =
-      "ldc1 $f0, -0x8000($a0)\n"
-      "ldc1 $f0, 0($a0)\n"
-      "ldc1 $f0, 0x7FF8($a0)\n"
-      "lwc1 $f0, 0x7FFB($a0)\n"
-      "lw $t8, 0x7FFF($a0)\n"
-      "mthc1 $t8, $f0\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "lwc1 $f0, 4($at)\n"
-      "lw $t8, 8($at)\n"
-      "mthc1 $t8, $f0\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "lwc1 $f0, 7($at)\n"
-      "lw $t8, 11($at)\n"
-      "mthc1 $t8, $f0\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "ldc1 $f0, -0x7FF8($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "ldc1 $f0, -0x10($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "lwc1 $f0, -9($at)\n"
-      "lw $t8, -5($at)\n"
-      "mthc1 $t8, $f0\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "ldc1 $f0, 8($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "ldc1 $f0, 0x7FF8($at)\n"
-      "aui $at, $a0, 0xFFFF\n"
-      "ldc1 $f0, -0x7FE8($at)\n"
-      "aui $at, $a0, 0xFFFF\n"
-      "ldc1 $f0, 0x8($at)\n"
-      "aui $at, $a0, 0xFFFF\n"
-      "lwc1 $f0, 0xF($at)\n"
-      "lw $t8, 0x13($at)\n"
-      "mthc1 $t8, $f0\n"
-      "aui $at, $a0, 0x1\n"
-      "lwc1 $f0, -0xF($at)\n"
-      "lw $t8, -0xB($at)\n"
-      "mthc1 $t8, $f0\n"
-      "aui $at, $a0, 0x1\n"
-      "ldc1 $f0, -0x8($at)\n"
-      "aui $at, $a0, 0x1\n"
-      "ldc1 $f0, 0x7FE8($at)\n"
-      "aui $at, $a0, 0xFFFF\n"
-      "ldc1 $f0, -0x7FF0($at)\n"
-      "aui $at, $a0, 0xFFFF\n"
-      "lwc1 $f0, -0x7FE9($at)\n"
-      "lw $t8, -0x7FE5($at)\n"
-      "mthc1 $t8, $f0\n"
-      "aui $at, $a0, 0x1\n"
-      "lwc1 $f0, 0x7FE9($at)\n"
-      "lw $t8, 0x7FED($at)\n"
-      "mthc1 $t8, $f0\n"
-      "aui $at, $a0, 0x1\n"
-      "ldc1 $f0, 0x7FF0($at)\n"
-      "aui $at, $a0, 0x1234\n"
-      "ldc1 $f0, 0x5678($at)\n";
-  DriverStr(expected, "LoadDFromOffset");
-}
-
-TEST_F(AssemblerMIPS32r6Test, LoadQFromOffset) {
-  __ LoadQFromOffset(mips::F0, mips::A0, 0);
-  __ LoadQFromOffset(mips::F0, mips::A0, 1);
-  __ LoadQFromOffset(mips::F0, mips::A0, 2);
-  __ LoadQFromOffset(mips::F0, mips::A0, 4);
-  __ LoadQFromOffset(mips::F0, mips::A0, 8);
-  __ LoadQFromOffset(mips::F0, mips::A0, 511);
-  __ LoadQFromOffset(mips::F0, mips::A0, 512);
-  __ LoadQFromOffset(mips::F0, mips::A0, 513);
-  __ LoadQFromOffset(mips::F0, mips::A0, 514);
-  __ LoadQFromOffset(mips::F0, mips::A0, 516);
-  __ LoadQFromOffset(mips::F0, mips::A0, 1022);
-  __ LoadQFromOffset(mips::F0, mips::A0, 1024);
-  __ LoadQFromOffset(mips::F0, mips::A0, 1025);
-  __ LoadQFromOffset(mips::F0, mips::A0, 1026);
-  __ LoadQFromOffset(mips::F0, mips::A0, 1028);
-  __ LoadQFromOffset(mips::F0, mips::A0, 2044);
-  __ LoadQFromOffset(mips::F0, mips::A0, 2048);
-  __ LoadQFromOffset(mips::F0, mips::A0, 2049);
-  __ LoadQFromOffset(mips::F0, mips::A0, 2050);
-  __ LoadQFromOffset(mips::F0, mips::A0, 2052);
-  __ LoadQFromOffset(mips::F0, mips::A0, 4088);
-  __ LoadQFromOffset(mips::F0, mips::A0, 4096);
-  __ LoadQFromOffset(mips::F0, mips::A0, 4097);
-  __ LoadQFromOffset(mips::F0, mips::A0, 4098);
-  __ LoadQFromOffset(mips::F0, mips::A0, 4100);
-  __ LoadQFromOffset(mips::F0, mips::A0, 4104);
-  __ LoadQFromOffset(mips::F0, mips::A0, 0x7FFC);
-  __ LoadQFromOffset(mips::F0, mips::A0, 0x8000);
-  __ LoadQFromOffset(mips::F0, mips::A0, 0x10000);
-  __ LoadQFromOffset(mips::F0, mips::A0, 0x12345678);
-  __ LoadQFromOffset(mips::F0, mips::A0, 0x12350078);
-  __ LoadQFromOffset(mips::F0, mips::A0, -256);
-  __ LoadQFromOffset(mips::F0, mips::A0, -511);
-  __ LoadQFromOffset(mips::F0, mips::A0, -513);
-  __ LoadQFromOffset(mips::F0, mips::A0, -1022);
-  __ LoadQFromOffset(mips::F0, mips::A0, -1026);
-  __ LoadQFromOffset(mips::F0, mips::A0, -2044);
-  __ LoadQFromOffset(mips::F0, mips::A0, -2052);
-  __ LoadQFromOffset(mips::F0, mips::A0, -4096);
-  __ LoadQFromOffset(mips::F0, mips::A0, -4104);
-  __ LoadQFromOffset(mips::F0, mips::A0, -32768);
-  __ LoadQFromOffset(mips::F0, mips::A0, 0xABCDEF00);
-  __ LoadQFromOffset(mips::F0, mips::A0, 0x7FFFABCD);
-
-  const char* expected =
-      "ld.d $w0, 0($a0)\n"
-      "ld.b $w0, 1($a0)\n"
-      "ld.h $w0, 2($a0)\n"
-      "ld.w $w0, 4($a0)\n"
-      "ld.d $w0, 8($a0)\n"
-      "ld.b $w0, 511($a0)\n"
-      "ld.d $w0, 512($a0)\n"
-      "addiu $at, $a0, 513\n"
-      "ld.b $w0, 0($at)\n"
-      "ld.h $w0, 514($a0)\n"
-      "ld.w $w0, 516($a0)\n"
-      "ld.h $w0, 1022($a0)\n"
-      "ld.d $w0, 1024($a0)\n"
-      "addiu $at, $a0, 1025\n"
-      "ld.b $w0, 0($at)\n"
-      "addiu $at, $a0, 1026\n"
-      "ld.h $w0, 0($at)\n"
-      "ld.w $w0, 1028($a0)\n"
-      "ld.w $w0, 2044($a0)\n"
-      "ld.d $w0, 2048($a0)\n"
-      "addiu $at, $a0, 2049\n"
-      "ld.b $w0, 0($at)\n"
-      "addiu $at, $a0, 2050\n"
-      "ld.h $w0, 0($at)\n"
-      "addiu $at, $a0, 2052\n"
-      "ld.w $w0, 0($at)\n"
-      "ld.d $w0, 4088($a0)\n"
-      "addiu $at, $a0, 4096\n"
-      "ld.d $w0, 0($at)\n"
-      "addiu $at, $a0, 4097\n"
-      "ld.b $w0, 0($at)\n"
-      "addiu $at, $a0, 4098\n"
-      "ld.h $w0, 0($at)\n"
-      "addiu $at, $a0, 4100\n"
-      "ld.w $w0, 0($at)\n"
-      "addiu $at, $a0, 4104\n"
-      "ld.d $w0, 0($at)\n"
-      "addiu $at, $a0, 0x7FFC\n"
-      "ld.w $w0, 0($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "ld.d $w0, 8($at)\n"
-      "aui $at, $a0, 0x1\n"
-      "ld.d $w0, 0($at)\n"
-      "aui $at, $a0, 0x1234\n"
-      "addiu $at, $at, 0x6000\n"
-      "ld.d $w0, -2440($at) # 0xF678\n"
-      "aui $at, $a0, 0x1235\n"
-      "ld.d $w0, 0x78($at)\n"
-      "ld.d $w0, -256($a0)\n"
-      "ld.b $w0, -511($a0)\n"
-      "addiu $at, $a0, -513\n"
-      "ld.b $w0, 0($at)\n"
-      "ld.h $w0, -1022($a0)\n"
-      "addiu $at, $a0, -1026\n"
-      "ld.h $w0, 0($at)\n"
-      "ld.w $w0, -2044($a0)\n"
-      "addiu $at, $a0, -2052\n"
-      "ld.w $w0, 0($at)\n"
-      "ld.d $w0, -4096($a0)\n"
-      "addiu $at, $a0, -4104\n"
-      "ld.d $w0, 0($at)\n"
-      "addiu $at, $a0, -32768\n"
-      "ld.d $w0, 0($at)\n"
-      "aui $at, $a0, 0xABCE\n"
-      "addiu $at, $at, -8192 # 0xE000\n"
-      "ld.d $w0, 0xF00($at)\n"
-      "aui $at, $a0, 0x8000\n"
-      "addiu $at, $at, -21504 # 0xAC00\n"
-      "ld.b $w0, -51($at) # 0xFFCD\n";
-  DriverStr(expected, "LoadQFromOffset");
-}
-
-TEST_F(AssemblerMIPS32r6Test, StoreDToOffset) {
-  __ StoreDToOffset(mips::F0, mips::A0, -0x8000);
-  __ StoreDToOffset(mips::F0, mips::A0, +0);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x7FF8);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x7FFB);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x7FFC);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x7FFF);
-  __ StoreDToOffset(mips::F0, mips::A0, -0xFFF0);
-  __ StoreDToOffset(mips::F0, mips::A0, -0x8008);
-  __ StoreDToOffset(mips::F0, mips::A0, -0x8001);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x8000);
-  __ StoreDToOffset(mips::F0, mips::A0, +0xFFF0);
-  __ StoreDToOffset(mips::F0, mips::A0, -0x17FE8);
-  __ StoreDToOffset(mips::F0, mips::A0, -0x0FFF8);
-  __ StoreDToOffset(mips::F0, mips::A0, -0x0FFF1);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x0FFF1);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x0FFF8);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x17FE8);
-  __ StoreDToOffset(mips::F0, mips::A0, -0x17FF0);
-  __ StoreDToOffset(mips::F0, mips::A0, -0x17FE9);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x17FE9);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x17FF0);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x12345678);
-
-  const char* expected =
-      "sdc1 $f0, -0x8000($a0)\n"
-      "sdc1 $f0, 0($a0)\n"
-      "sdc1 $f0, 0x7FF8($a0)\n"
-      "mfhc1 $t8, $f0\n"
-      "swc1 $f0, 0x7FFB($a0)\n"
-      "sw $t8, 0x7FFF($a0)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "mfhc1 $t8, $f0\n"
-      "swc1 $f0, 4($at)\n"
-      "sw $t8, 8($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "mfhc1 $t8, $f0\n"
-      "swc1 $f0, 7($at)\n"
-      "sw $t8, 11($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "sdc1 $f0, -0x7FF8($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "sdc1 $f0, -0x10($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "mfhc1 $t8, $f0\n"
-      "swc1 $f0, -9($at)\n"
-      "sw $t8, -5($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "sdc1 $f0, 8($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "sdc1 $f0, 0x7FF8($at)\n"
-      "aui $at, $a0, 0xFFFF\n"
-      "sdc1 $f0, -0x7FE8($at)\n"
-      "aui $at, $a0, 0xFFFF\n"
-      "sdc1 $f0, 0x8($at)\n"
-      "aui $at, $a0, 0xFFFF\n"
-      "mfhc1 $t8, $f0\n"
-      "swc1 $f0, 0xF($at)\n"
-      "sw $t8, 0x13($at)\n"
-      "aui $at, $a0, 0x1\n"
-      "mfhc1 $t8, $f0\n"
-      "swc1 $f0, -0xF($at)\n"
-      "sw $t8, -0xB($at)\n"
-      "aui $at, $a0, 0x1\n"
-      "sdc1 $f0, -0x8($at)\n"
-      "aui $at, $a0, 0x1\n"
-      "sdc1 $f0, 0x7FE8($at)\n"
-      "aui $at, $a0, 0xFFFF\n"
-      "sdc1 $f0, -0x7FF0($at)\n"
-      "aui $at, $a0, 0xFFFF\n"
-      "mfhc1 $t8, $f0\n"
-      "swc1 $f0, -0x7FE9($at)\n"
-      "sw $t8, -0x7FE5($at)\n"
-      "aui $at, $a0, 0x1\n"
-      "mfhc1 $t8, $f0\n"
-      "swc1 $f0, 0x7FE9($at)\n"
-      "sw $t8, 0x7FED($at)\n"
-      "aui $at, $a0, 0x1\n"
-      "sdc1 $f0, 0x7FF0($at)\n"
-      "aui $at, $a0, 0x1234\n"
-      "sdc1 $f0, 0x5678($at)\n";
-  DriverStr(expected, "StoreDToOffset");
-}
-
-TEST_F(AssemblerMIPS32r6Test, StoreQToOffset) {
-  __ StoreQToOffset(mips::F0, mips::A0, 0);
-  __ StoreQToOffset(mips::F0, mips::A0, 1);
-  __ StoreQToOffset(mips::F0, mips::A0, 2);
-  __ StoreQToOffset(mips::F0, mips::A0, 4);
-  __ StoreQToOffset(mips::F0, mips::A0, 8);
-  __ StoreQToOffset(mips::F0, mips::A0, 511);
-  __ StoreQToOffset(mips::F0, mips::A0, 512);
-  __ StoreQToOffset(mips::F0, mips::A0, 513);
-  __ StoreQToOffset(mips::F0, mips::A0, 514);
-  __ StoreQToOffset(mips::F0, mips::A0, 516);
-  __ StoreQToOffset(mips::F0, mips::A0, 1022);
-  __ StoreQToOffset(mips::F0, mips::A0, 1024);
-  __ StoreQToOffset(mips::F0, mips::A0, 1025);
-  __ StoreQToOffset(mips::F0, mips::A0, 1026);
-  __ StoreQToOffset(mips::F0, mips::A0, 1028);
-  __ StoreQToOffset(mips::F0, mips::A0, 2044);
-  __ StoreQToOffset(mips::F0, mips::A0, 2048);
-  __ StoreQToOffset(mips::F0, mips::A0, 2049);
-  __ StoreQToOffset(mips::F0, mips::A0, 2050);
-  __ StoreQToOffset(mips::F0, mips::A0, 2052);
-  __ StoreQToOffset(mips::F0, mips::A0, 4088);
-  __ StoreQToOffset(mips::F0, mips::A0, 4096);
-  __ StoreQToOffset(mips::F0, mips::A0, 4097);
-  __ StoreQToOffset(mips::F0, mips::A0, 4098);
-  __ StoreQToOffset(mips::F0, mips::A0, 4100);
-  __ StoreQToOffset(mips::F0, mips::A0, 4104);
-  __ StoreQToOffset(mips::F0, mips::A0, 0x7FFC);
-  __ StoreQToOffset(mips::F0, mips::A0, 0x8000);
-  __ StoreQToOffset(mips::F0, mips::A0, 0x10000);
-  __ StoreQToOffset(mips::F0, mips::A0, 0x12345678);
-  __ StoreQToOffset(mips::F0, mips::A0, 0x12350078);
-  __ StoreQToOffset(mips::F0, mips::A0, -256);
-  __ StoreQToOffset(mips::F0, mips::A0, -511);
-  __ StoreQToOffset(mips::F0, mips::A0, -513);
-  __ StoreQToOffset(mips::F0, mips::A0, -1022);
-  __ StoreQToOffset(mips::F0, mips::A0, -1026);
-  __ StoreQToOffset(mips::F0, mips::A0, -2044);
-  __ StoreQToOffset(mips::F0, mips::A0, -2052);
-  __ StoreQToOffset(mips::F0, mips::A0, -4096);
-  __ StoreQToOffset(mips::F0, mips::A0, -4104);
-  __ StoreQToOffset(mips::F0, mips::A0, -32768);
-  __ StoreQToOffset(mips::F0, mips::A0, 0xABCDEF00);
-  __ StoreQToOffset(mips::F0, mips::A0, 0x7FFFABCD);
-
-  const char* expected =
-      "st.d $w0, 0($a0)\n"
-      "st.b $w0, 1($a0)\n"
-      "st.h $w0, 2($a0)\n"
-      "st.w $w0, 4($a0)\n"
-      "st.d $w0, 8($a0)\n"
-      "st.b $w0, 511($a0)\n"
-      "st.d $w0, 512($a0)\n"
-      "addiu $at, $a0, 513\n"
-      "st.b $w0, 0($at)\n"
-      "st.h $w0, 514($a0)\n"
-      "st.w $w0, 516($a0)\n"
-      "st.h $w0, 1022($a0)\n"
-      "st.d $w0, 1024($a0)\n"
-      "addiu $at, $a0, 1025\n"
-      "st.b $w0, 0($at)\n"
-      "addiu $at, $a0, 1026\n"
-      "st.h $w0, 0($at)\n"
-      "st.w $w0, 1028($a0)\n"
-      "st.w $w0, 2044($a0)\n"
-      "st.d $w0, 2048($a0)\n"
-      "addiu $at, $a0, 2049\n"
-      "st.b $w0, 0($at)\n"
-      "addiu $at, $a0, 2050\n"
-      "st.h $w0, 0($at)\n"
-      "addiu $at, $a0, 2052\n"
-      "st.w $w0, 0($at)\n"
-      "st.d $w0, 4088($a0)\n"
-      "addiu $at, $a0, 4096\n"
-      "st.d $w0, 0($at)\n"
-      "addiu $at, $a0, 4097\n"
-      "st.b $w0, 0($at)\n"
-      "addiu $at, $a0, 4098\n"
-      "st.h $w0, 0($at)\n"
-      "addiu $at, $a0, 4100\n"
-      "st.w $w0, 0($at)\n"
-      "addiu $at, $a0, 4104\n"
-      "st.d $w0, 0($at)\n"
-      "addiu $at, $a0, 0x7FFC\n"
-      "st.w $w0, 0($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "st.d $w0, 8($at)\n"
-      "aui $at, $a0, 0x1\n"
-      "st.d $w0, 0($at)\n"
-      "aui $at, $a0, 0x1234\n"
-      "addiu $at, $at, 0x6000\n"
-      "st.d $w0, -2440($at) # 0xF678\n"
-      "aui $at, $a0, 0x1235\n"
-      "st.d $w0, 0x78($at)\n"
-      "st.d $w0, -256($a0)\n"
-      "st.b $w0, -511($a0)\n"
-      "addiu $at, $a0, -513\n"
-      "st.b $w0, 0($at)\n"
-      "st.h $w0, -1022($a0)\n"
-      "addiu $at, $a0, -1026\n"
-      "st.h $w0, 0($at)\n"
-      "st.w $w0, -2044($a0)\n"
-      "addiu $at, $a0, -2052\n"
-      "st.w $w0, 0($at)\n"
-      "st.d $w0, -4096($a0)\n"
-      "addiu $at, $a0, -4104\n"
-      "st.d $w0, 0($at)\n"
-      "addiu $at, $a0, -32768\n"
-      "st.d $w0, 0($at)\n"
-      "aui $at, $a0, 0xABCE\n"
-      "addiu $at, $at, -8192 # 0xE000\n"
-      "st.d $w0, 0xF00($at)\n"
-      "aui $at, $a0, 0x8000\n"
-      "addiu $at, $at, -21504 # 0xAC00\n"
-      "st.b $w0, -51($at) # 0xFFCD\n";
-  DriverStr(expected, "StoreQToOffset");
-}
-
-//////////////
-// BRANCHES //
-//////////////
-
-TEST_F(AssemblerMIPS32r6Test, Bc) {
-  BranchHelper(&mips::MipsAssembler::Bc, "Bc", /* has_slot= */ false);
-}
-
-TEST_F(AssemblerMIPS32r6Test, Balc) {
-  BranchHelper(&mips::MipsAssembler::Balc, "Balc", /* has_slot= */ false);
-}
-
-TEST_F(AssemblerMIPS32r6Test, Beqc) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Beqc, "Beqc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bnec) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bnec, "Bnec");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Beqzc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Beqzc, "Beqzc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bnezc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bnezc, "Bnezc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bltzc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bltzc, "Bltzc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bgezc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgezc, "Bgezc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Blezc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Blezc, "Blezc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bgtzc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgtzc, "Bgtzc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bltc) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltc, "Bltc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bgec) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgec, "Bgec");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bltuc) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltuc, "Bltuc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bgeuc) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeuc, "Bgeuc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bc1eqz) {
-  BranchFpuCondHelper(&mips::MipsAssembler::Bc1eqz, "Bc1eqz");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bc1nez) {
-  BranchFpuCondHelper(&mips::MipsAssembler::Bc1nez, "Bc1nez");
-}
-
-TEST_F(AssemblerMIPS32r6Test, B) {
-  BranchHelper(&mips::MipsAssembler::B, "Bc", /* has_slot= */ false);
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bal) {
-  BranchHelper(&mips::MipsAssembler::Bal, "Balc", /* has_slot= */ false);
-}
-
-TEST_F(AssemblerMIPS32r6Test, Beq) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beqc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bne) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bnec");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Beqz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqzc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bnez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnezc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bltz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltzc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bgez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgezc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Blez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blezc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bgtz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtzc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Blt) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Bltc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bge) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bgec");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bltu) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltuc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Bgeu) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeuc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBc) {
-  BranchHelper(&mips::MipsAssembler::Bc, "Bc", /* has_slot= */ false, /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBalc) {
-  BranchHelper(&mips::MipsAssembler::Balc, "Balc", /* has_slot= */ false, /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBeqc) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Beqc, "Beqc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBnec) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bnec, "Bnec", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBeqzc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Beqzc, "Beqzc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBnezc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bnezc, "Bnezc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBltzc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bltzc, "Bltzc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBgezc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgezc, "Bgezc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBlezc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Blezc, "Blezc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBgtzc) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgtzc, "Bgtzc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBltc) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltc, "Bltc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBgec) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgec, "Bgec", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBltuc) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltuc, "Bltuc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBgeuc) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeuc, "Bgeuc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBc1eqz) {
-  BranchFpuCondHelper(&mips::MipsAssembler::Bc1eqz, "Bc1eqz", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBc1nez) {
-  BranchFpuCondHelper(&mips::MipsAssembler::Bc1nez, "Bc1nez", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareB) {
-  BranchHelper(&mips::MipsAssembler::B, "B", /* has_slot= */ true, /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBal) {
-  BranchHelper(&mips::MipsAssembler::Bal, "Bal", /* has_slot= */ true, /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBeq) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBne) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBeqz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqz", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBnez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnez", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBltz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBgez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBlez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBgtz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBlt) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Blt", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBge) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bge", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBltu) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltu", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, BareBgeu) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeu", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS32r6Test, LongBeqc) {
-  mips::MipsLabel label;
-  __ Beqc(mips::A0, mips::A1, &label);
-  constexpr uint32_t kAdduCount1 = (1u << 15) + 1;
-  for (uint32_t i = 0; i != kAdduCount1; ++i) {
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-  }
-  __ Bind(&label);
-  constexpr uint32_t kAdduCount2 = (1u << 15) + 1;
-  for (uint32_t i = 0; i != kAdduCount2; ++i) {
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-  }
-  __ Beqc(mips::A2, mips::A3, &label);
-
-  uint32_t offset_forward = 2 + kAdduCount1;  // 2: account for auipc and jic.
-  offset_forward <<= 2;
-  offset_forward += (offset_forward & 0x8000) << 1;  // Account for sign extension in jic.
-
-  uint32_t offset_back = -(kAdduCount2 + 1);  // 1: account for bnec.
-  offset_back <<= 2;
-  offset_back += (offset_back & 0x8000) << 1;  // Account for sign extension in jic.
-
-  std::ostringstream oss;
-  oss <<
-      ".set noreorder\n"
-      "bnec $a0, $a1, 1f\n"
-      "auipc $at, 0x" << std::hex << High16Bits(offset_forward) << "\n"
-      "jic $at, 0x" << std::hex << Low16Bits(offset_forward) << "\n"
-      "1:\n" <<
-      RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") <<
-      "2:\n" <<
-      RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") <<
-      "bnec $a2, $a3, 3f\n"
-      "auipc $at, 0x" << std::hex << High16Bits(offset_back) << "\n"
-      "jic $at, 0x" << std::hex << Low16Bits(offset_back) << "\n"
-      "3:\n";
-  std::string expected = oss.str();
-  DriverStr(expected, "LongBeqc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, LongBeqzc) {
-  constexpr uint32_t kNopCount1 = (1u << 20) + 1;
-  constexpr uint32_t kNopCount2 = (1u << 20) + 1;
-  constexpr uint32_t kRequiredCapacity = (kNopCount1 + kNopCount2 + 6u) * 4u;
-  ASSERT_LT(__ GetBuffer()->Capacity(), kRequiredCapacity);
-  __ GetBuffer()->ExtendCapacity(kRequiredCapacity);
-  mips::MipsLabel label;
-  __ Beqzc(mips::A0, &label);
-  for (uint32_t i = 0; i != kNopCount1; ++i) {
-    __ Nop();
-  }
-  __ Bind(&label);
-  for (uint32_t i = 0; i != kNopCount2; ++i) {
-    __ Nop();
-  }
-  __ Beqzc(mips::A2, &label);
-
-  uint32_t offset_forward = 2 + kNopCount1;  // 2: account for auipc and jic.
-  offset_forward <<= 2;
-  offset_forward += (offset_forward & 0x8000) << 1;  // Account for sign extension in jic.
-
-  uint32_t offset_back = -(kNopCount2 + 1);  // 1: account for bnezc.
-  offset_back <<= 2;
-  offset_back += (offset_back & 0x8000) << 1;  // Account for sign extension in jic.
-
-  // Note, we're using the ".fill" directive to tell the assembler to generate many NOPs
-  // instead of generating them ourselves in the source code. This saves test time.
-  std::ostringstream oss;
-  oss <<
-      ".set noreorder\n"
-      "bnezc $a0, 1f\n"
-      "auipc $at, 0x" << std::hex << High16Bits(offset_forward) << "\n"
-      "jic $at, 0x" << std::hex << Low16Bits(offset_forward) << "\n"
-      "1:\n" <<
-      ".fill 0x" << std::hex << kNopCount1 << " , 4, 0\n"
-      "2:\n" <<
-      ".fill 0x" << std::hex << kNopCount2 << " , 4, 0\n"
-      "bnezc $a2, 3f\n"
-      "auipc $at, 0x" << std::hex << High16Bits(offset_back) << "\n"
-      "jic $at, 0x" << std::hex << Low16Bits(offset_back) << "\n"
-      "3:\n";
-  std::string expected = oss.str();
-  DriverStr(expected, "LongBeqzc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, LongBc) {
-  constexpr uint32_t kNopCount1 = (1u << 25) + 1;
-  constexpr uint32_t kNopCount2 = (1u << 25) + 1;
-  constexpr uint32_t kRequiredCapacity = (kNopCount1 + kNopCount2 + 6u) * 4u;
-  ASSERT_LT(__ GetBuffer()->Capacity(), kRequiredCapacity);
-  __ GetBuffer()->ExtendCapacity(kRequiredCapacity);
-  mips::MipsLabel label1, label2;
-  __ Bc(&label1);
-  for (uint32_t i = 0; i != kNopCount1; ++i) {
-    __ Nop();
-  }
-  __ Bind(&label1);
-  __ Bc(&label2);
-  for (uint32_t i = 0; i != kNopCount2; ++i) {
-    __ Nop();
-  }
-  __ Bind(&label2);
-  __ Bc(&label1);
-
-  uint32_t offset_forward1 = 2 + kNopCount1;  // 2: account for auipc and jic.
-  offset_forward1 <<= 2;
-  offset_forward1 += (offset_forward1 & 0x8000) << 1;  // Account for sign extension in jic.
-
-  uint32_t offset_forward2 = 2 + kNopCount2;  // 2: account for auipc and jic.
-  offset_forward2 <<= 2;
-  offset_forward2 += (offset_forward2 & 0x8000) << 1;  // Account for sign extension in jic.
-
-  uint32_t offset_back = -(2 + kNopCount2);  // 2: account for auipc and jic.
-  offset_back <<= 2;
-  offset_back += (offset_back & 0x8000) << 1;  // Account for sign extension in jic.
-
-  // Note, we're using the ".fill" directive to tell the assembler to generate many NOPs
-  // instead of generating them ourselves in the source code. This saves a few minutes
-  // of test time.
-  std::ostringstream oss;
-  oss <<
-      ".set noreorder\n"
-      "auipc $at, 0x" << std::hex << High16Bits(offset_forward1) << "\n"
-      "jic $at, 0x" << std::hex << Low16Bits(offset_forward1) << "\n"
-      ".fill 0x" << std::hex << kNopCount1 << " , 4, 0\n"
-      "1:\n"
-      "auipc $at, 0x" << std::hex << High16Bits(offset_forward2) << "\n"
-      "jic $at, 0x" << std::hex << Low16Bits(offset_forward2) << "\n"
-      ".fill 0x" << std::hex << kNopCount2 << " , 4, 0\n"
-      "2:\n"
-      "auipc $at, 0x" << std::hex << High16Bits(offset_back) << "\n"
-      "jic $at, 0x" << std::hex << Low16Bits(offset_back) << "\n";
-  std::string expected = oss.str();
-  DriverStr(expected, "LongBc");
-}
-
-TEST_F(AssemblerMIPS32r6Test, ImpossibleReordering) {
-  mips::MipsLabel label;
-  __ SetReorder(true);
-  __ Bind(&label);
-
-  __ CmpLtD(mips::F0, mips::F2, mips::F4);
-  __ Bc1nez(mips::F0, &label);  // F0 dependency.
-
-  __ MulD(mips::F10, mips::F2, mips::F4);
-  __ Bc1eqz(mips::F10, &label);  // F10 dependency.
-
-  std::string expected =
-      ".set noreorder\n"
-      "1:\n"
-
-      "cmp.lt.d $f0, $f2, $f4\n"
-      "bc1nez $f0, 1b\n"
-      "nop\n"
-
-      "mul.d $f10, $f2, $f4\n"
-      "bc1eqz $f10, 1b\n"
-      "nop\n";
-  DriverStr(expected, "ImpossibleReordering");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Reordering) {
-  mips::MipsLabel label;
-  __ SetReorder(true);
-  __ Bind(&label);
-
-  __ CmpLtD(mips::F0, mips::F2, mips::F4);
-  __ Bc1nez(mips::F2, &label);
-
-  __ MulD(mips::F0, mips::F2, mips::F4);
-  __ Bc1eqz(mips::F4, &label);
-
-  std::string expected =
-      ".set noreorder\n"
-      "1:\n"
-
-      "bc1nez $f2, 1b\n"
-      "cmp.lt.d $f0, $f2, $f4\n"
-
-      "bc1eqz $f4, 1b\n"
-      "mul.d $f0, $f2, $f4\n";
-  DriverStr(expected, "Reordering");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SetReorder) {
-  mips::MipsLabel label1, label2, label3, label4;
-
-  __ SetReorder(true);
-  __ Bind(&label1);
-  __ Addu(mips::T0, mips::T1, mips::T2);
-  __ Bc1nez(mips::F0, &label1);
-
-  __ SetReorder(false);
-  __ Bind(&label2);
-  __ Addu(mips::T0, mips::T1, mips::T2);
-  __ Bc1nez(mips::F0, &label2);
-
-  __ SetReorder(true);
-  __ Bind(&label3);
-  __ Addu(mips::T0, mips::T1, mips::T2);
-  __ Bc1eqz(mips::F0, &label3);
-
-  __ SetReorder(false);
-  __ Bind(&label4);
-  __ Addu(mips::T0, mips::T1, mips::T2);
-  __ Bc1eqz(mips::F0, &label4);
-
-  std::string expected =
-      ".set noreorder\n"
-      "1:\n"
-      "bc1nez $f0, 1b\n"
-      "addu $t0, $t1, $t2\n"
-
-      "2:\n"
-      "addu $t0, $t1, $t2\n"
-      "bc1nez $f0, 2b\n"
-      "nop\n"
-
-      "3:\n"
-      "bc1eqz $f0, 3b\n"
-      "addu $t0, $t1, $t2\n"
-
-      "4:\n"
-      "addu $t0, $t1, $t2\n"
-      "bc1eqz $f0, 4b\n"
-      "nop\n";
-  DriverStr(expected, "SetReorder");
-}
-
-TEST_F(AssemblerMIPS32r6Test, ReorderPatchedInstruction) {
-  __ SetReorder(true);
-  mips::MipsLabel label1, label2;
-  mips::MipsLabel patcher_label1, patcher_label2, patcher_label3, patcher_label4, patcher_label5;
-  __ Lw(mips::V0, mips::A0, 0x5678, &patcher_label1);
-  __ Bc1eqz(mips::F0, &label1);
-  constexpr uint32_t kAdduCount1 = 63;
-  for (size_t i = 0; i != kAdduCount1; ++i) {
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-  }
-  __ Bind(&label1);
-  __ Sw(mips::V0, mips::A0, 0x5678, &patcher_label2);
-  __ Bc1nez(mips::F2, &label2);
-  constexpr uint32_t kAdduCount2 = 64;
-  for (size_t i = 0; i != kAdduCount2; ++i) {
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-  }
-  __ Bind(&label2);
-  __ Addiu(mips::V0, mips::A0, 0x5678, &patcher_label3);
-  __ Bc1eqz(mips::F4, &label1);
-  __ Lw(mips::V0, mips::A0, 0x5678, &patcher_label4);
-  __ Jalr(mips::T9);
-  __ Sw(mips::V0, mips::A0, 0x5678, &patcher_label5);
-  __ Bltc(mips::V0, mips::V1, &label2);
-  __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-
-  std::string expected =
-      ".set noreorder\n"
-      "bc1eqz $f0, 1f\n"
-      "lw $v0, 0x5678($a0)\n" +
-      RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
-      "1:\n"
-      "bc1nez $f2, 2f\n"
-      "sw $v0, 0x5678($a0)\n" +
-      RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
-      "2:\n"
-      "bc1eqz $f4, 1b\n"
-      "addiu $v0, $a0, 0x5678\n"
-      "jalr $t9\n"
-      "lw $v0, 0x5678($a0)\n"
-      "sw $v0, 0x5678($a0)\n"
-      "bltc $v0, $v1, 2b\n"
-      "nop\n"
-      "addu $zero, $zero, $zero\n";
-  DriverStr(expected, "ReorderPatchedInstruction");
-  EXPECT_EQ(__ GetLabelLocation(&patcher_label1), 1 * 4u);
-  EXPECT_EQ(__ GetLabelLocation(&patcher_label2), (kAdduCount1 + 3) * 4u);
-  EXPECT_EQ(__ GetLabelLocation(&patcher_label3), (kAdduCount1 + kAdduCount2 + 5) * 4u);
-  EXPECT_EQ(__ GetLabelLocation(&patcher_label4), (kAdduCount1 + kAdduCount2 + 7) * 4u);
-  EXPECT_EQ(__ GetLabelLocation(&patcher_label5), (kAdduCount1 + kAdduCount2 + 8) * 4u);
-}
-
-TEST_F(AssemblerMIPS32r6Test, LongBranchReorder) {
-  mips::MipsLabel label, patcher_label1, patcher_label2;
-  __ SetReorder(true);
-  __ Addiu(mips::T0, mips::T1, 0x5678, &patcher_label1);
-  __ Bc1nez(mips::F0, &label);
-  constexpr uint32_t kAdduCount1 = (1u << 15) + 1;
-  for (uint32_t i = 0; i != kAdduCount1; ++i) {
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-  }
-  __ Bind(&label);
-  constexpr uint32_t kAdduCount2 = (1u << 15) + 1;
-  for (uint32_t i = 0; i != kAdduCount2; ++i) {
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-  }
-  __ Addiu(mips::T0, mips::T1, 0x5678, &patcher_label2);
-  __ Bc1eqz(mips::F0, &label);
-
-  uint32_t offset_forward = 2 + kAdduCount1;  // 2: account for auipc and jic.
-  offset_forward <<= 2;
-  offset_forward += (offset_forward & 0x8000) << 1;  // Account for sign extension in jic.
-
-  uint32_t offset_back = -(kAdduCount2 + 2);  // 2: account for subu and bc1nez.
-  offset_back <<= 2;
-  offset_back += (offset_back & 0x8000) << 1;  // Account for sign extension in jic.
-
-  std::ostringstream oss;
-  oss <<
-      ".set noreorder\n"
-      "addiu $t0, $t1, 0x5678\n"
-      "bc1eqz $f0, 1f\n"
-      "auipc $at, 0x" << std::hex << High16Bits(offset_forward) << "\n"
-      "jic $at, 0x" << std::hex << Low16Bits(offset_forward) << "\n"
-      "1:\n" <<
-      RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") <<
-      "2:\n" <<
-      RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") <<
-      "addiu $t0, $t1, 0x5678\n"
-      "bc1nez $f0, 3f\n"
-      "auipc $at, 0x" << std::hex << High16Bits(offset_back) << "\n"
-      "jic $at, 0x" << std::hex << Low16Bits(offset_back) << "\n"
-      "3:\n";
-  std::string expected = oss.str();
-  DriverStr(expected, "LongBranchReorder");
-  EXPECT_EQ(__ GetLabelLocation(&patcher_label1), 0 * 4u);
-  EXPECT_EQ(__ GetLabelLocation(&patcher_label2), (kAdduCount1 + kAdduCount2 + 4) * 4u);
-}
-
-///////////////////////
-// Loading Constants //
-///////////////////////
-
-TEST_F(AssemblerMIPS32r6Test, LoadFarthestNearLabelAddress) {
-  mips::MipsLabel label;
-  __ LoadLabelAddress(mips::V0, mips::ZERO, &label);
-  constexpr size_t kAdduCount = 0x3FFDE;
-  for (size_t i = 0; i != kAdduCount; ++i) {
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-  }
-  __ Bind(&label);
-
-  std::string expected =
-      "lapc $v0, 1f\n" +
-      RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
-      "1:\n";
-  DriverStr(expected, "LoadFarthestNearLabelAddress");
-}
-
-TEST_F(AssemblerMIPS32r6Test, LoadNearestFarLabelAddress) {
-  mips::MipsLabel label;
-  __ LoadLabelAddress(mips::V0, mips::ZERO, &label);
-  constexpr size_t kAdduCount = 0x3FFDF;
-  for (size_t i = 0; i != kAdduCount; ++i) {
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-  }
-  __ Bind(&label);
-
-  std::string expected =
-      "1:\n"
-      "auipc $at, %hi(2f - 1b)\n"
-      "addiu $v0, $at, %lo(2f - 1b)\n" +
-      RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
-      "2:\n";
-  DriverStr(expected, "LoadNearestFarLabelAddress");
-}
-
-TEST_F(AssemblerMIPS32r6Test, LoadFarthestNearLiteral) {
-  mips::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
-  __ LoadLiteral(mips::V0, mips::ZERO, literal);
-  constexpr size_t kAdduCount = 0x3FFDE;
-  for (size_t i = 0; i != kAdduCount; ++i) {
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-  }
-
-  std::string expected =
-      "lwpc $v0, 1f\n" +
-      RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
-      "1:\n"
-      ".word 0x12345678\n";
-  DriverStr(expected, "LoadFarthestNearLiteral");
-}
-
-TEST_F(AssemblerMIPS32r6Test, LoadNearestFarLiteral) {
-  mips::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
-  __ LoadLiteral(mips::V0, mips::ZERO, literal);
-  constexpr size_t kAdduCount = 0x3FFDF;
-  for (size_t i = 0; i != kAdduCount; ++i) {
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-  }
-
-  std::string expected =
-      "1:\n"
-      "auipc $at, %hi(2f - 1b)\n"
-      "lw $v0, %lo(2f - 1b)($at)\n" +
-      RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
-      "2:\n"
-      ".word 0x12345678\n";
-  DriverStr(expected, "LoadNearestFarLiteral");
-}
-
-// MSA instructions.
-
-TEST_F(AssemblerMIPS32r6Test, AndV) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::AndV, "and.v ${reg1}, ${reg2}, ${reg3}"), "and.v");
-}
-
-TEST_F(AssemblerMIPS32r6Test, OrV) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::OrV, "or.v ${reg1}, ${reg2}, ${reg3}"), "or.v");
-}
-
-TEST_F(AssemblerMIPS32r6Test, NorV) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::NorV, "nor.v ${reg1}, ${reg2}, ${reg3}"), "nor.v");
-}
-
-TEST_F(AssemblerMIPS32r6Test, XorV) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::XorV, "xor.v ${reg1}, ${reg2}, ${reg3}"), "xor.v");
-}
-
-TEST_F(AssemblerMIPS32r6Test, AddvB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::AddvB, "addv.b ${reg1}, ${reg2}, ${reg3}"), "addv.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, AddvH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::AddvH, "addv.h ${reg1}, ${reg2}, ${reg3}"), "addv.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, AddvW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::AddvW, "addv.w ${reg1}, ${reg2}, ${reg3}"), "addv.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, AddvD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::AddvD, "addv.d ${reg1}, ${reg2}, ${reg3}"), "addv.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SubvB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::SubvB, "subv.b ${reg1}, ${reg2}, ${reg3}"), "subv.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SubvH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::SubvH, "subv.h ${reg1}, ${reg2}, ${reg3}"), "subv.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SubvW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::SubvW, "subv.w ${reg1}, ${reg2}, ${reg3}"), "subv.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SubvD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::SubvD, "subv.d ${reg1}, ${reg2}, ${reg3}"), "subv.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Asub_sB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Asub_sB, "asub_s.b ${reg1}, ${reg2}, ${reg3}"),
-            "asub_s.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Asub_sH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Asub_sH, "asub_s.h ${reg1}, ${reg2}, ${reg3}"),
-            "asub_s.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Asub_sW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Asub_sW, "asub_s.w ${reg1}, ${reg2}, ${reg3}"),
-            "asub_s.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Asub_sD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Asub_sD, "asub_s.d ${reg1}, ${reg2}, ${reg3}"),
-            "asub_s.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Asub_uB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Asub_uB, "asub_u.b ${reg1}, ${reg2}, ${reg3}"),
-            "asub_u.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Asub_uH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Asub_uH, "asub_u.h ${reg1}, ${reg2}, ${reg3}"),
-            "asub_u.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Asub_uW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Asub_uW, "asub_u.w ${reg1}, ${reg2}, ${reg3}"),
-            "asub_u.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Asub_uD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Asub_uD, "asub_u.d ${reg1}, ${reg2}, ${reg3}"),
-            "asub_u.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, MulvB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::MulvB, "mulv.b ${reg1}, ${reg2}, ${reg3}"), "mulv.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, MulvH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::MulvH, "mulv.h ${reg1}, ${reg2}, ${reg3}"), "mulv.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, MulvW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::MulvW, "mulv.w ${reg1}, ${reg2}, ${reg3}"), "mulv.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, MulvD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::MulvD, "mulv.d ${reg1}, ${reg2}, ${reg3}"), "mulv.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Div_sB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Div_sB, "div_s.b ${reg1}, ${reg2}, ${reg3}"),
-            "div_s.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Div_sH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Div_sH, "div_s.h ${reg1}, ${reg2}, ${reg3}"),
-            "div_s.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Div_sW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Div_sW, "div_s.w ${reg1}, ${reg2}, ${reg3}"),
-            "div_s.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Div_sD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Div_sD, "div_s.d ${reg1}, ${reg2}, ${reg3}"),
-            "div_s.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Div_uB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Div_uB, "div_u.b ${reg1}, ${reg2}, ${reg3}"),
-            "div_u.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Div_uH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Div_uH, "div_u.h ${reg1}, ${reg2}, ${reg3}"),
-            "div_u.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Div_uW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Div_uW, "div_u.w ${reg1}, ${reg2}, ${reg3}"),
-            "div_u.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Div_uD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Div_uD, "div_u.d ${reg1}, ${reg2}, ${reg3}"),
-            "div_u.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Mod_sB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Mod_sB, "mod_s.b ${reg1}, ${reg2}, ${reg3}"),
-            "mod_s.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Mod_sH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Mod_sH, "mod_s.h ${reg1}, ${reg2}, ${reg3}"),
-            "mod_s.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Mod_sW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Mod_sW, "mod_s.w ${reg1}, ${reg2}, ${reg3}"),
-            "mod_s.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Mod_sD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Mod_sD, "mod_s.d ${reg1}, ${reg2}, ${reg3}"),
-            "mod_s.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Mod_uB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Mod_uB, "mod_u.b ${reg1}, ${reg2}, ${reg3}"),
-            "mod_u.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Mod_uH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Mod_uH, "mod_u.h ${reg1}, ${reg2}, ${reg3}"),
-            "mod_u.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Mod_uW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Mod_uW, "mod_u.w ${reg1}, ${reg2}, ${reg3}"),
-            "mod_u.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Mod_uD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Mod_uD, "mod_u.d ${reg1}, ${reg2}, ${reg3}"),
-            "mod_u.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Add_aB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Add_aB, "add_a.b ${reg1}, ${reg2}, ${reg3}"),
-            "add_a.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Add_aH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Add_aH, "add_a.h ${reg1}, ${reg2}, ${reg3}"),
-            "add_a.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Add_aW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Add_aW, "add_a.w ${reg1}, ${reg2}, ${reg3}"),
-            "add_a.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Add_aD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Add_aD, "add_a.d ${reg1}, ${reg2}, ${reg3}"),
-            "add_a.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Ave_sB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Ave_sB, "ave_s.b ${reg1}, ${reg2}, ${reg3}"),
-            "ave_s.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Ave_sH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Ave_sH, "ave_s.h ${reg1}, ${reg2}, ${reg3}"),
-            "ave_s.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Ave_sW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Ave_sW, "ave_s.w ${reg1}, ${reg2}, ${reg3}"),
-            "ave_s.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Ave_sD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Ave_sD, "ave_s.d ${reg1}, ${reg2}, ${reg3}"),
-            "ave_s.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Ave_uB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Ave_uB, "ave_u.b ${reg1}, ${reg2}, ${reg3}"),
-            "ave_u.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Ave_uH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Ave_uH, "ave_u.h ${reg1}, ${reg2}, ${reg3}"),
-            "ave_u.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Ave_uW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Ave_uW, "ave_u.w ${reg1}, ${reg2}, ${reg3}"),
-            "ave_u.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Ave_uD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Ave_uD, "ave_u.d ${reg1}, ${reg2}, ${reg3}"),
-            "ave_u.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Aver_sB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Aver_sB, "aver_s.b ${reg1}, ${reg2}, ${reg3}"),
-            "aver_s.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Aver_sH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Aver_sH, "aver_s.h ${reg1}, ${reg2}, ${reg3}"),
-            "aver_s.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Aver_sW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Aver_sW, "aver_s.w ${reg1}, ${reg2}, ${reg3}"),
-            "aver_s.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Aver_sD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Aver_sD, "aver_s.d ${reg1}, ${reg2}, ${reg3}"),
-            "aver_s.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Aver_uB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Aver_uB, "aver_u.b ${reg1}, ${reg2}, ${reg3}"),
-            "aver_u.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Aver_uH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Aver_uH, "aver_u.h ${reg1}, ${reg2}, ${reg3}"),
-            "aver_u.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Aver_uW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Aver_uW, "aver_u.w ${reg1}, ${reg2}, ${reg3}"),
-            "aver_u.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Aver_uD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Aver_uD, "aver_u.d ${reg1}, ${reg2}, ${reg3}"),
-            "aver_u.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Max_sB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Max_sB, "max_s.b ${reg1}, ${reg2}, ${reg3}"),
-            "max_s.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Max_sH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Max_sH, "max_s.h ${reg1}, ${reg2}, ${reg3}"),
-            "max_s.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Max_sW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Max_sW, "max_s.w ${reg1}, ${reg2}, ${reg3}"),
-            "max_s.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Max_sD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Max_sD, "max_s.d ${reg1}, ${reg2}, ${reg3}"),
-            "max_s.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Max_uB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Max_uB, "max_u.b ${reg1}, ${reg2}, ${reg3}"),
-            "max_u.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Max_uH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Max_uH, "max_u.h ${reg1}, ${reg2}, ${reg3}"),
-            "max_u.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Max_uW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Max_uW, "max_u.w ${reg1}, ${reg2}, ${reg3}"),
-            "max_u.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Max_uD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Max_uD, "max_u.d ${reg1}, ${reg2}, ${reg3}"),
-            "max_u.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Min_sB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Min_sB, "min_s.b ${reg1}, ${reg2}, ${reg3}"),
-            "min_s.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Min_sH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Min_sH, "min_s.h ${reg1}, ${reg2}, ${reg3}"),
-            "min_s.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Min_sW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Min_sW, "min_s.w ${reg1}, ${reg2}, ${reg3}"),
-            "min_s.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Min_sD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Min_sD, "min_s.d ${reg1}, ${reg2}, ${reg3}"),
-            "min_s.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Min_uB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Min_uB, "min_u.b ${reg1}, ${reg2}, ${reg3}"),
-            "min_u.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Min_uH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Min_uH, "min_u.h ${reg1}, ${reg2}, ${reg3}"),
-            "min_u.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Min_uW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Min_uW, "min_u.w ${reg1}, ${reg2}, ${reg3}"),
-            "min_u.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Min_uD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Min_uD, "min_u.d ${reg1}, ${reg2}, ${reg3}"),
-            "min_u.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, FaddW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::FaddW, "fadd.w ${reg1}, ${reg2}, ${reg3}"), "fadd.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, FaddD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::FaddD, "fadd.d ${reg1}, ${reg2}, ${reg3}"), "fadd.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, FsubW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::FsubW, "fsub.w ${reg1}, ${reg2}, ${reg3}"), "fsub.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, FsubD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::FsubD, "fsub.d ${reg1}, ${reg2}, ${reg3}"), "fsub.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, FmulW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::FmulW, "fmul.w ${reg1}, ${reg2}, ${reg3}"), "fmul.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, FmulD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::FmulD, "fmul.d ${reg1}, ${reg2}, ${reg3}"), "fmul.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, FdivW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::FdivW, "fdiv.w ${reg1}, ${reg2}, ${reg3}"), "fdiv.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, FdivD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::FdivD, "fdiv.d ${reg1}, ${reg2}, ${reg3}"), "fdiv.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, FmaxW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::FmaxW, "fmax.w ${reg1}, ${reg2}, ${reg3}"), "fmax.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, FmaxD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::FmaxD, "fmax.d ${reg1}, ${reg2}, ${reg3}"), "fmax.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, FminW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::FminW, "fmin.w ${reg1}, ${reg2}, ${reg3}"), "fmin.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, FminD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::FminD, "fmin.d ${reg1}, ${reg2}, ${reg3}"), "fmin.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Ffint_sW) {
-  DriverStr(RepeatVV(&mips::MipsAssembler::Ffint_sW, "ffint_s.w ${reg1}, ${reg2}"), "ffint_s.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Ffint_sD) {
-  DriverStr(RepeatVV(&mips::MipsAssembler::Ffint_sD, "ffint_s.d ${reg1}, ${reg2}"), "ffint_s.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Ftint_sW) {
-  DriverStr(RepeatVV(&mips::MipsAssembler::Ftint_sW, "ftint_s.w ${reg1}, ${reg2}"), "ftint_s.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Ftint_sD) {
-  DriverStr(RepeatVV(&mips::MipsAssembler::Ftint_sD, "ftint_s.d ${reg1}, ${reg2}"), "ftint_s.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SllB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::SllB, "sll.b ${reg1}, ${reg2}, ${reg3}"), "sll.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SllH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::SllH, "sll.h ${reg1}, ${reg2}, ${reg3}"), "sll.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SllW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::SllW, "sll.w ${reg1}, ${reg2}, ${reg3}"), "sll.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SllD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::SllD, "sll.d ${reg1}, ${reg2}, ${reg3}"), "sll.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SraB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::SraB, "sra.b ${reg1}, ${reg2}, ${reg3}"), "sra.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SraH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::SraH, "sra.h ${reg1}, ${reg2}, ${reg3}"), "sra.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SraW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::SraW, "sra.w ${reg1}, ${reg2}, ${reg3}"), "sra.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SraD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::SraD, "sra.d ${reg1}, ${reg2}, ${reg3}"), "sra.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SrlB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::SrlB, "srl.b ${reg1}, ${reg2}, ${reg3}"), "srl.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SrlH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::SrlH, "srl.h ${reg1}, ${reg2}, ${reg3}"), "srl.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SrlW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::SrlW, "srl.w ${reg1}, ${reg2}, ${reg3}"), "srl.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SrlD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::SrlD, "srl.d ${reg1}, ${reg2}, ${reg3}"), "srl.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SlliB) {
-  DriverStr(RepeatVVIb(&mips::MipsAssembler::SlliB, 3, "slli.b ${reg1}, ${reg2}, {imm}"), "slli.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SlliH) {
-  DriverStr(RepeatVVIb(&mips::MipsAssembler::SlliH, 4, "slli.h ${reg1}, ${reg2}, {imm}"), "slli.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SlliW) {
-  DriverStr(RepeatVVIb(&mips::MipsAssembler::SlliW, 5, "slli.w ${reg1}, ${reg2}, {imm}"), "slli.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SlliD) {
-  DriverStr(RepeatVVIb(&mips::MipsAssembler::SlliD, 6, "slli.d ${reg1}, ${reg2}, {imm}"), "slli.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, MoveV) {
-  DriverStr(RepeatVV(&mips::MipsAssembler::MoveV, "move.v ${reg1}, ${reg2}"), "move.v");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SplatiB) {
-  DriverStr(RepeatVVIb(&mips::MipsAssembler::SplatiB, 4, "splati.b ${reg1}, ${reg2}[{imm}]"),
-            "splati.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SplatiH) {
-  DriverStr(RepeatVVIb(&mips::MipsAssembler::SplatiH, 3, "splati.h ${reg1}, ${reg2}[{imm}]"),
-            "splati.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SplatiW) {
-  DriverStr(RepeatVVIb(&mips::MipsAssembler::SplatiW, 2, "splati.w ${reg1}, ${reg2}[{imm}]"),
-            "splati.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, SplatiD) {
-  DriverStr(RepeatVVIb(&mips::MipsAssembler::SplatiD, 1, "splati.d ${reg1}, ${reg2}[{imm}]"),
-            "splati.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Copy_sB) {
-  DriverStr(RepeatRVIb(&mips::MipsAssembler::Copy_sB, 4, "copy_s.b ${reg1}, ${reg2}[{imm}]"),
-            "copy_s.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Copy_sH) {
-  DriverStr(RepeatRVIb(&mips::MipsAssembler::Copy_sH, 3, "copy_s.h ${reg1}, ${reg2}[{imm}]"),
-            "copy_s.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Copy_sW) {
-  DriverStr(RepeatRVIb(&mips::MipsAssembler::Copy_sW, 2, "copy_s.w ${reg1}, ${reg2}[{imm}]"),
-            "copy_s.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Copy_uB) {
-  DriverStr(RepeatRVIb(&mips::MipsAssembler::Copy_uB, 4, "copy_u.b ${reg1}, ${reg2}[{imm}]"),
-            "copy_u.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Copy_uH) {
-  DriverStr(RepeatRVIb(&mips::MipsAssembler::Copy_uH, 3, "copy_u.h ${reg1}, ${reg2}[{imm}]"),
-            "copy_u.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, InsertB) {
-  DriverStr(RepeatVRIb(&mips::MipsAssembler::InsertB, 4, "insert.b ${reg1}[{imm}], ${reg2}"),
-            "insert.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, InsertH) {
-  DriverStr(RepeatVRIb(&mips::MipsAssembler::InsertH, 3, "insert.h ${reg1}[{imm}], ${reg2}"),
-            "insert.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, InsertW) {
-  DriverStr(RepeatVRIb(&mips::MipsAssembler::InsertW, 2, "insert.w ${reg1}[{imm}], ${reg2}"),
-            "insert.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, FillB) {
-  DriverStr(RepeatVR(&mips::MipsAssembler::FillB, "fill.b ${reg1}, ${reg2}"), "fill.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, FillH) {
-  DriverStr(RepeatVR(&mips::MipsAssembler::FillH, "fill.h ${reg1}, ${reg2}"), "fill.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, FillW) {
-  DriverStr(RepeatVR(&mips::MipsAssembler::FillW, "fill.w ${reg1}, ${reg2}"), "fill.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, PcntB) {
-  DriverStr(RepeatVV(&mips::MipsAssembler::PcntB, "pcnt.b ${reg1}, ${reg2}"), "pcnt.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, PcntH) {
-  DriverStr(RepeatVV(&mips::MipsAssembler::PcntH, "pcnt.h ${reg1}, ${reg2}"), "pcnt.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, PcntW) {
-  DriverStr(RepeatVV(&mips::MipsAssembler::PcntW, "pcnt.w ${reg1}, ${reg2}"), "pcnt.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, PcntD) {
-  DriverStr(RepeatVV(&mips::MipsAssembler::PcntD, "pcnt.d ${reg1}, ${reg2}"), "pcnt.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, LdiB) {
-  DriverStr(RepeatVIb(&mips::MipsAssembler::LdiB, -8, "ldi.b ${reg}, {imm}"), "ldi.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, LdiH) {
-  DriverStr(RepeatVIb(&mips::MipsAssembler::LdiH, -10, "ldi.h ${reg}, {imm}"), "ldi.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, LdiW) {
-  DriverStr(RepeatVIb(&mips::MipsAssembler::LdiW, -10, "ldi.w ${reg}, {imm}"), "ldi.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, LdiD) {
-  DriverStr(RepeatVIb(&mips::MipsAssembler::LdiD, -10, "ldi.d ${reg}, {imm}"), "ldi.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, LdB) {
-  DriverStr(RepeatVRIb(&mips::MipsAssembler::LdB, -10, "ld.b ${reg1}, {imm}(${reg2})"), "ld.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, LdH) {
-  DriverStr(RepeatVRIb(&mips::MipsAssembler::LdH, -10, "ld.h ${reg1}, {imm}(${reg2})", 0, 2),
-            "ld.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, LdW) {
-  DriverStr(RepeatVRIb(&mips::MipsAssembler::LdW, -10, "ld.w ${reg1}, {imm}(${reg2})", 0, 4),
-            "ld.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, LdD) {
-  DriverStr(RepeatVRIb(&mips::MipsAssembler::LdD, -10, "ld.d ${reg1}, {imm}(${reg2})", 0, 8),
-            "ld.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, StB) {
-  DriverStr(RepeatVRIb(&mips::MipsAssembler::StB, -10, "st.b ${reg1}, {imm}(${reg2})"), "st.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, StH) {
-  DriverStr(RepeatVRIb(&mips::MipsAssembler::StH, -10, "st.h ${reg1}, {imm}(${reg2})", 0, 2),
-            "st.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, StW) {
-  DriverStr(RepeatVRIb(&mips::MipsAssembler::StW, -10, "st.w ${reg1}, {imm}(${reg2})", 0, 4),
-            "st.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, StD) {
-  DriverStr(RepeatVRIb(&mips::MipsAssembler::StD, -10, "st.d ${reg1}, {imm}(${reg2})", 0, 8),
-            "st.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, IlvlB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::IlvlB, "ilvl.b ${reg1}, ${reg2}, ${reg3}"), "ilvl.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, IlvlH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::IlvlH, "ilvl.h ${reg1}, ${reg2}, ${reg3}"), "ilvl.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, IlvlW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::IlvlW, "ilvl.w ${reg1}, ${reg2}, ${reg3}"), "ilvl.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, IlvlD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::IlvlD, "ilvl.d ${reg1}, ${reg2}, ${reg3}"), "ilvl.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, IlvrB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::IlvrB, "ilvr.b ${reg1}, ${reg2}, ${reg3}"), "ilvr.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, IlvrH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::IlvrH, "ilvr.h ${reg1}, ${reg2}, ${reg3}"), "ilvr.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, IlvrW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::IlvrW, "ilvr.w ${reg1}, ${reg2}, ${reg3}"), "ilvr.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, IlvrD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::IlvrD, "ilvr.d ${reg1}, ${reg2}, ${reg3}"), "ilvr.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, IlvevB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::IlvevB, "ilvev.b ${reg1}, ${reg2}, ${reg3}"),
-            "ilvev.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, IlvevH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::IlvevH, "ilvev.h ${reg1}, ${reg2}, ${reg3}"),
-            "ilvev.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, IlvevW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::IlvevW, "ilvev.w ${reg1}, ${reg2}, ${reg3}"),
-            "ilvev.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, IlvevD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::IlvevD, "ilvev.d ${reg1}, ${reg2}, ${reg3}"),
-            "ilvev.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, IlvodB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::IlvodB, "ilvod.b ${reg1}, ${reg2}, ${reg3}"),
-            "ilvod.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, IlvodH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::IlvodH, "ilvod.h ${reg1}, ${reg2}, ${reg3}"),
-            "ilvod.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, IlvodW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::IlvodW, "ilvod.w ${reg1}, ${reg2}, ${reg3}"),
-            "ilvod.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, IlvodD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::IlvodD, "ilvod.d ${reg1}, ${reg2}, ${reg3}"),
-            "ilvod.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, MaddvB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::MaddvB, "maddv.b ${reg1}, ${reg2}, ${reg3}"),
-            "maddv.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, MaddvH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::MaddvH, "maddv.h ${reg1}, ${reg2}, ${reg3}"),
-            "maddv.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, MaddvW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::MaddvW, "maddv.w ${reg1}, ${reg2}, ${reg3}"),
-            "maddv.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, MaddvD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::MaddvD, "maddv.d ${reg1}, ${reg2}, ${reg3}"),
-            "maddv.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Hadd_sH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Hadd_sH, "hadd_s.h ${reg1}, ${reg2}, ${reg3}"),
-            "hadd_s.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Hadd_sW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Hadd_sW, "hadd_s.w ${reg1}, ${reg2}, ${reg3}"),
-            "hadd_s.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Hadd_sD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Hadd_sD, "hadd_s.d ${reg1}, ${reg2}, ${reg3}"),
-            "hadd_s.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Hadd_uH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Hadd_uH, "hadd_u.h ${reg1}, ${reg2}, ${reg3}"),
-            "hadd_u.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Hadd_uW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Hadd_uW, "hadd_u.w ${reg1}, ${reg2}, ${reg3}"),
-            "hadd_u.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, Hadd_uD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::Hadd_uD, "hadd_u.d ${reg1}, ${reg2}, ${reg3}"),
-            "hadd_u.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, MsubvB) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::MsubvB, "msubv.b ${reg1}, ${reg2}, ${reg3}"),
-            "msubv.b");
-}
-
-TEST_F(AssemblerMIPS32r6Test, MsubvH) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::MsubvH, "msubv.h ${reg1}, ${reg2}, ${reg3}"),
-            "msubv.h");
-}
-
-TEST_F(AssemblerMIPS32r6Test, MsubvW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::MsubvW, "msubv.w ${reg1}, ${reg2}, ${reg3}"),
-            "msubv.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, MsubvD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::MsubvD, "msubv.d ${reg1}, ${reg2}, ${reg3}"),
-            "msubv.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, FmaddW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::FmaddW, "fmadd.w ${reg1}, ${reg2}, ${reg3}"),
-            "fmadd.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, FmaddD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::FmaddD, "fmadd.d ${reg1}, ${reg2}, ${reg3}"),
-            "fmadd.d");
-}
-
-TEST_F(AssemblerMIPS32r6Test, FmsubW) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::FmsubW, "fmsub.w ${reg1}, ${reg2}, ${reg3}"),
-            "fmsub.w");
-}
-
-TEST_F(AssemblerMIPS32r6Test, FmsubD) {
-  DriverStr(RepeatVVV(&mips::MipsAssembler::FmsubD, "fmsub.d ${reg1}, ${reg2}, ${reg3}"),
-            "fmsub.d");
-}
-
-#undef __
-
-}  // namespace art
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
deleted file mode 100644
index c0894d3..0000000
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ /dev/null
@@ -1,3046 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "assembler_mips.h"
-
-#include <map>
-
-#include "base/stl_util.h"
-#include "utils/assembler_test.h"
-
-#define __ GetAssembler()->
-
-namespace art {
-
-struct MIPSCpuRegisterCompare {
-  bool operator()(const mips::Register& a, const mips::Register& b) const {
-    return a < b;
-  }
-};
-
-class AssemblerMIPSTest : public AssemblerTest<mips::MipsAssembler,
-                                               mips::MipsLabel,
-                                               mips::Register,
-                                               mips::FRegister,
-                                               uint32_t> {
- public:
-  using Base = AssemblerTest<mips::MipsAssembler,
-                             mips::MipsLabel,
-                             mips::Register,
-                             mips::FRegister,
-                             uint32_t>;
-
-  // These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
-  // and reimplement it without the verification against `assembly_string`. b/73903608
-  void DriverStr(const std::string& assembly_string ATTRIBUTE_UNUSED,
-                 const std::string& test_name ATTRIBUTE_UNUSED) {
-    GetAssembler()->FinalizeCode();
-    std::vector<uint8_t> data(GetAssembler()->CodeSize());
-    MemoryRegion code(data.data(), data.size());
-    GetAssembler()->FinalizeInstructions(code);
-  }
-
- protected:
-  // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
-  std::string GetArchitectureString() override {
-    return "mips";
-  }
-
-  std::string GetAssemblerParameters() override {
-    return " --no-warn -32 -march=mips32r2";
-  }
-
-  std::string GetDisassembleParameters() override {
-    return " -D -bbinary -mmips:isa32r2";
-  }
-
-  void SetUpHelpers() override {
-    if (registers_.size() == 0) {
-      registers_.push_back(new mips::Register(mips::ZERO));
-      registers_.push_back(new mips::Register(mips::AT));
-      registers_.push_back(new mips::Register(mips::V0));
-      registers_.push_back(new mips::Register(mips::V1));
-      registers_.push_back(new mips::Register(mips::A0));
-      registers_.push_back(new mips::Register(mips::A1));
-      registers_.push_back(new mips::Register(mips::A2));
-      registers_.push_back(new mips::Register(mips::A3));
-      registers_.push_back(new mips::Register(mips::T0));
-      registers_.push_back(new mips::Register(mips::T1));
-      registers_.push_back(new mips::Register(mips::T2));
-      registers_.push_back(new mips::Register(mips::T3));
-      registers_.push_back(new mips::Register(mips::T4));
-      registers_.push_back(new mips::Register(mips::T5));
-      registers_.push_back(new mips::Register(mips::T6));
-      registers_.push_back(new mips::Register(mips::T7));
-      registers_.push_back(new mips::Register(mips::S0));
-      registers_.push_back(new mips::Register(mips::S1));
-      registers_.push_back(new mips::Register(mips::S2));
-      registers_.push_back(new mips::Register(mips::S3));
-      registers_.push_back(new mips::Register(mips::S4));
-      registers_.push_back(new mips::Register(mips::S5));
-      registers_.push_back(new mips::Register(mips::S6));
-      registers_.push_back(new mips::Register(mips::S7));
-      registers_.push_back(new mips::Register(mips::T8));
-      registers_.push_back(new mips::Register(mips::T9));
-      registers_.push_back(new mips::Register(mips::K0));
-      registers_.push_back(new mips::Register(mips::K1));
-      registers_.push_back(new mips::Register(mips::GP));
-      registers_.push_back(new mips::Register(mips::SP));
-      registers_.push_back(new mips::Register(mips::FP));
-      registers_.push_back(new mips::Register(mips::RA));
-
-      secondary_register_names_.emplace(mips::Register(mips::ZERO), "zero");
-      secondary_register_names_.emplace(mips::Register(mips::AT), "at");
-      secondary_register_names_.emplace(mips::Register(mips::V0), "v0");
-      secondary_register_names_.emplace(mips::Register(mips::V1), "v1");
-      secondary_register_names_.emplace(mips::Register(mips::A0), "a0");
-      secondary_register_names_.emplace(mips::Register(mips::A1), "a1");
-      secondary_register_names_.emplace(mips::Register(mips::A2), "a2");
-      secondary_register_names_.emplace(mips::Register(mips::A3), "a3");
-      secondary_register_names_.emplace(mips::Register(mips::T0), "t0");
-      secondary_register_names_.emplace(mips::Register(mips::T1), "t1");
-      secondary_register_names_.emplace(mips::Register(mips::T2), "t2");
-      secondary_register_names_.emplace(mips::Register(mips::T3), "t3");
-      secondary_register_names_.emplace(mips::Register(mips::T4), "t4");
-      secondary_register_names_.emplace(mips::Register(mips::T5), "t5");
-      secondary_register_names_.emplace(mips::Register(mips::T6), "t6");
-      secondary_register_names_.emplace(mips::Register(mips::T7), "t7");
-      secondary_register_names_.emplace(mips::Register(mips::S0), "s0");
-      secondary_register_names_.emplace(mips::Register(mips::S1), "s1");
-      secondary_register_names_.emplace(mips::Register(mips::S2), "s2");
-      secondary_register_names_.emplace(mips::Register(mips::S3), "s3");
-      secondary_register_names_.emplace(mips::Register(mips::S4), "s4");
-      secondary_register_names_.emplace(mips::Register(mips::S5), "s5");
-      secondary_register_names_.emplace(mips::Register(mips::S6), "s6");
-      secondary_register_names_.emplace(mips::Register(mips::S7), "s7");
-      secondary_register_names_.emplace(mips::Register(mips::T8), "t8");
-      secondary_register_names_.emplace(mips::Register(mips::T9), "t9");
-      secondary_register_names_.emplace(mips::Register(mips::K0), "k0");
-      secondary_register_names_.emplace(mips::Register(mips::K1), "k1");
-      secondary_register_names_.emplace(mips::Register(mips::GP), "gp");
-      secondary_register_names_.emplace(mips::Register(mips::SP), "sp");
-      secondary_register_names_.emplace(mips::Register(mips::FP), "fp");
-      secondary_register_names_.emplace(mips::Register(mips::RA), "ra");
-
-      fp_registers_.push_back(new mips::FRegister(mips::F0));
-      fp_registers_.push_back(new mips::FRegister(mips::F1));
-      fp_registers_.push_back(new mips::FRegister(mips::F2));
-      fp_registers_.push_back(new mips::FRegister(mips::F3));
-      fp_registers_.push_back(new mips::FRegister(mips::F4));
-      fp_registers_.push_back(new mips::FRegister(mips::F5));
-      fp_registers_.push_back(new mips::FRegister(mips::F6));
-      fp_registers_.push_back(new mips::FRegister(mips::F7));
-      fp_registers_.push_back(new mips::FRegister(mips::F8));
-      fp_registers_.push_back(new mips::FRegister(mips::F9));
-      fp_registers_.push_back(new mips::FRegister(mips::F10));
-      fp_registers_.push_back(new mips::FRegister(mips::F11));
-      fp_registers_.push_back(new mips::FRegister(mips::F12));
-      fp_registers_.push_back(new mips::FRegister(mips::F13));
-      fp_registers_.push_back(new mips::FRegister(mips::F14));
-      fp_registers_.push_back(new mips::FRegister(mips::F15));
-      fp_registers_.push_back(new mips::FRegister(mips::F16));
-      fp_registers_.push_back(new mips::FRegister(mips::F17));
-      fp_registers_.push_back(new mips::FRegister(mips::F18));
-      fp_registers_.push_back(new mips::FRegister(mips::F19));
-      fp_registers_.push_back(new mips::FRegister(mips::F20));
-      fp_registers_.push_back(new mips::FRegister(mips::F21));
-      fp_registers_.push_back(new mips::FRegister(mips::F22));
-      fp_registers_.push_back(new mips::FRegister(mips::F23));
-      fp_registers_.push_back(new mips::FRegister(mips::F24));
-      fp_registers_.push_back(new mips::FRegister(mips::F25));
-      fp_registers_.push_back(new mips::FRegister(mips::F26));
-      fp_registers_.push_back(new mips::FRegister(mips::F27));
-      fp_registers_.push_back(new mips::FRegister(mips::F28));
-      fp_registers_.push_back(new mips::FRegister(mips::F29));
-      fp_registers_.push_back(new mips::FRegister(mips::F30));
-      fp_registers_.push_back(new mips::FRegister(mips::F31));
-    }
-  }
-
-  void TearDown() override {
-    AssemblerTest::TearDown();
-    STLDeleteElements(&registers_);
-    STLDeleteElements(&fp_registers_);
-  }
-
-  std::vector<mips::MipsLabel> GetAddresses() override {
-    UNIMPLEMENTED(FATAL) << "Feature not implemented yet";
-    UNREACHABLE();
-  }
-
-  std::vector<mips::Register*> GetRegisters() override {
-    return registers_;
-  }
-
-  std::vector<mips::FRegister*> GetFPRegisters() override {
-    return fp_registers_;
-  }
-
-  uint32_t CreateImmediate(int64_t imm_value) override {
-    return imm_value;
-  }
-
-  std::string GetSecondaryRegisterName(const mips::Register& reg) override {
-    CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
-    return secondary_register_names_[reg];
-  }
-
-  std::string RepeatInsn(size_t count, const std::string& insn) {
-    std::string result;
-    for (; count != 0u; --count) {
-      result += insn;
-    }
-    return result;
-  }
-
-  void BranchHelper(void (mips::MipsAssembler::*f)(mips::MipsLabel*,
-                                                   bool),
-                    const std::string& instr_name,
-                    bool is_bare = false) {
-    __ SetReorder(false);
-    mips::MipsLabel label1, label2;
-    (Base::GetAssembler()->*f)(&label1, is_bare);
-    constexpr size_t kAdduCount1 = 63;
-    for (size_t i = 0; i != kAdduCount1; ++i) {
-      __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-    }
-    __ Bind(&label1);
-    (Base::GetAssembler()->*f)(&label2, is_bare);
-    constexpr size_t kAdduCount2 = 64;
-    for (size_t i = 0; i != kAdduCount2; ++i) {
-      __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-    }
-    __ Bind(&label2);
-    (Base::GetAssembler()->*f)(&label1, is_bare);
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-
-    std::string expected =
-        ".set noreorder\n" +
-        instr_name + " 1f\n" +
-        (is_bare ? "" : "nop\n") +
-        RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
-        "1:\n" +
-        instr_name + " 2f\n" +
-        (is_bare ? "" : "nop\n") +
-        RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
-        "2:\n" +
-        instr_name + " 1b\n" +
-        (is_bare ? "" : "nop\n") +
-        "addu $zero, $zero, $zero\n";
-    DriverStr(expected, instr_name);
-  }
-
-  void BranchCondOneRegHelper(void (mips::MipsAssembler::*f)(mips::Register,
-                                                             mips::MipsLabel*,
-                                                             bool),
-                              const std::string& instr_name,
-                              bool is_bare = false) {
-    __ SetReorder(false);
-    mips::MipsLabel label;
-    (Base::GetAssembler()->*f)(mips::A0, &label, is_bare);
-    constexpr size_t kAdduCount1 = 63;
-    for (size_t i = 0; i != kAdduCount1; ++i) {
-      __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-    }
-    __ Bind(&label);
-    constexpr size_t kAdduCount2 = 64;
-    for (size_t i = 0; i != kAdduCount2; ++i) {
-      __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-    }
-    (Base::GetAssembler()->*f)(mips::A1, &label, is_bare);
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-
-    std::string expected =
-        ".set noreorder\n" +
-        instr_name + " $a0, 1f\n" +
-        (is_bare ? "" : "nop\n") +
-        RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
-        "1:\n" +
-        RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
-        instr_name + " $a1, 1b\n" +
-        (is_bare ? "" : "nop\n") +
-        "addu $zero, $zero, $zero\n";
-    DriverStr(expected, instr_name);
-  }
-
-  void BranchCondTwoRegsHelper(void (mips::MipsAssembler::*f)(mips::Register,
-                                                              mips::Register,
-                                                              mips::MipsLabel*,
-                                                              bool),
-                               const std::string& instr_name,
-                               bool is_bare = false) {
-    __ SetReorder(false);
-    mips::MipsLabel label;
-    (Base::GetAssembler()->*f)(mips::A0, mips::A1, &label, is_bare);
-    constexpr size_t kAdduCount1 = 63;
-    for (size_t i = 0; i != kAdduCount1; ++i) {
-      __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-    }
-    __ Bind(&label);
-    constexpr size_t kAdduCount2 = 64;
-    for (size_t i = 0; i != kAdduCount2; ++i) {
-      __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-    }
-    (Base::GetAssembler()->*f)(mips::A2, mips::A3, &label, is_bare);
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-
-    std::string expected =
-        ".set noreorder\n" +
-        instr_name + " $a0, $a1, 1f\n" +
-        (is_bare ? "" : "nop\n") +
-        RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
-        "1:\n" +
-        RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
-        instr_name + " $a2, $a3, 1b\n" +
-        (is_bare ? "" : "nop\n") +
-        "addu $zero, $zero, $zero\n";
-    DriverStr(expected, instr_name);
-  }
-
-  void BranchFpuCondCodeHelper(void (mips::MipsAssembler::*f)(int,
-                                                              mips::MipsLabel*,
-                                                              bool),
-                               const std::string& instr_name,
-                               bool is_bare = false) {
-    __ SetReorder(false);
-    mips::MipsLabel label;
-    (Base::GetAssembler()->*f)(0, &label, is_bare);
-    constexpr size_t kAdduCount1 = 63;
-    for (size_t i = 0; i != kAdduCount1; ++i) {
-      __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-    }
-    __ Bind(&label);
-    constexpr size_t kAdduCount2 = 64;
-    for (size_t i = 0; i != kAdduCount2; ++i) {
-      __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-    }
-    (Base::GetAssembler()->*f)(7, &label, is_bare);
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-
-    std::string expected =
-        ".set noreorder\n" +
-        instr_name + " $fcc0, 1f\n" +
-        (is_bare ? "" : "nop\n") +
-        RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
-        "1:\n" +
-        RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
-        instr_name + " $fcc7, 1b\n" +
-        (is_bare ? "" : "nop\n") +
-        "addu $zero, $zero, $zero\n";
-    DriverStr(expected, instr_name);
-  }
-
- private:
-  std::vector<mips::Register*> registers_;
-  std::map<mips::Register, std::string, MIPSCpuRegisterCompare> secondary_register_names_;
-
-  std::vector<mips::FRegister*> fp_registers_;
-};
-
-
-TEST_F(AssemblerMIPSTest, Toolchain) {
-  EXPECT_TRUE(CheckTools());
-}
-
-TEST_F(AssemblerMIPSTest, Addu) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::Addu, "addu ${reg1}, ${reg2}, ${reg3}"), "Addu");
-}
-
-TEST_F(AssemblerMIPSTest, Addiu) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Addiu, -16, "addiu ${reg1}, ${reg2}, {imm}"), "Addiu");
-}
-
-TEST_F(AssemblerMIPSTest, Subu) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::Subu, "subu ${reg1}, ${reg2}, ${reg3}"), "Subu");
-}
-
-TEST_F(AssemblerMIPSTest, MultR2) {
-  DriverStr(RepeatRR(&mips::MipsAssembler::MultR2, "mult ${reg1}, ${reg2}"), "MultR2");
-}
-
-TEST_F(AssemblerMIPSTest, MultuR2) {
-  DriverStr(RepeatRR(&mips::MipsAssembler::MultuR2, "multu ${reg1}, ${reg2}"), "MultuR2");
-}
-
-TEST_F(AssemblerMIPSTest, DivR2Basic) {
-  DriverStr(RepeatRR(&mips::MipsAssembler::DivR2, "div $zero, ${reg1}, ${reg2}"), "DivR2Basic");
-}
-
-TEST_F(AssemblerMIPSTest, DivuR2Basic) {
-  DriverStr(RepeatRR(&mips::MipsAssembler::DivuR2, "divu $zero, ${reg1}, ${reg2}"), "DivuR2Basic");
-}
-
-TEST_F(AssemblerMIPSTest, MulR2) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::MulR2, "mul ${reg1}, ${reg2}, ${reg3}"), "MulR2");
-}
-
-TEST_F(AssemblerMIPSTest, DivR2) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::DivR2, "div $zero, ${reg2}, ${reg3}\nmflo ${reg1}"),
-            "DivR2");
-}
-
-TEST_F(AssemblerMIPSTest, ModR2) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::ModR2, "div $zero, ${reg2}, ${reg3}\nmfhi ${reg1}"),
-            "ModR2");
-}
-
-TEST_F(AssemblerMIPSTest, DivuR2) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::DivuR2, "divu $zero, ${reg2}, ${reg3}\nmflo ${reg1}"),
-            "DivuR2");
-}
-
-TEST_F(AssemblerMIPSTest, ModuR2) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::ModuR2, "divu $zero, ${reg2}, ${reg3}\nmfhi ${reg1}"),
-            "ModuR2");
-}
-
-TEST_F(AssemblerMIPSTest, And) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::And, "and ${reg1}, ${reg2}, ${reg3}"), "And");
-}
-
-TEST_F(AssemblerMIPSTest, Andi) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Andi, 16, "andi ${reg1}, ${reg2}, {imm}"), "Andi");
-}
-
-TEST_F(AssemblerMIPSTest, Or) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::Or, "or ${reg1}, ${reg2}, ${reg3}"), "Or");
-}
-
-TEST_F(AssemblerMIPSTest, Ori) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Ori, 16, "ori ${reg1}, ${reg2}, {imm}"), "Ori");
-}
-
-TEST_F(AssemblerMIPSTest, Xor) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::Xor, "xor ${reg1}, ${reg2}, ${reg3}"), "Xor");
-}
-
-TEST_F(AssemblerMIPSTest, Xori) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Xori, 16, "xori ${reg1}, ${reg2}, {imm}"), "Xori");
-}
-
-TEST_F(AssemblerMIPSTest, Nor) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::Nor, "nor ${reg1}, ${reg2}, ${reg3}"), "Nor");
-}
-
-//////////
-// MISC //
-//////////
-
-TEST_F(AssemblerMIPSTest, Movz) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::Movz, "movz ${reg1}, ${reg2}, ${reg3}"), "Movz");
-}
-
-TEST_F(AssemblerMIPSTest, Movn) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::Movn, "movn ${reg1}, ${reg2}, ${reg3}"), "Movn");
-}
-
-TEST_F(AssemblerMIPSTest, Seb) {
-  DriverStr(RepeatRR(&mips::MipsAssembler::Seb, "seb ${reg1}, ${reg2}"), "Seb");
-}
-
-TEST_F(AssemblerMIPSTest, Seh) {
-  DriverStr(RepeatRR(&mips::MipsAssembler::Seh, "seh ${reg1}, ${reg2}"), "Seh");
-}
-
-TEST_F(AssemblerMIPSTest, Sll) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Sll, 5, "sll ${reg1}, ${reg2}, {imm}"), "Sll");
-}
-
-TEST_F(AssemblerMIPSTest, Srl) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Srl, 5, "srl ${reg1}, ${reg2}, {imm}"), "Srl");
-}
-
-TEST_F(AssemblerMIPSTest, Sra) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Sra, 5, "sra ${reg1}, ${reg2}, {imm}"), "Sra");
-}
-
-TEST_F(AssemblerMIPSTest, Sllv) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::Sllv, "sllv ${reg1}, ${reg2}, ${reg3}"), "Sllv");
-}
-
-TEST_F(AssemblerMIPSTest, Srlv) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::Srlv, "srlv ${reg1}, ${reg2}, ${reg3}"), "Srlv");
-}
-
-TEST_F(AssemblerMIPSTest, Rotrv) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::Rotrv, "rotrv ${reg1}, ${reg2}, ${reg3}"), "rotrv");
-}
-
-TEST_F(AssemblerMIPSTest, Srav) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::Srav, "srav ${reg1}, ${reg2}, ${reg3}"), "Srav");
-}
-
-TEST_F(AssemblerMIPSTest, Ins) {
-  std::vector<mips::Register*> regs = GetRegisters();
-  WarnOnCombinations(regs.size() * regs.size() * 33 * 16);
-  std::string expected;
-  for (mips::Register* reg1 : regs) {
-    for (mips::Register* reg2 : regs) {
-      for (int32_t pos = 0; pos < 32; pos++) {
-        for (int32_t size = 1; pos + size <= 32; size++) {
-          __ Ins(*reg1, *reg2, pos, size);
-          std::ostringstream instr;
-          instr << "ins $" << *reg1 << ", $" << *reg2 << ", " << pos << ", " << size << "\n";
-          expected += instr.str();
-        }
-      }
-    }
-  }
-  DriverStr(expected, "Ins");
-}
-
-TEST_F(AssemblerMIPSTest, Ext) {
-  std::vector<mips::Register*> regs = GetRegisters();
-  WarnOnCombinations(regs.size() * regs.size() * 33 * 16);
-  std::string expected;
-  for (mips::Register* reg1 : regs) {
-    for (mips::Register* reg2 : regs) {
-      for (int32_t pos = 0; pos < 32; pos++) {
-        for (int32_t size = 1; pos + size <= 32; size++) {
-          __ Ext(*reg1, *reg2, pos, size);
-          std::ostringstream instr;
-          instr << "ext $" << *reg1 << ", $" << *reg2 << ", " << pos << ", " << size << "\n";
-          expected += instr.str();
-        }
-      }
-    }
-  }
-  DriverStr(expected, "Ext");
-}
-
-TEST_F(AssemblerMIPSTest, ClzR2) {
-  DriverStr(RepeatRR(&mips::MipsAssembler::ClzR2, "clz ${reg1}, ${reg2}"), "clzR2");
-}
-
-TEST_F(AssemblerMIPSTest, CloR2) {
-  DriverStr(RepeatRR(&mips::MipsAssembler::CloR2, "clo ${reg1}, ${reg2}"), "cloR2");
-}
-
-TEST_F(AssemblerMIPSTest, Lb) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Lb, -16, "lb ${reg1}, {imm}(${reg2})"), "Lb");
-}
-
-TEST_F(AssemblerMIPSTest, Lh) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Lh, -16, "lh ${reg1}, {imm}(${reg2})"), "Lh");
-}
-
-TEST_F(AssemblerMIPSTest, Lwl) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Lwl, -16, "lwl ${reg1}, {imm}(${reg2})"), "Lwl");
-}
-
-TEST_F(AssemblerMIPSTest, Lw) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Lw, -16, "lw ${reg1}, {imm}(${reg2})"), "Lw");
-}
-
-TEST_F(AssemblerMIPSTest, Lwr) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Lwr, -16, "lwr ${reg1}, {imm}(${reg2})"), "Lwr");
-}
-
-TEST_F(AssemblerMIPSTest, Lbu) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Lbu, -16, "lbu ${reg1}, {imm}(${reg2})"), "Lbu");
-}
-
-TEST_F(AssemblerMIPSTest, Lhu) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Lhu, -16, "lhu ${reg1}, {imm}(${reg2})"), "Lhu");
-}
-
-TEST_F(AssemblerMIPSTest, Lui) {
-  DriverStr(RepeatRIb(&mips::MipsAssembler::Lui, 16, "lui ${reg}, {imm}"), "Lui");
-}
-
-TEST_F(AssemblerMIPSTest, Mfhi) {
-  DriverStr(RepeatR(&mips::MipsAssembler::Mfhi, "mfhi ${reg}"), "Mfhi");
-}
-
-TEST_F(AssemblerMIPSTest, Mflo) {
-  DriverStr(RepeatR(&mips::MipsAssembler::Mflo, "mflo ${reg}"), "Mflo");
-}
-
-TEST_F(AssemblerMIPSTest, Sb) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Sb, -16, "sb ${reg1}, {imm}(${reg2})"), "Sb");
-}
-
-TEST_F(AssemblerMIPSTest, Sh) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Sh, -16, "sh ${reg1}, {imm}(${reg2})"), "Sh");
-}
-
-TEST_F(AssemblerMIPSTest, Swl) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Swl, -16, "swl ${reg1}, {imm}(${reg2})"), "Swl");
-}
-
-TEST_F(AssemblerMIPSTest, Sw) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Sw, -16, "sw ${reg1}, {imm}(${reg2})"), "Sw");
-}
-
-TEST_F(AssemblerMIPSTest, Swr) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Swr, -16, "swr ${reg1}, {imm}(${reg2})"), "Swr");
-}
-
-TEST_F(AssemblerMIPSTest, LlR2) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::LlR2, -16, "ll ${reg1}, {imm}(${reg2})"), "LlR2");
-}
-
-TEST_F(AssemblerMIPSTest, ScR2) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::ScR2, -16, "sc ${reg1}, {imm}(${reg2})"), "ScR2");
-}
-
-TEST_F(AssemblerMIPSTest, Slt) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::Slt, "slt ${reg1}, ${reg2}, ${reg3}"), "Slt");
-}
-
-TEST_F(AssemblerMIPSTest, Sltu) {
-  DriverStr(RepeatRRR(&mips::MipsAssembler::Sltu, "sltu ${reg1}, ${reg2}, ${reg3}"), "Sltu");
-}
-
-TEST_F(AssemblerMIPSTest, Slti) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Slti, -16, "slti ${reg1}, ${reg2}, {imm}"), "Slti");
-}
-
-TEST_F(AssemblerMIPSTest, Sltiu) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Sltiu, -16, "sltiu ${reg1}, ${reg2}, {imm}"), "Sltiu");
-}
-
-TEST_F(AssemblerMIPSTest, AddS) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::AddS, "add.s ${reg1}, ${reg2}, ${reg3}"), "AddS");
-}
-
-TEST_F(AssemblerMIPSTest, AddD) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::AddD, "add.d ${reg1}, ${reg2}, ${reg3}"), "AddD");
-}
-
-TEST_F(AssemblerMIPSTest, SubS) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::SubS, "sub.s ${reg1}, ${reg2}, ${reg3}"), "SubS");
-}
-
-TEST_F(AssemblerMIPSTest, SubD) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::SubD, "sub.d ${reg1}, ${reg2}, ${reg3}"), "SubD");
-}
-
-TEST_F(AssemblerMIPSTest, MulS) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::MulS, "mul.s ${reg1}, ${reg2}, ${reg3}"), "MulS");
-}
-
-TEST_F(AssemblerMIPSTest, MulD) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::MulD, "mul.d ${reg1}, ${reg2}, ${reg3}"), "MulD");
-}
-
-TEST_F(AssemblerMIPSTest, DivS) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::DivS, "div.s ${reg1}, ${reg2}, ${reg3}"), "DivS");
-}
-
-TEST_F(AssemblerMIPSTest, DivD) {
-  DriverStr(RepeatFFF(&mips::MipsAssembler::DivD, "div.d ${reg1}, ${reg2}, ${reg3}"), "DivD");
-}
-
-TEST_F(AssemblerMIPSTest, MovS) {
-  DriverStr(RepeatFF(&mips::MipsAssembler::MovS, "mov.s ${reg1}, ${reg2}"), "MovS");
-}
-
-TEST_F(AssemblerMIPSTest, MovD) {
-  DriverStr(RepeatFF(&mips::MipsAssembler::MovD, "mov.d ${reg1}, ${reg2}"), "MovD");
-}
-
-TEST_F(AssemblerMIPSTest, NegS) {
-  DriverStr(RepeatFF(&mips::MipsAssembler::NegS, "neg.s ${reg1}, ${reg2}"), "NegS");
-}
-
-TEST_F(AssemblerMIPSTest, NegD) {
-  DriverStr(RepeatFF(&mips::MipsAssembler::NegD, "neg.d ${reg1}, ${reg2}"), "NegD");
-}
-
-TEST_F(AssemblerMIPSTest, FloorWS) {
-  DriverStr(RepeatFF(&mips::MipsAssembler::FloorWS, "floor.w.s ${reg1}, ${reg2}"), "floor.w.s");
-}
-
-TEST_F(AssemblerMIPSTest, FloorWD) {
-  DriverStr(RepeatFF(&mips::MipsAssembler::FloorWD, "floor.w.d ${reg1}, ${reg2}"), "floor.w.d");
-}
-
-TEST_F(AssemblerMIPSTest, CunS) {
-  DriverStr(RepeatIbFF(&mips::MipsAssembler::CunS, 3, "c.un.s $fcc{imm}, ${reg1}, ${reg2}"),
-            "CunS");
-}
-
-TEST_F(AssemblerMIPSTest, CeqS) {
-  DriverStr(RepeatIbFF(&mips::MipsAssembler::CeqS, 3, "c.eq.s $fcc{imm}, ${reg1}, ${reg2}"),
-            "CeqS");
-}
-
-TEST_F(AssemblerMIPSTest, CueqS) {
-  DriverStr(RepeatIbFF(&mips::MipsAssembler::CueqS, 3, "c.ueq.s $fcc{imm}, ${reg1}, ${reg2}"),
-            "CueqS");
-}
-
-TEST_F(AssemblerMIPSTest, ColtS) {
-  DriverStr(RepeatIbFF(&mips::MipsAssembler::ColtS, 3, "c.olt.s $fcc{imm}, ${reg1}, ${reg2}"),
-            "ColtS");
-}
-
-TEST_F(AssemblerMIPSTest, CultS) {
-  DriverStr(RepeatIbFF(&mips::MipsAssembler::CultS, 3, "c.ult.s $fcc{imm}, ${reg1}, ${reg2}"),
-            "CultS");
-}
-
-TEST_F(AssemblerMIPSTest, ColeS) {
-  DriverStr(RepeatIbFF(&mips::MipsAssembler::ColeS, 3, "c.ole.s $fcc{imm}, ${reg1}, ${reg2}"),
-            "ColeS");
-}
-
-TEST_F(AssemblerMIPSTest, CuleS) {
-  DriverStr(RepeatIbFF(&mips::MipsAssembler::CuleS, 3, "c.ule.s $fcc{imm}, ${reg1}, ${reg2}"),
-            "CuleS");
-}
-
-TEST_F(AssemblerMIPSTest, CunD) {
-  DriverStr(RepeatIbFF(&mips::MipsAssembler::CunD, 3, "c.un.d $fcc{imm}, ${reg1}, ${reg2}"),
-            "CunD");
-}
-
-TEST_F(AssemblerMIPSTest, CeqD) {
-  DriverStr(RepeatIbFF(&mips::MipsAssembler::CeqD, 3, "c.eq.d $fcc{imm}, ${reg1}, ${reg2}"),
-            "CeqD");
-}
-
-TEST_F(AssemblerMIPSTest, CueqD) {
-  DriverStr(RepeatIbFF(&mips::MipsAssembler::CueqD, 3, "c.ueq.d $fcc{imm}, ${reg1}, ${reg2}"),
-            "CueqD");
-}
-
-TEST_F(AssemblerMIPSTest, ColtD) {
-  DriverStr(RepeatIbFF(&mips::MipsAssembler::ColtD, 3, "c.olt.d $fcc{imm}, ${reg1}, ${reg2}"),
-            "ColtD");
-}
-
-TEST_F(AssemblerMIPSTest, CultD) {
-  DriverStr(RepeatIbFF(&mips::MipsAssembler::CultD, 3, "c.ult.d $fcc{imm}, ${reg1}, ${reg2}"),
-            "CultD");
-}
-
-TEST_F(AssemblerMIPSTest, ColeD) {
-  DriverStr(RepeatIbFF(&mips::MipsAssembler::ColeD, 3, "c.ole.d $fcc{imm}, ${reg1}, ${reg2}"),
-            "ColeD");
-}
-
-TEST_F(AssemblerMIPSTest, CuleD) {
-  DriverStr(RepeatIbFF(&mips::MipsAssembler::CuleD, 3, "c.ule.d $fcc{imm}, ${reg1}, ${reg2}"),
-            "CuleD");
-}
-
-TEST_F(AssemblerMIPSTest, Movf) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Movf, 3, "movf ${reg1}, ${reg2}, $fcc{imm}"), "Movf");
-}
-
-TEST_F(AssemblerMIPSTest, Movt) {
-  DriverStr(RepeatRRIb(&mips::MipsAssembler::Movt, 3, "movt ${reg1}, ${reg2}, $fcc{imm}"), "Movt");
-}
-
-TEST_F(AssemblerMIPSTest, MovfS) {
-  DriverStr(RepeatFFIb(&mips::MipsAssembler::MovfS, 3, "movf.s ${reg1}, ${reg2}, $fcc{imm}"),
-            "MovfS");
-}
-
-TEST_F(AssemblerMIPSTest, MovfD) {
-  DriverStr(RepeatFFIb(&mips::MipsAssembler::MovfD, 3, "movf.d ${reg1}, ${reg2}, $fcc{imm}"),
-            "MovfD");
-}
-
-TEST_F(AssemblerMIPSTest, MovtS) {
-  DriverStr(RepeatFFIb(&mips::MipsAssembler::MovtS, 3, "movt.s ${reg1}, ${reg2}, $fcc{imm}"),
-            "MovtS");
-}
-
-TEST_F(AssemblerMIPSTest, MovtD) {
-  DriverStr(RepeatFFIb(&mips::MipsAssembler::MovtD, 3, "movt.d ${reg1}, ${reg2}, $fcc{imm}"),
-            "MovtD");
-}
-
-TEST_F(AssemblerMIPSTest, MovzS) {
-  DriverStr(RepeatFFR(&mips::MipsAssembler::MovzS, "movz.s ${reg1}, ${reg2}, ${reg3}"), "MovzS");
-}
-
-TEST_F(AssemblerMIPSTest, MovzD) {
-  DriverStr(RepeatFFR(&mips::MipsAssembler::MovzD, "movz.d ${reg1}, ${reg2}, ${reg3}"), "MovzD");
-}
-
-TEST_F(AssemblerMIPSTest, MovnS) {
-  DriverStr(RepeatFFR(&mips::MipsAssembler::MovnS, "movn.s ${reg1}, ${reg2}, ${reg3}"), "MovnS");
-}
-
-TEST_F(AssemblerMIPSTest, MovnD) {
-  DriverStr(RepeatFFR(&mips::MipsAssembler::MovnD, "movn.d ${reg1}, ${reg2}, ${reg3}"), "MovnD");
-}
-
-TEST_F(AssemblerMIPSTest, CvtSW) {
-  DriverStr(RepeatFF(&mips::MipsAssembler::Cvtsw, "cvt.s.w ${reg1}, ${reg2}"), "CvtSW");
-}
-
-TEST_F(AssemblerMIPSTest, CvtDW) {
-  DriverStr(RepeatFF(&mips::MipsAssembler::Cvtdw, "cvt.d.w ${reg1}, ${reg2}"), "CvtDW");
-}
-
-TEST_F(AssemblerMIPSTest, CvtSL) {
-  DriverStr(RepeatFF(&mips::MipsAssembler::Cvtsl, "cvt.s.l ${reg1}, ${reg2}"), "CvtSL");
-}
-
-TEST_F(AssemblerMIPSTest, CvtDL) {
-  DriverStr(RepeatFF(&mips::MipsAssembler::Cvtdl, "cvt.d.l ${reg1}, ${reg2}"), "CvtDL");
-}
-
-TEST_F(AssemblerMIPSTest, CvtSD) {
-  DriverStr(RepeatFF(&mips::MipsAssembler::Cvtsd, "cvt.s.d ${reg1}, ${reg2}"), "CvtSD");
-}
-
-TEST_F(AssemblerMIPSTest, CvtDS) {
-  DriverStr(RepeatFF(&mips::MipsAssembler::Cvtds, "cvt.d.s ${reg1}, ${reg2}"), "CvtDS");
-}
-
-TEST_F(AssemblerMIPSTest, TruncWS) {
-  DriverStr(RepeatFF(&mips::MipsAssembler::TruncWS, "trunc.w.s ${reg1}, ${reg2}"), "TruncWS");
-}
-
-TEST_F(AssemblerMIPSTest, TruncWD) {
-  DriverStr(RepeatFF(&mips::MipsAssembler::TruncWD, "trunc.w.d ${reg1}, ${reg2}"), "TruncWD");
-}
-
-TEST_F(AssemblerMIPSTest, TruncLS) {
-  DriverStr(RepeatFF(&mips::MipsAssembler::TruncLS, "trunc.l.s ${reg1}, ${reg2}"), "TruncLS");
-}
-
-TEST_F(AssemblerMIPSTest, TruncLD) {
-  DriverStr(RepeatFF(&mips::MipsAssembler::TruncLD, "trunc.l.d ${reg1}, ${reg2}"), "TruncLD");
-}
-
-TEST_F(AssemblerMIPSTest, Mfc1) {
-  DriverStr(RepeatRF(&mips::MipsAssembler::Mfc1, "mfc1 ${reg1}, ${reg2}"), "Mfc1");
-}
-
-TEST_F(AssemblerMIPSTest, Mtc1) {
-  DriverStr(RepeatRF(&mips::MipsAssembler::Mtc1, "mtc1 ${reg1}, ${reg2}"), "Mtc1");
-}
-
-TEST_F(AssemblerMIPSTest, Mfhc1) {
-  DriverStr(RepeatRF(&mips::MipsAssembler::Mfhc1, "mfhc1 ${reg1}, ${reg2}"), "Mfhc1");
-}
-
-TEST_F(AssemblerMIPSTest, Mthc1) {
-  DriverStr(RepeatRF(&mips::MipsAssembler::Mthc1, "mthc1 ${reg1}, ${reg2}"), "Mthc1");
-}
-
-TEST_F(AssemblerMIPSTest, Lwc1) {
-  DriverStr(RepeatFRIb(&mips::MipsAssembler::Lwc1, -16, "lwc1 ${reg1}, {imm}(${reg2})"), "Lwc1");
-}
-
-TEST_F(AssemblerMIPSTest, Ldc1) {
-  DriverStr(RepeatFRIb(&mips::MipsAssembler::Ldc1, -16, "ldc1 ${reg1}, {imm}(${reg2})"), "Ldc1");
-}
-
-TEST_F(AssemblerMIPSTest, Swc1) {
-  DriverStr(RepeatFRIb(&mips::MipsAssembler::Swc1, -16, "swc1 ${reg1}, {imm}(${reg2})"), "Swc1");
-}
-
-TEST_F(AssemblerMIPSTest, Sdc1) {
-  DriverStr(RepeatFRIb(&mips::MipsAssembler::Sdc1, -16, "sdc1 ${reg1}, {imm}(${reg2})"), "Sdc1");
-}
-
-TEST_F(AssemblerMIPSTest, Move) {
-  DriverStr(RepeatRR(&mips::MipsAssembler::Move, "or ${reg1}, ${reg2}, $zero"), "Move");
-}
-
-TEST_F(AssemblerMIPSTest, Clear) {
-  DriverStr(RepeatR(&mips::MipsAssembler::Clear, "or ${reg}, $zero, $zero"), "Clear");
-}
-
-TEST_F(AssemblerMIPSTest, Not) {
-  DriverStr(RepeatRR(&mips::MipsAssembler::Not, "nor ${reg1}, ${reg2}, $zero"), "Not");
-}
-
-TEST_F(AssemblerMIPSTest, Addiu32) {
-  __ Addiu32(mips::A1, mips::A2, -0x8000);
-  __ Addiu32(mips::A1, mips::A2, +0);
-  __ Addiu32(mips::A1, mips::A2, +0x7FFF);
-  __ Addiu32(mips::A1, mips::A2, -0x10000);
-  __ Addiu32(mips::A1, mips::A2, -0x8001);
-  __ Addiu32(mips::A1, mips::A2, +0x8000);
-  __ Addiu32(mips::A1, mips::A2, +0xFFFE);
-  __ Addiu32(mips::A1, mips::A2, -0x10001);
-  __ Addiu32(mips::A1, mips::A2, +0xFFFF);
-  __ Addiu32(mips::A1, mips::A2, +0x10000);
-  __ Addiu32(mips::A1, mips::A2, +0x10001);
-  __ Addiu32(mips::A1, mips::A2, +0x12345678);
-
-  const char* expected =
-      "addiu $a1, $a2, -0x8000\n"
-      "addiu $a1, $a2, 0\n"
-      "addiu $a1, $a2, 0x7FFF\n"
-      "addiu $at, $a2, -0x8000\n"
-      "addiu $a1, $at, -0x8000\n"
-      "addiu $at, $a2, -0x8000\n"
-      "addiu $a1, $at, -1\n"
-      "addiu $at, $a2, 0x7FFF\n"
-      "addiu $a1, $at, 1\n"
-      "addiu $at, $a2, 0x7FFF\n"
-      "addiu $a1, $at, 0x7FFF\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0xFFFF\n"
-      "addu $a1, $a2, $at\n"
-      "ori $at, $zero, 0xFFFF\n"
-      "addu $a1, $a2, $at\n"
-      "lui $at, 1\n"
-      "addu $a1, $a2, $at\n"
-      "lui $at, 1\n"
-      "ori $at, $at, 1\n"
-      "addu $a1, $a2, $at\n"
-      "lui $at, 0x1234\n"
-      "ori $at, $at, 0x5678\n"
-      "addu $a1, $a2, $at\n";
-  DriverStr(expected, "Addiu32");
-}
-
-TEST_F(AssemblerMIPSTest, LoadFromOffset) {
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, -0x8000);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x7FF8);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x7FFB);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x7FFC);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x7FFF);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, -0xFFF0);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, -0x8008);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, -0x8001);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x8000);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0xFFF0);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, -0x17FE8);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, -0x0FFF8);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, -0x0FFF1);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x0FFF1);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x0FFF8);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x17FE8);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, -0x17FF0);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, -0x17FE9);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x17FE9);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x17FF0);
-  __ LoadFromOffset(mips::kLoadSignedByte, mips::A3, mips::A1, +0x12345678);
-
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, -0x8000);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x7FF8);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x7FFB);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x7FFC);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x7FFF);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, -0xFFF0);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, -0x8008);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, -0x8001);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x8000);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0xFFF0);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, -0x17FE8);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, -0x0FFF8);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, -0x0FFF1);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x0FFF1);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x0FFF8);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x17FE8);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, -0x17FF0);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, -0x17FE9);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x17FE9);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x17FF0);
-  __ LoadFromOffset(mips::kLoadUnsignedByte, mips::A3, mips::A1, +0x12345678);
-
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, -0x8000);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x7FF8);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x7FFB);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x7FFC);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x7FFF);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, -0xFFF0);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, -0x8008);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, -0x8001);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x8000);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0xFFF0);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, -0x17FE8);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, -0x0FFF8);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, -0x0FFF1);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x0FFF1);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x0FFF8);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x17FE8);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, -0x17FF0);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, -0x17FE9);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x17FE9);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x17FF0);
-  __ LoadFromOffset(mips::kLoadSignedHalfword, mips::A3, mips::A1, +0x12345678);
-
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, -0x8000);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x7FF8);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x7FFB);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x7FFC);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x7FFF);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, -0xFFF0);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, -0x8008);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, -0x8001);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x8000);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0xFFF0);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, -0x17FE8);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, -0x0FFF8);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, -0x0FFF1);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x0FFF1);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x0FFF8);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x17FE8);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, -0x17FF0);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, -0x17FE9);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x17FE9);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x17FF0);
-  __ LoadFromOffset(mips::kLoadUnsignedHalfword, mips::A3, mips::A1, +0x12345678);
-
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, -0x8000);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x7FF8);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x7FFB);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x7FFC);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x7FFF);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, -0xFFF0);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, -0x8008);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, -0x8001);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x8000);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0xFFF0);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, -0x17FE8);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, -0x0FFF8);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, -0x0FFF1);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x0FFF1);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x0FFF8);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x17FE8);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, -0x17FF0);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, -0x17FE9);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x17FE9);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x17FF0);
-  __ LoadFromOffset(mips::kLoadWord, mips::A3, mips::A1, +0x12345678);
-
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -0x8000);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x7FF8);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x7FFB);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x7FFC);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x7FFF);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -0xFFF0);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -0x8008);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -0x8001);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x8000);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0xFFF0);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -0x17FE8);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -0x0FFF8);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -0x0FFF1);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x0FFF1);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x0FFF8);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x17FE8);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -0x17FF0);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, -0x17FE9);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x17FE9);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x17FF0);
-  __ LoadFromOffset(mips::kLoadDoubleword, mips::A0, mips::A2, +0x12345678);
-
-  const char* expected =
-      "lb $a3, -0x8000($a1)\n"
-      "lb $a3, 0($a1)\n"
-      "lb $a3, 0x7FF8($a1)\n"
-      "lb $a3, 0x7FFB($a1)\n"
-      "lb $a3, 0x7FFC($a1)\n"
-      "lb $a3, 0x7FFF($a1)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "lb $a3, -0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "lb $a3, -0x10($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "lb $a3, -9($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "lb $a3, 8($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "lb $a3, 0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lb $a3, -0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lb $a3, -8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lb $a3, -1($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lb $a3, 1($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lb $a3, 8($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lb $a3, 0x7FF8($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a1\n"
-      "lb $a3, 0($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a1\n"
-      "lb $a3, 7($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FE8\n"
-      "addu $at, $at, $a1\n"
-      "lb $a3, 1($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FF0\n"
-      "addu $at, $at, $a1\n"
-      "lb $a3, 0($at)\n"
-      "lui $at, 0x1234\n"
-      "ori $at, $at, 0x5678\n"
-      "addu $at, $at, $a1\n"
-      "lb $a3, 0($at)\n"
-
-      "lbu $a3, -0x8000($a1)\n"
-      "lbu $a3, 0($a1)\n"
-      "lbu $a3, 0x7FF8($a1)\n"
-      "lbu $a3, 0x7FFB($a1)\n"
-      "lbu $a3, 0x7FFC($a1)\n"
-      "lbu $a3, 0x7FFF($a1)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "lbu $a3, -0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "lbu $a3, -0x10($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "lbu $a3, -9($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "lbu $a3, 8($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "lbu $a3, 0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lbu $a3, -0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lbu $a3, -8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lbu $a3, -1($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lbu $a3, 1($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lbu $a3, 8($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lbu $a3, 0x7FF8($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a1\n"
-      "lbu $a3, 0($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a1\n"
-      "lbu $a3, 7($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FE8\n"
-      "addu $at, $at, $a1\n"
-      "lbu $a3, 1($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FF0\n"
-      "addu $at, $at, $a1\n"
-      "lbu $a3, 0($at)\n"
-      "lui $at, 0x1234\n"
-      "ori $at, $at, 0x5678\n"
-      "addu $at, $at, $a1\n"
-      "lbu $a3, 0($at)\n"
-
-      "lh $a3, -0x8000($a1)\n"
-      "lh $a3, 0($a1)\n"
-      "lh $a3, 0x7FF8($a1)\n"
-      "lh $a3, 0x7FFB($a1)\n"
-      "lh $a3, 0x7FFC($a1)\n"
-      "lh $a3, 0x7FFF($a1)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "lh $a3, -0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "lh $a3, -0x10($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "lh $a3, -9($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "lh $a3, 8($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "lh $a3, 0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lh $a3, -0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lh $a3, -8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lh $a3, -1($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lh $a3, 1($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lh $a3, 8($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lh $a3, 0x7FF8($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a1\n"
-      "lh $a3, 0($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a1\n"
-      "lh $a3, 7($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FE8\n"
-      "addu $at, $at, $a1\n"
-      "lh $a3, 1($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FF0\n"
-      "addu $at, $at, $a1\n"
-      "lh $a3, 0($at)\n"
-      "lui $at, 0x1234\n"
-      "ori $at, $at, 0x5678\n"
-      "addu $at, $at, $a1\n"
-      "lh $a3, 0($at)\n"
-
-      "lhu $a3, -0x8000($a1)\n"
-      "lhu $a3, 0($a1)\n"
-      "lhu $a3, 0x7FF8($a1)\n"
-      "lhu $a3, 0x7FFB($a1)\n"
-      "lhu $a3, 0x7FFC($a1)\n"
-      "lhu $a3, 0x7FFF($a1)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "lhu $a3, -0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "lhu $a3, -0x10($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "lhu $a3, -9($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "lhu $a3, 8($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "lhu $a3, 0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lhu $a3, -0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lhu $a3, -8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lhu $a3, -1($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lhu $a3, 1($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lhu $a3, 8($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lhu $a3, 0x7FF8($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a1\n"
-      "lhu $a3, 0($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a1\n"
-      "lhu $a3, 7($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FE8\n"
-      "addu $at, $at, $a1\n"
-      "lhu $a3, 1($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FF0\n"
-      "addu $at, $at, $a1\n"
-      "lhu $a3, 0($at)\n"
-      "lui $at, 0x1234\n"
-      "ori $at, $at, 0x5678\n"
-      "addu $at, $at, $a1\n"
-      "lhu $a3, 0($at)\n"
-
-      "lw $a3, -0x8000($a1)\n"
-      "lw $a3, 0($a1)\n"
-      "lw $a3, 0x7FF8($a1)\n"
-      "lw $a3, 0x7FFB($a1)\n"
-      "lw $a3, 0x7FFC($a1)\n"
-      "lw $a3, 0x7FFF($a1)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "lw $a3, -0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "lw $a3, -0x10($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "lw $a3, -9($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "lw $a3, 8($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "lw $a3, 0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lw $a3, -0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lw $a3, -8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lw $a3, -1($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lw $a3, 1($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lw $a3, 8($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lw $a3, 0x7FF8($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a1\n"
-      "lw $a3, 0($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a1\n"
-      "lw $a3, 7($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FE8\n"
-      "addu $at, $at, $a1\n"
-      "lw $a3, 1($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FF0\n"
-      "addu $at, $at, $a1\n"
-      "lw $a3, 0($at)\n"
-      "lui $at, 0x1234\n"
-      "ori $at, $at, 0x5678\n"
-      "addu $at, $at, $a1\n"
-      "lw $a3, 0($at)\n"
-
-      "lw $a0, -0x8000($a2)\n"
-      "lw $a1, -0x7FFC($a2)\n"
-      "lw $a0, 0($a2)\n"
-      "lw $a1, 4($a2)\n"
-      "lw $a0, 0x7FF8($a2)\n"
-      "lw $a1, 0x7FFC($a2)\n"
-      "lw $a0, 0x7FFB($a2)\n"
-      "lw $a1, 0x7FFF($a2)\n"
-      "addiu $at, $a2, 0x7FF8\n"
-      "lw $a0, 4($at)\n"
-      "lw $a1, 8($at)\n"
-      "addiu $at, $a2, 0x7FF8\n"
-      "lw $a0, 7($at)\n"
-      "lw $a1, 11($at)\n"
-      "addiu $at, $a2, -0x7FF8\n"
-      "lw $a0, -0x7FF8($at)\n"
-      "lw $a1, -0x7FF4($at)\n"
-      "addiu $at, $a2, -0x7FF8\n"
-      "lw $a0, -0x10($at)\n"
-      "lw $a1, -0xC($at)\n"
-      "addiu $at, $a2, -0x7FF8\n"
-      "lw $a0, -9($at)\n"
-      "lw $a1, -5($at)\n"
-      "addiu $at, $a2, 0x7FF8\n"
-      "lw $a0, 8($at)\n"
-      "lw $a1, 12($at)\n"
-      "addiu $at, $a2, 0x7FF8\n"
-      "lw $a0, 0x7FF8($at)\n"
-      "lw $a1, 0x7FFC($at)\n"
-      "addiu $at, $a2, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lw $a0, -0x7FF8($at)\n"
-      "lw $a1, -0x7FF4($at)\n"
-      "addiu $at, $a2, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lw $a0, -8($at)\n"
-      "lw $a1, -4($at)\n"
-      "addiu $at, $a2, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lw $a0, -1($at)\n"
-      "lw $a1, 3($at)\n"
-      "addiu $at, $a2, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lw $a0, 1($at)\n"
-      "lw $a1, 5($at)\n"
-      "addiu $at, $a2, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lw $a0, 8($at)\n"
-      "lw $a1, 12($at)\n"
-      "addiu $at, $a2, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lw $a0, 0x7FF8($at)\n"
-      "lw $a1, 0x7FFC($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a2\n"
-      "lw $a0, 0($at)\n"
-      "lw $a1, 4($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a2\n"
-      "lw $a0, 7($at)\n"
-      "lw $a1, 11($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FE8\n"
-      "addu $at, $at, $a2\n"
-      "lw $a0, 1($at)\n"
-      "lw $a1, 5($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FF0\n"
-      "addu $at, $at, $a2\n"
-      "lw $a0, 0($at)\n"
-      "lw $a1, 4($at)\n"
-      "lui $at, 0x1234\n"
-      "ori $at, $at, 0x5678\n"
-      "addu $at, $at, $a2\n"
-      "lw $a0, 0($at)\n"
-      "lw $a1, 4($at)\n";
-  DriverStr(expected, "LoadFromOffset");
-}
-
-TEST_F(AssemblerMIPSTest, LoadSFromOffset) {
-  __ LoadSFromOffset(mips::F2, mips::A0, -0x8000);
-  __ LoadSFromOffset(mips::F2, mips::A0, +0);
-  __ LoadSFromOffset(mips::F2, mips::A0, +0x7FF8);
-  __ LoadSFromOffset(mips::F2, mips::A0, +0x7FFB);
-  __ LoadSFromOffset(mips::F2, mips::A0, +0x7FFC);
-  __ LoadSFromOffset(mips::F2, mips::A0, +0x7FFF);
-  __ LoadSFromOffset(mips::F2, mips::A0, -0xFFF0);
-  __ LoadSFromOffset(mips::F2, mips::A0, -0x8008);
-  __ LoadSFromOffset(mips::F2, mips::A0, -0x8001);
-  __ LoadSFromOffset(mips::F2, mips::A0, +0x8000);
-  __ LoadSFromOffset(mips::F2, mips::A0, +0xFFF0);
-  __ LoadSFromOffset(mips::F2, mips::A0, -0x17FE8);
-  __ LoadSFromOffset(mips::F2, mips::A0, -0x0FFF8);
-  __ LoadSFromOffset(mips::F2, mips::A0, -0x0FFF1);
-  __ LoadSFromOffset(mips::F2, mips::A0, +0x0FFF1);
-  __ LoadSFromOffset(mips::F2, mips::A0, +0x0FFF8);
-  __ LoadSFromOffset(mips::F2, mips::A0, +0x17FE8);
-  __ LoadSFromOffset(mips::F2, mips::A0, -0x17FF0);
-  __ LoadSFromOffset(mips::F2, mips::A0, -0x17FE9);
-  __ LoadSFromOffset(mips::F2, mips::A0, +0x17FE9);
-  __ LoadSFromOffset(mips::F2, mips::A0, +0x17FF0);
-  __ LoadSFromOffset(mips::F2, mips::A0, +0x12345678);
-
-  const char* expected =
-      "lwc1 $f2, -0x8000($a0)\n"
-      "lwc1 $f2, 0($a0)\n"
-      "lwc1 $f2, 0x7FF8($a0)\n"
-      "lwc1 $f2, 0x7FFB($a0)\n"
-      "lwc1 $f2, 0x7FFC($a0)\n"
-      "lwc1 $f2, 0x7FFF($a0)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "lwc1 $f2, -0x7FF8($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "lwc1 $f2, -0x10($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "lwc1 $f2, -9($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "lwc1 $f2, 8($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "lwc1 $f2, 0x7FF8($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lwc1 $f2, -0x7FF8($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lwc1 $f2, -8($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lwc1 $f2, -1($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lwc1 $f2, 1($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lwc1 $f2, 8($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lwc1 $f2, 0x7FF8($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a0\n"
-      "lwc1 $f2, 0($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a0\n"
-      "lwc1 $f2, 7($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FE8\n"
-      "addu $at, $at, $a0\n"
-      "lwc1 $f2, 1($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FF0\n"
-      "addu $at, $at, $a0\n"
-      "lwc1 $f2, 0($at)\n"
-      "lui $at, 0x1234\n"
-      "ori $at, $at, 0x5678\n"
-      "addu $at, $at, $a0\n"
-      "lwc1 $f2, 0($at)\n";
-  DriverStr(expected, "LoadSFromOffset");
-}
-
-TEST_F(AssemblerMIPSTest, LoadDFromOffset) {
-  __ LoadDFromOffset(mips::F0, mips::A0, -0x8000);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x7FF8);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x7FFB);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x7FFC);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x7FFF);
-  __ LoadDFromOffset(mips::F0, mips::A0, -0xFFF0);
-  __ LoadDFromOffset(mips::F0, mips::A0, -0x8008);
-  __ LoadDFromOffset(mips::F0, mips::A0, -0x8001);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x8000);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0xFFF0);
-  __ LoadDFromOffset(mips::F0, mips::A0, -0x17FE8);
-  __ LoadDFromOffset(mips::F0, mips::A0, -0x0FFF8);
-  __ LoadDFromOffset(mips::F0, mips::A0, -0x0FFF1);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x0FFF1);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x0FFF8);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x17FE8);
-  __ LoadDFromOffset(mips::F0, mips::A0, -0x17FF0);
-  __ LoadDFromOffset(mips::F0, mips::A0, -0x17FE9);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x17FE9);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x17FF0);
-  __ LoadDFromOffset(mips::F0, mips::A0, +0x12345678);
-
-  const char* expected =
-      "ldc1 $f0, -0x8000($a0)\n"
-      "ldc1 $f0, 0($a0)\n"
-      "ldc1 $f0, 0x7FF8($a0)\n"
-      "lwc1 $f0, 0x7FFB($a0)\n"
-      "lwc1 $f1, 0x7FFF($a0)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "lwc1 $f0, 4($at)\n"
-      "lwc1 $f1, 8($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "lwc1 $f0, 7($at)\n"
-      "lwc1 $f1, 11($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "ldc1 $f0, -0x7FF8($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "ldc1 $f0, -0x10($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "lwc1 $f0, -9($at)\n"
-      "lwc1 $f1, -5($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "ldc1 $f0, 8($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "ldc1 $f0, 0x7FF8($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "ldc1 $f0, -0x7FF8($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "ldc1 $f0, -8($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "lwc1 $f0, -1($at)\n"
-      "lwc1 $f1, 3($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "lwc1 $f0, 1($at)\n"
-      "lwc1 $f1, 5($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "ldc1 $f0, 8($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "ldc1 $f0, 0x7FF8($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a0\n"
-      "ldc1 $f0, 0($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a0\n"
-      "lwc1 $f0, 7($at)\n"
-      "lwc1 $f1, 11($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FE8\n"
-      "addu $at, $at, $a0\n"
-      "lwc1 $f0, 1($at)\n"
-      "lwc1 $f1, 5($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FF0\n"
-      "addu $at, $at, $a0\n"
-      "ldc1 $f0, 0($at)\n"
-      "lui $at, 0x1234\n"
-      "ori $at, $at, 0x5678\n"
-      "addu $at, $at, $a0\n"
-      "ldc1 $f0, 0($at)\n";
-  DriverStr(expected, "LoadDFromOffset");
-}
-
-TEST_F(AssemblerMIPSTest, StoreToOffset) {
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, -0x8000);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x7FF8);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x7FFB);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x7FFC);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x7FFF);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, -0xFFF0);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, -0x8008);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, -0x8001);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x8000);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0xFFF0);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, -0x17FE8);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, -0x0FFF8);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, -0x0FFF1);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x0FFF1);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x0FFF8);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x17FE8);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, -0x17FF0);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, -0x17FE9);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x17FE9);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x17FF0);
-  __ StoreToOffset(mips::kStoreByte, mips::A3, mips::A1, +0x12345678);
-
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, -0x8000);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x7FF8);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x7FFB);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x7FFC);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x7FFF);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, -0xFFF0);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, -0x8008);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, -0x8001);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x8000);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0xFFF0);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, -0x17FE8);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, -0x0FFF8);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, -0x0FFF1);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x0FFF1);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x0FFF8);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x17FE8);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, -0x17FF0);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, -0x17FE9);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x17FE9);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x17FF0);
-  __ StoreToOffset(mips::kStoreHalfword, mips::A3, mips::A1, +0x12345678);
-
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, -0x8000);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x7FF8);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x7FFB);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x7FFC);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x7FFF);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, -0xFFF0);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, -0x8008);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, -0x8001);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x8000);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0xFFF0);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, -0x17FE8);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, -0x0FFF8);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, -0x0FFF1);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x0FFF1);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x0FFF8);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x17FE8);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, -0x17FF0);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, -0x17FE9);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x17FE9);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x17FF0);
-  __ StoreToOffset(mips::kStoreWord, mips::A3, mips::A1, +0x12345678);
-
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -0x8000);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x7FF8);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x7FFB);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x7FFC);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x7FFF);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -0xFFF0);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -0x8008);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -0x8001);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x8000);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0xFFF0);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -0x17FE8);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -0x0FFF8);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -0x0FFF1);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x0FFF1);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x0FFF8);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x17FE8);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -0x17FF0);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, -0x17FE9);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x17FE9);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x17FF0);
-  __ StoreToOffset(mips::kStoreDoubleword, mips::A0, mips::A2, +0x12345678);
-
-  const char* expected =
-      "sb $a3, -0x8000($a1)\n"
-      "sb $a3, 0($a1)\n"
-      "sb $a3, 0x7FF8($a1)\n"
-      "sb $a3, 0x7FFB($a1)\n"
-      "sb $a3, 0x7FFC($a1)\n"
-      "sb $a3, 0x7FFF($a1)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "sb $a3, -0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "sb $a3, -0x10($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "sb $a3, -9($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "sb $a3, 8($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "sb $a3, 0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "sb $a3, -0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "sb $a3, -8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "sb $a3, -1($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "sb $a3, 1($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "sb $a3, 8($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "sb $a3, 0x7FF8($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a1\n"
-      "sb $a3, 0($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a1\n"
-      "sb $a3, 7($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FE8\n"
-      "addu $at, $at, $a1\n"
-      "sb $a3, 1($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FF0\n"
-      "addu $at, $at, $a1\n"
-      "sb $a3, 0($at)\n"
-      "lui $at, 0x1234\n"
-      "ori $at, $at, 0x5678\n"
-      "addu $at, $at, $a1\n"
-      "sb $a3, 0($at)\n"
-
-      "sh $a3, -0x8000($a1)\n"
-      "sh $a3, 0($a1)\n"
-      "sh $a3, 0x7FF8($a1)\n"
-      "sh $a3, 0x7FFB($a1)\n"
-      "sh $a3, 0x7FFC($a1)\n"
-      "sh $a3, 0x7FFF($a1)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "sh $a3, -0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "sh $a3, -0x10($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "sh $a3, -9($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "sh $a3, 8($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "sh $a3, 0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "sh $a3, -0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "sh $a3, -8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "sh $a3, -1($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "sh $a3, 1($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "sh $a3, 8($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "sh $a3, 0x7FF8($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a1\n"
-      "sh $a3, 0($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a1\n"
-      "sh $a3, 7($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FE8\n"
-      "addu $at, $at, $a1\n"
-      "sh $a3, 1($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FF0\n"
-      "addu $at, $at, $a1\n"
-      "sh $a3, 0($at)\n"
-      "lui $at, 0x1234\n"
-      "ori $at, $at, 0x5678\n"
-      "addu $at, $at, $a1\n"
-      "sh $a3, 0($at)\n"
-
-      "sw $a3, -0x8000($a1)\n"
-      "sw $a3, 0($a1)\n"
-      "sw $a3, 0x7FF8($a1)\n"
-      "sw $a3, 0x7FFB($a1)\n"
-      "sw $a3, 0x7FFC($a1)\n"
-      "sw $a3, 0x7FFF($a1)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "sw $a3, -0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "sw $a3, -0x10($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "sw $a3, -9($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "sw $a3, 8($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "sw $a3, 0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "sw $a3, -0x7FF8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "sw $a3, -8($at)\n"
-      "addiu $at, $a1, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "sw $a3, -1($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "sw $a3, 1($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "sw $a3, 8($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "sw $a3, 0x7FF8($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a1\n"
-      "sw $a3, 0($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a1\n"
-      "sw $a3, 7($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FE8\n"
-      "addu $at, $at, $a1\n"
-      "sw $a3, 1($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FF0\n"
-      "addu $at, $at, $a1\n"
-      "sw $a3, 0($at)\n"
-      "lui $at, 0x1234\n"
-      "ori $at, $at, 0x5678\n"
-      "addu $at, $at, $a1\n"
-      "sw $a3, 0($at)\n"
-
-      "sw $a0, -0x8000($a2)\n"
-      "sw $a1, -0x7FFC($a2)\n"
-      "sw $a0, 0($a2)\n"
-      "sw $a1, 4($a2)\n"
-      "sw $a0, 0x7FF8($a2)\n"
-      "sw $a1, 0x7FFC($a2)\n"
-      "sw $a0, 0x7FFB($a2)\n"
-      "sw $a1, 0x7FFF($a2)\n"
-      "addiu $at, $a2, 0x7FF8\n"
-      "sw $a0, 4($at)\n"
-      "sw $a1, 8($at)\n"
-      "addiu $at, $a2, 0x7FF8\n"
-      "sw $a0, 7($at)\n"
-      "sw $a1, 11($at)\n"
-      "addiu $at, $a2, -0x7FF8\n"
-      "sw $a0, -0x7FF8($at)\n"
-      "sw $a1, -0x7FF4($at)\n"
-      "addiu $at, $a2, -0x7FF8\n"
-      "sw $a0, -0x10($at)\n"
-      "sw $a1, -0xC($at)\n"
-      "addiu $at, $a2, -0x7FF8\n"
-      "sw $a0, -9($at)\n"
-      "sw $a1, -5($at)\n"
-      "addiu $at, $a2, 0x7FF8\n"
-      "sw $a0, 8($at)\n"
-      "sw $a1, 12($at)\n"
-      "addiu $at, $a2, 0x7FF8\n"
-      "sw $a0, 0x7FF8($at)\n"
-      "sw $a1, 0x7FFC($at)\n"
-      "addiu $at, $a2, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "sw $a0, -0x7FF8($at)\n"
-      "sw $a1, -0x7FF4($at)\n"
-      "addiu $at, $a2, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "sw $a0, -8($at)\n"
-      "sw $a1, -4($at)\n"
-      "addiu $at, $a2, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "sw $a0, -1($at)\n"
-      "sw $a1, 3($at)\n"
-      "addiu $at, $a2, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "sw $a0, 1($at)\n"
-      "sw $a1, 5($at)\n"
-      "addiu $at, $a2, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "sw $a0, 8($at)\n"
-      "sw $a1, 12($at)\n"
-      "addiu $at, $a2, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "sw $a0, 0x7FF8($at)\n"
-      "sw $a1, 0x7FFC($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a2\n"
-      "sw $a0, 0($at)\n"
-      "sw $a1, 4($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a2\n"
-      "sw $a0, 7($at)\n"
-      "sw $a1, 11($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FE8\n"
-      "addu $at, $at, $a2\n"
-      "sw $a0, 1($at)\n"
-      "sw $a1, 5($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FF0\n"
-      "addu $at, $at, $a2\n"
-      "sw $a0, 0($at)\n"
-      "sw $a1, 4($at)\n"
-      "lui $at, 0x1234\n"
-      "ori $at, $at, 0x5678\n"
-      "addu $at, $at, $a2\n"
-      "sw $a0, 0($at)\n"
-      "sw $a1, 4($at)\n";
-  DriverStr(expected, "StoreToOffset");
-}
-
-TEST_F(AssemblerMIPSTest, StoreSToOffset) {
-  __ StoreSToOffset(mips::F2, mips::A0, -0x8000);
-  __ StoreSToOffset(mips::F2, mips::A0, +0);
-  __ StoreSToOffset(mips::F2, mips::A0, +0x7FF8);
-  __ StoreSToOffset(mips::F2, mips::A0, +0x7FFB);
-  __ StoreSToOffset(mips::F2, mips::A0, +0x7FFC);
-  __ StoreSToOffset(mips::F2, mips::A0, +0x7FFF);
-  __ StoreSToOffset(mips::F2, mips::A0, -0xFFF0);
-  __ StoreSToOffset(mips::F2, mips::A0, -0x8008);
-  __ StoreSToOffset(mips::F2, mips::A0, -0x8001);
-  __ StoreSToOffset(mips::F2, mips::A0, +0x8000);
-  __ StoreSToOffset(mips::F2, mips::A0, +0xFFF0);
-  __ StoreSToOffset(mips::F2, mips::A0, -0x17FE8);
-  __ StoreSToOffset(mips::F2, mips::A0, -0x0FFF8);
-  __ StoreSToOffset(mips::F2, mips::A0, -0x0FFF1);
-  __ StoreSToOffset(mips::F2, mips::A0, +0x0FFF1);
-  __ StoreSToOffset(mips::F2, mips::A0, +0x0FFF8);
-  __ StoreSToOffset(mips::F2, mips::A0, +0x17FE8);
-  __ StoreSToOffset(mips::F2, mips::A0, -0x17FF0);
-  __ StoreSToOffset(mips::F2, mips::A0, -0x17FE9);
-  __ StoreSToOffset(mips::F2, mips::A0, +0x17FE9);
-  __ StoreSToOffset(mips::F2, mips::A0, +0x17FF0);
-  __ StoreSToOffset(mips::F2, mips::A0, +0x12345678);
-
-  const char* expected =
-      "swc1 $f2, -0x8000($a0)\n"
-      "swc1 $f2, 0($a0)\n"
-      "swc1 $f2, 0x7FF8($a0)\n"
-      "swc1 $f2, 0x7FFB($a0)\n"
-      "swc1 $f2, 0x7FFC($a0)\n"
-      "swc1 $f2, 0x7FFF($a0)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "swc1 $f2, -0x7FF8($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "swc1 $f2, -0x10($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "swc1 $f2, -9($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "swc1 $f2, 8($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "swc1 $f2, 0x7FF8($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "swc1 $f2, -0x7FF8($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "swc1 $f2, -8($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "swc1 $f2, -1($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "swc1 $f2, 1($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "swc1 $f2, 8($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "swc1 $f2, 0x7FF8($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a0\n"
-      "swc1 $f2, 0($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a0\n"
-      "swc1 $f2, 7($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FE8\n"
-      "addu $at, $at, $a0\n"
-      "swc1 $f2, 1($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FF0\n"
-      "addu $at, $at, $a0\n"
-      "swc1 $f2, 0($at)\n"
-      "lui $at, 0x1234\n"
-      "ori $at, $at, 0x5678\n"
-      "addu $at, $at, $a0\n"
-      "swc1 $f2, 0($at)\n";
-  DriverStr(expected, "StoreSToOffset");
-}
-
-TEST_F(AssemblerMIPSTest, StoreDToOffset) {
-  __ StoreDToOffset(mips::F0, mips::A0, -0x8000);
-  __ StoreDToOffset(mips::F0, mips::A0, +0);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x7FF8);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x7FFB);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x7FFC);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x7FFF);
-  __ StoreDToOffset(mips::F0, mips::A0, -0xFFF0);
-  __ StoreDToOffset(mips::F0, mips::A0, -0x8008);
-  __ StoreDToOffset(mips::F0, mips::A0, -0x8001);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x8000);
-  __ StoreDToOffset(mips::F0, mips::A0, +0xFFF0);
-  __ StoreDToOffset(mips::F0, mips::A0, -0x17FE8);
-  __ StoreDToOffset(mips::F0, mips::A0, -0x0FFF8);
-  __ StoreDToOffset(mips::F0, mips::A0, -0x0FFF1);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x0FFF1);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x0FFF8);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x17FE8);
-  __ StoreDToOffset(mips::F0, mips::A0, -0x17FF0);
-  __ StoreDToOffset(mips::F0, mips::A0, -0x17FE9);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x17FE9);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x17FF0);
-  __ StoreDToOffset(mips::F0, mips::A0, +0x12345678);
-
-  const char* expected =
-      "sdc1 $f0, -0x8000($a0)\n"
-      "sdc1 $f0, 0($a0)\n"
-      "sdc1 $f0, 0x7FF8($a0)\n"
-      "swc1 $f0, 0x7FFB($a0)\n"
-      "swc1 $f1, 0x7FFF($a0)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "swc1 $f0, 4($at)\n"
-      "swc1 $f1, 8($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "swc1 $f0, 7($at)\n"
-      "swc1 $f1, 11($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "sdc1 $f0, -0x7FF8($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "sdc1 $f0, -0x10($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "swc1 $f0, -9($at)\n"
-      "swc1 $f1, -5($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "sdc1 $f0, 8($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "sdc1 $f0, 0x7FF8($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "sdc1 $f0, -0x7FF8($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "sdc1 $f0, -8($at)\n"
-      "addiu $at, $a0, -0x7FF8\n"
-      "addiu $at, $at, -0x7FF8\n"
-      "swc1 $f0, -1($at)\n"
-      "swc1 $f1, 3($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "swc1 $f0, 1($at)\n"
-      "swc1 $f1, 5($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "sdc1 $f0, 8($at)\n"
-      "addiu $at, $a0, 0x7FF8\n"
-      "addiu $at, $at, 0x7FF8\n"
-      "sdc1 $f0, 0x7FF8($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a0\n"
-      "sdc1 $f0, 0($at)\n"
-      "lui $at, 0xFFFE\n"
-      "ori $at, $at, 0x8010\n"
-      "addu $at, $at, $a0\n"
-      "swc1 $f0, 7($at)\n"
-      "swc1 $f1, 11($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FE8\n"
-      "addu $at, $at, $a0\n"
-      "swc1 $f0, 1($at)\n"
-      "swc1 $f1, 5($at)\n"
-      "lui $at, 0x1\n"
-      "ori $at, $at, 0x7FF0\n"
-      "addu $at, $at, $a0\n"
-      "sdc1 $f0, 0($at)\n"
-      "lui $at, 0x1234\n"
-      "ori $at, $at, 0x5678\n"
-      "addu $at, $at, $a0\n"
-      "sdc1 $f0, 0($at)\n";
-  DriverStr(expected, "StoreDToOffset");
-}
-
-TEST_F(AssemblerMIPSTest, StoreConstToOffset) {
-  __ StoreConstToOffset(mips::kStoreByte, 0xFF, mips::A1, +0, mips::T8);
-  __ StoreConstToOffset(mips::kStoreHalfword, 0xFFFF, mips::A1, +0, mips::T8);
-  __ StoreConstToOffset(mips::kStoreWord, 0x12345678, mips::A1, +0, mips::T8);
-  __ StoreConstToOffset(mips::kStoreDoubleword, 0x123456789ABCDEF0, mips::A1, +0, mips::T8);
-
-  __ StoreConstToOffset(mips::kStoreByte, 0, mips::A1, +0, mips::T8);
-  __ StoreConstToOffset(mips::kStoreHalfword, 0, mips::A1, +0, mips::T8);
-  __ StoreConstToOffset(mips::kStoreWord, 0, mips::A1, +0, mips::T8);
-  __ StoreConstToOffset(mips::kStoreDoubleword, 0, mips::A1, +0, mips::T8);
-
-  __ StoreConstToOffset(mips::kStoreDoubleword, 0x1234567812345678, mips::A1, +0, mips::T8);
-  __ StoreConstToOffset(mips::kStoreDoubleword, 0x1234567800000000, mips::A1, +0, mips::T8);
-  __ StoreConstToOffset(mips::kStoreDoubleword, 0x0000000012345678, mips::A1, +0, mips::T8);
-
-  __ StoreConstToOffset(mips::kStoreWord, 0, mips::T8, +0, mips::T8);
-  __ StoreConstToOffset(mips::kStoreWord, 0x12345678, mips::T8, +0, mips::T8);
-
-  __ StoreConstToOffset(mips::kStoreWord, 0, mips::A1, -0xFFF0, mips::T8);
-  __ StoreConstToOffset(mips::kStoreWord, 0x12345678, mips::A1, +0xFFF0, mips::T8);
-
-  __ StoreConstToOffset(mips::kStoreWord, 0, mips::T8, -0xFFF0, mips::T8);
-  __ StoreConstToOffset(mips::kStoreWord, 0x12345678, mips::T8, +0xFFF0, mips::T8);
-
-  const char* expected =
-      "ori $t8, $zero, 0xFF\n"
-      "sb $t8, 0($a1)\n"
-      "ori $t8, $zero, 0xFFFF\n"
-      "sh $t8, 0($a1)\n"
-      "lui $t8, 0x1234\n"
-      "ori $t8, $t8, 0x5678\n"
-      "sw $t8, 0($a1)\n"
-      "lui $t8, 0x9ABC\n"
-      "ori $t8, $t8, 0xDEF0\n"
-      "sw $t8, 0($a1)\n"
-      "lui $t8, 0x1234\n"
-      "ori $t8, $t8, 0x5678\n"
-      "sw $t8, 4($a1)\n"
-
-      "sb $zero, 0($a1)\n"
-      "sh $zero, 0($a1)\n"
-      "sw $zero, 0($a1)\n"
-      "sw $zero, 0($a1)\n"
-      "sw $zero, 4($a1)\n"
-
-      "lui $t8, 0x1234\n"
-      "ori $t8, $t8, 0x5678\n"
-      "sw $t8, 0($a1)\n"
-      "sw $t8, 4($a1)\n"
-      "sw $zero, 0($a1)\n"
-      "lui $t8, 0x1234\n"
-      "ori $t8, $t8, 0x5678\n"
-      "sw $t8, 4($a1)\n"
-      "lui $t8, 0x1234\n"
-      "ori $t8, $t8, 0x5678\n"
-      "sw $t8, 0($a1)\n"
-      "sw $zero, 4($a1)\n"
-
-      "sw $zero, 0($t8)\n"
-      "lui $at, 0x1234\n"
-      "ori $at, $at, 0x5678\n"
-      "sw $at, 0($t8)\n"
-
-      "addiu $at, $a1, -0x7FF8\n"
-      "sw $zero, -0x7FF8($at)\n"
-      "addiu $at, $a1, 0x7FF8\n"
-      "lui $t8, 0x1234\n"
-      "ori $t8, $t8, 0x5678\n"
-      "sw $t8, 0x7FF8($at)\n"
-
-      "addiu $at, $t8, -0x7FF8\n"
-      "sw $zero, -0x7FF8($at)\n"
-      "addiu $at, $t8, 0x7FF8\n"
-      "lui $t8, 0x1234\n"
-      "ori $t8, $t8, 0x5678\n"
-      "sw $t8, 0x7FF8($at)\n";
-  DriverStr(expected, "StoreConstToOffset");
-}
-
-//////////////
-// BRANCHES //
-//////////////
-
-TEST_F(AssemblerMIPSTest, B) {
-  BranchHelper(&mips::MipsAssembler::B, "B");
-}
-
-TEST_F(AssemblerMIPSTest, Bal) {
-  BranchHelper(&mips::MipsAssembler::Bal, "Bal");
-}
-
-TEST_F(AssemblerMIPSTest, Beq) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq");
-}
-
-TEST_F(AssemblerMIPSTest, Bne) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne");
-}
-
-TEST_F(AssemblerMIPSTest, Beqz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqz");
-}
-
-TEST_F(AssemblerMIPSTest, Bnez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnez");
-}
-
-TEST_F(AssemblerMIPSTest, Bltz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz");
-}
-
-TEST_F(AssemblerMIPSTest, Bgez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez");
-}
-
-TEST_F(AssemblerMIPSTest, Blez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez");
-}
-
-TEST_F(AssemblerMIPSTest, Bgtz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz");
-}
-
-TEST_F(AssemblerMIPSTest, Blt) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Blt");
-}
-
-TEST_F(AssemblerMIPSTest, Bge) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bge");
-}
-
-TEST_F(AssemblerMIPSTest, Bltu) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltu");
-}
-
-TEST_F(AssemblerMIPSTest, Bgeu) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeu");
-}
-
-TEST_F(AssemblerMIPSTest, Bc1f) {
-  BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1f, "Bc1f");
-}
-
-TEST_F(AssemblerMIPSTest, Bc1t) {
-  BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1t, "Bc1t");
-}
-
-TEST_F(AssemblerMIPSTest, BareB) {
-  BranchHelper(&mips::MipsAssembler::B, "B", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPSTest, BareBal) {
-  BranchHelper(&mips::MipsAssembler::Bal, "Bal", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPSTest, BareBeq) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Beq, "Beq", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPSTest, BareBne) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bne, "Bne", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPSTest, BareBeqz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Beqz, "Beqz", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPSTest, BareBnez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bnez, "Bnez", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPSTest, BareBltz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bltz, "Bltz", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPSTest, BareBgez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgez, "Bgez", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPSTest, BareBlez) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Blez, "Blez", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPSTest, BareBgtz) {
-  BranchCondOneRegHelper(&mips::MipsAssembler::Bgtz, "Bgtz", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPSTest, BareBlt) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Blt, "Blt", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPSTest, BareBge) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bge, "Bge", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPSTest, BareBltu) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bltu, "Bltu", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPSTest, BareBgeu) {
-  BranchCondTwoRegsHelper(&mips::MipsAssembler::Bgeu, "Bgeu", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPSTest, BareBc1f) {
-  BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1f, "Bc1f", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPSTest, BareBc1t) {
-  BranchFpuCondCodeHelper(&mips::MipsAssembler::Bc1t, "Bc1t", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPSTest, ImpossibleReordering) {
-  mips::MipsLabel label1, label2;
-  __ SetReorder(true);
-
-  __ B(&label1);  // No preceding or target instruction for the delay slot.
-
-  __ Addu(mips::T0, mips::T1, mips::T2);
-  __ Bind(&label1);
-  __ B(&label1);  // The preceding label prevents moving Addu into the delay slot.
-  __ B(&label1);  // No preceding or target instruction for the delay slot.
-
-  __ Addu(mips::T0, mips::T1, mips::T2);
-  __ Beqz(mips::T0, &label1);  // T0 dependency.
-
-  __ Or(mips::T1, mips::T2, mips::T3);
-  __ Bne(mips::T2, mips::T1, &label1);  // T1 dependency.
-
-  __ And(mips::T0, mips::T1, mips::T2);
-  __ Blt(mips::T1, mips::T0, &label1);  // T0 dependency.
-
-  __ Xor(mips::AT, mips::T0, mips::T1);
-  __ Bge(mips::T1, mips::T0, &label1);  // AT dependency.
-
-  __ Subu(mips::T0, mips::T1, mips::AT);
-  __ Bltu(mips::T1, mips::T0, &label1);  // AT dependency.
-
-  __ ColtS(1, mips::F2, mips::F4);
-  __ Bc1t(1, &label1);  // cc1 dependency.
-
-  __ Move(mips::T0, mips::RA);
-  __ Bal(&label1);  // RA dependency.
-
-  __ Lw(mips::RA, mips::T0, 0);
-  __ Bal(&label1);  // RA dependency.
-
-  __ LlR2(mips::T9, mips::T0, 0);
-  __ Jalr(mips::T9);  // T9 dependency.
-
-  __ Sw(mips::RA, mips::T0, 0);
-  __ Jalr(mips::T9);  // RA dependency.
-
-  __ Lw(mips::T1, mips::T0, 0);
-  __ Jalr(mips::T1, mips::T9);  // T1 dependency.
-
-  __ ScR2(mips::T9, mips::T0, 0);
-  __ Jr(mips::T9);  // T9 dependency.
-
-  __ Bind(&label2);
-
-  __ Bnez(mips::T0, &label2);  // No preceding instruction for the delay slot.
-
-  __ Bgeu(mips::T1, mips::T0, &label2);  // No preceding instruction for the delay slot.
-
-  __ Bc1f(2, &label2);  // No preceding instruction for the delay slot.
-
-  __ Bal(&label2);  // No preceding instruction for the delay slot.
-
-  __ Jalr(mips::T9);  // No preceding instruction for the delay slot.
-
-  __ Addu(mips::T0, mips::T1, mips::T2);
-  __ CodePosition();  // Drops the delay slot candidate (the last instruction).
-  __ Beq(mips::T1, mips::T2, &label2);  // No preceding or target instruction for the delay slot.
-
-  std::string expected =
-      ".set noreorder\n"
-      "b 1f\n"
-      "nop\n"
-
-      "addu $t0, $t1, $t2\n"
-      "1:\n"
-      "b 1b\n"
-      "nop\n"
-      "b 1b\n"
-      "nop\n"
-
-      "addu $t0, $t1, $t2\n"
-      "beqz $t0, 1b\n"
-      "nop\n"
-
-      "or $t1, $t2, $t3\n"
-      "bne $t2, $t1, 1b\n"
-      "nop\n"
-
-      "and $t0, $t1, $t2\n"
-      "slt $at, $t1, $t0\n"
-      "bnez $at, 1b\n"
-      "nop\n"
-
-      "xor $at, $t0, $t1\n"
-      "slt $at, $t1, $t0\n"
-      "beqz $at, 1b\n"
-      "nop\n"
-
-      "subu $t0, $t1, $at\n"
-      "sltu $at, $t1, $t0\n"
-      "bnez $at, 1b\n"
-      "nop\n"
-
-      "c.olt.s $fcc1, $f2, $f4\n"
-      "bc1t $fcc1, 1b\n"
-      "nop\n"
-
-      "or $t0, $ra, $zero\n"
-      "bal 1b\n"
-      "nop\n"
-
-      "lw $ra, 0($t0)\n"
-      "bal 1b\n"
-      "nop\n"
-
-      "ll $t9, 0($t0)\n"
-      "jalr $t9\n"
-      "nop\n"
-
-      "sw $ra, 0($t0)\n"
-      "jalr $t9\n"
-      "nop\n"
-
-      "lw $t1, 0($t0)\n"
-      "jalr $t1, $t9\n"
-      "nop\n"
-
-      "sc $t9, 0($t0)\n"
-      "jalr $zero, $t9\n"
-      "nop\n"
-
-      "2:\n"
-
-      "bnez $t0, 2b\n"
-      "nop\n"
-
-      "sltu $at, $t1, $t0\n"
-      "beqz $at, 2b\n"
-      "nop\n"
-
-      "bc1f $fcc2, 2b\n"
-      "nop\n"
-
-      "bal 2b\n"
-      "nop\n"
-
-      "jalr $t9\n"
-      "nop\n"
-
-      "addu $t0, $t1, $t2\n"
-      "beq $t1, $t2, 2b\n"
-      "nop\n";
-  DriverStr(expected, "ImpossibleReordering");
-}
-
-TEST_F(AssemblerMIPSTest, Reordering) {
-  mips::MipsLabel label1, label2;
-  __ SetReorder(true);
-
-  __ Bind(&label1);
-  __ Bind(&label2);
-
-  __ Addu(mips::T0, mips::T1, mips::T2);
-  __ Beqz(mips::T1, &label1);
-
-  __ Or(mips::T1, mips::T2, mips::T3);
-  __ Bne(mips::T2, mips::T3, &label1);
-
-  __ And(mips::T0, mips::T1, mips::T2);
-  __ Blt(mips::T1, mips::T2, &label1);
-
-  __ Xor(mips::T2, mips::T0, mips::T1);
-  __ Bge(mips::T1, mips::T0, &label1);
-
-  __ Subu(mips::T2, mips::T1, mips::T0);
-  __ Bltu(mips::T1, mips::T0, &label1);
-
-  __ ColtS(0, mips::F2, mips::F4);
-  __ Bc1t(1, &label1);
-
-  __ Move(mips::T0, mips::T1);
-  __ Bal(&label1);
-
-  __ LlR2(mips::T1, mips::T0, 0);
-  __ Jalr(mips::T9);
-
-  __ ScR2(mips::T1, mips::T0, 0);
-  __ Jr(mips::T9);
-
-  std::string expected =
-      ".set noreorder\n"
-      "1:\n"
-
-      "beqz $t1, 1b\n"
-      "addu $t0, $t1, $t2\n"
-
-      "bne $t2, $t3, 1b\n"
-      "or $t1, $t2, $t3\n"
-
-      "slt $at, $t1, $t2\n"
-      "bnez $at, 1b\n"
-      "and $t0, $t1, $t2\n"
-
-      "slt $at, $t1, $t0\n"
-      "beqz $at, 1b\n"
-      "xor $t2, $t0, $t1\n"
-
-      "sltu $at, $t1, $t0\n"
-      "bnez $at, 1b\n"
-      "subu $t2, $t1, $t0\n"
-
-      "bc1t $fcc1, 1b\n"
-      "c.olt.s $fcc0, $f2, $f4\n"
-
-      "bal 1b\n"
-      "or $t0, $t1, $zero\n"
-
-      "jalr $t9\n"
-      "ll $t1, 0($t0)\n"
-
-      "jalr $zero, $t9\n"
-      "sc $t1, 0($t0)\n";
-  DriverStr(expected, "Reordering");
-}
-
-TEST_F(AssemblerMIPSTest, AbsorbTargetInstruction) {
-  mips::MipsLabel label1, label2, label3, label4, label5, label6;
-  mips::MipsLabel label7, label8, label9, label10, label11, label12, label13;
-  __ SetReorder(true);
-
-  __ B(&label1);
-  __ Bind(&label1);
-  __ Addu(mips::T0, mips::T1, mips::T2);
-
-  __ Bind(&label2);
-  __ Xor(mips::T0, mips::T1, mips::T2);
-  __ Addu(mips::T0, mips::T1, mips::T2);
-  __ Bind(&label3);  // Prevents reordering ADDU above with B below.
-  __ B(&label2);
-
-  __ B(&label4);
-  __ Bind(&label4);
-  __ Addu(mips::T0, mips::T1, mips::T2);
-  __ CodePosition();  // Prevents absorbing ADDU above.
-
-  __ B(&label5);
-  __ Bind(&label5);
-  __ Addu(mips::T0, mips::T1, mips::T2);
-  __ Bind(&label6);
-  __ CodePosition();  // Even across Bind(), CodePosition() prevents absorbing the ADDU above.
-
-  __ Nop();
-  __ B(&label7);
-  __ Bind(&label7);
-  __ Lw(mips::V0, mips::A0, 0x5678);  // Possibly patchable instruction, not absorbed.
-
-  __ Nop();
-  __ B(&label8);
-  __ Bind(&label8);
-  __ Sw(mips::V0, mips::A0, 0x5678);  // Possibly patchable instruction, not absorbed.
-
-  __ Nop();
-  __ B(&label9);
-  __ Bind(&label9);
-  __ Addiu(mips::V0, mips::A0, 0x5678);  // Possibly patchable instruction, not absorbed.
-
-  __ Nop();
-  __ B(&label10);
-  __ Bind(&label10);
-  __ Lw(mips::V0, mips::A0, 0x5680);  // Immediate isn't 0x5678, absorbed.
-
-  __ Nop();
-  __ B(&label11);
-  __ Bind(&label11);
-  __ Sw(mips::V0, mips::A0, 0x5680);  // Immediate isn't 0x5678, absorbed.
-
-  __ Nop();
-  __ B(&label12);
-  __ Bind(&label12);
-  __ Addiu(mips::V0, mips::A0, 0x5680);  // Immediate isn't 0x5678, absorbed.
-
-  __ Nop();
-  __ B(&label13);
-  __ Bind(&label13);
-  __ Andi(mips::V0, mips::A0, 0x5678);  // Not one of patchable instructions, absorbed.
-
-  std::string expected =
-      ".set noreorder\n"
-      "b 1f\n"
-      "addu $t0, $t1, $t2\n"
-      "addu $t0, $t1, $t2\n"
-      "1:\n"
-
-      "xor $t0, $t1, $t2\n"
-      "2:\n"
-      "addu $t0, $t1, $t2\n"
-      "b 2b\n"
-      "xor $t0, $t1, $t2\n"
-
-      "b 4f\n"
-      "nop\n"
-      "4:\n"
-      "addu $t0, $t1, $t2\n"
-
-      "b 5f\n"
-      "nop\n"
-      "5:\n"
-      "addu $t0, $t1, $t2\n"
-
-      "nop\n"
-      "b 7f\n"
-      "nop\n"
-      "7:\n"
-      "lw $v0, 0x5678($a0)\n"
-
-      "nop\n"
-      "b 8f\n"
-      "nop\n"
-      "8:\n"
-      "sw $v0, 0x5678($a0)\n"
-
-      "nop\n"
-      "b 9f\n"
-      "nop\n"
-      "9:\n"
-      "addiu $v0, $a0, 0x5678\n"
-
-      "nop\n"
-      "b 10f\n"
-      "lw $v0, 0x5680($a0)\n"
-      "lw $v0, 0x5680($a0)\n"
-      "10:\n"
-
-      "nop\n"
-      "b 11f\n"
-      "sw $v0, 0x5680($a0)\n"
-      "sw $v0, 0x5680($a0)\n"
-      "11:\n"
-
-      "nop\n"
-      "b 12f\n"
-      "addiu $v0, $a0, 0x5680\n"
-      "addiu $v0, $a0, 0x5680\n"
-      "12:\n"
-
-      "nop\n"
-      "b 13f\n"
-      "andi $v0, $a0, 0x5678\n"
-      "andi $v0, $a0, 0x5678\n"
-      "13:\n";
-  DriverStr(expected, "AbsorbTargetInstruction");
-}
-
-TEST_F(AssemblerMIPSTest, SetReorder) {
-  mips::MipsLabel label1, label2, label3, label4, label5, label6;
-
-  __ SetReorder(true);
-  __ Bind(&label1);
-  __ Addu(mips::T0, mips::T1, mips::T2);
-  __ B(&label1);
-  __ B(&label5);
-  __ B(&label6);
-
-  __ SetReorder(false);
-  __ Bind(&label2);
-  __ Addu(mips::T0, mips::T1, mips::T2);
-  __ B(&label2);
-  __ B(&label5);
-  __ B(&label6);
-
-  __ SetReorder(true);
-  __ Bind(&label3);
-  __ Addu(mips::T0, mips::T1, mips::T2);
-  __ B(&label3);
-  __ B(&label5);
-  __ B(&label6);
-
-  __ SetReorder(false);
-  __ Bind(&label4);
-  __ Addu(mips::T0, mips::T1, mips::T2);
-  __ B(&label4);
-  __ B(&label5);
-  __ B(&label6);
-
-  __ SetReorder(true);
-  __ Bind(&label5);
-  __ Subu(mips::T0, mips::T1, mips::T2);
-
-  __ SetReorder(false);
-  __ Bind(&label6);
-  __ Xor(mips::T0, mips::T1, mips::T2);
-
-  std::string expected =
-      ".set noreorder\n"
-      "1:\n"
-      "b 1b\n"
-      "addu $t0, $t1, $t2\n"
-      "b 55f\n"
-      "subu $t0, $t1, $t2\n"
-      "b 6f\n"
-      "nop\n"
-
-      "2:\n"
-      "addu $t0, $t1, $t2\n"
-      "b 2b\n"
-      "nop\n"
-      "b 5f\n"
-      "nop\n"
-      "b 6f\n"
-      "nop\n"
-
-      "3:\n"
-      "b 3b\n"
-      "addu $t0, $t1, $t2\n"
-      "b 55f\n"
-      "subu $t0, $t1, $t2\n"
-      "b 6f\n"
-      "nop\n"
-
-      "4:\n"
-      "addu $t0, $t1, $t2\n"
-      "b 4b\n"
-      "nop\n"
-      "b 5f\n"
-      "nop\n"
-      "b 6f\n"
-      "nop\n"
-
-      "5:\n"
-      "subu $t0, $t1, $t2\n"
-      "55:\n"
-      "6:\n"
-      "xor $t0, $t1, $t2\n";
-  DriverStr(expected, "SetReorder");
-}
-
-TEST_F(AssemblerMIPSTest, ReorderPatchedInstruction) {
-  __ SetReorder(true);
-  mips::MipsLabel label1, label2;
-  mips::MipsLabel patcher_label1, patcher_label2, patcher_label3, patcher_label4, patcher_label5;
-  __ Lw(mips::V0, mips::A0, 0x5678, &patcher_label1);
-  __ Beq(mips::A0, mips::A1, &label1);
-  constexpr uint32_t kAdduCount1 = 63;
-  for (size_t i = 0; i != kAdduCount1; ++i) {
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-  }
-  __ Bind(&label1);
-  __ Sw(mips::V0, mips::A0, 0x5678, &patcher_label2);
-  __ Bltz(mips::V1, &label2);
-  constexpr uint32_t kAdduCount2 = 64;
-  for (size_t i = 0; i != kAdduCount2; ++i) {
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-  }
-  __ Bind(&label2);
-  __ Addiu(mips::V0, mips::A0, 0x5678, &patcher_label3);
-  __ B(&label1);
-  __ Lw(mips::V0, mips::A0, 0x5678, &patcher_label4);
-  __ Jalr(mips::T9);
-  __ Sw(mips::V0, mips::A0, 0x5678, &patcher_label5);
-  __ Blt(mips::V0, mips::V1, &label2);
-  __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-
-  std::string expected =
-      ".set noreorder\n"
-      "beq $a0, $a1, 1f\n"
-      "lw $v0, 0x5678($a0)\n" +
-      RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
-      "1:\n"
-      "bltz $v1, 2f\n"
-      "sw $v0, 0x5678($a0)\n" +
-      RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
-      "2:\n"
-      "b 1b\n"
-      "addiu $v0, $a0, 0x5678\n"
-      "jalr $t9\n"
-      "lw $v0, 0x5678($a0)\n"
-      "slt $at, $v0, $v1\n"
-      "bnez $at, 2b\n"
-      "sw $v0, 0x5678($a0)\n"
-      "addu $zero, $zero, $zero\n";
-  DriverStr(expected, "ReorderPatchedInstruction");
-  EXPECT_EQ(__ GetLabelLocation(&patcher_label1), 1 * 4u);
-  EXPECT_EQ(__ GetLabelLocation(&patcher_label2), (kAdduCount1 + 3) * 4u);
-  EXPECT_EQ(__ GetLabelLocation(&patcher_label3), (kAdduCount1 + kAdduCount2 + 5) * 4u);
-  EXPECT_EQ(__ GetLabelLocation(&patcher_label4), (kAdduCount1 + kAdduCount2 + 7) * 4u);
-  EXPECT_EQ(__ GetLabelLocation(&patcher_label5), (kAdduCount1 + kAdduCount2 + 10) * 4u);
-}
-
-TEST_F(AssemblerMIPSTest, LongBranchReorder) {
-  mips::MipsLabel label, patcher_label1, patcher_label2;
-  __ SetReorder(true);
-  __ Addiu(mips::T0, mips::T1, 0x5678, &patcher_label1);
-  __ B(&label);
-  constexpr uint32_t kAdduCount1 = (1u << 15) + 1;
-  for (size_t i = 0; i != kAdduCount1; ++i) {
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-  }
-  __ Bind(&label);
-  constexpr uint32_t kAdduCount2 = (1u << 15) + 1;
-  for (size_t i = 0; i != kAdduCount2; ++i) {
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-  }
-  __ Addiu(mips::T0, mips::T1, 0x5678, &patcher_label2);
-  __ B(&label);
-
-  // Account for 5 extra instructions: ori, addu, lw, jalr, addiu.
-  uint32_t offset_forward = (kAdduCount1 + 5) * sizeof(uint32_t);
-  // Account for 5 extra instructions: subu, addiu, sw, nal, lui.
-  uint32_t offset_back = static_cast<uint32_t>(-(kAdduCount1 + 5) * sizeof(uint32_t));
-
-  std::ostringstream oss;
-  oss <<
-      ".set noreorder\n"
-      "addiu $t0, $t1, 0x5678\n"
-      "addiu $sp, $sp, -16\n"
-      "sw $ra, 0($sp)\n"
-      "bltzal $zero, .+4\n"
-      "lui $at, 0x" << std::hex << High16Bits(offset_forward) << "\n"
-      "ori $at, $at, 0x" << std::hex << Low16Bits(offset_forward) << "\n"
-      "addu $at, $at, $ra\n"
-      "lw $ra, 0($sp)\n"
-      "jalr $zero, $at\n"
-      "addiu $sp, $sp, 16\n" <<
-      RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") <<
-      RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") <<
-      "addiu $t0, $t1, 0x5678\n"
-      "addiu $sp, $sp, -16\n"
-      "sw $ra, 0($sp)\n"
-      "bltzal $zero, .+4\n"
-      "lui $at, 0x" << std::hex << High16Bits(offset_back) << "\n"
-      "ori $at, $at, 0x" << std::hex << Low16Bits(offset_back) << "\n"
-      "addu $at, $at, $ra\n"
-      "lw $ra, 0($sp)\n"
-      "jalr $zero, $at\n"
-      "addiu $sp, $sp, 16\n";
-  std::string expected = oss.str();
-  DriverStr(expected, "LongBranchReorder");
-  EXPECT_EQ(__ GetLabelLocation(&patcher_label1), 0 * 4u);
-  EXPECT_EQ(__ GetLabelLocation(&patcher_label2), (kAdduCount1 + kAdduCount2 + 10) * 4u);
-}
-
-///////////////////////
-// Loading Constants //
-///////////////////////
-
-TEST_F(AssemblerMIPSTest, LoadConst32) {
-  // IsUint<16>(value)
-  __ LoadConst32(mips::V0, 0);
-  __ LoadConst32(mips::V0, 65535);
-  // IsInt<16>(value)
-  __ LoadConst32(mips::V0, -1);
-  __ LoadConst32(mips::V0, -32768);
-  // Everything else
-  __ LoadConst32(mips::V0, 65536);
-  __ LoadConst32(mips::V0, 65537);
-  __ LoadConst32(mips::V0, 2147483647);
-  __ LoadConst32(mips::V0, -32769);
-  __ LoadConst32(mips::V0, -65536);
-  __ LoadConst32(mips::V0, -65537);
-  __ LoadConst32(mips::V0, -2147483647);
-  __ LoadConst32(mips::V0, -2147483648);
-
-  const char* expected =
-      // IsUint<16>(value)
-      "ori $v0, $zero, 0\n"         // __ LoadConst32(mips::V0, 0);
-      "ori $v0, $zero, 65535\n"     // __ LoadConst32(mips::V0, 65535);
-      // IsInt<16>(value)
-      "addiu $v0, $zero, -1\n"      // __ LoadConst32(mips::V0, -1);
-      "addiu $v0, $zero, -32768\n"  // __ LoadConst32(mips::V0, -32768);
-      // Everything else
-      "lui $v0, 1\n"                // __ LoadConst32(mips::V0, 65536);
-      "lui $v0, 1\n"                // __ LoadConst32(mips::V0, 65537);
-      "ori $v0, 1\n"                //                 "
-      "lui $v0, 32767\n"            // __ LoadConst32(mips::V0, 2147483647);
-      "ori $v0, 65535\n"            //                 "
-      "lui $v0, 65535\n"            // __ LoadConst32(mips::V0, -32769);
-      "ori $v0, 32767\n"            //                 "
-      "lui $v0, 65535\n"            // __ LoadConst32(mips::V0, -65536);
-      "lui $v0, 65534\n"            // __ LoadConst32(mips::V0, -65537);
-      "ori $v0, 65535\n"            //                 "
-      "lui $v0, 32768\n"            // __ LoadConst32(mips::V0, -2147483647);
-      "ori $v0, 1\n"                //                 "
-      "lui $v0, 32768\n";           // __ LoadConst32(mips::V0, -2147483648);
-  DriverStr(expected, "LoadConst32");
-}
-
-TEST_F(AssemblerMIPSTest, LoadFarthestNearLabelAddress) {
-  mips::MipsLabel label;
-  __ BindPcRelBaseLabel();
-  __ LoadLabelAddress(mips::V0, mips::V1, &label);
-  constexpr size_t kAddiuCount = 0x1FDE;
-  for (size_t i = 0; i != kAddiuCount; ++i) {
-    __ Addiu(mips::A0, mips::A1, 0);
-  }
-  __ Bind(&label);
-
-  std::string expected =
-      "1:\n"
-      "addiu $v0, $v1, %lo(2f - 1b)\n" +
-      RepeatInsn(kAddiuCount, "addiu $a0, $a1, %hi(2f - 1b)\n") +
-      "2:\n";
-  DriverStr(expected, "LoadFarthestNearLabelAddress");
-}
-
-TEST_F(AssemblerMIPSTest, LoadNearestFarLabelAddress) {
-  mips::MipsLabel label;
-  __ BindPcRelBaseLabel();
-  __ LoadLabelAddress(mips::V0, mips::V1, &label);
-  constexpr size_t kAdduCount = 0x1FDF;
-  for (size_t i = 0; i != kAdduCount; ++i) {
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-  }
-  __ Bind(&label);
-
-  std::string expected =
-      "1:\n"
-      "lui $at, %hi(2f - 1b)\n"
-      "ori $at, $at, %lo(2f - 1b)\n"
-      "addu $v0, $at, $v1\n" +
-      RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
-      "2:\n";
-  DriverStr(expected, "LoadNearestFarLabelAddress");
-}
-
-TEST_F(AssemblerMIPSTest, LoadFarthestNearLabelAddressUsingNal) {
-  mips::MipsLabel label;
-  __ LoadLabelAddress(mips::V0, mips::ZERO, &label);
-  constexpr size_t kAddiuCount = 0x1FDE;
-  for (size_t i = 0; i != kAddiuCount; ++i) {
-    __ Addiu(mips::A0, mips::A1, 0);
-  }
-  __ Bind(&label);
-
-  std::string expected =
-      ".set noreorder\n"
-      "bltzal $zero, .+4\n"
-      "addiu $v0, $ra, %lo(2f - 1f)\n"
-      "1:\n" +
-      RepeatInsn(kAddiuCount, "addiu $a0, $a1, %hi(2f - 1b)\n") +
-      "2:\n";
-  DriverStr(expected, "LoadFarthestNearLabelAddressUsingNal");
-}
-
-TEST_F(AssemblerMIPSTest, LoadNearestFarLabelAddressUsingNal) {
-  mips::MipsLabel label;
-  __ LoadLabelAddress(mips::V0, mips::ZERO, &label);
-  constexpr size_t kAdduCount = 0x1FDF;
-  for (size_t i = 0; i != kAdduCount; ++i) {
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-  }
-  __ Bind(&label);
-
-  std::string expected =
-      ".set noreorder\n"
-      "bltzal $zero, .+4\n"
-      "lui $at, %hi(2f - 1f)\n"
-      "1:\n"
-      "ori $at, $at, %lo(2f - 1b)\n"
-      "addu $v0, $at, $ra\n" +
-      RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
-      "2:\n";
-  DriverStr(expected, "LoadNearestFarLabelAddressUsingNal");
-}
-
-TEST_F(AssemblerMIPSTest, LoadFarthestNearLiteral) {
-  mips::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
-  __ BindPcRelBaseLabel();
-  __ LoadLiteral(mips::V0, mips::V1, literal);
-  constexpr size_t kAddiuCount = 0x1FDE;
-  for (size_t i = 0; i != kAddiuCount; ++i) {
-    __ Addiu(mips::A0, mips::A1, 0);
-  }
-
-  std::string expected =
-      "1:\n"
-      "lw $v0, %lo(2f - 1b)($v1)\n" +
-      RepeatInsn(kAddiuCount, "addiu $a0, $a1, %hi(2f - 1b)\n") +
-      "2:\n"
-      ".word 0x12345678\n";
-  DriverStr(expected, "LoadFarthestNearLiteral");
-}
-
-TEST_F(AssemblerMIPSTest, LoadNearestFarLiteral) {
-  mips::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
-  __ BindPcRelBaseLabel();
-  __ LoadLiteral(mips::V0, mips::V1, literal);
-  constexpr size_t kAdduCount = 0x1FDF;
-  for (size_t i = 0; i != kAdduCount; ++i) {
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-  }
-
-  std::string expected =
-      "1:\n"
-      "lui $at, %hi(2f - 1b)\n"
-      "addu $at, $at, $v1\n"
-      "lw $v0, %lo(2f - 1b)($at)\n" +
-      RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
-      "2:\n"
-      ".word 0x12345678\n";
-  DriverStr(expected, "LoadNearestFarLiteral");
-}
-
-TEST_F(AssemblerMIPSTest, LoadFarthestNearLiteralUsingNal) {
-  mips::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
-  __ LoadLiteral(mips::V0, mips::ZERO, literal);
-  constexpr size_t kAddiuCount = 0x1FDE;
-  for (size_t i = 0; i != kAddiuCount; ++i) {
-    __ Addiu(mips::A0, mips::A1, 0);
-  }
-
-  std::string expected =
-      ".set noreorder\n"
-      "bltzal $zero, .+4\n"
-      "lw $v0, %lo(2f - 1f)($ra)\n"
-      "1:\n" +
-      RepeatInsn(kAddiuCount, "addiu $a0, $a1, %hi(2f - 1b)\n") +
-      "2:\n"
-      ".word 0x12345678\n";
-  DriverStr(expected, "LoadFarthestNearLiteralUsingNal");
-}
-
-TEST_F(AssemblerMIPSTest, LoadNearestFarLiteralUsingNal) {
-  mips::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
-  __ LoadLiteral(mips::V0, mips::ZERO, literal);
-  constexpr size_t kAdduCount = 0x1FDF;
-  for (size_t i = 0; i != kAdduCount; ++i) {
-    __ Addu(mips::ZERO, mips::ZERO, mips::ZERO);
-  }
-
-  std::string expected =
-      ".set noreorder\n"
-      "bltzal $zero, .+4\n"
-      "lui $at, %hi(2f - 1f)\n"
-      "1:\n"
-      "addu $at, $at, $ra\n"
-      "lw $v0, %lo(2f - 1b)($at)\n" +
-      RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
-      "2:\n"
-      ".word 0x12345678\n";
-  DriverStr(expected, "LoadNearestFarLiteralUsingNal");
-}
-
-#undef __
-
-}  // namespace art
diff --git a/compiler/utils/mips/constants_mips.h b/compiler/utils/mips/constants_mips.h
deleted file mode 100644
index 07d8b7d..0000000
--- a/compiler/utils/mips/constants_mips.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_MIPS_CONSTANTS_MIPS_H_
-#define ART_COMPILER_UTILS_MIPS_CONSTANTS_MIPS_H_
-
-#include <iosfwd>
-
-#include <android-base/logging.h>
-
-#include "arch/mips/registers_mips.h"
-#include "base/globals.h"
-#include "base/macros.h"
-
-namespace art {
-namespace mips {
-
-// Values for double-precision floating point registers.
-enum DRegister {
-  D0  =  0,
-  D1  =  1,
-  D2  =  2,
-  D3  =  3,
-  D4  =  4,
-  D5  =  5,
-  D6  =  6,
-  D7  =  7,
-  D8  =  8,
-  D9  =  9,
-  D10 = 10,
-  D11 = 11,
-  D12 = 12,
-  D13 = 13,
-  D14 = 14,
-  D15 = 15,
-  kNumberOfDRegisters = 16,
-  kNumberOfOverlappingDRegisters = 16,
-  kNoDRegister = -1,
-};
-std::ostream& operator<<(std::ostream& os, const DRegister& rhs);
-
-// Constants used for the decoding or encoding of the individual fields of instructions.
-enum InstructionFields {
-  kOpcodeShift = 26,
-  kOpcodeBits = 6,
-  kRsShift = 21,
-  kRsBits = 5,
-  kRtShift = 16,
-  kRtBits = 5,
-  kRdShift = 11,
-  kRdBits = 5,
-  kShamtShift = 6,
-  kShamtBits = 5,
-  kFunctShift = 0,
-  kFunctBits = 6,
-
-  kFmtShift = 21,
-  kFmtBits = 5,
-  kFtShift = 16,
-  kFtBits = 5,
-  kFsShift = 11,
-  kFsBits = 5,
-  kFdShift = 6,
-  kFdBits = 5,
-
-  kMsaOperationShift = 23,
-  kMsaELMOperationShift = 22,
-  kMsa2ROperationShift = 18,
-  kMsa2RFOperationShift = 17,
-  kDfShift = 21,
-  kDfMShift = 16,
-  kDf2RShift = 16,
-  kDfNShift = 16,
-  kWtShift = 16,
-  kWtBits = 5,
-  kWsShift = 11,
-  kWsBits = 5,
-  kWdShift = 6,
-  kWdBits = 5,
-  kS10Shift = 16,
-  kI10Shift = 11,
-  kS10MinorShift = 2,
-
-  kBranchOffsetMask = 0x0000ffff,
-  kJumpOffsetMask = 0x03ffffff,
-
-  kMsaMajorOpcode = 0x1e,
-  kMsaDfMByteMask = 0x70,
-  kMsaDfMHalfwordMask = 0x60,
-  kMsaDfMWordMask = 0x40,
-  kMsaDfMDoublewordMask = 0x00,
-  kMsaDfNByteMask = 0x00,
-  kMsaDfNHalfwordMask = 0x20,
-  kMsaDfNWordMask = 0x30,
-  kMsaDfNDoublewordMask = 0x38,
-  kMsaS10Mask = 0x3ff,
-};
-
-enum ScaleFactor {
-  TIMES_1 = 0,
-  TIMES_2 = 1,
-  TIMES_4 = 2,
-  TIMES_8 = 3
-};
-
-class Instr {
- public:
-  static const uint32_t kBreakPointInstruction = 0x0000000D;
-
-  bool IsBreakPoint() {
-    return ((*reinterpret_cast<const uint32_t*>(this)) & 0xFC0000CF) == kBreakPointInstruction;
-  }
-
-  // Instructions are read out of a code stream. The only way to get a
-  // reference to an instruction is to convert a pointer. There is no way
-  // to allocate or create instances of class Instr.
-  // Use the At(pc) function to create references to Instr.
-  static Instr* At(uintptr_t pc) { return reinterpret_cast<Instr*>(pc); }
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
-};
-
-}  // namespace mips
-}  // namespace art
-
-#endif  // ART_COMPILER_UTILS_MIPS_CONSTANTS_MIPS_H_
diff --git a/compiler/utils/mips/managed_register_mips.cc b/compiler/utils/mips/managed_register_mips.cc
deleted file mode 100644
index 9b3ed79..0000000
--- a/compiler/utils/mips/managed_register_mips.cc
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "managed_register_mips.h"
-
-#include "base/globals.h"
-
-namespace art {
-namespace mips {
-
-bool MipsManagedRegister::Overlaps(const MipsManagedRegister& other) const {
-  if (IsNoRegister() || other.IsNoRegister()) return false;
-  CHECK(IsValidManagedRegister());
-  CHECK(other.IsValidManagedRegister());
-  if (Equals(other)) return true;
-  if (IsRegisterPair()) {
-    Register low = AsRegisterPairLow();
-    Register high = AsRegisterPairHigh();
-    return MipsManagedRegister::FromCoreRegister(low).Overlaps(other) ||
-        MipsManagedRegister::FromCoreRegister(high).Overlaps(other);
-  }
-  if (IsOverlappingDRegister()) {
-    if (other.IsDRegister()) return Equals(other);
-    if (other.IsFRegister()) {
-      FRegister low = AsOverlappingDRegisterLow();
-      FRegister high = AsOverlappingDRegisterHigh();
-      FRegister other_freg = other.AsFRegister();
-      return (low == other_freg) || (high == other_freg);
-    }
-    return false;
-  }
-  if (other.IsRegisterPair() || other.IsOverlappingDRegister()) {
-    return other.Overlaps(*this);
-  }
-  return false;
-}
-
-
-int MipsManagedRegister::AllocIdLow() const {
-  CHECK(IsOverlappingDRegister() || IsRegisterPair());
-  const int r = RegId() - (kNumberOfCoreRegIds + kNumberOfFRegIds);
-  int low;
-  if (r < kNumberOfOverlappingDRegIds) {
-    CHECK(IsOverlappingDRegister());
-    low = (r * 2) + kNumberOfCoreRegIds;  // Return an FRegister.
-  } else {
-    CHECK(IsRegisterPair());
-    low = (r - kNumberOfDRegIds) * 2 + 2;  // Return a Register.
-    if (low >= 24) {
-      // we got a pair higher than S6_S7, must be the dalvik special case
-      low = 5;
-    }
-  }
-  return low;
-}
-
-
-int MipsManagedRegister::AllocIdHigh() const {
-  return AllocIdLow() + 1;
-}
-
-
-void MipsManagedRegister::Print(std::ostream& os) const {
-  if (!IsValidManagedRegister()) {
-    os << "No Register";
-  } else if (IsCoreRegister()) {
-    os << "Core: " << static_cast<int>(AsCoreRegister());
-  } else if (IsRegisterPair()) {
-    os << "Pair: " << AsRegisterPairLow() << ", " << AsRegisterPairHigh();
-  } else if (IsFRegister()) {
-    os << "FRegister: " << static_cast<int>(AsFRegister());
-  } else if (IsDRegister()) {
-    os << "DRegister: " << static_cast<int>(AsDRegister());
-  } else {
-    os << "??: " << RegId();
-  }
-}
-
-std::ostream& operator<<(std::ostream& os, const MipsManagedRegister& reg) {
-  reg.Print(os);
-  return os;
-}
-
-std::ostream& operator<<(std::ostream& os, const RegisterPair& reg) {
-  os << MipsManagedRegister::FromRegisterPair(reg);
-  return os;
-}
-
-}  // namespace mips
-}  // namespace art
diff --git a/compiler/utils/mips/managed_register_mips.h b/compiler/utils/mips/managed_register_mips.h
deleted file mode 100644
index 18d5821..0000000
--- a/compiler/utils/mips/managed_register_mips.h
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_
-#define ART_COMPILER_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_
-
-#include "constants_mips.h"
-#include "utils/managed_register.h"
-
-namespace art {
-namespace mips {
-
-// Values for register pairs.
-enum RegisterPair {
-  V0_V1 = 0,
-  A0_A1 = 1,
-  A2_A3 = 2,
-  T0_T1 = 3,
-  T2_T3 = 4,
-  T4_T5 = 5,
-  T6_T7 = 6,
-  S0_S1 = 7,
-  S2_S3 = 8,
-  S4_S5 = 9,
-  S6_S7 = 10,
-  A1_A2 = 11,  // Dalvik style passing
-  kNumberOfRegisterPairs = 12,
-  kNoRegisterPair = -1,
-};
-
-std::ostream& operator<<(std::ostream& os, const RegisterPair& reg);
-
-const int kNumberOfCoreRegIds = kNumberOfCoreRegisters;
-const int kNumberOfCoreAllocIds = kNumberOfCoreRegisters;
-
-const int kNumberOfFRegIds = kNumberOfFRegisters;
-const int kNumberOfFAllocIds = kNumberOfFRegisters;
-
-const int kNumberOfDRegIds = kNumberOfDRegisters;
-const int kNumberOfOverlappingDRegIds = kNumberOfOverlappingDRegisters;
-const int kNumberOfDAllocIds = kNumberOfDRegisters;
-
-const int kNumberOfPairRegIds = kNumberOfRegisterPairs;
-
-const int kNumberOfRegIds = kNumberOfCoreRegIds + kNumberOfFRegIds +
-    kNumberOfDRegIds + kNumberOfPairRegIds;
-const int kNumberOfAllocIds =
-    kNumberOfCoreAllocIds + kNumberOfFAllocIds + kNumberOfDAllocIds;
-
-// Register ids map:
-//   [0..R[  core registers (enum Register)
-//   [R..F[  single precision FP registers (enum FRegister)
-//   [F..D[  double precision FP registers (enum DRegister)
-//   [D..P[  core register pairs (enum RegisterPair)
-// where
-//   R = kNumberOfCoreRegIds
-//   F = R + kNumberOfFRegIds
-//   D = F + kNumberOfDRegIds
-//   P = D + kNumberOfRegisterPairs
-
-// Allocation ids map:
-//   [0..R[  core registers (enum Register)
-//   [R..F[  single precision FP registers (enum FRegister)
-// where
-//   R = kNumberOfCoreRegIds
-//   F = R + kNumberOfFRegIds
-
-
-// An instance of class 'ManagedRegister' represents a single core register (enum
-// Register), a single precision FP register (enum FRegister), a double precision
-// FP register (enum DRegister), or a pair of core registers (enum RegisterPair).
-// 'ManagedRegister::NoRegister()' provides an invalid register.
-// There is a one-to-one mapping between ManagedRegister and register id.
-class MipsManagedRegister : public ManagedRegister {
- public:
-  constexpr Register AsCoreRegister() const {
-    CHECK(IsCoreRegister());
-    return static_cast<Register>(id_);
-  }
-
-  constexpr FRegister AsFRegister() const {
-    CHECK(IsFRegister());
-    return static_cast<FRegister>(id_ - kNumberOfCoreRegIds);
-  }
-
-  constexpr DRegister AsDRegister() const {
-    CHECK(IsDRegister());
-    return static_cast<DRegister>(id_ - kNumberOfCoreRegIds - kNumberOfFRegIds);
-  }
-
-  constexpr FRegister AsOverlappingDRegisterLow() const {
-    CHECK(IsOverlappingDRegister());
-    DRegister d_reg = AsDRegister();
-    return static_cast<FRegister>(d_reg * 2);
-  }
-
-  constexpr FRegister AsOverlappingDRegisterHigh() const {
-    CHECK(IsOverlappingDRegister());
-    DRegister d_reg = AsDRegister();
-    return static_cast<FRegister>(d_reg * 2 + 1);
-  }
-
-  constexpr Register AsRegisterPairLow() const {
-    CHECK(IsRegisterPair());
-    // Appropriate mapping of register ids allows to use AllocIdLow().
-    return FromRegId(AllocIdLow()).AsCoreRegister();
-  }
-
-  constexpr Register AsRegisterPairHigh() const {
-    CHECK(IsRegisterPair());
-    // Appropriate mapping of register ids allows to use AllocIdHigh().
-    return FromRegId(AllocIdHigh()).AsCoreRegister();
-  }
-
-  constexpr bool IsCoreRegister() const {
-    CHECK(IsValidManagedRegister());
-    return (0 <= id_) && (id_ < kNumberOfCoreRegIds);
-  }
-
-  constexpr bool IsFRegister() const {
-    CHECK(IsValidManagedRegister());
-    const int test = id_ - kNumberOfCoreRegIds;
-    return (0 <= test) && (test < kNumberOfFRegIds);
-  }
-
-  constexpr bool IsDRegister() const {
-    CHECK(IsValidManagedRegister());
-    const int test = id_ - (kNumberOfCoreRegIds + kNumberOfFRegIds);
-    return (0 <= test) && (test < kNumberOfDRegIds);
-  }
-
-  // Returns true if this DRegister overlaps FRegisters.
-  constexpr bool IsOverlappingDRegister() const {
-    CHECK(IsValidManagedRegister());
-    const int test = id_ - (kNumberOfCoreRegIds + kNumberOfFRegIds);
-    return (0 <= test) && (test < kNumberOfOverlappingDRegIds);
-  }
-
-  constexpr bool IsRegisterPair() const {
-    CHECK(IsValidManagedRegister());
-    const int test =
-        id_ - (kNumberOfCoreRegIds + kNumberOfFRegIds + kNumberOfDRegIds);
-    return (0 <= test) && (test < kNumberOfPairRegIds);
-  }
-
-  void Print(std::ostream& os) const;
-
-  // Returns true if the two managed-registers ('this' and 'other') overlap.
-  // Either managed-register may be the NoRegister. If both are the NoRegister
-  // then false is returned.
-  bool Overlaps(const MipsManagedRegister& other) const;
-
-  static constexpr MipsManagedRegister FromCoreRegister(Register r) {
-    CHECK_NE(r, kNoRegister);
-    return FromRegId(r);
-  }
-
-  static constexpr MipsManagedRegister FromFRegister(FRegister r) {
-    CHECK_NE(r, kNoFRegister);
-    return FromRegId(r + kNumberOfCoreRegIds);
-  }
-
-  static constexpr MipsManagedRegister FromDRegister(DRegister r) {
-    CHECK_NE(r, kNoDRegister);
-    return FromRegId(r + kNumberOfCoreRegIds + kNumberOfFRegIds);
-  }
-
-  static constexpr MipsManagedRegister FromRegisterPair(RegisterPair r) {
-    CHECK_NE(r, kNoRegisterPair);
-    return FromRegId(r + (kNumberOfCoreRegIds + kNumberOfFRegIds + kNumberOfDRegIds));
-  }
-
- private:
-  constexpr bool IsValidManagedRegister() const {
-    return (0 <= id_) && (id_ < kNumberOfRegIds);
-  }
-
-  constexpr int RegId() const {
-    CHECK(!IsNoRegister());
-    return id_;
-  }
-
-  int AllocId() const {
-    CHECK(IsValidManagedRegister() && !IsOverlappingDRegister() && !IsRegisterPair());
-    CHECK_LT(id_, kNumberOfAllocIds);
-    return id_;
-  }
-
-  int AllocIdLow() const;
-  int AllocIdHigh() const;
-
-  friend class ManagedRegister;
-
-  explicit constexpr MipsManagedRegister(int reg_id) : ManagedRegister(reg_id) {}
-
-  static constexpr MipsManagedRegister FromRegId(int reg_id) {
-    MipsManagedRegister reg(reg_id);
-    CHECK(reg.IsValidManagedRegister());
-    return reg;
-  }
-};
-
-std::ostream& operator<<(std::ostream& os, const MipsManagedRegister& reg);
-
-}  // namespace mips
-
-constexpr inline mips::MipsManagedRegister ManagedRegister::AsMips() const {
-  mips::MipsManagedRegister reg(id_);
-  CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister());
-  return reg;
-}
-
-}  // namespace art
-
-#endif  // ART_COMPILER_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
deleted file mode 100644
index 70313ca..0000000
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ /dev/null
@@ -1,4101 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "assembler_mips64.h"
-
-#include "base/bit_utils.h"
-#include "base/casts.h"
-#include "base/memory_region.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "entrypoints/quick/quick_entrypoints_enum.h"
-#include "thread.h"
-
-namespace art {
-namespace mips64 {
-
-static_assert(static_cast<size_t>(kMips64PointerSize) == kMips64DoublewordSize,
-              "Unexpected Mips64 pointer size.");
-static_assert(kMips64PointerSize == PointerSize::k64, "Unexpected Mips64 pointer size.");
-
-
-void Mips64Assembler::FinalizeCode() {
-  for (auto& exception_block : exception_blocks_) {
-    EmitExceptionPoll(&exception_block);
-  }
-  ReserveJumpTableSpace();
-  EmitLiterals();
-  PromoteBranches();
-}
-
-void Mips64Assembler::FinalizeInstructions(const MemoryRegion& region) {
-  EmitBranches();
-  EmitJumpTables();
-  Assembler::FinalizeInstructions(region);
-  PatchCFI();
-}
-
-void Mips64Assembler::PatchCFI() {
-  if (cfi().NumberOfDelayedAdvancePCs() == 0u) {
-    return;
-  }
-
-  using DelayedAdvancePC = DebugFrameOpCodeWriterForAssembler::DelayedAdvancePC;
-  const auto data = cfi().ReleaseStreamAndPrepareForDelayedAdvancePC();
-  const std::vector<uint8_t>& old_stream = data.first;
-  const std::vector<DelayedAdvancePC>& advances = data.second;
-
-  // Refill our data buffer with patched opcodes.
-  cfi().ReserveCFIStream(old_stream.size() + advances.size() + 16);
-  size_t stream_pos = 0;
-  for (const DelayedAdvancePC& advance : advances) {
-    DCHECK_GE(advance.stream_pos, stream_pos);
-    // Copy old data up to the point where advance was issued.
-    cfi().AppendRawData(old_stream, stream_pos, advance.stream_pos);
-    stream_pos = advance.stream_pos;
-    // Insert the advance command with its final offset.
-    size_t final_pc = GetAdjustedPosition(advance.pc);
-    cfi().AdvancePC(final_pc);
-  }
-  // Copy the final segment if any.
-  cfi().AppendRawData(old_stream, stream_pos, old_stream.size());
-}
-
-void Mips64Assembler::EmitBranches() {
-  CHECK(!overwriting_);
-  // Switch from appending instructions at the end of the buffer to overwriting
-  // existing instructions (branch placeholders) in the buffer.
-  overwriting_ = true;
-  for (auto& branch : branches_) {
-    EmitBranch(&branch);
-  }
-  overwriting_ = false;
-}
-
-void Mips64Assembler::Emit(uint32_t value) {
-  if (overwriting_) {
-    // Branches to labels are emitted into their placeholders here.
-    buffer_.Store<uint32_t>(overwrite_location_, value);
-    overwrite_location_ += sizeof(uint32_t);
-  } else {
-    // Other instructions are simply appended at the end here.
-    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
-    buffer_.Emit<uint32_t>(value);
-  }
-}
-
-void Mips64Assembler::EmitR(int opcode, GpuRegister rs, GpuRegister rt, GpuRegister rd,
-                            int shamt, int funct) {
-  CHECK_NE(rs, kNoGpuRegister);
-  CHECK_NE(rt, kNoGpuRegister);
-  CHECK_NE(rd, kNoGpuRegister);
-  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
-                      static_cast<uint32_t>(rs) << kRsShift |
-                      static_cast<uint32_t>(rt) << kRtShift |
-                      static_cast<uint32_t>(rd) << kRdShift |
-                      shamt << kShamtShift |
-                      funct;
-  Emit(encoding);
-}
-
-void Mips64Assembler::EmitRsd(int opcode, GpuRegister rs, GpuRegister rd,
-                              int shamt, int funct) {
-  CHECK_NE(rs, kNoGpuRegister);
-  CHECK_NE(rd, kNoGpuRegister);
-  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
-                      static_cast<uint32_t>(rs) << kRsShift |
-                      static_cast<uint32_t>(ZERO) << kRtShift |
-                      static_cast<uint32_t>(rd) << kRdShift |
-                      shamt << kShamtShift |
-                      funct;
-  Emit(encoding);
-}
-
-void Mips64Assembler::EmitRtd(int opcode, GpuRegister rt, GpuRegister rd,
-                              int shamt, int funct) {
-  CHECK_NE(rt, kNoGpuRegister);
-  CHECK_NE(rd, kNoGpuRegister);
-  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
-                      static_cast<uint32_t>(ZERO) << kRsShift |
-                      static_cast<uint32_t>(rt) << kRtShift |
-                      static_cast<uint32_t>(rd) << kRdShift |
-                      shamt << kShamtShift |
-                      funct;
-  Emit(encoding);
-}
-
-void Mips64Assembler::EmitI(int opcode, GpuRegister rs, GpuRegister rt, uint16_t imm) {
-  CHECK_NE(rs, kNoGpuRegister);
-  CHECK_NE(rt, kNoGpuRegister);
-  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
-                      static_cast<uint32_t>(rs) << kRsShift |
-                      static_cast<uint32_t>(rt) << kRtShift |
-                      imm;
-  Emit(encoding);
-}
-
-void Mips64Assembler::EmitI21(int opcode, GpuRegister rs, uint32_t imm21) {
-  CHECK_NE(rs, kNoGpuRegister);
-  CHECK(IsUint<21>(imm21)) << imm21;
-  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
-                      static_cast<uint32_t>(rs) << kRsShift |
-                      imm21;
-  Emit(encoding);
-}
-
-void Mips64Assembler::EmitI26(int opcode, uint32_t imm26) {
-  CHECK(IsUint<26>(imm26)) << imm26;
-  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift | imm26;
-  Emit(encoding);
-}
-
-void Mips64Assembler::EmitFR(int opcode, int fmt, FpuRegister ft, FpuRegister fs, FpuRegister fd,
-                             int funct) {
-  CHECK_NE(ft, kNoFpuRegister);
-  CHECK_NE(fs, kNoFpuRegister);
-  CHECK_NE(fd, kNoFpuRegister);
-  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
-                      fmt << kFmtShift |
-                      static_cast<uint32_t>(ft) << kFtShift |
-                      static_cast<uint32_t>(fs) << kFsShift |
-                      static_cast<uint32_t>(fd) << kFdShift |
-                      funct;
-  Emit(encoding);
-}
-
-void Mips64Assembler::EmitFI(int opcode, int fmt, FpuRegister ft, uint16_t imm) {
-  CHECK_NE(ft, kNoFpuRegister);
-  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
-                      fmt << kFmtShift |
-                      static_cast<uint32_t>(ft) << kFtShift |
-                      imm;
-  Emit(encoding);
-}
-
-void Mips64Assembler::EmitMsa3R(int operation,
-                                int df,
-                                VectorRegister wt,
-                                VectorRegister ws,
-                                VectorRegister wd,
-                                int minor_opcode) {
-  CHECK_NE(wt, kNoVectorRegister);
-  CHECK_NE(ws, kNoVectorRegister);
-  CHECK_NE(wd, kNoVectorRegister);
-  uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
-                      operation << kMsaOperationShift |
-                      df << kDfShift |
-                      static_cast<uint32_t>(wt) << kWtShift |
-                      static_cast<uint32_t>(ws) << kWsShift |
-                      static_cast<uint32_t>(wd) << kWdShift |
-                      minor_opcode;
-  Emit(encoding);
-}
-
-void Mips64Assembler::EmitMsaBIT(int operation,
-                                 int df_m,
-                                 VectorRegister ws,
-                                 VectorRegister wd,
-                                 int minor_opcode) {
-  CHECK_NE(ws, kNoVectorRegister);
-  CHECK_NE(wd, kNoVectorRegister);
-  uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
-                      operation << kMsaOperationShift |
-                      df_m << kDfMShift |
-                      static_cast<uint32_t>(ws) << kWsShift |
-                      static_cast<uint32_t>(wd) << kWdShift |
-                      minor_opcode;
-  Emit(encoding);
-}
-
-void Mips64Assembler::EmitMsaELM(int operation,
-                                 int df_n,
-                                 VectorRegister ws,
-                                 VectorRegister wd,
-                                 int minor_opcode) {
-  CHECK_NE(ws, kNoVectorRegister);
-  CHECK_NE(wd, kNoVectorRegister);
-  uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
-                      operation << kMsaELMOperationShift |
-                      df_n << kDfNShift |
-                      static_cast<uint32_t>(ws) << kWsShift |
-                      static_cast<uint32_t>(wd) << kWdShift |
-                      minor_opcode;
-  Emit(encoding);
-}
-
-void Mips64Assembler::EmitMsaMI10(int s10,
-                                  GpuRegister rs,
-                                  VectorRegister wd,
-                                  int minor_opcode,
-                                  int df) {
-  CHECK_NE(rs, kNoGpuRegister);
-  CHECK_NE(wd, kNoVectorRegister);
-  CHECK(IsUint<10>(s10)) << s10;
-  uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
-                      s10 << kS10Shift |
-                      static_cast<uint32_t>(rs) << kWsShift |
-                      static_cast<uint32_t>(wd) << kWdShift |
-                      minor_opcode << kS10MinorShift |
-                      df;
-  Emit(encoding);
-}
-
-void Mips64Assembler::EmitMsaI10(int operation,
-                                 int df,
-                                 int i10,
-                                 VectorRegister wd,
-                                 int minor_opcode) {
-  CHECK_NE(wd, kNoVectorRegister);
-  CHECK(IsUint<10>(i10)) << i10;
-  uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
-                      operation << kMsaOperationShift |
-                      df << kDfShift |
-                      i10 << kI10Shift |
-                      static_cast<uint32_t>(wd) << kWdShift |
-                      minor_opcode;
-  Emit(encoding);
-}
-
-void Mips64Assembler::EmitMsa2R(int operation,
-                                int df,
-                                VectorRegister ws,
-                                VectorRegister wd,
-                                int minor_opcode) {
-  CHECK_NE(ws, kNoVectorRegister);
-  CHECK_NE(wd, kNoVectorRegister);
-  uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
-                      operation << kMsa2ROperationShift |
-                      df << kDf2RShift |
-                      static_cast<uint32_t>(ws) << kWsShift |
-                      static_cast<uint32_t>(wd) << kWdShift |
-                      minor_opcode;
-  Emit(encoding);
-}
-
-void Mips64Assembler::EmitMsa2RF(int operation,
-                                 int df,
-                                 VectorRegister ws,
-                                 VectorRegister wd,
-                                 int minor_opcode) {
-  CHECK_NE(ws, kNoVectorRegister);
-  CHECK_NE(wd, kNoVectorRegister);
-  uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift |
-                      operation << kMsa2RFOperationShift |
-                      df << kDf2RShift |
-                      static_cast<uint32_t>(ws) << kWsShift |
-                      static_cast<uint32_t>(wd) << kWdShift |
-                      minor_opcode;
-  Emit(encoding);
-}
-
-void Mips64Assembler::Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 0, 0x21);
-}
-
-void Mips64Assembler::Addiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  EmitI(0x9, rs, rt, imm16);
-}
-
-void Mips64Assembler::Daddu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 0, 0x2d);
-}
-
-void Mips64Assembler::Daddiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  EmitI(0x19, rs, rt, imm16);
-}
-
-void Mips64Assembler::Subu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 0, 0x23);
-}
-
-void Mips64Assembler::Dsubu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 0, 0x2f);
-}
-
-void Mips64Assembler::MulR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 2, 0x18);
-}
-
-void Mips64Assembler::MuhR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 3, 0x18);
-}
-
-void Mips64Assembler::DivR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 2, 0x1a);
-}
-
-void Mips64Assembler::ModR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 3, 0x1a);
-}
-
-void Mips64Assembler::DivuR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 2, 0x1b);
-}
-
-void Mips64Assembler::ModuR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 3, 0x1b);
-}
-
-void Mips64Assembler::Dmul(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 2, 0x1c);
-}
-
-void Mips64Assembler::Dmuh(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 3, 0x1c);
-}
-
-void Mips64Assembler::Ddiv(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 2, 0x1e);
-}
-
-void Mips64Assembler::Dmod(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 3, 0x1e);
-}
-
-void Mips64Assembler::Ddivu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 2, 0x1f);
-}
-
-void Mips64Assembler::Dmodu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 3, 0x1f);
-}
-
-void Mips64Assembler::And(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 0, 0x24);
-}
-
-void Mips64Assembler::Andi(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  EmitI(0xc, rs, rt, imm16);
-}
-
-void Mips64Assembler::Or(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 0, 0x25);
-}
-
-void Mips64Assembler::Ori(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  EmitI(0xd, rs, rt, imm16);
-}
-
-void Mips64Assembler::Xor(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 0, 0x26);
-}
-
-void Mips64Assembler::Xori(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  EmitI(0xe, rs, rt, imm16);
-}
-
-void Mips64Assembler::Nor(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 0, 0x27);
-}
-
-void Mips64Assembler::Bitswap(GpuRegister rd, GpuRegister rt) {
-  EmitRtd(0x1f, rt, rd, 0x0, 0x20);
-}
-
-void Mips64Assembler::Dbitswap(GpuRegister rd, GpuRegister rt) {
-  EmitRtd(0x1f, rt, rd, 0x0, 0x24);
-}
-
-void Mips64Assembler::Seb(GpuRegister rd, GpuRegister rt) {
-  EmitR(0x1f, static_cast<GpuRegister>(0), rt, rd, 0x10, 0x20);
-}
-
-void Mips64Assembler::Seh(GpuRegister rd, GpuRegister rt) {
-  EmitR(0x1f, static_cast<GpuRegister>(0), rt, rd, 0x18, 0x20);
-}
-
-void Mips64Assembler::Dsbh(GpuRegister rd, GpuRegister rt) {
-  EmitRtd(0x1f, rt, rd, 0x2, 0x24);
-}
-
-void Mips64Assembler::Dshd(GpuRegister rd, GpuRegister rt) {
-  EmitRtd(0x1f, rt, rd, 0x5, 0x24);
-}
-
-void Mips64Assembler::Dext(GpuRegister rt, GpuRegister rs, int pos, int size) {
-  CHECK(IsUint<5>(pos)) << pos;
-  CHECK(IsUint<5>(size - 1)) << size;
-  EmitR(0x1f, rs, rt, static_cast<GpuRegister>(size - 1), pos, 0x3);
-}
-
-void Mips64Assembler::Ins(GpuRegister rd, GpuRegister rt, int pos, int size) {
-  CHECK(IsUint<5>(pos)) << pos;
-  CHECK(IsUint<5>(size - 1)) << size;
-  CHECK(IsUint<5>(pos + size - 1)) << pos << " + " << size;
-  EmitR(0x1f, rt, rd, static_cast<GpuRegister>(pos + size - 1), pos, 0x04);
-}
-
-void Mips64Assembler::Dinsm(GpuRegister rt, GpuRegister rs, int pos, int size) {
-  CHECK(IsUint<5>(pos)) << pos;
-  CHECK(2 <= size && size <= 64) << size;
-  CHECK(IsUint<5>(pos + size - 33)) << pos << " + " << size;
-  EmitR(0x1f, rs, rt, static_cast<GpuRegister>(pos + size - 33), pos, 0x5);
-}
-
-void Mips64Assembler::Dinsu(GpuRegister rt, GpuRegister rs, int pos, int size) {
-  CHECK(IsUint<5>(pos - 32)) << pos;
-  CHECK(IsUint<5>(size - 1)) << size;
-  CHECK(IsUint<5>(pos + size - 33)) << pos << " + " << size;
-  EmitR(0x1f, rs, rt, static_cast<GpuRegister>(pos + size - 33), pos - 32, 0x6);
-}
-
-void Mips64Assembler::Dins(GpuRegister rt, GpuRegister rs, int pos, int size) {
-  CHECK(IsUint<5>(pos)) << pos;
-  CHECK(IsUint<5>(size - 1)) << size;
-  CHECK(IsUint<5>(pos + size - 1)) << pos << " + " << size;
-  EmitR(0x1f, rs, rt, static_cast<GpuRegister>(pos + size - 1), pos, 0x7);
-}
-
-void Mips64Assembler::DblIns(GpuRegister rt, GpuRegister rs, int pos, int size) {
-  if (pos >= 32) {
-    Dinsu(rt, rs, pos, size);
-  } else if ((static_cast<int64_t>(pos) + size - 1) >= 32) {
-    Dinsm(rt, rs, pos, size);
-  } else {
-    Dins(rt, rs, pos, size);
-  }
-}
-
-void Mips64Assembler::Lsa(GpuRegister rd, GpuRegister rs, GpuRegister rt, int saPlusOne) {
-  CHECK(1 <= saPlusOne && saPlusOne <= 4) << saPlusOne;
-  int sa = saPlusOne - 1;
-  EmitR(0x0, rs, rt, rd, sa, 0x05);
-}
-
-void Mips64Assembler::Dlsa(GpuRegister rd, GpuRegister rs, GpuRegister rt, int saPlusOne) {
-  CHECK(1 <= saPlusOne && saPlusOne <= 4) << saPlusOne;
-  int sa = saPlusOne - 1;
-  EmitR(0x0, rs, rt, rd, sa, 0x15);
-}
-
-void Mips64Assembler::Wsbh(GpuRegister rd, GpuRegister rt) {
-  EmitRtd(0x1f, rt, rd, 2, 0x20);
-}
-
-void Mips64Assembler::Sc(GpuRegister rt, GpuRegister base, int16_t imm9) {
-  CHECK(IsInt<9>(imm9));
-  EmitI(0x1f, base, rt, ((imm9 & 0x1FF) << 7) | 0x26);
-}
-
-void Mips64Assembler::Scd(GpuRegister rt, GpuRegister base, int16_t imm9) {
-  CHECK(IsInt<9>(imm9));
-  EmitI(0x1f, base, rt, ((imm9 & 0x1FF) << 7) | 0x27);
-}
-
-void Mips64Assembler::Ll(GpuRegister rt, GpuRegister base, int16_t imm9) {
-  CHECK(IsInt<9>(imm9));
-  EmitI(0x1f, base, rt, ((imm9 & 0x1FF) << 7) | 0x36);
-}
-
-void Mips64Assembler::Lld(GpuRegister rt, GpuRegister base, int16_t imm9) {
-  CHECK(IsInt<9>(imm9));
-  EmitI(0x1f, base, rt, ((imm9 & 0x1FF) << 7) | 0x37);
-}
-
-void Mips64Assembler::Sll(GpuRegister rd, GpuRegister rt, int shamt) {
-  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x00);
-}
-
-void Mips64Assembler::Srl(GpuRegister rd, GpuRegister rt, int shamt) {
-  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x02);
-}
-
-void Mips64Assembler::Rotr(GpuRegister rd, GpuRegister rt, int shamt) {
-  EmitR(0, static_cast<GpuRegister>(1), rt, rd, shamt, 0x02);
-}
-
-void Mips64Assembler::Sra(GpuRegister rd, GpuRegister rt, int shamt) {
-  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x03);
-}
-
-void Mips64Assembler::Sllv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
-  EmitR(0, rs, rt, rd, 0, 0x04);
-}
-
-void Mips64Assembler::Rotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
-  EmitR(0, rs, rt, rd, 1, 0x06);
-}
-
-void Mips64Assembler::Srlv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
-  EmitR(0, rs, rt, rd, 0, 0x06);
-}
-
-void Mips64Assembler::Srav(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
-  EmitR(0, rs, rt, rd, 0, 0x07);
-}
-
-void Mips64Assembler::Dsll(GpuRegister rd, GpuRegister rt, int shamt) {
-  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x38);
-}
-
-void Mips64Assembler::Dsrl(GpuRegister rd, GpuRegister rt, int shamt) {
-  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3a);
-}
-
-void Mips64Assembler::Drotr(GpuRegister rd, GpuRegister rt, int shamt) {
-  EmitR(0, static_cast<GpuRegister>(1), rt, rd, shamt, 0x3a);
-}
-
-void Mips64Assembler::Dsra(GpuRegister rd, GpuRegister rt, int shamt) {
-  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3b);
-}
-
-void Mips64Assembler::Dsll32(GpuRegister rd, GpuRegister rt, int shamt) {
-  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3c);
-}
-
-void Mips64Assembler::Dsrl32(GpuRegister rd, GpuRegister rt, int shamt) {
-  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3e);
-}
-
-void Mips64Assembler::Drotr32(GpuRegister rd, GpuRegister rt, int shamt) {
-  EmitR(0, static_cast<GpuRegister>(1), rt, rd, shamt, 0x3e);
-}
-
-void Mips64Assembler::Dsra32(GpuRegister rd, GpuRegister rt, int shamt) {
-  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3f);
-}
-
-void Mips64Assembler::Dsllv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
-  EmitR(0, rs, rt, rd, 0, 0x14);
-}
-
-void Mips64Assembler::Dsrlv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
-  EmitR(0, rs, rt, rd, 0, 0x16);
-}
-
-void Mips64Assembler::Drotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
-  EmitR(0, rs, rt, rd, 1, 0x16);
-}
-
-void Mips64Assembler::Dsrav(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
-  EmitR(0, rs, rt, rd, 0, 0x17);
-}
-
-void Mips64Assembler::Lb(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  EmitI(0x20, rs, rt, imm16);
-}
-
-void Mips64Assembler::Lh(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  EmitI(0x21, rs, rt, imm16);
-}
-
-void Mips64Assembler::Lw(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  EmitI(0x23, rs, rt, imm16);
-}
-
-void Mips64Assembler::Ld(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  EmitI(0x37, rs, rt, imm16);
-}
-
-void Mips64Assembler::Lbu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  EmitI(0x24, rs, rt, imm16);
-}
-
-void Mips64Assembler::Lhu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  EmitI(0x25, rs, rt, imm16);
-}
-
-void Mips64Assembler::Lwu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  EmitI(0x27, rs, rt, imm16);
-}
-
-void Mips64Assembler::Lwpc(GpuRegister rs, uint32_t imm19) {
-  CHECK(IsUint<19>(imm19)) << imm19;
-  EmitI21(0x3B, rs, (0x01 << 19) | imm19);
-}
-
-void Mips64Assembler::Lwupc(GpuRegister rs, uint32_t imm19) {
-  CHECK(IsUint<19>(imm19)) << imm19;
-  EmitI21(0x3B, rs, (0x02 << 19) | imm19);
-}
-
-void Mips64Assembler::Ldpc(GpuRegister rs, uint32_t imm18) {
-  CHECK(IsUint<18>(imm18)) << imm18;
-  EmitI21(0x3B, rs, (0x06 << 18) | imm18);
-}
-
-void Mips64Assembler::Lui(GpuRegister rt, uint16_t imm16) {
-  EmitI(0xf, static_cast<GpuRegister>(0), rt, imm16);
-}
-
-void Mips64Assembler::Aui(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  EmitI(0xf, rs, rt, imm16);
-}
-
-void Mips64Assembler::Daui(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  CHECK_NE(rs, ZERO);
-  EmitI(0x1d, rs, rt, imm16);
-}
-
-void Mips64Assembler::Dahi(GpuRegister rs, uint16_t imm16) {
-  EmitI(1, rs, static_cast<GpuRegister>(6), imm16);
-}
-
-void Mips64Assembler::Dati(GpuRegister rs, uint16_t imm16) {
-  EmitI(1, rs, static_cast<GpuRegister>(0x1e), imm16);
-}
-
-void Mips64Assembler::Sync(uint32_t stype) {
-  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
-           static_cast<GpuRegister>(0), stype & 0x1f, 0xf);
-}
-
-void Mips64Assembler::Sb(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  EmitI(0x28, rs, rt, imm16);
-}
-
-void Mips64Assembler::Sh(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  EmitI(0x29, rs, rt, imm16);
-}
-
-void Mips64Assembler::Sw(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  EmitI(0x2b, rs, rt, imm16);
-}
-
-void Mips64Assembler::Sd(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  EmitI(0x3f, rs, rt, imm16);
-}
-
-void Mips64Assembler::Slt(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 0, 0x2a);
-}
-
-void Mips64Assembler::Sltu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 0, 0x2b);
-}
-
-void Mips64Assembler::Slti(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  EmitI(0xa, rs, rt, imm16);
-}
-
-void Mips64Assembler::Sltiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
-  EmitI(0xb, rs, rt, imm16);
-}
-
-void Mips64Assembler::Seleqz(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 0, 0x35);
-}
-
-void Mips64Assembler::Selnez(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
-  EmitR(0, rs, rt, rd, 0, 0x37);
-}
-
-void Mips64Assembler::Clz(GpuRegister rd, GpuRegister rs) {
-  EmitRsd(0, rs, rd, 0x01, 0x10);
-}
-
-void Mips64Assembler::Clo(GpuRegister rd, GpuRegister rs) {
-  EmitRsd(0, rs, rd, 0x01, 0x11);
-}
-
-void Mips64Assembler::Dclz(GpuRegister rd, GpuRegister rs) {
-  EmitRsd(0, rs, rd, 0x01, 0x12);
-}
-
-void Mips64Assembler::Dclo(GpuRegister rd, GpuRegister rs) {
-  EmitRsd(0, rs, rd, 0x01, 0x13);
-}
-
-void Mips64Assembler::Jalr(GpuRegister rd, GpuRegister rs) {
-  EmitR(0, rs, static_cast<GpuRegister>(0), rd, 0, 0x09);
-}
-
-void Mips64Assembler::Jalr(GpuRegister rs) {
-  Jalr(RA, rs);
-}
-
-void Mips64Assembler::Jr(GpuRegister rs) {
-  Jalr(ZERO, rs);
-}
-
-void Mips64Assembler::Auipc(GpuRegister rs, uint16_t imm16) {
-  EmitI(0x3B, rs, static_cast<GpuRegister>(0x1E), imm16);
-}
-
-void Mips64Assembler::Addiupc(GpuRegister rs, uint32_t imm19) {
-  CHECK(IsUint<19>(imm19)) << imm19;
-  EmitI21(0x3B, rs, imm19);
-}
-
-void Mips64Assembler::Bc(uint32_t imm26) {
-  EmitI26(0x32, imm26);
-}
-
-void Mips64Assembler::Balc(uint32_t imm26) {
-  EmitI26(0x3A, imm26);
-}
-
-void Mips64Assembler::Jic(GpuRegister rt, uint16_t imm16) {
-  EmitI(0x36, static_cast<GpuRegister>(0), rt, imm16);
-}
-
-void Mips64Assembler::Jialc(GpuRegister rt, uint16_t imm16) {
-  EmitI(0x3E, static_cast<GpuRegister>(0), rt, imm16);
-}
-
-void Mips64Assembler::Bltc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
-  CHECK_NE(rs, ZERO);
-  CHECK_NE(rt, ZERO);
-  CHECK_NE(rs, rt);
-  EmitI(0x17, rs, rt, imm16);
-}
-
-void Mips64Assembler::Bltzc(GpuRegister rt, uint16_t imm16) {
-  CHECK_NE(rt, ZERO);
-  EmitI(0x17, rt, rt, imm16);
-}
-
-void Mips64Assembler::Bgtzc(GpuRegister rt, uint16_t imm16) {
-  CHECK_NE(rt, ZERO);
-  EmitI(0x17, static_cast<GpuRegister>(0), rt, imm16);
-}
-
-void Mips64Assembler::Bgec(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
-  CHECK_NE(rs, ZERO);
-  CHECK_NE(rt, ZERO);
-  CHECK_NE(rs, rt);
-  EmitI(0x16, rs, rt, imm16);
-}
-
-void Mips64Assembler::Bgezc(GpuRegister rt, uint16_t imm16) {
-  CHECK_NE(rt, ZERO);
-  EmitI(0x16, rt, rt, imm16);
-}
-
-void Mips64Assembler::Blezc(GpuRegister rt, uint16_t imm16) {
-  CHECK_NE(rt, ZERO);
-  EmitI(0x16, static_cast<GpuRegister>(0), rt, imm16);
-}
-
-void Mips64Assembler::Bltuc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
-  CHECK_NE(rs, ZERO);
-  CHECK_NE(rt, ZERO);
-  CHECK_NE(rs, rt);
-  EmitI(0x7, rs, rt, imm16);
-}
-
-void Mips64Assembler::Bgeuc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
-  CHECK_NE(rs, ZERO);
-  CHECK_NE(rt, ZERO);
-  CHECK_NE(rs, rt);
-  EmitI(0x6, rs, rt, imm16);
-}
-
-void Mips64Assembler::Beqc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
-  CHECK_NE(rs, ZERO);
-  CHECK_NE(rt, ZERO);
-  CHECK_NE(rs, rt);
-  EmitI(0x8, std::min(rs, rt), std::max(rs, rt), imm16);
-}
-
-void Mips64Assembler::Bnec(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
-  CHECK_NE(rs, ZERO);
-  CHECK_NE(rt, ZERO);
-  CHECK_NE(rs, rt);
-  EmitI(0x18, std::min(rs, rt), std::max(rs, rt), imm16);
-}
-
-void Mips64Assembler::Beqzc(GpuRegister rs, uint32_t imm21) {
-  CHECK_NE(rs, ZERO);
-  EmitI21(0x36, rs, imm21);
-}
-
-void Mips64Assembler::Bnezc(GpuRegister rs, uint32_t imm21) {
-  CHECK_NE(rs, ZERO);
-  EmitI21(0x3E, rs, imm21);
-}
-
-void Mips64Assembler::Bc1eqz(FpuRegister ft, uint16_t imm16) {
-  EmitFI(0x11, 0x9, ft, imm16);
-}
-
-void Mips64Assembler::Bc1nez(FpuRegister ft, uint16_t imm16) {
-  EmitFI(0x11, 0xD, ft, imm16);
-}
-
-void Mips64Assembler::Beq(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
-  EmitI(0x4, rs, rt, imm16);
-}
-
-void Mips64Assembler::Bne(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
-  EmitI(0x5, rs, rt, imm16);
-}
-
-void Mips64Assembler::Beqz(GpuRegister rt, uint16_t imm16) {
-  Beq(rt, ZERO, imm16);
-}
-
-void Mips64Assembler::Bnez(GpuRegister rt, uint16_t imm16) {
-  Bne(rt, ZERO, imm16);
-}
-
-void Mips64Assembler::Bltz(GpuRegister rt, uint16_t imm16) {
-  EmitI(0x1, rt, static_cast<GpuRegister>(0), imm16);
-}
-
-void Mips64Assembler::Bgez(GpuRegister rt, uint16_t imm16) {
-  EmitI(0x1, rt, static_cast<GpuRegister>(0x1), imm16);
-}
-
-void Mips64Assembler::Blez(GpuRegister rt, uint16_t imm16) {
-  EmitI(0x6, rt, static_cast<GpuRegister>(0), imm16);
-}
-
-void Mips64Assembler::Bgtz(GpuRegister rt, uint16_t imm16) {
-  EmitI(0x7, rt, static_cast<GpuRegister>(0), imm16);
-}
-
-void Mips64Assembler::EmitBcondR6(BranchCondition cond,
-                                  GpuRegister rs,
-                                  GpuRegister rt,
-                                  uint32_t imm16_21) {
-  switch (cond) {
-    case kCondLT:
-      Bltc(rs, rt, imm16_21);
-      break;
-    case kCondGE:
-      Bgec(rs, rt, imm16_21);
-      break;
-    case kCondLE:
-      Bgec(rt, rs, imm16_21);
-      break;
-    case kCondGT:
-      Bltc(rt, rs, imm16_21);
-      break;
-    case kCondLTZ:
-      CHECK_EQ(rt, ZERO);
-      Bltzc(rs, imm16_21);
-      break;
-    case kCondGEZ:
-      CHECK_EQ(rt, ZERO);
-      Bgezc(rs, imm16_21);
-      break;
-    case kCondLEZ:
-      CHECK_EQ(rt, ZERO);
-      Blezc(rs, imm16_21);
-      break;
-    case kCondGTZ:
-      CHECK_EQ(rt, ZERO);
-      Bgtzc(rs, imm16_21);
-      break;
-    case kCondEQ:
-      Beqc(rs, rt, imm16_21);
-      break;
-    case kCondNE:
-      Bnec(rs, rt, imm16_21);
-      break;
-    case kCondEQZ:
-      CHECK_EQ(rt, ZERO);
-      Beqzc(rs, imm16_21);
-      break;
-    case kCondNEZ:
-      CHECK_EQ(rt, ZERO);
-      Bnezc(rs, imm16_21);
-      break;
-    case kCondLTU:
-      Bltuc(rs, rt, imm16_21);
-      break;
-    case kCondGEU:
-      Bgeuc(rs, rt, imm16_21);
-      break;
-    case kCondF:
-      CHECK_EQ(rt, ZERO);
-      Bc1eqz(static_cast<FpuRegister>(rs), imm16_21);
-      break;
-    case kCondT:
-      CHECK_EQ(rt, ZERO);
-      Bc1nez(static_cast<FpuRegister>(rs), imm16_21);
-      break;
-    case kUncond:
-      LOG(FATAL) << "Unexpected branch condition " << cond;
-      UNREACHABLE();
-  }
-}
-
-void Mips64Assembler::EmitBcondR2(BranchCondition cond,
-                                  GpuRegister rs,
-                                  GpuRegister rt,
-                                  uint16_t imm16) {
-  switch (cond) {
-    case kCondLTZ:
-      CHECK_EQ(rt, ZERO);
-      Bltz(rs, imm16);
-      break;
-    case kCondGEZ:
-      CHECK_EQ(rt, ZERO);
-      Bgez(rs, imm16);
-      break;
-    case kCondLEZ:
-      CHECK_EQ(rt, ZERO);
-      Blez(rs, imm16);
-      break;
-    case kCondGTZ:
-      CHECK_EQ(rt, ZERO);
-      Bgtz(rs, imm16);
-      break;
-    case kCondEQ:
-      Beq(rs, rt, imm16);
-      break;
-    case kCondNE:
-      Bne(rs, rt, imm16);
-      break;
-    case kCondEQZ:
-      CHECK_EQ(rt, ZERO);
-      Beqz(rs, imm16);
-      break;
-    case kCondNEZ:
-      CHECK_EQ(rt, ZERO);
-      Bnez(rs, imm16);
-      break;
-    case kCondF:
-    case kCondT:
-    case kCondLT:
-    case kCondGE:
-    case kCondLE:
-    case kCondGT:
-    case kCondLTU:
-    case kCondGEU:
-    case kUncond:
-      LOG(FATAL) << "Unexpected branch condition " << cond;
-      UNREACHABLE();
-  }
-}
-
-void Mips64Assembler::AddS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x10, ft, fs, fd, 0x0);
-}
-
-void Mips64Assembler::SubS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x10, ft, fs, fd, 0x1);
-}
-
-void Mips64Assembler::MulS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x10, ft, fs, fd, 0x2);
-}
-
-void Mips64Assembler::DivS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x10, ft, fs, fd, 0x3);
-}
-
-void Mips64Assembler::AddD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x11, ft, fs, fd, 0x0);
-}
-
-void Mips64Assembler::SubD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x11, ft, fs, fd, 0x1);
-}
-
-void Mips64Assembler::MulD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x11, ft, fs, fd, 0x2);
-}
-
-void Mips64Assembler::DivD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x11, ft, fs, fd, 0x3);
-}
-
-void Mips64Assembler::SqrtS(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x4);
-}
-
-void Mips64Assembler::SqrtD(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x4);
-}
-
-void Mips64Assembler::AbsS(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x5);
-}
-
-void Mips64Assembler::AbsD(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x5);
-}
-
-void Mips64Assembler::MovS(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x6);
-}
-
-void Mips64Assembler::MovD(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x6);
-}
-
-void Mips64Assembler::NegS(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x7);
-}
-
-void Mips64Assembler::NegD(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x7);
-}
-
-void Mips64Assembler::RoundLS(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x8);
-}
-
-void Mips64Assembler::RoundLD(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x8);
-}
-
-void Mips64Assembler::RoundWS(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xc);
-}
-
-void Mips64Assembler::RoundWD(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xc);
-}
-
-void Mips64Assembler::TruncLS(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x9);
-}
-
-void Mips64Assembler::TruncLD(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x9);
-}
-
-void Mips64Assembler::TruncWS(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xd);
-}
-
-void Mips64Assembler::TruncWD(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xd);
-}
-
-void Mips64Assembler::CeilLS(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xa);
-}
-
-void Mips64Assembler::CeilLD(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xa);
-}
-
-void Mips64Assembler::CeilWS(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xe);
-}
-
-void Mips64Assembler::CeilWD(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xe);
-}
-
-void Mips64Assembler::FloorLS(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xb);
-}
-
-void Mips64Assembler::FloorLD(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xb);
-}
-
-void Mips64Assembler::FloorWS(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xf);
-}
-
-void Mips64Assembler::FloorWD(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xf);
-}
-
-void Mips64Assembler::SelS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x10, ft, fs, fd, 0x10);
-}
-
-void Mips64Assembler::SelD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x11, ft, fs, fd, 0x10);
-}
-
-void Mips64Assembler::SeleqzS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x10, ft, fs, fd, 0x14);
-}
-
-void Mips64Assembler::SeleqzD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x11, ft, fs, fd, 0x14);
-}
-
-void Mips64Assembler::SelnezS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x10, ft, fs, fd, 0x17);
-}
-
-void Mips64Assembler::SelnezD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x11, ft, fs, fd, 0x17);
-}
-
-void Mips64Assembler::RintS(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x1a);
-}
-
-void Mips64Assembler::RintD(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x1a);
-}
-
-void Mips64Assembler::ClassS(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x1b);
-}
-
-void Mips64Assembler::ClassD(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x1b);
-}
-
-void Mips64Assembler::MinS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x10, ft, fs, fd, 0x1c);
-}
-
-void Mips64Assembler::MinD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x11, ft, fs, fd, 0x1c);
-}
-
-void Mips64Assembler::MaxS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x10, ft, fs, fd, 0x1e);
-}
-
-void Mips64Assembler::MaxD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x11, ft, fs, fd, 0x1e);
-}
-
-void Mips64Assembler::CmpUnS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x14, ft, fs, fd, 0x01);
-}
-
-void Mips64Assembler::CmpEqS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x14, ft, fs, fd, 0x02);
-}
-
-void Mips64Assembler::CmpUeqS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x14, ft, fs, fd, 0x03);
-}
-
-void Mips64Assembler::CmpLtS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x14, ft, fs, fd, 0x04);
-}
-
-void Mips64Assembler::CmpUltS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x14, ft, fs, fd, 0x05);
-}
-
-void Mips64Assembler::CmpLeS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x14, ft, fs, fd, 0x06);
-}
-
-void Mips64Assembler::CmpUleS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x14, ft, fs, fd, 0x07);
-}
-
-void Mips64Assembler::CmpOrS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x14, ft, fs, fd, 0x11);
-}
-
-void Mips64Assembler::CmpUneS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x14, ft, fs, fd, 0x12);
-}
-
-void Mips64Assembler::CmpNeS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x14, ft, fs, fd, 0x13);
-}
-
-void Mips64Assembler::CmpUnD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x15, ft, fs, fd, 0x01);
-}
-
-void Mips64Assembler::CmpEqD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x15, ft, fs, fd, 0x02);
-}
-
-void Mips64Assembler::CmpUeqD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x15, ft, fs, fd, 0x03);
-}
-
-void Mips64Assembler::CmpLtD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x15, ft, fs, fd, 0x04);
-}
-
-void Mips64Assembler::CmpUltD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x15, ft, fs, fd, 0x05);
-}
-
-void Mips64Assembler::CmpLeD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x15, ft, fs, fd, 0x06);
-}
-
-void Mips64Assembler::CmpUleD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x15, ft, fs, fd, 0x07);
-}
-
-void Mips64Assembler::CmpOrD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x15, ft, fs, fd, 0x11);
-}
-
-void Mips64Assembler::CmpUneD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x15, ft, fs, fd, 0x12);
-}
-
-void Mips64Assembler::CmpNeD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
-  EmitFR(0x11, 0x15, ft, fs, fd, 0x13);
-}
-
-void Mips64Assembler::Cvtsw(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x14, static_cast<FpuRegister>(0), fs, fd, 0x20);
-}
-
-void Mips64Assembler::Cvtdw(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x14, static_cast<FpuRegister>(0), fs, fd, 0x21);
-}
-
-void Mips64Assembler::Cvtsd(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x20);
-}
-
-void Mips64Assembler::Cvtds(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x21);
-}
-
-void Mips64Assembler::Cvtsl(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x15, static_cast<FpuRegister>(0), fs, fd, 0x20);
-}
-
-void Mips64Assembler::Cvtdl(FpuRegister fd, FpuRegister fs) {
-  EmitFR(0x11, 0x15, static_cast<FpuRegister>(0), fs, fd, 0x21);
-}
-
-void Mips64Assembler::Mfc1(GpuRegister rt, FpuRegister fs) {
-  EmitFR(0x11, 0x00, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
-}
-
-void Mips64Assembler::Mfhc1(GpuRegister rt, FpuRegister fs) {
-  EmitFR(0x11, 0x03, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
-}
-
-void Mips64Assembler::Mtc1(GpuRegister rt, FpuRegister fs) {
-  EmitFR(0x11, 0x04, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
-}
-
-void Mips64Assembler::Mthc1(GpuRegister rt, FpuRegister fs) {
-  EmitFR(0x11, 0x07, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
-}
-
-void Mips64Assembler::Dmfc1(GpuRegister rt, FpuRegister fs) {
-  EmitFR(0x11, 0x01, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
-}
-
-void Mips64Assembler::Dmtc1(GpuRegister rt, FpuRegister fs) {
-  EmitFR(0x11, 0x05, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
-}
-
-void Mips64Assembler::Lwc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
-  EmitI(0x31, rs, static_cast<GpuRegister>(ft), imm16);
-}
-
-void Mips64Assembler::Ldc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
-  EmitI(0x35, rs, static_cast<GpuRegister>(ft), imm16);
-}
-
-void Mips64Assembler::Swc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
-  EmitI(0x39, rs, static_cast<GpuRegister>(ft), imm16);
-}
-
-void Mips64Assembler::Sdc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
-  EmitI(0x3d, rs, static_cast<GpuRegister>(ft), imm16);
-}
-
-void Mips64Assembler::Break() {
-  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
-        static_cast<GpuRegister>(0), 0, 0xD);
-}
-
-void Mips64Assembler::Nop() {
-  EmitR(0x0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
-        static_cast<GpuRegister>(0), 0, 0x0);
-}
-
-void Mips64Assembler::Move(GpuRegister rd, GpuRegister rs) {
-  Or(rd, rs, ZERO);
-}
-
-void Mips64Assembler::Clear(GpuRegister rd) {
-  Move(rd, ZERO);
-}
-
-void Mips64Assembler::Not(GpuRegister rd, GpuRegister rs) {
-  Nor(rd, rs, ZERO);
-}
-
-void Mips64Assembler::AndV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x1e);
-}
-
-void Mips64Assembler::OrV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x1e);
-}
-
-void Mips64Assembler::NorV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x1e);
-}
-
-void Mips64Assembler::XorV(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x1e);
-}
-
-void Mips64Assembler::AddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x0, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::AddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x1, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::AddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x2, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::AddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x3, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::SubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x1, 0x0, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::SubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x1, 0x1, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::SubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x1, 0x2, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::SubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x1, 0x3, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::Asub_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x11);
-}
-
-void Mips64Assembler::Asub_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x11);
-}
-
-void Mips64Assembler::Asub_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x11);
-}
-
-void Mips64Assembler::Asub_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x11);
-}
-
-void Mips64Assembler::Asub_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x11);
-}
-
-void Mips64Assembler::Asub_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x11);
-}
-
-void Mips64Assembler::Asub_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x11);
-}
-
-void Mips64Assembler::Asub_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x11);
-}
-
-void Mips64Assembler::MulvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::MulvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::MulvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::MulvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::Div_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::Div_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::Div_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::Div_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::Div_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::Div_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::Div_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::Div_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::Mod_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::Mod_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::Mod_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x6, 0x2, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::Mod_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x6, 0x3, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::Mod_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::Mod_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::Mod_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x7, 0x2, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::Mod_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::Add_aB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Add_aH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Add_aW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Add_aD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Ave_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Ave_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Ave_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Ave_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Ave_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Ave_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Ave_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Ave_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Aver_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Aver_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Aver_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x6, 0x2, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Aver_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x6, 0x3, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Aver_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Aver_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Aver_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x7, 0x2, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Aver_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x10);
-}
-
-void Mips64Assembler::Max_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x2, 0x0, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::Max_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x2, 0x1, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::Max_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x2, 0x2, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::Max_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x2, 0x3, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::Max_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x3, 0x0, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::Max_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x3, 0x1, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::Max_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x3, 0x2, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::Max_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x3, 0x3, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::Min_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x0, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::Min_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x1, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::Min_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x2, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::Min_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x3, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::Min_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x0, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::Min_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x1, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::Min_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x2, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::Min_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x3, wt, ws, wd, 0xe);
-}
-
-void Mips64Assembler::FaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x1b);
-}
-
-void Mips64Assembler::FaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x1b);
-}
-
-void Mips64Assembler::FsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x1b);
-}
-
-void Mips64Assembler::FsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x1b);
-}
-
-void Mips64Assembler::FmulW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x1, 0x0, wt, ws, wd, 0x1b);
-}
-
-void Mips64Assembler::FmulD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x1, 0x1, wt, ws, wd, 0x1b);
-}
-
-void Mips64Assembler::FdivW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x1, 0x2, wt, ws, wd, 0x1b);
-}
-
-void Mips64Assembler::FdivD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x1, 0x3, wt, ws, wd, 0x1b);
-}
-
-void Mips64Assembler::FmaxW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x1b);
-}
-
-void Mips64Assembler::FmaxD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x1b);
-}
-
-void Mips64Assembler::FminW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x1b);
-}
-
-void Mips64Assembler::FminD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x1b);
-}
-
-void Mips64Assembler::Ffint_sW(VectorRegister wd, VectorRegister ws) {
-  CHECK(HasMsa());
-  EmitMsa2RF(0x19e, 0x0, ws, wd, 0x1e);
-}
-
-void Mips64Assembler::Ffint_sD(VectorRegister wd, VectorRegister ws) {
-  CHECK(HasMsa());
-  EmitMsa2RF(0x19e, 0x1, ws, wd, 0x1e);
-}
-
-void Mips64Assembler::Ftint_sW(VectorRegister wd, VectorRegister ws) {
-  CHECK(HasMsa());
-  EmitMsa2RF(0x19c, 0x0, ws, wd, 0x1e);
-}
-
-void Mips64Assembler::Ftint_sD(VectorRegister wd, VectorRegister ws) {
-  CHECK(HasMsa());
-  EmitMsa2RF(0x19c, 0x1, ws, wd, 0x1e);
-}
-
-void Mips64Assembler::SllB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x0, wt, ws, wd, 0xd);
-}
-
-void Mips64Assembler::SllH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x1, wt, ws, wd, 0xd);
-}
-
-void Mips64Assembler::SllW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x2, wt, ws, wd, 0xd);
-}
-
-void Mips64Assembler::SllD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x0, 0x3, wt, ws, wd, 0xd);
-}
-
-void Mips64Assembler::SraB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x1, 0x0, wt, ws, wd, 0xd);
-}
-
-void Mips64Assembler::SraH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x1, 0x1, wt, ws, wd, 0xd);
-}
-
-void Mips64Assembler::SraW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x1, 0x2, wt, ws, wd, 0xd);
-}
-
-void Mips64Assembler::SraD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x1, 0x3, wt, ws, wd, 0xd);
-}
-
-void Mips64Assembler::SrlB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x2, 0x0, wt, ws, wd, 0xd);
-}
-
-void Mips64Assembler::SrlH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x2, 0x1, wt, ws, wd, 0xd);
-}
-
-void Mips64Assembler::SrlW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x2, 0x2, wt, ws, wd, 0xd);
-}
-
-void Mips64Assembler::SrlD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x2, 0x3, wt, ws, wd, 0xd);
-}
-
-void Mips64Assembler::SlliB(VectorRegister wd, VectorRegister ws, int shamt3) {
-  CHECK(HasMsa());
-  CHECK(IsUint<3>(shamt3)) << shamt3;
-  EmitMsaBIT(0x0, shamt3 | kMsaDfMByteMask, ws, wd, 0x9);
-}
-
-void Mips64Assembler::SlliH(VectorRegister wd, VectorRegister ws, int shamt4) {
-  CHECK(HasMsa());
-  CHECK(IsUint<4>(shamt4)) << shamt4;
-  EmitMsaBIT(0x0, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9);
-}
-
-void Mips64Assembler::SlliW(VectorRegister wd, VectorRegister ws, int shamt5) {
-  CHECK(HasMsa());
-  CHECK(IsUint<5>(shamt5)) << shamt5;
-  EmitMsaBIT(0x0, shamt5 | kMsaDfMWordMask, ws, wd, 0x9);
-}
-
-void Mips64Assembler::SlliD(VectorRegister wd, VectorRegister ws, int shamt6) {
-  CHECK(HasMsa());
-  CHECK(IsUint<6>(shamt6)) << shamt6;
-  EmitMsaBIT(0x0, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9);
-}
-
-void Mips64Assembler::SraiB(VectorRegister wd, VectorRegister ws, int shamt3) {
-  CHECK(HasMsa());
-  CHECK(IsUint<3>(shamt3)) << shamt3;
-  EmitMsaBIT(0x1, shamt3 | kMsaDfMByteMask, ws, wd, 0x9);
-}
-
-void Mips64Assembler::SraiH(VectorRegister wd, VectorRegister ws, int shamt4) {
-  CHECK(HasMsa());
-  CHECK(IsUint<4>(shamt4)) << shamt4;
-  EmitMsaBIT(0x1, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9);
-}
-
-void Mips64Assembler::SraiW(VectorRegister wd, VectorRegister ws, int shamt5) {
-  CHECK(HasMsa());
-  CHECK(IsUint<5>(shamt5)) << shamt5;
-  EmitMsaBIT(0x1, shamt5 | kMsaDfMWordMask, ws, wd, 0x9);
-}
-
-void Mips64Assembler::SraiD(VectorRegister wd, VectorRegister ws, int shamt6) {
-  CHECK(HasMsa());
-  CHECK(IsUint<6>(shamt6)) << shamt6;
-  EmitMsaBIT(0x1, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9);
-}
-
-void Mips64Assembler::SrliB(VectorRegister wd, VectorRegister ws, int shamt3) {
-  CHECK(HasMsa());
-  CHECK(IsUint<3>(shamt3)) << shamt3;
-  EmitMsaBIT(0x2, shamt3 | kMsaDfMByteMask, ws, wd, 0x9);
-}
-
-void Mips64Assembler::SrliH(VectorRegister wd, VectorRegister ws, int shamt4) {
-  CHECK(HasMsa());
-  CHECK(IsUint<4>(shamt4)) << shamt4;
-  EmitMsaBIT(0x2, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9);
-}
-
-void Mips64Assembler::SrliW(VectorRegister wd, VectorRegister ws, int shamt5) {
-  CHECK(HasMsa());
-  CHECK(IsUint<5>(shamt5)) << shamt5;
-  EmitMsaBIT(0x2, shamt5 | kMsaDfMWordMask, ws, wd, 0x9);
-}
-
-void Mips64Assembler::SrliD(VectorRegister wd, VectorRegister ws, int shamt6) {
-  CHECK(HasMsa());
-  CHECK(IsUint<6>(shamt6)) << shamt6;
-  EmitMsaBIT(0x2, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9);
-}
-
-void Mips64Assembler::MoveV(VectorRegister wd, VectorRegister ws) {
-  CHECK(HasMsa());
-  EmitMsaBIT(0x1, 0x3e, ws, wd, 0x19);
-}
-
-void Mips64Assembler::SplatiB(VectorRegister wd, VectorRegister ws, int n4) {
-  CHECK(HasMsa());
-  CHECK(IsUint<4>(n4)) << n4;
-  EmitMsaELM(0x1, n4 | kMsaDfNByteMask, ws, wd, 0x19);
-}
-
-void Mips64Assembler::SplatiH(VectorRegister wd, VectorRegister ws, int n3) {
-  CHECK(HasMsa());
-  CHECK(IsUint<3>(n3)) << n3;
-  EmitMsaELM(0x1, n3 | kMsaDfNHalfwordMask, ws, wd, 0x19);
-}
-
-void Mips64Assembler::SplatiW(VectorRegister wd, VectorRegister ws, int n2) {
-  CHECK(HasMsa());
-  CHECK(IsUint<2>(n2)) << n2;
-  EmitMsaELM(0x1, n2 | kMsaDfNWordMask, ws, wd, 0x19);
-}
-
-void Mips64Assembler::SplatiD(VectorRegister wd, VectorRegister ws, int n1) {
-  CHECK(HasMsa());
-  CHECK(IsUint<1>(n1)) << n1;
-  EmitMsaELM(0x1, n1 | kMsaDfNDoublewordMask, ws, wd, 0x19);
-}
-
-void Mips64Assembler::Copy_sB(GpuRegister rd, VectorRegister ws, int n4) {
-  CHECK(HasMsa());
-  CHECK(IsUint<4>(n4)) << n4;
-  EmitMsaELM(0x2, n4 | kMsaDfNByteMask, ws, static_cast<VectorRegister>(rd), 0x19);
-}
-
-void Mips64Assembler::Copy_sH(GpuRegister rd, VectorRegister ws, int n3) {
-  CHECK(HasMsa());
-  CHECK(IsUint<3>(n3)) << n3;
-  EmitMsaELM(0x2, n3 | kMsaDfNHalfwordMask, ws, static_cast<VectorRegister>(rd), 0x19);
-}
-
-void Mips64Assembler::Copy_sW(GpuRegister rd, VectorRegister ws, int n2) {
-  CHECK(HasMsa());
-  CHECK(IsUint<2>(n2)) << n2;
-  EmitMsaELM(0x2, n2 | kMsaDfNWordMask, ws, static_cast<VectorRegister>(rd), 0x19);
-}
-
-void Mips64Assembler::Copy_sD(GpuRegister rd, VectorRegister ws, int n1) {
-  CHECK(HasMsa());
-  CHECK(IsUint<1>(n1)) << n1;
-  EmitMsaELM(0x2, n1 | kMsaDfNDoublewordMask, ws, static_cast<VectorRegister>(rd), 0x19);
-}
-
-void Mips64Assembler::Copy_uB(GpuRegister rd, VectorRegister ws, int n4) {
-  CHECK(HasMsa());
-  CHECK(IsUint<4>(n4)) << n4;
-  EmitMsaELM(0x3, n4 | kMsaDfNByteMask, ws, static_cast<VectorRegister>(rd), 0x19);
-}
-
-void Mips64Assembler::Copy_uH(GpuRegister rd, VectorRegister ws, int n3) {
-  CHECK(HasMsa());
-  CHECK(IsUint<3>(n3)) << n3;
-  EmitMsaELM(0x3, n3 | kMsaDfNHalfwordMask, ws, static_cast<VectorRegister>(rd), 0x19);
-}
-
-void Mips64Assembler::Copy_uW(GpuRegister rd, VectorRegister ws, int n2) {
-  CHECK(HasMsa());
-  CHECK(IsUint<2>(n2)) << n2;
-  EmitMsaELM(0x3, n2 | kMsaDfNWordMask, ws, static_cast<VectorRegister>(rd), 0x19);
-}
-
-void Mips64Assembler::InsertB(VectorRegister wd, GpuRegister rs, int n4) {
-  CHECK(HasMsa());
-  CHECK(IsUint<4>(n4)) << n4;
-  EmitMsaELM(0x4, n4 | kMsaDfNByteMask, static_cast<VectorRegister>(rs), wd, 0x19);
-}
-
-void Mips64Assembler::InsertH(VectorRegister wd, GpuRegister rs, int n3) {
-  CHECK(HasMsa());
-  CHECK(IsUint<3>(n3)) << n3;
-  EmitMsaELM(0x4, n3 | kMsaDfNHalfwordMask, static_cast<VectorRegister>(rs), wd, 0x19);
-}
-
-void Mips64Assembler::InsertW(VectorRegister wd, GpuRegister rs, int n2) {
-  CHECK(HasMsa());
-  CHECK(IsUint<2>(n2)) << n2;
-  EmitMsaELM(0x4, n2 | kMsaDfNWordMask, static_cast<VectorRegister>(rs), wd, 0x19);
-}
-
-void Mips64Assembler::InsertD(VectorRegister wd, GpuRegister rs, int n1) {
-  CHECK(HasMsa());
-  CHECK(IsUint<1>(n1)) << n1;
-  EmitMsaELM(0x4, n1 | kMsaDfNDoublewordMask, static_cast<VectorRegister>(rs), wd, 0x19);
-}
-
-void Mips64Assembler::FillB(VectorRegister wd, GpuRegister rs) {
-  CHECK(HasMsa());
-  EmitMsa2R(0xc0, 0x0, static_cast<VectorRegister>(rs), wd, 0x1e);
-}
-
-void Mips64Assembler::FillH(VectorRegister wd, GpuRegister rs) {
-  CHECK(HasMsa());
-  EmitMsa2R(0xc0, 0x1, static_cast<VectorRegister>(rs), wd, 0x1e);
-}
-
-void Mips64Assembler::FillW(VectorRegister wd, GpuRegister rs) {
-  CHECK(HasMsa());
-  EmitMsa2R(0xc0, 0x2, static_cast<VectorRegister>(rs), wd, 0x1e);
-}
-
-void Mips64Assembler::FillD(VectorRegister wd, GpuRegister rs) {
-  CHECK(HasMsa());
-  EmitMsa2R(0xc0, 0x3, static_cast<VectorRegister>(rs), wd, 0x1e);
-}
-
-void Mips64Assembler::LdiB(VectorRegister wd, int imm8) {
-  CHECK(HasMsa());
-  CHECK(IsInt<8>(imm8)) << imm8;
-  EmitMsaI10(0x6, 0x0, imm8 & kMsaS10Mask, wd, 0x7);
-}
-
-void Mips64Assembler::LdiH(VectorRegister wd, int imm10) {
-  CHECK(HasMsa());
-  CHECK(IsInt<10>(imm10)) << imm10;
-  EmitMsaI10(0x6, 0x1, imm10 & kMsaS10Mask, wd, 0x7);
-}
-
-void Mips64Assembler::LdiW(VectorRegister wd, int imm10) {
-  CHECK(HasMsa());
-  CHECK(IsInt<10>(imm10)) << imm10;
-  EmitMsaI10(0x6, 0x2, imm10 & kMsaS10Mask, wd, 0x7);
-}
-
-void Mips64Assembler::LdiD(VectorRegister wd, int imm10) {
-  CHECK(HasMsa());
-  CHECK(IsInt<10>(imm10)) << imm10;
-  EmitMsaI10(0x6, 0x3, imm10 & kMsaS10Mask, wd, 0x7);
-}
-
-void Mips64Assembler::LdB(VectorRegister wd, GpuRegister rs, int offset) {
-  CHECK(HasMsa());
-  CHECK(IsInt<10>(offset)) << offset;
-  EmitMsaMI10(offset & kMsaS10Mask, rs, wd, 0x8, 0x0);
-}
-
-void Mips64Assembler::LdH(VectorRegister wd, GpuRegister rs, int offset) {
-  CHECK(HasMsa());
-  CHECK(IsInt<11>(offset)) << offset;
-  CHECK_ALIGNED(offset, kMips64HalfwordSize);
-  EmitMsaMI10((offset >> TIMES_2) & kMsaS10Mask, rs, wd, 0x8, 0x1);
-}
-
-void Mips64Assembler::LdW(VectorRegister wd, GpuRegister rs, int offset) {
-  CHECK(HasMsa());
-  CHECK(IsInt<12>(offset)) << offset;
-  CHECK_ALIGNED(offset, kMips64WordSize);
-  EmitMsaMI10((offset >> TIMES_4) & kMsaS10Mask, rs, wd, 0x8, 0x2);
-}
-
-void Mips64Assembler::LdD(VectorRegister wd, GpuRegister rs, int offset) {
-  CHECK(HasMsa());
-  CHECK(IsInt<13>(offset)) << offset;
-  CHECK_ALIGNED(offset, kMips64DoublewordSize);
-  EmitMsaMI10((offset >> TIMES_8) & kMsaS10Mask, rs, wd, 0x8, 0x3);
-}
-
-void Mips64Assembler::StB(VectorRegister wd, GpuRegister rs, int offset) {
-  CHECK(HasMsa());
-  CHECK(IsInt<10>(offset)) << offset;
-  EmitMsaMI10(offset & kMsaS10Mask, rs, wd, 0x9, 0x0);
-}
-
-void Mips64Assembler::StH(VectorRegister wd, GpuRegister rs, int offset) {
-  CHECK(HasMsa());
-  CHECK(IsInt<11>(offset)) << offset;
-  CHECK_ALIGNED(offset, kMips64HalfwordSize);
-  EmitMsaMI10((offset >> TIMES_2) & kMsaS10Mask, rs, wd, 0x9, 0x1);
-}
-
-void Mips64Assembler::StW(VectorRegister wd, GpuRegister rs, int offset) {
-  CHECK(HasMsa());
-  CHECK(IsInt<12>(offset)) << offset;
-  CHECK_ALIGNED(offset, kMips64WordSize);
-  EmitMsaMI10((offset >> TIMES_4) & kMsaS10Mask, rs, wd, 0x9, 0x2);
-}
-
-void Mips64Assembler::StD(VectorRegister wd, GpuRegister rs, int offset) {
-  CHECK(HasMsa());
-  CHECK(IsInt<13>(offset)) << offset;
-  CHECK_ALIGNED(offset, kMips64DoublewordSize);
-  EmitMsaMI10((offset >> TIMES_8) & kMsaS10Mask, rs, wd, 0x9, 0x3);
-}
-
-void Mips64Assembler::IlvlB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x14);
-}
-
-void Mips64Assembler::IlvlH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x14);
-}
-
-void Mips64Assembler::IlvlW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x14);
-}
-
-void Mips64Assembler::IlvlD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x14);
-}
-
-void Mips64Assembler::IlvrB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x14);
-}
-
-void Mips64Assembler::IlvrH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x14);
-}
-
-void Mips64Assembler::IlvrW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x14);
-}
-
-void Mips64Assembler::IlvrD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x14);
-}
-
-void Mips64Assembler::IlvevB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x14);
-}
-
-void Mips64Assembler::IlvevH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x14);
-}
-
-void Mips64Assembler::IlvevW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x6, 0x2, wt, ws, wd, 0x14);
-}
-
-void Mips64Assembler::IlvevD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x6, 0x3, wt, ws, wd, 0x14);
-}
-
-void Mips64Assembler::IlvodB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x14);
-}
-
-void Mips64Assembler::IlvodH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x14);
-}
-
-void Mips64Assembler::IlvodW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x7, 0x2, wt, ws, wd, 0x14);
-}
-
-void Mips64Assembler::IlvodD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x14);
-}
-
-void Mips64Assembler::MaddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x1, 0x0, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::MaddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x1, 0x1, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::MaddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x1, 0x2, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::MaddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x1, 0x3, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::MsubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x2, 0x0, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::MsubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x2, 0x1, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::MsubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x2, 0x2, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::MsubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x2, 0x3, wt, ws, wd, 0x12);
-}
-
-void Mips64Assembler::FmaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x2, 0x0, wt, ws, wd, 0x1b);
-}
-
-void Mips64Assembler::FmaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x2, 0x1, wt, ws, wd, 0x1b);
-}
-
-void Mips64Assembler::FmsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x2, 0x2, wt, ws, wd, 0x1b);
-}
-
-void Mips64Assembler::FmsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x2, 0x3, wt, ws, wd, 0x1b);
-}
-
-void Mips64Assembler::Hadd_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x15);
-}
-
-void Mips64Assembler::Hadd_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x15);
-}
-
-void Mips64Assembler::Hadd_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x15);
-}
-
-void Mips64Assembler::Hadd_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x15);
-}
-
-void Mips64Assembler::Hadd_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x15);
-}
-
-void Mips64Assembler::Hadd_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) {
-  CHECK(HasMsa());
-  EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x15);
-}
-
-void Mips64Assembler::PcntB(VectorRegister wd, VectorRegister ws) {
-  CHECK(HasMsa());
-  EmitMsa2R(0xc1, 0x0, ws, wd, 0x1e);
-}
-
-void Mips64Assembler::PcntH(VectorRegister wd, VectorRegister ws) {
-  CHECK(HasMsa());
-  EmitMsa2R(0xc1, 0x1, ws, wd, 0x1e);
-}
-
-void Mips64Assembler::PcntW(VectorRegister wd, VectorRegister ws) {
-  CHECK(HasMsa());
-  EmitMsa2R(0xc1, 0x2, ws, wd, 0x1e);
-}
-
-void Mips64Assembler::PcntD(VectorRegister wd, VectorRegister ws) {
-  CHECK(HasMsa());
-  EmitMsa2R(0xc1, 0x3, ws, wd, 0x1e);
-}
-
-void Mips64Assembler::ReplicateFPToVectorRegister(VectorRegister dst,
-                                                  FpuRegister src,
-                                                  bool is_double) {
-  // Float or double in FPU register Fx can be considered as 0th element in vector register Wx.
-  if (is_double) {
-    SplatiD(dst, static_cast<VectorRegister>(src), 0);
-  } else {
-    SplatiW(dst, static_cast<VectorRegister>(src), 0);
-  }
-}
-
-void Mips64Assembler::LoadConst32(GpuRegister rd, int32_t value) {
-  TemplateLoadConst32(this, rd, value);
-}
-
-// This function is only used for testing purposes.
-void Mips64Assembler::RecordLoadConst64Path(int value ATTRIBUTE_UNUSED) {
-}
-
-void Mips64Assembler::LoadConst64(GpuRegister rd, int64_t value) {
-  TemplateLoadConst64(this, rd, value);
-}
-
-void Mips64Assembler::Addiu32(GpuRegister rt, GpuRegister rs, int32_t value) {
-  if (IsInt<16>(value)) {
-    Addiu(rt, rs, value);
-  } else {
-    int16_t high = High16Bits(value);
-    int16_t low = Low16Bits(value);
-    high += (low < 0) ? 1 : 0;  // Account for sign extension in addiu.
-    Aui(rt, rs, high);
-    if (low != 0) {
-      Addiu(rt, rt, low);
-    }
-  }
-}
-
-// TODO: don't use rtmp, use daui, dahi, dati.
-void Mips64Assembler::Daddiu64(GpuRegister rt, GpuRegister rs, int64_t value, GpuRegister rtmp) {
-  CHECK_NE(rs, rtmp);
-  if (IsInt<16>(value)) {
-    Daddiu(rt, rs, value);
-  } else {
-    LoadConst64(rtmp, value);
-    Daddu(rt, rs, rtmp);
-  }
-}
-
-void Mips64Assembler::Branch::InitShortOrLong(Mips64Assembler::Branch::OffsetBits offset_size,
-                                              Mips64Assembler::Branch::Type short_type,
-                                              Mips64Assembler::Branch::Type long_type) {
-  type_ = (offset_size <= branch_info_[short_type].offset_size) ? short_type : long_type;
-}
-
-void Mips64Assembler::Branch::InitializeType(Type initial_type, bool is_r6) {
-  OffsetBits offset_size_needed = GetOffsetSizeNeeded(location_, target_);
-  if (is_r6) {
-    // R6
-    switch (initial_type) {
-      case kLabel:
-      case kLiteral:
-      case kLiteralUnsigned:
-      case kLiteralLong:
-        CHECK(!IsResolved());
-        type_ = initial_type;
-        break;
-      case kCall:
-        InitShortOrLong(offset_size_needed, kCall, kLongCall);
-        break;
-      case kCondBranch:
-        switch (condition_) {
-          case kUncond:
-            InitShortOrLong(offset_size_needed, kUncondBranch, kLongUncondBranch);
-            break;
-          case kCondEQZ:
-          case kCondNEZ:
-            // Special case for beqzc/bnezc with longer offset than in other b<cond>c instructions.
-            type_ = (offset_size_needed <= kOffset23) ? kCondBranch : kLongCondBranch;
-            break;
-          default:
-            InitShortOrLong(offset_size_needed, kCondBranch, kLongCondBranch);
-            break;
-        }
-        break;
-      case kBareCall:
-        type_ = kBareCall;
-        CHECK_LE(offset_size_needed, GetOffsetSize());
-        break;
-      case kBareCondBranch:
-        type_ = (condition_ == kUncond) ? kBareUncondBranch : kBareCondBranch;
-        CHECK_LE(offset_size_needed, GetOffsetSize());
-        break;
-      default:
-        LOG(FATAL) << "Unexpected branch type " << initial_type;
-        UNREACHABLE();
-    }
-  } else {
-    // R2
-    CHECK_EQ(initial_type, kBareCondBranch);
-    switch (condition_) {
-      case kCondLTZ:
-      case kCondGEZ:
-      case kCondLEZ:
-      case kCondGTZ:
-      case kCondEQ:
-      case kCondNE:
-      case kCondEQZ:
-      case kCondNEZ:
-        break;
-      default:
-        LOG(FATAL) << "Unexpected R2 branch condition " << condition_;
-        UNREACHABLE();
-    }
-    type_ = kR2BareCondBranch;
-    CHECK_LE(offset_size_needed, GetOffsetSize());
-  }
-  old_type_ = type_;
-}
-
-bool Mips64Assembler::Branch::IsNop(BranchCondition condition, GpuRegister lhs, GpuRegister rhs) {
-  switch (condition) {
-    case kCondLT:
-    case kCondGT:
-    case kCondNE:
-    case kCondLTU:
-      return lhs == rhs;
-    default:
-      return false;
-  }
-}
-
-bool Mips64Assembler::Branch::IsUncond(BranchCondition condition,
-                                       GpuRegister lhs,
-                                       GpuRegister rhs) {
-  switch (condition) {
-    case kUncond:
-      return true;
-    case kCondGE:
-    case kCondLE:
-    case kCondEQ:
-    case kCondGEU:
-      return lhs == rhs;
-    default:
-      return false;
-  }
-}
-
-Mips64Assembler::Branch::Branch(uint32_t location, uint32_t target, bool is_call, bool is_bare)
-    : old_location_(location),
-      location_(location),
-      target_(target),
-      lhs_reg_(ZERO),
-      rhs_reg_(ZERO),
-      condition_(kUncond) {
-  InitializeType(
-      (is_call ? (is_bare ? kBareCall : kCall) : (is_bare ? kBareCondBranch : kCondBranch)),
-      /* is_r6= */ true);
-}
-
-Mips64Assembler::Branch::Branch(bool is_r6,
-                                uint32_t location,
-                                uint32_t target,
-                                Mips64Assembler::BranchCondition condition,
-                                GpuRegister lhs_reg,
-                                GpuRegister rhs_reg,
-                                bool is_bare)
-    : old_location_(location),
-      location_(location),
-      target_(target),
-      lhs_reg_(lhs_reg),
-      rhs_reg_(rhs_reg),
-      condition_(condition) {
-  CHECK_NE(condition, kUncond);
-  switch (condition) {
-    case kCondEQ:
-    case kCondNE:
-    case kCondLT:
-    case kCondGE:
-    case kCondLE:
-    case kCondGT:
-    case kCondLTU:
-    case kCondGEU:
-      CHECK_NE(lhs_reg, ZERO);
-      CHECK_NE(rhs_reg, ZERO);
-      break;
-    case kCondLTZ:
-    case kCondGEZ:
-    case kCondLEZ:
-    case kCondGTZ:
-    case kCondEQZ:
-    case kCondNEZ:
-      CHECK_NE(lhs_reg, ZERO);
-      CHECK_EQ(rhs_reg, ZERO);
-      break;
-    case kCondF:
-    case kCondT:
-      CHECK_EQ(rhs_reg, ZERO);
-      break;
-    case kUncond:
-      UNREACHABLE();
-  }
-  CHECK(!IsNop(condition, lhs_reg, rhs_reg));
-  if (IsUncond(condition, lhs_reg, rhs_reg)) {
-    // Branch condition is always true, make the branch unconditional.
-    condition_ = kUncond;
-  }
-  InitializeType((is_bare ? kBareCondBranch : kCondBranch), is_r6);
-}
-
-Mips64Assembler::Branch::Branch(uint32_t location, GpuRegister dest_reg, Type label_or_literal_type)
-    : old_location_(location),
-      location_(location),
-      target_(kUnresolved),
-      lhs_reg_(dest_reg),
-      rhs_reg_(ZERO),
-      condition_(kUncond) {
-  CHECK_NE(dest_reg, ZERO);
-  InitializeType(label_or_literal_type, /* is_r6= */ true);
-}
-
-Mips64Assembler::BranchCondition Mips64Assembler::Branch::OppositeCondition(
-    Mips64Assembler::BranchCondition cond) {
-  switch (cond) {
-    case kCondLT:
-      return kCondGE;
-    case kCondGE:
-      return kCondLT;
-    case kCondLE:
-      return kCondGT;
-    case kCondGT:
-      return kCondLE;
-    case kCondLTZ:
-      return kCondGEZ;
-    case kCondGEZ:
-      return kCondLTZ;
-    case kCondLEZ:
-      return kCondGTZ;
-    case kCondGTZ:
-      return kCondLEZ;
-    case kCondEQ:
-      return kCondNE;
-    case kCondNE:
-      return kCondEQ;
-    case kCondEQZ:
-      return kCondNEZ;
-    case kCondNEZ:
-      return kCondEQZ;
-    case kCondLTU:
-      return kCondGEU;
-    case kCondGEU:
-      return kCondLTU;
-    case kCondF:
-      return kCondT;
-    case kCondT:
-      return kCondF;
-    case kUncond:
-      LOG(FATAL) << "Unexpected branch condition " << cond;
-  }
-  UNREACHABLE();
-}
-
-Mips64Assembler::Branch::Type Mips64Assembler::Branch::GetType() const {
-  return type_;
-}
-
-Mips64Assembler::BranchCondition Mips64Assembler::Branch::GetCondition() const {
-  return condition_;
-}
-
-GpuRegister Mips64Assembler::Branch::GetLeftRegister() const {
-  return lhs_reg_;
-}
-
-GpuRegister Mips64Assembler::Branch::GetRightRegister() const {
-  return rhs_reg_;
-}
-
-uint32_t Mips64Assembler::Branch::GetTarget() const {
-  return target_;
-}
-
-uint32_t Mips64Assembler::Branch::GetLocation() const {
-  return location_;
-}
-
-uint32_t Mips64Assembler::Branch::GetOldLocation() const {
-  return old_location_;
-}
-
-uint32_t Mips64Assembler::Branch::GetLength() const {
-  return branch_info_[type_].length;
-}
-
-uint32_t Mips64Assembler::Branch::GetOldLength() const {
-  return branch_info_[old_type_].length;
-}
-
-uint32_t Mips64Assembler::Branch::GetSize() const {
-  return GetLength() * sizeof(uint32_t);
-}
-
-uint32_t Mips64Assembler::Branch::GetOldSize() const {
-  return GetOldLength() * sizeof(uint32_t);
-}
-
-uint32_t Mips64Assembler::Branch::GetEndLocation() const {
-  return GetLocation() + GetSize();
-}
-
-uint32_t Mips64Assembler::Branch::GetOldEndLocation() const {
-  return GetOldLocation() + GetOldSize();
-}
-
-bool Mips64Assembler::Branch::IsBare() const {
-  switch (type_) {
-    // R6 short branches (can't be promoted to long), forbidden/delay slots filled manually.
-    case kBareUncondBranch:
-    case kBareCondBranch:
-    case kBareCall:
-    // R2 short branches (can't be promoted to long), delay slots filled manually.
-    case kR2BareCondBranch:
-      return true;
-    default:
-      return false;
-  }
-}
-
-bool Mips64Assembler::Branch::IsLong() const {
-  switch (type_) {
-    // R6 short branches (can be promoted to long).
-    case kUncondBranch:
-    case kCondBranch:
-    case kCall:
-    // R6 short branches (can't be promoted to long), forbidden/delay slots filled manually.
-    case kBareUncondBranch:
-    case kBareCondBranch:
-    case kBareCall:
-    // R2 short branches (can't be promoted to long), delay slots filled manually.
-    case kR2BareCondBranch:
-    // Near label.
-    case kLabel:
-    // Near literals.
-    case kLiteral:
-    case kLiteralUnsigned:
-    case kLiteralLong:
-      return false;
-    // Long branches.
-    case kLongUncondBranch:
-    case kLongCondBranch:
-    case kLongCall:
-    // Far label.
-    case kFarLabel:
-    // Far literals.
-    case kFarLiteral:
-    case kFarLiteralUnsigned:
-    case kFarLiteralLong:
-      return true;
-  }
-  UNREACHABLE();
-}
-
-bool Mips64Assembler::Branch::IsResolved() const {
-  return target_ != kUnresolved;
-}
-
-Mips64Assembler::Branch::OffsetBits Mips64Assembler::Branch::GetOffsetSize() const {
-  bool r6_cond_branch = (type_ == kCondBranch || type_ == kBareCondBranch);
-  OffsetBits offset_size =
-      (r6_cond_branch && (condition_ == kCondEQZ || condition_ == kCondNEZ))
-          ? kOffset23
-          : branch_info_[type_].offset_size;
-  return offset_size;
-}
-
-Mips64Assembler::Branch::OffsetBits Mips64Assembler::Branch::GetOffsetSizeNeeded(uint32_t location,
-                                                                                 uint32_t target) {
-  // For unresolved targets assume the shortest encoding
-  // (later it will be made longer if needed).
-  if (target == kUnresolved)
-    return kOffset16;
-  int64_t distance = static_cast<int64_t>(target) - location;
-  // To simplify calculations in composite branches consisting of multiple instructions
-  // bump up the distance by a value larger than the max byte size of a composite branch.
-  distance += (distance >= 0) ? kMaxBranchSize : -kMaxBranchSize;
-  if (IsInt<kOffset16>(distance))
-    return kOffset16;
-  else if (IsInt<kOffset18>(distance))
-    return kOffset18;
-  else if (IsInt<kOffset21>(distance))
-    return kOffset21;
-  else if (IsInt<kOffset23>(distance))
-    return kOffset23;
-  else if (IsInt<kOffset28>(distance))
-    return kOffset28;
-  return kOffset32;
-}
-
-void Mips64Assembler::Branch::Resolve(uint32_t target) {
-  target_ = target;
-}
-
-void Mips64Assembler::Branch::Relocate(uint32_t expand_location, uint32_t delta) {
-  if (location_ > expand_location) {
-    location_ += delta;
-  }
-  if (!IsResolved()) {
-    return;  // Don't know the target yet.
-  }
-  if (target_ > expand_location) {
-    target_ += delta;
-  }
-}
-
-void Mips64Assembler::Branch::PromoteToLong() {
-  CHECK(!IsBare());  // Bare branches do not promote.
-  switch (type_) {
-    // R6 short branches (can be promoted to long).
-    case kUncondBranch:
-      type_ = kLongUncondBranch;
-      break;
-    case kCondBranch:
-      type_ = kLongCondBranch;
-      break;
-    case kCall:
-      type_ = kLongCall;
-      break;
-    // Near label.
-    case kLabel:
-      type_ = kFarLabel;
-      break;
-    // Near literals.
-    case kLiteral:
-      type_ = kFarLiteral;
-      break;
-    case kLiteralUnsigned:
-      type_ = kFarLiteralUnsigned;
-      break;
-    case kLiteralLong:
-      type_ = kFarLiteralLong;
-      break;
-    default:
-      // Note: 'type_' is already long.
-      break;
-  }
-  CHECK(IsLong());
-}
-
-uint32_t Mips64Assembler::Branch::PromoteIfNeeded(uint32_t max_short_distance) {
-  // If the branch is still unresolved or already long, nothing to do.
-  if (IsLong() || !IsResolved()) {
-    return 0;
-  }
-  // Promote the short branch to long if the offset size is too small
-  // to hold the distance between location_ and target_.
-  if (GetOffsetSizeNeeded(location_, target_) > GetOffsetSize()) {
-    PromoteToLong();
-    uint32_t old_size = GetOldSize();
-    uint32_t new_size = GetSize();
-    CHECK_GT(new_size, old_size);
-    return new_size - old_size;
-  }
-  // The following logic is for debugging/testing purposes.
-  // Promote some short branches to long when it's not really required.
-  if (UNLIKELY(max_short_distance != std::numeric_limits<uint32_t>::max() && !IsBare())) {
-    int64_t distance = static_cast<int64_t>(target_) - location_;
-    distance = (distance >= 0) ? distance : -distance;
-    if (distance >= max_short_distance) {
-      PromoteToLong();
-      uint32_t old_size = GetOldSize();
-      uint32_t new_size = GetSize();
-      CHECK_GT(new_size, old_size);
-      return new_size - old_size;
-    }
-  }
-  return 0;
-}
-
-uint32_t Mips64Assembler::Branch::GetOffsetLocation() const {
-  return location_ + branch_info_[type_].instr_offset * sizeof(uint32_t);
-}
-
-uint32_t Mips64Assembler::Branch::GetOffset() const {
-  CHECK(IsResolved());
-  uint32_t ofs_mask = 0xFFFFFFFF >> (32 - GetOffsetSize());
-  // Calculate the byte distance between instructions and also account for
-  // different PC-relative origins.
-  uint32_t offset_location = GetOffsetLocation();
-  if (type_ == kLiteralLong) {
-    // Special case for the ldpc instruction, whose address (PC) is rounded down to
-    // a multiple of 8 before adding the offset.
-    // Note, branch promotion has already taken care of aligning `target_` to an
-    // address that's a multiple of 8.
-    offset_location = RoundDown(offset_location, sizeof(uint64_t));
-  }
-  uint32_t offset = target_ - offset_location - branch_info_[type_].pc_org * sizeof(uint32_t);
-  // Prepare the offset for encoding into the instruction(s).
-  offset = (offset & ofs_mask) >> branch_info_[type_].offset_shift;
-  return offset;
-}
-
-Mips64Assembler::Branch* Mips64Assembler::GetBranch(uint32_t branch_id) {
-  CHECK_LT(branch_id, branches_.size());
-  return &branches_[branch_id];
-}
-
-const Mips64Assembler::Branch* Mips64Assembler::GetBranch(uint32_t branch_id) const {
-  CHECK_LT(branch_id, branches_.size());
-  return &branches_[branch_id];
-}
-
-void Mips64Assembler::Bind(Mips64Label* label) {
-  CHECK(!label->IsBound());
-  uint32_t bound_pc = buffer_.Size();
-
-  // Walk the list of branches referring to and preceding this label.
-  // Store the previously unknown target addresses in them.
-  while (label->IsLinked()) {
-    uint32_t branch_id = label->Position();
-    Branch* branch = GetBranch(branch_id);
-    branch->Resolve(bound_pc);
-
-    uint32_t branch_location = branch->GetLocation();
-    // Extract the location of the previous branch in the list (walking the list backwards;
-    // the previous branch ID was stored in the space reserved for this branch).
-    uint32_t prev = buffer_.Load<uint32_t>(branch_location);
-
-    // On to the previous branch in the list...
-    label->position_ = prev;
-  }
-
-  // Now make the label object contain its own location (relative to the end of the preceding
-  // branch, if any; it will be used by the branches referring to and following this label).
-  label->prev_branch_id_plus_one_ = branches_.size();
-  if (label->prev_branch_id_plus_one_) {
-    uint32_t branch_id = label->prev_branch_id_plus_one_ - 1;
-    const Branch* branch = GetBranch(branch_id);
-    bound_pc -= branch->GetEndLocation();
-  }
-  label->BindTo(bound_pc);
-}
-
-uint32_t Mips64Assembler::GetLabelLocation(const Mips64Label* label) const {
-  CHECK(label->IsBound());
-  uint32_t target = label->Position();
-  if (label->prev_branch_id_plus_one_) {
-    // Get label location based on the branch preceding it.
-    uint32_t branch_id = label->prev_branch_id_plus_one_ - 1;
-    const Branch* branch = GetBranch(branch_id);
-    target += branch->GetEndLocation();
-  }
-  return target;
-}
-
-uint32_t Mips64Assembler::GetAdjustedPosition(uint32_t old_position) {
-  // We can reconstruct the adjustment by going through all the branches from the beginning
-  // up to the old_position. Since we expect AdjustedPosition() to be called in a loop
-  // with increasing old_position, we can use the data from last AdjustedPosition() to
-  // continue where we left off and the whole loop should be O(m+n) where m is the number
-  // of positions to adjust and n is the number of branches.
-  if (old_position < last_old_position_) {
-    last_position_adjustment_ = 0;
-    last_old_position_ = 0;
-    last_branch_id_ = 0;
-  }
-  while (last_branch_id_ != branches_.size()) {
-    const Branch* branch = GetBranch(last_branch_id_);
-    if (branch->GetLocation() >= old_position + last_position_adjustment_) {
-      break;
-    }
-    last_position_adjustment_ += branch->GetSize() - branch->GetOldSize();
-    ++last_branch_id_;
-  }
-  last_old_position_ = old_position;
-  return old_position + last_position_adjustment_;
-}
-
-void Mips64Assembler::FinalizeLabeledBranch(Mips64Label* label) {
-  uint32_t length = branches_.back().GetLength();
-  if (!label->IsBound()) {
-    // Branch forward (to a following label), distance is unknown.
-    // The first branch forward will contain 0, serving as the terminator of
-    // the list of forward-reaching branches.
-    Emit(label->position_);
-    length--;
-    // Now make the label object point to this branch
-    // (this forms a linked list of branches preceding this label).
-    uint32_t branch_id = branches_.size() - 1;
-    label->LinkTo(branch_id);
-  }
-  // Reserve space for the branch.
-  for (; length != 0u; --length) {
-    Nop();
-  }
-}
-
-void Mips64Assembler::Buncond(Mips64Label* label, bool is_bare) {
-  uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
-  branches_.emplace_back(buffer_.Size(), target, /* is_call= */ false, is_bare);
-  FinalizeLabeledBranch(label);
-}
-
-void Mips64Assembler::Bcond(Mips64Label* label,
-                            bool is_r6,
-                            bool is_bare,
-                            BranchCondition condition,
-                            GpuRegister lhs,
-                            GpuRegister rhs) {
-  // If lhs = rhs, this can be a NOP.
-  if (Branch::IsNop(condition, lhs, rhs)) {
-    return;
-  }
-  uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
-  branches_.emplace_back(is_r6, buffer_.Size(), target, condition, lhs, rhs, is_bare);
-  FinalizeLabeledBranch(label);
-}
-
-void Mips64Assembler::Call(Mips64Label* label, bool is_bare) {
-  uint32_t target = label->IsBound() ? GetLabelLocation(label) : Branch::kUnresolved;
-  branches_.emplace_back(buffer_.Size(), target, /* is_call= */ true, is_bare);
-  FinalizeLabeledBranch(label);
-}
-
-void Mips64Assembler::LoadLabelAddress(GpuRegister dest_reg, Mips64Label* label) {
-  // Label address loads are treated as pseudo branches since they require very similar handling.
-  DCHECK(!label->IsBound());
-  branches_.emplace_back(buffer_.Size(), dest_reg, Branch::kLabel);
-  FinalizeLabeledBranch(label);
-}
-
-Literal* Mips64Assembler::NewLiteral(size_t size, const uint8_t* data) {
-  // We don't support byte and half-word literals.
-  if (size == 4u) {
-    literals_.emplace_back(size, data);
-    return &literals_.back();
-  } else {
-    DCHECK_EQ(size, 8u);
-    long_literals_.emplace_back(size, data);
-    return &long_literals_.back();
-  }
-}
-
-void Mips64Assembler::LoadLiteral(GpuRegister dest_reg,
-                                  LoadOperandType load_type,
-                                  Literal* literal) {
-  // Literal loads are treated as pseudo branches since they require very similar handling.
-  Branch::Type literal_type;
-  switch (load_type) {
-    case kLoadWord:
-      DCHECK_EQ(literal->GetSize(), 4u);
-      literal_type = Branch::kLiteral;
-      break;
-    case kLoadUnsignedWord:
-      DCHECK_EQ(literal->GetSize(), 4u);
-      literal_type = Branch::kLiteralUnsigned;
-      break;
-    case kLoadDoubleword:
-      DCHECK_EQ(literal->GetSize(), 8u);
-      literal_type = Branch::kLiteralLong;
-      break;
-    default:
-      LOG(FATAL) << "Unexpected literal load type " << load_type;
-      UNREACHABLE();
-  }
-  Mips64Label* label = literal->GetLabel();
-  DCHECK(!label->IsBound());
-  branches_.emplace_back(buffer_.Size(), dest_reg, literal_type);
-  FinalizeLabeledBranch(label);
-}
-
-JumpTable* Mips64Assembler::CreateJumpTable(std::vector<Mips64Label*>&& labels) {
-  jump_tables_.emplace_back(std::move(labels));
-  JumpTable* table = &jump_tables_.back();
-  DCHECK(!table->GetLabel()->IsBound());
-  return table;
-}
-
-void Mips64Assembler::ReserveJumpTableSpace() {
-  if (!jump_tables_.empty()) {
-    for (JumpTable& table : jump_tables_) {
-      Mips64Label* label = table.GetLabel();
-      Bind(label);
-
-      // Bulk ensure capacity, as this may be large.
-      size_t orig_size = buffer_.Size();
-      size_t required_capacity = orig_size + table.GetSize();
-      if (required_capacity > buffer_.Capacity()) {
-        buffer_.ExtendCapacity(required_capacity);
-      }
-#ifndef NDEBUG
-      buffer_.has_ensured_capacity_ = true;
-#endif
-
-      // Fill the space with dummy data as the data is not final
-      // until the branches have been promoted. And we shouldn't
-      // be moving uninitialized data during branch promotion.
-      for (size_t cnt = table.GetData().size(), i = 0; i < cnt; i++) {
-        buffer_.Emit<uint32_t>(0x1abe1234u);
-      }
-
-#ifndef NDEBUG
-      buffer_.has_ensured_capacity_ = false;
-#endif
-    }
-  }
-}
-
-void Mips64Assembler::EmitJumpTables() {
-  if (!jump_tables_.empty()) {
-    CHECK(!overwriting_);
-    // Switch from appending instructions at the end of the buffer to overwriting
-    // existing instructions (here, jump tables) in the buffer.
-    overwriting_ = true;
-
-    for (JumpTable& table : jump_tables_) {
-      Mips64Label* table_label = table.GetLabel();
-      uint32_t start = GetLabelLocation(table_label);
-      overwrite_location_ = start;
-
-      for (Mips64Label* target : table.GetData()) {
-        CHECK_EQ(buffer_.Load<uint32_t>(overwrite_location_), 0x1abe1234u);
-        // The table will contain target addresses relative to the table start.
-        uint32_t offset = GetLabelLocation(target) - start;
-        Emit(offset);
-      }
-    }
-
-    overwriting_ = false;
-  }
-}
-
-void Mips64Assembler::EmitLiterals() {
-  if (!literals_.empty()) {
-    for (Literal& literal : literals_) {
-      Mips64Label* label = literal.GetLabel();
-      Bind(label);
-      AssemblerBuffer::EnsureCapacity ensured(&buffer_);
-      DCHECK_EQ(literal.GetSize(), 4u);
-      for (size_t i = 0, size = literal.GetSize(); i != size; ++i) {
-        buffer_.Emit<uint8_t>(literal.GetData()[i]);
-      }
-    }
-  }
-  if (!long_literals_.empty()) {
-    // Reserve 4 bytes for potential alignment. If after the branch promotion the 64-bit
-    // literals don't end up 8-byte-aligned, they will be moved down 4 bytes.
-    Emit(0);  // NOP.
-    for (Literal& literal : long_literals_) {
-      Mips64Label* label = literal.GetLabel();
-      Bind(label);
-      AssemblerBuffer::EnsureCapacity ensured(&buffer_);
-      DCHECK_EQ(literal.GetSize(), 8u);
-      for (size_t i = 0, size = literal.GetSize(); i != size; ++i) {
-        buffer_.Emit<uint8_t>(literal.GetData()[i]);
-      }
-    }
-  }
-}
-
-void Mips64Assembler::PromoteBranches() {
-  // Promote short branches to long as necessary.
-  bool changed;
-  do {
-    changed = false;
-    for (auto& branch : branches_) {
-      CHECK(branch.IsResolved());
-      uint32_t delta = branch.PromoteIfNeeded();
-      // If this branch has been promoted and needs to expand in size,
-      // relocate all branches by the expansion size.
-      if (delta) {
-        changed = true;
-        uint32_t expand_location = branch.GetLocation();
-        for (auto& branch2 : branches_) {
-          branch2.Relocate(expand_location, delta);
-        }
-      }
-    }
-  } while (changed);
-
-  // Account for branch expansion by resizing the code buffer
-  // and moving the code in it to its final location.
-  size_t branch_count = branches_.size();
-  if (branch_count > 0) {
-    // Resize.
-    Branch& last_branch = branches_[branch_count - 1];
-    uint32_t size_delta = last_branch.GetEndLocation() - last_branch.GetOldEndLocation();
-    uint32_t old_size = buffer_.Size();
-    buffer_.Resize(old_size + size_delta);
-    // Move the code residing between branch placeholders.
-    uint32_t end = old_size;
-    for (size_t i = branch_count; i > 0; ) {
-      Branch& branch = branches_[--i];
-      uint32_t size = end - branch.GetOldEndLocation();
-      buffer_.Move(branch.GetEndLocation(), branch.GetOldEndLocation(), size);
-      end = branch.GetOldLocation();
-    }
-  }
-
-  // Align 64-bit literals by moving them down by 4 bytes if needed.
-  // This will reduce the PC-relative distance, which should be safe for both near and far literals.
-  if (!long_literals_.empty()) {
-    uint32_t first_literal_location = GetLabelLocation(long_literals_.front().GetLabel());
-    size_t lit_size = long_literals_.size() * sizeof(uint64_t);
-    size_t buf_size = buffer_.Size();
-    // 64-bit literals must be at the very end of the buffer.
-    CHECK_EQ(first_literal_location + lit_size, buf_size);
-    if (!IsAligned<sizeof(uint64_t)>(first_literal_location)) {
-      buffer_.Move(first_literal_location - sizeof(uint32_t), first_literal_location, lit_size);
-      // The 4 reserved bytes proved useless, reduce the buffer size.
-      buffer_.Resize(buf_size - sizeof(uint32_t));
-      // Reduce target addresses in literal and address loads by 4 bytes in order for correct
-      // offsets from PC to be generated.
-      for (auto& branch : branches_) {
-        uint32_t target = branch.GetTarget();
-        if (target >= first_literal_location) {
-          branch.Resolve(target - sizeof(uint32_t));
-        }
-      }
-      // If after this we ever call GetLabelLocation() to get the location of a 64-bit literal,
-      // we need to adjust the location of the literal's label as well.
-      for (Literal& literal : long_literals_) {
-        // Bound label's position is negative, hence incrementing it instead of decrementing.
-        literal.GetLabel()->position_ += sizeof(uint32_t);
-      }
-    }
-  }
-}
-
-// Note: make sure branch_info_[] and EmitBranch() are kept synchronized.
-const Mips64Assembler::Branch::BranchInfo Mips64Assembler::Branch::branch_info_[] = {
-  // R6 short branches (can be promoted to long).
-  {  1, 0, 1, Mips64Assembler::Branch::kOffset28, 2 },  // kUncondBranch
-  {  2, 0, 1, Mips64Assembler::Branch::kOffset18, 2 },  // kCondBranch
-                                                        // Exception: kOffset23 for beqzc/bnezc
-  {  1, 0, 1, Mips64Assembler::Branch::kOffset28, 2 },  // kCall
-  // R6 short branches (can't be promoted to long), forbidden/delay slots filled manually.
-  {  1, 0, 1, Mips64Assembler::Branch::kOffset28, 2 },  // kBareUncondBranch
-  {  1, 0, 1, Mips64Assembler::Branch::kOffset18, 2 },  // kBareCondBranch
-                                                        // Exception: kOffset23 for beqzc/bnezc
-  {  1, 0, 1, Mips64Assembler::Branch::kOffset28, 2 },  // kBareCall
-  // R2 short branches (can't be promoted to long), delay slots filled manually.
-  {  1, 0, 1, Mips64Assembler::Branch::kOffset18, 2 },  // kR2BareCondBranch
-  // Near label.
-  {  1, 0, 0, Mips64Assembler::Branch::kOffset21, 2 },  // kLabel
-  // Near literals.
-  {  1, 0, 0, Mips64Assembler::Branch::kOffset21, 2 },  // kLiteral
-  {  1, 0, 0, Mips64Assembler::Branch::kOffset21, 2 },  // kLiteralUnsigned
-  {  1, 0, 0, Mips64Assembler::Branch::kOffset21, 3 },  // kLiteralLong
-  // Long branches.
-  {  2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 },  // kLongUncondBranch
-  {  3, 1, 0, Mips64Assembler::Branch::kOffset32, 0 },  // kLongCondBranch
-  {  2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 },  // kLongCall
-  // Far label.
-  {  2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 },  // kFarLabel
-  // Far literals.
-  {  2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 },  // kFarLiteral
-  {  2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 },  // kFarLiteralUnsigned
-  {  2, 0, 0, Mips64Assembler::Branch::kOffset32, 0 },  // kFarLiteralLong
-};
-
-// Note: make sure branch_info_[] and EmitBranch() are kept synchronized.
-void Mips64Assembler::EmitBranch(Mips64Assembler::Branch* branch) {
-  CHECK(overwriting_);
-  overwrite_location_ = branch->GetLocation();
-  uint32_t offset = branch->GetOffset();
-  BranchCondition condition = branch->GetCondition();
-  GpuRegister lhs = branch->GetLeftRegister();
-  GpuRegister rhs = branch->GetRightRegister();
-  switch (branch->GetType()) {
-    // Short branches.
-    case Branch::kUncondBranch:
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Bc(offset);
-      break;
-    case Branch::kCondBranch:
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      EmitBcondR6(condition, lhs, rhs, offset);
-      Nop();  // TODO: improve by filling the forbidden/delay slot.
-      break;
-    case Branch::kCall:
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Balc(offset);
-      break;
-    case Branch::kBareUncondBranch:
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Bc(offset);
-      break;
-    case Branch::kBareCondBranch:
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      EmitBcondR6(condition, lhs, rhs, offset);
-      break;
-    case Branch::kBareCall:
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Balc(offset);
-      break;
-    case Branch::kR2BareCondBranch:
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      EmitBcondR2(condition, lhs, rhs, offset);
-      break;
-
-    // Near label.
-    case Branch::kLabel:
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Addiupc(lhs, offset);
-      break;
-    // Near literals.
-    case Branch::kLiteral:
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Lwpc(lhs, offset);
-      break;
-    case Branch::kLiteralUnsigned:
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Lwupc(lhs, offset);
-      break;
-    case Branch::kLiteralLong:
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Ldpc(lhs, offset);
-      break;
-
-    // Long branches.
-    case Branch::kLongUncondBranch:
-      offset += (offset & 0x8000) << 1;  // Account for sign extension in jic.
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Auipc(AT, High16Bits(offset));
-      Jic(AT, Low16Bits(offset));
-      break;
-    case Branch::kLongCondBranch:
-      EmitBcondR6(Branch::OppositeCondition(condition), lhs, rhs, 2);
-      offset += (offset & 0x8000) << 1;  // Account for sign extension in jic.
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Auipc(AT, High16Bits(offset));
-      Jic(AT, Low16Bits(offset));
-      break;
-    case Branch::kLongCall:
-      offset += (offset & 0x8000) << 1;  // Account for sign extension in jialc.
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Auipc(AT, High16Bits(offset));
-      Jialc(AT, Low16Bits(offset));
-      break;
-
-    // Far label.
-    case Branch::kFarLabel:
-      offset += (offset & 0x8000) << 1;  // Account for sign extension in daddiu.
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Auipc(AT, High16Bits(offset));
-      Daddiu(lhs, AT, Low16Bits(offset));
-      break;
-    // Far literals.
-    case Branch::kFarLiteral:
-      offset += (offset & 0x8000) << 1;  // Account for sign extension in lw.
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Auipc(AT, High16Bits(offset));
-      Lw(lhs, AT, Low16Bits(offset));
-      break;
-    case Branch::kFarLiteralUnsigned:
-      offset += (offset & 0x8000) << 1;  // Account for sign extension in lwu.
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Auipc(AT, High16Bits(offset));
-      Lwu(lhs, AT, Low16Bits(offset));
-      break;
-    case Branch::kFarLiteralLong:
-      offset += (offset & 0x8000) << 1;  // Account for sign extension in ld.
-      CHECK_EQ(overwrite_location_, branch->GetOffsetLocation());
-      Auipc(AT, High16Bits(offset));
-      Ld(lhs, AT, Low16Bits(offset));
-      break;
-  }
-  CHECK_EQ(overwrite_location_, branch->GetEndLocation());
-  CHECK_LT(branch->GetSize(), static_cast<uint32_t>(Branch::kMaxBranchSize));
-}
-
-void Mips64Assembler::Bc(Mips64Label* label, bool is_bare) {
-  Buncond(label, is_bare);
-}
-
-void Mips64Assembler::Balc(Mips64Label* label, bool is_bare) {
-  Call(label, is_bare);
-}
-
-void Mips64Assembler::Bltc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondLT, rs, rt);
-}
-
-void Mips64Assembler::Bltzc(GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondLTZ, rt);
-}
-
-void Mips64Assembler::Bgtzc(GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondGTZ, rt);
-}
-
-void Mips64Assembler::Bgec(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondGE, rs, rt);
-}
-
-void Mips64Assembler::Bgezc(GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondGEZ, rt);
-}
-
-void Mips64Assembler::Blezc(GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondLEZ, rt);
-}
-
-void Mips64Assembler::Bltuc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondLTU, rs, rt);
-}
-
-void Mips64Assembler::Bgeuc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondGEU, rs, rt);
-}
-
-void Mips64Assembler::Beqc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondEQ, rs, rt);
-}
-
-void Mips64Assembler::Bnec(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondNE, rs, rt);
-}
-
-void Mips64Assembler::Beqzc(GpuRegister rs, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondEQZ, rs);
-}
-
-void Mips64Assembler::Bnezc(GpuRegister rs, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondNEZ, rs);
-}
-
-void Mips64Assembler::Bc1eqz(FpuRegister ft, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondF, static_cast<GpuRegister>(ft), ZERO);
-}
-
-void Mips64Assembler::Bc1nez(FpuRegister ft, Mips64Label* label, bool is_bare) {
-  Bcond(label, /* is_r6= */ true, is_bare, kCondT, static_cast<GpuRegister>(ft), ZERO);
-}
-
-void Mips64Assembler::Bltz(GpuRegister rt, Mips64Label* label, bool is_bare) {
-  CHECK(is_bare);
-  Bcond(label, /* is_r6= */ false, is_bare, kCondLTZ, rt);
-}
-
-void Mips64Assembler::Bgtz(GpuRegister rt, Mips64Label* label, bool is_bare) {
-  CHECK(is_bare);
-  Bcond(label, /* is_r6= */ false, is_bare, kCondGTZ, rt);
-}
-
-void Mips64Assembler::Bgez(GpuRegister rt, Mips64Label* label, bool is_bare) {
-  CHECK(is_bare);
-  Bcond(label, /* is_r6= */ false, is_bare, kCondGEZ, rt);
-}
-
-void Mips64Assembler::Blez(GpuRegister rt, Mips64Label* label, bool is_bare) {
-  CHECK(is_bare);
-  Bcond(label, /* is_r6= */ false, is_bare, kCondLEZ, rt);
-}
-
-void Mips64Assembler::Beq(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  CHECK(is_bare);
-  Bcond(label, /* is_r6= */ false, is_bare, kCondEQ, rs, rt);
-}
-
-void Mips64Assembler::Bne(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare) {
-  CHECK(is_bare);
-  Bcond(label, /* is_r6= */ false, is_bare, kCondNE, rs, rt);
-}
-
-void Mips64Assembler::Beqz(GpuRegister rs, Mips64Label* label, bool is_bare) {
-  CHECK(is_bare);
-  Bcond(label, /* is_r6= */ false, is_bare, kCondEQZ, rs);
-}
-
-void Mips64Assembler::Bnez(GpuRegister rs, Mips64Label* label, bool is_bare) {
-  CHECK(is_bare);
-  Bcond(label, /* is_r6= */ false, is_bare, kCondNEZ, rs);
-}
-
-void Mips64Assembler::AdjustBaseAndOffset(GpuRegister& base,
-                                          int32_t& offset,
-                                          bool is_doubleword) {
-  // This method is used to adjust the base register and offset pair
-  // for a load/store when the offset doesn't fit into int16_t.
-  // It is assumed that `base + offset` is sufficiently aligned for memory
-  // operands that are machine word in size or smaller. For doubleword-sized
-  // operands it's assumed that `base` is a multiple of 8, while `offset`
-  // may be a multiple of 4 (e.g. 4-byte-aligned long and double arguments
-  // and spilled variables on the stack accessed relative to the stack
-  // pointer register).
-  // We preserve the "alignment" of `offset` by adjusting it by a multiple of 8.
-  CHECK_NE(base, AT);  // Must not overwrite the register `base` while loading `offset`.
-
-  bool doubleword_aligned = IsAligned<kMips64DoublewordSize>(offset);
-  bool two_accesses = is_doubleword && !doubleword_aligned;
-
-  // IsInt<16> must be passed a signed value, hence the static cast below.
-  if (IsInt<16>(offset) &&
-      (!two_accesses || IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) {
-    // Nothing to do: `offset` (and, if needed, `offset + 4`) fits into int16_t.
-    return;
-  }
-
-  // Remember the "(mis)alignment" of `offset`, it will be checked at the end.
-  uint32_t misalignment = offset & (kMips64DoublewordSize - 1);
-
-  // First, see if `offset` can be represented as a sum of two 16-bit signed
-  // offsets. This can save an instruction.
-  // To simplify matters, only do this for a symmetric range of offsets from
-  // about -64KB to about +64KB, allowing further addition of 4 when accessing
-  // 64-bit variables with two 32-bit accesses.
-  constexpr int32_t kMinOffsetForSimpleAdjustment = 0x7ff8;  // Max int16_t that's a multiple of 8.
-  constexpr int32_t kMaxOffsetForSimpleAdjustment = 2 * kMinOffsetForSimpleAdjustment;
-
-  if (0 <= offset && offset <= kMaxOffsetForSimpleAdjustment) {
-    Daddiu(AT, base, kMinOffsetForSimpleAdjustment);
-    offset -= kMinOffsetForSimpleAdjustment;
-  } else if (-kMaxOffsetForSimpleAdjustment <= offset && offset < 0) {
-    Daddiu(AT, base, -kMinOffsetForSimpleAdjustment);
-    offset += kMinOffsetForSimpleAdjustment;
-  } else {
-    // In more complex cases take advantage of the daui instruction, e.g.:
-    //    daui   AT, base, offset_high
-    //   [dahi   AT, 1]                       // When `offset` is close to +2GB.
-    //    lw     reg_lo, offset_low(AT)
-    //   [lw     reg_hi, (offset_low+4)(AT)]  // If misaligned 64-bit load.
-    // or when offset_low+4 overflows int16_t:
-    //    daui   AT, base, offset_high
-    //    daddiu AT, AT, 8
-    //    lw     reg_lo, (offset_low-8)(AT)
-    //    lw     reg_hi, (offset_low-4)(AT)
-    int16_t offset_low = Low16Bits(offset);
-    int32_t offset_low32 = offset_low;
-    int16_t offset_high = High16Bits(offset);
-    bool increment_hi16 = offset_low < 0;
-    bool overflow_hi16 = false;
-
-    if (increment_hi16) {
-      offset_high++;
-      overflow_hi16 = (offset_high == -32768);
-    }
-    Daui(AT, base, offset_high);
-
-    if (overflow_hi16) {
-      Dahi(AT, 1);
-    }
-
-    if (two_accesses && !IsInt<16>(static_cast<int32_t>(offset_low32 + kMips64WordSize))) {
-      // Avoid overflow in the 16-bit offset of the load/store instruction when adding 4.
-      Daddiu(AT, AT, kMips64DoublewordSize);
-      offset_low32 -= kMips64DoublewordSize;
-    }
-
-    offset = offset_low32;
-  }
-  base = AT;
-
-  CHECK(IsInt<16>(offset));
-  if (two_accesses) {
-    CHECK(IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)));
-  }
-  CHECK_EQ(misalignment, offset & (kMips64DoublewordSize - 1));
-}
-
-void Mips64Assembler::AdjustBaseOffsetAndElementSizeShift(GpuRegister& base,
-                                                          int32_t& offset,
-                                                          int& element_size_shift) {
-  // This method is used to adjust the base register, offset and element_size_shift
-  // for a vector load/store when the offset doesn't fit into allowed number of bits.
-  // MSA ld.df and st.df instructions take signed offsets as arguments, but maximum
-  // offset is dependant on the size of the data format df (10-bit offsets for ld.b,
-  // 11-bit for ld.h, 12-bit for ld.w and 13-bit for ld.d).
-  // If element_size_shift is non-negative at entry, it won't be changed, but offset
-  // will be checked for appropriate alignment. If negative at entry, it will be
-  // adjusted based on offset for maximum fit.
-  // It's assumed that `base` is a multiple of 8.
-
-  CHECK_NE(base, AT);  // Must not overwrite the register `base` while loading `offset`.
-
-  if (element_size_shift >= 0) {
-    CHECK_LE(element_size_shift, TIMES_8);
-    CHECK_GE(JAVASTYLE_CTZ(offset), element_size_shift);
-  } else if (IsAligned<kMips64DoublewordSize>(offset)) {
-    element_size_shift = TIMES_8;
-  } else if (IsAligned<kMips64WordSize>(offset)) {
-    element_size_shift = TIMES_4;
-  } else if (IsAligned<kMips64HalfwordSize>(offset)) {
-    element_size_shift = TIMES_2;
-  } else {
-    element_size_shift = TIMES_1;
-  }
-
-  const int low_len = 10 + element_size_shift;  // How many low bits of `offset` ld.df/st.df
-                                                // will take.
-  int16_t low = offset & ((1 << low_len) - 1);  // Isolate these bits.
-  low -= (low & (1 << (low_len - 1))) << 1;     // Sign-extend these bits.
-  if (low == offset) {
-    return;  // `offset` fits into ld.df/st.df.
-  }
-
-  // First, see if `offset` can be represented as a sum of two signed offsets.
-  // This can save an instruction.
-
-  // Max int16_t that's a multiple of element size.
-  const int32_t kMaxDeltaForSimpleAdjustment = 0x8000 - (1 << element_size_shift);
-  // Max ld.df/st.df offset that's a multiple of element size.
-  const int32_t kMaxLoadStoreOffset = 0x1ff << element_size_shift;
-  const int32_t kMaxOffsetForSimpleAdjustment = kMaxDeltaForSimpleAdjustment + kMaxLoadStoreOffset;
-
-  if (IsInt<16>(offset)) {
-    Daddiu(AT, base, offset);
-    offset = 0;
-  } else if (0 <= offset && offset <= kMaxOffsetForSimpleAdjustment) {
-    Daddiu(AT, base, kMaxDeltaForSimpleAdjustment);
-    offset -= kMaxDeltaForSimpleAdjustment;
-  } else if (-kMaxOffsetForSimpleAdjustment <= offset && offset < 0) {
-    Daddiu(AT, base, -kMaxDeltaForSimpleAdjustment);
-    offset += kMaxDeltaForSimpleAdjustment;
-  } else {
-    // Let's treat `offset` as 64-bit to simplify handling of sign
-    // extensions in the instructions that supply its smaller signed parts.
-    //
-    // 16-bit or smaller parts of `offset`:
-    // |63  top  48|47  hi  32|31  upper  16|15  mid  13-10|12-9  low  0|
-    //
-    // Instructions that supply each part as a signed integer addend:
-    // |dati       |dahi      |daui         |daddiu        |ld.df/st.df |
-    //
-    // `top` is always 0, so dati isn't used.
-    // `hi` is 1 when `offset` is close to +2GB and 0 otherwise.
-    uint64_t tmp = static_cast<uint64_t>(offset) - low;  // Exclude `low` from the rest of `offset`
-                                                         // (accounts for sign of `low`).
-    tmp += (tmp & (UINT64_C(1) << 15)) << 1;  // Account for sign extension in daddiu.
-    tmp += (tmp & (UINT64_C(1) << 31)) << 1;  // Account for sign extension in daui.
-    int16_t mid = Low16Bits(tmp);
-    int16_t upper = High16Bits(tmp);
-    int16_t hi = Low16Bits(High32Bits(tmp));
-    Daui(AT, base, upper);
-    if (hi != 0) {
-      CHECK_EQ(hi, 1);
-      Dahi(AT, hi);
-    }
-    if (mid != 0) {
-      Daddiu(AT, AT, mid);
-    }
-    offset = low;
-  }
-  base = AT;
-  CHECK_GE(JAVASTYLE_CTZ(offset), element_size_shift);
-  CHECK(IsInt<10>(offset >> element_size_shift));
-}
-
-void Mips64Assembler::LoadFromOffset(LoadOperandType type,
-                                     GpuRegister reg,
-                                     GpuRegister base,
-                                     int32_t offset) {
-  LoadFromOffset<>(type, reg, base, offset);
-}
-
-void Mips64Assembler::LoadFpuFromOffset(LoadOperandType type,
-                                        FpuRegister reg,
-                                        GpuRegister base,
-                                        int32_t offset) {
-  LoadFpuFromOffset<>(type, reg, base, offset);
-}
-
-void Mips64Assembler::EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset,
-                               size_t size) {
-  Mips64ManagedRegister dst = m_dst.AsMips64();
-  if (dst.IsNoRegister()) {
-    CHECK_EQ(0u, size) << dst;
-  } else if (dst.IsGpuRegister()) {
-    if (size == 4) {
-      LoadFromOffset(kLoadWord, dst.AsGpuRegister(), src_register, src_offset);
-    } else if (size == 8) {
-      CHECK_EQ(8u, size) << dst;
-      LoadFromOffset(kLoadDoubleword, dst.AsGpuRegister(), src_register, src_offset);
-    } else {
-      UNIMPLEMENTED(FATAL) << "We only support Load() of size 4 and 8";
-    }
-  } else if (dst.IsFpuRegister()) {
-    if (size == 4) {
-      CHECK_EQ(4u, size) << dst;
-      LoadFpuFromOffset(kLoadWord, dst.AsFpuRegister(), src_register, src_offset);
-    } else if (size == 8) {
-      CHECK_EQ(8u, size) << dst;
-      LoadFpuFromOffset(kLoadDoubleword, dst.AsFpuRegister(), src_register, src_offset);
-    } else {
-      UNIMPLEMENTED(FATAL) << "We only support Load() of size 4 and 8";
-    }
-  }
-}
-
-void Mips64Assembler::StoreToOffset(StoreOperandType type,
-                                    GpuRegister reg,
-                                    GpuRegister base,
-                                    int32_t offset) {
-  StoreToOffset<>(type, reg, base, offset);
-}
-
-void Mips64Assembler::StoreFpuToOffset(StoreOperandType type,
-                                       FpuRegister reg,
-                                       GpuRegister base,
-                                       int32_t offset) {
-  StoreFpuToOffset<>(type, reg, base, offset);
-}
-
-static dwarf::Reg DWARFReg(GpuRegister reg) {
-  return dwarf::Reg::Mips64Core(static_cast<int>(reg));
-}
-
-constexpr size_t kFramePointerSize = 8;
-
-void Mips64Assembler::BuildFrame(size_t frame_size,
-                                 ManagedRegister method_reg,
-                                 ArrayRef<const ManagedRegister> callee_save_regs,
-                                 const ManagedRegisterEntrySpills& entry_spills) {
-  CHECK_ALIGNED(frame_size, kStackAlignment);
-  DCHECK(!overwriting_);
-
-  // Increase frame to required size.
-  IncreaseFrameSize(frame_size);
-
-  // Push callee saves and return address
-  int stack_offset = frame_size - kFramePointerSize;
-  StoreToOffset(kStoreDoubleword, RA, SP, stack_offset);
-  cfi_.RelOffset(DWARFReg(RA), stack_offset);
-  for (int i = callee_save_regs.size() - 1; i >= 0; --i) {
-    stack_offset -= kFramePointerSize;
-    GpuRegister reg = callee_save_regs[i].AsMips64().AsGpuRegister();
-    StoreToOffset(kStoreDoubleword, reg, SP, stack_offset);
-    cfi_.RelOffset(DWARFReg(reg), stack_offset);
-  }
-
-  // Write out Method*.
-  StoreToOffset(kStoreDoubleword, method_reg.AsMips64().AsGpuRegister(), SP, 0);
-
-  // Write out entry spills.
-  int32_t offset = frame_size + kFramePointerSize;
-  for (const ManagedRegisterSpill& spill : entry_spills) {
-    Mips64ManagedRegister reg = spill.AsMips64();
-    int32_t size = spill.getSize();
-    if (reg.IsNoRegister()) {
-      // only increment stack offset.
-      offset += size;
-    } else if (reg.IsFpuRegister()) {
-      StoreFpuToOffset((size == 4) ? kStoreWord : kStoreDoubleword,
-          reg.AsFpuRegister(), SP, offset);
-      offset += size;
-    } else if (reg.IsGpuRegister()) {
-      StoreToOffset((size == 4) ? kStoreWord : kStoreDoubleword,
-          reg.AsGpuRegister(), SP, offset);
-      offset += size;
-    }
-  }
-}
-
-void Mips64Assembler::RemoveFrame(size_t frame_size,
-                                  ArrayRef<const ManagedRegister> callee_save_regs,
-                                  bool may_suspend ATTRIBUTE_UNUSED) {
-  CHECK_ALIGNED(frame_size, kStackAlignment);
-  DCHECK(!overwriting_);
-  cfi_.RememberState();
-
-  // Pop callee saves and return address
-  int stack_offset = frame_size - (callee_save_regs.size() * kFramePointerSize) - kFramePointerSize;
-  for (size_t i = 0; i < callee_save_regs.size(); ++i) {
-    GpuRegister reg = callee_save_regs[i].AsMips64().AsGpuRegister();
-    LoadFromOffset(kLoadDoubleword, reg, SP, stack_offset);
-    cfi_.Restore(DWARFReg(reg));
-    stack_offset += kFramePointerSize;
-  }
-  LoadFromOffset(kLoadDoubleword, RA, SP, stack_offset);
-  cfi_.Restore(DWARFReg(RA));
-
-  // Decrease frame to required size.
-  DecreaseFrameSize(frame_size);
-
-  // Then jump to the return address.
-  Jr(RA);
-  Nop();
-
-  // The CFI should be restored for any code that follows the exit block.
-  cfi_.RestoreState();
-  cfi_.DefCFAOffset(frame_size);
-}
-
-void Mips64Assembler::IncreaseFrameSize(size_t adjust) {
-  CHECK_ALIGNED(adjust, kFramePointerSize);
-  DCHECK(!overwriting_);
-  Daddiu64(SP, SP, static_cast<int32_t>(-adjust));
-  cfi_.AdjustCFAOffset(adjust);
-}
-
-void Mips64Assembler::DecreaseFrameSize(size_t adjust) {
-  CHECK_ALIGNED(adjust, kFramePointerSize);
-  DCHECK(!overwriting_);
-  Daddiu64(SP, SP, static_cast<int32_t>(adjust));
-  cfi_.AdjustCFAOffset(-adjust);
-}
-
-void Mips64Assembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
-  Mips64ManagedRegister src = msrc.AsMips64();
-  if (src.IsNoRegister()) {
-    CHECK_EQ(0u, size);
-  } else if (src.IsGpuRegister()) {
-    CHECK(size == 4 || size == 8) << size;
-    if (size == 8) {
-      StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
-    } else if (size == 4) {
-      StoreToOffset(kStoreWord, src.AsGpuRegister(), SP, dest.Int32Value());
-    } else {
-      UNIMPLEMENTED(FATAL) << "We only support Store() of size 4 and 8";
-    }
-  } else if (src.IsFpuRegister()) {
-    CHECK(size == 4 || size == 8) << size;
-    if (size == 8) {
-      StoreFpuToOffset(kStoreDoubleword, src.AsFpuRegister(), SP, dest.Int32Value());
-    } else if (size == 4) {
-      StoreFpuToOffset(kStoreWord, src.AsFpuRegister(), SP, dest.Int32Value());
-    } else {
-      UNIMPLEMENTED(FATAL) << "We only support Store() of size 4 and 8";
-    }
-  }
-}
-
-void Mips64Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
-  Mips64ManagedRegister src = msrc.AsMips64();
-  CHECK(src.IsGpuRegister());
-  StoreToOffset(kStoreWord, src.AsGpuRegister(), SP, dest.Int32Value());
-}
-
-void Mips64Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
-  Mips64ManagedRegister src = msrc.AsMips64();
-  CHECK(src.IsGpuRegister());
-  StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
-}
-
-void Mips64Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
-                                            ManagedRegister mscratch) {
-  Mips64ManagedRegister scratch = mscratch.AsMips64();
-  CHECK(scratch.IsGpuRegister()) << scratch;
-  LoadConst32(scratch.AsGpuRegister(), imm);
-  StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
-}
-
-void Mips64Assembler::StoreStackOffsetToThread(ThreadOffset64 thr_offs,
-                                               FrameOffset fr_offs,
-                                               ManagedRegister mscratch) {
-  Mips64ManagedRegister scratch = mscratch.AsMips64();
-  CHECK(scratch.IsGpuRegister()) << scratch;
-  Daddiu64(scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
-  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
-}
-
-void Mips64Assembler::StoreStackPointerToThread(ThreadOffset64 thr_offs) {
-  StoreToOffset(kStoreDoubleword, SP, S1, thr_offs.Int32Value());
-}
-
-void Mips64Assembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
-                                    FrameOffset in_off, ManagedRegister mscratch) {
-  Mips64ManagedRegister src = msrc.AsMips64();
-  Mips64ManagedRegister scratch = mscratch.AsMips64();
-  StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
-  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), SP, in_off.Int32Value());
-  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, dest.Int32Value() + 8);
-}
-
-void Mips64Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
-  return EmitLoad(mdest, SP, src.Int32Value(), size);
-}
-
-void Mips64Assembler::LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) {
-  return EmitLoad(mdest, S1, src.Int32Value(), size);
-}
-
-void Mips64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
-  Mips64ManagedRegister dest = mdest.AsMips64();
-  CHECK(dest.IsGpuRegister());
-  LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(), SP, src.Int32Value());
-}
-
-void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
-                              bool unpoison_reference) {
-  Mips64ManagedRegister dest = mdest.AsMips64();
-  CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister());
-  LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(),
-                 base.AsMips64().AsGpuRegister(), offs.Int32Value());
-  if (unpoison_reference) {
-    MaybeUnpoisonHeapReference(dest.AsGpuRegister());
-  }
-}
-
-void Mips64Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
-                                 Offset offs) {
-  Mips64ManagedRegister dest = mdest.AsMips64();
-  CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister());
-  LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(),
-                 base.AsMips64().AsGpuRegister(), offs.Int32Value());
-}
-
-void Mips64Assembler::LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) {
-  Mips64ManagedRegister dest = mdest.AsMips64();
-  CHECK(dest.IsGpuRegister());
-  LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(), S1, offs.Int32Value());
-}
-
-void Mips64Assembler::SignExtend(ManagedRegister mreg ATTRIBUTE_UNUSED,
-                                 size_t size ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "No sign extension necessary for MIPS64";
-}
-
-void Mips64Assembler::ZeroExtend(ManagedRegister mreg ATTRIBUTE_UNUSED,
-                                 size_t size ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "No zero extension necessary for MIPS64";
-}
-
-void Mips64Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
-  Mips64ManagedRegister dest = mdest.AsMips64();
-  Mips64ManagedRegister src = msrc.AsMips64();
-  if (!dest.Equals(src)) {
-    if (dest.IsGpuRegister()) {
-      CHECK(src.IsGpuRegister()) << src;
-      Move(dest.AsGpuRegister(), src.AsGpuRegister());
-    } else if (dest.IsFpuRegister()) {
-      CHECK(src.IsFpuRegister()) << src;
-      if (size == 4) {
-        MovS(dest.AsFpuRegister(), src.AsFpuRegister());
-      } else if (size == 8) {
-        MovD(dest.AsFpuRegister(), src.AsFpuRegister());
-      } else {
-        UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
-      }
-    }
-  }
-}
-
-void Mips64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
-                              ManagedRegister mscratch) {
-  Mips64ManagedRegister scratch = mscratch.AsMips64();
-  CHECK(scratch.IsGpuRegister()) << scratch;
-  LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP, src.Int32Value());
-  StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
-}
-
-void Mips64Assembler::CopyRawPtrFromThread(FrameOffset fr_offs,
-                                           ThreadOffset64 thr_offs,
-                                           ManagedRegister mscratch) {
-  Mips64ManagedRegister scratch = mscratch.AsMips64();
-  CHECK(scratch.IsGpuRegister()) << scratch;
-  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
-  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
-}
-
-void Mips64Assembler::CopyRawPtrToThread(ThreadOffset64 thr_offs,
-                                         FrameOffset fr_offs,
-                                         ManagedRegister mscratch) {
-  Mips64ManagedRegister scratch = mscratch.AsMips64();
-  CHECK(scratch.IsGpuRegister()) << scratch;
-  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
-                 SP, fr_offs.Int32Value());
-  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(),
-                S1, thr_offs.Int32Value());
-}
-
-void Mips64Assembler::Copy(FrameOffset dest, FrameOffset src,
-                           ManagedRegister mscratch, size_t size) {
-  Mips64ManagedRegister scratch = mscratch.AsMips64();
-  CHECK(scratch.IsGpuRegister()) << scratch;
-  CHECK(size == 4 || size == 8) << size;
-  if (size == 4) {
-    LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP, src.Int32Value());
-    StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, dest.Int32Value());
-  } else if (size == 8) {
-    LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), SP, src.Int32Value());
-    StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, dest.Int32Value());
-  } else {
-    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
-  }
-}
-
-void Mips64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
-                           ManagedRegister mscratch, size_t size) {
-  GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
-  CHECK(size == 4 || size == 8) << size;
-  if (size == 4) {
-    LoadFromOffset(kLoadWord, scratch, src_base.AsMips64().AsGpuRegister(),
-                   src_offset.Int32Value());
-    StoreToOffset(kStoreDoubleword, scratch, SP, dest.Int32Value());
-  } else if (size == 8) {
-    LoadFromOffset(kLoadDoubleword, scratch, src_base.AsMips64().AsGpuRegister(),
-                   src_offset.Int32Value());
-    StoreToOffset(kStoreDoubleword, scratch, SP, dest.Int32Value());
-  } else {
-    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
-  }
-}
-
-void Mips64Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
-                           ManagedRegister mscratch, size_t size) {
-  GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
-  CHECK(size == 4 || size == 8) << size;
-  if (size == 4) {
-    LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
-    StoreToOffset(kStoreDoubleword, scratch, dest_base.AsMips64().AsGpuRegister(),
-                  dest_offset.Int32Value());
-  } else if (size == 8) {
-    LoadFromOffset(kLoadDoubleword, scratch, SP, src.Int32Value());
-    StoreToOffset(kStoreDoubleword, scratch, dest_base.AsMips64().AsGpuRegister(),
-                  dest_offset.Int32Value());
-  } else {
-    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
-  }
-}
-
-void Mips64Assembler::Copy(FrameOffset dest ATTRIBUTE_UNUSED,
-                           FrameOffset src_base ATTRIBUTE_UNUSED,
-                           Offset src_offset ATTRIBUTE_UNUSED,
-                           ManagedRegister mscratch ATTRIBUTE_UNUSED,
-                           size_t size ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
-}
-
-void Mips64Assembler::Copy(ManagedRegister dest, Offset dest_offset,
-                           ManagedRegister src, Offset src_offset,
-                           ManagedRegister mscratch, size_t size) {
-  GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
-  CHECK(size == 4 || size == 8) << size;
-  if (size == 4) {
-    LoadFromOffset(kLoadWord, scratch, src.AsMips64().AsGpuRegister(), src_offset.Int32Value());
-    StoreToOffset(kStoreDoubleword, scratch, dest.AsMips64().AsGpuRegister(), dest_offset.Int32Value());
-  } else if (size == 8) {
-    LoadFromOffset(kLoadDoubleword, scratch, src.AsMips64().AsGpuRegister(),
-                   src_offset.Int32Value());
-    StoreToOffset(kStoreDoubleword, scratch, dest.AsMips64().AsGpuRegister(),
-                  dest_offset.Int32Value());
-  } else {
-    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
-  }
-}
-
-void Mips64Assembler::Copy(FrameOffset dest ATTRIBUTE_UNUSED,
-                           Offset dest_offset ATTRIBUTE_UNUSED,
-                           FrameOffset src ATTRIBUTE_UNUSED,
-                           Offset src_offset ATTRIBUTE_UNUSED,
-                           ManagedRegister mscratch ATTRIBUTE_UNUSED,
-                           size_t size ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
-}
-
-void Mips64Assembler::MemoryBarrier(ManagedRegister mreg ATTRIBUTE_UNUSED) {
-  // TODO: sync?
-  UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
-}
-
-void Mips64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
-                                             FrameOffset handle_scope_offset,
-                                             ManagedRegister min_reg,
-                                             bool null_allowed) {
-  Mips64ManagedRegister out_reg = mout_reg.AsMips64();
-  Mips64ManagedRegister in_reg = min_reg.AsMips64();
-  CHECK(in_reg.IsNoRegister() || in_reg.IsGpuRegister()) << in_reg;
-  CHECK(out_reg.IsGpuRegister()) << out_reg;
-  if (null_allowed) {
-    Mips64Label null_arg;
-    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
-    // the address in the handle scope holding the reference.
-    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
-    if (in_reg.IsNoRegister()) {
-      LoadFromOffset(kLoadUnsignedWord, out_reg.AsGpuRegister(),
-                     SP, handle_scope_offset.Int32Value());
-      in_reg = out_reg;
-    }
-    if (!out_reg.Equals(in_reg)) {
-      LoadConst32(out_reg.AsGpuRegister(), 0);
-    }
-    Beqzc(in_reg.AsGpuRegister(), &null_arg);
-    Daddiu64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
-    Bind(&null_arg);
-  } else {
-    Daddiu64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
-  }
-}
-
-void Mips64Assembler::CreateHandleScopeEntry(FrameOffset out_off,
-                                             FrameOffset handle_scope_offset,
-                                             ManagedRegister mscratch,
-                                             bool null_allowed) {
-  Mips64ManagedRegister scratch = mscratch.AsMips64();
-  CHECK(scratch.IsGpuRegister()) << scratch;
-  if (null_allowed) {
-    Mips64Label null_arg;
-    LoadFromOffset(kLoadUnsignedWord, scratch.AsGpuRegister(), SP,
-                   handle_scope_offset.Int32Value());
-    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
-    // the address in the handle scope holding the reference.
-    // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
-    Beqzc(scratch.AsGpuRegister(), &null_arg);
-    Daddiu64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
-    Bind(&null_arg);
-  } else {
-    Daddiu64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
-  }
-  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, out_off.Int32Value());
-}
-
-// Given a handle scope entry, load the associated reference.
-void Mips64Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
-                                                   ManagedRegister min_reg) {
-  Mips64ManagedRegister out_reg = mout_reg.AsMips64();
-  Mips64ManagedRegister in_reg = min_reg.AsMips64();
-  CHECK(out_reg.IsGpuRegister()) << out_reg;
-  CHECK(in_reg.IsGpuRegister()) << in_reg;
-  Mips64Label null_arg;
-  if (!out_reg.Equals(in_reg)) {
-    LoadConst32(out_reg.AsGpuRegister(), 0);
-  }
-  Beqzc(in_reg.AsGpuRegister(), &null_arg);
-  LoadFromOffset(kLoadDoubleword, out_reg.AsGpuRegister(),
-                 in_reg.AsGpuRegister(), 0);
-  Bind(&null_arg);
-}
-
-void Mips64Assembler::VerifyObject(ManagedRegister src ATTRIBUTE_UNUSED,
-                                   bool could_be_null ATTRIBUTE_UNUSED) {
-  // TODO: not validating references
-}
-
-void Mips64Assembler::VerifyObject(FrameOffset src ATTRIBUTE_UNUSED,
-                                   bool could_be_null ATTRIBUTE_UNUSED) {
-  // TODO: not validating references
-}
-
-void Mips64Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister mscratch) {
-  Mips64ManagedRegister base = mbase.AsMips64();
-  Mips64ManagedRegister scratch = mscratch.AsMips64();
-  CHECK(base.IsGpuRegister()) << base;
-  CHECK(scratch.IsGpuRegister()) << scratch;
-  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
-                 base.AsGpuRegister(), offset.Int32Value());
-  Jalr(scratch.AsGpuRegister());
-  Nop();
-  // TODO: place reference map on call
-}
-
-void Mips64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
-  Mips64ManagedRegister scratch = mscratch.AsMips64();
-  CHECK(scratch.IsGpuRegister()) << scratch;
-  // Call *(*(SP + base) + offset)
-  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
-                 SP, base.Int32Value());
-  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
-                 scratch.AsGpuRegister(), offset.Int32Value());
-  Jalr(scratch.AsGpuRegister());
-  Nop();
-  // TODO: place reference map on call
-}
-
-void Mips64Assembler::CallFromThread(ThreadOffset64 offset ATTRIBUTE_UNUSED,
-                                     ManagedRegister mscratch ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "No MIPS64 implementation";
-}
-
-void Mips64Assembler::GetCurrentThread(ManagedRegister tr) {
-  Move(tr.AsMips64().AsGpuRegister(), S1);
-}
-
-void Mips64Assembler::GetCurrentThread(FrameOffset offset,
-                                       ManagedRegister mscratch ATTRIBUTE_UNUSED) {
-  StoreToOffset(kStoreDoubleword, S1, SP, offset.Int32Value());
-}
-
-void Mips64Assembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
-  Mips64ManagedRegister scratch = mscratch.AsMips64();
-  exception_blocks_.emplace_back(scratch, stack_adjust);
-  LoadFromOffset(kLoadDoubleword,
-                 scratch.AsGpuRegister(),
-                 S1,
-                 Thread::ExceptionOffset<kMips64PointerSize>().Int32Value());
-  Bnezc(scratch.AsGpuRegister(), exception_blocks_.back().Entry());
-}
-
-void Mips64Assembler::EmitExceptionPoll(Mips64ExceptionSlowPath* exception) {
-  Bind(exception->Entry());
-  if (exception->stack_adjust_ != 0) {  // Fix up the frame.
-    DecreaseFrameSize(exception->stack_adjust_);
-  }
-  // Pass exception object as argument.
-  // Don't care about preserving A0 as this call won't return.
-  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
-  Move(A0, exception->scratch_.AsGpuRegister());
-  // Set up call to Thread::Current()->pDeliverException
-  LoadFromOffset(kLoadDoubleword,
-                 T9,
-                 S1,
-                 QUICK_ENTRYPOINT_OFFSET(kMips64PointerSize, pDeliverException).Int32Value());
-  Jr(T9);
-  Nop();
-
-  // Call never returns
-  Break();
-}
-
-}  // namespace mips64
-}  // namespace art
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
deleted file mode 100644
index b331cee..0000000
--- a/compiler/utils/mips64/assembler_mips64.h
+++ /dev/null
@@ -1,1736 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_MIPS64_ASSEMBLER_MIPS64_H_
-#define ART_COMPILER_UTILS_MIPS64_ASSEMBLER_MIPS64_H_
-
-#include <deque>
-#include <utility>
-#include <vector>
-
-#include "arch/mips64/instruction_set_features_mips64.h"
-#include "base/arena_containers.h"
-#include "base/enums.h"
-#include "base/globals.h"
-#include "base/macros.h"
-#include "base/stl_util_identity.h"
-#include "constants_mips64.h"
-#include "heap_poisoning.h"
-#include "managed_register_mips64.h"
-#include "offsets.h"
-#include "utils/assembler.h"
-#include "utils/jni_macro_assembler.h"
-#include "utils/label.h"
-
-namespace art {
-namespace mips64 {
-
-enum LoadConst64Path {
-  kLoadConst64PathZero           = 0x0,
-  kLoadConst64PathOri            = 0x1,
-  kLoadConst64PathDaddiu         = 0x2,
-  kLoadConst64PathLui            = 0x4,
-  kLoadConst64PathLuiOri         = 0x8,
-  kLoadConst64PathOriDahi        = 0x10,
-  kLoadConst64PathOriDati        = 0x20,
-  kLoadConst64PathLuiDahi        = 0x40,
-  kLoadConst64PathLuiDati        = 0x80,
-  kLoadConst64PathDaddiuDsrlX    = 0x100,
-  kLoadConst64PathOriDsllX       = 0x200,
-  kLoadConst64PathDaddiuDsllX    = 0x400,
-  kLoadConst64PathLuiOriDsllX    = 0x800,
-  kLoadConst64PathOriDsllXOri    = 0x1000,
-  kLoadConst64PathDaddiuDsllXOri = 0x2000,
-  kLoadConst64PathDaddiuDahi     = 0x4000,
-  kLoadConst64PathDaddiuDati     = 0x8000,
-  kLoadConst64PathDinsu1         = 0x10000,
-  kLoadConst64PathDinsu2         = 0x20000,
-  kLoadConst64PathCatchAll       = 0x40000,
-  kLoadConst64PathAllPaths       = 0x7ffff,
-};
-
-template <typename Asm>
-void TemplateLoadConst32(Asm* a, GpuRegister rd, int32_t value) {
-  if (IsUint<16>(value)) {
-    // Use OR with (unsigned) immediate to encode 16b unsigned int.
-    a->Ori(rd, ZERO, value);
-  } else if (IsInt<16>(value)) {
-    // Use ADD with (signed) immediate to encode 16b signed int.
-    a->Addiu(rd, ZERO, value);
-  } else {
-    // Set 16 most significant bits of value. The "lui" instruction
-    // also clears the 16 least significant bits to zero.
-    a->Lui(rd, value >> 16);
-    if (value & 0xFFFF) {
-      // If the 16 least significant bits are non-zero, set them
-      // here.
-      a->Ori(rd, rd, value);
-    }
-  }
-}
-
-static inline int InstrCountForLoadReplicatedConst32(int64_t value) {
-  int32_t x = Low32Bits(value);
-  int32_t y = High32Bits(value);
-
-  if (x == y) {
-    return (IsUint<16>(x) || IsInt<16>(x) || ((x & 0xFFFF) == 0)) ? 2 : 3;
-  }
-
-  return INT_MAX;
-}
-
-template <typename Asm, typename Rtype, typename Vtype>
-void TemplateLoadConst64(Asm* a, Rtype rd, Vtype value) {
-  int bit31 = (value & UINT64_C(0x80000000)) != 0;
-  int rep32_count = InstrCountForLoadReplicatedConst32(value);
-
-  // Loads with 1 instruction.
-  if (IsUint<16>(value)) {
-    // 64-bit value can be loaded as an unsigned 16-bit number.
-    a->RecordLoadConst64Path(kLoadConst64PathOri);
-    a->Ori(rd, ZERO, value);
-  } else if (IsInt<16>(value)) {
-    // 64-bit value can be loaded as an signed 16-bit number.
-    a->RecordLoadConst64Path(kLoadConst64PathDaddiu);
-    a->Daddiu(rd, ZERO, value);
-  } else if ((value & 0xFFFF) == 0 && IsInt<16>(value >> 16)) {
-    // 64-bit value can be loaded as an signed 32-bit number which has all
-    // of its 16 least significant bits set to zero.
-    a->RecordLoadConst64Path(kLoadConst64PathLui);
-    a->Lui(rd, value >> 16);
-  } else if (IsInt<32>(value)) {
-    // Loads with 2 instructions.
-    // 64-bit value can be loaded as an signed 32-bit number which has some
-    // or all of its 16 least significant bits set to one.
-    a->RecordLoadConst64Path(kLoadConst64PathLuiOri);
-    a->Lui(rd, value >> 16);
-    a->Ori(rd, rd, value);
-  } else if ((value & 0xFFFF0000) == 0 && IsInt<16>(value >> 32)) {
-    // 64-bit value which consists of an unsigned 16-bit value in its
-    // least significant 32-bits, and a signed 16-bit value in its
-    // most significant 32-bits.
-    a->RecordLoadConst64Path(kLoadConst64PathOriDahi);
-    a->Ori(rd, ZERO, value);
-    a->Dahi(rd, value >> 32);
-  } else if ((value & UINT64_C(0xFFFFFFFF0000)) == 0) {
-    // 64-bit value which consists of an unsigned 16-bit value in its
-    // least significant 48-bits, and a signed 16-bit value in its
-    // most significant 16-bits.
-    a->RecordLoadConst64Path(kLoadConst64PathOriDati);
-    a->Ori(rd, ZERO, value);
-    a->Dati(rd, value >> 48);
-  } else if ((value & 0xFFFF) == 0 &&
-             (-32768 - bit31) <= (value >> 32) && (value >> 32) <= (32767 - bit31)) {
-    // 16 LSBs (Least Significant Bits) all set to zero.
-    // 48 MSBs (Most Significant Bits) hold a signed 32-bit value.
-    a->RecordLoadConst64Path(kLoadConst64PathLuiDahi);
-    a->Lui(rd, value >> 16);
-    a->Dahi(rd, (value >> 32) + bit31);
-  } else if ((value & 0xFFFF) == 0 && ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
-    // 16 LSBs all set to zero.
-    // 48 MSBs hold a signed value which can't be represented by signed
-    // 32-bit number, and the middle 16 bits are all zero, or all one.
-    a->RecordLoadConst64Path(kLoadConst64PathLuiDati);
-    a->Lui(rd, value >> 16);
-    a->Dati(rd, (value >> 48) + bit31);
-  } else if (IsInt<16>(static_cast<int32_t>(value)) &&
-             (-32768 - bit31) <= (value >> 32) && (value >> 32) <= (32767 - bit31)) {
-    // 32 LSBs contain an unsigned 16-bit number.
-    // 32 MSBs contain a signed 16-bit number.
-    a->RecordLoadConst64Path(kLoadConst64PathDaddiuDahi);
-    a->Daddiu(rd, ZERO, value);
-    a->Dahi(rd, (value >> 32) + bit31);
-  } else if (IsInt<16>(static_cast<int32_t>(value)) &&
-             ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
-    // 48 LSBs contain an unsigned 16-bit number.
-    // 16 MSBs contain a signed 16-bit number.
-    a->RecordLoadConst64Path(kLoadConst64PathDaddiuDati);
-    a->Daddiu(rd, ZERO, value);
-    a->Dati(rd, (value >> 48) + bit31);
-  } else if (IsPowerOfTwo(value + UINT64_C(1))) {
-    // 64-bit values which have their "n" MSBs set to one, and their
-    // "64-n" LSBs set to zero. "n" must meet the restrictions 0 < n < 64.
-    int shift_cnt = 64 - CTZ(value + UINT64_C(1));
-    a->RecordLoadConst64Path(kLoadConst64PathDaddiuDsrlX);
-    a->Daddiu(rd, ZERO, -1);
-    if (shift_cnt < 32) {
-      a->Dsrl(rd, rd, shift_cnt);
-    } else {
-      a->Dsrl32(rd, rd, shift_cnt & 31);
-    }
-  } else {
-    int shift_cnt = CTZ(value);
-    int64_t tmp = value >> shift_cnt;
-    a->RecordLoadConst64Path(kLoadConst64PathOriDsllX);
-    if (IsUint<16>(tmp)) {
-      // Value can be computed by loading a 16-bit unsigned value, and
-      // then shifting left.
-      a->Ori(rd, ZERO, tmp);
-      if (shift_cnt < 32) {
-        a->Dsll(rd, rd, shift_cnt);
-      } else {
-        a->Dsll32(rd, rd, shift_cnt & 31);
-      }
-    } else if (IsInt<16>(tmp)) {
-      // Value can be computed by loading a 16-bit signed value, and
-      // then shifting left.
-      a->RecordLoadConst64Path(kLoadConst64PathDaddiuDsllX);
-      a->Daddiu(rd, ZERO, tmp);
-      if (shift_cnt < 32) {
-        a->Dsll(rd, rd, shift_cnt);
-      } else {
-        a->Dsll32(rd, rd, shift_cnt & 31);
-      }
-    } else if (rep32_count < 3) {
-      // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
-      // value loaded into the 32 LSBs can be loaded with a single
-      // MIPS instruction.
-      a->LoadConst32(rd, value);
-      a->Dinsu(rd, rd, 32, 32);
-      a->RecordLoadConst64Path(kLoadConst64PathDinsu1);
-    } else if (IsInt<32>(tmp)) {
-      // Loads with 3 instructions.
-      // Value can be computed by loading a 32-bit signed value, and
-      // then shifting left.
-      a->RecordLoadConst64Path(kLoadConst64PathLuiOriDsllX);
-      a->Lui(rd, tmp >> 16);
-      a->Ori(rd, rd, tmp);
-      if (shift_cnt < 32) {
-        a->Dsll(rd, rd, shift_cnt);
-      } else {
-        a->Dsll32(rd, rd, shift_cnt & 31);
-      }
-    } else {
-      shift_cnt = 16 + CTZ(value >> 16);
-      tmp = value >> shift_cnt;
-      if (IsUint<16>(tmp)) {
-        // Value can be computed by loading a 16-bit unsigned value,
-        // shifting left, and "or"ing in another 16-bit unsigned value.
-        a->RecordLoadConst64Path(kLoadConst64PathOriDsllXOri);
-        a->Ori(rd, ZERO, tmp);
-        if (shift_cnt < 32) {
-          a->Dsll(rd, rd, shift_cnt);
-        } else {
-          a->Dsll32(rd, rd, shift_cnt & 31);
-        }
-        a->Ori(rd, rd, value);
-      } else if (IsInt<16>(tmp)) {
-        // Value can be computed by loading a 16-bit signed value,
-        // shifting left, and "or"ing in a 16-bit unsigned value.
-        a->RecordLoadConst64Path(kLoadConst64PathDaddiuDsllXOri);
-        a->Daddiu(rd, ZERO, tmp);
-        if (shift_cnt < 32) {
-          a->Dsll(rd, rd, shift_cnt);
-        } else {
-          a->Dsll32(rd, rd, shift_cnt & 31);
-        }
-        a->Ori(rd, rd, value);
-      } else if (rep32_count < 4) {
-        // Value being loaded has 32 LSBs equal to the 32 MSBs, and the
-        // value in the 32 LSBs requires 2 MIPS instructions to load.
-        a->LoadConst32(rd, value);
-        a->Dinsu(rd, rd, 32, 32);
-        a->RecordLoadConst64Path(kLoadConst64PathDinsu2);
-      } else {
-        // Loads with 3-4 instructions.
-        // Catch-all case to get any other 64-bit values which aren't
-        // handled by special cases above.
-        uint64_t tmp2 = value;
-        a->RecordLoadConst64Path(kLoadConst64PathCatchAll);
-        a->LoadConst32(rd, value);
-        if (bit31) {
-          tmp2 += UINT64_C(0x100000000);
-        }
-        if (((tmp2 >> 32) & 0xFFFF) != 0) {
-          a->Dahi(rd, tmp2 >> 32);
-        }
-        if (tmp2 & UINT64_C(0x800000000000)) {
-          tmp2 += UINT64_C(0x1000000000000);
-        }
-        if ((tmp2 >> 48) != 0) {
-          a->Dati(rd, tmp2 >> 48);
-        }
-      }
-    }
-  }
-}
-
-static constexpr size_t kMips64HalfwordSize = 2;
-static constexpr size_t kMips64WordSize = 4;
-static constexpr size_t kMips64DoublewordSize = 8;
-
-enum LoadOperandType {
-  kLoadSignedByte,
-  kLoadUnsignedByte,
-  kLoadSignedHalfword,
-  kLoadUnsignedHalfword,
-  kLoadWord,
-  kLoadUnsignedWord,
-  kLoadDoubleword,
-  kLoadQuadword
-};
-
-enum StoreOperandType {
-  kStoreByte,
-  kStoreHalfword,
-  kStoreWord,
-  kStoreDoubleword,
-  kStoreQuadword
-};
-
-// Used to test the values returned by ClassS/ClassD.
-enum FPClassMaskType {
-  kSignalingNaN      = 0x001,
-  kQuietNaN          = 0x002,
-  kNegativeInfinity  = 0x004,
-  kNegativeNormal    = 0x008,
-  kNegativeSubnormal = 0x010,
-  kNegativeZero      = 0x020,
-  kPositiveInfinity  = 0x040,
-  kPositiveNormal    = 0x080,
-  kPositiveSubnormal = 0x100,
-  kPositiveZero      = 0x200,
-};
-
-class Mips64Label : public Label {
- public:
-  Mips64Label() : prev_branch_id_plus_one_(0) {}
-
-  Mips64Label(Mips64Label&& src)
-      : Label(std::move(src)), prev_branch_id_plus_one_(src.prev_branch_id_plus_one_) {}
-
- private:
-  uint32_t prev_branch_id_plus_one_;  // To get distance from preceding branch, if any.
-
-  friend class Mips64Assembler;
-  DISALLOW_COPY_AND_ASSIGN(Mips64Label);
-};
-
-// Assembler literal is a value embedded in code, retrieved using a PC-relative load.
-class Literal {
- public:
-  static constexpr size_t kMaxSize = 8;
-
-  Literal(uint32_t size, const uint8_t* data)
-      : label_(), size_(size) {
-    DCHECK_LE(size, Literal::kMaxSize);
-    memcpy(data_, data, size);
-  }
-
-  template <typename T>
-  T GetValue() const {
-    DCHECK_EQ(size_, sizeof(T));
-    T value;
-    memcpy(&value, data_, sizeof(T));
-    return value;
-  }
-
-  uint32_t GetSize() const {
-    return size_;
-  }
-
-  const uint8_t* GetData() const {
-    return data_;
-  }
-
-  Mips64Label* GetLabel() {
-    return &label_;
-  }
-
-  const Mips64Label* GetLabel() const {
-    return &label_;
-  }
-
- private:
-  Mips64Label label_;
-  const uint32_t size_;
-  uint8_t data_[kMaxSize];
-
-  DISALLOW_COPY_AND_ASSIGN(Literal);
-};
-
-// Jump table: table of labels emitted after the code and before the literals. Similar to literals.
-class JumpTable {
- public:
-  explicit JumpTable(std::vector<Mips64Label*>&& labels)
-      : label_(), labels_(std::move(labels)) {
-  }
-
-  size_t GetSize() const {
-    return labels_.size() * sizeof(uint32_t);
-  }
-
-  const std::vector<Mips64Label*>& GetData() const {
-    return labels_;
-  }
-
-  Mips64Label* GetLabel() {
-    return &label_;
-  }
-
-  const Mips64Label* GetLabel() const {
-    return &label_;
-  }
-
- private:
-  Mips64Label label_;
-  std::vector<Mips64Label*> labels_;
-
-  DISALLOW_COPY_AND_ASSIGN(JumpTable);
-};
-
-// Slowpath entered when Thread::Current()->_exception is non-null.
-class Mips64ExceptionSlowPath {
- public:
-  explicit Mips64ExceptionSlowPath(Mips64ManagedRegister scratch, size_t stack_adjust)
-      : scratch_(scratch), stack_adjust_(stack_adjust) {}
-
-  Mips64ExceptionSlowPath(Mips64ExceptionSlowPath&& src)
-      : scratch_(src.scratch_),
-        stack_adjust_(src.stack_adjust_),
-        exception_entry_(std::move(src.exception_entry_)) {}
-
- private:
-  Mips64Label* Entry() { return &exception_entry_; }
-  const Mips64ManagedRegister scratch_;
-  const size_t stack_adjust_;
-  Mips64Label exception_entry_;
-
-  friend class Mips64Assembler;
-  DISALLOW_COPY_AND_ASSIGN(Mips64ExceptionSlowPath);
-};
-
-class Mips64Assembler final : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
- public:
-  using JNIBase = JNIMacroAssembler<PointerSize::k64>;
-
-  explicit Mips64Assembler(ArenaAllocator* allocator,
-                           const Mips64InstructionSetFeatures* instruction_set_features = nullptr)
-      : Assembler(allocator),
-        overwriting_(false),
-        overwrite_location_(0),
-        literals_(allocator->Adapter(kArenaAllocAssembler)),
-        long_literals_(allocator->Adapter(kArenaAllocAssembler)),
-        jump_tables_(allocator->Adapter(kArenaAllocAssembler)),
-        last_position_adjustment_(0),
-        last_old_position_(0),
-        last_branch_id_(0),
-        has_msa_(instruction_set_features != nullptr ? instruction_set_features->HasMsa() : false) {
-    cfi().DelayEmittingAdvancePCs();
-  }
-
-  virtual ~Mips64Assembler() {
-    for (auto& branch : branches_) {
-      CHECK(branch.IsResolved());
-    }
-  }
-
-  size_t CodeSize() const override { return Assembler::CodeSize(); }
-  DebugFrameOpCodeWriterForAssembler& cfi() override { return Assembler::cfi(); }
-
-  // Emit Machine Instructions.
-  void Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
-  void Addiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
-  void Daddu(GpuRegister rd, GpuRegister rs, GpuRegister rt);  // MIPS64
-  void Daddiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);  // MIPS64
-  void Subu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
-  void Dsubu(GpuRegister rd, GpuRegister rs, GpuRegister rt);  // MIPS64
-
-  void MulR6(GpuRegister rd, GpuRegister rs, GpuRegister rt);
-  void MuhR6(GpuRegister rd, GpuRegister rs, GpuRegister rt);
-  void DivR6(GpuRegister rd, GpuRegister rs, GpuRegister rt);
-  void ModR6(GpuRegister rd, GpuRegister rs, GpuRegister rt);
-  void DivuR6(GpuRegister rd, GpuRegister rs, GpuRegister rt);
-  void ModuR6(GpuRegister rd, GpuRegister rs, GpuRegister rt);
-  void Dmul(GpuRegister rd, GpuRegister rs, GpuRegister rt);  // MIPS64
-  void Dmuh(GpuRegister rd, GpuRegister rs, GpuRegister rt);  // MIPS64
-  void Ddiv(GpuRegister rd, GpuRegister rs, GpuRegister rt);  // MIPS64
-  void Dmod(GpuRegister rd, GpuRegister rs, GpuRegister rt);  // MIPS64
-  void Ddivu(GpuRegister rd, GpuRegister rs, GpuRegister rt);  // MIPS64
-  void Dmodu(GpuRegister rd, GpuRegister rs, GpuRegister rt);  // MIPS64
-
-  void And(GpuRegister rd, GpuRegister rs, GpuRegister rt);
-  void Andi(GpuRegister rt, GpuRegister rs, uint16_t imm16);
-  void Or(GpuRegister rd, GpuRegister rs, GpuRegister rt);
-  void Ori(GpuRegister rt, GpuRegister rs, uint16_t imm16);
-  void Xor(GpuRegister rd, GpuRegister rs, GpuRegister rt);
-  void Xori(GpuRegister rt, GpuRegister rs, uint16_t imm16);
-  void Nor(GpuRegister rd, GpuRegister rs, GpuRegister rt);
-
-  void Bitswap(GpuRegister rd, GpuRegister rt);
-  void Dbitswap(GpuRegister rd, GpuRegister rt);  // MIPS64
-  void Seb(GpuRegister rd, GpuRegister rt);
-  void Seh(GpuRegister rd, GpuRegister rt);
-  void Dsbh(GpuRegister rd, GpuRegister rt);  // MIPS64
-  void Dshd(GpuRegister rd, GpuRegister rt);  // MIPS64
-  void Dext(GpuRegister rs, GpuRegister rt, int pos, int size);  // MIPS64
-  void Ins(GpuRegister rt, GpuRegister rs, int pos, int size);
-  void Dins(GpuRegister rt, GpuRegister rs, int pos, int size);  // MIPS64
-  void Dinsm(GpuRegister rt, GpuRegister rs, int pos, int size);  // MIPS64
-  void Dinsu(GpuRegister rt, GpuRegister rs, int pos, int size);  // MIPS64
-  void DblIns(GpuRegister rt, GpuRegister rs, int pos, int size);  // MIPS64
-  void Lsa(GpuRegister rd, GpuRegister rs, GpuRegister rt, int saPlusOne);
-  void Dlsa(GpuRegister rd, GpuRegister rs, GpuRegister rt, int saPlusOne);  // MIPS64
-  void Wsbh(GpuRegister rd, GpuRegister rt);
-  void Sc(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);
-  void Scd(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);  // MIPS64
-  void Ll(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);
-  void Lld(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);  // MIPS64
-
-  void Sll(GpuRegister rd, GpuRegister rt, int shamt);
-  void Srl(GpuRegister rd, GpuRegister rt, int shamt);
-  void Rotr(GpuRegister rd, GpuRegister rt, int shamt);
-  void Sra(GpuRegister rd, GpuRegister rt, int shamt);
-  void Sllv(GpuRegister rd, GpuRegister rt, GpuRegister rs);
-  void Srlv(GpuRegister rd, GpuRegister rt, GpuRegister rs);
-  void Rotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs);
-  void Srav(GpuRegister rd, GpuRegister rt, GpuRegister rs);
-  void Dsll(GpuRegister rd, GpuRegister rt, int shamt);  // MIPS64
-  void Dsrl(GpuRegister rd, GpuRegister rt, int shamt);  // MIPS64
-  void Drotr(GpuRegister rd, GpuRegister rt, int shamt);  // MIPS64
-  void Dsra(GpuRegister rd, GpuRegister rt, int shamt);  // MIPS64
-  void Dsll32(GpuRegister rd, GpuRegister rt, int shamt);  // MIPS64
-  void Dsrl32(GpuRegister rd, GpuRegister rt, int shamt);  // MIPS64
-  void Drotr32(GpuRegister rd, GpuRegister rt, int shamt);  // MIPS64
-  void Dsra32(GpuRegister rd, GpuRegister rt, int shamt);  // MIPS64
-  void Dsllv(GpuRegister rd, GpuRegister rt, GpuRegister rs);  // MIPS64
-  void Dsrlv(GpuRegister rd, GpuRegister rt, GpuRegister rs);  // MIPS64
-  void Drotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs);  // MIPS64
-  void Dsrav(GpuRegister rd, GpuRegister rt, GpuRegister rs);  // MIPS64
-
-  void Lb(GpuRegister rt, GpuRegister rs, uint16_t imm16);
-  void Lh(GpuRegister rt, GpuRegister rs, uint16_t imm16);
-  void Lw(GpuRegister rt, GpuRegister rs, uint16_t imm16);
-  void Ld(GpuRegister rt, GpuRegister rs, uint16_t imm16);  // MIPS64
-  void Lbu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
-  void Lhu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
-  void Lwu(GpuRegister rt, GpuRegister rs, uint16_t imm16);  // MIPS64
-  void Lwpc(GpuRegister rs, uint32_t imm19);
-  void Lwupc(GpuRegister rs, uint32_t imm19);  // MIPS64
-  void Ldpc(GpuRegister rs, uint32_t imm18);  // MIPS64
-  void Lui(GpuRegister rt, uint16_t imm16);
-  void Aui(GpuRegister rt, GpuRegister rs, uint16_t imm16);
-  void Daui(GpuRegister rt, GpuRegister rs, uint16_t imm16);  // MIPS64
-  void Dahi(GpuRegister rs, uint16_t imm16);  // MIPS64
-  void Dati(GpuRegister rs, uint16_t imm16);  // MIPS64
-  void Sync(uint32_t stype);
-
-  void Sb(GpuRegister rt, GpuRegister rs, uint16_t imm16);
-  void Sh(GpuRegister rt, GpuRegister rs, uint16_t imm16);
-  void Sw(GpuRegister rt, GpuRegister rs, uint16_t imm16);
-  void Sd(GpuRegister rt, GpuRegister rs, uint16_t imm16);  // MIPS64
-
-  void Slt(GpuRegister rd, GpuRegister rs, GpuRegister rt);
-  void Sltu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
-  void Slti(GpuRegister rt, GpuRegister rs, uint16_t imm16);
-  void Sltiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
-  void Seleqz(GpuRegister rd, GpuRegister rs, GpuRegister rt);
-  void Selnez(GpuRegister rd, GpuRegister rs, GpuRegister rt);
-  void Clz(GpuRegister rd, GpuRegister rs);
-  void Clo(GpuRegister rd, GpuRegister rs);
-  void Dclz(GpuRegister rd, GpuRegister rs);  // MIPS64
-  void Dclo(GpuRegister rd, GpuRegister rs);  // MIPS64
-
-  void Jalr(GpuRegister rd, GpuRegister rs);
-  void Jalr(GpuRegister rs);
-  void Jr(GpuRegister rs);
-  void Auipc(GpuRegister rs, uint16_t imm16);
-  void Addiupc(GpuRegister rs, uint32_t imm19);
-  void Bc(uint32_t imm26);
-  void Balc(uint32_t imm26);
-  void Jic(GpuRegister rt, uint16_t imm16);
-  void Jialc(GpuRegister rt, uint16_t imm16);
-  void Bltc(GpuRegister rs, GpuRegister rt, uint16_t imm16);
-  void Bltzc(GpuRegister rt, uint16_t imm16);
-  void Bgtzc(GpuRegister rt, uint16_t imm16);
-  void Bgec(GpuRegister rs, GpuRegister rt, uint16_t imm16);
-  void Bgezc(GpuRegister rt, uint16_t imm16);
-  void Blezc(GpuRegister rt, uint16_t imm16);
-  void Bltuc(GpuRegister rs, GpuRegister rt, uint16_t imm16);
-  void Bgeuc(GpuRegister rs, GpuRegister rt, uint16_t imm16);
-  void Beqc(GpuRegister rs, GpuRegister rt, uint16_t imm16);
-  void Bnec(GpuRegister rs, GpuRegister rt, uint16_t imm16);
-  void Beqzc(GpuRegister rs, uint32_t imm21);
-  void Bnezc(GpuRegister rs, uint32_t imm21);
-  void Bc1eqz(FpuRegister ft, uint16_t imm16);
-  void Bc1nez(FpuRegister ft, uint16_t imm16);
-  void Beq(GpuRegister rs, GpuRegister rt, uint16_t imm16);  // R2
-  void Bne(GpuRegister rs, GpuRegister rt, uint16_t imm16);  // R2
-  void Beqz(GpuRegister rt, uint16_t imm16);  // R2
-  void Bnez(GpuRegister rt, uint16_t imm16);  // R2
-  void Bltz(GpuRegister rt, uint16_t imm16);  // R2
-  void Bgez(GpuRegister rt, uint16_t imm16);  // R2
-  void Blez(GpuRegister rt, uint16_t imm16);  // R2
-  void Bgtz(GpuRegister rt, uint16_t imm16);  // R2
-
-  void AddS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void SubS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void MulS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void DivS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void AddD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void SubD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void MulD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void DivD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void SqrtS(FpuRegister fd, FpuRegister fs);
-  void SqrtD(FpuRegister fd, FpuRegister fs);
-  void AbsS(FpuRegister fd, FpuRegister fs);
-  void AbsD(FpuRegister fd, FpuRegister fs);
-  void MovS(FpuRegister fd, FpuRegister fs);
-  void MovD(FpuRegister fd, FpuRegister fs);
-  void NegS(FpuRegister fd, FpuRegister fs);
-  void NegD(FpuRegister fd, FpuRegister fs);
-  void RoundLS(FpuRegister fd, FpuRegister fs);
-  void RoundLD(FpuRegister fd, FpuRegister fs);
-  void RoundWS(FpuRegister fd, FpuRegister fs);
-  void RoundWD(FpuRegister fd, FpuRegister fs);
-  void TruncLS(FpuRegister fd, FpuRegister fs);
-  void TruncLD(FpuRegister fd, FpuRegister fs);
-  void TruncWS(FpuRegister fd, FpuRegister fs);
-  void TruncWD(FpuRegister fd, FpuRegister fs);
-  void CeilLS(FpuRegister fd, FpuRegister fs);
-  void CeilLD(FpuRegister fd, FpuRegister fs);
-  void CeilWS(FpuRegister fd, FpuRegister fs);
-  void CeilWD(FpuRegister fd, FpuRegister fs);
-  void FloorLS(FpuRegister fd, FpuRegister fs);
-  void FloorLD(FpuRegister fd, FpuRegister fs);
-  void FloorWS(FpuRegister fd, FpuRegister fs);
-  void FloorWD(FpuRegister fd, FpuRegister fs);
-  void SelS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void SelD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void SeleqzS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void SeleqzD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void SelnezS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void SelnezD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void RintS(FpuRegister fd, FpuRegister fs);
-  void RintD(FpuRegister fd, FpuRegister fs);
-  void ClassS(FpuRegister fd, FpuRegister fs);
-  void ClassD(FpuRegister fd, FpuRegister fs);
-  void MinS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void MinD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void MaxS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void MaxD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpUnS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpEqS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpUeqS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpLtS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpUltS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpLeS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpUleS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpOrS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpUneS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpNeS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpUnD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpEqD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpUeqD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpLtD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpUltD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpLeD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpUleD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpOrD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpUneD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-  void CmpNeD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
-
-  void Cvtsw(FpuRegister fd, FpuRegister fs);
-  void Cvtdw(FpuRegister fd, FpuRegister fs);
-  void Cvtsd(FpuRegister fd, FpuRegister fs);
-  void Cvtds(FpuRegister fd, FpuRegister fs);
-  void Cvtsl(FpuRegister fd, FpuRegister fs);
-  void Cvtdl(FpuRegister fd, FpuRegister fs);
-
-  void Mfc1(GpuRegister rt, FpuRegister fs);
-  void Mfhc1(GpuRegister rt, FpuRegister fs);
-  void Mtc1(GpuRegister rt, FpuRegister fs);
-  void Mthc1(GpuRegister rt, FpuRegister fs);
-  void Dmfc1(GpuRegister rt, FpuRegister fs);  // MIPS64
-  void Dmtc1(GpuRegister rt, FpuRegister fs);  // MIPS64
-  void Lwc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
-  void Ldc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
-  void Swc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
-  void Sdc1(FpuRegister ft, GpuRegister rs, uint16_t imm16);
-
-  void Break();
-  void Nop();
-  void Move(GpuRegister rd, GpuRegister rs);
-  void Clear(GpuRegister rd);
-  void Not(GpuRegister rd, GpuRegister rs);
-
-  // MSA instructions.
-  void AndV(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void OrV(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void NorV(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void XorV(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-
-  void AddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void AddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void AddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void AddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Asub_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Asub_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Asub_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Asub_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Asub_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Asub_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Asub_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Asub_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MulvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MulvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MulvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MulvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Div_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Div_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Div_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Div_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Div_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Div_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Div_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Div_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Mod_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Mod_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Mod_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Mod_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Mod_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Mod_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Mod_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Mod_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Add_aB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Add_aH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Add_aW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Add_aD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Ave_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Ave_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Ave_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Ave_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Ave_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Ave_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Ave_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Ave_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Aver_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Aver_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Aver_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Aver_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Aver_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Aver_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Aver_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Aver_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Max_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Max_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Max_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Max_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Max_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Max_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Max_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Max_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Min_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Min_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Min_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Min_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Min_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Min_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Min_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Min_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-
-  void FaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FmulW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FmulD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FdivW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FdivD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FmaxW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FmaxD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FminW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FminD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-
-  void Ffint_sW(VectorRegister wd, VectorRegister ws);
-  void Ffint_sD(VectorRegister wd, VectorRegister ws);
-  void Ftint_sW(VectorRegister wd, VectorRegister ws);
-  void Ftint_sD(VectorRegister wd, VectorRegister ws);
-
-  void SllB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SllH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SllW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SllD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SraB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SraH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SraW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SraD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SrlB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SrlH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SrlW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void SrlD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-
-  // Immediate shift instructions, where shamtN denotes shift amount (must be between 0 and 2^N-1).
-  void SlliB(VectorRegister wd, VectorRegister ws, int shamt3);
-  void SlliH(VectorRegister wd, VectorRegister ws, int shamt4);
-  void SlliW(VectorRegister wd, VectorRegister ws, int shamt5);
-  void SlliD(VectorRegister wd, VectorRegister ws, int shamt6);
-  void SraiB(VectorRegister wd, VectorRegister ws, int shamt3);
-  void SraiH(VectorRegister wd, VectorRegister ws, int shamt4);
-  void SraiW(VectorRegister wd, VectorRegister ws, int shamt5);
-  void SraiD(VectorRegister wd, VectorRegister ws, int shamt6);
-  void SrliB(VectorRegister wd, VectorRegister ws, int shamt3);
-  void SrliH(VectorRegister wd, VectorRegister ws, int shamt4);
-  void SrliW(VectorRegister wd, VectorRegister ws, int shamt5);
-  void SrliD(VectorRegister wd, VectorRegister ws, int shamt6);
-
-  void MoveV(VectorRegister wd, VectorRegister ws);
-  void SplatiB(VectorRegister wd, VectorRegister ws, int n4);
-  void SplatiH(VectorRegister wd, VectorRegister ws, int n3);
-  void SplatiW(VectorRegister wd, VectorRegister ws, int n2);
-  void SplatiD(VectorRegister wd, VectorRegister ws, int n1);
-  void Copy_sB(GpuRegister rd, VectorRegister ws, int n4);
-  void Copy_sH(GpuRegister rd, VectorRegister ws, int n3);
-  void Copy_sW(GpuRegister rd, VectorRegister ws, int n2);
-  void Copy_sD(GpuRegister rd, VectorRegister ws, int n1);
-  void Copy_uB(GpuRegister rd, VectorRegister ws, int n4);
-  void Copy_uH(GpuRegister rd, VectorRegister ws, int n3);
-  void Copy_uW(GpuRegister rd, VectorRegister ws, int n2);
-  void InsertB(VectorRegister wd, GpuRegister rs, int n4);
-  void InsertH(VectorRegister wd, GpuRegister rs, int n3);
-  void InsertW(VectorRegister wd, GpuRegister rs, int n2);
-  void InsertD(VectorRegister wd, GpuRegister rs, int n1);
-  void FillB(VectorRegister wd, GpuRegister rs);
-  void FillH(VectorRegister wd, GpuRegister rs);
-  void FillW(VectorRegister wd, GpuRegister rs);
-  void FillD(VectorRegister wd, GpuRegister rs);
-
-  void LdiB(VectorRegister wd, int imm8);
-  void LdiH(VectorRegister wd, int imm10);
-  void LdiW(VectorRegister wd, int imm10);
-  void LdiD(VectorRegister wd, int imm10);
-  void LdB(VectorRegister wd, GpuRegister rs, int offset);
-  void LdH(VectorRegister wd, GpuRegister rs, int offset);
-  void LdW(VectorRegister wd, GpuRegister rs, int offset);
-  void LdD(VectorRegister wd, GpuRegister rs, int offset);
-  void StB(VectorRegister wd, GpuRegister rs, int offset);
-  void StH(VectorRegister wd, GpuRegister rs, int offset);
-  void StW(VectorRegister wd, GpuRegister rs, int offset);
-  void StD(VectorRegister wd, GpuRegister rs, int offset);
-
-  void IlvlB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvlH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvlW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvlD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvrB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvrH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvrW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvrD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvevB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvevH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvevW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvevD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvodB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvodH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvodW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void IlvodD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-
-  void MaddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MaddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MaddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MaddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MsubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MsubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MsubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void MsubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FmaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FmaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FmsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void FmsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-
-  void Hadd_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Hadd_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Hadd_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Hadd_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Hadd_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-  void Hadd_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt);
-
-  void PcntB(VectorRegister wd, VectorRegister ws);
-  void PcntH(VectorRegister wd, VectorRegister ws);
-  void PcntW(VectorRegister wd, VectorRegister ws);
-  void PcntD(VectorRegister wd, VectorRegister ws);
-
-  // Helper for replicating floating point value in all destination elements.
-  void ReplicateFPToVectorRegister(VectorRegister dst, FpuRegister src, bool is_double);
-
-  // Higher level composite instructions.
-  int InstrCountForLoadReplicatedConst32(int64_t);
-  void LoadConst32(GpuRegister rd, int32_t value);
-  void LoadConst64(GpuRegister rd, int64_t value);  // MIPS64
-
-  // This function is only used for testing purposes.
-  void RecordLoadConst64Path(int value);
-
-  void Addiu32(GpuRegister rt, GpuRegister rs, int32_t value);
-  void Daddiu64(GpuRegister rt, GpuRegister rs, int64_t value, GpuRegister rtmp = AT);  // MIPS64
-
-  //
-  // Heap poisoning.
-  //
-
-  // Poison a heap reference contained in `src` and store it in `dst`.
-  void PoisonHeapReference(GpuRegister dst, GpuRegister src) {
-    // dst = -src.
-    // Negate the 32-bit ref.
-    Dsubu(dst, ZERO, src);
-    // And constrain it to 32 bits (zero-extend into bits 32 through 63) as on Arm64 and x86/64.
-    Dext(dst, dst, 0, 32);
-  }
-  // Poison a heap reference contained in `reg`.
-  void PoisonHeapReference(GpuRegister reg) {
-    // reg = -reg.
-    PoisonHeapReference(reg, reg);
-  }
-  // Unpoison a heap reference contained in `reg`.
-  void UnpoisonHeapReference(GpuRegister reg) {
-    // reg = -reg.
-    // Negate the 32-bit ref.
-    Dsubu(reg, ZERO, reg);
-    // And constrain it to 32 bits (zero-extend into bits 32 through 63) as on Arm64 and x86/64.
-    Dext(reg, reg, 0, 32);
-  }
-  // Poison a heap reference contained in `reg` if heap poisoning is enabled.
-  void MaybePoisonHeapReference(GpuRegister reg) {
-    if (kPoisonHeapReferences) {
-      PoisonHeapReference(reg);
-    }
-  }
-  // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
-  void MaybeUnpoisonHeapReference(GpuRegister reg) {
-    if (kPoisonHeapReferences) {
-      UnpoisonHeapReference(reg);
-    }
-  }
-
-  void Bind(Label* label) override {
-    Bind(down_cast<Mips64Label*>(label));
-  }
-  void Jump(Label* label ATTRIBUTE_UNUSED) override {
-    UNIMPLEMENTED(FATAL) << "Do not use Jump for MIPS64";
-  }
-
-  void Bind(Mips64Label* label);
-
-  // Don't warn about a different virtual Bind/Jump in the base class.
-  using JNIBase::Bind;
-  using JNIBase::Jump;
-
-  // Create a new label that can be used with Jump/Bind calls.
-  std::unique_ptr<JNIMacroLabel> CreateLabel() override {
-    LOG(FATAL) << "Not implemented on MIPS64";
-    UNREACHABLE();
-  }
-  // Emit an unconditional jump to the label.
-  void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
-    LOG(FATAL) << "Not implemented on MIPS64";
-    UNREACHABLE();
-  }
-  // Emit a conditional jump to the label by applying a unary condition test to the register.
-  void Jump(JNIMacroLabel* label ATTRIBUTE_UNUSED,
-            JNIMacroUnaryCondition cond ATTRIBUTE_UNUSED,
-            ManagedRegister test ATTRIBUTE_UNUSED) override {
-    LOG(FATAL) << "Not implemented on MIPS64";
-    UNREACHABLE();
-  }
-
-  // Code at this offset will serve as the target for the Jump call.
-  void Bind(JNIMacroLabel* label ATTRIBUTE_UNUSED) override {
-    LOG(FATAL) << "Not implemented on MIPS64";
-    UNREACHABLE();
-  }
-
-  // Create a new literal with a given value.
-  // NOTE: Force the template parameter to be explicitly specified.
-  template <typename T>
-  Literal* NewLiteral(typename Identity<T>::type value) {
-    static_assert(std::is_integral<T>::value, "T must be an integral type.");
-    return NewLiteral(sizeof(value), reinterpret_cast<const uint8_t*>(&value));
-  }
-
-  // Load label address using PC-relative loads. To be used with data labels in the literal /
-  // jump table area only and not with regular code labels.
-  void LoadLabelAddress(GpuRegister dest_reg, Mips64Label* label);
-
-  // Create a new literal with the given data.
-  Literal* NewLiteral(size_t size, const uint8_t* data);
-
-  // Load literal using PC-relative loads.
-  void LoadLiteral(GpuRegister dest_reg, LoadOperandType load_type, Literal* literal);
-
-  // Create a jump table for the given labels that will be emitted when finalizing.
-  // When the table is emitted, offsets will be relative to the location of the table.
-  // The table location is determined by the location of its label (the label precedes
-  // the table data) and should be loaded using LoadLabelAddress().
-  JumpTable* CreateJumpTable(std::vector<Mips64Label*>&& labels);
-
-  // When `is_bare` is false, the branches will promote to long (if the range
-  // of the individual branch instruction is insufficient) and the delay/
-  // forbidden slots will be taken care of.
-  // Use `is_bare = false` when the branch target may be out of reach of the
-  // individual branch instruction. IOW, this is for general purpose use.
-  //
-  // When `is_bare` is true, just the branch instructions will be generated
-  // leaving delay/forbidden slot filling up to the caller and the branches
-  // won't promote to long if the range is insufficient (you'll get a
-  // compilation error when the range is exceeded).
-  // Use `is_bare = true` when the branch target is known to be within reach
-  // of the individual branch instruction. This is intended for small local
-  // optimizations around delay/forbidden slots.
-  // Also prefer using `is_bare = true` if the code near the branch is to be
-  // patched or analyzed at run time (e.g. introspection) to
-  // - show the intent and
-  // - fail during compilation rather than during patching/execution if the
-  //   bare branch range is insufficent but the code size and layout are
-  //   expected to remain unchanged
-  //
-  // R6 compact branches without delay/forbidden slots.
-  void Bc(Mips64Label* label, bool is_bare = false);
-  void Balc(Mips64Label* label, bool is_bare = false);
-  // R6 compact branches with forbidden slots.
-  void Bltc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare = false);
-  void Bltzc(GpuRegister rt, Mips64Label* label, bool is_bare = false);
-  void Bgtzc(GpuRegister rt, Mips64Label* label, bool is_bare = false);
-  void Bgec(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare = false);
-  void Bgezc(GpuRegister rt, Mips64Label* label, bool is_bare = false);
-  void Blezc(GpuRegister rt, Mips64Label* label, bool is_bare = false);
-  void Bltuc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare = false);
-  void Bgeuc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare = false);
-  void Beqc(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare = false);
-  void Bnec(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare = false);
-  void Beqzc(GpuRegister rs, Mips64Label* label, bool is_bare = false);
-  void Bnezc(GpuRegister rs, Mips64Label* label, bool is_bare = false);
-  // R6 branches with delay slots.
-  void Bc1eqz(FpuRegister ft, Mips64Label* label, bool is_bare = false);
-  void Bc1nez(FpuRegister ft, Mips64Label* label, bool is_bare = false);
-  // R2 branches with delay slots that are also available on R6.
-  // The `is_bare` parameter exists and is checked in these branches only to
-  // prevent programming mistakes. These branches never promote to long, not
-  // even if `is_bare` is false.
-  void Bltz(GpuRegister rt, Mips64Label* label, bool is_bare = false);  // R2
-  void Bgtz(GpuRegister rt, Mips64Label* label, bool is_bare = false);  // R2
-  void Bgez(GpuRegister rt, Mips64Label* label, bool is_bare = false);  // R2
-  void Blez(GpuRegister rt, Mips64Label* label, bool is_bare = false);  // R2
-  void Beq(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare = false);  // R2
-  void Bne(GpuRegister rs, GpuRegister rt, Mips64Label* label, bool is_bare = false);  // R2
-  void Beqz(GpuRegister rs, Mips64Label* label, bool is_bare = false);  // R2
-  void Bnez(GpuRegister rs, Mips64Label* label, bool is_bare = false);  // R2
-
-  void EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset, size_t size);
-  void AdjustBaseAndOffset(GpuRegister& base, int32_t& offset, bool is_doubleword);
-  // If element_size_shift is negative at entry, its value will be calculated based on the offset.
-  void AdjustBaseOffsetAndElementSizeShift(GpuRegister& base,
-                                           int32_t& offset,
-                                           int& element_size_shift);
-
- private:
-  // This will be used as an argument for loads/stores
-  // when there is no need for implicit null checks.
-  struct NoImplicitNullChecker {
-    void operator()() const {}
-  };
-
- public:
-  template <typename ImplicitNullChecker = NoImplicitNullChecker>
-  void StoreConstToOffset(StoreOperandType type,
-                          int64_t value,
-                          GpuRegister base,
-                          int32_t offset,
-                          GpuRegister temp,
-                          ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    // We permit `base` and `temp` to coincide (however, we check that neither is AT),
-    // in which case the `base` register may be overwritten in the process.
-    CHECK_NE(temp, AT);  // Must not use AT as temp, so as not to overwrite the adjusted base.
-    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
-    GpuRegister reg;
-    // If the adjustment left `base` unchanged and equal to `temp`, we can't use `temp`
-    // to load and hold the value but we can use AT instead as AT hasn't been used yet.
-    // Otherwise, `temp` can be used for the value. And if `temp` is the same as the
-    // original `base` (that is, `base` prior to the adjustment), the original `base`
-    // register will be overwritten.
-    if (base == temp) {
-      temp = AT;
-    }
-
-    if (type == kStoreDoubleword && IsAligned<kMips64DoublewordSize>(offset)) {
-      if (value == 0) {
-        reg = ZERO;
-      } else {
-        reg = temp;
-        LoadConst64(reg, value);
-      }
-      Sd(reg, base, offset);
-      null_checker();
-    } else {
-      uint32_t low = Low32Bits(value);
-      uint32_t high = High32Bits(value);
-      if (low == 0) {
-        reg = ZERO;
-      } else {
-        reg = temp;
-        LoadConst32(reg, low);
-      }
-      switch (type) {
-        case kStoreByte:
-          Sb(reg, base, offset);
-          break;
-        case kStoreHalfword:
-          Sh(reg, base, offset);
-          break;
-        case kStoreWord:
-          Sw(reg, base, offset);
-          break;
-        case kStoreDoubleword:
-          // not aligned to kMips64DoublewordSize
-          CHECK_ALIGNED(offset, kMips64WordSize);
-          Sw(reg, base, offset);
-          null_checker();
-          if (high == 0) {
-            reg = ZERO;
-          } else {
-            reg = temp;
-            if (high != low) {
-              LoadConst32(reg, high);
-            }
-          }
-          Sw(reg, base, offset + kMips64WordSize);
-          break;
-        default:
-          LOG(FATAL) << "UNREACHABLE";
-      }
-      if (type != kStoreDoubleword) {
-        null_checker();
-      }
-    }
-  }
-
-  template <typename ImplicitNullChecker = NoImplicitNullChecker>
-  void LoadFromOffset(LoadOperandType type,
-                      GpuRegister reg,
-                      GpuRegister base,
-                      int32_t offset,
-                      ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kLoadDoubleword));
-
-    switch (type) {
-      case kLoadSignedByte:
-        Lb(reg, base, offset);
-        break;
-      case kLoadUnsignedByte:
-        Lbu(reg, base, offset);
-        break;
-      case kLoadSignedHalfword:
-        Lh(reg, base, offset);
-        break;
-      case kLoadUnsignedHalfword:
-        Lhu(reg, base, offset);
-        break;
-      case kLoadWord:
-        CHECK_ALIGNED(offset, kMips64WordSize);
-        Lw(reg, base, offset);
-        break;
-      case kLoadUnsignedWord:
-        CHECK_ALIGNED(offset, kMips64WordSize);
-        Lwu(reg, base, offset);
-        break;
-      case kLoadDoubleword:
-        if (!IsAligned<kMips64DoublewordSize>(offset)) {
-          CHECK_ALIGNED(offset, kMips64WordSize);
-          Lwu(reg, base, offset);
-          null_checker();
-          Lwu(TMP2, base, offset + kMips64WordSize);
-          Dinsu(reg, TMP2, 32, 32);
-        } else {
-          Ld(reg, base, offset);
-          null_checker();
-        }
-        break;
-      default:
-        LOG(FATAL) << "UNREACHABLE";
-    }
-    if (type != kLoadDoubleword) {
-      null_checker();
-    }
-  }
-
-  template <typename ImplicitNullChecker = NoImplicitNullChecker>
-  void LoadFpuFromOffset(LoadOperandType type,
-                         FpuRegister reg,
-                         GpuRegister base,
-                         int32_t offset,
-                         ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    int element_size_shift = -1;
-    if (type != kLoadQuadword) {
-      AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kLoadDoubleword));
-    } else {
-      AdjustBaseOffsetAndElementSizeShift(base, offset, element_size_shift);
-    }
-
-    switch (type) {
-      case kLoadWord:
-        CHECK_ALIGNED(offset, kMips64WordSize);
-        Lwc1(reg, base, offset);
-        null_checker();
-        break;
-      case kLoadDoubleword:
-        if (!IsAligned<kMips64DoublewordSize>(offset)) {
-          CHECK_ALIGNED(offset, kMips64WordSize);
-          Lwc1(reg, base, offset);
-          null_checker();
-          Lw(TMP2, base, offset + kMips64WordSize);
-          Mthc1(TMP2, reg);
-        } else {
-          Ldc1(reg, base, offset);
-          null_checker();
-        }
-        break;
-      case kLoadQuadword:
-        switch (element_size_shift) {
-          case TIMES_1: LdB(static_cast<VectorRegister>(reg), base, offset); break;
-          case TIMES_2: LdH(static_cast<VectorRegister>(reg), base, offset); break;
-          case TIMES_4: LdW(static_cast<VectorRegister>(reg), base, offset); break;
-          case TIMES_8: LdD(static_cast<VectorRegister>(reg), base, offset); break;
-          default:
-            LOG(FATAL) << "UNREACHABLE";
-        }
-        null_checker();
-        break;
-      default:
-        LOG(FATAL) << "UNREACHABLE";
-    }
-  }
-
-  template <typename ImplicitNullChecker = NoImplicitNullChecker>
-  void StoreToOffset(StoreOperandType type,
-                     GpuRegister reg,
-                     GpuRegister base,
-                     int32_t offset,
-                     ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    // Must not use AT as `reg`, so as not to overwrite the value being stored
-    // with the adjusted `base`.
-    CHECK_NE(reg, AT);
-    AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
-
-    switch (type) {
-      case kStoreByte:
-        Sb(reg, base, offset);
-        break;
-      case kStoreHalfword:
-        Sh(reg, base, offset);
-        break;
-      case kStoreWord:
-        CHECK_ALIGNED(offset, kMips64WordSize);
-        Sw(reg, base, offset);
-        break;
-      case kStoreDoubleword:
-        if (!IsAligned<kMips64DoublewordSize>(offset)) {
-          CHECK_ALIGNED(offset, kMips64WordSize);
-          Sw(reg, base, offset);
-          null_checker();
-          Dsrl32(TMP2, reg, 0);
-          Sw(TMP2, base, offset + kMips64WordSize);
-        } else {
-          Sd(reg, base, offset);
-          null_checker();
-        }
-        break;
-      default:
-        LOG(FATAL) << "UNREACHABLE";
-    }
-    if (type != kStoreDoubleword) {
-      null_checker();
-    }
-  }
-
-  template <typename ImplicitNullChecker = NoImplicitNullChecker>
-  void StoreFpuToOffset(StoreOperandType type,
-                        FpuRegister reg,
-                        GpuRegister base,
-                        int32_t offset,
-                        ImplicitNullChecker null_checker = NoImplicitNullChecker()) {
-    int element_size_shift = -1;
-    if (type != kStoreQuadword) {
-      AdjustBaseAndOffset(base, offset, /* is_doubleword= */ (type == kStoreDoubleword));
-    } else {
-      AdjustBaseOffsetAndElementSizeShift(base, offset, element_size_shift);
-    }
-
-    switch (type) {
-      case kStoreWord:
-        CHECK_ALIGNED(offset, kMips64WordSize);
-        Swc1(reg, base, offset);
-        null_checker();
-        break;
-      case kStoreDoubleword:
-        if (!IsAligned<kMips64DoublewordSize>(offset)) {
-          CHECK_ALIGNED(offset, kMips64WordSize);
-          Mfhc1(TMP2, reg);
-          Swc1(reg, base, offset);
-          null_checker();
-          Sw(TMP2, base, offset + kMips64WordSize);
-        } else {
-          Sdc1(reg, base, offset);
-          null_checker();
-        }
-        break;
-      case kStoreQuadword:
-        switch (element_size_shift) {
-          case TIMES_1: StB(static_cast<VectorRegister>(reg), base, offset); break;
-          case TIMES_2: StH(static_cast<VectorRegister>(reg), base, offset); break;
-          case TIMES_4: StW(static_cast<VectorRegister>(reg), base, offset); break;
-          case TIMES_8: StD(static_cast<VectorRegister>(reg), base, offset); break;
-          default:
-            LOG(FATAL) << "UNREACHABLE";
-        }
-        null_checker();
-        break;
-      default:
-        LOG(FATAL) << "UNREACHABLE";
-    }
-  }
-
-  void LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base, int32_t offset);
-  void LoadFpuFromOffset(LoadOperandType type, FpuRegister reg, GpuRegister base, int32_t offset);
-  void StoreToOffset(StoreOperandType type, GpuRegister reg, GpuRegister base, int32_t offset);
-  void StoreFpuToOffset(StoreOperandType type, FpuRegister reg, GpuRegister base, int32_t offset);
-
-  // Emit data (e.g. encoded instruction or immediate) to the instruction stream.
-  void Emit(uint32_t value);
-
-  //
-  // Overridden common assembler high-level functionality.
-  //
-
-  // Emit code that will create an activation on the stack.
-  void BuildFrame(size_t frame_size,
-                  ManagedRegister method_reg,
-                  ArrayRef<const ManagedRegister> callee_save_regs,
-                  const ManagedRegisterEntrySpills& entry_spills) override;
-
-  // Emit code that will remove an activation from the stack.
-  void RemoveFrame(size_t frame_size,
-                   ArrayRef<const ManagedRegister> callee_save_regs,
-                   bool may_suspend) override;
-
-  void IncreaseFrameSize(size_t adjust) override;
-  void DecreaseFrameSize(size_t adjust) override;
-
-  // Store routines.
-  void Store(FrameOffset offs, ManagedRegister msrc, size_t size) override;
-  void StoreRef(FrameOffset dest, ManagedRegister msrc) override;
-  void StoreRawPtr(FrameOffset dest, ManagedRegister msrc) override;
-
-  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister mscratch) override;
-
-  void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
-                                FrameOffset fr_offs,
-                                ManagedRegister mscratch) override;
-
-  void StoreStackPointerToThread(ThreadOffset64 thr_offs) override;
-
-  void StoreSpanning(FrameOffset dest, ManagedRegister msrc, FrameOffset in_off,
-                     ManagedRegister mscratch) override;
-
-  // Load routines.
-  void Load(ManagedRegister mdest, FrameOffset src, size_t size) override;
-
-  void LoadFromThread(ManagedRegister mdest, ThreadOffset64 src, size_t size) override;
-
-  void LoadRef(ManagedRegister dest, FrameOffset src) override;
-
-  void LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
-               bool unpoison_reference) override;
-
-  void LoadRawPtr(ManagedRegister mdest, ManagedRegister base, Offset offs) override;
-
-  void LoadRawPtrFromThread(ManagedRegister mdest, ThreadOffset64 offs) override;
-
-  // Copying routines.
-  void Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) override;
-
-  void CopyRawPtrFromThread(FrameOffset fr_offs,
-                            ThreadOffset64 thr_offs,
-                            ManagedRegister mscratch) override;
-
-  void CopyRawPtrToThread(ThreadOffset64 thr_offs,
-                          FrameOffset fr_offs,
-                          ManagedRegister mscratch) override;
-
-  void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister mscratch) override;
-
-  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) override;
-
-  void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister mscratch,
-            size_t size) override;
-
-  void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
-            ManagedRegister mscratch, size_t size) override;
-
-  void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister mscratch,
-            size_t size) override;
-
-  void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
-            ManagedRegister mscratch, size_t size) override;
-
-  void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
-            ManagedRegister mscratch, size_t size) override;
-
-  void MemoryBarrier(ManagedRegister) override;
-
-  // Sign extension.
-  void SignExtend(ManagedRegister mreg, size_t size) override;
-
-  // Zero extension.
-  void ZeroExtend(ManagedRegister mreg, size_t size) override;
-
-  // Exploit fast access in managed code to Thread::Current().
-  void GetCurrentThread(ManagedRegister tr) override;
-  void GetCurrentThread(FrameOffset dest_offset, ManagedRegister mscratch) override;
-
-  // Set up out_reg to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed. in_reg holds a possibly stale reference
-  // that can be used to avoid loading the handle scope entry to see if the value is
-  // null.
-  void CreateHandleScopeEntry(ManagedRegister out_reg, FrameOffset handlescope_offset,
-                              ManagedRegister in_reg, bool null_allowed) override;
-
-  // Set up out_off to hold a Object** into the handle scope, or to be null if the
-  // value is null and null_allowed.
-  void CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handlescope_offset, ManagedRegister
-                              mscratch, bool null_allowed) override;
-
-  // src holds a handle scope entry (Object**) load this into dst.
-  void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) override;
-
-  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
-  // know that src may not be null.
-  void VerifyObject(ManagedRegister src, bool could_be_null) override;
-  void VerifyObject(FrameOffset src, bool could_be_null) override;
-
-  // Call to address held at [base+offset].
-  void Call(ManagedRegister base, Offset offset, ManagedRegister mscratch) override;
-  void Call(FrameOffset base, Offset offset, ManagedRegister mscratch) override;
-  void CallFromThread(ThreadOffset64 offset, ManagedRegister mscratch) override;
-
-  // Generate code to check if Thread::Current()->exception_ is non-null
-  // and branch to a ExceptionSlowPath if it is.
-  void ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) override;
-
-  // Emit slow paths queued during assembly and promote short branches to long if needed.
-  void FinalizeCode() override;
-
-  // Emit branches and finalize all instructions.
-  void FinalizeInstructions(const MemoryRegion& region) override;
-
-  // Returns the (always-)current location of a label (can be used in class CodeGeneratorMIPS64,
-  // must be used instead of Mips64Label::GetPosition()).
-  uint32_t GetLabelLocation(const Mips64Label* label) const;
-
-  // Get the final position of a label after local fixup based on the old position
-  // recorded before FinalizeCode().
-  uint32_t GetAdjustedPosition(uint32_t old_position);
-
-  // Note that PC-relative literal loads are handled as pseudo branches because they need very
-  // similar relocation and may similarly expand in size to accomodate for larger offsets relative
-  // to PC.
-  enum BranchCondition {
-    kCondLT,
-    kCondGE,
-    kCondLE,
-    kCondGT,
-    kCondLTZ,
-    kCondGEZ,
-    kCondLEZ,
-    kCondGTZ,
-    kCondEQ,
-    kCondNE,
-    kCondEQZ,
-    kCondNEZ,
-    kCondLTU,
-    kCondGEU,
-    kCondF,    // Floating-point predicate false.
-    kCondT,    // Floating-point predicate true.
-    kUncond,
-  };
-  friend std::ostream& operator<<(std::ostream& os, const BranchCondition& rhs);
-
- private:
-  class Branch {
-   public:
-    enum Type {
-      // R6 short branches (can be promoted to long).
-      kUncondBranch,
-      kCondBranch,
-      kCall,
-      // R6 short branches (can't be promoted to long), forbidden/delay slots filled manually.
-      kBareUncondBranch,
-      kBareCondBranch,
-      kBareCall,
-      // R2 short branches (can't be promoted to long), delay slots filled manually.
-      kR2BareCondBranch,
-      // Near label.
-      kLabel,
-      // Near literals.
-      kLiteral,
-      kLiteralUnsigned,
-      kLiteralLong,
-      // Long branches.
-      kLongUncondBranch,
-      kLongCondBranch,
-      kLongCall,
-      // Far label.
-      kFarLabel,
-      // Far literals.
-      kFarLiteral,
-      kFarLiteralUnsigned,
-      kFarLiteralLong,
-    };
-
-    // Bit sizes of offsets defined as enums to minimize chance of typos.
-    enum OffsetBits {
-      kOffset16 = 16,
-      kOffset18 = 18,
-      kOffset21 = 21,
-      kOffset23 = 23,
-      kOffset28 = 28,
-      kOffset32 = 32,
-    };
-
-    static constexpr uint32_t kUnresolved = 0xffffffff;  // Unresolved target_
-    static constexpr int32_t kMaxBranchLength = 32;
-    static constexpr int32_t kMaxBranchSize = kMaxBranchLength * sizeof(uint32_t);
-
-    struct BranchInfo {
-      // Branch length as a number of 4-byte-long instructions.
-      uint32_t length;
-      // Ordinal number (0-based) of the first (or the only) instruction that contains the branch's
-      // PC-relative offset (or its most significant 16-bit half, which goes first).
-      uint32_t instr_offset;
-      // Different MIPS instructions with PC-relative offsets apply said offsets to slightly
-      // different origins, e.g. to PC or PC+4. Encode the origin distance (as a number of 4-byte
-      // instructions) from the instruction containing the offset.
-      uint32_t pc_org;
-      // How large (in bits) a PC-relative offset can be for a given type of branch (kCondBranch
-      // and kBareCondBranch are an exception: use kOffset23 for beqzc/bnezc).
-      OffsetBits offset_size;
-      // Some MIPS instructions with PC-relative offsets shift the offset by 2. Encode the shift
-      // count.
-      int offset_shift;
-    };
-    static const BranchInfo branch_info_[/* Type */];
-
-    // Unconditional branch or call.
-    Branch(uint32_t location, uint32_t target, bool is_call, bool is_bare);
-    // Conditional branch.
-    Branch(bool is_r6,
-           uint32_t location,
-           uint32_t target,
-           BranchCondition condition,
-           GpuRegister lhs_reg,
-           GpuRegister rhs_reg,
-           bool is_bare);
-    // Label address (in literal area) or literal.
-    Branch(uint32_t location, GpuRegister dest_reg, Type label_or_literal_type);
-
-    // Some conditional branches with lhs = rhs are effectively NOPs, while some
-    // others are effectively unconditional. MIPSR6 conditional branches require lhs != rhs.
-    // So, we need a way to identify such branches in order to emit no instructions for them
-    // or change them to unconditional.
-    static bool IsNop(BranchCondition condition, GpuRegister lhs, GpuRegister rhs);
-    static bool IsUncond(BranchCondition condition, GpuRegister lhs, GpuRegister rhs);
-
-    static BranchCondition OppositeCondition(BranchCondition cond);
-
-    Type GetType() const;
-    BranchCondition GetCondition() const;
-    GpuRegister GetLeftRegister() const;
-    GpuRegister GetRightRegister() const;
-    uint32_t GetTarget() const;
-    uint32_t GetLocation() const;
-    uint32_t GetOldLocation() const;
-    uint32_t GetLength() const;
-    uint32_t GetOldLength() const;
-    uint32_t GetSize() const;
-    uint32_t GetOldSize() const;
-    uint32_t GetEndLocation() const;
-    uint32_t GetOldEndLocation() const;
-    bool IsBare() const;
-    bool IsLong() const;
-    bool IsResolved() const;
-
-    // Returns the bit size of the signed offset that the branch instruction can handle.
-    OffsetBits GetOffsetSize() const;
-
-    // Calculates the distance between two byte locations in the assembler buffer and
-    // returns the number of bits needed to represent the distance as a signed integer.
-    //
-    // Branch instructions have signed offsets of 16, 19 (addiupc), 21 (beqzc/bnezc),
-    // and 26 (bc) bits, which are additionally shifted left 2 positions at run time.
-    //
-    // Composite branches (made of several instructions) with longer reach have 32-bit
-    // offsets encoded as 2 16-bit "halves" in two instructions (high half goes first).
-    // The composite branches cover the range of PC + ~+/-2GB. The range is not end-to-end,
-    // however. Consider the following implementation of a long unconditional branch, for
-    // example:
-    //
-    //   auipc at, offset_31_16  // at = pc + sign_extend(offset_31_16) << 16
-    //   jic   at, offset_15_0   // pc = at + sign_extend(offset_15_0)
-    //
-    // Both of the above instructions take 16-bit signed offsets as immediate operands.
-    // When bit 15 of offset_15_0 is 1, it effectively causes subtraction of 0x10000
-    // due to sign extension. This must be compensated for by incrementing offset_31_16
-    // by 1. offset_31_16 can only be incremented by 1 if it's not 0x7FFF. If it is
-    // 0x7FFF, adding 1 will overflow the positive offset into the negative range.
-    // Therefore, the long branch range is something like from PC - 0x80000000 to
-    // PC + 0x7FFF7FFF, IOW, shorter by 32KB on one side.
-    //
-    // The returned values are therefore: 18, 21, 23, 28 and 32. There's also a special
-    // case with the addiu instruction and a 16 bit offset.
-    static OffsetBits GetOffsetSizeNeeded(uint32_t location, uint32_t target);
-
-    // Resolve a branch when the target is known.
-    void Resolve(uint32_t target);
-
-    // Relocate a branch by a given delta if needed due to expansion of this or another
-    // branch at a given location by this delta (just changes location_ and target_).
-    void Relocate(uint32_t expand_location, uint32_t delta);
-
-    // If the branch is short, changes its type to long.
-    void PromoteToLong();
-
-    // If necessary, updates the type by promoting a short branch to a long branch
-    // based on the branch location and target. Returns the amount (in bytes) by
-    // which the branch size has increased.
-    // max_short_distance caps the maximum distance between location_ and target_
-    // that is allowed for short branches. This is for debugging/testing purposes.
-    // max_short_distance = 0 forces all short branches to become long.
-    // Use the implicit default argument when not debugging/testing.
-    uint32_t PromoteIfNeeded(uint32_t max_short_distance = std::numeric_limits<uint32_t>::max());
-
-    // Returns the location of the instruction(s) containing the offset.
-    uint32_t GetOffsetLocation() const;
-
-    // Calculates and returns the offset ready for encoding in the branch instruction(s).
-    uint32_t GetOffset() const;
-
-   private:
-    // Completes branch construction by determining and recording its type.
-    void InitializeType(Type initial_type, bool is_r6);
-    // Helper for the above.
-    void InitShortOrLong(OffsetBits ofs_size, Type short_type, Type long_type);
-
-    uint32_t old_location_;      // Offset into assembler buffer in bytes.
-    uint32_t location_;          // Offset into assembler buffer in bytes.
-    uint32_t target_;            // Offset into assembler buffer in bytes.
-
-    GpuRegister lhs_reg_;        // Left-hand side register in conditional branches or
-                                 // destination register in literals.
-    GpuRegister rhs_reg_;        // Right-hand side register in conditional branches.
-    BranchCondition condition_;  // Condition for conditional branches.
-
-    Type type_;                  // Current type of the branch.
-    Type old_type_;              // Initial type of the branch.
-  };
-  friend std::ostream& operator<<(std::ostream& os, const Branch::Type& rhs);
-  friend std::ostream& operator<<(std::ostream& os, const Branch::OffsetBits& rhs);
-
-  void EmitR(int opcode, GpuRegister rs, GpuRegister rt, GpuRegister rd, int shamt, int funct);
-  void EmitRsd(int opcode, GpuRegister rs, GpuRegister rd, int shamt, int funct);
-  void EmitRtd(int opcode, GpuRegister rt, GpuRegister rd, int shamt, int funct);
-  void EmitI(int opcode, GpuRegister rs, GpuRegister rt, uint16_t imm);
-  void EmitI21(int opcode, GpuRegister rs, uint32_t imm21);
-  void EmitI26(int opcode, uint32_t imm26);
-  void EmitFR(int opcode, int fmt, FpuRegister ft, FpuRegister fs, FpuRegister fd, int funct);
-  void EmitFI(int opcode, int fmt, FpuRegister rt, uint16_t imm);
-  void EmitBcondR6(BranchCondition cond, GpuRegister rs, GpuRegister rt, uint32_t imm16_21);
-  void EmitBcondR2(BranchCondition cond, GpuRegister rs, GpuRegister rt, uint16_t imm16);
-  void EmitMsa3R(int operation,
-                 int df,
-                 VectorRegister wt,
-                 VectorRegister ws,
-                 VectorRegister wd,
-                 int minor_opcode);
-  void EmitMsaBIT(int operation, int df_m, VectorRegister ws, VectorRegister wd, int minor_opcode);
-  void EmitMsaELM(int operation, int df_n, VectorRegister ws, VectorRegister wd, int minor_opcode);
-  void EmitMsaMI10(int s10, GpuRegister rs, VectorRegister wd, int minor_opcode, int df);
-  void EmitMsaI10(int operation, int df, int i10, VectorRegister wd, int minor_opcode);
-  void EmitMsa2R(int operation, int df, VectorRegister ws, VectorRegister wd, int minor_opcode);
-  void EmitMsa2RF(int operation, int df, VectorRegister ws, VectorRegister wd, int minor_opcode);
-
-  void Buncond(Mips64Label* label, bool is_bare);
-  void Bcond(Mips64Label* label,
-             bool is_r6,
-             bool is_bare,
-             BranchCondition condition,
-             GpuRegister lhs,
-             GpuRegister rhs = ZERO);
-  void Call(Mips64Label* label, bool is_bare);
-  void FinalizeLabeledBranch(Mips64Label* label);
-
-  Branch* GetBranch(uint32_t branch_id);
-  const Branch* GetBranch(uint32_t branch_id) const;
-
-  void EmitLiterals();
-  void ReserveJumpTableSpace();
-  void EmitJumpTables();
-  void PromoteBranches();
-  void EmitBranch(Branch* branch);
-  void EmitBranches();
-  void PatchCFI();
-
-  // Emits exception block.
-  void EmitExceptionPoll(Mips64ExceptionSlowPath* exception);
-
-  bool HasMsa() const {
-    return has_msa_;
-  }
-
-  // List of exception blocks to generate at the end of the code cache.
-  std::vector<Mips64ExceptionSlowPath> exception_blocks_;
-
-  std::vector<Branch> branches_;
-
-  // Whether appending instructions at the end of the buffer or overwriting the existing ones.
-  bool overwriting_;
-  // The current overwrite location.
-  uint32_t overwrite_location_;
-
-  // Use std::deque<> for literal labels to allow insertions at the end
-  // without invalidating pointers and references to existing elements.
-  ArenaDeque<Literal> literals_;
-  ArenaDeque<Literal> long_literals_;  // 64-bit literals separated for alignment reasons.
-
-  // Jump table list.
-  ArenaDeque<JumpTable> jump_tables_;
-
-  // Data for AdjustedPosition(), see the description there.
-  uint32_t last_position_adjustment_;
-  uint32_t last_old_position_;
-  uint32_t last_branch_id_;
-
-  const bool has_msa_;
-
-  DISALLOW_COPY_AND_ASSIGN(Mips64Assembler);
-};
-
-}  // namespace mips64
-}  // namespace art
-
-#endif  // ART_COMPILER_UTILS_MIPS64_ASSEMBLER_MIPS64_H_
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
deleted file mode 100644
index 499e8f4..0000000
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ /dev/null
@@ -1,3784 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "assembler_mips64.h"
-
-#include <inttypes.h>
-#include <map>
-#include <random>
-
-#include "base/bit_utils.h"
-#include "base/stl_util.h"
-#include "utils/assembler_test.h"
-
-#define __ GetAssembler()->
-
-namespace art {
-
-struct MIPS64CpuRegisterCompare {
-  bool operator()(const mips64::GpuRegister& a, const mips64::GpuRegister& b) const {
-    return a < b;
-  }
-};
-
-class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler,
-                                                 mips64::Mips64Label,
-                                                 mips64::GpuRegister,
-                                                 mips64::FpuRegister,
-                                                 uint32_t,
-                                                 mips64::VectorRegister> {
- public:
-  using Base = AssemblerTest<mips64::Mips64Assembler,
-                             mips64::Mips64Label,
-                             mips64::GpuRegister,
-                             mips64::FpuRegister,
-                             uint32_t,
-                             mips64::VectorRegister>;
-
-  // These tests were taking too long, so we hide the DriverStr() from AssemblerTest<>
-  // and reimplement it without the verification against `assembly_string`. b/73903608
-  void DriverStr(const std::string& assembly_string ATTRIBUTE_UNUSED,
-                 const std::string& test_name ATTRIBUTE_UNUSED) {
-    GetAssembler()->FinalizeCode();
-    std::vector<uint8_t> data(GetAssembler()->CodeSize());
-    MemoryRegion code(data.data(), data.size());
-    GetAssembler()->FinalizeInstructions(code);
-  }
-
-  AssemblerMIPS64Test()
-      : instruction_set_features_(Mips64InstructionSetFeatures::FromVariant("default", nullptr)) {}
-
- protected:
-  // Get the typically used name for this architecture, e.g., aarch64, x86-64, ...
-  std::string GetArchitectureString() override {
-    return "mips64";
-  }
-
-  std::string GetAssemblerCmdName() override {
-    // We assemble and link for MIPS64R6. See GetAssemblerParameters() for details.
-    return "gcc";
-  }
-
-  std::string GetAssemblerParameters() override {
-    // We assemble and link for MIPS64R6. The reason is that object files produced for MIPS64R6
-    // (and MIPS32R6) with the GNU assembler don't have correct final offsets in PC-relative
-    // branches in the .text section and so they require a relocation pass (there's a relocation
-    // section, .rela.text, that has the needed info to fix up the branches).
-    return " -march=mips64r6 -mmsa -Wa,--no-warn -Wl,-Ttext=0 -Wl,-e0 -nostdlib";
-  }
-
-  void Pad(std::vector<uint8_t>& data) override {
-    // The GNU linker unconditionally pads the code segment with NOPs to a size that is a multiple
-    // of 16 and there doesn't appear to be a way to suppress this padding. Our assembler doesn't
-    // pad, so, in order for two assembler outputs to match, we need to match the padding as well.
-    // NOP is encoded as four zero bytes on MIPS.
-    size_t pad_size = RoundUp(data.size(), 16u) - data.size();
-    data.insert(data.end(), pad_size, 0);
-  }
-
-  std::string GetDisassembleParameters() override {
-    return " -D -bbinary -mmips:isa64r6";
-  }
-
-  mips64::Mips64Assembler* CreateAssembler(ArenaAllocator* allocator) override {
-    return new (allocator) mips64::Mips64Assembler(allocator, instruction_set_features_.get());
-  }
-
-  void SetUpHelpers() override {
-    if (registers_.size() == 0) {
-      registers_.push_back(new mips64::GpuRegister(mips64::ZERO));
-      registers_.push_back(new mips64::GpuRegister(mips64::AT));
-      registers_.push_back(new mips64::GpuRegister(mips64::V0));
-      registers_.push_back(new mips64::GpuRegister(mips64::V1));
-      registers_.push_back(new mips64::GpuRegister(mips64::A0));
-      registers_.push_back(new mips64::GpuRegister(mips64::A1));
-      registers_.push_back(new mips64::GpuRegister(mips64::A2));
-      registers_.push_back(new mips64::GpuRegister(mips64::A3));
-      registers_.push_back(new mips64::GpuRegister(mips64::A4));
-      registers_.push_back(new mips64::GpuRegister(mips64::A5));
-      registers_.push_back(new mips64::GpuRegister(mips64::A6));
-      registers_.push_back(new mips64::GpuRegister(mips64::A7));
-      registers_.push_back(new mips64::GpuRegister(mips64::T0));
-      registers_.push_back(new mips64::GpuRegister(mips64::T1));
-      registers_.push_back(new mips64::GpuRegister(mips64::T2));
-      registers_.push_back(new mips64::GpuRegister(mips64::T3));
-      registers_.push_back(new mips64::GpuRegister(mips64::S0));
-      registers_.push_back(new mips64::GpuRegister(mips64::S1));
-      registers_.push_back(new mips64::GpuRegister(mips64::S2));
-      registers_.push_back(new mips64::GpuRegister(mips64::S3));
-      registers_.push_back(new mips64::GpuRegister(mips64::S4));
-      registers_.push_back(new mips64::GpuRegister(mips64::S5));
-      registers_.push_back(new mips64::GpuRegister(mips64::S6));
-      registers_.push_back(new mips64::GpuRegister(mips64::S7));
-      registers_.push_back(new mips64::GpuRegister(mips64::T8));
-      registers_.push_back(new mips64::GpuRegister(mips64::T9));
-      registers_.push_back(new mips64::GpuRegister(mips64::K0));
-      registers_.push_back(new mips64::GpuRegister(mips64::K1));
-      registers_.push_back(new mips64::GpuRegister(mips64::GP));
-      registers_.push_back(new mips64::GpuRegister(mips64::SP));
-      registers_.push_back(new mips64::GpuRegister(mips64::S8));
-      registers_.push_back(new mips64::GpuRegister(mips64::RA));
-
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::ZERO), "zero");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::AT), "at");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::V0), "v0");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::V1), "v1");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::A0), "a0");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::A1), "a1");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::A2), "a2");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::A3), "a3");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::A4), "a4");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::A5), "a5");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::A6), "a6");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::A7), "a7");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::T0), "t0");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::T1), "t1");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::T2), "t2");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::T3), "t3");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::S0), "s0");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::S1), "s1");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::S2), "s2");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::S3), "s3");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::S4), "s4");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::S5), "s5");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::S6), "s6");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::S7), "s7");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::T8), "t8");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::T9), "t9");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::K0), "k0");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::K1), "k1");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::GP), "gp");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::SP), "sp");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::S8), "s8");
-      secondary_register_names_.emplace(mips64::GpuRegister(mips64::RA), "ra");
-
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F0));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F1));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F2));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F3));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F4));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F5));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F6));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F7));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F8));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F9));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F10));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F11));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F12));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F13));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F14));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F15));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F16));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F17));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F18));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F19));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F20));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F21));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F22));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F23));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F24));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F25));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F26));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F27));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F28));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F29));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F30));
-      fp_registers_.push_back(new mips64::FpuRegister(mips64::F31));
-
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W0));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W1));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W2));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W3));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W4));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W5));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W6));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W7));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W8));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W9));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W10));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W11));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W12));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W13));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W14));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W15));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W16));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W17));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W18));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W19));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W20));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W21));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W22));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W23));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W24));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W25));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W26));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W27));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W28));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W29));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W30));
-      vec_registers_.push_back(new mips64::VectorRegister(mips64::W31));
-    }
-  }
-
-  void TearDown() override {
-    AssemblerTest::TearDown();
-    STLDeleteElements(&registers_);
-    STLDeleteElements(&fp_registers_);
-    STLDeleteElements(&vec_registers_);
-  }
-
-  std::vector<mips64::Mips64Label> GetAddresses() override {
-    UNIMPLEMENTED(FATAL) << "Feature not implemented yet";
-    UNREACHABLE();
-  }
-
-  std::vector<mips64::GpuRegister*> GetRegisters() override {
-    return registers_;
-  }
-
-  std::vector<mips64::FpuRegister*> GetFPRegisters() override {
-    return fp_registers_;
-  }
-
-  std::vector<mips64::VectorRegister*> GetVectorRegisters() override {
-    return vec_registers_;
-  }
-
-  uint32_t CreateImmediate(int64_t imm_value) override {
-    return imm_value;
-  }
-
-  std::string GetSecondaryRegisterName(const mips64::GpuRegister& reg) override {
-    CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
-    return secondary_register_names_[reg];
-  }
-
-  std::string RepeatInsn(size_t count, const std::string& insn) {
-    std::string result;
-    for (; count != 0u; --count) {
-      result += insn;
-    }
-    return result;
-  }
-
-  void BranchHelper(void (mips64::Mips64Assembler::*f)(mips64::Mips64Label*,
-                                                       bool),
-                    const std::string& instr_name,
-                    bool is_bare = false) {
-    mips64::Mips64Label label1, label2;
-    (Base::GetAssembler()->*f)(&label1, is_bare);
-    constexpr size_t kAdduCount1 = 63;
-    for (size_t i = 0; i != kAdduCount1; ++i) {
-      __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-    }
-    __ Bind(&label1);
-    (Base::GetAssembler()->*f)(&label2, is_bare);
-    constexpr size_t kAdduCount2 = 64;
-    for (size_t i = 0; i != kAdduCount2; ++i) {
-      __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-    }
-    __ Bind(&label2);
-    (Base::GetAssembler()->*f)(&label1, is_bare);
-    __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-
-    std::string expected =
-        ".set noreorder\n" +
-        instr_name + " 1f\n" +
-        RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
-        "1:\n" +
-        instr_name + " 2f\n" +
-        RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
-        "2:\n" +
-        instr_name + " 1b\n" +
-        "addu $zero, $zero, $zero\n";
-    DriverStr(expected, instr_name);
-  }
-
-  void BranchCondOneRegHelper(void (mips64::Mips64Assembler::*f)(mips64::GpuRegister,
-                                                                 mips64::Mips64Label*,
-                                                                 bool),
-                              const std::string& instr_name,
-                              bool is_bare = false) {
-    mips64::Mips64Label label;
-    (Base::GetAssembler()->*f)(mips64::A0, &label, is_bare);
-    constexpr size_t kAdduCount1 = 63;
-    for (size_t i = 0; i != kAdduCount1; ++i) {
-      __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-    }
-    __ Bind(&label);
-    constexpr size_t kAdduCount2 = 64;
-    for (size_t i = 0; i != kAdduCount2; ++i) {
-      __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-    }
-    (Base::GetAssembler()->*f)(mips64::A1, &label, is_bare);
-    __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-
-    std::string expected =
-        ".set noreorder\n" +
-        instr_name + " $a0, 1f\n" +
-        (is_bare ? "" : "nop\n") +
-        RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
-        "1:\n" +
-        RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
-        instr_name + " $a1, 1b\n" +
-        (is_bare ? "" : "nop\n") +
-        "addu $zero, $zero, $zero\n";
-    DriverStr(expected, instr_name);
-  }
-
-  void BranchCondTwoRegsHelper(void (mips64::Mips64Assembler::*f)(mips64::GpuRegister,
-                                                                  mips64::GpuRegister,
-                                                                  mips64::Mips64Label*,
-                                                                  bool),
-                               const std::string& instr_name,
-                               bool is_bare = false) {
-    mips64::Mips64Label label;
-    (Base::GetAssembler()->*f)(mips64::A0, mips64::A1, &label, is_bare);
-    constexpr size_t kAdduCount1 = 63;
-    for (size_t i = 0; i != kAdduCount1; ++i) {
-      __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-    }
-    __ Bind(&label);
-    constexpr size_t kAdduCount2 = 64;
-    for (size_t i = 0; i != kAdduCount2; ++i) {
-      __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-    }
-    (Base::GetAssembler()->*f)(mips64::A2, mips64::A3, &label, is_bare);
-    __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-
-    std::string expected =
-        ".set noreorder\n" +
-        instr_name + " $a0, $a1, 1f\n" +
-        (is_bare ? "" : "nop\n") +
-        RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
-        "1:\n" +
-        RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
-        instr_name + " $a2, $a3, 1b\n" +
-        (is_bare ? "" : "nop\n") +
-        "addu $zero, $zero, $zero\n";
-    DriverStr(expected, instr_name);
-  }
-
-  void BranchFpuCondHelper(void (mips64::Mips64Assembler::*f)(mips64::FpuRegister,
-                                                              mips64::Mips64Label*,
-                                                              bool),
-                           const std::string& instr_name,
-                           bool is_bare = false) {
-    mips64::Mips64Label label;
-    (Base::GetAssembler()->*f)(mips64::F0, &label, is_bare);
-    constexpr size_t kAdduCount1 = 63;
-    for (size_t i = 0; i != kAdduCount1; ++i) {
-      __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-    }
-    __ Bind(&label);
-    constexpr size_t kAdduCount2 = 64;
-    for (size_t i = 0; i != kAdduCount2; ++i) {
-      __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-    }
-    (Base::GetAssembler()->*f)(mips64::F31, &label, is_bare);
-    __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-
-    std::string expected =
-        ".set noreorder\n" +
-        instr_name + " $f0, 1f\n" +
-        (is_bare ? "" : "nop\n") +
-        RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") +
-        "1:\n" +
-        RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") +
-        instr_name + " $f31, 1b\n" +
-        (is_bare ? "" : "nop\n") +
-        "addu $zero, $zero, $zero\n";
-    DriverStr(expected, instr_name);
-  }
-
- private:
-  std::vector<mips64::GpuRegister*> registers_;
-  std::map<mips64::GpuRegister, std::string, MIPS64CpuRegisterCompare> secondary_register_names_;
-
-  std::vector<mips64::FpuRegister*> fp_registers_;
-  std::vector<mips64::VectorRegister*> vec_registers_;
-
-  std::unique_ptr<const Mips64InstructionSetFeatures> instruction_set_features_;
-};
-
-TEST_F(AssemblerMIPS64Test, Toolchain) {
-  EXPECT_TRUE(CheckTools());
-}
-
-///////////////////
-// FP Operations //
-///////////////////
-
-TEST_F(AssemblerMIPS64Test, AddS) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::AddS, "add.s ${reg1}, ${reg2}, ${reg3}"), "add.s");
-}
-
-TEST_F(AssemblerMIPS64Test, AddD) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::AddD, "add.d ${reg1}, ${reg2}, ${reg3}"), "add.d");
-}
-
-TEST_F(AssemblerMIPS64Test, SubS) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::SubS, "sub.s ${reg1}, ${reg2}, ${reg3}"), "sub.s");
-}
-
-TEST_F(AssemblerMIPS64Test, SubD) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::SubD, "sub.d ${reg1}, ${reg2}, ${reg3}"), "sub.d");
-}
-
-TEST_F(AssemblerMIPS64Test, MulS) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::MulS, "mul.s ${reg1}, ${reg2}, ${reg3}"), "mul.s");
-}
-
-TEST_F(AssemblerMIPS64Test, MulD) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::MulD, "mul.d ${reg1}, ${reg2}, ${reg3}"), "mul.d");
-}
-
-TEST_F(AssemblerMIPS64Test, DivS) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::DivS, "div.s ${reg1}, ${reg2}, ${reg3}"), "div.s");
-}
-
-TEST_F(AssemblerMIPS64Test, DivD) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::DivD, "div.d ${reg1}, ${reg2}, ${reg3}"), "div.d");
-}
-
-TEST_F(AssemblerMIPS64Test, SqrtS) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::SqrtS, "sqrt.s ${reg1}, ${reg2}"), "sqrt.s");
-}
-
-TEST_F(AssemblerMIPS64Test, SqrtD) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::SqrtD, "sqrt.d ${reg1}, ${reg2}"), "sqrt.d");
-}
-
-TEST_F(AssemblerMIPS64Test, AbsS) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::AbsS, "abs.s ${reg1}, ${reg2}"), "abs.s");
-}
-
-TEST_F(AssemblerMIPS64Test, AbsD) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::AbsD, "abs.d ${reg1}, ${reg2}"), "abs.d");
-}
-
-TEST_F(AssemblerMIPS64Test, MovS) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::MovS, "mov.s ${reg1}, ${reg2}"), "mov.s");
-}
-
-TEST_F(AssemblerMIPS64Test, MovD) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::MovD, "mov.d ${reg1}, ${reg2}"), "mov.d");
-}
-
-TEST_F(AssemblerMIPS64Test, NegS) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::NegS, "neg.s ${reg1}, ${reg2}"), "neg.s");
-}
-
-TEST_F(AssemblerMIPS64Test, NegD) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::NegD, "neg.d ${reg1}, ${reg2}"), "neg.d");
-}
-
-TEST_F(AssemblerMIPS64Test, RoundLS) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::RoundLS, "round.l.s ${reg1}, ${reg2}"), "round.l.s");
-}
-
-TEST_F(AssemblerMIPS64Test, RoundLD) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::RoundLD, "round.l.d ${reg1}, ${reg2}"), "round.l.d");
-}
-
-TEST_F(AssemblerMIPS64Test, RoundWS) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::RoundWS, "round.w.s ${reg1}, ${reg2}"), "round.w.s");
-}
-
-TEST_F(AssemblerMIPS64Test, RoundWD) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::RoundWD, "round.w.d ${reg1}, ${reg2}"), "round.w.d");
-}
-
-TEST_F(AssemblerMIPS64Test, CeilLS) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::CeilLS, "ceil.l.s ${reg1}, ${reg2}"), "ceil.l.s");
-}
-
-TEST_F(AssemblerMIPS64Test, CeilLD) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::CeilLD, "ceil.l.d ${reg1}, ${reg2}"), "ceil.l.d");
-}
-
-TEST_F(AssemblerMIPS64Test, CeilWS) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::CeilWS, "ceil.w.s ${reg1}, ${reg2}"), "ceil.w.s");
-}
-
-TEST_F(AssemblerMIPS64Test, CeilWD) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::CeilWD, "ceil.w.d ${reg1}, ${reg2}"), "ceil.w.d");
-}
-
-TEST_F(AssemblerMIPS64Test, FloorLS) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::FloorLS, "floor.l.s ${reg1}, ${reg2}"), "floor.l.s");
-}
-
-TEST_F(AssemblerMIPS64Test, FloorLD) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::FloorLD, "floor.l.d ${reg1}, ${reg2}"), "floor.l.d");
-}
-
-TEST_F(AssemblerMIPS64Test, FloorWS) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::FloorWS, "floor.w.s ${reg1}, ${reg2}"), "floor.w.s");
-}
-
-TEST_F(AssemblerMIPS64Test, FloorWD) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::FloorWD, "floor.w.d ${reg1}, ${reg2}"), "floor.w.d");
-}
-
-TEST_F(AssemblerMIPS64Test, SelS) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::SelS, "sel.s ${reg1}, ${reg2}, ${reg3}"), "sel.s");
-}
-
-TEST_F(AssemblerMIPS64Test, SelD) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::SelD, "sel.d ${reg1}, ${reg2}, ${reg3}"), "sel.d");
-}
-
-TEST_F(AssemblerMIPS64Test, SeleqzS) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::SeleqzS, "seleqz.s ${reg1}, ${reg2}, ${reg3}"),
-            "seleqz.s");
-}
-
-TEST_F(AssemblerMIPS64Test, SeleqzD) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::SeleqzD, "seleqz.d ${reg1}, ${reg2}, ${reg3}"),
-            "seleqz.d");
-}
-
-TEST_F(AssemblerMIPS64Test, SelnezS) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::SelnezS, "selnez.s ${reg1}, ${reg2}, ${reg3}"),
-            "selnez.s");
-}
-
-TEST_F(AssemblerMIPS64Test, SelnezD) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::SelnezD, "selnez.d ${reg1}, ${reg2}, ${reg3}"),
-            "selnez.d");
-}
-
-TEST_F(AssemblerMIPS64Test, RintS) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::RintS, "rint.s ${reg1}, ${reg2}"), "rint.s");
-}
-
-TEST_F(AssemblerMIPS64Test, RintD) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::RintD, "rint.d ${reg1}, ${reg2}"), "rint.d");
-}
-
-TEST_F(AssemblerMIPS64Test, ClassS) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::ClassS, "class.s ${reg1}, ${reg2}"), "class.s");
-}
-
-TEST_F(AssemblerMIPS64Test, ClassD) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::ClassD, "class.d ${reg1}, ${reg2}"), "class.d");
-}
-
-TEST_F(AssemblerMIPS64Test, MinS) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::MinS, "min.s ${reg1}, ${reg2}, ${reg3}"), "min.s");
-}
-
-TEST_F(AssemblerMIPS64Test, MinD) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::MinD, "min.d ${reg1}, ${reg2}, ${reg3}"), "min.d");
-}
-
-TEST_F(AssemblerMIPS64Test, MaxS) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::MaxS, "max.s ${reg1}, ${reg2}, ${reg3}"), "max.s");
-}
-
-TEST_F(AssemblerMIPS64Test, MaxD) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::MaxD, "max.d ${reg1}, ${reg2}, ${reg3}"), "max.d");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpUnS) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUnS, "cmp.un.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.un.s");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpEqS) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpEqS, "cmp.eq.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.eq.s");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpUeqS) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUeqS, "cmp.ueq.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.ueq.s");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpLtS) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpLtS, "cmp.lt.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.lt.s");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpUltS) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUltS, "cmp.ult.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.ult.s");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpLeS) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpLeS, "cmp.le.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.le.s");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpUleS) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUleS, "cmp.ule.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.ule.s");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpOrS) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpOrS, "cmp.or.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.or.s");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpUneS) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUneS, "cmp.une.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.une.s");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpNeS) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpNeS, "cmp.ne.s ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.ne.s");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpUnD) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUnD, "cmp.un.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.un.d");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpEqD) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpEqD, "cmp.eq.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.eq.d");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpUeqD) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUeqD, "cmp.ueq.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.ueq.d");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpLtD) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpLtD, "cmp.lt.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.lt.d");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpUltD) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUltD, "cmp.ult.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.ult.d");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpLeD) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpLeD, "cmp.le.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.le.d");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpUleD) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUleD, "cmp.ule.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.ule.d");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpOrD) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpOrD, "cmp.or.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.or.d");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpUneD) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpUneD, "cmp.une.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.une.d");
-}
-
-TEST_F(AssemblerMIPS64Test, CmpNeD) {
-  DriverStr(RepeatFFF(&mips64::Mips64Assembler::CmpNeD, "cmp.ne.d ${reg1}, ${reg2}, ${reg3}"),
-            "cmp.ne.d");
-}
-
-TEST_F(AssemblerMIPS64Test, CvtDL) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtdl, "cvt.d.l ${reg1}, ${reg2}"), "cvt.d.l");
-}
-
-TEST_F(AssemblerMIPS64Test, CvtDS) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtds, "cvt.d.s ${reg1}, ${reg2}"), "cvt.d.s");
-}
-
-TEST_F(AssemblerMIPS64Test, CvtDW) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtdw, "cvt.d.w ${reg1}, ${reg2}"), "cvt.d.w");
-}
-
-TEST_F(AssemblerMIPS64Test, CvtSL) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtsl, "cvt.s.l ${reg1}, ${reg2}"), "cvt.s.l");
-}
-
-TEST_F(AssemblerMIPS64Test, CvtSD) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtsd, "cvt.s.d ${reg1}, ${reg2}"), "cvt.s.d");
-}
-
-TEST_F(AssemblerMIPS64Test, CvtSW) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtsw, "cvt.s.w ${reg1}, ${reg2}"), "cvt.s.w");
-}
-
-TEST_F(AssemblerMIPS64Test, TruncWS) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::TruncWS, "trunc.w.s ${reg1}, ${reg2}"), "trunc.w.s");
-}
-
-TEST_F(AssemblerMIPS64Test, TruncWD) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::TruncWD, "trunc.w.d ${reg1}, ${reg2}"), "trunc.w.d");
-}
-
-TEST_F(AssemblerMIPS64Test, TruncLS) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::TruncLS, "trunc.l.s ${reg1}, ${reg2}"), "trunc.l.s");
-}
-
-TEST_F(AssemblerMIPS64Test, TruncLD) {
-  DriverStr(RepeatFF(&mips64::Mips64Assembler::TruncLD, "trunc.l.d ${reg1}, ${reg2}"), "trunc.l.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Mfc1) {
-  DriverStr(RepeatRF(&mips64::Mips64Assembler::Mfc1, "mfc1 ${reg1}, ${reg2}"), "Mfc1");
-}
-
-TEST_F(AssemblerMIPS64Test, Mfhc1) {
-  DriverStr(RepeatRF(&mips64::Mips64Assembler::Mfhc1, "mfhc1 ${reg1}, ${reg2}"), "Mfhc1");
-}
-
-TEST_F(AssemblerMIPS64Test, Mtc1) {
-  DriverStr(RepeatRF(&mips64::Mips64Assembler::Mtc1, "mtc1 ${reg1}, ${reg2}"), "Mtc1");
-}
-
-TEST_F(AssemblerMIPS64Test, Mthc1) {
-  DriverStr(RepeatRF(&mips64::Mips64Assembler::Mthc1, "mthc1 ${reg1}, ${reg2}"), "Mthc1");
-}
-
-TEST_F(AssemblerMIPS64Test, Dmfc1) {
-  DriverStr(RepeatRF(&mips64::Mips64Assembler::Dmfc1, "dmfc1 ${reg1}, ${reg2}"), "Dmfc1");
-}
-
-TEST_F(AssemblerMIPS64Test, Dmtc1) {
-  DriverStr(RepeatRF(&mips64::Mips64Assembler::Dmtc1, "dmtc1 ${reg1}, ${reg2}"), "Dmtc1");
-}
-
-TEST_F(AssemblerMIPS64Test, Lwc1) {
-  DriverStr(RepeatFRIb(&mips64::Mips64Assembler::Lwc1, -16, "lwc1 ${reg1}, {imm}(${reg2})"),
-            "lwc1");
-}
-
-TEST_F(AssemblerMIPS64Test, Ldc1) {
-  DriverStr(RepeatFRIb(&mips64::Mips64Assembler::Ldc1, -16, "ldc1 ${reg1}, {imm}(${reg2})"),
-            "ldc1");
-}
-
-TEST_F(AssemblerMIPS64Test, Swc1) {
-  DriverStr(RepeatFRIb(&mips64::Mips64Assembler::Swc1, -16, "swc1 ${reg1}, {imm}(${reg2})"),
-            "swc1");
-}
-
-TEST_F(AssemblerMIPS64Test, Sdc1) {
-  DriverStr(RepeatFRIb(&mips64::Mips64Assembler::Sdc1, -16, "sdc1 ${reg1}, {imm}(${reg2})"),
-            "sdc1");
-}
-
-//////////////
-// BRANCHES //
-//////////////
-
-TEST_F(AssemblerMIPS64Test, Jalr) {
-  DriverStr(".set noreorder\n" +
-            RepeatRRNoDupes(&mips64::Mips64Assembler::Jalr, "jalr ${reg1}, ${reg2}"), "jalr");
-}
-
-TEST_F(AssemblerMIPS64Test, Bc) {
-  BranchHelper(&mips64::Mips64Assembler::Bc, "Bc");
-}
-
-TEST_F(AssemblerMIPS64Test, Balc) {
-  BranchHelper(&mips64::Mips64Assembler::Balc, "Balc");
-}
-
-TEST_F(AssemblerMIPS64Test, Beqzc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Beqzc, "Beqzc");
-}
-
-TEST_F(AssemblerMIPS64Test, Bnezc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bnezc, "Bnezc");
-}
-
-TEST_F(AssemblerMIPS64Test, Bltzc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bltzc, "Bltzc");
-}
-
-TEST_F(AssemblerMIPS64Test, Bgezc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgezc, "Bgezc");
-}
-
-TEST_F(AssemblerMIPS64Test, Blezc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Blezc, "Blezc");
-}
-
-TEST_F(AssemblerMIPS64Test, Bgtzc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgtzc, "Bgtzc");
-}
-
-TEST_F(AssemblerMIPS64Test, Beqc) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Beqc, "Beqc");
-}
-
-TEST_F(AssemblerMIPS64Test, Bnec) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bnec, "Bnec");
-}
-
-TEST_F(AssemblerMIPS64Test, Bltc) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltc, "Bltc");
-}
-
-TEST_F(AssemblerMIPS64Test, Bgec) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgec, "Bgec");
-}
-
-TEST_F(AssemblerMIPS64Test, Bltuc) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltuc, "Bltuc");
-}
-
-TEST_F(AssemblerMIPS64Test, Bgeuc) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgeuc, "Bgeuc");
-}
-
-TEST_F(AssemblerMIPS64Test, Bc1eqz) {
-  BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1eqz, "Bc1eqz");
-}
-
-TEST_F(AssemblerMIPS64Test, Bc1nez) {
-  BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1nez, "Bc1nez");
-}
-
-TEST_F(AssemblerMIPS64Test, BareBc) {
-  BranchHelper(&mips64::Mips64Assembler::Bc, "Bc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBalc) {
-  BranchHelper(&mips64::Mips64Assembler::Balc, "Balc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBeqzc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Beqzc, "Beqzc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBnezc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bnezc, "Bnezc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBltzc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bltzc, "Bltzc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBgezc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgezc, "Bgezc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBlezc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Blezc, "Blezc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBgtzc) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgtzc, "Bgtzc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBeqc) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Beqc, "Beqc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBnec) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bnec, "Bnec", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBltc) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltc, "Bltc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBgec) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgec, "Bgec", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBltuc) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bltuc, "Bltuc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBgeuc) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bgeuc, "Bgeuc", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBc1eqz) {
-  BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1eqz, "Bc1eqz", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBc1nez) {
-  BranchFpuCondHelper(&mips64::Mips64Assembler::Bc1nez, "Bc1nez", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBeqz) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Beqz, "Beqz", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBnez) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bnez, "Bnez", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBltz) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bltz, "Bltz", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBgez) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgez, "Bgez", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBlez) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Blez, "Blez", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBgtz) {
-  BranchCondOneRegHelper(&mips64::Mips64Assembler::Bgtz, "Bgtz", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBeq) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Beq, "Beq", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, BareBne) {
-  BranchCondTwoRegsHelper(&mips64::Mips64Assembler::Bne, "Bne", /* is_bare= */ true);
-}
-
-TEST_F(AssemblerMIPS64Test, LongBeqc) {
-  mips64::Mips64Label label;
-  __ Beqc(mips64::A0, mips64::A1, &label);
-  constexpr uint32_t kAdduCount1 = (1u << 15) + 1;
-  for (uint32_t i = 0; i != kAdduCount1; ++i) {
-    __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-  }
-  __ Bind(&label);
-  constexpr uint32_t kAdduCount2 = (1u << 15) + 1;
-  for (uint32_t i = 0; i != kAdduCount2; ++i) {
-    __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-  }
-  __ Beqc(mips64::A2, mips64::A3, &label);
-
-  uint32_t offset_forward = 2 + kAdduCount1;  // 2: account for auipc and jic.
-  offset_forward <<= 2;
-  offset_forward += (offset_forward & 0x8000) << 1;  // Account for sign extension in jic.
-
-  uint32_t offset_back = -(kAdduCount2 + 1);  // 1: account for bnec.
-  offset_back <<= 2;
-  offset_back += (offset_back & 0x8000) << 1;  // Account for sign extension in jic.
-
-  std::ostringstream oss;
-  oss <<
-      ".set noreorder\n"
-      "bnec $a0, $a1, 1f\n"
-      "auipc $at, 0x" << std::hex << High16Bits(offset_forward) << "\n"
-      "jic $at, 0x" << std::hex << Low16Bits(offset_forward) << "\n"
-      "1:\n" <<
-      RepeatInsn(kAdduCount1, "addu $zero, $zero, $zero\n") <<
-      "2:\n" <<
-      RepeatInsn(kAdduCount2, "addu $zero, $zero, $zero\n") <<
-      "bnec $a2, $a3, 3f\n"
-      "auipc $at, 0x" << std::hex << High16Bits(offset_back) << "\n"
-      "jic $at, 0x" << std::hex << Low16Bits(offset_back) << "\n"
-      "3:\n";
-  std::string expected = oss.str();
-  DriverStr(expected, "LongBeqc");
-}
-
-TEST_F(AssemblerMIPS64Test, LongBeqzc) {
-  constexpr uint32_t kNopCount1 = (1u << 20) + 1;
-  constexpr uint32_t kNopCount2 = (1u << 20) + 1;
-  constexpr uint32_t kRequiredCapacity = (kNopCount1 + kNopCount2 + 6u) * 4u;
-  ASSERT_LT(__ GetBuffer()->Capacity(), kRequiredCapacity);
-  __ GetBuffer()->ExtendCapacity(kRequiredCapacity);
-  mips64::Mips64Label label;
-  __ Beqzc(mips64::A0, &label);
-  for (uint32_t i = 0; i != kNopCount1; ++i) {
-    __ Nop();
-  }
-  __ Bind(&label);
-  for (uint32_t i = 0; i != kNopCount2; ++i) {
-    __ Nop();
-  }
-  __ Beqzc(mips64::A2, &label);
-
-  uint32_t offset_forward = 2 + kNopCount1;  // 2: account for auipc and jic.
-  offset_forward <<= 2;
-  offset_forward += (offset_forward & 0x8000) << 1;  // Account for sign extension in jic.
-
-  uint32_t offset_back = -(kNopCount2 + 1);  // 1: account for bnezc.
-  offset_back <<= 2;
-  offset_back += (offset_back & 0x8000) << 1;  // Account for sign extension in jic.
-
-  // Note, we're using the ".fill" directive to tell the assembler to generate many NOPs
-  // instead of generating them ourselves in the source code. This saves test time.
-  std::ostringstream oss;
-  oss <<
-      ".set noreorder\n"
-      "bnezc $a0, 1f\n"
-      "auipc $at, 0x" << std::hex << High16Bits(offset_forward) << "\n"
-      "jic $at, 0x" << std::hex << Low16Bits(offset_forward) << "\n"
-      "1:\n" <<
-      ".fill 0x" << std::hex << kNopCount1 << " , 4, 0\n"
-      "2:\n" <<
-      ".fill 0x" << std::hex << kNopCount2 << " , 4, 0\n"
-      "bnezc $a2, 3f\n"
-      "auipc $at, 0x" << std::hex << High16Bits(offset_back) << "\n"
-      "jic $at, 0x" << std::hex << Low16Bits(offset_back) << "\n"
-      "3:\n";
-  std::string expected = oss.str();
-  DriverStr(expected, "LongBeqzc");
-}
-
-TEST_F(AssemblerMIPS64Test, LongBalc) {
-  constexpr uint32_t kNopCount1 = (1u << 25) + 1;
-  constexpr uint32_t kNopCount2 = (1u << 25) + 1;
-  constexpr uint32_t kRequiredCapacity = (kNopCount1 + kNopCount2 + 6u) * 4u;
-  ASSERT_LT(__ GetBuffer()->Capacity(), kRequiredCapacity);
-  __ GetBuffer()->ExtendCapacity(kRequiredCapacity);
-  mips64::Mips64Label label1, label2;
-  __ Balc(&label1);
-  for (uint32_t i = 0; i != kNopCount1; ++i) {
-    __ Nop();
-  }
-  __ Bind(&label1);
-  __ Balc(&label2);
-  for (uint32_t i = 0; i != kNopCount2; ++i) {
-    __ Nop();
-  }
-  __ Bind(&label2);
-  __ Balc(&label1);
-
-  uint32_t offset_forward1 = 2 + kNopCount1;  // 2: account for auipc and jialc.
-  offset_forward1 <<= 2;
-  offset_forward1 += (offset_forward1 & 0x8000) << 1;  // Account for sign extension in jialc.
-
-  uint32_t offset_forward2 = 2 + kNopCount2;  // 2: account for auipc and jialc.
-  offset_forward2 <<= 2;
-  offset_forward2 += (offset_forward2 & 0x8000) << 1;  // Account for sign extension in jialc.
-
-  uint32_t offset_back = -(2 + kNopCount2);  // 2: account for auipc and jialc.
-  offset_back <<= 2;
-  offset_back += (offset_back & 0x8000) << 1;  // Account for sign extension in jialc.
-
-  // Note, we're using the ".fill" directive to tell the assembler to generate many NOPs
-  // instead of generating them ourselves in the source code. This saves a few minutes
-  // of test time.
-  std::ostringstream oss;
-  oss <<
-      ".set noreorder\n"
-      "auipc $at, 0x" << std::hex << High16Bits(offset_forward1) << "\n"
-      "jialc $at, 0x" << std::hex << Low16Bits(offset_forward1) << "\n"
-      ".fill 0x" << std::hex << kNopCount1 << " , 4, 0\n"
-      "1:\n"
-      "auipc $at, 0x" << std::hex << High16Bits(offset_forward2) << "\n"
-      "jialc $at, 0x" << std::hex << Low16Bits(offset_forward2) << "\n"
-      ".fill 0x" << std::hex << kNopCount2 << " , 4, 0\n"
-      "2:\n"
-      "auipc $at, 0x" << std::hex << High16Bits(offset_back) << "\n"
-      "jialc $at, 0x" << std::hex << Low16Bits(offset_back) << "\n";
-  std::string expected = oss.str();
-  DriverStr(expected, "LongBalc");
-}
-
-//////////
-// MISC //
-//////////
-
-TEST_F(AssemblerMIPS64Test, Lwpc) {
-  // Lwpc() takes an unsigned 19-bit immediate, while the GNU assembler needs a signed offset,
-  // hence the sign extension from bit 18 with `imm - ((imm & 0x40000) << 1)`.
-  // The GNU assembler also wants the offset to be a multiple of 4, which it will shift right
-  // by 2 positions when encoding, hence `<< 2` to compensate for that shift.
-  // We capture the value of the immediate with `.set imm, {imm}` because the value is needed
-  // twice for the sign extension, but `{imm}` is substituted only once.
-  const char* code = ".set imm, {imm}\nlw ${reg}, ((imm - ((imm & 0x40000) << 1)) << 2)($pc)";
-  DriverStr(RepeatRIb(&mips64::Mips64Assembler::Lwpc, 19, code), "Lwpc");
-}
-
-TEST_F(AssemblerMIPS64Test, Lwupc) {
-  // The comment for the Lwpc test applies here as well.
-  const char* code = ".set imm, {imm}\nlwu ${reg}, ((imm - ((imm & 0x40000) << 1)) << 2)($pc)";
-  DriverStr(RepeatRIb(&mips64::Mips64Assembler::Lwupc, 19, code), "Lwupc");
-}
-
-TEST_F(AssemblerMIPS64Test, Ldpc) {
-  // The comment for the Lwpc test applies here as well.
-  const char* code = ".set imm, {imm}\nld ${reg}, ((imm - ((imm & 0x20000) << 1)) << 3)($pc)";
-  DriverStr(RepeatRIb(&mips64::Mips64Assembler::Ldpc, 18, code), "Ldpc");
-}
-
-TEST_F(AssemblerMIPS64Test, Auipc) {
-  DriverStr(RepeatRIb(&mips64::Mips64Assembler::Auipc, 16, "auipc ${reg}, {imm}"), "Auipc");
-}
-
-TEST_F(AssemblerMIPS64Test, Addiupc) {
-  // The comment from the Lwpc() test applies to this Addiupc() test as well.
-  const char* code = ".set imm, {imm}\naddiupc ${reg}, (imm - ((imm & 0x40000) << 1)) << 2";
-  DriverStr(RepeatRIb(&mips64::Mips64Assembler::Addiupc, 19, code), "Addiupc");
-}
-
-TEST_F(AssemblerMIPS64Test, Addu) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Addu, "addu ${reg1}, ${reg2}, ${reg3}"), "addu");
-}
-
-TEST_F(AssemblerMIPS64Test, Addiu) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Addiu, -16, "addiu ${reg1}, ${reg2}, {imm}"),
-            "addiu");
-}
-
-TEST_F(AssemblerMIPS64Test, Daddu) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Daddu, "daddu ${reg1}, ${reg2}, ${reg3}"), "daddu");
-}
-
-TEST_F(AssemblerMIPS64Test, Daddiu) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Daddiu, -16, "daddiu ${reg1}, ${reg2}, {imm}"),
-            "daddiu");
-}
-
-TEST_F(AssemblerMIPS64Test, Subu) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Subu, "subu ${reg1}, ${reg2}, ${reg3}"), "subu");
-}
-
-TEST_F(AssemblerMIPS64Test, Dsubu) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Dsubu, "dsubu ${reg1}, ${reg2}, ${reg3}"), "dsubu");
-}
-
-TEST_F(AssemblerMIPS64Test, MulR6) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::MulR6, "mul ${reg1}, ${reg2}, ${reg3}"), "mulR6");
-}
-
-TEST_F(AssemblerMIPS64Test, DivR6) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::DivR6, "div ${reg1}, ${reg2}, ${reg3}"), "divR6");
-}
-
-TEST_F(AssemblerMIPS64Test, ModR6) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::ModR6, "mod ${reg1}, ${reg2}, ${reg3}"), "modR6");
-}
-
-TEST_F(AssemblerMIPS64Test, DivuR6) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::DivuR6, "divu ${reg1}, ${reg2}, ${reg3}"),
-            "divuR6");
-}
-
-TEST_F(AssemblerMIPS64Test, ModuR6) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::ModuR6, "modu ${reg1}, ${reg2}, ${reg3}"),
-            "moduR6");
-}
-
-TEST_F(AssemblerMIPS64Test, Dmul) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Dmul, "dmul ${reg1}, ${reg2}, ${reg3}"), "dmul");
-}
-
-TEST_F(AssemblerMIPS64Test, Ddiv) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Ddiv, "ddiv ${reg1}, ${reg2}, ${reg3}"), "ddiv");
-}
-
-TEST_F(AssemblerMIPS64Test, Dmod) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Dmod, "dmod ${reg1}, ${reg2}, ${reg3}"), "dmod");
-}
-
-TEST_F(AssemblerMIPS64Test, Ddivu) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Ddivu, "ddivu ${reg1}, ${reg2}, ${reg3}"), "ddivu");
-}
-
-TEST_F(AssemblerMIPS64Test, Dmodu) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Dmodu, "dmodu ${reg1}, ${reg2}, ${reg3}"), "dmodu");
-}
-
-TEST_F(AssemblerMIPS64Test, And) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::And, "and ${reg1}, ${reg2}, ${reg3}"), "and");
-}
-
-TEST_F(AssemblerMIPS64Test, Andi) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Andi, 16, "andi ${reg1}, ${reg2}, {imm}"), "andi");
-}
-
-TEST_F(AssemblerMIPS64Test, Or) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Or, "or ${reg1}, ${reg2}, ${reg3}"), "or");
-}
-
-TEST_F(AssemblerMIPS64Test, Ori) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Ori, 16, "ori ${reg1}, ${reg2}, {imm}"), "ori");
-}
-
-TEST_F(AssemblerMIPS64Test, Xor) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Xor, "xor ${reg1}, ${reg2}, ${reg3}"), "xor");
-}
-
-TEST_F(AssemblerMIPS64Test, Xori) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Xori, 16, "xori ${reg1}, ${reg2}, {imm}"), "xori");
-}
-
-TEST_F(AssemblerMIPS64Test, Nor) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Nor, "nor ${reg1}, ${reg2}, ${reg3}"), "nor");
-}
-
-TEST_F(AssemblerMIPS64Test, Lb) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Lb, -16, "lb ${reg1}, {imm}(${reg2})"), "lb");
-}
-
-TEST_F(AssemblerMIPS64Test, Lh) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Lh, -16, "lh ${reg1}, {imm}(${reg2})"), "lh");
-}
-
-TEST_F(AssemblerMIPS64Test, Lw) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Lw, -16, "lw ${reg1}, {imm}(${reg2})"), "lw");
-}
-
-TEST_F(AssemblerMIPS64Test, Ld) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Ld, -16, "ld ${reg1}, {imm}(${reg2})"), "ld");
-}
-
-TEST_F(AssemblerMIPS64Test, Lbu) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Lbu, -16, "lbu ${reg1}, {imm}(${reg2})"), "lbu");
-}
-
-TEST_F(AssemblerMIPS64Test, Lhu) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Lhu, -16, "lhu ${reg1}, {imm}(${reg2})"), "lhu");
-}
-
-TEST_F(AssemblerMIPS64Test, Lwu) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Lwu, -16, "lwu ${reg1}, {imm}(${reg2})"), "lwu");
-}
-
-TEST_F(AssemblerMIPS64Test, Lui) {
-  DriverStr(RepeatRIb(&mips64::Mips64Assembler::Lui, 16, "lui ${reg}, {imm}"), "lui");
-}
-
-TEST_F(AssemblerMIPS64Test, Daui) {
-  std::vector<mips64::GpuRegister*> reg1_registers = GetRegisters();
-  std::vector<mips64::GpuRegister*> reg2_registers = GetRegisters();
-  reg2_registers.erase(reg2_registers.begin());  // reg2 can't be ZERO, remove it.
-  std::vector<int64_t> imms = CreateImmediateValuesBits(/* imm_bits= */ 16, /* as_uint= */ true);
-  WarnOnCombinations(reg1_registers.size() * reg2_registers.size() * imms.size());
-  std::ostringstream expected;
-  for (mips64::GpuRegister* reg1 : reg1_registers) {
-    for (mips64::GpuRegister* reg2 : reg2_registers) {
-      for (int64_t imm : imms) {
-        __ Daui(*reg1, *reg2, imm);
-        expected << "daui $" << *reg1 << ", $" << *reg2 << ", " << imm << "\n";
-      }
-    }
-  }
-  DriverStr(expected.str(), "daui");
-}
-
-TEST_F(AssemblerMIPS64Test, Dahi) {
-  DriverStr(RepeatRIb(&mips64::Mips64Assembler::Dahi, 16, "dahi ${reg}, ${reg}, {imm}"), "dahi");
-}
-
-TEST_F(AssemblerMIPS64Test, Dati) {
-  DriverStr(RepeatRIb(&mips64::Mips64Assembler::Dati, 16, "dati ${reg}, ${reg}, {imm}"), "dati");
-}
-
-TEST_F(AssemblerMIPS64Test, Sb) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sb, -16, "sb ${reg1}, {imm}(${reg2})"), "sb");
-}
-
-TEST_F(AssemblerMIPS64Test, Sh) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sh, -16, "sh ${reg1}, {imm}(${reg2})"), "sh");
-}
-
-TEST_F(AssemblerMIPS64Test, Sw) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sw, -16, "sw ${reg1}, {imm}(${reg2})"), "sw");
-}
-
-TEST_F(AssemblerMIPS64Test, Sd) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sd, -16, "sd ${reg1}, {imm}(${reg2})"), "sd");
-}
-
-TEST_F(AssemblerMIPS64Test, Slt) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Slt, "slt ${reg1}, ${reg2}, ${reg3}"), "slt");
-}
-
-TEST_F(AssemblerMIPS64Test, Sltu) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Sltu, "sltu ${reg1}, ${reg2}, ${reg3}"), "sltu");
-}
-
-TEST_F(AssemblerMIPS64Test, Slti) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Slti, -16, "slti ${reg1}, ${reg2}, {imm}"),
-            "slti");
-}
-
-TEST_F(AssemblerMIPS64Test, Sltiu) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sltiu, -16, "sltiu ${reg1}, ${reg2}, {imm}"),
-            "sltiu");
-}
-
-TEST_F(AssemblerMIPS64Test, Move) {
-  DriverStr(RepeatRR(&mips64::Mips64Assembler::Move, "or ${reg1}, ${reg2}, $zero"), "move");
-}
-
-TEST_F(AssemblerMIPS64Test, Clear) {
-  DriverStr(RepeatR(&mips64::Mips64Assembler::Clear, "or ${reg}, $zero, $zero"), "clear");
-}
-
-TEST_F(AssemblerMIPS64Test, Not) {
-  DriverStr(RepeatRR(&mips64::Mips64Assembler::Not, "nor ${reg1}, ${reg2}, $zero"), "not");
-}
-
-TEST_F(AssemblerMIPS64Test, Bitswap) {
-  DriverStr(RepeatRR(&mips64::Mips64Assembler::Bitswap, "bitswap ${reg1}, ${reg2}"), "bitswap");
-}
-
-TEST_F(AssemblerMIPS64Test, Dbitswap) {
-  DriverStr(RepeatRR(&mips64::Mips64Assembler::Dbitswap, "dbitswap ${reg1}, ${reg2}"), "dbitswap");
-}
-
-TEST_F(AssemblerMIPS64Test, Seb) {
-  DriverStr(RepeatRR(&mips64::Mips64Assembler::Seb, "seb ${reg1}, ${reg2}"), "seb");
-}
-
-TEST_F(AssemblerMIPS64Test, Seh) {
-  DriverStr(RepeatRR(&mips64::Mips64Assembler::Seh, "seh ${reg1}, ${reg2}"), "seh");
-}
-
-TEST_F(AssemblerMIPS64Test, Dsbh) {
-  DriverStr(RepeatRR(&mips64::Mips64Assembler::Dsbh, "dsbh ${reg1}, ${reg2}"), "dsbh");
-}
-
-TEST_F(AssemblerMIPS64Test, Dshd) {
-  DriverStr(RepeatRR(&mips64::Mips64Assembler::Dshd, "dshd ${reg1}, ${reg2}"), "dshd");
-}
-
-TEST_F(AssemblerMIPS64Test, Dext) {
-  std::vector<mips64::GpuRegister*> reg1_registers = GetRegisters();
-  std::vector<mips64::GpuRegister*> reg2_registers = GetRegisters();
-  WarnOnCombinations(reg1_registers.size() * reg2_registers.size() * 33 * 16);
-  std::ostringstream expected;
-  for (mips64::GpuRegister* reg1 : reg1_registers) {
-    for (mips64::GpuRegister* reg2 : reg2_registers) {
-      for (int32_t pos = 0; pos < 32; pos++) {
-        for (int32_t size = 1; size <= 32; size++) {
-          __ Dext(*reg1, *reg2, pos, size);
-          expected << "dext $" << *reg1 << ", $" << *reg2 << ", " << pos << ", " << size << "\n";
-        }
-      }
-    }
-  }
-
-  DriverStr(expected.str(), "Dext");
-}
-
-TEST_F(AssemblerMIPS64Test, Ins) {
-  std::vector<mips64::GpuRegister*> regs = GetRegisters();
-  WarnOnCombinations(regs.size() * regs.size() * 33 * 16);
-  std::string expected;
-  for (mips64::GpuRegister* reg1 : regs) {
-    for (mips64::GpuRegister* reg2 : regs) {
-      for (int32_t pos = 0; pos < 32; pos++) {
-        for (int32_t size = 1; pos + size <= 32; size++) {
-          __ Ins(*reg1, *reg2, pos, size);
-          std::ostringstream instr;
-          instr << "ins $" << *reg1 << ", $" << *reg2 << ", " << pos << ", " << size << "\n";
-          expected += instr.str();
-        }
-      }
-    }
-  }
-  DriverStr(expected, "Ins");
-}
-
-TEST_F(AssemblerMIPS64Test, DblIns) {
-  std::vector<mips64::GpuRegister*> reg1_registers = GetRegisters();
-  std::vector<mips64::GpuRegister*> reg2_registers = GetRegisters();
-  WarnOnCombinations(reg1_registers.size() * reg2_registers.size() * 65 * 32);
-  std::ostringstream expected;
-  for (mips64::GpuRegister* reg1 : reg1_registers) {
-    for (mips64::GpuRegister* reg2 : reg2_registers) {
-      for (int32_t pos = 0; pos < 64; pos++) {
-        for (int32_t size = 1; pos + size <= 64; size++) {
-          __ DblIns(*reg1, *reg2, pos, size);
-          expected << "dins $" << *reg1 << ", $" << *reg2 << ", " << pos << ", " << size << "\n";
-        }
-      }
-    }
-  }
-
-  DriverStr(expected.str(), "DblIns");
-}
-
-TEST_F(AssemblerMIPS64Test, Lsa) {
-  DriverStr(RepeatRRRIb(&mips64::Mips64Assembler::Lsa,
-                        2,
-                        "lsa ${reg1}, ${reg2}, ${reg3}, {imm}",
-                        1),
-            "lsa");
-}
-
-TEST_F(AssemblerMIPS64Test, Dlsa) {
-  DriverStr(RepeatRRRIb(&mips64::Mips64Assembler::Dlsa,
-                        2,
-                        "dlsa ${reg1}, ${reg2}, ${reg3}, {imm}",
-                        1),
-            "dlsa");
-}
-
-TEST_F(AssemblerMIPS64Test, Wsbh) {
-  DriverStr(RepeatRR(&mips64::Mips64Assembler::Wsbh, "wsbh ${reg1}, ${reg2}"), "wsbh");
-}
-
-TEST_F(AssemblerMIPS64Test, Sll) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sll, 5, "sll ${reg1}, ${reg2}, {imm}"), "sll");
-}
-
-TEST_F(AssemblerMIPS64Test, Srl) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Srl, 5, "srl ${reg1}, ${reg2}, {imm}"), "srl");
-}
-
-TEST_F(AssemblerMIPS64Test, Rotr) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Rotr, 5, "rotr ${reg1}, ${reg2}, {imm}"), "rotr");
-}
-
-TEST_F(AssemblerMIPS64Test, Sra) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sra, 5, "sra ${reg1}, ${reg2}, {imm}"), "sra");
-}
-
-TEST_F(AssemblerMIPS64Test, Sllv) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Sllv, "sllv ${reg1}, ${reg2}, ${reg3}"), "sllv");
-}
-
-TEST_F(AssemblerMIPS64Test, Srlv) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Srlv, "srlv ${reg1}, ${reg2}, ${reg3}"), "srlv");
-}
-
-TEST_F(AssemblerMIPS64Test, Rotrv) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Rotrv, "rotrv ${reg1}, ${reg2}, ${reg3}"), "rotrv");
-}
-
-TEST_F(AssemblerMIPS64Test, Srav) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Srav, "srav ${reg1}, ${reg2}, ${reg3}"), "srav");
-}
-
-TEST_F(AssemblerMIPS64Test, Dsll) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll, 5, "dsll ${reg1}, ${reg2}, {imm}"), "dsll");
-}
-
-TEST_F(AssemblerMIPS64Test, Dsrl) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl, 5, "dsrl ${reg1}, ${reg2}, {imm}"), "dsrl");
-}
-
-TEST_F(AssemblerMIPS64Test, Drotr) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Drotr, 5, "drotr ${reg1}, ${reg2}, {imm}"),
-            "drotr");
-}
-
-TEST_F(AssemblerMIPS64Test, Dsra) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra, 5, "dsra ${reg1}, ${reg2}, {imm}"), "dsra");
-}
-
-TEST_F(AssemblerMIPS64Test, Dsll32) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll32, 5, "dsll32 ${reg1}, ${reg2}, {imm}"),
-            "dsll32");
-}
-
-TEST_F(AssemblerMIPS64Test, Dsrl32) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl32, 5, "dsrl32 ${reg1}, ${reg2}, {imm}"),
-            "dsrl32");
-}
-
-TEST_F(AssemblerMIPS64Test, Drotr32) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Drotr32, 5, "drotr32 ${reg1}, ${reg2}, {imm}"),
-            "drotr32");
-}
-
-TEST_F(AssemblerMIPS64Test, Dsra32) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra32, 5, "dsra32 ${reg1}, ${reg2}, {imm}"),
-            "dsra32");
-}
-
-TEST_F(AssemblerMIPS64Test, Dsllv) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Dsllv, "dsllv ${reg1}, ${reg2}, ${reg3}"), "dsllv");
-}
-
-TEST_F(AssemblerMIPS64Test, Dsrlv) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Dsrlv, "dsrlv ${reg1}, ${reg2}, ${reg3}"), "dsrlv");
-}
-
-TEST_F(AssemblerMIPS64Test, Dsrav) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Dsrav, "dsrav ${reg1}, ${reg2}, ${reg3}"), "dsrav");
-}
-
-TEST_F(AssemblerMIPS64Test, Sc) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sc, -9, "sc ${reg1}, {imm}(${reg2})"), "sc");
-}
-
-TEST_F(AssemblerMIPS64Test, Scd) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Scd, -9, "scd ${reg1}, {imm}(${reg2})"), "scd");
-}
-
-TEST_F(AssemblerMIPS64Test, Ll) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Ll, -9, "ll ${reg1}, {imm}(${reg2})"), "ll");
-}
-
-TEST_F(AssemblerMIPS64Test, Lld) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Lld, -9, "lld ${reg1}, {imm}(${reg2})"), "lld");
-}
-
-TEST_F(AssemblerMIPS64Test, Seleqz) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Seleqz, "seleqz ${reg1}, ${reg2}, ${reg3}"),
-            "seleqz");
-}
-
-TEST_F(AssemblerMIPS64Test, Selnez) {
-  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Selnez, "selnez ${reg1}, ${reg2}, ${reg3}"),
-            "selnez");
-}
-
-TEST_F(AssemblerMIPS64Test, Clz) {
-  DriverStr(RepeatRR(&mips64::Mips64Assembler::Clz, "clz ${reg1}, ${reg2}"), "clz");
-}
-
-TEST_F(AssemblerMIPS64Test, Clo) {
-  DriverStr(RepeatRR(&mips64::Mips64Assembler::Clo, "clo ${reg1}, ${reg2}"), "clo");
-}
-
-TEST_F(AssemblerMIPS64Test, Dclz) {
-  DriverStr(RepeatRR(&mips64::Mips64Assembler::Dclz, "dclz ${reg1}, ${reg2}"), "dclz");
-}
-
-TEST_F(AssemblerMIPS64Test, Dclo) {
-  DriverStr(RepeatRR(&mips64::Mips64Assembler::Dclo, "dclo ${reg1}, ${reg2}"), "dclo");
-}
-
-TEST_F(AssemblerMIPS64Test, LoadFromOffset) {
-  __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A0, 0);
-  __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, 0);
-  __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, 1);
-  __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, 256);
-  __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, 1000);
-  __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, 0x7FFF);
-  __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, 0x8000);
-  __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, 0x8001);
-  __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, 0x10000);
-  __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, 0x12345678);
-  __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, -256);
-  __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, -32768);
-  __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, 0xABCDEF00);
-  __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, 0x7FFFFFFE);
-  __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, 0x7FFFFFFF);
-  __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, 0x80000000);
-  __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, 0x80000001);
-
-  __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A0, 0);
-  __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 0);
-  __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 1);
-  __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 256);
-  __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 1000);
-  __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 0x7FFF);
-  __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 0x8000);
-  __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 0x8001);
-  __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 0x10000);
-  __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 0x12345678);
-  __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, -256);
-  __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, -32768);
-  __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 0xABCDEF00);
-  __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 0x7FFFFFFE);
-  __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 0x7FFFFFFF);
-  __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 0x80000000);
-  __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 0x80000001);
-
-  __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A0, 0);
-  __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 0);
-  __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 2);
-  __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 256);
-  __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 1000);
-  __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 0x7FFE);
-  __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 0x8000);
-  __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 0x8002);
-  __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 0x10000);
-  __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 0x12345678);
-  __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, -256);
-  __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, -32768);
-  __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 0xABCDEF00);
-  __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 0x7FFFFFFC);
-  __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 0x7FFFFFFE);
-  __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 0x80000000);
-  __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 0x80000002);
-
-  __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A0, 0);
-  __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 0);
-  __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 2);
-  __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 256);
-  __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 1000);
-  __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 0x7FFE);
-  __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 0x8000);
-  __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 0x8002);
-  __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 0x10000);
-  __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 0x12345678);
-  __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, -256);
-  __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, -32768);
-  __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 0xABCDEF00);
-  __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 0x7FFFFFFC);
-  __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 0x7FFFFFFE);
-  __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 0x80000000);
-  __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 0x80000002);
-
-  __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A0, 0);
-  __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 0);
-  __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 4);
-  __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 256);
-  __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 1000);
-  __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 0x7FFC);
-  __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 0x8000);
-  __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 0x8004);
-  __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 0x10000);
-  __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 0x12345678);
-  __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, -256);
-  __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, -32768);
-  __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 0xABCDEF00);
-  __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 0x7FFFFFF8);
-  __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 0x7FFFFFFC);
-  __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 0x80000000);
-  __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 0x80000004);
-
-  __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A0, 0);
-  __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, 0);
-  __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, 4);
-  __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, 256);
-  __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, 1000);
-  __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, 0x7FFC);
-  __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, 0x8000);
-  __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, 0x8004);
-  __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, 0x10000);
-  __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, 0x12345678);
-  __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, -256);
-  __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, -32768);
-  __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, 0xABCDEF00);
-  __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, 0x7FFFFFF8);
-  __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, 0x7FFFFFFC);
-  __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, 0x80000000);
-  __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, 0x80000004);
-
-  __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A0, 0);
-  __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0);
-  __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 4);
-  __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 256);
-  __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 1000);
-  __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0x7FFC);
-  __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0x8000);
-  __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0x8004);
-  __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0x10000);
-  __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0x27FFC);
-  __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0x12345678);
-  __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, -256);
-  __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, -32768);
-  __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0xABCDEF00);
-  __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0x7FFFFFF8);
-  __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0x7FFFFFFC);
-  __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0x80000000);
-  __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0x80000004);
-
-  const char* expected =
-      "lb $a0, 0($a0)\n"
-      "lb $a0, 0($a1)\n"
-      "lb $a0, 1($a1)\n"
-      "lb $a0, 256($a1)\n"
-      "lb $a0, 1000($a1)\n"
-      "lb $a0, 0x7FFF($a1)\n"
-      "daddiu $at, $a1, 0x7FF8\n"
-      "lb $a0, 8($at)\n"
-      "daddiu $at, $a1, 32760\n"
-      "lb $a0, 9($at)\n"
-      "daui $at, $a1, 1\n"
-      "lb $a0, 0($at)\n"
-      "daui $at, $a1, 0x1234\n"
-      "lb $a0, 0x5678($at)\n"
-      "lb $a0, -256($a1)\n"
-      "lb $a0, -32768($a1)\n"
-      "daui $at, $a1, 0xABCE\n"
-      "lb $a0, -4352($at)\n"
-      "daui $at, $a1, 32768\n"
-      "dahi $at, $at, 1\n"
-      "lb $a0, -2($at)\n"
-      "daui $at, $a1, 32768\n"
-      "dahi $at, $at, 1\n"
-      "lb $a0, -1($at)\n"
-      "daui $at, $a1, 32768\n"
-      "lb $a0, 0($at)\n"
-      "daui $at, $a1, 32768\n"
-      "lb $a0, 1($at)\n"
-
-      "lbu $a0, 0($a0)\n"
-      "lbu $a0, 0($a1)\n"
-      "lbu $a0, 1($a1)\n"
-      "lbu $a0, 256($a1)\n"
-      "lbu $a0, 1000($a1)\n"
-      "lbu $a0, 0x7FFF($a1)\n"
-      "daddiu $at, $a1, 0x7FF8\n"
-      "lbu $a0, 8($at)\n"
-      "daddiu $at, $a1, 32760\n"
-      "lbu $a0, 9($at)\n"
-      "daui $at, $a1, 1\n"
-      "lbu $a0, 0($at)\n"
-      "daui $at, $a1, 0x1234\n"
-      "lbu $a0, 0x5678($at)\n"
-      "lbu $a0, -256($a1)\n"
-      "lbu $a0, -32768($a1)\n"
-      "daui $at, $a1, 0xABCE\n"
-      "lbu $a0, -4352($at)\n"
-      "daui $at, $a1, 32768\n"
-      "dahi $at, $at, 1\n"
-      "lbu $a0, -2($at)\n"
-      "daui $at, $a1, 32768\n"
-      "dahi $at, $at, 1\n"
-      "lbu $a0, -1($at)\n"
-      "daui $at, $a1, 32768\n"
-      "lbu $a0, 0($at)\n"
-      "daui $at, $a1, 32768\n"
-      "lbu $a0, 1($at)\n"
-
-      "lh $a0, 0($a0)\n"
-      "lh $a0, 0($a1)\n"
-      "lh $a0, 2($a1)\n"
-      "lh $a0, 256($a1)\n"
-      "lh $a0, 1000($a1)\n"
-      "lh $a0, 0x7FFE($a1)\n"
-      "daddiu $at, $a1, 0x7FF8\n"
-      "lh $a0, 8($at)\n"
-      "daddiu $at, $a1, 32760\n"
-      "lh $a0, 10($at)\n"
-      "daui $at, $a1, 1\n"
-      "lh $a0, 0($at)\n"
-      "daui $at, $a1, 0x1234\n"
-      "lh $a0, 0x5678($at)\n"
-      "lh $a0, -256($a1)\n"
-      "lh $a0, -32768($a1)\n"
-      "daui $at, $a1, 0xABCE\n"
-      "lh $a0, -4352($at)\n"
-      "daui $at, $a1, 32768\n"
-      "dahi $at, $at, 1\n"
-      "lh $a0, -4($at)\n"
-      "daui $at, $a1, 32768\n"
-      "dahi $at, $at, 1\n"
-      "lh $a0, -2($at)\n"
-      "daui $at, $a1, 32768\n"
-      "lh $a0, 0($at)\n"
-      "daui $at, $a1, 32768\n"
-      "lh $a0, 2($at)\n"
-
-      "lhu $a0, 0($a0)\n"
-      "lhu $a0, 0($a1)\n"
-      "lhu $a0, 2($a1)\n"
-      "lhu $a0, 256($a1)\n"
-      "lhu $a0, 1000($a1)\n"
-      "lhu $a0, 0x7FFE($a1)\n"
-      "daddiu $at, $a1, 0x7FF8\n"
-      "lhu $a0, 8($at)\n"
-      "daddiu $at, $a1, 32760\n"
-      "lhu $a0, 10($at)\n"
-      "daui $at, $a1, 1\n"
-      "lhu $a0, 0($at)\n"
-      "daui $at, $a1, 0x1234\n"
-      "lhu $a0, 0x5678($at)\n"
-      "lhu $a0, -256($a1)\n"
-      "lhu $a0, -32768($a1)\n"
-      "daui $at, $a1, 0xABCE\n"
-      "lhu $a0, -4352($at)\n"
-      "daui $at, $a1, 32768\n"
-      "dahi $at, $at, 1\n"
-      "lhu $a0, -4($at)\n"
-      "daui $at, $a1, 32768\n"
-      "dahi $at, $at, 1\n"
-      "lhu $a0, -2($at)\n"
-      "daui $at, $a1, 32768\n"
-      "lhu $a0, 0($at)\n"
-      "daui $at, $a1, 32768\n"
-      "lhu $a0, 2($at)\n"
-
-      "lw $a0, 0($a0)\n"
-      "lw $a0, 0($a1)\n"
-      "lw $a0, 4($a1)\n"
-      "lw $a0, 256($a1)\n"
-      "lw $a0, 1000($a1)\n"
-      "lw $a0, 0x7FFC($a1)\n"
-      "daddiu $at, $a1, 0x7FF8\n"
-      "lw $a0, 8($at)\n"
-      "daddiu $at, $a1, 32760\n"
-      "lw $a0, 12($at)\n"
-      "daui $at, $a1, 1\n"
-      "lw $a0, 0($at)\n"
-      "daui $at, $a1, 0x1234\n"
-      "lw $a0, 0x5678($at)\n"
-      "lw $a0, -256($a1)\n"
-      "lw $a0, -32768($a1)\n"
-      "daui $at, $a1, 0xABCE\n"
-      "lw $a0, -4352($at)\n"
-      "daui $at, $a1, 32768\n"
-      "dahi $at, $at, 1\n"
-      "lw $a0, -8($at)\n"
-      "daui $at, $a1, 32768\n"
-      "dahi $at, $at, 1\n"
-      "lw $a0, -4($at)\n"
-      "daui $at, $a1, 32768\n"
-      "lw $a0, 0($at)\n"
-      "daui $at, $a1, 32768\n"
-      "lw $a0, 4($at)\n"
-
-      "lwu $a0, 0($a0)\n"
-      "lwu $a0, 0($a1)\n"
-      "lwu $a0, 4($a1)\n"
-      "lwu $a0, 256($a1)\n"
-      "lwu $a0, 1000($a1)\n"
-      "lwu $a0, 0x7FFC($a1)\n"
-      "daddiu $at, $a1, 0x7FF8\n"
-      "lwu $a0, 8($at)\n"
-      "daddiu $at, $a1, 32760\n"
-      "lwu $a0, 12($at)\n"
-      "daui $at, $a1, 1\n"
-      "lwu $a0, 0($at)\n"
-      "daui $at, $a1, 0x1234\n"
-      "lwu $a0, 0x5678($at)\n"
-      "lwu $a0, -256($a1)\n"
-      "lwu $a0, -32768($a1)\n"
-      "daui $at, $a1, 0xABCE\n"
-      "lwu $a0, -4352($at)\n"
-      "daui $at, $a1, 32768\n"
-      "dahi $at, $at, 1\n"
-      "lwu $a0, -8($at)\n"
-      "daui $at, $a1, 32768\n"
-      "dahi $at, $at, 1\n"
-      "lwu $a0, -4($at)\n"
-      "daui $at, $a1, 32768\n"
-      "lwu $a0, 0($at)\n"
-      "daui $at, $a1, 32768\n"
-      "lwu $a0, 4($at)\n"
-
-      "ld $a0, 0($a0)\n"
-      "ld $a0, 0($a1)\n"
-      "lwu $a0, 4($a1)\n"
-      "lwu $t3, 8($a1)\n"
-      "dinsu $a0, $t3, 32, 32\n"
-      "ld $a0, 256($a1)\n"
-      "ld $a0, 1000($a1)\n"
-      "daddiu $at, $a1, 32760\n"
-      "lwu $a0, 4($at)\n"
-      "lwu $t3, 8($at)\n"
-      "dinsu $a0, $t3, 32, 32\n"
-      "daddiu $at, $a1, 32760\n"
-      "ld $a0, 8($at)\n"
-      "daddiu $at, $a1, 32760\n"
-      "lwu $a0, 12($at)\n"
-      "lwu $t3, 16($at)\n"
-      "dinsu $a0, $t3, 32, 32\n"
-      "daui $at, $a1, 1\n"
-      "ld $a0, 0($at)\n"
-      "daui $at, $a1, 2\n"
-      "daddiu $at, $at, 8\n"
-      "lwu $a0, 0x7ff4($at)\n"
-      "lwu $t3, 0x7ff8($at)\n"
-      "dinsu $a0, $t3, 32, 32\n"
-      "daui $at, $a1, 0x1234\n"
-      "ld $a0, 0x5678($at)\n"
-      "ld $a0, -256($a1)\n"
-      "ld $a0, -32768($a1)\n"
-      "daui $at, $a1, 0xABCE\n"
-      "ld $a0, -4352($at)\n"
-      "daui $at, $a1, 32768\n"
-      "dahi $at, $at, 1\n"
-      "ld $a0, -8($at)\n"
-      "daui $at, $a1, 32768\n"
-      "dahi $at, $at, 1\n"
-      "lwu $a0, -4($at)\n"
-      "lwu $t3, 0($at)\n"
-      "dinsu $a0, $t3, 32, 32\n"
-      "daui $at, $a1, 32768\n"
-      "ld $a0, 0($at)\n"
-      "daui $at, $a1, 32768\n"
-      "lwu $a0, 4($at)\n"
-      "lwu $t3, 8($at)\n"
-      "dinsu $a0, $t3, 32, 32\n";
-  DriverStr(expected, "LoadFromOffset");
-}
-
-TEST_F(AssemblerMIPS64Test, LoadFpuFromOffset) {
-  __ LoadFpuFromOffset(mips64::kLoadWord, mips64::F0, mips64::A0, 0);
-  __ LoadFpuFromOffset(mips64::kLoadWord, mips64::F0, mips64::A0, 4);
-  __ LoadFpuFromOffset(mips64::kLoadWord, mips64::F0, mips64::A0, 256);
-  __ LoadFpuFromOffset(mips64::kLoadWord, mips64::F0, mips64::A0, 0x7FFC);
-  __ LoadFpuFromOffset(mips64::kLoadWord, mips64::F0, mips64::A0, 0x8000);
-  __ LoadFpuFromOffset(mips64::kLoadWord, mips64::F0, mips64::A0, 0x8004);
-  __ LoadFpuFromOffset(mips64::kLoadWord, mips64::F0, mips64::A0, 0x10000);
-  __ LoadFpuFromOffset(mips64::kLoadWord, mips64::F0, mips64::A0, 0x12345678);
-  __ LoadFpuFromOffset(mips64::kLoadWord, mips64::F0, mips64::A0, -256);
-  __ LoadFpuFromOffset(mips64::kLoadWord, mips64::F0, mips64::A0, -32768);
-  __ LoadFpuFromOffset(mips64::kLoadWord, mips64::F0, mips64::A0, 0xABCDEF00);
-
-  __ LoadFpuFromOffset(mips64::kLoadDoubleword, mips64::F0, mips64::A0, 0);
-  __ LoadFpuFromOffset(mips64::kLoadDoubleword, mips64::F0, mips64::A0, 4);
-  __ LoadFpuFromOffset(mips64::kLoadDoubleword, mips64::F0, mips64::A0, 256);
-  __ LoadFpuFromOffset(mips64::kLoadDoubleword, mips64::F0, mips64::A0, 0x7FFC);
-  __ LoadFpuFromOffset(mips64::kLoadDoubleword, mips64::F0, mips64::A0, 0x8000);
-  __ LoadFpuFromOffset(mips64::kLoadDoubleword, mips64::F0, mips64::A0, 0x8004);
-  __ LoadFpuFromOffset(mips64::kLoadDoubleword, mips64::F0, mips64::A0, 0x10000);
-  __ LoadFpuFromOffset(mips64::kLoadDoubleword, mips64::F0, mips64::A0, 0x12345678);
-  __ LoadFpuFromOffset(mips64::kLoadDoubleword, mips64::F0, mips64::A0, -256);
-  __ LoadFpuFromOffset(mips64::kLoadDoubleword, mips64::F0, mips64::A0, -32768);
-  __ LoadFpuFromOffset(mips64::kLoadDoubleword, mips64::F0, mips64::A0, 0xABCDEF00);
-
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 0);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 1);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 2);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 4);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 8);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 511);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 512);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 513);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 514);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 516);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 1022);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 1024);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 1025);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 1026);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 1028);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 2044);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 2048);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 2049);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 2050);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 2052);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 4088);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 4096);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 4097);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 4098);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 4100);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 4104);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 0x7FFC);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 0x8000);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 0x10000);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 0x12345678);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 0x12350078);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, -256);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, -511);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, -513);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, -1022);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, -1026);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, -2044);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, -2052);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, -4096);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, -4104);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, -32768);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 0xABCDEF00);
-  __ LoadFpuFromOffset(mips64::kLoadQuadword, mips64::F0, mips64::A0, 0x7FFFABCD);
-
-  const char* expected =
-      "lwc1 $f0, 0($a0)\n"
-      "lwc1 $f0, 4($a0)\n"
-      "lwc1 $f0, 256($a0)\n"
-      "lwc1 $f0, 0x7FFC($a0)\n"
-      "daddiu $at, $a0, 32760 # 0x7FF8\n"
-      "lwc1 $f0, 8($at)\n"
-      "daddiu $at, $a0, 32760 # 0x7FF8\n"
-      "lwc1 $f0, 12($at)\n"
-      "daui $at, $a0, 1\n"
-      "lwc1 $f0, 0($at)\n"
-      "daui $at, $a0, 4660 # 0x1234\n"
-      "lwc1 $f0, 22136($at) # 0x5678\n"
-      "lwc1 $f0, -256($a0)\n"
-      "lwc1 $f0, -32768($a0)\n"
-      "daui $at, $a0, 0xABCE\n"
-      "lwc1 $f0, -0x1100($at) # 0xEF00\n"
-
-      "ldc1 $f0, 0($a0)\n"
-      "lwc1 $f0, 4($a0)\n"
-      "lw $t3, 8($a0)\n"
-      "mthc1 $t3, $f0\n"
-      "ldc1 $f0, 256($a0)\n"
-      "daddiu $at, $a0, 32760 # 0x7FF8\n"
-      "lwc1 $f0, 4($at)\n"
-      "lw $t3, 8($at)\n"
-      "mthc1 $t3, $f0\n"
-      "daddiu $at, $a0, 32760 # 0x7FF8\n"
-      "ldc1 $f0, 8($at)\n"
-      "daddiu $at, $a0, 32760 # 0x7FF8\n"
-      "lwc1 $f0, 12($at)\n"
-      "lw $t3, 16($at)\n"
-      "mthc1 $t3, $f0\n"
-      "daui $at, $a0, 1\n"
-      "ldc1 $f0, 0($at)\n"
-      "daui $at, $a0, 4660 # 0x1234\n"
-      "ldc1 $f0, 22136($at) # 0x5678\n"
-      "ldc1 $f0, -256($a0)\n"
-      "ldc1 $f0, -32768($a0)\n"
-      "daui $at, $a0, 0xABCE\n"
-      "ldc1 $f0, -0x1100($at) # 0xEF00\n"
-
-      "ld.d $w0, 0($a0)\n"
-      "ld.b $w0, 1($a0)\n"
-      "ld.h $w0, 2($a0)\n"
-      "ld.w $w0, 4($a0)\n"
-      "ld.d $w0, 8($a0)\n"
-      "ld.b $w0, 511($a0)\n"
-      "ld.d $w0, 512($a0)\n"
-      "daddiu $at, $a0, 513\n"
-      "ld.b $w0, 0($at)\n"
-      "ld.h $w0, 514($a0)\n"
-      "ld.w $w0, 516($a0)\n"
-      "ld.h $w0, 1022($a0)\n"
-      "ld.d $w0, 1024($a0)\n"
-      "daddiu $at, $a0, 1025\n"
-      "ld.b $w0, 0($at)\n"
-      "daddiu $at, $a0, 1026\n"
-      "ld.h $w0, 0($at)\n"
-      "ld.w $w0, 1028($a0)\n"
-      "ld.w $w0, 2044($a0)\n"
-      "ld.d $w0, 2048($a0)\n"
-      "daddiu $at, $a0, 2049\n"
-      "ld.b $w0, 0($at)\n"
-      "daddiu $at, $a0, 2050\n"
-      "ld.h $w0, 0($at)\n"
-      "daddiu $at, $a0, 2052\n"
-      "ld.w $w0, 0($at)\n"
-      "ld.d $w0, 4088($a0)\n"
-      "daddiu $at, $a0, 4096\n"
-      "ld.d $w0, 0($at)\n"
-      "daddiu $at, $a0, 4097\n"
-      "ld.b $w0, 0($at)\n"
-      "daddiu $at, $a0, 4098\n"
-      "ld.h $w0, 0($at)\n"
-      "daddiu $at, $a0, 4100\n"
-      "ld.w $w0, 0($at)\n"
-      "daddiu $at, $a0, 4104\n"
-      "ld.d $w0, 0($at)\n"
-      "daddiu $at, $a0, 0x7FFC\n"
-      "ld.w $w0, 0($at)\n"
-      "daddiu $at, $a0, 0x7FF8\n"
-      "ld.d $w0, 8($at)\n"
-      "daui $at, $a0, 0x1\n"
-      "ld.d $w0, 0($at)\n"
-      "daui $at, $a0, 0x1234\n"
-      "daddiu $at, $at, 0x6000\n"
-      "ld.d $w0, -2440($at) # 0xF678\n"
-      "daui $at, $a0, 0x1235\n"
-      "ld.d $w0, 0x78($at)\n"
-      "ld.d $w0, -256($a0)\n"
-      "ld.b $w0, -511($a0)\n"
-      "daddiu $at, $a0, -513\n"
-      "ld.b $w0, 0($at)\n"
-      "ld.h $w0, -1022($a0)\n"
-      "daddiu $at, $a0, -1026\n"
-      "ld.h $w0, 0($at)\n"
-      "ld.w $w0, -2044($a0)\n"
-      "daddiu $at, $a0, -2052\n"
-      "ld.w $w0, 0($at)\n"
-      "ld.d $w0, -4096($a0)\n"
-      "daddiu $at, $a0, -4104\n"
-      "ld.d $w0, 0($at)\n"
-      "daddiu $at, $a0, -32768\n"
-      "ld.d $w0, 0($at)\n"
-      "daui $at, $a0, 0xABCE\n"
-      "daddiu $at, $at, -8192 # 0xE000\n"
-      "ld.d $w0, 0xF00($at)\n"
-      "daui $at, $a0, 0x8000\n"
-      "dahi $at, $at, 1\n"
-      "daddiu $at, $at, -21504 # 0xAC00\n"
-      "ld.b $w0, -51($at) # 0xFFCD\n";
-  DriverStr(expected, "LoadFpuFromOffset");
-}
-
-TEST_F(AssemblerMIPS64Test, StoreToOffset) {
-  __ StoreToOffset(mips64::kStoreByte, mips64::A0, mips64::A0, 0);
-  __ StoreToOffset(mips64::kStoreByte, mips64::A0, mips64::A1, 0);
-  __ StoreToOffset(mips64::kStoreByte, mips64::A0, mips64::A1, 1);
-  __ StoreToOffset(mips64::kStoreByte, mips64::A0, mips64::A1, 256);
-  __ StoreToOffset(mips64::kStoreByte, mips64::A0, mips64::A1, 1000);
-  __ StoreToOffset(mips64::kStoreByte, mips64::A0, mips64::A1, 0x7FFF);
-  __ StoreToOffset(mips64::kStoreByte, mips64::A0, mips64::A1, 0x8000);
-  __ StoreToOffset(mips64::kStoreByte, mips64::A0, mips64::A1, 0x8001);
-  __ StoreToOffset(mips64::kStoreByte, mips64::A0, mips64::A1, 0x10000);
-  __ StoreToOffset(mips64::kStoreByte, mips64::A0, mips64::A1, 0x12345678);
-  __ StoreToOffset(mips64::kStoreByte, mips64::A0, mips64::A1, -256);
-  __ StoreToOffset(mips64::kStoreByte, mips64::A0, mips64::A1, -32768);
-  __ StoreToOffset(mips64::kStoreByte, mips64::A0, mips64::A1, 0xABCDEF00);
-
-  __ StoreToOffset(mips64::kStoreHalfword, mips64::A0, mips64::A0, 0);
-  __ StoreToOffset(mips64::kStoreHalfword, mips64::A0, mips64::A1, 0);
-  __ StoreToOffset(mips64::kStoreHalfword, mips64::A0, mips64::A1, 2);
-  __ StoreToOffset(mips64::kStoreHalfword, mips64::A0, mips64::A1, 256);
-  __ StoreToOffset(mips64::kStoreHalfword, mips64::A0, mips64::A1, 1000);
-  __ StoreToOffset(mips64::kStoreHalfword, mips64::A0, mips64::A1, 0x7FFE);
-  __ StoreToOffset(mips64::kStoreHalfword, mips64::A0, mips64::A1, 0x8000);
-  __ StoreToOffset(mips64::kStoreHalfword, mips64::A0, mips64::A1, 0x8002);
-  __ StoreToOffset(mips64::kStoreHalfword, mips64::A0, mips64::A1, 0x10000);
-  __ StoreToOffset(mips64::kStoreHalfword, mips64::A0, mips64::A1, 0x12345678);
-  __ StoreToOffset(mips64::kStoreHalfword, mips64::A0, mips64::A1, -256);
-  __ StoreToOffset(mips64::kStoreHalfword, mips64::A0, mips64::A1, -32768);
-  __ StoreToOffset(mips64::kStoreHalfword, mips64::A0, mips64::A1, 0xABCDEF00);
-
-  __ StoreToOffset(mips64::kStoreWord, mips64::A0, mips64::A0, 0);
-  __ StoreToOffset(mips64::kStoreWord, mips64::A0, mips64::A1, 0);
-  __ StoreToOffset(mips64::kStoreWord, mips64::A0, mips64::A1, 4);
-  __ StoreToOffset(mips64::kStoreWord, mips64::A0, mips64::A1, 256);
-  __ StoreToOffset(mips64::kStoreWord, mips64::A0, mips64::A1, 1000);
-  __ StoreToOffset(mips64::kStoreWord, mips64::A0, mips64::A1, 0x7FFC);
-  __ StoreToOffset(mips64::kStoreWord, mips64::A0, mips64::A1, 0x8000);
-  __ StoreToOffset(mips64::kStoreWord, mips64::A0, mips64::A1, 0x8004);
-  __ StoreToOffset(mips64::kStoreWord, mips64::A0, mips64::A1, 0x10000);
-  __ StoreToOffset(mips64::kStoreWord, mips64::A0, mips64::A1, 0x12345678);
-  __ StoreToOffset(mips64::kStoreWord, mips64::A0, mips64::A1, -256);
-  __ StoreToOffset(mips64::kStoreWord, mips64::A0, mips64::A1, -32768);
-  __ StoreToOffset(mips64::kStoreWord, mips64::A0, mips64::A1, 0xABCDEF00);
-
-  __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A0, 0);
-  __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, 0);
-  __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, 4);
-  __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, 256);
-  __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, 1000);
-  __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, 0x7FFC);
-  __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, 0x8000);
-  __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, 0x8004);
-  __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, 0x10000);
-  __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, 0x12345678);
-  __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, -256);
-  __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, -32768);
-  __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, 0xABCDEF00);
-  __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, 0x7FFFFFF8);
-  __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, 0x7FFFFFFC);
-  __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, 0x80000000);
-  __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, 0x80000004);
-
-  const char* expected =
-      "sb $a0, 0($a0)\n"
-      "sb $a0, 0($a1)\n"
-      "sb $a0, 1($a1)\n"
-      "sb $a0, 256($a1)\n"
-      "sb $a0, 1000($a1)\n"
-      "sb $a0, 0x7FFF($a1)\n"
-      "daddiu $at, $a1, 0x7FF8\n"
-      "sb $a0, 8($at)\n"
-      "daddiu $at, $a1, 0x7FF8\n"
-      "sb $a0, 9($at)\n"
-      "daui $at, $a1, 1\n"
-      "sb $a0, 0($at)\n"
-      "daui $at, $a1, 4660 # 0x1234\n"
-      "sb $a0, 22136($at) # 0x5678\n"
-      "sb $a0, -256($a1)\n"
-      "sb $a0, -32768($a1)\n"
-      "daui $at, $a1, 43982 # 0xABCE\n"
-      "sb $a0, -4352($at) # 0xEF00\n"
-
-      "sh $a0, 0($a0)\n"
-      "sh $a0, 0($a1)\n"
-      "sh $a0, 2($a1)\n"
-      "sh $a0, 256($a1)\n"
-      "sh $a0, 1000($a1)\n"
-      "sh $a0, 0x7FFE($a1)\n"
-      "daddiu $at, $a1, 0x7FF8\n"
-      "sh $a0, 8($at)\n"
-      "daddiu $at, $a1, 0x7FF8\n"
-      "sh $a0, 10($at)\n"
-      "daui $at, $a1, 1\n"
-      "sh $a0, 0($at)\n"
-      "daui $at, $a1, 4660 # 0x1234\n"
-      "sh $a0, 22136($at) # 0x5678\n"
-      "sh $a0, -256($a1)\n"
-      "sh $a0, -32768($a1)\n"
-      "daui $at, $a1, 43982 # 0xABCE\n"
-      "sh $a0, -4352($at) # 0xEF00\n"
-
-      "sw $a0, 0($a0)\n"
-      "sw $a0, 0($a1)\n"
-      "sw $a0, 4($a1)\n"
-      "sw $a0, 256($a1)\n"
-      "sw $a0, 1000($a1)\n"
-      "sw $a0, 0x7FFC($a1)\n"
-      "daddiu $at, $a1, 0x7FF8\n"
-      "sw $a0, 8($at)\n"
-      "daddiu $at, $a1, 0x7FF8\n"
-      "sw $a0, 12($at)\n"
-      "daui $at, $a1, 1\n"
-      "sw $a0, 0($at)\n"
-      "daui $at, $a1, 4660 # 0x1234\n"
-      "sw $a0, 22136($at) # 0x5678\n"
-      "sw $a0, -256($a1)\n"
-      "sw $a0, -32768($a1)\n"
-      "daui $at, $a1, 43982 # 0xABCE\n"
-      "sw $a0, -4352($at) # 0xEF00\n"
-
-      "sd $a0, 0($a0)\n"
-      "sd $a0, 0($a1)\n"
-      "sw $a0, 4($a1)\n"
-      "dsrl32 $t3, $a0, 0\n"
-      "sw $t3, 8($a1)\n"
-      "sd $a0, 256($a1)\n"
-      "sd $a0, 1000($a1)\n"
-      "daddiu $at, $a1, 0x7FF8\n"
-      "sw $a0, 4($at)\n"
-      "dsrl32 $t3, $a0, 0\n"
-      "sw $t3, 8($at)\n"
-      "daddiu $at, $a1, 32760 # 0x7FF8\n"
-      "sd $a0, 8($at)\n"
-      "daddiu $at, $a1, 32760 # 0x7FF8\n"
-      "sw $a0, 12($at)\n"
-      "dsrl32 $t3, $a0, 0\n"
-      "sw $t3, 16($at)\n"
-      "daui $at, $a1, 1\n"
-      "sd $a0, 0($at)\n"
-      "daui $at, $a1, 4660 # 0x1234\n"
-      "sd $a0, 22136($at) # 0x5678\n"
-      "sd $a0, -256($a1)\n"
-      "sd $a0, -32768($a1)\n"
-      "daui $at, $a1, 0xABCE\n"
-      "sd $a0, -0x1100($at)\n"
-      "daui $at, $a1, 0x8000\n"
-      "dahi $at, $at, 1\n"
-      "sd $a0, -8($at)\n"
-      "daui $at, $a1, 0x8000\n"
-      "dahi $at, $at, 1\n"
-      "sw $a0, -4($at) # 0xFFFC\n"
-      "dsrl32 $t3, $a0, 0\n"
-      "sw $t3, 0($at) # 0x0\n"
-      "daui $at, $a1, 0x8000\n"
-      "sd $a0, 0($at) # 0x0\n"
-      "daui $at, $a1, 0x8000\n"
-      "sw $a0, 4($at) # 0x4\n"
-      "dsrl32 $t3, $a0, 0\n"
-      "sw $t3, 8($at) # 0x8\n";
-  DriverStr(expected, "StoreToOffset");
-}
-
-TEST_F(AssemblerMIPS64Test, StoreFpuToOffset) {
-  __ StoreFpuToOffset(mips64::kStoreWord, mips64::F0, mips64::A0, 0);
-  __ StoreFpuToOffset(mips64::kStoreWord, mips64::F0, mips64::A0, 4);
-  __ StoreFpuToOffset(mips64::kStoreWord, mips64::F0, mips64::A0, 256);
-  __ StoreFpuToOffset(mips64::kStoreWord, mips64::F0, mips64::A0, 0x7FFC);
-  __ StoreFpuToOffset(mips64::kStoreWord, mips64::F0, mips64::A0, 0x8000);
-  __ StoreFpuToOffset(mips64::kStoreWord, mips64::F0, mips64::A0, 0x8004);
-  __ StoreFpuToOffset(mips64::kStoreWord, mips64::F0, mips64::A0, 0x10000);
-  __ StoreFpuToOffset(mips64::kStoreWord, mips64::F0, mips64::A0, 0x12345678);
-  __ StoreFpuToOffset(mips64::kStoreWord, mips64::F0, mips64::A0, -256);
-  __ StoreFpuToOffset(mips64::kStoreWord, mips64::F0, mips64::A0, -32768);
-  __ StoreFpuToOffset(mips64::kStoreWord, mips64::F0, mips64::A0, 0xABCDEF00);
-
-  __ StoreFpuToOffset(mips64::kStoreDoubleword, mips64::F0, mips64::A0, 0);
-  __ StoreFpuToOffset(mips64::kStoreDoubleword, mips64::F0, mips64::A0, 4);
-  __ StoreFpuToOffset(mips64::kStoreDoubleword, mips64::F0, mips64::A0, 256);
-  __ StoreFpuToOffset(mips64::kStoreDoubleword, mips64::F0, mips64::A0, 0x7FFC);
-  __ StoreFpuToOffset(mips64::kStoreDoubleword, mips64::F0, mips64::A0, 0x8000);
-  __ StoreFpuToOffset(mips64::kStoreDoubleword, mips64::F0, mips64::A0, 0x8004);
-  __ StoreFpuToOffset(mips64::kStoreDoubleword, mips64::F0, mips64::A0, 0x10000);
-  __ StoreFpuToOffset(mips64::kStoreDoubleword, mips64::F0, mips64::A0, 0x12345678);
-  __ StoreFpuToOffset(mips64::kStoreDoubleword, mips64::F0, mips64::A0, -256);
-  __ StoreFpuToOffset(mips64::kStoreDoubleword, mips64::F0, mips64::A0, -32768);
-  __ StoreFpuToOffset(mips64::kStoreDoubleword, mips64::F0, mips64::A0, 0xABCDEF00);
-
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 0);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 1);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 2);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 4);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 8);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 511);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 512);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 513);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 514);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 516);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 1022);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 1024);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 1025);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 1026);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 1028);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 2044);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 2048);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 2049);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 2050);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 2052);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 4088);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 4096);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 4097);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 4098);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 4100);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 4104);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 0x7FFC);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 0x8000);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 0x10000);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 0x12345678);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 0x12350078);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, -256);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, -511);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, -513);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, -1022);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, -1026);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, -2044);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, -2052);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, -4096);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, -4104);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, -32768);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 0xABCDEF00);
-  __ StoreFpuToOffset(mips64::kStoreQuadword, mips64::F0, mips64::A0, 0x7FFFABCD);
-
-  const char* expected =
-      "swc1 $f0, 0($a0)\n"
-      "swc1 $f0, 4($a0)\n"
-      "swc1 $f0, 256($a0)\n"
-      "swc1 $f0, 0x7FFC($a0)\n"
-      "daddiu $at, $a0, 32760 # 0x7FF8\n"
-      "swc1 $f0, 8($at)\n"
-      "daddiu $at, $a0, 32760 # 0x7FF8\n"
-      "swc1 $f0, 12($at)\n"
-      "daui $at, $a0, 1\n"
-      "swc1 $f0, 0($at)\n"
-      "daui $at, $a0, 4660 # 0x1234\n"
-      "swc1 $f0, 22136($at) # 0x5678\n"
-      "swc1 $f0, -256($a0)\n"
-      "swc1 $f0, -32768($a0)\n"
-      "daui $at, $a0, 0xABCE\n"
-      "swc1 $f0, -0x1100($at)\n"
-
-      "sdc1 $f0, 0($a0)\n"
-      "mfhc1 $t3, $f0\n"
-      "swc1 $f0, 4($a0)\n"
-      "sw $t3, 8($a0)\n"
-      "sdc1 $f0, 256($a0)\n"
-      "daddiu $at, $a0, 32760 # 0x7FF8\n"
-      "mfhc1 $t3, $f0\n"
-      "swc1 $f0, 4($at)\n"
-      "sw $t3, 8($at)\n"
-      "daddiu $at, $a0, 32760 # 0x7FF8\n"
-      "sdc1 $f0, 8($at)\n"
-      "daddiu $at, $a0, 32760 # 0x7FF8\n"
-      "mfhc1 $t3, $f0\n"
-      "swc1 $f0, 12($at)\n"
-      "sw $t3, 16($at)\n"
-      "daui $at, $a0, 1\n"
-      "sdc1 $f0, 0($at)\n"
-      "daui $at, $a0, 4660 # 0x1234\n"
-      "sdc1 $f0, 22136($at) # 0x5678\n"
-      "sdc1 $f0, -256($a0)\n"
-      "sdc1 $f0, -32768($a0)\n"
-      "daui $at, $a0, 0xABCE\n"
-      "sdc1 $f0, -0x1100($at)\n"
-
-      "st.d $w0, 0($a0)\n"
-      "st.b $w0, 1($a0)\n"
-      "st.h $w0, 2($a0)\n"
-      "st.w $w0, 4($a0)\n"
-      "st.d $w0, 8($a0)\n"
-      "st.b $w0, 511($a0)\n"
-      "st.d $w0, 512($a0)\n"
-      "daddiu $at, $a0, 513\n"
-      "st.b $w0, 0($at)\n"
-      "st.h $w0, 514($a0)\n"
-      "st.w $w0, 516($a0)\n"
-      "st.h $w0, 1022($a0)\n"
-      "st.d $w0, 1024($a0)\n"
-      "daddiu $at, $a0, 1025\n"
-      "st.b $w0, 0($at)\n"
-      "daddiu $at, $a0, 1026\n"
-      "st.h $w0, 0($at)\n"
-      "st.w $w0, 1028($a0)\n"
-      "st.w $w0, 2044($a0)\n"
-      "st.d $w0, 2048($a0)\n"
-      "daddiu $at, $a0, 2049\n"
-      "st.b $w0, 0($at)\n"
-      "daddiu $at, $a0, 2050\n"
-      "st.h $w0, 0($at)\n"
-      "daddiu $at, $a0, 2052\n"
-      "st.w $w0, 0($at)\n"
-      "st.d $w0, 4088($a0)\n"
-      "daddiu $at, $a0, 4096\n"
-      "st.d $w0, 0($at)\n"
-      "daddiu $at, $a0, 4097\n"
-      "st.b $w0, 0($at)\n"
-      "daddiu $at, $a0, 4098\n"
-      "st.h $w0, 0($at)\n"
-      "daddiu $at, $a0, 4100\n"
-      "st.w $w0, 0($at)\n"
-      "daddiu $at, $a0, 4104\n"
-      "st.d $w0, 0($at)\n"
-      "daddiu $at, $a0, 0x7FFC\n"
-      "st.w $w0, 0($at)\n"
-      "daddiu $at, $a0, 0x7FF8\n"
-      "st.d $w0, 8($at)\n"
-      "daui $at, $a0, 0x1\n"
-      "st.d $w0, 0($at)\n"
-      "daui $at, $a0, 0x1234\n"
-      "daddiu $at, $at, 0x6000\n"
-      "st.d $w0, -2440($at) # 0xF678\n"
-      "daui $at, $a0, 0x1235\n"
-      "st.d $w0, 0x78($at)\n"
-      "st.d $w0, -256($a0)\n"
-      "st.b $w0, -511($a0)\n"
-      "daddiu $at, $a0, -513\n"
-      "st.b $w0, 0($at)\n"
-      "st.h $w0, -1022($a0)\n"
-      "daddiu $at, $a0, -1026\n"
-      "st.h $w0, 0($at)\n"
-      "st.w $w0, -2044($a0)\n"
-      "daddiu $at, $a0, -2052\n"
-      "st.w $w0, 0($at)\n"
-      "st.d $w0, -4096($a0)\n"
-      "daddiu $at, $a0, -4104\n"
-      "st.d $w0, 0($at)\n"
-      "daddiu $at, $a0, -32768\n"
-      "st.d $w0, 0($at)\n"
-      "daui $at, $a0, 0xABCE\n"
-      "daddiu $at, $at, -8192 # 0xE000\n"
-      "st.d $w0, 0xF00($at)\n"
-      "daui $at, $a0, 0x8000\n"
-      "dahi $at, $at, 1\n"
-      "daddiu $at, $at, -21504 # 0xAC00\n"
-      "st.b $w0, -51($at) # 0xFFCD\n";
-  DriverStr(expected, "StoreFpuToOffset");
-}
-
-TEST_F(AssemblerMIPS64Test, StoreConstToOffset) {
-  __ StoreConstToOffset(mips64::kStoreByte, 0xFF, mips64::A1, +0, mips64::T8);
-  __ StoreConstToOffset(mips64::kStoreHalfword, 0xFFFF, mips64::A1, +0, mips64::T8);
-  __ StoreConstToOffset(mips64::kStoreWord, 0x12345678, mips64::A1, +0, mips64::T8);
-  __ StoreConstToOffset(mips64::kStoreDoubleword, 0x123456789ABCDEF0, mips64::A1, +0, mips64::T8);
-
-  __ StoreConstToOffset(mips64::kStoreByte, 0, mips64::A1, +0, mips64::T8);
-  __ StoreConstToOffset(mips64::kStoreHalfword, 0, mips64::A1, +0, mips64::T8);
-  __ StoreConstToOffset(mips64::kStoreWord, 0, mips64::A1, +0, mips64::T8);
-  __ StoreConstToOffset(mips64::kStoreDoubleword, 0, mips64::A1, +0, mips64::T8);
-
-  __ StoreConstToOffset(mips64::kStoreDoubleword, 0x1234567812345678, mips64::A1, +0, mips64::T8);
-  __ StoreConstToOffset(mips64::kStoreDoubleword, 0x1234567800000000, mips64::A1, +0, mips64::T8);
-  __ StoreConstToOffset(mips64::kStoreDoubleword, 0x0000000012345678, mips64::A1, +0, mips64::T8);
-
-  __ StoreConstToOffset(mips64::kStoreWord, 0, mips64::T8, +0, mips64::T8);
-  __ StoreConstToOffset(mips64::kStoreWord, 0x12345678, mips64::T8, +0, mips64::T8);
-
-  __ StoreConstToOffset(mips64::kStoreWord, 0, mips64::A1, -0xFFF0, mips64::T8);
-  __ StoreConstToOffset(mips64::kStoreWord, 0x12345678, mips64::A1, +0xFFF0, mips64::T8);
-
-  __ StoreConstToOffset(mips64::kStoreWord, 0, mips64::T8, -0xFFF0, mips64::T8);
-  __ StoreConstToOffset(mips64::kStoreWord, 0x12345678, mips64::T8, +0xFFF0, mips64::T8);
-
-  const char* expected =
-      "ori $t8, $zero, 0xFF\n"
-      "sb $t8, 0($a1)\n"
-      "ori $t8, $zero, 0xFFFF\n"
-      "sh $t8, 0($a1)\n"
-      "lui $t8, 0x1234\n"
-      "ori $t8, $t8,0x5678\n"
-      "sw $t8, 0($a1)\n"
-      "lui $t8, 0x9abc\n"
-      "ori $t8, $t8,0xdef0\n"
-      "dahi $t8, $t8, 0x5679\n"
-      "dati $t8, $t8, 0x1234\n"
-      "sd $t8, 0($a1)\n"
-      "sb $zero, 0($a1)\n"
-      "sh $zero, 0($a1)\n"
-      "sw $zero, 0($a1)\n"
-      "sd $zero, 0($a1)\n"
-      "lui $t8, 0x1234\n"
-      "ori $t8, $t8,0x5678\n"
-      "dins $t8, $t8, 0x20, 0x20\n"
-      "sd $t8, 0($a1)\n"
-      "lui $t8, 0x246\n"
-      "ori $t8, $t8, 0x8acf\n"
-      "dsll32 $t8, $t8, 0x3\n"
-      "sd $t8, 0($a1)\n"
-      "lui $t8, 0x1234\n"
-      "ori $t8, $t8, 0x5678\n"
-      "sd $t8, 0($a1)\n"
-      "sw $zero, 0($t8)\n"
-      "lui $at,0x1234\n"
-      "ori $at, $at, 0x5678\n"
-      "sw  $at, 0($t8)\n"
-      "daddiu $at, $a1, -32760 # 0x8008\n"
-      "sw $zero, -32760($at) # 0x8008\n"
-      "daddiu $at, $a1, 32760 # 0x7FF8\n"
-      "lui $t8, 4660 # 0x1234\n"
-      "ori $t8, $t8, 22136 # 0x5678\n"
-      "sw $t8, 32760($at) # 0x7FF8\n"
-      "daddiu $at, $t8, -32760 # 0x8008\n"
-      "sw $zero, -32760($at) # 0x8008\n"
-      "daddiu $at, $t8, 32760 # 0x7FF8\n"
-      "lui $t8, 4660 # 0x1234\n"
-      "ori $t8, $t8, 22136 # 0x5678\n"
-      "sw $t8, 32760($at) # 0x7FF8\n";
-  DriverStr(expected, "StoreConstToOffset");
-}
-//////////////////////////////
-// Loading/adding Constants //
-//////////////////////////////
-
-TEST_F(AssemblerMIPS64Test, LoadConst32) {
-  // IsUint<16>(value)
-  __ LoadConst32(mips64::V0, 0);
-  __ LoadConst32(mips64::V0, 65535);
-  // IsInt<16>(value)
-  __ LoadConst32(mips64::V0, -1);
-  __ LoadConst32(mips64::V0, -32768);
-  // Everything else
-  __ LoadConst32(mips64::V0, 65536);
-  __ LoadConst32(mips64::V0, 65537);
-  __ LoadConst32(mips64::V0, 2147483647);
-  __ LoadConst32(mips64::V0, -32769);
-  __ LoadConst32(mips64::V0, -65536);
-  __ LoadConst32(mips64::V0, -65537);
-  __ LoadConst32(mips64::V0, -2147483647);
-  __ LoadConst32(mips64::V0, -2147483648);
-
-  const char* expected =
-      // IsUint<16>(value)
-      "ori $v0, $zero, 0\n"         // __ LoadConst32(mips64::V0, 0);
-      "ori $v0, $zero, 65535\n"     // __ LoadConst32(mips64::V0, 65535);
-      // IsInt<16>(value)
-      "addiu $v0, $zero, -1\n"      // __ LoadConst32(mips64::V0, -1);
-      "addiu $v0, $zero, -32768\n"  // __ LoadConst32(mips64::V0, -32768);
-      // Everything else
-      "lui $v0, 1\n"                // __ LoadConst32(mips64::V0, 65536);
-      "lui $v0, 1\n"                // __ LoadConst32(mips64::V0, 65537);
-      "ori $v0, 1\n"                //                 "
-      "lui $v0, 32767\n"            // __ LoadConst32(mips64::V0, 2147483647);
-      "ori $v0, 65535\n"            //                 "
-      "lui $v0, 65535\n"            // __ LoadConst32(mips64::V0, -32769);
-      "ori $v0, 32767\n"            //                 "
-      "lui $v0, 65535\n"            // __ LoadConst32(mips64::V0, -65536);
-      "lui $v0, 65534\n"            // __ LoadConst32(mips64::V0, -65537);
-      "ori $v0, 65535\n"            //                 "
-      "lui $v0, 32768\n"            // __ LoadConst32(mips64::V0, -2147483647);
-      "ori $v0, 1\n"                //                 "
-      "lui $v0, 32768\n";           // __ LoadConst32(mips64::V0, -2147483648);
-  DriverStr(expected, "LoadConst32");
-}
-
-TEST_F(AssemblerMIPS64Test, Addiu32) {
-  __ Addiu32(mips64::A1, mips64::A2, -0x8000);
-  __ Addiu32(mips64::A1, mips64::A2, +0);
-  __ Addiu32(mips64::A1, mips64::A2, +0x7FFF);
-  __ Addiu32(mips64::A1, mips64::A2, -0x8001);
-  __ Addiu32(mips64::A1, mips64::A2, +0x8000);
-  __ Addiu32(mips64::A1, mips64::A2, -0x10000);
-  __ Addiu32(mips64::A1, mips64::A2, +0x10000);
-  __ Addiu32(mips64::A1, mips64::A2, +0x12345678);
-
-  const char* expected =
-      "addiu $a1, $a2, -0x8000\n"
-      "addiu $a1, $a2, 0\n"
-      "addiu $a1, $a2, 0x7FFF\n"
-      "aui $a1, $a2, 0xFFFF\n"
-      "addiu $a1, $a1, 0x7FFF\n"
-      "aui $a1, $a2, 1\n"
-      "addiu $a1, $a1, -0x8000\n"
-      "aui $a1, $a2, 0xFFFF\n"
-      "aui $a1, $a2, 1\n"
-      "aui $a1, $a2, 0x1234\n"
-      "addiu $a1, $a1, 0x5678\n";
-  DriverStr(expected, "Addiu32");
-}
-
-static uint64_t SignExtend16To64(uint16_t n) {
-  return static_cast<int16_t>(n);
-}
-
-// The art::mips64::Mips64Assembler::LoadConst64() method uses a template
-// to minimize the number of instructions needed to load a 64-bit constant
-// value into a register. The template calls various methods which emit
-// MIPS machine instructions. This struct (class) uses the same template
-// but overrides the definitions of the methods which emit MIPS instructions
-// to use methods which simulate the operation of the corresponding MIPS
-// instructions. After invoking LoadConst64() the target register should
-// contain the same 64-bit value as was input to LoadConst64(). If the
-// simulated register doesn't contain the correct value then there is probably
-// an error in the template function.
-struct LoadConst64Tester {
-  LoadConst64Tester() {
-    // Initialize all of the registers for simulation to zero.
-    for (int r = 0; r < 32; r++) {
-      regs_[r] = 0;
-    }
-    // Clear all of the path flags.
-    loadconst64_paths_ = art::mips64::kLoadConst64PathZero;
-  }
-  void Addiu(mips64::GpuRegister rd, mips64::GpuRegister rs, uint16_t c) {
-    regs_[rd] = static_cast<int32_t>(regs_[rs] + SignExtend16To64(c));
-  }
-  void Daddiu(mips64::GpuRegister rd, mips64::GpuRegister rs, uint16_t c) {
-    regs_[rd] = regs_[rs] + SignExtend16To64(c);
-  }
-  void Dahi(mips64::GpuRegister rd, uint16_t c) {
-    regs_[rd] += SignExtend16To64(c) << 32;
-  }
-  void Dati(mips64::GpuRegister rd, uint16_t c) {
-    regs_[rd] += SignExtend16To64(c) << 48;
-  }
-  void Dinsu(mips64::GpuRegister rt, mips64::GpuRegister rs, int pos, int size) {
-    CHECK(IsUint<5>(pos - 32)) << pos;
-    CHECK(IsUint<5>(size - 1)) << size;
-    CHECK(IsUint<5>(pos + size - 33)) << pos << " + " << size;
-    uint64_t src_mask = (UINT64_C(1) << size) - 1;
-    uint64_t dsk_mask = ~(src_mask << pos);
-
-    regs_[rt] = (regs_[rt] & dsk_mask) | ((regs_[rs] & src_mask) << pos);
-  }
-  void Dsll(mips64::GpuRegister rd, mips64::GpuRegister rt, int shamt) {
-    regs_[rd] = regs_[rt] << (shamt & 0x1f);
-  }
-  void Dsll32(mips64::GpuRegister rd, mips64::GpuRegister rt, int shamt) {
-    regs_[rd] = regs_[rt] << (32 + (shamt & 0x1f));
-  }
-  void Dsrl(mips64::GpuRegister rd, mips64::GpuRegister rt, int shamt) {
-    regs_[rd] = regs_[rt] >> (shamt & 0x1f);
-  }
-  void Dsrl32(mips64::GpuRegister rd, mips64::GpuRegister rt, int shamt) {
-    regs_[rd] = regs_[rt] >> (32 + (shamt & 0x1f));
-  }
-  void Lui(mips64::GpuRegister rd, uint16_t c) {
-    regs_[rd] = SignExtend16To64(c) << 16;
-  }
-  void Ori(mips64::GpuRegister rd, mips64::GpuRegister rs, uint16_t c) {
-    regs_[rd] = regs_[rs] | c;
-  }
-  void LoadConst32(mips64::GpuRegister rd, int32_t c) {
-    CHECK_NE(rd, 0);
-    mips64::TemplateLoadConst32<LoadConst64Tester>(this, rd, c);
-    CHECK_EQ(regs_[rd], static_cast<uint64_t>(c));
-  }
-  void LoadConst64(mips64::GpuRegister rd, int64_t c) {
-    CHECK_NE(rd, 0);
-    mips64::TemplateLoadConst64<LoadConst64Tester>(this, rd, c);
-    CHECK_EQ(regs_[rd], static_cast<uint64_t>(c));
-  }
-  uint64_t regs_[32];
-
-  // Getter function for loadconst64_paths_.
-  int GetPathsCovered() {
-    return loadconst64_paths_;
-  }
-
-  void RecordLoadConst64Path(int value) {
-    loadconst64_paths_ |= value;
-  }
-
- private:
-  // This variable holds a bitmask to tell us which paths were taken
-  // through the template function which loads 64-bit values.
-  int loadconst64_paths_;
-};
-
-TEST_F(AssemblerMIPS64Test, LoadConst64) {
-  const uint16_t imms[] = {
-      0, 1, 2, 3, 4, 0x33, 0x66, 0x55, 0x99, 0xaa, 0xcc, 0xff, 0x5500, 0x5555,
-      0x7ffc, 0x7ffd, 0x7ffe, 0x7fff, 0x8000, 0x8001, 0x8002, 0x8003, 0x8004,
-      0xaaaa, 0xfffc, 0xfffd, 0xfffe, 0xffff
-  };
-  unsigned d0, d1, d2, d3;
-  LoadConst64Tester tester;
-
-  union {
-    int64_t v64;
-    uint16_t v16[4];
-  } u;
-
-  for (d3 = 0; d3 < sizeof imms / sizeof imms[0]; d3++) {
-    u.v16[3] = imms[d3];
-
-    for (d2 = 0; d2 < sizeof imms / sizeof imms[0]; d2++) {
-      u.v16[2] = imms[d2];
-
-      for (d1 = 0; d1 < sizeof imms / sizeof imms[0]; d1++) {
-        u.v16[1] = imms[d1];
-
-        for (d0 = 0; d0 < sizeof imms / sizeof imms[0]; d0++) {
-          u.v16[0] = imms[d0];
-
-          tester.LoadConst64(mips64::V0, u.v64);
-        }
-      }
-    }
-  }
-
-  // Verify that we tested all paths through the "load 64-bit value"
-  // function template.
-  EXPECT_EQ(tester.GetPathsCovered(), art::mips64::kLoadConst64PathAllPaths);
-}
-
-TEST_F(AssemblerMIPS64Test, LoadFarthestNearLabelAddress) {
-  mips64::Mips64Label label;
-  __ LoadLabelAddress(mips64::V0, &label);
-  constexpr uint32_t kAdduCount = 0x3FFDE;
-  for (uint32_t i = 0; i != kAdduCount; ++i) {
-    __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-  }
-  __ Bind(&label);
-
-  std::string expected =
-      "lapc $v0, 1f\n" +
-      RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
-      "1:\n";
-  DriverStr(expected, "LoadFarthestNearLabelAddress");
-  EXPECT_EQ(__ GetLabelLocation(&label), (1 + kAdduCount) * 4);
-}
-
-TEST_F(AssemblerMIPS64Test, LoadNearestFarLabelAddress) {
-  mips64::Mips64Label label;
-  __ LoadLabelAddress(mips64::V0, &label);
-  constexpr uint32_t kAdduCount = 0x3FFDF;
-  for (uint32_t i = 0; i != kAdduCount; ++i) {
-    __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-  }
-  __ Bind(&label);
-
-  std::string expected =
-      "1:\n"
-      "auipc $at, %hi(2f - 1b)\n"
-      "daddiu $v0, $at, %lo(2f - 1b)\n" +
-      RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
-      "2:\n";
-  DriverStr(expected, "LoadNearestFarLabelAddress");
-  EXPECT_EQ(__ GetLabelLocation(&label), (2 + kAdduCount) * 4);
-}
-
-TEST_F(AssemblerMIPS64Test, LoadFarthestNearLiteral) {
-  mips64::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
-  __ LoadLiteral(mips64::V0, mips64::kLoadWord, literal);
-  constexpr uint32_t kAdduCount = 0x3FFDE;
-  for (uint32_t i = 0; i != kAdduCount; ++i) {
-    __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-  }
-
-  std::string expected =
-      "lwpc $v0, 1f\n" +
-      RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
-      "1:\n"
-      ".word 0x12345678\n";
-  DriverStr(expected, "LoadFarthestNearLiteral");
-  EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (1 + kAdduCount) * 4);
-}
-
-TEST_F(AssemblerMIPS64Test, LoadNearestFarLiteral) {
-  mips64::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
-  __ LoadLiteral(mips64::V0, mips64::kLoadWord, literal);
-  constexpr uint32_t kAdduCount = 0x3FFDF;
-  for (uint32_t i = 0; i != kAdduCount; ++i) {
-    __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-  }
-
-  std::string expected =
-      "1:\n"
-      "auipc $at, %hi(2f - 1b)\n"
-      "lw $v0, %lo(2f - 1b)($at)\n" +
-      RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
-      "2:\n"
-      ".word 0x12345678\n";
-  DriverStr(expected, "LoadNearestFarLiteral");
-  EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (2 + kAdduCount) * 4);
-}
-
-TEST_F(AssemblerMIPS64Test, LoadFarthestNearLiteralUnsigned) {
-  mips64::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
-  __ LoadLiteral(mips64::V0, mips64::kLoadUnsignedWord, literal);
-  constexpr uint32_t kAdduCount = 0x3FFDE;
-  for (uint32_t i = 0; i != kAdduCount; ++i) {
-    __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-  }
-
-  std::string expected =
-      "lwupc $v0, 1f\n" +
-      RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
-      "1:\n"
-      ".word 0x12345678\n";
-  DriverStr(expected, "LoadFarthestNearLiteralUnsigned");
-  EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (1 + kAdduCount) * 4);
-}
-
-TEST_F(AssemblerMIPS64Test, LoadNearestFarLiteralUnsigned) {
-  mips64::Literal* literal = __ NewLiteral<uint32_t>(0x12345678);
-  __ LoadLiteral(mips64::V0, mips64::kLoadUnsignedWord, literal);
-  constexpr uint32_t kAdduCount = 0x3FFDF;
-  for (uint32_t i = 0; i != kAdduCount; ++i) {
-    __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-  }
-
-  std::string expected =
-      "1:\n"
-      "auipc $at, %hi(2f - 1b)\n"
-      "lwu $v0, %lo(2f - 1b)($at)\n" +
-      RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
-      "2:\n"
-      ".word 0x12345678\n";
-  DriverStr(expected, "LoadNearestFarLiteralUnsigned");
-  EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (2 + kAdduCount) * 4);
-}
-
-TEST_F(AssemblerMIPS64Test, LoadFarthestNearLiteralLong) {
-  mips64::Literal* literal = __ NewLiteral<uint64_t>(UINT64_C(0x0123456789ABCDEF));
-  __ LoadLiteral(mips64::V0, mips64::kLoadDoubleword, literal);
-  constexpr uint32_t kAdduCount = 0x3FFDD;
-  for (uint32_t i = 0; i != kAdduCount; ++i) {
-    __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-  }
-
-  std::string expected =
-      "ldpc $v0, 1f\n" +
-      RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
-      "1:\n"
-      ".dword 0x0123456789ABCDEF\n";
-  DriverStr(expected, "LoadFarthestNearLiteralLong");
-  EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (1 + kAdduCount) * 4);
-}
-
-TEST_F(AssemblerMIPS64Test, LoadNearestFarLiteralLong) {
-  mips64::Literal* literal = __ NewLiteral<uint64_t>(UINT64_C(0x0123456789ABCDEF));
-  __ LoadLiteral(mips64::V0, mips64::kLoadDoubleword, literal);
-  constexpr uint32_t kAdduCount = 0x3FFDE;
-  for (uint32_t i = 0; i != kAdduCount; ++i) {
-    __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-  }
-
-  std::string expected =
-      "1:\n"
-      "auipc $at, %hi(2f - 1b)\n"
-      "ld $v0, %lo(2f - 1b)($at)\n" +
-      RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
-      "2:\n"
-      ".dword 0x0123456789ABCDEF\n";
-  DriverStr(expected, "LoadNearestFarLiteralLong");
-  EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (2 + kAdduCount) * 4);
-}
-
-TEST_F(AssemblerMIPS64Test, LongLiteralAlignmentNop) {
-  mips64::Literal* literal1 = __ NewLiteral<uint64_t>(UINT64_C(0x0123456789ABCDEF));
-  mips64::Literal* literal2 = __ NewLiteral<uint64_t>(UINT64_C(0x5555555555555555));
-  mips64::Literal* literal3 = __ NewLiteral<uint64_t>(UINT64_C(0xAAAAAAAAAAAAAAAA));
-  __ LoadLiteral(mips64::A1, mips64::kLoadDoubleword, literal1);
-  __ LoadLiteral(mips64::A2, mips64::kLoadDoubleword, literal2);
-  __ LoadLiteral(mips64::A3, mips64::kLoadDoubleword, literal3);
-  __ LoadLabelAddress(mips64::V0, literal1->GetLabel());
-  __ LoadLabelAddress(mips64::V1, literal2->GetLabel());
-  // A nop will be inserted here before the 64-bit literals.
-
-  std::string expected =
-      "ldpc $a1, 1f\n"
-      // The GNU assembler incorrectly requires the ldpc instruction to be located
-      // at an address that's a multiple of 8. TODO: Remove this workaround if/when
-      // the assembler is fixed.
-      // "ldpc $a2, 2f\n"
-      ".word 0xECD80004\n"
-      "ldpc $a3, 3f\n"
-      "lapc $v0, 1f\n"
-      "lapc $v1, 2f\n"
-      "nop\n"
-      "1:\n"
-      ".dword 0x0123456789ABCDEF\n"
-      "2:\n"
-      ".dword 0x5555555555555555\n"
-      "3:\n"
-      ".dword 0xAAAAAAAAAAAAAAAA\n";
-  DriverStr(expected, "LongLiteralAlignmentNop");
-  EXPECT_EQ(__ GetLabelLocation(literal1->GetLabel()), 6 * 4u);
-  EXPECT_EQ(__ GetLabelLocation(literal2->GetLabel()), 8 * 4u);
-  EXPECT_EQ(__ GetLabelLocation(literal3->GetLabel()), 10 * 4u);
-}
-
-TEST_F(AssemblerMIPS64Test, LongLiteralAlignmentNoNop) {
-  mips64::Literal* literal1 = __ NewLiteral<uint64_t>(UINT64_C(0x0123456789ABCDEF));
-  mips64::Literal* literal2 = __ NewLiteral<uint64_t>(UINT64_C(0x5555555555555555));
-  __ LoadLiteral(mips64::A1, mips64::kLoadDoubleword, literal1);
-  __ LoadLiteral(mips64::A2, mips64::kLoadDoubleword, literal2);
-  __ LoadLabelAddress(mips64::V0, literal1->GetLabel());
-  __ LoadLabelAddress(mips64::V1, literal2->GetLabel());
-
-  std::string expected =
-      "ldpc $a1, 1f\n"
-      // The GNU assembler incorrectly requires the ldpc instruction to be located
-      // at an address that's a multiple of 8. TODO: Remove this workaround if/when
-      // the assembler is fixed.
-      // "ldpc $a2, 2f\n"
-      ".word 0xECD80003\n"
-      "lapc $v0, 1f\n"
-      "lapc $v1, 2f\n"
-      "1:\n"
-      ".dword 0x0123456789ABCDEF\n"
-      "2:\n"
-      ".dword 0x5555555555555555\n";
-  DriverStr(expected, "LongLiteralAlignmentNoNop");
-  EXPECT_EQ(__ GetLabelLocation(literal1->GetLabel()), 4 * 4u);
-  EXPECT_EQ(__ GetLabelLocation(literal2->GetLabel()), 6 * 4u);
-}
-
-TEST_F(AssemblerMIPS64Test, FarLongLiteralAlignmentNop) {
-  mips64::Literal* literal = __ NewLiteral<uint64_t>(UINT64_C(0x0123456789ABCDEF));
-  __ LoadLiteral(mips64::V0, mips64::kLoadDoubleword, literal);
-  __ LoadLabelAddress(mips64::V1, literal->GetLabel());
-  constexpr uint32_t kAdduCount = 0x3FFDF;
-  for (uint32_t i = 0; i != kAdduCount; ++i) {
-    __ Addu(mips64::ZERO, mips64::ZERO, mips64::ZERO);
-  }
-  // A nop will be inserted here before the 64-bit literal.
-
-  std::string expected =
-      "1:\n"
-      "auipc $at, %hi(3f - 1b)\n"
-      "ld $v0, %lo(3f - 1b)($at)\n"
-      "2:\n"
-      "auipc $at, %hi(3f - 2b)\n"
-      "daddiu $v1, $at, %lo(3f - 2b)\n" +
-      RepeatInsn(kAdduCount, "addu $zero, $zero, $zero\n") +
-      "nop\n"
-      "3:\n"
-      ".dword 0x0123456789ABCDEF\n";
-  DriverStr(expected, "FarLongLiteralAlignmentNop");
-  EXPECT_EQ(__ GetLabelLocation(literal->GetLabel()), (5 + kAdduCount) * 4);
-}
-
-// MSA instructions.
-
-TEST_F(AssemblerMIPS64Test, AndV) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::AndV, "and.v ${reg1}, ${reg2}, ${reg3}"), "and.v");
-}
-
-TEST_F(AssemblerMIPS64Test, OrV) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::OrV, "or.v ${reg1}, ${reg2}, ${reg3}"), "or.v");
-}
-
-TEST_F(AssemblerMIPS64Test, NorV) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::NorV, "nor.v ${reg1}, ${reg2}, ${reg3}"), "nor.v");
-}
-
-TEST_F(AssemblerMIPS64Test, XorV) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::XorV, "xor.v ${reg1}, ${reg2}, ${reg3}"), "xor.v");
-}
-
-TEST_F(AssemblerMIPS64Test, AddvB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::AddvB, "addv.b ${reg1}, ${reg2}, ${reg3}"),
-            "addv.b");
-}
-
-TEST_F(AssemblerMIPS64Test, AddvH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::AddvH, "addv.h ${reg1}, ${reg2}, ${reg3}"),
-            "addv.h");
-}
-
-TEST_F(AssemblerMIPS64Test, AddvW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::AddvW, "addv.w ${reg1}, ${reg2}, ${reg3}"),
-            "addv.w");
-}
-
-TEST_F(AssemblerMIPS64Test, AddvD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::AddvD, "addv.d ${reg1}, ${reg2}, ${reg3}"),
-            "addv.d");
-}
-
-TEST_F(AssemblerMIPS64Test, SubvB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::SubvB, "subv.b ${reg1}, ${reg2}, ${reg3}"),
-            "subv.b");
-}
-
-TEST_F(AssemblerMIPS64Test, SubvH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::SubvH, "subv.h ${reg1}, ${reg2}, ${reg3}"),
-            "subv.h");
-}
-
-TEST_F(AssemblerMIPS64Test, SubvW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::SubvW, "subv.w ${reg1}, ${reg2}, ${reg3}"),
-            "subv.w");
-}
-
-TEST_F(AssemblerMIPS64Test, SubvD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::SubvD, "subv.d ${reg1}, ${reg2}, ${reg3}"),
-            "subv.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Asub_sB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Asub_sB, "asub_s.b ${reg1}, ${reg2}, ${reg3}"),
-            "asub_s.b");
-}
-
-TEST_F(AssemblerMIPS64Test, Asub_sH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Asub_sH, "asub_s.h ${reg1}, ${reg2}, ${reg3}"),
-            "asub_s.h");
-}
-
-TEST_F(AssemblerMIPS64Test, Asub_sW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Asub_sW, "asub_s.w ${reg1}, ${reg2}, ${reg3}"),
-            "asub_s.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Asub_sD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Asub_sD, "asub_s.d ${reg1}, ${reg2}, ${reg3}"),
-            "asub_s.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Asub_uB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Asub_uB, "asub_u.b ${reg1}, ${reg2}, ${reg3}"),
-            "asub_u.b");
-}
-
-TEST_F(AssemblerMIPS64Test, Asub_uH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Asub_uH, "asub_u.h ${reg1}, ${reg2}, ${reg3}"),
-            "asub_u.h");
-}
-
-TEST_F(AssemblerMIPS64Test, Asub_uW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Asub_uW, "asub_u.w ${reg1}, ${reg2}, ${reg3}"),
-            "asub_u.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Asub_uD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Asub_uD, "asub_u.d ${reg1}, ${reg2}, ${reg3}"),
-            "asub_u.d");
-}
-
-TEST_F(AssemblerMIPS64Test, MulvB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::MulvB, "mulv.b ${reg1}, ${reg2}, ${reg3}"),
-            "mulv.b");
-}
-
-TEST_F(AssemblerMIPS64Test, MulvH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::MulvH, "mulv.h ${reg1}, ${reg2}, ${reg3}"),
-            "mulv.h");
-}
-
-TEST_F(AssemblerMIPS64Test, MulvW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::MulvW, "mulv.w ${reg1}, ${reg2}, ${reg3}"),
-            "mulv.w");
-}
-
-TEST_F(AssemblerMIPS64Test, MulvD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::MulvD, "mulv.d ${reg1}, ${reg2}, ${reg3}"),
-            "mulv.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Div_sB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_sB, "div_s.b ${reg1}, ${reg2}, ${reg3}"),
-            "div_s.b");
-}
-
-TEST_F(AssemblerMIPS64Test, Div_sH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_sH, "div_s.h ${reg1}, ${reg2}, ${reg3}"),
-            "div_s.h");
-}
-
-TEST_F(AssemblerMIPS64Test, Div_sW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_sW, "div_s.w ${reg1}, ${reg2}, ${reg3}"),
-            "div_s.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Div_sD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_sD, "div_s.d ${reg1}, ${reg2}, ${reg3}"),
-            "div_s.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Div_uB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_uB, "div_u.b ${reg1}, ${reg2}, ${reg3}"),
-            "div_u.b");
-}
-
-TEST_F(AssemblerMIPS64Test, Div_uH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_uH, "div_u.h ${reg1}, ${reg2}, ${reg3}"),
-            "div_u.h");
-}
-
-TEST_F(AssemblerMIPS64Test, Div_uW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_uW, "div_u.w ${reg1}, ${reg2}, ${reg3}"),
-            "div_u.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Div_uD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_uD, "div_u.d ${reg1}, ${reg2}, ${reg3}"),
-            "div_u.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Mod_sB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_sB, "mod_s.b ${reg1}, ${reg2}, ${reg3}"),
-            "mod_s.b");
-}
-
-TEST_F(AssemblerMIPS64Test, Mod_sH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_sH, "mod_s.h ${reg1}, ${reg2}, ${reg3}"),
-            "mod_s.h");
-}
-
-TEST_F(AssemblerMIPS64Test, Mod_sW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_sW, "mod_s.w ${reg1}, ${reg2}, ${reg3}"),
-            "mod_s.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Mod_sD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_sD, "mod_s.d ${reg1}, ${reg2}, ${reg3}"),
-            "mod_s.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Mod_uB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_uB, "mod_u.b ${reg1}, ${reg2}, ${reg3}"),
-            "mod_u.b");
-}
-
-TEST_F(AssemblerMIPS64Test, Mod_uH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_uH, "mod_u.h ${reg1}, ${reg2}, ${reg3}"),
-            "mod_u.h");
-}
-
-TEST_F(AssemblerMIPS64Test, Mod_uW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_uW, "mod_u.w ${reg1}, ${reg2}, ${reg3}"),
-            "mod_u.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Mod_uD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_uD, "mod_u.d ${reg1}, ${reg2}, ${reg3}"),
-            "mod_u.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Add_aB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Add_aB, "add_a.b ${reg1}, ${reg2}, ${reg3}"),
-            "add_a.b");
-}
-
-TEST_F(AssemblerMIPS64Test, Add_aH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Add_aH, "add_a.h ${reg1}, ${reg2}, ${reg3}"),
-            "add_a.h");
-}
-
-TEST_F(AssemblerMIPS64Test, Add_aW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Add_aW, "add_a.w ${reg1}, ${reg2}, ${reg3}"),
-            "add_a.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Add_aD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Add_aD, "add_a.d ${reg1}, ${reg2}, ${reg3}"),
-            "add_a.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Ave_sB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Ave_sB, "ave_s.b ${reg1}, ${reg2}, ${reg3}"),
-            "ave_s.b");
-}
-
-TEST_F(AssemblerMIPS64Test, Ave_sH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Ave_sH, "ave_s.h ${reg1}, ${reg2}, ${reg3}"),
-            "ave_s.h");
-}
-
-TEST_F(AssemblerMIPS64Test, Ave_sW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Ave_sW, "ave_s.w ${reg1}, ${reg2}, ${reg3}"),
-            "ave_s.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Ave_sD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Ave_sD, "ave_s.d ${reg1}, ${reg2}, ${reg3}"),
-            "ave_s.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Ave_uB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Ave_uB, "ave_u.b ${reg1}, ${reg2}, ${reg3}"),
-            "ave_u.b");
-}
-
-TEST_F(AssemblerMIPS64Test, Ave_uH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Ave_uH, "ave_u.h ${reg1}, ${reg2}, ${reg3}"),
-            "ave_u.h");
-}
-
-TEST_F(AssemblerMIPS64Test, Ave_uW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Ave_uW, "ave_u.w ${reg1}, ${reg2}, ${reg3}"),
-            "ave_u.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Ave_uD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Ave_uD, "ave_u.d ${reg1}, ${reg2}, ${reg3}"),
-            "ave_u.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Aver_sB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Aver_sB, "aver_s.b ${reg1}, ${reg2}, ${reg3}"),
-            "aver_s.b");
-}
-
-TEST_F(AssemblerMIPS64Test, Aver_sH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Aver_sH, "aver_s.h ${reg1}, ${reg2}, ${reg3}"),
-            "aver_s.h");
-}
-
-TEST_F(AssemblerMIPS64Test, Aver_sW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Aver_sW, "aver_s.w ${reg1}, ${reg2}, ${reg3}"),
-            "aver_s.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Aver_sD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Aver_sD, "aver_s.d ${reg1}, ${reg2}, ${reg3}"),
-            "aver_s.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Aver_uB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Aver_uB, "aver_u.b ${reg1}, ${reg2}, ${reg3}"),
-            "aver_u.b");
-}
-
-TEST_F(AssemblerMIPS64Test, Aver_uH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Aver_uH, "aver_u.h ${reg1}, ${reg2}, ${reg3}"),
-            "aver_u.h");
-}
-
-TEST_F(AssemblerMIPS64Test, Aver_uW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Aver_uW, "aver_u.w ${reg1}, ${reg2}, ${reg3}"),
-            "aver_u.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Aver_uD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Aver_uD, "aver_u.d ${reg1}, ${reg2}, ${reg3}"),
-            "aver_u.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Max_sB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Max_sB, "max_s.b ${reg1}, ${reg2}, ${reg3}"),
-            "max_s.b");
-}
-
-TEST_F(AssemblerMIPS64Test, Max_sH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Max_sH, "max_s.h ${reg1}, ${reg2}, ${reg3}"),
-            "max_s.h");
-}
-
-TEST_F(AssemblerMIPS64Test, Max_sW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Max_sW, "max_s.w ${reg1}, ${reg2}, ${reg3}"),
-            "max_s.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Max_sD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Max_sD, "max_s.d ${reg1}, ${reg2}, ${reg3}"),
-            "max_s.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Max_uB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Max_uB, "max_u.b ${reg1}, ${reg2}, ${reg3}"),
-            "max_u.b");
-}
-
-TEST_F(AssemblerMIPS64Test, Max_uH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Max_uH, "max_u.h ${reg1}, ${reg2}, ${reg3}"),
-            "max_u.h");
-}
-
-TEST_F(AssemblerMIPS64Test, Max_uW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Max_uW, "max_u.w ${reg1}, ${reg2}, ${reg3}"),
-            "max_u.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Max_uD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Max_uD, "max_u.d ${reg1}, ${reg2}, ${reg3}"),
-            "max_u.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Min_sB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Min_sB, "min_s.b ${reg1}, ${reg2}, ${reg3}"),
-            "min_s.b");
-}
-
-TEST_F(AssemblerMIPS64Test, Min_sH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Min_sH, "min_s.h ${reg1}, ${reg2}, ${reg3}"),
-            "min_s.h");
-}
-
-TEST_F(AssemblerMIPS64Test, Min_sW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Min_sW, "min_s.w ${reg1}, ${reg2}, ${reg3}"),
-            "min_s.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Min_sD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Min_sD, "min_s.d ${reg1}, ${reg2}, ${reg3}"),
-            "min_s.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Min_uB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Min_uB, "min_u.b ${reg1}, ${reg2}, ${reg3}"),
-            "min_u.b");
-}
-
-TEST_F(AssemblerMIPS64Test, Min_uH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Min_uH, "min_u.h ${reg1}, ${reg2}, ${reg3}"),
-            "min_u.h");
-}
-
-TEST_F(AssemblerMIPS64Test, Min_uW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Min_uW, "min_u.w ${reg1}, ${reg2}, ${reg3}"),
-            "min_u.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Min_uD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Min_uD, "min_u.d ${reg1}, ${reg2}, ${reg3}"),
-            "min_u.d");
-}
-
-TEST_F(AssemblerMIPS64Test, FaddW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::FaddW, "fadd.w ${reg1}, ${reg2}, ${reg3}"),
-            "fadd.w");
-}
-
-TEST_F(AssemblerMIPS64Test, FaddD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::FaddD, "fadd.d ${reg1}, ${reg2}, ${reg3}"),
-            "fadd.d");
-}
-
-TEST_F(AssemblerMIPS64Test, FsubW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::FsubW, "fsub.w ${reg1}, ${reg2}, ${reg3}"),
-            "fsub.w");
-}
-
-TEST_F(AssemblerMIPS64Test, FsubD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::FsubD, "fsub.d ${reg1}, ${reg2}, ${reg3}"),
-            "fsub.d");
-}
-
-TEST_F(AssemblerMIPS64Test, FmulW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::FmulW, "fmul.w ${reg1}, ${reg2}, ${reg3}"),
-            "fmul.w");
-}
-
-TEST_F(AssemblerMIPS64Test, FmulD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::FmulD, "fmul.d ${reg1}, ${reg2}, ${reg3}"),
-            "fmul.d");
-}
-
-TEST_F(AssemblerMIPS64Test, FdivW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::FdivW, "fdiv.w ${reg1}, ${reg2}, ${reg3}"),
-            "fdiv.w");
-}
-
-TEST_F(AssemblerMIPS64Test, FdivD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::FdivD, "fdiv.d ${reg1}, ${reg2}, ${reg3}"),
-            "fdiv.d");
-}
-
-TEST_F(AssemblerMIPS64Test, FmaxW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::FmaxW, "fmax.w ${reg1}, ${reg2}, ${reg3}"),
-            "fmax.w");
-}
-
-TEST_F(AssemblerMIPS64Test, FmaxD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::FmaxD, "fmax.d ${reg1}, ${reg2}, ${reg3}"),
-            "fmax.d");
-}
-
-TEST_F(AssemblerMIPS64Test, FminW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::FminW, "fmin.w ${reg1}, ${reg2}, ${reg3}"),
-            "fmin.w");
-}
-
-TEST_F(AssemblerMIPS64Test, FminD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::FminD, "fmin.d ${reg1}, ${reg2}, ${reg3}"),
-            "fmin.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Ffint_sW) {
-  DriverStr(RepeatVV(&mips64::Mips64Assembler::Ffint_sW, "ffint_s.w ${reg1}, ${reg2}"),
-            "ffint_s.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Ffint_sD) {
-  DriverStr(RepeatVV(&mips64::Mips64Assembler::Ffint_sD, "ffint_s.d ${reg1}, ${reg2}"),
-            "ffint_s.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Ftint_sW) {
-  DriverStr(RepeatVV(&mips64::Mips64Assembler::Ftint_sW, "ftint_s.w ${reg1}, ${reg2}"),
-            "ftint_s.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Ftint_sD) {
-  DriverStr(RepeatVV(&mips64::Mips64Assembler::Ftint_sD, "ftint_s.d ${reg1}, ${reg2}"),
-            "ftint_s.d");
-}
-
-TEST_F(AssemblerMIPS64Test, SllB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::SllB, "sll.b ${reg1}, ${reg2}, ${reg3}"), "sll.b");
-}
-
-TEST_F(AssemblerMIPS64Test, SllH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::SllH, "sll.h ${reg1}, ${reg2}, ${reg3}"), "sll.h");
-}
-
-TEST_F(AssemblerMIPS64Test, SllW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::SllW, "sll.w ${reg1}, ${reg2}, ${reg3}"), "sll.w");
-}
-
-TEST_F(AssemblerMIPS64Test, SllD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::SllD, "sll.d ${reg1}, ${reg2}, ${reg3}"), "sll.d");
-}
-
-TEST_F(AssemblerMIPS64Test, SraB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::SraB, "sra.b ${reg1}, ${reg2}, ${reg3}"), "sra.b");
-}
-
-TEST_F(AssemblerMIPS64Test, SraH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::SraH, "sra.h ${reg1}, ${reg2}, ${reg3}"), "sra.h");
-}
-
-TEST_F(AssemblerMIPS64Test, SraW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::SraW, "sra.w ${reg1}, ${reg2}, ${reg3}"), "sra.w");
-}
-
-TEST_F(AssemblerMIPS64Test, SraD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::SraD, "sra.d ${reg1}, ${reg2}, ${reg3}"), "sra.d");
-}
-
-TEST_F(AssemblerMIPS64Test, SrlB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::SrlB, "srl.b ${reg1}, ${reg2}, ${reg3}"), "srl.b");
-}
-
-TEST_F(AssemblerMIPS64Test, SrlH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::SrlH, "srl.h ${reg1}, ${reg2}, ${reg3}"), "srl.h");
-}
-
-TEST_F(AssemblerMIPS64Test, SrlW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::SrlW, "srl.w ${reg1}, ${reg2}, ${reg3}"), "srl.w");
-}
-
-TEST_F(AssemblerMIPS64Test, SrlD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::SrlD, "srl.d ${reg1}, ${reg2}, ${reg3}"), "srl.d");
-}
-
-TEST_F(AssemblerMIPS64Test, SlliB) {
-  DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SlliB, 3, "slli.b ${reg1}, ${reg2}, {imm}"),
-            "slli.b");
-}
-
-TEST_F(AssemblerMIPS64Test, SlliH) {
-  DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SlliH, 4, "slli.h ${reg1}, ${reg2}, {imm}"),
-            "slli.h");
-}
-
-TEST_F(AssemblerMIPS64Test, SlliW) {
-  DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SlliW, 5, "slli.w ${reg1}, ${reg2}, {imm}"),
-            "slli.w");
-}
-
-TEST_F(AssemblerMIPS64Test, SlliD) {
-  DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SlliD, 6, "slli.d ${reg1}, ${reg2}, {imm}"),
-            "slli.d");
-}
-
-TEST_F(AssemblerMIPS64Test, MoveV) {
-  DriverStr(RepeatVV(&mips64::Mips64Assembler::MoveV, "move.v ${reg1}, ${reg2}"), "move.v");
-}
-
-TEST_F(AssemblerMIPS64Test, SplatiB) {
-  DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SplatiB, 4, "splati.b ${reg1}, ${reg2}[{imm}]"),
-            "splati.b");
-}
-
-TEST_F(AssemblerMIPS64Test, SplatiH) {
-  DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SplatiH, 3, "splati.h ${reg1}, ${reg2}[{imm}]"),
-            "splati.h");
-}
-
-TEST_F(AssemblerMIPS64Test, SplatiW) {
-  DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SplatiW, 2, "splati.w ${reg1}, ${reg2}[{imm}]"),
-            "splati.w");
-}
-
-TEST_F(AssemblerMIPS64Test, SplatiD) {
-  DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SplatiD, 1, "splati.d ${reg1}, ${reg2}[{imm}]"),
-            "splati.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Copy_sB) {
-  DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_sB, 4, "copy_s.b ${reg1}, ${reg2}[{imm}]"),
-            "copy_s.b");
-}
-
-TEST_F(AssemblerMIPS64Test, Copy_sH) {
-  DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_sH, 3, "copy_s.h ${reg1}, ${reg2}[{imm}]"),
-            "copy_s.h");
-}
-
-TEST_F(AssemblerMIPS64Test, Copy_sW) {
-  DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_sW, 2, "copy_s.w ${reg1}, ${reg2}[{imm}]"),
-            "copy_s.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Copy_sD) {
-  DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_sD, 1, "copy_s.d ${reg1}, ${reg2}[{imm}]"),
-            "copy_s.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Copy_uB) {
-  DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_uB, 4, "copy_u.b ${reg1}, ${reg2}[{imm}]"),
-            "copy_u.b");
-}
-
-TEST_F(AssemblerMIPS64Test, Copy_uH) {
-  DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_uH, 3, "copy_u.h ${reg1}, ${reg2}[{imm}]"),
-            "copy_u.h");
-}
-
-TEST_F(AssemblerMIPS64Test, Copy_uW) {
-  DriverStr(RepeatRVIb(&mips64::Mips64Assembler::Copy_uW, 2, "copy_u.w ${reg1}, ${reg2}[{imm}]"),
-            "copy_u.w");
-}
-
-TEST_F(AssemblerMIPS64Test, InsertB) {
-  DriverStr(RepeatVRIb(&mips64::Mips64Assembler::InsertB, 4, "insert.b ${reg1}[{imm}], ${reg2}"),
-            "insert.b");
-}
-
-TEST_F(AssemblerMIPS64Test, InsertH) {
-  DriverStr(RepeatVRIb(&mips64::Mips64Assembler::InsertH, 3, "insert.h ${reg1}[{imm}], ${reg2}"),
-            "insert.h");
-}
-
-TEST_F(AssemblerMIPS64Test, InsertW) {
-  DriverStr(RepeatVRIb(&mips64::Mips64Assembler::InsertW, 2, "insert.w ${reg1}[{imm}], ${reg2}"),
-            "insert.w");
-}
-
-TEST_F(AssemblerMIPS64Test, InsertD) {
-  DriverStr(RepeatVRIb(&mips64::Mips64Assembler::InsertD, 1, "insert.d ${reg1}[{imm}], ${reg2}"),
-            "insert.d");
-}
-
-TEST_F(AssemblerMIPS64Test, FillB) {
-  DriverStr(RepeatVR(&mips64::Mips64Assembler::FillB, "fill.b ${reg1}, ${reg2}"), "fill.b");
-}
-
-TEST_F(AssemblerMIPS64Test, FillH) {
-  DriverStr(RepeatVR(&mips64::Mips64Assembler::FillH, "fill.h ${reg1}, ${reg2}"), "fill.h");
-}
-
-TEST_F(AssemblerMIPS64Test, FillW) {
-  DriverStr(RepeatVR(&mips64::Mips64Assembler::FillW, "fill.w ${reg1}, ${reg2}"), "fill.w");
-}
-
-TEST_F(AssemblerMIPS64Test, FillD) {
-  DriverStr(RepeatVR(&mips64::Mips64Assembler::FillD, "fill.d ${reg1}, ${reg2}"), "fill.d");
-}
-
-TEST_F(AssemblerMIPS64Test, PcntB) {
-  DriverStr(RepeatVV(&mips64::Mips64Assembler::PcntB, "pcnt.b ${reg1}, ${reg2}"), "pcnt.b");
-}
-
-TEST_F(AssemblerMIPS64Test, PcntH) {
-  DriverStr(RepeatVV(&mips64::Mips64Assembler::PcntH, "pcnt.h ${reg1}, ${reg2}"), "pcnt.h");
-}
-
-TEST_F(AssemblerMIPS64Test, PcntW) {
-  DriverStr(RepeatVV(&mips64::Mips64Assembler::PcntW, "pcnt.w ${reg1}, ${reg2}"), "pcnt.w");
-}
-
-TEST_F(AssemblerMIPS64Test, PcntD) {
-  DriverStr(RepeatVV(&mips64::Mips64Assembler::PcntD, "pcnt.d ${reg1}, ${reg2}"), "pcnt.d");
-}
-
-TEST_F(AssemblerMIPS64Test, LdiB) {
-  DriverStr(RepeatVIb(&mips64::Mips64Assembler::LdiB, -8, "ldi.b ${reg}, {imm}"), "ldi.b");
-}
-
-TEST_F(AssemblerMIPS64Test, LdiH) {
-  DriverStr(RepeatVIb(&mips64::Mips64Assembler::LdiH, -10, "ldi.h ${reg}, {imm}"), "ldi.h");
-}
-
-TEST_F(AssemblerMIPS64Test, LdiW) {
-  DriverStr(RepeatVIb(&mips64::Mips64Assembler::LdiW, -10, "ldi.w ${reg}, {imm}"), "ldi.w");
-}
-
-TEST_F(AssemblerMIPS64Test, LdiD) {
-  DriverStr(RepeatVIb(&mips64::Mips64Assembler::LdiD, -10, "ldi.d ${reg}, {imm}"), "ldi.d");
-}
-
-TEST_F(AssemblerMIPS64Test, LdB) {
-  DriverStr(RepeatVRIb(&mips64::Mips64Assembler::LdB, -10, "ld.b ${reg1}, {imm}(${reg2})"), "ld.b");
-}
-
-TEST_F(AssemblerMIPS64Test, LdH) {
-  DriverStr(RepeatVRIb(&mips64::Mips64Assembler::LdH, -10, "ld.h ${reg1}, {imm}(${reg2})", 0, 2),
-            "ld.h");
-}
-
-TEST_F(AssemblerMIPS64Test, LdW) {
-  DriverStr(RepeatVRIb(&mips64::Mips64Assembler::LdW, -10, "ld.w ${reg1}, {imm}(${reg2})", 0, 4),
-            "ld.w");
-}
-
-TEST_F(AssemblerMIPS64Test, LdD) {
-  DriverStr(RepeatVRIb(&mips64::Mips64Assembler::LdD, -10, "ld.d ${reg1}, {imm}(${reg2})", 0, 8),
-            "ld.d");
-}
-
-TEST_F(AssemblerMIPS64Test, StB) {
-  DriverStr(RepeatVRIb(&mips64::Mips64Assembler::StB, -10, "st.b ${reg1}, {imm}(${reg2})"), "st.b");
-}
-
-TEST_F(AssemblerMIPS64Test, StH) {
-  DriverStr(RepeatVRIb(&mips64::Mips64Assembler::StH, -10, "st.h ${reg1}, {imm}(${reg2})", 0, 2),
-            "st.h");
-}
-
-TEST_F(AssemblerMIPS64Test, StW) {
-  DriverStr(RepeatVRIb(&mips64::Mips64Assembler::StW, -10, "st.w ${reg1}, {imm}(${reg2})", 0, 4),
-            "st.w");
-}
-
-TEST_F(AssemblerMIPS64Test, StD) {
-  DriverStr(RepeatVRIb(&mips64::Mips64Assembler::StD, -10, "st.d ${reg1}, {imm}(${reg2})", 0, 8),
-            "st.d");
-}
-
-TEST_F(AssemblerMIPS64Test, IlvlB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvlB, "ilvl.b ${reg1}, ${reg2}, ${reg3}"),
-            "ilvl.b");
-}
-
-TEST_F(AssemblerMIPS64Test, IlvlH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvlH, "ilvl.h ${reg1}, ${reg2}, ${reg3}"),
-            "ilvl.h");
-}
-
-TEST_F(AssemblerMIPS64Test, IlvlW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvlW, "ilvl.w ${reg1}, ${reg2}, ${reg3}"),
-            "ilvl.w");
-}
-
-TEST_F(AssemblerMIPS64Test, IlvlD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvlD, "ilvl.d ${reg1}, ${reg2}, ${reg3}"),
-            "ilvl.d");
-}
-
-TEST_F(AssemblerMIPS64Test, IlvrB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvrB, "ilvr.b ${reg1}, ${reg2}, ${reg3}"),
-            "ilvr.b");
-}
-
-TEST_F(AssemblerMIPS64Test, IlvrH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvrH, "ilvr.h ${reg1}, ${reg2}, ${reg3}"),
-            "ilvr.h");
-}
-
-TEST_F(AssemblerMIPS64Test, IlvrW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvrW, "ilvr.w ${reg1}, ${reg2}, ${reg3}"),
-            "ilvr.w");
-}
-
-TEST_F(AssemblerMIPS64Test, IlvrD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvrD, "ilvr.d ${reg1}, ${reg2}, ${reg3}"),
-            "ilvr.d");
-}
-
-TEST_F(AssemblerMIPS64Test, IlvevB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvevB, "ilvev.b ${reg1}, ${reg2}, ${reg3}"),
-            "ilvev.b");
-}
-
-TEST_F(AssemblerMIPS64Test, IlvevH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvevH, "ilvev.h ${reg1}, ${reg2}, ${reg3}"),
-            "ilvev.h");
-}
-
-TEST_F(AssemblerMIPS64Test, IlvevW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvevW, "ilvev.w ${reg1}, ${reg2}, ${reg3}"),
-            "ilvev.w");
-}
-
-TEST_F(AssemblerMIPS64Test, IlvevD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvevD, "ilvev.d ${reg1}, ${reg2}, ${reg3}"),
-            "ilvev.d");
-}
-
-TEST_F(AssemblerMIPS64Test, IlvodB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvodB, "ilvod.b ${reg1}, ${reg2}, ${reg3}"),
-            "ilvod.b");
-}
-
-TEST_F(AssemblerMIPS64Test, IlvodH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvodH, "ilvod.h ${reg1}, ${reg2}, ${reg3}"),
-            "ilvod.h");
-}
-
-TEST_F(AssemblerMIPS64Test, IlvodW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvodW, "ilvod.w ${reg1}, ${reg2}, ${reg3}"),
-            "ilvod.w");
-}
-
-TEST_F(AssemblerMIPS64Test, IlvodD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::IlvodD, "ilvod.d ${reg1}, ${reg2}, ${reg3}"),
-            "ilvod.d");
-}
-
-TEST_F(AssemblerMIPS64Test, MaddvB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::MaddvB, "maddv.b ${reg1}, ${reg2}, ${reg3}"),
-            "maddv.b");
-}
-
-TEST_F(AssemblerMIPS64Test, MaddvH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::MaddvH, "maddv.h ${reg1}, ${reg2}, ${reg3}"),
-            "maddv.h");
-}
-
-TEST_F(AssemblerMIPS64Test, MaddvW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::MaddvW, "maddv.w ${reg1}, ${reg2}, ${reg3}"),
-            "maddv.w");
-}
-
-TEST_F(AssemblerMIPS64Test, MaddvD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::MaddvD, "maddv.d ${reg1}, ${reg2}, ${reg3}"),
-            "maddv.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Hadd_sH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Hadd_sH, "hadd_s.h ${reg1}, ${reg2}, ${reg3}"),
-            "hadd_s.h");
-}
-
-TEST_F(AssemblerMIPS64Test, Hadd_sW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Hadd_sW, "hadd_s.w ${reg1}, ${reg2}, ${reg3}"),
-            "hadd_s.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Hadd_sD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Hadd_sD, "hadd_s.d ${reg1}, ${reg2}, ${reg3}"),
-            "hadd_s.d");
-}
-
-TEST_F(AssemblerMIPS64Test, Hadd_uH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Hadd_uH, "hadd_u.h ${reg1}, ${reg2}, ${reg3}"),
-            "hadd_u.h");
-}
-
-TEST_F(AssemblerMIPS64Test, Hadd_uW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Hadd_uW, "hadd_u.w ${reg1}, ${reg2}, ${reg3}"),
-            "hadd_u.w");
-}
-
-TEST_F(AssemblerMIPS64Test, Hadd_uD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::Hadd_uD, "hadd_u.d ${reg1}, ${reg2}, ${reg3}"),
-            "hadd_u.d");
-}
-
-TEST_F(AssemblerMIPS64Test, MsubvB) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::MsubvB, "msubv.b ${reg1}, ${reg2}, ${reg3}"),
-            "msubv.b");
-}
-
-TEST_F(AssemblerMIPS64Test, MsubvH) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::MsubvH, "msubv.h ${reg1}, ${reg2}, ${reg3}"),
-            "msubv.h");
-}
-
-TEST_F(AssemblerMIPS64Test, MsubvW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::MsubvW, "msubv.w ${reg1}, ${reg2}, ${reg3}"),
-            "msubv.w");
-}
-
-TEST_F(AssemblerMIPS64Test, MsubvD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::MsubvD, "msubv.d ${reg1}, ${reg2}, ${reg3}"),
-            "msubv.d");
-}
-
-TEST_F(AssemblerMIPS64Test, FmaddW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::FmaddW, "fmadd.w ${reg1}, ${reg2}, ${reg3}"),
-            "fmadd.w");
-}
-
-TEST_F(AssemblerMIPS64Test, FmaddD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::FmaddD, "fmadd.d ${reg1}, ${reg2}, ${reg3}"),
-            "fmadd.d");
-}
-
-TEST_F(AssemblerMIPS64Test, FmsubW) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::FmsubW, "fmsub.w ${reg1}, ${reg2}, ${reg3}"),
-            "fmsub.w");
-}
-
-TEST_F(AssemblerMIPS64Test, FmsubD) {
-  DriverStr(RepeatVVV(&mips64::Mips64Assembler::FmsubD, "fmsub.d ${reg1}, ${reg2}, ${reg3}"),
-            "fmsub.d");
-}
-
-#undef __
-
-}  // namespace art
diff --git a/compiler/utils/mips64/constants_mips64.h b/compiler/utils/mips64/constants_mips64.h
deleted file mode 100644
index 41eb77c..0000000
--- a/compiler/utils/mips64/constants_mips64.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_MIPS64_CONSTANTS_MIPS64_H_
-#define ART_COMPILER_UTILS_MIPS64_CONSTANTS_MIPS64_H_
-
-#include <iosfwd>
-
-#include <android-base/logging.h>
-
-#include "arch/mips64/registers_mips64.h"
-#include "base/globals.h"
-#include "base/macros.h"
-
-namespace art {
-namespace mips64 {
-
-// Constants used for the decoding or encoding of the individual fields of instructions.
-enum InstructionFields {
-  kOpcodeShift = 26,
-  kOpcodeBits = 6,
-  kRsShift = 21,
-  kRsBits = 5,
-  kRtShift = 16,
-  kRtBits = 5,
-  kRdShift = 11,
-  kRdBits = 5,
-  kShamtShift = 6,
-  kShamtBits = 5,
-  kFunctShift = 0,
-  kFunctBits = 6,
-
-  kFmtShift = 21,
-  kFmtBits = 5,
-  kFtShift = 16,
-  kFtBits = 5,
-  kFsShift = 11,
-  kFsBits = 5,
-  kFdShift = 6,
-  kFdBits = 5,
-
-  kMsaOperationShift = 23,
-  kMsaELMOperationShift = 22,
-  kMsa2ROperationShift = 18,
-  kMsa2RFOperationShift = 17,
-  kDfShift = 21,
-  kDfMShift = 16,
-  kDf2RShift = 16,
-  kDfNShift = 16,
-  kWtShift = 16,
-  kWtBits = 5,
-  kWsShift = 11,
-  kWsBits = 5,
-  kWdShift = 6,
-  kWdBits = 5,
-  kS10Shift = 16,
-  kI10Shift = 11,
-  kS10MinorShift = 2,
-
-  kBranchOffsetMask = 0x0000ffff,
-  kJumpOffsetMask = 0x03ffffff,
-  kMsaMajorOpcode = 0x1e,
-  kMsaDfMByteMask = 0x70,
-  kMsaDfMHalfwordMask = 0x60,
-  kMsaDfMWordMask = 0x40,
-  kMsaDfMDoublewordMask = 0x00,
-  kMsaDfNByteMask = 0x00,
-  kMsaDfNHalfwordMask = 0x20,
-  kMsaDfNWordMask = 0x30,
-  kMsaDfNDoublewordMask = 0x38,
-  kMsaS10Mask = 0x3ff,
-};
-
-enum ScaleFactor {
-  TIMES_1 = 0,
-  TIMES_2 = 1,
-  TIMES_4 = 2,
-  TIMES_8 = 3
-};
-
-class Instr {
- public:
-  static const uint32_t kBreakPointInstruction = 0x0000000D;
-
-  bool IsBreakPoint() {
-    return ((*reinterpret_cast<const uint32_t*>(this)) & 0xFC00003F) == kBreakPointInstruction;
-  }
-
-  // Instructions are read out of a code stream. The only way to get a
-  // reference to an instruction is to convert a pointer. There is no way
-  // to allocate or create instances of class Instr.
-  // Use the At(pc) function to create references to Instr.
-  static Instr* At(uintptr_t pc) { return reinterpret_cast<Instr*>(pc); }
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
-};
-
-}  // namespace mips64
-}  // namespace art
-
-#endif  // ART_COMPILER_UTILS_MIPS64_CONSTANTS_MIPS64_H_
diff --git a/compiler/utils/mips64/managed_register_mips64.cc b/compiler/utils/mips64/managed_register_mips64.cc
deleted file mode 100644
index 01cb6dd..0000000
--- a/compiler/utils/mips64/managed_register_mips64.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "managed_register_mips64.h"
-
-#include "base/globals.h"
-
-namespace art {
-namespace mips64 {
-
-bool Mips64ManagedRegister::Overlaps(const Mips64ManagedRegister& other) const {
-  if (IsNoRegister() || other.IsNoRegister()) return false;
-  CHECK(IsValidManagedRegister());
-  CHECK(other.IsValidManagedRegister());
-  if (Equals(other)) return true;
-  if (IsFpuRegister() && other.IsVectorRegister()) {
-    return (AsFpuRegister() == other.AsOverlappingFpuRegister());
-  } else if (IsVectorRegister() && other.IsFpuRegister()) {
-    return (AsVectorRegister() == other.AsOverlappingVectorRegister());
-  }
-  return false;
-}
-
-void Mips64ManagedRegister::Print(std::ostream& os) const {
-  if (!IsValidManagedRegister()) {
-    os << "No Register";
-  } else if (IsGpuRegister()) {
-    os << "GPU: " << static_cast<int>(AsGpuRegister());
-  } else if (IsFpuRegister()) {
-     os << "FpuRegister: " << static_cast<int>(AsFpuRegister());
-  } else if (IsVectorRegister()) {
-     os << "VectorRegister: " << static_cast<int>(AsVectorRegister());
-  } else {
-    os << "??: " << RegId();
-  }
-}
-
-std::ostream& operator<<(std::ostream& os, const Mips64ManagedRegister& reg) {
-  reg.Print(os);
-  return os;
-}
-
-}  // namespace mips64
-}  // namespace art
diff --git a/compiler/utils/mips64/managed_register_mips64.h b/compiler/utils/mips64/managed_register_mips64.h
deleted file mode 100644
index 94166d3..0000000
--- a/compiler/utils/mips64/managed_register_mips64.h
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_UTILS_MIPS64_MANAGED_REGISTER_MIPS64_H_
-#define ART_COMPILER_UTILS_MIPS64_MANAGED_REGISTER_MIPS64_H_
-
-#include "constants_mips64.h"
-#include "utils/managed_register.h"
-
-namespace art {
-namespace mips64 {
-
-const int kNumberOfGpuRegIds = kNumberOfGpuRegisters;
-const int kNumberOfGpuAllocIds = kNumberOfGpuRegisters;
-
-const int kNumberOfFpuRegIds = kNumberOfFpuRegisters;
-const int kNumberOfFpuAllocIds = kNumberOfFpuRegisters;
-
-const int kNumberOfVecRegIds = kNumberOfVectorRegisters;
-const int kNumberOfVecAllocIds = kNumberOfVectorRegisters;
-
-const int kNumberOfRegIds = kNumberOfGpuRegIds + kNumberOfFpuRegIds + kNumberOfVecRegIds;
-const int kNumberOfAllocIds = kNumberOfGpuAllocIds + kNumberOfFpuAllocIds + kNumberOfVecAllocIds;
-
-// Register ids map:
-//   [0..R[  core registers (enum GpuRegister)
-//   [R..F[  floating-point registers (enum FpuRegister)
-//   [F..W[  MSA vector registers (enum VectorRegister)
-// where
-//   R = kNumberOfGpuRegIds
-//   F = R + kNumberOfFpuRegIds
-//   W = F + kNumberOfVecRegIds
-
-// An instance of class 'ManagedRegister' represents a single Mips64 register.
-// A register can be one of the following:
-//  * core register (enum GpuRegister)
-//  * floating-point register (enum FpuRegister)
-//  * MSA vector register (enum VectorRegister)
-//
-// 'ManagedRegister::NoRegister()' provides an invalid register.
-// There is a one-to-one mapping between ManagedRegister and register id.
-class Mips64ManagedRegister : public ManagedRegister {
- public:
-  constexpr GpuRegister AsGpuRegister() const {
-    CHECK(IsGpuRegister());
-    return static_cast<GpuRegister>(id_);
-  }
-
-  constexpr FpuRegister AsFpuRegister() const {
-    CHECK(IsFpuRegister());
-    return static_cast<FpuRegister>(id_ - kNumberOfGpuRegIds);
-  }
-
-  constexpr VectorRegister AsVectorRegister() const {
-    CHECK(IsVectorRegister());
-    return static_cast<VectorRegister>(id_ - (kNumberOfGpuRegIds + kNumberOfFpuRegisters));
-  }
-
-  constexpr FpuRegister AsOverlappingFpuRegister() const {
-    CHECK(IsValidManagedRegister());
-    return static_cast<FpuRegister>(AsVectorRegister());
-  }
-
-  constexpr VectorRegister AsOverlappingVectorRegister() const {
-    CHECK(IsValidManagedRegister());
-    return static_cast<VectorRegister>(AsFpuRegister());
-  }
-
-  constexpr bool IsGpuRegister() const {
-    CHECK(IsValidManagedRegister());
-    return (0 <= id_) && (id_ < kNumberOfGpuRegIds);
-  }
-
-  constexpr bool IsFpuRegister() const {
-    CHECK(IsValidManagedRegister());
-    const int test = id_ - kNumberOfGpuRegIds;
-    return (0 <= test) && (test < kNumberOfFpuRegIds);
-  }
-
-  constexpr bool IsVectorRegister() const {
-    CHECK(IsValidManagedRegister());
-    const int test = id_ - (kNumberOfGpuRegIds + kNumberOfFpuRegIds);
-    return (0 <= test) && (test < kNumberOfVecRegIds);
-  }
-
-  void Print(std::ostream& os) const;
-
-  // Returns true if the two managed-registers ('this' and 'other') overlap.
-  // Either managed-register may be the NoRegister. If both are the NoRegister
-  // then false is returned.
-  bool Overlaps(const Mips64ManagedRegister& other) const;
-
-  static constexpr Mips64ManagedRegister FromGpuRegister(GpuRegister r) {
-    CHECK_NE(r, kNoGpuRegister);
-    return FromRegId(r);
-  }
-
-  static constexpr Mips64ManagedRegister FromFpuRegister(FpuRegister r) {
-    CHECK_NE(r, kNoFpuRegister);
-    return FromRegId(r + kNumberOfGpuRegIds);
-  }
-
-  static constexpr Mips64ManagedRegister FromVectorRegister(VectorRegister r) {
-    CHECK_NE(r, kNoVectorRegister);
-    return FromRegId(r + kNumberOfGpuRegIds + kNumberOfFpuRegIds);
-  }
-
- private:
-  constexpr bool IsValidManagedRegister() const {
-    return (0 <= id_) && (id_ < kNumberOfRegIds);
-  }
-
-  constexpr int RegId() const {
-    CHECK(!IsNoRegister());
-    return id_;
-  }
-
-  int AllocId() const {
-    CHECK(IsValidManagedRegister());
-    CHECK_LT(id_, kNumberOfAllocIds);
-    return id_;
-  }
-
-  int AllocIdLow() const;
-  int AllocIdHigh() const;
-
-  friend class ManagedRegister;
-
-  explicit constexpr Mips64ManagedRegister(int reg_id) : ManagedRegister(reg_id) {}
-
-  static constexpr Mips64ManagedRegister FromRegId(int reg_id) {
-    Mips64ManagedRegister reg(reg_id);
-    CHECK(reg.IsValidManagedRegister());
-    return reg;
-  }
-};
-
-std::ostream& operator<<(std::ostream& os, const Mips64ManagedRegister& reg);
-
-}  // namespace mips64
-
-constexpr inline mips64::Mips64ManagedRegister ManagedRegister::AsMips64() const {
-  mips64::Mips64ManagedRegister reg(id_);
-  CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister());
-  return reg;
-}
-
-}  // namespace art
-
-#endif  // ART_COMPILER_UTILS_MIPS64_MANAGED_REGISTER_MIPS64_H_
diff --git a/compiler/utils/mips64/managed_register_mips64_test.cc b/compiler/utils/mips64/managed_register_mips64_test.cc
deleted file mode 100644
index bbfeeee..0000000
--- a/compiler/utils/mips64/managed_register_mips64_test.cc
+++ /dev/null
@@ -1,481 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "managed_register_mips64.h"
-
-#include "base/globals.h"
-#include "gtest/gtest.h"
-
-namespace art {
-namespace mips64 {
-
-TEST(Mips64ManagedRegister, NoRegister) {
-  Mips64ManagedRegister reg = ManagedRegister::NoRegister().AsMips64();
-  EXPECT_TRUE(reg.IsNoRegister());
-  EXPECT_FALSE(reg.Overlaps(reg));
-}
-
-TEST(Mips64ManagedRegister, GpuRegister) {
-  Mips64ManagedRegister reg = Mips64ManagedRegister::FromGpuRegister(ZERO);
-  EXPECT_FALSE(reg.IsNoRegister());
-  EXPECT_TRUE(reg.IsGpuRegister());
-  EXPECT_FALSE(reg.IsFpuRegister());
-  EXPECT_FALSE(reg.IsVectorRegister());
-  EXPECT_EQ(ZERO, reg.AsGpuRegister());
-
-  reg = Mips64ManagedRegister::FromGpuRegister(AT);
-  EXPECT_FALSE(reg.IsNoRegister());
-  EXPECT_TRUE(reg.IsGpuRegister());
-  EXPECT_FALSE(reg.IsFpuRegister());
-  EXPECT_FALSE(reg.IsVectorRegister());
-  EXPECT_EQ(AT, reg.AsGpuRegister());
-
-  reg = Mips64ManagedRegister::FromGpuRegister(V0);
-  EXPECT_FALSE(reg.IsNoRegister());
-  EXPECT_TRUE(reg.IsGpuRegister());
-  EXPECT_FALSE(reg.IsFpuRegister());
-  EXPECT_FALSE(reg.IsVectorRegister());
-  EXPECT_EQ(V0, reg.AsGpuRegister());
-
-  reg = Mips64ManagedRegister::FromGpuRegister(A0);
-  EXPECT_FALSE(reg.IsNoRegister());
-  EXPECT_TRUE(reg.IsGpuRegister());
-  EXPECT_FALSE(reg.IsFpuRegister());
-  EXPECT_FALSE(reg.IsVectorRegister());
-  EXPECT_EQ(A0, reg.AsGpuRegister());
-
-  reg = Mips64ManagedRegister::FromGpuRegister(A7);
-  EXPECT_FALSE(reg.IsNoRegister());
-  EXPECT_TRUE(reg.IsGpuRegister());
-  EXPECT_FALSE(reg.IsFpuRegister());
-  EXPECT_FALSE(reg.IsVectorRegister());
-  EXPECT_EQ(A7, reg.AsGpuRegister());
-
-  reg = Mips64ManagedRegister::FromGpuRegister(T0);
-  EXPECT_FALSE(reg.IsNoRegister());
-  EXPECT_TRUE(reg.IsGpuRegister());
-  EXPECT_FALSE(reg.IsFpuRegister());
-  EXPECT_FALSE(reg.IsVectorRegister());
-  EXPECT_EQ(T0, reg.AsGpuRegister());
-
-  reg = Mips64ManagedRegister::FromGpuRegister(T3);
-  EXPECT_FALSE(reg.IsNoRegister());
-  EXPECT_TRUE(reg.IsGpuRegister());
-  EXPECT_FALSE(reg.IsFpuRegister());
-  EXPECT_FALSE(reg.IsVectorRegister());
-  EXPECT_EQ(T3, reg.AsGpuRegister());
-
-  reg = Mips64ManagedRegister::FromGpuRegister(S0);
-  EXPECT_FALSE(reg.IsNoRegister());
-  EXPECT_TRUE(reg.IsGpuRegister());
-  EXPECT_FALSE(reg.IsFpuRegister());
-  EXPECT_FALSE(reg.IsVectorRegister());
-  EXPECT_EQ(S0, reg.AsGpuRegister());
-
-  reg = Mips64ManagedRegister::FromGpuRegister(GP);
-  EXPECT_FALSE(reg.IsNoRegister());
-  EXPECT_TRUE(reg.IsGpuRegister());
-  EXPECT_FALSE(reg.IsFpuRegister());
-  EXPECT_FALSE(reg.IsVectorRegister());
-  EXPECT_EQ(GP, reg.AsGpuRegister());
-
-  reg = Mips64ManagedRegister::FromGpuRegister(SP);
-  EXPECT_FALSE(reg.IsNoRegister());
-  EXPECT_TRUE(reg.IsGpuRegister());
-  EXPECT_FALSE(reg.IsFpuRegister());
-  EXPECT_FALSE(reg.IsVectorRegister());
-  EXPECT_EQ(SP, reg.AsGpuRegister());
-
-  reg = Mips64ManagedRegister::FromGpuRegister(RA);
-  EXPECT_FALSE(reg.IsNoRegister());
-  EXPECT_TRUE(reg.IsGpuRegister());
-  EXPECT_FALSE(reg.IsFpuRegister());
-  EXPECT_FALSE(reg.IsVectorRegister());
-  EXPECT_EQ(RA, reg.AsGpuRegister());
-}
-
-TEST(Mips64ManagedRegister, FpuRegister) {
-  Mips64ManagedRegister reg = Mips64ManagedRegister::FromFpuRegister(F0);
-  Mips64ManagedRegister vreg = Mips64ManagedRegister::FromVectorRegister(W0);
-  EXPECT_FALSE(reg.IsNoRegister());
-  EXPECT_FALSE(reg.IsGpuRegister());
-  EXPECT_TRUE(reg.IsFpuRegister());
-  EXPECT_FALSE(reg.IsVectorRegister());
-  EXPECT_TRUE(reg.Overlaps(vreg));
-  EXPECT_EQ(F0, reg.AsFpuRegister());
-  EXPECT_EQ(W0, reg.AsOverlappingVectorRegister());
-  EXPECT_TRUE(reg.Equals(Mips64ManagedRegister::FromFpuRegister(F0)));
-
-  reg = Mips64ManagedRegister::FromFpuRegister(F1);
-  vreg = Mips64ManagedRegister::FromVectorRegister(W1);
-  EXPECT_FALSE(reg.IsNoRegister());
-  EXPECT_FALSE(reg.IsGpuRegister());
-  EXPECT_TRUE(reg.IsFpuRegister());
-  EXPECT_FALSE(reg.IsVectorRegister());
-  EXPECT_TRUE(reg.Overlaps(vreg));
-  EXPECT_EQ(F1, reg.AsFpuRegister());
-  EXPECT_EQ(W1, reg.AsOverlappingVectorRegister());
-  EXPECT_TRUE(reg.Equals(Mips64ManagedRegister::FromFpuRegister(F1)));
-
-  reg = Mips64ManagedRegister::FromFpuRegister(F20);
-  vreg = Mips64ManagedRegister::FromVectorRegister(W20);
-  EXPECT_FALSE(reg.IsNoRegister());
-  EXPECT_FALSE(reg.IsGpuRegister());
-  EXPECT_TRUE(reg.IsFpuRegister());
-  EXPECT_FALSE(reg.IsVectorRegister());
-  EXPECT_TRUE(reg.Overlaps(vreg));
-  EXPECT_EQ(F20, reg.AsFpuRegister());
-  EXPECT_EQ(W20, reg.AsOverlappingVectorRegister());
-  EXPECT_TRUE(reg.Equals(Mips64ManagedRegister::FromFpuRegister(F20)));
-
-  reg = Mips64ManagedRegister::FromFpuRegister(F31);
-  vreg = Mips64ManagedRegister::FromVectorRegister(W31);
-  EXPECT_FALSE(reg.IsNoRegister());
-  EXPECT_FALSE(reg.IsGpuRegister());
-  EXPECT_TRUE(reg.IsFpuRegister());
-  EXPECT_FALSE(reg.IsVectorRegister());
-  EXPECT_TRUE(reg.Overlaps(vreg));
-  EXPECT_EQ(F31, reg.AsFpuRegister());
-  EXPECT_EQ(W31, reg.AsOverlappingVectorRegister());
-  EXPECT_TRUE(reg.Equals(Mips64ManagedRegister::FromFpuRegister(F31)));
-}
-
-TEST(Mips64ManagedRegister, VectorRegister) {
-  Mips64ManagedRegister reg = Mips64ManagedRegister::FromVectorRegister(W0);
-  Mips64ManagedRegister freg = Mips64ManagedRegister::FromFpuRegister(F0);
-  EXPECT_FALSE(reg.IsNoRegister());
-  EXPECT_FALSE(reg.IsGpuRegister());
-  EXPECT_FALSE(reg.IsFpuRegister());
-  EXPECT_TRUE(reg.IsVectorRegister());
-  EXPECT_TRUE(reg.Overlaps(freg));
-  EXPECT_EQ(W0, reg.AsVectorRegister());
-  EXPECT_EQ(F0, reg.AsOverlappingFpuRegister());
-  EXPECT_TRUE(reg.Equals(Mips64ManagedRegister::FromVectorRegister(W0)));
-
-  reg = Mips64ManagedRegister::FromVectorRegister(W2);
-  freg = Mips64ManagedRegister::FromFpuRegister(F2);
-  EXPECT_FALSE(reg.IsNoRegister());
-  EXPECT_FALSE(reg.IsGpuRegister());
-  EXPECT_FALSE(reg.IsFpuRegister());
-  EXPECT_TRUE(reg.IsVectorRegister());
-  EXPECT_TRUE(reg.Overlaps(freg));
-  EXPECT_EQ(W2, reg.AsVectorRegister());
-  EXPECT_EQ(F2, reg.AsOverlappingFpuRegister());
-  EXPECT_TRUE(reg.Equals(Mips64ManagedRegister::FromVectorRegister(W2)));
-
-  reg = Mips64ManagedRegister::FromVectorRegister(W13);
-  freg = Mips64ManagedRegister::FromFpuRegister(F13);
-  EXPECT_FALSE(reg.IsNoRegister());
-  EXPECT_FALSE(reg.IsGpuRegister());
-  EXPECT_FALSE(reg.IsFpuRegister());
-  EXPECT_TRUE(reg.IsVectorRegister());
-  EXPECT_TRUE(reg.Overlaps(freg));
-  EXPECT_EQ(W13, reg.AsVectorRegister());
-  EXPECT_EQ(F13, reg.AsOverlappingFpuRegister());
-  EXPECT_TRUE(reg.Equals(Mips64ManagedRegister::FromVectorRegister(W13)));
-
-  reg = Mips64ManagedRegister::FromVectorRegister(W29);
-  freg = Mips64ManagedRegister::FromFpuRegister(F29);
-  EXPECT_FALSE(reg.IsNoRegister());
-  EXPECT_FALSE(reg.IsGpuRegister());
-  EXPECT_FALSE(reg.IsFpuRegister());
-  EXPECT_TRUE(reg.IsVectorRegister());
-  EXPECT_TRUE(reg.Overlaps(freg));
-  EXPECT_EQ(W29, reg.AsVectorRegister());
-  EXPECT_EQ(F29, reg.AsOverlappingFpuRegister());
-  EXPECT_TRUE(reg.Equals(Mips64ManagedRegister::FromVectorRegister(W29)));
-}
-
-TEST(Mips64ManagedRegister, Equals) {
-  ManagedRegister no_reg = ManagedRegister::NoRegister();
-  EXPECT_TRUE(no_reg.Equals(Mips64ManagedRegister::NoRegister()));
-  EXPECT_FALSE(no_reg.Equals(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_FALSE(no_reg.Equals(Mips64ManagedRegister::FromGpuRegister(A1)));
-  EXPECT_FALSE(no_reg.Equals(Mips64ManagedRegister::FromGpuRegister(S2)));
-  EXPECT_FALSE(no_reg.Equals(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_FALSE(no_reg.Equals(Mips64ManagedRegister::FromVectorRegister(W0)));
-
-  Mips64ManagedRegister reg_ZERO = Mips64ManagedRegister::FromGpuRegister(ZERO);
-  EXPECT_FALSE(reg_ZERO.Equals(Mips64ManagedRegister::NoRegister()));
-  EXPECT_TRUE(reg_ZERO.Equals(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_FALSE(reg_ZERO.Equals(Mips64ManagedRegister::FromGpuRegister(A1)));
-  EXPECT_FALSE(reg_ZERO.Equals(Mips64ManagedRegister::FromGpuRegister(S2)));
-  EXPECT_FALSE(reg_ZERO.Equals(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_FALSE(reg_ZERO.Equals(Mips64ManagedRegister::FromVectorRegister(W0)));
-
-  Mips64ManagedRegister reg_A1 = Mips64ManagedRegister::FromGpuRegister(A1);
-  EXPECT_FALSE(reg_A1.Equals(Mips64ManagedRegister::NoRegister()));
-  EXPECT_FALSE(reg_A1.Equals(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_FALSE(reg_A1.Equals(Mips64ManagedRegister::FromGpuRegister(A0)));
-  EXPECT_TRUE(reg_A1.Equals(Mips64ManagedRegister::FromGpuRegister(A1)));
-  EXPECT_FALSE(reg_A1.Equals(Mips64ManagedRegister::FromGpuRegister(S2)));
-  EXPECT_FALSE(reg_A1.Equals(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_FALSE(reg_A1.Equals(Mips64ManagedRegister::FromVectorRegister(W0)));
-
-  Mips64ManagedRegister reg_S2 = Mips64ManagedRegister::FromGpuRegister(S2);
-  EXPECT_FALSE(reg_S2.Equals(Mips64ManagedRegister::NoRegister()));
-  EXPECT_FALSE(reg_S2.Equals(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_FALSE(reg_S2.Equals(Mips64ManagedRegister::FromGpuRegister(A1)));
-  EXPECT_FALSE(reg_S2.Equals(Mips64ManagedRegister::FromGpuRegister(S1)));
-  EXPECT_TRUE(reg_S2.Equals(Mips64ManagedRegister::FromGpuRegister(S2)));
-  EXPECT_FALSE(reg_S2.Equals(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_FALSE(reg_S2.Equals(Mips64ManagedRegister::FromVectorRegister(W0)));
-
-  Mips64ManagedRegister reg_F0 = Mips64ManagedRegister::FromFpuRegister(F0);
-  EXPECT_FALSE(reg_F0.Equals(Mips64ManagedRegister::NoRegister()));
-  EXPECT_FALSE(reg_F0.Equals(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_FALSE(reg_F0.Equals(Mips64ManagedRegister::FromGpuRegister(A1)));
-  EXPECT_FALSE(reg_F0.Equals(Mips64ManagedRegister::FromGpuRegister(S2)));
-  EXPECT_TRUE(reg_F0.Equals(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_FALSE(reg_F0.Equals(Mips64ManagedRegister::FromFpuRegister(F1)));
-  EXPECT_FALSE(reg_F0.Equals(Mips64ManagedRegister::FromFpuRegister(F31)));
-  EXPECT_FALSE(reg_F0.Equals(Mips64ManagedRegister::FromVectorRegister(W0)));
-
-  Mips64ManagedRegister reg_F31 = Mips64ManagedRegister::FromFpuRegister(F31);
-  EXPECT_FALSE(reg_F31.Equals(Mips64ManagedRegister::NoRegister()));
-  EXPECT_FALSE(reg_F31.Equals(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_FALSE(reg_F31.Equals(Mips64ManagedRegister::FromGpuRegister(A1)));
-  EXPECT_FALSE(reg_F31.Equals(Mips64ManagedRegister::FromGpuRegister(S2)));
-  EXPECT_FALSE(reg_F31.Equals(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_FALSE(reg_F31.Equals(Mips64ManagedRegister::FromFpuRegister(F1)));
-  EXPECT_TRUE(reg_F31.Equals(Mips64ManagedRegister::FromFpuRegister(F31)));
-  EXPECT_FALSE(reg_F31.Equals(Mips64ManagedRegister::FromVectorRegister(W0)));
-
-  Mips64ManagedRegister reg_W0 = Mips64ManagedRegister::FromVectorRegister(W0);
-  EXPECT_FALSE(reg_W0.Equals(Mips64ManagedRegister::NoRegister()));
-  EXPECT_FALSE(reg_W0.Equals(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_FALSE(reg_W0.Equals(Mips64ManagedRegister::FromGpuRegister(A1)));
-  EXPECT_FALSE(reg_W0.Equals(Mips64ManagedRegister::FromGpuRegister(S1)));
-  EXPECT_FALSE(reg_W0.Equals(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_TRUE(reg_W0.Equals(Mips64ManagedRegister::FromVectorRegister(W0)));
-  EXPECT_FALSE(reg_W0.Equals(Mips64ManagedRegister::FromVectorRegister(W1)));
-  EXPECT_FALSE(reg_W0.Equals(Mips64ManagedRegister::FromVectorRegister(W31)));
-
-  Mips64ManagedRegister reg_W31 = Mips64ManagedRegister::FromVectorRegister(W31);
-  EXPECT_FALSE(reg_W31.Equals(Mips64ManagedRegister::NoRegister()));
-  EXPECT_FALSE(reg_W31.Equals(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_FALSE(reg_W31.Equals(Mips64ManagedRegister::FromGpuRegister(A1)));
-  EXPECT_FALSE(reg_W31.Equals(Mips64ManagedRegister::FromGpuRegister(S1)));
-  EXPECT_FALSE(reg_W31.Equals(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_FALSE(reg_W31.Equals(Mips64ManagedRegister::FromVectorRegister(W0)));
-  EXPECT_FALSE(reg_W31.Equals(Mips64ManagedRegister::FromVectorRegister(W1)));
-  EXPECT_TRUE(reg_W31.Equals(Mips64ManagedRegister::FromVectorRegister(W31)));
-}
-
-TEST(Mips64ManagedRegister, Overlaps) {
-  Mips64ManagedRegister reg = Mips64ManagedRegister::FromFpuRegister(F0);
-  Mips64ManagedRegister reg_o = Mips64ManagedRegister::FromVectorRegister(W0);
-  EXPECT_TRUE(reg.Overlaps(reg_o));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
-  EXPECT_EQ(F0, reg_o.AsOverlappingFpuRegister());
-  EXPECT_EQ(W0, reg.AsOverlappingVectorRegister());
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
-
-  reg = Mips64ManagedRegister::FromFpuRegister(F4);
-  reg_o = Mips64ManagedRegister::FromVectorRegister(W4);
-  EXPECT_TRUE(reg.Overlaps(reg_o));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
-  EXPECT_EQ(F4, reg_o.AsOverlappingFpuRegister());
-  EXPECT_EQ(W4, reg.AsOverlappingVectorRegister());
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
-
-  reg = Mips64ManagedRegister::FromFpuRegister(F16);
-  reg_o = Mips64ManagedRegister::FromVectorRegister(W16);
-  EXPECT_TRUE(reg.Overlaps(reg_o));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
-  EXPECT_EQ(F16, reg_o.AsOverlappingFpuRegister());
-  EXPECT_EQ(W16, reg.AsOverlappingVectorRegister());
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
-
-  reg = Mips64ManagedRegister::FromFpuRegister(F31);
-  reg_o = Mips64ManagedRegister::FromVectorRegister(W31);
-  EXPECT_TRUE(reg.Overlaps(reg_o));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
-  EXPECT_EQ(F31, reg_o.AsOverlappingFpuRegister());
-  EXPECT_EQ(W31, reg.AsOverlappingVectorRegister());
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
-
-  reg = Mips64ManagedRegister::FromVectorRegister(W0);
-  reg_o = Mips64ManagedRegister::FromFpuRegister(F0);
-  EXPECT_TRUE(reg.Overlaps(reg_o));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
-  EXPECT_EQ(W0, reg_o.AsOverlappingVectorRegister());
-  EXPECT_EQ(F0, reg.AsOverlappingFpuRegister());
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
-
-  reg = Mips64ManagedRegister::FromVectorRegister(W4);
-  reg_o = Mips64ManagedRegister::FromFpuRegister(F4);
-  EXPECT_TRUE(reg.Overlaps(reg_o));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
-  EXPECT_EQ(W4, reg_o.AsOverlappingVectorRegister());
-  EXPECT_EQ(F4, reg.AsOverlappingFpuRegister());
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
-
-  reg = Mips64ManagedRegister::FromVectorRegister(W16);
-  reg_o = Mips64ManagedRegister::FromFpuRegister(F16);
-  EXPECT_TRUE(reg.Overlaps(reg_o));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
-  EXPECT_EQ(W16, reg_o.AsOverlappingVectorRegister());
-  EXPECT_EQ(F16, reg.AsOverlappingFpuRegister());
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
-
-  reg = Mips64ManagedRegister::FromVectorRegister(W31);
-  reg_o = Mips64ManagedRegister::FromFpuRegister(F31);
-  EXPECT_TRUE(reg.Overlaps(reg_o));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
-  EXPECT_EQ(W31, reg_o.AsOverlappingVectorRegister());
-  EXPECT_EQ(F31, reg.AsOverlappingFpuRegister());
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
-
-  reg = Mips64ManagedRegister::FromGpuRegister(ZERO);
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
-
-  reg = Mips64ManagedRegister::FromGpuRegister(A0);
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
-
-  reg = Mips64ManagedRegister::FromGpuRegister(S0);
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
-
-  reg = Mips64ManagedRegister::FromGpuRegister(RA);
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(ZERO)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(A0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(S0)));
-  EXPECT_TRUE(reg.Overlaps(Mips64ManagedRegister::FromGpuRegister(RA)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromFpuRegister(F31)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W0)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W4)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W16)));
-  EXPECT_FALSE(reg.Overlaps(Mips64ManagedRegister::FromVectorRegister(W31)));
-}
-
-}  // namespace mips64
-}  // namespace art
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 4b073bd..55f7691 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -59,96 +59,11 @@
   }
 }
 
-uint8_t X86Assembler::EmitVexByteZero(bool is_two_byte) {
-  uint8_t vex_zero = 0xC0;
-  if (!is_two_byte) {
-    vex_zero |= 0xC4;
-  } else {
-    vex_zero |= 0xC5;
+bool X86Assembler::CpuHasAVXorAVX2FeatureFlag() {
+  if (has_AVX_ || has_AVX2_) {
+    return true;
   }
-  return vex_zero;
-}
-
-uint8_t X86Assembler::EmitVexByte1(bool r, bool x, bool b, int mmmmm ) {
-  // VEX Byte 1
-  uint8_t vex_prefix = 0;
-  if (!r) {
-    vex_prefix |= 0x80;  // VEX.R
-  }
-  if (!x) {
-    vex_prefix |= 0x40;  // VEX.X
-  }
-  if (!b) {
-    vex_prefix |= 0x20;  // VEX.B
-  }
-
-  // VEX.mmmmm
-  switch (mmmmm) {
-  case 1:
-    // implied 0F leading opcode byte
-    vex_prefix |= 0x01;
-    break;
-  case 2:
-    // implied leading 0F 38 opcode byte
-    vex_prefix |= 0x02;
-    break;
-  case 3:
-    // implied leading OF 3A opcode byte
-    vex_prefix |= 0x03;
-    break;
-  default:
-    LOG(FATAL) << "unknown opcode bytes";
-  }
-  return vex_prefix;
-}
-
-uint8_t X86Assembler::EmitVexByte2(bool w, int l, X86ManagedRegister operand, int pp) {
-  uint8_t vex_prefix = 0;
-  // VEX Byte 2
-  if (w) {
-    vex_prefix |= 0x80;
-  }
-  // VEX.vvvv
-  if (operand.IsXmmRegister()) {
-    XmmRegister vvvv = operand.AsXmmRegister();
-    int inverted_reg = 15-static_cast<int>(vvvv);
-    uint8_t reg = static_cast<uint8_t>(inverted_reg);
-    vex_prefix |= ((reg & 0x0F) << 3);
-  } else if (operand.IsCpuRegister()) {
-    Register vvvv = operand.AsCpuRegister();
-    int inverted_reg = 15 - static_cast<int>(vvvv);
-    uint8_t reg = static_cast<uint8_t>(inverted_reg);
-    vex_prefix |= ((reg & 0x0F) << 3);
-  }
-
-  // VEX.L
-  if (l == 256) {
-    vex_prefix |= 0x04;
-  }
-
-  // VEX.pp
-  switch (pp) {
-  case 0:
-    // SIMD Pefix - None
-    vex_prefix |= 0x00;
-    break;
-  case 1:
-    // SIMD Prefix - 66
-    vex_prefix |= 0x01;
-    break;
-  case 2:
-    // SIMD Prefix - F3
-    vex_prefix |= 0x02;
-    break;
-  case 3:
-    // SIMD Prefix - F2
-    vex_prefix |= 0x03;
-    break;
-  default:
-    LOG(FATAL) << "unknown SIMD Prefix";
-  }
-
-  return vex_prefix;
+  return false;
 }
 
 void X86Assembler::call(Register reg) {
@@ -273,15 +188,11 @@
 
 void X86Assembler::blsi(Register dst, Register src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
-  uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
-  uint8_t byte_one = EmitVexByte1(/*r=*/ false,
-                                  /*x=*/ false,
-                                  /*b=*/ false,
-                                  /*mmmmm=*/ 2);
-  uint8_t byte_two = EmitVexByte2(/*w=*/ false,
-                                  /*l=*/ 128,
-                                  X86ManagedRegister::FromCpuRegister(dst),
-                                  /*pp=*/ 0);
+  uint8_t byte_zero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ false);
+  uint8_t byte_one  = EmitVexPrefixByteOne(false, false, false, SET_VEX_M_0F_38);
+  uint8_t byte_two  = EmitVexPrefixByteTwo(false,
+                                           X86ManagedRegister::FromCpuRegister(dst),
+                                            SET_VEX_L_128, SET_VEX_PP_NONE);
   EmitUint8(byte_zero);
   EmitUint8(byte_one);
   EmitUint8(byte_two);
@@ -291,15 +202,11 @@
 
 void X86Assembler::blsmsk(Register dst, Register src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
-  uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
-  uint8_t byte_one = EmitVexByte1(/*r=*/ false,
-                                  /*x=*/ false,
-                                  /*b=*/ false,
-                                  /*mmmmm=*/ 2);
-  uint8_t byte_two = EmitVexByte2(/*w=*/ false,
-                                  /*l=*/ 128,
-                                  X86ManagedRegister::FromCpuRegister(dst),
-                                  /*pp=*/ 0);
+  uint8_t byte_zero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ false);
+  uint8_t byte_one = EmitVexPrefixByteOne(false, false, false, SET_VEX_M_0F_38);
+  uint8_t byte_two = EmitVexPrefixByteTwo(false,
+                                         X86ManagedRegister::FromCpuRegister(dst),
+                                         SET_VEX_L_128, SET_VEX_PP_NONE);
   EmitUint8(byte_zero);
   EmitUint8(byte_one);
   EmitUint8(byte_two);
@@ -309,15 +216,11 @@
 
 void X86Assembler::blsr(Register dst, Register src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
-  uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
-  uint8_t byte_one = EmitVexByte1(/*r=*/ false,
-                                  /*x=*/ false,
-                                  /*b=*/ false,
-                                  /*mmmmm=*/ 2);
-  uint8_t byte_two = EmitVexByte2(/*w=*/ false,
-                                  /*l=*/ 128,
-                                  X86ManagedRegister::FromCpuRegister(dst),
-                                  /*pp=*/ 0);
+  uint8_t byte_zero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ false);
+  uint8_t byte_one = EmitVexPrefixByteOne(false, false, false,  SET_VEX_M_0F_38);
+  uint8_t byte_two = EmitVexPrefixByteTwo(false,
+                                          X86ManagedRegister::FromCpuRegister(dst),
+                                          SET_VEX_L_128, SET_VEX_PP_NONE);
   EmitUint8(byte_zero);
   EmitUint8(byte_one);
   EmitUint8(byte_two);
@@ -516,44 +419,165 @@
 
 
 void X86Assembler::movaps(XmmRegister dst, XmmRegister src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovaps(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x0F);
   EmitUint8(0x28);
   EmitXmmRegisterOperand(dst, src);
 }
 
+/**VEX.128.0F.WIG 28 /r VMOVAPS xmm1, xmm2*/
+void X86Assembler::vmovaps(XmmRegister dst, XmmRegister src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  /**Instruction VEX Prefix*/
+  uint8_t byte_zero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86();
+  /**a REX prefix is necessary only if an instruction references one of the
+  extended registers or uses a 64-bit operand.*/
+  uint8_t byte_one = EmitVexPrefixByteOne(/*R=*/ false,
+                                          vvvv_reg,
+                                          SET_VEX_L_128,
+                                          SET_VEX_PP_NONE);
+  EmitUint8(byte_zero);
+  EmitUint8(byte_one);
+  /**Instruction Opcode*/
+  EmitUint8(0x28);
+  /**Instruction Operands*/
+  EmitXmmRegisterOperand(dst, src);
+}
 
 void X86Assembler::movaps(XmmRegister dst, const Address& src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovaps(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x0F);
   EmitUint8(0x28);
   EmitOperand(dst, src);
 }
 
+/**VEX.128.0F.WIG 28 /r VMOVAPS xmm1, m128*/
+void X86Assembler::vmovaps(XmmRegister dst, const Address& src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  /**Instruction VEX Prefix*/
+  uint8_t ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86();
+  /**a REX prefix is necessary only if an instruction references one of the
+  extended registers or uses a 64-bit operand.*/
+  uint8_t ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                         vvvv_reg,
+                                         SET_VEX_L_128,
+                                         SET_VEX_PP_NONE);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  /**Instruction Opcode*/
+  EmitUint8(0x28);
+  /**Instruction Operands*/
+  EmitOperand(dst, src);
+}
 
 void X86Assembler::movups(XmmRegister dst, const Address& src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovups(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x0F);
   EmitUint8(0x10);
   EmitOperand(dst, src);
 }
 
+/**VEX.128.0F.WIG 10 /r VMOVUPS xmm1, m128*/
+void X86Assembler::vmovups(XmmRegister dst, const Address& src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  /**Instruction VEX Prefix*/
+  uint8_t ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86();
+  /**a REX prefix is necessary only if an instruction references one of the
+  extended registers or uses a 64-bit operand.*/
+  uint8_t ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                         vvvv_reg,
+                                         SET_VEX_L_128,
+                                         SET_VEX_PP_NONE);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  /*Instruction Opcode*/
+  EmitUint8(0x10);
+  /*Instruction Operands*/
+  EmitOperand(dst, src);
+}
 
 void X86Assembler::movaps(const Address& dst, XmmRegister src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovaps(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x0F);
   EmitUint8(0x29);
   EmitOperand(src, dst);
 }
 
+/**VEX.128.0F.WIG 29 /r VMOVAPS m128, xmm1*/
+void X86Assembler::vmovaps(const Address& dst, XmmRegister src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  /**Instruction VEX Prefix*/
+  uint8_t ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86();
+  /**a REX prefix is necessary only if an instruction references one of the
+  extended registers or uses a 64-bit operand.*/
+  uint8_t ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                         vvvv_reg,
+                                         SET_VEX_L_128,
+                                         SET_VEX_PP_NONE);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  /**Instruction Opcode*/
+  EmitUint8(0x29);
+  /**Instruction Operands*/
+  EmitOperand(src, dst);
+}
 
 void X86Assembler::movups(const Address& dst, XmmRegister src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovups(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x0F);
   EmitUint8(0x11);
   EmitOperand(src, dst);
 }
 
+/**VEX.128.0F.WIG 11 /r VMOVUPS m128, xmm1*/
+void X86Assembler::vmovups(const Address& dst, XmmRegister src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  /**Instruction VEX Prefix*/
+  uint8_t ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86();
+  /**a REX prefix is necessary only if an instruction references one of the
+  extended registers or uses a 64-bit operand.*/
+  uint8_t ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                         vvvv_reg,
+                                         SET_VEX_L_128,
+                                         SET_VEX_PP_NONE);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0x11);
+  // Instruction Operands
+  EmitOperand(src, dst);
+}
+
 
 void X86Assembler::movss(XmmRegister dst, const Address& src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -679,6 +703,20 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+void X86Assembler::vaddps(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(add_left),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_NONE);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  EmitUint8(0x58);
+  EmitXmmRegisterOperand(dst, add_right);
+}
 
 void X86Assembler::subps(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -687,6 +725,18 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+void X86Assembler::vsubps(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t byte_zero = 0x00, byte_one = 0x00;
+  byte_zero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = X86ManagedRegister::FromXmmRegister(src1);
+  byte_one = EmitVexPrefixByteOne(/*R=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_NONE);
+  EmitUint8(byte_zero);
+  EmitUint8(byte_one);
+  EmitUint8(0x5C);
+  EmitXmmRegisterOperand(dst, src2);
+}
 
 void X86Assembler::mulps(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -695,6 +745,20 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+void X86Assembler::vmulps(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(src1),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_NONE);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  EmitUint8(0x59);
+  EmitXmmRegisterOperand(dst, src2);
+}
 
 void X86Assembler::divps(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -704,7 +768,27 @@
 }
 
 
+void X86Assembler::vdivps(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(src1),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_NONE);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  EmitUint8(0x5E);
+  EmitXmmRegisterOperand(dst, src2);
+}
+
+
 void X86Assembler::movapd(XmmRegister dst, XmmRegister src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovapd(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
   EmitUint8(0x0F);
@@ -712,8 +796,32 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+/**VEX.128.66.0F.WIG 28 /r VMOVAPD xmm1, xmm2*/
+void X86Assembler::vmovapd(XmmRegister dst, XmmRegister src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  /**Instruction VEX Prefix*/
+  uint8_t ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86();
+  /**a REX prefix is necessary only if an instruction references one of the
+  extended registers or uses a 64-bit operand.*/
+  uint8_t ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                         vvvv_reg,
+                                         SET_VEX_L_128,
+                                         SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0x28);
+  // Instruction Operands
+  EmitXmmRegisterOperand(dst, src);
+}
 
 void X86Assembler::movapd(XmmRegister dst, const Address& src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovapd(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
   EmitUint8(0x0F);
@@ -721,8 +829,32 @@
   EmitOperand(dst, src);
 }
 
+/**VEX.128.66.0F.WIG 28 /r VMOVAPD xmm1, m128*/
+void X86Assembler::vmovapd(XmmRegister dst, const Address& src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  /**Instruction VEX Prefix*/
+  uint8_t ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86();
+  /**a REX prefix is necessary only if an instruction references one of the
+  extended registers or uses a 64-bit operand.*/
+  uint8_t ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                         vvvv_reg,
+                                         SET_VEX_L_128,
+                                         SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0x28);
+  // Instruction Operands
+  EmitOperand(dst, src);
+}
 
 void X86Assembler::movupd(XmmRegister dst, const Address& src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovupd(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
   EmitUint8(0x0F);
@@ -730,8 +862,33 @@
   EmitOperand(dst, src);
 }
 
+/**VEX.128.66.0F.WIG 10 /r VMOVUPD xmm1, m128*/
+void X86Assembler::vmovupd(XmmRegister dst, const Address& src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  /**Instruction VEX Prefix*/
+  uint8_t ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86();
+  /**a REX prefix is necessary only if an instruction references one of the
+  extended registers or uses a 64-bit operand.*/
+  uint8_t ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                         vvvv_reg,
+                                         SET_VEX_L_128,
+                                         SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0x10);
+  // Instruction Operands
+  EmitOperand(dst, src);
+}
+
 
 void X86Assembler::movapd(const Address& dst, XmmRegister src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovapd(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
   EmitUint8(0x0F);
@@ -739,8 +896,32 @@
   EmitOperand(src, dst);
 }
 
+/**VEX.128.66.0F.WIG 29 /r VMOVAPD m128, xmm1 */
+void X86Assembler::vmovapd(const Address& dst, XmmRegister src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  /**Instruction VEX Prefix */
+  uint8_t ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86();
+  /**a REX prefix is necessary only if an instruction references one of the
+  extended registers or uses a 64-bit operand.*/
+  uint8_t ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                         vvvv_reg,
+                                         SET_VEX_L_128,
+                                         SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0x29);
+  // Instruction Operands
+  EmitOperand(src, dst);
+}
 
 void X86Assembler::movupd(const Address& dst, XmmRegister src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovupd(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
   EmitUint8(0x0F);
@@ -748,6 +929,26 @@
   EmitOperand(src, dst);
 }
 
+/**VEX.128.66.0F.WIG 11 /r VMOVUPD m128, xmm1 */
+void X86Assembler::vmovupd(const Address& dst, XmmRegister src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  /**Instruction VEX Prefix */
+  uint8_t ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86();
+  /**a REX prefix is necessary only if an instruction references one of the
+  extended registers or uses a 64-bit operand.**/
+  uint8_t ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                         vvvv_reg,
+                                         SET_VEX_L_128,
+                                         SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0x11);
+  // Instruction Operands
+  EmitOperand(src, dst);
+}
 
 void X86Assembler::flds(const Address& src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -896,6 +1097,22 @@
 }
 
 
+void X86Assembler::vaddpd(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(add_left),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  EmitUint8(0x58);
+  EmitXmmRegisterOperand(dst, add_right);
+}
+
+
 void X86Assembler::subpd(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
@@ -905,6 +1121,21 @@
 }
 
 
+void X86Assembler::vsubpd(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(src1),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  EmitUint8(0x5C);
+  EmitXmmRegisterOperand(dst, src2);
+}
+
 void X86Assembler::mulpd(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
@@ -913,6 +1143,20 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+void X86Assembler::vmulpd(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(src1),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  EmitUint8(0x59);
+  EmitXmmRegisterOperand(dst, src2);
+}
 
 void X86Assembler::divpd(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -922,8 +1166,26 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+void X86Assembler::vdivpd(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(src1),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  EmitUint8(0x5E);
+  EmitXmmRegisterOperand(dst, src2);
+}
 
 void X86Assembler::movdqa(XmmRegister dst, XmmRegister src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovdqa(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
   EmitUint8(0x0F);
@@ -931,8 +1193,30 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+/**VEX.128.66.0F.WIG 6F /r VMOVDQA xmm1, xmm2 */
+void X86Assembler::vmovdqa(XmmRegister dst, XmmRegister src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  /**Instruction VEX Prefix */
+  uint8_t ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86();
+  uint8_t ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                         vvvv_reg,
+                                         SET_VEX_L_128,
+                                         SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0x6F);
+  // Instruction Operands
+  EmitXmmRegisterOperand(dst, src);
+}
 
 void X86Assembler::movdqa(XmmRegister dst, const Address& src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovdqa(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
   EmitUint8(0x0F);
@@ -940,8 +1224,30 @@
   EmitOperand(dst, src);
 }
 
+/**VEX.128.66.0F.WIG 6F /r VMOVDQA xmm1, m128 */
+void X86Assembler::vmovdqa(XmmRegister dst, const Address& src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  /**Instruction VEX Prefix */
+  uint8_t ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86();
+  uint8_t ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                         vvvv_reg,
+                                         SET_VEX_L_128,
+                                         SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0x6F);
+  // Instruction Operands
+  EmitOperand(dst, src);
+}
 
 void X86Assembler::movdqu(XmmRegister dst, const Address& src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovdqu(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0xF3);
   EmitUint8(0x0F);
@@ -949,8 +1255,30 @@
   EmitOperand(dst, src);
 }
 
+/**VEX.128.F3.0F.WIG 6F /r VMOVDQU xmm1, m128 */
+void X86Assembler::vmovdqu(XmmRegister dst, const Address& src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  /**Instruction VEX Prefix */
+  uint8_t ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86();
+  uint8_t ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                         vvvv_reg,
+                                         SET_VEX_L_128,
+                                         SET_VEX_PP_F3);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0x6F);
+  // Instruction Operands
+  EmitOperand(dst, src);
+}
 
 void X86Assembler::movdqa(const Address& dst, XmmRegister src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovdqa(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
   EmitUint8(0x0F);
@@ -958,8 +1286,31 @@
   EmitOperand(src, dst);
 }
 
+/**VEX.128.66.0F.WIG 7F /r VMOVDQA m128, xmm1 */
+void X86Assembler::vmovdqa(const Address& dst, XmmRegister src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  /**Instruction VEX Prefix */
+  uint8_t ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86();
+  uint8_t ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                         vvvv_reg,
+                                         SET_VEX_L_128,
+                                         SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0x7F);
+  // Instruction Operands
+  EmitOperand(src, dst);
+}
+
 
 void X86Assembler::movdqu(const Address& dst, XmmRegister src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovdqu(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0xF3);
   EmitUint8(0x0F);
@@ -967,6 +1318,24 @@
   EmitOperand(src, dst);
 }
 
+/**VEX.128.F3.0F.WIG 7F /r VMOVDQU m128, xmm1 */
+void X86Assembler::vmovdqu(const Address& dst, XmmRegister src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  // Instruction VEX Prefix
+  uint8_t ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86();
+  uint8_t ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                         vvvv_reg,
+                                         SET_VEX_L_128,
+                                         SET_VEX_PP_F3);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0x7F);
+  // Instruction Operands
+  EmitOperand(src, dst);
+}
 
 void X86Assembler::paddb(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -976,6 +1345,18 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+void X86Assembler::vpaddb(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteOne = 0x00, ByteZero = 0x00;
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = X86ManagedRegister::FromXmmRegister(add_left);
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  EmitUint8(0xFC);
+  EmitXmmRegisterOperand(dst, add_right);
+}
 
 void X86Assembler::psubb(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -985,6 +1366,18 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+void X86Assembler::vpsubb(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = X86ManagedRegister::FromXmmRegister(add_left);
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  EmitUint8(0xF8);
+  EmitXmmRegisterOperand(dst, add_right);
+}
 
 void X86Assembler::paddw(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -994,6 +1387,18 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+void X86Assembler::vpaddw(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = X86ManagedRegister::FromXmmRegister(add_left);
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  EmitUint8(0xFD);
+  EmitXmmRegisterOperand(dst, add_right);
+}
 
 void X86Assembler::psubw(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1003,6 +1408,18 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+void X86Assembler::vpsubw(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = X86ManagedRegister::FromXmmRegister(add_left);
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  EmitUint8(0xF9);
+  EmitXmmRegisterOperand(dst, add_right);
+}
 
 void X86Assembler::pmullw(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1021,6 +1438,18 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+void X86Assembler::vpaddd(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = X86ManagedRegister::FromXmmRegister(add_left);
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  EmitUint8(0xFE);
+  EmitXmmRegisterOperand(dst, add_right);
+}
 
 void X86Assembler::psubd(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1031,6 +1460,20 @@
 }
 
 
+void X86Assembler::vpsubd(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = X86ManagedRegister::FromXmmRegister(add_left);
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  EmitUint8(0xFA);
+  EmitXmmRegisterOperand(dst, add_right);
+}
+
+
 void X86Assembler::pmulld(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
@@ -1040,6 +1483,40 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+void X86Assembler::vpmulld(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ false);
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 /*X=*/ false,
+                                 /*B=*/ false,
+                                 SET_VEX_M_0F_38);
+  ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(src1),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  EmitUint8(ByteTwo);
+  EmitUint8(0x40);
+  EmitRegisterOperand(dst, src2);
+}
+
+void X86Assembler::vpmullw(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(src1),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  EmitUint8(0xD5);
+  EmitRegisterOperand(dst, src2);
+}
 
 void X86Assembler::paddq(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1049,6 +1526,19 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+void X86Assembler::vpaddq(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = X86ManagedRegister::FromXmmRegister(add_left);
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  EmitUint8(0xD4);
+  EmitXmmRegisterOperand(dst, add_right);
+}
+
 
 void X86Assembler::psubq(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1058,6 +1548,18 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+void X86Assembler::vpsubq(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = X86ManagedRegister::FromXmmRegister(add_left);
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  EmitUint8(0xFB);
+  EmitXmmRegisterOperand(dst, add_right);
+}
 
 void X86Assembler::paddusb(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1370,6 +1872,68 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+/* VEX.128.66.0F.WIG EF /r VPXOR xmm1, xmm2, xmm3/m128 */
+void X86Assembler::vpxor(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  /* Instruction VEX Prefix */
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  /* REX prefix is necessary only if an instruction references one of extended
+  registers or uses a 64-bit operand. */
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(src1),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0xEF);
+  // Instruction Operands
+  EmitXmmRegisterOperand(dst, src2);
+}
+
+/* VEX.128.0F.WIG 57 /r VXORPS xmm1,xmm2, xmm3/m128 */
+void X86Assembler::vxorps(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  /* Instruction VEX Prefix */
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  /* REX prefix is necessary only if an instruction references one of extended
+  registers or uses a 64-bit operand. */
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(src1),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_NONE);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0x57);
+  // Instruction Operands
+  EmitXmmRegisterOperand(dst, src2);
+}
+
+/* VEX.128.66.0F.WIG 57 /r VXORPD xmm1,xmm2, xmm3/m128 */
+void X86Assembler::vxorpd(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  /* Instruction VEX Prefix */
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  /* REX prefix is necessary only if an instruction references one of extended
+  registers or uses a 64-bit operand. */
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(src1),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0x57);
+  // Instruction Operands
+  EmitXmmRegisterOperand(dst, src2);
+}
 
 void X86Assembler::andpd(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1413,25 +1977,66 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
-void X86Assembler::andn(Register dst, Register src1, Register src2) {
+/* VEX.128.66.0F.WIG DB /r VPAND xmm1, xmm2, xmm3/m128 */
+void X86Assembler::vpand(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
-  uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
-  uint8_t byte_one = EmitVexByte1(/*r=*/ false,
-                                  /*x=*/ false,
-                                  /*b=*/ false,
-                                  /*mmmmm=*/ 2);
-  uint8_t byte_two = EmitVexByte2(/*w=*/ false,
-                                  /*l=*/ 128,
-                                  X86ManagedRegister::FromCpuRegister(src1),
-                                  /*pp=*/ 0);
-  EmitUint8(byte_zero);
-  EmitUint8(byte_one);
-  EmitUint8(byte_two);
-  // Opcode field
-  EmitUint8(0xF2);
-  EmitRegisterOperand(dst, src2);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  /* Instruction VEX Prefix */
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  /* REX prefix is necessary only if an instruction references one of extended
+  registers or uses a 64-bit operand. */
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(src1),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0xDB);
+  // Instruction Operands
+  EmitXmmRegisterOperand(dst, src2);
 }
 
+/* VEX.128.0F 54 /r VANDPS xmm1,xmm2, xmm3/m128 */
+void X86Assembler::vandps(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  /* Instruction VEX Prefix */
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(src1),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_NONE);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0x54);
+  // Instruction Operands
+  EmitXmmRegisterOperand(dst, src2);
+}
+
+/* VEX.128.66.0F 54 /r VANDPD xmm1, xmm2, xmm3/m128 */
+void X86Assembler::vandpd(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  /* Instruction VEX Prefix */
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  /* REX prefix is necessary only if an instruction references one of extended
+  registers or uses a 64-bit operand. */
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(src1),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0x54);
+  // Instruction Operands
+  EmitXmmRegisterOperand(dst, src2);
+}
 
 void X86Assembler::andnpd(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1458,6 +2063,68 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+/* VEX.128.66.0F.WIG DF /r VPANDN xmm1, xmm2, xmm3/m128 */
+void X86Assembler::vpandn(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  /* Instruction VEX Prefix */
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  /* REX prefix is necessary only if an instruction references one of extended
+  registers or uses a 64-bit operand. */
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(src1),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0xDF);
+  // Instruction Operands
+  EmitXmmRegisterOperand(dst, src2);
+}
+
+/* VEX.128.0F 55 /r VANDNPS xmm1, xmm2, xmm3/m128 */
+void X86Assembler::vandnps(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  /* Instruction VEX Prefix */
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  /* REX prefix is necessary only if an instruction references one of extended
+  registers or uses a 64-bit operand. */
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(src1),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_NONE);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0x55);
+  // Instruction Operands
+  EmitXmmRegisterOperand(dst, src2);
+}
+
+/* VEX.128.66.0F 55 /r VANDNPD xmm1, xmm2, xmm3/m128 */
+void X86Assembler::vandnpd(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  /* Instruction VEX Prefix */
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  /* REX prefix is necessary only if an instruction references one of extended
+  registers or uses a 64-bit operand. */
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(src1),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0x55);
+  // Instruction Operands
+  EmitXmmRegisterOperand(dst, src2);
+}
 
 void X86Assembler::orpd(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1475,6 +2142,24 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+void X86Assembler::andn(Register dst, Register src1, Register src2) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t byte_zero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ false);
+  uint8_t byte_one = EmitVexPrefixByteOne(/*R=*/ false,
+                                          /*X=*/ false,
+                                          /*B=*/ false,
+                                          SET_VEX_M_0F_38);
+  uint8_t byte_two = EmitVexPrefixByteTwo(/*W=*/ false,
+                                          X86ManagedRegister::FromCpuRegister(src1),
+                                          SET_VEX_L_128,
+                                          SET_VEX_PP_NONE);
+  EmitUint8(byte_zero);
+  EmitUint8(byte_one);
+  EmitUint8(byte_two);
+  // Opcode field
+  EmitUint8(0xF2);
+  EmitRegisterOperand(dst, src2);
+}
 
 void X86Assembler::por(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1484,6 +2169,68 @@
   EmitXmmRegisterOperand(dst, src);
 }
 
+/* VEX.128.66.0F.WIG EB /r VPOR xmm1, xmm2, xmm3/m128 */
+void X86Assembler::vpor(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  /* Instruction VEX Prefix */
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  /* REX prefix is necessary only if an instruction references one of extended
+  registers or uses a 64-bit operand. */
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(src1),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0xEB);
+  // Instruction Operands
+  EmitXmmRegisterOperand(dst, src2);
+}
+
+/* VEX.128.0F 56 /r VORPS xmm1,xmm2, xmm3/m128 */
+void X86Assembler::vorps(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  /* Instruction VEX Prefix */
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  /* REX prefix is necessary only if an instruction references one of extended
+  registers or uses a 64-bit operand. */
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(src1),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_NONE);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0x56);
+  // Instruction Operands
+  EmitXmmRegisterOperand(dst, src2);
+}
+
+/* VEX.128.66.0F 56 /r VORPD xmm1,xmm2, xmm3/m128 */
+void X86Assembler::vorpd(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  /* Instruction VEX Prefix */
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ true);
+  /* REX prefix is necessary only if an instruction references one of extended
+  registers or uses a 64-bit operand. */
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false,
+                                 X86ManagedRegister::FromXmmRegister(src1),
+                                 SET_VEX_L_128,
+                                 SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Instruction Opcode
+  EmitUint8(0x56);
+  // Instruction Operands
+  EmitXmmRegisterOperand(dst, src2);
+}
 
 void X86Assembler::pavgb(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1521,6 +2268,20 @@
 }
 
 
+void X86Assembler::vpmaddwd(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00;
+  ByteZero = EmitVexPrefixByteZero(/* is_twobyte_form=*/ true);
+  X86ManagedRegister vvvv_reg = X86ManagedRegister::FromXmmRegister(src1);
+  ByteOne = EmitVexPrefixByteOne(/*R=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  EmitUint8(0xF5);
+  EmitXmmRegisterOperand(dst, src2);
+}
+
+
 void X86Assembler::phaddw(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
@@ -3143,5 +3904,139 @@
   return AddInt32(bit_cast<int32_t, float>(v));
 }
 
+uint8_t X86Assembler::EmitVexPrefixByteZero(bool is_twobyte_form) {
+  /**Vex Byte 0,
+  Bits [7:0] must contain the value 11000101b (0xC5) for 2-byte Vex
+  Bits [7:0] must contain the value 11000100b (0xC4) for 3-byte Vex */
+  uint8_t vex_prefix = 0xC0;
+  if (is_twobyte_form) {
+    // 2-Byte Vex
+    vex_prefix |= TWO_BYTE_VEX;
+  } else {
+    // 3-Byte Vex
+    vex_prefix |= THREE_BYTE_VEX;
+  }
+  return vex_prefix;
+}
+
+uint8_t X86Assembler::EmitVexPrefixByteOne(bool R,
+                                           bool X,
+                                           bool B,
+                                           int SET_VEX_M) {
+  /**Vex Byte 1, */
+  uint8_t vex_prefix = VEX_INIT;
+  /** Bit[7] This bit needs to be set to '1'
+  otherwise the instruction is LES or LDS */
+  if (!R) {
+    // R .
+    vex_prefix |= SET_VEX_R;
+  }
+  /** Bit[6] This bit needs to be set to '1'
+  otherwise the instruction is LES or LDS */
+  if (!X) {
+    // X .
+    vex_prefix |= SET_VEX_X;
+  }
+  /** Bit[5] This bit needs to be set to '1' */
+  if (!B) {
+    // B .
+    vex_prefix |= SET_VEX_B;
+  }
+  /** Bits[4:0], */
+  vex_prefix |= SET_VEX_M;
+  return vex_prefix;
+}
+
+uint8_t X86Assembler::EmitVexPrefixByteOne(bool R,
+                                           X86ManagedRegister operand,
+                                           int SET_VEX_L,
+                                           int SET_VEX_PP) {
+  /**Vex Byte 1, */
+  uint8_t vex_prefix = VEX_INIT;
+  /** Bit[7] This bit needs to be set to '1'
+  otherwise the instruction is LES or LDS */
+  if (!R) {
+    // R .
+    vex_prefix |= SET_VEX_R;
+  }
+  /**Bits[6:3] - 'vvvv' the source or dest register specifier */
+  if (operand.IsNoRegister()) {
+    vex_prefix |= 0x78;
+  } else if (operand.IsXmmRegister()) {
+    XmmRegister vvvv = operand.AsXmmRegister();
+    int inverted_reg = 15 - static_cast<int>(vvvv);
+    uint8_t reg = static_cast<uint8_t>(inverted_reg);
+    vex_prefix |= ((reg & 0x0F) << 3);
+  } else if (operand.IsCpuRegister()) {
+    Register vvvv = operand.AsCpuRegister();
+    int inverted_reg = 15 - static_cast<int>(vvvv);
+    uint8_t reg = static_cast<uint8_t>(inverted_reg);
+    vex_prefix |= ((reg & 0x0F) << 3);
+  }
+  /** Bit[2] - "L" If VEX.L = 1 indicates 256-bit vector operation ,
+  VEX.L = 0 indicates 128 bit vector operation */
+  vex_prefix |= SET_VEX_L;
+  /** Bits[1:0] -  "pp" */
+  vex_prefix |= SET_VEX_PP;
+  return vex_prefix;
+}
+
+uint8_t X86Assembler::EmitVexPrefixByteTwo(bool W,
+                                           X86ManagedRegister operand,
+                                           int SET_VEX_L,
+                                           int SET_VEX_PP) {
+  /** Vex Byte 2, */
+  uint8_t vex_prefix = VEX_INIT;
+  /** Bit[7] This bits needs to be set to '1' with default value.
+  When using C4H form of VEX prefix, W value is ignored */
+  if (W) {
+    vex_prefix |= SET_VEX_W;
+  }
+  /** Bits[6:3] - 'vvvv' the source or dest register specifier */
+  if (operand.IsXmmRegister()) {
+    XmmRegister vvvv = operand.AsXmmRegister();
+    int inverted_reg = 15 - static_cast<int>(vvvv);
+    uint8_t reg = static_cast<uint8_t>(inverted_reg);
+    vex_prefix |= ((reg & 0x0F) << 3);
+  } else if (operand.IsCpuRegister()) {
+    Register vvvv = operand.AsCpuRegister();
+    int inverted_reg = 15 - static_cast<int>(vvvv);
+    uint8_t reg = static_cast<uint8_t>(inverted_reg);
+    vex_prefix |= ((reg & 0x0F) << 3);
+  }
+  /** Bit[2] - "L" If VEX.L = 1 indicates 256-bit vector operation ,
+  VEX.L = 0 indicates 128 bit vector operation */
+  vex_prefix |= SET_VEX_L;
+  // Bits[1:0] -  "pp"
+  vex_prefix |= SET_VEX_PP;
+  return vex_prefix;
+}
+
+uint8_t X86Assembler::EmitVexPrefixByteTwo(bool W,
+                                           int SET_VEX_L,
+                                           int SET_VEX_PP) {
+  /**Vex Byte 2, */
+  uint8_t vex_prefix = VEX_INIT;
+
+  /** Bit[7] This bits needs to be set to '1' with default value.
+  When using C4H form of VEX prefix, W value is ignored */
+  if (W) {
+    vex_prefix |= SET_VEX_W;
+  }
+  /** Bits[6:3] - 'vvvv' the source or dest register specifier,
+  if unused set 1111 */
+  vex_prefix |= (0x0F << 3);
+
+  /** Bit[2] - "L" If VEX.L = 1 indicates 256-bit vector operation ,
+  VEX.L = 0 indicates 128 bit vector operation */
+  vex_prefix |= SET_VEX_L;
+
+  /** Bits[1:0] -  "pp" */
+  if (SET_VEX_PP != SET_VEX_PP_NONE) {
+    vex_prefix |= SET_VEX_PP;
+  }
+  return vex_prefix;
+}
+
 }  // namespace x86
 }  // namespace art
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 275e5c1..27fde26 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -19,6 +19,7 @@
 
 #include <vector>
 
+#include "arch/x86/instruction_set_features_x86.h"
 #include "base/arena_containers.h"
 #include "base/array_ref.h"
 #include "base/bit_utils.h"
@@ -308,8 +309,12 @@
 
 class X86Assembler final : public Assembler {
  public:
-  explicit X86Assembler(ArenaAllocator* allocator)
-      : Assembler(allocator), constant_area_(allocator) {}
+  explicit X86Assembler(ArenaAllocator* allocator,
+                        const X86InstructionSetFeatures* instruction_set_features = nullptr)
+                : Assembler(allocator),
+                  constant_area_(allocator),
+                  has_AVX_(instruction_set_features != nullptr ? instruction_set_features->HasAVX() : false),
+                  has_AVX2_(instruction_set_features != nullptr ? instruction_set_features->HasAVX2() :false) {}
   virtual ~X86Assembler() {}
 
   /*
@@ -385,6 +390,12 @@
   void movaps(const Address& dst, XmmRegister src);  // store aligned
   void movups(const Address& dst, XmmRegister src);  // store unaligned
 
+  void vmovaps(XmmRegister dst, XmmRegister src);     // move
+  void vmovaps(XmmRegister dst, const Address& src);  // load aligned
+  void vmovups(XmmRegister dst, const Address& src);  // load unaligned
+  void vmovaps(const Address& dst, XmmRegister src);  // store aligned
+  void vmovups(const Address& dst, XmmRegister src);  // store unaligned
+
   void movss(XmmRegister dst, const Address& src);
   void movss(const Address& dst, XmmRegister src);
   void movss(XmmRegister dst, XmmRegister src);
@@ -406,12 +417,28 @@
   void mulps(XmmRegister dst, XmmRegister src);
   void divps(XmmRegister dst, XmmRegister src);
 
+  void vmulps(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vmulpd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vdivps(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vdivpd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+
+  void vaddps(XmmRegister dst, XmmRegister add_left, XmmRegister add_right);
+  void vsubps(XmmRegister dst, XmmRegister add_left, XmmRegister add_right);
+  void vsubpd(XmmRegister dst, XmmRegister add_left, XmmRegister add_right);
+  void vaddpd(XmmRegister dst, XmmRegister add_left, XmmRegister add_right);
+
   void movapd(XmmRegister dst, XmmRegister src);     // move
   void movapd(XmmRegister dst, const Address& src);  // load aligned
   void movupd(XmmRegister dst, const Address& src);  // load unaligned
   void movapd(const Address& dst, XmmRegister src);  // store aligned
   void movupd(const Address& dst, XmmRegister src);  // store unaligned
 
+  void vmovapd(XmmRegister dst, XmmRegister src);     // move
+  void vmovapd(XmmRegister dst, const Address& src);  // load aligned
+  void vmovupd(XmmRegister dst, const Address& src);  // load unaligned
+  void vmovapd(const Address& dst, XmmRegister src);  // store aligned
+  void vmovupd(const Address& dst, XmmRegister src);  // store unaligned
+
   void movsd(XmmRegister dst, const Address& src);
   void movsd(const Address& dst, XmmRegister src);
   void movsd(XmmRegister dst, XmmRegister src);
@@ -439,20 +466,41 @@
   void movdqa(const Address& dst, XmmRegister src);  // store aligned
   void movdqu(const Address& dst, XmmRegister src);  // store unaligned
 
+  void vmovdqa(XmmRegister dst, XmmRegister src);     // move
+  void vmovdqa(XmmRegister dst, const Address& src);  // load aligned
+  void vmovdqu(XmmRegister dst, const Address& src);  // load unaligned
+  void vmovdqa(const Address& dst, XmmRegister src);  // store aligned
+  void vmovdqu(const Address& dst, XmmRegister src);  // store unaligned
+
   void paddb(XmmRegister dst, XmmRegister src);  // no addr variant (for now)
   void psubb(XmmRegister dst, XmmRegister src);
 
+  void vpaddb(XmmRegister dst, XmmRegister add_left, XmmRegister add_right);
+  void vpaddw(XmmRegister dst, XmmRegister add_left, XmmRegister add_right);
+
   void paddw(XmmRegister dst, XmmRegister src);
   void psubw(XmmRegister dst, XmmRegister src);
   void pmullw(XmmRegister dst, XmmRegister src);
+  void vpmullw(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+
+  void vpsubb(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vpsubw(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vpsubd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
 
   void paddd(XmmRegister dst, XmmRegister src);
   void psubd(XmmRegister dst, XmmRegister src);
   void pmulld(XmmRegister dst, XmmRegister src);
 
+  void vpmulld(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+
+  void vpaddd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+
   void paddq(XmmRegister dst, XmmRegister src);
   void psubq(XmmRegister dst, XmmRegister src);
 
+  void vpaddq(XmmRegister dst, XmmRegister add_left, XmmRegister add_right);
+  void vpsubq(XmmRegister dst, XmmRegister add_left, XmmRegister add_right);
+
   void paddusb(XmmRegister dst, XmmRegister src);
   void paddsb(XmmRegister dst, XmmRegister src);
   void paddusw(XmmRegister dst, XmmRegister src);
@@ -497,26 +545,39 @@
   void xorps(XmmRegister dst, const Address& src);
   void xorps(XmmRegister dst, XmmRegister src);
   void pxor(XmmRegister dst, XmmRegister src);  // no addr variant (for now)
+  void vpxor(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vxorps(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vxorpd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
 
   void andpd(XmmRegister dst, XmmRegister src);
   void andpd(XmmRegister dst, const Address& src);
   void andps(XmmRegister dst, XmmRegister src);
   void andps(XmmRegister dst, const Address& src);
   void pand(XmmRegister dst, XmmRegister src);  // no addr variant (for now)
+  void vpand(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vandps(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vandpd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
 
   void andn(Register dst, Register src1, Register src2);  // no addr variant (for now)
   void andnpd(XmmRegister dst, XmmRegister src);  // no addr variant (for now)
   void andnps(XmmRegister dst, XmmRegister src);
   void pandn(XmmRegister dst, XmmRegister src);
+  void vpandn(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vandnps(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vandnpd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
 
   void orpd(XmmRegister dst, XmmRegister src);  // no addr variant (for now)
   void orps(XmmRegister dst, XmmRegister src);
   void por(XmmRegister dst, XmmRegister src);
+  void vpor(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vorps(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vorpd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
 
   void pavgb(XmmRegister dst, XmmRegister src);  // no addr variant (for now)
   void pavgw(XmmRegister dst, XmmRegister src);
   void psadbw(XmmRegister dst, XmmRegister src);
   void pmaddwd(XmmRegister dst, XmmRegister src);
+  void vpmaddwd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
   void phaddw(XmmRegister dst, XmmRegister src);
   void phaddd(XmmRegister dst, XmmRegister src);
   void haddps(XmmRegister dst, XmmRegister src);
@@ -823,6 +884,8 @@
   // Return the current size of the constant area.
   size_t ConstantAreaSize() const { return constant_area_.GetSize(); }
 
+  bool CpuHasAVXorAVX2FeatureFlag();
+
  private:
   inline void EmitUint8(uint8_t value);
   inline void EmitInt32(int32_t value);
@@ -842,12 +905,22 @@
   void EmitGenericShift(int rm, const Operand& operand, const Immediate& imm);
   void EmitGenericShift(int rm, const Operand& operand, Register shifter);
 
-  // Emit a 3 byte VEX Prefix
-  uint8_t EmitVexByteZero(bool is_two_byte);
-  uint8_t EmitVexByte1(bool r, bool x, bool b, int mmmmm);
-  uint8_t EmitVexByte2(bool w , int l , X86ManagedRegister operand, int pp);
-
+  uint8_t EmitVexPrefixByteZero(bool is_twobyte_form);
+  uint8_t EmitVexPrefixByteOne(bool R, bool X, bool B, int SET_VEX_M);
+  uint8_t EmitVexPrefixByteOne(bool R,
+                               X86ManagedRegister operand,
+                               int SET_VEX_L,
+                               int SET_VEX_PP);
+  uint8_t EmitVexPrefixByteTwo(bool W,
+                               X86ManagedRegister operand,
+                               int SET_VEX_L,
+                               int SET_VEX_PP);
+  uint8_t EmitVexPrefixByteTwo(bool W,
+                               int SET_VEX_L,
+                               int SET_VEX_PP);
   ConstantArea constant_area_;
+  bool has_AVX_;     // x86 256bit SIMD AVX.
+  bool has_AVX2_;    // x86 256bit SIMD AVX 2.0.
 
   DISALLOW_COPY_AND_ASSIGN(X86Assembler);
 };
diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
index 1d8bfe7..9253730 100644
--- a/compiler/utils/x86/assembler_x86_test.cc
+++ b/compiler/utils/x86/assembler_x86_test.cc
@@ -148,6 +148,18 @@
   std::vector<x86::XmmRegister*> fp_registers_;
 };
 
+class AssemblerX86AVXTest : public AssemblerX86Test {
+ public:
+  AssemblerX86AVXTest()
+      : instruction_set_features_(X86InstructionSetFeatures::FromVariant("kabylake", nullptr)) {}
+ protected:
+  x86::X86Assembler* CreateAssembler(ArenaAllocator* allocator) override {
+  return new (allocator) x86::X86Assembler(allocator, instruction_set_features_.get());
+  }
+ private:
+  std::unique_ptr<const X86InstructionSetFeatures> instruction_set_features_;
+};
+
 //
 // Test some repeat drivers used in the tests.
 //
@@ -485,134 +497,326 @@
   DriverStr(RepeatFF(&x86::X86Assembler::movaps, "movaps %{reg2}, %{reg1}"), "movaps");
 }
 
+TEST_F(AssemblerX86AVXTest, VMovaps) {
+  DriverStr(RepeatFF(&x86::X86Assembler::vmovaps, "vmovaps %{reg2}, %{reg1}"), "vmovaps");
+}
+
+TEST_F(AssemblerX86AVXTest, Movaps) {
+  DriverStr(RepeatFF(&x86::X86Assembler::movaps, "vmovaps %{reg2}, %{reg1}"), "avx_movaps");
+}
+
 TEST_F(AssemblerX86Test, MovapsLoad) {
   DriverStr(RepeatFA(&x86::X86Assembler::movaps, "movaps {mem}, %{reg}"), "movaps_load");
 }
 
+TEST_F(AssemblerX86AVXTest, VMovapsLoad) {
+  DriverStr(RepeatFA(&x86::X86Assembler::vmovaps, "vmovaps {mem}, %{reg}"), "vmovaps_load");
+}
+
+TEST_F(AssemblerX86AVXTest, MovapsLoad) {
+  DriverStr(RepeatFA(&x86::X86Assembler::movaps, "vmovaps {mem}, %{reg}"), "avx_movaps_load");
+}
+
 TEST_F(AssemblerX86Test, MovapsStore) {
   DriverStr(RepeatAF(&x86::X86Assembler::movaps, "movaps %{reg}, {mem}"), "movaps_store");
 }
 
+TEST_F(AssemblerX86AVXTest, VMovapsStore) {
+  DriverStr(RepeatAF(&x86::X86Assembler::vmovaps, "vmovaps %{reg}, {mem}"), "vmovaps_store");
+}
+
+TEST_F(AssemblerX86AVXTest, MovapsStore) {
+  DriverStr(RepeatAF(&x86::X86Assembler::movaps, "vmovaps %{reg}, {mem}"), "avx_movaps_store");
+}
+
 TEST_F(AssemblerX86Test, MovupsLoad) {
   DriverStr(RepeatFA(&x86::X86Assembler::movups, "movups {mem}, %{reg}"), "movups_load");
 }
 
+TEST_F(AssemblerX86AVXTest, VMovupsLoad) {
+  DriverStr(RepeatFA(&x86::X86Assembler::vmovups, "vmovups {mem}, %{reg}"), "vmovups_load");
+}
+
+TEST_F(AssemblerX86AVXTest, MovupsLoad) {
+  DriverStr(RepeatFA(&x86::X86Assembler::movups, "vmovups {mem}, %{reg}"), "avx_movups_load");
+}
+
 TEST_F(AssemblerX86Test, MovupsStore) {
   DriverStr(RepeatAF(&x86::X86Assembler::movups, "movups %{reg}, {mem}"), "movups_store");
 }
 
+TEST_F(AssemblerX86AVXTest, VMovupsStore) {
+  DriverStr(RepeatAF(&x86::X86Assembler::vmovups, "vmovups %{reg}, {mem}"), "vmovups_store");
+}
+
+TEST_F(AssemblerX86AVXTest, MovupsStore) {
+  DriverStr(RepeatAF(&x86::X86Assembler::movups, "vmovups %{reg}, {mem}"), "avx_movups_store");
+}
+
 TEST_F(AssemblerX86Test, Movapd) {
   DriverStr(RepeatFF(&x86::X86Assembler::movapd, "movapd %{reg2}, %{reg1}"), "movapd");
 }
 
+TEST_F(AssemblerX86AVXTest, VMovapd) {
+  DriverStr(RepeatFF(&x86::X86Assembler::vmovapd, "vmovapd %{reg2}, %{reg1}"), "vmovapd");
+}
+
+TEST_F(AssemblerX86AVXTest, Movapd) {
+  DriverStr(RepeatFF(&x86::X86Assembler::movapd, "vmovapd %{reg2}, %{reg1}"), "avx_movapd");
+}
+
 TEST_F(AssemblerX86Test, MovapdLoad) {
   DriverStr(RepeatFA(&x86::X86Assembler::movapd, "movapd {mem}, %{reg}"), "movapd_load");
 }
 
+TEST_F(AssemblerX86AVXTest, VMovapdLoad) {
+  DriverStr(RepeatFA(&x86::X86Assembler::vmovapd, "vmovapd {mem}, %{reg}"), "vmovapd_load");
+}
+
+TEST_F(AssemblerX86AVXTest, MovapdLoad) {
+  DriverStr(RepeatFA(&x86::X86Assembler::movapd, "vmovapd {mem}, %{reg}"), "avx_movapd_load");
+}
+
 TEST_F(AssemblerX86Test, MovapdStore) {
   DriverStr(RepeatAF(&x86::X86Assembler::movapd, "movapd %{reg}, {mem}"), "movapd_store");
 }
 
+TEST_F(AssemblerX86AVXTest, VMovapdStore) {
+  DriverStr(RepeatAF(&x86::X86Assembler::vmovapd, "vmovapd %{reg}, {mem}"), "vmovapd_store");
+}
+
+TEST_F(AssemblerX86AVXTest, MovapdStore) {
+  DriverStr(RepeatAF(&x86::X86Assembler::movapd, "vmovapd %{reg}, {mem}"), "avx_movapd_store");
+}
+
 TEST_F(AssemblerX86Test, MovupdLoad) {
   DriverStr(RepeatFA(&x86::X86Assembler::movupd, "movupd {mem}, %{reg}"), "movupd_load");
 }
 
+TEST_F(AssemblerX86AVXTest, VMovupdLoad) {
+  DriverStr(RepeatFA(&x86::X86Assembler::vmovupd, "vmovupd {mem}, %{reg}"), "vmovupd_load");
+}
+
+TEST_F(AssemblerX86AVXTest, MovupdLoad) {
+  DriverStr(RepeatFA(&x86::X86Assembler::movupd, "vmovupd {mem}, %{reg}"), "avx_movupd_load");
+}
+
 TEST_F(AssemblerX86Test, MovupdStore) {
   DriverStr(RepeatAF(&x86::X86Assembler::movupd, "movupd %{reg}, {mem}"), "movupd_store");
 }
 
+TEST_F(AssemblerX86AVXTest, VMovupdStore) {
+  DriverStr(RepeatAF(&x86::X86Assembler::vmovupd, "vmovupd %{reg}, {mem}"), "vmovupd_store");
+}
+
+TEST_F(AssemblerX86AVXTest, MovupdStore) {
+  DriverStr(RepeatAF(&x86::X86Assembler::movupd, "vmovupd %{reg}, {mem}"), "avx_movupd_store");
+}
+
 TEST_F(AssemblerX86Test, Movdqa) {
   DriverStr(RepeatFF(&x86::X86Assembler::movdqa, "movdqa %{reg2}, %{reg1}"), "movdqa");
 }
 
+TEST_F(AssemblerX86AVXTest, VMovdqa) {
+  DriverStr(RepeatFF(&x86::X86Assembler::vmovdqa, "vmovdqa %{reg2}, %{reg1}"), "vmovdqa");
+}
+
+TEST_F(AssemblerX86AVXTest, Movdqa) {
+  DriverStr(RepeatFF(&x86::X86Assembler::movdqa, "vmovdqa %{reg2}, %{reg1}"), "avx_movdqa");
+}
+
 TEST_F(AssemblerX86Test, MovdqaLoad) {
   DriverStr(RepeatFA(&x86::X86Assembler::movdqa, "movdqa {mem}, %{reg}"), "movdqa_load");
 }
 
+TEST_F(AssemblerX86AVXTest, VMovdqaLoad) {
+  DriverStr(RepeatFA(&x86::X86Assembler::vmovdqa, "vmovdqa {mem}, %{reg}"), "vmovdqa_load");
+}
+
+TEST_F(AssemblerX86AVXTest, MovdqaLoad) {
+  DriverStr(RepeatFA(&x86::X86Assembler::movdqa, "vmovdqa {mem}, %{reg}"), "avx_movdqa_load");
+}
+
 TEST_F(AssemblerX86Test, MovdqaStore) {
   DriverStr(RepeatAF(&x86::X86Assembler::movdqa, "movdqa %{reg}, {mem}"), "movdqa_store");
 }
 
+TEST_F(AssemblerX86AVXTest, VMovdqaStore) {
+  DriverStr(RepeatAF(&x86::X86Assembler::vmovdqa, "vmovdqa %{reg}, {mem}"), "vmovdqa_store");
+}
+
+TEST_F(AssemblerX86AVXTest, MovdqaStore) {
+  DriverStr(RepeatAF(&x86::X86Assembler::movdqa, "vmovdqa %{reg}, {mem}"), "avx_movdqa_store");
+}
+
 TEST_F(AssemblerX86Test, MovdquLoad) {
   DriverStr(RepeatFA(&x86::X86Assembler::movdqu, "movdqu {mem}, %{reg}"), "movdqu_load");
 }
 
+TEST_F(AssemblerX86AVXTest, VMovdquLoad) {
+  DriverStr(RepeatFA(&x86::X86Assembler::vmovdqu, "vmovdqu {mem}, %{reg}"), "vmovdqu_load");
+}
+
+TEST_F(AssemblerX86AVXTest, MovdquLoad) {
+  DriverStr(RepeatFA(&x86::X86Assembler::movdqu, "vmovdqu {mem}, %{reg}"), "avx_movdqu_load");
+}
+
 TEST_F(AssemblerX86Test, MovdquStore) {
   DriverStr(RepeatAF(&x86::X86Assembler::movdqu, "movdqu %{reg}, {mem}"), "movdqu_store");
 }
 
+TEST_F(AssemblerX86AVXTest, VMovdquStore) {
+  DriverStr(RepeatAF(&x86::X86Assembler::vmovdqu, "vmovdqu %{reg}, {mem}"), "vmovdqu_store");
+}
+
+TEST_F(AssemblerX86AVXTest, MovdquStore) {
+  DriverStr(RepeatAF(&x86::X86Assembler::movdqu, "vmovdqu %{reg}, {mem}"), "avx_movdqu_store");
+}
+
 TEST_F(AssemblerX86Test, AddPS) {
   DriverStr(RepeatFF(&x86::X86Assembler::addps, "addps %{reg2}, %{reg1}"), "addps");
 }
 
+TEST_F(AssemblerX86AVXTest, VAddPS) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vaddps, "vaddps %{reg3}, %{reg2}, %{reg1}"), "vaddps");
+}
+
 TEST_F(AssemblerX86Test, AddPD) {
   DriverStr(RepeatFF(&x86::X86Assembler::addpd, "addpd %{reg2}, %{reg1}"), "addpd");
 }
 
+TEST_F(AssemblerX86AVXTest, VAddpd) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vaddpd, "vaddpd %{reg3}, %{reg2}, %{reg1}"), "vaddpd");
+}
+
 TEST_F(AssemblerX86Test, SubPS) {
   DriverStr(RepeatFF(&x86::X86Assembler::subps, "subps %{reg2}, %{reg1}"), "subps");
 }
 
+TEST_F(AssemblerX86AVXTest, VSubPS) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vsubps, "vsubps %{reg3},%{reg2}, %{reg1}"), "vsubps");
+}
+
 TEST_F(AssemblerX86Test, SubPD) {
   DriverStr(RepeatFF(&x86::X86Assembler::subpd, "subpd %{reg2}, %{reg1}"), "subpd");
 }
 
+TEST_F(AssemblerX86AVXTest, VSubPD) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vsubpd, "vsubpd %{reg3}, %{reg2}, %{reg1}"), "vsubpd");
+}
+
 TEST_F(AssemblerX86Test, MulPS) {
   DriverStr(RepeatFF(&x86::X86Assembler::mulps, "mulps %{reg2}, %{reg1}"), "mulps");
 }
 
+TEST_F(AssemblerX86AVXTest, VMulPS) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vmulps, "vmulps %{reg3}, %{reg2}, %{reg1}"), "vmulps");
+}
+
 TEST_F(AssemblerX86Test, MulPD) {
   DriverStr(RepeatFF(&x86::X86Assembler::mulpd, "mulpd %{reg2}, %{reg1}"), "mulpd");
 }
 
+TEST_F(AssemblerX86AVXTest, VMulPD) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vmulpd, "vmulpd %{reg3}, %{reg2}, %{reg1}"), "vmulpd");
+}
+
 TEST_F(AssemblerX86Test, DivPS) {
   DriverStr(RepeatFF(&x86::X86Assembler::divps, "divps %{reg2}, %{reg1}"), "divps");
 }
 
+TEST_F(AssemblerX86AVXTest, VDivPS) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vdivps, "vdivps %{reg3}, %{reg2}, %{reg1}"), "vdivps");
+}
+
 TEST_F(AssemblerX86Test, DivPD) {
   DriverStr(RepeatFF(&x86::X86Assembler::divpd, "divpd %{reg2}, %{reg1}"), "divpd");
 }
 
+TEST_F(AssemblerX86AVXTest, VDivPD) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vdivpd, "vdivpd %{reg3}, %{reg2}, %{reg1}"), "vdivpd");
+}
+
 TEST_F(AssemblerX86Test, PAddB) {
   DriverStr(RepeatFF(&x86::X86Assembler::paddb, "paddb %{reg2}, %{reg1}"), "paddb");
 }
 
+TEST_F(AssemblerX86AVXTest, VPaddB) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vpaddb, "vpaddb %{reg3}, %{reg2}, %{reg1}"), "vpaddb");
+}
+
 TEST_F(AssemblerX86Test, PSubB) {
   DriverStr(RepeatFF(&x86::X86Assembler::psubb, "psubb %{reg2}, %{reg1}"), "psubb");
 }
 
+TEST_F(AssemblerX86AVXTest, VPsubB) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vpsubb, "vpsubb %{reg3},%{reg2}, %{reg1}"), "vpsubb");
+}
+
 TEST_F(AssemblerX86Test, PAddW) {
   DriverStr(RepeatFF(&x86::X86Assembler::paddw, "paddw %{reg2}, %{reg1}"), "paddw");
 }
 
+TEST_F(AssemblerX86AVXTest, VPaddW) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vpaddw, "vpaddw %{reg3}, %{reg2}, %{reg1}"), "vpaddw");
+}
+
 TEST_F(AssemblerX86Test, PSubW) {
   DriverStr(RepeatFF(&x86::X86Assembler::psubw, "psubw %{reg2}, %{reg1}"), "psubw");
 }
 
+TEST_F(AssemblerX86AVXTest, VPsubW) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vpsubw, "vpsubw %{reg3}, %{reg2}, %{reg1}"), "vpsubw");
+}
+
 TEST_F(AssemblerX86Test, PMullW) {
   DriverStr(RepeatFF(&x86::X86Assembler::pmullw, "pmullw %{reg2}, %{reg1}"), "pmullw");
 }
 
+TEST_F(AssemblerX86AVXTest, VPMullW) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vpmullw, "vpmullw %{reg3}, %{reg2}, %{reg1}"), "vpmullw");
+}
+
 TEST_F(AssemblerX86Test, PAddD) {
   DriverStr(RepeatFF(&x86::X86Assembler::paddd, "paddd %{reg2}, %{reg1}"), "paddd");
 }
 
+TEST_F(AssemblerX86AVXTest, VPaddD) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vpaddd, "vpaddd %{reg3}, %{reg2}, %{reg1}"), "vpaddd");
+}
+
 TEST_F(AssemblerX86Test, PSubD) {
   DriverStr(RepeatFF(&x86::X86Assembler::psubd, "psubd %{reg2}, %{reg1}"), "psubd");
 }
 
+TEST_F(AssemblerX86AVXTest, VPsubD) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vpsubd, "vpsubd %{reg3}, %{reg2}, %{reg1}"), "vpsubd");
+}
+
 TEST_F(AssemblerX86Test, PMullD) {
   DriverStr(RepeatFF(&x86::X86Assembler::pmulld, "pmulld %{reg2}, %{reg1}"), "pmulld");
 }
 
+TEST_F(AssemblerX86AVXTest, VPMullD) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vpmulld, "vpmulld %{reg3}, %{reg2}, %{reg1}"), "vpmulld");
+}
+
 TEST_F(AssemblerX86Test, PAddQ) {
   DriverStr(RepeatFF(&x86::X86Assembler::paddq, "paddq %{reg2}, %{reg1}"), "paddq");
 }
 
+TEST_F(AssemblerX86AVXTest, VPaddQ) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vpaddq, "vpaddq %{reg3}, %{reg2}, %{reg1}"), "vpaddq");
+}
+
 TEST_F(AssemblerX86Test, PSubQ) {
   DriverStr(RepeatFF(&x86::X86Assembler::psubq, "psubq %{reg2}, %{reg1}"), "psubq");
 }
 
+TEST_F(AssemblerX86AVXTest, VPsubQ) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vpsubq, "vpsubq %{reg3}, %{reg2}, %{reg1}"), "vpsubq");
+}
+
 TEST_F(AssemblerX86Test, PAddUSB) {
   DriverStr(RepeatFF(&x86::X86Assembler::paddusb, "paddusb %{reg2}, %{reg1}"), "paddusb");
 }
@@ -657,6 +861,18 @@
   DriverStr(RepeatFF(&x86::X86Assembler::pxor, "pxor %{reg2}, %{reg1}"), "pxor");
 }
 
+TEST_F(AssemblerX86AVXTest, VPXor) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vpxor, "vpxor %{reg3}, %{reg2}, %{reg1}"), "vpxor");
+}
+
+TEST_F(AssemblerX86AVXTest, VXorPS) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vxorps, "vxorps %{reg3}, %{reg2}, %{reg1}"), "vxorps");
+}
+
+TEST_F(AssemblerX86AVXTest, VXorPD) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vxorpd, "vxorpd %{reg3}, %{reg2}, %{reg1}"), "vxorpd");
+}
+
 TEST_F(AssemblerX86Test, AndPD) {
   DriverStr(RepeatFF(&x86::X86Assembler::andpd, "andpd %{reg2}, %{reg1}"), "andpd");
 }
@@ -669,6 +885,18 @@
   DriverStr(RepeatFF(&x86::X86Assembler::pand, "pand %{reg2}, %{reg1}"), "pand");
 }
 
+TEST_F(AssemblerX86AVXTest, VPAnd) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vpand, "vpand %{reg3}, %{reg2}, %{reg1}"), "vpand");
+}
+
+TEST_F(AssemblerX86AVXTest, VAndPS) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vandps, "vandps %{reg3}, %{reg2}, %{reg1}"), "vandps");
+}
+
+TEST_F(AssemblerX86AVXTest, VAndPD) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vandpd, "vandpd %{reg3}, %{reg2}, %{reg1}"), "vandpd");
+}
+
 TEST_F(AssemblerX86Test, Andn) {
   DriverStr(RepeatRRR(&x86::X86Assembler::andn, "andn %{reg3}, %{reg2}, %{reg1}"), "andn");
 }
@@ -685,6 +913,18 @@
   DriverStr(RepeatFF(&x86::X86Assembler::pandn, "pandn %{reg2}, %{reg1}"), "pandn");
 }
 
+TEST_F(AssemblerX86AVXTest, VPAndn) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vpandn, "vpandn %{reg3}, %{reg2}, %{reg1}"), "vpandn");
+}
+
+TEST_F(AssemblerX86AVXTest, VAndnPS) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vandnps, "vandnps %{reg3}, %{reg2}, %{reg1}"), "vandnps");
+}
+
+TEST_F(AssemblerX86AVXTest, VAndnPD) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vandnpd, "vandnpd %{reg3}, %{reg2}, %{reg1}"), "vandnpd");
+}
+
 TEST_F(AssemblerX86Test, OrPD) {
   DriverStr(RepeatFF(&x86::X86Assembler::orpd, "orpd %{reg2}, %{reg1}"), "orpd");
 }
@@ -697,6 +937,18 @@
   DriverStr(RepeatFF(&x86::X86Assembler::por, "por %{reg2}, %{reg1}"), "por");
 }
 
+TEST_F(AssemblerX86AVXTest, VPor) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vpor, "vpor %{reg3}, %{reg2}, %{reg1}"), "vpor");
+}
+
+TEST_F(AssemblerX86AVXTest, VorPS) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vorps, "vorps %{reg3}, %{reg2}, %{reg1}"), "vorps");
+}
+
+TEST_F(AssemblerX86AVXTest, VorPD) {
+  DriverStr(RepeatFFF(&x86::X86Assembler::vorpd, "vorpd %{reg3}, %{reg2}, %{reg1}"), "vorpd");
+}
+
 TEST_F(AssemblerX86Test, PAvgB) {
   DriverStr(RepeatFF(&x86::X86Assembler::pavgb, "pavgb %{reg2}, %{reg1}"), "pavgb");
 }
@@ -713,6 +965,11 @@
   DriverStr(RepeatFF(&x86::X86Assembler::pmaddwd, "pmaddwd %{reg2}, %{reg1}"), "pmaddwd");
 }
 
+TEST_F(AssemblerX86AVXTest, VPMAddWD) {
+  DriverStr(
+      RepeatFFF(&x86::X86Assembler::vpmaddwd, "vpmaddwd %{reg3}, %{reg2}, %{reg1}"), "vpmaddwd");
+}
+
 TEST_F(AssemblerX86Test, PHAddW) {
   DriverStr(RepeatFF(&x86::X86Assembler::phaddw, "phaddw %{reg2}, %{reg1}"), "phaddw");
 }
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
index 540d72b..f4ea004 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.cc
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -39,6 +39,9 @@
 
 constexpr size_t kFramePointerSize = 4;
 
+static constexpr size_t kNativeStackAlignment = 16;
+static_assert(kNativeStackAlignment == kStackAlignment);
+
 #define __ asm_.
 
 void X86JNIMacroAssembler::BuildFrame(size_t frame_size,
@@ -47,7 +50,15 @@
                                       const ManagedRegisterEntrySpills& entry_spills) {
   DCHECK_EQ(CodeSize(), 0U);  // Nothing emitted yet.
   cfi().SetCurrentCFAOffset(4);  // Return address on stack.
-  CHECK_ALIGNED(frame_size, kStackAlignment);
+  if (frame_size == kFramePointerSize) {
+    // For @CriticalNative tail call.
+    CHECK(method_reg.IsNoRegister());
+    CHECK(spill_regs.empty());
+  } else if (method_reg.IsNoRegister()) {
+    CHECK_ALIGNED(frame_size, kNativeStackAlignment);
+  } else {
+    CHECK_ALIGNED(frame_size, kStackAlignment);
+  }
   int gpr_count = 0;
   for (int i = spill_regs.size() - 1; i >= 0; --i) {
     Register spill = spill_regs[i].AsX86().AsCpuRegister();
@@ -59,12 +70,16 @@
 
   // return address then method on stack.
   int32_t adjust = frame_size - gpr_count * kFramePointerSize -
-      kFramePointerSize /*method*/ -
-      kFramePointerSize /*return address*/;
-  __ addl(ESP, Immediate(-adjust));
-  cfi().AdjustCFAOffset(adjust);
-  __ pushl(method_reg.AsX86().AsCpuRegister());
-  cfi().AdjustCFAOffset(kFramePointerSize);
+      kFramePointerSize /*return address*/ -
+      (method_reg.IsRegister() ? kFramePointerSize /*method*/ : 0u);
+  if (adjust != 0) {
+    __ addl(ESP, Immediate(-adjust));
+    cfi().AdjustCFAOffset(adjust);
+  }
+  if (method_reg.IsRegister()) {
+    __ pushl(method_reg.AsX86().AsCpuRegister());
+    cfi().AdjustCFAOffset(kFramePointerSize);
+  }
   DCHECK_EQ(static_cast<size_t>(cfi().GetCurrentCFAOffset()), frame_size);
 
   for (const ManagedRegisterSpill& spill : entry_spills) {
@@ -86,12 +101,14 @@
 void X86JNIMacroAssembler::RemoveFrame(size_t frame_size,
                                        ArrayRef<const ManagedRegister> spill_regs,
                                        bool may_suspend ATTRIBUTE_UNUSED) {
-  CHECK_ALIGNED(frame_size, kStackAlignment);
+  CHECK_ALIGNED(frame_size, kNativeStackAlignment);
   cfi().RememberState();
   // -kFramePointerSize for ArtMethod*.
   int adjust = frame_size - spill_regs.size() * kFramePointerSize - kFramePointerSize;
-  __ addl(ESP, Immediate(adjust));
-  cfi().AdjustCFAOffset(-adjust);
+  if (adjust != 0) {
+    __ addl(ESP, Immediate(adjust));
+    cfi().AdjustCFAOffset(-adjust);
+  }
   for (size_t i = 0; i < spill_regs.size(); ++i) {
     Register spill = spill_regs[i].AsX86().AsCpuRegister();
     __ popl(spill);
@@ -105,15 +122,19 @@
 }
 
 void X86JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
-  CHECK_ALIGNED(adjust, kStackAlignment);
-  __ addl(ESP, Immediate(-adjust));
-  cfi().AdjustCFAOffset(adjust);
+  if (adjust != 0u) {
+    CHECK_ALIGNED(adjust, kNativeStackAlignment);
+    __ addl(ESP, Immediate(-adjust));
+    cfi().AdjustCFAOffset(adjust);
+  }
 }
 
 static void DecreaseFrameSizeImpl(X86Assembler* assembler, size_t adjust) {
-  CHECK_ALIGNED(adjust, kStackAlignment);
-  assembler->addl(ESP, Immediate(adjust));
-  assembler->cfi().AdjustCFAOffset(-adjust);
+  if (adjust != 0u) {
+    CHECK_ALIGNED(adjust, kNativeStackAlignment);
+    assembler->addl(ESP, Immediate(adjust));
+    assembler->cfi().AdjustCFAOffset(-adjust);
+  }
 }
 
 void X86JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
@@ -301,7 +322,7 @@
       __ movl(dest.AsCpuRegister(), src.AsCpuRegister());
     } else if (src.IsX87Register() && dest.IsXmmRegister()) {
       // Pass via stack and pop X87 register
-      __ subl(ESP, Immediate(16));
+      IncreaseFrameSize(16);
       if (size == 4) {
         CHECK_EQ(src.AsX87Register(), ST0);
         __ fstps(Address(ESP, 0));
@@ -311,7 +332,7 @@
         __ fstpl(Address(ESP, 0));
         __ movsd(dest.AsXmmRegister(), Address(ESP, 0));
       }
-      __ addl(ESP, Immediate(16));
+      DecreaseFrameSize(16);
     } else {
       // TODO: x87, SSE
       UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
@@ -487,6 +508,12 @@
   // TODO: not validating references
 }
 
+void X86JNIMacroAssembler::Jump(ManagedRegister mbase, Offset offset, ManagedRegister) {
+  X86ManagedRegister base = mbase.AsX86();
+  CHECK(base.IsCpuRegister());
+  __ jmp(Address(base.AsCpuRegister(), offset.Int32Value()));
+}
+
 void X86JNIMacroAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
   X86ManagedRegister base = mbase.AsX86();
   CHECK(base.IsCpuRegister());
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.h b/compiler/utils/x86/jni_macro_assembler_x86.h
index a701080..7bf2f98 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.h
+++ b/compiler/utils/x86/jni_macro_assembler_x86.h
@@ -146,6 +146,9 @@
   void VerifyObject(ManagedRegister src, bool could_be_null) override;
   void VerifyObject(FrameOffset src, bool could_be_null) override;
 
+  // Jump to address held at [base+offset] (used for tail calls).
+  void Jump(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+
   // Call to address held at [base+offset]
   void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
   void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index c118bc6..2c5dd9e 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -64,98 +64,13 @@
   }
 }
 
-uint8_t X86_64Assembler::EmitVexByteZero(bool is_two_byte) {
-  uint8_t vex_zero = 0xC0;
-  if (!is_two_byte) {
-    vex_zero |= 0xC4;
-  } else {
-    vex_zero |= 0xC5;
+bool X86_64Assembler::CpuHasAVXorAVX2FeatureFlag() {
+  if (has_AVX_ || has_AVX2_) {
+    return true;
   }
-  return vex_zero;
+  return false;
 }
 
-uint8_t X86_64Assembler::EmitVexByte1(bool r, bool x, bool b, int mmmmm) {
-  // VEX Byte 1
-  uint8_t vex_prefix = 0;
-  if (!r) {
-    vex_prefix |= 0x80;  // VEX.R
-  }
-  if (!x) {
-    vex_prefix |= 0x40;  // VEX.X
-  }
-  if (!b) {
-    vex_prefix |= 0x20;  // VEX.B
-  }
-
-  // VEX.mmmmm
-  switch (mmmmm) {
-  case 1:
-    // implied 0F leading opcode byte
-    vex_prefix |= 0x01;
-    break;
-  case 2:
-    // implied leading 0F 38 opcode byte
-    vex_prefix |= 0x02;
-    break;
-  case 3:
-    // implied leading OF 3A opcode byte
-    vex_prefix |= 0x03;
-    break;
-  default:
-    LOG(FATAL) << "unknown opcode bytes";
-  }
-
-  return vex_prefix;
-}
-
-uint8_t X86_64Assembler::EmitVexByte2(bool w, int l, X86_64ManagedRegister operand, int pp) {
-  // VEX Byte 2
-  uint8_t vex_prefix = 0;
-  if (w) {
-    vex_prefix |= 0x80;
-  }
-  // VEX.vvvv
-  if (operand.IsXmmRegister()) {
-    XmmRegister vvvv = operand.AsXmmRegister();
-    int inverted_reg = 15-static_cast<int>(vvvv.AsFloatRegister());
-    uint8_t reg = static_cast<uint8_t>(inverted_reg);
-    vex_prefix |= ((reg & 0x0F) << 3);
-  } else if (operand.IsCpuRegister()) {
-    CpuRegister vvvv = operand.AsCpuRegister();
-    int inverted_reg = 15 - static_cast<int>(vvvv.AsRegister());
-    uint8_t reg = static_cast<uint8_t>(inverted_reg);
-    vex_prefix |= ((reg & 0x0F) << 3);
-  }
-
-  // VEX.L
-  if (l == 256) {
-    vex_prefix |= 0x04;
-  }
-
-  // VEX.pp
-  switch (pp) {
-  case 0:
-    // SIMD Pefix - None
-    vex_prefix |= 0x00;
-    break;
-  case 1:
-    // SIMD Prefix - 66
-    vex_prefix |= 0x01;
-    break;
-  case 2:
-    // SIMD Prefix - F3
-    vex_prefix |= 0x02;
-    break;
-  case 3:
-    // SIMD Prefix - F2
-    vex_prefix |= 0x03;
-    break;
-  default:
-    LOG(FATAL) << "unknown SIMD Prefix";
-  }
-
-  return vex_prefix;
-}
 
 void X86_64Assembler::call(CpuRegister reg) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -499,6 +414,10 @@
 
 
 void X86_64Assembler::movaps(XmmRegister dst, XmmRegister src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovaps(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitOptionalRex32(dst, src);
   EmitUint8(0x0F);
@@ -507,7 +426,60 @@
 }
 
 
+/**VEX.128.0F.WIG 28 /r VMOVAPS xmm1, xmm2 */
+void X86_64Assembler::vmovaps(XmmRegister dst, XmmRegister src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  uint8_t byte_zero, byte_one, byte_two;
+  bool is_twobyte_form = true;
+  bool load = dst.NeedsRex();
+  bool store = !load;
+
+  if (src.NeedsRex()&& dst.NeedsRex()) {
+    is_twobyte_form = false;
+  }
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  // Instruction VEX Prefix
+  byte_zero = EmitVexPrefixByteZero(is_twobyte_form);
+  X86_64ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86_64();
+  if (is_twobyte_form) {
+    bool rex_bit = (load) ? dst.NeedsRex() : src.NeedsRex();
+    byte_one = EmitVexPrefixByteOne(rex_bit,
+                                    vvvv_reg,
+                                    SET_VEX_L_128,
+                                    SET_VEX_PP_NONE);
+  } else {
+    byte_one = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                    /*X=*/ false,
+                                    src.NeedsRex(),
+                                    SET_VEX_M_0F);
+    byte_two = EmitVexPrefixByteTwo(/*W=*/ false,
+                                    SET_VEX_L_128,
+                                    SET_VEX_PP_NONE);
+  }
+  EmitUint8(byte_zero);
+  EmitUint8(byte_one);
+  if (!is_twobyte_form) {
+    EmitUint8(byte_two);
+  }
+  // Instruction Opcode
+  if (is_twobyte_form && store) {
+    EmitUint8(0x29);
+  } else {
+    EmitUint8(0x28);
+  }
+  // Instruction Operands
+  if (is_twobyte_form && store) {
+    EmitXmmRegisterOperand(src.LowBits(), dst);
+  } else {
+    EmitXmmRegisterOperand(dst.LowBits(), src);
+  }
+}
+
 void X86_64Assembler::movaps(XmmRegister dst, const Address& src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovaps(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitOptionalRex32(dst, src);
   EmitUint8(0x0F);
@@ -515,8 +487,51 @@
   EmitOperand(dst.LowBits(), src);
 }
 
+/**VEX.128.0F.WIG 28 /r VMOVAPS xmm1, m128 */
+void X86_64Assembler::vmovaps(XmmRegister dst, const Address& src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero, ByteOne, ByteTwo;
+  bool is_twobyte_form = false;
+  // Instruction VEX Prefix
+  uint8_t rex = src.rex();
+  bool Rex_x = rex & GET_REX_X;
+  bool Rex_b = rex & GET_REX_B;
+  if (!Rex_b && !Rex_x) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    X86_64ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86_64();
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   vvvv_reg,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_NONE);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   Rex_x,
+                                   Rex_b,
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_NONE);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  // Instruction Opcode
+  EmitUint8(0x28);
+  // Instruction Operands
+  EmitOperand(dst.LowBits(), src);
+}
 
 void X86_64Assembler::movups(XmmRegister dst, const Address& src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovups(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitOptionalRex32(dst, src);
   EmitUint8(0x0F);
@@ -524,8 +539,52 @@
   EmitOperand(dst.LowBits(), src);
 }
 
+/** VEX.128.0F.WIG 10 /r VMOVUPS xmm1, m128 */
+void X86_64Assembler::vmovups(XmmRegister dst, const Address& src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero, ByteOne, ByteTwo;
+  bool is_twobyte_form = false;
+  // Instruction VEX Prefix
+  uint8_t rex = src.rex();
+  bool Rex_x = rex & GET_REX_X;
+  bool Rex_b = rex & GET_REX_B;
+  if (!Rex_x && !Rex_b) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    X86_64ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86_64();
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   vvvv_reg,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_NONE);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   Rex_x,
+                                   Rex_b,
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_NONE);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  // Instruction Opcode
+  EmitUint8(0x10);
+  // Instruction Operands
+  EmitOperand(dst.LowBits(), src);
+}
+
 
 void X86_64Assembler::movaps(const Address& dst, XmmRegister src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovaps(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitOptionalRex32(src, dst);
   EmitUint8(0x0F);
@@ -533,8 +592,52 @@
   EmitOperand(src.LowBits(), dst);
 }
 
+/** VEX.128.0F.WIG 29 /r VMOVAPS m128, xmm1 */
+void X86_64Assembler::vmovaps(const Address& dst, XmmRegister src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero, ByteOne, ByteTwo;
+  bool is_twobyte_form = false;
+
+  // Instruction VEX Prefix
+  uint8_t rex = dst.rex();
+  bool Rex_x = rex & GET_REX_X;
+  bool Rex_b = rex & GET_REX_B;
+  if (!Rex_b && !Rex_x) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    X86_64ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86_64();
+    ByteOne = EmitVexPrefixByteOne(src.NeedsRex(),
+                                   vvvv_reg,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_NONE);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(src.NeedsRex(),
+                                   Rex_x,
+                                   Rex_b,
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_NONE);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  // Instruction Opcode
+  EmitUint8(0x29);
+  // Instruction Operands
+  EmitOperand(src.LowBits(), dst);
+}
 
 void X86_64Assembler::movups(const Address& dst, XmmRegister src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovups(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitOptionalRex32(src, dst);
   EmitUint8(0x0F);
@@ -542,6 +645,47 @@
   EmitOperand(src.LowBits(), dst);
 }
 
+/** VEX.128.0F.WIG 11 /r VMOVUPS m128, xmm1 */
+void X86_64Assembler::vmovups(const Address& dst, XmmRegister src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero, ByteOne, ByteTwo;
+  bool is_twobyte_form = false;
+
+  // Instruction VEX Prefix
+  uint8_t rex = dst.rex();
+  bool Rex_x = rex & GET_REX_X;
+  bool Rex_b = rex & GET_REX_B;
+  if (!Rex_b && !Rex_x) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    X86_64ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86_64();
+    ByteOne = EmitVexPrefixByteOne(src.NeedsRex(),
+                                   vvvv_reg,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_NONE);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(src.NeedsRex(),
+                                   Rex_x,
+                                   Rex_b,
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_NONE);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  // Instruction Opcode
+  EmitUint8(0x11);
+  // Instruction Operands
+  EmitOperand(src.LowBits(), dst);
+}
+
 
 void X86_64Assembler::movss(XmmRegister dst, const Address& src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -615,7 +759,6 @@
   EmitOperand(src.LowBits(), Operand(dst));
 }
 
-
 void X86_64Assembler::addss(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0xF3);
@@ -625,7 +768,6 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
-
 void X86_64Assembler::addss(XmmRegister dst, const Address& src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0xF3);
@@ -713,6 +855,60 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
+/** VEX.128.0F.WIG 58 /r VADDPS xmm1, xmm2, xmm3 */
+void X86_64Assembler::vaddps(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  // The 3-byte VEX form is needed only when the rm operand (add_right)
+  // requires the REX.B extension bit.
+  if (!add_right.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  // add_left is encoded in the VEX.vvvv field.
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(add_left.AsFloatRegister());
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_NONE);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   add_right.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_NONE);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0x58);
+  // ModRM: reg = dst, rm = add_right.
+  EmitXmmRegisterOperand(dst.LowBits(), add_right);
+}
+
+/** VEX.128.0F.WIG 5C /r VSUBPS xmm1, xmm2, xmm3 */
+void X86_64Assembler::vsubps(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  // Renamed from byte_zero/byte_one/byte_two to match the ByteZero/ByteOne/
+  // ByteTwo convention used by every other VEX emitter in this file.
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  // The 3-byte VEX form is needed only when the rm operand (src2) requires
+  // the REX.B extension bit.
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  // src1 is encoded in the VEX.vvvv field.
+  X86_64ManagedRegister vvvv_reg = X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_NONE);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), /*X=*/ false, src2.NeedsRex(), SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_NONE);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0x5C);
+  // ModRM: reg = dst, rm = src2.
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
+
 
 void X86_64Assembler::mulps(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -722,6 +918,34 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
+/** VEX.128.0F.WIG 59 /r VMULPS xmm1, xmm2, xmm3 */
+void X86_64Assembler::vmulps(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  // 3-byte VEX is needed only when the rm operand (src2) requires REX.B.
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  // src1 is encoded in the VEX.vvvv field.
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_NONE);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_NONE);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0x59);
+  // ModRM: reg = dst, rm = src2.
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
 
 void X86_64Assembler::divps(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -731,6 +955,34 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
+/** VEX.128.0F.WIG 5E /r VDIVPS xmm1, xmm2, xmm3 */
+void X86_64Assembler::vdivps(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  // 3-byte VEX is needed only when the rm operand (src2) requires REX.B.
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  // src1 is encoded in the VEX.vvvv field.
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_NONE);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_NONE);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0x5E);
+  // ModRM: reg = dst, rm = src2.
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
 
 void X86_64Assembler::flds(const Address& src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -754,6 +1006,10 @@
 
 
 void X86_64Assembler::movapd(XmmRegister dst, XmmRegister src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovapd(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
   EmitOptionalRex32(dst, src);
@@ -762,8 +1018,59 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
+/** VEX.128.66.0F.WIG 28 /r VMOVAPD xmm1, xmm2 (register-to-register). */
+void X86_64Assembler::vmovapd(XmmRegister dst, XmmRegister src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero, ByteOne, ByteTwo;
+  bool is_twobyte_form = true;
+
+  // The 2-byte VEX prefix carries only the R extension bit, which covers the
+  // ModRM reg field. With one extended register we can still use the 2-byte
+  // form by choosing between the load (0x28) and store (0x29) opcodes so the
+  // extended register lands in the reg field; with both extended we must fall
+  // back to the 3-byte form.
+  if (src.NeedsRex() && dst.NeedsRex()) {
+    is_twobyte_form = false;
+  }
+  // Instruction VEX Prefix
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  // Use the load form when dst needs the extension bit.
+  bool load = dst.NeedsRex();
+  if (is_twobyte_form) {
+    // Moves have no second source operand, so vvvv encodes no register.
+    X86_64ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86_64();
+    bool rex_bit = load ? dst.NeedsRex() : src.NeedsRex();
+    ByteOne = EmitVexPrefixByteOne(rex_bit,
+                                   vvvv_reg,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  // Instruction Opcode: store form (0x29) swaps the operand roles.
+  if (is_twobyte_form && !load) {
+    EmitUint8(0x29);
+  } else {
+    EmitUint8(0x28);
+  }
+  // Instruction Operands
+  if (is_twobyte_form && !load) {
+    EmitXmmRegisterOperand(src.LowBits(), dst);
+  } else {
+    EmitXmmRegisterOperand(dst.LowBits(), src);
+  }
+}
 
 void X86_64Assembler::movapd(XmmRegister dst, const Address& src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovapd(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
   EmitOptionalRex32(dst, src);
@@ -772,8 +1079,52 @@
   EmitOperand(dst.LowBits(), src);
 }
 
+/** VEX.128.66.0F.WIG 28 /r VMOVAPD xmm1, m128 (aligned load). */
+void X86_64Assembler::vmovapd(XmmRegister dst, const Address& src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero, ByteOne, ByteTwo;
+  bool is_twobyte_form = false;
+
+  // Instruction VEX Prefix
+  uint8_t rex = src.rex();
+  bool Rex_x = rex & GET_REX_X;
+  bool Rex_b = rex & GET_REX_B;
+  // 2-byte VEX cannot encode X/B, so it requires an address with neither.
+  if (!Rex_b && !Rex_x) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    // Moves have no second source operand, so vvvv encodes no register.
+    X86_64ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86_64();
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   vvvv_reg,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   Rex_x,
+                                   Rex_b,
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  // Instruction Opcode
+  EmitUint8(0x28);
+  // Instruction Operands
+  EmitOperand(dst.LowBits(), src);
+}
 
 void X86_64Assembler::movupd(XmmRegister dst, const Address& src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovupd(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
   EmitOptionalRex32(dst, src);
@@ -782,8 +1133,51 @@
   EmitOperand(dst.LowBits(), src);
 }
 
+/** VEX.128.66.0F.WIG 10 /r VMOVUPD xmm1, m128 (unaligned load). */
+void X86_64Assembler::vmovupd(XmmRegister dst, const Address& src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero, ByteOne, ByteTwo;
+
+  // Instruction VEX Prefix
+  uint8_t rex = src.rex();
+  bool Rex_x = rex & GET_REX_X;
+  bool Rex_b = rex & GET_REX_B;
+  // 2-byte VEX cannot encode X/B, so it requires an address with neither.
+  if (!Rex_b && !Rex_x) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    // Moves have no second source operand, so vvvv encodes no register.
+    X86_64ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86_64();
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   vvvv_reg,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   Rex_x,
+                                   Rex_b,
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  // Braces added: the un-braced single-statement `if` was inconsistent with
+  // every sibling emitter and fragile under future edits.
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  // Instruction Opcode
+  EmitUint8(0x10);
+  // Instruction Operands
+  EmitOperand(dst.LowBits(), src);
+}
 
 void X86_64Assembler::movapd(const Address& dst, XmmRegister src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovapd(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
   EmitOptionalRex32(src, dst);
@@ -792,8 +1186,51 @@
   EmitOperand(src.LowBits(), dst);
 }
 
+/** VEX.128.66.0F.WIG 29 /r VMOVAPD m128, xmm1 (aligned store). */
+void X86_64Assembler::vmovapd(const Address& dst, XmmRegister src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero, ByteOne, ByteTwo;
+  // Instruction VEX Prefix
+  uint8_t rex = dst.rex();
+  bool Rex_x = rex & GET_REX_X;
+  bool Rex_b = rex & GET_REX_B;
+  // 2-byte VEX cannot encode X/B, so it requires an address with neither.
+  if (!Rex_x && !Rex_b) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    // Moves have no second source operand, so vvvv encodes no register.
+    X86_64ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86_64();
+    ByteOne = EmitVexPrefixByteOne(src.NeedsRex(),
+                                   vvvv_reg,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(src.NeedsRex(),
+                                   Rex_x,
+                                   Rex_b,
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  // Instruction Opcode
+  EmitUint8(0x29);
+  // Instruction Operands
+  EmitOperand(src.LowBits(), dst);
+}
 
 void X86_64Assembler::movupd(const Address& dst, XmmRegister src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovupd(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
   EmitOptionalRex32(src, dst);
@@ -802,6 +1239,47 @@
   EmitOperand(src.LowBits(), dst);
 }
 
+/** VEX.128.66.0F.WIG 11 /r VMOVUPD m128, xmm1 (unaligned store). */
+void X86_64Assembler::vmovupd(const Address& dst, XmmRegister src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero, ByteOne, ByteTwo;
+
+  // Instruction VEX Prefix
+  uint8_t rex = dst.rex();
+  bool Rex_x = rex & GET_REX_X;
+  bool Rex_b = rex & GET_REX_B;
+  // 2-byte VEX cannot encode X/B, so it requires an address with neither.
+  if (!Rex_x && !Rex_b) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    // Moves have no second source operand, so vvvv encodes no register.
+    X86_64ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86_64();
+    ByteOne = EmitVexPrefixByteOne(src.NeedsRex(),
+                                   vvvv_reg,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(src.NeedsRex(),
+                                   Rex_x,
+                                   Rex_b,
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  // Instruction Opcode
+  EmitUint8(0x11);
+  // Instruction Operands
+  EmitOperand(src.LowBits(), dst);
+}
+
 
 void X86_64Assembler::movsd(XmmRegister dst, const Address& src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -923,6 +1401,35 @@
 }
 
 
+/** VEX.128.66.0F.WIG 58 /r VADDPD xmm1, xmm2, xmm3 */
+void X86_64Assembler::vaddpd(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  // Added: every sibling VEX emitter in this file guards on the AVX flag.
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  // 3-byte VEX is needed only when the rm operand (add_right) requires REX.B.
+  if (!add_right.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  // add_left is encoded in the VEX.vvvv field.
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(add_left.AsFloatRegister());
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   add_right.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0x58);
+  // ModRM: reg = dst, rm = add_right.
+  EmitXmmRegisterOperand(dst.LowBits(), add_right);
+}
+
+
 void X86_64Assembler::subpd(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
@@ -933,6 +1440,35 @@
 }
 
 
+/** VEX.128.66.0F.WIG 5C /r VSUBPD xmm1, xmm2, xmm3 */
+void X86_64Assembler::vsubpd(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  // Added: every sibling VEX emitter in this file guards on the AVX flag.
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  // 3-byte VEX is needed only when the rm operand (src2) requires REX.B.
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  // src1 is encoded in the VEX.vvvv field.
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0x5C);
+  // ModRM: reg = dst, rm = src2.
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
+
+
 void X86_64Assembler::mulpd(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
@@ -942,6 +1478,34 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
+/** VEX.128.66.0F.WIG 59 /r VMULPD xmm1, xmm2, xmm3 */
+void X86_64Assembler::vmulpd(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  // 3-byte VEX is needed only when the rm operand (src2) requires REX.B.
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  // src1 is encoded in the VEX.vvvv field.
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0x59);
+  // ModRM: reg = dst, rm = src2.
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
 
 void X86_64Assembler::divpd(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -953,7 +1517,41 @@
 }
 
 
+/** VEX.128.66.0F.WIG 5E /r VDIVPD xmm1, xmm2, xmm3 */
+void X86_64Assembler::vdivpd(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  // 3-byte VEX is needed only when the rm operand (src2) requires REX.B.
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  // src1 is encoded in the VEX.vvvv field.
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0x5E);
+  // ModRM: reg = dst, rm = src2.
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
+
+
 void X86_64Assembler::movdqa(XmmRegister dst, XmmRegister src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovdqa(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
   EmitOptionalRex32(dst, src);
@@ -962,8 +1560,59 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
+/** VEX.128.66.0F.WIG 6F /r VMOVDQA xmm1, xmm2 (register-to-register). */
+void X86_64Assembler::vmovdqa(XmmRegister dst, XmmRegister src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero, ByteOne, ByteTwo;
+  bool is_twobyte_form = true;
+
+  // Instruction VEX Prefix
+  // 2-byte VEX carries only the R bit (ModRM reg field). One extended
+  // register can still use the 2-byte form by switching between the load
+  // (0x6F) and store (0x7F) opcodes; both extended forces the 3-byte form.
+  if (src.NeedsRex() && dst.NeedsRex()) {
+    is_twobyte_form = false;
+  }
+  // Use the load form when dst needs the extension bit.
+  bool load = dst.NeedsRex();
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    // Moves have no second source operand, so vvvv encodes no register.
+    X86_64ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86_64();
+    bool rex_bit = load ? dst.NeedsRex() : src.NeedsRex();
+    ByteOne = EmitVexPrefixByteOne(rex_bit,
+                                   vvvv_reg,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  // Instruction Opcode: store form (0x7F) swaps the operand roles.
+  if (is_twobyte_form && !load) {
+    EmitUint8(0x7F);
+  } else {
+    EmitUint8(0x6F);
+  }
+  // Instruction Operands
+  if (is_twobyte_form && !load) {
+    EmitXmmRegisterOperand(src.LowBits(), dst);
+  } else {
+    EmitXmmRegisterOperand(dst.LowBits(), src);
+  }
+}
 
 void X86_64Assembler::movdqa(XmmRegister dst, const Address& src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovdqa(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
   EmitOptionalRex32(dst, src);
@@ -972,8 +1621,52 @@
   EmitOperand(dst.LowBits(), src);
 }
 
+/** VEX.128.66.0F.WIG 6F /r VMOVDQA xmm1, m128 (aligned load). */
+void X86_64Assembler::vmovdqa(XmmRegister dst, const Address& src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  // Fixed stray double space in the declaration below.
+  uint8_t ByteZero, ByteOne, ByteTwo;
+  bool is_twobyte_form = false;
+
+  // Instruction VEX Prefix
+  uint8_t rex = src.rex();
+  bool Rex_x = rex & GET_REX_X;
+  bool Rex_b = rex & GET_REX_B;
+  // 2-byte VEX cannot encode X/B, so it requires an address with neither.
+  if (!Rex_x && !Rex_b) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    // Moves have no second source operand, so vvvv encodes no register.
+    X86_64ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86_64();
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   vvvv_reg,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   Rex_x,
+                                   Rex_b,
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  // Instruction Opcode
+  EmitUint8(0x6F);
+  // Instruction Operands
+  EmitOperand(dst.LowBits(), src);
+}
 
 void X86_64Assembler::movdqu(XmmRegister dst, const Address& src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovdqu(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0xF3);
   EmitOptionalRex32(dst, src);
@@ -982,8 +1675,53 @@
   EmitOperand(dst.LowBits(), src);
 }
 
+/** VEX.128.F3.0F.WIG 6F /r VMOVDQU xmm1, m128 (unaligned load). */
+void X86_64Assembler::vmovdqu(XmmRegister dst, const Address& src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero, ByteOne, ByteTwo;
+  bool is_twobyte_form = false;
+
+  // Instruction VEX Prefix
+  uint8_t rex = src.rex();
+  bool Rex_x = rex & GET_REX_X;
+  bool Rex_b = rex & GET_REX_B;
+  // 2-byte VEX cannot encode X/B, so it requires an address with neither.
+  if (!Rex_x && !Rex_b) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    // Moves have no second source operand, so vvvv encodes no register.
+    X86_64ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86_64();
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   vvvv_reg,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_F3);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   Rex_x,
+                                   Rex_b,
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_F3);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  // Instruction Opcode
+  EmitUint8(0x6F);
+  // Instruction Operands
+  EmitOperand(dst.LowBits(), src);
+}
 
 void X86_64Assembler::movdqa(const Address& dst, XmmRegister src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovdqa(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
   EmitOptionalRex32(src, dst);
@@ -992,8 +1730,51 @@
   EmitOperand(src.LowBits(), dst);
 }
 
+/** VEX.128.66.0F.WIG 7F /r VMOVDQA m128, xmm1 (aligned store). */
+void X86_64Assembler::vmovdqa(const Address& dst, XmmRegister src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero, ByteOne, ByteTwo;
+  // Instruction VEX Prefix
+  uint8_t rex = dst.rex();
+  bool Rex_x = rex & GET_REX_X;
+  bool Rex_b = rex & GET_REX_B;
+  // 2-byte VEX cannot encode X/B, so it requires an address with neither.
+  if (!Rex_x && !Rex_b) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    // Moves have no second source operand, so vvvv encodes no register.
+    X86_64ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86_64();
+    ByteOne = EmitVexPrefixByteOne(src.NeedsRex(),
+                                   vvvv_reg,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(src.NeedsRex(),
+                                   Rex_x,
+                                   Rex_b,
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  // Instruction Opcode
+  EmitUint8(0x7F);
+  // Instruction Operands
+  EmitOperand(src.LowBits(), dst);
+}
 
 void X86_64Assembler::movdqu(const Address& dst, XmmRegister src) {
+  if (CpuHasAVXorAVX2FeatureFlag()) {
+    vmovdqu(dst, src);
+    return;
+  }
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0xF3);
   EmitOptionalRex32(src, dst);
@@ -1002,6 +1783,46 @@
   EmitOperand(src.LowBits(), dst);
 }
 
+/** VEX.128.F3.0F.WIG 7F /r VMOVDQU m128, xmm1 (unaligned store). */
+void X86_64Assembler::vmovdqu(const Address& dst, XmmRegister src) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero, ByteOne, ByteTwo;
+  bool is_twobyte_form = false;
+
+  // Instruction VEX Prefix
+  uint8_t rex = dst.rex();
+  bool Rex_x = rex & GET_REX_X;
+  bool Rex_b = rex & GET_REX_B;
+  // 2-byte VEX cannot encode X/B, so it requires an address with neither.
+  if (!Rex_b && !Rex_x) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    // Moves have no second source operand, so vvvv encodes no register.
+    X86_64ManagedRegister vvvv_reg = ManagedRegister::NoRegister().AsX86_64();
+    ByteOne = EmitVexPrefixByteOne(src.NeedsRex(),
+                                   vvvv_reg,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_F3);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(src.NeedsRex(),
+                                   Rex_x,
+                                   Rex_b,
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false,
+                                   SET_VEX_L_128,
+                                   SET_VEX_PP_F3);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  // Instruction Opcode
+  EmitUint8(0x7F);
+  // Instruction Operands
+  EmitOperand(src.LowBits(), dst);
+}
 
 void X86_64Assembler::paddb(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1013,6 +1834,36 @@
 }
 
 
+/** VEX.128.66.0F.WIG FC /r VPADDB xmm1, xmm2, xmm3 */
+void X86_64Assembler::vpaddb(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteOne = 0x00, ByteZero = 0x00, ByteTwo = 0x00;
+  bool is_twobyte_form = true;
+  // 3-byte VEX is needed only when the rm operand (add_right) requires REX.B.
+  if (add_right.NeedsRex()) {
+    is_twobyte_form = false;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  // add_left is encoded in the VEX.vvvv field.
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(add_left.AsFloatRegister());
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   add_right.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0xFC);
+  // ModRM: reg = dst, rm = add_right.
+  EmitXmmRegisterOperand(dst.LowBits(), add_right);
+}
+
+
 void X86_64Assembler::psubb(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
@@ -1023,6 +1874,36 @@
 }
 
 
+/** VEX.128.66.0F.WIG F8 /r VPSUBB xmm1, xmm2, xmm3 */
+// NOTE(review): parameters are named add_left/add_right although this emits a
+// subtract — consider renaming to src1/src2 together with the header.
+void X86_64Assembler::vpsubb(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  // 3-byte VEX is needed only when the rm operand (add_right) requires REX.B.
+  if (!add_right.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  // add_left is encoded in the VEX.vvvv field.
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(add_left.AsFloatRegister());
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   add_right.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0xF8);
+  // ModRM: reg = dst, rm = add_right.
+  EmitXmmRegisterOperand(dst.LowBits(), add_right);
+}
+
+
 void X86_64Assembler::paddw(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
@@ -1032,6 +1913,35 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
+/** VEX.128.66.0F.WIG FD /r VPADDW xmm1, xmm2, xmm3 */
+void X86_64Assembler::vpaddw(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  // 3-byte VEX is needed only when the rm operand (add_right) requires REX.B.
+  if (!add_right.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  // add_left is encoded in the VEX.vvvv field.
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(add_left.AsFloatRegister());
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   add_right.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0xFD);
+  // ModRM: reg = dst, rm = add_right.
+  EmitXmmRegisterOperand(dst.LowBits(), add_right);
+}
+
 
 void X86_64Assembler::psubw(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1042,6 +1952,35 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
+void X86_64Assembler::vpsubw(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  if (!add_right.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(add_left.AsFloatRegister());
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   add_right.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0xF9);
+  EmitXmmRegisterOperand(dst.LowBits(), add_right);
+}
+
 
 void X86_64Assembler::pmullw(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1052,6 +1991,34 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
+void X86_64Assembler::vpmullw(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0xD5);
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
 
 void X86_64Assembler::paddd(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1062,6 +2029,34 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
+void X86_64Assembler::vpaddd(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  if (!add_right.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(add_left.AsFloatRegister());
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   add_right.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0xFE);
+  EmitXmmRegisterOperand(dst.LowBits(), add_right);
+}
 
 void X86_64Assembler::psubd(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1083,6 +2078,24 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
+void X86_64Assembler::vpmulld(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  ByteZero = EmitVexPrefixByteZero(/*is_twobyte_form*/ false);
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F_38);
+  ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  EmitUint8(ByteTwo);
+  EmitUint8(0x40);
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
 
 void X86_64Assembler::paddq(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1094,6 +2107,36 @@
 }
 
 
+void X86_64Assembler::vpaddq(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  if (!add_right.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(add_left.AsFloatRegister());
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   add_right.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0xD4);
+  EmitXmmRegisterOperand(dst.LowBits(), add_right);
+}
+
+
 void X86_64Assembler::psubq(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
@@ -1103,6 +2146,35 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
+void X86_64Assembler::vpsubq(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  if (!add_right.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(add_left.AsFloatRegister());
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   add_right.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0xFB);
+  EmitXmmRegisterOperand(dst.LowBits(), add_right);
+}
+
 
 void X86_64Assembler::paddusb(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1164,6 +2236,36 @@
 }
 
 
+void X86_64Assembler::vpsubd(XmmRegister dst, XmmRegister add_left, XmmRegister add_right) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  if (!add_right.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(add_left.AsFloatRegister());
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   add_right.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0xFA);
+  EmitXmmRegisterOperand(dst.LowBits(), add_right);
+}
+
+
 void X86_64Assembler::psubusw(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
@@ -1530,7 +2632,6 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
-
 void X86_64Assembler::pxor(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
@@ -1540,6 +2641,95 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
+/* VEX.128.66.0F.WIG EF /r VPXOR xmm1, xmm2, xmm3/m128 */
+void X86_64Assembler::vpxor(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0xEF);
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
+
+/* VEX.128.0F.WIG 57 /r VXORPS xmm1,xmm2, xmm3/m128 */
+void X86_64Assembler::vxorps(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_NONE);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_NONE);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0x57);
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
+
+/* VEX.128.66.0F.WIG 57 /r VXORPD xmm1,xmm2, xmm3/m128 */
+void X86_64Assembler::vxorpd(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0x57);
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
 
 void X86_64Assembler::andpd(XmmRegister dst, const Address& src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
@@ -1576,17 +2766,107 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
+/* VEX.128.66.0F.WIG DB /r VPAND xmm1, xmm2, xmm3/m128 */
+void X86_64Assembler::vpand(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0xDB);
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
+
+/* VEX.128.0F 54 /r VANDPS xmm1,xmm2, xmm3/m128 */
+void X86_64Assembler::vandps(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_NONE);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_NONE);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0x54);
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
+
+/* VEX.128.66.0F 54 /r VANDPD xmm1, xmm2, xmm3/m128 */
+void X86_64Assembler::vandpd(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0x54);
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
+
 void X86_64Assembler::andn(CpuRegister dst, CpuRegister src1, CpuRegister src2) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
-  uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
-  uint8_t byte_one = EmitVexByte1(dst.NeedsRex(),
-                                  /*x=*/ false,
-                                  src2.NeedsRex(),
-                                  /*mmmmm=*/ 2);
-  uint8_t byte_two = EmitVexByte2(/*w=*/ true,
-                                  /*l=*/ 128,
-                                  X86_64ManagedRegister::FromCpuRegister(src1.AsRegister()),
-                                  /*pp=*/ 0);
+  uint8_t byte_zero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ false);
+  uint8_t byte_one = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                          /*X=*/ false,
+                                          src2.NeedsRex(),
+                                          SET_VEX_M_0F_38);
+  uint8_t byte_two = EmitVexPrefixByteTwo(/*W=*/ true,
+                                          X86_64ManagedRegister::FromCpuRegister(src1.AsRegister()),
+                                          SET_VEX_L_128,
+                                          SET_VEX_PP_NONE);
   EmitUint8(byte_zero);
   EmitUint8(byte_one);
   EmitUint8(byte_two);
@@ -1621,6 +2901,96 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
+/* VEX.128.66.0F.WIG DF /r VPANDN xmm1, xmm2, xmm3/m128 */
+void X86_64Assembler::vpandn(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0xDF);
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
+
+/* VEX.128.0F 55 /r VANDNPS xmm1, xmm2, xmm3/m128 */
+void X86_64Assembler::vandnps(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_NONE);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_NONE);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0x55);
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
+
+/* VEX.128.66.0F 55 /r VANDNPD xmm1, xmm2, xmm3/m128 */
+void X86_64Assembler::vandnpd(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0x55);
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
+
 void X86_64Assembler::orpd(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
@@ -1647,6 +3017,96 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
+/* VEX.128.66.0F.WIG EB /r VPOR xmm1, xmm2, xmm3/m128 */
+void X86_64Assembler::vpor(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0xEB);
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
+
+/* VEX.128.0F 56 /r VORPS xmm1,xmm2, xmm3/m128 */
+void X86_64Assembler::vorps(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_NONE);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_NONE);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0x56);
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
+
+/* VEX.128.66.0F 56 /r VORPD xmm1,xmm2, xmm3/m128 */
+void X86_64Assembler::vorpd(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0x56);
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
+
 void X86_64Assembler::pavgb(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
@@ -1683,6 +3143,35 @@
   EmitXmmRegisterOperand(dst.LowBits(), src);
 }
 
+void X86_64Assembler::vpmaddwd(XmmRegister dst, XmmRegister src1, XmmRegister src2) {
+  DCHECK(CpuHasAVXorAVX2FeatureFlag());
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  bool is_twobyte_form = false;
+  uint8_t ByteZero = 0x00, ByteOne = 0x00, ByteTwo = 0x00;
+  if (!src2.NeedsRex()) {
+    is_twobyte_form = true;
+  }
+  ByteZero = EmitVexPrefixByteZero(is_twobyte_form);
+  X86_64ManagedRegister vvvv_reg =
+      X86_64ManagedRegister::FromXmmRegister(src1.AsFloatRegister());
+  if (is_twobyte_form) {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(), vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  } else {
+    ByteOne = EmitVexPrefixByteOne(dst.NeedsRex(),
+                                   /*X=*/ false,
+                                   src2.NeedsRex(),
+                                   SET_VEX_M_0F);
+    ByteTwo = EmitVexPrefixByteTwo(/*W=*/ false, vvvv_reg, SET_VEX_L_128, SET_VEX_PP_66);
+  }
+  EmitUint8(ByteZero);
+  EmitUint8(ByteOne);
+  if (!is_twobyte_form) {
+    EmitUint8(ByteTwo);
+  }
+  EmitUint8(0xF5);
+  EmitXmmRegisterOperand(dst.LowBits(), src2);
+}
+
 void X86_64Assembler::phaddw(XmmRegister dst, XmmRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x66);
@@ -3374,15 +4863,15 @@
 
 void X86_64Assembler::blsi(CpuRegister dst, CpuRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
-  uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
-  uint8_t byte_one = EmitVexByte1(/*r=*/ false,
-                                  /*x=*/ false,
-                                  src.NeedsRex(),
-                                  /*mmmmm=*/ 2);
-  uint8_t byte_two = EmitVexByte2(/*w=*/ true,
-                                  /*l=*/ 128,
-                                  X86_64ManagedRegister::FromCpuRegister(dst.AsRegister()),
-                                  /*pp=*/ 0);
+  uint8_t byte_zero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ false);
+  uint8_t byte_one = EmitVexPrefixByteOne(/*R=*/ false,
+                                          /*X=*/ false,
+                                          src.NeedsRex(),
+                                          SET_VEX_M_0F_38);
+  uint8_t byte_two = EmitVexPrefixByteTwo(/*W=*/true,
+                                          X86_64ManagedRegister::FromCpuRegister(dst.AsRegister()),
+                                          SET_VEX_L_128,
+                                          SET_VEX_PP_NONE);
   EmitUint8(byte_zero);
   EmitUint8(byte_one);
   EmitUint8(byte_two);
@@ -3392,15 +4881,15 @@
 
 void X86_64Assembler::blsmsk(CpuRegister dst, CpuRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
-  uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
-  uint8_t byte_one = EmitVexByte1(/*r=*/ false,
-                                  /*x=*/ false,
-                                  src.NeedsRex(),
-                                  /*mmmmm=*/ 2);
-  uint8_t byte_two = EmitVexByte2(/*w=*/ true,
-                                  /*l=*/ 128,
-                                  X86_64ManagedRegister::FromCpuRegister(dst.AsRegister()),
-                                  /*pp=*/ 0);
+  uint8_t byte_zero = EmitVexPrefixByteZero(/*is_twobyte_form=*/ false);
+  uint8_t byte_one = EmitVexPrefixByteOne(/*R=*/ false,
+                                          /*X=*/ false,
+                                          src.NeedsRex(),
+                                          SET_VEX_M_0F_38);
+  uint8_t byte_two = EmitVexPrefixByteTwo(/*W=*/ true,
+                                          X86_64ManagedRegister::FromCpuRegister(dst.AsRegister()),
+                                          SET_VEX_L_128,
+                                          SET_VEX_PP_NONE);
   EmitUint8(byte_zero);
   EmitUint8(byte_one);
   EmitUint8(byte_two);
@@ -3410,15 +4899,15 @@
 
 void X86_64Assembler::blsr(CpuRegister dst, CpuRegister src) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
-  uint8_t byte_zero = EmitVexByteZero(/*is_two_byte=*/ false);
-  uint8_t byte_one = EmitVexByte1(/*r=*/ false,
-                                  /*x=*/ false,
-                                  src.NeedsRex(),
-                                  /*mmmmm=*/ 2);
-  uint8_t byte_two = EmitVexByte2(/*w=*/ true,
-                                  /*l=*/ 128,
-                                  X86_64ManagedRegister::FromCpuRegister(dst.AsRegister()),
-                                  /*pp=*/ 0);
+  uint8_t byte_zero = EmitVexPrefixByteZero(/*is_twobyte_form=*/false);
+  uint8_t byte_one = EmitVexPrefixByteOne(/*R=*/ false,
+                                          /*X=*/ false,
+                                          src.NeedsRex(),
+                                          SET_VEX_M_0F_38);
+  uint8_t byte_two = EmitVexPrefixByteTwo(/*W=*/ true,
+                                          X86_64ManagedRegister::FromCpuRegister(dst.AsRegister()),
+                                          SET_VEX_L_128,
+                                          SET_VEX_PP_NONE);
   EmitUint8(byte_zero);
   EmitUint8(byte_one);
   EmitUint8(byte_two);
@@ -3937,5 +5426,133 @@
   return AddInt32(bit_cast<int32_t, float>(v));
 }
 
+uint8_t X86_64Assembler::EmitVexPrefixByteZero(bool is_twobyte_form) {
+  // Vex Byte 0,
+  // Bits [7:0] must contain the value 11000101b (0xC5) for 2-byte Vex
+  // Bits [7:0] must contain the value 11000100b (0xC4) for 3-byte Vex
+  uint8_t vex_prefix = 0xC0;
+  if (is_twobyte_form) {
+    vex_prefix |= TWO_BYTE_VEX;  // 2-Byte Vex
+  } else {
+    vex_prefix |= THREE_BYTE_VEX;  // 3-Byte Vex
+  }
+  return vex_prefix;
+}
+
+uint8_t X86_64Assembler::EmitVexPrefixByteOne(bool R, bool X, bool B, int SET_VEX_M) {
+  // Vex Byte 1,
+  uint8_t vex_prefix = VEX_INIT;
+  /** Bit[7] This bit needs to be set to '1'
+  otherwise the instruction is LES or LDS */
+  if (!R) {
+    // R .
+    vex_prefix |= SET_VEX_R;
+  }
+  /** Bit[6] This bit needs to be set to '1'
+  otherwise the instruction is LES or LDS */
+  if (!X) {
+    // X .
+    vex_prefix |= SET_VEX_X;
+  }
+  /** Bit[5] This bit needs to be set to '1' */
+  if (!B) {
+    // B .
+    vex_prefix |= SET_VEX_B;
+  }
+  /** Bits[4:0], Based on the instruction documentaion */
+  vex_prefix |= SET_VEX_M;
+  return vex_prefix;
+}
+
+uint8_t X86_64Assembler::EmitVexPrefixByteOne(bool R,
+                                              X86_64ManagedRegister operand,
+                                              int SET_VEX_L,
+                                              int SET_VEX_PP) {
+  // Vex Byte 1,
+  uint8_t vex_prefix = VEX_INIT;
+  /** Bit[7] This bit needs to be set to '1'
+  otherwise the instruction is LES or LDS */
+  if (!R) {
+    // R .
+    vex_prefix |= SET_VEX_R;
+  }
+  /**Bits[6:3] - 'vvvv' the source or dest register specifier */
+  if (operand.IsNoRegister()) {
+    vex_prefix |= 0x78;
+  } else if (operand.IsXmmRegister()) {
+    XmmRegister vvvv = operand.AsXmmRegister();
+    int inverted_reg = 15 - static_cast<int>(vvvv.AsFloatRegister());
+    uint8_t reg = static_cast<uint8_t>(inverted_reg);
+    vex_prefix |= ((reg & 0x0F) << 3);
+  } else if (operand.IsCpuRegister()) {
+    CpuRegister vvvv = operand.AsCpuRegister();
+    int inverted_reg = 15 - static_cast<int>(vvvv.AsRegister());
+    uint8_t reg = static_cast<uint8_t>(inverted_reg);
+    vex_prefix |= ((reg & 0x0F) << 3);
+  }
+  /** Bit[2] - "L" If VEX.L = 1 indicates 256-bit vector operation,
+  VEX.L = 0 indicates 128 bit vector operation */
+  vex_prefix |= SET_VEX_L;
+  // Bits[1:0] -  "pp"
+  vex_prefix |= SET_VEX_PP;
+  return vex_prefix;
+}
+
+uint8_t X86_64Assembler::EmitVexPrefixByteTwo(bool W,
+                                              X86_64ManagedRegister operand,
+                                              int SET_VEX_L,
+                                              int SET_VEX_PP) {
+  // Vex Byte 2,
+  uint8_t vex_prefix = VEX_INIT;
+
+  /** Bit[7] This bits needs to be set to '1' with default value.
+  When using C4H form of VEX prefix, REX.W value is ignored */
+  if (W) {
+    vex_prefix |= SET_VEX_W;
+  }
+  // Bits[6:3] - 'vvvv' the source or dest register specifier
+  if (operand.IsXmmRegister()) {
+    XmmRegister vvvv = operand.AsXmmRegister();
+    int inverted_reg = 15 - static_cast<int>(vvvv.AsFloatRegister());
+    uint8_t reg = static_cast<uint8_t>(inverted_reg);
+    vex_prefix |= ((reg & 0x0F) << 3);
+  } else if (operand.IsCpuRegister()) {
+    CpuRegister vvvv = operand.AsCpuRegister();
+    int inverted_reg = 15 - static_cast<int>(vvvv.AsRegister());
+    uint8_t reg = static_cast<uint8_t>(inverted_reg);
+    vex_prefix |= ((reg & 0x0F) << 3);
+  }
+  /** Bit[2] - "L" If VEX.L = 1 indicates 256-bit vector operation,
+  VEX.L = 0 indicates 128 bit vector operation */
+  vex_prefix |= SET_VEX_L;
+  // Bits[1:0] -  "pp"
+  vex_prefix |= SET_VEX_PP;
+  return vex_prefix;
+}
+
+uint8_t X86_64Assembler::EmitVexPrefixByteTwo(bool W,
+                                              int SET_VEX_L,
+                                              int SET_VEX_PP) {
+  // Vex Byte 2,
+  uint8_t vex_prefix = VEX_INIT;
+
+  /** Bit[7] This bits needs to be set to '1' with default value.
+  When using C4H form of VEX prefix, REX.W value is ignored */
+  if (W) {
+    vex_prefix |= SET_VEX_W;
+  }
+  /** Bits[6:3] - 'vvvv' the source or dest register specifier */
+  vex_prefix |= (0x0F << 3);
+  /** Bit[2] - "L" If VEX.L = 1 indicates 256-bit vector operation,
+  VEX.L = 0 indicates 128 bit vector operation */
+  vex_prefix |= SET_VEX_L;
+
+  // Bits[1:0] -  "pp"
+  if (SET_VEX_PP != SET_VEX_PP_NONE) {
+    vex_prefix |= SET_VEX_PP;
+  }
+  return vex_prefix;
+}
+
 }  // namespace x86_64
 }  // namespace art
diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h
index ff13ea3..70072d9 100644
--- a/compiler/utils/x86_64/assembler_x86_64.h
+++ b/compiler/utils/x86_64/assembler_x86_64.h
@@ -19,6 +19,7 @@
 
 #include <vector>
 
+#include "arch/x86_64/instruction_set_features_x86_64.h"
 #include "base/arena_containers.h"
 #include "base/array_ref.h"
 #include "base/bit_utils.h"
@@ -353,8 +354,12 @@
 
 class X86_64Assembler final : public Assembler {
  public:
-  explicit X86_64Assembler(ArenaAllocator* allocator)
-      : Assembler(allocator), constant_area_(allocator) {}
+  explicit X86_64Assembler(ArenaAllocator* allocator,
+                           const X86_64InstructionSetFeatures* instruction_set_features = nullptr)
+      : Assembler(allocator),
+        constant_area_(allocator),
+        has_AVX_(instruction_set_features != nullptr ? instruction_set_features->HasAVX(): false),
+        has_AVX2_(instruction_set_features != nullptr ? instruction_set_features->HasAVX2() : false) {}
   virtual ~X86_64Assembler() {}
 
   /*
@@ -415,6 +420,12 @@
   void movaps(const Address& dst, XmmRegister src);  // store aligned
   void movups(const Address& dst, XmmRegister src);  // store unaligned
 
+  void vmovaps(XmmRegister dst, XmmRegister src);     // move
+  void vmovaps(XmmRegister dst, const Address& src);  // load aligned
+  void vmovaps(const Address& dst, XmmRegister src);  // store aligned
+  void vmovups(XmmRegister dst, const Address& src);  // load unaligned
+  void vmovups(const Address& dst, XmmRegister src);  // store unaligned
+
   void movss(XmmRegister dst, const Address& src);
   void movss(const Address& dst, XmmRegister src);
   void movss(XmmRegister dst, XmmRegister src);
@@ -441,12 +452,28 @@
   void mulps(XmmRegister dst, XmmRegister src);
   void divps(XmmRegister dst, XmmRegister src);
 
+  void vmulps(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vmulpd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vdivps(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vdivpd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+
+  void vaddps(XmmRegister dst, XmmRegister add_left, XmmRegister add_right);
+  void vsubps(XmmRegister dst, XmmRegister add_left, XmmRegister add_right);
+  void vsubpd(XmmRegister dst, XmmRegister add_left, XmmRegister add_right);
+  void vaddpd(XmmRegister dst, XmmRegister add_left, XmmRegister add_right);
+
   void movapd(XmmRegister dst, XmmRegister src);     // move
   void movapd(XmmRegister dst, const Address& src);  // load aligned
   void movupd(XmmRegister dst, const Address& src);  // load unaligned
   void movapd(const Address& dst, XmmRegister src);  // store aligned
   void movupd(const Address& dst, XmmRegister src);  // store unaligned
 
+  void vmovapd(XmmRegister dst, XmmRegister src);     // move
+  void vmovapd(XmmRegister dst, const Address& src);  // load aligned
+  void vmovapd(const Address& dst, XmmRegister src);  // store aligned
+  void vmovupd(XmmRegister dst, const Address& src);  // load unaligned
+  void vmovupd(const Address& dst, XmmRegister src);  // store unaligned
+
   void movsd(XmmRegister dst, const Address& src);
   void movsd(const Address& dst, XmmRegister src);
   void movsd(XmmRegister dst, XmmRegister src);
@@ -471,20 +498,40 @@
   void movdqa(const Address& dst, XmmRegister src);  // store aligned
   void movdqu(const Address& dst, XmmRegister src);  // store unaligned
 
+  void vmovdqa(XmmRegister dst, XmmRegister src);     // move
+  void vmovdqa(XmmRegister dst, const Address& src);  // load aligned
+  void vmovdqa(const Address& dst, XmmRegister src);  // store aligned
+  void vmovdqu(XmmRegister dst, const Address& src);  // load unaligned
+  void vmovdqu(const Address& dst, XmmRegister src);  // store unaligned
+
   void paddb(XmmRegister dst, XmmRegister src);  // no addr variant (for now)
   void psubb(XmmRegister dst, XmmRegister src);
 
+  void vpaddb(XmmRegister dst, XmmRegister add_left, XmmRegister add_right);
+  void vpaddw(XmmRegister dst, XmmRegister add_left, XmmRegister add_right);
+
   void paddw(XmmRegister dst, XmmRegister src);
   void psubw(XmmRegister dst, XmmRegister src);
   void pmullw(XmmRegister dst, XmmRegister src);
+  void vpmullw(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+
+  void vpsubb(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vpsubw(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vpsubd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
 
   void paddd(XmmRegister dst, XmmRegister src);
   void psubd(XmmRegister dst, XmmRegister src);
   void pmulld(XmmRegister dst, XmmRegister src);
+  void vpmulld(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+
+  void vpaddd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
 
   void paddq(XmmRegister dst, XmmRegister src);
   void psubq(XmmRegister dst, XmmRegister src);
 
+  void vpaddq(XmmRegister dst, XmmRegister add_left, XmmRegister add_right);
+  void vpsubq(XmmRegister dst, XmmRegister add_left, XmmRegister add_right);
+
   void paddusb(XmmRegister dst, XmmRegister src);
   void paddsb(XmmRegister dst, XmmRegister src);
   void paddusw(XmmRegister dst, XmmRegister src);
@@ -537,25 +584,38 @@
   void xorps(XmmRegister dst, const Address& src);
   void xorps(XmmRegister dst, XmmRegister src);
   void pxor(XmmRegister dst, XmmRegister src);  // no addr variant (for now)
+  void vpxor(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vxorps(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vxorpd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
 
   void andpd(XmmRegister dst, const Address& src);
   void andpd(XmmRegister dst, XmmRegister src);
   void andps(XmmRegister dst, XmmRegister src);  // no addr variant (for now)
   void pand(XmmRegister dst, XmmRegister src);
+  void vpand(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vandps(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vandpd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
 
   void andn(CpuRegister dst, CpuRegister src1, CpuRegister src2);
   void andnpd(XmmRegister dst, XmmRegister src);  // no addr variant (for now)
   void andnps(XmmRegister dst, XmmRegister src);
   void pandn(XmmRegister dst, XmmRegister src);
+  void vpandn(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vandnps(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vandnpd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
 
   void orpd(XmmRegister dst, XmmRegister src);  // no addr variant (for now)
   void orps(XmmRegister dst, XmmRegister src);
   void por(XmmRegister dst, XmmRegister src);
+  void vpor(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vorps(XmmRegister dst, XmmRegister src1, XmmRegister src2);
+  void vorpd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
 
   void pavgb(XmmRegister dst, XmmRegister src);  // no addr variant (for now)
   void pavgw(XmmRegister dst, XmmRegister src);
   void psadbw(XmmRegister dst, XmmRegister src);
   void pmaddwd(XmmRegister dst, XmmRegister src);
+  void vpmaddwd(XmmRegister dst, XmmRegister src1, XmmRegister src2);
   void phaddw(XmmRegister dst, XmmRegister src);
   void phaddd(XmmRegister dst, XmmRegister src);
   void haddps(XmmRegister dst, XmmRegister src);
@@ -909,6 +969,8 @@
     }
   }
 
+  bool CpuHasAVXorAVX2FeatureFlag();
+
  private:
   void EmitUint8(uint8_t value);
   void EmitInt32(int32_t value);
@@ -956,12 +1018,22 @@
   void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, CpuRegister src);
   void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, const Operand& operand);
 
-  // Emit a 3 byte VEX Prefix
-  uint8_t EmitVexByteZero(bool is_two_byte);
-  uint8_t EmitVexByte1(bool r, bool x, bool b, int mmmmm);
-  uint8_t EmitVexByte2(bool w , int l , X86_64ManagedRegister operand, int pp);
-
+  uint8_t EmitVexPrefixByteZero(bool is_twobyte_form);
+  uint8_t EmitVexPrefixByteOne(bool R, bool X, bool B, int SET_VEX_M);
+  uint8_t EmitVexPrefixByteOne(bool R,
+                               X86_64ManagedRegister operand,
+                               int SET_VEX_L,
+                               int SET_VEX_PP);
+  uint8_t EmitVexPrefixByteTwo(bool W,
+                               X86_64ManagedRegister operand,
+                               int SET_VEX_L,
+                               int SET_VEX_PP);
+  uint8_t EmitVexPrefixByteTwo(bool W,
+                               int SET_VEX_L,
+                               int SET_VEX_PP);
   ConstantArea constant_area_;
+  bool has_AVX_;     // x86 256bit SIMD AVX.
+  bool has_AVX2_;    // x86 256bit SIMD AVX 2.0.
 
   DISALLOW_COPY_AND_ASSIGN(X86_64Assembler);
 };
diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc
index 461f028..993cf95 100644
--- a/compiler/utils/x86_64/assembler_x86_64_test.cc
+++ b/compiler/utils/x86_64/assembler_x86_64_test.cc
@@ -339,6 +339,18 @@
   std::vector<x86_64::XmmRegister*> fp_registers_;
 };
 
+class AssemblerX86_64AVXTest : public AssemblerX86_64Test {
+ public:
+  AssemblerX86_64AVXTest()
+      : instruction_set_features_(X86_64InstructionSetFeatures::FromVariant("kabylake", nullptr)) {}
+ protected:
+  x86_64::X86_64Assembler* CreateAssembler(ArenaAllocator* allocator) override {
+    return new (allocator) x86_64::X86_64Assembler(allocator, instruction_set_features_.get());
+  }
+ private:
+  std::unique_ptr<const X86_64InstructionSetFeatures> instruction_set_features_;
+};
+
 //
 // Test some repeat drivers used in the tests.
 //
@@ -1107,22 +1119,62 @@
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::movaps, "movaps %{reg2}, %{reg1}"), "movaps");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VMovaps) {
+  DriverStr(RepeatFF(&x86_64::X86_64Assembler::vmovaps, "vmovaps %{reg2}, %{reg1}"), "vmovaps");
+}
+
+TEST_F(AssemblerX86_64AVXTest, Movaps) {
+  DriverStr(RepeatFF(&x86_64::X86_64Assembler::movaps, "vmovaps %{reg2}, %{reg1}"), "avx_movaps");
+}
+
 TEST_F(AssemblerX86_64Test, MovapsStore) {
   DriverStr(RepeatAF(&x86_64::X86_64Assembler::movaps, "movaps %{reg}, {mem}"), "movaps_s");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VMovapsStore) {
+  DriverStr(RepeatAF(&x86_64::X86_64Assembler::vmovaps, "vmovaps %{reg}, {mem}"), "vmovaps_s");
+}
+
+TEST_F(AssemblerX86_64AVXTest, MovapsStore) {
+  DriverStr(RepeatAF(&x86_64::X86_64Assembler::movaps, "vmovaps %{reg}, {mem}"), "avx_movaps_s");
+}
+
 TEST_F(AssemblerX86_64Test, MovapsLoad) {
   DriverStr(RepeatFA(&x86_64::X86_64Assembler::movaps, "movaps {mem}, %{reg}"), "movaps_l");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VMovapsLoad) {
+  DriverStr(RepeatFA(&x86_64::X86_64Assembler::vmovaps, "vmovaps {mem}, %{reg}"), "vmovaps_l");
+}
+
+TEST_F(AssemblerX86_64AVXTest, MovapsLoad) {
+  DriverStr(RepeatFA(&x86_64::X86_64Assembler::movaps, "vmovaps {mem}, %{reg}"), "avx_movaps_l");
+}
+
 TEST_F(AssemblerX86_64Test, MovupsStore) {
   DriverStr(RepeatAF(&x86_64::X86_64Assembler::movups, "movups %{reg}, {mem}"), "movups_s");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VMovupsStore) {
+  DriverStr(RepeatAF(&x86_64::X86_64Assembler::vmovups, "vmovups %{reg}, {mem}"), "vmovups_s");
+}
+
+TEST_F(AssemblerX86_64AVXTest, MovupsStore) {
+  DriverStr(RepeatAF(&x86_64::X86_64Assembler::movups, "vmovups %{reg}, {mem}"), "avx_movups_s");
+}
+
 TEST_F(AssemblerX86_64Test, MovupsLoad) {
   DriverStr(RepeatFA(&x86_64::X86_64Assembler::movups, "movups {mem}, %{reg}"), "movups_l");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VMovupsLoad) {
+  DriverStr(RepeatFA(&x86_64::X86_64Assembler::vmovups, "vmovups {mem}, %{reg}"), "vmovups_l");
+}
+
+TEST_F(AssemblerX86_64AVXTest, MovupsLoad) {
+  DriverStr(RepeatFA(&x86_64::X86_64Assembler::movups, "vmovups {mem}, %{reg}"), "avx_movups_l");
+}
+
 TEST_F(AssemblerX86_64Test, Movss) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::movss, "movss %{reg2}, %{reg1}"), "movss");
 }
@@ -1131,22 +1183,62 @@
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::movapd, "movapd %{reg2}, %{reg1}"), "movapd");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VMovapd) {
+  DriverStr(RepeatFF(&x86_64::X86_64Assembler::vmovapd, "vmovapd %{reg2}, %{reg1}"), "vmovapd");
+}
+
+TEST_F(AssemblerX86_64AVXTest, Movapd) {
+  DriverStr(RepeatFF(&x86_64::X86_64Assembler::movapd, "vmovapd %{reg2}, %{reg1}"), "avx_movapd");
+}
+
 TEST_F(AssemblerX86_64Test, MovapdStore) {
   DriverStr(RepeatAF(&x86_64::X86_64Assembler::movapd, "movapd %{reg}, {mem}"), "movapd_s");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VMovapdStore) {
+  DriverStr(RepeatAF(&x86_64::X86_64Assembler::vmovapd, "vmovapd %{reg}, {mem}"), "vmovapd_s");
+}
+
+TEST_F(AssemblerX86_64AVXTest, MovapdStore) {
+  DriverStr(RepeatAF(&x86_64::X86_64Assembler::movapd, "vmovapd %{reg}, {mem}"), "avx_movapd_s");
+}
+
 TEST_F(AssemblerX86_64Test, MovapdLoad) {
   DriverStr(RepeatFA(&x86_64::X86_64Assembler::movapd, "movapd {mem}, %{reg}"), "movapd_l");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VMovapdLoad) {
+  DriverStr(RepeatFA(&x86_64::X86_64Assembler::vmovapd, "vmovapd {mem}, %{reg}"), "vmovapd_l");
+}
+
+TEST_F(AssemblerX86_64AVXTest, MovapdLoad) {
+  DriverStr(RepeatFA(&x86_64::X86_64Assembler::movapd, "vmovapd {mem}, %{reg}"), "avx_movapd_l");
+}
+
 TEST_F(AssemblerX86_64Test, MovupdStore) {
   DriverStr(RepeatAF(&x86_64::X86_64Assembler::movupd, "movupd %{reg}, {mem}"), "movupd_s");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VMovupdStore) {
+  DriverStr(RepeatAF(&x86_64::X86_64Assembler::vmovupd, "vmovupd %{reg}, {mem}"), "vmovupd_s");
+}
+
+TEST_F(AssemblerX86_64AVXTest, MovupdStore) {
+  DriverStr(RepeatAF(&x86_64::X86_64Assembler::movupd, "vmovupd %{reg}, {mem}"), "avx_movupd_s");
+}
+
 TEST_F(AssemblerX86_64Test, MovupdLoad) {
   DriverStr(RepeatFA(&x86_64::X86_64Assembler::movupd, "movupd {mem}, %{reg}"), "movupd_l");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VMovupdLoad) {
+  DriverStr(RepeatFA(&x86_64::X86_64Assembler::vmovupd, "vmovupd {mem}, %{reg}"), "vmovupd_l");
+}
+
+TEST_F(AssemblerX86_64AVXTest, MovupdLoad) {
+  DriverStr(RepeatFA(&x86_64::X86_64Assembler::movupd, "vmovupd {mem}, %{reg}"), "avx_movupd_l");
+}
+
 TEST_F(AssemblerX86_64Test, Movsd) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::movsd, "movsd %{reg2}, %{reg1}"), "movsd");
 }
@@ -1155,22 +1247,62 @@
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::movdqa, "movdqa %{reg2}, %{reg1}"), "movdqa");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VMovdqa) {
+  DriverStr(RepeatFF(&x86_64::X86_64Assembler::vmovdqa, "vmovdqa %{reg2}, %{reg1}"), "vmovdqa");
+}
+
+TEST_F(AssemblerX86_64AVXTest, Movdqa) {
+  DriverStr(RepeatFF(&x86_64::X86_64Assembler::movdqa, "vmovdqa %{reg2}, %{reg1}"), "avx_movdqa");
+}
+
 TEST_F(AssemblerX86_64Test, MovdqaStore) {
   DriverStr(RepeatAF(&x86_64::X86_64Assembler::movdqa, "movdqa %{reg}, {mem}"), "movdqa_s");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VMovdqaStore) {
+  DriverStr(RepeatAF(&x86_64::X86_64Assembler::vmovdqa, "vmovdqa %{reg}, {mem}"), "vmovdqa_s");
+}
+
+TEST_F(AssemblerX86_64AVXTest, MovdqaStore) {
+  DriverStr(RepeatAF(&x86_64::X86_64Assembler::movdqa, "vmovdqa %{reg}, {mem}"), "avx_movdqa_s");
+}
+
 TEST_F(AssemblerX86_64Test, MovdqaLoad) {
   DriverStr(RepeatFA(&x86_64::X86_64Assembler::movdqa, "movdqa {mem}, %{reg}"), "movdqa_l");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VMovdqaLoad) {
+  DriverStr(RepeatFA(&x86_64::X86_64Assembler::vmovdqa, "vmovdqa {mem}, %{reg}"), "vmovdqa_l");
+}
+
+TEST_F(AssemblerX86_64AVXTest, MovdqaLoad) {
+  DriverStr(RepeatFA(&x86_64::X86_64Assembler::movdqa, "vmovdqa {mem}, %{reg}"), "avx_movdqa_l");
+}
+
 TEST_F(AssemblerX86_64Test, MovdquStore) {
   DriverStr(RepeatAF(&x86_64::X86_64Assembler::movdqu, "movdqu %{reg}, {mem}"), "movdqu_s");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VMovdquStore) {
+  DriverStr(RepeatAF(&x86_64::X86_64Assembler::vmovdqu, "vmovdqu %{reg}, {mem}"), "vmovdqu_s");
+}
+
+TEST_F(AssemblerX86_64AVXTest, MovdquStore) {
+  DriverStr(RepeatAF(&x86_64::X86_64Assembler::movdqu, "vmovdqu %{reg}, {mem}"), "avx_movdqu_s");
+}
+
 TEST_F(AssemblerX86_64Test, MovdquLoad) {
   DriverStr(RepeatFA(&x86_64::X86_64Assembler::movdqu, "movdqu {mem}, %{reg}"), "movdqu_l");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VMovdquLoad) {
+  DriverStr(RepeatFA(&x86_64::X86_64Assembler::vmovdqu, "vmovdqu {mem}, %{reg}"), "vmovdqu_l");
+}
+
+TEST_F(AssemblerX86_64AVXTest, MovdquLoad) {
+  DriverStr(RepeatFA(&x86_64::X86_64Assembler::movdqu, "vmovdqu {mem}, %{reg}"), "avx_movdqu_l");
+}
+
 TEST_F(AssemblerX86_64Test, Movd1) {
   DriverStr(RepeatFR(&x86_64::X86_64Assembler::movd, "movd %{reg2}, %{reg1}"), "movd.1");
 }
@@ -1191,10 +1323,20 @@
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::addps, "addps %{reg2}, %{reg1}"), "addps");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VAddps) {
+  DriverStr(
+      RepeatFFF(&x86_64::X86_64Assembler::vaddps, "vaddps %{reg3}, %{reg2}, %{reg1}"), "vaddps");
+}
+
 TEST_F(AssemblerX86_64Test, Addpd) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::addpd, "addpd %{reg2}, %{reg1}"), "addpd");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VAddpd) {
+  DriverStr(
+      RepeatFFF(&x86_64::X86_64Assembler::vaddpd, "vaddpd %{reg3}, %{reg2}, %{reg1}"), "vaddpd");
+}
+
 TEST_F(AssemblerX86_64Test, Subss) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::subss, "subss %{reg2}, %{reg1}"), "subss");
 }
@@ -1207,10 +1349,20 @@
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::subps, "subps %{reg2}, %{reg1}"), "subps");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VSubps) {
+  DriverStr(
+      RepeatFFF(&x86_64::X86_64Assembler::vsubps, "vsubps %{reg3},%{reg2}, %{reg1}"), "vsubps");
+}
+
 TEST_F(AssemblerX86_64Test, Subpd) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::subpd, "subpd %{reg2}, %{reg1}"), "subpd");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VSubpd) {
+  DriverStr(
+      RepeatFFF(&x86_64::X86_64Assembler::vsubpd, "vsubpd %{reg3}, %{reg2}, %{reg1}"), "vsubpd");
+}
+
 TEST_F(AssemblerX86_64Test, Mulss) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::mulss, "mulss %{reg2}, %{reg1}"), "mulss");
 }
@@ -1223,10 +1375,20 @@
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::mulps, "mulps %{reg2}, %{reg1}"), "mulps");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VMulps) {
+  DriverStr(
+      RepeatFFF(&x86_64::X86_64Assembler::vmulps, "vmulps %{reg3}, %{reg2}, %{reg1}"), "vmulps");
+}
+
 TEST_F(AssemblerX86_64Test, Mulpd) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::mulpd, "mulpd %{reg2}, %{reg1}"), "mulpd");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VMulpd) {
+  DriverStr(
+      RepeatFFF(&x86_64::X86_64Assembler::vmulpd, "vmulpd %{reg3}, %{reg2}, %{reg1}"), "vmulpd");
+}
+
 TEST_F(AssemblerX86_64Test, Divss) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::divss, "divss %{reg2}, %{reg1}"), "divss");
 }
@@ -1239,22 +1401,52 @@
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::divps, "divps %{reg2}, %{reg1}"), "divps");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VDivps) {
+  DriverStr(
+      RepeatFFF(&x86_64::X86_64Assembler::vdivps, "vdivps %{reg3}, %{reg2}, %{reg1}"), "vdivps");
+}
+
 TEST_F(AssemblerX86_64Test, Divpd) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::divpd, "divpd %{reg2}, %{reg1}"), "divpd");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VDivpd) {
+  DriverStr(
+      RepeatFFF(&x86_64::X86_64Assembler::vdivpd, "vdivpd %{reg3}, %{reg2}, %{reg1}"), "vdivpd");
+}
+
 TEST_F(AssemblerX86_64Test, Paddb) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::paddb, "paddb %{reg2}, %{reg1}"), "paddb");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VPaddb) {
+  DriverStr(
+      RepeatFFF(&x86_64::X86_64Assembler::vpaddb, "vpaddb %{reg3}, %{reg2}, %{reg1}"), "vpaddb");
+}
+
 TEST_F(AssemblerX86_64Test, Psubb) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::psubb, "psubb %{reg2}, %{reg1}"), "psubb");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VPsubb) {
+  DriverStr(
+      RepeatFFF(&x86_64::X86_64Assembler::vpsubb, "vpsubb %{reg3},%{reg2}, %{reg1}"), "vpsubb");
+}
+
 TEST_F(AssemblerX86_64Test, Paddw) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::paddw, "paddw %{reg2}, %{reg1}"), "paddw");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VPsubw) {
+  DriverStr(
+      RepeatFFF(&x86_64::X86_64Assembler::vpsubw, "vpsubw %{reg3}, %{reg2}, %{reg1}"), "vpsubw");
+}
+
+TEST_F(AssemblerX86_64AVXTest, VPaddw) {
+  DriverStr(
+      RepeatFFF(&x86_64::X86_64Assembler::vpaddw, "vpaddw %{reg3}, %{reg2}, %{reg1}"), "vpaddw");
+}
+
 TEST_F(AssemblerX86_64Test, Psubw) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::psubw, "psubw %{reg2}, %{reg1}"), "psubw");
 }
@@ -1263,26 +1455,56 @@
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::pmullw, "pmullw %{reg2}, %{reg1}"), "pmullw");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VPmullw) {
+  DriverStr(
+      RepeatFFF(&x86_64::X86_64Assembler::vpmullw, "vpmullw %{reg3}, %{reg2}, %{reg1}"), "vpmullw");
+}
+
 TEST_F(AssemblerX86_64Test, Paddd) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::paddd, "paddd %{reg2}, %{reg1}"), "paddd");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VPaddd) {
+  DriverStr(
+      RepeatFFF(&x86_64::X86_64Assembler::vpaddd, "vpaddd %{reg3}, %{reg2}, %{reg1}"), "vpaddd");
+}
+
 TEST_F(AssemblerX86_64Test, Psubd) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::psubd, "psubd %{reg2}, %{reg1}"), "psubd");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VPsubd) {
+  DriverStr(
+      RepeatFFF(&x86_64::X86_64Assembler::vpsubd, "vpsubd %{reg3}, %{reg2}, %{reg1}"), "vpsubd");
+}
+
 TEST_F(AssemblerX86_64Test, Pmulld) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::pmulld, "pmulld %{reg2}, %{reg1}"), "pmulld");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VPmulld) {
+  DriverStr(
+      RepeatFFF(&x86_64::X86_64Assembler::vpmulld, "vpmulld %{reg3}, %{reg2}, %{reg1}"), "vpmulld");
+}
+
 TEST_F(AssemblerX86_64Test, Paddq) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::paddq, "paddq %{reg2}, %{reg1}"), "paddq");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VPaddq) {
+  DriverStr(
+      RepeatFFF(&x86_64::X86_64Assembler::vpaddq, "vpaddq %{reg3}, %{reg2}, %{reg1}"), "vpaddq");
+}
+
 TEST_F(AssemblerX86_64Test, Psubq) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::psubq, "psubq %{reg2}, %{reg1}"), "psubq");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VPsubq) {
+  DriverStr(
+      RepeatFFF(&x86_64::X86_64Assembler::vpsubq, "vpsubq %{reg3}, %{reg2}, %{reg1}"), "vpsubq");
+}
+
 TEST_F(AssemblerX86_64Test, Paddusb) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::paddusb, "paddusb %{reg2}, %{reg1}"), "paddusb");
 }
@@ -1403,6 +1625,21 @@
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::pxor, "pxor %{reg2}, %{reg1}"), "pxor");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VPXor) {
+  DriverStr(RepeatFFF(&x86_64::X86_64Assembler::vpxor,
+                      "vpxor %{reg3}, %{reg2}, %{reg1}"), "vpxor");
+}
+
+TEST_F(AssemblerX86_64AVXTest, VXorps) {
+  DriverStr(RepeatFFF(&x86_64::X86_64Assembler::vxorps,
+                      "vxorps %{reg3}, %{reg2}, %{reg1}"), "vxorps");
+}
+
+TEST_F(AssemblerX86_64AVXTest, VXorpd) {
+  DriverStr(RepeatFFF(&x86_64::X86_64Assembler::vxorpd,
+                      "vxorpd %{reg3}, %{reg2}, %{reg1}"), "vxorpd");
+}
+
 TEST_F(AssemblerX86_64Test, Andps) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::andps, "andps %{reg2}, %{reg1}"), "andps");
 }
@@ -1414,6 +1651,22 @@
 TEST_F(AssemblerX86_64Test, Pand) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::pand, "pand %{reg2}, %{reg1}"), "pand");
 }
+
+TEST_F(AssemblerX86_64AVXTest, VPAnd) {
+  DriverStr(RepeatFFF(&x86_64::X86_64Assembler::vpand,
+                      "vpand %{reg3}, %{reg2}, %{reg1}"), "vpand");
+}
+
+TEST_F(AssemblerX86_64AVXTest, VAndps) {
+  DriverStr(RepeatFFF(&x86_64::X86_64Assembler::vandps,
+                      "vandps %{reg3}, %{reg2}, %{reg1}"), "vandps");
+}
+
+TEST_F(AssemblerX86_64AVXTest, VAndpd) {
+  DriverStr(RepeatFFF(&x86_64::X86_64Assembler::vandpd,
+                      "vandpd %{reg3}, %{reg2}, %{reg1}"), "vandpd");
+}
+
 TEST_F(AssemblerX86_64Test, Andn) {
   DriverStr(RepeatRRR(&x86_64::X86_64Assembler::andn, "andn %{reg3}, %{reg2}, %{reg1}"), "andn");
 }
@@ -1429,6 +1682,21 @@
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::pandn, "pandn %{reg2}, %{reg1}"), "pandn");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VPAndn) {
+  DriverStr(RepeatFFF(&x86_64::X86_64Assembler::vpandn,
+                      "vpandn %{reg3}, %{reg2}, %{reg1}"), "vpandn");
+}
+
+TEST_F(AssemblerX86_64AVXTest, VAndnps) {
+  DriverStr(RepeatFFF(&x86_64::X86_64Assembler::vandnps,
+                      "vandnps %{reg3}, %{reg2}, %{reg1}"), "vandnps");
+}
+
+TEST_F(AssemblerX86_64AVXTest, VAndnpd) {
+  DriverStr(RepeatFFF(&x86_64::X86_64Assembler::vandnpd,
+                      "vandnpd %{reg3}, %{reg2}, %{reg1}"), "vandnpd");
+}
+
 TEST_F(AssemblerX86_64Test, Orps) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::orps, "orps %{reg2}, %{reg1}"), "orps");
 }
@@ -1441,6 +1709,21 @@
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::por, "por %{reg2}, %{reg1}"), "por");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VPor) {
+  DriverStr(RepeatFFF(&x86_64::X86_64Assembler::vpor,
+                      "vpor %{reg3}, %{reg2}, %{reg1}"), "vpor");
+}
+
+TEST_F(AssemblerX86_64AVXTest, Vorps) {
+  DriverStr(RepeatFFF(&x86_64::X86_64Assembler::vorps,
+                      "vorps %{reg3}, %{reg2}, %{reg1}"), "vorps");
+}
+
+TEST_F(AssemblerX86_64AVXTest, Vorpd) {
+  DriverStr(RepeatFFF(&x86_64::X86_64Assembler::vorpd,
+                      "vorpd %{reg3}, %{reg2}, %{reg1}"), "vorpd");
+}
+
 TEST_F(AssemblerX86_64Test, Pavgb) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::pavgb, "pavgb %{reg2}, %{reg1}"), "pavgb");
 }
@@ -1457,6 +1740,11 @@
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::pmaddwd, "pmaddwd %{reg2}, %{reg1}"), "pmadwd");
 }
 
+TEST_F(AssemblerX86_64AVXTest, VPmaddwd) {
+  DriverStr(RepeatFFF(&x86_64::X86_64Assembler::vpmaddwd,
+                      "vpmaddwd %{reg3}, %{reg2}, %{reg1}"), "vpmaddwd");
+}
+
 TEST_F(AssemblerX86_64Test, Phaddw) {
   DriverStr(RepeatFF(&x86_64::X86_64Assembler::phaddw, "phaddw %{reg2}, %{reg1}"), "phaddw");
 }
@@ -2122,7 +2410,7 @@
 
   // Construct assembly text counterpart.
   std::ostringstream str;
-  str << "addq $0, %rsp\n";
+  // Increase by 0 is a NO-OP and ignored by the assembler.
   str << "addq $-" << kStackAlignment << ", %rsp\n";
   str << "addq $-" << 10 * kStackAlignment << ", %rsp\n";
 
@@ -2142,7 +2430,7 @@
 
   // Construct assembly text counterpart.
   std::ostringstream str;
-  str << "addq $0, %rsp\n";
+  // Decrease by 0 is a NO-OP and ignored by the assembler.
   str << "addq $" << kStackAlignment << ", %rsp\n";
   str << "addq $" << 10 * kStackAlignment << ", %rsp\n";
 
diff --git a/compiler/utils/x86_64/constants_x86_64.h b/compiler/utils/x86_64/constants_x86_64.h
index b02e246..5335398 100644
--- a/compiler/utils/x86_64/constants_x86_64.h
+++ b/compiler/utils/x86_64/constants_x86_64.h
@@ -59,6 +59,9 @@
   constexpr bool NeedsRex() const {
     return reg_ > 7;
   }
+  bool operator==(XmmRegister& other) {
+    return reg_ == other.reg_;
+  }
  private:
   const FloatRegister reg_;
 };
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
index 5924a8b..ffe9020 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
@@ -33,6 +33,9 @@
 
 constexpr size_t kFramePointerSize = 8;
 
+static constexpr size_t kNativeStackAlignment = 16;
+static_assert(kNativeStackAlignment == kStackAlignment);
+
 #define __ asm_.
 
 void X86_64JNIMacroAssembler::BuildFrame(size_t frame_size,
@@ -41,8 +44,13 @@
                                          const ManagedRegisterEntrySpills& entry_spills) {
   DCHECK_EQ(CodeSize(), 0U);  // Nothing emitted yet.
   cfi().SetCurrentCFAOffset(8);  // Return address on stack.
-  CHECK_ALIGNED(frame_size, kStackAlignment);
-  int gpr_count = 0;
+  // Note: @CriticalNative tail call is not used (would have frame_size == kFramePointerSize).
+  if (method_reg.IsNoRegister()) {
+    CHECK_ALIGNED(frame_size, kNativeStackAlignment);
+  } else {
+    CHECK_ALIGNED(frame_size, kStackAlignment);
+  }
+  size_t gpr_count = 0u;
   for (int i = spill_regs.size() - 1; i >= 0; --i) {
     x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
     if (spill.IsCpuRegister()) {
@@ -56,8 +64,10 @@
   int64_t rest_of_frame = static_cast<int64_t>(frame_size)
                           - (gpr_count * kFramePointerSize)
                           - kFramePointerSize /*return address*/;
-  __ subq(CpuRegister(RSP), Immediate(rest_of_frame));
-  cfi().AdjustCFAOffset(rest_of_frame);
+  if (rest_of_frame != 0) {
+    __ subq(CpuRegister(RSP), Immediate(rest_of_frame));
+    cfi().AdjustCFAOffset(rest_of_frame);
+  }
 
   // spill xmms
   int64_t offset = rest_of_frame;
@@ -73,7 +83,9 @@
   static_assert(static_cast<size_t>(kX86_64PointerSize) == kFramePointerSize,
                 "Unexpected frame pointer size.");
 
-  __ movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
+  if (method_reg.IsRegister()) {
+    __ movq(Address(CpuRegister(RSP), 0), method_reg.AsX86_64().AsCpuRegister());
+  }
 
   for (const ManagedRegisterSpill& spill : entry_spills) {
     if (spill.AsX86_64().IsCpuRegister()) {
@@ -101,26 +113,29 @@
 void X86_64JNIMacroAssembler::RemoveFrame(size_t frame_size,
                                           ArrayRef<const ManagedRegister> spill_regs,
                                           bool may_suspend ATTRIBUTE_UNUSED) {
-  CHECK_ALIGNED(frame_size, kStackAlignment);
+  CHECK_ALIGNED(frame_size, kNativeStackAlignment);
   cfi().RememberState();
   int gpr_count = 0;
   // unspill xmms
   int64_t offset = static_cast<int64_t>(frame_size)
       - (spill_regs.size() * kFramePointerSize)
-      - 2 * kFramePointerSize;
+      - kFramePointerSize;
   for (size_t i = 0; i < spill_regs.size(); ++i) {
     x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
     if (spill.IsXmmRegister()) {
-      offset += sizeof(double);
       __ movsd(spill.AsXmmRegister(), Address(CpuRegister(RSP), offset));
       cfi().Restore(DWARFReg(spill.AsXmmRegister().AsFloatRegister()));
+      offset += sizeof(double);
     } else {
       gpr_count++;
     }
   }
-  int adjust = static_cast<int>(frame_size) - (gpr_count * kFramePointerSize) - kFramePointerSize;
-  __ addq(CpuRegister(RSP), Immediate(adjust));
-  cfi().AdjustCFAOffset(-adjust);
+  DCHECK_EQ(static_cast<size_t>(offset),
+            frame_size - (gpr_count * kFramePointerSize) - kFramePointerSize);
+  if (offset != 0) {
+    __ addq(CpuRegister(RSP), Immediate(offset));
+    cfi().AdjustCFAOffset(-offset);
+  }
   for (size_t i = 0; i < spill_regs.size(); ++i) {
     x86_64::X86_64ManagedRegister spill = spill_regs[i].AsX86_64();
     if (spill.IsCpuRegister()) {
@@ -136,15 +151,19 @@
 }
 
 void X86_64JNIMacroAssembler::IncreaseFrameSize(size_t adjust) {
-  CHECK_ALIGNED(adjust, kStackAlignment);
-  __ addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));
-  cfi().AdjustCFAOffset(adjust);
+  if (adjust != 0u) {
+    CHECK_ALIGNED(adjust, kNativeStackAlignment);
+    __ addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));
+    cfi().AdjustCFAOffset(adjust);
+  }
 }
 
 static void DecreaseFrameSizeImpl(size_t adjust, X86_64Assembler* assembler) {
-  CHECK_ALIGNED(adjust, kStackAlignment);
-  assembler->addq(CpuRegister(RSP), Immediate(adjust));
-  assembler->cfi().AdjustCFAOffset(-adjust);
+  if (adjust != 0u) {
+    CHECK_ALIGNED(adjust, kNativeStackAlignment);
+    assembler->addq(CpuRegister(RSP), Immediate(adjust));
+    assembler->cfi().AdjustCFAOffset(-adjust);
+  }
 }
 
 void X86_64JNIMacroAssembler::DecreaseFrameSize(size_t adjust) {
@@ -544,6 +563,12 @@
   // TODO: not validating references
 }
 
+void X86_64JNIMacroAssembler::Jump(ManagedRegister mbase, Offset offset, ManagedRegister) {
+  X86_64ManagedRegister base = mbase.AsX86_64();
+  CHECK(base.IsCpuRegister());
+  __ jmp(Address(base.AsCpuRegister(), offset.Int32Value()));
+}
+
 void X86_64JNIMacroAssembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
   X86_64ManagedRegister base = mbase.AsX86_64();
   CHECK(base.IsCpuRegister());
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
index 4c2fd8f..d3f1fce 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.h
@@ -172,6 +172,9 @@
   void VerifyObject(ManagedRegister src, bool could_be_null) override;
   void VerifyObject(FrameOffset src, bool could_be_null) override;
 
+  // Jump to address held at [base+offset] (used for tail calls).
+  void Jump(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
+
   // Call to address held at [base+offset]
   void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) override;
   void Call(FrameOffset base, Offset offset, ManagedRegister scratch) override;
diff --git a/dalvikvm/Android.bp b/dalvikvm/Android.bp
index 0688318..8cd8819 100644
--- a/dalvikvm/Android.bp
+++ b/dalvikvm/Android.bp
@@ -54,4 +54,8 @@
 
     // Create symlink for the primary version target.
     symlink_preferred_arch: true,
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
diff --git a/dalvikvm/dalvikvm.cc b/dalvikvm/dalvikvm.cc
index e735e2f..4808a1f 100644
--- a/dalvikvm/dalvikvm.cc
+++ b/dalvikvm/dalvikvm.cc
@@ -208,6 +208,13 @@
 
 }  // namespace art
 
+// TODO(b/141622862): stop leaks
+extern "C" const char *__asan_default_options() {
+    return "detect_leaks=0";
+}
+
 int main(int argc, char** argv) {
-  return art::dalvikvm(argc, argv);
+  // Do not allow static destructors to be called, since it's conceivable that
+  // daemons may still awaken (literally).
+  _exit(art::dalvikvm(argc, argv));
 }
diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp
index 12701b2..fb76dd9 100644
--- a/dex2oat/Android.bp
+++ b/dex2oat/Android.bp
@@ -42,16 +42,6 @@
                 "linker/arm64/relative_patcher_arm64.cc",
             ],
         },
-        mips: {
-            srcs: [
-                "linker/mips/relative_patcher_mips.cc",
-            ],
-        },
-        mips64: {
-            srcs: [
-                "linker/mips64/relative_patcher_mips64.cc",
-            ],
-        },
         x86: {
             srcs: [
                 "linker/x86/relative_patcher_x86.cc",
@@ -80,22 +70,18 @@
     generated_sources: ["art_dex2oat_operator_srcs"],
     shared_libs: [
         "libbase",
+
+        // For SHA-1 checksumming of build ID
+        "libcrypto",
     ],
     export_include_dirs: ["."],
-
-    // For SHA-1 checksumming of build ID
-    static: {
-        whole_static_libs: ["libcrypto"],
-    },
-    shared: {
-        shared_libs: ["libcrypto"],
-    },
 }
 
 cc_defaults {
     name: "libart-dex2oat_static_base_defaults",
     static_libs: [
         "libbase",
+        "libcrypto",
         "libz",
     ],
 }
@@ -122,6 +108,10 @@
         "libartpalette",
         "libprofile",
     ],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 cc_defaults {
@@ -151,6 +141,9 @@
         "libartpalette",
         "libprofiled",
     ],
+    apex_available: [
+        "com.android.art.debug",
+    ],
 }
 
 cc_defaults {
@@ -169,8 +162,18 @@
 
 cc_library_headers {
     name: "dex2oat_headers",
+    visibility: [
+        // TODO(b/133140750): Clean this up.
+        "//frameworks/native/cmds/installd",
+    ],
     host_supported: true,
     export_include_dirs: ["include"],
+
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 cc_defaults {
@@ -184,8 +187,7 @@
 
     target: {
         android: {
-            // Use the 32-bit version of dex2oat on devices.
-            compile_multilib: "prefer32",
+            compile_multilib: "both",
         },
     },
     header_libs: [
@@ -196,6 +198,10 @@
 
 cc_defaults {
     name: "dex2oat-pgo-defaults",
+    defaults_visibility: [
+        "//art:__subpackages__",
+        "//external/vixl",
+    ],
     pgo: {
         instrumentation: true,
         benchmarks: ["dex2oat"],
@@ -221,16 +227,6 @@
                 profile_file: "art/dex2oat_x86_x86_64.profdata",
             },
         },
-        android_mips64: {
-            pgo: {
-                profile_file: "art/dex2oat_mips_mips64.profdata",
-            },
-        },
-        android_mips: {
-            pgo: {
-                profile_file: "art/dex2oat_mips_mips64.profdata",
-            },
-        },
     },
 }
 
@@ -240,11 +236,15 @@
         "dex2oat-defaults",
         "dex2oat-pgo-defaults",
     ],
+    // Modules that do dexpreopting, e.g. android_app, depend implicitly on
+    // either dex2oat or dex2oatd in ART source builds.
+    visibility: ["//visibility:public"],
     shared_libs: [
         "libprofile",
         "libart-compiler",
         "libart-dexlayout",
         "libart",
+        "libcrypto",
         "libdexfile",
         "libartbase",
         "libartpalette",
@@ -255,6 +255,15 @@
         "libart-dex2oat",
     ],
 
+    multilib: {
+        lib32: {
+            suffix: "32",
+        },
+        lib64: {
+            suffix: "64",
+        },
+    },
+
     pgo: {
         // Additional cflags just for dex2oat during PGO instrumentation
         cflags: [
@@ -266,7 +275,7 @@
     target: {
         android: {
             lto: {
-                 thin: true,
+                thin: true,
             },
             static_libs: [
                 "libz",
@@ -276,8 +285,21 @@
             shared_libs: [
                 "libz",
             ],
+            // Override the prefer32 added by art_cc_binary when
+            // HOST_PREFER_32_BIT is in use. Necessary because the logic in
+            // Soong for setting ctx.Config().BuildOSTarget (used in
+            // dexpreopt.RegisterToolDeps) doesn't take host prefer32 into
+            // account. Note that this override cannot be in cc_default because
+            // it'd get overridden by the load hook even when it uses
+            // PrependProperties.
+            compile_multilib: "64",
+            symlink_preferred_arch: true,
         },
     },
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_binary {
@@ -286,11 +308,15 @@
         "art_debug_defaults",
         "dex2oat-defaults",
     ],
+    // Modules that do dexpreopting, e.g. android_app, depend implicitly on
+    // either dex2oat or dex2oatd in ART source builds.
+    visibility: ["//visibility:public"],
     shared_libs: [
         "libprofiled",
         "libartd-compiler",
         "libartd-dexlayout",
         "libartd",
+        "libcrypto",
         "libdexfiled",
         "libartbased",
         "libartpalette",
@@ -305,22 +331,31 @@
             static_libs: [
                 "libz",
             ],
-            compile_multilib: "prefer32",
         },
         host: {
             shared_libs: [
                 "libz",
             ],
-            compile_multilib: "both",
+            // Override the prefer32 added by art_cc_binary when
+            // HOST_PREFER_32_BIT is in use. Necessary because the logic in
+            // Soong for setting ctx.Config().BuildOSTarget (used in
+            // dexpreopt.RegisterToolDeps) doesn't take host prefer32 into
+            // account. Note that this override cannot be in cc_default because
+            // it'd get overridden by the load hook even when it uses
+            // PrependProperties.
+            compile_multilib: "64",
             symlink_preferred_arch: true,
         },
-        linux_glibc_x86: {
+    },
+    apex_available: [
+        "com.android.art.debug",
+    ],
+
+    multilib: {
+        lib32: {
             suffix: "32",
         },
-        linux_glibc_x86_64: {
-            suffix: "64",
-        },
-        linux_bionic_x86_64: {
+        lib64: {
             suffix: "64",
         },
     },
@@ -435,17 +470,6 @@
                 "linker/arm64/relative_patcher_arm64_test.cc",
             ],
         },
-        mips: {
-            srcs: [
-                "linker/mips/relative_patcher_mips_test.cc",
-                "linker/mips/relative_patcher_mips32r6_test.cc",
-            ],
-        },
-        mips64: {
-            srcs: [
-                "linker/mips64/relative_patcher_mips64_test.cc",
-            ],
-        },
         x86: {
             srcs: [
                 "linker/x86/relative_patcher_x86_test.cc",
@@ -459,15 +483,13 @@
     },
 
     header_libs: ["dex2oat_headers"],
-    include_dirs: [
-        "external/zlib",
-    ],
     shared_libs: [
         "libartbased",
         "libartd-compiler",
         "libartd-dexlayout",
         "libartpalette",
         "libbase",
+        "libcrypto",
         "libprofiled",
         "libsigchain",
         "libziparchive",
diff --git a/dex2oat/dex/dex_to_dex_compiler.cc b/dex2oat/dex/dex_to_dex_compiler.cc
index 23ce37e..de66c1e 100644
--- a/dex2oat/dex/dex_to_dex_compiler.cc
+++ b/dex2oat/dex/dex_to_dex_compiler.cc
@@ -343,7 +343,7 @@
     DCHECK_EQ(quicken_index_, existing_quicken_info_.NumIndices());
   }
 
-  // Even if there are no indicies, generate an empty quicken info so that we know the method was
+  // Even if there are no indices, generate an empty quicken info so that we know the method was
   // quickened.
 
   std::vector<uint8_t> quicken_data;
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 9961608..5654894 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -41,8 +41,8 @@
 #include "android-base/stringprintf.h"
 #include "android-base/strings.h"
 
+#include "aot_class_linker.h"
 #include "arch/instruction_set_features.h"
-#include "arch/mips/instruction_set_features_mips.h"
 #include "art_method-inl.h"
 #include "base/callee_save_type.h"
 #include "base/dumpable.h"
@@ -67,6 +67,7 @@
 #include "debug/method_debug_info.h"
 #include "dex/descriptors_names.h"
 #include "dex/dex_file-inl.h"
+#include "dex/dex_file_loader.h"
 #include "dex/quick_compiler_callbacks.h"
 #include "dex/verification_results.h"
 #include "dex2oat_options.h"
@@ -86,10 +87,11 @@
 #include "linker/image_writer.h"
 #include "linker/multi_oat_relative_patcher.h"
 #include "linker/oat_writer.h"
-#include "mirror/class-inl.h"
+#include "mirror/class-alloc-inl.h"
 #include "mirror/class_loader.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
+#include "oat.h"
 #include "oat_file.h"
 #include "oat_file_assistant.h"
 #include "profile/profile_compilation_info.h"
@@ -191,7 +193,7 @@
 
   // Construct the final output.
   if (command.size() <= 1U) {
-    // It seems only "/apex/com.android.runtime/bin/dex2oat" is left, or not
+    // It seems only "/apex/com.android.art/bin/dex2oat" is left, or not
     // even that. Use a pretty line.
     return "Starting dex2oat.";
   }
@@ -249,6 +251,9 @@
   UsageError("  --oat-file=<file.oat>: specifies an oat output destination via a filename.");
   UsageError("      Example: --oat-file=/system/framework/boot.oat");
   UsageError("");
+  UsageError("  --oat-symbols=<file.oat>: specifies a symbolized oat output destination.");
+  UsageError("      Example: --oat-file=symbols/system/framework/boot.oat");
+  UsageError("");
   UsageError("  --oat-fd=<number>: specifies the oat output destination via a file descriptor.");
   UsageError("      Example: --oat-fd=6");
   UsageError("");
@@ -276,14 +281,14 @@
   UsageError("  --image=<file.art>: specifies an output image filename.");
   UsageError("      Example: --image=/system/framework/boot.art");
   UsageError("");
+  UsageError("  --image-fd=<number>: same as --image but accepts a file descriptor instead.");
+  UsageError("      Cannot be used together with --image.");
+  UsageError("");
   UsageError("  --image-format=(uncompressed|lz4|lz4hc):");
   UsageError("      Which format to store the image.");
   UsageError("      Example: --image-format=lz4");
   UsageError("      Default: uncompressed");
   UsageError("");
-  UsageError("  --image-classes=<classname-file>: specifies classes to include in an image.");
-  UsageError("      Example: --image=frameworks/base/preloaded-classes");
-  UsageError("");
   UsageError("  --base=<hex-address>: specifies the base address when creating a boot image.");
   UsageError("      Example: --base=0x50000000");
   UsageError("");
@@ -291,13 +296,21 @@
   UsageError("      Do not include the arch as part of the name, it is added automatically.");
   UsageError("      Example: --boot-image=/system/framework/boot.art");
   UsageError("               (specifies /system/framework/<arch>/boot.art as the image file)");
+  UsageError("      Example: --boot-image=boot.art:boot-framework.art");
+  UsageError("               (specifies <bcp-path1>/<arch>/boot.art as the image file and");
+  UsageError("               <bcp-path2>/<arch>/boot-framework.art as the image extension file");
+  UsageError("               with paths taken from corresponding boot class path components)");
+  UsageError("      Example: --boot-image=/apex/com.android.art/boot.art:/system/framework/*:*");
+  UsageError("               (specifies /apex/com.android.art/<arch>/boot.art as the image");
+  UsageError("               file and search for extensions in /framework/system and boot");
+  UsageError("               class path components' paths)");
   UsageError("      Default: $ANDROID_ROOT/system/framework/boot.art");
   UsageError("");
   UsageError("  --android-root=<path>: used to locate libraries for portable linking.");
   UsageError("      Example: --android-root=out/host/linux-x86");
   UsageError("      Default: $ANDROID_ROOT");
   UsageError("");
-  UsageError("  --instruction-set=(arm|arm64|mips|mips64|x86|x86_64): compile for a particular");
+  UsageError("  --instruction-set=(arm|arm64|x86|x86_64): compile for a particular");
   UsageError("      instruction set.");
   UsageError("      Example: --instruction-set=x86");
   UsageError("      Default: arm");
@@ -328,7 +341,8 @@
                 "|everything):");
   UsageError("      select compiler filter.");
   UsageError("      Example: --compiler-filter=everything");
-  UsageError("      Default: speed");
+  UsageError("      Default: speed-profile if --profile-file or --profile-file-fd is used,");
+  UsageError("               speed otherwise");
   UsageError("");
   UsageError("  --huge-method-max=<method-instruction-count>: threshold size for a huge");
   UsageError("      method for compiler filter tuning.");
@@ -340,16 +354,6 @@
   UsageError("      Example: --large-method-max=%d", CompilerOptions::kDefaultLargeMethodThreshold);
   UsageError("      Default: %d", CompilerOptions::kDefaultLargeMethodThreshold);
   UsageError("");
-  UsageError("  --small-method-max=<method-instruction-count>: threshold size for a small");
-  UsageError("      method for compiler filter tuning.");
-  UsageError("      Example: --small-method-max=%d", CompilerOptions::kDefaultSmallMethodThreshold);
-  UsageError("      Default: %d", CompilerOptions::kDefaultSmallMethodThreshold);
-  UsageError("");
-  UsageError("  --tiny-method-max=<method-instruction-count>: threshold size for a tiny");
-  UsageError("      method for compiler filter tuning.");
-  UsageError("      Example: --tiny-method-max=%d", CompilerOptions::kDefaultTinyMethodThreshold);
-  UsageError("      Default: %d", CompilerOptions::kDefaultTinyMethodThreshold);
-  UsageError("");
   UsageError("  --num-dex-methods=<method-count>: threshold size for a small dex file for");
   UsageError("      compiler filter tuning. If the input has fewer than this many methods");
   UsageError("      and the filter is not interpret-only or verify-none or verify-at-runtime, ");
@@ -433,7 +437,11 @@
   UsageError("  --app-image-file=<file-name>: specify a file name for app image.");
   UsageError("      Example: --app-image-file=/data/dalvik-cache/system@app@Calculator.apk.art");
   UsageError("");
-  UsageError("  --multi-image: obsolete, ignored");
+  UsageError("  --multi-image: specify that separate oat and image files be generated for ");
+  UsageError("      each input dex file; the default for boot image and boot image extension.");
+  UsageError("");
+  UsageError("  --single-image: specify that a single oat and image file be generated for ");
+  UsageError("      all input dex files; the default for app image.");
   UsageError("");
   UsageError("  --force-determinism: force the compiler to emit a deterministic output.");
   UsageError("");
@@ -445,6 +453,10 @@
   UsageError("      the default behavior). This option is only meaningful when used with");
   UsageError("      --dump-cfg.");
   UsageError("");
+  UsageError("  --verbose-methods=<method-names>: Restrict dumped CFG data to methods whose name");
+  UsageError("      contain one of the method names passed as argument");
+  UsageError("      Example: --verbose-methods=toString,hashCode");
+  UsageError("");
   UsageError("  --classpath-dir=<directory-path>: directory used to resolve relative class paths.");
   UsageError("");
   UsageError("  --class-loader-context=<string spec>: a string specifying the intended");
@@ -480,9 +492,14 @@
   UsageError("      for dex files in --class-loader-context. Their order must be the same as");
   UsageError("      dex files in flattened class loader context.");
   UsageError("");
-  UsageError("  --dirty-image-objects=<directory-path>: list of known dirty objects in the image.");
+  UsageError("  --dirty-image-objects=<file-path>: list of known dirty objects in the image.");
   UsageError("      The image writer will group them together.");
   UsageError("");
+  UsageError("  --updatable-bcp-packages-file=<file-path>: file with a list of updatable");
+  UsageError("      boot class path packages. Classes in these packages and sub-packages");
+  UsageError("      shall not be resolved during app compilation to avoid AOT assumptions");
+  UsageError("      being invalidated after applying updates to these components.");
+  UsageError("");
   UsageError("  --compact-dex-level=none|fast: None avoids generating compact dex, fast");
   UsageError("      generates compact dex with low compile time. If speed-profile is specified as");
   UsageError("      the compiler filter and the profile is not empty, the default compact dex");
@@ -687,6 +704,76 @@
 pthread_mutex_t WatchDog::runtime_mutex_ = PTHREAD_MUTEX_INITIALIZER;
 Runtime* WatchDog::runtime_ = nullptr;
 
+// Helper class for overriding `java.lang.ThreadLocal.nextHashCode`.
+//
+// The class ThreadLocal has a static field nextHashCode used for assigning hash codes to
+// new ThreadLocal objects. Since the class and the object referenced by the field are
+// in the boot image, they cannot be modified under normal rules for AOT compilation.
+// However, since this is a private detail that's used only for assigning hash codes and
+// everything should work fine with different hash codes, we override the field for the
+// compilation, providing another object that the AOT class initialization can modify.
+class ThreadLocalHashOverride {
+ public:
+  ThreadLocalHashOverride(bool apply, int32_t initial_value) {
+    Thread* self = Thread::Current();
+    ScopedObjectAccess soa(self);
+    hs_.emplace(self);  // While holding the mutator lock.
+    Runtime* runtime = Runtime::Current();
+    klass_ = hs_->NewHandle(apply
+        ? runtime->GetClassLinker()->LookupClass(self,
+                                                 "Ljava/lang/ThreadLocal;",
+                                                 /*class_loader=*/ nullptr)
+        : nullptr);
+    field_ = ((klass_ != nullptr) && klass_->IsVisiblyInitialized())
+        ? klass_->FindDeclaredStaticField("nextHashCode",
+                                          "Ljava/util/concurrent/atomic/AtomicInteger;")
+        : nullptr;
+    old_field_value_ =
+        hs_->NewHandle(field_ != nullptr ? field_->GetObject(klass_.Get()) : nullptr);
+    if (old_field_value_ != nullptr) {
+      gc::AllocatorType allocator_type = runtime->GetHeap()->GetCurrentAllocator();
+      StackHandleScope<1u> hs2(self);
+      Handle<mirror::Object> new_field_value = hs2.NewHandle(
+          old_field_value_->GetClass()->Alloc(self, allocator_type));
+      PointerSize pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
+      ArtMethod* constructor = old_field_value_->GetClass()->FindConstructor("(I)V", pointer_size);
+      CHECK(constructor != nullptr);
+      uint32_t args[] = {
+          reinterpret_cast32<uint32_t>(new_field_value.Get()),
+          static_cast<uint32_t>(initial_value)
+      };
+      JValue result;
+      constructor->Invoke(self, args, sizeof(args), &result, /*shorty=*/ "VI");
+      CHECK(!self->IsExceptionPending());
+      field_->SetObject</*kTransactionActive=*/ false>(klass_.Get(), new_field_value.Get());
+    }
+    if (apply && old_field_value_ == nullptr) {
+      if ((klass_ != nullptr) && klass_->IsVisiblyInitialized()) {
+        // This would mean that the implementation of ThreadLocal has changed
+        // and the code above is no longer applicable.
+        LOG(ERROR) << "Failed to override ThreadLocal.nextHashCode";
+      } else {
+        VLOG(compiler) << "ThreadLocal is not initialized in the primary boot image.";
+      }
+    }
+  }
+
+  ~ThreadLocalHashOverride() {
+    ScopedObjectAccess soa(hs_->Self());
+    if (old_field_value_ != nullptr) {
+      // Allow the overriding object to be collected.
+      field_->SetObject</*kTransactionActive=*/ false>(klass_.Get(), old_field_value_.Get());
+    }
+    hs_.reset();  // While holding the mutator lock.
+  }
+
+ private:
+  std::optional<StackHandleScope<2u>> hs_;
+  Handle<mirror::Class> klass_;
+  ArtField* field_;
+  Handle<mirror::Object> old_field_value_;
+};
+
 class Dex2Oat final {
  public:
   explicit Dex2Oat(TimingLogger* timings) :
@@ -705,12 +792,14 @@
       input_vdex_file_(nullptr),
       dm_fd_(-1),
       zip_fd_(-1),
+      image_fd_(-1),
+      have_multi_image_arg_(false),
+      multi_image_(false),
       image_base_(0U),
-      image_classes_zip_filename_(nullptr),
-      image_classes_filename_(nullptr),
       image_storage_mode_(ImageHeader::kStorageModeUncompressed),
       passes_to_run_filename_(nullptr),
       dirty_image_objects_filename_(nullptr),
+      updatable_bcp_packages_filename_(nullptr),
       is_host_(false),
       elf_writers_(),
       oat_writers_(),
@@ -801,21 +890,38 @@
 
   void ProcessOptions(ParserOptions* parser_options) {
     compiler_options_->compile_pic_ = true;  // All AOT compilation is PIC.
-    DCHECK(compiler_options_->image_type_ == CompilerOptions::ImageType::kNone);
-    if (!image_filenames_.empty()) {
-      if (android::base::EndsWith(image_filenames_[0], "apex.art")) {
-        compiler_options_->image_type_ = CompilerOptions::ImageType::kApexBootImage;
-      } else {
-        compiler_options_->image_type_ = CompilerOptions::ImageType::kBootImage;
+
+    if (android_root_.empty()) {
+      const char* android_root_env_var = getenv("ANDROID_ROOT");
+      if (android_root_env_var == nullptr) {
+        Usage("--android-root unspecified and ANDROID_ROOT not set");
       }
+      android_root_ += android_root_env_var;
+    }
+
+    if (!parser_options->boot_image_filename.empty()) {
+      boot_image_filename_ = parser_options->boot_image_filename;
+    }
+
+    DCHECK(compiler_options_->image_type_ == CompilerOptions::ImageType::kNone);
+    if (!image_filenames_.empty() || image_fd_ != -1) {
+      // If no boot image is provided, then dex2oat is compiling the primary boot image,
+      // otherwise it is compiling the boot image extension.
+      compiler_options_->image_type_ = boot_image_filename_.empty()
+          ? CompilerOptions::ImageType::kBootImage
+          : CompilerOptions::ImageType::kBootImageExtension;
     }
     if (app_image_fd_ != -1 || !app_image_file_name_.empty()) {
-      if (compiler_options_->IsBootImage()) {
-        Usage("Can't have both --image and (--app-image-fd or --app-image-file)");
+      if (compiler_options_->IsBootImage() || compiler_options_->IsBootImageExtension()) {
+        Usage("Can't have both (--image or --image-fd) and (--app-image-fd or --app-image-file)");
       }
       compiler_options_->image_type_ = CompilerOptions::ImageType::kAppImage;
     }
 
+    if (!image_filenames_.empty() && image_fd_ != -1) {
+      Usage("Can't have both --image and --image-fd");
+    }
+
     if (oat_filenames_.empty() && oat_fd_ == -1) {
       Usage("Output must be supplied with either --oat-file or --oat-fd");
     }
@@ -837,6 +943,10 @@
             "or with --oat-fd and --output-vdex-fd file descriptors");
     }
 
+    if ((image_fd_ != -1) && (oat_fd_ == -1)) {
+      Usage("--image-fd must be used with --oat_fd and --output_vdex_fd");
+    }
+
     if (!parser_options->oat_symbols.empty() && oat_fd_ != -1) {
       Usage("--oat-symbols should not be used with --oat-fd");
     }
@@ -867,31 +977,9 @@
       Usage("--oat-file arguments do not match --image arguments");
     }
 
-    if (android_root_.empty()) {
-      const char* android_root_env_var = getenv("ANDROID_ROOT");
-      if (android_root_env_var == nullptr) {
-        Usage("--android-root unspecified and ANDROID_ROOT not set");
-      }
-      android_root_ += android_root_env_var;
-    }
-
-    if (!IsBootImage() && parser_options->boot_image_filename.empty()) {
-      parser_options->boot_image_filename = GetDefaultBootImageLocation(android_root_);
-    }
-    if (!parser_options->boot_image_filename.empty()) {
-      boot_image_filename_ = parser_options->boot_image_filename;
-    }
-
-    if (image_classes_filename_ != nullptr && !IsBootImage()) {
-      Usage("--image-classes should only be used with --image");
-    }
-
-    if (image_classes_filename_ != nullptr && !boot_image_filename_.empty()) {
-      Usage("--image-classes should not be used with --boot-image");
-    }
-
-    if (image_classes_zip_filename_ != nullptr && image_classes_filename_ == nullptr) {
-      Usage("--image-classes-zip should be used with --image-classes");
+    if (!IsBootImage() && boot_image_filename_.empty()) {
+      DCHECK(!IsBootImageExtension());
+      boot_image_filename_ = GetDefaultBootImageLocation(android_root_);
     }
 
     if (dex_filenames_.empty() && zip_fd_ == -1) {
@@ -924,8 +1012,31 @@
 
     if (boot_image_filename_.empty()) {
       if (image_base_ == 0) {
-        Usage("Non-zero --base not specified");
+        Usage("Non-zero --base not specified for boot image");
       }
+    } else {
+      if (image_base_ != 0) {
+        Usage("Non-zero --base specified for app image or boot image extension");
+      }
+    }
+
+    if (have_multi_image_arg_) {
+      if (!IsImage()) {
+        Usage("--multi-image or --single-image specified for non-image compilation");
+      }
+    } else {
+      // Use the default, i.e. multi-image for boot image and boot image extension.
+      multi_image_ = IsBootImage() || IsBootImageExtension();  // Shall pass checks below.
+    }
+    if (IsBootImage() && !multi_image_) {
+      Usage("--single-image specified for primary boot image");
+    }
+    if (IsAppImage() && multi_image_) {
+      Usage("--multi-image specified for app image");
+    }
+
+    if (image_fd_ != -1 && multi_image_) {
+      Usage("--single-image not specified for --image-fd");
     }
 
     const bool have_profile_file = !profile_file_.empty();
@@ -934,13 +1045,6 @@
       Usage("Profile file should not be specified with both --profile-file-fd and --profile-file");
     }
 
-    if (have_profile_file || have_profile_fd) {
-      if (image_classes_filename_ != nullptr ||
-          image_classes_zip_filename_ != nullptr) {
-        Usage("Profile based image creation is not supported with image or compiled classes");
-      }
-    }
-
     if (!parser_options->oat_symbols.empty()) {
       oat_unstripped_ = std::move(parser_options->oat_symbols);
     }
@@ -967,6 +1071,10 @@
       }
     }
 
+    if ((IsBootImage() || IsBootImageExtension()) && updatable_bcp_packages_filename_ != nullptr) {
+      Usage("Do not specify --updatable-bcp-packages-file for boot image compilation.");
+    }
+
     if (!cpu_set_.empty()) {
       SetCpuAffinity(cpu_set_);
     }
@@ -983,8 +1091,6 @@
       case InstructionSet::kArm64:
       case InstructionSet::kX86:
       case InstructionSet::kX86_64:
-      case InstructionSet::kMips:
-      case InstructionSet::kMips64:
         compiler_options_->implicit_null_checks_ = true;
         compiler_options_->implicit_so_checks_ = true;
         break;
@@ -1005,16 +1111,9 @@
     // Fill some values into the key-value store for the oat header.
     key_value_store_.reset(new SafeMap<std::string, std::string>());
 
-    // Automatically force determinism for the boot image in a host build if read barriers
-    // are enabled, or if the default GC is CMS or MS. When the default GC is CMS
-    // (Concurrent Mark-Sweep), the GC is switched to a non-concurrent one by passing the
-    // option `-Xgc:nonconcurrent` (see below).
-    if (!kIsTargetBuild && IsBootImage()) {
-      if (SupportsDeterministicCompilation()) {
-        force_determinism_ = true;
-      } else {
-        LOG(WARNING) << "Deterministic compilation is disabled.";
-      }
+    // Automatically force determinism for the boot image and boot image extensions in a host build.
+    if (!kIsTargetBuild && (IsBootImage() || IsBootImageExtension())) {
+      force_determinism_ = true;
     }
     compiler_options_->force_determinism_ = force_determinism_;
 
@@ -1026,34 +1125,65 @@
         Usage("Failed to read list of passes to run.");
       }
     }
+
+    // Trim the boot image location to not include any specified profile. Note
+    // that the logic below will include the first boot image extension, but not
+    // the ones that could be listed after the profile of that extension. This
+    // works for our current top use case:
+    // boot.art:/system/framework/boot-framework.art
+    // But this would need to be adjusted if we had to support different use
+    // cases.
+    size_t profile_separator_pos = boot_image_filename_.find(ImageSpace::kProfileSeparator);
+    if (profile_separator_pos != std::string::npos) {
+      DCHECK(!IsBootImage());  // For primary boot image the boot_image_filename_ is empty.
+      if (IsBootImageExtension()) {
+        Usage("Unsupported profile specification in boot image location (%s) for extension.",
+              boot_image_filename_.c_str());
+      }
+      VLOG(compiler)
+          << "Truncating boot image location " << boot_image_filename_
+          << " because it contains profile specification. Truncated: "
+          << boot_image_filename_.substr(/*pos*/ 0u, /*length*/ profile_separator_pos);
+      boot_image_filename_.resize(profile_separator_pos);
+    }
+
     compiler_options_->passes_to_run_ = passes_to_run_.get();
     compiler_options_->compiling_with_core_image_ =
         !boot_image_filename_.empty() &&
         CompilerOptions::IsCoreImageFilename(boot_image_filename_);
   }
 
-  static bool SupportsDeterministicCompilation() {
-    return (kUseReadBarrier ||
-            gc::kCollectorTypeDefault == gc::kCollectorTypeCMS ||
-            gc::kCollectorTypeDefault == gc::kCollectorTypeMS);
-  }
-
   void ExpandOatAndImageFilenames() {
-    if (image_filenames_[0].rfind('/') == std::string::npos) {
-      Usage("Unusable boot image filename %s", image_filenames_[0].c_str());
+    ArrayRef<const std::string> locations(dex_locations_);
+    if (!multi_image_) {
+      locations = locations.SubArray(/*pos=*/ 0u, /*length=*/ 1u);
     }
-    image_filenames_ = ImageSpace::ExpandMultiImageLocations(dex_locations_, image_filenames_[0]);
+    if (image_fd_ == -1) {
+      if (image_filenames_[0].rfind('/') == std::string::npos) {
+        Usage("Unusable boot image filename %s", image_filenames_[0].c_str());
+      }
+      image_filenames_ = ImageSpace::ExpandMultiImageLocations(
+          locations, image_filenames_[0], IsBootImageExtension());
 
-    if (oat_filenames_[0].rfind('/') == std::string::npos) {
-      Usage("Unusable boot image oat filename %s", oat_filenames_[0].c_str());
+      if (oat_filenames_[0].rfind('/') == std::string::npos) {
+        Usage("Unusable boot image oat filename %s", oat_filenames_[0].c_str());
+      }
+      oat_filenames_ = ImageSpace::ExpandMultiImageLocations(
+          locations, oat_filenames_[0], IsBootImageExtension());
+    } else {
+      DCHECK(!multi_image_);
+      std::vector<std::string> oat_locations = ImageSpace::ExpandMultiImageLocations(
+          locations, oat_location_, IsBootImageExtension());
+      DCHECK_EQ(1u, oat_locations.size());
+      oat_location_ = oat_locations[0];
     }
-    oat_filenames_ = ImageSpace::ExpandMultiImageLocations(dex_locations_, oat_filenames_[0]);
 
     if (!oat_unstripped_.empty()) {
       if (oat_unstripped_[0].rfind('/') == std::string::npos) {
         Usage("Unusable boot image symbol filename %s", oat_unstripped_[0].c_str());
       }
-      oat_unstripped_ = ImageSpace::ExpandMultiImageLocations(dex_locations_, oat_unstripped_[0]);
+      oat_unstripped_ = ImageSpace::ExpandMultiImageLocations(
+           locations, oat_unstripped_[0], IsBootImageExtension());
     }
   }
 
@@ -1132,6 +1262,15 @@
     }
   }
 
+  void AssignIfExists(Dex2oatArgumentMap& map,
+                      const Dex2oatArgumentMap::Key<std::string>& key,
+                      std::vector<std::string>* out) {
+    DCHECK(out->empty());
+    if (map.Exists(key)) {
+      out->push_back(*map.Get(key));
+    }
+  }
+
   // Parse the arguments from the command line. In case of an unrecognized option or impossible
   // values/combinations, a usage error will be displayed and exit() is called. Thus, if the method
   // returns, arguments have been successfully parsed.
@@ -1159,10 +1298,11 @@
     AssignIfExists(args, M::CompactDexLevel, &compact_dex_level_);
     AssignIfExists(args, M::DexFiles, &dex_filenames_);
     AssignIfExists(args, M::DexLocations, &dex_locations_);
-    AssignIfExists(args, M::OatFiles, &oat_filenames_);
+    AssignIfExists(args, M::OatFile, &oat_filenames_);
     AssignIfExists(args, M::OatSymbols, &parser_options->oat_symbols);
     AssignTrueIfExists(args, M::Strip, &strip_);
-    AssignIfExists(args, M::ImageFilenames, &image_filenames_);
+    AssignIfExists(args, M::ImageFilename, &image_filenames_);
+    AssignIfExists(args, M::ImageFd, &image_fd_);
     AssignIfExists(args, M::ZipFd, &zip_fd_);
     AssignIfExists(args, M::ZipLocation, &zip_location_);
     AssignIfExists(args, M::InputVdexFd, &input_vdex_fd_);
@@ -1176,8 +1316,6 @@
     AssignIfExists(args, M::Watchdog, &parser_options->watch_dog_enabled);
     AssignIfExists(args, M::WatchdogTimeout, &parser_options->watch_dog_timeout_in_ms);
     AssignIfExists(args, M::Threads, &thread_count_);
-    AssignIfExists(args, M::ImageClasses, &image_classes_filename_);
-    AssignIfExists(args, M::ImageClassesZip, &image_classes_zip_filename_);
     AssignIfExists(args, M::CpuSet, &cpu_set_);
     AssignIfExists(args, M::Passes, &passes_to_run_filename_);
     AssignIfExists(args, M::BootImage, &parser_options->boot_image_filename);
@@ -1195,6 +1333,7 @@
     AssignIfExists(args, M::NoInlineFrom, &no_inline_from_string_);
     AssignIfExists(args, M::ClasspathDir, &classpath_dir_);
     AssignIfExists(args, M::DirtyImageObjects, &dirty_image_objects_filename_);
+    AssignIfExists(args, M::UpdatableBcpPackagesFile, &updatable_bcp_packages_filename_);
     AssignIfExists(args, M::ImageFormat, &image_storage_mode_);
     AssignIfExists(args, M::CompilationReason, &compilation_reason_);
 
@@ -1221,10 +1360,10 @@
     }
     AssignIfExists(args, M::CopyDexFiles, &copy_dex_files_);
 
+    AssignTrueIfExists(args, M::MultiImage, &have_multi_image_arg_);
+    AssignIfExists(args, M::MultiImage, &multi_image_);
+
     if (args.Exists(M::ForceDeterminism)) {
-      if (!SupportsDeterministicCompilation()) {
-        Usage("Option --force-determinism requires read barriers or a CMS/MS garbage collector");
-      }
       force_determinism_ = true;
     }
 
@@ -1276,6 +1415,14 @@
             "--class-loader-context is also specified");
     }
 
+    // If we have a profile, change the default compiler filter to speed-profile
+    // before reading compiler options.
+    static_assert(CompilerFilter::kDefaultCompilerFilter == CompilerFilter::kSpeed);
+    DCHECK_EQ(compiler_options_->GetCompilerFilter(), CompilerFilter::kSpeed);
+    if (UseProfile()) {
+      compiler_options_->SetCompilerFilter(CompilerFilter::kSpeedProfile);
+    }
+
     if (!ReadCompilerOptions(args, compiler_options_.get(), &error_msg)) {
       Usage(error_msg.c_str());
     }
@@ -1292,8 +1439,9 @@
     // Prune non-existent dex files now so that we don't create empty oat files for multi-image.
     PruneNonExistentDexFiles();
 
-    // Expand oat and image filenames for multi image.
-    if (IsBootImage() && image_filenames_.size() == 1) {
+    // Expand oat and image filenames for boot image and boot image extension.
+    // This is mostly for multi-image but single-image also needs some processing.
+    if (IsBootImage() || IsBootImageExtension()) {
       ExpandOatAndImageFilenames();
     }
 
@@ -1407,18 +1555,17 @@
     // Note: we're only invalidating the magic data in the file, as dex2oat needs the rest of
     // the information to remain valid.
     if (update_input_vdex_) {
-      std::unique_ptr<BufferedOutputStream> vdex_out =
-          std::make_unique<BufferedOutputStream>(
-              std::make_unique<FileOutputStream>(vdex_files_.back().get()));
-      if (!vdex_out->WriteFully(&VdexFile::VerifierDepsHeader::kVdexInvalidMagic,
-                                arraysize(VdexFile::VerifierDepsHeader::kVdexInvalidMagic))) {
-        PLOG(ERROR) << "Failed to invalidate vdex header. File: " << vdex_out->GetLocation();
+      File* vdex_file = vdex_files_.back().get();
+      if (!vdex_file->PwriteFully(&VdexFile::VerifierDepsHeader::kVdexInvalidMagic,
+                                  arraysize(VdexFile::VerifierDepsHeader::kVdexInvalidMagic),
+                                  /*offset=*/ 0u)) {
+        PLOG(ERROR) << "Failed to invalidate vdex header. File: " << vdex_file->GetPath();
         return false;
       }
 
-      if (!vdex_out->Flush()) {
+      if (vdex_file->Flush() != 0) {
         PLOG(ERROR) << "Failed to flush stream after invalidating header of vdex file."
-                    << " File: " << vdex_out->GetLocation();
+                    << " File: " << vdex_file->GetPath();
         return false;
       }
     }
@@ -1505,8 +1652,6 @@
           LOG(INFO) << "Image class " << s;
         }
       }
-      // Note: If we have a profile, classes previously loaded for the --image-classes
-      // option are overwritten here.
       compiler_options_->image_classes_.swap(image_classes);
     }
   }
@@ -1516,16 +1661,17 @@
   dex2oat::ReturnCode Setup() {
     TimingLogger::ScopedTiming t("dex2oat Setup", timings_);
 
-    if (!PrepareImageClasses() || !PrepareDirtyObjects()) {
+    if (!PrepareDirtyObjects()) {
       return dex2oat::ReturnCode::kOther;
     }
 
-    // Verification results are null since we don't know if we will need them yet as the compler
+    // Verification results are null since we don't know if we will need them yet as the compiler
     // filter may change.
     callbacks_.reset(new QuickCompilerCallbacks(
-        IsBootImage() ?
-            CompilerCallbacks::CallbackMode::kCompileBootImage :
-            CompilerCallbacks::CallbackMode::kCompileApp));
+        // For class verification purposes, boot image extension is the same as boot image.
+        (IsBootImage() || IsBootImageExtension())
+            ? CompilerCallbacks::CallbackMode::kCompileBootImage
+            : CompilerCallbacks::CallbackMode::kCompileApp));
 
     RuntimeArgumentMap runtime_options;
     if (!PrepareRuntimeOptions(&runtime_options, callbacks_.get())) {
@@ -1537,6 +1683,132 @@
       return dex2oat::ReturnCode::kOther;
     }
 
+    {
+      TimingLogger::ScopedTiming t_dex("Writing and opening dex files", timings_);
+      for (size_t i = 0, size = oat_writers_.size(); i != size; ++i) {
+        // Unzip or copy dex files straight to the oat file.
+        std::vector<MemMap> opened_dex_files_map;
+        std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
+        // No need to verify the dex file when we have a vdex file, which means it was already
+        // verified.
+        const bool verify =
+            (input_vdex_file_ == nullptr) && !compiler_options_->AssumeDexFilesAreVerified();
+        if (!oat_writers_[i]->WriteAndOpenDexFiles(
+            vdex_files_[i].get(),
+            verify,
+            update_input_vdex_,
+            copy_dex_files_,
+            &opened_dex_files_map,
+            &opened_dex_files)) {
+          return dex2oat::ReturnCode::kOther;
+        }
+        dex_files_per_oat_file_.push_back(MakeNonOwningPointerVector(opened_dex_files));
+        if (opened_dex_files_map.empty()) {
+          DCHECK(opened_dex_files.empty());
+        } else {
+          for (MemMap& map : opened_dex_files_map) {
+            opened_dex_files_maps_.push_back(std::move(map));
+          }
+          for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files) {
+            dex_file_oat_index_map_.emplace(dex_file.get(), i);
+            opened_dex_files_.push_back(std::move(dex_file));
+          }
+        }
+      }
+    }
+
+    compiler_options_->dex_files_for_oat_file_ = MakeNonOwningPointerVector(opened_dex_files_);
+    const std::vector<const DexFile*>& dex_files = compiler_options_->dex_files_for_oat_file_;
+
+    // Check if we need to downgrade the compiler-filter for size reasons.
+    // Note: This does not affect the compiler filter already stored in the key-value
+    //       store which is used for determining whether the oat file is up to date,
+    //       together with the boot class path locations and checksums stored below.
+    CompilerFilter::Filter original_compiler_filter = compiler_options_->GetCompilerFilter();
+    if (!IsBootImage() && !IsBootImageExtension() && IsVeryLarge(dex_files)) {
+      // Disable app image to make sure dex2oat unloading is enabled.
+      compiler_options_->image_type_ = CompilerOptions::ImageType::kNone;
+
+      // If we need to downgrade the compiler-filter for size reasons, do that early before we read
+      // it below for creating verification callbacks.
+      if (!CompilerFilter::IsAsGoodAs(kLargeAppFilter, compiler_options_->GetCompilerFilter())) {
+        LOG(INFO) << "Very large app, downgrading to verify.";
+        compiler_options_->SetCompilerFilter(kLargeAppFilter);
+      }
+    }
+
+    if (CompilerFilter::IsAnyCompilationEnabled(compiler_options_->GetCompilerFilter()) ||
+        IsImage()) {
+      // Only modes with compilation or image generation require verification results.
+      // Do this here instead of when we
+      // create the compilation callbacks since the compilation mode may have been changed by the
+      // very large app logic.
+      // Avoiding setting the verification results saves RAM by not adding the dex files later in
+      // the function.
+      // Note: When compiling boot image, this must be done before creating the Runtime.
+      verification_results_.reset(new VerificationResults(compiler_options_.get()));
+      callbacks_->SetVerificationResults(verification_results_.get());
+    }
+
+    if (IsBootImage() || IsBootImageExtension()) {
+      // For boot image or boot image extension, pass opened dex files to the Runtime::Create().
+      // Note: Runtime acquires ownership of these dex files.
+      runtime_options.Set(RuntimeArgumentMap::BootClassPathDexList, &opened_dex_files_);
+    }
+    if (!CreateRuntime(std::move(runtime_options))) {
+      return dex2oat::ReturnCode::kCreateRuntime;
+    }
+    ArrayRef<const DexFile* const> bcp_dex_files(runtime_->GetClassLinker()->GetBootClassPath());
+    if (IsBootImage() || IsBootImageExtension()) {
+      // Check boot class path dex files and, if compiling an extension, the images it depends on.
+      if ((IsBootImage() && bcp_dex_files.size() != dex_files.size()) ||
+          (IsBootImageExtension() && bcp_dex_files.size() <= dex_files.size())) {
+        LOG(ERROR) << "Unexpected number of boot class path dex files for boot image or extension, "
+            << bcp_dex_files.size() << (IsBootImage() ? " != " : " <= ") << dex_files.size();
+        return dex2oat::ReturnCode::kOther;
+      }
+      if (!std::equal(dex_files.begin(), dex_files.end(), bcp_dex_files.end() - dex_files.size())) {
+        LOG(ERROR) << "Boot class path dex files do not end with the compiled dex files.";
+        return dex2oat::ReturnCode::kOther;
+      }
+      size_t bcp_df_pos = 0u;
+      size_t bcp_df_end = bcp_dex_files.size();
+      for (const std::string& bcp_location : runtime_->GetBootClassPathLocations()) {
+        if (bcp_df_pos == bcp_df_end || bcp_dex_files[bcp_df_pos]->GetLocation() != bcp_location) {
+          LOG(ERROR) << "Missing dex file for boot class component " << bcp_location;
+          return dex2oat::ReturnCode::kOther;
+        }
+        CHECK(!DexFileLoader::IsMultiDexLocation(bcp_dex_files[bcp_df_pos]->GetLocation().c_str()));
+        ++bcp_df_pos;
+        while (bcp_df_pos != bcp_df_end &&
+            DexFileLoader::IsMultiDexLocation(bcp_dex_files[bcp_df_pos]->GetLocation().c_str())) {
+          ++bcp_df_pos;
+        }
+      }
+      if (bcp_df_pos != bcp_df_end) {
+        LOG(ERROR) << "Unexpected dex file in boot class path "
+            << bcp_dex_files[bcp_df_pos]->GetLocation();
+        return dex2oat::ReturnCode::kOther;
+      }
+      auto lacks_image = [](const DexFile* df) {
+        if (kIsDebugBuild && df->GetOatDexFile() != nullptr) {
+          const OatFile* oat_file = df->GetOatDexFile()->GetOatFile();
+          CHECK(oat_file != nullptr);
+          const auto& image_spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
+          CHECK(std::any_of(image_spaces.begin(),
+                            image_spaces.end(),
+                            [=](const ImageSpace* space) {
+                              return oat_file == space->GetOatFile();
+                            }));
+        }
+        return df->GetOatDexFile() == nullptr;
+      };
+      if (std::any_of(bcp_dex_files.begin(), bcp_dex_files.end() - dex_files.size(), lacks_image)) {
+        LOG(ERROR) << "Missing required boot image(s) for boot image extension.";
+        return dex2oat::ReturnCode::kOther;
+      }
+    }
+
     if (!compilation_reason_.empty()) {
       key_value_store_->Put(OatHeader::kCompilationReasonKey, compilation_reason_);
     }
@@ -1545,23 +1817,32 @@
       // If we're compiling the boot image, store the boot classpath into the Key-Value store.
       // We use this when loading the boot image.
       key_value_store_->Put(OatHeader::kBootClassPathKey, android::base::Join(dex_locations_, ':'));
-    }
-
-    if (!IsBootImage()) {
-      // When compiling an app, create the runtime early to retrieve
-      // the boot image checksums needed for the oat header.
-      if (!CreateRuntime(std::move(runtime_options))) {
-        return dex2oat::ReturnCode::kCreateRuntime;
+    } else if (IsBootImageExtension()) {
+      // Validate the boot class path and record the dependency on the loaded boot images.
+      TimingLogger::ScopedTiming t3("Loading image checksum", timings_);
+      Runtime* runtime = Runtime::Current();
+      std::string full_bcp = android::base::Join(runtime->GetBootClassPathLocations(), ':');
+      std::string extension_part = ":" + android::base::Join(dex_locations_, ':');
+      if (!android::base::EndsWith(full_bcp, extension_part)) {
+        LOG(ERROR) << "Full boot class path does not end with extension parts, full: " << full_bcp
+            << ", extension: " << extension_part.substr(1u);
+        return dex2oat::ReturnCode::kOther;
       }
-
-      if (CompilerFilter::DependsOnImageChecksum(compiler_options_->GetCompilerFilter())) {
+      std::string bcp_dependency = full_bcp.substr(0u, full_bcp.size() - extension_part.size());
+      key_value_store_->Put(OatHeader::kBootClassPathKey, bcp_dependency);
+      ArrayRef<const DexFile* const> bcp_dex_files_dependency =
+          bcp_dex_files.SubArray(/*pos=*/ 0u, bcp_dex_files.size() - dex_files.size());
+      ArrayRef<ImageSpace* const> image_spaces(runtime->GetHeap()->GetBootImageSpaces());
+      key_value_store_->Put(
+          OatHeader::kBootClassPathChecksumsKey,
+          gc::space::ImageSpace::GetBootClassPathChecksums(image_spaces, bcp_dex_files_dependency));
+    } else {
+      if (CompilerFilter::DependsOnImageChecksum(original_compiler_filter)) {
         TimingLogger::ScopedTiming t3("Loading image checksum", timings_);
         Runtime* runtime = Runtime::Current();
         key_value_store_->Put(OatHeader::kBootClassPathKey,
                               android::base::Join(runtime->GetBootClassPathLocations(), ':'));
-        std::vector<ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces();
-        const std::vector<const DexFile*>& bcp_dex_files =
-            runtime->GetClassLinker()->GetBootClassPath();
+        ArrayRef<ImageSpace* const> image_spaces(runtime->GetHeap()->GetBootImageSpaces());
         key_value_store_->Put(
             OatHeader::kBootClassPathChecksumsKey,
             gc::space::ImageSpace::GetBootClassPathChecksums(image_spaces, bcp_dex_files));
@@ -1605,76 +1886,28 @@
           class_loader_context_->EncodeContextForOatFile(classpath_dir_,
                                                          stored_class_loader_context_.get());
       key_value_store_->Put(OatHeader::kClassPathKey, class_path_key);
+
+      // Prepare exclusion list for updatable boot class path packages.
+      if (!PrepareUpdatableBcpPackages()) {
+        return dex2oat::ReturnCode::kOther;
+      }
     }
 
-    // Now that we have finalized key_value_store_, start writing the oat file.
+    // Now that we have finalized key_value_store_, start writing the .rodata section.
+    // Among other things, this creates type lookup tables that speed up the compilation.
     {
-      TimingLogger::ScopedTiming t_dex("Writing and opening dex files", timings_);
+      TimingLogger::ScopedTiming t_dex("Starting .rodata", timings_);
       rodata_.reserve(oat_writers_.size());
       for (size_t i = 0, size = oat_writers_.size(); i != size; ++i) {
         rodata_.push_back(elf_writers_[i]->StartRoData());
-        // Unzip or copy dex files straight to the oat file.
-        std::vector<MemMap> opened_dex_files_map;
-        std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
-        // No need to verify the dex file when we have a vdex file, which means it was already
-        // verified.
-        const bool verify = (input_vdex_file_ == nullptr);
-        if (!oat_writers_[i]->WriteAndOpenDexFiles(
-            vdex_files_[i].get(),
-            rodata_.back(),
-            (i == 0u) ? key_value_store_.get() : nullptr,
-            verify,
-            update_input_vdex_,
-            copy_dex_files_,
-            &opened_dex_files_map,
-            &opened_dex_files)) {
+        if (!oat_writers_[i]->StartRoData(dex_files_per_oat_file_[i],
+                                          rodata_.back(),
+                                          (i == 0u) ? key_value_store_.get() : nullptr)) {
           return dex2oat::ReturnCode::kOther;
         }
-        dex_files_per_oat_file_.push_back(MakeNonOwningPointerVector(opened_dex_files));
-        if (opened_dex_files_map.empty()) {
-          DCHECK(opened_dex_files.empty());
-        } else {
-          for (MemMap& map : opened_dex_files_map) {
-            opened_dex_files_maps_.push_back(std::move(map));
-          }
-          for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files) {
-            dex_file_oat_index_map_.emplace(dex_file.get(), i);
-            opened_dex_files_.push_back(std::move(dex_file));
-          }
-        }
       }
     }
 
-    compiler_options_->dex_files_for_oat_file_ = MakeNonOwningPointerVector(opened_dex_files_);
-    const std::vector<const DexFile*>& dex_files = compiler_options_->dex_files_for_oat_file_;
-
-    // If we need to downgrade the compiler-filter for size reasons.
-    if (!IsBootImage() && IsVeryLarge(dex_files)) {
-      // Disable app image to make sure dex2oat unloading is enabled.
-      compiler_options_->image_type_ = CompilerOptions::ImageType::kNone;
-
-      // If we need to downgrade the compiler-filter for size reasons, do that early before we read
-      // it below for creating verification callbacks.
-      if (!CompilerFilter::IsAsGoodAs(kLargeAppFilter, compiler_options_->GetCompilerFilter())) {
-        LOG(INFO) << "Very large app, downgrading to verify.";
-        // Note: this change won't be reflected in the key-value store, as that had to be
-        //       finalized before loading the dex files. This setup is currently required
-        //       to get the size from the DexFile objects.
-        // TODO: refactor. b/29790079
-        compiler_options_->SetCompilerFilter(kLargeAppFilter);
-      }
-    }
-
-    if (CompilerFilter::IsAnyCompilationEnabled(compiler_options_->GetCompilerFilter())) {
-      // Only modes with compilation require verification results, do this here instead of when we
-      // create the compilation callbacks since the compilation mode may have been changed by the
-      // very large app logic.
-      // Avoiding setting the verification results saves RAM by not adding the dex files later in
-      // the function.
-      verification_results_.reset(new VerificationResults(compiler_options_.get()));
-      callbacks_->SetVerificationResults(verification_results_.get());
-    }
-
     // We had to postpone the swap decision till now, as this is the point when we actually
     // know about the dex files we're going to use.
 
@@ -1682,7 +1915,7 @@
     CHECK(driver_ == nullptr);
     // If we use a swap file, ensure we are above the threshold to make it necessary.
     if (swap_fd_ != -1) {
-      if (!UseSwap(IsBootImage(), dex_files)) {
+      if (!UseSwap(IsBootImage() || IsBootImageExtension(), dex_files)) {
         close(swap_fd_);
         swap_fd_ = -1;
         VLOG(compiler) << "Decided to run without swap.";
@@ -1691,14 +1924,6 @@
       }
     }
     // Note that dex2oat won't close the swap_fd_. The compiler driver's swap space will do that.
-    if (IsBootImage()) {
-      // For boot image, pass opened dex files to the Runtime::Create().
-      // Note: Runtime acquires ownership of these dex files.
-      runtime_options.Set(RuntimeArgumentMap::BootClassPathDexList, &opened_dex_files_);
-      if (!CreateRuntime(std::move(runtime_options))) {
-        return dex2oat::ReturnCode::kOther;
-      }
-    }
 
     // If we're doing the image, override the compiler filter to force full compilation. Must be
     // done ahead of WellKnownClasses::Init that causes verification.  Note: doesn't force
@@ -1707,7 +1932,7 @@
     Thread* self = Thread::Current();
     WellKnownClasses::Init(self->GetJniEnv());
 
-    if (!IsBootImage()) {
+    if (!IsBootImage() && !IsBootImageExtension()) {
       constexpr bool kSaveDexInput = false;
       if (kSaveDexInput) {
         SaveDexInput();
@@ -1778,6 +2003,14 @@
         !CompilerFilter::IsAotCompilationEnabled(compiler_options_->GetCompilerFilter());
   }
 
+  uint32_t GetCombinedChecksums() const {
+    uint32_t combined_checksums = 0u;
+    for (const DexFile* dex_file : compiler_options_->GetDexFilesForOatFile()) {
+      combined_checksums ^= dex_file->GetLocationChecksum();
+    }
+    return combined_checksums;
+  }
+
   // Set up and create the compiler driver and then invoke it to compile all the dex files.
   jobject Compile() {
     ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
@@ -1796,7 +2029,7 @@
 
     if (!no_inline_filters.empty()) {
       std::vector<const DexFile*> class_path_files;
-      if (!IsBootImage()) {
+      if (!IsBootImage() && !IsBootImageExtension()) {
         // The class loader context is used only for apps.
         class_path_files = class_loader_context_->FlattenOpenedDexFiles();
       }
@@ -1841,7 +2074,10 @@
                                      compiler_kind_,
                                      thread_count_,
                                      swap_fd_));
-    if (!IsBootImage()) {
+
+    driver_->PrepareDexFilesForOatFile(timings_);
+
+    if (!IsBootImage() && !IsBootImageExtension()) {
       driver_->SetClasspathDexFiles(class_loader_context_->FlattenOpenedDexFiles());
     }
 
@@ -1875,6 +2111,12 @@
       // the results for all the dex files, not just the results for the current dex file.
       callbacks_->SetVerifierDeps(new verifier::VerifierDeps(dex_files));
     }
+
+    // To allow initialization of classes that construct ThreadLocal objects in class initializer,
+    // re-initialize the ThreadLocal.nextHashCode to a new object that's not in the boot image.
+    ThreadLocalHashOverride thread_local_hash_override(
+        /*apply=*/ !IsBootImage(), /*initial_value=*/ 123456789u ^ GetCombinedChecksums());
+
     // Invoke the compilation.
     if (compile_individually) {
       CompileDexFilesIndividually();
@@ -1889,9 +2131,11 @@
     ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
 
     jobject class_loader = nullptr;
-    if (!IsBootImage()) {
+    if (!IsBootImage() && !IsBootImageExtension()) {
       class_loader =
-          class_loader_context_->CreateClassLoader(compiler_options_->dex_files_for_oat_file_);
+          class_loader_context_->CreateClassLoader(compiler_options_->GetDexFilesForOatFile());
+    }
+    if (!IsBootImage()) {
       callbacks_->SetDexFiles(&dex_files);
     }
 
@@ -2000,23 +2244,12 @@
     }
 
     if (IsImage()) {
-      if (IsAppImage() && image_base_ == 0) {
+      if (!IsBootImage()) {
+        DCHECK_EQ(image_base_, 0u);
         gc::Heap* const heap = Runtime::Current()->GetHeap();
-        for (ImageSpace* image_space : heap->GetBootImageSpaces()) {
-          image_base_ = std::max(image_base_, RoundUp(
-              reinterpret_cast<uintptr_t>(image_space->GetImageHeader().GetOatFileEnd()),
-              kPageSize));
-        }
-        // The non moving space is right after the oat file. Put the preferred app image location
-        // right after the non moving space so that we ideally get a continuous immune region for
-        // the GC.
-        // Use the default non moving space capacity since dex2oat does not have a separate non-
-        // moving space. This means the runtime's non moving space space size will be as large
-        // as the growth limit for dex2oat, but smaller in the zygote.
-        const size_t non_moving_space_capacity = gc::Heap::kDefaultNonMovingSpaceCapacity;
-        image_base_ += non_moving_space_capacity;
-        VLOG(compiler) << "App image base=" << reinterpret_cast<void*>(image_base_);
+        image_base_ = heap->GetBootImagesStartAddress() + heap->GetBootImagesSize();
       }
+      VLOG(compiler) << "Image base=" << reinterpret_cast<void*>(image_base_);
 
       image_writer_.reset(new linker::ImageWriter(*compiler_options_,
                                                   image_base_,
@@ -2026,9 +2259,13 @@
                                                   class_loader,
                                                   dirty_image_objects_.get()));
 
-      // We need to prepare method offsets in the image address space for direct method patching.
+      // We need to prepare method offsets in the image address space for resolving linker patches.
       TimingLogger::ScopedTiming t2("dex2oat Prepare image address space", timings_);
-      if (!image_writer_->PrepareImageAddressSpace(timings_)) {
+      // Do not preload dex caches for "assume-verified". This filter is used for in-memory
+      // compilation of boot image extension; in that scenario it is undesirable to use a lot
+      // of time to look up things now in hope it will be somewhat useful later.
+      bool preload_dex_caches = !compiler_options_->AssumeDexFilesAreVerified();
+      if (!image_writer_->PrepareImageAddressSpace(preload_dex_caches, timings_)) {
         LOG(ERROR) << "Failed to prepare image address space.";
         return false;
       }
@@ -2044,7 +2281,7 @@
 
     {
       TimingLogger::ScopedTiming t2("dex2oat Write VDEX", timings_);
-      DCHECK(IsBootImage() || oat_files_.size() == 1u);
+      DCHECK(IsBootImage() || IsBootImageExtension() || oat_files_.size() == 1u);
       verifier::VerifierDeps* verifier_deps = callbacks_->GetVerifierDeps();
       for (size_t i = 0, size = oat_files_.size(); i != size; ++i) {
         File* vdex_file = vdex_files_[i].get();
@@ -2108,7 +2345,7 @@
         debug::DebugInfo debug_info = oat_writer->GetDebugInfo();  // Keep the variable alive.
         elf_writer->PrepareDebugInfo(debug_info);  // Processes the data on background thread.
 
-        OutputStream*& rodata = rodata_[i];
+        OutputStream* rodata = rodata_[i];
         DCHECK(rodata != nullptr);
         if (!oat_writer->WriteRodata(rodata)) {
           LOG(ERROR) << "Failed to write .rodata section to the ELF file " << oat_file->GetPath();
@@ -2268,7 +2505,7 @@
   }
 
   bool IsImage() const {
-    return IsAppImage() || IsBootImage();
+    return IsAppImage() || IsBootImage() || IsBootImageExtension();
   }
 
   bool IsAppImage() const {
@@ -2279,6 +2516,10 @@
     return compiler_options_->IsBootImage();
   }
 
+  bool IsBootImageExtension() const {
+    return compiler_options_->IsBootImageExtension();
+  }
+
   bool IsHost() const {
     return is_host_;
   }
@@ -2368,37 +2609,6 @@
     return dex_files_size >= very_large_threshold_;
   }
 
-  bool PrepareImageClasses() {
-    // If --image-classes was specified, calculate the full list of classes to include in the image.
-    DCHECK(compiler_options_->image_classes_.empty());
-    if (image_classes_filename_ != nullptr) {
-      std::unique_ptr<HashSet<std::string>> image_classes =
-          ReadClasses(image_classes_zip_filename_, image_classes_filename_, "image");
-      if (image_classes == nullptr) {
-        return false;
-      }
-      compiler_options_->image_classes_.swap(*image_classes);
-    }
-    return true;
-  }
-
-  static std::unique_ptr<HashSet<std::string>> ReadClasses(const char* zip_filename,
-                                                           const char* classes_filename,
-                                                           const char* tag) {
-    std::unique_ptr<HashSet<std::string>> classes;
-    std::string error_msg;
-    if (zip_filename != nullptr) {
-      classes = ReadImageClassesFromZip(zip_filename, classes_filename, &error_msg);
-    } else {
-      classes = ReadImageClassesFromFile(classes_filename);
-    }
-    if (classes == nullptr) {
-      LOG(ERROR) << "Failed to create list of " << tag << " classes from '"
-                 << classes_filename << "': " << error_msg;
-    }
-    return classes;
-  }
-
   bool PrepareDirtyObjects() {
     if (dirty_image_objects_filename_ != nullptr) {
       dirty_image_objects_ = ReadCommentedInputFromFile<HashSet<std::string>>(
@@ -2415,6 +2625,49 @@
     return true;
   }
 
+  bool PrepareUpdatableBcpPackages() {
+    DCHECK(!IsBootImage() && !IsBootImageExtension());
+    AotClassLinker* aot_class_linker = down_cast<AotClassLinker*>(runtime_->GetClassLinker());
+    if (updatable_bcp_packages_filename_ != nullptr) {
+      std::unique_ptr<std::vector<std::string>> updatable_bcp_packages =
+          ReadCommentedInputFromFile<std::vector<std::string>>(updatable_bcp_packages_filename_,
+                                                               nullptr);  // No post-processing.
+      if (updatable_bcp_packages == nullptr) {
+        LOG(ERROR) << "Failed to load updatable boot class path packages from '"
+            << updatable_bcp_packages_filename_ << "'";
+        return false;
+      }
+      return aot_class_linker->SetUpdatableBootClassPackages(*updatable_bcp_packages);
+    } else {
+      // Use the default list based on updatable packages for Android 11.
+      return aot_class_linker->SetUpdatableBootClassPackages({
+          // Reserved conscrypt packages (includes sub-packages under these paths).
+          // "android.net.ssl",  // Covered by android.net below.
+          "com.android.org.conscrypt",
+          // Reserved updatable-media package (includes sub-packages under this path).
+          "android.media",
+          // Reserved framework-mediaprovider package (includes sub-packages under this path).
+          "android.provider",
+          // Reserved framework-statsd packages (includes sub-packages under these paths).
+          "android.app",
+          "android.os",
+          "android.util",
+          "com.android.internal.statsd",
+          // Reserved framework-permission packages (includes sub-packages under this path).
+          "android.permission",
+          // "android.app.role",  // Covered by android.app above.
+          // Reserved framework-sdkextensions package (includes sub-packages under this path).
+          // "android.os.ext",  // Covered by android.os above.
+          // Reserved framework-wifi packages (includes sub-packages under these paths).
+          "android.hardware.wifi",
+          // "android.net.wifi",  // Covered by android.net below.
+          "com.android.wifi.x",
+          // Reserved framework-tethering package (includes sub-packages under this path).
+          "android.net",
+      });
+    }
+  }
+
   void PruneNonExistentDexFiles() {
     DCHECK_EQ(dex_filenames_.size(), dex_locations_.size());
     size_t kept = 0u;
@@ -2444,8 +2697,8 @@
       }
     } else if (zip_fd_ != -1) {
       DCHECK_EQ(oat_writers_.size(), 1u);
-      if (!oat_writers_[0]->AddZippedDexFilesSource(File(zip_fd_, /* check_usage */ false),
-                                                    zip_location_.c_str())) {
+      if (!oat_writers_[0]->AddDexFileSource(File(zip_fd_, /* check_usage */ false),
+                                             zip_location_.c_str())) {
         return false;
       }
     } else if (oat_writers_.size() > 1u) {
@@ -2461,7 +2714,6 @@
     } else {
       DCHECK_EQ(oat_writers_.size(), 1u);
       DCHECK_EQ(dex_filenames_.size(), dex_locations_.size());
-      DCHECK_NE(dex_filenames_.size(), 0u);
       for (size_t i = 0; i != dex_filenames_.size(); ++i) {
         if (!oat_writers_[0]->AddDexFileSource(dex_filenames_[i].c_str(),
                                                dex_locations_[i].c_str())) {
@@ -2514,7 +2766,7 @@
   bool PrepareRuntimeOptions(RuntimeArgumentMap* runtime_options,
                              QuickCompilerCallbacks* callbacks) {
     RuntimeOptions raw_options;
-    if (boot_image_filename_.empty()) {
+    if (IsBootImage()) {
       std::string boot_class_path = "-Xbootclasspath:";
       boot_class_path += android::base::Join(dex_filenames_, ':');
       raw_options.push_back(std::make_pair(boot_class_path, nullptr));
@@ -2551,31 +2803,6 @@
     // foreground collector by default for dex2oat.
     raw_options.push_back(std::make_pair("-XX:DisableHSpaceCompactForOOM", nullptr));
 
-    if (compiler_options_->IsForceDeterminism()) {
-      // If we're asked to be deterministic, ensure non-concurrent GC for determinism.
-      //
-      // Note that with read barriers, this option is ignored, because Runtime::Init
-      // overrides the foreground GC to be gc::kCollectorTypeCC when instantiating
-      // gc::Heap. This is fine, as concurrent GC requests are not honored in dex2oat,
-      // which uses an unstarted runtime.
-      raw_options.push_back(std::make_pair("-Xgc:nonconcurrent", nullptr));
-
-      // The default LOS implementation (map) is not deterministic. So disable it.
-      raw_options.push_back(std::make_pair("-XX:LargeObjectSpace=disabled", nullptr));
-
-      // We also need to turn off the nonmoving space. For that, we need to disable HSpace
-      // compaction (done above) and ensure that neither foreground nor background collectors
-      // are concurrent.
-      //
-      // Likewise, this option is ignored with read barriers because Runtime::Init
-      // overrides the background GC to be gc::kCollectorTypeCCBackground, but that's
-      // fine too, for the same reason (see above).
-      raw_options.push_back(std::make_pair("-XX:BackgroundGC=nonconcurrent", nullptr));
-
-      // To make identity hashcode deterministic, set a known seed.
-      mirror::Object::SetHashCodeSeed(987654321U);
-    }
-
     if (!Runtime::ParseOptions(raw_options, false, runtime_options)) {
       LOG(ERROR) << "Failed to parse runtime options";
       return false;
@@ -2585,6 +2812,11 @@
 
   // Create a runtime necessary for compilation.
   bool CreateRuntime(RuntimeArgumentMap&& runtime_options) {
+    // To make identity hashcode deterministic, set a seed based on the dex file checksums.
+    // That makes the seed also most likely different for different inputs, for example
+    // for primary boot image and different extensions that could be loaded together.
+    mirror::Object::SetHashCodeSeed(987654321u ^ GetCombinedChecksums());
+
     TimingLogger::ScopedTiming t_runtime("Create runtime", timings_);
     if (!Runtime::Create(std::move(runtime_options))) {
       LOG(ERROR) << "Failed to create runtime";
@@ -2624,13 +2856,21 @@
   bool CreateImageFile()
       REQUIRES(!Locks::mutator_lock_) {
     CHECK(image_writer_ != nullptr);
-    if (!IsBootImage()) {
-      CHECK(image_filenames_.empty());
-      image_filenames_.push_back(app_image_file_name_);
+    if (IsAppImage()) {
+      DCHECK(image_filenames_.empty());
+      if (app_image_fd_ != -1) {
+        image_filenames_.push_back(StringPrintf("FileDescriptor[%d]", app_image_fd_));
+      } else {
+        image_filenames_.push_back(app_image_file_name_);
+      }
     }
-    if (!image_writer_->Write(app_image_fd_,
+    if (image_fd_ != -1) {
+      DCHECK(image_filenames_.empty());
+      image_filenames_.push_back(StringPrintf("FileDescriptor[%d]", image_fd_));
+    }
+    if (!image_writer_->Write(IsAppImage() ? app_image_fd_ : image_fd_,
                               image_filenames_,
-                              oat_filenames_)) {
+                              IsAppImage() ? 1u : dex_locations_.size())) {
       LOG(ERROR) << "Failure during image file creation";
       return false;
     }
@@ -2646,25 +2886,6 @@
     return true;
   }
 
-  // Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
-  static std::unique_ptr<HashSet<std::string>> ReadImageClassesFromFile(
-      const char* image_classes_filename) {
-    std::function<std::string(const char*)> process = DotToDescriptor;
-    return ReadCommentedInputFromFile<HashSet<std::string>>(image_classes_filename, &process);
-  }
-
-  // Reads the class names (java.lang.Object) and returns a set of descriptors (Ljava/lang/Object;)
-  static std::unique_ptr<HashSet<std::string>> ReadImageClassesFromZip(
-        const char* zip_filename,
-        const char* image_classes_filename,
-        std::string* error_msg) {
-    std::function<std::string(const char*)> process = DotToDescriptor;
-    return ReadCommentedInputFromZip<HashSet<std::string>>(zip_filename,
-                                                           image_classes_filename,
-                                                           &process,
-                                                           error_msg);
-  }
-
   // Read lines from the given file, dropping comments and empty lines. Post-process each line with
   // the given function.
   template <typename T>
@@ -2812,12 +3033,14 @@
   std::string boot_image_filename_;
   std::vector<const char*> runtime_args_;
   std::vector<std::string> image_filenames_;
+  int image_fd_;
+  bool have_multi_image_arg_;
+  bool multi_image_;
   uintptr_t image_base_;
-  const char* image_classes_zip_filename_;
-  const char* image_classes_filename_;
   ImageHeader::StorageMode image_storage_mode_;
   const char* passes_to_run_filename_;
   const char* dirty_image_objects_filename_;
+  const char* updatable_bcp_packages_filename_;
   std::unique_ptr<HashSet<std::string>> dirty_image_objects_;
   std::unique_ptr<std::vector<std::string>> passes_to_run_;
   bool is_host_;
@@ -3006,6 +3229,8 @@
   // Parse arguments. Argument mistakes will lead to exit(EXIT_FAILURE) in UsageError.
   dex2oat->ParseArgs(argc, argv);
 
+  art::MemMap::Init();  // For ZipEntry::ExtractToMemMap, vdex and profiles.
+
   // If needed, process profile information for profile guided compilation.
   // This operation involves I/O.
   if (dex2oat->UseProfile()) {
@@ -3015,7 +3240,6 @@
     }
   }
 
-  art::MemMap::Init();  // For ZipEntry::ExtractToMemMap, and vdex.
 
   // Check early that the result of compilation can be written
   if (!dex2oat->OpenFile()) {
@@ -3028,7 +3252,10 @@
   //   3) Compiling with --host
   //   4) Compiling on the host (not a target build)
   // Otherwise, print a stripped command line.
-  if (kIsDebugBuild || dex2oat->IsBootImage() || dex2oat->IsHost() || !kIsTargetBuild) {
+  if (kIsDebugBuild ||
+      dex2oat->IsBootImage() || dex2oat->IsBootImageExtension() ||
+      dex2oat->IsHost() ||
+      !kIsTargetBuild) {
     LOG(INFO) << CommandLine();
   } else {
     LOG(INFO) << StrippedCommandLine();
diff --git a/dex2oat/dex2oat_image_test.cc b/dex2oat/dex2oat_image_test.cc
index e047b4f..d01b64f 100644
--- a/dex2oat/dex2oat_image_test.cc
+++ b/dex2oat/dex2oat_image_test.cc
@@ -14,31 +14,44 @@
  * limitations under the License.
  */
 
+#include <fstream>
 #include <regex>
 #include <sstream>
 #include <string>
 #include <vector>
 
+#include <sys/mman.h>
 #include <sys/wait.h>
 #include <unistd.h>
 
 #include <android-base/logging.h>
+#include <android-base/stringprintf.h>
+#include <android-base/strings.h>
 
 #include "common_runtime_test.h"
 
+#include "base/array_ref.h"
 #include "base/file_utils.h"
 #include "base/macros.h"
+#include "base/mem_map.h"
+#include "base/string_view_cpp20.h"
 #include "base/unix_file/fd_file.h"
 #include "base/utils.h"
 #include "dex/art_dex_file_loader.h"
 #include "dex/dex_file-inl.h"
 #include "dex/dex_file_loader.h"
 #include "dex/method_reference.h"
-#include "profile/profile_compilation_info.h"
+#include "dex/type_reference.h"
+#include "gc/space/image_space.h"
 #include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-current-inl.h"
 
 namespace art {
 
+// A suitable address for loading the core images.
+constexpr uint32_t kBaseAddress = ART_BASE_ADDRESS;
+
 struct ImageSizes {
   size_t art_size = 0;
   size_t oat_size = 0;
@@ -55,38 +68,9 @@
   void TearDown() override {}
 
  protected:
-  // Visitors take method and type references
-  template <typename MethodVisitor, typename ClassVisitor>
-  void VisitLibcoreDexes(const MethodVisitor& method_visitor,
-                         const ClassVisitor& class_visitor,
-                         size_t method_frequency = 1,
-                         size_t class_frequency = 1) {
-    size_t method_counter = 0;
-    size_t class_counter = 0;
-    for (const std::string& dex : GetLibCoreDexFileNames()) {
-      std::vector<std::unique_ptr<const DexFile>> dex_files;
-      std::string error_msg;
-      const ArtDexFileLoader dex_file_loader;
-      CHECK(dex_file_loader.Open(dex.c_str(),
-                                 dex,
-                                 /*verify*/ true,
-                                 /*verify_checksum*/ false,
-                                 &error_msg,
-                                 &dex_files))
-          << error_msg;
-      for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
-        for (size_t i = 0; i < dex_file->NumMethodIds(); ++i) {
-          if (++method_counter % method_frequency == 0) {
-            method_visitor(MethodReference(dex_file.get(), i));
-          }
-        }
-        for (size_t i = 0; i < dex_file->NumTypeIds(); ++i) {
-          if (++class_counter % class_frequency == 0) {
-            class_visitor(TypeReference(dex_file.get(), dex::TypeIndex(i)));
-          }
-        }
-      }
-    }
+  void SetUpRuntimeOptions(RuntimeOptions* options) override {
+    // Disable implicit dex2oat invocations when loading image spaces.
+    options->emplace_back("-Xnoimage-dex2oat", nullptr);
   }
 
   static void WriteLine(File* file, std::string line) {
@@ -94,41 +78,25 @@
     EXPECT_TRUE(file->WriteFully(&line[0], line.length()));
   }
 
-  void GenerateClasses(File* out_file, size_t frequency = 1) {
-    VisitLibcoreDexes(VoidFunctor(),
-                      [out_file](TypeReference ref) {
-      WriteLine(out_file, ref.dex_file->PrettyType(ref.TypeIndex()));
-    }, frequency, frequency);
-    EXPECT_EQ(out_file->Flush(), 0);
-  }
-
-  void GenerateMethods(File* out_file, size_t frequency = 1) {
-    VisitLibcoreDexes([out_file](MethodReference ref) {
-      WriteLine(out_file, ref.PrettyMethod());
-    }, VoidFunctor(), frequency, frequency);
-    EXPECT_EQ(out_file->Flush(), 0);
-  }
-
   void AddRuntimeArg(std::vector<std::string>& args, const std::string& arg) {
     args.push_back("--runtime-arg");
     args.push_back(arg);
   }
 
-  ImageSizes CompileImageAndGetSizes(const std::vector<std::string>& extra_args) {
+  ImageSizes CompileImageAndGetSizes(ArrayRef<const std::string> dex_files,
+                                     const std::vector<std::string>& extra_args) {
     ImageSizes ret;
-    ScratchFile scratch;
-    std::string scratch_dir = scratch.GetFilename();
-    while (!scratch_dir.empty() && scratch_dir.back() != '/') {
-      scratch_dir.pop_back();
-    }
-    CHECK(!scratch_dir.empty()) << "No directory " << scratch.GetFilename();
+    ScratchDir scratch;
+    std::string filename_prefix = scratch.GetPath() + "boot";
+    std::vector<std::string> local_extra_args = extra_args;
+    local_extra_args.push_back(android::base::StringPrintf("--base=0x%08x", kBaseAddress));
     std::string error_msg;
-    if (!CompileBootImage(extra_args, scratch.GetFilename(), &error_msg)) {
-      LOG(ERROR) << "Failed to compile image " << scratch.GetFilename() << error_msg;
+    if (!CompileBootImage(local_extra_args, filename_prefix, dex_files, &error_msg)) {
+      LOG(ERROR) << "Failed to compile image " << filename_prefix << error_msg;
     }
-    std::string art_file = scratch.GetFilename() + ".art";
-    std::string oat_file = scratch.GetFilename() + ".oat";
-    std::string vdex_file = scratch.GetFilename() + ".vdex";
+    std::string art_file = filename_prefix + ".art";
+    std::string oat_file = filename_prefix + ".oat";
+    std::string vdex_file = filename_prefix + ".vdex";
     int64_t art_size = OS::GetFileSizeBytes(art_file.c_str());
     int64_t oat_size = OS::GetFileSizeBytes(oat_file.c_str());
     int64_t vdex_size = OS::GetFileSizeBytes(vdex_file.c_str());
@@ -138,63 +106,69 @@
     ret.art_size = art_size;
     ret.oat_size = oat_size;
     ret.vdex_size = vdex_size;
-    scratch.Close();
-    // Clear image files since we compile the image multiple times and don't want to leave any
-    // artifacts behind.
-    ClearDirectory(scratch_dir.c_str(), /*recursive*/ false);
     return ret;
   }
 
-  bool CompileBootImage(const std::vector<std::string>& extra_args,
-                        const std::string& image_file_name_prefix,
-                        std::string* error_msg) {
-    Runtime* const runtime = Runtime::Current();
-    std::vector<std::string> argv;
-    argv.push_back(runtime->GetCompilerExecutable());
-    AddRuntimeArg(argv, "-Xms64m");
-    AddRuntimeArg(argv, "-Xmx64m");
-    std::vector<std::string> dex_files = GetLibCoreDexFileNames();
-    for (const std::string& dex_file : dex_files) {
-      argv.push_back("--dex-file=" + dex_file);
-      argv.push_back("--dex-location=" + dex_file);
-    }
-    if (runtime->IsJavaDebuggable()) {
-      argv.push_back("--debuggable");
-    }
-    runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
-
-    AddRuntimeArg(argv, "-Xverify:softfail");
-
-    if (!kIsTargetBuild) {
-      argv.push_back("--host");
-    }
-
-    argv.push_back("--image=" + image_file_name_prefix + ".art");
-    argv.push_back("--oat-file=" + image_file_name_prefix + ".oat");
-    argv.push_back("--oat-location=" + image_file_name_prefix + ".oat");
-    argv.push_back("--base=0x60000000");
-
-    std::vector<std::string> compiler_options = runtime->GetCompilerOptions();
-    argv.insert(argv.end(), compiler_options.begin(), compiler_options.end());
-
-    // We must set --android-root.
-    const char* android_root = getenv("ANDROID_ROOT");
-    CHECK(android_root != nullptr);
-    argv.push_back("--android-root=" + std::string(android_root));
-    argv.insert(argv.end(), extra_args.begin(), extra_args.end());
-
-    return RunDex2Oat(argv, error_msg);
+  MemMap ReserveCoreImageAddressSpace(/*out*/std::string* error_msg) {
+    constexpr size_t kReservationSize = 256 * MB;  // This should be enough for the compiled images.
+    // Extend to both directions for maximum relocation difference.
+    static_assert(ART_BASE_ADDRESS_MIN_DELTA < 0);
+    static_assert(ART_BASE_ADDRESS_MAX_DELTA > 0);
+    static_assert(IsAligned<kPageSize>(ART_BASE_ADDRESS_MIN_DELTA));
+    static_assert(IsAligned<kPageSize>(ART_BASE_ADDRESS_MAX_DELTA));
+    constexpr size_t kExtra = ART_BASE_ADDRESS_MAX_DELTA - ART_BASE_ADDRESS_MIN_DELTA;
+    uint32_t min_relocated_address = kBaseAddress + ART_BASE_ADDRESS_MIN_DELTA;
+    return MemMap::MapAnonymous("Reservation",
+                                reinterpret_cast<uint8_t*>(min_relocated_address),
+                                kReservationSize + kExtra,
+                                PROT_NONE,
+                                /*low_4gb=*/ true,
+                                /*reuse=*/ false,
+                                /*reservation=*/ nullptr,
+                                error_msg);
   }
 
-  bool RunDex2Oat(const std::vector<std::string>& args, std::string* error_msg) {
-    // We only want fatal logging for the error message.
-    auto post_fork_fn = []() { return setenv("ANDROID_LOG_TAGS", "*:f", 1) == 0; };
-    ForkAndExecResult res = ForkAndExec(args, post_fork_fn, error_msg);
-    if (res.stage != ForkAndExecResult::kFinished) {
-      *error_msg = strerror(errno);
+  void CopyDexFiles(const std::string& dir, /*inout*/std::vector<std::string>* dex_files) {
+    CHECK(EndsWith(dir, "/"));
+    for (std::string& dex_file : *dex_files) {
+      size_t slash_pos = dex_file.rfind('/');
+      CHECK(OS::FileExists(dex_file.c_str())) << dex_file;
+      CHECK_NE(std::string::npos, slash_pos);
+      std::string new_location = dir + dex_file.substr(slash_pos + 1u);
+      std::ifstream src_stream(dex_file, std::ios::binary);
+      std::ofstream dst_stream(new_location, std::ios::binary);
+      dst_stream << src_stream.rdbuf();
+      dex_file = new_location;
+    }
+  }
+
+  bool CompareFiles(const std::string& filename1, const std::string& filename2) {
+    std::unique_ptr<File> file1(OS::OpenFileForReading(filename1.c_str()));
+    std::unique_ptr<File> file2(OS::OpenFileForReading(filename2.c_str()));
+    // Did we open the files?
+    if (file1 == nullptr || file2 == nullptr) {
       return false;
     }
-    return res.StandardSuccess();
+    // Are they non-empty and the same length?
+    if (file1->GetLength() <= 0 || file2->GetLength() != file1->GetLength()) {
+      return false;
+    }
+    return file1->Compare(file2.get()) == 0;
+  }
+
+  void AddAndroidRootToImageCompilerOptions() {
+    const char* android_root = getenv("ANDROID_ROOT");
+    CHECK(android_root != nullptr);
+    Runtime::Current()->image_compiler_options_.push_back(
+        "--android-root=" + std::string(android_root));
+  }
+
+  void EnableImageDex2Oat() {
+    Runtime::Current()->image_dex2oat_enabled_ = true;
+  }
+
+  void DisableImageDex2Oat() {
+    Runtime::Current()->image_dex2oat_enabled_ = false;
   }
 };
 
@@ -208,92 +182,486 @@
     // This test is too slow for target builds.
     return;
   }
-  ImageSizes base_sizes = CompileImageAndGetSizes({});
-  ImageSizes image_classes_sizes;
-  ImageSizes compiled_classes_sizes;
-  ImageSizes compiled_methods_sizes;
-  ImageSizes profile_sizes;
+  // Compile only a subset of the libcore dex files to make this test shorter.
+  std::vector<std::string> libcore_dex_files = GetLibCoreDexFileNames();
+  // The primary image must contain at least core-oj and core-libart to initialize the runtime
+  // and we also need the core-icu4j if we want to compile these with full profile.
+  ASSERT_NE(std::string::npos, libcore_dex_files[0].find("core-oj"));
+  ASSERT_NE(std::string::npos, libcore_dex_files[1].find("core-libart"));
+  ASSERT_NE(std::string::npos, libcore_dex_files[2].find("core-icu4j"));
+  ArrayRef<const std::string> dex_files =
+      ArrayRef<const std::string>(libcore_dex_files).SubArray(/*pos=*/ 0u, /*length=*/ 3u);
+
+  ImageSizes base_sizes = CompileImageAndGetSizes(dex_files, {});
+  ImageSizes everything_sizes;
+  ImageSizes filter_sizes;
   std::cout << "Base compile sizes " << base_sizes << std::endl;
-  // Test image classes
+  // Compile all methods and classes
+  std::vector<std::string> libcore_dexes = GetLibCoreDexFileNames();
+  ArrayRef<const std::string> libcore_dexes_array(libcore_dexes);
   {
-    ScratchFile classes;
-    GenerateClasses(classes.GetFile(), /*frequency*/ 1u);
-    image_classes_sizes = CompileImageAndGetSizes(
-        {"--image-classes=" + classes.GetFilename()});
-    classes.Close();
-    std::cout << "Image classes sizes " << image_classes_sizes << std::endl;
+    ScratchFile profile_file;
+    GenerateProfile(libcore_dexes_array,
+                    profile_file.GetFile(),
+                    /*method_frequency=*/ 1u,
+                    /*type_frequency=*/ 1u);
+    everything_sizes = CompileImageAndGetSizes(
+        dex_files,
+        {"--profile-file=" + profile_file.GetFilename(),
+         "--compiler-filter=speed-profile"});
+    profile_file.Close();
+    std::cout << "All methods and classes sizes " << everything_sizes << std::endl;
     // Putting all classes as image classes should increase art size
-    EXPECT_GE(image_classes_sizes.art_size, base_sizes.art_size);
+    EXPECT_GE(everything_sizes.art_size, base_sizes.art_size);
     // Sanity check that dex is the same size.
-    EXPECT_EQ(image_classes_sizes.vdex_size, base_sizes.vdex_size);
-  }
-  // Test compiled classes.
-  {
-    ScratchFile classes;
-    // Only compile every even class.
-    GenerateClasses(classes.GetFile(), /*frequency*/ 2u);
-    compiled_classes_sizes = CompileImageAndGetSizes(
-        {"--image-classes=" + classes.GetFilename()});
-    classes.Close();
-    std::cout << "Compiled classes sizes " << compiled_classes_sizes << std::endl;
-    // Art file should be smaller than image classes version since we included fewer classes in the
-    // list.
-    EXPECT_LT(compiled_classes_sizes.art_size, image_classes_sizes.art_size);
+    EXPECT_EQ(everything_sizes.vdex_size, base_sizes.vdex_size);
   }
   static size_t kMethodFrequency = 3;
   static size_t kTypeFrequency = 4;
   // Test compiling fewer methods and classes.
   {
-    ScratchFile classes;
-    // Only compile every even class.
-    GenerateClasses(classes.GetFile(), kTypeFrequency);
-    compiled_methods_sizes = CompileImageAndGetSizes(
-        {"--image-classes=" + classes.GetFilename()});
-    classes.Close();
-    std::cout << "Compiled fewer methods sizes " << compiled_methods_sizes << std::endl;
-  }
-  // Cross verify profile based image against image-classes and compiled-methods to make sure it
-  // matches.
-  {
-    ProfileCompilationInfo profile;
-    VisitLibcoreDexes([&profile](MethodReference ref) {
-      uint32_t flags = ProfileCompilationInfo::MethodHotness::kFlagHot |
-          ProfileCompilationInfo::MethodHotness::kFlagStartup;
-      EXPECT_TRUE(profile.AddMethodIndex(
-          static_cast<ProfileCompilationInfo::MethodHotness::Flag>(flags),
-          ref));
-    }, [&profile](TypeReference ref) {
-      EXPECT_TRUE(profile.AddClassForDex(ref));
-    }, kMethodFrequency, kTypeFrequency);
     ScratchFile profile_file;
-    profile.Save(profile_file.GetFile()->Fd());
-    EXPECT_EQ(profile_file.GetFile()->Flush(), 0);
-    profile_sizes = CompileImageAndGetSizes(
+    GenerateProfile(libcore_dexes_array,
+                    profile_file.GetFile(),
+                    kMethodFrequency,
+                    kTypeFrequency);
+    filter_sizes = CompileImageAndGetSizes(
+        dex_files,
         {"--profile-file=" + profile_file.GetFilename(),
          "--compiler-filter=speed-profile"});
     profile_file.Close();
-    std::cout << "Profile sizes " << profile_sizes << std::endl;
-    // Since there is some difference between profile vs image + methods due to layout, check that
-    // the range is within expected margins (+-10%).
-    const double kRatio = 0.90;
-    EXPECT_LE(profile_sizes.art_size * kRatio, compiled_methods_sizes.art_size);
-    // TODO(mathieuc): Find a reliable way to check compiled code. b/63746626
-    // EXPECT_LE(profile_sizes.oat_size * kRatio, compiled_methods_sizes.oat_size);
-    EXPECT_LE(profile_sizes.vdex_size * kRatio, compiled_methods_sizes.vdex_size);
-    EXPECT_GE(profile_sizes.art_size / kRatio, compiled_methods_sizes.art_size);
-    // TODO(mathieuc): Find a reliable way to check compiled code. b/63746626
-    // EXPECT_GE(profile_sizes.oat_size / kRatio, compiled_methods_sizes.oat_size);
-    EXPECT_GE(profile_sizes.vdex_size / kRatio, compiled_methods_sizes.vdex_size);
+    std::cout << "Fewer methods and classes sizes " << filter_sizes << std::endl;
+    EXPECT_LE(filter_sizes.art_size, everything_sizes.art_size);
+    EXPECT_LE(filter_sizes.oat_size, everything_sizes.oat_size);
+    EXPECT_LE(filter_sizes.vdex_size, everything_sizes.vdex_size);
   }
   // Test dirty image objects.
   {
     ScratchFile classes;
-    GenerateClasses(classes.GetFile(), /*frequency*/ 1u);
-    image_classes_sizes = CompileImageAndGetSizes(
+    VisitDexes(libcore_dexes_array,
+               VoidFunctor(),
+               [&](TypeReference ref) {
+      WriteLine(classes.GetFile(), ref.dex_file->PrettyType(ref.TypeIndex()));
+    }, /*method_frequency=*/ 1u, /*class_frequency=*/ 1u);
+    ImageSizes image_classes_sizes = CompileImageAndGetSizes(
+        dex_files,
         {"--dirty-image-objects=" + classes.GetFilename()});
     classes.Close();
     std::cout << "Dirty image object sizes " << image_classes_sizes << std::endl;
   }
 }
 
+TEST_F(Dex2oatImageTest, TestExtension) {
+  std::string error_msg;
+  MemMap reservation = ReserveCoreImageAddressSpace(&error_msg);
+  ASSERT_TRUE(reservation.IsValid()) << error_msg;
+
+  ScratchDir scratch;
+  const std::string& scratch_dir = scratch.GetPath();
+  std::string image_dir = scratch_dir + GetInstructionSetString(kRuntimeISA);
+  int mkdir_result = mkdir(image_dir.c_str(), 0700);
+  ASSERT_EQ(0, mkdir_result);
+  std::string filename_prefix = image_dir + "/core";
+
+  // Copy the libcore dex files to a custom dir inside `scratch_dir` so that we do not
+  // accidentally load pre-compiled core images from their original directory based on BCP paths.
+  std::string jar_dir = scratch_dir + "jars";
+  mkdir_result = mkdir(jar_dir.c_str(), 0700);
+  ASSERT_EQ(0, mkdir_result);
+  jar_dir += '/';
+  std::vector<std::string> libcore_dex_files = GetLibCoreDexFileNames();
+  CopyDexFiles(jar_dir, &libcore_dex_files);
+
+  ArrayRef<const std::string> full_bcp(libcore_dex_files);
+  size_t total_dex_files = full_bcp.size();
+  ASSERT_GE(total_dex_files, 5u);  // 3 for "head", 1 for "tail", at least one for "mid", see below.
+
+  // The primary image must contain at least core-oj and core-libart to initialize the runtime
+  // and we also need the core-icu4j if we want to compile these with full profile.
+  ASSERT_NE(std::string::npos, full_bcp[0].find("core-oj"));
+  ASSERT_NE(std::string::npos, full_bcp[1].find("core-libart"));
+  ASSERT_NE(std::string::npos, full_bcp[2].find("core-icu4j"));
+  ArrayRef<const std::string> head_dex_files = full_bcp.SubArray(/*pos=*/ 0u, /*length=*/ 3u);
+  // Middle part is everything else except for conscrypt.
+  ASSERT_NE(std::string::npos, full_bcp[full_bcp.size() - 1u].find("conscrypt"));
+  ArrayRef<const std::string> mid_bcp =
+      full_bcp.SubArray(/*pos=*/ 0u, /*length=*/ total_dex_files - 1u);
+  ArrayRef<const std::string> mid_dex_files = mid_bcp.SubArray(/*pos=*/ 3u);
+  // Tail is just the conscrypt.
+  ArrayRef<const std::string> tail_dex_files =
+      full_bcp.SubArray(/*pos=*/ total_dex_files - 1u, /*length=*/ 1u);
+
+  // Prepare the "head", "mid" and "tail" names and locations.
+  std::string base_name = "core.art";
+  std::string base_location = scratch_dir + base_name;
+  std::vector<std::string> expanded_mid = gc::space::ImageSpace::ExpandMultiImageLocations(
+      mid_dex_files.SubArray(/*pos=*/ 0u, /*length=*/ 1u),
+      base_location,
+      /*boot_image_extension=*/ true);
+  CHECK_EQ(1u, expanded_mid.size());
+  std::string mid_location = expanded_mid[0];
+  size_t mid_slash_pos = mid_location.rfind('/');
+  ASSERT_NE(std::string::npos, mid_slash_pos);
+  std::string mid_name = mid_location.substr(mid_slash_pos + 1u);
+  CHECK_EQ(1u, tail_dex_files.size());
+  std::vector<std::string> expanded_tail = gc::space::ImageSpace::ExpandMultiImageLocations(
+      tail_dex_files, base_location, /*boot_image_extension=*/ true);
+  CHECK_EQ(1u, expanded_tail.size());
+  std::string tail_location = expanded_tail[0];
+  size_t tail_slash_pos = tail_location.rfind('/');
+  ASSERT_NE(std::string::npos, tail_slash_pos);
+  std::string tail_name = tail_location.substr(tail_slash_pos + 1u);
+
+  // Create profiles.
+  ScratchFile head_profile_file;
+  GenerateProfile(head_dex_files,
+                  head_profile_file.GetFile(),
+                  /*method_frequency=*/ 1u,
+                  /*type_frequency=*/ 1u);
+  const std::string& head_profile_filename = head_profile_file.GetFilename();
+  ScratchFile mid_profile_file;
+  GenerateProfile(mid_dex_files,
+                  mid_profile_file.GetFile(),
+                  /*method_frequency=*/ 5u,
+                  /*type_frequency=*/ 4u);
+  const std::string& mid_profile_filename = mid_profile_file.GetFilename();
+  ScratchFile tail_profile_file;
+  GenerateProfile(tail_dex_files,
+                  tail_profile_file.GetFile(),
+                  /*method_frequency=*/ 5u,
+                  /*type_frequency=*/ 4u);
+  const std::string& tail_profile_filename = tail_profile_file.GetFilename();
+
+  // Compile the "head", i.e. the primary boot image.
+  std::vector<std::string> extra_args;
+  extra_args.push_back("--profile-file=" + head_profile_filename);
+  extra_args.push_back(android::base::StringPrintf("--base=0x%08x", kBaseAddress));
+  bool head_ok = CompileBootImage(extra_args, filename_prefix, head_dex_files, &error_msg);
+  ASSERT_TRUE(head_ok) << error_msg;
+
+  // Compile the "mid", i.e. the first extension.
+  std::string mid_bcp_string = android::base::Join(mid_bcp, ':');
+  extra_args.clear();
+  extra_args.push_back("--profile-file=" + mid_profile_filename);
+  AddRuntimeArg(extra_args, "-Xbootclasspath:" + mid_bcp_string);
+  AddRuntimeArg(extra_args, "-Xbootclasspath-locations:" + mid_bcp_string);
+  extra_args.push_back("--boot-image=" + base_location);
+  bool mid_ok = CompileBootImage(extra_args, filename_prefix, mid_dex_files, &error_msg);
+  ASSERT_TRUE(mid_ok) << error_msg;
+
+  // Try to compile the "tail" without specifying the "mid" extension. This shall fail.
+  extra_args.clear();
+  extra_args.push_back("--profile-file=" + tail_profile_filename);
+  std::string full_bcp_string = android::base::Join(full_bcp, ':');
+  AddRuntimeArg(extra_args, "-Xbootclasspath:" + full_bcp_string);
+  AddRuntimeArg(extra_args, "-Xbootclasspath-locations:" + full_bcp_string);
+  extra_args.push_back("--boot-image=" + base_location);
+  bool tail_ok = CompileBootImage(extra_args, filename_prefix, tail_dex_files, &error_msg);
+  ASSERT_FALSE(tail_ok) << error_msg;
+
+  // Now compile the tail against both "head" and "mid".
+  CHECK(StartsWith(extra_args.back(), "--boot-image="));
+  extra_args.back() = "--boot-image=" + base_location + ':' + mid_location;
+  tail_ok = CompileBootImage(extra_args, filename_prefix, tail_dex_files, &error_msg);
+  ASSERT_TRUE(tail_ok) << error_msg;
+
+  // Prepare directory for the single-image test that squashes the "mid" and "tail".
+  std::string single_dir = scratch_dir + "single";
+  mkdir_result = mkdir(single_dir.c_str(), 0700);
+  ASSERT_EQ(0, mkdir_result);
+  single_dir += '/';
+  std::string single_image_dir = single_dir + GetInstructionSetString(kRuntimeISA);
+  mkdir_result = mkdir(single_image_dir.c_str(), 0700);
+  ASSERT_EQ(0, mkdir_result);
+  std::string single_filename_prefix = single_image_dir + "/core";
+
+  // The dex files for the single-image are everything not in the "head".
+  ArrayRef<const std::string> single_dex_files = full_bcp.SubArray(/*pos=*/ head_dex_files.size());
+
+  // Create a smaller profile for the single-image test that squashes the "mid" and "tail".
+  ScratchFile single_profile_file;
+  GenerateProfile(single_dex_files,
+                  single_profile_file.GetFile(),
+                  /*method_frequency=*/ 5u,
+                  /*type_frequency=*/ 4u);
+  const std::string& single_profile_filename = single_profile_file.GetFilename();
+
+  // Prepare the single image name and location.
+  CHECK_GE(single_dex_files.size(), 2u);
+  std::string single_base_location = single_dir + base_name;
+  std::vector<std::string> expanded_single = gc::space::ImageSpace::ExpandMultiImageLocations(
+      single_dex_files.SubArray(/*pos=*/ 0u, /*length=*/ 1u),
+      single_base_location,
+      /*boot_image_extension=*/ true);
+  CHECK_EQ(1u, expanded_single.size());
+  std::string single_location = expanded_single[0];
+  size_t single_slash_pos = single_location.rfind('/');
+  ASSERT_NE(std::string::npos, single_slash_pos);
+  std::string single_name = single_location.substr(single_slash_pos + 1u);
+  CHECK_EQ(single_name, mid_name);
+
+  // Compile the single-image against the primary boot image.
+  extra_args.clear();
+  extra_args.push_back("--profile-file=" + single_profile_filename);
+  AddRuntimeArg(extra_args, "-Xbootclasspath:" + full_bcp_string);
+  AddRuntimeArg(extra_args, "-Xbootclasspath-locations:" + full_bcp_string);
+  extra_args.push_back("--boot-image=" + base_location);
+  extra_args.push_back("--single-image");
+  extra_args.push_back("--avoid-storing-invocation");  // For comparison below.
+  error_msg.clear();
+  bool single_ok =
+      CompileBootImage(extra_args, single_filename_prefix, single_dex_files, &error_msg);
+  ASSERT_TRUE(single_ok) << error_msg;
+
+  reservation = MemMap::Invalid();  // Free the reserved memory for loading images.
+
+  // Try to load the boot image with different image locations.
+  std::vector<std::string> boot_class_path = libcore_dex_files;
+  std::vector<std::unique_ptr<gc::space::ImageSpace>> boot_image_spaces;
+  bool relocate = false;
+  MemMap extra_reservation;
+  auto load = [&](const std::string& image_location) {
+    boot_image_spaces.clear();
+    extra_reservation = MemMap::Invalid();
+    ScopedObjectAccess soa(Thread::Current());
+    return gc::space::ImageSpace::LoadBootImage(/*boot_class_path=*/ boot_class_path,
+                                                /*boot_class_path_locations=*/ libcore_dex_files,
+                                                image_location,
+                                                kRuntimeISA,
+                                                gc::space::ImageSpaceLoadingOrder::kSystemFirst,
+                                                relocate,
+                                                /*executable=*/ true,
+                                                /*is_zygote=*/ false,
+                                                /*extra_reservation_size=*/ 0u,
+                                                &boot_image_spaces,
+                                                &extra_reservation);
+  };
+  auto silent_load = [&](const std::string& image_location) {
+    ScopedLogSeverity quiet(LogSeverity::FATAL);
+    return load(image_location);
+  };
+
+  for (bool r : { false, true }) {
+    relocate = r;
+
+    // Load primary image with full path.
+    bool load_ok = load(base_location);
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_FALSE(extra_reservation.IsValid());
+    ASSERT_EQ(head_dex_files.size(), boot_image_spaces.size());
+
+    // Fail to load primary image with just the name.
+    load_ok = silent_load(base_name);
+    ASSERT_FALSE(load_ok);
+
+    // Fail to load primary image with a search path.
+    load_ok = silent_load("*");
+    ASSERT_FALSE(load_ok);
+    load_ok = silent_load(scratch_dir + "*");
+    ASSERT_FALSE(load_ok);
+
+    // Load the primary and first extension with full path.
+    load_ok = load(base_location + ':' + mid_location);
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_EQ(mid_bcp.size(), boot_image_spaces.size());
+
+    // Load the primary with full path and fail to load first extension without full path.
+    load_ok = load(base_location + ':' + mid_name);
+    ASSERT_TRUE(load_ok) << error_msg;  // Primary image loaded successfully.
+    ASSERT_EQ(head_dex_files.size(), boot_image_spaces.size());  // But only the primary image.
+
+    // Load all the libcore images with full paths.
+    load_ok = load(base_location + ':' + mid_location + ':' + tail_location);
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_EQ(full_bcp.size(), boot_image_spaces.size());
+
+    // Load the primary and first extension with full paths, fail to load second extension by name.
+    load_ok = load(base_location + ':' + mid_location + ':' + tail_name);
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_EQ(mid_bcp.size(), boot_image_spaces.size());
+
+    // Load the primary with full path and fail to load first extension without full path,
+    // fail to load second extension because it depends on the first.
+    load_ok = load(base_location + ':' + mid_name + ':' + tail_location);
+    ASSERT_TRUE(load_ok) << error_msg;  // Primary image loaded successfully.
+    ASSERT_EQ(head_dex_files.size(), boot_image_spaces.size());  // But only the primary image.
+
+    // Load the primary with full path and extensions with a specified search path.
+    load_ok = load(base_location + ':' + scratch_dir + '*');
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_EQ(full_bcp.size(), boot_image_spaces.size());
+
+    // Load the primary with full path and fail to find extensions in BCP path.
+    load_ok = load(base_location + ":*");
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_EQ(head_dex_files.size(), boot_image_spaces.size());
+  }
+
+  // Now copy the libcore dex files to the `scratch_dir` and retry loading the boot image
+  // with BCP in the scratch_dir so that the images can be found based on BCP paths.
+  CopyDexFiles(scratch_dir, &boot_class_path);
+
+  for (bool r : { false, true }) {
+    relocate = r;
+
+    // Loading the primary image with just the name now succeeds.
+    bool load_ok = load(base_name);
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_EQ(head_dex_files.size(), boot_image_spaces.size());
+
+    // Loading the primary image with a search path still fails.
+    load_ok = silent_load("*");
+    ASSERT_FALSE(load_ok);
+    load_ok = silent_load(scratch_dir + "*");
+    ASSERT_FALSE(load_ok);
+
+    // Load the primary and first extension without paths.
+    load_ok = load(base_name + ':' + mid_name);
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_EQ(mid_bcp.size(), boot_image_spaces.size());
+
+    // Load the primary without path and first extension with path.
+    load_ok = load(base_name + ':' + mid_location);
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_EQ(mid_bcp.size(), boot_image_spaces.size());
+
+    // Load the primary with full path and the first extension without full path.
+    load_ok = load(base_location + ':' + mid_name);
+    ASSERT_TRUE(load_ok) << error_msg;  // Loaded successfully.
+    ASSERT_EQ(mid_bcp.size(), boot_image_spaces.size());  // Including the extension.
+
+    // Load all the libcore images without paths.
+    load_ok = load(base_name + ':' + mid_name + ':' + tail_name);
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_EQ(full_bcp.size(), boot_image_spaces.size());
+
+    // Load the primary and first extension with full paths and second extension by name.
+    load_ok = load(base_location + ':' + mid_location + ':' + tail_name);
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_EQ(full_bcp.size(), boot_image_spaces.size());
+
+    // Load the primary with full path, first extension without path,
+    // and second extension with full path.
+    load_ok = load(base_location + ':' + mid_name + ':' + tail_location);
+    ASSERT_TRUE(load_ok) << error_msg;  // Loaded successfully.
+    ASSERT_EQ(full_bcp.size(), boot_image_spaces.size());  // Including both extensions.
+
+    // Load the primary with full path and find both extensions in BCP path.
+    load_ok = load(base_location + ":*");
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_EQ(full_bcp.size(), boot_image_spaces.size());
+
+    // Fail to load any images with invalid image locations (named component after search paths).
+    load_ok = silent_load(base_location + ":*:" + tail_location);
+    ASSERT_FALSE(load_ok);
+    load_ok = silent_load(base_location + ':' + scratch_dir + "*:" + tail_location);
+    ASSERT_FALSE(load_ok);
+
+    // Load the primary and single-image extension with full path.
+    load_ok = load(base_location + ':' + single_location);
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_EQ(head_dex_files.size() + 1u, boot_image_spaces.size());
+
+    // Load the primary with full path and single-image extension with a specified search path.
+    load_ok = load(base_location + ':' + single_dir + '*');
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_EQ(head_dex_files.size() + 1u, boot_image_spaces.size());
+  }
+
+  // Recompile the single-image extension using file descriptors and compare contents.
+  std::vector<std::string> expanded_single_filename_prefix =
+      gc::space::ImageSpace::ExpandMultiImageLocations(
+          single_dex_files.SubArray(/*pos=*/ 0u, /*length=*/ 1u),
+          single_filename_prefix,
+          /*boot_image_extension=*/ true);
+  CHECK_EQ(1u, expanded_single_filename_prefix.size());
+  std::string single_ext_prefix = expanded_single_filename_prefix[0];
+  std::string single_ext_prefix2 = single_ext_prefix + "2";
+  error_msg.clear();
+  single_ok = CompileBootImage(extra_args,
+                               single_filename_prefix,
+                               single_dex_files,
+                               &error_msg,
+                               /*use_fd_prefix=*/ single_ext_prefix2);
+  ASSERT_TRUE(single_ok) << error_msg;
+  EXPECT_TRUE(CompareFiles(single_ext_prefix + ".art", single_ext_prefix2 + ".art"));
+  EXPECT_TRUE(CompareFiles(single_ext_prefix + ".vdex", single_ext_prefix2 + ".vdex"));
+  EXPECT_TRUE(CompareFiles(single_ext_prefix + ".oat", single_ext_prefix2 + ".oat"));
+
+  // Test parsing profile specification and creating the boot image extension on-the-fly.
+  // We must set --android-root in the image compiler options.
+  AddAndroidRootToImageCompilerOptions();
+  for (bool r : { false, true }) {
+    relocate = r;
+
+    // Try and fail to load everything as compiled extension.
+    bool load_ok = silent_load(base_location + "!" + single_profile_filename);
+    ASSERT_FALSE(load_ok);
+
+    // Try and fail to load with invalid spec, two profile name separators.
+    load_ok = silent_load(base_location + ":" + single_location + "!!arbitrary-profile-name");
+    ASSERT_FALSE(load_ok);
+
+    // Try and fail to load with invalid spec, missing profile name.
+    load_ok = silent_load(base_location + ":" + single_location + "!");
+    ASSERT_FALSE(load_ok);
+
+    // Try and fail to load with invalid spec, missing component name.
+    load_ok = silent_load(base_location + ":!" + single_profile_filename);
+    ASSERT_FALSE(load_ok);
+
+    // Load primary boot image, specifying invalid extension component and profile name.
+    load_ok = load(base_location + ":/non-existent/" + single_name + "!non-existent-profile-name");
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_EQ(head_dex_files.size(), boot_image_spaces.size());
+
+    // Load primary boot image and the single extension, specifying invalid profile name.
+    // (Load extension from file.)
+    load_ok = load(base_location + ":" + single_location + "!non-existent-profile-name");
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_EQ(head_dex_files.size() + 1u, boot_image_spaces.size());
+    ASSERT_EQ(single_dex_files.size(),
+              boot_image_spaces.back()->GetImageHeader().GetComponentCount());
+
+    // Load primary boot image and fail to load the single extension, specifying
+    // invalid extension component name but a valid profile file.
+    // (Running dex2oat to compile extension is disabled.)
+    ASSERT_FALSE(Runtime::Current()->IsImageDex2OatEnabled());
+    load_ok = load(base_location + ":/non-existent/" + single_name + "!" + single_profile_filename);
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_EQ(head_dex_files.size(), boot_image_spaces.size());
+
+    EnableImageDex2Oat();
+
+    // Load primary boot image and the single extension, specifying invalid extension
+    // component name but a valid profile file. (Compile extension by running dex2oat.)
+    load_ok = load(base_location + ":/non-existent/" + single_name + "!" + single_profile_filename);
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_EQ(head_dex_files.size() + 1u, boot_image_spaces.size());
+    ASSERT_EQ(single_dex_files.size(),
+              boot_image_spaces.back()->GetImageHeader().GetComponentCount());
+
+    // Load primary boot image and two extensions, specifying invalid extension component
+    // names but valid profile files. (Compile extensions by running dex2oat.)
+    load_ok = load(base_location + ":/non-existent/" + mid_name + "!" + mid_profile_filename
+                                 + ":/non-existent/" + tail_name + "!" + tail_profile_filename);
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_EQ(head_dex_files.size() + 2u, boot_image_spaces.size());
+    ASSERT_EQ(mid_dex_files.size(),
+              boot_image_spaces[head_dex_files.size()]->GetImageHeader().GetComponentCount());
+    ASSERT_EQ(tail_dex_files.size(),
+              boot_image_spaces[head_dex_files.size() + 1u]->GetImageHeader().GetComponentCount());
+
+    // Load primary boot image and fail to load extensions, specifying invalid component
+    // names but valid profile file only for the second one. As we fail to load the first
+    // extension, the second extension has a missing dependency and cannot be compiled.
+    load_ok = load(base_location + ":/non-existent/" + mid_name
+                                 + ":/non-existent/" + tail_name + "!" + tail_profile_filename);
+    ASSERT_TRUE(load_ok) << error_msg;
+    ASSERT_EQ(head_dex_files.size(), boot_image_spaces.size());
+
+    DisableImageDex2Oat();
+  }
+}
+
 }  // namespace art
diff --git a/dex2oat/dex2oat_options.cc b/dex2oat/dex2oat_options.cc
index 80c9a16..ddfc84b 100644
--- a/dex2oat/dex2oat_options.cc
+++ b/dex2oat/dex2oat_options.cc
@@ -93,10 +93,10 @@
           .WithType<std::string>()
           .IntoKey(M::DmFile)
       .Define("--oat-file=_")
-          .WithType<std::vector<std::string>>().AppendValues()
-          .IntoKey(M::OatFiles)
+          .WithType<std::string>()
+          .IntoKey(M::OatFile)
       .Define("--oat-symbols=_")
-          .WithType<std::vector<std::string>>().AppendValues()
+          .WithType<std::string>()
           .IntoKey(M::OatSymbols)
       .Define("--strip")
           .IntoKey(M::Strip)
@@ -111,14 +111,11 @@
 static void AddImageMappings(Builder& builder) {
   builder.
       Define("--image=_")
-          .WithType<std::vector<std::string>>().AppendValues()
-          .IntoKey(M::ImageFilenames)
-      .Define("--image-classes=_")
           .WithType<std::string>()
-          .IntoKey(M::ImageClasses)
-      .Define("--image-classes-zip=_")
-          .WithType<std::string>()
-          .IntoKey(M::ImageClassesZip)
+          .IntoKey(M::ImageFilename)
+      .Define("--image-fd=_")
+          .WithType<int>()
+          .IntoKey(M::ImageFd)
       .Define("--base=_")
           .WithType<std::string>()
           .IntoKey(M::Base)
@@ -128,11 +125,15 @@
       .Define("--app-image-fd=_")
           .WithType<int>()
           .IntoKey(M::AppImageFileFd)
-      .Define("--multi-image")
+      .Define({"--multi-image", "--single-image"})
+          .WithValues({true, false})
           .IntoKey(M::MultiImage)
       .Define("--dirty-image-objects=_")
           .WithType<std::string>()
           .IntoKey(M::DirtyImageObjects)
+      .Define("--updatable-bcp-packages-file=_")
+          .WithType<std::string>()
+          .IntoKey(M::UpdatableBcpPackagesFile)
       .Define("--image-format=_")
           .WithType<ImageHeader::StorageMode>()
           .WithValueMap({{"lz4", ImageHeader::kStorageModeLZ4},
diff --git a/dex2oat/dex2oat_options.def b/dex2oat/dex2oat_options.def
index 8201c3c..dbb7c9e 100644
--- a/dex2oat/dex2oat_options.def
+++ b/dex2oat/dex2oat_options.def
@@ -45,8 +45,8 @@
 DEX2OAT_OPTIONS_KEY (std::string,                    OutputVdex)
 DEX2OAT_OPTIONS_KEY (int,                            DmFd)
 DEX2OAT_OPTIONS_KEY (std::string,                    DmFile)
-DEX2OAT_OPTIONS_KEY (std::vector<std::string>,       OatFiles)
-DEX2OAT_OPTIONS_KEY (std::vector<std::string>,       OatSymbols)
+DEX2OAT_OPTIONS_KEY (std::string,                    OatFile)
+DEX2OAT_OPTIONS_KEY (std::string,                    OatSymbols)
 DEX2OAT_OPTIONS_KEY (Unit,                           Strip)
 DEX2OAT_OPTIONS_KEY (int,                            OatFd)
 DEX2OAT_OPTIONS_KEY (std::string,                    OatLocation)
@@ -54,9 +54,8 @@
 DEX2OAT_OPTIONS_KEY (int,                            WatchdogTimeout)
 DEX2OAT_OPTIONS_KEY (unsigned int,                   Threads)
 DEX2OAT_OPTIONS_KEY (std::vector<std::int32_t>,      CpuSet)
-DEX2OAT_OPTIONS_KEY (std::vector<std::string>,       ImageFilenames)
-DEX2OAT_OPTIONS_KEY (std::string,                    ImageClasses)
-DEX2OAT_OPTIONS_KEY (std::string,                    ImageClassesZip)
+DEX2OAT_OPTIONS_KEY (std::string,                    ImageFilename)
+DEX2OAT_OPTIONS_KEY (int,                            ImageFd)
 DEX2OAT_OPTIONS_KEY (ImageHeader::StorageMode,       ImageFormat)
 DEX2OAT_OPTIONS_KEY (std::string,                    Passes)
 DEX2OAT_OPTIONS_KEY (std::string,                    Base)  // TODO: Hex string parsing.
@@ -81,7 +80,7 @@
 DEX2OAT_OPTIONS_KEY (unsigned int,                   VeryLargeAppThreshold)
 DEX2OAT_OPTIONS_KEY (std::string,                    AppImageFile)
 DEX2OAT_OPTIONS_KEY (int,                            AppImageFileFd)
-DEX2OAT_OPTIONS_KEY (Unit,                           MultiImage)
+DEX2OAT_OPTIONS_KEY (bool,                           MultiImage)
 DEX2OAT_OPTIONS_KEY (std::string,                    NoInlineFrom)
 DEX2OAT_OPTIONS_KEY (Unit,                           ForceDeterminism)
 DEX2OAT_OPTIONS_KEY (std::string,                    ClasspathDir)
@@ -90,6 +89,7 @@
 DEX2OAT_OPTIONS_KEY (std::string,                    ClassLoaderContextFds)
 DEX2OAT_OPTIONS_KEY (std::string,                    StoredClassLoaderContext)
 DEX2OAT_OPTIONS_KEY (std::string,                    DirtyImageObjects)
+DEX2OAT_OPTIONS_KEY (std::string,                    UpdatableBcpPackagesFile)
 DEX2OAT_OPTIONS_KEY (std::vector<std::string>,       RuntimeOptions)
 DEX2OAT_OPTIONS_KEY (std::string,                    CompilationReason)
 
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index ca06b11..34c8c5e 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -32,7 +32,9 @@
 #include "arch/instruction_set_features.h"
 #include "base/macros.h"
 #include "base/mutex-inl.h"
+#include "base/string_view_cpp20.h"
 #include "base/utils.h"
+#include "base/zip_archive.h"
 #include "dex/art_dex_file_loader.h"
 #include "dex/base64_test_util.h"
 #include "dex/bytecode_utils.h"
@@ -52,7 +54,6 @@
 
 namespace art {
 
-static constexpr size_t kMaxMethodIds = 65535;
 static constexpr bool kDebugArgs = false;
 static const char* kDisableCompactDex = "--compact-dex-level=none";
 
@@ -112,13 +113,15 @@
       CompilerFilter::Filter filter,
       const std::vector<std::string>& extra_args = {},
       bool expect_success = true,
-      bool use_fd = false) WARN_UNUSED {
+      bool use_fd = false,
+      bool use_zip_fd = false) WARN_UNUSED {
     return GenerateOdexForTest(dex_location,
                                odex_location,
                                filter,
                                extra_args,
                                expect_success,
                                use_fd,
+                               use_zip_fd,
                                [](const OatFile&) {});
   }
 
@@ -132,9 +135,22 @@
       const std::vector<std::string>& extra_args,
       bool expect_success,
       bool use_fd,
+      bool use_zip_fd,
       T check_oat) WARN_UNUSED {
+    std::vector<std::string> dex_locations;
+    if (use_zip_fd) {
+      std::string loc_arg = "--zip-location=" + dex_location;
+      CHECK(std::any_of(extra_args.begin(),
+                        extra_args.end(),
+                        [&](const std::string& s) { return s == loc_arg; }));
+      CHECK(std::any_of(extra_args.begin(),
+                        extra_args.end(),
+                        [](const std::string& s) { return StartsWith(s, "--zip-fd="); }));
+    } else {
+      dex_locations.push_back(dex_location);
+    }
     std::string error_msg;
-    int status = GenerateOdexForTestWithStatus({dex_location},
+    int status = GenerateOdexForTestWithStatus(dex_locations,
                                                odex_location,
                                                filter,
                                                &error_msg,
@@ -153,8 +169,7 @@
                                                        odex_location.c_str(),
                                                        /*executable=*/ false,
                                                        /*low_4gb=*/ false,
-                                                       dex_location.c_str(),
-                                                       /*reservation=*/ nullptr,
+                                                       dex_location,
                                                        &error_msg));
       if (odex_file == nullptr) {
         return ::testing::AssertionFailure() << "Could not open odex file: " << error_msg;
@@ -176,8 +191,7 @@
                                                          odex_location.c_str(),
                                                          /*executable=*/ false,
                                                          /*low_4gb=*/ false,
-                                                         dex_location.c_str(),
-                                                         /*reservation=*/ nullptr,
+                                                         dex_location,
                                                          &error_msg));
         if (odex_file != nullptr) {
           return ::testing::AssertionFailure() << "Could open odex file: " << error_msg;
@@ -445,9 +459,10 @@
   TEST_DISABLED_FOR_MEMORY_TOOL();
 
   // The `native_alloc_2_ >= native_alloc_1_` assertion below may not
-  // hold true on some x86 systems; disable this test while we
+  // hold true on some x86 or x86_64 systems; disable this test while we
   // investigate (b/29259363).
   TEST_DISABLED_FOR_X86();
+  TEST_DISABLED_FOR_X86_64();
 
   RunTest(/*use_fd=*/ false,
           /*expect_use=*/ false);
@@ -518,8 +533,7 @@
                                                      odex_location.c_str(),
                                                      /*executable=*/ false,
                                                      /*low_4gb=*/ false,
-                                                     dex_location.c_str(),
-                                                     /*reservation=*/ nullptr,
+                                                     dex_location,
                                                      &error_msg));
     ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
     EXPECT_GT(app_image_file.length(), 0u);
@@ -630,21 +644,22 @@
     // Ignore, we'll do our own checks.
   }
 
-  // Emits a profile with a single dex file with the given location and a single class index of 1.
+  // Emits a profile with a single dex file with the given location and classes ranging
+  // from 0 to num_classes.
   void GenerateProfile(const std::string& test_profile,
-                       const std::string& dex_location,
-                       size_t num_classes,
-                       uint32_t checksum) {
+                       const DexFile* dex,
+                       size_t num_classes) {
     int profile_test_fd = open(test_profile.c_str(),
                                O_CREAT | O_TRUNC | O_WRONLY | O_CLOEXEC,
                                0644);
     CHECK_GE(profile_test_fd, 0);
 
     ProfileCompilationInfo info;
-    std::string profile_key = ProfileCompilationInfo::GetProfileDexFileKey(dex_location);
+    std::vector<dex::TypeIndex> classes;;
     for (size_t i = 0; i < num_classes; ++i) {
-      info.AddClassIndex(profile_key, checksum, dex::TypeIndex(1 + i), kMaxMethodIds);
+      classes.push_back(dex::TypeIndex(1 + i));
     }
+    info.AddClassesForDex(dex, classes.begin(), classes.end());
     bool result = info.Save(profile_test_fd);
     close(profile_test_fd);
     ASSERT_TRUE(result);
@@ -666,10 +681,7 @@
         location, location, /*verify=*/ true, /*verify_checksum=*/ true, &error_msg, &dex_files));
     EXPECT_EQ(dex_files.size(), 1U);
     std::unique_ptr<const DexFile>& dex_file = dex_files[0];
-    GenerateProfile(profile_location,
-                    dex_location,
-                    num_profile_classes,
-                    dex_file->GetLocationChecksum());
+    GenerateProfile(profile_location, dex_file.get(), num_profile_classes);
     std::vector<std::string> copy(extra_args);
     copy.push_back("--profile-file=" + profile_location);
     std::unique_ptr<File> app_image_file;
@@ -790,8 +802,7 @@
                                                      odex_location.c_str(),
                                                      /*executable=*/ false,
                                                      /*low_4gb=*/ false,
-                                                     dex_location.c_str(),
-                                                     /*reservation=*/ nullptr,
+                                                     dex_location,
                                                      &error_msg));
     ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
 
@@ -885,6 +896,9 @@
                                       /* use_fd= */ true));
       EXPECT_GT(vdex_file1->GetLength(), 0u);
     }
+    // Get the dex file checksums.
+    std::vector<uint32_t> checksums1;
+    GetDexFileChecksums(dex_location, odex_location, &checksums1);
     // Unquicken by running the verify compiler filter on the vdex file.
     {
       std::string input_vdex = StringPrintf("--input-vdex-fd=%d", vdex_file1->Fd());
@@ -898,6 +912,13 @@
     }
     ASSERT_EQ(vdex_file1->FlushCloseOrErase(), 0) << "Could not flush and close vdex file";
     CheckResult(dex_location, odex_location);
+    // Verify that the checksums did not change.
+    std::vector<uint32_t> checksums2;
+    GetDexFileChecksums(dex_location, odex_location, &checksums2);
+    ASSERT_EQ(checksums1.size(), checksums2.size());
+    for (size_t i = 0; i != checksums1.size(); ++i) {
+      EXPECT_EQ(checksums1[i], checksums2[i]) << i;
+    }
     ASSERT_TRUE(success_);
   }
 
@@ -926,7 +947,6 @@
                                       /* use_fd= */ true));
       EXPECT_GT(vdex_file1->GetLength(), 0u);
     }
-
     // Unquicken by running the verify compiler filter on the vdex file.
     {
       std::string input_vdex = StringPrintf("--input-vdex-fd=%d", vdex_file1->Fd());
@@ -951,8 +971,7 @@
                                                      odex_location.c_str(),
                                                      /*executable=*/ false,
                                                      /*low_4gb=*/ false,
-                                                     dex_location.c_str(),
-                                                     /*reservation=*/ nullptr,
+                                                     dex_location,
                                                      &error_msg));
     ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
     ASSERT_GE(odex_file->GetOatDexFiles().size(), 1u);
@@ -969,6 +988,24 @@
       }
     }
   }
+
+  void GetDexFileChecksums(const std::string& dex_location,
+                           const std::string& odex_location,
+                           /*out*/std::vector<uint32_t>* checksums) {
+    std::string error_msg;
+    std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
+                                                     odex_location.c_str(),
+                                                     odex_location.c_str(),
+                                                     /*executable=*/ false,
+                                                     /*low_4gb=*/ false,
+                                                     dex_location,
+                                                     &error_msg));
+    ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
+    ASSERT_GE(odex_file->GetOatDexFiles().size(), 1u);
+    for (const OatDexFile* oat_dex_file : odex_file->GetOatDexFiles()) {
+      checksums->push_back(oat_dex_file->GetDexFileLocationChecksum());
+    }
+  }
 };
 
 TEST_F(Dex2oatUnquickenTest, UnquickenMultiDex) {
@@ -1013,7 +1050,7 @@
 }
 
 TEST_F(Dex2oatWatchdogTest, TestWatchdogTrigger) {
-  // This test is frequently interrupted by timeout_dumper on host (x86);
+  // This test is frequently interrupted by signal_dumper on host (x86);
   // disable it while we investigate (b/121352534).
   TEST_DISABLED_FOR_X86();
 
@@ -1085,7 +1122,8 @@
                                     CompilerFilter::kQuicken,
                                     extra_args,
                                     expected_success,
-                                    /*use_fd*/ false,
+                                    /*use_fd=*/ false,
+                                    /*use_zip_fd=*/ false,
                                     check_oat));
   }
 
@@ -1262,12 +1300,6 @@
 class Dex2oatDeterminism : public Dex2oatTest {};
 
 TEST_F(Dex2oatDeterminism, UnloadCompile) {
-  if (!kUseReadBarrier &&
-      gc::kCollectorTypeDefault != gc::kCollectorTypeCMS &&
-      gc::kCollectorTypeDefault != gc::kCollectorTypeMS) {
-    LOG(INFO) << "Test requires determinism support.";
-    return;
-  }
   Runtime* const runtime = Runtime::Current();
   std::string out_dir = GetScratchDir();
   const std::string base_oat_name = out_dir + "/base.oat";
@@ -1402,8 +1434,7 @@
                                                    oat_filename.c_str(),
                                                    /*executable=*/ false,
                                                    /*low_4gb=*/ false,
-                                                   dex->GetLocation().c_str(),
-                                                   /*reservation=*/ nullptr,
+                                                   dex->GetLocation(),
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr);
   std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1511,8 +1542,7 @@
                                                    oat_filename.c_str(),
                                                    /*executable=*/ false,
                                                    /*low_4gb=*/ false,
-                                                   dex_location.c_str(),
-                                                   /*reservation=*/ nullptr,
+                                                   dex_location,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr);
   std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1611,8 +1641,9 @@
                                   base_oat_name,
                                   CompilerFilter::Filter::kSpeed,
                                   { "--deduplicate-code=false" },
-                                  true,  // expect_success
-                                  false,  // use_fd
+                                  /*expect_success=*/ true,
+                                  /*use_fd=*/ false,
+                                  /*use_zip_fd=*/ false,
                                   [&no_dedupe_size](const OatFile& o) {
                                     no_dedupe_size = o.Size();
                                   }));
@@ -1622,8 +1653,9 @@
                                   base_oat_name,
                                   CompilerFilter::Filter::kSpeed,
                                   { "--deduplicate-code=true" },
-                                  true,  // expect_success
-                                  false,  // use_fd
+                                  /*expect_success=*/ true,
+                                  /*use_fd=*/ false,
+                                  /*use_zip_fd=*/ false,
                                   [&dedupe_size](const OatFile& o) {
                                     dedupe_size = o.Size();
                                   }));
@@ -1632,15 +1664,16 @@
 }
 
 TEST_F(Dex2oatTest, UncompressedTest) {
-  std::unique_ptr<const DexFile> dex(OpenTestDexFile("MainUncompressed"));
+  std::unique_ptr<const DexFile> dex(OpenTestDexFile("MainUncompressedAligned"));
   std::string out_dir = GetScratchDir();
   const std::string base_oat_name = out_dir + "/base.oat";
   ASSERT_TRUE(GenerateOdexForTest(dex->GetLocation(),
                                   base_oat_name,
                                   CompilerFilter::Filter::kQuicken,
                                   { },
-                                  true,  // expect_success
-                                  false,  // use_fd
+                                  /*expect_success=*/ true,
+                                  /*use_fd=*/ false,
+                                  /*use_zip_fd=*/ false,
                                   [](const OatFile& o) {
                                     CHECK(!o.ContainsDexCode());
                                   }));
@@ -1759,8 +1792,9 @@
                                   oat_filename,
                                   CompilerFilter::Filter::kVerify,
                                   { },
-                                  true,  // expect_success
-                                  false,  // use_fd
+                                  /*expect_success=*/ true,
+                                  /*use_fd=*/ false,
+                                  /*use_zip_fd=*/ false,
                                   [](const OatFile& o) {
                                     CHECK(o.ContainsDexCode());
                                   }));
@@ -1771,8 +1805,7 @@
                                                    oat_filename.c_str(),
                                                    /*executable=*/ false,
                                                    /*low_4gb=*/ false,
-                                                   temp_dex.GetFilename().c_str(),
-                                                   /*reservation=*/ nullptr,
+                                                   temp_dex.GetFilename(),
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr);
   std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1848,8 +1881,7 @@
                                                    odex_location.c_str(),
                                                    /*executable=*/ false,
                                                    /*low_4gb=*/ false,
-                                                   dex_location.c_str(),
-                                                   /*reservation=*/ nullptr,
+                                                   dex_location,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr);
   ASSERT_STREQ("install", odex_file->GetCompilationReason());
@@ -1873,8 +1905,7 @@
                                                    odex_location.c_str(),
                                                    /*executable=*/ false,
                                                    /*low_4gb=*/ false,
-                                                   dex_location.c_str(),
-                                                   /*reservation=*/ nullptr,
+                                                   dex_location,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr);
   ASSERT_EQ(nullptr, odex_file->GetCompilationReason());
@@ -1891,8 +1922,9 @@
                                   odex_location,
                                   CompilerFilter::Filter::kVerify,
                                   { "--copy-dex-files=false" },
-                                  true,  // expect_success
-                                  false,  // use_fd
+                                  /*expect_success=*/ true,
+                                  /*use_fd=*/ false,
+                                  /*use_zip_fd=*/ false,
                                   [](const OatFile&) {}));
   {
     // Check the vdex doesn't have dex.
@@ -1909,8 +1941,7 @@
                                                    odex_location.c_str(),
                                                    /*executable=*/ false,
                                                    /*low_4gb=*/ false,
-                                                   dex_location.c_str(),
-                                                   /*reservation=*/ nullptr,
+                                                   dex_location,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr) << dex_location;
   std::vector<const OatDexFile*> oat_dex_files = odex_file->GetOatDexFiles();
@@ -1953,8 +1984,9 @@
                                       // target.
                                       "--runtime-arg",
                                       "-Xuse-stderr-logger" },
-                                    true,  // expect_success
-                                    false,  // use_fd
+                                    /*expect_success=*/ true,
+                                    /*use_fd=*/ false,
+                                    /*use_zip_fd=*/ false,
                                     [](const OatFile& o) {
                                       CHECK(o.ContainsDexCode());
                                     }));
@@ -2140,8 +2172,9 @@
                                   odex_location,
                                   CompilerFilter::Filter::kSpeedProfile,
                                   { "--app-image-fd=" + std::to_string(app_image_file.GetFd()) },
-                                  true,  // expect_success
-                                  false,  // use_fd
+                                  /*expect_success=*/ true,
+                                  /*use_fd=*/ false,
+                                  /*use_zip_fd=*/ false,
                                   [](const OatFile&) {}));
   // Open our generated oat file.
   std::string error_msg;
@@ -2150,8 +2183,6 @@
                                                    odex_location.c_str(),
                                                    /*executable=*/ false,
                                                    /*low_4gb=*/ false,
-                                                   odex_location.c_str(),
-                                                   /*reservation=*/ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr);
   ImageHeader header = {};
@@ -2164,6 +2195,123 @@
   EXPECT_EQ(header.GetImageSection(ImageHeader::kSectionArtFields).Size(), 0u);
 }
 
+TEST_F(Dex2oatTest, ZipFd) {
+  std::string zip_location = GetTestDexFileName("MainUncompressedAligned");
+  std::unique_ptr<File> dex_file(OS::OpenFileForReading(zip_location.c_str()));
+  std::vector<std::string> extra_args{
+      StringPrintf("--zip-fd=%d", dex_file->Fd()),
+      "--zip-location=" + zip_location,
+  };
+  std::string out_dir = GetScratchDir();
+  const std::string base_oat_name = out_dir + "/base.oat";
+  ASSERT_TRUE(GenerateOdexForTest(zip_location,
+                                  base_oat_name,
+                                  CompilerFilter::Filter::kQuicken,
+                                  extra_args,
+                                  /*expect_success=*/ true,
+                                  /*use_fd=*/ false,
+                                  /*use_zip_fd=*/ true));
+}
+
+TEST_F(Dex2oatTest, AppImageEmptyDex) {
+  // Create a profile with the startup method marked.
+  ScratchFile profile_file;
+  ScratchFile temp_dex;
+  const std::string& dex_location = temp_dex.GetFilename();
+  std::vector<uint16_t> methods;
+  std::vector<dex::TypeIndex> classes;
+  {
+    MutateDexFile(temp_dex.GetFile(), GetTestDexFileName("StringLiterals"), [&] (DexFile* dex) {
+      // Modify the header to make the dex file valid but empty.
+      DexFile::Header* header = const_cast<DexFile::Header*>(&dex->GetHeader());
+      header->string_ids_size_ = 0;
+      header->string_ids_off_ = 0;
+      header->type_ids_size_ = 0;
+      header->type_ids_off_ = 0;
+      header->proto_ids_size_ = 0;
+      header->proto_ids_off_ = 0;
+      header->field_ids_size_ = 0;
+      header->field_ids_off_ = 0;
+      header->method_ids_size_ = 0;
+      header->method_ids_off_ = 0;
+      header->class_defs_size_ = 0;
+      header->class_defs_off_ = 0;
+      ASSERT_GT(header->file_size_,
+                sizeof(*header) + sizeof(dex::MapList) + sizeof(dex::MapItem) * 2);
+      // Move map list to be right after the header.
+      header->map_off_ = sizeof(DexFile::Header);
+      dex::MapList* map_list = const_cast<dex::MapList*>(dex->GetMapList());
+      map_list->list_[0].type_ = DexFile::kDexTypeHeaderItem;
+      map_list->list_[0].size_ = 1u;
+      map_list->list_[0].offset_ = 0u;
+      map_list->list_[1].type_ = DexFile::kDexTypeMapList;
+      map_list->list_[1].size_ = 1u;
+      map_list->list_[1].offset_ = header->map_off_;
+      map_list->size_ = 2;
+      header->data_off_ = header->map_off_;
+      header->data_size_ = map_list->Size();
+    });
+  }
+  std::unique_ptr<const DexFile> dex_file(OpenDexFile(temp_dex.GetFilename().c_str()));
+  const std::string out_dir = GetScratchDir();
+  const std::string odex_location = out_dir + "/base.odex";
+  const std::string app_image_location = out_dir + "/base.art";
+  ASSERT_TRUE(GenerateOdexForTest(dex_location,
+                                  odex_location,
+                                  CompilerFilter::Filter::kSpeedProfile,
+                                  { "--app-image-file=" + app_image_location,
+                                    "--resolve-startup-const-strings=true",
+                                    "--profile-file=" + profile_file.GetFilename()},
+                                  /*expect_success=*/ true,
+                                  /*use_fd=*/ false,
+                                  /*use_zip_fd=*/ false,
+                                  [](const OatFile&) {}));
+  // Open our generated oat file.
+  std::string error_msg;
+  std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
+                                                   odex_location.c_str(),
+                                                   odex_location.c_str(),
+                                                   /*executable=*/ false,
+                                                   /*low_4gb=*/ false,
+                                                   &error_msg));
+  ASSERT_TRUE(odex_file != nullptr);
+}
+
+TEST_F(Dex2oatTest, DexFileFd) {
+  std::string error_msg;
+  std::string zip_location = GetTestDexFileName("Main");
+  std::unique_ptr<File> zip_file(OS::OpenFileForReading(zip_location.c_str()));
+  ASSERT_NE(-1, zip_file->Fd());
+
+  std::unique_ptr<ZipArchive> zip_archive(
+      ZipArchive::OpenFromFd(zip_file->Release(), zip_location.c_str(), &error_msg));
+  ASSERT_TRUE(zip_archive != nullptr);
+
+  std::string entry_name = DexFileLoader::GetMultiDexClassesDexName(0);
+  std::unique_ptr<ZipEntry> entry(zip_archive->Find(entry_name.c_str(), &error_msg));
+  ASSERT_TRUE(entry != nullptr);
+
+  ScratchFile dex_file;
+  const std::string& dex_location = dex_file.GetFilename();
+  const std::string base_oat_name = GetScratchDir() + "/base.oat";
+
+  bool success = entry->ExtractToFile(*(dex_file.GetFile()), &error_msg);
+  ASSERT_TRUE(success);
+  ASSERT_EQ(0, lseek(dex_file.GetFd(), 0, SEEK_SET));
+
+  std::vector<std::string> extra_args{
+      StringPrintf("--zip-fd=%d", dex_file.GetFd()),
+      "--zip-location=" + dex_location,
+  };
+  ASSERT_TRUE(GenerateOdexForTest(dex_location,
+                                  base_oat_name,
+                                  CompilerFilter::Filter::kQuicken,
+                                  extra_args,
+                                  /*expect_success=*/ true,
+                                  /*use_fd=*/ false,
+                                  /*use_zip_fd=*/ true));
+}
+
 TEST_F(Dex2oatTest, AppImageResolveStrings) {
   using Hotness = ProfileCompilationInfo::MethodHotness;
   // Create a profile with the startup method marked.
@@ -2234,8 +2382,9 @@
                                   { "--app-image-file=" + app_image_location,
                                     "--resolve-startup-const-strings=true",
                                     "--profile-file=" + profile_file.GetFilename()},
-                                  /* expect_success= */ true,
-                                  /* use_fd= */ false,
+                                  /*expect_success=*/ true,
+                                  /*use_fd=*/ false,
+                                  /*use_zip_fd=*/ false,
                                   [](const OatFile&) {}));
   // Open our generated oat file.
   std::string error_msg;
@@ -2244,8 +2393,6 @@
                                                    odex_location.c_str(),
                                                    /*executable=*/ false,
                                                    /*low_4gb=*/ false,
-                                                   odex_location.c_str(),
-                                                   /*reservation=*/ nullptr,
                                                    &error_msg));
   ASSERT_TRUE(odex_file != nullptr);
   // Check the strings in the app image intern table only contain the "startup" strigs.
@@ -2347,8 +2494,9 @@
                                   odex_location,
                                   CompilerFilter::Filter::kQuicken,
                                   { "--class-loader-context=" + stored_context },
-                                  true,  // expect_success
-                                  false,  // use_fd
+                                  /*expect_success=*/ true,
+                                  /*use_fd=*/ false,
+                                  /*use_zip_fd=*/ false,
                                   [&](const OatFile& oat_file) {
     EXPECT_NE(oat_file.GetClassLoaderContext(), stored_context) << output_;
     EXPECT_NE(oat_file.GetClassLoaderContext(), valid_context) << output_;
@@ -2359,8 +2507,9 @@
                                   CompilerFilter::Filter::kQuicken,
                                   { "--class-loader-context=" + valid_context,
                                     "--stored-class-loader-context=" + stored_context },
-                                  true,  // expect_success
-                                  false,  // use_fd
+                                  /*expect_success=*/ true,
+                                  /*use_fd=*/ false,
+                                  /*use_zip_fd=*/ false,
                                   [&](const OatFile& oat_file) {
     EXPECT_EQ(oat_file.GetClassLoaderContext(), expected_stored_context) << output_;
   }));
diff --git a/dex2oat/driver/compiler_driver.cc b/dex2oat/driver/compiler_driver.cc
index 8893d67..cb186d3 100644
--- a/dex2oat/driver/compiler_driver.cc
+++ b/dex2oat/driver/compiler_driver.cc
@@ -29,6 +29,7 @@
 #include "android-base/logging.h"
 #include "android-base/strings.h"
 
+#include "aot_class_linker.h"
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/arena_allocator.h"
@@ -293,10 +294,15 @@
                                 type ## _ENTRYPOINT_OFFSET(PointerSize::k32, offset));  \
     }
 
-std::unique_ptr<const std::vector<uint8_t>> CompilerDriver::CreateJniDlsymLookup() const {
+std::unique_ptr<const std::vector<uint8_t>> CompilerDriver::CreateJniDlsymLookupTrampoline() const {
   CREATE_TRAMPOLINE(JNI, kJniAbi, pDlsymLookup)
 }
 
+std::unique_ptr<const std::vector<uint8_t>>
+CompilerDriver::CreateJniDlsymLookupCriticalTrampoline() const {
+  CREATE_TRAMPOLINE(JNI, kJniAbi, pDlsymLookupCritical)
+}
+
 std::unique_ptr<const std::vector<uint8_t>> CompilerDriver::CreateQuickGenericJniTrampoline()
     const {
   CREATE_TRAMPOLINE(QUICK, kQuickAbi, pQuickGenericJniTrampoline)
@@ -326,9 +332,9 @@
   CheckThreadPools();
 
   if (GetCompilerOptions().IsBootImage()) {
-    // We don't need to setup the intrinsics for non boot image compilation, as
-    // those compilations will pick up a boot image that have the ArtMethod already
-    // set with the intrinsics flag.
+    // All intrinsics must be in the primary boot image, so we don't need to setup
+    // the intrinsics for any other compilation, as those compilations will pick up
+    // a boot image that have the ArtMethod already set with the intrinsics flag.
     InitializeIntrinsics();
   }
   // Compile:
@@ -404,8 +410,6 @@
     case InstructionSet::kArm:
     case InstructionSet::kArm64:
     case InstructionSet::kThumb2:
-    case InstructionSet::kMips:
-    case InstructionSet::kMips64:
     case InstructionSet::kX86:
     case InstructionSet::kX86_64: return true;
     default: return false;
@@ -830,7 +834,10 @@
       if (cls == nullptr) {
         soa.Self()->ClearException();
       } else if (&cls->GetDexFile() == dex_file) {
-        DCHECK(cls->IsErroneous() || cls->IsVerified() || cls->ShouldVerifyAtRuntime())
+        DCHECK(cls->IsErroneous() ||
+               cls->IsVerified() ||
+               cls->ShouldVerifyAtRuntime() ||
+               cls->IsVerifiedNeedsAccessChecks())
             << cls->PrettyClass()
             << " " << cls->GetStatus();
       }
@@ -838,6 +845,15 @@
   }
 }
 
+void CompilerDriver::PrepareDexFilesForOatFile(TimingLogger* timings) {
+  compiled_classes_.AddDexFiles(GetCompilerOptions().GetDexFilesForOatFile());
+
+  if (GetCompilerOptions().IsAnyCompilationEnabled()) {
+    TimingLogger::ScopedTiming t2("Dex2Dex SetDexFiles", timings);
+    dex_to_dex_compiler_.SetDexFiles(GetCompilerOptions().GetDexFilesForOatFile());
+  }
+}
+
 void CompilerDriver::PreCompile(jobject class_loader,
                                 const std::vector<const DexFile*>& dex_files,
                                 TimingLogger* timings,
@@ -847,9 +863,6 @@
 
   VLOG(compiler) << "Before precompile " << GetMemoryUsageString(false);
 
-  compiled_classes_.AddDexFiles(GetCompilerOptions().GetDexFilesForOatFile());
-  dex_to_dex_compiler_.SetDexFiles(GetCompilerOptions().GetDexFilesForOatFile());
-
   // Precompile:
   // 1) Load image classes.
   // 2) Resolve all classes.
@@ -879,66 +892,60 @@
   if (compiler_options_->AssumeClassesAreVerified()) {
     VLOG(compiler) << "Verify none mode specified, skipping verification.";
     SetVerified(class_loader, dex_files, timings);
-  }
+  } else if (compiler_options_->IsVerificationEnabled()) {
+    Verify(class_loader, dex_files, timings, verification_results);
+    VLOG(compiler) << "Verify: " << GetMemoryUsageString(false);
 
-  if (!compiler_options_->IsVerificationEnabled()) {
-    return;
-  }
-
-  Verify(class_loader, dex_files, timings, verification_results);
-  VLOG(compiler) << "Verify: " << GetMemoryUsageString(false);
-
-  if (GetCompilerOptions().IsForceDeterminism() && GetCompilerOptions().IsBootImage()) {
-    // Resolve strings from const-string. Do this now to have a deterministic image.
-    ResolveConstStrings(dex_files, /*only_startup_strings=*/ false, timings);
-    VLOG(compiler) << "Resolve const-strings: " << GetMemoryUsageString(false);
-  } else if (GetCompilerOptions().ResolveStartupConstStrings()) {
-    ResolveConstStrings(dex_files, /*only_startup_strings=*/ true, timings);
-  }
-
-  if (had_hard_verifier_failure_ && GetCompilerOptions().AbortOnHardVerifierFailure()) {
-    // Avoid dumping threads. Even if we shut down the thread pools, there will still be three
-    // instances of this thread's stack.
-    LOG(FATAL_WITHOUT_ABORT) << "Had a hard failure verifying all classes, and was asked to abort "
-                             << "in such situations. Please check the log.";
-    _exit(1);
-  } else if (number_of_soft_verifier_failures_ > 0 &&
-             GetCompilerOptions().AbortOnSoftVerifierFailure()) {
-    LOG(FATAL_WITHOUT_ABORT) << "Had " << number_of_soft_verifier_failures_ << " soft failure(s) "
-                             << "verifying all classes, and was asked to abort in such situations. "
-                             << "Please check the log.";
-    _exit(1);
-  }
-
-  if (compiler_options_->IsAnyCompilationEnabled()) {
-    if (kIsDebugBuild) {
-      EnsureVerifiedOrVerifyAtRuntime(class_loader, dex_files);
+    if (GetCompilerOptions().IsForceDeterminism() &&
+        (GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension())) {
+      // Resolve strings from const-string. Do this now to have a deterministic image.
+      ResolveConstStrings(dex_files, /*only_startup_strings=*/ false, timings);
+      VLOG(compiler) << "Resolve const-strings: " << GetMemoryUsageString(false);
+    } else if (GetCompilerOptions().ResolveStartupConstStrings()) {
+      ResolveConstStrings(dex_files, /*only_startup_strings=*/ true, timings);
     }
-    InitializeClasses(class_loader, dex_files, timings);
-    VLOG(compiler) << "InitializeClasses: " << GetMemoryUsageString(false);
+
+    if (had_hard_verifier_failure_ && GetCompilerOptions().AbortOnHardVerifierFailure()) {
+      // Avoid dumping threads. Even if we shut down the thread pools, there will still be three
+      // instances of this thread's stack.
+      LOG(FATAL_WITHOUT_ABORT) << "Had a hard failure verifying all classes, and was asked to abort "
+                               << "in such situations. Please check the log.";
+      _exit(1);
+    } else if (number_of_soft_verifier_failures_ > 0 &&
+               GetCompilerOptions().AbortOnSoftVerifierFailure()) {
+      LOG(FATAL_WITHOUT_ABORT) << "Had " << number_of_soft_verifier_failures_ << " soft failure(s) "
+                               << "verifying all classes, and was asked to abort in such situations. "
+                               << "Please check the log.";
+      _exit(1);
+    }
   }
 
-  UpdateImageClasses(timings, image_classes);
-  VLOG(compiler) << "UpdateImageClasses: " << GetMemoryUsageString(false);
+  if (GetCompilerOptions().IsGeneratingImage()) {
+    // We can only initialize classes when their verification bit is set.
+    if (compiler_options_->AssumeClassesAreVerified() ||
+        compiler_options_->IsVerificationEnabled()) {
+      if (kIsDebugBuild) {
+        EnsureVerifiedOrVerifyAtRuntime(class_loader, dex_files);
+      }
+      InitializeClasses(class_loader, dex_files, timings);
+      VLOG(compiler) << "InitializeClasses: " << GetMemoryUsageString(false);
+    }
 
-  if (kBitstringSubtypeCheckEnabled &&
-      GetCompilerOptions().IsForceDeterminism() && GetCompilerOptions().IsBootImage()) {
-    // Initialize type check bit string used by check-cast and instanceof.
-    // Do this now to have a deterministic image.
-    // Note: This is done after UpdateImageClasses() at it relies on the image classes to be final.
-    InitializeTypeCheckBitstrings(this, dex_files, timings);
+    UpdateImageClasses(timings, image_classes);
+    VLOG(compiler) << "UpdateImageClasses: " << GetMemoryUsageString(false);
+
+    if (kBitstringSubtypeCheckEnabled &&
+        GetCompilerOptions().IsForceDeterminism() && GetCompilerOptions().IsBootImage()) {
+      // Initialize type check bit string used by check-cast and instanceof.
+      // Do this now to have a deterministic image.
+      // Note: This is done after UpdateImageClasses() as it relies on the image
+      // classes to be final.
+      InitializeTypeCheckBitstrings(this, dex_files, timings);
+    }
   }
 }
 
 bool CompilerDriver::ShouldCompileBasedOnProfile(const MethodReference& method_ref) const {
-  // If compiling the apex image, filter out methods not in an apex file (the profile used
-  // for boot classpath is the same between the apex image and the boot image, so it includes
-  /// framewkro methods).
-  if (compiler_options_->IsApexBootImage() &&
-      !android::base::StartsWith(method_ref.dex_file->GetLocation(), "/apex")) {
-    return false;
-  }
-
   // Profile compilation info may be null if no profile is passed.
   if (!CompilerFilter::DependsOnProfile(compiler_options_->GetCompilerFilter())) {
     // Use the compiler filter instead of the presence of profile_compilation_info_ since
@@ -1025,14 +1032,40 @@
   std::vector<ObjPtr<mirror::Class>> classes_;
 };
 
+static inline bool CanIncludeInCurrentImage(ObjPtr<mirror::Class> klass)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  DCHECK(klass != nullptr);
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  if (heap->GetBootImageSpaces().empty()) {
+    return true;  // We can include any class when compiling the primary boot image.
+  }
+  if (heap->ObjectIsInBootImageSpace(klass)) {
+    return false;  // Already included in the boot image we're compiling against.
+  }
+  return AotClassLinker::CanReferenceInBootImageExtension(klass, heap);
+}
+
 class RecordImageClassesVisitor : public ClassVisitor {
  public:
   explicit RecordImageClassesVisitor(HashSet<std::string>* image_classes)
       : image_classes_(image_classes) {}
 
   bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
+    bool resolved = klass->IsResolved();
+    DCHECK(resolved || klass->IsErroneousUnresolved());
+    bool can_include_in_image = LIKELY(resolved) && CanIncludeInCurrentImage(klass);
     std::string temp;
-    image_classes_->insert(klass->GetDescriptor(&temp));
+    std::string_view descriptor(klass->GetDescriptor(&temp));
+    if (can_include_in_image) {
+      image_classes_->insert(std::string(descriptor));  // Does nothing if already present.
+    } else {
+      auto it = image_classes_->find(descriptor);
+      if (it != image_classes_->end()) {
+        VLOG(compiler) << "Removing " << (resolved ? "unsuitable" : "unresolved")
+            << " class from image classes: " << descriptor;
+        image_classes_->erase(it);
+      }
+    }
     return true;
   }
 
@@ -1044,12 +1077,18 @@
 void CompilerDriver::LoadImageClasses(TimingLogger* timings,
                                       /*inout*/ HashSet<std::string>* image_classes) {
   CHECK(timings != nullptr);
-  if (!GetCompilerOptions().IsBootImage()) {
+  if (!GetCompilerOptions().IsBootImage() && !GetCompilerOptions().IsBootImageExtension()) {
     return;
   }
 
+  // Make sure the File[] class is in the primary boot image. b/150319075
+  // TODO: Implement support for array classes in profiles and remove this workaround. b/148067697
+  if (GetCompilerOptions().IsBootImage()) {
+    image_classes->insert("[Ljava/io/File;");
+  }
+
   TimingLogger::ScopedTiming t("LoadImageClasses", timings);
-  // Make a first class to load all classes explicitly listed in the file
+  // Make a first pass to load all classes explicitly listed in the file
   Thread* self = Thread::Current();
   ScopedObjectAccess soa(self);
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -1061,7 +1100,7 @@
         hs.NewHandle(class_linker->FindSystemClass(self, descriptor.c_str())));
     if (klass == nullptr) {
       VLOG(compiler) << "Failed to find class " << descriptor;
-      it = image_classes->erase(it);
+      it = image_classes->erase(it);  // May cause some descriptors to be revisited.
       self->ClearException();
     } else {
       ++it;
@@ -1114,7 +1153,9 @@
   RecordImageClassesVisitor visitor(image_classes);
   class_linker->VisitClasses(&visitor);
 
-  CHECK(!image_classes->empty());
+  if (GetCompilerOptions().IsBootImage()) {
+    CHECK(!image_classes->empty());
+  }
 }
 
 static void MaybeAddToImageClasses(Thread* self,
@@ -1122,9 +1163,15 @@
                                    HashSet<std::string>* image_classes)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK_EQ(self, Thread::Current());
-  StackHandleScope<1> hs(self);
+  Runtime* runtime = Runtime::Current();
+  gc::Heap* heap = runtime->GetHeap();
+  if (heap->ObjectIsInBootImageSpace(klass)) {
+    // We're compiling a boot image extension and the class is already
+    // in the boot image we're compiling against.
+    return;
+  }
+  const PointerSize pointer_size = runtime->GetClassLinker()->GetImagePointerSize();
   std::string temp;
-  const PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
   while (!klass->IsObjectClass()) {
     const char* descriptor = klass->GetDescriptor(&temp);
     if (image_classes->find(std::string_view(descriptor)) != image_classes->end()) {
@@ -1151,15 +1198,15 @@
 // Note: we can use object pointers because we suspend all threads.
 class ClinitImageUpdate {
  public:
-  static ClinitImageUpdate* Create(VariableSizedHandleScope& hs,
-                                   HashSet<std::string>* image_class_descriptors,
-                                   Thread* self,
-                                   ClassLinker* linker) {
-    std::unique_ptr<ClinitImageUpdate> res(new ClinitImageUpdate(hs,
-                                                                 image_class_descriptors,
-                                                                 self,
-                                                                 linker));
-    return res.release();
+  ClinitImageUpdate(HashSet<std::string>* image_class_descriptors,
+                    Thread* self) REQUIRES_SHARED(Locks::mutator_lock_)
+      : hs_(self),
+        image_class_descriptors_(image_class_descriptors),
+        self_(self) {
+    CHECK(image_class_descriptors != nullptr);
+
+    // Make sure nobody interferes with us.
+    old_cause_ = self->StartAssertNoThreadSuspension("Boot image closure");
   }
 
   ~ClinitImageUpdate() {
@@ -1188,42 +1235,49 @@
   void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
 
   void Walk() REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Find all the already-marked classes.
+    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
+    FindImageClassesVisitor visitor(this);
+    Runtime::Current()->GetClassLinker()->VisitClasses(&visitor);
+
     // Use the initial classes as roots for a search.
     for (Handle<mirror::Class> klass_root : image_classes_) {
       VisitClinitClassesObject(klass_root.Get());
     }
-    Thread* self = Thread::Current();
     ScopedAssertNoThreadSuspension ants(__FUNCTION__);
     for (Handle<mirror::Class> h_klass : to_insert_) {
-      MaybeAddToImageClasses(self, h_klass.Get(), image_class_descriptors_);
+      MaybeAddToImageClasses(self_, h_klass.Get(), image_class_descriptors_);
     }
   }
 
  private:
   class FindImageClassesVisitor : public ClassVisitor {
    public:
-    explicit FindImageClassesVisitor(VariableSizedHandleScope& hs,
-                                     ClinitImageUpdate* data)
-        : data_(data),
-          hs_(hs) {}
+    explicit FindImageClassesVisitor(ClinitImageUpdate* data)
+        : data_(data) {}
 
     bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
+      bool resolved = klass->IsResolved();
+      DCHECK(resolved || klass->IsErroneousUnresolved());
+      bool can_include_in_image = LIKELY(resolved) && CanIncludeInCurrentImage(klass);
       std::string temp;
-      std::string_view name(klass->GetDescriptor(&temp));
-      auto it = data_->image_class_descriptors_->find(name);
+      std::string_view descriptor(klass->GetDescriptor(&temp));
+      auto it = data_->image_class_descriptors_->find(descriptor);
       if (it != data_->image_class_descriptors_->end()) {
-        if (LIKELY(klass->IsResolved())) {
-          data_->image_classes_.push_back(hs_.NewHandle(klass));
+        if (can_include_in_image) {
+          data_->image_classes_.push_back(data_->hs_.NewHandle(klass));
         } else {
-          DCHECK(klass->IsErroneousUnresolved());
-          VLOG(compiler) << "Removing unresolved class from image classes: " << name;
+          VLOG(compiler) << "Removing " << (resolved ? "unsuitable" : "unresolved")
+              << " class from image classes: " << descriptor;
           data_->image_class_descriptors_->erase(it);
         }
-      } else {
+      } else if (can_include_in_image) {
         // Check whether it is initialized and has a clinit. They must be kept, too.
         if (klass->IsInitialized() && klass->FindClassInitializer(
             Runtime::Current()->GetClassLinker()->GetImagePointerSize()) != nullptr) {
-          data_->image_classes_.push_back(hs_.NewHandle(klass));
+          DCHECK(!Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass->GetDexCache()))
+              << klass->PrettyDescriptor();
+          data_->image_classes_.push_back(data_->hs_.NewHandle(klass));
         }
       }
       return true;
@@ -1231,28 +1285,8 @@
 
    private:
     ClinitImageUpdate* const data_;
-    VariableSizedHandleScope& hs_;
   };
 
-  ClinitImageUpdate(VariableSizedHandleScope& hs,
-                    HashSet<std::string>* image_class_descriptors,
-                    Thread* self,
-                    ClassLinker* linker) REQUIRES_SHARED(Locks::mutator_lock_)
-      : hs_(hs),
-        image_class_descriptors_(image_class_descriptors),
-        self_(self) {
-    CHECK(linker != nullptr);
-    CHECK(image_class_descriptors != nullptr);
-
-    // Make sure nobody interferes with us.
-    old_cause_ = self->StartAssertNoThreadSuspension("Boot image closure");
-
-    // Find all the already-marked classes.
-    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-    FindImageClassesVisitor visitor(hs_, this);
-    linker->VisitClasses(&visitor);
-  }
-
   void VisitClinitClassesObject(mirror::Object* object) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(object != nullptr);
@@ -1279,7 +1313,7 @@
     }
   }
 
-  VariableSizedHandleScope& hs_;
+  mutable VariableSizedHandleScope hs_;
   mutable std::vector<Handle<mirror::Class>> to_insert_;
   mutable std::unordered_set<mirror::Object*> marked_objects_;
   HashSet<std::string>* const image_class_descriptors_;
@@ -1292,23 +1326,16 @@
 
 void CompilerDriver::UpdateImageClasses(TimingLogger* timings,
                                         /*inout*/ HashSet<std::string>* image_classes) {
-  if (GetCompilerOptions().IsBootImage()) {
+  if (GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension()) {
     TimingLogger::ScopedTiming t("UpdateImageClasses", timings);
 
-    Runtime* runtime = Runtime::Current();
-
     // Suspend all threads.
     ScopedSuspendAll ssa(__FUNCTION__);
 
-    VariableSizedHandleScope hs(Thread::Current());
-    std::string error_msg;
-    std::unique_ptr<ClinitImageUpdate> update(ClinitImageUpdate::Create(hs,
-                                                                        image_classes,
-                                                                        Thread::Current(),
-                                                                        runtime->GetClassLinker()));
+    ClinitImageUpdate update(image_classes, Thread::Current());
 
     // Do the marking.
-    update->Walk();
+    update.Walk();
   }
 }
 
@@ -1686,7 +1713,7 @@
 
   ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, dex_files,
                                      thread_pool);
-  if (GetCompilerOptions().IsBootImage()) {
+  if (GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension()) {
     // For images we resolve all types, such as array, whereas for applications just those with
     // classdefs are resolved by ResolveClassFieldsAndMethods.
     TimingLogger::ScopedTiming t("Resolve Types", timings);
@@ -1770,7 +1797,9 @@
     return false;
   }
 
-  bool compiler_only_verifies = !GetCompilerOptions().IsAnyCompilationEnabled();
+  bool compiler_only_verifies =
+      !GetCompilerOptions().IsAnyCompilationEnabled() &&
+      !GetCompilerOptions().IsGeneratingImage();
 
   // We successfully validated the dependencies, now update class status
   // of verified classes. Note that the dependencies also record which classes
@@ -1830,7 +1859,7 @@
   // the existing `verifier_deps` is not valid anymore, create a new one for
   // non boot image compilation. The verifier will need it to record the new dependencies.
   // Then dex2oat can update the vdex file with these new dependencies.
-  if (!GetCompilerOptions().IsBootImage()) {
+  if (!GetCompilerOptions().IsBootImage() && !GetCompilerOptions().IsBootImageExtension()) {
     // Dex2oat creates the verifier deps.
     // Create the main VerifierDeps, and set it to this thread.
     verifier::VerifierDeps* verifier_deps =
@@ -1860,7 +1889,7 @@
                   timings);
   }
 
-  if (!GetCompilerOptions().IsBootImage()) {
+  if (!GetCompilerOptions().IsBootImage() && !GetCompilerOptions().IsBootImageExtension()) {
     // Merge all VerifierDeps into the main one.
     verifier::VerifierDeps* verifier_deps = Thread::Current()->GetVerifierDeps();
     for (ThreadPoolWorker* worker : parallel_thread_pool_->GetWorkers()) {
@@ -1927,7 +1956,8 @@
         // Force a soft failure for the VerifierDeps. This is a sanity measure, as
         // the vdex file already records that the class hasn't been resolved. It avoids
         // trying to do future verification optimizations when processing the vdex file.
-        DCHECK(failure_kind == verifier::FailureKind::kNoFailure) << failure_kind;
+        DCHECK(failure_kind == verifier::FailureKind::kNoFailure ||
+               failure_kind == verifier::FailureKind::kAccessChecksFailure) << failure_kind;
         failure_kind = verifier::FailureKind::kSoftFailure;
       }
     } else if (&klass->GetDexFile() != &dex_file) {
@@ -1956,12 +1986,20 @@
         manager_->GetCompiler()->AddSoftVerifierFailure();
       }
 
-      CHECK(klass->ShouldVerifyAtRuntime() || klass->IsVerified() || klass->IsErroneous())
+      CHECK(klass->ShouldVerifyAtRuntime() ||
+            klass->IsVerifiedNeedsAccessChecks() ||
+            klass->IsVerified() ||
+            klass->IsErroneous())
           << klass->PrettyDescriptor() << ": state=" << klass->GetStatus();
 
       // Class has a meaningful status for the compiler now, record it.
       ClassReference ref(manager_->GetDexFile(), class_def_index);
-      manager_->GetCompiler()->RecordClassStatus(ref, klass->GetStatus());
+      ClassStatus status = klass->GetStatus();
+      if (status == ClassStatus::kInitialized) {
+        // Initialized classes shall be visibly initialized when loaded from the image.
+        status = ClassStatus::kVisiblyInitialized;
+      }
+      manager_->GetCompiler()->RecordClassStatus(ref, status);
 
       // It is *very* problematic if there are resolution errors in the boot classpath.
       //
@@ -1971,7 +2009,8 @@
       //   --abort-on-hard-verifier-error --abort-on-soft-verifier-error
       // which is the default build system configuration.
       if (kIsDebugBuild) {
-        if (manager_->GetCompiler()->GetCompilerOptions().IsBootImage()) {
+        if (manager_->GetCompiler()->GetCompilerOptions().IsBootImage() ||
+            manager_->GetCompiler()->GetCompilerOptions().IsBootImageExtension()) {
           if (!klass->IsResolved() || klass->IsErroneous()) {
             LOG(FATAL) << "Boot classpath class " << klass->PrettyClass()
                        << " failed to resolve/is erroneous: state= " << klass->GetStatus();
@@ -1980,6 +2019,8 @@
         }
         if (klass->IsVerified()) {
           DCHECK_EQ(failure_kind, verifier::FailureKind::kNoFailure);
+        } else if (klass->IsVerifiedNeedsAccessChecks()) {
+          DCHECK_EQ(failure_kind, verifier::FailureKind::kAccessChecksFailure);
         } else if (klass->ShouldVerifyAtRuntime()) {
           DCHECK_EQ(failure_kind, verifier::FailureKind::kSoftFailure);
         } else {
@@ -2017,6 +2058,9 @@
                               : verifier::HardFailLogMode::kLogWarning;
   VerifyClassVisitor visitor(&context, log_level);
   context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count);
+
+  // Make initialized classes visibly initialized.
+  class_linker->MakeInitializedClassesVisiblyInitialized(Thread::Current(), /*wait=*/ true);
 }
 
 class SetVerifiedClassVisitor : public CompilationVisitor {
@@ -2073,7 +2117,7 @@
                                         ThreadPool* thread_pool,
                                         size_t thread_count,
                                         TimingLogger* timings) {
-  TimingLogger::ScopedTiming t("Verify Dex File", timings);
+  TimingLogger::ScopedTiming t("Set Verified Dex File", timings);
   if (!compiled_classes_.HaveDexFile(&dex_file)) {
     compiled_classes_.AddDexFile(&dex_file);
   }
@@ -2122,20 +2166,31 @@
     const char* descriptor = dex_file.StringDataByIdx(class_type_id.descriptor_idx_);
     ScopedObjectAccessUnchecked soa(Thread::Current());
     StackHandleScope<3> hs(soa.Self());
-    const bool is_boot_image = manager_->GetCompiler()->GetCompilerOptions().IsBootImage();
-    const bool is_app_image = manager_->GetCompiler()->GetCompilerOptions().IsAppImage();
+    ClassLinker* const class_linker = manager_->GetClassLinker();
+    Runtime* const runtime = Runtime::Current();
+    const CompilerOptions& compiler_options = manager_->GetCompiler()->GetCompilerOptions();
+    const bool is_boot_image = compiler_options.IsBootImage();
+    const bool is_boot_image_extension = compiler_options.IsBootImageExtension();
+    const bool is_app_image = compiler_options.IsAppImage();
 
-    ClassStatus old_status = klass->GetStatus();
-    // Don't initialize classes in boot space when compiling app image
-    if (is_app_image && klass->IsBootStrapClassLoaded()) {
+    // For boot image extension, do not initialize classes defined
+    // in dex files belonging to the boot image we're compiling against.
+    if (is_boot_image_extension &&
+        runtime->GetHeap()->ObjectIsInBootImageSpace(klass->GetDexCache())) {
       // Also return early and don't store the class status in the recorded class status.
       return;
     }
+    // Do not initialize classes in boot space when compiling app (with or without image).
+    if ((!is_boot_image && !is_boot_image_extension) && klass->IsBootStrapClassLoaded()) {
+      // Also return early and don't store the class status in the recorded class status.
+      return;
+    }
+    ClassStatus old_status = klass->GetStatus();
     // Only try to initialize classes that were successfully verified.
     if (klass->IsVerified()) {
       // Attempt to initialize the class but bail if we either need to initialize the super-class
       // or static fields.
-      manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, false);
+      class_linker->EnsureInitialized(soa.Self(), klass, false, false);
       old_status = klass->GetStatus();
       if (!klass->IsInitialized()) {
         // We don't want non-trivial class initialization occurring on multiple threads due to
@@ -2150,30 +2205,32 @@
         ObjectLock<mirror::Class> lock(soa.Self(), h_klass);
         // Attempt to initialize allowing initialization of parent classes but still not static
         // fields.
-        // Initialize dependencies first only for app image, to make TryInitialize recursive.
-        bool is_superclass_initialized = !is_app_image ? true :
-            InitializeDependencies(klass, class_loader, soa.Self());
-        if (!is_app_image || (is_app_image && is_superclass_initialized)) {
-          manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, false, true);
+        // Initialize dependencies first only for app or boot image extension,
+        // to make TryInitializeClass() recursive.
+        bool try_initialize_with_superclasses =
+            is_boot_image ? true : InitializeDependencies(klass, class_loader, soa.Self());
+        if (try_initialize_with_superclasses) {
+          class_linker->EnsureInitialized(soa.Self(), klass, false, true);
           // It's OK to clear the exception here since the compiler is supposed to be fault
           // tolerant and will silently not initialize classes that have exceptions.
           soa.Self()->ClearException();
         }
-        // Otherwise it's in app image but superclasses can't be initialized, no need to proceed.
+        // Otherwise it's in app image or boot image extension but superclasses
+        // cannot be initialized, no need to proceed.
         old_status = klass->GetStatus();
 
-        bool too_many_encoded_fields = !is_boot_image &&
+        bool too_many_encoded_fields = (!is_boot_image && !is_boot_image_extension) &&
             klass->NumStaticFields() > kMaxEncodedFields;
 
         // If the class was not initialized, we can proceed to see if we can initialize static
         // fields. Limit the max number of encoded fields.
         if (!klass->IsInitialized() &&
-            (is_app_image || is_boot_image) &&
-            is_superclass_initialized &&
+            (is_app_image || is_boot_image || is_boot_image_extension) &&
+            try_initialize_with_superclasses &&
             !too_many_encoded_fields &&
-            manager_->GetCompiler()->GetCompilerOptions().IsImageClass(descriptor)) {
+            compiler_options.IsImageClass(descriptor)) {
           bool can_init_static_fields = false;
-          if (is_boot_image) {
+          if (is_boot_image || is_boot_image_extension) {
             // We need to initialize static fields, we only do this for image classes that aren't
             // marked with the $NoPreloadHolder (which implies this should not be initialized
             // early).
@@ -2182,11 +2239,15 @@
             CHECK(is_app_image);
             // The boot image case doesn't need to recursively initialize the dependencies with
             // special logic since the class linker already does this.
+            // Optimization will be disabled in debuggable build, because in debuggable mode we
+            // want the <clinit> behavior to be observable for the debugger, so we don't do the
+            // <clinit> at compile time.
             can_init_static_fields =
                 ClassLinker::kAppImageMayContainStrings &&
                 !soa.Self()->IsExceptionPending() &&
-                is_superclass_initialized &&
-                NoClinitInDependency(klass, soa.Self(), &class_loader);
+                !compiler_options.GetDebuggable() &&
+                (compiler_options.InitializeAppImageClasses() ||
+                 NoClinitInDependency(klass, soa.Self(), &class_loader));
             // TODO The checking for clinit can be removed since it's already
             // checked when init superclass. Currently keep it because it contains
             // processing of intern strings. Will be removed later when intern strings
@@ -2199,12 +2260,23 @@
             // exclusive access to the runtime and the transaction. To achieve this, we could use
             // a ReaderWriterMutex but we're holding the mutator lock so we fail mutex sanity
             // checks in Thread::AssertThreadSuspensionIsAllowable.
-            Runtime* const runtime = Runtime::Current();
+
+            // Resolve and initialize the exception type before enabling the transaction in case
+            // the transaction aborts and cannot resolve the type.
+            // TransactionAbortError is not initialized and not in boot image, needed only by
+            // compiler and will be pruned by ImageWriter.
+            Handle<mirror::Class> exception_class =
+                hs.NewHandle(class_linker->FindClass(soa.Self(),
+                                                     Transaction::kAbortExceptionSignature,
+                                                     class_loader));
+            bool exception_initialized =
+                class_linker->EnsureInitialized(soa.Self(), exception_class, true, true);
+            DCHECK(exception_initialized);
+
             // Run the class initializer in transaction mode.
             runtime->EnterTransactionMode(is_app_image, klass.Get());
 
-            bool success = manager_->GetClassLinker()->EnsureInitialized(soa.Self(), klass, true,
-                                                                         true);
+            bool success = class_linker->EnsureInitialized(soa.Self(), klass, true, true);
             // TODO we detach transaction from runtime to indicate we quit the transactional
             // mode which prevents the GC from visiting objects modified during the transaction.
             // Ensure GC is not run so don't access freed objects when aborting transaction.
@@ -2216,9 +2288,10 @@
                 runtime->ExitTransactionMode();
                 DCHECK(!runtime->IsActiveTransaction());
 
-                if (is_boot_image) {
-                  // For boot image, we want to put the updated status in the oat class since we
-                  // can't reject the image anyways.
+                if (is_boot_image || is_boot_image_extension) {
+                  // For boot image and boot image extension, we want to put the updated
+                  // status in the oat class. This is not the case for app image as we
+                  // want to keep the ability to load the oat file without the app image.
                   old_status = klass->GetStatus();
                 }
               } else {
@@ -2238,10 +2311,12 @@
               }
             }
 
-            if (!success) {
+            if (!success && (is_boot_image || is_boot_image_extension)) {
               // On failure, still intern strings of static fields and seen in <clinit>, as these
               // will be created in the zygote. This is separated from the transaction code just
               // above as we will allocate strings, so must be allowed to suspend.
+              // We only need to intern strings for boot image and boot image extension
+              // because classes that failed to be initialized will not appear in app image.
               if (&klass->GetDexFile() == manager_->GetDexFile()) {
                 InternStrings(klass, class_loader);
               } else {
@@ -2257,8 +2332,7 @@
 
         // If the class still isn't initialized, at least try some checks that initialization
         // would do so they can be skipped at runtime.
-        if (!klass->IsInitialized() &&
-            manager_->GetClassLinker()->ValidateSuperClassDescriptors(klass)) {
+        if (!klass->IsInitialized() && class_linker->ValidateSuperClassDescriptors(klass)) {
           old_status = ClassStatus::kSuperclassValidated;
         } else {
           soa.Self()->ClearException();
@@ -2266,6 +2340,10 @@
         soa.Self()->AssertNoPendingException();
       }
     }
+    if (old_status == ClassStatus::kInitialized) {
+      // Initialized classes shall be visibly initialized when loaded from the image.
+      old_status = ClassStatus::kVisiblyInitialized;
+    }
     // Record the final class status if necessary.
     ClassReference ref(&dex_file, klass->GetDexClassDefIndex());
     // Back up the status before doing initialization for static encoded fields,
@@ -2276,7 +2354,8 @@
  private:
   void InternStrings(Handle<mirror::Class> klass, Handle<mirror::ClassLoader> class_loader)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    DCHECK(manager_->GetCompiler()->GetCompilerOptions().IsBootImage());
+    DCHECK(manager_->GetCompiler()->GetCompilerOptions().IsBootImage() ||
+           manager_->GetCompiler()->GetCompilerOptions().IsBootImageExtension());
     DCHECK(klass->IsVerified());
     DCHECK(!klass->IsInitialized());
 
@@ -2379,35 +2458,34 @@
   }
 
   // Initialize the klass's dependencies recursively before initializing itself.
-  // Checking for interfaces is also necessary since interfaces can contain
-  // both default methods and static encoded fields.
+  // Checking for interfaces is also necessary since interfaces that contain
+  // default methods must be initialized before the class.
   bool InitializeDependencies(const Handle<mirror::Class>& klass,
                               Handle<mirror::ClassLoader> class_loader,
                               Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (klass->HasSuperClass()) {
-      ObjPtr<mirror::Class> super_class = klass->GetSuperClass();
       StackHandleScope<1> hs(self);
-      Handle<mirror::Class> handle_scope_super(hs.NewHandle(super_class));
-      if (!handle_scope_super->IsInitialized()) {
-        this->TryInitializeClass(handle_scope_super, class_loader);
-        if (!handle_scope_super->IsInitialized()) {
+      Handle<mirror::Class> super_class = hs.NewHandle(klass->GetSuperClass());
+      if (!super_class->IsInitialized()) {
+        this->TryInitializeClass(super_class, class_loader);
+        if (!super_class->IsInitialized()) {
           return false;
         }
       }
     }
 
-    uint32_t num_if = klass->NumDirectInterfaces();
-    for (size_t i = 0; i < num_if; i++) {
-      ObjPtr<mirror::Class>
-          interface = mirror::Class::GetDirectInterface(self, klass.Get(), i);
-      StackHandleScope<1> hs(self);
-      Handle<mirror::Class> handle_interface(hs.NewHandle(interface));
-
-      TryInitializeClass(handle_interface, class_loader);
-
-      if (!handle_interface->IsInitialized()) {
-        return false;
+    if (!klass->IsInterface()) {
+      size_t num_interfaces = klass->GetIfTableCount();
+      for (size_t i = 0; i < num_interfaces; ++i) {
+        StackHandleScope<1> hs(self);
+        Handle<mirror::Class> iface = hs.NewHandle(klass->GetIfTable()->GetInterface(i));
+        if (iface->HasDefaultMethods() && !iface->IsInitialized()) {
+          TryInitializeClass(iface, class_loader);
+          if (!iface->IsInitialized()) {
+            return false;
+          }
+        }
       }
     }
 
@@ -2469,14 +2547,19 @@
   ParallelCompilationManager context(class_linker, jni_class_loader, this, &dex_file, dex_files,
                                      init_thread_pool);
 
-  if (GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsAppImage()) {
-    // Set the concurrency thread to 1 to support initialization for App Images since transaction
+  if (GetCompilerOptions().IsBootImage() ||
+      GetCompilerOptions().IsBootImageExtension() ||
+      GetCompilerOptions().IsAppImage()) {
+    // Set the concurrency thread to 1 to support initialization for images since transaction
     // doesn't support multithreading now.
     // TODO: remove this when transactional mode supports multithreading.
     init_thread_count = 1U;
   }
   InitializeClassVisitor visitor(&context);
   context.ForAll(0, dex_file.NumClassDefs(), &visitor, init_thread_count);
+
+  // Make initialized classes visibly initialized.
+  class_linker->MakeInitializedClassesVisiblyInitialized(Thread::Current(), /*wait=*/ true);
 }
 
 class InitializeArrayClassesAndCreateConflictTablesVisitor : public ClassVisitor {
@@ -2537,7 +2620,9 @@
     CHECK(dex_file != nullptr);
     InitializeClasses(class_loader, *dex_file, dex_files, timings);
   }
-  if (GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsAppImage()) {
+  if (GetCompilerOptions().IsBootImage() ||
+      GetCompilerOptions().IsBootImageExtension() ||
+      GetCompilerOptions().IsAppImage()) {
     // Make sure that we call EnsureIntiailized on all the array classes to call
     // SetVerificationAttempted so that the access flags are set. If we do not do this they get
     // changed at runtime resulting in more dirty image pages.
@@ -2549,7 +2634,7 @@
     Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&visitor);
     visitor.FillAllIMTAndConflictTables();
   }
-  if (GetCompilerOptions().IsBootImage()) {
+  if (GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension()) {
     // Prune garbage objects created during aborted transactions.
     Runtime::Current()->GetHeap()->CollectGarbage(/* clear_soft_references= */ true);
   }
@@ -2738,9 +2823,10 @@
     case ClassStatus::kNotReady:
     case ClassStatus::kResolved:
     case ClassStatus::kRetryVerificationAtRuntime:
+    case ClassStatus::kVerifiedNeedsAccessChecks:
     case ClassStatus::kVerified:
     case ClassStatus::kSuperclassValidated:
-    case ClassStatus::kInitialized:
+    case ClassStatus::kVisiblyInitialized:
       break;  // Expected states.
     default:
       LOG(FATAL) << "Unexpected class status for class "
diff --git a/dex2oat/driver/compiler_driver.h b/dex2oat/driver/compiler_driver.h
index b474de5..4f2cb81 100644
--- a/dex2oat/driver/compiler_driver.h
+++ b/dex2oat/driver/compiler_driver.h
@@ -93,6 +93,8 @@
 
   ~CompilerDriver();
 
+  void PrepareDexFilesForOatFile(TimingLogger* timings);
+
   // Set dex files classpath.
   void SetClasspathDexFiles(const std::vector<const DexFile*>& dex_files);
 
@@ -121,7 +123,8 @@
   }
 
   // Generate the trampolines that are invoked by unresolved direct methods.
-  std::unique_ptr<const std::vector<uint8_t>> CreateJniDlsymLookup() const;
+  std::unique_ptr<const std::vector<uint8_t>> CreateJniDlsymLookupTrampoline() const;
+  std::unique_ptr<const std::vector<uint8_t>> CreateJniDlsymLookupCriticalTrampoline() const;
   std::unique_ptr<const std::vector<uint8_t>> CreateQuickGenericJniTrampoline() const;
   std::unique_ptr<const std::vector<uint8_t>> CreateQuickImtConflictTrampoline() const;
   std::unique_ptr<const std::vector<uint8_t>> CreateQuickResolutionTrampoline() const;
diff --git a/dex2oat/driver/compiler_driver_test.cc b/dex2oat/driver/compiler_driver_test.cc
index 81262d3..3096fc3 100644
--- a/dex2oat/driver/compiler_driver_test.cc
+++ b/dex2oat/driver/compiler_driver_test.cc
@@ -203,10 +203,10 @@
 
     ProfileCompilationInfo info;
     for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
-      profile_info_.AddMethodIndex(ProfileCompilationInfo::MethodHotness::kFlagHot,
-                                   MethodReference(dex_file.get(), 1));
-      profile_info_.AddMethodIndex(ProfileCompilationInfo::MethodHotness::kFlagHot,
-                                   MethodReference(dex_file.get(), 2));
+      profile_info_.AddMethod(ProfileMethodInfo(MethodReference(dex_file.get(), 1)),
+                              ProfileCompilationInfo::MethodHotness::kFlagHot);
+      profile_info_.AddMethod(ProfileMethodInfo(MethodReference(dex_file.get(), 2)),
+                              ProfileCompilationInfo::MethodHotness::kFlagHot);
     }
     return &profile_info_;
   }
@@ -308,7 +308,7 @@
     bool found = compiler_driver_->GetCompiledClass(
         ClassReference(&klass->GetDexFile(), klass->GetDexTypeIndex().index_), &status);
     ASSERT_TRUE(found);
-    EXPECT_EQ(status, ClassStatus::kVerified);
+    EXPECT_GE(status, ClassStatus::kVerified);
   }
 };
 
@@ -353,8 +353,8 @@
        ++i) {
     const ClassStatus expected_status = enum_cast<ClassStatus>(i);
     // Skip unsupported status that are not supposed to be ever recorded.
-    if (expected_status == ClassStatus::kVerifyingAtRuntime ||
-        expected_status == ClassStatus::kInitializing) {
+    if (expected_status == ClassStatus::kInitializing ||
+        expected_status == ClassStatus::kInitialized) {
       continue;
     }
     compiler_driver_->RecordClassStatus(ref, expected_status);
diff --git a/dex2oat/linker/arm/relative_patcher_arm_base.cc b/dex2oat/linker/arm/relative_patcher_arm_base.cc
index 828dc5d..35e799a 100644
--- a/dex2oat/linker/arm/relative_patcher_arm_base.cc
+++ b/dex2oat/linker/arm/relative_patcher_arm_base.cc
@@ -386,6 +386,12 @@
   return ThunkKey(ThunkType::kMethodCall);
 }
 
+ArmBaseRelativePatcher::ThunkKey ArmBaseRelativePatcher::GetEntrypointCallKey(
+    const LinkerPatch& patch) {
+  DCHECK_EQ(patch.GetType(), LinkerPatch::Type::kCallEntrypoint);
+  return ThunkKey(ThunkType::kEntrypointCall, patch.EntrypointOffset());
+}
+
 ArmBaseRelativePatcher::ThunkKey ArmBaseRelativePatcher::GetBakerThunkKey(
     const LinkerPatch& patch) {
   DCHECK_EQ(patch.GetType(), LinkerPatch::Type::kBakerReadBarrierBranch);
@@ -399,6 +405,7 @@
   for (const LinkerPatch& patch : compiled_method->GetPatches()) {
     uint32_t patch_offset = code_offset + patch.LiteralOffset();
     ThunkKey key(static_cast<ThunkType>(-1));
+    bool simple_thunk_patch = false;
     ThunkData* old_data = nullptr;
     if (patch.GetType() == LinkerPatch::Type::kCallRelative) {
       key = GetMethodCallKey();
@@ -411,8 +418,14 @@
       } else {
         old_data = method_call_thunk_;
       }
+    } else if (patch.GetType() == LinkerPatch::Type::kCallEntrypoint) {
+      key = GetEntrypointCallKey(patch);
+      simple_thunk_patch = true;
     } else if (patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch) {
       key = GetBakerThunkKey(patch);
+      simple_thunk_patch = true;
+    }
+    if (simple_thunk_patch) {
       auto lb = thunks_.lower_bound(key);
       if (lb == thunks_.end() || thunks_.key_comp()(key, lb->first)) {
         uint32_t max_next_offset = CalculateMaxNextOffset(patch_offset, key);
diff --git a/dex2oat/linker/arm/relative_patcher_arm_base.h b/dex2oat/linker/arm/relative_patcher_arm_base.h
index 0eb4417..bf3e81f 100644
--- a/dex2oat/linker/arm/relative_patcher_arm_base.h
+++ b/dex2oat/linker/arm/relative_patcher_arm_base.h
@@ -44,6 +44,7 @@
 
   enum class ThunkType {
     kMethodCall,              // Method call thunk.
+    kEntrypointCall,          // Entrypoint call.
     kBakerReadBarrier,        // Baker read barrier.
   };
 
@@ -84,6 +85,7 @@
   };
 
   static ThunkKey GetMethodCallKey();
+  static ThunkKey GetEntrypointCallKey(const LinkerPatch& patch);
   static ThunkKey GetBakerThunkKey(const LinkerPatch& patch);
 
   uint32_t ReserveSpaceInternal(uint32_t offset,
diff --git a/dex2oat/linker/arm/relative_patcher_thumb2.cc b/dex2oat/linker/arm/relative_patcher_thumb2.cc
index 697fb09..72b93ec 100644
--- a/dex2oat/linker/arm/relative_patcher_thumb2.cc
+++ b/dex2oat/linker/arm/relative_patcher_thumb2.cc
@@ -58,28 +58,10 @@
                                       uint32_t literal_offset,
                                       uint32_t patch_offset,
                                       uint32_t target_offset) {
-  DCHECK_LE(literal_offset + 4u, code->size());
-  DCHECK_EQ(literal_offset & 1u, 0u);
-  DCHECK_EQ(patch_offset & 1u, 0u);
+  DCHECK_ALIGNED(patch_offset, 2u);
   DCHECK_EQ(target_offset & 1u, 1u);  // Thumb2 mode bit.
   uint32_t displacement = CalculateMethodCallDisplacement(patch_offset, target_offset & ~1u);
-  displacement -= kPcDisplacement;  // The base PC is at the end of the 4-byte patch.
-  DCHECK_EQ(displacement & 1u, 0u);
-  DCHECK((displacement >> 24) == 0u || (displacement >> 24) == 255u);  // 25-bit signed.
-  uint32_t signbit = (displacement >> 31) & 0x1;
-  uint32_t i1 = (displacement >> 23) & 0x1;
-  uint32_t i2 = (displacement >> 22) & 0x1;
-  uint32_t imm10 = (displacement >> 12) & 0x03ff;
-  uint32_t imm11 = (displacement >> 1) & 0x07ff;
-  uint32_t j1 = i1 ^ (signbit ^ 1);
-  uint32_t j2 = i2 ^ (signbit ^ 1);
-  uint32_t value = (signbit << 26) | (j1 << 13) | (j2 << 11) | (imm10 << 16) | imm11;
-  value |= 0xf000d000;  // BL
-
-  // Check that we're just overwriting an existing BL.
-  DCHECK_EQ(GetInsn32(code, literal_offset) & 0xf800d000, 0xf000d000);
-  // Write the new BL.
-  SetInsn32(code, literal_offset, value);
+  PatchBl(code, literal_offset, displacement);
 }
 
 void Thumb2RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
@@ -102,6 +84,17 @@
   SetInsn32(code, literal_offset, insn);
 }
 
+void Thumb2RelativePatcher::PatchEntrypointCall(std::vector<uint8_t>* code,
+                                                const LinkerPatch& patch,
+                                                uint32_t patch_offset) {
+  DCHECK_ALIGNED(patch_offset, 2u);
+  ThunkKey key = GetEntrypointCallKey(patch);
+  uint32_t target_offset = GetThunkTargetOffset(key, patch_offset);
+  DCHECK_ALIGNED(target_offset, 4u);
+  uint32_t displacement = target_offset - patch_offset;
+  PatchBl(code, patch.LiteralOffset(), displacement);
+}
+
 void Thumb2RelativePatcher::PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                                         const LinkerPatch& patch,
                                                         uint32_t patch_offset) {
@@ -127,6 +120,7 @@
 uint32_t Thumb2RelativePatcher::MaxPositiveDisplacement(const ThunkKey& key) {
   switch (key.GetType()) {
     case ThunkType::kMethodCall:
+    case ThunkType::kEntrypointCall:
       return kMaxMethodCallPositiveDisplacement;
     case ThunkType::kBakerReadBarrier:
       return kMaxBcondPositiveDisplacement;
@@ -136,12 +130,35 @@
 uint32_t Thumb2RelativePatcher::MaxNegativeDisplacement(const ThunkKey& key) {
   switch (key.GetType()) {
     case ThunkType::kMethodCall:
+    case ThunkType::kEntrypointCall:
       return kMaxMethodCallNegativeDisplacement;
     case ThunkType::kBakerReadBarrier:
       return kMaxBcondNegativeDisplacement;
   }
 }
 
+void Thumb2RelativePatcher::PatchBl(std::vector<uint8_t>* code,
+                                    uint32_t literal_offset,
+                                    uint32_t displacement) {
+  displacement -= kPcDisplacement;  // The base PC is at the end of the 4-byte patch.
+  DCHECK_EQ(displacement & 1u, 0u);
+  DCHECK((displacement >> 24) == 0u || (displacement >> 24) == 255u);  // 25-bit signed.
+  uint32_t signbit = (displacement >> 31) & 0x1;
+  uint32_t i1 = (displacement >> 23) & 0x1;
+  uint32_t i2 = (displacement >> 22) & 0x1;
+  uint32_t imm10 = (displacement >> 12) & 0x03ff;
+  uint32_t imm11 = (displacement >> 1) & 0x07ff;
+  uint32_t j1 = i1 ^ (signbit ^ 1);
+  uint32_t j2 = i2 ^ (signbit ^ 1);
+  uint32_t value = (signbit << 26) | (j1 << 13) | (j2 << 11) | (imm10 << 16) | imm11;
+  value |= 0xf000d000;  // BL
+
+  // Check that we're just overwriting an existing BL.
+  DCHECK_EQ(GetInsn32(code, literal_offset) & 0xf800d000, 0xf000d000);
+  // Write the new BL.
+  SetInsn32(code, literal_offset, value);
+}
+
 void Thumb2RelativePatcher::SetInsn32(std::vector<uint8_t>* code, uint32_t offset, uint32_t value) {
   DCHECK_LE(offset + 4u, code->size());
   DCHECK_ALIGNED(offset, 2u);
diff --git a/dex2oat/linker/arm/relative_patcher_thumb2.h b/dex2oat/linker/arm/relative_patcher_thumb2.h
index dbf64a1..d360482 100644
--- a/dex2oat/linker/arm/relative_patcher_thumb2.h
+++ b/dex2oat/linker/arm/relative_patcher_thumb2.h
@@ -42,6 +42,9 @@
                                 const LinkerPatch& patch,
                                 uint32_t patch_offset,
                                 uint32_t target_offset) override;
+  void PatchEntrypointCall(std::vector<uint8_t>* code,
+                           const LinkerPatch& patch,
+                           uint32_t patch_offset) override;
   void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                    const LinkerPatch& patch,
                                    uint32_t patch_offset) override;
@@ -51,7 +54,9 @@
   uint32_t MaxNegativeDisplacement(const ThunkKey& key) override;
 
  private:
-  void SetInsn32(std::vector<uint8_t>* code, uint32_t offset, uint32_t value);
+  static void PatchBl(std::vector<uint8_t>* code, uint32_t literal_offset, uint32_t displacement);
+
+  static void SetInsn32(std::vector<uint8_t>* code, uint32_t offset, uint32_t value);
   static uint32_t GetInsn32(ArrayRef<const uint8_t> code, uint32_t offset);
 
   template <typename Vector>
diff --git a/dex2oat/linker/arm/relative_patcher_thumb2_test.cc b/dex2oat/linker/arm/relative_patcher_thumb2_test.cc
index 04a897e..296bf61 100644
--- a/dex2oat/linker/arm/relative_patcher_thumb2_test.cc
+++ b/dex2oat/linker/arm/relative_patcher_thumb2_test.cc
@@ -225,7 +225,8 @@
 
     // Make sure the ThunkProvider has all the necessary thunks.
     for (const LinkerPatch& patch : patches) {
-      if (patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch ||
+      if (patch.GetType() == LinkerPatch::Type::kCallEntrypoint ||
+          patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch ||
           patch.GetType() == LinkerPatch::Type::kCallRelative) {
         std::string debug_name;
         std::vector<uint8_t> thunk_code = CompileThunk(patch, &debug_name);
@@ -662,6 +663,35 @@
   ASSERT_LT(GetMethodOffset(1u), 0xfcu);
 }
 
+TEST_F(Thumb2RelativePatcherTest, EntrypointCall) {
+  constexpr uint32_t kEntrypointOffset = 512;
+  const LinkerPatch patches[] = {
+      LinkerPatch::CallEntrypointPatch(0u, kEntrypointOffset),
+  };
+  AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
+  Link();
+
+  uint32_t method_offset = GetMethodOffset(1u);
+  uint32_t thunk_offset = CompiledCode::AlignCode(method_offset + kCallCode.size(),
+                                                  InstructionSet::kThumb2);
+  uint32_t diff = thunk_offset - method_offset - kPcAdjustment;
+  ASSERT_TRUE(IsAligned<2u>(diff));
+  ASSERT_LT(diff >> 1, 1u << 8);  // Simple encoding, (diff >> 1) fits into 8 bits.
+  auto expected_code = GenNopsAndBl(0u, kBlPlus0 | ((diff >> 1) & 0xffu));
+  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
+
+  // Verify the thunk.
+  uint32_t ldr_pc_tr_offset =
+      0xf8d00000 |                        // LDR Rt, [Rn, #<imm12>]
+      (/* tr */ 9 << 16) |                // Rn = TR
+      (/* pc */ 15 << 12) |               // Rt = PC
+      kEntrypointOffset;                  // imm12
+  uint16_t bkpt = 0xbe00;
+  ASSERT_LE(6u, output_.size() - thunk_offset);
+  EXPECT_EQ(ldr_pc_tr_offset, GetOutputInsn32(thunk_offset));
+  EXPECT_EQ(bkpt, GetOutputInsn16(thunk_offset + 4u));
+}
+
 const uint32_t kBakerValidRegs[] = {
     0,  1,  2,  3,  4,  5,  6,  7,
     9, 10, 11,                      // r8 (rMR), IP, SP, LR and PC are reserved.
diff --git a/dex2oat/linker/arm64/relative_patcher_arm64.cc b/dex2oat/linker/arm64/relative_patcher_arm64.cc
index ee8d4d1..2260f66 100644
--- a/dex2oat/linker/arm64/relative_patcher_arm64.cc
+++ b/dex2oat/linker/arm64/relative_patcher_arm64.cc
@@ -58,6 +58,7 @@
 inline bool IsAdrpPatch(const LinkerPatch& patch) {
   switch (patch.GetType()) {
     case LinkerPatch::Type::kCallRelative:
+    case LinkerPatch::Type::kCallEntrypoint:
     case LinkerPatch::Type::kBakerReadBarrierBranch:
       return false;
     case LinkerPatch::Type::kIntrinsicReference:
@@ -189,30 +190,21 @@
 
 void Arm64RelativePatcher::PatchCall(std::vector<uint8_t>* code,
                                      uint32_t literal_offset,
-                                     uint32_t patch_offset, uint32_t
-                                     target_offset) {
-  DCHECK_LE(literal_offset + 4u, code->size());
-  DCHECK_EQ(literal_offset & 3u, 0u);
-  DCHECK_EQ(patch_offset & 3u, 0u);
-  DCHECK_EQ(target_offset & 3u, 0u);
+                                     uint32_t patch_offset,
+                                     uint32_t target_offset) {
+  DCHECK_ALIGNED(literal_offset, 4u);
+  DCHECK_ALIGNED(patch_offset, 4u);
+  DCHECK_ALIGNED(target_offset, 4u);
   uint32_t displacement = CalculateMethodCallDisplacement(patch_offset, target_offset & ~1u);
-  DCHECK_EQ(displacement & 3u, 0u);
-  DCHECK((displacement >> 27) == 0u || (displacement >> 27) == 31u);  // 28-bit signed.
-  uint32_t insn = (displacement & 0x0fffffffu) >> 2;
-  insn |= 0x94000000;  // BL
-
-  // Check that we're just overwriting an existing BL.
-  DCHECK_EQ(GetInsn(code, literal_offset) & 0xfc000000u, 0x94000000u);
-  // Write the new BL.
-  SetInsn(code, literal_offset, insn);
+  PatchBl(code, literal_offset, displacement);
 }
 
 void Arm64RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
                                                     const LinkerPatch& patch,
                                                     uint32_t patch_offset,
                                                     uint32_t target_offset) {
-  DCHECK_EQ(patch_offset & 3u, 0u);
-  DCHECK_EQ(target_offset & 3u, 0u);
+  DCHECK_ALIGNED(patch_offset, 4u);
+  DCHECK_ALIGNED(target_offset, 4u);
   uint32_t literal_offset = patch.LiteralOffset();
   uint32_t insn = GetInsn(code, literal_offset);
   uint32_t pc_insn_offset = patch.PcInsnOffset();
@@ -307,13 +299,21 @@
   }
 }
 
+void Arm64RelativePatcher::PatchEntrypointCall(std::vector<uint8_t>* code,
+                                               const LinkerPatch& patch,
+                                               uint32_t patch_offset) {
+  DCHECK_ALIGNED(patch_offset, 4u);
+  ThunkKey key = GetEntrypointCallKey(patch);
+  uint32_t target_offset = GetThunkTargetOffset(key, patch_offset);
+  uint32_t displacement = target_offset - patch_offset;
+  PatchBl(code, patch.LiteralOffset(), displacement);
+}
+
 void Arm64RelativePatcher::PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                                        const LinkerPatch& patch,
                                                        uint32_t patch_offset) {
   DCHECK_ALIGNED(patch_offset, 4u);
   uint32_t literal_offset = patch.LiteralOffset();
-  DCHECK_ALIGNED(literal_offset, 4u);
-  DCHECK_LT(literal_offset, code->size());
   uint32_t insn = GetInsn(code, literal_offset);
   DCHECK_EQ(insn & 0xffffffe0u, 0xb5000000);  // CBNZ Xt, +0 (unpatched)
   ThunkKey key = GetBakerThunkKey(patch);
@@ -328,6 +328,7 @@
 uint32_t Arm64RelativePatcher::MaxPositiveDisplacement(const ThunkKey& key) {
   switch (key.GetType()) {
     case ThunkType::kMethodCall:
+    case ThunkType::kEntrypointCall:
       return kMaxMethodCallPositiveDisplacement;
     case ThunkType::kBakerReadBarrier:
       return kMaxBcondPositiveDisplacement;
@@ -337,6 +338,7 @@
 uint32_t Arm64RelativePatcher::MaxNegativeDisplacement(const ThunkKey& key) {
   switch (key.GetType()) {
     case ThunkType::kMethodCall:
+    case ThunkType::kEntrypointCall:
       return kMaxMethodCallNegativeDisplacement;
     case ThunkType::kBakerReadBarrier:
       return kMaxBcondNegativeDisplacement;
@@ -357,6 +359,20 @@
       ((disp & 0x80000000u) >> (31 - 23));
 }
 
+void Arm64RelativePatcher::PatchBl(std::vector<uint8_t>* code,
+                                   uint32_t literal_offset,
+                                   uint32_t displacement) {
+  DCHECK_ALIGNED(displacement, 4u);
+  DCHECK((displacement >> 27) == 0u || (displacement >> 27) == 31u);  // 28-bit signed.
+  uint32_t insn = (displacement & 0x0fffffffu) >> 2;
+  insn |= 0x94000000;  // BL
+
+  // Check that we're just overwriting an existing BL.
+  DCHECK_EQ(GetInsn(code, literal_offset) & 0xfc000000u, 0x94000000u);
+  // Write the new BL.
+  SetInsn(code, literal_offset, insn);
+}
+
 bool Arm64RelativePatcher::NeedsErratum843419Thunk(ArrayRef<const uint8_t> code,
                                                    uint32_t literal_offset,
                                                    uint32_t patch_offset) {
@@ -409,7 +425,7 @@
 
 void Arm64RelativePatcher::SetInsn(std::vector<uint8_t>* code, uint32_t offset, uint32_t value) {
   DCHECK_LE(offset + 4u, code->size());
-  DCHECK_EQ(offset & 3u, 0u);
+  DCHECK_ALIGNED(offset, 4u);
   uint8_t* addr = &(*code)[offset];
   addr[0] = (value >> 0) & 0xff;
   addr[1] = (value >> 8) & 0xff;
@@ -419,7 +435,7 @@
 
 uint32_t Arm64RelativePatcher::GetInsn(ArrayRef<const uint8_t> code, uint32_t offset) {
   DCHECK_LE(offset + 4u, code.size());
-  DCHECK_EQ(offset & 3u, 0u);
+  DCHECK_ALIGNED(offset, 4u);
   const uint8_t* addr = &code[offset];
   return
       (static_cast<uint32_t>(addr[0]) << 0) +
diff --git a/dex2oat/linker/arm64/relative_patcher_arm64.h b/dex2oat/linker/arm64/relative_patcher_arm64.h
index e95d0fe..9ad2c96 100644
--- a/dex2oat/linker/arm64/relative_patcher_arm64.h
+++ b/dex2oat/linker/arm64/relative_patcher_arm64.h
@@ -47,6 +47,9 @@
                                 const LinkerPatch& patch,
                                 uint32_t patch_offset,
                                 uint32_t target_offset) override;
+  void PatchEntrypointCall(std::vector<uint8_t>* code,
+                           const LinkerPatch& patch,
+                           uint32_t patch_offset) override;
   void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                    const LinkerPatch& patch,
                                    uint32_t patch_offset) override;
@@ -57,10 +60,11 @@
 
  private:
   static uint32_t PatchAdrp(uint32_t adrp, uint32_t disp);
+  static void PatchBl(std::vector<uint8_t>* code, uint32_t literal_offset, uint32_t displacement);
 
   static bool NeedsErratum843419Thunk(ArrayRef<const uint8_t> code, uint32_t literal_offset,
                                       uint32_t patch_offset);
-  void SetInsn(std::vector<uint8_t>* code, uint32_t offset, uint32_t value);
+  static void SetInsn(std::vector<uint8_t>* code, uint32_t offset, uint32_t value);
   static uint32_t GetInsn(ArrayRef<const uint8_t> code, uint32_t offset);
 
   template <typename Alloc>
diff --git a/dex2oat/linker/arm64/relative_patcher_arm64_test.cc b/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
index 9e54bbf..8bae5d4 100644
--- a/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
+++ b/dex2oat/linker/arm64/relative_patcher_arm64_test.cc
@@ -198,7 +198,8 @@
 
     // Make sure the ThunkProvider has all the necessary thunks.
     for (const LinkerPatch& patch : patches) {
-      if (patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch ||
+      if (patch.GetType() == LinkerPatch::Type::kCallEntrypoint ||
+          patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch ||
           patch.GetType() == LinkerPatch::Type::kCallRelative) {
         std::string debug_name;
         std::vector<uint8_t> thunk_code = CompileThunk(patch, &debug_name);
@@ -1005,6 +1006,36 @@
       { 0u, 8u });
 }
 
+TEST_F(Arm64RelativePatcherTestDefault, EntrypointCall) {
+  constexpr uint32_t kEntrypointOffset = 512;
+  const LinkerPatch patches[] = {
+      LinkerPatch::CallEntrypointPatch(0u, kEntrypointOffset),
+  };
+  AddCompiledMethod(MethodRef(1u), kCallCode, ArrayRef<const LinkerPatch>(patches));
+  Link();
+
+  uint32_t method_offset = GetMethodOffset(1u);
+  uint32_t thunk_offset = CompiledCode::AlignCode(method_offset + kCallCode.size(),
+                                                  InstructionSet::kArm64);
+  uint32_t diff = thunk_offset - method_offset;
+  ASSERT_TRUE(IsAligned<4u>(diff));
+  ASSERT_LT(diff, 128 * MB);
+  auto expected_code = RawCode({kBlPlus0 | (diff >> 2)});
+  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
+
+  // Verify the thunk.
+  uint32_t ldr_ip0_tr_offset =
+      0xf9400000 |                        // LDR Xt, [Xn, #<simm>]
+      ((kEntrypointOffset >> 3) << 10) |  // imm12 = (simm >> scale), scale = 3
+      (/* tr */ 19 << 5) |                // Xn = TR
+      /* ip0 */ 16;                       // Xt = ip0
+  uint32_t br_ip0 = 0xd61f0000 | (/* ip0 */ 16 << 5);
+  auto expected_thunk = RawCode({ ldr_ip0_tr_offset, br_ip0 });
+  ASSERT_LE(8u, output_.size() - thunk_offset);
+  EXPECT_EQ(ldr_ip0_tr_offset, GetOutputInsn(thunk_offset));
+  EXPECT_EQ(br_ip0, GetOutputInsn(thunk_offset + 4u));
+}
+
 void Arm64RelativePatcherTest::TestBakerField(uint32_t offset, uint32_t ref_reg) {
   uint32_t valid_regs[] = {
       0,  1,  2,  3,  4,  5,  6,  7,  8,  9,
diff --git a/dex2oat/linker/image_test.cc b/dex2oat/linker/image_test.cc
index 1a5701d..33d122b 100644
--- a/dex2oat/linker/image_test.cc
+++ b/dex2oat/linker/image_test.cc
@@ -87,6 +87,8 @@
                              oat_file_end,
                              /*boot_image_begin=*/ 0u,
                              /*boot_image_size=*/ 0u,
+                             /*boot_image_component_count=*/ 0u,
+                             /*boot_image_checksum=*/ 0u,
                              sizeof(void*));
 
     ASSERT_TRUE(image_header.IsValid());
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index a0f1093..303c262 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -48,6 +48,7 @@
 #include "linker/multi_oat_relative_patcher.h"
 #include "lock_word.h"
 #include "mirror/object-inl.h"
+#include "oat.h"
 #include "oat_writer.h"
 #include "scoped_thread_state_change-inl.h"
 #include "signal_catcher.h"
@@ -80,8 +81,6 @@
     CommonCompilerTest::SetUp();
   }
 
-  void TestWriteRead(ImageHeader::StorageMode storage_mode, uint32_t max_image_block_size);
-
   void Compile(ImageHeader::StorageMode storage_mode,
                uint32_t max_image_block_size,
                /*out*/ CompilationHelper& out_helper,
@@ -157,7 +156,7 @@
     {
       ScopedObjectAccess soa(Thread::Current());
       // Inject in boot class path so that the compiler driver can see it.
-      class_linker->AppendToBootClassPath(soa.Self(), *dex_file.get());
+      class_linker->AppendToBootClassPath(soa.Self(), dex_file.get());
     }
     class_path.push_back(dex_file.get());
   }
@@ -173,8 +172,9 @@
     // Create a generic tmp file, to be the base of the .art and .oat temporary files.
     ScratchFile location;
     std::vector<std::string> image_locations =
-        gc::space::ImageSpace::ExpandMultiImageLocations(out_helper.dex_file_locations,
-                                                         location.GetFilename() + ".art");
+        gc::space::ImageSpace::ExpandMultiImageLocations(
+            ArrayRef<const std::string>(out_helper.dex_file_locations),
+            location.GetFilename() + ".art");
     for (size_t i = 0u; i != class_path.size(); ++i) {
       out_helper.image_locations.push_back(ScratchFile(image_locations[i]));
     }
@@ -210,7 +210,6 @@
     dex_file_to_oat_index_map.emplace(dex_file, image_idx);
     ++image_idx;
   }
-  // TODO: compile_pic should be a test argument.
   std::unique_ptr<ImageWriter> writer(new ImageWriter(*compiler_options_,
                                                       kRequestedImageBase,
                                                       storage_mode,
@@ -258,8 +257,6 @@
         std::vector<std::unique_ptr<const DexFile>> cur_opened_dex_files;
         bool dex_files_ok = oat_writers[i]->WriteAndOpenDexFiles(
             out_helper.vdex_files[i].GetFile(),
-            rodata.back(),
-            (i == 0u) ? &key_value_store : nullptr,
             /* verify */ false,           // Dex files may be dex-to-dex-ed, don't verify.
             /* update_input_vdex */ false,
             /* copy_dex_files */ CopyOption::kOnlyIfCompressed,
@@ -279,7 +276,8 @@
           ASSERT_TRUE(cur_opened_dex_files.empty());
         }
       }
-      bool image_space_ok = writer->PrepareImageAddressSpace(&timings);
+      bool image_space_ok =
+          writer->PrepareImageAddressSpace(/*preload_dex_caches=*/ true, &timings);
       ASSERT_TRUE(image_space_ok);
 
       DCHECK_EQ(out_helper.vdex_files.size(), out_helper.oat_files.size());
@@ -290,6 +288,10 @@
         OatWriter* const oat_writer = oat_writers[i].get();
         ElfWriter* const elf_writer = elf_writers[i].get();
         std::vector<const DexFile*> cur_dex_files(1u, class_path[i]);
+        bool start_rodata_ok = oat_writer->StartRoData(cur_dex_files,
+                                                       rodata[i],
+                                                       (i == 0u) ? &key_value_store : nullptr);
+        ASSERT_TRUE(start_rodata_ok);
         oat_writer->Initialize(driver, writer.get(), cur_dex_files);
 
         std::unique_ptr<BufferedOutputStream> vdex_out =
@@ -344,7 +346,7 @@
 
     bool success_image = writer->Write(kInvalidFd,
                                        image_filenames,
-                                       oat_filenames);
+                                       image_filenames.size());
     ASSERT_TRUE(success_image);
 
     for (size_t i = 0, size = oat_filenames.size(); i != size; ++i) {
@@ -400,118 +402,6 @@
   }
 }
 
-inline void ImageTest::TestWriteRead(ImageHeader::StorageMode storage_mode,
-                                     uint32_t max_image_block_size) {
-  CompilationHelper helper;
-  Compile(storage_mode, max_image_block_size, /*out*/ helper);
-  std::vector<uint64_t> image_file_sizes;
-  for (ScratchFile& image_file : helper.image_files) {
-    std::unique_ptr<File> file(OS::OpenFileForReading(image_file.GetFilename().c_str()));
-    ASSERT_TRUE(file.get() != nullptr);
-    ImageHeader image_header;
-    ASSERT_EQ(file->ReadFully(&image_header, sizeof(image_header)), true);
-    ASSERT_TRUE(image_header.IsValid());
-    const auto& bitmap_section = image_header.GetImageBitmapSection();
-    ASSERT_GE(bitmap_section.Offset(), sizeof(image_header));
-    ASSERT_NE(0U, bitmap_section.Size());
-
-    gc::Heap* heap = Runtime::Current()->GetHeap();
-    ASSERT_TRUE(heap->HaveContinuousSpaces());
-    gc::space::ContinuousSpace* space = heap->GetNonMovingSpace();
-    ASSERT_FALSE(space->IsImageSpace());
-    ASSERT_TRUE(space != nullptr);
-    ASSERT_TRUE(space->IsMallocSpace());
-    image_file_sizes.push_back(file->GetLength());
-  }
-
-  // Need to delete the compiler since it has worker threads which are attached to runtime.
-  compiler_driver_.reset();
-
-  // Tear down old runtime before making a new one, clearing out misc state.
-
-  // Remove the reservation of the memory for use to load the image.
-  // Need to do this before we reset the runtime.
-  UnreserveImageSpace();
-
-  helper.extra_dex_files.clear();
-  runtime_.reset();
-  java_lang_dex_file_ = nullptr;
-
-  MemMap::Init();
-
-  RuntimeOptions options;
-  options.emplace_back(GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames()), nullptr);
-  options.emplace_back(
-      GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations()), nullptr);
-  std::string image("-Ximage:");
-  image.append(helper.image_locations[0].GetFilename());
-  options.push_back(std::make_pair(image.c_str(), static_cast<void*>(nullptr)));
-  // By default the compiler this creates will not include patch information.
-  options.push_back(std::make_pair("-Xnorelocate", nullptr));
-
-  if (!Runtime::Create(options, false)) {
-    LOG(FATAL) << "Failed to create runtime";
-    return;
-  }
-  runtime_.reset(Runtime::Current());
-  // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start,
-  // give it away now and then switch to a more managable ScopedObjectAccess.
-  Thread::Current()->TransitionFromRunnableToSuspended(kNative);
-  ScopedObjectAccess soa(Thread::Current());
-  ASSERT_TRUE(runtime_.get() != nullptr);
-  class_linker_ = runtime_->GetClassLinker();
-
-  gc::Heap* heap = Runtime::Current()->GetHeap();
-  ASSERT_TRUE(heap->HasBootImageSpace());
-  ASSERT_TRUE(heap->GetNonMovingSpace()->IsMallocSpace());
-
-  // We loaded the runtime with an explicit image, so it must exist.
-  ASSERT_EQ(heap->GetBootImageSpaces().size(), image_file_sizes.size());
-  const HashSet<std::string>& image_classes = compiler_options_->GetImageClasses();
-  for (size_t i = 0; i < helper.dex_file_locations.size(); ++i) {
-    std::unique_ptr<const DexFile> dex(
-        LoadExpectSingleDexFile(helper.dex_file_locations[i].c_str()));
-    ASSERT_TRUE(dex != nullptr);
-    uint64_t image_file_size = image_file_sizes[i];
-    gc::space::ImageSpace* image_space = heap->GetBootImageSpaces()[i];
-    ASSERT_TRUE(image_space != nullptr);
-    if (storage_mode == ImageHeader::kStorageModeUncompressed) {
-      // Uncompressed, image should be smaller than file.
-      ASSERT_LE(image_space->GetImageHeader().GetImageSize(), image_file_size);
-    } else if (image_file_size > 16 * KB) {
-      // Compressed, file should be smaller than image. Not really valid for small images.
-      ASSERT_LE(image_file_size, image_space->GetImageHeader().GetImageSize());
-      // TODO: Actually validate the blocks, this is hard since the blocks are not copied over for
-      // compressed images. Add kPageSize since image_size is rounded up to this.
-      ASSERT_GT(image_space->GetImageHeader().GetBlockCount() * max_image_block_size,
-                image_space->GetImageHeader().GetImageSize() - kPageSize);
-    }
-
-    image_space->VerifyImageAllocations();
-    uint8_t* image_begin = image_space->Begin();
-    uint8_t* image_end = image_space->End();
-    if (i == 0) {
-      // This check is only valid for image 0.
-      CHECK_EQ(kRequestedImageBase, reinterpret_cast<uintptr_t>(image_begin));
-    }
-    for (size_t j = 0; j < dex->NumClassDefs(); ++j) {
-      const dex::ClassDef& class_def = dex->GetClassDef(j);
-      const char* descriptor = dex->GetClassDescriptor(class_def);
-      ObjPtr<mirror::Class> klass = class_linker_->FindSystemClass(soa.Self(), descriptor);
-      EXPECT_TRUE(klass != nullptr) << descriptor;
-      uint8_t* raw_klass = reinterpret_cast<uint8_t*>(klass.Ptr());
-      if (image_classes.find(std::string_view(descriptor)) == image_classes.end()) {
-        EXPECT_TRUE(raw_klass >= image_end || raw_klass < image_begin) << descriptor;
-      } else {
-        // Image classes should be located inside the image.
-        EXPECT_LT(image_begin, raw_klass) << descriptor;
-        EXPECT_LT(raw_klass, image_end) << descriptor;
-      }
-      EXPECT_TRUE(Monitor::IsValidLockWord(klass->GetLockWord(false)));
-    }
-  }
-}
-
 }  // namespace linker
 }  // namespace art
 
diff --git a/dex2oat/linker/image_write_read_test.cc b/dex2oat/linker/image_write_read_test.cc
index 5ddbd09..2966f19 100644
--- a/dex2oat/linker/image_write_read_test.cc
+++ b/dex2oat/linker/image_write_read_test.cc
@@ -19,23 +19,140 @@
 namespace art {
 namespace linker {
 
-TEST_F(ImageTest, WriteReadUncompressed) {
+class ImageWriteReadTest : public ImageTest {
+ protected:
+  void TestWriteRead(ImageHeader::StorageMode storage_mode, uint32_t max_image_block_size);
+};
+
+void ImageWriteReadTest::TestWriteRead(ImageHeader::StorageMode storage_mode,
+                                       uint32_t max_image_block_size) {
+  CompilationHelper helper;
+  Compile(storage_mode, max_image_block_size, /*out*/ helper);
+  std::vector<uint64_t> image_file_sizes;
+  for (ScratchFile& image_file : helper.image_files) {
+    std::unique_ptr<File> file(OS::OpenFileForReading(image_file.GetFilename().c_str()));
+    ASSERT_TRUE(file.get() != nullptr);
+    ImageHeader image_header;
+    ASSERT_EQ(file->ReadFully(&image_header, sizeof(image_header)), true);
+    ASSERT_TRUE(image_header.IsValid());
+    const auto& bitmap_section = image_header.GetImageBitmapSection();
+    ASSERT_GE(bitmap_section.Offset(), sizeof(image_header));
+    ASSERT_NE(0U, bitmap_section.Size());
+
+    gc::Heap* heap = Runtime::Current()->GetHeap();
+    ASSERT_TRUE(heap->HaveContinuousSpaces());
+    gc::space::ContinuousSpace* space = heap->GetNonMovingSpace();
+    ASSERT_FALSE(space->IsImageSpace());
+    ASSERT_TRUE(space != nullptr);
+    ASSERT_TRUE(space->IsMallocSpace());
+    image_file_sizes.push_back(file->GetLength());
+  }
+
+  // Need to delete the compiler since it has worker threads which are attached to runtime.
+  compiler_driver_.reset();
+
+  // Tear down old runtime before making a new one, clearing out misc state.
+
+  // Remove the reservation of the memory for use to load the image.
+  // Need to do this before we reset the runtime.
+  UnreserveImageSpace();
+
+  helper.extra_dex_files.clear();
+  runtime_.reset();
+  java_lang_dex_file_ = nullptr;
+
+  MemMap::Init();
+
+  RuntimeOptions options;
+  options.emplace_back(GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames()), nullptr);
+  options.emplace_back(
+      GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations()), nullptr);
+  std::string image("-Ximage:");
+  image.append(helper.image_locations[0].GetFilename());
+  options.push_back(std::make_pair(image.c_str(), static_cast<void*>(nullptr)));
+  // By default the compiler this creates will not include patch information.
+  options.push_back(std::make_pair("-Xnorelocate", nullptr));
+
+  if (!Runtime::Create(options, false)) {
+    LOG(FATAL) << "Failed to create runtime";
+    return;
+  }
+  runtime_.reset(Runtime::Current());
+  // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start,
+  // give it away now and then switch to a more managable ScopedObjectAccess.
+  Thread::Current()->TransitionFromRunnableToSuspended(kNative);
+  ScopedObjectAccess soa(Thread::Current());
+  ASSERT_TRUE(runtime_.get() != nullptr);
+  class_linker_ = runtime_->GetClassLinker();
+
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  ASSERT_TRUE(heap->HasBootImageSpace());
+  ASSERT_TRUE(heap->GetNonMovingSpace()->IsMallocSpace());
+
+  // We loaded the runtime with an explicit image, so it must exist.
+  ASSERT_EQ(heap->GetBootImageSpaces().size(), image_file_sizes.size());
+  const HashSet<std::string>& image_classes = compiler_options_->GetImageClasses();
+  for (size_t i = 0; i < helper.dex_file_locations.size(); ++i) {
+    std::unique_ptr<const DexFile> dex(
+        LoadExpectSingleDexFile(helper.dex_file_locations[i].c_str()));
+    ASSERT_TRUE(dex != nullptr);
+    uint64_t image_file_size = image_file_sizes[i];
+    gc::space::ImageSpace* image_space = heap->GetBootImageSpaces()[i];
+    ASSERT_TRUE(image_space != nullptr);
+    if (storage_mode == ImageHeader::kStorageModeUncompressed) {
+      // Uncompressed, image should be smaller than file.
+      ASSERT_LE(image_space->GetImageHeader().GetImageSize(), image_file_size);
+    } else if (image_file_size > 16 * KB) {
+      // Compressed, file should be smaller than image. Not really valid for small images.
+      ASSERT_LE(image_file_size, image_space->GetImageHeader().GetImageSize());
+      // TODO: Actually validate the blocks, this is hard since the blocks are not copied over for
+      // compressed images. Add kPageSize since image_size is rounded up to this.
+      ASSERT_GT(image_space->GetImageHeader().GetBlockCount() * max_image_block_size,
+                image_space->GetImageHeader().GetImageSize() - kPageSize);
+    }
+
+    image_space->VerifyImageAllocations();
+    uint8_t* image_begin = image_space->Begin();
+    uint8_t* image_end = image_space->End();
+    if (i == 0) {
+      // This check is only valid for image 0.
+      CHECK_EQ(kRequestedImageBase, reinterpret_cast<uintptr_t>(image_begin));
+    }
+    for (size_t j = 0; j < dex->NumClassDefs(); ++j) {
+      const dex::ClassDef& class_def = dex->GetClassDef(j);
+      const char* descriptor = dex->GetClassDescriptor(class_def);
+      ObjPtr<mirror::Class> klass = class_linker_->FindSystemClass(soa.Self(), descriptor);
+      EXPECT_TRUE(klass != nullptr) << descriptor;
+      uint8_t* raw_klass = reinterpret_cast<uint8_t*>(klass.Ptr());
+      if (image_classes.find(std::string_view(descriptor)) == image_classes.end()) {
+        EXPECT_TRUE(raw_klass >= image_end || raw_klass < image_begin) << descriptor;
+      } else {
+        // Image classes should be located inside the image.
+        EXPECT_LT(image_begin, raw_klass) << descriptor;
+        EXPECT_LT(raw_klass, image_end) << descriptor;
+      }
+      EXPECT_TRUE(Monitor::IsValidLockWord(klass->GetLockWord(false)));
+    }
+  }
+}
+
+TEST_F(ImageWriteReadTest, WriteReadUncompressed) {
   TestWriteRead(ImageHeader::kStorageModeUncompressed,
                 /*max_image_block_size=*/std::numeric_limits<uint32_t>::max());
 }
 
-TEST_F(ImageTest, WriteReadLZ4) {
+TEST_F(ImageWriteReadTest, WriteReadLZ4) {
   TestWriteRead(ImageHeader::kStorageModeLZ4,
                 /*max_image_block_size=*/std::numeric_limits<uint32_t>::max());
 }
 
-TEST_F(ImageTest, WriteReadLZ4HC) {
+TEST_F(ImageWriteReadTest, WriteReadLZ4HC) {
   TestWriteRead(ImageHeader::kStorageModeLZ4HC,
                 /*max_image_block_size=*/std::numeric_limits<uint32_t>::max());
 }
 
 
-TEST_F(ImageTest, WriteReadLZ4HCKBBlock) {
+TEST_F(ImageWriteReadTest, WriteReadLZ4HCKBBlock) {
   TestWriteRead(ImageHeader::kStorageModeLZ4HC, /*max_image_block_size=*/KB);
 }
 
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 9626e21..51edca4 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -42,6 +42,7 @@
 #include "driver/compiler_options.h"
 #include "elf/elf_utils.h"
 #include "elf_file.h"
+#include "entrypoints/entrypoint_utils-inl.h"
 #include "gc/accounting/card_table-inl.h"
 #include "gc/accounting/heap_bitmap.h"
 #include "gc/accounting/space_bitmap-inl.h"
@@ -53,7 +54,7 @@
 #include "gc/space/space-inl.h"
 #include "gc/verification.h"
 #include "handle_scope-inl.h"
-#include "image.h"
+#include "image-inl.h"
 #include "imt_conflict_table.h"
 #include "intern_table-inl.h"
 #include "jni/jni_internal.h"
@@ -148,6 +149,56 @@
 // Separate objects into multiple bins to optimize dirty memory use.
 static constexpr bool kBinObjects = true;
 
+ObjPtr<mirror::ObjectArray<mirror::Object>> AllocateBootImageLiveObjects(
+    Thread* self, Runtime* runtime) REQUIRES_SHARED(Locks::mutator_lock_) {
+  ClassLinker* class_linker = runtime->GetClassLinker();
+  // The objects used for the Integer.valueOf() intrinsic must remain live even if references
+  // to them are removed using reflection. Image roots are not accessible through reflection,
+  // so the array we construct here shall keep them alive.
+  StackHandleScope<1> hs(self);
+  Handle<mirror::ObjectArray<mirror::Object>> integer_cache =
+      hs.NewHandle(IntrinsicObjects::LookupIntegerCache(self, class_linker));
+  size_t live_objects_size =
+      enum_cast<size_t>(ImageHeader::kIntrinsicObjectsStart) +
+      ((integer_cache != nullptr) ? (/* cache */ 1u + integer_cache->GetLength()) : 0u);
+  ObjPtr<mirror::ObjectArray<mirror::Object>> live_objects =
+      mirror::ObjectArray<mirror::Object>::Alloc(
+          self, GetClassRoot<mirror::ObjectArray<mirror::Object>>(class_linker), live_objects_size);
+  int32_t index = 0u;
+  auto set_entry = [&](ImageHeader::BootImageLiveObjects entry,
+                       ObjPtr<mirror::Object> value) REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK_EQ(index, enum_cast<int32_t>(entry));
+    live_objects->Set</*kTransacrionActive=*/ false>(index, value);
+    ++index;
+  };
+  set_entry(ImageHeader::kOomeWhenThrowingException,
+            runtime->GetPreAllocatedOutOfMemoryErrorWhenThrowingException());
+  set_entry(ImageHeader::kOomeWhenThrowingOome,
+            runtime->GetPreAllocatedOutOfMemoryErrorWhenThrowingOOME());
+  set_entry(ImageHeader::kOomeWhenHandlingStackOverflow,
+            runtime->GetPreAllocatedOutOfMemoryErrorWhenHandlingStackOverflow());
+  set_entry(ImageHeader::kNoClassDefFoundError, runtime->GetPreAllocatedNoClassDefFoundError());
+  set_entry(ImageHeader::kClearedJniWeakSentinel, runtime->GetSentinel().Read());
+
+  DCHECK_EQ(index, enum_cast<int32_t>(ImageHeader::kIntrinsicObjectsStart));
+  if (integer_cache != nullptr) {
+    live_objects->Set(index++, integer_cache.Get());
+    for (int32_t i = 0, length = integer_cache->GetLength(); i != length; ++i) {
+      live_objects->Set(index++, integer_cache->Get(i));
+    }
+  }
+  CHECK_EQ(index, live_objects->GetLength());
+
+  if (kIsDebugBuild && integer_cache != nullptr) {
+    CHECK_EQ(integer_cache.Get(), IntrinsicObjects::GetIntegerValueOfCache(live_objects));
+    for (int32_t i = 0, len = integer_cache->GetLength(); i != len; ++i) {
+      CHECK_EQ(integer_cache->GetWithoutChecks(i),
+               IntrinsicObjects::GetIntegerValueOfObject(live_objects, i));
+    }
+  }
+  return live_objects;
+}
+
 ObjPtr<mirror::ClassLoader> ImageWriter::GetAppClassLoader() const
     REQUIRES_SHARED(Locks::mutator_lock_) {
   return compiler_options_.IsAppImage()
@@ -155,59 +206,24 @@
       : nullptr;
 }
 
-bool ImageWriter::IsImageObject(ObjPtr<mirror::Object> obj) const {
-  // For boot image, we keep all objects remaining after the GC in PrepareImageAddressSpace().
+bool ImageWriter::IsImageDexCache(ObjPtr<mirror::DexCache> dex_cache) const {
+  // For boot image, we keep all dex caches.
   if (compiler_options_.IsBootImage()) {
     return true;
   }
-  // Objects already in the boot image do not belong to the image being written.
-  if (IsInBootImage(obj.Ptr())) {
+  // Dex caches already in the boot image do not belong to the image being written.
+  if (IsInBootImage(dex_cache.Ptr())) {
     return false;
   }
-  // DexCaches for the boot class path components that are not a part of the boot image
+  // Dex caches for the boot class path components that are not part of the boot image
   // cannot be garbage collected in PrepareImageAddressSpace() but we do not want to
-  // include them in the app image. So make sure we include only the app DexCaches.
-  if (obj->IsDexCache() &&
-      !ContainsElement(compiler_options_.GetDexFilesForOatFile(),
-                       obj->AsDexCache()->GetDexFile())) {
+  // include them in the app image.
+  if (!ContainsElement(compiler_options_.GetDexFilesForOatFile(), dex_cache->GetDexFile())) {
     return false;
   }
   return true;
 }
 
-// Return true if an object is already in an image space.
-bool ImageWriter::IsInBootImage(const void* obj) const {
-  gc::Heap* const heap = Runtime::Current()->GetHeap();
-  if (compiler_options_.IsBootImage()) {
-    DCHECK(heap->GetBootImageSpaces().empty());
-    return false;
-  }
-  for (gc::space::ImageSpace* boot_image_space : heap->GetBootImageSpaces()) {
-    const uint8_t* image_begin = boot_image_space->Begin();
-    // Real image end including ArtMethods and ArtField sections.
-    const uint8_t* image_end = image_begin + boot_image_space->GetImageHeader().GetImageSize();
-    if (image_begin <= obj && obj < image_end) {
-      return true;
-    }
-  }
-  return false;
-}
-
-bool ImageWriter::IsInBootOatFile(const void* ptr) const {
-  gc::Heap* const heap = Runtime::Current()->GetHeap();
-  if (compiler_options_.IsBootImage()) {
-    DCHECK(heap->GetBootImageSpaces().empty());
-    return false;
-  }
-  for (gc::space::ImageSpace* boot_image_space : heap->GetBootImageSpaces()) {
-    const ImageHeader& image_header = boot_image_space->GetImageHeader();
-    if (image_header.GetOatFileBegin() <= ptr && ptr < image_header.GetOatFileEnd()) {
-      return true;
-    }
-  }
-  return false;
-}
-
 static void ClearDexFileCookies() REQUIRES_SHARED(Locks::mutator_lock_) {
   auto visitor = [](Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(obj != nullptr);
@@ -221,7 +237,7 @@
   Runtime::Current()->GetHeap()->VisitObjects(visitor);
 }
 
-bool ImageWriter::PrepareImageAddressSpace(TimingLogger* timings) {
+bool ImageWriter::PrepareImageAddressSpace(bool preload_dex_caches, TimingLogger* timings) {
   target_ptr_size_ = InstructionSetPointerSize(compiler_options_.GetInstructionSet());
 
   Thread* const self = Thread::Current();
@@ -254,36 +270,27 @@
   }
 
   {
+    // All remaining weak interns are referenced. Promote them to strong interns. Whether a
+    // string was strongly or weakly interned, we shall make it strongly interned in the image.
+    TimingLogger::ScopedTiming t("PromoteInterns", timings);
+    ScopedObjectAccess soa(self);
+    Runtime::Current()->GetInternTable()->PromoteWeakToStrong();
+  }
+
+  if (preload_dex_caches) {
+    TimingLogger::ScopedTiming t("PreloadDexCaches", timings);
     // Preload deterministic contents to the dex cache arrays we're going to write.
     ScopedObjectAccess soa(self);
     ObjPtr<mirror::ClassLoader> class_loader = GetAppClassLoader();
     std::vector<ObjPtr<mirror::DexCache>> dex_caches = FindDexCaches(self);
     for (ObjPtr<mirror::DexCache> dex_cache : dex_caches) {
-      if (!IsImageObject(dex_cache)) {
+      if (!IsImageDexCache(dex_cache)) {
         continue;  // Boot image DexCache is not written to the app image.
       }
       PreloadDexCache(dex_cache, class_loader);
     }
   }
 
-  // Used to store information that will later be used to calculate image
-  // offsets to string references in the AppImage.
-  std::vector<HeapReferencePointerInfo> string_ref_info;
-  if (ClassLinker::kAppImageMayContainStrings && compiler_options_.IsAppImage()) {
-    // Count the number of string fields so we can allocate the appropriate
-    // amount of space in the image section.
-    TimingLogger::ScopedTiming t("AppImage:CollectStringReferenceInfo", timings);
-    ScopedObjectAccess soa(self);
-
-    if (kIsDebugBuild) {
-      VerifyNativeGCRootInvariants();
-      CHECK_EQ(image_infos_.size(), 1u);
-    }
-
-    string_ref_info = CollectStringReferenceInfo();
-    image_infos_.back().num_string_references_ = string_ref_info.size();
-  }
-
   {
     TimingLogger::ScopedTiming t("CalculateNewObjectOffsets", timings);
     ScopedObjectAccess soa(self);
@@ -311,319 +318,12 @@
     VLOG(compiler) << "Dex2Oat:AppImage:classCount = " << app_image_class_count;
   }
 
-  if (ClassLinker::kAppImageMayContainStrings && compiler_options_.IsAppImage()) {
-    // Use the string reference information obtained earlier to calculate image
-    // offsets.  These will later be written to the image by Write/CopyMetadata.
-    TimingLogger::ScopedTiming t("AppImage:CalculateImageOffsets", timings);
-    ScopedObjectAccess soa(self);
-
-    size_t managed_string_refs = 0;
-    size_t native_string_refs = 0;
-
-    /*
-     * Iterate over the string reference info and calculate image offsets.
-     * The first element of the pair is either the object the reference belongs
-     * to or the beginning of the native reference array it is located in.  In
-     * the first case the second element is the offset of the field relative to
-     * the object's base address.  In the second case, it is the index of the
-     * StringDexCacheType object in the array.
-     */
-    for (const HeapReferencePointerInfo& ref_info : string_ref_info) {
-      uint32_t base_offset;
-
-      if (HasDexCacheStringNativeRefTag(ref_info.first)) {
-        ++native_string_refs;
-        auto* obj_ptr = reinterpret_cast<mirror::Object*>(ClearDexCacheNativeRefTags(
-            ref_info.first));
-        base_offset = SetDexCacheStringNativeRefTag(GetImageOffset(obj_ptr));
-      } else if (HasDexCachePreResolvedStringNativeRefTag(ref_info.first)) {
-        ++native_string_refs;
-        auto* obj_ptr = reinterpret_cast<mirror::Object*>(ClearDexCacheNativeRefTags(
-            ref_info.first));
-        base_offset = SetDexCachePreResolvedStringNativeRefTag(GetImageOffset(obj_ptr));
-      } else {
-        ++managed_string_refs;
-        base_offset = GetImageOffset(reinterpret_cast<mirror::Object*>(ref_info.first));
-      }
-
-      string_reference_offsets_.emplace_back(base_offset, ref_info.second);
-    }
-
-    CHECK_EQ(image_infos_.back().num_string_references_,
-             string_reference_offsets_.size());
-
-    VLOG(compiler) << "Dex2Oat:AppImage:stringReferences = " << string_reference_offsets_.size();
-    VLOG(compiler) << "Dex2Oat:AppImage:managedStringReferences = " << managed_string_refs;
-    VLOG(compiler) << "Dex2Oat:AppImage:nativeStringReferences = " << native_string_refs;
-  }
-
   // This needs to happen after CalculateNewObjectOffsets since it relies on intern_table_bytes_ and
   // bin size sums being calculated.
   TimingLogger::ScopedTiming t("AllocMemory", timings);
   return AllocMemory();
 }
 
-class ImageWriter::CollectStringReferenceVisitor {
- public:
-  explicit CollectStringReferenceVisitor(const ImageWriter& image_writer)
-      : image_writer_(image_writer),
-        curr_obj_(nullptr),
-        string_ref_info_(0),
-        dex_cache_string_ref_counter_(0) {}
-
-  // Used to prevent repeated null checks in the code that calls the visitor.
-  ALWAYS_INLINE
-  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (!root->IsNull()) {
-      VisitRoot(root);
-    }
-  }
-
-  /*
-   * Counts the number of native references to strings reachable through
-   * DexCache objects for verification later.
-   */
-  ALWAYS_INLINE
-  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
-      REQUIRES_SHARED(Locks::mutator_lock_)  {
-    ObjPtr<mirror::Object> referred_obj = root->AsMirrorPtr();
-
-    if (curr_obj_->IsDexCache() &&
-        image_writer_.IsValidAppImageStringReference(referred_obj)) {
-      ++dex_cache_string_ref_counter_;
-    }
-  }
-
-  // Collects info for managed fields that reference managed Strings.
-  ALWAYS_INLINE
-  void operator() (ObjPtr<mirror::Object> obj,
-                   MemberOffset member_offset,
-                   bool is_static ATTRIBUTE_UNUSED) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::Object> referred_obj =
-        obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
-            member_offset);
-
-    if (image_writer_.IsValidAppImageStringReference(referred_obj)) {
-      string_ref_info_.emplace_back(reinterpret_cast<uintptr_t>(obj.Ptr()),
-                                    member_offset.Uint32Value());
-    }
-  }
-
-  ALWAYS_INLINE
-  void operator() (ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
-                   ObjPtr<mirror::Reference> ref) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    operator()(ref, mirror::Reference::ReferentOffset(), /* is_static */ false);
-  }
-
-  void AddStringRefInfo(uint32_t first, uint32_t second) {
-    string_ref_info_.emplace_back(first, second);
-  }
-
-  std::vector<HeapReferencePointerInfo>&& MoveRefInfo() {
-    return std::move(string_ref_info_);
-  }
-
-  // Used by the wrapper function to obtain a native reference count.
-  size_t GetDexCacheStringRefCount() const {
-    return dex_cache_string_ref_counter_;
-  }
-
-  void SetObject(ObjPtr<mirror::Object> obj) {
-    curr_obj_ = obj;
-    dex_cache_string_ref_counter_ = 0;
-  }
-
- private:
-  const ImageWriter& image_writer_;
-  ObjPtr<mirror::Object> curr_obj_;
-  mutable std::vector<HeapReferencePointerInfo> string_ref_info_;
-  mutable size_t dex_cache_string_ref_counter_;
-};
-
-std::vector<ImageWriter::HeapReferencePointerInfo> ImageWriter::CollectStringReferenceInfo() const
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  gc::Heap* const heap = Runtime::Current()->GetHeap();
-  CollectStringReferenceVisitor visitor(*this);
-
-  /*
-   * References to managed strings can occur either in the managed heap or in
-   * native memory regions.  Information about managed references is collected
-   * by the CollectStringReferenceVisitor and directly added to the internal
-   * info vector.
-   *
-   * Native references to managed strings can only occur through DexCache
-   * objects.  This is verified by VerifyNativeGCRootInvariants().  Due to the
-   * fact that these native references are encapsulated in std::atomic objects
-   * the VisitReferences() function can't pass the visiting object the address
-   * of the reference.  Instead, the VisitReferences() function loads the
-   * reference into a temporary variable and passes that address to the
-   * visitor.  As a consequence of this we can't uniquely identify the location
-   * of the string reference in the visitor.
-   *
-   * Due to these limitations, the visitor will only count the number of
-   * managed strings reachable through the native references of a DexCache
-   * object.  If there are any such strings, this function will then iterate
-   * over the native references, test the string for membership in the
-   * AppImage, and add the tagged DexCache pointer and string array offset to
-   * the info vector if necessary.
-   */
-  heap->VisitObjects([this, &visitor](ObjPtr<mirror::Object> object)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (IsImageObject(object)) {
-      visitor.SetObject(object);
-
-      if (object->IsDexCache()) {
-        object->VisitReferences</* kVisitNativeRoots= */ true,
-                                kVerifyNone,
-                                kWithoutReadBarrier>(visitor, visitor);
-
-        if (visitor.GetDexCacheStringRefCount() > 0) {
-          size_t string_info_collected = 0;
-
-          ObjPtr<mirror::DexCache> dex_cache = object->AsDexCache();
-          // Both of the dex cache string arrays are visited, so add up the total in the check.
-          DCHECK_LE(visitor.GetDexCacheStringRefCount(),
-                    dex_cache->NumPreResolvedStrings() + dex_cache->NumStrings());
-
-          for (uint32_t index = 0; index < dex_cache->NumStrings(); ++index) {
-            // GetResolvedString() can't be used here due to the circular
-            // nature of the cache and the collision detection this requires.
-            ObjPtr<mirror::String> referred_string =
-                dex_cache->GetStrings()[index].load().object.Read();
-
-            if (IsValidAppImageStringReference(referred_string)) {
-              ++string_info_collected;
-              visitor.AddStringRefInfo(
-                  SetDexCacheStringNativeRefTag(reinterpret_cast<uintptr_t>(object.Ptr())), index);
-            }
-          }
-
-          // Visit all of the preinitialized strings.
-          GcRoot<mirror::String>* preresolved_strings = dex_cache->GetPreResolvedStrings();
-          for (size_t index = 0; index < dex_cache->NumPreResolvedStrings(); ++index) {
-            ObjPtr<mirror::String> referred_string = preresolved_strings[index].Read();
-            if (IsValidAppImageStringReference(referred_string)) {
-              ++string_info_collected;
-              visitor.AddStringRefInfo(SetDexCachePreResolvedStringNativeRefTag(
-                reinterpret_cast<uintptr_t>(object.Ptr())),
-                index);
-            }
-          }
-
-          DCHECK_EQ(string_info_collected, visitor.GetDexCacheStringRefCount());
-        }
-      } else {
-        object->VisitReferences</* kVisitNativeRoots= */ false,
-                                kVerifyNone,
-                                kWithoutReadBarrier>(visitor, visitor);
-      }
-    }
-  });
-
-  return visitor.MoveRefInfo();
-}
-
-class ImageWriter::NativeGCRootInvariantVisitor {
- public:
-  explicit NativeGCRootInvariantVisitor(const ImageWriter& image_writer) :
-    curr_obj_(nullptr), class_violation_(false), class_loader_violation_(false),
-    image_writer_(image_writer) {}
-
-  ALWAYS_INLINE
-  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (!root->IsNull()) {
-      VisitRoot(root);
-    }
-  }
-
-  ALWAYS_INLINE
-  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
-      REQUIRES_SHARED(Locks::mutator_lock_)  {
-    ObjPtr<mirror::Object> referred_obj = root->AsMirrorPtr();
-
-    if (curr_obj_->IsClass()) {
-      class_violation_ = class_violation_ ||
-                         image_writer_.IsValidAppImageStringReference(referred_obj);
-
-    } else if (curr_obj_->IsClassLoader()) {
-      class_loader_violation_ = class_loader_violation_ ||
-                                image_writer_.IsValidAppImageStringReference(referred_obj);
-
-    } else if (!curr_obj_->IsDexCache()) {
-      LOG(FATAL) << "Dex2Oat:AppImage | " <<
-                    "Native reference to String found in unexpected object type.";
-    }
-  }
-
-  ALWAYS_INLINE
-  void operator() (ObjPtr<mirror::Object> obj ATTRIBUTE_UNUSED,
-                   MemberOffset member_offset ATTRIBUTE_UNUSED,
-                   bool is_static ATTRIBUTE_UNUSED) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {}
-
-  ALWAYS_INLINE
-  void operator() (ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
-                   ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {}
-
-  // Returns true iff the only reachable native string references are through DexCache objects.
-  bool InvariantsHold() const {
-    return !(class_violation_ || class_loader_violation_);
-  }
-
-  ObjPtr<mirror::Object> curr_obj_;
-  mutable bool class_violation_;
-  mutable bool class_loader_violation_;
-
- private:
-  const ImageWriter& image_writer_;
-};
-
-void ImageWriter::VerifyNativeGCRootInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) {
-  gc::Heap* const heap = Runtime::Current()->GetHeap();
-
-  NativeGCRootInvariantVisitor visitor(*this);
-
-  heap->VisitObjects([this, &visitor](ObjPtr<mirror::Object> object)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    visitor.curr_obj_ = object;
-
-    if (!IsInBootImage(object.Ptr())) {
-      object->VisitReferences</* kVisitNativeReferences= */ true,
-                              kVerifyNone,
-                              kWithoutReadBarrier>(visitor, visitor);
-    }
-  });
-
-  bool error = false;
-  std::ostringstream error_str;
-
-  /*
-   * Build the error string
-   */
-
-  if (UNLIKELY(visitor.class_violation_)) {
-    error_str << "Class";
-    error = true;
-  }
-
-  if (UNLIKELY(visitor.class_loader_violation_)) {
-    if (error) {
-      error_str << ", ";
-    }
-
-    error_str << "ClassLoader";
-  }
-
-  CHECK(visitor.InvariantsHold()) <<
-    "Native GC root invariant failure. String ref invariants don't hold for the following " <<
-    "object types: " << error_str.str();
-}
-
 void ImageWriter::CopyMetadata() {
   DCHECK(compiler_options_.IsAppImage());
   CHECK_EQ(image_infos_.size(), 1u);
@@ -635,15 +335,17 @@
       image_info.image_.Begin() +
       image_sections[ImageHeader::kSectionStringReferenceOffsets].Offset());
 
-  std::copy(string_reference_offsets_.begin(),
-            string_reference_offsets_.end(),
+  std::copy(image_info.string_reference_offsets_.begin(),
+            image_info.string_reference_offsets_.end(),
             sfo_section_base);
 }
 
-bool ImageWriter::IsValidAppImageStringReference(ObjPtr<mirror::Object> referred_obj) const {
+bool ImageWriter::IsInternedAppImageStringReference(ObjPtr<mirror::Object> referred_obj) const {
   return referred_obj != nullptr &&
          !IsInBootImage(referred_obj.Ptr()) &&
-         referred_obj->IsString();
+         referred_obj->IsString() &&
+         referred_obj == Runtime::Current()->GetInternTable()->LookupStrong(
+             Thread::Current(), referred_obj->AsString());
 }
 
 // Helper class that erases the image file if it isn't properly flushed and closed.
@@ -700,21 +402,21 @@
 
 bool ImageWriter::Write(int image_fd,
                         const std::vector<std::string>& image_filenames,
-                        const std::vector<std::string>& oat_filenames) {
+                        size_t component_count) {
   // If image_fd or oat_fd are not kInvalidFd then we may have empty strings in image_filenames or
   // oat_filenames.
   CHECK(!image_filenames.empty());
   if (image_fd != kInvalidFd) {
     CHECK_EQ(image_filenames.size(), 1u);
   }
-  CHECK(!oat_filenames.empty());
-  CHECK_EQ(image_filenames.size(), oat_filenames.size());
+  DCHECK(!oat_filenames_.empty());
+  CHECK_EQ(image_filenames.size(), oat_filenames_.size());
 
   Thread* const self = Thread::Current();
   {
     ScopedObjectAccess soa(self);
-    for (size_t i = 0; i < oat_filenames.size(); ++i) {
-      CreateHeader(i);
+    for (size_t i = 0; i < oat_filenames_.size(); ++i) {
+      CreateHeader(i, component_count);
       CopyAndFixupNativeData(i);
     }
   }
@@ -743,15 +445,12 @@
     ImageInfo& image_info = GetImageInfo(i);
     ImageFileGuard image_file;
     if (image_fd != kInvalidFd) {
-      if (image_filename.empty()) {
-        image_file.reset(new File(image_fd, unix_file::kCheckSafeUsage));
-        // Empty the file in case it already exists.
-        if (image_file != nullptr) {
-          TEMP_FAILURE_RETRY(image_file->SetLength(0));
-          TEMP_FAILURE_RETRY(image_file->Flush());
-        }
-      } else {
-        LOG(ERROR) << "image fd " << image_fd << " name " << image_filename;
+      // Ignore image_filename, it is supplied only for better diagnostic.
+      image_file.reset(new File(image_fd, unix_file::kCheckSafeUsage));
+      // Empty the file in case it already exists.
+      if (image_file != nullptr) {
+        TEMP_FAILURE_RETRY(image_file->SetLength(0));
+        TEMP_FAILURE_RETRY(image_file->Flush());
       }
     } else {
       image_file.reset(OS::CreateEmptyFile(image_filename.c_str()));
@@ -762,9 +461,10 @@
       return false;
     }
 
-    if (!compiler_options_.IsAppImage() && fchmod(image_file->Fd(), 0644) != 0) {
+    // Make file world readable if we have created it, i.e. when not passed as file descriptor.
+    if (image_fd == -1 && !compiler_options_.IsAppImage() && fchmod(image_file->Fd(), 0644) != 0) {
       PLOG(ERROR) << "Failed to make image file world readable: " << image_filename;
-      return EXIT_FAILURE;
+      return false;
     }
 
     // Image data size excludes the bitmap and the header.
@@ -853,7 +553,7 @@
     out_offset = RoundUp(out_offset, kPageSize);
     bitmap_section = ImageSection(out_offset, bitmap_section.Size());
 
-    if (!image_file->PwriteFully(image_info.image_bitmap_->Begin(),
+    if (!image_file->PwriteFully(image_info.image_bitmap_.Begin(),
                                  bitmap_section.Size(),
                                  bitmap_section.Offset())) {
       PLOG(ERROR) << "Failed to write image file bitmap " << image_filename;
@@ -868,7 +568,7 @@
 
     // Calculate the image checksum of the remaining data.
     image_checksum = adler32(image_checksum,
-                             reinterpret_cast<const uint8_t*>(image_info.image_bitmap_->Begin()),
+                             reinterpret_cast<const uint8_t*>(image_info.image_bitmap_.Begin()),
                              bitmap_section.Size());
     image_header->SetImageChecksum(image_checksum);
 
@@ -908,56 +608,16 @@
   return true;
 }
 
-void ImageWriter::SetImageOffset(mirror::Object* object, size_t offset) {
-  DCHECK(object != nullptr);
-  DCHECK_NE(offset, 0U);
-
-  // The object is already deflated from when we set the bin slot. Just overwrite the lock word.
-  object->SetLockWord(LockWord::FromForwardingAddress(offset), false);
-  DCHECK_EQ(object->GetLockWord(false).ReadBarrierState(), 0u);
-  DCHECK(IsImageOffsetAssigned(object));
-}
-
-void ImageWriter::UpdateImageOffset(mirror::Object* obj, uintptr_t offset) {
-  DCHECK(IsImageOffsetAssigned(obj)) << obj << " " << offset;
-  obj->SetLockWord(LockWord::FromForwardingAddress(offset), false);
-  DCHECK_EQ(obj->GetLockWord(false).ReadBarrierState(), 0u);
-}
-
-void ImageWriter::AssignImageOffset(mirror::Object* object, ImageWriter::BinSlot bin_slot) {
-  DCHECK(object != nullptr);
-  DCHECK_NE(image_objects_offset_begin_, 0u);
-
-  size_t oat_index = GetOatIndex(object);
-  ImageInfo& image_info = GetImageInfo(oat_index);
-  size_t bin_slot_offset = image_info.GetBinSlotOffset(bin_slot.GetBin());
-  size_t new_offset = bin_slot_offset + bin_slot.GetIndex();
-  DCHECK_ALIGNED(new_offset, kObjectAlignment);
-
-  SetImageOffset(object, new_offset);
-  DCHECK_LT(new_offset, image_info.image_end_);
-}
-
-bool ImageWriter::IsImageOffsetAssigned(mirror::Object* object) const {
-  // Will also return true if the bin slot was assigned since we are reusing the lock word.
-  DCHECK(object != nullptr);
-  return object->GetLockWord(false).GetState() == LockWord::kForwardingAddress;
-}
-
-size_t ImageWriter::GetImageOffset(mirror::Object* object) const {
-  DCHECK(object != nullptr);
-  DCHECK(IsImageOffsetAssigned(object));
-  LockWord lock_word = object->GetLockWord(false);
-  size_t offset = lock_word.ForwardingAddress();
-  size_t oat_index = GetOatIndex(object);
+size_t ImageWriter::GetImageOffset(mirror::Object* object, size_t oat_index) const {
+  BinSlot bin_slot = GetImageBinSlot(object, oat_index);
   const ImageInfo& image_info = GetImageInfo(oat_index);
+  size_t offset = image_info.GetBinSlotOffset(bin_slot.GetBin()) + bin_slot.GetOffset();
   DCHECK_LT(offset, image_info.image_end_);
   return offset;
 }
 
 void ImageWriter::SetImageBinSlot(mirror::Object* object, BinSlot bin_slot) {
   DCHECK(object != nullptr);
-  DCHECK(!IsImageOffsetAssigned(object));
   DCHECK(!IsImageBinSlotAssigned(object));
 
   // Before we stomp over the lock word, save the hash code for later.
@@ -988,7 +648,8 @@
       LOG(FATAL) << "Unreachable.";
       UNREACHABLE();
   }
-  object->SetLockWord(LockWord::FromForwardingAddress(bin_slot.Uint32Value()), false);
+  object->SetLockWord(LockWord::FromForwardingAddress(bin_slot.Uint32Value()),
+                      /*as_volatile=*/ false);
   DCHECK_EQ(object->GetLockWord(false).ReadBarrierState(), 0u);
   DCHECK(IsImageBinSlotAssigned(object));
 }
@@ -1013,7 +674,7 @@
   for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
     ObjPtr<mirror::DexCache> dex_cache =
         ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root));
-    if (dex_cache == nullptr || !IsImageObject(dex_cache)) {
+    if (dex_cache == nullptr || !IsImageDexCache(dex_cache)) {
       continue;
     }
     const DexFile* dex_file = dex_cache->GetDexFile();
@@ -1021,8 +682,12 @@
         << "Dex cache should have been pruned " << dex_file->GetLocation()
         << "; possibly in class path";
     DexCacheArraysLayout layout(target_ptr_size_, dex_file);
-    DCHECK(layout.Valid());
-    size_t oat_index = GetOatIndexForDexCache(dex_cache);
+    // Empty dex files will not have a "valid" DexCacheArraysLayout.
+    if (dex_file->NumTypeIds() + dex_file->NumStringIds() + dex_file->NumMethodIds() +
+        dex_file->NumFieldIds() + dex_file->NumProtoIds() + dex_file->NumCallSiteIds() != 0) {
+      DCHECK(layout.Valid());
+    }
+    size_t oat_index = GetOatIndexForDexFile(dex_file);
     ImageInfo& image_info = GetImageInfo(oat_index);
     uint32_t start = image_info.dex_cache_array_starts_.Get(dex_file);
     DCHECK_EQ(dex_file->NumTypeIds() != 0u, dex_cache->GetResolvedTypes() != nullptr);
@@ -1092,7 +757,7 @@
   pointer_arrays_.emplace(arr.Ptr(), Bin::kArtMethodClean);
 }
 
-void ImageWriter::AssignImageBinSlot(mirror::Object* object, size_t oat_index) {
+ImageWriter::Bin ImageWriter::AssignImageBinSlot(mirror::Object* object, size_t oat_index) {
   DCHECK(object != nullptr);
   size_t object_size = object->SizeOf();
 
@@ -1161,7 +826,7 @@
       if (dirty_image_objects_ != nullptr &&
           dirty_image_objects_->find(klass->PrettyDescriptor()) != dirty_image_objects_->end()) {
         bin = Bin::kKnownDirty;
-      } else if (klass->GetStatus() == ClassStatus::kInitialized) {
+      } else if (klass->GetStatus() == ClassStatus::kVisiblyInitialized) {
         bin = Bin::kClassInitialized;
 
         // If the class's static fields are all final, put it into a separate bin
@@ -1217,6 +882,8 @@
 
   // Grow the image closer to the end by the object we just assigned.
   image_info.image_end_ += offset_delta;
+
+  return bin;
 }
 
 bool ImageWriter::WillMethodBeDirty(ArtMethod* m) const {
@@ -1225,7 +892,8 @@
   }
   ObjPtr<mirror::Class> declaring_class = m->GetDeclaringClass();
   // Initialized is highly unlikely to dirty since there's no entry points to mutate.
-  return declaring_class == nullptr || declaring_class->GetStatus() != ClassStatus::kInitialized;
+  return declaring_class == nullptr ||
+         declaring_class->GetStatus() != ClassStatus::kVisiblyInitialized;
 }
 
 bool ImageWriter::IsImageBinSlotAssigned(mirror::Object* object) const {
@@ -1241,13 +909,13 @@
     BinSlot bin_slot(offset);
     size_t oat_index = GetOatIndex(object);
     const ImageInfo& image_info = GetImageInfo(oat_index);
-    DCHECK_LT(bin_slot.GetIndex(), image_info.GetBinSlotSize(bin_slot.GetBin()))
+    DCHECK_LT(bin_slot.GetOffset(), image_info.GetBinSlotSize(bin_slot.GetBin()))
         << "bin slot offset should not exceed the size of that bin";
   }
   return true;
 }
 
-ImageWriter::BinSlot ImageWriter::GetImageBinSlot(mirror::Object* object) const {
+ImageWriter::BinSlot ImageWriter::GetImageBinSlot(mirror::Object* object, size_t oat_index) const {
   DCHECK(object != nullptr);
   DCHECK(IsImageBinSlotAssigned(object));
 
@@ -1256,13 +924,23 @@
   DCHECK_LE(offset, std::numeric_limits<uint32_t>::max());
 
   BinSlot bin_slot(static_cast<uint32_t>(offset));
-  size_t oat_index = GetOatIndex(object);
-  const ImageInfo& image_info = GetImageInfo(oat_index);
-  DCHECK_LT(bin_slot.GetIndex(), image_info.GetBinSlotSize(bin_slot.GetBin()));
+  DCHECK_LT(bin_slot.GetOffset(), GetImageInfo(oat_index).GetBinSlotSize(bin_slot.GetBin()));
 
   return bin_slot;
 }
 
+void ImageWriter::UpdateImageBinSlotOffset(mirror::Object* object,
+                                           size_t oat_index,
+                                           size_t new_offset) {
+  BinSlot old_bin_slot = GetImageBinSlot(object, oat_index);
+  DCHECK_LT(new_offset, GetImageInfo(oat_index).GetBinSlotSize(old_bin_slot.GetBin()));
+  BinSlot new_bin_slot(old_bin_slot.GetBin(), new_offset);
+  object->SetLockWord(LockWord::FromForwardingAddress(new_bin_slot.Uint32Value()),
+                      /*as_volatile=*/ false);
+  DCHECK_EQ(object->GetLockWord(false).ReadBarrierState(), 0u);
+  DCHECK(IsImageBinSlotAssigned(object));
+}
+
 bool ImageWriter::AllocMemory() {
   for (ImageInfo& image_info : image_infos_) {
     const size_t length = RoundUp(image_info.CreateImageSections().first, kPageSize);
@@ -1280,9 +958,9 @@
 
     // Create the image bitmap, only needs to cover mirror object section which is up to image_end_.
     CHECK_LE(image_info.image_end_, length);
-    image_info.image_bitmap_.reset(gc::accounting::ContinuousSpaceBitmap::Create(
-        "image bitmap", image_info.image_.Begin(), RoundUp(image_info.image_end_, kPageSize)));
-    if (image_info.image_bitmap_.get() == nullptr) {
+    image_info.image_bitmap_ = gc::accounting::ContinuousSpaceBitmap::Create(
+        "image bitmap", image_info.image_.Begin(), RoundUp(image_info.image_end_, kPageSize));
+    if (!image_info.image_bitmap_.IsValid()) {
       LOG(ERROR) << "Failed to allocate memory for image bitmap";
       return false;
     }
@@ -1338,12 +1016,12 @@
 
     if (ref->IsClass()) {
       *result_ = *result_ ||
-          image_writer_->PruneAppImageClassInternal(ref->AsClass(), early_exit_, visited_);
+          image_writer_->PruneImageClassInternal(ref->AsClass(), early_exit_, visited_);
     } else {
       // Record the object visited in case of circular reference.
       visited_->emplace(ref);
       *result_ = *result_ ||
-          image_writer_->PruneAppImageClassInternal(klass, early_exit_, visited_);
+          image_writer_->PruneImageClassInternal(klass, early_exit_, visited_);
       ref->VisitReferences(*this, *this);
       // Clean up before exit for next call of this function.
       visited_->erase(ref);
@@ -1356,10 +1034,6 @@
     operator()(ref, mirror::Reference::ReferentOffset(), /* is_static */ false);
   }
 
-  ALWAYS_INLINE bool GetResult() const {
-    return result_;
-  }
-
  private:
   ImageWriter* image_writer_;
   bool* early_exit_;
@@ -1368,19 +1042,19 @@
 };
 
 
-bool ImageWriter::PruneAppImageClass(ObjPtr<mirror::Class> klass) {
+bool ImageWriter::PruneImageClass(ObjPtr<mirror::Class> klass) {
   bool early_exit = false;
   std::unordered_set<mirror::Object*> visited;
-  return PruneAppImageClassInternal(klass, &early_exit, &visited);
+  return PruneImageClassInternal(klass, &early_exit, &visited);
 }
 
-bool ImageWriter::PruneAppImageClassInternal(
+bool ImageWriter::PruneImageClassInternal(
     ObjPtr<mirror::Class> klass,
     bool* early_exit,
     std::unordered_set<mirror::Object*>* visited) {
   DCHECK(early_exit != nullptr);
   DCHECK(visited != nullptr);
-  DCHECK(compiler_options_.IsAppImage());
+  DCHECK(compiler_options_.IsAppImage() || compiler_options_.IsBootImageExtension());
   if (klass == nullptr || IsInBootImage(klass.Ptr())) {
     return false;
   }
@@ -1413,15 +1087,15 @@
     // Check interfaces since these wont be visited through VisitReferences.)
     ObjPtr<mirror::IfTable> if_table = klass->GetIfTable();
     for (size_t i = 0, num_interfaces = klass->GetIfTableCount(); i < num_interfaces; ++i) {
-      result = result || PruneAppImageClassInternal(if_table->GetInterface(i),
-                                                    &my_early_exit,
-                                                    visited);
+      result = result || PruneImageClassInternal(if_table->GetInterface(i),
+                                                 &my_early_exit,
+                                                 visited);
     }
   }
   if (klass->IsObjectArrayClass()) {
-    result = result || PruneAppImageClassInternal(klass->GetComponentType(),
-                                                  &my_early_exit,
-                                                  visited);
+    result = result || PruneImageClassInternal(klass->GetComponentType(),
+                                               &my_early_exit,
+                                               visited);
   }
   // Check static fields and their classes.
   if (klass->IsResolved() && klass->NumReferenceStaticFields() != 0) {
@@ -1434,14 +1108,10 @@
       mirror::Object* ref = klass->GetFieldObject<mirror::Object>(field_offset);
       if (ref != nullptr) {
         if (ref->IsClass()) {
-          result = result || PruneAppImageClassInternal(ref->AsClass(),
-                                                        &my_early_exit,
-                                                        visited);
+          result = result || PruneImageClassInternal(ref->AsClass(), &my_early_exit, visited);
         } else {
           mirror::Class* type = ref->GetClass();
-          result = result || PruneAppImageClassInternal(type,
-                                                        &my_early_exit,
-                                                        visited);
+          result = result || PruneImageClassInternal(type, &my_early_exit, visited);
           if (!result) {
             // For non-class case, also go through all the types mentioned by it's fields'
             // references recursively to decide whether to keep this class.
@@ -1456,9 +1126,7 @@
                                   sizeof(mirror::HeapReference<mirror::Object>));
     }
   }
-  result = result || PruneAppImageClassInternal(klass->GetSuperClass(),
-                                                &my_early_exit,
-                                                visited);
+  result = result || PruneImageClassInternal(klass->GetSuperClass(), &my_early_exit, visited);
   // Remove the class if the dex file is not in the set of dex files. This happens for classes that
   // are from uses-library if there is no profile. b/30688277
   ObjPtr<mirror::DexCache> dex_cache = klass->GetDexCache();
@@ -1484,9 +1152,9 @@
   if (klass == nullptr) {
     return false;
   }
-  if (!compiler_options_.IsBootImage() &&
-      Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
+  if (IsInBootImage(klass.Ptr())) {
     // Already in boot image, return true.
+    DCHECK(!compiler_options_.IsBootImage());
     return true;
   }
   std::string temp;
@@ -1494,10 +1162,12 @@
     return false;
   }
   if (compiler_options_.IsAppImage()) {
-    // For app images, we need to prune boot loader classes that are not in the boot image since
-    // these may have already been loaded when the app image is loaded.
-    // Keep classes in the boot image space since we don't want to re-resolve these.
-    return !PruneAppImageClass(klass);
+    // For app images, we need to prune classes that
+    // are defined by the boot class path we're compiling against but not in
+    // the boot image spaces since these may have already been loaded at
+    // run time when this image is loaded. Keep classes in the boot image
+    // spaces we're compiling against since we don't want to re-resolve these.
+    return !PruneImageClass(klass);
   }
   return true;
 }
@@ -1569,75 +1239,40 @@
   Runtime::Current()->GetClassLinker()->VisitClassLoaders(visitor);
 }
 
-void ImageWriter::PruneDexCache(ObjPtr<mirror::DexCache> dex_cache,
-                                ObjPtr<mirror::ClassLoader> class_loader) {
-  Runtime* runtime = Runtime::Current();
-  ClassLinker* class_linker = runtime->GetClassLinker();
-  const DexFile& dex_file = *dex_cache->GetDexFile();
-  // Prune methods.
-  dex::TypeIndex last_class_idx;  // Initialized to invalid index.
-  ObjPtr<mirror::Class> last_class = nullptr;
+void ImageWriter::ClearDexCache(ObjPtr<mirror::DexCache> dex_cache) {
+  // Clear methods.
   mirror::MethodDexCacheType* resolved_methods = dex_cache->GetResolvedMethods();
   for (size_t slot_idx = 0, num = dex_cache->NumResolvedMethods(); slot_idx != num; ++slot_idx) {
     auto pair =
         mirror::DexCache::GetNativePairPtrSize(resolved_methods, slot_idx, target_ptr_size_);
-    uint32_t stored_index = pair.index;
-    ArtMethod* method = pair.object;
-    if (method == nullptr) {
-      continue;  // Empty entry.
-    }
-    // Check if the referenced class is in the image. Note that we want to check the referenced
-    // class rather than the declaring class to preserve the semantics, i.e. using a MethodId
-    // results in resolving the referenced class and that can for example throw OOME.
-    const dex::MethodId& method_id = dex_file.GetMethodId(stored_index);
-    if (method_id.class_idx_ != last_class_idx) {
-      last_class_idx = method_id.class_idx_;
-      last_class = class_linker->LookupResolvedType(last_class_idx, dex_cache, class_loader);
-      if (last_class != nullptr && !KeepClass(last_class)) {
-        last_class = nullptr;
-      }
-    }
-    if (last_class == nullptr) {
-      dex_cache->ClearResolvedMethod(stored_index, target_ptr_size_);
+    if (pair.object != nullptr) {
+      dex_cache->ClearResolvedMethod(pair.index, target_ptr_size_);
     }
   }
-  // Prune fields.
+  // Clear fields.
   mirror::FieldDexCacheType* resolved_fields = dex_cache->GetResolvedFields();
-  last_class_idx = dex::TypeIndex();  // Initialized to invalid index.
-  last_class = nullptr;
   for (size_t slot_idx = 0, num = dex_cache->NumResolvedFields(); slot_idx != num; ++slot_idx) {
     auto pair = mirror::DexCache::GetNativePairPtrSize(resolved_fields, slot_idx, target_ptr_size_);
-    uint32_t stored_index = pair.index;
-    ArtField* field = pair.object;
-    if (field == nullptr) {
-      continue;  // Empty entry.
-    }
-    // Check if the referenced class is in the image. Note that we want to check the referenced
-    // class rather than the declaring class to preserve the semantics, i.e. using a FieldId
-    // results in resolving the referenced class and that can for example throw OOME.
-    const dex::FieldId& field_id = dex_file.GetFieldId(stored_index);
-    if (field_id.class_idx_ != last_class_idx) {
-      last_class_idx = field_id.class_idx_;
-      last_class = class_linker->LookupResolvedType(last_class_idx, dex_cache, class_loader);
-      if (last_class != nullptr && !KeepClass(last_class)) {
-        last_class = nullptr;
-      }
-    }
-    if (last_class == nullptr) {
-      dex_cache->ClearResolvedField(stored_index, target_ptr_size_);
+    if (pair.object != nullptr) {
+      dex_cache->ClearResolvedField(pair.index, target_ptr_size_);
     }
   }
-  // Prune types.
+  // Clear types.
   for (size_t slot_idx = 0, num = dex_cache->NumResolvedTypes(); slot_idx != num; ++slot_idx) {
     mirror::TypeDexCachePair pair =
         dex_cache->GetResolvedTypes()[slot_idx].load(std::memory_order_relaxed);
-    uint32_t stored_index = pair.index;
-    ObjPtr<mirror::Class> klass = pair.object.Read();
-    if (klass != nullptr && !KeepClass(klass)) {
-      dex_cache->ClearResolvedType(dex::TypeIndex(stored_index));
+    if (!pair.object.IsNull()) {
+      dex_cache->ClearResolvedType(dex::TypeIndex(pair.index));
     }
   }
-  // Strings do not need pruning.
+  // Clear strings.
+  for (size_t slot_idx = 0, num = dex_cache->NumStrings(); slot_idx != num; ++slot_idx) {
+    mirror::StringDexCachePair pair =
+        dex_cache->GetStrings()[slot_idx].load(std::memory_order_relaxed);
+    if (!pair.object.IsNull()) {
+      dex_cache->ClearString(dex::StringIndex(pair.index));
+    }
+  }
 }
 
 void ImageWriter::PreloadDexCache(ObjPtr<mirror::DexCache> dex_cache,
@@ -1777,13 +1412,10 @@
     VLOG(compiler) << "Pruned " << class_loader_visitor.GetRemovedClassCount() << " classes";
   }
 
-  // Clear references to removed classes from the DexCaches.
+  // Completely clear DexCaches. They shall be re-filled in PreloadDexCaches if requested.
   std::vector<ObjPtr<mirror::DexCache>> dex_caches = FindDexCaches(self);
   for (ObjPtr<mirror::DexCache> dex_cache : dex_caches) {
-    // Pass the class loader associated with the DexCache. This can either be
-    // the app's `class_loader` or `nullptr` if boot class loader.
-    bool is_app_image_dex_cache = compiler_options_.IsAppImage() && IsImageObject(dex_cache);
-    PruneDexCache(dex_cache, is_app_image_dex_cache ? GetAppClassLoader() : nullptr);
+    ClearDexCache(dex_cache);
   }
 
   // Drop the array class cache in the ClassLinker, as these are roots holding those classes live.
@@ -1828,31 +1460,6 @@
   }
 }
 
-mirror::String* ImageWriter::FindInternedString(mirror::String* string) {
-  Thread* const self = Thread::Current();
-  for (const ImageInfo& image_info : image_infos_) {
-    const ObjPtr<mirror::String> found = image_info.intern_table_->LookupStrong(self, string);
-    DCHECK(image_info.intern_table_->LookupWeak(self, string) == nullptr)
-        << string->ToModifiedUtf8();
-    if (found != nullptr) {
-      return found.Ptr();
-    }
-  }
-  if (!compiler_options_.IsBootImage()) {
-    Runtime* const runtime = Runtime::Current();
-    ObjPtr<mirror::String> found = runtime->GetInternTable()->LookupStrong(self, string);
-    // If we found it in the runtime intern table it could either be in the boot image or interned
-    // during app image compilation. If it was in the boot image return that, otherwise return null
-    // since it belongs to another image space.
-    if (found != nullptr && runtime->GetHeap()->ObjectIsInBootImageSpace(found.Ptr())) {
-      return found.Ptr();
-    }
-    DCHECK(runtime->GetInternTable()->LookupWeak(self, string) == nullptr)
-        << string->ToModifiedUtf8();
-  }
-  return nullptr;
-}
-
 ObjPtr<mirror::ObjectArray<mirror::Object>> ImageWriter::CollectDexCaches(Thread* self,
                                                                           size_t oat_index) const {
   std::unordered_set<const DexFile*> image_dex_files;
@@ -1881,7 +1488,7 @@
         continue;
       }
       const DexFile* dex_file = dex_cache->GetDexFile();
-      if (IsImageObject(dex_cache)) {
+      if (IsImageDexCache(dex_cache)) {
         dex_cache_count += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u;
       }
     }
@@ -1900,7 +1507,7 @@
         continue;
       }
       const DexFile* dex_file = dex_cache->GetDexFile();
-      if (IsImageObject(dex_cache)) {
+      if (IsImageDexCache(dex_cache)) {
         non_image_dex_caches += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u;
       }
     }
@@ -1914,7 +1521,7 @@
         continue;
       }
       const DexFile* dex_file = dex_cache->GetDexFile();
-      if (IsImageObject(dex_cache) &&
+      if (IsImageDexCache(dex_cache) &&
           image_dex_files.find(dex_file) != image_dex_files.end()) {
         dex_caches->Set<false>(i, dex_cache.Ptr());
         ++i;
@@ -1940,192 +1547,161 @@
       self, GetClassRoot<ObjectArray<Object>>(class_linker), image_roots_size)));
   image_roots->Set<false>(ImageHeader::kDexCaches, dex_caches.Get());
   image_roots->Set<false>(ImageHeader::kClassRoots, class_linker->GetClassRoots());
-  image_roots->Set<false>(ImageHeader::kOomeWhenThrowingException,
-                          runtime->GetPreAllocatedOutOfMemoryErrorWhenThrowingException());
-  image_roots->Set<false>(ImageHeader::kOomeWhenThrowingOome,
-                          runtime->GetPreAllocatedOutOfMemoryErrorWhenThrowingOOME());
-  image_roots->Set<false>(ImageHeader::kOomeWhenHandlingStackOverflow,
-                          runtime->GetPreAllocatedOutOfMemoryErrorWhenHandlingStackOverflow());
-  image_roots->Set<false>(ImageHeader::kNoClassDefFoundError,
-                          runtime->GetPreAllocatedNoClassDefFoundError());
   if (!compiler_options_.IsAppImage()) {
     DCHECK(boot_image_live_objects != nullptr);
     image_roots->Set<false>(ImageHeader::kBootImageLiveObjects, boot_image_live_objects.Get());
   } else {
     DCHECK(boot_image_live_objects == nullptr);
+    image_roots->Set<false>(ImageHeader::kAppImageClassLoader, GetAppClassLoader());
   }
   for (int32_t i = 0; i != image_roots_size; ++i) {
-    if (compiler_options_.IsAppImage() && i == ImageHeader::kAppImageClassLoader) {
-      // image_roots[ImageHeader::kAppImageClassLoader] will be set later for app image.
-      continue;
-    }
     CHECK(image_roots->Get(i) != nullptr);
   }
   return image_roots.Get();
 }
 
-mirror::Object* ImageWriter::TryAssignBinSlot(WorkStack& work_stack,
-                                              mirror::Object* obj,
-                                              size_t oat_index) {
-  if (obj == nullptr || !IsImageObject(obj)) {
-    // Object is null or already in the image, there is no work to do.
-    return obj;
-  }
-  if (!IsImageBinSlotAssigned(obj)) {
-    // We want to intern all strings but also assign offsets for the source string. Since the
-    // pruning phase has already happened, if we intern a string to one in the image we still
-    // end up copying an unreachable string.
-    if (obj->IsString()) {
-      // Need to check if the string is already interned in another image info so that we don't have
-      // the intern tables of two different images contain the same string.
-      mirror::String* interned = FindInternedString(obj->AsString().Ptr());
-      if (interned == nullptr) {
-        // Not in another image space, insert to our table.
-        interned =
-            GetImageInfo(oat_index).intern_table_->InternStrongImageString(obj->AsString()).Ptr();
-        DCHECK_EQ(interned, obj);
-      }
-    } else if (obj->IsDexCache()) {
-      oat_index = GetOatIndexForDexCache(obj->AsDexCache());
-    } else if (obj->IsClass()) {
-      // Visit and assign offsets for fields and field arrays.
-      ObjPtr<mirror::Class> as_klass = obj->AsClass();
-      ObjPtr<mirror::DexCache> dex_cache = as_klass->GetDexCache();
-      DCHECK(!as_klass->IsErroneous()) << as_klass->GetStatus();
-      if (compiler_options_.IsAppImage()) {
-        // Extra sanity, no boot loader classes should be left!
-        CHECK(!IsBootClassLoaderClass(as_klass)) << as_klass->PrettyClass();
-      }
-      LengthPrefixedArray<ArtField>* fields[] = {
-          as_klass->GetSFieldsPtr(), as_klass->GetIFieldsPtr(),
-      };
-      // Overwrite the oat index value since the class' dex cache is more accurate of where it
-      // belongs.
-      oat_index = GetOatIndexForDexCache(dex_cache);
-      ImageInfo& image_info = GetImageInfo(oat_index);
-      if (!compiler_options_.IsAppImage()) {
-        // Note: Avoid locking to prevent lock order violations from root visiting;
-        // image_info.class_table_ is only accessed from the image writer.
-        image_info.class_table_->InsertWithoutLocks(as_klass);
-      }
-      for (LengthPrefixedArray<ArtField>* cur_fields : fields) {
-        // Total array length including header.
-        if (cur_fields != nullptr) {
-          const size_t header_size = LengthPrefixedArray<ArtField>::ComputeSize(0);
-          // Forward the entire array at once.
-          auto it = native_object_relocations_.find(cur_fields);
-          CHECK(it == native_object_relocations_.end()) << "Field array " << cur_fields
-                                                  << " already forwarded";
-          size_t offset = image_info.GetBinSlotSize(Bin::kArtField);
-          DCHECK(!IsInBootImage(cur_fields));
-          native_object_relocations_.emplace(
-              cur_fields,
-              NativeObjectRelocation {
-                  oat_index, offset, NativeObjectRelocationType::kArtFieldArray
-              });
-          offset += header_size;
-          // Forward individual fields so that we can quickly find where they belong.
-          for (size_t i = 0, count = cur_fields->size(); i < count; ++i) {
-            // Need to forward arrays separate of fields.
-            ArtField* field = &cur_fields->At(i);
-            auto it2 = native_object_relocations_.find(field);
-            CHECK(it2 == native_object_relocations_.end()) << "Field at index=" << i
-                << " already assigned " << field->PrettyField() << " static=" << field->IsStatic();
-            DCHECK(!IsInBootImage(field));
-            native_object_relocations_.emplace(
-                field,
-                NativeObjectRelocation { oat_index,
-                                         offset,
-                                         NativeObjectRelocationType::kArtField });
-            offset += sizeof(ArtField);
-          }
-          image_info.IncrementBinSlotSize(
-              Bin::kArtField, header_size + cur_fields->size() * sizeof(ArtField));
-          DCHECK_EQ(offset, image_info.GetBinSlotSize(Bin::kArtField));
-        }
-      }
-      // Visit and assign offsets for methods.
-      size_t num_methods = as_klass->NumMethods();
-      if (num_methods != 0) {
-        bool any_dirty = false;
-        for (auto& m : as_klass->GetMethods(target_ptr_size_)) {
-          if (WillMethodBeDirty(&m)) {
-            any_dirty = true;
-            break;
-          }
-        }
-        NativeObjectRelocationType type = any_dirty
-            ? NativeObjectRelocationType::kArtMethodDirty
-            : NativeObjectRelocationType::kArtMethodClean;
-        Bin bin_type = BinTypeForNativeRelocationType(type);
-        // Forward the entire array at once, but header first.
-        const size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
-        const size_t method_size = ArtMethod::Size(target_ptr_size_);
-        const size_t header_size = LengthPrefixedArray<ArtMethod>::ComputeSize(0,
-                                                                               method_size,
-                                                                               method_alignment);
-        LengthPrefixedArray<ArtMethod>* array = as_klass->GetMethodsPtr();
-        auto it = native_object_relocations_.find(array);
-        CHECK(it == native_object_relocations_.end())
-            << "Method array " << array << " already forwarded";
-        size_t offset = image_info.GetBinSlotSize(bin_type);
-        DCHECK(!IsInBootImage(array));
-        native_object_relocations_.emplace(array,
-            NativeObjectRelocation {
-                oat_index,
-                offset,
-                any_dirty ? NativeObjectRelocationType::kArtMethodArrayDirty
-                          : NativeObjectRelocationType::kArtMethodArrayClean });
-        image_info.IncrementBinSlotSize(bin_type, header_size);
-        for (auto& m : as_klass->GetMethods(target_ptr_size_)) {
-          AssignMethodOffset(&m, type, oat_index);
-        }
-        (any_dirty ? dirty_methods_ : clean_methods_) += num_methods;
-      }
-      // Assign offsets for all runtime methods in the IMT since these may hold conflict tables
-      // live.
-      if (as_klass->ShouldHaveImt()) {
-        ImTable* imt = as_klass->GetImt(target_ptr_size_);
-        if (TryAssignImTableOffset(imt, oat_index)) {
-          // Since imt's can be shared only do this the first time to not double count imt method
-          // fixups.
-          for (size_t i = 0; i < ImTable::kSize; ++i) {
-            ArtMethod* imt_method = imt->Get(i, target_ptr_size_);
-            DCHECK(imt_method != nullptr);
-            if (imt_method->IsRuntimeMethod() &&
-                !IsInBootImage(imt_method) &&
-                !NativeRelocationAssigned(imt_method)) {
-              AssignMethodOffset(imt_method, NativeObjectRelocationType::kRuntimeMethod, oat_index);
-            }
-          }
-        }
-      }
-    } else if (obj->IsClassLoader()) {
-      // Register the class loader if it has a class table.
-      // The fake boot class loader should not get registered.
-      ObjPtr<mirror::ClassLoader> class_loader = obj->AsClassLoader();
-      if (class_loader->GetClassTable() != nullptr) {
-        DCHECK(compiler_options_.IsAppImage());
-        if (class_loader == GetAppClassLoader()) {
-          ImageInfo& image_info = GetImageInfo(oat_index);
-          // Note: Avoid locking to prevent lock order violations from root visiting;
-          // image_info.class_table_ table is only accessed from the image writer
-          // and class_loader->GetClassTable() is iterated but not modified.
-          image_info.class_table_->CopyWithoutLocks(*class_loader->GetClassTable());
-        }
-      }
-    }
-    AssignImageBinSlot(obj, oat_index);
-    work_stack.emplace(obj, oat_index);
-  }
+void ImageWriter::RecordNativeRelocations(ObjPtr<mirror::Object> obj, size_t oat_index) {
   if (obj->IsString()) {
-    // Always return the interned string if there exists one.
-    mirror::String* interned = FindInternedString(obj->AsString().Ptr());
-    if (interned != nullptr) {
-      return interned;
+    ObjPtr<mirror::String> str = obj->AsString();
+    InternTable* intern_table = Runtime::Current()->GetInternTable();
+    Thread* const self = Thread::Current();
+    if (intern_table->LookupStrong(self, str) == str) {
+      DCHECK(std::none_of(image_infos_.begin(),
+                          image_infos_.end(),
+                          [=](ImageInfo& info) REQUIRES_SHARED(Locks::mutator_lock_) {
+                            return info.intern_table_->LookupStrong(self, str) != nullptr;
+                          }));
+      ObjPtr<mirror::String> interned =
+          GetImageInfo(oat_index).intern_table_->InternStrongImageString(str);
+      DCHECK_EQ(interned, obj);
+    }
+  } else if (obj->IsDexCache()) {
+    DCHECK_EQ(oat_index, GetOatIndexForDexFile(obj->AsDexCache()->GetDexFile()));
+  } else if (obj->IsClass()) {
+    // Visit and assign offsets for fields and field arrays.
+    ObjPtr<mirror::Class> as_klass = obj->AsClass();
+    DCHECK_EQ(oat_index, GetOatIndexForClass(as_klass));
+    DCHECK(!as_klass->IsErroneous()) << as_klass->GetStatus();
+    if (compiler_options_.IsAppImage()) {
+      // Extra sanity, no boot loader classes should be left!
+      CHECK(!IsBootClassLoaderClass(as_klass)) << as_klass->PrettyClass();
+    }
+    LengthPrefixedArray<ArtField>* fields[] = {
+        as_klass->GetSFieldsPtr(), as_klass->GetIFieldsPtr(),
+    };
+    ImageInfo& image_info = GetImageInfo(oat_index);
+    if (!compiler_options_.IsAppImage()) {
+      // Note: Avoid locking to prevent lock order violations from root visiting;
+      // image_info.class_table_ is only accessed from the image writer.
+      image_info.class_table_->InsertWithoutLocks(as_klass);
+    }
+    for (LengthPrefixedArray<ArtField>* cur_fields : fields) {
+      // Total array length including header.
+      if (cur_fields != nullptr) {
+        const size_t header_size = LengthPrefixedArray<ArtField>::ComputeSize(0);
+        // Forward the entire array at once.
+        auto it = native_object_relocations_.find(cur_fields);
+        CHECK(it == native_object_relocations_.end()) << "Field array " << cur_fields
+                                                << " already forwarded";
+        size_t offset = image_info.GetBinSlotSize(Bin::kArtField);
+        DCHECK(!IsInBootImage(cur_fields));
+        native_object_relocations_.emplace(
+            cur_fields,
+            NativeObjectRelocation {
+                oat_index, offset, NativeObjectRelocationType::kArtFieldArray
+            });
+        offset += header_size;
+        // Forward individual fields so that we can quickly find where they belong.
+        for (size_t i = 0, count = cur_fields->size(); i < count; ++i) {
+          // Need to forward arrays separate of fields.
+          ArtField* field = &cur_fields->At(i);
+          auto it2 = native_object_relocations_.find(field);
+          CHECK(it2 == native_object_relocations_.end()) << "Field at index=" << i
+              << " already assigned " << field->PrettyField() << " static=" << field->IsStatic();
+          DCHECK(!IsInBootImage(field));
+          native_object_relocations_.emplace(
+              field,
+              NativeObjectRelocation { oat_index,
+                                       offset,
+                                       NativeObjectRelocationType::kArtField });
+          offset += sizeof(ArtField);
+        }
+        image_info.IncrementBinSlotSize(
+            Bin::kArtField, header_size + cur_fields->size() * sizeof(ArtField));
+        DCHECK_EQ(offset, image_info.GetBinSlotSize(Bin::kArtField));
+      }
+    }
+    // Visit and assign offsets for methods.
+    size_t num_methods = as_klass->NumMethods();
+    if (num_methods != 0) {
+      bool any_dirty = false;
+      for (auto& m : as_klass->GetMethods(target_ptr_size_)) {
+        if (WillMethodBeDirty(&m)) {
+          any_dirty = true;
+          break;
+        }
+      }
+      NativeObjectRelocationType type = any_dirty
+          ? NativeObjectRelocationType::kArtMethodDirty
+          : NativeObjectRelocationType::kArtMethodClean;
+      Bin bin_type = BinTypeForNativeRelocationType(type);
+      // Forward the entire array at once, but header first.
+      const size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
+      const size_t method_size = ArtMethod::Size(target_ptr_size_);
+      const size_t header_size = LengthPrefixedArray<ArtMethod>::ComputeSize(0,
+                                                                             method_size,
+                                                                             method_alignment);
+      LengthPrefixedArray<ArtMethod>* array = as_klass->GetMethodsPtr();
+      auto it = native_object_relocations_.find(array);
+      CHECK(it == native_object_relocations_.end())
+          << "Method array " << array << " already forwarded";
+      size_t offset = image_info.GetBinSlotSize(bin_type);
+      DCHECK(!IsInBootImage(array));
+      native_object_relocations_.emplace(array,
+          NativeObjectRelocation {
+              oat_index,
+              offset,
+              any_dirty ? NativeObjectRelocationType::kArtMethodArrayDirty
+                        : NativeObjectRelocationType::kArtMethodArrayClean });
+      image_info.IncrementBinSlotSize(bin_type, header_size);
+      for (auto& m : as_klass->GetMethods(target_ptr_size_)) {
+        AssignMethodOffset(&m, type, oat_index);
+      }
+      (any_dirty ? dirty_methods_ : clean_methods_) += num_methods;
+    }
+    // Assign offsets for all runtime methods in the IMT since these may hold conflict tables
+    // live.
+    if (as_klass->ShouldHaveImt()) {
+      ImTable* imt = as_klass->GetImt(target_ptr_size_);
+      if (TryAssignImTableOffset(imt, oat_index)) {
+        // Since imt's can be shared only do this the first time to not double count imt method
+        // fixups.
+        for (size_t i = 0; i < ImTable::kSize; ++i) {
+          ArtMethod* imt_method = imt->Get(i, target_ptr_size_);
+          DCHECK(imt_method != nullptr);
+          if (imt_method->IsRuntimeMethod() &&
+              !IsInBootImage(imt_method) &&
+              !NativeRelocationAssigned(imt_method)) {
+            AssignMethodOffset(imt_method, NativeObjectRelocationType::kRuntimeMethod, oat_index);
+          }
+        }
+      }
+    }
+  } else if (obj->IsClassLoader()) {
+    // Register the class loader if it has a class table.
+    // The fake boot class loader should not get registered.
+    ObjPtr<mirror::ClassLoader> class_loader = obj->AsClassLoader();
+    if (class_loader->GetClassTable() != nullptr) {
+      DCHECK(compiler_options_.IsAppImage());
+      if (class_loader == GetAppClassLoader()) {
+        ImageInfo& image_info = GetImageInfo(oat_index);
+        // Note: Avoid locking to prevent lock order violations from root visiting;
+        // image_info.class_table_ table is only accessed from the image writer
+        // and class_loader->GetClassTable() is iterated but not modified.
+        image_info.class_table_->CopyWithoutLocks(*class_loader->GetClassTable());
+      }
     }
   }
-  return obj;
 }
 
 bool ImageWriter::NativeRelocationAssigned(void* ptr) const {
@@ -2184,23 +1760,213 @@
   image_info.IncrementBinSlotSize(bin_type, ArtMethod::Size(target_ptr_size_));
 }
 
-void ImageWriter::UnbinObjectsIntoOffset(mirror::Object* obj) {
-  DCHECK(!IsInBootImage(obj));
-  CHECK(obj != nullptr);
-
-  // We know the bin slot, and the total bin sizes for all objects by now,
-  // so calculate the object's final image offset.
-
-  DCHECK(IsImageBinSlotAssigned(obj));
-  BinSlot bin_slot = GetImageBinSlot(obj);
-  // Change the lockword from a bin slot into an offset
-  AssignImageOffset(obj, bin_slot);
-}
-
-class ImageWriter::VisitReferencesVisitor {
+class ImageWriter::LayoutHelper {
  public:
-  VisitReferencesVisitor(ImageWriter* image_writer, WorkStack* work_stack, size_t oat_index)
-      : image_writer_(image_writer), work_stack_(work_stack), oat_index_(oat_index) {}
+  explicit LayoutHelper(ImageWriter* image_writer)
+      : image_writer_(image_writer) {
+    bin_objects_.resize(image_writer_->image_infos_.size());
+    for (auto& inner : bin_objects_) {
+      inner.resize(enum_cast<size_t>(Bin::kMirrorCount));
+    }
+  }
+
+  void ProcessDexFileObjects(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+  void ProcessRoots(VariableSizedHandleScope* handles) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  void ProcessWorkQueue() REQUIRES_SHARED(Locks::mutator_lock_);
+
+  void VerifyImageBinSlotsAssigned() REQUIRES_SHARED(Locks::mutator_lock_);
+
+  void FinalizeBinSlotOffsets() REQUIRES_SHARED(Locks::mutator_lock_);
+
+  /*
+   * Collects the string reference info necessary for loading app images.
+   *
+   * Because AppImages may contain interned strings that must be deduplicated
+   * with previously interned strings when loading the app image, we need to
+   * visit references to these strings and update them to point to the correct
+   * string. To speed up the visiting of references at load time we include
+   * a list of offsets to string references in the AppImage.
+   */
+  void CollectStringReferenceInfo(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+  class CollectClassesVisitor;
+  class CollectRootsVisitor;
+  class CollectStringReferenceVisitor;
+  class VisitReferencesVisitor;
+
+  using WorkQueue = std::deque<std::pair<ObjPtr<mirror::Object>, size_t>>;
+
+  void VisitReferences(ObjPtr<mirror::Object> obj, size_t oat_index)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  bool TryAssignBinSlot(ObjPtr<mirror::Object> obj, size_t oat_index)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  ImageWriter* const image_writer_;
+
+  // Work list of <object, oat_index> for objects. Everything in the queue must already be
+  // assigned a bin slot.
+  WorkQueue work_queue_;
+
+  // Objects for individual bins. Indexed by `oat_index` and `bin`.
+  // Cannot use ObjPtr<> because of invalidation in Heap::VisitObjects().
+  dchecked_vector<dchecked_vector<dchecked_vector<mirror::Object*>>> bin_objects_;
+};
+
+class ImageWriter::LayoutHelper::CollectClassesVisitor : public ClassVisitor {
+ public:
+  explicit CollectClassesVisitor(ImageWriter* image_writer)
+      : image_writer_(image_writer),
+        dex_files_(image_writer_->compiler_options_.GetDexFilesForOatFile()) {}
+
+  bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (!image_writer_->IsInBootImage(klass.Ptr())) {
+      ObjPtr<mirror::Class> component_type = klass;
+      size_t dimension = 0u;
+      while (component_type->IsArrayClass()) {
+        ++dimension;
+        component_type = component_type->GetComponentType();
+      }
+      DCHECK(!component_type->IsProxyClass());
+      size_t dex_file_index;
+      uint32_t class_def_index = 0u;
+      if (UNLIKELY(component_type->IsPrimitive())) {
+        DCHECK(image_writer_->compiler_options_.IsBootImage());
+        dex_file_index = 0u;
+        class_def_index = enum_cast<uint32_t>(component_type->GetPrimitiveType());
+      } else {
+        auto it = std::find(dex_files_.begin(), dex_files_.end(), &component_type->GetDexFile());
+        DCHECK(it != dex_files_.end()) << klass->PrettyDescriptor();
+        dex_file_index = std::distance(dex_files_.begin(), it) + 1u;  // 0 is for primitive types.
+        class_def_index = component_type->GetDexClassDefIndex();
+      }
+      klasses_.push_back({klass, dex_file_index, class_def_index, dimension});
+    }
+    return true;
+  }
+
+  WorkQueue SortAndReleaseClasses()
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    std::sort(klasses_.begin(), klasses_.end());
+
+    WorkQueue result;
+    size_t last_dex_file_index = static_cast<size_t>(-1);
+    size_t last_oat_index = static_cast<size_t>(-1);
+    for (const ClassEntry& entry : klasses_) {
+      if (last_dex_file_index != entry.dex_file_index) {
+        if (UNLIKELY(entry.dex_file_index == 0u)) {
+          last_oat_index = GetDefaultOatIndex();  // Primitive type.
+        } else {
+          uint32_t dex_file_index = entry.dex_file_index - 1u;  // 0 is for primitive types.
+          last_oat_index = image_writer_->GetOatIndexForDexFile(dex_files_[dex_file_index]);
+        }
+        last_dex_file_index = entry.dex_file_index;
+      }
+      result.emplace_back(entry.klass, last_oat_index);
+    }
+    klasses_.clear();
+    return result;
+  }
+
+ private:
+  struct ClassEntry {
+    ObjPtr<mirror::Class> klass;
+    // We shall sort classes by dex file, class def index and array dimension.
+    size_t dex_file_index;
+    uint32_t class_def_index;
+    size_t dimension;
+
+    bool operator<(const ClassEntry& other) const {
+      return std::tie(dex_file_index, class_def_index, dimension) <
+             std::tie(other.dex_file_index, other.class_def_index, other.dimension);
+    }
+  };
+
+  ImageWriter* const image_writer_;
+  ArrayRef<const DexFile* const> dex_files_;
+  std::deque<ClassEntry> klasses_;
+};
+
+class ImageWriter::LayoutHelper::CollectRootsVisitor {
+ public:
+  CollectRootsVisitor() = default;
+
+  std::vector<ObjPtr<mirror::Object>> ReleaseRoots() {
+    std::vector<ObjPtr<mirror::Object>> roots;
+    roots.swap(roots_);
+    return roots;
+  }
+
+  void VisitRootIfNonNull(StackReference<mirror::Object>* ref) {
+    if (!ref->IsNull()) {
+      roots_.push_back(ref->AsMirrorPtr());
+    }
+  }
+
+ private:
+  std::vector<ObjPtr<mirror::Object>> roots_;
+};
+
+class ImageWriter::LayoutHelper::CollectStringReferenceVisitor {
+ public:
+  explicit CollectStringReferenceVisitor(
+      const ImageWriter* image_writer,
+      size_t oat_index,
+      std::vector<AppImageReferenceOffsetInfo>* const string_reference_offsets,
+      ObjPtr<mirror::Object> current_obj)
+      : image_writer_(image_writer),
+        oat_index_(oat_index),
+        string_reference_offsets_(string_reference_offsets),
+        current_obj_(current_obj) {}
+
+  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (!root->IsNull()) {
+      VisitRoot(root);
+    }
+  }
+
+  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+      REQUIRES_SHARED(Locks::mutator_lock_)  {
+    // Only dex caches have native String roots. These are collected separately.
+    DCHECK(current_obj_->IsDexCache() ||
+           !image_writer_->IsInternedAppImageStringReference(root->AsMirrorPtr()))
+        << mirror::Object::PrettyTypeOf(current_obj_);
+  }
+
+  // Collects info for managed fields that reference managed Strings.
+  void operator() (ObjPtr<mirror::Object> obj,
+                   MemberOffset member_offset,
+                   bool is_static ATTRIBUTE_UNUSED) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Object> referred_obj =
+        obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(member_offset);
+
+    if (image_writer_->IsInternedAppImageStringReference(referred_obj)) {
+      size_t base_offset = image_writer_->GetImageOffset(current_obj_.Ptr(), oat_index_);
+      string_reference_offsets_->emplace_back(base_offset, member_offset.Uint32Value());
+    }
+  }
+
+  ALWAYS_INLINE
+  void operator() (ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
+                   ObjPtr<mirror::Reference> ref) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    operator()(ref, mirror::Reference::ReferentOffset(), /* is_static */ false);
+  }
+
+ private:
+  const ImageWriter* const image_writer_;
+  const size_t oat_index_;
+  std::vector<AppImageReferenceOffsetInfo>* const string_reference_offsets_;
+  const ObjPtr<mirror::Object> current_obj_;
+};
+
+class ImageWriter::LayoutHelper::VisitReferencesVisitor {
+ public:
+  VisitReferencesVisitor(LayoutHelper* helper, size_t oat_index)
+      : helper_(helper), oat_index_(oat_index) {}
 
   // Fix up separately since we also need to fix up method entrypoints.
   ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
@@ -2232,50 +1998,397 @@
 
  private:
   mirror::Object* VisitReference(mirror::Object* ref) const REQUIRES_SHARED(Locks::mutator_lock_) {
-    return image_writer_->TryAssignBinSlot(*work_stack_, ref, oat_index_);
+    if (helper_->TryAssignBinSlot(ref, oat_index_)) {
+      // Remember how many objects we're adding at the front of the queue as we want
+      // to reverse that range to process these references in the order of addition.
+      helper_->work_queue_.emplace_front(ref, oat_index_);
+    }
+    if (ClassLinker::kAppImageMayContainStrings &&
+        helper_->image_writer_->compiler_options_.IsAppImage() &&
+        helper_->image_writer_->IsInternedAppImageStringReference(ref)) {
+      helper_->image_writer_->image_infos_[oat_index_].num_string_references_ += 1u;
+    }
+    return ref;
   }
 
-  ImageWriter* const image_writer_;
-  WorkStack* const work_stack_;
+  LayoutHelper* const helper_;
   const size_t oat_index_;
 };
 
-class ImageWriter::GetRootsVisitor : public RootVisitor  {
- public:
-  explicit GetRootsVisitor(std::vector<mirror::Object*>* roots) : roots_(roots) {}
+void ImageWriter::LayoutHelper::ProcessDexFileObjects(Thread* self) {
+  Runtime* runtime = Runtime::Current();
+  ClassLinker* class_linker = runtime->GetClassLinker();
 
-  void VisitRoots(mirror::Object*** roots,
-                  size_t count,
-                  const RootInfo& info ATTRIBUTE_UNUSED) override
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    for (size_t i = 0; i < count; ++i) {
-      roots_->push_back(*roots[i]);
+  // To ensure deterministic output, populate the work queue with objects in a pre-defined order.
+  // Note: If we decide to implement a profile-guided layout, this is the place to do so.
+
+  // Get initial work queue with the image classes and assign their bin slots.
+  CollectClassesVisitor visitor(image_writer_);
+  class_linker->VisitClasses(&visitor);
+  DCHECK(work_queue_.empty());
+  work_queue_ = visitor.SortAndReleaseClasses();
+  for (const std::pair<ObjPtr<mirror::Object>, size_t>& entry : work_queue_) {
+    DCHECK(entry.first->IsClass());
+    bool assigned = TryAssignBinSlot(entry.first, entry.second);
+    DCHECK(assigned);
+  }
+
+  // Assign bin slots to strings and dex caches.
+  for (const DexFile* dex_file : image_writer_->compiler_options_.GetDexFilesForOatFile()) {
+    auto it = image_writer_->dex_file_oat_index_map_.find(dex_file);
+    DCHECK(it != image_writer_->dex_file_oat_index_map_.end()) << dex_file->GetLocation();
+    const size_t oat_index = it->second;
+    // Assign bin slots for strings defined in this dex file in StringId (lexicographical) order.
+    InternTable* const intern_table = runtime->GetInternTable();
+    for (size_t i = 0, count = dex_file->NumStringIds(); i < count; ++i) {
+      uint32_t utf16_length;
+      const char* utf8_data = dex_file->StringDataAndUtf16LengthByIdx(dex::StringIndex(i),
+                                                                      &utf16_length);
+      ObjPtr<mirror::String> string = intern_table->LookupStrong(self, utf16_length, utf8_data);
+      if (string != nullptr && !image_writer_->IsInBootImage(string.Ptr())) {
+        // Try to assign bin slot to this string but do not add it to the work list.
+        // The only reference in a String is its class, processed above for the boot image.
+        bool assigned = TryAssignBinSlot(string, oat_index);
+        DCHECK(assigned ||
+               // We could have seen the same string in an earlier dex file.
+               dex_file != image_writer_->compiler_options_.GetDexFilesForOatFile().front());
+      }
+    }
+    // Assign bin slot to this file's dex cache and add it to the end of the work queue.
+    ObjPtr<mirror::DexCache> dex_cache = class_linker->FindDexCache(self, *dex_file);
+    DCHECK(dex_cache != nullptr);
+    bool assigned = TryAssignBinSlot(dex_cache, oat_index);
+    DCHECK(assigned);
+    work_queue_.emplace_back(dex_cache, oat_index);
+  }
+
+  // Since classes and dex caches have been assigned to their bins, when we process a class
+  // we do not follow through the class references or dex caches, so we correctly process
+  // only objects actually belonging to that class before taking a new class from the queue.
+  // If multiple class statics reference the same object (directly or indirectly), the object
+  // is treated as belonging to the first encountered referencing class.
+  ProcessWorkQueue();
+}
+
+void ImageWriter::LayoutHelper::ProcessRoots(VariableSizedHandleScope* handles) {
+  // Assign bin slots to the image objects referenced by `handles`, add them to the work queue
+  // and process the work queue. These objects are the image roots and boot image live objects
+  // and they reference other objects needed for the image, for example the array of dex cache
+  // references, or the pre-allocated exceptions for the boot image.
+  DCHECK(work_queue_.empty());
+  CollectRootsVisitor visitor;
+  handles->VisitRoots(visitor);
+  for (ObjPtr<mirror::Object> root : visitor.ReleaseRoots()) {
+    if (TryAssignBinSlot(root, GetDefaultOatIndex())) {
+      work_queue_.emplace_back(root, GetDefaultOatIndex());
+    }
+  }
+  ProcessWorkQueue();
+}
+
+void ImageWriter::LayoutHelper::ProcessWorkQueue() {
+  while (!work_queue_.empty()) {
+    std::pair<ObjPtr<mirror::Object>, size_t> pair = work_queue_.front();
+    work_queue_.pop_front();
+    VisitReferences(/*obj=*/ pair.first, /*oat_index=*/ pair.second);
+  }
+}
+
+void ImageWriter::LayoutHelper::VerifyImageBinSlotsAssigned() {
+  std::vector<mirror::Object*> carveout;
+  if (image_writer_->compiler_options_.IsAppImage()) {
+    // Exclude boot class path dex caches that are not part of the boot image.
+    // Also exclude their locations if they have not been visited through another path.
+    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+    Thread* self = Thread::Current();
+    ReaderMutexLock mu(self, *Locks::dex_lock_);
+    for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
+      ObjPtr<mirror::DexCache> dex_cache =
+          ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root));
+      if (dex_cache == nullptr ||
+          image_writer_->IsInBootImage(dex_cache.Ptr()) ||
+          ContainsElement(image_writer_->compiler_options_.GetDexFilesForOatFile(),
+                          dex_cache->GetDexFile())) {
+        continue;
+      }
+      CHECK(!image_writer_->IsImageBinSlotAssigned(dex_cache.Ptr()));
+      carveout.push_back(dex_cache.Ptr());
+      ObjPtr<mirror::String> location = dex_cache->GetLocation();
+      if (!image_writer_->IsImageBinSlotAssigned(location.Ptr())) {
+        carveout.push_back(location.Ptr());
+      }
     }
   }
 
-  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
-                  size_t count,
-                  const RootInfo& info ATTRIBUTE_UNUSED) override
+  std::vector<mirror::Object*> missed_objects;
+  auto ensure_bin_slots_assigned = [&](mirror::Object* obj)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    for (size_t i = 0; i < count; ++i) {
-      roots_->push_back(roots[i]->AsMirrorPtr());
+    if (!image_writer_->IsInBootImage(obj)) {
+      if (!UNLIKELY(image_writer_->IsImageBinSlotAssigned(obj))) {
+        // Ignore the `carveout` objects.
+        if (ContainsElement(carveout, obj)) {
+          return;
+        }
+        // Ignore finalizer references for the dalvik.system.DexFile objects referenced by
+        // the app class loader.
+        if (obj->IsFinalizerReferenceInstance()) {
+          ArtField* ref_field =
+              obj->GetClass()->FindInstanceField("referent", "Ljava/lang/Object;");
+          CHECK(ref_field != nullptr);
+          ObjPtr<mirror::Object> ref = ref_field->GetObject(obj);
+          CHECK(ref != nullptr);
+          CHECK(image_writer_->IsImageBinSlotAssigned(ref.Ptr()));
+          ObjPtr<mirror::Class> klass = ref->GetClass();
+          CHECK(klass == WellKnownClasses::ToClass(WellKnownClasses::dalvik_system_DexFile));
+          // Note: The app class loader is used only for checking against the runtime
+          // class loader, the dex file cookie is cleared and therefore we do not need
+          // to run the finalizer even if we implement app image objects collection.
+          ArtField* field = jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie);
+          CHECK(field->GetObject(ref) == nullptr);
+          return;
+        }
+        if (obj->IsString()) {
+          // Ignore interned strings. These may come from reflection interning method names.
+          // TODO: Make dex file strings weak interns and GC them before writing the image.
+          Runtime* runtime = Runtime::Current();
+          ObjPtr<mirror::String> interned =
+              runtime->GetInternTable()->LookupStrong(Thread::Current(), obj->AsString());
+          if (interned == obj) {
+            return;
+          }
+        }
+        missed_objects.push_back(obj);
+      }
     }
+  };
+  Runtime::Current()->GetHeap()->VisitObjects(ensure_bin_slots_assigned);
+  if (!missed_objects.empty()) {
+    const gc::Verification* v = Runtime::Current()->GetHeap()->GetVerification();
+    size_t num_missed_objects = missed_objects.size();
+    size_t num_paths = std::min<size_t>(num_missed_objects, 5u);  // Do not flood the output.
+    ArrayRef<mirror::Object*> missed_objects_head =
+        ArrayRef<mirror::Object*>(missed_objects).SubArray(/*pos=*/ 0u, /*length=*/ num_paths);
+    for (mirror::Object* obj : missed_objects_head) {
+      LOG(ERROR) << "Image object without assigned bin slot: "
+          << mirror::Object::PrettyTypeOf(obj) << " " << obj
+          << " " << v->FirstPathFromRootSet(obj);
+    }
+    LOG(FATAL) << "Found " << num_missed_objects << " objects without assigned bin slots.";
+  }
+}
+
+void ImageWriter::LayoutHelper::FinalizeBinSlotOffsets() {
+  // Calculate bin slot offsets and adjust for region padding if needed.
+  const size_t region_size = image_writer_->region_size_;
+  const size_t num_image_infos = image_writer_->image_infos_.size();
+  for (size_t oat_index = 0; oat_index != num_image_infos; ++oat_index) {
+    ImageInfo& image_info = image_writer_->image_infos_[oat_index];
+    size_t bin_offset = image_writer_->image_objects_offset_begin_;
+
+    for (size_t i = 0; i != kNumberOfBins; ++i) {
+      Bin bin = enum_cast<Bin>(i);
+      switch (bin) {
+        case Bin::kArtMethodClean:
+        case Bin::kArtMethodDirty: {
+          bin_offset = RoundUp(bin_offset, ArtMethod::Alignment(image_writer_->target_ptr_size_));
+          break;
+        }
+        case Bin::kDexCacheArray:
+          bin_offset =
+              RoundUp(bin_offset, DexCacheArraysLayout::Alignment(image_writer_->target_ptr_size_));
+          break;
+        case Bin::kImTable:
+        case Bin::kIMTConflictTable: {
+          bin_offset = RoundUp(bin_offset, static_cast<size_t>(image_writer_->target_ptr_size_));
+          break;
+        }
+        default: {
+          // Normal alignment.
+        }
+      }
+      image_info.bin_slot_offsets_[i] = bin_offset;
+
+      // If the bin is for mirror objects, we may need to add region padding and update offsets.
+      if (i < enum_cast<size_t>(Bin::kMirrorCount) && region_size != 0u) {
+        const size_t offset_after_header = bin_offset - sizeof(ImageHeader);
+        size_t remaining_space =
+            RoundUp(offset_after_header + 1u, region_size) - offset_after_header;
+        // Exercise the loop below in debug builds to get coverage.
+        if (kIsDebugBuild || remaining_space < image_info.bin_slot_sizes_[i]) {
+          // The bin crosses a region boundary. Add padding if needed.
+          size_t object_offset = 0u;
+          size_t padding = 0u;
+          for (mirror::Object* object : bin_objects_[oat_index][i]) {
+            BinSlot bin_slot = image_writer_->GetImageBinSlot(object, oat_index);
+            DCHECK_EQ(enum_cast<size_t>(bin_slot.GetBin()), i);
+            DCHECK_EQ(bin_slot.GetOffset() + padding, object_offset);
+            size_t object_size = RoundUp(object->SizeOf<kVerifyNone>(), kObjectAlignment);
+
+            auto add_padding = [&](bool tail_region) {
+              DCHECK_NE(remaining_space, 0u);
+              DCHECK_LT(remaining_space, region_size);
+              DCHECK_ALIGNED(remaining_space, kObjectAlignment);
+              // TODO When copying to heap regions, leave the tail region padding zero-filled.
+              if (!tail_region || true) {
+                image_info.padding_offsets_.push_back(bin_offset + object_offset);
+              }
+              image_info.bin_slot_sizes_[i] += remaining_space;
+              padding += remaining_space;
+              object_offset += remaining_space;
+              remaining_space = region_size;
+            };
+            if (object_size > remaining_space) {
+              // Padding needed if we're not at region boundary (with a multi-region object).
+              if (remaining_space != region_size) {
+                // TODO: Instead of adding padding, we should consider reordering the bins
+                // or objects to reduce wasted space.
+                add_padding(/*tail_region=*/ false);
+              }
+              DCHECK_EQ(remaining_space, region_size);
+              // For huge objects, adjust the remaining space to hold the object and some more.
+              if (object_size > region_size) {
+                remaining_space = RoundUp(object_size + 1u, region_size);
+              }
+            } else if (remaining_space == object_size) {
+              // Move to the next region, no padding needed.
+              remaining_space += region_size;
+            }
+            DCHECK_GT(remaining_space, object_size);
+            remaining_space -= object_size;
+            image_writer_->UpdateImageBinSlotOffset(object, oat_index, object_offset);
+            object_offset += object_size;
+            // Add padding to the tail region of huge objects if not region-aligned.
+            if (object_size > region_size && remaining_space != region_size) {
+              DCHECK(!IsAlignedParam(object_size, region_size));
+              add_padding(/*tail_region=*/ true);
+            }
+          }
+          image_writer_->region_alignment_wasted_ += padding;
+          image_info.image_end_ += padding;
+        }
+      }
+      bin_offset += image_info.bin_slot_sizes_[i];
+    }
+    // NOTE: There may be additional padding between the bin slots and the intern table.
+    DCHECK_EQ(
+        image_info.image_end_,
+        image_info.GetBinSizeSum(Bin::kMirrorCount) + image_writer_->image_objects_offset_begin_);
   }
 
- private:
-  std::vector<mirror::Object*>* const roots_;
-};
+  VLOG(image) << "Space wasted for region alignment " << image_writer_->region_alignment_wasted_;
+}
 
-void ImageWriter::ProcessWorkStack(WorkStack* work_stack) {
-  while (!work_stack->empty()) {
-    std::pair<mirror::Object*, size_t> pair(work_stack->top());
-    work_stack->pop();
-    VisitReferencesVisitor visitor(this, work_stack, /*oat_index*/ pair.second);
-    // Walk references and assign bin slots for them.
-    pair.first->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
-        visitor,
-        visitor);
+void ImageWriter::LayoutHelper::CollectStringReferenceInfo(Thread* self) {
+  size_t managed_string_refs = 0u;
+  size_t total_string_refs = 0u;
+
+  const size_t num_image_infos = image_writer_->image_infos_.size();
+  for (size_t oat_index = 0; oat_index != num_image_infos; ++oat_index) {
+    ImageInfo& image_info = image_writer_->image_infos_[oat_index];
+    DCHECK(image_info.string_reference_offsets_.empty());
+    image_info.string_reference_offsets_.reserve(image_info.num_string_references_);
+
+    for (size_t i = 0; i < enum_cast<size_t>(Bin::kMirrorCount); ++i) {
+      for (mirror::Object* obj : bin_objects_[oat_index][i]) {
+        CollectStringReferenceVisitor visitor(image_writer_,
+                                              oat_index,
+                                              &image_info.string_reference_offsets_,
+                                              obj);
+        /*
+         * References to managed strings can occur either in the managed heap or in
+         * native memory regions. Information about managed references is collected
+         * by the CollectStringReferenceVisitor and directly added to the image info.
+         *
+         * Native references to managed strings can only occur through DexCache
+         * objects. This is verified by the visitor in debug mode and the references
+         * are collected separately below.
+         */
+        obj->VisitReferences</*kVisitNativeRoots=*/ kIsDebugBuild,
+                             kVerifyNone,
+                             kWithoutReadBarrier>(visitor, visitor);
+      }
+    }
+
+    managed_string_refs += image_info.string_reference_offsets_.size();
+
+    // Collect dex cache string arrays.
+    for (const DexFile* dex_file : image_writer_->compiler_options_.GetDexFilesForOatFile()) {
+      if (image_writer_->GetOatIndexForDexFile(dex_file) == oat_index) {
+        ObjPtr<mirror::DexCache> dex_cache =
+            Runtime::Current()->GetClassLinker()->FindDexCache(self, *dex_file);
+        DCHECK(dex_cache != nullptr);
+        size_t base_offset = image_writer_->GetImageOffset(dex_cache.Ptr(), oat_index);
+
+        // Visit all string cache entries.
+        mirror::StringDexCacheType* strings = dex_cache->GetStrings();
+        const size_t num_strings = dex_cache->NumStrings();
+        for (uint32_t index = 0; index != num_strings; ++index) {
+          ObjPtr<mirror::String> referred_string = strings[index].load().object.Read();
+          if (image_writer_->IsInternedAppImageStringReference(referred_string)) {
+            image_info.string_reference_offsets_.emplace_back(
+                SetDexCacheStringNativeRefTag(base_offset), index);
+          }
+        }
+
+        // Visit all pre-resolved string entries.
+        GcRoot<mirror::String>* preresolved_strings = dex_cache->GetPreResolvedStrings();
+        const size_t num_pre_resolved_strings = dex_cache->NumPreResolvedStrings();
+        for (uint32_t index = 0; index != num_pre_resolved_strings; ++index) {
+          ObjPtr<mirror::String> referred_string = preresolved_strings[index].Read();
+          if (image_writer_->IsInternedAppImageStringReference(referred_string)) {
+            image_info.string_reference_offsets_.emplace_back(
+                SetDexCachePreResolvedStringNativeRefTag(base_offset), index);
+          }
+        }
+      }
+    }
+
+    total_string_refs += image_info.string_reference_offsets_.size();
+
+    // Check that we collected the same number of string references as we saw in the previous pass.
+    CHECK_EQ(image_info.string_reference_offsets_.size(), image_info.num_string_references_);
   }
+
+  VLOG(compiler) << "Dex2Oat:AppImage:stringReferences = " << total_string_refs
+      << " (managed: " << managed_string_refs
+      << ", native: " << (total_string_refs - managed_string_refs) << ")";
+}
+
+void ImageWriter::LayoutHelper::VisitReferences(ObjPtr<mirror::Object> obj, size_t oat_index) {
+  size_t old_work_queue_size = work_queue_.size();
+  VisitReferencesVisitor visitor(this, oat_index);
+  // Walk references and assign bin slots for them.
+  obj->VisitReferences</*kVisitNativeRoots=*/ true, kVerifyNone, kWithoutReadBarrier>(
+      visitor,
+      visitor);
+  // Put the added references in the queue in the order in which they were added.
+  // The visitor just pushes them to the front as it visits them.
+  DCHECK_LE(old_work_queue_size, work_queue_.size());
+  size_t num_added = work_queue_.size() - old_work_queue_size;
+  std::reverse(work_queue_.begin(), work_queue_.begin() + num_added);
+}
+
+bool ImageWriter::LayoutHelper::TryAssignBinSlot(ObjPtr<mirror::Object> obj, size_t oat_index) {
+  if (obj == nullptr || image_writer_->IsInBootImage(obj.Ptr())) {
+    // Object is null or already in the image, there is no work to do.
+    return false;
+  }
+  bool assigned = false;
+  if (!image_writer_->IsImageBinSlotAssigned(obj.Ptr())) {
+    image_writer_->RecordNativeRelocations(obj, oat_index);
+    Bin bin = image_writer_->AssignImageBinSlot(obj.Ptr(), oat_index);
+    bin_objects_[oat_index][enum_cast<size_t>(bin)].push_back(obj.Ptr());
+    assigned = true;
+  }
+  return assigned;
+}
+
+static ObjPtr<ObjectArray<Object>> GetBootImageLiveObjects() REQUIRES_SHARED(Locks::mutator_lock_) {
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  DCHECK(!heap->GetBootImageSpaces().empty());
+  const ImageHeader& primary_header = heap->GetBootImageSpaces().front()->GetImageHeader();
+  return ObjPtr<ObjectArray<Object>>::DownCast(
+      primary_header.GetImageRoot<kWithReadBarrier>(ImageHeader::kBootImageLiveObjects));
 }
 
 void ImageWriter::CalculateNewObjectOffsets() {
@@ -2283,9 +2396,9 @@
   Runtime* const runtime = Runtime::Current();
   VariableSizedHandleScope handles(self);
   MutableHandle<ObjectArray<Object>> boot_image_live_objects = handles.NewHandle(
-      compiler_options_.IsAppImage()
-          ? nullptr
-          : IntrinsicObjects::AllocateBootImageLiveObjects(self, runtime->GetClassLinker()));
+      compiler_options_.IsBootImage()
+          ? AllocateBootImageLiveObjects(self, runtime)
+          : (compiler_options_.IsBootImageExtension() ? GetBootImageLiveObjects() : nullptr));
   std::vector<Handle<ObjectArray<Object>>> image_roots;
   for (size_t i = 0, size = oat_filenames_.size(); i != size; ++i) {
     image_roots.push_back(handles.NewHandle(CreateImageRoots(i, boot_image_live_objects)));
@@ -2297,7 +2410,6 @@
   // know where image_roots is going to end up
   image_objects_offset_begin_ = RoundUp(sizeof(ImageHeader), kObjectAlignment);  // 64-bit-alignment
 
-  const size_t method_alignment = ArtMethod::Alignment(target_ptr_size_);
   // Write the image runtime methods.
   image_methods_[ImageHeader::kResolutionMethod] = runtime->GetResolutionMethod();
   image_methods_[ImageHeader::kImtConflictMethod] = runtime->GetImtConflictMethod();
@@ -2337,73 +2449,12 @@
   // From this point on, there shall be no GC anymore and no objects shall be allocated.
   // We can now assign a BitSlot to each object and store it in its lockword.
 
-  // Work list of <object, oat_index> for objects. Everything on the stack must already be
-  // assigned a bin slot.
-  WorkStack work_stack;
-
-  // Special case interned strings to put them in the image they are likely to be resolved from.
-  for (const DexFile* dex_file : compiler_options_.GetDexFilesForOatFile()) {
-    auto it = dex_file_oat_index_map_.find(dex_file);
-    DCHECK(it != dex_file_oat_index_map_.end()) << dex_file->GetLocation();
-    const size_t oat_index = it->second;
-    InternTable* const intern_table = runtime->GetInternTable();
-    for (size_t i = 0, count = dex_file->NumStringIds(); i < count; ++i) {
-      uint32_t utf16_length;
-      const char* utf8_data = dex_file->StringDataAndUtf16LengthByIdx(dex::StringIndex(i),
-                                                                      &utf16_length);
-      mirror::String* string = intern_table->LookupStrong(self, utf16_length, utf8_data).Ptr();
-      TryAssignBinSlot(work_stack, string, oat_index);
-    }
-  }
-
-  // Get the GC roots and then visit them separately to avoid lock violations since the root visitor
-  // visits roots while holding various locks.
-  {
-    std::vector<mirror::Object*> roots;
-    GetRootsVisitor root_visitor(&roots);
-    runtime->VisitRoots(&root_visitor);
-    for (mirror::Object* obj : roots) {
-      TryAssignBinSlot(work_stack, obj, GetDefaultOatIndex());
-    }
-  }
-  ProcessWorkStack(&work_stack);
-
-  // For app images, there may be objects that are only held live by the boot image. One
-  // example is finalizer references. Forward these objects so that EnsureBinSlotAssignedCallback
-  // does not fail any checks.
-  if (compiler_options_.IsAppImage()) {
-    for (gc::space::ImageSpace* space : heap->GetBootImageSpaces()) {
-      DCHECK(space->IsImageSpace());
-      gc::accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
-      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
-                                    reinterpret_cast<uintptr_t>(space->Limit()),
-                                    [this, &work_stack](mirror::Object* obj)
-          REQUIRES_SHARED(Locks::mutator_lock_) {
-        VisitReferencesVisitor visitor(this, &work_stack, GetDefaultOatIndex());
-        // Visit all references and try to assign bin slots for them (calls TryAssignBinSlot).
-        obj->VisitReferences</*kVisitNativeRoots*/true, kVerifyNone, kWithoutReadBarrier>(
-            visitor,
-            visitor);
-      });
-    }
-    // Process the work stack in case anything was added by TryAssignBinSlot.
-    ProcessWorkStack(&work_stack);
-
-    // Store the class loader in the class roots.
-    CHECK_EQ(image_roots.size(), 1u);
-    image_roots[0]->Set<false>(ImageHeader::kAppImageClassLoader, GetAppClassLoader());
-  }
+  LayoutHelper layout_helper(this);
+  layout_helper.ProcessDexFileObjects(self);
+  layout_helper.ProcessRoots(&handles);
 
   // Verify that all objects have assigned image bin slots.
-  {
-    auto ensure_bin_slots_assigned = [&](mirror::Object* obj)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      if (IsImageObject(obj)) {
-        CHECK(IsImageBinSlotAssigned(obj)) << mirror::Object::PrettyTypeOf(obj) << " " << obj;
-      }
-    };
-    heap->VisitObjects(ensure_bin_slots_assigned);
-  }
+  layout_helper.VerifyImageBinSlotsAssigned();
 
   // Calculate size of the dex cache arrays slot and prepare offsets.
   PrepareDexCacheArraySlots();
@@ -2425,95 +2476,14 @@
     }
   }
 
-  // Calculate bin slot offsets.
-  for (size_t oat_index = 0; oat_index < image_infos_.size(); ++oat_index) {
-    ImageInfo& image_info = image_infos_[oat_index];
-    size_t bin_offset = image_objects_offset_begin_;
-    // Need to visit the objects in bin order since alignment requirements might change the
-    // section sizes.
-    // Avoid using ObjPtr since VisitObjects invalidates. This is safe since concurrent GC can not
-    // occur during image writing.
-    using BinPair = std::pair<BinSlot, mirror::Object*>;
-    std::vector<BinPair> objects;
-    heap->VisitObjects([&](mirror::Object* obj)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      // Only visit the oat index for the current image.
-      if (IsImageObject(obj) && GetOatIndex(obj) == oat_index) {
-        objects.emplace_back(GetImageBinSlot(obj), obj);
-      }
-    });
-    std::sort(objects.begin(), objects.end(), [](const BinPair& a, const BinPair& b) -> bool {
-      if (a.first.GetBin() != b.first.GetBin()) {
-        return a.first.GetBin() < b.first.GetBin();
-      }
-      // Note that the index is really the relative offset in this case.
-      return a.first.GetIndex() < b.first.GetIndex();
-    });
-    auto it = objects.begin();
-    for (size_t i = 0; i != kNumberOfBins; ++i) {
-      Bin bin = enum_cast<Bin>(i);
-      switch (bin) {
-        case Bin::kArtMethodClean:
-        case Bin::kArtMethodDirty: {
-          bin_offset = RoundUp(bin_offset, method_alignment);
-          break;
-        }
-        case Bin::kDexCacheArray:
-          bin_offset = RoundUp(bin_offset, DexCacheArraysLayout::Alignment(target_ptr_size_));
-          break;
-        case Bin::kImTable:
-        case Bin::kIMTConflictTable: {
-          bin_offset = RoundUp(bin_offset, static_cast<size_t>(target_ptr_size_));
-          break;
-        }
-        default: {
-          // Normal alignment.
-        }
-      }
-      image_info.bin_slot_offsets_[i] = bin_offset;
+  // Finalize bin slot offsets. This may add padding for regions.
+  layout_helper.FinalizeBinSlotOffsets();
 
-      // If the bin is for mirror objects, assign the offsets since we may need to change sizes
-      // from alignment requirements.
-      if (i < static_cast<size_t>(Bin::kMirrorCount)) {
-        const size_t start_offset = bin_offset;
-        // Visit and assign offsets for all objects of the bin type.
-        while (it != objects.end() && it->first.GetBin() == bin) {
-          ObjPtr<mirror::Object> obj(it->second);
-          const size_t object_size = RoundUp(obj->SizeOf(), kObjectAlignment);
-          // If the object spans region bondaries, add padding objects between.
-          // TODO: Instead of adding padding, we should consider reordering the bins to reduce
-          // wasted space.
-          if (region_size_ != 0u) {
-            const size_t offset_after_header = bin_offset - sizeof(ImageHeader);
-            const size_t next_region = RoundUp(offset_after_header, region_size_);
-            if (offset_after_header != next_region &&
-                offset_after_header + object_size > next_region) {
-              // Add padding objects until aligned.
-              while (bin_offset - sizeof(ImageHeader) < next_region) {
-                image_info.padding_object_offsets_.push_back(bin_offset);
-                bin_offset += kObjectAlignment;
-                region_alignment_wasted_ += kObjectAlignment;
-                image_info.image_end_ += kObjectAlignment;
-              }
-              CHECK_EQ(bin_offset - sizeof(ImageHeader), next_region);
-            }
-          }
-          SetImageOffset(obj.Ptr(), bin_offset);
-          bin_offset = bin_offset + object_size;
-          ++it;
-        }
-        image_info.bin_slot_sizes_[i] = bin_offset - start_offset;
-      } else {
-        bin_offset += image_info.bin_slot_sizes_[i];
-      }
-    }
-    // NOTE: There may be additional padding between the bin slots and the intern table.
-    DCHECK_EQ(image_info.image_end_,
-              image_info.GetBinSizeSum(Bin::kMirrorCount) + image_objects_offset_begin_);
+  // Collect string reference info for app images.
+  if (ClassLinker::kAppImageMayContainStrings && compiler_options_.IsAppImage()) {
+    layout_helper.CollectStringReferenceInfo(self);
   }
 
-  VLOG(image) << "Space wasted for region alignment " << region_alignment_wasted_;
-
   // Calculate image offsets.
   size_t image_offset = 0;
   for (ImageInfo& image_info : image_infos_) {
@@ -2629,9 +2599,7 @@
   // compute the actual offsets.
   const ImageSection& string_reference_offsets =
       sections[ImageHeader::kSectionStringReferenceOffsets] =
-          ImageSection(cur_pos,
-                       sizeof(typename decltype(string_reference_offsets_)::value_type) *
-                           num_string_references_);
+          ImageSection(cur_pos, sizeof(string_reference_offsets_[0]) * num_string_references_);
 
   /*
    * Metadata section.
@@ -2650,7 +2618,7 @@
   return make_pair(metadata_section.End(), std::move(sections));
 }
 
-void ImageWriter::CreateHeader(size_t oat_index) {
+void ImageWriter::CreateHeader(size_t oat_index, size_t component_count) {
   ImageInfo& image_info = GetImageInfo(oat_index);
   const uint8_t* oat_file_begin = image_info.oat_file_begin_;
   const uint8_t* oat_file_end = oat_file_begin + image_info.oat_loaded_size_;
@@ -2658,18 +2626,39 @@
 
   uint32_t image_reservation_size = image_info.image_size_;
   DCHECK_ALIGNED(image_reservation_size, kPageSize);
-  uint32_t component_count = 1u;
-  if (!compiler_options_.IsAppImage()) {
+  uint32_t current_component_count = 1u;
+  if (compiler_options_.IsAppImage()) {
+    DCHECK_EQ(oat_index, 0u);
+    DCHECK_EQ(component_count, current_component_count);
+  } else {
+    DCHECK(image_infos_.size() == 1u || image_infos_.size() == component_count)
+        << image_infos_.size() << " " << component_count;
     if (oat_index == 0u) {
       const ImageInfo& last_info = image_infos_.back();
       const uint8_t* end = last_info.oat_file_begin_ + last_info.oat_loaded_size_;
       DCHECK_ALIGNED(image_info.image_begin_, kPageSize);
       image_reservation_size =
           dchecked_integral_cast<uint32_t>(RoundUp(end - image_info.image_begin_, kPageSize));
-      component_count = image_infos_.size();
+      current_component_count = component_count;
     } else {
       image_reservation_size = 0u;
-      component_count = 0u;
+      current_component_count = 0u;
+    }
+  }
+
+  // Compute boot image checksums for the primary component, leave as 0 otherwise.
+  uint32_t boot_image_components = 0u;
+  uint32_t boot_image_checksums = 0u;
+  if (oat_index == 0u) {
+    const std::vector<gc::space::ImageSpace*>& image_spaces =
+        Runtime::Current()->GetHeap()->GetBootImageSpaces();
+    DCHECK_EQ(image_spaces.empty(), compiler_options_.IsBootImage());
+    for (size_t i = 0u, size = image_spaces.size(); i != size; ) {
+      const ImageHeader& header = image_spaces[i]->GetImageHeader();
+      boot_image_components += header.GetComponentCount();
+      boot_image_checksums ^= header.GetImageChecksum();
+      DCHECK_LE(header.GetImageSpaceCount(), size - i);
+      i += header.GetImageSpaceCount();
     }
   }
 
@@ -2679,7 +2668,7 @@
   std::vector<ImageSection>& sections = section_info_pair.second;
 
   // Finally bitmap section.
-  const size_t bitmap_bytes = image_info.image_bitmap_->Size();
+  const size_t bitmap_bytes = image_info.image_bitmap_.Size();
   auto* bitmap_section = &sections[ImageHeader::kSectionImageBitmap];
   *bitmap_section = ImageSection(RoundUp(image_end, kPageSize), RoundUp(bitmap_bytes, kPageSize));
   if (VLOG_IS_ON(compiler)) {
@@ -2698,19 +2687,12 @@
               << " Oat data end=" << reinterpret_cast<uintptr_t>(oat_data_end)
               << " Oat file end=" << reinterpret_cast<uintptr_t>(oat_file_end);
   }
-  // Store boot image info for app image so that we can relocate.
-  uint32_t boot_image_begin = 0;
-  uint32_t boot_image_end = 0;
-  uint32_t boot_oat_begin = 0;
-  uint32_t boot_oat_end = 0;
-  gc::Heap* const heap = Runtime::Current()->GetHeap();
-  heap->GetBootImagesSize(&boot_image_begin, &boot_image_end, &boot_oat_begin, &boot_oat_end);
 
   // Create the header, leave 0 for data size since we will fill this in as we are writing the
   // image.
   new (image_info.image_.Begin()) ImageHeader(
       image_reservation_size,
-      component_count,
+      current_component_count,
       PointerToLowMemUInt32(image_info.image_begin_),
       image_end,
       sections.data(),
@@ -2720,8 +2702,10 @@
       PointerToLowMemUInt32(image_info.oat_data_begin_),
       PointerToLowMemUInt32(oat_data_end),
       PointerToLowMemUInt32(oat_file_end),
-      boot_image_begin,
-      boot_oat_end - boot_image_begin,
+      boot_image_begin_,
+      boot_image_size_,
+      boot_image_components,
+      boot_image_checksums,
       static_cast<uint32_t>(target_ptr_size_));
 }
 
@@ -2972,17 +2956,17 @@
 }
 
 void ImageWriter::CopyAndFixupObject(Object* obj) {
-  if (!IsImageObject(obj)) {
+  if (!IsImageBinSlotAssigned(obj)) {
     return;
   }
-  size_t offset = GetImageOffset(obj);
   size_t oat_index = GetOatIndex(obj);
+  size_t offset = GetImageOffset(obj, oat_index);
   ImageInfo& image_info = GetImageInfo(oat_index);
   auto* dst = reinterpret_cast<Object*>(image_info.image_.Begin() + offset);
   DCHECK_LT(offset, image_info.image_end_);
   const auto* src = reinterpret_cast<const uint8_t*>(obj);
 
-  image_info.image_bitmap_->Set(dst);  // Mark the obj as live.
+  image_info.image_bitmap_.Set(dst);  // Mark the obj as live.
 
   const size_t n = obj->SizeOf();
 
@@ -3052,13 +3036,25 @@
     CopyAndFixupObject(obj);
   };
   Runtime::Current()->GetHeap()->VisitObjects(visitor);
-  // Copy the padding objects since they are required for in order traversal of the image space.
-  for (const ImageInfo& image_info : image_infos_) {
-    for (const size_t offset : image_info.padding_object_offsets_) {
-      auto* dst = reinterpret_cast<Object*>(image_info.image_.Begin() + offset);
-      dst->SetClass<kVerifyNone>(GetImageAddress(GetClassRoot<mirror::Object>().Ptr()));
-      dst->SetLockWord<kVerifyNone>(LockWord::Default(), /*as_volatile=*/ false);
-      image_info.image_bitmap_->Set(dst);  // Mark the obj as live.
+  // Fill the padding objects since they are required for in order traversal of the image space.
+  for (ImageInfo& image_info : image_infos_) {
+    for (const size_t start_offset : image_info.padding_offsets_) {
+      const size_t offset_after_header = start_offset - sizeof(ImageHeader);
+      size_t remaining_space =
+          RoundUp(offset_after_header + 1u, region_size_) - offset_after_header;
+      DCHECK_NE(remaining_space, 0u);
+      DCHECK_LT(remaining_space, region_size_);
+      Object* dst = reinterpret_cast<Object*>(image_info.image_.Begin() + start_offset);
+      ObjPtr<Class> object_class = GetClassRoot<mirror::Object, kWithoutReadBarrier>();
+      DCHECK_ALIGNED_PARAM(remaining_space, object_class->GetObjectSize());
+      Object* end = dst + remaining_space / object_class->GetObjectSize();
+      Class* image_object_class = GetImageAddress(object_class.Ptr());
+      while (dst != end) {
+        dst->SetClass<kVerifyNone>(image_object_class);
+        dst->SetLockWord<kVerifyNone>(LockWord::Default(), /*as_volatile=*/ false);
+        image_info.image_bitmap_.Set(dst);  // Mark the obj as live.
+        ++dst;
+      }
     }
   }
   // We no longer need the hashcode map, values have already been copied to target objects.
@@ -3146,7 +3142,7 @@
   FixupClassVisitor visitor(this, copy);
   ObjPtr<mirror::Object>(orig)->VisitReferences(visitor, visitor);
 
-  if (kBitstringSubtypeCheckEnabled && compiler_options_.IsAppImage()) {
+  if (kBitstringSubtypeCheckEnabled && !compiler_options_.IsBootImage()) {
     // When we call SubtypeCheck::EnsureInitialize, it Assigns new bitstring
     // values to the parent of that class.
     //
@@ -3162,6 +3158,8 @@
     //
     // On startup, the class linker will then re-initialize all the app
     // image bitstrings. See also ClassLinker::AddImageSpace.
+    //
+    // FIXME: Deal with boot image extensions.
     MutexLock subtype_check_lock(Thread::Current(), *Locks::subtype_check_lock_);
     // Lock every time to prevent a dcheck failure when we suspend with the lock held.
     SubtypeCheck<mirror::Class*>::ForceUninitialize(copy);
@@ -3169,6 +3167,11 @@
 
   // Remove the clinitThreadId. This is required for image determinism.
   copy->SetClinitThreadId(static_cast<pid_t>(0));
+  // We never emit kRetryVerificationAtRuntime, instead we mark the class as
+  // resolved and the class will therefore be re-verified at runtime.
+  if (orig->ShouldVerifyAtRuntime()) {
+    copy->SetStatusInternal(ClassStatus::kResolved);
+  }
 }
 
 void ImageWriter::FixupObject(Object* orig, Object* copy) {
@@ -3326,7 +3329,8 @@
 
 const uint8_t* ImageWriter::GetOatAddress(StubType type) const {
   DCHECK_LE(type, StubType::kLast);
-  // If we are compiling an app image, we need to use the stubs of the boot image.
+  // If we are compiling a boot image extension or app image,
+  // we need to use the stubs of the primary boot image.
   if (!compiler_options_.IsBootImage()) {
     // Use the current image pointers.
     const std::vector<gc::space::ImageSpace*>& image_spaces =
@@ -3339,8 +3343,10 @@
       // TODO: We could maybe clean this up if we stored them in an array in the oat header.
       case StubType::kQuickGenericJNITrampoline:
         return static_cast<const uint8_t*>(header.GetQuickGenericJniTrampoline());
-      case StubType::kJNIDlsymLookup:
-        return static_cast<const uint8_t*>(header.GetJniDlsymLookup());
+      case StubType::kJNIDlsymLookupTrampoline:
+        return static_cast<const uint8_t*>(header.GetJniDlsymLookupTrampoline());
+      case StubType::kJNIDlsymLookupCriticalTrampoline:
+        return static_cast<const uint8_t*>(header.GetJniDlsymLookupCriticalTrampoline());
       case StubType::kQuickIMTConflictTrampoline:
         return static_cast<const uint8_t*>(header.GetQuickImtConflictTrampoline());
       case StubType::kQuickResolutionTrampoline:
@@ -3355,9 +3361,7 @@
   return GetOatAddressForOffset(primary_image_info.GetStubOffset(type), primary_image_info);
 }
 
-const uint8_t* ImageWriter::GetQuickCode(ArtMethod* method,
-                                         const ImageInfo& image_info,
-                                         bool* quick_is_interpreted) {
+const uint8_t* ImageWriter::GetQuickCode(ArtMethod* method, const ImageInfo& image_info) {
   DCHECK(!method->IsResolutionMethod()) << method->PrettyMethod();
   DCHECK_NE(method, Runtime::Current()->GetImtConflictMethod()) << method->PrettyMethod();
   DCHECK(!method->IsImtUnimplementedMethod()) << method->PrettyMethod();
@@ -3381,27 +3385,18 @@
     quick_code = GetOatAddressForOffset(quick_oat_code_offset, image_info);
   }
 
-  *quick_is_interpreted = false;
-  if (quick_code != nullptr && (!method->IsStatic() || method->IsConstructor() ||
-      method->GetDeclaringClass()->IsInitialized())) {
-    // We have code for a non-static or initialized method, just use the code.
-  } else if (quick_code == nullptr && method->IsNative() &&
-      (!method->IsStatic() || method->GetDeclaringClass()->IsInitialized())) {
-    // Non-static or initialized native method missing compiled code, use generic JNI version.
-    quick_code = GetOatAddress(StubType::kQuickGenericJNITrampoline);
-  } else if (quick_code == nullptr && !method->IsNative()) {
-    // We don't have code at all for a non-native method, use the interpreter.
-    quick_code = GetOatAddress(StubType::kQuickToInterpreterBridge);
-    *quick_is_interpreted = true;
-  } else {
-    CHECK(!method->GetDeclaringClass()->IsInitialized());
-    // We have code for a static method, but need to go through the resolution stub for class
-    // initialization.
+  if (quick_code == nullptr) {
+    // If we don't have code, use generic jni / interpreter bridge.
+    // Both perform class initialization check if needed.
+    quick_code = method->IsNative()
+        ? GetOatAddress(StubType::kQuickGenericJNITrampoline)
+        : GetOatAddress(StubType::kQuickToInterpreterBridge);
+  } else if (NeedsClinitCheckBeforeCall(method) &&
+             !method->GetDeclaringClass()->IsVisiblyInitialized()) {
+    // If we do have code but the method needs a class initialization check before calling
+    // that code, install the resolution stub that will perform the check.
     quick_code = GetOatAddress(StubType::kQuickResolutionTrampoline);
   }
-  if (!IsInBootOatFile(quick_code)) {
-    // DCHECK_GE(quick_code, oat_data_begin_);
-  }
   return quick_code;
 }
 
@@ -3458,16 +3453,16 @@
     if (UNLIKELY(!orig->IsInvokable())) {
       quick_code = GetOatAddress(StubType::kQuickToInterpreterBridge);
     } else {
-      bool quick_is_interpreted;
       const ImageInfo& image_info = image_infos_[oat_index];
-      quick_code = GetQuickCode(orig, image_info, &quick_is_interpreted);
+      quick_code = GetQuickCode(orig, image_info);
 
       // JNI entrypoint:
       if (orig->IsNative()) {
         // The native method's pointer is set to a stub to lookup via dlsym.
         // Note this is not the code_ pointer, that is handled above.
-        copy->SetEntryPointFromJniPtrSize(
-            GetOatAddress(StubType::kJNIDlsymLookup), target_ptr_size_);
+        StubType stub_type = orig->IsCriticalNative() ? StubType::kJNIDlsymLookupCriticalTrampoline
+                                                      : StubType::kJNIDlsymLookupTrampoline;
+        copy->SetEntryPointFromJniPtrSize(GetOatAddress(stub_type), target_ptr_size_);
       } else {
         CHECK(copy->GetDataPtrSize(target_ptr_size_) == nullptr);
       }
@@ -3492,19 +3487,19 @@
   static_assert(sizeof(BinSlot) == sizeof(LockWord), "BinSlot/LockWord must have equal sizes");
 
   DCHECK_LT(GetBin(), Bin::kMirrorCount);
-  DCHECK_ALIGNED(GetIndex(), kObjectAlignment);
+  DCHECK_ALIGNED(GetOffset(), kObjectAlignment);
 }
 
 ImageWriter::BinSlot::BinSlot(Bin bin, uint32_t index)
     : BinSlot(index | (static_cast<uint32_t>(bin) << kBinShift)) {
-  DCHECK_EQ(index, GetIndex());
+  DCHECK_EQ(index, GetOffset());
 }
 
 ImageWriter::Bin ImageWriter::BinSlot::GetBin() const {
   return static_cast<Bin>((lockword_ & kBinMask) >> kBinShift);
 }
 
-uint32_t ImageWriter::BinSlot::GetIndex() const {
+uint32_t ImageWriter::BinSlot::GetOffset() const {
   return lockword_ & ~kBinMask;
 }
 
@@ -3551,10 +3546,17 @@
   return it->second;
 }
 
-size_t ImageWriter::GetOatIndexForDexCache(ObjPtr<mirror::DexCache> dex_cache) const {
-  return (dex_cache == nullptr)
-      ? GetDefaultOatIndex()
-      : GetOatIndexForDexFile(dex_cache->GetDexFile());
+size_t ImageWriter::GetOatIndexForClass(ObjPtr<mirror::Class> klass) const {
+  while (klass->IsArrayClass()) {
+    klass = klass->GetComponentType();
+  }
+  if (UNLIKELY(klass->IsPrimitive())) {
+    DCHECK(klass->GetDexCache() == nullptr);
+    return GetDefaultOatIndex();
+  } else {
+    DCHECK(klass->GetDexCache() != nullptr);
+    return GetOatIndexForDexFile(&klass->GetDexFile());
+  }
 }
 
 void ImageWriter::UpdateOatFileLayout(size_t oat_index,
@@ -3595,8 +3597,10 @@
 
   if (oat_index == GetDefaultOatIndex()) {
     // Primary oat file, read the trampolines.
-    cur_image_info.SetStubOffset(StubType::kJNIDlsymLookup,
-                                 oat_header.GetJniDlsymLookupOffset());
+    cur_image_info.SetStubOffset(StubType::kJNIDlsymLookupTrampoline,
+                                 oat_header.GetJniDlsymLookupTrampolineOffset());
+    cur_image_info.SetStubOffset(StubType::kJNIDlsymLookupCriticalTrampoline,
+                                 oat_header.GetJniDlsymLookupCriticalTrampolineOffset());
     cur_image_info.SetStubOffset(StubType::kQuickGenericJNITrampoline,
                                  oat_header.GetQuickGenericJniTrampolineOffset());
     cur_image_info.SetStubOffset(StubType::kQuickIMTConflictTrampoline,
@@ -3617,6 +3621,8 @@
     jobject class_loader,
     const HashSet<std::string>* dirty_image_objects)
     : compiler_options_(compiler_options),
+      boot_image_begin_(Runtime::Current()->GetHeap()->GetBootImagesStartAddress()),
+      boot_image_size_(Runtime::Current()->GetHeap()->GetBootImagesSize()),
       global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)),
       image_objects_offset_begin_(0),
       target_ptr_size_(InstructionSetPointerSize(compiler_options.GetInstructionSet())),
@@ -3629,7 +3635,11 @@
       oat_filenames_(oat_filenames),
       dex_file_oat_index_map_(dex_file_oat_index_map),
       dirty_image_objects_(dirty_image_objects) {
-  DCHECK(compiler_options.IsBootImage() || compiler_options.IsAppImage());
+  DCHECK(compiler_options.IsBootImage() ||
+         compiler_options.IsBootImageExtension() ||
+         compiler_options.IsAppImage());
+  DCHECK_EQ(compiler_options.IsBootImage(), boot_image_begin_ == 0u);
+  DCHECK_EQ(compiler_options.IsBootImage(), boot_image_size_ == 0u);
   CHECK_NE(image_begin, 0U);
   std::fill_n(image_methods_, arraysize(image_methods_), nullptr);
   CHECK_EQ(compiler_options.IsBootImage(),
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index cae28cf..769f2ff 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -27,6 +27,7 @@
 #include <stack>
 #include <string>
 #include <unordered_map>
+#include <unordered_set>
 
 #include "art_method.h"
 #include "base/bit_utils.h"
@@ -40,6 +41,7 @@
 #include "base/safe_map.h"
 #include "base/utils.h"
 #include "class_table.h"
+#include "gc/accounting/space_bitmap.h"
 #include "image.h"
 #include "intern_table.h"
 #include "lock_word.h"
@@ -100,7 +102,7 @@
    * image have been initialized and all native methods have been generated.  In
    * addition, no other thread should be modifying the heap.
    */
-  bool PrepareImageAddressSpace(TimingLogger* timings);
+  bool PrepareImageAddressSpace(bool preload_dex_caches, TimingLogger* timings);
 
   bool IsImageAddressSpaceReady() const {
     DCHECK(!image_infos_.empty());
@@ -121,7 +123,7 @@
     } else {
       size_t oat_index = GetOatIndex(object);
       const ImageInfo& image_info = GetImageInfo(oat_index);
-      return reinterpret_cast<T*>(image_info.image_begin_ + GetImageOffset(object));
+      return reinterpret_cast<T*>(image_info.image_begin_ + GetImageOffset(object, oat_index));
     }
   }
 
@@ -143,7 +145,7 @@
   // the names in oat_filenames.
   bool Write(int image_fd,
              const std::vector<std::string>& image_filenames,
-             const std::vector<std::string>& oat_filenames)
+             size_t component_count)
       REQUIRES(!Locks::mutator_lock_);
 
   uintptr_t GetOatDataBegin(size_t oat_index) {
@@ -157,8 +159,8 @@
   // of references to the image or across oat files.
   size_t GetOatIndexForDexFile(const DexFile* dex_file) const;
 
-  // Get the index of the oat file containing the dex file served by the dex cache.
-  size_t GetOatIndexForDexCache(ObjPtr<mirror::DexCache> dex_cache) const
+  // Get the index of the oat file containing the definition of the class.
+  size_t GetOatIndexForClass(ObjPtr<mirror::Class> klass) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Update the oat layout for the given oat file.
@@ -171,8 +173,6 @@
   void UpdateOatFileHeader(size_t oat_index, const OatHeader& oat_header);
 
  private:
-  using WorkStack = std::stack<std::pair<mirror::Object*, size_t>>;
-
   bool AllocMemory();
 
   // Mark the objects defined in this space in the given live bitmap.
@@ -234,7 +234,8 @@
   friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type);
 
   enum class StubType {
-    kJNIDlsymLookup,
+    kJNIDlsymLookupTrampoline,
+    kJNIDlsymLookupCriticalTrampoline,
     kQuickGenericJNITrampoline,
     kQuickIMTConflictTrampoline,
     kQuickResolutionTrampoline,
@@ -269,7 +270,7 @@
     // The bin an object belongs to, i.e. regular, class/verified, class/initialized, etc.
     Bin GetBin() const;
     // The offset in bytes from the beginning of the bin. Aligned to object size.
-    uint32_t GetIndex() const;
+    uint32_t GetOffset() const;
     // Pack into a single uint32_t, for storing into a lock word.
     uint32_t Uint32Value() const { return lockword_; }
     // Comparison operator for map support
@@ -361,7 +362,7 @@
     uint32_t oat_checksum_ = 0u;
 
     // Image bitmap which lets us know where the objects inside of the image reside.
-    std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;
+    gc::accounting::ContinuousSpaceBitmap image_bitmap_;
 
     // The start offsets of the dex cache arrays.
     SafeMap<const DexFile*, size_t> dex_cache_array_starts_;
@@ -390,52 +391,43 @@
     // StringFieldOffsets section.
     size_t num_string_references_ = 0;
 
+    // Offsets into the image that indicate where string references are recorded.
+    std::vector<AppImageReferenceOffsetInfo> string_reference_offsets_;
+
     // Intern table associated with this image for serialization.
     std::unique_ptr<InternTable> intern_table_;
 
     // Class table associated with this image for serialization.
     std::unique_ptr<ClassTable> class_table_;
 
-    // Padding objects to ensure region alignment (if required).
-    std::vector<size_t> padding_object_offsets_;
+    // Padding offsets to ensure region alignment (if required).
+    // Objects need to be added from the recorded offset until the end of the region.
+    std::vector<size_t> padding_offsets_;
   };
 
   // We use the lock word to store the offset of the object in the image.
-  void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  void SetImageOffset(mirror::Object* object, size_t offset)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  bool IsImageOffsetAssigned(mirror::Object* object) const
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  size_t GetImageOffset(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
-  void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
+  size_t GetImageOffset(mirror::Object* object, size_t oat_index) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void PrepareDexCacheArraySlots() REQUIRES_SHARED(Locks::mutator_lock_);
-  void AssignImageBinSlot(mirror::Object* object, size_t oat_index)
+  Bin AssignImageBinSlot(mirror::Object* object, size_t oat_index)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  mirror::Object* TryAssignBinSlot(WorkStack& work_stack, mirror::Object* obj, size_t oat_index)
+  void RecordNativeRelocations(ObjPtr<mirror::Object> obj, size_t oat_index)
       REQUIRES_SHARED(Locks::mutator_lock_);
   void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
       REQUIRES_SHARED(Locks::mutator_lock_);
   bool IsImageBinSlotAssigned(mirror::Object* object) const
       REQUIRES_SHARED(Locks::mutator_lock_);
-  BinSlot GetImageBinSlot(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
+  BinSlot GetImageBinSlot(mirror::Object* object, size_t oat_index) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  void UpdateImageBinSlotOffset(mirror::Object* object, size_t oat_index, size_t new_offset)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void AddDexCacheArrayRelocation(void* array, size_t offset, size_t oat_index)
       REQUIRES_SHARED(Locks::mutator_lock_);
   void AddMethodPointerArray(ObjPtr<mirror::PointerArray> arr)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  mirror::Object* GetLocalAddress(mirror::Object* object) const
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    size_t offset = GetImageOffset(object);
-    size_t oat_index = GetOatIndex(object);
-    const ImageInfo& image_info = GetImageInfo(oat_index);
-    uint8_t* dst = image_info.image_.Begin() + offset;
-    return reinterpret_cast<mirror::Object*>(dst);
-  }
-
   // Returns the address in the boot image if we are compiling the app image.
   const uint8_t* GetOatAddress(StubType type) const;
 
@@ -459,10 +451,9 @@
   // Remove unwanted classes from various roots.
   void PruneNonImageClasses() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Remove unwanted classes from the DexCache roots.
-  void PruneDexCache(ObjPtr<mirror::DexCache> dex_cache, ObjPtr<mirror::ClassLoader> class_loader)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!Locks::classlinker_classes_lock_);
+  // Remove everything from the DexCache.
+  void ClearDexCache(ObjPtr<mirror::DexCache> dex_cache)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Preload deterministic DexCache contents.
   void PreloadDexCache(ObjPtr<mirror::DexCache> dex_cache, ObjPtr<mirror::ClassLoader> class_loader)
@@ -480,9 +471,7 @@
   // Lays out where the image objects will be at runtime.
   void CalculateNewObjectOffsets()
       REQUIRES_SHARED(Locks::mutator_lock_);
-  void ProcessWorkStack(WorkStack* work_stack)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  void CreateHeader(size_t oat_index)
+  void CreateHeader(size_t oat_index, size_t component_count)
       REQUIRES_SHARED(Locks::mutator_lock_);
   ObjPtr<mirror::ObjectArray<mirror::Object>> CollectDexCaches(Thread* self, size_t oat_index) const
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -492,8 +481,6 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
   void CalculateObjectBinSlots(mirror::Object* obj)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  void UnbinObjectsIntoOffset(mirror::Object* obj)
-      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Creates the contiguous image in memory and adjusts pointers.
   void CopyAndFixupNativeData(size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -553,9 +540,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get quick code for non-resolution/imt_conflict/abstract method.
-  const uint8_t* GetQuickCode(ArtMethod* method,
-                              const ImageInfo& image_info,
-                              bool* quick_is_interpreted)
+  const uint8_t* GetQuickCode(ArtMethod* method, const ImageInfo& image_info)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Return true if a method is likely to be dirtied at runtime.
@@ -578,76 +563,19 @@
   // Return true if klass is loaded by the boot class loader but not in the boot image.
   bool IsBootClassLoaderNonImageClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Return true if klass depends on a boot class loader non image class. We want to prune these
-  // classes since we do not want any boot class loader classes in the image. This means that
-  // we also cannot have any classes which refer to these boot class loader non image classes.
-  // PruneAppImageClass also prunes if klass depends on a non-image class according to the compiler
-  // options.
-  bool PruneAppImageClass(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
+  // Return true if `klass` depends on a class defined by the boot class path
+  // we're compiling against but not present in the boot image spaces. We want
+  // to prune these classes since we cannot guarantee that they will not be
+  // already loaded at run time when loading this image. This means that we
+  // also cannot have any classes which refer to these non image classes.
+  bool PruneImageClass(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // early_exit is true if we had a cyclic dependency anywhere down the chain.
-  bool PruneAppImageClassInternal(ObjPtr<mirror::Class> klass,
-                                  bool* early_exit,
-                                  std::unordered_set<mirror::Object*>* visited)
+  bool PruneImageClassInternal(ObjPtr<mirror::Class> klass,
+                               bool* early_exit,
+                               std::unordered_set<mirror::Object*>* visited)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  /*
-   * This type holds the information necessary for calculating
-   * AppImageReferenceOffsetInfo values after the object relocations have been
-   * computed.
-   *
-   * The first element will always be a pointer to a managed object.  If the
-   * pointer has been tagged (testable with HasDexCacheNativeRefTag) it
-   * indicates that the referenced object is a DexCache object that requires
-   * special handling during loading and the second element has no meaningful
-   * value.  If the pointer isn't tagged then the second element is an
-   * object-relative offset to a field containing a string reference.
-   *
-   * Note that it is possible for an untagged DexCache pointer to occur in the
-   * first position if it has a managed reference that needs to be updated.
-   *
-   * TODO (chriswailes): Add a note indicating the source line where we ensure
-   * that no moving garbage collection will occur.
-   *
-   * TODO (chriswailes): Replace with std::variant once ART is building with
-   * C++17
-   */
-  typedef std::pair<uintptr_t, uint32_t> HeapReferencePointerInfo;
-
-  /*
-   * Collects the info necessary for calculating image offsets to string field
-   * later.
-   *
-   * This function is used when constructing AppImages.  Because AppImages
-   * contain strings that must be interned we need to visit references to these
-   * strings when the AppImage is loaded and either insert them into the
-   * runtime intern table or replace the existing reference with a reference
-   * to the interned strings.
-   *
-   * To speed up the interning of strings when the AppImage is loaded we include
-   * a list of offsets to string references in the AppImage.  These are then
-   * iterated over at load time and fixed up.
-   *
-   * To record the offsets we first have to count the number of string
-   * references that will be included in the AppImage.  This allows use to both
-   * allocate enough memory for soring the offsets and correctly calculate the
-   * offsets of various objects into the image.  Once the image offset
-   * calculations are done for managed objects the reference object/offset pairs
-   * are translated to image offsets.  The CopyMetadata function then copies
-   * these offsets into the image.
-   */
-  std::vector<HeapReferencePointerInfo> CollectStringReferenceInfo() const
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  /*
-   * Ensures that assumptions about native GC roots and AppImages hold.
-   *
-   * This function verifies the following condition(s):
-   *   - Native references to managed strings are only reachable through DexCache
-   *     objects
-   */
-  void VerifyNativeGCRootInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
-
   bool IsMultiImage() const {
     return image_infos_.size() > 1;
   }
@@ -676,23 +604,23 @@
   template <typename T>
   T* NativeCopyLocation(T* obj) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Return true if `obj` belongs to the image we're writing.
-  // For a boot image, this is true for all objects.
-  // For an app image, boot image objects and boot class path dex caches are excluded.
-  bool IsImageObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
+  // Return true if `dex_cache` belongs to the image we're writing.
+  // For a boot image, this is true for all dex caches.
+  // For an app image, boot class path dex caches are excluded.
+  bool IsImageDexCache(ObjPtr<mirror::DexCache> dex_cache) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Return true if `obj` is inside of the boot image space. This may only return true if we are
-  // compiling an app image.
-  bool IsInBootImage(const void* obj) const;
-
-  // Return true if ptr is within the boot oat file.
-  bool IsInBootOatFile(const void* ptr) const;
+  // Return true if `obj` is inside of a boot image space that we're compiling against.
+  // (Always false when compiling the boot image.)
+  ALWAYS_INLINE bool IsInBootImage(const void* obj) const {
+    return reinterpret_cast<uintptr_t>(obj) - boot_image_begin_ < boot_image_size_;
+  }
 
   // Get the index of the oat file associated with the object.
   size_t GetOatIndex(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
 
   // The oat index for shared data in multi-image and all data in single-image compilation.
-  size_t GetDefaultOatIndex() const {
+  static constexpr size_t GetDefaultOatIndex() {
     return 0u;
   }
 
@@ -704,10 +632,6 @@
     return image_infos_[oat_index];
   }
 
-  // Find an already strong interned string in the other images or in the boot image. Used to
-  // remove duplicates in the multi image and app image case.
-  mirror::String* FindInternedString(mirror::String* string) REQUIRES_SHARED(Locks::mutator_lock_);
-
   // Return true if there already exists a native allocation for an object.
   bool NativeRelocationAssigned(void* ptr) const;
 
@@ -736,11 +660,15 @@
    *   - The referred-object is a Java String
    */
   ALWAYS_INLINE
-  bool IsValidAppImageStringReference(ObjPtr<mirror::Object> referred_obj) const
+  bool IsInternedAppImageStringReference(ObjPtr<mirror::Object> referred_obj) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   const CompilerOptions& compiler_options_;
 
+  // Cached boot image begin and size. This includes heap, native objects and oat files.
+  const uint32_t boot_image_begin_;
+  const uint32_t boot_image_size_;
+
   // Beginning target image address for the first image.
   uint8_t* global_image_begin_;
 
@@ -785,9 +713,6 @@
   // Boot image live objects, null for app image.
   mirror::ObjectArray<mirror::Object>* boot_image_live_objects_;
 
-  // Offsets into the image that indicate where string references are recorded.
-  std::vector<AppImageReferenceOffsetInfo> string_reference_offsets_;
-
   // Which mode the image is stored as, see image.h
   const ImageHeader::StorageMode image_storage_mode_;
 
@@ -806,28 +731,15 @@
   // Region alignment bytes wasted.
   size_t region_alignment_wasted_ = 0u;
 
-  class ImageFileGuard;
   class FixupClassVisitor;
   class FixupRootVisitor;
   class FixupVisitor;
-  class GetRootsVisitor;
+  class ImageFileGuard;
+  class LayoutHelper;
   class NativeLocationVisitor;
   class PruneClassesVisitor;
   class PruneClassLoaderClassesVisitor;
   class PruneObjectReferenceVisitor;
-  class RegisterBootClassPathClassesVisitor;
-  class VisitReferencesVisitor;
-
-  /*
-   * A visitor class for extracting object/offset pairs.
-   *
-   * This visitor walks the fields of an object and extracts object/offset pairs
-   * that are later translated to image offsets.  This visitor is only
-   * responsible for extracting info for Java references.  Native references to
-   * Java strings are handled in the wrapper function
-   * CollectStringReferenceInfo().
-   */
-  class CollectStringReferenceVisitor;
 
   // A visitor used by the VerifyNativeGCRootInvariants() function.
   class NativeGCRootInvariantVisitor;
diff --git a/dex2oat/linker/mips/relative_patcher_mips.cc b/dex2oat/linker/mips/relative_patcher_mips.cc
deleted file mode 100644
index 69e0846..0000000
--- a/dex2oat/linker/mips/relative_patcher_mips.cc
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "linker/mips/relative_patcher_mips.h"
-
-#include "compiled_method.h"
-#include "debug/method_debug_info.h"
-#include "linker/linker_patch.h"
-
-namespace art {
-namespace linker {
-
-uint32_t MipsRelativePatcher::ReserveSpace(
-    uint32_t offset,
-    const CompiledMethod* compiled_method ATTRIBUTE_UNUSED,
-    MethodReference method_ref ATTRIBUTE_UNUSED) {
-  return offset;  // No space reserved; no limit on relative call distance.
-}
-
-uint32_t MipsRelativePatcher::ReserveSpaceEnd(uint32_t offset) {
-  return offset;  // No space reserved; no limit on relative call distance.
-}
-
-uint32_t MipsRelativePatcher::WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) {
-  return offset;  // No thunks added; no limit on relative call distance.
-}
-
-void MipsRelativePatcher::PatchCall(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
-                                    uint32_t literal_offset ATTRIBUTE_UNUSED,
-                                    uint32_t patch_offset ATTRIBUTE_UNUSED,
-                                    uint32_t target_offset ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "PatchCall unimplemented on MIPS";
-}
-
-void MipsRelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
-                                                   const LinkerPatch& patch,
-                                                   uint32_t patch_offset,
-                                                   uint32_t target_offset) {
-  uint32_t anchor_literal_offset = patch.PcInsnOffset();
-  uint32_t literal_offset = patch.LiteralOffset();
-  bool high_patch = ((*code)[literal_offset + 0] == 0x34) && ((*code)[literal_offset + 1] == 0x12);
-
-  // Perform basic sanity checks.
-  if (high_patch) {
-    if (is_r6) {
-      // auipc reg, offset_high
-      DCHECK_EQ(((*code)[literal_offset + 2] & 0x1F), 0x1E);
-      DCHECK_EQ(((*code)[literal_offset + 3] & 0xFC), 0xEC);
-    } else {
-      // lui reg, offset_high
-      DCHECK_EQ(((*code)[literal_offset + 2] & 0xE0), 0x00);
-      DCHECK_EQ((*code)[literal_offset + 3], 0x3C);
-    }
-  } else {
-    // instr reg(s), offset_low
-    CHECK_EQ((*code)[literal_offset + 0], 0x78);
-    CHECK_EQ((*code)[literal_offset + 1], 0x56);
-  }
-
-  // Apply patch.
-  uint32_t anchor_offset = patch_offset - literal_offset + anchor_literal_offset;
-  uint32_t diff = target_offset - anchor_offset;
-  diff += (diff & 0x8000) << 1;  // Account for sign extension in "instr reg(s), offset_low".
-
-  if (high_patch) {
-    // lui reg, offset_high / auipc reg, offset_high
-    (*code)[literal_offset + 0] = static_cast<uint8_t>(diff >> 16);
-    (*code)[literal_offset + 1] = static_cast<uint8_t>(diff >> 24);
-  } else {
-    // instr reg(s), offset_low
-    (*code)[literal_offset + 0] = static_cast<uint8_t>(diff >> 0);
-    (*code)[literal_offset + 1] = static_cast<uint8_t>(diff >> 8);
-  }
-}
-
-void MipsRelativePatcher::PatchBakerReadBarrierBranch(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
-                                                      const LinkerPatch& patch ATTRIBUTE_UNUSED,
-                                                      uint32_t patch_offset ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "UNIMPLEMENTED";
-}
-
-std::vector<debug::MethodDebugInfo> MipsRelativePatcher::GenerateThunkDebugInfo(
-    uint32_t executable_offset ATTRIBUTE_UNUSED) {
-  return std::vector<debug::MethodDebugInfo>();  // No thunks added.
-}
-
-}  // namespace linker
-}  // namespace art
diff --git a/dex2oat/linker/mips/relative_patcher_mips.h b/dex2oat/linker/mips/relative_patcher_mips.h
deleted file mode 100644
index 4c385a3..0000000
--- a/dex2oat/linker/mips/relative_patcher_mips.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_DEX2OAT_LINKER_MIPS_RELATIVE_PATCHER_MIPS_H_
-#define ART_DEX2OAT_LINKER_MIPS_RELATIVE_PATCHER_MIPS_H_
-
-#include "arch/mips/instruction_set_features_mips.h"
-#include "linker/relative_patcher.h"
-
-namespace art {
-namespace linker {
-
-class MipsRelativePatcher final : public RelativePatcher {
- public:
-  explicit MipsRelativePatcher(const MipsInstructionSetFeatures* features)
-      : is_r6(features->IsR6()) {}
-
-  uint32_t ReserveSpace(uint32_t offset,
-                        const CompiledMethod* compiled_method,
-                        MethodReference method_ref) override;
-  uint32_t ReserveSpaceEnd(uint32_t offset) override;
-  uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
-  void PatchCall(std::vector<uint8_t>* code,
-                 uint32_t literal_offset,
-                 uint32_t patch_offset,
-                 uint32_t target_offset) override;
-  void PatchPcRelativeReference(std::vector<uint8_t>* code,
-                                const LinkerPatch& patch,
-                                uint32_t patch_offset,
-                                uint32_t target_offset) override;
-  void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
-                                   const LinkerPatch& patch,
-                                   uint32_t patch_offset) override;
-  std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) override;
-
- private:
-  bool is_r6;
-
-  DISALLOW_COPY_AND_ASSIGN(MipsRelativePatcher);
-};
-
-}  // namespace linker
-}  // namespace art
-
-#endif  // ART_DEX2OAT_LINKER_MIPS_RELATIVE_PATCHER_MIPS_H_
diff --git a/dex2oat/linker/mips/relative_patcher_mips32r6_test.cc b/dex2oat/linker/mips/relative_patcher_mips32r6_test.cc
deleted file mode 100644
index 629fdd5..0000000
--- a/dex2oat/linker/mips/relative_patcher_mips32r6_test.cc
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "linker/mips/relative_patcher_mips.h"
-#include "linker/relative_patcher_test.h"
-
-namespace art {
-namespace linker {
-
-class Mips32r6RelativePatcherTest : public RelativePatcherTest {
- public:
-  Mips32r6RelativePatcherTest() : RelativePatcherTest(InstructionSet::kMips, "mips32r6") {}
-
- protected:
-  static const uint8_t kUnpatchedPcRelativeRawCode[];
-  static const uint32_t kLiteralOffsetHigh;
-  static const uint32_t kLiteralOffsetLow1;
-  static const uint32_t kLiteralOffsetLow2;
-  static const uint32_t kAnchorOffset;
-  static const ArrayRef<const uint8_t> kUnpatchedPcRelativeCode;
-
-  uint32_t GetMethodOffset(uint32_t method_idx) {
-    auto result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
-    CHECK(result.first);
-    return result.second;
-  }
-
-  void CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches, uint32_t target_offset);
-  void TestStringBssEntry(uint32_t bss_begin, uint32_t string_entry_offset);
-  void TestStringReference(uint32_t string_offset);
-};
-
-const uint8_t Mips32r6RelativePatcherTest::kUnpatchedPcRelativeRawCode[] = {
-    0x34, 0x12, 0x5E, 0xEE,  // auipc s2, high(diff); placeholder = 0x1234
-    0x78, 0x56, 0x52, 0x26,  // addiu s2, s2, low(diff); placeholder = 0x5678
-    0x78, 0x56, 0x52, 0x8E,  // lw    s2, (low(diff))(s2) ; placeholder = 0x5678
-};
-const uint32_t Mips32r6RelativePatcherTest::kLiteralOffsetHigh = 0;  // At auipc.
-const uint32_t Mips32r6RelativePatcherTest::kLiteralOffsetLow1 = 4;  // At addiu.
-const uint32_t Mips32r6RelativePatcherTest::kLiteralOffsetLow2 = 8;  // At lw.
-const uint32_t Mips32r6RelativePatcherTest::kAnchorOffset = 0;  // At auipc (where PC+0 points).
-const ArrayRef<const uint8_t> Mips32r6RelativePatcherTest::kUnpatchedPcRelativeCode(
-    kUnpatchedPcRelativeRawCode);
-
-void Mips32r6RelativePatcherTest::CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches,
-                                                       uint32_t target_offset) {
-  AddCompiledMethod(MethodRef(1u), kUnpatchedPcRelativeCode, ArrayRef<const LinkerPatch>(patches));
-  Link();
-
-  auto result = method_offset_map_.FindMethodOffset(MethodRef(1u));
-  ASSERT_TRUE(result.first);
-
-  uint32_t diff = target_offset - (result.second + kAnchorOffset);
-  diff += (diff & 0x8000) << 1;  // Account for sign extension in addiu/lw.
-
-  const uint8_t expected_code[] = {
-      static_cast<uint8_t>(diff >> 16), static_cast<uint8_t>(diff >> 24), 0x5E, 0xEE,
-      static_cast<uint8_t>(diff), static_cast<uint8_t>(diff >> 8), 0x52, 0x26,
-      static_cast<uint8_t>(diff), static_cast<uint8_t>(diff >> 8), 0x52, 0x8E,
-  };
-  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
-}
-
-void Mips32r6RelativePatcherTest::TestStringBssEntry(uint32_t bss_begin,
-                                                     uint32_t string_entry_offset) {
-  constexpr uint32_t kStringIndex = 1u;
-  string_index_to_offset_map_.Put(kStringIndex, string_entry_offset);
-  bss_begin_ = bss_begin;
-  LinkerPatch patches[] = {
-      LinkerPatch::StringBssEntryPatch(kLiteralOffsetHigh, nullptr, kAnchorOffset, kStringIndex),
-      LinkerPatch::StringBssEntryPatch(kLiteralOffsetLow1, nullptr, kAnchorOffset, kStringIndex),
-      LinkerPatch::StringBssEntryPatch(kLiteralOffsetLow2, nullptr, kAnchorOffset, kStringIndex)
-  };
-  CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches), bss_begin_ + string_entry_offset);
-}
-
-void Mips32r6RelativePatcherTest::TestStringReference(uint32_t string_offset) {
-  constexpr uint32_t kStringIndex = 1u;
-  string_index_to_offset_map_.Put(kStringIndex, string_offset);
-  LinkerPatch patches[] = {
-      LinkerPatch::RelativeStringPatch(kLiteralOffsetHigh, nullptr, kAnchorOffset, kStringIndex),
-      LinkerPatch::RelativeStringPatch(kLiteralOffsetLow1, nullptr, kAnchorOffset, kStringIndex),
-      LinkerPatch::RelativeStringPatch(kLiteralOffsetLow2, nullptr, kAnchorOffset, kStringIndex)
-  };
-  CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches), string_offset);
-}
-
-TEST_F(Mips32r6RelativePatcherTest, StringBssEntry) {
-  TestStringBssEntry(/* bss_begin */ 0x12345678, /* string_entry_offset */ 0x1234);
-}
-
-TEST_F(Mips32r6RelativePatcherTest, StringReference) {
-  TestStringReference(/* string_offset*/ 0x87651234);
-}
-
-}  // namespace linker
-}  // namespace art
diff --git a/dex2oat/linker/mips/relative_patcher_mips_test.cc b/dex2oat/linker/mips/relative_patcher_mips_test.cc
deleted file mode 100644
index d876c76..0000000
--- a/dex2oat/linker/mips/relative_patcher_mips_test.cc
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "linker/mips/relative_patcher_mips.h"
-
-#include "linker/relative_patcher_test.h"
-
-namespace art {
-namespace linker {
-
-class MipsRelativePatcherTest : public RelativePatcherTest {
- public:
-  MipsRelativePatcherTest() : RelativePatcherTest(InstructionSet::kMips, "mips32r2") {}
-
- protected:
-  static const uint8_t kUnpatchedPcRelativeRawCode[];
-  static const uint32_t kLiteralOffsetHigh;
-  static const uint32_t kLiteralOffsetLow1;
-  static const uint32_t kLiteralOffsetLow2;
-  static const uint32_t kAnchorOffset;
-  static const ArrayRef<const uint8_t> kUnpatchedPcRelativeCode;
-
-  uint32_t GetMethodOffset(uint32_t method_idx) {
-    auto result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
-    CHECK(result.first);
-    return result.second;
-  }
-
-  void CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches, uint32_t target_offset);
-  void TestStringBssEntry(uint32_t bss_begin, uint32_t string_entry_offset);
-  void TestStringReference(uint32_t string_offset);
-};
-
-const uint8_t MipsRelativePatcherTest::kUnpatchedPcRelativeRawCode[] = {
-    0x00, 0x00, 0x10, 0x04,  // nal
-    0x34, 0x12, 0x12, 0x3C,  // lui   s2, high(diff); placeholder = 0x1234
-    0x21, 0x90, 0x5F, 0x02,  // addu  s2, s2, ra
-    0x78, 0x56, 0x52, 0x26,  // addiu s2, s2, low(diff); placeholder = 0x5678
-    0x78, 0x56, 0x52, 0x8E,  // lw    s2, (low(diff))(s2) ; placeholder = 0x5678
-};
-const uint32_t MipsRelativePatcherTest::kLiteralOffsetHigh = 4;  // At lui.
-const uint32_t MipsRelativePatcherTest::kLiteralOffsetLow1 = 12;  // At addiu.
-const uint32_t MipsRelativePatcherTest::kLiteralOffsetLow2 = 16;  // At lw.
-const uint32_t MipsRelativePatcherTest::kAnchorOffset = 8;  // At addu (where PC+0 points).
-const ArrayRef<const uint8_t> MipsRelativePatcherTest::kUnpatchedPcRelativeCode(
-    kUnpatchedPcRelativeRawCode);
-
-void MipsRelativePatcherTest::CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches,
-                                                   uint32_t target_offset) {
-  AddCompiledMethod(MethodRef(1u), kUnpatchedPcRelativeCode, ArrayRef<const LinkerPatch>(patches));
-  Link();
-
-  auto result = method_offset_map_.FindMethodOffset(MethodRef(1u));
-  ASSERT_TRUE(result.first);
-
-  uint32_t diff = target_offset - (result.second + kAnchorOffset);
-  diff += (diff & 0x8000) << 1;  // Account for sign extension in addiu/lw.
-
-  const uint8_t expected_code[] = {
-      0x00, 0x00, 0x10, 0x04,
-      static_cast<uint8_t>(diff >> 16), static_cast<uint8_t>(diff >> 24), 0x12, 0x3C,
-      0x21, 0x90, 0x5F, 0x02,
-      static_cast<uint8_t>(diff), static_cast<uint8_t>(diff >> 8), 0x52, 0x26,
-      static_cast<uint8_t>(diff), static_cast<uint8_t>(diff >> 8), 0x52, 0x8E,
-  };
-  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
-}
-
-void MipsRelativePatcherTest::TestStringBssEntry(uint32_t bss_begin,
-                                                 uint32_t string_entry_offset) {
-  constexpr uint32_t kStringIndex = 1u;
-  string_index_to_offset_map_.Put(kStringIndex, string_entry_offset);
-  bss_begin_ = bss_begin;
-  LinkerPatch patches[] = {
-      LinkerPatch::StringBssEntryPatch(kLiteralOffsetHigh, nullptr, kAnchorOffset, kStringIndex),
-      LinkerPatch::StringBssEntryPatch(kLiteralOffsetLow1, nullptr, kAnchorOffset, kStringIndex),
-      LinkerPatch::StringBssEntryPatch(kLiteralOffsetLow2, nullptr, kAnchorOffset, kStringIndex)
-  };
-  CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches), bss_begin_ + string_entry_offset);
-}
-
-void MipsRelativePatcherTest::TestStringReference(uint32_t string_offset) {
-  constexpr uint32_t kStringIndex = 1u;
-  string_index_to_offset_map_.Put(kStringIndex, string_offset);
-  LinkerPatch patches[] = {
-      LinkerPatch::RelativeStringPatch(kLiteralOffsetHigh, nullptr, kAnchorOffset, kStringIndex),
-      LinkerPatch::RelativeStringPatch(kLiteralOffsetLow1, nullptr, kAnchorOffset, kStringIndex),
-      LinkerPatch::RelativeStringPatch(kLiteralOffsetLow2, nullptr, kAnchorOffset, kStringIndex)
-  };
-  CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches), string_offset);
-}
-
-TEST_F(MipsRelativePatcherTest, StringBssEntry) {
-  TestStringBssEntry(/* bss_begin */ 0x12345678, /* string_entry_offset */ 0x1234);
-}
-
-TEST_F(MipsRelativePatcherTest, StringReference) {
-  TestStringReference(/* string_offset*/ 0x87651234);
-}
-
-}  // namespace linker
-}  // namespace art
diff --git a/dex2oat/linker/mips64/relative_patcher_mips64.cc b/dex2oat/linker/mips64/relative_patcher_mips64.cc
deleted file mode 100644
index aae5746..0000000
--- a/dex2oat/linker/mips64/relative_patcher_mips64.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "linker/mips64/relative_patcher_mips64.h"
-
-#include "compiled_method.h"
-#include "debug/method_debug_info.h"
-#include "linker/linker_patch.h"
-
-namespace art {
-namespace linker {
-
-uint32_t Mips64RelativePatcher::ReserveSpace(
-    uint32_t offset,
-    const CompiledMethod* compiled_method ATTRIBUTE_UNUSED,
-    MethodReference method_ref ATTRIBUTE_UNUSED) {
-  return offset;  // No space reserved; no limit on relative call distance.
-}
-
-uint32_t Mips64RelativePatcher::ReserveSpaceEnd(uint32_t offset) {
-  return offset;  // No space reserved; no limit on relative call distance.
-}
-
-uint32_t Mips64RelativePatcher::WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) {
-  return offset;  // No thunks added; no limit on relative call distance.
-}
-
-void Mips64RelativePatcher::PatchCall(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
-                                      uint32_t literal_offset ATTRIBUTE_UNUSED,
-                                      uint32_t patch_offset ATTRIBUTE_UNUSED,
-                                      uint32_t target_offset ATTRIBUTE_UNUSED) {
-  UNIMPLEMENTED(FATAL) << "PatchCall unimplemented on MIPS64";
-}
-
-void Mips64RelativePatcher::PatchPcRelativeReference(std::vector<uint8_t>* code,
-                                                     const LinkerPatch& patch,
-                                                     uint32_t patch_offset,
-                                                     uint32_t target_offset) {
-  uint32_t anchor_literal_offset = patch.PcInsnOffset();
-  uint32_t literal_offset = patch.LiteralOffset();
-  bool high_patch = ((*code)[literal_offset + 0] == 0x34) && ((*code)[literal_offset + 1] == 0x12);
-
-  // Perform basic sanity checks.
-  if (high_patch) {
-    // auipc reg, offset_high
-    DCHECK_EQ(((*code)[literal_offset + 2] & 0x1F), 0x1E);
-    DCHECK_EQ(((*code)[literal_offset + 3] & 0xFC), 0xEC);
-  } else {
-    // instr reg(s), offset_low
-    CHECK_EQ((*code)[literal_offset + 0], 0x78);
-    CHECK_EQ((*code)[literal_offset + 1], 0x56);
-  }
-
-  // Apply patch.
-  uint32_t anchor_offset = patch_offset - literal_offset + anchor_literal_offset;
-  uint32_t diff = target_offset - anchor_offset;
-  // Note that a combination of auipc with an instruction that adds a sign-extended
-  // 16-bit immediate operand (e.g. ld) provides a PC-relative range of
-  // PC-0x80000000 to PC+0x7FFF7FFF on MIPS64, that is, short of 2GB on one end
-  // by 32KB.
-  diff += (diff & 0x8000) << 1;  // Account for sign extension in "instr reg(s), offset_low".
-
-  if (high_patch) {
-    // auipc reg, offset_high
-    (*code)[literal_offset + 0] = static_cast<uint8_t>(diff >> 16);
-    (*code)[literal_offset + 1] = static_cast<uint8_t>(diff >> 24);
-  } else {
-    // instr reg(s), offset_low
-    (*code)[literal_offset + 0] = static_cast<uint8_t>(diff >> 0);
-    (*code)[literal_offset + 1] = static_cast<uint8_t>(diff >> 8);
-  }
-}
-
-void Mips64RelativePatcher::PatchBakerReadBarrierBranch(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
-                                                        const LinkerPatch& patch ATTRIBUTE_UNUSED,
-                                                        uint32_t patch_offset ATTRIBUTE_UNUSED) {
-  LOG(FATAL) << "UNIMPLEMENTED";
-}
-
-std::vector<debug::MethodDebugInfo> Mips64RelativePatcher::GenerateThunkDebugInfo(
-    uint32_t executable_offset ATTRIBUTE_UNUSED) {
-  return std::vector<debug::MethodDebugInfo>();  // No thunks added.
-}
-
-}  // namespace linker
-}  // namespace art
diff --git a/dex2oat/linker/mips64/relative_patcher_mips64.h b/dex2oat/linker/mips64/relative_patcher_mips64.h
deleted file mode 100644
index 7b7c2cc..0000000
--- a/dex2oat/linker/mips64/relative_patcher_mips64.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_DEX2OAT_LINKER_MIPS64_RELATIVE_PATCHER_MIPS64_H_
-#define ART_DEX2OAT_LINKER_MIPS64_RELATIVE_PATCHER_MIPS64_H_
-
-#include "linker/relative_patcher.h"
-
-namespace art {
-namespace linker {
-
-class Mips64RelativePatcher final : public RelativePatcher {
- public:
-  Mips64RelativePatcher() {}
-
-  uint32_t ReserveSpace(uint32_t offset,
-                        const CompiledMethod* compiled_method,
-                        MethodReference method_ref) override;
-  uint32_t ReserveSpaceEnd(uint32_t offset) override;
-  uint32_t WriteThunks(OutputStream* out, uint32_t offset) override;
-  void PatchCall(std::vector<uint8_t>* code,
-                 uint32_t literal_offset,
-                 uint32_t patch_offset,
-                 uint32_t target_offset) override;
-  void PatchPcRelativeReference(std::vector<uint8_t>* code,
-                                const LinkerPatch& patch,
-                                uint32_t patch_offset,
-                                uint32_t target_offset) override;
-  void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
-                                   const LinkerPatch& patch,
-                                   uint32_t patch_offset) override;
-  std::vector<debug::MethodDebugInfo> GenerateThunkDebugInfo(uint32_t executable_offset) override;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(Mips64RelativePatcher);
-};
-
-}  // namespace linker
-}  // namespace art
-
-#endif  // ART_DEX2OAT_LINKER_MIPS64_RELATIVE_PATCHER_MIPS64_H_
diff --git a/dex2oat/linker/mips64/relative_patcher_mips64_test.cc b/dex2oat/linker/mips64/relative_patcher_mips64_test.cc
deleted file mode 100644
index a02f500..0000000
--- a/dex2oat/linker/mips64/relative_patcher_mips64_test.cc
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "linker/mips64/relative_patcher_mips64.h"
-
-#include "linker/relative_patcher_test.h"
-
-namespace art {
-namespace linker {
-
-class Mips64RelativePatcherTest : public RelativePatcherTest {
- public:
-  Mips64RelativePatcherTest() : RelativePatcherTest(InstructionSet::kMips64, "default") {}
-
- protected:
-  static const uint8_t kUnpatchedPcRelativeRawCode[];
-  static const uint8_t kUnpatchedPcRelativeCallRawCode[];
-  static const uint32_t kLiteralOffsetHigh;
-  static const uint32_t kLiteralOffsetLow1;
-  static const uint32_t kLiteralOffsetLow2;
-  static const uint32_t kAnchorOffset;
-  static const ArrayRef<const uint8_t> kUnpatchedPcRelativeCode;
-
-  uint32_t GetMethodOffset(uint32_t method_idx) {
-    auto result = method_offset_map_.FindMethodOffset(MethodRef(method_idx));
-    CHECK(result.first);
-    return result.second;
-  }
-
-  void CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches, uint32_t target_offset);
-  void TestStringBssEntry(uint32_t bss_begin, uint32_t string_entry_offset);
-  void TestStringReference(uint32_t string_offset);
-};
-
-const uint8_t Mips64RelativePatcherTest::kUnpatchedPcRelativeRawCode[] = {
-    0x34, 0x12, 0x5E, 0xEE,  // auipc  s2, high(diff); placeholder = 0x1234
-    0x78, 0x56, 0x52, 0x66,  // daddiu s2, s2, low(diff); placeholder = 0x5678
-    0x78, 0x56, 0x52, 0x9E,  // lwu    s2, (low(diff))(s2) ; placeholder = 0x5678
-};
-const uint32_t Mips64RelativePatcherTest::kLiteralOffsetHigh = 0;  // At auipc.
-const uint32_t Mips64RelativePatcherTest::kLiteralOffsetLow1 = 4;  // At daddiu.
-const uint32_t Mips64RelativePatcherTest::kLiteralOffsetLow2 = 8;  // At lwu.
-const uint32_t Mips64RelativePatcherTest::kAnchorOffset = 0;  // At auipc (where PC+0 points).
-const ArrayRef<const uint8_t> Mips64RelativePatcherTest::kUnpatchedPcRelativeCode(
-    kUnpatchedPcRelativeRawCode);
-
-void Mips64RelativePatcherTest::CheckPcRelativePatch(const ArrayRef<const LinkerPatch>& patches,
-                                                     uint32_t target_offset) {
-  AddCompiledMethod(MethodRef(1u), kUnpatchedPcRelativeCode, ArrayRef<const LinkerPatch>(patches));
-  Link();
-
-  auto result = method_offset_map_.FindMethodOffset(MethodRef(1u));
-  ASSERT_TRUE(result.first);
-
-  uint32_t diff = target_offset - (result.second + kAnchorOffset);
-  diff += (diff & 0x8000) << 1;  // Account for sign extension in daddiu/lwu.
-
-  const uint8_t expected_code[] = {
-      static_cast<uint8_t>(diff >> 16), static_cast<uint8_t>(diff >> 24), 0x5E, 0xEE,
-      static_cast<uint8_t>(diff), static_cast<uint8_t>(diff >> 8), 0x52, 0x66,
-      static_cast<uint8_t>(diff), static_cast<uint8_t>(diff >> 8), 0x52, 0x9E,
-  };
-  EXPECT_TRUE(CheckLinkedMethod(MethodRef(1u), ArrayRef<const uint8_t>(expected_code)));
-}
-
-void Mips64RelativePatcherTest::TestStringBssEntry(uint32_t bss_begin,
-                                                   uint32_t string_entry_offset) {
-  constexpr uint32_t kStringIndex = 1u;
-  string_index_to_offset_map_.Put(kStringIndex, string_entry_offset);
-  bss_begin_ = bss_begin;
-  LinkerPatch patches[] = {
-      LinkerPatch::StringBssEntryPatch(kLiteralOffsetHigh, nullptr, kAnchorOffset, kStringIndex),
-      LinkerPatch::StringBssEntryPatch(kLiteralOffsetLow1, nullptr, kAnchorOffset, kStringIndex),
-      LinkerPatch::StringBssEntryPatch(kLiteralOffsetLow2, nullptr, kAnchorOffset, kStringIndex)
-  };
-  CheckPcRelativePatch(ArrayRef<const LinkerPatch>(patches), bss_begin_ + string_entry_offset);
-}
-
-TEST_F(Mips64RelativePatcherTest, StringBssEntry) {
-  TestStringBssEntry(/* bss_begin */ 0x12345678, /* string_entry_offset */ 0x1234);
-}
-
-}  // namespace linker
-}  // namespace art
diff --git a/dex2oat/linker/multi_oat_relative_patcher.h b/dex2oat/linker/multi_oat_relative_patcher.h
index 9b47a0d..2daada4 100644
--- a/dex2oat/linker/multi_oat_relative_patcher.h
+++ b/dex2oat/linker/multi_oat_relative_patcher.h
@@ -114,6 +114,13 @@
     relative_patcher_->PatchPcRelativeReference(code, patch, patch_offset, target_offset);
   }
 
+  void PatchEntrypointCall(std::vector<uint8_t>* code,
+                           const LinkerPatch& patch,
+                           uint32_t patch_offset) {
+    patch_offset += adjustment_;
+    relative_patcher_->PatchEntrypointCall(code, patch, patch_offset);
+  }
+
   void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                    const LinkerPatch& patch,
                                    uint32_t patch_offset) {
diff --git a/dex2oat/linker/multi_oat_relative_patcher_test.cc b/dex2oat/linker/multi_oat_relative_patcher_test.cc
index 274084f..2a05816 100644
--- a/dex2oat/linker/multi_oat_relative_patcher_test.cc
+++ b/dex2oat/linker/multi_oat_relative_patcher_test.cc
@@ -94,6 +94,12 @@
       last_target_offset_ = target_offset;
     }
 
+    void PatchEntrypointCall(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
+                             const LinkerPatch& patch ATTRIBUTE_UNUSED,
+                             uint32_t patch_offset ATTRIBUTE_UNUSED) override {
+      LOG(FATAL) << "UNIMPLEMENTED";
+    }
+
     void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
                                      const LinkerPatch& patch ATTRIBUTE_UNUSED,
                                      uint32_t patch_offset ATTRIBUTE_UNUSED) override {
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index 6b23883..eed76cc 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -61,6 +61,7 @@
 #include "mirror/class_loader.h"
 #include "mirror/dex_cache-inl.h"
 #include "mirror/object-inl.h"
+#include "oat.h"
 #include "oat_quick_method_header.h"
 #include "profile/profile_compilation_info.h"
 #include "quicken_info.h"
@@ -415,7 +416,8 @@
     size_quickening_info_alignment_(0),
     size_interpreter_to_interpreter_bridge_(0),
     size_interpreter_to_compiled_code_bridge_(0),
-    size_jni_dlsym_lookup_(0),
+    size_jni_dlsym_lookup_trampoline_(0),
+    size_jni_dlsym_lookup_critical_trampoline_(0),
     size_quick_generic_jni_trampoline_(0),
     size_quick_imt_conflict_trampoline_(0),
     size_quick_resolution_trampoline_(0),
@@ -502,21 +504,38 @@
                                  const char* location,
                                  CreateTypeLookupTable create_type_lookup_table) {
   DCHECK(write_state_ == WriteState::kAddingDexFileSources);
-  uint32_t magic;
-  std::string error_msg;
-  File fd = OpenAndReadMagic(filename, &magic, &error_msg);
+  File fd(filename, O_RDONLY, /* check_usage= */ false);
   if (fd.Fd() == -1) {
-    PLOG(ERROR) << "Failed to read magic number from dex file: '" << filename << "'";
+    PLOG(ERROR) << "Failed to open dex file: '" << filename << "'";
     return false;
-  } else if (DexFileLoader::IsMagicValid(magic)) {
+  }
+
+  return AddDexFileSource(std::move(fd), location, create_type_lookup_table);
+}
+
+// Add dex file source(s) from a file specified by a file handle.
+// Note: The `dex_file_fd` specifies a plain dex file or a zip file.
+bool OatWriter::AddDexFileSource(File&& dex_file_fd,
+                                 const char* location,
+                                 CreateTypeLookupTable create_type_lookup_table) {
+  DCHECK(write_state_ == WriteState::kAddingDexFileSources);
+  std::string error_msg;
+  uint32_t magic;
+  if (!ReadMagicAndReset(dex_file_fd.Fd(), &magic, &error_msg)) {
+    LOG(ERROR) << "Failed to read magic number from dex file '" << location << "': " << error_msg;
+    return false;
+  }
+  if (DexFileLoader::IsMagicValid(magic)) {
     uint8_t raw_header[sizeof(DexFile::Header)];
-    const UnalignedDexFileHeader* header = GetDexFileHeader(&fd, raw_header, location);
+    const UnalignedDexFileHeader* header = GetDexFileHeader(&dex_file_fd, raw_header, location);
     if (header == nullptr) {
+      LOG(ERROR) << "Failed to get DexFileHeader from file descriptor for '"
+          << location << "': " << error_msg;
       return false;
     }
     // The file is open for reading, not writing, so it's OK to let the File destructor
     // close it without checking for explicit Close(), so pass checkUsage = false.
-    raw_dex_files_.emplace_back(new File(fd.Release(), location, /* checkUsage */ false));
+    raw_dex_files_.emplace_back(new File(dex_file_fd.Release(), location, /* checkUsage */ false));
     oat_dex_files_.emplace_back(/* OatDexFile */
         location,
         DexFileSource(raw_dex_files_.back().get()),
@@ -524,48 +543,36 @@
         header->checksum_,
         header->file_size_);
   } else if (IsZipMagic(magic)) {
-    if (!AddZippedDexFilesSource(std::move(fd), location, create_type_lookup_table)) {
+    zip_archives_.emplace_back(ZipArchive::OpenFromFd(dex_file_fd.Release(), location, &error_msg));
+    ZipArchive* zip_archive = zip_archives_.back().get();
+    if (zip_archive == nullptr) {
+      LOG(ERROR) << "Failed to open zip from file descriptor for '" << location << "': "
+          << error_msg;
+      return false;
+    }
+    for (size_t i = 0; ; ++i) {
+      std::string entry_name = DexFileLoader::GetMultiDexClassesDexName(i);
+      std::unique_ptr<ZipEntry> entry(zip_archive->Find(entry_name.c_str(), &error_msg));
+      if (entry == nullptr) {
+        break;
+      }
+      zipped_dex_files_.push_back(std::move(entry));
+      zipped_dex_file_locations_.push_back(DexFileLoader::GetMultiDexLocation(i, location));
+      const char* full_location = zipped_dex_file_locations_.back().c_str();
+      // We override the checksum from header with the CRC from ZIP entry.
+      oat_dex_files_.emplace_back(/* OatDexFile */
+          full_location,
+          DexFileSource(zipped_dex_files_.back().get()),
+          create_type_lookup_table,
+          zipped_dex_files_.back()->GetCrc32(),
+          zipped_dex_files_.back()->GetUncompressedLength());
+    }
+    if (zipped_dex_file_locations_.empty()) {
+      LOG(ERROR) << "No dex files in zip file '" << location << "': " << error_msg;
       return false;
     }
   } else {
-    LOG(ERROR) << "Expected valid zip or dex file: '" << filename << "'";
-    return false;
-  }
-  return true;
-}
-
-// Add dex file source(s) from a zip file specified by a file handle.
-bool OatWriter::AddZippedDexFilesSource(File&& zip_fd,
-                                        const char* location,
-                                        CreateTypeLookupTable create_type_lookup_table) {
-  DCHECK(write_state_ == WriteState::kAddingDexFileSources);
-  std::string error_msg;
-  zip_archives_.emplace_back(ZipArchive::OpenFromFd(zip_fd.Release(), location, &error_msg));
-  ZipArchive* zip_archive = zip_archives_.back().get();
-  if (zip_archive == nullptr) {
-    LOG(ERROR) << "Failed to open zip from file descriptor for '" << location << "': "
-        << error_msg;
-    return false;
-  }
-  for (size_t i = 0; ; ++i) {
-    std::string entry_name = DexFileLoader::GetMultiDexClassesDexName(i);
-    std::unique_ptr<ZipEntry> entry(zip_archive->Find(entry_name.c_str(), &error_msg));
-    if (entry == nullptr) {
-      break;
-    }
-    zipped_dex_files_.push_back(std::move(entry));
-    zipped_dex_file_locations_.push_back(DexFileLoader::GetMultiDexLocation(i, location));
-    const char* full_location = zipped_dex_file_locations_.back().c_str();
-    // We override the checksum from header with the CRC from ZIP entry.
-    oat_dex_files_.emplace_back(/* OatDexFile */
-        full_location,
-        DexFileSource(zipped_dex_files_.back().get()),
-        create_type_lookup_table,
-        zipped_dex_files_.back()->GetCrc32(),
-        zipped_dex_files_.back()->GetUncompressedLength());
-  }
-  if (zipped_dex_file_locations_.empty()) {
-    LOG(ERROR) << "No dex files in zip file '" << location << "': " << error_msg;
+    LOG(ERROR) << "Expected valid zip or dex file: '" << location << "'";
     return false;
   }
   return true;
@@ -658,8 +665,6 @@
 
 bool OatWriter::WriteAndOpenDexFiles(
     File* vdex_file,
-    OutputStream* oat_rodata,
-    SafeMap<std::string, std::string>* key_value_store,
     bool verify,
     bool update_input_vdex,
     CopyOption copy_dex_files,
@@ -667,23 +672,9 @@
     /*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files) {
   CHECK(write_state_ == WriteState::kAddingDexFileSources);
 
-  // Record the ELF rodata section offset, i.e. the beginning of the OAT data.
-  if (!RecordOatDataOffset(oat_rodata)) {
-     return false;
-  }
-
-  // Record whether this is the primary oat file.
-  primary_oat_file_ = (key_value_store != nullptr);
-
-  // Initialize VDEX and OAT headers.
-
   // Reserve space for Vdex header and checksums.
   vdex_size_ = sizeof(VdexFile::VerifierDepsHeader) +
       oat_dex_files_.size() * sizeof(VdexFile::VdexChecksum);
-  oat_size_ = InitOatHeader(dchecked_integral_cast<uint32_t>(oat_dex_files_.size()),
-                            key_value_store);
-
-  ChecksumUpdatingOutputStream checksum_updating_rodata(oat_rodata, this);
 
   std::unique_ptr<BufferedOutputStream> vdex_out =
       std::make_unique<BufferedOutputStream>(std::make_unique<FileOutputStream>(vdex_file));
@@ -695,6 +686,31 @@
     return false;
   }
 
+  *opened_dex_files_map = std::move(dex_files_map);
+  *opened_dex_files = std::move(dex_files);
+  write_state_ = WriteState::kStartRoData;
+  return true;
+}
+
+bool OatWriter::StartRoData(const std::vector<const DexFile*>& dex_files,
+                            OutputStream* oat_rodata,
+                            SafeMap<std::string, std::string>* key_value_store) {
+  CHECK(write_state_ == WriteState::kStartRoData);
+
+  // Record the ELF rodata section offset, i.e. the beginning of the OAT data.
+  if (!RecordOatDataOffset(oat_rodata)) {
+     return false;
+  }
+
+  // Record whether this is the primary oat file.
+  primary_oat_file_ = (key_value_store != nullptr);
+
+  // Initialize OAT header.
+  oat_size_ = InitOatHeader(dchecked_integral_cast<uint32_t>(oat_dex_files_.size()),
+                            key_value_store);
+
+  ChecksumUpdatingOutputStream checksum_updating_rodata(oat_rodata, this);
+
   // Write type lookup tables into the oat file.
   if (!WriteTypeLookupTables(&checksum_updating_rodata, dex_files)) {
     return false;
@@ -705,9 +721,7 @@
     return false;
   }
 
-  *opened_dex_files_map = std::move(dex_files_map);
-  *opened_dex_files = std::move(dex_files);
-  write_state_ = WriteState::kPrepareLayout;
+  write_state_ = WriteState::kInitialize;
   return true;
 }
 
@@ -715,9 +729,11 @@
 void OatWriter::Initialize(const CompilerDriver* compiler_driver,
                            ImageWriter* image_writer,
                            const std::vector<const DexFile*>& dex_files) {
+  CHECK(write_state_ == WriteState::kInitialize);
   compiler_driver_ = compiler_driver;
   image_writer_ = image_writer;
   dex_files_ = &dex_files;
+  write_state_ = WriteState::kPrepareLayout;
 }
 
 void OatWriter::PrepareLayout(MultiOatRelativePatcher* relative_patcher) {
@@ -726,7 +742,7 @@
   relative_patcher_ = relative_patcher;
   SetMultiOatRelativePatcherAdjustment();
 
-  if (GetCompilerOptions().IsBootImage()) {
+  if (GetCompilerOptions().IsBootImage() || GetCompilerOptions().IsBootImageExtension()) {
     CHECK(image_writer_ != nullptr);
   }
   InstructionSet instruction_set = compiler_options_.GetInstructionSet();
@@ -981,6 +997,11 @@
         status = ClassStatus::kNotReady;
       }
     }
+    // We never emit kRetryVerificationAtRuntime, instead we mark the class as
+    // resolved and the class will therefore be re-verified at runtime.
+    if (status == ClassStatus::kRetryVerificationAtRuntime) {
+      status = ClassStatus::kResolved;
+    }
 
     writer_->oat_class_headers_.emplace_back(offset_,
                                              compiled_methods_with_code_,
@@ -1560,7 +1581,8 @@
     Thread* self = Thread::Current();
     ObjPtr<mirror::DexCache> dex_cache = class_linker_->FindDexCache(self, *dex_file_);
     ArtMethod* resolved_method;
-    if (writer_->GetCompilerOptions().IsBootImage()) {
+    if (writer_->GetCompilerOptions().IsBootImage() ||
+        writer_->GetCompilerOptions().IsBootImageExtension()) {
       resolved_method = class_linker_->LookupResolvedMethod(
           method.GetIndex(), dex_cache, /*class_loader=*/ nullptr);
       if (resolved_method == nullptr) {
@@ -1601,7 +1623,7 @@
 
   // Assign a pointer to quick code for copied methods
   // not handled in the method StartClass
-  void Postprocess() {
+  void Postprocess() REQUIRES_SHARED(Locks::mutator_lock_) {
     for (std::pair<ArtMethod*, ArtMethod*>& p : methods_to_process_) {
       ArtMethod* method = p.first;
       ArtMethod* origin = p.second;
@@ -1640,7 +1662,8 @@
         dex_cache_(nullptr),
         no_thread_suspension_("OatWriter patching") {
     patched_code_.reserve(16 * KB);
-    if (writer_->GetCompilerOptions().IsBootImage()) {
+    if (writer_->GetCompilerOptions().IsBootImage() ||
+        writer_->GetCompilerOptions().IsBootImageExtension()) {
       // If we're creating the image, the address space must be ready so that we can apply patches.
       CHECK(writer_->image_writer_->IsImageAddressSpaceReady());
     }
@@ -1812,6 +1835,12 @@
                                                                    target_offset);
               break;
             }
+            case LinkerPatch::Type::kCallEntrypoint: {
+              writer_->relative_patcher_->PatchEntrypointCall(&patched_code_,
+                                                              patch,
+                                                              offset_ + literal_offset);
+              break;
+            }
             case LinkerPatch::Type::kBakerReadBarrierBranch: {
               writer_->relative_patcher_->PatchBakerReadBarrierBranch(&patched_code_,
                                                                       patch,
@@ -1883,24 +1912,20 @@
 
   uint32_t GetTargetOffset(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
     uint32_t target_offset = writer_->relative_patcher_->GetOffset(patch.TargetMethod());
-    // If there's no new compiled code, either we're compiling an app and the target method
-    // is in the boot image, or we need to point to the correct trampoline.
+    // If there's no new compiled code, we need to point to the correct trampoline.
     if (UNLIKELY(target_offset == 0)) {
       ArtMethod* target = GetTargetMethod(patch);
       DCHECK(target != nullptr);
-      const void* oat_code_offset =
-          target->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size_);
-      if (oat_code_offset != nullptr) {
-        DCHECK(!writer_->GetCompilerOptions().IsBootImage());
-        DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickResolutionStub(oat_code_offset));
-        DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickToInterpreterBridge(oat_code_offset));
-        DCHECK(!Runtime::Current()->GetClassLinker()->IsQuickGenericJniStub(oat_code_offset));
-        target_offset = PointerToLowMemUInt32(oat_code_offset);
-      } else {
-        target_offset = target->IsNative()
-            ? writer_->oat_header_->GetQuickGenericJniTrampolineOffset()
-            : writer_->oat_header_->GetQuickToInterpreterBridgeOffset();
-      }
+      // TODO: Remove kCallRelative? This patch type is currently not in use.
+      // If we want to use it again, we should make sure that we either use it
+      // only for target methods that were actually compiled, or call the
+      // method dispatch thunk. Currently, ARM/ARM64 patchers would emit the
+      // thunk for far `target_offset` (so we could teach them to use the
+      // thunk for `target_offset == 0`) but x86/x86-64 patchers do not.
+      // (When this was originally implemented, every oat file contained
+      // trampolines, so we could just return their offset here. Now only
+      // the boot image contains them, so this is not always an option.)
+      LOG(FATAL) << "The target method was not compiled.";
     }
     return target_offset;
   }
@@ -1929,7 +1954,7 @@
         linker->LookupString(patch.TargetStringIndex(), GetDexCache(patch.TargetStringDexFile()));
     DCHECK(string != nullptr);
     DCHECK(writer_->GetCompilerOptions().IsBootImage() ||
-           Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(string));
+           writer_->GetCompilerOptions().IsBootImageExtension());
     return string;
   }
 
@@ -1945,7 +1970,8 @@
   }
 
   uint32_t GetTargetMethodOffset(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
-    DCHECK(writer_->GetCompilerOptions().IsBootImage());
+    DCHECK(writer_->GetCompilerOptions().IsBootImage() ||
+           writer_->GetCompilerOptions().IsBootImageExtension());
     method = writer_->image_writer_->GetImageMethodAddress(method);
     size_t oat_index = writer_->image_writer_->GetOatIndexForDexFile(dex_file_);
     uintptr_t oat_data_begin = writer_->image_writer_->GetOatDataBegin(oat_index);
@@ -1955,33 +1981,14 @@
 
   uint32_t GetTargetObjectOffset(ObjPtr<mirror::Object> object)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    DCHECK(writer_->GetCompilerOptions().IsBootImage());
+    DCHECK(writer_->GetCompilerOptions().IsBootImage() ||
+           writer_->GetCompilerOptions().IsBootImageExtension());
     object = writer_->image_writer_->GetImageAddress(object.Ptr());
     size_t oat_index = writer_->image_writer_->GetOatIndexForDexFile(dex_file_);
     uintptr_t oat_data_begin = writer_->image_writer_->GetOatDataBegin(oat_index);
     // TODO: Clean up offset types. The target offset must be treated as signed.
     return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(object.Ptr()) - oat_data_begin);
   }
-
-  void PatchObjectAddress(std::vector<uint8_t>* code, uint32_t offset, mirror::Object* object)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (writer_->GetCompilerOptions().IsBootImage()) {
-      object = writer_->image_writer_->GetImageAddress(object);
-    } else {
-      // NOTE: We're using linker patches for app->boot references when the image can
-      // be relocated and therefore we need to emit .oat_patches. We're not using this
-      // for app->app references, so check that the object is in the image space.
-      DCHECK(Runtime::Current()->GetHeap()->FindSpaceFromObject(object, false)->IsImageSpace());
-    }
-    // Note: We only patch targeting Objects in image which is in the low 4gb.
-    uint32_t address = PointerToLowMemUInt32(object);
-    DCHECK_LE(offset + 4, code->size());
-    uint8_t* data = &(*code)[offset];
-    data[0] = address & 0xffu;
-    data[1] = (address >> 8) & 0xffu;
-    data[2] = (address >> 16) & 0xffu;
-    data[3] = (address >> 24) & 0xffu;
-  }
 };
 
 // Visit all methods from all classes in all dex files with the specified visitor.
@@ -2194,7 +2201,8 @@
     size_t adjusted_offset = offset;
 
     #define DO_TRAMPOLINE(field, fn_name)                                   \
-      offset = CompiledCode::AlignCode(offset, instruction_set);            \
+      /* Pad with at least four 0xFFs so we can do DCHECKs in OatQuickMethodHeader */ \
+      offset = CompiledCode::AlignCode(offset + 4, instruction_set);        \
       adjusted_offset = offset + CompiledCode::CodeDelta(instruction_set);  \
       oat_header_->Set ## fn_name ## Offset(adjusted_offset);               \
       (field) = compiler_driver_->Create ## fn_name();                      \
@@ -2210,7 +2218,8 @@
       }                                                                     \
       offset += (field)->size();
 
-    DO_TRAMPOLINE(jni_dlsym_lookup_, JniDlsymLookup);
+    DO_TRAMPOLINE(jni_dlsym_lookup_trampoline_, JniDlsymLookupTrampoline);
+    DO_TRAMPOLINE(jni_dlsym_lookup_critical_trampoline_, JniDlsymLookupCriticalTrampoline);
     DO_TRAMPOLINE(quick_generic_jni_trampoline_, QuickGenericJniTrampoline);
     DO_TRAMPOLINE(quick_imt_conflict_trampoline_, QuickImtConflictTrampoline);
     DO_TRAMPOLINE(quick_resolution_trampoline_, QuickResolutionTrampoline);
@@ -2218,7 +2227,8 @@
 
     #undef DO_TRAMPOLINE
   } else {
-    oat_header_->SetJniDlsymLookupOffset(0);
+    oat_header_->SetJniDlsymLookupTrampolineOffset(0);
+    oat_header_->SetJniDlsymLookupCriticalTrampolineOffset(0);
     oat_header_->SetQuickGenericJniTrampolineOffset(0);
     oat_header_->SetQuickImtConflictTrampolineOffset(0);
     oat_header_->SetQuickResolutionTrampolineOffset(0);
@@ -2275,6 +2285,7 @@
   }
 
   if (HasImage()) {
+    ScopedObjectAccess soa(Thread::Current());
     ScopedAssertNoThreadSuspension sants("Init image method visitor", Thread::Current());
     InitImageMethodVisitor image_visitor(this, offset, dex_files_);
     success = VisitDexMethods(&image_visitor);
@@ -2454,7 +2465,7 @@
     return written_bytes_;
   }
 
-  SafeMap<const DexFile*, std::vector<uint32_t>>& GetQuickenInfoOffsetIndicies() {
+  SafeMap<const DexFile*, std::vector<uint32_t>>& GetQuickenInfoOffsetIndices() {
     return quicken_info_offset_indices_;
   }
 
@@ -2565,7 +2576,7 @@
     WriteQuickeningInfoOffsetsMethodVisitor table_visitor(
         vdex_out,
         quicken_info_offset,
-        &write_quicken_info_visitor.GetQuickenInfoOffsetIndicies(),
+        &write_quicken_info_visitor.GetQuickenInfoOffsetIndices(),
         /*out*/ &table_offsets);
     if (!table_visitor.VisitDexMethods(*dex_files_)) {
       PLOG(ERROR) << "Failed to write the vdex quickening info. File: "
@@ -2756,7 +2767,8 @@
     DO_STAT(size_quickening_info_alignment_);
     DO_STAT(size_interpreter_to_interpreter_bridge_);
     DO_STAT(size_interpreter_to_compiled_code_bridge_);
-    DO_STAT(size_jni_dlsym_lookup_);
+    DO_STAT(size_jni_dlsym_lookup_trampoline_);
+    DO_STAT(size_jni_dlsym_lookup_critical_trampoline_);
     DO_STAT(size_quick_generic_jni_trampoline_);
     DO_STAT(size_quick_imt_conflict_trampoline_);
     DO_STAT(size_quick_resolution_trampoline_);
@@ -3070,9 +3082,13 @@
 
     #define DO_TRAMPOLINE(field) \
       do { \
-        uint32_t aligned_offset = CompiledCode::AlignCode(relative_offset, instruction_set); \
+        /* Pad with at least four 0xFFs so we can do DCHECKs in OatQuickMethodHeader */ \
+        uint32_t aligned_offset = CompiledCode::AlignCode(relative_offset + 4, instruction_set); \
         uint32_t alignment_padding = aligned_offset - relative_offset; \
-        out->Seek(alignment_padding, kSeekCurrent); \
+        for (size_t i = 0; i < alignment_padding; i++) { \
+          uint8_t padding = 0xFF; \
+          out->WriteFully(&padding, 1); \
+        } \
         size_trampoline_alignment_ += alignment_padding; \
         if (!out->WriteFully((field)->data(), (field)->size())) { \
           PLOG(ERROR) << "Failed to write " # field " to " << out->GetLocation(); \
@@ -3083,7 +3099,8 @@
         DCHECK_OFFSET(); \
       } while (false)
 
-    DO_TRAMPOLINE(jni_dlsym_lookup_);
+    DO_TRAMPOLINE(jni_dlsym_lookup_trampoline_);
+    DO_TRAMPOLINE(jni_dlsym_lookup_critical_trampoline_);
     DO_TRAMPOLINE(quick_generic_jni_trampoline_);
     DO_TRAMPOLINE(quick_imt_conflict_trampoline_);
     DO_TRAMPOLINE(quick_resolution_trampoline_);
@@ -3777,9 +3794,8 @@
   return true;
 }
 
-bool OatWriter::WriteTypeLookupTables(
-    OutputStream* oat_rodata,
-    const std::vector<std::unique_ptr<const DexFile>>& opened_dex_files) {
+bool OatWriter::WriteTypeLookupTables(OutputStream* oat_rodata,
+                                      const std::vector<const DexFile*>& opened_dex_files) {
   TimingLogger::ScopedTiming split("WriteTypeLookupTables", timings_);
 
   uint32_t expected_offset = oat_data_offset_ + oat_size_;
@@ -3807,6 +3823,8 @@
 
     // Create the lookup table. When `nullptr` is given as the storage buffer,
     // TypeLookupTable allocates its own and OatDexFile takes ownership.
+    // TODO: Create the table in an mmap()ed region of the output file to reduce dirty memory.
+    // (We used to do that when dex files were still copied into the oat file.)
     const DexFile& dex_file = *opened_dex_files[i];
     {
       TypeLookupTable type_lookup_table = TypeLookupTable::Create(dex_file);
@@ -3859,13 +3877,12 @@
   return true;
 }
 
-bool OatWriter::WriteDexLayoutSections(
-    OutputStream* oat_rodata,
-    const std::vector<std::unique_ptr<const DexFile>>& opened_dex_files) {
+bool OatWriter::WriteDexLayoutSections(OutputStream* oat_rodata,
+                                       const std::vector<const DexFile*>& opened_dex_files) {
   TimingLogger::ScopedTiming split(__FUNCTION__, timings_);
 
   if (!kWriteDexLayoutInfo) {
-    return true;;
+    return true;
   }
 
   uint32_t expected_offset = oat_data_offset_ + oat_size_;
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index 84d13a8..f83f077 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -35,7 +35,6 @@
 #include "dex/type_reference.h"
 #include "linker/relative_patcher.h"  // For RelativePatcherTargetProvider.
 #include "mirror/class.h"
-#include "oat.h"
 
 namespace art {
 
@@ -44,6 +43,7 @@
 class CompilerDriver;
 class CompilerOptions;
 class DexContainer;
+class OatHeader;
 class OutputStream;
 class ProfileCompilationInfo;
 class TimingLogger;
@@ -127,12 +127,11 @@
 
   // To produce a valid oat file, the user must first add sources with any combination of
   //   - AddDexFileSource(),
-  //   - AddZippedDexFilesSource(),
   //   - AddRawDexFileSource(),
   //   - AddVdexDexFilesSource().
   // Then the user must call in order
   //   - WriteAndOpenDexFiles()
-  //   - Initialize()
+  //   - StartRoData()
   //   - WriteVerifierDeps()
   //   - WriteQuickeningInfo()
   //   - WriteChecksumsAndVdexHeader()
@@ -148,9 +147,10 @@
       const char* filename,
       const char* location,
       CreateTypeLookupTable create_type_lookup_table = CreateTypeLookupTable::kDefault);
-  // Add dex file source(s) from a zip file specified by a file handle.
-  bool AddZippedDexFilesSource(
-      File&& zip_fd,
+  // Add dex file source(s) from a file specified by a file handle.
+  // Note: The `dex_file_fd` specifies a plain dex file or a zip file.
+  bool AddDexFileSource(
+      File&& dex_file_fd,
       const char* location,
       CreateTypeLookupTable create_type_lookup_table = CreateTypeLookupTable::kDefault);
   // Add dex file source from raw memory.
@@ -167,19 +167,20 @@
   dchecked_vector<std::string> GetSourceLocations() const;
 
   // Write raw dex files to the vdex file, mmap the file and open the dex files from it.
-  // Supporting data structures are written into the .rodata section of the oat file.
   // The `verify` setting dictates whether the dex file verifier should check the dex files.
   // This is generally the case, and should only be false for tests.
   // If `update_input_vdex` is true, then this method won't actually write the dex files,
   // and the compiler will just re-use the existing vdex file.
   bool WriteAndOpenDexFiles(File* vdex_file,
-                            OutputStream* oat_rodata,
-                            SafeMap<std::string, std::string>* key_value_store,
                             bool verify,
                             bool update_input_vdex,
                             CopyOption copy_dex_files,
                             /*out*/ std::vector<MemMap>* opened_dex_files_map,
                             /*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files);
+  // Start writing .rodata, including supporting data structures for dex files.
+  bool StartRoData(const std::vector<const DexFile*>& dex_files,
+                   OutputStream* oat_rodata,
+                   SafeMap<std::string, std::string>* key_value_store);
   // Initialize the writer with the given parameters.
   void Initialize(const CompilerDriver* compiler_driver,
                   ImageWriter* image_writer,
@@ -339,9 +340,9 @@
 
   bool RecordOatDataOffset(OutputStream* out);
   bool WriteTypeLookupTables(OutputStream* oat_rodata,
-                             const std::vector<std::unique_ptr<const DexFile>>& opened_dex_files);
+                             const std::vector<const DexFile*>& opened_dex_files);
   bool WriteDexLayoutSections(OutputStream* oat_rodata,
-                              const std::vector<std::unique_ptr<const DexFile>>& opened_dex_files);
+                              const std::vector<const DexFile*>& opened_dex_files);
   bool WriteCodeAlignment(OutputStream* out, uint32_t aligned_code_delta);
   bool WriteUpTo16BytesAlignment(OutputStream* out, uint32_t size, uint32_t* stat);
   void SetMultiOatRelativePatcherAdjustment();
@@ -355,6 +356,8 @@
 
   enum class WriteState {
     kAddingDexFileSources,
+    kStartRoData,
+    kInitialize,
     kPrepareLayout,
     kWriteRoData,
     kWriteText,
@@ -471,7 +474,8 @@
   dchecked_vector<OatDexFile> oat_dex_files_;
   dchecked_vector<OatClassHeader> oat_class_headers_;
   dchecked_vector<OatClass> oat_classes_;
-  std::unique_ptr<const std::vector<uint8_t>> jni_dlsym_lookup_;
+  std::unique_ptr<const std::vector<uint8_t>> jni_dlsym_lookup_trampoline_;
+  std::unique_ptr<const std::vector<uint8_t>> jni_dlsym_lookup_critical_trampoline_;
   std::unique_ptr<const std::vector<uint8_t>> quick_generic_jni_trampoline_;
   std::unique_ptr<const std::vector<uint8_t>> quick_imt_conflict_trampoline_;
   std::unique_ptr<const std::vector<uint8_t>> quick_resolution_trampoline_;
@@ -491,7 +495,8 @@
   uint32_t size_quickening_info_alignment_;
   uint32_t size_interpreter_to_interpreter_bridge_;
   uint32_t size_interpreter_to_compiled_code_bridge_;
-  uint32_t size_jni_dlsym_lookup_;
+  uint32_t size_jni_dlsym_lookup_trampoline_;
+  uint32_t size_jni_dlsym_lookup_critical_trampoline_;
   uint32_t size_quick_generic_jni_trampoline_;
   uint32_t size_quick_imt_conflict_trampoline_;
   uint32_t size_quick_resolution_trampoline_;
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index c46aa18..1b27bce 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -41,6 +41,7 @@
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
+#include "oat.h"
 #include "oat_file-inl.h"
 #include "oat_writer.h"
 #include "profile/profile_compilation_info.h"
@@ -146,18 +147,19 @@
 
   bool WriteElf(File* vdex_file,
                 File* oat_file,
-                File&& zip_fd,
+                File&& dex_file_fd,
                 const char* location,
                 SafeMap<std::string, std::string>& key_value_store,
                 bool verify,
-                CopyOption copy) {
+                CopyOption copy,
+                ProfileCompilationInfo* profile_compilation_info = nullptr) {
     TimingLogger timings("WriteElf", false, false);
     ClearBootImageOption();
     OatWriter oat_writer(*compiler_options_,
                          &timings,
-                         /*profile_compilation_info*/nullptr,
+                         profile_compilation_info,
                          CompactDexLevel::kCompactDexLevelNone);
-    if (!oat_writer.AddZippedDexFilesSource(std::move(zip_fd), location)) {
+    if (!oat_writer.AddDexFileSource(std::move(dex_file_fd), location)) {
       return false;
     }
     return DoWriteElf(vdex_file, oat_file, oat_writer, key_value_store, verify, copy);
@@ -178,8 +180,6 @@
     std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
     if (!oat_writer.WriteAndOpenDexFiles(
         vdex_file,
-        oat_rodata,
-        &key_value_store,
         verify,
         /*update_input_vdex=*/ false,
         copy,
@@ -199,7 +199,10 @@
     MultiOatRelativePatcher patcher(compiler_options_->GetInstructionSet(),
                                     compiler_options_->GetInstructionSetFeatures(),
                                     compiler_driver_->GetCompiledMethodStorage());
-    oat_writer.Initialize(compiler_driver_.get(), nullptr, dex_files);
+    if (!oat_writer.StartRoData(dex_files, oat_rodata, &key_value_store)) {
+      return false;
+    }
+    oat_writer.Initialize(compiler_driver_.get(), /*image_writer=*/ nullptr, dex_files);
     oat_writer.PrepareLayout(&patcher);
     elf_writer->PrepareDynamicSection(oat_writer.GetOatHeader().GetExecutableOffset(),
                                       oat_writer.GetCodeSize(),
@@ -260,6 +263,56 @@
     return true;
   }
 
+  void CheckOatWriteResult(ScratchFile& oat_file,
+                           ScratchFile& vdex_file,
+                           std::vector<std::unique_ptr<const DexFile>>& input_dexfiles,
+                           const unsigned int expected_oat_dexfile_count,
+                           bool low_4gb) {
+    ASSERT_EQ(expected_oat_dexfile_count, input_dexfiles.size());
+
+    std::string error_msg;
+    std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/*zip_fd=*/ -1,
+                                                           oat_file.GetFilename(),
+                                                           oat_file.GetFilename(),
+                                                           /*executable=*/ false,
+                                                           low_4gb,
+                                                           &error_msg));
+    ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
+    ASSERT_EQ(expected_oat_dexfile_count, opened_oat_file->GetOatDexFiles().size());
+
+    if (low_4gb) {
+      uintptr_t begin = reinterpret_cast<uintptr_t>(opened_oat_file->Begin());
+      EXPECT_EQ(begin, static_cast<uint32_t>(begin));
+    }
+
+    for (uint32_t i = 0; i <  input_dexfiles.size(); i++) {
+      const std::unique_ptr<const DexFile>& dex_file_data = input_dexfiles[i];
+      std::unique_ptr<const DexFile> opened_dex_file =
+          opened_oat_file->GetOatDexFiles()[i]->OpenDexFile(&error_msg);
+
+      ASSERT_EQ(opened_oat_file->GetOatDexFiles()[i]->GetDexFileLocationChecksum(),
+                dex_file_data->GetHeader().checksum_);
+
+      ASSERT_EQ(dex_file_data->GetHeader().file_size_, opened_dex_file->GetHeader().file_size_);
+      ASSERT_EQ(0, memcmp(&dex_file_data->GetHeader(),
+                          &opened_dex_file->GetHeader(),
+                          dex_file_data->GetHeader().file_size_));
+      ASSERT_EQ(dex_file_data->GetLocation(), opened_dex_file->GetLocation());
+    }
+    const VdexFile::DexSectionHeader &vdex_header =
+        opened_oat_file->GetVdexFile()->GetDexSectionHeader();
+    if (!compiler_driver_->GetCompilerOptions().IsQuickeningCompilationEnabled()) {
+      // If quickening is enabled we will always write the table since there is no special logic
+      // that checks for all methods not being quickened (not worth the complexity).
+      ASSERT_EQ(vdex_header.GetQuickeningInfoSize(), 0u);
+    }
+
+    int64_t actual_vdex_size = vdex_file.GetFile()->GetLength();
+    ASSERT_GE(actual_vdex_size, 0);
+    ASSERT_EQ(dchecked_integral_cast<uint64_t>(actual_vdex_size),
+              opened_oat_file->GetVdexFile()->GetComputedFileSize());
+  }
+
   void TestDexFileInput(bool verify, bool low_4gb, bool use_profile);
   void TestZipFileInput(bool verify, CopyOption copy);
   void TestZipFileInputWithEmptyDex();
@@ -412,8 +465,6 @@
                                                   tmp_oat.GetFilename(),
                                                   /*executable=*/ false,
                                                   /*low_4gb=*/ true,
-                                                  /*abs_dex_location=*/ nullptr,
-                                                  /*reservation=*/ nullptr,
                                                   &error_msg));
   ASSERT_TRUE(oat_file.get() != nullptr) << error_msg;
   const OatHeader& oat_header = oat_file->GetOatHeader();
@@ -466,10 +517,10 @@
 TEST_F(OatTest, OatHeaderSizeCheck) {
   // If this test is failing and you have to update these constants,
   // it is time to update OatHeader::kOatVersion
-  EXPECT_EQ(56U, sizeof(OatHeader));
+  EXPECT_EQ(60U, sizeof(OatHeader));
   EXPECT_EQ(4U, sizeof(OatMethodOffsets));
   EXPECT_EQ(8U, sizeof(OatQuickMethodHeader));
-  EXPECT_EQ(166 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
+  EXPECT_EQ(169 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
             sizeof(QuickEntryPoints));
 }
 
@@ -531,8 +582,6 @@
                                                   tmp_oat.GetFilename(),
                                                   /*executable=*/ false,
                                                   /*low_4gb=*/ false,
-                                                  /*abs_dex_location=*/ nullptr,
-                                                  /*reservation=*/ nullptr,
                                                   &error_msg));
   ASSERT_TRUE(oat_file != nullptr);
   EXPECT_LT(static_cast<size_t>(oat_file->Size()),
@@ -551,6 +600,8 @@
   TimingLogger timings("OatTest::DexFileInput", false, false);
 
   std::vector<const char*> input_filenames;
+  std::vector<std::unique_ptr<const DexFile>> input_dexfiles;
+  std::vector<const ScratchFile*> scratch_files;
 
   ScratchFile dex_file1;
   TestDexFileBuilder builder1;
@@ -566,6 +617,8 @@
   success = dex_file1.GetFile()->Flush() == 0;
   ASSERT_TRUE(success);
   input_filenames.push_back(dex_file1.GetFilename().c_str());
+  input_dexfiles.push_back(std::move(dex_file1_data));
+  scratch_files.push_back(&dex_file1);
 
   ScratchFile dex_file2;
   TestDexFileBuilder builder2;
@@ -581,75 +634,75 @@
   success = dex_file2.GetFile()->Flush() == 0;
   ASSERT_TRUE(success);
   input_filenames.push_back(dex_file2.GetFilename().c_str());
+  input_dexfiles.push_back(std::move(dex_file2_data));
+  scratch_files.push_back(&dex_file2);
 
-  ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
   SafeMap<std::string, std::string> key_value_store;
-  std::unique_ptr<ProfileCompilationInfo>
-      profile_compilation_info(use_profile ? new ProfileCompilationInfo() : nullptr);
-  success = WriteElf(tmp_vdex.GetFile(),
-                     tmp_oat.GetFile(),
-                     input_filenames,
-                     key_value_store,
-                     verify,
-                     CopyOption::kOnlyIfCompressed,
-                     profile_compilation_info.get());
+  {
+    // Test using the AddDexFileSource() interface with the dex files.
+    ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
+    std::unique_ptr<ProfileCompilationInfo>
+        profile_compilation_info(use_profile ? new ProfileCompilationInfo() : nullptr);
+    success = WriteElf(tmp_vdex.GetFile(),
+                       tmp_oat.GetFile(),
+                       input_filenames,
+                       key_value_store,
+                       verify,
+                       CopyOption::kOnlyIfCompressed,
+                       profile_compilation_info.get());
 
-  // In verify mode, we expect failure.
-  if (verify) {
-    ASSERT_FALSE(success);
-    return;
+    // In verify mode, we expect failure.
+    if (verify) {
+      ASSERT_FALSE(success);
+      return;
+    }
+
+    ASSERT_TRUE(success);
+
+    CheckOatWriteResult(tmp_oat,
+                        tmp_vdex,
+                        input_dexfiles,
+                        /* oat_dexfile_count */ 2,
+                        low_4gb);
   }
 
-  ASSERT_TRUE(success);
+  {
+    // Test using the AddDexFileSource() interface with the dexfile1's fd.
+    // Only need one input dexfile.
+    std::vector<std::unique_ptr<const DexFile>> input_dexfiles2;
+    input_dexfiles2.push_back(std::move(input_dexfiles[0]));
+    const ScratchFile* dex_file = scratch_files[0];
+    File dex_file_fd(DupCloexec(dex_file->GetFd()), /*check_usage=*/ false);
 
-  std::string error_msg;
-  std::unique_ptr<OatFile> opened_oat_file(OatFile::Open(/*zip_fd=*/ -1,
-                                                         tmp_oat.GetFilename(),
-                                                         tmp_oat.GetFilename(),
-                                                         /*executable=*/ false,
-                                                         low_4gb,
-                                                         /*abs_dex_location=*/ nullptr,
-                                                         /*reservation=*/ nullptr,
-                                                         &error_msg));
-  ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
-  if (low_4gb) {
-    uintptr_t begin = reinterpret_cast<uintptr_t>(opened_oat_file->Begin());
-    EXPECT_EQ(begin, static_cast<uint32_t>(begin));
+    ASSERT_NE(-1, dex_file_fd.Fd());
+    ASSERT_EQ(0, lseek(dex_file_fd.Fd(), 0, SEEK_SET));
+
+    ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
+    std::unique_ptr<ProfileCompilationInfo>
+        profile_compilation_info(use_profile ? new ProfileCompilationInfo() : nullptr);
+    success = WriteElf(tmp_vdex.GetFile(),
+                       tmp_oat.GetFile(),
+                       std::move(dex_file_fd),
+                       dex_file->GetFilename().c_str(),
+                       key_value_store,
+                       verify,
+                       CopyOption::kOnlyIfCompressed,
+                       profile_compilation_info.get());
+
+    // In verify mode, we expect failure.
+    if (verify) {
+      ASSERT_FALSE(success);
+      return;
+    }
+
+    ASSERT_TRUE(success);
+
+    CheckOatWriteResult(tmp_oat,
+                        tmp_vdex,
+                        input_dexfiles2,
+                        /* oat_dexfile_count */ 1,
+                        low_4gb);
   }
-  ASSERT_EQ(2u, opened_oat_file->GetOatDexFiles().size());
-  std::unique_ptr<const DexFile> opened_dex_file1 =
-      opened_oat_file->GetOatDexFiles()[0]->OpenDexFile(&error_msg);
-  std::unique_ptr<const DexFile> opened_dex_file2 =
-      opened_oat_file->GetOatDexFiles()[1]->OpenDexFile(&error_msg);
-
-  ASSERT_EQ(opened_oat_file->GetOatDexFiles()[0]->GetDexFileLocationChecksum(),
-            dex_file1_data->GetHeader().checksum_);
-  ASSERT_EQ(opened_oat_file->GetOatDexFiles()[1]->GetDexFileLocationChecksum(),
-            dex_file2_data->GetHeader().checksum_);
-
-  ASSERT_EQ(dex_file1_data->GetHeader().file_size_, opened_dex_file1->GetHeader().file_size_);
-  ASSERT_EQ(0, memcmp(&dex_file1_data->GetHeader(),
-                      &opened_dex_file1->GetHeader(),
-                      dex_file1_data->GetHeader().file_size_));
-  ASSERT_EQ(dex_file1_data->GetLocation(), opened_dex_file1->GetLocation());
-
-  ASSERT_EQ(dex_file2_data->GetHeader().file_size_, opened_dex_file2->GetHeader().file_size_);
-  ASSERT_EQ(0, memcmp(&dex_file2_data->GetHeader(),
-                      &opened_dex_file2->GetHeader(),
-                      dex_file2_data->GetHeader().file_size_));
-  ASSERT_EQ(dex_file2_data->GetLocation(), opened_dex_file2->GetLocation());
-
-  const VdexFile::DexSectionHeader &vdex_header =
-      opened_oat_file->GetVdexFile()->GetDexSectionHeader();
-  if (!compiler_driver_->GetCompilerOptions().IsQuickeningCompilationEnabled()) {
-    // If quickening is enabled we will always write the table since there is no special logic that
-    // checks for all methods not being quickened (not worth the complexity).
-    ASSERT_EQ(vdex_header.GetQuickeningInfoSize(), 0u);
-  }
-
-  int64_t actual_vdex_size = tmp_vdex.GetFile()->GetLength();
-  ASSERT_GE(actual_vdex_size, 0);
-  ASSERT_EQ((uint64_t) actual_vdex_size, opened_oat_file->GetVdexFile()->GetComputedFileSize());
 }
 
 TEST_F(OatTest, DexFileInputCheckOutput) {
@@ -738,8 +791,6 @@
                                                              tmp_oat.GetFilename(),
                                                              /*executable=*/ false,
                                                              /*low_4gb=*/ false,
-                                                             /*abs_dex_location=*/ nullptr,
-                                                             /*reservation=*/ nullptr,
                                                              &error_msg));
       ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
       ASSERT_EQ(2u, opened_oat_file->GetOatDexFiles().size());
@@ -765,9 +816,10 @@
   }
 
   {
-    // Test using the AddZipDexFileSource() interface with the zip file handle.
+    // Test using the AddDexFileSource() interface with the zip file handle.
     File zip_fd(DupCloexec(zip_file.GetFd()), /*check_usage=*/ false);
     ASSERT_NE(-1, zip_fd.Fd());
+    ASSERT_EQ(0, lseek(zip_fd.Fd(), 0, SEEK_SET));
 
     ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
     success = WriteElf(tmp_vdex.GetFile(),
@@ -788,8 +840,6 @@
                                                              tmp_oat.GetFilename(),
                                                              /*executable=*/ false,
                                                              /*low_4gb=*/ false,
-                                                             /*abs_dex_location=*/ nullptr,
-                                                             /*reservation=*/ nullptr,
                                                              &error_msg));
       ASSERT_TRUE(opened_oat_file != nullptr) << error_msg;
       ASSERT_EQ(2u, opened_oat_file->GetOatDexFiles().size());
diff --git a/dex2oat/linker/relative_patcher.cc b/dex2oat/linker/relative_patcher.cc
index 4db0e8a..40acb0b 100644
--- a/dex2oat/linker/relative_patcher.cc
+++ b/dex2oat/linker/relative_patcher.cc
@@ -23,12 +23,6 @@
 #ifdef ART_ENABLE_CODEGEN_arm64
 #include "linker/arm64/relative_patcher_arm64.h"
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips
-#include "linker/mips/relative_patcher_mips.h"
-#endif
-#ifdef ART_ENABLE_CODEGEN_mips64
-#include "linker/mips64/relative_patcher_mips64.h"
-#endif
 #ifdef ART_ENABLE_CODEGEN_x86
 #include "linker/x86/relative_patcher_x86.h"
 #endif
@@ -77,6 +71,12 @@
       LOG(FATAL) << "Unexpected relative dex cache array patch.";
     }
 
+    void PatchEntrypointCall(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
+                             const LinkerPatch& patch ATTRIBUTE_UNUSED,
+                             uint32_t patch_offset ATTRIBUTE_UNUSED) override {
+      LOG(FATAL) << "Unexpected entrypoint call patch.";
+    }
+
     void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
                                      const LinkerPatch& patch ATTRIBUTE_UNUSED,
                                      uint32_t patch_offset ATTRIBUTE_UNUSED) override {
@@ -118,15 +118,6 @@
                                    target_provider,
                                    features->AsArm64InstructionSetFeatures()));
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips
-    case InstructionSet::kMips:
-      return std::unique_ptr<RelativePatcher>(
-          new MipsRelativePatcher(features->AsMipsInstructionSetFeatures()));
-#endif
-#ifdef ART_ENABLE_CODEGEN_mips64
-    case InstructionSet::kMips64:
-      return std::unique_ptr<RelativePatcher>(new Mips64RelativePatcher());
-#endif
     default:
       return std::unique_ptr<RelativePatcher>(new RelativePatcherNone);
   }
diff --git a/dex2oat/linker/relative_patcher.h b/dex2oat/linker/relative_patcher.h
index e8e15c9..c05445c 100644
--- a/dex2oat/linker/relative_patcher.h
+++ b/dex2oat/linker/relative_patcher.h
@@ -137,6 +137,11 @@
                                         uint32_t patch_offset,
                                         uint32_t target_offset) = 0;
 
+  // Patch a call to an entrypoint trampoline.
+  virtual void PatchEntrypointCall(std::vector<uint8_t>* code,
+                                   const LinkerPatch& patch,
+                                   uint32_t patch_offset) = 0;
+
   // Patch a branch to a Baker read barrier thunk.
   virtual void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                            const LinkerPatch& patch,
diff --git a/dex2oat/linker/relative_patcher_test.h b/dex2oat/linker/relative_patcher_test.h
index dead38d..f34e6eb 100644
--- a/dex2oat/linker/relative_patcher_test.h
+++ b/dex2oat/linker/relative_patcher_test.h
@@ -29,7 +29,6 @@
 #include "dex/string_reference.h"
 #include "driver/compiled_method_storage.h"
 #include "linker/relative_patcher.h"
-#include "oat.h"
 #include "oat_quick_method_header.h"
 #include "stream/vector_output_stream.h"
 
@@ -174,8 +173,10 @@
             auto result = method_offset_map_.FindMethodOffset(patch.TargetMethod());
             uint32_t target_offset =
                 result.first ? result.second : kTrampolineOffset + compiled_method->CodeDelta();
-            patcher_->PatchCall(&patched_code_, patch.LiteralOffset(),
-                                offset + patch.LiteralOffset(), target_offset);
+            patcher_->PatchCall(&patched_code_,
+                                patch.LiteralOffset(),
+                                offset + patch.LiteralOffset(),
+                                target_offset);
           } else if (patch.GetType() == LinkerPatch::Type::kStringBssEntry) {
             uint32_t target_offset =
                 bss_begin_ + string_index_to_offset_map_.Get(patch.TargetStringIndex().index_);
@@ -190,6 +191,10 @@
                                                patch,
                                                offset + patch.LiteralOffset(),
                                                target_offset);
+          } else if (patch.GetType() == LinkerPatch::Type::kCallEntrypoint) {
+            patcher_->PatchEntrypointCall(&patched_code_,
+                                          patch,
+                                          offset + patch.LiteralOffset());
           } else if (patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch) {
             patcher_->PatchBakerReadBarrierBranch(&patched_code_,
                                                   patch,
@@ -300,11 +305,10 @@
      public:
       explicit ThunkKey(const LinkerPatch& patch)
           : type_(patch.GetType()),
-            custom_value1_(patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch
-                               ? patch.GetBakerCustomValue1() : 0u),
-            custom_value2_(patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch
-                               ? patch.GetBakerCustomValue2() : 0u) {
-        CHECK(patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch ||
+            custom_value1_(CustomValue1(patch)),
+            custom_value2_(CustomValue2(patch)) {
+        CHECK(patch.GetType() == LinkerPatch::Type::kCallEntrypoint ||
+              patch.GetType() == LinkerPatch::Type::kBakerReadBarrierBranch ||
               patch.GetType() == LinkerPatch::Type::kCallRelative);
       }
 
@@ -319,6 +323,26 @@
       }
 
      private:
+      static uint32_t CustomValue1(const LinkerPatch& patch) {
+        switch (patch.GetType()) {
+          case LinkerPatch::Type::kCallEntrypoint:
+            return patch.EntrypointOffset();
+          case LinkerPatch::Type::kBakerReadBarrierBranch:
+            return patch.GetBakerCustomValue1();
+          default:
+            return 0;
+        }
+      }
+
+      static uint32_t CustomValue2(const LinkerPatch& patch) {
+        switch (patch.GetType()) {
+          case LinkerPatch::Type::kBakerReadBarrierBranch:
+            return patch.GetBakerCustomValue2();
+          default:
+            return 0;
+        }
+      }
+
       const LinkerPatch::Type type_;
       const uint32_t custom_value1_;
       const uint32_t custom_value2_;
diff --git a/dex2oat/linker/x86/relative_patcher_x86.cc b/dex2oat/linker/x86/relative_patcher_x86.cc
index cdd2cef..3323506 100644
--- a/dex2oat/linker/x86/relative_patcher_x86.cc
+++ b/dex2oat/linker/x86/relative_patcher_x86.cc
@@ -57,6 +57,12 @@
   (*code)[literal_offset + 3u] = static_cast<uint8_t>(diff >> 24);
 }
 
+void X86RelativePatcher::PatchEntrypointCall(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
+                                             const LinkerPatch& patch ATTRIBUTE_UNUSED,
+                                             uint32_t patch_offset ATTRIBUTE_UNUSED) {
+  LOG(FATAL) << "UNIMPLEMENTED";
+}
+
 void X86RelativePatcher::PatchBakerReadBarrierBranch(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
                                                      const LinkerPatch& patch ATTRIBUTE_UNUSED,
                                                      uint32_t patch_offset ATTRIBUTE_UNUSED) {
diff --git a/dex2oat/linker/x86/relative_patcher_x86.h b/dex2oat/linker/x86/relative_patcher_x86.h
index 3da62fb..589a498 100644
--- a/dex2oat/linker/x86/relative_patcher_x86.h
+++ b/dex2oat/linker/x86/relative_patcher_x86.h
@@ -30,6 +30,9 @@
                                 const LinkerPatch& patch,
                                 uint32_t patch_offset,
                                 uint32_t target_offset) override;
+  void PatchEntrypointCall(std::vector<uint8_t>* code,
+                           const LinkerPatch& patch,
+                           uint32_t patch_offset) override;
   void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                    const LinkerPatch& patch,
                                    uint32_t patch_offset) override;
diff --git a/dex2oat/linker/x86_64/relative_patcher_x86_64.cc b/dex2oat/linker/x86_64/relative_patcher_x86_64.cc
index c80f6a9..0b9d07e 100644
--- a/dex2oat/linker/x86_64/relative_patcher_x86_64.cc
+++ b/dex2oat/linker/x86_64/relative_patcher_x86_64.cc
@@ -35,6 +35,12 @@
   reinterpret_cast<unaligned_int32_t*>(&(*code)[patch.LiteralOffset()])[0] = displacement;
 }
 
+void X86_64RelativePatcher::PatchEntrypointCall(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
+                                                const LinkerPatch& patch ATTRIBUTE_UNUSED,
+                                                uint32_t patch_offset ATTRIBUTE_UNUSED) {
+  LOG(FATAL) << "UNIMPLEMENTED";
+}
+
 void X86_64RelativePatcher::PatchBakerReadBarrierBranch(std::vector<uint8_t>* code ATTRIBUTE_UNUSED,
                                                         const LinkerPatch& patch ATTRIBUTE_UNUSED,
                                                         uint32_t patch_offset ATTRIBUTE_UNUSED) {
diff --git a/dex2oat/linker/x86_64/relative_patcher_x86_64.h b/dex2oat/linker/x86_64/relative_patcher_x86_64.h
index a82fef3..7b99bd8 100644
--- a/dex2oat/linker/x86_64/relative_patcher_x86_64.h
+++ b/dex2oat/linker/x86_64/relative_patcher_x86_64.h
@@ -30,6 +30,9 @@
                                 const LinkerPatch& patch,
                                 uint32_t patch_offset,
                                 uint32_t target_offset) override;
+  void PatchEntrypointCall(std::vector<uint8_t>* code,
+                           const LinkerPatch& patch,
+                           uint32_t patch_offset) override;
   void PatchBakerReadBarrierBranch(std::vector<uint8_t>* code,
                                    const LinkerPatch& patch,
                                    uint32_t patch_offset) override;
diff --git a/dexdump/Android.bp b/dexdump/Android.bp
index 434cb35..0bbb001 100644
--- a/dexdump/Android.bp
+++ b/dexdump/Android.bp
@@ -12,9 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// TODO(ajcbik): rename dexdump2 into dexdump when Dalvik version is removed
-
-
 cc_defaults {
     name: "dexdump_defaults",
     defaults: ["art_defaults"],
@@ -26,33 +23,40 @@
 }
 
 art_cc_binary {
-    name: "dexdump2",
+    name: "dexdump",
     defaults: ["dexdump_defaults"],
     host_supported: true,
-    shared_libs: [
-        "libdexfile",
-        "libartbase",
-        "libbase",
-    ],
-}
-
-art_cc_binary {
-    name: "dexdumps",
-    defaults: [
-        "dexdump_defaults",
-        "libartbase_static_defaults",
-        "libdexfile_static_defaults",
-    ],
-    host_supported: true,
-    device_supported: false,
     target: {
-        darwin: {
-            enabled: false,
+        android: {
+            shared_libs: [
+                "libdexfile",
+                "libartbase",
+                "libbase",
+            ],
         },
-        windows: {
+        // Use static libs on host: required for Windows build and
+        // static_sdk_tools build.
+        host: {
+            enabled: true,
+            static_libs: [
+                "libdexfile",
+                "libartbase",
+                "libbase",
+                "libartpalette",
+                "liblog",
+                "libz",
+                "libziparchive",
+            ],
+        },
+        darwin: {
             enabled: true,
         },
     },
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+        "//apex_available:platform", // for SDK
+    ],
 }
 
 art_cc_test {
diff --git a/dexdump/dexdump_main.cc b/dexdump/dexdump_main.cc
index cf0d113..8b2b71c 100644
--- a/dexdump/dexdump_main.cc
+++ b/dexdump/dexdump_main.cc
@@ -144,7 +144,7 @@
   while (optind < argc) {
     result |= processFile(argv[optind++]);
   }  // while
-  return result != 0;
+  return result != 0 ? 1 : 0;
 }
 
 }  // namespace art
diff --git a/dexdump/dexdump_test.cc b/dexdump/dexdump_test.cc
index bb6d4a4..91ab187 100644
--- a/dexdump/dexdump_test.cc
+++ b/dexdump/dexdump_test.cc
@@ -39,8 +39,7 @@
 
   // Runs test with given arguments.
   bool Exec(const std::vector<std::string>& args, std::string* error_msg) {
-    // TODO(ajcbik): dexdump2 -> dexdump
-    std::string file_path = GetTestAndroidRoot() + "/bin/dexdump2";
+    std::string file_path = GetArtBinDir() + "/dexdump";
     EXPECT_TRUE(OS::FileExists(file_path.c_str())) << file_path << " should be a valid file path";
     std::vector<std::string> exec_argv = { file_path };
     exec_argv.insert(exec_argv.end(), args.begin(), args.end());
diff --git a/dexlayout/Android.bp b/dexlayout/Android.bp
index 838510b..cd40620 100644
--- a/dexlayout/Android.bp
+++ b/dexlayout/Android.bp
@@ -29,29 +29,20 @@
     target: {
         android: {
             shared_libs: [
-                "libartbase",
                 "libartpalette",
-                "libdexfile",
-                "libprofile",
                 "libbase",
             ],
         },
         not_windows: {
             shared_libs: [
-                "libartbase",
                 "libartpalette",
-                "libdexfile",
-                "libprofile",
                 "libbase",
             ],
         },
         windows: {
             cflags: ["-Wno-thread-safety"],
             static_libs: [
-                "libartbase",
                 "libartpalette",
-                "libdexfile",
-                "libprofile",
                 "libbase",
             ],
         },
@@ -76,16 +67,37 @@
     target: {
         android: {
             lto: {
-                 thin: true,
+                thin: true,
             },
+            shared_libs: [
+                "libartbase",
+                "libdexfile",
+                "libprofile",
+            ],
+        },
+        not_windows: {
+            shared_libs: [
+                "libartbase",
+                "libdexfile",
+                "libprofile",
+            ],
         },
         windows: {
             enabled: true,
             shared: {
                 enabled: false,
             },
+            static_libs: [
+                "libartbase",
+                "libdexfile",
+                "libprofile",
+            ],
         },
     },
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 cc_defaults {
@@ -102,13 +114,34 @@
 art_cc_library {
     name: "libartd-dexlayout",
     defaults: [
-      "libart-dexlayout-defaults",
-      "art_debug_defaults",
+        "libart-dexlayout-defaults",
+        "art_debug_defaults",
     ],
-    shared_libs: [
-        "libdexfiled",
-        "libartbased",
-        "libprofiled",
+    target: {
+        android: {
+            shared_libs: [
+                "libartbased",
+                "libdexfiled",
+                "libprofiled",
+            ],
+        },
+        not_windows: {
+            shared_libs: [
+                "libartbased",
+                "libdexfiled",
+                "libprofiled",
+            ],
+        },
+        windows: {
+            static_libs: [
+                "libartbased",
+                "libdexfiled",
+                "libprofiled",
+            ],
+        },
+    },
+    apex_available: [
+        "com.android.art.debug",
     ],
 }
 
@@ -142,6 +175,10 @@
         "libartbase",
         "libart-dexlayout",
     ],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_binary {
@@ -180,6 +217,9 @@
         "libartbased",
         "libartd-dexlayout",
     ],
+    apex_available: [
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_test {
@@ -212,6 +252,10 @@
             ],
         },
     },
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_test {
diff --git a/dexlayout/dex_ir_builder.cc b/dexlayout/dex_ir_builder.cc
index f4195b2..5636e2c 100644
--- a/dexlayout/dex_ir_builder.cc
+++ b/dexlayout/dex_ir_builder.cc
@@ -25,6 +25,7 @@
 #include "dex/class_accessor-inl.h"
 #include "dex/code_item_accessors-inl.h"
 #include "dex/dex_file_exception_helpers.h"
+#include "dex/dex_instruction-inl.h"
 #include "dexlayout.h"
 
 namespace art {
diff --git a/dexlayout/dex_writer.cc b/dexlayout/dex_writer.cc
index 268abe4..7f05ae8 100644
--- a/dexlayout/dex_writer.cc
+++ b/dexlayout/dex_writer.cc
@@ -794,8 +794,11 @@
   StandardDexFile::Header header;
   if (CompactDexFile::IsMagicValid(header_->Magic())) {
     StandardDexFile::WriteMagic(header.magic_);
-    // TODO: Should we write older versions based on the feature flags?
-    StandardDexFile::WriteCurrentVersion(header.magic_);
+    if (header_->SupportDefaultMethods()) {
+      StandardDexFile::WriteCurrentVersion(header.magic_);
+    } else {
+      StandardDexFile::WriteVersionBeforeDefaultMethods(header.magic_);
+    }
   } else {
     // Standard dex -> standard dex, just reuse the same header.
     static constexpr size_t kMagicAndVersionLen =
diff --git a/dexlayout/dexdiag.cc b/dexlayout/dexdiag.cc
index ca9018d..a9ea27d 100644
--- a/dexlayout/dexdiag.cc
+++ b/dexlayout/dexdiag.cc
@@ -495,7 +495,9 @@
   // get libmeminfo process information.
   ProcMemInfo proc(pid);
   // Get the set of mappings by the specified process.
-  const std::vector<Vma>& maps = proc.Maps();
+  // Do not get the map usage stats, they are never used and it can take
+  // a long time to get this data.
+  const std::vector<Vma>& maps = proc.MapsWithoutUsageStats();
   if (maps.empty()) {
     std::cerr << "Error listing maps." << std::endl;
     return EXIT_FAILURE;
diff --git a/dexlayout/dexdiag_test.cc b/dexlayout/dexdiag_test.cc
index 47ef0a5..27ac402 100644
--- a/dexlayout/dexdiag_test.cc
+++ b/dexlayout/dexdiag_test.cc
@@ -17,8 +17,7 @@
 #include <string>
 #include <vector>
 
-#include "common_runtime_test.h"
-
+#include "base/common_art_test.h"
 #include "base/file_utils.h"
 #include "base/os.h"
 #include "exec_utils.h"
@@ -26,32 +25,28 @@
 
 namespace art {
 
-static const char* kDexDiagContains = "--contains=core.vdex";
+static const char* kDexDiagContains = "--contains=boot.vdex";
 static const char* kDexDiagContainsFails = "--contains=anything_other_than_core.vdex";
 static const char* kDexDiagHelp = "--help";
 static const char* kDexDiagVerbose = "--verbose";
 static const char* kDexDiagBinaryName = "dexdiag";
 
-class DexDiagTest : public CommonRuntimeTest {
+class DexDiagTest : public CommonArtTest {
  protected:
   void SetUp() override {
-    CommonRuntimeTest::SetUp();
+    CommonArtTest::SetUp();
   }
 
   // Path to the dexdiag(d?)[32|64] binary.
   std::string GetDexDiagFilePath() {
-    std::string root = GetTestAndroidRoot();
-
-    root += "/bin/";
-    root += kDexDiagBinaryName;
-
-    std::string root32 = root + "32";
+    std::string path = GetArtBinDir() + '/' + kDexDiagBinaryName;
+    std::string path32 = path + "32";
     // If we have both a 32-bit and a 64-bit build, the 32-bit file will have a 32 suffix.
-    if (OS::FileExists(root32.c_str()) && !Is64BitInstructionSet(kRuntimeISA)) {
-      return root32;
+    if (OS::FileExists(path32.c_str()) && !Is64BitInstructionSet(kRuntimeISA)) {
+      return path32;
     } else {
       // This is a 64-bit build or only a single build exists.
-      return root;
+      return path;
     }
   }
 
@@ -73,8 +68,6 @@
                                                oat_location.c_str(),
                                                /*executable=*/ false,
                                                /*low_4gb=*/ false,
-                                               /*abs_dex_location=*/ nullptr,
-                                               /*reservation=*/ nullptr,
                                                &error_msg));
     EXPECT_TRUE(oat != nullptr) << error_msg;
     return oat;
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
index 7382a97..f5eeeb7 100644
--- a/dexlayout/dexlayout.cc
+++ b/dexlayout/dexlayout.cc
@@ -28,6 +28,7 @@
 #include <iostream>
 #include <memory>
 #include <sstream>
+#include <unordered_set>
 #include <vector>
 
 #include "android-base/stringprintf.h"
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
index 2163f89..12674f5 100644
--- a/dexlayout/dexlayout_main.cc
+++ b/dexlayout/dexlayout_main.cc
@@ -222,7 +222,7 @@
     fclose(out_file);
   }
 
-  return result != 0;
+  return result != 0 ? 1 : 0;
 }
 
 }  // namespace art
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index b68449e..b4e023f 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -21,9 +21,9 @@
 #include <sys/types.h>
 #include <unistd.h>
 
+#include "base/common_art_test.h"
 #include "base/unix_file/fd_file.h"
 #include "base/utils.h"
-#include "common_runtime_test.h"
 #include "dex/art_dex_file_loader.h"
 #include "dex/base64_test_util.h"
 #include "dex/class_accessor-inl.h"
@@ -250,18 +250,17 @@
   }
 }
 
-class DexLayoutTest : public CommonRuntimeTest {
+class DexLayoutTest : public CommonArtTest {
  protected:
   std::string GetDexLayoutPath() {
-    return GetTestAndroidRoot() + "/bin/dexlayoutd";
+    return GetArtBinDir() + "/dexlayoutd";
   }
 
   // Runs FullPlainOutput test.
   bool FullPlainOutputExec(std::string* error_msg) {
-    // TODO: dexdump2 -> dexdump ?
     ScratchFile dexdump_output;
     const std::string& dexdump_filename = dexdump_output.GetFilename();
-    std::string dexdump = GetTestAndroidRoot() + "/bin/dexdump2";
+    std::string dexdump = GetArtBinDir() + "/dexdump";
     EXPECT_TRUE(OS::FileExists(dexdump.c_str())) << dexdump << " should be a valid file path";
 
     ScratchFile dexlayout_output;
@@ -326,8 +325,7 @@
 
   // Create a profile with some subset of methods and classes.
   void CreateProfile(const std::string& input_dex,
-                     const std::string& out_profile,
-                     const std::string& dex_location) {
+                     const std::string& out_profile) {
     std::vector<std::unique_ptr<const DexFile>> dex_files;
     std::string error_msg;
     const ArtDexFileLoader dex_file_loader;
@@ -344,7 +342,6 @@
     size_t profile_methods = 0;
     size_t profile_classes = 0;
     ProfileCompilationInfo pfi;
-    std::set<DexCacheResolvedClasses> classes;
     for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
       for (uint32_t i = 0; i < dex_file->NumMethodIds(); i += 2) {
         uint8_t flags = 0u;
@@ -356,26 +353,21 @@
           flags |= ProfileCompilationInfo::MethodHotness::kFlagStartup;
           ++profile_methods;
         }
-        pfi.AddMethodIndex(static_cast<ProfileCompilationInfo::MethodHotness::Flag>(flags),
-                           dex_location,
-                           dex_file->GetLocationChecksum(),
-                           /*method_idx=*/i,
-                           dex_file->NumMethodIds());
+        pfi.AddMethod(ProfileMethodInfo(MethodReference(dex_file.get(), /*index=*/ i)),
+                      static_cast<ProfileCompilationInfo::MethodHotness::Flag>(flags));
       }
-      DexCacheResolvedClasses cur_classes(dex_location,
-                                          dex_location,
-                                          dex_file->GetLocationChecksum(),
-                                          dex_file->NumMethodIds());
       // Add every even class too.
+      std::set<dex::TypeIndex> classes;
       for (uint32_t i = 0; i < dex_file->NumClassDefs(); i += 1) {
         if ((i & 2) == 0) {
-          cur_classes.AddClass(dex_file->GetClassDef(i).class_idx_);
+          classes.insert(dex::TypeIndex(dex_file->GetClassDef(i).class_idx_));
           ++profile_classes;
         }
       }
-      classes.insert(cur_classes);
+      if (!classes.empty()) {
+        pfi.AddClassesForDex(dex_file.get(), classes.begin(), classes.end());
+      }
     }
-    pfi.AddClasses(classes);
     // Write to provided file.
     std::unique_ptr<File> file(OS::CreateEmptyFile(out_profile.c_str()));
     ASSERT_TRUE(file != nullptr);
@@ -398,7 +390,7 @@
     std::string dex_file = tmp_dir + "classes.dex";
     WriteFileBase64(kDexFileLayoutInputDex, dex_file.c_str());
     std::string profile_file = tmp_dir + "primary.prof";
-    CreateProfile(dex_file, profile_file, dex_file);
+    CreateProfile(dex_file, profile_file);
     // WriteFileBase64(kDexFileLayoutInputProfile, profile_file.c_str());
     std::string output_dex = tmp_dir + "classes.dex.new";
 
@@ -440,7 +432,7 @@
     }
 
     std::string profile_file = tmp_dir + "primary.prof";
-    CreateProfile(dex_file, profile_file, dex_file);
+    CreateProfile(dex_file, profile_file);
     std::string output_dex = tmp_dir + "classes.dex.new";
     std::string second_output_dex = tmp_dir + "classes.dex.new.new";
 
@@ -453,8 +445,13 @@
 
     // Recreate the profile with the new dex location. This is required so that the profile dex
     // location matches.
-    CreateProfile(dex_file, profile_file, output_dex);
+    // For convenience we just copy the previous dex file to the new location so we can re-use it
+    // for profile generation.
 
+    // Don't check the output. The exec cmd wrongfully complains that the cp cmd fails.
+    std::vector<std::string> cp_args = {"/usr/bin/cp", dex_file, output_dex};
+    art::Exec(cp_args, error_msg);
+    CreateProfile(output_dex, profile_file);
     // -v makes sure that the layout did not corrupt the dex file.
     // -i since the checksum won't match from the first layout.
     std::vector<std::string> second_dexlayout_args =
@@ -521,7 +518,7 @@
       EXPECT_EQ(dex_file->GetFile()->Flush(), 0);
     }
     if (profile_file != nullptr) {
-      CreateProfile(dex_file->GetFilename(), profile_file->GetFilename(), dex_file->GetFilename());
+      CreateProfile(dex_file->GetFilename(), profile_file->GetFilename());
     }
 
     std::string error_msg;
diff --git a/dexlist/Android.bp b/dexlist/Android.bp
index 356791c..860a5fd 100644
--- a/dexlist/Android.bp
+++ b/dexlist/Android.bp
@@ -20,7 +20,11 @@
     shared_libs: [
         "libdexfile",
         "libartbase",
-        "libbase"
+        "libbase",
+    ],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
     ],
 }
 
diff --git a/dexlist/dexlist.cc b/dexlist/dexlist.cc
index dd32fae..9ef9ae9 100644
--- a/dexlist/dexlist.cc
+++ b/dexlist/dexlist.cc
@@ -273,7 +273,7 @@
   }  // while
 
   free(gOptions.argCopy);
-  return result != 0;
+  return result != 0 ? 1 : 0;
 }
 
 }  // namespace art
diff --git a/dexlist/dexlist_test.cc b/dexlist/dexlist_test.cc
index 39e5f8c..fa17a2f 100644
--- a/dexlist/dexlist_test.cc
+++ b/dexlist/dexlist_test.cc
@@ -41,8 +41,7 @@
 
   // Runs test with given arguments.
   bool Exec(const std::vector<std::string>& args, std::string* error_msg) {
-    std::string file_path = GetTestAndroidRoot();
-    file_path += "/bin/dexlist";
+    std::string file_path = GetArtBinDir() + "/dexlist";
     EXPECT_TRUE(OS::FileExists(file_path.c_str())) << file_path << " should be a valid file path";
     std::vector<std::string> exec_argv = { file_path };
     exec_argv.insert(exec_argv.end(), args.begin(), args.end());
diff --git a/dexoptanalyzer/Android.bp b/dexoptanalyzer/Android.bp
index 72896c8..7875dbb 100644
--- a/dexoptanalyzer/Android.bp
+++ b/dexoptanalyzer/Android.bp
@@ -41,6 +41,10 @@
         "libart",
         "libartbase",
     ],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_binary {
@@ -53,6 +57,9 @@
         "libartd",
         "libartbased",
     ],
+    apex_available: [
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_test {
diff --git a/dexoptanalyzer/dexoptanalyzer.cc b/dexoptanalyzer/dexoptanalyzer.cc
index b411118..7454993 100644
--- a/dexoptanalyzer/dexoptanalyzer.cc
+++ b/dexoptanalyzer/dexoptanalyzer.cc
@@ -319,10 +319,10 @@
     }
 
     int dexoptNeeded = oat_file_assistant->GetDexOptNeeded(compiler_filter_,
-                                                           assume_profile_changed_,
-                                                           downgrade_,
                                                            class_loader_context.get(),
-                                                           context_fds_);
+                                                           context_fds_,
+                                                           assume_profile_changed_,
+                                                           downgrade_);
 
     // Convert OatFileAssitant codes to dexoptanalyzer codes.
     switch (dexoptNeeded) {
diff --git a/dexoptanalyzer/dexoptanalyzer_test.cc b/dexoptanalyzer/dexoptanalyzer_test.cc
index 7b6b36c..651fa4a 100644
--- a/dexoptanalyzer/dexoptanalyzer_test.cc
+++ b/dexoptanalyzer/dexoptanalyzer_test.cc
@@ -25,10 +25,9 @@
 class DexoptAnalyzerTest : public DexoptTest {
  protected:
   std::string GetDexoptAnalyzerCmd() {
-    std::string file_path = GetTestAndroidRoot();
-    file_path += "/bin/dexoptanalyzer";
+    std::string file_path = GetArtBinDir() + "/dexoptanalyzer";
     if (kIsDebugBuild) {
-      file_path += "d";
+      file_path += 'd';
     }
     EXPECT_TRUE(OS::FileExists(file_path.c_str())) << file_path << " should be a valid file path";
     return file_path;
@@ -37,7 +36,7 @@
   int Analyze(const std::string& dex_file,
               CompilerFilter::Filter compiler_filter,
               bool assume_profile_changed,
-              const std::string& class_loader_context) {
+              const char* class_loader_context) {
     std::string dexoptanalyzer_cmd = GetDexoptAnalyzerCmd();
     std::vector<std::string> argv_str;
     argv_str.push_back(dexoptanalyzer_cmd);
@@ -53,8 +52,8 @@
     argv_str.push_back(GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations()));
     argv_str.push_back("--image=" + GetImageLocation());
     argv_str.push_back("--android-data=" + android_data_);
-    if (!class_loader_context.empty()) {
-      argv_str.push_back("--class-loader-context=" + class_loader_context);
+    if (class_loader_context != nullptr) {
+      argv_str.push_back("--class-loader-context=" + std::string(class_loader_context));
     }
 
     std::string error;
@@ -79,13 +78,19 @@
               CompilerFilter::Filter compiler_filter,
               bool assume_profile_changed = false,
               bool downgrade = false,
-              const std::string& class_loader_context = "") {
+              const char* class_loader_context = "PCL[]") {
     int dexoptanalyzerResult = Analyze(
         dex_file, compiler_filter, assume_profile_changed, class_loader_context);
     dexoptanalyzerResult = DexoptanalyzerToOatFileAssistant(dexoptanalyzerResult);
     OatFileAssistant oat_file_assistant(dex_file.c_str(), kRuntimeISA, /*load_executable=*/ false);
+    std::vector<int> context_fds;
+
+    std::unique_ptr<ClassLoaderContext> context = class_loader_context == nullptr
+        ? nullptr
+        : ClassLoaderContext::Create(class_loader_context);
+
     int assistantResult = oat_file_assistant.GetDexOptNeeded(
-        compiler_filter, assume_profile_changed, downgrade);
+        compiler_filter, context.get(), context_fds, assume_profile_changed, downgrade);
     EXPECT_EQ(assistantResult, dexoptanalyzerResult);
   }
 };
@@ -101,6 +106,7 @@
   Verify(dex_location, CompilerFilter::kExtract);
   Verify(dex_location, CompilerFilter::kQuicken);
   Verify(dex_location, CompilerFilter::kSpeedProfile);
+  Verify(dex_location, CompilerFilter::kSpeed, false, false, nullptr);
 }
 
 // Case: We have a DEX file and up-to-date OAT file for it.
@@ -113,6 +119,7 @@
   Verify(dex_location, CompilerFilter::kQuicken);
   Verify(dex_location, CompilerFilter::kExtract);
   Verify(dex_location, CompilerFilter::kEverything);
+  Verify(dex_location, CompilerFilter::kSpeed, false, false, nullptr);
 }
 
 // Case: We have a DEX file and speed-profile OAT file for it.
@@ -326,7 +333,6 @@
   // Generate the odex to get the class loader context also open the dex files.
   GenerateOdexForTest(dex_location1, odex_location1, CompilerFilter::kSpeed, /* compilation_reason= */ nullptr, /* extra_args= */ { class_loader_context_option });
 
-  Verify(dex_location1, CompilerFilter::kSpeed, false, false, class_loader_context);
+  Verify(dex_location1, CompilerFilter::kSpeed, false, false, class_loader_context.c_str());
 }
-
 }  // namespace art
diff --git a/disassembler/Android.bp b/disassembler/Android.bp
index 5aa159e..064aaea 100644
--- a/disassembler/Android.bp
+++ b/disassembler/Android.bp
@@ -20,46 +20,27 @@
     host_supported: true,
     srcs: [
         "disassembler.cc",
-        "disassembler_mips.cc",
-        "disassembler_x86.cc",
     ],
     codegen: {
         arm: {
-            srcs: ["disassembler_arm.cc"]
+            srcs: ["disassembler_arm.cc"],
         },
         arm64: {
-            srcs: ["disassembler_arm64.cc"]
+            srcs: ["disassembler_arm64.cc"],
         },
-        // TODO: We should also conditionally include the MIPS32/MIPS64 and the
-        // x86/x86-64 disassembler definitions (b/119090273). However, using the
-        // following syntax here:
-        //
-        //   mips: {
-        //       srcs: ["disassembler_mips.cc"]
-        //   },
-        //   mips64: {
-        //       srcs: ["disassembler_mips.cc"]
-        //   },
-        //   x86: {
-        //       srcs: ["disassembler_x86.cc"]
-        //   },
-        //   x86_64: {
-        //       srcs: ["disassembler_x86.cc"]
-        //   },
-        //
-        // does not work, as it generates a file rejected by ninja with this
-        // error message (e.g. on host, where we include all the back ends by
-        // default):
-        //
-        //   FAILED: ninja: out/soong/build.ninja:320768: multiple rules generate out/soong/.intermediates/art/disassembler/libart-disassembler/linux_glibc_x86_64_static/obj/art/disassembler/disassembler_mips.o [-w dupbuild=err]
+        x86: {
+            srcs: ["disassembler_x86.cc"],
+        },
+        x86_64: {
+            srcs: ["disassembler_x86.cc"],
+        },
     },
-    include_dirs: ["art/runtime"],
-
     shared_libs: [
         "libbase",
     ],
     header_libs: [
         "art_libartbase_headers",
+        "libart_runtime_headers_ndk",
     ],
     export_include_dirs: ["."],
 }
@@ -71,6 +52,10 @@
         // For disassembler_arm*.
         "libvixl",
     ],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_library {
@@ -83,4 +68,22 @@
         // For disassembler_arm*.
         "libvixld",
     ],
+
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
+}
+
+cc_library_headers {
+    name: "art_disassembler_headers",
+    host_supported: true,
+    export_include_dirs: [
+        ".",
+    ],
+
+    apex_available: [
+        "com.android.art.debug",
+        "com.android.art.release",
+    ],
 }
diff --git a/disassembler/disassembler.cc b/disassembler/disassembler.cc
index 0662334..53461ce 100644
--- a/disassembler/disassembler.cc
+++ b/disassembler/disassembler.cc
@@ -29,10 +29,6 @@
 # include "disassembler_arm64.h"
 #endif
 
-#if defined(ART_ENABLE_CODEGEN_mips) || defined(ART_ENABLE_CODEGEN_mips64)
-# include "disassembler_mips.h"
-#endif
-
 #if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
 # include "disassembler_x86.h"
 #endif
@@ -57,14 +53,6 @@
     case InstructionSet::kArm64:
       return new arm64::DisassemblerArm64(options);
 #endif
-#ifdef ART_ENABLE_CODEGEN_mips
-    case InstructionSet::kMips:
-      return new mips::DisassemblerMips(options, /* is_o32_abi= */ true);
-#endif
-#ifdef ART_ENABLE_CODEGEN_mips64
-    case InstructionSet::kMips64:
-      return new mips::DisassemblerMips(options, /* is_o32_abi= */ false);
-#endif
 #ifdef ART_ENABLE_CODEGEN_x86
     case InstructionSet::kX86:
       return new x86::DisassemblerX86(options, /* supports_rex= */ false);
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
deleted file mode 100644
index eaf11be..0000000
--- a/disassembler/disassembler_mips.cc
+++ /dev/null
@@ -1,851 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "disassembler_mips.h"
-
-#include <ostream>
-#include <sstream>
-
-#include "android-base/logging.h"
-#include "android-base/stringprintf.h"
-
-#include "base/bit_utils.h"
-
-using android::base::StringPrintf;
-
-namespace art {
-namespace mips {
-
-struct MipsInstruction {
-  uint32_t mask;
-  uint32_t value;
-  const char* name;
-  const char* args_fmt;
-
-  bool Matches(uint32_t instruction) const {
-    return (instruction & mask) == value;
-  }
-};
-
-static const char* gO32AbiRegNames[]  = {
-  "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
-  "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
-  "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
-  "t8", "t9", "k0", "k1", "gp", "sp", "s8", "ra"
-};
-
-static const char* gN64AbiRegNames[]  = {
-  "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
-  "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
-  "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
-  "t8", "t9", "k0", "k1", "gp", "sp", "s8", "ra"
-};
-
-static const uint32_t kOpcodeShift = 26;
-
-static const uint32_t kCop1 = (17 << kOpcodeShift);
-static const uint32_t kMsa = (30 << kOpcodeShift);  // MSA major opcode.
-
-static const uint32_t kITypeMask = (0x3f << kOpcodeShift);
-static const uint32_t kJTypeMask = (0x3f << kOpcodeShift);
-static const uint32_t kRTypeMask = ((0x3f << kOpcodeShift) | (0x3f));
-static const uint32_t kSpecial0Mask = (0x3f << kOpcodeShift);
-static const uint32_t kSpecial2Mask = (0x3f << kOpcodeShift);
-static const uint32_t kSpecial3Mask = (0x3f << kOpcodeShift);
-static const uint32_t kFpMask = kRTypeMask;
-static const uint32_t kMsaMask = kRTypeMask;
-static const uint32_t kMsaSpecialMask = (0x3f << kOpcodeShift);
-
-static const MipsInstruction gMipsInstructions[] = {
-  // "sll r0, r0, 0" is the canonical "nop", used in delay slots.
-  { 0xffffffff, 0, "nop", "" },
-
-  // R-type instructions.
-  { kRTypeMask, 0, "sll", "DTA", },
-  // 0, 1, movci
-  { kRTypeMask | (0x1f << 21), 2, "srl", "DTA", },
-  { kRTypeMask, 3, "sra", "DTA", },
-  { kRTypeMask | (0x1f << 6), 4, "sllv", "DTS", },
-  { kRTypeMask | (0x1f << 6), 6, "srlv", "DTS", },
-  { kRTypeMask | (0x1f << 6), (1 << 6) | 6, "rotrv", "DTS", },
-  { kRTypeMask | (0x1f << 6), 7, "srav", "DTS", },
-  { kRTypeMask, 8, "jr", "S", },
-  { kRTypeMask | (0x1f << 11), 9 | (31 << 11), "jalr", "S", },  // rd = 31 is implicit.
-  { kRTypeMask | (0x1f << 11), 9, "jr", "S", },  // rd = 0 is implicit.
-  { kRTypeMask, 9, "jalr", "DS", },  // General case.
-  { kRTypeMask | (0x1f << 6), 10, "movz", "DST", },
-  { kRTypeMask | (0x1f << 6), 11, "movn", "DST", },
-  { kRTypeMask, 12, "syscall", "", },  // TODO: code
-  { kRTypeMask, 13, "break", "", },  // TODO: code
-  { kRTypeMask, 15, "sync", "", },  // TODO: type
-  { kRTypeMask, 16, "mfhi", "D", },
-  { kRTypeMask, 17, "mthi", "S", },
-  { kRTypeMask, 18, "mflo", "D", },
-  { kRTypeMask, 19, "mtlo", "S", },
-  { kRTypeMask | (0x1f << 6), 20, "dsllv", "DTS", },
-  { kRTypeMask | (0x1f << 6), 22, "dsrlv", "DTS", },
-  { kRTypeMask | (0x1f << 6), (1 << 6) | 22, "drotrv", "DTS", },
-  { kRTypeMask | (0x1f << 6), 23, "dsrav", "DTS", },
-  { kRTypeMask | (0x1f << 6), 24, "mult", "ST", },
-  { kRTypeMask | (0x1f << 6), 25, "multu", "ST", },
-  { kRTypeMask | (0x1f << 6), 26, "div", "ST", },
-  { kRTypeMask | (0x1f << 6), 27, "divu", "ST", },
-  { kRTypeMask | (0x1f << 6), 24 + (2 << 6), "mul", "DST", },
-  { kRTypeMask | (0x1f << 6), 24 + (3 << 6), "muh", "DST", },
-  { kRTypeMask | (0x1f << 6), 26 + (2 << 6), "div", "DST", },
-  { kRTypeMask | (0x1f << 6), 26 + (3 << 6), "mod", "DST", },
-  { kRTypeMask, 32, "add", "DST", },
-  { kRTypeMask, 33, "addu", "DST", },
-  { kRTypeMask, 34, "sub", "DST", },
-  { kRTypeMask, 35, "subu", "DST", },
-  { kRTypeMask, 36, "and", "DST", },
-  { kRTypeMask | (0x1f << 16), 37 | (0 << 16), "move", "DS" },
-  { kRTypeMask | (0x1f << 21), 37 | (0 << 21), "move", "DT" },
-  { kRTypeMask, 37, "or", "DST", },
-  { kRTypeMask, 38, "xor", "DST", },
-  { kRTypeMask, 39, "nor", "DST", },
-  { kRTypeMask, 42, "slt", "DST", },
-  { kRTypeMask, 43, "sltu", "DST", },
-  { kRTypeMask, 45, "daddu", "DST", },
-  { kRTypeMask, 46, "dsub", "DST", },
-  { kRTypeMask, 47, "dsubu", "DST", },
-  // TODO: tge[u], tlt[u], teg, tne
-  { kRTypeMask | (0x1f << 21), 56, "dsll", "DTA", },
-  { kRTypeMask | (0x1f << 21), 58, "dsrl", "DTA", },
-  { kRTypeMask | (0x1f << 21), (1 << 21) | 58, "drotr", "DTA", },
-  { kRTypeMask | (0x1f << 21), 59, "dsra", "DTA", },
-  { kRTypeMask | (0x1f << 21), 60, "dsll32", "DTA", },
-  { kRTypeMask | (0x1f << 21), 62, "dsrl32", "DTA", },
-  { kRTypeMask | (0x1f << 21), (1 << 21) | 62, "drotr32", "DTA", },
-  { kRTypeMask | (0x1f << 21), 63, "dsra32", "DTA", },
-
-  // SPECIAL0
-  { kSpecial0Mask | 0x307ff, 1, "movf", "DSc" },
-  { kSpecial0Mask | 0x307ff, 0x10001, "movt", "DSc" },
-  { kSpecial0Mask | 0x7ff, (2 << 6) | 24, "mul", "DST" },
-  { kSpecial0Mask | 0x7ff, (3 << 6) | 24, "muh", "DST" },
-  { kSpecial0Mask | 0x7ff, (2 << 6) | 25, "mulu", "DST" },
-  { kSpecial0Mask | 0x7ff, (3 << 6) | 25, "muhu", "DST" },
-  { kSpecial0Mask | 0x7ff, (2 << 6) | 26, "div", "DST" },
-  { kSpecial0Mask | 0x7ff, (3 << 6) | 26, "mod", "DST" },
-  { kSpecial0Mask | 0x7ff, (2 << 6) | 27, "divu", "DST" },
-  { kSpecial0Mask | 0x7ff, (3 << 6) | 27, "modu", "DST" },
-  { kSpecial0Mask | 0x7ff, (2 << 6) | 28, "dmul", "DST" },
-  { kSpecial0Mask | 0x7ff, (3 << 6) | 28, "dmuh", "DST" },
-  { kSpecial0Mask | 0x7ff, (2 << 6) | 29, "dmulu", "DST" },
-  { kSpecial0Mask | 0x7ff, (3 << 6) | 29, "dmuhu", "DST" },
-  { kSpecial0Mask | 0x7ff, (2 << 6) | 30, "ddiv", "DST" },
-  { kSpecial0Mask | 0x7ff, (3 << 6) | 30, "dmod", "DST" },
-  { kSpecial0Mask | 0x7ff, (2 << 6) | 31, "ddivu", "DST" },
-  { kSpecial0Mask | 0x7ff, (3 << 6) | 31, "dmodu", "DST" },
-  { kSpecial0Mask | 0x7ff, (0 << 6) | 53, "seleqz", "DST" },
-  { kSpecial0Mask | 0x7ff, (0 << 6) | 55, "selnez", "DST" },
-  { kSpecial0Mask | (0x1f << 21) | 0x3f, (1 << 21) | 2, "rotr", "DTA", },
-  { kSpecial0Mask | (0x1f << 16) | 0x7ff, (0x01 << 6) | 0x10, "clz", "DS" },
-  { kSpecial0Mask | (0x1f << 16) | 0x7ff, (0x01 << 6) | 0x11, "clo", "DS" },
-  { kSpecial0Mask | (0x1f << 16) | 0x7ff, (0x01 << 6) | 0x12, "dclz", "DS" },
-  { kSpecial0Mask | (0x1f << 16) | 0x7ff, (0x01 << 6) | 0x13, "dclo", "DS" },
-  { kSpecial0Mask | 0x73f, 0x05, "lsa", "DSTj" },
-  { kSpecial0Mask | 0x73f, 0x15, "dlsa", "DSTj" },
-  // TODO: sdbbp
-
-  // SPECIAL2
-  { kSpecial2Mask | 0x7ff, (28 << kOpcodeShift) | 2, "mul", "DST" },
-  { kSpecial2Mask | 0x7ff, (28 << kOpcodeShift) | 32, "clz", "DS" },
-  { kSpecial2Mask | 0x7ff, (28 << kOpcodeShift) | 33, "clo", "DS" },
-  { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 0, "madd", "ST" },
-  { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 1, "maddu", "ST" },
-  { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 2, "mul", "DST" },
-  { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 4, "msub", "ST" },
-  { kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 5, "msubu", "ST" },
-  { kSpecial2Mask | 0x3f, (28 << kOpcodeShift) | 0x3f, "sdbbp", "" },  // TODO: code
-
-  // SPECIAL3
-  { kSpecial3Mask | 0x3f, (31 << kOpcodeShift), "ext", "TSAZ", },
-  { kSpecial3Mask | 0x3f, (31 << kOpcodeShift) | 3, "dext", "TSAZ", },
-  { kSpecial3Mask | 0x3f, (31 << kOpcodeShift) | 4, "ins", "TSAz", },
-  { kSpecial3Mask | 0x3f, (31 << kOpcodeShift) | 5, "dinsm", "TSAJ", },
-  { kSpecial3Mask | 0x3f, (31 << kOpcodeShift) | 6, "dinsu", "TSFz", },
-  { kSpecial3Mask | 0x3f, (31 << kOpcodeShift) | 7, "dins", "TSAz", },
-  { kSpecial3Mask | (0x1f << 21) | (0x1f << 6) | 0x3f,
-    (31 << kOpcodeShift) | (16 << 6) | 32,
-    "seb",
-    "DT", },
-  { kSpecial3Mask | (0x1f << 21) | (0x1f << 6) | 0x3f,
-    (31 << kOpcodeShift) | (24 << 6) | 32,
-    "seh",
-    "DT", },
-  { kSpecial3Mask | (0x1f << 21) | (0x1f << 6) | 0x3f,
-    (31 << kOpcodeShift) | 32,
-    "bitswap",
-    "DT", },
-  { kSpecial3Mask | (0x1f << 21) | (0x1f << 6) | 0x3f,
-    (31 << kOpcodeShift) | 36,
-    "dbitswap",
-    "DT", },
-  { kSpecial3Mask | (0x1f << 21) | (0x1f << 6) | 0x3f,
-    (31 << kOpcodeShift) | (2 << 6) | 36,
-    "dsbh",
-    "DT", },
-  { kSpecial3Mask | (0x1f << 21) | (0x1f << 6) | 0x3f,
-    (31 << kOpcodeShift) | (5 << 6) | 36,
-    "dshd",
-    "DT", },
-  { kSpecial3Mask | (0x1f << 21) | (0x1f << 6) | 0x3f,
-    (31 << kOpcodeShift) | (2 << 6) | 32,
-    "wsbh",
-    "DT", },
-  { kSpecial3Mask | 0x7f, (31 << kOpcodeShift) | 0x26, "sc", "Tl", },
-  { kSpecial3Mask | 0x7f, (31 << kOpcodeShift) | 0x27, "scd", "Tl", },
-  { kSpecial3Mask | 0x7f, (31 << kOpcodeShift) | 0x36, "ll", "Tl", },
-  { kSpecial3Mask | 0x7f, (31 << kOpcodeShift) | 0x37, "lld", "Tl", },
-
-  // J-type instructions.
-  { kJTypeMask, 2 << kOpcodeShift, "j", "L" },
-  { kJTypeMask, 3 << kOpcodeShift, "jal", "L" },
-
-  // I-type instructions.
-  { kITypeMask | (0x3ff << 16), 4 << kOpcodeShift, "b", "B" },
-  { kITypeMask | (0x1f << 16), 4 << kOpcodeShift | (0 << 16), "beqz", "SB" },
-  { kITypeMask | (0x1f << 21), 4 << kOpcodeShift | (0 << 21), "beqz", "TB" },
-  { kITypeMask, 4 << kOpcodeShift, "beq", "STB" },
-  { kITypeMask | (0x1f << 16), 5 << kOpcodeShift | (0 << 16), "bnez", "SB" },
-  { kITypeMask | (0x1f << 21), 5 << kOpcodeShift | (0 << 21), "bnez", "TB" },
-  { kITypeMask, 5 << kOpcodeShift, "bne", "STB" },
-  { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (1 << 16), "bgez", "SB" },
-  { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (0 << 16), "bltz", "SB" },
-  { kITypeMask | (0x3ff << 16), 1 << kOpcodeShift | (16 << 16), "nal", "" },
-  { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (16 << 16), "bltzal", "SB" },
-  { kITypeMask | (0x3ff << 16), 1 << kOpcodeShift | (17 << 16), "bal", "B" },
-  { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (17 << 16), "bgezal", "SB" },
-  { kITypeMask | (0x1f << 16), 6 << kOpcodeShift | (0 << 16), "blez", "SB" },
-  { kITypeMask, 6 << kOpcodeShift, "bgeuc", "STB" },
-  { kITypeMask | (0x1f << 16), 7 << kOpcodeShift | (0 << 16), "bgtz", "SB" },
-  { kITypeMask, 7 << kOpcodeShift, "bltuc", "STB" },
-  { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (6 << 16), "dahi", "Si", },
-  { kITypeMask | (0x1f << 16), 1 << kOpcodeShift | (30 << 16), "dati", "Si", },
-
-  { kITypeMask, 8 << kOpcodeShift, "beqc", "STB" },
-
-  { kITypeMask | (0x1f << 21), 9 << kOpcodeShift | (0 << 21), "li", "Ti" },
-  { kITypeMask, 9 << kOpcodeShift, "addiu", "TSi", },
-  { kITypeMask, 10 << kOpcodeShift, "slti", "TSi", },
-  { kITypeMask, 11 << kOpcodeShift, "sltiu", "TSi", },
-  { kITypeMask, 12 << kOpcodeShift, "andi", "TSI", },
-  { kITypeMask | (0x1f << 21), 13 << kOpcodeShift | (0 << 21), "li", "TI" },
-  { kITypeMask, 13 << kOpcodeShift, "ori", "TSI", },
-  { kITypeMask, 14 << kOpcodeShift, "xori", "TSI", },
-  { kITypeMask | (0x1f << 21), 15 << kOpcodeShift, "lui", "Ti", },
-  { kITypeMask, 15 << kOpcodeShift, "aui", "TSi", },
-
-  { kITypeMask | (0x3e3 << 16), (17 << kOpcodeShift) | (8 << 21), "bc1f", "cB" },
-  { kITypeMask | (0x3e3 << 16), (17 << kOpcodeShift) | (8 << 21) | (1 << 16), "bc1t", "cB" },
-  { kITypeMask | (0x1f << 21), (17 << kOpcodeShift) | (9 << 21), "bc1eqz", "tB" },
-  { kITypeMask | (0x1f << 21), (17 << kOpcodeShift) | (13 << 21), "bc1nez", "tB" },
-
-  { kITypeMask | (0x1f << 21), 22 << kOpcodeShift, "blezc", "TB" },
-
-  // TODO: de-dup
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (1  << 21) | (1  << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (2  << 21) | (2  << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (3  << 21) | (3  << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (4  << 21) | (4  << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (5  << 21) | (5  << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (6  << 21) | (6  << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (7  << 21) | (7  << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (8  << 21) | (8  << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (9  << 21) | (9  << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (10 << 21) | (10 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (11 << 21) | (11 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (12 << 21) | (12 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (13 << 21) | (13 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (14 << 21) | (14 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (15 << 21) | (15 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (16 << 21) | (16 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (17 << 21) | (17 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (18 << 21) | (18 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (19 << 21) | (19 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (20 << 21) | (20 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (21 << 21) | (21 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (22 << 21) | (22 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (23 << 21) | (23 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (24 << 21) | (24 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (25 << 21) | (25 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (26 << 21) | (26 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (27 << 21) | (27 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (28 << 21) | (28 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (29 << 21) | (29 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (30 << 21) | (30 << 16), "bgezc", "TB" },
-  { kITypeMask | (0x3ff << 16), (22 << kOpcodeShift) | (31 << 21) | (31 << 16), "bgezc", "TB" },
-
-  { kITypeMask, 22 << kOpcodeShift, "bgec", "STB" },
-
-  { kITypeMask | (0x1f << 21), 23 << kOpcodeShift, "bgtzc", "TB" },
-
-  // TODO: de-dup
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (1  << 21) | (1  << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (2  << 21) | (2  << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (3  << 21) | (3  << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (4  << 21) | (4  << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (5  << 21) | (5  << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (6  << 21) | (6  << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (7  << 21) | (7  << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (8  << 21) | (8  << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (9  << 21) | (9  << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (10 << 21) | (10 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (11 << 21) | (11 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (12 << 21) | (12 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (13 << 21) | (13 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (14 << 21) | (14 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (15 << 21) | (15 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (16 << 21) | (16 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (17 << 21) | (17 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (18 << 21) | (18 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (19 << 21) | (19 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (20 << 21) | (20 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (21 << 21) | (21 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (22 << 21) | (22 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (23 << 21) | (23 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (24 << 21) | (24 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (25 << 21) | (25 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (26 << 21) | (26 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (27 << 21) | (27 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (28 << 21) | (28 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (29 << 21) | (29 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (30 << 21) | (30 << 16), "bltzc", "TB" },
-  { kITypeMask | (0x3ff << 16), (23 << kOpcodeShift) | (31 << 21) | (31 << 16), "bltzc", "TB" },
-
-  { kITypeMask, 23 << kOpcodeShift, "bltc", "STB" },
-
-  { kITypeMask, 24 << kOpcodeShift, "bnec", "STB" },
-
-  { kITypeMask | (0x1f << 21), 25 << kOpcodeShift | (0 << 21), "dli", "Ti" },
-  { kITypeMask, 25 << kOpcodeShift, "daddiu", "TSi", },
-  { kITypeMask, 29 << kOpcodeShift, "daui", "TSi", },
-
-  { kITypeMask, 32u << kOpcodeShift, "lb", "TO", },
-  { kITypeMask, 33u << kOpcodeShift, "lh", "TO", },
-  { kITypeMask, 34u << kOpcodeShift, "lwl", "TO", },
-  { kITypeMask, 35u << kOpcodeShift, "lw", "TO", },
-  { kITypeMask, 36u << kOpcodeShift, "lbu", "TO", },
-  { kITypeMask, 37u << kOpcodeShift, "lhu", "TO", },
-  { kITypeMask, 38u << kOpcodeShift, "lwr", "TO", },
-  { kITypeMask, 39u << kOpcodeShift, "lwu", "TO", },
-  { kITypeMask, 40u << kOpcodeShift, "sb", "TO", },
-  { kITypeMask, 41u << kOpcodeShift, "sh", "TO", },
-  { kITypeMask, 42u << kOpcodeShift, "swl", "TO", },
-  { kITypeMask, 43u << kOpcodeShift, "sw", "TO", },
-  { kITypeMask, 46u << kOpcodeShift, "swr", "TO", },
-  { kITypeMask, 48u << kOpcodeShift, "ll", "TO", },
-  { kITypeMask, 49u << kOpcodeShift, "lwc1", "tO", },
-  { kJTypeMask, 50u << kOpcodeShift, "bc", "P" },
-  { kITypeMask, 53u << kOpcodeShift, "ldc1", "tO", },
-  { kITypeMask | (0x1f << 21), 54u << kOpcodeShift, "jic", "Ti" },
-  { kITypeMask | (1 << 21), (54u << kOpcodeShift) | (1 << 21), "beqzc", "Sb" },  // TODO: de-dup?
-  { kITypeMask | (1 << 22), (54u << kOpcodeShift) | (1 << 22), "beqzc", "Sb" },
-  { kITypeMask | (1 << 23), (54u << kOpcodeShift) | (1 << 23), "beqzc", "Sb" },
-  { kITypeMask | (1 << 24), (54u << kOpcodeShift) | (1 << 24), "beqzc", "Sb" },
-  { kITypeMask | (1 << 25), (54u << kOpcodeShift) | (1 << 25), "beqzc", "Sb" },
-  { kITypeMask, 55u << kOpcodeShift, "ld", "TO", },
-  { kITypeMask, 56u << kOpcodeShift, "sc", "TO", },
-  { kITypeMask, 57u << kOpcodeShift, "swc1", "tO", },
-  { kJTypeMask, 58u << kOpcodeShift, "balc", "P" },
-  { kITypeMask | (0x1f << 16), (59u << kOpcodeShift) | (30 << 16), "auipc", "Si" },
-  { kITypeMask | (0x3 << 19), (59u << kOpcodeShift) | (0 << 19), "addiupc", "Sp" },
-  { kITypeMask | (0x3 << 19), (59u << kOpcodeShift) | (1 << 19), "lwpc", "So" },
-  { kITypeMask | (0x3 << 19), (59u << kOpcodeShift) | (2 << 19), "lwupc", "So" },
-  { kITypeMask | (0x7 << 18), (59u << kOpcodeShift) | (6 << 18), "ldpc", "S0" },
-  { kITypeMask, 61u << kOpcodeShift, "sdc1", "tO", },
-  { kITypeMask | (0x1f << 21), 62u << kOpcodeShift, "jialc", "Ti" },
-  { kITypeMask | (1 << 21), (62u << kOpcodeShift) | (1 << 21), "bnezc", "Sb" },  // TODO: de-dup?
-  { kITypeMask | (1 << 22), (62u << kOpcodeShift) | (1 << 22), "bnezc", "Sb" },
-  { kITypeMask | (1 << 23), (62u << kOpcodeShift) | (1 << 23), "bnezc", "Sb" },
-  { kITypeMask | (1 << 24), (62u << kOpcodeShift) | (1 << 24), "bnezc", "Sb" },
-  { kITypeMask | (1 << 25), (62u << kOpcodeShift) | (1 << 25), "bnezc", "Sb" },
-  { kITypeMask, 63u << kOpcodeShift, "sd", "TO", },
-
-  // Floating point.
-  { kFpMask | (0x1f << 21), kCop1 | (0x00 << 21), "mfc1", "Td" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x01 << 21), "dmfc1", "Td" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x03 << 21), "mfhc1", "Td" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x04 << 21), "mtc1", "Td" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x05 << 21), "dmtc1", "Td" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x07 << 21), "mthc1", "Td" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x14 << 21) | 1, "cmp.un.s", "adt" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x14 << 21) | 2, "cmp.eq.s", "adt" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x14 << 21) | 3, "cmp.ueq.s", "adt" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x14 << 21) | 4, "cmp.lt.s", "adt" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x14 << 21) | 5, "cmp.ult.s", "adt" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x14 << 21) | 6, "cmp.le.s", "adt" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x14 << 21) | 7, "cmp.ule.s", "adt" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x14 << 21) | 17, "cmp.or.s", "adt" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x14 << 21) | 18, "cmp.une.s", "adt" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x14 << 21) | 19, "cmp.ne.s", "adt" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x15 << 21) | 1, "cmp.un.d", "adt" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x15 << 21) | 2, "cmp.eq.d", "adt" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x15 << 21) | 3, "cmp.ueq.d", "adt" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x15 << 21) | 4, "cmp.lt.d", "adt" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x15 << 21) | 5, "cmp.ult.d", "adt" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x15 << 21) | 6, "cmp.le.d", "adt" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x15 << 21) | 7, "cmp.ule.d", "adt" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x15 << 21) | 17, "cmp.or.d", "adt" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x15 << 21) | 18, "cmp.une.d", "adt" },
-  { kFpMask | (0x1f << 21), kCop1 | (0x15 << 21) | 19, "cmp.ne.d", "adt" },
-  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 0, "add", "fadt" },
-  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 1, "sub", "fadt" },
-  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 2, "mul", "fadt" },
-  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 3, "div", "fadt" },
-  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 4, "sqrt", "fad" },
-  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 5, "abs", "fad" },
-  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 6, "mov", "fad" },
-  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 7, "neg", "fad" },
-  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 8, "round.l", "fad" },
-  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 9, "trunc.l", "fad" },
-  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 10, "ceil.l", "fad" },
-  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 11, "floor.l", "fad" },
-  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 12, "round.w", "fad" },
-  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 13, "trunc.w", "fad" },
-  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 14, "ceil.w", "fad" },
-  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 15, "floor.w", "fad" },
-  { kFpMask | (0x201 << 16), kCop1 | (0x200 << 16) | 17, "movf", "fadc" },
-  { kFpMask | (0x201 << 16), kCop1 | (0x201 << 16) | 17, "movt", "fadc" },
-  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 18, "movz", "fadT" },
-  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 19, "movn", "fadT" },
-  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 20, "seleqz", "fadt" },
-  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 23, "selnez", "fadt" },
-  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 26, "rint", "fad" },
-  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 27, "class", "fad" },
-  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 32, "cvt.s", "fad" },
-  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 33, "cvt.d", "fad" },
-  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 36, "cvt.w", "fad" },
-  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 37, "cvt.l", "fad" },
-  { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 38, "cvt.ps", "fad" },
-  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 49, "c.un", "fCdt" },
-  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 50, "c.eq", "fCdt" },
-  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 51, "c.ueq", "fCdt" },
-  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 52, "c.olt", "fCdt" },
-  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 53, "c.ult", "fCdt" },
-  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 54, "c.ole", "fCdt" },
-  { kFpMask | (0x10 << 21), kCop1 | (0x10 << 21) | 55, "c.ule", "fCdt" },
-  { kFpMask, kCop1 | 0x10, "sel", "fadt" },
-  { kFpMask, kCop1 | 0x1e, "max", "fadt" },
-  { kFpMask, kCop1 | 0x1c, "min", "fadt" },
-
-  // MSA instructions.
-  { kMsaMask | (0x1f << 21), kMsa | (0x0 << 21) | 0x1e, "and.v", "kmn" },
-  { kMsaMask | (0x1f << 21), kMsa | (0x1 << 21) | 0x1e, "or.v", "kmn" },
-  { kMsaMask | (0x1f << 21), kMsa | (0x2 << 21) | 0x1e, "nor.v", "kmn" },
-  { kMsaMask | (0x1f << 21), kMsa | (0x3 << 21) | 0x1e, "xor.v", "kmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x0 << 23) | 0xe, "addv", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x1 << 23) | 0xe, "subv", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x4 << 23) | 0x11, "asub_s", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x5 << 23) | 0x11, "asub_u", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x0 << 23) | 0x12, "mulv", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x4 << 23) | 0x12, "div_s", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x5 << 23) | 0x12, "div_u", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x6 << 23) | 0x12, "mod_s", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x7 << 23) | 0x12, "mod_u", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x0 << 23) | 0x10, "add_a", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x4 << 23) | 0x10, "ave_s", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x5 << 23) | 0x10, "ave_u", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x6 << 23) | 0x10, "aver_s", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x7 << 23) | 0x10, "aver_u", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x2 << 23) | 0xe, "max_s", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x3 << 23) | 0xe, "max_u", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x4 << 23) | 0xe, "min_s", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x5 << 23) | 0xe, "min_u", "Vkmn" },
-  { kMsaMask | (0xf << 22), kMsa | (0x0 << 22) | 0x1b, "fadd", "Ukmn" },
-  { kMsaMask | (0xf << 22), kMsa | (0x1 << 22) | 0x1b, "fsub", "Ukmn" },
-  { kMsaMask | (0xf << 22), kMsa | (0x2 << 22) | 0x1b, "fmul", "Ukmn" },
-  { kMsaMask | (0xf << 22), kMsa | (0x3 << 22) | 0x1b, "fdiv", "Ukmn" },
-  { kMsaMask | (0xf << 22), kMsa | (0xe << 22) | 0x1b, "fmax", "Ukmn" },
-  { kMsaMask | (0xf << 22), kMsa | (0xc << 22) | 0x1b, "fmin", "Ukmn" },
-  { kMsaMask | (0x1ff << 17), kMsa | (0x19e << 17) | 0x1e, "ffint_s", "ukm" },
-  { kMsaMask | (0x1ff << 17), kMsa | (0x19c << 17) | 0x1e, "ftint_s", "ukm" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x0 << 23) | 0xd, "sll", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x1 << 23) | 0xd, "sra", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x2 << 23) | 0xd, "srl", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x0 << 23) | 0x9, "slli", "kmW" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x1 << 23) | 0x9, "srai", "kmW" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x2 << 23) | 0x9, "srli", "kmW" },
-  { kMsaMask | (0x3ff << 16), kMsa | (0xbe << 16) | 0x19, "move.v", "km" },
-  { kMsaMask | (0xf << 22), kMsa | (0x1 << 22) | 0x19, "splati", "kX" },
-  { kMsaMask | (0xf << 22), kMsa | (0x2 << 22) | 0x19, "copy_s", "yX" },
-  { kMsaMask | (0xf << 22), kMsa | (0x3 << 22) | 0x19, "copy_u", "yX" },
-  { kMsaMask | (0xf << 22), kMsa | (0x4 << 22) | 0x19, "insert", "YD" },
-  { kMsaMask | (0xff << 18), kMsa | (0xc0 << 18) | 0x1e, "fill", "vkD" },
-  { kMsaMask | (0xff << 18), kMsa | (0xc1 << 18) | 0x1e, "pcnt", "vkm" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x6 << 23) | 0x7, "ldi", "kx" },
-  { kMsaSpecialMask | (0xf << 2), kMsa | (0x8 << 2), "ld", "kw" },
-  { kMsaSpecialMask | (0xf << 2), kMsa | (0x9 << 2), "st", "kw" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x4 << 23) | 0x14, "ilvl", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x5 << 23) | 0x14, "ilvr", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x6 << 23) | 0x14, "ilvev", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x7 << 23) | 0x14, "ilvod", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x1 << 23) | 0x12, "maddv", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x2 << 23) | 0x12, "msubv", "Vkmn" },
-  { kMsaMask | (0xf << 22), kMsa | (0x4 << 22) | 0x1b, "fmadd", "Ukmn" },
-  { kMsaMask | (0xf << 22), kMsa | (0x5 << 22) | 0x1b, "fmsub", "Ukmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x4 << 23) | 0x15, "hadd_s", "Vkmn" },
-  { kMsaMask | (0x7 << 23), kMsa | (0x5 << 23) | 0x15, "hadd_u", "Vkmn" },
-};
-
-static uint32_t ReadU32(const uint8_t* ptr) {
-  // We only support little-endian MIPS.
-  return ptr[0] | (ptr[1] << 8) | (ptr[2] << 16) | (ptr[3] << 24);
-}
-
-const char* DisassemblerMips::RegName(uint32_t reg) {
-  if (is_o32_abi_) {
-    return gO32AbiRegNames[reg];
-  } else {
-    return gN64AbiRegNames[reg];
-  }
-}
-
-size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) {
-  uint32_t instruction = ReadU32(instr_ptr);
-
-  uint32_t rs = (instruction >> 21) & 0x1f;  // I-type, R-type.
-  uint32_t rt = (instruction >> 16) & 0x1f;  // I-type, R-type.
-  uint32_t rd = (instruction >> 11) & 0x1f;  // R-type.
-  uint32_t sa = (instruction >>  6) & 0x1f;  // R-type.
-
-  std::string opcode;
-  std::ostringstream args;
-
-  // TODO: remove this!
-  uint32_t op = (instruction >> 26) & 0x3f;
-  uint32_t function = (instruction & 0x3f);  // R-type.
-  opcode = StringPrintf("op=%d fn=%d", op, function);
-
-  for (size_t i = 0; i < arraysize(gMipsInstructions); ++i) {
-    if (gMipsInstructions[i].Matches(instruction)) {
-      opcode = gMipsInstructions[i].name;
-      for (const char* args_fmt = gMipsInstructions[i].args_fmt; *args_fmt; ++args_fmt) {
-        switch (*args_fmt) {
-          case 'A':  // sa (shift amount or [d]ins/[d]ext position).
-            args << sa;
-            break;
-          case 'B':  // Branch offset.
-            {
-              int32_t offset = static_cast<int16_t>(instruction & 0xffff);
-              offset <<= 2;
-              offset += 4;  // Delay slot.
-              args << FormatInstructionPointer(instr_ptr + offset)
-                   << StringPrintf("  ; %+d", offset);
-            }
-            break;
-          case 'b':  // 21-bit branch offset.
-            {
-              int32_t offset = (instruction & 0x1fffff) - ((instruction & 0x100000) << 1);
-              offset <<= 2;
-              offset += 4;  // Delay slot.
-              args << FormatInstructionPointer(instr_ptr + offset)
-                   << StringPrintf("  ; %+d", offset);
-            }
-            break;
-          case 'C':  // Floating-point condition code flag in c.<cond>.fmt.
-            args << "cc" << (sa >> 2);
-            break;
-          case 'c':  // Floating-point condition code flag in bc1f/bc1t and movf/movt.
-            args << "cc" << (rt >> 2);
-            break;
-          case 'D': args << RegName(rd); break;
-          case 'd': args << 'f' << rd; break;
-          case 'a': args << 'f' << sa; break;
-          case 'F': args << (sa + 32); break;  // dinsu position.
-          case 'f':  // Floating point "fmt".
-            {
-              size_t fmt = (instruction >> 21) & 0x7;  // TODO: other fmts?
-              switch (fmt) {
-                case 0: opcode += ".s"; break;
-                case 1: opcode += ".d"; break;
-                case 4: opcode += ".w"; break;
-                case 5: opcode += ".l"; break;
-                case 6: opcode += ".ps"; break;
-                default: opcode += ".?"; break;
-              }
-              continue;  // No ", ".
-            }
-          case 'I':  // Unsigned lower 16-bit immediate.
-            args << (instruction & 0xffff);
-            break;
-          case 'i':  // Sign-extended lower 16-bit immediate.
-            args << static_cast<int16_t>(instruction & 0xffff);
-            break;
-          case 'J':  // sz (dinsm size).
-            args << (rd - sa + 33);
-            break;
-          case 'j':  // sa value for lsa/dlsa.
-            args << (sa + 1);
-            break;
-          case 'L':  // Jump label.
-            {
-              // TODO: is this right?
-              uint32_t instr_index = (instruction & 0x1ffffff);
-              uint32_t target = (instr_index << 2);
-              target |= (reinterpret_cast<uintptr_t>(instr_ptr + 4) & 0xf0000000);
-              args << reinterpret_cast<void*>(target);
-            }
-            break;
-          case 'l':  // 9-bit signed offset
-            {
-              int32_t offset = static_cast<int16_t>(instruction) >> 7;
-              args << StringPrintf("%+d(%s)", offset, RegName(rs));
-            }
-            break;
-          case 'O':  // +x(rs)
-            {
-              int32_t offset = static_cast<int16_t>(instruction & 0xffff);
-              args << StringPrintf("%+d(%s)", offset, RegName(rs));
-              if (rs == 17) {
-                args << "  ; ";
-                GetDisassemblerOptions()->thread_offset_name_function_(args, offset);
-              }
-            }
-            break;
-          case 'o':  // 19-bit offset in lwpc and lwupc.
-            {
-              int32_t offset = (instruction & 0x7ffff) - ((instruction & 0x40000) << 1);
-              offset <<= 2;
-              args << FormatInstructionPointer(instr_ptr + offset);
-              args << StringPrintf("  ; %+d", offset);
-            }
-            break;
-          case '0':  // 18-bit offset in ldpc.
-            {
-              int32_t offset = (instruction & 0x3ffff) - ((instruction & 0x20000) << 1);
-              offset <<= 3;
-              uintptr_t ptr = RoundDown(reinterpret_cast<uintptr_t>(instr_ptr), 8);
-              args << FormatInstructionPointer(reinterpret_cast<const uint8_t*>(ptr + offset));
-              args << StringPrintf("  ; %+d", offset);
-            }
-            break;
-          case 'P':  // 26-bit offset in bc and balc.
-            {
-              int32_t offset = (instruction & 0x3ffffff) - ((instruction & 0x2000000) << 1);
-              offset <<= 2;
-              offset += 4;
-              args << FormatInstructionPointer(instr_ptr + offset);
-              args << StringPrintf("  ; %+d", offset);
-            }
-            break;
-          case 'p':  // 19-bit offset in addiupc.
-            {
-              int32_t offset = (instruction & 0x7ffff) - ((instruction & 0x40000) << 1);
-              args << offset << "  ; move " << RegName(rs) << ", ";
-              args << FormatInstructionPointer(instr_ptr + (offset << 2));
-            }
-            break;
-          case 'S': args << RegName(rs); break;
-          case 's': args << 'f' << rs; break;
-          case 'T': args << RegName(rt); break;
-          case 't': args << 'f' << rt; break;
-          case 'Z': args << (rd + 1); break;  // sz ([d]ext size).
-          case 'z': args << (rd - sa + 1); break;  // sz ([d]ins, dinsu size).
-          case 'k': args << 'w' << sa; break;
-          case 'm': args << 'w' << rd; break;
-          case 'n': args << 'w' << rt; break;
-          case 'U':  // MSA 1-bit df (word/doubleword), position 21.
-            {
-              int32_t df = (instruction >> 21) & 0x1;
-              switch (df) {
-                case 0: opcode += ".w"; break;
-                case 1: opcode += ".d"; break;
-              }
-              continue;  // No ", ".
-            }
-          case 'u':  // MSA 1-bit df (word/doubleword), position 16.
-            {
-              int32_t df = (instruction >> 16) & 0x1;
-              switch (df) {
-                case 0: opcode += ".w"; break;
-                case 1: opcode += ".d"; break;
-              }
-              continue;  // No ", ".
-            }
-          case 'V':  // MSA 2-bit df, position 21.
-            {
-              int32_t df = (instruction >> 21) & 0x3;
-              switch (df) {
-                case 0: opcode += ".b"; break;
-                case 1: opcode += ".h"; break;
-                case 2: opcode += ".w"; break;
-                case 3: opcode += ".d"; break;
-              }
-              continue;  // No ", ".
-            }
-          case 'v':  // MSA 2-bit df, position 16.
-            {
-              int32_t df = (instruction >> 16) & 0x3;
-              switch (df) {
-                case 0: opcode += ".b"; break;
-                case 1: opcode += ".h"; break;
-                case 2: opcode += ".w"; break;
-                case 3: opcode += ".d"; break;
-              }
-              continue;  // No ", ".
-            }
-          case 'W':  // MSA df/m.
-            {
-              int32_t df_m = (instruction >> 16) & 0x7f;
-              if ((df_m & (0x1 << 6)) == 0) {
-                opcode += ".d";
-                args << (df_m & 0x3f);
-                break;
-              }
-              if ((df_m & (0x1 << 5)) == 0) {
-                opcode += ".w";
-                args << (df_m & 0x1f);
-                break;
-              }
-              if ((df_m & (0x1 << 4)) == 0) {
-                opcode += ".h";
-                args << (df_m & 0xf);
-                break;
-              }
-              if ((df_m & (0x1 << 3)) == 0) {
-                opcode += ".b";
-                args << (df_m & 0x7);
-              }
-              break;
-            }
-          case 'w':  // MSA +x(rs).
-            {
-              int32_t df = instruction & 0x3;
-              int32_t s10 = (instruction >> 16) & 0x3ff;
-              s10 -= (s10 & 0x200) << 1;  // Sign-extend s10.
-              switch (df) {
-                case 0: opcode += ".b"; break;
-                case 1: opcode += ".h"; break;
-                case 2: opcode += ".w"; break;
-                case 3: opcode += ".d"; break;
-              }
-              args << StringPrintf("%+d(%s)", s10 << df, RegName(rd));
-              break;
-            }
-          case 'X':  // MSA df/n - ws[x].
-            {
-              int32_t df_n = (instruction >> 16) & 0x3f;
-              if ((df_n & (0x3 << 4)) == 0) {
-                opcode += ".b";
-                args << 'w' << rd << '[' << (df_n & 0xf) << ']';
-                break;
-              }
-              if ((df_n & (0x3 << 3)) == 0) {
-                opcode += ".h";
-                args << 'w' << rd << '[' << (df_n & 0x7) << ']';
-                break;
-              }
-              if ((df_n & (0x3 << 2)) == 0) {
-                opcode += ".w";
-                args << 'w' << rd << '[' << (df_n & 0x3) << ']';
-                break;
-              }
-              if ((df_n & (0x3 << 1)) == 0) {
-                opcode += ".d";
-                args << 'w' << rd << '[' << (df_n & 0x1) << ']';
-              }
-              break;
-            }
-          case 'x':  // MSA i10.
-            {
-              int32_t df = (instruction >> 21) & 0x3;
-              int32_t i10 = (instruction >> 11) & 0x3ff;
-              i10 -= (i10 & 0x200) << 1;  // Sign-extend i10.
-              switch (df) {
-                case 0: opcode += ".b"; break;
-                case 1: opcode += ".h"; break;
-                case 2: opcode += ".w"; break;
-                case 3: opcode += ".d"; break;
-              }
-              args << i10;
-              break;
-            }
-          case 'Y':  // MSA df/n - wd[x].
-            {
-              int32_t df_n = (instruction >> 16) & 0x3f;
-              if ((df_n & (0x3 << 4)) == 0) {
-                opcode += ".b";
-                args << 'w' << sa << '[' << (df_n & 0xf) << ']';
-                break;
-              }
-              if ((df_n & (0x3 << 3)) == 0) {
-                opcode += ".h";
-                args << 'w' << sa << '[' << (df_n & 0x7) << ']';
-                break;
-              }
-              if ((df_n & (0x3 << 2)) == 0) {
-                opcode += ".w";
-                args << 'w' << sa << '[' << (df_n & 0x3) << ']';
-                break;
-              }
-              if ((df_n & (0x3 << 1)) == 0) {
-                opcode += ".d";
-                args << 'w' << sa << '[' << (df_n & 0x1) << ']';
-              }
-              break;
-            }
-          case 'y': args << RegName(sa); break;
-        }
-        if (*(args_fmt + 1)) {
-          args << ", ";
-        }
-      }
-      break;
-    }
-  }
-
-  // Special cases for sequences of:
-  //   pc-relative +/- 2GB branch:
-  //     auipc  reg, imm
-  //     jic    reg, imm
-  //   pc-relative +/- 2GB branch and link:
-  //     auipc  reg, imm
-  //     jialc  reg, imm
-  if (((op == 0x36 || op == 0x3E) && rs == 0 && rt != 0) &&  // ji[al]c
-      last_ptr_ && (intptr_t)instr_ptr - (intptr_t)last_ptr_ == 4 &&
-      (last_instr_ & 0xFC1F0000) == 0xEC1E0000 &&  // auipc
-      ((last_instr_ >> 21) & 0x1F) == rt) {
-    uint32_t offset = (last_instr_ << 16) | (instruction & 0xFFFF);
-    offset -= (offset & 0x8000) << 1;
-    offset -= 4;
-    if (op == 0x36) {
-      args << "  ; bc ";
-    } else {
-      args << "  ; balc ";
-    }
-    args << FormatInstructionPointer(instr_ptr + (int32_t)offset);
-    args << StringPrintf("  ; %+d", (int32_t)offset);
-  }
-
-  os << FormatInstructionPointer(instr_ptr)
-     << StringPrintf(": %08x\t%-7s ", instruction, opcode.c_str())
-     << args.str() << '\n';
-  last_ptr_ = instr_ptr;
-  last_instr_ = instruction;
-  return 4;
-}
-
-void DisassemblerMips::Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) {
-  for (const uint8_t* cur = begin; cur < end; cur += 4) {
-    Dump(os, cur);
-  }
-}
-
-}  // namespace mips
-}  // namespace art
diff --git a/disassembler/disassembler_mips.h b/disassembler/disassembler_mips.h
deleted file mode 100644
index bc74b43..0000000
--- a/disassembler/disassembler_mips.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_DISASSEMBLER_DISASSEMBLER_MIPS_H_
-#define ART_DISASSEMBLER_DISASSEMBLER_MIPS_H_
-
-#include <vector>
-
-#include "disassembler.h"
-
-namespace art {
-namespace mips {
-
-class DisassemblerMips final : public Disassembler {
- public:
-  explicit DisassemblerMips(DisassemblerOptions* options, bool is_o32_abi)
-      : Disassembler(options),
-        last_ptr_(nullptr),
-        last_instr_(0),
-        is_o32_abi_(is_o32_abi) {}
-
-  const char* RegName(uint32_t reg);
-  size_t Dump(std::ostream& os, const uint8_t* begin) override;
-  void Dump(std::ostream& os, const uint8_t* begin, const uint8_t* end) override;
-
- private:
-  // Address and encoding of the last disassembled instruction.
-  // Needed to produce more readable disassembly of certain 2-instruction sequences.
-  const uint8_t* last_ptr_;
-  uint32_t last_instr_;
-  const bool is_o32_abi_;
-
-  DISALLOW_COPY_AND_ASSIGN(DisassemblerMips);
-};
-
-}  // namespace mips
-}  // namespace art
-
-#endif  // ART_DISASSEMBLER_DISASSEMBLER_MIPS_H_
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index dbdde64..98201f9 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -24,6 +24,16 @@
 #include "android-base/logging.h"
 #include "android-base/stringprintf.h"
 
+#define TWO_BYTE_VEX    0xC5
+#define THREE_BYTE_VEX  0xC4
+#define VEX_M_0F        0x01
+#define VEX_M_0F_38     0x02
+#define VEX_M_0F_3A     0x03
+#define VEX_PP_NONE     0x00
+#define VEX_PP_66       0x01
+#define VEX_PP_F3       0x02
+#define VEX_PP_F2       0x03
+
 using android::base::StringPrintf;
 
 namespace art {
@@ -316,9 +326,11 @@
   if (rex != 0) {
     instr++;
   }
+
   const char** modrm_opcodes = nullptr;
   bool has_modrm = false;
   bool reg_is_opcode = false;
+
   size_t immediate_bytes = 0;
   size_t branch_bytes = 0;
   std::string opcode_tmp;    // Storage to keep StringPrintf result alive.
@@ -340,6 +352,8 @@
   bool no_ops = false;
   RegFile src_reg_file = GPR;
   RegFile dst_reg_file = GPR;
+
+
   switch (*instr) {
 #define DISASSEMBLER_ENTRY(opname, \
                      rm8_r8, rm32_r32, \
@@ -381,11 +395,12 @@
   0x32 /* Reg8/RegMem8 */,     0x33 /* Reg32/RegMem32 */,
   0x34 /* Rax8/imm8 opcode */, 0x35 /* Rax32/imm32 */)
 DISASSEMBLER_ENTRY(cmp,
-  0x38 /* RegMem8/Reg8 */,     0x39 /* RegMem32/Reg32 */,
+  0x38 /* RegMem8/Reg8 */,     0x39 /* RegMem/Reg32 */,
   0x3A /* Reg8/RegMem8 */,     0x3B /* Reg32/RegMem32 */,
   0x3C /* Rax8/imm8 opcode */, 0x3D /* Rax32/imm32 */)
 
 #undef DISASSEMBLER_ENTRY
+
   case 0x50: case 0x51: case 0x52: case 0x53: case 0x54: case 0x55: case 0x56: case 0x57:
     opcode1 = "push";
     reg_in_opcode = true;
@@ -1372,6 +1387,7 @@
     byte_operand = (*instr == 0xC0);
     break;
   case 0xC3: opcode1 = "ret"; break;
+
   case 0xC6:
     static const char* c6_opcodes[] = {"mov",        "unknown-c6", "unknown-c6",
                                        "unknown-c6", "unknown-c6", "unknown-c6",
@@ -1521,6 +1537,7 @@
         args << ", ";
       }
       DumpSegmentOverride(args, prefix[1]);
+
       args << address;
     } else {
       DCHECK(store);
@@ -1595,7 +1612,7 @@
      << StringPrintf(": %22s    \t%-7s%s%s%s%s%s ", DumpCodeHex(begin_instr, instr).c_str(),
                      prefix_str, opcode0, opcode1, opcode2, opcode3, opcode4)
      << args.str() << '\n';
-  return instr - begin_instr;
+    return instr - begin_instr;
 }  // NOLINT(readability/fn_size)
 
 }  // namespace x86
diff --git a/dt_fd_forward/Android.bp b/dt_fd_forward/Android.bp
index 2a2aa18..e722a4d 100644
--- a/dt_fd_forward/Android.bp
+++ b/dt_fd_forward/Android.bp
@@ -41,21 +41,17 @@
     header_libs: [
         "javavm_headers",
         "dt_fd_forward_export",
-        "art_libartbase_headers",  // For strlcpy emulation.
+        "art_libartbase_headers", // For strlcpy emulation.
     ],
-    multilib: {
-        lib32: {
-            suffix: "32",
-        },
-        lib64: {
-            suffix: "64",
-        },
-    },
 }
 
 art_cc_library {
     name: "libdt_fd_forward",
     defaults: ["dt_fd_forward-defaults"],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_library {
diff --git a/dt_fd_forward/export/Android.bp b/dt_fd_forward/export/Android.bp
index c3a6321..4039196 100644
--- a/dt_fd_forward/export/Android.bp
+++ b/dt_fd_forward/export/Android.bp
@@ -16,7 +16,12 @@
 
 cc_library_headers {
     name: "dt_fd_forward_export",
-    export_include_dirs: [ "." ],
+    export_include_dirs: ["."],
     host_supported: true,
     device_supported: true,
+
+    apex_available: [
+        "com.android.art.debug",
+        "com.android.art.release",
+    ],
 }
diff --git a/imgdiag/Android.bp b/imgdiag/Android.bp
index 39720a0..7c2f515 100644
--- a/imgdiag/Android.bp
+++ b/imgdiag/Android.bp
@@ -57,6 +57,10 @@
         "libartbase",
         "libart-compiler",
     ],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_binary {
@@ -70,6 +74,9 @@
         "libartbased",
         "libartd-compiler",
     ],
+    apex_available: [
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_test {
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 3566498..421e7d7 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -849,9 +849,13 @@
     std::vector<const OatFile*> boot_oat_files = oat_file_manager.GetBootOatFiles();
     for (const OatFile* oat_file : boot_oat_files) {
       const OatHeader& oat_header = oat_file->GetOatHeader();
-      const void* jdl = oat_header.GetJniDlsymLookup();
+      const void* jdl = oat_header.GetJniDlsymLookupTrampoline();
       if (jdl != nullptr) {
-        entry_point_names_[jdl] = "JniDlsymLookup (from boot oat file)";
+        entry_point_names_[jdl] = "JniDlsymLookupTrampoline (from boot oat file)";
+      }
+      const void* jdlc = oat_header.GetJniDlsymLookupCriticalTrampoline();
+      if (jdlc != nullptr) {
+        entry_point_names_[jdlc] = "JniDlsymLookupCriticalTrampoline (from boot oat file)";
       }
       const void* qgjt = oat_header.GetQuickGenericJniTrampoline();
       if (qgjt != nullptr) {
@@ -897,6 +901,8 @@
           return "QuickResolutionStub";
         } else if (class_linker_->IsJniDlsymLookupStub(addr)) {
           return "JniDlsymLookupStub";
+        } else if (class_linker_->IsJniDlsymLookupCriticalStub(addr)) {
+          return "JniDlsymLookupCriticalStub";
         }
         // Match the address against those that we saved from the boot OAT files.
         if (entry_point_names_.find(addr) != entry_point_names_.end()) {
@@ -1442,7 +1448,10 @@
         -> std::optional<backtrace_map_t> {
       // Find the memory map for the current boot image component.
       for (const backtrace_map_t* map : maps) {
-        if (EndsWith(map->name, image_location_base_name)) {
+        // The map name ends with ']' if it's an anonymous memmap. We need to special case that
+        // to find the boot image map in some cases.
+        if (EndsWith(map->name, image_location_base_name) ||
+            EndsWith(map->name, image_location_base_name + "]")) {
           if ((map->flags & PROT_WRITE) != 0) {
             return *map;
           }
@@ -1464,6 +1473,10 @@
     backtrace_map_t boot_map = maybe_boot_map.value_or(backtrace_map_t{});
     // Sanity check boot_map_.
     CHECK(boot_map.end >= boot_map.start);
+
+    // Adjust the `end` of the mapping. Some other mappings may have been
+    // inserted within the image.
+    boot_map.end = RoundUp(boot_map.start + image_header.GetImageSize(), kPageSize);
     // The size of the boot image mapping.
     size_t boot_map_size = boot_map.end - boot_map.start;
 
@@ -1475,7 +1488,10 @@
         return false;
       }
       backtrace_map_t zygote_boot_map = maybe_zygote_boot_map.value_or(backtrace_map_t{});
-      if (zygote_boot_map.start != boot_map.start || zygote_boot_map.end != boot_map.end) {
+      // Adjust the `end` of the mapping. Some other mappings may have been
+      // inserted within the image.
+      zygote_boot_map.end = RoundUp(zygote_boot_map.start + image_header.GetImageSize(), kPageSize);
+      if (zygote_boot_map.start != boot_map.start) {
         os << "Zygote boot map does not match image boot map: "
            << "zygote begin " << reinterpret_cast<const void*>(zygote_boot_map.start)
            << ", zygote end " << reinterpret_cast<const void*>(zygote_boot_map.end)
@@ -1740,7 +1756,7 @@
       }
     }
 
-    return page_frame_number != page_frame_number_clean;
+    return (page_frame_number != page_frame_number_clean) ? 1 : 0;
   }
 
   void PrintPidLine(const std::string& kind, pid_t pid) {
diff --git a/imgdiag/imgdiag_test.cc b/imgdiag/imgdiag_test.cc
index 739d9b8..9dd7953 100644
--- a/imgdiag/imgdiag_test.cc
+++ b/imgdiag/imgdiag_test.cc
@@ -61,22 +61,17 @@
 
   // Path to the imgdiag(d?)[32|64] binary.
   std::string GetImgDiagFilePath() {
-    std::string root = GetTestAndroidRoot();
-
-    root += "/bin/";
-    root += kImgDiagBinaryName;
-
+    std::string path = GetArtBinDir() + '/' + kImgDiagBinaryName;
     if (kIsDebugBuild) {
-      root += "d";
+      path += 'd';
     }
-
-    std::string root32 = root + "32";
+    std::string path32 = path + "32";
     // If we have both a 32-bit and a 64-bit build, the 32-bit file will have a 32 suffix.
-    if (OS::FileExists(root32.c_str()) && !Is64BitInstructionSet(kRuntimeISA)) {
-      return root32;
+    if (OS::FileExists(path32.c_str()) && !Is64BitInstructionSet(kRuntimeISA)) {
+      return path32;
     // Only a single build exists, so the filename never has an extra suffix.
     } else {
-      return root;
+      return path;
     }
   }
 
@@ -113,12 +108,11 @@
   std::string boot_image_location_;
 };
 
-#if defined (ART_TARGET) && !defined(__mips__)
+#if defined (ART_TARGET)
 TEST_F(ImgDiagTest, ImageDiffPidSelf) {
 #else
 // Can't run this test on the host, it will fail when trying to open /proc/kpagestats
 // because it's root read-only.
-// Also test fails on mips. b/24596015.
 TEST_F(ImgDiagTest, DISABLED_ImageDiffPidSelf) {
 #endif
   // Invoke 'img_diag' against the current process.
diff --git a/libartbase/Android.bp b/libartbase/Android.bp
index 992f7ec..a9d5db2 100644
--- a/libartbase/Android.bp
+++ b/libartbase/Android.bp
@@ -73,6 +73,7 @@
         },
         not_windows: {
             srcs: [
+                "base/globals_unix.cc",
                 "base/mem_map_unix.cc",
             ],
             shared_libs: [
@@ -107,7 +108,7 @@
             cflags: ["-Wno-thread-safety"],
         },
         darwin: {
-            enabled: true,  // for libdexfile.
+            enabled: true, // for libdexfile.
         },
     },
     generated_sources: ["art_libartbase_operator_srcs"],
@@ -164,12 +165,19 @@
 
 art_cc_library {
     name: "libartbase",
-    defaults: ["libartbase_defaults"],
-    // Leave the symbols in the shared library so that stack unwinders can
-    // produce meaningful name resolution.
-    strip: {
-        keep_symbols: true,
-    },
+    defaults: [
+        "libartbase_defaults",
+        "libart_nativeunwind_defaults",
+    ],
+    visibility: [
+        // TODO(b/133140750): Clean this up.
+        "//packages/modules/NetworkStack/tests:__subpackages__",
+    ],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
+
     shared_libs: [
         "libbase",
         "libziparchive",
@@ -191,6 +199,9 @@
         "art_debug_defaults",
         "libartbase_defaults",
     ],
+    apex_available: [
+        "com.android.art.debug",
+    ],
     shared_libs: [
         "libbase",
         "libziparchive",
@@ -221,6 +232,12 @@
     header_libs: [
         "libnativehelper_header_only",
     ],
+    static: {
+        whole_static_libs: ["libc++fs"],
+    },
+    shared: {
+        static_libs: ["libc++fs"],
+    },
 }
 
 art_cc_test {
@@ -243,13 +260,13 @@
         "base/hex_dump_test.cc",
         "base/histogram_test.cc",
         "base/indenter_test.cc",
+        "base/intrusive_forward_list_test.cc",
         "base/leb128_test.cc",
         "base/logging_test.cc",
         "base/memfd_test.cc",
         "base/membarrier_test.cc",
         "base/memory_region_test.cc",
         "base/mem_map_test.cc",
-        "base/memory_type_table_test.cc",
         "base/safe_copy_test.cc",
         "base/scoped_flock_test.cc",
         "base/time_utils_test.cc",
@@ -271,4 +288,9 @@
     export_include_dirs: ["."],
     shared_libs: ["libbase"],
     export_shared_lib_headers: ["libbase"],
+
+    apex_available: [
+        "com.android.art.debug",
+        "com.android.art.release",
+    ],
 }
diff --git a/libartbase/arch/instruction_set.cc b/libartbase/arch/instruction_set.cc
index 8d4fbf4..9ec66fe 100644
--- a/libartbase/arch/instruction_set.cc
+++ b/libartbase/arch/instruction_set.cc
@@ -29,8 +29,6 @@
     case InstructionSet::kArm64:
     case InstructionSet::kX86:
     case InstructionSet::kX86_64:
-    case InstructionSet::kMips:
-    case InstructionSet::kMips64:
     case InstructionSet::kNone:
       LOG(FATAL) << "Unsupported instruction set " << isa;
       UNREACHABLE();
@@ -50,10 +48,6 @@
       return "x86";
     case InstructionSet::kX86_64:
       return "x86_64";
-    case InstructionSet::kMips:
-      return "mips";
-    case InstructionSet::kMips64:
-      return "mips64";
     case InstructionSet::kNone:
       return "none";
   }
@@ -72,10 +66,6 @@
     return InstructionSet::kX86;
   } else if (strcmp("x86_64", isa_str) == 0) {
     return InstructionSet::kX86_64;
-  } else if (strcmp("mips", isa_str) == 0) {
-    return InstructionSet::kMips;
-  } else if (strcmp("mips64", isa_str) == 0) {
-    return InstructionSet::kMips64;
   }
 
   return InstructionSet::kNone;
@@ -93,10 +83,6 @@
       // Fall-through.
     case InstructionSet::kX86_64:
       return kX86Alignment;
-    case InstructionSet::kMips:
-      // Fall-through.
-    case InstructionSet::kMips64:
-      return kMipsAlignment;
     case InstructionSet::kNone:
       LOG(FATAL) << "ISA kNone does not have alignment.";
       UNREACHABLE();
@@ -109,9 +95,6 @@
 
 static_assert(IsAligned<kPageSize>(kArmStackOverflowReservedBytes), "ARM gap not page aligned");
 static_assert(IsAligned<kPageSize>(kArm64StackOverflowReservedBytes), "ARM64 gap not page aligned");
-static_assert(IsAligned<kPageSize>(kMipsStackOverflowReservedBytes), "Mips gap not page aligned");
-static_assert(IsAligned<kPageSize>(kMips64StackOverflowReservedBytes),
-              "Mips64 gap not page aligned");
 static_assert(IsAligned<kPageSize>(kX86StackOverflowReservedBytes), "X86 gap not page aligned");
 static_assert(IsAligned<kPageSize>(kX86_64StackOverflowReservedBytes),
               "X86_64 gap not page aligned");
@@ -124,10 +107,6 @@
 static_assert(ART_FRAME_SIZE_LIMIT < kArmStackOverflowReservedBytes, "Frame size limit too large");
 static_assert(ART_FRAME_SIZE_LIMIT < kArm64StackOverflowReservedBytes,
               "Frame size limit too large");
-static_assert(ART_FRAME_SIZE_LIMIT < kMipsStackOverflowReservedBytes,
-              "Frame size limit too large");
-static_assert(ART_FRAME_SIZE_LIMIT < kMips64StackOverflowReservedBytes,
-              "Frame size limit too large");
 static_assert(ART_FRAME_SIZE_LIMIT < kX86StackOverflowReservedBytes,
               "Frame size limit too large");
 static_assert(ART_FRAME_SIZE_LIMIT < kX86_64StackOverflowReservedBytes,
diff --git a/libartbase/arch/instruction_set.h b/libartbase/arch/instruction_set.h
index 7e071bd..6f0cf52 100644
--- a/libartbase/arch/instruction_set.h
+++ b/libartbase/arch/instruction_set.h
@@ -32,9 +32,7 @@
   kThumb2,
   kX86,
   kX86_64,
-  kMips,
-  kMips64,
-  kLast = kMips64
+  kLast = kX86_64
 };
 std::ostream& operator<<(std::ostream& os, const InstructionSet& rhs);
 
@@ -42,10 +40,6 @@
 static constexpr InstructionSet kRuntimeISA = InstructionSet::kArm;
 #elif defined(__aarch64__)
 static constexpr InstructionSet kRuntimeISA = InstructionSet::kArm64;
-#elif defined(__mips__) && !defined(__LP64__)
-static constexpr InstructionSet kRuntimeISA = InstructionSet::kMips;
-#elif defined(__mips__) && defined(__LP64__)
-static constexpr InstructionSet kRuntimeISA = InstructionSet::kMips64;
 #elif defined(__i386__)
 static constexpr InstructionSet kRuntimeISA = InstructionSet::kX86;
 #elif defined(__x86_64__)
@@ -57,8 +51,6 @@
 // Architecture-specific pointer sizes
 static constexpr PointerSize kArmPointerSize = PointerSize::k32;
 static constexpr PointerSize kArm64PointerSize = PointerSize::k64;
-static constexpr PointerSize kMipsPointerSize = PointerSize::k32;
-static constexpr PointerSize kMips64PointerSize = PointerSize::k64;
 static constexpr PointerSize kX86PointerSize = PointerSize::k32;
 static constexpr PointerSize kX86_64PointerSize = PointerSize::k64;
 
@@ -69,10 +61,6 @@
 // ARM64 instruction alignment. This is the recommended alignment for maximum performance.
 static constexpr size_t kArm64Alignment = 16;
 
-// MIPS instruction alignment.  MIPS processors require code to be 4-byte aligned,
-// but 64-bit literals must be 8-byte aligned.
-static constexpr size_t kMipsAlignment = 8;
-
 // X86 instruction alignment. This is the recommended alignment for maximum performance.
 static constexpr size_t kX86Alignment = 16;
 
@@ -81,8 +69,6 @@
 static constexpr size_t kArm64InstructionAlignment = 4;
 static constexpr size_t kX86InstructionAlignment = 1;
 static constexpr size_t kX86_64InstructionAlignment = 1;
-static constexpr size_t kMipsInstructionAlignment = 4;
-static constexpr size_t kMips64InstructionAlignment = 4;
 
 const char* GetInstructionSetString(InstructionSet isa);
 
@@ -104,10 +90,6 @@
       return kX86PointerSize;
     case InstructionSet::kX86_64:
       return kX86_64PointerSize;
-    case InstructionSet::kMips:
-      return kMipsPointerSize;
-    case InstructionSet::kMips64:
-      return kMips64PointerSize;
 
     case InstructionSet::kNone:
       break;
@@ -127,10 +109,6 @@
       return kX86InstructionAlignment;
     case InstructionSet::kX86_64:
       return kX86_64InstructionAlignment;
-    case InstructionSet::kMips:
-      return kMipsInstructionAlignment;
-    case InstructionSet::kMips64:
-      return kMips64InstructionAlignment;
 
     case InstructionSet::kNone:
       break;
@@ -145,8 +123,6 @@
     case InstructionSet::kArm64:
     case InstructionSet::kX86:
     case InstructionSet::kX86_64:
-    case InstructionSet::kMips:
-    case InstructionSet::kMips64:
       return true;
 
     case InstructionSet::kNone:
@@ -162,12 +138,10 @@
     case InstructionSet::kArm:
     case InstructionSet::kThumb2:
     case InstructionSet::kX86:
-    case InstructionSet::kMips:
       return false;
 
     case InstructionSet::kArm64:
     case InstructionSet::kX86_64:
-    case InstructionSet::kMips64:
       return true;
 
     case InstructionSet::kNone:
@@ -192,10 +166,6 @@
       return 4;
     case InstructionSet::kX86_64:
       return 8;
-    case InstructionSet::kMips:
-      return 4;
-    case InstructionSet::kMips64:
-      return 8;
 
     case InstructionSet::kNone:
       break;
@@ -215,10 +185,6 @@
       return 8;
     case InstructionSet::kX86_64:
       return 8;
-    case InstructionSet::kMips:
-      return 4;
-    case InstructionSet::kMips64:
-      return 8;
 
     case InstructionSet::kNone:
       break;
@@ -229,15 +195,12 @@
 namespace instruction_set_details {
 
 #if !defined(ART_STACK_OVERFLOW_GAP_arm) || !defined(ART_STACK_OVERFLOW_GAP_arm64) || \
-    !defined(ART_STACK_OVERFLOW_GAP_mips) || !defined(ART_STACK_OVERFLOW_GAP_mips64) || \
     !defined(ART_STACK_OVERFLOW_GAP_x86) || !defined(ART_STACK_OVERFLOW_GAP_x86_64)
 #error "Missing defines for stack overflow gap"
 #endif
 
 static constexpr size_t kArmStackOverflowReservedBytes    = ART_STACK_OVERFLOW_GAP_arm;
 static constexpr size_t kArm64StackOverflowReservedBytes  = ART_STACK_OVERFLOW_GAP_arm64;
-static constexpr size_t kMipsStackOverflowReservedBytes   = ART_STACK_OVERFLOW_GAP_mips;
-static constexpr size_t kMips64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_mips64;
 static constexpr size_t kX86StackOverflowReservedBytes    = ART_STACK_OVERFLOW_GAP_x86;
 static constexpr size_t kX86_64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_x86_64;
 
@@ -255,12 +218,6 @@
     case InstructionSet::kArm64:
       return instruction_set_details::kArm64StackOverflowReservedBytes;
 
-    case InstructionSet::kMips:
-      return instruction_set_details::kMipsStackOverflowReservedBytes;
-
-    case InstructionSet::kMips64:
-      return instruction_set_details::kMips64StackOverflowReservedBytes;
-
     case InstructionSet::kX86:
       return instruction_set_details::kX86StackOverflowReservedBytes;
 
@@ -278,10 +235,10 @@
 // in registers so that memory operations for the interface trampolines can be avoided. The entities
 // are the resolved method and the pointer to the code to be invoked.
 //
-// On x86, ARM32 and MIPS, this is given for a *scalar* 64bit value. The definition thus *must* be
+// On x86 and ARM32, this is given for a *scalar* 64bit value. The definition thus *must* be
 // uint64_t or long long int.
 //
-// On x86_64, ARM64 and MIPS64, structs are decomposed for allocation, so we can create a structs of
+// On x86_64 and ARM64, structs are decomposed for allocation, so we can create a structs of
 // two size_t-sized values.
 //
 // We need two operations:
@@ -297,7 +254,7 @@
 //            when the garbage collector can move objects concurrently. Ensure that required locks
 //            are held when using!
 
-#if defined(__i386__) || defined(__arm__) || (defined(__mips__) && !defined(__LP64__))
+#if defined(__i386__) || defined(__arm__)
 typedef uint64_t TwoWordReturn;
 
 // Encodes method_ptr==nullptr and code_ptr==nullptr
@@ -313,7 +270,7 @@
   return ((hi64 << 32) | lo32);
 }
 
-#elif defined(__x86_64__) || defined(__aarch64__) || (defined(__mips__) && defined(__LP64__))
+#elif defined(__x86_64__) || defined(__aarch64__)
 
 // Note: TwoWordReturn can't be constexpr for 64-bit targets. We'd need a constexpr constructor,
 //       which would violate C-linkage in the entrypoint functions.
diff --git a/libartbase/arch/instruction_set_test.cc b/libartbase/arch/instruction_set_test.cc
index 12a117d..26071f1 100644
--- a/libartbase/arch/instruction_set_test.cc
+++ b/libartbase/arch/instruction_set_test.cc
@@ -27,8 +27,6 @@
   EXPECT_EQ(InstructionSet::kArm64, GetInstructionSetFromString("arm64"));
   EXPECT_EQ(InstructionSet::kX86, GetInstructionSetFromString("x86"));
   EXPECT_EQ(InstructionSet::kX86_64, GetInstructionSetFromString("x86_64"));
-  EXPECT_EQ(InstructionSet::kMips, GetInstructionSetFromString("mips"));
-  EXPECT_EQ(InstructionSet::kMips64, GetInstructionSetFromString("mips64"));
   EXPECT_EQ(InstructionSet::kNone, GetInstructionSetFromString("none"));
   EXPECT_EQ(InstructionSet::kNone, GetInstructionSetFromString("random-string"));
 }
@@ -39,8 +37,6 @@
   EXPECT_STREQ("arm64", GetInstructionSetString(InstructionSet::kArm64));
   EXPECT_STREQ("x86", GetInstructionSetString(InstructionSet::kX86));
   EXPECT_STREQ("x86_64", GetInstructionSetString(InstructionSet::kX86_64));
-  EXPECT_STREQ("mips", GetInstructionSetString(InstructionSet::kMips));
-  EXPECT_STREQ("mips64", GetInstructionSetString(InstructionSet::kMips64));
   EXPECT_STREQ("none", GetInstructionSetString(InstructionSet::kNone));
 }
 
@@ -53,10 +49,6 @@
             kX86InstructionAlignment);
   EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kX86_64),
             kX86_64InstructionAlignment);
-  EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kMips),
-            kMipsInstructionAlignment);
-  EXPECT_EQ(GetInstructionSetInstructionAlignment(InstructionSet::kMips64),
-            kMips64InstructionAlignment);
 }
 
 TEST(InstructionSetTest, TestRoundTrip) {
diff --git a/libartbase/base/array_ref.h b/libartbase/base/array_ref.h
index 1d7bde60..e8b3bce 100644
--- a/libartbase/base/array_ref.h
+++ b/libartbase/base/array_ref.h
@@ -70,8 +70,8 @@
       : array_(array), size_(size) {
   }
 
-  constexpr ArrayRef(T* array_in, size_t size_in)
-      : array_(array_in), size_(size_in) {
+  constexpr ArrayRef(T* array, size_t size)
+      : array_(array), size_(size) {
   }
 
   template <typename Vector,
diff --git a/libartbase/base/array_slice.h b/libartbase/base/array_slice.h
index fb3da6b..4679146 100644
--- a/libartbase/base/array_slice.h
+++ b/libartbase/base/array_slice.h
@@ -20,6 +20,7 @@
 #include "bit_utils.h"
 #include "casts.h"
 #include "iteration_range.h"
+#include "length_prefixed_array.h"
 #include "stride_iterator.h"
 
 namespace art {
@@ -55,6 +56,14 @@
     DCHECK(array_ != nullptr || length == 0);
   }
 
+  ArraySlice(LengthPrefixedArray<T>* lpa,
+             size_t element_size = sizeof(T),
+             size_t alignment = alignof(T))
+      : ArraySlice(
+            lpa != nullptr && lpa->size() != 0 ? &lpa->At(0, element_size, alignment) : nullptr,
+            lpa != nullptr ? lpa->size() : 0,
+            element_size) {}
+
   // Iterators.
   iterator begin() { return iterator(&AtUnchecked(0), element_size_); }
   const_iterator begin() const { return const_iterator(&AtUnchecked(0), element_size_); }
@@ -130,7 +139,17 @@
   }
 
   bool Contains(const T* element) const {
-    return &AtUnchecked(0) <= element && element < &AtUnchecked(size_);
+    return &AtUnchecked(0) <= element && element < &AtUnchecked(size_) &&
+          ((reinterpret_cast<uintptr_t>(element) -
+            reinterpret_cast<uintptr_t>(&AtUnchecked(0))) % element_size_) == 0;
+  }
+
+  size_t OffsetOf(const T* element) const {
+    DCHECK(Contains(element));
+    // Since it's possible element_size_ != sizeof(T) we cannot just use pointer arithmatic
+    uintptr_t base_ptr = reinterpret_cast<uintptr_t>(&AtUnchecked(0));
+    uintptr_t obj_ptr = reinterpret_cast<uintptr_t>(element);
+    return (obj_ptr - base_ptr) / element_size_;
   }
 
  private:
diff --git a/libartbase/base/bit_memory_region.h b/libartbase/base/bit_memory_region.h
index 1f1011e..5d54445 100644
--- a/libartbase/base/bit_memory_region.h
+++ b/libartbase/base/bit_memory_region.h
@@ -22,6 +22,8 @@
 #include "bit_utils.h"
 #include "memory_tool.h"
 
+#include <array>
+
 namespace art {
 
 // Bit memory region is a bit offset subregion of a normal memoryregion. This is useful for
@@ -37,11 +39,9 @@
   BitMemoryRegion() = default;
   ALWAYS_INLINE BitMemoryRegion(uint8_t* data, ssize_t bit_start, size_t bit_size) {
     // Normalize the data pointer. Note that bit_start may be negative.
-    uint8_t* aligned_data = AlignDown(data + (bit_start >> kBitsPerByteLog2), sizeof(uintptr_t));
-    data_ = reinterpret_cast<uintptr_t*>(aligned_data);
-    bit_start_ = bit_start + kBitsPerByte * (data - aligned_data);
+    data_ = AlignDown(data + (bit_start >> kBitsPerByteLog2), kPageSize);
+    bit_start_ = bit_start + kBitsPerByte * (data - data_);
     bit_size_ = bit_size;
-    DCHECK_LT(bit_start_, static_cast<size_t>(kBitsPerIntPtrT));
   }
   ALWAYS_INLINE explicit BitMemoryRegion(MemoryRegion region)
     : BitMemoryRegion(region.begin(), /* bit_start */ 0, region.size_in_bits()) {
@@ -55,7 +55,7 @@
 
   const uint8_t* data() const {
     DCHECK_ALIGNED(bit_start_, kBitsPerByte);
-    return reinterpret_cast<const uint8_t*>(data_) + bit_start_ / kBitsPerByte;
+    return data_ + bit_start_ / kBitsPerByte;
   }
 
   size_t size_in_bits() const {
@@ -87,42 +87,51 @@
   // significant bit in the first byte.
   ALWAYS_INLINE bool LoadBit(size_t bit_offset) const {
     DCHECK_LT(bit_offset, bit_size_);
-    uint8_t* data = reinterpret_cast<uint8_t*>(data_);
     size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
     size_t shift = (bit_start_ + bit_offset) % kBitsPerByte;
-    return ((data[index] >> shift) & 1) != 0;
+    return ((data_[index] >> shift) & 1) != 0;
   }
 
   ALWAYS_INLINE void StoreBit(size_t bit_offset, bool value) {
     DCHECK_LT(bit_offset, bit_size_);
-    uint8_t* data = reinterpret_cast<uint8_t*>(data_);
     size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
     size_t shift = (bit_start_ + bit_offset) % kBitsPerByte;
-    data[index] &= ~(1 << shift);  // Clear bit.
-    data[index] |= (value ? 1 : 0) << shift;  // Set bit.
+    data_[index] &= ~(1 << shift);  // Clear bit.
+    data_[index] |= (value ? 1 : 0) << shift;  // Set bit.
     DCHECK_EQ(value, LoadBit(bit_offset));
   }
 
   // Load `bit_length` bits from `data` starting at given `bit_offset`.
   // The least significant bit is stored in the smallest memory offset.
+  template<typename Result = size_t>
   ATTRIBUTE_NO_SANITIZE_ADDRESS  // We might touch extra bytes due to the alignment.
-  ALWAYS_INLINE uint32_t LoadBits(size_t bit_offset, size_t bit_length) const {
-    DCHECK(IsAligned<sizeof(uintptr_t)>(data_));
+  ATTRIBUTE_NO_SANITIZE_HWADDRESS  // The hwasan uses different attribute.
+  ALWAYS_INLINE Result LoadBits(size_t bit_offset, size_t bit_length) const {
+    static_assert(std::is_integral<Result>::value, "Result must be integral");
+    static_assert(std::is_unsigned<Result>::value, "Result must be unsigned");
+    DCHECK(IsAligned<sizeof(Result)>(data_));
     DCHECK_LE(bit_offset, bit_size_);
     DCHECK_LE(bit_length, bit_size_ - bit_offset);
-    DCHECK_LE(bit_length, BitSizeOf<uint32_t>());
+    DCHECK_LE(bit_length, BitSizeOf<Result>());
     if (bit_length == 0) {
       return 0;
     }
-    uintptr_t mask = std::numeric_limits<uintptr_t>::max() >> (kBitsPerIntPtrT - bit_length);
-    size_t index = (bit_start_ + bit_offset) / kBitsPerIntPtrT;
-    size_t shift = (bit_start_ + bit_offset) % kBitsPerIntPtrT;
-    uintptr_t value = data_[index] >> shift;
-    size_t finished_bits = kBitsPerIntPtrT - shift;
-    if (finished_bits < bit_length) {
-      value |= data_[index + 1] << finished_bits;
-    }
-    return value & mask;
+    // Load naturally-aligned value which contains the least significant bit.
+    Result* data = reinterpret_cast<Result*>(data_);
+    size_t width = BitSizeOf<Result>();
+    size_t index = (bit_start_ + bit_offset) / width;
+    size_t shift = (bit_start_ + bit_offset) % width;
+    Result value = data[index] >> shift;
+    // Load extra value containing the most significant bit (it might be the same one).
+    // We can not just load the following value as that could potentially cause SIGSEGV.
+    Result extra = data[index + (shift + (bit_length - 1)) / width];
+    // Mask to clear unwanted bits (the 1s are needed to avoid avoid undefined shift).
+    Result clear = (std::numeric_limits<Result>::max() << 1) << (bit_length - 1);
+    // Prepend the extra value.  We add explicit '& (width - 1)' so that the shift is defined.
+    // It is a no-op for `shift != 0` and if `shift == 0` then `value == extra` because of
+    // bit_length <= width causing the `value` and `extra` to be read from the same location.
+    // The '& (width - 1)' is implied by the shift instruction on ARM and removed by compiler.
+    return (value | (extra << ((width - shift) & (width - 1)))) & ~clear;
   }
 
   // Store `bit_length` bits in `data` starting at given `bit_offset`.
@@ -137,16 +146,15 @@
     }
     // Write data byte by byte to avoid races with other threads
     // on bytes that do not overlap with this region.
-    uint8_t* data = reinterpret_cast<uint8_t*>(data_);
     uint32_t mask = std::numeric_limits<uint32_t>::max() >> (BitSizeOf<uint32_t>() - bit_length);
     size_t index = (bit_start_ + bit_offset) / kBitsPerByte;
     size_t shift = (bit_start_ + bit_offset) % kBitsPerByte;
-    data[index] &= ~(mask << shift);  // Clear bits.
-    data[index] |= (value << shift);  // Set bits.
+    data_[index] &= ~(mask << shift);  // Clear bits.
+    data_[index] |= (value << shift);  // Set bits.
     size_t finished_bits = kBitsPerByte - shift;
     for (int i = 1; finished_bits < bit_length; i++, finished_bits += kBitsPerByte) {
-      data[index + i] &= ~(mask >> finished_bits);  // Clear bits.
-      data[index + i] |= (value >> finished_bits);  // Set bits.
+      data_[index + i] &= ~(mask >> finished_bits);  // Clear bits.
+      data_[index + i] |= (value >> finished_bits);  // Set bits.
     }
     DCHECK_EQ(value, LoadBits(bit_offset, bit_length));
   }
@@ -201,14 +209,13 @@
   }
 
  private:
-  // The data pointer must be naturally aligned. This makes loading code faster.
-  uintptr_t* data_ = nullptr;
+  uint8_t* data_ = nullptr;  // The pointer is page aligned.
   size_t bit_start_ = 0;
   size_t bit_size_ = 0;
 };
 
-constexpr uint32_t kVarintHeaderBits = 4;
-constexpr uint32_t kVarintSmallValue = 11;  // Maximum value which is stored as-is.
+constexpr uint32_t kVarintBits = 4;  // Minimum number of bits used for varint.
+constexpr uint32_t kVarintMax = 11;  // Maximum value which is stored "inline".
 
 class BitMemoryReader {
  public:
@@ -232,8 +239,9 @@
     return finished_region_.Subregion(bit_offset, bit_length);
   }
 
-  ALWAYS_INLINE uint32_t ReadBits(size_t bit_length) {
-    return ReadRegion(bit_length).LoadBits(/* bit_offset */ 0, bit_length);
+  template<typename Result = size_t>
+  ALWAYS_INLINE Result ReadBits(size_t bit_length) {
+    return ReadRegion(bit_length).LoadBits<Result>(/* bit_offset */ 0, bit_length);
   }
 
   ALWAYS_INLINE bool ReadBit() {
@@ -245,11 +253,29 @@
   //   Values 0..11 represent the result as-is, with no further following bits.
   //   Values 12..15 mean the result is in the next 8/16/24/32-bits respectively.
   ALWAYS_INLINE uint32_t ReadVarint() {
-    uint32_t x = ReadBits(kVarintHeaderBits);
-    if (x > kVarintSmallValue) {
-      x = ReadBits((x - kVarintSmallValue) * kBitsPerByte);
+    uint32_t x = ReadBits(kVarintBits);
+    return (x <= kVarintMax) ? x : ReadBits((x - kVarintMax) * kBitsPerByte);
+  }
+
+  // Read N 'interleaved' varints (different to just reading consecutive varints).
+  // All small values are stored first and the large values are stored after them.
+  // This requires fewer bit-reads compared to indidually storing the varints.
+  template<size_t N>
+  ALWAYS_INLINE std::array<uint32_t, N> ReadInterleavedVarints() {
+    static_assert(N * kVarintBits <= sizeof(uint64_t) * kBitsPerByte, "N too big");
+    std::array<uint32_t, N> values;
+    // StackMap BitTable uses over 8 varints in the header, so we need uint64_t.
+    uint64_t data = ReadBits<uint64_t>(N * kVarintBits);
+    for (size_t i = 0; i < N; i++) {
+      values[i] = BitFieldExtract(data, i * kVarintBits, kVarintBits);
     }
-    return x;
+    // Do the second part in its own loop as that seems to produce better code in clang.
+    for (size_t i = 0; i < N; i++) {
+      if (UNLIKELY(values[i] > kVarintMax)) {
+        values[i] = ReadBits((values[i] - kVarintMax) * kBitsPerByte);
+      }
+    }
+    return values;
   }
 
  private:
@@ -296,16 +322,26 @@
     Allocate(1).StoreBit(/* bit_offset */ 0, value);
   }
 
-  // Write variable-length bit-packed integer.
-  ALWAYS_INLINE void WriteVarint(uint32_t value) {
-    if (value <= kVarintSmallValue) {
-      WriteBits(value, kVarintHeaderBits);
-    } else {
-      uint32_t num_bits = RoundUp(MinimumBitsToStore(value), kBitsPerByte);
-      uint32_t header = kVarintSmallValue + num_bits / kBitsPerByte;
-      WriteBits(header, kVarintHeaderBits);
-      WriteBits(value, num_bits);
+  template<size_t N>
+  ALWAYS_INLINE void WriteInterleavedVarints(std::array<uint32_t, N> values) {
+    // Write small values (or the number of bytes needed for the large values).
+    for (uint32_t value : values) {
+      if (value > kVarintMax) {
+        WriteBits(kVarintMax + BitsToBytesRoundUp(MinimumBitsToStore(value)), kVarintBits);
+      } else {
+        WriteBits(value, kVarintBits);
+      }
     }
+    // Write large values.
+    for (uint32_t value : values) {
+      if (value > kVarintMax) {
+        WriteBits(value, BitsToBytesRoundUp(MinimumBitsToStore(value)) * kBitsPerByte);
+      }
+    }
+  }
+
+  ALWAYS_INLINE void WriteVarint(uint32_t value) {
+    WriteInterleavedVarints<1>({value});
   }
 
   ALWAYS_INLINE void ByteAlign() {
diff --git a/libartbase/base/bit_memory_region_test.cc b/libartbase/base/bit_memory_region_test.cc
index 02623bf..bf9c6d6 100644
--- a/libartbase/base/bit_memory_region_test.cc
+++ b/libartbase/base/bit_memory_region_test.cc
@@ -43,7 +43,7 @@
 
       BitMemoryReader reader(buffer.data(), start_bit_offset);
       uint32_t result = reader.ReadVarint();
-      uint32_t upper_bound = RoundUp(MinimumBitsToStore(value), kBitsPerByte) + kVarintHeaderBits;
+      uint32_t upper_bound = RoundUp(MinimumBitsToStore(value), kBitsPerByte) + kVarintBits;
       EXPECT_EQ(writer.NumberOfWrittenBits(), reader.NumberOfReadBits());
       EXPECT_EQ(value, result);
       EXPECT_GE(upper_bound, writer.NumberOfWrittenBits());
diff --git a/libartbase/base/bit_struct.h b/libartbase/base/bit_struct.h
index 292eca0..eca8780 100644
--- a/libartbase/base/bit_struct.h
+++ b/libartbase/base/bit_struct.h
@@ -17,6 +17,9 @@
 #ifndef ART_LIBARTBASE_BASE_BIT_STRUCT_H_
 #define ART_LIBARTBASE_BASE_BIT_STRUCT_H_
 
+#include <type_traits>
+
+#include "base/casts.h"
 #include "bit_struct_detail.h"
 #include "bit_utils.h"
 
@@ -29,9 +32,9 @@
 //
 //   // Definition for type 'Example'
 //   BITSTRUCT_DEFINE_START(Example, 10)
-//     BitStructUint<0, 2> u2;     // Every field must be a BitStruct[*].
-//     BitStructInt<2, 7>  i7;
-//     BitStructUint<9, 1> i1;
+//     BITSTRUCT_UINT(0, 2) u2;     // Every field must be a BitStruct[*] with the same StorageType,
+//     BITSTRUCT_INT(2, 7)  i7;     // preferably using BITSTRUCT_{FIELD,UINT,INT}
+//     BITSTRUCT_UINT(9, 1) i1;     // to fill in the StorageType parameter.
 //   BITSTRUCT_DEFINE_END(Example);
 //
 //  Would define a bit struct with this layout:
@@ -106,8 +109,8 @@
 // of T can be represented by kBitWidth.
 template <typename T,
           size_t kBitOffset,
-          size_t kBitWidth = BitStructSizeOf<T>(),
-          typename StorageType = typename detail::MinimumTypeUnsignedHelper<kBitOffset + kBitWidth>::type>
+          size_t kBitWidth,
+          typename StorageType>
 struct BitStructField {
   static_assert(std::is_standard_layout<T>::value, "T must be standard layout");
 
@@ -119,7 +122,7 @@
   template <typename _ = void,
             typename = std::enable_if_t<std::is_same<T, StorageType>::value, _>>
   explicit operator StorageType() const {
-    return GetStorage();
+    return BitFieldExtract(storage_, kBitOffset, kBitWidth);
   }
 
   BitStructField& operator=(T value) {
@@ -154,46 +157,27 @@
   }
 
   T Get() const {
-    ValueStorage vs;
-    vs.pod_.val_ = GetStorage();
-    return vs.value_;
+    ExtractionType storage = static_cast<ExtractionType>(storage_);
+    ExtractionType extracted = BitFieldExtract(storage, kBitOffset, kBitWidth);
+    ConversionType to_convert = dchecked_integral_cast<ConversionType>(extracted);
+    return ValueConverter::FromUnderlyingStorage(to_convert);
   }
 
   void Set(T value) {
-    ValueStorage value_as_storage;
-    value_as_storage.value_ = value;
-
-    storage_.pod_.val_ = BitFieldInsert(storage_.pod_.val_,
-                                        value_as_storage.pod_.val_,
-                                        kBitOffset,
-                                        kBitWidth);
+    ConversionType converted = ValueConverter::ToUnderlyingStorage(value);
+    ExtractionType extracted = dchecked_integral_cast<ExtractionType>(converted);
+    storage_ = BitFieldInsert(storage_, extracted, kBitOffset, kBitWidth);
   }
 
  private:
-  StorageType GetStorage() const {
-    return BitFieldExtract(storage_.pod_.val_, kBitOffset, kBitWidth);
-  }
+  using ValueConverter = detail::ValueConverter<T>;
+  using ConversionType = typename ValueConverter::StorageType;
+  using ExtractionType =
+      typename std::conditional<std::is_signed_v<ConversionType>,
+                                std::make_signed_t<StorageType>,
+                                StorageType>::type;
 
-  // Underlying value must be wrapped in a separate standard-layout struct.
-  // See below for more details.
-  struct PodWrapper {
-    StorageType val_;
-  };
-
-  union ValueStorage {
-    // Safely alias pod_ and value_ together.
-    //
-    // See C++ 9.5.1 [class.union]:
-    // If a standard-layout union contains several standard-layout structs that share a common
-    // initial sequence ... it is permitted to inspect the common initial sequence of any of
-    // standard-layout struct members.
-    PodWrapper pod_;
-    T value_;
-  } storage_;
-
-  // Future work: In theory almost non-standard layout can be supported here,
-  // assuming they don't rely on the address of (this).
-  // We just have to use memcpy since the union-aliasing would not work.
+  StorageType storage_;
 };
 
 // Base class for number-like BitStruct fields.
@@ -202,10 +186,8 @@
 //
 // (Common usage should be BitStructInt, BitStructUint -- this
 // intermediate template allows a user-defined integer to be used.)
-template <typename T, size_t kBitOffset, size_t kBitWidth>
-struct BitStructNumber : public BitStructField<T, kBitOffset, kBitWidth, /*StorageType*/T> {
-  using StorageType = T;
-
+template <typename T, size_t kBitOffset, size_t kBitWidth, typename StorageType>
+struct BitStructNumber : public BitStructField<T, kBitOffset, kBitWidth, StorageType> {
   BitStructNumber& operator=(T value) {
     return BaseType::Assign(*this, value);
   }
@@ -237,7 +219,7 @@
   }
 
  private:
-  using BaseType = BitStructField<T, kBitOffset, kBitWidth, /*StorageType*/T>;
+  using BaseType = BitStructField<T, kBitOffset, kBitWidth, StorageType>;
   using BaseType::Get;
 };
 
@@ -245,21 +227,23 @@
 // in order to be large enough to fit (kBitOffset + kBitWidth).
 //
 // Values are sign-extended when they are read out.
-template <size_t kBitOffset, size_t kBitWidth>
+template <size_t kBitOffset, size_t kBitWidth, typename StorageType>
 using BitStructInt =
     BitStructNumber<typename detail::MinimumTypeHelper<int, kBitOffset + kBitWidth>::type,
                     kBitOffset,
-                    kBitWidth>;
+                    kBitWidth,
+                    StorageType>;
 
 // Create a BitStruct field which uses the smallest underlying uint storage type,
 // in order to be large enough to fit (kBitOffset + kBitWidth).
 //
 // Values are zero-extended when they are read out.
-template <size_t kBitOffset, size_t kBitWidth>
+template <size_t kBitOffset, size_t kBitWidth, typename StorageType>
 using BitStructUint =
     BitStructNumber<typename detail::MinimumTypeHelper<unsigned int, kBitOffset + kBitWidth>::type,
                     kBitOffset,
-                    kBitWidth>;
+                    kBitWidth,
+                    StorageType>;
 
 // Start a definition for a bitstruct.
 // A bitstruct is defined to be a union with a common initial subsequence
@@ -276,6 +260,8 @@
 // standard-layout struct members.
 #define BITSTRUCT_DEFINE_START(name, bitwidth)                                        \
     union name {                                                         /* NOLINT */ \
+      using StorageType =                                                             \
+          typename detail::MinimumTypeUnsignedHelper<(bitwidth)>::type;               \
       art::detail::DefineBitStructSize<(bitwidth)> _;                                 \
       static constexpr size_t BitStructSizeOf() { return (bitwidth); }                \
       name& operator=(const name& other) { _ = other._; return *this; }  /* NOLINT */ \
@@ -283,6 +269,14 @@
       name() = default;                                                               \
       ~name() = default;
 
+// Define a field. See top of file for usage example.
+#define BITSTRUCT_FIELD(type, bit_offset, bit_width)                           \
+    BitStructField<type, (bit_offset), (bit_width), StorageType>
+#define BITSTRUCT_INT(bit_offset, bit_width)                                   \
+    BitStructInt<(bit_offset), (bit_width), StorageType>
+#define BITSTRUCT_UINT(bit_offset, bit_width)                                  \
+    BitStructUint<(bit_offset), (bit_width), StorageType>
+
 // End the definition of a bitstruct, and insert a sanity check
 // to ensure that the bitstruct did not exceed the specified size.
 //
diff --git a/libartbase/base/bit_struct_detail.h b/libartbase/base/bit_struct_detail.h
index 60de1b6..ad7c0f4 100644
--- a/libartbase/base/bit_struct_detail.h
+++ b/libartbase/base/bit_struct_detail.h
@@ -56,6 +56,50 @@
                               /* else */ type_unsigned>::type;
 };
 
+// Helper for converting to and from T to an integral type.
+template <typename T>
+union ValueConverter {
+  using StorageType = typename MinimumTypeHelper<T, sizeof(T) * kBitsPerByte>::type;
+
+  static constexpr StorageType ToUnderlyingStorage(T value) {
+    ValueConverter converter;
+    converter.value_.val_ = value;
+    return converter.storage_.val_;
+  }
+
+  static constexpr T FromUnderlyingStorage(StorageType storage) {
+    ValueConverter converter;
+    converter.storage_.val_ = storage;
+    return converter.value_.val_;
+  }
+
+  // Underlying values must be wrapped in separate standard-layout structs.
+  // See below for more details.
+  struct StorageWrapper {
+    StorageType val_;
+  };
+  struct ValueWrapper {
+    T val_;
+  };
+
+  // Safely alias storage_ and value_ together.
+  //
+  // See C++ 9.5.1 [class.union]:
+  // If a standard-layout union contains several standard-layout structs that share a common
+  // initial sequence ... it is permitted to inspect the common initial sequence of any of
+  // standard-layout struct members.
+  StorageWrapper storage_;
+  ValueWrapper value_;
+#if __cplusplus >= 202000L
+#error "When upgrading to C++20, remove this error and check that this is OK for all use cases."
+  static_assert(std::is_layout_compatible_v<StorageWrapper, ValueWrapper>);
+#endif
+
+  // Future work: In theory almost non-standard layout can be supported here,
+  // assuming they don't rely on the address of (this).
+  // We just have to use memcpy since the union-aliasing would not work.
+};
+
 // Denotes the beginning of a bit struct.
 //
 // This marker is required by the C++ standard in order to
diff --git a/libartbase/base/bit_struct_test.cc b/libartbase/base/bit_struct_test.cc
index a2389eb..2055782 100644
--- a/libartbase/base/bit_struct_test.cc
+++ b/libartbase/base/bit_struct_test.cc
@@ -61,33 +61,33 @@
 
 struct CustomBitStruct {
   CustomBitStruct() = default;
-  explicit CustomBitStruct(int8_t data) : data(data) {}
+  explicit CustomBitStruct(uint8_t data) : data(data) {}
 
   static constexpr size_t BitStructSizeOf() {
     return 4;
   }
 
-  int8_t data;
+  uint8_t data;
 };
 
 TEST(BitStructs, Custom) {
-  CustomBitStruct expected(0b1111);
+  CustomBitStruct expected(0b1111u);
 
-  BitStructField<CustomBitStruct, /*lsb=*/4, /*width=*/4> f{};
+  BitStructField<CustomBitStruct, /*lsb=*/4, /*width=*/4, uint8_t> f{};
 
   EXPECT_EQ(1u, sizeof(f));
 
-  f = CustomBitStruct(0b1111);
+  f = CustomBitStruct(0b1111u);
 
   CustomBitStruct read_out = f;
-  EXPECT_EQ(read_out.data, 0b1111);
+  EXPECT_EQ(read_out.data, 0b1111u);
 
   EXPECT_EQ(AsUint(f), 0b11110000u);
 }
 
 BITSTRUCT_DEFINE_START(TestTwoCustom, /* size= */ 8)
-  BitStructField<CustomBitStruct, /*lsb=*/0, /*width=*/4> f4_a;
-  BitStructField<CustomBitStruct, /*lsb=*/4, /*width=*/4> f4_b;
+  BITSTRUCT_FIELD(CustomBitStruct, /*lsb=*/0, /*width=*/4) f4_a;
+  BITSTRUCT_FIELD(CustomBitStruct, /*lsb=*/4, /*width=*/4) f4_b;
 BITSTRUCT_DEFINE_END(TestTwoCustom);
 
 TEST(BitStructs, TwoCustom) {
@@ -122,7 +122,7 @@
 }
 
 TEST(BitStructs, Number) {
-  BitStructNumber<uint16_t, /*lsb=*/4, /*width=*/4> bsn{};
+  BitStructNumber<uint16_t, /*lsb=*/4, /*width=*/4, uint16_t> bsn{};
   EXPECT_EQ(2u, sizeof(bsn));
 
   bsn = 0b1111;
@@ -135,25 +135,28 @@
   EXPECT_EQ(AsUint(bsn), 0b11110000u);
 }
 
-BITSTRUCT_DEFINE_START(TestBitStruct, /* size= */ 8)
-  BitStructInt</*lsb=*/0, /*width=*/3> i3;
-  BitStructUint</*lsb=*/3, /*width=*/4> u4;
+TEST(BitStructs, NumberNarrowStorage) {
+  BitStructNumber<uint16_t, /*lsb=*/4, /*width=*/4, uint8_t> bsn{};
+  EXPECT_EQ(1u, sizeof(bsn));
 
-  BitStructUint</*lsb=*/0, /*width=*/7> alias_all;
+  bsn = 0b1111;
+
+  uint32_t read_out = static_cast<uint32_t>(bsn);
+  uint32_t read_out_impl = bsn;
+
+  EXPECT_EQ(read_out, read_out_impl);
+  EXPECT_EQ(read_out, 0b1111u);
+  EXPECT_EQ(AsUint(bsn), 0b11110000u);
+}
+
+BITSTRUCT_DEFINE_START(TestBitStruct, /* size= */ 8)
+  BITSTRUCT_INT(/*lsb=*/0, /*width=*/3) i3;
+  BITSTRUCT_UINT(/*lsb=*/3, /*width=*/4) u4;
+
+  BITSTRUCT_UINT(/*lsb=*/0, /*width=*/7) alias_all;
 BITSTRUCT_DEFINE_END(TestBitStruct);
 
 TEST(BitStructs, Test1) {
-  {
-    // Check minimal size selection is correct.
-    BitStructInt</*lsb=*/0, /*width=*/3> i3;
-    BitStructUint</*lsb=*/3, /*width=*/4> u4;
-
-    BitStructUint</*lsb=*/0, /*width=*/7> alias_all;
-
-    EXPECT_EQ(1u, sizeof(i3));
-    EXPECT_EQ(1u, sizeof(u4));
-    EXPECT_EQ(1u, sizeof(alias_all));
-  }
   TestBitStruct tst{};
 
   // Check minimal size selection is correct.
@@ -217,11 +220,11 @@
 }
 
 BITSTRUCT_DEFINE_START(MixedSizeBitStruct, /* size= */ 32)
-  BitStructUint</*lsb=*/0, /*width=*/3> u3;
-  BitStructUint</*lsb=*/3, /*width=*/10> u10;
-  BitStructUint</*lsb=*/13, /*width=*/19> u19;
+  BITSTRUCT_UINT(/*lsb=*/0, /*width=*/3) u3;
+  BITSTRUCT_UINT(/*lsb=*/3, /*width=*/10) u10;
+  BITSTRUCT_UINT(/*lsb=*/13, /*width=*/19) u19;
 
-  BitStructUint</*lsb=*/0, /*width=*/32> alias_all;
+  BITSTRUCT_UINT(/*lsb=*/0, /*width=*/32) alias_all;
 BITSTRUCT_DEFINE_END(MixedSizeBitStruct);
 
 // static_assert(sizeof(MixedSizeBitStruct) == sizeof(uint32_t), "TestBitStructs#MixedSize");
@@ -256,10 +259,10 @@
 }
 
 BITSTRUCT_DEFINE_START(TestBitStruct_u8, /* size= */ 8)
-  BitStructInt</*lsb=*/0, /*width=*/3> i3;
-  BitStructUint</*lsb=*/3, /*width=*/4> u4;
+  BITSTRUCT_INT(/*lsb=*/0, /*width=*/3) i3;
+  BITSTRUCT_UINT(/*lsb=*/3, /*width=*/4) u4;
 
-  BitStructUint</*lsb=*/0, /*width=*/8> alias_all;
+  BITSTRUCT_UINT(/*lsb=*/0, /*width=*/8) alias_all;
 BITSTRUCT_DEFINE_END(TestBitStruct_u8);
 
 TEST(BitStructs, FieldAssignment) {
@@ -283,11 +286,15 @@
   }
 }
 
-BITSTRUCT_DEFINE_START(NestedStruct, /* size= */ 64)
-  BitStructField<MixedSizeBitStruct, /*lsb=*/0> mixed_lower;
-  BitStructField<MixedSizeBitStruct, /*lsb=*/32> mixed_upper;
+BITSTRUCT_DEFINE_START(NestedStruct, /* size= */ 2 * MixedSizeBitStruct::BitStructSizeOf())
+  BITSTRUCT_FIELD(MixedSizeBitStruct,
+                  /*lsb=*/0,
+                  /*width=*/MixedSizeBitStruct::BitStructSizeOf()) mixed_lower;
+  BITSTRUCT_FIELD(MixedSizeBitStruct,
+                  /*lsb=*/MixedSizeBitStruct::BitStructSizeOf(),
+                  /*width=*/MixedSizeBitStruct::BitStructSizeOf()) mixed_upper;
 
-  BitStructUint</*lsb=*/0, /*width=*/64> alias_all;
+  BITSTRUCT_UINT(/*lsb=*/0, /*width=*/ 2 * MixedSizeBitStruct::BitStructSizeOf()) alias_all;
 BITSTRUCT_DEFINE_END(NestedStruct);
 
 TEST(BitStructs, NestedFieldAssignment) {
diff --git a/libartbase/base/bit_table.h b/libartbase/base/bit_table.h
index d6a1d7b..0c1b04e 100644
--- a/libartbase/base/bit_table.h
+++ b/libartbase/base/bit_table.h
@@ -49,13 +49,12 @@
 
   ALWAYS_INLINE void Decode(BitMemoryReader& reader) {
     // Decode row count and column sizes from the table header.
-    num_rows_ = reader.ReadVarint();
-    if (num_rows_ != 0) {
-      column_offset_[0] = 0;
-      for (uint32_t i = 0; i < kNumColumns; i++) {
-        size_t column_end = column_offset_[i] + reader.ReadVarint();
-        column_offset_[i + 1] = dchecked_integral_cast<uint16_t>(column_end);
-      }
+    std::array<uint32_t, 1+kNumColumns> header = reader.ReadInterleavedVarints<1+kNumColumns>();
+    num_rows_ = header[0];
+    column_offset_[0] = 0;
+    for (uint32_t i = 0; i < kNumColumns; i++) {
+      size_t column_end = column_offset_[i] + header[i + 1];
+      column_offset_[i + 1] = dchecked_integral_cast<uint16_t>(column_end);
     }
 
     // Record the region which contains the table data and skip past it.
@@ -109,6 +108,7 @@
   static constexpr uint32_t kNumColumns = NumColumns;
   static constexpr uint32_t kNoValue = BitTableBase<kNumColumns>::kNoValue;
 
+  BitTableAccessor() = default;
   BitTableAccessor(const BitTableBase<kNumColumns>* table, uint32_t row)
       : table_(table), row_(row) {
     DCHECK(table_ != nullptr);
@@ -335,7 +335,7 @@
   }
 
   // Calculate the column bit widths based on the current data.
-  void Measure(/*out*/ std::array<uint32_t, kNumColumns>* column_bits) const {
+  void Measure(/*out*/ uint32_t* column_bits) const {
     uint32_t max_column_value[kNumColumns];
     std::fill_n(max_column_value, kNumColumns, 0);
     for (uint32_t r = 0; r < size(); r++) {
@@ -344,7 +344,7 @@
       }
     }
     for (uint32_t c = 0; c < kNumColumns; c++) {
-      (*column_bits)[c] = MinimumBitsToStore(max_column_value[c]);
+      column_bits[c] = MinimumBitsToStore(max_column_value[c]);
     }
   }
 
@@ -353,20 +353,17 @@
   void Encode(BitMemoryWriter<Vector>& out) const {
     size_t initial_bit_offset = out.NumberOfWrittenBits();
 
-    std::array<uint32_t, kNumColumns> column_bits;
-    Measure(&column_bits);
-    out.WriteVarint(size());
-    if (size() != 0) {
-      // Write table header.
-      for (uint32_t c = 0; c < kNumColumns; c++) {
-        out.WriteVarint(column_bits[c]);
-      }
+    // Write table header.
+    std::array<uint32_t, 1 + kNumColumns> header;
+    header[0] = size();
+    uint32_t* column_bits = header.data() + 1;
+    Measure(column_bits);
+    out.WriteInterleavedVarints(header);
 
-      // Write table data.
-      for (uint32_t r = 0; r < size(); r++) {
-        for (uint32_t c = 0; c < kNumColumns; c++) {
-          out.WriteBits(rows_[r][c] - kValueBias, column_bits[c]);
-        }
+    // Write table data.
+    for (uint32_t r = 0; r < size(); r++) {
+      for (uint32_t c = 0; c < kNumColumns; c++) {
+        out.WriteBits(rows_[r][c] - kValueBias, column_bits[c]);
       }
     }
 
@@ -444,16 +441,17 @@
   void Encode(BitMemoryWriter<Vector>& out) const {
     size_t initial_bit_offset = out.NumberOfWrittenBits();
 
-    out.WriteVarint(size());
-    if (size() != 0) {
-      out.WriteVarint(max_num_bits_);
+    // Write table header.
+    out.WriteInterleavedVarints(std::array<uint32_t, 2>{
+      dchecked_integral_cast<uint32_t>(size()),
+      dchecked_integral_cast<uint32_t>(max_num_bits_),
+    });
 
-      // Write table data.
-      for (MemoryRegion row : rows_) {
-        BitMemoryRegion src(row);
-        BitMemoryRegion dst = out.Allocate(max_num_bits_);
-        dst.StoreBits(/* bit_offset */ 0, src, std::min(max_num_bits_, src.size_in_bits()));
-      }
+    // Write table data.
+    for (MemoryRegion row : rows_) {
+      BitMemoryRegion src(row);
+      BitMemoryRegion dst = out.Allocate(max_num_bits_);
+      dst.StoreBits(/* bit_offset */ 0, src, std::min(max_num_bits_, src.size_in_bits()));
     }
 
     // Verify the written data.
diff --git a/libartbase/base/bit_table_test.cc b/libartbase/base/bit_table_test.cc
index bf32dc6..692861a 100644
--- a/libartbase/base/bit_table_test.cc
+++ b/libartbase/base/bit_table_test.cc
@@ -143,10 +143,10 @@
   BitMemoryWriter<std::vector<uint8_t>> writer(&buffer);
   const uint64_t value = 0xDEADBEEF0BADF00Dull;
   BitmapTableBuilder builder(&allocator);
-  std::multimap<uint64_t, size_t> indicies;  // bitmap -> row.
+  std::multimap<uint64_t, size_t> indices;  // bitmap -> row.
   for (size_t bit_length = 0; bit_length <= BitSizeOf<uint64_t>(); ++bit_length) {
     uint64_t bitmap = value & MaxInt<uint64_t>(bit_length);
-    indicies.emplace(bitmap, builder.Dedup(&bitmap, MinimumBitsToStore(bitmap)));
+    indices.emplace(bitmap, builder.Dedup(&bitmap, MinimumBitsToStore(bitmap)));
   }
   builder.Encode(writer);
   EXPECT_EQ(1 + static_cast<uint32_t>(POPCOUNT(value)), builder.size());
@@ -154,7 +154,7 @@
   BitMemoryReader reader(buffer.data());
   BitTableBase<1> table(reader);
   EXPECT_EQ(writer.NumberOfWrittenBits(), reader.NumberOfReadBits());
-  for (auto it : indicies) {
+  for (auto it : indices) {
     uint64_t expected = it.first;
     BitMemoryRegion actual = table.GetBitMemoryRegion(it.second);
     EXPECT_GE(actual.size_in_bits(), MinimumBitsToStore(expected));
diff --git a/libartbase/base/casts.h b/libartbase/base/casts.h
index 76ff679..cedd624 100644
--- a/libartbase/base/casts.h
+++ b/libartbase/base/casts.h
@@ -17,7 +17,6 @@
 #ifndef ART_LIBARTBASE_BASE_CASTS_H_
 #define ART_LIBARTBASE_BASE_CASTS_H_
 
-#include <assert.h>
 #include <stdint.h>
 #include <string.h>
 
diff --git a/libartbase/base/common_art_test.cc b/libartbase/base/common_art_test.cc
index 242db42..978f69c 100644
--- a/libartbase/base/common_art_test.cc
+++ b/libartbase/base/common_art_test.cc
@@ -19,8 +19,13 @@
 #include <dirent.h>
 #include <dlfcn.h>
 #include <fcntl.h>
+#include <ftw.h>
 #include <stdlib.h>
+#include <unistd.h>
 #include <cstdio>
+#include <filesystem>
+#include "android-base/file.h"
+#include "android-base/logging.h"
 #include "nativehelper/scoped_local_ref.h"
 
 #include "android-base/stringprintf.h"
@@ -47,6 +52,29 @@
 
 using android::base::StringPrintf;
 
+ScratchDir::ScratchDir() {
+  // ANDROID_DATA needs to be set
+  CHECK_NE(static_cast<char*>(nullptr), getenv("ANDROID_DATA")) <<
+      "Are you subclassing RuntimeTest?";
+  path_ = getenv("ANDROID_DATA");
+  path_ += "/tmp-XXXXXX";
+  bool ok = (mkdtemp(&path_[0]) != nullptr);
+  CHECK(ok) << strerror(errno) << " for " << path_;
+  path_ += "/";
+}
+
+ScratchDir::~ScratchDir() {
+  // Recursively delete the directory and all its content.
+  nftw(path_.c_str(), [](const char* name, const struct stat*, int type, struct FTW *) {
+    if (type == FTW_F) {
+      unlink(name);
+    } else if (type == FTW_DP) {
+      rmdir(name);
+    }
+    return 0;
+  }, 256 /* max open file descriptors */, FTW_DEPTH);
+}
+
 ScratchFile::ScratchFile() {
   // ANDROID_DATA needs to be set
   CHECK_NE(static_cast<char*>(nullptr), getenv("ANDROID_DATA")) <<
@@ -126,11 +154,33 @@
     if (android_host_out_from_env == nullptr) {
       // Not set by build server, so default to the usual value of
       // ANDROID_HOST_OUT.
-      std::string android_host_out = android_build_top_from_env;
+      std::string android_host_out;
 #if defined(__linux__)
-      android_host_out += "/out/host/linux-x86";
+      // Fallback
+      android_host_out = std::string(android_build_top_from_env) + "/out/host/linux-x86";
+      // Look at how we were invoked
+      std::string argv;
+      if (android::base::ReadFileToString("/proc/self/cmdline", &argv)) {
+        // /proc/self/cmdline is the program's 'argv' with elements delimited by '\0'.
+        std::string cmdpath(argv.substr(0, argv.find('\0')));
+        std::filesystem::path path(cmdpath);
+        // If the path is relative then prepend the android_build_top_from_env to it
+        if (path.is_relative()) {
+          path = std::filesystem::path(android_build_top_from_env).append(cmdpath);
+          DCHECK(path.is_absolute()) << path;
+        }
+        // Walk up until we find the linux-x86 directory or we hit the root directory.
+        while (path.has_parent_path() && path.parent_path() != path &&
+               path.filename() != std::filesystem::path("linux-x86")) {
+          path = path.parent_path();
+        }
+        // If we found a linux-x86 directory path is now android_host_out
+        if (path.filename() == std::filesystem::path("linux-x86")) {
+          android_host_out = path.string();
+        }
+      }
 #elif defined(__APPLE__)
-      android_host_out += "/out/host/darwin-x86";
+      android_host_out = std::string(android_build_top_from_env) + "/out/host/darwin-x86";
 #else
 #error unsupported OS
 #endif
@@ -147,20 +197,31 @@
       android_root_from_env = getenv("ANDROID_ROOT");
     }
 
-    // Environment variable ANDROID_RUNTIME_ROOT is set on the device, but not
+    // Environment variable ANDROID_I18N_ROOT is set on the device, but not
     // necessarily on the host. It needs to be set so that various libraries
-    // like icu4c can find their data files.
-    const char* android_runtime_root_from_env = getenv("ANDROID_RUNTIME_ROOT");
-    if (android_runtime_root_from_env == nullptr) {
-      // Use ${ANDROID_HOST_OUT}/com.android.runtime for ANDROID_RUNTIME_ROOT.
-      std::string android_runtime_root = android_host_out_from_env;
-      android_runtime_root += "/com.android.runtime";
-      setenv("ANDROID_RUNTIME_ROOT", android_runtime_root.c_str(), 1);
+    // like libcore / icu4j / icu4c can find their data files.
+    const char* android_i18n_root_from_env = getenv("ANDROID_I18N_ROOT");
+    if (android_i18n_root_from_env == nullptr) {
+      // Use ${ANDROID_I18N_OUT}/com.android.i18n for ANDROID_I18N_ROOT.
+      std::string android_i18n_root = android_host_out_from_env;
+      android_i18n_root += "/com.android.i18n";
+      setenv("ANDROID_I18N_ROOT", android_i18n_root.c_str(), 1);
+    }
+
+    // Environment variable ANDROID_ART_ROOT is set on the device, but not
+    // necessarily on the host. It needs to be set so that various libraries
+    // like libcore / icu4j / icu4c can find their data files.
+    const char* android_art_root_from_env = getenv("ANDROID_ART_ROOT");
+    if (android_art_root_from_env == nullptr) {
+      // Use ${ANDROID_HOST_OUT}/com.android.art for ANDROID_ART_ROOT.
+      std::string android_art_root = android_host_out_from_env;
+      android_art_root += "/com.android.art";
+      setenv("ANDROID_ART_ROOT", android_art_root.c_str(), 1);
     }
 
     // Environment variable ANDROID_TZDATA_ROOT is set on the device, but not
     // necessarily on the host. It needs to be set so that various libraries
-    // like icu4c can find their data files.
+    // like libcore / icu4j / icu4c can find their data files.
     const char* android_tzdata_root_from_env = getenv("ANDROID_TZDATA_ROOT");
     if (android_tzdata_root_from_env == nullptr) {
       // Use ${ANDROID_HOST_OUT}/com.android.tzdata for ANDROID_TZDATA_ROOT.
@@ -199,6 +260,11 @@
   dalvik_cache_.append("/dalvik-cache");
   int mkdir_result = mkdir(dalvik_cache_.c_str(), 0700);
   ASSERT_EQ(mkdir_result, 0);
+
+  static bool gSlowDebugTestFlag = false;
+  RegisterRuntimeDebugFlag(&gSlowDebugTestFlag);
+  SetRuntimeDebugFlagsEnabled(true);
+  CHECK(gSlowDebugTestFlag);
 }
 
 void CommonArtTestImpl::TearDownAndroidDataDir(const std::string& android_data,
@@ -322,20 +388,13 @@
 }
 
 static std::string GetDexFileName(const std::string& jar_prefix, bool host) {
-  std::string path;
   if (host) {
-    const char* host_dir = getenv("ANDROID_HOST_OUT");
-    CHECK(host_dir != nullptr);
-    path = host_dir;
+    std::string path = GetAndroidRoot();
+    return StringPrintf("%s/framework/%s-hostdex.jar", path.c_str(), jar_prefix.c_str());
   } else {
-    path = GetAndroidRoot();
+    const char* apex = (jar_prefix == "conscrypt") ? "com.android.conscrypt" : "com.android.art";
+    return StringPrintf("/apex/%s/javalib/%s.jar", apex, jar_prefix.c_str());
   }
-
-  std::string suffix = host
-      ? "-hostdex"                 // The host version.
-      : "-testdex";                // The unstripped target version.
-
-  return StringPrintf("%s/framework/%s%s.jar", path.c_str(), jar_prefix.c_str(), suffix.c_str());
 }
 
 std::vector<std::string> CommonArtTestImpl::GetLibCoreModuleNames() const {
@@ -346,6 +405,7 @@
       // CORE_IMG_JARS modules.
       "core-oj",
       "core-libart",
+      "core-icu4j",
       "okhttp",
       "bouncycastle",
       "apache-xml",
@@ -400,15 +460,6 @@
   return option + android::base::Join(class_path, ':');
 }
 
-std::string CommonArtTestImpl::GetTestAndroidRoot() {
-  if (IsHost()) {
-    const char* host_dir = getenv("ANDROID_HOST_OUT");
-    CHECK(host_dir != nullptr);
-    return host_dir;
-  }
-  return GetAndroidRoot();
-}
-
 // Check that for target builds we have ART_TARGET_NATIVETEST_DIR set.
 #ifdef ART_TARGET
 #ifndef ART_TARGET_NATIVETEST_DIR
@@ -424,8 +475,7 @@
   CHECK(name != nullptr);
   std::string filename;
   if (IsHost()) {
-    filename += getenv("ANDROID_HOST_OUT");
-    filename += "/framework/";
+    filename += GetAndroidRoot() + "/framework/";
   } else {
     filename += ART_TARGET_NATIVETEST_DIR_STRING;
   }
@@ -475,11 +525,10 @@
 
   std::string location;
   if (IsHost()) {
-    const char* host_dir = getenv("ANDROID_HOST_OUT");
-    CHECK(host_dir != nullptr);
-    location = StringPrintf("%s/framework/core.%s", host_dir, suffix);
+    std::string host_dir = GetAndroidRoot();
+    location = StringPrintf("%s/framework/core.%s", host_dir.c_str(), suffix);
   } else {
-    location = StringPrintf("/data/art-test/core.%s", suffix);
+    location = StringPrintf("/apex/com.android.art/javalib/boot.%s", suffix);
   }
 
   return location;
diff --git a/libartbase/base/common_art_test.h b/libartbase/base/common_art_test.h
index 9b23d15..8cd25c3 100644
--- a/libartbase/base/common_art_test.h
+++ b/libartbase/base/common_art_test.h
@@ -26,37 +26,39 @@
 
 #include <android-base/logging.h>
 
+#include "base/file_utils.h"
 #include "base/globals.h"
+#include "base/memory_tool.h"
 #include "base/mutex.h"
 #include "base/os.h"
 #include "base/unix_file/fd_file.h"
 #include "dex/art_dex_file_loader.h"
 #include "dex/compact_dex_level.h"
-#include "obj_ptr-inl.h"
+#include "dex/compact_dex_file.h"
 
 namespace art {
 
 using LogSeverity = android::base::LogSeverity;
 using ScopedLogSeverity = android::base::ScopedLogSeverity;
 
-template<class MirrorType>
-static inline ObjPtr<MirrorType> MakeObjPtr(MirrorType* ptr) {
-  return ptr;
-}
-
-template<class MirrorType>
-static inline ObjPtr<MirrorType> MakeObjPtr(ObjPtr<MirrorType> ptr) {
-  return ptr;
-}
-
-// OBJ pointer helpers to avoid needing .Decode everywhere.
-#define EXPECT_OBJ_PTR_EQ(a, b) EXPECT_EQ(MakeObjPtr(a).Ptr(), MakeObjPtr(b).Ptr())
-#define ASSERT_OBJ_PTR_EQ(a, b) ASSERT_EQ(MakeObjPtr(a).Ptr(), MakeObjPtr(b).Ptr())
-#define EXPECT_OBJ_PTR_NE(a, b) EXPECT_NE(MakeObjPtr(a).Ptr(), MakeObjPtr(b).Ptr())
-#define ASSERT_OBJ_PTR_NE(a, b) ASSERT_NE(MakeObjPtr(a).Ptr(), MakeObjPtr(b).Ptr())
-
 class DexFile;
 
+class ScratchDir {
+ public:
+  ScratchDir();
+
+  ~ScratchDir();
+
+  const std::string& GetPath() const {
+    return path_;
+  }
+
+ private:
+  std::string path_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScratchDir);
+};
+
 class ScratchFile {
  public:
   ScratchFile();
@@ -91,13 +93,79 @@
   std::unique_ptr<File> file_;
 };
 
+// Class to store a fake dex file and its underlying data.
+class FakeDex {
+ public:
+  static std::unique_ptr<FakeDex> Create(
+      const std::string& location,
+      uint32_t checksum,
+      uint32_t num_method_ids) {
+    FakeDex* fake_dex = new FakeDex();
+    fake_dex->dex = CreateFakeDex(location, checksum, num_method_ids, &fake_dex->storage);
+    return std::unique_ptr<FakeDex>(fake_dex);
+  }
+
+  static std::unique_ptr<const DexFile> CreateFakeDex(
+      const std::string& location,
+      uint32_t checksum,
+      uint32_t num_method_ids,
+      std::vector<uint8_t>* storage) {
+    storage->resize(kPageSize);
+    CompactDexFile::Header* header =
+        const_cast<CompactDexFile::Header*>(CompactDexFile::Header::At(storage->data()));
+    CompactDexFile::WriteMagic(header->magic_);
+    CompactDexFile::WriteCurrentVersion(header->magic_);
+    header->data_off_ = 0;
+    header->data_size_ = storage->size();
+    header->method_ids_size_ = num_method_ids;
+
+    const DexFileLoader dex_file_loader;
+    std::string error_msg;
+    std::unique_ptr<const DexFile> dex(dex_file_loader.Open(storage->data(),
+                                                            storage->size(),
+                                                            location,
+                                                            checksum,
+                                                            /*oat_dex_file=*/nullptr,
+                                                            /*verify=*/false,
+                                                            /*verify_checksum=*/false,
+                                                            &error_msg));
+    CHECK(dex != nullptr) << error_msg;
+    return dex;
+  }
+
+  std::unique_ptr<const DexFile>& Dex() {
+    return dex;
+  }
+
+ private:
+  std::vector<uint8_t> storage;
+  std::unique_ptr<const DexFile> dex;
+};
+
+// Convenience class to store multiple fake dex files in order to make
+// allocation/de-allocation easier in tests.
+class FakeDexStorage {
+ public:
+  const DexFile* AddFakeDex(
+      const std::string& location,
+      uint32_t checksum,
+      uint32_t num_method_ids) {
+    fake_dex_files.push_back(FakeDex::Create(location, checksum, num_method_ids));
+    return fake_dex_files.back()->Dex().get();
+  }
+
+ private:
+  std::vector<std::unique_ptr<FakeDex>> fake_dex_files;
+};
+
 class CommonArtTestImpl {
  public:
   CommonArtTestImpl() = default;
   virtual ~CommonArtTestImpl() = default;
 
-  // Set up ANDROID_BUILD_TOP, ANDROID_HOST_OUT, ANDROID_ROOT, ANDROID_RUNTIME_ROOT,
-  // and ANDROID_TZDATA_ROOT environment variables using sensible defaults if not already set.
+  // Set up ANDROID_BUILD_TOP, ANDROID_HOST_OUT, ANDROID_ROOT, ANDROID_I18N_ROOT,
+  // ANDROID_ART_ROOT, and ANDROID_TZDATA_ROOT environment variables using sensible defaults
+  // if not already set.
   static void SetUpAndroidRootEnvVars();
 
   // Set up the ANDROID_DATA environment variable, creating the directory if required.
@@ -201,8 +269,6 @@
 
   void ClearDirectory(const char* dirpath, bool recursive = true);
 
-  std::string GetTestAndroidRoot();
-
   // Open a file (allows reading of framework jars).
   std::vector<std::unique_ptr<const DexFile>> OpenDexFiles(const char* filename);
 
diff --git a/libartbase/base/endian_utils.h b/libartbase/base/endian_utils.h
new file mode 100644
index 0000000..414668c
--- /dev/null
+++ b/libartbase/base/endian_utils.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_ENDIAN_UTILS_H_
+#define ART_LIBARTBASE_BASE_ENDIAN_UTILS_H_
+
+#include <stdint.h>
+#include <endian.h>
+#include <vector>
+
+namespace art {
+
+template<typename T>
+inline void AppendBytes(std::vector<uint8_t>& bytes, T data) {
+  size_t size = bytes.size();
+  bytes.resize(size + sizeof(T));
+  memcpy(bytes.data() + size, &data, sizeof(T));
+}
+
+inline void Append1BE(std::vector<uint8_t>& bytes, uint8_t value) {
+  bytes.push_back(value);
+}
+
+inline void Append2BE(std::vector<uint8_t>& bytes, uint16_t value) {
+  AppendBytes<uint16_t>(bytes, htobe16(value));
+}
+
+inline void Append4BE(std::vector<uint8_t>& bytes, uint32_t value) {
+  AppendBytes<uint32_t>(bytes, htobe32(value));
+}
+
+inline void Append8BE(std::vector<uint8_t>& bytes, uint64_t value) {
+  AppendBytes<uint64_t>(bytes, htobe64(value));
+}
+
+inline void AppendUtf16BE(std::vector<uint8_t>& bytes, const uint16_t* chars, size_t char_count) {
+  Append4BE(bytes, char_count);
+  for (size_t i = 0; i < char_count; ++i) {
+    Append2BE(bytes, chars[i]);
+  }
+}
+
+inline void AppendUtf16CompressedBE(std::vector<uint8_t>& bytes,
+                                    const uint8_t* chars,
+                                    size_t char_count) {
+  Append4BE(bytes, char_count);
+  for (size_t i = 0; i < char_count; ++i) {
+    Append2BE(bytes, static_cast<uint16_t>(chars[i]));
+  }
+}
+
+template <typename T>
+inline void SetBytes(uint8_t* buf, T val) {
+  memcpy(buf, &val, sizeof(T));
+}
+
+inline void Set1(uint8_t* buf, uint8_t val) {
+  *buf = val;
+}
+
+inline void Set2BE(uint8_t* buf, uint16_t val) {
+  SetBytes<uint16_t>(buf, htobe16(val));
+}
+
+inline void Set4BE(uint8_t* buf, uint32_t val) {
+  SetBytes<uint32_t>(buf, htobe32(val));
+}
+
+inline void Set8BE(uint8_t* buf, uint64_t val) {
+  SetBytes<uint64_t>(buf, htobe64(val));
+}
+
+inline void Write1BE(uint8_t** dst, uint8_t value) {
+  Set1(*dst, value);
+  *dst += sizeof(value);
+}
+
+inline void Write2BE(uint8_t** dst, uint16_t value) {
+  Set2BE(*dst, value);
+  *dst += sizeof(value);
+}
+
+inline void Write4BE(uint8_t** dst, uint32_t value) {
+  Set4BE(*dst, value);
+  *dst += sizeof(value);
+}
+
+inline void Write8BE(uint8_t** dst, uint64_t value) {
+  Set8BE(*dst, value);
+  *dst += sizeof(value);
+}
+
+}  // namespace art
+
+#endif  // ART_LIBARTBASE_BASE_ENDIAN_UTILS_H_
diff --git a/libartbase/base/file_utils.cc b/libartbase/base/file_utils.cc
index 1216ab0..a899b86 100644
--- a/libartbase/base/file_utils.cc
+++ b/libartbase/base/file_utils.cc
@@ -70,33 +70,41 @@
 static constexpr const char* kAndroidRootDefaultPath = "/system";
 static constexpr const char* kAndroidDataEnvVar = "ANDROID_DATA";
 static constexpr const char* kAndroidDataDefaultPath = "/data";
-static constexpr const char* kAndroidRuntimeRootEnvVar = "ANDROID_RUNTIME_ROOT";
-static constexpr const char* kAndroidRuntimeApexDefaultPath = "/apex/com.android.runtime";
+static constexpr const char* kAndroidArtRootEnvVar = "ANDROID_ART_ROOT";
+static constexpr const char* kAndroidArtApexDefaultPath = "/apex/com.android.art";
 static constexpr const char* kAndroidConscryptRootEnvVar = "ANDROID_CONSCRYPT_ROOT";
 static constexpr const char* kAndroidConscryptApexDefaultPath = "/apex/com.android.conscrypt";
 
-bool ReadFileToString(const std::string& file_name, std::string* result) {
-  File file(file_name, O_RDONLY, false);
-  if (!file.IsOpened()) {
-    return false;
-  }
+// Get the "root" directory containing the "lib" directory where this instance
+// of the libartbase library (which contains `GetRootContainingLibartbase`) is
+// located:
+// - on host this "root" is normally the Android Root (e.g. something like
+//   "$ANDROID_BUILD_TOP/out/host/linux-x86/");
+// - on target this "root" is normally the ART Root ("/apex/com.android.art").
+// Return the empty string if that directory cannot be found or if this code is
+// run on Windows or macOS.
+static std::string GetRootContainingLibartbase() {
+#if !defined( _WIN32) && !defined(__APPLE__)
+  // Check where libartbase is from, and derive from there.
+  Dl_info info;
+  if (dladdr(reinterpret_cast<const void*>(&GetRootContainingLibartbase), /* out */ &info) != 0) {
+    // Make a duplicate of the fname so dirname can modify it.
+    UniqueCPtr<char> fname(strdup(info.dli_fname));
 
-  std::vector<char> buf(8 * KB);
-  while (true) {
-    int64_t n = TEMP_FAILURE_RETRY(read(file.Fd(), &buf[0], buf.size()));
-    if (n == -1) {
-      return false;
+    char* dir1 = dirname(fname.get());  // This is the lib directory.
+    char* dir2 = dirname(dir1);         // This is the "root" directory.
+    if (OS::DirectoryExists(dir2)) {
+      std::string tmp = dir2;  // Make a copy here so that fname can be released.
+      return tmp;
     }
-    if (n == 0) {
-      return true;
-    }
-    result->append(&buf[0], n);
   }
+#endif
+  return "";
 }
 
 std::string GetAndroidRootSafe(std::string* error_msg) {
 #ifdef _WIN32
-  UNUSED(kAndroidRootEnvVar, kAndroidRootDefaultPath);
+  UNUSED(kAndroidRootEnvVar, kAndroidRootDefaultPath, GetRootContainingLibartbase);
   *error_msg = "GetAndroidRootSafe unsupported for Windows.";
   return "";
 #else
@@ -104,33 +112,30 @@
   const char* android_root_from_env = getenv(kAndroidRootEnvVar);
   if (android_root_from_env != nullptr) {
     if (!OS::DirectoryExists(android_root_from_env)) {
-      *error_msg = StringPrintf("Failed to find ANDROID_ROOT directory %s", android_root_from_env);
+      *error_msg =
+          StringPrintf("Failed to find %s directory %s", kAndroidRootEnvVar, android_root_from_env);
       return "";
     }
     return android_root_from_env;
   }
 
-  // Check where libart is from, and derive from there. Only do this for non-Mac.
-#ifndef __APPLE__
-  {
-    Dl_info info;
-    if (dladdr(reinterpret_cast<const void*>(&GetAndroidRootSafe), /* out */ &info) != 0) {
-      // Make a duplicate of the fname so dirname can modify it.
-      UniqueCPtr<char> fname(strdup(info.dli_fname));
-
-      char* dir1 = dirname(fname.get());  // This is the lib directory.
-      char* dir2 = dirname(dir1);         // This is the "system" directory.
-      if (OS::DirectoryExists(dir2)) {
-        std::string tmp = dir2;  // Make a copy here so that fname can be released.
-        return tmp;
-      }
+  // On host, libartbase is currently installed in "$ANDROID_ROOT/lib"
+  // (e.g. something like "$ANDROID_BUILD_TOP/out/host/linux-x86/lib". Use this
+  // information to infer the location of the Android Root (on host only).
+  //
+  // Note that this could change in the future, if we decided to install ART
+  // artifacts in a different location, e.g. within an "ART APEX" directory.
+  if (!kIsTargetBuild) {
+    std::string root_containing_libartbase = GetRootContainingLibartbase();
+    if (!root_containing_libartbase.empty()) {
+      return root_containing_libartbase;
     }
   }
-#endif
 
   // Try the default path.
   if (!OS::DirectoryExists(kAndroidRootDefaultPath)) {
-    *error_msg = StringPrintf("Failed to find directory %s", kAndroidRootDefaultPath);
+    *error_msg =
+        StringPrintf("Failed to find default Android Root directory %s", kAndroidRootDefaultPath);
     return "";
   }
   return kAndroidRootDefaultPath;
@@ -162,7 +167,7 @@
     }
   }
   if (must_exist && !OS::DirectoryExists(android_dir)) {
-    *error_msg = StringPrintf("Failed to find %s directory %s", env_var, android_dir);
+    *error_msg = StringPrintf("Failed to find directory %s", android_dir);
     return nullptr;
   }
   return android_dir;
@@ -179,16 +184,82 @@
   }
 }
 
-std::string GetAndroidRuntimeRootSafe(std::string* error_msg) {
-  const char* android_dir = GetAndroidDirSafe(kAndroidRuntimeRootEnvVar,
-                                              kAndroidRuntimeApexDefaultPath,
-                                              /* must_exist= */ true,
-                                              error_msg);
-  return (android_dir != nullptr) ? android_dir : "";
+static std::string GetArtRootSafe(bool must_exist, /*out*/ std::string* error_msg) {
+#ifdef _WIN32
+  UNUSED(kAndroidArtRootEnvVar, kAndroidArtApexDefaultPath, GetRootContainingLibartbase);
+  UNUSED(must_exist);
+  *error_msg = "GetArtRootSafe unsupported for Windows.";
+  return "";
+#else
+  // Prefer ANDROID_ART_ROOT if it's set.
+  const char* android_art_root_from_env = getenv(kAndroidArtRootEnvVar);
+  if (android_art_root_from_env != nullptr) {
+    if (must_exist && !OS::DirectoryExists(android_art_root_from_env)) {
+      *error_msg = StringPrintf("Failed to find %s directory %s",
+                                kAndroidArtRootEnvVar,
+                                android_art_root_from_env);
+      return "";
+    }
+    return android_art_root_from_env;
+  }
+
+  // On target, libartbase is normally installed in
+  // "$ANDROID_ART_ROOT/lib(64)" (e.g. something like
+  // "/apex/com.android.art/lib(64)". Use this information to infer the
+  // location of the ART Root (on target only).
+  if (kIsTargetBuild) {
+    // *However*, a copy of libartbase may still be installed outside the
+    // ART Root on some occasions, as ART target gtests install their binaries
+    // and their dependencies under the Android Root, i.e. "/system" (see
+    // b/129534335). For that reason, we cannot reliably use
+    // `GetRootContainingLibartbase` to find the ART Root. (Note that this is
+    // not really a problem in practice, as Android Q devices define
+    // ANDROID_ART_ROOT in their default environment, and will instead use
+    // the logic above anyway.)
+    //
+    // TODO(b/129534335): Re-enable this logic when the only instance of
+    // libartbase on target is the one from the ART APEX.
+    if ((false)) {
+      std::string root_containing_libartbase = GetRootContainingLibartbase();
+      if (!root_containing_libartbase.empty()) {
+        return root_containing_libartbase;
+      }
+    }
+  }
+
+  // Try the default path.
+  if (must_exist && !OS::DirectoryExists(kAndroidArtApexDefaultPath)) {
+    *error_msg = StringPrintf("Failed to find default ART root directory %s",
+                              kAndroidArtApexDefaultPath);
+    return "";
+  }
+  return kAndroidArtApexDefaultPath;
+#endif
 }
 
-std::string GetAndroidRuntimeRoot() {
-  return GetAndroidDir(kAndroidRuntimeRootEnvVar, kAndroidRuntimeApexDefaultPath);
+std::string GetArtRootSafe(std::string* error_msg) {
+  return GetArtRootSafe(/* must_exist= */ true, error_msg);
+}
+
+std::string GetArtRoot() {
+  std::string error_msg;
+  std::string ret = GetArtRootSafe(&error_msg);
+  if (ret.empty()) {
+    LOG(FATAL) << error_msg;
+    UNREACHABLE();
+  }
+  return ret;
+}
+
+std::string GetArtBinDir() {
+  // Environment variable `ANDROID_ART_ROOT` is defined as
+  // `$ANDROID_HOST_OUT/com.android.art` on host. However, host ART binaries are
+  // still installed in `$ANDROID_HOST_OUT/bin` (i.e. outside the ART Root). The
+  // situation is cleaner on target, where `ANDROID_ART_ROOT` is
+  // `$ANDROID_ROOT/apex/com.android.art` and ART binaries are installed in
+  // `$ANDROID_ROOT/apex/com.android.art/bin`.
+  std::string android_art_root = kIsTargetBuild ? GetArtRoot() : GetAndroidRoot();
+  return android_art_root + "/bin";
 }
 
 std::string GetAndroidDataSafe(std::string* error_msg) {
@@ -204,7 +275,13 @@
 }
 
 std::string GetDefaultBootImageLocation(const std::string& android_root) {
-  return StringPrintf("%s/framework/boot.art", android_root.c_str());
+  // Boot image consists of two parts:
+  //  - the primary boot image in the ART apex (contains the Core Libraries)
+  //  - the boot image extension on the system partition (contains framework libraries)
+  return StringPrintf("%s/javalib/boot.art:%s/framework/boot-framework.art!%s/etc/boot-image.prof",
+                      kAndroidArtApexDefaultPath,
+                      android_root.c_str(),
+                      android_root.c_str());
 }
 
 std::string GetDefaultBootImageLocation(std::string* error_msg) {
@@ -309,6 +386,15 @@
   }
 }
 
+bool LocationIsOnArtModule(const char* full_path) {
+  std::string unused_error_msg;
+  std::string module_path = GetArtRootSafe(/* must_exist= */ kIsTargetBuild, &unused_error_msg);
+  if (module_path.empty()) {
+    return false;
+  }
+  return android::base::StartsWith(full_path, module_path);
+}
+
 static bool StartsWithSlash(const char* str) {
   DCHECK(str != nullptr);
   return str[0] == '/';
@@ -364,10 +450,6 @@
                             /* subdir= */ "framework/");
 }
 
-bool LocationIsOnRuntimeModule(const char* full_path) {
-  return IsLocationOnModule(full_path, kAndroidRuntimeRootEnvVar, kAndroidRuntimeApexDefaultPath);
-}
-
 bool LocationIsOnConscryptModule(const char* full_path) {
   return IsLocationOnModule(
       full_path, kAndroidConscryptRootEnvVar, kAndroidConscryptApexDefaultPath);
@@ -389,19 +471,19 @@
 #endif
 }
 
-bool RuntimeModuleRootDistinctFromAndroidRoot() {
+bool ArtModuleRootDistinctFromAndroidRoot() {
   std::string error_msg;
   const char* android_root = GetAndroidDirSafe(kAndroidRootEnvVar,
                                                kAndroidRootDefaultPath,
                                                /* must_exist= */ kIsTargetBuild,
                                                &error_msg);
-  const char* runtime_root = GetAndroidDirSafe(kAndroidRuntimeRootEnvVar,
-                                               kAndroidRuntimeApexDefaultPath,
-                                               /* must_exist= */ kIsTargetBuild,
-                                               &error_msg);
+  const char* art_root = GetAndroidDirSafe(kAndroidArtRootEnvVar,
+                                           kAndroidArtApexDefaultPath,
+                                           /* must_exist= */ kIsTargetBuild,
+                                           &error_msg);
   return (android_root != nullptr)
-      && (runtime_root != nullptr)
-      && (std::string_view(android_root) != std::string_view(runtime_root));
+      && (art_root != nullptr)
+      && (std::string_view(android_root) != std::string_view(art_root));
 }
 
 int DupCloexec(int fd) {
diff --git a/libartbase/base/file_utils.h b/libartbase/base/file_utils.h
index 92b09c9..6a57fbd 100644
--- a/libartbase/base/file_utils.h
+++ b/libartbase/base/file_utils.h
@@ -27,17 +27,31 @@
 
 namespace art {
 
-bool ReadFileToString(const std::string& file_name, std::string* result);
-
+// These methods return the Android Root, which is the historical location of
+// the Android "system" directory, containing the built Android artifacts. On
+// target, this is normally "/system". On host this is usually a directory under
+// the build tree, e.g. "$ANDROID_BUILD_TOP/out/host/linux-x86". The location of
+// the Android Root can be overridden using the ANDROID_ROOT environment
+// variable.
+//
 // Find $ANDROID_ROOT, /system, or abort.
 std::string GetAndroidRoot();
 // Find $ANDROID_ROOT, /system, or return an empty string.
 std::string GetAndroidRootSafe(/*out*/ std::string* error_msg);
 
-// Find $ANDROID_RUNTIME_ROOT, /apex/com.android.runtime, or abort.
-std::string GetAndroidRuntimeRoot();
-// Find $ANDROID_RUNTIME_ROOT, /apex/com.android.runtime, or return an empty string.
-std::string GetAndroidRuntimeRootSafe(/*out*/ std::string* error_msg);
+// These methods return the ART Root, which is the location of the (activated)
+// ART APEX module. On target, this is normally "/apex/com.android.art". On
+// host, this is usually a subdirectory of the Android Root, e.g.
+// "$ANDROID_BUILD_TOP/out/host/linux-x86/com.android.art". The location of the
+// ART root can be overridden using the ANDROID_ART_ROOT environment variable.
+//
+// Find $ANDROID_ART_ROOT, /apex/com.android.art, or abort.
+std::string GetArtRoot();
+// Find $ANDROID_ART_ROOT, /apex/com.android.art, or return an empty string.
+std::string GetArtRootSafe(/*out*/ std::string* error_msg);
+
+// Return the path to the directory containing the ART binaries.
+std::string GetArtBinDir();
 
 // Find $ANDROID_DATA, /data, or abort.
 std::string GetAndroidData();
@@ -80,8 +94,8 @@
 //          ReplaceFileExtension("foo", "abc") == "foo.abc"
 std::string ReplaceFileExtension(const std::string& filename, const std::string& new_extension);
 
-// Return whether the location is on /apex/com.android.runtime
-bool LocationIsOnRuntimeModule(const char* location);
+// Return whether the location is on /apex/com.android.art
+bool LocationIsOnArtModule(const char* location);
 
 // Return whether the location is on /apex/com.android.conscrypt
 bool LocationIsOnConscryptModule(const char* location);
@@ -95,9 +109,9 @@
 // Return whether the location is on /apex/.
 bool LocationIsOnApex(const char* location);
 
-// Compare the runtime module root against android root. Returns true if they are
+// Compare the ART module root against android root. Returns true if they are
 // both known and distinct. This is meant to be a proxy for 'running with apex'.
-bool RuntimeModuleRootDistinctFromAndroidRoot();
+bool ArtModuleRootDistinctFromAndroidRoot();
 
 // dup(2), except setting the O_CLOEXEC flag atomically, when possible.
 int DupCloexec(int fd);
diff --git a/libartbase/base/file_utils_test.cc b/libartbase/base/file_utils_test.cc
index 0a5a7a7..85c1104 100644
--- a/libartbase/base/file_utils_test.cc
+++ b/libartbase/base/file_utils_test.cc
@@ -79,47 +79,80 @@
   ASSERT_EQ(0, setenv("ANDROID_ROOT", "/this/is/obviously/bogus", /* overwrite */ 1));
   EXPECT_EQ(GetAndroidRootSafe(&error_msg), "");
 
-  // Unset ANDROID_ROOT and see that it still returns something (as libart code is running).
-  ASSERT_EQ(0, unsetenv("ANDROID_ROOT"));
-  std::string android_root3 = GetAndroidRootSafe(&error_msg);
-  // This should be the same as the other root (modulo realpath), otherwise the test setup is
-  // broken. On non-bionic. On bionic we can be running with a different libart that lives outside
-  // of ANDROID_ROOT
-  UniqueCPtr<char> real_root3(realpath(android_root3.c_str(), nullptr));
+  // Inferring the Android Root from the location of libartbase only works on host.
+  if (!kIsTargetBuild) {
+    // Unset ANDROID_ROOT and see that it still returns something (as libartbase code is running).
+    ASSERT_EQ(0, unsetenv("ANDROID_ROOT"));
+    std::string android_root3 = GetAndroidRootSafe(&error_msg);
+    // This should be the same as the other root (modulo realpath), otherwise the test setup is
+    // broken. On non-bionic. On bionic we can be running with a different libartbase that lives
+    // outside of ANDROID_ROOT.
+    UniqueCPtr<char> real_root3(realpath(android_root3.c_str(), nullptr));
 #if !defined(__BIONIC__ ) || defined(__ANDROID__)
-  UniqueCPtr<char> real_root(realpath(android_root.c_str(), nullptr));
-  EXPECT_STREQ(real_root.get(), real_root3.get()) << error_msg;
+    UniqueCPtr<char> real_root(realpath(android_root.c_str(), nullptr));
+    EXPECT_STREQ(real_root.get(), real_root3.get()) << error_msg;
 #else
-  EXPECT_STRNE(real_root3.get(), "") << error_msg;
+    EXPECT_STRNE(real_root3.get(), "") << error_msg;
 #endif
+  }
 
   // Reset ANDROID_ROOT, as other things may depend on it.
   ASSERT_EQ(0, setenv("ANDROID_ROOT", android_root_env.c_str(), /* overwrite */ 1));
 }
 
-TEST_F(FileUtilsTest, GetAndroidRuntimeRootSafe) {
+TEST_F(FileUtilsTest, GetArtRootSafe) {
   std::string error_msg;
+  std::string android_art_root;
+  std::string android_art_root_env;
 
-  // We don't expect null returns for most cases, so don't check and let std::string crash.
+  // TODO(b/130295968): Re-enable this part when the directory exists on host
+  if (kIsTargetBuild) {
+    // We don't expect null returns for most cases, so don't check and let std::string crash.
 
-  // CommonArtTest sets ANDROID_RUNTIME_ROOT, so expect this to be the same.
-  std::string android_runtime_root = GetAndroidRuntimeRootSafe(&error_msg);
-  std::string android_runtime_root_env = getenv("ANDROID_RUNTIME_ROOT");
-  EXPECT_EQ(android_runtime_root, android_runtime_root_env) << error_msg;
+    // CommonArtTest sets ANDROID_ART_ROOT, so expect this to be the same.
+    android_art_root = GetArtRootSafe(&error_msg);
+    android_art_root_env = getenv("ANDROID_ART_ROOT");
+    EXPECT_EQ(android_art_root, android_art_root_env) << error_msg;
 
-  // Set ANDROID_RUNTIME_ROOT to something else (but the directory must exist). So use dirname.
-  UniqueCPtr<char> root_dup(strdup(android_runtime_root_env.c_str()));
-  char* dir = dirname(root_dup.get());
-  ASSERT_EQ(0, setenv("ANDROID_RUNTIME_ROOT", dir, /* overwrite */ 1));
-  std::string android_runtime_root2 = GetAndroidRuntimeRootSafe(&error_msg);
-  EXPECT_STREQ(dir, android_runtime_root2.c_str()) << error_msg;
+    // Set ANDROID_ART_ROOT to something else (but the directory must exist). So use dirname.
+    UniqueCPtr<char> root_dup(strdup(android_art_root_env.c_str()));
+    char* dir = dirname(root_dup.get());
+    ASSERT_EQ(0, setenv("ANDROID_ART_ROOT", dir, /* overwrite */ 1));
+    std::string android_art_root2 = GetArtRootSafe(&error_msg);
+    EXPECT_STREQ(dir, android_art_root2.c_str()) << error_msg;
+  }
 
-  // Set a bogus value for ANDROID_RUNTIME_ROOT. This should be an error.
-  ASSERT_EQ(0, setenv("ANDROID_RUNTIME_ROOT", "/this/is/obviously/bogus", /* overwrite */ 1));
-  EXPECT_EQ(GetAndroidRuntimeRootSafe(&error_msg), "");
+  // Set a bogus value for ANDROID_ART_ROOT. This should be an error.
+  ASSERT_EQ(0, setenv("ANDROID_ART_ROOT", "/this/is/obviously/bogus", /* overwrite */ 1));
+  EXPECT_EQ(GetArtRootSafe(&error_msg), "");
 
-  // Reset ANDROID_RUNTIME_ROOT, as other things may depend on it.
-  ASSERT_EQ(0, setenv("ANDROID_RUNTIME_ROOT", android_runtime_root_env.c_str(), /* overwrite */ 1));
+  // Inferring the ART root from the location of libartbase only works on target.
+  if (kIsTargetBuild) {
+    // Disabled for now, as we cannot reliably use `GetRootContainingLibartbase`
+    // to find the ART root on target yet (see comment in `GetArtRootSafe`).
+    //
+    // TODO(b/129534335): Re-enable this part of the test on target when the
+    // only instance of libartbase is the one from the ART APEX.
+    if ((false)) {
+      // Unset ANDROID_ART_ROOT and see that it still returns something (as
+      // libartbase code is running).
+      ASSERT_EQ(0, unsetenv("ANDROID_ART_ROOT"));
+      std::string android_art_root3 = GetArtRootSafe(&error_msg);
+      // This should be the same as the other root (modulo realpath), otherwise
+      // the test setup is broken. On non-bionic. On bionic we can be running
+      // with a different libartbase that lives outside of ANDROID_ART_ROOT.
+      UniqueCPtr<char> real_root3(realpath(android_art_root3.c_str(), nullptr));
+#if !defined(__BIONIC__ ) || defined(__ANDROID__)
+      UniqueCPtr<char> real_root(realpath(android_art_root.c_str(), nullptr));
+      EXPECT_STREQ(real_root.get(), real_root3.get()) << error_msg;
+#else
+      EXPECT_STRNE(real_root3.get(), "") << error_msg;
+#endif
+    }
+  }
+
+  // Reset ANDROID_ART_ROOT, as other things may depend on it.
+  ASSERT_EQ(0, setenv("ANDROID_ART_ROOT", android_art_root_env.c_str(), /* overwrite */ 1));
 }
 
 TEST_F(FileUtilsTest, ReplaceFileExtension) {
diff --git a/libartbase/base/globals_unix.cc b/libartbase/base/globals_unix.cc
new file mode 100644
index 0000000..0d2e8a9
--- /dev/null
+++ b/libartbase/base/globals_unix.cc
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <android-base/logging.h>
+
+#include <dlfcn.h>
+
+#include "globals.h"
+
+namespace art {
+
+#ifdef __APPLE__
+// dlopen(3) on Linux with just an SO name will search the already
+// opened libraries. On Darwin, dlopen(3) needs the SO name
+// qualified with its path (if the SO is not on the search path). Use
+// linker path when compiling app. See man pages for dlopen(3) and
+// dyld(1).
+  static constexpr const char kLibArtBaseDebug[] = "@rpath/libartbased.dylib";
+  static constexpr const char kLibArtBaseRelease[] = "@rpath/libartbase.dylib";
+#else
+  static constexpr const char kLibArtBaseDebug[] = "libartbased.so";
+  static constexpr const char kLibArtBaseRelease[] = "libartbase.so";
+#endif  // __APPLE__
+
+// Check that we have not loaded both debug and release version of libartbase at the same time.
+//
+// This can be a cascade problem originating from a call to
+// LoadLibdexfileExternal in libdexfile_support: If it was called before any ART
+// libraries were loaded it will default to the non-debug version, which can
+// then clash with a later load of the debug version.
+static struct CheckLoadedBuild {
+  CheckLoadedBuild() {
+    bool debug_build_loaded = (dlopen(kLibArtBaseDebug, RTLD_NOW | RTLD_NOLOAD) != nullptr);
+    bool release_build_loaded = (dlopen(kLibArtBaseRelease, RTLD_NOW | RTLD_NOLOAD) != nullptr);
+    // TODO: The exit calls below are needed as CHECK would cause recursive backtracing. Fix it.
+    if (!(debug_build_loaded || release_build_loaded)) {
+      LOG(FATAL_WITHOUT_ABORT) << "Failed to dlopen "
+                               << kLibArtBaseDebug
+                               << " or "
+                               << kLibArtBaseRelease;
+      exit(1);
+    }
+    if (kIsDebugBuild && release_build_loaded) {
+      LOG(FATAL_WITHOUT_ABORT) << "Loading "
+                               << kLibArtBaseDebug
+                               << " while "
+                               << kLibArtBaseRelease
+                               << " is already loaded";
+      exit(1);
+    }
+    if (!kIsDebugBuild && debug_build_loaded) {
+      LOG(FATAL_WITHOUT_ABORT) << "Loading "
+                               << kLibArtBaseRelease
+                               << " while "
+                               << kLibArtBaseDebug
+                               << " is already loaded";
+      exit(1);
+    }
+  }
+} g_check_loaded_build;
+
+}  // namespace art
diff --git a/libartbase/base/hash_set.h b/libartbase/base/hash_set.h
index 99b3df4..585c4ce 100644
--- a/libartbase/base/hash_set.h
+++ b/libartbase/base/hash_set.h
@@ -35,8 +35,14 @@
 namespace art {
 
 template <class Elem, class HashSetType>
-class HashSetIterator : std::iterator<std::forward_iterator_tag, Elem> {
+class HashSetIterator {
  public:
+  using iterator_category = std::forward_iterator_tag;
+  using value_type = Elem;
+  using difference_type = std::ptrdiff_t;
+  using pointer = Elem*;
+  using reference = Elem&;
+
   HashSetIterator(const HashSetIterator&) = default;
   HashSetIterator(HashSetIterator&&) = default;
   HashSetIterator(HashSetType* hash_set, size_t index) : index_(index), hash_set_(hash_set) {}
@@ -436,32 +442,39 @@
   // Insert an element with hint, allows duplicates.
   // Note: The hint is not very useful for a HashSet<> unless there are many hash conflicts
   // and in that case the use of HashSet<> itself should be reconsidered.
-  iterator insert(const_iterator hint ATTRIBUTE_UNUSED, const T& element) {
+  std::pair<iterator, bool> insert(const_iterator hint ATTRIBUTE_UNUSED, const T& element) {
     return insert(element);
   }
-  iterator insert(const_iterator hint ATTRIBUTE_UNUSED, T&& element) {
+  std::pair<iterator, bool> insert(const_iterator hint ATTRIBUTE_UNUSED, T&& element) {
     return insert(std::move(element));
   }
 
   // Insert an element, allows duplicates.
-  iterator insert(const T& element) {
+  std::pair<iterator, bool> insert(const T& element) {
     return InsertWithHash(element, hashfn_(element));
   }
-  iterator insert(T&& element) {
+  std::pair<iterator, bool> insert(T&& element) {
     return InsertWithHash(std::move(element), hashfn_(element));
   }
 
   template <typename U, typename = typename std::enable_if<std::is_convertible<U, T>::value>::type>
-  iterator InsertWithHash(U&& element, size_t hash) {
+  std::pair<iterator, bool> InsertWithHash(U&& element, size_t hash) {
     DCHECK_EQ(hash, hashfn_(element));
     if (num_elements_ >= elements_until_expand_) {
       Expand();
       DCHECK_LT(num_elements_, elements_until_expand_);
     }
-    const size_t index = FirstAvailableSlot(IndexForHash(hash));
-    data_[index] = std::forward<U>(element);
-    ++num_elements_;
-    return iterator(this, index);
+    bool find_failed = false;
+    auto find_fail_fn = [&](size_t index) {
+      find_failed = true;
+      return index;
+    };
+    size_t index = FindIndexImpl(element, hash, find_fail_fn);
+    if (find_failed) {
+      data_[index] = std::forward<U>(element);
+      ++num_elements_;
+    }
+    return std::make_pair(iterator(this, index), find_failed);
   }
 
   void swap(HashSet& other) {
@@ -615,12 +628,20 @@
     if (UNLIKELY(NumBuckets() == 0)) {
       return 0;
     }
+    auto fail_fn = [&](size_t index ATTRIBUTE_UNUSED) { return NumBuckets(); };
+    return FindIndexImpl(element, hash, fail_fn);
+  }
+
+  // Find the hash table slot for an element; if it is not found, invoke `fail_fn` with the
+  template <typename K, typename FailFn>
+  size_t FindIndexImpl(const K& element, size_t hash, FailFn fail_fn) const {
+    DCHECK_NE(NumBuckets(), 0u);
     DCHECK_EQ(hashfn_(element), hash);
     size_t index = IndexForHash(hash);
     while (true) {
       const T& slot = ElementForIndex(index);
       if (emptyfn_.IsEmpty(slot)) {
-        return NumBuckets();
+        return fail_fn(index);
       }
       if (pred_(slot, element)) {
         return index;
diff --git a/libartbase/base/hash_set_test.cc b/libartbase/base/hash_set_test.cc
index 0646967..9e6e6d2 100644
--- a/libartbase/base/hash_set_test.cc
+++ b/libartbase/base/hash_set_test.cc
@@ -218,7 +218,7 @@
 
 TEST_F(HashSetTest, TestStress) {
   HashSet<std::string, IsEmptyFnString> hash_set;
-  std::unordered_multiset<std::string> std_set;
+  std::unordered_set<std::string> std_set;
   std::vector<std::string> strings;
   static constexpr size_t string_count = 2000;
   static constexpr size_t operations = 100000;
@@ -277,7 +277,7 @@
   ASSERT_EQ(it->second, 123);
   hash_map.erase(it);
   it = hash_map.find(std::string("abcd"));
-  ASSERT_EQ(it->second, 124);
+  ASSERT_EQ(it, hash_map.end());
 }
 
 struct IsEmptyFnVectorInt {
@@ -359,18 +359,26 @@
 TEST_F(HashSetTest, IteratorConversion) {
   const char* test_string = "dummy";
   HashSet<std::string> hash_set;
-  HashSet<std::string>::iterator it = hash_set.insert(test_string);
+  HashSet<std::string>::iterator it = hash_set.insert(test_string).first;
   HashSet<std::string>::const_iterator cit = it;
   ASSERT_TRUE(it == cit);
   ASSERT_EQ(*it, *cit);
 }
 
-TEST_F(HashSetTest, StringSearchyStringView) {
+TEST_F(HashSetTest, StringSearchStringView) {
   const char* test_string = "dummy";
   HashSet<std::string> hash_set;
-  HashSet<std::string>::iterator insert_pos = hash_set.insert(test_string);
+  HashSet<std::string>::iterator insert_pos = hash_set.insert(test_string).first;
   HashSet<std::string>::iterator it = hash_set.find(std::string_view(test_string));
   ASSERT_TRUE(it == insert_pos);
 }
 
+TEST_F(HashSetTest, DoubleInsert) {
+  const char* test_string = "dummy";
+  HashSet<std::string> hash_set;
+  hash_set.insert(test_string);
+  hash_set.insert(test_string);
+  ASSERT_EQ(1u, hash_set.size());
+}
+
 }  // namespace art
diff --git a/libartbase/base/hiddenapi_domain.h b/libartbase/base/hiddenapi_domain.h
index 4cbc22d..a329090 100644
--- a/libartbase/base/hiddenapi_domain.h
+++ b/libartbase/base/hiddenapi_domain.h
@@ -24,14 +24,14 @@
 // ordinal is considered more "trusted", i.e. always allowed to access members of
 // domains with a greater ordinal. Access checks are performed when code tries to
 // access a method/field from a more trusted domain than itself.
-enum class Domain {
+enum class Domain : char {
   kCorePlatform = 0,
   kPlatform,
   kApplication,
 };
 
 inline bool IsDomainMoreTrustedThan(Domain domainA, Domain domainB) {
-  return static_cast<uint32_t>(domainA) <= static_cast<uint32_t>(domainB);
+  return static_cast<char>(domainA) <= static_cast<char>(domainB);
 }
 
 }  // namespace hiddenapi
diff --git a/libartbase/base/hiddenapi_flags.h b/libartbase/base/hiddenapi_flags.h
index e6b1879..a9a903b 100644
--- a/libartbase/base/hiddenapi_flags.h
+++ b/libartbase/base/hiddenapi_flags.h
@@ -64,6 +64,13 @@
   // kMin and kMax and no integer values are skipped between them.
   template<typename T>
   constexpr uint32_t NumValues() { return ToUint(T::kMax) - ToUint(T::kMin) + 1; }
+
+  // Returns enum value at position i from enum list.
+  template <typename T>
+  constexpr T GetEnumAt(uint32_t i) {
+    return static_cast<T>(ToUint(T::kMin) + i);
+  }
+
 }  // namespace helper
 
 /*
@@ -89,21 +96,23 @@
     // e.g. GreylistMaxO is accessible to targetSdkVersion <= 27 (O_MR1).
     kGreylistMaxO = 3,
     kGreylistMaxP = 4,
+    kGreylistMaxQ = 5,
 
     // Special values
     kInvalid =      (static_cast<uint32_t>(-1) & kValueBitMask),
     kMin =          kWhitelist,
-    kMax =          kGreylistMaxP,
+    kMax =          kGreylistMaxQ,
   };
 
   // Additional bit flags after the first kValueBitSize bits in dex flags.
   // These are used for domain-specific API.
   enum class DomainApi : uint32_t {
     kCorePlatformApi = kValueBitSize,
+    kTestApi = kValueBitSize + 1,
 
     // Special values
     kMin =             kCorePlatformApi,
-    kMax =             kCorePlatformApi,
+    kMax =             kTestApi,
   };
 
   // Bit mask of all domain API flags.
@@ -127,11 +136,13 @@
     "blacklist",
     "greylist-max-o",
     "greylist-max-p",
+    "greylist-max-q",
   };
 
   // Names corresponding to DomainApis.
   static constexpr const char* kDomainApiNames[] {
     "core-platform-api",
+    "test-api",
   };
 
   // Maximum SDK versions allowed to access ApiList of given Value.
@@ -141,6 +152,7 @@
     /* blacklist */ SdkVersion::kMin,
     /* greylist-max-o */ SdkVersion::kO_MR1,
     /* greylist-max-p */ SdkVersion::kP,
+    /* greylist-max-q */ SdkVersion::kQ,
   };
 
   explicit ApiList(Value val, uint32_t domain_apis = 0u)
@@ -181,7 +193,9 @@
   static ApiList Blacklist() { return ApiList(Value::kBlacklist); }
   static ApiList GreylistMaxO() { return ApiList(Value::kGreylistMaxO); }
   static ApiList GreylistMaxP() { return ApiList(Value::kGreylistMaxP); }
+  static ApiList GreylistMaxQ() { return ApiList(Value::kGreylistMaxQ); }
   static ApiList CorePlatformApi() { return ApiList(DomainApi::kCorePlatformApi); }
+  static ApiList TestApi() { return ApiList(DomainApi::kTestApi); }
 
   uint32_t GetDexFlags() const { return dex_flags_; }
   uint32_t GetIntValue() const { return helper::ToUint(GetValue()) - helper::ToUint(Value::kMin); }
@@ -190,12 +204,12 @@
   static ApiList FromName(const std::string& str) {
     for (uint32_t i = 0; i < kValueCount; ++i) {
       if (str == kValueNames[i]) {
-        return ApiList(static_cast<Value>(i + helper::ToUint(Value::kMin)));
+        return ApiList(helper::GetEnumAt<Value>(i));
       }
     }
     for (uint32_t i = 0; i < kDomainApiCount; ++i) {
       if (str == kDomainApiNames[i]) {
-        return ApiList(static_cast<DomainApi>(i + helper::ToUint(DomainApi::kMin)));
+        return ApiList(helper::GetEnumAt<DomainApi>(i));
       }
     }
     return ApiList();
@@ -228,6 +242,7 @@
 
   bool operator==(const ApiList& other) const { return dex_flags_ == other.dex_flags_; }
   bool operator!=(const ApiList& other) const { return !(*this == other); }
+  bool operator<(const ApiList& other) const { return dex_flags_ < other.dex_flags_; }
 
   // Returns true if combining this ApiList with `other` will succeed.
   bool CanCombineWith(const ApiList& other) const {
@@ -275,20 +290,35 @@
   // Returns true when no ApiList is specified and no domain_api flags either.
   bool IsEmpty() const { return (GetValue() == Value::kInvalid) && (GetDomainApis() == 0); }
 
+  // Returns true if the ApiList is on blacklist.
+  bool IsBlacklisted() const {
+    return GetValue() == Value::kBlacklist;
+  }
+
+  // Returns true if the ApiList is a test API.
+  bool IsTestApi() const {
+    return helper::MatchesBitMask(helper::ToBit(DomainApi::kTestApi), dex_flags_);
+  }
+
   // Returns the maximum target SDK version allowed to access this ApiList.
   SdkVersion GetMaxAllowedSdkVersion() const { return kMaxSdkVersions[GetIntValue()]; }
 
   void Dump(std::ostream& os) const {
     bool is_first = true;
 
+    if (IsEmpty()) {
+      os << "invalid";
+      return;
+    }
+
     if (GetValue() != Value::kInvalid) {
       os << kValueNames[GetIntValue()];
       is_first = false;
     }
 
     const uint32_t domain_apis = GetDomainApis();
-    for (uint32_t i = helper::ToUint(DomainApi::kMin); i <= helper::ToUint(DomainApi::kMax); i++) {
-      if (helper::MatchesBitMask(helper::ToBit(i), domain_apis)) {
+    for (uint32_t i = 0; i < kDomainApiCount; i++) {
+      if (helper::MatchesBitMask(helper::ToBit(helper::GetEnumAt<DomainApi>(i)), domain_apis)) {
         if (is_first) {
           is_first = false;
         } else {
@@ -301,8 +331,19 @@
     DCHECK_EQ(IsEmpty(), is_first);
   }
 
+  // Number of valid enum values in Value.
   static constexpr uint32_t kValueCount = helper::NumValues<Value>();
+  // Number of valid enum values in DomainApi.
   static constexpr uint32_t kDomainApiCount = helper::NumValues<DomainApi>();
+  // Total number of possible enum values, including invalid, in Value.
+  static constexpr uint32_t kValueSize = (1u << kValueBitSize) + 1;
+
+  // Check min and max values are calculated correctly.
+  static_assert(Value::kMin == helper::GetEnumAt<Value>(0));
+  static_assert(Value::kMax == helper::GetEnumAt<Value>(kValueCount - 1));
+
+  static_assert(DomainApi::kMin == helper::GetEnumAt<DomainApi>(0));
+  static_assert(DomainApi::kMax == helper::GetEnumAt<DomainApi>(kDomainApiCount - 1));
 };
 
 inline std::ostream& operator<<(std::ostream& os, ApiList value) {
diff --git a/libartbase/base/histogram-inl.h b/libartbase/base/histogram-inl.h
index 9832f03..ba2b75a 100644
--- a/libartbase/base/histogram-inl.h
+++ b/libartbase/base/histogram-inl.h
@@ -58,10 +58,12 @@
 inline Histogram<Value>::Histogram(const char* name, Value initial_bucket_width,
                                    size_t max_buckets)
     : kAdjust(1000),
-      kInitialBucketCount(8),
+      kInitialBucketCount(kMinBuckets),
       name_(name),
       max_buckets_(max_buckets),
       bucket_width_(initial_bucket_width) {
+  CHECK_GE(max_buckets, kInitialBucketCount);
+  CHECK_EQ(max_buckets_ % 2, 0u);
   Reset();
 }
 
@@ -69,8 +71,9 @@
 inline void Histogram<Value>::GrowBuckets(Value new_max) {
   while (max_ < new_max) {
     // If we have reached the maximum number of buckets, merge buckets together.
-    if (frequency_.size() >= max_buckets_) {
-      CHECK_ALIGNED(frequency_.size(), 2);
+    DCHECK_LE(frequency_.size(), max_buckets_);
+    if (frequency_.size() == max_buckets_) {
+      DCHECK_EQ(frequency_.size() % 2, 0u);
       // We double the width of each bucket to reduce the number of buckets by a factor of 2.
       bucket_width_ *= 2;
       const size_t limit = frequency_.size() / 2;
diff --git a/libartbase/base/histogram.h b/libartbase/base/histogram.h
index 39a1866..2993fbe 100644
--- a/libartbase/base/histogram.h
+++ b/libartbase/base/histogram.h
@@ -39,10 +39,13 @@
     std::vector<double> perc_;
   };
 
+  // Minimum and initial number of allocated buckets in histogram.
+  static constexpr size_t kMinBuckets = 8;
   // Used by the cumulative timing logger to search the histogram set using for an existing split
   // with the same name using CumulativeLogger::HistogramComparator.
   explicit Histogram(const char* name);
-  // This is the expected constructor when creating new Histograms.
+  // This is the expected constructor when creating new Histograms. The max_buckets value
+  // must be even and at least kMinBuckets.
   Histogram(const char* name, Value initial_bucket_width, size_t max_buckets = 100);
   void AddValue(Value);
   void AdjustAndAddValue(Value);  // Add a value after dividing it by kAdjust.
diff --git a/libartbase/base/intrusive_forward_list.h b/libartbase/base/intrusive_forward_list.h
new file mode 100644
index 0000000..984ae9c
--- /dev/null
+++ b/libartbase/base/intrusive_forward_list.h
@@ -0,0 +1,477 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_INTRUSIVE_FORWARD_LIST_H_
+#define ART_LIBARTBASE_BASE_INTRUSIVE_FORWARD_LIST_H_
+
+#include <stdint.h>
+#include <functional>
+#include <iterator>
+#include <memory>
+#include <type_traits>
+
+#include <android-base/logging.h>
+
+#include "base/casts.h"
+#include "base/macros.h"
+
+namespace art {
+
+struct IntrusiveForwardListHook {
+  IntrusiveForwardListHook() : next_hook(nullptr) { }
+  explicit IntrusiveForwardListHook(const IntrusiveForwardListHook* hook) : next_hook(hook) { }
+
+  // Allow copyable values but do not copy the hook, it is not part of the value.
+  IntrusiveForwardListHook(const IntrusiveForwardListHook& other ATTRIBUTE_UNUSED)
+      : next_hook(nullptr) { }
+  IntrusiveForwardListHook& operator=(const IntrusiveForwardListHook& src ATTRIBUTE_UNUSED) {
+    return *this;
+  }
+
+  mutable const IntrusiveForwardListHook* next_hook;
+};
+
+template <typename Derived, typename Tag = void>
+struct IntrusiveForwardListNode : public IntrusiveForwardListHook {
+};
+
+template <typename T, IntrusiveForwardListHook T::* NextPtr = &T::hook>
+class IntrusiveForwardListMemberHookTraits;
+
+template <typename T, typename Tag = void>
+class IntrusiveForwardListBaseHookTraits;
+
+template <typename T,
+          typename HookTraits =
+              IntrusiveForwardListBaseHookTraits<typename std::remove_const<T>::type>>
+class IntrusiveForwardList;
+
+template <typename T, typename HookTraits>
+class IntrusiveForwardListIterator : public std::iterator<std::forward_iterator_tag, T> {
+ public:
+  // Construct/copy/destroy (except the private constructor used by IntrusiveForwardList<>).
+  IntrusiveForwardListIterator() : hook_(nullptr) { }
+  IntrusiveForwardListIterator(const IntrusiveForwardListIterator& src) = default;
+  IntrusiveForwardListIterator& operator=(const IntrusiveForwardListIterator& src) = default;
+
+  // Conversion from iterator to const_iterator.
+  template <typename OtherT,
+            typename = typename std::enable_if<std::is_same<T, const OtherT>::value>::type>
+  IntrusiveForwardListIterator(const IntrusiveForwardListIterator<OtherT, HookTraits>& src)  // NOLINT, implicit
+      : hook_(src.hook_) { }
+
+  // Iteration.
+  IntrusiveForwardListIterator& operator++() {
+    DCHECK(hook_ != nullptr);
+    hook_ = hook_->next_hook;
+    return *this;
+  }
+  IntrusiveForwardListIterator operator++(int) {
+    IntrusiveForwardListIterator tmp(*this);
+    ++*this;
+    return tmp;
+  }
+
+  // Dereference
+  T& operator*() const {
+    DCHECK(hook_ != nullptr);
+    return *HookTraits::GetValue(hook_);
+  }
+  T* operator->() const {
+    return &**this;
+  }
+
+ private:
+  explicit IntrusiveForwardListIterator(const IntrusiveForwardListHook* hook) : hook_(hook) { }
+
+  const IntrusiveForwardListHook* hook_;
+
+  template <typename OtherT, typename OtherTraits>
+  friend class IntrusiveForwardListIterator;
+
+  template <typename OtherT, typename OtherTraits>
+  friend class IntrusiveForwardList;
+
+  template <typename OtherT1, typename OtherT2, typename OtherTraits>
+  friend typename std::enable_if<std::is_same<const OtherT1, const OtherT2>::value, bool>::type
+  operator==(const IntrusiveForwardListIterator<OtherT1, OtherTraits>& lhs,
+             const IntrusiveForwardListIterator<OtherT2, OtherTraits>& rhs);
+};
+
+template <typename T, typename OtherT, typename HookTraits>
+typename std::enable_if<std::is_same<const T, const OtherT>::value, bool>::type operator==(
+    const IntrusiveForwardListIterator<T, HookTraits>& lhs,
+    const IntrusiveForwardListIterator<OtherT, HookTraits>& rhs) {
+  return lhs.hook_ == rhs.hook_;
+}
+
+template <typename T, typename OtherT, typename HookTraits>
+typename std::enable_if<std::is_same<const T, const OtherT>::value, bool>::type operator!=(
+    const IntrusiveForwardListIterator<T, HookTraits>& lhs,
+    const IntrusiveForwardListIterator<OtherT, HookTraits>& rhs) {
+  return !(lhs == rhs);
+}
+
+// Intrusive version of std::forward_list<>. See also slist<> in Boost.Intrusive.
+//
+// This class template provides the same interface as std::forward_list<> as long
+// as the functions are meaningful for an intrusive container; this excludes emplace
+// functions and functions taking an std::initializer_list<> as the container does
+// not construct elements.
+template <typename T, typename HookTraits>
+class IntrusiveForwardList {
+ public:
+  typedef HookTraits hook_traits;
+  typedef       T  value_type;
+  typedef       T& reference;
+  typedef const T& const_reference;
+  typedef       T* pointer;
+  typedef const T* const_pointer;
+  typedef IntrusiveForwardListIterator<      T, hook_traits> iterator;
+  typedef IntrusiveForwardListIterator<const T, hook_traits> const_iterator;
+
+  // Construct/copy/destroy.
+  IntrusiveForwardList() = default;
+  template <typename InputIterator>
+  IntrusiveForwardList(InputIterator first, InputIterator last) : IntrusiveForwardList() {
+    insert_after(before_begin(), first, last);
+  }
+  IntrusiveForwardList(IntrusiveForwardList&& src) : first_(src.first_.next_hook) {
+    src.first_.next_hook = nullptr;
+  }
+  IntrusiveForwardList& operator=(const IntrusiveForwardList& src) = delete;
+  IntrusiveForwardList& operator=(IntrusiveForwardList&& src) {
+    IntrusiveForwardList tmp(std::move(src));
+    tmp.swap(*this);
+    return *this;
+  }
+  ~IntrusiveForwardList() = default;
+
+  // Iterators.
+  iterator before_begin() { return iterator(&first_); }
+  const_iterator before_begin() const { return const_iterator(&first_); }
+  iterator begin() { return iterator(first_.next_hook); }
+  const_iterator begin() const { return const_iterator(first_.next_hook); }
+  iterator end() { return iterator(nullptr); }
+  const_iterator end() const { return const_iterator(nullptr); }
+  const_iterator cbefore_begin() const { return const_iterator(&first_); }
+  const_iterator cbegin() const { return const_iterator(first_.next_hook); }
+  const_iterator cend() const { return const_iterator(nullptr); }
+
+  // Capacity.
+  bool empty() const { return begin() == end(); }
+  size_t max_size() { return static_cast<size_t>(-1); }
+
+  // Element access.
+  reference front() { return *begin(); }
+  const_reference front() const { return *begin(); }
+
+  // Modifiers.
+  template <typename InputIterator>
+  void assign(InputIterator first, InputIterator last) {
+    IntrusiveForwardList tmp(first, last);
+    tmp.swap(*this);
+  }
+  void push_front(value_type& value) {
+    insert_after(before_begin(), value);
+  }
+  void pop_front() {
+    DCHECK(!empty());
+    erase_after(before_begin());
+  }
+  iterator insert_after(const_iterator position, value_type& value) {
+    const IntrusiveForwardListHook* new_hook = hook_traits::GetHook(&value);
+    new_hook->next_hook = position.hook_->next_hook;
+    position.hook_->next_hook = new_hook;
+    return iterator(new_hook);
+  }
+  template <typename InputIterator>
+  iterator insert_after(const_iterator position, InputIterator first, InputIterator last) {
+    while (first != last) {
+      position = insert_after(position, *first++);
+    }
+    return iterator(position.hook_);
+  }
+  iterator erase_after(const_iterator position) {
+    const_iterator last = position;
+    std::advance(last, 2);
+    return erase_after(position, last);
+  }
+  iterator erase_after(const_iterator position, const_iterator last) {
+    DCHECK(position != last);
+    position.hook_->next_hook = last.hook_;
+    return iterator(last.hook_);
+  }
+  void swap(IntrusiveForwardList& other) {
+    std::swap(first_.next_hook, other.first_.next_hook);
+  }
+  void clear() {
+    first_.next_hook = nullptr;
+  }
+
+  // Operations.
+  void splice_after(const_iterator position, IntrusiveForwardList& src) {
+    DCHECK(position != end());
+    splice_after(position, src, src.before_begin(), src.end());
+  }
+  void splice_after(const_iterator position, IntrusiveForwardList&& src) {
+    splice_after(position, src);  // Use l-value overload.
+  }
+  // Splice the element after `i`.
+  void splice_after(const_iterator position, IntrusiveForwardList& src, const_iterator i) {
+    // The standard specifies that this version does nothing if `position == i`
+    // or `position == ++i`. We must handle the latter here because the overload
+    // `splice_after(position, src, first, last)` does not allow `position` inside
+    // the range `(first, last)`.
+    if (++const_iterator(i) == position) {
+      return;
+    }
+    const_iterator last = i;
+    std::advance(last, 2);
+    splice_after(position, src, i, last);
+  }
+  // Splice the element after `i`.
+  void splice_after(const_iterator position, IntrusiveForwardList&& src, const_iterator i) {
+    splice_after(position, src, i);  // Use l-value overload.
+  }
+  // Splice elements between `first` and `last`, i.e. open range `(first, last)`.
+  void splice_after(const_iterator position,
+                    IntrusiveForwardList& src,
+                    const_iterator first,
+                    const_iterator last) {
+    DCHECK(position != end());
+    DCHECK(first != last);
+    if (++const_iterator(first) == last) {
+      // Nothing to do.
+      return;
+    }
+    // If position is just before end() and last is src.end(), we can finish this quickly.
+    if (++const_iterator(position) == end() && last == src.end()) {
+      position.hook_->next_hook = first.hook_->next_hook;
+      first.hook_->next_hook = nullptr;
+      return;
+    }
+    // Otherwise we need to find the position before last to fix up the hook.
+    const_iterator before_last = first;
+    while (++const_iterator(before_last) != last) {
+      ++before_last;
+    }
+    // Detach (first, last).
+    const IntrusiveForwardListHook* first_taken = first.hook_->next_hook;
+    first.hook_->next_hook = last.hook_;
+    // Attach the sequence to the new position.
+    before_last.hook_->next_hook = position.hook_->next_hook;
+    position.hook_->next_hook = first_taken;
+  }
+  // Splice elements between `first` and `last`, i.e. open range `(first, last)`.
+  void splice_after(const_iterator position,
+                    IntrusiveForwardList&& src,
+                    const_iterator first,
+                    const_iterator last) {
+    splice_after(position, src, first, last);  // Use l-value overload.
+  }
+  void remove(const value_type& value) {
+    remove_if([value](const value_type& v) { return value == v; });
+  }
+  template <typename Predicate>
+  void remove_if(Predicate pred) {
+    iterator prev = before_begin();
+    for (iterator current = begin(); current != end(); ++current) {
+      if (pred(*current)) {
+        erase_after(prev);
+        current = prev;
+      } else {
+        prev = current;
+      }
+    }
+  }
+  void unique() {
+    unique(std::equal_to<value_type>());
+  }
+  template <typename BinaryPredicate>
+  void unique(BinaryPredicate pred) {
+    if (!empty()) {
+      iterator prev = begin();
+      iterator current = prev;
+      ++current;
+      for (; current != end(); ++current) {
+        if (pred(*prev, *current)) {
+          erase_after(prev);
+          current = prev;
+        } else {
+          prev = current;
+        }
+      }
+    }
+  }
+  void merge(IntrusiveForwardList& other) {
+    merge(other, std::less<value_type>());
+  }
+  void merge(IntrusiveForwardList&& other) {
+    merge(other);  // Use l-value overload.
+  }
+  template <typename Compare>
+  void merge(IntrusiveForwardList& other, Compare cmp) {
+    iterator prev = before_begin();
+    iterator current = begin();
+    iterator other_prev = other.before_begin();
+    iterator other_current = other.begin();
+    while (current != end() && other_current != other.end()) {
+      if (cmp(*other_current, *current)) {
+        ++other_current;
+        splice_after(prev, other, other_prev);
+        ++prev;
+      } else {
+        prev = current;
+        ++current;
+      }
+      DCHECK(++const_iterator(prev) == current);
+      DCHECK(++const_iterator(other_prev) == other_current);
+    }
+    splice_after(prev, other);
+  }
+  template <typename Compare>
+  void merge(IntrusiveForwardList&& other, Compare cmp) {
+    merge(other, cmp);  // Use l-value overload.
+  }
+  void sort() {
+    sort(std::less<value_type>());
+  }
+  template <typename Compare>
+  void sort(Compare cmp) {
+    size_t n = std::distance(begin(), end());
+    if (n >= 2u) {
+      const_iterator middle = before_begin();
+      std::advance(middle, n / 2u);
+      IntrusiveForwardList second_half;
+      second_half.splice_after(second_half.before_begin(), *this, middle, end());
+      sort(cmp);
+      second_half.sort(cmp);
+      merge(second_half, cmp);
+    }
+  }
+  void reverse() {
+    IntrusiveForwardList reversed;
+    while (!empty()) {
+      value_type& value = front();
+      erase_after(before_begin());
+      reversed.insert_after(reversed.before_begin(), value);
+    }
+    reversed.swap(*this);
+  }
+
+  // Extensions.
+  bool HasExactlyOneElement() const {
+    return !empty() && ++begin() == end();
+  }
+  size_t SizeSlow() const {
+    return std::distance(begin(), end());
+  }
+  bool ContainsNode(const_reference node) const {
+    for (auto&& n : *this) {
+      if (std::addressof(n) == std::addressof(node)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+ private:
+  static IntrusiveForwardListHook* ModifiableHook(const IntrusiveForwardListHook* hook) {
+    return const_cast<IntrusiveForwardListHook*>(hook);
+  }
+
+  IntrusiveForwardListHook first_;
+};
+
+template <typename T, typename HookTraits>
+void swap(IntrusiveForwardList<T, HookTraits>& lhs, IntrusiveForwardList<T, HookTraits>& rhs) {
+  lhs.swap(rhs);
+}
+
+template <typename T, typename HookTraits>
+bool operator==(const IntrusiveForwardList<T, HookTraits>& lhs,
+                const IntrusiveForwardList<T, HookTraits>& rhs) {
+  auto lit = lhs.begin();
+  auto rit = rhs.begin();
+  for (; lit != lhs.end() && rit != rhs.end(); ++lit, ++rit) {
+    if (*lit != *rit) {
+      return false;
+    }
+  }
+  return lit == lhs.end() && rit == rhs.end();
+}
+
+template <typename T, typename HookTraits>
+bool operator!=(const IntrusiveForwardList<T, HookTraits>& lhs,
+                const IntrusiveForwardList<T, HookTraits>& rhs) {
+  return !(lhs == rhs);
+}
+
+template <typename T, typename HookTraits>
+bool operator<(const IntrusiveForwardList<T, HookTraits>& lhs,
+               const IntrusiveForwardList<T, HookTraits>& rhs) {
+  return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
+}
+
+template <typename T, typename HookTraits>
+bool operator>(const IntrusiveForwardList<T, HookTraits>& lhs,
+               const IntrusiveForwardList<T, HookTraits>& rhs) {
+  return rhs < lhs;
+}
+
+template <typename T, typename HookTraits>
+bool operator<=(const IntrusiveForwardList<T, HookTraits>& lhs,
+                const IntrusiveForwardList<T, HookTraits>& rhs) {
+  return !(rhs < lhs);
+}
+
+template <typename T, typename HookTraits>
+bool operator>=(const IntrusiveForwardList<T, HookTraits>& lhs,
+                const IntrusiveForwardList<T, HookTraits>& rhs) {
+  return !(lhs < rhs);
+}
+
+template <typename T, IntrusiveForwardListHook T::* NextPtr>
+class IntrusiveForwardListMemberHookTraits {
+ public:
+  static const IntrusiveForwardListHook* GetHook(const T* value) {
+    return &(value->*NextPtr);
+  }
+
+  static T* GetValue(const IntrusiveForwardListHook* hook) {
+    return reinterpret_cast<T*>(
+        reinterpret_cast<uintptr_t>(hook) - OFFSETOF_MEMBERPTR(T, NextPtr));
+  }
+};
+
+template <typename T, typename Tag>
+class IntrusiveForwardListBaseHookTraits {
+ public:
+  static const IntrusiveForwardListHook* GetHook(const T* value) {
+    // Explicit conversion to the "node" followed by implicit conversion to the "hook".
+    return static_cast<const IntrusiveForwardListNode<T, Tag>*>(value);
+  }
+
+  static T* GetValue(const IntrusiveForwardListHook* hook) {
+    return down_cast<T*>(down_cast<IntrusiveForwardListNode<T, Tag>*>(
+        const_cast<IntrusiveForwardListHook*>(hook)));
+  }
+};
+
+}  // namespace art
+
+#endif  // ART_LIBARTBASE_BASE_INTRUSIVE_FORWARD_LIST_H_
diff --git a/compiler/utils/intrusive_forward_list_test.cc b/libartbase/base/intrusive_forward_list_test.cc
similarity index 100%
rename from compiler/utils/intrusive_forward_list_test.cc
rename to libartbase/base/intrusive_forward_list_test.cc
diff --git a/libartbase/base/iteration_range.h b/libartbase/base/iteration_range.h
index cd87d85..eaed8b0 100644
--- a/libartbase/base/iteration_range.h
+++ b/libartbase/base/iteration_range.h
@@ -49,6 +49,11 @@
   return IterationRange<Iter>(begin_it, end_it);
 }
 
+template<typename List>
+inline IterationRange<typename List::iterator> MakeIterationRange(List& list) {
+  return IterationRange<typename List::iterator>(list.begin(), list.end());
+}
+
 template <typename Iter>
 inline IterationRange<Iter> MakeEmptyIterationRange(const Iter& it) {
   return IterationRange<Iter>(it, it);
diff --git a/libartbase/base/logging.cc b/libartbase/base/logging.cc
index a66a7e3..e9bffaf 100644
--- a/libartbase/base/logging.cc
+++ b/libartbase/base/logging.cc
@@ -18,6 +18,7 @@
 
 #include <iostream>
 #include <limits>
+#include <mutex>
 #include <sstream>
 
 #include "aborting.h"
@@ -79,7 +80,31 @@
   }
 
 #ifdef ART_TARGET_ANDROID
-#define INIT_LOGGING_DEFAULT_LOGGER android::base::LogdLogger()
+  // android::base::LogdLogger breaks messages up into line delimited 4K chunks, since that is the
+  // most that logd can handle per message.  To prevent other threads from interleaving their
+  // messages, LogdLoggerLocked uses a mutex to ensure that only one ART thread is logging at a
+  // time.
+  // Note that this lock makes logging after fork() unsafe in multi-threaded programs, which is part
+  // of the motivation that this lock is not a part of libbase logging.  Zygote guarantees that no
+  // threads are running before calling fork() via ZygoteHooks.waitUntilAllThreadsStopped().
+  class LogdLoggerLocked {
+   public:
+    LogdLoggerLocked() {}
+    void operator()(android::base::LogId id,
+                    android::base::LogSeverity severity,
+                    const char* tag,
+                    const char* file,
+                    unsigned int line,
+                    const char* message) {
+      static std::mutex* logging_lock_ = new std::mutex();
+      std::lock_guard<std::mutex> guard(*logging_lock_);
+      logd_logger_(id, severity, tag, file, line, message);
+    }
+
+   private:
+    android::base::LogdLogger logd_logger_;
+  };
+#define INIT_LOGGING_DEFAULT_LOGGER LogdLoggerLocked()
 #else
 #define INIT_LOGGING_DEFAULT_LOGGER android::base::StderrLogger
 #endif
diff --git a/libartbase/base/logging.h b/libartbase/base/logging.h
index 484db87..e573f03 100644
--- a/libartbase/base/logging.h
+++ b/libartbase/base/logging.h
@@ -17,6 +17,9 @@
 #ifndef ART_LIBARTBASE_BASE_LOGGING_H_
 #define ART_LIBARTBASE_BASE_LOGGING_H_
 
+#include <sstream>
+#include <variant>
+
 #include "android-base/logging.h"
 #include "macros.h"
 
@@ -38,6 +41,7 @@
   bool deopt;
   bool gc;
   bool heap;
+  bool interpreter;  // Enabled with "-verbose:interpreter".
   bool jdwp;
   bool jit;
   bool jni;
@@ -55,6 +59,7 @@
   bool systrace_lock_logging;  // Enabled with "-verbose:sys-locks".
   bool agents;
   bool dex;  // Some dex access output etc.
+  bool plugin;  // Used by some plugins.
 };
 
 // Global log verbosity setting, initialized by InitLogging.
@@ -74,7 +79,7 @@
 // performed.
 extern const char* GetCmdLine();
 
-// The command used to start the ART runtime, such as "/apex/com.android.runtime/bin/dalvikvm". If
+// The command used to start the ART runtime, such as "/apex/com.android.art/bin/dalvikvm". If
 // InitLogging hasn't been performed then just returns "art".
 extern const char* ProgramInvocationName();
 
@@ -105,8 +110,44 @@
 // VLOG(jni) << "A JNI operation was performed";
 #define VLOG(module) if (VLOG_IS_ON(module)) LOG(INFO)
 
-// Return the stream associated with logging for the given module.
-#define VLOG_STREAM(module) LOG_STREAM(INFO)
+// Holder to implement VLOG_STREAM.
+class VlogMessage {
+ public:
+  // TODO Taken from android_base.
+  VlogMessage(bool enable,
+              const char* file,
+              unsigned int line,
+              ::android::base::LogSeverity severity,
+              const char* tag,
+              int error)
+      : msg_(std::in_place_type<std::ostringstream>) {
+    if (enable) {
+      msg_.emplace<::android::base::LogMessage>(file, line, severity, tag, error);
+    }
+  }
+
+  std::ostream& stream() {
+    if (std::holds_alternative<std::ostringstream>(msg_)) {
+      return std::get<std::ostringstream>(msg_);
+    } else {
+      return std::get<::android::base::LogMessage>(msg_).stream();
+    }
+  }
+
+ private:
+  std::variant<::android::base::LogMessage, std::ostringstream> msg_;
+};
+
+// Return the stream associated with logging for the given module. NB Unlike VLOG function calls
+// will still be performed. Output will be suppressed if the module is not on.
+#define VLOG_STREAM(module)                    \
+  ::art::VlogMessage(VLOG_IS_ON(module),       \
+                     __FILE__,                 \
+                     __LINE__,                 \
+                     ::android::base::INFO,    \
+                     _LOG_TAG_INTERNAL,        \
+                     -1)                       \
+      .stream()
 
 }  // namespace art
 
diff --git a/libartbase/base/macros.h b/libartbase/base/macros.h
index 323fa4e..e6f72ff 100644
--- a/libartbase/base/macros.h
+++ b/libartbase/base/macros.h
@@ -69,8 +69,10 @@
 
 #ifndef NDEBUG
 #define ALWAYS_INLINE
+#define FLATTEN
 #else
 #define ALWAYS_INLINE  __attribute__ ((always_inline))
+#define FLATTEN  __attribute__ ((flatten))
 #endif
 
 // clang doesn't like attributes on lambda functions. It would be nice to say:
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 3dbe7b8..03e8218 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -619,6 +619,13 @@
   Invalidate();
 }
 
+void MemMap::ResetInForkedProcess() {
+  // This should be called on a map that has MADV_DONTFORK.
+  // The kernel has already unmapped this.
+  already_unmapped_ = true;
+  Reset();
+}
+
 void MemMap::Invalidate() {
   DCHECK(IsValid());
 
@@ -745,10 +752,10 @@
                                                           fd,
                                                           offset));
   if (actual == MAP_FAILED) {
-    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
-    *error_msg = StringPrintf("map(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
+    *error_msg = StringPrintf("map(%p, %zd, 0x%x, 0x%x, %d, 0) failed: %s. See process "
                               "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
-                              fd);
+                              fd, strerror(errno));
+    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
     return Invalid();
   }
   // Update *this.
@@ -824,6 +831,15 @@
   }
 }
 
+int MemMap::MadviseDontFork() {
+#if defined(__linux__)
+  if (base_begin_ != nullptr || base_size_ != 0) {
+    return madvise(base_begin_, base_size_, MADV_DONTFORK);
+  }
+#endif
+  return -1;
+}
+
 bool MemMap::Sync() {
 #ifdef _WIN32
   // TODO: add FlushViewOfFile support.
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index 525e622..4b6257b 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -29,8 +29,7 @@
 
 namespace art {
 
-#if defined(__LP64__) && !defined(__Fuchsia__) && \
-    (defined(__aarch64__) || defined(__mips__) || defined(__APPLE__))
+#if defined(__LP64__) && !defined(__Fuchsia__) && (defined(__aarch64__) || defined(__APPLE__))
 #define USE_ART_LOW_4G_ALLOCATOR 1
 #else
 #if defined(__LP64__) && !defined(__Fuchsia__) && !defined(__x86_64__)
@@ -232,6 +231,7 @@
   bool Protect(int prot);
 
   void MadviseDontNeedAndZero();
+  int MadviseDontFork();
 
   int GetProtect() const {
     return prot_;
@@ -317,6 +317,10 @@
     return nullptr;
   }
 
+  // Reset in a forked process the MemMap whose memory has been madvised MADV_DONTFORK
+  // in the parent process.
+  void ResetInForkedProcess();
+
  private:
   MemMap(const std::string& name,
          uint8_t* begin,
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index bf39fd1..64fd6c0 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -19,8 +19,8 @@
 #include <memory>
 #include <random>
 
+#include "bit_utils.h"
 #include "common_art_test.h"
-#include "common_runtime_test.h"  // For TEST_DISABLED_FOR_MIPS
 #include "logging.h"
 #include "memory_tool.h"
 #include "mman.h"
@@ -503,10 +503,6 @@
 }
 
 TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
-  // Some MIPS32 hardware (namely the Creator Ci20 development board)
-  // cannot allocate in the 2GB-4GB region.
-  TEST_DISABLED_FOR_MIPS();
-
   // This test does not work under AddressSanitizer.
   // Historical note: This test did not work under Valgrind either.
   TEST_DISABLED_FOR_MEMORY_TOOL();
@@ -902,6 +898,5 @@
 extern "C"
 __attribute__((visibility("default"))) __attribute__((used))
 void ArtTestGlobalInit() {
-  LOG(ERROR) << "Installing listener";
   testing::UnitTest::GetInstance()->listeners().Append(new DumpMapsOnFailListener());
 }
diff --git a/libartbase/base/membarrier.cc b/libartbase/base/membarrier.cc
index abb36bc..48f47df 100644
--- a/libartbase/base/membarrier.cc
+++ b/libartbase/base/membarrier.cc
@@ -17,9 +17,11 @@
 #include "membarrier.h"
 
 #include <errno.h>
+#include <stdio.h>
 
 #if !defined(_WIN32)
 #include <sys/syscall.h>
+#include <sys/utsname.h>
 #include <unistd.h>
 #endif
 #include "macros.h"
@@ -46,6 +48,18 @@
 #if defined(__NR_membarrier)
 
 int membarrier(MembarrierCommand command) {
+  // Check kernel version supports membarrier(2).
+  static constexpr int kRequiredMajor = 4;
+  static constexpr int kRequiredMinor = 16;
+  struct utsname uts;
+  int major, minor;
+  if (uname(&uts) != 0 ||
+      strcmp(uts.sysname, "Linux") != 0 ||
+      sscanf(uts.release, "%d.%d", &major, &minor) != 2 ||
+      (major < kRequiredMajor || (major == kRequiredMajor && minor < kRequiredMinor))) {
+    errno = ENOSYS;
+    return -1;
+  }
 #if defined(__BIONIC__)
   // Avoid calling membarrier on older Android versions where membarrier may be barred by secomp
   // causing the current process to be killed. The probing here could be considered expensive so
diff --git a/libartbase/base/memfd.cc b/libartbase/base/memfd.cc
index 780be32..8512a3a 100644
--- a/libartbase/base/memfd.cc
+++ b/libartbase/base/memfd.cc
@@ -19,13 +19,21 @@
 #include <errno.h>
 #include <stdio.h>
 #if !defined(_WIN32)
+#include <fcntl.h>
 #include <sys/syscall.h>
 #include <sys/utsname.h>
 #include <unistd.h>
 #endif
+#if defined(__BIONIC__)
+#include <linux/memfd.h>  // To access memfd flags.
+#endif
+
+#include <android-base/logging.h>
+#include <android-base/unique_fd.h>
 
 #include "macros.h"
 
+
 // When building for linux host, glibc in prebuilts does not include memfd_create system call
 // number. As a temporary testing measure, we add the definition here.
 #if defined(__linux__) && !defined(__NR_memfd_create)
@@ -67,4 +75,58 @@
 
 #endif  // __NR_memfd_create
 
+// This is a wrapper that will attempt to simulate memfd_create if normal running fails.
+int memfd_create_compat(const char* name, unsigned int flags) {
+  int res = memfd_create(name, flags);
+  if (res >= 0) {
+    return res;
+  }
+#if !defined(_WIN32)
+  // Try to create an anonymous file with tmpfile that we can use instead.
+  if (flags == 0) {
+    FILE* file = tmpfile();
+    if (file != nullptr) {
+      // We want the normal 'dup' semantics since memfd_create without any flags isn't CLOEXEC.
+      // Unfortunately on some android targets we will compiler error if we use dup directly and so
+      // need to use fcntl.
+      int nfd = fcntl(fileno(file), F_DUPFD, /*lowest allowed fd*/ 0);
+      fclose(file);
+      return nfd;
+    }
+  }
+#endif
+  return res;
+}
+
+#if defined(__BIONIC__)
+
+static bool IsSealFutureWriteSupportedInternal() {
+  android::base::unique_fd fd(art::memfd_create("test_android_memfd", MFD_ALLOW_SEALING));
+  if (fd == -1) {
+    LOG(INFO) << "memfd_create failed: " << strerror(errno) << ", no memfd support.";
+    return false;
+  }
+
+  if (fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE) == -1) {
+    LOG(INFO) << "fcntl(F_ADD_SEALS) failed: " << strerror(errno) << ", no memfd support.";
+    return false;
+  }
+
+  LOG(INFO) << "Using memfd for future sealing";
+  return true;
+}
+
+bool IsSealFutureWriteSupported() {
+  static bool is_seal_future_write_supported = IsSealFutureWriteSupportedInternal();
+  return is_seal_future_write_supported;
+}
+
+#else
+
+bool IsSealFutureWriteSupported() {
+  return false;
+}
+
+#endif
+
 }  // namespace art
diff --git a/libartbase/base/memfd.h b/libartbase/base/memfd.h
index 91db0b2..0bb336d 100644
--- a/libartbase/base/memfd.h
+++ b/libartbase/base/memfd.h
@@ -17,12 +17,61 @@
 #ifndef ART_LIBARTBASE_BASE_MEMFD_H_
 #define ART_LIBARTBASE_BASE_MEMFD_H_
 
+#include <fcntl.h>
+#include <unistd.h>
+
+#if defined(__BIONIC__)
+#include <linux/memfd.h>  // To access memfd flags.
+#else
+
+// If memfd flags don't exist in the current toolchain, define them ourselves.
+#ifndef F_ADD_SEALS
+# define F_ADD_SEALS          (1033)
+#endif
+
+#ifndef F_GET_SEALS
+# define F_GET_SEALS          (1034)
+#endif
+
+#ifndef F_SEAL_SEAL
+# define F_SEAL_SEAL          0x0001
+#endif
+
+#ifndef F_SEAL_SHRINK
+# define F_SEAL_SHRINK        0x0002
+#endif
+
+#ifndef F_SEAL_GROW
+# define F_SEAL_GROW          0x0004
+#endif
+
+#ifndef F_SEAL_WRITE
+# define F_SEAL_WRITE         0x0008
+#endif
+
+#ifndef F_SEAL_FUTURE_WRITE
+# define F_SEAL_FUTURE_WRITE  0x0010
+#endif
+
+#ifndef MFD_ALLOW_SEALING
+# define MFD_ALLOW_SEALING    0x0002U
+#endif
+
+#endif
+
 namespace art {
 
 // Call memfd(2) if available on platform and return result. This call also makes a kernel version
 // check for safety on older kernels (b/116769556)..
 int memfd_create(const char* name, unsigned int flags);
 
+// Call memfd(2) if available on platform and return result. Try to give us an unlinked FD in some
+// other way if memfd fails or isn't supported.
+int memfd_create_compat(const char* name, unsigned int flags);
+
+// Return whether the kernel supports sealing future writes of a memfd.
+bool IsSealFutureWriteSupported();
+
 }  // namespace art
 
 #endif  // ART_LIBARTBASE_BASE_MEMFD_H_
diff --git a/libartbase/base/memory_tool.h b/libartbase/base/memory_tool.h
index 1a6a9bb..eba1d73 100644
--- a/libartbase/base/memory_tool.h
+++ b/libartbase/base/memory_tool.h
@@ -66,6 +66,26 @@
 
 #endif
 
+#if __has_feature(hwaddress_sanitizer)
+# define HWADDRESS_SANITIZER
+// NB: The attribute also implies NO_INLINE. If inlined, the hwasan attribute would be lost.
+//     If method is also separately marked as ALWAYS_INLINE, the NO_INLINE takes precedence.
+# define ATTRIBUTE_NO_SANITIZE_HWADDRESS __attribute__((no_sanitize("hwaddress"), noinline))
+#else
+# define ATTRIBUTE_NO_SANITIZE_HWADDRESS
+#endif
+
+// Removes the hwasan tag from the pointer (the top eight bits).
+// Those bits are used for verification by hwasan and they are ignored by normal ARM memory ops.
+template<typename PtrType>
+static inline PtrType* HWASanUntag(PtrType* p) {
+#if __has_feature(hwaddress_sanitizer) && defined(__aarch64__)
+  return reinterpret_cast<PtrType*>(reinterpret_cast<uintptr_t>(p) & ((1ULL << 56) - 1));
+#else
+  return p;
+#endif
+}
+
 }  // namespace art
 
 #endif  // ART_LIBARTBASE_BASE_MEMORY_TOOL_H_
diff --git a/libartbase/base/memory_type_table.h b/libartbase/base/memory_type_table.h
deleted file mode 100644
index 89d4ad5..0000000
--- a/libartbase/base/memory_type_table.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * Copyright (C) 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_LIBARTBASE_BASE_MEMORY_TYPE_TABLE_H_
-#define ART_LIBARTBASE_BASE_MEMORY_TYPE_TABLE_H_
-
-#include <iostream>
-#include <map>
-#include <vector>
-
-#include <android-base/macros.h>   // For DISALLOW_COPY_AND_ASSIGN
-#include <android-base/logging.h>  // For DCHECK macros
-
-namespace art {
-
-// Class representing a memory range together with type attribute.
-template <typename T>
-class MemoryTypeRange final {
- public:
-  MemoryTypeRange(uintptr_t start, uintptr_t limit, const T& type)
-      : start_(start), limit_(limit), type_(type) {}
-  MemoryTypeRange() : start_(0), limit_(0), type_() {}
-  MemoryTypeRange(MemoryTypeRange&& other) = default;
-  MemoryTypeRange(const MemoryTypeRange& other) = default;
-  MemoryTypeRange& operator=(const MemoryTypeRange& other) = default;
-
-  uintptr_t Start() const {
-    DCHECK(IsValid());
-    return start_;
-  }
-
-  uintptr_t Limit() const {
-    DCHECK(IsValid());
-    return limit_;
-  }
-
-  uintptr_t Size() const { return Limit() - Start(); }
-
-  const T& Type() const { return type_; }
-
-  bool IsValid() const { return start_ <= limit_; }
-
-  bool Contains(uintptr_t address) const {
-    return address >= Start() && address < Limit();
-  }
-
-  bool Overlaps(const MemoryTypeRange& other) const {
-    bool disjoint = Limit() <= other.Start() || Start() >= other.Limit();
-    return !disjoint;
-  }
-
-  bool Adjoins(const MemoryTypeRange& other) const {
-    return other.Start() == Limit() || other.Limit() == Start();
-  }
-
-  bool CombinableWith(const MemoryTypeRange& other) const {
-    return Type() == other.Type() && Adjoins(other);
-  }
-
- private:
-  uintptr_t start_;
-  uintptr_t limit_;
-  T type_;
-};
-
-// Class representing a table of memory ranges.
-template <typename T>
-class MemoryTypeTable final {
- public:
-  // Class used to construct and populate MemoryTypeTable instances.
-  class Builder;
-
-  MemoryTypeTable() {}
-
-  MemoryTypeTable(MemoryTypeTable&& table) : ranges_(std::move(table.ranges_)) {}
-
-  MemoryTypeTable& operator=(MemoryTypeTable&& table) {
-    ranges_ = std::move(table.ranges_);
-    return *this;
-  }
-
-  // Find the range containing |address|.
-  // Returns a pointer to a range on success, nullptr otherwise.
-  const MemoryTypeRange<T>* Lookup(uintptr_t address) const {
-    int last = static_cast<int>(ranges_.size()) - 1;
-    for (int l = 0, r = last; l <= r;) {
-      int m = l + (r - l) / 2;
-      if (address < ranges_[m].Start()) {
-        r = m - 1;
-      } else if (address >= ranges_[m].Limit()) {
-        l = m + 1;
-      } else {
-        DCHECK(ranges_[m].Contains(address))
-            << reinterpret_cast<void*>(address) << " " << ranges_[m];
-        return &ranges_[m];
-      }
-    }
-    return nullptr;
-  }
-
-  size_t Size() const { return ranges_.size(); }
-
-  void Print(std::ostream& os) const {
-    for (const auto& range : ranges_) {
-      os << range << std::endl;
-    }
-  }
-
- private:
-  std::vector<MemoryTypeRange<T>> ranges_;
-
-  DISALLOW_COPY_AND_ASSIGN(MemoryTypeTable);
-};
-
-// Class for building MemoryTypeTable instances. Supports
-// adding ranges and looking up ranges.
-template <typename T>
-class MemoryTypeTable<T>::Builder final {
- public:
-  Builder() {}
-
-  // Adds a range if it is valid and doesn't overlap with existing ranges.  If the range adjoins
-  // with an existing range, then the ranges are merged.
-  //
-  // Overlapping ranges and ranges of zero size are not supported.
-  //
-  // Returns true on success, false otherwise.
-  inline bool Add(const MemoryTypeRange<T>& region);
-
-  // Find the range containing |address|.
-  // Returns a pointer to a range on success, nullptr otherwise.
-  inline const MemoryTypeRange<T>* Lookup(uintptr_t address) const;
-
-  // Returns number of unique ranges.
-  inline size_t Size() const { return ranges_.size(); }
-
-  // Generates a MemoryTypeTable for added ranges.
-  MemoryTypeTable Build() const {
-    MemoryTypeTable table;
-    for (const auto& it : ranges_) {
-      table.ranges_.push_back(it.second);
-    }
-    return table;
-  }
-
- private:
-  std::map<uintptr_t, MemoryTypeRange<T>> ranges_;
-
-  DISALLOW_COPY_AND_ASSIGN(Builder);
-};
-
-template <typename T>
-bool operator==(const MemoryTypeRange<T>& lhs, const MemoryTypeRange<T>& rhs) {
-  return (lhs.Start() == rhs.Start() && lhs.Limit() == rhs.Limit() && lhs.Type() == rhs.Type());
-}
-
-template <typename T>
-bool operator!=(const MemoryTypeRange<T>& lhs, const MemoryTypeRange<T>& rhs) {
-  return !(lhs == rhs);
-}
-
-template <typename T>
-std::ostream& operator<<(std::ostream& os, const MemoryTypeRange<T>& range) {
-  os << reinterpret_cast<void*>(range.Start())
-     << '-'
-     << reinterpret_cast<void*>(range.Limit())
-     << ' '
-     << range.Type();
-  return os;
-}
-
-template <typename T>
-std::ostream& operator<<(std::ostream& os, const MemoryTypeTable<T>& table) {
-  table.Print(os);
-  return os;
-}
-
-template <typename T>
-bool MemoryTypeTable<T>::Builder::Add(const MemoryTypeRange<T>& range) {
-  if (UNLIKELY(!range.IsValid() || range.Size() == 0u)) {
-    return false;
-  }
-
-  typename std::map<uintptr_t, MemoryTypeRange<T>>::iterator pred, succ;
-
-  // Find an iterator for the next element in the ranges.
-  succ = ranges_.lower_bound(range.Limit());
-
-  // Find an iterator for a predecessor element.
-  if (succ == ranges_.begin()) {
-    // No predecessor element if the successor is at the beginning of ranges.
-    pred = ranges_.end();
-  } else if (succ != ranges_.end()) {
-    // Predecessor is element before successor.
-    pred = std::prev(succ);
-  } else {
-    // Predecessor is the last element in a non-empty map when there is no successor.
-    pred = ranges_.find(ranges_.rbegin()->first);
-  }
-
-  // Invalidate |succ| if it cannot be combined with |range|.
-  if (succ != ranges_.end()) {
-    DCHECK_GE(succ->second.Limit(), range.Start());
-    if (range.Overlaps(succ->second)) {
-      return false;
-    }
-    if (!range.CombinableWith(succ->second)) {
-      succ = ranges_.end();
-    }
-  }
-
-  // Invalidate |pred| if it cannot be combined with |range|.
-  if (pred != ranges_.end()) {
-    if (range.Overlaps(pred->second)) {
-      return false;
-    }
-    if (!range.CombinableWith(pred->second)) {
-      pred = ranges_.end();
-    }
-  }
-
-  if (pred == ranges_.end()) {
-    if (succ == ranges_.end()) {
-      // |pred| is invalid, |succ| is invalid.
-      // No compatible neighbors for merging.
-      DCHECK(ranges_.find(range.Limit()) == ranges_.end());
-      ranges_[range.Limit()] = range;
-    } else {
-      // |pred| is invalid, |succ| is valid. Merge into |succ|.
-      const uintptr_t limit = succ->second.Limit();
-      DCHECK_GT(limit, range.Limit());
-      ranges_.erase(succ);
-      ranges_[limit] = MemoryTypeRange<T>(range.Start(), limit, range.Type());
-    }
-  } else {
-    if (succ == ranges_.end()) {
-      // |pred| is valid, |succ| is invalid. Merge into |pred|.
-      const uintptr_t start = pred->second.Start();
-      const uintptr_t limit = range.Limit();
-      DCHECK_LT(start, range.Start());
-      DCHECK_GT(limit, pred->second.Limit());
-      ranges_.erase(pred);
-      ranges_[limit] = MemoryTypeRange<T>(start, limit, range.Type());
-    } else {
-      // |pred| is valid, |succ| is valid. Merge between |pred| and |succ|.
-      DCHECK_LT(pred->second.Start(), range.Start());
-      DCHECK_GT(succ->second.Limit(), range.Limit());
-      const uintptr_t start = pred->second.Start();
-      const uintptr_t limit = succ->second.Limit();
-      ranges_.erase(pred, ++succ);
-      ranges_[limit] = MemoryTypeRange<T>(start, limit, range.Type());
-    }
-  }
-  return true;
-}
-
-template <typename T>
-const MemoryTypeRange<T>* MemoryTypeTable<T>::Builder::Lookup(uintptr_t address) const {
-  auto it = ranges_.upper_bound(address);
-  if (it != ranges_.end() && it->second.Contains(address)) {
-    return &it->second;
-  } else {
-    return nullptr;
-  }
-}
-
-}  // namespace art
-
-#endif  // ART_LIBARTBASE_BASE_MEMORY_TYPE_TABLE_H_
diff --git a/libartbase/base/memory_type_table_test.cc b/libartbase/base/memory_type_table_test.cc
deleted file mode 100644
index 1ffefef..0000000
--- a/libartbase/base/memory_type_table_test.cc
+++ /dev/null
@@ -1,350 +0,0 @@
-/*
- * Copyright (C) 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "memory_type_table.h"
-
-#include <limits>
-
-#include <gtest/gtest.h>
-
-namespace art {
-
-TEST(memory_type_range, range) {
-  MemoryTypeRange<int> r(0x1000u, 0x2000u, 42);
-  EXPECT_EQ(r.Start(), 0x1000u);
-  EXPECT_EQ(r.Limit(), 0x2000u);
-  EXPECT_EQ(r.Type(), 42);
-}
-
-TEST(memory_type_range, range_contains) {
-  MemoryTypeRange<int> r(0x1000u, 0x2000u, 42);
-  EXPECT_FALSE(r.Contains(0x0fffu));
-  EXPECT_TRUE(r.Contains(0x1000u));
-  EXPECT_TRUE(r.Contains(0x1fffu));
-  EXPECT_FALSE(r.Contains(0x2000u));
-}
-
-TEST(memory_type_range, range_overlaps) {
-  static const int kMemoryType = 42;
-  MemoryTypeRange<int> a(0x1000u, 0x2000u, kMemoryType);
-
-  {
-    // |<----- a ----->|<----- b ----->|
-    MemoryTypeRange<int> b(a.Limit(), a.Limit() + a.Size(), kMemoryType);
-    EXPECT_FALSE(a.Overlaps(b));
-    EXPECT_FALSE(b.Overlaps(a));
-  }
-
-  {
-    // |<----- a ----->| |<----- c ----->|
-    MemoryTypeRange<int> c(a.Limit() + a.Size(), a.Limit() + 2 * a.Size(), kMemoryType);
-    EXPECT_FALSE(a.Overlaps(c));
-    EXPECT_FALSE(c.Overlaps(a));
-  }
-
-  {
-    // |<----- a ----->|
-    //     |<- d ->|
-    MemoryTypeRange<int> d(a.Start() + a.Size() / 4, a.Limit() - a.Size() / 4, kMemoryType);
-    EXPECT_TRUE(a.Overlaps(d));
-    EXPECT_TRUE(d.Overlaps(a));
-  }
-
-  {
-    // |<----- a ----->|
-    // |<- e ->|
-    MemoryTypeRange<int> e(a.Start(), a.Start() + a.Size() / 2, kMemoryType);
-    EXPECT_TRUE(a.Overlaps(e));
-    EXPECT_TRUE(e.Overlaps(a));
-  }
-
-  {
-    // |<----- a ----->|
-    //         |<- f ->|
-    MemoryTypeRange<int> f(a.Start() + a.Size() / 2, a.Limit(), kMemoryType);
-    EXPECT_TRUE(a.Overlaps(f));
-    EXPECT_TRUE(f.Overlaps(a));
-  }
-
-  {
-    // |<----- a ----->|
-    //        |<----- g ----->|
-    MemoryTypeRange<int> g(a.Start() + a.Size() / 2, a.Limit() + a.Size() / 2, kMemoryType);
-    EXPECT_TRUE(a.Overlaps(g));
-    EXPECT_TRUE(g.Overlaps(a));
-  }
-}
-
-TEST(memory_type_range, range_adjoins) {
-  static const int kMemoryType = 42;
-  MemoryTypeRange<int> a(0x1000u, 0x2000u, kMemoryType);
-
-  {
-    // |<--- a --->|<--- b --->|
-    MemoryTypeRange<int> b(a.Limit(), a.Limit() + a.Size(), kMemoryType);
-    EXPECT_TRUE(a.Adjoins(b));
-    EXPECT_TRUE(b.Adjoins(a));
-  }
-
-  {
-    // |<--- a --->| |<--- c --->|
-    MemoryTypeRange<int> c(a.Limit() + a.Size(), a.Limit() + 2 * a.Size(), kMemoryType);
-    EXPECT_FALSE(a.Adjoins(c));
-    EXPECT_FALSE(c.Adjoins(a));
-  }
-
-  {
-    // |<--- a --->|
-    //       |<--- d --->|
-    MemoryTypeRange<int> d(a.Start() + a.Size() / 2, a.Limit() + a.Size() / 2, kMemoryType);
-    EXPECT_FALSE(a.Adjoins(d));
-    EXPECT_FALSE(d.Adjoins(a));
-  }
-}
-
-TEST(memory_type_range, combinable_with) {
-  // Adjoining ranges of same type.
-  EXPECT_TRUE(MemoryTypeRange<int>(0x1000, 0x2000, 0)
-              .CombinableWith(MemoryTypeRange<int>(0x800, 0x1000, 0)));
-  EXPECT_TRUE(MemoryTypeRange<int>(0x800, 0x1000, 0)
-              .CombinableWith(MemoryTypeRange<int>(0x1000, 0x2000, 0)));
-  // Adjoining ranges of different types.
-  EXPECT_FALSE(MemoryTypeRange<int>(0x1000, 0x2000, 0)
-               .CombinableWith(MemoryTypeRange<int>(0x800, 0x1000, 1)));
-  EXPECT_FALSE(MemoryTypeRange<int>(0x800, 0x1000, 1)
-               .CombinableWith(MemoryTypeRange<int>(0x1000, 0x2000, 0)));
-  // Disjoint ranges.
-  EXPECT_FALSE(MemoryTypeRange<int>(0x0800, 0x1000, 0)
-               .CombinableWith(MemoryTypeRange<int>(0x1f00, 0x2000, 0)));
-  EXPECT_FALSE(MemoryTypeRange<int>(0x1f00, 0x2000, 0)
-               .CombinableWith(MemoryTypeRange<int>(0x800, 0x1000, 0)));
-  // Overlapping ranges.
-  EXPECT_FALSE(MemoryTypeRange<int>(0x0800, 0x2000, 0)
-               .CombinableWith(MemoryTypeRange<int>(0x1f00, 0x2000, 0)));
-}
-
-TEST(memory_type_range, is_valid) {
-  EXPECT_TRUE(MemoryTypeRange<int>(std::numeric_limits<uintptr_t>::min(),
-                                   std::numeric_limits<uintptr_t>::max(),
-                                   0).IsValid());
-  EXPECT_TRUE(MemoryTypeRange<int>(1u, 2u, 0).IsValid());
-  EXPECT_TRUE(MemoryTypeRange<int>(0u, 0u, 0).IsValid());
-  EXPECT_FALSE(MemoryTypeRange<int>(2u, 1u, 0).IsValid());
-  EXPECT_FALSE(MemoryTypeRange<int>(std::numeric_limits<uintptr_t>::max(),
-                                    std::numeric_limits<uintptr_t>::min(),
-                                    0).IsValid());
-}
-
-TEST(memory_type_range, range_equality) {
-  static const int kMemoryType = 42;
-  MemoryTypeRange<int> a(0x1000u, 0x2000u, kMemoryType);
-
-  MemoryTypeRange<int> b(a.Start(), a.Limit(), a.Type());
-  EXPECT_TRUE(a == b);
-  EXPECT_FALSE(a != b);
-
-  MemoryTypeRange<int> c(a.Start() + 1, a.Limit(), a.Type());
-  EXPECT_FALSE(a == c);
-  EXPECT_TRUE(a != c);
-
-  MemoryTypeRange<int> d(a.Start(), a.Limit() + 1, a.Type());
-  EXPECT_FALSE(a == d);
-  EXPECT_TRUE(a != d);
-
-  MemoryTypeRange<int> e(a.Start(), a.Limit(), a.Type() + 1);
-  EXPECT_FALSE(a == e);
-  EXPECT_TRUE(a != e);
-}
-
-TEST(memory_type_table_builder, add_lookup) {
-  MemoryTypeTable<int>::Builder builder;
-  MemoryTypeRange<int> range(0x1000u, 0x2000u, 0);
-  EXPECT_EQ(builder.Size(), 0u);
-  EXPECT_EQ(builder.Add(range), true);
-  EXPECT_EQ(builder.Lookup(range.Start() - 1u), nullptr);
-  EXPECT_EQ(builder.Size(), 1u);
-
-  const MemoryTypeRange<int>* first = builder.Lookup(range.Start());
-  ASSERT_TRUE(first != nullptr);
-  EXPECT_EQ(range, *first);
-
-  const MemoryTypeRange<int>* last = builder.Lookup(range.Limit() - 1u);
-  ASSERT_TRUE(last != nullptr);
-  EXPECT_EQ(range, *last);
-
-  EXPECT_EQ(builder.Lookup(range.Limit()), nullptr);
-}
-
-TEST(memory_type_table_builder, add_lookup_multi) {
-  MemoryTypeTable<char>::Builder builder;
-  MemoryTypeRange<char> ranges[3] = {
-    MemoryTypeRange<char>(0x1, 0x2, 'a'),
-    MemoryTypeRange<char>(0x2, 0x4, 'b'),
-    MemoryTypeRange<char>(0x4, 0x8, 'c'),
-  };
-
-  for (const auto& range : ranges) {
-    builder.Add(range);
-  }
-
-  ASSERT_EQ(builder.Size(), sizeof(ranges) / sizeof(ranges[0]));
-  ASSERT_TRUE(builder.Lookup(0x0) == nullptr);
-  ASSERT_TRUE(builder.Lookup(0x8) == nullptr);
-  for (const auto& range : ranges) {
-    auto first = builder.Lookup(range.Start());
-    ASSERT_TRUE(first != nullptr);
-    EXPECT_EQ(*first, range);
-
-    auto last = builder.Lookup(range.Limit() - 1);
-    ASSERT_TRUE(last != nullptr);
-    EXPECT_EQ(*last, range);
-  }
-}
-
-TEST(memory_type_table_builder, add_overlapping) {
-  MemoryTypeTable<int>::Builder builder;
-  MemoryTypeRange<int> range(0x1000u, 0x2000u, 0);
-  builder.Add(range);
-  EXPECT_EQ(builder.Size(), 1u);
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(0x0800u, 0x2800u, 0)));
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(0x0800u, 0x1800u, 0)));
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(0x1800u, 0x2800u, 0)));
-  EXPECT_EQ(builder.Size(), 1u);
-}
-
-TEST(memory_type_table_builder, add_zero_size) {
-  MemoryTypeTable<int>::Builder builder;
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(0x1000u, 0x1000u, 0)));
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x1000u, 0x1001u, 0)));
-  // Checking adjoining zero length don't get included
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(0x1000u, 0x1000u, 0)));
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(0x1001u, 0x1001u, 0)));
-  // Check around extremes
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(0x0u, 0x0u, 0)));
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(~0u, ~0u, 0)));
-}
-
-TEST(memory_type_table_builder, add_invalid_range) {
-  MemoryTypeTable<int>::Builder builder;
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(0x1000u, 0x1000u, 0)));
-  EXPECT_FALSE(builder.Add(MemoryTypeRange<int>(0x2000u, 0x1000u, 0)));
-}
-
-TEST(memory_type_table_builder, add_adjoining) {
-  MemoryTypeTable<int>::Builder builder;
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x1000u, 0x2000u, 0)));
-  EXPECT_EQ(builder.Size(), 1u);
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x0800u, 0x1000u, 0)));
-  EXPECT_EQ(builder.Size(), 1u);
-  ASSERT_NE(builder.Lookup(0x0900u), nullptr);
-  EXPECT_EQ(builder.Lookup(0x0900u)->Start(), 0x0800u);
-  EXPECT_EQ(builder.Lookup(0x0900u)->Limit(), 0x2000u);
-  EXPECT_EQ(builder.Lookup(0x0900u)->Type(), 0);
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x2000u, 0x2100u, 0)));
-  EXPECT_EQ(builder.Size(), 1u);
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x3000u, 0x3100u, 0)));
-  EXPECT_EQ(builder.Size(), 2u);
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x2100u, 0x3000u, 0)));
-  ASSERT_NE(builder.Lookup(0x2000u), nullptr);
-  EXPECT_EQ(builder.Lookup(0x2000u)->Start(), 0x0800u);
-  EXPECT_EQ(builder.Lookup(0x2000u)->Limit(), 0x3100u);
-  EXPECT_EQ(builder.Lookup(0x2000u)->Type(), 0);
-  EXPECT_EQ(builder.Size(), 1u);
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x4000u, 0x4100u, 0)));
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x4f00u, 0x5000u, 0)));
-  EXPECT_EQ(builder.Size(), 3u);
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x4100u, 0x4f00u, 0)));
-  ASSERT_NE(builder.Lookup(0x4f00u), nullptr);
-  ASSERT_EQ(builder.Lookup(0x4f00u)->Start(), 0x4000u);
-  ASSERT_EQ(builder.Lookup(0x4f00u)->Limit(), 0x5000u);
-  ASSERT_EQ(builder.Lookup(0x4f00u)->Type(), 0);
-  EXPECT_EQ(builder.Size(), 2u);
-  ASSERT_NE(builder.Lookup(0x4f00u), nullptr);
-}
-
-TEST(memory_type_table_builder, add_adjoining_different_type) {
-  MemoryTypeTable<int>::Builder builder;
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x0000u, 0x1000u, 1)));
-  EXPECT_EQ(builder.Size(), 1u);
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x1000u, 0x2000u, 2)));
-  EXPECT_EQ(builder.Size(), 2u);
-  EXPECT_TRUE(builder.Add(MemoryTypeRange<int>(0x2000u, 0x3000u, 3)));
-  EXPECT_EQ(builder.Size(), 3u);
-}
-
-TEST(memory_type_table, create) {
-  MemoryTypeTable<int>::Builder builder;
-  builder.Add(MemoryTypeRange<int>(0x1000u, 0x2000u, 0));
-  builder.Add(MemoryTypeRange<int>(0x2000u, 0x3000u, 1));
-  builder.Add(MemoryTypeRange<int>(0x4000u, 0x5000u, 2));
-
-  MemoryTypeTable<int> table = builder.Build();
-  EXPECT_TRUE(table.Lookup(0x0000u) == nullptr);
-  EXPECT_TRUE(table.Lookup(0x0800u) == nullptr);
-  EXPECT_TRUE(table.Lookup(0x3000u) == nullptr);
-  EXPECT_TRUE(table.Lookup(0x3fffu) == nullptr);
-  EXPECT_TRUE(table.Lookup(0x5000u) == nullptr);
-  EXPECT_TRUE(table.Lookup(~0u) == nullptr);
-
-  ASSERT_TRUE(table.Lookup(0x1000u) != nullptr);
-  ASSERT_TRUE(table.Lookup(0x1fffu) != nullptr);
-  EXPECT_EQ(*table.Lookup(0x1000u), MemoryTypeRange<int>(0x1000u, 0x2000u, 0));
-  EXPECT_EQ(*table.Lookup(0x1fffu), MemoryTypeRange<int>(0x1000u, 0x2000u, 0));
-  ASSERT_TRUE(table.Lookup(0x2000u) != nullptr);
-  ASSERT_TRUE(table.Lookup(0x2fffu) != nullptr);
-  EXPECT_EQ(*table.Lookup(0x2000u), MemoryTypeRange<int>(0x2000u, 0x3000u, 1));
-  EXPECT_EQ(*table.Lookup(0x2fffu), MemoryTypeRange<int>(0x2000u, 0x3000u, 1));
-  ASSERT_TRUE(table.Lookup(0x4000u) != nullptr);
-  ASSERT_TRUE(table.Lookup(0x4fffu) != nullptr);
-  EXPECT_EQ(*table.Lookup(0x4000u), MemoryTypeRange<int>(0x4000u, 0x5000u, 2));
-  EXPECT_EQ(*table.Lookup(0x4fffu), MemoryTypeRange<int>(0x4000u, 0x5000u, 2));
-}
-
-TEST(memory_type_table, find_all) {
-  static constexpr size_t kRangeCount = 64;
-  static constexpr uintptr_t kRangeSize = 1024;
-
-  MemoryTypeTable<int>::Builder builder;
-  for (size_t i = 0; i < kRangeCount; i++) {
-    const uintptr_t start = i * kRangeSize;
-    builder.Add(MemoryTypeRange<int>(start, start + kRangeSize, static_cast<int>(i)));
-  }
-
-  for (size_t delta = 0; delta < kRangeSize; delta += kRangeSize / 2) {
-    for (size_t i = 0; i < kRangeCount; i++) {
-      const uintptr_t start = i * kRangeSize;
-      const MemoryTypeRange<int> expected(start, start + kRangeSize, static_cast<int>(i));
-      const uintptr_t address = i * kRangeSize + delta;
-      const MemoryTypeRange<int>* actual = builder.Lookup(address);
-      ASSERT_TRUE(actual != nullptr) << reinterpret_cast<void*>(address);
-      EXPECT_EQ(expected, *actual) << reinterpret_cast<void*>(address);
-    }
-  }
-
-  MemoryTypeTable<int> table = builder.Build();
-  for (size_t delta = 0; delta < kRangeSize; delta += kRangeSize / 2) {
-    for (size_t i = 0; i < kRangeCount; i++) {
-      const uintptr_t start = i * kRangeSize;
-      const MemoryTypeRange<int> expected(start, start + kRangeSize, static_cast<int>(i));
-      const uintptr_t address = i * kRangeSize + delta;
-      const MemoryTypeRange<int>* actual = table.Lookup(address);
-      ASSERT_TRUE(actual != nullptr) << reinterpret_cast<void*>(address);
-      EXPECT_EQ(expected, *actual) << reinterpret_cast<void*>(address);
-    }
-  }
-}
-
-}  // namespace art
diff --git a/libartbase/base/os_linux.cc b/libartbase/base/os_linux.cc
index b25e5e8..337c54f 100644
--- a/libartbase/base/os_linux.cc
+++ b/libartbase/base/os_linux.cc
@@ -63,7 +63,7 @@
   bool read_only = ((flags & O_ACCMODE) == O_RDONLY);
   bool check_usage = !read_only && auto_flush;
   std::unique_ptr<File> file(
-      new File(name, flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH, check_usage));
+      new File(name, flags,  S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH, check_usage));
   if (!file->IsOpened()) {
     return nullptr;
   }
diff --git a/libartbase/base/safe_map.h b/libartbase/base/safe_map.h
index a4d8459..f57a2aa 100644
--- a/libartbase/base/safe_map.h
+++ b/libartbase/base/safe_map.h
@@ -70,18 +70,22 @@
   void swap(Self& other) { map_.swap(other.map_); }
   void clear() { map_.clear(); }
   iterator erase(iterator it) { return map_.erase(it); }
-  size_type erase(const K& k) { return map_.erase(k); }
+  template<typename Kv> size_type erase(const Kv& k) { return map_.erase(k); }
 
-  iterator find(const K& k) { return map_.find(k); }
-  const_iterator find(const K& k) const { return map_.find(k); }
+  template<typename Kv> iterator find(const Kv& k) { return map_.find(k); }
+  template<typename Kv> const_iterator find(const Kv& k) const { return map_.find(k); }
 
-  iterator lower_bound(const K& k) { return map_.lower_bound(k); }
-  const_iterator lower_bound(const K& k) const { return map_.lower_bound(k); }
+  template<typename Kv> iterator lower_bound(const Kv& k) { return map_.lower_bound(k); }
+  template<typename Kv> const_iterator lower_bound(const Kv& k) const {
+    return map_.lower_bound(k);
+  }
 
-  iterator upper_bound(const K& k) { return map_.upper_bound(k); }
-  const_iterator upper_bound(const K& k) const { return map_.upper_bound(k); }
+  template<typename Kv> iterator upper_bound(const Kv& k) { return map_.upper_bound(k); }
+  template<typename Kv> const_iterator upper_bound(const Kv& k) const {
+    return map_.upper_bound(k);
+  }
 
-  size_type count(const K& k) const { return map_.count(k); }
+  template<typename Kv> size_type count(const Kv& k) const { return map_.count(k); }
 
   // Note that unlike std::map's operator[], this doesn't return a reference to the value.
   V Get(const K& k) const {
diff --git a/libartbase/base/scoped_arena_allocator.cc b/libartbase/base/scoped_arena_allocator.cc
index a54f350..a87064f 100644
--- a/libartbase/base/scoped_arena_allocator.cc
+++ b/libartbase/base/scoped_arena_allocator.cc
@@ -106,6 +106,15 @@
   return ptr;
 }
 
+size_t ArenaStack::ApproximatePeakBytes() {
+  UpdateBytesAllocated();
+  size_t sum = 0;
+  for (Arena* arena = bottom_arena_; arena != nullptr; arena = arena->next_) {
+    sum += arena->bytes_allocated_;
+  }
+  return sum;
+}
+
 ScopedArenaAllocator::ScopedArenaAllocator(ScopedArenaAllocator&& other) noexcept
     : DebugStackReference(std::move(other)),
       DebugStackRefCounter(),
@@ -158,4 +167,29 @@
   }
 }
 
+size_t ScopedArenaAllocator::ApproximatePeakBytes() {
+  size_t subtract;
+  Arena* start;
+  if (LIKELY(mark_arena_ != nullptr)) {
+    start = mark_arena_;
+    size_t mark_free = static_cast<size_t>(mark_end_ - mark_ptr_);
+    DCHECK_GE(mark_arena_->bytes_allocated_, mark_arena_->size_ - mark_free);
+    subtract = mark_arena_->bytes_allocated_ - (mark_arena_->size_ - mark_free);
+  } else {
+    start = arena_stack_->bottom_arena_;
+    subtract = 0;
+  }
+
+  size_t sum = 0;
+  for (Arena* arena = start; arena != nullptr; arena = arena->next_) {
+    if (arena == arena_stack_->top_arena_) {
+      sum += static_cast<size_t>(arena_stack_->top_ptr_ - arena->Begin());
+      break;
+    } else {
+      sum += arena->bytes_allocated_;
+    }
+  }
+  return sum - subtract;
+}
+
 }  // namespace art
diff --git a/libartbase/base/scoped_arena_allocator.h b/libartbase/base/scoped_arena_allocator.h
index 52d0361..6de0192 100644
--- a/libartbase/base/scoped_arena_allocator.h
+++ b/libartbase/base/scoped_arena_allocator.h
@@ -59,6 +59,8 @@
     return PeakStats()->BytesAllocated();
   }
 
+  size_t ApproximatePeakBytes();
+
   MemStats GetPeakStats() const;
 
   // Return the arena tag associated with a pointer.
@@ -166,6 +168,8 @@
   // Get adapter for use in STL containers. See scoped_arena_containers.h .
   ScopedArenaAllocatorAdapter<void> Adapter(ArenaAllocKind kind = kArenaAllocSTL);
 
+  size_t ApproximatePeakBytes();
+
   // Allow a delete-expression to destroy but not deallocate allocators created by Create().
   static void operator delete(void* ptr ATTRIBUTE_UNUSED) {}
 
diff --git a/libartbase/base/stl_util.h b/libartbase/base/stl_util.h
index 1e071ce..fbafd53 100644
--- a/libartbase/base/stl_util.h
+++ b/libartbase/base/stl_util.h
@@ -18,10 +18,13 @@
 #define ART_LIBARTBASE_BASE_STL_UTIL_H_
 
 #include <algorithm>
+#include <iterator>
 #include <sstream>
 
 #include <android-base/logging.h>
 
+#include "base/iteration_range.h"
+
 namespace art {
 
 // STLDeleteContainerPointers()
@@ -146,6 +149,80 @@
   return result;
 }
 
+template <typename IterLeft, typename IterRight>
+class ZipLeftIter : public std::iterator<
+                        std::forward_iterator_tag,
+                        std::pair<typename IterLeft::value_type, typename IterRight::value_type>> {
+ public:
+  ZipLeftIter(IterLeft left, IterRight right) : left_iter_(left), right_iter_(right) {}
+  ZipLeftIter<IterLeft, IterRight>& operator++() {
+    ++left_iter_;
+    ++right_iter_;
+    return *this;
+  }
+  ZipLeftIter<IterLeft, IterRight> operator++(int) {
+    ZipLeftIter<IterLeft, IterRight> ret(left_iter_, right_iter_);
+    ++(*this);
+    return ret;
+  }
+  bool operator==(const ZipLeftIter<IterLeft, IterRight>& other) const {
+    return left_iter_ == other.left_iter_;
+  }
+  bool operator!=(const ZipLeftIter<IterLeft, IterRight>& other) const {
+    return !(*this == other);
+  }
+  std::pair<typename IterLeft::value_type, typename IterRight::value_type> operator*() const {
+    return std::make_pair(*left_iter_, *right_iter_);
+  }
+
+ private:
+  IterLeft left_iter_;
+  IterRight right_iter_;
+};
+
+class CountIter : public std::iterator<std::forward_iterator_tag, size_t, size_t, size_t, size_t> {
+ public:
+  CountIter() : count_(0) {}
+  explicit CountIter(size_t count) : count_(count) {}
+  CountIter& operator++() {
+    ++count_;
+    return *this;
+  }
+  CountIter operator++(int) {
+    size_t ret = count_;
+    ++count_;
+    return CountIter(ret);
+  }
+  bool operator==(const CountIter& other) const {
+    return count_ == other.count_;
+  }
+  bool operator!=(const CountIter& other) const {
+    return !(*this == other);
+  }
+  size_t operator*() const {
+    return count_;
+  }
+
+ private:
+  size_t count_;
+};
+
+// Make an iteration range that returns a pair of the element and the index of the element.
+template <typename Iter>
+static inline IterationRange<ZipLeftIter<Iter, CountIter>> ZipCount(IterationRange<Iter> iter) {
+  return IterationRange(ZipLeftIter(iter.begin(), CountIter(0)),
+                        ZipLeftIter(iter.end(), CountIter(-1)));
+}
+
+// Make an iteration range that returns a pair of the outputs of two iterators. Stops when the first
+// (left) one is exhausted. The left iterator must be at least as long as the right one.
+template <typename IterLeft, typename IterRight>
+static inline IterationRange<ZipLeftIter<IterLeft, IterRight>> ZipLeft(
+    IterationRange<IterLeft> iter_left, IterationRange<IterRight> iter_right) {
+  return IterationRange(ZipLeftIter(iter_left.begin(), iter_right.begin()),
+                        ZipLeftIter(iter_left.end(), iter_right.end()));
+}
+
 }  // namespace art
 
 #endif  // ART_LIBARTBASE_BASE_STL_UTIL_H_
diff --git a/libartbase/base/time_utils.h b/libartbase/base/time_utils.h
index 15805f3..69c867c 100644
--- a/libartbase/base/time_utils.h
+++ b/libartbase/base/time_utils.h
@@ -17,14 +17,15 @@
 #ifndef ART_LIBARTBASE_BASE_TIME_UTILS_H_
 #define ART_LIBARTBASE_BASE_TIME_UTILS_H_
 
-#include <stdint.h>
-#include <stdio.h>  // Needed for correct _WIN32 build.
+#ifdef _WIN32
+#include <stdio.h>  // Needed for correct macro definitions.
+#endif
+
 #include <time.h>
 
+#include <cstdint>
 #include <string>
 
-#include "macros.h"
-
 namespace art {
 
 enum TimeUnit {
diff --git a/libartbase/base/utils.cc b/libartbase/base/utils.cc
index 5af80f4..19311b3 100644
--- a/libartbase/base/utils.cc
+++ b/libartbase/base/utils.cc
@@ -16,6 +16,7 @@
 
 #include "utils.h"
 
+#include <dirent.h>
 #include <inttypes.h>
 #include <pthread.h>
 #include <sys/stat.h>
@@ -47,6 +48,7 @@
 #if defined(__linux__)
 #include <linux/unistd.h>
 #include <sys/syscall.h>
+#include <sys/utsname.h>
 #endif
 
 #if defined(_WIN32)
@@ -124,7 +126,6 @@
   // (2) fault handling that allows flushing/invalidation to continue after
   //     a missing page has been faulted in.
 
-  // In the common case, this flush of the complete range succeeds.
   uintptr_t start = reinterpret_cast<uintptr_t>(begin);
   const uintptr_t limit = reinterpret_cast<uintptr_t>(end);
   if (LIKELY(CacheFlush(start, limit) == 0)) {
@@ -155,6 +156,29 @@
 
 #endif
 
+bool CacheOperationsMaySegFault() {
+#if defined(__linux__) && defined(__aarch64__)
+  // Avoid issue on older ARM64 kernels where data cache operations could be classified as writes
+  // and cause segmentation faults. This was fixed in Linux 3.11rc2:
+  //
+  // https://github.com/torvalds/linux/commit/db6f41063cbdb58b14846e600e6bc3f4e4c2e888
+  //
+  // This behaviour means we should avoid the dual view JIT on the device. This is just
+  // an issue when running tests on devices that have an old kernel.
+  static constexpr int kRequiredMajor = 3;
+  static constexpr int kRequiredMinor = 12;
+  struct utsname uts;
+  int major, minor;
+  if (uname(&uts) != 0 ||
+      strcmp(uts.sysname, "Linux") != 0 ||
+      sscanf(uts.release, "%d.%d", &major, &minor) != 2 ||
+      (major < kRequiredMajor || (major == kRequiredMajor && minor < kRequiredMinor))) {
+    return true;
+  }
+#endif
+  return false;
+}
+
 pid_t GetTid() {
 #if defined(__APPLE__)
   uint64_t owner;
@@ -228,14 +252,14 @@
 }
 
 void SetThreadName(const char* thread_name) {
-  int hasAt = 0;
-  int hasDot = 0;
+  bool hasAt = false;
+  bool hasDot = false;
   const char* s = thread_name;
   while (*s) {
     if (*s == '.') {
-      hasDot = 1;
+      hasDot = true;
     } else if (*s == '@') {
-      hasAt = 1;
+      hasAt = true;
     }
     s++;
   }
@@ -285,7 +309,7 @@
 
 void SleepForever() {
   while (true) {
-    usleep(1000000);
+    sleep(100000000);
   }
 }
 
@@ -310,4 +334,44 @@
   return "<unknown>";
 }
 
+bool IsAddressKnownBackedByFileOrShared(const void* addr) {
+  // We use the Linux pagemap interface for knowing if an address is backed
+  // by a file or is shared. See:
+  // https://www.kernel.org/doc/Documentation/vm/pagemap.txt
+  uintptr_t vmstart = reinterpret_cast<uintptr_t>(AlignDown(addr, kPageSize));
+  off_t index = (vmstart / kPageSize) * sizeof(uint64_t);
+  android::base::unique_fd pagemap(open("/proc/self/pagemap", O_RDONLY | O_CLOEXEC));
+  if (pagemap == -1) {
+    return false;
+  }
+  if (lseek(pagemap, index, SEEK_SET) != index) {
+    return false;
+  }
+  uint64_t flags;
+  if (read(pagemap, &flags, sizeof(uint64_t)) != sizeof(uint64_t)) {
+    return false;
+  }
+  // From https://www.kernel.org/doc/Documentation/vm/pagemap.txt:
+  //  * Bit  61    page is file-page or shared-anon (since 3.5)
+  return (flags & (1LL << 61)) != 0;
+}
+
+int GetTaskCount() {
+  DIR* directory = opendir("/proc/self/task");
+  if (directory == nullptr) {
+    return -1;
+  }
+
+  uint32_t count = 0;
+  struct dirent* entry = nullptr;
+  while ((entry = readdir(directory)) != nullptr) {
+    if ((strcmp(entry->d_name, ".") == 0) || (strcmp(entry->d_name, "..") == 0)) {
+      continue;
+    }
+    ++count;
+  }
+  closedir(directory);
+  return count;
+}
+
 }  // namespace art
diff --git a/libartbase/base/utils.h b/libartbase/base/utils.h
index f434cb4..4bcb915 100644
--- a/libartbase/base/utils.h
+++ b/libartbase/base/utils.h
@@ -116,6 +116,9 @@
 // Flush CPU caches. Returns true on success, false if flush failed.
 WARN_UNUSED bool FlushCpuCaches(void* begin, void* end);
 
+// On some old kernels, a cache operation may segfault.
+WARN_UNUSED bool CacheOperationsMaySegFault();
+
 template <typename T>
 constexpr PointerSize ConvertToPointerSize(T any) {
   if (any == 4 || any == 8) {
@@ -151,6 +154,14 @@
 // there is an I/O error.
 std::string GetProcessStatus(const char* key);
 
+// Return whether the address is guaranteed to be backed by a file or is shared.
+// This information can be used to know whether MADV_DONTNEED will make
+// following accesses repopulate the memory or return zero.
+bool IsAddressKnownBackedByFileOrShared(const void* addr);
+
+// Returns the number of threads running.
+int GetTaskCount();
+
 }  // namespace art
 
 #endif  // ART_LIBARTBASE_BASE_UTILS_H_
diff --git a/libartbase/base/zip_archive.cc b/libartbase/base/zip_archive.cc
index 5056edc..c899039 100644
--- a/libartbase/base/zip_archive.cc
+++ b/libartbase/base/zip_archive.cc
@@ -258,7 +258,7 @@
 
   // Resist the urge to delete the space. <: is a bigraph sequence.
   std::unique_ptr< ::ZipEntry> zip_entry(new ::ZipEntry);
-  const int32_t error = FindEntry(handle_, ZipString(name), zip_entry.get());
+  const int32_t error = FindEntry(handle_, name, zip_entry.get());
   if (error) {
     *error_msg = std::string(ErrorCodeString(error));
     return nullptr;
diff --git a/libartimagevalues/Android.bp b/libartimagevalues/Android.bp
new file mode 100644
index 0000000..0f38952
--- /dev/null
+++ b/libartimagevalues/Android.bp
@@ -0,0 +1,17 @@
+// Inherit image values.
+art_global_defaults {
+    name: "libartimagevalues_defaults",
+}
+
+cc_library_static {
+    name: "libartimagevalues",
+    defaults: ["libartimagevalues_defaults"],
+    visibility: [
+        "//art/build/sdk",
+        // TODO(b/133140750): Clean this up.
+        "//frameworks/native/cmds/installd",
+    ],
+    srcs: ["art_image_values.cpp"],
+    export_include_dirs: ["."],
+    cflags: ["-Wconversion"],
+}
diff --git a/libartimagevalues/art_image_values.cpp b/libartimagevalues/art_image_values.cpp
new file mode 100644
index 0000000..849e218
--- /dev/null
+++ b/libartimagevalues/art_image_values.cpp
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "art_image_values.h"
+
+namespace android {
+namespace art {
+namespace imagevalues {
+
+uint32_t GetImageBaseAddress() {
+    return ART_BASE_ADDRESS;
+}
+int32_t GetImageMinBaseAddressDelta() {
+    return ART_BASE_ADDRESS_MIN_DELTA;
+}
+int32_t GetImageMaxBaseAddressDelta() {
+    return ART_BASE_ADDRESS_MAX_DELTA;
+}
+
+static_assert(ART_BASE_ADDRESS_MIN_DELTA < ART_BASE_ADDRESS_MAX_DELTA, "Inconsistent setup");
+
+}  // namespace imagevalues
+}  // namespace art
+}  // namespace android
diff --git a/libartimagevalues/art_image_values.h b/libartimagevalues/art_image_values.h
new file mode 100644
index 0000000..14e5f75
--- /dev/null
+++ b/libartimagevalues/art_image_values.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTIMAGEVALUES_ART_IMAGE_VALUES_H_
+#define ART_LIBARTIMAGEVALUES_ART_IMAGE_VALUES_H_
+
+#include <cstdint>
+
+namespace android {
+namespace art {
+namespace imagevalues {
+
+uint32_t GetImageBaseAddress();
+int32_t GetImageMinBaseAddressDelta();
+int32_t GetImageMaxBaseAddressDelta();
+
+}  // namespace imagevalues
+}  // namespace art
+}  // namespace android
+
+#endif  // ART_LIBARTIMAGEVALUES_ART_IMAGE_VALUES_H_
diff --git a/libartpalette/Android.bp b/libartpalette/Android.bp
index 5a3e986..b4b2e0b 100644
--- a/libartpalette/Android.bp
+++ b/libartpalette/Android.bp
@@ -15,46 +15,17 @@
 //
 
 cc_defaults {
-  name: "libartpalette_defaults",
-  defaults: ["art_defaults"],
-  host_supported: true,
-  export_include_dirs: ["include"],
+    name: "libartpalette_defaults",
+    defaults: ["art_defaults"],
+    host_supported: true,
+    export_include_dirs: ["include"],
 }
 
-// libartpalette-system is the implementation of the abstraction layer. It is
-// only available as a shared library on Android.
-art_cc_library {
-    name: "libartpalette-system",
-    defaults: ["libartpalette_defaults"],
-    compile_multilib: "both",
-    target: {
-        android: {
-          srcs: ["system/palette_android.cc",],
-          header_libs: ["libbase_headers"],
-          shared_libs: [
-            "libbase",
-            "libcutils",
-            "liblog",
-            "libprocessgroup",
-            "libtombstoned_client",
-          ],
-        },
-        host: {
-          header_libs: ["libbase_headers"],
-          srcs: ["system/palette_fake.cc",],
-          shared_libs: ["libbase"],
-        },
-        darwin: {
-            enabled: false,
-        },
-        windows: {
-            enabled: false,
-        },
-    },
-    static: {
-        enabled: false,
-    },
-    version_script: "libartpalette.map.txt",
+cc_library_headers {
+    name: "libartpalette-headers",
+    export_include_dirs: ["include"],
+    host_supported: true,
+    visibility: ["//system/libartpalette"],
 }
 
 // libartpalette is the dynamic loader of the platform abstraction
@@ -63,40 +34,35 @@
 art_cc_library {
     name: "libartpalette",
     defaults: ["libartpalette_defaults"],
-    required: ["libartpalette-system"],  // libartpalette.so dlopen()'s libartpalette-system.
+    required: ["libartpalette-system"], // libartpalette.so dlopen()'s libartpalette-system.
     header_libs: ["libbase_headers"],
     target: {
         // Targets supporting dlopen build the client library which loads
         // and binds the methods in the libartpalette-system library.
         android: {
             srcs: ["apex/palette.cc"],
-            shared: {
-                shared_libs: ["liblog"],
-            },
-            static: {
-                static_libs: ["liblog"],
-            },
+            shared_libs: ["liblog"],
             version_script: "libartpalette.map.txt",
         },
         linux_bionic: {
-          header_libs: ["libbase_headers"],
+            header_libs: ["libbase_headers"],
             srcs: ["system/palette_fake.cc"],
             shared: {
-              shared_libs: [
-                "libbase",
-                "liblog"
-              ],
+                shared_libs: [
+                    "libbase",
+                    "liblog",
+                ],
             },
             version_script: "libartpalette.map.txt",
         },
         linux_glibc: {
-          header_libs: ["libbase_headers"],
+            header_libs: ["libbase_headers"],
             srcs: ["system/palette_fake.cc"],
             shared: {
-              shared_libs: [
-                "libbase",
-                "liblog"
-              ],
+                shared_libs: [
+                    "libbase",
+                    "liblog",
+                ],
             },
             version_script: "libartpalette.map.txt",
         },
@@ -107,8 +73,8 @@
             header_libs: ["libbase_headers"],
             srcs: ["system/palette_fake.cc"],
             static_libs: [
-              "libbase",
-              "liblog"
+                "libbase",
+                "liblog",
             ],
         },
         windows: {
@@ -116,11 +82,17 @@
             header_libs: ["libbase_headers"],
             srcs: ["system/palette_fake.cc"],
             static_libs: [
-              "libbase",
-              "liblog"
+                "libbase",
+                "liblog",
             ],
         },
-    }
+    },
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+        // TODO(b/142944931): remove this
+        "com.android.runtime", // due to the transitive dependency from linker
+    ],
 }
 
 art_cc_test {
diff --git a/libartpalette/apex/palette.cc b/libartpalette/apex/palette.cc
index 3570798..041fe7a 100644
--- a/libartpalette/apex/palette.cc
+++ b/libartpalette/apex/palette.cc
@@ -151,4 +151,16 @@
   return m(name, value);
 }
 
+enum PaletteStatus PaletteAshmemCreateRegion(const char* name, size_t size, int* fd) {
+  PaletteAshmemCreateRegionMethod m =
+      PaletteLoader::Instance().GetPaletteAshmemCreateRegionMethod();
+  return m(name, size, fd);
+}
+
+enum PaletteStatus PaletteAshmemSetProtRegion(int fd, int prot) {
+  PaletteAshmemSetProtRegionMethod m =
+      PaletteLoader::Instance().GetPaletteAshmemSetProtRegionMethod();
+  return m(fd, prot);
+}
+
 }  // extern "C"
diff --git a/libartpalette/include/palette/palette_method_list.h b/libartpalette/include/palette/palette_method_list.h
index 2738b57..1140399 100644
--- a/libartpalette/include/palette/palette_method_list.h
+++ b/libartpalette/include/palette/palette_method_list.h
@@ -29,6 +29,8 @@
   M(PaletteTraceEnabled, /*out*/int32_t* enabled)                           \
   M(PaletteTraceBegin, const char* name)                                    \
   M(PaletteTraceEnd)                                                        \
-  M(PaletteTraceIntegerValue, const char* name, int32_t value)
+  M(PaletteTraceIntegerValue, const char* name, int32_t value)              \
+  M(PaletteAshmemCreateRegion, const char* name, size_t size, int* fd)      \
+  M(PaletteAshmemSetProtRegion, int, int)
 
 #endif  // ART_LIBARTPALETTE_INCLUDE_PALETTE_PALETTE_METHOD_LIST_H_
diff --git a/libartpalette/libartpalette.map.txt b/libartpalette/libartpalette.map.txt
index e589986..d2c90d5 100644
--- a/libartpalette/libartpalette.map.txt
+++ b/libartpalette/libartpalette.map.txt
@@ -25,6 +25,8 @@
     PaletteTraceBegin;
     PaletteTraceEnd;
     PaletteTraceIntegerValue;
+    PaletteAshmemCreateRegion;
+    PaletteAshmemSetProtRegion;
 
   local:
     *;
diff --git a/libartpalette/system/palette_android.cc b/libartpalette/system/palette_android.cc
deleted file mode 100644
index 0c9db9d..0000000
--- a/libartpalette/system/palette_android.cc
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Copyright (C) 2019 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define ATRACE_TAG ATRACE_TAG_DALVIK
-
-#include "palette/palette.h"
-
-#include <errno.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <unistd.h>
-
-#include <mutex>
-
-#include <android-base/file.h>
-#include <android-base/logging.h>
-#include <android-base/macros.h>
-#include <cutils/sched_policy.h>
-#include <cutils/trace.h>
-#include <log/event_tag_map.h>
-#include <tombstoned/tombstoned.h>
-#include <utils/Thread.h>
-
-#include "palette_system.h"
-
-enum PaletteStatus PaletteGetVersion(int32_t* version) {
-  *version = art::palette::kPaletteVersion;
-  return PaletteStatus::kOkay;
-}
-
-// Conversion map for "nice" values.
-//
-// We use Android thread priority constants to be consistent with the rest
-// of the system.  In some cases adjacent entries may overlap.
-//
-static const int kNiceValues[art::palette::kNumManagedThreadPriorities] = {
-  ANDROID_PRIORITY_LOWEST,                // 1 (MIN_PRIORITY)
-  ANDROID_PRIORITY_BACKGROUND + 6,
-  ANDROID_PRIORITY_BACKGROUND + 3,
-  ANDROID_PRIORITY_BACKGROUND,
-  ANDROID_PRIORITY_NORMAL,                // 5 (NORM_PRIORITY)
-  ANDROID_PRIORITY_NORMAL - 2,
-  ANDROID_PRIORITY_NORMAL - 4,
-  ANDROID_PRIORITY_URGENT_DISPLAY + 3,
-  ANDROID_PRIORITY_URGENT_DISPLAY + 2,
-  ANDROID_PRIORITY_URGENT_DISPLAY         // 10 (MAX_PRIORITY)
-};
-
-enum PaletteStatus PaletteSchedSetPriority(int32_t tid, int32_t managed_priority) {
-  if (managed_priority < art::palette::kMinManagedThreadPriority ||
-      managed_priority > art::palette::kMaxManagedThreadPriority) {
-    return PaletteStatus::kInvalidArgument;
-  }
-  int new_nice = kNiceValues[managed_priority - art::palette::kMinManagedThreadPriority];
-
-  // TODO: b/18249098 The code below is broken. It uses getpriority() as a proxy for whether a
-  // thread is already in the SP_FOREGROUND cgroup. This is not necessarily true for background
-  // processes, where all threads are in the SP_BACKGROUND cgroup. This means that callers will
-  // have to call setPriority twice to do what they want :
-  //
-  //     Thread.setPriority(Thread.MIN_PRIORITY);  // no-op wrt to cgroups
-  //     Thread.setPriority(Thread.MAX_PRIORITY);  // will actually change cgroups.
-  if (new_nice >= ANDROID_PRIORITY_BACKGROUND) {
-    set_sched_policy(tid, SP_BACKGROUND);
-  } else if (getpriority(PRIO_PROCESS, tid) >= ANDROID_PRIORITY_BACKGROUND) {
-    set_sched_policy(tid, SP_FOREGROUND);
-  }
-
-  if (setpriority(PRIO_PROCESS, tid, new_nice) != 0) {
-    return PaletteStatus::kCheckErrno;
-  }
-  return PaletteStatus::kOkay;
-}
-
-enum PaletteStatus PaletteSchedGetPriority(int32_t tid, /*out*/int32_t* managed_priority) {
-  errno = 0;
-  int native_priority = getpriority(PRIO_PROCESS, tid);
-  if (native_priority == -1 && errno != 0) {
-    *managed_priority = art::palette::kNormalManagedThreadPriority;
-    return PaletteStatus::kCheckErrno;
-  }
-
-  for (int p = art::palette::kMinManagedThreadPriority;
-       p <= art::palette::kMaxManagedThreadPriority;
-       p = p + 1) {
-    int index = p - art::palette::kMinManagedThreadPriority;
-    if (native_priority >= kNiceValues[index]) {
-      *managed_priority = p;
-      return PaletteStatus::kOkay;
-    }
-  }
-  *managed_priority = art::palette::kMaxManagedThreadPriority;
-  return PaletteStatus::kOkay;
-}
-
-enum PaletteStatus PaletteWriteCrashThreadStacks(/*in*/const char* stacks, size_t stacks_len) {
-  android::base::unique_fd tombstone_fd;
-  android::base::unique_fd output_fd;
-
-  if (!tombstoned_connect(getpid(), &tombstone_fd, &output_fd, kDebuggerdJavaBacktrace)) {
-    // Failure here could be due to file descriptor resource exhaustion
-    // so write the stack trace message to the log in case it helps
-    // debug that.
-    LOG(INFO) << std::string_view(stacks, stacks_len);
-    // tombstoned_connect() logs failure reason.
-    return PaletteStatus::kFailedCheckLog;
-  }
-
-  PaletteStatus status = PaletteStatus::kOkay;
-  if (!android::base::WriteFully(output_fd, stacks, stacks_len)) {
-    PLOG(ERROR) << "Failed to write tombstoned output";
-    TEMP_FAILURE_RETRY(ftruncate(output_fd, 0));
-    status = PaletteStatus::kFailedCheckLog;
-  }
-
-  if (TEMP_FAILURE_RETRY(fdatasync(output_fd)) == -1 && errno != EINVAL) {
-    // Ignore EINVAL so we don't report failure if we just tried to flush a pipe
-    // or socket.
-    if (status == PaletteStatus::kOkay) {
-      PLOG(ERROR) << "Failed to fsync tombstoned output";
-      status = PaletteStatus::kFailedCheckLog;
-    }
-    TEMP_FAILURE_RETRY(ftruncate(output_fd, 0));
-    TEMP_FAILURE_RETRY(fdatasync(output_fd));
-  }
-
-  if (close(output_fd.release()) == -1 && errno != EINTR) {
-    if (status == PaletteStatus::kOkay) {
-      PLOG(ERROR) << "Failed to close tombstoned output";
-      status = PaletteStatus::kFailedCheckLog;
-    }
-  }
-
-  if (!tombstoned_notify_completion(tombstone_fd)) {
-    // tombstoned_notify_completion() logs failure.
-    status = PaletteStatus::kFailedCheckLog;
-  }
-
-  return status;
-}
-
-enum PaletteStatus PaletteTraceEnabled(/*out*/int32_t* enabled) {
-  *enabled = (ATRACE_ENABLED() != 0) ? 1 : 0;
-  return PaletteStatus::kOkay;
-}
-
-enum PaletteStatus PaletteTraceBegin(const char* name) {
-  ATRACE_BEGIN(name);
-  return PaletteStatus::kOkay;
-}
-
-enum PaletteStatus PaletteTraceEnd() {
-  ATRACE_END();
-  return PaletteStatus::kOkay;
-}
-
-enum PaletteStatus PaletteTraceIntegerValue(const char* name, int32_t value) {
-  ATRACE_INT(name, value);
-  return PaletteStatus::kOkay;
-}
diff --git a/libartpalette/system/palette_fake.cc b/libartpalette/system/palette_fake.cc
index 4cc00d0..dc0ee76 100644
--- a/libartpalette/system/palette_fake.cc
+++ b/libartpalette/system/palette_fake.cc
@@ -75,3 +75,15 @@
                                             int32_t value ATTRIBUTE_UNUSED) {
   return PaletteStatus::kOkay;
 }
+
+enum PaletteStatus PaletteAshmemCreateRegion(const char* name ATTRIBUTE_UNUSED,
+                                             size_t size ATTRIBUTE_UNUSED,
+                                             int* fd) {
+  *fd = -1;
+  return PaletteStatus::kNotSupported;
+}
+
+enum PaletteStatus PaletteAshmemSetProtRegion(int fd ATTRIBUTE_UNUSED,
+                                              int prot ATTRIBUTE_UNUSED) {
+  return PaletteStatus::kNotSupported;
+}
diff --git a/libdexfile/Android.bp b/libdexfile/Android.bp
index 30d1bcd..86e8170 100644
--- a/libdexfile/Android.bp
+++ b/libdexfile/Android.bp
@@ -45,15 +45,13 @@
                 "libz",
             ],
             shared_libs: [
-                 // For MemMap.
-                 "libartbase",
-                 "libartpalette",
-                 "liblog",
-                 // For common macros.
-                 "libbase",
+                // For MemMap.
+                "libartpalette",
+                "liblog",
+                // For common macros.
+                "libbase",
             ],
             export_shared_lib_headers: [
-                "libartbase",
                 "libbase",
             ],
         },
@@ -61,15 +59,13 @@
             shared_libs: [
                 "libziparchive",
                 "libz",
-                 // For MemMap.
-                 "libartbase",
-                 "libartpalette",
-                 "liblog",
-                 // For common macros.
-                 "libbase",
+                // For MemMap.
+                "libartpalette",
+                "liblog",
+                // For common macros.
+                "libbase",
             ],
             export_shared_lib_headers: [
-                "libartbase",
                 "libbase",
             ],
         },
@@ -77,15 +73,13 @@
             static_libs: [
                 "libziparchive",
                 "libz",
-                 // For MemMap.
-                 "libartbase",
-                 "libartpalette",
-                 "liblog",
-                 // For common macros.
-                 "libbase",
+                // For MemMap.
+                "libartpalette",
+                "liblog",
+                // For common macros.
+                "libbase",
             ],
             export_static_lib_headers: [
-                "libartbase",
                 "libbase",
             ],
             cflags: ["-Wno-thread-safety"],
@@ -114,6 +108,9 @@
         "libartbase_static_defaults",
         "libdexfile_static_base_defaults",
     ],
+    defaults_visibility: [
+        "//art:__subpackages__",
+    ],
     static_libs: ["libdexfile"],
 }
 
@@ -143,20 +140,44 @@
 
 art_cc_library {
     name: "libdexfile",
-    defaults: ["libdexfile_defaults"],
-    // Leave the symbols in the shared library so that stack unwinders can
-    // produce meaningful name resolution.
-    strip: {
-        keep_symbols: true,
-    },
+    defaults: [
+        "libdexfile_defaults",
+        "libart_nativeunwind_defaults",
+    ],
     target: {
+        android: {
+            shared_libs: [
+                "libartbase",
+            ],
+            export_shared_lib_headers: [
+                "libartbase",
+            ],
+        },
+        not_windows: {
+            shared_libs: [
+                "libartbase",
+            ],
+            export_shared_lib_headers: [
+                "libartbase",
+            ],
+        },
         windows: {
             enabled: true,
             shared: {
                 enabled: false,
             },
+            static_libs: [
+                "libartbase",
+            ],
+            export_static_lib_headers: [
+                "libartbase",
+            ],
         },
     },
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_library {
@@ -166,13 +187,38 @@
         "libdexfile_defaults",
     ],
     target: {
+        android: {
+            shared_libs: [
+                "libartbased",
+            ],
+            export_shared_lib_headers: [
+                "libartbased",
+            ],
+        },
+        not_windows: {
+            shared_libs: [
+                "libartbased",
+            ],
+            export_shared_lib_headers: [
+                "libartbased",
+            ],
+        },
         windows: {
             enabled: true,
             shared: {
                 enabled: false,
             },
+            static_libs: [
+                "libartbased",
+            ],
+            export_static_lib_headers: [
+                "libartbased",
+            ],
         },
     },
+    apex_available: [
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_test {
@@ -200,16 +246,12 @@
         "libbacktrace",
         "libziparchive",
     ],
-    include_dirs: [
-        "external/zlib",
-    ],
 }
 
 cc_library_headers {
     name: "libdexfile_external_headers",
+    visibility: ["//visibility:public"],
     host_supported: true,
-    vendor_available: true,
-    recovery_available: true,
     header_libs: ["libbase_headers"],
     export_header_lib_headers: ["libbase_headers"],
     export_include_dirs: ["external/include"],
@@ -219,10 +261,29 @@
             enabled: true,
         },
     },
+
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.art.debug",
+        "com.android.art.release",
+    ],
 }
 
-cc_library {
-    name: "libdexfile_external",
+// Make dex_instruction_list.h available for tools/jvmti-agents/titrace
+cc_library_headers {
+    name: "libdexfile_all_headers",
+    visibility: ["//art:__subpackages__"],
+    host_supported: true,
+    export_include_dirs: ["."],
+
+    apex_available: [
+        "com.android.art.debug",
+        "com.android.art.release",
+    ],
+}
+
+cc_defaults {
+    name: "libdexfile_external-defaults",
     host_supported: true,
     srcs: [
         "external/dex_file_ext.cc",
@@ -230,19 +291,58 @@
     header_libs: ["libdexfile_external_headers"],
     shared_libs: [
         "libbase",
-        "libdexfile",
     ],
-
     stubs: {
         symbol_file: "external/libdexfile_external.map.txt",
         versions: ["1"],
     },
+    export_header_lib_headers: ["libdexfile_external_headers"],
+}
+
+cc_library {
+    name: "libdexfile_external",
+    defaults: [
+        "art_defaults",
+        "libdexfile_external-defaults",
+    ],
+    visibility: ["//visibility:public"],
+    target: {
+        darwin: {
+            enabled: true,
+        },
+    },
+    shared_libs: [
+        "libdexfile",
+    ],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
+}
+
+cc_library {
+    name: "libdexfiled_external",
+    defaults: [
+        "art_debug_defaults",
+        "libdexfile_external-defaults",
+    ],
+    target: {
+        darwin: {
+            enabled: true,
+        },
+    },
+    shared_libs: [
+        "libdexfiled",
+    ],
+    apex_available: [
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_test {
     name: "art_libdexfile_external_tests",
     host_supported: true,
-    test_per_src: true,  // For consistency with other ART gtests.
+    test_per_src: true, // For consistency with other ART gtests.
     srcs: [
         "external/dex_file_ext_c_test.c",
     ],
@@ -258,24 +358,21 @@
 // stack frames.
 cc_library {
     name: "libdexfile_support",
+    visibility: ["//visibility:public"],
     host_supported: true,
-    vendor_available: true,
-    recovery_available: true,
     srcs: [
         "external/dex_file_supp.cc",
     ],
-    required: ["libdexfile_external"],
+    runtime_libs: ["libdexfile_external"],
     shared_libs: ["liblog"],
     header_libs: ["libdexfile_external_headers"],
     export_header_lib_headers: ["libdexfile_external_headers"],
-    target: {
-        recovery: {
-            cflags: ["-DNO_DEXFILE_SUPPORT"],
-        },
-        vendor: {
-            cflags: ["-DNO_DEXFILE_SUPPORT"],
-        },
-    },
+
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.art.debug",
+        "com.android.art.release",
+    ],
 }
 
 // The same source file is used in two tests here, so unlike other ART gtests it
@@ -283,6 +380,9 @@
 // test-art-{host,target}-gtest-art_libdexfile_support_tests.
 art_cc_test {
     name: "art_libdexfile_support_tests",
+    defaults: [
+        "art_test_defaults",
+    ],
     host_supported: true,
     srcs: [
         "external/dex_file_supp_test.cc",
@@ -295,10 +395,9 @@
     ],
 }
 
-cc_library_static {
-    name: "libdexfile_support_static",
+cc_defaults {
+    name: "libdexfile_support_static_defaults",
     host_supported: true,
-    defaults: ["libdexfile_static_defaults"],
     srcs: [
         "external/dex_file_supp.cc",
     ],
@@ -306,8 +405,6 @@
     // Using whole_static_libs here only as a "poor man's transitivity" kludge.
     whole_static_libs: [
         "libbase",
-        "libdexfile",
-        "libdexfile_external",
         "liblog",
         "libz",
         "libziparchive",
@@ -316,6 +413,36 @@
     export_header_lib_headers: ["libdexfile_external_headers"],
 }
 
+cc_library_static {
+    name: "libdexfile_support_static",
+    visibility: [
+        "//art:__subpackages__",
+        // Required for the simpleperf binary in the NDK. No other modules than
+        // //system/extras/simpleperf:simpleperf_ndk are allowed to use it.
+        "//system/extras/simpleperf",
+    ],
+    defaults: [
+        "libdexfile_static_defaults",
+        "libdexfile_support_static_defaults",
+    ],
+    whole_static_libs: [
+        "libdexfile",
+        "libdexfile_external",
+    ],
+}
+
+cc_library_static {
+    name: "libdexfiled_support_static",
+    defaults: [
+        "libdexfile_support_static_defaults",
+        "libdexfiled_static_defaults",
+    ],
+    whole_static_libs: [
+        "libdexfiled",
+        "libdexfiled_external",
+    ],
+}
+
 // The same source file is used in two tests here, so unlike other ART gtests it
 // doesn't use test_per_src. Its test target is
 // test-art-{host,target}-gtest-art_libdexfile_support_static_tests.
diff --git a/libdexfile/dex/class_accessor.h b/libdexfile/dex/class_accessor.h
index 1628256..a3ee2bd 100644
--- a/libdexfile/dex/class_accessor.h
+++ b/libdexfile/dex/class_accessor.h
@@ -27,6 +27,7 @@
 namespace dex {
 struct ClassDef;
 struct CodeItem;
+class DexFileVerifier;
 }  // namespace dex
 
 class ClassIteratorData;
@@ -146,7 +147,7 @@
     uint32_t code_off_ = 0u;
 
     friend class ClassAccessor;
-    friend class DexFileVerifier;
+    friend class dex::DexFileVerifier;
   };
 
   // A decoded version of the field of a class_data_item.
@@ -172,7 +173,7 @@
 
     bool is_static_ = true;
     friend class ClassAccessor;
-    friend class DexFileVerifier;
+    friend class dex::DexFileVerifier;
   };
 
   template <typename DataType>
@@ -264,7 +265,7 @@
     // At iterator_end_, the iterator is no longer valid.
     const uint32_t iterator_end_;
 
-    friend class DexFileVerifier;
+    friend class dex::DexFileVerifier;
   };
 
   // Not explicit specifically for range-based loops.
@@ -390,7 +391,7 @@
   const uint32_t num_direct_methods_ = 0u;
   const uint32_t num_virtual_methods_ = 0u;
 
-  friend class DexFileVerifier;
+  friend class dex::DexFileVerifier;
 };
 
 }  // namespace art
diff --git a/libdexfile/dex/code_item_accessors-inl.h b/libdexfile/dex/code_item_accessors-inl.h
index 632a787..261b913 100644
--- a/libdexfile/dex/code_item_accessors-inl.h
+++ b/libdexfile/dex/code_item_accessors-inl.h
@@ -22,6 +22,7 @@
 #include "base/iteration_range.h"
 #include "compact_dex_file.h"
 #include "dex_file-inl.h"
+#include "dex_instruction_iterator.h"
 #include "standard_dex_file.h"
 
 // The no ART version is used by binaries that don't include the whole runtime.
diff --git a/libdexfile/dex/code_item_accessors.h b/libdexfile/dex/code_item_accessors.h
index 794f234..24296c8 100644
--- a/libdexfile/dex/code_item_accessors.h
+++ b/libdexfile/dex/code_item_accessors.h
@@ -21,7 +21,7 @@
 
 #include <android-base/logging.h>
 
-#include "dex_instruction_iterator.h"
+#include "dex_instruction.h"
 
 namespace art {
 
@@ -32,6 +32,7 @@
 
 class ArtMethod;
 class DexFile;
+class DexInstructionIterator;
 template <typename Iter>
 class IterationRange;
 
diff --git a/libdexfile/dex/descriptors_names.cc b/libdexfile/dex/descriptors_names.cc
index 1e8eb33..44cb7cb 100644
--- a/libdexfile/dex/descriptors_names.cc
+++ b/libdexfile/dex/descriptors_names.cc
@@ -165,7 +165,7 @@
 // Helper for IsValidPartOfMemberNameUtf8(), a bit vector indicating valid low ascii.
 static constexpr uint32_t DEX_MEMBER_VALID_LOW_ASCII[4] = {
   0x00000000,  // 00..1f low control characters; nothing valid
-  0x03ff2010,  // 20..3f digits and symbols; valid: '0'..'9', '$', '-'
+  0x03ff2011,  // 20..3f space, digits and symbols; valid: ' ', '0'..'9', '$', '-'
   0x87fffffe,  // 40..5f uppercase etc.; valid: 'A'..'Z', '_'
   0x07fffffe   // 60..7f lowercase etc.; valid: 'a'..'z'
 };
@@ -175,12 +175,17 @@
 static bool IsValidPartOfMemberNameUtf8Slow(const char** pUtf8Ptr) {
   /*
    * It's a multibyte encoded character. Decode it and analyze. We
-   * accept anything that isn't (a) an improperly encoded low value,
-   * (b) an improper surrogate pair, (c) an encoded '\0', (d) a high
-   * control character, or (e) a high space, layout, or special
-   * character (U+00a0, U+2000..U+200f, U+2028..U+202f,
-   * U+fff0..U+ffff). This is all specified in the dex format
-   * document.
+   * accept anything that isn't:
+   *   - an improperly encoded low value
+   *   - an improper surrogate pair
+   *   - an encoded '\0'
+   *   - a C1 control character U+0080..U+009f
+   *   - a format character U+200b..U+200f, U+2028..U+202e
+   *   - a special character U+fff0..U+ffff
+   * Prior to DEX format version 040, we also excluded some of the Unicode
+   * space characters:
+   *   - U+00a0, U+2000..U+200a, U+202f
+   * This is all specified in the dex format document.
    */
 
   const uint32_t pair = GetUtf16FromUtf8(pUtf8Ptr);
@@ -200,8 +205,8 @@
   // three byte UTF-8 sequence could be one half of a surrogate pair.
   switch (leading >> 8) {
     case 0x00:
-      // It's only valid if it's above the ISO-8859-1 high space (0xa0).
-      return (leading > 0x00a0);
+      // It's in the range that has C1 control characters.
+      return (leading >= 0x00a0);
     case 0xd8:
     case 0xd9:
     case 0xda:
@@ -222,11 +227,12 @@
       return false;
     case 0x20:
     case 0xff:
-      // It's in the range that has spaces, controls, and specials.
+      // It's in the range that has format characters and specials.
       switch (leading & 0xfff8) {
-        case 0x2000:
         case 0x2008:
+          return (leading <= 0x200a);
         case 0x2028:
+          return (leading == 0x202f);
         case 0xfff0:
         case 0xfff8:
           return false;
diff --git a/libdexfile/dex/dex_cache_resolved_classes.h b/libdexfile/dex/dex_cache_resolved_classes.h
deleted file mode 100644
index 4c9acbf..0000000
--- a/libdexfile/dex/dex_cache_resolved_classes.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_LIBDEXFILE_DEX_DEX_CACHE_RESOLVED_CLASSES_H_
-#define ART_LIBDEXFILE_DEX_DEX_CACHE_RESOLVED_CLASSES_H_
-
-#include <string>
-#include <unordered_set>
-#include <vector>
-
-#include "dex/dex_file_types.h"
-
-namespace art {
-
-// Data structure for passing around which classes belonging to a dex cache / dex file are resolved.
-class DexCacheResolvedClasses {
- public:
-  DexCacheResolvedClasses(const std::string& dex_location,
-                          const std::string& base_location,
-                          uint32_t location_checksum,
-                          uint32_t num_method_ids)
-      : dex_location_(dex_location),
-        base_location_(base_location),
-        location_checksum_(location_checksum),
-        num_method_ids_(num_method_ids) {}
-
-  // Only compare the key elements, ignore the resolved classes.
-  int Compare(const DexCacheResolvedClasses& other) const {
-    if (location_checksum_ != other.location_checksum_) {
-      return static_cast<int>(location_checksum_ - other.location_checksum_);
-    }
-    // Don't need to compare base_location_ since dex_location_ has more info.
-    return dex_location_.compare(other.dex_location_);
-  }
-
-  bool AddClass(dex::TypeIndex index) const {
-    return classes_.insert(index).second;
-  }
-
-  template <class InputIt>
-  void AddClasses(InputIt begin, InputIt end) const {
-    classes_.insert(begin, end);
-  }
-
-  const std::string& GetDexLocation() const {
-    return dex_location_;
-  }
-
-  const std::string& GetBaseLocation() const {
-    return base_location_;
-  }
-
-  uint32_t GetLocationChecksum() const {
-    return location_checksum_;
-  }
-
-  const std::unordered_set<dex::TypeIndex>& GetClasses() const {
-    return classes_;
-  }
-
-  size_t NumMethodIds() const {
-    return num_method_ids_;
-  }
-
- private:
-  const std::string dex_location_;
-  const std::string base_location_;
-  const uint32_t location_checksum_;
-  const uint32_t num_method_ids_;
-  // Array of resolved class def indexes.
-  mutable std::unordered_set<dex::TypeIndex> classes_;
-};
-
-inline bool operator<(const DexCacheResolvedClasses& a, const DexCacheResolvedClasses& b) {
-  return a.Compare(b) < 0;
-}
-
-}  // namespace art
-
-#endif  // ART_LIBDEXFILE_DEX_DEX_CACHE_RESOLVED_CLASSES_H_
diff --git a/libdexfile/dex/dex_file-inl.h b/libdexfile/dex/dex_file-inl.h
index 0c3f949..61c4593 100644
--- a/libdexfile/dex/dex_file-inl.h
+++ b/libdexfile/dex/dex_file-inl.h
@@ -27,6 +27,7 @@
 #include "compact_dex_file.h"
 #include "dex_instruction_iterator.h"
 #include "invoke_type.h"
+#include "signature.h"
 #include "standard_dex_file.h"
 
 namespace art {
diff --git a/libdexfile/dex/dex_file.cc b/libdexfile/dex/dex_file.cc
index 7db4de0..030b43b 100644
--- a/libdexfile/dex/dex_file.cc
+++ b/libdexfile/dex/dex_file.cc
@@ -30,6 +30,7 @@
 #include "android-base/stringprintf.h"
 
 #include "base/enums.h"
+#include "base/hiddenapi_domain.h"
 #include "base/leb128.h"
 #include "base/stl_util.h"
 #include "class_accessor-inl.h"
diff --git a/libdexfile/dex/dex_file.h b/libdexfile/dex/dex_file.h
index b892d82..ca95e0e 100644
--- a/libdexfile/dex/dex_file.h
+++ b/libdexfile/dex/dex_file.h
@@ -25,28 +25,31 @@
 #include <android-base/logging.h>
 
 #include "base/globals.h"
-#include "base/hiddenapi_domain.h"
 #include "base/macros.h"
 #include "base/value_object.h"
-#include "class_iterator.h"
 #include "dex_file_structs.h"
 #include "dex_file_types.h"
 #include "jni.h"
 #include "modifiers.h"
-#include "signature.h"
 
 namespace art {
 
 class ClassDataItemIterator;
+class ClassIterator;
 class CompactDexFile;
 class DexInstructionIterator;
 enum InvokeType : uint32_t;
 template <typename Iter> class IterationRange;
 class MemMap;
 class OatDexFile;
+class Signature;
 class StandardDexFile;
 class ZipArchive;
 
+namespace hiddenapi {
+enum class Domain : char;
+}  // namespace hiddenapi
+
 // Some instances of DexFile own the storage referred to by DexFile.  Clients who create
 // such management do so by subclassing Container.
 class DexFileContainer {
diff --git a/libdexfile/dex/dex_file_loader.cc b/libdexfile/dex/dex_file_loader.cc
index a719d41..7ccc93f 100644
--- a/libdexfile/dex/dex_file_loader.cc
+++ b/libdexfile/dex/dex_file_loader.cc
@@ -123,7 +123,7 @@
     DCHECK(name != nullptr);
     // Resist the urge to delete the space. <: is a bigraph sequence.
     std::unique_ptr< ::ZipEntry> zip_entry(new ::ZipEntry);
-    const int32_t error = FindEntry(handle_, ZipString(name), zip_entry.get());
+    const int32_t error = FindEntry(handle_, name, zip_entry.get());
     if (error) {
       *error_msg = std::string(ErrorCodeString(error));
       return nullptr;
@@ -370,12 +370,12 @@
     dex_file.reset();
     return nullptr;
   }
-  if (verify && !DexFileVerifier::Verify(dex_file.get(),
-                                         dex_file->Begin(),
-                                         dex_file->Size(),
-                                         location.c_str(),
-                                         verify_checksum,
-                                         error_msg)) {
+  if (verify && !dex::Verify(dex_file.get(),
+                             dex_file->Begin(),
+                             dex_file->Size(),
+                             location.c_str(),
+                             verify_checksum,
+                             error_msg)) {
     if (verify_result != nullptr) {
       *verify_result = VerifyResult::kVerifyFailed;
     }
diff --git a/libdexfile/dex/dex_file_loader_test.cc b/libdexfile/dex/dex_file_loader_test.cc
index 8b7ca17..30c60b1 100644
--- a/libdexfile/dex/dex_file_loader_test.cc
+++ b/libdexfile/dex/dex_file_loader_test.cc
@@ -336,23 +336,13 @@
   EXPECT_EQ(39u, header.GetVersion());
 }
 
-TEST_F(DexFileLoaderTest, Version40Rejected) {
+TEST_F(DexFileLoaderTest, Version40Accepted) {
   std::vector<uint8_t> dex_bytes;
-  DecodeDexFile(kRawDex40, &dex_bytes);
+  std::unique_ptr<const DexFile> raw(OpenDexFileBase64(kRawDex40, kLocationString, &dex_bytes));
+  ASSERT_TRUE(raw.get() != nullptr);
 
-  static constexpr bool kVerifyChecksum = true;
-  DexFileLoaderErrorCode error_code;
-  std::string error_msg;
-  std::vector<std::unique_ptr<const DexFile>> dex_files;
-  const DexFileLoader dex_file_loader;
-  ASSERT_FALSE(dex_file_loader.OpenAll(dex_bytes.data(),
-                                       dex_bytes.size(),
-                                       kLocationString,
-                                       /* verify= */ true,
-                                       kVerifyChecksum,
-                                       &error_code,
-                                       &error_msg,
-                                       &dex_files));
+  const DexFile::Header& header = raw->GetHeader();
+  EXPECT_EQ(40u, header.GetVersion());
 }
 
 TEST_F(DexFileLoaderTest, Version41Rejected) {
diff --git a/libdexfile/dex/dex_file_reference.h b/libdexfile/dex/dex_file_reference.h
index 3ac7781..1c6ba13 100644
--- a/libdexfile/dex/dex_file_reference.h
+++ b/libdexfile/dex/dex_file_reference.h
@@ -39,7 +39,7 @@
   };
 };
 
-// Default comparators, compares the indicies, not the backing data.
+// Default comparators, compares the indices, not the backing data.
 inline bool operator<(const DexFileReference& a, const DexFileReference& b) {
   return DexFileReference::Comparator()(a, b);
 }
diff --git a/libdexfile/dex/dex_file_verifier.cc b/libdexfile/dex/dex_file_verifier.cc
index 86a28e5..1d33271 100644
--- a/libdexfile/dex/dex_file_verifier.cc
+++ b/libdexfile/dex/dex_file_verifier.cc
@@ -16,36 +16,45 @@
 
 #include "dex_file_verifier.h"
 
-#include <inttypes.h>
-
+#include <algorithm>
+#include <bitset>
+#include <limits>
 #include <memory>
 
+#include "android-base/logging.h"
+#include "android-base/macros.h"
 #include "android-base/stringprintf.h"
 
+#include "base/hash_map.h"
 #include "base/leb128.h"
+#include "base/safe_map.h"
 #include "class_accessor-inl.h"
 #include "code_item_accessors-inl.h"
 #include "descriptors_names.h"
 #include "dex_file-inl.h"
+#include "dex_file_types.h"
 #include "modifiers.h"
 #include "utf-inl.h"
 
 namespace art {
+namespace dex {
 
 using android::base::StringAppendV;
 using android::base::StringPrintf;
 
-static constexpr uint32_t kTypeIdLimit = std::numeric_limits<uint16_t>::max();
+namespace {
 
-static bool IsValidOrNoTypeId(uint16_t low, uint16_t high) {
+constexpr uint32_t kTypeIdLimit = std::numeric_limits<uint16_t>::max();
+
+constexpr bool IsValidOrNoTypeId(uint16_t low, uint16_t high) {
   return (high == 0) || ((high == 0xffffU) && (low == 0xffffU));
 }
 
-static bool IsValidTypeId(uint16_t low ATTRIBUTE_UNUSED, uint16_t high) {
+constexpr bool IsValidTypeId(uint16_t low ATTRIBUTE_UNUSED, uint16_t high) {
   return (high == 0);
 }
 
-static uint32_t MapTypeToBitMask(DexFile::MapItemType map_item_type) {
+constexpr uint32_t MapTypeToBitMask(DexFile::MapItemType map_item_type) {
   switch (map_item_type) {
     case DexFile::kDexTypeHeaderItem:               return 1 << 0;
     case DexFile::kDexTypeStringIdItem:             return 1 << 1;
@@ -72,7 +81,7 @@
   return 0;
 }
 
-static bool IsDataSectionType(DexFile::MapItemType map_item_type) {
+constexpr bool IsDataSectionType(DexFile::MapItemType map_item_type) {
   switch (map_item_type) {
     case DexFile::kDexTypeHeaderItem:
     case DexFile::kDexTypeStringIdItem:
@@ -101,81 +110,367 @@
   return true;
 }
 
-const char* DexFileVerifier::CheckLoadStringByIdx(dex::StringIndex idx, const char* error_string) {
-  if (UNLIKELY(!CheckIndex(idx.index_, dex_file_->NumStringIds(), error_string))) {
-    return nullptr;
-  }
-  return dex_file_->StringDataByIdx(idx);
+// Fields and methods may have only one of public/protected/private.
+ALWAYS_INLINE
+constexpr bool CheckAtMostOneOfPublicProtectedPrivate(uint32_t flags) {
+  // Semantically we want 'return POPCOUNT(flags & kAcc) <= 1;'.
+  static_assert(IsPowerOfTwo(0), "0 not marked as power of two");
+  static_assert(IsPowerOfTwo(kAccPublic), "kAccPublic not marked as power of two");
+  static_assert(IsPowerOfTwo(kAccProtected), "kAccProtected not marked as power of two");
+  static_assert(IsPowerOfTwo(kAccPrivate), "kAccPrivate not marked as power of two");
+  return IsPowerOfTwo(flags & (kAccPublic | kAccProtected | kAccPrivate));
 }
 
-const char* DexFileVerifier::CheckLoadStringByTypeIdx(dex::TypeIndex type_idx,
-                                                      const char* error_string) {
-  if (UNLIKELY(!CheckIndex(type_idx.index_, dex_file_->NumTypeIds(), error_string))) {
-    return nullptr;
-  }
-  return CheckLoadStringByIdx(dex_file_->GetTypeId(type_idx).descriptor_idx_, error_string);
+// Helper functions to retrieve names from the dex file. We do not want to rely on DexFile
+// functionality, as we're still verifying the dex file. begin and header correspond to the
+// underscored variants in the DexFileVerifier.
+
+std::string GetString(const uint8_t* const begin,
+                      const DexFile::Header* const header,
+                      dex::StringIndex string_idx) {
+  // All sources of the `string_idx` have already been checked in CheckIntraSection().
+  DCHECK_LT(string_idx.index_, header->string_ids_size_);
+  const dex::StringId* string_id =
+      reinterpret_cast<const dex::StringId*>(begin + header->string_ids_off_) + string_idx.index_;
+
+  // The string offset has been checked at the start of `CheckInterSection()`
+  // to point to a string data item checked by `CheckIntraSection()`.
+  const uint8_t* ptr = begin + string_id->string_data_off_;
+  DecodeUnsignedLeb128(&ptr);  // Ignore the result.
+  return reinterpret_cast<const char*>(ptr);
 }
 
-const dex::FieldId* DexFileVerifier::CheckLoadFieldId(uint32_t idx, const char* error_string) {
-  if (UNLIKELY(!CheckIndex(idx, dex_file_->NumFieldIds(), error_string))) {
-    return nullptr;
-  }
-  return &dex_file_->GetFieldId(idx);
+std::string GetClass(const uint8_t* const begin,
+                     const DexFile::Header* const header,
+                     dex::TypeIndex class_idx) {
+  // All sources of `class_idx` have already been checked in CheckIntraSection().
+  CHECK_LT(class_idx.index_, header->type_ids_size_);
+
+  const dex::TypeId* type_id =
+      reinterpret_cast<const dex::TypeId*>(begin + header->type_ids_off_) + class_idx.index_;
+
+  // The `type_id->descriptor_idx_` has already been checked in CheckIntraTypeIdItem().
+  // However, it may not have been checked to be a valid descriptor, so return the raw
+  // string without converting with `PrettyDescriptor()`.
+  return GetString(begin, header, type_id->descriptor_idx_);
 }
 
-const dex::MethodId* DexFileVerifier::CheckLoadMethodId(uint32_t idx, const char* err_string) {
-  if (UNLIKELY(!CheckIndex(idx, dex_file_->NumMethodIds(), err_string))) {
-    return nullptr;
-  }
-  return &dex_file_->GetMethodId(idx);
+std::string GetFieldDescription(const uint8_t* const begin,
+                                const DexFile::Header* const header,
+                                uint32_t idx) {
+  // The `idx` has already been checked in `DexFileVerifier::CheckIntraClassDataItemFields()`.
+  CHECK_LT(idx, header->field_ids_size_);
+
+  const dex::FieldId* field_id =
+      reinterpret_cast<const dex::FieldId*>(begin + header->field_ids_off_) + idx;
+
+  // Indexes in `*field_id` have already been checked in CheckIntraFieldIdItem().
+  std::string class_name = GetClass(begin, header, field_id->class_idx_);
+  std::string field_name = GetString(begin, header, field_id->name_idx_);
+  return class_name + "." + field_name;
 }
 
-const dex::ProtoId* DexFileVerifier::CheckLoadProtoId(dex::ProtoIndex idx,
-                                                      const char* err_string) {
-  if (UNLIKELY(!CheckIndex(idx.index_, dex_file_->NumProtoIds(), err_string))) {
-    return nullptr;
-  }
-  return &dex_file_->GetProtoId(idx);
+std::string GetMethodDescription(const uint8_t* const begin,
+                                 const DexFile::Header* const header,
+                                 uint32_t idx) {
+  // The `idx` has already been checked in `DexFileVerifier::CheckIntraClassDataItemMethods()`.
+  CHECK_LT(idx, header->method_ids_size_);
+
+  const dex::MethodId* method_id =
+      reinterpret_cast<const dex::MethodId*>(begin + header->method_ids_off_) + idx;
+
+  // Indexes in `*method_id` have already been checked in CheckIntraMethodIdItem().
+  std::string class_name = GetClass(begin, header, method_id->class_idx_);
+  std::string method_name = GetString(begin, header, method_id->name_idx_);
+  return class_name + "." + method_name;
 }
 
-// Helper macro to load string and return false on error.
-#define LOAD_STRING(var, idx, error)                    \
-  const char* (var) = CheckLoadStringByIdx(idx, error); \
-  if (UNLIKELY((var) == nullptr)) {                     \
-    return false;                                       \
+}  // namespace
+
+// Note: the anonymous namespace would be nice, but we need friend access into accessors.
+
+class DexFileVerifier {
+ public:
+  DexFileVerifier(const DexFile* dex_file,
+                  const uint8_t* begin,
+                  size_t size,
+                  const char* location,
+                  bool verify_checksum)
+      : dex_file_(dex_file),
+        begin_(begin),
+        size_(size),
+        location_(location),
+        verify_checksum_(verify_checksum),
+        header_(&dex_file->GetHeader()),
+        ptr_(nullptr),
+        previous_item_(nullptr),
+        init_indices_{std::numeric_limits<size_t>::max(),
+                      std::numeric_limits<size_t>::max(),
+                      std::numeric_limits<size_t>::max(),
+                      std::numeric_limits<size_t>::max()} {
   }
 
-// Helper macro to load string by type idx and return false on error.
-#define LOAD_STRING_BY_TYPE(var, type_idx, error)                \
-  const char* (var) = CheckLoadStringByTypeIdx(type_idx, error); \
-  if (UNLIKELY((var) == nullptr)) {                              \
-    return false;                                                \
+  bool Verify();
+
+  const std::string& FailureReason() const {
+    return failure_reason_;
   }
 
-// Helper macro to load method id. Return last parameter on error.
-#define LOAD_METHOD(var, idx, error_string, error_stmt)                   \
-  const dex::MethodId* (var)  = CheckLoadMethodId(idx, error_string); \
-  if (UNLIKELY((var) == nullptr)) {                                       \
-    error_stmt;                                                           \
+ private:
+  bool CheckShortyDescriptorMatch(char shorty_char, const char* descriptor, bool is_return_type);
+  bool CheckListSize(const void* start, size_t count, size_t element_size, const char* label);
+  // Check a list. The head is assumed to be at *ptr, and elements to be of size element_size. If
+  // successful, the ptr will be moved forward the amount covered by the list.
+  bool CheckList(size_t element_size, const char* label, const uint8_t* *ptr);
+  // Checks whether the offset is zero (when size is zero) or that the offset falls within the area
+  // claimed by the file.
+  bool CheckValidOffsetAndSize(uint32_t offset, uint32_t size, size_t alignment, const char* label);
+  // Checks whether the size is less than the limit.
+  ALWAYS_INLINE bool CheckSizeLimit(uint32_t size, uint32_t limit, const char* label) {
+    if (size > limit) {
+      ErrorStringPrintf("Size(%u) should not exceed limit(%u) for %s.", size, limit, label);
+      return false;
+    }
+    return true;
+  }
+  ALWAYS_INLINE bool CheckIndex(uint32_t field, uint32_t limit, const char* label) {
+    if (UNLIKELY(field >= limit)) {
+      ErrorStringPrintf("Bad index for %s: %x >= %x", label, field, limit);
+      return false;
+    }
+    return true;
   }
 
-// Helper macro to load method id. Return last parameter on error.
-#define LOAD_FIELD(var, idx, fmt, error_stmt)                 \
-  const dex::FieldId* (var) = CheckLoadFieldId(idx, fmt); \
-  if (UNLIKELY((var) == nullptr)) {                           \
-    error_stmt;                                               \
+  bool CheckHeader();
+  bool CheckMap();
+
+  uint32_t ReadUnsignedLittleEndian(uint32_t size) {
+    uint32_t result = 0;
+    if (LIKELY(CheckListSize(ptr_, size, sizeof(uint8_t), "encoded_value"))) {
+      for (uint32_t i = 0; i < size; i++) {
+        result |= ((uint32_t) *(ptr_++)) << (i * 8);
+      }
+    }
+    return result;
+  }
+  bool CheckAndGetHandlerOffsets(const dex::CodeItem* code_item,
+                                 uint32_t* handler_offsets, uint32_t handlers_size);
+  bool CheckClassDataItemField(uint32_t idx,
+                               uint32_t access_flags,
+                               uint32_t class_access_flags,
+                               dex::TypeIndex class_type_index);
+  bool CheckClassDataItemMethod(uint32_t idx,
+                                uint32_t access_flags,
+                                uint32_t class_access_flags,
+                                dex::TypeIndex class_type_index,
+                                uint32_t code_offset,
+                                bool expect_direct);
+  ALWAYS_INLINE
+  bool CheckOrder(const char* type_descr, uint32_t curr_index, uint32_t prev_index) {
+    if (UNLIKELY(curr_index < prev_index)) {
+      ErrorStringPrintf("out-of-order %s indexes %" PRIu32 " and %" PRIu32,
+                        type_descr,
+                        prev_index,
+                        curr_index);
+      return false;
+    }
+    return true;
+  }
+  bool CheckStaticFieldTypes(const dex::ClassDef& class_def);
+
+  bool CheckPadding(size_t offset, uint32_t aligned_offset, DexFile::MapItemType type);
+  bool CheckEncodedValue();
+  bool CheckEncodedArray();
+  bool CheckEncodedAnnotation();
+
+  bool CheckIntraTypeIdItem();
+  bool CheckIntraProtoIdItem();
+  bool CheckIntraFieldIdItem();
+  bool CheckIntraMethodIdItem();
+  bool CheckIntraClassDefItem(uint32_t class_def_index);
+  bool CheckIntraMethodHandleItem();
+  bool CheckIntraTypeList();
+  // Check all fields of the given type, reading `encoded_field` entries from `ptr_`.
+  template <bool kStatic>
+  bool CheckIntraClassDataItemFields(size_t count);
+  // Check direct or virtual methods, reading `encoded_method` entries from `ptr_`.
+  // Check virtual methods against duplicates with direct methods.
+  bool CheckIntraClassDataItemMethods(size_t num_methods,
+                                      ClassAccessor::Method* direct_methods,
+                                      size_t num_direct_methods);
+  bool CheckIntraClassDataItem();
+
+  bool CheckIntraCodeItem();
+  bool CheckIntraStringDataItem();
+  bool CheckIntraDebugInfoItem();
+  bool CheckIntraAnnotationItem();
+  bool CheckIntraAnnotationsDirectoryItem();
+  bool CheckIntraHiddenapiClassData();
+
+  template <DexFile::MapItemType kType>
+  bool CheckIntraSectionIterate(size_t offset, uint32_t count);
+  template <DexFile::MapItemType kType>
+  bool CheckIntraIdSection(size_t offset, uint32_t count);
+  template <DexFile::MapItemType kType>
+  bool CheckIntraDataSection(size_t offset, uint32_t count);
+  bool CheckIntraSection();
+
+  bool CheckOffsetToTypeMap(size_t offset, uint16_t type);
+
+  // Returns kDexNoIndex if there are no fields/methods, otherwise a 16-bit type index.
+  uint32_t FindFirstClassDataDefiner(const ClassAccessor& accessor);
+  uint32_t FindFirstAnnotationsDirectoryDefiner(const uint8_t* ptr);
+
+  bool CheckInterStringIdItem();
+  bool CheckInterTypeIdItem();
+  bool CheckInterProtoIdItem();
+  bool CheckInterFieldIdItem();
+  bool CheckInterMethodIdItem();
+  bool CheckInterClassDefItem();
+  bool CheckInterCallSiteIdItem();
+  bool CheckInterAnnotationSetRefList();
+  bool CheckInterAnnotationSetItem();
+  bool CheckInterClassDataItem();
+  bool CheckInterAnnotationsDirectoryItem();
+
+  bool CheckInterSectionIterate(size_t offset, uint32_t count, DexFile::MapItemType type);
+  bool CheckInterSection();
+
+  void ErrorStringPrintf(const char* fmt, ...)
+      __attribute__((__format__(__printf__, 2, 3))) COLD_ATTR {
+    va_list ap;
+    va_start(ap, fmt);
+    DCHECK(failure_reason_.empty()) << failure_reason_;
+    failure_reason_ = StringPrintf("Failure to verify dex file '%s': ", location_);
+    StringAppendV(&failure_reason_, fmt, ap);
+    va_end(ap);
+  }
+  bool FailureReasonIsSet() const { return failure_reason_.size() != 0; }
+
+  // Check validity of the given access flags, interpreted for a field in the context of a class
+  // with the given second access flags.
+  bool CheckFieldAccessFlags(uint32_t idx,
+                             uint32_t field_access_flags,
+                             uint32_t class_access_flags,
+                             std::string* error_message);
+
+  // Check validity of the given method and access flags, in the context of a class with the given
+  // second access flags.
+  bool CheckMethodAccessFlags(uint32_t method_index,
+                              uint32_t method_access_flags,
+                              uint32_t class_access_flags,
+                              uint32_t constructor_flags_by_name,
+                              bool has_code,
+                              bool expect_direct,
+                              std::string* error_message);
+
+  // Check validity of given method if it's a constructor or class initializer.
+  bool CheckConstructorProperties(uint32_t method_index, uint32_t constructor_flags);
+
+  void FindStringRangesForMethodNames();
+
+  template <typename ExtraCheckFn>
+  bool VerifyTypeDescriptor(dex::TypeIndex idx, const char* error_msg, ExtraCheckFn extra_check);
+
+  const DexFile* const dex_file_;
+  const uint8_t* const begin_;
+  const size_t size_;
+  const char* const location_;
+  const bool verify_checksum_;
+  const DexFile::Header* const header_;
+
+  struct OffsetTypeMapEmptyFn {
+    // Make a hash map slot empty by making the offset 0. Offset 0 is a valid dex file offset that
+    // is in the offset of the dex file header. However, we only store data section items in the
+    // map, and these are after the header.
+    void MakeEmpty(std::pair<uint32_t, uint16_t>& pair) const {
+      pair.first = 0u;
+    }
+    // Check if a hash map slot is empty.
+    bool IsEmpty(const std::pair<uint32_t, uint16_t>& pair) const {
+      return pair.first == 0;
+    }
+  };
+  struct OffsetTypeMapHashCompareFn {
+    // Hash function for offset.
+    size_t operator()(const uint32_t key) const {
+      return key;
+    }
+    // std::equal function for offset.
+    bool operator()(const uint32_t a, const uint32_t b) const {
+      return a == b;
+    }
+  };
+  // Map from offset to dex file type, HashMap for performance reasons.
+  HashMap<uint32_t,
+          uint16_t,
+          OffsetTypeMapEmptyFn,
+          OffsetTypeMapHashCompareFn,
+          OffsetTypeMapHashCompareFn> offset_to_type_map_;
+  const uint8_t* ptr_;
+  const void* previous_item_;
+
+  std::string failure_reason_;
+
+  // Cached string indices for "interesting" entries wrt/ method names. Will be populated by
+  // FindStringRangesForMethodNames (which is automatically called before verifying the
+  // classdataitem section).
+  //
+  // Strings starting with '<' are in the range
+  //    [angle_bracket_start_index_,angle_bracket_end_index_).
+  // angle_init_angle_index_ and angle_clinit_angle_index_ denote the indices of "<init>" and
+  // "<clinit>", respectively. If any value is not found, the corresponding index will be larger
+  // than any valid string index for this dex file.
+  struct {
+    size_t angle_bracket_start_index;
+    size_t angle_bracket_end_index;
+    size_t angle_init_angle_index;
+    size_t angle_clinit_angle_index;
+  } init_indices_;
+
+  // A bitvector for verified type descriptors. Each bit corresponds to a type index. A set
+  // bit denotes that the descriptor has been verified wrt/ IsValidDescriptor.
+  std::vector<char> verified_type_descriptors_;
+
+  // Set of type ids for which there are ClassDef elements in the dex file. Using a bitset
+  // avoids all allocations. The bitset should be implemented as 8K of storage, which is
+  // tight enough for all callers.
+  std::bitset<kTypeIdLimit + 1> defined_classes_;
+
+  // Class definition indexes, valid only if corresponding `defined_classes_[.]` is true.
+  std::vector<uint16_t> defined_class_indexes_;
+};
+
+template <typename ExtraCheckFn>
+bool DexFileVerifier::VerifyTypeDescriptor(dex::TypeIndex idx,
+                                           const char* error_msg,
+                                           ExtraCheckFn extra_check) {
+  // All sources of the `idx` have already been checked in CheckIntraSection().
+  DCHECK_LT(idx.index_, header_->type_ids_size_);
+
+  auto err_fn = [&](const char* descriptor) {
+    ErrorStringPrintf("%s: '%s'", error_msg, descriptor);
+  };
+
+  char cached_char = verified_type_descriptors_[idx.index_];
+  if (cached_char != 0) {
+    if (!extra_check(cached_char)) {
+      const char* descriptor = dex_file_->StringByTypeIdx(idx);
+      err_fn(descriptor);
+      return false;
+    }
+    return true;
   }
 
-bool DexFileVerifier::Verify(const DexFile* dex_file,
-                             const uint8_t* begin,
-                             size_t size,
-                             const char* location,
-                             bool verify_checksum,
-                             std::string* error_msg) {
-  std::unique_ptr<DexFileVerifier> verifier(
-      new DexFileVerifier(dex_file, begin, size, location, verify_checksum));
-  if (!verifier->Verify()) {
-    *error_msg = verifier->FailureReason();
+  const char* descriptor = dex_file_->StringByTypeIdx(idx);
+  if (UNLIKELY(!IsValidDescriptor(descriptor))) {
+    err_fn(descriptor);
+    return false;
+  }
+  verified_type_descriptors_[idx.index_] = descriptor[0];
+
+  if (!extra_check(descriptor[0])) {
+    err_fn(descriptor);
     return false;
   }
   return true;
@@ -219,32 +514,24 @@
 
 bool DexFileVerifier::CheckListSize(const void* start, size_t count, size_t elem_size,
                                     const char* label) {
-  // Check that size is not 0.
-  CHECK_NE(elem_size, 0U);
+  // Check that element size is not 0.
+  DCHECK_NE(elem_size, 0U);
 
-  const uint8_t* range_start = reinterpret_cast<const uint8_t*>(start);
-  const uint8_t* file_start = reinterpret_cast<const uint8_t*>(begin_);
-
-  // Check for overflow.
-  uintptr_t max = 0 - 1;
-  size_t available_bytes_till_end_of_mem = max - reinterpret_cast<uintptr_t>(start);
-  size_t max_count = available_bytes_till_end_of_mem / elem_size;
-  if (max_count < count) {
-    ErrorStringPrintf("Overflow in range for %s: %zx for %zu@%zu", label,
-                      static_cast<size_t>(range_start - file_start),
-                      count, elem_size);
+  size_t offset = reinterpret_cast<const uint8_t*>(start) - begin_;
+  if (UNLIKELY(offset > size_)) {
+    ErrorStringPrintf("Offset beyond end of file for %s: %zx to %zx", label, offset, size_);
     return false;
   }
 
-  const uint8_t* range_end = range_start + count * elem_size;
-  const uint8_t* file_end = file_start + size_;
-  if (UNLIKELY((range_start < file_start) || (range_end > file_end))) {
-    // Note: these two tests are enough as we make sure above that there's no overflow.
-    ErrorStringPrintf("Bad range for %s: %zx to %zx", label,
-                      static_cast<size_t>(range_start - file_start),
-                      static_cast<size_t>(range_end - file_start));
+  // Calculate the number of elements that fit until the end of file,
+  // rather than calculating the end of the range as that could overflow.
+  size_t max_elements = (size_ - offset) / elem_size;
+  if (UNLIKELY(max_elements < count)) {
+    ErrorStringPrintf(
+        "List too large for %s: %zx+%zu*%zu > %zx", label, offset, count, elem_size, size_);
     return false;
   }
+
   return true;
 }
 
@@ -265,14 +552,6 @@
   return true;
 }
 
-bool DexFileVerifier::CheckIndex(uint32_t field, uint32_t limit, const char* label) {
-  if (UNLIKELY(field >= limit)) {
-    ErrorStringPrintf("Bad index for %s: %x >= %x", label, field, limit);
-    return false;
-  }
-  return true;
-}
-
 bool DexFileVerifier::CheckValidOffsetAndSize(uint32_t offset,
                                               uint32_t size,
                                               size_t alignment,
@@ -294,14 +573,6 @@
   return true;
 }
 
-bool DexFileVerifier::CheckSizeLimit(uint32_t size, uint32_t limit, const char* label) {
-  if (size > limit) {
-    ErrorStringPrintf("Size(%u) should not exceed limit(%u) for %s.", size, limit, label);
-    return false;
-  }
-  return true;
-}
-
 bool DexFileVerifier::CheckHeader() {
   // Check file size from the header.
   uint32_t expected_size = header_->file_size_;
@@ -494,17 +765,6 @@
   return true;
 }
 
-uint32_t DexFileVerifier::ReadUnsignedLittleEndian(uint32_t size) {
-  uint32_t result = 0;
-  if (LIKELY(CheckListSize(ptr_, size, sizeof(uint8_t), "encoded_value"))) {
-    for (uint32_t i = 0; i < size; i++) {
-      result |= ((uint32_t) *(ptr_++)) << (i * 8);
-    }
-  }
-  return result;
-}
-
-
 #define DECODE_UNSIGNED_CHECKED_FROM_WITH_ERROR_VALUE(ptr, var, error_value)  \
   uint32_t var;                                                               \
   if (!DecodeUnsignedLeb128Checked(&(ptr), begin_ + size_, &(var))) {         \
@@ -578,12 +838,9 @@
 bool DexFileVerifier::CheckClassDataItemField(uint32_t idx,
                                               uint32_t access_flags,
                                               uint32_t class_access_flags,
-                                              dex::TypeIndex class_type_index,
-                                              bool expect_static) {
-  // Check for overflow.
-  if (!CheckIndex(idx, header_->field_ids_size_, "class_data_item field_idx")) {
-    return false;
-  }
+                                              dex::TypeIndex class_type_index) {
+  // The `idx` has already been checked in `CheckIntraClassDataItemFields()`.
+  DCHECK_LE(idx, header_->field_ids_size_);
 
   // Check that it's the right class.
   dex::TypeIndex my_class_index =
@@ -595,13 +852,6 @@
     return false;
   }
 
-  // Check that it falls into the right class-data list.
-  bool is_static = (access_flags & kAccStatic) != 0;
-  if (UNLIKELY(is_static != expect_static)) {
-    ErrorStringPrintf("Static/instance field not in expected list");
-    return false;
-  }
-
   // Check field access flags.
   std::string error_msg;
   if (!CheckFieldAccessFlags(idx, access_flags, class_access_flags, &error_msg)) {
@@ -617,12 +867,9 @@
                                                uint32_t class_access_flags,
                                                dex::TypeIndex class_type_index,
                                                uint32_t code_offset,
-                                               ClassAccessor::Method* direct_method,
-                                               size_t* remaining_directs) {
-  // Check for overflow.
-  if (!CheckIndex(idx, header_->method_ids_size_, "class_data_item method_idx")) {
-    return false;
-  }
+                                               bool expect_direct) {
+  // The `idx` has already been checked in `CheckIntraClassDataItemMethods()`.
+  DCHECK_LT(idx, header_->method_ids_size_);
 
   const dex::MethodId& method_id =
       *(reinterpret_cast<const dex::MethodId*>(begin_ + header_->method_ids_off_) + idx);
@@ -636,29 +883,6 @@
     return false;
   }
 
-  // For virtual methods, we cross reference the method index to make sure it doesn't match any
-  // direct methods.
-  const bool expect_direct = direct_method == nullptr;
-  if (!expect_direct && *remaining_directs > 0) {
-    // The direct methods are already known to be in ascending index order. So just keep up
-    // with the current index.
-    while (true) {
-      const uint32_t direct_idx = direct_method->GetIndex();
-      if (direct_idx > idx) {
-        break;
-      }
-      if (direct_idx == idx) {
-        ErrorStringPrintf("Found virtual method with same index as direct method: %d", idx);
-        return false;
-      }
-      --*remaining_directs;
-      if (*remaining_directs == 0u) {
-        break;
-      }
-      direct_method->Read();
-    }
-  }
-
   std::string error_msg;
   uint32_t constructor_flags_by_name = 0;
   {
@@ -666,11 +890,11 @@
     if (!CheckIndex(string_idx, header_->string_ids_size_, "method flags verification")) {
       return false;
     }
-    if (UNLIKELY(string_idx < angle_bracket_end_index_) &&
-            string_idx >= angle_bracket_start_index_) {
-      if (string_idx == angle_clinit_angle_index_) {
+    if (UNLIKELY(string_idx < init_indices_.angle_bracket_end_index) &&
+            string_idx >= init_indices_.angle_bracket_start_index) {
+      if (string_idx == init_indices_.angle_clinit_angle_index) {
         constructor_flags_by_name = kAccStatic | kAccConstructor;
-      } else if (string_idx == angle_init_angle_index_) {
+      } else if (string_idx == init_indices_.angle_init_angle_index) {
         constructor_flags_by_name = kAccConstructor;
       } else {
         ErrorStringPrintf("Bad method name for method index %u", idx);
@@ -907,76 +1131,17 @@
   return true;
 }
 
-bool DexFileVerifier::FindClassIndexAndDef(uint32_t index,
-                                           bool is_field,
-                                           dex::TypeIndex* class_type_index,
-                                           const dex::ClassDef** output_class_def) {
-  DCHECK(class_type_index != nullptr);
-  DCHECK(output_class_def != nullptr);
-
-  // First check if the index is valid.
-  if (index >= (is_field ? header_->field_ids_size_ : header_->method_ids_size_)) {
-    return false;
-  }
-
-  // Next get the type index.
-  if (is_field) {
-    *class_type_index =
-        (reinterpret_cast<const dex::FieldId*>(begin_ + header_->field_ids_off_) + index)->
-            class_idx_;
-  } else {
-    *class_type_index =
-        (reinterpret_cast<const dex::MethodId*>(begin_ + header_->method_ids_off_) + index)->
-            class_idx_;
-  }
-
-  // Check if that is valid.
-  if (class_type_index->index_ >= header_->type_ids_size_) {
-    return false;
-  }
-
-  // Now search for the class def. This is basically a specialized version of the DexFile code, as
-  // we should not trust that this is a valid DexFile just yet.
-  const dex::ClassDef* class_def_begin =
-      reinterpret_cast<const dex::ClassDef*>(begin_ + header_->class_defs_off_);
-  for (size_t i = 0; i < header_->class_defs_size_; ++i) {
-    const dex::ClassDef* class_def = class_def_begin + i;
-    if (class_def->class_idx_ == *class_type_index) {
-      *output_class_def = class_def;
-      return true;
-    }
-  }
-
-  // Didn't find the class-def, not defined here...
-  return false;
-}
-
-bool DexFileVerifier::CheckOrder(const char* type_descr,
-                                 uint32_t curr_index,
-                                 uint32_t prev_index) {
-  if (UNLIKELY(curr_index < prev_index)) {
-    ErrorStringPrintf("out-of-order %s indexes %" PRIu32 " and %" PRIu32,
-                      type_descr,
-                      prev_index,
-                      curr_index);
-    return false;
-  }
-  return true;
-}
-
-bool DexFileVerifier::CheckStaticFieldTypes(const dex::ClassDef* class_def) {
-  if (class_def == nullptr) {
-    return true;
-  }
-
+bool DexFileVerifier::CheckStaticFieldTypes(const dex::ClassDef& class_def) {
   ClassAccessor accessor(*dex_file_, ptr_);
-  EncodedStaticFieldValueIterator array_it(*dex_file_, *class_def);
+  EncodedStaticFieldValueIterator array_it(*dex_file_, class_def);
 
   for (const ClassAccessor::Field& field : accessor.GetStaticFields()) {
     if (!array_it.HasNext()) {
       break;
     }
     uint32_t index = field.GetIndex();
+    // The `index` has already been checked in `CheckIntraClassDataItemFields()`.
+    DCHECK_LT(index, header_->field_ids_size_);
     const dex::TypeId& type_id = dex_file_->GetTypeId(dex_file_->GetFieldId(index).type_idx_);
     const char* field_type_name =
         dex_file_->GetStringData(dex_file_->GetStringId(type_id.descriptor_idx_));
@@ -1063,170 +1228,305 @@
   return true;
 }
 
-template <bool kStatic>
-bool DexFileVerifier::CheckIntraClassDataItemFields(size_t count,
-                                                    ClassAccessor::Field* field,
-                                                    bool* have_class,
-                                                    dex::TypeIndex* class_type_index,
-                                                    const dex::ClassDef** class_def) {
-  DCHECK(field != nullptr);
-  constexpr const char* kTypeDescr = kStatic ? "static field" : "instance field";
-
-  if (count == 0u) {
-    return true;
+bool DexFileVerifier::CheckIntraTypeIdItem() {
+  if (!CheckListSize(ptr_, 1, sizeof(dex::TypeId), "type_ids")) {
+    return false;
   }
-  field->Read();
 
-  if (!*have_class) {
-    *have_class = FindClassIndexAndDef(field->GetIndex(), true, class_type_index, class_def);
-    if (!*have_class) {
-      // Should have really found one.
-      ErrorStringPrintf("could not find declaring class for %s index %" PRIu32,
-                        kTypeDescr,
-                        field->GetIndex());
-      return false;
-    }
+  const dex::TypeId* type_id = reinterpret_cast<const dex::TypeId*>(ptr_);
+  if (!CheckIndex(type_id->descriptor_idx_.index_,
+                  header_->string_ids_size_,
+                  "type_id.descriptor")) {
+    return false;
   }
-  DCHECK(*class_def != nullptr);
 
-  uint32_t prev_index = 0;
-  for (size_t i = 0; ;) {
-    uint32_t curr_index = field->GetIndex();
-    // These calls use the raw access flags to check whether the whole dex field is valid.
-    if (!CheckOrder(kTypeDescr, curr_index, prev_index)) {
+  ptr_ += sizeof(dex::TypeId);
+  return true;
+}
+
+bool DexFileVerifier::CheckIntraProtoIdItem() {
+  if (!CheckListSize(ptr_, 1, sizeof(dex::ProtoId), "proto_ids")) {
+    return false;
+  }
+
+  const dex::ProtoId* proto_id = reinterpret_cast<const dex::ProtoId*>(ptr_);
+  if (!CheckIndex(proto_id->shorty_idx_.index_, header_->string_ids_size_, "proto_id.shorty") ||
+      !CheckIndex(proto_id->return_type_idx_.index_,
+                  header_->type_ids_size_,
+                  "proto_id.return_type")) {
+    return false;
+  }
+
+  ptr_ += sizeof(dex::ProtoId);
+  return true;
+}
+
+bool DexFileVerifier::CheckIntraFieldIdItem() {
+  if (!CheckListSize(ptr_, 1, sizeof(dex::FieldId), "field_ids")) {
+    return false;
+  }
+
+  const dex::FieldId* field_id = reinterpret_cast<const dex::FieldId*>(ptr_);
+  if (!CheckIndex(field_id->class_idx_.index_, header_->type_ids_size_, "field_id.class") ||
+      !CheckIndex(field_id->type_idx_.index_, header_->type_ids_size_, "field_id.type") ||
+      !CheckIndex(field_id->name_idx_.index_, header_->string_ids_size_, "field_id.name")) {
+    return false;
+  }
+
+  ptr_ += sizeof(dex::FieldId);
+  return true;
+}
+
+bool DexFileVerifier::CheckIntraMethodIdItem() {
+  if (!CheckListSize(ptr_, 1, sizeof(dex::MethodId), "method_ids")) {
+    return false;
+  }
+
+  const dex::MethodId* method_id = reinterpret_cast<const dex::MethodId*>(ptr_);
+  if (!CheckIndex(method_id->class_idx_.index_, header_->type_ids_size_, "method_id.class") ||
+      !CheckIndex(method_id->proto_idx_.index_, header_->proto_ids_size_, "method_id.proto") ||
+      !CheckIndex(method_id->name_idx_.index_, header_->string_ids_size_, "method_id.name")) {
+    return false;
+  }
+
+  ptr_ += sizeof(dex::MethodId);
+  return true;
+}
+
+bool DexFileVerifier::CheckIntraClassDefItem(uint32_t class_def_index) {
+  if (!CheckListSize(ptr_, 1, sizeof(dex::ClassDef), "class_defs")) {
+    return false;
+  }
+
+  const dex::ClassDef* class_def = reinterpret_cast<const dex::ClassDef*>(ptr_);
+  if (!CheckIndex(class_def->class_idx_.index_, header_->type_ids_size_, "class_def.class")) {
+    return false;
+  }
+
+  // Check superclass, if any.
+  if (UNLIKELY(class_def->pad2_ != 0u)) {
+    uint32_t combined =
+        (static_cast<uint32_t>(class_def->pad2_) << 16) + class_def->superclass_idx_.index_;
+    if (combined != 0xffffffffu) {
+      ErrorStringPrintf("Invalid superclass type padding/index: %x", combined);
       return false;
     }
-    if (!CheckClassDataItemField(curr_index,
-                                 field->GetAccessFlags(),
-                                 (*class_def)->access_flags_,
-                                 *class_type_index,
-                                 kStatic)) {
-      return false;
-    }
-    ++i;
-    if (i >= count) {
+  } else if (!CheckIndex(class_def->superclass_idx_.index_,
+                         header_->type_ids_size_,
+                         "class_def.superclass")) {
+    return false;
+  }
+
+  DCHECK_LE(class_def->class_idx_.index_, kTypeIdLimit);
+  DCHECK_LT(kTypeIdLimit, defined_classes_.size());
+  if (defined_classes_[class_def->class_idx_.index_]) {
+    ErrorStringPrintf("Redefinition of class with type idx: '%u'", class_def->class_idx_.index_);
+    return false;
+  }
+  defined_classes_[class_def->class_idx_.index_] = true;
+  DCHECK_LE(class_def->class_idx_.index_, defined_class_indexes_.size());
+  defined_class_indexes_[class_def->class_idx_.index_] = class_def_index;
+
+  ptr_ += sizeof(dex::ClassDef);
+  return true;
+}
+
+bool DexFileVerifier::CheckIntraMethodHandleItem() {
+  if (!CheckListSize(ptr_, 1, sizeof(dex::MethodHandleItem), "method_handles")) {
+    return false;
+  }
+
+  const dex::MethodHandleItem* item = reinterpret_cast<const dex::MethodHandleItem*>(ptr_);
+
+  DexFile::MethodHandleType method_handle_type =
+      static_cast<DexFile::MethodHandleType>(item->method_handle_type_);
+  if (method_handle_type > DexFile::MethodHandleType::kLast) {
+    ErrorStringPrintf("Bad method handle type %x", item->method_handle_type_);
+    return false;
+  }
+
+  uint32_t index = item->field_or_method_idx_;
+  switch (method_handle_type) {
+    case DexFile::MethodHandleType::kStaticPut:
+    case DexFile::MethodHandleType::kStaticGet:
+    case DexFile::MethodHandleType::kInstancePut:
+    case DexFile::MethodHandleType::kInstanceGet:
+      if (!CheckIndex(index, header_->field_ids_size_, "method_handle_item field_idx")) {
+        return false;
+      }
+      break;
+    case DexFile::MethodHandleType::kInvokeStatic:
+    case DexFile::MethodHandleType::kInvokeInstance:
+    case DexFile::MethodHandleType::kInvokeConstructor:
+    case DexFile::MethodHandleType::kInvokeDirect:
+    case DexFile::MethodHandleType::kInvokeInterface: {
+      if (!CheckIndex(index, header_->method_ids_size_, "method_handle_item method_idx")) {
+        return false;
+      }
       break;
     }
-    field->Read();
-    prev_index = curr_index;
+  }
+
+  ptr_ += sizeof(dex::MethodHandleItem);
+  return true;
+}
+
+bool DexFileVerifier::CheckIntraTypeList() {
+  const dex::TypeList* type_list = reinterpret_cast<const dex::TypeList*>(ptr_);
+  if (!CheckList(sizeof(dex::TypeItem), "type_list", &ptr_)) {
+    return false;
+  }
+
+  for (uint32_t i = 0, size = type_list->Size(); i != size; ++i) {
+    if (!CheckIndex(type_list->GetTypeItem(i).type_idx_.index_,
+                    header_->type_ids_size_,
+                    "type_list.type")) {
+      return false;
+    }
   }
 
   return true;
 }
 
-bool DexFileVerifier::CheckIntraClassDataItemMethods(ClassAccessor::Method* method,
-                                                     size_t num_methods,
-                                                     ClassAccessor::Method* direct_method,
-                                                     size_t num_directs,
-                                                     bool* have_class,
-                                                     dex::TypeIndex* class_type_index,
-                                                     const dex::ClassDef** class_def) {
-  DCHECK(method != nullptr);
-  const char* kTypeDescr = method->IsStaticOrDirect() ? "direct method" : "virtual method";
+template <bool kStatic>
+bool DexFileVerifier::CheckIntraClassDataItemFields(size_t count) {
+  constexpr const char* kTypeDescr = kStatic ? "static field" : "instance field";
 
-  if (num_methods == 0u) {
-    return true;
-  }
-  method->Read();
-
-  if (!*have_class) {
-    *have_class = FindClassIndexAndDef(method->GetIndex(), false, class_type_index, class_def);
-    if (!*have_class) {
-      // Should have really found one.
-      ErrorStringPrintf("could not find declaring class for %s index %" PRIu32,
-                        kTypeDescr,
-                        method->GetIndex());
-      return false;
-    }
-  }
-  DCHECK(*class_def != nullptr);
+  // We cannot use ClassAccessor::Field yet as it could read beyond the end of the data section.
+  const uint8_t* ptr = ptr_;
+  const uint8_t* data_end = begin_ + header_->data_off_ + header_->data_size_;
 
   uint32_t prev_index = 0;
-  for (size_t i = 0; ;) {
-    uint32_t curr_index = method->GetIndex();
+  for (size_t i = 0; i != count; ++i) {
+    uint32_t field_idx_diff, access_flags;
+    if (UNLIKELY(!DecodeUnsignedLeb128Checked(&ptr, data_end, &field_idx_diff)) ||
+        UNLIKELY(!DecodeUnsignedLeb128Checked(&ptr, data_end, &access_flags))) {
+      ErrorStringPrintf("encoded_field read out of bounds");
+      return false;
+    }
+    uint32_t curr_index = prev_index + field_idx_diff;
+    // Check for overflow.
+    if (!CheckIndex(curr_index, header_->field_ids_size_, "class_data_item field_idx")) {
+      return false;
+    }
     if (!CheckOrder(kTypeDescr, curr_index, prev_index)) {
       return false;
     }
-    if (!CheckClassDataItemMethod(curr_index,
-                                  method->GetAccessFlags(),
-                                  (*class_def)->access_flags_,
-                                  *class_type_index,
-                                  method->GetCodeItemOffset(),
-                                  direct_method,
-                                  &num_directs)) {
+    // Check that it falls into the right class-data list.
+    bool is_static = (access_flags & kAccStatic) != 0;
+    if (UNLIKELY(is_static != kStatic)) {
+      ErrorStringPrintf("Static/instance field not in expected list");
       return false;
     }
-    ++i;
-    if (i >= num_methods) {
-      break;
-    }
-    method->Read();
+
     prev_index = curr_index;
   }
 
+  ptr_ = ptr;
+  return true;
+}
+
+bool DexFileVerifier::CheckIntraClassDataItemMethods(size_t num_methods,
+                                                     ClassAccessor::Method* direct_methods,
+                                                     size_t num_direct_methods) {
+  DCHECK(num_direct_methods == 0u || direct_methods != nullptr);
+  const char* kTypeDescr = (direct_methods == nullptr) ? "direct method" : "virtual method";
+
+  // We cannot use ClassAccessor::Method yet as it could read beyond the end of the data section.
+  const uint8_t* ptr = ptr_;
+  const uint8_t* data_end = begin_ + header_->data_off_ + header_->data_size_;
+
+  // Load the first direct method for the check below.
+  size_t remaining_direct_methods = num_direct_methods;
+  if (remaining_direct_methods != 0u) {
+    DCHECK(direct_methods != nullptr);
+    direct_methods->Read();
+  }
+
+  uint32_t prev_index = 0;
+  for (size_t i = 0; i != num_methods; ++i) {
+    uint32_t method_idx_diff, access_flags, code_off;
+    if (UNLIKELY(!DecodeUnsignedLeb128Checked(&ptr, data_end, &method_idx_diff)) ||
+        UNLIKELY(!DecodeUnsignedLeb128Checked(&ptr, data_end, &access_flags)) ||
+        UNLIKELY(!DecodeUnsignedLeb128Checked(&ptr, data_end, &code_off))) {
+      ErrorStringPrintf("encoded_method read out of bounds");
+      return false;
+    }
+    uint32_t curr_index = prev_index + method_idx_diff;
+    // Check for overflow.
+    if (!CheckIndex(curr_index, header_->method_ids_size_, "class_data_item method_idx")) {
+      return false;
+    }
+    if (!CheckOrder(kTypeDescr, curr_index, prev_index)) {
+      return false;
+    }
+
+    // For virtual methods, we cross reference the method index to make sure
+    // it doesn't match any direct methods.
+    if (remaining_direct_methods != 0) {
+      // The direct methods are already known to be in ascending index order.
+      // So just keep up with the current index.
+      while (true) {
+        const uint32_t direct_idx = direct_methods->GetIndex();
+        if (direct_idx > curr_index) {
+          break;
+        }
+        if (direct_idx == curr_index) {
+          ErrorStringPrintf("Found virtual method with same index as direct method: %u",
+                            curr_index);
+          return false;
+        }
+        --remaining_direct_methods;
+        if (remaining_direct_methods == 0u) {
+          break;
+        }
+        direct_methods->Read();
+      }
+    }
+
+    prev_index = curr_index;
+  }
+
+  ptr_ = ptr;
   return true;
 }
 
 bool DexFileVerifier::CheckIntraClassDataItem() {
-  ClassAccessor accessor(*dex_file_, ptr_);
+  // We cannot use ClassAccessor yet as it could read beyond the end of the data section.
+  const uint8_t* ptr = ptr_;
+  const uint8_t* data_end = begin_ + header_->data_off_ + header_->data_size_;
 
-  // This code is complicated by the fact that we don't directly know which class this belongs to.
-  // So we need to explicitly search with the first item we find (either field or method), and then,
-  // as the lookup is expensive, cache the result.
-  bool have_class = false;
-  dex::TypeIndex class_type_index;
-  const dex::ClassDef* class_def = nullptr;
-
-  ClassAccessor::Field field(*dex_file_, accessor.ptr_pos_);
-  // Check fields.
-  if (!CheckIntraClassDataItemFields<true>(accessor.NumStaticFields(),
-                                           &field,
-                                           &have_class,
-                                           &class_type_index,
-                                           &class_def)) {
+  uint32_t static_fields_size, instance_fields_size, direct_methods_size, virtual_methods_size;
+  if (UNLIKELY(!DecodeUnsignedLeb128Checked(&ptr, data_end, &static_fields_size)) ||
+      UNLIKELY(!DecodeUnsignedLeb128Checked(&ptr, data_end, &instance_fields_size)) ||
+      UNLIKELY(!DecodeUnsignedLeb128Checked(&ptr, data_end, &direct_methods_size)) ||
+      UNLIKELY(!DecodeUnsignedLeb128Checked(&ptr, data_end, &virtual_methods_size))) {
+    ErrorStringPrintf("class_data_item read out of bounds");
     return false;
   }
-  field.NextSection();
-  if (!CheckIntraClassDataItemFields<false>(accessor.NumInstanceFields(),
-                                            &field,
-                                            &have_class,
-                                            &class_type_index,
-                                            &class_def)) {
+  ptr_ = ptr;
+
+  // Check fields.
+  if (!CheckIntraClassDataItemFields</*kStatic=*/ true>(static_fields_size)) {
+    return false;
+  }
+  if (!CheckIntraClassDataItemFields</*kStatic=*/ false>(instance_fields_size)) {
     return false;
   }
 
   // Check methods.
-  ClassAccessor::Method method(*dex_file_, field.ptr_pos_);
-  if (!CheckIntraClassDataItemMethods(&method,
-                                      accessor.NumDirectMethods(),
-                                      /* direct_method= */ nullptr,
-                                      0u,
-                                      &have_class,
-                                      &class_type_index,
-                                      &class_def)) {
+  const uint8_t* direct_methods_ptr = ptr_;
+  if (!CheckIntraClassDataItemMethods(direct_methods_size,
+                                      /*direct_methods=*/ nullptr,
+                                      /*num_direct_methods=*/ 0u)) {
     return false;
   }
-  ClassAccessor::Method direct_methods(*dex_file_, field.ptr_pos_);
-  method.NextSection();
-  if (accessor.NumDirectMethods() != 0u) {
-    direct_methods.Read();
-  }
-  if (!CheckIntraClassDataItemMethods(&method,
-                                      accessor.NumVirtualMethods(),
-                                      &direct_methods,
-                                      accessor.NumDirectMethods(),
-                                      &have_class,
-                                      &class_type_index,
-                                      &class_def)) {
+  // Direct methods have been checked, so we can now use ClassAccessor::Method to read them again.
+  ClassAccessor::Method direct_methods(*dex_file_, direct_methods_ptr);
+  if (!CheckIntraClassDataItemMethods(virtual_methods_size, &direct_methods, direct_methods_size)) {
     return false;
   }
 
-  // Check static field types against initial static values in encoded array.
-  if (!CheckStaticFieldTypes(class_def)) {
-    return false;
-  }
-
-  ptr_ = method.ptr_pos_;
   return true;
 }
 
@@ -1676,6 +1976,9 @@
 
   uint32_t last_idx = 0;
   for (uint32_t i = 0; i < field_count; i++) {
+    if (!CheckIndex(field_item->field_idx_, header_->field_ids_size_, "field annotation")) {
+      return false;
+    }
     if (UNLIKELY(last_idx >= field_item->field_idx_ && i != 0)) {
       ErrorStringPrintf("Out-of-order field_idx for annotation: %x then %x",
                         last_idx, field_item->field_idx_);
@@ -1698,6 +2001,9 @@
 
   last_idx = 0;
   for (uint32_t i = 0; i < method_count; i++) {
+    if (!CheckIndex(method_item->method_idx_, header_->method_ids_size_, "method annotation")) {
+      return false;
+    }
     if (UNLIKELY(last_idx >= method_item->method_idx_ && i != 0)) {
       ErrorStringPrintf("Out-of-order method_idx for annotation: %x then %x",
                        last_idx, method_item->method_idx_);
@@ -1718,6 +2024,11 @@
 
   last_idx = 0;
   for (uint32_t i = 0; i < parameter_count; i++) {
+    if (!CheckIndex(parameter_item->method_idx_,
+                    header_->method_ids_size_,
+                    "parameter annotation method")) {
+      return false;
+    }
     if (UNLIKELY(last_idx >= parameter_item->method_idx_ && i != 0)) {
       ErrorStringPrintf("Out-of-order method_idx for annotation: %x then %x",
                         last_idx, parameter_item->method_idx_);
@@ -1769,38 +2080,33 @@
         break;
       }
       case DexFile::kDexTypeTypeIdItem: {
-        if (!CheckListSize(ptr_, 1, sizeof(dex::TypeId), "type_ids")) {
+        if (!CheckIntraTypeIdItem()) {
           return false;
         }
-        ptr_ += sizeof(dex::TypeId);
         break;
       }
       case DexFile::kDexTypeProtoIdItem: {
-        if (!CheckListSize(ptr_, 1, sizeof(dex::ProtoId), "proto_ids")) {
+        if (!CheckIntraProtoIdItem()) {
           return false;
         }
-        ptr_ += sizeof(dex::ProtoId);
         break;
       }
       case DexFile::kDexTypeFieldIdItem: {
-        if (!CheckListSize(ptr_, 1, sizeof(dex::FieldId), "field_ids")) {
+        if (!CheckIntraFieldIdItem()) {
           return false;
         }
-        ptr_ += sizeof(dex::FieldId);
         break;
       }
       case DexFile::kDexTypeMethodIdItem: {
-        if (!CheckListSize(ptr_, 1, sizeof(dex::MethodId), "method_ids")) {
+        if (!CheckIntraMethodIdItem()) {
           return false;
         }
-        ptr_ += sizeof(dex::MethodId);
         break;
       }
       case DexFile::kDexTypeClassDefItem: {
-        if (!CheckListSize(ptr_, 1, sizeof(dex::ClassDef), "class_defs")) {
+        if (!CheckIntraClassDefItem(/*class_def_index=*/ i)) {
           return false;
         }
-        ptr_ += sizeof(dex::ClassDef);
         break;
       }
       case DexFile::kDexTypeCallSiteIdItem: {
@@ -1811,14 +2117,13 @@
         break;
       }
       case DexFile::kDexTypeMethodHandleItem: {
-        if (!CheckListSize(ptr_, 1, sizeof(dex::MethodHandleItem), "method_handles")) {
+        if (!CheckIntraMethodHandleItem()) {
           return false;
         }
-        ptr_ += sizeof(dex::MethodHandleItem);
         break;
       }
       case DexFile::kDexTypeTypeList: {
-        if (!CheckList(sizeof(dex::TypeItem), "type_list", &ptr_)) {
+        if (!CheckIntraTypeList()) {
           return false;
         }
         break;
@@ -1978,6 +2283,8 @@
     return false;
   }
 
+  // FIXME: Doing this check late means we may have already read memory outside the
+  // data section and potentially outside the file, thus risking a segmentation fault.
   size_t next_offset = ptr_ - begin_;
   if (next_offset > data_end) {
     ErrorStringPrintf("Out-of-bounds end of data subsection: %zu data_off=%u data_size=%u",
@@ -1997,6 +2304,13 @@
   uint32_t count = map->size_;
   ptr_ = begin_;
 
+  // Preallocate offset map to avoid some allocations. We can only guess from the list items,
+  // not derived things.
+  offset_to_type_map_.reserve(
+      std::min(header_->class_defs_size_, 65535u) +
+      std::min(header_->string_ids_size_, 65535u) +
+      2 * std::min(header_->method_ids_size_, 65535u));
+
   // Check the items listed in the map.
   for (; count != 0u; --count) {
     const size_t current_offset = offset;
@@ -2060,10 +2374,12 @@
         offset = section_offset + sizeof(uint32_t) + (map->size_ * sizeof(dex::MapItem));
         break;
 
-#define CHECK_INTRA_SECTION_ITERATE_CASE(type)                              \
-      case type:                                                            \
-        CheckIntraSectionIterate<type>(section_offset, section_count);      \
-        offset = ptr_ - begin_;                                             \
+#define CHECK_INTRA_SECTION_ITERATE_CASE(type)                                 \
+      case type:                                                               \
+        if (!CheckIntraSectionIterate<type>(section_offset, section_count)) {  \
+          return false;                                                        \
+        }                                                                      \
+        offset = ptr_ - begin_;                                                \
         break;
       CHECK_INTRA_SECTION_ITERATE_CASE(DexFile::kDexTypeMethodHandleItem)
       CHECK_INTRA_SECTION_ITERATE_CASE(DexFile::kDexTypeCallSiteIdItem)
@@ -2116,66 +2432,57 @@
   return true;
 }
 
-dex::TypeIndex DexFileVerifier::FindFirstClassDataDefiner(const uint8_t* ptr, bool* success) {
-  ClassAccessor accessor(*dex_file_, ptr);
-  *success = true;
-
+uint32_t DexFileVerifier::FindFirstClassDataDefiner(const ClassAccessor& accessor) {
+  // The data item and field/method indexes have already been checked in
+  // `CheckIntraClassDataItem()` or its helper functions.
   if (accessor.NumFields() != 0) {
     ClassAccessor::Field read_field(*dex_file_, accessor.ptr_pos_);
     read_field.Read();
-    LOAD_FIELD(field, read_field.GetIndex(), "first_class_data_definer field_id",
-               *success = false; return dex::TypeIndex(DexFile::kDexNoIndex16))
-    return field->class_idx_;
+    DCHECK_LE(read_field.GetIndex(), dex_file_->NumFieldIds());
+    return dex_file_->GetFieldId(read_field.GetIndex()).class_idx_.index_;
   }
 
   if (accessor.NumMethods() != 0) {
     ClassAccessor::Method read_method(*dex_file_, accessor.ptr_pos_);
     read_method.Read();
-    LOAD_METHOD(method, read_method.GetIndex(), "first_class_data_definer method_id",
-                *success = false; return dex::TypeIndex(DexFile::kDexNoIndex16))
-    return method->class_idx_;
+    DCHECK_LE(read_method.GetIndex(), dex_file_->NumMethodIds());
+    return dex_file_->GetMethodId(read_method.GetIndex()).class_idx_.index_;
   }
 
-  return dex::TypeIndex(DexFile::kDexNoIndex16);
+  return kDexNoIndex;
 }
 
-dex::TypeIndex DexFileVerifier::FindFirstAnnotationsDirectoryDefiner(const uint8_t* ptr,
-                                                                     bool* success) {
+uint32_t DexFileVerifier::FindFirstAnnotationsDirectoryDefiner(const uint8_t* ptr) {
+  // The annotations directory and field/method indexes have already been checked in
+  // `CheckIntraAnnotationsDirectoryItem()`.
   const dex::AnnotationsDirectoryItem* item =
       reinterpret_cast<const dex::AnnotationsDirectoryItem*>(ptr);
-  *success = true;
 
   if (item->fields_size_ != 0) {
     dex::FieldAnnotationsItem* field_items = (dex::FieldAnnotationsItem*) (item + 1);
-    LOAD_FIELD(field, field_items[0].field_idx_, "first_annotations_dir_definer field_id",
-               *success = false; return dex::TypeIndex(DexFile::kDexNoIndex16))
-    return field->class_idx_;
+    DCHECK_LE(field_items[0].field_idx_, dex_file_->NumFieldIds());
+    return dex_file_->GetFieldId(field_items[0].field_idx_).class_idx_.index_;
   }
 
   if (item->methods_size_ != 0) {
     dex::MethodAnnotationsItem* method_items = (dex::MethodAnnotationsItem*) (item + 1);
-    LOAD_METHOD(method, method_items[0].method_idx_, "first_annotations_dir_definer method id",
-                *success = false; return dex::TypeIndex(DexFile::kDexNoIndex16))
-    return method->class_idx_;
+    DCHECK_LE(method_items[0].method_idx_, dex_file_->NumMethodIds());
+    return dex_file_->GetMethodId(method_items[0].method_idx_).class_idx_.index_;
   }
 
   if (item->parameters_size_ != 0) {
     dex::ParameterAnnotationsItem* parameter_items = (dex::ParameterAnnotationsItem*) (item + 1);
-    LOAD_METHOD(method, parameter_items[0].method_idx_, "first_annotations_dir_definer method id",
-                *success = false; return dex::TypeIndex(DexFile::kDexNoIndex16))
-    return method->class_idx_;
+    DCHECK_LE(parameter_items[0].method_idx_, dex_file_->NumMethodIds());
+    return dex_file_->GetMethodId(parameter_items[0].method_idx_).class_idx_.index_;
   }
 
-  return dex::TypeIndex(DexFile::kDexNoIndex16);
+  return kDexNoIndex;
 }
 
 bool DexFileVerifier::CheckInterStringIdItem() {
   const dex::StringId* item = reinterpret_cast<const dex::StringId*>(ptr_);
 
-  // Check the map to make sure it has the right offset->type.
-  if (!CheckOffsetToTypeMap(item->string_data_off_, DexFile::kDexTypeStringDataItem)) {
-    return false;
-  }
+  // Note: The mapping to string data items is eagerly verified at the start of CheckInterSection().
 
   // Check ordering between items.
   if (previous_item_ != nullptr) {
@@ -2195,12 +2502,17 @@
 bool DexFileVerifier::CheckInterTypeIdItem() {
   const dex::TypeId* item = reinterpret_cast<const dex::TypeId*>(ptr_);
 
-  LOAD_STRING(descriptor, item->descriptor_idx_, "inter_type_id_item descriptor_idx")
-
-  // Check that the descriptor is a valid type.
-  if (UNLIKELY(!IsValidDescriptor(descriptor))) {
-    ErrorStringPrintf("Invalid type descriptor: '%s'", descriptor);
-    return false;
+  {
+    // Translate to index to potentially use cache.
+    // The check in `CheckIntraIdSection()` guarantees that this index is valid.
+    size_t index = item - reinterpret_cast<const dex::TypeId*>(begin_ + header_->type_ids_off_);
+    DCHECK_LE(index, header_->type_ids_size_);
+    if (UNLIKELY(!VerifyTypeDescriptor(
+        dex::TypeIndex(static_cast<decltype(dex::TypeIndex::index_)>(index)),
+        "Invalid type descriptor",
+        [](char) { return true; }))) {
+      return false;
+    }
   }
 
   // Check ordering between items.
@@ -2221,7 +2533,7 @@
 bool DexFileVerifier::CheckInterProtoIdItem() {
   const dex::ProtoId* item = reinterpret_cast<const dex::ProtoId*>(ptr_);
 
-  LOAD_STRING(shorty, item->shorty_idx_, "inter_proto_id_item shorty_idx")
+  const char* shorty = dex_file_->StringDataByIdx(item->shorty_idx_);
 
   if (item->parameters_off_ != 0 &&
       !CheckOffsetToTypeMap(item->parameters_off_, DexFile::kDexTypeTypeList)) {
@@ -2235,7 +2547,7 @@
     return false;
   }
   // Check the return type and advance the shorty.
-  LOAD_STRING_BY_TYPE(return_type, item->return_type_idx_, "inter_proto_id_item return_type_idx")
+  const char* return_type = dex_file_->StringByTypeIdx(item->return_type_idx_);
   if (!CheckShortyDescriptorMatch(*shorty, return_type, true)) {
     return false;
   }
@@ -2303,23 +2615,23 @@
   const dex::FieldId* item = reinterpret_cast<const dex::FieldId*>(ptr_);
 
   // Check that the class descriptor is valid.
-  LOAD_STRING_BY_TYPE(class_descriptor, item->class_idx_, "inter_field_id_item class_idx")
-  if (UNLIKELY(!IsValidDescriptor(class_descriptor) || class_descriptor[0] != 'L')) {
-    ErrorStringPrintf("Invalid descriptor for class_idx: '%s'", class_descriptor);
+  if (UNLIKELY(!VerifyTypeDescriptor(item->class_idx_,
+                                     "Invalid descriptor for class_idx",
+                                     [](char d) { return d == 'L'; }))) {
     return false;
   }
 
   // Check that the type descriptor is a valid field name.
-  LOAD_STRING_BY_TYPE(type_descriptor, item->type_idx_, "inter_field_id_item type_idx")
-  if (UNLIKELY(!IsValidDescriptor(type_descriptor) || type_descriptor[0] == 'V')) {
-    ErrorStringPrintf("Invalid descriptor for type_idx: '%s'", type_descriptor);
+  if (UNLIKELY(!VerifyTypeDescriptor(item->type_idx_,
+                                     "Invalid descriptor for type_idx",
+                                     [](char d) { return d != 'V'; }))) {
     return false;
   }
 
   // Check that the name is valid.
-  LOAD_STRING(descriptor, item->name_idx_, "inter_field_id_item name_idx")
-  if (UNLIKELY(!IsValidMemberName(descriptor))) {
-    ErrorStringPrintf("Invalid field name: '%s'", descriptor);
+  const char* field_name = dex_file_->StringDataByIdx(item->name_idx_);
+  if (UNLIKELY(!IsValidMemberName(field_name))) {
+    ErrorStringPrintf("Invalid field name: '%s'", field_name);
     return false;
   }
 
@@ -2350,17 +2662,16 @@
   const dex::MethodId* item = reinterpret_cast<const dex::MethodId*>(ptr_);
 
   // Check that the class descriptor is a valid reference name.
-  LOAD_STRING_BY_TYPE(class_descriptor, item->class_idx_, "inter_method_id_item class_idx")
-  if (UNLIKELY(!IsValidDescriptor(class_descriptor) || (class_descriptor[0] != 'L' &&
-                                                        class_descriptor[0] != '['))) {
-    ErrorStringPrintf("Invalid descriptor for class_idx: '%s'", class_descriptor);
+  if (UNLIKELY(!VerifyTypeDescriptor(item->class_idx_,
+                                     "Invalid descriptor for class_idx",
+                                     [](char d) { return d == 'L' || d == '['; }))) {
     return false;
   }
 
   // Check that the name is valid.
-  LOAD_STRING(descriptor, item->name_idx_, "inter_method_id_item name_idx")
-  if (UNLIKELY(!IsValidMemberName(descriptor))) {
-    ErrorStringPrintf("Invalid method name: '%s'", descriptor);
+  const char* method_name = dex_file_->StringDataByIdx(item->name_idx_);
+  if (UNLIKELY(!IsValidMemberName(method_name))) {
+    ErrorStringPrintf("Invalid method name: '%s'", method_name);
     return false;
   }
 
@@ -2409,15 +2720,10 @@
     return false;
   }
   // Check for duplicate class def.
-  if (defined_classes_.find(item->class_idx_) != defined_classes_.end()) {
-    ErrorStringPrintf("Redefinition of class with type idx: '%d'", item->class_idx_.index_);
-    return false;
-  }
-  defined_classes_.insert(item->class_idx_);
 
-  LOAD_STRING_BY_TYPE(class_descriptor, item->class_idx_, "inter_class_def_item class_idx")
-  if (UNLIKELY(!IsValidDescriptor(class_descriptor) || class_descriptor[0] != 'L')) {
-    ErrorStringPrintf("Invalid class descriptor: '%s'", class_descriptor);
+  if (UNLIKELY(!VerifyTypeDescriptor(item->class_idx_,
+                                     "Invalid class descriptor",
+                                     [](char d) { return d == 'L'; }))) {
     return false;
   }
 
@@ -2471,10 +2777,9 @@
       }
     }
 
-    LOAD_STRING_BY_TYPE(superclass_descriptor, item->superclass_idx_,
-                        "inter_class_def_item superclass_idx")
-    if (UNLIKELY(!IsValidDescriptor(superclass_descriptor) || superclass_descriptor[0] != 'L')) {
-      ErrorStringPrintf("Invalid superclass: '%s'", superclass_descriptor);
+    if (UNLIKELY(!VerifyTypeDescriptor(item->superclass_idx_,
+                                       "Invalid superclass",
+                                       [](char d) { return d == 'L'; }))) {
       return false;
     }
   }
@@ -2512,10 +2817,9 @@
       }
 
       // Ensure that the interface refers to a class (not an array nor a primitive type).
-      LOAD_STRING_BY_TYPE(inf_descriptor, interfaces->GetTypeItem(i).type_idx_,
-                          "inter_class_def_item interface type_idx")
-      if (UNLIKELY(!IsValidDescriptor(inf_descriptor) || inf_descriptor[0] != 'L')) {
-        ErrorStringPrintf("Invalid interface: '%s'", inf_descriptor);
+      if (UNLIKELY(!VerifyTypeDescriptor(interfaces->GetTypeItem(i).type_idx_,
+                                         "Invalid interface",
+                                         [](char d) { return d == 'L'; }))) {
         return false;
       }
     }
@@ -2538,14 +2842,10 @@
 
   // Check that references in class_data_item are to the right class.
   if (item->class_data_off_ != 0) {
-    const uint8_t* data = begin_ + item->class_data_off_;
-    bool success;
-    dex::TypeIndex data_definer = FindFirstClassDataDefiner(data, &success);
-    if (!success) {
-      return false;
-    }
-    if (UNLIKELY((data_definer != item->class_idx_) &&
-                 (data_definer != dex::TypeIndex(DexFile::kDexNoIndex16)))) {
+    ClassAccessor accessor(*dex_file_, begin_ + item->class_data_off_);
+    uint32_t data_definer = FindFirstClassDataDefiner(accessor);
+    DCHECK(IsUint<16>(data_definer) || data_definer == kDexNoIndex) << data_definer;
+    if (UNLIKELY((data_definer != item->class_idx_.index_) && (data_definer != kDexNoIndex))) {
       ErrorStringPrintf("Invalid class_data_item");
       return false;
     }
@@ -2559,13 +2859,9 @@
       return false;
     }
     const uint8_t* data = begin_ + item->annotations_off_;
-    bool success;
-    dex::TypeIndex annotations_definer = FindFirstAnnotationsDirectoryDefiner(data, &success);
-    if (!success) {
-      return false;
-    }
-    if (UNLIKELY((annotations_definer != item->class_idx_) &&
-                 (annotations_definer != dex::TypeIndex(DexFile::kDexNoIndex16)))) {
+    uint32_t defining_class = FindFirstAnnotationsDirectoryDefiner(data);
+    DCHECK(IsUint<16>(defining_class) || defining_class == kDexNoIndex) << defining_class;
+    if (UNLIKELY((defining_class != item->class_idx_.index_) && (defining_class != kDexNoIndex))) {
       ErrorStringPrintf("Invalid annotations_directory_item");
       return false;
     }
@@ -2630,39 +2926,6 @@
   return true;
 }
 
-bool DexFileVerifier::CheckInterMethodHandleItem() {
-  const dex::MethodHandleItem* item = reinterpret_cast<const dex::MethodHandleItem*>(ptr_);
-
-  DexFile::MethodHandleType method_handle_type =
-      static_cast<DexFile::MethodHandleType>(item->method_handle_type_);
-  if (method_handle_type > DexFile::MethodHandleType::kLast) {
-    ErrorStringPrintf("Bad method handle type %x", item->method_handle_type_);
-    return false;
-  }
-
-  uint32_t index = item->field_or_method_idx_;
-  switch (method_handle_type) {
-    case DexFile::MethodHandleType::kStaticPut:
-    case DexFile::MethodHandleType::kStaticGet:
-    case DexFile::MethodHandleType::kInstancePut:
-    case DexFile::MethodHandleType::kInstanceGet: {
-      LOAD_FIELD(field, index, "method_handle_item field_idx", return false);
-      break;
-    }
-    case DexFile::MethodHandleType::kInvokeStatic:
-    case DexFile::MethodHandleType::kInvokeInstance:
-    case DexFile::MethodHandleType::kInvokeConstructor:
-    case DexFile::MethodHandleType::kInvokeDirect:
-    case DexFile::MethodHandleType::kInvokeInterface: {
-      LOAD_METHOD(method, index, "method_handle_item method_idx", return false);
-      break;
-    }
-  }
-
-  ptr_ += sizeof(dex::MethodHandleItem);
-  return true;
-}
-
 bool DexFileVerifier::CheckInterAnnotationSetRefList() {
   const dex::AnnotationSetRefList* list = reinterpret_cast<const dex::AnnotationSetRefList*>(ptr_);
   const dex::AnnotationSetRefItem* item = list->list_;
@@ -2712,31 +2975,64 @@
 
 bool DexFileVerifier::CheckInterClassDataItem() {
   ClassAccessor accessor(*dex_file_, ptr_);
-  bool success;
-  dex::TypeIndex defining_class = FindFirstClassDataDefiner(ptr_, &success);
-  if (!success) {
-    return false;
+  uint32_t defining_class = FindFirstClassDataDefiner(accessor);
+  DCHECK(IsUint<16>(defining_class) || defining_class == kDexNoIndex) << defining_class;
+  if (defining_class == kDexNoIndex) {
+    return true;  // Empty definitions are OK (but useless) and could be shared by multiple classes.
   }
+  if (!defined_classes_[defining_class]) {
+      // Should really have a class definition for this class data item.
+      ErrorStringPrintf("Could not find declaring class for non-empty class data item.");
+      return false;
+  }
+  const dex::TypeIndex class_type_index(defining_class);
+  const dex::ClassDef& class_def = dex_file_->GetClassDef(defined_class_indexes_[defining_class]);
 
   for (const ClassAccessor::Field& read_field : accessor.GetFields()) {
-    LOAD_FIELD(field, read_field.GetIndex(), "inter_class_data_item field_id", return false)
-    if (UNLIKELY(field->class_idx_ != defining_class)) {
+    // The index has already been checked in `CheckIntraClassDataItemFields()`.
+    DCHECK_LE(read_field.GetIndex(), header_->field_ids_size_);
+    const dex::FieldId& field = dex_file_->GetFieldId(read_field.GetIndex());
+    if (UNLIKELY(field.class_idx_ != class_type_index)) {
       ErrorStringPrintf("Mismatched defining class for class_data_item field");
       return false;
     }
+    if (!CheckClassDataItemField(read_field.GetIndex(),
+                                 read_field.GetAccessFlags(),
+                                 class_def.access_flags_,
+                                 class_type_index)) {
+      return false;
+    }
   }
+  size_t num_direct_methods = accessor.NumDirectMethods();
+  size_t num_processed_methods = 0u;
   auto methods = accessor.GetMethods();
   auto it = methods.begin();
-  for (; it != methods.end(); ++it) {
+  for (; it != methods.end(); ++it, ++num_processed_methods) {
     uint32_t code_off = it->GetCodeItemOffset();
     if (code_off != 0 && !CheckOffsetToTypeMap(code_off, DexFile::kDexTypeCodeItem)) {
       return false;
     }
-    LOAD_METHOD(method, it->GetIndex(), "inter_class_data_item method_id", return false)
-    if (UNLIKELY(method->class_idx_ != defining_class)) {
+    // The index has already been checked in `CheckIntraClassDataItemMethods()`.
+    DCHECK_LE(it->GetIndex(), header_->method_ids_size_);
+    const dex::MethodId& method = dex_file_->GetMethodId(it->GetIndex());
+    if (UNLIKELY(method.class_idx_ != class_type_index)) {
       ErrorStringPrintf("Mismatched defining class for class_data_item method");
       return false;
     }
+    bool expect_direct = num_processed_methods < num_direct_methods;
+    if (!CheckClassDataItemMethod(it->GetIndex(),
+                                  it->GetAccessFlags(),
+                                  class_def.access_flags_,
+                                  class_type_index,
+                                  it->GetCodeItemOffset(),
+                                  expect_direct)) {
+      return false;
+    }
+  }
+
+  // Check static field types against initial static values in encoded array.
+  if (!CheckStaticFieldTypes(class_def)) {
+    return false;
   }
 
   ptr_ = it.GetDataPointer();
@@ -2746,11 +3042,8 @@
 bool DexFileVerifier::CheckInterAnnotationsDirectoryItem() {
   const dex::AnnotationsDirectoryItem* item =
       reinterpret_cast<const dex::AnnotationsDirectoryItem*>(ptr_);
-  bool success;
-  dex::TypeIndex defining_class = FindFirstAnnotationsDirectoryDefiner(ptr_, &success);
-  if (!success) {
-    return false;
-  }
+  uint32_t defining_class = FindFirstAnnotationsDirectoryDefiner(ptr_);
+  DCHECK(IsUint<16>(defining_class) || defining_class == kDexNoIndex) << defining_class;
 
   if (item->class_annotations_off_ != 0 &&
       !CheckOffsetToTypeMap(item->class_annotations_off_, DexFile::kDexTypeAnnotationSetItem)) {
@@ -2762,9 +3055,10 @@
       reinterpret_cast<const dex::FieldAnnotationsItem*>(item + 1);
   uint32_t field_count = item->fields_size_;
   for (uint32_t i = 0; i < field_count; i++) {
-    LOAD_FIELD(field, field_item->field_idx_, "inter_annotations_directory_item field_id",
-               return false)
-    if (UNLIKELY(field->class_idx_ != defining_class)) {
+    // The index has already been checked in `CheckIntraAnnotationsDirectoryItem()`.
+    DCHECK_LE(field_item->field_idx_, header_->field_ids_size_);
+    const dex::FieldId& field = dex_file_->GetFieldId(field_item->field_idx_);
+    if (UNLIKELY(field.class_idx_.index_ != defining_class)) {
       ErrorStringPrintf("Mismatched defining class for field_annotation");
       return false;
     }
@@ -2779,9 +3073,10 @@
       reinterpret_cast<const dex::MethodAnnotationsItem*>(field_item);
   uint32_t method_count = item->methods_size_;
   for (uint32_t i = 0; i < method_count; i++) {
-    LOAD_METHOD(method, method_item->method_idx_, "inter_annotations_directory_item method_id",
-                return false)
-    if (UNLIKELY(method->class_idx_ != defining_class)) {
+    // The index has already been checked in `CheckIntraAnnotationsDirectoryItem()`.
+    DCHECK_LE(method_item->method_idx_, header_->method_ids_size_);
+    const dex::MethodId& method = dex_file_->GetMethodId(method_item->method_idx_);
+    if (UNLIKELY(method.class_idx_.index_ != defining_class)) {
       ErrorStringPrintf("Mismatched defining class for method_annotation");
       return false;
     }
@@ -2796,9 +3091,10 @@
       reinterpret_cast<const dex::ParameterAnnotationsItem*>(method_item);
   uint32_t parameter_count = item->parameters_size_;
   for (uint32_t i = 0; i < parameter_count; i++) {
-    LOAD_METHOD(parameter_method, parameter_item->method_idx_,
-                "inter_annotations_directory_item parameter method_id", return false)
-    if (UNLIKELY(parameter_method->class_idx_ != defining_class)) {
+    // The index has already been checked in `CheckIntraAnnotationsDirectoryItem()`.
+    DCHECK_LE(parameter_item->method_idx_, header_->method_ids_size_);
+    const dex::MethodId& parameter_method = dex_file_->GetMethodId(parameter_item->method_idx_);
+    if (UNLIKELY(parameter_method.class_idx_.index_ != defining_class)) {
       ErrorStringPrintf("Mismatched defining class for parameter_annotation");
       return false;
     }
@@ -2842,6 +3138,7 @@
     // Check depending on the section type.
     switch (type) {
       case DexFile::kDexTypeHeaderItem:
+      case DexFile::kDexTypeMethodHandleItem:
       case DexFile::kDexTypeMapList:
       case DexFile::kDexTypeTypeList:
       case DexFile::kDexTypeCodeItem:
@@ -2883,13 +3180,9 @@
       }
       case DexFile::kDexTypeClassDefItem: {
         // There shouldn't be more class definitions than type ids allow.
-        // This check should be redundant, since there are checks that the
-        // class_idx_ is within range and that there is only one definition
-        // for a given type id.
-        if (i > kTypeIdLimit) {
-          ErrorStringPrintf("Too many class definition items");
-          return false;
-        }
+        // This is checked in `CheckIntraClassDefItem()` by checking the type
+        // index against `kTypeIdLimit` and rejecting dulicate definitions.
+        DCHECK_LE(i, kTypeIdLimit);
         if (!CheckInterClassDefItem()) {
           return false;
         }
@@ -2901,12 +3194,6 @@
         }
         break;
       }
-      case DexFile::kDexTypeMethodHandleItem: {
-        if (!CheckInterMethodHandleItem()) {
-          return false;
-        }
-        break;
-      }
       case DexFile::kDexTypeAnnotationSetRefList: {
         if (!CheckInterAnnotationSetRefList()) {
           return false;
@@ -2949,6 +3236,18 @@
 }
 
 bool DexFileVerifier::CheckInterSection() {
+  // Eagerly verify that `StringId` offsets map to string data items to make sure
+  // we can retrieve the string data for verifying other items (types, shorties, etc.).
+  // After this we can safely use `DexFile` helpers such as `GetFieldId()` or `GetMethodId()`
+  // but not `PrettyMethod()` or `PrettyField()` as descriptors have not been verified yet.
+  const dex::StringId* string_ids =
+      reinterpret_cast<const dex::StringId*>(begin_ + header_->string_ids_off_);
+  for (size_t i = 0, num_strings = header_->string_ids_size_; i != num_strings; ++i) {
+    if (!CheckOffsetToTypeMap(string_ids[i].string_data_off_, DexFile::kDexTypeStringDataItem)) {
+      return false;
+    }
+  }
+
   const dex::MapList* map = reinterpret_cast<const dex::MapList*>(begin_ + header_->map_off_);
   const dex::MapItem* item = map->list_;
   uint32_t count = map->size_;
@@ -3014,6 +3313,10 @@
     return false;
   }
 
+  DCHECK_LE(header_->type_ids_size_, kTypeIdLimit + 1u);  // Checked in CheckHeader().
+  verified_type_descriptors_.resize(header_->type_ids_size_, 0);
+  defined_class_indexes_.resize(header_->type_ids_size_);
+
   // Check structure within remaining sections.
   if (!CheckIntraSection()) {
     return false;
@@ -3027,102 +3330,6 @@
   return true;
 }
 
-void DexFileVerifier::ErrorStringPrintf(const char* fmt, ...) {
-  va_list ap;
-  va_start(ap, fmt);
-  DCHECK(failure_reason_.empty()) << failure_reason_;
-  failure_reason_ = StringPrintf("Failure to verify dex file '%s': ", location_);
-  StringAppendV(&failure_reason_, fmt, ap);
-  va_end(ap);
-}
-
-// Fields and methods may have only one of public/protected/private.
-ALWAYS_INLINE
-static constexpr bool CheckAtMostOneOfPublicProtectedPrivate(uint32_t flags) {
-  // Semantically we want 'return POPCOUNT(flags & kAcc) <= 1;'.
-  static_assert(IsPowerOfTwo(0), "0 not marked as power of two");
-  static_assert(IsPowerOfTwo(kAccPublic), "kAccPublic not marked as power of two");
-  static_assert(IsPowerOfTwo(kAccProtected), "kAccProtected not marked as power of two");
-  static_assert(IsPowerOfTwo(kAccPrivate), "kAccPrivate not marked as power of two");
-  return IsPowerOfTwo(flags & (kAccPublic | kAccProtected | kAccPrivate));
-}
-
-// Helper functions to retrieve names from the dex file. We do not want to rely on DexFile
-// functionality, as we're still verifying the dex file. begin and header correspond to the
-// underscored variants in the DexFileVerifier.
-
-static std::string GetStringOrError(const uint8_t* const begin,
-                                    const DexFile::Header* const header,
-                                    dex::StringIndex string_idx) {
-  // The `string_idx` is not guaranteed to be valid yet.
-  if (header->string_ids_size_ <= string_idx.index_) {
-    return "(error)";
-  }
-
-  const dex::StringId* string_id =
-      reinterpret_cast<const dex::StringId*>(begin + header->string_ids_off_) + string_idx.index_;
-
-  // Assume that the data is OK at this point. String data has been checked at this point.
-
-  const uint8_t* ptr = begin + string_id->string_data_off_;
-  uint32_t dummy;
-  if (!DecodeUnsignedLeb128Checked(&ptr, begin + header->file_size_, &dummy)) {
-    return "(error)";
-  }
-  return reinterpret_cast<const char*>(ptr);
-}
-
-static std::string GetClassOrError(const uint8_t* const begin,
-                                   const DexFile::Header* const header,
-                                   dex::TypeIndex class_idx) {
-  // The `class_idx` is either `FieldId::class_idx_` or `MethodId::class_idx_` and
-  // it has already been checked in `DexFileVerifier::CheckClassDataItemField()`
-  // or `DexFileVerifier::CheckClassDataItemMethod()`, respectively, to match
-  // a valid defining class.
-  CHECK_LT(class_idx.index_, header->type_ids_size_);
-
-  const dex::TypeId* type_id =
-      reinterpret_cast<const dex::TypeId*>(begin + header->type_ids_off_) + class_idx.index_;
-
-  // Assume that the data is OK at this point. Type id offsets have been checked at this point.
-
-  return GetStringOrError(begin, header, type_id->descriptor_idx_);
-}
-
-static std::string GetFieldDescriptionOrError(const uint8_t* const begin,
-                                              const DexFile::Header* const header,
-                                              uint32_t idx) {
-  // The `idx` has already been checked in `DexFileVerifier::CheckClassDataItemField()`.
-  CHECK_LT(idx, header->field_ids_size_);
-
-  const dex::FieldId* field_id =
-      reinterpret_cast<const dex::FieldId*>(begin + header->field_ids_off_) + idx;
-
-  // Assume that the data is OK at this point. Field id offsets have been checked at this point.
-
-  std::string class_name = GetClassOrError(begin, header, field_id->class_idx_);
-  std::string field_name = GetStringOrError(begin, header, field_id->name_idx_);
-
-  return class_name + "." + field_name;
-}
-
-static std::string GetMethodDescriptionOrError(const uint8_t* const begin,
-                                               const DexFile::Header* const header,
-                                               uint32_t idx) {
-  // The `idx` has already been checked in `DexFileVerifier::CheckClassDataItemMethod()`.
-  CHECK_LT(idx, header->method_ids_size_);
-
-  const dex::MethodId* method_id =
-      reinterpret_cast<const dex::MethodId*>(begin + header->method_ids_off_) + idx;
-
-  // Assume that the data is OK at this point. Method id offsets have been checked at this point.
-
-  std::string class_name = GetClassOrError(begin, header, method_id->class_idx_);
-  std::string method_name = GetStringOrError(begin, header, method_id->name_idx_);
-
-  return class_name + "." + method_name;
-}
-
 bool DexFileVerifier::CheckFieldAccessFlags(uint32_t idx,
                                             uint32_t field_access_flags,
                                             uint32_t class_access_flags,
@@ -3130,7 +3337,7 @@
   // Generally sort out >16-bit flags.
   if ((field_access_flags & ~kAccJavaFlagsMask) != 0) {
     *error_msg = StringPrintf("Bad field access_flags for %s: %x(%s)",
-                              GetFieldDescriptionOrError(begin_, header_, idx).c_str(),
+                              GetFieldDescription(begin_, header_, idx).c_str(),
                               field_access_flags,
                               PrettyJavaAccessFlags(field_access_flags).c_str());
     return false;
@@ -3150,7 +3357,7 @@
   // Fields may have only one of public/protected/final.
   if (!CheckAtMostOneOfPublicProtectedPrivate(field_access_flags)) {
     *error_msg = StringPrintf("Field may have only one of public/protected/private, %s: %x(%s)",
-                              GetFieldDescriptionOrError(begin_, header_, idx).c_str(),
+                              GetFieldDescription(begin_, header_, idx).c_str(),
                               field_access_flags,
                               PrettyJavaAccessFlags(field_access_flags).c_str());
     return false;
@@ -3162,7 +3369,7 @@
     constexpr uint32_t kPublicFinalStatic = kAccPublic | kAccFinal | kAccStatic;
     if ((field_access_flags & kPublicFinalStatic) != kPublicFinalStatic) {
       *error_msg = StringPrintf("Interface field is not public final static, %s: %x(%s)",
-                                GetFieldDescriptionOrError(begin_, header_, idx).c_str(),
+                                GetFieldDescription(begin_, header_, idx).c_str(),
                                 field_access_flags,
                                 PrettyJavaAccessFlags(field_access_flags).c_str());
       if (dex_file_->SupportsDefaultMethods()) {
@@ -3177,7 +3384,7 @@
     constexpr uint32_t kDisallowed = ~(kPublicFinalStatic | kAccSynthetic);
     if ((field_access_flags & kFieldAccessFlags & kDisallowed) != 0) {
       *error_msg = StringPrintf("Interface field has disallowed flag, %s: %x(%s)",
-                                GetFieldDescriptionOrError(begin_, header_, idx).c_str(),
+                                GetFieldDescription(begin_, header_, idx).c_str(),
                                 field_access_flags,
                                 PrettyJavaAccessFlags(field_access_flags).c_str());
       if (dex_file_->SupportsDefaultMethods()) {
@@ -3195,7 +3402,7 @@
   constexpr uint32_t kVolatileFinal = kAccVolatile | kAccFinal;
   if ((field_access_flags & kVolatileFinal) == kVolatileFinal) {
     *error_msg = StringPrintf("Fields may not be volatile and final: %s",
-                              GetFieldDescriptionOrError(begin_, header_, idx).c_str());
+                              GetFieldDescription(begin_, header_, idx).c_str());
     return false;
   }
 
@@ -3220,14 +3427,14 @@
   // '=' follows '<'
   static_assert('<' + 1 == '=', "Unexpected character relation");
   const auto angle_end = std::lower_bound(first, last, "=", compare);
-  angle_bracket_end_index_ = angle_end - first;
+  init_indices_.angle_bracket_end_index = angle_end - first;
 
   const auto angle_start = std::lower_bound(first, angle_end, "<", compare);
-  angle_bracket_start_index_ = angle_start - first;
+  init_indices_.angle_bracket_start_index = angle_start - first;
   if (angle_start == angle_end) {
     // No strings starting with '<'.
-    angle_init_angle_index_ = std::numeric_limits<size_t>::max();
-    angle_clinit_angle_index_ = std::numeric_limits<size_t>::max();
+    init_indices_.angle_init_angle_index = std::numeric_limits<size_t>::max();
+    init_indices_.angle_clinit_angle_index = std::numeric_limits<size_t>::max();
     return;
   }
 
@@ -3235,18 +3442,18 @@
     constexpr const char* kClinit = "<clinit>";
     const auto it = std::lower_bound(angle_start, angle_end, kClinit, compare);
     if (it != angle_end && strcmp(get_string(*it), kClinit) == 0) {
-      angle_clinit_angle_index_ = it - first;
+      init_indices_.angle_clinit_angle_index = it - first;
     } else {
-      angle_clinit_angle_index_ = std::numeric_limits<size_t>::max();
+      init_indices_.angle_clinit_angle_index = std::numeric_limits<size_t>::max();
     }
   }
   {
     constexpr const char* kInit = "<init>";
     const auto it = std::lower_bound(angle_start, angle_end, kInit, compare);
     if (it != angle_end && strcmp(get_string(*it), kInit) == 0) {
-      angle_init_angle_index_ = it - first;
+      init_indices_.angle_init_angle_index = it - first;
     } else {
-      angle_init_angle_index_ = std::numeric_limits<size_t>::max();
+      init_indices_.angle_init_angle_index = std::numeric_limits<size_t>::max();
     }
   }
 }
@@ -3263,7 +3470,7 @@
       kAccJavaFlagsMask | kAccConstructor | kAccDeclaredSynchronized;
   if ((method_access_flags & ~kAllMethodFlags) != 0) {
     *error_msg = StringPrintf("Bad method access_flags for %s: %x",
-                              GetMethodDescriptionOrError(begin_, header_, method_index).c_str(),
+                              GetMethodDescription(begin_, header_, method_index).c_str(),
                               method_access_flags);
     return false;
   }
@@ -3285,7 +3492,7 @@
   // Methods may have only one of public/protected/final.
   if (!CheckAtMostOneOfPublicProtectedPrivate(method_access_flags)) {
     *error_msg = StringPrintf("Method may have only one of public/protected/private, %s: %x",
-                              GetMethodDescriptionOrError(begin_, header_, method_index).c_str(),
+                              GetMethodDescription(begin_, header_, method_index).c_str(),
                               method_access_flags);
     return false;
   }
@@ -3300,7 +3507,7 @@
     *error_msg =
         StringPrintf("Method %" PRIu32 "(%s) is marked constructor, but doesn't match name",
                       method_index,
-                      GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
+                      GetMethodDescription(begin_, header_, method_index).c_str());
     return false;
   }
 
@@ -3311,7 +3518,7 @@
     if (is_static ^ is_clinit_by_name) {
       *error_msg = StringPrintf("Constructor %" PRIu32 "(%s) is not flagged correctly wrt/ static.",
                                 method_index,
-                                GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
+                                GetMethodDescription(begin_, header_, method_index).c_str());
       if (dex_file_->SupportsDefaultMethods()) {
         return false;
       } else {
@@ -3329,7 +3536,7 @@
   if (is_direct != expect_direct) {
     *error_msg = StringPrintf("Direct/virtual method %" PRIu32 "(%s) not in expected list %d",
                               method_index,
-                              GetMethodDescriptionOrError(begin_, header_, method_index).c_str(),
+                              GetMethodDescription(begin_, header_, method_index).c_str(),
                               expect_direct);
     return false;
   }
@@ -3346,8 +3553,8 @@
     }
     if ((method_access_flags & desired_flags) == 0) {
       *error_msg = StringPrintf("Interface virtual method %" PRIu32 "(%s) is not public",
-          method_index,
-          GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
+                                method_index,
+                                GetMethodDescription(begin_, header_, method_index).c_str());
       if (dex_file_->SupportsDefaultMethods()) {
         return false;
       } else {
@@ -3365,14 +3572,14 @@
       *error_msg = StringPrintf("Method %" PRIu32 "(%s) has no code, but is not marked native or "
                                 "abstract",
                                 method_index,
-                                GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
+                                GetMethodDescription(begin_, header_, method_index).c_str());
       return false;
     }
     // Constructors must always have code.
     if (is_constructor_by_name) {
       *error_msg = StringPrintf("Constructor %u(%s) must not be abstract or native",
                                 method_index,
-                                GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
+                                GetMethodDescription(begin_, header_, method_index).c_str());
       if (dex_file_->SupportsDefaultMethods()) {
         return false;
       } else {
@@ -3387,14 +3594,14 @@
           kAccPrivate | kAccStatic | kAccFinal | kAccNative | kAccStrict | kAccSynchronized;
       if ((method_access_flags & kForbidden) != 0) {
         *error_msg = StringPrintf("Abstract method %" PRIu32 "(%s) has disallowed access flags %x",
-            method_index,
-            GetMethodDescriptionOrError(begin_, header_, method_index).c_str(),
-            method_access_flags);
+                                  method_index,
+                                  GetMethodDescription(begin_, header_, method_index).c_str(),
+                                  method_access_flags);
         return false;
       }
       // Abstract methods should be in an abstract class or interface.
       if ((class_access_flags & (kAccInterface | kAccAbstract)) == 0) {
-        LOG(WARNING) << "Method " << GetMethodDescriptionOrError(begin_, header_, method_index)
+        LOG(WARNING) << "Method " << GetMethodDescription(begin_, header_, method_index)
                      << " is abstract, but the declaring class is neither abstract nor an "
                      << "interface in dex file "
                      << dex_file_->GetLocation();
@@ -3405,8 +3612,8 @@
       // Interface methods without code must be abstract.
       if ((method_access_flags & (kAccPublic | kAccAbstract)) != (kAccPublic | kAccAbstract)) {
         *error_msg = StringPrintf("Interface method %" PRIu32 "(%s) is not public and abstract",
-            method_index,
-            GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
+                                  method_index,
+                                  GetMethodDescription(begin_, header_, method_index).c_str());
         if (dex_file_->SupportsDefaultMethods()) {
           return false;
         } else {
@@ -3426,7 +3633,7 @@
   if ((method_access_flags & (kAccNative | kAccAbstract)) != 0) {
     *error_msg = StringPrintf("Method %" PRIu32 "(%s) has code, but is marked native or abstract",
                               method_index,
-                              GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
+                              GetMethodDescription(begin_, header_, method_index).c_str());
     return false;
   }
 
@@ -3437,7 +3644,7 @@
     if ((method_access_flags & ~kInitAllowed) != 0) {
       *error_msg = StringPrintf("Constructor %" PRIu32 "(%s) flagged inappropriately %x",
                                 method_index,
-                                GetMethodDescriptionOrError(begin_, header_, method_index).c_str(),
+                                GetMethodDescription(begin_, header_, method_index).c_str(),
                                 method_access_flags);
       return false;
     }
@@ -3453,24 +3660,14 @@
          constructor_flags == (kAccConstructor | kAccStatic));
 
   // Check signature matches expectations.
-  const dex::MethodId* const method_id = CheckLoadMethodId(method_index,
-                                                           "Bad <init>/<clinit> method id");
-  if (method_id == nullptr) {
-    return false;
-  }
+  // The `method_index` has already been checked in `CheckIntraClassDataItemMethods()`.
+  CHECK_LT(method_index, header_->method_ids_size_);
+  const dex::MethodId& method_id = dex_file_->GetMethodId(method_index);
 
-  // Check the ProtoId for the corresponding method.
-  //
-  // TODO(oth): the error message here is to satisfy the MethodId test
-  // in the DexFileVerifierTest. The test is checking that the error
-  // contains this string if the index is out of range.
-  const dex::ProtoId* const proto_id = CheckLoadProtoId(method_id->proto_idx_,
-                                                        "inter_method_id_item proto_idx");
-  if (proto_id == nullptr) {
-    return false;
-  }
+  // The `method_id.proto_idx_` has already been checked in `CheckIntraMethodIdItem()`
+  DCHECK_LE(method_id.proto_idx_.index_, header_->proto_ids_size_);
 
-  Signature signature = dex_file_->GetMethodSignature(*method_id);
+  Signature signature = dex_file_->GetMethodSignature(method_id);
   if (constructor_flags == (kAccStatic | kAccConstructor)) {
     if (!signature.IsVoid() || signature.GetNumberOfParameters() != 0) {
       ErrorStringPrintf("<clinit> must have descriptor ()V");
@@ -3479,11 +3676,27 @@
   } else if (!signature.IsVoid()) {
     ErrorStringPrintf("Constructor %u(%s) must be void",
                       method_index,
-                      GetMethodDescriptionOrError(begin_, header_, method_index).c_str());
+                      GetMethodDescription(begin_, header_, method_index).c_str());
     return false;
   }
 
   return true;
 }
 
+bool Verify(const DexFile* dex_file,
+            const uint8_t* begin,
+            size_t size,
+            const char* location,
+            bool verify_checksum,
+            std::string* error_msg) {
+  std::unique_ptr<DexFileVerifier> verifier(
+      new DexFileVerifier(dex_file, begin, size, location, verify_checksum));
+  if (!verifier->Verify()) {
+    *error_msg = verifier->FailureReason();
+    return false;
+  }
+  return true;
+}
+
+}  // namespace dex
 }  // namespace art
diff --git a/libdexfile/dex/dex_file_verifier.h b/libdexfile/dex/dex_file_verifier.h
index b51a417..8ae6e7a 100644
--- a/libdexfile/dex/dex_file_verifier.h
+++ b/libdexfile/dex/dex_file_verifier.h
@@ -17,253 +17,24 @@
 #ifndef ART_LIBDEXFILE_DEX_DEX_FILE_VERIFIER_H_
 #define ART_LIBDEXFILE_DEX_DEX_FILE_VERIFIER_H_
 
-#include <limits>
-#include <unordered_set>
+#include <string>
 
-#include "base/hash_map.h"
-#include "base/safe_map.h"
-#include "class_accessor.h"
-#include "dex_file.h"
-#include "dex_file_types.h"
+#include <inttypes.h>
 
 namespace art {
 
-class DexFileVerifier {
- public:
-  static bool Verify(const DexFile* dex_file,
-                     const uint8_t* begin,
-                     size_t size,
-                     const char* location,
-                     bool verify_checksum,
-                     std::string* error_msg);
+class DexFile;
 
-  const std::string& FailureReason() const {
-    return failure_reason_;
-  }
+namespace dex {
 
- private:
-  DexFileVerifier(const DexFile* dex_file,
-                  const uint8_t* begin,
-                  size_t size,
-                  const char* location,
-                  bool verify_checksum)
-      : dex_file_(dex_file),
-        begin_(begin),
-        size_(size),
-        location_(location),
-        verify_checksum_(verify_checksum),
-        header_(&dex_file->GetHeader()),
-        ptr_(nullptr),
-        previous_item_(nullptr),
-        angle_bracket_start_index_(std::numeric_limits<size_t>::max()),
-        angle_bracket_end_index_(std::numeric_limits<size_t>::max()),
-        angle_init_angle_index_(std::numeric_limits<size_t>::max()),
-        angle_clinit_angle_index_(std::numeric_limits<size_t>::max()) {
-  }
+bool Verify(const DexFile* dex_file,
+            const uint8_t* begin,
+            size_t size,
+            const char* location,
+            bool verify_checksum,
+            std::string* error_msg);
 
-  bool Verify();
-
-  bool CheckShortyDescriptorMatch(char shorty_char, const char* descriptor, bool is_return_type);
-  bool CheckListSize(const void* start, size_t count, size_t element_size, const char* label);
-  // Check a list. The head is assumed to be at *ptr, and elements to be of size element_size. If
-  // successful, the ptr will be moved forward the amount covered by the list.
-  bool CheckList(size_t element_size, const char* label, const uint8_t* *ptr);
-  // Checks whether the offset is zero (when size is zero) or that the offset falls within the area
-  // claimed by the file.
-  bool CheckValidOffsetAndSize(uint32_t offset, uint32_t size, size_t alignment, const char* label);
-  // Checks whether the size is less than the limit.
-  bool CheckSizeLimit(uint32_t size, uint32_t limit, const char* label);
-  bool CheckIndex(uint32_t field, uint32_t limit, const char* label);
-
-  bool CheckHeader();
-  bool CheckMap();
-
-  uint32_t ReadUnsignedLittleEndian(uint32_t size);
-  bool CheckAndGetHandlerOffsets(const dex::CodeItem* code_item,
-                                 uint32_t* handler_offsets, uint32_t handlers_size);
-  bool CheckClassDataItemField(uint32_t idx,
-                               uint32_t access_flags,
-                               uint32_t class_access_flags,
-                               dex::TypeIndex class_type_index,
-                               bool expect_static);
-  bool CheckClassDataItemMethod(uint32_t idx,
-                                uint32_t access_flags,
-                                uint32_t class_access_flags,
-                                dex::TypeIndex class_type_index,
-                                uint32_t code_offset,
-                                ClassAccessor::Method* direct_method,
-                                size_t* remaining_directs);
-  ALWAYS_INLINE
-  bool CheckOrder(const char* type_descr, uint32_t curr_index, uint32_t prev_index);
-  bool CheckStaticFieldTypes(const dex::ClassDef* class_def);
-
-  bool CheckPadding(size_t offset, uint32_t aligned_offset, DexFile::MapItemType type);
-  bool CheckEncodedValue();
-  bool CheckEncodedArray();
-  bool CheckEncodedAnnotation();
-
-  bool CheckIntraClassDataItem();
-  // Check all fields of the given type from the given iterator. Load the class data from the first
-  // field, if necessary (and return it), or use the given values.
-  template <bool kStatic>
-  bool CheckIntraClassDataItemFields(size_t count,
-                                     ClassAccessor::Field* field,
-                                     bool* have_class,
-                                     dex::TypeIndex* class_type_index,
-                                     const dex::ClassDef** class_def);
-  // Check all methods of the given type from the given iterator. Load the class data from the first
-  // method, if necessary (and return it), or use the given values.
-  bool CheckIntraClassDataItemMethods(ClassAccessor::Method* method,
-                                      size_t num_methods,
-                                      ClassAccessor::Method* direct_method,
-                                      size_t num_directs,
-                                      bool* have_class,
-                                      dex::TypeIndex* class_type_index,
-                                      const dex::ClassDef** class_def);
-
-  bool CheckIntraCodeItem();
-  bool CheckIntraStringDataItem();
-  bool CheckIntraDebugInfoItem();
-  bool CheckIntraAnnotationItem();
-  bool CheckIntraAnnotationsDirectoryItem();
-  bool CheckIntraHiddenapiClassData();
-
-  template <DexFile::MapItemType kType>
-  bool CheckIntraSectionIterate(size_t offset, uint32_t count);
-  template <DexFile::MapItemType kType>
-  bool CheckIntraIdSection(size_t offset, uint32_t count);
-  template <DexFile::MapItemType kType>
-  bool CheckIntraDataSection(size_t offset, uint32_t count);
-  bool CheckIntraSection();
-
-  bool CheckOffsetToTypeMap(size_t offset, uint16_t type);
-
-  // Note: as sometimes kDexNoIndex16, being 0xFFFF, is a valid return value, we need an
-  // additional out parameter to signal any errors loading an index.
-  dex::TypeIndex FindFirstClassDataDefiner(const uint8_t* ptr, bool* success);
-  dex::TypeIndex FindFirstAnnotationsDirectoryDefiner(const uint8_t* ptr, bool* success);
-
-  bool CheckInterStringIdItem();
-  bool CheckInterTypeIdItem();
-  bool CheckInterProtoIdItem();
-  bool CheckInterFieldIdItem();
-  bool CheckInterMethodIdItem();
-  bool CheckInterClassDefItem();
-  bool CheckInterCallSiteIdItem();
-  bool CheckInterMethodHandleItem();
-  bool CheckInterAnnotationSetRefList();
-  bool CheckInterAnnotationSetItem();
-  bool CheckInterClassDataItem();
-  bool CheckInterAnnotationsDirectoryItem();
-
-  bool CheckInterSectionIterate(size_t offset, uint32_t count, DexFile::MapItemType type);
-  bool CheckInterSection();
-
-  // Load a string by (type) index. Checks whether the index is in bounds, printing the error if
-  // not. If there is an error, null is returned.
-  const char* CheckLoadStringByIdx(dex::StringIndex idx, const char* error_fmt);
-  const char* CheckLoadStringByTypeIdx(dex::TypeIndex type_idx, const char* error_fmt);
-
-  // Load a field/method/proto Id by index. Checks whether the index is in bounds, printing the
-  // error if not. If there is an error, null is returned.
-  const dex::FieldId* CheckLoadFieldId(uint32_t idx, const char* error_fmt);
-  const dex::MethodId* CheckLoadMethodId(uint32_t idx, const char* error_fmt);
-  const dex::ProtoId* CheckLoadProtoId(dex::ProtoIndex idx, const char* error_fmt);
-
-  void ErrorStringPrintf(const char* fmt, ...)
-      __attribute__((__format__(__printf__, 2, 3))) COLD_ATTR;
-  bool FailureReasonIsSet() const { return failure_reason_.size() != 0; }
-
-  // Retrieve class index and class def from the given member. index is the member index, which is
-  // taken as either a field or a method index (as designated by is_field). The result, if the
-  // member and declaring class could be found, is stored in class_type_index and class_def.
-  // This is an expensive lookup, as we have to find the class def by type index, which is a
-  // linear search. The output values should thus be cached by the caller.
-  bool FindClassIndexAndDef(uint32_t index,
-                            bool is_field,
-                            dex::TypeIndex* class_type_index,
-                            const dex::ClassDef** output_class_def);
-
-  // Check validity of the given access flags, interpreted for a field in the context of a class
-  // with the given second access flags.
-  bool CheckFieldAccessFlags(uint32_t idx,
-                             uint32_t field_access_flags,
-                             uint32_t class_access_flags,
-                             std::string* error_message);
-
-  // Check validity of the given method and access flags, in the context of a class with the given
-  // second access flags.
-  bool CheckMethodAccessFlags(uint32_t method_index,
-                              uint32_t method_access_flags,
-                              uint32_t class_access_flags,
-                              uint32_t constructor_flags_by_name,
-                              bool has_code,
-                              bool expect_direct,
-                              std::string* error_message);
-
-  // Check validity of given method if it's a constructor or class initializer.
-  bool CheckConstructorProperties(uint32_t method_index, uint32_t constructor_flags);
-
-  void FindStringRangesForMethodNames();
-
-  const DexFile* const dex_file_;
-  const uint8_t* const begin_;
-  const size_t size_;
-  const char* const location_;
-  const bool verify_checksum_;
-  const DexFile::Header* const header_;
-
-  struct OffsetTypeMapEmptyFn {
-    // Make a hash map slot empty by making the offset 0. Offset 0 is a valid dex file offset that
-    // is in the offset of the dex file header. However, we only store data section items in the
-    // map, and these are after the header.
-    void MakeEmpty(std::pair<uint32_t, uint16_t>& pair) const {
-      pair.first = 0u;
-    }
-    // Check if a hash map slot is empty.
-    bool IsEmpty(const std::pair<uint32_t, uint16_t>& pair) const {
-      return pair.first == 0;
-    }
-  };
-  struct OffsetTypeMapHashCompareFn {
-    // Hash function for offset.
-    size_t operator()(const uint32_t key) const {
-      return key;
-    }
-    // std::equal function for offset.
-    bool operator()(const uint32_t a, const uint32_t b) const {
-      return a == b;
-    }
-  };
-  // Map from offset to dex file type, HashMap for performance reasons.
-  HashMap<uint32_t,
-          uint16_t,
-          OffsetTypeMapEmptyFn,
-          OffsetTypeMapHashCompareFn,
-          OffsetTypeMapHashCompareFn> offset_to_type_map_;
-  const uint8_t* ptr_;
-  const void* previous_item_;
-
-  std::string failure_reason_;
-
-  // Set of type ids for which there are ClassDef elements in the dex file.
-  std::unordered_set<decltype(dex::ClassDef::class_idx_)> defined_classes_;
-
-  // Cached string indices for "interesting" entries wrt/ method names. Will be populated by
-  // FindStringRangesForMethodNames (which is automatically called before verifying the
-  // classdataitem section).
-  //
-  // Strings starting with '<' are in the range
-  //    [angle_bracket_start_index_,angle_bracket_end_index_).
-  // angle_init_angle_index_ and angle_clinit_angle_index_ denote the indices of "<init>" and
-  // angle_clinit_angle_index_, respectively. If any value is not found, the corresponding
-  // index will be larger than any valid string index for this dex file.
-  size_t angle_bracket_start_index_;
-  size_t angle_bracket_end_index_;
-  size_t angle_init_angle_index_;
-  size_t angle_clinit_angle_index_;
-};
-
+}  // namespace dex
 }  // namespace art
 
 #endif  // ART_LIBDEXFILE_DEX_DEX_FILE_VERIFIER_H_
diff --git a/libdexfile/dex/dex_file_verifier_test.cc b/libdexfile/dex/dex_file_verifier_test.cc
index b2cff4f..79e9c8b 100644
--- a/libdexfile/dex/dex_file_verifier_test.cc
+++ b/libdexfile/dex/dex_file_verifier_test.cc
@@ -21,6 +21,8 @@
 #include <functional>
 #include <memory>
 
+#include <android-base/logging.h>
+
 #include "base/bit_utils.h"
 #include "base/leb128.h"
 #include "base/macros.h"
@@ -74,12 +76,12 @@
 
     static constexpr bool kVerifyChecksum = true;
     std::string error_msg;
-    bool success = DexFileVerifier::Verify(dex_file.get(),
-                                           dex_file->Begin(),
-                                           dex_file->Size(),
-                                           location,
-                                           kVerifyChecksum,
-                                           &error_msg);
+    bool success = dex::Verify(dex_file.get(),
+                               dex_file->Begin(),
+                               dex_file->Size(),
+                               location,
+                               kVerifyChecksum,
+                               &error_msg);
     if (expected_error == nullptr) {
       EXPECT_TRUE(success) << error_msg;
     } else {
@@ -156,7 +158,7 @@
         dex::MethodId* method_id = const_cast<dex::MethodId*>(&dex_file->GetMethodId(0));
         method_id->class_idx_ = dex::TypeIndex(0xFF);
       },
-      "could not find declaring class for direct method index 0");
+      "Bad index for method_id.class");
 
   // Proto idx error.
   VerifyModification(
@@ -166,7 +168,7 @@
         dex::MethodId* method_id = const_cast<dex::MethodId*>(&dex_file->GetMethodId(0));
         method_id->proto_idx_ = dex::ProtoIndex(0xFF);
       },
-      "inter_method_id_item proto_idx");
+      "Bad index for method_id.proto");
 
   // Name idx error.
   VerifyModification(
@@ -176,7 +178,7 @@
         dex::MethodId* method_id = const_cast<dex::MethodId*>(&dex_file->GetMethodId(0));
         method_id->name_idx_ = dex::StringIndex(0xFF);
       },
-      "Bad index for method flags verification");
+      "Bad index for method_id.name");
 }
 
 TEST_F(DexFileVerifierTest, InitCachingWithUnicode) {
@@ -1617,35 +1619,35 @@
   std::string error_msg;
 
   // Good checksum: all pass.
-  EXPECT_TRUE(DexFileVerifier::Verify(dex_file.get(),
-                                      dex_file->Begin(),
-                                      dex_file->Size(),
-                                       "good checksum, no verify",
-                                      /*verify_checksum=*/ false,
-                                      &error_msg));
-  EXPECT_TRUE(DexFileVerifier::Verify(dex_file.get(),
-                                      dex_file->Begin(),
-                                      dex_file->Size(),
-                                      "good checksum, verify",
-                                      /*verify_checksum=*/ true,
-                                      &error_msg));
+  EXPECT_TRUE(dex::Verify(dex_file.get(),
+                          dex_file->Begin(),
+                          dex_file->Size(),
+                          "good checksum, no verify",
+                          /*verify_checksum=*/ false,
+                          &error_msg));
+  EXPECT_TRUE(dex::Verify(dex_file.get(),
+                          dex_file->Begin(),
+                          dex_file->Size(),
+                          "good checksum, verify",
+                          /*verify_checksum=*/ true,
+                          &error_msg));
 
   // Bad checksum: !verify_checksum passes verify_checksum fails.
   DexFile::Header* header = reinterpret_cast<DexFile::Header*>(
       const_cast<uint8_t*>(dex_file->Begin()));
   header->checksum_ = 0;
-  EXPECT_TRUE(DexFileVerifier::Verify(dex_file.get(),
-                                      dex_file->Begin(),
-                                      dex_file->Size(),
-                                      "bad checksum, no verify",
-                                      /*verify_checksum=*/ false,
-                                      &error_msg));
-  EXPECT_FALSE(DexFileVerifier::Verify(dex_file.get(),
-                                       dex_file->Begin(),
-                                       dex_file->Size(),
-                                       "bad checksum, verify",
-                                       /*verify_checksum=*/ true,
-                                       &error_msg));
+  EXPECT_TRUE(dex::Verify(dex_file.get(),
+                          dex_file->Begin(),
+                          dex_file->Size(),
+                          "bad checksum, no verify",
+                          /*verify_checksum=*/ false,
+                          &error_msg));
+  EXPECT_FALSE(dex::Verify(dex_file.get(),
+                           dex_file->Begin(),
+                           dex_file->Size(),
+                           "bad checksum, verify",
+                           /*verify_checksum=*/ true,
+                           &error_msg));
   EXPECT_NE(error_msg.find("Bad checksum"), std::string::npos) << error_msg;
 }
 
@@ -1687,12 +1689,12 @@
   // Note: `dex_file` will be destroyed before `dex_bytes`.
   std::unique_ptr<DexFile> dex_file(GetDexFile(dex_bytes.get(), length));
   std::string error_msg;
-  EXPECT_FALSE(DexFileVerifier::Verify(dex_file.get(),
-                                       dex_file->Begin(),
-                                       dex_file->Size(),
-                                       "bad static method name",
-                                       /*verify_checksum=*/ true,
-                                       &error_msg));
+  EXPECT_FALSE(dex::Verify(dex_file.get(),
+                           dex_file->Begin(),
+                           dex_file->Size(),
+                           "bad static method name",
+                           /*verify_checksum=*/ true,
+                           &error_msg));
 }
 
 TEST_F(DexFileVerifierTest, BadVirtualMethodName) {
@@ -1731,12 +1733,12 @@
   // Note: `dex_file` will be destroyed before `dex_bytes`.
   std::unique_ptr<DexFile> dex_file(GetDexFile(dex_bytes.get(), length));
   std::string error_msg;
-  EXPECT_FALSE(DexFileVerifier::Verify(dex_file.get(),
-                                       dex_file->Begin(),
-                                       dex_file->Size(),
-                                       "bad virtual method name",
-                                       /*verify_checksum=*/ true,
-                                       &error_msg));
+  EXPECT_FALSE(dex::Verify(dex_file.get(),
+                           dex_file->Begin(),
+                           dex_file->Size(),
+                           "bad virtual method name",
+                           /*verify_checksum=*/ true,
+                           &error_msg));
 }
 
 TEST_F(DexFileVerifierTest, BadClinitSignature) {
@@ -1775,12 +1777,12 @@
   // Note: `dex_file` will be destroyed before `dex_bytes`.
   std::unique_ptr<DexFile> dex_file(GetDexFile(dex_bytes.get(), length));
   std::string error_msg;
-  EXPECT_FALSE(DexFileVerifier::Verify(dex_file.get(),
-                                       dex_file->Begin(),
-                                       dex_file->Size(),
-                                       "bad clinit signature",
-                                       /*verify_checksum=*/ true,
-                                       &error_msg));
+  EXPECT_FALSE(dex::Verify(dex_file.get(),
+                           dex_file->Begin(),
+                           dex_file->Size(),
+                           "bad clinit signature",
+                           /*verify_checksum=*/ true,
+                           &error_msg));
 }
 
 TEST_F(DexFileVerifierTest, BadClinitSignatureAgain) {
@@ -1819,12 +1821,12 @@
   // Note: `dex_file` will be destroyed before `dex_bytes`.
   std::unique_ptr<DexFile> dex_file(GetDexFile(dex_bytes.get(), length));
   std::string error_msg;
-  EXPECT_FALSE(DexFileVerifier::Verify(dex_file.get(),
-                                       dex_file->Begin(),
-                                       dex_file->Size(),
-                                       "bad clinit signature",
-                                       /*verify_checksum=*/ true,
-                                       &error_msg));
+  EXPECT_FALSE(dex::Verify(dex_file.get(),
+                           dex_file->Begin(),
+                           dex_file->Size(),
+                           "bad clinit signature",
+                           /*verify_checksum=*/ true,
+                           &error_msg));
 }
 
 TEST_F(DexFileVerifierTest, BadInitSignature) {
@@ -1856,12 +1858,12 @@
   // Note: `dex_file` will be destroyed before `dex_bytes`.
   std::unique_ptr<DexFile> dex_file(GetDexFile(dex_bytes.get(), length));
   std::string error_msg;
-  EXPECT_FALSE(DexFileVerifier::Verify(dex_file.get(),
-                                       dex_file->Begin(),
-                                       dex_file->Size(),
-                                       "bad init signature",
-                                       /*verify_checksum=*/ true,
-                                       &error_msg));
+  EXPECT_FALSE(dex::Verify(dex_file.get(),
+                           dex_file->Begin(),
+                           dex_file->Size(),
+                           "bad init signature",
+                           /*verify_checksum=*/ true,
+                           &error_msg));
 }
 
 static const char* kInvokeCustomDexFiles[] = {
@@ -2059,12 +2061,12 @@
     // Note: `dex_file` will be destroyed before `dex_bytes`.
     std::unique_ptr<DexFile> dex_file(GetDexFile(dex_bytes.get(), length));
     std::string error_msg;
-    EXPECT_TRUE(DexFileVerifier::Verify(dex_file.get(),
-                                        dex_file->Begin(),
-                                        dex_file->Size(),
-                                        "good checksum, verify",
-                                        /*verify_checksum=*/ true,
-                                        &error_msg));
+    EXPECT_TRUE(dex::Verify(dex_file.get(),
+                            dex_file->Begin(),
+                            dex_file->Size(),
+                            "good checksum, verify",
+                            /*verify_checksum=*/ true,
+                            &error_msg));
     // TODO(oth): Test corruptions (b/35308502)
   }
 }
@@ -2106,12 +2108,12 @@
   // Note: `dex_file` will be destroyed before `dex_bytes`.
   std::unique_ptr<DexFile> dex_file(GetDexFile(dex_bytes.get(), length));
   std::string error_msg;
-  EXPECT_FALSE(DexFileVerifier::Verify(dex_file.get(),
-                                       dex_file->Begin(),
-                                       dex_file->Size(),
-                                       "bad static field initial values array",
-                                       /*verify_checksum=*/ true,
-                                       &error_msg));
+  EXPECT_FALSE(dex::Verify(dex_file.get(),
+                           dex_file->Begin(),
+                           dex_file->Size(),
+                           "bad static field initial values array",
+                           /*verify_checksum=*/ true,
+                           &error_msg));
 }
 
 TEST_F(DexFileVerifierTest, GoodStaticFieldInitialValuesArray) {
@@ -2162,12 +2164,12 @@
   // Note: `dex_file` will be destroyed before `dex_bytes`.
   std::unique_ptr<DexFile> dex_file(GetDexFile(dex_bytes.get(), length));
   std::string error_msg;
-  EXPECT_TRUE(DexFileVerifier::Verify(dex_file.get(),
-                                      dex_file->Begin(),
-                                      dex_file->Size(),
-                                      "good static field initial values array",
-                                      /*verify_checksum=*/ true,
-                                      &error_msg));
+  EXPECT_TRUE(dex::Verify(dex_file.get(),
+                          dex_file->Begin(),
+                          dex_file->Size(),
+                          "good static field initial values array",
+                          /*verify_checksum=*/ true,
+                          &error_msg));
 }
 
 }  // namespace art
diff --git a/libdexfile/dex/dex_instruction-inl.h b/libdexfile/dex/dex_instruction-inl.h
index e0cffdd..a1e7267 100644
--- a/libdexfile/dex/dex_instruction-inl.h
+++ b/libdexfile/dex/dex_instruction-inl.h
@@ -21,6 +21,38 @@
 
 namespace art {
 
+inline constexpr size_t Instruction::SizeInCodeUnits(Format format) {
+  switch (format) {
+    case k10x:
+    case k12x:
+    case k11n:
+    case k11x:
+    case k10t: return 1;
+    case k20t:
+    case k22x:
+    case k21t:
+    case k21s:
+    case k21h:
+    case k21c:
+    case k23x:
+    case k22b:
+    case k22t:
+    case k22s:
+    case k22c: return 2;
+    case k32x:
+    case k30t:
+    case k31t:
+    case k31i:
+    case k31c:
+    case k35c:
+    case k3rc: return 3;
+    case k45cc:
+    case k4rcc: return 4;
+    case k51l: return 5;
+    case kInvalidFormat: return 0;
+  }
+}
+
 //------------------------------------------------------------------------------
 // VRegA
 //------------------------------------------------------------------------------
@@ -57,33 +89,38 @@
 }
 
 inline int32_t Instruction::VRegA() const {
-  switch (FormatOf(Opcode())) {
-    case k10t: return VRegA_10t();
-    case k10x: return VRegA_10x();
-    case k11n: return VRegA_11n();
-    case k11x: return VRegA_11x();
-    case k12x: return VRegA_12x();
+  return VRegA(FormatOf(Opcode()), Fetch16(0));
+}
+
+inline int32_t Instruction::VRegA(Format format, uint16_t inst_data) const {
+  DCHECK_EQ(format, FormatOf(Opcode()));
+  switch (format) {
+    case k10t: return VRegA_10t(inst_data);
+    case k10x: return VRegA_10x(inst_data);
+    case k11n: return VRegA_11n(inst_data);
+    case k11x: return VRegA_11x(inst_data);
+    case k12x: return VRegA_12x(inst_data);
     case k20t: return VRegA_20t();
-    case k21c: return VRegA_21c();
-    case k21h: return VRegA_21h();
-    case k21s: return VRegA_21s();
-    case k21t: return VRegA_21t();
-    case k22b: return VRegA_22b();
-    case k22c: return VRegA_22c();
-    case k22s: return VRegA_22s();
-    case k22t: return VRegA_22t();
-    case k22x: return VRegA_22x();
-    case k23x: return VRegA_23x();
+    case k21c: return VRegA_21c(inst_data);
+    case k21h: return VRegA_21h(inst_data);
+    case k21s: return VRegA_21s(inst_data);
+    case k21t: return VRegA_21t(inst_data);
+    case k22b: return VRegA_22b(inst_data);
+    case k22c: return VRegA_22c(inst_data);
+    case k22s: return VRegA_22s(inst_data);
+    case k22t: return VRegA_22t(inst_data);
+    case k22x: return VRegA_22x(inst_data);
+    case k23x: return VRegA_23x(inst_data);
     case k30t: return VRegA_30t();
-    case k31c: return VRegA_31c();
-    case k31i: return VRegA_31i();
-    case k31t: return VRegA_31t();
+    case k31c: return VRegA_31c(inst_data);
+    case k31i: return VRegA_31i(inst_data);
+    case k31t: return VRegA_31t(inst_data);
     case k32x: return VRegA_32x();
-    case k35c: return VRegA_35c();
-    case k3rc: return VRegA_3rc();
-    case k45cc: return VRegA_45cc();
-    case k4rcc: return VRegA_4rcc();
-    case k51l: return VRegA_51l();
+    case k35c: return VRegA_35c(inst_data);
+    case k3rc: return VRegA_3rc(inst_data);
+    case k45cc: return VRegA_45cc(inst_data);
+    case k4rcc: return VRegA_4rcc(inst_data);
+    case k51l: return VRegA_51l(inst_data);
     default:
       LOG(FATAL) << "Tried to access vA of instruction " << Name() << " which has no A operand.";
       exit(EXIT_FAILURE);
@@ -255,17 +292,22 @@
 }
 
 inline int32_t Instruction::VRegB() const {
-  switch (FormatOf(Opcode())) {
-    case k11n: return VRegB_11n();
-    case k12x: return VRegB_12x();
+  return VRegB(FormatOf(Opcode()), Fetch16(0));
+}
+
+inline int32_t Instruction::VRegB(Format format, uint16_t inst_data) const {
+  DCHECK_EQ(format, FormatOf(Opcode()));
+  switch (format) {
+    case k11n: return VRegB_11n(inst_data);
+    case k12x: return VRegB_12x(inst_data);
     case k21c: return VRegB_21c();
     case k21h: return VRegB_21h();
     case k21s: return VRegB_21s();
     case k21t: return VRegB_21t();
     case k22b: return VRegB_22b();
-    case k22c: return VRegB_22c();
-    case k22s: return VRegB_22s();
-    case k22t: return VRegB_22t();
+    case k22c: return VRegB_22c(inst_data);
+    case k22s: return VRegB_22s(inst_data);
+    case k22t: return VRegB_22t(inst_data);
     case k22x: return VRegB_22x();
     case k23x: return VRegB_23x();
     case k31c: return VRegB_31c();
@@ -412,7 +454,12 @@
 }
 
 inline int32_t Instruction::VRegC() const {
-  switch (FormatOf(Opcode())) {
+  return VRegC(FormatOf(Opcode()));
+}
+
+inline int32_t Instruction::VRegC(Format format) const {
+  DCHECK_EQ(format, FormatOf(Opcode()));
+  switch (format) {
     case k22b: return VRegC_22b();
     case k22c: return VRegC_22c();
     case k22s: return VRegC_22s();
diff --git a/libdexfile/dex/dex_instruction.cc b/libdexfile/dex/dex_instruction.cc
index f36a2aa..37fc455 100644
--- a/libdexfile/dex/dex_instruction.cc
+++ b/libdexfile/dex/dex_instruction.cc
@@ -510,6 +510,7 @@
       }
       break;
     case k51l: os << StringPrintf("%s v%d, #%+" PRId64, opcode, VRegA_51l(), VRegB_51l()); break;
+    case kInvalidFormat: os << "<invalid-opcode-format>";
   }
   return os.str();
 }
diff --git a/libdexfile/dex/dex_instruction.h b/libdexfile/dex/dex_instruction.h
index 4b38904..c15fa43 100644
--- a/libdexfile/dex/dex_instruction.h
+++ b/libdexfile/dex/dex_instruction.h
@@ -123,6 +123,7 @@
     k4rcc,  // op {VCCCC .. v(CCCC+AA-1)}, meth@BBBB, proto@HHHH (AA: count)
 
     k51l,  // op vAA, #+BBBBBBBBBBBBBBBB
+    kInvalidFormat,
   };
 
   enum IndexType : uint8_t {
@@ -226,6 +227,9 @@
     }
   }
 
+  // Returns the size (in 2 byte code units) of the given instruction format.
+  ALWAYS_INLINE static constexpr size_t SizeInCodeUnits(Format format);
+
   // Code units required to calculate the size of the instruction.
   size_t CodeUnitsRequiredForSizeComputation() const {
     const int8_t result = kInstructionDescriptors[Opcode()].size_in_code_units;
@@ -291,6 +295,7 @@
   // VRegA
   bool HasVRegA() const;
   ALWAYS_INLINE int32_t VRegA() const;
+  ALWAYS_INLINE int32_t VRegA(Format format, uint16_t inst_data) const;
 
   int8_t VRegA_10t() const {
     return VRegA_10t(Fetch16(0));
@@ -393,7 +398,8 @@
 
   // VRegB
   bool HasVRegB() const;
-  int32_t VRegB() const;
+  ALWAYS_INLINE int32_t VRegB() const;
+  ALWAYS_INLINE int32_t VRegB(Format format, uint16_t inst_data) const;
 
   bool HasWideVRegB() const;
   uint64_t WideVRegB() const;
@@ -441,7 +447,8 @@
 
   // VRegC
   bool HasVRegC() const;
-  int32_t VRegC() const;
+  ALWAYS_INLINE int32_t VRegC() const;
+  ALWAYS_INLINE int32_t VRegC(Format format) const;
 
   int8_t VRegC_22b() const;
   uint16_t VRegC_22c() const;
diff --git a/libdexfile/dex/modifiers.h b/libdexfile/dex/modifiers.h
index 0c79c96..bdb3781 100644
--- a/libdexfile/dex/modifiers.h
+++ b/libdexfile/dex/modifiers.h
@@ -17,6 +17,8 @@
 #ifndef ART_LIBDEXFILE_DEX_MODIFIERS_H_
 #define ART_LIBDEXFILE_DEX_MODIFIERS_H_
 
+#include <string>
+
 #include <stdint.h>
 
 namespace art {
@@ -53,10 +55,15 @@
 // Used by a class to denote that the verifier has attempted to check it at least once.
 static constexpr uint32_t kAccVerificationAttempted = 0x00080000;  // class (runtime)
 static constexpr uint32_t kAccSkipHiddenapiChecks =   0x00100000;  // class (runtime)
+// Used by a class to denote that this class and any objects with this as a
+// declaring-class/super-class are to be considered obsolete, meaning they should not be used by.
+static constexpr uint32_t kAccObsoleteObject =        0x00200000;  // class (runtime)
 // This is set by the class linker during LinkInterfaceMethods. It is used by a method to represent
 // that it was copied from its declaring class into another class. All methods marked kAccMiranda
 // and kAccDefaultConflict will have this bit set. Any kAccDefault method contained in the methods_
 // array of a concrete class will also have this bit set.
+// We need copies of the original method because the method may end up in
+// different places in classes vtables, and the vtable index is set in ArtMethod.method_index.
 static constexpr uint32_t kAccCopied =                0x00100000;  // method (runtime)
 static constexpr uint32_t kAccMiranda =               0x00200000;  // method (runtime, not native)
 static constexpr uint32_t kAccDefault =               0x00400000;  // method (runtime)
@@ -77,6 +84,10 @@
 // Set by the verifier for a method we do not want the compiler to compile.
 static constexpr uint32_t kAccCompileDontBother =     0x02000000;  // method (runtime)
 
+// Used in conjunction with kAccCompileDontBother to mark the method as pre
+// compiled by the JIT compiler.
+static constexpr uint32_t kAccPreCompiled =           0x00200000;  // method (runtime)
+
 // Set by the verifier for a method that could not be verified to follow structured locking.
 static constexpr uint32_t kAccMustCountLocks =        0x04000000;  // method (runtime)
 
diff --git a/libdexfile/dex/standard_dex_file.cc b/libdexfile/dex/standard_dex_file.cc
index 8bac44e..1f1bc19 100644
--- a/libdexfile/dex/standard_dex_file.cc
+++ b/libdexfile/dex/standard_dex_file.cc
@@ -32,8 +32,10 @@
   {'0', '3', '7', '\0'},
   // Dex version 038: Android "O" and beyond.
   {'0', '3', '8', '\0'},
-  // Dex verion 039: Beyond Android "O".
+  // Dex version 039: Android "P" and beyond.
   {'0', '3', '9', '\0'},
+  // Dex version 040: beyond Android "10" (previously known as Android "Q").
+  {'0', '4', '0', '\0'},
 };
 
 void StandardDexFile::WriteMagic(uint8_t* magic) {
@@ -46,6 +48,11 @@
               magic + kDexMagicSize);
 }
 
+
+void StandardDexFile::WriteVersionBeforeDefaultMethods(uint8_t* magic) {
+  std::copy_n(kDexMagicVersions[0u], kDexVersionLen, magic + kDexMagicSize);
+}
+
 bool StandardDexFile::IsMagicValid(const uint8_t* magic) {
   return (memcmp(magic, kDexMagic, sizeof(kDexMagic)) == 0);
 }
diff --git a/libdexfile/dex/standard_dex_file.h b/libdexfile/dex/standard_dex_file.h
index 48671c9..25cf62a 100644
--- a/libdexfile/dex/standard_dex_file.h
+++ b/libdexfile/dex/standard_dex_file.h
@@ -35,6 +35,22 @@
   struct CodeItem : public dex::CodeItem {
     static constexpr size_t kAlignment = 4;
 
+    static constexpr size_t InsSizeOffset() {
+      return OFFSETOF_MEMBER(CodeItem, ins_size_);
+    }
+
+    static constexpr size_t OutsSizeOffset() {
+      return OFFSETOF_MEMBER(CodeItem, outs_size_);
+    }
+
+    static constexpr size_t RegistersSizeOffset() {
+      return OFFSETOF_MEMBER(CodeItem, registers_size_);
+    }
+
+    static constexpr size_t InsnsOffset() {
+      return OFFSETOF_MEMBER(CodeItem, insns_);
+    }
+
    private:
     CodeItem() = default;
 
@@ -67,8 +83,12 @@
   // Write the current version, note that the input is the address of the magic.
   static void WriteCurrentVersion(uint8_t* magic);
 
+  // Write the last version before default method support,
+  // note that the input is the address of the magic.
+  static void WriteVersionBeforeDefaultMethods(uint8_t* magic);
+
   static const uint8_t kDexMagic[kDexMagicSize];
-  static constexpr size_t kNumDexVersions = 4;
+  static constexpr size_t kNumDexVersions = 5;
   static const uint8_t kDexMagicVersions[kNumDexVersions][kDexVersionLen];
 
   // Returns true if the byte string points to the magic value.
diff --git a/libdexfile/external/dex_file_supp.cc b/libdexfile/external/dex_file_supp.cc
index ba684fe..e207953 100644
--- a/libdexfile/external/dex_file_supp.cc
+++ b/libdexfile/external/dex_file_supp.cc
@@ -28,61 +28,81 @@
 namespace art_api {
 namespace dex {
 
+#define FOR_ALL_DLFUNCS(MACRO) \
+  MACRO(DexString, ExtDexFileMakeString) \
+  MACRO(DexString, ExtDexFileGetString) \
+  MACRO(DexString, ExtDexFileFreeString) \
+  MACRO(DexFile, ExtDexFileOpenFromMemory) \
+  MACRO(DexFile, ExtDexFileOpenFromFd) \
+  MACRO(DexFile, ExtDexFileGetMethodInfoForOffset) \
+  MACRO(DexFile, ExtDexFileGetAllMethodInfos) \
+  MACRO(DexFile, ExtDexFileFree)
+
 #ifdef STATIC_LIB
-#define DEFINE_DLFUNC_PTR(CLASS, DLFUNC) decltype(DLFUNC)* CLASS::g_##DLFUNC = DLFUNC
+#define DEFINE_DLFUNC_PTR(CLASS, DLFUNC) decltype(DLFUNC)* CLASS::g_##DLFUNC = DLFUNC;
 #else
-#define DEFINE_DLFUNC_PTR(CLASS, DLFUNC) decltype(DLFUNC)* CLASS::g_##DLFUNC = nullptr
+#define DEFINE_DLFUNC_PTR(CLASS, DLFUNC) decltype(DLFUNC)* CLASS::g_##DLFUNC = nullptr;
 #endif
-
-DEFINE_DLFUNC_PTR(DexString, ExtDexFileMakeString);
-DEFINE_DLFUNC_PTR(DexString, ExtDexFileGetString);
-DEFINE_DLFUNC_PTR(DexString, ExtDexFileFreeString);
-DEFINE_DLFUNC_PTR(DexFile, ExtDexFileOpenFromMemory);
-DEFINE_DLFUNC_PTR(DexFile, ExtDexFileOpenFromFd);
-DEFINE_DLFUNC_PTR(DexFile, ExtDexFileGetMethodInfoForOffset);
-DEFINE_DLFUNC_PTR(DexFile, ExtDexFileGetAllMethodInfos);
-DEFINE_DLFUNC_PTR(DexFile, ExtDexFileFree);
-
+FOR_ALL_DLFUNCS(DEFINE_DLFUNC_PTR)
 #undef DEFINE_DLFUNC_PTR
 
-void LoadLibdexfileExternal() {
+bool TryLoadLibdexfileExternal([[maybe_unused]] std::string* err_msg) {
 #if defined(STATIC_LIB)
   // Nothing to do here since all function pointers are initialised statically.
+  return true;
 #elif defined(NO_DEXFILE_SUPPORT)
-  LOG_FATAL("Dex file support not available.");
+  *err_msg = "Dex file support not available.";
+  return false;
 #else
-  static std::once_flag dlopen_once;
-  std::call_once(dlopen_once, []() {
-    constexpr char kLibdexfileExternalLib[] = "libdexfile_external.so";
-    void* handle =
-        dlopen(kLibdexfileExternalLib, RTLD_NOW | RTLD_GLOBAL | RTLD_NODELETE);
-    LOG_ALWAYS_FATAL_IF(handle == nullptr, "Failed to load %s: %s",
-                        kLibdexfileExternalLib, dlerror());
+  // Use a plain old mutex since we want to try again if loading fails (to set
+  // err_msg, if nothing else).
+  static std::mutex load_mutex;
+  static bool is_loaded = false;
+  std::lock_guard<std::mutex> lock(load_mutex);
 
-#define SET_DLFUNC_PTR(CLASS, DLFUNC) \
-  do { \
-    CLASS::g_##DLFUNC = reinterpret_cast<decltype(DLFUNC)*>(dlsym(handle, #DLFUNC)); \
-    LOG_ALWAYS_FATAL_IF(CLASS::g_##DLFUNC == nullptr, \
-                        "Failed to find %s in %s: %s", \
-                        #DLFUNC, \
-                        kLibdexfileExternalLib, \
-                        dlerror()); \
-  } while (0)
+  if (!is_loaded) {
+    // Check which version is already loaded to avoid loading both debug and
+    // release builds. We might also be backtracing from separate process, in
+    // which case neither is loaded.
+    const char* so_name = "libdexfiled_external.so";
+    void* handle = dlopen(so_name, RTLD_NOLOAD | RTLD_NOW | RTLD_NODELETE);
+    if (handle == nullptr) {
+      so_name = "libdexfile_external.so";
+      handle = dlopen(so_name, RTLD_NOW | RTLD_GLOBAL | RTLD_NODELETE);
+    }
+    if (handle == nullptr) {
+      *err_msg = dlerror();
+      return false;
+    }
 
-    SET_DLFUNC_PTR(DexString, ExtDexFileMakeString);
-    SET_DLFUNC_PTR(DexString, ExtDexFileGetString);
-    SET_DLFUNC_PTR(DexString, ExtDexFileFreeString);
-    SET_DLFUNC_PTR(DexFile, ExtDexFileOpenFromMemory);
-    SET_DLFUNC_PTR(DexFile, ExtDexFileOpenFromFd);
-    SET_DLFUNC_PTR(DexFile, ExtDexFileGetMethodInfoForOffset);
-    SET_DLFUNC_PTR(DexFile, ExtDexFileGetAllMethodInfos);
-    SET_DLFUNC_PTR(DexFile, ExtDexFileFree);
+#define RESOLVE_DLFUNC_PTR(CLASS, DLFUNC) \
+    decltype(DLFUNC)* DLFUNC##_ptr = reinterpret_cast<decltype(DLFUNC)*>(dlsym(handle, #DLFUNC)); \
+    if (DLFUNC##_ptr == nullptr) { \
+      *err_msg = dlerror(); \
+      return false; \
+    }
+    FOR_ALL_DLFUNCS(RESOLVE_DLFUNC_PTR);
+#undef RESOLVE_DLFUNC_PTR
 
+#define SET_DLFUNC_PTR(CLASS, DLFUNC) CLASS::g_##DLFUNC = DLFUNC##_ptr;
+    FOR_ALL_DLFUNCS(SET_DLFUNC_PTR);
 #undef SET_DLFUNC_PTR
-  });
+
+    is_loaded = true;
+  }
+
+  return is_loaded;
 #endif  // !defined(NO_DEXFILE_SUPPORT) && !defined(STATIC_LIB)
 }
 
+void LoadLibdexfileExternal() {
+#ifndef STATIC_LIB
+  if (std::string err_msg; !TryLoadLibdexfileExternal(&err_msg)) {
+    LOG_ALWAYS_FATAL("%s", err_msg.c_str());
+  }
+#endif
+}
+
 DexFile::~DexFile() { g_ExtDexFileFree(ext_dex_file_); }
 
 MethodInfo DexFile::AbsorbMethodInfo(const ExtDexFileMethodInfo& ext_method_info) {
diff --git a/libdexfile/external/dex_file_supp_test.cc b/libdexfile/external/dex_file_supp_test.cc
index 2f7ad50..6c2f900 100644
--- a/libdexfile/external/dex_file_supp_test.cc
+++ b/libdexfile/external/dex_file_supp_test.cc
@@ -285,5 +285,16 @@
   EXPECT_EQ(info.offset, int32_t{0x100});
 }
 
+TEST(DexFileTest, pointer_construct) {
+  std::unique_ptr<DexFile> dex_file = GetTestDexData();
+  ASSERT_NE(dex_file, nullptr);
+
+  auto new_dex = DexFile(dex_file);
+  ASSERT_TRUE(dex_file.get() == nullptr);
+
+  MethodInfo info = new_dex.GetMethodInfoForOffset(0x100, false);
+  EXPECT_EQ(info.offset, int32_t{0x100});
+}
+
 }  // namespace dex
 }  // namespace art_api
diff --git a/libdexfile/external/include/art_api/dex_file_support.h b/libdexfile/external/include/art_api/dex_file_support.h
index a98ff0e..404fa65 100644
--- a/libdexfile/external/include/art_api/dex_file_support.h
+++ b/libdexfile/external/include/art_api/dex_file_support.h
@@ -33,6 +33,12 @@
 namespace art_api {
 namespace dex {
 
+// Returns true if libdexfile_external.so is already loaded. Otherwise tries to
+// load it and returns true if successful. Otherwise returns false and sets
+// *error_msg. If false is returned then calling any function below may abort
+// the process. Thread safe.
+bool TryLoadLibdexfileExternal(std::string* error_msg);
+
 // Loads the libdexfile_external.so library and sets up function pointers.
 // Aborts with a fatal error on any error. For internal use by the classes
 // below.
@@ -75,12 +81,13 @@
   }
 
  private:
-  friend void LoadLibdexfileExternal();
+  friend bool TryLoadLibdexfileExternal(std::string* error_msg);
   friend class DexFile;
   friend bool operator==(const DexString&, const DexString&);
   explicit DexString(const ExtDexFileString* ext_string) : ext_string_(ext_string) {}
   const ExtDexFileString* ext_string_;  // Owned instance. Never nullptr.
 
+  // These are initialized by TryLoadLibdexfileExternal.
   static decltype(ExtDexFileMakeString)* g_ExtDexFileMakeString;
   static decltype(ExtDexFileGetString)* g_ExtDexFileGetString;
   static decltype(ExtDexFileFreeString)* g_ExtDexFileFreeString;
@@ -122,6 +129,12 @@
     ext_dex_file_ = dex_file.ext_dex_file_;
     dex_file.ext_dex_file_ = nullptr;
   }
+
+  explicit DexFile(std::unique_ptr<DexFile>& dex_file) noexcept {
+    ext_dex_file_ = dex_file->ext_dex_file_;
+    dex_file->ext_dex_file_ = nullptr;
+    dex_file.reset(nullptr);
+  }
   virtual ~DexFile();
 
   // Interprets a chunk of memory as a dex file. As long as *size is too small,
@@ -203,7 +216,7 @@
   }
 
  private:
-  friend void LoadLibdexfileExternal();
+  friend bool TryLoadLibdexfileExternal(std::string* error_msg);
   explicit DexFile(ExtDexFile* ext_dex_file) : ext_dex_file_(ext_dex_file) {}
   ExtDexFile* ext_dex_file_;  // Owned instance. nullptr only in moved-from zombies.
 
@@ -212,6 +225,7 @@
   static MethodInfo AbsorbMethodInfo(const ExtDexFileMethodInfo& ext_method_info);
   static void AddMethodInfoCallback(const ExtDexFileMethodInfo* ext_method_info, void* user_data);
 
+  // These are initialized by TryLoadLibdexfileExternal.
   static decltype(ExtDexFileOpenFromMemory)* g_ExtDexFileOpenFromMemory;
   static decltype(ExtDexFileOpenFromFd)* g_ExtDexFileOpenFromFd;
   static decltype(ExtDexFileGetMethodInfoForOffset)* g_ExtDexFileGetMethodInfoForOffset;
diff --git a/libelffile/Android.bp b/libelffile/Android.bp
index d8db915..de7fa96 100644
--- a/libelffile/Android.bp
+++ b/libelffile/Android.bp
@@ -26,7 +26,6 @@
         "stream/vector_output_stream.cc",
     ],
     shared_libs: [
-        "libartbase",
         "libbase",
     ],
 }
@@ -37,6 +36,13 @@
         "art_defaults",
         "libelffile-defaults",
     ],
+    shared_libs: [
+        "libartbase",
+    ],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_library_static {
@@ -45,4 +51,10 @@
         "art_debug_defaults",
         "libelffile-defaults",
     ],
+    shared_libs: [
+        "libartbased",
+    ],
+    apex_available: [
+        "com.android.art.debug",
+    ],
 }
diff --git a/libelffile/dwarf/debug_frame_opcode_writer.h b/libelffile/dwarf/debug_frame_opcode_writer.h
index b255f9c..65ca6bf 100644
--- a/libelffile/dwarf/debug_frame_opcode_writer.h
+++ b/libelffile/dwarf/debug_frame_opcode_writer.h
@@ -80,8 +80,10 @@
   }
 
   // Custom alias - spill many registers based on bitmask.
-  void ALWAYS_INLINE RelOffsetForMany(Reg reg_base, int offset,
-                                      uint32_t reg_mask, int reg_size) {
+  void ALWAYS_INLINE RelOffsetForMany(Reg reg_base,
+                                      int32_t offset,
+                                      uint32_t reg_mask,
+                                      int32_t reg_size) {
     DCHECK(reg_size == 4 || reg_size == 8);
     if (UNLIKELY(enabled_)) {
       for (int i = 0; reg_mask != 0u; reg_mask >>= 1, i++) {
diff --git a/libelffile/elf/elf_builder.h b/libelffile/elf/elf_builder.h
index b528f6a..07f0d00 100644
--- a/libelffile/elf/elf_builder.h
+++ b/libelffile/elf/elf_builder.h
@@ -746,13 +746,13 @@
     hash_.AllocateVirtualMemory(hash_.GetCacheSize());
 
     Elf_Dyn dyns[] = {
-      { .d_tag = DT_HASH, .d_un.d_ptr = hash_.GetAddress() },
-      { .d_tag = DT_STRTAB, .d_un.d_ptr = dynstr_.GetAddress() },
-      { .d_tag = DT_SYMTAB, .d_un.d_ptr = dynsym_.GetAddress() },
-      { .d_tag = DT_SYMENT, .d_un.d_ptr = sizeof(Elf_Sym) },
-      { .d_tag = DT_STRSZ, .d_un.d_ptr = dynstr_.GetCacheSize() },
-      { .d_tag = DT_SONAME, .d_un.d_ptr = soname_offset },
-      { .d_tag = DT_NULL, .d_un.d_ptr = 0 },
+      { .d_tag = DT_HASH,   .d_un = { .d_ptr = hash_.GetAddress() }, },
+      { .d_tag = DT_STRTAB, .d_un = { .d_ptr = dynstr_.GetAddress() }, },
+      { .d_tag = DT_SYMTAB, .d_un = { .d_ptr = dynsym_.GetAddress() }, },
+      { .d_tag = DT_SYMENT, .d_un = { .d_ptr = sizeof(Elf_Sym) }, },
+      { .d_tag = DT_STRSZ,  .d_un = { .d_ptr = dynstr_.GetCacheSize() }, },
+      { .d_tag = DT_SONAME, .d_un = { .d_ptr = soname_offset }, },
+      { .d_tag = DT_NULL,   .d_un = { .d_ptr = 0 }, },
     };
     dynamic_.Add(&dyns, sizeof(dyns));
     dynamic_.AllocateVirtualMemory(dynamic_.GetCacheSize());
diff --git a/libnativebridge/.clang-format b/libnativebridge/.clang-format
new file mode 120000
index 0000000..fd0645f
--- /dev/null
+++ b/libnativebridge/.clang-format
@@ -0,0 +1 @@
+../.clang-format-2
\ No newline at end of file
diff --git a/libnativebridge/Android.bp b/libnativebridge/Android.bp
new file mode 100644
index 0000000..60bf3ad
--- /dev/null
+++ b/libnativebridge/Android.bp
@@ -0,0 +1,79 @@
+cc_defaults {
+    name: "libnativebridge-defaults",
+    defaults: ["art_defaults"],
+    cppflags: [
+        "-fvisibility=protected",
+    ],
+    header_libs: ["libnativebridge-headers"],
+    export_header_lib_headers: ["libnativebridge-headers"],
+}
+
+cc_library_headers {
+    name: "libnativebridge-headers",
+
+    host_supported: true,
+    export_include_dirs: ["include"],
+
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.art.debug",
+        "com.android.art.release",
+    ],
+}
+
+cc_library {
+    name: "libnativebridge",
+    defaults: ["libnativebridge-defaults"],
+    visibility: [
+        "//frameworks/base/cmds/app_process",
+        // TODO(b/133140750): Clean this up.
+        "//frameworks/base/native/webview/loader/libwebviewchromium_loader",
+    ],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
+
+    host_supported: true,
+    srcs: ["native_bridge.cc"],
+    header_libs: [
+        "libbase_headers",
+    ],
+    shared_libs: [
+        "liblog",
+    ],
+    // TODO(jiyong): remove this line after aosp/885921 lands
+    export_include_dirs: ["include"],
+
+    target: {
+        android: {
+            version_script: "libnativebridge.map.txt",
+        },
+        linux: {
+            version_script: "libnativebridge.map.txt",
+        },
+    },
+
+    stubs: {
+        symbol_file: "libnativebridge.map.txt",
+        versions: ["1"],
+    },
+}
+
+// TODO(b/124250621): eliminate the need for this library
+cc_library {
+    name: "libnativebridge_lazy",
+    defaults: ["libnativebridge-defaults"],
+    visibility: [
+        "//art/libnativebridge/tests",
+        "//frameworks/base/core/jni",
+        "//frameworks/native/opengl/libs",
+        "//frameworks/native/vulkan/libvulkan",
+    ],
+
+    host_supported: false,
+    srcs: ["native_bridge_lazy.cc"],
+    required: ["libnativebridge"],
+}
+
+subdirs = ["tests"]
diff --git a/libnativebridge/OWNERS b/libnativebridge/OWNERS
new file mode 100644
index 0000000..daf87f4
--- /dev/null
+++ b/libnativebridge/OWNERS
@@ -0,0 +1,4 @@
+dimitry@google.com
+eaeltsin@google.com
+ngeoffray@google.com
+oth@google.com
diff --git a/libnativebridge/include/nativebridge/native_bridge.h b/libnativebridge/include/nativebridge/native_bridge.h
new file mode 100644
index 0000000..e20b627
--- /dev/null
+++ b/libnativebridge/include/nativebridge/native_bridge.h
@@ -0,0 +1,426 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBNATIVEBRIDGE_INCLUDE_NATIVEBRIDGE_NATIVE_BRIDGE_H_
+#define ART_LIBNATIVEBRIDGE_INCLUDE_NATIVEBRIDGE_NATIVE_BRIDGE_H_
+
+#include <signal.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#include "jni.h"
+
+#ifdef __cplusplus
+namespace android {
+extern "C" {
+#endif  // __cplusplus
+
+struct NativeBridgeRuntimeCallbacks;
+struct NativeBridgeRuntimeValues;
+
+// Function pointer type for sigaction. This is mostly the signature of a signal handler, except
+// for the return type. The runtime needs to know whether the signal was handled or should be given
+// to the chain.
+typedef bool (*NativeBridgeSignalHandlerFn)(int, siginfo_t*, void*);
+
+// Open the native bridge, if any. Should be called by Runtime::Init(). A null library filename
+// signals that we do not want to load a native bridge.
+bool LoadNativeBridge(const char* native_bridge_library_filename,
+                      const struct NativeBridgeRuntimeCallbacks* runtime_callbacks);
+
+// Quick check whether a native bridge will be needed. This is based off of the instruction set
+// of the process.
+bool NeedsNativeBridge(const char* instruction_set);
+
+// Do the early initialization part of the native bridge, if necessary. This should be done under
+// high privileges.
+bool PreInitializeNativeBridge(const char* app_data_dir, const char* instruction_set);
+
+// Prepare to fork from zygote. May be required to clean up the environment, e.g.
+// close emulated file descriptors, after doPreload() in app-zygote.
+void PreZygoteForkNativeBridge();
+
+// Initialize the native bridge, if any. Should be called by Runtime::DidForkFromZygote. The JNIEnv*
+// will be used to modify the app environment for the bridge.
+bool InitializeNativeBridge(JNIEnv* env, const char* instruction_set);
+
+// Unload the native bridge, if any. Should be called by Runtime::DidForkFromZygote.
+void UnloadNativeBridge();
+
+// Check whether a native bridge is available (opened or initialized). Requires a prior call to
+// LoadNativeBridge.
+bool NativeBridgeAvailable();
+
+// Check whether a native bridge is available (initialized). Requires a prior call to
+// LoadNativeBridge & InitializeNativeBridge.
+bool NativeBridgeInitialized();
+
+// Load a shared library that is supported by the native bridge.
+//
+// Starting with v3, NativeBridge has two scenarios: with/without namespace.
+// Use NativeBridgeLoadLibraryExt() instead in namespace scenario.
+void* NativeBridgeLoadLibrary(const char* libpath, int flag);
+
+// Get a native bridge trampoline for specified native method.
+void* NativeBridgeGetTrampoline(void* handle, const char* name, const char* shorty, uint32_t len);
+
+// True if native library paths are valid and is for an ABI that is supported by native bridge.
+// The *libpath* must point to a library.
+//
+// Starting with v3, NativeBridge has two scenarios: with/without namespace.
+// Use NativeBridgeIsPathSupported() instead in namespace scenario.
+bool NativeBridgeIsSupported(const char* libpath);
+
+// Returns the version number of the native bridge. This information is available after a
+// successful LoadNativeBridge() and before closing it, that is, as long as NativeBridgeAvailable()
+// returns true. Returns 0 otherwise.
+uint32_t NativeBridgeGetVersion();
+
+// Returns a signal handler that the bridge would like to be managed. Only valid for a native
+// bridge supporting the version 2 interface. Will return null if the bridge does not support
+// version 2, or if it doesn't have a signal handler it wants to be known.
+NativeBridgeSignalHandlerFn NativeBridgeGetSignalHandler(int signal);
+
+// Returns whether we have seen a native bridge error. This could happen because the library
+// was not found, rejected, could not be initialized and so on.
+//
+// This functionality is mainly for testing.
+bool NativeBridgeError();
+
+// Returns whether a given string is acceptable as a native bridge library filename.
+//
+// This functionality is exposed mainly for testing.
+bool NativeBridgeNameAcceptable(const char* native_bridge_library_filename);
+
+// Decrements the reference count on the dynamic library handler. If the reference count drops
+// to zero then the dynamic library is unloaded. Returns 0 on success and non-zero on error.
+int NativeBridgeUnloadLibrary(void* handle);
+
+// Get last error message of native bridge when fail to load library or search symbol.
+// This is reflection of dlerror() for native bridge.
+const char* NativeBridgeGetError();
+
+struct native_bridge_namespace_t;
+
+// True if native library paths are valid and is for an ABI that is supported by native bridge.
+// Different from NativeBridgeIsSupported(), the *path* here must be a directory containing
+// libraries of an ABI.
+//
+// Starting with v3, NativeBridge has two scenarios: with/without namespace.
+// Use NativeBridgeIsSupported() instead in non-namespace scenario.
+bool NativeBridgeIsPathSupported(const char* path);
+
+// Initializes anonymous namespace.
+// NativeBridge's peer of android_init_anonymous_namespace() of dynamic linker.
+//
+// The anonymous namespace is used in the case when a NativeBridge implementation
+// cannot identify the caller of dlopen/dlsym which happens for the code not loaded
+// by dynamic linker; for example calls from the mono-compiled code.
+//
+// Starting with v3, NativeBridge has two scenarios: with/without namespace.
+// Should not use in non-namespace scenario.
+bool NativeBridgeInitAnonymousNamespace(const char* public_ns_sonames,
+                                        const char* anon_ns_library_path);
+
+// Create new namespace in which native libraries will be loaded.
+// NativeBridge's peer of android_create_namespace() of dynamic linker.
+//
+// The libraries in the namespace are searched in the following order:
+// 1. ld_library_path (Think of this as namespace-local LD_LIBRARY_PATH)
+// 2. In directories specified by DT_RUNPATH of the "needed by" binary.
+// 3. default_library_path (Think of this as namespace-local default library path)
+//
+// Starting with v3, NativeBridge has two scenarios: with/without namespace.
+// Should not use in non-namespace scenario.
+struct native_bridge_namespace_t* NativeBridgeCreateNamespace(
+    const char* name, const char* ld_library_path, const char* default_library_path, uint64_t type,
+    const char* permitted_when_isolated_path, struct native_bridge_namespace_t* parent_ns);
+
+// Creates a link which shares some libraries from one namespace to another.
+// NativeBridge's peer of android_link_namespaces() of dynamic linker.
+//
+// Starting with v3, NativeBridge has two scenarios: with/without namespace.
+// Should not use in non-namespace scenario.
+bool NativeBridgeLinkNamespaces(struct native_bridge_namespace_t* from,
+                                struct native_bridge_namespace_t* to,
+                                const char* shared_libs_sonames);
+
+// Load a shared library with namespace key that is supported by the native bridge.
+// NativeBridge's peer of android_dlopen_ext() of dynamic linker, only supports namespace
+// extension.
+//
+// Starting with v3, NativeBridge has two scenarios: with/without namespace.
+// Use NativeBridgeLoadLibrary() instead in non-namespace scenario.
+void* NativeBridgeLoadLibraryExt(const char* libpath, int flag,
+                                 struct native_bridge_namespace_t* ns);
+
+// Returns exported namespace by the name. This is a reflection of
+// android_get_exported_namespace function. Introduced in v5.
+struct native_bridge_namespace_t* NativeBridgeGetExportedNamespace(const char* name);
+
+// Native bridge interfaces to runtime.
+struct NativeBridgeCallbacks {
+  // Version number of the interface.
+  uint32_t version;
+
+  // Initialize native bridge. Native bridge's internal implementation must ensure MT safety and
+  // that the native bridge is initialized only once. Thus it is OK to call this interface for an
+  // already initialized native bridge.
+  //
+  // Parameters:
+  //   runtime_cbs [IN] the pointer to NativeBridgeRuntimeCallbacks.
+  // Returns:
+  //   true if initialization was successful.
+  bool (*initialize)(const struct NativeBridgeRuntimeCallbacks* runtime_cbs,
+                     const char* private_dir, const char* instruction_set);
+
+  // Load a shared library that is supported by the native bridge.
+  //
+  // Parameters:
+  //   libpath [IN] path to the shared library
+  //   flag [IN] the standard RTLD_XXX defined in bionic dlfcn.h
+  // Returns:
+  //   The opaque handle of the shared library if successful, otherwise NULL
+  //
+  // Starting with v3, NativeBridge has two scenarios: with/without namespace.
+  // Use loadLibraryExt instead in namespace scenario.
+  void* (*loadLibrary)(const char* libpath, int flag);
+
+  // Get a native bridge trampoline for specified native method. The trampoline has same
+  // signature as the native method.
+  //
+  // Parameters:
+  //   handle [IN] the handle returned from loadLibrary
+  //   shorty [IN] short descriptor of native method
+  //   len [IN] length of shorty
+  // Returns:
+  //   address of trampoline if successful, otherwise NULL
+  void* (*getTrampoline)(void* handle, const char* name, const char* shorty, uint32_t len);
+
+  // Check whether native library is valid and is for an ABI that is supported by native bridge.
+  //
+  // Parameters:
+  //   libpath [IN] path to the shared library
+  // Returns:
+  //   TRUE if library is supported by native bridge, FALSE otherwise
+  //
+  // Starting with v3, NativeBridge has two scenarios: with/without namespace.
+  // Use isPathSupported instead in namespace scenario.
+  bool (*isSupported)(const char* libpath);
+
+  // Provide environment values required by the app running with native bridge according to the
+  // instruction set.
+  //
+  // Parameters:
+  //   instruction_set [IN] the instruction set of the app
+  // Returns:
+  //   NULL if not supported by native bridge.
+  //   Otherwise, return all environment values to be set after fork.
+  const struct NativeBridgeRuntimeValues* (*getAppEnv)(const char* instruction_set);
+
+  // Added callbacks in version 2.
+
+  // Check whether the bridge is compatible with the given version. A bridge may decide not to be
+  // forwards- or backwards-compatible, and libnativebridge will then stop using it.
+  //
+  // Parameters:
+  //   bridge_version [IN] the version of libnativebridge.
+  // Returns:
+  //   true if the native bridge supports the given version of libnativebridge.
+  bool (*isCompatibleWith)(uint32_t bridge_version);
+
+  // A callback to retrieve a native bridge's signal handler for the specified signal. The runtime
+  // will ensure that the signal handler is being called after the runtime's own handler, but before
+  // all chained handlers. The native bridge should not try to install the handler by itself, as
+  // that will potentially lead to cycles.
+  //
+  // Parameters:
+  //   signal [IN] the signal for which the handler is asked for. Currently, only SIGSEGV is
+  //                 supported by the runtime.
+  // Returns:
+  //   NULL if the native bridge doesn't use a handler or doesn't want it to be managed by the
+  //   runtime.
+  //   Otherwise, a pointer to the signal handler.
+  NativeBridgeSignalHandlerFn (*getSignalHandler)(int signal);
+
+  // Added callbacks in version 3.
+
+  // Decrements the reference count on the dynamic library handler. If the reference count drops
+  // to zero then the dynamic library is unloaded.
+  //
+  // Parameters:
+  //   handle [IN] the handler of a dynamic library.
+  //
+  // Returns:
+  //   0 on success, and nonzero on error.
+  int (*unloadLibrary)(void* handle);
+
+  // Dump the last failure message of native bridge when fail to load library or search symbol.
+  //
+  // Parameters:
+  //
+  // Returns:
+  //   A string describing the most recent error that occurred when load library
+  //   or lookup symbol via native bridge.
+  const char* (*getError)();
+
+  // Check whether library paths are supported by native bridge.
+  //
+  // Parameters:
+  //   library_path [IN] search paths for native libraries (directories separated by ':')
+  // Returns:
+  //   TRUE if libraries within search paths are supported by native bridge, FALSE otherwise
+  //
+  // Starting with v3, NativeBridge has two scenarios: with/without namespace.
+  // Use isSupported instead in non-namespace scenario.
+  bool (*isPathSupported)(const char* library_path);
+
+  // Initializes anonymous namespace at native bridge side.
+  // NativeBridge's peer of android_init_anonymous_namespace() of dynamic linker.
+  //
+  // The anonymous namespace is used in the case when a NativeBridge implementation
+  // cannot identify the caller of dlopen/dlsym which happens for the code not loaded
+  // by dynamic linker; for example calls from the mono-compiled code.
+  //
+  // Parameters:
+  //   public_ns_sonames [IN] the name of "public" libraries.
+  //   anon_ns_library_path [IN] the library search path of (anonymous) namespace.
+  // Returns:
+  //   true if the pass is ok.
+  //   Otherwise, false.
+  //
+  // Starting with v3, NativeBridge has two scenarios: with/without namespace.
+  // Should not use in non-namespace scenario.
+  bool (*initAnonymousNamespace)(const char* public_ns_sonames, const char* anon_ns_library_path);
+
+  // Create new namespace in which native libraries will be loaded.
+  // NativeBridge's peer of android_create_namespace() of dynamic linker.
+  //
+  // Parameters:
+  //   name [IN] the name of the namespace.
+  //   ld_library_path [IN] the first set of library search paths of the namespace.
+  //   default_library_path [IN] the second set of library search path of the namespace.
+  //   type [IN] the attribute of the namespace.
+  //   permitted_when_isolated_path [IN] the permitted path for isolated namespace(if it is).
+  //   parent_ns [IN] the pointer of the parent namespace to be inherited from.
+  // Returns:
+  //   native_bridge_namespace_t* for created namespace or nullptr in the case of error.
+  //
+  // Starting with v3, NativeBridge has two scenarios: with/without namespace.
+  // Should not use in non-namespace scenario.
+  struct native_bridge_namespace_t* (*createNamespace)(const char* name,
+                                                       const char* ld_library_path,
+                                                       const char* default_library_path,
+                                                       uint64_t type,
+                                                       const char* permitted_when_isolated_path,
+                                                       struct native_bridge_namespace_t* parent_ns);
+
+  // Creates a link which shares some libraries from one namespace to another.
+  // NativeBridge's peer of android_link_namespaces() of dynamic linker.
+  //
+  // Parameters:
+  //   from [IN] the namespace where libraries are accessed.
+  //   to [IN] the namespace where libraries are loaded.
+  //   shared_libs_sonames [IN] the libraries to be shared.
+  //
+  // Returns:
+  //   Whether the linking succeeded or not.
+  //
+  // Starting with v3, NativeBridge has two scenarios: with/without namespace.
+  // Should not use in non-namespace scenario.
+  bool (*linkNamespaces)(struct native_bridge_namespace_t* from,
+                         struct native_bridge_namespace_t* to, const char* shared_libs_sonames);
+
+  // Load a shared library within a namespace.
+  // NativeBridge's peer of android_dlopen_ext() of dynamic linker, only supports namespace
+  // extension.
+  //
+  // Parameters:
+  //   libpath [IN] path to the shared library
+  //   flag [IN] the standard RTLD_XXX defined in bionic dlfcn.h
+  //   ns [IN] the pointer of the namespace in which the library should be loaded.
+  // Returns:
+  //   The opaque handle of the shared library if successful, otherwise NULL
+  //
+  // Starting with v3, NativeBridge has two scenarios: with/without namespace.
+  // Use loadLibrary instead in non-namespace scenario.
+  void* (*loadLibraryExt)(const char* libpath, int flag, struct native_bridge_namespace_t* ns);
+
+  // Get native bridge version of vendor namespace.
+  // The vendor namespace is the namespace used to load vendor public libraries.
+  // With O release this namespace can be different from the default namespace.
+  // For devices without vendor namespaces enabled this function should return null
+  //
+  // Returns:
+  //   vendor namespace or null if it was not set up for the device
+  //
+  // Starting with v5 (Android Q) this function is no longer used.
+  // Use getExportedNamespace() below.
+  struct native_bridge_namespace_t* (*getVendorNamespace)();
+
+  // Get native bridge version of exported namespace. Peer of
+  // android_get_exported_namespace(const char*) function.
+  //
+  // Returns:
+  //   exported namespace or null if it was not set up for the device
+  struct native_bridge_namespace_t* (*getExportedNamespace)(const char* name);
+
+  // If native bridge is used in app-zygote (in doPreload()) this callback is
+  // required to clean-up the environment before the fork (see b/146904103).
+  void (*preZygoteFork)();
+};
+
+// Runtime interfaces to native bridge.
+struct NativeBridgeRuntimeCallbacks {
+  // Get shorty of a Java method. The shorty is supposed to be persistent in memory.
+  //
+  // Parameters:
+  //   env [IN] pointer to JNIenv.
+  //   mid [IN] Java methodID.
+  // Returns:
+  //   short descriptor for method.
+  const char* (*getMethodShorty)(JNIEnv* env, jmethodID mid);
+
+  // Get number of native methods for specified class.
+  //
+  // Parameters:
+  //   env [IN] pointer to JNIenv.
+  //   clazz [IN] Java class object.
+  // Returns:
+  //   number of native methods.
+  uint32_t (*getNativeMethodCount)(JNIEnv* env, jclass clazz);
+
+  // Get at most 'method_count' native methods for specified class 'clazz'. Results are output
+  // via 'methods' [OUT]. The signature pointer in JNINativeMethod is reused as the method shorty.
+  //
+  // Parameters:
+  //   env [IN] pointer to JNIenv.
+  //   clazz [IN] Java class object.
+  //   methods [OUT] array of methods with name, shorty, and fnPtr.
+  //   method_count [IN] max number of elements in methods.
+  // Returns:
+  //   number of methods it actually wrote to methods.
+  uint32_t (*getNativeMethods)(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
+                               uint32_t method_count);
+};
+
+#ifdef __cplusplus
+}  // extern "C"
+}  // namespace android
+#endif  // __cplusplus
+
+#endif  // ART_LIBNATIVEBRIDGE_INCLUDE_NATIVEBRIDGE_NATIVE_BRIDGE_H_
diff --git a/libnativebridge/libnativebridge.map.txt b/libnativebridge/libnativebridge.map.txt
new file mode 100644
index 0000000..158363b
--- /dev/null
+++ b/libnativebridge/libnativebridge.map.txt
@@ -0,0 +1,46 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TODO(b/122710865): Most of these uses come from libnativeloader, which should be bundled
+# together with libnativebridge in the APEX. Once this happens, prune this list.
+LIBNATIVEBRIDGE_1 {
+  global:
+    NativeBridgeIsSupported;
+    NativeBridgeLoadLibrary;
+    NativeBridgeUnloadLibrary;
+    NativeBridgeGetError;
+    NativeBridgeIsPathSupported;
+    NativeBridgeCreateNamespace;
+    NativeBridgeGetExportedNamespace;
+    NativeBridgeLinkNamespaces;
+    NativeBridgeLoadLibraryExt;
+    NativeBridgeInitAnonymousNamespace;
+    NativeBridgeInitialized;
+    NativeBridgeGetTrampoline;
+    LoadNativeBridge;
+    PreInitializeNativeBridge;
+    PreZygoteForkNativeBridge;
+    InitializeNativeBridge;
+    NativeBridgeGetVersion;
+    NativeBridgeGetSignalHandler;
+    UnloadNativeBridge;
+    NativeBridgeAvailable;
+    NeedsNativeBridge;
+    NativeBridgeError;
+    NativeBridgeNameAcceptable;
+  local:
+    *;
+};
diff --git a/libnativebridge/native_bridge.cc b/libnativebridge/native_bridge.cc
new file mode 100644
index 0000000..b24d14a
--- /dev/null
+++ b/libnativebridge/native_bridge.cc
@@ -0,0 +1,662 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "nativebridge"
+
+#include "nativebridge/native_bridge.h"
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <cstring>
+
+#include <android-base/macros.h>
+#include <log/log.h>
+
+namespace android {
+
+#ifdef __APPLE__
+template <typename T>
+void UNUSED(const T&) {}
+#endif
+
+extern "C" {
+
+// Environment values required by the apps running with native bridge.
+struct NativeBridgeRuntimeValues {
+    const char* os_arch;
+    const char* cpu_abi;
+    const char* cpu_abi2;
+    const char* *supported_abis;
+    int32_t abi_count;
+};
+
+// The symbol name exposed by native-bridge with the type of NativeBridgeCallbacks.
+static constexpr const char* kNativeBridgeInterfaceSymbol = "NativeBridgeItf";
+
+enum class NativeBridgeState {
+  kNotSetup,                        // Initial state.
+  kOpened,                          // After successful dlopen.
+  kPreInitialized,                  // After successful pre-initialization.
+  kInitialized,                     // After successful initialization.
+  kClosed                           // Closed or errors.
+};
+
+static constexpr const char* kNotSetupString = "kNotSetup";
+static constexpr const char* kOpenedString = "kOpened";
+static constexpr const char* kPreInitializedString = "kPreInitialized";
+static constexpr const char* kInitializedString = "kInitialized";
+static constexpr const char* kClosedString = "kClosed";
+
+static const char* GetNativeBridgeStateString(NativeBridgeState state) {
+  switch (state) {
+    case NativeBridgeState::kNotSetup:
+      return kNotSetupString;
+
+    case NativeBridgeState::kOpened:
+      return kOpenedString;
+
+    case NativeBridgeState::kPreInitialized:
+      return kPreInitializedString;
+
+    case NativeBridgeState::kInitialized:
+      return kInitializedString;
+
+    case NativeBridgeState::kClosed:
+      return kClosedString;
+  }
+}
+
+// Current state of the native bridge.
+static NativeBridgeState state = NativeBridgeState::kNotSetup;
+
+// The version of NativeBridge implementation.
+// Different Nativebridge interface needs the service of different version of
+// Nativebridge implementation.
+// Used by isCompatibleWith() which is introduced in v2.
+enum NativeBridgeImplementationVersion {
+  // first version, not used.
+  DEFAULT_VERSION = 1,
+  // The version which signal semantic is introduced.
+  SIGNAL_VERSION = 2,
+  // The version which namespace semantic is introduced.
+  NAMESPACE_VERSION = 3,
+  // The version with vendor namespaces
+  VENDOR_NAMESPACE_VERSION = 4,
+  // The version with runtime namespaces
+  RUNTIME_NAMESPACE_VERSION = 5,
+  // The version with pre-zygote-fork hook to support app-zygotes.
+  PRE_ZYGOTE_FORK_VERSION = 6,
+};
+
+// Whether we had an error at some point.
+static bool had_error = false;
+
+// Handle of the loaded library.
+static void* native_bridge_handle = nullptr;
+// Pointer to the callbacks. Available as soon as LoadNativeBridge succeeds, but only initialized
+// later.
+static const NativeBridgeCallbacks* callbacks = nullptr;
+// Callbacks provided by the environment to the bridge. Passed to LoadNativeBridge.
+static const NativeBridgeRuntimeCallbacks* runtime_callbacks = nullptr;
+
+// The app's code cache directory.
+static char* app_code_cache_dir = nullptr;
+
+// Code cache directory (relative to the application private directory)
+// Ideally we'd like to call into framework to retrieve this name. However that's considered an
+// implementation detail and will require either hacks or consistent refactorings. We compromise
+// and hard code the directory name again here.
+static constexpr const char* kCodeCacheDir = "code_cache";
+
+// Characters allowed in a native bridge filename. The first character must
+// be in [a-zA-Z] (expected 'l' for "libx"). The rest must be in [a-zA-Z0-9._-].
+static bool CharacterAllowed(char c, bool first) {
+  if (first) {
+    return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z');
+  } else {
+    return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') ||
+           (c == '.') || (c == '_') || (c == '-');
+  }
+}
+
+static void ReleaseAppCodeCacheDir() {
+  if (app_code_cache_dir != nullptr) {
+    delete[] app_code_cache_dir;
+    app_code_cache_dir = nullptr;
+  }
+}
+
+// We only allow simple names for the library. It is supposed to be a file in
+// /system/lib or /vendor/lib. Only allow a small range of characters, that is
+// names consisting of [a-zA-Z0-9._-] and starting with [a-zA-Z].
+bool NativeBridgeNameAcceptable(const char* nb_library_filename) {
+  const char* ptr = nb_library_filename;
+  if (*ptr == 0) {
+    // Empty string. Allowed, means no native bridge.
+    return true;
+  } else {
+    // First character must be [a-zA-Z].
+    if (!CharacterAllowed(*ptr, true))  {
+      // Found an invalid first character, don't accept.
+      ALOGE("Native bridge library %s has been rejected for first character %c",
+            nb_library_filename,
+            *ptr);
+      return false;
+    } else {
+      // For the rest, be more liberal.
+      ptr++;
+      while (*ptr != 0) {
+        if (!CharacterAllowed(*ptr, false)) {
+          // Found an invalid character, don't accept.
+          ALOGE("Native bridge library %s has been rejected for %c", nb_library_filename, *ptr);
+          return false;
+        }
+        ptr++;
+      }
+    }
+    return true;
+  }
+}
+
+// The policy of invoking Nativebridge changed in v3 with/without namespace.
+// The NativeBridge implementation is not expected to maintain backward compatibility.
+static bool isCompatibleWith(const uint32_t version) {
+  // Libnativebridge is now designed to be forward-compatible. So only "0" is an unsupported
+  // version.
+  if (callbacks == nullptr || callbacks->version == 0 || version == 0) {
+    return false;
+  }
+
+  // If this is a v2+ bridge, it may not be forwards- or backwards-compatible. Check.
+  if (callbacks->version >= SIGNAL_VERSION) {
+    return callbacks->isCompatibleWith(version);
+  }
+
+  return true;
+}
+
+static void CloseNativeBridge(bool with_error) {
+  state = NativeBridgeState::kClosed;
+  had_error |= with_error;
+  ReleaseAppCodeCacheDir();
+}
+
+bool LoadNativeBridge(const char* nb_library_filename,
+                      const NativeBridgeRuntimeCallbacks* runtime_cbs) {
+  // We expect only one place that calls LoadNativeBridge: Runtime::Init. At that point we are not
+  // multi-threaded, so we do not need locking here.
+
+  if (state != NativeBridgeState::kNotSetup) {
+    // Setup has been called before. Ignore this call.
+    if (nb_library_filename != nullptr) {  // Avoids some log-spam for dalvikvm.
+      ALOGW("Called LoadNativeBridge for an already set up native bridge. State is %s.",
+            GetNativeBridgeStateString(state));
+    }
+    // Note: counts as an error, even though the bridge may be functional.
+    had_error = true;
+    return false;
+  }
+
+  if (nb_library_filename == nullptr || *nb_library_filename == 0) {
+    CloseNativeBridge(false);
+    return false;
+  } else {
+    if (!NativeBridgeNameAcceptable(nb_library_filename)) {
+      CloseNativeBridge(true);
+    } else {
+      // Try to open the library.
+      void* handle = dlopen(nb_library_filename, RTLD_LAZY);
+      if (handle != nullptr) {
+        callbacks = reinterpret_cast<NativeBridgeCallbacks*>(dlsym(handle,
+                                                                   kNativeBridgeInterfaceSymbol));
+        if (callbacks != nullptr) {
+          if (isCompatibleWith(NAMESPACE_VERSION)) {
+            // Store the handle for later.
+            native_bridge_handle = handle;
+          } else {
+            callbacks = nullptr;
+            dlclose(handle);
+            ALOGW("Unsupported native bridge interface.");
+          }
+        } else {
+          dlclose(handle);
+        }
+      }
+
+      // Two failure conditions: could not find library (dlopen failed), or could not find native
+      // bridge interface (dlsym failed). Both are an error and close the native bridge.
+      if (callbacks == nullptr) {
+        CloseNativeBridge(true);
+      } else {
+        runtime_callbacks = runtime_cbs;
+        state = NativeBridgeState::kOpened;
+      }
+    }
+    return state == NativeBridgeState::kOpened;
+  }
+}
+
+bool NeedsNativeBridge(const char* instruction_set) {
+  if (instruction_set == nullptr) {
+    ALOGE("Null instruction set in NeedsNativeBridge.");
+    return false;
+  }
+  return strncmp(instruction_set, ABI_STRING, strlen(ABI_STRING) + 1) != 0;
+}
+
+bool PreInitializeNativeBridge(const char* app_data_dir_in, const char* instruction_set) {
+  if (state != NativeBridgeState::kOpened) {
+    ALOGE("Invalid state: native bridge is expected to be opened.");
+    CloseNativeBridge(true);
+    return false;
+  }
+
+  if (app_data_dir_in != nullptr) {
+    // Create the path to the application code cache directory.
+    // The memory will be released after initialization or when the native bridge is closed.
+    const size_t len = strlen(app_data_dir_in) + strlen(kCodeCacheDir) + 2;  // '\0' + '/'
+    app_code_cache_dir = new char[len];
+    snprintf(app_code_cache_dir, len, "%s/%s", app_data_dir_in, kCodeCacheDir);
+  } else {
+    ALOGW("Application private directory isn't available.");
+    app_code_cache_dir = nullptr;
+  }
+
+  // Bind-mount /system/lib{,64}/<isa>/cpuinfo to /proc/cpuinfo.
+  // Failure is not fatal and will keep the native bridge in kPreInitialized.
+  state = NativeBridgeState::kPreInitialized;
+
+#ifndef __APPLE__
+  if (instruction_set == nullptr) {
+    return true;
+  }
+  size_t isa_len = strlen(instruction_set);
+  if (isa_len > 10) {
+    // 10 is a loose upper bound on the currently known instruction sets (a tight bound is 7 for
+    // x86_64 [including the trailing \0]). This is so we don't have to change here if there will
+    // be another instruction set in the future.
+    ALOGW("Instruction set %s is malformed, must be less than or equal to 10 characters.",
+          instruction_set);
+    return true;
+  }
+
+  // If the file does not exist, the mount command will fail,
+  // so we save the extra file existence check.
+  char cpuinfo_path[1024];
+
+#if defined(__ANDROID__)
+  snprintf(cpuinfo_path, sizeof(cpuinfo_path), "/system/lib"
+#ifdef __LP64__
+      "64"
+#endif  // __LP64__
+      "/%s/cpuinfo", instruction_set);
+#else   // !__ANDROID__
+  // To be able to test on the host, we hardwire a relative path.
+  snprintf(cpuinfo_path, sizeof(cpuinfo_path), "./cpuinfo");
+#endif
+
+  // Bind-mount.
+  if (TEMP_FAILURE_RETRY(mount(cpuinfo_path,        // Source.
+                               "/proc/cpuinfo",     // Target.
+                               nullptr,             // FS type.
+                               MS_BIND,             // Mount flags: bind mount.
+                               nullptr)) == -1) {   // "Data."
+    ALOGW("Failed to bind-mount %s as /proc/cpuinfo: %s", cpuinfo_path, strerror(errno));
+  }
+#else  // __APPLE__
+  UNUSED(instruction_set);
+  ALOGW("Mac OS does not support bind-mounting. Host simulation of native bridge impossible.");
+#endif
+
+  return true;
+}
+
+void PreZygoteForkNativeBridge() {
+  if (NativeBridgeInitialized()) {
+    if (isCompatibleWith(PRE_ZYGOTE_FORK_VERSION)) {
+      return callbacks->preZygoteFork();
+    } else {
+      ALOGE("not compatible with version %d, preZygoteFork() isn't invoked",
+            PRE_ZYGOTE_FORK_VERSION);
+    }
+  }
+}
+
+static void SetCpuAbi(JNIEnv* env, jclass build_class, const char* field, const char* value) {
+  if (value != nullptr) {
+    jfieldID field_id = env->GetStaticFieldID(build_class, field, "Ljava/lang/String;");
+    if (field_id == nullptr) {
+      env->ExceptionClear();
+      ALOGW("Could not find %s field.", field);
+      return;
+    }
+
+    jstring str = env->NewStringUTF(value);
+    if (str == nullptr) {
+      env->ExceptionClear();
+      ALOGW("Could not create string %s.", value);
+      return;
+    }
+
+    env->SetStaticObjectField(build_class, field_id, str);
+  }
+}
+
+// Set up the environment for the bridged app.
+static void SetupEnvironment(const NativeBridgeCallbacks* cbs, JNIEnv* env, const char* isa) {
+  // Need a JNIEnv* to do anything.
+  if (env == nullptr) {
+    ALOGW("No JNIEnv* to set up app environment.");
+    return;
+  }
+
+  // Query the bridge for environment values.
+  const struct NativeBridgeRuntimeValues* env_values = cbs->getAppEnv(isa);
+  if (env_values == nullptr) {
+    return;
+  }
+
+  // Keep the JNIEnv clean.
+  jint success = env->PushLocalFrame(16);  // That should be small and large enough.
+  if (success < 0) {
+    // Out of memory, really borked.
+    ALOGW("Out of memory while setting up app environment.");
+    env->ExceptionClear();
+    return;
+  }
+
+  // Reset CPU_ABI & CPU_ABI2 to values required by the apps running with native bridge.
+  if (env_values->cpu_abi != nullptr || env_values->cpu_abi2 != nullptr ||
+      env_values->abi_count >= 0) {
+    jclass bclass_id = env->FindClass("android/os/Build");
+    if (bclass_id != nullptr) {
+      SetCpuAbi(env, bclass_id, "CPU_ABI", env_values->cpu_abi);
+      SetCpuAbi(env, bclass_id, "CPU_ABI2", env_values->cpu_abi2);
+    } else {
+      // For example in a host test environment.
+      env->ExceptionClear();
+      ALOGW("Could not find Build class.");
+    }
+  }
+
+  if (env_values->os_arch != nullptr) {
+    jclass sclass_id = env->FindClass("java/lang/System");
+    if (sclass_id != nullptr) {
+      jmethodID set_prop_id = env->GetStaticMethodID(sclass_id, "setUnchangeableSystemProperty",
+          "(Ljava/lang/String;Ljava/lang/String;)V");
+      if (set_prop_id != nullptr) {
+        // Init os.arch to the value required by the apps running with native bridge.
+        env->CallStaticVoidMethod(sclass_id, set_prop_id, env->NewStringUTF("os.arch"),
+            env->NewStringUTF(env_values->os_arch));
+      } else {
+        env->ExceptionClear();
+        ALOGW("Could not find System#setUnchangeableSystemProperty.");
+      }
+    } else {
+      env->ExceptionClear();
+      ALOGW("Could not find System class.");
+    }
+  }
+
+  // Make it pristine again.
+  env->PopLocalFrame(nullptr);
+}
+
+bool InitializeNativeBridge(JNIEnv* env, const char* instruction_set) {
+  // We expect only one place that calls InitializeNativeBridge: Runtime::DidForkFromZygote. At that
+  // point we are not multi-threaded, so we do not need locking here.
+
+  if (state == NativeBridgeState::kPreInitialized) {
+    if (app_code_cache_dir != nullptr) {
+      // Check for code cache: if it doesn't exist try to create it.
+      struct stat st;
+      if (stat(app_code_cache_dir, &st) == -1) {
+        if (errno == ENOENT) {
+          if (mkdir(app_code_cache_dir, S_IRWXU | S_IRWXG | S_IXOTH) == -1) {
+            ALOGW("Cannot create code cache directory %s: %s.",
+                  app_code_cache_dir, strerror(errno));
+            ReleaseAppCodeCacheDir();
+          }
+        } else {
+          ALOGW("Cannot stat code cache directory %s: %s.",
+                app_code_cache_dir, strerror(errno));
+          ReleaseAppCodeCacheDir();
+        }
+      } else if (!S_ISDIR(st.st_mode)) {
+        ALOGW("Code cache is not a directory %s.", app_code_cache_dir);
+        ReleaseAppCodeCacheDir();
+      }
+    }
+
+    // If we're still PreInitialized (didn't fail the code cache checks) try to initialize.
+    if (state == NativeBridgeState::kPreInitialized) {
+      if (callbacks->initialize(runtime_callbacks, app_code_cache_dir, instruction_set)) {
+        SetupEnvironment(callbacks, env, instruction_set);
+        state = NativeBridgeState::kInitialized;
+        // We no longer need the code cache path, release the memory.
+        ReleaseAppCodeCacheDir();
+      } else {
+        // Unload the library.
+        dlclose(native_bridge_handle);
+        CloseNativeBridge(true);
+      }
+    }
+  } else {
+    CloseNativeBridge(true);
+  }
+
+  return state == NativeBridgeState::kInitialized;
+}
+
+void UnloadNativeBridge() {
+  // We expect only one place that calls UnloadNativeBridge: Runtime::DidForkFromZygote. At that
+  // point we are not multi-threaded, so we do not need locking here.
+
+  switch (state) {
+    case NativeBridgeState::kOpened:
+    case NativeBridgeState::kPreInitialized:
+    case NativeBridgeState::kInitialized:
+      // Unload.
+      dlclose(native_bridge_handle);
+      CloseNativeBridge(false);
+      break;
+
+    case NativeBridgeState::kNotSetup:
+      // Not even set up. Error.
+      CloseNativeBridge(true);
+      break;
+
+    case NativeBridgeState::kClosed:
+      // Ignore.
+      break;
+  }
+}
+
+bool NativeBridgeError() {
+  return had_error;
+}
+
+bool NativeBridgeAvailable() {
+  return state == NativeBridgeState::kOpened
+      || state == NativeBridgeState::kPreInitialized
+      || state == NativeBridgeState::kInitialized;
+}
+
+bool NativeBridgeInitialized() {
+  // Calls of this are supposed to happen in a state where the native bridge is stable, i.e., after
+  // Runtime::DidForkFromZygote. In that case we do not need a lock.
+  return state == NativeBridgeState::kInitialized;
+}
+
+void* NativeBridgeLoadLibrary(const char* libpath, int flag) {
+  if (NativeBridgeInitialized()) {
+    return callbacks->loadLibrary(libpath, flag);
+  }
+  return nullptr;
+}
+
+void* NativeBridgeGetTrampoline(void* handle, const char* name, const char* shorty,
+                                uint32_t len) {
+  if (NativeBridgeInitialized()) {
+    return callbacks->getTrampoline(handle, name, shorty, len);
+  }
+  return nullptr;
+}
+
+bool NativeBridgeIsSupported(const char* libpath) {
+  if (NativeBridgeInitialized()) {
+    return callbacks->isSupported(libpath);
+  }
+  return false;
+}
+
+uint32_t NativeBridgeGetVersion() {
+  if (NativeBridgeAvailable()) {
+    return callbacks->version;
+  }
+  return 0;
+}
+
+NativeBridgeSignalHandlerFn NativeBridgeGetSignalHandler(int signal) {
+  if (NativeBridgeInitialized()) {
+    if (isCompatibleWith(SIGNAL_VERSION)) {
+      return callbacks->getSignalHandler(signal);
+    } else {
+      ALOGE("not compatible with version %d, cannot get signal handler", SIGNAL_VERSION);
+    }
+  }
+  return nullptr;
+}
+
+int NativeBridgeUnloadLibrary(void* handle) {
+  if (NativeBridgeInitialized()) {
+    if (isCompatibleWith(NAMESPACE_VERSION)) {
+      return callbacks->unloadLibrary(handle);
+    } else {
+      ALOGE("not compatible with version %d, cannot unload library", NAMESPACE_VERSION);
+    }
+  }
+  return -1;
+}
+
+const char* NativeBridgeGetError() {
+  if (NativeBridgeInitialized()) {
+    if (isCompatibleWith(NAMESPACE_VERSION)) {
+      return callbacks->getError();
+    } else {
+      return "native bridge implementation is not compatible with version 3, cannot get message";
+    }
+  }
+  return "native bridge is not initialized";
+}
+
+bool NativeBridgeIsPathSupported(const char* path) {
+  if (NativeBridgeInitialized()) {
+    if (isCompatibleWith(NAMESPACE_VERSION)) {
+      return callbacks->isPathSupported(path);
+    } else {
+      ALOGE("not compatible with version %d, cannot check via library path", NAMESPACE_VERSION);
+    }
+  }
+  return false;
+}
+
+bool NativeBridgeInitAnonymousNamespace(const char* public_ns_sonames,
+                                        const char* anon_ns_library_path) {
+  if (NativeBridgeInitialized()) {
+    if (isCompatibleWith(NAMESPACE_VERSION)) {
+      return callbacks->initAnonymousNamespace(public_ns_sonames, anon_ns_library_path);
+    } else {
+      ALOGE("not compatible with version %d, cannot init namespace", NAMESPACE_VERSION);
+    }
+  }
+
+  return false;
+}
+
+native_bridge_namespace_t* NativeBridgeCreateNamespace(const char* name,
+                                                       const char* ld_library_path,
+                                                       const char* default_library_path,
+                                                       uint64_t type,
+                                                       const char* permitted_when_isolated_path,
+                                                       native_bridge_namespace_t* parent_ns) {
+  if (NativeBridgeInitialized()) {
+    if (isCompatibleWith(NAMESPACE_VERSION)) {
+      return callbacks->createNamespace(name,
+                                        ld_library_path,
+                                        default_library_path,
+                                        type,
+                                        permitted_when_isolated_path,
+                                        parent_ns);
+    } else {
+      ALOGE("not compatible with version %d, cannot create namespace %s", NAMESPACE_VERSION, name);
+    }
+  }
+
+  return nullptr;
+}
+
+bool NativeBridgeLinkNamespaces(native_bridge_namespace_t* from, native_bridge_namespace_t* to,
+                                const char* shared_libs_sonames) {
+  if (NativeBridgeInitialized()) {
+    if (isCompatibleWith(NAMESPACE_VERSION)) {
+      return callbacks->linkNamespaces(from, to, shared_libs_sonames);
+    } else {
+      ALOGE("not compatible with version %d, cannot init namespace", NAMESPACE_VERSION);
+    }
+  }
+
+  return false;
+}
+
+native_bridge_namespace_t* NativeBridgeGetExportedNamespace(const char* name) {
+  if (!NativeBridgeInitialized()) {
+    return nullptr;
+  }
+
+  if (isCompatibleWith(RUNTIME_NAMESPACE_VERSION)) {
+    return callbacks->getExportedNamespace(name);
+  }
+
+  // sphal is vendor namespace name -> use v4 callback in the case NB callbacks
+  // are not compatible with v5
+  if (isCompatibleWith(VENDOR_NAMESPACE_VERSION) && name != nullptr && strcmp("sphal", name) == 0) {
+    return callbacks->getVendorNamespace();
+  }
+
+  return nullptr;
+}
+
+void* NativeBridgeLoadLibraryExt(const char* libpath, int flag, native_bridge_namespace_t* ns) {
+  if (NativeBridgeInitialized()) {
+    if (isCompatibleWith(NAMESPACE_VERSION)) {
+      return callbacks->loadLibraryExt(libpath, flag, ns);
+    } else {
+      ALOGE("not compatible with version %d, cannot load library in namespace", NAMESPACE_VERSION);
+    }
+  }
+  return nullptr;
+}
+
+}  // extern "C"
+
+}  // namespace android
diff --git a/libnativebridge/native_bridge_lazy.cc b/libnativebridge/native_bridge_lazy.cc
new file mode 100644
index 0000000..edc7848
--- /dev/null
+++ b/libnativebridge/native_bridge_lazy.cc
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nativebridge/native_bridge.h"
+#define LOG_TAG "nativebridge"
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <string.h>
+
+#include <log/log.h>
+
+namespace android {
+
+namespace {
+
+void* GetLibHandle() {
+  static void* handle = dlopen("libnativebridge.so", RTLD_NOW);
+  LOG_FATAL_IF(handle == nullptr, "Failed to load libnativebridge.so: %s", dlerror());
+  return handle;
+}
+
+template <typename FuncPtr>
+FuncPtr GetFuncPtr(const char* function_name) {
+  auto f = reinterpret_cast<FuncPtr>(dlsym(GetLibHandle(), function_name));
+  LOG_FATAL_IF(f == nullptr, "Failed to get address of %s: %s", function_name, dlerror());
+  return f;
+}
+
+#define GET_FUNC_PTR(name) GetFuncPtr<decltype(&name)>(#name)
+
+}  // namespace
+
+bool LoadNativeBridge(const char* native_bridge_library_filename,
+                      const struct NativeBridgeRuntimeCallbacks* runtime_callbacks) {
+  static auto f = GET_FUNC_PTR(LoadNativeBridge);
+  return f(native_bridge_library_filename, runtime_callbacks);
+}
+
+bool NeedsNativeBridge(const char* instruction_set) {
+  static auto f = GET_FUNC_PTR(NeedsNativeBridge);
+  return f(instruction_set);
+}
+
+bool PreInitializeNativeBridge(const char* app_data_dir, const char* instruction_set) {
+  static auto f = GET_FUNC_PTR(PreInitializeNativeBridge);
+  return f(app_data_dir, instruction_set);
+}
+
+void PreZygoteForkNativeBridge() {
+  static auto f = GET_FUNC_PTR(PreZygoteForkNativeBridge);
+  return f();
+}
+
+bool InitializeNativeBridge(JNIEnv* env, const char* instruction_set) {
+  static auto f = GET_FUNC_PTR(InitializeNativeBridge);
+  return f(env, instruction_set);
+}
+
+void UnloadNativeBridge() {
+  static auto f = GET_FUNC_PTR(UnloadNativeBridge);
+  return f();
+}
+
+bool NativeBridgeAvailable() {
+  static auto f = GET_FUNC_PTR(NativeBridgeAvailable);
+  return f();
+}
+
+bool NativeBridgeInitialized() {
+  static auto f = GET_FUNC_PTR(NativeBridgeInitialized);
+  return f();
+}
+
+void* NativeBridgeLoadLibrary(const char* libpath, int flag) {
+  static auto f = GET_FUNC_PTR(NativeBridgeLoadLibrary);
+  return f(libpath, flag);
+}
+
+void* NativeBridgeGetTrampoline(void* handle, const char* name, const char* shorty, uint32_t len) {
+  static auto f = GET_FUNC_PTR(NativeBridgeGetTrampoline);
+  return f(handle, name, shorty, len);
+}
+
+bool NativeBridgeIsSupported(const char* libpath) {
+  static auto f = GET_FUNC_PTR(NativeBridgeIsSupported);
+  return f(libpath);
+}
+
+uint32_t NativeBridgeGetVersion() {
+  static auto f = GET_FUNC_PTR(NativeBridgeGetVersion);
+  return f();
+}
+
+NativeBridgeSignalHandlerFn NativeBridgeGetSignalHandler(int signal) {
+  static auto f = GET_FUNC_PTR(NativeBridgeGetSignalHandler);
+  return f(signal);
+}
+
+bool NativeBridgeError() {
+  static auto f = GET_FUNC_PTR(NativeBridgeError);
+  return f();
+}
+
+bool NativeBridgeNameAcceptable(const char* native_bridge_library_filename) {
+  static auto f = GET_FUNC_PTR(NativeBridgeNameAcceptable);
+  return f(native_bridge_library_filename);
+}
+
+int NativeBridgeUnloadLibrary(void* handle) {
+  static auto f = GET_FUNC_PTR(NativeBridgeUnloadLibrary);
+  return f(handle);
+}
+
+const char* NativeBridgeGetError() {
+  static auto f = GET_FUNC_PTR(NativeBridgeGetError);
+  return f();
+}
+
+bool NativeBridgeIsPathSupported(const char* path) {
+  static auto f = GET_FUNC_PTR(NativeBridgeIsPathSupported);
+  return f(path);
+}
+
+bool NativeBridgeInitAnonymousNamespace(const char* public_ns_sonames,
+                                        const char* anon_ns_library_path) {
+  static auto f = GET_FUNC_PTR(NativeBridgeInitAnonymousNamespace);
+  return f(public_ns_sonames, anon_ns_library_path);
+}
+
+struct native_bridge_namespace_t* NativeBridgeCreateNamespace(
+    const char* name, const char* ld_library_path, const char* default_library_path, uint64_t type,
+    const char* permitted_when_isolated_path, struct native_bridge_namespace_t* parent_ns) {
+  static auto f = GET_FUNC_PTR(NativeBridgeCreateNamespace);
+  return f(name, ld_library_path, default_library_path, type, permitted_when_isolated_path,
+           parent_ns);
+}
+
+bool NativeBridgeLinkNamespaces(struct native_bridge_namespace_t* from,
+                                struct native_bridge_namespace_t* to,
+                                const char* shared_libs_sonames) {
+  static auto f = GET_FUNC_PTR(NativeBridgeLinkNamespaces);
+  return f(from, to, shared_libs_sonames);
+}
+
+void* NativeBridgeLoadLibraryExt(const char* libpath, int flag,
+                                 struct native_bridge_namespace_t* ns) {
+  static auto f = GET_FUNC_PTR(NativeBridgeLoadLibraryExt);
+  return f(libpath, flag, ns);
+}
+
+struct native_bridge_namespace_t* NativeBridgeGetVendorNamespace() {
+  static auto f = GET_FUNC_PTR(NativeBridgeGetVendorNamespace);
+  return f();
+}
+
+#undef GET_FUNC_PTR
+
+}  // namespace android
diff --git a/libnativebridge/tests/Android.bp b/libnativebridge/tests/Android.bp
new file mode 100644
index 0000000..4ccf35e
--- /dev/null
+++ b/libnativebridge/tests/Android.bp
@@ -0,0 +1,163 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_defaults {
+    name: "libnativebridge-dummy-defaults",
+    defaults: [
+        "art_defaults",
+        "art_test_defaults",
+    ],
+    // TODO(mast): Split up art_gtest_defaults so that it can be used for the
+    // following without pulling in lots of libs.
+    target: {
+        linux: {
+            cflags: [
+                // gtest issue
+                "-Wno-used-but-marked-unused",
+                "-Wno-deprecated",
+                "-Wno-missing-noreturn",
+            ],
+        },
+    },
+    header_libs: ["libnativebridge-headers"],
+    cppflags: ["-fvisibility=protected"],
+}
+
+cc_library_shared {
+    name: "libnativebridge-dummy",
+    srcs: ["DummyNativeBridge.cpp"],
+    defaults: ["libnativebridge-dummy-defaults"],
+}
+
+cc_library_shared {
+    name: "libnativebridge2-dummy",
+    srcs: ["DummyNativeBridge2.cpp"],
+    defaults: ["libnativebridge-dummy-defaults"],
+}
+
+cc_library_shared {
+    name: "libnativebridge3-dummy",
+    srcs: ["DummyNativeBridge3.cpp"],
+    defaults: ["libnativebridge-dummy-defaults"],
+}
+
+cc_library_shared {
+    name: "libnativebridge6-dummy",
+    srcs: ["DummyNativeBridge6.cpp"],
+    defaults: ["libnativebridge-dummy-defaults"],
+    shared_libs: [
+        "libnativebridge6prezygotefork",
+    ],
+}
+
+// A helper library that produces a dummy side effect for PreZygoteForkNativeBridge.
+cc_library_shared {
+    name: "libnativebridge6prezygotefork",
+    srcs: ["NativeBridge6PreZygoteFork_lib.cpp"],
+    defaults: ["libnativebridge-dummy-defaults"],
+}
+
+// Build the unit tests.
+cc_defaults {
+    name: "libnativebridge-tests-defaults",
+    defaults: [
+        "art_defaults",
+        "art_test_defaults",
+    ],
+    test_per_src: true,
+    // TODO(mast): Split up art_gtest_defaults so that it can be used for the
+    // following without pulling in lots of libs.
+    target: {
+        linux: {
+            cflags: [
+                // gtest issue
+                "-Wno-used-but-marked-unused",
+                "-Wno-deprecated",
+                "-Wno-missing-noreturn",
+            ],
+        },
+    },
+
+    srcs: [
+        "CodeCacheCreate_test.cpp",
+        "CodeCacheExists_test.cpp",
+        "CodeCacheStatFail_test.cpp",
+        "CompleteFlow_test.cpp",
+        "InvalidCharsNativeBridge_test.cpp",
+        "NativeBridge2Signal_test.cpp",
+        "NativeBridgeVersion_test.cpp",
+        "NeedsNativeBridge_test.cpp",
+        "PreInitializeNativeBridge_test.cpp",
+        "PreInitializeNativeBridgeFail1_test.cpp",
+        "PreInitializeNativeBridgeFail2_test.cpp",
+        "ReSetupNativeBridge_test.cpp",
+        "UnavailableNativeBridge_test.cpp",
+        "ValidNameNativeBridge_test.cpp",
+        "NativeBridge3UnloadLibrary_test.cpp",
+        "NativeBridge3GetError_test.cpp",
+        "NativeBridge3IsPathSupported_test.cpp",
+        "NativeBridge3InitAnonymousNamespace_test.cpp",
+        "NativeBridge3CreateNamespace_test.cpp",
+        "NativeBridge3LoadLibraryExt_test.cpp",
+        "NativeBridge6PreZygoteFork_test.cpp",
+    ],
+
+    shared_libs: [
+        "liblog",
+        "libnativebridge-dummy",
+        "libnativebridge6prezygotefork",
+    ],
+    header_libs: ["libbase_headers"],
+}
+
+cc_test {
+    name: "libnativebridge-tests",
+    defaults: ["libnativebridge-tests-defaults"],
+    shared_libs: ["libnativebridge"],
+}
+
+cc_test {
+    name: "libnativebridge-lazy-tests",
+    defaults: ["libnativebridge-tests-defaults"],
+    host_supported: false,
+    shared_libs: ["libnativebridge_lazy"],
+}
+
+// Build the test for the C API.
+cc_test {
+    name: "libnativebridge-api-tests",
+    defaults: [
+        "art_defaults",
+        "art_test_defaults",
+    ],
+    test_per_src: true,
+    // TODO(mast): Split up art_gtest_defaults so that it can be used for the
+    // following without pulling in lots of libs.
+    target: {
+        linux: {
+            cflags: [
+                // gtest issue
+                "-Wno-used-but-marked-unused",
+                "-Wno-deprecated",
+                "-Wno-missing-noreturn",
+            ],
+        },
+    },
+    srcs: [
+        "NativeBridgeApi.c",
+    ],
+    header_libs: ["libnativebridge-headers"],
+}
diff --git a/libnativebridge/tests/CodeCacheCreate_test.cpp b/libnativebridge/tests/CodeCacheCreate_test.cpp
new file mode 100644
index 0000000..58270c4
--- /dev/null
+++ b/libnativebridge/tests/CodeCacheCreate_test.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NativeBridgeTest.h"
+
+#include <errno.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+namespace android {
+
+// Tests that the bridge initialization creates the code_cache if it doesn't
+// exist.
+TEST_F(NativeBridgeTest, CodeCacheCreate) {
+    // Make sure that the code_cache does not exist
+    struct stat st;
+    ASSERT_EQ(-1, stat(kCodeCache, &st));
+    ASSERT_EQ(ENOENT, errno);
+
+    // Init
+    ASSERT_TRUE(LoadNativeBridge(kNativeBridgeLibrary, nullptr));
+    ASSERT_TRUE(PreInitializeNativeBridge(".", "isa"));
+    ASSERT_TRUE(InitializeNativeBridge(nullptr, nullptr));
+    ASSERT_TRUE(NativeBridgeAvailable());
+    ASSERT_FALSE(NativeBridgeError());
+
+    // Check that code_cache was created
+    ASSERT_EQ(0, stat(kCodeCache, &st));
+    ASSERT_TRUE(S_ISDIR(st.st_mode));
+
+    // Clean up
+    UnloadNativeBridge();
+    ASSERT_EQ(0, rmdir(kCodeCache));
+
+    ASSERT_FALSE(NativeBridgeError());
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/CodeCacheExists_test.cpp b/libnativebridge/tests/CodeCacheExists_test.cpp
new file mode 100644
index 0000000..8ba0158
--- /dev/null
+++ b/libnativebridge/tests/CodeCacheExists_test.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NativeBridgeTest.h"
+
+#include <errno.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+namespace android {
+
+// Tests that the bridge is initialized without errors if the code_cache already
+// exists.
+TEST_F(NativeBridgeTest, CodeCacheExists) {
+    // Make sure that the code_cache does not exist
+    struct stat st;
+    ASSERT_EQ(-1, stat(kCodeCache, &st));
+    ASSERT_EQ(ENOENT, errno);
+
+    // Create the code_cache
+    ASSERT_EQ(0, mkdir(kCodeCache, S_IRWXU | S_IRWXG | S_IXOTH));
+
+    // Init
+    ASSERT_TRUE(LoadNativeBridge(kNativeBridgeLibrary, nullptr));
+    ASSERT_TRUE(PreInitializeNativeBridge(".", "isa"));
+    ASSERT_TRUE(InitializeNativeBridge(nullptr, nullptr));
+    ASSERT_TRUE(NativeBridgeAvailable());
+    ASSERT_FALSE(NativeBridgeError());
+
+    // Check that the code cache is still there
+    ASSERT_EQ(0, stat(kCodeCache, &st));
+    ASSERT_TRUE(S_ISDIR(st.st_mode));
+
+    // Clean up
+    UnloadNativeBridge();
+    ASSERT_EQ(0, rmdir(kCodeCache));
+
+    ASSERT_FALSE(NativeBridgeError());
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/CodeCacheStatFail_test.cpp b/libnativebridge/tests/CodeCacheStatFail_test.cpp
new file mode 100644
index 0000000..4ea519e
--- /dev/null
+++ b/libnativebridge/tests/CodeCacheStatFail_test.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NativeBridgeTest.h"
+
+#include <errno.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+namespace android {
+
+// Tests that the bridge is initialized without errors if the code_cache is
+// present as a regular file instead of a directory.
+TEST_F(NativeBridgeTest, CodeCacheStatFail) {
+    int fd = creat(kCodeCache, O_RDWR);
+    ASSERT_NE(-1, fd);
+    close(fd);
+
+    struct stat st;
+    ASSERT_EQ(-1, stat(kCodeCacheStatFail, &st));
+    ASSERT_EQ(ENOTDIR, errno);
+
+    // Init
+    ASSERT_TRUE(LoadNativeBridge(kNativeBridgeLibrary, nullptr));
+    ASSERT_TRUE(PreInitializeNativeBridge(kCodeCacheStatFail, "isa"));
+    ASSERT_TRUE(InitializeNativeBridge(nullptr, nullptr));
+    ASSERT_TRUE(NativeBridgeAvailable());
+    ASSERT_FALSE(NativeBridgeError());
+
+    // Clean up
+    UnloadNativeBridge();
+
+    ASSERT_FALSE(NativeBridgeError());
+    unlink(kCodeCache);
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/CompleteFlow_test.cpp b/libnativebridge/tests/CompleteFlow_test.cpp
new file mode 100644
index 0000000..b033792
--- /dev/null
+++ b/libnativebridge/tests/CompleteFlow_test.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NativeBridgeTest.h"
+
+#include <unistd.h>
+
+namespace android {
+
+TEST_F(NativeBridgeTest, CompleteFlow) {
+    // Init
+    ASSERT_TRUE(LoadNativeBridge(kNativeBridgeLibrary, nullptr));
+    ASSERT_TRUE(NativeBridgeAvailable());
+    ASSERT_TRUE(PreInitializeNativeBridge(".", "isa"));
+    ASSERT_TRUE(NativeBridgeAvailable());
+    ASSERT_TRUE(InitializeNativeBridge(nullptr, nullptr));
+    ASSERT_TRUE(NativeBridgeAvailable());
+
+    // Basic calls to check that nothing crashes
+    ASSERT_FALSE(NativeBridgeIsSupported(nullptr));
+    ASSERT_EQ(nullptr, NativeBridgeLoadLibrary(nullptr, 0));
+    ASSERT_EQ(nullptr, NativeBridgeGetTrampoline(nullptr, nullptr, nullptr, 0));
+
+    // Unload
+    UnloadNativeBridge();
+
+    ASSERT_FALSE(NativeBridgeAvailable());
+    ASSERT_FALSE(NativeBridgeError());
+
+    // Clean-up code_cache
+    ASSERT_EQ(0, rmdir(kCodeCache));
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/DummyNativeBridge.cpp b/libnativebridge/tests/DummyNativeBridge.cpp
new file mode 100644
index 0000000..b9894f6
--- /dev/null
+++ b/libnativebridge/tests/DummyNativeBridge.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// A dummy implementation of the native-bridge interface.
+
+#include "nativebridge/native_bridge.h"
+
+// NativeBridgeCallbacks implementations
+extern "C" bool native_bridge_initialize(const android::NativeBridgeRuntimeCallbacks* /* art_cbs */,
+                                         const char* /* app_code_cache_dir */,
+                                         const char* /* isa */) {
+  return true;
+}
+
+extern "C" void* native_bridge_loadLibrary(const char* /* libpath */, int /* flag */) {
+  return nullptr;
+}
+
+extern "C" void* native_bridge_getTrampoline(void* /* handle */, const char* /* name */,
+                                             const char* /* shorty */, uint32_t /* len */) {
+  return nullptr;
+}
+
+extern "C" bool native_bridge_isSupported(const char* /* libpath */) {
+  return false;
+}
+
+extern "C" const struct android::NativeBridgeRuntimeValues* native_bridge_getAppEnv(
+    const char* /* abi */) {
+  return nullptr;
+}
+
+android::NativeBridgeCallbacks NativeBridgeItf {
+  .version = 1,
+  .initialize = &native_bridge_initialize,
+  .loadLibrary = &native_bridge_loadLibrary,
+  .getTrampoline = &native_bridge_getTrampoline,
+  .isSupported = &native_bridge_isSupported,
+  .getAppEnv = &native_bridge_getAppEnv
+};
diff --git a/libnativebridge/tests/DummyNativeBridge2.cpp b/libnativebridge/tests/DummyNativeBridge2.cpp
new file mode 100644
index 0000000..6920c74
--- /dev/null
+++ b/libnativebridge/tests/DummyNativeBridge2.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// A dummy implementation of the native-bridge interface.
+
+#include "nativebridge/native_bridge.h"
+
+#include <signal.h>
+
+// NativeBridgeCallbacks implementations
+extern "C" bool native_bridge2_initialize(const android::NativeBridgeRuntimeCallbacks* /* art_cbs */,
+                                         const char* /* app_code_cache_dir */,
+                                         const char* /* isa */) {
+  return true;
+}
+
+extern "C" void* native_bridge2_loadLibrary(const char* /* libpath */, int /* flag */) {
+  return nullptr;
+}
+
+extern "C" void* native_bridge2_getTrampoline(void* /* handle */, const char* /* name */,
+                                             const char* /* shorty */, uint32_t /* len */) {
+  return nullptr;
+}
+
+extern "C" bool native_bridge2_isSupported(const char* /* libpath */) {
+  return false;
+}
+
+extern "C" const struct android::NativeBridgeRuntimeValues* native_bridge2_getAppEnv(
+    const char* /* abi */) {
+  return nullptr;
+}
+
+extern "C" bool native_bridge2_is_compatible_compatible_with(uint32_t version) {
+  // For testing, allow 1 and 2, but disallow 3+.
+  return version <= 2;
+}
+
+static bool native_bridge2_dummy_signal_handler(int, siginfo_t*, void*) {
+  // TODO: Implement something here. We'd either have to have a death test with a log here, or
+  //       we'd have to be able to resume after the faulting instruction...
+  return true;
+}
+
+extern "C" android::NativeBridgeSignalHandlerFn native_bridge2_get_signal_handler(int signal) {
+  if (signal == SIGSEGV) {
+    return &native_bridge2_dummy_signal_handler;
+  }
+  return nullptr;
+}
+
+android::NativeBridgeCallbacks NativeBridgeItf {
+  .version = 2,
+  .initialize = &native_bridge2_initialize,
+  .loadLibrary = &native_bridge2_loadLibrary,
+  .getTrampoline = &native_bridge2_getTrampoline,
+  .isSupported = &native_bridge2_isSupported,
+  .getAppEnv = &native_bridge2_getAppEnv,
+  .isCompatibleWith = &native_bridge2_is_compatible_compatible_with,
+  .getSignalHandler = &native_bridge2_get_signal_handler
+};
+
diff --git a/libnativebridge/tests/DummyNativeBridge3.cpp b/libnativebridge/tests/DummyNativeBridge3.cpp
new file mode 100644
index 0000000..4ef1c82
--- /dev/null
+++ b/libnativebridge/tests/DummyNativeBridge3.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// A dummy implementation of the native-bridge interface.
+
+#include "nativebridge/native_bridge.h"
+
+#include <signal.h>
+
+// NativeBridgeCallbacks implementations
+extern "C" bool native_bridge3_initialize(
+                      const android::NativeBridgeRuntimeCallbacks* /* art_cbs */,
+                      const char* /* app_code_cache_dir */,
+                      const char* /* isa */) {
+  return true;
+}
+
+extern "C" void* native_bridge3_loadLibrary(const char* /* libpath */, int /* flag */) {
+  return nullptr;
+}
+
+extern "C" void* native_bridge3_getTrampoline(void* /* handle */, const char* /* name */,
+                                             const char* /* shorty */, uint32_t /* len */) {
+  return nullptr;
+}
+
+extern "C" bool native_bridge3_isSupported(const char* /* libpath */) {
+  return false;
+}
+
+extern "C" const struct android::NativeBridgeRuntimeValues* native_bridge3_getAppEnv(
+    const char* /* abi */) {
+  return nullptr;
+}
+
+extern "C" bool native_bridge3_isCompatibleWith(uint32_t version) {
+  // For testing, allow 1-3, but disallow 4+.
+  return version <= 3;
+}
+
+static bool native_bridge3_dummy_signal_handler(int, siginfo_t*, void*) {
+  // TODO: Implement something here. We'd either have to have a death test with a log here, or
+  //       we'd have to be able to resume after the faulting instruction...
+  return true;
+}
+
+extern "C" android::NativeBridgeSignalHandlerFn native_bridge3_getSignalHandler(int signal) {
+  if (signal == SIGSEGV) {
+    return &native_bridge3_dummy_signal_handler;
+  }
+  return nullptr;
+}
+
+extern "C" int native_bridge3_unloadLibrary(void* /* handle */) {
+  return 0;
+}
+
+extern "C" const char* native_bridge3_getError() {
+  return nullptr;
+}
+
+extern "C" bool native_bridge3_isPathSupported(const char* /* path */) {
+  return true;
+}
+
+extern "C" bool native_bridge3_initAnonymousNamespace(const char* /* public_ns_sonames */,
+                                                      const char* /* anon_ns_library_path */) {
+  return true;
+}
+
+extern "C" android::native_bridge_namespace_t*
+native_bridge3_createNamespace(const char* /* name */,
+                               const char* /* ld_library_path */,
+                               const char* /* default_library_path */,
+                               uint64_t /* type */,
+                               const char* /* permitted_when_isolated_path */,
+                               android::native_bridge_namespace_t* /* parent_ns */) {
+  return nullptr;
+}
+
+extern "C" bool native_bridge3_linkNamespaces(android::native_bridge_namespace_t* /* from */,
+                                              android::native_bridge_namespace_t* /* to */,
+                                              const char* /* shared_libs_soname */) {
+  return true;
+}
+
+extern "C" void* native_bridge3_loadLibraryExt(const char* /* libpath */,
+                                               int /* flag */,
+                                               android::native_bridge_namespace_t* /* ns */) {
+  return nullptr;
+}
+
+android::NativeBridgeCallbacks NativeBridgeItf{
+    // v1
+    .version = 3,
+    .initialize = &native_bridge3_initialize,
+    .loadLibrary = &native_bridge3_loadLibrary,
+    .getTrampoline = &native_bridge3_getTrampoline,
+    .isSupported = &native_bridge3_isSupported,
+    .getAppEnv = &native_bridge3_getAppEnv,
+    // v2
+    .isCompatibleWith = &native_bridge3_isCompatibleWith,
+    .getSignalHandler = &native_bridge3_getSignalHandler,
+    // v3
+    .unloadLibrary = &native_bridge3_unloadLibrary,
+    .getError = &native_bridge3_getError,
+    .isPathSupported = &native_bridge3_isPathSupported,
+    .initAnonymousNamespace = &native_bridge3_initAnonymousNamespace,
+    .createNamespace = &native_bridge3_createNamespace,
+    .linkNamespaces = &native_bridge3_linkNamespaces,
+    .loadLibraryExt = &native_bridge3_loadLibraryExt};
diff --git a/libnativebridge/tests/DummyNativeBridge6.cpp b/libnativebridge/tests/DummyNativeBridge6.cpp
new file mode 100644
index 0000000..ce27e67
--- /dev/null
+++ b/libnativebridge/tests/DummyNativeBridge6.cpp
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// A dummy implementation of the native-bridge interface.
+
+#include "nativebridge/native_bridge.h"
+
+#include "NativeBridge6PreZygoteFork_lib.h"
+
+// NativeBridgeCallbacks implementations
+extern "C" bool native_bridge6_initialize(
+                      const android::NativeBridgeRuntimeCallbacks* /* art_cbs */,
+                      const char* /* app_code_cache_dir */,
+                      const char* /* isa */) {
+  return true;
+}
+
+extern "C" void* native_bridge6_loadLibrary(const char* /* libpath */, int /* flag */) {
+  return nullptr;
+}
+
+extern "C" void* native_bridge6_getTrampoline(void* /* handle */, const char* /* name */,
+                                             const char* /* shorty */, uint32_t /* len */) {
+  return nullptr;
+}
+
+extern "C" bool native_bridge6_isSupported(const char* /* libpath */) {
+  return false;
+}
+
+extern "C" const struct android::NativeBridgeRuntimeValues* native_bridge6_getAppEnv(
+    const char* /* abi */) {
+  return nullptr;
+}
+
+extern "C" bool native_bridge6_isCompatibleWith(uint32_t version) {
+  // For testing, allow 1-6, but disallow 7+.
+  return version <= 6;
+}
+
+extern "C" android::NativeBridgeSignalHandlerFn native_bridge6_getSignalHandler(int /* signal */) {
+  return nullptr;
+}
+
+extern "C" int native_bridge6_unloadLibrary(void* /* handle */) {
+  return 0;
+}
+
+extern "C" const char* native_bridge6_getError() {
+  return nullptr;
+}
+
+extern "C" bool native_bridge6_isPathSupported(const char* /* path */) {
+  return true;
+}
+
+extern "C" bool native_bridge6_initAnonymousNamespace(const char* /* public_ns_sonames */,
+                                                      const char* /* anon_ns_library_path */) {
+  return true;
+}
+
+extern "C" android::native_bridge_namespace_t*
+native_bridge6_createNamespace(const char* /* name */,
+                               const char* /* ld_library_path */,
+                               const char* /* default_library_path */,
+                               uint64_t /* type */,
+                               const char* /* permitted_when_isolated_path */,
+                               android::native_bridge_namespace_t* /* parent_ns */) {
+  return nullptr;
+}
+
+extern "C" bool native_bridge6_linkNamespaces(android::native_bridge_namespace_t* /* from */,
+                                              android::native_bridge_namespace_t* /* to */,
+                                              const char* /* shared_libs_soname */) {
+  return true;
+}
+
+extern "C" void* native_bridge6_loadLibraryExt(const char* /* libpath */,
+                                               int /* flag */,
+                                               android::native_bridge_namespace_t* /* ns */) {
+  return nullptr;
+}
+
+extern "C" android::native_bridge_namespace_t* native_bridge6_getVendorNamespace() {
+  return nullptr;
+}
+
+extern "C" android::native_bridge_namespace_t* native_bridge6_getExportedNamespace(const char* /* name */) {
+  return nullptr;
+}
+
+extern "C" void native_bridge6_preZygoteFork() {
+  android::SetPreZygoteForkDone();
+}
+
+android::NativeBridgeCallbacks NativeBridgeItf{
+    // v1
+    .version = 6,
+    .initialize = &native_bridge6_initialize,
+    .loadLibrary = &native_bridge6_loadLibrary,
+    .getTrampoline = &native_bridge6_getTrampoline,
+    .isSupported = &native_bridge6_isSupported,
+    .getAppEnv = &native_bridge6_getAppEnv,
+    // v2
+    .isCompatibleWith = &native_bridge6_isCompatibleWith,
+    .getSignalHandler = &native_bridge6_getSignalHandler,
+    // v3
+    .unloadLibrary = &native_bridge6_unloadLibrary,
+    .getError = &native_bridge6_getError,
+    .isPathSupported = &native_bridge6_isPathSupported,
+    .initAnonymousNamespace = &native_bridge6_initAnonymousNamespace,
+    .createNamespace = &native_bridge6_createNamespace,
+    .linkNamespaces = &native_bridge6_linkNamespaces,
+    .loadLibraryExt = &native_bridge6_loadLibraryExt,
+    // v4
+    &native_bridge6_getVendorNamespace,
+    // v5
+    &native_bridge6_getExportedNamespace,
+    // v6
+    &native_bridge6_preZygoteFork};
diff --git a/libnativebridge/tests/InvalidCharsNativeBridge_test.cpp b/libnativebridge/tests/InvalidCharsNativeBridge_test.cpp
new file mode 100644
index 0000000..8f7973d
--- /dev/null
+++ b/libnativebridge/tests/InvalidCharsNativeBridge_test.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NativeBridgeTest.h"
+
+namespace android {
+
+static const char* kTestName = "../librandom$@-bridge_not.existing.so";
+
+TEST_F(NativeBridgeTest, InvalidChars) {
+    // Do one test actually calling setup.
+    EXPECT_EQ(false, NativeBridgeError());
+    LoadNativeBridge(kTestName, nullptr);
+    // This should lead to an error for invalid characters.
+    EXPECT_EQ(true, NativeBridgeError());
+
+    // Further tests need to use NativeBridgeNameAcceptable, as the error
+    // state can't be changed back.
+    EXPECT_EQ(false, NativeBridgeNameAcceptable("."));
+    EXPECT_EQ(false, NativeBridgeNameAcceptable(".."));
+    EXPECT_EQ(false, NativeBridgeNameAcceptable("_"));
+    EXPECT_EQ(false, NativeBridgeNameAcceptable("-"));
+    EXPECT_EQ(false, NativeBridgeNameAcceptable("lib@.so"));
+    EXPECT_EQ(false, NativeBridgeNameAcceptable("lib$.so"));
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/NativeBridge2Signal_test.cpp b/libnativebridge/tests/NativeBridge2Signal_test.cpp
new file mode 100644
index 0000000..44e45e3
--- /dev/null
+++ b/libnativebridge/tests/NativeBridge2Signal_test.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NativeBridgeTest.h"
+
+#include <signal.h>
+#include <unistd.h>
+
+namespace android {
+
+constexpr const char* kNativeBridgeLibrary2 = "libnativebridge2-dummy.so";
+
+TEST_F(NativeBridgeTest, V2_Signal) {
+    // Init
+    ASSERT_TRUE(LoadNativeBridge(kNativeBridgeLibrary2, nullptr));
+    ASSERT_TRUE(NativeBridgeAvailable());
+    ASSERT_TRUE(PreInitializeNativeBridge(".", "isa"));
+    ASSERT_TRUE(NativeBridgeAvailable());
+    ASSERT_TRUE(InitializeNativeBridge(nullptr, nullptr));
+    ASSERT_TRUE(NativeBridgeAvailable());
+
+    ASSERT_EQ(2U, NativeBridgeGetVersion());
+    ASSERT_NE(nullptr, NativeBridgeGetSignalHandler(SIGSEGV));
+
+    // Clean-up code_cache
+    ASSERT_EQ(0, rmdir(kCodeCache));
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/NativeBridge3CreateNamespace_test.cpp b/libnativebridge/tests/NativeBridge3CreateNamespace_test.cpp
new file mode 100644
index 0000000..668d942
--- /dev/null
+++ b/libnativebridge/tests/NativeBridge3CreateNamespace_test.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NativeBridgeTest.h"
+
+namespace android {
+
+constexpr const char* kNativeBridgeLibrary3 = "libnativebridge3-dummy.so";
+
+TEST_F(NativeBridgeTest, V3_CreateNamespace) {
+    // Init
+    ASSERT_TRUE(LoadNativeBridge(kNativeBridgeLibrary3, nullptr));
+    ASSERT_TRUE(NativeBridgeAvailable());
+    ASSERT_TRUE(PreInitializeNativeBridge(".", "isa"));
+    ASSERT_TRUE(NativeBridgeAvailable());
+    ASSERT_TRUE(InitializeNativeBridge(nullptr, nullptr));
+    ASSERT_TRUE(NativeBridgeAvailable());
+
+    ASSERT_EQ(3U, NativeBridgeGetVersion());
+    ASSERT_EQ(nullptr, NativeBridgeCreateNamespace(nullptr, nullptr, nullptr,
+                                                   0, nullptr, nullptr));
+
+    // Clean-up code_cache
+    ASSERT_EQ(0, rmdir(kCodeCache));
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/NativeBridge3GetError_test.cpp b/libnativebridge/tests/NativeBridge3GetError_test.cpp
new file mode 100644
index 0000000..0b9f582
--- /dev/null
+++ b/libnativebridge/tests/NativeBridge3GetError_test.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NativeBridgeTest.h"
+
+namespace android {
+
+constexpr const char* kNativeBridgeLibrary3 = "libnativebridge3-dummy.so";
+
+TEST_F(NativeBridgeTest, V3_GetError) {
+    // Init
+    ASSERT_TRUE(LoadNativeBridge(kNativeBridgeLibrary3, nullptr));
+    ASSERT_TRUE(NativeBridgeAvailable());
+    ASSERT_TRUE(PreInitializeNativeBridge(".", "isa"));
+    ASSERT_TRUE(NativeBridgeAvailable());
+    ASSERT_TRUE(InitializeNativeBridge(nullptr, nullptr));
+    ASSERT_TRUE(NativeBridgeAvailable());
+
+    ASSERT_EQ(3U, NativeBridgeGetVersion());
+    ASSERT_EQ(nullptr, NativeBridgeGetError());
+
+    // Clean-up code_cache
+    ASSERT_EQ(0, rmdir(kCodeCache));
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/NativeBridge3InitAnonymousNamespace_test.cpp b/libnativebridge/tests/NativeBridge3InitAnonymousNamespace_test.cpp
new file mode 100644
index 0000000..b0d6b09
--- /dev/null
+++ b/libnativebridge/tests/NativeBridge3InitAnonymousNamespace_test.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NativeBridgeTest.h"
+
+namespace android {
+
+constexpr const char* kNativeBridgeLibrary3 = "libnativebridge3-dummy.so";
+
+TEST_F(NativeBridgeTest, V3_InitAnonymousNamespace) {
+  // Init
+  ASSERT_TRUE(LoadNativeBridge(kNativeBridgeLibrary3, nullptr));
+  ASSERT_TRUE(NativeBridgeAvailable());
+  ASSERT_TRUE(PreInitializeNativeBridge(".", "isa"));
+  ASSERT_TRUE(NativeBridgeAvailable());
+  ASSERT_TRUE(InitializeNativeBridge(nullptr, nullptr));
+  ASSERT_TRUE(NativeBridgeAvailable());
+
+  ASSERT_EQ(3U, NativeBridgeGetVersion());
+  ASSERT_EQ(true, NativeBridgeInitAnonymousNamespace(nullptr, nullptr));
+
+  // Clean-up code_cache
+  ASSERT_EQ(0, rmdir(kCodeCache));
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/NativeBridge3IsPathSupported_test.cpp b/libnativebridge/tests/NativeBridge3IsPathSupported_test.cpp
new file mode 100644
index 0000000..325e40b
--- /dev/null
+++ b/libnativebridge/tests/NativeBridge3IsPathSupported_test.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NativeBridgeTest.h"
+
+namespace android {
+
+constexpr const char* kNativeBridgeLibrary3 = "libnativebridge3-dummy.so";
+
+TEST_F(NativeBridgeTest, V3_IsPathSupported) {
+    // Init
+    ASSERT_TRUE(LoadNativeBridge(kNativeBridgeLibrary3, nullptr));
+    ASSERT_TRUE(NativeBridgeAvailable());
+    ASSERT_TRUE(PreInitializeNativeBridge(".", "isa"));
+    ASSERT_TRUE(NativeBridgeAvailable());
+    ASSERT_TRUE(InitializeNativeBridge(nullptr, nullptr));
+    ASSERT_TRUE(NativeBridgeAvailable());
+
+    ASSERT_EQ(3U, NativeBridgeGetVersion());
+    ASSERT_EQ(true, NativeBridgeIsPathSupported(nullptr));
+
+    // Clean-up code_cache
+    ASSERT_EQ(0, rmdir(kCodeCache));
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/NativeBridge3LoadLibraryExt_test.cpp b/libnativebridge/tests/NativeBridge3LoadLibraryExt_test.cpp
new file mode 100644
index 0000000..4caeb44
--- /dev/null
+++ b/libnativebridge/tests/NativeBridge3LoadLibraryExt_test.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NativeBridgeTest.h"
+
+namespace android {
+
+constexpr const char* kNativeBridgeLibrary3 = "libnativebridge3-dummy.so";
+
+TEST_F(NativeBridgeTest, V3_LoadLibraryExt) {
+    // Init
+    ASSERT_TRUE(LoadNativeBridge(kNativeBridgeLibrary3, nullptr));
+    ASSERT_TRUE(NativeBridgeAvailable());
+    ASSERT_TRUE(PreInitializeNativeBridge(".", "isa"));
+    ASSERT_TRUE(NativeBridgeAvailable());
+    ASSERT_TRUE(InitializeNativeBridge(nullptr, nullptr));
+    ASSERT_TRUE(NativeBridgeAvailable());
+
+    ASSERT_EQ(3U, NativeBridgeGetVersion());
+    ASSERT_EQ(nullptr, NativeBridgeLoadLibraryExt(nullptr, 0, nullptr));
+
+    // Clean-up code_cache
+    ASSERT_EQ(0, rmdir(kCodeCache));
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/NativeBridge3UnloadLibrary_test.cpp b/libnativebridge/tests/NativeBridge3UnloadLibrary_test.cpp
new file mode 100644
index 0000000..93a979c
--- /dev/null
+++ b/libnativebridge/tests/NativeBridge3UnloadLibrary_test.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NativeBridgeTest.h"
+
+namespace android {
+
+constexpr const char* kNativeBridgeLibrary3 = "libnativebridge3-dummy.so";
+
+TEST_F(NativeBridgeTest, V3_UnloadLibrary) {
+    // Init
+    ASSERT_TRUE(LoadNativeBridge(kNativeBridgeLibrary3, nullptr));
+    ASSERT_TRUE(NativeBridgeAvailable());
+    ASSERT_TRUE(PreInitializeNativeBridge(".", "isa"));
+    ASSERT_TRUE(NativeBridgeAvailable());
+    ASSERT_TRUE(InitializeNativeBridge(nullptr, nullptr));
+    ASSERT_TRUE(NativeBridgeAvailable());
+
+    ASSERT_EQ(3U, NativeBridgeGetVersion());
+    ASSERT_EQ(0, NativeBridgeUnloadLibrary(nullptr));
+
+    // Clean-up code_cache
+    ASSERT_EQ(0, rmdir(kCodeCache));
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/NativeBridge6PreZygoteFork_lib.cpp b/libnativebridge/tests/NativeBridge6PreZygoteFork_lib.cpp
new file mode 100644
index 0000000..0da5bb6
--- /dev/null
+++ b/libnativebridge/tests/NativeBridge6PreZygoteFork_lib.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace android {
+
+static bool g_pre_zygote_fork_done = false;
+
+bool IsPreZygoteForkDone() {
+  return g_pre_zygote_fork_done;
+}
+
+void SetPreZygoteForkDone() {
+  g_pre_zygote_fork_done = true;
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/NativeBridge6PreZygoteFork_lib.h b/libnativebridge/tests/NativeBridge6PreZygoteFork_lib.h
new file mode 100644
index 0000000..bcbf0d3
--- /dev/null
+++ b/libnativebridge/tests/NativeBridge6PreZygoteFork_lib.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBNATIVEBRIDGE_TESTS_NATIVEBRIDGE6PREZYGOTEFORK_LIB_H_
+#define ART_LIBNATIVEBRIDGE_TESTS_NATIVEBRIDGE6PREZYGOTEFORK_LIB_H_
+
+namespace android {
+
+bool IsPreZygoteForkDone();
+void SetPreZygoteForkDone();
+
+}  // namespace android
+
+#endif  // ART_LIBNATIVEBRIDGE_TESTS_NATIVEBRIDGE6PREZYGOTEFORK_LIB_H_
diff --git a/libnativebridge/tests/NativeBridge6PreZygoteFork_test.cpp b/libnativebridge/tests/NativeBridge6PreZygoteFork_test.cpp
new file mode 100644
index 0000000..9e348a2
--- /dev/null
+++ b/libnativebridge/tests/NativeBridge6PreZygoteFork_test.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NativeBridgeTest.h"
+#include "NativeBridge6PreZygoteFork_lib.h"
+
+namespace android {
+
+constexpr const char* kNativeBridgeLibrary6 = "libnativebridge6-dummy.so";
+
+TEST_F(NativeBridgeTest, V6_PreZygoteFork) {
+    // Init
+    ASSERT_TRUE(LoadNativeBridge(kNativeBridgeLibrary6, nullptr));
+    ASSERT_TRUE(NativeBridgeAvailable());
+    ASSERT_TRUE(PreInitializeNativeBridge(".", "isa"));
+    ASSERT_TRUE(NativeBridgeAvailable());
+    ASSERT_TRUE(InitializeNativeBridge(nullptr, nullptr));
+    ASSERT_TRUE(NativeBridgeAvailable());
+
+    ASSERT_EQ(6U, NativeBridgeGetVersion());
+
+    ASSERT_FALSE(IsPreZygoteForkDone());
+    PreZygoteForkNativeBridge();
+    ASSERT_TRUE(IsPreZygoteForkDone());
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/NativeBridgeApi.c b/libnativebridge/tests/NativeBridgeApi.c
new file mode 100644
index 0000000..7ab71fe
--- /dev/null
+++ b/libnativebridge/tests/NativeBridgeApi.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* The main purpose of this test is to ensure this C header compiles in C, so
+ * that no C++ features inadvertently leak into the C ABI. */
+#include "nativebridge/native_bridge.h"
+
+int main(int argc, char** argv) {
+  (void)argc;
+  (void)argv;
+  return 0;
+}
diff --git a/libnativebridge/tests/NativeBridgeTest.h b/libnativebridge/tests/NativeBridgeTest.h
new file mode 100644
index 0000000..cc79907
--- /dev/null
+++ b/libnativebridge/tests/NativeBridgeTest.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBNATIVEBRIDGE_TESTS_NATIVEBRIDGETEST_H_
+#define ART_LIBNATIVEBRIDGE_TESTS_NATIVEBRIDGETEST_H_
+
+#define LOG_TAG "NativeBridge_test"
+
+#include <nativebridge/native_bridge.h>
+#include <gtest/gtest.h>
+
+constexpr const char* kNativeBridgeLibrary = "libnativebridge-dummy.so";
+constexpr const char* kCodeCache = "./code_cache";
+constexpr const char* kCodeCacheStatFail = "./code_cache/temp";
+constexpr const char* kNativeBridgeLibrary2 = "libnativebridge2-dummy.so";
+constexpr const char* kNativeBridgeLibrary3 = "libnativebridge3-dummy.so";
+
+namespace android {
+
+class NativeBridgeTest : public testing::Test {
+};
+
+};  // namespace android
+
+#endif  // ART_LIBNATIVEBRIDGE_TESTS_NATIVEBRIDGETEST_H_
+
diff --git a/libnativebridge/tests/NativeBridgeVersion_test.cpp b/libnativebridge/tests/NativeBridgeVersion_test.cpp
new file mode 100644
index 0000000..d3f9a80
--- /dev/null
+++ b/libnativebridge/tests/NativeBridgeVersion_test.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NativeBridgeTest.h"
+
+#include <unistd.h>
+
+namespace android {
+
+TEST_F(NativeBridgeTest, Version) {
+    // When a bridge isn't loaded, we expect 0.
+    EXPECT_EQ(NativeBridgeGetVersion(), 0U);
+
+    // After our dummy bridge has been loaded, we expect 1.
+    ASSERT_TRUE(LoadNativeBridge(kNativeBridgeLibrary, nullptr));
+    EXPECT_EQ(NativeBridgeGetVersion(), 1U);
+
+    // Unload
+    UnloadNativeBridge();
+
+    // Version information is gone.
+    EXPECT_EQ(NativeBridgeGetVersion(), 0U);
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/NeedsNativeBridge_test.cpp b/libnativebridge/tests/NeedsNativeBridge_test.cpp
new file mode 100644
index 0000000..3f80f8d
--- /dev/null
+++ b/libnativebridge/tests/NeedsNativeBridge_test.cpp
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NativeBridgeTest.h"
+
+#include <android-base/macros.h>
+
+namespace android {
+
+static const char* kISAs[] = { "arm", "arm64", "x86", "x86_64", "random", "64arm",
+                               "64_x86", "64_x86_64", "", "reallylongstringabcd", nullptr };
+
+TEST_F(NativeBridgeTest, NeedsNativeBridge) {
+  EXPECT_EQ(false, NeedsNativeBridge(ABI_STRING));
+
+  const size_t kISACount = sizeof(kISAs) / sizeof(kISAs[0]);
+  for (size_t i = 0; i < kISACount; i++) {
+    EXPECT_EQ(kISAs[i] == nullptr ? false : strcmp(kISAs[i], ABI_STRING) != 0,
+              NeedsNativeBridge(kISAs[i]));
+    }
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/PreInitializeNativeBridgeFail1_test.cpp b/libnativebridge/tests/PreInitializeNativeBridgeFail1_test.cpp
new file mode 100644
index 0000000..5a2b0a1
--- /dev/null
+++ b/libnativebridge/tests/PreInitializeNativeBridgeFail1_test.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NativeBridgeTest.h"
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+
+#include <cstdio>
+#include <cstring>
+
+#include <android/log.h>
+
+namespace android {
+
+TEST_F(NativeBridgeTest, PreInitializeNativeBridgeFail1) {
+  // Needs a valid application directory.
+  ASSERT_TRUE(LoadNativeBridge(kNativeBridgeLibrary, nullptr));
+  ASSERT_FALSE(PreInitializeNativeBridge(nullptr, "isa"));
+  ASSERT_TRUE(NativeBridgeError());
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/PreInitializeNativeBridgeFail2_test.cpp b/libnativebridge/tests/PreInitializeNativeBridgeFail2_test.cpp
new file mode 100644
index 0000000..af976b1
--- /dev/null
+++ b/libnativebridge/tests/PreInitializeNativeBridgeFail2_test.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+
+#include <cstdio>
+#include <cstring>
+
+#include <android/log.h>
+
+#include "NativeBridgeTest.h"
+
+namespace android {
+
+TEST_F(NativeBridgeTest, PreInitializeNativeBridgeFail2) {
+  // Needs LoadNativeBridge() first
+  ASSERT_FALSE(PreInitializeNativeBridge(nullptr, "isa"));
+  ASSERT_TRUE(NativeBridgeError());
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/PreInitializeNativeBridge_test.cpp b/libnativebridge/tests/PreInitializeNativeBridge_test.cpp
new file mode 100644
index 0000000..cd5a8e2
--- /dev/null
+++ b/libnativebridge/tests/PreInitializeNativeBridge_test.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+
+#include <cstdio>
+#include <cstring>
+
+#include <android/log.h>
+
+#include "NativeBridgeTest.h"
+
+namespace android {
+
+TEST_F(NativeBridgeTest, PreInitializeNativeBridge) {
+    ASSERT_TRUE(LoadNativeBridge(kNativeBridgeLibrary, nullptr));
+#if !defined(__APPLE__)         // Mac OS does not support bind-mount.
+#if !defined(__ANDROID__)       // Cannot write into the hard-wired location.
+    static constexpr const char* kTestData = "PreInitializeNativeBridge test.";
+
+    // Try to create our mount namespace.
+    if (unshare(CLONE_NEWNS) != -1) {
+        // Create a dummy file.
+        FILE* cpuinfo = fopen("./cpuinfo", "w");
+        ASSERT_NE(nullptr, cpuinfo) << strerror(errno);
+        fprintf(cpuinfo, kTestData);
+        fclose(cpuinfo);
+
+        ASSERT_TRUE(PreInitializeNativeBridge("does not matter 1", "short 2"));
+
+        // Read /proc/cpuinfo
+        FILE* proc_cpuinfo = fopen("/proc/cpuinfo", "r");
+        ASSERT_NE(nullptr, proc_cpuinfo) << strerror(errno);
+        char buf[1024];
+        EXPECT_NE(nullptr, fgets(buf, sizeof(buf), proc_cpuinfo)) << "Error reading.";
+        fclose(proc_cpuinfo);
+
+        EXPECT_EQ(0, strcmp(buf, kTestData));
+
+        // Delete the file.
+        ASSERT_EQ(0, unlink("./cpuinfo")) << "Error unlinking temporary file.";
+        // Ending the test will tear down the mount namespace.
+    } else {
+        GTEST_LOG_(WARNING) << "Could not create mount namespace. Are you running this as root?";
+    }
+#endif
+#endif
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/ReSetupNativeBridge_test.cpp b/libnativebridge/tests/ReSetupNativeBridge_test.cpp
new file mode 100644
index 0000000..944e5d7
--- /dev/null
+++ b/libnativebridge/tests/ReSetupNativeBridge_test.cpp
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NativeBridgeTest.h"
+
+namespace android {
+
+TEST_F(NativeBridgeTest, ReSetup) {
+    EXPECT_EQ(false, NativeBridgeError());
+    LoadNativeBridge("", nullptr);
+    EXPECT_EQ(false, NativeBridgeError());
+    LoadNativeBridge("", nullptr);
+    // This should lead to an error for trying to re-setup a native bridge.
+    EXPECT_EQ(true, NativeBridgeError());
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/UnavailableNativeBridge_test.cpp b/libnativebridge/tests/UnavailableNativeBridge_test.cpp
new file mode 100644
index 0000000..ad374a5
--- /dev/null
+++ b/libnativebridge/tests/UnavailableNativeBridge_test.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "NativeBridgeTest.h"
+
+namespace android {
+
+TEST_F(NativeBridgeTest, NoNativeBridge) {
+    EXPECT_EQ(false, NativeBridgeAvailable());
+    // Try to initialize. This should fail as we are not set up.
+    EXPECT_EQ(false, InitializeNativeBridge(nullptr, nullptr));
+    EXPECT_EQ(true, NativeBridgeError());
+    EXPECT_EQ(false, NativeBridgeAvailable());
+}
+
+}  // namespace android
diff --git a/libnativebridge/tests/ValidNameNativeBridge_test.cpp b/libnativebridge/tests/ValidNameNativeBridge_test.cpp
new file mode 100644
index 0000000..690be4a
--- /dev/null
+++ b/libnativebridge/tests/ValidNameNativeBridge_test.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <NativeBridgeTest.h>
+
+namespace android {
+
+static const char* kTestName = "librandom-bridge_not.existing.so";
+
+TEST_F(NativeBridgeTest, ValidName) {
+    // Check that the name is acceptable.
+    EXPECT_EQ(true, NativeBridgeNameAcceptable(kTestName));
+
+    // Now check what happens on LoadNativeBridge.
+    EXPECT_EQ(false, NativeBridgeError());
+    LoadNativeBridge(kTestName, nullptr);
+    // This will lead to an error as the library doesn't exist.
+    EXPECT_EQ(true, NativeBridgeError());
+    EXPECT_EQ(false, NativeBridgeAvailable());
+}
+
+}  // namespace android
diff --git a/libnativeloader/.clang-format b/libnativeloader/.clang-format
new file mode 120000
index 0000000..fd0645f
--- /dev/null
+++ b/libnativeloader/.clang-format
@@ -0,0 +1 @@
+../.clang-format-2
\ No newline at end of file
diff --git a/libnativeloader/Android.bp b/libnativeloader/Android.bp
new file mode 100644
index 0000000..bfafa76
--- /dev/null
+++ b/libnativeloader/Android.bp
@@ -0,0 +1,116 @@
+// Shared library for target
+// ========================================================
+cc_defaults {
+    name: "libnativeloader-defaults",
+    defaults: ["art_defaults"],
+    cppflags: [
+        "-fvisibility=hidden",
+    ],
+    header_libs: ["libnativeloader-headers"],
+    export_header_lib_headers: ["libnativeloader-headers"],
+}
+
+cc_library {
+    name: "libnativeloader",
+    defaults: ["libnativeloader-defaults"],
+    visibility: [
+        "//frameworks/base/cmds/app_process",
+        // TODO(b/133140750): Clean this up.
+        "//frameworks/base/native/webview/loader",
+    ],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
+    host_supported: true,
+    srcs: [
+        "native_loader.cpp",
+    ],
+    header_libs: ["libnativehelper_header_only"],
+    shared_libs: [
+        "liblog",
+        "libnativebridge",
+        "libbase",
+    ],
+    target: {
+        android: {
+            srcs: [
+                "library_namespaces.cpp",
+                "native_loader_namespace.cpp",
+                "public_libraries.cpp",
+            ],
+            shared_libs: [
+                "libdl_android",
+            ],
+            whole_static_libs: [
+                "PlatformProperties",
+            ],
+        },
+    },
+    stubs: {
+        symbol_file: "libnativeloader.map.txt",
+        versions: ["1"],
+    },
+}
+
+// TODO(b/124250621) eliminate the need for this library
+cc_library {
+    name: "libnativeloader_lazy",
+    defaults: ["libnativeloader-defaults"],
+    visibility: [
+        "//frameworks/base/core/jni",
+        "//frameworks/native/opengl/libs",
+        "//frameworks/native/vulkan/libvulkan",
+    ],
+    host_supported: false,
+    srcs: ["native_loader_lazy.cpp"],
+    required: ["libnativeloader"],
+}
+
+cc_library_headers {
+    name: "libnativeloader-headers",
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.art.debug",
+        "com.android.art.release",
+    ],
+    visibility: [
+        "//art:__subpackages__",
+        // TODO(b/133140750): Clean this up.
+        "//frameworks/av/media/libstagefright",
+        "//frameworks/native/libs/graphicsenv",
+        "//frameworks/native/vulkan/libvulkan",
+    ],
+    host_supported: true,
+    export_include_dirs: ["include"],
+}
+
+cc_test {
+    name: "libnativeloader_test",
+    srcs: [
+        "native_loader_test.cpp",
+        "native_loader.cpp",
+        "library_namespaces.cpp",
+        "native_loader_namespace.cpp",
+        "public_libraries.cpp",
+    ],
+    cflags: ["-DANDROID"],
+    static_libs: [
+        "libbase",
+        "liblog",
+        "libgmock",
+        "PlatformProperties",
+    ],
+    header_libs: [
+        "libnativebridge-headers",
+        "libnativehelper_header_only",
+        "libnativeloader-headers",
+    ],
+    // native_loader_test.cpp mocks libdl APIs so system_shared_libs
+    // are used to include C libraries without libdl.
+    system_shared_libs: [
+        "libc",
+        "libm",
+    ],
+    test_suites: ["device-tests"],
+}
diff --git a/libnativeloader/OWNERS b/libnativeloader/OWNERS
new file mode 100644
index 0000000..f735653
--- /dev/null
+++ b/libnativeloader/OWNERS
@@ -0,0 +1,6 @@
+dimitry@google.com
+jiyong@google.com
+ngeoffray@google.com
+oth@google.com
+mast@google.com
+rpl@google.com
diff --git a/libnativeloader/README.md b/libnativeloader/README.md
new file mode 100644
index 0000000..57b9001
--- /dev/null
+++ b/libnativeloader/README.md
@@ -0,0 +1,84 @@
+libnativeloader
+===============================================================================
+
+Overview
+-------------------------------------------------------------------------------
+libnativeloader is responsible for loading native shared libraries (`*.so`
+files) inside the Android Runtime (ART). The native shared libraries could be
+app-provided JNI libraries or public native libraries like `libc.so` provided
+by the platform.
+
+The most typical use case of this library is calling `System.loadLibrary(name)`.
+When the method is called, the ART runtime delegates the call to this library
+along with the reference to the classloader where the call was made.  Then this
+library finds the linker namespace (named `classloader-namespace`) that is
+associated with the given classloader, and tries to load the requested library
+from the namespace. The actual searching, loading, and linking of the library
+is performed by the dynamic linker.
+
+The linker namespace is created when an APK is loaded into the process, and is
+associated with the classloader that loaded the APK. The linker namespace is
+configured so that only the JNI libraries embedded in the APK are accessible
+from the namespace, thus preventing an APK from loading JNI libraries of other
+APKs.
+
+The linker namespace is also configured differently depending on other
+characteristics of the APK such as whether or not the APK is bundled with the
+platform. In case of the unbundled, i.e., downloaded or updated APK, only the
+public native libraries that are listed in `/system/etc/public.libraries.txt`
+are available from the platform, whereas in case of the bundled, all libraries
+under `/system/lib` are available (i.e. shared). In case when the unbundled
+app is from `/vendor` or `/product` partition, the app is additionally provided
+with the [VNDK-SP](https://source.android.com/devices/architecture/vndk#sp-hal)
+libraries. As the platform is getting modularized with
+[APEX](https://android.googlesource.com/platform/system/apex/+/refs/heads/master/docs/README.md),
+some libraries are no longer provided from platform, but from the APEXes which
+have their own linker namespaces. For example, ICU libraries `libicuuc.so` and
+`libicui18n.so` are from the runtime APEX.
+
+The list of public native libraries is not static. The default set of libraries
+are defined in AOSP, but partners can extend it to include their own libraries.
+Currently, following extensions are available:
+
+- `/vendor/etc/public.libraries.txt`: libraries in `/vendor/lib` that are
+specific to the underlying SoC, e.g. GPU, DSP, etc.
+- `/{system|product}/etc/public.libraries-<companyname>.txt`: libraries in
+`/{system|product}/lib` that a device manufacturer has newly added. The
+libraries should be named as `lib<name>.<companyname>.so` as in
+`libFoo.acme.so`.
+
+Note that, due to the naming constraint requiring `.<companyname>.so` suffix, it
+is prohibited for a device manufacturer to expose an AOSP-defined private
+library, e.g. libgui.so, libart.so, etc., to APKs.
+
+Lastly, libnativeloader is responsible for abstracting the two types of the
+dynamic linker interface: `libdl.so` and `libnativebridge.so`. The former is
+for non-translated, e.g. ARM-on-ARM, libraries, while the latter is for
+loading libraries in a translated environment such as ARM-on-x86.
+
+Implementation
+-------------------------------------------------------------------------------
+Implementation wise, libnativeloader consists of four parts:
+
+- `native_loader.cpp`
+- `library_namespaces.cpp`
+- `native_loader_namespace.cpp`
+- `public_libraries.cpp`
+
+`native_loader.cpp` implements the public interface of this library. It is just
+a thin wrapper around `library_namespaces.cpp` and `native_loader_namespace.cpp`.
+
+`library_namespaces.cpp` implements the singleton class `LibraryNamespaces` which
+is a manager-like entity that is responsible for creating and configuring
+linker namespaces and finding an already created linker namespace for a given
+classloader.
+
+`native_loader_namespace.cpp` implements the class `NativeLoaderNamespace` that
+models a linker namespace. Its main job is to abstract the two types of the
+dynamic linker interface so that other parts of this library do not have to know
+the differences of the interfaces.
+
+`public_libraries.cpp` is responsible for reading `*.txt` files for the public
+native libraries from the various partitions. It can be considered as a part of
+`LibraryNamespaces` but is separated from it to hide the details of the parsing
+routines.
diff --git a/libnativeloader/TEST_MAPPING b/libnativeloader/TEST_MAPPING
new file mode 100644
index 0000000..7becb77
--- /dev/null
+++ b/libnativeloader/TEST_MAPPING
@@ -0,0 +1,12 @@
+{
+  "presubmit": [
+    {
+      "name": "libnativeloader_test"
+    }
+  ],
+  "imports": [
+    {
+      "path": "cts/tests/tests/jni"
+    }
+  ]
+}
diff --git a/libnativeloader/include/nativeloader/dlext_namespaces.h b/libnativeloader/include/nativeloader/dlext_namespaces.h
new file mode 100644
index 0000000..ed335ee
--- /dev/null
+++ b/libnativeloader/include/nativeloader/dlext_namespaces.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBNATIVELOADER_INCLUDE_NATIVELOADER_DLEXT_NAMESPACES_H_
+#define ART_LIBNATIVELOADER_INCLUDE_NATIVELOADER_DLEXT_NAMESPACES_H_
+
+#include <android/dlext.h>
+#include <stdbool.h>
+
+__BEGIN_DECLS
+
+enum {
+  /* A regular namespace is the namespace with a custom search path that does
+   * not impose any restrictions on the location of native libraries.
+   */
+  ANDROID_NAMESPACE_TYPE_REGULAR = 0,
+
+  /* An isolated namespace requires all the libraries to be on the search path
+   * or under permitted_when_isolated_path. The search path is the union of
+   * ld_library_path and default_library_path.
+   */
+  ANDROID_NAMESPACE_TYPE_ISOLATED = 1,
+
+  /* The shared namespace clones the list of libraries of the caller namespace upon creation
+   * which means that they are shared between namespaces - the caller namespace and the new one
+   * will use the same copy of a library if it was loaded prior to android_create_namespace call.
+   *
+   * Note that libraries loaded after the namespace is created will not be shared.
+   *
+   * Shared namespaces can be isolated or regular. Note that they do not inherit the search path nor
+   * permitted_path from the caller's namespace.
+   */
+  ANDROID_NAMESPACE_TYPE_SHARED = 2,
+
+  /* This flag instructs linker to enable grey-list workaround for the namespace.
+   * See http://b/26394120 for details.
+   */
+  ANDROID_NAMESPACE_TYPE_GREYLIST_ENABLED = 0x08000000,
+
+  /* This flag instructs linker to use this namespace as the anonymous
+   * namespace. The anonymous namespace is used in the case when linker cannot
+   * identify the caller of dlopen/dlsym. This happens for the code not loaded
+   * by dynamic linker; for example calls from the mono-compiled code. There can
+   * be only one anonymous namespace in a process. If there already is an
+   * anonymous namespace in the process, using this flag when creating a new
+   * namespace causes an error.
+   */
+  ANDROID_NAMESPACE_TYPE_ALSO_USED_AS_ANONYMOUS = 0x10000000,
+
+  ANDROID_NAMESPACE_TYPE_SHARED_ISOLATED =
+      ANDROID_NAMESPACE_TYPE_SHARED | ANDROID_NAMESPACE_TYPE_ISOLATED,
+};
+
+/*
+ * Creates new linker namespace.
+ * ld_library_path and default_library_path represent the search path
+ * for the libraries in the namespace.
+ *
+ * The libraries in the namespace are searched in the following order:
+ * 1. ld_library_path (Think of this as namespace-local LD_LIBRARY_PATH)
+ * 2. In directories specified by DT_RUNPATH of the "needed by" binary.
+ * 3. default_library_path (Think of this as the namespace-local default library path)
+ *
+ * When type is ANDROID_NAMESPACE_TYPE_ISOLATED the resulting namespace requires all of
+ * the libraries to be on the search path or under the permitted_when_isolated_path;
+ * the search_path is ld_library_path:default_library_path. Note that the
+ * permitted_when_isolated_path path is not part of the search_path and
+ * does not affect the search order. It is a way to allow loading libraries from specific
+ * locations when using absolute path.
+ * If a library or any of its dependencies are outside of the permitted_when_isolated_path
+ * and search_path, and it is not part of the public namespace dlopen will fail.
+ */
+extern struct android_namespace_t* android_create_namespace(
+    const char* name, const char* ld_library_path, const char* default_library_path, uint64_t type,
+    const char* permitted_when_isolated_path, struct android_namespace_t* parent);
+
+/*
+ * Creates a link between namespaces. Every link has list of sonames of
+ * shared libraries. These are the libraries which are accessible from
+ * namespace 'from' but loaded within namespace 'to' context.
+ * When the 'to' namespace is nullptr this function establishes a link between
+ * 'from' namespace and the default namespace.
+ *
+ * The lookup order of the libraries in namespaces with links is following:
+ * 1. Look inside current namespace using 'this' namespace search path.
+ * 2. Look in linked namespaces
+ * 2.1. Perform soname check - if library soname is not in the list of shared
+ *      libraries sonames skip this link, otherwise
+ * 2.2. Search library using linked namespace search path. Note that this
+ *      step will not go deeper into linked namespaces for this library but
+ *      will do so for DT_NEEDED libraries.
+ */
+extern bool android_link_namespaces(struct android_namespace_t* from,
+                                    struct android_namespace_t* to,
+                                    const char* shared_libs_sonames);
+
+extern struct android_namespace_t* android_get_exported_namespace(const char* name);
+
+__END_DECLS
+
+#endif  // ART_LIBNATIVELOADER_INCLUDE_NATIVELOADER_DLEXT_NAMESPACES_H_
diff --git a/libnativeloader/include/nativeloader/native_loader.h b/libnativeloader/include/nativeloader/native_loader.h
new file mode 100644
index 0000000..4fd8092
--- /dev/null
+++ b/libnativeloader/include/nativeloader/native_loader.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBNATIVELOADER_INCLUDE_NATIVELOADER_NATIVE_LOADER_H_
+#define ART_LIBNATIVELOADER_INCLUDE_NATIVELOADER_NATIVE_LOADER_H_
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "jni.h"
+#if defined(__ANDROID__)
+#include <android/dlext.h>
+#endif
+
+#ifdef __cplusplus
+namespace android {
+extern "C" {
+#endif  // __cplusplus
+
+// README: the char** error message parameter being passed
+// to the methods below need to be freed through calling NativeLoaderFreeErrorMessage.
+// It's the caller's responsibility to call that method.
+
+__attribute__((visibility("default")))
+void InitializeNativeLoader();
+
+__attribute__((visibility("default"))) jstring CreateClassLoaderNamespace(
+    JNIEnv* env, int32_t target_sdk_version, jobject class_loader, bool is_shared, jstring dex_path,
+    jstring library_path, jstring permitted_path);
+
+__attribute__((visibility("default"))) void* OpenNativeLibrary(
+    JNIEnv* env, int32_t target_sdk_version, const char* path, jobject class_loader,
+    const char* caller_location, jstring library_path, bool* needs_native_bridge, char** error_msg);
+
+__attribute__((visibility("default"))) bool CloseNativeLibrary(void* handle,
+                                                               const bool needs_native_bridge,
+                                                               char** error_msg);
+
+__attribute__((visibility("default"))) void NativeLoaderFreeErrorMessage(char* msg);
+
+#if defined(__ANDROID__)
+// Look up linker namespace by class_loader. Returns nullptr if
+// there is no namespace associated with the class_loader.
+// TODO(b/79940628): move users to FindNativeLoaderNamespaceByClassLoader and remove this function.
+__attribute__((visibility("default"))) struct android_namespace_t* FindNamespaceByClassLoader(
+    JNIEnv* env, jobject class_loader);
+// That version works with native bridge namespaces, but requires use of OpenNativeLibrary.
+struct NativeLoaderNamespace;
+__attribute__((visibility("default"))) struct NativeLoaderNamespace*
+FindNativeLoaderNamespaceByClassLoader(JNIEnv* env, jobject class_loader);
+// Load library.  Unlike OpenNativeLibrary above, this cannot create a namespace
+// on demand, but it does not require access to JNIEnv either.
+__attribute__((visibility("default"))) void* OpenNativeLibraryInNamespace(
+    struct NativeLoaderNamespace* ns, const char* path, bool* needs_native_bridge,
+    char** error_msg);
+#endif
+
+__attribute__((visibility("default")))
+void ResetNativeLoader();
+
+#ifdef __cplusplus
+}  // extern "C"
+}  // namespace android
+#endif  // __cplusplus
+
+#endif  // ART_LIBNATIVELOADER_INCLUDE_NATIVELOADER_NATIVE_LOADER_H_
diff --git a/libnativeloader/libnativeloader.map.txt b/libnativeloader/libnativeloader.map.txt
new file mode 100644
index 0000000..40c30bd
--- /dev/null
+++ b/libnativeloader/libnativeloader.map.txt
@@ -0,0 +1,31 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+#  Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# TODO(b/122710865): Prune these uses once the runtime APEX is complete.
+LIBNATIVELOADER_1 {
+  global:
+    OpenNativeLibrary;
+    InitializeNativeLoader;
+    ResetNativeLoader;
+    CloseNativeLibrary;
+    OpenNativeLibraryInNamespace;
+    FindNamespaceByClassLoader;
+    FindNativeLoaderNamespaceByClassLoader;
+    CreateClassLoaderNamespace;
+    NativeLoaderFreeErrorMessage;
+  local:
+    *;
+};
diff --git a/libnativeloader/library_namespaces.cpp b/libnativeloader/library_namespaces.cpp
new file mode 100644
index 0000000..9e41c2f
--- /dev/null
+++ b/libnativeloader/library_namespaces.cpp
@@ -0,0 +1,359 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "library_namespaces.h"
+
+#include <dirent.h>
+#include <dlfcn.h>
+
+#include <regex>
+#include <string>
+#include <vector>
+
+#include <android-base/file.h>
+#include <android-base/logging.h>
+#include <android-base/macros.h>
+#include <android-base/properties.h>
+#include <android-base/strings.h>
+#include <nativehelper/scoped_utf_chars.h>
+
+#include "nativeloader/dlext_namespaces.h"
+#include "public_libraries.h"
+#include "utils.h"
+
+namespace android::nativeloader {
+
+namespace {
+// The device may be configured to have the vendor libraries loaded to a separate namespace.
+// For historical reasons this namespace was named sphal but effectively it is intended
+// to use to load vendor libraries to separate namespace with controlled interface between
+// vendor and system namespaces.
+constexpr const char* kVendorNamespaceName = "sphal";
+constexpr const char* kVndkNamespaceName = "vndk";
+constexpr const char* kVndkProductNamespaceName = "vndk_product";
+constexpr const char* kArtNamespaceName = "com_android_art";
+constexpr const char* kNeuralNetworksNamespaceName = "com_android_neuralnetworks";
+constexpr const char* kStatsdNamespaceName = "com_android_os_statsd";
+
+// classloader-namespace is a linker namespace that is created for the loaded
+// app. To be specific, it is created for the app classloader. When
+// System.load() is called from a Java class that is loaded from the
+// classloader, the classloader-namespace namespace associated with that
+// classloader is selected for dlopen. The namespace is configured so that its
+// search path is set to the app-local JNI directory and it is linked to the
+// system namespace with the names of libs listed in the public.libraries.txt.
+// This way an app can only load its own JNI libraries along with the public libs.
+constexpr const char* kClassloaderNamespaceName = "classloader-namespace";
+// Same thing for vendor APKs.
+constexpr const char* kVendorClassloaderNamespaceName = "vendor-classloader-namespace";
+// If the namespace is shared then add this suffix to form
+// "classloader-namespace-shared" or "vendor-classloader-namespace-shared",
+// respectively. A shared namespace (cf. ANDROID_NAMESPACE_TYPE_SHARED) has
+// inherited all the libraries of the parent classloader namespace, or the
+// system namespace for the main app classloader. It is used to give full
+// access to the platform libraries for apps bundled in the system image,
+// including their later updates installed in /data.
+constexpr const char* kSharedNamespaceSuffix = "-shared";
+
+// (http://b/27588281) This is a workaround for apps using custom classloaders and calling
+// System.load() with an absolute path which is outside of the classloader library search path.
+// This list includes all directories app is allowed to access this way.
+constexpr const char* kWhitelistedDirectories = "/data:/mnt/expand";
+
+constexpr const char* kVendorLibPath = "/vendor/" LIB;
+constexpr const char* kProductLibPath = "/product/" LIB ":/system/product/" LIB;
+
+const std::regex kVendorDexPathRegex("(^|:)/vendor/");
+const std::regex kProductDexPathRegex("(^|:)(/system)?/product/");
+
+// Define origin of APK if it is from vendor partition or product partition
+using ApkOrigin = enum {
+  APK_ORIGIN_DEFAULT = 0,
+  APK_ORIGIN_VENDOR = 1,
+  APK_ORIGIN_PRODUCT = 2,
+};
+
+jobject GetParentClassLoader(JNIEnv* env, jobject class_loader) {
+  jclass class_loader_class = env->FindClass("java/lang/ClassLoader");
+  jmethodID get_parent =
+      env->GetMethodID(class_loader_class, "getParent", "()Ljava/lang/ClassLoader;");
+
+  return env->CallObjectMethod(class_loader, get_parent);
+}
+
+ApkOrigin GetApkOriginFromDexPath(JNIEnv* env, jstring dex_path) {
+  ApkOrigin apk_origin = APK_ORIGIN_DEFAULT;
+
+  if (dex_path != nullptr) {
+    ScopedUtfChars dex_path_utf_chars(env, dex_path);
+
+    if (std::regex_search(dex_path_utf_chars.c_str(), kVendorDexPathRegex)) {
+      apk_origin = APK_ORIGIN_VENDOR;
+    }
+
+    if (std::regex_search(dex_path_utf_chars.c_str(), kProductDexPathRegex)) {
+      LOG_ALWAYS_FATAL_IF(apk_origin == APK_ORIGIN_VENDOR,
+                          "Dex path contains both vendor and product partition : %s",
+                          dex_path_utf_chars.c_str());
+
+      apk_origin = APK_ORIGIN_PRODUCT;
+    }
+  }
+  return apk_origin;
+}
+
+}  // namespace
+
+void LibraryNamespaces::Initialize() {
+  // Once public namespace is initialized there is no
+  // point in running this code - it will have no effect
+  // on the current list of public libraries.
+  if (initialized_) {
+    return;
+  }
+
+  // android_init_namespaces() expects all the public libraries
+  // to be loaded so that they can be found by soname alone.
+  //
+  // TODO(dimitry): this is a bit misleading since we do not know
+  // if the vendor public library is going to be opened from /vendor/lib
+  // we might as well end up loading them from /system/lib or /product/lib
+  // For now we rely on CTS test to catch things like this but
+  // it should probably be addressed in the future.
+  for (const auto& soname : android::base::Split(preloadable_public_libraries(), ":")) {
+    LOG_ALWAYS_FATAL_IF(dlopen(soname.c_str(), RTLD_NOW | RTLD_NODELETE) == nullptr,
+                        "Error preloading public library %s: %s", soname.c_str(), dlerror());
+  }
+}
+
+Result<NativeLoaderNamespace*> LibraryNamespaces::Create(JNIEnv* env, uint32_t target_sdk_version,
+                                                         jobject class_loader, bool is_shared,
+                                                         jstring dex_path,
+                                                         jstring java_library_path,
+                                                         jstring java_permitted_path) {
+  std::string library_path;  // empty string by default.
+
+  if (java_library_path != nullptr) {
+    ScopedUtfChars library_path_utf_chars(env, java_library_path);
+    library_path = library_path_utf_chars.c_str();
+  }
+
+  ApkOrigin apk_origin = GetApkOriginFromDexPath(env, dex_path);
+
+  // (http://b/27588281) This is a workaround for apps using custom
+  // classloaders and calling System.load() with an absolute path which
+  // is outside of the classloader library search path.
+  //
+  // This part effectively allows such a classloader to access anything
+  // under /data and /mnt/expand
+  std::string permitted_path = kWhitelistedDirectories;
+
+  if (java_permitted_path != nullptr) {
+    ScopedUtfChars path(env, java_permitted_path);
+    if (path.c_str() != nullptr && path.size() > 0) {
+      permitted_path = permitted_path + ":" + path.c_str();
+    }
+  }
+
+  LOG_ALWAYS_FATAL_IF(FindNamespaceByClassLoader(env, class_loader) != nullptr,
+                      "There is already a namespace associated with this classloader");
+
+  std::string system_exposed_libraries = default_public_libraries();
+  std::string namespace_name = kClassloaderNamespaceName;
+  ApkOrigin unbundled_app_origin = APK_ORIGIN_DEFAULT;
+  if ((apk_origin == APK_ORIGIN_VENDOR ||
+       (apk_origin == APK_ORIGIN_PRODUCT &&
+        is_product_vndk_version_defined())) &&
+      !is_shared) {
+    unbundled_app_origin = apk_origin;
+    // For vendor / product apks, give access to the vendor / product lib even though
+    // they are treated as unbundled; the libs and apks are still bundled
+    // together in the vendor / product partition.
+    const char* origin_partition;
+    const char* origin_lib_path;
+    const char* llndk_libraries;
+
+    switch (apk_origin) {
+      case APK_ORIGIN_VENDOR:
+        origin_partition = "vendor";
+        origin_lib_path = kVendorLibPath;
+        llndk_libraries = llndk_libraries_vendor().c_str();
+        break;
+      case APK_ORIGIN_PRODUCT:
+        origin_partition = "product";
+        origin_lib_path = kProductLibPath;
+        llndk_libraries = llndk_libraries_product().c_str();
+        break;
+      default:
+        origin_partition = "unknown";
+        origin_lib_path = "";
+        llndk_libraries = "";
+    }
+    library_path = library_path + ":" + origin_lib_path;
+    permitted_path = permitted_path + ":" + origin_lib_path;
+
+    // Also give access to LLNDK libraries since they are available to vendor or product
+    system_exposed_libraries = system_exposed_libraries + ":" + llndk_libraries;
+
+    // Different name is useful for debugging
+    namespace_name = kVendorClassloaderNamespaceName;
+    ALOGD("classloader namespace configured for unbundled %s apk. library_path=%s",
+          origin_partition, library_path.c_str());
+  } else {
+    // extended public libraries are NOT available to vendor apks, otherwise it
+    // would be system->vendor violation.
+    if (!extended_public_libraries().empty()) {
+      system_exposed_libraries = system_exposed_libraries + ':' + extended_public_libraries();
+    }
+  }
+
+  if (is_shared) {
+    // Show in the name that the namespace was created as shared, for debugging
+    // purposes.
+    namespace_name = namespace_name + kSharedNamespaceSuffix;
+  }
+
+  // Create the app namespace
+  NativeLoaderNamespace* parent_ns = FindParentNamespaceByClassLoader(env, class_loader);
+  // Heuristic: the first classloader with non-empty library_path is assumed to
+  // be the main classloader for app
+  // TODO(b/139178525) remove this heuristic by determining this in LoadedApk (or its
+  // friends) and then passing it down to here.
+  bool is_main_classloader = app_main_namespace_ == nullptr && !library_path.empty();
+  // Policy: the namespace for the main classloader is also used as the
+  // anonymous namespace.
+  bool also_used_as_anonymous = is_main_classloader;
+  // Note: this function is executed with g_namespaces_mutex held, thus no
+  // racing here.
+  auto app_ns = NativeLoaderNamespace::Create(
+      namespace_name, library_path, permitted_path, parent_ns, is_shared,
+      target_sdk_version < 24 /* is_greylist_enabled */, also_used_as_anonymous);
+  if (!app_ns.ok()) {
+    return app_ns.error();
+  }
+  // ... and link to other namespaces to allow access to some public libraries
+  bool is_bridged = app_ns->IsBridged();
+
+  auto system_ns = NativeLoaderNamespace::GetSystemNamespace(is_bridged);
+  if (!system_ns.ok()) {
+    return system_ns.error();
+  }
+
+  auto linked = app_ns->Link(*system_ns, system_exposed_libraries);
+  if (!linked.ok()) {
+    return linked.error();
+  }
+
+  auto art_ns = NativeLoaderNamespace::GetExportedNamespace(kArtNamespaceName, is_bridged);
+  // ART APEX does not exist on host, and under certain build conditions.
+  if (art_ns.ok()) {
+    linked = app_ns->Link(*art_ns, art_public_libraries());
+    if (!linked.ok()) {
+      return linked.error();
+    }
+  }
+
+  // Give access to NNAPI libraries (apex-updated LLNDK library).
+  auto nnapi_ns =
+      NativeLoaderNamespace::GetExportedNamespace(kNeuralNetworksNamespaceName, is_bridged);
+  if (nnapi_ns.ok()) {
+    linked = app_ns->Link(*nnapi_ns, neuralnetworks_public_libraries());
+    if (!linked.ok()) {
+      return linked.error();
+    }
+  }
+
+  // Give access to VNDK-SP libraries from the 'vndk' namespace for unbundled vendor apps.
+  if (unbundled_app_origin == APK_ORIGIN_VENDOR && !vndksp_libraries_vendor().empty()) {
+    auto vndk_ns = NativeLoaderNamespace::GetExportedNamespace(kVndkNamespaceName, is_bridged);
+    if (vndk_ns.ok()) {
+      linked = app_ns->Link(*vndk_ns, vndksp_libraries_vendor());
+      if (!linked.ok()) {
+        return linked.error();
+      }
+    }
+  }
+
+  // Give access to VNDK-SP libraries from the 'vndk_product' namespace for unbundled product apps.
+  if (unbundled_app_origin == APK_ORIGIN_PRODUCT && !vndksp_libraries_product().empty()) {
+    auto vndk_ns = NativeLoaderNamespace::GetExportedNamespace(kVndkProductNamespaceName, is_bridged);
+    if (vndk_ns.ok()) {
+      linked = app_ns->Link(*vndk_ns, vndksp_libraries_product());
+      if (!linked.ok()) {
+        return linked.error();
+      }
+    }
+  }
+
+  // Give access to StatsdAPI libraries
+  auto statsd_ns =
+      NativeLoaderNamespace::GetExportedNamespace(kStatsdNamespaceName, is_bridged);
+  if (statsd_ns.ok()) {
+    linked = app_ns->Link(*statsd_ns, statsd_public_libraries());
+    if (!linked.ok()) {
+      return linked.error();
+    }
+  }
+
+  if (!vendor_public_libraries().empty()) {
+    auto vendor_ns = NativeLoaderNamespace::GetExportedNamespace(kVendorNamespaceName, is_bridged);
+    // when vendor_ns is not configured, link to the system namespace
+    auto target_ns = vendor_ns.ok() ? vendor_ns : system_ns;
+    if (target_ns.ok()) {
+      linked = app_ns->Link(*target_ns, vendor_public_libraries());
+      if (!linked.ok()) {
+        return linked.error();
+      }
+    }
+  }
+
+  auto& emplaced = namespaces_.emplace_back(
+      std::make_pair(env->NewWeakGlobalRef(class_loader), *app_ns));
+  if (is_main_classloader) {
+    app_main_namespace_ = &emplaced.second;
+  }
+  return &emplaced.second;
+}
+
+NativeLoaderNamespace* LibraryNamespaces::FindNamespaceByClassLoader(JNIEnv* env,
+                                                                     jobject class_loader) {
+  auto it = std::find_if(namespaces_.begin(), namespaces_.end(),
+                         [&](const std::pair<jweak, NativeLoaderNamespace>& value) {
+                           return env->IsSameObject(value.first, class_loader);
+                         });
+  if (it != namespaces_.end()) {
+    return &it->second;
+  }
+
+  return nullptr;
+}
+
+NativeLoaderNamespace* LibraryNamespaces::FindParentNamespaceByClassLoader(JNIEnv* env,
+                                                                           jobject class_loader) {
+  jobject parent_class_loader = GetParentClassLoader(env, class_loader);
+
+  while (parent_class_loader != nullptr) {
+    NativeLoaderNamespace* ns;
+    if ((ns = FindNamespaceByClassLoader(env, parent_class_loader)) != nullptr) {
+      return ns;
+    }
+
+    parent_class_loader = GetParentClassLoader(env, parent_class_loader);
+  }
+
+  return nullptr;
+}
+
+}  // namespace android::nativeloader
diff --git a/libnativeloader/library_namespaces.h b/libnativeloader/library_namespaces.h
new file mode 100644
index 0000000..8fac534
--- /dev/null
+++ b/libnativeloader/library_namespaces.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBNATIVELOADER_LIBRARY_NAMESPACES_H_
+#define ART_LIBNATIVELOADER_LIBRARY_NAMESPACES_H_
+
+#if !defined(__ANDROID__)
+#error "Not available for host"
+#endif
+
+#define LOG_TAG "nativeloader"
+
+#include "native_loader_namespace.h"
+
+#include <list>
+#include <string>
+
+#include <android-base/result.h>
+#include <jni.h>
+
+namespace android::nativeloader {
+
+using android::base::Result;
+
+// LibraryNamespaces is a singleton object that manages NativeLoaderNamespace
+// objects for an app process. Its main job is to create (and configure) a new
+// NativeLoaderNamespace object for a Java ClassLoader, and to find an existing
+// object for a given ClassLoader.
+class LibraryNamespaces {
+ public:
+  LibraryNamespaces() : initialized_(false), app_main_namespace_(nullptr) {}
+
+  // Movable but not copyable: each instance owns its registry of namespaces.
+  LibraryNamespaces(LibraryNamespaces&&) = default;
+  LibraryNamespaces(const LibraryNamespaces&) = delete;
+  LibraryNamespaces& operator=(const LibraryNamespaces&) = delete;
+
+  // One-time process setup; defined in library_namespaces.cpp.
+  void Initialize();
+  // Drops every registered namespace and returns to the pre-Initialize state.
+  void Reset() {
+    namespaces_.clear();
+    initialized_ = false;
+    app_main_namespace_ = nullptr;
+  }
+  // Creates (and configures) a namespace for `class_loader` and registers it.
+  // Returns a pointer into namespaces_, or an error on failure.
+  Result<NativeLoaderNamespace*> Create(JNIEnv* env, uint32_t target_sdk_version,
+                                        jobject class_loader, bool is_shared, jstring dex_path,
+                                        jstring java_library_path, jstring java_permitted_path);
+  // Returns the namespace registered for `class_loader`, or nullptr.
+  NativeLoaderNamespace* FindNamespaceByClassLoader(JNIEnv* env, jobject class_loader);
+
+ private:
+  Result<void> InitPublicNamespace(const char* library_path);
+  NativeLoaderNamespace* FindParentNamespaceByClassLoader(JNIEnv* env, jobject class_loader);
+
+  bool initialized_;
+  // Namespace of the app's main class loader; owned by namespaces_ below.
+  NativeLoaderNamespace* app_main_namespace_;
+  // (weak class-loader ref, namespace) pairs. std::list keeps element
+  // addresses stable, so raw pointers handed out by Create/Find stay valid
+  // across later insertions.
+  std::list<std::pair<jweak, NativeLoaderNamespace>> namespaces_;
+};
+
+}  // namespace android::nativeloader
+
+#endif  // ART_LIBNATIVELOADER_LIBRARY_NAMESPACES_H_
diff --git a/libnativeloader/native_loader.cpp b/libnativeloader/native_loader.cpp
new file mode 100644
index 0000000..988e8a8
--- /dev/null
+++ b/libnativeloader/native_loader.cpp
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "nativeloader"
+
+#include "nativeloader/native_loader.h"
+
+#include <dlfcn.h>
+#include <sys/types.h>
+
+#include <algorithm>
+#include <memory>
+#include <mutex>
+#include <string>
+#include <vector>
+
+#include <android-base/file.h>
+#include <android-base/macros.h>
+#include <android-base/strings.h>
+#include <nativebridge/native_bridge.h>
+#include <nativehelper/scoped_utf_chars.h>
+
+#ifdef __ANDROID__
+#include <log/log.h>
+#include "library_namespaces.h"
+#include "nativeloader/dlext_namespaces.h"
+#endif
+
+namespace android {
+
+namespace {
+#if defined(__ANDROID__)
+using android::nativeloader::LibraryNamespaces;
+
+constexpr const char* kApexPath = "/apex/";
+
+std::mutex g_namespaces_mutex;
+LibraryNamespaces* g_namespaces = new LibraryNamespaces;
+
+// Maps a caller location of the form "/apex/<modulename>/..." to the linker
+// namespace exported for that APEX module. Returns nullptr when the location
+// is not under /apex/; aborts if the path is malformed or the namespace is
+// missing (both indicate a misconfigured system).
+android_namespace_t* FindExportedNamespace(const char* caller_location) {
+  std::string location = caller_location;
+  // Lots of implicit assumptions here: we expect `caller_location` to be of the form:
+  // /apex/modulename/...
+  //
+  // And we extract from it 'modulename', which is the name of the linker namespace.
+  if (android::base::StartsWith(location, kApexPath)) {
+    size_t start_index = strlen(kApexPath);
+    size_t slash_index = location.find_first_of('/', start_index);
+    LOG_ALWAYS_FATAL_IF((slash_index == std::string::npos),
+                        "Error finding namespace of apex: no slash in path %s", caller_location);
+    std::string name = location.substr(start_index, slash_index - start_index);
+    // Namespace names use '_' where the module name has '.'.
+    std::replace(name.begin(), name.end(), '.', '_');
+    android_namespace_t* boot_namespace = android_get_exported_namespace(name.c_str());
+    LOG_ALWAYS_FATAL_IF((boot_namespace == nullptr),
+                        "Error finding namespace of apex: no namespace called %s", name.c_str());
+    return boot_namespace;
+  }
+  return nullptr;
+}
+#endif  // #if defined(__ANDROID__)
+}  // namespace
+
+// Initializes the process-wide namespace registry. No-op on host builds.
+void InitializeNativeLoader() {
+#if defined(__ANDROID__)
+  std::lock_guard<std::mutex> guard(g_namespaces_mutex);
+  g_namespaces->Initialize();
+#endif
+}
+
+// Clears all registered namespaces (see LibraryNamespaces::Reset). No-op on
+// host builds.
+void ResetNativeLoader() {
+#if defined(__ANDROID__)
+  std::lock_guard<std::mutex> guard(g_namespaces_mutex);
+  g_namespaces->Reset();
+#endif
+}
+
+// Creates a linker namespace for `class_loader`.
+// Note the inverted return contract: nullptr means success; a non-null
+// jstring carries the error message. On host builds this is a no-op that
+// always "succeeds".
+jstring CreateClassLoaderNamespace(JNIEnv* env, int32_t target_sdk_version, jobject class_loader,
+                                   bool is_shared, jstring dex_path, jstring library_path,
+                                   jstring permitted_path) {
+#if defined(__ANDROID__)
+  std::lock_guard<std::mutex> guard(g_namespaces_mutex);
+  auto ns = g_namespaces->Create(env, target_sdk_version, class_loader, is_shared, dex_path,
+                                 library_path, permitted_path);
+  if (!ns.ok()) {
+    // Surface the failure to the Java side as a string.
+    return env->NewStringUTF(ns.error().message().c_str());
+  }
+#else
+  UNUSED(env, target_sdk_version, class_loader, is_shared, dex_path, library_path, permitted_path);
+#endif
+  return nullptr;
+}
+
+// Opens a native library on behalf of a class loader.
+// Android: without a class loader the library is loaded either into the
+// exported namespace of the caller's APEX (derived from `caller_location`)
+// or via plain dlopen; with a class loader it is loaded into that loader's
+// namespace, creating an isolated non-shared one on demand for loaders not
+// registered through CreateClassLoaderNamespace.
+// Host: best-effort emulation that tries each `library_path` entry in turn.
+// On failure returns nullptr and sets `*error_msg` to a strdup'ed string
+// that must be released with NativeLoaderFreeErrorMessage.
+void* OpenNativeLibrary(JNIEnv* env, int32_t target_sdk_version, const char* path,
+                        jobject class_loader, const char* caller_location, jstring library_path,
+                        bool* needs_native_bridge, char** error_msg) {
+#if defined(__ANDROID__)
+  UNUSED(target_sdk_version);
+  if (class_loader == nullptr) {
+    // No class loader: platform-internal load, never through the bridge.
+    *needs_native_bridge = false;
+    if (caller_location != nullptr) {
+      android_namespace_t* boot_namespace = FindExportedNamespace(caller_location);
+      if (boot_namespace != nullptr) {
+        // Load within the caller APEX's exported namespace.
+        const android_dlextinfo dlextinfo = {
+            .flags = ANDROID_DLEXT_USE_NAMESPACE,
+            .library_namespace = boot_namespace,
+        };
+        void* handle = android_dlopen_ext(path, RTLD_NOW, &dlextinfo);
+        if (handle == nullptr) {
+          *error_msg = strdup(dlerror());
+        }
+        return handle;
+      }
+    }
+    void* handle = dlopen(path, RTLD_NOW);
+    if (handle == nullptr) {
+      *error_msg = strdup(dlerror());
+    }
+    return handle;
+  }
+
+  std::lock_guard<std::mutex> guard(g_namespaces_mutex);
+  NativeLoaderNamespace* ns;
+
+  if ((ns = g_namespaces->FindNamespaceByClassLoader(env, class_loader)) == nullptr) {
+    // This is the case where the classloader was not created by ApplicationLoaders
+    // In this case we create an isolated not-shared namespace for it.
+    Result<NativeLoaderNamespace*> isolated_ns =
+        g_namespaces->Create(env, target_sdk_version, class_loader, false /* is_shared */, nullptr,
+                             library_path, nullptr);
+    if (!isolated_ns.ok()) {
+      *error_msg = strdup(isolated_ns.error().message().c_str());
+      return nullptr;
+    } else {
+      ns = *isolated_ns;
+    }
+  }
+
+  return OpenNativeLibraryInNamespace(ns, path, needs_native_bridge, error_msg);
+#else
+  UNUSED(env, target_sdk_version, class_loader, caller_location);
+
+  // Do some best effort to emulate library-path support. It will not
+  // work for dependencies.
+  //
+  // Note: null has a special meaning and must be preserved.
+  std::string c_library_path;  // Empty string by default.
+  if (library_path != nullptr && path != nullptr && path[0] != '/') {
+    ScopedUtfChars library_path_utf_chars(env, library_path);
+    c_library_path = library_path_utf_chars.c_str();
+  }
+
+  // Splitting "" yields one empty entry, so a bare `path` is still tried.
+  std::vector<std::string> library_paths = base::Split(c_library_path, ":");
+
+  for (const std::string& lib_path : library_paths) {
+    *needs_native_bridge = false;
+    const char* path_arg;
+    std::string complete_path;
+    if (path == nullptr) {
+      // Preserve null.
+      path_arg = nullptr;
+    } else {
+      complete_path = lib_path;
+      if (!complete_path.empty()) {
+        complete_path.append("/");
+      }
+      complete_path.append(path);
+      path_arg = complete_path.c_str();
+    }
+    void* handle = dlopen(path_arg, RTLD_NOW);
+    if (handle != nullptr) {
+      return handle;
+    }
+    // dlopen failed; fall back to the native bridge if it supports this path.
+    if (NativeBridgeIsSupported(path_arg)) {
+      *needs_native_bridge = true;
+      handle = NativeBridgeLoadLibrary(path_arg, RTLD_NOW);
+      if (handle != nullptr) {
+        return handle;
+      }
+      *error_msg = strdup(NativeBridgeGetError());
+    } else {
+      *error_msg = strdup(dlerror());
+    }
+  }
+  return nullptr;
+#endif
+}
+
+// Closes a library handle via the same mechanism that opened it (native
+// bridge or dlclose). Returns true on success; on failure sets `*error_msg`
+// (strdup'ed; release with NativeLoaderFreeErrorMessage).
+bool CloseNativeLibrary(void* handle, const bool needs_native_bridge, char** error_msg) {
+  bool success;
+  if (needs_native_bridge) {
+    success = (NativeBridgeUnloadLibrary(handle) == 0);
+    if (!success) {
+      *error_msg = strdup(NativeBridgeGetError());
+    }
+  } else {
+    success = (dlclose(handle) == 0);
+    if (!success) {
+      *error_msg = strdup(dlerror());
+    }
+  }
+
+  return success;
+}
+
+// Releases an error message produced by this library. Safe to call with
+// nullptr (free(nullptr) is a no-op).
+void NativeLoaderFreeErrorMessage(char* msg) {
+  // The error messages get allocated through strdup, so we must call free on them.
+  free(msg);
+}
+
+#if defined(__ANDROID__)
+// Loads `path` inside the given namespace. Both out-parameters are optional:
+// `*error_msg` is set (strdup'ed) only on failure, `*needs_native_bridge`
+// reports whether the namespace is a translated (bridged) one.
+void* OpenNativeLibraryInNamespace(NativeLoaderNamespace* ns, const char* path,
+                                   bool* needs_native_bridge, char** error_msg) {
+  auto handle = ns->Load(path);
+  if (!handle.ok() && error_msg != nullptr) {
+    *error_msg = strdup(handle.error().message().c_str());
+  }
+  if (needs_native_bridge != nullptr) {
+    *needs_native_bridge = ns->IsBridged();
+  }
+  return handle.ok() ? *handle : nullptr;
+}
+
+// native_bridge_namespaces are not supported for callers of this function.
+// This function will return nullptr in the case when application is running
+// on native bridge.
+// Returns the raw linker namespace for `class_loader`, or nullptr when there
+// is none or when the namespace is bridged (see comment above: bridged
+// namespaces are deliberately not exposed through this API).
+android_namespace_t* FindNamespaceByClassLoader(JNIEnv* env, jobject class_loader) {
+  std::lock_guard<std::mutex> guard(g_namespaces_mutex);
+  NativeLoaderNamespace* ns = g_namespaces->FindNamespaceByClassLoader(env, class_loader);
+  if (ns != nullptr && !ns->IsBridged()) {
+    return ns->ToRawAndroidNamespace();
+  }
+  return nullptr;
+}
+
+// Thread-safe wrapper over the registry lookup; unlike the function above,
+// this also returns bridged namespaces.
+NativeLoaderNamespace* FindNativeLoaderNamespaceByClassLoader(JNIEnv* env, jobject class_loader) {
+  std::lock_guard<std::mutex> guard(g_namespaces_mutex);
+  return g_namespaces->FindNamespaceByClassLoader(env, class_loader);
+}
+#endif
+
+};  // namespace android
diff --git a/libnativeloader/native_loader_lazy.cpp b/libnativeloader/native_loader_lazy.cpp
new file mode 100644
index 0000000..2eb1203
--- /dev/null
+++ b/libnativeloader/native_loader_lazy.cpp
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nativeloader/native_loader.h"
+#define LOG_TAG "nativeloader"
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <string.h>
+
+#include <log/log.h>
+
+namespace android {
+
+namespace {
+
+// Lazily dlopens the real libnativeloader.so exactly once (function-local
+// static) and aborts the process if it cannot be loaded.
+void* GetLibHandle() {
+  static void* handle = dlopen("libnativeloader.so", RTLD_NOW);
+  LOG_FATAL_IF(handle == nullptr, "Failed to load libnativeloader.so: %s", dlerror());
+  return handle;
+}
+
+// Resolves `function_name` from the lazily-loaded library and casts it to the
+// requested function-pointer type; aborts if the symbol is missing.
+template <typename FuncPtr>
+FuncPtr GetFuncPtr(const char* function_name) {
+  auto f = reinterpret_cast<FuncPtr>(dlsym(GetLibHandle(), function_name));
+  LOG_FATAL_IF(f == nullptr, "Failed to get address of %s: %s", function_name, dlerror());
+  return f;
+}
+
+#define GET_FUNC_PTR(name) GetFuncPtr<decltype(&name)>(#name)
+
+}  // namespace
+
+// Lazy pass-through stubs: each function resolves its real implementation
+// from libnativeloader.so on first call (cached in a function-local static)
+// and forwards all arguments unchanged. Signatures mirror
+// nativeloader/native_loader.h exactly so GET_FUNC_PTR's decltype matches.
+void InitializeNativeLoader() {
+  static auto f = GET_FUNC_PTR(InitializeNativeLoader);
+  return f();
+}
+
+jstring CreateClassLoaderNamespace(JNIEnv* env, int32_t target_sdk_version, jobject class_loader,
+                                   bool is_shared, jstring dex_path, jstring library_path,
+                                   jstring permitted_path) {
+  static auto f = GET_FUNC_PTR(CreateClassLoaderNamespace);
+  return f(env, target_sdk_version, class_loader, is_shared, dex_path, library_path,
+           permitted_path);
+}
+
+void* OpenNativeLibrary(JNIEnv* env, int32_t target_sdk_version, const char* path,
+                        jobject class_loader, const char* caller_location, jstring library_path,
+                        bool* needs_native_bridge, char** error_msg) {
+  static auto f = GET_FUNC_PTR(OpenNativeLibrary);
+  return f(env, target_sdk_version, path, class_loader, caller_location, library_path,
+           needs_native_bridge, error_msg);
+}
+
+bool CloseNativeLibrary(void* handle, const bool needs_native_bridge, char** error_msg) {
+  static auto f = GET_FUNC_PTR(CloseNativeLibrary);
+  return f(handle, needs_native_bridge, error_msg);
+}
+
+void NativeLoaderFreeErrorMessage(char* msg) {
+  static auto f = GET_FUNC_PTR(NativeLoaderFreeErrorMessage);
+  return f(msg);
+}
+
+struct android_namespace_t* FindNamespaceByClassLoader(JNIEnv* env, jobject class_loader) {
+  static auto f = GET_FUNC_PTR(FindNamespaceByClassLoader);
+  return f(env, class_loader);
+}
+
+struct NativeLoaderNamespace* FindNativeLoaderNamespaceByClassLoader(JNIEnv* env,
+                                                                     jobject class_loader) {
+  static auto f = GET_FUNC_PTR(FindNativeLoaderNamespaceByClassLoader);
+  return f(env, class_loader);
+}
+
+void* OpenNativeLibraryInNamespace(struct NativeLoaderNamespace* ns, const char* path,
+                                   bool* needs_native_bridge, char** error_msg) {
+  static auto f = GET_FUNC_PTR(OpenNativeLibraryInNamespace);
+  return f(ns, path, needs_native_bridge, error_msg);
+}
+
+void ResetNativeLoader() {
+  static auto f = GET_FUNC_PTR(ResetNativeLoader);
+  return f();
+}
+
+#undef GET_FUNC_PTR
+
+}  // namespace android
diff --git a/libnativeloader/native_loader_namespace.cpp b/libnativeloader/native_loader_namespace.cpp
new file mode 100644
index 0000000..49f3035
--- /dev/null
+++ b/libnativeloader/native_loader_namespace.cpp
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "nativeloader"
+
+#include "native_loader_namespace.h"
+
+#include <dlfcn.h>
+
+#include <functional>
+
+#include <android-base/strings.h>
+#include <log/log.h>
+#include <nativebridge/native_bridge.h>
+
+#include "nativeloader/dlext_namespaces.h"
+
+using android::base::Error;
+
+namespace android {
+
+namespace {
+
+constexpr const char* kDefaultNamespaceName = "default";
+constexpr const char* kSystemNamespaceName = "system";
+
+// Fetches the last error from the appropriate linker (native bridge when
+// `is_bridged`, otherwise dlerror). Never returns an empty string so the
+// result can be appended to an error message unconditionally.
+std::string GetLinkerError(bool is_bridged) {
+  const char* msg = is_bridged ? NativeBridgeGetError() : dlerror();
+  if (msg == nullptr) {
+    return "no error";
+  }
+  return std::string(msg);
+}
+
+}  // namespace
+
+// Looks up a namespace exported by the dynamic linker — or by the native
+// bridge when `is_bridged` is true — and wraps it in a NativeLoaderNamespace.
+// Returns an error Result when no namespace of that name is exported.
+Result<NativeLoaderNamespace> NativeLoaderNamespace::GetExportedNamespace(const std::string& name,
+                                                                          bool is_bridged) {
+  if (!is_bridged) {
+    auto raw = android_get_exported_namespace(name.c_str());
+    if (raw != nullptr) {
+      return NativeLoaderNamespace(name, raw);
+    }
+  } else {
+    auto raw = NativeBridgeGetExportedNamespace(name.c_str());
+    if (raw != nullptr) {
+      return NativeLoaderNamespace(name, raw);
+    }
+  }
+  // Fixed garbled error text ("does not exist or exported").
+  return Errorf("namespace {} does not exist or is not exported", name);
+}
+
+// The system namespace is called "default" for binaries in /system and
+// "system" for those in the Runtime APEX. Try "system" first since
+// "default" always exists.
+Result<NativeLoaderNamespace> NativeLoaderNamespace::GetSystemNamespace(bool is_bridged) {
+  auto ns = GetExportedNamespace(kSystemNamespaceName, is_bridged);
+  if (ns.ok()) return ns;
+  ns = GetExportedNamespace(kDefaultNamespaceName, is_bridged);
+  if (ns.ok()) return ns;
+
+  // If nothing is found, return NativeLoaderNamespace constructed from nullptr.
+  // nullptr also means default namespace to the linker.
+  // The static_casts pick which variant alternative (native vs bridged) the
+  // resulting namespace holds.
+  if (!is_bridged) {
+    return NativeLoaderNamespace(kDefaultNamespaceName, static_cast<android_namespace_t*>(nullptr));
+  } else {
+    return NativeLoaderNamespace(kDefaultNamespaceName,
+                                 static_cast<native_bridge_namespace_t*>(nullptr));
+  }
+}
+
+// Creates a new isolated linker namespace (native or bridged) with the given
+// search/permitted paths, parented to `parent` or, when parent is null, to
+// the system namespace. Whether the namespace is bridged is inherited from
+// the parent, or inferred from `search_paths` when there is no parent.
+// Returns an error Result if the underlying linker call fails.
+Result<NativeLoaderNamespace> NativeLoaderNamespace::Create(
+    const std::string& name, const std::string& search_paths, const std::string& permitted_paths,
+    const NativeLoaderNamespace* parent, bool is_shared, bool is_greylist_enabled,
+    bool also_used_as_anonymous) {
+  bool is_bridged = false;
+  if (parent != nullptr) {
+    is_bridged = parent->IsBridged();
+  } else if (!search_paths.empty()) {
+    is_bridged = NativeBridgeIsPathSupported(search_paths.c_str());
+  }
+
+  // Fall back to the system namespace if no parent is set.
+  auto system_ns = GetSystemNamespace(is_bridged);
+  if (!system_ns.ok()) {
+    return system_ns.error();
+  }
+  const NativeLoaderNamespace& effective_parent = parent != nullptr ? *parent : *system_ns;
+
+  // All namespaces for apps are isolated
+  uint64_t type = ANDROID_NAMESPACE_TYPE_ISOLATED;
+
+  // The namespace is also used as the anonymous namespace
+  // which is used when the linker fails to determine the caller address
+  if (also_used_as_anonymous) {
+    type |= ANDROID_NAMESPACE_TYPE_ALSO_USED_AS_ANONYMOUS;
+  }
+
+  // Bundled apps have access to all system libraries that are currently loaded
+  // in the default namespace
+  if (is_shared) {
+    type |= ANDROID_NAMESPACE_TYPE_SHARED;
+  }
+  if (is_greylist_enabled) {
+    type |= ANDROID_NAMESPACE_TYPE_GREYLIST_ENABLED;
+  }
+
+  if (!is_bridged) {
+    android_namespace_t* raw =
+        android_create_namespace(name.c_str(), nullptr, search_paths.c_str(), type,
+                                 permitted_paths.c_str(), effective_parent.ToRawAndroidNamespace());
+    if (raw != nullptr) {
+      return NativeLoaderNamespace(name, raw);
+    }
+  } else {
+    native_bridge_namespace_t* raw = NativeBridgeCreateNamespace(
+        name.c_str(), nullptr, search_paths.c_str(), type, permitted_paths.c_str(),
+        effective_parent.ToRawNativeBridgeNamespace());
+    if (raw != nullptr) {
+      return NativeLoaderNamespace(name, raw);
+    }
+  }
+  return Errorf("failed to create {} namespace name:{}, search_paths:{}, permitted_paths:{}",
+                is_bridged ? "bridged" : "native", name, search_paths, permitted_paths);
+}
+
+// Makes the colon-separated `shared_libs` in `target` accessible from this
+// namespace. Aborts on an empty library list (a programming error); returns
+// an error Result with the linker's message on failure.
+Result<void> NativeLoaderNamespace::Link(const NativeLoaderNamespace& target,
+                                         const std::string& shared_libs) const {
+  LOG_ALWAYS_FATAL_IF(shared_libs.empty(), "empty share lib when linking %s to %s",
+                      this->name().c_str(), target.name().c_str());
+  if (!IsBridged()) {
+    if (android_link_namespaces(this->ToRawAndroidNamespace(), target.ToRawAndroidNamespace(),
+                                shared_libs.c_str())) {
+      return {};
+    }
+  } else {
+    if (NativeBridgeLinkNamespaces(this->ToRawNativeBridgeNamespace(),
+                                   target.ToRawNativeBridgeNamespace(), shared_libs.c_str())) {
+      return {};
+    }
+  }
+  return Error() << GetLinkerError(IsBridged());
+}
+
+// dlopens `lib_name` within this namespace, via android_dlopen_ext for a
+// native namespace or the native-bridge loader for a bridged one. Returns
+// the handle, or an error Result carrying the linker's message.
+Result<void*> NativeLoaderNamespace::Load(const char* lib_name) const {
+  if (!IsBridged()) {
+    android_dlextinfo extinfo;
+    extinfo.flags = ANDROID_DLEXT_USE_NAMESPACE;
+    extinfo.library_namespace = this->ToRawAndroidNamespace();
+    void* handle = android_dlopen_ext(lib_name, RTLD_NOW, &extinfo);
+    if (handle != nullptr) {
+      return handle;
+    }
+  } else {
+    void* handle =
+        NativeBridgeLoadLibraryExt(lib_name, RTLD_NOW, this->ToRawNativeBridgeNamespace());
+    if (handle != nullptr) {
+      return handle;
+    }
+  }
+  return Error() << GetLinkerError(IsBridged());
+}
+
+}  // namespace android
diff --git a/libnativeloader/native_loader_namespace.h b/libnativeloader/native_loader_namespace.h
new file mode 100644
index 0000000..ee84f61
--- /dev/null
+++ b/libnativeloader/native_loader_namespace.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBNATIVELOADER_NATIVE_LOADER_NAMESPACE_H_
+#define ART_LIBNATIVELOADER_NATIVE_LOADER_NAMESPACE_H_
+
+#if defined(__ANDROID__)
+
+#include <string>
+#include <variant>
+#include <vector>
+
+#include <android-base/logging.h>
+#include <android-base/result.h>
+#include <android/dlext.h>
+#include <log/log.h>
+#include <nativebridge/native_bridge.h>
+
+namespace android {
+
+using android::base::Result;
+
+// NativeLoaderNamespace abstracts a linker namespace for the native
+// architecture (ex: arm on arm) or the translated architecture (ex: arm on
+// x86). Instances of this class are managed by LibraryNamespaces object.
+struct NativeLoaderNamespace {
+ public:
+  static Result<NativeLoaderNamespace> Create(const std::string& name,
+                                              const std::string& search_paths,
+                                              const std::string& permitted_paths,
+                                              const NativeLoaderNamespace* parent, bool is_shared,
+                                              bool is_greylist_enabled,
+                                              bool also_used_as_anonymous);
+
+  NativeLoaderNamespace(NativeLoaderNamespace&&) = default;
+  NativeLoaderNamespace(const NativeLoaderNamespace&) = default;
+  NativeLoaderNamespace& operator=(const NativeLoaderNamespace&) = default;
+
+  // Accessors for the underlying raw namespace. Each throws/aborts via
+  // std::get if called on the wrong variant alternative — check IsBridged()
+  // first.
+  android_namespace_t* ToRawAndroidNamespace() const { return std::get<0>(raw_); }
+  native_bridge_namespace_t* ToRawNativeBridgeNamespace() const { return std::get<1>(raw_); }
+
+  std::string name() const { return name_; }
+  // True when raw_ holds a native_bridge_namespace_t* (variant index 1).
+  bool IsBridged() const { return raw_.index() == 1; }
+
+  Result<void> Link(const NativeLoaderNamespace& target, const std::string& shared_libs) const;
+  Result<void*> Load(const char* lib_name) const;
+
+  static Result<NativeLoaderNamespace> GetExportedNamespace(const std::string& name,
+                                                            bool is_bridged);
+  static Result<NativeLoaderNamespace> GetSystemNamespace(bool is_bridged);
+
+ private:
+  // Private: instances are only minted by the static factory functions above.
+  explicit NativeLoaderNamespace(const std::string& name, android_namespace_t* ns)
+      : name_(name), raw_(ns) {}
+  explicit NativeLoaderNamespace(const std::string& name, native_bridge_namespace_t* ns)
+      : name_(name), raw_(ns) {}
+
+  std::string name_;
+  // Which alternative is active doubles as the bridged/native flag.
+  std::variant<android_namespace_t*, native_bridge_namespace_t*> raw_;
+};
+
+}  // namespace android
+#endif  // #if defined(__ANDROID__)
+
+#endif  // ART_LIBNATIVELOADER_NATIVE_LOADER_NAMESPACE_H_
diff --git a/libnativeloader/native_loader_test.cpp b/libnativeloader/native_loader_test.cpp
new file mode 100644
index 0000000..8c4a8c5
--- /dev/null
+++ b/libnativeloader/native_loader_test.cpp
@@ -0,0 +1,678 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <dlfcn.h>
+#include <memory>
+#include <unordered_map>
+
+#include <android-base/strings.h>
+#include <gmock/gmock.h>
+#include <gtest/gtest.h>
+#include <jni.h>
+
+#include "native_loader_namespace.h"
+#include "nativehelper/scoped_utf_chars.h"
+#include "nativeloader/dlext_namespaces.h"
+#include "nativeloader/native_loader.h"
+#include "public_libraries.h"
+
+namespace android {
+namespace nativeloader {
+
+using ::testing::Eq;
+using ::testing::Return;
+using ::testing::StrEq;
+using ::testing::_;
+using internal::ConfigEntry;
+using internal::ParseConfig;
+
+#if defined(__LP64__)
+#define LIB_DIR "lib64"
+#else
+#define LIB_DIR "lib"
+#endif
+
+// gmock interface that represents interested platform APIs on libdl and libnativebridge
+class Platform {
+ public:
+  virtual ~Platform() {}
+
+  // libdl APIs
+  virtual void* dlopen(const char* filename, int flags) = 0;
+  virtual int dlclose(void* handle) = 0;
+  virtual char* dlerror(void) = 0;
+
+  // These mock_* are the APIs semantically the same across libdl and libnativebridge.
+  // Instead of having two set of mock APIs for the two, define only one set with an additional
+  // argument 'bool bridged' to identify the context (i.e., called for libdl or libnativebridge).
+  typedef char* mock_namespace_handle;
+  virtual bool mock_init_anonymous_namespace(bool bridged, const char* sonames,
+                                             const char* search_paths) = 0;
+  virtual mock_namespace_handle mock_create_namespace(
+      bool bridged, const char* name, const char* ld_library_path, const char* default_library_path,
+      uint64_t type, const char* permitted_when_isolated_path, mock_namespace_handle parent) = 0;
+  virtual bool mock_link_namespaces(bool bridged, mock_namespace_handle from,
+                                    mock_namespace_handle to, const char* sonames) = 0;
+  virtual mock_namespace_handle mock_get_exported_namespace(bool bridged, const char* name) = 0;
+  virtual void* mock_dlopen_ext(bool bridged, const char* filename, int flags,
+                                mock_namespace_handle ns) = 0;
+
+  // libnativebridge APIs for which libdl has no corresponding APIs
+  virtual bool NativeBridgeInitialized() = 0;
+  virtual const char* NativeBridgeGetError() = 0;
+  virtual bool NativeBridgeIsPathSupported(const char*) = 0;
+  virtual bool NativeBridgeIsSupported(const char*) = 0;
+
+  // To mock "ClassLoader Object.getParent()"
+  virtual const char* JniObject_getParent(const char*) = 0;
+};
+
+// The mock does not actually create a namespace object. But simply casts the pointer to the
+// string for the namespace name as the handle to the namespace object.
+#define TO_ANDROID_NAMESPACE(str) \
+  reinterpret_cast<struct android_namespace_t*>(const_cast<char*>(str))
+
+#define TO_BRIDGED_NAMESPACE(str) \
+  reinterpret_cast<struct native_bridge_namespace_t*>(const_cast<char*>(str))
+
+#define TO_MOCK_NAMESPACE(ns) reinterpret_cast<Platform::mock_namespace_handle>(ns)
+
+// These represent built-in namespaces created by the linker according to ld.config.txt
+static std::unordered_map<std::string, Platform::mock_namespace_handle> namespaces = {
+    {"system", TO_MOCK_NAMESPACE(TO_ANDROID_NAMESPACE("system"))},
+    {"default", TO_MOCK_NAMESPACE(TO_ANDROID_NAMESPACE("default"))},
+    {"com_android_art", TO_MOCK_NAMESPACE(TO_ANDROID_NAMESPACE("com_android_art"))},
+    {"sphal", TO_MOCK_NAMESPACE(TO_ANDROID_NAMESPACE("sphal"))},
+    {"vndk", TO_MOCK_NAMESPACE(TO_ANDROID_NAMESPACE("vndk"))},
+    {"vndk_product", TO_MOCK_NAMESPACE(TO_ANDROID_NAMESPACE("vndk_product"))},
+    {"com_android_neuralnetworks", TO_MOCK_NAMESPACE(TO_ANDROID_NAMESPACE("com_android_neuralnetworks"))},
+    {"com_android_os_statsd", TO_MOCK_NAMESPACE(TO_ANDROID_NAMESPACE("com_android_os_statsd"))},
+};
+
+// The actual gmock object
+class MockPlatform : public Platform {
+ public:
+  explicit MockPlatform(bool is_bridged) : is_bridged_(is_bridged) {
+    ON_CALL(*this, NativeBridgeIsSupported(_)).WillByDefault(Return(is_bridged_));
+    ON_CALL(*this, NativeBridgeIsPathSupported(_)).WillByDefault(Return(is_bridged_));
+    ON_CALL(*this, mock_get_exported_namespace(_, _))
+        .WillByDefault(testing::Invoke([](bool, const char* name) -> mock_namespace_handle {
+          if (namespaces.find(name) != namespaces.end()) {
+            return namespaces[name];
+          }
+          return TO_MOCK_NAMESPACE(TO_ANDROID_NAMESPACE("(namespace not found)"));
+        }));
+  }
+
+  // Mocking libdl APIs
+  MOCK_METHOD2(dlopen, void*(const char*, int));
+  MOCK_METHOD1(dlclose, int(void*));
+  MOCK_METHOD0(dlerror, char*());
+
+  // Mocking the common APIs
+  MOCK_METHOD3(mock_init_anonymous_namespace, bool(bool, const char*, const char*));
+  MOCK_METHOD7(mock_create_namespace,
+               mock_namespace_handle(bool, const char*, const char*, const char*, uint64_t,
+                                     const char*, mock_namespace_handle));
+  MOCK_METHOD4(mock_link_namespaces,
+               bool(bool, mock_namespace_handle, mock_namespace_handle, const char*));
+  MOCK_METHOD2(mock_get_exported_namespace, mock_namespace_handle(bool, const char*));
+  MOCK_METHOD4(mock_dlopen_ext, void*(bool, const char*, int, mock_namespace_handle));
+
+  // Mocking libnativebridge APIs
+  MOCK_METHOD0(NativeBridgeInitialized, bool());
+  MOCK_METHOD0(NativeBridgeGetError, const char*());
+  MOCK_METHOD1(NativeBridgeIsPathSupported, bool(const char*));
+  MOCK_METHOD1(NativeBridgeIsSupported, bool(const char*));
+
+  // Mocking "ClassLoader Object.getParent()"
+  MOCK_METHOD1(JniObject_getParent, const char*(const char*));
+
+ private:
+  bool is_bridged_;
+};
+
+static std::unique_ptr<MockPlatform> mock;
+
+// Provide C wrappers for the mock object.
+extern "C" {
+void* dlopen(const char* file, int flag) {
+  return mock->dlopen(file, flag);
+}
+
+int dlclose(void* handle) {
+  return mock->dlclose(handle);
+}
+
+char* dlerror(void) {
+  return mock->dlerror();
+}
+
+bool android_init_anonymous_namespace(const char* sonames, const char* search_path) {
+  return mock->mock_init_anonymous_namespace(false, sonames, search_path);
+}
+
+struct android_namespace_t* android_create_namespace(const char* name, const char* ld_library_path,
+                                                     const char* default_library_path,
+                                                     uint64_t type,
+                                                     const char* permitted_when_isolated_path,
+                                                     struct android_namespace_t* parent) {
+  return TO_ANDROID_NAMESPACE(
+      mock->mock_create_namespace(false, name, ld_library_path, default_library_path, type,
+                                  permitted_when_isolated_path, TO_MOCK_NAMESPACE(parent)));
+}
+
+bool android_link_namespaces(struct android_namespace_t* from, struct android_namespace_t* to,
+                             const char* sonames) {
+  return mock->mock_link_namespaces(false, TO_MOCK_NAMESPACE(from), TO_MOCK_NAMESPACE(to), sonames);
+}
+
+struct android_namespace_t* android_get_exported_namespace(const char* name) {
+  return TO_ANDROID_NAMESPACE(mock->mock_get_exported_namespace(false, name));
+}
+
+void* android_dlopen_ext(const char* filename, int flags, const android_dlextinfo* info) {
+  return mock->mock_dlopen_ext(false, filename, flags, TO_MOCK_NAMESPACE(info->library_namespace));
+}
+
+// libnativebridge APIs
+bool NativeBridgeIsSupported(const char* libpath) {
+  return mock->NativeBridgeIsSupported(libpath);
+}
+
+struct native_bridge_namespace_t* NativeBridgeGetExportedNamespace(const char* name) {
+  return TO_BRIDGED_NAMESPACE(mock->mock_get_exported_namespace(true, name));
+}
+
+struct native_bridge_namespace_t* NativeBridgeCreateNamespace(
+    const char* name, const char* ld_library_path, const char* default_library_path, uint64_t type,
+    const char* permitted_when_isolated_path, struct native_bridge_namespace_t* parent) {
+  return TO_BRIDGED_NAMESPACE(
+      mock->mock_create_namespace(true, name, ld_library_path, default_library_path, type,
+                                  permitted_when_isolated_path, TO_MOCK_NAMESPACE(parent)));
+}
+
+bool NativeBridgeLinkNamespaces(struct native_bridge_namespace_t* from,
+                                struct native_bridge_namespace_t* to, const char* sonames) {
+  return mock->mock_link_namespaces(true, TO_MOCK_NAMESPACE(from), TO_MOCK_NAMESPACE(to), sonames);
+}
+
+void* NativeBridgeLoadLibraryExt(const char* libpath, int flag,
+                                 struct native_bridge_namespace_t* ns) {
+  return mock->mock_dlopen_ext(true, libpath, flag, TO_MOCK_NAMESPACE(ns));
+}
+
+bool NativeBridgeInitialized() {
+  return mock->NativeBridgeInitialized();
+}
+
+bool NativeBridgeInitAnonymousNamespace(const char* public_ns_sonames,
+                                        const char* anon_ns_library_path) {
+  return mock->mock_init_anonymous_namespace(true, public_ns_sonames, anon_ns_library_path);
+}
+
+const char* NativeBridgeGetError() {
+  return mock->NativeBridgeGetError();
+}
+
+bool NativeBridgeIsPathSupported(const char* path) {
+  return mock->NativeBridgeIsPathSupported(path);
+}
+
+}  // extern "C"
+
+// A very simple JNI mock.
+// jstring is a pointer to utf8 char array. We don't need utf16 char here.
+// jobject, jclass, and jmethodID are also pointers to utf8 char arrays
+// Only a few JNI methods that are actually used in libnativeloader are mocked.
+JNINativeInterface* CreateJNINativeInterface() {
+  JNINativeInterface* inf = new JNINativeInterface();
+  memset(inf, 0, sizeof(JNINativeInterface));
+
+  inf->GetStringUTFChars = [](JNIEnv*, jstring s, jboolean*) -> const char* {
+    return reinterpret_cast<const char*>(s);
+  };
+
+  inf->ReleaseStringUTFChars = [](JNIEnv*, jstring, const char*) -> void { return; };
+
+  inf->NewStringUTF = [](JNIEnv*, const char* bytes) -> jstring {
+    return reinterpret_cast<jstring>(const_cast<char*>(bytes));
+  };
+
+  inf->FindClass = [](JNIEnv*, const char* name) -> jclass {
+    return reinterpret_cast<jclass>(const_cast<char*>(name));
+  };
+
+  inf->CallObjectMethodV = [](JNIEnv*, jobject obj, jmethodID mid, va_list) -> jobject {
+    if (strcmp("getParent", reinterpret_cast<const char*>(mid)) == 0) {
+      // JniObject_getParent can be a valid jobject or nullptr if there is
+      // no parent classloader.
+      const char* ret = mock->JniObject_getParent(reinterpret_cast<const char*>(obj));
+      return reinterpret_cast<jobject>(const_cast<char*>(ret));
+    }
+    return nullptr;
+  };
+
+  inf->GetMethodID = [](JNIEnv*, jclass, const char* name, const char*) -> jmethodID {
+    return reinterpret_cast<jmethodID>(const_cast<char*>(name));
+  };
+
+  inf->NewWeakGlobalRef = [](JNIEnv*, jobject obj) -> jobject { return obj; };
+
+  inf->IsSameObject = [](JNIEnv*, jobject a, jobject b) -> jboolean {
+    return strcmp(reinterpret_cast<const char*>(a), reinterpret_cast<const char*>(b)) == 0;
+  };
+
+  return inf;
+}
+
+static void* const any_nonnull = reinterpret_cast<void*>(0x12345678);
+
+// Custom matcher for comparing namespace handles
+MATCHER_P(NsEq, other, "") {
+  *result_listener << "comparing " << reinterpret_cast<const char*>(arg) << " and " << other;
+  return strcmp(reinterpret_cast<const char*>(arg), reinterpret_cast<const char*>(other)) == 0;
+}
+
+/////////////////////////////////////////////////////////////////
+
+// Test fixture
+class NativeLoaderTest : public ::testing::TestWithParam<bool> {
+ protected:
+  bool IsBridged() { return GetParam(); }
+
+  void SetUp() override {
+    mock = std::make_unique<testing::NiceMock<MockPlatform>>(IsBridged());
+
+    env = std::make_unique<JNIEnv>();
+    env->functions = CreateJNINativeInterface();
+  }
+
+  void SetExpectations() {
+    std::vector<std::string> default_public_libs =
+        android::base::Split(preloadable_public_libraries(), ":");
+    for (auto l : default_public_libs) {
+      EXPECT_CALL(*mock, dlopen(StrEq(l.c_str()), RTLD_NOW | RTLD_NODELETE))
+          .WillOnce(Return(any_nonnull));
+    }
+  }
+
+  void RunTest() { InitializeNativeLoader(); }
+
+  void TearDown() override {
+    ResetNativeLoader();
+    delete env->functions;
+    mock.reset();
+  }
+
+  std::unique_ptr<JNIEnv> env;
+};
+
+/////////////////////////////////////////////////////////////////
+
+TEST_P(NativeLoaderTest, InitializeLoadsDefaultPublicLibraries) {
+  SetExpectations();
+  RunTest();
+}
+
+INSTANTIATE_TEST_SUITE_P(NativeLoaderTests, NativeLoaderTest, testing::Bool());
+
+/////////////////////////////////////////////////////////////////
+
+class NativeLoaderTest_Create : public NativeLoaderTest {
+ protected:
+  // Test inputs (initialized to the default values). Overriding these
+  // must be done before calling SetExpectations() and RunTest().
+  uint32_t target_sdk_version = 29;
+  std::string class_loader = "my_classloader";
+  bool is_shared = false;
+  std::string dex_path = "/data/app/foo/classes.dex";
+  std::string library_path = "/data/app/foo/" LIB_DIR "/arm";
+  std::string permitted_path = "/data/app/foo/" LIB_DIR;
+
+  // expected output (.. for the default test inputs)
+  std::string expected_namespace_name = "classloader-namespace";
+  uint64_t expected_namespace_flags =
+      ANDROID_NAMESPACE_TYPE_ISOLATED | ANDROID_NAMESPACE_TYPE_ALSO_USED_AS_ANONYMOUS;
+  std::string expected_library_path = library_path;
+  std::string expected_permitted_path = std::string("/data:/mnt/expand:") + permitted_path;
+  std::string expected_parent_namespace = "system";
+  bool expected_link_with_platform_ns = true;
+  bool expected_link_with_art_ns = true;
+  bool expected_link_with_sphal_ns = !vendor_public_libraries().empty();
+  bool expected_link_with_vndk_ns = false;
+  bool expected_link_with_vndk_product_ns = false;
+  bool expected_link_with_default_ns = false;
+  bool expected_link_with_neuralnetworks_ns = true;
+  bool expected_link_with_statsd_ns = true;
+  std::string expected_shared_libs_to_platform_ns = default_public_libraries();
+  std::string expected_shared_libs_to_art_ns = art_public_libraries();
+  std::string expected_shared_libs_to_sphal_ns = vendor_public_libraries();
+  std::string expected_shared_libs_to_vndk_ns = vndksp_libraries_vendor();
+  std::string expected_shared_libs_to_vndk_product_ns = vndksp_libraries_product();
+  std::string expected_shared_libs_to_default_ns = default_public_libraries();
+  std::string expected_shared_libs_to_neuralnetworks_ns = neuralnetworks_public_libraries();
+  std::string expected_shared_libs_to_statsd_ns = statsd_public_libraries();
+
+  void SetExpectations() {
+    NativeLoaderTest::SetExpectations();
+
+    ON_CALL(*mock, JniObject_getParent(StrEq(class_loader))).WillByDefault(Return(nullptr));
+
+    EXPECT_CALL(*mock, NativeBridgeIsPathSupported(_)).Times(testing::AnyNumber());
+    EXPECT_CALL(*mock, NativeBridgeInitialized()).Times(testing::AnyNumber());
+
+    EXPECT_CALL(*mock, mock_create_namespace(
+                           Eq(IsBridged()), StrEq(expected_namespace_name), nullptr,
+                           StrEq(expected_library_path), expected_namespace_flags,
+                           StrEq(expected_permitted_path), NsEq(expected_parent_namespace.c_str())))
+        .WillOnce(Return(TO_MOCK_NAMESPACE(TO_ANDROID_NAMESPACE(dex_path.c_str()))));
+    if (expected_link_with_platform_ns) {
+      EXPECT_CALL(*mock, mock_link_namespaces(Eq(IsBridged()), _, NsEq("system"),
+                                              StrEq(expected_shared_libs_to_platform_ns)))
+          .WillOnce(Return(true));
+    }
+    if (expected_link_with_art_ns) {
+      EXPECT_CALL(*mock, mock_link_namespaces(Eq(IsBridged()), _, NsEq("com_android_art"),
+                                              StrEq(expected_shared_libs_to_art_ns)))
+          .WillOnce(Return(true));
+    }
+    if (expected_link_with_sphal_ns) {
+      EXPECT_CALL(*mock, mock_link_namespaces(Eq(IsBridged()), _, NsEq("sphal"),
+                                              StrEq(expected_shared_libs_to_sphal_ns)))
+          .WillOnce(Return(true));
+    }
+    if (expected_link_with_vndk_ns) {
+      EXPECT_CALL(*mock, mock_link_namespaces(Eq(IsBridged()), _, NsEq("vndk"),
+                                              StrEq(expected_shared_libs_to_vndk_ns)))
+          .WillOnce(Return(true));
+    }
+    if (expected_link_with_vndk_product_ns) {
+      EXPECT_CALL(*mock, mock_link_namespaces(Eq(IsBridged()), _, NsEq("vndk_product"),
+                                              StrEq(expected_shared_libs_to_vndk_product_ns)))
+          .WillOnce(Return(true));
+    }
+    if (expected_link_with_default_ns) {
+      EXPECT_CALL(*mock, mock_link_namespaces(Eq(IsBridged()), _, NsEq("default"),
+                                              StrEq(expected_shared_libs_to_default_ns)))
+          .WillOnce(Return(true));
+    }
+    if (expected_link_with_neuralnetworks_ns) {
+      EXPECT_CALL(*mock, mock_link_namespaces(Eq(IsBridged()), _, NsEq("com_android_neuralnetworks"),
+                                              StrEq(expected_shared_libs_to_neuralnetworks_ns)))
+          .WillOnce(Return(true));
+    }
+    if (expected_link_with_statsd_ns) {
+      EXPECT_CALL(*mock, mock_link_namespaces(Eq(IsBridged()), _, NsEq("com_android_os_statsd"),
+                                              StrEq(expected_shared_libs_to_statsd_ns)))
+          .WillOnce(Return(true));
+    }
+  }
+
+  void RunTest() {
+    NativeLoaderTest::RunTest();
+
+    jstring err = CreateClassLoaderNamespace(
+        env(), target_sdk_version, env()->NewStringUTF(class_loader.c_str()), is_shared,
+        env()->NewStringUTF(dex_path.c_str()), env()->NewStringUTF(library_path.c_str()),
+        env()->NewStringUTF(permitted_path.c_str()));
+
+    // no error
+    EXPECT_EQ(err, nullptr) << "Error is: " << std::string(ScopedUtfChars(env(), err).c_str());
+
+    if (!IsBridged()) {
+      struct android_namespace_t* ns =
+          FindNamespaceByClassLoader(env(), env()->NewStringUTF(class_loader.c_str()));
+
+      // The created namespace is for this apk
+      EXPECT_EQ(dex_path.c_str(), reinterpret_cast<const char*>(ns));
+    } else {
+      struct NativeLoaderNamespace* ns =
+          FindNativeLoaderNamespaceByClassLoader(env(), env()->NewStringUTF(class_loader.c_str()));
+
+      // The created namespace is for this apk
+      EXPECT_STREQ(dex_path.c_str(),
+                   reinterpret_cast<const char*>(ns->ToRawNativeBridgeNamespace()));
+    }
+  }
+
+  JNIEnv* env() { return NativeLoaderTest::env.get(); }
+};
+
+TEST_P(NativeLoaderTest_Create, DownloadedApp) {
+  SetExpectations();
+  RunTest();
+}
+
+TEST_P(NativeLoaderTest_Create, BundledSystemApp) {
+  dex_path = "/system/app/foo/foo.apk";
+  is_shared = true;
+
+  expected_namespace_name = "classloader-namespace-shared";
+  expected_namespace_flags |= ANDROID_NAMESPACE_TYPE_SHARED;
+  SetExpectations();
+  RunTest();
+}
+
+TEST_P(NativeLoaderTest_Create, BundledVendorApp) {
+  dex_path = "/vendor/app/foo/foo.apk";
+  is_shared = true;
+
+  expected_namespace_name = "classloader-namespace-shared";
+  expected_namespace_flags |= ANDROID_NAMESPACE_TYPE_SHARED;
+  SetExpectations();
+  RunTest();
+}
+
+TEST_P(NativeLoaderTest_Create, UnbundledVendorApp) {
+  dex_path = "/vendor/app/foo/foo.apk";
+  is_shared = false;
+
+  expected_namespace_name = "vendor-classloader-namespace";
+  expected_library_path = expected_library_path + ":/vendor/" LIB_DIR;
+  expected_permitted_path = expected_permitted_path + ":/vendor/" LIB_DIR;
+  expected_shared_libs_to_platform_ns =
+      expected_shared_libs_to_platform_ns + ":" + llndk_libraries_vendor();
+  expected_link_with_vndk_ns = true;
+  SetExpectations();
+  RunTest();
+}
+
+TEST_P(NativeLoaderTest_Create, BundledProductApp) {
+  dex_path = "/product/app/foo/foo.apk";
+  is_shared = true;
+
+  expected_namespace_name = "classloader-namespace-shared";
+  expected_namespace_flags |= ANDROID_NAMESPACE_TYPE_SHARED;
+  SetExpectations();
+  RunTest();
+}
+
+TEST_P(NativeLoaderTest_Create, UnbundledProductApp) {
+  dex_path = "/product/app/foo/foo.apk";
+  is_shared = false;
+
+  if (is_product_vndk_version_defined()) {
+    expected_namespace_name = "vendor-classloader-namespace";
+    expected_library_path = expected_library_path + ":/product/" LIB_DIR ":/system/product/" LIB_DIR;
+    expected_permitted_path =
+        expected_permitted_path + ":/product/" LIB_DIR ":/system/product/" LIB_DIR;
+    expected_shared_libs_to_platform_ns =
+        expected_shared_libs_to_platform_ns + ":" + llndk_libraries_product();
+    expected_link_with_vndk_product_ns = true;
+  }
+  SetExpectations();
+  RunTest();
+}
+
+TEST_P(NativeLoaderTest_Create, NamespaceForSharedLibIsNotUsedAsAnonymousNamespace) {
+  if (IsBridged()) {
+    // There is no shared lib in translated arch
+    // TODO(jiyong): revisit this
+    return;
+  }
+  // compared to apks, for java shared libs, library_path is empty; java shared
+  // libs don't have their own native libs. They use platform's.
+  library_path = "";
+  expected_library_path = library_path;
+  // no ALSO_USED_AS_ANONYMOUS
+  expected_namespace_flags = ANDROID_NAMESPACE_TYPE_ISOLATED;
+  SetExpectations();
+  RunTest();
+}
+
+TEST_P(NativeLoaderTest_Create, TwoApks) {
+  SetExpectations();
+  const uint32_t second_app_target_sdk_version = 29;
+  const std::string second_app_class_loader = "second_app_classloader";
+  const bool second_app_is_shared = false;
+  const std::string second_app_dex_path = "/data/app/bar/classes.dex";
+  const std::string second_app_library_path = "/data/app/bar/" LIB_DIR "/arm";
+  const std::string second_app_permitted_path = "/data/app/bar/" LIB_DIR;
+  const std::string expected_second_app_permitted_path =
+      std::string("/data:/mnt/expand:") + second_app_permitted_path;
+  const std::string expected_second_app_parent_namespace = "classloader-namespace";
+  // no ALSO_USED_AS_ANONYMOUS
+  const uint64_t expected_second_namespace_flags = ANDROID_NAMESPACE_TYPE_ISOLATED;
+
+  // The scenario is that second app is loaded by the first app.
+  // So the first app's classloader (`classloader`) is parent of the second
+  // app's classloader.
+  ON_CALL(*mock, JniObject_getParent(StrEq(second_app_class_loader)))
+      .WillByDefault(Return(class_loader.c_str()));
+
+  // namespace for the second app is created. Its parent is set to the namespace
+  // of the first app.
+  EXPECT_CALL(*mock, mock_create_namespace(
+                         Eq(IsBridged()), StrEq(expected_namespace_name), nullptr,
+                         StrEq(second_app_library_path), expected_second_namespace_flags,
+                         StrEq(expected_second_app_permitted_path), NsEq(dex_path.c_str())))
+      .WillOnce(Return(TO_MOCK_NAMESPACE(TO_ANDROID_NAMESPACE(second_app_dex_path.c_str()))));
+  EXPECT_CALL(*mock, mock_link_namespaces(Eq(IsBridged()), NsEq(second_app_dex_path.c_str()), _, _))
+      .WillRepeatedly(Return(true));
+
+  RunTest();
+  jstring err = CreateClassLoaderNamespace(
+      env(), second_app_target_sdk_version, env()->NewStringUTF(second_app_class_loader.c_str()),
+      second_app_is_shared, env()->NewStringUTF(second_app_dex_path.c_str()),
+      env()->NewStringUTF(second_app_library_path.c_str()),
+      env()->NewStringUTF(second_app_permitted_path.c_str()));
+
+  // success
+  EXPECT_EQ(err, nullptr) << "Error is: " << std::string(ScopedUtfChars(env(), err).c_str());
+
+  if (!IsBridged()) {
+    struct android_namespace_t* ns =
+        FindNamespaceByClassLoader(env(), env()->NewStringUTF(second_app_class_loader.c_str()));
+
+    // The created namespace is for the second apk
+    EXPECT_EQ(second_app_dex_path.c_str(), reinterpret_cast<const char*>(ns));
+  } else {
+    struct NativeLoaderNamespace* ns = FindNativeLoaderNamespaceByClassLoader(
+        env(), env()->NewStringUTF(second_app_class_loader.c_str()));
+
+    // The created namespace is for the second apk
+    EXPECT_STREQ(second_app_dex_path.c_str(),
+                 reinterpret_cast<const char*>(ns->ToRawNativeBridgeNamespace()));
+  }
+}
+
+INSTANTIATE_TEST_SUITE_P(NativeLoaderTests_Create, NativeLoaderTest_Create, testing::Bool());
+
+const std::function<Result<bool>(const struct ConfigEntry&)> always_true =
+    [](const struct ConfigEntry&) -> Result<bool> { return true; };
+
+TEST(NativeLoaderConfigParser, NamesAndComments) {
+  const char file_content[] = R"(
+######
+
+libA.so
+#libB.so
+
+
+      libC.so
+libD.so
+    #### libE.so
+)";
+  const std::vector<std::string> expected_result = {"libA.so", "libC.so", "libD.so"};
+  Result<std::vector<std::string>> result = ParseConfig(file_content, always_true);
+  ASSERT_RESULT_OK(result);
+  ASSERT_EQ(expected_result, *result);
+}
+
+TEST(NativeLoaderConfigParser, WithBitness) {
+  const char file_content[] = R"(
+libA.so 32
+libB.so 64
+libC.so
+)";
+#if defined(__LP64__)
+  const std::vector<std::string> expected_result = {"libB.so", "libC.so"};
+#else
+  const std::vector<std::string> expected_result = {"libA.so", "libC.so"};
+#endif
+  Result<std::vector<std::string>> result = ParseConfig(file_content, always_true);
+  ASSERT_RESULT_OK(result);
+  ASSERT_EQ(expected_result, *result);
+}
+
+TEST(NativeLoaderConfigParser, WithNoPreload) {
+  const char file_content[] = R"(
+libA.so nopreload
+libB.so nopreload
+libC.so
+)";
+
+  const std::vector<std::string> expected_result = {"libC.so"};
+  Result<std::vector<std::string>> result =
+      ParseConfig(file_content,
+                  [](const struct ConfigEntry& entry) -> Result<bool> { return !entry.nopreload; });
+  ASSERT_RESULT_OK(result);
+  ASSERT_EQ(expected_result, *result);
+}
+
+TEST(NativeLoaderConfigParser, WithNoPreloadAndBitness) {
+  const char file_content[] = R"(
+libA.so nopreload 32
+libB.so 64 nopreload
+libC.so 32
+libD.so 64
+libE.so nopreload
+)";
+
+#if defined(__LP64__)
+  const std::vector<std::string> expected_result = {"libD.so"};
+#else
+  const std::vector<std::string> expected_result = {"libC.so"};
+#endif
+  Result<std::vector<std::string>> result =
+      ParseConfig(file_content,
+                  [](const struct ConfigEntry& entry) -> Result<bool> { return !entry.nopreload; });
+  ASSERT_RESULT_OK(result);
+  ASSERT_EQ(expected_result, *result);
+}
+
+TEST(NativeLoaderConfigParser, RejectMalformed) {
+  ASSERT_FALSE(ParseConfig("libA.so 32 64", always_true).ok());
+  ASSERT_FALSE(ParseConfig("libA.so 32 32", always_true).ok());
+  ASSERT_FALSE(ParseConfig("libA.so 32 nopreload 64", always_true).ok());
+  ASSERT_FALSE(ParseConfig("32 libA.so nopreload", always_true).ok());
+  ASSERT_FALSE(ParseConfig("nopreload libA.so 32", always_true).ok());
+  ASSERT_FALSE(ParseConfig("libA.so nopreload # comment", always_true).ok());
+}
+
+}  // namespace nativeloader
+}  // namespace android
diff --git a/libnativeloader/public_libraries.cpp b/libnativeloader/public_libraries.cpp
new file mode 100644
index 0000000..a9d4e4d
--- /dev/null
+++ b/libnativeloader/public_libraries.cpp
@@ -0,0 +1,452 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "nativeloader"
+
+#include "public_libraries.h"
+
+#include <dirent.h>
+
+#include <algorithm>
+#include <memory>
+
+#include <android-base/file.h>
+#include <android-base/logging.h>
+#include <android-base/properties.h>
+#include <android-base/result.h>
+#include <android-base/strings.h>
+#include <log/log.h>
+
+#if defined(__ANDROID__)
+#include <android/sysprop/VndkProperties.sysprop.h>
+#endif
+
+#include "utils.h"
+
+namespace android::nativeloader {
+
+using android::base::ErrnoError;
+using android::base::Result;
+using internal::ConfigEntry;
+using internal::ParseConfig;
+using std::literals::string_literals::operator""s;
+
+namespace {
+
+constexpr const char* kDefaultPublicLibrariesFile = "/etc/public.libraries.txt";
+constexpr const char* kExtendedPublicLibrariesFilePrefix = "public.libraries-";
+constexpr const char* kExtendedPublicLibrariesFileSuffix = ".txt";
+constexpr const char* kVendorPublicLibrariesFile = "/vendor/etc/public.libraries.txt";
+constexpr const char* kLlndkLibrariesFile = "/apex/com.android.vndk.v{}/etc/llndk.libraries.{}.txt";
+constexpr const char* kVndkLibrariesFile = "/apex/com.android.vndk.v{}/etc/vndksp.libraries.{}.txt";
+
+const std::vector<const std::string> kArtApexPublicLibraries = {
+    "libicuuc.so",
+    "libicui18n.so",
+};
+
+constexpr const char* kArtApexLibPath = "/apex/com.android.art/" LIB;
+
+constexpr const char* kNeuralNetworksApexPublicLibrary = "libneuralnetworks.so";
+
+constexpr const char* kStatsdApexPublicLibrary = "libstats_jni.so";
+
+// TODO(b/130388701): do we need this?
+std::string root_dir() {
+  static const char* android_root_env = getenv("ANDROID_ROOT");
+  return android_root_env != nullptr ? android_root_env : "/system";
+}
+
+bool debuggable() {
+  static bool debuggable = android::base::GetBoolProperty("ro.debuggable", false);
+  return debuggable;
+}
+
+std::string vndk_version_str(bool use_product_vndk) {
+  if (use_product_vndk) {
+    static std::string product_vndk_version = get_vndk_version(true);
+    return product_vndk_version;
+  } else {
+    static std::string vendor_vndk_version = get_vndk_version(false);
+    return vendor_vndk_version;
+  }
+}
+
+// For debuggable platform builds use ANDROID_ADDITIONAL_PUBLIC_LIBRARIES environment
+// variable to add libraries to the list. This is intended for platform tests only.
+std::string additional_public_libraries() {
+  if (debuggable()) {
+    const char* val = getenv("ANDROID_ADDITIONAL_PUBLIC_LIBRARIES");
+    return val ? val : "";
+  }
+  return "";
+}
+
+// Insert the VNDK version into every {} placeholder in *file_name.
+void InsertVndkVersionStr(std::string* file_name, bool use_product_vndk) {
+  CHECK(file_name != nullptr);
+  auto version = vndk_version_str(use_product_vndk);
+  size_t pos = file_name->find("{}");
+  while (pos != std::string::npos) {
+    file_name->replace(pos, 2, version);
+    pos = file_name->find("{}", pos + version.size());
+  }
+}
+
+const std::function<Result<bool>(const struct ConfigEntry&)> always_true =
+    [](const struct ConfigEntry&) -> Result<bool> { return true; };
+
+Result<std::vector<std::string>> ReadConfig(
+    const std::string& configFile,
+    const std::function<Result<bool>(const ConfigEntry& /* entry */)>& filter_fn) {
+  std::string file_content;
+  if (!base::ReadFileToString(configFile, &file_content)) {
+    return ErrnoError();
+  }
+  Result<std::vector<std::string>> result = ParseConfig(file_content, filter_fn);
+  if (!result.ok()) {
+    return Errorf("Cannot parse {}: {}", configFile, result.error().message());
+  }
+  return result;
+}
+
+void ReadExtensionLibraries(const char* dirname, std::vector<std::string>* sonames) {
+  std::unique_ptr<DIR, decltype(&closedir)> dir(opendir(dirname), closedir);
+  if (dir != nullptr) {
+    // Failing to open the dir is not an error; this can happen in
+    // webview_zygote.
+    while (struct dirent* ent = readdir(dir.get())) {
+      if (ent->d_type != DT_REG && ent->d_type != DT_LNK) {
+        continue;
+      }
+      const std::string filename(ent->d_name);
+      std::string_view fn = filename;
+      if (android::base::ConsumePrefix(&fn, kExtendedPublicLibrariesFilePrefix) &&
+          android::base::ConsumeSuffix(&fn, kExtendedPublicLibrariesFileSuffix)) {
+        const std::string company_name(fn);
+        const std::string config_file_path = dirname + "/"s + filename;
+        LOG_ALWAYS_FATAL_IF(
+            company_name.empty(),
+            "Error extracting company name from public native library list file path \"%s\"",
+            config_file_path.c_str());
+
+        auto ret = ReadConfig(
+            config_file_path, [&company_name](const struct ConfigEntry& entry) -> Result<bool> {
+              if (android::base::StartsWith(entry.soname, "lib") &&
+                  android::base::EndsWith(entry.soname, "." + company_name + ".so")) {
+                return true;
+              } else {
+                return Errorf("Library name \"{}\" does not end with the company name {}.",
+                              entry.soname, company_name);
+              }
+            });
+        if (ret.ok()) {
+          sonames->insert(sonames->end(), ret->begin(), ret->end());
+        } else {
+          LOG_ALWAYS_FATAL("Error reading public native library list from \"%s\": %s",
+                           config_file_path.c_str(), ret.error().message().c_str());
+        }
+      }
+    }
+  }
+}
+
+static std::string InitDefaultPublicLibraries(bool for_preload) {
+  std::string config_file = root_dir() + kDefaultPublicLibrariesFile;
+  auto sonames =
+      ReadConfig(config_file, [&for_preload](const struct ConfigEntry& entry) -> Result<bool> {
+        if (for_preload) {
+          return !entry.nopreload;
+        } else {
+          return true;
+        }
+      });
+  if (!sonames.ok()) {
+    LOG_ALWAYS_FATAL("Error reading public native library list from \"%s\": %s",
+                     config_file.c_str(), sonames.error().message().c_str());
+    return "";
+  }
+
+  std::string additional_libs = additional_public_libraries();
+  if (!additional_libs.empty()) {
+    auto vec = base::Split(additional_libs, ":");
+    std::copy(vec.begin(), vec.end(), std::back_inserter(*sonames));
+  }
+
+  // If this is for preloading libs, don't remove the libs from APEXes.
+  if (for_preload) {
+    return android::base::Join(*sonames, ':');
+  }
+
+  // Remove the public libs in the art namespace.
+  // These libs are listed in public.android.txt, but we don't want the rest of android
+  // in default namespace to dlopen the libs.
+  // For example, libicuuc.so is exposed to classloader namespace from art namespace.
+  // Unfortunately, it does not have stable C symbols, and default namespace should only use
+  // stable symbols in libandroidicu.so. http://b/120786417
+  for (const std::string& lib_name : kArtApexPublicLibraries) {
+    std::string path(kArtApexLibPath);
+    path.append("/").append(lib_name);
+
+    struct stat s;
+    // Do nothing if the path in /apex does not exist.
+    // Runtime APEX must be mounted since libnativeloader is in the same APEX
+    if (stat(path.c_str(), &s) != 0) {
+      continue;
+    }
+
+    auto it = std::find(sonames->begin(), sonames->end(), lib_name);
+    if (it != sonames->end()) {
+      sonames->erase(it);
+    }
+  }
+
+  // Remove the public libs in the nnapi namespace.
+  auto it = std::find(sonames->begin(), sonames->end(), kNeuralNetworksApexPublicLibrary);
+  if (it != sonames->end()) {
+    sonames->erase(it);
+  }
+  return android::base::Join(*sonames, ':');
+}
+
+static std::string InitArtPublicLibraries() {
+  CHECK_GT((int)sizeof(kArtApexPublicLibraries), 0);
+  std::string list = android::base::Join(kArtApexPublicLibraries, ":");
+
+  std::string additional_libs = additional_public_libraries();
+  if (!additional_libs.empty()) {
+    list = list + ':' + additional_libs;
+  }
+  return list;
+}
+
+static std::string InitVendorPublicLibraries() {
+  // This file is optional, quietly ignore if the file does not exist.
+  auto sonames = ReadConfig(kVendorPublicLibrariesFile, always_true);
+  if (!sonames.ok()) {
+    return "";
+  }
+  return android::base::Join(*sonames, ':');
+}
+
+// Read /system/etc/public.libraries-<companyname>.txt,
+// /system_ext/etc/public.libraries-<companyname>.txt and
+// /product/etc/public.libraries-<companyname>.txt, which contain partner-defined
+// system libs that are exposed to apps. The libs in the txt files must be
+// named lib<name>.<companyname>.so.
+static std::string InitExtendedPublicLibraries() {
+  std::vector<std::string> sonames;
+  ReadExtensionLibraries("/system/etc", &sonames);
+  ReadExtensionLibraries("/system_ext/etc", &sonames);
+  ReadExtensionLibraries("/product/etc", &sonames);
+  return android::base::Join(sonames, ':');
+}
+
+static std::string InitLlndkLibrariesVendor() {
+  std::string config_file = kLlndkLibrariesFile;
+  InsertVndkVersionStr(&config_file, false);
+  auto sonames = ReadConfig(config_file, always_true);
+  if (!sonames.ok()) {
+    LOG_ALWAYS_FATAL("%s: %s", config_file.c_str(), sonames.error().message().c_str());
+    return "";
+  }
+  return android::base::Join(*sonames, ':');
+}
+
+static std::string InitLlndkLibrariesProduct() {
+  if (!is_product_vndk_version_defined()) {
+    return "";
+  }
+  std::string config_file = kLlndkLibrariesFile;
+  InsertVndkVersionStr(&config_file, true);
+  auto sonames = ReadConfig(config_file, always_true);
+  if (!sonames.ok()) {
+    LOG_ALWAYS_FATAL("%s: %s", config_file.c_str(), sonames.error().message().c_str());
+    return "";
+  }
+  return android::base::Join(*sonames, ':');
+}
+
+static std::string InitVndkspLibrariesVendor() {
+  std::string config_file = kVndkLibrariesFile;
+  InsertVndkVersionStr(&config_file, false);
+  auto sonames = ReadConfig(config_file, always_true);
+  if (!sonames.ok()) {
+    LOG_ALWAYS_FATAL("%s", sonames.error().message().c_str());
+    return "";
+  }
+  return android::base::Join(*sonames, ':');
+}
+
+static std::string InitVndkspLibrariesProduct() {
+  if (!is_product_vndk_version_defined()) {
+    return "";
+  }
+  std::string config_file = kVndkLibrariesFile;
+  InsertVndkVersionStr(&config_file, true);
+  auto sonames = ReadConfig(config_file, always_true);
+  if (!sonames.ok()) {
+    LOG_ALWAYS_FATAL("%s", sonames.error().message().c_str());
+    return "";
+  }
+  return android::base::Join(*sonames, ':');
+}
+
+static std::string InitNeuralNetworksPublicLibraries() {
+  return kNeuralNetworksApexPublicLibrary;
+}
+
+static std::string InitStatsdPublicLibraries() {
+  return kStatsdApexPublicLibrary;
+}
+
+}  // namespace
+
+const std::string& preloadable_public_libraries() {
+  static std::string list = InitDefaultPublicLibraries(/*for_preload*/ true);
+  return list;
+}
+
+const std::string& default_public_libraries() {
+  static std::string list = InitDefaultPublicLibraries(/*for_preload*/ false);
+  return list;
+}
+
+const std::string& art_public_libraries() {
+  static std::string list = InitArtPublicLibraries();
+  return list;
+}
+
+const std::string& vendor_public_libraries() {
+  static std::string list = InitVendorPublicLibraries();
+  return list;
+}
+
+const std::string& extended_public_libraries() {
+  static std::string list = InitExtendedPublicLibraries();
+  return list;
+}
+
+const std::string& neuralnetworks_public_libraries() {
+  static std::string list = InitNeuralNetworksPublicLibraries();
+  return list;
+}
+
+const std::string& statsd_public_libraries() {
+  static std::string list = InitStatsdPublicLibraries();
+  return list;
+}
+
+const std::string& llndk_libraries_product() {
+  static std::string list = InitLlndkLibrariesProduct();
+  return list;
+}
+
+const std::string& llndk_libraries_vendor() {
+  static std::string list = InitLlndkLibrariesVendor();
+  return list;
+}
+
+const std::string& vndksp_libraries_product() {
+  static std::string list = InitVndkspLibrariesProduct();
+  return list;
+}
+
+const std::string& vndksp_libraries_vendor() {
+  static std::string list = InitVndkspLibrariesVendor();
+  return list;
+}
+
+bool is_product_vndk_version_defined() {
+#if defined(__ANDROID__)
+  return android::sysprop::VndkProperties::product_vndk_version().has_value();
+#else
+  return false;
+#endif
+}
+
+std::string get_vndk_version(bool is_product_vndk) {
+#if defined(__ANDROID__)
+  if (is_product_vndk) {
+    return android::sysprop::VndkProperties::product_vndk_version().value_or("");
+  }
+  return android::sysprop::VndkProperties::vendor_vndk_version().value_or("");
+#else
+  if (is_product_vndk) {
+    return android::base::GetProperty("ro.product.vndk.version", "");
+  }
+  return android::base::GetProperty("ro.vndk.version", "");
+#endif
+}
+
+namespace internal {
+// Exported for testing
+Result<std::vector<std::string>> ParseConfig(
+    const std::string& file_content,
+    const std::function<Result<bool>(const ConfigEntry& /* entry */)>& filter_fn) {
+  std::vector<std::string> lines = base::Split(file_content, "\n");
+
+  std::vector<std::string> sonames;
+  for (auto& line : lines) {
+    auto trimmed_line = base::Trim(line);
+    if (trimmed_line[0] == '#' || trimmed_line.empty()) {
+      continue;
+    }
+
+    std::vector<std::string> tokens = android::base::Split(trimmed_line, " ");
+    if (tokens.size() < 1 || tokens.size() > 3) {
+      return Errorf("Malformed line \"{}\"", line);
+    }
+    struct ConfigEntry entry = {.soname = "", .nopreload = false, .bitness = ALL};
+    size_t i = tokens.size();
+    while (i-- > 0) {
+      if (tokens[i] == "nopreload") {
+        entry.nopreload = true;
+      } else if (tokens[i] == "32" || tokens[i] == "64") {
+        if (entry.bitness != ALL) {
+          return Errorf("Malformed line \"{}\": bitness can be specified only once", line);
+        }
+        entry.bitness = tokens[i] == "32" ? ONLY_32 : ONLY_64;
+      } else {
+        if (i != 0) {
+          return Errorf("Malformed line \"{}\"", line);
+        }
+        entry.soname = tokens[i];
+      }
+    }
+
+    // skip 32-bit lib on 64-bit process and vice versa
+#if defined(__LP64__)
+    if (entry.bitness == ONLY_32) continue;
+#else
+    if (entry.bitness == ONLY_64) continue;
+#endif
+
+    Result<bool> ret = filter_fn(entry);
+    if (!ret.ok()) {
+      return ret.error();
+    }
+    if (*ret) {
+      // filter_fn has returned true.
+      sonames.push_back(entry.soname);
+    }
+  }
+  return sonames;
+}
+
+}  // namespace internal
+
+}  // namespace android::nativeloader
diff --git a/libnativeloader/public_libraries.h b/libnativeloader/public_libraries.h
new file mode 100644
index 0000000..f086b3b
--- /dev/null
+++ b/libnativeloader/public_libraries.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBNATIVELOADER_PUBLIC_LIBRARIES_H_
+#define ART_LIBNATIVELOADER_PUBLIC_LIBRARIES_H_
+
+#include <algorithm>
+#include <string>
+
+#include <android-base/result.h>
+
+namespace android::nativeloader {
+
+using android::base::Result;
+
+// These provide the list of libraries that are available to the namespace for apps.
+// Not all of the libraries are available to apps. Depending on the context,
+// e.g., if it is a vendor app or not, different set of libraries are made available.
+const std::string& preloadable_public_libraries();
+const std::string& default_public_libraries();
+const std::string& art_public_libraries();
+const std::string& statsd_public_libraries();
+const std::string& vendor_public_libraries();
+const std::string& extended_public_libraries();
+const std::string& neuralnetworks_public_libraries();
+const std::string& llndk_libraries_product();
+const std::string& llndk_libraries_vendor();
+const std::string& vndksp_libraries_product();
+const std::string& vndksp_libraries_vendor();
+
+// Returns true if libnativeloader is running on devices and the device has
+// ro.product.vndk.version property. It returns false for host.
+bool is_product_vndk_version_defined();
+
+std::string get_vndk_version(bool is_product_vndk);
+
+// These are exported for testing
+namespace internal {
+
+enum Bitness { ALL = 0, ONLY_32, ONLY_64 };
+
+struct ConfigEntry {
+  std::string soname;
+  bool nopreload;
+  Bitness bitness;
+};
+
+Result<std::vector<std::string>> ParseConfig(
+    const std::string& file_content,
+    const std::function<Result<bool>(const ConfigEntry& /* entry */)>& filter_fn);
+
+}  // namespace internal
+
+}  // namespace android::nativeloader
+
+#endif  // ART_LIBNATIVELOADER_PUBLIC_LIBRARIES_H_
diff --git a/libnativeloader/test/Android.bp b/libnativeloader/test/Android.bp
new file mode 100644
index 0000000..72e8c0f
--- /dev/null
+++ b/libnativeloader/test/Android.bp
@@ -0,0 +1,85 @@
+//
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_library {
+    name: "libfoo.oem1",
+    srcs: ["test.cpp"],
+    cflags: ["-DLIBNAME=\"libfoo.oem1.so\""],
+    shared_libs: [
+        "libbase",
+    ],
+}
+
+cc_library {
+    name: "libbar.oem1",
+    srcs: ["test.cpp"],
+    cflags: ["-DLIBNAME=\"libbar.oem1.so\""],
+    shared_libs: [
+        "libbase",
+    ],
+}
+
+cc_library {
+    name: "libfoo.oem2",
+    srcs: ["test.cpp"],
+    cflags: ["-DLIBNAME=\"libfoo.oem2.so\""],
+    shared_libs: [
+        "libbase",
+    ],
+}
+
+cc_library {
+    name: "libbar.oem2",
+    srcs: ["test.cpp"],
+    cflags: ["-DLIBNAME=\"libbar.oem2.so\""],
+    shared_libs: [
+        "libbase",
+    ],
+}
+
+cc_library {
+    name: "libfoo.product1",
+    srcs: ["test.cpp"],
+    cflags: ["-DLIBNAME=\"libfoo.product1.so\""],
+    product_specific: true,
+    shared_libs: [
+        "libbase",
+    ],
+}
+
+cc_library {
+    name: "libbar.product1",
+    srcs: ["test.cpp"],
+    cflags: ["-DLIBNAME=\"libbar.product1.so\""],
+    product_specific: true,
+    shared_libs: [
+        "libbase",
+    ],
+}
+
+// Build the test for the C API.
+cc_test {
+    name: "libnativeloader-api-tests",
+    defaults: [
+        "art_defaults",
+        "art_test_defaults",
+    ],
+    test_per_src: true,
+    srcs: [
+        "api_test.c",
+    ],
+    header_libs: ["libnativeloader-headers"],
+}
diff --git a/libnativeloader/test/Android.mk b/libnativeloader/test/Android.mk
new file mode 100644
index 0000000..65e7b09
--- /dev/null
+++ b/libnativeloader/test/Android.mk
@@ -0,0 +1,57 @@
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := public.libraries-oem1.txt
+LOCAL_SRC_FILES:= $(LOCAL_MODULE)
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := public.libraries-oem2.txt
+LOCAL_SRC_FILES:= $(LOCAL_MODULE)
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := public.libraries-product1.txt
+LOCAL_SRC_FILES:= $(LOCAL_MODULE)
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT_PRODUCT_ETC)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_PACKAGE_NAME := oemlibrarytest-system
+LOCAL_MODULE_TAGS := tests
+LOCAL_MANIFEST_FILE := system/AndroidManifest.xml
+LOCAL_SRC_FILES := $(call all-java-files-under, src)
+LOCAL_SDK_VERSION := current
+LOCAL_PROGUARD_ENABLED := disabled
+LOCAL_MODULE_PATH := $(TARGET_OUT_APPS)
+include $(BUILD_PACKAGE)
+
+include $(CLEAR_VARS)
+LOCAL_PACKAGE_NAME := oemlibrarytest-vendor
+LOCAL_MODULE_TAGS := tests
+LOCAL_MANIFEST_FILE := vendor/AndroidManifest.xml
+LOCAL_SRC_FILES := $(call all-java-files-under, src)
+LOCAL_SDK_VERSION := current
+LOCAL_PROGUARD_ENABLED := disabled
+LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR_APPS)
+include $(BUILD_PACKAGE)
diff --git a/libnativeloader/test/api_test.c b/libnativeloader/test/api_test.c
new file mode 100644
index 0000000..e7025fd
--- /dev/null
+++ b/libnativeloader/test/api_test.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* The main purpose of this test is to ensure this C header compiles in C, so
+ * that no C++ features inadvertently leak into the C ABI. */
+#include "nativeloader/native_loader.h"
+
+int main(int argc, char** argv) {
+  (void)argc;
+  (void)argv;
+  return 0;
+}
diff --git a/libnativeloader/test/public.libraries-oem1.txt b/libnativeloader/test/public.libraries-oem1.txt
new file mode 100644
index 0000000..f9433e2
--- /dev/null
+++ b/libnativeloader/test/public.libraries-oem1.txt
@@ -0,0 +1,2 @@
+libfoo.oem1.so
+libbar.oem1.so
diff --git a/libnativeloader/test/public.libraries-oem2.txt b/libnativeloader/test/public.libraries-oem2.txt
new file mode 100644
index 0000000..de6bdb0
--- /dev/null
+++ b/libnativeloader/test/public.libraries-oem2.txt
@@ -0,0 +1,2 @@
+libfoo.oem2.so
+libbar.oem2.so
diff --git a/libnativeloader/test/public.libraries-product1.txt b/libnativeloader/test/public.libraries-product1.txt
new file mode 100644
index 0000000..358154c
--- /dev/null
+++ b/libnativeloader/test/public.libraries-product1.txt
@@ -0,0 +1,2 @@
+libfoo.product1.so
+libbar.product1.so
diff --git a/libnativeloader/test/runtest.sh b/libnativeloader/test/runtest.sh
new file mode 100755
index 0000000..40beb5b
--- /dev/null
+++ b/libnativeloader/test/runtest.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+adb root
+adb remount
+adb sync
+adb shell stop
+adb shell start
+sleep 5 # wait for the framework to restart after stop/start (no full reboot)
+adb logcat -c;
+adb shell am start -n android.test.app.system/android.test.app.TestActivity
+adb shell am start -n android.test.app.vendor/android.test.app.TestActivity
+adb logcat | grep android.test.app
diff --git a/libnativeloader/test/src/android/test/app/TestActivity.java b/libnativeloader/test/src/android/test/app/TestActivity.java
new file mode 100644
index 0000000..a7a455d
--- /dev/null
+++ b/libnativeloader/test/src/android/test/app/TestActivity.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.test.app;
+
+import android.app.Activity;
+import android.os.Bundle;
+import android.util.Log;
+
+public class TestActivity extends Activity {
+
+    @Override
+    public void onCreate(Bundle icicle) {
+         super.onCreate(icicle);
+         tryLoadingLib("foo.oem1");
+         tryLoadingLib("bar.oem1");
+         tryLoadingLib("foo.oem2");
+         tryLoadingLib("bar.oem2");
+         tryLoadingLib("foo.product1");
+         tryLoadingLib("bar.product1");
+    }
+
+    private void tryLoadingLib(String name) {
+        try {
+            System.loadLibrary(name);
+            Log.d(getPackageName(), "library " + name + " is successfully loaded");
+        } catch (UnsatisfiedLinkError e) {
+            Log.d(getPackageName(), "failed to load library " + name, e);
+        }
+    }
+}
diff --git a/libnativeloader/test/system/AndroidManifest.xml b/libnativeloader/test/system/AndroidManifest.xml
new file mode 100644
index 0000000..c304889
--- /dev/null
+++ b/libnativeloader/test/system/AndroidManifest.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ -->
+
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+    package="android.test.app.system">
+
+    <application>
+        <activity android:name="android.test.app.TestActivity" >
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+                <category android:name="android.intent.category.LAUNCHER" />
+            </intent-filter>
+        </activity>
+    </application>
+
+</manifest>
+
diff --git a/libnativeloader/test/test.cpp b/libnativeloader/test/test.cpp
new file mode 100644
index 0000000..b166928
--- /dev/null
+++ b/libnativeloader/test/test.cpp
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define LOG_TAG "oemlib"
+#include <android-base/logging.h>
+
+static __attribute__((constructor)) void test_lib_init() {
+  LOG(DEBUG) << LIBNAME << " loaded";
+}
diff --git a/libnativeloader/test/vendor/AndroidManifest.xml b/libnativeloader/test/vendor/AndroidManifest.xml
new file mode 100644
index 0000000..c4c1a9c
--- /dev/null
+++ b/libnativeloader/test/vendor/AndroidManifest.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ -->
+
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+    package="android.test.app.vendor">
+
+    <application>
+        <activity android:name="android.test.app.TestActivity" >
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+                <category android:name="android.intent.category.LAUNCHER" />
+            </intent-filter>
+        </activity>
+    </application>
+
+</manifest>
+
diff --git a/libnativeloader/utils.h b/libnativeloader/utils.h
new file mode 100644
index 0000000..9066e57
--- /dev/null
+++ b/libnativeloader/utils.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBNATIVELOADER_UTILS_H_
+#define ART_LIBNATIVELOADER_UTILS_H_
+
+namespace android::nativeloader {
+
+#if defined(__LP64__)
+#define LIB "lib64"
+#else
+#define LIB "lib"
+#endif
+
+}  // namespace android::nativeloader
+
+#endif  // ART_LIBNATIVELOADER_UTILS_H_
diff --git a/libprofile/Android.bp b/libprofile/Android.bp
index 78bc9d3..367eefc 100644
--- a/libprofile/Android.bp
+++ b/libprofile/Android.bp
@@ -19,14 +19,13 @@
     defaults: ["art_defaults"],
     host_supported: true,
     srcs: [
+        "profile/profile_boot_info.cc",
         "profile/profile_compilation_info.cc",
     ],
     target: {
         android: {
             shared_libs: [
-                "libartbase",
                 "libartpalette",
-                "libdexfile",
                 "libbase",
             ],
             static_libs: [
@@ -38,9 +37,7 @@
         },
         not_windows: {
             shared_libs: [
-                "libartbase",
                 "libartpalette",
-                "libdexfile",
                 "libziparchive",
                 "libz",
                 "libbase",
@@ -48,11 +45,9 @@
             export_shared_lib_headers: ["libbase"],
         },
         windows: {
-	    cflags: ["-Wno-thread-safety"],
+            cflags: ["-Wno-thread-safety"],
             static_libs: [
-                "libartbase",
                 "libartpalette",
-                "libdexfile",
                 "libziparchive",
                 "libz",
                 "libbase",
@@ -102,25 +97,43 @@
 
 art_cc_library {
     name: "libprofile",
-    defaults: ["libprofile_defaults"],
-    // Leave the symbols in the shared library so that stack unwinders can
-    // produce meaningful name resolution.
-    strip: {
-        keep_symbols: true,
-    },
+    defaults: [
+        "libprofile_defaults",
+        "libart_nativeunwind_defaults",
+    ],
     shared_libs: [
         "libbase",
         "libziparchive",
     ],
     export_shared_lib_headers: ["libbase"],
     target: {
+        android: {
+            shared_libs: [
+                "libartbase",
+                "libdexfile",
+            ],
+        },
+        not_windows: {
+            shared_libs: [
+                "libartbase",
+                "libdexfile",
+            ],
+        },
         windows: {
-	    enabled: true,
-	    shared: {
-	        enabled: false,
-	    },
-	},
-    }
+            enabled: true,
+            shared: {
+                enabled: false,
+            },
+            static_libs: [
+                "libartbase",
+                "libdexfile",
+            ],
+        },
+    },
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_library {
@@ -133,7 +146,30 @@
         "libbase",
         "libziparchive",
     ],
+    target: {
+        android: {
+            shared_libs: [
+                "libartbased",
+                "libdexfiled",
+            ],
+        },
+        not_windows: {
+            shared_libs: [
+                "libartbased",
+                "libdexfiled",
+            ],
+        },
+        windows: {
+            static_libs: [
+                "libartbased",
+                "libdexfiled",
+            ],
+        },
+    },
     export_shared_lib_headers: ["libbase"],
+    apex_available: [
+        "com.android.art.debug",
+    ],
 }
 
 // For now many of these tests still use CommonRuntimeTest, almost universally because of
@@ -145,6 +181,7 @@
         "art_gtest_defaults",
     ],
     srcs: [
+        "profile/profile_boot_info_test.cc",
         "profile/profile_compilation_info_test.cc",
     ],
     shared_libs: [
diff --git a/libprofile/profile/profile_boot_info.cc b/libprofile/profile/profile_boot_info.cc
new file mode 100644
index 0000000..b64d086
--- /dev/null
+++ b/libprofile/profile/profile_boot_info.cc
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "profile_boot_info.h"
+
+#include <unistd.h>
+
+#include <vector>
+
+#include "dex/dex_file.h"
+#include "profile_helpers.h"
+
+
+namespace art {
+
+void ProfileBootInfo::Add(const DexFile* dex_file, uint32_t method_index) {
+  auto it = std::find(dex_files_.begin(), dex_files_.end(), dex_file);
+  uint32_t index = 0;
+  if (it == dex_files_.end()) {
+    index = dex_files_.size();
+    dex_files_.push_back(dex_file);
+  } else {
+    index = std::distance(dex_files_.begin(), it);
+  }
+  methods_.push_back(std::make_pair(index, method_index));
+}
+
+bool ProfileBootInfo::Save(int fd) const {
+  std::vector<uint8_t> buffer;
+  // Store dex file locations.
+  for (const DexFile* dex_file : dex_files_) {
+    AddUintToBuffer(&buffer, static_cast<uint8_t>(dex_file->GetLocation().size()));
+    AddStringToBuffer(&buffer, dex_file->GetLocation());
+  }
+  // Store marker between dex file locations and methods.
+  AddUintToBuffer(&buffer, static_cast<uint8_t>(0));
+
+  // Store pairs of <dex file index, method id>, in compilation order.
+  for (const std::pair<uint32_t, uint32_t>& pair : methods_) {
+    AddUintToBuffer(&buffer, pair.first);
+    AddUintToBuffer(&buffer, pair.second);
+  }
+  if (!WriteBuffer(fd, buffer.data(), buffer.size())) {
+    return false;
+  }
+  return true;
+}
+
+bool ProfileBootInfo::Load(int fd, const std::vector<const DexFile*>& dex_files) {
+  // Read dex file locations.
+  do {
+    uint8_t string_length;
+    int bytes_read = TEMP_FAILURE_RETRY(read(fd, &string_length, sizeof(uint8_t)));
+    if (bytes_read < 0) {
+      PLOG(ERROR) << "Unexpected error reading profile";
+      return false;
+    } else if (bytes_read == 0) {
+      if (dex_files.empty()) {
+        // If no dex files have been passed, that's expected.
+        return true;
+      } else {
+        LOG(ERROR) << "Unexpected end of file for length";
+        return false;
+      }
+    }
+    if (string_length == 0) {
+      break;
+    }
+    std::unique_ptr<char[]> data(new char[string_length]);
+    bytes_read = TEMP_FAILURE_RETRY(read(fd, data.get(), string_length));
+    if (bytes_read < 0) {
+      PLOG(WARNING) << "Unexpected error reading profile";
+      return false;
+    } else if (bytes_read == 0) {
+      LOG(ERROR) << "Unexpected end of file for name";
+      return false;
+    }
+    // Map the location to an instance of dex file in `dex_files`.
+    auto it = std::find_if(dex_files.begin(),
+                           dex_files.end(),
+                           [string_length, &data](const DexFile* file) {
+      std::string dex_location = file->GetLocation();
+      return dex_location.size() == string_length &&
+          (strncmp(data.get(), dex_location.data(), string_length) == 0);
+    });
+    if (it != dex_files.end()) {
+      dex_files_.push_back(*it);
+    } else {
+      LOG(ERROR) << "Couldn't find " << std::string(data.get(), string_length);
+      return false;
+    }
+  } while (true);
+
+  // Read methods.
+  do {
+    uint32_t dex_file_index;
+    uint32_t method_id;
+    int bytes_read = TEMP_FAILURE_RETRY(read(fd, &dex_file_index, sizeof(dex_file_index)));
+    if (bytes_read <= 0) {
+      break;
+    }
+    bytes_read = TEMP_FAILURE_RETRY(read(fd, &method_id, sizeof(method_id)));
+    if (bytes_read <= 0) {
+      LOG(ERROR) << "Didn't get a method id";
+      return false;
+    }
+    methods_.push_back(std::make_pair(dex_file_index, method_id));
+  } while (true);
+  return true;
+}
+
+}  // namespace art
diff --git a/libprofile/profile/profile_boot_info.h b/libprofile/profile/profile_boot_info.h
new file mode 100644
index 0000000..cd76b7b
--- /dev/null
+++ b/libprofile/profile/profile_boot_info.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBPROFILE_PROFILE_PROFILE_BOOT_INFO_H_
+#define ART_LIBPROFILE_PROFILE_PROFILE_BOOT_INFO_H_
+
+#include <vector>
+
+#include "base/value_object.h"
+
+namespace art {
+
+class DexFile;
+
+/**
+ * Abstraction over a list of methods representing the boot profile
+ * of an application. The order in the list is the order in which the methods
+ * should be compiled.
+ *
+ * TODO: This is currently implemented as a separate profile to
+ * ProfileCompilationInfo to enable fast experiments, but we are likely to
+ * incorporate it in ProfileCompilationInfo once we settle on an automated way
+ * to generate such a boot profile.
+ */
+class ProfileBootInfo : public ValueObject {
+ public:
+  // Add the given method located in the given dex file in the profile.
+  void Add(const DexFile* dex_file, uint32_t method_index);
+
+  // Save this profile boot info into the `fd` file descriptor.
+  bool Save(int fd) const;
+
+  // Load the profile listing from `fd` into this profile boot info. Note that
+  // the profile boot info will store internally the dex files being passed.
+  bool Load(int fd, const std::vector<const DexFile*>& dex_files);
+
+  const std::vector<const DexFile*>& GetDexFiles() const {
+    return dex_files_;
+  }
+
+  const std::vector<std::pair<uint32_t, uint32_t>>& GetMethods() const {
+    return methods_;
+  }
+
+  bool IsEmpty() const { return dex_files_.empty() && methods_.empty(); }
+
+ private:
+  // List of dex files this boot profile info covers.
+  std::vector<const DexFile*> dex_files_;
+
+  // List of pair of <dex file index, method_id> methods to be compiled in
+  // order.
+  std::vector<std::pair<uint32_t, uint32_t>> methods_;
+};
+
+}  // namespace art
+
+#endif  // ART_LIBPROFILE_PROFILE_PROFILE_BOOT_INFO_H_
diff --git a/libprofile/profile/profile_boot_info_test.cc b/libprofile/profile/profile_boot_info_test.cc
new file mode 100644
index 0000000..9939f0a
--- /dev/null
+++ b/libprofile/profile/profile_boot_info_test.cc
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <stdio.h>
+
+#include "base/arena_allocator.h"
+#include "base/common_art_test.h"
+#include "base/unix_file/fd_file.h"
+#include "dex/dex_file.h"
+#include "profile/profile_boot_info.h"
+
+namespace art {
+
+class ProfileBootInfoTest : public CommonArtTest {
+ public:
+  void SetUp() override {
+    CommonArtTest::SetUp();
+  }
+};
+
+
+TEST_F(ProfileBootInfoTest, LoadEmpty) {
+  ScratchFile profile;
+  std::vector<const DexFile*> dex_files;
+
+  ProfileBootInfo loaded_info;
+  ASSERT_TRUE(loaded_info.IsEmpty());
+  ASSERT_TRUE(loaded_info.Load(profile.GetFd(), dex_files));
+  ASSERT_TRUE(loaded_info.IsEmpty());
+}
+
+TEST_F(ProfileBootInfoTest, OneMethod) {
+  ScratchFile profile;
+  std::unique_ptr<const DexFile> dex(OpenTestDexFile("ManyMethods"));
+  std::vector<const DexFile*> dex_files = { dex.get() };
+
+  ProfileBootInfo saved_info;
+  saved_info.Add(dex.get(), 0);
+  ASSERT_TRUE(saved_info.Save(profile.GetFd()));
+  ASSERT_TRUE(profile.GetFile()->ResetOffset());
+
+  ProfileBootInfo loaded_info;
+  ASSERT_TRUE(loaded_info.Load(profile.GetFd(), dex_files));
+  ASSERT_EQ(loaded_info.GetDexFiles().size(), 1u);
+  ASSERT_STREQ(loaded_info.GetDexFiles()[0]->GetLocation().c_str(), dex->GetLocation().c_str());
+  ASSERT_EQ(loaded_info.GetMethods().size(), 1u);
+  ASSERT_EQ(loaded_info.GetMethods()[0].first, 0u);
+  ASSERT_EQ(loaded_info.GetMethods()[0].second, 0u);
+}
+
+TEST_F(ProfileBootInfoTest, ManyDexFiles) {
+  ScratchFile profile;
+  std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("MultiDex");
+  std::vector<const DexFile*> dex_files2;
+  for (const std::unique_ptr<const DexFile>& file : dex_files) {
+    dex_files2.push_back(file.get());
+  }
+
+  ProfileBootInfo saved_info;
+  saved_info.Add(dex_files[0].get(), 42);
+  saved_info.Add(dex_files[1].get(), 108);
+  saved_info.Add(dex_files[1].get(), 54);
+  ASSERT_TRUE(saved_info.Save(profile.GetFd()));
+  ASSERT_TRUE(profile.GetFile()->ResetOffset());
+
+  ProfileBootInfo loaded_info;
+  ASSERT_TRUE(loaded_info.Load(profile.GetFd(), dex_files2));
+  ASSERT_EQ(loaded_info.GetDexFiles().size(), 2u);
+  ASSERT_STREQ(loaded_info.GetDexFiles()[0]->GetLocation().c_str(),
+               dex_files[0]->GetLocation().c_str());
+  ASSERT_EQ(loaded_info.GetMethods().size(), 3u);
+  ASSERT_EQ(loaded_info.GetMethods()[0].first, 0u);
+  ASSERT_EQ(loaded_info.GetMethods()[0].second, 42u);
+  ASSERT_EQ(loaded_info.GetMethods()[1].first, 1u);
+  ASSERT_EQ(loaded_info.GetMethods()[1].second, 108u);
+  ASSERT_EQ(loaded_info.GetMethods()[2].first, 1u);
+  ASSERT_EQ(loaded_info.GetMethods()[2].second, 54u);
+}
+
+TEST_F(ProfileBootInfoTest, LoadWrongDexFile) {
+  ScratchFile profile;
+  std::unique_ptr<const DexFile> dex(OpenTestDexFile("ManyMethods"));
+
+  ProfileBootInfo saved_info;
+  saved_info.Add(dex.get(), 42);
+  ASSERT_TRUE(saved_info.Save(profile.GetFd()));
+
+
+  ASSERT_TRUE(profile.GetFile()->ResetOffset());
+  ProfileBootInfo loaded_info;
+  std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("MultiDex");
+  std::vector<const DexFile*> dex_files2;
+  for (const std::unique_ptr<const DexFile>& file : dex_files) {
+    dex_files2.push_back(file.get());
+  }
+  ASSERT_FALSE(loaded_info.Load(profile.GetFd(), dex_files2));
+}
+
+}  // namespace art
diff --git a/libprofile/profile/profile_compilation_info.cc b/libprofile/profile/profile_compilation_info.cc
index 47b17ae..24419ef 100644
--- a/libprofile/profile/profile_compilation_info.cc
+++ b/libprofile/profile/profile_compilation_info.cc
@@ -57,17 +57,26 @@
 // profile_compilation_info object. All the profile line headers are now placed together
 // before corresponding method_encodings and class_ids.
 const uint8_t ProfileCompilationInfo::kProfileVersion[] = { '0', '1', '0', '\0' };
-const uint8_t ProfileCompilationInfo::kProfileVersionWithCounters[] = { '5', '0', '0', '\0' };
+const uint8_t ProfileCompilationInfo::kProfileVersionForBootImage[] = { '0', '1', '2', '\0' };
 
 static_assert(sizeof(ProfileCompilationInfo::kProfileVersion) == 4,
               "Invalid profile version size");
-static_assert(sizeof(ProfileCompilationInfo::kProfileVersionWithCounters) == 4,
+static_assert(sizeof(ProfileCompilationInfo::kProfileVersionForBootImage) == 4,
               "Invalid profile version size");
 
 // The name of the profile entry in the dex metadata file.
 // DO NOT CHANGE THIS! (it's similar to classes.dex in the apk files).
 const char ProfileCompilationInfo::kDexMetadataProfileEntry[] = "primary.prof";
 
+// A synthetic annotations that can be used to denote that no annotation should
+// be associated with the profile samples. We use the empty string for the package name
+// because that's an invalid package name and should never occur in practice.
+const ProfileCompilationInfo::ProfileSampleAnnotation
+  ProfileCompilationInfo::ProfileSampleAnnotation::kNone =
+      ProfileCompilationInfo::ProfileSampleAnnotation("");
+
+static constexpr char kSampleMetadataSeparator = ':';
+
 static constexpr uint16_t kMaxDexFileKeyLength = PATH_MAX;
 
 // Debug flag to ignore checksums when testing if a method or a class is present in the profile.
@@ -85,36 +94,34 @@
 static_assert(ProfileCompilationInfo::kIndividualInlineCacheSize < kIsMissingTypesEncoding,
               "InlineCache::kIndividualInlineCacheSize is larger than expected");
 
+static constexpr uint32_t kSizeWarningThresholdBytes = 500000U;
+static constexpr uint32_t kSizeErrorThresholdBytes = 1500000U;
+
+static constexpr uint32_t kSizeWarningThresholdBootBytes = 25000000U;
+static constexpr uint32_t kSizeErrorThresholdBootBytes = 100000000U;
+
 static bool ChecksumMatch(uint32_t dex_file_checksum, uint32_t checksum) {
   return kDebugIgnoreChecksum || dex_file_checksum == checksum;
 }
 
-// For storage efficiency we store aggregation counts of up to at most 2^16.
-static uint16_t IncrementAggregationCounter(uint16_t counter, uint16_t value) {
-  if (counter < (std::numeric_limits<uint16_t>::max() - value)) {
-    return counter + value;
-  } else {
-    return std::numeric_limits<uint16_t>::max();
-  }
-}
-
-ProfileCompilationInfo::ProfileCompilationInfo(ArenaPool* custom_arena_pool)
+ProfileCompilationInfo::ProfileCompilationInfo(ArenaPool* custom_arena_pool, bool for_boot_image)
     : default_arena_pool_(),
       allocator_(custom_arena_pool),
       info_(allocator_.Adapter(kArenaAllocProfile)),
-      profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)),
-      aggregation_count_(0) {
-  InitProfileVersionInternal(kProfileVersion);
+      profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)) {
+  memcpy(version_,
+         for_boot_image ? kProfileVersionForBootImage : kProfileVersion,
+         kProfileVersionSize);
 }
 
+ProfileCompilationInfo::ProfileCompilationInfo(ArenaPool* custom_arena_pool)
+    : ProfileCompilationInfo(custom_arena_pool, /*for_boot_image=*/ false) { }
+
 ProfileCompilationInfo::ProfileCompilationInfo()
-    : default_arena_pool_(),
-      allocator_(&default_arena_pool_),
-      info_(allocator_.Adapter(kArenaAllocProfile)),
-      profile_key_map_(std::less<const std::string>(), allocator_.Adapter(kArenaAllocProfile)),
-      aggregation_count_(0) {
-  InitProfileVersionInternal(kProfileVersion);
-}
+    : ProfileCompilationInfo(/*for_boot_image=*/ false) { }
+
+ProfileCompilationInfo::ProfileCompilationInfo(bool for_boot_image)
+    : ProfileCompilationInfo(&default_arena_pool_, for_boot_image) { }
 
 ProfileCompilationInfo::~ProfileCompilationInfo() {
   VLOG(profiler) << Dumpable<MemStats>(allocator_.GetMemStats());
@@ -149,11 +156,22 @@
   classes.insert(ref);
 }
 
-// Transform the actual dex location into relative paths.
+// Transform the actual dex location into a key used to index the dex file in the profile.
+// See ProfileCompilationInfo#GetProfileDexFileBaseKey as well.
+std::string ProfileCompilationInfo::GetProfileDexFileAugmentedKey(
+      const std::string& dex_location,
+      const ProfileSampleAnnotation& annotation) {
+  std::string base_key = GetProfileDexFileBaseKey(dex_location);
+  return annotation == ProfileSampleAnnotation::kNone
+      ? base_key
+      : base_key + kSampleMetadataSeparator + annotation.GetOriginPackageName();;
+}
+
+// Transform the actual dex location into a base profile key (represented as relative paths).
 // Note: this is OK because we don't store profiles of different apps into the same file.
 // Apps with split apks don't cause trouble because each split has a different name and will not
 // collide with other entries.
-std::string ProfileCompilationInfo::GetProfileDexFileKey(const std::string& dex_location) {
+std::string ProfileCompilationInfo::GetProfileDexFileBaseKey(const std::string& dex_location) {
   DCHECK(!dex_location.empty());
   size_t last_sep_index = dex_location.find_last_of('/');
   if (last_sep_index == std::string::npos) {
@@ -164,41 +182,34 @@
   }
 }
 
-bool ProfileCompilationInfo::AddMethodIndex(MethodHotness::Flag flags, const MethodReference& ref) {
-  DexFileData* data = GetOrAddDexFileData(ref.dex_file);
-  if (data == nullptr) {
-    return false;
-  }
-  return data->AddMethod(flags, ref.index);
+std::string ProfileCompilationInfo::GetBaseKeyFromAugmentedKey(
+    const std::string& profile_key) {
+  size_t pos = profile_key.rfind(kSampleMetadataSeparator);
+  return (pos == std::string::npos) ? profile_key : profile_key.substr(0, pos);
 }
 
-bool ProfileCompilationInfo::AddMethodIndex(MethodHotness::Flag flags,
-                                            const std::string& dex_location,
-                                            uint32_t checksum,
-                                            uint16_t method_idx,
-                                            uint32_t num_method_ids) {
-  DexFileData* data = GetOrAddDexFileData(GetProfileDexFileKey(dex_location),
-                                          checksum,
-                                          num_method_ids);
-  if (data == nullptr) {
-    return false;
-  }
-  return data->AddMethod(flags, method_idx);
+std::string ProfileCompilationInfo::MigrateAnnotationInfo(
+    const std::string& base_key,
+    const std::string& augmented_key) {
+  size_t pos = augmented_key.rfind(kSampleMetadataSeparator);
+  return (pos == std::string::npos)
+      ? base_key
+      : base_key + augmented_key.substr(pos);
+}
+
+ProfileCompilationInfo::ProfileSampleAnnotation ProfileCompilationInfo::GetAnnotationFromKey(
+     const std::string& augmented_key) {
+  size_t pos = augmented_key.rfind(kSampleMetadataSeparator);
+  return (pos == std::string::npos)
+      ? ProfileSampleAnnotation::kNone
+      : ProfileSampleAnnotation(augmented_key.substr(pos + 1));
 }
 
 bool ProfileCompilationInfo::AddMethods(const std::vector<ProfileMethodInfo>& methods,
-                                        MethodHotness::Flag flags) {
+                                        MethodHotness::Flag flags,
+                                        const ProfileSampleAnnotation& annotation) {
   for (const ProfileMethodInfo& method : methods) {
-    if (!AddMethod(method, flags)) {
-      return false;
-    }
-  }
-  return true;
-}
-
-bool ProfileCompilationInfo::AddClasses(const std::set<DexCacheResolvedClasses>& resolved_classes) {
-  for (const DexCacheResolvedClasses& dex_cache : resolved_classes) {
-    if (!AddResolvedClasses(dex_cache)) {
+    if (!AddMethod(method, flags, annotation)) {
       return false;
     }
   }
@@ -236,7 +247,7 @@
   std::string error;
 
   if (!IsEmpty()) {
-    return kProfileLoadWouldOverwiteData;
+    return false;
   }
 
 #ifdef _WIN32
@@ -356,15 +367,13 @@
 /**
  * Serialization format:
  * [profile_header, zipped[[profile_line_header1, profile_line_header2...],[profile_line_data1,
- *    profile_line_data2...]],global_aggregation_counter]
+ *    profile_line_data2...]]
  * profile_header:
  *   magic,version,number_of_dex_files,uncompressed_size_of_zipped_data,compressed_data_size
  * profile_line_header:
- *   dex_location,number_of_classes,methods_region_size,dex_location_checksum,num_method_ids
+ *   profile_key,number_of_classes,methods_region_size,dex_location_checksum,num_method_ids
  * profile_line_data:
- *   method_encoding_1,method_encoding_2...,class_id1,class_id2...,startup/post startup bitmap,
- *   num_classes,class_counters,num_methods,method_counters
- * The aggregation counters are only stored if the profile version is kProfileVersionWithCounters.
+ *   method_encoding_1,method_encoding_2...,class_id1,class_id2...,method_flags bitmap,
  * The method_encoding is:
  *    method_id,number_of_inline_caches,inline_cache1,inline_cache2...
  * The inline_cache is:
@@ -390,8 +399,9 @@
   if (!WriteBuffer(fd, version_, sizeof(version_))) {
     return false;
   }
-  DCHECK_LE(info_.size(), std::numeric_limits<uint8_t>::max());
-  AddUintToBuffer(&buffer, static_cast<uint8_t>(info_.size()));
+
+  DCHECK_LE(info_.size(), MaxProfileIndex());
+  WriteProfileIndex(&buffer, static_cast<ProfileIndexType>(info_.size()));
 
   uint32_t required_capacity = 0;
   for (const DexFileData* dex_data_ptr : info_) {
@@ -402,23 +412,15 @@
         sizeof(uint16_t) * dex_data.class_set.size() +
         methods_region_size +
         dex_data.bitmap_storage.size();
-    if (StoresAggregationCounters()) {
-      required_capacity += sizeof(uint16_t) +  // num class counters
-          sizeof(uint16_t) * dex_data.class_set.size() +
-          sizeof(uint16_t) +  // num method counter
-          sizeof(uint16_t) * dex_data_ptr->GetNumMethodCounters();
-    }
   }
-  if (StoresAggregationCounters()) {
-    required_capacity += sizeof(uint16_t);  // global counter
-  }
-
   // Allow large profiles for non target builds for the case where we are merging many profiles
   // to generate a boot image profile.
-  if (kIsTargetBuild && required_capacity > kProfileSizeErrorThresholdInBytes) {
+  VLOG(profiler) << "Required capacity: " << required_capacity << " bytes.";
+  if (required_capacity > GetSizeErrorThresholdBytes()) {
     LOG(ERROR) << "Profile data size exceeds "
-               << std::to_string(kProfileSizeErrorThresholdInBytes)
-               << " bytes. Profile will not be written to disk.";
+               << GetSizeErrorThresholdBytes()
+               << " bytes. Profile will not be written to disk."
+               << " It requires " << required_capacity << " bytes.";
     return false;
   }
   AddUintToBuffer(&buffer, required_capacity);
@@ -485,24 +487,6 @@
     buffer.insert(buffer.end(),
                   dex_data.bitmap_storage.begin(),
                   dex_data.bitmap_storage.end());
-
-    if (StoresAggregationCounters()) {
-      AddUintToBuffer(&buffer, static_cast<uint16_t>(dex_data.class_set.size()));
-      for (const auto& class_id : dex_data.class_set) {
-        uint16_t type_idx = class_id.index_;
-        AddUintToBuffer(&buffer, dex_data.class_counters[type_idx]);
-      }
-      AddUintToBuffer(&buffer, dex_data.GetNumMethodCounters());
-      for (uint16_t method_idx = 0; method_idx < dex_data.num_method_ids; method_idx++) {
-        if (dex_data.GetHotnessInfo(method_idx).IsInProfile()) {
-          AddUintToBuffer(&buffer, dex_data.method_counters[method_idx]);
-        }
-      }
-    }
-  }
-
-  if (StoresAggregationCounters()) {
-    AddUintToBuffer(&buffer, aggregation_count_);
   }
 
   uint32_t output_size = 0;
@@ -510,9 +494,10 @@
                                                                required_capacity,
                                                                &output_size);
 
-  if (output_size > kProfileSizeWarningThresholdInBytes) {
+  if (output_size > GetSizeWarningThresholdBytes()) {
     LOG(WARNING) << "Profile data size exceeds "
-                 << std::to_string(kProfileSizeWarningThresholdInBytes);
+        << GetSizeWarningThresholdBytes()
+        << " It has " << output_size << " bytes";
   }
 
   buffer.clear();
@@ -567,7 +552,7 @@
     DCHECK_LT(classes.size(), ProfileCompilationInfo::kIndividualInlineCacheSize);
     DCHECK_NE(classes.size(), 0u) << "InlineCache contains a dex_pc with 0 classes";
 
-    SafeMap<uint8_t, std::vector<dex::TypeIndex>> dex_to_classes_map;
+    SafeMap<ProfileIndexType, std::vector<dex::TypeIndex>> dex_to_classes_map;
     // Group the classes by dex. We expect that most of the classes will come from
     // the same dex, so this will be more efficient than encoding the dex index
     // for each class reference.
@@ -575,10 +560,10 @@
     // Add the dex map size.
     AddUintToBuffer(buffer, static_cast<uint8_t>(dex_to_classes_map.size()));
     for (const auto& dex_it : dex_to_classes_map) {
-      uint8_t dex_profile_index = dex_it.first;
+      ProfileIndexType dex_profile_index = dex_it.first;
       const std::vector<dex::TypeIndex>& dex_classes = dex_it.second;
       // Add the dex profile index.
-      AddUintToBuffer(buffer, dex_profile_index);
+      WriteProfileIndex(buffer, dex_profile_index);
       // Add the the number of classes for each dex profile index.
       AddUintToBuffer(buffer, static_cast<uint8_t>(dex_classes.size()));
       for (size_t i = 0; i < dex_classes.size(); i++) {
@@ -597,11 +582,11 @@
     size += sizeof(uint16_t) * inline_cache.size();  // dex_pc
     for (const auto& inline_cache_it : inline_cache) {
       const ClassSet& classes = inline_cache_it.second.classes;
-      SafeMap<uint8_t, std::vector<dex::TypeIndex>> dex_to_classes_map;
+      SafeMap<ProfileIndexType, std::vector<dex::TypeIndex>> dex_to_classes_map;
       GroupClassesByDex(classes, &dex_to_classes_map);
       size += sizeof(uint8_t);  // dex_to_classes_map size
       for (const auto& dex_it : dex_to_classes_map) {
-        size += sizeof(uint8_t);  // dex profile index
+        size += SizeOfProfileIndexType();  // dex profile index
         size += sizeof(uint8_t);  // number of classes
         const std::vector<dex::TypeIndex>& dex_classes = dex_it.second;
         size += sizeof(uint16_t) * dex_classes.size();  // the actual classes
@@ -613,7 +598,7 @@
 
 void ProfileCompilationInfo::GroupClassesByDex(
     const ClassSet& classes,
-    /*out*/SafeMap<uint8_t, std::vector<dex::TypeIndex>>* dex_to_classes_map) {
+    /*out*/SafeMap<ProfileIndexType, std::vector<dex::TypeIndex>>* dex_to_classes_map) {
   for (const auto& classes_it : classes) {
     auto dex_it = dex_to_classes_map->FindOrAdd(classes_it.dex_profile_index);
     dex_it->second.push_back(classes_it.type_index);
@@ -625,17 +610,18 @@
     uint32_t checksum,
     uint32_t num_method_ids) {
   const auto profile_index_it = profile_key_map_.FindOrAdd(profile_key, profile_key_map_.size());
-  if (profile_key_map_.size() > std::numeric_limits<uint8_t>::max()) {
-    // Allow only 255 dex files to be profiled. This allows us to save bytes
-    // when encoding. The number is well above what we expect for normal applications.
+  if (profile_key_map_.size() > MaxProfileIndex()) {
+    // Allow only a limited number dex files to be profiled. This allows us to save bytes
+    // when encoding. For regular profiles this 2^8, and for boot profiles is 2^16
+    // (well above what we expect for normal applications).
     if (kIsDebugBuild) {
-      LOG(ERROR) << "Exceeded the maximum number of dex files (255). Something went wrong";
+      LOG(ERROR) << "Exceeded the maximum number of dex file. Something went wrong";
     }
     profile_key_map_.erase(profile_key);
     return nullptr;
   }
 
-  uint8_t profile_index = profile_index_it->second;
+  ProfileIndexType profile_index = profile_index_it->second;
   if (info_.size() <= profile_index) {
     // This is a new addition. Add it to the info_ array.
     DexFileData* dex_file_data = new (&allocator_) DexFileData(
@@ -644,7 +630,7 @@
         checksum,
         profile_index,
         num_method_ids,
-        StoresAggregationCounters());
+        IsForBootImage());
     info_.push_back(dex_file_data);
   }
   DexFileData* result = info_[profile_index];
@@ -681,7 +667,7 @@
     return nullptr;
   }
 
-  uint8_t profile_index = profile_index_it->second;
+  ProfileIndexType profile_index = profile_index_it->second;
   const DexFileData* result = info_[profile_index];
   if (verify_checksum && !ChecksumMatch(result->checksum, checksum)) {
     return nullptr;
@@ -691,89 +677,62 @@
   return result;
 }
 
-bool ProfileCompilationInfo::AddResolvedClasses(const DexCacheResolvedClasses& classes) {
-  const std::string dex_location = GetProfileDexFileKey(classes.GetDexLocation());
-  const uint32_t checksum = classes.GetLocationChecksum();
-  DexFileData* const data = GetOrAddDexFileData(dex_location, checksum, classes.NumMethodIds());
-  if (data == nullptr) {
-    return false;
-  }
-  data->class_set.insert(classes.GetClasses().begin(), classes.GetClasses().end());
-  return true;
-}
-
-bool ProfileCompilationInfo::AddMethod(const std::string& dex_location,
-                                       uint32_t dex_checksum,
-                                       uint16_t method_index,
-                                       uint32_t num_method_ids,
-                                       const OfflineProfileMethodInfo& pmi,
-                                       MethodHotness::Flag flags) {
-  DexFileData* const data = GetOrAddDexFileData(GetProfileDexFileKey(dex_location),
-                                                dex_checksum,
-                                                num_method_ids);
-  if (data == nullptr) {
-    // The data is null if there is a mismatch in the checksum or number of method ids.
-    return false;
-  }
-
-  // Add the method.
-  InlineCacheMap* inline_cache = data->FindOrAddMethod(method_index);
-  if (inline_cache == nullptr) {
-    // Happens if the method index is outside the range (i.e. is greater then the number
-    // of methods in the dex file). This should not happen during normal execution,
-    // But tools (e.g. boot image aggregation tools) and tests stress this behaviour.
-    return false;
-  }
-
-  data->SetMethodHotness(method_index, flags);
-
-  if (pmi.inline_caches == nullptr) {
-    // If we don't have inline caches return success right away.
-    return true;
-  }
-  for (const auto& pmi_inline_cache_it : *pmi.inline_caches) {
-    uint16_t pmi_ic_dex_pc = pmi_inline_cache_it.first;
-    const DexPcData& pmi_ic_dex_pc_data = pmi_inline_cache_it.second;
-    DexPcData* dex_pc_data = FindOrAddDexPc(inline_cache, pmi_ic_dex_pc);
-    if (dex_pc_data->is_missing_types || dex_pc_data->is_megamorphic) {
-      // We are already megamorphic or we are missing types; no point in going forward.
-      continue;
-    }
-
-    if (pmi_ic_dex_pc_data.is_missing_types) {
-      dex_pc_data->SetIsMissingTypes();
-      continue;
-    }
-    if (pmi_ic_dex_pc_data.is_megamorphic) {
-      dex_pc_data->SetIsMegamorphic();
-      continue;
-    }
-
-    for (const ClassReference& class_ref : pmi_ic_dex_pc_data.classes) {
-      const DexReference& dex_ref = pmi.dex_references[class_ref.dex_profile_index];
-      DexFileData* class_dex_data = GetOrAddDexFileData(
-          GetProfileDexFileKey(dex_ref.dex_location),
-          dex_ref.dex_checksum,
-          dex_ref.num_method_ids);
-      if (class_dex_data == nullptr) {  // checksum mismatch
-        return false;
+const ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::FindDexDataUsingAnnotations(
+      const DexFile* dex_file,
+      const ProfileSampleAnnotation& annotation) const {
+  if (annotation == ProfileSampleAnnotation::kNone) {
+    std::string profile_key = GetProfileDexFileBaseKey(dex_file->GetLocation());
+    for (const DexFileData* dex_data : info_) {
+      if (profile_key == GetBaseKeyFromAugmentedKey(dex_data->profile_key)) {
+        if (!ChecksumMatch(dex_data->checksum, dex_file->GetLocationChecksum())) {
+          return nullptr;
+        }
+        return dex_data;
       }
-      dex_pc_data->AddClass(class_dex_data->profile_index, class_ref.type_index);
     }
+  } else {
+    std::string profile_key = GetProfileDexFileAugmentedKey(dex_file->GetLocation(), annotation);
+    return FindDexData(profile_key, dex_file->GetLocationChecksum());
   }
-  return true;
+
+  return nullptr;
 }
 
-bool ProfileCompilationInfo::AddMethod(const ProfileMethodInfo& pmi, MethodHotness::Flag flags) {
-  DexFileData* const data = GetOrAddDexFileData(pmi.ref.dex_file);
+void ProfileCompilationInfo::FindAllDexData(
+    const DexFile* dex_file,
+    /*out*/ std::vector<const ProfileCompilationInfo::DexFileData*>* result) const {
+  std::string profile_key = GetProfileDexFileBaseKey(dex_file->GetLocation());
+  for (const DexFileData* dex_data : info_) {
+    if (profile_key == GetBaseKeyFromAugmentedKey(dex_data->profile_key)) {
+      if (ChecksumMatch(dex_data->checksum, dex_file->GetLocationChecksum())) {
+        result->push_back(dex_data);
+      }
+    }
+  }
+}
+
+bool ProfileCompilationInfo::AddMethod(const ProfileMethodInfo& pmi,
+                                       MethodHotness::Flag flags,
+                                       const ProfileSampleAnnotation& annotation) {
+  DexFileData* const data = GetOrAddDexFileData(pmi.ref.dex_file, annotation);
   if (data == nullptr) {  // checksum mismatch
     return false;
   }
-  InlineCacheMap* inline_cache = data->FindOrAddMethod(pmi.ref.index);
-  if (inline_cache == nullptr) {
+  if (!data->AddMethod(flags, pmi.ref.index)) {
     return false;
   }
-  data->SetMethodHotness(pmi.ref.index, flags);
+  if ((flags & MethodHotness::kFlagHot) == 0) {
+    // The method is not hot, do not add inline caches.
+    return true;
+  }
+
+  // Add inline caches. Do this only for regular profiles. The boot image profiles don't use
+  // them and they take up useless space.
+  if (IsForBootImage()) {
+    return true;  // early success return.
+  }
+  InlineCacheMap* inline_cache = data->FindOrAddHotMethod(pmi.ref.index);
+  DCHECK(inline_cache != nullptr);
 
   for (const ProfileMethodInfo::ProfileInlineCache& cache : pmi.inline_caches) {
     if (cache.is_missing_types) {
@@ -781,7 +740,7 @@
       continue;
     }
     for (const TypeReference& class_ref : cache.classes) {
-      DexFileData* class_dex_data = GetOrAddDexFileData(class_ref.dex_file);
+      DexFileData* class_dex_data = GetOrAddDexFileData(class_ref.dex_file, annotation);
       if (class_dex_data == nullptr) {  // checksum mismatch
         return false;
       }
@@ -796,18 +755,6 @@
   return true;
 }
 
-bool ProfileCompilationInfo::AddClassIndex(const std::string& dex_location,
-                                           uint32_t checksum,
-                                           dex::TypeIndex type_idx,
-                                           uint32_t num_method_ids) {
-  DexFileData* const data = GetOrAddDexFileData(dex_location, checksum, num_method_ids);
-  if (data == nullptr) {
-    return false;
-  }
-  data->class_set.insert(type_idx);
-  return true;
-}
-
 #define READ_UINT(type, buffer, dest, error)            \
   do {                                                  \
     if (!(buffer).ReadUintAndAdvance<type>(&(dest))) {  \
@@ -819,8 +766,8 @@
 
 bool ProfileCompilationInfo::ReadInlineCache(
     SafeBuffer& buffer,
-    uint8_t number_of_dex_files,
-    const SafeMap<uint8_t, uint8_t>& dex_profile_index_remap,
+    ProfileIndexType number_of_dex_files,
+    const SafeMap<ProfileIndexType, ProfileIndexType>& dex_profile_index_remap,
     /*out*/ InlineCacheMap* inline_cache,
     /*out*/ std::string* error) {
   uint16_t inline_cache_size;
@@ -840,9 +787,12 @@
       continue;
     }
     for (; dex_to_classes_map_size > 0; dex_to_classes_map_size--) {
-      uint8_t dex_profile_index;
+      ProfileIndexType dex_profile_index;
       uint8_t dex_classes_size;
-      READ_UINT(uint8_t, buffer, dex_profile_index, error);
+      if (!ReadProfileIndex(buffer, &dex_profile_index)) {
+        *error = "Cannot read profile index";
+        return false;
+      }
       READ_UINT(uint8_t, buffer, dex_classes_size, error);
       if (dex_profile_index >= number_of_dex_files) {
         *error = "dex_profile_index out of bounds ";
@@ -866,28 +816,29 @@
   return true;
 }
 
-bool ProfileCompilationInfo::ReadMethods(SafeBuffer& buffer,
-                                         uint8_t number_of_dex_files,
-                                         const ProfileLineHeader& line_header,
-                                         const SafeMap<uint8_t, uint8_t>& dex_profile_index_remap,
-                                         /*out*/std::string* error) {
+bool ProfileCompilationInfo::ReadMethods(
+    SafeBuffer& buffer,
+    ProfileIndexType number_of_dex_files,
+    const ProfileLineHeader& line_header,
+    const SafeMap<ProfileIndexType, ProfileIndexType>& dex_profile_index_remap,
+    /*out*/std::string* error) {
   uint32_t unread_bytes_before_operation = buffer.CountUnreadBytes();
   if (unread_bytes_before_operation < line_header.method_region_size_bytes) {
     *error += "Profile EOF reached prematurely for ReadMethod";
-    return kProfileLoadBadData;
+    return false;
   }
   size_t expected_unread_bytes_after_operation = buffer.CountUnreadBytes()
       - line_header.method_region_size_bytes;
   uint16_t last_method_index = 0;
   while (buffer.CountUnreadBytes() > expected_unread_bytes_after_operation) {
-    DexFileData* const data = GetOrAddDexFileData(line_header.dex_location,
+    DexFileData* const data = GetOrAddDexFileData(line_header.profile_key,
                                                   line_header.checksum,
                                                   line_header.num_method_ids);
     uint16_t diff_with_last_method_index;
     READ_UINT(uint16_t, buffer, diff_with_last_method_index, error);
     uint16_t method_index = last_method_index + diff_with_last_method_index;
     last_method_index = method_index;
-    InlineCacheMap* inline_cache = data->FindOrAddMethod(method_index);
+    InlineCacheMap* inline_cache = data->FindOrAddHotMethod(method_index);
     if (inline_cache == nullptr) {
       return false;
     }
@@ -913,7 +864,7 @@
   size_t unread_bytes_before_op = buffer.CountUnreadBytes();
   if (unread_bytes_before_op < line_header.class_set_size) {
     *error += "Profile EOF reached prematurely for ReadClasses";
-    return kProfileLoadBadData;
+    return false;
   }
 
   uint16_t last_class_index = 0;
@@ -922,12 +873,14 @@
     READ_UINT(uint16_t, buffer, diff_with_last_class_index, error);
     uint16_t type_index = last_class_index + diff_with_last_class_index;
     last_class_index = type_index;
-    if (!AddClassIndex(line_header.dex_location,
-                       line_header.checksum,
-                       dex::TypeIndex(type_index),
-                       line_header.num_method_ids)) {
-      return false;
+
+    DexFileData* const data = GetOrAddDexFileData(line_header.profile_key,
+                                                  line_header.checksum,
+                                                  line_header.num_method_ids);
+    if (data == nullptr) {
+       return false;
     }
+    data->class_set.insert(dex::TypeIndex(type_index));
   }
   size_t total_bytes_read = unread_bytes_before_op - buffer.CountUnreadBytes();
   uint32_t expected_bytes_read = line_header.class_set_size * sizeof(uint16_t);
@@ -997,50 +950,56 @@
 
 ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::ReadProfileHeader(
       ProfileSource& source,
-      /*out*/uint8_t* number_of_dex_files,
+      /*out*/ProfileIndexType* number_of_dex_files,
       /*out*/uint32_t* uncompressed_data_size,
       /*out*/uint32_t* compressed_data_size,
       /*out*/std::string* error) {
   // Read magic and version
   const size_t kMagicVersionSize =
     sizeof(kProfileMagic) +
-    kProfileVersionSize +
-    sizeof(uint8_t) +  // number of dex files
-    sizeof(uint32_t) +  // size of uncompressed profile data
-    sizeof(uint32_t);  // size of compressed profile data
+    kProfileVersionSize;
+  SafeBuffer safe_buffer_version(kMagicVersionSize);
 
-  SafeBuffer safe_buffer(kMagicVersionSize);
-
-  ProfileLoadStatus status = safe_buffer.Fill(source, "ReadProfileHeader", error);
+  ProfileLoadStatus status = safe_buffer_version.Fill(source, "ReadProfileHeaderVersion", error);
   if (status != kProfileLoadSuccess) {
     return status;
   }
 
-  if (!safe_buffer.CompareAndAdvance(kProfileMagic, sizeof(kProfileMagic))) {
+  if (!safe_buffer_version.CompareAndAdvance(kProfileMagic, sizeof(kProfileMagic))) {
     *error = "Profile missing magic";
     return kProfileLoadVersionMismatch;
   }
-  if (safe_buffer.CountUnreadBytes() < kProfileVersionSize) {
+  if (safe_buffer_version.CountUnreadBytes() < kProfileVersionSize) {
      *error = "Cannot read profile version";
      return kProfileLoadBadData;
   }
-  memcpy(version_, safe_buffer.GetCurrentPtr(), kProfileVersionSize);
-  safe_buffer.Advance(kProfileVersionSize);
+  memcpy(version_, safe_buffer_version.GetCurrentPtr(), kProfileVersionSize);
   if ((memcmp(version_, kProfileVersion, kProfileVersionSize) != 0) &&
-      (memcmp(version_, kProfileVersionWithCounters, kProfileVersionSize) != 0)) {
+      (memcmp(version_, kProfileVersionForBootImage, kProfileVersionSize) != 0)) {
     *error = "Profile version mismatch";
     return kProfileLoadVersionMismatch;
   }
 
-  if (!safe_buffer.ReadUintAndAdvance<uint8_t>(number_of_dex_files)) {
+  const size_t kProfileHeaderDataSize =
+    SizeOfProfileIndexType() +  // number of dex files
+    sizeof(uint32_t) +  // size of uncompressed profile data
+    sizeof(uint32_t);  // size of compressed profile data
+  SafeBuffer safe_buffer_header_data(kProfileHeaderDataSize);
+
+  status = safe_buffer_header_data.Fill(source, "ReadProfileHeaderData", error);
+  if (status != kProfileLoadSuccess) {
+    return status;
+  }
+
+  if (!ReadProfileIndex(safe_buffer_header_data, number_of_dex_files)) {
     *error = "Cannot read the number of dex files";
     return kProfileLoadBadData;
   }
-  if (!safe_buffer.ReadUintAndAdvance<uint32_t>(uncompressed_data_size)) {
+  if (!safe_buffer_header_data.ReadUintAndAdvance<uint32_t>(uncompressed_data_size)) {
     *error = "Cannot read the size of uncompressed data";
     return kProfileLoadBadData;
   }
-  if (!safe_buffer.ReadUintAndAdvance<uint32_t>(compressed_data_size)) {
+  if (!safe_buffer_header_data.ReadUintAndAdvance<uint32_t>(compressed_data_size)) {
     *error = "Cannot read the size of compressed data";
     return kProfileLoadBadData;
   }
@@ -1048,10 +1007,10 @@
 }
 
 bool ProfileCompilationInfo::ReadProfileLineHeaderElements(SafeBuffer& buffer,
-                                                           /*out*/uint16_t* dex_location_size,
+                                                           /*out*/uint16_t* profile_key_size,
                                                            /*out*/ProfileLineHeader* line_header,
                                                            /*out*/std::string* error) {
-  READ_UINT(uint16_t, buffer, *dex_location_size, error);
+  READ_UINT(uint16_t, buffer, *profile_key_size, error);
   READ_UINT(uint16_t, buffer, line_header->class_set_size, error);
   READ_UINT(uint32_t, buffer, line_header->method_region_size_bytes, error);
   READ_UINT(uint32_t, buffer, line_header->checksum, error);
@@ -1068,41 +1027,41 @@
     return kProfileLoadBadData;
   }
 
-  uint16_t dex_location_size;
-  if (!ReadProfileLineHeaderElements(buffer, &dex_location_size, line_header, error)) {
+  uint16_t profile_key_size;
+  if (!ReadProfileLineHeaderElements(buffer, &profile_key_size, line_header, error)) {
     return kProfileLoadBadData;
   }
 
-  if (dex_location_size == 0 || dex_location_size > kMaxDexFileKeyLength) {
-    *error = "DexFileKey has an invalid size: " +
-        std::to_string(static_cast<uint32_t>(dex_location_size));
+  if (profile_key_size == 0 || profile_key_size > kMaxDexFileKeyLength) {
+    *error = "ProfileKey has an invalid size: " +
+        std::to_string(static_cast<uint32_t>(profile_key_size));
     return kProfileLoadBadData;
   }
 
-  if (buffer.CountUnreadBytes() < dex_location_size) {
+  if (buffer.CountUnreadBytes() < profile_key_size) {
     *error += "Profile EOF reached prematurely for ReadProfileHeaderDexLocation";
     return kProfileLoadBadData;
   }
   const uint8_t* base_ptr = buffer.GetCurrentPtr();
-  line_header->dex_location.assign(
-      reinterpret_cast<const char*>(base_ptr), dex_location_size);
-  buffer.Advance(dex_location_size);
+  line_header->profile_key.assign(
+      reinterpret_cast<const char*>(base_ptr), profile_key_size);
+  buffer.Advance(profile_key_size);
   return kProfileLoadSuccess;
 }
 
 ProfileCompilationInfo::ProfileLoadStatus ProfileCompilationInfo::ReadProfileLine(
       SafeBuffer& buffer,
-      uint8_t number_of_dex_files,
+      ProfileIndexType number_of_dex_files,
       const ProfileLineHeader& line_header,
-      const SafeMap<uint8_t, uint8_t>& dex_profile_index_remap,
+      const SafeMap<ProfileIndexType, ProfileIndexType>& dex_profile_index_remap,
       bool merge_classes,
       /*out*/std::string* error) {
-  DexFileData* data = GetOrAddDexFileData(line_header.dex_location,
+  DexFileData* data = GetOrAddDexFileData(line_header.profile_key,
                                           line_header.checksum,
                                           line_header.num_method_ids);
   if (data == nullptr) {
     *error = "Error when reading profile file line header: checksum mismatch for "
-        + line_header.dex_location;
+        + line_header.profile_key;
     return kProfileLoadBadData;
   }
 
@@ -1126,50 +1085,9 @@
   std::copy_n(base_ptr, bytes, data->bitmap_storage.data());
   buffer.Advance(bytes);
 
-  if (StoresAggregationCounters()) {
-    ReadAggregationCounters(buffer, *data, error);
-  }
-
   return kProfileLoadSuccess;
 }
 
-bool ProfileCompilationInfo::ReadAggregationCounters(
-      SafeBuffer& buffer,
-      DexFileData& dex_data,
-      /*out*/std::string* error) {
-  size_t unread_bytes_before_op = buffer.CountUnreadBytes();
-  size_t expected_byte_count = sizeof(uint16_t) *
-      (dex_data.class_set.size() + dex_data.method_map.size() + 2);
-  if (unread_bytes_before_op < expected_byte_count) {
-    *error += "Profile EOF reached prematurely for ReadAggregationCounters";
-    return false;
-  }
-
-  uint16_t num_class_counters;
-  READ_UINT(uint16_t, buffer, num_class_counters, error);
-  if (num_class_counters != dex_data.class_set.size()) {
-    *error = "Invalid class size when reading counters";
-    return false;
-  }
-  for (const auto& class_it : dex_data.class_set) {
-    READ_UINT(uint16_t, buffer, dex_data.class_counters[class_it.index_], error);
-  }
-
-  uint16_t num_method_counters;
-  READ_UINT(uint16_t, buffer, num_method_counters, error);
-  if (num_method_counters != dex_data.GetNumMethodCounters()) {
-    *error = "Invalid class size when reading counters";
-    return false;
-  }
-  for (uint16_t method_idx = 0; method_idx < dex_data.num_method_ids; method_idx++) {
-    if (dex_data.GetHotnessInfo(method_idx).IsInProfile()) {
-      READ_UINT(uint16_t, buffer, dex_data.method_counters[method_idx], error);
-    }
-  }
-
-  return true;
-}
-
 // TODO(calin): Fix this API. ProfileCompilationInfo::Load should be static and
 // return a unique pointer to a ProfileCompilationInfo upon success.
 bool ProfileCompilationInfo::Load(
@@ -1189,10 +1107,11 @@
 bool ProfileCompilationInfo::VerifyProfileData(const std::vector<const DexFile*>& dex_files) {
   std::unordered_map<std::string, const DexFile*> key_to_dex_file;
   for (const DexFile* dex_file : dex_files) {
-    key_to_dex_file.emplace(GetProfileDexFileKey(dex_file->GetLocation()), dex_file);
+    key_to_dex_file.emplace(GetProfileDexFileBaseKey(dex_file->GetLocation()), dex_file);
   }
   for (const DexFileData* dex_data : info_) {
-    const auto it = key_to_dex_file.find(dex_data->profile_key);
+    // We need to remove any annotation from the key during verification.
+    const auto it = key_to_dex_file.find(GetBaseKeyFromAugmentedKey(dex_data->profile_key));
     if (it == key_to_dex_file.end()) {
       // It is okay if profile contains data for additional dex files.
       continue;
@@ -1235,13 +1154,13 @@
         }
 
         const ClassSet &classes = dex_pc_data.classes;
-        SafeMap<uint8_t, std::vector<dex::TypeIndex>> dex_to_classes_map;
+        SafeMap<ProfileIndexType, std::vector<dex::TypeIndex>> dex_to_classes_map;
         // Group the classes by dex. We expect that most of the classes will come from
         // the same dex, so this will be more efficient than encoding the dex index
         // for each class reference.
         GroupClassesByDex(classes, &dex_to_classes_map);
         for (const auto &dex_it : dex_to_classes_map) {
-          uint8_t dex_profile_index = dex_it.first;
+          ProfileIndexType dex_profile_index = dex_it.first;
           const auto dex_file_inline_cache_it = key_to_dex_file.find(
               info_[dex_profile_index]->profile_key);
           if (dex_file_inline_cache_it == key_to_dex_file.end()) {
@@ -1391,7 +1310,7 @@
   }
 
   // Read profile header: magic + version + number_of_dex_files.
-  uint8_t number_of_dex_files;
+  ProfileIndexType number_of_dex_files;
   uint32_t uncompressed_data_size;
   uint32_t compressed_data_size;
   status = ReadProfileHeader(*source,
@@ -1405,16 +1324,16 @@
   }
   // Allow large profiles for non target builds for the case where we are merging many profiles
   // to generate a boot image profile.
-  if (kIsTargetBuild && uncompressed_data_size > kProfileSizeErrorThresholdInBytes) {
+  if (uncompressed_data_size > GetSizeErrorThresholdBytes()) {
     LOG(ERROR) << "Profile data size exceeds "
-               << std::to_string(kProfileSizeErrorThresholdInBytes)
-               << " bytes";
+               << GetSizeErrorThresholdBytes()
+               << " bytes. It has " << uncompressed_data_size << " bytes.";
     return kProfileLoadBadData;
   }
-  if (uncompressed_data_size > kProfileSizeWarningThresholdInBytes) {
+  if (uncompressed_data_size > GetSizeWarningThresholdBytes()) {
     LOG(WARNING) << "Profile data size exceeds "
-                 << std::to_string(kProfileSizeWarningThresholdInBytes)
-                 << " bytes";
+                 << GetSizeWarningThresholdBytes()
+                 << " bytes. It has " << uncompressed_data_size << " bytes.";
   }
 
   std::unique_ptr<uint8_t[]> compressed_data(new uint8_t[compressed_data_size]);
@@ -1443,7 +1362,7 @@
 
   std::vector<ProfileLineHeader> profile_line_headers;
   // Read profile line headers.
-  for (uint8_t k = 0; k < number_of_dex_files; k++) {
+  for (ProfileIndexType k = 0; k < number_of_dex_files; k++) {
     ProfileLineHeader line_header;
 
     // First, read the line header to get the amount of data we need to read.
@@ -1454,18 +1373,19 @@
     profile_line_headers.push_back(line_header);
   }
 
-  SafeMap<uint8_t, uint8_t> dex_profile_index_remap;
+  SafeMap<ProfileIndexType, ProfileIndexType> dex_profile_index_remap;
   if (!RemapProfileIndex(profile_line_headers, filter_fn, &dex_profile_index_remap)) {
     return kProfileLoadBadData;
   }
 
-  for (uint8_t k = 0; k < number_of_dex_files; k++) {
-    if (!filter_fn(profile_line_headers[k].dex_location, profile_line_headers[k].checksum)) {
+  for (ProfileIndexType k = 0; k < number_of_dex_files; k++) {
+    if (!filter_fn(profile_line_headers[k].profile_key, profile_line_headers[k].checksum)) {
       // We have to skip the line. Advanced the current pointer of the buffer.
       size_t profile_line_size =
            profile_line_headers[k].class_set_size * sizeof(uint16_t) +
            profile_line_headers[k].method_region_size_bytes +
-           DexFileData::ComputeBitmapStorage(profile_line_headers[k].num_method_ids);
+           DexFileData::ComputeBitmapStorage(IsForBootImage(),
+              profile_line_headers[k].num_method_ids);
       uncompressed_data.Advance(profile_line_size);
     } else {
       // Now read the actual profile line.
@@ -1481,13 +1401,6 @@
     }
   }
 
-  if (StoresAggregationCounters()) {
-    if (!uncompressed_data.ReadUintAndAdvance<uint16_t>(&aggregation_count_)) {
-      *error = "Cannot read the global aggregation count";
-      return kProfileLoadBadData;
-    }
-  }
-
   // Check that we read everything and that profiles don't contain junk data.
   if (uncompressed_data.CountUnreadBytes() > 0) {
     *error = "Unexpected content in the profile file: " +
@@ -1501,32 +1414,32 @@
 bool ProfileCompilationInfo::RemapProfileIndex(
     const std::vector<ProfileLineHeader>& profile_line_headers,
     const ProfileLoadFilterFn& filter_fn,
-    /*out*/SafeMap<uint8_t, uint8_t>* dex_profile_index_remap) {
+    /*out*/SafeMap<ProfileIndexType, ProfileIndexType>* dex_profile_index_remap) {
   // First verify that all checksums match. This will avoid adding garbage to
   // the current profile info.
   // Note that the number of elements should be very small, so this should not
   // be a performance issue.
   for (const ProfileLineHeader& other_profile_line_header : profile_line_headers) {
-    if (!filter_fn(other_profile_line_header.dex_location, other_profile_line_header.checksum)) {
+    if (!filter_fn(other_profile_line_header.profile_key, other_profile_line_header.checksum)) {
       continue;
     }
     // verify_checksum is false because we want to differentiate between a missing dex data and
     // a mismatched checksum.
-    const DexFileData* dex_data = FindDexData(other_profile_line_header.dex_location,
+    const DexFileData* dex_data = FindDexData(other_profile_line_header.profile_key,
                                               /* checksum= */ 0u,
                                               /* verify_checksum= */ false);
     if ((dex_data != nullptr) && (dex_data->checksum != other_profile_line_header.checksum)) {
-      LOG(WARNING) << "Checksum mismatch for dex " << other_profile_line_header.dex_location;
+      LOG(WARNING) << "Checksum mismatch for dex " << other_profile_line_header.profile_key;
       return false;
     }
   }
   // All checksums match. Import the data.
   uint32_t num_dex_files = static_cast<uint32_t>(profile_line_headers.size());
   for (uint32_t i = 0; i < num_dex_files; i++) {
-    if (!filter_fn(profile_line_headers[i].dex_location, profile_line_headers[i].checksum)) {
+    if (!filter_fn(profile_line_headers[i].profile_key, profile_line_headers[i].checksum)) {
       continue;
     }
-    const DexFileData* dex_data = GetOrAddDexFileData(profile_line_headers[i].dex_location,
+    const DexFileData* dex_data = GetOrAddDexFileData(profile_line_headers[i].profile_key,
                                                       profile_line_headers[i].checksum,
                                                       profile_line_headers[i].num_method_ids);
     if (dex_data == nullptr) {
@@ -1593,6 +1506,11 @@
 
 bool ProfileCompilationInfo::MergeWith(const ProfileCompilationInfo& other,
                                        bool merge_classes) {
+  if (!SameVersion(other)) {
+    LOG(WARNING) << "Cannot merge different profile versions";
+    return false;
+  }
+
   // First verify that all checksums match. This will avoid adding garbage to
   // the current profile info.
   // Note that the number of elements should be very small, so this should not
@@ -1620,7 +1538,7 @@
 
   // First, build a mapping from other_dex_profile_index to this_dex_profile_index.
   // This will make sure that the ClassReferences  will point to the correct dex file.
-  SafeMap<uint8_t, uint8_t> dex_profile_index_remap;
+  SafeMap<ProfileIndexType, ProfileIndexType> dex_profile_index_remap;
   for (const DexFileData* other_dex_data : other.info_) {
     const DexFileData* dex_data = GetOrAddDexFileData(other_dex_data->profile_key,
                                                       other_dex_data->checksum,
@@ -1637,33 +1555,6 @@
                                                                  other_dex_data->checksum));
     DCHECK(dex_data != nullptr);
 
-    // Merge counters for methods and class. Must be done before we merge the bitmaps so that
-    // we can tell if the data is new or not.
-    if (StoresAggregationCounters()) {
-      // Class aggregation counters.
-      if (merge_classes) {
-        for (const dex::TypeIndex& type_idx : other_dex_data->class_set) {
-          uint16_t amount = other.StoresAggregationCounters()
-              ? other_dex_data->class_counters[type_idx.index_]
-              : (dex_data->ContainsClass(type_idx) ? 1 : 0);
-
-          dex_data->class_counters[type_idx.index_] =
-              IncrementAggregationCounter(dex_data->class_counters[type_idx.index_], amount);
-        }
-      }
-
-      // Method aggregation counters.
-      for (uint16_t method_idx = 0; method_idx < other_dex_data->num_method_ids; method_idx++) {
-        if (other_dex_data->GetHotnessInfo(method_idx).IsInProfile()) {
-          uint16_t amount = other.StoresAggregationCounters()
-              ? other_dex_data->method_counters[method_idx]
-              : (dex_data->GetHotnessInfo(method_idx).IsInProfile() ? 1 : 0);
-          dex_data->method_counters[method_idx] =
-              IncrementAggregationCounter(dex_data->method_counters[method_idx], amount);
-        }
-      }
-    }
-
     // Merge the classes.
     if (merge_classes) {
       dex_data->class_set.insert(other_dex_data->class_set.begin(),
@@ -1673,7 +1564,7 @@
     // Merge the methods and the inline caches.
     for (const auto& other_method_it : other_dex_data->method_map) {
       uint16_t other_method_index = other_method_it.first;
-      InlineCacheMap* inline_cache = dex_data->FindOrAddMethod(other_method_index);
+      InlineCacheMap* inline_cache = dex_data->FindOrAddHotMethod(other_method_index);
       if (inline_cache == nullptr) {
         return false;
       }
@@ -1699,54 +1590,22 @@
     dex_data->MergeBitmap(*other_dex_data);
   }
 
-  // Global aggregation counter.
-  if (StoresAggregationCounters()) {
-    uint16_t amount = other.StoresAggregationCounters() ? other.aggregation_count_ : 1;
-    aggregation_count_ = IncrementAggregationCounter(aggregation_count_, amount);
-  }
-
   return true;
 }
 
-const ProfileCompilationInfo::DexFileData* ProfileCompilationInfo::FindDexData(
-    const DexFile* dex_file) const {
-  return FindDexData(GetProfileDexFileKey(dex_file->GetLocation()),
-                     dex_file->GetLocationChecksum());
-}
-
 ProfileCompilationInfo::MethodHotness ProfileCompilationInfo::GetMethodHotness(
-    const MethodReference& method_ref) const {
-  const DexFileData* dex_data = FindDexData(method_ref.dex_file);
+    const MethodReference& method_ref,
+    const ProfileSampleAnnotation& annotation) const {
+  const DexFileData* dex_data = FindDexDataUsingAnnotations(method_ref.dex_file, annotation);
   return dex_data != nullptr
       ? dex_data->GetHotnessInfo(method_ref.index)
       : MethodHotness();
 }
 
-bool ProfileCompilationInfo::AddMethodHotness(const MethodReference& method_ref,
-                                              const MethodHotness& hotness) {
-  DexFileData* dex_data = GetOrAddDexFileData(method_ref.dex_file);
-  if (dex_data != nullptr) {
-    // TODO: Add inline caches.
-    return dex_data->AddMethod(
-        static_cast<MethodHotness::Flag>(hotness.GetFlags()), method_ref.index);
-  }
-  return false;
-}
-
-ProfileCompilationInfo::MethodHotness ProfileCompilationInfo::GetMethodHotness(
-    const std::string& dex_location,
-    uint32_t dex_checksum,
-    uint16_t dex_method_index) const {
-  const DexFileData* dex_data = FindDexData(GetProfileDexFileKey(dex_location), dex_checksum);
-  return dex_data != nullptr ? dex_data->GetHotnessInfo(dex_method_index) : MethodHotness();
-}
-
-
-std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> ProfileCompilationInfo::GetMethod(
-    const std::string& dex_location,
-    uint32_t dex_checksum,
-    uint16_t dex_method_index) const {
-  MethodHotness hotness(GetMethodHotness(dex_location, dex_checksum, dex_method_index));
+std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo>
+ProfileCompilationInfo::GetHotMethodInfo(const MethodReference& method_ref,
+                                         const ProfileSampleAnnotation& annotation) const {
+  MethodHotness hotness(GetMethodHotness(method_ref, annotation));
   if (!hotness.IsHot()) {
     return nullptr;
   }
@@ -1756,7 +1615,7 @@
 
   pmi->dex_references.resize(info_.size());
   for (const DexFileData* dex_data : info_) {
-    pmi->dex_references[dex_data->profile_index].dex_location = dex_data->profile_key;
+    pmi->dex_references[dex_data->profile_index].profile_key = dex_data->profile_key;
     pmi->dex_references[dex_data->profile_index].dex_checksum = dex_data->checksum;
     pmi->dex_references[dex_data->profile_index].num_method_ids = dex_data->num_method_ids;
   }
@@ -1765,8 +1624,10 @@
 }
 
 
-bool ProfileCompilationInfo::ContainsClass(const DexFile& dex_file, dex::TypeIndex type_idx) const {
-  const DexFileData* dex_data = FindDexData(&dex_file);
+bool ProfileCompilationInfo::ContainsClass(const DexFile& dex_file,
+                                           dex::TypeIndex type_idx,
+                                           const ProfileSampleAnnotation& annotation) const {
+  const DexFileData* dex_data = FindDexDataUsingAnnotations(&dex_file, annotation);
   return (dex_data != nullptr) && dex_data->ContainsClass(type_idx);
 }
 
@@ -1789,11 +1650,20 @@
 std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>& dex_files,
                                              bool print_full_dex_location) const {
   std::ostringstream os;
-  if (info_.empty()) {
-    return "ProfileInfo: empty";
-  }
 
-  os << "ProfileInfo:";
+  os << "ProfileInfo [";
+
+  for (size_t k = 0; k <  kProfileVersionSize - 1; k++) {
+    // Iterate to 'kProfileVersionSize - 1' because the version_ ends with '\0'
+    // which we don't want to print.
+    os << static_cast<char>(version_[k]);
+  }
+  os << "]\n";
+
+  if (info_.empty()) {
+    os << "-empty-";
+    return os.str();
+  }
 
   const std::string kFirstDexFileKeySubstitute = "!classes.dex";
 
@@ -1803,14 +1673,15 @@
       os << dex_data->profile_key;
     } else {
       // Replace the (empty) multidex suffix of the first key with a substitute for easier reading.
-      std::string multidex_suffix = DexFileLoader::GetMultiDexSuffix(dex_data->profile_key);
+      std::string multidex_suffix = DexFileLoader::GetMultiDexSuffix(
+          GetBaseKeyFromAugmentedKey(dex_data->profile_key));
       os << (multidex_suffix.empty() ? kFirstDexFileKeySubstitute : multidex_suffix);
     }
     os << " [index=" << static_cast<uint32_t>(dex_data->profile_index) << "]";
     os << " [checksum=" << std::hex << dex_data->checksum << "]" << std::dec;
     const DexFile* dex_file = nullptr;
     for (const DexFile* current : dex_files) {
-      if (dex_data->profile_key == current->GetLocation() &&
+      if (GetBaseKeyFromAugmentedKey(dex_data->profile_key) == current->GetLocation() &&
           dex_data->checksum == current->GetLocationChecksum()) {
         dex_file = current;
       }
@@ -1875,9 +1746,10 @@
     /*out*/std::set<dex::TypeIndex>* class_set,
     /*out*/std::set<uint16_t>* hot_method_set,
     /*out*/std::set<uint16_t>* startup_method_set,
-    /*out*/std::set<uint16_t>* post_startup_method_method_set) const {
+    /*out*/std::set<uint16_t>* post_startup_method_method_set,
+    const ProfileSampleAnnotation& annotation) const {
   std::set<std::string> ret;
-  const DexFileData* dex_data = FindDexData(&dex_file);
+  const DexFileData* dex_data = FindDexDataUsingAnnotations(&dex_file, annotation);
   if (dex_data == nullptr) {
     return false;
   }
@@ -1899,10 +1771,14 @@
   return true;
 }
 
+bool ProfileCompilationInfo::SameVersion(const ProfileCompilationInfo& other) const {
+  return memcmp(version_, other.version_, kProfileVersionSize) == 0;
+}
+
 bool ProfileCompilationInfo::Equals(const ProfileCompilationInfo& other) {
   // No need to compare profile_key_map_. That's only a cache for fast search.
   // All the information is already in the info_ vector.
-  if (memcmp(version_, other.version_, kProfileVersionSize) != 0) {
+  if (!SameVersion(other)) {
     return false;
   }
   if (info_.size() != other.info_.size()) {
@@ -1915,39 +1791,8 @@
       return false;
     }
   }
-  if (aggregation_count_ != other.aggregation_count_) {
-    return false;
-  }
-  return true;
-}
 
-std::set<DexCacheResolvedClasses> ProfileCompilationInfo::GetResolvedClasses(
-    const std::vector<const DexFile*>& dex_files) const {
-  std::unordered_map<std::string, const DexFile* > key_to_dex_file;
-  for (const DexFile* dex_file : dex_files) {
-    key_to_dex_file.emplace(GetProfileDexFileKey(dex_file->GetLocation()), dex_file);
-  }
-  std::set<DexCacheResolvedClasses> ret;
-  for (const DexFileData* dex_data : info_) {
-    const auto it = key_to_dex_file.find(dex_data->profile_key);
-    if (it != key_to_dex_file.end()) {
-      const DexFile* dex_file = it->second;
-      const std::string& dex_location = dex_file->GetLocation();
-      if (dex_data->checksum != it->second->GetLocationChecksum()) {
-        LOG(ERROR) << "Dex checksum mismatch when getting resolved classes from profile for "
-            << "location " << dex_location << " (checksum=" << dex_file->GetLocationChecksum()
-            << ", profile checksum=" << dex_data->checksum;
-        return std::set<DexCacheResolvedClasses>();
-      }
-      DexCacheResolvedClasses classes(dex_location,
-                                      dex_location,
-                                      dex_data->checksum,
-                                      dex_data->num_method_ids);
-      classes.AddClasses(dex_data->class_set.begin(), dex_data->class_set.end());
-      ret.insert(classes);
-    }
-  }
-  return ret;
+  return true;
 }
 
 // Naive implementation to generate a random profile file suitable for testing.
@@ -1973,8 +1818,9 @@
 
   for (uint16_t i = 0; i < number_of_dex_files; i++) {
     std::string dex_location = DexFileLoader::GetMultiDexLocation(i, base_dex_location.c_str());
-    std::string profile_key = GetProfileDexFileKey(dex_location);
+    std::string profile_key = info.GetProfileDexFileBaseKey(dex_location);
 
+    DexFileData* const data = info.GetOrAddDexFileData(profile_key, /*checksum=*/ 0, max_method);
     for (uint16_t m = 0; m < number_of_methods; m++) {
       uint16_t method_idx = rand() % max_method;
       if (m < (number_of_methods / kFavorSplit)) {
@@ -1983,11 +1829,7 @@
       // Alternate between startup and post startup.
       uint32_t flags = MethodHotness::kFlagHot;
       flags |= ((m & 1) != 0) ? MethodHotness::kFlagPostStartup : MethodHotness::kFlagStartup;
-      info.AddMethodIndex(static_cast<MethodHotness::Flag>(flags),
-                          profile_key,
-                          /*checksum=*/ 0,
-                          method_idx,
-                          max_method);
+      data->AddMethod(static_cast<MethodHotness::Flag>(flags), method_idx);
     }
 
     for (uint16_t c = 0; c < number_of_classes; c++) {
@@ -1995,7 +1837,7 @@
       if (c < (number_of_classes / kFavorSplit)) {
         type_idx %= kFavorFirstN;
       }
-      info.AddClassIndex(profile_key, 0, dex::TypeIndex(type_idx), max_method);
+      data->class_set.insert(dex::TypeIndex(type_idx));
     }
   }
   return info.Save(fd);
@@ -2024,17 +1866,17 @@
     return vec;
   };
   for (std::unique_ptr<const DexFile>& dex_file : dex_files) {
-    const std::string& location = dex_file->GetLocation();
+    const std::string& profile_key = dex_file->GetLocation();
     uint32_t checksum = dex_file->GetLocationChecksum();
 
     uint32_t number_of_classes = dex_file->NumClassDefs();
     uint32_t classes_required_in_profile = (number_of_classes * class_percentage) / 100;
+
+    DexFileData* const data = info.GetOrAddDexFileData(
+          profile_key, checksum, dex_file->NumMethodIds());
     for (uint32_t class_index : create_shuffled_range(classes_required_in_profile,
                                                       number_of_classes)) {
-      info.AddClassIndex(location,
-                         checksum,
-                         dex_file->GetClassDef(class_index).class_idx_,
-                         dex_file->NumMethodIds());
+      data->class_set.insert(dex_file->GetClassDef(class_index).class_idx_);
     }
 
     uint32_t number_of_methods = dex_file->NumMethodIds();
@@ -2046,8 +1888,7 @@
       flags |= ((method_index & 1) != 0)
                    ? MethodHotness::kFlagPostStartup
                    : MethodHotness::kFlagStartup;
-      info.AddMethodIndex(static_cast<MethodHotness::Flag>(flags),
-                          MethodReference(dex_file.get(), method_index));
+      data->AddMethod(static_cast<MethodHotness::Flag>(flags), method_index);
     }
   }
   return info.Save(fd);
@@ -2094,13 +1935,77 @@
   return true;
 }
 
+bool ProfileCompilationInfo::OfflineProfileMethodInfo::operator==(
+      const std::vector<ProfileMethodInfo::ProfileInlineCache>& runtime_caches) const {
+  if (inline_caches->size() != runtime_caches.size()) {
+    return false;
+  }
+
+  for (const auto& inline_cache_it : *inline_caches) {
+    uint16_t dex_pc = inline_cache_it.first;
+    const DexPcData dex_pc_data = inline_cache_it.second;
+
+    // Find the corresponding inline cache.
+    const ProfileMethodInfo::ProfileInlineCache* runtime_cache = nullptr;
+    for (const ProfileMethodInfo::ProfileInlineCache& pic : runtime_caches) {
+      if (pic.dex_pc == dex_pc) {
+        runtime_cache = &pic;
+        break;
+      }
+    }
+    // If not found, return false.
+    if (runtime_cache == nullptr) {
+      return false;
+    }
+    // Check that the inline cache properties match up.
+    if (dex_pc_data.is_missing_types) {
+      if (!runtime_cache->is_missing_types) {
+        return false;
+      } else {
+        // If the inline cache has missing types do not check the classes (they don't matter).
+        continue;
+      }
+    }
+
+    if (dex_pc_data.is_megamorphic) {
+      if (runtime_cache->classes.size() < ProfileCompilationInfo::kIndividualInlineCacheSize) {
+        return false;
+      } else {
+        // If the inline cache is megamorphic do not check the classes (they don't matter).
+        continue;
+      }
+    }
+
+    if (dex_pc_data.classes.size() != runtime_cache->classes.size()) {
+      return false;
+    }
+    // Verify that all classes match.
+    for (const ClassReference& class_ref : dex_pc_data.classes) {
+      bool found = false;
+      const DexReference& dex_ref = dex_references[class_ref.dex_profile_index];
+      for (const TypeReference& type_ref : runtime_cache->classes) {
+        if (class_ref.type_index == type_ref.TypeIndex() &&
+            dex_ref.MatchesDex(type_ref.dex_file)) {
+          found = true;
+          break;
+        }
+      }
+      if (!found) {
+        return false;
+      }
+    }
+  }
+  // If we didn't fail until now, then the two inline caches are equal.
+  return true;
+}
+
 bool ProfileCompilationInfo::IsEmpty() const {
   DCHECK_EQ(info_.empty(), profile_key_map_.empty());
   return info_.empty();
 }
 
 ProfileCompilationInfo::InlineCacheMap*
-ProfileCompilationInfo::DexFileData::FindOrAddMethod(uint16_t method_index) {
+ProfileCompilationInfo::DexFileData::FindOrAddHotMethod(uint16_t method_index) {
   if (method_index >= num_method_ids) {
     LOG(ERROR) << "Invalid method index " << method_index << ". num_method_ids=" << num_method_ids;
     return nullptr;
@@ -2120,7 +2025,7 @@
   SetMethodHotness(index, flags);
 
   if ((flags & MethodHotness::kFlagHot) != 0) {
-    ProfileCompilationInfo::InlineCacheMap* result = FindOrAddMethod(index);
+    ProfileCompilationInfo::InlineCacheMap* result = FindOrAddHotMethod(index);
     DCHECK(result != nullptr);
   }
   return true;
@@ -2129,22 +2034,36 @@
 void ProfileCompilationInfo::DexFileData::SetMethodHotness(size_t index,
                                                            MethodHotness::Flag flags) {
   DCHECK_LT(index, num_method_ids);
-  if ((flags & MethodHotness::kFlagStartup) != 0) {
-    method_bitmap.StoreBit(MethodBitIndex(/*startup=*/ true, index), /*value=*/ true);
-  }
-  if ((flags & MethodHotness::kFlagPostStartup) != 0) {
-    method_bitmap.StoreBit(MethodBitIndex(/*startup=*/ false, index), /*value=*/ true);
+  uint32_t lastFlag = is_for_boot_image
+      ? MethodHotness::kFlagLastBoot
+      : MethodHotness::kFlagLastRegular;
+  for (uint32_t flag = MethodHotness::kFlagFirst; flag <= lastFlag; flag = flag << 1) {
+    if (flag == MethodHotness::kFlagHot) {
+      // There's no bit for hotness in the bitmap.
+      // We store the hotness by recording the method in the method list.
+      continue;
+    }
+    if ((flags & flag) != 0) {
+      method_bitmap.StoreBit(MethodFlagBitmapIndex(
+          static_cast<MethodHotness::Flag>(flag), index), /*value=*/ true);
+    }
   }
 }
 
 ProfileCompilationInfo::MethodHotness ProfileCompilationInfo::DexFileData::GetHotnessInfo(
     uint32_t dex_method_index) const {
   MethodHotness ret;
-  if (method_bitmap.LoadBit(MethodBitIndex(/*startup=*/ true, dex_method_index))) {
-    ret.AddFlag(MethodHotness::kFlagStartup);
-  }
-  if (method_bitmap.LoadBit(MethodBitIndex(/*startup=*/ false, dex_method_index))) {
-    ret.AddFlag(MethodHotness::kFlagPostStartup);
+  uint32_t lastFlag = is_for_boot_image
+      ? MethodHotness::kFlagLastBoot
+      : MethodHotness::kFlagLastRegular;
+  for (uint32_t flag = MethodHotness::kFlagFirst; flag <= lastFlag; flag = flag << 1) {
+    if (flag == MethodHotness::kFlagHot) {
+      continue;
+    }
+    if (method_bitmap.LoadBit(MethodFlagBitmapIndex(
+          static_cast<MethodHotness::Flag>(flag), dex_method_index))) {
+      ret.AddFlag(static_cast<MethodHotness::Flag>(flag));
+    }
   }
   auto it = method_map.find(dex_method_index);
   if (it != method_map.end()) {
@@ -2154,41 +2073,41 @@
   return ret;
 }
 
-int32_t ProfileCompilationInfo::DexFileData::GetMethodAggregationCounter(
-      uint16_t method_idx) const {
-  CHECK_GT(method_counters.size(), method_idx) << "Profile not prepared for aggregation counters";
-  if (!GetHotnessInfo(method_idx).IsInProfile()) {
-    return -1;
-  }
+// To simplify the implementation we use the MethodHotness flag values as indexes into the internal
+// bitmap representation. As such, they should never change unless the profile version is updated
+// and the implementation changed accordingly.
+static_assert(ProfileCompilationInfo::MethodHotness::kFlagFirst == 1 << 0);
+static_assert(ProfileCompilationInfo::MethodHotness::kFlagHot == 1 << 0);
+static_assert(ProfileCompilationInfo::MethodHotness::kFlagStartup == 1 << 1);
+static_assert(ProfileCompilationInfo::MethodHotness::kFlagPostStartup == 1 << 2);
+static_assert(ProfileCompilationInfo::MethodHotness::kFlagLastRegular == 1 << 2);
+static_assert(ProfileCompilationInfo::MethodHotness::kFlag32bit == 1 << 3);
+static_assert(ProfileCompilationInfo::MethodHotness::kFlag64bit == 1 << 4);
+static_assert(ProfileCompilationInfo::MethodHotness::kFlagSensitiveThread == 1 << 5);
+static_assert(ProfileCompilationInfo::MethodHotness::kFlagAmStartup == 1 << 6);
+static_assert(ProfileCompilationInfo::MethodHotness::kFlagAmPostStartup == 1 << 7);
+static_assert(ProfileCompilationInfo::MethodHotness::kFlagBoot == 1 << 8);
+static_assert(ProfileCompilationInfo::MethodHotness::kFlagPostBoot == 1 << 9);
+static_assert(ProfileCompilationInfo::MethodHotness::kFlagStartupBin == 1 << 10);
+static_assert(ProfileCompilationInfo::MethodHotness::kFlagStartupMaxBin == 1 << 15);
+static_assert(ProfileCompilationInfo::MethodHotness::kFlagLastBoot == 1 << 15);
 
-  return method_counters[method_idx];
+size_t ProfileCompilationInfo::DexFileData::MethodFlagBitmapIndex(
+      MethodHotness::Flag flag, size_t method_index) const {
+  DCHECK_LT(method_index, num_method_ids);
+  // The format is [startup bitmap][post startup bitmap][AmStartup][...]
+  // This compresses better than ([startup bit][post startup bit])*
+  return method_index + FlagBitmapIndex(flag) * num_method_ids;
 }
 
-int32_t ProfileCompilationInfo::DexFileData::GetClassAggregationCounter(uint16_t type_idx) const {
-  CHECK_GT(class_counters.size(), type_idx) << "Profile not prepared for aggregation counters";
-  if (!ContainsClass(dex::TypeIndex(type_idx))) {
-    return -1;
-  }
-
-  return class_counters[type_idx];
-}
-
-int32_t ProfileCompilationInfo::GetMethodAggregationCounter(
-      const MethodReference& method_ref) const {
-  CHECK(StoresAggregationCounters()) << "Profile not prepared for aggregation counters";
-  const DexFileData* dex_data = FindDexData(method_ref.dex_file);
-  return dex_data == nullptr ? -1 : dex_data->GetMethodAggregationCounter(method_ref.index);
-}
-
-int32_t ProfileCompilationInfo::GetClassAggregationCounter(const TypeReference& type_ref) const {
-  CHECK(StoresAggregationCounters()) << "Profile not prepared for aggregation counters";
-  const DexFileData* dex_data = FindDexData(type_ref.dex_file);
-  return dex_data == nullptr ? -1 : dex_data->GetClassAggregationCounter(type_ref.index);
-}
-
-uint16_t ProfileCompilationInfo::GetAggregationCounter() const {
-  CHECK(StoresAggregationCounters()) << "Profile not prepared for aggregation counters";
-  return aggregation_count_;
+size_t ProfileCompilationInfo::DexFileData::FlagBitmapIndex(MethodHotness::Flag flag) {
+  DCHECK(flag != MethodHotness::kFlagHot);
+  DCHECK(IsPowerOfTwo(static_cast<uint32_t>(flag)));
+  // We arrange the method flags in order, starting with the startup flag.
+  // The kFlagHot is not encoded in the bitmap and thus not expected as an
+  // argument here. Since all the other flags start at 1 we have to subtract
+  // one for the power of 2.
+  return WhichPowerOf2(static_cast<uint32_t>(flag)) - 1;
 }
 
 ProfileCompilationInfo::DexPcData*
@@ -2197,10 +2116,11 @@
 }
 
 HashSet<std::string> ProfileCompilationInfo::GetClassDescriptors(
-    const std::vector<const DexFile*>& dex_files) {
+    const std::vector<const DexFile*>& dex_files,
+    const ProfileSampleAnnotation& annotation) {
   HashSet<std::string> ret;
   for (const DexFile* dex_file : dex_files) {
-    const DexFileData* data = FindDexData(dex_file);
+    const DexFileData* data = FindDexDataUsingAnnotations(dex_file, annotation);
     if (data != nullptr) {
       for (dex::TypeIndex type_idx : data->class_set) {
         if (!dex_file->IsTypeIndexValid(type_idx)) {
@@ -2255,8 +2175,9 @@
     for (DexFileData* dex_data : info_) {
       if (dex_data->checksum == dex_file->GetLocationChecksum()
           && dex_data->num_method_ids == dex_file->NumMethodIds()) {
-        std::string new_profile_key = GetProfileDexFileKey(dex_file->GetLocation());
-        if (dex_data->profile_key != new_profile_key) {
+        std::string new_profile_key = GetProfileDexFileBaseKey(dex_file->GetLocation());
+        std::string dex_data_base_key = GetBaseKeyFromAugmentedKey(dex_data->profile_key);
+        if (dex_data_base_key != new_profile_key) {
           if (profile_key_map_.find(new_profile_key) != profile_key_map_.end()) {
             // We can't update the key if the new key belongs to a different dex file.
             LOG(ERROR) << "Cannot update profile key to " << new_profile_key
@@ -2264,7 +2185,10 @@
             return false;
           }
           profile_key_map_.erase(dex_data->profile_key);
-          profile_key_map_.Put(new_profile_key, dex_data->profile_index);
+          // Retain the annotation (if any) during the renaming by re-attaching the info
+          // from the old key.
+          profile_key_map_.Put(MigrateAnnotationInfo(new_profile_key, dex_data->profile_key),
+                               dex_data->profile_index);
           dex_data->profile_key = new_profile_key;
         }
       }
@@ -2287,46 +2211,185 @@
   profile_key_map_.clear();
 }
 
-bool ProfileCompilationInfo::StoresAggregationCounters() const {
-  return memcmp(version_, kProfileVersionWithCounters, sizeof(kProfileVersionWithCounters)) == 0;
+void ProfileCompilationInfo::ClearDataAndAdjustVersion(bool for_boot_image) {
+  ClearData();
+  memcpy(version_,
+         for_boot_image ? kProfileVersionForBootImage : kProfileVersion,
+         kProfileVersionSize);
 }
 
-void ProfileCompilationInfo::PrepareForAggregationCounters() {
-  InitProfileVersionInternal(kProfileVersionWithCounters);
-  for (DexFileData* dex_data : info_) {
-    dex_data->PrepareForAggregationCounters();
-  }
-}
-
-void ProfileCompilationInfo::DexFileData::PrepareForAggregationCounters() {
-  method_counters.resize(num_method_ids);
-  // TODO(calin): we should store the maximum number of types in the profile.
-  // It will simplify quite a few things and make this storage allocation
-  // more efficient.
-  size_t max_elems = 1 << (kBitsPerByte * sizeof(uint16_t));
-  class_counters.resize(max_elems);
+bool ProfileCompilationInfo::IsForBootImage() const {
+  return memcmp(version_, kProfileVersionForBootImage, sizeof(kProfileVersionForBootImage)) == 0;
 }
 
 const uint8_t* ProfileCompilationInfo::GetVersion() const {
   return version_;
 }
 
-void ProfileCompilationInfo::InitProfileVersionInternal(const uint8_t version[]) {
-  CHECK(
-      (memcmp(version, kProfileVersion, kProfileVersionSize) == 0) ||
-      (memcmp(version, kProfileVersionWithCounters, kProfileVersionSize) == 0));
-  memcpy(version_, version, kProfileVersionSize);
-}
-
-uint16_t ProfileCompilationInfo::DexFileData::GetNumMethodCounters() const {
-  uint16_t num_method_counters = 0;
-  for (uint16_t method_idx = 0; method_idx < num_method_ids; method_idx++) {
-    num_method_counters += GetHotnessInfo(method_idx).IsInProfile() ? 1 : 0;
-  }
-  return num_method_counters;
-}
-
 bool ProfileCompilationInfo::DexFileData::ContainsClass(const dex::TypeIndex type_index) const {
   return class_set.find(type_index) != class_set.end();
 }
+
+size_t ProfileCompilationInfo::GetSizeWarningThresholdBytes() const {
+  return IsForBootImage() ?  kSizeWarningThresholdBootBytes : kSizeWarningThresholdBytes;
+}
+
+size_t ProfileCompilationInfo::GetSizeErrorThresholdBytes() const {
+  return IsForBootImage() ?  kSizeErrorThresholdBootBytes : kSizeErrorThresholdBytes;
+}
+
+std::ostream& operator<<(std::ostream& stream,
+                         const ProfileCompilationInfo::DexReference& dex_ref) {
+  stream << "[profile_key=" << dex_ref.profile_key
+         << ",dex_checksum=" << std::hex << dex_ref.dex_checksum << std::dec
+         << ",num_method_ids=" << dex_ref.num_method_ids
+         << "]";
+  return stream;
+}
+
+bool ProfileCompilationInfo::ProfileSampleAnnotation::operator==(
+      const ProfileSampleAnnotation& other) const {
+  return origin_package_name_ == other.origin_package_name_;
+}
+
+void ProfileCompilationInfo::WriteProfileIndex(
+    std::vector<uint8_t>* buffer, ProfileIndexType value) const {
+  if (IsForBootImage()) {
+    AddUintToBuffer(buffer, value);
+  } else {
+    AddUintToBuffer(buffer, static_cast<ProfileIndexTypeRegular>(value));
+  }
+}
+
+bool ProfileCompilationInfo::ReadProfileIndex(
+    SafeBuffer& safe_buffer, ProfileIndexType* value) const {
+  if (IsForBootImage()) {
+    return safe_buffer.ReadUintAndAdvance<ProfileIndexType>(value);
+  } else {
+    ProfileIndexTypeRegular out;
+    bool result = safe_buffer.ReadUintAndAdvance<ProfileIndexTypeRegular>(&out);
+    *value = out;
+    return result;
+  }
+}
+
+ProfileCompilationInfo::ProfileIndexType ProfileCompilationInfo::MaxProfileIndex() const {
+  return IsForBootImage()
+      ? std::numeric_limits<ProfileIndexType>::max()
+      : std::numeric_limits<ProfileIndexTypeRegular>::max();
+}
+
+uint32_t ProfileCompilationInfo::SizeOfProfileIndexType() const {
+  return IsForBootImage()
+    ? sizeof(ProfileIndexType)
+    : sizeof(ProfileIndexTypeRegular);
+}
+
+FlattenProfileData::FlattenProfileData() :
+    max_aggregation_for_methods_(0),
+    max_aggregation_for_classes_(0) {
+}
+
+FlattenProfileData::ItemMetadata::ItemMetadata() :
+    flags_(0) {
+}
+
+FlattenProfileData::ItemMetadata::ItemMetadata(const ItemMetadata& other) :
+    flags_(other.flags_),
+    annotations_(other.annotations_) {
+}
+
+std::unique_ptr<FlattenProfileData> ProfileCompilationInfo::ExtractProfileData(
+    const std::vector<std::unique_ptr<const DexFile>>& dex_files) const {
+
+  std::unique_ptr<FlattenProfileData> result(new FlattenProfileData());
+
+  auto create_metadata_fn = []() { return FlattenProfileData::ItemMetadata(); };
+
+  // Iterate through all the dex files, find the methods/classes associated with each of them,
+  // and add them to the flatten result.
+  for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
+    // Find all the dex data for the given dex file.
+    // We may have multiple dex data if the methods or classes were added using
+    // different annotations.
+    std::vector<const DexFileData*> all_dex_data;
+    FindAllDexData(dex_file.get(), &all_dex_data);
+    for (const DexFileData* dex_data : all_dex_data) {
+      // Extract the annotation from the key as we want to store it in the flatten result.
+      ProfileSampleAnnotation annotation = GetAnnotationFromKey(dex_data->profile_key);
+
+      // Check which methods from the current dex files are in the profile.
+      for (uint32_t method_idx = 0; method_idx < dex_data->num_method_ids; ++method_idx) {
+        MethodHotness hotness = dex_data->GetHotnessInfo(method_idx);
+        if (!hotness.IsInProfile()) {
+          // Not in the profile, continue.
+          continue;
+        }
+        // The method is in the profile, create metadata item for it and added to the result.
+        MethodReference ref(dex_file.get(), method_idx);
+        FlattenProfileData::ItemMetadata& metadata =
+            result->method_metadata_.GetOrCreate(ref, create_metadata_fn);
+        metadata.flags_ |= hotness.flags_;
+        metadata.annotations_.push_back(annotation);
+        // Update the max aggregation counter for methods.
+        // This is essentially a cache, to avoid traversing all the methods just to find out
+        // this value.
+        result->max_aggregation_for_methods_ = std::max(
+            result->max_aggregation_for_methods_,
+            static_cast<uint32_t>(metadata.annotations_.size()));
+      }
+
+      // Check which classes from the current dex files are in the profile.
+      for (const dex::TypeIndex& type_index : dex_data->class_set) {
+        TypeReference ref(dex_file.get(), type_index);
+        FlattenProfileData::ItemMetadata& metadata =
+            result->class_metadata_.GetOrCreate(ref, create_metadata_fn);
+        metadata.annotations_.push_back(annotation);
+        // Update the max aggregation counter for classes.
+        result->max_aggregation_for_classes_ = std::max(
+            result->max_aggregation_for_classes_,
+            static_cast<uint32_t>(metadata.annotations_.size()));
+      }
+    }
+  }
+
+  return result;
+}
+
+void FlattenProfileData::MergeData(const FlattenProfileData& other) {
+  auto create_metadata_fn = []() { return FlattenProfileData::ItemMetadata(); };
+  for (const auto& it : other.method_metadata_) {
+    const MethodReference& otherRef = it.first;
+    const FlattenProfileData::ItemMetadata otherData = it.second;
+    const std::list<ProfileCompilationInfo::ProfileSampleAnnotation>& other_annotations =
+        otherData.GetAnnotations();
+
+    FlattenProfileData::ItemMetadata& metadata =
+        method_metadata_.GetOrCreate(otherRef, create_metadata_fn);
+    metadata.flags_ |= otherData.GetFlags();
+    metadata.annotations_.insert(
+        metadata.annotations_.end(), other_annotations.begin(), other_annotations.end());
+
+    max_aggregation_for_methods_ = std::max(
+          max_aggregation_for_methods_,
+          static_cast<uint32_t>(metadata.annotations_.size()));
+  }
+  for (const auto& it : other.class_metadata_) {
+    const TypeReference& otherRef = it.first;
+    const FlattenProfileData::ItemMetadata otherData = it.second;
+    const std::list<ProfileCompilationInfo::ProfileSampleAnnotation>& other_annotations =
+        otherData.GetAnnotations();
+
+    FlattenProfileData::ItemMetadata& metadata =
+        class_metadata_.GetOrCreate(otherRef, create_metadata_fn);
+    metadata.flags_ |= otherData.GetFlags();
+    metadata.annotations_.insert(
+        metadata.annotations_.end(), other_annotations.begin(), other_annotations.end());
+
+    max_aggregation_for_classes_ = std::max(
+          max_aggregation_for_classes_,
+          static_cast<uint32_t>(metadata.annotations_.size()));
+  }
+}
+
 }  // namespace art
diff --git a/libprofile/profile/profile_compilation_info.h b/libprofile/profile/profile_compilation_info.h
index fa4615b..0b0c423 100644
--- a/libprofile/profile/profile_compilation_info.h
+++ b/libprofile/profile/profile_compilation_info.h
@@ -17,6 +17,7 @@
 #ifndef ART_LIBPROFILE_PROFILE_PROFILE_COMPILATION_INFO_H_
 #define ART_LIBPROFILE_PROFILE_PROFILE_COMPILATION_INFO_H_
 
+#include <list>
 #include <set>
 #include <vector>
 
@@ -28,7 +29,6 @@
 #include "base/malloc_arena_pool.h"
 #include "base/mem_map.h"
 #include "base/safe_map.h"
-#include "dex/dex_cache_resolved_classes.h"
 #include "dex/dex_file.h"
 #include "dex/dex_file_types.h"
 #include "dex/method_reference.h"
@@ -62,6 +62,8 @@
   std::vector<ProfileInlineCache> inline_caches;
 };
 
+class FlattenProfileData;
+
 /**
  * Profile information in a format suitable to be queried by the compiler and
  * performing profile guided compilation.
@@ -73,7 +75,7 @@
  public:
   static const uint8_t kProfileMagic[];
   static const uint8_t kProfileVersion[];
-  static const uint8_t kProfileVersionWithCounters[];
+  static const uint8_t kProfileVersionForBootImage[];
   static const char kDexMetadataProfileEntry[];
 
   static constexpr size_t kProfileVersionSize = 4;
@@ -83,29 +85,44 @@
   // This is exposed as public in order to make it available to dex2oat compilations
   // (see compiler/optimizing/inliner.cc).
 
-  // A dex location together with its checksum.
+  // A profile reference to the dex file (profile key, dex checksum and number of methods).
   struct DexReference {
     DexReference() : dex_checksum(0), num_method_ids(0) {}
 
-    DexReference(const std::string& location, uint32_t checksum, uint32_t num_methods)
-        : dex_location(location), dex_checksum(checksum), num_method_ids(num_methods) {}
+    DexReference(const std::string& key, uint32_t checksum, uint32_t num_methods)
+        : profile_key(key), dex_checksum(checksum), num_method_ids(num_methods) {}
 
     bool operator==(const DexReference& other) const {
       return dex_checksum == other.dex_checksum &&
-          dex_location == other.dex_location &&
+          profile_key == other.profile_key &&
           num_method_ids == other.num_method_ids;
     }
 
     bool MatchesDex(const DexFile* dex_file) const {
       return dex_checksum == dex_file->GetLocationChecksum() &&
-           dex_location == GetProfileDexFileKey(dex_file->GetLocation());
+           profile_key == GetProfileDexFileBaseKey(dex_file->GetLocation());
     }
 
-    std::string dex_location;
+    std::string profile_key;
     uint32_t dex_checksum;
     uint32_t num_method_ids;
   };
 
+  // The types used to manipulate the profile index of dex files.
+  // They set an upper limit on how many dex files a given profile can record.
+  //
+  // Boot profiles have more needs than regular profiles as they contain data from
+  // many apps merged together. As such they set the default type for data manipulation.
+  //
+  // Regular profiles don't record a lot of dex files, and use a smaller data type
+  // in order to save disk and ram.
+  //
+  // In-memory all profiles will use ProfileIndexType to represent the indices. However,
+  // when serialized, the profile type (boot or regular) will determine which data type
+  // is used to write the data.
+  using ProfileIndexType = uint16_t;
+  using ProfileIndexTypeRegular = uint8_t;
+
   // Encodes a class reference in the profile.
   // The owning dex file is encoded as the index (dex_profile_index) it has in the
   // profile rather than as a full DexRefence(location,checksum).
@@ -118,7 +135,7 @@
   // data from multiple splits. This means that a profile may contain a classes2.dex from split-A
   // and one from split-B.
   struct ClassReference : public ValueObject {
-    ClassReference(uint8_t dex_profile_idx, const dex::TypeIndex type_idx) :
+    ClassReference(ProfileIndexType dex_profile_idx, const dex::TypeIndex type_idx) :
       dex_profile_index(dex_profile_idx), type_index(type_idx) {}
 
     bool operator==(const ClassReference& other) const {
@@ -130,7 +147,7 @@
           : dex_profile_index < other.dex_profile_index;
     }
 
-    uint8_t dex_profile_index;  // the index of the owning dex in the profile info
+    ProfileIndexType dex_profile_index;  // the index of the owning dex in the profile info
     dex::TypeIndex type_index;  // the type index of the class
   };
 
@@ -181,9 +198,41 @@
   class MethodHotness {
    public:
     enum Flag {
-      kFlagHot = 0x1,
-      kFlagStartup = 0x2,
-      kFlagPostStartup = 0x4,
+      // Marker flag used to simplify iterations.
+      kFlagFirst = 1 << 0,
+      // The method is profile-hot (this is implementation specific, e.g. equivalent to JIT-warm)
+      kFlagHot = 1 << 0,
+      // Executed during the app startup as determined by the runtime.
+      kFlagStartup = 1 << 1,
+      // Executed after app startup as determined by the runtime.
+      kFlagPostStartup = 1 << 2,
+      // Marker flag used to simplify iterations.
+      kFlagLastRegular = 1 << 2,
+      // Executed by a 32bit process.
+      kFlag32bit = 1 << 3,
+      // Executed by a 64bit process.
+      kFlag64bit = 1 << 4,
+      // Executed on sensitive thread (e.g. UI).
+      kFlagSensitiveThread = 1 << 5,
+      // Executed during the app startup as determined by the framework (equivalent to am start).
+      kFlagAmStartup = 1 << 6,
+      // Executed after the app startup as determined by the framework (equivalent to am start).
+      kFlagAmPostStartup = 1 << 7,
+      // Executed during system boot.
+      kFlagBoot = 1 << 8,
+      // Executed after the system has booted.
+      kFlagPostBoot = 1 << 9,
+
+      // The startup bins capture the relative order of when a method becomes hot. There are 6
+      // total bins supported and each hot method will have at least one bit set. If the profile was
+      // merged multiple times more than one bit may be set as a given method may become hot at
+      // various times during subsequent executions.
+      // The granularity of the bins is unspecified (i.e. the runtime is free to change the
+      // values it uses - this may be 100ms, 200ms etc...).
+      kFlagStartupBin = 1 << 10,
+      kFlagStartupMaxBin = 1 << 15,
+      // Marker flag used to simplify iterations.
+      kFlagLastBoot = 1 << 15,
     };
 
     bool IsHot() const {
@@ -202,17 +251,21 @@
       flags_ |= flag;
     }
 
-    uint8_t GetFlags() const {
+    uint32_t GetFlags() const {
       return flags_;
     }
 
+    bool HasFlagSet(MethodHotness::Flag flag) {
+      return (flags_ & flag ) != 0;
+    }
+
     bool IsInProfile() const {
       return flags_ != 0;
     }
 
    private:
     const InlineCacheMap* inline_cache_map_ = nullptr;
-    uint8_t flags_ = 0;
+    uint32_t flags_ = 0;
 
     const InlineCacheMap* GetInlineCacheMap() const {
       return inline_cache_map_;
@@ -234,65 +287,94 @@
         : inline_caches(inline_cache_map) {}
 
     bool operator==(const OfflineProfileMethodInfo& other) const;
+    // Checks that this offline representation of inline caches matches the runtime view of the
+    // data.
+    bool operator==(const std::vector<ProfileMethodInfo::ProfileInlineCache>& other) const;
 
     const InlineCacheMap* const inline_caches;
     std::vector<DexReference> dex_references;
   };
 
+  // Encapsulates metadata that can be associated with the methods and classes added to the profile.
+  // The additional metadata is serialized in the profile and becomes part of the profile key
+  // representation. It can be used to differentiate the samples that are added to the profile
+  // based on the supported criteria (e.g. keep track of which app generated what sample when
+  // constructing a boot profile.).
+  class ProfileSampleAnnotation {
+   public:
+    explicit ProfileSampleAnnotation(const std::string& package_name) :
+        origin_package_name_(package_name) {}
+
+    const std::string& GetOriginPackageName() const { return origin_package_name_; }
+
+    bool operator==(const ProfileSampleAnnotation& other) const;
+
+    bool operator<(const ProfileSampleAnnotation& other) const {
+      return origin_package_name_ < other.origin_package_name_;
+    }
+
+    // A convenient empty annotation object that can be used to denote that no annotation should
+    // be associated with the profile samples.
+    static const ProfileSampleAnnotation kNone;
+
+   private:
+    // The name of the package that generated the samples.
+    const std::string origin_package_name_;
+  };
+
   // Public methods to create, extend or query the profile.
   ProfileCompilationInfo();
+  explicit ProfileCompilationInfo(bool for_boot_image);
   explicit ProfileCompilationInfo(ArenaPool* arena_pool);
+  ProfileCompilationInfo(ArenaPool* arena_pool, bool for_boot_image);
 
   ~ProfileCompilationInfo();
 
   // Add the given methods to the current profile object.
-  bool AddMethods(const std::vector<ProfileMethodInfo>& methods, MethodHotness::Flag flags);
-
-  // Add the given classes to the current profile object.
-  bool AddClasses(const std::set<DexCacheResolvedClasses>& resolved_classes);
+  //
+  // Note: if an annotation is provided, the methods/classes will be associated with the group
+  // (dex_file, sample_annotation). Each group keeps its unique set of methods/classes.
+  bool AddMethods(const std::vector<ProfileMethodInfo>& methods,
+                  MethodHotness::Flag flags,
+                  const ProfileSampleAnnotation& annotation = ProfileSampleAnnotation::kNone);
 
   // Add multiple type ids for classes in a single dex file. Iterator is for type_ids not
   // class_defs.
+  //
+  // Note: see AddMethods docs for the handling of annotations.
   template <class Iterator>
-  bool AddClassesForDex(const DexFile* dex_file, Iterator index_begin, Iterator index_end) {
-    DexFileData* data = GetOrAddDexFileData(dex_file);
+  bool AddClassesForDex(
+      const DexFile* dex_file,
+      Iterator index_begin,
+      Iterator index_end,
+      const ProfileSampleAnnotation& annotation = ProfileSampleAnnotation::kNone) {
+    DexFileData* data = GetOrAddDexFileData(dex_file, annotation);
     if (data == nullptr) {
       return false;
     }
     data->class_set.insert(index_begin, index_end);
     return true;
   }
-  // Add a single type id for a dex file.
-  bool AddClassForDex(const TypeReference& ref) {
-    DexFileData* data = GetOrAddDexFileData(ref.dex_file);
-    if (data == nullptr) {
-      return false;
-    }
-    data->class_set.insert(ref.TypeIndex());
-    return true;
-  }
-
-
-  // Add a method index to the profile (without inline caches). The method flags determine if it is
-  // hot, startup, or post startup, or a combination of the previous.
-  bool AddMethodIndex(MethodHotness::Flag flags,
-                      const std::string& dex_location,
-                      uint32_t checksum,
-                      uint16_t method_idx,
-                      uint32_t num_method_ids);
-  bool AddMethodIndex(MethodHotness::Flag flags, const MethodReference& ref);
 
   // Add a method to the profile using its online representation (containing runtime structures).
-  bool AddMethod(const ProfileMethodInfo& pmi, MethodHotness::Flag flags);
+  //
+  // Note: see AddMethods docs for the handling of annotations.
+  bool AddMethod(const ProfileMethodInfo& pmi,
+                 MethodHotness::Flag flags,
+                 const ProfileSampleAnnotation& annotation = ProfileSampleAnnotation::kNone);
 
   // Bulk add sampled methods and/or hot methods for a single dex, fast since it only has one
   // GetOrAddDexFileData call.
+  //
+  // Note: see AddMethods docs for the handling of annotations.
   template <class Iterator>
-  bool AddMethodsForDex(MethodHotness::Flag flags,
-                        const DexFile* dex_file,
-                        Iterator index_begin,
-                        Iterator index_end) {
-    DexFileData* data = GetOrAddDexFileData(dex_file);
+  bool AddMethodsForDex(
+      MethodHotness::Flag flags,
+      const DexFile* dex_file,
+      Iterator index_begin,
+      Iterator index_end,
+      const ProfileSampleAnnotation& annotation = ProfileSampleAnnotation::kNone) {
+    DexFileData* data = GetOrAddDexFileData(dex_file, annotation);
     if (data == nullptr) {
       return false;
     }
@@ -305,9 +387,6 @@
     return true;
   }
 
-  // Add hotness flags for a simple method.
-  bool AddMethodHotness(const MethodReference& method_ref, const MethodHotness& hotness);
-
   // Load or Merge profile information from the given file descriptor.
   // If the current profile is non-empty the load will fail.
   // If merge_classes is set to false, classes will not be merged/loaded.
@@ -361,21 +440,35 @@
   uint32_t GetNumberOfResolvedClasses() const;
 
   // Returns the profile method info for a given method reference.
-  MethodHotness GetMethodHotness(const MethodReference& method_ref) const;
-  MethodHotness GetMethodHotness(const std::string& dex_location,
-                                 uint32_t dex_checksum,
-                                 uint16_t dex_method_index) const;
+  //
+  // Note that if the profile was built with annotations, the same dex file may be
+  // represented multiple times in the profile (due to different annotation associated with it).
+  // represented multiple times in the profile (due to different annotations associated with it).
+  //
+  // Implementation details: It is suitable to pass kNone for regular profile guided compilation
+  // because during compilation we generally don't care about annotations. The metadata is
+  // useful for boot profiles which need the extra information.
+  MethodHotness GetMethodHotness(
+      const MethodReference& method_ref,
+      const ProfileSampleAnnotation& annotation = ProfileSampleAnnotation::kNone) const;
 
   // Return true if the class's type is present in the profiling info.
-  bool ContainsClass(const DexFile& dex_file, dex::TypeIndex type_idx) const;
+  //
+  // Note: see GetMethodHotness docs for the handling of annotations.
+  bool ContainsClass(
+      const DexFile& dex_file,
+      dex::TypeIndex type_idx,
+      const ProfileSampleAnnotation& annotation = ProfileSampleAnnotation::kNone) const;
 
-  // Return the method data for the given location and index from the profiling info.
+  // Return the hot method info for the given location and index from the profiling info.
   // If the method index is not found or the checksum doesn't match, null is returned.
   // Note: the inline cache map is a pointer to the map stored in the profile and
   // its allocation will go away if the profile goes out of scope.
-  std::unique_ptr<OfflineProfileMethodInfo> GetMethod(const std::string& dex_location,
-                                                      uint32_t dex_checksum,
-                                                      uint16_t dex_method_index) const;
+  //
+  // Note: see GetMethodHotness docs for the handling of annotations.
+  std::unique_ptr<OfflineProfileMethodInfo> GetHotMethodInfo(
+      const MethodReference& method_ref,
+      const ProfileSampleAnnotation& annotation = ProfileSampleAnnotation::kNone) const;
 
   // Dump all the loaded profile info into a string and returns it.
   // If dex_files is not empty then the method indices will be resolved to their
@@ -387,21 +480,34 @@
   // Return the classes and methods for a given dex file through out args. The out args are the set
   // of class as well as the methods and their associated inline caches. Returns true if the dex
   // file is register and has a matching checksum, false otherwise.
-  bool GetClassesAndMethods(const DexFile& dex_file,
-                            /*out*/std::set<dex::TypeIndex>* class_set,
-                            /*out*/std::set<uint16_t>* hot_method_set,
-                            /*out*/std::set<uint16_t>* startup_method_set,
-                            /*out*/std::set<uint16_t>* post_startup_method_method_set) const;
+  //
+  // Note: see GetMethodHotness docs for the handling of annotations.
+  bool GetClassesAndMethods(
+      const DexFile& dex_file,
+      /*out*/std::set<dex::TypeIndex>* class_set,
+      /*out*/std::set<uint16_t>* hot_method_set,
+      /*out*/std::set<uint16_t>* startup_method_set,
+      /*out*/std::set<uint16_t>* post_startup_method_method_set,
+      const ProfileSampleAnnotation& annotation = ProfileSampleAnnotation::kNone) const;
+
+  // Returns true iff both profiles have the same version.
+  bool SameVersion(const ProfileCompilationInfo& other) const;
 
   // Perform an equality test with the `other` profile information.
   bool Equals(const ProfileCompilationInfo& other);
 
-  // Return the class descriptors for all of the classes in the profiles' class sets.
-  std::set<DexCacheResolvedClasses> GetResolvedClasses(
-      const std::vector<const DexFile*>& dex_files_) const;
+  // Return the base profile key associated with the given dex location. The base profile key
+  // is solely constructed based on the dex location (as opposed to the one produced by
+  // GetProfileDexFileAugmentedKey which may include additional metadata like the origin
+  // package name)
+  static std::string GetProfileDexFileBaseKey(const std::string& dex_location);
 
-  // Return the profile key associated with the given dex location.
-  static std::string GetProfileDexFileKey(const std::string& dex_location);
+  // Returns a base key without the annotation information.
+  static std::string GetBaseKeyFromAugmentedKey(const std::string& profile_key);
+
+  // Returns the annotations from an augmented key.
+  // If the key is a base key it return ProfileSampleAnnotation::kNone.
+  static ProfileSampleAnnotation GetAnnotationFromKey(const std::string& augmented_key);
 
   // Generate a test profile which will contain a percentage of the total maximum
   // number of methods and classes (method_ratio and class_ratio).
@@ -426,7 +532,10 @@
   ArenaAllocator* GetAllocator() { return &allocator_; }
 
   // Return all of the class descriptors in the profile for a set of dex files.
-  HashSet<std::string> GetClassDescriptors(const std::vector<const DexFile*>& dex_files);
+  // Note: see GetMethodHotness docs for the handling of annotations.
+  HashSet<std::string> GetClassDescriptors(
+      const std::vector<const DexFile*>& dex_files,
+      const ProfileSampleAnnotation& annotation = ProfileSampleAnnotation::kNone);
 
   // Return true if the fd points to a profile file.
   bool IsProfileFile(int fd);
@@ -448,6 +557,9 @@
   // Clears all the data from the profile.
   void ClearData();
 
+  // Clears all the data from the profile and adjust the object version.
+  void ClearDataAndAdjustVersion(bool for_boot_image);
+
   // Prepare the profile to store aggregation counters.
   // This will change the profile version and allocate extra storage for the counters.
   // It allocates 2 bytes for every possible method and class, so do not use in performance
@@ -455,23 +567,20 @@
   void PrepareForAggregationCounters();
 
   // Returns true if the profile is configured to store aggregation counters.
-  bool StoresAggregationCounters() const;
-
-  // Returns the aggregation counter for the given method.
-  // Returns -1 if the method is not in the profile.
-  // CHECKs that the profile is configured to store aggregations counters.
-  int32_t GetMethodAggregationCounter(const MethodReference& method_ref) const;
-  // Returns the aggregation counter for the given class.
-  // Returns -1 if the class is not in the profile.
-  // CHECKs that the profile is configured to store aggregations counters.
-  int32_t GetClassAggregationCounter(const TypeReference& type_ref) const;
-  // Returns the number of times the profile was merged.
-  // CHECKs that the profile is configured to store aggregations counters.
-  uint16_t GetAggregationCounter() const;
+  bool IsForBootImage() const;
 
   // Return the version of this profile.
   const uint8_t* GetVersion() const;
 
+  // Extracts the data that the profile has on the given dex files:
+  //  - for each method and class, a list of the corresponding annotations and flags
+  //  - the maximum number of aggregations for classes and methods across dex files with different
+  //    annotations (essentially this sums up how many different packages used the corresponding
+  //    method). This information is reconstructible from the other two pieces of info, but it's
+  //    convenient to have it precomputed.
+  std::unique_ptr<FlattenProfileData> ExtractProfileData(
+      const std::vector<std::unique_ptr<const DexFile>>& dex_files) const;
+
  private:
   enum ProfileLoadStatus {
     kProfileLoadWouldOverwiteData,
@@ -481,9 +590,6 @@
     kProfileLoadSuccess
   };
 
-  const uint32_t kProfileSizeWarningThresholdInBytes = 500000U;
-  const uint32_t kProfileSizeErrorThresholdInBytes = 1000000U;
-
   // Internal representation of the profile information belonging to a dex file.
   // Note that we could do without profile_key (the key used to encode the dex
   // file in the profile) and profile_index (the index of the dex file in the
@@ -496,7 +602,7 @@
                 uint32_t location_checksum,
                 uint16_t index,
                 uint32_t num_methods,
-                bool store_aggregation_counters)
+                bool for_boot_image)
         : allocator_(allocator),
           profile_key(key),
           profile_index(index),
@@ -505,24 +611,27 @@
           class_set(std::less<dex::TypeIndex>(), allocator->Adapter(kArenaAllocProfile)),
           num_method_ids(num_methods),
           bitmap_storage(allocator->Adapter(kArenaAllocProfile)),
-          method_counters(allocator->Adapter(kArenaAllocProfile)),
-          class_counters(allocator->Adapter(kArenaAllocProfile)) {
-      bitmap_storage.resize(ComputeBitmapStorage(num_method_ids));
+          is_for_boot_image(for_boot_image) {
+      bitmap_storage.resize(ComputeBitmapStorage(is_for_boot_image, num_method_ids));
       if (!bitmap_storage.empty()) {
         method_bitmap =
             BitMemoryRegion(MemoryRegion(
-                &bitmap_storage[0], bitmap_storage.size()), 0, ComputeBitmapBits(num_method_ids));
-      }
-      if (store_aggregation_counters) {
-        PrepareForAggregationCounters();
+                &bitmap_storage[0],
+                bitmap_storage.size()),
+                0,
+                ComputeBitmapBits(is_for_boot_image, num_method_ids));
       }
     }
 
-    static size_t ComputeBitmapBits(uint32_t num_method_ids) {
-      return num_method_ids * kBitmapIndexCount;
+    static size_t ComputeBitmapBits(bool is_for_boot_image, uint32_t num_method_ids) {
+      size_t flag_bitmap_index = FlagBitmapIndex(is_for_boot_image
+          ? MethodHotness::kFlagLastBoot
+          : MethodHotness::kFlagLastRegular);
+      return num_method_ids * (flag_bitmap_index + 1);
     }
-    static size_t ComputeBitmapStorage(uint32_t num_method_ids) {
-      return RoundUp(ComputeBitmapBits(num_method_ids), kBitsPerByte) / kBitsPerByte;
+    static size_t ComputeBitmapStorage(bool is_for_boot_image, uint32_t num_method_ids) {
+      return RoundUp(ComputeBitmapBits(is_for_boot_image, num_method_ids), kBitsPerByte) /
+          kBitsPerByte;
     }
 
     bool operator==(const DexFileData& other) const {
@@ -530,9 +639,7 @@
           num_method_ids == other.num_method_ids &&
           method_map == other.method_map &&
           class_set == other.class_set &&
-          (BitMemoryRegion::Compare(method_bitmap, other.method_bitmap) == 0) &&
-          class_counters == other.class_counters &&
-          method_counters == other.method_counters;
+          (BitMemoryRegion::Compare(method_bitmap, other.method_bitmap) == 0);
     }
 
     // Mark a method as executed at least once.
@@ -547,12 +654,6 @@
 
     void SetMethodHotness(size_t index, MethodHotness::Flag flags);
     MethodHotness GetHotnessInfo(uint32_t dex_method_index) const;
-    void PrepareForAggregationCounters();
-
-    int32_t GetMethodAggregationCounter(uint16_t method_index) const;
-    int32_t GetClassAggregationCounter(uint16_t type_index) const;
-
-    uint16_t GetNumMethodCounters() const;
 
     bool ContainsClass(const dex::TypeIndex type_index) const;
 
@@ -561,7 +662,7 @@
     // The profile key this data belongs to.
     std::string profile_key;
     // The profile index of this dex file (matches ClassReference#dex_profile_index).
-    uint8_t profile_index;
+    ProfileIndexType profile_index;
     // The dex checksum.
     uint32_t checksum;
     // The methods' profile information.
@@ -571,30 +672,16 @@
     ArenaSet<dex::TypeIndex> class_set;
     // Find the inline caches of the the given method index. Add an empty entry if
     // no previous data is found.
-    InlineCacheMap* FindOrAddMethod(uint16_t method_index);
+    InlineCacheMap* FindOrAddHotMethod(uint16_t method_index);
     // Num method ids.
     uint32_t num_method_ids;
     ArenaVector<uint8_t> bitmap_storage;
     BitMemoryRegion method_bitmap;
-    ArenaVector<uint16_t> method_counters;
-    ArenaVector<uint16_t> class_counters;
+    bool is_for_boot_image;
 
    private:
-    enum BitmapIndex {
-      kBitmapIndexStartup,
-      kBitmapIndexPostStartup,
-      kBitmapIndexCount,
-    };
-
-    size_t MethodBitIndex(bool startup, size_t index) const {
-      DCHECK_LT(index, num_method_ids);
-      // The format is [startup bitmap][post startup bitmap]
-      // This compresses better than ([startup bit][post statup bit])*
-
-      return index + (startup
-          ? kBitmapIndexStartup * num_method_ids
-          : kBitmapIndexPostStartup * num_method_ids);
-    }
+    size_t MethodFlagBitmapIndex(MethodHotness::Flag flag, size_t method_index) const;
+    static size_t FlagBitmapIndex(MethodHotness::Flag flag);
   };
 
   // Return the profile data for the given profile key or null if the dex location
@@ -603,30 +690,13 @@
                                    uint32_t checksum,
                                    uint32_t num_method_ids);
 
-  DexFileData* GetOrAddDexFileData(const DexFile* dex_file) {
-    return GetOrAddDexFileData(GetProfileDexFileKey(dex_file->GetLocation()),
+  DexFileData* GetOrAddDexFileData(const DexFile* dex_file,
+                                   const ProfileSampleAnnotation& annotation) {
+    return GetOrAddDexFileData(GetProfileDexFileAugmentedKey(dex_file->GetLocation(), annotation),
                                dex_file->GetLocationChecksum(),
                                dex_file->NumMethodIds());
   }
 
-  // Add a method to the profile using its offline representation.
-  // This is mostly used to facilitate testing.
-  bool AddMethod(const std::string& dex_location,
-                 uint32_t dex_checksum,
-                 uint16_t method_index,
-                 uint32_t num_method_ids,
-                 const OfflineProfileMethodInfo& pmi,
-                 MethodHotness::Flag flags);
-
-  // Add a class index to the profile.
-  bool AddClassIndex(const std::string& dex_location,
-                     uint32_t checksum,
-                     dex::TypeIndex type_idx,
-                     uint32_t num_method_ids);
-
-  // Add all classes from the given dex cache to the the profile.
-  bool AddResolvedClasses(const DexCacheResolvedClasses& classes);
-
   // Encode the known dex_files into a vector. The index of a dex_reference will
   // be the same as the profile index of the dex file (used to encode the ClassReferences).
   void DexFileToProfileIndex(/*out*/std::vector<DexReference>* dex_references) const;
@@ -636,10 +706,19 @@
   const DexFileData* FindDexData(const std::string& profile_key,
                                  uint32_t checksum,
                                  bool verify_checksum = true) const;
+  // Same as FindDexData but performs the searching using the given annotation:
+  //   - If the annotation is kNone then the search ignores it and only looks at the base keys.
+  //     In this case only the first matching dex is searched.
+  //   - If the annotation is not kNone, the augmented key is constructed and used to invoke
+  //     the regular FindDexData.
+  const DexFileData* FindDexDataUsingAnnotations(
+      const DexFile* dex_file,
+      const ProfileSampleAnnotation& annotation) const;
 
-  // Return the dex data associated with the given dex file or null if the profile doesn't contain
-  // the key or the checksum mismatches.
-  const DexFileData* FindDexData(const DexFile* dex_file) const;
+  // Same as FindDexDataUsingAnnotations but extracts the data for all annotations.
+  void FindAllDexData(
+      const DexFile* dex_file,
+      /*out*/ std::vector<const ProfileCompilationInfo::DexFileData*>* result) const;
 
   // Inflate the input buffer (in_buffer) of size in_size. It returns a buffer of
   // compressed data for the input buffer of "compressed_data_size" size.
@@ -660,7 +739,7 @@
 
   // The information present in the header of each profile line.
   struct ProfileLineHeader {
-    std::string dex_location;
+    std::string profile_key;
     uint16_t class_set_size;
     uint32_t method_region_size_bytes;
     uint32_t checksum;
@@ -772,7 +851,7 @@
   // Read the profile header from the given fd and store the number of profile
   // lines into number_of_dex_files.
   ProfileLoadStatus ReadProfileHeader(ProfileSource& source,
-                                      /*out*/uint8_t* number_of_dex_files,
+                                      /*out*/ProfileIndexType* number_of_dex_files,
                                       /*out*/uint32_t* size_uncompressed_data,
                                       /*out*/uint32_t* size_compressed_data,
                                       /*out*/std::string* error);
@@ -789,12 +868,13 @@
                                      /*out*/std::string* error);
 
   // Read a single profile line from the given fd.
-  ProfileLoadStatus ReadProfileLine(SafeBuffer& buffer,
-                                    uint8_t number_of_dex_files,
-                                    const ProfileLineHeader& line_header,
-                                    const SafeMap<uint8_t, uint8_t>& dex_profile_index_remap,
-                                    bool merge_classes,
-                                    /*out*/std::string* error);
+  ProfileLoadStatus ReadProfileLine(
+      SafeBuffer& buffer,
+      ProfileIndexType number_of_dex_files,
+      const ProfileLineHeader& line_header,
+      const SafeMap<ProfileIndexType, ProfileIndexType>& dex_profile_index_remap,
+      bool merge_classes,
+      /*out*/std::string* error);
 
   // Read all the classes from the buffer into the profile `info_` structure.
   bool ReadClasses(SafeBuffer& buffer,
@@ -803,26 +883,22 @@
 
   // Read all the methods from the buffer into the profile `info_` structure.
   bool ReadMethods(SafeBuffer& buffer,
-                   uint8_t number_of_dex_files,
+                   ProfileIndexType number_of_dex_files,
                    const ProfileLineHeader& line_header,
-                   const SafeMap<uint8_t, uint8_t>& dex_profile_index_remap,
+                   const SafeMap<ProfileIndexType, ProfileIndexType>& dex_profile_index_remap,
                    /*out*/std::string* error);
 
-  // Read the aggregation counters from the buffer.
-  bool ReadAggregationCounters(SafeBuffer& buffer,
-                               DexFileData& dex_data,
-                               /*out*/std::string* error);
-
   // The method generates mapping of profile indices while merging a new profile
   // data into current data. It returns true, if the mapping was successful.
-  bool RemapProfileIndex(const std::vector<ProfileLineHeader>& profile_line_headers,
-                         const ProfileLoadFilterFn& filter_fn,
-                         /*out*/SafeMap<uint8_t, uint8_t>* dex_profile_index_remap);
+  bool RemapProfileIndex(
+      const std::vector<ProfileLineHeader>& profile_line_headers,
+      const ProfileLoadFilterFn& filter_fn,
+      /*out*/SafeMap<ProfileIndexType, ProfileIndexType>* dex_profile_index_remap);
 
   // Read the inline cache encoding from line_bufer into inline_cache.
   bool ReadInlineCache(SafeBuffer& buffer,
-                       uint8_t number_of_dex_files,
-                       const SafeMap<uint8_t, uint8_t>& dex_profile_index_remap,
+                       ProfileIndexType number_of_dex_files,
+                       const SafeMap<ProfileIndexType, ProfileIndexType>& dex_profile_index_remap,
                        /*out*/InlineCacheMap* inline_cache,
                        /*out*/std::string* error);
 
@@ -838,7 +914,7 @@
   // `dex_to_classes_map`.
   void GroupClassesByDex(
       const ClassSet& classes,
-      /*out*/SafeMap<uint8_t, std::vector<dex::TypeIndex>>* dex_to_classes_map);
+      /*out*/SafeMap<ProfileIndexType, std::vector<dex::TypeIndex>>* dex_to_classes_map);
 
   // Find the data for the dex_pc in the inline cache. Adds an empty entry
   // if no previous data exists.
@@ -847,6 +923,35 @@
   // Initializes the profile version to the desired one.
   void InitProfileVersionInternal(const uint8_t version[]);
 
+  // Returns the threshold size (in bytes) which will trigger save/load warnings.
+  size_t GetSizeWarningThresholdBytes() const;
+  // Returns the threshold size (in bytes) which will cause save/load failures.
+  size_t GetSizeErrorThresholdBytes() const;
+
+
+  // Returns the augmented profile key associated with the given dex location.
+  // The return key will contain a serialized form of the information from the provided
+  // annotation. If the annotation is ProfileSampleAnnotation::kNone then no extra info is
+  // added to the key and this method is equivalent to GetProfileDexFileBaseKey.
+  static std::string GetProfileDexFileAugmentedKey(const std::string& dex_location,
+                                                   const ProfileSampleAnnotation& annotation);
+
+  // Migrates the annotation from an augmented key to a base key.
+  static std::string MigrateAnnotationInfo(const std::string& base_key,
+                                           const std::string& augmented_key);
+
+  // Returns the maximum value for the profile index. It depends on the profile type.
+  // Boot profiles can store more dex files than regular profiles.
+  ProfileIndexType MaxProfileIndex() const;
+  // Returns the size of the profile index type used for serialization.
+  uint32_t SizeOfProfileIndexType() const;
+  // Writes the profile index to the buffer. The type of profile will determine the
+  // number of bytes used for serialization.
+  void WriteProfileIndex(std::vector<uint8_t>* buffer, ProfileIndexType value) const;
+  // Read the profile index from the buffer. The type of profile will determine the
+  // number of bytes used for serialization.
+  bool ReadProfileIndex(SafeBuffer& safe_buffer, ProfileIndexType* value) const;
+
   friend class ProfileCompilationInfoTest;
   friend class CompilerDriverProfileTest;
   friend class ProfileAssistantTest;
@@ -863,17 +968,89 @@
   // Cache mapping profile keys to profile index.
   // This is used to speed up searches since it avoids iterating
   // over the info_ vector when searching by profile key.
-  ArenaSafeMap<const std::string, uint8_t> profile_key_map_;
+  ArenaSafeMap<const std::string, ProfileIndexType> profile_key_map_;
 
   // The version of the profile.
-  // This may change if a "normal" profile is transformed to keep track
-  // of aggregation counters.
   uint8_t version_[kProfileVersionSize];
-
-  // Stored only when the profile is configured to keep track of aggregation counters.
-  uint16_t aggregation_count_;
 };
 
+/**
+ * Flatten profile data that list all methods and type references together
+ * with their metadata (such as flags or annotation list).
+ */
+class FlattenProfileData {
+ public:
+  class ItemMetadata {
+   public:
+    ItemMetadata();
+    ItemMetadata(const ItemMetadata& other);
+
+    uint16_t GetFlags() const {
+      return flags_;
+    }
+
+    const std::list<ProfileCompilationInfo::ProfileSampleAnnotation>& GetAnnotations() const {
+      return annotations_;
+    }
+
+    void AddFlag(ProfileCompilationInfo::MethodHotness::Flag flag) {
+      flags_ |= flag;
+    }
+
+    bool HasFlagSet(ProfileCompilationInfo::MethodHotness::Flag flag) const {
+      return (flags_ & flag) != 0;
+    }
+
+   private:
+    // will be 0 for classes and MethodHotness::Flags for methods.
+    uint16_t flags_;
+    // This is a list that may contain duplicates after a merge operation.
+    // It represents that a method was used multiple times across different devices.
+    std::list<ProfileCompilationInfo::ProfileSampleAnnotation> annotations_;
+
+    friend class ProfileCompilationInfo;
+    friend class FlattenProfileData;
+  };
+
+  FlattenProfileData();
+
+  const SafeMap<MethodReference, ItemMetadata>& GetMethodData() const {
+    return method_metadata_;
+  }
+
+  const SafeMap<TypeReference, ItemMetadata>& GetClassData() const {
+    return class_metadata_;
+  }
+
+  uint32_t GetMaxAggregationForMethods() const {
+    return max_aggregation_for_methods_;
+  }
+
+  uint32_t GetMaxAggregationForClasses() const {
+    return max_aggregation_for_classes_;
+  }
+
+  void MergeData(const FlattenProfileData& other);
+
+ private:
+  // Method data.
+  SafeMap<MethodReference, ItemMetadata> method_metadata_;
+  // Class data.
+  SafeMap<TypeReference, ItemMetadata> class_metadata_;
+  // Maximum aggregation counter for all methods.
+  // This is essentially a cache equal to the max size of any method's annation set.
+  // It avoids the traversal of all the methods which can be quite expensive.
+  uint32_t max_aggregation_for_methods_;
+  // Maximum aggregation counter for all classes.
+  // Simillar to max_aggregation_for_methods_.
+  uint32_t max_aggregation_for_classes_;
+
+  friend class ProfileCompilationInfo;
+};
+
+std::ostream& operator<<(std::ostream& stream,
+                         const ProfileCompilationInfo::DexReference& dex_ref);
+
 }  // namespace art
 
 #endif  // ART_LIBPROFILE_PROFILE_PROFILE_COMPILATION_INFO_H_
diff --git a/libprofile/profile/profile_compilation_info_test.cc b/libprofile/profile/profile_compilation_info_test.cc
index 47019c4..d6ae8a2 100644
--- a/libprofile/profile/profile_compilation_info_test.cc
+++ b/libprofile/profile/profile_compilation_info_test.cc
@@ -15,11 +15,13 @@
  */
 
 #include <gtest/gtest.h>
+#include <algorithm>
 #include <stdio.h>
 
 #include "base/arena_allocator.h"
 #include "base/common_art_test.h"
 #include "base/unix_file/fd_file.h"
+#include "dex/compact_dex_file.h"
 #include "dex/dex_file.h"
 #include "dex/dex_file_loader.h"
 #include "dex/method_reference.h"
@@ -30,50 +32,84 @@
 namespace art {
 
 using Hotness = ProfileCompilationInfo::MethodHotness;
+using ProfileInlineCache = ProfileMethodInfo::ProfileInlineCache;
+using ProfileSampleAnnotation = ProfileCompilationInfo::ProfileSampleAnnotation;
+using ProfileIndexType = ProfileCompilationInfo::ProfileIndexType;
+using ProfileIndexTypeRegular = ProfileCompilationInfo::ProfileIndexTypeRegular;
+using ItemMetadata = FlattenProfileData::ItemMetadata;
 
 static constexpr size_t kMaxMethodIds = 65535;
+static uint32_t kMaxHotnessFlagBootIndex =
+    WhichPowerOf2(static_cast<uint32_t>(Hotness::kFlagLastBoot));
+static uint32_t kMaxHotnessFlagRegularIndex =
+    WhichPowerOf2(static_cast<uint32_t>(Hotness::kFlagLastRegular));
 
 class ProfileCompilationInfoTest : public CommonArtTest {
  public:
   void SetUp() override {
     CommonArtTest::SetUp();
     allocator_.reset(new ArenaAllocator(&pool_));
+
+    dex1 = fake_dex_storage.AddFakeDex("location1", /* checksum= */ 1, /* num_method_ids= */ 10001);
+    dex2 = fake_dex_storage.AddFakeDex("location2", /* checksum= */ 2, /* num_method_ids= */ 10002);
+    dex3 = fake_dex_storage.AddFakeDex("location3", /* checksum= */ 3, /* num_method_ids= */ 10003);
+    dex4 = fake_dex_storage.AddFakeDex("location4", /* checksum= */ 4, /* num_method_ids= */ 10004);
+
+    dex1_checksum_missmatch = fake_dex_storage.AddFakeDex(
+        "location1", /* checksum= */ 12, /* num_method_ids= */ 10001);
+    dex1_renamed = fake_dex_storage.AddFakeDex(
+        "location1-renamed", /* checksum= */ 1, /* num_method_ids= */ 10001);
+    dex2_renamed = fake_dex_storage.AddFakeDex(
+        "location2-renamed", /* checksum= */ 2, /* num_method_ids= */ 10002);
+
+    dex_max_methods1 = fake_dex_storage.AddFakeDex(
+        "location-max1", /* checksum= */ 5, /* num_method_ids= */ kMaxMethodIds);
+    dex_max_methods2 = fake_dex_storage.AddFakeDex(
+        "location-max2", /* checksum= */ 6, /* num_method_ids= */ kMaxMethodIds);
   }
 
  protected:
-  bool AddMethod(const std::string& dex_location,
-                 uint32_t checksum,
+  bool AddMethod(ProfileCompilationInfo* info,
+                 const DexFile* dex,
                  uint16_t method_idx,
-                 ProfileCompilationInfo* info) {
-    return info->AddMethodIndex(Hotness::kFlagHot,
-                                dex_location,
-                                checksum,
-                                method_idx,
-                                kMaxMethodIds);
+                 Hotness::Flag flags = Hotness::kFlagHot,
+                 const ProfileSampleAnnotation& annotation = ProfileSampleAnnotation::kNone) {
+    return info->AddMethod(ProfileMethodInfo(MethodReference(dex, method_idx)),
+                           flags,
+                           annotation);
   }
 
-  bool AddMethod(const std::string& dex_location,
-                 uint32_t checksum,
-                 uint16_t method_idx,
-                 const ProfileCompilationInfo::OfflineProfileMethodInfo& pmi,
-                 ProfileCompilationInfo* info) {
+  bool AddMethod(ProfileCompilationInfo* info,
+                const DexFile* dex,
+                uint16_t method_idx,
+                const std::vector<ProfileInlineCache>& inline_caches,
+                const ProfileSampleAnnotation& annotation = ProfileSampleAnnotation::kNone) {
     return info->AddMethod(
-        dex_location, checksum, method_idx, kMaxMethodIds, pmi, Hotness::kFlagPostStartup);
+        ProfileMethodInfo(MethodReference(dex, method_idx), inline_caches),
+        Hotness::kFlagHot,
+        annotation);
   }
 
-  bool AddClass(const std::string& dex_location,
-                uint32_t checksum,
+  bool AddClass(ProfileCompilationInfo* info,
+                const DexFile* dex,
                 dex::TypeIndex type_index,
-                ProfileCompilationInfo* info) {
-    DexCacheResolvedClasses classes(dex_location, dex_location, checksum, kMaxMethodIds);
-    classes.AddClass(type_index);
-    return info->AddClasses({classes});
+                const ProfileSampleAnnotation& annotation = ProfileSampleAnnotation::kNone) {
+    std::vector<dex::TypeIndex> classes = {type_index};
+    return info->AddClassesForDex(dex, classes.begin(), classes.end(), annotation);
   }
 
   uint32_t GetFd(const ScratchFile& file) {
     return static_cast<uint32_t>(file.GetFd());
   }
 
+  std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> GetMethod(
+      const ProfileCompilationInfo& info,
+      const DexFile* dex,
+      uint16_t method_idx,
+      const ProfileSampleAnnotation& annotation = ProfileSampleAnnotation::kNone) {
+    return info.GetHotMethodInfo(MethodReference(dex, method_idx), annotation);
+  }
+
   // Creates an inline cache which will be destructed at the end of the test.
   ProfileCompilationInfo::InlineCacheMap* CreateInlineCacheMap() {
     used_inline_caches.emplace_back(new ProfileCompilationInfo::InlineCacheMap(
@@ -81,62 +117,58 @@
     return used_inline_caches.back().get();
   }
 
-  // Creates an offline profile used for testing inline caches.
-  ProfileCompilationInfo::OfflineProfileMethodInfo GetOfflineProfileMethodInfo() {
-    ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
-
+  // Creates the default inline caches used in tests.
+  std::vector<ProfileInlineCache> GetTestInlineCaches() {
+    std::vector<ProfileInlineCache> inline_caches;
     // Monomorphic
     for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
-      ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
-      dex_pc_data.AddClass(0, dex::TypeIndex(0));
-      ic_map->Put(dex_pc, dex_pc_data);
+      std::vector<TypeReference> types = {TypeReference(dex1, dex::TypeIndex(0))};
+      inline_caches.push_back(ProfileInlineCache(dex_pc, /* missing_types*/ false, types));
     }
     // Polymorphic
     for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
-      ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
-      dex_pc_data.AddClass(0, dex::TypeIndex(0));
-      dex_pc_data.AddClass(1, dex::TypeIndex(1));
-      dex_pc_data.AddClass(2, dex::TypeIndex(2));
-
-      ic_map->Put(dex_pc, dex_pc_data);
+      std::vector<TypeReference> types = {
+          TypeReference(dex1, dex::TypeIndex(0)),
+          TypeReference(dex2, dex::TypeIndex(1)),
+          TypeReference(dex3, dex::TypeIndex(2))};
+      inline_caches.push_back(ProfileInlineCache(dex_pc, /* missing_types*/ false, types));
     }
     // Megamorphic
     for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
-      ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
-      dex_pc_data.SetIsMegamorphic();
-      ic_map->Put(dex_pc, dex_pc_data);
+      // we need 5 types to make the cache megamorphic
+      std::vector<TypeReference> types = {
+          TypeReference(dex1, dex::TypeIndex(0)),
+          TypeReference(dex1, dex::TypeIndex(1)),
+          TypeReference(dex1, dex::TypeIndex(2)),
+          TypeReference(dex1, dex::TypeIndex(3)),
+          TypeReference(dex1, dex::TypeIndex(4))};
+      inline_caches.push_back(ProfileInlineCache(dex_pc, /* missing_types*/ false, types));
     }
     // Missing types
     for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
-      ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
-      dex_pc_data.SetIsMissingTypes();
-      ic_map->Put(dex_pc, dex_pc_data);
+      std::vector<TypeReference> types;
+      inline_caches.push_back(ProfileInlineCache(dex_pc, /* missing_types*/ true, types));
     }
 
-    ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
-
-    pmi.dex_references.emplace_back("dex_location1", /* checksum= */1, kMaxMethodIds);
-    pmi.dex_references.emplace_back("dex_location2", /* checksum= */2, kMaxMethodIds);
-    pmi.dex_references.emplace_back("dex_location3", /* checksum= */3, kMaxMethodIds);
-
-    return pmi;
+    return inline_caches;
   }
 
-  void MakeMegamorphic(/*out*/ProfileCompilationInfo::OfflineProfileMethodInfo* pmi) {
-    ProfileCompilationInfo::InlineCacheMap* ic_map =
-        const_cast<ProfileCompilationInfo::InlineCacheMap*>(pmi->inline_caches);
-    for (auto it : *ic_map) {
-      for (uint16_t k = 0; k <= 2 * ProfileCompilationInfo::kIndividualInlineCacheSize; k++) {
-        it.second.AddClass(0, dex::TypeIndex(k));
+  void MakeMegamorphic(/*out*/std::vector<ProfileInlineCache>* inline_caches) {
+    for (ProfileInlineCache& cache : *inline_caches) {
+      uint16_t k = 5;
+      while (cache.classes.size() < ProfileCompilationInfo::kIndividualInlineCacheSize) {
+        TypeReference type_ref(dex1, dex::TypeIndex(k++));
+        if (std::find(cache.classes.begin(), cache.classes.end(), type_ref) ==
+            cache.classes.end()) {
+          const_cast<std::vector<TypeReference>*>(&cache.classes)->push_back(type_ref);
+        }
       }
     }
   }
 
-  void SetIsMissingTypes(/*out*/ProfileCompilationInfo::OfflineProfileMethodInfo* pmi) {
-    ProfileCompilationInfo::InlineCacheMap* ic_map =
-        const_cast<ProfileCompilationInfo::InlineCacheMap*>(pmi->inline_caches);
-    for (auto it : *ic_map) {
-      it.second.SetIsMissingTypes();
+  void SetIsMissingTypes(/*out*/std::vector<ProfileInlineCache>* inline_caches) {
+    for (ProfileInlineCache& cache : *inline_caches) {
+      *(const_cast<bool*>(&(cache.is_missing_types))) = true;
     }
   }
 
@@ -148,8 +180,8 @@
     ScratchFile profile;
     ProfileCompilationInfo saved_info;
     for (uint16_t i = 0; i < 10; i++) {
-      ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
-      ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, /* method_idx= */ i, &saved_info));
+      ASSERT_TRUE(AddMethod(&saved_info, dex1, /* method_idx= */ i));
+      ASSERT_TRUE(AddMethod(&saved_info, dex2, /* method_idx= */ i));
     }
     ASSERT_TRUE(saved_info.Save(GetFd(profile)));
     ASSERT_EQ(0, profile.GetFile()->Flush());
@@ -187,6 +219,57 @@
     return info.IsEmpty();
   }
 
+  void SizeStressTest(bool random) {
+    ProfileCompilationInfo boot_profile(/*for_boot_image*/ true);
+    ProfileCompilationInfo reg_profile(/*for_boot_image*/ false);
+
+    static constexpr size_t kNumDexFiles = 5;
+
+    FakeDexStorage local_storage;
+    std::vector<const DexFile*> dex_files;
+    for (uint32_t i = 0; i < kNumDexFiles; i++) {
+      dex_files.push_back(local_storage.AddFakeDex(std::to_string(i), i, kMaxMethodIds));
+    }
+
+    std::srand(0);
+    // Set a few flags on a 2 different methods in each of the profile.
+    for (const DexFile* dex_file : dex_files) {
+      for (uint32_t method_idx = 0; method_idx < kMaxMethodIds; method_idx++) {
+        for (uint32_t flag_index = 0; flag_index <= kMaxHotnessFlagBootIndex; flag_index++) {
+          if (!random || rand() % 2 == 0) {
+            ASSERT_TRUE(AddMethod(
+                &boot_profile,
+                dex_file,
+                method_idx,
+                static_cast<Hotness::Flag>(1 << flag_index)));
+          }
+        }
+        for (uint32_t flag_index = 0; flag_index <= kMaxHotnessFlagRegularIndex; flag_index++) {
+          if (!random || rand() % 2 == 0) {
+            ASSERT_TRUE(AddMethod(
+                &reg_profile,
+                dex_file,
+                method_idx,
+                static_cast<Hotness::Flag>(1 << flag_index)));
+          }
+        }
+      }
+    }
+
+    ScratchFile boot_file;
+    ScratchFile reg_file;
+
+    ASSERT_TRUE(boot_profile.Save(GetFd(boot_file)));
+    ASSERT_TRUE(reg_profile.Save(GetFd(reg_file)));
+    ASSERT_TRUE(boot_file.GetFile()->ResetOffset());
+    ASSERT_TRUE(reg_file.GetFile()->ResetOffset());
+
+    ProfileCompilationInfo loaded_boot;
+    ProfileCompilationInfo loaded_reg;
+    ASSERT_TRUE(loaded_boot.Load(GetFd(boot_file)));
+    ASSERT_TRUE(loaded_reg.Load(GetFd(reg_file)));
+  }
+
   // Cannot sizeof the actual arrays so hard code the values here.
   // They should not change anyway.
   static constexpr int kProfileMagicSize = 4;
@@ -195,10 +278,22 @@
   MallocArenaPool pool_;
   std::unique_ptr<ArenaAllocator> allocator_;
 
+  const DexFile* dex1;
+  const DexFile* dex2;
+  const DexFile* dex3;
+  const DexFile* dex4;
+  const DexFile* dex1_checksum_missmatch;
+  const DexFile* dex1_renamed;
+  const DexFile* dex2_renamed;
+  const DexFile* dex_max_methods1;
+  const DexFile* dex_max_methods2;
+
   // Cache of inline caches generated during tests.
   // This makes it easier to pass data between different utilities and ensure that
   // caches are destructed at the end of the test.
   std::vector<std::unique_ptr<ProfileCompilationInfo::InlineCacheMap>> used_inline_caches;
+
+  FakeDexStorage fake_dex_storage;
 };
 
 TEST_F(ProfileCompilationInfoTest, SaveFd) {
@@ -207,8 +302,8 @@
   ProfileCompilationInfo saved_info;
   // Save a few methods.
   for (uint16_t i = 0; i < 10; i++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
-    ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, /* method_idx= */ i, &saved_info));
+    ASSERT_TRUE(AddMethod(&saved_info, dex1, /* method_idx= */ i));
+    ASSERT_TRUE(AddMethod(&saved_info, dex2, /* method_idx= */ i));
   }
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
   ASSERT_EQ(0, profile.GetFile()->Flush());
@@ -221,9 +316,9 @@
 
   // Save more methods.
   for (uint16_t i = 0; i < 100; i++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
-    ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, /* method_idx= */ i, &saved_info));
-    ASSERT_TRUE(AddMethod("dex_location3", /* checksum= */ 3, /* method_idx= */ i, &saved_info));
+    ASSERT_TRUE(AddMethod(&saved_info, dex1, /* method_idx= */ i));
+    ASSERT_TRUE(AddMethod(&saved_info, dex2, /* method_idx= */ i));
+    ASSERT_TRUE(AddMethod(&saved_info, dex3, /* method_idx= */ i));
   }
   ASSERT_TRUE(profile.GetFile()->ResetOffset());
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -240,19 +335,19 @@
   ScratchFile profile;
 
   ProfileCompilationInfo info;
-  ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 1, /* method_idx= */ 1, &info));
+  ASSERT_TRUE(AddMethod(&info, dex1, /* method_idx= */ 1));
   // Trying to add info for an existing file but with a different checksum.
-  ASSERT_FALSE(AddMethod("dex_location", /* checksum= */ 2, /* method_idx= */ 2, &info));
+  ASSERT_FALSE(AddMethod(&info, dex1_checksum_missmatch, /* method_idx= */ 2));
 }
 
 TEST_F(ProfileCompilationInfoTest, MergeFail) {
   ScratchFile profile;
 
   ProfileCompilationInfo info1;
-  ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 1, /* method_idx= */ 1, &info1));
+  ASSERT_TRUE(AddMethod(&info1, dex1, /* method_idx= */ 1));
   // Use the same file, change the checksum.
   ProfileCompilationInfo info2;
-  ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 2, /* method_idx= */ 2, &info2));
+  ASSERT_TRUE(AddMethod(&info2, dex1_checksum_missmatch, /* method_idx= */ 2));
 
   ASSERT_FALSE(info1.MergeWith(info2));
 }
@@ -262,10 +357,10 @@
   ScratchFile profile;
 
   ProfileCompilationInfo info1;
-  ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 1, /* method_idx= */ 1, &info1));
+  ASSERT_TRUE(AddMethod(&info1, dex1, /* method_idx= */ 1));
   // Use the same file, change the checksum.
   ProfileCompilationInfo info2;
-  ASSERT_TRUE(AddMethod("dex_location", /* checksum= */ 2, /* method_idx= */ 2, &info2));
+  ASSERT_TRUE(AddMethod(&info2, dex1_checksum_missmatch, /* method_idx= */ 2));
 
   ASSERT_TRUE(info1.Save(profile.GetFd()));
   ASSERT_EQ(0, profile.GetFile()->Flush());
@@ -280,13 +375,13 @@
   ProfileCompilationInfo saved_info;
   // Save the maximum number of methods
   for (uint16_t i = 0; i < std::numeric_limits<uint16_t>::max(); i++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
-    ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, /* method_idx= */ i, &saved_info));
+    ASSERT_TRUE(AddMethod(&saved_info, dex_max_methods1, /* method_idx= */ i));
+    ASSERT_TRUE(AddMethod(&saved_info, dex_max_methods2, /* method_idx= */ i));
   }
   // Save the maximum number of classes
   for (uint16_t i = 0; i < std::numeric_limits<uint16_t>::max(); i++) {
-    ASSERT_TRUE(AddClass("dex_location1", /* checksum= */ 1, dex::TypeIndex(i), &saved_info));
-    ASSERT_TRUE(AddClass("dex_location2", /* checksum= */ 2, dex::TypeIndex(i), &saved_info));
+    ASSERT_TRUE(AddClass(&saved_info, dex1, dex::TypeIndex(i)));
+    ASSERT_TRUE(AddClass(&saved_info, dex2, dex::TypeIndex(i)));
   }
 
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -388,9 +483,8 @@
   ScratchFile profile;
 
   ProfileCompilationInfo saved_info;
-  // Save the maximum number of methods
   for (uint16_t i = 0; i < 10; i++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &saved_info));
+    ASSERT_TRUE(AddMethod(&saved_info, dex1, /* method_idx= */ i));
   }
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
 
@@ -409,15 +503,15 @@
   ScratchFile profile;
 
   ProfileCompilationInfo saved_info;
-  ProfileCompilationInfo::OfflineProfileMethodInfo pmi = GetOfflineProfileMethodInfo();
+  std::vector<ProfileInlineCache> inline_caches = GetTestInlineCaches();
 
   // Add methods with inline caches.
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
     // Add a method which is part of the same dex file as one of the
     // class from the inline caches.
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod(&saved_info, dex1, method_idx, inline_caches));
     // Add a method which is outside the set of dex files.
-    ASSERT_TRUE(AddMethod("dex_location4", /* checksum= */ 4, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod(&saved_info, dex4, method_idx, inline_caches));
   }
 
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -431,119 +525,119 @@
   ASSERT_TRUE(loaded_info.Equals(saved_info));
 
   std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
-      loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, /* dex_method_index= */ 3);
+      GetMethod(loaded_info, dex1, /* method_idx= */ 3);
   ASSERT_TRUE(loaded_pmi1 != nullptr);
-  ASSERT_TRUE(*loaded_pmi1 == pmi);
+  ASSERT_TRUE(*loaded_pmi1 == inline_caches);
   std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi2 =
-      loaded_info.GetMethod("dex_location4", /* dex_checksum= */ 4, /* dex_method_index= */ 3);
+      GetMethod(loaded_info, dex4, /* method_idx= */ 3);
   ASSERT_TRUE(loaded_pmi2 != nullptr);
-  ASSERT_TRUE(*loaded_pmi2 == pmi);
+  ASSERT_TRUE(*loaded_pmi2 == inline_caches);
 }
 
 TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCaches) {
-  ScratchFile profile;
-
   ProfileCompilationInfo saved_info;
-  ProfileCompilationInfo::OfflineProfileMethodInfo pmi = GetOfflineProfileMethodInfo();
+  std::vector<ProfileInlineCache> inline_caches = GetTestInlineCaches();
 
   // Add methods with inline caches.
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod(&saved_info, dex1, method_idx, inline_caches));
   }
 
+  ScratchFile profile;
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
   ASSERT_EQ(0, profile.GetFile()->Flush());
 
   // Make the inline caches megamorphic and add them to the profile again.
   ProfileCompilationInfo saved_info_extra;
-  ProfileCompilationInfo::OfflineProfileMethodInfo pmi_extra = GetOfflineProfileMethodInfo();
-  MakeMegamorphic(&pmi_extra);
+  std::vector<ProfileInlineCache> inline_caches_extra = GetTestInlineCaches();
+  MakeMegamorphic(&inline_caches_extra);
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info_extra));
+    ASSERT_TRUE(AddMethod(&saved_info_extra, dex1, method_idx, inline_caches_extra));
   }
 
-  ASSERT_TRUE(profile.GetFile()->ResetOffset());
-  ASSERT_TRUE(saved_info_extra.Save(GetFd(profile)));
-  ASSERT_EQ(0, profile.GetFile()->Flush());
+  ScratchFile extra_profile;
+  ASSERT_TRUE(saved_info_extra.Save(GetFd(extra_profile)));
+  ASSERT_EQ(0, extra_profile.GetFile()->Flush());
 
   // Merge the profiles so that we have the same view as the file.
   ASSERT_TRUE(saved_info.MergeWith(saved_info_extra));
 
   // Check that we get back what we saved.
   ProfileCompilationInfo loaded_info;
-  ASSERT_TRUE(profile.GetFile()->ResetOffset());
-  ASSERT_TRUE(loaded_info.Load(GetFd(profile)));
+  ASSERT_TRUE(extra_profile.GetFile()->ResetOffset());
+  ASSERT_TRUE(loaded_info.Load(GetFd(extra_profile)));
 
   ASSERT_TRUE(loaded_info.Equals(saved_info));
 
   std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
-      loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, /* dex_method_index= */ 3);
+      GetMethod(loaded_info, dex1, /* method_idx= */ 3);
 
   ASSERT_TRUE(loaded_pmi1 != nullptr);
-  ASSERT_TRUE(*loaded_pmi1 == pmi_extra);
+  ASSERT_TRUE(*loaded_pmi1 == inline_caches_extra);
 }
 
 TEST_F(ProfileCompilationInfoTest, MissingTypesInlineCaches) {
-  ScratchFile profile;
-
   ProfileCompilationInfo saved_info;
-  ProfileCompilationInfo::OfflineProfileMethodInfo pmi = GetOfflineProfileMethodInfo();
+  std::vector<ProfileInlineCache> inline_caches = GetTestInlineCaches();
 
   // Add methods with inline caches.
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod(&saved_info, dex1, method_idx, inline_caches));
   }
 
+  ScratchFile profile;
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
   ASSERT_EQ(0, profile.GetFile()->Flush());
 
   // Make some inline caches megamorphic and add them to the profile again.
   ProfileCompilationInfo saved_info_extra;
-  ProfileCompilationInfo::OfflineProfileMethodInfo pmi_extra = GetOfflineProfileMethodInfo();
-  MakeMegamorphic(&pmi_extra);
+  std::vector<ProfileInlineCache> inline_caches_extra = GetTestInlineCaches();
+  MakeMegamorphic(&inline_caches_extra);
   for (uint16_t method_idx = 5; method_idx < 10; method_idx++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info_extra));
+    ASSERT_TRUE(AddMethod(&saved_info_extra, dex1, method_idx, inline_caches));
   }
 
   // Mark all inline caches with missing types and add them to the profile again.
   // This will verify that all inline caches (megamorphic or not) should be marked as missing types.
-  ProfileCompilationInfo::OfflineProfileMethodInfo missing_types = GetOfflineProfileMethodInfo();
+  std::vector<ProfileInlineCache> missing_types = GetTestInlineCaches();
   SetIsMissingTypes(&missing_types);
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info_extra));
+    ASSERT_TRUE(AddMethod(&saved_info_extra, dex1, method_idx, missing_types));
   }
 
-  ASSERT_TRUE(profile.GetFile()->ResetOffset());
-  ASSERT_TRUE(saved_info_extra.Save(GetFd(profile)));
-  ASSERT_EQ(0, profile.GetFile()->Flush());
+  ScratchFile extra_profile;
+  ASSERT_TRUE(saved_info_extra.Save(GetFd(extra_profile)));
+  ASSERT_EQ(0, extra_profile.GetFile()->Flush());
 
   // Merge the profiles so that we have the same view as the file.
   ASSERT_TRUE(saved_info.MergeWith(saved_info_extra));
 
   // Check that we get back what we saved.
   ProfileCompilationInfo loaded_info;
-  ASSERT_TRUE(profile.GetFile()->ResetOffset());
-  ASSERT_TRUE(loaded_info.Load(GetFd(profile)));
+  ASSERT_TRUE(extra_profile.GetFile()->ResetOffset());
+  ASSERT_TRUE(loaded_info.Load(GetFd(extra_profile)));
 
   ASSERT_TRUE(loaded_info.Equals(saved_info));
 
   std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
-      loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, /* dex_method_index= */ 3);
+      GetMethod(loaded_info, dex1, /* method_idx= */ 3);
   ASSERT_TRUE(loaded_pmi1 != nullptr);
-  ASSERT_TRUE(*loaded_pmi1 == pmi_extra);
+  ASSERT_TRUE(*loaded_pmi1 == missing_types);
 }
 
 TEST_F(ProfileCompilationInfoTest, InvalidChecksumInInlineCache) {
   ScratchFile profile;
 
   ProfileCompilationInfo info;
-  ProfileCompilationInfo::OfflineProfileMethodInfo pmi1 = GetOfflineProfileMethodInfo();
-  ProfileCompilationInfo::OfflineProfileMethodInfo pmi2 = GetOfflineProfileMethodInfo();
+  std::vector<ProfileInlineCache> inline_caches1 = GetTestInlineCaches();
+  std::vector<ProfileInlineCache> inline_caches2 = GetTestInlineCaches();
   // Modify the checksum to trigger a mismatch.
-  pmi2.dex_references[0].dex_checksum++;
+  std::vector<TypeReference>* types = const_cast<std::vector<TypeReference>*>(
+      &inline_caches2[0].classes);
+  types->front().dex_file = dex1_checksum_missmatch;
 
-  ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /*method_idx=*/ 0, pmi1, &info));
-  ASSERT_FALSE(AddMethod("dex_location2", /* checksum= */ 2, /*method_idx=*/ 0, pmi2, &info));
+  ASSERT_TRUE(AddMethod(&info, dex1, /* method_idx= */ 0, inline_caches1));
+  ASSERT_FALSE(AddMethod(&info, dex2, /* method_idx= */ 0, inline_caches2));
 }
 
 // Verify that profiles behave correctly even if the methods are added in a different
@@ -554,40 +648,31 @@
   ProfileCompilationInfo info;
   ProfileCompilationInfo info_reindexed;
 
-  ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
-  ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
-  pmi.dex_references.emplace_back("dex_location1", /* checksum= */ 1, kMaxMethodIds);
-  pmi.dex_references.emplace_back("dex_location2", /* checksum= */ 2, kMaxMethodIds);
+  std::vector<ProfileInlineCache> inline_caches;
   for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
-    ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
-    dex_pc_data.AddClass(0, dex::TypeIndex(0));
-    dex_pc_data.AddClass(1, dex::TypeIndex(1));
-    ic_map->Put(dex_pc, dex_pc_data);
+    std::vector<TypeReference> types = {
+        TypeReference(dex1, dex::TypeIndex(0)),
+        TypeReference(dex2, dex::TypeIndex(1))};
+    inline_caches.push_back(ProfileInlineCache(dex_pc, /* missing_types*/ false, types));
   }
 
-  ProfileCompilationInfo::InlineCacheMap* ic_map_reindexed = CreateInlineCacheMap();
-  ProfileCompilationInfo::OfflineProfileMethodInfo pmi_reindexed(ic_map_reindexed);
-  pmi_reindexed.dex_references.emplace_back("dex_location2", /* checksum= */ 2, kMaxMethodIds);
-  pmi_reindexed.dex_references.emplace_back("dex_location1", /* checksum= */ 1, kMaxMethodIds);
+  std::vector<ProfileInlineCache> inline_caches_reindexed;
   for (uint16_t dex_pc = 1; dex_pc < 5; dex_pc++) {
-    ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
-    dex_pc_data.AddClass(1, dex::TypeIndex(0));
-    dex_pc_data.AddClass(0, dex::TypeIndex(1));
-    ic_map_reindexed->Put(dex_pc, dex_pc_data);
+    std::vector<TypeReference> types = {
+        TypeReference(dex2, dex::TypeIndex(1)),
+        TypeReference(dex1, dex::TypeIndex(0))};
+    inline_caches_reindexed.push_back(ProfileInlineCache(dex_pc, /* missing_types*/ false, types));
   }
-
   // Profile 1 and Profile 2 get the same methods but in different order.
   // This will trigger a different dex numbers.
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &info));
-    ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, method_idx, pmi, &info));
+    ASSERT_TRUE(AddMethod(&info, dex1, method_idx, inline_caches));
+    ASSERT_TRUE(AddMethod(&info, dex2, method_idx, inline_caches));
   }
 
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
-    ASSERT_TRUE(AddMethod(
-      "dex_location2", /* checksum= */ 2, method_idx, pmi_reindexed, &info_reindexed));
-    ASSERT_TRUE(AddMethod(
-      "dex_location1", /* checksum= */ 1, method_idx, pmi_reindexed, &info_reindexed));
+    ASSERT_TRUE(AddMethod(&info_reindexed, dex2, method_idx, inline_caches_reindexed));
+    ASSERT_TRUE(AddMethod(&info_reindexed, dex1, method_idx, inline_caches_reindexed));
   }
 
   ProfileCompilationInfo info_backup;
@@ -597,50 +682,63 @@
   ASSERT_TRUE(info.Equals(info_backup));
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
     std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
-        info.GetMethod("dex_location1", /* dex_checksum= */ 1, method_idx);
+        GetMethod(info, dex1, method_idx);
     ASSERT_TRUE(loaded_pmi1 != nullptr);
-    ASSERT_TRUE(*loaded_pmi1 == pmi);
+    ASSERT_TRUE(*loaded_pmi1 == inline_caches);
     std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi2 =
-        info.GetMethod("dex_location2", /* dex_checksum= */ 2, method_idx);
+        GetMethod(info, dex2, method_idx);
     ASSERT_TRUE(loaded_pmi2 != nullptr);
-    ASSERT_TRUE(*loaded_pmi2 == pmi);
+    ASSERT_TRUE(*loaded_pmi2 == inline_caches);
   }
 }
 
-TEST_F(ProfileCompilationInfoTest, AddMoreDexFileThanLimit) {
+TEST_F(ProfileCompilationInfoTest, AddMoreDexFileThanLimitRegular) {
+  FakeDexStorage local_storage;
   ProfileCompilationInfo info;
   // Save a few methods.
-  for (uint16_t i = 0; i < std::numeric_limits<uint8_t>::max(); i++) {
-    std::string dex_location = std::to_string(i);
-    ASSERT_TRUE(AddMethod(dex_location, /* checksum= */ 1, /* method_idx= */ i, &info));
+  for (uint16_t i = 0; i < std::numeric_limits<ProfileIndexTypeRegular>::max(); i++) {
+    std::string location = std::to_string(i);
+    const DexFile* dex = local_storage.AddFakeDex(
+        location, /* checksum= */ 1, /* num_method_ids= */ 1);
+    ASSERT_TRUE(AddMethod(&info, dex, /* method_idx= */ 0));
   }
-  // We only support at most 255 dex files.
-  ASSERT_FALSE(AddMethod(
-      /*dex_location=*/ "256", /* checksum= */ 1, /* method_idx= */ 0, &info));
+  // Add an extra dex file.
+  const DexFile* dex = local_storage.AddFakeDex("-1", /* checksum= */ 1, /* num_method_ids= */ 1);
+  ASSERT_FALSE(AddMethod(&info, dex, /* method_idx= */ 0));
+}
+
+TEST_F(ProfileCompilationInfoTest, AddMoreDexFileThanLimitBoot) {
+  FakeDexStorage local_storage;
+  ProfileCompilationInfo info(/*for_boot_image=*/true);
+  // Save a few methods.
+  for (uint16_t i = 0; i < std::numeric_limits<ProfileIndexType>::max(); i++) {
+    std::string location = std::to_string(i);
+    const DexFile* dex = local_storage.AddFakeDex(
+        location, /* checksum= */ 1, /* num_method_ids= */ 1);
+    ASSERT_TRUE(AddMethod(&info, dex, /* method_idx= */ 0));
+  }
+  // Add an extra dex file.
+  const DexFile* dex = local_storage.AddFakeDex("-1", /* checksum= */ 1, /* num_method_ids= */ 1);
+  ASSERT_FALSE(AddMethod(&info, dex, /* method_idx= */ 0));
 }
 
 TEST_F(ProfileCompilationInfoTest, MegamorphicInlineCachesMerge) {
   // Create a megamorphic inline cache.
-  ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
-  ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
-  pmi.dex_references.emplace_back("dex_location1", /* checksum= */ 1, kMaxMethodIds);
-  ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
-  dex_pc_data.SetIsMegamorphic();
-  ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
+  std::vector<ProfileInlineCache> inline_caches;
+  std::vector<TypeReference> types = {
+          TypeReference(dex1, dex::TypeIndex(0)),
+          TypeReference(dex1, dex::TypeIndex(1)),
+          TypeReference(dex1, dex::TypeIndex(2)),
+          TypeReference(dex1, dex::TypeIndex(3)),
+          TypeReference(dex1, dex::TypeIndex(4))};
+  inline_caches.push_back(ProfileInlineCache(0, /* missing_types*/ false, types));
 
   ProfileCompilationInfo info_megamorphic;
-  ASSERT_TRUE(AddMethod("dex_location1",
-                        /*checksum=*/ 1,
-                        /*method_idx=*/ 0,
-                        pmi,
-                        &info_megamorphic));
+  ASSERT_TRUE(AddMethod(&info_megamorphic, dex1, 0, inline_caches));
 
   // Create a profile with no inline caches (for the same method).
   ProfileCompilationInfo info_no_inline_cache;
-  ASSERT_TRUE(AddMethod("dex_location1",
-                        /*checksum=*/ 1,
-                        /*method_idx=*/ 0,
-                        &info_no_inline_cache));
+  ASSERT_TRUE(AddMethod(&info_no_inline_cache, dex1, 0));
 
   // Merge the megamorphic cache into the empty one.
   ASSERT_TRUE(info_no_inline_cache.MergeWith(info_megamorphic));
@@ -651,54 +749,39 @@
 
 TEST_F(ProfileCompilationInfoTest, MissingTypesInlineCachesMerge) {
   // Create an inline cache with missing types
-  ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
-  ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
-  pmi.dex_references.emplace_back("dex_location1", /* checksum= */ 1, kMaxMethodIds);
-  ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
-  dex_pc_data.SetIsMissingTypes();
-  ic_map->Put(/*dex_pc*/ 0, dex_pc_data);
+  std::vector<ProfileInlineCache> inline_caches;
+  std::vector<TypeReference> types = {};
+  inline_caches.push_back(ProfileInlineCache(0, /* missing_types*/ true, types));
 
-  ProfileCompilationInfo info_megamorphic;
-  ASSERT_TRUE(AddMethod("dex_location1",
-                        /*checksum=*/ 1,
-                        /*method_idx=*/ 0,
-                        pmi,
-                        &info_megamorphic));
+  ProfileCompilationInfo info_missing_types;
+  ASSERT_TRUE(AddMethod(&info_missing_types, dex1, /*method_idx=*/ 0, inline_caches));
 
   // Create a profile with no inline caches (for the same method).
   ProfileCompilationInfo info_no_inline_cache;
-  ASSERT_TRUE(AddMethod("dex_location1",
-                        /*checksum=*/ 1,
-                        /*method_idx=*/ 0,
-                        &info_no_inline_cache));
+  ASSERT_TRUE(AddMethod(&info_no_inline_cache, dex1, /*method_idx=*/ 0));
 
   // Merge the missing type cache into the empty one.
   // Everything should be saved without errors.
-  ASSERT_TRUE(info_no_inline_cache.MergeWith(info_megamorphic));
+  ASSERT_TRUE(info_no_inline_cache.MergeWith(info_missing_types));
   ScratchFile profile;
   ASSERT_TRUE(info_no_inline_cache.Save(GetFd(profile)));
 }
 
 TEST_F(ProfileCompilationInfoTest, SampledMethodsTest) {
   ProfileCompilationInfo test_info;
-  static constexpr size_t kNumMethods = 1000;
-  static constexpr size_t kChecksum1 = 1234;
-  static constexpr size_t kChecksum2 = 4321;
-  static const std::string kDex1 = "dex1";
-  static const std::string kDex2 = "dex2";
-  test_info.AddMethodIndex(Hotness::kFlagStartup, kDex1, kChecksum1, 1, kNumMethods);
-  test_info.AddMethodIndex(Hotness::kFlagPostStartup, kDex1, kChecksum1, 5, kNumMethods);
-  test_info.AddMethodIndex(Hotness::kFlagStartup, kDex2, kChecksum2, 2, kNumMethods);
-  test_info.AddMethodIndex(Hotness::kFlagPostStartup, kDex2, kChecksum2, 4, kNumMethods);
-  auto run_test = [](const ProfileCompilationInfo& info) {
-    EXPECT_FALSE(info.GetMethodHotness(kDex1, kChecksum1, 2).IsInProfile());
-    EXPECT_FALSE(info.GetMethodHotness(kDex1, kChecksum1, 4).IsInProfile());
-    EXPECT_TRUE(info.GetMethodHotness(kDex1, kChecksum1, 1).IsStartup());
-    EXPECT_FALSE(info.GetMethodHotness(kDex1, kChecksum1, 3).IsStartup());
-    EXPECT_TRUE(info.GetMethodHotness(kDex1, kChecksum1, 5).IsPostStartup());
-    EXPECT_FALSE(info.GetMethodHotness(kDex1, kChecksum1, 6).IsStartup());
-    EXPECT_TRUE(info.GetMethodHotness(kDex2, kChecksum2, 2).IsStartup());
-    EXPECT_TRUE(info.GetMethodHotness(kDex2, kChecksum2, 4).IsPostStartup());
+  AddMethod(&test_info, dex1, 1, Hotness::kFlagStartup);
+  AddMethod(&test_info, dex1, 5, Hotness::kFlagPostStartup);
+  AddMethod(&test_info, dex2, 2, Hotness::kFlagStartup);
+  AddMethod(&test_info, dex2, 4, Hotness::kFlagPostStartup);
+  auto run_test = [&dex1 = dex1, &dex2 = dex2](const ProfileCompilationInfo& info) {
+    EXPECT_FALSE(info.GetMethodHotness(MethodReference(dex1, 2)).IsInProfile());
+    EXPECT_FALSE(info.GetMethodHotness(MethodReference(dex1, 4)).IsInProfile());
+    EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex1, 1)).IsStartup());
+    EXPECT_FALSE(info.GetMethodHotness(MethodReference(dex1, 3)).IsStartup());
+    EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex1, 5)).IsPostStartup());
+    EXPECT_FALSE(info.GetMethodHotness(MethodReference(dex1, 6)).IsStartup());
+    EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex2, 2)).IsStartup());
+    EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex2, 4)).IsPostStartup());
   };
   run_test(test_info);
 
@@ -714,13 +797,13 @@
   run_test(loaded_info);
 
   // Test that the bitmap gets merged properly.
-  EXPECT_FALSE(test_info.GetMethodHotness(kDex1, kChecksum1, 11).IsStartup());
+  EXPECT_FALSE(test_info.GetMethodHotness(MethodReference(dex1, 11)).IsStartup());
   {
     ProfileCompilationInfo merge_info;
-    merge_info.AddMethodIndex(Hotness::kFlagStartup, kDex1, kChecksum1, 11, kNumMethods);
+    AddMethod(&merge_info, dex1, 11, Hotness::kFlagStartup);
     test_info.MergeWith(merge_info);
   }
-  EXPECT_TRUE(test_info.GetMethodHotness(kDex1, kChecksum1, 11).IsStartup());
+  EXPECT_TRUE(test_info.GetMethodHotness(MethodReference(dex1, 11)).IsStartup());
 
   // Test bulk adding.
   {
@@ -823,21 +906,13 @@
 }
 
 TEST_F(ProfileCompilationInfoTest, UpdateProfileKeyOk) {
-  std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("MultiDex");
+  std::vector<std::unique_ptr<const DexFile>> dex_files;
+  dex_files.push_back(std::unique_ptr<const DexFile>(dex1_renamed));
+  dex_files.push_back(std::unique_ptr<const DexFile>(dex2_renamed));
 
   ProfileCompilationInfo info;
-  for (const std::unique_ptr<const DexFile>& dex : dex_files) {
-    // Create the profile with a different location so that we can update it to the
-    // real dex location later.
-    std::string base_location = DexFileLoader::GetBaseLocation(dex->GetLocation());
-    std::string multidex_suffix = DexFileLoader::GetMultiDexSuffix(dex->GetLocation());
-    std::string old_name = base_location + "-old" + multidex_suffix;
-    info.AddMethodIndex(Hotness::kFlagHot,
-                        old_name,
-                        dex->GetLocationChecksum(),
-                        /* method_idx= */ 0,
-                        dex->NumMethodIds());
-  }
+  AddMethod(&info, dex1, /* method_idx= */ 0);
+  AddMethod(&info, dex2, /* method_idx= */ 0);
 
   // Update the profile keys based on the original dex files
   ASSERT_TRUE(info.UpdateProfileKeys(dex_files));
@@ -845,81 +920,77 @@
   // Verify that we find the methods when searched with the original dex files.
   for (const std::unique_ptr<const DexFile>& dex : dex_files) {
     std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi =
-        info.GetMethod(dex->GetLocation(), dex->GetLocationChecksum(), /* dex_method_index= */ 0);
+        GetMethod(info, dex.get(), /* method_idx= */ 0);
     ASSERT_TRUE(loaded_pmi != nullptr);
   }
+
+  // Release the ownership as this is held by the test class;
+  for (std::unique_ptr<const DexFile>& dex : dex_files) {
+    UNUSED(dex.release());
+  }
 }
 
 TEST_F(ProfileCompilationInfoTest, UpdateProfileKeyOkButNoUpdate) {
-  std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("MultiDex");
+  std::vector<std::unique_ptr<const DexFile>> dex_files;
+  dex_files.push_back(std::unique_ptr<const DexFile>(dex1));
 
   ProfileCompilationInfo info;
-  info.AddMethodIndex(Hotness::kFlagHot,
-                      "my.app",
-                      /* checksum= */ 123,
-                      /* method_idx= */ 0,
-                      /* num_method_ids= */ 10);
+  AddMethod(&info, dex2, /* method_idx= */ 0);
 
-  // Update the profile keys based on the original dex files
+  // Update the profile keys based on the original dex files.
   ASSERT_TRUE(info.UpdateProfileKeys(dex_files));
 
   // Verify that we did not perform any update and that we cannot find anything with the new
   // location.
   for (const std::unique_ptr<const DexFile>& dex : dex_files) {
     std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi =
-        info.GetMethod(dex->GetLocation(), dex->GetLocationChecksum(), /* dex_method_index= */ 0);
+        GetMethod(info, dex.get(), /* method_idx= */ 0);
     ASSERT_TRUE(loaded_pmi == nullptr);
   }
 
   // Verify that we can find the original entry.
   std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi =
-        info.GetMethod("my.app", /* dex_checksum= */ 123, /* dex_method_index= */ 0);
+        GetMethod(info, dex2, /* method_idx= */ 0);
   ASSERT_TRUE(loaded_pmi != nullptr);
+
+  // Release the ownership as this is held by the test class;
+  for (std::unique_ptr<const DexFile>& dex : dex_files) {
+    UNUSED(dex.release());
+  }
 }
 
 TEST_F(ProfileCompilationInfoTest, UpdateProfileKeyFail) {
-  std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("MultiDex");
-
+  std::vector<std::unique_ptr<const DexFile>> dex_files;
+  dex_files.push_back(std::unique_ptr<const DexFile>(dex1_renamed));
 
   ProfileCompilationInfo info;
-  // Add all dex
-  for (const std::unique_ptr<const DexFile>& dex : dex_files) {
-    // Create the profile with a different location so that we can update it to the
-    // real dex location later.
-    std::string base_location = DexFileLoader::GetBaseLocation(dex->GetLocation());
-    std::string multidex_suffix = DexFileLoader::GetMultiDexSuffix(dex->GetLocation());
-    std::string old_name = base_location + "-old" + multidex_suffix;
-    info.AddMethodIndex(Hotness::kFlagHot,
-                        old_name,
-                        dex->GetLocationChecksum(),
-                        /* method_idx= */ 0,
-                        dex->NumMethodIds());
-  }
+  AddMethod(&info, dex1, /* method_idx= */ 0);
 
   // Add a method index using the location we want to rename to.
   // This will cause the rename to fail because an existing entry would already have that name.
-  info.AddMethodIndex(Hotness::kFlagHot,
-                      dex_files[0]->GetLocation(),
-                      /* checksum= */ 123,
-                      /* method_idx= */ 0,
-                      dex_files[0]->NumMethodIds());
+  AddMethod(&info, dex1_renamed, /* method_idx= */ 0);
 
   ASSERT_FALSE(info.UpdateProfileKeys(dex_files));
+
+  // Release the ownership as this is held by the test class;
+  for (std::unique_ptr<const DexFile>& dex : dex_files) {
+    UNUSED(dex.release());
+  }
 }
 
 TEST_F(ProfileCompilationInfoTest, FilteredLoading) {
   ScratchFile profile;
 
   ProfileCompilationInfo saved_info;
-  ProfileCompilationInfo::OfflineProfileMethodInfo pmi = GetOfflineProfileMethodInfo();
+  std::vector<ProfileInlineCache> inline_caches = GetTestInlineCaches();
 
   // Add methods with inline caches.
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
     // Add a method which is part of the same dex file as one of the class from the inline caches.
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
-    ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod(&saved_info, dex1, method_idx, inline_caches));
+    ASSERT_TRUE(AddMethod(&saved_info, dex2, method_idx, inline_caches));
     // Add a method which is outside the set of dex files.
-    ASSERT_TRUE(AddMethod("dex_location4", /* checksum= */ 4, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod(&saved_info, dex4, method_idx, inline_caches));
   }
 
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -931,9 +1002,9 @@
 
   // Filter out dex locations. Keep only dex_location1 and dex_location3.
   ProfileCompilationInfo::ProfileLoadFilterFn filter_fn =
-      [](const std::string& dex_location, uint32_t checksum) -> bool {
-          return (dex_location == "dex_location1" && checksum == 1)
-              || (dex_location == "dex_location3" && checksum == 3);
+      [&dex1 = dex1, &dex3 = dex3](const std::string& dex_location, uint32_t checksum) -> bool {
+          return (dex_location == dex1->GetLocation() && checksum == dex1->GetLocationChecksum())
+              || (dex_location == dex3->GetLocation() && checksum == dex3->GetLocationChecksum());
         };
   ASSERT_TRUE(loaded_info.Load(GetFd(profile), true, filter_fn));
 
@@ -941,12 +1012,8 @@
 
   // Dex location 2 and 4 should have been filtered out
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
-    ASSERT_TRUE(nullptr == loaded_info.GetMethod("dex_location2",
-                                                 /* dex_checksum= */ 2,
-                                                 method_idx));
-    ASSERT_TRUE(nullptr == loaded_info.GetMethod("dex_location4",
-                                                 /* dex_checksum= */ 4,
-                                                 method_idx));
+    ASSERT_TRUE(nullptr == GetMethod(loaded_info, dex2, method_idx));
+    ASSERT_TRUE(nullptr == GetMethod(loaded_info, dex4, method_idx));
   }
 
   // Dex location 1 should have all all the inline caches referencing dex location 2 set to
@@ -954,11 +1021,11 @@
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
     // The methods for dex location 1 should be in the profile data.
     std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
-        loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, method_idx);
+        GetMethod(loaded_info, dex1, method_idx);
     ASSERT_TRUE(loaded_pmi1 != nullptr);
 
     // Verify the inline cache.
-    // Everything should be as constructed by GetOfflineProfileMethodInfo with the exception
+    // Everything should be as constructed by GetTestInlineCaches with the exception
     // of the inline caches referring types from dex_location2.
     // These should be set to IsMissingType.
     ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
@@ -993,8 +1060,10 @@
     ProfileCompilationInfo::OfflineProfileMethodInfo expected_pmi(ic_map);
 
     // The dex references should not have  dex_location2 in the list.
-    expected_pmi.dex_references.emplace_back("dex_location1", /* checksum= */1, kMaxMethodIds);
-    expected_pmi.dex_references.emplace_back("dex_location3", /* checksum= */3, kMaxMethodIds);
+    expected_pmi.dex_references.emplace_back(
+        dex1->GetLocation(), dex1->GetLocationChecksum(), dex1->NumMethodIds());
+    expected_pmi.dex_references.emplace_back(
+        dex3->GetLocation(), dex3->GetLocationChecksum(), dex3->NumMethodIds());
 
     // Now check that we get back what we expect.
     ASSERT_TRUE(*loaded_pmi1 == expected_pmi);
@@ -1005,15 +1074,15 @@
   ScratchFile profile;
 
   ProfileCompilationInfo saved_info;
-  ProfileCompilationInfo::OfflineProfileMethodInfo pmi = GetOfflineProfileMethodInfo();
+  std::vector<ProfileInlineCache> inline_caches = GetTestInlineCaches();
 
   // Add methods with inline caches.
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
     // Add a method which is part of the same dex file as one of the class from the inline caches.
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
-    ASSERT_TRUE(AddMethod("dex_location2", /* checksum= */ 2, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod(&saved_info, dex1, method_idx, inline_caches));
+    ASSERT_TRUE(AddMethod(&saved_info, dex2, method_idx, inline_caches));
     // Add a method which is outside the set of dex files.
-    ASSERT_TRUE(AddMethod("dex_location4", /* checksum= */ 4, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod(&saved_info, dex4, method_idx, inline_caches));
   }
 
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -1036,15 +1105,15 @@
   ScratchFile profile;
 
   ProfileCompilationInfo saved_info;
-  ProfileCompilationInfo::OfflineProfileMethodInfo pmi = GetOfflineProfileMethodInfo();
+  std::vector<ProfileInlineCache> inline_caches = GetTestInlineCaches();
 
   // Add methods with inline caches.
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
     // Add a method which is part of the same dex file as one of the
     // class from the inline caches.
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod(&saved_info, dex1, method_idx, inline_caches));
     // Add a method which is outside the set of dex files.
-    ASSERT_TRUE(AddMethod("dex_location4", /* checksum= */ 4, method_idx, pmi, &saved_info));
+    ASSERT_TRUE(AddMethod(&saved_info, dex4, method_idx, inline_caches));
   }
 
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -1064,15 +1133,15 @@
 
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
     std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi1 =
-        loaded_info.GetMethod("dex_location1", /* dex_checksum= */ 1, method_idx);
+        GetMethod(loaded_info, dex1, method_idx);
     ASSERT_TRUE(loaded_pmi1 != nullptr);
-    ASSERT_TRUE(*loaded_pmi1 == pmi);
+    ASSERT_TRUE(*loaded_pmi1 == inline_caches);
   }
   for (uint16_t method_idx = 0; method_idx < 10; method_idx++) {
     std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> loaded_pmi2 =
-        loaded_info.GetMethod("dex_location4", /* dex_checksum= */ 4, method_idx);
+        GetMethod(loaded_info, dex4, method_idx);
     ASSERT_TRUE(loaded_pmi2 != nullptr);
-    ASSERT_TRUE(*loaded_pmi2 == pmi);
+    ASSERT_TRUE(*loaded_pmi2 == inline_caches);
   }
 }
 
@@ -1085,8 +1154,8 @@
   ProfileCompilationInfo saved_info;
   uint16_t item_count = 1000;
   for (uint16_t i = 0; i < item_count; i++) {
-    ASSERT_TRUE(AddClass("dex_location1", /* checksum= */ 1, dex::TypeIndex(i), &saved_info));
-    ASSERT_TRUE(AddClass("dex_location2", /* checksum= */ 2, dex::TypeIndex(i), &saved_info));
+    ASSERT_TRUE(AddClass(&saved_info, dex1, dex::TypeIndex(i)));
+    ASSERT_TRUE(AddClass(&saved_info, dex2, dex::TypeIndex(i)));
   }
 
   ASSERT_TRUE(saved_info.Save(GetFd(profile)));
@@ -1097,15 +1166,15 @@
   ProfileCompilationInfo loaded_info;
   ASSERT_TRUE(profile.GetFile()->ResetOffset());
   ProfileCompilationInfo::ProfileLoadFilterFn filter_fn =
-      [](const std::string& dex_location, uint32_t checksum) -> bool {
-          return (dex_location == "dex_location2" && checksum == 2);
+      [&dex2 = dex2](const std::string& dex_location, uint32_t checksum) -> bool {
+          return (dex_location == dex2->GetLocation() && checksum == dex2->GetLocationChecksum());
         };
   ASSERT_TRUE(loaded_info.Load(GetFd(profile), true, filter_fn));
 
   // Compute the expectation.
   ProfileCompilationInfo expected_info;
   for (uint16_t i = 0; i < item_count; i++) {
-    ASSERT_TRUE(AddClass("dex_location2", /* checksum= */ 2, dex::TypeIndex(i), &expected_info));
+    ASSERT_TRUE(AddClass(&expected_info, dex2, dex::TypeIndex(i)));
   }
 
   // Validate the expectation.
@@ -1116,7 +1185,7 @@
 TEST_F(ProfileCompilationInfoTest, ClearData) {
   ProfileCompilationInfo info;
   for (uint16_t i = 0; i < 10; i++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &info));
+    ASSERT_TRUE(AddMethod(&info, dex1, /* method_idx= */ i));
   }
   ASSERT_FALSE(IsEmpty(info));
   info.ClearData();
@@ -1126,7 +1195,7 @@
 TEST_F(ProfileCompilationInfoTest, ClearDataAndSave) {
   ProfileCompilationInfo info;
   for (uint16_t i = 0; i < 10; i++) {
-    ASSERT_TRUE(AddMethod("dex_location1", /* checksum= */ 1, /* method_idx= */ i, &info));
+    ASSERT_TRUE(AddMethod(&info, dex1, /* method_idx= */ i));
   }
   info.ClearData();
 
@@ -1141,180 +1210,638 @@
   ASSERT_TRUE(loaded_info.Equals(info));
 }
 
-TEST_F(ProfileCompilationInfoTest, PrepareForAggregationCounters) {
+TEST_F(ProfileCompilationInfoTest, InitProfiles) {
   ProfileCompilationInfo info;
   ASSERT_EQ(
       memcmp(info.GetVersion(),
              ProfileCompilationInfo::kProfileVersion,
              ProfileCompilationInfo::kProfileVersionSize),
       0);
+  ASSERT_FALSE(info.IsForBootImage());
 
-  info.PrepareForAggregationCounters();
+  ProfileCompilationInfo info1(/*for_boot_image=*/ true);
 
   ASSERT_EQ(
-      memcmp(info.GetVersion(),
-             ProfileCompilationInfo::kProfileVersionWithCounters,
+      memcmp(info1.GetVersion(),
+             ProfileCompilationInfo::kProfileVersionForBootImage,
              ProfileCompilationInfo::kProfileVersionSize),
       0);
-  ASSERT_TRUE(info.StoresAggregationCounters());
-  ASSERT_EQ(info.GetAggregationCounter(), 0);
+  ASSERT_TRUE(info1.IsForBootImage());
 }
 
-TEST_F(ProfileCompilationInfoTest, MergeWithAggregationCounters) {
-  ProfileCompilationInfo info1;
-  info1.PrepareForAggregationCounters();
-
-  ProfileCompilationInfo info2;
-  ProfileCompilationInfo info3;
-
-  std::unique_ptr<const DexFile> dex(OpenTestDexFile("ManyMethods"));
-  std::string location = dex->GetLocation();
-  int checksum = dex->GetLocationChecksum();
-
-  AddMethod(location, checksum, /* method_idx= */ 1, &info1);
-
-  AddMethod(location, checksum, /* method_idx= */ 2, &info1);
-  AddMethod(location, checksum, /* method_idx= */ 2, &info2);
-
-  info1.AddMethodIndex(Hotness::kFlagStartup, location, checksum, 3, kMaxMethodIds);
-  info2.AddMethodIndex(Hotness::kFlagPostStartup, location, checksum, 3, kMaxMethodIds);
-  info3.AddMethodIndex(Hotness::kFlagStartup, location, checksum, 3, kMaxMethodIds);
-
-  AddMethod(location, checksum, /* method_idx= */ 6, &info2);
-  AddMethod(location, checksum, /* method_idx= */ 6, &info3);
-
-  AddClass(location, checksum, dex::TypeIndex(10), &info1);
-
-  AddClass(location, checksum, dex::TypeIndex(20), &info1);
-  AddClass(location, checksum, dex::TypeIndex(20), &info2);
-
-  AddClass(location, checksum, dex::TypeIndex(30), &info1);
-  AddClass(location, checksum, dex::TypeIndex(30), &info2);
-  AddClass(location, checksum, dex::TypeIndex(30), &info3);
-
-  ASSERT_EQ(info1.GetAggregationCounter(), 0);
-  info1.MergeWith(info2);
-  ASSERT_EQ(info1.GetAggregationCounter(), 1);
-  info1.MergeWith(info3);
-  ASSERT_EQ(info1.GetAggregationCounter(), 2);
-
-  ASSERT_EQ(0, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 1)));
-  ASSERT_EQ(1, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 2)));
-  ASSERT_EQ(2, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 3)));
-  ASSERT_EQ(1, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 6)));
-
-  ASSERT_EQ(0, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(10))));
-  ASSERT_EQ(1, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(20))));
-  ASSERT_EQ(2, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(30))));
-
-  // Check methods that do not exists.
-  ASSERT_EQ(-1, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 4)));
-  ASSERT_EQ(-1, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(40))));
+TEST_F(ProfileCompilationInfoTest, VersionEquality) {
+  ProfileCompilationInfo info(/*for_boot_image=*/ false);
+  ProfileCompilationInfo info1(/*for_boot_image=*/ true);
+  ASSERT_FALSE(info.Equals(info1));
 }
 
-TEST_F(ProfileCompilationInfoTest, SaveAndLoadAggregationCounters) {
-  ProfileCompilationInfo info1;
-  info1.PrepareForAggregationCounters();
+TEST_F(ProfileCompilationInfoTest, AllMethodFlags) {
+  ProfileCompilationInfo info(/*for_boot_image*/ true);
 
-  ProfileCompilationInfo info2;
-  ProfileCompilationInfo info3;
+  for (uint32_t index = 0; index <= kMaxHotnessFlagBootIndex; index++) {
+    AddMethod(&info, dex1, index, static_cast<Hotness::Flag>(1 << index));
+  }
 
+  auto run_test = [&dex1 = dex1](const ProfileCompilationInfo& info) {
+    for (uint32_t index = 0; index <= kMaxHotnessFlagBootIndex; index++) {
+      EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex1, index)).IsInProfile());
+      EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex1, index))
+          .HasFlagSet(static_cast<Hotness::Flag>(1 << index))) << index << " "
+            << info.GetMethodHotness(MethodReference(dex1, index)).GetFlags();
+    }
+  };
+  run_test(info);
+
+  // Save the profile.
+  ScratchFile profile;
+  ASSERT_TRUE(info.Save(GetFd(profile)));
+  ASSERT_EQ(0, profile.GetFile()->Flush());
+  ASSERT_TRUE(profile.GetFile()->ResetOffset());
+
+  // Load the profile and make sure we can read the data and it matches what we expect.
+  ProfileCompilationInfo loaded_info;
+  ASSERT_TRUE(loaded_info.Load(GetFd(profile)));
+  run_test(loaded_info);
+}
+
+TEST_F(ProfileCompilationInfoTest, AllMethodFlagsOnOneMethod) {
+  ProfileCompilationInfo info(/*for_boot_image*/ true);
+
+  // Set all flags on a single method.
+  for (uint32_t index = 0; index <= kMaxHotnessFlagBootIndex; index++) {
+    AddMethod(&info, dex1, 0, static_cast<Hotness::Flag>(1 << index));
+  }
+
+  auto run_test = [&dex1 = dex1](const ProfileCompilationInfo& info) {
+    for (uint32_t index = 0; index <= kMaxHotnessFlagBootIndex; index++) {
+      EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex1, 0)).IsInProfile());
+      EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex1, 0))
+          .HasFlagSet(static_cast<Hotness::Flag>(1 << index)));
+    }
+  };
+  run_test(info);
+
+  // Save the profile.
+  ScratchFile profile;
+  ASSERT_TRUE(info.Save(GetFd(profile)));
+  ASSERT_EQ(0, profile.GetFile()->Flush());
+  ASSERT_TRUE(profile.GetFile()->ResetOffset());
+
+  // Load the profile and make sure we can read the data and it matches what we expect.
+  ProfileCompilationInfo loaded_info;
+  ASSERT_TRUE(loaded_info.Load(GetFd(profile)));
+  run_test(loaded_info);
+}
+
+
+TEST_F(ProfileCompilationInfoTest, MethodFlagsMerge) {
+  ProfileCompilationInfo info1(/*for_boot_image*/ true);
+  ProfileCompilationInfo info2(/*for_boot_image*/ true);
+
+  // Set a few flags on a 2 different methods in each of the profile.
+  for (uint32_t index = 0; index <= kMaxHotnessFlagBootIndex / 4; index++) {
+    AddMethod(&info1, dex1, 0, static_cast<Hotness::Flag>(1 << index));
+    AddMethod(&info2, dex1, 1, static_cast<Hotness::Flag>(1 << index));
+  }
+
+  // Set a few more flags on the method 1.
+  for (uint32_t index = kMaxHotnessFlagBootIndex / 4 + 1;
+       index <= kMaxHotnessFlagBootIndex / 2;
+       index++) {
+    AddMethod(&info2, dex1, 1, static_cast<Hotness::Flag>(1 << index));
+  }
+
+  ASSERT_TRUE(info1.MergeWith(info2));
+
+  auto run_test = [&dex1 = dex1](const ProfileCompilationInfo& info) {
+    // Assert that the flags were merged correctly for both methods.
+    for (uint32_t index = 0; index <= kMaxHotnessFlagBootIndex / 4; index++) {
+      EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex1, 0)).IsInProfile());
+      EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex1, 0))
+          .HasFlagSet(static_cast<Hotness::Flag>(1 << index)));
+
+      EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex1, 1)).IsInProfile());
+      EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex1, 1))
+          .HasFlagSet(static_cast<Hotness::Flag>(1 << index)));
+    }
+
+    // Assert that no flags were merged unnecessary.
+    for (uint32_t index = kMaxHotnessFlagBootIndex / 4 + 1;
+         index <= kMaxHotnessFlagBootIndex / 2;
+         index++) {
+      EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex1, 0)).IsInProfile());
+      EXPECT_FALSE(info.GetMethodHotness(MethodReference(dex1, 0))
+          .HasFlagSet(static_cast<Hotness::Flag>(1 << index)));
+
+      EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex1, 1)).IsInProfile());
+      EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex1, 1))
+          .HasFlagSet(static_cast<Hotness::Flag>(1 << index)));
+    }
+
+    // Assert that no extra flags were added.
+    for (uint32_t index = kMaxHotnessFlagBootIndex / 2 + 1;
+         index <= kMaxHotnessFlagBootIndex;
+         index++) {
+      EXPECT_FALSE(info.GetMethodHotness(MethodReference(dex1, 0))
+          .HasFlagSet(static_cast<Hotness::Flag>(1 << index)));
+      EXPECT_FALSE(info.GetMethodHotness(MethodReference(dex1, 1))
+          .HasFlagSet(static_cast<Hotness::Flag>(1 << index)));
+    }
+  };
+
+  run_test(info1);
+
+  // Save the profile.
+  ScratchFile profile;
+  ASSERT_TRUE(info1.Save(GetFd(profile)));
+  ASSERT_EQ(0, profile.GetFile()->Flush());
+  ASSERT_TRUE(profile.GetFile()->ResetOffset());
+
+  // Load the profile and make sure we can read the data and it matches what we expect.
+  ProfileCompilationInfo loaded_info;
+  ASSERT_TRUE(loaded_info.Load(GetFd(profile)));
+  run_test(loaded_info);
+}
+
+TEST_F(ProfileCompilationInfoTest, SizeStressTestAllIn) {
+  SizeStressTest(/*random=*/ false);
+}
+
+TEST_F(ProfileCompilationInfoTest, SizeStressTestAllInRandom) {
+  SizeStressTest(/*random=*/ true);
+}
+
+// Verifies that we correctly add methods to the profile according to their flags.
+TEST_F(ProfileCompilationInfoTest, AddMethodsProfileMethodInfoBasic) {
   std::unique_ptr<const DexFile> dex(OpenTestDexFile("ManyMethods"));
-  std::string location = dex->GetLocation();
-  int checksum = dex->GetLocationChecksum();
 
-  AddMethod(location, checksum, /* method_idx= */ 1, &info1);
+  ProfileCompilationInfo info;
 
-  AddMethod(location, checksum, /* method_idx= */ 2, &info1);
-  AddMethod(location, checksum, /* method_idx= */ 2, &info2);
+  MethodReference hot(dex.get(), 0);
+  MethodReference hot_startup(dex.get(), 1);
+  MethodReference startup(dex.get(), 2);
 
-  info1.AddMethodIndex(Hotness::kFlagStartup, location, checksum, 3, kMaxMethodIds);
-  info2.AddMethodIndex(Hotness::kFlagPostStartup, location, checksum, 3, kMaxMethodIds);
-  info3.AddMethodIndex(Hotness::kFlagStartup, location, checksum, 3, kMaxMethodIds);
+  // Add methods
+  ASSERT_TRUE(info.AddMethod(ProfileMethodInfo(hot), Hotness::kFlagHot));
+  ASSERT_TRUE(info.AddMethod(
+      ProfileMethodInfo(hot_startup),
+      static_cast<Hotness::Flag>(Hotness::kFlagHot | Hotness::kFlagStartup)));
+  ASSERT_TRUE(info.AddMethod(ProfileMethodInfo(startup), Hotness::kFlagStartup));
 
-  AddMethod(location, checksum, /* method_idx= */ 6, &info2);
-  AddMethod(location, checksum, /* method_idx= */ 6, &info3);
+  // Verify the profile recorded them correctly.
+  EXPECT_TRUE(info.GetMethodHotness(hot).IsInProfile());
+  EXPECT_EQ(info.GetMethodHotness(hot).GetFlags(), Hotness::kFlagHot);
 
-  AddClass(location, checksum, dex::TypeIndex(10), &info1);
+  EXPECT_TRUE(info.GetMethodHotness(hot_startup).IsInProfile());
+  EXPECT_EQ(info.GetMethodHotness(hot_startup).GetFlags(),
+            static_cast<uint32_t>(Hotness::kFlagHot | Hotness::kFlagStartup));
 
-  AddClass(location, checksum, dex::TypeIndex(20), &info1);
-  AddClass(location, checksum, dex::TypeIndex(20), &info2);
+  EXPECT_TRUE(info.GetMethodHotness(startup).IsInProfile());
+  EXPECT_EQ(info.GetMethodHotness(startup).GetFlags(), Hotness::kFlagStartup);
+}
 
-  AddClass(location, checksum, dex::TypeIndex(30), &info1);
-  AddClass(location, checksum, dex::TypeIndex(30), &info2);
-  AddClass(location, checksum, dex::TypeIndex(30), &info3);
+// Verifies that we correctly add inline caches to the profile only for hot methods.
+TEST_F(ProfileCompilationInfoTest, AddMethodsProfileMethodInfoInlineCaches) {
+  ProfileCompilationInfo info;
+  MethodReference hot(dex1, 0);
+  MethodReference startup(dex1, 2);
 
-  info1.MergeWith(info2);
-  info1.MergeWith(info3);
+  // Add inline caches with the methods. The profile should record only the one for the hot method.
+  std::vector<TypeReference> types = {};
+  ProfileMethodInfo::ProfileInlineCache ic(/*dex_pc*/ 0, /*missing_types*/true, types);
+  std::vector<ProfileMethodInfo::ProfileInlineCache> inline_caches = {ic};
+  info.AddMethod(ProfileMethodInfo(hot, inline_caches), Hotness::kFlagHot);
+  info.AddMethod(ProfileMethodInfo(startup, inline_caches), Hotness::kFlagStartup);
+
+  // Check the hot method's inline cache.
+  std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> hot_pmi =
+      GetMethod(info, dex1, hot.index);
+  ASSERT_TRUE(hot_pmi != nullptr);
+  ASSERT_EQ(hot_pmi->inline_caches->size(), 1u);
+  ASSERT_TRUE(hot_pmi->inline_caches->Get(0).is_missing_types);
+
+  // Check there's no inline caches for the startup method.
+  ASSERT_TRUE(GetMethod(info, dex1, startup.index) == nullptr);
+}
+
+// Verifies that we correctly add methods to the profile according to their flags.
+TEST_F(ProfileCompilationInfoTest, AddMethodsProfileMethodInfoFail) {
+  ProfileCompilationInfo info;
+
+  MethodReference hot(dex1, 0);
+  MethodReference bad_ref(dex1, kMaxMethodIds);
+
+  std::vector<ProfileMethodInfo> pmis = {ProfileMethodInfo(hot), ProfileMethodInfo(bad_ref)};
+  ASSERT_FALSE(info.AddMethods(pmis, Hotness::kFlagHot));
+}
+
+// Verify that we can add methods with annotations.
+TEST_F(ProfileCompilationInfoTest, AddAnnotationsToMethods) {
+  ProfileCompilationInfo info;
+
+  ProfileSampleAnnotation psa1("test1");
+  ProfileSampleAnnotation psa2("test2");
+  // Save a few methods using different annotations, some overlapping, some not.
+  for (uint16_t i = 0; i < 10; i++) {
+    ASSERT_TRUE(AddMethod(&info, dex1, /* method_idx= */ i, Hotness::kFlagHot, psa1));
+  }
+  for (uint16_t i = 5; i < 15; i++) {
+    ASSERT_TRUE(AddMethod(&info, dex1, /* method_idx= */ i, Hotness::kFlagHot, psa2));
+  }
+
+  auto run_test = [&dex1 = dex1, &psa1 = psa1, &psa2 = psa2](const ProfileCompilationInfo& info) {
+    // Check that all methods are in.
+    for (uint16_t i = 0; i < 10; i++) {
+      EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex1, i), psa1).IsInProfile());
+      EXPECT_TRUE(info.GetHotMethodInfo(MethodReference(dex1, i), psa1) != nullptr);
+    }
+    for (uint16_t i = 5; i < 15; i++) {
+      EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex1, i), psa2).IsInProfile());
+      EXPECT_TRUE(info.GetHotMethodInfo(MethodReference(dex1, i), psa2) != nullptr);
+    }
+    // Check that the non-overlapping methods are not added with a wrong annotation.
+    for (uint16_t i = 10; i < 15; i++) {
+      EXPECT_FALSE(info.GetMethodHotness(MethodReference(dex1, i), psa1).IsInProfile());
+      EXPECT_FALSE(info.GetHotMethodInfo(MethodReference(dex1, i), psa1) != nullptr);
+    }
+    for (uint16_t i = 0; i < 5; i++) {
+      EXPECT_FALSE(info.GetMethodHotness(MethodReference(dex1, i), psa2).IsInProfile());
+      EXPECT_FALSE(info.GetHotMethodInfo(MethodReference(dex1, i), psa2) != nullptr);
+    }
+    // Check that when querying without an annotation only the first one is searched.
+    for (uint16_t i = 0; i < 10; i++) {
+      EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex1, i)).IsInProfile());
+      EXPECT_TRUE(info.GetHotMethodInfo(MethodReference(dex1, i)) != nullptr);
+    }
+    // ... this should be false because they belong the second appearance of dex1.
+    for (uint16_t i = 10; i < 15; i++) {
+      EXPECT_FALSE(info.GetMethodHotness(MethodReference(dex1, i)).IsInProfile());
+      EXPECT_FALSE(info.GetHotMethodInfo(MethodReference(dex1, i)) != nullptr);
+    }
+
+    // Sanity check that methods cannot be found with a non existing annotation.
+    MethodReference ref(dex1, 0);
+    ProfileSampleAnnotation not_exisiting("A");
+    EXPECT_FALSE(info.GetMethodHotness(ref, not_exisiting).IsInProfile());
+    EXPECT_FALSE(info.GetHotMethodInfo(ref, not_exisiting) != nullptr);
+  };
+
+  // Run the test before save.
+  run_test(info);
 
   ScratchFile profile;
-
-  ASSERT_TRUE(info1.Save(GetFd(profile)));
+  ASSERT_TRUE(info.Save(GetFd(profile)));
   ASSERT_EQ(0, profile.GetFile()->Flush());
 
   // Check that we get back what we saved.
   ProfileCompilationInfo loaded_info;
-  loaded_info.PrepareForAggregationCounters();
   ASSERT_TRUE(profile.GetFile()->ResetOffset());
   ASSERT_TRUE(loaded_info.Load(GetFd(profile)));
-  ASSERT_TRUE(loaded_info.Equals(info1));
+  ASSERT_TRUE(loaded_info.Equals(info));
 
-  ASSERT_EQ(2, loaded_info.GetAggregationCounter());
-
-  ASSERT_EQ(0, loaded_info.GetMethodAggregationCounter(MethodReference(dex.get(), 1)));
-  ASSERT_EQ(1, loaded_info.GetMethodAggregationCounter(MethodReference(dex.get(), 2)));
-  ASSERT_EQ(2, loaded_info.GetMethodAggregationCounter(MethodReference(dex.get(), 3)));
-  ASSERT_EQ(1, loaded_info.GetMethodAggregationCounter(MethodReference(dex.get(), 6)));
-
-  ASSERT_EQ(0, loaded_info.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(10))));
-  ASSERT_EQ(1, loaded_info.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(20))));
-  ASSERT_EQ(2, loaded_info.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(30))));
+  // Run the test after save and load.
+  run_test(loaded_info);
 }
 
-TEST_F(ProfileCompilationInfoTest, MergeTwoWithAggregationCounters) {
-  ProfileCompilationInfo info1;
-  info1.PrepareForAggregationCounters();
+// Verify that we can add classes with annotations.
+TEST_F(ProfileCompilationInfoTest, AddAnnotationsToClasses) {
+  ProfileCompilationInfo info;
 
-  ProfileCompilationInfo info2;
+  ProfileSampleAnnotation psa1("test1");
+  ProfileSampleAnnotation psa2("test2");
+  // Save a few classes using different annotations, some overlapping, some not.
+  for (uint16_t i = 0; i < 10; i++) {
+    ASSERT_TRUE(AddClass(&info, dex1, dex::TypeIndex(i), psa1));
+  }
+  for (uint16_t i = 5; i < 15; i++) {
+    ASSERT_TRUE(AddClass(&info, dex1, dex::TypeIndex(i), psa2));
+  }
 
-  std::unique_ptr<const DexFile> dex(OpenTestDexFile("ManyMethods"));
-  std::string location = dex->GetLocation();
-  int checksum = dex->GetLocationChecksum();
+  auto run_test = [&dex1 = dex1, &psa1 = psa1, &psa2 = psa2](const ProfileCompilationInfo& info) {
+    // Check that all classes are in.
+    for (uint16_t i = 0; i < 10; i++) {
+      EXPECT_TRUE(info.ContainsClass(*dex1, dex::TypeIndex(i), psa1));
+    }
+    for (uint16_t i = 5; i < 15; i++) {
+      EXPECT_TRUE(info.ContainsClass(*dex1, dex::TypeIndex(i), psa2));
+    }
+    // Check that the non-overlapping classes are not added with a wrong annotation.
+    for (uint16_t i = 10; i < 15; i++) {
+      EXPECT_FALSE(info.ContainsClass(*dex1, dex::TypeIndex(i), psa1));
+    }
+    for (uint16_t i = 0; i < 5; i++) {
+      EXPECT_FALSE(info.ContainsClass(*dex1, dex::TypeIndex(i), psa2));
+    }
+    // Check that when querying without an annotation only the first one is searched.
+    for (uint16_t i = 0; i < 10; i++) {
+      EXPECT_TRUE(info.ContainsClass(*dex1, dex::TypeIndex(i)));
+    }
+    // ... this should be false because they belong the second appearance of dex1.
+    for (uint16_t i = 10; i < 15; i++) {
+      EXPECT_FALSE(info.ContainsClass(*dex1, dex::TypeIndex(i)));
+    }
 
-  AddMethod(location, checksum, /* method_idx= */ 1, &info1);
+    // Sanity check that classes cannot be found with a non existing annotation.
+    EXPECT_FALSE(info.ContainsClass(*dex1, dex::TypeIndex(0), ProfileSampleAnnotation("new_test")));
+  };
 
-  AddMethod(location, checksum, /* method_idx= */ 2, &info1);
-  AddMethod(location, checksum, /* method_idx= */ 2, &info2);
+  // Run the test before save.
+  run_test(info);
 
-  AddClass(location, checksum, dex::TypeIndex(20), &info1);
-
-  AddClass(location, checksum, dex::TypeIndex(10), &info1);
-  AddClass(location, checksum, dex::TypeIndex(10), &info2);
-
-  info1.MergeWith(info2);
-  info1.MergeWith(info2);
-  ASSERT_EQ(2, info1.GetAggregationCounter());
-
-  // Save and load the profile to create a copy of the data
   ScratchFile profile;
-  info1.Save(GetFd(profile));
+  ASSERT_TRUE(info.Save(GetFd(profile)));
   ASSERT_EQ(0, profile.GetFile()->Flush());
 
+  // Check that we get back what we saved.
   ProfileCompilationInfo loaded_info;
-  loaded_info.PrepareForAggregationCounters();
-  profile.GetFile()->ResetOffset();
-  loaded_info.Load(GetFd(profile));
+  ASSERT_TRUE(profile.GetFile()->ResetOffset());
+  ASSERT_TRUE(loaded_info.Load(GetFd(profile)));
+  ASSERT_TRUE(loaded_info.Equals(info));
 
-  // Merge the data
-  info1.MergeWith(loaded_info);
+  // Run the test after save and load.
+  run_test(loaded_info);
+}
 
-  ASSERT_EQ(4, info1.GetAggregationCounter());
+// Verify we can merge samples with annotations.
+TEST_F(ProfileCompilationInfoTest, MergeWithAnnotations) {
+  ProfileCompilationInfo info1;
+  ProfileCompilationInfo info2;
 
-  ASSERT_EQ(0, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 1)));
-  ASSERT_EQ(4, info1.GetMethodAggregationCounter(MethodReference(dex.get(), 2)));
+  ProfileSampleAnnotation psa1("test1");
+  ProfileSampleAnnotation psa2("test2");
 
-  ASSERT_EQ(4, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(10))));
-  ASSERT_EQ(0, info1.GetClassAggregationCounter(TypeReference(dex.get(), dex::TypeIndex(20))));
+  for (uint16_t i = 0; i < 10; i++) {
+    ASSERT_TRUE(AddMethod(&info1, dex1, /* method_idx= */ i, Hotness::kFlagHot, psa1));
+    ASSERT_TRUE(AddClass(&info1, dex1, dex::TypeIndex(i), psa1));
+  }
+  for (uint16_t i = 5; i < 15; i++) {
+    ASSERT_TRUE(AddMethod(&info2, dex1, /* method_idx= */ i, Hotness::kFlagHot, psa1));
+    ASSERT_TRUE(AddMethod(&info2, dex1, /* method_idx= */ i, Hotness::kFlagHot, psa2));
+    ASSERT_TRUE(AddMethod(&info2, dex2, /* method_idx= */ i, Hotness::kFlagHot, psa2));
+    ASSERT_TRUE(AddClass(&info2, dex1, dex::TypeIndex(i), psa1));
+    ASSERT_TRUE(AddClass(&info2, dex1, dex::TypeIndex(i), psa2));
+  }
+
+  ProfileCompilationInfo info;
+  ASSERT_TRUE(info.MergeWith(info1));
+  ASSERT_TRUE(info.MergeWith(info2));
+
+  // Check that all items are in.
+  for (uint16_t i = 0; i < 15; i++) {
+    EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex1, i), psa1).IsInProfile());
+    EXPECT_TRUE(info.ContainsClass(*dex1, dex::TypeIndex(i), psa1));
+  }
+  for (uint16_t i = 5; i < 15; i++) {
+    EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex1, i), psa2).IsInProfile());
+    EXPECT_TRUE(info.GetMethodHotness(MethodReference(dex2, i), psa2).IsInProfile());
+    EXPECT_TRUE(info.ContainsClass(*dex1, dex::TypeIndex(i), psa2));
+  }
+
+  // Check that the non-overlapping items are not added with a wrong annotation.
+  for (uint16_t i = 0; i < 5; i++) {
+    EXPECT_FALSE(info.GetMethodHotness(MethodReference(dex1, i), psa2).IsInProfile());
+    EXPECT_FALSE(info.GetMethodHotness(MethodReference(dex2, i), psa2).IsInProfile());
+    EXPECT_FALSE(info.ContainsClass(*dex1, dex::TypeIndex(i), psa2));
+  }
+}
+
+// Verify the bulk extraction API.
+TEST_F(ProfileCompilationInfoTest, ExtractInfoWithAnnations) {
+  ProfileCompilationInfo info;
+
+  ProfileSampleAnnotation psa1("test1");
+  ProfileSampleAnnotation psa2("test2");
+
+  std::set<dex::TypeIndex> expected_classes;
+  std::set<uint16_t> expected_hot_methods;
+  std::set<uint16_t> expected_startup_methods;
+  std::set<uint16_t> expected_post_startup_methods;
+
+  for (uint16_t i = 0; i < 10; i++) {
+    ASSERT_TRUE(AddMethod(&info, dex1, /* method_idx= */ i, Hotness::kFlagHot, psa1));
+    ASSERT_TRUE(AddClass(&info, dex1, dex::TypeIndex(i), psa1));
+    expected_hot_methods.insert(i);
+    expected_classes.insert(dex::TypeIndex(i));
+  }
+  for (uint16_t i = 5; i < 15; i++) {
+    ASSERT_TRUE(AddMethod(&info, dex1, /* method_idx= */ i, Hotness::kFlagHot, psa2));
+    ASSERT_TRUE(AddMethod(&info, dex1, /* method_idx= */ i, Hotness::kFlagStartup, psa1));
+    expected_startup_methods.insert(i);
+  }
+
+  std::set<dex::TypeIndex> classes;
+  std::set<uint16_t> hot_methods;
+  std::set<uint16_t> startup_methods;
+  std::set<uint16_t> post_startup_methods;
+
+  EXPECT_TRUE(info.GetClassesAndMethods(
+      *dex1, &classes, &hot_methods, &startup_methods, &post_startup_methods, psa1));
+  EXPECT_EQ(expected_classes, classes);
+  EXPECT_EQ(expected_hot_methods, hot_methods);
+  EXPECT_EQ(expected_startup_methods, startup_methods);
+  EXPECT_EQ(expected_post_startup_methods, post_startup_methods);
+
+  EXPECT_FALSE(info.GetClassesAndMethods(
+      *dex1,
+      &classes,
+      &hot_methods,
+      &startup_methods,
+      &post_startup_methods,
+      ProfileSampleAnnotation("new_test")));
+}
+
+// Verify the behavior for adding methods with annotations and different dex checksums.
+TEST_F(ProfileCompilationInfoTest, AddMethodsWithAnnotationAndDifferentChecksum) {
+  ProfileCompilationInfo info;
+
+  ProfileSampleAnnotation psa1("test1");
+  ProfileSampleAnnotation psa2("test2");
+
+  MethodReference ref(dex1, 0);
+  MethodReference ref_checksum_missmatch(dex1_checksum_missmatch, 1);
+
+  ASSERT_TRUE(info.AddMethod(ProfileMethodInfo(ref), Hotness::kFlagHot, psa1));
+  // Adding a method with a different dex checksum and the same annotation should fail.
+  ASSERT_FALSE(info.AddMethod(ProfileMethodInfo(ref_checksum_missmatch), Hotness::kFlagHot, psa1));
+  // However, a method with a different dex checksum and a different annotation should be ok.
+  ASSERT_TRUE(info.AddMethod(ProfileMethodInfo(ref_checksum_missmatch), Hotness::kFlagHot, psa2));
+}
+
+// Verify the behavior for searching method with annotations and different dex checksums.
+TEST_F(ProfileCompilationInfoTest, FindMethodsWithAnnotationAndDifferentChecksum) {
+  ProfileCompilationInfo info;
+
+  ProfileSampleAnnotation psa1("test1");
+
+  MethodReference ref(dex1, 0);
+  MethodReference ref_checksum_missmatch(dex1_checksum_missmatch, 0);
+
+  ASSERT_TRUE(info.AddMethod(ProfileMethodInfo(ref), Hotness::kFlagHot, psa1));
+
+  // The method should be in the profile when searched with the correct data.
+  EXPECT_TRUE(info.GetMethodHotness(ref, psa1).IsInProfile());
+  // We should get a negative result if the dex checksum  does not match.
+  EXPECT_FALSE(info.GetMethodHotness(ref_checksum_missmatch, psa1).IsInProfile());
+
+  // If we search without annotation we should have the same behaviour.
+  EXPECT_TRUE(info.GetMethodHotness(ref).IsInProfile());
+  EXPECT_FALSE(info.GetMethodHotness(ref_checksum_missmatch).IsInProfile());
+}
+
+TEST_F(ProfileCompilationInfoTest, ClearDataAndAdjustVersionRegularToBoot) {
+  ProfileCompilationInfo info;
+
+  AddMethod(&info, dex1, /* method_idx= */ 0, Hotness::kFlagHot);
+
+  info.ClearDataAndAdjustVersion(/*for_boot_image=*/true);
+  ASSERT_TRUE(info.IsEmpty());
+  ASSERT_TRUE(info.IsForBootImage());
+}
+
+TEST_F(ProfileCompilationInfoTest, ClearDataAndAdjustVersionBootToRegular) {
+  ProfileCompilationInfo info(/*for_boot_image=*/true);
+
+  AddMethod(&info, dex1, /* method_idx= */ 0, Hotness::kFlagHot);
+
+  info.ClearDataAndAdjustVersion(/*for_boot_image=*/false);
+  ASSERT_TRUE(info.IsEmpty());
+  ASSERT_FALSE(info.IsForBootImage());
+}
+
+template<class T>
+static std::list<T> sort(const std::list<T>& list) {
+  std::list<T> copy(list);
+  copy.sort();
+  return copy;
+}
+
+// Verify we can extract profile data
+TEST_F(ProfileCompilationInfoTest, ExtractProfileData) {
+  // Setup test data
+  ProfileCompilationInfo info;
+
+  ProfileSampleAnnotation psa1("test1");
+  ProfileSampleAnnotation psa2("test2");
+
+  for (uint16_t i = 0; i < 10; i++) {
+    // Add dex1 data with different annotations so that we can check the annotation count.
+    ASSERT_TRUE(AddMethod(&info, dex1, /* method_idx= */ i, Hotness::kFlagHot, psa1));
+    ASSERT_TRUE(AddClass(&info, dex1, dex::TypeIndex(i), psa1));
+    ASSERT_TRUE(AddMethod(&info, dex1, /* method_idx= */ i, Hotness::kFlagStartup, psa2));
+    ASSERT_TRUE(AddClass(&info, dex1, dex::TypeIndex(i), psa2));
+    ASSERT_TRUE(AddMethod(&info, dex2, /* method_idx= */ i, Hotness::kFlagHot, psa2));
+    // dex3 will not be used in the data extraction
+    ASSERT_TRUE(AddMethod(&info, dex3, /* method_idx= */ i, Hotness::kFlagHot, psa2));
+  }
+
+  std::vector<std::unique_ptr<const DexFile>> dex_files;
+  dex_files.push_back(std::unique_ptr<const DexFile>(dex1));
+  dex_files.push_back(std::unique_ptr<const DexFile>(dex2));
+
+  // Run the test: extract the data for dex1 and dex2
+  std::unique_ptr<FlattenProfileData> flattenProfileData = info.ExtractProfileData(dex_files);
+
+  // Check the results
+  ASSERT_TRUE(flattenProfileData != nullptr);
+  ASSERT_EQ(flattenProfileData->GetMaxAggregationForMethods(), 2u);
+  ASSERT_EQ(flattenProfileData->GetMaxAggregationForClasses(), 2u);
+
+  const SafeMap<MethodReference, ItemMetadata>& methods = flattenProfileData->GetMethodData();
+  const SafeMap<TypeReference, ItemMetadata>& classes = flattenProfileData->GetClassData();
+  ASSERT_EQ(methods.size(), 20u);  // 10 methods in dex1, 10 in dex2
+  ASSERT_EQ(classes.size(), 10u);  // 10 methods in dex1
+
+  std::list<ProfileSampleAnnotation> expectedAnnotations1({psa1, psa2});
+  std::list<ProfileSampleAnnotation> expectedAnnotations2({psa2});
+  for (uint16_t i = 0; i < 10; i++) {
+    // Check dex1 methods.
+    auto mIt1 = methods.find(MethodReference(dex1, i));
+    ASSERT_TRUE(mIt1 != methods.end());
+    ASSERT_EQ(mIt1->second.GetFlags(), Hotness::kFlagHot | Hotness::kFlagStartup);
+    ASSERT_EQ(sort(mIt1->second.GetAnnotations()), expectedAnnotations1);
+    // Check dex1 classes
+    auto cIt1 = classes.find(TypeReference(dex1, dex::TypeIndex(i)));
+    ASSERT_TRUE(cIt1 != classes.end());
+    ASSERT_EQ(cIt1->second.GetFlags(), 0);
+    ASSERT_EQ(sort(cIt1->second.GetAnnotations()), expectedAnnotations1);
+    // Check dex2 methods.
+    auto mIt2 = methods.find(MethodReference(dex2, i));
+    ASSERT_TRUE(mIt2 != methods.end());
+    ASSERT_EQ(mIt2->second.GetFlags(), Hotness::kFlagHot);
+    ASSERT_EQ(sort(mIt2->second.GetAnnotations()), expectedAnnotations2);
+  }
+
+  // Release the ownership as this is held by the test class;
+  for (std::unique_ptr<const DexFile>& dex : dex_files) {
+    UNUSED(dex.release());
+  }
+}
+
+// Verify we can merge 2 previously flatten data.
+TEST_F(ProfileCompilationInfoTest, MergeFlattenData) {
+  // Setup test data: two profiles with different content which will be used
+  // to extract FlattenProfileData, later to be merged.
+  ProfileCompilationInfo info1;
+  ProfileCompilationInfo info2;
+
+  ProfileSampleAnnotation psa1("test1");
+  ProfileSampleAnnotation psa2("test2");
+
+  for (uint16_t i = 0; i < 10; i++) {
+    // Add dex1 data with different annotations so that we can check the annotation count.
+    ASSERT_TRUE(AddMethod(&info1, dex1, /* method_idx= */ i, Hotness::kFlagHot, psa1));
+    ASSERT_TRUE(AddClass(&info2, dex1, dex::TypeIndex(i), psa1));
+    ASSERT_TRUE(AddMethod(&info1, dex1, /* method_idx= */ i, Hotness::kFlagStartup, psa2));
+    ASSERT_TRUE(AddClass(&info1, dex1, dex::TypeIndex(i), psa2));
+    ASSERT_TRUE(AddMethod(i % 2 == 0 ? &info1 : &info2, dex2,
+                          /* method_idx= */ i,
+                          Hotness::kFlagHot,
+                          psa2));
+  }
+
+  std::vector<std::unique_ptr<const DexFile>> dex_files;
+  dex_files.push_back(std::unique_ptr<const DexFile>(dex1));
+  dex_files.push_back(std::unique_ptr<const DexFile>(dex2));
+
+  // Run the test: extract the data for dex1 and dex2 and then merge it into
+  std::unique_ptr<FlattenProfileData> flattenProfileData1 = info1.ExtractProfileData(dex_files);
+  std::unique_ptr<FlattenProfileData> flattenProfileData2 = info2.ExtractProfileData(dex_files);
+
+  flattenProfileData1->MergeData(*flattenProfileData2);
+  // Check the results
+  ASSERT_EQ(flattenProfileData1->GetMaxAggregationForMethods(), 2u);
+  ASSERT_EQ(flattenProfileData1->GetMaxAggregationForClasses(), 2u);
+
+  const SafeMap<MethodReference, ItemMetadata>& methods = flattenProfileData1->GetMethodData();
+  const SafeMap<TypeReference, ItemMetadata>& classes = flattenProfileData1->GetClassData();
+  ASSERT_EQ(methods.size(), 20u);  // 10 methods in dex1, 10 in dex2
+  ASSERT_EQ(classes.size(), 10u);  // 10 methods in dex1
+
+  std::list<ProfileSampleAnnotation> expectedAnnotations1({psa1, psa2});
+  std::list<ProfileSampleAnnotation> expectedAnnotations2({psa2});
+  for (uint16_t i = 0; i < 10; i++) {
+    // Check dex1 methods.
+    auto mIt1 = methods.find(MethodReference(dex1, i));
+    ASSERT_TRUE(mIt1 != methods.end());
+    ASSERT_EQ(mIt1->second.GetFlags(), Hotness::kFlagHot | Hotness::kFlagStartup);
+    ASSERT_EQ(sort(mIt1->second.GetAnnotations()), expectedAnnotations1);
+    // Check dex1 classes
+    auto cIt1 = classes.find(TypeReference(dex1, dex::TypeIndex(i)));
+    ASSERT_TRUE(cIt1 != classes.end());
+    ASSERT_EQ(cIt1->second.GetFlags(), 0);
+    ASSERT_EQ(sort(cIt1->second.GetAnnotations()).size(), expectedAnnotations1.size());
+    ASSERT_EQ(sort(cIt1->second.GetAnnotations()), expectedAnnotations1);
+    // Check dex2 methods.
+    auto mIt2 = methods.find(MethodReference(dex2, i));
+    ASSERT_TRUE(mIt2 != methods.end());
+    ASSERT_EQ(mIt2->second.GetFlags(), Hotness::kFlagHot);
+    ASSERT_EQ(sort(mIt2->second.GetAnnotations()), expectedAnnotations2);
+  }
+
+  // Release the ownership as this is held by the test class;
+  for (std::unique_ptr<const DexFile>& dex : dex_files) {
+    UNUSED(dex.release());
+  }
 }
 
 }  // namespace art
diff --git a/libprofile/profile/profile_helpers.h b/libprofile/profile/profile_helpers.h
new file mode 100644
index 0000000..95e829b
--- /dev/null
+++ b/libprofile/profile/profile_helpers.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBPROFILE_PROFILE_PROFILE_HELPERS_H_
+#define ART_LIBPROFILE_PROFILE_PROFILE_HELPERS_H_
+
+#include <unistd.h>
+
+#include <vector>
+
+#include "base/globals.h"
+
+namespace art {
+
+// Returns true if all the bytes were successfully written to the file descriptor.
+inline bool WriteBuffer(int fd, const uint8_t* buffer, size_t byte_count) {
+  while (byte_count > 0) {
+    int bytes_written = TEMP_FAILURE_RETRY(write(fd, buffer, byte_count));
+    if (bytes_written == -1) {
+      return false;
+    }
+    byte_count -= bytes_written;  // Reduce the number of remaining bytes.
+    buffer += bytes_written;  // Move the buffer forward.
+  }
+  return true;
+}
+
+// Add the string bytes to the buffer.
+inline void AddStringToBuffer(std::vector<uint8_t>* buffer, const std::string& value) {
+  buffer->insert(buffer->end(), value.begin(), value.end());
+}
+
+// Insert each byte, from low to high into the buffer.
+template <typename T>
+inline void AddUintToBuffer(std::vector<uint8_t>* buffer, T value) {
+  for (size_t i = 0; i < sizeof(T); i++) {
+    buffer->push_back((value >> (i * kBitsPerByte)) & 0xff);
+  }
+}
+
+}  // namespace art
+
+#endif  // ART_LIBPROFILE_PROFILE_PROFILE_HELPERS_H_
diff --git a/oatdump/Android.bp b/oatdump/Android.bp
index 64d49b4..e36d9d7 100644
--- a/oatdump/Android.bp
+++ b/oatdump/Android.bp
@@ -37,6 +37,10 @@
         "libprofile",
         "libbase",
     ],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_binary {
@@ -55,6 +59,9 @@
         "libprofiled",
         "libbase",
     ],
+    apex_available: [
+        "com.android.art.debug",
+    ],
 }
 
 cc_defaults {
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 6fa4e9d..8d33970 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -110,10 +110,6 @@
 const char* image_roots_descriptions_[] = {
   "kDexCaches",
   "kClassRoots",
-  "kOomeWhenThrowingException",
-  "kOomeWhenThrowingOome",
-  "kOomeWhenHandlingStackOverflow",
-  "kNoClassDefFoundError",
   "kSpecialRoots",
 };
 
@@ -199,7 +195,8 @@
         info.code_size = 0;  /* The symbol lasts until the next symbol. */        \
         method_debug_infos_.push_back(std::move(info));                           \
       }
-    DO_TRAMPOLINE(JniDlsymLookup);
+    DO_TRAMPOLINE(JniDlsymLookupTrampoline);
+    DO_TRAMPOLINE(JniDlsymLookupCriticalTrampoline);
     DO_TRAMPOLINE(QuickGenericJniTrampoline);
     DO_TRAMPOLINE(QuickImtConflictTrampoline);
     DO_TRAMPOLINE(QuickResolutionTrampoline);
@@ -449,8 +446,10 @@
     os << StringPrintf("\n\n");
 
     DUMP_OAT_HEADER_OFFSET("EXECUTABLE", GetExecutableOffset);
-    DUMP_OAT_HEADER_OFFSET("JNI DLSYM LOOKUP",
-                           GetJniDlsymLookupOffset);
+    DUMP_OAT_HEADER_OFFSET("JNI DLSYM LOOKUP TRAMPOLINE",
+                           GetJniDlsymLookupTrampolineOffset);
+    DUMP_OAT_HEADER_OFFSET("JNI DLSYM LOOKUP CRITICAL TRAMPOLINE",
+                           GetJniDlsymLookupCriticalTrampolineOffset);
     DUMP_OAT_HEADER_OFFSET("QUICK GENERIC JNI TRAMPOLINE",
                            GetQuickGenericJniTrampolineOffset);
     DUMP_OAT_HEADER_OFFSET("QUICK IMT CONFLICT TRAMPOLINE",
@@ -1893,8 +1892,6 @@
                                oat_location,
                                /*executable=*/ false,
                                /*low_4gb=*/ false,
-                               /*abs_dex_location=*/ nullptr,
-                               /*reservation=*/ nullptr,
                                &error_msg);
     }
     if (oat_file == nullptr) {
@@ -1972,7 +1969,7 @@
       stats_.file_bytes = file->GetLength();
       // If the image is compressed, adjust to decompressed size.
       size_t uncompressed_size = image_header_.GetImageSize() - sizeof(ImageHeader);
-      if (image_header_.HasCompressedBlock()) {
+      if (!image_header_.HasCompressedBlock()) {
         DCHECK_EQ(uncompressed_size, data_size) << "Sizes should match for uncompressed image";
       }
       stats_.file_bytes += uncompressed_size - data_size;
@@ -2138,7 +2135,12 @@
   const void* GetQuickOatCodeBegin(ArtMethod* m) REQUIRES_SHARED(Locks::mutator_lock_) {
     const void* quick_code = m->GetEntryPointFromQuickCompiledCodePtrSize(
         image_header_.GetPointerSize());
-    if (Runtime::Current()->GetClassLinker()->IsQuickResolutionStub(quick_code)) {
+    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+    if (class_linker->IsQuickResolutionStub(quick_code) ||
+        class_linker->IsQuickToInterpreterBridge(quick_code) ||
+        class_linker->IsQuickGenericJniStub(quick_code) ||
+        class_linker->IsJniDlsymLookupStub(quick_code) ||
+        class_linker->IsJniDlsymLookupCriticalStub(quick_code)) {
       quick_code = oat_dumper_->GetQuickOatCode(m);
     }
     if (oat_dumper_->GetInstructionSet() == InstructionSet::kThumb2) {
@@ -2153,7 +2155,9 @@
     if (oat_code_begin == nullptr) {
       return 0;
     }
-    return oat_code_begin[-1];
+    OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(
+        reinterpret_cast<uintptr_t>(oat_code_begin) - sizeof(OatQuickMethodHeader));
+    return method_header->GetCodeSize();
   }
 
   const void* GetQuickOatCodeEnd(ArtMethod* m)
@@ -2351,12 +2355,9 @@
   void DumpMethod(ArtMethod* method, std::ostream& indent_os)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(method != nullptr);
-    const void* quick_oat_code_begin = GetQuickOatCodeBegin(method);
-    const void* quick_oat_code_end = GetQuickOatCodeEnd(method);
     const PointerSize pointer_size = image_header_.GetPointerSize();
-    OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(
-        reinterpret_cast<uintptr_t>(quick_oat_code_begin) - sizeof(OatQuickMethodHeader));
     if (method->IsNative()) {
+      const void* quick_oat_code_begin = GetQuickOatCodeBegin(method);
       bool first_occurrence;
       uint32_t quick_oat_code_size = GetQuickOatCodeSize(method);
       ComputeOatSize(quick_oat_code_begin, &first_occurrence);
@@ -2383,11 +2384,16 @@
       size_t dex_instruction_bytes = code_item_accessor.InsnsSizeInCodeUnits() * 2;
       stats_.dex_instruction_bytes += dex_instruction_bytes;
 
+      const void* quick_oat_code_begin = GetQuickOatCodeBegin(method);
+      const void* quick_oat_code_end = GetQuickOatCodeEnd(method);
+
       bool first_occurrence;
       size_t vmap_table_bytes = 0u;
-      if (!method_header->IsOptimized()) {
-        // Method compiled with the optimizing compiler have no vmap table.
-        vmap_table_bytes = ComputeOatSize(method_header->GetVmapTable(), &first_occurrence);
+      if (quick_oat_code_begin != nullptr) {
+        OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(
+            reinterpret_cast<uintptr_t>(quick_oat_code_begin) - sizeof(OatQuickMethodHeader));
+        vmap_table_bytes = ComputeOatSize(method_header->GetOptimizedCodeInfoPtr(),
+                                          &first_occurrence);
         if (first_occurrence) {
           stats_.vmap_table_bytes += vmap_table_bytes;
         }
@@ -2766,8 +2772,6 @@
                                                     options->app_oat_,
                                                     /*executable=*/ false,
                                                     /*low_4gb=*/ true,
-                                                    /*abs_dex_location=*/ nullptr,
-                                                    /*reservation=*/ nullptr,
                                                     &error_msg));
     if (oat_file == nullptr) {
       LOG(ERROR) << "Failed to open oat file " << options->app_oat_ << " with error " << error_msg;
@@ -2778,12 +2782,14 @@
     if (space == nullptr) {
       LOG(ERROR) << "Failed to open app image " << options->app_image_ << " with error "
                  << error_msg;
+      return EXIT_FAILURE;
     }
     // Open dex files for the image.
     std::vector<std::unique_ptr<const DexFile>> dex_files;
     if (!runtime->GetClassLinker()->OpenImageDexFiles(space.get(), &dex_files, &error_msg)) {
       LOG(ERROR) << "Failed to open app image dex files " << options->app_image_ << " with error "
                  << error_msg;
+      return EXIT_FAILURE;
     }
     // Dump the actual image.
     int result = DumpImage(space.get(), options, os);
@@ -2794,7 +2800,10 @@
   }
 
   gc::Heap* heap = runtime->GetHeap();
-  CHECK(heap->HasBootImageSpace()) << "No image spaces";
+  if (!heap->HasBootImageSpace()) {
+    LOG(ERROR) << "No image spaces";
+    return EXIT_FAILURE;
+  }
   for (gc::space::ImageSpace* image_space : heap->GetBootImageSpaces()) {
     int result = DumpImage(image_space, options, os);
     if (result != EXIT_SUCCESS) {
@@ -2883,13 +2892,16 @@
     LOG(WARNING) << "No dex filename provided, "
                  << "oatdump might fail if the oat file does not contain the dex code.";
   }
+  std::string dex_filename_str((dex_filename != nullptr) ? dex_filename : "");
+  ArrayRef<const std::string> dex_filenames(&dex_filename_str,
+                                            /*size=*/ (dex_filename != nullptr) ? 1u : 0u);
   std::string error_msg;
   std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
                                                   oat_filename,
                                                   oat_filename,
                                                   /*executable=*/ false,
                                                   /*low_4gb=*/ false,
-                                                  dex_filename,
+                                                  dex_filenames,
                                                   /*reservation=*/ nullptr,
                                                   &error_msg));
   if (oat_file == nullptr) {
@@ -2908,13 +2920,16 @@
                         const char* dex_filename,
                         std::string& output_name,
                         bool no_bits) {
+  std::string dex_filename_str((dex_filename != nullptr) ? dex_filename : "");
+  ArrayRef<const std::string> dex_filenames(&dex_filename_str,
+                                            /*size=*/ (dex_filename != nullptr) ? 1u : 0u);
   std::string error_msg;
   std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
                                                   oat_filename,
                                                   oat_filename,
                                                   /*executable=*/ false,
                                                   /*low_4gb=*/ false,
-                                                  dex_filename,
+                                                  dex_filenames,
                                                   /*reservation=*/ nullptr,
                                                   &error_msg));
   if (oat_file == nullptr) {
@@ -2955,13 +2970,16 @@
     std::vector<const DexFile*> class_path;
 
     if (oat_filename != nullptr) {
+    std::string dex_filename_str((dex_filename != nullptr) ? dex_filename : "");
+    ArrayRef<const std::string> dex_filenames(&dex_filename_str,
+                                              /*size=*/ (dex_filename != nullptr) ? 1u : 0u);
       std::string error_msg;
       std::unique_ptr<OatFile> oat_file(OatFile::Open(/*zip_fd=*/ -1,
                                                       oat_filename,
                                                       oat_filename,
                                                       /*executable=*/ false,
                                                       /*low_4gb=*/false,
-                                                      dex_filename,
+                                                      dex_filenames,
                                                       /*reservation=*/ nullptr,
                                                       &error_msg));
       if (oat_file == nullptr) {
@@ -3482,7 +3500,7 @@
         "\n"
         // Either oat-file or image is required.
         "  --oat-file=<file.oat>: specifies an input oat filename.\n"
-        "      Example: --oat-file=/system/framework/boot.oat\n"
+        "      Example: --oat-file=/system/framework/arm64/boot.oat\n"
         "\n"
         "  --image=<file.art>: specifies an input image location.\n"
         "      Example: --image=/system/framework/boot.art\n"
diff --git a/oatdump/oatdump_app_test.cc b/oatdump/oatdump_app_test.cc
index 4490647..b4997ba 100644
--- a/oatdump/oatdump_app_test.cc
+++ b/oatdump/oatdump_app_test.cc
@@ -42,4 +42,13 @@
   ASSERT_TRUE(Exec(kStatic, kModeAppImage, {}, kListAndCode));
 }
 
+TEST_F(OatDumpTest, TestAppImageInvalidPath) {
+  TEST_DISABLED_WITHOUT_BAKER_READ_BARRIERS();  // GC bug, b/126305867
+  TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
+  const std::string app_image_arg = "--app-image-file=" + GetAppImageName();
+  ASSERT_TRUE(GenerateAppOdexFile(kStatic, {"--runtime-arg", "-Xmx64M", app_image_arg}));
+  SetAppImageName("missing_app_image.art");
+  ASSERT_TRUE(Exec(kStatic, kModeAppImage, {}, kListAndCode, /*expect_failure=*/true));
+}
+
 }  // namespace art
diff --git a/oatdump/oatdump_image_test.cc b/oatdump/oatdump_image_test.cc
index 0a076f0..6270105 100644
--- a/oatdump/oatdump_image_test.cc
+++ b/oatdump/oatdump_image_test.cc
@@ -18,32 +18,30 @@
 
 namespace art {
 
-// Disable tests on arm and mips as they are taking too long to run. b/27824283.
-#define TEST_DISABLED_FOR_ARM_AND_MIPS() \
+// Disable tests on arm and arm64 as they are taking too long to run. b/27824283.
+#define TEST_DISABLED_FOR_ARM_AND_ARM64() \
     TEST_DISABLED_FOR_ARM(); \
     TEST_DISABLED_FOR_ARM64(); \
-    TEST_DISABLED_FOR_MIPS(); \
-    TEST_DISABLED_FOR_MIPS64(); \
 
 TEST_F(OatDumpTest, TestImage) {
-  TEST_DISABLED_FOR_ARM_AND_MIPS();
+  TEST_DISABLED_FOR_ARM_AND_ARM64();
   std::string error_msg;
   ASSERT_TRUE(Exec(kDynamic, kModeArt, {}, kListAndCode));
 }
 TEST_F(OatDumpTest, TestImageStatic) {
-  TEST_DISABLED_FOR_ARM_AND_MIPS();
+  TEST_DISABLED_FOR_ARM_AND_ARM64();
   TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
   std::string error_msg;
   ASSERT_TRUE(Exec(kStatic, kModeArt, {}, kListAndCode));
 }
 
 TEST_F(OatDumpTest, TestOatImage) {
-  TEST_DISABLED_FOR_ARM_AND_MIPS();
+  TEST_DISABLED_FOR_ARM_AND_ARM64();
   std::string error_msg;
   ASSERT_TRUE(Exec(kDynamic, kModeCoreOat, {}, kListAndCode));
 }
 TEST_F(OatDumpTest, TestOatImageStatic) {
-  TEST_DISABLED_FOR_ARM_AND_MIPS();
+  TEST_DISABLED_FOR_ARM_AND_ARM64();
   TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
   std::string error_msg;
   ASSERT_TRUE(Exec(kStatic, kModeCoreOat, {}, kListAndCode));
diff --git a/oatdump/oatdump_test.cc b/oatdump/oatdump_test.cc
index 8505b0c..8dcc143 100644
--- a/oatdump/oatdump_test.cc
+++ b/oatdump/oatdump_test.cc
@@ -20,75 +20,73 @@
 
 namespace art {
 
-// Disable tests on arm and mips as they are taking too long to run. b/27824283.
-#define TEST_DISABLED_FOR_ARM_AND_MIPS() \
+// Disable tests on arm and arm64 as they are taking too long to run. b/27824283.
+#define TEST_DISABLED_FOR_ARM_AND_ARM64() \
     TEST_DISABLED_FOR_ARM(); \
     TEST_DISABLED_FOR_ARM64(); \
-    TEST_DISABLED_FOR_MIPS(); \
-    TEST_DISABLED_FOR_MIPS64(); \
 
 TEST_F(OatDumpTest, TestNoDumpVmap) {
-  TEST_DISABLED_FOR_ARM_AND_MIPS();
+  TEST_DISABLED_FOR_ARM_AND_ARM64();
   std::string error_msg;
   ASSERT_TRUE(Exec(kDynamic, kModeArt, {"--no-dump:vmap"}, kListAndCode));
 }
 TEST_F(OatDumpTest, TestNoDumpVmapStatic) {
-  TEST_DISABLED_FOR_ARM_AND_MIPS();
+  TEST_DISABLED_FOR_ARM_AND_ARM64();
   TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
   std::string error_msg;
   ASSERT_TRUE(Exec(kStatic, kModeArt, {"--no-dump:vmap"}, kListAndCode));
 }
 
 TEST_F(OatDumpTest, TestNoDisassemble) {
-  TEST_DISABLED_FOR_ARM_AND_MIPS();
+  TEST_DISABLED_FOR_ARM_AND_ARM64();
   std::string error_msg;
   ASSERT_TRUE(Exec(kDynamic, kModeArt, {"--no-disassemble"}, kListAndCode));
 }
 TEST_F(OatDumpTest, TestNoDisassembleStatic) {
-  TEST_DISABLED_FOR_ARM_AND_MIPS();
+  TEST_DISABLED_FOR_ARM_AND_ARM64();
   TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
   std::string error_msg;
   ASSERT_TRUE(Exec(kStatic, kModeArt, {"--no-disassemble"}, kListAndCode));
 }
 
 TEST_F(OatDumpTest, TestListClasses) {
-  TEST_DISABLED_FOR_ARM_AND_MIPS();
+  TEST_DISABLED_FOR_ARM_AND_ARM64();
   std::string error_msg;
   ASSERT_TRUE(Exec(kDynamic, kModeArt, {"--list-classes"}, kListOnly));
 }
 TEST_F(OatDumpTest, TestListClassesStatic) {
-  TEST_DISABLED_FOR_ARM_AND_MIPS();
+  TEST_DISABLED_FOR_ARM_AND_ARM64();
   TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
   std::string error_msg;
   ASSERT_TRUE(Exec(kStatic, kModeArt, {"--list-classes"}, kListOnly));
 }
 
 TEST_F(OatDumpTest, TestListMethods) {
-  TEST_DISABLED_FOR_ARM_AND_MIPS();
+  TEST_DISABLED_FOR_ARM_AND_ARM64();
   std::string error_msg;
   ASSERT_TRUE(Exec(kDynamic, kModeArt, {"--list-methods"}, kListOnly));
 }
 TEST_F(OatDumpTest, TestListMethodsStatic) {
-  TEST_DISABLED_FOR_ARM_AND_MIPS();
+  TEST_DISABLED_FOR_ARM_AND_ARM64();
   TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
   std::string error_msg;
   ASSERT_TRUE(Exec(kStatic, kModeArt, {"--list-methods"}, kListOnly));
 }
 
 TEST_F(OatDumpTest, TestSymbolize) {
-  TEST_DISABLED_FOR_ARM_AND_MIPS();
+  TEST_DISABLED_FOR_ARM_AND_ARM64();
   std::string error_msg;
   ASSERT_TRUE(Exec(kDynamic, kModeSymbolize, {}, kListOnly));
 }
 TEST_F(OatDumpTest, TestSymbolizeStatic) {
-  TEST_DISABLED_FOR_ARM_AND_MIPS();
+  TEST_DISABLED_FOR_ARM_AND_ARM64();
   TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
   std::string error_msg;
   ASSERT_TRUE(Exec(kStatic, kModeSymbolize, {}, kListOnly));
 }
 
 TEST_F(OatDumpTest, TestExportDex) {
-  TEST_DISABLED_FOR_ARM_AND_MIPS();
+  TEST_DISABLED_FOR_ARM_AND_ARM64();
   // Test is failing on target, b/77469384.
   TEST_DISABLED_FOR_TARGET();
   std::string error_msg;
@@ -97,16 +95,17 @@
   const std::string dex_location =
       tmp_dir_+ "/" + android::base::Basename(GetTestDexFileName(GetAppBaseName().c_str())) +
       "_export.dex";
-  const std::string dexdump2 = GetExecutableFilePath("dexdump2",
-                                                     /*is_debug=*/false,
-                                                     /*is_static=*/false);
+  const std::string dexdump = GetExecutableFilePath("dexdump",
+                                                    /*is_debug=*/false,
+                                                    /*is_static=*/false,
+                                                    /*bitness=*/false);
   std::string output;
   auto post_fork_fn = []() { return true; };
-  ForkAndExecResult res = ForkAndExec({dexdump2, "-d", dex_location}, post_fork_fn, &output);
+  ForkAndExecResult res = ForkAndExec({dexdump, "-d", dex_location}, post_fork_fn, &output);
   ASSERT_TRUE(res.StandardSuccess());
 }
 TEST_F(OatDumpTest, TestExportDexStatic) {
-  TEST_DISABLED_FOR_ARM_AND_MIPS();
+  TEST_DISABLED_FOR_ARM_AND_ARM64();
   TEST_DISABLED_FOR_NON_STATIC_HOST_BUILDS();
   std::string error_msg;
   ASSERT_TRUE(GenerateAppOdexFile(kStatic, {"--runtime-arg", "-Xmx64M"}));
diff --git a/oatdump/oatdump_test.h b/oatdump/oatdump_test.h
index c4f2967..cbbce3b 100644
--- a/oatdump/oatdump_test.h
+++ b/oatdump/oatdump_test.h
@@ -71,21 +71,22 @@
   };
 
   // Returns path to the oatdump/dex2oat/dexdump binary.
-  std::string GetExecutableFilePath(const char* name, bool is_debug, bool is_static) {
-    std::string root = GetTestAndroidRoot();
-    root += "/bin/";
-    root += name;
+  std::string GetExecutableFilePath(const char* name, bool is_debug, bool is_static, bool bitness) {
+    std::string path = GetArtBinDir() + '/' + name;
     if (is_debug) {
-      root += "d";
+      path += 'd';
     }
     if (is_static) {
-      root += "s";
+      path += 's';
     }
-    return root;
+    if (bitness) {
+      path += Is64BitInstructionSet(kRuntimeISA) ? "64" : "32";
+    }
+    return path;
   }
 
-  std::string GetExecutableFilePath(Flavor flavor, const char* name) {
-    return GetExecutableFilePath(name, kIsDebugBuild, flavor == kStatic);
+  std::string GetExecutableFilePath(Flavor flavor, const char* name, bool bitness) {
+    return GetExecutableFilePath(name, kIsDebugBuild, flavor == kStatic, bitness);
   }
 
   enum Mode {
@@ -109,8 +110,15 @@
     return "ProfileTestMultiDex";
   }
 
+  void SetAppImageName(const std::string& name) {
+    app_image_name_ = name;
+  }
+
   std::string GetAppImageName() {
-    return tmp_dir_ + "/" + GetAppBaseName() + ".art";
+    if (app_image_name_.empty()) {
+      app_image_name_ =  tmp_dir_ + "/" + GetAppBaseName() + ".art";
+    }
+    return app_image_name_;
   }
 
   std::string GetAppOdexName() {
@@ -119,7 +127,8 @@
 
   ::testing::AssertionResult GenerateAppOdexFile(Flavor flavor,
                                                  const std::vector<std::string>& args) {
-    std::string dex2oat_path = GetExecutableFilePath(flavor, "dex2oat");
+    std::string dex2oat_path =
+        GetExecutableFilePath(flavor, "dex2oat", /* bitness= */ kIsTargetBuild);
     std::vector<std::string> exec_argv = {
         dex2oat_path,
         "--runtime-arg",
@@ -160,8 +169,9 @@
   ::testing::AssertionResult Exec(Flavor flavor,
                                   Mode mode,
                                   const std::vector<std::string>& args,
-                                  Display display) {
-    std::string file_path = GetExecutableFilePath(flavor, "oatdump");
+                                  Display display,
+                                  bool expect_failure = false) {
+    std::string file_path = GetExecutableFilePath(flavor, "oatdump", /* bitness= */ false);
 
     if (!OS::FileExists(file_path.c_str())) {
       return ::testing::AssertionFailure() << file_path << " should be a valid file path";
@@ -181,7 +191,7 @@
         // Code and dex code do not show up if list only.
         expected_prefixes.push_back("DEX CODE:");
         expected_prefixes.push_back("CODE:");
-        expected_prefixes.push_back("InlineInfo");
+        expected_prefixes.push_back("StackMap");
       }
       if (mode == kModeArt) {
         exec_argv.push_back("--runtime-arg");
@@ -324,8 +334,17 @@
     if (res.stage != ForkAndExecResult::kFinished) {
       return ::testing::AssertionFailure() << strerror(errno);
     }
+    error_buf.push_back(0);  // Make data a C string.
+
     if (!res.StandardSuccess()) {
-      return ::testing::AssertionFailure() << "Did not terminate successfully: " << res.status_code;
+      if (expect_failure && WIFEXITED(res.status_code)) {
+        // Avoid crash as valid exit.
+        return ::testing::AssertionSuccess();
+      }
+      return ::testing::AssertionFailure() << "Did not terminate successfully: " << res.status_code
+          << " " << error_buf.data();
+    } else if (expect_failure) {
+      return ::testing::AssertionFailure() << "Expected failure";
     }
 
     if (mode == kModeSymbolize) {
@@ -344,7 +363,6 @@
     }
     if (!result) {
       oss << "Processed bytes " << total << ":" << std::endl;
-      error_buf.push_back(0);  // Make data a C string.
     }
 
     return result ? ::testing::AssertionSuccess()
@@ -352,6 +370,7 @@
   }
 
   std::string tmp_dir_;
+  std::string app_image_name_;
 
  private:
   std::string core_art_location_;
diff --git a/openjdkjvm/Android.bp b/openjdkjvm/Android.bp
index 907315e..ace8d0b 100644
--- a/openjdkjvm/Android.bp
+++ b/openjdkjvm/Android.bp
@@ -16,6 +16,7 @@
 cc_defaults {
     name: "libopenjdkjvm_defaults",
     defaults: ["art_defaults"],
+    visibility: ["//libcore"],
     host_supported: true,
     srcs: ["OpenjdkJvm.cc"],
     shared_libs: [
@@ -33,6 +34,10 @@
         "libart",
         "libartbase",
     ],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_library {
@@ -45,4 +50,7 @@
         "libartd",
         "libartbased",
     ],
+    apex_available: [
+        "com.android.art.debug",
+    ],
 }
diff --git a/openjdkjvm/OpenjdkJvm.cc b/openjdkjvm/OpenjdkJvm.cc
index 8297c54..675a401 100644
--- a/openjdkjvm/OpenjdkJvm.cc
+++ b/openjdkjvm/OpenjdkJvm.cc
@@ -313,7 +313,9 @@
 JNIEXPORT __attribute__((noreturn)) void JVM_Exit(jint status) {
   LOG(INFO) << "System.exit called, status: " << status;
   art::Runtime::Current()->CallExitHook(status);
-  exit(status);
+  // Unsafe to call exit() while threads may still be running. They would race
+  // with static destructors.
+  _exit(status);
 }
 
 JNIEXPORT jstring JVM_NativeLoad(JNIEnv* env,
@@ -471,7 +473,7 @@
 }
 
 JNIEXPORT __attribute__((noreturn))  void JVM_Halt(jint code) {
-  exit(code);
+  _exit(code);
 }
 
 JNIEXPORT jboolean JVM_IsNaN(jdouble d) {
diff --git a/openjdkjvmti/Android.bp b/openjdkjvmti/Android.bp
index 7621d48..3311afc 100644
--- a/openjdkjvmti/Android.bp
+++ b/openjdkjvmti/Android.bp
@@ -15,9 +15,16 @@
 
 cc_library_headers {
     name: "libopenjdkjvmti_headers",
+    visibility: ["//visibility:public"],
     host_supported: true,
     export_include_dirs: ["include"],
     sdk_version: "current",
+
+    apex_available: [
+        "//apex_available:platform",
+        "com.android.art.debug",
+        "com.android.art.release",
+    ],
 }
 
 cc_defaults {
@@ -25,6 +32,7 @@
     defaults: ["art_defaults"],
     host_supported: true,
     srcs: [
+        "alloc_manager.cc",
         "deopt_manager.cc",
         "events.cc",
         "fixed_up_dex_file.cc",
@@ -74,6 +82,10 @@
         "libdexfile",
         "libartbase",
     ],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_library {
@@ -89,4 +101,7 @@
         "libdexfiled",
         "libartbased",
     ],
+    apex_available: [
+        "com.android.art.debug",
+    ],
 }
diff --git a/openjdkjvmti/OpenjdkJvmTi.cc b/openjdkjvmti/OpenjdkJvmTi.cc
index ffa1bd3..4ce376f 100644
--- a/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/openjdkjvmti/OpenjdkJvmTi.cc
@@ -40,6 +40,7 @@
 
 #include "jvmti.h"
 
+#include "alloc_manager.h"
 #include "art_jvmti.h"
 #include "base/logging.h"  // For gLogVerbosity.
 #include "base/mutex.h"
@@ -79,6 +80,7 @@
 // These should never be null.
 EventHandler* gEventHandler;
 DeoptManager* gDeoptManager;
+AllocationManager* gAllocManager;
 
 #define ENSURE_NON_NULL(n)      \
   do {                          \
@@ -344,50 +346,40 @@
     return StackUtil::NotifyFramePop(env, thread, depth);
   }
 
-  static jvmtiError ForceEarlyReturnObject(jvmtiEnv* env,
-                                           jthread thread ATTRIBUTE_UNUSED,
-                                           jobject value ATTRIBUTE_UNUSED) {
+  static jvmtiError ForceEarlyReturnObject(jvmtiEnv* env, jthread thread, jobject value) {
     ENSURE_VALID_ENV(env);
     ENSURE_HAS_CAP(env, can_force_early_return);
-    return ERR(NOT_IMPLEMENTED);
+    return StackUtil::ForceEarlyReturn(env, gEventHandler, thread, value);
   }
 
-  static jvmtiError ForceEarlyReturnInt(jvmtiEnv* env,
-                                        jthread thread ATTRIBUTE_UNUSED,
-                                        jint value ATTRIBUTE_UNUSED) {
+  static jvmtiError ForceEarlyReturnInt(jvmtiEnv* env, jthread thread, jint value) {
     ENSURE_VALID_ENV(env);
     ENSURE_HAS_CAP(env, can_force_early_return);
-    return ERR(NOT_IMPLEMENTED);
+    return StackUtil::ForceEarlyReturn(env, gEventHandler, thread, value);
   }
 
-  static jvmtiError ForceEarlyReturnLong(jvmtiEnv* env,
-                                         jthread thread ATTRIBUTE_UNUSED,
-                                         jlong value ATTRIBUTE_UNUSED) {
+  static jvmtiError ForceEarlyReturnLong(jvmtiEnv* env, jthread thread, jlong value) {
     ENSURE_VALID_ENV(env);
     ENSURE_HAS_CAP(env, can_force_early_return);
-    return ERR(NOT_IMPLEMENTED);
+    return StackUtil::ForceEarlyReturn(env, gEventHandler, thread, value);
   }
 
-  static jvmtiError ForceEarlyReturnFloat(jvmtiEnv* env,
-                                          jthread thread ATTRIBUTE_UNUSED,
-                                          jfloat value ATTRIBUTE_UNUSED) {
+  static jvmtiError ForceEarlyReturnFloat(jvmtiEnv* env, jthread thread, jfloat value) {
     ENSURE_VALID_ENV(env);
     ENSURE_HAS_CAP(env, can_force_early_return);
-    return ERR(NOT_IMPLEMENTED);
+    return StackUtil::ForceEarlyReturn(env, gEventHandler, thread, value);
   }
 
-  static jvmtiError ForceEarlyReturnDouble(jvmtiEnv* env,
-                                           jthread thread ATTRIBUTE_UNUSED,
-                                           jdouble value ATTRIBUTE_UNUSED) {
+  static jvmtiError ForceEarlyReturnDouble(jvmtiEnv* env, jthread thread, jdouble value) {
     ENSURE_VALID_ENV(env);
     ENSURE_HAS_CAP(env, can_force_early_return);
-    return ERR(NOT_IMPLEMENTED);
+    return StackUtil::ForceEarlyReturn(env, gEventHandler, thread, value);
   }
 
-  static jvmtiError ForceEarlyReturnVoid(jvmtiEnv* env, jthread thread ATTRIBUTE_UNUSED) {
+  static jvmtiError ForceEarlyReturnVoid(jvmtiEnv* env, jthread thread) {
     ENSURE_VALID_ENV(env);
     ENSURE_HAS_CAP(env, can_force_early_return);
-    return ERR(NOT_IMPLEMENTED);
+    return StackUtil::ForceEarlyReturn<nullptr_t>(env, gEventHandler, thread, nullptr);
   }
 
   static jvmtiError FollowReferences(jvmtiEnv* env,
@@ -788,18 +780,7 @@
   static jvmtiError RetransformClasses(jvmtiEnv* env, jint class_count, const jclass* classes) {
     ENSURE_VALID_ENV(env);
     ENSURE_HAS_CAP(env, can_retransform_classes);
-    std::string error_msg;
-    jvmtiError res = Transformer::RetransformClasses(ArtJvmTiEnv::AsArtJvmTiEnv(env),
-                                                     gEventHandler,
-                                                     art::Runtime::Current(),
-                                                     art::Thread::Current(),
-                                                     class_count,
-                                                     classes,
-                                                     &error_msg);
-    if (res != OK) {
-      JVMTI_LOG(WARNING, env) << "FAILURE TO RETRANFORM " << error_msg;
-    }
-    return res;
+    return Transformer::RetransformClasses(env, class_count, classes);
   }
 
   static jvmtiError RedefineClasses(jvmtiEnv* env,
@@ -807,18 +788,7 @@
                                     const jvmtiClassDefinition* class_definitions) {
     ENSURE_VALID_ENV(env);
     ENSURE_HAS_CAP(env, can_redefine_classes);
-    std::string error_msg;
-    jvmtiError res = Redefiner::RedefineClasses(ArtJvmTiEnv::AsArtJvmTiEnv(env),
-                                                gEventHandler,
-                                                art::Runtime::Current(),
-                                                art::Thread::Current(),
-                                                class_count,
-                                                class_definitions,
-                                                &error_msg);
-    if (res != OK) {
-      JVMTI_LOG(WARNING, env) << "FAILURE TO REDEFINE " << error_msg;
-    }
-    return res;
+    return Redefiner::RedefineClasses(env, class_count, class_definitions);
   }
 
   static jvmtiError GetObjectSize(jvmtiEnv* env, jobject object, jlong* size_ptr) {
@@ -1414,6 +1384,7 @@
       art::gLogVerbosity.compiler = val;
       art::gLogVerbosity.deopt = val;
       art::gLogVerbosity.heap = val;
+      art::gLogVerbosity.interpreter = val;
       art::gLogVerbosity.jdwp = val;
       art::gLogVerbosity.jit = val;
       art::gLogVerbosity.monitor = val;
@@ -1427,6 +1398,7 @@
       art::gLogVerbosity.verifier = val;
       // Do not set verifier-debug.
       art::gLogVerbosity.image = val;
+      art::gLogVerbosity.plugin = val;
 
       // Note: can't switch systrace_lock_logging. That requires changing entrypoints.
 
@@ -1527,6 +1499,7 @@
 extern "C" bool ArtPlugin_Initialize() {
   art::Runtime* runtime = art::Runtime::Current();
 
+  gAllocManager = new AllocationManager;
   gDeoptManager = new DeoptManager;
   gEventHandler = new EventHandler;
 
@@ -1541,9 +1514,12 @@
   ClassUtil::Register(gEventHandler);
   DumpUtil::Register(gEventHandler);
   MethodUtil::Register(gEventHandler);
+  HeapExtensions::Register(gEventHandler);
   SearchUtil::Register();
   HeapUtil::Register();
-  Transformer::Setup();
+  FieldUtil::Register(gEventHandler);
+  BreakpointUtil::Register(gEventHandler);
+  Transformer::Register(gEventHandler);
 
   {
     // Make sure we can deopt anything we need to.
@@ -1566,6 +1542,8 @@
   MethodUtil::Unregister();
   SearchUtil::Unregister();
   HeapUtil::Unregister();
+  FieldUtil::Unregister();
+  BreakpointUtil::Unregister();
 
   // TODO It would be good to delete the gEventHandler and gDeoptManager here but we cannot since
   // daemon threads might be suspended and we want to make sure that even if they wake up briefly
diff --git a/openjdkjvmti/alloc_manager.cc b/openjdkjvmti/alloc_manager.cc
new file mode 100644
index 0000000..5910073
--- /dev/null
+++ b/openjdkjvmti/alloc_manager.cc
@@ -0,0 +1,218 @@
+
+/* Copyright (C) 2019 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h.  The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "alloc_manager.h"
+
+#include <atomic>
+#include <sstream>
+
+#include "base/logging.h"
+#include "gc/allocation_listener.h"
+#include "gc/heap.h"
+#include "handle.h"
+#include "mirror/class-inl.h"
+#include "runtime.h"
+#include "runtime_globals.h"
+#include "scoped_thread_state_change-inl.h"
+#include "scoped_thread_state_change.h"
+#include "thread-current-inl.h"
+#include "thread_list.h"
+#include "thread_pool.h"
+
+namespace openjdkjvmti {
+
+template<typename T>
+void AllocationManager::PauseForAllocation(art::Thread* self, T msg) {
+  // The suspension can pause us for arbitrary times. We need to do it to sleep unfortunately. So we
+  // do test, suspend, test again, sleep, repeat.
+  std::string cause;
+  const bool is_logging = VLOG_IS_ON(plugin);
+  while (true) {
+    // We always return when there is no pause and we are runnable.
+    art::Thread* pausing_thread = allocations_paused_thread_.load(std::memory_order_seq_cst);
+    if (LIKELY(pausing_thread == nullptr || pausing_thread == self)) {
+      return;
+    }
+    if (UNLIKELY(is_logging && cause.empty())) {
+      cause = msg();
+    }
+    art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
+    art::MutexLock mu(self, alloc_listener_mutex_);
+    pausing_thread = allocations_paused_thread_.load(std::memory_order_seq_cst);
+    CHECK_NE(pausing_thread, self) << "We should always be setting pausing_thread = self!"
+                                   << " How did this happen? " << *self;
+    if (pausing_thread != nullptr) {
+      VLOG(plugin) << "Suspending " << *self << " due to " << cause << ". Allocation pause "
+                   << "initiated by " << *pausing_thread;
+      alloc_pause_cv_.Wait(self);
+    }
+  }
+}
+
+extern AllocationManager* gAllocManager;
+AllocationManager* AllocationManager::Get() {
+  return gAllocManager;
+}
+
+void JvmtiAllocationListener::ObjectAllocated(art::Thread* self,
+                                              art::ObjPtr<art::mirror::Object>* obj,
+                                              size_t cnt) {
+  auto cb = manager_->callback_;
+  if (cb != nullptr && manager_->callback_enabled_.load(std::memory_order_seq_cst)) {
+    cb->ObjectAllocated(self, obj, cnt);
+  }
+}
+
+bool JvmtiAllocationListener::HasPreAlloc() const {
+  return manager_->allocations_paused_ever_.load(std::memory_order_seq_cst);
+}
+
+void JvmtiAllocationListener::PreObjectAllocated(art::Thread* self,
+                                                 art::MutableHandle<art::mirror::Class> type,
+                                                 size_t* byte_count) {
+  manager_->PauseForAllocation(self, [&]() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    std::ostringstream oss;
+    oss << "allocating " << *byte_count << " bytes of type " << type->PrettyClass();
+    return oss.str();
+  });
+  if (!type->IsVariableSize()) {
+    *byte_count =
+        std::max(art::RoundUp(static_cast<size_t>(type->GetObjectSize()), art::kObjectAlignment),
+                 *byte_count);
+  }
+}
+
+AllocationManager::AllocationManager()
+    : alloc_listener_(nullptr),
+      alloc_listener_mutex_("JVMTI Alloc listener",
+                            art::LockLevel::kPostUserCodeSuspensionTopLevelLock),
+      alloc_pause_cv_("JVMTI Allocation Pause Condvar", alloc_listener_mutex_) {
+  alloc_listener_.reset(new JvmtiAllocationListener(this));
+}
+
+void AllocationManager::DisableAllocationCallback(art::Thread* self) {
+  callback_enabled_.store(false);
+  DecrListenerInstall(self);
+}
+
+void AllocationManager::EnableAllocationCallback(art::Thread* self) {
+  IncrListenerInstall(self);
+  callback_enabled_.store(true);
+}
+
+void AllocationManager::SetAllocListener(AllocationCallback* callback) {
+  CHECK(callback_ == nullptr) << "Already setup!";
+  callback_ = callback;
+  alloc_listener_.reset(new JvmtiAllocationListener(this));
+}
+
+void AllocationManager::RemoveAllocListener() {
+  callback_enabled_.store(false, std::memory_order_seq_cst);
+  callback_ = nullptr;
+}
+
+void AllocationManager::DecrListenerInstall(art::Thread* self) {
+  art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
+  art::MutexLock mu(self, alloc_listener_mutex_);
+  // We don't need any particular memory-order here since we're under the lock, they aren't
+  // changing.
+  if (--listener_refcount_ == 0) {
+    art::Runtime::Current()->GetHeap()->RemoveAllocationListener();
+  }
+}
+
+void AllocationManager::IncrListenerInstall(art::Thread* self) {
+  art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
+  art::MutexLock mu(self, alloc_listener_mutex_);
+  // We don't need any particular memory-order here since we're under the lock, they aren't
+  // changing.
+  if (listener_refcount_++ == 0) {
+    art::Runtime::Current()->GetHeap()->SetAllocationListener(alloc_listener_.get());
+  }
+}
+
+void AllocationManager::PauseAllocations(art::Thread* self) {
+  art::Thread* null_thr = nullptr;
+  // Unfortunately once we've paused allocations once we have to leave the listener and
+  // PreObjectAlloc event enabled forever. This is to avoid an instance of the ABA problem. We need
+  // to make sure that every thread gets a chance to see the PreObjectAlloc event at least once or
+// else it could miss the fact that the object it's allocating had its size changed.
+  //
+  // Consider the following 2 threads. T1 is allocating an object of class K. It is suspended (by
+  // user code) somewhere in the AllocObjectWithAllocator function, perhaps while doing a GC to
+// attempt to clear space. With that thread suspended, on thread T2 we decide to structurally
+  // redefine 'K', changing its size. To do this we insert this PreObjectAlloc event to check and
+  // update the size of the class being allocated. This is done successfully. Now imagine if T2
+// removed the listener event and then T1 subsequently resumed. T1 would see there is no
+  // PreObjectAlloc event and so allocate using the old object size. This leads to it not allocating
+  // enough. To prevent this we simply force every allocation after our first pause to go through
+  // the PreObjectAlloc event.
+  //
+  // TODO Technically we could do better than this. We just need to be able to require that all
+  // threads within allocation functions go through the PreObjectAlloc at least once after we turn
+  // it on. This is easier said than done though since we don't want to place a marker on threads
+  // (allocation is just too common) and we can't just have every thread go through the event since
+  // there are some threads that never or almost never allocate. We would also need to ensure that
+  // this thread doesn't pause waiting for all threads to pass the barrier since the other threads
+  // might be suspended. We could accomplish this by storing callbacks on each thread that would do
+  // the work. Honestly though this is a debug feature and it doesn't slow things down very much so
+  // simply leaving it on forever is simpler and safer.
+  bool expected = false;
+  if (allocations_paused_ever_.compare_exchange_strong(expected, true, std::memory_order_seq_cst)) {
+    IncrListenerInstall(self);
+  }
+  do {
+    PauseForAllocation(self, []() { return "request to pause allocations on other threads"; });
+  } while (!allocations_paused_thread_.compare_exchange_strong(
+      null_thr, self, std::memory_order_seq_cst));
+  // Make sure everything else can see this and isn't in the middle of final allocation.
+  // Force every thread to either be suspended or pass through a barrier.
+  art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
+  art::Barrier barrier(0);
+  art::FunctionClosure fc([&](art::Thread* thr ATTRIBUTE_UNUSED) {
+    barrier.Pass(art::Thread::Current());
+  });
+  size_t requested = art::Runtime::Current()->GetThreadList()->RunCheckpoint(&fc);
+  barrier.Increment(self, requested);
+}
+
+void AllocationManager::ResumeAllocations(art::Thread* self) {
+  CHECK_EQ(allocations_paused_thread_.load(), self) << "not paused! ";
+  // See above for why we don't decr the install count.
+  CHECK(allocations_paused_ever_.load(std::memory_order_seq_cst));
+  art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
+  art::MutexLock mu(self, alloc_listener_mutex_);
+  allocations_paused_thread_.store(nullptr, std::memory_order_seq_cst);
+  alloc_pause_cv_.Broadcast(self);
+}
+
+}  // namespace openjdkjvmti
diff --git a/openjdkjvmti/alloc_manager.h b/openjdkjvmti/alloc_manager.h
new file mode 100644
index 0000000..7fe8ae9
--- /dev/null
+++ b/openjdkjvmti/alloc_manager.h
@@ -0,0 +1,115 @@
+/* Copyright (C) 2019 The Android Open Source Project
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This file implements interfaces from the file jvmti.h. This implementation
+ * is licensed under the same terms as the file jvmti.h.  The
+ * copyright and license information for the file jvmti.h follows.
+ *
+ * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef ART_OPENJDKJVMTI_ALLOC_MANAGER_H_
+#define ART_OPENJDKJVMTI_ALLOC_MANAGER_H_
+
+#include <jvmti.h>
+
+#include <atomic>
+
+#include "base/locks.h"
+#include "base/mutex.h"
+#include "gc/allocation_listener.h"
+
+namespace art {
+template <typename T> class MutableHandle;
+template <typename T> class ObjPtr;
+class Thread;
+namespace mirror {
+class Class;
+class Object;
+}  // namespace mirror
+}  // namespace art
+
+namespace openjdkjvmti {
+
+class AllocationManager;
+
+class JvmtiAllocationListener : public art::gc::AllocationListener {
+ public:
+  explicit JvmtiAllocationListener(AllocationManager* manager) : manager_(manager) {}
+  void ObjectAllocated(art::Thread* self,
+                       art::ObjPtr<art::mirror::Object>* obj,
+                       size_t cnt) override REQUIRES_SHARED(art::Locks::mutator_lock_);
+  bool HasPreAlloc() const override REQUIRES_SHARED(art::Locks::mutator_lock_);
+  void PreObjectAllocated(art::Thread* self,
+                          art::MutableHandle<art::mirror::Class> type,
+                          size_t* byte_count) override REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ private:
+  AllocationManager* manager_;
+};
+
+class AllocationManager {
+ public:
+  class AllocationCallback {
+   public:
+    virtual ~AllocationCallback() {}
+    virtual void ObjectAllocated(art::Thread* self,
+                                 art::ObjPtr<art::mirror::Object>* obj,
+                                 size_t byte_count) REQUIRES_SHARED(art::Locks::mutator_lock_) = 0;
+  };
+
+  AllocationManager();
+
+  void SetAllocListener(AllocationCallback* callback);
+  void RemoveAllocListener();
+
+  static AllocationManager* Get();
+
+  void PauseAllocations(art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_);
+  void ResumeAllocations(art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+  void EnableAllocationCallback(art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_);
+  void DisableAllocationCallback(art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+ private:
+  template<typename T>
+  void PauseForAllocation(art::Thread* self, T msg) REQUIRES_SHARED(art::Locks::mutator_lock_);
+  void IncrListenerInstall(art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_);
+  void DecrListenerInstall(art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+  AllocationCallback* callback_ = nullptr;
+  uint32_t listener_refcount_ GUARDED_BY(alloc_listener_mutex_) = 0;
+  std::atomic<bool> allocations_paused_ever_ = false;
+  std::atomic<art::Thread*> allocations_paused_thread_ = nullptr;
+  std::atomic<bool> callback_enabled_ = false;
+  std::unique_ptr<JvmtiAllocationListener> alloc_listener_ = nullptr;
+  art::Mutex alloc_listener_mutex_ ACQUIRED_AFTER(art::Locks::user_code_suspension_lock_);
+  art::ConditionVariable alloc_pause_cv_;
+
+  friend class JvmtiAllocationListener;
+};
+
+}  // namespace openjdkjvmti
+
+#endif  // ART_OPENJDKJVMTI_ALLOC_MANAGER_H_
diff --git a/openjdkjvmti/art_jvmti.h b/openjdkjvmti/art_jvmti.h
index 7433e54..083ba6d 100644
--- a/openjdkjvmti/art_jvmti.h
+++ b/openjdkjvmti/art_jvmti.h
@@ -278,7 +278,7 @@
     .can_generate_native_method_bind_events          = 1,
     .can_generate_garbage_collection_events          = 1,
     .can_generate_object_free_events                 = 1,
-    .can_force_early_return                          = 0,
+    .can_force_early_return                          = 1,
     .can_get_owned_monitor_stack_depth_info          = 1,
     .can_get_constant_pool                           = 0,
     .can_set_native_method_prefix                    = 0,
@@ -296,6 +296,7 @@
 //   can_redefine_any_class:
 //   can_redefine_classes:
 //   can_pop_frame:
+//   can_force_early_return:
 //     We need to ensure that inlined code is either not present or can always be deoptimized. This
 //     is not guaranteed for non-debuggable processes since we might have inlined bootclasspath code
 //     on a threads stack.
@@ -333,7 +334,7 @@
     .can_generate_native_method_bind_events          = 0,
     .can_generate_garbage_collection_events          = 0,
     .can_generate_object_free_events                 = 0,
-    .can_force_early_return                          = 0,
+    .can_force_early_return                          = 1,
     .can_get_owned_monitor_stack_depth_info          = 0,
     .can_get_constant_pool                           = 0,
     .can_set_native_method_prefix                    = 0,
diff --git a/openjdkjvmti/deopt_manager.cc b/openjdkjvmti/deopt_manager.cc
index 3b04ed8..3e3691a 100644
--- a/openjdkjvmti/deopt_manager.cc
+++ b/openjdkjvmti/deopt_manager.cc
@@ -487,9 +487,11 @@
 void DeoptManager::DeoptimizeThread(art::Thread* target) {
   // We might or might not be running on the target thread (self) so get Thread::Current
   // directly.
+  art::ScopedThreadSuspension sts(art::Thread::Current(), art::kSuspended);
   art::gc::ScopedGCCriticalSection sgccs(art::Thread::Current(),
                                          art::gc::GcCause::kGcCauseDebugger,
                                          art::gc::CollectorType::kCollectorTypeDebugger);
+  art::ScopedSuspendAll ssa("Instrument thread stack");
   art::Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(target);
 }
 
diff --git a/openjdkjvmti/events-inl.h b/openjdkjvmti/events-inl.h
index 8e06fe3..883a4cc 100644
--- a/openjdkjvmti/events-inl.h
+++ b/openjdkjvmti/events-inl.h
@@ -90,40 +90,42 @@
 
 // Infrastructure to achieve type safety for event dispatch.
 
-#define FORALL_EVENT_TYPES(fn)                                                       \
-  fn(VMInit,                  ArtJvmtiEvent::kVmInit)                                \
-  fn(VMDeath,                 ArtJvmtiEvent::kVmDeath)                               \
-  fn(ThreadStart,             ArtJvmtiEvent::kThreadStart)                           \
-  fn(ThreadEnd,               ArtJvmtiEvent::kThreadEnd)                             \
-  fn(ClassFileLoadHook,       ArtJvmtiEvent::kClassFileLoadHookRetransformable)      \
-  fn(ClassFileLoadHook,       ArtJvmtiEvent::kClassFileLoadHookNonRetransformable)   \
-  fn(ClassLoad,               ArtJvmtiEvent::kClassLoad)                             \
-  fn(ClassPrepare,            ArtJvmtiEvent::kClassPrepare)                          \
-  fn(VMStart,                 ArtJvmtiEvent::kVmStart)                               \
-  fn(Exception,               ArtJvmtiEvent::kException)                             \
-  fn(ExceptionCatch,          ArtJvmtiEvent::kExceptionCatch)                        \
-  fn(SingleStep,              ArtJvmtiEvent::kSingleStep)                            \
-  fn(FramePop,                ArtJvmtiEvent::kFramePop)                              \
-  fn(Breakpoint,              ArtJvmtiEvent::kBreakpoint)                            \
-  fn(FieldAccess,             ArtJvmtiEvent::kFieldAccess)                           \
-  fn(FieldModification,       ArtJvmtiEvent::kFieldModification)                     \
-  fn(MethodEntry,             ArtJvmtiEvent::kMethodEntry)                           \
-  fn(MethodExit,              ArtJvmtiEvent::kMethodExit)                            \
-  fn(NativeMethodBind,        ArtJvmtiEvent::kNativeMethodBind)                      \
-  fn(CompiledMethodLoad,      ArtJvmtiEvent::kCompiledMethodLoad)                    \
-  fn(CompiledMethodUnload,    ArtJvmtiEvent::kCompiledMethodUnload)                  \
-  fn(DynamicCodeGenerated,    ArtJvmtiEvent::kDynamicCodeGenerated)                  \
-  fn(DataDumpRequest,         ArtJvmtiEvent::kDataDumpRequest)                       \
-  fn(MonitorWait,             ArtJvmtiEvent::kMonitorWait)                           \
-  fn(MonitorWaited,           ArtJvmtiEvent::kMonitorWaited)                         \
-  fn(MonitorContendedEnter,   ArtJvmtiEvent::kMonitorContendedEnter)                 \
-  fn(MonitorContendedEntered, ArtJvmtiEvent::kMonitorContendedEntered)               \
-  fn(ResourceExhausted,       ArtJvmtiEvent::kResourceExhausted)                     \
-  fn(GarbageCollectionStart,  ArtJvmtiEvent::kGarbageCollectionStart)                \
-  fn(GarbageCollectionFinish, ArtJvmtiEvent::kGarbageCollectionFinish)               \
-  fn(ObjectFree,              ArtJvmtiEvent::kObjectFree)                            \
-  fn(VMObjectAlloc,           ArtJvmtiEvent::kVmObjectAlloc)                         \
-  fn(DdmPublishChunk,         ArtJvmtiEvent::kDdmPublishChunk)
+#define FORALL_EVENT_TYPES(fn)                                                         \
+  fn(VMInit,                    ArtJvmtiEvent::kVmInit)                                \
+  fn(VMDeath,                   ArtJvmtiEvent::kVmDeath)                               \
+  fn(ThreadStart,               ArtJvmtiEvent::kThreadStart)                           \
+  fn(ThreadEnd,                 ArtJvmtiEvent::kThreadEnd)                             \
+  fn(ClassFileLoadHook,         ArtJvmtiEvent::kClassFileLoadHookRetransformable)      \
+  fn(ClassFileLoadHook,         ArtJvmtiEvent::kClassFileLoadHookNonRetransformable)   \
+  fn(ClassLoad,                 ArtJvmtiEvent::kClassLoad)                             \
+  fn(ClassPrepare,              ArtJvmtiEvent::kClassPrepare)                          \
+  fn(VMStart,                   ArtJvmtiEvent::kVmStart)                               \
+  fn(Exception,                 ArtJvmtiEvent::kException)                             \
+  fn(ExceptionCatch,            ArtJvmtiEvent::kExceptionCatch)                        \
+  fn(SingleStep,                ArtJvmtiEvent::kSingleStep)                            \
+  fn(FramePop,                  ArtJvmtiEvent::kFramePop)                              \
+  fn(Breakpoint,                ArtJvmtiEvent::kBreakpoint)                            \
+  fn(FieldAccess,               ArtJvmtiEvent::kFieldAccess)                           \
+  fn(FieldModification,         ArtJvmtiEvent::kFieldModification)                     \
+  fn(MethodEntry,               ArtJvmtiEvent::kMethodEntry)                           \
+  fn(MethodExit,                ArtJvmtiEvent::kMethodExit)                            \
+  fn(NativeMethodBind,          ArtJvmtiEvent::kNativeMethodBind)                      \
+  fn(CompiledMethodLoad,        ArtJvmtiEvent::kCompiledMethodLoad)                    \
+  fn(CompiledMethodUnload,      ArtJvmtiEvent::kCompiledMethodUnload)                  \
+  fn(DynamicCodeGenerated,      ArtJvmtiEvent::kDynamicCodeGenerated)                  \
+  fn(DataDumpRequest,           ArtJvmtiEvent::kDataDumpRequest)                       \
+  fn(MonitorWait,               ArtJvmtiEvent::kMonitorWait)                           \
+  fn(MonitorWaited,             ArtJvmtiEvent::kMonitorWaited)                         \
+  fn(MonitorContendedEnter,     ArtJvmtiEvent::kMonitorContendedEnter)                 \
+  fn(MonitorContendedEntered,   ArtJvmtiEvent::kMonitorContendedEntered)               \
+  fn(ResourceExhausted,         ArtJvmtiEvent::kResourceExhausted)                     \
+  fn(GarbageCollectionStart,    ArtJvmtiEvent::kGarbageCollectionStart)                \
+  fn(GarbageCollectionFinish,   ArtJvmtiEvent::kGarbageCollectionFinish)               \
+  fn(ObjectFree,                ArtJvmtiEvent::kObjectFree)                            \
+  fn(VMObjectAlloc,             ArtJvmtiEvent::kVmObjectAlloc)                         \
+  fn(DdmPublishChunk,           ArtJvmtiEvent::kDdmPublishChunk)                       \
+  fn(ObsoleteObjectCreated,     ArtJvmtiEvent::kObsoleteObjectCreated)                 \
+  fn(StructuralDexFileLoadHook, ArtJvmtiEvent::kStructuralDexFileLoadHook)
 
 template <ArtJvmtiEvent kEvent>
 struct EventFnType {
@@ -216,7 +218,8 @@
                                                          unsigned char** new_class_data) const {
   art::ScopedThreadStateChange stsc(thread, art::ThreadState::kNative);
   static_assert(kEvent == ArtJvmtiEvent::kClassFileLoadHookRetransformable ||
-                kEvent == ArtJvmtiEvent::kClassFileLoadHookNonRetransformable, "Unsupported event");
+                kEvent == ArtJvmtiEvent::kClassFileLoadHookNonRetransformable ||
+                kEvent == ArtJvmtiEvent::kStructuralDexFileLoadHook, "Unsupported event");
   DCHECK(*new_class_data == nullptr);
   jint current_len = class_data_len;
   unsigned char* current_class_data = const_cast<unsigned char*>(class_data);
@@ -318,6 +321,24 @@
   }
 }
 
+template <>
+inline void EventHandler::DispatchEventOnEnv<ArtJvmtiEvent::kObsoleteObjectCreated>(
+    ArtJvmTiEnv* env, art::Thread* thread, jlong* obsolete_tag, jlong* new_tag) const {
+  static constexpr ArtJvmtiEvent kEvent = ArtJvmtiEvent::kObsoleteObjectCreated;
+  DCHECK(env != nullptr);
+  if (ShouldDispatch<kEvent>(env, thread, obsolete_tag, new_tag)) {
+    art::ScopedThreadStateChange stsc(thread, art::ThreadState::kNative);
+    impl::EventHandlerFunc<kEvent> func(env);
+    ExecuteCallback<kEvent>(func, obsolete_tag, new_tag);
+  } else {
+    // Unlike most others this has a default action to make sure that agents without knowledge of
+    // this extension get reasonable behavior.
+    jlong temp = *obsolete_tag;
+    *obsolete_tag = *new_tag;
+    *new_tag = temp;
+  }
+}
+
 template <ArtJvmtiEvent kEvent, typename ...Args>
 inline void EventHandler::ExecuteCallback(impl::EventHandlerFunc<kEvent> handler, Args... args) {
   handler.ExecuteCallback(args...);
@@ -362,7 +383,7 @@
   // have to deal with use-after-free or the frames being reallocated later.
   art::WriterMutexLock lk(art::Thread::Current(), env->event_info_mutex_);
   return env->notify_frames.erase(frame) != 0 &&
-      !frame->GetForcePopFrame() &&
+      !frame->GetSkipMethodExitEvents() &&
       ShouldDispatchOnThread<ArtJvmtiEvent::kFramePop>(env, thread);
 }
 
@@ -432,8 +453,10 @@
         thread_, art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
     old_disable_frame_pop_depth_ = data->disable_pop_frame_depth;
     data->disable_pop_frame_depth = current_top_frame_;
+    // Check that we cleaned up any old disables. This should only increase (or be equal if we do
+    // another ClassLoad/Prepare recursively).
     DCHECK(old_disable_frame_pop_depth_ == JvmtiGlobalTLSData::kNoDisallowedPopFrame ||
-           current_top_frame_ > old_disable_frame_pop_depth_)
+           current_top_frame_ >= old_disable_frame_pop_depth_)
         << "old: " << old_disable_frame_pop_depth_ << " current: " << current_top_frame_;
   }
 
@@ -569,6 +592,31 @@
       new_class_data);
 }
 
+template <>
+inline void EventHandler::DispatchEvent<ArtJvmtiEvent::kStructuralDexFileLoadHook>(
+    art::Thread* thread,
+    JNIEnv* jnienv,
+    jclass class_being_redefined,
+    jobject loader,
+    const char* name,
+    jobject protection_domain,
+    jint class_data_len,
+    const unsigned char* class_data,
+    jint* new_class_data_len,
+    unsigned char** new_class_data) const {
+  return DispatchClassFileLoadHookEvent<ArtJvmtiEvent::kStructuralDexFileLoadHook>(
+      thread,
+      jnienv,
+      class_being_redefined,
+      loader,
+      name,
+      protection_domain,
+      class_data_len,
+      class_data,
+      new_class_data_len,
+      new_class_data);
+}
+
 template <ArtJvmtiEvent kEvent>
 inline bool EventHandler::ShouldDispatchOnThread(ArtJvmTiEnv* env, art::Thread* thread) const {
   bool dispatch = env->event_masks.global_event_mask.Test(kEvent);
@@ -619,6 +667,7 @@
   return (added && caps.can_access_local_variables == 1) ||
       caps.can_generate_breakpoint_events == 1 ||
       caps.can_pop_frame == 1 ||
+      caps.can_force_early_return == 1 ||
       (caps.can_retransform_classes == 1 &&
        IsEventEnabledAnywhere(event) &&
        env->event_masks.IsEnabledAnywhere(event));
@@ -639,7 +688,7 @@
     if (caps.can_generate_breakpoint_events == 1) {
       HandleBreakpointEventsChanged(added);
     }
-    if (caps.can_pop_frame == 1 && added) {
+    if ((caps.can_pop_frame == 1 || caps.can_force_early_return == 1) && added) {
       // TODO We should keep track of how many of these have been enabled and remove it if there are
       // no more possible users. This isn't expected to be too common.
       art::Runtime::Current()->SetNonStandardExitsEnabled();
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index 40e8b80..64a02e8 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -29,9 +29,15 @@
  * questions.
  */
 
+#include <android-base/thread_annotations.h>
+
+#include "alloc_manager.h"
+#include "base/locks.h"
+#include "base/mutex.h"
 #include "events-inl.h"
 
 #include <array>
+#include <functional>
 #include <sys/time.h>
 
 #include "arch/context.h"
@@ -41,21 +47,31 @@
 #include "base/mutex.h"
 #include "deopt_manager.h"
 #include "dex/dex_file_types.h"
+#include "events.h"
 #include "gc/allocation_listener.h"
 #include "gc/gc_pause_listener.h"
 #include "gc/heap.h"
 #include "gc/scoped_gc_critical_section.h"
 #include "handle_scope-inl.h"
+#include "indirect_reference_table.h"
 #include "instrumentation.h"
+#include "interpreter/shadow_frame.h"
 #include "jni/jni_env_ext-inl.h"
 #include "jni/jni_internal.h"
+#include "jvalue-inl.h"
+#include "jvalue.h"
+#include "jvmti.h"
 #include "mirror/class.h"
 #include "mirror/object-inl.h"
 #include "monitor-inl.h"
 #include "nativehelper/scoped_local_ref.h"
+#include "reflective_handle.h"
+#include "reflective_handle_scope-inl.h"
 #include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
+#include "scoped_thread_state_change.h"
 #include "stack.h"
+#include "thread.h"
 #include "thread-inl.h"
 #include "thread_list.h"
 #include "ti_phase.h"
@@ -79,9 +95,15 @@
 
 jvmtiError ArtJvmtiEventCallbacks::Set(jint index, jvmtiExtensionEvent cb) {
   switch (index) {
+    case static_cast<jint>(ArtJvmtiEvent::kObsoleteObjectCreated):
+      ObsoleteObjectCreated = reinterpret_cast<ArtJvmtiEventObsoleteObjectCreated>(cb);
+      return OK;
     case static_cast<jint>(ArtJvmtiEvent::kDdmPublishChunk):
       DdmPublishChunk = reinterpret_cast<ArtJvmtiEventDdmPublishChunk>(cb);
       return OK;
+    case static_cast<jint>(ArtJvmtiEvent::kStructuralDexFileLoadHook):
+      StructuralDexFileLoadHook = reinterpret_cast<ArtJvmtiEventStructuralDexFileLoadHook>(cb);
+      return OK;
     default:
       return ERR(ILLEGAL_ARGUMENT);
   }
@@ -97,6 +119,8 @@
 bool IsExtensionEvent(ArtJvmtiEvent e) {
   switch (e) {
     case ArtJvmtiEvent::kDdmPublishChunk:
+    case ArtJvmtiEvent::kObsoleteObjectCreated:
+    case ArtJvmtiEvent::kStructuralDexFileLoadHook:
       return true;
     default:
       return false;
@@ -230,6 +254,7 @@
     case ArtJvmtiEvent::kCompiledMethodUnload:
     case ArtJvmtiEvent::kDynamicCodeGenerated:
     case ArtJvmtiEvent::kDataDumpRequest:
+    case ArtJvmtiEvent::kObsoleteObjectCreated:
       return false;
 
     default:
@@ -288,9 +313,9 @@
   DISALLOW_COPY_AND_ASSIGN(JvmtiDdmChunkListener);
 };
 
-class JvmtiAllocationListener : public art::gc::AllocationListener {
+class JvmtiEventAllocationListener : public AllocationManager::AllocationCallback {
  public:
-  explicit JvmtiAllocationListener(EventHandler* handler) : handler_(handler) {}
+  explicit JvmtiEventAllocationListener(EventHandler* handler) : handler_(handler) {}
 
   void ObjectAllocated(art::Thread* self, art::ObjPtr<art::mirror::Object>* obj, size_t byte_count)
       override REQUIRES_SHARED(art::Locks::mutator_lock_) {
@@ -325,15 +350,14 @@
   EventHandler* handler_;
 };
 
-static void SetupObjectAllocationTracking(art::gc::AllocationListener* listener, bool enable) {
+static void SetupObjectAllocationTracking(bool enable) {
   // We must not hold the mutator lock here, but if we're in FastJNI, for example, we might. For
   // now, do a workaround: (possibly) acquire and release.
   art::ScopedObjectAccess soa(art::Thread::Current());
-  art::ScopedThreadSuspension sts(soa.Self(), art::ThreadState::kSuspended);
   if (enable) {
-    art::Runtime::Current()->GetHeap()->SetAllocationListener(listener);
+    AllocationManager::Get()->EnableAllocationCallback(soa.Self());
   } else {
-    art::Runtime::Current()->GetHeap()->RemoveAllocationListener();
+    AllocationManager::Get()->DisableAllocationCallback(soa.Self());
   }
 }
 
@@ -571,7 +595,34 @@
 
 class JvmtiMethodTraceListener final : public art::instrumentation::InstrumentationListener {
  public:
-  explicit JvmtiMethodTraceListener(EventHandler* handler) : event_handler_(handler) {}
+  explicit JvmtiMethodTraceListener(EventHandler* handler)
+      : event_handler_(handler),
+        non_standard_exits_lock_("JVMTI NonStandard Exits list lock",
+                                 art::LockLevel::kGenericBottomLock) {}
+
+  void AddDelayedNonStandardExitEvent(const art::ShadowFrame* frame, bool is_object, jvalue val)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+          REQUIRES(art::Locks::user_code_suspension_lock_, art::Locks::thread_list_lock_) {
+    art::Thread* self = art::Thread::Current();
+    jobject to_cleanup = nullptr;
+    jobject new_val = is_object ? self->GetJniEnv()->NewGlobalRef(val.l) : nullptr;
+    {
+      art::MutexLock mu(self, non_standard_exits_lock_);
+      NonStandardExitEventInfo saved{ nullptr, { .j = 0 } };
+      if (is_object) {
+        saved.return_val_obj_ = new_val;
+        saved.return_val_.l = saved.return_val_obj_;
+      } else {
+        saved.return_val_.j = val.j;
+      }
+      // only objects need cleanup.
+      if (UNLIKELY(is_object && non_standard_exits_.find(frame) != non_standard_exits_.end())) {
+        to_cleanup = non_standard_exits_.find(frame)->second.return_val_obj_;
+      }
+      non_standard_exits_.insert_or_assign(frame, saved);
+    }
+    self->GetJniEnv()->DeleteGlobalRef(to_cleanup);
+  }
 
   // Call-back for when a method is entered.
   void MethodEntered(art::Thread* self,
@@ -589,15 +640,44 @@
     }
   }
 
+  // TODO Maybe try to combine this with below using templates?
   // Callback for when a method is exited with a reference return value.
   void MethodExited(art::Thread* self,
                     art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                     art::ArtMethod* method,
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
-                    art::Handle<art::mirror::Object> return_value)
+                    art::instrumentation::OptionalFrame frame,
+                    art::MutableHandle<art::mirror::Object>& return_value)
       REQUIRES_SHARED(art::Locks::mutator_lock_) override {
-    if (!method->IsRuntimeMethod() &&
-        event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
+    if (method->IsRuntimeMethod()) {
+      return;
+    }
+    if (frame.has_value() && UNLIKELY(event_handler_->IsEventEnabledAnywhere(
+                                 ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue))) {
+      DCHECK(!frame->get().GetSkipMethodExitEvents());
+      bool has_return = false;
+      jobject ret_val = nullptr;
+      {
+        art::MutexLock mu(self, non_standard_exits_lock_);
+        const art::ShadowFrame* sframe = &frame.value().get();
+        const auto it = non_standard_exits_.find(sframe);
+        if (it != non_standard_exits_.end()) {
+          ret_val = it->second.return_val_obj_;
+          non_standard_exits_.erase(it);
+          has_return = true;
+        }
+      }
+      if (has_return) {
+        return_value.Assign(self->DecodeJObject(ret_val));
+        ScopedLocalRef<jthread> thr(self->GetJniEnv(),
+                                    self->GetJniEnv()->NewLocalRef(self->GetPeer()));
+        art::ScopedThreadSuspension sts(self, art::ThreadState::kNative);
+        self->GetJniEnv()->DeleteGlobalRef(ret_val);
+        event_handler_->SetInternalEvent(
+            thr.get(), ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue, JVMTI_DISABLE);
+      }
+    }
+    if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
       DCHECK_EQ(
           method->GetInterfaceMethodIfProxy(art::kRuntimePointerSize)->GetReturnTypePrimitive(),
           art::Primitive::kPrimNot) << method->PrettyMethod();
@@ -621,14 +701,36 @@
                     art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                     art::ArtMethod* method,
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
-                    const art::JValue& return_value)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) override {
-    if (!method->IsRuntimeMethod() &&
-        event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
+                    art::instrumentation::OptionalFrame frame,
+                    art::JValue& return_value) REQUIRES_SHARED(art::Locks::mutator_lock_) override {
+    if (frame.has_value() &&
+        UNLIKELY(event_handler_->IsEventEnabledAnywhere(
+            ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue))) {
+      DCHECK(!frame->get().GetSkipMethodExitEvents());
+      bool has_return = false;
+      {
+        art::MutexLock mu(self, non_standard_exits_lock_);
+        const art::ShadowFrame* sframe = &frame.value().get();
+        const auto it = non_standard_exits_.find(sframe);
+        if (it != non_standard_exits_.end()) {
+          return_value.SetJ(it->second.return_val_.j);
+          non_standard_exits_.erase(it);
+          has_return = true;
+        }
+      }
+      if (has_return) {
+        ScopedLocalRef<jthread> thr(self->GetJniEnv(),
+                                    self->GetJniEnv()->NewLocalRef(self->GetPeer()));
+        art::ScopedThreadSuspension sts(self, art::ThreadState::kNative);
+        event_handler_->SetInternalEvent(
+            thr.get(), ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue, JVMTI_DISABLE);
+      }
+    }
+    if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kMethodExit)) {
       DCHECK_NE(
           method->GetInterfaceMethodIfProxy(art::kRuntimePointerSize)->GetReturnTypePrimitive(),
           art::Primitive::kPrimNot) << method->PrettyMethod();
-      DCHECK(!self->IsExceptionPending());
+      DCHECK(!self->IsExceptionPending()) << self->GetException()->Dump();
       jvalue val;
       art::JNIEnvExt* jnienv = self->GetJniEnv();
       // 64bit integer is the largest value in the union so we should be fine simply copying it into
@@ -704,11 +806,14 @@
   // Call-back for when we read from a field.
   void FieldRead(art::Thread* self,
                  art::Handle<art::mirror::Object> this_object,
-                 art::ArtMethod* method,
+                 art::ArtMethod* method_p,
                  uint32_t dex_pc,
-                 art::ArtField* field)
+                 art::ArtField* field_p)
       REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldAccess)) {
+      art::StackReflectiveHandleScope<1, 1> rhs(self);
+      art::ReflectiveHandle<art::ArtField> field(rhs.NewHandle(field_p));
+      art::ReflectiveHandle<art::ArtMethod> method(rhs.NewHandle(method_p));
       art::JNIEnvExt* jnienv = self->GetJniEnv();
       // DCHECK(!self->IsExceptionPending());
       ScopedLocalRef<jobject> this_ref(jnienv, AddLocalRef<jobject>(jnienv, this_object.Get()));
@@ -728,13 +833,16 @@
 
   void FieldWritten(art::Thread* self,
                     art::Handle<art::mirror::Object> this_object,
-                    art::ArtMethod* method,
+                    art::ArtMethod* method_p,
                     uint32_t dex_pc,
-                    art::ArtField* field,
+                    art::ArtField* field_p,
                     art::Handle<art::mirror::Object> new_val)
       REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldModification)) {
       art::JNIEnvExt* jnienv = self->GetJniEnv();
+      art::StackReflectiveHandleScope<1, 1> rhs(self);
+      art::ReflectiveHandle<art::ArtField> field(rhs.NewHandle(field_p));
+      art::ReflectiveHandle<art::ArtMethod> method(rhs.NewHandle(method_p));
       // DCHECK(!self->IsExceptionPending());
       ScopedLocalRef<jobject> this_ref(jnienv, AddLocalRef<jobject>(jnienv, this_object.Get()));
       ScopedLocalRef<jobject> fklass(jnienv,
@@ -760,13 +868,16 @@
   // Call-back for when we write into a field.
   void FieldWritten(art::Thread* self,
                     art::Handle<art::mirror::Object> this_object,
-                    art::ArtMethod* method,
+                    art::ArtMethod* method_p,
                     uint32_t dex_pc,
-                    art::ArtField* field,
+                    art::ArtField* field_p,
                     const art::JValue& field_value)
       REQUIRES_SHARED(art::Locks::mutator_lock_) override {
     if (event_handler_->IsEventEnabledAnywhere(ArtJvmtiEvent::kFieldModification)) {
       art::JNIEnvExt* jnienv = self->GetJniEnv();
+      art::StackReflectiveHandleScope<1, 1> rhs(self);
+      art::ReflectiveHandle<art::ArtField> field(rhs.NewHandle(field_p));
+      art::ReflectiveHandle<art::ArtMethod> method(rhs.NewHandle(method_p));
       DCHECK(!self->IsExceptionPending());
       ScopedLocalRef<jobject> this_ref(jnienv, AddLocalRef<jobject>(jnienv, this_object.Get()));
       ScopedLocalRef<jobject> fklass(jnienv,
@@ -944,23 +1055,65 @@
   }
 
  private:
+  struct NonStandardExitEventInfo {
+    // If non-null, holds a global reference to the object to be returned.
+    jobject return_val_obj_;
+    // The return-value to be passed to the MethodExit event.
+    jvalue return_val_;
+  };
+
   EventHandler* const event_handler_;
+
+  mutable art::Mutex non_standard_exits_lock_
+      ACQUIRED_BEFORE(art::Locks::instrument_entrypoints_lock_);
+
+  std::unordered_map<const art::ShadowFrame*, NonStandardExitEventInfo> non_standard_exits_
+      GUARDED_BY(non_standard_exits_lock_);
 };
 
-static uint32_t GetInstrumentationEventsFor(ArtJvmtiEvent event) {
+uint32_t EventHandler::GetInstrumentationEventsFor(ArtJvmtiEvent event) {
   switch (event) {
     case ArtJvmtiEvent::kMethodEntry:
       return art::instrumentation::Instrumentation::kMethodEntered;
-    case ArtJvmtiEvent::kMethodExit:
-      return art::instrumentation::Instrumentation::kMethodExited |
-             art::instrumentation::Instrumentation::kMethodUnwind;
+    case ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue:
+      // TODO: Ideally this would map to only kMethodExited, but supporting a single shared one is difficult.
+      // return art::instrumentation::Instrumentation::kMethodExited;
+    case ArtJvmtiEvent::kMethodExit: {
+      DCHECK(event == ArtJvmtiEvent::kMethodExit ||
+            event == ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue)
+          << "event = " << static_cast<uint32_t>(event);
+      ArtJvmtiEvent other = event == ArtJvmtiEvent::kMethodExit
+                                ? ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue
+                                : ArtJvmtiEvent::kMethodExit;
+      if (LIKELY(!IsEventEnabledAnywhere(other))) {
+        return art::instrumentation::Instrumentation::kMethodExited |
+               art::instrumentation::Instrumentation::kMethodUnwind;
+      } else {
+        // The event needs to be kept around/is already enabled by the other jvmti event that uses
+        // the same instrumentation event.
+        return 0u;
+      }
+    }
     case ArtJvmtiEvent::kFieldModification:
       return art::instrumentation::Instrumentation::kFieldWritten;
     case ArtJvmtiEvent::kFieldAccess:
       return art::instrumentation::Instrumentation::kFieldRead;
     case ArtJvmtiEvent::kBreakpoint:
-    case ArtJvmtiEvent::kSingleStep:
-      return art::instrumentation::Instrumentation::kDexPcMoved;
+    case ArtJvmtiEvent::kSingleStep: {
+      // Need to skip adding the listeners if the event is breakpoint/single-step since those events
+      // share the same underlying art-instrumentation event. We still need to give each its own
+      // deopt request, so that check is deferred until here.
+      DCHECK(event == ArtJvmtiEvent::kBreakpoint || event == ArtJvmtiEvent::kSingleStep);
+      ArtJvmtiEvent other = event == ArtJvmtiEvent::kBreakpoint ? ArtJvmtiEvent::kSingleStep
+                                                                : ArtJvmtiEvent::kBreakpoint;
+      if (LIKELY(!IsEventEnabledAnywhere(other))) {
+        return art::instrumentation::Instrumentation::kDexPcMoved;
+      } else {
+        // The event needs to be kept around/is already enabled by the other jvmti event that uses
+        // the same instrumentation event.
+        return 0u;
+      }
+    }
     case ArtJvmtiEvent::kFramePop:
       return art::instrumentation::Instrumentation::kWatchedFramePop;
     case ArtJvmtiEvent::kException:
@@ -999,6 +1152,7 @@
     case ArtJvmtiEvent::kFieldAccess:
     case ArtJvmtiEvent::kSingleStep:
     case ArtJvmtiEvent::kFramePop:
+    case ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue:
       return thread == nullptr ? DeoptRequirement::kFull : DeoptRequirement::kThread;
     case ArtJvmtiEvent::kVmInit:
     case ArtJvmtiEvent::kVmDeath:
@@ -1024,6 +1178,8 @@
     case ArtJvmtiEvent::kVmObjectAlloc:
     case ArtJvmtiEvent::kClassFileLoadHookRetransformable:
     case ArtJvmtiEvent::kDdmPublishChunk:
+    case ArtJvmtiEvent::kObsoleteObjectCreated:
+    case ArtJvmtiEvent::kStructuralDexFileLoadHook:
       return DeoptRequirement::kNone;
   }
 }
@@ -1076,18 +1232,8 @@
                                       bool enable) {
   // Add the actual listeners.
   uint32_t new_events = GetInstrumentationEventsFor(event);
-  if (new_events == art::instrumentation::Instrumentation::kDexPcMoved) {
-    // Need to skip adding the listeners if the event is breakpoint/single-step since those events
-    // share the same art-instrumentation underlying event. We need to give them their own deopt
-    // request though so the test waits until here.
-    DCHECK(event == ArtJvmtiEvent::kBreakpoint || event == ArtJvmtiEvent::kSingleStep);
-    ArtJvmtiEvent other = event == ArtJvmtiEvent::kBreakpoint ? ArtJvmtiEvent::kSingleStep
-                                                              : ArtJvmtiEvent::kBreakpoint;
-    if (IsEventEnabledAnywhere(other)) {
-      // The event needs to be kept around/is already enabled by the other jvmti event that uses the
-      // same instrumentation event.
-      return;
-    }
+  if (new_events == 0) {
+    return;
   }
   art::ScopedThreadStateChange stsc(art::Thread::Current(), art::ThreadState::kNative);
   art::instrumentation::Instrumentation* instr = art::Runtime::Current()->GetInstrumentation();
@@ -1181,7 +1327,7 @@
       SetupDdmTracking(ddm_listener_.get(), enable);
       return;
     case ArtJvmtiEvent::kVmObjectAlloc:
-      SetupObjectAllocationTracking(alloc_listener_.get(), enable);
+      SetupObjectAllocationTracking(enable);
       return;
     case ArtJvmtiEvent::kGarbageCollectionStart:
     case ArtJvmtiEvent::kGarbageCollectionFinish:
@@ -1204,6 +1350,7 @@
     case ArtJvmtiEvent::kExceptionCatch:
     case ArtJvmtiEvent::kBreakpoint:
     case ArtJvmtiEvent::kSingleStep:
+    case ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue:
       SetupTraceListener(method_trace_listener_.get(), event, enable);
       return;
     case ArtJvmtiEvent::kMonitorContendedEnter:
@@ -1278,6 +1425,90 @@
   }
 }
 
+static bool IsInternalEvent(ArtJvmtiEvent event) {
+  return static_cast<uint32_t>(event) >=
+         static_cast<uint32_t>(ArtJvmtiEvent::kMinInternalEventTypeVal);
+}
+
+jvmtiError EventHandler::SetInternalEvent(jthread thread,
+                                          ArtJvmtiEvent event,
+                                          jvmtiEventMode mode) {
+  CHECK(IsInternalEvent(event)) << static_cast<uint32_t>(event);
+
+  art::Thread* self = art::Thread::Current();
+  art::Thread* target = nullptr;
+  ScopedNoUserCodeSuspension snucs(self);
+  // The overall state across all threads and jvmtiEnvs. This is used to control the state of the
+  // instrumentation handlers since we only want each added once.
+  bool old_state;
+  bool new_state;
+  // The state for just the current 'thread' (including null) across all jvmtiEnvs. This is used to
+  // control the deoptimization state since we do refcounting for that and need to perform different
+  // actions depending on if the event is limited to a single thread or global.
+  bool old_thread_state;
+  bool new_thread_state;
+  {
+    // From now on we know we cannot get suspended by user-code.
+    // NB This does a SuspendCheck (during thread state change) so we need to
+    // make sure we don't have the 'suspend_lock' locked here.
+    art::ScopedObjectAccess soa(self);
+    art::WriterMutexLock el_mu(self, envs_lock_);
+    art::MutexLock tll_mu(self, *art::Locks::thread_list_lock_);
+    jvmtiError err = ERR(INTERNAL);
+    if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+      return err;
+    } else if (target->IsStillStarting() || target->GetState() == art::ThreadState::kStarting) {
+      target->Dump(LOG_STREAM(WARNING) << "Is not alive: ");
+      return ERR(THREAD_NOT_ALIVE);
+    }
+
+    // Make sure we have a valid jthread to pass to deopt-manager.
+    ScopedLocalRef<jthread> thread_lr(
+        soa.Env(), thread != nullptr ? nullptr : soa.AddLocalReference<jthread>(target->GetPeer()));
+    if (thread == nullptr) {
+      thread = thread_lr.get();
+    }
+    CHECK(thread != nullptr);
+
+    {
+      DCHECK_GE(GetInternalEventRefcount(event) + (mode == JVMTI_ENABLE ? 1 : -1), 0)
+        << "Refcount: " << GetInternalEventRefcount(event);
+      DCHECK_GE(GetInternalEventThreadRefcount(event, target) + (mode == JVMTI_ENABLE ? 1 : -1), 0)
+        << "Refcount: " << GetInternalEventThreadRefcount(event, target);
+      DCHECK_GE(GetInternalEventRefcount(event), GetInternalEventThreadRefcount(event, target));
+      old_state = GetInternalEventRefcount(event) > 0;
+      old_thread_state = GetInternalEventThreadRefcount(event, target) > 0;
+      if (mode == JVMTI_ENABLE) {
+        new_state = IncrInternalEventRefcount(event) > 0;
+        new_thread_state = IncrInternalEventThreadRefcount(event, target) > 0;
+      } else {
+        new_state = DecrInternalEventRefcount(event) > 0;
+        new_thread_state = DecrInternalEventThreadRefcount(event, target) > 0;
+      }
+      if (old_state != new_state) {
+        global_mask.Set(event, new_state);
+      }
+    }
+  }
+  // Handle any special work required for the event type. We still have the
+  // user_code_suspend_count_lock_ so there won't be any interleaving here.
+  if (new_state != old_state) {
+    HandleEventType(event, mode == JVMTI_ENABLE);
+  }
+  if (old_thread_state != new_thread_state) {
+    HandleEventDeopt(event, thread, new_thread_state);
+  }
+  return OK;
+}
+
+static bool IsDirectlySettableEvent(ArtJvmtiEvent event) {
+  return !IsInternalEvent(event);
+}
+
+static bool EventIsNormal(ArtJvmtiEvent event) {
+  return EventMask::EventIsInRange(event) && IsDirectlySettableEvent(event);
+}
+
 jvmtiError EventHandler::SetEvent(ArtJvmTiEnv* env,
                                   jthread thread,
                                   ArtJvmtiEvent event,
@@ -1286,7 +1517,7 @@
     return ERR(ILLEGAL_ARGUMENT);
   }
 
-  if (!EventMask::EventIsInRange(event)) {
+  if (!EventIsNormal(event)) {
     return ERR(INVALID_EVENT_TYPE);
   }
 
@@ -1385,6 +1616,46 @@
   }
 }
 
+void EventHandler::AddDelayedNonStandardExitEvent(const art::ShadowFrame *frame,
+                                                  bool is_object,
+                                                  jvalue val) {
+  method_trace_listener_->AddDelayedNonStandardExitEvent(frame, is_object, val);
+}
+
+static size_t GetInternalEventIndex(ArtJvmtiEvent event) {
+  CHECK(IsInternalEvent(event));
+  return static_cast<size_t>(event) - static_cast<size_t>(ArtJvmtiEvent::kMinInternalEventTypeVal);
+}
+
+int32_t EventHandler::DecrInternalEventThreadRefcount(ArtJvmtiEvent event, art::Thread* target) {
+  return --GetInternalEventThreadRefcount(event, target);
+}
+
+int32_t EventHandler::IncrInternalEventThreadRefcount(ArtJvmtiEvent event, art::Thread* target) {
+  return ++GetInternalEventThreadRefcount(event, target);
+}
+
+int32_t& EventHandler::GetInternalEventThreadRefcount(ArtJvmtiEvent event, art::Thread* target) {
+  auto& refs = internal_event_thread_refcount_[GetInternalEventIndex(event)];
+  UniqueThread target_ut{target, target->GetTid()};
+  if (refs.find(target_ut) == refs.end()) {
+    refs.insert({target_ut, 0});
+  }
+  return refs.at(target_ut);
+}
+
+int32_t EventHandler::DecrInternalEventRefcount(ArtJvmtiEvent event) {
+  return --internal_event_refcount_[GetInternalEventIndex(event)];
+}
+
+int32_t EventHandler::IncrInternalEventRefcount(ArtJvmtiEvent event) {
+  return ++internal_event_refcount_[GetInternalEventIndex(event)];
+}
+
+int32_t EventHandler::GetInternalEventRefcount(ArtJvmtiEvent event) const {
+  return internal_event_refcount_[GetInternalEventIndex(event)];
+}
+
 void EventHandler::Shutdown() {
   // Need to remove the method_trace_listener_ if it's there.
   art::Thread* self = art::Thread::Current();
@@ -1394,12 +1665,15 @@
   art::ScopedSuspendAll ssa("jvmti method tracing uninstallation");
   // Just remove every possible event.
   art::Runtime::Current()->GetInstrumentation()->RemoveListener(method_trace_listener_.get(), ~0);
+  AllocationManager::Get()->RemoveAllocListener();
 }
 
 EventHandler::EventHandler()
   : envs_lock_("JVMTI Environment List Lock", art::LockLevel::kPostMutatorTopLockLevel),
-    frame_pop_enabled(false) {
-  alloc_listener_.reset(new JvmtiAllocationListener(this));
+    frame_pop_enabled(false),
+    internal_event_refcount_({0}) {
+  alloc_listener_.reset(new JvmtiEventAllocationListener(this));
+  AllocationManager::Get()->SetAllocListener(alloc_listener_.get());
   ddm_listener_.reset(new JvmtiDdmChunkListener(this));
   gc_pause_listener_.reset(new JvmtiGcPauseListener(this));
   method_trace_listener_.reset(new JvmtiMethodTraceListener(this));
diff --git a/openjdkjvmti/events.h b/openjdkjvmti/events.h
index d54c87a..d4eb171 100644
--- a/openjdkjvmti/events.h
+++ b/openjdkjvmti/events.h
@@ -18,20 +18,23 @@
 #define ART_OPENJDKJVMTI_EVENTS_H_
 
 #include <bitset>
+#include <unordered_map>
 #include <vector>
 
 #include <android-base/logging.h>
 #include <android-base/thread_annotations.h>
 
+#include "android-base/thread_annotations.h"
 #include "base/macros.h"
 #include "base/mutex.h"
 #include "jvmti.h"
+#include "managed_stack.h"
 #include "thread.h"
 
 namespace openjdkjvmti {
 
 struct ArtJvmTiEnv;
-class JvmtiAllocationListener;
+class JvmtiEventAllocationListener;
 class JvmtiDdmChunkListener;
 class JvmtiGcPauseListener;
 class JvmtiMethodTraceListener;
@@ -73,19 +76,65 @@
     kGarbageCollectionFinish = JVMTI_EVENT_GARBAGE_COLLECTION_FINISH,
     kObjectFree = JVMTI_EVENT_OBJECT_FREE,
     kVmObjectAlloc = JVMTI_EVENT_VM_OBJECT_ALLOC,
+    // Internal event to mark a ClassFileLoadHook as one created with the can_retransform_classes
+    // capability.
     kClassFileLoadHookRetransformable = JVMTI_MAX_EVENT_TYPE_VAL + 1,
     kDdmPublishChunk = JVMTI_MAX_EVENT_TYPE_VAL + 2,
-    kMaxEventTypeVal = kDdmPublishChunk,
+    kObsoleteObjectCreated = JVMTI_MAX_EVENT_TYPE_VAL + 3,
+    kStructuralDexFileLoadHook = JVMTI_MAX_EVENT_TYPE_VAL + 4,
+    kMaxNormalEventTypeVal = kStructuralDexFileLoadHook,
+
+    // All that follow are events used to implement internal JVMTI functions. They are not settable
+    // directly by agents.
+    kMinInternalEventTypeVal = kMaxNormalEventTypeVal + 1,
+
+    // Internal event we use to implement the ForceEarlyReturn functions.
+    kForceEarlyReturnUpdateReturnValue = kMinInternalEventTypeVal,
+    kMaxInternalEventTypeVal = kForceEarlyReturnUpdateReturnValue,
+
+    kMaxEventTypeVal = kMaxInternalEventTypeVal,
 };
 
+constexpr jint kInternalEventCount = static_cast<jint>(ArtJvmtiEvent::kMaxInternalEventTypeVal) -
+                                     static_cast<jint>(ArtJvmtiEvent::kMinInternalEventTypeVal) + 1;
+
 using ArtJvmtiEventDdmPublishChunk = void (*)(jvmtiEnv *jvmti_env,
                                               JNIEnv* jni_env,
                                               jint data_type,
                                               jint data_len,
                                               const jbyte* data);
 
+using ArtJvmtiEventObsoleteObjectCreated = void (*)(jvmtiEnv *jvmti_env,
+                                                    jlong* obsolete_tag,
+                                                    jlong* new_tag);
+
+using ArtJvmtiEventStructuralDexFileLoadHook = void (*)(jvmtiEnv *jvmti_env,
+                                                        JNIEnv* jni_env,
+                                                        jclass class_being_redefined,
+                                                        jobject loader,
+                                                        const char* name,
+                                                        jobject protection_domain,
+                                                        jint dex_data_len,
+                                                        const unsigned char* dex_data,
+                                                        jint* new_dex_data_len,
+                                                        unsigned char** new_dex_data);
+
+// A bare Thread pointer is not a stable key, as Thread objects may be reused; pair the pointer
+// with the thread id instead.
+// Note: We could just use the tid like tracing does.
+using UniqueThread = std::pair<art::Thread*, uint32_t>;
+
+struct UniqueThreadHasher {
+  std::size_t operator()(const UniqueThread& k) const {
+    return std::hash<uint32_t>{}(k.second) ^ (std::hash<void*>{}(k.first) << 1);
+  }
+};
+
 struct ArtJvmtiEventCallbacks : jvmtiEventCallbacks {
-  ArtJvmtiEventCallbacks() : DdmPublishChunk(nullptr) {
+  ArtJvmtiEventCallbacks()
+      : DdmPublishChunk(nullptr),
+        ObsoleteObjectCreated(nullptr),
+        StructuralDexFileLoadHook(nullptr) {
     memset(this, 0, sizeof(jvmtiEventCallbacks));
   }
 
@@ -96,6 +145,8 @@
   jvmtiError Set(jint index, jvmtiExtensionEvent cb);
 
   ArtJvmtiEventDdmPublishChunk DdmPublishChunk;
+  ArtJvmtiEventObsoleteObjectCreated ObsoleteObjectCreated;
+  ArtJvmtiEventStructuralDexFileLoadHook StructuralDexFileLoadHook;
 };
 
 bool IsExtensionEvent(jint e);
@@ -141,10 +192,6 @@
 
   // The per-thread enabled events.
 
-  // It is not enough to store a Thread pointer, as these may be reused. Use the pointer and the
-  // thread id.
-  // Note: We could just use the tid like tracing does.
-  using UniqueThread = std::pair<art::Thread*, uint32_t>;
   // TODO: Native thread objects are immovable, so we can use them as keys in an (unordered) map,
   //       if necessary.
   std::vector<std::pair<UniqueThread, EventMask>> thread_event_masks;
@@ -198,6 +245,16 @@
     return global_mask.Test(event);
   }
 
+  // Sets an internal event. Unlike normal JVMTI events internal events are not associated with any
+  // particular jvmtiEnv and are refcounted. This refcounting is done to allow us to easily enable
+  // events during functions and disable them during the requested event callback. Since these are
+  // used to implement various JVMTI functions these events always have a single target thread. If
+  // target is null the current thread is used.
+  jvmtiError SetInternalEvent(jthread target,
+                              ArtJvmtiEvent event,
+                              jvmtiEventMode mode)
+      REQUIRES(!envs_lock_, !art::Locks::mutator_lock_);
+
   jvmtiError SetEvent(ArtJvmTiEnv* env,
                       jthread thread,
                       ArtJvmtiEvent event,
@@ -246,9 +303,25 @@
   inline void DispatchEventOnEnv(ArtJvmTiEnv* env, art::Thread* thread, Args... args) const
       REQUIRES(!envs_lock_);
 
+  void AddDelayedNonStandardExitEvent(const art::ShadowFrame* frame, bool is_object, jvalue val)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(art::Locks::user_code_suspension_lock_, art::Locks::thread_list_lock_);
+
+  template<typename Visitor>
+  void ForEachEnv(art::Thread* self, Visitor v) REQUIRES(!envs_lock_) {
+    art::ReaderMutexLock mu(self, envs_lock_);
+    for (ArtJvmTiEnv* e : envs) {
+      if (e != nullptr) {
+        v(e);
+      }
+    }
+  }
+
  private:
   void SetupTraceListener(JvmtiMethodTraceListener* listener, ArtJvmtiEvent event, bool enable);
 
+  uint32_t GetInstrumentationEventsFor(ArtJvmtiEvent event);
+
   // Specifically handle the FramePop event which it might not always be possible to turn off.
   void SetupFramePopTraceListener(bool enable);
 
@@ -325,6 +398,21 @@
 
   bool OtherMonitorEventsEnabledAnywhere(ArtJvmtiEvent event);
 
+  int32_t GetInternalEventRefcount(ArtJvmtiEvent event) const REQUIRES(envs_lock_);
+  // Increment internal event refcount for the given event and return the new count.
+  int32_t IncrInternalEventRefcount(ArtJvmtiEvent event) REQUIRES(envs_lock_);
+  // Decrement internal event refcount for the given event and return the new count.
+  int32_t DecrInternalEventRefcount(ArtJvmtiEvent event) REQUIRES(envs_lock_);
+
+  int32_t& GetInternalEventThreadRefcount(ArtJvmtiEvent event, art::Thread* target)
+      REQUIRES(envs_lock_, art::Locks::thread_list_lock_);
+  // Increment internal event refcount for the given event and return the new count.
+  int32_t IncrInternalEventThreadRefcount(ArtJvmtiEvent event, art::Thread* target)
+      REQUIRES(envs_lock_, art::Locks::thread_list_lock_);
+  // Decrement internal event refcount for the given event and return the new count.
+  int32_t DecrInternalEventThreadRefcount(ArtJvmtiEvent event, art::Thread* target)
+      REQUIRES(envs_lock_, art::Locks::thread_list_lock_);
+
   // List of all JvmTiEnv objects that have been created, in their creation order. It is a std::list
   // since we mostly access it by iterating over the entire thing, only ever append to the end, and
   // need to be able to remove arbitrary elements from it.
@@ -337,7 +425,7 @@
   // A union of all enabled events, anywhere.
   EventMask global_mask;
 
-  std::unique_ptr<JvmtiAllocationListener> alloc_listener_;
+  std::unique_ptr<JvmtiEventAllocationListener> alloc_listener_;
   std::unique_ptr<JvmtiDdmChunkListener> ddm_listener_;
   std::unique_ptr<JvmtiGcPauseListener> gc_pause_listener_;
   std::unique_ptr<JvmtiMethodTraceListener> method_trace_listener_;
@@ -348,6 +436,16 @@
   // continue to listen to this event even if it has been disabled.
   // TODO We could remove the listeners once all jvmtiEnvs have drained their shadow-frame vectors.
   bool frame_pop_enabled;
+
+  // The overall refcount for each internal event across all threads.
+  std::array<int32_t, kInternalEventCount> internal_event_refcount_ GUARDED_BY(envs_lock_);
+  // The refcount for each thread for each internal event.
+  // TODO We should clean both this and the normal EventMask lists up when threads end.
+  std::array<std::unordered_map<UniqueThread, int32_t, UniqueThreadHasher>, kInternalEventCount>
+      internal_event_thread_refcount_
+          GUARDED_BY(envs_lock_) GUARDED_BY(art::Locks::thread_list_lock_);
+
+  friend class JvmtiMethodTraceListener;
 };
 
 }  // namespace openjdkjvmti
diff --git a/openjdkjvmti/fixed_up_dex_file.cc b/openjdkjvmti/fixed_up_dex_file.cc
index e8b3435..eefbe41 100644
--- a/openjdkjvmti/fixed_up_dex_file.cc
+++ b/openjdkjvmti/fixed_up_dex_file.cc
@@ -76,12 +76,12 @@
 static void DCheckVerifyDexFile(const art::DexFile& dex) {
   if (art::kIsDebugBuild) {
     std::string error;
-    if (!art::DexFileVerifier::Verify(&dex,
-                                      dex.Begin(),
-                                      dex.Size(),
-                                      "FixedUpDexFile_Verification.dex",
-                                      /*verify_checksum=*/ true,
-                                      &error)) {
+    if (!art::dex::Verify(&dex,
+                          dex.Begin(),
+                          dex.Size(),
+                          "FixedUpDexFile_Verification.dex",
+                          /*verify_checksum=*/ true,
+                          &error)) {
       LOG(FATAL) << "Failed to verify de-quickened dex file: " << error;
     }
   }
diff --git a/openjdkjvmti/ti_breakpoint.cc b/openjdkjvmti/ti_breakpoint.cc
index 813aa8e..13d8db7 100644
--- a/openjdkjvmti/ti_breakpoint.cc
+++ b/openjdkjvmti/ti_breakpoint.cc
@@ -53,6 +53,69 @@
 
 namespace openjdkjvmti {
 
+class JvmtiBreakpointReflectionSource : public art::ReflectionSourceInfo {
+ public:
+  JvmtiBreakpointReflectionSource(size_t pc, art::ArtMethod* m)
+      : art::ReflectionSourceInfo(art::ReflectionSourceType::kSourceMiscInternal),
+        pc_(pc),
+        m_(m) {}
+
+  void Describe(std::ostream& os) const override REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    art::ReflectionSourceInfo::Describe(os);
+    os << " jvmti Breakpoint Method=" << m_->PrettyMethod() << " PC=" << pc_;
+  }
+
+ private:
+  size_t pc_;
+  art::ArtMethod* m_;
+};
+
+class BreakpointReflectiveValueCallback : public art::ReflectiveValueVisitCallback {
+ public:
+  void VisitReflectiveTargets(art::ReflectiveValueVisitor* visitor)
+      REQUIRES(art::Locks::mutator_lock_) {
+    art::Thread* self = art::Thread::Current();
+    eh_->ForEachEnv(self, [&](ArtJvmTiEnv* env) NO_THREAD_SAFETY_ANALYSIS {
+      art::Locks::mutator_lock_->AssertExclusiveHeld(self);
+      art::WriterMutexLock mu(self, env->event_info_mutex_);
+      std::vector<std::pair<Breakpoint, Breakpoint>> updated_breakpoints;
+      for (auto it : env->breakpoints) {
+        art::ArtMethod* orig_method = it.GetMethod();
+        art::ArtMethod* am = visitor->VisitMethod(
+            orig_method, JvmtiBreakpointReflectionSource(it.GetLocation(), orig_method));
+        if (am != orig_method) {
+          updated_breakpoints.push_back({ Breakpoint { am, it.GetLocation() }, it });
+        }
+      }
+      for (auto it : updated_breakpoints) {
+        DCHECK(env->breakpoints.find(it.second) != env->breakpoints.end());
+        env->breakpoints.erase(it.second);
+        env->breakpoints.insert(it.first);
+      }
+    });
+  }
+
+  EventHandler* eh_;
+};
+
+static BreakpointReflectiveValueCallback gReflectiveValueCallback;
+void BreakpointUtil::Register(EventHandler* eh) {
+  gReflectiveValueCallback.eh_ = eh;
+  art::ScopedThreadStateChange stsc(art::Thread::Current(),
+                                    art::ThreadState::kWaitingForDebuggerToAttach);
+  art::ScopedSuspendAll ssa("Add breakpoint reflective value visit callback");
+  art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
+  callbacks->AddReflectiveValueVisitCallback(&gReflectiveValueCallback);
+}
+
+void BreakpointUtil::Unregister() {
+  art::ScopedThreadStateChange stsc(art::Thread::Current(),
+                                    art::ThreadState::kWaitingForDebuggerToAttach);
+  art::ScopedSuspendAll ssa("Remove reflective value visit callback");
+  art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
+  callbacks->RemoveReflectiveValueVisitCallback(&gReflectiveValueCallback);
+}
+
 size_t Breakpoint::hash() const {
   return std::hash<uintptr_t> {}(reinterpret_cast<uintptr_t>(method_))
       ^ std::hash<jlocation> {}(location_);
diff --git a/openjdkjvmti/ti_breakpoint.h b/openjdkjvmti/ti_breakpoint.h
index 7aa33ae..96610c3 100644
--- a/openjdkjvmti/ti_breakpoint.h
+++ b/openjdkjvmti/ti_breakpoint.h
@@ -47,6 +47,7 @@
 namespace openjdkjvmti {
 
 struct ArtJvmTiEnv;
+class EventHandler;
 
 class Breakpoint {
  public:
@@ -74,6 +75,9 @@
 
 class BreakpointUtil {
  public:
+  static void Register(EventHandler* eh);
+  static void Unregister();
+
   static jvmtiError SetBreakpoint(jvmtiEnv* env, jmethodID method, jlocation location);
   static jvmtiError ClearBreakpoint(jvmtiEnv* env, jmethodID method, jlocation location);
   // Used by class redefinition to remove breakpoints on redefined classes.
diff --git a/openjdkjvmti/ti_class.cc b/openjdkjvmti/ti_class.cc
index 3ad1112..4d6b41a 100644
--- a/openjdkjvmti/ti_class.cc
+++ b/openjdkjvmti/ti_class.cc
@@ -39,6 +39,7 @@
 
 #include "art_jvmti.h"
 #include "base/array_ref.h"
+#include "base/logging.h"
 #include "base/macros.h"
 #include "base/utils.h"
 #include "class_linker.h"
@@ -203,16 +204,20 @@
       memcpy(post_non_retransform.data(), def.GetDexData().data(), post_non_retransform.size());
     }
 
+    // Call all structural transformation agents.
+    Transformer::TransformSingleClassDirect<ArtJvmtiEvent::kStructuralDexFileLoadHook>(
+        event_handler, self, &def);
     // Call all retransformable agents.
     Transformer::TransformSingleClassDirect<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
         event_handler, self, &def);
 
     if (def.IsModified()) {
-      LOG(WARNING) << "Changing class " << descriptor;
+      VLOG(class_linker) << "Changing class " << descriptor;
       art::StackHandleScope<2> hs(self);
       // Save the results of all the non-retransformable agents.
       // First allocate the ClassExt
-      art::Handle<art::mirror::ClassExt> ext(hs.NewHandle(klass->EnsureExtDataPresent(self)));
+      art::Handle<art::mirror::ClassExt> ext =
+          hs.NewHandle(art::mirror::Class::EnsureExtDataPresent(klass, self));
       // Make sure we have a ClassExt. This is fine even though we are a temporary since it will
       // get copied.
       if (ext.IsNull()) {
@@ -721,8 +726,8 @@
           art::annotations::GetSignatureAnnotationForClass(h_klass);
       if (str_array != nullptr) {
         std::ostringstream oss;
-        for (int32_t i = 0; i != str_array->GetLength(); ++i) {
-          oss << str_array->Get(i)->ToModifiedUtf8();
+        for (auto str : str_array->Iterate()) {
+          oss << str->ToModifiedUtf8();
         }
         std::string output_string = oss.str();
         jvmtiError ret;
diff --git a/openjdkjvmti/ti_class_definition.h b/openjdkjvmti/ti_class_definition.h
index 224e664..cb0853b 100644
--- a/openjdkjvmti/ti_class_definition.h
+++ b/openjdkjvmti/ti_class_definition.h
@@ -40,6 +40,7 @@
 
 #include "base/array_ref.h"
 #include "base/mem_map.h"
+#include "events.h"
 
 namespace openjdkjvmti {
 
@@ -65,7 +66,8 @@
         current_dex_file_(),
         redefined_(false),
         from_class_ext_(false),
-        initialized_(false) {}
+        initialized_(false),
+        structural_transform_update_(false) {}
 
   void InitFirstLoad(const char* descriptor,
                      art::Handle<art::mirror::ClassLoader> klass_loader,
@@ -76,7 +78,7 @@
   ArtClassDefinition(ArtClassDefinition&& o) = default;
   ArtClassDefinition& operator=(ArtClassDefinition&& o) = default;
 
-  void SetNewDexData(jint new_dex_len, unsigned char* new_dex_data) {
+  void SetNewDexData(jint new_dex_len, unsigned char* new_dex_data, ArtJvmtiEvent event) {
     DCHECK(IsInitialized());
     if (new_dex_data == nullptr) {
       return;
@@ -86,10 +88,17 @@
         dex_data_memory_.resize(new_dex_len);
         memcpy(dex_data_memory_.data(), new_dex_data, new_dex_len);
         dex_data_ = art::ArrayRef<const unsigned char>(dex_data_memory_);
+        if (event == ArtJvmtiEvent::kStructuralDexFileLoadHook) {
+          structural_transform_update_ = true;
+        }
       }
     }
   }
 
+  bool HasStructuralChanges() const {
+    return structural_transform_update_;
+  }
+
   art::ArrayRef<const unsigned char> GetNewOriginalDexFile() const {
     DCHECK(IsInitialized());
     if (redefined_) {
@@ -187,6 +196,9 @@
 
   bool initialized_;
 
+  // Set if we had a new dex from the given transform type.
+  bool structural_transform_update_;
+
   DISALLOW_COPY_AND_ASSIGN(ArtClassDefinition);
 };
 
diff --git a/openjdkjvmti/ti_class_loader-inl.h b/openjdkjvmti/ti_class_loader-inl.h
index 9b04841..29ea684 100644
--- a/openjdkjvmti/ti_class_loader-inl.h
+++ b/openjdkjvmti/ti_class_loader-inl.h
@@ -57,10 +57,8 @@
     return;
   }
 
-  size_t num_elements = dex_elements_list->GetLength();
   // Iterate over the DexPathList$Element to find the right one
-  for (size_t i = 0; i < num_elements; i++) {
-    art::ObjPtr<art::mirror::Object> current_element = dex_elements_list->Get(i);
+  for (auto current_element : dex_elements_list.Iterate<art::mirror::Object>()) {
     CHECK(!current_element.IsNull());
     art::ObjPtr<art::mirror::Object> dex_file(element_dex_file_field->GetObject(current_element));
     if (!dex_file.IsNull()) {
diff --git a/openjdkjvmti/ti_class_loader.cc b/openjdkjvmti/ti_class_loader.cc
index 999b9d5..d0a6634 100644
--- a/openjdkjvmti/ti_class_loader.cc
+++ b/openjdkjvmti/ti_class_loader.cc
@@ -66,7 +66,7 @@
   art::ScopedObjectAccessUnchecked soa(self);
   art::StackHandleScope<3> hs(self);
   if (art::ClassLinker::IsBootClassLoader(soa, loader.Get())) {
-    art::Runtime::Current()->GetClassLinker()->AppendToBootClassPath(self, *dex_file);
+    art::Runtime::Current()->GetClassLinker()->AppendToBootClassPath(self, dex_file);
     return true;
   }
   art::Handle<art::mirror::Object> java_dex_file_obj(
diff --git a/openjdkjvmti/ti_extension.cc b/openjdkjvmti/ti_extension.cc
index f12cb0a..62c6fb2 100644
--- a/openjdkjvmti/ti_extension.cc
+++ b/openjdkjvmti/ti_extension.cc
@@ -22,7 +22,6 @@
  *
  * You should have received a copy of the GNU General Public License version
  * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  *
  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  * or visit www.oracle.com if you need additional information or have any
@@ -31,10 +30,13 @@
 
 #include <vector>
 
+#include "jvmti.h"
 #include "ti_extension.h"
 
 #include "art_jvmti.h"
 #include "events.h"
+#include "jni_id_type.h"
+#include "runtime-inl.h"
 #include "ti_allocator.h"
 #include "ti_class.h"
 #include "ti_ddms.h"
@@ -42,6 +44,9 @@
 #include "ti_heap.h"
 #include "ti_logging.h"
 #include "ti_monitor.h"
+#include "ti_redefine.h"
+#include "ti_search.h"
+#include "transform.h"
 
 #include "thread-inl.h"
 
@@ -327,6 +332,163 @@
     return error;
   }
 
+  // AddToDexClassLoader
+  error = add_extension(
+      reinterpret_cast<jvmtiExtensionFunction>(SearchUtil::AddToDexClassLoader),
+      "com.android.art.classloader.add_to_dex_class_loader",
+      "Adds a dexfile to a given dalvik.system.BaseDexClassLoader in a manner similar to"
+      " AddToSystemClassLoader.",
+      {
+        { "classloader", JVMTI_KIND_IN, JVMTI_TYPE_JOBJECT, false },
+        { "segment", JVMTI_KIND_IN_PTR, JVMTI_TYPE_CCHAR, false },
+      },
+      {
+         ERR(NULL_POINTER),
+         ERR(CLASS_LOADER_UNSUPPORTED),
+         ERR(ILLEGAL_ARGUMENT),
+         ERR(WRONG_PHASE),
+      });
+  if (error != ERR(NONE)) {
+    return error;
+  }
+
+  // AddToDexClassLoaderInMemory
+  error = add_extension(
+      reinterpret_cast<jvmtiExtensionFunction>(SearchUtil::AddToDexClassLoaderInMemory),
+      "com.android.art.classloader.add_to_dex_class_loader_in_memory",
+      "Adds a dexfile buffer to a given dalvik.system.BaseDexClassLoader in a manner similar to"
+      " AddToSystemClassLoader. This may only be done during the LIVE phase. The buffer is copied"
+      " and the caller is responsible for deallocating it after this call.",
+      {
+        { "classloader", JVMTI_KIND_IN, JVMTI_TYPE_JOBJECT, false },
+        { "dex_bytes", JVMTI_KIND_IN_BUF, JVMTI_TYPE_CCHAR, false },
+        { "dex_bytes_len", JVMTI_KIND_IN, JVMTI_TYPE_JINT, false },
+      },
+      {
+         ERR(NULL_POINTER),
+         ERR(CLASS_LOADER_UNSUPPORTED),
+         ERR(ILLEGAL_ARGUMENT),
+         ERR(WRONG_PHASE),
+      });
+  if (error != ERR(NONE)) {
+    return error;
+  }
+
+  // ChangeArraySize
+  error = add_extension(
+      reinterpret_cast<jvmtiExtensionFunction>(HeapExtensions::ChangeArraySize),
+      "com.android.art.heap.change_array_size",
+      "Changes the size of a java array. As far as all JNI and java code is concerned this is"
+      " atomic. Must have can_tag_objects capability. If the new length of the array is smaller"
+      " than the original length, then the array will be truncated to the new length. Otherwise,"
+      " all new slots will be filled with null, 0, or False as appropriate for the array type.",
+      {
+        { "array", JVMTI_KIND_IN, JVMTI_TYPE_JOBJECT, false },
+        { "new_size", JVMTI_KIND_IN, JVMTI_TYPE_JINT, false },
+      },
+      {
+         ERR(NULL_POINTER),
+         ERR(MUST_POSSESS_CAPABILITY),
+         ERR(ILLEGAL_ARGUMENT),
+         ERR(OUT_OF_MEMORY),
+      });
+  if (error != ERR(NONE)) {
+    return error;
+  }
+
+  // These require index-ids and debuggable to function
+  art::Runtime* runtime = art::Runtime::Current();
+  if (runtime->GetJniIdType() == art::JniIdType::kIndices &&
+      (runtime->GetInstrumentation()->IsForcedInterpretOnly() || runtime->IsJavaDebuggable())) {
+    // IsStructurallyModifiableClass
+    error = add_extension(
+        reinterpret_cast<jvmtiExtensionFunction>(Redefiner::IsStructurallyModifiableClass),
+        "com.android.art.class.is_structurally_modifiable_class",
+        "Returns whether a class can potentially be 'structurally' redefined using the various"
+        " structural redefinition extensions provided.",
+        {
+          { "klass", JVMTI_KIND_IN, JVMTI_TYPE_JCLASS, false },
+          { "result", JVMTI_KIND_OUT, JVMTI_TYPE_JBOOLEAN, false },
+        },
+        {
+          ERR(INVALID_CLASS),
+          ERR(NULL_POINTER),
+        });
+    if (error != ERR(NONE)) {
+      return error;
+    }
+
+    // StructurallyRedefineClasses
+    error = add_extension(
+        reinterpret_cast<jvmtiExtensionFunction>(Redefiner::StructurallyRedefineClasses),
+        "com.android.art.class.structurally_redefine_classes",
+        "Entrypoint for structural class redefinition. Has the same signature as RedefineClasses."
+        " Only supports additive changes, methods and fields may not be removed. Supertypes and"
+        " implemented interfaces may not be changed. After calling this"
+        " com.android.art.structural_dex_file_load_hook events will be triggered, followed by"
+        " re-transformable ClassFileLoadHook events. After this method completes subsequent"
+        " RetransformClasses calls will use the input to this function as the initial class"
+        " definition.",
+        {
+            { "num_classes", JVMTI_KIND_IN, JVMTI_TYPE_JINT, false },
+            { "class_definitions", JVMTI_KIND_IN_BUF, JVMTI_TYPE_CVOID, false },
+        },
+        {
+            ERR(CLASS_LOADER_UNSUPPORTED),
+            ERR(FAILS_VERIFICATION),
+            ERR(ILLEGAL_ARGUMENT),
+            ERR(INVALID_CLASS),
+            ERR(MUST_POSSESS_CAPABILITY),
+            ERR(MUST_POSSESS_CAPABILITY),
+            ERR(NULL_POINTER),
+            ERR(OUT_OF_MEMORY),
+            ERR(UNMODIFIABLE_CLASS),
+            ERR(UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED),
+            ERR(UNSUPPORTED_REDEFINITION_METHOD_ADDED),
+            ERR(UNSUPPORTED_REDEFINITION_METHOD_DELETED),
+            ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+        });
+    if (error != ERR(NONE)) {
+      return error;
+    }
+
+    // StructurallyRedefineClassDirect
+    error = add_extension(
+        reinterpret_cast<jvmtiExtensionFunction>(Redefiner::StructurallyRedefineClassDirect),
+        "com.android.art.UNSAFE.class.structurally_redefine_class_direct",
+        "Temporary prototype entrypoint for redefining a single class structurally. Currently this"
+        " only supports adding new static fields to a class without any instances."
+        " ClassFileLoadHook events will NOT be triggered. This does not currently support creating"
+        " obsolete methods. This function only has rudimentary error checking. This should not be"
+        " used except for testing.",
+        {
+          { "klass", JVMTI_KIND_IN, JVMTI_TYPE_JCLASS, false },
+          { "new_def", JVMTI_KIND_IN_BUF, JVMTI_TYPE_CCHAR, false },
+          { "new_def_len", JVMTI_KIND_IN, JVMTI_TYPE_JINT, false },
+        },
+        {
+          ERR(CLASS_LOADER_UNSUPPORTED),
+          ERR(FAILS_VERIFICATION),
+          ERR(ILLEGAL_ARGUMENT),
+          ERR(INVALID_CLASS),
+          ERR(MUST_POSSESS_CAPABILITY),
+          ERR(MUST_POSSESS_CAPABILITY),
+          ERR(NULL_POINTER),
+          ERR(OUT_OF_MEMORY),
+          ERR(UNMODIFIABLE_CLASS),
+          ERR(UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED),
+          ERR(UNSUPPORTED_REDEFINITION_METHOD_ADDED),
+          ERR(UNSUPPORTED_REDEFINITION_METHOD_DELETED),
+          ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+        });
+    if (error != ERR(NONE)) {
+      return error;
+    }
+  } else {
+    LOG(INFO) << "debuggable & jni-type indices are required to implement structural "
+              << "class redefinition extensions.";
+  }
+
   // Copy into output buffer.
 
   *extension_count_ptr = ext_vector.size();
@@ -368,7 +530,7 @@
                            const char* id,
                            const char* short_description,
                            const std::vector<CParamInfo>& params) {
-    DCHECK(IsExtensionEvent(extension_event_index));
+    DCHECK(IsExtensionEvent(extension_event_index)) << static_cast<jint>(extension_event_index);
     jvmtiExtensionEventInfo event_info;
     jvmtiError error;
 
@@ -432,7 +594,69 @@
   if (error != OK) {
     return error;
   }
-
+  error = add_extension(
+      ArtJvmtiEvent::kObsoleteObjectCreated,
+      "com.android.art.heap.obsolete_object_created",
+      "Called when an obsolete object is created.\n"
+      "An object becomes obsolete when, due to some jvmti function call all references to the"
+      " object are replaced with a reference to a different object. After this call finishes there"
+      " will be no strong references to the obsolete object anywere. If the object is retrieved"
+      " using GetObjectsWithTags its type (class) may have changed and any data it contains may"
+      " have been deleted. This is primarily designed to support memory tracking agents which make"
+      " use of the ObjectFree and VMObjectAlloc events for tracking. To support this use-case if"
+      " this event is not being handled it will by default act as though the following code was"
+      " registered as a handler:\n"
+      "\n"
+      "  void HandleObsoleteObjectCreated(jvmtiEnv* env, jlong* obsolete_tag, jlong* new_tag) {\n"
+      "    jlong temp = *obsolete_tag;\n"
+      "    *obsolete_tag = *new_tag;\n"
+      "    *new_tag = temp;\n"
+      "  }\n"
+      "\n"
+      "Note that this event does not support filtering based on thread. This event has the same"
+      " restrictions on JNI and JVMTI function calls as the ObjectFree event.\n"
+      "\n"
+      "Arguments:\n"
+      "  obsolete_tag: Pointer to the tag the old object (now obsolete) has. Setting the pointer"
+      " will update the tag value.\n"
+      "  new_tag: Pointer to the tag the new object (replacing the obsolete one) has. Setting the"
+      " pointer will update the tag value.",
+      {
+        { "obsolete_tag", JVMTI_KIND_IN_PTR, JVMTI_TYPE_JLONG, false },
+        { "new_tag", JVMTI_KIND_IN_PTR, JVMTI_TYPE_JLONG, false },
+      });
+  if (error != OK) {
+    return error;
+  }
+  art::Runtime* runtime = art::Runtime::Current();
+  if (runtime->GetJniIdType() == art::JniIdType::kIndices &&
+      (runtime->GetInstrumentation()->IsForcedInterpretOnly() || runtime->IsJavaDebuggable())) {
+    error = add_extension(
+        ArtJvmtiEvent::kStructuralDexFileLoadHook,
+        "com.android.art.class.structural_dex_file_load_hook",
+        "Called during class load, after a 'RetransformClasses' call, or after a 'RedefineClasses'"
+        " call in order to allow the agent to modify the class. This event is called after any"
+        " non-can_retransform_classes ClassFileLoadHookEvents and before any"
+        " can_retransform_classes ClassFileLoadHookEvents. The transformations applied are"
+        " restricted in the same way that transformations applied via the "
+        " 'com.android.art.class.structurally_redefine_classes' extension function. The arguments"
+        " to the event are identical to the ones in the ClassFileLoadHook and have the same"
+        " semantics.",
+        {
+          { "jni_env", JVMTI_KIND_IN, JVMTI_TYPE_JNIENV, false },
+          { "class_being_redefined", JVMTI_KIND_IN, JVMTI_TYPE_JCLASS, true },
+          { "loader", JVMTI_KIND_IN, JVMTI_TYPE_JOBJECT, false },
+          { "name", JVMTI_KIND_IN_PTR, JVMTI_TYPE_CCHAR, false },
+          { "protection_domain", JVMTI_KIND_IN, JVMTI_TYPE_JOBJECT, true },
+          { "dex_data_len", JVMTI_KIND_IN, JVMTI_TYPE_JINT, false },
+          { "dex_data", JVMTI_KIND_IN_BUF, JVMTI_TYPE_CCHAR, false },
+          { "new_dex_data_len", JVMTI_KIND_OUT, JVMTI_TYPE_JINT, false },
+          { "new_dex_data", JVMTI_KIND_ALLOC_BUF, JVMTI_TYPE_CCHAR, true },
+        });
+  } else {
+    LOG(INFO) << "debuggable & jni-type indices are required to implement structural "
+              << "class redefinition extensions.";
+  }
   // Copy into output buffer.
 
   *extension_count_ptr = ext_vector.size();
diff --git a/openjdkjvmti/ti_field.cc b/openjdkjvmti/ti_field.cc
index 2a860d9..d4c0ec8 100644
--- a/openjdkjvmti/ti_field.cc
+++ b/openjdkjvmti/ti_field.cc
@@ -30,19 +30,100 @@
  */
 
 #include "ti_field.h"
+#include <unordered_map>
 
+#include "android-base/thread_annotations.h"
 #include "art_field-inl.h"
+#include "art_field.h"
 #include "art_jvmti.h"
 #include "base/enums.h"
+#include "base/locks.h"
 #include "dex/dex_file_annotations.h"
 #include "dex/modifiers.h"
 #include "jni/jni_internal.h"
 #include "mirror/object_array-inl.h"
+#include "reflective_value_visitor.h"
+#include "runtime.h"
+#include "runtime_callbacks.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread-current-inl.h"
 
 namespace openjdkjvmti {
 
+class JvmtiFieldReflectionSource : public art::ReflectionSourceInfo {
+ public:
+  JvmtiFieldReflectionSource(bool is_access, art::ArtField* f)
+      : art::ReflectionSourceInfo(art::ReflectionSourceType::kSourceMiscInternal),
+        is_access_(is_access),
+        f_(f) {}
+  void Describe(std::ostream& os) const override REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    art::ReflectionSourceInfo::Describe(os);
+    os << " jvmti Field" << (is_access_ ? "Access" : "Modification")
+       << "Watch Target=" << f_->PrettyField();
+  }
+
+ private:
+  bool is_access_;
+  art::ArtField* f_;
+};
+struct FieldReflectiveValueCallback : public art::ReflectiveValueVisitCallback {
+ public:
+  void VisitReflectiveTargets(art::ReflectiveValueVisitor* visitor)
+      REQUIRES(art::Locks::mutator_lock_) {
+    art::Thread* self = art::Thread::Current();
+    event_handler->ForEachEnv(self, [&](ArtJvmTiEnv* env) NO_THREAD_SAFETY_ANALYSIS {
+      art::Locks::mutator_lock_->AssertExclusiveHeld(self);
+      art::WriterMutexLock mu(self, env->event_info_mutex_);
+      std::vector<std::pair<art::ArtField*, art::ArtField*>> updated_access_fields;
+      for (auto it : env->access_watched_fields) {
+        art::ArtField* af =
+            visitor->VisitField(it, JvmtiFieldReflectionSource(/*is_access=*/true, it));
+        if (af != it) {
+          updated_access_fields.push_back({ af, it });
+        }
+      }
+      for (auto it : updated_access_fields) {
+        DCHECK(env->access_watched_fields.find(it.second) != env->access_watched_fields.end());
+        env->access_watched_fields.erase(it.second);
+        env->access_watched_fields.insert(it.first);
+      }
+      std::vector<std::pair<art::ArtField*, art::ArtField*>> updated_modify_fields;
+      for (auto it : env->modify_watched_fields) {
+        art::ArtField* af =
+            visitor->VisitField(it, JvmtiFieldReflectionSource(/*is_access=*/false, it));
+        if (af != it) {
+          updated_modify_fields.push_back({ af, it });
+        }
+      }
+      for (auto it : updated_modify_fields) {
+        DCHECK(env->modify_watched_fields.find(it.second) != env->modify_watched_fields.end());
+        env->modify_watched_fields.erase(it.second);
+        env->modify_watched_fields.insert(it.first);
+      }
+    });
+  }
+
+  EventHandler* event_handler = nullptr;
+};
+
+static FieldReflectiveValueCallback gReflectiveValueCallback;
+
+void FieldUtil::Register(EventHandler* eh) {
+  gReflectiveValueCallback.event_handler = eh;
+  art::ScopedThreadStateChange stsc(art::Thread::Current(),
+                                    art::ThreadState::kWaitingForDebuggerToAttach);
+  art::ScopedSuspendAll ssa("Add reflective value visit callback");
+  art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
+  callbacks->AddReflectiveValueVisitCallback(&gReflectiveValueCallback);
+}
+
+void FieldUtil::Unregister() {
+  art::ScopedThreadStateChange stsc(art::Thread::Current(),
+                                    art::ThreadState::kWaitingForDebuggerToAttach);
+  art::ScopedSuspendAll ssa("Remove reflective value visit callback");
+  art::RuntimeCallbacks* callbacks = art::Runtime::Current()->GetRuntimeCallbacks();
+  callbacks->RemoveReflectiveValueVisitCallback(&gReflectiveValueCallback);
+}
 // Note: For all these functions, we could do a check that the field actually belongs to the given
 //       class. But the spec seems to assume a certain encoding of the field ID, and so doesn't
 //       specify any errors.
@@ -95,8 +176,8 @@
           art::annotations::GetSignatureAnnotationForField(art_field);
       if (str_array != nullptr) {
         std::ostringstream oss;
-        for (int32_t i = 0; i != str_array->GetLength(); ++i) {
-          oss << str_array->Get(i)->ToModifiedUtf8();
+        for (auto str : str_array->Iterate()) {
+          oss << str->ToModifiedUtf8();
         }
         std::string output_string = oss.str();
         jvmtiError ret;
diff --git a/openjdkjvmti/ti_field.h b/openjdkjvmti/ti_field.h
index 3cf29f0..073c6cc 100644
--- a/openjdkjvmti/ti_field.h
+++ b/openjdkjvmti/ti_field.h
@@ -71,6 +71,9 @@
       REQUIRES(!ArtJvmTiEnv::event_info_mutex_);
   static jvmtiError ClearFieldAccessWatch(jvmtiEnv* env, jclass klass, jfieldID field)
       REQUIRES(!ArtJvmTiEnv::event_info_mutex_);
+
+  static void Register(EventHandler* eh);
+  static void Unregister();
 };
 
 }  // namespace openjdkjvmti
diff --git a/openjdkjvmti/ti_heap.cc b/openjdkjvmti/ti_heap.cc
index 3d99ed8..974a710 100644
--- a/openjdkjvmti/ti_heap.cc
+++ b/openjdkjvmti/ti_heap.cc
@@ -16,32 +16,63 @@
 
 #include "ti_heap.h"
 
+#include <ios>
+#include <unordered_map>
+
+#include "android-base/logging.h"
+#include "android-base/thread_annotations.h"
+#include "arch/context.h"
 #include "art_field-inl.h"
 #include "art_jvmti.h"
+#include "base/logging.h"
 #include "base/macros.h"
 #include "base/mutex.h"
+#include "base/utils.h"
 #include "class_linker.h"
+#include "class_root.h"
+#include "deopt_manager.h"
 #include "dex/primitive.h"
+#include "events-inl.h"
+#include "gc/collector_type.h"
+#include "gc/gc_cause.h"
 #include "gc/heap-visit-objects-inl.h"
-#include "gc/heap.h"
+#include "gc/heap-inl.h"
+#include "gc/scoped_gc_critical_section.h"
 #include "gc_root-inl.h"
+#include "handle.h"
+#include "handle_scope.h"
 #include "java_frame_root_info.h"
 #include "jni/jni_env_ext.h"
+#include "jni/jni_id_manager.h"
 #include "jni/jni_internal.h"
 #include "jvmti_weak_table-inl.h"
+#include "mirror/array-inl.h"
+#include "mirror/array.h"
 #include "mirror/class.h"
 #include "mirror/object-inl.h"
+#include "mirror/object-refvisitor-inl.h"
 #include "mirror/object_array-inl.h"
+#include "mirror/object_array-alloc-inl.h"
+#include "mirror/object_reference.h"
 #include "obj_ptr-inl.h"
+#include "object_callbacks.h"
 #include "object_tagging.h"
+#include "offsets.h"
+#include "read_barrier.h"
 #include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
 #include "stack.h"
 #include "thread-inl.h"
 #include "thread_list.h"
+#include "ti_logging.h"
+#include "ti_stack.h"
+#include "ti_thread.h"
+#include "well_known_classes.h"
 
 namespace openjdkjvmti {
 
+EventHandler* HeapExtensions::gEventHandler = nullptr;
+
 namespace {
 
 struct IndexCache {
@@ -671,11 +702,6 @@
     return ERR(INVALID_CLASS);
   }
   art::Handle<art::mirror::Class> filter_klass(hs.NewHandle(klass_ptr->AsClass()));
-  if (filter_klass->IsInterface()) {
-    // nothing is an 'instance' of an interface so just return without walking anything.
-    return OK;
-  }
-
   ObjectTagTable* tag_table = ArtJvmTiEnv::AsArtJvmTiEnv(env)->object_tag_table.get();
   bool stop_reports = false;
   auto visitor = [&](art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
@@ -1011,7 +1037,9 @@
           }
 
           auto& java_info = static_cast<const art::JavaFrameRootInfo&>(info);
-          ref_info->stack_local.slot = static_cast<jint>(java_info.GetVReg());
+          size_t vreg = java_info.GetVReg();
+          ref_info->stack_local.slot = static_cast<jint>(
+              vreg <= art::JavaFrameRootInfo::kMaxVReg ? vreg : -1);
           const art::StackVisitor* visitor = java_info.GetVisitor();
           ref_info->stack_local.location =
               static_cast<jlocation>(visitor->GetDexPc(/* abort_on_failure= */ false));
@@ -1140,16 +1168,14 @@
     if (array->IsObjectArray()) {
       art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> obj_array =
           array->AsObjectArray<art::mirror::Object>();
-      int32_t length = obj_array->GetLength();
-      for (int32_t i = 0; i != length; ++i) {
-        art::ObjPtr<art::mirror::Object> elem = obj_array->GetWithoutChecks(i);
-        if (elem != nullptr) {
+      for (auto elem_pair : art::ZipCount(obj_array->Iterate())) {
+        if (elem_pair.first != nullptr) {
           jvmtiHeapReferenceInfo reference_info;
-          reference_info.array.index = i;
+          reference_info.array.index = elem_pair.second;
           stop_reports_ = !ReportReferenceMaybeEnqueue(JVMTI_HEAP_REFERENCE_ARRAY_ELEMENT,
                                                        &reference_info,
                                                        array,
-                                                       elem.Ptr());
+                                                       elem_pair.first.Ptr());
           if (stop_reports_) {
             break;
           }
@@ -1378,6 +1404,7 @@
   }
   {
     art::ScopedObjectAccess soa(self);      // Now we know we have the shared lock.
+    art::jni::ScopedEnableSuspendAllJniIdQueries sjni;  // make sure we can get JNI ids.
     art::ScopedThreadSuspension sts(self, art::kWaitingForVisitObjects);
     art::ScopedSuspendAll ssa("FollowReferences");
 
@@ -1585,4 +1612,373 @@
                               user_data);
 }
 
+namespace {
+
+using ObjectPtr = art::ObjPtr<art::mirror::Object>;
+using ObjectMap = std::unordered_map<ObjectPtr, ObjectPtr, art::HashObjPtr>;
+
+static void ReplaceObjectReferences(const ObjectMap& map)
+    REQUIRES(art::Locks::mutator_lock_,
+             art::Roles::uninterruptible_) {
+  art::Runtime::Current()->GetHeap()->VisitObjectsPaused(
+      [&](art::mirror::Object* ref) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+        // Rewrite all references in the object if needed.
+        class ResizeReferenceVisitor {
+         public:
+          using CompressedObj = art::mirror::CompressedReference<art::mirror::Object>;
+          explicit ResizeReferenceVisitor(const ObjectMap& map, ObjectPtr ref)
+              : map_(map), ref_(ref) {}
+
+          // Ignore class roots.
+          void VisitRootIfNonNull(CompressedObj* root) const
+              REQUIRES_SHARED(art::Locks::mutator_lock_) {
+            if (root != nullptr) {
+              VisitRoot(root);
+            }
+          }
+          void VisitRoot(CompressedObj* root) const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+            auto it = map_.find(root->AsMirrorPtr());
+            if (it != map_.end()) {
+              root->Assign(it->second);
+              art::WriteBarrier::ForEveryFieldWrite(ref_);
+            }
+          }
+
+          void operator()(art::ObjPtr<art::mirror::Object> obj,
+                          art::MemberOffset off,
+                          bool is_static) const
+              REQUIRES_SHARED(art::Locks::mutator_lock_) {
+            auto it = map_.find(obj->GetFieldObject<art::mirror::Object>(off));
+            if (it != map_.end()) {
+              UNUSED(is_static);
+              if (UNLIKELY(!is_static && off == art::mirror::Object::ClassOffset())) {
+                // We don't want to update the declaring class of any objects. They will be replaced
+                // in the heap and we need the declaring class to know its size.
+                return;
+              } else if (UNLIKELY(!is_static && off == art::mirror::Class::SuperClassOffset() &&
+                                  obj->IsClass())) {
+                // We don't want to be messing with the class hierarcy either.
+                return;
+              }
+              VLOG(plugin) << "Updating field at offset " << off.Uint32Value() << " of type "
+                           << obj->GetClass()->PrettyClass();
+              obj->SetFieldObject</*transaction*/ false>(off, it->second);
+              art::WriteBarrier::ForEveryFieldWrite(obj);
+            }
+          }
+
+          // java.lang.ref.Reference visitor.
+          void operator()(art::ObjPtr<art::mirror::Class> klass ATTRIBUTE_UNUSED,
+                          art::ObjPtr<art::mirror::Reference> ref) const
+              REQUIRES_SHARED(art::Locks::mutator_lock_) {
+            operator()(ref, art::mirror::Reference::ReferentOffset(), /* is_static */ false);
+          }
+
+         private:
+          const ObjectMap& map_;
+          ObjectPtr ref_;
+        };
+
+        ResizeReferenceVisitor rrv(map, ref);
+        if (ref->IsClass()) {
+          // Class object native roots are the ArtField and ArtMethod 'declaring_class_' fields
+          // which we don't want to be messing with as it would break ref-visitor assumptions about
+          // what a class looks like. We want to keep the default behavior in other cases (such as
+          // dex-cache) though. Unfortunately there is no way to tell from the visitor where exactly
+          // the root came from.
+          // TODO It might be nice to have the visitors told where the reference came from.
+          ref->VisitReferences</*kVisitNativeRoots*/false>(rrv, rrv);
+        } else {
+          ref->VisitReferences</*kVisitNativeRoots*/true>(rrv, rrv);
+        }
+      });
+}
+
+static void ReplaceStrongRoots(art::Thread* self, const ObjectMap& map)
+    REQUIRES(art::Locks::mutator_lock_, art::Roles::uninterruptible_) {
+  // replace root references expcept java frames.
+  struct ResizeRootVisitor : public art::RootVisitor {
+   public:
+    explicit ResizeRootVisitor(const ObjectMap& map) : map_(map) {}
+
+    // TODO It's somewhat annoying to have to have this function implemented twice. It might be
+    // good/useful to implement operator= for CompressedReference to allow us to use a template to
+    // implement both of these.
+    void VisitRoots(art::mirror::Object*** roots, size_t count, const art::RootInfo& info) override
+        REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      art::mirror::Object*** end = roots + count;
+      for (art::mirror::Object** obj = *roots; roots != end; obj = *(++roots)) {
+        auto it = map_.find(*obj);
+        if (it != map_.end()) {
+          // Java frames might have the JIT doing optimizations (for example loop-unrolling or
+          // eliding bounds checks) so we need deopt them once we're done here.
+          if (info.GetType() == art::RootType::kRootJavaFrame) {
+            const art::JavaFrameRootInfo& jfri =
+                art::down_cast<const art::JavaFrameRootInfo&>(info);
+            if (jfri.GetVReg() == art::JavaFrameRootInfo::kMethodDeclaringClass) {
+              info.Describe(VLOG_STREAM(plugin) << "Not changing declaring-class during stack"
+                                                << " walk. Found obsolete java frame id ");
+              continue;
+            } else {
+              info.Describe(VLOG_STREAM(plugin) << "Found java frame id ");
+              threads_with_roots_.insert(info.GetThreadId());
+            }
+          }
+          *obj = it->second.Ptr();
+        }
+      }
+    }
+
+    void VisitRoots(art::mirror::CompressedReference<art::mirror::Object>** roots,
+                    size_t count,
+                    const art::RootInfo& info) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      art::mirror::CompressedReference<art::mirror::Object>** end = roots + count;
+      for (art::mirror::CompressedReference<art::mirror::Object>* obj = *roots; roots != end;
+           obj = *(++roots)) {
+        auto it = map_.find(obj->AsMirrorPtr());
+        if (it != map_.end()) {
+          // Java frames might have the JIT doing optimizations (for example loop-unrolling or
+          // eliding bounds checks) so we need deopt them once we're done here.
+          if (info.GetType() == art::RootType::kRootJavaFrame) {
+            const art::JavaFrameRootInfo& jfri =
+                art::down_cast<const art::JavaFrameRootInfo&>(info);
+            if (jfri.GetVReg() == art::JavaFrameRootInfo::kMethodDeclaringClass) {
+              info.Describe(VLOG_STREAM(plugin) << "Not changing declaring-class during stack"
+                                                << " walk. Found obsolete java frame id ");
+              continue;
+            } else {
+              info.Describe(VLOG_STREAM(plugin) << "Found java frame id ");
+              threads_with_roots_.insert(info.GetThreadId());
+            }
+          }
+          obj->Assign(it->second);
+        }
+      }
+    }
+
+    const std::unordered_set<uint32_t>& GetThreadsWithJavaFrameRoots() const {
+      return threads_with_roots_;
+    }
+
+   private:
+    const ObjectMap& map_;
+    std::unordered_set<uint32_t> threads_with_roots_;
+  };
+  ResizeRootVisitor rrv(map);
+  art::Runtime::Current()->VisitRoots(&rrv, art::VisitRootFlags::kVisitRootFlagAllRoots);
+  // Handle java Frames. Annoyingly the JIT can embed information about the length of the array into
+  // the compiled code. By changing the length of the array we potentially invalidate these
+  // assumptions and so could cause (eg) OOB array access or other issues.
+  if (!rrv.GetThreadsWithJavaFrameRoots().empty()) {
+    art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+    art::ThreadList* thread_list = art::Runtime::Current()->GetThreadList();
+    art::instrumentation::Instrumentation* instr = art::Runtime::Current()->GetInstrumentation();
+    for (uint32_t id : rrv.GetThreadsWithJavaFrameRoots()) {
+      art::Thread* t = thread_list->FindThreadByThreadId(id);
+      CHECK(t != nullptr) << "id " << id << " does not refer to a valid thread."
+                          << " Where did the roots come from?";
+      VLOG(plugin) << "Instrumenting thread stack of thread " << *t;
+      // TODO Use deopt manager. We need a version that doesn't acquire all the locks we
+      // already have.
+      // TODO We technically only need to do this if the frames are not already being interpreted.
+      // The cost for doing an extra stack walk is unlikely to be worth it though.
+      instr->InstrumentThreadStack(t);
+    }
+  }
+}
+
+static void ReplaceWeakRoots(art::Thread* self,
+                             EventHandler* event_handler,
+                             const ObjectMap& map)
+    REQUIRES(art::Locks::mutator_lock_, art::Roles::uninterruptible_) {
+  // Handle tags. We want to do this seprately from other weak-refs (handled below) because we need
+  // to send additional events and handle cases where the agent might have tagged the new
+  // replacement object during the VMObjectAlloc. We do this by removing all tags associated with
+  // both the obsolete and the new arrays. Then we send the ObsoleteObjectCreated event and cache
+  // the new tag values. We next update all the other weak-references (the tags have been removed)
+  // and finally update the tag table with the new values. Doing things in this way (1) keeps all
+  // code relating to updating weak-references together and (2) ensures we don't end up in strange
+  // situations where the order of weak-ref visiting affects the final tagging state. Since we have
+  // the mutator_lock_ and gc-paused throughout this whole process no threads should be able to see
+  // the interval where the objects are not tagged.
+  struct NewTagValue {
+   public:
+    ObjectPtr obsolete_obj_;
+    jlong obsolete_tag_;
+    ObjectPtr new_obj_;
+    jlong new_tag_;
+  };
+
+  // Map from the environment to the list of <obsolete_tag, new_tag> pairs that were changed.
+  std::unordered_map<ArtJvmTiEnv*, std::vector<NewTagValue>> changed_tags;
+  event_handler->ForEachEnv(self, [&](ArtJvmTiEnv* env) {
+    // Cannot have REQUIRES(art::Locks::mutator_lock_) since ForEachEnv doesn't require it.
+    art::Locks::mutator_lock_->AssertExclusiveHeld(self);
+    env->object_tag_table->Lock();
+    // Get the tags and clear them (so we don't need to special-case the normal weak-ref visitor)
+    for (auto it : map) {
+      jlong new_tag = 0;
+      jlong obsolete_tag = 0;
+      bool had_obsolete_tag = env->object_tag_table->RemoveLocked(it.first, &obsolete_tag);
+      bool had_new_tag = env->object_tag_table->RemoveLocked(it.second, &new_tag);
+      // Dispatch event.
+      if (had_obsolete_tag || had_new_tag) {
+        event_handler->DispatchEventOnEnv<ArtJvmtiEvent::kObsoleteObjectCreated>(
+            env, self, &obsolete_tag, &new_tag);
+        changed_tags.try_emplace(env).first->second.push_back(
+            { it.first, obsolete_tag, it.second, new_tag });
+      }
+    }
+    // After weak-ref update we need to go back and re-add obsoletes. We wait to avoid having to
+    // deal with the visit-weaks overwriting the initial new_obj_ptr tag and generally making things
+    // difficult.
+    env->object_tag_table->Unlock();
+  });
+  // Handle weak-refs.
+  struct ReplaceWeaksVisitor : public art::IsMarkedVisitor {
+   public:
+    ReplaceWeaksVisitor(const ObjectMap& map) : map_(map) {}
+
+    art::mirror::Object* IsMarked(art::mirror::Object* obj)
+        REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      auto it = map_.find(obj);
+      if (it != map_.end()) {
+        return it->second.Ptr();
+      } else {
+        return obj;
+      }
+    }
+
+   private:
+    const ObjectMap& map_;
+  };
+  ReplaceWeaksVisitor rwv(map);
+  art::Runtime::Current()->SweepSystemWeaks(&rwv);
+  // Re-add the object tags. At this point all weak-references to the old_obj_ptr are gone.
+  event_handler->ForEachEnv(self, [&](ArtJvmTiEnv* env) {
+    // Cannot have REQUIRES(art::Locks::mutator_lock_) since ForEachEnv doesn't require it.
+    art::Locks::mutator_lock_->AssertExclusiveHeld(self);
+    env->object_tag_table->Lock();
+    auto it = changed_tags.find(env);
+    if (it != changed_tags.end()) {
+      for (const NewTagValue& v : it->second) {
+        env->object_tag_table->SetLocked(v.obsolete_obj_, v.obsolete_tag_);
+        env->object_tag_table->SetLocked(v.new_obj_, v.new_tag_);
+      }
+    }
+    env->object_tag_table->Unlock();
+  });
+}
+
+}  // namespace
+
+void HeapExtensions::ReplaceReference(art::Thread* self,
+                                      art::ObjPtr<art::mirror::Object> old_obj_ptr,
+                                      art::ObjPtr<art::mirror::Object> new_obj_ptr) {
+  ObjectMap map { { old_obj_ptr, new_obj_ptr } };
+  ReplaceReferences(self, map);
+}
+
+void HeapExtensions::ReplaceReferences(art::Thread* self, const ObjectMap& map) {
+  ReplaceObjectReferences(map);
+  ReplaceStrongRoots(self, map);
+  ReplaceWeakRoots(self, HeapExtensions::gEventHandler, map);
+}
+
+jvmtiError HeapExtensions::ChangeArraySize(jvmtiEnv* env, jobject arr, jsize new_size) {
+  if (ArtJvmTiEnv::AsArtJvmTiEnv(env)->capabilities.can_tag_objects != 1) {
+    return ERR(MUST_POSSESS_CAPABILITY);
+  }
+  art::Thread* self = art::Thread::Current();
+  ScopedNoUserCodeSuspension snucs(self);
+  art::ScopedObjectAccess soa(self);
+  if (arr == nullptr) {
+    JVMTI_LOG(INFO, env) << "Cannot resize a null object";
+    return ERR(NULL_POINTER);
+  }
+  art::ObjPtr<art::mirror::Class> klass(soa.Decode<art::mirror::Object>(arr)->GetClass());
+  if (!klass->IsArrayClass()) {
+    JVMTI_LOG(INFO, env) << klass->PrettyClass() << " is not an array class!";
+    return ERR(ILLEGAL_ARGUMENT);
+  }
+  if (new_size < 0) {
+    JVMTI_LOG(INFO, env) << "Cannot resize an array to a negative size";
+    return ERR(ILLEGAL_ARGUMENT);
+  }
+  // Allocate the new copy.
+  art::StackHandleScope<2> hs(self);
+  art::Handle<art::mirror::Array> old_arr(hs.NewHandle(soa.Decode<art::mirror::Array>(arr)));
+  art::MutableHandle<art::mirror::Array> new_arr(hs.NewHandle<art::mirror::Array>(nullptr));
+  if (klass->IsObjectArrayClass()) {
+    new_arr.Assign(
+        art::mirror::ObjectArray<art::mirror::Object>::Alloc(self, old_arr->GetClass(), new_size));
+  } else {
+    // NB This also copies the old array but since we aren't suspended we need to do this again to
+    // catch any concurrent modifications.
+    new_arr.Assign(art::mirror::Array::CopyOf(old_arr, self, new_size));
+  }
+  if (new_arr.IsNull()) {
+    self->AssertPendingOOMException();
+    JVMTI_LOG(INFO, env) << "Unable to allocate " << old_arr->GetClass()->PrettyClass()
+                         << " (length: " << new_size << ") due to OOME. Error was: "
+                         << self->GetException()->Dump();
+    self->ClearException();
+    return ERR(OUT_OF_MEMORY);
+  } else {
+    self->AssertNoPendingException();
+  }
+  // Suspend everything.
+  art::ScopedThreadSuspension sts(self, art::ThreadState::kSuspended);
+  art::gc::ScopedGCCriticalSection sgccs(
+      self, art::gc::GcCause::kGcCauseDebugger, art::gc::CollectorType::kCollectorTypeDebugger);
+  art::ScopedSuspendAll ssa("Resize array!");
+  // Replace internals.
+  new_arr->SetLockWord(old_arr->GetLockWord(false), false);
+  old_arr->SetLockWord(art::LockWord::Default(), false);
+  // Copy the contents now when everything is suspended.
+  int32_t size = std::min(old_arr->GetLength(), new_size);
+  switch (old_arr->GetClass()->GetComponentType()->GetPrimitiveType()) {
+    case art::Primitive::kPrimBoolean:
+      new_arr->AsBooleanArray()->Memcpy(0, old_arr->AsBooleanArray(), 0, size);
+      break;
+    case art::Primitive::kPrimByte:
+      new_arr->AsByteArray()->Memcpy(0, old_arr->AsByteArray(), 0, size);
+      break;
+    case art::Primitive::kPrimChar:
+      new_arr->AsCharArray()->Memcpy(0, old_arr->AsCharArray(), 0, size);
+      break;
+    case art::Primitive::kPrimShort:
+      new_arr->AsShortArray()->Memcpy(0, old_arr->AsShortArray(), 0, size);
+      break;
+    case art::Primitive::kPrimInt:
+      new_arr->AsIntArray()->Memcpy(0, old_arr->AsIntArray(), 0, size);
+      break;
+    case art::Primitive::kPrimLong:
+      new_arr->AsLongArray()->Memcpy(0, old_arr->AsLongArray(), 0, size);
+      break;
+    case art::Primitive::kPrimFloat:
+      new_arr->AsFloatArray()->Memcpy(0, old_arr->AsFloatArray(), 0, size);
+      break;
+    case art::Primitive::kPrimDouble:
+      new_arr->AsDoubleArray()->Memcpy(0, old_arr->AsDoubleArray(), 0, size);
+      break;
+    case art::Primitive::kPrimNot:
+      for (int32_t i = 0; i < size; i++) {
+        new_arr->AsObjectArray<art::mirror::Object>()->Set(
+            i, old_arr->AsObjectArray<art::mirror::Object>()->Get(i));
+      }
+      break;
+    case art::Primitive::kPrimVoid:
+      LOG(FATAL) << "void-array is not a legal type!";
+      UNREACHABLE();
+  }
+  // Actually replace all the pointers.
+  ReplaceReference(self, old_arr.Get(), new_arr.Get());
+  return OK;
+}
+
+void HeapExtensions::Register(EventHandler* eh) {
+  gEventHandler = eh;
+}
+
 }  // namespace openjdkjvmti
diff --git a/openjdkjvmti/ti_heap.h b/openjdkjvmti/ti_heap.h
index 382d80f..ee8b4d6 100644
--- a/openjdkjvmti/ti_heap.h
+++ b/openjdkjvmti/ti_heap.h
@@ -17,10 +17,24 @@
 #ifndef ART_OPENJDKJVMTI_TI_HEAP_H_
 #define ART_OPENJDKJVMTI_TI_HEAP_H_
 
+#include <unordered_map>
+
 #include "jvmti.h"
 
+#include "base/locks.h"
+
+namespace art {
+class Thread;
+template<typename T> class ObjPtr;
+class HashObjPtr;
+namespace mirror {
+class Object;
+}  // namespace mirror
+}  // namespace art
+
 namespace openjdkjvmti {
 
+class EventHandler;
 class ObjectTagTable;
 
 class HeapUtil {
@@ -64,6 +78,8 @@
 
 class HeapExtensions {
  public:
+  static void Register(EventHandler* eh);
+
   static jvmtiError JNICALL GetObjectHeapId(jvmtiEnv* env, jlong tag, jint* heap_id, ...);
   static jvmtiError JNICALL GetHeapName(jvmtiEnv* env, jint heap_id, char** heap_name, ...);
 
@@ -72,6 +88,24 @@
                                                   jclass klass,
                                                   const jvmtiHeapCallbacks* callbacks,
                                                   const void* user_data);
+
+  static jvmtiError JNICALL ChangeArraySize(jvmtiEnv* env, jobject arr, jsize new_size);
+
+  static void ReplaceReferences(
+      art::Thread* self,
+      const std::unordered_map<art::ObjPtr<art::mirror::Object>,
+                               art::ObjPtr<art::mirror::Object>,
+                               art::HashObjPtr>& refs)
+        REQUIRES(art::Locks::mutator_lock_, art::Roles::uninterruptible_);
+
+  static void ReplaceReference(art::Thread* self,
+                               art::ObjPtr<art::mirror::Object> original,
+                               art::ObjPtr<art::mirror::Object> replacement)
+      REQUIRES(art::Locks::mutator_lock_,
+               art::Roles::uninterruptible_);
+
+ private:
+  static EventHandler* gEventHandler;
 };
 
 }  // namespace openjdkjvmti
diff --git a/openjdkjvmti/ti_logging.h b/openjdkjvmti/ti_logging.h
index a1be090..b4ce5b6 100644
--- a/openjdkjvmti/ti_logging.h
+++ b/openjdkjvmti/ti_logging.h
@@ -50,7 +50,6 @@
   ::openjdkjvmti::JvmtiLogMessage((env),                     \
                                   __FILE__,                  \
                                   __LINE__,                  \
-                                  ::android::base::DEFAULT,  \
                                   SEVERITY_LAMBDA(severity), \
                                   _LOG_TAG_INTERNAL,         \
                                   -1)
@@ -60,12 +59,11 @@
   JvmtiLogMessage(jvmtiEnv* env,
                   const char* file,
                   unsigned int line,
-                  android::base::LogId id,
                   android::base::LogSeverity severity,
                   const char* tag,
                   int error)
       : env_(ArtJvmTiEnv::AsArtJvmTiEnv(env)),
-        real_log_(file, line, id, severity, tag, error),
+        real_log_(file, line, severity, tag, error),
         real_log_stream_(real_log_.stream()) {
     DCHECK(env_ != nullptr);
   }
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index a4b579b..e7f071f 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -31,22 +31,33 @@
 
 #include "ti_method.h"
 
+#include <initializer_list>
 #include <type_traits>
+#include <variant>
 
+#include "android-base/macros.h"
 #include "arch/context.h"
 #include "art_jvmti.h"
 #include "art_method-inl.h"
 #include "base/enums.h"
+#include "base/globals.h"
+#include "base/macros.h"
 #include "base/mutex-inl.h"
 #include "deopt_manager.h"
 #include "dex/code_item_accessors-inl.h"
+#include "dex/code_item_accessors.h"
 #include "dex/dex_file_annotations.h"
 #include "dex/dex_file_types.h"
+#include "dex/dex_instruction.h"
+#include "dex/dex_instruction_iterator.h"
 #include "dex/modifiers.h"
+#include "dex/primitive.h"
 #include "events-inl.h"
 #include "gc_root-inl.h"
+#include "handle.h"
 #include "jit/jit.h"
 #include "jni/jni_internal.h"
+#include "jvmti.h"
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
 #include "mirror/object-inl.h"
@@ -54,15 +65,21 @@
 #include "nativehelper/scoped_local_ref.h"
 #include "oat_file.h"
 #include "obj_ptr.h"
+#include "runtime.h"
 #include "runtime_callbacks.h"
 #include "scoped_thread_state_change-inl.h"
+#include "scoped_thread_state_change.h"
 #include "stack.h"
 #include "thread-current-inl.h"
 #include "thread.h"
 #include "thread_list.h"
+#include "ti_logging.h"
 #include "ti_stack.h"
 #include "ti_thread.h"
 #include "ti_phase.h"
+#include "verifier/register_line-inl.h"
+#include "verifier/reg_type-inl.h"
+#include "verifier/method_verifier-inl.h"
 
 namespace openjdkjvmti {
 
@@ -77,12 +94,13 @@
       ScopedLocalRef<jthread> thread_jni(
           jnienv, PhaseUtil::IsLivePhase() ? jnienv->AddLocalReference<jthread>(thread->GetPeer())
                                            : nullptr);
+      jmethodID method_id = art::jni::EncodeArtMethod(method);
       art::ScopedThreadSuspension sts(thread, art::ThreadState::kNative);
       event_handler->DispatchEvent<ArtJvmtiEvent::kNativeMethodBind>(
           thread,
           static_cast<JNIEnv*>(jnienv),
           thread_jni.get(),
-          art::jni::EncodeArtMethod(method),
+          method_id,
           const_cast<void*>(cur_method),
           new_method);
     }
@@ -336,8 +354,8 @@
           art::annotations::GetSignatureAnnotationForMethod(art_method);
       if (str_array != nullptr) {
         std::ostringstream oss;
-        for (int32_t i = 0; i != str_array->GetLength(); ++i) {
-          oss << str_array->Get(i)->ToModifiedUtf8();
+        for (auto str : str_array->Iterate()) {
+          oss << str->ToModifiedUtf8();
         }
         std::string output_string = oss.str();
         jvmtiError ret;
@@ -526,10 +544,21 @@
 
 class CommonLocalVariableClosure : public art::Closure {
  public:
-  CommonLocalVariableClosure(jint depth, jint slot)
-      : result_(ERR(INTERNAL)), depth_(depth), slot_(slot) {}
+  // The verifier isn't always able to be as specific as the local-variable-table. We can only get
+  // 32-bit, 64-bit or reference.
+  enum class VerifierPrimitiveType {
+    k32BitValue,  // float, int, short, char, boolean, byte
+    k64BitValue,  // double, long
+    kReferenceValue,  // Object
+    kZeroValue,  // null or zero constant. Might be either k32BitValue or kReferenceValue
+  };
 
-  void Run(art::Thread* self) override REQUIRES(art::Locks::mutator_lock_) {
+  using SlotType = std::variant<art::Primitive::Type, VerifierPrimitiveType>;
+
+  CommonLocalVariableClosure(jvmtiEnv* jvmti, jint depth, jint slot)
+      : jvmti_(jvmti), result_(ERR(INTERNAL)), depth_(depth), slot_(slot) {}
+
+  void Run(art::Thread* self) override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
     bool needs_instrument;
     {
@@ -548,7 +577,7 @@
         // TODO It might be useful to fake up support for get at least on proxy frames.
         result_ = ERR(OPAQUE_FRAME);
         return;
-      } else if (method->DexInstructionData().RegistersSize() <= slot_) {
+      } else if (slot_ >= method->DexInstructionData().RegistersSize() || slot_ < 0) {
         result_ = ERR(INVALID_SLOT);
         return;
       }
@@ -560,7 +589,7 @@
         return;
       }
       std::string descriptor;
-      art::Primitive::Type slot_type = art::Primitive::kPrimVoid;
+      SlotType slot_type{ art::Primitive::kPrimVoid };
       jvmtiError err = GetSlotType(method, pc, &descriptor, &slot_type);
       if (err != OK) {
         result_ = err;
@@ -587,56 +616,190 @@
   virtual jvmtiError Execute(art::ArtMethod* method, art::StackVisitor& visitor)
       REQUIRES_SHARED(art::Locks::mutator_lock_) = 0;
   virtual jvmtiError GetTypeError(art::ArtMethod* method,
-                                  art::Primitive::Type type,
+                                  SlotType type,
                                   const std::string& descriptor)
       REQUIRES_SHARED(art::Locks::mutator_lock_)  = 0;
 
   jvmtiError GetSlotType(art::ArtMethod* method,
                          uint32_t dex_pc,
                          /*out*/std::string* descriptor,
-                         /*out*/art::Primitive::Type* type)
-      REQUIRES(art::Locks::mutator_lock_) {
-    const art::DexFile* dex_file = method->GetDexFile();
-    if (dex_file == nullptr) {
+                         /*out*/SlotType* type)
+      REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+  jvmtiError InferSlotTypeFromVerifier(art::ArtMethod* method,
+                                       uint32_t dex_pc,
+                                       /*out*/ std::string* descriptor,
+                                       /*out*/ SlotType* type)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    art::Thread* self = art::Thread::Current();
+    art::StackHandleScope<2> hs(self);
+    std::unique_ptr<art::verifier::MethodVerifier> verifier(
+        art::verifier::MethodVerifier::CalculateVerificationInfo(
+            self,
+            method,
+            hs.NewHandle(method->GetDexCache()),
+            hs.NewHandle(method->GetDeclaringClass()->GetClassLoader())));
+    if (verifier == nullptr) {
+      JVMTI_LOG(WARNING, jvmti_) << "Unable to extract verification information from "
+                                 << method->PrettyMethod() << " due to hard verification failures! "
+                                 << "How did this method even get loaded!";
+      return ERR(INTERNAL);
+    }
+    art::verifier::RegisterLine* line = verifier->GetRegLine(dex_pc);
+    if (line == nullptr) {
+      JVMTI_LOG(WARNING, jvmti_) << "Unable to determine register line at dex-pc " << dex_pc
+                                 << " for method " << method->PrettyMethod();
       return ERR(OPAQUE_FRAME);
     }
-    art::CodeItemDebugInfoAccessor accessor(method->DexInstructionDebugInfo());
-    if (!accessor.HasCodeItem()) {
-      return ERR(OPAQUE_FRAME);
-    }
-    bool found = false;
-    *type = art::Primitive::kPrimVoid;
-    descriptor->clear();
-    auto visitor = [&](const art::DexFile::LocalInfo& entry) {
-      if (!found &&
-          entry.start_address_ <= dex_pc &&
-          entry.end_address_ > dex_pc &&
-          entry.reg_ == slot_) {
-        found = true;
-        *type = art::Primitive::GetType(entry.descriptor_[0]);
-        *descriptor = entry.descriptor_;
-      }
-    };
-    if (!accessor.DecodeDebugLocalInfo(method->IsStatic(), method->GetDexMethodIndex(), visitor) ||
-        !found) {
-      // Something went wrong with decoding the debug information. It might as well not be there.
+    const art::verifier::RegType& rt = line->GetRegisterType(verifier.get(), slot_);
+    if (rt.IsUndefined()) {
+      return ERR(INVALID_SLOT);
+    } else if (rt.IsNonZeroReferenceTypes() || rt.IsNull()) {
+      *descriptor = (rt.HasClass() ? rt.GetDescriptor() : "Ljava/lang/Object;");
+      *type = VerifierPrimitiveType::kReferenceValue;
+      return OK;
+    } else if (rt.IsZero()) {
+      *descriptor = "I";
+      *type = VerifierPrimitiveType::kZeroValue;
+      return OK;
+    } else if (rt.IsCategory1Types()) {
+      *descriptor = "I";
+      *type = VerifierPrimitiveType::k32BitValue;
+      return OK;
+    } else if (rt.IsCategory2Types() && rt.IsLowHalf()) {
+      *descriptor = "J";
+      *type = VerifierPrimitiveType::k64BitValue;
+      return OK;
+    } else {
+      // The slot doesn't have a type. Must not be valid here.
       return ERR(INVALID_SLOT);
     }
-    return OK;
   }
 
+  constexpr VerifierPrimitiveType SquashType(SlotType t) {
+    if (std::holds_alternative<art::Primitive::Type>(t)) {
+      switch (std::get<art::Primitive::Type>(t)) {
+        // 32-bit primitives
+        case art::Primitive::kPrimByte:
+        case art::Primitive::kPrimChar:
+        case art::Primitive::kPrimInt:
+        case art::Primitive::kPrimShort:
+        case art::Primitive::kPrimBoolean:
+        case art::Primitive::kPrimFloat:
+          return VerifierPrimitiveType::k32BitValue;
+        // 64-bit primitives
+        case art::Primitive::kPrimLong:
+        case art::Primitive::kPrimDouble:
+          return VerifierPrimitiveType::k64BitValue;
+        case art::Primitive::kPrimNot:
+          return VerifierPrimitiveType::kReferenceValue;
+        case art::Primitive::kPrimVoid:
+          LOG(FATAL) << "Got kPrimVoid";
+          UNREACHABLE();
+      }
+    } else {
+      return std::get<VerifierPrimitiveType>(t);
+    }
+  }
+
+  jvmtiEnv* jvmti_;
   jvmtiError result_;
   jint depth_;
   jint slot_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(CommonLocalVariableClosure);
 };
 
+std::ostream& operator<<(std::ostream& os,
+                         CommonLocalVariableClosure::VerifierPrimitiveType state) {
+  switch (state) {
+    case CommonLocalVariableClosure::VerifierPrimitiveType::k32BitValue:
+      return os << "32BitValue";
+    case CommonLocalVariableClosure::VerifierPrimitiveType::k64BitValue:
+      return os << "64BitValue";
+    case CommonLocalVariableClosure::VerifierPrimitiveType::kReferenceValue:
+      return os << "ReferenceValue";
+    case CommonLocalVariableClosure::VerifierPrimitiveType::kZeroValue:
+      return os << "ZeroValue";
+  }
+}
+
+std::ostream& operator<<(std::ostream& os, CommonLocalVariableClosure::SlotType state) {
+  if (std::holds_alternative<art::Primitive::Type>(state)) {
+    return os << "Primitive::Type[" << std::get<art::Primitive::Type>(state) << "]";
+  } else {
+    return os << "VerifierPrimitiveType["
+              << std::get<CommonLocalVariableClosure::VerifierPrimitiveType>(state) << "]";
+  }
+}
+
+jvmtiError CommonLocalVariableClosure::GetSlotType(art::ArtMethod* method,
+                                                   uint32_t dex_pc,
+                                                   /*out*/ std::string* descriptor,
+                                                   /*out*/ SlotType* type) {
+  const art::DexFile* dex_file = method->GetDexFile();
+  if (dex_file == nullptr) {
+    return ERR(OPAQUE_FRAME);
+  }
+  art::CodeItemDebugInfoAccessor accessor(method->DexInstructionDebugInfo());
+  if (!accessor.HasCodeItem()) {
+    return ERR(OPAQUE_FRAME);
+  }
+  bool found = false;
+  *type = art::Primitive::kPrimVoid;
+  descriptor->clear();
+  auto visitor = [&](const art::DexFile::LocalInfo& entry) {
+    if (!found && entry.start_address_ <= dex_pc && entry.end_address_ > dex_pc &&
+        entry.reg_ == slot_) {
+      found = true;
+      *type = art::Primitive::GetType(entry.descriptor_[0]);
+      *descriptor = entry.descriptor_;
+    }
+  };
+  if (!accessor.DecodeDebugLocalInfo(method->IsStatic(), method->GetDexMethodIndex(), visitor) ||
+      !found) {
+    // Something went wrong with decoding the debug information. It might as well not be there.
+    // Try to find the type with the verifier.
+    // TODO This is very slow.
+    return InferSlotTypeFromVerifier(method, dex_pc, descriptor, type);
+  } else if (art::kIsDebugBuild) {
+    std::string type_unused;
+    SlotType verifier_type{ art::Primitive::kPrimVoid };
+    DCHECK_EQ(InferSlotTypeFromVerifier(method, dex_pc, &type_unused, &verifier_type), OK)
+        << method->PrettyMethod() << " failed to verify!";
+    if (*type == SlotType{ art::Primitive::kPrimNot }) {
+      // We cannot distinguish between a constant 0 and a null reference so we return that it is a
+      // 32bit value (Due to the way references are read by the interpreter this is safe even if
+      // it's modified, the value will remain null). This is not ideal since it prevents modifying
+      // locals in some circumstances but generally is not a big deal (since one can just modify it
+      // later once it's been determined to be a reference by a later instruction).
+      DCHECK(verifier_type == SlotType { VerifierPrimitiveType::kZeroValue } ||
+             verifier_type == SlotType { VerifierPrimitiveType::kReferenceValue })
+          << "Verifier disagrees on type of slot! debug: " << *type
+          << " verifier: " << verifier_type;
+    } else if (verifier_type == SlotType { VerifierPrimitiveType::kZeroValue }) {
+      DCHECK(VerifierPrimitiveType::k32BitValue == SquashType(*type) ||
+             VerifierPrimitiveType::kReferenceValue == SquashType(*type))
+          << "Verifier disagrees on type of slot! debug: " << *type
+          << " verifier: " << verifier_type;
+    } else {
+      DCHECK_EQ(SquashType(verifier_type), SquashType(*type))
+          << "Verifier disagrees on type of slot! debug: " << *type
+          << " verifier: " << verifier_type;
+    }
+  }
+  return OK;
+}
+
 class GetLocalVariableClosure : public CommonLocalVariableClosure {
  public:
-  GetLocalVariableClosure(jint depth,
+  GetLocalVariableClosure(jvmtiEnv* jvmti,
+                          jint depth,
                           jint slot,
                           art::Primitive::Type type,
                           jvalue* val)
-      : CommonLocalVariableClosure(depth, slot),
+      : CommonLocalVariableClosure(jvmti, depth, slot),
         type_(type),
         val_(val),
         obj_val_(nullptr) {}
@@ -656,22 +819,61 @@
   }
 
  protected:
-  jvmtiError GetTypeError(art::ArtMethod* method ATTRIBUTE_UNUSED,
-                          art::Primitive::Type slot_type,
-                          const std::string& descriptor ATTRIBUTE_UNUSED)
-      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
-    switch (slot_type) {
-      case art::Primitive::kPrimByte:
-      case art::Primitive::kPrimChar:
-      case art::Primitive::kPrimInt:
-      case art::Primitive::kPrimShort:
-      case art::Primitive::kPrimBoolean:
-        return type_ == art::Primitive::kPrimInt ? OK : ERR(TYPE_MISMATCH);
-      case art::Primitive::kPrimLong:
+  jvmtiError
+  GetTypeError(art::ArtMethod* method, SlotType slot_type, const std::string& descriptor) override
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    jvmtiError res = GetTypeErrorInner(method, slot_type, descriptor);
+    if (res == ERR(TYPE_MISMATCH)) {
+      JVMTI_LOG(INFO, jvmti_) << "Unable to Get local variable in slot " << slot_ << ". Expected"
+                              << " slot to be of type compatible with " << SlotType { type_ }
+                              << " but slot is " << slot_type;
+    } else if (res != OK) {
+      JVMTI_LOG(INFO, jvmti_) << "Unable to get local variable in slot " << slot_ << ".";
+    }
+    return res;
+  }
+
+  jvmtiError GetTypeErrorInner(art::ArtMethod* method ATTRIBUTE_UNUSED,
+                               SlotType slot_type,
+                               const std::string& descriptor ATTRIBUTE_UNUSED)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    switch (type_) {
       case art::Primitive::kPrimFloat:
-      case art::Primitive::kPrimDouble:
+      case art::Primitive::kPrimInt: {
+        if (std::holds_alternative<VerifierPrimitiveType>(slot_type)) {
+          return (slot_type == SlotType { VerifierPrimitiveType::k32BitValue } ||
+                  slot_type == SlotType { VerifierPrimitiveType::kZeroValue })
+                     ? OK
+                     : ERR(TYPE_MISMATCH);
+        } else if (type_ == art::Primitive::kPrimFloat ||
+                   slot_type == SlotType { art::Primitive::kPrimFloat }) {
+          // Check that we are actually a float.
+          return (SlotType { type_ } == slot_type) ? OK : ERR(TYPE_MISMATCH);
+        } else {
+          // Some smaller int type.
+          return SquashType(slot_type) == SquashType(SlotType { type_ }) ? OK : ERR(TYPE_MISMATCH);
+        }
+      }
+      case art::Primitive::kPrimLong:
+      case art::Primitive::kPrimDouble: {
+        // todo
+        if (std::holds_alternative<VerifierPrimitiveType>(slot_type)) {
+          return (slot_type == SlotType { VerifierPrimitiveType::k64BitValue })
+                     ? OK
+                     : ERR(TYPE_MISMATCH);
+        } else {
+          return slot_type == SlotType { type_ } ? OK : ERR(TYPE_MISMATCH);
+        }
+      }
       case art::Primitive::kPrimNot:
-        return type_ == slot_type ? OK : ERR(TYPE_MISMATCH);
+        return (SquashType(slot_type) == VerifierPrimitiveType::kReferenceValue ||
+                SquashType(slot_type) == VerifierPrimitiveType::kZeroValue)
+                   ? OK
+                   : ERR(TYPE_MISMATCH);
+      case art::Primitive::kPrimShort:
+      case art::Primitive::kPrimChar:
+      case art::Primitive::kPrimByte:
+      case art::Primitive::kPrimBoolean:
       case art::Primitive::kPrimVoid:
         LOG(FATAL) << "Unexpected primitive type " << slot_type;
         UNREACHABLE();
@@ -689,11 +891,8 @@
                              &ptr_val)) {
           return ERR(OPAQUE_FRAME);
         }
-        art::JNIEnvExt* jni = art::Thread::Current()->GetJniEnv();
         art::ObjPtr<art::mirror::Object> obj(reinterpret_cast<art::mirror::Object*>(ptr_val));
-        ScopedLocalRef<jobject> local(
-            jni, obj.IsNull() ? nullptr : jni->AddLocalReference<jobject>(obj));
-        obj_val_ = jni->NewGlobalRef(local.get());
+        obj_val_ = art::Runtime::Current()->GetJavaVM()->AddGlobalRef(art::Thread::Current(), obj);
         break;
       }
       case art::Primitive::kPrimInt:
@@ -735,7 +934,7 @@
   jobject obj_val_;
 };
 
-jvmtiError MethodUtil::GetLocalVariableGeneric(jvmtiEnv* env ATTRIBUTE_UNUSED,
+jvmtiError MethodUtil::GetLocalVariableGeneric(jvmtiEnv* env,
                                                jthread thread,
                                                jint depth,
                                                jint slot,
@@ -753,7 +952,7 @@
     art::Locks::thread_list_lock_->ExclusiveUnlock(self);
     return err;
   }
-  GetLocalVariableClosure c(depth, slot, type, val);
+  GetLocalVariableClosure c(env, depth, slot, type, val);
   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
   if (!target->RequestSynchronousCheckpoint(&c)) {
     return ERR(THREAD_NOT_ALIVE);
@@ -764,49 +963,100 @@
 
 class SetLocalVariableClosure : public CommonLocalVariableClosure {
  public:
-  SetLocalVariableClosure(art::Thread* caller,
+  SetLocalVariableClosure(jvmtiEnv* jvmti,
+                          art::Thread* caller,
                           jint depth,
                           jint slot,
                           art::Primitive::Type type,
                           jvalue val)
-      : CommonLocalVariableClosure(depth, slot), caller_(caller), type_(type), val_(val) {}
+      : CommonLocalVariableClosure(jvmti, depth, slot), caller_(caller), type_(type), val_(val) {}
 
  protected:
-  jvmtiError GetTypeError(art::ArtMethod* method,
-                          art::Primitive::Type slot_type,
-                          const std::string& descriptor)
-      override REQUIRES_SHARED(art::Locks::mutator_lock_) {
-    switch (slot_type) {
-      case art::Primitive::kPrimNot: {
-        if (type_ != art::Primitive::kPrimNot) {
+  jvmtiError
+  GetTypeError(art::ArtMethod* method, SlotType slot_type, const std::string& descriptor) override
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    jvmtiError res = GetTypeErrorInner(method, slot_type, descriptor);
+    if (res != OK) {
+      if (res == ERR(TYPE_MISMATCH)) {
+        std::ostringstream desc_exp;
+        std::ostringstream desc_set;
+        if (type_ == art::Primitive::kPrimNot) {
+          desc_exp << " (type: " << descriptor << ")";
+          art::ObjPtr<art::mirror::Object> new_val(art::Thread::Current()->DecodeJObject(val_.l));
+          desc_set << " (type: "
+                  << (new_val.IsNull() ? "NULL" : new_val->GetClass()->PrettyDescriptor()) << ")";
+        }
+        JVMTI_LOG(INFO, jvmti_) << "Unable to Set local variable in slot " << slot_ << ". Expected"
+                                << " slot to be of type compatible with " << SlotType{ type_ }
+                                << desc_set.str() << " but slot is " << slot_type << desc_exp.str();
+      } else {
+        JVMTI_LOG(INFO, jvmti_) << "Unable to set local variable in slot " << slot_ << ". "
+                                << err_.str();
+      }
+    }
+    return res;
+  }
+
+  jvmtiError
+  GetTypeErrorInner(art::ArtMethod* method, SlotType slot_type, const std::string& descriptor)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    switch (SquashType(SlotType{ type_ })) {
+      case VerifierPrimitiveType::k32BitValue: {
+        if (slot_type == SlotType{ VerifierPrimitiveType::kZeroValue }) {
+          if (val_.i == 0) {
+            return OK;
+          } else {
+            err_ << "Cannot determine if slot " << slot_ << " is a null reference or 32bit "
+                 << "constant. Cannot allow writing to slot.";
+            return ERR(INTERNAL);
+          }
+        } else if (SquashType(slot_type) != VerifierPrimitiveType::k32BitValue) {
+          return ERR(TYPE_MISMATCH);
+        } else if (slot_type == SlotType { VerifierPrimitiveType::k32BitValue } ||
+                   slot_type == SlotType { type_ }) {
+          return OK;
+        } else if (type_ == art::Primitive::kPrimFloat ||
+                   slot_type == SlotType { art::Primitive::kPrimFloat }) {
+          // We should have matched the slot_type == SlotType{type_} check above.
+          return ERR(TYPE_MISMATCH);
+        } else {
+          // Some smaller type than int.
+          return OK;
+        }
+      }
+      case VerifierPrimitiveType::k64BitValue: {
+        if (slot_type == SlotType { VerifierPrimitiveType::k64BitValue } ||
+            slot_type == SlotType { type_ }) {
+          return OK;
+        } else {
+          return ERR(TYPE_MISMATCH);
+        }
+      }
+      case VerifierPrimitiveType::kReferenceValue: {
+        if (SquashType(slot_type) != VerifierPrimitiveType::kReferenceValue &&
+            SquashType(slot_type) != VerifierPrimitiveType::kZeroValue) {
           return ERR(TYPE_MISMATCH);
         } else if (val_.l == nullptr) {
           return OK;
+        } else if (slot_type == SlotType { VerifierPrimitiveType::kZeroValue }) {
+          err_ << "Cannot determine if slot " << slot_ << " is a null "
+               << "reference or 32bit constant. Cannot allow writing to slot.";
+          return ERR(INTERNAL);
         } else {
           art::ClassLinker* cl = art::Runtime::Current()->GetClassLinker();
-          art::ObjPtr<art::mirror::Class> set_class =
-              caller_->DecodeJObject(val_.l)->GetClass();
+          art::ObjPtr<art::mirror::Class> set_class = caller_->DecodeJObject(val_.l)->GetClass();
           art::ObjPtr<art::mirror::ClassLoader> loader =
               method->GetDeclaringClass()->GetClassLoader();
           art::ObjPtr<art::mirror::Class> slot_class =
               cl->LookupClass(caller_, descriptor.c_str(), loader);
-          DCHECK(!slot_class.IsNull());
+          DCHECK(!slot_class.IsNull()) << descriptor << " slot: " << slot_type;
           return slot_class->IsAssignableFrom(set_class) ? OK : ERR(TYPE_MISMATCH);
         }
       }
-      case art::Primitive::kPrimByte:
-      case art::Primitive::kPrimChar:
-      case art::Primitive::kPrimInt:
-      case art::Primitive::kPrimShort:
-      case art::Primitive::kPrimBoolean:
-        return type_ == art::Primitive::kPrimInt ? OK : ERR(TYPE_MISMATCH);
-      case art::Primitive::kPrimLong:
-      case art::Primitive::kPrimFloat:
-      case art::Primitive::kPrimDouble:
-        return type_ == slot_type ? OK : ERR(TYPE_MISMATCH);
-      case art::Primitive::kPrimVoid:
-        LOG(FATAL) << "Unexpected primitive type " << slot_type;
+      case VerifierPrimitiveType::kZeroValue: {
+        LOG(FATAL) << "Illegal result from SquashType of art::Primitive::Type " << type_;
         UNREACHABLE();
+      }
     }
   }
 
@@ -814,13 +1064,9 @@
       override REQUIRES_SHARED(art::Locks::mutator_lock_) {
     switch (type_) {
       case art::Primitive::kPrimNot: {
-        uint32_t ptr_val;
-        art::ObjPtr<art::mirror::Object> obj(caller_->DecodeJObject(val_.l));
-        ptr_val = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(obj.Ptr()));
-        if (!visitor.SetVReg(method,
-                             static_cast<uint16_t>(slot_),
-                             ptr_val,
-                             art::kReferenceVReg)) {
+        if (!visitor.SetVRegReference(method,
+                                      static_cast<uint16_t>(slot_),
+                                      caller_->DecodeJObject(val_.l))) {
           return ERR(OPAQUE_FRAME);
         }
         break;
@@ -861,9 +1107,10 @@
   art::Thread* caller_;
   art::Primitive::Type type_;
   jvalue val_;
+  std::ostringstream err_;
 };
 
-jvmtiError MethodUtil::SetLocalVariableGeneric(jvmtiEnv* env ATTRIBUTE_UNUSED,
+jvmtiError MethodUtil::SetLocalVariableGeneric(jvmtiEnv* env,
                                                jthread thread,
                                                jint depth,
                                                jint slot,
@@ -884,7 +1131,7 @@
     art::Locks::thread_list_lock_->ExclusiveUnlock(self);
     return err;
   }
-  SetLocalVariableClosure c(self, depth, slot, type, val);
+  SetLocalVariableClosure c(env, self, depth, slot, type, val);
   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
   if (!target->RequestSynchronousCheckpoint(&c)) {
     return ERR(THREAD_NOT_ALIVE);
diff --git a/openjdkjvmti/ti_redefine.cc b/openjdkjvmti/ti_redefine.cc
index 975843c..c457dba 100644
--- a/openjdkjvmti/ti_redefine.cc
+++ b/openjdkjvmti/ti_redefine.cc
@@ -31,62 +31,114 @@
 
 #include "ti_redefine.h"
 
+#include <algorithm>
+#include <atomic>
 #include <iterator>
 #include <limits>
+#include <sstream>
 #include <string_view>
 #include <unordered_map>
 
 #include <android-base/logging.h>
 #include <android-base/stringprintf.h>
 
+#include "alloc_manager.h"
+#include "android-base/macros.h"
+#include "android-base/thread_annotations.h"
 #include "art_field-inl.h"
+#include "art_field.h"
 #include "art_jvmti.h"
 #include "art_method-inl.h"
+#include "art_method.h"
 #include "base/array_ref.h"
+#include "base/casts.h"
+#include "base/enums.h"
+#include "base/globals.h"
+#include "base/iteration_range.h"
+#include "base/length_prefixed_array.h"
+#include "base/locks.h"
+#include "base/stl_util.h"
+#include "base/utils.h"
 #include "class_linker-inl.h"
+#include "class_linker.h"
 #include "class_root.h"
+#include "class_status.h"
 #include "debugger.h"
 #include "dex/art_dex_file_loader.h"
 #include "dex/class_accessor-inl.h"
+#include "dex/class_accessor.h"
 #include "dex/dex_file.h"
 #include "dex/dex_file_loader.h"
 #include "dex/dex_file_types.h"
+#include "dex/primitive.h"
 #include "dex/signature-inl.h"
+#include "dex/signature.h"
 #include "events-inl.h"
+#include "events.h"
 #include "gc/allocation_listener.h"
 #include "gc/heap.h"
+#include "gc/heap-inl.h"
+#include "gc/heap-visit-objects-inl.h"
+#include "handle.h"
+#include "handle_scope.h"
 #include "instrumentation.h"
 #include "intern_table.h"
-#include "jdwp/jdwp.h"
-#include "jdwp/jdwp_constants.h"
-#include "jdwp/jdwp_event.h"
-#include "jdwp/object_registry.h"
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
 #include "jni/jni_env_ext-inl.h"
+#include "jni/jni_id_manager.h"
+#include "jvmti.h"
 #include "jvmti_allocator.h"
 #include "linear_alloc.h"
 #include "mirror/array-alloc-inl.h"
+#include "mirror/array.h"
 #include "mirror/class-alloc-inl.h"
 #include "mirror/class-inl.h"
+#include "mirror/class-refvisitor-inl.h"
+#include "mirror/class.h"
 #include "mirror/class_ext-inl.h"
+#include "mirror/dex_cache-inl.h"
+#include "mirror/dex_cache.h"
+#include "mirror/executable-inl.h"
+#include "mirror/field-inl.h"
+#include "mirror/field.h"
+#include "mirror/method.h"
+#include "mirror/method_handle_impl-inl.h"
 #include "mirror/object.h"
 #include "mirror/object_array-alloc-inl.h"
 #include "mirror/object_array-inl.h"
+#include "mirror/object_array.h"
+#include "mirror/string.h"
+#include "mirror/var_handle.h"
 #include "nativehelper/scoped_local_ref.h"
 #include "non_debuggable_classes.h"
+#include "obj_ptr.h"
 #include "object_lock.h"
+#include "reflective_value_visitor.h"
 #include "runtime.h"
+#include "runtime_globals.h"
+#include "scoped_thread_state_change.h"
 #include "stack.h"
+#include "thread.h"
 #include "thread_list.h"
 #include "ti_breakpoint.h"
+#include "ti_class_definition.h"
 #include "ti_class_loader.h"
+#include "ti_heap.h"
+#include "ti_logging.h"
+#include "ti_thread.h"
 #include "transform.h"
 #include "verifier/class_verifier.h"
 #include "verifier/verifier_enums.h"
+#include "well_known_classes.h"
+#include "write_barrier.h"
 
 namespace openjdkjvmti {
 
+// Debug check to force us to verify directly that we saw all methods and fields exactly once.
+// Normally this is unnecessary, since if any are missing the counts will differ.
+constexpr bool kCheckAllMethodsSeenOnce = art::kIsDebugBuild;
+
 using android::base::StringPrintf;
 
 // A helper that fills in a classes obsolete_methods_ and obsolete_dex_caches_ classExt fields as
@@ -143,11 +195,18 @@
     art::ArtMethod* obsolete_method;
   };
 
-  class ObsoleteMapIter : public std::iterator<std::forward_iterator_tag, ObsoleteMethodPair> {
+  class ObsoleteMapIter {
    public:
+    using iterator_category = std::forward_iterator_tag;
+    using value_type = ObsoleteMethodPair;
+    using difference_type = ptrdiff_t;
+    using pointer = void;    // Unsupported.
+    using reference = void;  // Unsupported.
+
     ObsoleteMethodPair operator*() const
         REQUIRES(art::Locks::mutator_lock_, art::Roles::uninterruptible_) {
-      art::ArtMethod* obsolete = map_->FindObsoleteVersion(iter_->first);
+      art::ArtMethod* obsolete = map_->obsolete_methods_->GetElementPtrSize<art::ArtMethod*>(
+          iter_->second, art::kRuntimePointerSize);
       DCHECK(obsolete != nullptr);
       return { iter_->first, obsolete };
     }
@@ -160,13 +219,13 @@
       return !(*this == other);
     }
 
-    ObsoleteMapIter operator++(int) const {
+    ObsoleteMapIter operator++(int) {
       ObsoleteMapIter retval = *this;
       ++(*this);
       return retval;
     }
 
-    ObsoleteMapIter operator++() const {
+    ObsoleteMapIter operator++() {
       ++iter_;
       return *this;
     }
@@ -177,7 +236,7 @@
         : map_(map), iter_(iter) {}
 
     const ObsoleteMap* map_;
-    mutable std::unordered_map<art::ArtMethod*, int32_t>::const_iterator iter_;
+    std::unordered_map<art::ArtMethod*, int32_t>::const_iterator iter_;
 
     friend class ObsoleteMap;
   };
@@ -279,9 +338,12 @@
   ObsoleteMap* obsolete_maps_;
 };
 
-jvmtiError Redefiner::IsModifiableClass(jvmtiEnv* env ATTRIBUTE_UNUSED,
-                                        jclass klass,
-                                        jboolean* is_redefinable) {
+template <RedefinitionType kType>
+jvmtiError
+Redefiner::IsModifiableClassGeneric(jvmtiEnv* env, jclass klass, jboolean* is_redefinable) {
+  if (env == nullptr) {
+    return ERR(INVALID_ENVIRONMENT);
+  }
   art::Thread* self = art::Thread::Current();
   art::ScopedObjectAccess soa(self);
   art::StackHandleScope<1> hs(self);
@@ -292,12 +354,24 @@
   art::Handle<art::mirror::Class> h_klass(hs.NewHandle(obj->AsClass()));
   std::string err_unused;
   *is_redefinable =
-      Redefiner::GetClassRedefinitionError(h_klass, &err_unused) != ERR(UNMODIFIABLE_CLASS)
-      ? JNI_TRUE : JNI_FALSE;
+      Redefiner::GetClassRedefinitionError<kType>(h_klass, &err_unused) != ERR(UNMODIFIABLE_CLASS)
+          ? JNI_TRUE
+          : JNI_FALSE;
   return OK;
 }
 
-jvmtiError Redefiner::GetClassRedefinitionError(jclass klass, /*out*/std::string* error_msg) {
+jvmtiError
+Redefiner::IsStructurallyModifiableClass(jvmtiEnv* env, jclass klass, jboolean* is_redefinable) {
+  return Redefiner::IsModifiableClassGeneric<RedefinitionType::kStructural>(
+      env, klass, is_redefinable);
+}
+
+jvmtiError Redefiner::IsModifiableClass(jvmtiEnv* env, jclass klass, jboolean* is_redefinable) {
+  return Redefiner::IsModifiableClassGeneric<RedefinitionType::kNormal>(env, klass, is_redefinable);
+}
+
+template <RedefinitionType kType>
+jvmtiError Redefiner::GetClassRedefinitionError(jclass klass, /*out*/ std::string* error_msg) {
   art::Thread* self = art::Thread::Current();
   art::ScopedObjectAccess soa(self);
   art::StackHandleScope<1> hs(self);
@@ -306,16 +380,18 @@
     return ERR(INVALID_CLASS);
   }
   art::Handle<art::mirror::Class> h_klass(hs.NewHandle(obj->AsClass()));
-  return Redefiner::GetClassRedefinitionError(h_klass, error_msg);
+  return Redefiner::GetClassRedefinitionError<kType>(h_klass, error_msg);
 }
 
+template <RedefinitionType kType>
 jvmtiError Redefiner::GetClassRedefinitionError(art::Handle<art::mirror::Class> klass,
-                                                /*out*/std::string* error_msg) {
+                                                /*out*/ std::string* error_msg) {
+  art::Thread* self = art::Thread::Current();
   if (!klass->IsResolved()) {
     // It's only a problem to try to retransform/redefine a unprepared class if it's happening on
     // the same thread as the class-linking process. If it's on another thread we will be able to
     // wait for the preparation to finish and continue from there.
-    if (klass->GetLockOwnerThreadId() == art::Thread::Current()->GetThreadId()) {
+    if (klass->GetLockOwnerThreadId() == self->GetThreadId()) {
       *error_msg = "Modification of class " + klass->PrettyClass() +
           " from within the classes ClassLoad callback is not supported to prevent deadlocks." +
           " Please use ClassFileLoadHook directly instead.";
@@ -343,15 +419,95 @@
   }
 
   for (jclass c : art::NonDebuggableClasses::GetNonDebuggableClasses()) {
-    if (klass.Get() == art::Thread::Current()->DecodeJObject(c)->AsClass()) {
+    if (klass.Get() == self->DecodeJObject(c)->AsClass()) {
       *error_msg = "Class might have stack frames that cannot be made obsolete";
       return ERR(UNMODIFIABLE_CLASS);
     }
   }
 
+  if (kType == RedefinitionType::kStructural) {
+    // Class initialization interacts really badly with structural redefinition since we need to
+    // make the old class obsolete. We currently just blanket don't allow it.
+    // TODO It might be nice to allow this at some point.
+    if (klass->IsInitializing() &&
+       !klass->IsInitialized() &&
+        klass->GetClinitThreadId() == self->GetTid()) {
+      // We are in the class-init running on this thread.
+      *error_msg = "Modification of class " + klass->PrettyClass() + " during class" +
+                   " initialization is not allowed.";
+      return ERR(INTERNAL);
+    }
+    if (!art::Runtime::Current()->GetClassLinker()->EnsureInitialized(
+            self, klass, /*can_init_fields=*/true, /*can_init_parents=*/true)) {
+      self->AssertPendingException();
+      *error_msg = "Class " + klass->PrettyClass() + " failed initialization. Structural" +
+                   " redefinition of erroneous classes is not allowed. Failure was: " +
+                   self->GetException()->Dump();
+      self->ClearException();
+      return ERR(INVALID_CLASS);
+    }
+    if (klass->IsMirrored()) {
+      std::string pc(klass->PrettyClass());
+      *error_msg = StringPrintf("Class %s is a mirror class and cannot be structurally redefined.",
+                                pc.c_str());
+      return ERR(UNMODIFIABLE_CLASS);
+    }
+    // Check Thread specifically since it's not a root but too many things reach into it with
+    // Unsafe to allow structural redefinition.
+    if (klass->IsAssignableFrom(
+            self->DecodeJObject(art::WellKnownClasses::java_lang_Thread)->AsClass())) {
+      *error_msg =
+          "java.lang.Thread has fields accessed using sun.misc.unsafe directly. It is not "
+          "safe to structurally redefine it.";
+      return ERR(UNMODIFIABLE_CLASS);
+    }
+    auto has_pointer_marker =
+        [](art::ObjPtr<art::mirror::Class> k) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+          // Check for fields/methods whose jni-ids were handed out before moving to index-based ids.
+          // TODO We might want to rework how this is done. Once full redefinition is implemented we
+          // will need to check any subtypes too.
+          art::ObjPtr<art::mirror::ClassExt> ext(k->GetExtData());
+          if (!ext.IsNull()) {
+            if (ext->HasInstanceFieldPointerIdMarker() || ext->HasMethodPointerIdMarker() ||
+                ext->HasStaticFieldPointerIdMarker()) {
+              return true;
+            }
+          }
+          return false;
+        };
+    if (has_pointer_marker(klass.Get())) {
+      *error_msg =
+          StringPrintf("%s has active pointer jni-ids and cannot be redefined structurally",
+                       klass->PrettyClass().c_str());
+      return ERR(UNMODIFIABLE_CLASS);
+    }
+    jvmtiError res = OK;
+    art::ClassFuncVisitor cfv(
+      [&](art::ObjPtr<art::mirror::Class> k) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+        // if there is any class 'K' that is a subtype (i.e. extends) klass and has pointer-jni-ids
+        // we cannot structurally redefine the class 'k' since we would structurally redefine the
+        // subtype.
+        if (k->IsLoaded() && klass->IsAssignableFrom(k) && has_pointer_marker(k)) {
+          *error_msg = StringPrintf(
+              "%s has active pointer jni-ids from subtype %s and cannot be redefined structurally",
+              klass->PrettyClass().c_str(),
+              k->PrettyClass().c_str());
+          res = ERR(UNMODIFIABLE_CLASS);
+          return false;
+        }
+        return true;
+      });
+    art::Runtime::Current()->GetClassLinker()->VisitClasses(&cfv);
+    return res;
+  }
   return OK;
 }
 
+template jvmtiError Redefiner::GetClassRedefinitionError<RedefinitionType::kNormal>(
+    art::Handle<art::mirror::Class> klass, /*out*/ std::string* error_msg);
+template jvmtiError Redefiner::GetClassRedefinitionError<RedefinitionType::kStructural>(
+    art::Handle<art::mirror::Class> klass, /*out*/ std::string* error_msg);
+
 // Moves dex data to an anonymous, read-only mmap'd region.
 art::MemMap Redefiner::MoveDataToMemMap(const std::string& original_location,
                                         art::ArrayRef<const unsigned char> data,
@@ -391,67 +547,129 @@
   }
 }
 
-jvmtiError Redefiner::RedefineClasses(ArtJvmTiEnv* env,
-                                      EventHandler* event_handler,
-                                      art::Runtime* runtime,
-                                      art::Thread* self,
-                                      jint class_count,
-                                      const jvmtiClassDefinition* definitions,
-                                      /*out*/std::string* error_msg) {
+template<RedefinitionType kType>
+jvmtiError Redefiner::RedefineClassesGeneric(jvmtiEnv* jenv,
+                                             jint class_count,
+                                             const jvmtiClassDefinition* definitions) {
+  art::Runtime* runtime = art::Runtime::Current();
+  art::Thread* self = art::Thread::Current();
+  ArtJvmTiEnv* env = ArtJvmTiEnv::AsArtJvmTiEnv(jenv);
   if (env == nullptr) {
-    *error_msg = "env was null!";
+    JVMTI_LOG(WARNING, env) << "FAILURE TO REDEFINE env was null!";
     return ERR(INVALID_ENVIRONMENT);
   } else if (class_count < 0) {
-    *error_msg = "class_count was less then 0";
+    JVMTI_LOG(WARNING, env) << "FAILURE TO REDEFINE class_count was less then 0";
     return ERR(ILLEGAL_ARGUMENT);
   } else if (class_count == 0) {
     // We don't actually need to do anything. Just return OK.
     return OK;
   } else if (definitions == nullptr) {
-    *error_msg = "null definitions!";
+    JVMTI_LOG(WARNING, env) << "FAILURE TO REDEFINE null definitions!";
     return ERR(NULL_POINTER);
   }
+  std::string error_msg;
   std::vector<ArtClassDefinition> def_vector;
   def_vector.reserve(class_count);
   for (jint i = 0; i < class_count; i++) {
-    jvmtiError res = Redefiner::GetClassRedefinitionError(definitions[i].klass, error_msg);
+    jvmtiError res = Redefiner::GetClassRedefinitionError<RedefinitionType::kNormal>(
+        definitions[i].klass, &error_msg);
     if (res != OK) {
+      JVMTI_LOG(WARNING, env) << "FAILURE TO REDEFINE " << error_msg;
       return res;
     }
     ArtClassDefinition def;
     res = def.Init(self, definitions[i]);
     if (res != OK) {
+      JVMTI_LOG(WARNING, env) << "FAILURE TO REDEFINE bad definition " << i;
       return res;
     }
     def_vector.push_back(std::move(def));
   }
   // Call all the transformation events.
-  jvmtiError res = Transformer::RetransformClassesDirect(event_handler,
-                                                         self,
-                                                         &def_vector);
-  if (res != OK) {
-    // Something went wrong with transformation!
-    return res;
+  Transformer::RetransformClassesDirect<kType>(self, &def_vector);
+  if (kType == RedefinitionType::kStructural) {
+    Transformer::RetransformClassesDirect<RedefinitionType::kNormal>(self, &def_vector);
   }
-  return RedefineClassesDirect(env, runtime, self, def_vector, error_msg);
+  jvmtiError res = RedefineClassesDirect(env, runtime, self, def_vector, kType, &error_msg);
+  if (res != OK) {
+    JVMTI_LOG(WARNING, env) << "FAILURE TO REDEFINE " << error_msg;
+  }
+  return res;
+}
+
+jvmtiError Redefiner::StructurallyRedefineClasses(jvmtiEnv* jenv,
+                                                  jint class_count,
+                                                  const jvmtiClassDefinition* definitions) {
+  ArtJvmTiEnv* art_env = ArtJvmTiEnv::AsArtJvmTiEnv(jenv);
+  if (art_env == nullptr) {
+    return ERR(INVALID_ENVIRONMENT);
+  } else if (art_env->capabilities.can_redefine_classes != 1) {
+    return ERR(MUST_POSSESS_CAPABILITY);
+  }
+  return RedefineClassesGeneric<RedefinitionType::kStructural>(jenv, class_count, definitions);
+}
+
+jvmtiError Redefiner::RedefineClasses(jvmtiEnv* jenv,
+                                      jint class_count,
+                                      const jvmtiClassDefinition* definitions) {
+  return RedefineClassesGeneric<RedefinitionType::kNormal>(jenv, class_count, definitions);
+}
+
+jvmtiError Redefiner::StructurallyRedefineClassDirect(jvmtiEnv* env,
+                                                      jclass klass,
+                                                      const unsigned char* data,
+                                                      jint data_size) {
+  if (env == nullptr) {
+    return ERR(INVALID_ENVIRONMENT);
+  } else if (ArtJvmTiEnv::AsArtJvmTiEnv(env)->capabilities.can_redefine_classes != 1) {
+    JVMTI_LOG(INFO, env) << "Does not have can_redefine_classes cap!";
+    return ERR(MUST_POSSESS_CAPABILITY);
+  }
+  std::vector<ArtClassDefinition> acds;
+  ArtClassDefinition acd;
+  jvmtiError err = acd.Init(
+      art::Thread::Current(),
+      jvmtiClassDefinition{ .klass = klass, .class_byte_count = data_size, .class_bytes = data });
+  if (err != OK) {
+    return err;
+  }
+  acds.push_back(std::move(acd));
+  std::string err_msg;
+  err = RedefineClassesDirect(ArtJvmTiEnv::AsArtJvmTiEnv(env),
+                              art::Runtime::Current(),
+                              art::Thread::Current(),
+                              acds,
+                              RedefinitionType::kStructural,
+                              &err_msg);
+  if (err != OK) {
+    JVMTI_LOG(WARNING, env) << "Failed structural redefinition: " << err_msg;
+  }
+  return err;
 }
 
 jvmtiError Redefiner::RedefineClassesDirect(ArtJvmTiEnv* env,
                                             art::Runtime* runtime,
                                             art::Thread* self,
                                             const std::vector<ArtClassDefinition>& definitions,
+                                            RedefinitionType type,
                                             std::string* error_msg) {
   DCHECK(env != nullptr);
   if (definitions.size() == 0) {
     // We don't actually need to do anything. Just return OK.
     return OK;
   }
+  // We need to fiddle with the verification class flags. To do this we need to make sure there
+  // are no concurrent redefinitions of the same class at the same time. For simplicity, and
+  // because this is not expected to be a common occurrence, we will just wrap the whole thing in
+  // a top-level lock.
+
   // Stop JIT for the duration of this redefine since the JIT might concurrently compile a method we
   // are going to redefine.
+  // TODO We should prevent user-code suspensions to make sure this isn't held for too long.
   art::jit::ScopedJitSuspend suspend_jit;
   // Get shared mutator lock so we can lock all the classes.
   art::ScopedObjectAccess soa(self);
-  Redefiner r(env, runtime, self, error_msg);
+  Redefiner r(env, runtime, self, type, error_msg);
   for (const ArtClassDefinition& def : definitions) {
     // Only try to transform classes that have been modified.
     if (def.IsModified()) {
@@ -610,6 +828,7 @@
 // TODO Rewrite so we can do this only once regardless of how many redefinitions there are.
 void Redefiner::ClassRedefinition::FindAndAllocateObsoleteMethods(
     art::ObjPtr<art::mirror::Class> art_klass) {
+  DCHECK(!IsStructuralRedefinition());
   art::ScopedAssertNoThreadSuspension ns("No thread suspension during thread stack walking");
   art::ObjPtr<art::mirror::ClassExt> ext = art_klass->GetExtData();
   CHECK(ext->GetObsoleteMethods() != nullptr);
@@ -637,7 +856,8 @@
     art::MutexLock mu(driver_->self_, *art::Locks::thread_list_lock_);
     art::ThreadList* list = art::Runtime::Current()->GetThreadList();
     list->ForEach(DoAllocateObsoleteMethodsCallback, static_cast<void*>(&ctx));
-    // Update JIT Data structures to point to the new method.
+    // After we've done walking all threads' stacks and updating method pointers on them,
+    // update JIT data structures (used by the stack walk above) to point to the new methods.
     art::jit::Jit* jit = art::Runtime::Current()->GetJit();
     if (jit != nullptr) {
       for (const ObsoleteMap::ObsoleteMethodPair& it : *ctx.obsolete_map) {
@@ -649,35 +869,82 @@
   }
 }
 
-// Try and get the declared method. First try to get a virtual method then a direct method if that's
-// not found.
-static art::ArtMethod* FindMethod(art::Handle<art::mirror::Class> klass,
-                                  std::string_view name,
-                                  art::Signature sig) REQUIRES_SHARED(art::Locks::mutator_lock_) {
-  DCHECK(!klass->IsProxyClass());
-  for (art::ArtMethod& m : klass->GetDeclaredMethodsSlice(art::kRuntimePointerSize)) {
-    if (m.GetName() == name && m.GetSignature() == sig) {
-      return &m;
-    }
+namespace {
+template <typename T> struct SignatureType {};
+template <> struct SignatureType<art::ArtField> { using type = std::string_view; };
+template <> struct SignatureType<art::ArtMethod> { using type = art::Signature; };
+
+template <typename T> struct NameAndSignature {
+ public:
+  using SigType = typename SignatureType<T>::type;
+
+  NameAndSignature(const art::DexFile* dex_file, uint32_t id);
+
+  NameAndSignature(const std::string_view& name, const SigType& sig) : name_(name), sig_(sig) {}
+
+  bool operator==(const NameAndSignature<T>& o) {
+    return name_ == o.name_ && sig_ == o.sig_;
   }
-  return nullptr;
+
+  std::ostream& dump(std::ostream& os) const {
+    return os << "'" << name_ << "' (sig: " << sig_ << ")";
+  }
+
+  std::string ToString() const {
+    std::ostringstream os;
+    os << *this;
+    return os.str();
+  }
+
+  std::string_view name_;
+  SigType sig_;
+};
+
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const NameAndSignature<T>& nas) {
+  return nas.dump(os);
 }
 
-bool Redefiner::ClassRedefinition::CheckSameMethods() {
+using FieldNameAndSignature = NameAndSignature<art::ArtField>;
+template <>
+FieldNameAndSignature::NameAndSignature(const art::DexFile* dex_file, uint32_t id)
+    : FieldNameAndSignature(dex_file->GetFieldName(dex_file->GetFieldId(id)),
+                            dex_file->GetFieldTypeDescriptor(dex_file->GetFieldId(id))) {}
+
+using MethodNameAndSignature = NameAndSignature<art::ArtMethod>;
+template <>
+MethodNameAndSignature::NameAndSignature(const art::DexFile* dex_file, uint32_t id)
+    : MethodNameAndSignature(dex_file->GetMethodName(dex_file->GetMethodId(id)),
+                             dex_file->GetMethodSignature(dex_file->GetMethodId(id))) {}
+
+}  // namespace
+
+void Redefiner::ClassRedefinition::RecordNewMethodAdded() {
+  DCHECK(driver_->IsStructuralRedefinition());
+  added_methods_ = true;
+}
+void Redefiner::ClassRedefinition::RecordNewFieldAdded() {
+  DCHECK(driver_->IsStructuralRedefinition());
+  added_fields_ = true;
+}
+
+bool Redefiner::ClassRedefinition::CheckMethods() {
   art::StackHandleScope<1> hs(driver_->self_);
   art::Handle<art::mirror::Class> h_klass(hs.NewHandle(GetMirrorClass()));
   DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
 
-  // Make sure we have the same number of methods.
+  // Make sure we have the same number of methods (or the same or greater if we're structural).
   art::ClassAccessor accessor(*dex_file_, dex_file_->GetClassDef(0));
   uint32_t num_new_method = accessor.NumMethods();
   uint32_t num_old_method = h_klass->GetDeclaredMethodsSlice(art::kRuntimePointerSize).size();
-  if (num_new_method != num_old_method) {
+  const bool is_structural = driver_->IsStructuralRedefinition();
+  if (!is_structural && num_new_method != num_old_method) {
     bool bigger = num_new_method > num_old_method;
     RecordFailure(bigger ? ERR(UNSUPPORTED_REDEFINITION_METHOD_ADDED)
                          : ERR(UNSUPPORTED_REDEFINITION_METHOD_DELETED),
                   StringPrintf("Total number of declared methods changed from %d to %d",
-                               num_old_method, num_new_method));
+                               num_old_method,
+                               num_new_method));
     return false;
   }
 
@@ -685,38 +952,65 @@
   // Check each of the methods. NB we don't need to specifically check for removals since the 2 dex
   // files have the same number of methods, which means there must be an equal amount of additions
   // and removals. We should have already checked the fields.
-  for (const art::ClassAccessor::Method& method : accessor.GetMethods()) {
+  const art::DexFile& old_dex_file = h_klass->GetDexFile();
+  art::ClassAccessor old_accessor(old_dex_file, *h_klass->GetClassDef());
+  // We need this to check for methods going missing in structural cases.
+  std::vector<bool> seen_old_methods(
+      (kCheckAllMethodsSeenOnce || is_structural) ? old_accessor.NumMethods() : 0, false);
+  const auto old_methods = old_accessor.GetMethods();
+  for (const art::ClassAccessor::Method& new_method : accessor.GetMethods()) {
     // Get the data on the method we are searching for
-    const art::dex::MethodId& new_method_id = dex_file_->GetMethodId(method.GetIndex());
-    const char* new_method_name = dex_file_->GetMethodName(new_method_id);
-    art::Signature new_method_signature = dex_file_->GetMethodSignature(new_method_id);
-    art::ArtMethod* old_method = FindMethod(h_klass, new_method_name, new_method_signature);
-    // If we got past the check for the same number of methods above that means there must be at
-    // least one added and one removed method. We will return the ADDED failure message since it is
-    // easier to get a useful error report for it.
-    if (old_method == nullptr) {
-      RecordFailure(ERR(UNSUPPORTED_REDEFINITION_METHOD_ADDED),
-                    StringPrintf("Unknown method '%s' (sig: %s) was added!",
-                                  new_method_name,
-                                  new_method_signature.ToString().c_str()));
-      return false;
+    MethodNameAndSignature new_method_id(dex_file_.get(), new_method.GetIndex());
+    const auto old_iter =
+        std::find_if(old_methods.cbegin(), old_methods.cend(), [&](const auto& current_old_method) {
+          MethodNameAndSignature old_method_id(&old_dex_file, current_old_method.GetIndex());
+          return old_method_id == new_method_id;
+        });
+
+    if (!new_method.IsStaticOrDirect()) {
+      RecordHasVirtualMembers();
     }
-    // Since direct methods have different flags than virtual ones (specifically direct methods must
-    // have kAccPrivate or kAccStatic or kAccConstructor flags) we can tell if a method changes from
-    // virtual to direct.
-    uint32_t new_flags = method.GetAccessFlags();
-    if (new_flags != (old_method->GetAccessFlags() & art::kAccValidMethodFlags)) {
-      RecordFailure(ERR(UNSUPPORTED_REDEFINITION_METHOD_MODIFIERS_CHANGED),
-                    StringPrintf("method '%s' (sig: %s) had different access flags",
-                                 new_method_name,
-                                 new_method_signature.ToString().c_str()));
+    if (old_iter == old_methods.cend()) {
+      if (is_structural) {
+        RecordNewMethodAdded();
+      } else {
+        RecordFailure(
+            ERR(UNSUPPORTED_REDEFINITION_METHOD_ADDED),
+            StringPrintf("Unknown virtual method %s was added!", new_method_id.ToString().c_str()));
+        return false;
+      }
+    } else if (new_method.GetAccessFlags() != old_iter->GetAccessFlags()) {
+      RecordFailure(
+          ERR(UNSUPPORTED_REDEFINITION_METHOD_MODIFIERS_CHANGED),
+          StringPrintf("method %s had different access flags", new_method_id.ToString().c_str()));
       return false;
+    } else if (kCheckAllMethodsSeenOnce || is_structural) {
+      // We only need this if we are structural.
+      size_t off = std::distance(old_methods.cbegin(), old_iter);
+      DCHECK(!seen_old_methods[off])
+          << "method at " << off << "("
+          << MethodNameAndSignature(&old_dex_file, old_iter->GetIndex()) << ") already seen?";
+      seen_old_methods[off] = true;
     }
   }
+  if ((kCheckAllMethodsSeenOnce || is_structural) &&
+      !std::all_of(seen_old_methods.cbegin(), seen_old_methods.cend(), [](auto x) { return x; })) {
+    DCHECK(is_structural) << "We should have hit an earlier failure before getting here!";
+    auto first_fail =
+        std::find_if(seen_old_methods.cbegin(), seen_old_methods.cend(), [](auto x) { return !x; });
+    auto off = std::distance(seen_old_methods.cbegin(), first_fail);
+    auto fail = old_methods.cbegin();
+    std::advance(fail, off);
+    RecordFailure(
+        ERR(UNSUPPORTED_REDEFINITION_METHOD_DELETED),
+        StringPrintf("Method %s missing!",
+                     MethodNameAndSignature(&old_dex_file, fail->GetIndex()).ToString().c_str()));
+    return false;
+  }
   return true;
 }
 
-bool Redefiner::ClassRedefinition::CheckSameFields() {
+bool Redefiner::ClassRedefinition::CheckFields() {
   art::StackHandleScope<1> hs(driver_->self_);
   art::Handle<art::mirror::Class> h_klass(hs.NewHandle(GetMirrorClass()));
   DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
@@ -726,58 +1020,50 @@
   art::ClassAccessor old_accessor(old_dex_file, *h_klass->GetClassDef());
   // Instance and static fields can be differentiated by their flags so no need to check them
   // separately.
-  auto old_fields = old_accessor.GetFields();
-  auto old_iter = old_fields.begin();
+  std::vector<bool> seen_old_fields(old_accessor.NumFields(), false);
+  const auto old_fields = old_accessor.GetFields();
   for (const art::ClassAccessor::Field& new_field : new_accessor.GetFields()) {
     // Get the data on the method we are searching for
-    const art::dex::FieldId& new_field_id = dex_file_->GetFieldId(new_field.GetIndex());
-    const char* new_field_name = dex_file_->GetFieldName(new_field_id);
-    const char* new_field_type = dex_file_->GetFieldTypeDescriptor(new_field_id);
-
-    if (old_iter == old_fields.end()) {
-      // We are missing the old version of this method!
-      RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
-                    StringPrintf("Unknown field '%s' (type: %s) added!",
-                                  new_field_name,
-                                  new_field_type));
-      return false;
+    FieldNameAndSignature new_field_id(dex_file_.get(), new_field.GetIndex());
+    const auto old_iter =
+        std::find_if(old_fields.cbegin(), old_fields.cend(), [&](const auto& old_iter) {
+          FieldNameAndSignature old_field_id(&old_dex_file, old_iter.GetIndex());
+          return old_field_id == new_field_id;
+        });
+    if (!new_field.IsStatic()) {
+      RecordHasVirtualMembers();
     }
-
-    const art::dex::FieldId& old_field_id = old_dex_file.GetFieldId(old_iter->GetIndex());
-    const char* old_field_name = old_dex_file.GetFieldName(old_field_id);
-    const char* old_field_type = old_dex_file.GetFieldTypeDescriptor(old_field_id);
-
-    // Check name and type.
-    if (strcmp(old_field_name, new_field_name) != 0 ||
-        strcmp(old_field_type, new_field_type) != 0) {
-      RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
-                    StringPrintf("Field changed from '%s' (sig: %s) to '%s' (sig: %s)!",
-                                  old_field_name,
-                                  old_field_type,
-                                  new_field_name,
-                                  new_field_type));
+    if (old_iter == old_fields.cend()) {
+      if (driver_->IsStructuralRedefinition()) {
+        RecordNewFieldAdded();
+      } else {
+        RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+                      StringPrintf("Unknown field %s added!", new_field_id.ToString().c_str()));
+        return false;
+      }
+    } else if (new_field.GetAccessFlags() != old_iter->GetAccessFlags()) {
+      RecordFailure(
+          ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+          StringPrintf("Field %s had different access flags", new_field_id.ToString().c_str()));
       return false;
+    } else {
+      size_t off = std::distance(old_fields.cbegin(), old_iter);
+      DCHECK(!seen_old_fields[off])
+          << "field at " << off << "(" << FieldNameAndSignature(&old_dex_file, old_iter->GetIndex())
+          << ") already seen?";
+      seen_old_fields[off] = true;
     }
-
-    // Since static fields have different flags than instance ones (specifically static fields must
-    // have the kAccStatic flag) we can tell if a field changes from static to instance.
-    if (new_field.GetAccessFlags() != old_iter->GetAccessFlags()) {
-      RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
-                    StringPrintf("Field '%s' (sig: %s) had different access flags",
-                                  new_field_name,
-                                  new_field_type));
-      return false;
-    }
-
-    ++old_iter;
   }
-  if (old_iter != old_fields.end()) {
-    RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
-                  StringPrintf("field '%s' (sig: %s) is missing!",
-                                old_dex_file.GetFieldName(old_dex_file.GetFieldId(
-                                    old_iter->GetIndex())),
-                                old_dex_file.GetFieldTypeDescriptor(old_dex_file.GetFieldId(
-                                    old_iter->GetIndex()))));
+  if (!std::all_of(seen_old_fields.cbegin(), seen_old_fields.cend(), [](auto x) { return x; })) {
+    auto first_fail =
+        std::find_if(seen_old_fields.cbegin(), seen_old_fields.cend(), [](auto x) { return !x; });
+    auto off = std::distance(seen_old_fields.cbegin(), first_fail);
+    auto fail = old_fields.cbegin();
+    std::advance(fail, off);
+    RecordFailure(
+        ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+        StringPrintf("Field %s is missing!",
+                     FieldNameAndSignature(&old_dex_file, fail->GetIndex()).ToString().c_str()));
     return false;
   }
   return true;
@@ -834,6 +1120,7 @@
   const art::dex::TypeList* interfaces = dex_file_->GetInterfacesList(def);
   if (interfaces == nullptr) {
     if (current_class->NumDirectInterfaces() != 0) {
+      // TODO Support this for kStructural.
       RecordFailure(ERR(UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED), "Interfaces added");
       return false;
     }
@@ -841,6 +1128,7 @@
     DCHECK(!current_class->IsProxyClass());
     const art::dex::TypeList* current_interfaces = current_class->GetInterfaceTypeList();
     if (current_interfaces == nullptr || current_interfaces->Size() != interfaces->Size()) {
+      // TODO Support this for kStructural.
       RecordFailure(ERR(UNSUPPORTED_REDEFINITION_HIERARCHY_CHANGED), "Interfaces added or removed");
       return false;
     }
@@ -864,7 +1152,12 @@
   art::StackHandleScope<1> hs(driver_->self_);
 
   art::Handle<art::mirror::Class> h_klass(hs.NewHandle(GetMirrorClass()));
-  jvmtiError res = Redefiner::GetClassRedefinitionError(h_klass, &err);
+  jvmtiError res;
+  if (driver_->type_ == RedefinitionType::kStructural && this->IsStructuralRedefinition()) {
+    res = Redefiner::GetClassRedefinitionError<RedefinitionType::kStructural>(h_klass, &err);
+  } else {
+    res = Redefiner::GetClassRedefinitionError<RedefinitionType::kNormal>(h_klass, &err);
+  }
   if (res != OK) {
     RecordFailure(res, err);
     return false;
@@ -874,10 +1167,7 @@
 }
 
 bool Redefiner::ClassRedefinition::CheckRedefinitionIsValid() {
-  return CheckRedefinable() &&
-      CheckClass() &&
-      CheckSameFields() &&
-      CheckSameMethods();
+  return CheckClass() && CheckFields() && CheckMethods() && CheckRedefinable();
 }
 
 class RedefinitionDataIter;
@@ -896,9 +1186,14 @@
     kSlotOrigDexFile = 5,
     kSlotOldObsoleteMethods = 6,
     kSlotOldDexCaches = 7,
+    kSlotNewClassObject = 8,
+    kSlotOldInstanceObjects = 9,
+    kSlotNewInstanceObjects = 10,
+    kSlotOldClasses = 11,
+    kSlotNewClasses = 12,
 
     // Must be last one.
-    kNumSlots = 8,
+    kNumSlots = 13,
   };
 
   // This needs to have a HandleScope passed in that is capable of creating a new Handle without
@@ -913,7 +1208,10 @@
         self,
         art::GetClassRoot<art::mirror::ObjectArray<art::mirror::Object>>(runtime->GetClassLinker()),
         redefinitions->size() * kNumSlots))),
-    redefinitions_(redefinitions) {}
+    redefinitions_(redefinitions),
+    initialized_(redefinitions_->size(), false),
+    actually_structural_(redefinitions_->size(), false),
+    initial_structural_(redefinitions_->size(), false) {}
 
   bool IsNull() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
     return arr_.IsNull();
@@ -959,6 +1257,43 @@
         GetSlot(klass_index, kSlotOldDexCaches));
   }
 
+  art::ObjPtr<art::mirror::Class> GetNewClassObject(jint klass_index) const
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return art::ObjPtr<art::mirror::Class>::DownCast(GetSlot(klass_index, kSlotNewClassObject));
+  }
+
+  art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> GetOldInstanceObjects(
+      jint klass_index) const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>>::DownCast(
+        GetSlot(klass_index, kSlotOldInstanceObjects));
+  }
+
+  art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> GetNewInstanceObjects(
+      jint klass_index) const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>>::DownCast(
+        GetSlot(klass_index, kSlotNewInstanceObjects));
+  }
+  art::ObjPtr<art::mirror::ObjectArray<art::mirror::Class>> GetOldClasses(jint klass_index) const
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return art::ObjPtr<art::mirror::ObjectArray<art::mirror::Class>>::DownCast(
+        GetSlot(klass_index, kSlotOldClasses));
+  }
+  art::ObjPtr<art::mirror::ObjectArray<art::mirror::Class>> GetNewClasses(jint klass_index) const
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return art::ObjPtr<art::mirror::ObjectArray<art::mirror::Class>>::DownCast(
+        GetSlot(klass_index, kSlotNewClasses));
+  }
+  bool IsInitialized(jint klass_index) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return initialized_[klass_index];
+  }
+  bool IsActuallyStructural(jint klass_index) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return actually_structural_[klass_index];
+  }
+
+  bool IsInitialStructural(jint klass_index) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return initial_structural_[klass_index];
+  }
+
   void SetSourceClassLoader(jint klass_index, art::ObjPtr<art::mirror::ClassLoader> loader)
       REQUIRES_SHARED(art::Locks::mutator_lock_) {
     SetSlot(klass_index, kSlotSourceClassLoader, loader);
@@ -993,6 +1328,40 @@
     SetSlot(klass_index, kSlotOldDexCaches, caches);
   }
 
+  void SetNewClassObject(jint klass_index, art::ObjPtr<art::mirror::Class> klass)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    SetSlot(klass_index, kSlotNewClassObject, klass);
+  }
+
+  void SetOldInstanceObjects(jint klass_index,
+                             art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> objs)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    SetSlot(klass_index, kSlotOldInstanceObjects, objs);
+  }
+  void SetNewInstanceObjects(jint klass_index,
+                             art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> objs)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    SetSlot(klass_index, kSlotNewInstanceObjects, objs);
+  }
+  void SetOldClasses(jint klass_index,
+                     art::ObjPtr<art::mirror::ObjectArray<art::mirror::Class>> klasses)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    SetSlot(klass_index, kSlotOldClasses, klasses);
+  }
+  void SetNewClasses(jint klass_index,
+                     art::ObjPtr<art::mirror::ObjectArray<art::mirror::Class>> klasses)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    SetSlot(klass_index, kSlotNewClasses, klasses);
+  }
+  void SetInitialized(jint klass_index) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    initialized_[klass_index] = true;
+  }
+  void SetActuallyStructural(jint klass_index) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    actually_structural_[klass_index] = true;
+  }
+  void SetInitialStructural(jint klass_index) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    initial_structural_[klass_index] = true;
+  }
   int32_t Length() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
     return arr_->GetLength() / kNumSlots;
   }
@@ -1018,6 +1387,14 @@
  private:
   mutable art::Handle<art::mirror::ObjectArray<art::mirror::Object>> arr_;
   std::vector<Redefiner::ClassRedefinition>* redefinitions_;
+  // Used to mark a particular redefinition as fully initialized.
+  std::vector<bool> initialized_;
+  // Used to mark a redefinition as 'actually' structural. That is either the redefinition is
+  // structural or a superclass is.
+  std::vector<bool> actually_structural_;
+  // Used to mark a redefinition as the initial structural redefinition. This redefinition will take
+  // care of updating all of its subtypes.
+  std::vector<bool> initial_structural_;
 
   art::ObjPtr<art::mirror::Object> GetSlot(jint klass_index, DataSlot slot) const
       REQUIRES_SHARED(art::Locks::mutator_lock_) {
@@ -1077,6 +1454,11 @@
     return *this;
   }
 
+  // Compat for STL iterators.
+  RedefinitionDataIter& operator*() {
+    return *this;
+  }
+
   Redefiner::ClassRedefinition& GetRedefinition() REQUIRES_SHARED(art::Locks::mutator_lock_) {
     return (*holder_.GetRedefinitions())[idx_];
   }
@@ -1118,6 +1500,36 @@
     return holder_.GetOldDexCaches(idx_);
   }
 
+  art::ObjPtr<art::mirror::Class> GetNewClassObject() const
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return holder_.GetNewClassObject(idx_);
+  }
+
+  art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> GetOldInstanceObjects() const
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return holder_.GetOldInstanceObjects(idx_);
+  }
+  art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> GetNewInstanceObjects() const
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return holder_.GetNewInstanceObjects(idx_);
+  }
+  art::ObjPtr<art::mirror::ObjectArray<art::mirror::Class>> GetOldClasses() const
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return holder_.GetOldClasses(idx_);
+  }
+  art::ObjPtr<art::mirror::ObjectArray<art::mirror::Class>> GetNewClasses() const
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return holder_.GetNewClasses(idx_);
+  }
+  bool IsInitialized() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return holder_.IsInitialized(idx_);
+  }
+  bool IsActuallyStructural() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return holder_.IsActuallyStructural(idx_);
+  }
+  bool IsInitialStructural() const REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return holder_.IsInitialStructural(idx_);
+  }
   int32_t GetIndex() const {
     return idx_;
   }
@@ -1154,6 +1566,35 @@
       REQUIRES_SHARED(art::Locks::mutator_lock_) {
     holder_.SetOldDexCaches(idx_, caches);
   }
+  void SetNewClassObject(art::ObjPtr<art::mirror::Class> klass)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    holder_.SetNewClassObject(idx_, klass);
+  }
+  void SetOldInstanceObjects(art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> objs)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    holder_.SetOldInstanceObjects(idx_, objs);
+  }
+  void SetNewInstanceObjects(art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> objs)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    holder_.SetNewInstanceObjects(idx_, objs);
+  }
+  void SetOldClasses(art::ObjPtr<art::mirror::ObjectArray<art::mirror::Class>> klasses)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    holder_.SetOldClasses(idx_, klasses);
+  }
+  void SetNewClasses(art::ObjPtr<art::mirror::ObjectArray<art::mirror::Class>> klasses)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    holder_.SetNewClasses(idx_, klasses);
+  }
+  void SetInitialized() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    holder_.SetInitialized(idx_);
+  }
+  void SetActuallyStructural() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    holder_.SetActuallyStructural(idx_);
+  }
+  void SetInitialStructural() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    holder_.SetInitialStructural(idx_);
+  }
 
  private:
   int32_t idx_;
@@ -1187,7 +1628,15 @@
                                                 &error);
   switch (failure) {
     case art::verifier::FailureKind::kNoFailure:
+      // TODO It is possible that by doing redefinition previous NO_COMPILE verification failures
+      // were fixed. It would be nice to reflect this in the new implementations.
+      return true;
     case art::verifier::FailureKind::kSoftFailure:
+    case art::verifier::FailureKind::kAccessChecksFailure:
+      // Soft failures might require interpreter on some methods. It won't prevent redefinition but
+      // it does mean we need to run the verifier again and potentially update method flags after
+      // performing the swap.
+      needs_reverify_ = true;
       return true;
     case art::verifier::FailureKind::kHardFailure: {
       RecordFailure(ERR(FAILS_VERIFICATION), "Failed to verify class. Error was: " + error);
@@ -1249,7 +1698,114 @@
   return true;
 }
 
-bool Redefiner::ClassRedefinition::FinishRemainingAllocations(
+bool CompareClasses(art::ObjPtr<art::mirror::Class> l, art::ObjPtr<art::mirror::Class> r)
+    REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  auto parents = [](art::ObjPtr<art::mirror::Class> c) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    uint32_t res = 0;
+    while (!c->IsObjectClass()) {
+      res++;
+      c = c->GetSuperClass();
+    }
+    return res;
+  };
+  return parents(l.Ptr()) < parents(r.Ptr());
+}
+
+bool Redefiner::ClassRedefinition::CollectAndCreateNewInstances(
+    /*out*/ RedefinitionDataIter* cur_data) {
+  if (!cur_data->IsInitialStructural()) {
+    // An earlier structural redefinition already remade all the instances.
+    return true;
+  }
+  art::gc::Heap* heap = driver_->runtime_->GetHeap();
+  art::VariableSizedHandleScope hs(driver_->self_);
+  art::Handle<art::mirror::Class> old_klass(hs.NewHandle(cur_data->GetMirrorClass()));
+  std::vector<art::Handle<art::mirror::Object>> old_instances;
+  auto is_instance = [&](art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return obj->InstanceOf(old_klass.Get());
+  };
+  heap->VisitObjects([&](art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    if (is_instance(obj)) {
+      old_instances.push_back(hs.NewHandle(obj));
+    }
+  });
+  VLOG(plugin) << "Collected " << old_instances.size() << " instances to recreate!";
+  art::Handle<art::mirror::ObjectArray<art::mirror::Class>> old_classes_arr(
+      hs.NewHandle(cur_data->GetOldClasses()));
+  art::Handle<art::mirror::ObjectArray<art::mirror::Class>> new_classes_arr(
+      hs.NewHandle(cur_data->GetNewClasses()));
+  DCHECK_EQ(old_classes_arr->GetLength(), new_classes_arr->GetLength());
+  DCHECK_GT(old_classes_arr->GetLength(), 0);
+  art::Handle<art::mirror::Class> obj_array_class(
+      hs.NewHandle(art::GetClassRoot<art::mirror::ObjectArray<art::mirror::Object>>(
+          driver_->runtime_->GetClassLinker())));
+  art::Handle<art::mirror::ObjectArray<art::mirror::Object>> old_instances_arr(
+      hs.NewHandle(art::mirror::ObjectArray<art::mirror::Object>::Alloc(
+          driver_->self_, obj_array_class.Get(), old_instances.size())));
+  if (old_instances_arr.IsNull()) {
+    driver_->self_->AssertPendingOOMException();
+    driver_->self_->ClearException();
+    RecordFailure(ERR(OUT_OF_MEMORY), "Could not allocate old_instance arrays!");
+    return false;
+  }
+  for (uint32_t i = 0; i < old_instances.size(); ++i) {
+    old_instances_arr->Set(i, old_instances[i].Get());
+  }
+  cur_data->SetOldInstanceObjects(old_instances_arr.Get());
+
+  art::Handle<art::mirror::ObjectArray<art::mirror::Object>> new_instances_arr(
+      hs.NewHandle(art::mirror::ObjectArray<art::mirror::Object>::Alloc(
+          driver_->self_, obj_array_class.Get(), old_instances.size())));
+  if (new_instances_arr.IsNull()) {
+    driver_->self_->AssertPendingOOMException();
+    driver_->self_->ClearException();
+    RecordFailure(ERR(OUT_OF_MEMORY), "Could not allocate new_instance arrays!");
+    return false;
+  }
+  for (auto pair : art::ZipCount(art::IterationRange(old_instances.begin(), old_instances.end()))) {
+    art::Handle<art::mirror::Object> hinstance(pair.first);
+    int32_t i = pair.second;
+    auto iterator = art::ZipLeft(old_classes_arr.Iterate<art::mirror::Class>(),
+                                 new_classes_arr.Iterate<art::mirror::Class>());
+    auto it = std::find_if(iterator.begin(),
+                           iterator.end(),
+                           [&](auto class_pair) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+                             return class_pair.first == hinstance->GetClass();
+                           });
+    DCHECK(it != iterator.end()) << "Unable to find class pair for "
+                                 << hinstance->GetClass()->PrettyClass() << " (instance " << i
+                                 << ")";
+    auto [_, new_type] = *it;
+    // Make sure when allocating the new instance we don't add its finalizer since we will directly
+    // replace the old object in the finalizer reference. If we added it here too, we would call
+    // finalize twice.
+    // NB If a type is changed from being non-finalizable to finalizable the finalizers on any
+    //    objects created before the redefine will never be called. This is (sort of) allowable by
+    //    the spec and greatly simplifies implementation.
+    // TODO Make it so we will always call all finalizers, even if the object when it was created
+    // wasn't finalizable. To do this we need to be careful of handling failure correctly and making
+    // sure that objects aren't finalized multiple times and that instances of failed redefinitions
+    // aren't finalized.
+    art::ObjPtr<art::mirror::Object> new_instance(
+        new_type->Alloc</*kIsInstrumented=*/true,
+                        art::mirror::Class::AddFinalizer::kNoAddFinalizer,
+                        /*kCheckAddFinalizer=*/false>(
+            driver_->self_, driver_->runtime_->GetHeap()->GetCurrentAllocator()));
+    if (new_instance.IsNull()) {
+      driver_->self_->AssertPendingOOMException();
+      driver_->self_->ClearException();
+      std::string msg(
+          StringPrintf("Could not allocate instance %d of %zu", i, old_instances.size()));
+      RecordFailure(ERR(OUT_OF_MEMORY), msg);
+      return false;
+    }
+    new_instances_arr->Set(i, new_instance);
+  }
+  cur_data->SetNewInstanceObjects(new_instances_arr.Get());
+  return true;
+}
+
+bool Redefiner::ClassRedefinition::FinishRemainingCommonAllocations(
     /*out*/RedefinitionDataIter* cur_data) {
   art::ScopedObjectAccessUnchecked soa(driver_->self_);
   art::StackHandleScope<2> hs(driver_->self_);
@@ -1293,23 +1849,311 @@
   return true;
 }
 
+bool Redefiner::ClassRedefinition::FinishNewClassAllocations(RedefinitionDataHolder &holder,
+                                                             RedefinitionDataIter *cur_data) {
+  if (cur_data->IsInitialized() || !cur_data->IsActuallyStructural()) {
+    cur_data->SetInitialized();
+    return true;
+  }
+
+  art::VariableSizedHandleScope hs(driver_->self_);
+  // If we weren't the lowest structural redef the superclass would have already initialized us.
+  CHECK(IsStructuralRedefinition());
+  CHECK(cur_data->IsInitialStructural()) << "Should have already been initialized by supertype";
+  auto setup_single_redefinition =
+      [this](RedefinitionDataIter* data, art::Handle<art::mirror::Class> super_class)
+          REQUIRES_SHARED(art::Locks::mutator_lock_) -> art::ObjPtr<art::mirror::Class> {
+    art::StackHandleScope<3> chs(driver_->self_);
+    art::Handle<art::mirror::Class> nc(
+        chs.NewHandle(AllocateNewClassObject(chs.NewHandle(data->GetMirrorClass()),
+                                             super_class,
+                                             chs.NewHandle(data->GetNewDexCache()),
+                                             /*dex_class_def_index*/ 0)));
+    if (nc.IsNull()) {
+      return nullptr;
+    }
+
+    data->SetNewClassObject(nc.Get());
+    // We really want to be able to resolve to the new class-object using this dex-cache for
+    // We really want to be able to resolve to the new class-object using this dex-cache for
+    // verification work. Since we haven't put it in the class-table yet we will just manually add
+    // TODO: We should maybe do this in a better spot.
+    data->GetNewDexCache()->SetResolvedType(nc->GetDexTypeIndex(), nc.Get());
+    data->SetInitialized();
+    return nc.Get();
+  };
+
+  std::vector<art::Handle<art::mirror::Class>> old_types;
+  {
+    art::gc::Heap* heap = driver_->runtime_->GetHeap();
+    art::Handle<art::mirror::Class>
+        old_klass(hs.NewHandle(cur_data->GetMirrorClass()));
+    if (setup_single_redefinition(cur_data, hs.NewHandle(old_klass->GetSuperClass())).IsNull()) {
+      return false;
+    }
+    auto is_subtype = [&](art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      // We've already waited for class defines to be finished and paused them. All classes should be
+      // either resolved or error. We don't need to do anything with error classes, since they cannot
+      // be accessed in any observable way.
+      return obj->IsClass() && obj->AsClass()->IsResolved() &&
+            old_klass->IsAssignableFrom(obj->AsClass());
+    };
+    heap->VisitObjects([&](art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      if (is_subtype(obj)) {
+        old_types.push_back(hs.NewHandle(obj->AsClass()));
+      }
+    });
+    DCHECK_GT(old_types.size(), 0u) << "Expected to find at least old_klass!";
+    VLOG(plugin) << "Found " << old_types.size() << " types that are/are subtypes of "
+                << old_klass->PrettyClass();
+  }
+
+  art::Handle<art::mirror::Class> cls_array_class(
+      hs.NewHandle(art::GetClassRoot<art::mirror::ObjectArray<art::mirror::Class>>(
+          driver_->runtime_->GetClassLinker())));
+  art::Handle<art::mirror::ObjectArray<art::mirror::Class>> old_classes_arr(
+      hs.NewHandle(art::mirror::ObjectArray<art::mirror::Class>::Alloc(
+          driver_->self_, cls_array_class.Get(), old_types.size())));
+  if (old_classes_arr.IsNull()) {
+    driver_->self_->AssertPendingOOMException();
+    driver_->self_->ClearException();
+    RecordFailure(ERR(OUT_OF_MEMORY), "Could not allocate old_classes arrays!");
+    return false;
+  }
+  // Sort the old_types topologically.
+  {
+    art::ScopedAssertNoThreadSuspension sants("Sort classes");
+    // Sort them by the distance to the base-class. This ensures that any class occurs before any of
+    // its subtypes.
+    std::sort(old_types.begin(),
+              old_types.end(),
+              [](auto& l, auto& r) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+                return CompareClasses(l.Get(), r.Get());
+              });
+  }
+  for (uint32_t i = 0; i < old_types.size(); ++i) {
+    DCHECK(!old_types[i].IsNull()) << i;
+    old_classes_arr->Set(i, old_types[i].Get());
+  }
+  cur_data->SetOldClasses(old_classes_arr.Get());
+  DCHECK_GT(old_classes_arr->GetLength(), 0);
+
+  art::Handle<art::mirror::ObjectArray<art::mirror::Class>> new_classes_arr(
+      hs.NewHandle(art::mirror::ObjectArray<art::mirror::Class>::Alloc(
+          driver_->self_, cls_array_class.Get(), old_types.size())));
+  if (new_classes_arr.IsNull()) {
+    driver_->self_->AssertPendingOOMException();
+    driver_->self_->ClearException();
+    RecordFailure(ERR(OUT_OF_MEMORY), "Could not allocate new_classes arrays!");
+    return false;
+  }
+
+  art::MutableHandle<art::mirror::DexCache> dch(hs.NewHandle<art::mirror::DexCache>(nullptr));
+  art::MutableHandle<art::mirror::Class> superclass(hs.NewHandle<art::mirror::Class>(nullptr));
+  for (size_t i = 0; i < old_types.size(); i++) {
+    art::Handle<art::mirror::Class>& old_type = old_types[i];
+    if (old_type.Get() == cur_data->GetMirrorClass()) {
+      CHECK_EQ(i, 0u) << "original class not at index 0. Bad sort!";
+      new_classes_arr->Set(i, cur_data->GetNewClassObject());
+      continue;
+    } else {
+      auto old_super = std::find_if(old_types.begin(),
+                                    old_types.begin() + i,
+                                    [&](art::Handle<art::mirror::Class>& v)
+                                        REQUIRES_SHARED(art::Locks::mutator_lock_) {
+                                          return v.Get() == old_type->GetSuperClass();
+                                        });
+      // Only the GetMirrorClass should not be in this list.
+      CHECK(old_super != old_types.begin() + i)
+          << "from first " << i << " could not find super of " << old_type->PrettyClass()
+          << " expected to find " << old_type->GetSuperClass()->PrettyClass();
+      superclass.Assign(new_classes_arr->Get(std::distance(old_types.begin(), old_super)));
+      auto new_redef = std::find_if(
+          *cur_data + 1, holder.end(), [&](auto it) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+            return it.GetMirrorClass() == old_type.Get();
+          });
+      art::ObjPtr<art::mirror::Class> new_type;
+      if (new_redef == holder.end()) {
+        // We aren't also redefining this subclass. Just allocate a new class and continue.
+        dch.Assign(old_type->GetDexCache());
+        new_type =
+            AllocateNewClassObject(old_type, superclass, dch, old_type->GetDexClassDefIndex());
+      } else {
+        // This subclass is also being redefined. We need to use its new dex-file to load the new
+        // class.
+        CHECK(new_redef.IsActuallyStructural());
+        CHECK(!new_redef.IsInitialStructural());
+        new_type = setup_single_redefinition(&new_redef, superclass);
+      }
+      if (new_type == nullptr) {
+        VLOG(plugin) << "Failed to load new version of class " << old_type->PrettyClass()
+                     << " for structural redefinition!";
+        return false;
+      }
+      new_classes_arr->Set(i, new_type);
+    }
+  }
+  cur_data->SetNewClasses(new_classes_arr.Get());
+  return true;
+}
+
+uint32_t Redefiner::ClassRedefinition::GetNewClassSize(art::ClassAccessor& accessor) {
+  uint32_t num_8bit_static_fields = 0;
+  uint32_t num_16bit_static_fields = 0;
+  uint32_t num_32bit_static_fields = 0;
+  uint32_t num_64bit_static_fields = 0;
+  uint32_t num_ref_static_fields = 0;
+  for (const art::ClassAccessor::Field& f : accessor.GetStaticFields()) {
+    std::string_view desc(accessor.GetDexFile().GetFieldTypeDescriptor(
+        accessor.GetDexFile().GetFieldId(f.GetIndex())));
+    if (desc[0] == 'L' || desc[0] == '[') {
+      num_ref_static_fields++;
+    } else if (desc == "Z" || desc == "B") {
+      num_8bit_static_fields++;
+    } else if (desc == "C" || desc == "S") {
+      num_16bit_static_fields++;
+    } else if (desc == "I" || desc == "F") {
+      num_32bit_static_fields++;
+    } else if (desc == "J" || desc == "D") {
+      num_64bit_static_fields++;
+    } else {
+      LOG(FATAL) << "Unknown type descriptor! " << desc;
+    }
+  }
+
+  return art::mirror::Class::ComputeClassSize(/*has_embedded_vtable=*/ false,
+                                              /*num_vtable_entries=*/ 0,
+                                              num_8bit_static_fields,
+                                              num_16bit_static_fields,
+                                              num_32bit_static_fields,
+                                              num_64bit_static_fields,
+                                              num_ref_static_fields,
+                                              art::kRuntimePointerSize);
+}
+
+art::ObjPtr<art::mirror::Class>
+Redefiner::ClassRedefinition::AllocateNewClassObject(art::Handle<art::mirror::DexCache> cache) {
+  art::StackHandleScope<2> hs(driver_->self_);
+  art::Handle<art::mirror::Class> old_class(hs.NewHandle(GetMirrorClass()));
+  art::Handle<art::mirror::Class> super_class(hs.NewHandle(old_class->GetSuperClass()));
+  return AllocateNewClassObject(old_class, super_class, cache, /*dex_class_def_index*/0);
+}
+
+art::ObjPtr<art::mirror::Class> Redefiner::ClassRedefinition::AllocateNewClassObject(
+    art::Handle<art::mirror::Class> old_class,
+    art::Handle<art::mirror::Class> super_class,
+    art::Handle<art::mirror::DexCache> cache,
+    uint16_t dex_class_def_index) {
+  // This is a stripped down DefineClass. We don't want to use DefineClass directly because it needs
+  // to perform a lot of extra steps to tell the ClassTable and the jit and everything about a new
+  // class. For now we will need to rely on our tests catching any issues caused by changes in how
+  // class_linker sets up classes.
+  // TODO Unify/move this into ClassLinker maybe.
+  art::StackHandleScope<3> hs(driver_->self_);
+  art::ClassLinker* linker = driver_->runtime_->GetClassLinker();
+  const art::DexFile* dex_file = cache->GetDexFile();
+  art::ClassAccessor accessor(*dex_file, dex_class_def_index);
+  art::Handle<art::mirror::Class> new_class(hs.NewHandle(linker->AllocClass(
+      driver_->self_, GetNewClassSize(accessor))));
+  if (new_class.IsNull()) {
+    driver_->self_->AssertPendingOOMException();
+    RecordFailure(
+        ERR(OUT_OF_MEMORY),
+        "Unable to allocate class object for redefinition of " + old_class->PrettyClass());
+    driver_->self_->ClearException();
+    return nullptr;
+  }
+  new_class->SetDexCache(cache.Get());
+  linker->SetupClass(*dex_file,
+                     dex_file->GetClassDef(dex_class_def_index),
+                     new_class,
+                     old_class->GetClassLoader());
+
+  // Make sure we are ready for linking. The lock isn't really needed since this isn't visible to
+  // other threads but the linker expects it.
+  art::ObjectLock<art::mirror::Class> lock(driver_->self_, new_class);
+  new_class->SetClinitThreadId(driver_->self_->GetTid());
+  // Make sure we have a valid empty iftable even if there are errors.
+  new_class->SetIfTable(art::GetClassRoot<art::mirror::Object>(linker)->GetIfTable());
+  linker->LoadClass(
+      driver_->self_, *dex_file, dex_file->GetClassDef(dex_class_def_index), new_class);
+  // NB. We know the interfaces and supers didn't change! :)
+  art::MutableHandle<art::mirror::Class> linked_class(hs.NewHandle<art::mirror::Class>(nullptr));
+  art::Handle<art::mirror::ObjectArray<art::mirror::Class>> proxy_ifaces(
+      hs.NewHandle<art::mirror::ObjectArray<art::mirror::Class>>(nullptr));
+  // No changing hierarchy so everything is loaded.
+  new_class->SetSuperClass(super_class.Get());
+  art::mirror::Class::SetStatus(new_class, art::ClassStatus::kLoaded, nullptr);
+  if (!linker->LinkClass(driver_->self_, nullptr, new_class, proxy_ifaces, &linked_class)) {
+    std::ostringstream oss;
+    oss << "failed to link class due to "
+        << (driver_->self_->IsExceptionPending() ? driver_->self_->GetException()->Dump()
+                                                 : " unknown");
+    RecordFailure(ERR(INTERNAL), oss.str());
+    driver_->self_->ClearException();
+    return nullptr;
+  }
+  // Everything is already resolved.
+  art::ObjectLock<art::mirror::Class> objlock(driver_->self_, linked_class);
+  // Mark the class as initialized.
+  CHECK(old_class->IsResolved())
+      << "Attempting to redefine an unresolved class " << old_class->PrettyClass()
+      << " status=" << old_class->GetStatus();
+  CHECK(linked_class->IsResolved());
+  if (old_class->WasVerificationAttempted()) {
+    // Match verification-attempted flag
+    linked_class->SetVerificationAttempted();
+  }
+  if (old_class->ShouldSkipHiddenApiChecks()) {
+    // Match skip hiddenapi flag
+    linked_class->SetSkipHiddenApiChecks();
+  }
+  if (old_class->IsInitialized()) {
+    // We already verified the class earlier. No need to do it again.
+    linker->ForceClassInitialized(driver_->self_, linked_class);
+  } else if (old_class->GetStatus() > linked_class->GetStatus()) {
+    // We want to match the old status.
+    art::mirror::Class::SetStatus(linked_class, old_class->GetStatus(), driver_->self_);
+  }
+  // Make sure we have ext-data space for method & field ids. We won't know if we need them until
+  // it's too late to create them.
+  // TODO We might want to remove these arrays if they're not needed.
+  if (!art::mirror::Class::EnsureInstanceFieldIds(linked_class) ||
+      !art::mirror::Class::EnsureStaticFieldIds(linked_class) ||
+      !art::mirror::Class::EnsureMethodIds(linked_class)) {
+    driver_->self_->AssertPendingOOMException();
+    driver_->self_->ClearException();
+    RecordFailure(
+        ERR(OUT_OF_MEMORY),
+        "Unable to allocate jni-id arrays for redefinition of " + old_class->PrettyClass());
+    return nullptr;
+  }
+  // Finish setting up methods.
+  linked_class->VisitMethods([&](art::ArtMethod* m) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    linker->SetEntryPointsToInterpreter(m);
+    m->SetNotIntrinsic();
+    DCHECK(m->IsCopied() || m->GetDeclaringClass() == linked_class.Get())
+        << m->PrettyMethod()
+        << " m->GetDeclaringClass(): " << m->GetDeclaringClass()->PrettyClass()
+        << " != linked_class.Get(): " << linked_class->PrettyClass();
+  }, art::kRuntimePointerSize);
+  if (art::kIsDebugBuild) {
+    linked_class->VisitFields([&](art::ArtField* f) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      DCHECK_EQ(f->GetDeclaringClass(), linked_class.Get());
+    });
+  }
+  // Reset ClinitThreadId back to the thread that loaded the old class. This is needed if we are in
+  // the middle of initializing a class.
+  linked_class->SetClinitThreadId(old_class->GetClinitThreadId());
+  return linked_class.Get();
+}
+
 void Redefiner::ClassRedefinition::UnregisterJvmtiBreakpoints() {
   BreakpointUtil::RemoveBreakpointsInClass(driver_->env_, GetMirrorClass().Ptr());
 }
 
-void Redefiner::ClassRedefinition::UnregisterBreakpoints() {
-  if (LIKELY(!art::Dbg::IsDebuggerActive())) {
-    return;
-  }
-  art::JDWP::JdwpState* state = art::Dbg::GetJdwpState();
-  if (state != nullptr) {
-    state->UnregisterLocationEventsOnClass(GetMirrorClass());
-  }
-}
-
 void Redefiner::UnregisterAllBreakpoints() {
   for (Redefiner::ClassRedefinition& redef : redefinitions_) {
-    redef.UnregisterBreakpoints();
     redef.UnregisterJvmtiBreakpoints();
   }
 }
@@ -1329,6 +2173,25 @@
   }
 }
 
+void Redefiner::MarkStructuralChanges(RedefinitionDataHolder& holder) {
+  for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
+    if (data.IsActuallyStructural()) {
+      // A superclass was structural and it marked all subclasses already. No need to do anything.
+      CHECK(!data.IsInitialStructural());
+    } else if (data.GetRedefinition().IsStructuralRedefinition()) {
+      data.SetActuallyStructural();
+      data.SetInitialStructural();
+      // Go over all potential subtypes and mark any that are actually subclasses as structural.
+      for (RedefinitionDataIter sub_data = data + 1; sub_data != holder.end(); ++sub_data) {
+        if (sub_data.GetRedefinition().GetMirrorClass()->IsSubClass(
+                data.GetRedefinition().GetMirrorClass())) {
+          sub_data.SetActuallyStructural();
+        }
+      }
+    }
+  }
+}
+
 bool Redefiner::EnsureAllClassAllocationsFinished(RedefinitionDataHolder& holder) {
   for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
     if (!data.GetRedefinition().EnsureClassAllocationsFinished(&data)) {
@@ -1338,10 +2201,30 @@
   return true;
 }
 
-bool Redefiner::FinishAllRemainingAllocations(RedefinitionDataHolder& holder) {
+bool Redefiner::CollectAndCreateNewInstances(RedefinitionDataHolder& holder) {
   for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
     // Allocate the data this redefinition requires.
-    if (!data.GetRedefinition().FinishRemainingAllocations(&data)) {
+    if (!data.GetRedefinition().CollectAndCreateNewInstances(&data)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool Redefiner::FinishAllNewClassAllocations(RedefinitionDataHolder& holder) {
+  for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
+    // Allocate the data this redefinition requires.
+    if (!data.GetRedefinition().FinishNewClassAllocations(holder, &data)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool Redefiner::FinishAllRemainingCommonAllocations(RedefinitionDataHolder& holder) {
+  for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
+    // Allocate the data this redefinition requires.
+    if (!data.GetRedefinition().FinishRemainingCommonAllocations(&data)) {
       return false;
     }
   }
@@ -1386,10 +2269,191 @@
   art::Thread* self_;
 };
 
+class ClassDefinitionPauser : public art::ClassLoadCallback {
+ public:
+  explicit ClassDefinitionPauser(art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_)
+      : self_(self),
+        is_running_(false),
+        barrier_(0),
+        release_mu_("SuspendClassDefinition lock", art::kGenericBottomLock),
+        release_barrier_(0),
+        release_cond_("SuspendClassDefinition condvar", release_mu_),
+        count_(0),
+        release_(false) {
+    art::Locks::mutator_lock_->AssertSharedHeld(self_);
+  }
+  ~ClassDefinitionPauser() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    art::Locks::mutator_lock_->AssertSharedHeld(self_);
+    CHECK(release_) << "Must call Release()";
+  }
+  void Release() REQUIRES(art::Locks::mutator_lock_) {
+    if (is_running_) {
+      art::Locks::mutator_lock_->AssertExclusiveHeld(self_);
+      uint32_t count;
+      // Wake up everything.
+      {
+        art::MutexLock mu(self_, release_mu_);
+        release_ = true;
+        // We have an exclusive mutator so all threads must be suspended and therefore they've
+        // either already incremented this count_ or they are stuck somewhere before it.
+        count = count_;
+        release_cond_.Broadcast(self_);
+      }
+      // Wait for all threads to leave this structs code.
+      VLOG(plugin) << "Resuming " << count << " threads paused before class-allocation!";
+      release_barrier_.Increment</*locks=*/art::Barrier::kAllowHoldingLocks>(self_, count);
+    } else {
+      release_ = true;
+    }
+  }
+  void BeginDefineClass() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    art::Thread* this_thread = art::Thread::Current();
+    if (this_thread == self_) {
+      // Allow the redefining thread to do whatever.
+      return;
+    }
+    if (this_thread->GetDefineClassCount() != 0) {
+      // We are in the middle of a recursive define-class. Don't suspend now; allow it to finish.
+      VLOG(plugin) << "Recursive DefineClass in " << *this_thread
+                   << " allowed to proceed despite class-def pause initiated by " << *self_;
+      return;
+    }
+    // If we are suspended (no mutator-lock) then the pausing thread could do everything before the
+    // count_++ including destroying this object, causing UAF/deadlock.
+    art::Locks::mutator_lock_->AssertSharedHeld(this_thread);
+    ++count_;
+    art::ScopedThreadSuspension sts(this_thread, art::ThreadState::kSuspended);
+    {
+      art::MutexLock mu(this_thread, release_mu_);
+      VLOG(plugin) << "Suspending " << *this_thread << " due to class definition. class-def pause "
+                   << "initiated by " << *self_;
+      while (!release_) {
+        release_cond_.Wait(this_thread);
+      }
+    }
+    release_barrier_.Pass(this_thread);
+  }
+
+  void EndDefineClass() override REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    art::Thread* this_thread = art::Thread::Current();
+    if (this_thread == self_) {
+      // Allow the redefining thread to do whatever.
+      return;
+    }
+    if (this_thread->GetDefineClassCount() == 0) {
+      // We are done with defining classes.
+      barrier_.Pass(this_thread);
+    }
+  }
+
+  void ClassLoad(art::Handle<art::mirror::Class> klass ATTRIBUTE_UNUSED) override {}
+  void ClassPrepare(art::Handle<art::mirror::Class> klass1 ATTRIBUTE_UNUSED,
+                    art::Handle<art::mirror::Class> klass2 ATTRIBUTE_UNUSED) override {}
+
+  void SetRunning() {
+    is_running_ = true;
+  }
+  void WaitFor(uint32_t t) REQUIRES(!art::Locks::mutator_lock_) {
+    barrier_.Increment(self_, t);
+  }
+
+ private:
+  art::Thread* self_;
+  bool is_running_;
+  art::Barrier barrier_;
+  art::Mutex release_mu_;
+  art::Barrier release_barrier_;
+  art::ConditionVariable release_cond_;
+  std::atomic<uint32_t> count_;
+  bool release_;
+};
+
+class ScopedSuspendClassLoading {
+ public:
+  ScopedSuspendClassLoading(art::Thread* self, art::Runtime* runtime, RedefinitionDataHolder& h)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      : self_(self), runtime_(runtime), pauser_() {
+    if (std::any_of(h.begin(), h.end(), [](auto r) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+          return r.GetRedefinition().IsStructuralRedefinition();
+        })) {
+      VLOG(plugin) << "Pausing Class loading for structural redefinition.";
+      pauser_.emplace(self);
+      {
+        art::ScopedThreadSuspension sts(self_, art::ThreadState::kNative);
+        uint32_t in_progress_defines = 0;
+        {
+          art::ScopedSuspendAll ssa(__FUNCTION__);
+          pauser_->SetRunning();
+          runtime_->GetRuntimeCallbacks()->AddClassLoadCallback(&pauser_.value());
+          art::MutexLock mu(self_, *art::Locks::thread_list_lock_);
+          runtime_->GetThreadList()->ForEach([&](art::Thread* t) {
+            if (t != self_ && t->GetDefineClassCount() != 0) {
+              in_progress_defines++;
+            }
+          });
+          VLOG(plugin) << "Waiting for " << in_progress_defines
+                       << " in progress class-loads to finish";
+        }
+        pauser_->WaitFor(in_progress_defines);
+      }
+    }
+  }
+  ~ScopedSuspendClassLoading() {
+    if (pauser_.has_value()) {
+      art::ScopedThreadSuspension sts(self_, art::ThreadState::kNative);
+      art::ScopedSuspendAll ssa(__FUNCTION__);
+      pauser_->Release();
+      runtime_->GetRuntimeCallbacks()->RemoveClassLoadCallback(&pauser_.value());
+    }
+  }
+
+ private:
+  art::Thread* self_;
+  art::Runtime* runtime_;
+  std::optional<ClassDefinitionPauser> pauser_;
+};
+
+class ScopedSuspendAllocations {
+ public:
+  ScopedSuspendAllocations(art::Runtime* runtime, RedefinitionDataHolder& h)
+      REQUIRES_SHARED(art::Locks::mutator_lock_)
+      : paused_(false) {
+    if (std::any_of(h.begin(),
+                    h.end(),
+                    [](auto r) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+                      return r.GetRedefinition().IsStructuralRedefinition();
+                    })) {
+      VLOG(plugin) << "Pausing allocations for structural redefinition.";
+      paused_ = true;
+      AllocationManager::Get()->PauseAllocations(art::Thread::Current());
+      // Collect garbage so we don't need to recreate as much.
+      runtime->GetHeap()->CollectGarbage(/*clear_soft_references=*/false);
+    }
+  }
+
+  ~ScopedSuspendAllocations() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    if (paused_) {
+      AllocationManager::Get()->ResumeAllocations(art::Thread::Current());
+    }
+  }
+
+ private:
+  bool paused_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedSuspendAllocations);
+};
+
 jvmtiError Redefiner::Run() {
   art::StackHandleScope<1> hs(self_);
-  // Allocate an array to hold onto all java temporary objects associated with this redefinition.
-  // We will let this be collected after the end of this function.
+  // Sort the redefinitions_ array topologically by class. This makes later steps easier since we
+  // know that every class precedes all of its supertypes.
+  std::sort(redefinitions_.begin(),
+            redefinitions_.end(),
+            [&](auto& l, auto& r) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+              return CompareClasses(l.GetMirrorClass(), r.GetMirrorClass());
+            });
+  // Allocate an array to hold onto all java temporary objects associated with this
+  // redefinition. We will let this be collected after the end of this function.
   RedefinitionDataHolder holder(&hs, runtime_, self_, &redefinitions_);
   if (holder.IsNull()) {
     self_->AssertPendingOOMException();
@@ -1399,58 +2463,96 @@
   }
 
   // First we just allocate the ClassExt and its fields that we need. These can be updated
-  // atomically without any issues (since we allocate the map arrays as empty) so we don't bother
-  // doing a try loop. The other allocations we need to ensure that nothing has changed in the time
-  // between allocating them and pausing all threads before we can update them so we need to do a
-  // try loop.
-  if (!CheckAllRedefinitionAreValid() ||
-      !EnsureAllClassAllocationsFinished(holder) ||
-      !FinishAllRemainingAllocations(holder) ||
+  // atomically without any issues (since we allocate the map arrays as empty).
+  if (!CheckAllRedefinitionAreValid()) {
+    return result_;
+  }
+  // Mark structural changes.
+  MarkStructuralChanges(holder);
+  // Now we pause class loading. If we are doing a structural redefinition we will need to get an
+  // accurate picture of the classes loaded and having loads in the middle would make that
+  // impossible. This only pauses class-loading if we actually have at least one structural
+  // redefinition.
+  ScopedSuspendClassLoading suspend_class_load(self_, runtime_, holder);
+  if (!EnsureAllClassAllocationsFinished(holder) ||
+      !FinishAllRemainingCommonAllocations(holder) ||
+      !FinishAllNewClassAllocations(holder) ||
       !CheckAllClassesAreVerified(holder)) {
     return result_;
   }
 
+  ScopedSuspendAllocations suspend_alloc(runtime_, holder);
+  if (!CollectAndCreateNewInstances(holder)) {
+    return result_;
+  }
+
   // At this point we can no longer fail without corrupting the runtime state.
   for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
     art::ClassLinker* cl = runtime_->GetClassLinker();
     cl->RegisterExistingDexCache(data.GetNewDexCache(), data.GetSourceClassLoader());
     if (data.GetSourceClassLoader() == nullptr) {
-      cl->AppendToBootClassPath(self_, data.GetRedefinition().GetDexFile());
+      cl->AppendToBootClassPath(self_, &data.GetRedefinition().GetDexFile());
     }
   }
   UnregisterAllBreakpoints();
 
-  // Disable GC and wait for it to be done if we are a moving GC.  This is fine since we are done
-  // allocating so no deadlocks.
-  ScopedDisableConcurrentAndMovingGc sdcamgc(runtime_->GetHeap(), self_);
+  {
+    // Disable GC and wait for it to be done if we are a moving GC.  This is fine since we are done
+    // allocating so no deadlocks.
+    ScopedDisableConcurrentAndMovingGc sdcamgc(runtime_->GetHeap(), self_);
 
-  // Do transition to final suspension
-  // TODO We might want to give this its own suspended state!
-  // TODO This isn't right. We need to change state without any chance of suspend ideally!
-  art::ScopedThreadSuspension sts(self_, art::ThreadState::kNative);
-  art::ScopedSuspendAll ssa("Final installation of redefined Classes!", /*long_suspend=*/true);
-  for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
-    art::ScopedAssertNoThreadSuspension nts("Updating runtime objects for redefinition");
-    ClassRedefinition& redef = data.GetRedefinition();
-    if (data.GetSourceClassLoader() != nullptr) {
-      ClassLoaderHelper::UpdateJavaDexFile(data.GetJavaDexFile(), data.GetNewDexFileCookie());
+    // Do transition to final suspension
+    // TODO We might want to give this its own suspended state!
+    // TODO This isn't right. We need to change state without any chance of suspend ideally!
+    art::ScopedThreadSuspension sts(self_, art::ThreadState::kNative);
+    art::ScopedSuspendAll ssa("Final installation of redefined Classes!", /*long_suspend=*/true);
+    for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
+      art::ScopedAssertNoThreadSuspension nts("Updating runtime objects for redefinition");
+      ClassRedefinition& redef = data.GetRedefinition();
+      if (data.GetSourceClassLoader() != nullptr) {
+        ClassLoaderHelper::UpdateJavaDexFile(data.GetJavaDexFile(), data.GetNewDexFileCookie());
+      }
+      redef.UpdateClass(data);
     }
-    art::ObjPtr<art::mirror::Class> klass = data.GetMirrorClass();
-    // TODO Rewrite so we don't do a stack walk for each and every class.
-    redef.FindAndAllocateObsoleteMethods(klass);
-    redef.UpdateClass(klass, data.GetNewDexCache(), data.GetOriginalDexFile());
+    RestoreObsoleteMethodMapsIfUnneeded(holder);
+    // TODO We should check for if any of the redefined methods are intrinsic methods here and, if
+    // any are, force a full-world deoptimization before finishing redefinition. If we don't do this
+    // then methods that have been jitted prior to the current redefinition being applied might
+    // continue to use the old versions of the intrinsics!
+    // TODO Do the dex_file release at a more reasonable place. This works but it muddles who really
+    // owns the DexFile and when ownership is transferred.
+    ReleaseAllDexFiles();
   }
-  RestoreObsoleteMethodMapsIfUnneeded(holder);
-  // TODO We should check for if any of the redefined methods are intrinsic methods here and, if any
-  // are, force a full-world deoptimization before finishing redefinition. If we don't do this then
-  // methods that have been jitted prior to the current redefinition being applied might continue
-  // to use the old versions of the intrinsics!
-  // TODO Do the dex_file release at a more reasonable place. This works but it muddles who really
-  // owns the DexFile and when ownership is transferred.
-  ReleaseAllDexFiles();
+  // By now the class-linker knows about all the classes so we can safely retry verification and
+  // update method flags.
+  ReverifyClasses(holder);
   return OK;
 }
 
+void Redefiner::ReverifyClasses(RedefinitionDataHolder& holder) {
+  for (RedefinitionDataIter data = holder.begin(); data != holder.end(); ++data) {
+    data.GetRedefinition().ReverifyClass(data);
+  }
+}
+
+void Redefiner::ClassRedefinition::ReverifyClass(const RedefinitionDataIter &cur_data) {
+  if (!needs_reverify_) {
+    return;
+  }
+  VLOG(plugin) << "Reverifying " << class_sig_ << " due to soft failures";
+  std::string error;
+  // TODO Make verification log level lower
+  art::verifier::FailureKind failure =
+      art::verifier::ClassVerifier::ReverifyClass(driver_->self_,
+                                                  cur_data.GetMirrorClass(),
+                                                  /*log_level=*/
+                                                  art::verifier::HardFailLogMode::kLogWarning,
+                                                  /*api_level=*/
+                                                  art::Runtime::Current()->GetTargetSdkVersion(),
+                                                  &error);
+  CHECK_NE(failure, art::verifier::FailureKind::kHardFailure);
+}
+
 void Redefiner::ClassRedefinition::UpdateMethods(art::ObjPtr<art::mirror::Class> mclass,
                                                  const art::dex::ClassDef& class_def) {
   art::ClassLinker* linker = driver_->runtime_->GetClassLinker();
@@ -1500,17 +2602,381 @@
       const art::dex::FieldId* new_field_id =
           dex_file_->FindFieldId(*new_declaring_id, *new_name_id, *new_type_id);
       CHECK(new_field_id != nullptr);
+      uint32_t new_field_index = dex_file_->GetIndexForFieldId(*new_field_id);
       // We only need to update the index since the other data in the ArtField cannot be updated.
-      field.SetDexFieldIndex(dex_file_->GetIndexForFieldId(*new_field_id));
+      field.SetDexFieldIndex(new_field_index);
     }
   }
 }
 
-// Performs updates to class that will allow us to verify it.
-void Redefiner::ClassRedefinition::UpdateClass(
-    art::ObjPtr<art::mirror::Class> mclass,
-    art::ObjPtr<art::mirror::DexCache> new_dex_cache,
-    art::ObjPtr<art::mirror::Object> original_dex_file) {
+void Redefiner::ClassRedefinition::CollectNewFieldAndMethodMappings(
+    const RedefinitionDataIter& data,
+    std::map<art::ArtMethod*, art::ArtMethod*>* method_map,
+    std::map<art::ArtField*, art::ArtField*>* field_map) {
+  for (auto [new_cls, old_cls] :
+       art::ZipLeft(data.GetNewClasses()->Iterate(), data.GetOldClasses()->Iterate())) {
+    for (art::ArtField& f : old_cls->GetSFields()) {
+      (*field_map)[&f] = new_cls->FindDeclaredStaticField(f.GetName(), f.GetTypeDescriptor());
+    }
+    for (art::ArtField& f : old_cls->GetIFields()) {
+      (*field_map)[&f] = new_cls->FindDeclaredInstanceField(f.GetName(), f.GetTypeDescriptor());
+    }
+    auto new_methods = new_cls->GetMethods(art::kRuntimePointerSize);
+    for (art::ArtMethod& m : old_cls->GetMethods(art::kRuntimePointerSize)) {
+      // No support for finding methods in this way since it's generally not needed. Just do it the
+      // easy way.
+      auto nm_iter = std::find_if(
+          new_methods.begin(),
+          new_methods.end(),
+          [&](art::ArtMethod& cand) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+            return cand.GetNameView() == m.GetNameView() && cand.GetSignature() == m.GetSignature();
+          });
+      CHECK(nm_iter != new_methods.end())
+          << "Could not find redefined version of " << m.PrettyMethod();
+      (*method_map)[&m] = &(*nm_iter);
+    }
+  }
+}
+
+static void CopyField(art::ObjPtr<art::mirror::Object> target,
+                      art::ArtField* new_field,
+                      art::ObjPtr<art::mirror::Object> source,
+                      art::ArtField& old_field) REQUIRES(art::Locks::mutator_lock_) {
+  art::Primitive::Type ftype = old_field.GetTypeAsPrimitiveType();
+  CHECK_EQ(ftype, new_field->GetTypeAsPrimitiveType())
+      << old_field.PrettyField() << " vs " << new_field->PrettyField();
+  if (ftype == art::Primitive::kPrimNot) {
+    new_field->SetObject<false>(target, old_field.GetObject(source));
+  } else {
+    switch (ftype) {
+#define UPDATE_FIELD(TYPE)                                            \
+  case art::Primitive::kPrim##TYPE:                                   \
+    new_field->Set##TYPE<false>(target, old_field.Get##TYPE(source)); \
+    break
+      UPDATE_FIELD(Int);
+      UPDATE_FIELD(Float);
+      UPDATE_FIELD(Long);
+      UPDATE_FIELD(Double);
+      UPDATE_FIELD(Short);
+      UPDATE_FIELD(Char);
+      UPDATE_FIELD(Byte);
+      UPDATE_FIELD(Boolean);
+      case art::Primitive::kPrimNot:
+      case art::Primitive::kPrimVoid:
+        LOG(FATAL) << "Unexpected field with type " << ftype << " found!";
+        UNREACHABLE();
+#undef UPDATE_FIELD
+    }
+  }
+}
+
+static void CopyFields(bool is_static,
+                       art::ObjPtr<art::mirror::Object> target,
+                       art::ObjPtr<art::mirror::Class> target_class,
+                       art::ObjPtr<art::mirror::Object> source,
+                       art::ObjPtr<art::mirror::Class> source_class)
+    REQUIRES(art::Locks::mutator_lock_) {
+  DCHECK(!source_class->IsObjectClass() && !target_class->IsObjectClass())
+      << "Should not be overriding object class fields. Target: " << target_class->PrettyClass()
+      << " Source: " << source_class->PrettyClass();
+  for (art::ArtField& f : (is_static ? source_class->GetSFields() : source_class->GetIFields())) {
+    art::ArtField* new_field =
+        (is_static ? target_class->FindDeclaredStaticField(f.GetName(), f.GetTypeDescriptor())
+                   : target_class->FindDeclaredInstanceField(f.GetName(), f.GetTypeDescriptor()));
+    CHECK(new_field != nullptr) << "could not find new version of " << f.PrettyField();
+    CopyField(target, new_field, source, f);
+  }
+  if (!is_static && !target_class->GetSuperClass()->IsObjectClass()) {
+    CopyFields(
+        is_static, target, target_class->GetSuperClass(), source, source_class->GetSuperClass());
+  }
+}
+
+static void ClearField(art::ObjPtr<art::mirror::Object> target, art::ArtField& field)
+    REQUIRES(art::Locks::mutator_lock_) {
+  art::Primitive::Type ftype = field.GetTypeAsPrimitiveType();
+  if (ftype == art::Primitive::kPrimNot) {
+    field.SetObject<false>(target, nullptr);
+  } else {
+    switch (ftype) {
+#define UPDATE_FIELD(TYPE)             \
+  case art::Primitive::kPrim##TYPE:    \
+    field.Set##TYPE<false>(target, 0); \
+    break
+      UPDATE_FIELD(Int);
+      UPDATE_FIELD(Float);
+      UPDATE_FIELD(Long);
+      UPDATE_FIELD(Double);
+      UPDATE_FIELD(Short);
+      UPDATE_FIELD(Char);
+      UPDATE_FIELD(Byte);
+      UPDATE_FIELD(Boolean);
+      case art::Primitive::kPrimNot:
+      case art::Primitive::kPrimVoid:
+        LOG(FATAL) << "Unexpected field with type " << ftype << " found!";
+        UNREACHABLE();
+#undef UPDATE_FIELD
+    }
+  }
+}
+
+static void ClearFields(bool is_static,
+                        art::ObjPtr<art::mirror::Object> target,
+                        art::ObjPtr<art::mirror::Class> target_class)
+    REQUIRES(art::Locks::mutator_lock_) {
+  DCHECK(!target_class->IsObjectClass());
+  for (art::ArtField& f : (is_static ? target_class->GetSFields() : target_class->GetIFields())) {
+    ClearField(target, f);
+  }
+  if (!is_static && !target_class->GetSuperClass()->IsObjectClass()) {
+    ClearFields(is_static, target, target_class->GetSuperClass());
+  }
+}
+
+static void CopyAndClearFields(bool is_static,
+                               art::ObjPtr<art::mirror::Object> target,
+                               art::ObjPtr<art::mirror::Class> target_class,
+                               art::ObjPtr<art::mirror::Object> source,
+                               art::ObjPtr<art::mirror::Class> source_class)
+    REQUIRES(art::Locks::mutator_lock_) {
+  // Copy all non-j.l.Object fields
+  CopyFields(is_static, target, target_class, source, source_class);
+  // Copy the lock-word.
+  target->SetLockWord(source->GetLockWord(false), false);
+  // Clear (reset) the old one.
+  source->SetLockWord(art::LockWord::Default(), false);
+  art::WriteBarrier::ForEveryFieldWrite(target);
+
+  // Clear the fields from the old class. We don't need it anymore.
+  ClearFields(is_static, source, source_class);
+  art::WriteBarrier::ForEveryFieldWrite(source);
+}
+
+void Redefiner::ClassRedefinition::UpdateClassStructurally(const RedefinitionDataIter& holder) {
+  DCHECK(holder.IsActuallyStructural());
+  DCHECK(holder.IsInitialStructural());
+  // LETS GO. We've got all new class structures so no need to do all the updating of the stacks.
+  // Instead we need to update everything else.
+  // Just replace the class and be done with it.
+  art::Locks::mutator_lock_->AssertExclusiveHeld(driver_->self_);
+  art::ClassLinker* cl = driver_->runtime_->GetClassLinker();
+  art::ScopedAssertNoThreadSuspension sants(__FUNCTION__);
+  art::ObjPtr<art::mirror::Class> orig(holder.GetMirrorClass());
+  art::ObjPtr<art::mirror::Class> replacement(holder.GetNewClassObject());
+  art::ObjPtr<art::mirror::ObjectArray<art::mirror::Class>> new_classes(holder.GetNewClasses());
+  art::ObjPtr<art::mirror::ObjectArray<art::mirror::Class>> old_classes(holder.GetOldClasses());
+  // Collect mappings from old to new fields/methods
+  std::map<art::ArtMethod*, art::ArtMethod*> method_map;
+  std::map<art::ArtField*, art::ArtField*> field_map;
+  CollectNewFieldAndMethodMappings(holder, &method_map, &field_map);
+  art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> new_instances(
+      holder.GetNewInstanceObjects());
+  art::ObjPtr<art::mirror::ObjectArray<art::mirror::Object>> old_instances(
+      holder.GetOldInstanceObjects());
+  CHECK(!orig.IsNull());
+  CHECK(!replacement.IsNull());
+  // Once we do the ReplaceReferences old_classes will have the new_classes in it. We want to keep
+  // ahold of the old classes so copy them now.
+  std::vector<art::ObjPtr<art::mirror::Class>> old_classes_vec(old_classes->Iterate().begin(),
+                                                               old_classes->Iterate().end());
+  // Copy over the static fields of the class and all the instance fields.
+  for (auto [new_class, old_class] : art::ZipLeft(new_classes->Iterate(), old_classes->Iterate())) {
+    CHECK(!new_class.IsNull());
+    CHECK(!old_class.IsNull());
+    CHECK(!old_class->IsErroneous());
+    if (old_class->GetStatus() > new_class->GetStatus()) {
+      // Some verification/initialization step happened during interval between
+      // creating the new class and now. Just copy the new status.
+      new_class->SetStatusLocked(old_class->GetStatus());
+    }
+    CopyAndClearFields(true, new_class, new_class, old_class, old_class);
+  }
+
+  // Copy and clear the fields of the old-instances.
+  for (auto [new_instance, old_instance] :
+       art::ZipLeft(new_instances->Iterate(), old_instances->Iterate())) {
+    CopyAndClearFields(/*is_static=*/false,
+                       new_instance,
+                       new_instance->GetClass(),
+                       old_instance,
+                       old_instance->GetClass());
+  }
+  // Mark old class and methods obsolete. Copy over any native implementation as well.
+  for (auto [old_class, new_class] : art::ZipLeft(old_classes->Iterate(), new_classes->Iterate())) {
+    old_class->SetObsoleteObject();
+    // Mark methods obsolete and copy native implementation. We need to wait
+    // until later to actually clear the jit data. We copy the native
+    // implementation here since we don't want to race with any threads doing
+    // RegisterNatives.
+    for (art::ArtMethod& m : old_class->GetMethods(art::kRuntimePointerSize)) {
+      if (m.IsNative()) {
+        art::ArtMethod* new_method =
+            new_class->FindClassMethod(m.GetNameView(), m.GetSignature(), art::kRuntimePointerSize);
+        DCHECK(new_class->GetMethodsSlice(art::kRuntimePointerSize).Contains(new_method))
+            << "Could not find method " << m.PrettyMethod() << " declared in new class!";
+        DCHECK(new_method->IsNative());
+        new_method->SetEntryPointFromJni(m.GetEntryPointFromJni());
+      }
+      m.SetIsObsolete();
+      cl->SetEntryPointsForObsoleteMethod(&m);
+      if (m.IsInvokable()) {
+        m.SetDontCompile();
+      }
+    }
+  }
+  // Update live pointers in ART code.
+  auto could_change_resolution_of = [&](auto* field_or_method,
+                                        const auto& info) REQUIRES(art::Locks::mutator_lock_) {
+    constexpr bool is_method = std::is_same_v<art::ArtMethod*, decltype(field_or_method)>;
+    static_assert(is_method || std::is_same_v<art::ArtField*, decltype(field_or_method)>,
+                  "Input is not field or method!");
+    // Only dex-cache is used for resolution
+    if (LIKELY(info.GetType() != art::ReflectionSourceType::kSourceDexCacheResolvedField &&
+               info.GetType() != art::ReflectionSourceType::kSourceDexCacheResolvedMethod)) {
+      return false;
+    }
+    if constexpr (is_method) {
+      // Only direct methods are used without further indirection through a vtable/IFTable.
+      // Constructors cannot be shadowed.
+      if (LIKELY(!field_or_method->IsDirect() || field_or_method->IsConstructor())) {
+        return false;
+      }
+    } else {
+      // Only non-private fields can be shadowed in a manner that's visible.
+      if (LIKELY(field_or_method->IsPrivate())) {
+        return false;
+      }
+    }
+    // We can only shadow things from our superclasses
+    if (LIKELY(!field_or_method->GetDeclaringClass()->IsAssignableFrom(orig))) {
+      return false;
+    }
+    if constexpr (is_method) {
+      auto direct_methods = replacement->GetDirectMethods(art::kRuntimePointerSize);
+      return std::find_if(direct_methods.begin(),
+                          direct_methods.end(),
+                          [&](art::ArtMethod& m) REQUIRES(art::Locks::mutator_lock_) {
+                            return UNLIKELY(m.HasSameNameAndSignature(field_or_method));
+                          }) != direct_methods.end();
+    } else {
+      auto pred = [&](art::ArtField& f) REQUIRES(art::Locks::mutator_lock_) {
+        return std::string_view(f.GetName()) == std::string_view(field_or_method->GetName()) &&
+               std::string_view(f.GetTypeDescriptor()) ==
+                   std::string_view(field_or_method->GetTypeDescriptor());
+      };
+      if (field_or_method->IsStatic()) {
+        auto sfields = replacement->GetSFields();
+        return std::find_if(sfields.begin(), sfields.end(), pred) != sfields.end();
+      } else {
+        auto ifields = replacement->GetIFields();
+        return std::find_if(ifields.begin(), ifields.end(), pred) != ifields.end();
+      }
+    }
+  };
+  // TODO Performing 2 stack-walks back to back isn't the greatest. We might want to try to combine
+  // it with the one ReplaceReferences does. Doing so would be rather complicated though.
+  driver_->runtime_->VisitReflectiveTargets(
+      [&](art::ArtField* f, const auto& info) REQUIRES(art::Locks::mutator_lock_) {
+        DCHECK(f != nullptr) << info;
+        auto it = field_map.find(f);
+        if (it != field_map.end()) {
+          VLOG(plugin) << "Updating " << info << " object for (field) "
+                       << it->second->PrettyField();
+          return it->second;
+        } else if (UNLIKELY(could_change_resolution_of(f, info))) {
+          // Resolution might change. Just clear the resolved value.
+          VLOG(plugin) << "Clearing resolution " << info << " for (field) " << f->PrettyField();
+          return static_cast<art::ArtField*>(nullptr);
+        }
+        return f;
+      },
+      [&](art::ArtMethod* m, const auto& info) REQUIRES(art::Locks::mutator_lock_) {
+        DCHECK(m != nullptr) << info;
+        auto it = method_map.find(m);
+        if (it != method_map.end()) {
+          VLOG(plugin) << "Updating " << info << " object for (method) "
+                      << it->second->PrettyMethod();
+          return it->second;
+        } else if (UNLIKELY(could_change_resolution_of(m, info))) {
+          // Resolution might change. Just clear the resolved value.
+          VLOG(plugin) << "Clearing resolution " << info << " for (method) " << m->PrettyMethod();
+          return static_cast<art::ArtMethod*>(nullptr);
+        }
+        return m;
+      });
+
+  // Force every frame of every thread to deoptimize (any frame might have eg offsets compiled in).
+  driver_->runtime_->GetInstrumentation()->DeoptimizeAllThreadFrames();
+
+  std::unordered_map<art::ObjPtr<art::mirror::Object>,
+                     art::ObjPtr<art::mirror::Object>,
+                     art::HashObjPtr> map;
+  for (auto [new_class, old_class] : art::ZipLeft(new_classes->Iterate(), old_classes->Iterate())) {
+    map.emplace(old_class, new_class);
+  }
+  for (auto [new_instance, old_instance] :
+       art::ZipLeft(new_instances->Iterate(), old_instances->Iterate())) {
+    map.emplace(old_instance, new_instance);
+    // Bare-bones check that the mapping is correct.
+    CHECK(new_instance->GetClass() == map[old_instance->GetClass()]->AsClass())
+        << new_instance->GetClass()->PrettyClass() << " vs "
+        << map[old_instance->GetClass()]->AsClass()->PrettyClass();
+  }
+
+  // Actually perform the general replacement. This doesn't affect ArtMethod/ArtFields. It does
+  // affect the declaring_class field of all the obsolete objects, which is unfortunate and needs to
+  // be undone. This replaces the mirror::Class in 'holder' as well. It's magic!
+  HeapExtensions::ReplaceReferences(driver_->self_, map);
+
+  // Save the old class so that the JIT gc doesn't get confused by it being collected before the
+  // jit code. This is also needed to keep the dex-caches of any obsolete methods live.
+  for (auto [new_class, old_class] :
+       art::ZipLeft(new_classes->Iterate(), art::MakeIterationRange(old_classes_vec))) {
+    new_class->GetExtData()->SetObsoleteClass(old_class);
+  }
+
+  art::jit::Jit* jit = driver_->runtime_->GetJit();
+  if (jit != nullptr) {
+    // Clear jit.
+    // TODO We might want to have some way to tell the JIT not to wait the kJitSamplesBatchSize
+    // invokes to start compiling things again.
+    jit->GetCodeCache()->InvalidateAllCompiledCode();
+  }
+
+  // Clear thread caches
+  {
+    // TODO We might be able to avoid doing this but given the rather unstructured nature of the
+    // interpreter cache it's probably not worth the effort.
+    art::MutexLock mu(driver_->self_, *art::Locks::thread_list_lock_);
+    driver_->runtime_->GetThreadList()->ForEach(
+        [](art::Thread* t) { t->GetInterpreterCache()->Clear(t); });
+  }
+
+  if (art::kIsDebugBuild) {
+    // Just make sure we didn't screw up any of the now obsolete methods or fields. We need their
+    // declaring-class to still be the obsolete class
+    orig->VisitMethods([&](art::ArtMethod* method) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      if (method->IsCopied()) {
+        // Copied methods have interfaces as their declaring class.
+        return;
+      }
+      DCHECK_EQ(method->GetDeclaringClass(), orig) << method->GetDeclaringClass()->PrettyClass()
+                                                   << " vs " << orig->PrettyClass();
+    }, art::kRuntimePointerSize);
+    orig->VisitFields([&](art::ArtField* field) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      DCHECK_EQ(field->GetDeclaringClass(), orig) << field->GetDeclaringClass()->PrettyClass()
+                                                  << " vs " << orig->PrettyClass();
+    });
+  }
+}
+
+// Redefines the class in place
+void Redefiner::ClassRedefinition::UpdateClassInPlace(const RedefinitionDataIter& holder) {
+  art::ObjPtr<art::mirror::Class> mclass(holder.GetMirrorClass());
+  // TODO Rewrite so we don't do a stack walk for each and every class.
+  FindAndAllocateObsoleteMethods(mclass);
+  art::ObjPtr<art::mirror::DexCache> new_dex_cache(holder.GetNewDexCache());
+  art::ObjPtr<art::mirror::Object> original_dex_file(holder.GetOriginalDexFile());
   DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
   const art::dex::ClassDef& class_def = dex_file_->GetClassDef(0);
   UpdateMethods(mclass, class_def);
@@ -1554,10 +3020,43 @@
   }
 }
 
+// Performs final updates to class for redefinition.
+void Redefiner::ClassRedefinition::UpdateClass(const RedefinitionDataIter& holder) {
+  CHECK(holder.IsInitialized());
+  if (holder.IsInitialStructural()) {
+    UpdateClassStructurally(holder);
+  } else if (!holder.IsActuallyStructural()) {
+    UpdateClassInPlace(holder);
+  }
+  UpdateClassCommon(holder);
+}
+
+void Redefiner::ClassRedefinition::UpdateClassCommon(const RedefinitionDataIter &cur_data) {
+  // NB This is after we've already replaced all old-refs with new-refs in the structural case.
+  art::ObjPtr<art::mirror::Class> klass(cur_data.GetMirrorClass());
+  DCHECK(!IsStructuralRedefinition() || klass == cur_data.GetNewClassObject());
+  if (!needs_reverify_) {
+    return;
+  }
+  // Force the most restrictive interpreter environment. We don't know what the final verification
+  // will allow. We will clear these after retrying verification once we drop the mutator-lock.
+  klass->VisitMethods([](art::ArtMethod* m) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    if (!m->IsNative() && m->IsInvokable() && !m->IsObsolete()) {
+      m->ClearSkipAccessChecks();
+      m->SetDontCompile();
+      m->SetMustCountLocks();
+    }
+  }, art::kRuntimePointerSize);
+}
+
 // Restores the old obsolete methods maps if it turns out they weren't needed (ie there were no new
 // obsolete methods).
 void Redefiner::ClassRedefinition::RestoreObsoleteMethodMapsIfUnneeded(
     const RedefinitionDataIter* cur_data) {
+  if (cur_data->IsActuallyStructural()) {
+    // We didn't touch these in this case.
+    return;
+  }
   art::ObjPtr<art::mirror::Class> klass = GetMirrorClass();
   art::ObjPtr<art::mirror::ClassExt> ext = klass->GetExtData();
   art::ObjPtr<art::mirror::PointerArray> methods = ext->GetObsoleteMethods();
@@ -1566,7 +3065,10 @@
   int32_t expected_length =
       old_length + klass->NumDirectMethods() + klass->NumDeclaredVirtualMethods();
   // Check to make sure we are only undoing this one.
-  if (expected_length == methods->GetLength()) {
+  if (methods.IsNull()) {
+    // No new obsolete methods! We can get rid of the maps.
+    ext->SetObsoleteArrays(cur_data->GetOldObsoleteMethods(), cur_data->GetOldDexCaches());
+  } else if (expected_length == methods->GetLength()) {
     for (int32_t i = 0; i < expected_length; i++) {
       art::ArtMethod* expected = nullptr;
       if (i < old_length) {
@@ -1595,7 +3097,8 @@
     return false;
   }
   // Allocate the classExt
-  art::Handle<art::mirror::ClassExt> ext(hs.NewHandle(klass->EnsureExtDataPresent(driver_->self_)));
+  art::Handle<art::mirror::ClassExt> ext =
+      hs.NewHandle(art::mirror::Class::EnsureExtDataPresent(klass, driver_->self_));
   if (ext == nullptr) {
     // No memory. Clear exception (it's not useful) and return error.
     driver_->self_->AssertPendingOOMException();
@@ -1603,20 +3106,23 @@
     RecordFailure(ERR(OUT_OF_MEMORY), "Could not allocate ClassExt");
     return false;
   }
-  // First save the old values of the 2 arrays that make up the obsolete methods maps.  Then
-  // allocate the 2 arrays that make up the obsolete methods map.  Since the contents of the arrays
-  // are only modified when all threads (other than the modifying one) are suspended we don't need
-  // to worry about missing the unsyncronized writes to the array. We do synchronize when setting it
-  // however, since that can happen at any time.
-  cur_data->SetOldObsoleteMethods(ext->GetObsoleteMethods());
-  cur_data->SetOldDexCaches(ext->GetObsoleteDexCaches());
-  if (!ext->ExtendObsoleteArrays(
-        driver_->self_, klass->GetDeclaredMethodsSlice(art::kRuntimePointerSize).size())) {
-    // OOM. Clear exception and return error.
-    driver_->self_->AssertPendingOOMException();
-    driver_->self_->ClearException();
-    RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate/extend obsolete methods map");
-    return false;
+  if (!cur_data->IsActuallyStructural()) {
+    CHECK(!IsStructuralRedefinition());
+    // First save the old values of the 2 arrays that make up the obsolete methods maps. Then
+    // allocate the 2 arrays that make up the obsolete methods map. Since the contents of the arrays
+    // are only modified when all threads (other than the modifying one) are suspended we don't need
+    // to worry about missing the unsynchronized writes to the array. We do synchronize when setting
+    // it however, since that can happen at any time.
+    cur_data->SetOldObsoleteMethods(ext->GetObsoleteMethods());
+    cur_data->SetOldDexCaches(ext->GetObsoleteDexCaches());
+    if (!art::mirror::ClassExt::ExtendObsoleteArrays(
+            ext, driver_->self_, klass->GetDeclaredMethodsSlice(art::kRuntimePointerSize).size())) {
+      // OOM. Clear exception and return error.
+      driver_->self_->AssertPendingOOMException();
+      driver_->self_->ClearException();
+      RecordFailure(ERR(OUT_OF_MEMORY), "Unable to allocate/extend obsolete methods map");
+      return false;
+    }
   }
   return true;
 }
diff --git a/openjdkjvmti/ti_redefine.h b/openjdkjvmti/ti_redefine.h
index f55a2b8..d7c7b89 100644
--- a/openjdkjvmti/ti_redefine.h
+++ b/openjdkjvmti/ti_redefine.h
@@ -32,24 +32,30 @@
 #ifndef ART_OPENJDKJVMTI_TI_REDEFINE_H_
 #define ART_OPENJDKJVMTI_TI_REDEFINE_H_
 
+#include <functional>
 #include <string>
 
 #include <jni.h>
 
+#include "art_field.h"
 #include "art_jvmti.h"
 #include "base/array_ref.h"
 #include "base/globals.h"
+#include "dex/class_accessor.h"
+#include "dex/dex_file.h"
+#include "dex/dex_file_structs.h"
 #include "jni/jni_env_ext-inl.h"
 #include "jvmti.h"
 #include "mirror/array.h"
 #include "mirror/class.h"
+#include "mirror/dex_cache.h"
 #include "obj_ptr.h"
 
 namespace art {
+class ClassAccessor;
 namespace dex {
 struct ClassDef;
 }  // namespace dex
-class DexFile;
 }  // namespace art
 
 namespace openjdkjvmti {
@@ -58,6 +64,11 @@
 class RedefinitionDataHolder;
 class RedefinitionDataIter;
 
+enum class RedefinitionType {
+  kStructural,
+  kNormal,
+};
+
 // Class that can redefine a single class's methods.
 class Redefiner {
  public:
@@ -69,29 +80,38 @@
                                           art::Runtime* runtime,
                                           art::Thread* self,
                                           const std::vector<ArtClassDefinition>& definitions,
+                                          RedefinitionType type,
                                           /*out*/std::string* error_msg);
 
   // Redefine the given classes with the given dex data. Note this function does not take ownership
   // of the dex_data pointers. It is not used after this call however and may be freed if desired.
   // The caller is responsible for freeing it. The runtime makes its own copy of the data.
-  static jvmtiError RedefineClasses(ArtJvmTiEnv* env,
-                                    EventHandler* event_handler,
-                                    art::Runtime* runtime,
-                                    art::Thread* self,
+  static jvmtiError RedefineClasses(jvmtiEnv* env,
                                     jint class_count,
-                                    const jvmtiClassDefinition* definitions,
-                                    /*out*/std::string* error_msg);
+                                    const jvmtiClassDefinition* definitions);
+  static jvmtiError StructurallyRedefineClasses(jvmtiEnv* env,
+                                                jint class_count,
+                                                const jvmtiClassDefinition* definitions);
 
   static jvmtiError IsModifiableClass(jvmtiEnv* env, jclass klass, jboolean* is_redefinable);
+  static jvmtiError IsStructurallyModifiableClass(jvmtiEnv* env,
+                                                  jclass klass,
+                                                  jboolean* is_redefinable);
 
   static art::MemMap MoveDataToMemMap(const std::string& original_location,
                                       art::ArrayRef<const unsigned char> data,
                                       std::string* error_msg);
 
   // Helper for checking if redefinition/retransformation is allowed.
+  template<RedefinitionType kType = RedefinitionType::kNormal>
   static jvmtiError GetClassRedefinitionError(jclass klass, /*out*/std::string* error_msg)
       REQUIRES(!art::Locks::mutator_lock_);
 
+  static jvmtiError StructurallyRedefineClassDirect(jvmtiEnv* env,
+                                                    jclass klass,
+                                                    const unsigned char* data,
+                                                    jint data_size);
+
  private:
   class ClassRedefinition {
    public:
@@ -105,6 +125,17 @@
     // NO_THREAD_SAFETY_ANALYSIS so we can unlock the class in the destructor.
     ~ClassRedefinition() NO_THREAD_SAFETY_ANALYSIS;
 
+    // Move assignment so we can sort these in a vector.
+    ClassRedefinition& operator=(ClassRedefinition&& other) {
+      driver_ = other.driver_;
+      klass_ = other.klass_;
+      dex_file_ = std::move(other.dex_file_);
+      class_sig_ = std::move(other.class_sig_);
+      original_dex_file_ = other.original_dex_file_;
+      other.driver_ = nullptr;
+      return *this;
+    }
+
     // Move constructor so we can put these into a vector.
     ClassRedefinition(ClassRedefinition&& other)
         : driver_(other.driver_),
@@ -115,6 +146,10 @@
       other.driver_ = nullptr;
     }
 
+    // No copy!
+    ClassRedefinition(ClassRedefinition&) = delete;
+    ClassRedefinition& operator=(ClassRedefinition&) = delete;
+
     art::ObjPtr<art::mirror::Class> GetMirrorClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
     art::ObjPtr<art::mirror::ClassLoader> GetClassLoader()
         REQUIRES_SHARED(art::Locks::mutator_lock_);
@@ -134,7 +169,13 @@
       driver_->RecordFailure(e, class_sig_, err);
     }
 
-    bool FinishRemainingAllocations(/*out*/RedefinitionDataIter* cur_data)
+    bool FinishRemainingCommonAllocations(/*out*/RedefinitionDataIter* cur_data)
+        REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+    bool FinishNewClassAllocations(RedefinitionDataHolder& holder,
+                                   /*out*/RedefinitionDataIter* cur_data)
+        REQUIRES_SHARED(art::Locks::mutator_lock_);
+    bool CollectAndCreateNewInstances(/*out*/RedefinitionDataIter* cur_data)
         REQUIRES_SHARED(art::Locks::mutator_lock_);
 
     bool AllocateAndRememberNewDexFileCookie(
@@ -146,6 +187,17 @@
     void FindAndAllocateObsoleteMethods(art::ObjPtr<art::mirror::Class> art_klass)
         REQUIRES(art::Locks::mutator_lock_);
 
+    art::ObjPtr<art::mirror::Class> AllocateNewClassObject(
+        art::Handle<art::mirror::Class> old_class,
+        art::Handle<art::mirror::Class> super_class,
+        art::Handle<art::mirror::DexCache> cache,
+        uint16_t dex_class_def_index) REQUIRES_SHARED(art::Locks::mutator_lock_);
+    art::ObjPtr<art::mirror::Class> AllocateNewClassObject(art::Handle<art::mirror::DexCache> cache)
+        REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+    uint32_t GetNewClassSize(art::ClassAccessor& accessor)
+        REQUIRES_SHARED(art::Locks::mutator_lock_);
+
     // Checks that the dex file contains only the single expected class and that the top-level class
     // data has not been modified in an incompatible manner.
     bool CheckClass() REQUIRES_SHARED(art::Locks::mutator_lock_);
@@ -165,12 +217,15 @@
     // Checks that the class can even be redefined.
     bool CheckRedefinable() REQUIRES_SHARED(art::Locks::mutator_lock_);
 
-    // Checks that the dex file does not add/remove methods, or change their modifiers or types.
-    bool CheckSameMethods() REQUIRES_SHARED(art::Locks::mutator_lock_);
+    // Checks that the dex file does not add/remove methods, or change their modifiers or types in
+    // illegal ways.
+    bool CheckMethods() REQUIRES_SHARED(art::Locks::mutator_lock_);
 
-    // Checks that the dex file does not modify fields types or modifiers.
-    bool CheckSameFields() REQUIRES_SHARED(art::Locks::mutator_lock_);
+    // Checks that the dex file does not modify fields types or modifiers in illegal ways.
+    bool CheckFields() REQUIRES_SHARED(art::Locks::mutator_lock_);
 
+    // Temporary check that a class undergoing structural redefinition has no instances. This
+    // requirement will be removed in time.
     void UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file,
                            art::ObjPtr<art::mirror::LongArray> new_cookie)
         REQUIRES(art::Locks::mutator_lock_);
@@ -182,9 +237,18 @@
                        const art::dex::ClassDef& class_def)
         REQUIRES(art::Locks::mutator_lock_);
 
-    void UpdateClass(art::ObjPtr<art::mirror::Class> mclass,
-                     art::ObjPtr<art::mirror::DexCache> new_dex_cache,
-                     art::ObjPtr<art::mirror::Object> original_dex_file)
+    void UpdateClass(const RedefinitionDataIter& cur_data)
+        REQUIRES(art::Locks::mutator_lock_);
+
+    void UpdateClassCommon(const RedefinitionDataIter& cur_data)
+        REQUIRES(art::Locks::mutator_lock_);
+
+    void ReverifyClass(const RedefinitionDataIter& cur_data)
+        REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+    void CollectNewFieldAndMethodMappings(const RedefinitionDataIter& data,
+                                          std::map<art::ArtMethod*, art::ArtMethod*>* method_map,
+                                          std::map<art::ArtField*, art::ArtField*>* field_map)
         REQUIRES(art::Locks::mutator_lock_);
 
     void RestoreObsoleteMethodMapsIfUnneeded(const RedefinitionDataIter* cur_data)
@@ -192,22 +256,53 @@
 
     void ReleaseDexFile() REQUIRES_SHARED(art::Locks::mutator_lock_);
 
-    void UnregisterBreakpoints() REQUIRES_SHARED(art::Locks::mutator_lock_);
     // This should be done with all threads suspended.
     void UnregisterJvmtiBreakpoints() REQUIRES_SHARED(art::Locks::mutator_lock_);
 
+    void RecordNewMethodAdded();
+    void RecordNewFieldAdded();
+    void RecordHasVirtualMembers() {
+      has_virtuals_ = true;
+    }
+
+    bool HasVirtualMembers() const {
+      return has_virtuals_;
+    }
+
+    bool IsStructuralRedefinition() const {
+      DCHECK(!(added_fields_ || added_methods_) || driver_->IsStructuralRedefinition())
+          << "added_fields_: " << added_fields_ << " added_methods_: " << added_methods_
+          << " driver_->IsStructuralRedefinition(): " << driver_->IsStructuralRedefinition();
+      return driver_->IsStructuralRedefinition() && (added_fields_ || added_methods_);
+    }
+
    private:
+    void UpdateClassStructurally(const RedefinitionDataIter& cur_data)
+        REQUIRES(art::Locks::mutator_lock_);
+
+    void UpdateClassInPlace(const RedefinitionDataIter& cur_data)
+        REQUIRES(art::Locks::mutator_lock_);
+
     Redefiner* driver_;
     jclass klass_;
     std::unique_ptr<const art::DexFile> dex_file_;
     std::string class_sig_;
     art::ArrayRef<const unsigned char> original_dex_file_;
+
+    bool added_fields_ = false;
+    bool added_methods_ = false;
+    bool has_virtuals_ = false;
+
+    // Does the class need to be reverified due to verification soft-fails possibly forcing
+    // interpreter or lock-counting?
+    bool needs_reverify_ = false;
   };
 
   ArtJvmTiEnv* env_;
   jvmtiError result_;
   art::Runtime* runtime_;
   art::Thread* self_;
+  RedefinitionType type_;
   std::vector<ClassRedefinition> redefinitions_;
   // Kept as a jclass since we have weird run-state changes that make keeping it around as a
   // mirror::Class difficult and confusing.
@@ -216,17 +311,28 @@
   Redefiner(ArtJvmTiEnv* env,
             art::Runtime* runtime,
             art::Thread* self,
+            RedefinitionType type,
             std::string* error_msg)
       : env_(env),
         result_(ERR(INTERNAL)),
         runtime_(runtime),
         self_(self),
+        type_(type),
         redefinitions_(),
         error_msg_(error_msg) { }
 
   jvmtiError AddRedefinition(ArtJvmTiEnv* env, const ArtClassDefinition& def)
       REQUIRES_SHARED(art::Locks::mutator_lock_);
 
+  template<RedefinitionType kType = RedefinitionType::kNormal>
+  static jvmtiError RedefineClassesGeneric(jvmtiEnv* env,
+                                           jint class_count,
+                                           const jvmtiClassDefinition* definitions);
+
+  template<RedefinitionType kType = RedefinitionType::kNormal>
+  static jvmtiError IsModifiableClassGeneric(jvmtiEnv* env, jclass klass, jboolean* is_redefinable);
+
+  template<RedefinitionType kType = RedefinitionType::kNormal>
   static jvmtiError GetClassRedefinitionError(art::Handle<art::mirror::Class> klass,
                                               /*out*/std::string* error_msg)
       REQUIRES_SHARED(art::Locks::mutator_lock_);
@@ -236,17 +342,28 @@
   bool CheckAllRedefinitionAreValid() REQUIRES_SHARED(art::Locks::mutator_lock_);
   bool CheckAllClassesAreVerified(RedefinitionDataHolder& holder)
       REQUIRES_SHARED(art::Locks::mutator_lock_);
+  void MarkStructuralChanges(RedefinitionDataHolder& holder)
+      REQUIRES_SHARED(art::Locks::mutator_lock_);
   bool EnsureAllClassAllocationsFinished(RedefinitionDataHolder& holder)
       REQUIRES_SHARED(art::Locks::mutator_lock_);
-  bool FinishAllRemainingAllocations(RedefinitionDataHolder& holder)
+  bool FinishAllRemainingCommonAllocations(RedefinitionDataHolder& holder)
+      REQUIRES_SHARED(art::Locks::mutator_lock_);
+  bool FinishAllNewClassAllocations(RedefinitionDataHolder& holder)
+      REQUIRES_SHARED(art::Locks::mutator_lock_);
+  bool CollectAndCreateNewInstances(RedefinitionDataHolder& holder)
       REQUIRES_SHARED(art::Locks::mutator_lock_);
   void ReleaseAllDexFiles() REQUIRES_SHARED(art::Locks::mutator_lock_);
+  void ReverifyClasses(RedefinitionDataHolder& holder) REQUIRES_SHARED(art::Locks::mutator_lock_);
   void UnregisterAllBreakpoints() REQUIRES_SHARED(art::Locks::mutator_lock_);
   // Restores the old obsolete methods maps if it turns out they weren't needed (ie there were no
   // new obsolete methods).
   void RestoreObsoleteMethodMapsIfUnneeded(RedefinitionDataHolder& holder)
       REQUIRES(art::Locks::mutator_lock_);
 
+  bool IsStructuralRedefinition() const {
+    return type_ == RedefinitionType::kStructural;
+  }
+
   void RecordFailure(jvmtiError result, const std::string& class_sig, const std::string& error_msg);
   void RecordFailure(jvmtiError result, const std::string& error_msg) {
     RecordFailure(result, "NO CLASS", error_msg);
diff --git a/openjdkjvmti/ti_search.cc b/openjdkjvmti/ti_search.cc
index 2187825..526836e 100644
--- a/openjdkjvmti/ti_search.cc
+++ b/openjdkjvmti/ti_search.cc
@@ -29,6 +29,9 @@
  * questions.
  */
 
+#include <sstream>
+#include <unistd.h>
+
 #include "ti_search.h"
 
 #include "jni.h"
@@ -37,6 +40,9 @@
 #include "art_jvmti.h"
 #include "base/enums.h"
 #include "base/macros.h"
+#include "base/memfd.h"
+#include "base/os.h"
+#include "base/unix_file/fd_file.h"
 #include "class_linker.h"
 #include "dex/art_dex_file_loader.h"
 #include "dex/dex_file.h"
@@ -243,14 +249,133 @@
 
   art::ScopedObjectAccess soa(art::Thread::Current());
   for (std::unique_ptr<const art::DexFile>& dex_file : dex_files) {
-    current->GetClassLinker()->AppendToBootClassPath(art::Thread::Current(), *dex_file.release());
+    current->GetClassLinker()->AppendToBootClassPath(art::Thread::Current(), dex_file.release());
   }
 
   return ERR(NONE);
 }
 
-jvmtiError SearchUtil::AddToSystemClassLoaderSearch(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
-                                                    const char* segment) {
+jvmtiError SearchUtil::AddToDexClassLoaderInMemory(jvmtiEnv* jvmti_env,
+                                                   jobject classloader,
+                                                   const char* dex_bytes,
+                                                   jint dex_bytes_length) {
+  if (jvmti_env == nullptr) {
+    return ERR(INVALID_ENVIRONMENT);
+  } else if (art::Thread::Current() == nullptr) {
+    return ERR(UNATTACHED_THREAD);
+  } else if (classloader == nullptr) {
+    return ERR(NULL_POINTER);
+  } else if (dex_bytes == nullptr) {
+    return ERR(NULL_POINTER);
+  } else if (dex_bytes_length <= 0) {
+    return ERR(ILLEGAL_ARGUMENT);
+  }
+
+  jvmtiPhase phase = PhaseUtil::GetPhaseUnchecked();
+
+  // TODO We really should try to support doing this during the ON_LOAD phase.
+  if (phase != jvmtiPhase::JVMTI_PHASE_LIVE) {
+    JVMTI_LOG(INFO, jvmti_env) << "Cannot add buffers to classpath during ON_LOAD phase to "
+                               << "prevent file-descriptor leaking.";
+    return ERR(WRONG_PHASE);
+  }
+
+  // We have java APIs for adding files to the classpath, we might as well use them. It simplifies a
+  // lot of code as well.
+
+  // Create a memfd
+  art::File file(art::memfd_create_compat("JVMTI InMemory Added dex file", 0), /*check-usage*/true);
+  if (file.Fd() < 0) {
+    char* reason = strerror(errno);
+    JVMTI_LOG(ERROR, jvmti_env) << "Unable to create memfd due to " << reason;
+    if (file.FlushClose() < 0) {
+      PLOG(WARNING) << "Failed to close file!";
+    }
+    return ERR(INTERNAL);
+  }
+  // Fill it with the buffer.
+  if (!file.WriteFully(dex_bytes, dex_bytes_length) || file.Flush() != 0) {
+    JVMTI_LOG(ERROR, jvmti_env) << "Failed to write to memfd!";
+    if (file.FlushClose() < 0) {
+      PLOG(WARNING) << "Failed to close file!";
+    }
+    return ERR(INTERNAL);
+  }
+  // Get the filename in procfs.
+  std::ostringstream oss;
+  oss << "/proc/self/fd/" << file.Fd();
+  std::string seg(oss.str());
+  // Use common code.
+
+  jvmtiError result = AddToDexClassLoader(jvmti_env, classloader, seg.c_str());
+  // We have either loaded the dex file and have a new MemMap pointing to the same pages or loading
+  // has failed and the memory isn't needed anymore. Either way we can close the memfd we created
+  // and return.
+  if (file.Close() != 0) {
+    JVMTI_LOG(WARNING, jvmti_env) << "Failed to close memfd!";
+  }
+  return result;
+}
+
+jvmtiError SearchUtil::AddToDexClassLoader(jvmtiEnv* jvmti_env,
+                                           jobject classloader,
+                                           const char* segment) {
+  if (jvmti_env == nullptr) {
+    return ERR(INVALID_ENVIRONMENT);
+  } else if (art::Thread::Current() == nullptr) {
+    return ERR(UNATTACHED_THREAD);
+  } else if (classloader == nullptr) {
+    return ERR(NULL_POINTER);
+  } else if (segment == nullptr) {
+    return ERR(NULL_POINTER);
+  }
+
+  jvmtiPhase phase = PhaseUtil::GetPhaseUnchecked();
+
+  // TODO We really should try to support doing this during the ON_LOAD phase.
+  if (phase != jvmtiPhase::JVMTI_PHASE_LIVE) {
+    JVMTI_LOG(INFO, jvmti_env) << "Cannot add to classpath of arbitrary classloaders during "
+                               << "ON_LOAD phase.";
+    return ERR(WRONG_PHASE);
+  }
+
+  // We'll use BaseDexClassLoader.addDexPath, as it takes care of array resizing etc. As a downside,
+  // exceptions are swallowed.
+
+  art::Thread* self = art::Thread::Current();
+  JNIEnv* env = self->GetJniEnv();
+  if (!env->IsInstanceOf(classloader, art::WellKnownClasses::dalvik_system_BaseDexClassLoader)) {
+    JVMTI_LOG(ERROR, jvmti_env) << "Unable to add " << segment << " to non BaseDexClassLoader!";
+    return ERR(CLASS_LOADER_UNSUPPORTED);
+  }
+
+  jmethodID add_dex_path_id = env->GetMethodID(
+      art::WellKnownClasses::dalvik_system_BaseDexClassLoader,
+      "addDexPath",
+      "(Ljava/lang/String;)V");
+  if (add_dex_path_id == nullptr) {
+    return ERR(INTERNAL);
+  }
+
+  ScopedLocalRef<jstring> dex_path(env, env->NewStringUTF(segment));
+  if (dex_path.get() == nullptr) {
+    return ERR(INTERNAL);
+  }
+  env->CallVoidMethod(classloader, add_dex_path_id, dex_path.get());
+
+  if (env->ExceptionCheck()) {
+    {
+      art::ScopedObjectAccess soa(self);
+      JVMTI_LOG(ERROR, jvmti_env) << "Failed to add " << segment << " to classloader. Error was "
+                                  << self->GetException()->Dump();
+    }
+    env->ExceptionClear();
+    return ERR(ILLEGAL_ARGUMENT);
+  }
+  return OK;
+}
+
+jvmtiError SearchUtil::AddToSystemClassLoaderSearch(jvmtiEnv* jvmti_env, const char* segment) {
   if (segment == nullptr) {
     return ERR(NULL_POINTER);
   }
@@ -266,41 +391,18 @@
     return ERR(WRONG_PHASE);
   }
 
-  jobject sys_class_loader = art::Runtime::Current()->GetSystemClassLoader();
-  if (sys_class_loader == nullptr) {
-    // This is unexpected.
+  jobject loader = art::Runtime::Current()->GetSystemClassLoader();
+  if (loader == nullptr) {
     return ERR(INTERNAL);
   }
 
-  // We'll use BaseDexClassLoader.addDexPath, as it takes care of array resizing etc. As a downside,
-  // exceptions are swallowed.
-
   art::Thread* self = art::Thread::Current();
   JNIEnv* env = self->GetJniEnv();
-  if (!env->IsInstanceOf(sys_class_loader,
-                         art::WellKnownClasses::dalvik_system_BaseDexClassLoader)) {
+  if (!env->IsInstanceOf(loader, art::WellKnownClasses::dalvik_system_BaseDexClassLoader)) {
     return ERR(INTERNAL);
   }
 
-  jmethodID add_dex_path_id = env->GetMethodID(
-      art::WellKnownClasses::dalvik_system_BaseDexClassLoader,
-      "addDexPath",
-      "(Ljava/lang/String;)V");
-  if (add_dex_path_id == nullptr) {
-    return ERR(INTERNAL);
-  }
-
-  ScopedLocalRef<jstring> dex_path(env, env->NewStringUTF(segment));
-  if (dex_path.get() == nullptr) {
-    return ERR(INTERNAL);
-  }
-  env->CallVoidMethod(sys_class_loader, add_dex_path_id, dex_path.get());
-
-  if (env->ExceptionCheck()) {
-    env->ExceptionClear();
-    return ERR(ILLEGAL_ARGUMENT);
-  }
-  return ERR(NONE);
+  return AddToDexClassLoader(jvmti_env, loader, segment);
 }
 
 }  // namespace openjdkjvmti
diff --git a/openjdkjvmti/ti_search.h b/openjdkjvmti/ti_search.h
index 81a28cc..b8d08bf 100644
--- a/openjdkjvmti/ti_search.h
+++ b/openjdkjvmti/ti_search.h
@@ -46,6 +46,12 @@
   static jvmtiError AddToBootstrapClassLoaderSearch(jvmtiEnv* env, const char* segment);
 
   static jvmtiError AddToSystemClassLoaderSearch(jvmtiEnv* env, const char* segment);
+
+  static jvmtiError AddToDexClassLoader(jvmtiEnv* env, jobject classloader, const char* segment);
+  static jvmtiError AddToDexClassLoaderInMemory(jvmtiEnv* env,
+                                                jobject classloader,
+                                                const char* dex_bytes,
+                                                jint dex_bytes_length);
 };
 
 }  // namespace openjdkjvmti
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index 75f0556..38257f1 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -32,10 +32,13 @@
 #include "ti_stack.h"
 
 #include <algorithm>
+#include <initializer_list>
 #include <list>
 #include <unordered_map>
 #include <vector>
 
+#include "android-base/macros.h"
+#include "android-base/thread_annotations.h"
 #include "arch/context.h"
 #include "art_field-inl.h"
 #include "art_method-inl.h"
@@ -44,21 +47,35 @@
 #include "barrier.h"
 #include "base/bit_utils.h"
 #include "base/enums.h"
+#include "base/locks.h"
+#include "base/macros.h"
 #include "base/mutex.h"
 #include "deopt_manager.h"
 #include "dex/code_item_accessors-inl.h"
 #include "dex/dex_file.h"
 #include "dex/dex_file_annotations.h"
 #include "dex/dex_file_types.h"
+#include "dex/dex_instruction-inl.h"
+#include "dex/primitive.h"
+#include "events.h"
 #include "gc_root.h"
 #include "handle_scope-inl.h"
+#include "instrumentation.h"
+#include "interpreter/shadow_frame-inl.h"
+#include "interpreter/shadow_frame.h"
 #include "jni/jni_env_ext.h"
 #include "jni/jni_internal.h"
+#include "jvalue-inl.h"
+#include "jvalue.h"
+#include "jvmti.h"
 #include "mirror/class.h"
 #include "mirror/dex_cache.h"
 #include "nativehelper/scoped_local_ref.h"
 #include "scoped_thread_state_change-inl.h"
+#include "scoped_thread_state_change.h"
 #include "stack.h"
+#include "thread.h"
+#include "thread_state.h"
 #include "ti_logging.h"
 #include "ti_thread.h"
 #include "thread-current-inl.h"
@@ -1087,96 +1104,333 @@
   return OK;
 }
 
+namespace {
+
+enum class NonStandardExitType {
+  kPopFrame,
+  kForceReturn,
+};
+
+template<NonStandardExitType kExitType>
+class NonStandardExitFrames {
+ public:
+  NonStandardExitFrames(art::Thread* self, jvmtiEnv* env, jthread thread)
+      REQUIRES(!art::Locks::thread_suspend_count_lock_)
+      ACQUIRE_SHARED(art::Locks::mutator_lock_)
+      ACQUIRE(art::Locks::thread_list_lock_, art::Locks::user_code_suspension_lock_)
+      : snucs_(self) {
+    // We keep the user-code-suspend-count lock.
+    art::Locks::user_code_suspension_lock_->AssertExclusiveHeld(self);
+
+    // From now on we know we cannot get suspended by user-code.
+    // NB This does a SuspendCheck (during thread state change) so we need to make sure we don't
+    // have the 'suspend_lock' locked here.
+    old_state_ = self->TransitionFromSuspendedToRunnable();
+    art::ScopedObjectAccessUnchecked soau(self);
+
+    art::Locks::thread_list_lock_->ExclusiveLock(self);
+
+    if (!ThreadUtil::GetAliveNativeThread(thread, soau, &target_, &result_)) {
+      return;
+    }
+    {
+      art::MutexLock tscl_mu(self, *art::Locks::thread_suspend_count_lock_);
+      if (target_ != self && target_->GetUserCodeSuspendCount() == 0) {
+        // We cannot be the current thread for this function.
+        result_ = ERR(THREAD_NOT_SUSPENDED);
+        return;
+      }
+    }
+    JvmtiGlobalTLSData* tls_data = ThreadUtil::GetGlobalTLSData(target_);
+    constexpr art::StackVisitor::StackWalkKind kWalkKind =
+        art::StackVisitor::StackWalkKind::kIncludeInlinedFrames;
+    if (tls_data != nullptr &&
+        tls_data->disable_pop_frame_depth != JvmtiGlobalTLSData::kNoDisallowedPopFrame &&
+        tls_data->disable_pop_frame_depth ==
+            art::StackVisitor::ComputeNumFrames(target_, kWalkKind)) {
+      JVMTI_LOG(WARNING, env) << "Disallowing frame pop due to in-progress class-load/prepare. "
+                              << "Frame at depth " << tls_data->disable_pop_frame_depth << " was "
+                              << "marked as un-poppable by the jvmti plugin. See b/117615146 for "
+                              << "more information.";
+      result_ = ERR(OPAQUE_FRAME);
+      return;
+    }
+    // We hold the user_code_suspension_lock_ so the target thread is staying suspended until we are
+    // done.
+    std::unique_ptr<art::Context> context(art::Context::Create());
+    FindFrameAtDepthVisitor final_frame(target_, context.get(), 0);
+    FindFrameAtDepthVisitor penultimate_frame(target_, context.get(), 1);
+    final_frame.WalkStack();
+    penultimate_frame.WalkStack();
+
+    if (!final_frame.FoundFrame() || !penultimate_frame.FoundFrame()) {
+      // Cannot do it if there is only one frame!
+      JVMTI_LOG(INFO, env) << "Can not pop final frame off of a stack";
+      result_ = ERR(NO_MORE_FRAMES);
+      return;
+    }
+
+    art::ArtMethod* called_method = final_frame.GetMethod();
+    art::ArtMethod* calling_method = penultimate_frame.GetMethod();
+    if (!CheckFunctions(env, calling_method, called_method)) {
+      return;
+    }
+    DCHECK(!called_method->IsNative()) << called_method->PrettyMethod();
+
+    // From here we are sure to succeed.
+    result_ = OK;
+
+    // Get/create a shadow frame
+    final_frame_ = final_frame.GetOrCreateShadowFrame(&created_final_frame_);
+    penultimate_frame_ =
+        (calling_method->IsNative()
+             ? nullptr
+             : penultimate_frame.GetOrCreateShadowFrame(&created_penultimate_frame_));
+
+    final_frame_id_ = final_frame.GetFrameId();
+    penultimate_frame_id_ = penultimate_frame.GetFrameId();
+
+    CHECK_NE(final_frame_, penultimate_frame_) << "Frames at different depths not different!";
+  }
+
+  bool CheckFunctions(jvmtiEnv* env, art::ArtMethod* calling, art::ArtMethod* called)
+      REQUIRES(art::Locks::thread_list_lock_, art::Locks::user_code_suspension_lock_)
+      REQUIRES_SHARED(art::Locks::mutator_lock_);
+
+  ~NonStandardExitFrames() RELEASE_SHARED(art::Locks::mutator_lock_)
+      REQUIRES(!art::Locks::thread_list_lock_)
+      RELEASE(art::Locks::user_code_suspension_lock_) {
+    art::Thread* self = art::Thread::Current();
+    DCHECK_EQ(old_state_, art::ThreadState::kNative)
+        << "Unexpected thread state on entering PopFrame!";
+    self->TransitionFromRunnableToSuspended(old_state_);
+  }
+
+  ScopedNoUserCodeSuspension snucs_;
+  art::ShadowFrame* final_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = nullptr;
+  art::ShadowFrame* penultimate_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = nullptr;
+  bool created_final_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = false;
+  bool created_penultimate_frame_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = false;
+  uint32_t final_frame_id_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = -1;
+  uint32_t penultimate_frame_id_ GUARDED_BY(art::Locks::user_code_suspension_lock_) = -1;
+  art::Thread* target_ GUARDED_BY(art::Locks::thread_list_lock_) = nullptr;
+  art::ThreadState old_state_ = art::ThreadState::kTerminated;
+  jvmtiError result_ = ERR(INTERNAL);
+};
+
+template <>
+bool NonStandardExitFrames<NonStandardExitType::kForceReturn>::CheckFunctions(
+    jvmtiEnv* env, art::ArtMethod* calling ATTRIBUTE_UNUSED, art::ArtMethod* called) {
+  if (UNLIKELY(called->IsNative())) {
+    result_ = ERR(OPAQUE_FRAME);
+    JVMTI_LOG(INFO, env) << "Cannot force early return from " << called->PrettyMethod()
+                         << " because it is native.";
+    return false;
+  } else {
+    return true;
+  }
+}
+
+template <>
+bool NonStandardExitFrames<NonStandardExitType::kPopFrame>::CheckFunctions(
+    jvmtiEnv* env, art::ArtMethod* calling, art::ArtMethod* called) {
+  if (UNLIKELY(calling->IsNative() || called->IsNative())) {
+    result_ = ERR(OPAQUE_FRAME);
+    JVMTI_LOG(INFO, env) << "Cannot force early return from " << called->PrettyMethod() << " to "
+                         << calling->PrettyMethod() << " because at least one of them is native.";
+    return false;
+  } else {
+    return true;
+  }
+}
+
+class SetupMethodExitEvents {
+ public:
+  SetupMethodExitEvents(art::Thread* self,
+                        EventHandler* event_handler,
+                        jthread target) REQUIRES(!art::Locks::mutator_lock_,
+                                                 !art::Locks::user_code_suspension_lock_,
+                                                 !art::Locks::thread_list_lock_)
+      : self_(self), event_handler_(event_handler), target_(target) {
+    DCHECK(target != nullptr);
+    art::Locks::mutator_lock_->AssertNotHeld(self_);
+    art::Locks::user_code_suspension_lock_->AssertNotHeld(self_);
+    art::Locks::thread_list_lock_->AssertNotHeld(self_);
+    event_handler_->SetInternalEvent(
+        target_, ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue, JVMTI_ENABLE);
+  }
+
+  ~SetupMethodExitEvents() REQUIRES(!art::Locks::mutator_lock_,
+                                    !art::Locks::user_code_suspension_lock_,
+                                    !art::Locks::thread_list_lock_) {
+    art::Locks::mutator_lock_->AssertNotHeld(self_);
+    art::Locks::user_code_suspension_lock_->AssertNotHeld(self_);
+    art::Locks::thread_list_lock_->AssertNotHeld(self_);
+    if (failed_) {
+      event_handler_->SetInternalEvent(
+          target_, ArtJvmtiEvent::kForceEarlyReturnUpdateReturnValue, JVMTI_DISABLE);
+    }
+  }
+
+  void NotifyFailure() {
+    failed_ = true;
+  }
+
+ private:
+  art::Thread* self_;
+  EventHandler* event_handler_;
+  jthread target_;
+  bool failed_ = false;
+};
+
+template <typename T>
+void AddDelayedMethodExitEvent(EventHandler* handler, art::ShadowFrame* frame, T value)
+    REQUIRES_SHARED(art::Locks::mutator_lock_)
+    REQUIRES(art::Locks::user_code_suspension_lock_, art::Locks::thread_list_lock_);
+
+template <typename T>
+void AddDelayedMethodExitEvent(EventHandler* handler, art::ShadowFrame* frame, T value) {
+  art::JValue val = art::JValue::FromPrimitive(value);
+  jvalue jval{ .j = val.GetJ() };
+  handler->AddDelayedNonStandardExitEvent(frame, false, jval);
+}
+
+template <>
+void AddDelayedMethodExitEvent<std::nullptr_t>(EventHandler* handler,
+                                               art::ShadowFrame* frame,
+                                               std::nullptr_t null_val ATTRIBUTE_UNUSED) {
+  jvalue jval;
+  memset(&jval, 0, sizeof(jval));
+  handler->AddDelayedNonStandardExitEvent(frame, false, jval);
+}
+
+template <>
+void AddDelayedMethodExitEvent<jobject>(EventHandler* handler,
+                                        art::ShadowFrame* frame,
+                                        jobject obj) {
+  jvalue jval{ .l = art::Thread::Current()->GetJniEnv()->NewGlobalRef(obj) };
+  handler->AddDelayedNonStandardExitEvent(frame, true, jval);
+}
+
+template <typename T>
+bool ValidReturnType(art::Thread* self, art::ObjPtr<art::mirror::Class> return_type, T value)
+    REQUIRES_SHARED(art::Locks::mutator_lock_)
+        REQUIRES(art::Locks::user_code_suspension_lock_, art::Locks::thread_list_lock_);
+
+#define SIMPLE_VALID_RETURN_TYPE(type, ...)                                                        \
+  template <>                                                                                      \
+  bool ValidReturnType<type>(art::Thread * self ATTRIBUTE_UNUSED,                                  \
+                             art::ObjPtr<art::mirror::Class> return_type,                          \
+                             type value ATTRIBUTE_UNUSED) {                                        \
+    static constexpr std::initializer_list<art::Primitive::Type> types{ __VA_ARGS__ };             \
+    return std::find(types.begin(), types.end(), return_type->GetPrimitiveType()) != types.end();  \
+  }
+
+SIMPLE_VALID_RETURN_TYPE(jlong, art::Primitive::kPrimLong);
+SIMPLE_VALID_RETURN_TYPE(jfloat, art::Primitive::kPrimFloat);
+SIMPLE_VALID_RETURN_TYPE(jdouble, art::Primitive::kPrimDouble);
+SIMPLE_VALID_RETURN_TYPE(nullptr_t, art::Primitive::kPrimVoid);
+SIMPLE_VALID_RETURN_TYPE(jint,
+                         art::Primitive::kPrimInt,
+                         art::Primitive::kPrimChar,
+                         art::Primitive::kPrimBoolean,
+                         art::Primitive::kPrimShort,
+                         art::Primitive::kPrimByte);
+#undef SIMPLE_VALID_RETURN_TYPE
+
+template <>
+bool ValidReturnType<jobject>(art::Thread* self,
+                              art::ObjPtr<art::mirror::Class> return_type,
+                              jobject return_value) {
+  if (return_type->IsPrimitive()) {
+    return false;
+  }
+  if (return_value == nullptr) {
+    // Null can be used for anything.
+    return true;
+  }
+  return return_type->IsAssignableFrom(self->DecodeJObject(return_value)->GetClass());
+}
+
+}  // namespace
+
 jvmtiError StackUtil::PopFrame(jvmtiEnv* env, jthread thread) {
   art::Thread* self = art::Thread::Current();
-  art::Thread* target;
-
-  ScopedNoUserCodeSuspension snucs(self);
-  // From now on we know we cannot get suspended by user-code.
-  // NB This does a SuspendCheck (during thread state change) so we need to make
-  // sure we don't have the 'suspend_lock' locked here.
-  art::ScopedObjectAccess soa(self);
-  art::Locks::thread_list_lock_->ExclusiveLock(self);
-  jvmtiError err = ERR(INTERNAL);
-  if (!ThreadUtil::GetAliveNativeThread(thread, soa, &target, &err)) {
+  NonStandardExitFrames<NonStandardExitType::kPopFrame> frames(self, env, thread);
+  if (frames.result_ != OK) {
     art::Locks::thread_list_lock_->ExclusiveUnlock(self);
-    return err;
+    return frames.result_;
   }
-  {
-    art::Locks::thread_suspend_count_lock_->ExclusiveLock(self);
-    if (target == self || target->GetUserCodeSuspendCount() == 0) {
-      // We cannot be the current thread for this function.
-      art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
-      art::Locks::thread_list_lock_->ExclusiveUnlock(self);
-      return ERR(THREAD_NOT_SUSPENDED);
-    }
-    art::Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
-  }
-  JvmtiGlobalTLSData* tls_data = ThreadUtil::GetGlobalTLSData(target);
-  constexpr art::StackVisitor::StackWalkKind kWalkKind =
-      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames;
-  if (tls_data != nullptr &&
-      tls_data->disable_pop_frame_depth !=
-          JvmtiGlobalTLSData::kNoDisallowedPopFrame &&
-      tls_data->disable_pop_frame_depth ==
-          art::StackVisitor::ComputeNumFrames(target, kWalkKind)) {
-    JVMTI_LOG(WARNING, env)
-        << "Disallowing frame pop due to in-progress class-load/prepare. "
-        << "Frame at depth " << tls_data->disable_pop_frame_depth << " was "
-        << "marked as un-poppable by the jvmti plugin. See b/117615146 for "
-        << "more information.";
-    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
-    return ERR(OPAQUE_FRAME);
-  }
-  // We hold the user_code_suspension_lock_ so the target thread is staying
-  // suspended until we are done.
-  std::unique_ptr<art::Context> context(art::Context::Create());
-  FindFrameAtDepthVisitor final_frame(target, context.get(), 0);
-  FindFrameAtDepthVisitor penultimate_frame(target, context.get(), 1);
-  final_frame.WalkStack();
-  penultimate_frame.WalkStack();
-
-  if (!final_frame.FoundFrame() || !penultimate_frame.FoundFrame()) {
-    // Cannot do it if there is only one frame!
-    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
-    return ERR(NO_MORE_FRAMES);
-  }
-
-  art::ArtMethod* called_method = final_frame.GetMethod();
-  art::ArtMethod* calling_method = penultimate_frame.GetMethod();
-  if (calling_method->IsNative() || called_method->IsNative()) {
-    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
-    return ERR(OPAQUE_FRAME);
-  }
-  // From here we are sure to succeed.
-
-  // Get/create a shadow frame
-  bool created_final_frame = false;
-  bool created_penultimate_frame = false;
-  art::ShadowFrame* called_shadow_frame =
-      final_frame.GetOrCreateShadowFrame(&created_final_frame);
-  art::ShadowFrame* calling_shadow_frame =
-      penultimate_frame.GetOrCreateShadowFrame(&created_penultimate_frame);
-
-  CHECK_NE(called_shadow_frame, calling_shadow_frame)
-      << "Frames at different depths not different!";
-
   // Tell the shadow-frame to return immediately and skip all exit events.
-  called_shadow_frame->SetForcePopFrame(true);
-  calling_shadow_frame->SetForceRetryInstruction(true);
-
-  // Make sure can we will go to the interpreter and use the shadow frames. The
-  // early return for the final frame will force everything to the interpreter
-  // so we only need to instrument if it was not present.
-  if (created_final_frame) {
-    art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+  frames.penultimate_frame_->SetForceRetryInstruction(true);
+  frames.final_frame_->SetForcePopFrame(true);
+  frames.final_frame_->SetSkipMethodExitEvents(true);
+  if (frames.created_final_frame_ || frames.created_penultimate_frame_) {
+    art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_){
       DeoptManager::Get()->DeoptimizeThread(self);
     });
-    target->RequestSynchronousCheckpoint(&fc);
+    frames.target_->RequestSynchronousCheckpoint(&fc);
   } else {
     art::Locks::thread_list_lock_->ExclusiveUnlock(self);
   }
   return OK;
 }
 
+template <typename T>
+jvmtiError
+StackUtil::ForceEarlyReturn(jvmtiEnv* env, EventHandler* event_handler, jthread thread, T value) {
+  art::Thread* self = art::Thread::Current();
+  // We don't want to use the null == current-thread idiom since for events (that we use internally
+  // to implement force-early-return) we instead have null == all threads. Instead just get the
+  // current jthread if needed.
+  ScopedLocalRef<jthread> cur_thread(self->GetJniEnv(), nullptr);
+  if (UNLIKELY(thread == nullptr)) {
+    art::ScopedObjectAccess soa(self);
+    cur_thread.reset(soa.AddLocalReference<jthread>(self->GetPeer()));
+    thread = cur_thread.get();
+  }
+  // This sets up the exit events we implement early return using before we have the locks and
+  // thanks to destructor ordering will tear them down if something goes wrong.
+  SetupMethodExitEvents smee(self, event_handler, thread);
+  NonStandardExitFrames<NonStandardExitType::kForceReturn> frames(self, env, thread);
+  if (frames.result_ != OK) {
+    smee.NotifyFailure();
+    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
+    return frames.result_;
+  } else if (!ValidReturnType<T>(
+                 self, frames.final_frame_->GetMethod()->ResolveReturnType(), value)) {
+    smee.NotifyFailure();
+    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
+    return ERR(TYPE_MISMATCH);
+  } else if (frames.final_frame_->GetForcePopFrame()) {
+    // TODO We should really support this.
+    smee.NotifyFailure();
+    std::string thread_name;
+    frames.target_->GetThreadName(thread_name);
+    JVMTI_LOG(WARNING, env) << "PopFrame or force-return already pending on thread " << thread_name;
+    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
+    return ERR(OPAQUE_FRAME);
+  }
+  // Tell the shadow-frame to return immediately and skip all exit events.
+  frames.final_frame_->SetForcePopFrame(true);
+  AddDelayedMethodExitEvent<T>(event_handler, frames.final_frame_, value);
+  if (frames.created_final_frame_ || frames.created_penultimate_frame_) {
+    art::FunctionClosure fc([](art::Thread* self) REQUIRES_SHARED(art::Locks::mutator_lock_){
+      DeoptManager::Get()->DeoptimizeThread(self);
+    });
+    frames.target_->RequestSynchronousCheckpoint(&fc);
+  } else {
+    art::Locks::thread_list_lock_->ExclusiveUnlock(self);
+  }
+  return OK;
+}
+
+// Instantiate the ForceEarlyReturn templates.
+template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jint);
+template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jlong);
+template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jfloat);
+template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jdouble);
+template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, jobject);
+template jvmtiError StackUtil::ForceEarlyReturn(jvmtiEnv*, EventHandler*, jthread, nullptr_t);
+
 }  // namespace openjdkjvmti
diff --git a/openjdkjvmti/ti_stack.h b/openjdkjvmti/ti_stack.h
index 55c4269..918aa4c 100644
--- a/openjdkjvmti/ti_stack.h
+++ b/openjdkjvmti/ti_stack.h
@@ -37,6 +37,7 @@
 
 #include "art_method.h"
 #include "base/mutex.h"
+#include "events.h"
 #include "stack.h"
 
 namespace openjdkjvmti {
@@ -83,6 +84,10 @@
   static jvmtiError NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth);
 
   static jvmtiError PopFrame(jvmtiEnv* env, jthread thread);
+
+  template <typename T>
+  static jvmtiError ForceEarlyReturn(
+      jvmtiEnv* env, EventHandler* event_handler, jthread thread, T value);
 };
 
 struct FindFrameAtDepthVisitor : art::StackVisitor {
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index 6c50a20..1a5b227f 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -122,12 +122,14 @@
     }
     if (!started) {
       // Runtime isn't started. We only expect at most the signal handler or JIT threads to be
-      // started here.
+      // started here; this includes the perfetto_hprof_listener signal handler thread for
+      // perfetto_hprof.
       if (art::kIsDebugBuild) {
         std::string name;
         self->GetThreadName(name);
         if (name != "JDWP" &&
             name != "Signal Catcher" &&
+            name != "perfetto_hprof_listener" &&
             !android::base::StartsWith(name, "Jit thread pool") &&
             !android::base::StartsWith(name, "Runtime worker thread")) {
           LOG(FATAL) << "Unexpected thread before start: " << name << " id: "
@@ -229,6 +231,7 @@
                                  const art::ScopedObjectAccessAlreadyRunnable& soa,
                                  /*out*/ art::Thread** thr,
                                  /*out*/ jvmtiError* err) {
+  art::ScopedExceptionStorage sse(soa.Self());
   if (thread == nullptr) {
     *thr = art::Thread::Current();
     return true;
diff --git a/openjdkjvmti/ti_thread.h b/openjdkjvmti/ti_thread.h
index c5443bf..5bf8a3f 100644
--- a/openjdkjvmti/ti_thread.h
+++ b/openjdkjvmti/ti_thread.h
@@ -39,6 +39,7 @@
 
 #include "base/macros.h"
 #include "base/mutex.h"
+#include "handle.h"
 #include "thread.h"
 
 namespace art {
@@ -46,6 +47,9 @@
 class ScopedObjectAccessAlreadyRunnable;
 class Thread;
 class Closure;
+namespace mirror {
+class Throwable;
+}  // namespace mirror
 }  // namespace art
 
 namespace openjdkjvmti {
diff --git a/openjdkjvmti/ti_threadgroup.cc b/openjdkjvmti/ti_threadgroup.cc
index e17e61f..bc912cf 100644
--- a/openjdkjvmti/ti_threadgroup.cc
+++ b/openjdkjvmti/ti_threadgroup.cc
@@ -99,7 +99,10 @@
     return ERR(INVALID_THREAD_GROUP);
   }
 
-  art::ObjPtr<art::mirror::Object> obj = soa.Decode<art::mirror::Object>(group);
+  art::StackHandleScope<2> hs(soa.Self());
+  art::Handle<art::mirror::Class> tg_class(
+      hs.NewHandle(soa.Decode<art::mirror::Class>(art::WellKnownClasses::java_lang_ThreadGroup)));
+  art::Handle<art::mirror::Object> obj(hs.NewHandle(soa.Decode<art::mirror::Object>(group)));
 
   // Do the name first. It's the only thing that can fail.
   {
@@ -107,7 +110,7 @@
         art::jni::DecodeArtField(art::WellKnownClasses::java_lang_ThreadGroup_name);
     CHECK(name_field != nullptr);
     art::ObjPtr<art::mirror::String> name_obj =
-        art::ObjPtr<art::mirror::String>::DownCast(name_field->GetObject(obj));
+        art::ObjPtr<art::mirror::String>::DownCast(name_field->GetObject(obj.Get()));
     std::string tmp_str;
     const char* tmp_cstr;
     if (name_obj == nullptr) {
@@ -129,7 +132,7 @@
     art::ArtField* parent_field =
         art::jni::DecodeArtField(art::WellKnownClasses::java_lang_ThreadGroup_parent);
     CHECK(parent_field != nullptr);
-    art::ObjPtr<art::mirror::Object> parent_group = parent_field->GetObject(obj);
+    art::ObjPtr<art::mirror::Object> parent_group = parent_field->GetObject(obj.Get());
     info_ptr->parent = parent_group == nullptr
                            ? nullptr
                            : soa.AddLocalReference<jthreadGroup>(parent_group);
@@ -137,16 +140,16 @@
 
   // Max priority.
   {
-    art::ArtField* prio_field = obj->GetClass()->FindDeclaredInstanceField("maxPriority", "I");
+    art::ArtField* prio_field = tg_class->FindDeclaredInstanceField("maxPriority", "I");
     CHECK(prio_field != nullptr);
-    info_ptr->max_priority = static_cast<jint>(prio_field->GetInt(obj));
+    info_ptr->max_priority = static_cast<jint>(prio_field->GetInt(obj.Get()));
   }
 
   // Daemon.
   {
-    art::ArtField* daemon_field = obj->GetClass()->FindDeclaredInstanceField("daemon", "Z");
+    art::ArtField* daemon_field = tg_class->FindDeclaredInstanceField("daemon", "Z");
     CHECK(daemon_field != nullptr);
-    info_ptr->is_daemon = daemon_field->GetBoolean(obj) == 0 ? JNI_FALSE : JNI_TRUE;
+    info_ptr->is_daemon = daemon_field->GetBoolean(obj.Get()) == 0 ? JNI_FALSE : JNI_TRUE;
   }
 
   return ERR(NONE);
@@ -204,8 +207,7 @@
       groups_array->AsObjectArray<art::mirror::Object>();
 
   // Copy all non-null elements.
-  for (int32_t i = 0; i < groups_array_as_array->GetLength(); ++i) {
-    art::ObjPtr<art::mirror::Object> entry = groups_array_as_array->Get(i);
+  for (auto entry : groups_array_as_array->Iterate()) {
     if (entry != nullptr) {
       thread_groups->push_back(entry);
     }
diff --git a/openjdkjvmti/transform.cc b/openjdkjvmti/transform.cc
index 27f04b7..715a98c 100644
--- a/openjdkjvmti/transform.cc
+++ b/openjdkjvmti/transform.cc
@@ -29,6 +29,7 @@
  * questions.
  */
 
+#include <error.h>
 #include <stddef.h>
 #include <sys/types.h>
 
@@ -47,6 +48,7 @@
 #include "dex/dex_file_types.h"
 #include "dex/utf.h"
 #include "events-inl.h"
+#include "events.h"
 #include "fault_handler.h"
 #include "gc_root-inl.h"
 #include "handle_scope-inl.h"
@@ -64,6 +66,7 @@
 #include "stack.h"
 #include "thread_list.h"
 #include "ti_redefine.h"
+#include "ti_logging.h"
 #include "transform.h"
 #include "utils/dex_cache_arrays_layout-inl.h"
 
@@ -89,15 +92,18 @@
   bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) override {
     DCHECK_EQ(sig, SIGSEGV);
     art::Thread* self = art::Thread::Current();
-    if (UNLIKELY(uninitialized_class_definitions_lock_.IsExclusiveHeld(self))) {
-      if (self != nullptr) {
-        LOG(FATAL) << "Recursive call into Transformation fault handler!";
-        UNREACHABLE();
-      } else {
-        LOG(ERROR) << "Possible deadlock due to recursive signal delivery of segv.";
-      }
-    }
     uintptr_t ptr = reinterpret_cast<uintptr_t>(siginfo->si_addr);
+    if (UNLIKELY(uninitialized_class_definitions_lock_.IsExclusiveHeld(self))) {
+      // It's possible this is just some other unrelated segv that should be
+      // handled separately, continue to later handlers. This is likely due to
+      // running out of memory somewhere along the FixedUpDexFile pipeline and
+      // is likely unrecoverable. By returning false here though we will get a
+      // better, more accurate, stack-trace later that points to the actual
+      // issue.
+      LOG(WARNING) << "Recursive SEGV occurred during Transformation dequickening at 0x" << std::hex
+                   << ptr;
+      return false;
+    }
     ArtClassDefinition* res = nullptr;
 
     {
@@ -212,13 +218,16 @@
 };
 
 static TransformationFaultHandler* gTransformFaultHandler = nullptr;
+static EventHandler* gEventHandler = nullptr;
 
-void Transformer::Setup() {
+
+void Transformer::Register(EventHandler* eh) {
   // Although we create this the fault handler is actually owned by the 'art::fault_manager' which
   // will take care of destroying it.
   if (art::MemMap::kCanReplaceMapping && ArtClassDefinition::kEnableOnDemandDexDequicken) {
     gTransformFaultHandler = new TransformationFaultHandler(&art::fault_manager);
   }
+  gEventHandler = eh;
 }
 
 // Simple helper to add and remove the class definition from the fault handler.
@@ -249,13 +258,17 @@
 template
 void Transformer::TransformSingleClassDirect<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(
     EventHandler* event_handler, art::Thread* self, /*in-out*/ArtClassDefinition* def);
+template
+void Transformer::TransformSingleClassDirect<ArtJvmtiEvent::kStructuralDexFileLoadHook>(
+    EventHandler* event_handler, art::Thread* self, /*in-out*/ArtClassDefinition* def);
 
 template<ArtJvmtiEvent kEvent>
 void Transformer::TransformSingleClassDirect(EventHandler* event_handler,
                                              art::Thread* self,
                                              /*in-out*/ArtClassDefinition* def) {
   static_assert(kEvent == ArtJvmtiEvent::kClassFileLoadHookNonRetransformable ||
-                kEvent == ArtJvmtiEvent::kClassFileLoadHookRetransformable,
+                kEvent == ArtJvmtiEvent::kClassFileLoadHookRetransformable ||
+                kEvent == ArtJvmtiEvent::kStructuralDexFileLoadHook,
                 "bad event type");
   // We don't want to do transitions between calling the event and setting the new data so change to
   // native state early. This also avoids any problems that the FaultHandler might have in
@@ -276,61 +289,73 @@
       dex_data.data(),
       /*out*/&new_len,
       /*out*/&new_data);
-  def->SetNewDexData(new_len, new_data);
+  def->SetNewDexData(new_len, new_data, kEvent);
 }
 
-jvmtiError Transformer::RetransformClassesDirect(
-      EventHandler* event_handler,
-      art::Thread* self,
-      /*in-out*/std::vector<ArtClassDefinition>* definitions) {
+template <RedefinitionType kType>
+void Transformer::RetransformClassesDirect(
+    art::Thread* self,
+    /*in-out*/ std::vector<ArtClassDefinition>* definitions) {
+  constexpr ArtJvmtiEvent kEvent = kType == RedefinitionType::kNormal
+                                       ? ArtJvmtiEvent::kClassFileLoadHookRetransformable
+                                       : ArtJvmtiEvent::kStructuralDexFileLoadHook;
   for (ArtClassDefinition& def : *definitions) {
-    TransformSingleClassDirect<ArtJvmtiEvent::kClassFileLoadHookRetransformable>(event_handler,
-                                                                                 self,
-                                                                                 &def);
+    TransformSingleClassDirect<kEvent>(gEventHandler, self, &def);
   }
-  return OK;
 }
 
-jvmtiError Transformer::RetransformClasses(ArtJvmTiEnv* env,
-                                           EventHandler* event_handler,
-                                           art::Runtime* runtime,
-                                           art::Thread* self,
+template void Transformer::RetransformClassesDirect<RedefinitionType::kNormal>(
+      art::Thread* self, /*in-out*/std::vector<ArtClassDefinition>* definitions);
+template void Transformer::RetransformClassesDirect<RedefinitionType::kStructural>(
+      art::Thread* self, /*in-out*/std::vector<ArtClassDefinition>* definitions);
+
+jvmtiError Transformer::RetransformClasses(jvmtiEnv* env,
                                            jint class_count,
-                                           const jclass* classes,
-                                           /*out*/std::string* error_msg) {
-  if (env == nullptr) {
-    *error_msg = "env was null!";
-    return ERR(INVALID_ENVIRONMENT);
-  } else if (class_count < 0) {
-    *error_msg = "class_count was less then 0";
+                                           const jclass* classes) {
+  if (class_count < 0) {
+    JVMTI_LOG(WARNING, env) << "FAILURE TO RETRANSFORM class_count was less then 0";
     return ERR(ILLEGAL_ARGUMENT);
   } else if (class_count == 0) {
     // We don't actually need to do anything. Just return OK.
     return OK;
   } else if (classes == nullptr) {
-    *error_msg = "null classes!";
+    JVMTI_LOG(WARNING, env) << "FAILURE TO RETRANSFORM null classes!";
     return ERR(NULL_POINTER);
   }
+  art::Thread* self = art::Thread::Current();
+  art::Runtime* runtime = art::Runtime::Current();
   // A holder that will Deallocate all the class bytes buffers on destruction.
+  std::string error_msg;
   std::vector<ArtClassDefinition> definitions;
   jvmtiError res = OK;
   for (jint i = 0; i < class_count; i++) {
-    res = Redefiner::GetClassRedefinitionError(classes[i], error_msg);
+    res = Redefiner::GetClassRedefinitionError<RedefinitionType::kNormal>(classes[i], &error_msg);
     if (res != OK) {
+      JVMTI_LOG(WARNING, env) << "FAILURE TO RETRANSFORM " << error_msg;
       return res;
     }
     ArtClassDefinition def;
     res = def.Init(self, classes[i]);
     if (res != OK) {
+      JVMTI_LOG(WARNING, env) << "FAILURE TO RETRANSFORM definition init failed";
       return res;
     }
     definitions.push_back(std::move(def));
   }
-  res = RetransformClassesDirect(event_handler, self, &definitions);
+  RetransformClassesDirect<RedefinitionType::kStructural>(self, &definitions);
+  RetransformClassesDirect<RedefinitionType::kNormal>(self, &definitions);
+  RedefinitionType redef_type =
+      std::any_of(definitions.cbegin(),
+                  definitions.cend(),
+                  [](const auto& it) { return it.HasStructuralChanges(); })
+          ? RedefinitionType::kStructural
+          : RedefinitionType::kNormal;
+  res = Redefiner::RedefineClassesDirect(
+      ArtJvmTiEnv::AsArtJvmTiEnv(env), runtime, self, definitions, redef_type, &error_msg);
   if (res != OK) {
-    return res;
+    JVMTI_LOG(WARNING, env) << "FAILURE TO RETRANSFORM " << error_msg;
   }
-  return Redefiner::RedefineClassesDirect(env, runtime, self, definitions, error_msg);
+  return res;
 }
 
 // TODO Move this somewhere else, ti_class?
diff --git a/openjdkjvmti/transform.h b/openjdkjvmti/transform.h
index 8bbeda4..a58b50e 100644
--- a/openjdkjvmti/transform.h
+++ b/openjdkjvmti/transform.h
@@ -39,6 +39,7 @@
 
 #include "art_jvmti.h"
 #include "ti_class_definition.h"
+#include "ti_redefine.h"
 
 namespace openjdkjvmti {
 
@@ -48,7 +49,7 @@
 
 class Transformer {
  public:
-  static void Setup();
+  static void Register(EventHandler* eh);
 
   template<ArtJvmtiEvent kEvent>
   static void TransformSingleClassDirect(
@@ -56,18 +57,14 @@
       art::Thread* self,
       /*in-out*/ArtClassDefinition* def);
 
-  static jvmtiError RetransformClassesDirect(
-      EventHandler* event_handler,
+  template<RedefinitionType kType>
+  static void RetransformClassesDirect(
       art::Thread* self,
       /*in-out*/std::vector<ArtClassDefinition>* definitions);
 
-  static jvmtiError RetransformClasses(ArtJvmTiEnv* env,
-                                       EventHandler* event_handler,
-                                       art::Runtime* runtime,
-                                       art::Thread* self,
+  static jvmtiError RetransformClasses(jvmtiEnv* env,
                                        jint class_count,
-                                       const jclass* classes,
-                                       /*out*/std::string* error_msg);
+                                       const jclass* classes);
 };
 
 }  // namespace openjdkjvmti
diff --git a/perfetto_hprof/Android.bp b/perfetto_hprof/Android.bp
new file mode 100644
index 0000000..65c072e
--- /dev/null
+++ b/perfetto_hprof/Android.bp
@@ -0,0 +1,99 @@
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Build variants {target} x {debug,ndebug} x {32,64}
+
+// This depends on the Perfetto client API. This uses the ProducerPort to
+// communicate to the system trace. This is an API whose ABI is maintained
+// to be backwards compatible, see
+// https://android.googlesource.com/platform/external/perfetto/+/refs/heads/master/protos/perfetto/ipc/producer_port.proto.
+
+gensrcs {
+    name: "art_perfetto_hprof_operator_srcs",
+    cmd: "$(location generate_operator_out) art/perfetto_hprof $(in) > $(out)",
+    tools: ["generate_operator_out"],
+    srcs: [
+        "perfetto_hprof.h",
+    ],
+    output_extension: "operator_out.cc",
+}
+
+cc_defaults {
+    name: "perfetto_hprof-defaults",
+    host_supported: false,
+    srcs: ["perfetto_hprof.cc"],
+    defaults: ["art_defaults"],
+    include_dirs: [
+        "external/perfetto/include",
+        "external/perfetto/protos",
+    ],
+
+    // Note that this tool needs to be built for both 32-bit and 64-bit since it requires
+    // to be same ISA as what it is attached to.
+    compile_multilib: "both",
+
+    shared_libs: [
+        "libbase",
+        "liblog",
+        "libdexfile",
+    ],
+    static_libs: [
+        "libperfetto_client_experimental",
+        "perfetto_trace_protos",
+    ],
+    generated_sources: [
+        "perfetto_protos_perfetto_config_profiling_zero_gen",
+        "art_perfetto_hprof_operator_srcs",
+    ],
+    generated_headers: [
+        "perfetto_protos_perfetto_config_profiling_zero_gen_headers",
+    ],
+    target: {
+        darwin: {
+            enabled: false,
+        },
+    },
+    header_libs: [
+        "libnativehelper_header_only",
+    ],
+}
+
+art_cc_library {
+    name: "libperfetto_hprof",
+    defaults: ["perfetto_hprof-defaults"],
+    shared_libs: [
+        "libart",
+        "libartbase",
+    ],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
+}
+
+art_cc_library {
+    name: "libperfetto_hprofd",
+    defaults: [
+        "art_debug_defaults",
+        "perfetto_hprof-defaults",
+    ],
+    shared_libs: [
+        "libartd",
+        "libartbased",
+    ],
+    apex_available: [
+        "com.android.art.debug",
+    ],
+}
diff --git a/perfetto_hprof/perfetto_hprof.cc b/perfetto_hprof/perfetto_hprof.cc
new file mode 100644
index 0000000..3f5d06d
--- /dev/null
+++ b/perfetto_hprof/perfetto_hprof.cc
@@ -0,0 +1,784 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "perfetto_hprof"
+
+#include "perfetto_hprof.h"
+
+#include <android-base/logging.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <sched.h>
+#include <signal.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <sys/wait.h>
+#include <thread>
+#include <time.h>
+
+#include <type_traits>
+
+#include "gc/heap-visit-objects-inl.h"
+#include "gc/heap.h"
+#include "gc/scoped_gc_critical_section.h"
+#include "mirror/object-refvisitor-inl.h"
+#include "nativehelper/scoped_local_ref.h"
+#include "perfetto/profiling/normalize.h"
+#include "perfetto/profiling/parse_smaps.h"
+#include "perfetto/trace/interned_data/interned_data.pbzero.h"
+#include "perfetto/trace/profiling/heap_graph.pbzero.h"
+#include "perfetto/trace/profiling/profile_common.pbzero.h"
+#include "perfetto/trace/profiling/smaps.pbzero.h"
+#include "perfetto/config/profiling/java_hprof_config.pbzero.h"
+#include "perfetto/protozero/packed_repeated_fields.h"
+#include "perfetto/tracing.h"
+#include "runtime-inl.h"
+#include "runtime_callbacks.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread_list.h"
+#include "well_known_classes.h"
+#include "dex/descriptors_names.h"
+
+// There are three threads involved in this:
+// * listener thread: this is idle in the background when this plugin gets loaded, and waits
+//   for data on on g_signal_pipe_fds.
+// * signal thread: an arbitrary thread that handles the signal and writes data to
+//   g_signal_pipe_fds.
+// * perfetto producer thread: once the signal is received, the app forks. In the newly forked
+//   child, the Perfetto Client API spawns a thread to communicate with traced.
+
+namespace perfetto_hprof {
+
+constexpr int kJavaHeapprofdSignal = __SIGRTMIN + 6;
+constexpr time_t kWatchdogTimeoutSec = 120;
+// This needs to be lower than the maximum acceptable chunk size, because this
+// is checked *before* writing another submessage. We conservatively assume
+// submessages can be up to 100k here for a 500k chunk size.
+// DropBox has a 500k chunk limit, and each chunk needs to parse as a proto.
+constexpr uint32_t kPacketSizeThreshold = 400000;
+constexpr char kByte[1] = {'x'};
+static art::Mutex& GetStateMutex() {
+  static art::Mutex state_mutex("perfetto_hprof_state_mutex", art::LockLevel::kGenericBottomLock);
+  return state_mutex;
+}
+
+static art::ConditionVariable& GetStateCV() {
+  static art::ConditionVariable state_cv("perfetto_hprof_state_cv", GetStateMutex());
+  return state_cv;
+}
+
+static State g_state = State::kUninitialized;
+
+// Pipe to signal from the signal handler into a worker thread that handles the
+// dump requests.
+int g_signal_pipe_fds[2];
+static struct sigaction g_orig_act = {};
+
+template <typename T>
+uint64_t FindOrAppend(std::map<T, uint64_t>* m, const T& s) {
+  auto it = m->find(s);
+  if (it == m->end()) {
+    std::tie(it, std::ignore) = m->emplace(s, m->size());
+  }
+  return it->second;
+}
+
+void ArmWatchdogOrDie() {
+  timer_t timerid{};
+  struct sigevent sev {};
+  sev.sigev_notify = SIGEV_SIGNAL;
+  sev.sigev_signo = SIGKILL;
+
+  if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) == -1) {
+    // This only gets called in the child, so we can fatal without impacting
+    // the app.
+    PLOG(FATAL) << "failed to create watchdog timer";
+  }
+
+  struct itimerspec its {};
+  its.it_value.tv_sec = kWatchdogTimeoutSec;
+
+  if (timer_settime(timerid, 0, &its, nullptr) == -1) {
+    // This only gets called in the child, so we can fatal without impacting
+    // the app.
+    PLOG(FATAL) << "failed to arm watchdog timer";
+  }
+}
+
+bool StartsWith(const std::string& str, const std::string& prefix) {
+  return str.compare(0, prefix.length(), prefix) == 0;
+}
+
+// Sample entries that match one of the following
+// start with /system/
+// start with /vendor/
+// start with /data/app/
+// contains "extracted in memory from Y", where Y matches any of the above
+bool ShouldSampleSmapsEntry(const perfetto::profiling::SmapsEntry& e) {
+  if (StartsWith(e.pathname, "/system/") || StartsWith(e.pathname, "/vendor/") ||
+      StartsWith(e.pathname, "/data/app/")) {
+    return true;
+  }
+  if (StartsWith(e.pathname, "[anon:")) {
+    if (e.pathname.find("extracted in memory from /system/") != std::string::npos) {
+      return true;
+    }
+    if (e.pathname.find("extracted in memory from /vendor/") != std::string::npos) {
+      return true;
+    }
+    if (e.pathname.find("extracted in memory from /data/app/") != std::string::npos) {
+      return true;
+    }
+  }
+  return false;
+}
+
+bool CanConnectToSocket(const char* name) {
+  struct sockaddr_un addr = {};
+  addr.sun_family = AF_UNIX;
+  strncpy(addr.sun_path, name, sizeof(addr.sun_path) - 1);
+  int fd = socket(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0);
+  if (fd == -1) {
+    PLOG(ERROR) << "failed to create socket";
+    return false;
+  }
+  bool connected = connect(fd, reinterpret_cast<struct sockaddr*>(&addr), sizeof(addr)) == 0;
+  close(fd);
+  return connected;
+}
+
+constexpr size_t kMaxCmdlineSize = 512;
+
+class JavaHprofDataSource : public perfetto::DataSource<JavaHprofDataSource> {
+ public:
+  constexpr static perfetto::BufferExhaustedPolicy kBufferExhaustedPolicy =
+    perfetto::BufferExhaustedPolicy::kStall;
+  void OnSetup(const SetupArgs& args) override {
+    // This is on the heap as it triggers -Wframe-larger-than.
+    std::unique_ptr<perfetto::protos::pbzero::JavaHprofConfig::Decoder> cfg(
+        new perfetto::protos::pbzero::JavaHprofConfig::Decoder(
+          args.config->java_hprof_config_raw()));
+
+    if (args.config->enable_extra_guardrails() && !CanConnectToSocket("/dev/socket/heapprofd")) {
+      LOG(ERROR) << "rejecting extra guardrails";
+      enabled_ = false;
+      return;
+    }
+
+    dump_smaps_ = cfg->dump_smaps();
+
+    uint64_t self_pid = static_cast<uint64_t>(getpid());
+    for (auto pid_it = cfg->pid(); pid_it; ++pid_it) {
+      if (*pid_it == self_pid) {
+        enabled_ = true;
+        return;
+      }
+    }
+
+    if (cfg->has_process_cmdline()) {
+      int fd = open("/proc/self/cmdline", O_RDONLY | O_CLOEXEC);
+      if (fd == -1) {
+        PLOG(ERROR) << "failed to open /proc/self/cmdline";
+        return;
+      }
+      char cmdline[kMaxCmdlineSize];
+      ssize_t rd = read(fd, cmdline, sizeof(cmdline) - 1);
+      if (rd == -1) {
+        PLOG(ERROR) << "failed to read /proc/self/cmdline";
+      }
+      close(fd);
+      if (rd == -1) {
+        return;
+      }
+      cmdline[rd] = '\0';
+      char* cmdline_ptr = cmdline;
+      ssize_t sz = perfetto::profiling::NormalizeCmdLine(&cmdline_ptr, static_cast<size_t>(rd + 1));
+      if (sz == -1) {
+        PLOG(ERROR) << "failed to normalize cmdline";
+      }
+      for (auto it = cfg->process_cmdline(); it; ++it) {
+        std::string other = (*it).ToStdString();
+        // Append \0 to make this a C string.
+        other.resize(other.size() + 1);
+        char* other_ptr = &(other[0]);
+        ssize_t other_sz = perfetto::profiling::NormalizeCmdLine(&other_ptr, other.size());
+        if (other_sz == -1) {
+          PLOG(ERROR) << "failed to normalize other cmdline";
+          continue;
+        }
+        if (sz == other_sz && strncmp(cmdline_ptr, other_ptr, static_cast<size_t>(sz)) == 0) {
+          enabled_ = true;
+          return;
+        }
+      }
+    }
+  }
+
+  bool dump_smaps() { return dump_smaps_; }
+  bool enabled() { return enabled_; }
+
+  void OnStart(const StartArgs&) override {
+    if (!enabled()) {
+      return;
+    }
+    art::MutexLock lk(art_thread(), GetStateMutex());
+    if (g_state == State::kWaitForStart) {
+      g_state = State::kStart;
+      GetStateCV().Broadcast(art_thread());
+    }
+  }
+
+  void OnStop(const StopArgs&) override {}
+
+  static art::Thread* art_thread() {
+    // TODO(fmayer): Attach the Perfetto producer thread to ART and give it a name. This is
+    // not trivial, we cannot just attach the first time this method is called, because
+    // AttachCurrentThread deadlocks with the ConditionVariable::Wait in WaitForDataSource.
+    //
+    // We should attach the thread as soon as the Client API spawns it, but that needs more
+    // complicated plumbing.
+    return nullptr;
+  }
+
+ private:
+  bool enabled_ = false;
+  bool dump_smaps_ = false;
+  static art::Thread* self_;
+};
+
+art::Thread* JavaHprofDataSource::self_ = nullptr;
+
+
+void WaitForDataSource(art::Thread* self) {
+  perfetto::TracingInitArgs args;
+  args.backends = perfetto::BackendType::kSystemBackend;
+  perfetto::Tracing::Initialize(args);
+
+  perfetto::DataSourceDescriptor dsd;
+  dsd.set_name("android.java_hprof");
+  JavaHprofDataSource::Register(dsd);
+
+  LOG(INFO) << "waiting for data source";
+
+  art::MutexLock lk(self, GetStateMutex());
+  while (g_state != State::kStart) {
+    GetStateCV().Wait(self);
+  }
+}
+
+class Writer {
+ public:
+  Writer(pid_t parent_pid, JavaHprofDataSource::TraceContext* ctx, uint64_t timestamp)
+      : parent_pid_(parent_pid), ctx_(ctx), timestamp_(timestamp),
+        last_written_(ctx_->written()) {}
+
+  // Return whether the next call to GetHeapGraph will create a new TracePacket.
+  bool will_create_new_packet() {
+    return !heap_graph_ || ctx_->written() - last_written_ > kPacketSizeThreshold;
+  }
+
+  perfetto::protos::pbzero::HeapGraph* GetHeapGraph() {
+    if (will_create_new_packet()) {
+      CreateNewHeapGraph();
+    }
+    return heap_graph_;
+  }
+
+  void CreateNewHeapGraph() {
+    if (heap_graph_) {
+      heap_graph_->set_continued(true);
+    }
+    Finalize();
+
+    uint64_t written = ctx_->written();
+
+    trace_packet_ = ctx_->NewTracePacket();
+    trace_packet_->set_timestamp(timestamp_);
+    heap_graph_ = trace_packet_->set_heap_graph();
+    heap_graph_->set_pid(parent_pid_);
+    heap_graph_->set_index(index_++);
+
+    last_written_ = written;
+  }
+
+  void Finalize() {
+    if (trace_packet_) {
+      trace_packet_->Finalize();
+    }
+    heap_graph_ = nullptr;
+  }
+
+  ~Writer() { Finalize(); }
+
+ private:
+  const pid_t parent_pid_;
+  JavaHprofDataSource::TraceContext* const ctx_;
+  const uint64_t timestamp_;
+
+  uint64_t last_written_ = 0;
+
+  perfetto::DataSource<JavaHprofDataSource>::TraceContext::TracePacketHandle
+      trace_packet_;
+  perfetto::protos::pbzero::HeapGraph* heap_graph_ = nullptr;
+
+  uint64_t index_ = 0;
+};
+
+class ReferredObjectsFinder {
+ public:
+  explicit ReferredObjectsFinder(
+      std::vector<std::pair<std::string, art::mirror::Object*>>* referred_objects)
+      : referred_objects_(referred_objects) {}
+
+  // For art::mirror::Object::VisitReferences.
+  void operator()(art::ObjPtr<art::mirror::Object> obj, art::MemberOffset offset,
+                  bool is_static) const
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    art::mirror::Object* ref = obj->GetFieldObject<art::mirror::Object>(offset);
+    art::ArtField* field;
+    if (is_static) {
+      field = art::ArtField::FindStaticFieldWithOffset(obj->AsClass(), offset.Uint32Value());
+    } else {
+      field = art::ArtField::FindInstanceFieldWithOffset(obj->GetClass(), offset.Uint32Value());
+    }
+    std::string field_name = "";
+    if (field != nullptr) {
+      field_name = field->PrettyField(/*with_type=*/true);
+    }
+    referred_objects_->emplace_back(std::move(field_name), ref);
+  }
+
+  void VisitRootIfNonNull(art::mirror::CompressedReference<art::mirror::Object>* root
+                              ATTRIBUTE_UNUSED) const {}
+  void VisitRoot(art::mirror::CompressedReference<art::mirror::Object>* root
+                     ATTRIBUTE_UNUSED) const {}
+
+ private:
+  // We can use a raw Object* pointer here, because there are no concurrent GC threads after the
+  // fork.
+  std::vector<std::pair<std::string, art::mirror::Object*>>* referred_objects_;
+};
+
+class RootFinder : public art::SingleRootVisitor {
+ public:
+  explicit RootFinder(
+    std::map<art::RootType, std::vector<art::mirror::Object*>>* root_objects)
+      : root_objects_(root_objects) {}
+
+  void VisitRoot(art::mirror::Object* root, const art::RootInfo& info) override {
+    (*root_objects_)[info.GetType()].emplace_back(root);
+  }
+
+ private:
+  // We can use a raw Object* pointer here, because there are no concurrent GC threads after the
+  // fork.
+  std::map<art::RootType, std::vector<art::mirror::Object*>>* root_objects_;
+};
+
+perfetto::protos::pbzero::HeapGraphRoot::Type ToProtoType(art::RootType art_type) {
+  switch (art_type) {
+    case art::kRootUnknown:
+      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_UNKNOWN;
+    case art::kRootJNIGlobal:
+      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_JNI_GLOBAL;
+    case art::kRootJNILocal:
+      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_JNI_LOCAL;
+    case art::kRootJavaFrame:
+      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_JAVA_FRAME;
+    case art::kRootNativeStack:
+      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_NATIVE_STACK;
+    case art::kRootStickyClass:
+      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_STICKY_CLASS;
+    case art::kRootThreadBlock:
+      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_THREAD_BLOCK;
+    case art::kRootMonitorUsed:
+      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_MONITOR_USED;
+    case art::kRootThreadObject:
+      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_THREAD_OBJECT;
+    case art::kRootInternedString:
+      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_INTERNED_STRING;
+    case art::kRootFinalizing:
+      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_FINALIZING;
+    case art::kRootDebugger:
+      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_DEBUGGER;
+    case art::kRootReferenceCleanup:
+      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_REFERENCE_CLEANUP;
+    case art::kRootVMInternal:
+      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_VM_INTERNAL;
+    case art::kRootJNIMonitor:
+      return perfetto::protos::pbzero::HeapGraphRoot::ROOT_JNI_MONITOR;
+  }
+}
+
+std::string PrettyType(art::mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS {
+  if (klass == nullptr) {
+    return "(raw)";
+  }
+  std::string temp;
+  std::string result(art::PrettyDescriptor(klass->GetDescriptor(&temp)));
+  return result;
+}
+
+void DumpSmaps(JavaHprofDataSource::TraceContext* ctx) {
+  FILE* smaps = fopen("/proc/self/smaps", "r");
+  if (smaps != nullptr) {
+    auto trace_packet = ctx->NewTracePacket();
+    auto* smaps_packet = trace_packet->set_smaps_packet();
+    smaps_packet->set_pid(getpid());
+    perfetto::profiling::ParseSmaps(smaps,
+        [&smaps_packet](const perfetto::profiling::SmapsEntry& e) {
+      if (ShouldSampleSmapsEntry(e)) {
+        auto* smaps_entry = smaps_packet->add_entries();
+        smaps_entry->set_path(e.pathname);
+        smaps_entry->set_size_kb(e.size_kb);
+        smaps_entry->set_private_dirty_kb(e.private_dirty_kb);
+        smaps_entry->set_swap_kb(e.swap_kb);
+      }
+    });
+    fclose(smaps);
+  } else {
+    PLOG(ERROR) << "failed to open smaps";
+  }
+}
+
+uint64_t GetObjectId(const art::mirror::Object* obj) {
+  return reinterpret_cast<uint64_t>(obj) / std::alignment_of<art::mirror::Object>::value;
+}
+
+void DumpPerfetto(art::Thread* self) {
+  pid_t parent_pid = getpid();
+  LOG(INFO) << "preparing to dump heap for " << parent_pid;
+
+  // Need to take a heap dump while GC isn't running. See the comment in
+  // Heap::VisitObjects(). Also we need the critical section to avoid visiting
+  // the same object twice. See b/34967844.
+  //
+  // We need to do this before the fork, because otherwise it can deadlock
+  // waiting for the GC, as all other threads get terminated by the clone, but
+  // their locks are not released.
+  art::gc::ScopedGCCriticalSection gcs(self, art::gc::kGcCauseHprof,
+                                       art::gc::kCollectorTypeHprof);
+
+  art::ScopedSuspendAll ssa(__FUNCTION__, /* long_suspend=*/ true);
+
+  pid_t pid = fork();
+  if (pid == -1) {
+    // Fork error.
+    PLOG(ERROR) << "fork";
+    return;
+  }
+  if (pid != 0) {
+    // Parent
+    int stat_loc;
+    for (;;) {
+      if (waitpid(pid, &stat_loc, 0) != -1 || errno != EINTR) {
+        break;
+      }
+    }
+    return;
+  }
+
+  // The following code is only executed by the child of the original process.
+  //
+  // Daemon creates a new process that is the grand-child of the original process, and exits.
+  if (daemon(0, 0) == -1) {
+    PLOG(FATAL) << "daemon";
+  }
+
+  // The following code is only executed by the grand-child of the original process.
+
+  // Make sure that this is the first thing we do after forking, so if anything
+  // below hangs, the fork will go away from the watchdog.
+  ArmWatchdogOrDie();
+
+  struct timespec ts = {};
+  if (clock_gettime(CLOCK_BOOTTIME, &ts) != 0) {
+    LOG(FATAL) << "Failed to get boottime.";
+  }
+  uint64_t timestamp = ts.tv_sec * 1000000000LL + ts.tv_nsec;
+
+  WaitForDataSource(self);
+
+  JavaHprofDataSource::Trace(
+      [parent_pid, timestamp](JavaHprofDataSource::TraceContext ctx)
+          NO_THREAD_SAFETY_ANALYSIS {
+            bool dump_smaps;
+            {
+              auto ds = ctx.GetDataSourceLocked();
+              if (!ds || !ds->enabled()) {
+                LOG(INFO) << "skipping irrelevant data source.";
+                return;
+              }
+              dump_smaps = ds->dump_smaps();
+            }
+            LOG(INFO) << "dumping heap for " << parent_pid;
+            if (dump_smaps) {
+              DumpSmaps(&ctx);
+            }
+            Writer writer(parent_pid, &ctx, timestamp);
+            // Make sure that intern ID 0 (default proto value for a uint64_t) always maps to ""
+            // (default proto value for a string).
+            std::map<std::string, uint64_t> interned_fields{{"", 0}};
+            std::map<std::string, uint64_t> interned_locations{{"", 0}};
+            std::map<uintptr_t, uint64_t> interned_classes{{0, 0}};
+
+            std::map<art::RootType, std::vector<art::mirror::Object*>> root_objects;
+            RootFinder rcf(&root_objects);
+            art::Runtime::Current()->VisitRoots(&rcf);
+            std::unique_ptr<protozero::PackedVarInt> object_ids(
+                new protozero::PackedVarInt);
+            for (const auto& p : root_objects) {
+              const art::RootType root_type = p.first;
+              const std::vector<art::mirror::Object*>& children = p.second;
+              perfetto::protos::pbzero::HeapGraphRoot* root_proto =
+                writer.GetHeapGraph()->add_roots();
+              root_proto->set_root_type(ToProtoType(root_type));
+              for (art::mirror::Object* obj : children) {
+                if (writer.will_create_new_packet()) {
+                  root_proto->set_object_ids(*object_ids);
+                  object_ids->Reset();
+                  root_proto = writer.GetHeapGraph()->add_roots();
+                  root_proto->set_root_type(ToProtoType(root_type));
+                }
+                object_ids->Append(GetObjectId(obj));
+              }
+              root_proto->set_object_ids(*object_ids);
+              object_ids->Reset();
+            }
+
+            std::unique_ptr<protozero::PackedVarInt> reference_field_ids(
+                new protozero::PackedVarInt);
+            std::unique_ptr<protozero::PackedVarInt> reference_object_ids(
+                new protozero::PackedVarInt);
+
+            art::Runtime::Current()->GetHeap()->VisitObjectsPaused(
+                [&writer, &interned_fields, &interned_locations,
+                &reference_field_ids, &reference_object_ids, &interned_classes](
+                    art::mirror::Object* obj) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+                  if (obj->IsClass()) {
+                    art::mirror::Class* klass = obj->AsClass().Ptr();
+                    perfetto::protos::pbzero::HeapGraphType* type_proto =
+                      writer.GetHeapGraph()->add_types();
+                    type_proto->set_id(FindOrAppend(&interned_classes,
+                          reinterpret_cast<uintptr_t>(klass)));
+                    type_proto->set_class_name(PrettyType(klass));
+                    type_proto->set_location_id(FindOrAppend(&interned_locations,
+                          klass->GetLocation()));
+                  }
+
+                  art::mirror::Class* klass = obj->GetClass();
+                  uintptr_t class_ptr = reinterpret_cast<uintptr_t>(klass);
+                  // We need to synthesize a new type for Class<Foo>, which does not exist
+                  // in the runtime. Otherwise, all the static members of all classes would be
+                  // attributed to java.lang.Class.
+                  if (klass->IsClassClass()) {
+                    CHECK(obj->IsClass());
+                    perfetto::protos::pbzero::HeapGraphType* type_proto =
+                      writer.GetHeapGraph()->add_types();
+                    // All pointers are at least multiples of two, so this way we can make sure
+                    // we are not colliding with a real class.
+                    class_ptr = reinterpret_cast<uintptr_t>(obj) | 1;
+                    auto class_id = FindOrAppend(&interned_classes, class_ptr);
+                    type_proto->set_id(class_id);
+                    type_proto->set_class_name(obj->PrettyTypeOf());
+                    type_proto->set_location_id(FindOrAppend(&interned_locations,
+                          obj->AsClass()->GetLocation()));
+                  }
+
+                  auto class_id = FindOrAppend(&interned_classes, class_ptr);
+
+                  perfetto::protos::pbzero::HeapGraphObject* object_proto =
+                    writer.GetHeapGraph()->add_objects();
+                  object_proto->set_id(GetObjectId(obj));
+                  object_proto->set_type_id(class_id);
+                  object_proto->set_self_size(obj->SizeOf());
+
+                  std::vector<std::pair<std::string, art::mirror::Object*>>
+                      referred_objects;
+                  ReferredObjectsFinder objf(&referred_objects);
+                  obj->VisitReferences(objf, art::VoidFunctor());
+                  for (const auto& p : referred_objects) {
+                    reference_field_ids->Append(FindOrAppend(&interned_fields, p.first));
+                    reference_object_ids->Append(GetObjectId(p.second));
+                  }
+                  object_proto->set_reference_field_id(*reference_field_ids);
+                  object_proto->set_reference_object_id(*reference_object_ids);
+                  reference_field_ids->Reset();
+                  reference_object_ids->Reset();
+                });
+
+            for (const auto& p : interned_fields) {
+              const std::string& str = p.first;
+              uint64_t id = p.second;
+
+              perfetto::protos::pbzero::InternedString* field_proto =
+                writer.GetHeapGraph()->add_field_names();
+              field_proto->set_iid(id);
+              field_proto->set_str(
+                  reinterpret_cast<const uint8_t*>(str.c_str()), str.size());
+            }
+            for (const auto& p : interned_locations) {
+              const std::string& str = p.first;
+              uint64_t id = p.second;
+
+              perfetto::protos::pbzero::InternedString* location_proto =
+                writer.GetHeapGraph()->add_location_names();
+              location_proto->set_iid(id);
+              location_proto->set_str(reinterpret_cast<const uint8_t*>(str.c_str()),
+                                  str.size());
+            }
+
+            writer.Finalize();
+
+            ctx.Flush([] {
+              {
+                art::MutexLock lk(JavaHprofDataSource::art_thread(), GetStateMutex());
+                g_state = State::kEnd;
+                GetStateCV().Broadcast(JavaHprofDataSource::art_thread());
+              }
+            });
+          });
+
+  art::MutexLock lk(self, GetStateMutex());
+  while (g_state != State::kEnd) {
+    GetStateCV().Wait(self);
+  }
+  LOG(INFO) << "finished dumping heap for " << parent_pid;
+  // Prevent the atexit handlers from running. We do not want to call cleanup
+  // functions the parent process has registered.
+  _exit(0);
+}
+
+// The plugin initialization function.
+extern "C" bool ArtPlugin_Initialize() {
+  if (art::Runtime::Current() == nullptr) {
+    return false;
+  }
+  art::Thread* self = art::Thread::Current();
+  {
+    art::MutexLock lk(self, GetStateMutex());
+    if (g_state != State::kUninitialized) {
+      LOG(ERROR) << "perfetto_hprof already initialized. state: " << g_state;
+      return false;
+    }
+    g_state = State::kWaitForListener;
+  }
+
+  if (pipe2(g_signal_pipe_fds, O_CLOEXEC) == -1) {
+    PLOG(ERROR) << "Failed to pipe";
+    return false;
+  }
+
+  struct sigaction act = {};
+  act.sa_flags = SA_SIGINFO | SA_RESTART;
+  act.sa_sigaction = [](int, siginfo_t*, void*) {
+    if (write(g_signal_pipe_fds[1], kByte, sizeof(kByte)) == -1) {
+      PLOG(ERROR) << "Failed to trigger heap dump";
+    }
+  };
+
+  // TODO(fmayer): We can probably use the SignalCatcher thread here to not
+  // have an idle thread.
+  if (sigaction(kJavaHeapprofdSignal, &act, &g_orig_act) != 0) {
+    close(g_signal_pipe_fds[0]);
+    close(g_signal_pipe_fds[1]);
+    PLOG(ERROR) << "Failed to sigaction";
+    return false;
+  }
+
+  std::thread th([] {
+    art::Runtime* runtime = art::Runtime::Current();
+    if (!runtime) {
+      LOG(FATAL_WITHOUT_ABORT) << "no runtime in perfetto_hprof_listener";
+      return;
+    }
+    if (!runtime->AttachCurrentThread("perfetto_hprof_listener", /*as_daemon=*/ true,
+                                      runtime->GetSystemThreadGroup(), /*create_peer=*/ false)) {
+      LOG(ERROR) << "failed to attach thread.";
+      {
+        art::MutexLock lk(nullptr, GetStateMutex());
+        g_state = State::kUninitialized;
+        GetStateCV().Broadcast(nullptr);
+      }
+
+      return;
+    }
+    art::Thread* self = art::Thread::Current();
+    if (!self) {
+      LOG(FATAL_WITHOUT_ABORT) << "no thread in perfetto_hprof_listener";
+      return;
+    }
+    {
+      art::MutexLock lk(self, GetStateMutex());
+      if (g_state == State::kWaitForListener) {
+        g_state = State::kWaitForStart;
+        GetStateCV().Broadcast(self);
+      }
+    }
+    char buf[1];
+    for (;;) {
+      int res;
+      do {
+        res = read(g_signal_pipe_fds[0], buf, sizeof(buf));
+      } while (res == -1 && errno == EINTR);
+
+      if (res <= 0) {
+        if (res == -1) {
+          PLOG(ERROR) << "failed to read";
+        }
+        close(g_signal_pipe_fds[0]);
+        return;
+      }
+
+      perfetto_hprof::DumpPerfetto(self);
+    }
+  });
+  th.detach();
+
+  return true;
+}
+
+extern "C" bool ArtPlugin_Deinitialize() {
+  if (sigaction(kJavaHeapprofdSignal, &g_orig_act, nullptr) != 0) {
+    PLOG(ERROR) << "failed to reset signal handler";
+    // We cannot close the pipe if the signal handler wasn't unregistered,
+    // to avoid receiving SIGPIPE.
+    return false;
+  }
+  close(g_signal_pipe_fds[1]);
+
+  art::Thread* self = art::Thread::Current();
+  art::MutexLock lk(self, GetStateMutex());
+  // Wait until after the thread was registered to the runtime. This is so
+  // we do not attempt to register it with the runtime after it had been torn
+  // down (ArtPlugin_Deinitialize gets called in the Runtime dtor).
+  while (g_state == State::kWaitForListener) {
+    GetStateCV().Wait(art::Thread::Current());
+  }
+  g_state = State::kUninitialized;
+  GetStateCV().Broadcast(self);
+  return true;
+}
+
+}  // namespace perfetto_hprof
+
+namespace perfetto {
+
+PERFETTO_DEFINE_DATA_SOURCE_STATIC_MEMBERS(perfetto_hprof::JavaHprofDataSource);
+
+}
diff --git a/perfetto_hprof/perfetto_hprof.h b/perfetto_hprof/perfetto_hprof.h
new file mode 100644
index 0000000..1713286
--- /dev/null
+++ b/perfetto_hprof/perfetto_hprof.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_PERFETTO_HPROF_PERFETTO_HPROF_H_
+#define ART_PERFETTO_HPROF_PERFETTO_HPROF_H_
+
+#include <ostream>
+
+namespace perfetto_hprof {
+
+enum class State {
+  // Worker thread not spawned.
+  kUninitialized,
+  // Worker thread spawned, waiting for ACK.
+  kWaitForListener,
+  // Worker thread ready, waiting for data-source.
+  kWaitForStart,
+  // These are only in the forked process:
+  // Data source received, start dump.
+  kStart,
+  // Dump finished. Kill forked child.
+  kEnd,
+};
+
+std::ostream& operator<<(std::ostream&, const State&);
+
+}  // namespace perfetto_hprof
+
+#endif  // ART_PERFETTO_HPROF_PERFETTO_HPROF_H_
diff --git a/profman/Android.bp b/profman/Android.bp
index bcefffd..c574fde 100644
--- a/profman/Android.bp
+++ b/profman/Android.bp
@@ -47,6 +47,10 @@
         "libdexfile",
         "libartbase",
     ],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_binary {
@@ -60,6 +64,9 @@
         "libdexfiled",
         "libartbased",
     ],
+    apex_available: [
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_binary {
@@ -78,7 +85,7 @@
         },
         windows: {
             enabled: true,
-	    cflags: ["-Wno-thread-safety"],
+            cflags: ["-Wno-thread-safety"],
         },
     },
 }
diff --git a/profman/boot_image_profile.cc b/profman/boot_image_profile.cc
index 4d8eef9..3f9665f 100644
--- a/profman/boot_image_profile.cc
+++ b/profman/boot_image_profile.cc
@@ -14,11 +14,15 @@
  * limitations under the License.
  */
 
+#include "boot_image_profile.h"
+
 #include <memory>
 #include <set>
 
-#include "boot_image_profile.h"
+#include "android-base/file.h"
+#include "base/unix_file/fd_file.h"
 #include "dex/class_accessor-inl.h"
+#include "dex/descriptors_names.h"
 #include "dex/dex_file-inl.h"
 #include "dex/method_reference.h"
 #include "dex/type_reference.h"
@@ -28,105 +32,253 @@
 
 using Hotness = ProfileCompilationInfo::MethodHotness;
 
-void GenerateBootImageProfile(
+static const std::string kMethodSep = "->";  // NOLINT [runtime/string] [4]
+static const std::string kPackageUseDelim = "@";  // NOLINT [runtime/string] [4]
+static constexpr char kMethodFlagStringHot = 'H';
+static constexpr char kMethodFlagStringStartup = 'S';
+static constexpr char kMethodFlagStringPostStartup = 'P';
+
+// Returns the type descriptor of the given reference.
+static std::string GetTypeDescriptor(const TypeReference& ref) {
+  const dex::TypeId& type_id = ref.dex_file->GetTypeId(ref.TypeIndex());
+  return ref.dex_file->GetTypeDescriptor(type_id);
+}
+
+// Returns the method representation used in the text format of the boot image profile.
+static std::string BootImageRepresentation(const MethodReference& ref) {
+  const DexFile* dex_file = ref.dex_file;
+  const dex::MethodId& id = ref.GetMethodId();
+  std::string signature_string(dex_file->GetMethodSignature(id).ToString());
+  std::string type_string(dex_file->GetTypeDescriptor(dex_file->GetTypeId(id.class_idx_)));
+  std::string method_name(dex_file->GetMethodName(id));
+  return type_string +
+        kMethodSep +
+        method_name +
+        signature_string;
+}
+
+// Returns the class representation used in the text format of the boot image profile.
+static std::string BootImageRepresentation(const TypeReference& ref) {
+  return GetTypeDescriptor(ref);
+}
+
+// Returns the class representation used in preloaded classes.
+static std::string PreloadedClassesRepresentation(const TypeReference& ref) {
+  std::string descriptor = GetTypeDescriptor(ref);
+  return DescriptorToDot(descriptor.c_str());
+}
+
+// Formats the list of packages from the item metadata as a debug string.
+static std::string GetPackageUseString(const FlattenProfileData::ItemMetadata& metadata) {
+  std::string result;
+  for (const auto& it : metadata.GetAnnotations()) {
+    result += it.GetOriginPackageName() + ",";
+  }
+
+  return metadata.GetAnnotations().empty()
+      ? result
+      : result.substr(0, result.size() - 1);
+}
+
+// Converts a method representation to its final profile format.
+static std::string MethodToProfileFormat(
+    const std::string& method,
+    const FlattenProfileData::ItemMetadata& metadata,
+    bool output_package_use) {
+  std::string flags_string;
+  if (metadata.HasFlagSet(Hotness::kFlagHot)) {
+    flags_string += kMethodFlagStringHot;
+  }
+  if (metadata.HasFlagSet(Hotness::kFlagStartup)) {
+    flags_string += kMethodFlagStringStartup;
+  }
+  if (metadata.HasFlagSet(Hotness::kFlagPostStartup)) {
+    flags_string += kMethodFlagStringPostStartup;
+  }
+  std::string extra;
+  if (output_package_use) {
+    extra = kPackageUseDelim + GetPackageUseString(metadata);
+  }
+
+  return flags_string + method + extra;
+}
+
+// Converts a class representation to its final profile or preloaded classes format.
+static std::string ClassToProfileFormat(
+    const std::string& classString,
+    const FlattenProfileData::ItemMetadata& metadata,
+    bool output_package_use) {
+  std::string extra;
+  if (output_package_use) {
+    extra = kPackageUseDelim + GetPackageUseString(metadata);
+  }
+
+  return classString + extra;
+}
+
+// Tries to assess if the given type reference is a clean class.
+static bool MaybeIsClassClean(const TypeReference& ref) {
+  const dex::ClassDef* class_def = ref.dex_file->FindClassDef(ref.TypeIndex());
+  if (class_def == nullptr) {
+    return false;
+  }
+
+  ClassAccessor accessor(*ref.dex_file, *class_def);
+  for (auto& it : accessor.GetStaticFields()) {
+    if (!it.IsFinal()) {
+      // Not final static field will probably dirty the class.
+      return false;
+    }
+  }
+  for (auto& it : accessor.GetMethods()) {
+    uint32_t flags = it.GetAccessFlags();
+    if ((flags & kAccNative) != 0) {
+      // Native method will get dirtied.
+      return false;
+    }
+    if ((flags & kAccConstructor) != 0 && (flags & kAccStatic) != 0) {
+      // Class initializer, may get dirtied (not sure).
+      return false;
+    }
+  }
+
+  return true;
+}
+
+// Returns true iff the item should be included in the profile.
+// (i.e. it passes the given aggregation thresholds)
+static bool IncludeItemInProfile(uint32_t max_aggregation_count,
+                                 uint32_t item_threshold,
+                                 const FlattenProfileData::ItemMetadata& metadata,
+                                 const BootImageOptions& options) {
+  CHECK_NE(max_aggregation_count, 0u);
+  float item_percent = metadata.GetAnnotations().size() / static_cast<float>(max_aggregation_count);
+  for (const auto& annotIt : metadata.GetAnnotations()) {
+    const auto&thresholdIt =
+        options.special_packages_thresholds.find(annotIt.GetOriginPackageName());
+    if (thresholdIt != options.special_packages_thresholds.end()) {
+      if (item_percent >= (thresholdIt->second / 100.f)) {
+        return true;
+      }
+    }
+  }
+  return item_percent >= (item_threshold / 100.f);
+}
+
+// Returns true iff a method with the given metadata should be included in the profile.
+static bool IncludeMethodInProfile(uint32_t max_aggregation_count,
+                                   const FlattenProfileData::ItemMetadata& metadata,
+                                   const BootImageOptions& options) {
+  return IncludeItemInProfile(max_aggregation_count, options.method_threshold, metadata, options);
+}
+
+// Returns true iff a class with the given metadata should be included in the profile.
+static bool IncludeClassInProfile(const TypeReference& type_ref,
+                                  uint32_t max_aggregation_count,
+                                  const FlattenProfileData::ItemMetadata& metadata,
+                                  const BootImageOptions& options) {
+  uint32_t threshold = MaybeIsClassClean(type_ref)
+      ? options.image_class_clean_threshold
+      : options.image_class_threshold;
+  return IncludeItemInProfile(max_aggregation_count, threshold, metadata, options);
+}
+
+// Returns true iff a class with the given metadata should be included in the list of
+// preloaded classes.
+static bool IncludeInPreloadedClasses(const std::string& class_name,
+                                      uint32_t max_aggregation_count,
+                                      const FlattenProfileData::ItemMetadata& metadata,
+                                      const BootImageOptions& options) {
+  bool blacklisted = options.preloaded_classes_blacklist.find(class_name) !=
+      options.preloaded_classes_blacklist.end();
+  return !blacklisted && IncludeItemInProfile(
+      max_aggregation_count, options.preloaded_class_threshold, metadata, options);
+}
+
+bool GenerateBootImageProfile(
     const std::vector<std::unique_ptr<const DexFile>>& dex_files,
-    const std::vector<std::unique_ptr<const ProfileCompilationInfo>>& profiles,
+    const std::vector<std::string>& profile_files,
     const BootImageOptions& options,
-    bool verbose,
-    ProfileCompilationInfo* out_profile) {
-  for (const std::unique_ptr<const ProfileCompilationInfo>& profile : profiles) {
-    // Avoid merging classes since we may want to only add classes that fit a certain criteria.
-    // If we merged the classes, every single class in each profile would be in the out_profile,
-    // but we want to only included classes that are in at least a few profiles.
-    out_profile->MergeWith(*profile, /*merge_classes=*/ false);
+    const std::string& boot_profile_out_path,
+    const std::string& preloaded_classes_out_path) {
+  if (boot_profile_out_path.empty()) {
+    LOG(ERROR) << "No output file specified";
+    return false;
   }
 
-  // Image classes that were added because they are commonly used.
-  size_t class_count = 0;
-  // Image classes that were only added because they were clean.
-  size_t clean_class_count = 0;
-  // Total clean classes.
-  size_t clean_count = 0;
-  // Total dirty classes.
-  size_t dirty_count = 0;
+  bool generate_preloaded_classes = !preloaded_classes_out_path.empty();
 
-  for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
-    // Inferred classes are classes inferred from method samples.
-    std::set<std::pair<const ProfileCompilationInfo*, dex::TypeIndex>> inferred_classes;
-    for (size_t i = 0; i < dex_file->NumMethodIds(); ++i) {
-      MethodReference ref(dex_file.get(), i);
-      // This counter is how many profiles contain the method as sampled or hot.
-      size_t counter = 0;
-      for (const std::unique_ptr<const ProfileCompilationInfo>& profile : profiles) {
-        Hotness hotness = profile->GetMethodHotness(ref);
-        if (hotness.IsInProfile()) {
-          ++counter;
-          out_profile->AddMethodHotness(ref, hotness);
-          inferred_classes.emplace(profile.get(), ref.GetMethodId().class_idx_);
-        }
-      }
-      // If the counter is greater or equal to the compile threshold, mark the method as hot.
-      // Note that all hot methods are also marked as hot in the out profile during the merging
-      // process.
-      if (counter >= options.compiled_method_threshold) {
-        Hotness hotness;
-        hotness.AddFlag(Hotness::kFlagHot);
-        out_profile->AddMethodHotness(ref, hotness);
-      }
+  std::unique_ptr<FlattenProfileData> flattend_data(new FlattenProfileData());
+  for (const std::string& profile_file : profile_files) {
+    ProfileCompilationInfo profile;
+    if (!profile.Load(profile_file, /*clear_if_invalid=*/ false)) {
+      LOG(ERROR) << "Profile is not a valid: " << profile_file;
+      return false;
     }
-    // Walk all of the classes and add them to the profile if they meet the requirements.
-    for (ClassAccessor accessor : dex_file->GetClasses()) {
-      TypeReference ref(dex_file.get(), accessor.GetClassIdx());
-      bool is_clean = true;
-      auto method_visitor = [&](const ClassAccessor::Method& method) {
-        const uint32_t flags = method.GetAccessFlags();
-        if ((flags & kAccNative) != 0) {
-          // Native method will get dirtied.
-          is_clean = false;
-        }
-        if ((flags & kAccConstructor) != 0 && (flags & kAccStatic) != 0) {
-          // Class initializer, may get dirtied (not sure).
-          is_clean = false;
-        }
-      };
-      accessor.VisitFieldsAndMethods(
-          [&](const ClassAccessor::Field& field) {
-            if (!field.IsFinal()) {
-              // Not final static field will probably dirty the class.
-              is_clean = false;
-            }
-          },
-          /*instance_field_visitor=*/ VoidFunctor(),
-          method_visitor,
-          method_visitor);
+    std::unique_ptr<FlattenProfileData> currentData = profile.ExtractProfileData(dex_files);
+    flattend_data->MergeData(*currentData);
+  }
 
-      ++(is_clean ? clean_count : dirty_count);
-      // This counter is how many profiles contain the class.
-      size_t counter = 0;
-      for (const std::unique_ptr<const ProfileCompilationInfo>& profile : profiles) {
-        auto it = inferred_classes.find(std::make_pair(profile.get(), ref.TypeIndex()));
-        if (it != inferred_classes.end() ||
-            profile->ContainsClass(*ref.dex_file, ref.TypeIndex())) {
-          ++counter;
-        }
+  // We want the output sorted by the method/class name.
+  // So we use an intermediate map for that.
+  // There's no attempt to optimize this as it's not part of any critical path,
+  // and mostly executed on hosts.
+  SafeMap<std::string, FlattenProfileData::ItemMetadata> profile_methods;
+  SafeMap<std::string, FlattenProfileData::ItemMetadata> profile_classes;
+  SafeMap<std::string, FlattenProfileData::ItemMetadata> preloaded_classes;
+
+  for (const auto& it : flattend_data->GetMethodData()) {
+    if (IncludeMethodInProfile(flattend_data->GetMaxAggregationForMethods(), it.second, options)) {
+      FlattenProfileData::ItemMetadata metadata(it.second);
+      if (options.upgrade_startup_to_hot
+          && ((metadata.GetFlags() & Hotness::Flag::kFlagStartup) != 0)) {
+        metadata.AddFlag(Hotness::Flag::kFlagHot);
       }
-      if (counter == 0) {
-        continue;
-      }
-      if (counter >= options.image_class_theshold) {
-        ++class_count;
-        out_profile->AddClassForDex(ref);
-      } else if (is_clean && counter >= options.image_class_clean_theshold) {
-        ++clean_class_count;
-        out_profile->AddClassForDex(ref);
-      }
+      profile_methods.Put(BootImageRepresentation(it.first), metadata);
     }
   }
-  if (verbose) {
-    LOG(INFO) << "Image classes " << class_count + clean_class_count
-              << " added because clean " << clean_class_count
-              << " total clean " << clean_count << " total dirty " << dirty_count;
+
+  for (const auto& it : flattend_data->GetClassData()) {
+    const TypeReference& type_ref = it.first;
+    const FlattenProfileData::ItemMetadata& metadata = it.second;
+    if (IncludeClassInProfile(type_ref,
+            flattend_data->GetMaxAggregationForClasses(),
+            metadata,
+            options)) {
+      profile_classes.Put(BootImageRepresentation(it.first), it.second);
+    }
+    std::string preloaded_class_representation = PreloadedClassesRepresentation(it.first);
+    if (generate_preloaded_classes && IncludeInPreloadedClasses(
+            preloaded_class_representation,
+            flattend_data->GetMaxAggregationForClasses(),
+            metadata,
+            options)) {
+      preloaded_classes.Put(preloaded_class_representation, it.second);
+    }
   }
+
+  // Create the output content
+  std::string profile_content;
+  std::string preloaded_content;
+  for (const auto& it : profile_classes) {
+    profile_content += ClassToProfileFormat(it.first, it.second, options.append_package_use_list)
+        + "\n";
+  }
+  for (const auto& it : profile_methods) {
+    profile_content += MethodToProfileFormat(it.first, it.second, options.append_package_use_list)
+        + "\n";
+  }
+
+  if (generate_preloaded_classes) {
+    for (const auto& it : preloaded_classes) {
+      preloaded_content +=
+          ClassToProfileFormat(it.first, it.second, options.append_package_use_list) + "\n";
+    }
+  }
+
+  return android::base::WriteStringToFile(profile_content, boot_profile_out_path)
+      && (!generate_preloaded_classes
+          || android::base::WriteStringToFile(preloaded_content, preloaded_classes_out_path));
 }
 
 }  // namespace art
diff --git a/profman/boot_image_profile.h b/profman/boot_image_profile.h
index 99e5a75..a3dd52c 100644
--- a/profman/boot_image_profile.h
+++ b/profman/boot_image_profile.h
@@ -20,7 +20,9 @@
 #include <limits>
 #include <memory>
 #include <vector>
+#include <set>
 
+#include "base/safe_map.h"
 #include "dex/dex_file.h"
 
 namespace art {
@@ -29,27 +31,54 @@
 
 struct BootImageOptions {
  public:
-  // Threshold for classes that may be dirty or clean. The threshold specifies how
-  // many different profiles need to have the class before it gets added to the boot profile.
-  uint32_t image_class_theshold = 10;
+  // Threshold for preloaded. The threshold specifies, as percentage
+  // of maximum number or aggregations, how many different profiles need to have the class
+  // before it gets added to the list of preloaded classes.
+  uint32_t preloaded_class_threshold = 10;
 
-  // Threshold for classes that are likely to remain clean. The threshold specifies how
-  // many different profiles need to have the class before it gets added to the boot profile.
-  uint32_t image_class_clean_theshold = 3;
+  // Threshold for classes that may be dirty or clean. The threshold specifies, as percentage
+  // of maximum number or aggregations, how many different profiles need to have the class
+  // before it gets added to the boot profile.
+  uint32_t image_class_threshold = 10;
 
-  // Threshold for non-hot methods to be compiled. The threshold specifies how
-  // many different profiles need to have the method before it gets added to the boot profile.
-  uint32_t compiled_method_threshold = std::numeric_limits<uint32_t>::max();
+  // Threshold for classes that are likely to remain clean. The threshold specifies, as percentage
+  // of maximum number or aggregations, how many different profiles need to have the class
+  // before it gets added to the boot profile.
+  uint32_t image_class_clean_threshold = 5;
+
+  // Threshold for including a method in the profile. The threshold specifies, as percentage
+  // of maximum number or aggregations, how many different profiles need to have the method
+  // before it gets added to the boot profile.
+  uint32_t method_threshold = 10;
+
+  // Whether or not we should upgrade the startup methods to hot.
+  bool upgrade_startup_to_hot = true;
+
+  // A special set of thresholds (classes and methods) that apply if a method/class is being used
+  // by a special package. This can be used to lower the thresholds for methods used by important
+  // packages (e.g. system server of system ui) or packages which have special needs (e.g. camera
+  // needs more hardware methods).
+  SafeMap<std::string, uint32_t> special_packages_thresholds;
+
+  // Whether or not to append package use list to each profile element.
+  // Should be use only for debugging as it will add additional elements to the text output
+  // that are not compatible with the default profile format.
+  bool append_package_use_list = false;
+
+  // The set of classes that should not be preloaded in Zygote
+  std::set<std::string> preloaded_classes_blacklist;
 };
 
-// Merge a bunch of profiles together to generate a boot profile. Classes and methods are added
-// to the out_profile if they meet the options.
-void GenerateBootImageProfile(
+// Generate a boot image profile according to the specified options.
+// Boot classpaths dex files are identified by the given vector and the output is
+// written to the two specified paths. The paths will be open with O_CREAT | O_WRONLY.
+// Returns true if the generation was successful, false otherwise.
+bool GenerateBootImageProfile(
     const std::vector<std::unique_ptr<const DexFile>>& dex_files,
-    const std::vector<std::unique_ptr<const ProfileCompilationInfo>>& profiles,
+    const std::vector<std::string>& profile_files,
     const BootImageOptions& options,
-    bool verbose,
-    ProfileCompilationInfo* out_profile);
+    const std::string& boot_profile_out_path,
+    const std::string& preloaded_classes_out_path);
 
 }  // namespace art
 
diff --git a/profman/profile_assistant.cc b/profman/profile_assistant.cc
index b65bb43..1695d8c 100644
--- a/profman/profile_assistant.cc
+++ b/profman/profile_assistant.cc
@@ -33,20 +33,20 @@
         const std::vector<ScopedFlock>& profile_files,
         const ScopedFlock& reference_profile_file,
         const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn,
-        bool store_aggregation_counters) {
+        const Options& options) {
   DCHECK(!profile_files.empty());
 
-  ProfileCompilationInfo info;
+  ProfileCompilationInfo info(options.IsBootImageMerge());
+
   // Load the reference profile.
   if (!info.Load(reference_profile_file->Fd(), /*merge_classes=*/ true, filter_fn)) {
     LOG(WARNING) << "Could not load reference profile file";
     return kErrorBadProfiles;
   }
 
-  // If we need to store aggregation counters (e.g. for the boot image profile),
-  // prepare the reference profile now.
-  if (store_aggregation_counters) {
-    info.PrepareForAggregationCounters();
+  if (options.IsBootImageMerge() && !info.IsForBootImage()) {
+    LOG(WARNING) << "Requested merge for boot image profile but the reference profile is regular.";
+    return kErrorBadProfiles;
   }
 
   // Store the current state of the reference profile before merging with the current profiles.
@@ -58,25 +58,50 @@
     ProfileCompilationInfo cur_info;
     if (!cur_info.Load(profile_files[i]->Fd(), /*merge_classes=*/ true, filter_fn)) {
       LOG(WARNING) << "Could not load profile file at index " << i;
+      if (options.IsForceMerge()) {
+        // If we have to merge forcefully, ignore load failures.
+        // This is useful for boot image profiles to ignore stale profiles which are
+        // cleared lazily.
+        continue;
+      }
       return kErrorBadProfiles;
     }
+
+    // Check version mismatch.
+    // This may happen during profile analysis if one profile is regular and
+    // the other one is for the boot image. For example when switching on-off
+    // the boot image profiles.
+    if (!info.SameVersion(cur_info)) {
+      if (options.IsForceMerge()) {
+        // If we have to merge forcefully, ignore the current profile and
+        // continue to the next one.
+        continue;
+      } else {
+        // Otherwise, return an error.
+        return kErrorDifferentVersions;
+      }
+    }
+
     if (!info.MergeWith(cur_info)) {
       LOG(WARNING) << "Could not merge profile file at index " << i;
       return kErrorBadProfiles;
     }
   }
 
-  uint32_t min_change_in_methods_for_compilation = std::max(
-      (kMinNewMethodsPercentChangeForCompilation * number_of_methods) / 100,
-      kMinNewMethodsForCompilation);
-  uint32_t min_change_in_classes_for_compilation = std::max(
-      (kMinNewClassesPercentChangeForCompilation * number_of_classes) / 100,
-      kMinNewClassesForCompilation);
-  // Check if there is enough new information added by the current profiles.
-  if (((info.GetNumberOfMethods() - number_of_methods) < min_change_in_methods_for_compilation) &&
-      ((info.GetNumberOfResolvedClasses() - number_of_classes)
-          < min_change_in_classes_for_compilation)) {
-    return kSkipCompilation;
+  // If we perform a forced merge do not analyze the difference between profiles.
+  if (!options.IsForceMerge()) {
+    uint32_t min_change_in_methods_for_compilation = std::max(
+        (kMinNewMethodsPercentChangeForCompilation * number_of_methods) / 100,
+        kMinNewMethodsForCompilation);
+    uint32_t min_change_in_classes_for_compilation = std::max(
+        (kMinNewClassesPercentChangeForCompilation * number_of_classes) / 100,
+        kMinNewClassesForCompilation);
+    // Check if there is enough new information added by the current profiles.
+    if (((info.GetNumberOfMethods() - number_of_methods) < min_change_in_methods_for_compilation) &&
+        ((info.GetNumberOfResolvedClasses() - number_of_classes)
+            < min_change_in_classes_for_compilation)) {
+      return kSkipCompilation;
+    }
   }
 
   // We were successful in merging all profile information. Update the reference profile.
@@ -89,7 +114,7 @@
     return kErrorIO;
   }
 
-  return kCompile;
+  return options.IsForceMerge() ? kSuccess : kCompile;
 }
 
 class ScopedFlockList {
@@ -132,7 +157,7 @@
         const std::vector<int>& profile_files_fd,
         int reference_profile_file_fd,
         const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn,
-        bool store_aggregation_counters) {
+        const Options& options) {
   DCHECK_GE(reference_profile_file_fd, 0);
 
   std::string error;
@@ -156,14 +181,14 @@
   return ProcessProfilesInternal(profile_files.Get(),
                                  reference_profile_file,
                                  filter_fn,
-                                 store_aggregation_counters);
+                                 options);
 }
 
 ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
         const std::vector<std::string>& profile_files,
         const std::string& reference_profile_file,
         const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn,
-        bool store_aggregation_counters) {
+        const Options& options) {
   std::string error;
 
   ScopedFlockList profile_files_list(profile_files.size());
@@ -182,7 +207,7 @@
   return ProcessProfilesInternal(profile_files_list.Get(),
                                  locked_reference_profile_file,
                                  filter_fn,
-                                 store_aggregation_counters);
+                                 options);
 }
 
 }  // namespace art
diff --git a/profman/profile_assistant.h b/profman/profile_assistant.h
index 45d4e38..9aa0768 100644
--- a/profman/profile_assistant.h
+++ b/profman/profile_assistant.h
@@ -30,11 +30,39 @@
   // These also serve as return codes of profman and are processed by installd
   // (frameworks/native/cmds/installd/commands.cpp)
   enum ProcessingResult {
-    kCompile = 0,
-    kSkipCompilation = 1,
-    kErrorBadProfiles = 2,
-    kErrorIO = 3,
-    kErrorCannotLock = 4
+    kSuccess = 0,  // Generic success code for non-analysis runs.
+    kCompile = 1,
+    kSkipCompilation = 2,
+    kErrorBadProfiles = 3,
+    kErrorIO = 4,
+    kErrorCannotLock = 5,
+    kErrorDifferentVersions = 6,
+  };
+
+  class Options {
+   public:
+    static constexpr bool kForceMergeDefault = false;
+    static constexpr bool kBootImageMergeDefault = false;
+
+    Options()
+        : force_merge_(kForceMergeDefault),
+          boot_image_merge_(kBootImageMergeDefault) {
+    }
+
+    bool IsForceMerge() const { return force_merge_; }
+    bool IsBootImageMerge() const { return boot_image_merge_; }
+
+    void SetForceMerge(bool value) { force_merge_ = value; }
+    void SetBootImageMerge(bool value) { boot_image_merge_ = value; }
+
+   private:
+    // If true, performs a forced merge, without analyzing if there is a
+    // significant difference between the current profile and the reference profile.
+    // See ProfileAssistant#ProcessProfile.
+    bool force_merge_;
+    // Signals that the merge is for boot image profiles. It will ignore differences
+    // in profile versions (instead of aborting).
+    bool boot_image_merge_;
   };
 
   // Process the profile information present in the given files. Returns one of
@@ -56,21 +84,21 @@
       const std::string& reference_profile_file,
       const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn
           = ProfileCompilationInfo::ProfileFilterFnAcceptAll,
-      bool store_aggregation_counters = false);
+      const Options& options = Options());
 
   static ProcessingResult ProcessProfiles(
       const std::vector<int>& profile_files_fd_,
       int reference_profile_file_fd,
       const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn
           = ProfileCompilationInfo::ProfileFilterFnAcceptAll,
-      bool store_aggregation_counters = false);
+      const Options& options = Options());
 
  private:
   static ProcessingResult ProcessProfilesInternal(
       const std::vector<ScopedFlock>& profile_files,
       const ScopedFlock& reference_profile_file,
       const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn,
-      bool store_aggregation_counters);
+      const Options& options);
 
   DISALLOW_COPY_AND_ASSIGN(ProfileAssistant);
 };
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index e906151..2f3f58d 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -16,6 +16,7 @@
 
 #include <gtest/gtest.h>
 
+#include "android-base/file.h"
 #include "android-base/strings.h"
 #include "art_method-inl.h"
 #include "base/unix_file/fd_file.h"
@@ -35,77 +36,76 @@
 
 using Hotness = ProfileCompilationInfo::MethodHotness;
 using TypeReferenceSet = std::set<TypeReference, TypeReferenceValueComparator>;
+using ProfileInlineCache = ProfileMethodInfo::ProfileInlineCache;
 
-static constexpr size_t kMaxMethodIds = 65535;
-
+// TODO(calin): These tests share a lot with the ProfileCompilationInfo tests.
+// we should introduce a better abstraction to extract the common parts.
 class ProfileAssistantTest : public CommonRuntimeTest {
  public:
   void PostRuntimeCreate() override {
     allocator_.reset(new ArenaAllocator(Runtime::Current()->GetArenaPool()));
+
+    dex1 = fake_dex_storage.AddFakeDex("location1", /* checksum= */ 1, /* num_method_ids= */ 10001);
+    dex2 = fake_dex_storage.AddFakeDex("location2", /* checksum= */ 2, /* num_method_ids= */ 10002);
+    dex3 = fake_dex_storage.AddFakeDex("location3", /* checksum= */ 3, /* num_method_ids= */ 10003);
+    dex4 = fake_dex_storage.AddFakeDex("location4", /* checksum= */ 4, /* num_method_ids= */ 10004);
+
+    dex1_checksum_missmatch = fake_dex_storage.AddFakeDex(
+        "location1", /* checksum= */ 12, /* num_method_ids= */ 10001);
   }
 
  protected:
-  void SetupProfile(const std::string& id,
-                    uint32_t checksum,
+  bool AddMethod(ProfileCompilationInfo* info,
+                const DexFile* dex,
+                uint16_t method_idx,
+                const std::vector<ProfileInlineCache>& inline_caches,
+                Hotness::Flag flags) {
+    return info->AddMethod(
+        ProfileMethodInfo(MethodReference(dex, method_idx), inline_caches), flags);
+  }
+
+  bool AddMethod(ProfileCompilationInfo* info,
+                 const DexFile* dex,
+                 uint16_t method_idx,
+                 Hotness::Flag flags,
+                 const ProfileCompilationInfo::ProfileSampleAnnotation& annotation
+                    = ProfileCompilationInfo::ProfileSampleAnnotation::kNone) {
+    return info->AddMethod(ProfileMethodInfo(MethodReference(dex, method_idx)),
+                           flags,
+                           annotation);
+  }
+
+  bool AddClass(ProfileCompilationInfo* info,
+                const DexFile* dex,
+                dex::TypeIndex type_index) {
+    std::vector<dex::TypeIndex> classes = {type_index};
+    return info->AddClassesForDex(dex, classes.begin(), classes.end());
+  }
+
+  void SetupProfile(const DexFile* dex_file1,
+                    const DexFile* dex_file2,
                     uint16_t number_of_methods,
                     uint16_t number_of_classes,
                     const ScratchFile& profile,
                     ProfileCompilationInfo* info,
                     uint16_t start_method_index = 0,
                     bool reverse_dex_write_order = false) {
-    std::string dex_location1 = "location1" + id;
-    uint32_t dex_location_checksum1 = checksum;
-    std::string dex_location2 = "location2" + id;
-    uint32_t dex_location_checksum2 = 10 * checksum;
-    SetupProfile(dex_location1,
-                 dex_location_checksum1,
-                 dex_location2,
-                 dex_location_checksum2,
-                 number_of_methods,
-                 number_of_classes,
-                 profile,
-                 info,
-                 start_method_index,
-                 reverse_dex_write_order);
-  }
-
-  void SetupProfile(const std::string& dex_location1,
-                    uint32_t dex_location_checksum1,
-                    const std::string& dex_location2,
-                    uint32_t dex_location_checksum2,
-                    uint16_t number_of_methods,
-                    uint16_t number_of_classes,
-                    const ScratchFile& profile,
-                    ProfileCompilationInfo* info,
-                    uint16_t start_method_index = 0,
-                    bool reverse_dex_write_order = false,
-                    uint32_t number_of_methods1 = kMaxMethodIds,
-                    uint32_t number_of_methods2 = kMaxMethodIds) {
     for (uint16_t i = start_method_index; i < start_method_index + number_of_methods; i++) {
       // reverse_dex_write_order controls the order in which the dex files will be added to
       // the profile and thus written to disk.
-      ProfileCompilationInfo::OfflineProfileMethodInfo pmi =
-          GetOfflineProfileMethodInfo(dex_location1, dex_location_checksum1,
-                                      dex_location2, dex_location_checksum2,
-                                      number_of_methods1, number_of_methods2);
-      Hotness::Flag flags = Hotness::kFlagPostStartup;
+      std::vector<ProfileInlineCache> inline_caches = GetTestInlineCaches(dex_file1 , dex_file2, dex3);
+      Hotness::Flag flags = static_cast<Hotness::Flag>(
+          Hotness::kFlagHot | Hotness::kFlagPostStartup);
       if (reverse_dex_write_order) {
-        ASSERT_TRUE(info->AddMethod(
-            dex_location2, dex_location_checksum2, i, number_of_methods2, pmi, flags));
-        ASSERT_TRUE(info->AddMethod(
-            dex_location1, dex_location_checksum1, i, number_of_methods1, pmi, flags));
+        ASSERT_TRUE(AddMethod(info, dex_file2, i, inline_caches, flags));
+        ASSERT_TRUE(AddMethod(info, dex_file1, i, inline_caches, flags));
       } else {
-        ASSERT_TRUE(info->AddMethod(
-            dex_location1, dex_location_checksum1, i, number_of_methods1, pmi, flags));
-        ASSERT_TRUE(info->AddMethod(
-            dex_location2, dex_location_checksum2, i, number_of_methods2, pmi, flags));
+        ASSERT_TRUE(AddMethod(info, dex_file1, i, inline_caches, flags));
+        ASSERT_TRUE(AddMethod(info, dex_file2, i, inline_caches, flags));
       }
     }
     for (uint16_t i = 0; i < number_of_classes; i++) {
-      ASSERT_TRUE(info->AddClassIndex(ProfileCompilationInfo::GetProfileDexFileKey(dex_location1),
-                                      dex_location_checksum1,
-                                      dex::TypeIndex(i),
-                                      number_of_methods1));
+      ASSERT_TRUE(AddClass(info, dex_file1, dex::TypeIndex(i)));
     }
 
     ASSERT_TRUE(info->Save(GetFd(profile)));
@@ -113,77 +113,61 @@
     ASSERT_TRUE(profile.GetFile()->ResetOffset());
   }
 
-  void SetupBasicProfile(const std::string& id,
-                         uint32_t checksum,
-                         uint16_t number_of_methods,
+  void SetupBasicProfile(const DexFile* dex,
                          const std::vector<uint32_t>& hot_methods,
                          const std::vector<uint32_t>& startup_methods,
                          const std::vector<uint32_t>& post_startup_methods,
                          const ScratchFile& profile,
                          ProfileCompilationInfo* info) {
-    std::string dex_location = "location1" + id;
     for (uint32_t idx : hot_methods) {
-      info->AddMethodIndex(Hotness::kFlagHot, dex_location, checksum, idx, number_of_methods);
+      AddMethod(info, dex, idx, Hotness::kFlagHot);
     }
     for (uint32_t idx : startup_methods) {
-      info->AddMethodIndex(Hotness::kFlagStartup, dex_location, checksum, idx, number_of_methods);
+      AddMethod(info, dex, idx, Hotness::kFlagStartup);
     }
     for (uint32_t idx : post_startup_methods) {
-      info->AddMethodIndex(Hotness::kFlagPostStartup,
-                           dex_location,
-                           checksum,
-                           idx,
-                           number_of_methods);
+      AddMethod(info, dex, idx, Hotness::kFlagPostStartup);
     }
     ASSERT_TRUE(info->Save(GetFd(profile)));
     ASSERT_EQ(0, profile.GetFile()->Flush());
     ASSERT_TRUE(profile.GetFile()->ResetOffset());
   }
 
-  // Creates an inline cache which will be destructed at the end of the test.
-  ProfileCompilationInfo::InlineCacheMap* CreateInlineCacheMap() {
-    used_inline_caches.emplace_back(new ProfileCompilationInfo::InlineCacheMap(
-        std::less<uint16_t>(), allocator_->Adapter(kArenaAllocProfile)));
-    return used_inline_caches.back().get();
-  }
-
-  ProfileCompilationInfo::OfflineProfileMethodInfo GetOfflineProfileMethodInfo(
-        const std::string& dex_location1, uint32_t dex_checksum1,
-        const std::string& dex_location2, uint32_t dex_checksum2,
-        uint32_t number_of_methods1 = kMaxMethodIds, uint32_t number_of_methods2 = kMaxMethodIds) {
-    ProfileCompilationInfo::InlineCacheMap* ic_map = CreateInlineCacheMap();
-    ProfileCompilationInfo::OfflineProfileMethodInfo pmi(ic_map);
-    pmi.dex_references.emplace_back(dex_location1, dex_checksum1, number_of_methods1);
-    pmi.dex_references.emplace_back(dex_location2, dex_checksum2, number_of_methods2);
-
+  // The dex1_substitute can be used to replace the default dex1 file.
+  std::vector<ProfileInlineCache> GetTestInlineCaches(
+        const DexFile* dex_file1, const DexFile* dex_file2, const DexFile* dex_file3) {
+    std::vector<ProfileInlineCache> inline_caches;
     // Monomorphic
     for (uint16_t dex_pc = 0; dex_pc < 11; dex_pc++) {
-      ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
-      dex_pc_data.AddClass(0, dex::TypeIndex(0));
-      ic_map->Put(dex_pc, dex_pc_data);
+      std::vector<TypeReference> types = {TypeReference(dex_file1, dex::TypeIndex(0))};
+      inline_caches.push_back(ProfileInlineCache(dex_pc, /* missing_types*/ false, types));
     }
     // Polymorphic
     for (uint16_t dex_pc = 11; dex_pc < 22; dex_pc++) {
-      ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
-      dex_pc_data.AddClass(0, dex::TypeIndex(0));
-      dex_pc_data.AddClass(1, dex::TypeIndex(1));
-
-      ic_map->Put(dex_pc, dex_pc_data);
+      std::vector<TypeReference> types = {
+          TypeReference(dex_file1, dex::TypeIndex(0)),
+          TypeReference(dex_file2, dex::TypeIndex(1)),
+          TypeReference(dex_file3, dex::TypeIndex(2))};
+      inline_caches.push_back(ProfileInlineCache(dex_pc, /* missing_types*/ false, types));
     }
     // Megamorphic
     for (uint16_t dex_pc = 22; dex_pc < 33; dex_pc++) {
-      ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
-      dex_pc_data.SetIsMegamorphic();
-      ic_map->Put(dex_pc, dex_pc_data);
+      // we need 5 types to make the cache megamorphic
+      std::vector<TypeReference> types = {
+          TypeReference(dex_file1, dex::TypeIndex(0)),
+          TypeReference(dex_file1, dex::TypeIndex(1)),
+          TypeReference(dex_file1, dex::TypeIndex(2)),
+          TypeReference(dex_file1, dex::TypeIndex(3)),
+          TypeReference(dex_file1, dex::TypeIndex(4))};
+      inline_caches.push_back(ProfileInlineCache(dex_pc, /* missing_types*/ false, types));
     }
     // Missing types
     for (uint16_t dex_pc = 33; dex_pc < 44; dex_pc++) {
-      ProfileCompilationInfo::DexPcData dex_pc_data(allocator_.get());
-      dex_pc_data.SetIsMissingTypes();
-      ic_map->Put(dex_pc, dex_pc_data);
+      std::vector<TypeReference> types;
+      inline_caches.push_back(ProfileInlineCache(dex_pc, /* missing_types*/ true, types));
     }
 
-    return pmi;
+    return inline_caches;
   }
 
   int GetFd(const ScratchFile& file) const {
@@ -198,18 +182,19 @@
   }
 
   std::string GetProfmanCmd() {
-    std::string file_path = GetTestAndroidRoot();
-    file_path += "/bin/profman";
+    std::string file_path = GetArtBinDir() + "/profman";
     if (kIsDebugBuild) {
       file_path += "d";
     }
-    EXPECT_TRUE(OS::FileExists(file_path.c_str()))
-        << file_path << " should be a valid file path";
+    EXPECT_TRUE(OS::FileExists(file_path.c_str())) << file_path << " should be a valid file path";
     return file_path;
   }
 
   // Runs test with given arguments.
-  int ProcessProfiles(const std::vector<int>& profiles_fd, int reference_profile_fd) {
+  int ProcessProfiles(
+      const std::vector<int>& profiles_fd,
+      int reference_profile_fd,
+      const std::vector<const std::string>& extra_args = std::vector<const std::string>()) {
     std::string profman_cmd = GetProfmanCmd();
     std::vector<std::string> argv_str;
     argv_str.push_back(profman_cmd);
@@ -217,6 +202,7 @@
       argv_str.push_back("--profile-file-fd=" + std::to_string(profiles_fd[k]));
     }
     argv_str.push_back("--reference-profile-file-fd=" + std::to_string(reference_profile_fd));
+    argv_str.insert(argv_str.end(), extra_args.begin(), extra_args.end());
 
     std::string error;
     return ExecAndReturnCode(argv_str, &error);
@@ -350,9 +336,8 @@
                           bool is_missing_types)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> pmi =
-        info.GetMethod(method->GetDexFile()->GetLocation(),
-                       method->GetDexFile()->GetLocationChecksum(),
-                       method->GetDexMethodIndex());
+        info.GetHotMethodInfo(MethodReference(
+            method->GetDexFile(), method->GetDexMethodIndex()));
     ASSERT_TRUE(pmi != nullptr);
     ASSERT_EQ(pmi->inline_caches->size(), 1u);
     const ProfileCompilationInfo::DexPcData& dex_pc_data = pmi->inline_caches->begin()->second;
@@ -390,11 +375,10 @@
       hot_methods_ref.push_back(i);
     }
     ProfileCompilationInfo info1;
-    uint16_t methods_in_profile = std::max(methods_in_cur_profile, methods_in_ref_profile);
-    SetupBasicProfile("p1", 1, methods_in_profile, hot_methods_cur, empty_vector, empty_vector,
+    SetupBasicProfile(dex1, hot_methods_cur, empty_vector, empty_vector,
         profile,  &info1);
     ProfileCompilationInfo info2;
-    SetupBasicProfile("p1", 1, methods_in_profile, hot_methods_ref, empty_vector, empty_vector,
+    SetupBasicProfile(dex1, hot_methods_ref, empty_vector, empty_vector,
         reference_profile,  &info2);
     return ProcessProfiles(profile_fds, reference_profile_fd);
   }
@@ -408,18 +392,20 @@
     int reference_profile_fd = GetFd(reference_profile);
 
     ProfileCompilationInfo info1;
-    SetupProfile("p1", 1, 0, classes_in_cur_profile, profile,  &info1);
+    SetupProfile(dex1, dex2, 0, classes_in_cur_profile, profile,  &info1);
     ProfileCompilationInfo info2;
-    SetupProfile("p1", 1, 0, classes_in_ref_profile, reference_profile, &info2);
+    SetupProfile(dex1, dex2, 0, classes_in_ref_profile, reference_profile, &info2);
     return ProcessProfiles(profile_fds, reference_profile_fd);
   }
 
   std::unique_ptr<ArenaAllocator> allocator_;
 
-  // Cache of inline caches generated during tests.
-  // This makes it easier to pass data between different utilities and ensure that
-  // caches are destructed at the end of the test.
-  std::vector<std::unique_ptr<ProfileCompilationInfo::InlineCacheMap>> used_inline_caches;
+  const DexFile* dex1;
+  const DexFile* dex2;
+  const DexFile* dex3;
+  const DexFile* dex4;
+  const DexFile* dex1_checksum_missmatch;
+  FakeDexStorage fake_dex_storage;
 };
 
 TEST_F(ProfileAssistantTest, AdviseCompilationEmptyReferences) {
@@ -434,9 +420,9 @@
 
   const uint16_t kNumberOfMethodsToEnableCompilation = 100;
   ProfileCompilationInfo info1;
-  SetupProfile("p1", 1, kNumberOfMethodsToEnableCompilation, 0, profile1, &info1);
+  SetupProfile(dex1, dex2, kNumberOfMethodsToEnableCompilation, 0, profile1, &info1);
   ProfileCompilationInfo info2;
-  SetupProfile("p2", 2, kNumberOfMethodsToEnableCompilation, 0, profile2, &info2);
+  SetupProfile(dex3, dex4, kNumberOfMethodsToEnableCompilation, 0, profile2, &info2);
 
   // We should advise compilation.
   ASSERT_EQ(ProfileAssistant::kCompile,
@@ -467,7 +453,7 @@
 
   const uint16_t kNumberOfClassesToEnableCompilation = 100;
   ProfileCompilationInfo info1;
-  SetupProfile("p1", 1, 0, kNumberOfClassesToEnableCompilation, profile1, &info1);
+  SetupProfile(dex1, dex2, 0, kNumberOfClassesToEnableCompilation, profile1, &info1);
 
   // We should advise compilation.
   ASSERT_EQ(ProfileAssistant::kCompile,
@@ -498,15 +484,15 @@
   // The new profile info will contain the methods with indices 0-100.
   const uint16_t kNumberOfMethodsToEnableCompilation = 100;
   ProfileCompilationInfo info1;
-  SetupProfile("p1", 1, kNumberOfMethodsToEnableCompilation, 0, profile1, &info1);
+  SetupProfile(dex1, dex2, kNumberOfMethodsToEnableCompilation, 0, profile1, &info1);
   ProfileCompilationInfo info2;
-  SetupProfile("p2", 2, kNumberOfMethodsToEnableCompilation, 0, profile2, &info2);
+  SetupProfile(dex3, dex4, kNumberOfMethodsToEnableCompilation, 0, profile2, &info2);
 
 
   // The reference profile info will contain the methods with indices 50-150.
   const uint16_t kNumberOfMethodsAlreadyCompiled = 100;
   ProfileCompilationInfo reference_info;
-  SetupProfile("p1", 1, kNumberOfMethodsAlreadyCompiled, 0, reference_profile,
+  SetupProfile(dex1, dex2, kNumberOfMethodsAlreadyCompiled, 0, reference_profile,
       &reference_info, kNumberOfMethodsToEnableCompilation / 2);
 
   // We should advise compilation.
@@ -541,9 +527,9 @@
 
   const uint16_t kNumberOfMethodsToSkipCompilation = 24;  // Threshold is 100.
   ProfileCompilationInfo info1;
-  SetupProfile("p1", 1, kNumberOfMethodsToSkipCompilation, 0, profile1, &info1);
+  SetupProfile(dex1, dex2, kNumberOfMethodsToSkipCompilation, 0, profile1, &info1);
   ProfileCompilationInfo info2;
-  SetupProfile("p2", 2, kNumberOfMethodsToSkipCompilation, 0, profile2, &info2);
+  SetupProfile(dex3, dex4, kNumberOfMethodsToSkipCompilation, 0, profile2, &info2);
 
   // We should not advise compilation.
   ASSERT_EQ(ProfileAssistant::kSkipCompilation,
@@ -617,9 +603,10 @@
   const uint16_t kNumberOfMethodsToEnableCompilation = 100;
   // Assign different hashes for the same dex file. This will make merging of information to fail.
   ProfileCompilationInfo info1;
-  SetupProfile("p1", 1, kNumberOfMethodsToEnableCompilation, 0, profile1, &info1);
+  SetupProfile(dex1, dex2, kNumberOfMethodsToEnableCompilation, 0, profile1, &info1);
   ProfileCompilationInfo info2;
-  SetupProfile("p1", 2, kNumberOfMethodsToEnableCompilation, 0, profile2, &info2);
+  SetupProfile(
+      dex1_checksum_missmatch, dex2, kNumberOfMethodsToEnableCompilation, 0, profile2, &info2);
 
   // We should fail processing.
   ASSERT_EQ(ProfileAssistant::kErrorBadProfiles,
@@ -644,9 +631,10 @@
   const uint16_t kNumberOfMethodsToEnableCompilation = 100;
   // Assign different hashes for the same dex file. This will make merging of information to fail.
   ProfileCompilationInfo info1;
-  SetupProfile("p1", 1, kNumberOfMethodsToEnableCompilation, 0, profile1, &info1);
+  SetupProfile(dex1, dex2, kNumberOfMethodsToEnableCompilation, 0, profile1, &info1);
   ProfileCompilationInfo reference_info;
-  SetupProfile("p1", 2, kNumberOfMethodsToEnableCompilation, 0, reference_profile, &reference_info);
+  SetupProfile(
+      dex1_checksum_missmatch, dex2, kNumberOfMethodsToEnableCompilation, 0, reference_profile, &reference_info);
 
   // We should not advise compilation.
   ASSERT_TRUE(profile1.GetFile()->ResetOffset());
@@ -698,10 +686,23 @@
   ASSERT_EQ(output_file_contents, file_contents);
 }
 
+TEST_F(ProfileAssistantTest, TestArrayClass) {
+  std::vector<std::string> class_names = {
+    "[Ljava/lang/Comparable;",
+  };
+  std::string file_contents;
+  for (std::string& class_name : class_names) {
+    file_contents += class_name + std::string("\n");
+  }
+  std::string output_file_contents;
+  ASSERT_TRUE(CreateAndDump(file_contents, &output_file_contents));
+  ASSERT_EQ(output_file_contents, file_contents);
+}
+
 TEST_F(ProfileAssistantTest, TestProfileCreationGenerateMethods) {
   // Class names put here need to be in sorted order.
   std::vector<std::string> class_names = {
-    "Ljava/lang/Math;->*",
+    "HLjava/lang/Math;->*",
   };
   std::string input_file_contents;
   std::string expected_contents;
@@ -727,15 +728,18 @@
     if (!method.IsCopied() && method.GetCodeItem() != nullptr) {
       ++method_count;
       std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> pmi =
-          info.GetMethod(method.GetDexFile()->GetLocation(),
-                         method.GetDexFile()->GetLocationChecksum(),
-                         method.GetDexMethodIndex());
+          info.GetHotMethodInfo(MethodReference(method.GetDexFile(), method.GetDexMethodIndex()));
       ASSERT_TRUE(pmi != nullptr) << method.PrettyMethod();
     }
   }
   EXPECT_GT(method_count, 0u);
 }
 
+static std::string JoinProfileLines(const std::vector<std::string>& lines) {
+  std::string result = android::base::Join(lines, '\n');
+  return result + '\n';
+}
+
 TEST_F(ProfileAssistantTest, TestBootImageProfile) {
   const std::string core_dex = GetLibCoreDexFileNames()[0];
 
@@ -748,102 +752,210 @@
   // Not in image becauseof not enough occurrences.
   const std::string kUncommonCleanClass = "Ljava/lang/Process;";
   const std::string kUncommonDirtyClass = "Ljava/lang/Package;";
-  // Method that is hot.
-  // Also adds the class through inference since it is in each dex.
-  const std::string kHotMethod = "Ljava/lang/Comparable;->compareTo(Ljava/lang/Object;)I";
-  // Method that doesn't add the class since its only in one profile. Should still show up in the
-  // boot profile.
-  const std::string kOtherMethod = "Ljava/util/HashMap;-><init>()V";
-  // Method that gets marked as hot since it's in multiple profiles.
-  const std::string kMultiMethod = "Ljava/util/ArrayList;->clear()V";
+  // Method that is common and hot. Should end up in profile.
+  const std::string kCommonHotMethod = "Ljava/lang/Comparable;->compareTo(Ljava/lang/Object;)I";
+  // Uncommon method, should not end up in profile
+  const std::string kUncommonMethod = "Ljava/util/HashMap;-><init>()V";
+  // Method that gets marked as hot since it's in multiple profiles and marked as startup.
+  const std::string kStartupMethodForUpgrade = "Ljava/util/ArrayList;->clear()V";
+  // Startup method used by a special package which will get a different threshold.
+  const std::string kSpecialPackageStartupMethod =
+      "Ljava/lang/Object;->toString()Ljava/lang/String;";
+  // Method used by a special package which will get a different threshold.
+  const std::string kUncommonSpecialPackageMethod = "Ljava/lang/Object;->hashCode()I";
+  // Blacklisted class
+  const std::string kPreloadedBlacklistedClass = "Ljava/lang/Thread;";
 
   // Thresholds for this test.
-  static const size_t kDirtyThreshold = 3;
-  static const size_t kCleanThreshold = 2;
-  static const size_t kMethodThreshold = 2;
+  static const size_t kDirtyThreshold = 100;
+  static const size_t kCleanThreshold = 50;
+  static const size_t kPreloadedThreshold = 100;
+  static const size_t kMethodThreshold = 75;
+  static const size_t kSpecialThreshold = 50;
+  const std::string kSpecialPackage = "dex4";
 
-  // Create a bunch of boot profiles.
-  std::string dex1 =
-      kCleanClass + "\n" +
-      kDirtyClass + "\n" +
-      kUncommonCleanClass + "\n" +
-      "H" + kHotMethod + "\n" +
-      kUncommonDirtyClass;
-  profiles.emplace_back(ScratchFile());
-  EXPECT_TRUE(CreateProfile(
-      dex1, profiles.back().GetFilename(), core_dex));
+  // Create boot profile content, attributing the classes and methods to different dex files.
+  std::vector<std::string> input_data = {
+      "{dex1}" + kCleanClass,
+      "{dex1}" + kDirtyClass,
+      "{dex1}" + kUncommonCleanClass,
+      "{dex1}H" + kCommonHotMethod,
+      "{dex1}P" + kStartupMethodForUpgrade,
+      "{dex1}" + kUncommonDirtyClass,
+      "{dex1}" + kPreloadedBlacklistedClass,
 
-  // Create a bunch of boot profiles.
-  std::string dex2 =
-      kCleanClass + "\n" +
-      kDirtyClass + "\n" +
-      "P" + kHotMethod + "\n" +
-      "P" + kMultiMethod + "\n" +
-      kUncommonDirtyClass;
-  profiles.emplace_back(ScratchFile());
-  EXPECT_TRUE(CreateProfile(
-      dex2, profiles.back().GetFilename(), core_dex));
+      "{dex2}" + kCleanClass,
+      "{dex2}" + kDirtyClass,
+      "{dex2}P" + kCommonHotMethod,
+      "{dex2}P" + kStartupMethodForUpgrade,
+      "{dex2}" + kUncommonDirtyClass,
+      "{dex2}" + kPreloadedBlacklistedClass,
 
-  // Create a bunch of boot profiles.
-  std::string dex3 =
-      "S" + kHotMethod + "\n" +
-      "P" + kOtherMethod + "\n" +
-      "P" + kMultiMethod + "\n" +
-      kDirtyClass + "\n";
-  profiles.emplace_back(ScratchFile());
-  EXPECT_TRUE(CreateProfile(
-      dex3, profiles.back().GetFilename(), core_dex));
+      "{dex3}P" + kUncommonMethod,
+      "{dex3}PS" + kStartupMethodForUpgrade,
+      "{dex3}S" + kCommonHotMethod,
+      "{dex3}S" + kSpecialPackageStartupMethod,
+      "{dex3}" + kDirtyClass,
+      "{dex3}" + kPreloadedBlacklistedClass,
+
+      "{dex4}" + kDirtyClass,
+      "{dex4}P" + kCommonHotMethod,
+      "{dex4}S" + kSpecialPackageStartupMethod,
+      "{dex4}P" + kUncommonSpecialPackageMethod,
+      "{dex4}" + kPreloadedBlacklistedClass,
+  };
+  std::string input_file_contents = JoinProfileLines(input_data);
+
+  ScratchFile preloaded_class_blacklist;
+  std::string blacklist_content = DescriptorToDot(kPreloadedBlacklistedClass.c_str());
+  EXPECT_TRUE(preloaded_class_blacklist.GetFile()->WriteFully(
+      blacklist_content.c_str(), blacklist_content.length()));
+
+  EXPECT_EQ(0, preloaded_class_blacklist.GetFile()->Flush());
+  EXPECT_TRUE(preloaded_class_blacklist.GetFile()->ResetOffset());
+  // Expected data
+  std::vector<std::string> expected_data = {
+      kCleanClass,
+      kDirtyClass,
+      kPreloadedBlacklistedClass,
+      "HSP" + kCommonHotMethod,
+      "HS" + kSpecialPackageStartupMethod,
+      "HSP" + kStartupMethodForUpgrade
+  };
+  std::string expected_profile_content = JoinProfileLines(expected_data);
+
+  std::vector<std::string> expected_preloaded_data = {
+       DescriptorToDot(kDirtyClass.c_str())
+  };
+  std::string expected_preloaded_content = JoinProfileLines(expected_preloaded_data);
+
+  ScratchFile profile;
+  EXPECT_TRUE(CreateProfile(input_file_contents, profile.GetFilename(), core_dex));
+
+  ProfileCompilationInfo bootProfile;
+  bootProfile.Load(profile.GetFilename(), /*for_boot_image*/ true);
 
   // Generate the boot profile.
   ScratchFile out_profile;
+  ScratchFile out_preloaded_classes;
+  ASSERT_TRUE(out_profile.GetFile()->ResetOffset());
+  ASSERT_TRUE(out_preloaded_classes.GetFile()->ResetOffset());
   std::vector<std::string> args;
   args.push_back(GetProfmanCmd());
   args.push_back("--generate-boot-image-profile");
-  args.push_back("--boot-image-class-threshold=" + std::to_string(kDirtyThreshold));
-  args.push_back("--boot-image-clean-class-threshold=" + std::to_string(kCleanThreshold));
-  args.push_back("--boot-image-sampled-method-threshold=" + std::to_string(kMethodThreshold));
-  args.push_back("--reference-profile-file=" + out_profile.GetFilename());
+  args.push_back("--class-threshold=" + std::to_string(kDirtyThreshold));
+  args.push_back("--clean-class-threshold=" + std::to_string(kCleanThreshold));
+  args.push_back("--method-threshold=" + std::to_string(kMethodThreshold));
+  args.push_back("--preloaded-class-threshold=" + std::to_string(kPreloadedThreshold));
+  args.push_back(
+      "--special-package=" + kSpecialPackage + ":" + std::to_string(kSpecialThreshold));
+  args.push_back("--profile-file=" + profile.GetFilename());
+  args.push_back("--out-profile-path=" + out_profile.GetFilename());
+  args.push_back("--out-preloaded-classes-path=" + out_preloaded_classes.GetFilename());
   args.push_back("--apk=" + core_dex);
   args.push_back("--dex-location=" + core_dex);
-  for (const ScratchFile& profile : profiles) {
-    args.push_back("--profile-file=" + profile.GetFilename());
-  }
+  args.push_back("--preloaded-classes-blacklist=" + preloaded_class_blacklist.GetFilename());
+
   std::string error;
-  EXPECT_EQ(ExecAndReturnCode(args, &error), 0) << error;
-  ASSERT_EQ(0, out_profile.GetFile()->Flush());
+  ASSERT_EQ(ExecAndReturnCode(args, &error), 0) << error;
   ASSERT_TRUE(out_profile.GetFile()->ResetOffset());
 
   // Verify the boot profile contents.
-  std::string output_file_contents;
-  EXPECT_TRUE(DumpClassesAndMethods(out_profile.GetFilename(), &output_file_contents));
-  // Common classes, should be in the classes of the profile.
-  EXPECT_NE(output_file_contents.find(kCleanClass + "\n"), std::string::npos)
-      << output_file_contents;
-  EXPECT_NE(output_file_contents.find(kDirtyClass + "\n"), std::string::npos)
-      << output_file_contents;
-  // Uncommon classes, should not fit preloaded class criteria and should not be in the profile.
-  EXPECT_EQ(output_file_contents.find(kUncommonCleanClass + "\n"), std::string::npos)
-      << output_file_contents;
-  EXPECT_EQ(output_file_contents.find(kUncommonDirtyClass + "\n"), std::string::npos)
-      << output_file_contents;
-  // Inferred class from a method common to all three profiles.
-  EXPECT_NE(output_file_contents.find("Ljava/lang/Comparable;\n"), std::string::npos)
-      << output_file_contents;
-  // Aggregated methods hotness information.
-  EXPECT_NE(output_file_contents.find("HSP" + kHotMethod), std::string::npos)
-      << output_file_contents;
-  EXPECT_NE(output_file_contents.find("P" + kOtherMethod), std::string::npos)
-      << output_file_contents;
-  // Not inferred class, method is only in one profile.
-  EXPECT_EQ(output_file_contents.find("Ljava/util/HashMap;\n"), std::string::npos)
-      << output_file_contents;
-  // Test the sampled methods that became hot.
-  // Other method is in only one profile, it should not become hot.
-  EXPECT_EQ(output_file_contents.find("HP" + kOtherMethod), std::string::npos)
-      << output_file_contents;
-  // Multi method is in at least two profiles, it should become hot.
-  EXPECT_NE(output_file_contents.find("HP" + kMultiMethod), std::string::npos)
-      << output_file_contents;
+  std::string output_profile_contents;
+  ASSERT_TRUE(android::base::ReadFileToString(
+      out_profile.GetFilename(), &output_profile_contents));
+  ASSERT_EQ(output_profile_contents, expected_profile_content);
+
+    // Verify the preloaded classes content.
+  std::string output_preloaded_contents;
+  ASSERT_TRUE(android::base::ReadFileToString(
+      out_preloaded_classes.GetFilename(), &output_preloaded_contents));
+  ASSERT_EQ(output_preloaded_contents, expected_preloaded_content);
+}
+
+TEST_F(ProfileAssistantTest, TestBootImageProfileWith2RawProfiles) {
+  const std::string core_dex = GetLibCoreDexFileNames()[0];
+
+  std::vector<ScratchFile> profiles;
+
+  const std::string kCommonClassUsedByDex1 = "Ljava/lang/CharSequence;";
+  const std::string kCommonClassUsedByDex1Dex2 = "Ljava/lang/Object;";
+  const std::string kUncommonClass = "Ljava/lang/Process;";
+  const std::string kCommonHotMethodUsedByDex1 =
+      "Ljava/lang/Comparable;->compareTo(Ljava/lang/Object;)I";
+  const std::string kCommonHotMethodUsedByDex1Dex2 = "Ljava/lang/Object;->hashCode()I";
+  const std::string kUncommonHotMethod = "Ljava/util/HashMap;-><init>()V";
+
+
+  // Thresholds for this test.
+  static const size_t kDirtyThreshold = 100;
+  static const size_t kCleanThreshold = 100;
+  static const size_t kMethodThreshold = 100;
+
+    // Create boot profile content, attributing the classes and methods to different dex files.
+  std::vector<std::string> input_data1 = {
+      "{dex1}" + kCommonClassUsedByDex1,
+      "{dex1}" + kCommonClassUsedByDex1Dex2,
+      "{dex1}" + kUncommonClass,
+      "{dex1}H" + kCommonHotMethodUsedByDex1Dex2,
+      "{dex1}" + kCommonHotMethodUsedByDex1,
+  };
+  std::vector<std::string> input_data2 = {
+      "{dex1}" + kCommonClassUsedByDex1,
+      "{dex2}" + kCommonClassUsedByDex1Dex2,
+      "{dex1}H" + kCommonHotMethodUsedByDex1,
+      "{dex2}" + kCommonHotMethodUsedByDex1Dex2,
+      "{dex1}" + kUncommonHotMethod,
+  };
+  std::string input_file_contents1 = JoinProfileLines(input_data1);
+  std::string input_file_contents2 = JoinProfileLines(input_data2);
+
+  // Expected data
+  std::vector<std::string> expected_data = {
+      kCommonClassUsedByDex1,
+      kCommonClassUsedByDex1Dex2,
+      "H" + kCommonHotMethodUsedByDex1,
+      "H" + kCommonHotMethodUsedByDex1Dex2
+  };
+  std::string expected_profile_content = JoinProfileLines(expected_data);
+
+  ScratchFile profile1;
+  ScratchFile profile2;
+  EXPECT_TRUE(CreateProfile(input_file_contents1, profile1.GetFilename(), core_dex));
+  EXPECT_TRUE(CreateProfile(input_file_contents2, profile2.GetFilename(), core_dex));
+
+  ProfileCompilationInfo boot_profile1;
+  ProfileCompilationInfo boot_profile2;
+  boot_profile1.Load(profile1.GetFilename(), /*for_boot_image*/ true);
+  boot_profile2.Load(profile2.GetFilename(), /*for_boot_image*/ true);
+
+  // Generate the boot profile.
+  ScratchFile out_profile;
+  ScratchFile out_preloaded_classes;
+  ASSERT_TRUE(out_profile.GetFile()->ResetOffset());
+  ASSERT_TRUE(out_preloaded_classes.GetFile()->ResetOffset());
+  std::vector<std::string> args;
+  args.push_back(GetProfmanCmd());
+  args.push_back("--generate-boot-image-profile");
+  args.push_back("--class-threshold=" + std::to_string(kDirtyThreshold));
+  args.push_back("--clean-class-threshold=" + std::to_string(kCleanThreshold));
+  args.push_back("--method-threshold=" + std::to_string(kMethodThreshold));
+  args.push_back("--profile-file=" + profile1.GetFilename());
+  args.push_back("--profile-file=" + profile2.GetFilename());
+  args.push_back("--out-profile-path=" + out_profile.GetFilename());
+  args.push_back("--out-preloaded-classes-path=" + out_preloaded_classes.GetFilename());
+  args.push_back("--apk=" + core_dex);
+  args.push_back("--dex-location=" + core_dex);
+
+  std::string error;
+  ASSERT_EQ(ExecAndReturnCode(args, &error), 0) << error;
+  ASSERT_TRUE(out_profile.GetFile()->ResetOffset());
+
+  // Verify the boot profile contents.
+  std::string output_profile_contents;
+  ASSERT_TRUE(android::base::ReadFileToString(
+      out_profile.GetFilename(), &output_profile_contents));
+  ASSERT_EQ(output_profile_contents, expected_profile_content);
 }
 
 TEST_F(ProfileAssistantTest, TestProfileCreationOneNotMatched) {
@@ -885,11 +997,11 @@
 TEST_F(ProfileAssistantTest, TestProfileCreateInlineCache) {
   // Create the profile content.
   std::vector<std::string> methods = {
-    "LTestInline;->inlineMonomorphic(LSuper;)I+LSubA;",
-    "LTestInline;->inlinePolymorphic(LSuper;)I+LSubA;,LSubB;,LSubC;",
-    "LTestInline;->inlineMegamorphic(LSuper;)I+LSubA;,LSubB;,LSubC;,LSubD;,LSubE;",
-    "LTestInline;->inlineMissingTypes(LSuper;)I+missing_types",
-    "LTestInline;->noInlineCache(LSuper;)I"
+    "HLTestInline;->inlineMonomorphic(LSuper;)I+LSubA;",
+    "HLTestInline;->inlinePolymorphic(LSuper;)I+LSubA;,LSubB;,LSubC;",
+    "HLTestInline;->inlineMegamorphic(LSuper;)I+LSubA;,LSubB;,LSubC;,LSubD;,LSubE;",
+    "HLTestInline;->inlineMissingTypes(LSuper;)I+missing_types",
+    "HLTestInline;->noInlineCache(LSuper;)I"
   };
   std::string input_file_contents;
   for (std::string& m : methods) {
@@ -986,9 +1098,8 @@
     ArtMethod* no_inline_cache = GetVirtualMethod(class_loader, "LTestInline;", "noInlineCache");
     ASSERT_TRUE(no_inline_cache != nullptr);
     std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> pmi_no_inline_cache =
-        info.GetMethod(no_inline_cache->GetDexFile()->GetLocation(),
-                       no_inline_cache->GetDexFile()->GetLocationChecksum(),
-                       no_inline_cache->GetDexMethodIndex());
+        info.GetHotMethodInfo(MethodReference(
+            no_inline_cache->GetDexFile(), no_inline_cache->GetDexMethodIndex()));
     ASSERT_TRUE(pmi_no_inline_cache != nullptr);
     ASSERT_TRUE(pmi_no_inline_cache->inline_caches->empty());
   }
@@ -1004,7 +1115,7 @@
   // The new profile info will contain the methods with indices 0-100.
   const uint16_t kNumberOfMethodsToEnableCompilation = 100;
   ProfileCompilationInfo info1;
-  SetupProfile("p1", 1, kNumberOfMethodsToEnableCompilation, 0, profile1, &info1,
+  SetupProfile(dex1, dex2, kNumberOfMethodsToEnableCompilation, 0, profile1, &info1,
       /*start_method_index=*/0, /*reverse_dex_write_order=*/false);
 
   // The reference profile info will contain the methods with indices 50-150.
@@ -1013,7 +1124,7 @@
   // with a different dex order correctly.
   const uint16_t kNumberOfMethodsAlreadyCompiled = 100;
   ProfileCompilationInfo reference_info;
-  SetupProfile("p1", 1, kNumberOfMethodsAlreadyCompiled, 0, reference_profile,
+  SetupProfile(dex1, dex2, kNumberOfMethodsAlreadyCompiled, 0, reference_profile,
       &reference_info, kNumberOfMethodsToEnableCompilation / 2, /*reverse_dex_write_order=*/true);
 
   // We should advise compilation.
@@ -1037,8 +1148,8 @@
 TEST_F(ProfileAssistantTest, TestProfileCreateWithInvalidData) {
   // Create the profile content.
   std::vector<std::string> profile_methods = {
-    "LTestInline;->inlineMonomorphic(LSuper;)I+invalid_class",
-    "LTestInline;->invalid_method",
+    "HLTestInline;->inlineMonomorphic(LSuper;)I+invalid_class",
+    "HLTestInline;->invalid_method",
     "invalid_class"
   };
   std::string input_file_contents;
@@ -1070,9 +1181,7 @@
 
   // Verify that the inline cache contains the invalid type.
   std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> pmi =
-      info.GetMethod(dex_file->GetLocation(),
-                     dex_file->GetLocationChecksum(),
-                     inline_monomorphic->GetDexMethodIndex());
+      info.GetHotMethodInfo(MethodReference(dex_file, inline_monomorphic->GetDexMethodIndex()));
   ASSERT_TRUE(pmi != nullptr);
   ASSERT_EQ(pmi->inline_caches->size(), 1u);
   const ProfileCompilationInfo::DexPcData& dex_pc_data = pmi->inline_caches->begin()->second;
@@ -1121,9 +1230,7 @@
   EXPECT_GT(startup_methods.size(), 0u);
   EXPECT_GT(post_startup_methods.size(), 0u);
   ProfileCompilationInfo info1;
-  SetupBasicProfile("p1",
-                    1,
-                    kNumberOfMethods,
+  SetupBasicProfile(dex1,
                     hot_methods,
                     startup_methods,
                     post_startup_methods,
@@ -1176,17 +1283,15 @@
   // The new profile info will contain the methods with indices 0-100.
   const uint16_t kNumberOfMethodsToEnableCompilation = 100;
   ProfileCompilationInfo info1;
-  SetupProfile(d1.GetLocation(), d1.GetLocationChecksum(), "p1", 1,
-      kNumberOfMethodsToEnableCompilation, 0, profile1, &info1);
+  SetupProfile(&d1, dex1, kNumberOfMethodsToEnableCompilation, 0, profile1, &info1);
   ProfileCompilationInfo info2;
-  SetupProfile(d2.GetLocation(), d2.GetLocationChecksum(), "p2", 2,
-      kNumberOfMethodsToEnableCompilation, 0, profile2, &info2);
+  SetupProfile(&d2, dex2, kNumberOfMethodsToEnableCompilation, 0, profile2, &info2);
 
 
   // The reference profile info will contain the methods with indices 50-150.
   const uint16_t kNumberOfMethodsAlreadyCompiled = 100;
   ProfileCompilationInfo reference_info;
-  SetupProfile(d1.GetLocation(), d1.GetLocationChecksum(), "p1", 1,
+  SetupProfile(&d1, dex1,
       kNumberOfMethodsAlreadyCompiled, 0, reference_profile,
       &reference_info, kNumberOfMethodsToEnableCompilation / 2);
 
@@ -1204,7 +1309,7 @@
   argv_str.push_back("--apk-fd=" + std::to_string(apk_fd.get()));
   std::string error;
 
-  EXPECT_EQ(ExecAndReturnCode(argv_str, &error), 0) << error;
+  EXPECT_EQ(ExecAndReturnCode(argv_str, &error), ProfileAssistant::kCompile) << error;
 
   // Verify that we can load the result.
 
@@ -1223,9 +1328,9 @@
 
   ProfileCompilationInfo::ProfileLoadFilterFn filter_fn =
       [&d1, &d2](const std::string& dex_location, uint32_t checksum) -> bool {
-          return (dex_location == ProfileCompilationInfo::GetProfileDexFileKey(d1.GetLocation())
+          return (dex_location == ProfileCompilationInfo::GetProfileDexFileBaseKey(d1.GetLocation())
               && checksum == d1.GetLocationChecksum())
-              || (dex_location == ProfileCompilationInfo::GetProfileDexFileKey(d2.GetLocation())
+              || (dex_location == ProfileCompilationInfo::GetProfileDexFileBaseKey(d2.GetLocation())
               && checksum == d2.GetLocationChecksum());
         };
 
@@ -1255,18 +1360,18 @@
 
   ProfileCompilationInfo info1;
   uint16_t num_methods_to_add = std::min(d1.NumMethodIds(), d2.NumMethodIds());
-  SetupProfile("fake-location1",
-               d1.GetLocationChecksum(),
-               "fake-location2",
-               d2.GetLocationChecksum(),
+
+  FakeDexStorage local_storage;
+  const DexFile* dex_to_be_updated1 = local_storage.AddFakeDex(
+      "fake-location1", d1.GetLocationChecksum(), d1.NumMethodIds());
+  const DexFile* dex_to_be_updated2 = local_storage.AddFakeDex(
+      "fake-location2", d2.GetLocationChecksum(), d2.NumMethodIds());
+  SetupProfile(dex_to_be_updated1,
+               dex_to_be_updated2,
                num_methods_to_add,
                /*number_of_classes=*/ 0,
                profile1,
-               &info1,
-               /*start_method_index=*/ 0,
-               /*reverse_dex_write_order=*/ false,
-               /*number_of_methods1=*/ d1.NumMethodIds(),
-               /*number_of_methods2=*/ d2.NumMethodIds());
+               &info1);
 
   // Run profman and pass the dex file with --apk-fd.
   android::base::unique_fd apk_fd(
@@ -1291,66 +1396,186 @@
 
   // Verify that the renaming was done.
   for (uint16_t i = 0; i < num_methods_to_add; i ++) {
-      std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> pmi;
-      ASSERT_TRUE(result.GetMethod(d1.GetLocation(), d1.GetLocationChecksum(), i) != nullptr) << i;
-      ASSERT_TRUE(result.GetMethod(d2.GetLocation(), d2.GetLocationChecksum(), i) != nullptr) << i;
+    std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> pmi;
+    ASSERT_TRUE(result.GetHotMethodInfo(MethodReference(&d1, i)) != nullptr) << i;
+    ASSERT_TRUE(result.GetHotMethodInfo(MethodReference(&d2, i)) != nullptr) << i;
 
-      ASSERT_TRUE(result.GetMethod("fake-location1", d1.GetLocationChecksum(), i) == nullptr);
-      ASSERT_TRUE(result.GetMethod("fake-location2", d2.GetLocationChecksum(), i) == nullptr);
+    ASSERT_TRUE(result.GetHotMethodInfo(MethodReference(dex_to_be_updated1, i)) == nullptr);
+    ASSERT_TRUE(result.GetHotMethodInfo(MethodReference(dex_to_be_updated2, i)) == nullptr);
   }
 }
 
-TEST_F(ProfileAssistantTest, MergeProfilesWithCounters) {
-  ScratchFile profile1;
-  ScratchFile profile2;
+TEST_F(ProfileAssistantTest, BootImageMerge) {
+  ScratchFile profile;
+  ScratchFile reference_profile;
+  std::vector<int> profile_fds({GetFd(profile)});
+  int reference_profile_fd = GetFd(reference_profile);
+  std::vector<uint32_t> hot_methods_cur;
+  std::vector<uint32_t> hot_methods_ref;
+  std::vector<uint32_t> empty_vector;
+  size_t num_methods = 100;
+  for (size_t i = 0; i < num_methods; ++i) {
+    hot_methods_cur.push_back(i);
+  }
+  for (size_t i = 0; i < num_methods; ++i) {
+    hot_methods_ref.push_back(i);
+  }
+  ProfileCompilationInfo info1;
+  SetupBasicProfile(dex1, hot_methods_cur, empty_vector, empty_vector,
+      profile, &info1);
+  ProfileCompilationInfo info2(/*for_boot_image=*/true);
+  SetupBasicProfile(dex1, hot_methods_ref, empty_vector, empty_vector,
+      reference_profile, &info2);
+
+  std::vector<const std::string> extra_args({"--force-merge", "--boot-image-merge"});
+
+  int return_code = ProcessProfiles(profile_fds, reference_profile_fd, extra_args);
+
+  ASSERT_EQ(return_code, ProfileAssistant::kSuccess);
+
+  // Verify the result: it should be equal to info2 since info1 is a regular profile
+  // and should be ignored.
+  ProfileCompilationInfo result;
+  ASSERT_TRUE(reference_profile.GetFile()->ResetOffset());
+  ASSERT_TRUE(result.Load(reference_profile.GetFd()));
+  ASSERT_TRUE(result.Equals(info2));
+}
+
+// Under default behaviour we should not advise compilation
+// and the reference profile should not be updated.
+// However we pass --force-merge to force aggregation and in this case
+// we should see an update.
+TEST_F(ProfileAssistantTest, ForceMerge) {
+  const uint16_t kNumberOfClassesInRefProfile = 6000;
+  const uint16_t kNumberOfClassesInCurProfile = 6110;  // Threshold is 2%.
+
+  ScratchFile profile;
   ScratchFile reference_profile;
 
-  // The new profile info will contain methods with indices 0-100.
-  const uint16_t kNumberOfMethodsToEnableCompilation = 100;
-  const uint16_t kNumberOfClasses = 50;
+  std::vector<int> profile_fds({ GetFd(profile)});
+  int reference_profile_fd = GetFd(reference_profile);
 
+  ProfileCompilationInfo info1;
+  SetupProfile(dex1, dex2, 0, kNumberOfClassesInRefProfile, profile,  &info1);
+  ProfileCompilationInfo info2;
+  SetupProfile(dex1, dex2, 0, kNumberOfClassesInCurProfile, reference_profile, &info2);
+
+  std::vector<const std::string> extra_args({"--force-merge"});
+  int return_code = ProcessProfiles(profile_fds, reference_profile_fd, extra_args);
+
+  ASSERT_EQ(return_code, ProfileAssistant::kSuccess);
+
+  // Check that the result is the aggregation.
+  ProfileCompilationInfo result;
+  ASSERT_TRUE(reference_profile.GetFile()->ResetOffset());
+  ASSERT_TRUE(result.Load(reference_profile.GetFd()));
+  ASSERT_TRUE(info1.MergeWith(info2));
+  ASSERT_TRUE(result.Equals(info1));
+}
+
+// Test that we consider the annotations when we merge boot image profiles.
+TEST_F(ProfileAssistantTest, BootImageMergeWithAnnotations) {
+  ScratchFile profile;
+  ScratchFile reference_profile;
+
+  std::vector<int> profile_fds({GetFd(profile)});
+  int reference_profile_fd = GetFd(reference_profile);
+
+  // Use a real dex file to generate profile test data so that we can pass descriptors to profman.
   std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("ProfileTestMultiDex");
   const DexFile& d1 = *dex_files[0];
   const DexFile& d2 = *dex_files[1];
-  ProfileCompilationInfo info1;
-  SetupProfile(
-      d1.GetLocation(), d1.GetLocationChecksum(),
-      d2.GetLocation(), d2.GetLocationChecksum(),
-      kNumberOfMethodsToEnableCompilation, kNumberOfClasses, profile1, &info1);
-  ProfileCompilationInfo info2;
-  SetupProfile(
-      d1.GetLocation(), d1.GetLocationChecksum(),
-      d2.GetLocation(), d2.GetLocationChecksum(),
-      kNumberOfMethodsToEnableCompilation, kNumberOfClasses, profile2, &info2);
+  // The new profile info will contain the methods with indices 0-100.
+  ProfileCompilationInfo info(/*for_boot_image*/ true);
+  ProfileCompilationInfo::ProfileSampleAnnotation psa1("package1");
+  ProfileCompilationInfo::ProfileSampleAnnotation psa2("package2");
+
+  AddMethod(&info, &d1, 0, Hotness::kFlagHot, psa1);
+  AddMethod(&info, &d2, 0, Hotness::kFlagHot, psa2);
+  info.Save(profile.GetFd());
+  profile.GetFile()->ResetOffset();
+
+  // Run profman and pass the dex file with --apk-fd.
+  android::base::unique_fd apk_fd(
+      open(GetTestDexFileName("ProfileTestMultiDex").c_str(), O_RDONLY));  // NOLINT
+  ASSERT_GE(apk_fd.get(), 0);
 
   std::string profman_cmd = GetProfmanCmd();
   std::vector<std::string> argv_str;
   argv_str.push_back(profman_cmd);
-  argv_str.push_back("--profile-file-fd=" + std::to_string(profile1.GetFd()));
-  argv_str.push_back("--profile-file-fd=" + std::to_string(profile2.GetFd()));
+  argv_str.push_back("--profile-file-fd=" + std::to_string(profile.GetFd()));
   argv_str.push_back("--reference-profile-file-fd=" + std::to_string(reference_profile.GetFd()));
-  argv_str.push_back("--store-aggregation-counters");
+  argv_str.push_back("--apk-fd=" + std::to_string(apk_fd.get()));
+  argv_str.push_back("--force-merge");
+  argv_str.push_back("--boot-image-merge");
   std::string error;
 
-  EXPECT_EQ(ExecAndReturnCode(argv_str, &error), 0) << error;
+  EXPECT_EQ(ExecAndReturnCode(argv_str, &error), ProfileAssistant::kSuccess) << error;
 
-  // Verify that we can load the result and that the counters are in place.
-
+  // Verify that we can load the result and that it equals to what we saved.
   ProfileCompilationInfo result;
-  result.PrepareForAggregationCounters();
   ASSERT_TRUE(reference_profile.GetFile()->ResetOffset());
-  ASSERT_TRUE(result.Load(reference_profile.GetFd()));
-
-  ASSERT_TRUE(result.StoresAggregationCounters());
-  ASSERT_EQ(2, result.GetAggregationCounter());
-
-  for (uint16_t i = 0; i < kNumberOfMethodsToEnableCompilation; i++) {
-    ASSERT_EQ(1, result.GetMethodAggregationCounter(MethodReference(&d1, i)));
-    ASSERT_EQ(1, result.GetMethodAggregationCounter(MethodReference(&d2, i)));
-  }
-  for (uint16_t i = 0; i < kNumberOfClasses; i++) {
-    ASSERT_EQ(1, result.GetClassAggregationCounter(TypeReference(&d1, dex::TypeIndex(i))));
-  }
+  ASSERT_TRUE(result.Load(reference_profile_fd));
+  ASSERT_TRUE(info.Equals(result));
 }
 
+TEST_F(ProfileAssistantTest, DifferentProfileVersions) {
+  ScratchFile profile1;
+  ScratchFile profile2;
+
+  ProfileCompilationInfo info1(/*for_boot_image*/ false);
+  info1.Save(profile1.GetFd());
+  profile1.GetFile()->ResetOffset();
+
+  ProfileCompilationInfo info2(/*for_boot_image*/ true);
+  info2.Save(profile2.GetFd());
+  profile2.GetFile()->ResetOffset();
+
+  std::vector<int> profile_fds({ GetFd(profile1)});
+  int reference_profile_fd = GetFd(profile2);
+  ASSERT_EQ(ProcessProfiles(profile_fds, reference_profile_fd),
+            ProfileAssistant::kErrorDifferentVersions);
+
+  // Reverse the order of the profiles to verify we get the same behaviour.
+  profile_fds[0] = GetFd(profile2);
+  reference_profile_fd = GetFd(profile1);
+  profile1.GetFile()->ResetOffset();
+  profile2.GetFile()->ResetOffset();
+  ASSERT_EQ(ProcessProfiles(profile_fds, reference_profile_fd),
+            ProfileAssistant::kErrorDifferentVersions);
+}
+
+// Under default behaviour we will abort if we cannot load a profile during a merge
+// operation. However, if we pass --force-merge to force aggregation we should
+// ignore files we cannot load
+TEST_F(ProfileAssistantTest, ForceMergeIgnoreProfilesItCannotLoad) {
+  ScratchFile profile1;
+  ScratchFile profile2;
+
+  // Write corrupt data in the first file.
+  std::string content = "giberish";
+  ASSERT_TRUE(profile1.GetFile()->WriteFully(content.c_str(), content.length()));
+  profile1.GetFile()->ResetOffset();
+
+  ProfileCompilationInfo info2(/*for_boot_image*/ true);
+  info2.Save(profile2.GetFd());
+  profile2.GetFile()->ResetOffset();
+
+  std::vector<int> profile_fds({ GetFd(profile1)});
+  int reference_profile_fd = GetFd(profile2);
+
+  // With force-merge we should merge successfully.
+  std::vector<const std::string> extra_args({"--force-merge"});
+  ASSERT_EQ(ProcessProfiles(profile_fds, reference_profile_fd, extra_args),
+            ProfileAssistant::kSuccess);
+
+  ProfileCompilationInfo result;
+  ASSERT_TRUE(profile2.GetFile()->ResetOffset());
+  ASSERT_TRUE(result.Load(reference_profile_fd));
+  ASSERT_TRUE(info2.Equals(result));
+
+  // Without force-merge we should fail.
+  ASSERT_EQ(ProcessProfiles(profile_fds, reference_profile_fd, extra_args),
+            ProfileAssistant::kErrorBadProfiles);
+}
 }  // namespace art
diff --git a/profman/profman.cc b/profman/profman.cc
index f90bba9..a011cd0 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -29,6 +29,7 @@
 #include <unordered_set>
 #include <vector>
 
+#include "android-base/parsebool.h"
 #include "android-base/stringprintf.h"
 #include "android-base/strings.h"
 
@@ -51,11 +52,14 @@
 #include "dex/dex_file_loader.h"
 #include "dex/dex_file_types.h"
 #include "dex/type_reference.h"
+#include "profile/profile_boot_info.h"
 #include "profile/profile_compilation_info.h"
 #include "profile_assistant.h"
 
 namespace art {
 
+using ProfileSampleAnnotation = ProfileCompilationInfo::ProfileSampleAnnotation;
+
 static int original_argc;
 static char** original_argv;
 
@@ -145,22 +149,36 @@
   UsageError("");
   UsageError("  --generate-boot-image-profile: Generate a boot image profile based on input");
   UsageError("      profiles. Requires passing in dex files to inspect properties of classes.");
-  UsageError("  --boot-image-class-threshold=<value>: specify minimum number of class occurrences");
-  UsageError("      to include a class in the boot image profile. Default is 10.");
-  UsageError("  --boot-image-clean-class-threshold=<value>: specify minimum number of clean class");
-  UsageError("      occurrences to include a class in the boot image profile. A clean class is a");
-  UsageError("      class that doesn't have any static fields or native methods and is likely to");
-  UsageError("      remain clean in the image. Default is 3.");
-  UsageError("  --boot-image-sampled-method-threshold=<value>: minimum number of profiles a");
-  UsageError("      non-hot method needs to be in order to be hot in the output profile. The");
-  UsageError("      default is max int.");
+  UsageError("  --method-threshold=percentage between 0 and 100");
+  UsageError("      what threshold to apply to the methods when deciding whether or not to");
+  UsageError("      include it in the final profile.");
+  UsageError("  --class-threshold=percentage between 0 and 100");
+  UsageError("      what threshold to apply to the classes when deciding whether or not to");
+  UsageError("      include it in the final profile.");
+  UsageError("  --clean-class-threshold=percentage between 0 and 100");
+  UsageError("      what threshold to apply to the clean classes when deciding whether or not to");
+  UsageError("      include it in the final profile.");
+  UsageError("  --preloaded-class-threshold=percentage between 0 and 100");
+  UsageError("      what threshold to apply to the classes when deciding whether or not to");
+  UsageError("      include it in the final preloaded classes.");
+  UsageError("  --preloaded-classes-blacklist=file");
+  UsageError("      a file listing the classes that should not be preloaded in Zygote");
+  UsageError("  --upgrade-startup-to-hot=true|false:");
+  UsageError("      whether or not to upgrade startup methods to hot");
+  UsageError("  --special-package=pkg_name:percentage between 0 and 100");
+  UsageError("      what threshold to apply to the methods/classes that are used by the given");
+  UsageError("      package when deciding whether or not to include it in the final profile.");
+  UsageError("  --debug-append-uses=bool: whether or not to append package use as debug info.");
+  UsageError("  --out-profile-path=path: boot image profile output path");
+  UsageError("  --out-preloaded-classes-path=path: preloaded classes output path");
   UsageError("  --copy-and-update-profile-key: if present, profman will copy the profile from");
   UsageError("      the file passed with --profile-fd(file) to the profile passed with");
   UsageError("      --reference-profile-fd(file) and update at the same time the profile-key");
   UsageError("      of entries corresponding to the apks passed with --apk(-fd).");
-  UsageError("  --store-aggregation-counters: if present, profman will compute and store");
-  UsageError("      the aggregation counters of classes and methods in the output profile.");
-  UsageError("      In this case the profile will have a different version.");
+  UsageError("  --boot-image-merge: indicates that this merge is for a boot image profile.");
+  UsageError("      In this case, the reference profile must have a boot profile version.");
+  UsageError("  --force-merge: performs a forced merge, without analyzing if there is a");
+  UsageError("      significant difference between the current profile and the reference profile.");
   UsageError("");
 
   exit(EXIT_FAILURE);
@@ -177,6 +195,8 @@
 static const std::string kInvalidClassDescriptor = "invalid_class";  // NOLINT [runtime/string] [4]
 static const std::string kInvalidMethod = "invalid_method";  // NOLINT [runtime/string] [4]
 static const std::string kClassAllMethods = "*";  // NOLINT [runtime/string] [4]
+static constexpr char kAnnotationStart = '{';
+static constexpr char kAnnotationEnd = '}';
 static constexpr char kProfileParsingInlineChacheSep = '+';
 static constexpr char kProfileParsingTypeSep = ',';
 static constexpr char kProfileParsingFirstCharInSignature = '(';
@@ -188,26 +208,25 @@
   LOG(ERROR) << msg;
   exit(1);
 }
-
 template <typename T>
-static void ParseUintOption(const char* raw_option,
-                            std::string_view option_prefix,
-                            T* out) {
-  DCHECK(EndsWith(option_prefix, "="));
-  DCHECK(StartsWith(raw_option, option_prefix)) << raw_option << " " << option_prefix;
-  const char* value_string = raw_option + option_prefix.size();
+static void ParseUintValue(const std::string& option_name,
+                           const std::string& value,
+                           T* out,
+                           T min = std::numeric_limits<T>::min(),
+                           T max = std::numeric_limits<T>::max()) {
   int64_t parsed_integer_value = 0;
-  if (!android::base::ParseInt(value_string, &parsed_integer_value)) {
-    std::string option_name(option_prefix.substr(option_prefix.size() - 1u));
-    Usage("Failed to parse %s '%s' as an integer", option_name.c_str(), value_string);
+  if (!android::base::ParseInt(
+      value,
+      &parsed_integer_value,
+      static_cast<int64_t>(min),
+      static_cast<int64_t>(max))) {
+    Usage("Failed to parse %s '%s' as an integer", option_name.c_str(), value.c_str());
   }
   if (parsed_integer_value < 0) {
-    std::string option_name(option_prefix.substr(option_prefix.size() - 1u));
     Usage("%s passed a negative value %" PRId64, option_name.c_str(), parsed_integer_value);
   }
   if (static_cast<uint64_t>(parsed_integer_value) >
       static_cast<std::make_unsigned_t<T>>(std::numeric_limits<T>::max())) {
-    std::string option_name(option_prefix.substr(option_prefix.size() - 1u));
     Usage("%s passed a value %" PRIu64 " above max (%" PRIu64 ")",
           option_name.c_str(),
           static_cast<uint64_t>(parsed_integer_value),
@@ -216,6 +235,35 @@
   *out = dchecked_integral_cast<T>(parsed_integer_value);
 }
 
+template <typename T>
+static void ParseUintOption(const char* raw_option,
+                            std::string_view option_prefix,
+                            T* out,
+                            T min = std::numeric_limits<T>::min(),
+                            T max = std::numeric_limits<T>::max()) {
+  DCHECK(EndsWith(option_prefix, "="));
+  DCHECK(StartsWith(raw_option, option_prefix)) << raw_option << " " << option_prefix;
+  std::string option_name(option_prefix.substr(option_prefix.size() - 1u));
+  const char* value_string = raw_option + option_prefix.size();
+
+  ParseUintValue(option_name, value_string, out, min, max);
+}
+
+static void ParseBoolOption(const char* raw_option,
+                            std::string_view option_prefix,
+                            bool* out) {
+  DCHECK(EndsWith(option_prefix, "="));
+  DCHECK(StartsWith(raw_option, option_prefix)) << raw_option << " " << option_prefix;
+  const char* value_string = raw_option + option_prefix.size();
+  android::base::ParseBoolResult result = android::base::ParseBool(value_string);
+  if (result == android::base::ParseBoolResult::kError) {
+    std::string option_name(option_prefix.substr(option_prefix.size() - 1u));
+    Usage("Failed to parse %s '%s' as a boolean", option_name.c_str(), value_string);
+  }
+
+  *out = result == android::base::ParseBoolResult::kTrue;
+}
+
 // TODO(calin): This class has grown too much from its initial design. Split the functionality
 // into smaller, more contained pieces.
 class ProfMan final {
@@ -225,6 +273,7 @@
       dump_only_(false),
       dump_classes_and_methods_(false),
       generate_boot_image_profile_(false),
+      generate_boot_profile_(false),
       dump_output_to_fd_(kInvalidFd),
       test_profile_num_dex_(kDefaultTestProfileNumDex),
       test_profile_method_percerntage_(kDefaultTestProfileMethodPercentage),
@@ -232,7 +281,7 @@
       test_profile_seed_(NanoTime()),
       start_ns_(NanoTime()),
       copy_and_update_profile_key_(false),
-      store_aggregation_counters_(false) {}
+      profile_assistant_options_(ProfileAssistant::Options()) {}
 
   ~ProfMan() {
     LogCompletionTime();
@@ -268,20 +317,65 @@
         create_profile_from_file_ = std::string(option.substr(strlen("--create-profile-from=")));
       } else if (StartsWith(option, "--dump-output-to-fd=")) {
         ParseUintOption(raw_option, "--dump-output-to-fd=", &dump_output_to_fd_);
+      } else if (option == "--generate-boot-profile") {
+        generate_boot_profile_ = true;
       } else if (option == "--generate-boot-image-profile") {
         generate_boot_image_profile_ = true;
-      } else if (StartsWith(option, "--boot-image-class-threshold=")) {
+      } else if (StartsWith(option, "--method-threshold=")) {
         ParseUintOption(raw_option,
-                        "--boot-image-class-threshold=",
-                        &boot_image_options_.image_class_theshold);
-      } else if (StartsWith(option, "--boot-image-clean-class-threshold=")) {
+                        "--method-threshold=",
+                        &boot_image_options_.method_threshold,
+                        0u,
+                        100u);
+      } else if (StartsWith(option, "--class-threshold=")) {
         ParseUintOption(raw_option,
-                        "--boot-image-clean-class-threshold=",
-                        &boot_image_options_.image_class_clean_theshold);
-      } else if (StartsWith(option, "--boot-image-sampled-method-threshold=")) {
+                        "--class-threshold=",
+                        &boot_image_options_.image_class_threshold,
+                        0u,
+                        100u);
+      } else if (StartsWith(option, "--clean-class-threshold=")) {
         ParseUintOption(raw_option,
-                        "--boot-image-sampled-method-threshold=",
-                        &boot_image_options_.compiled_method_threshold);
+                        "--clean-class-threshold=",
+                        &boot_image_options_.image_class_clean_threshold,
+                        0u,
+                        100u);
+      } else if (StartsWith(option, "--preloaded-class-threshold=")) {
+        ParseUintOption(raw_option,
+                        "--preloaded-class-threshold=",
+                        &boot_image_options_.preloaded_class_threshold,
+                        0u,
+                        100u);
+      } else if (StartsWith(option, "--preloaded-classes-blacklist=")) {
+        std::string preloaded_classes_blacklist =
+            std::string(option.substr(strlen("--preloaded-classes-blacklist=")));
+        // Read the user-specified list of methods.
+        std::unique_ptr<std::set<std::string>>
+            blacklist(ReadCommentedInputFromFile<std::set<std::string>>(
+                preloaded_classes_blacklist.c_str(), nullptr));  // No post-processing.
+        boot_image_options_.preloaded_classes_blacklist.insert(
+            blacklist->begin(), blacklist->end());
+      } else if (StartsWith(option, "--upgrade-startup-to-hot=")) {
+        ParseBoolOption(raw_option,
+                        "--upgrade-startup-to-hot=",
+                        &boot_image_options_.upgrade_startup_to_hot);
+      } else if (StartsWith(option, "--special-package=")) {
+        std::vector<std::string> values;
+        Split(std::string(option.substr(strlen("--special-package="))), ':', &values);
+        if (values.size() != 2) {
+          Usage("--special-package needs to be specified as pkg_name:threshold");
+        }
+        uint32_t threshold;
+        ParseUintValue("special-package", values[1], &threshold, 0u, 100u);
+        boot_image_options_.special_packages_thresholds.Overwrite(values[0], threshold);
+      } else if (StartsWith(option, "--debug-append-uses=")) {
+        ParseBoolOption(raw_option,
+                        "--debug-append-uses=",
+                        &boot_image_options_.append_package_use_list);
+      } else if (StartsWith(option, "--out-profile-path=")) {
+        boot_profile_out_path_ = std::string(option.substr(strlen("--out-profile-path=")));
+      } else if (StartsWith(option, "--out-preloaded-classes-path=")) {
+        preloaded_classes_out_path_ = std::string(
+            option.substr(strlen("--out-preloaded-classes-path=")));
       } else if (StartsWith(option, "--profile-file=")) {
         profile_files_.push_back(std::string(option.substr(strlen("--profile-file="))));
       } else if (StartsWith(option, "--profile-file-fd=")) {
@@ -314,8 +408,10 @@
         ParseUintOption(raw_option, "--generate-test-profile-seed=", &test_profile_seed_);
       } else if (option == "--copy-and-update-profile-key") {
         copy_and_update_profile_key_ = true;
-      } else if (option == "--store-aggregation-counters") {
-        store_aggregation_counters_ = true;
+      } else if (option == "--boot-image-merge") {
+        profile_assistant_options_.SetBootImageMerge(true);
+      } else if (option == "--force-merge") {
+        profile_assistant_options_.SetForceMerge(true);
       } else {
         Usage("Unknown argument '%s'", raw_option);
       }
@@ -373,14 +469,15 @@
     // Build the profile filter function. If the set of keys is empty it means we
     // don't have any apks; as such we do not filter anything.
     const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn =
-        [profile_filter_keys](const std::string& dex_location, uint32_t checksum) {
+        [profile_filter_keys](const std::string& profile_key, uint32_t checksum) {
             if (profile_filter_keys.empty()) {
               // No --apk was specified. Accept all dex files.
               return true;
             } else {
-              bool res = profile_filter_keys.find(
-                  ProfileFilterKey(dex_location, checksum)) != profile_filter_keys.end();
-              return res;
+              // Remove any annotations from the profile key before comparing with the keys we get from apks.
+              std::string base_key = ProfileCompilationInfo::GetBaseKeyFromAugmentedKey(profile_key);
+              return profile_filter_keys.find(ProfileFilterKey(base_key, checksum)) !=
+                  profile_filter_keys.end();
             }
         };
 
@@ -393,13 +490,13 @@
       result = ProfileAssistant::ProcessProfiles(profile_files_fd_,
                                                  reference_profile_file_fd_,
                                                  filter_fn,
-                                                 store_aggregation_counters_);
+                                                 profile_assistant_options_);
       CloseAllFds(profile_files_fd_, "profile_files_fd_");
     } else {
       result = ProfileAssistant::ProcessProfiles(profile_files_,
                                                  reference_profile_file_,
                                                  filter_fn,
-                                                 store_aggregation_counters_);
+                                                 profile_assistant_options_);
     }
     return result;
   }
@@ -408,7 +505,7 @@
     auto process_fn = [profile_filter_keys](std::unique_ptr<const DexFile>&& dex_file) {
       // Store the profile key of the location instead of the location itself.
       // This will make the matching in the profile filter method much easier.
-      profile_filter_keys->emplace(ProfileCompilationInfo::GetProfileDexFileKey(
+      profile_filter_keys->emplace(ProfileCompilationInfo::GetProfileDexFileBaseKey(
           dex_file->GetLocation()), dex_file->GetLocationChecksum());
     };
     return OpenApkFilesFromLocations(process_fn);
@@ -800,7 +897,8 @@
 
   // Find class klass_descriptor in the given dex_files and store its reference
   // in the out parameter class_ref.
-  // Return true if the definition of the class was found in any of the dex_files.
+  // Return true if the definition or a reference of the class was found in any
+  // of the dex_files.
   bool FindClass(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
                  const std::string& klass_descriptor,
                  /*out*/TypeReference* class_ref) {
@@ -825,14 +923,22 @@
         continue;
       }
       dex::TypeIndex type_index = dex_file->GetIndexForTypeId(*type_id);
+      *class_ref = TypeReference(dex_file, type_index);
+
       if (dex_file->FindClassDef(type_index) == nullptr) {
         // Class is only referenced in the current dex file but not defined in it.
+        // We use its current type reference, but keep looking for its
+        // definition.
+        // Note that array classes fall into that category, as they do not have
+        // a class definition.
         continue;
       }
-      *class_ref = TypeReference(dex_file, type_index);
       return true;
     }
-    return false;
+    // If we arrive here, we haven't found a class definition. If the dex file
+    // of the class reference is not null, then we have found a type reference,
+    // and we return that to the caller.
+    return (class_ref->dex_file != nullptr);
   }
 
   // Find the method specified by method_spec in the class class_ref.
@@ -919,18 +1025,42 @@
   // Process a line defining a class or a method and its inline caches.
   // Upon success return true and add the class or the method info to profile.
   // The possible line formats are:
-  // "LJustTheCass;".
+  // "LJustTheClass;".
   // "LTestInline;->inlinePolymorphic(LSuper;)I+LSubA;,LSubB;,LSubC;".
   // "LTestInline;->inlinePolymorphic(LSuper;)I+LSubA;,LSubB;,invalid_class".
   // "LTestInline;->inlineMissingTypes(LSuper;)I+missing_types".
-  // "LTestInline;->inlineNoInlineCaches(LSuper;)I".
+  // "{annotation}LTestInline;->inlineNoInlineCaches(LSuper;)I".
   // "LTestInline;->*".
   // "invalid_class".
   // "LTestInline;->invalid_method".
   // The method and classes are searched only in the given dex files.
   bool ProcessLine(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
-                   const std::string& line,
+                   const std::string& maybe_annotated_line,
                    /*out*/ProfileCompilationInfo* profile) {
+    // First, process the annotation.
+    if (maybe_annotated_line.empty()) {
+      return true;
+    }
+    // Working line variable which will contain the user input without the annotations.
+    std::string line = maybe_annotated_line;
+
+    std::string annotation_string;
+    if (maybe_annotated_line[0] == kAnnotationStart) {
+      size_t end_pos = maybe_annotated_line.find(kAnnotationEnd, 0);
+      if (end_pos == std::string::npos || end_pos == 0) {
+        LOG(ERROR) << "Invalid line: " << maybe_annotated_line;
+        return false;
+      }
+      annotation_string = maybe_annotated_line.substr(1, end_pos - 1);
+      // Update the working line.
+      line = maybe_annotated_line.substr(end_pos + 1);
+    }
+
+    ProfileSampleAnnotation annotation = annotation_string.empty()
+        ? ProfileSampleAnnotation::kNone
+        : ProfileSampleAnnotation(annotation_string);
+
+    // Now process the rest of the lines.
     std::string klass;
     std::string method_str;
     bool is_hot = false;
@@ -979,14 +1109,7 @@
 
     if (method_str.empty() || method_str == kClassAllMethods) {
       // Start by adding the class.
-      std::set<DexCacheResolvedClasses> resolved_class_set;
       const DexFile* dex_file = class_ref.dex_file;
-      const auto& dex_resolved_classes = resolved_class_set.emplace(
-            dex_file->GetLocation(),
-            DexFileLoader::GetBaseLocation(dex_file->GetLocation()),
-            dex_file->GetLocationChecksum(),
-            dex_file->NumMethodIds());
-      dex_resolved_classes.first->AddClass(class_ref.TypeIndex());
       std::vector<ProfileMethodInfo> methods;
       if (method_str == kClassAllMethods) {
         ClassAccessor accessor(
@@ -1000,8 +1123,11 @@
         }
       }
       // TODO: Check return values?
-      profile->AddMethods(methods, static_cast<ProfileCompilationInfo::MethodHotness::Flag>(flags));
-      profile->AddClasses(resolved_class_set);
+      profile->AddMethods(
+          methods, static_cast<ProfileCompilationInfo::MethodHotness::Flag>(flags), annotation);
+      std::set<dex::TypeIndex> classes;
+      classes.insert(class_ref.TypeIndex());
+      profile->AddClassesForDex(dex_file, classes.begin(), classes.end(), annotation);
       return true;
     }
 
@@ -1053,18 +1179,42 @@
     MethodReference ref(class_ref.dex_file, method_index);
     if (is_hot) {
       profile->AddMethod(ProfileMethodInfo(ref, inline_caches),
-          static_cast<ProfileCompilationInfo::MethodHotness::Flag>(flags));
+          static_cast<ProfileCompilationInfo::MethodHotness::Flag>(flags),
+          annotation);
     }
     if (flags != 0) {
-      if (!profile->AddMethodIndex(
-          static_cast<ProfileCompilationInfo::MethodHotness::Flag>(flags), ref)) {
+      if (!profile->AddMethod(ProfileMethodInfo(ref),
+                              static_cast<ProfileCompilationInfo::MethodHotness::Flag>(flags),
+                              annotation)) {
         return false;
       }
-      DCHECK(profile->GetMethodHotness(ref).IsInProfile());
+      DCHECK(profile->GetMethodHotness(ref, annotation).IsInProfile()) << method_spec;
     }
     return true;
   }
 
+  bool ProcessBootLine(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
+                       const std::string& line,
+                       ProfileBootInfo* boot_profiling_info) {
+    const size_t method_sep_index = line.find(kMethodSep, 0);
+    std::string klass_str = line.substr(0, method_sep_index);
+    std::string method_str = line.substr(method_sep_index + kMethodSep.size());
+
+    TypeReference class_ref(/* dex_file= */ nullptr, dex::TypeIndex());
+    if (!FindClass(dex_files, klass_str, &class_ref)) {
+      LOG(WARNING) << "Could not find class: " << klass_str;
+      return false;
+    }
+
+    const uint32_t method_index = FindMethodIndex(class_ref, method_str);
+    if (method_index == dex::kDexNoIndex) {
+      LOG(WARNING) << "Could not find method: " << line;
+      return false;
+    }
+    boot_profiling_info->Add(class_ref.dex_file, method_index);
+    return true;
+  }
+
   int OpenReferenceProfile() const {
     int fd = reference_profile_file_fd_;
     if (!FdIsValid(fd)) {
@@ -1083,6 +1233,54 @@
     return fd;
   }
 
+  // Create and store a ProfileBootInfo.
+  int CreateBootProfile() {
+    // Validate parameters for this command.
+    if (apk_files_.empty() && apks_fd_.empty()) {
+      Usage("APK files must be specified");
+    }
+    if (dex_locations_.empty()) {
+      Usage("DEX locations must be specified");
+    }
+    if (reference_profile_file_.empty() && !FdIsValid(reference_profile_file_fd_)) {
+      Usage("Reference profile must be specified with --reference-profile-file or "
+            "--reference-profile-file-fd");
+    }
+    if (!profile_files_.empty() || !profile_files_fd_.empty()) {
+      Usage("Profile files should not be specified with --profile-file or "
+            "--profile-file-fd; use --reference-profile-file(-fd) instead");
+    }
+    // Open the profile output file if needed.
+    int fd = OpenReferenceProfile();
+    if (!FdIsValid(fd)) {
+        return -1;
+    }
+    // Read the user-specified list of methods.
+    std::unique_ptr<std::vector<std::string>>
+        user_lines(ReadCommentedInputFromFile<std::vector<std::string>>(
+            create_profile_from_file_.c_str(), nullptr));  // No post-processing.
+
+    // Open the dex files to look up classes and methods.
+    std::vector<std::unique_ptr<const DexFile>> dex_files;
+    OpenApkFilesFromLocations(&dex_files);
+
+    // Process the lines one by one and add the successful ones to the profile.
+    ProfileBootInfo info;
+
+    for (const auto& line : *user_lines) {
+      ProcessBootLine(dex_files, line, &info);
+    }
+
+    // Write the profile file.
+    CHECK(info.Save(fd));
+
+    if (close(fd) < 0) {
+      PLOG(WARNING) << "Failed to close descriptor";
+    }
+
+    return 0;
+  }
+
   // Creates a profile from a human friendly textual representation.
   // The expected input format is:
   //   # Classes
@@ -1136,15 +1334,19 @@
     return 0;
   }
 
-  bool ShouldCreateBootProfile() const {
+  bool ShouldCreateBootImageProfile() const {
     return generate_boot_image_profile_;
   }
 
-  int CreateBootProfile() {
-    // Open the profile output file.
-    const int reference_fd = OpenReferenceProfile();
-    if (!FdIsValid(reference_fd)) {
-      PLOG(ERROR) << "Error opening reference profile";
+  bool ShouldCreateBootProfile() const {
+    return generate_boot_profile_;
+  }
+
+  // Create and store a ProfileCompilationInfo for the boot image.
+  int CreateBootImageProfile() {
+    // Open the input profile file.
+    if (profile_files_.size() < 1) {
+      LOG(ERROR) << "At least one --profile-file must be specified.";
       return -1;
     }
     // Open the dex files.
@@ -1154,34 +1356,15 @@
       PLOG(ERROR) << "Expected dex files for creating boot profile";
       return -2;
     }
-    // Open the input profiles.
-    std::vector<std::unique_ptr<const ProfileCompilationInfo>> profiles;
-    if (!profile_files_fd_.empty()) {
-      for (int profile_file_fd : profile_files_fd_) {
-        std::unique_ptr<const ProfileCompilationInfo> profile(LoadProfile("", profile_file_fd));
-        if (profile == nullptr) {
-          return -3;
-        }
-        profiles.emplace_back(std::move(profile));
-      }
+
+    if (!GenerateBootImageProfile(dex_files,
+                                  profile_files_,
+                                  boot_image_options_,
+                                  boot_profile_out_path_,
+                                  preloaded_classes_out_path_)) {
+      LOG(ERROR) << "There was an error when generating the boot image profiles";
+      return -4;
     }
-    if (!profile_files_.empty()) {
-      for (const std::string& profile_file : profile_files_) {
-        std::unique_ptr<const ProfileCompilationInfo> profile(LoadProfile(profile_file, kInvalidFd));
-        if (profile == nullptr) {
-          return -4;
-        }
-        profiles.emplace_back(std::move(profile));
-      }
-    }
-    ProfileCompilationInfo out_profile;
-    GenerateBootImageProfile(dex_files,
-                             profiles,
-                             boot_image_options_,
-                             VLOG_IS_ON(profiler),
-                             &out_profile);
-    out_profile.Save(reference_fd);
-    close(reference_fd);
     return 0;
   }
 
@@ -1323,6 +1506,7 @@
   bool dump_only_;
   bool dump_classes_and_methods_;
   bool generate_boot_image_profile_;
+  bool generate_boot_profile_;
   int dump_output_to_fd_;
   BootImageOptions boot_image_options_;
   std::string test_profile_;
@@ -1333,7 +1517,9 @@
   uint32_t test_profile_seed_;
   uint64_t start_ns_;
   bool copy_and_update_profile_key_;
-  bool store_aggregation_counters_;
+  ProfileAssistant::Options profile_assistant_options_;
+  std::string boot_profile_out_path_;
+  std::string preloaded_classes_out_path_;
 };
 
 // See ProfileAssistant::ProcessingResult for return codes.
@@ -1355,12 +1541,15 @@
   if (profman.ShouldOnlyDumpClassesAndMethods()) {
     return profman.DumpClassesAndMethods();
   }
+  if (profman.ShouldCreateBootProfile()) {
+    return profman.CreateBootProfile();
+  }
   if (profman.ShouldCreateProfile()) {
     return profman.CreateProfile();
   }
 
-  if (profman.ShouldCreateBootProfile()) {
-    return profman.CreateBootProfile();
+  if (profman.ShouldCreateBootImageProfile()) {
+    return profman.CreateBootImageProfile();
   }
 
   if (profman.ShouldCopyAndUpdateProfileKey()) {
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 7bf662c..7e75016 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -20,9 +20,43 @@
 // (empty) body is called.
 JIT_DEBUG_REGISTER_CODE_LDFLAGS = [
     "-Wl,--keep-unique,__jit_debug_register_code",
-    "-Wl,--keep-unique,__dex_debug_register_code"
+    "-Wl,--keep-unique,__dex_debug_register_code",
 ]
 
+// These are defaults for native shared libraries that are expected to be
+// in stack traces often.
+libart_cc_defaults {
+    name: "libart_nativeunwind_defaults",
+    arch: {
+        arm: {
+            // Arm 32 bit does not produce complete exidx unwind information
+            // so keep the .debug_frame which is relatively small and does
+            // include needed unwind information.
+            // See b/132992102 and b/145790995 for details.
+            strip: {
+                keep_symbols_and_debug_frame: true,
+            },
+        },
+        // For all other architectures, leave the symbols in the shared library
+        // so that stack unwinders can produce meaningful name resolution.
+        arm64: {
+            strip: {
+                keep_symbols: true,
+            },
+        },
+        x86: {
+            strip: {
+                keep_symbols: true,
+            },
+        },
+        x86_64: {
+            strip: {
+                keep_symbols: true,
+            },
+        },
+    },
+}
+
 libart_cc_defaults {
     name: "libart_defaults",
     defaults: ["art_defaults"],
@@ -105,21 +139,16 @@
         "interpreter/shadow_frame.cc",
         "interpreter/unstarted_runtime.cc",
         "java_frame_root_info.cc",
-        "jdwp/jdwp_event.cc",
-        "jdwp/jdwp_expand_buf.cc",
-        "jdwp/jdwp_handler.cc",
-        "jdwp/jdwp_main.cc",
-        "jdwp/jdwp_request.cc",
-        "jdwp/jdwp_socket.cc",
-        "jdwp/object_registry.cc",
         "jit/debugger_interface.cc",
         "jit/jit.cc",
         "jit/jit_code_cache.cc",
+        "jit/jit_memory_region.cc",
         "jit/profiling_info.cc",
         "jit/profile_saver.cc",
         "jni/check_jni.cc",
         "jni/java_vm_ext.cc",
         "jni/jni_env_ext.cc",
+        "jni/jni_id_manager.cc",
         "jni/jni_internal.cc",
         "linear_alloc.cc",
         "managed_stack.cc",
@@ -145,6 +174,7 @@
         "native_bridge_art_interface.cc",
         "native_stack_dump.cc",
         "native/dalvik_system_DexFile.cc",
+        "native/dalvik_system_BaseDexClassLoader.cc",
         "native/dalvik_system_VMDebug.cc",
         "native/dalvik_system_VMRuntime.cc",
         "native/dalvik_system_VMStack.cc",
@@ -173,6 +203,7 @@
         "native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc",
         "native/sun_misc_Unsafe.cc",
         "non_debuggable_classes.cc",
+        "nterp_helpers.cc",
         "oat.cc",
         "oat_file.cc",
         "oat_file_assistant.cc",
@@ -186,6 +217,8 @@
         "read_barrier.cc",
         "reference_table.cc",
         "reflection.cc",
+        "reflective_handle_scope.cc",
+        "reflective_value_visitor.cc",
         "runtime.cc",
         "runtime_callbacks.cc",
         "runtime_common.cc",
@@ -195,6 +228,7 @@
         "signal_catcher.cc",
         "stack.cc",
         "stack_map.cc",
+        "string_builder_append.cc",
         "thread.cc",
         "thread_list.cc",
         "thread_pool.cc",
@@ -220,10 +254,6 @@
         "arch/arm/registers_arm.cc",
         "arch/arm64/instruction_set_features_arm64.cc",
         "arch/arm64/registers_arm64.cc",
-        "arch/mips/instruction_set_features_mips.cc",
-        "arch/mips/registers_mips.cc",
-        "arch/mips64/instruction_set_features_mips64.cc",
-        "arch/mips64/registers_mips64.cc",
         "arch/x86/instruction_set_features_x86.cc",
         "arch/x86/registers_x86.cc",
         "arch/x86_64/registers_x86_64.cc",
@@ -240,6 +270,7 @@
         "entrypoints/quick/quick_jni_entrypoints.cc",
         "entrypoints/quick/quick_lock_entrypoints.cc",
         "entrypoints/quick/quick_math_entrypoints.cc",
+        "entrypoints/quick/quick_string_builder_append_entrypoints.cc",
         "entrypoints/quick/quick_thread_entrypoints.cc",
         "entrypoints/quick/quick_throw_entrypoints.cc",
         "entrypoints/quick/quick_trampoline_entrypoints.cc",
@@ -249,6 +280,7 @@
         arm: {
             srcs: [
                 "interpreter/mterp/mterp.cc",
+                "interpreter/mterp/nterp_stub.cc",
                 ":libart_mterp.arm",
                 "arch/arm/context_arm.cc",
                 "arch/arm/entrypoints_init_arm.cc",
@@ -264,6 +296,7 @@
         arm64: {
             srcs: [
                 "interpreter/mterp/mterp.cc",
+                "interpreter/mterp/nterp_stub.cc",
                 ":libart_mterp.arm64",
                 "arch/arm64/context_arm64.cc",
                 "arch/arm64/entrypoints_init_arm64.cc",
@@ -278,6 +311,7 @@
         x86: {
             srcs: [
                 "interpreter/mterp/mterp.cc",
+                "interpreter/mterp/nterp_stub.cc",
                 ":libart_mterp.x86",
                 "arch/x86/context_x86.cc",
                 "arch/x86/entrypoints_init_x86.cc",
@@ -287,13 +321,21 @@
                 "arch/x86/thread_x86.cc",
                 "arch/x86/fault_handler_x86.cc",
             ],
+            avx: {
+                asflags: ["-DMTERP_USE_AVX"],
+            },
+            avx2: {
+                asflags: ["-DMTERP_USE_AVX"],
+            },
         },
         x86_64: {
             srcs: [
                 // Note that the fault_handler_x86.cc is not a mistake.  This file is
                 // shared between the x86 and x86_64 architectures.
                 "interpreter/mterp/mterp.cc",
+                "interpreter/mterp/nterp.cc",
                 ":libart_mterp.x86_64",
+                ":libart_mterp.x86_64ng",
                 "arch/x86_64/context_x86_64.cc",
                 "arch/x86_64/entrypoints_init_x86_64.cc",
                 "arch/x86_64/jni_entrypoints_x86_64.S",
@@ -303,39 +345,17 @@
                 "monitor_pool.cc",
                 "arch/x86/fault_handler_x86.cc",
             ],
-        },
-        mips: {
-            srcs: [
-                "interpreter/mterp/mterp.cc",
-                ":libart_mterp.mips",
-                "arch/mips/context_mips.cc",
-                "arch/mips/entrypoints_init_mips.cc",
-                "arch/mips/jni_entrypoints_mips.S",
-                "arch/mips/memcmp16_mips.S",
-                "arch/mips/quick_entrypoints_mips.S",
-                "arch/mips/thread_mips.cc",
-                "arch/mips/fault_handler_mips.cc",
-            ],
-        },
-        mips64: {
-            srcs: [
-                "interpreter/mterp/mterp.cc",
-                ":libart_mterp.mips64",
-                "arch/mips64/context_mips64.cc",
-                "arch/mips64/entrypoints_init_mips64.cc",
-                "arch/mips64/jni_entrypoints_mips64.S",
-                "arch/mips64/memcmp16_mips64.S",
-                "arch/mips64/quick_entrypoints_mips64.S",
-                "arch/mips64/thread_mips64.cc",
-                "monitor_pool.cc",
-                "arch/mips64/fault_handler_mips64.cc",
-            ],
+            avx: {
+                asflags: ["-DMTERP_USE_AVX"],
+            },
+            avx2: {
+                asflags: ["-DMTERP_USE_AVX"],
+            },
         },
     },
     target: {
         android: {
             srcs: [
-                "jdwp/jdwp_adb.cc",
                 "monitor_android.cc",
                 "runtime_android.cc",
                 "thread_android.cc",
@@ -344,7 +364,7 @@
                 "libdl_android",
             ],
             static_libs: [
-                "libz",  // For adler32.
+                "libz", // For adler32.
             ],
             cflags: [
                 // ART is allowed to link to libicuuc directly
@@ -371,7 +391,7 @@
                 "thread_linux.cc",
             ],
             shared_libs: [
-                "libz",  // For adler32.
+                "libz", // For adler32.
             ],
         },
     },
@@ -381,10 +401,6 @@
     generated_headers: ["cpp-define-generator-asm-support"],
     // export our headers so the libart-gtest targets can use it as well.
     export_generated_headers: ["cpp-define-generator-asm-support"],
-    include_dirs: [
-        "art/sigchainlib",
-        "external/zlib",
-    ],
     header_libs: [
         "art_cmdlineparser_headers",
         "cpp-define-generator-definitions",
@@ -400,13 +416,9 @@
         "liblog",
         // For common macros.
         "libbase",
+        "libunwindstack",
+        "libsigchain",
     ],
-    static: {
-        static_libs: ["libsigchain_dummy"],
-    },
-    shared: {
-        shared_libs: ["libsigchain"],
-    },
     export_include_dirs: ["."],
     // ART's macros.h depends on libbase's macros.h.
     // Note: runtime_options.h depends on cmdline. But we don't really want to export this
@@ -420,8 +432,6 @@
         "libartpalette",
         "libbacktrace",
         "libbase",
-        "libdexfile_external",  // libunwindstack dependency
-        "libdexfile_support",  // libunwindstack dependency
         "liblog",
         "libnativebridge",
         "libnativeloader",
@@ -441,6 +451,7 @@
     ],
     static_libs: [
         "libart",
+        "libdexfile_support_static",
         "libelffile",
     ],
 }
@@ -455,6 +466,7 @@
     ],
     static_libs: [
         "libartd",
+        "libdexfiled_support_static",
         "libelffiled",
     ],
 }
@@ -482,12 +494,12 @@
         "instrumentation.h",
         "indirect_reference_table.h",
         "jdwp_provider.h",
-        "jdwp/jdwp.h",
-        "jdwp/jdwp_constants.h",
+        "jni_id_type.h",
         "lock_word.h",
-        "oat.h",
+        "oat_file.h",
         "object_callbacks.h",
         "process_state.h",
+        "reflective_value_visitor.h",
         "stack.h",
         "suspend_reason.h",
         "thread.h",
@@ -504,12 +516,10 @@
 
 art_cc_library {
     name: "libart",
-    defaults: ["libart_defaults"],
-    // Leave the symbols in the shared library so that stack unwinders can
-    // produce meaningful name resolution.
-    strip: {
-        keep_symbols: true,
-    },
+    defaults: [
+        "libart_defaults",
+        "libart_nativeunwind_defaults",
+    ],
     whole_static_libs: [
     ],
     static_libs: [
@@ -518,6 +528,8 @@
     shared_libs: [
         "libartbase",
         "libdexfile",
+        // We need to eagerly load it so libdexfile_support used from libunwindstack can find it.
+        "libdexfile_external",
         "libprofile",
     ],
     export_shared_lib_headers: [
@@ -526,10 +538,14 @@
     target: {
         android: {
             lto: {
-                 thin: true,
+                thin: true,
             },
         },
     },
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_library {
@@ -546,11 +562,17 @@
     shared_libs: [
         "libartbased",
         "libdexfiled",
+        // We need to eagerly preload it, so that libunwindstack can find it.
+        // Otherwise, it would try to load the non-debug version with dlopen.
+        "libdexfiled_external",
         "libprofiled",
     ],
     export_shared_lib_headers: [
         "libdexfiled",
     ],
+    apex_available: [
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_library {
@@ -583,8 +605,6 @@
         "arch/stub_test.cc",
         "arch/arm/instruction_set_features_arm_test.cc",
         "arch/arm64/instruction_set_features_arm64_test.cc",
-        "arch/mips/instruction_set_features_mips_test.cc",
-        "arch/mips64/instruction_set_features_mips64_test.cc",
         "arch/x86/instruction_set_features_x86_test.cc",
         "arch/x86_64/instruction_set_features_x86_64_test.cc",
         "barrier_test.cc",
@@ -624,7 +644,8 @@
         "intern_table_test.cc",
         "interpreter/safe_math_test.cc",
         "interpreter/unstarted_runtime_test.cc",
-        "jdwp/jdwp_options_test.cc",
+        "jit/jit_memory_region_test.cc",
+        "jit/profile_saver_test.cc",
         "jit/profiling_info_test.cc",
         "jni/java_vm_ext_test.cc",
         "jni/jni_internal_test.cc",
@@ -658,9 +679,6 @@
     header_libs: [
         "art_cmdlineparser_headers", // For parsed_options_test.
     ],
-    include_dirs: [
-        "external/zlib",
-    ],
 }
 
 art_cc_test {
@@ -679,55 +697,72 @@
 }
 
 cc_library_headers {
-    name: "libart_runtime_headers",
+    name: "libart_runtime_headers_ndk",
     host_supported: true,
     export_include_dirs: ["."],
+    sdk_version: "current",
+
+    apex_available: [
+        "com.android.art.debug",
+        "com.android.art.release",
+    ],
 }
 
 genrule {
-  name: "libart_mterp.arm",
-  out: ["mterp_arm.S"],
-  srcs: ["interpreter/mterp/arm/*.S"],
-  tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
-  cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+    name: "libart_mterp.arm",
+    out: ["mterp_arm.S"],
+    srcs: ["interpreter/mterp/arm/*.S"],
+    tool_files: [
+        "interpreter/mterp/gen_mterp.py",
+        "interpreter/mterp/common/gen_setup.py",
+    ],
+    cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
 }
 
 genrule {
-  name: "libart_mterp.arm64",
-  out: ["mterp_arm64.S"],
-  srcs: ["interpreter/mterp/arm64/*.S"],
-  tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
-  cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+    name: "libart_mterp.arm64",
+    out: ["mterp_arm64.S"],
+    srcs: ["interpreter/mterp/arm64/*.S"],
+    tool_files: [
+        "interpreter/mterp/gen_mterp.py",
+        "interpreter/mterp/common/gen_setup.py",
+    ],
+    cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
 }
 
 genrule {
-  name: "libart_mterp.mips",
-  out: ["mterp_mips.S"],
-  srcs: ["interpreter/mterp/mips/*.S"],
-  tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
-  cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+    name: "libart_mterp.x86",
+    out: ["mterp_x86.S"],
+    srcs: ["interpreter/mterp/x86/*.S"],
+    tool_files: [
+        "interpreter/mterp/gen_mterp.py",
+        "interpreter/mterp/common/gen_setup.py",
+    ],
+    cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
 }
 
 genrule {
-  name: "libart_mterp.mips64",
-  out: ["mterp_mips64.S"],
-  srcs: ["interpreter/mterp/mips64/*.S"],
-  tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
-  cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+    name: "libart_mterp.x86_64",
+    out: ["mterp_x86_64.S"],
+    srcs: ["interpreter/mterp/x86_64/*.S"],
+    tool_files: [
+        "interpreter/mterp/gen_mterp.py",
+        "interpreter/mterp/common/gen_setup.py",
+    ],
+    cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
 }
 
 genrule {
-  name: "libart_mterp.x86",
-  out: ["mterp_x86.S"],
-  srcs: ["interpreter/mterp/x86/*.S"],
-  tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
-  cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
-}
-
-genrule {
-  name: "libart_mterp.x86_64",
-  out: ["mterp_x86_64.S"],
-  srcs: ["interpreter/mterp/x86_64/*.S"],
-  tool_files: ["interpreter/mterp/gen_mterp.py", "interpreter/mterp/common/gen_setup.py"],
-  cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
+    name: "libart_mterp.x86_64ng",
+    out: ["mterp_x86_64ng.S"],
+    srcs: [
+        "interpreter/mterp/x86_64ng/*.S",
+        "interpreter/mterp/x86_64/arithmetic.S",
+        "interpreter/mterp/x86_64/floating_point.S",
+    ],
+    tool_files: [
+        "interpreter/mterp/gen_mterp.py",
+        "interpreter/mterp/common/gen_setup.py",
+    ],
+    cmd: "$(location interpreter/mterp/gen_mterp.py) $(out) $(in)",
 }
diff --git a/runtime/aot_class_linker.cc b/runtime/aot_class_linker.cc
index c9ca4c9..a2f450b 100644
--- a/runtime/aot_class_linker.cc
+++ b/runtime/aot_class_linker.cc
@@ -19,6 +19,7 @@
 #include "class_status.h"
 #include "compiler_callbacks.h"
 #include "dex/class_reference.h"
+#include "gc/heap.h"
 #include "handle_scope-inl.h"
 #include "mirror/class-inl.h"
 #include "runtime.h"
@@ -31,22 +32,52 @@
 
 AotClassLinker::~AotClassLinker() {}
 
+bool AotClassLinker::CanAllocClass() {
+  // AllocClass doesn't work under transaction, so we abort.
+  if (Runtime::Current()->IsActiveTransaction()) {
+    Runtime::Current()->AbortTransactionAndThrowAbortError(
+        Thread::Current(), "Can't resolve type within transaction.");
+    return false;
+  }
+  return ClassLinker::CanAllocClass();
+}
+
 // Wrap the original InitializeClass with creation of transaction when in strict mode.
 bool AotClassLinker::InitializeClass(Thread* self,
                                      Handle<mirror::Class> klass,
                                      bool can_init_statics,
                                      bool can_init_parents) {
   Runtime* const runtime = Runtime::Current();
-  bool strict_mode_ = runtime->IsActiveStrictTransactionMode();
+  bool strict_mode = runtime->IsActiveStrictTransactionMode();
 
   DCHECK(klass != nullptr);
   if (klass->IsInitialized() || klass->IsInitializing()) {
     return ClassLinker::InitializeClass(self, klass, can_init_statics, can_init_parents);
   }
 
+  // When compiling a boot image extension, do not initialize a class defined
+  // in a dex file belonging to the boot image we're compiling against.
+  // However, we must allow the initialization of TransactionAbortError,
+  // VerifyError, etc. outside of a transaction.
+  if (!strict_mode && runtime->GetHeap()->ObjectIsInBootImageSpace(klass->GetDexCache())) {
+    if (runtime->IsActiveTransaction()) {
+      runtime->AbortTransactionAndThrowAbortError(self, "Can't initialize " + klass->PrettyTypeOf()
+           + " because it is defined in a boot image dex file.");
+      return false;
+    }
+    CHECK(klass->IsThrowableClass()) << klass->PrettyDescriptor();
+  }
+
+  // When in strict_mode, don't initialize a class if it belongs to boot but not initialized.
+  if (strict_mode && klass->IsBootStrapClassLoaded()) {
+    runtime->AbortTransactionAndThrowAbortError(self, "Can't resolve "
+        + klass->PrettyTypeOf() + " because it is an uninitialized boot class.");
+    return false;
+  }
+
   // Don't initialize klass if it's superclass is not initialized, because superclass might abort
   // the transaction and rolled back after klass's change is commited.
-  if (strict_mode_ && !klass->IsInterface() && klass->HasSuperClass()) {
+  if (strict_mode && !klass->IsInterface() && klass->HasSuperClass()) {
     if (klass->GetSuperClass()->GetStatus() == ClassStatus::kInitializing) {
       runtime->AbortTransactionAndThrowAbortError(self, "Can't resolve "
           + klass->PrettyTypeOf() + " because it's superclass is not initialized.");
@@ -54,19 +85,18 @@
     }
   }
 
-  if (strict_mode_) {
-    runtime->EnterTransactionMode(true, klass.Get()->AsClass().Ptr());
+  if (strict_mode) {
+    runtime->EnterTransactionMode(/*strict=*/ true, klass.Get());
   }
   bool success = ClassLinker::InitializeClass(self, klass, can_init_statics, can_init_parents);
 
-  if (strict_mode_) {
+  if (strict_mode) {
     if (success) {
       // Exit Transaction if success.
       runtime->ExitTransactionMode();
     } else {
-      // If not successfully initialized, the last transaction must abort. Don't rollback
-      // immediately, leave the cleanup to compiler driver which needs abort message and exception.
-      DCHECK(runtime->IsTransactionAborted());
+      // If not successfully initialized, don't rollback immediately, leave the cleanup to compiler
+      // driver which needs abort message and exception.
       DCHECK(self->IsExceptionPending());
     }
   }
@@ -85,6 +115,9 @@
   if (old_status >= ClassStatus::kVerified) {
     return verifier::FailureKind::kNoFailure;
   }
+  if (old_status >= ClassStatus::kVerifiedNeedsAccessChecks) {
+    return verifier::FailureKind::kAccessChecksFailure;
+  }
   // Does it need to be verified at runtime? Report soft failure.
   if (old_status >= ClassStatus::kRetryVerificationAtRuntime) {
     // Error messages from here are only reported through -verbose:class. It is not worth it to
@@ -95,4 +128,119 @@
   return ClassLinker::PerformClassVerification(self, klass, log_level, error_msg);
 }
 
+bool AotClassLinker::CanReferenceInBootImageExtension(ObjPtr<mirror::Class> klass, gc::Heap* heap) {
+  // Do not allow referencing a class or instance of a class defined in a dex file
+  // belonging to the boot image we're compiling against but not itself in the boot image;
+  // or a class referencing such classes as component type, superclass or interface.
+  // Allowing this could yield duplicate class objects from multiple extensions.
+
+  if (heap->ObjectIsInBootImageSpace(klass)) {
+    return true;  // Already included in the boot image we're compiling against.
+  }
+
+  // Treat arrays and primitive types specially because they do not have a DexCache that we
+  // can use to check whether the dex file belongs to the boot image we're compiling against.
+  DCHECK(!klass->IsPrimitive());  // Primitive classes must be in the primary boot image.
+  if (klass->IsArrayClass()) {
+    DCHECK(heap->ObjectIsInBootImageSpace(klass->GetIfTable()));  // IfTable is OK.
+    // Arrays of all dimensions are tied to the dex file of the non-array component type.
+    do {
+      klass = klass->GetComponentType();
+    } while (klass->IsArrayClass());
+    if (klass->IsPrimitive()) {
+      return false;
+    }
+    // Do not allow arrays of erroneous classes (the array class is not itself erroneous).
+    if (klass->IsErroneous()) {
+      return false;
+    }
+  }
+
+  // Check the class itself.
+  if (heap->ObjectIsInBootImageSpace(klass->GetDexCache())) {
+    return false;
+  }
+
+  // Check superclasses.
+  ObjPtr<mirror::Class> superclass = klass->GetSuperClass();
+  while (!heap->ObjectIsInBootImageSpace(superclass)) {
+    DCHECK(superclass != nullptr);  // Cannot skip Object which is in the primary boot image.
+    if (heap->ObjectIsInBootImageSpace(superclass->GetDexCache())) {
+      return false;
+    }
+    superclass = superclass->GetSuperClass();
+  }
+
+  // Check IfTable. This includes direct and indirect interfaces.
+  ObjPtr<mirror::IfTable> if_table = klass->GetIfTable();
+  for (size_t i = 0, num_interfaces = klass->GetIfTableCount(); i < num_interfaces; ++i) {
+    ObjPtr<mirror::Class> interface = if_table->GetInterface(i);
+    DCHECK(interface != nullptr);
+    if (!heap->ObjectIsInBootImageSpace(interface) &&
+        heap->ObjectIsInBootImageSpace(interface->GetDexCache())) {
+      return false;
+    }
+  }
+
+  if (kIsDebugBuild) {
+    // All virtual methods must come from classes we have already checked above.
+    PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+    ObjPtr<mirror::Class> k = klass;
+    while (!heap->ObjectIsInBootImageSpace(k)) {
+      for (auto& m : k->GetVirtualMethods(pointer_size)) {
+        ObjPtr<mirror::Class> declaring_class = m.GetDeclaringClass();
+        CHECK(heap->ObjectIsInBootImageSpace(declaring_class) ||
+              !heap->ObjectIsInBootImageSpace(declaring_class->GetDexCache()));
+      }
+      k = k->GetSuperClass();
+    }
+  }
+
+  return true;
+}
+
+bool AotClassLinker::SetUpdatableBootClassPackages(const std::vector<std::string>& packages) {
+  DCHECK(updatable_boot_class_path_descriptor_prefixes_.empty());
+  // Transform package names to descriptor prefixes.
+  std::vector<std::string> prefixes;
+  prefixes.reserve(packages.size());
+  for (const std::string& package : packages) {
+    if (package.empty() || package.find('/') != std::string::npos) {
+      LOG(ERROR) << "Invalid package name: " << package;
+      return false;
+    }
+    std::string prefix = 'L' + package + '/';
+    std::replace(prefix.begin(), prefix.end(), '.', '/');
+    prefixes.push_back(std::move(prefix));
+  }
+  // Sort and remove unnecessary prefixes.
+  std::sort(prefixes.begin(), prefixes.end());
+  std::string last_prefix;
+  auto end_it = std::remove_if(
+      prefixes.begin(),
+      prefixes.end(),
+      [&last_prefix](const std::string& s) {
+        if (!last_prefix.empty() && StartsWith(s, last_prefix)) {
+          return true;
+        } else {
+          last_prefix = s;
+          return false;
+        }
+      });
+  prefixes.resize(std::distance(prefixes.begin(), end_it));
+  prefixes.shrink_to_fit();
+  updatable_boot_class_path_descriptor_prefixes_.swap(prefixes);
+  return true;
+}
+
+bool AotClassLinker::IsUpdatableBootClassPathDescriptor(const char* descriptor) {
+  std::string_view descriptor_sv(descriptor);
+  for (const std::string& prefix : updatable_boot_class_path_descriptor_prefixes_) {
+    if (StartsWith(descriptor_sv, prefix)) {
+      return true;
+    }
+  }
+  return false;
+}
+
 }  // namespace art
diff --git a/runtime/aot_class_linker.h b/runtime/aot_class_linker.h
index 6a8133e..76984bd 100644
--- a/runtime/aot_class_linker.h
+++ b/runtime/aot_class_linker.h
@@ -20,6 +20,11 @@
 #include "class_linker.h"
 
 namespace art {
+
+namespace gc {
+class Heap;
+}  // namespace gc
+
 // AotClassLinker is only used for AOT compiler, which includes some logic for class initialization
 // which will only be used in pre-compilation.
 class AotClassLinker : public ClassLinker {
@@ -27,6 +32,11 @@
   explicit AotClassLinker(InternTable *intern_table);
   ~AotClassLinker();
 
+  static bool CanReferenceInBootImageExtension(ObjPtr<mirror::Class> klass, gc::Heap* heap)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  bool SetUpdatableBootClassPackages(const std::vector<std::string>& packages);
+
  protected:
   // Overridden version of PerformClassVerification allows skipping verification if the class was
   // previously verified but unloaded.
@@ -37,6 +47,13 @@
       override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Override AllocClass because aot compiler will need to perform a transaction check to determine
+  // can we allocate class from heap.
+  bool CanAllocClass()
+      override
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!Roles::uninterruptible_);
+
   bool InitializeClass(Thread *self,
                        Handle<mirror::Class> klass,
                        bool can_run_clinit,
@@ -44,7 +61,13 @@
       override
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_);
+
+  bool IsUpdatableBootClassPathDescriptor(const char* descriptor) override;
+
+ private:
+  std::vector<std::string> updatable_boot_class_path_descriptor_prefixes_;
 };
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_AOT_CLASS_LINKER_H_
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 12ad84b..23213d9 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -89,48 +89,6 @@
 #undef BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_OFFSET
 }  // namespace arm64
 
-namespace mips {
-#include "arch/mips/asm_support_mips.h"
-static constexpr size_t kFrameSizeSaveAllCalleeSaves = FRAME_SIZE_SAVE_ALL_CALLEE_SAVES;
-#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVES
-static constexpr size_t kFrameSizeSaveRefsOnly = FRAME_SIZE_SAVE_REFS_ONLY;
-#undef FRAME_SIZE_SAVE_REFS_ONLY
-static constexpr size_t kFrameSizeSaveRefsAndArgs = FRAME_SIZE_SAVE_REFS_AND_ARGS;
-#undef FRAME_SIZE_SAVE_REFS_AND_ARGS
-static constexpr size_t kFrameSizeSaveEverythingForClinit = FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT;
-#undef FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT
-static constexpr size_t kFrameSizeSaveEverythingForSuspendCheck =
-    FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK;
-#undef FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK
-static constexpr size_t kFrameSizeSaveEverything = FRAME_SIZE_SAVE_EVERYTHING;
-#undef FRAME_SIZE_SAVE_EVERYTHING
-#undef BAKER_MARK_INTROSPECTION_REGISTER_COUNT
-#undef BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE
-#undef BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET
-#undef BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE
-}  // namespace mips
-
-namespace mips64 {
-#include "arch/mips64/asm_support_mips64.h"
-static constexpr size_t kFrameSizeSaveAllCalleeSaves = FRAME_SIZE_SAVE_ALL_CALLEE_SAVES;
-#undef FRAME_SIZE_SAVE_ALL_CALLEE_SAVES
-static constexpr size_t kFrameSizeSaveRefsOnly = FRAME_SIZE_SAVE_REFS_ONLY;
-#undef FRAME_SIZE_SAVE_REFS_ONLY
-static constexpr size_t kFrameSizeSaveRefsAndArgs = FRAME_SIZE_SAVE_REFS_AND_ARGS;
-#undef FRAME_SIZE_SAVE_REFS_AND_ARGS
-static constexpr size_t kFrameSizeSaveEverythingForClinit = FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT;
-#undef FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT
-static constexpr size_t kFrameSizeSaveEverythingForSuspendCheck =
-    FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK;
-#undef FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK
-static constexpr size_t kFrameSizeSaveEverything = FRAME_SIZE_SAVE_EVERYTHING;
-#undef FRAME_SIZE_SAVE_EVERYTHING
-#undef BAKER_MARK_INTROSPECTION_REGISTER_COUNT
-#undef BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE
-#undef BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET
-#undef BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE
-}  // namespace mips64
-
 namespace x86 {
 #include "arch/x86/asm_support_x86.h"
 static constexpr size_t kFrameSizeSaveAllCalleeSaves = FRAME_SIZE_SAVE_ALL_CALLEE_SAVES;
@@ -183,8 +141,6 @@
   }
 TEST_ARCH(Arm, arm)
 TEST_ARCH(Arm64, arm64)
-TEST_ARCH(Mips, mips)
-TEST_ARCH(Mips64, mips64)
 TEST_ARCH(X86, x86)
 TEST_ARCH(X86_64, x86_64)
 
diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S
index eeac743..5b51e51 100644
--- a/runtime/arch/arm/asm_support_arm.S
+++ b/runtime/arch/arm/asm_support_arm.S
@@ -54,7 +54,7 @@
 // Common ENTRY declaration code for ARM and thumb, an ENTRY should always be paired with an END.
 // Declares the RUNTIME_CURRENT[123] macros that can be used within an ENTRY and will have literals
 // generated at END.
-.macro DEF_ENTRY thumb_or_arm, name
+.macro DEF_ENTRY thumb_or_arm, name, alignment
     \thumb_or_arm
 // Clang ignores .thumb_func and requires an explicit .thumb. Investigate whether we should still
 // carry around the .thumb_func.
@@ -64,8 +64,12 @@
     .type \name, #function
     .hidden \name  // Hide this as a global symbol, so we do not incur plt calls.
     .global \name
+    // ART-compiled functions have OatQuickMethodHeader but assembly funtions do not.
+    // Prefix the assembly code with 0xFFs, which means there is no method header.
+    .byte 0xFF, 0xFF, 0xFF, 0xFF
     // Cache alignment for function entry.
-    .balign 16
+    // NB: 0xFF because there is a bug in balign where 0x00 creates nop instructions.
+    .balign \alignment, 0xFF
 \name:
     .cfi_startproc
     .fnstart
@@ -88,12 +92,15 @@
 
 // A thumb2 style ENTRY.
 .macro ENTRY name
-    DEF_ENTRY .thumb_func, \name
+    DEF_ENTRY .thumb_func, \name, 16
+.endm
+.macro ENTRY_ALIGNED name, alignment
+    DEF_ENTRY .thumb_func, \name, \alignment
 .endm
 
 // A ARM style ENTRY.
 .macro ARM_ENTRY name
-    DEF_ENTRY .arm, \name
+    DEF_ENTRY .arm, \name, 16
 .endm
 
 // Terminate an ENTRY and generate GOT_PREL references.
@@ -142,4 +149,124 @@
 #endif  // USE_HEAP_POISONING
 .endm
 
+// Macro to refresh the Marking Register (R8).
+//
+// This macro must be called at the end of functions implementing
+// entrypoints that possibly (directly or indirectly) perform a
+// suspend check (before they return).
+.macro REFRESH_MARKING_REGISTER
+#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
+    ldr rMR, [rSELF, #THREAD_IS_GC_MARKING_OFFSET]
+#endif
+.endm
+
+    /*
+     * Macro that sets up the callee save frame to conform with
+     * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs), except for storing the method.
+     */
+.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
+    // Note: We could avoid saving R8 in the case of Baker read
+    // barriers, as it is overwritten by REFRESH_MARKING_REGISTER
+    // later; but it's not worth handling this special case.
+    push {r1-r3, r5-r8, r10-r11, lr}   @ 10 words of callee saves and args.
+    .cfi_adjust_cfa_offset 40
+    .cfi_rel_offset r1, 0
+    .cfi_rel_offset r2, 4
+    .cfi_rel_offset r3, 8
+    .cfi_rel_offset r5, 12
+    .cfi_rel_offset r6, 16
+    .cfi_rel_offset r7, 20
+    .cfi_rel_offset r8, 24
+    .cfi_rel_offset r10, 28
+    .cfi_rel_offset r11, 32
+    .cfi_rel_offset lr, 36
+    vpush {s0-s15}                     @ 16 words of float args.
+    .cfi_adjust_cfa_offset 64
+    sub sp, #8                         @ 2 words of space, alignment padding and Method*
+    .cfi_adjust_cfa_offset 8
+    // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 40 + 64 + 8)
+#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(ARM) size not as expected."
+#endif
+.endm
+
+.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME
+    add  sp, #8                      @ rewind sp
+    .cfi_adjust_cfa_offset -8
+    vpop {s0-s15}
+    .cfi_adjust_cfa_offset -64
+    // Note: Likewise, we could avoid restoring R8 in the case of Baker
+    // read barriers, as it is overwritten by REFRESH_MARKING_REGISTER
+    // later; but it's not worth handling this special case.
+    pop {r1-r3, r5-r8, r10-r11, lr}  @ 10 words of callee saves and args.
+    .cfi_restore r1
+    .cfi_restore r2
+    .cfi_restore r3
+    .cfi_restore r5
+    .cfi_restore r6
+    .cfi_restore r7
+    .cfi_restore r8
+    .cfi_restore r10
+    .cfi_restore r11
+    .cfi_restore lr
+    .cfi_adjust_cfa_offset -40
+.endm
+
+    /*
+     * Macro to spill the GPRs.
+     */
+.macro SPILL_ALL_CALLEE_SAVE_GPRS
+    push {r4-r11, lr}                             @ 9 words (36 bytes) of callee saves.
+    .cfi_adjust_cfa_offset 36
+    .cfi_rel_offset r4, 0
+    .cfi_rel_offset r5, 4
+    .cfi_rel_offset r6, 8
+    .cfi_rel_offset r7, 12
+    .cfi_rel_offset r8, 16
+    .cfi_rel_offset r9, 20
+    .cfi_rel_offset r10, 24
+    .cfi_rel_offset r11, 28
+    .cfi_rel_offset lr, 32
+.endm
+
+    /*
+     * Macro that sets up the callee save frame to conform with
+     * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
+     */
+.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME rTemp
+    SPILL_ALL_CALLEE_SAVE_GPRS                    @ 9 words (36 bytes) of callee saves.
+    vpush {s16-s31}                               @ 16 words (64 bytes) of floats.
+    .cfi_adjust_cfa_offset 64
+    sub sp, #12                                   @ 3 words of space, bottom word will hold Method*
+    .cfi_adjust_cfa_offset 12
+    RUNTIME_CURRENT1 \rTemp                       @ Load Runtime::Current into rTemp.
+    @ Load kSaveAllCalleeSaves Method* into rTemp.
+    ldr \rTemp, [\rTemp, #RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET]
+    str \rTemp, [sp, #0]                          @ Place Method* at bottom of stack.
+    str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
+
+     // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 36 + 64 + 12)
+#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(ARM) size not as expected."
+#endif
+.endm
+
+    /*
+     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+     * exception is Thread::Current()->exception_ when the runtime method frame is ready.
+     */
+.macro DELIVER_PENDING_EXCEPTION_FRAME_READY
+    mov    r0, rSELF                           @ pass Thread::Current
+    bl     artDeliverPendingExceptionFromCode  @ artDeliverPendingExceptionFromCode(Thread*)
+.endm
+
+    /*
+     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+     * exception is Thread::Current()->exception_.
+     */
+.macro DELIVER_PENDING_EXCEPTION
+    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r0       @ save callee saves for throw
+    DELIVER_PENDING_EXCEPTION_FRAME_READY
+.endm
+
 #endif  // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_
diff --git a/runtime/arch/arm/fault_handler_arm.cc b/runtime/arch/arm/fault_handler_arm.cc
index e186cd3..4e7d64c 100644
--- a/runtime/arch/arm/fault_handler_arm.cc
+++ b/runtime/arch/arm/fault_handler_arm.cc
@@ -45,9 +45,12 @@
   return instr_size;
 }
 
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED,
+                                             void* context,
                                              ArtMethod** out_method,
-                                             uintptr_t* out_return_pc, uintptr_t* out_sp) {
+                                             uintptr_t* out_return_pc,
+                                             uintptr_t* out_sp,
+                                             bool* out_is_stack_overflow) {
   struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
   struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
   *out_sp = static_cast<uintptr_t>(sc->arm_sp);
@@ -63,9 +66,11 @@
       reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kArm));
   if (overflow_addr == fault_addr) {
     *out_method = reinterpret_cast<ArtMethod*>(sc->arm_r0);
+    *out_is_stack_overflow = true;
   } else {
     // The method is at the top of the stack.
     *out_method = reinterpret_cast<ArtMethod*>(reinterpret_cast<uintptr_t*>(*out_sp)[0]);
+    *out_is_stack_overflow = false;
   }
 
   // Work out the return PC.  This will be the address of the instruction
diff --git a/runtime/arch/arm/jni_entrypoints_arm.S b/runtime/arch/arm/jni_entrypoints_arm.S
index 0e00f34..ceef772 100644
--- a/runtime/arch/arm/jni_entrypoints_arm.S
+++ b/runtime/arch/arm/jni_entrypoints_arm.S
@@ -20,6 +20,7 @@
      * Jni dlsym lookup stub.
      */
     .extern artFindNativeMethod
+    .extern artFindNativeMethodRunnable
 ENTRY art_jni_dlsym_lookup_stub
     push   {r0, r1, r2, r3, lr}           @ spill regs
     .cfi_adjust_cfa_offset 20
@@ -30,8 +31,23 @@
     .cfi_rel_offset lr, 16
     sub    sp, #12                        @ pad stack pointer to align frame
     .cfi_adjust_cfa_offset 12
+
+    mov    r0, rSELF                      @ pass Thread::Current()
+    // Call artFindNativeMethod() for normal native and artFindNativeMethodRunnable()
+    // for @FastNative or @CriticalNative.
+    ldr    ip, [r0, #THREAD_TOP_QUICK_FRAME_OFFSET]   // uintptr_t tagged_quick_frame
+    bic    ip, #1                                     // ArtMethod** sp
+    ldr    ip, [ip]                                   // ArtMethod* method
+    ldr    ip, [ip, #ART_METHOD_ACCESS_FLAGS_OFFSET]  // uint32_t access_flags
+    tst    ip, #(ACCESS_FLAGS_METHOD_IS_FAST_NATIVE | ACCESS_FLAGS_METHOD_IS_CRITICAL_NATIVE)
+    bne    .Llookup_stub_fast_native
     blx    artFindNativeMethod
+    b      .Llookup_stub_continue
+.Llookup_stub_fast_native:
+    blx    artFindNativeMethodRunnable
+.Llookup_stub_continue:
     mov    r12, r0                        @ save result in r12
+
     add    sp, #12                        @ restore stack pointer
     .cfi_adjust_cfa_offset -12
     cbz    r0, 1f                         @ is method code null?
@@ -46,3 +62,188 @@
 1:
     pop    {r0, r1, r2, r3, pc}           @ restore regs and return to caller to handle exception
 END art_jni_dlsym_lookup_stub
+
+ENTRY art_jni_dlsym_lookup_critical_stub
+    // The hidden arg holding the tagged method (bit 0 set means GenericJNI) is r4.
+    // For Generic JNI we already have a managed frame, so we reuse the art_jni_dlsym_lookup_stub.
+    tst    r4, #1
+    bne art_jni_dlsym_lookup_stub
+
+    // We need to create a GenericJNI managed frame above the stack args.
+
+    // GenericJNI frame is similar to SaveRegsAndArgs frame with the native method
+    // instead of runtime method saved at the bottom. Note that the runtime shall
+    // not examine the args here, otherwise we would have to move them in registers
+    // and stack to account for the difference between managed and native ABIs.
+    SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
+    // Save the hidden arg as method pointer, r0 in the padding.
+    // (r0 is an arg in native ABI but not considered an arg in managed ABI.)
+    strd   r4, r0, [sp]
+
+    // Call artCriticalNativeOutArgsSize(method)
+    mov    r0, r4  // r0 := method (from hidden arg)
+    bl     artCriticalNativeOutArgsSize
+
+    // Check if we have any stack args.
+    cbnz   r0, .Lcritical_has_stack_args
+
+    // Without stack args, the frame is fully constructed.
+    // Place tagged managed sp in Thread::Current()->top_quick_frame.
+    mov    ip, sp
+    orr    ip, #1  // Tag as GenericJNI frame.
+    str    ip, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
+
+    // Call artFindNativeMethodRunnable()
+    mov    r0, rSELF   // pass Thread::Current()
+    bl     artFindNativeMethodRunnable
+
+    // Store result in scratch reg.
+    mov    ip, r0
+
+    // Restore frame.
+    .cfi_remember_state
+    ldrd   r4, r0, [sp]
+    RESTORE_SAVE_REFS_AND_ARGS_FRAME
+    REFRESH_MARKING_REGISTER
+
+    // Check for exception.
+    cmp    ip, #0
+    beq    .Lcritical_deliver_exception
+
+    // Do the tail call.
+    bx     ip
+    .cfi_restore_state
+    .cfi_def_cfa_offset FRAME_SIZE_SAVE_REFS_AND_ARGS
+
+.Lcritical_has_stack_args:
+    // Move the out args size to a scratch register.
+    mov    ip, r0
+
+    // Restore register args as we're about to move stack args.
+    ldrd   r4, r0, [sp]
+    RESTORE_SAVE_REFS_AND_ARGS_FRAME
+
+    // Reserve space for SaveRefsAndArgs frame.
+    sub sp, #FRAME_SIZE_SAVE_REFS_AND_ARGS
+    .cfi_adjust_cfa_offset FRAME_SIZE_SAVE_REFS_AND_ARGS
+
+    // Save arg regs so that we can use them as temporaries.
+    push   {r0-r3}
+    .cfi_adjust_cfa_offset 16
+
+    // Move out args. For simplicity include the return address at the end.
+    add    r0, sp, #16   // Destination.
+    add    ip, r0, ip    // Destination end.
+1:
+    ldrd   r2, r3, [r0, #FRAME_SIZE_SAVE_REFS_AND_ARGS]
+    strd   r2, r3, [r0], #8
+    cmp    r0, ip
+    bne    1b
+
+    // Save our LR, load caller's LR and redefine CFI to take ownership of the JNI stub frame.
+    str    lr, [ip, #-__SIZEOF_POINTER__]
+    mov    lr, r3  // The last moved value from the loop above.
+    .cfi_def_cfa ip, FRAME_SIZE_SAVE_REFS_AND_ARGS
+
+    // Restore arg regs.
+    pop    {r0-r3}  // No `.cfi_adjust_cfa_offset`, CFA register is currently ip, not sp.
+
+    // Re-create the SaveRefsAndArgs frame above the args.
+    strd   r4, r0, [ip]  // r0 in the padding as before.
+    add    r4, ip, #(FRAME_SIZE_SAVE_REFS_AND_ARGS - 40)
+    stmia  r4, {r1-r3, r5-r8, r10-r11, lr}   @ 10 words of callee saves and args.
+    .cfi_rel_offset r1, FRAME_SIZE_SAVE_REFS_AND_ARGS - 40 + 0
+    .cfi_rel_offset r2, FRAME_SIZE_SAVE_REFS_AND_ARGS - 40 + 4
+    .cfi_rel_offset r3, FRAME_SIZE_SAVE_REFS_AND_ARGS - 40 + 8
+    .cfi_rel_offset r5, FRAME_SIZE_SAVE_REFS_AND_ARGS - 40 + 12
+    .cfi_rel_offset r6, FRAME_SIZE_SAVE_REFS_AND_ARGS - 40 + 16
+    .cfi_rel_offset r7, FRAME_SIZE_SAVE_REFS_AND_ARGS - 40 + 20
+    .cfi_rel_offset r8, FRAME_SIZE_SAVE_REFS_AND_ARGS - 40 + 24
+    .cfi_rel_offset r10, FRAME_SIZE_SAVE_REFS_AND_ARGS - 40 + 28
+    .cfi_rel_offset r11, FRAME_SIZE_SAVE_REFS_AND_ARGS - 40 + 32
+    .cfi_rel_offset lr, FRAME_SIZE_SAVE_REFS_AND_ARGS - 40 + 36
+    vstmdb r4!, {s0-s15}                     @ 16 words of float args.
+
+    // Move the frame register to a callee-save register.
+    mov    r11, ip
+    .cfi_def_cfa_register r11
+
+    // Place tagged managed sp in Thread::Current()->top_quick_frame.
+    orr    ip, r11, #1  // Tag as GenericJNI frame.
+    str    ip, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
+
+    // Call artFindNativeMethodRunnable()
+    mov    r0, rSELF   // pass Thread::Current()
+    bl     artFindNativeMethodRunnable
+
+    // Store result in scratch reg.
+    mov    ip, r0
+
+    // Restore the frame. We shall not need the method anymore, so use r4 as scratch register.
+    mov    r4, r11
+    .cfi_def_cfa_register r4
+    ldr    r0, [r4, #4]
+    add    r11, r4, #(FRAME_SIZE_SAVE_REFS_AND_ARGS - 40 - 64)
+    vldmia r11!, {s0-s15}                    @ 16 words of float args.
+    ldmia  r11, {r1-r3, r5-r8, r10-r11, lr}  @ 10 words of callee saves and args.
+    .cfi_restore r1
+    .cfi_restore r2
+    .cfi_restore r3
+    .cfi_restore r5
+    .cfi_restore r6
+    .cfi_restore r7
+    .cfi_restore r8
+    .cfi_restore r10
+    .cfi_restore r11
+    .cfi_restore lr
+    REFRESH_MARKING_REGISTER
+
+    // Check for exception.
+    cmp    ip, #0
+    beq    3f
+
+    // Save arg regs so that we can use them as temporaries.
+    push   {r0-r3}  // No `.cfi_adjust_cfa_offset`, CFA register is currently r4, not sp.
+
+    // Move stack args to their original place.
+    mov    r0, r4
+    add    r1, sp, #16
+2:
+    ldrd   r2, r3, [r0, #-8]!
+    strd   r2, r3, [r0, #FRAME_SIZE_SAVE_REFS_AND_ARGS]
+    cmp    r1, r0
+    bne    2b
+
+    // Replace original return address with caller's return address.
+    ldr    r1, [r4, #(FRAME_SIZE_SAVE_REFS_AND_ARGS - __SIZEOF_POINTER__)]
+    str    lr, [r4, #(FRAME_SIZE_SAVE_REFS_AND_ARGS - __SIZEOF_POINTER__)]
+
+    // Restore LR and redefine CFI to release ownership of the JNI stub frame.
+    .cfi_remember_state
+    mov    lr, r1
+    .cfi_def_cfa sp, FRAME_SIZE_SAVE_REFS_AND_ARGS + 16
+
+    // Restore args
+    pop    {r0-r3}
+    .cfi_adjust_cfa_offset -16
+
+    // Remove the frame reservation.
+    add    sp, #FRAME_SIZE_SAVE_REFS_AND_ARGS
+    .cfi_adjust_cfa_offset -FRAME_SIZE_SAVE_REFS_AND_ARGS
+
+    // Do the tail call.
+    bx     ip
+    .cfi_restore_state
+    .cfi_def_cfa r4, FRAME_SIZE_SAVE_REFS_AND_ARGS
+
+3:
+    // Drop stack args and the SaveRefsAndArgs reservation.
+    mov    sp, r4
+    add    sp, #FRAME_SIZE_SAVE_REFS_AND_ARGS
+    .cfi_def_cfa sp, 0
+
+.Lcritical_deliver_exception:
+    // When delivering exception, we check that rSELF was saved but the SaveRefsAndArgs frame does
+    // not save it, so we cannot use DELIVER_PENDING_EXCEPTION_FRAME_READY with the above frames.
+    DELIVER_PENDING_EXCEPTION
+END art_jni_dlsym_lookup_critical_stub
diff --git a/runtime/arch/arm/jni_frame_arm.h b/runtime/arch/arm/jni_frame_arm.h
new file mode 100644
index 0000000..5203eaf
--- /dev/null
+++ b/runtime/arch/arm/jni_frame_arm.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_ARM_JNI_FRAME_ARM_H_
+#define ART_RUNTIME_ARCH_ARM_JNI_FRAME_ARM_H_
+
+#include <string.h>
+
+#include "arch/instruction_set.h"
+#include "base/bit_utils.h"
+#include "base/globals.h"
+#include "base/logging.h"
+
+namespace art {
+namespace arm {
+
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
+static_assert(kArmPointerSize == PointerSize::k32, "Unexpected ARM pointer size");
+
+// The AAPCS requires 8-byte alignment. This is not as strict as the Managed ABI stack alignment.
+static constexpr size_t kAapcsStackAlignment = 8u;
+static_assert(kAapcsStackAlignment < kStackAlignment);
+
+// How many registers can be used for passing arguments.
+// Note: AAPCS is soft-float, so these are all core registers.
+constexpr size_t kJniArgumentRegisterCount = 4u;
+
+// Get the size of "out args" for @CriticalNative method stub.
+// This must match the size of the frame emitted by the JNI compiler at the native call site.
+inline size_t GetCriticalNativeOutArgsSize(const char* shorty, uint32_t shorty_len) {
+  DCHECK_EQ(shorty_len, strlen(shorty));
+
+  size_t reg = 0;  // Register for the current argument; if reg >= 4, we shall use stack.
+  for (size_t i = 1; i != shorty_len; ++i) {
+    if (shorty[i] == 'J' || shorty[i] == 'D') {
+      // 8-byte args need to start in even-numbered register or at aligned stack position.
+      reg += (reg & 1);
+      // Count first word and let the common path count the second.
+      reg += 1u;
+    }
+    reg += 1u;
+  }
+  size_t stack_args = std::max(reg, kJniArgumentRegisterCount) - kJniArgumentRegisterCount;
+  size_t size = kFramePointerSize * stack_args;
+
+  // Check if this is a tail call, i.e. there are no stack args and the return type
+  // is not an FP type (otherwise we need to move the result to FP register).
+  // No need to sign/zero extend small return types thanks to AAPCS.
+  if (size != 0u || shorty[0] == 'F' || shorty[0] == 'D') {
+    size += kFramePointerSize;  // We need to spill LR with the args.
+  }
+  return RoundUp(size, kAapcsStackAlignment);
+}
+
+}  // namespace arm
+}  // namespace art
+
+#endif  // ART_RUNTIME_ARCH_ARM_JNI_FRAME_ARM_H_
+
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index b57e119..f94694d 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -25,45 +25,6 @@
     .extern artDeliverPendingException
 
     /*
-     * Macro to spill the GPRs.
-     */
-.macro SPILL_ALL_CALLEE_SAVE_GPRS
-    push {r4-r11, lr}                             @ 9 words (36 bytes) of callee saves.
-    .cfi_adjust_cfa_offset 36
-    .cfi_rel_offset r4, 0
-    .cfi_rel_offset r5, 4
-    .cfi_rel_offset r6, 8
-    .cfi_rel_offset r7, 12
-    .cfi_rel_offset r8, 16
-    .cfi_rel_offset r9, 20
-    .cfi_rel_offset r10, 24
-    .cfi_rel_offset r11, 28
-    .cfi_rel_offset lr, 32
-.endm
-
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
-     */
-.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME rTemp
-    SPILL_ALL_CALLEE_SAVE_GPRS                    @ 9 words (36 bytes) of callee saves.
-    vpush {s16-s31}                               @ 16 words (64 bytes) of floats.
-    .cfi_adjust_cfa_offset 64
-    sub sp, #12                                   @ 3 words of space, bottom word will hold Method*
-    .cfi_adjust_cfa_offset 12
-    RUNTIME_CURRENT1 \rTemp                       @ Load Runtime::Current into rTemp.
-    @ Load kSaveAllCalleeSaves Method* into rTemp.
-    ldr \rTemp, [\rTemp, #RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET]
-    str \rTemp, [sp, #0]                          @ Place Method* at bottom of stack.
-    str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
-
-     // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 36 + 64 + 12)
-#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(ARM) size not as expected."
-#endif
-.endm
-
-    /*
      * Macro that sets up the callee save frame to conform with
      * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly).
      */
@@ -111,36 +72,6 @@
     .cfi_adjust_cfa_offset -28
 .endm
 
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs).
-     */
-.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
-    // Note: We could avoid saving R8 in the case of Baker read
-    // barriers, as it is overwritten by REFRESH_MARKING_REGISTER
-    // later; but it's not worth handling this special case.
-    push {r1-r3, r5-r8, r10-r11, lr}   @ 10 words of callee saves and args.
-    .cfi_adjust_cfa_offset 40
-    .cfi_rel_offset r1, 0
-    .cfi_rel_offset r2, 4
-    .cfi_rel_offset r3, 8
-    .cfi_rel_offset r5, 12
-    .cfi_rel_offset r6, 16
-    .cfi_rel_offset r7, 20
-    .cfi_rel_offset r8, 24
-    .cfi_rel_offset r10, 28
-    .cfi_rel_offset r11, 32
-    .cfi_rel_offset lr, 36
-    vpush {s0-s15}                     @ 16 words of float args.
-    .cfi_adjust_cfa_offset 64
-    sub sp, #8                         @ 2 words of space, alignment padding and Method*
-    .cfi_adjust_cfa_offset 8
-    // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 40 + 64 + 8)
-#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(ARM) size not as expected."
-#endif
-.endm
-
 .macro SETUP_SAVE_REFS_AND_ARGS_FRAME rTemp
     SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
     RUNTIME_CURRENT3 \rTemp                       @ Load Runtime::Current into rTemp.
@@ -156,28 +87,6 @@
     str sp, [rSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]  @ Place sp in Thread::Current()->top_quick_frame.
 .endm
 
-.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME
-    add  sp, #8                      @ rewind sp
-    .cfi_adjust_cfa_offset -8
-    vpop {s0-s15}
-    .cfi_adjust_cfa_offset -64
-    // Note: Likewise, we could avoid restoring X20 in the case of Baker
-    // read barriers, as it is overwritten by REFRESH_MARKING_REGISTER
-    // later; but it's not worth handling this special case.
-    pop {r1-r3, r5-r8, r10-r11, lr}  @ 10 words of callee saves
-    .cfi_restore r1
-    .cfi_restore r2
-    .cfi_restore r3
-    .cfi_restore r5
-    .cfi_restore r6
-    .cfi_restore r7
-    .cfi_restore r8
-    .cfi_restore r10
-    .cfi_restore r11
-    .cfi_restore lr
-    .cfi_adjust_cfa_offset -40
-.endm
-
     /*
      * Macro that sets up the callee save frame to conform with
      * Runtime::CreateCalleeSaveMethod(kSaveEverything)
@@ -273,17 +182,6 @@
     .cfi_adjust_cfa_offset -52
 .endm
 
-// Macro to refresh the Marking Register (R8).
-//
-// This macro must be called at the end of functions implementing
-// entrypoints that possibly (directly or indirectly) perform a
-// suspend check (before they return).
-.macro REFRESH_MARKING_REGISTER
-#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
-    ldr rMR, [rSELF, #THREAD_IS_GC_MARKING_OFFSET]
-#endif
-.endm
-
 .macro RETURN_IF_RESULT_IS_ZERO
     cbnz   r0, 1f              @ result non-zero branch over
     bx     lr                  @ return
@@ -296,24 +194,6 @@
 1:
 .endm
 
-    /*
-     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
-     * exception is Thread::Current()->exception_ when the runtime method frame is ready.
-     */
-.macro DELIVER_PENDING_EXCEPTION_FRAME_READY
-    mov    r0, rSELF                           @ pass Thread::Current
-    bl     artDeliverPendingExceptionFromCode  @ artDeliverPendingExceptionFromCode(Thread*)
-.endm
-
-    /*
-     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
-     * exception is Thread::Current()->exception_.
-     */
-.macro DELIVER_PENDING_EXCEPTION
-    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME r0       @ save callee saves for throw
-    DELIVER_PENDING_EXCEPTION_FRAME_READY
-.endm
-
 .macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
     .extern \cxx_name
 ENTRY \c_name
@@ -632,17 +512,7 @@
     ldr    r10, [sp, #8]                   @ Restore JValue* result
     ldr    sp, [sp, #4]                    @ Restore saved stack pointer
     .cfi_def_cfa sp, SAVE_SIZE             @ CFA = sp + SAVE_SIZE
-    ldr    r4, [sp, #SAVE_SIZE]            @ load shorty
-    ldrb   r4, [r4, #0]                    @ load return type
-    cmp    r4, #68                         @ Test if result type char == 'D'.
-    beq    .Losr_fp_result
-    cmp    r4, #70                         @ Test if result type char == 'F'.
-    beq    .Losr_fp_result
     strd r0, [r10]                         @ Store r0/r1 into result pointer
-    b    .Losr_exit
-.Losr_fp_result:
-    vstr d0, [r10]                         @ Store s0-s1/d0 into result pointer
-.Losr_exit:
     vpop   {s16-s31}
     .cfi_adjust_cfa_offset -64
     pop    {r4, r5, r6, r7, r8, r9, r10, r11, pc}
@@ -1200,13 +1070,11 @@
 
     ldr    r3, [r0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET]  // Load the object size (r3)
     cmp    r3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE        // Check if the size is for a thread
-                                                              // local allocation. Also does the
-                                                              // initialized and finalizable checks.
-    // When isInitialized == 0, then the class is potentially not yet initialized.
-    // If the class is not yet initialized, the object size will be very large to force the branch
-    // below to be taken.
+                                                              // local allocation.
+    // If the class is not yet visibly initialized, or it is finalizable,
+    // the object size will be very large to force the branch below to be taken.
     //
-    // See InitializeClassVisitors in class-inl.h for more details.
+    // See Class::SetStatus() in class.cc for more details.
     bhs    .Lslow_path\c_name
                                                               // Compute the rosalloc bracket index
                                                               // from the size. Since the size is
@@ -1274,19 +1142,9 @@
     str    r1, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
 
     mov    r0, r3                                             // Set the return value and return.
-.if \isInitialized == 0
-    // This barrier is only necessary when the allocation also requires
-    // a class initialization check.
-    //
-    // If the class is already observably initialized, then new-instance allocations are protected
+    // No barrier. The class is already observably initialized (otherwise the fast
+    // path size check above would fail) and new-instance allocations are protected
     // from publishing by the compiler which inserts its own StoreStore barrier.
-    dmb    ish
-    // Use a "dmb ish" fence here because if there are later loads of statics (e.g. class size),
-    // they should happen-after the implicit initialization check.
-    //
-    // TODO: Remove this dmb for class initialization checks (b/36692143) by introducing
-    // a new observably-initialized class state.
-.endif
     bx     lr
 
 .Lslow_path\c_name:
@@ -1321,11 +1179,10 @@
     sub    r12, r3, r12                                       // Compute the remaining buf size.
     ldr    r3, [r0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET]  // Load the object size (r3).
     cmp    r3, r12                                            // Check if it fits.
-    // When isInitialized == 0, then the class is potentially not yet initialized.
-    // If the class is not yet initialized, the object size will be very large to force the branch
-    // below to be taken.
+    // If the class is not yet visibly initialized, or it is finalizable,
+    // the object size will be very large to force the branch below to be taken.
     //
-    // See InitializeClassVisitors in class-inl.h for more details.
+    // See Class::SetStatus() in class.cc for more details.
     bhi    \slowPathLabel
     // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
                                                               // Reload old thread_local_pos (r0)
@@ -1360,18 +1217,9 @@
                                                               // site will see the right values in
                                                               // the fields of the class.
     mov    r0, r2
-.if \isInitialized == 0
-    // This barrier is only necessary when the allocation also requires
-    // a class initialization check.
-    //
-    // If the class is already observably initialized, then new-instance allocations are protected
+    // No barrier. The class is already observably initialized (otherwise the fast
+    // path size check above would fail) and new-instance allocations are protected
     // from publishing by the compiler which inserts its own StoreStore barrier.
-    dmb    ish
-    // Use a "dmb ish" fence here because if there are later loads of statics (e.g. class size),
-    // they should happen-after the implicit initialization check.
-    //
-    // TODO: Remove dmb for class initialization checks (b/36692143)
-.endif
     bx     lr
 .endm
 
@@ -1739,41 +1587,44 @@
     sub sp, sp, #5120
 
     // prepare for artQuickGenericJniTrampoline call
-    // (Thread*,  SP)
-    //    r0      r1   <= C calling convention
-    //  rSELF     r10  <= where they are
+    // (Thread*, managed_sp, reserved_area)
+    //    r0         r1            r2   <= C calling convention
+    //  rSELF       r10            sp   <= where they are
 
     mov r0, rSELF   // Thread*
-    mov r1, r10
-    blx artQuickGenericJniTrampoline  // (Thread*, sp)
+    mov r1, r10     // SP for the managed frame.
+    mov r2, sp      // reserved area for arguments and other saved data (up to managed frame)
+    blx artQuickGenericJniTrampoline  // (Thread*, managed_sp, reserved_area)
 
     // The C call will have registered the complete save-frame on success.
     // The result of the call is:
-    // r0: pointer to native code, 0 on error.
-    // r1: pointer to the bottom of the used area of the alloca, can restore stack till there.
+    //     r0: pointer to native code, 0 on error.
+    //     The bottom of the reserved area contains values for arg registers,
+    //     hidden arg register and SP for out args for the call.
 
-    // Check for error = 0.
+    // Check for error (class init check or locking for synchronized native method can throw).
     cbz r0, .Lexception_in_native
 
-    // Release part of the alloca.
-    mov sp, r1
-
     // Save the code pointer
-    mov r12, r0
+    mov lr, r0
 
-    // Load parameters from frame into registers.
-    pop {r0-r3}
+    // Load parameters from frame into registers r0-r3 (soft-float),
+    // hidden arg (r4) for @CriticalNative and SP for out args.
+    pop {r0-r3, r4, ip}
+
+    // Apply the new SP for out args, releasing unneeded reserved area.
+    mov sp, ip
 
     // Softfloat.
     // TODO: Change to hardfloat when supported.
 
-    blx r12           // native call.
+    blx lr            // native call.
 
     // result sign extension is handled in C code
     // prepare for artQuickGenericJniEndTrampoline call
     // (Thread*, result, result_f)
     //    r0      r2,r3    stack       <= C calling convention
-    //    r11     r0,r1    r0,r1          <= where they are
+    //    r11     r0,r1    r0,r1       <= where they are
     sub sp, sp, #8 // Stack alignment.
 
     push {r0-r1}
@@ -2215,6 +2066,17 @@
     pop   {pc}
 END art_quick_l2f
 
+    .extern artStringBuilderAppend
+ENTRY art_quick_string_builder_append
+    SETUP_SAVE_REFS_ONLY_FRAME r2       @ save callee saves in case of GC
+    add    r1, sp, #(FRAME_SIZE_SAVE_REFS_ONLY + __SIZEOF_POINTER__)  @ pass args
+    mov    r2, rSELF                    @ pass Thread::Current
+    bl     artStringBuilderAppend       @ (uint32_t, const uint32_t*, Thread*)
+    RESTORE_SAVE_REFS_ONLY_FRAME
+    REFRESH_MARKING_REGISTER
+    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_string_builder_append
+
 .macro CONDITIONAL_CBZ reg, reg_if, dest
 .ifc \reg, \reg_if
     cbz \reg, \dest
@@ -2628,8 +2490,7 @@
      *     (6 bytes). Loads the return register and jumps to the runtime call.
      */
 #if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
-    .balign 512
-ENTRY art_quick_read_barrier_mark_introspection
+ENTRY_ALIGNED art_quick_read_barrier_mark_introspection, 512
     // At this point, IP contains the reference, rMR is clobbered by the thunk
     // and can be freely used as it will be set back to 1 before returning.
     // For heap poisoning, the reference is poisoned, so unpoison it first.
@@ -2754,3 +2615,84 @@
     blx r1                                        // Call the wrapped method.
     pop {r4, pc}
 END ExecuteSwitchImplAsm
+
+// r0 contains the class, r4 contains the inline cache. We can use ip as temporary.
+ENTRY art_quick_update_inline_cache
+#if (INLINE_CACHE_SIZE != 5)
+#error "INLINE_CACHE_SIZE not as expected."
+#endif
+#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
+    // Don't update the cache if we are marking.
+    cmp rMR, #0
+    bne .Ldone
+#endif
+.Lentry1:
+    ldr ip, [r4, #INLINE_CACHE_CLASSES_OFFSET]
+    cmp ip, r0
+    beq .Ldone
+    cmp ip, #0
+    bne .Lentry2
+    ldrex ip, [r4, #INLINE_CACHE_CLASSES_OFFSET]
+    cmp ip, #0
+    bne .Lentry1
+    strex  ip, r0, [r4, #INLINE_CACHE_CLASSES_OFFSET]
+    cmp ip, #0
+    bne .Ldone
+    b .Lentry1
+.Lentry2:
+    ldr ip, [r4, #INLINE_CACHE_CLASSES_OFFSET+4]
+    cmp ip, r0
+    beq .Ldone
+    cmp ip, #0
+    bne .Lentry3
+    ldrex ip, [r4, #INLINE_CACHE_CLASSES_OFFSET+4]
+    cmp ip, #0
+    bne .Lentry2
+    strex  ip, r0, [r4, #INLINE_CACHE_CLASSES_OFFSET+4]
+    cmp ip, #0
+    bne .Ldone
+    b .Lentry2
+.Lentry3:
+    ldr ip, [r4, #INLINE_CACHE_CLASSES_OFFSET+8]
+    cmp ip, r0
+    beq .Ldone
+    cmp ip, #0
+    bne .Lentry4
+    ldrex ip, [r4, #INLINE_CACHE_CLASSES_OFFSET+8]
+    cmp ip, #0
+    bne .Lentry3
+    strex  ip, r0, [r4, #INLINE_CACHE_CLASSES_OFFSET+8]
+    cmp ip, #0
+    bne .Ldone
+    b .Lentry3
+.Lentry4:
+    ldr ip, [r4, #INLINE_CACHE_CLASSES_OFFSET+12]
+    cmp ip, r0
+    beq .Ldone
+    cmp ip, #0
+    bne .Lentry5
+    ldrex ip, [r4, #INLINE_CACHE_CLASSES_OFFSET+12]
+    cmp ip, #0
+    bne .Lentry4
+    strex  ip, r0, [r4, #INLINE_CACHE_CLASSES_OFFSET+12]
+    cmp ip, #0
+    bne .Ldone
+    b .Lentry4
+.Lentry5:
+    // Unconditionally store, the inline cache is megamorphic.
+    str  r0, [r4, #INLINE_CACHE_CLASSES_OFFSET+16]
+.Ldone:
+    blx lr
+END art_quick_update_inline_cache
+
+// On entry, method is at the bottom of the stack.
+ENTRY art_quick_compile_optimized
+    SETUP_SAVE_EVERYTHING_FRAME r0
+    ldr r0, [sp, FRAME_SIZE_SAVE_EVERYTHING] @ pass ArtMethod
+    mov r1, rSELF                            @ pass Thread::Current
+    bl     artCompileOptimized               @ (ArtMethod*, Thread*)
+    RESTORE_SAVE_EVERYTHING_FRAME
+    // We don't need to restore the marking register here, as
+    // artCompileOptimized doesn't allow thread suspension.
+    blx lr
+END art_quick_compile_optimized
diff --git a/runtime/arch/arm64/asm_support_arm64.S b/runtime/arch/arm64/asm_support_arm64.S
index 715fc35..b1e5c86 100644
--- a/runtime/arch/arm64/asm_support_arm64.S
+++ b/runtime/arch/arm64/asm_support_arm64.S
@@ -40,16 +40,24 @@
 #define wMR w20
 #endif
 
-.macro ENTRY name
+.macro ENTRY_ALIGNED name, alignment
     .type \name, #function
     .hidden \name  // Hide this as a global symbol, so we do not incur plt calls.
     .global \name
-    /* Cache alignment for function entry */
-    .balign 16
+    // ART-compiled functions have OatQuickMethodHeader but assembly functions do not.
+    // Prefix the assembly code with 0xFFs, which means there is no method header.
+    .byte 0xFF, 0xFF, 0xFF, 0xFF
+    // Cache alignment for function entry.
+    // NB: 0xFF because there is a bug in balign where 0x00 creates nop instructions.
+    .balign \alignment, 0xFF
 \name:
     .cfi_startproc
 .endm
 
+.macro ENTRY name
+    ENTRY_ALIGNED \name, 16
+.endm
+
 .macro END name
     .cfi_endproc
     .size \name, .-\name
@@ -75,4 +83,258 @@
 #endif  // USE_HEAP_POISONING
 .endm
 
+.macro INCREASE_FRAME frame_adjustment
+    sub sp, sp, #(\frame_adjustment)
+    .cfi_adjust_cfa_offset (\frame_adjustment)
+.endm
+
+.macro DECREASE_FRAME frame_adjustment
+    add sp, sp, #(\frame_adjustment)
+    .cfi_adjust_cfa_offset -(\frame_adjustment)
+.endm
+
+.macro SAVE_REG reg, offset
+    str \reg, [sp, #(\offset)]
+    .cfi_rel_offset \reg, (\offset)
+.endm
+
+.macro RESTORE_REG reg, offset
+    ldr \reg, [sp, #(\offset)]
+    .cfi_restore \reg
+.endm
+
+.macro SAVE_TWO_REGS_BASE base, reg1, reg2, offset
+    stp \reg1, \reg2, [\base, #(\offset)]
+    .cfi_rel_offset \reg1, (\offset)
+    .cfi_rel_offset \reg2, (\offset) + 8
+.endm
+
+.macro SAVE_TWO_REGS reg1, reg2, offset
+    SAVE_TWO_REGS_BASE sp, \reg1, \reg2, \offset
+.endm
+
+.macro RESTORE_TWO_REGS_BASE base, reg1, reg2, offset
+    ldp \reg1, \reg2, [\base, #(\offset)]
+    .cfi_restore \reg1
+    .cfi_restore \reg2
+.endm
+
+.macro RESTORE_TWO_REGS reg1, reg2, offset
+    RESTORE_TWO_REGS_BASE sp, \reg1, \reg2, \offset
+.endm
+
+.macro LOAD_RUNTIME_INSTANCE reg
+#if __has_feature(hwaddress_sanitizer) && __clang_major__ >= 10
+    adrp xIP0, :pg_hi21_nc:_ZN3art7Runtime9instance_E
+#else
+    adrp xIP0, _ZN3art7Runtime9instance_E
+#endif
+    ldr xIP0, [xIP0, #:lo12:_ZN3art7Runtime9instance_E]
+.endm
+
+// Macro to refresh the Marking Register (W20).
+//
+// This macro must be called at the end of functions implementing
+// entrypoints that possibly (directly or indirectly) perform a
+// suspend check (before they return).
+.macro REFRESH_MARKING_REGISTER
+#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
+    ldr wMR, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
+#endif
+.endm
+
+    /*
+     * Macro that sets up the callee save frame to conform with
+     * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly).
+     */
+.macro SETUP_SAVE_REFS_ONLY_FRAME
+    // art::Runtime* xIP0 = art::Runtime::instance_;
+    // Our registers aren't intermixed - just spill in order.
+    LOAD_RUNTIME_INSTANCE xIP0
+
+    // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveRefsOnly];
+    ldr xIP0, [xIP0, RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET]
+
+    INCREASE_FRAME 96
+
+    // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_SAVE_REFS_ONLY != 96)
+#error "FRAME_SIZE_SAVE_REFS_ONLY(ARM64) size not as expected."
+#endif
+
+    // GP callee-saves.
+    // x20 paired with ArtMethod* - see below.
+    SAVE_TWO_REGS x21, x22, 16
+    SAVE_TWO_REGS x23, x24, 32
+    SAVE_TWO_REGS x25, x26, 48
+    SAVE_TWO_REGS x27, x28, 64
+    SAVE_TWO_REGS x29, xLR, 80
+
+    // Store ArtMethod* Runtime::callee_save_methods_[kSaveRefsOnly].
+    // Note: We could avoid saving X20 in the case of Baker read
+    // barriers, as it is overwritten by REFRESH_MARKING_REGISTER
+    // later; but it's not worth handling this special case.
+    stp xIP0, x20, [sp]
+    .cfi_rel_offset x20, 8
+
+    // Place sp in Thread::Current()->top_quick_frame.
+    mov xIP0, sp
+    str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
+.endm
+
+// TODO: Probably no need to restore registers preserved by aapcs64.
+.macro RESTORE_SAVE_REFS_ONLY_FRAME
+    // Callee-saves.
+    // Note: Likewise, we could avoid restoring X20 in the case of Baker
+    // read barriers, as it is overwritten by REFRESH_MARKING_REGISTER
+    // later; but it's not worth handling this special case.
+    RESTORE_REG x20, 8
+    RESTORE_TWO_REGS x21, x22, 16
+    RESTORE_TWO_REGS x23, x24, 32
+    RESTORE_TWO_REGS x25, x26, 48
+    RESTORE_TWO_REGS x27, x28, 64
+    RESTORE_TWO_REGS x29, xLR, 80
+
+    DECREASE_FRAME 96
+.endm
+
+.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL base
+    // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 224)
+#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(ARM64) size not as expected."
+#endif
+
+    // Stack alignment filler [\base, #8].
+    // FP args.
+    stp d0, d1, [\base, #16]
+    stp d2, d3, [\base, #32]
+    stp d4, d5, [\base, #48]
+    stp d6, d7, [\base, #64]
+
+    // Core args.
+    SAVE_TWO_REGS_BASE \base, x1, x2, 80
+    SAVE_TWO_REGS_BASE \base, x3, x4, 96
+    SAVE_TWO_REGS_BASE \base, x5, x6, 112
+
+    // x7, Callee-saves.
+    // Note: We could avoid saving X20 in the case of Baker read
+    // barriers, as it is overwritten by REFRESH_MARKING_REGISTER
+    // later; but it's not worth handling this special case.
+    SAVE_TWO_REGS_BASE \base, x7, x20, 128
+    SAVE_TWO_REGS_BASE \base, x21, x22, 144
+    SAVE_TWO_REGS_BASE \base, x23, x24, 160
+    SAVE_TWO_REGS_BASE \base, x25, x26, 176
+    SAVE_TWO_REGS_BASE \base, x27, x28, 192
+
+    // x29(callee-save) and LR.
+    SAVE_TWO_REGS_BASE \base, x29, xLR, 208
+.endm
+
+// TODO: Probably no need to restore registers preserved by aapcs64. (That would require
+// auditing all users to make sure they restore aapcs64 callee-save registers they clobber.)
+.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME_INTERNAL base
+    // FP args.
+    ldp d0, d1, [\base, #16]
+    ldp d2, d3, [\base, #32]
+    ldp d4, d5, [\base, #48]
+    ldp d6, d7, [\base, #64]
+
+    // Core args.
+    RESTORE_TWO_REGS_BASE \base, x1, x2, 80
+    RESTORE_TWO_REGS_BASE \base, x3, x4, 96
+    RESTORE_TWO_REGS_BASE \base, x5, x6, 112
+
+    // x7, Callee-saves.
+    // Note: Likewise, we could avoid restoring X20 in the case of Baker
+    // read barriers, as it is overwritten by REFRESH_MARKING_REGISTER
+    // later; but it's not worth handling this special case.
+    RESTORE_TWO_REGS_BASE \base, x7, x20, 128
+    RESTORE_TWO_REGS_BASE \base, x21, x22, 144
+    RESTORE_TWO_REGS_BASE \base, x23, x24, 160
+    RESTORE_TWO_REGS_BASE \base, x25, x26, 176
+    RESTORE_TWO_REGS_BASE \base, x27, x28, 192
+
+    // x29(callee-save) and LR.
+    RESTORE_TWO_REGS_BASE \base, x29, xLR, 208
+.endm
+
+.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME
+    RESTORE_SAVE_REFS_AND_ARGS_FRAME_INTERNAL sp
+    DECREASE_FRAME FRAME_SIZE_SAVE_REFS_AND_ARGS
+.endm
+
+    /*
+     * Macro that sets up the callee save frame to conform with
+     * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
+     */
+.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+    // art::Runtime* xIP0 = art::Runtime::instance_;
+    // Our registers aren't intermixed - just spill in order.
+    LOAD_RUNTIME_INSTANCE xIP0
+
+    // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveAllCalleeSaves];
+    ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET]
+
+    INCREASE_FRAME 176
+
+    // Ugly compile-time check, but we only have the preprocessor.
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 176)
+#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(ARM64) size not as expected."
+#endif
+
+    // Stack alignment filler [sp, #8].
+    // FP callee-saves.
+    stp d8, d9,   [sp, #16]
+    stp d10, d11, [sp, #32]
+    stp d12, d13, [sp, #48]
+    stp d14, d15, [sp, #64]
+
+    // GP callee-saves
+    SAVE_TWO_REGS x19, x20, 80
+    SAVE_TWO_REGS x21, x22, 96
+    SAVE_TWO_REGS x23, x24, 112
+    SAVE_TWO_REGS x25, x26, 128
+    SAVE_TWO_REGS x27, x28, 144
+    SAVE_TWO_REGS x29, xLR, 160
+
+    // Store ArtMethod* Runtime::callee_save_methods_[kSaveAllCalleeSaves].
+    str xIP0, [sp]
+    // Place sp in Thread::Current()->top_quick_frame.
+    mov xIP0, sp
+    str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
+.endm
+
+    /*
+     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+     * exception is Thread::Current()->exception_ when the runtime method frame is ready.
+     */
+.macro DELIVER_PENDING_EXCEPTION_FRAME_READY
+    mov x0, xSELF
+
+    // Point of no return.
+    bl artDeliverPendingExceptionFromCode  // artDeliverPendingExceptionFromCode(Thread*)
+    brk 0  // Unreached
+.endm
+
+    /*
+     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+     * exception is Thread::Current()->exception_.
+     */
+.macro DELIVER_PENDING_EXCEPTION
+    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
+    DELIVER_PENDING_EXCEPTION_FRAME_READY
+.endm
+
+.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_REG reg
+    ldr \reg, [xSELF, # THREAD_EXCEPTION_OFFSET]   // Get exception field.
+    cbnz \reg, 1f
+    ret
+1:
+    DELIVER_PENDING_EXCEPTION
+.endm
+
+.macro RETURN_OR_DELIVER_PENDING_EXCEPTION
+    RETURN_OR_DELIVER_PENDING_EXCEPTION_REG xIP0
+.endm
+
 #endif  // ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_S_
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 22f0c28..8ff2aad 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -83,9 +83,9 @@
   // arguments, only define ReadBarrierMarkRegX entrypoints for the
   // first 30 registers.  This limitation is not a problem on other
   // supported architectures (ARM, x86 and x86-64) either, as they
-  // have less core registers (resp. 16, 8 and 16).  (We may have to
-  // revise that design choice if read barrier support is added for
-  // MIPS and/or MIPS64.)
+  // have less core registers (resp. 16, 8 and 16).
+  // TODO: ARM/ARM64 now use introspection entrypoints. Consider
+  // reducing the number of entrypoints to those needed by x86-64.
   qpoints->pReadBarrierMarkReg00 = is_active ? art_quick_read_barrier_mark_reg00 : nullptr;
   qpoints->pReadBarrierMarkReg01 = is_active ? art_quick_read_barrier_mark_reg01 : nullptr;
   qpoints->pReadBarrierMarkReg02 = is_active ? art_quick_read_barrier_mark_reg02 : nullptr;
diff --git a/runtime/arch/arm64/fault_handler_arm64.cc b/runtime/arch/arm64/fault_handler_arm64.cc
index 751c05b..c139e21 100644
--- a/runtime/arch/arm64/fault_handler_arm64.cc
+++ b/runtime/arch/arm64/fault_handler_arm64.cc
@@ -38,9 +38,12 @@
 
 namespace art {
 
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED, void* context,
+void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo ATTRIBUTE_UNUSED,
+                                             void* context,
                                              ArtMethod** out_method,
-                                             uintptr_t* out_return_pc, uintptr_t* out_sp) {
+                                             uintptr_t* out_return_pc,
+                                             uintptr_t* out_sp,
+                                             bool* out_is_stack_overflow) {
   struct ucontext *uc = reinterpret_cast<struct ucontext *>(context);
   struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
   *out_sp = static_cast<uintptr_t>(sc->sp);
@@ -56,9 +59,11 @@
       reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kArm64));
   if (overflow_addr == fault_addr) {
     *out_method = reinterpret_cast<ArtMethod*>(sc->regs[0]);
+    *out_is_stack_overflow = true;
   } else {
     // The method is at the top of the stack.
     *out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
+    *out_is_stack_overflow = false;
   }
 
   // Work out the return PC.  This will be the address of the instruction
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc
index 196f358..17369e8 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64.cc
@@ -116,6 +116,9 @@
                                         arraysize(arm64_variants_with_dotprod),
                                         variant);
 
+  // Currently there are no cpu variants which support SVE.
+  bool has_sve = false;
+
   if (!needs_a53_835769_fix) {
     // Check to see if this is an expected variant.
     static const char* arm64_known_variants[] = {
@@ -127,6 +130,7 @@
         "exynos-m2",
         "exynos-m3",
         "kryo",
+        "kryo300",
         "kryo385",
     };
     if (!FindVariantInArray(arm64_known_variants, arraysize(arm64_known_variants), variant)) {
@@ -142,7 +146,8 @@
                                                                 has_crc,
                                                                 has_lse,
                                                                 has_fp16,
-                                                                has_dotprod));
+                                                                has_dotprod,
+                                                                has_sve));
 }
 
 Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
@@ -151,12 +156,14 @@
   bool has_lse = (bitmap & kLSEBitField) != 0;
   bool has_fp16 = (bitmap & kFP16BitField) != 0;
   bool has_dotprod = (bitmap & kDotProdBitField) != 0;
+  bool has_sve = (bitmap & kSVEBitField) != 0;
   return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(is_a53,
                                                                 is_a53,
                                                                 has_crc,
                                                                 has_lse,
                                                                 has_fp16,
-                                                                has_dotprod));
+                                                                has_dotprod,
+                                                                has_sve));
 }
 
 Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromCppDefines() {
@@ -169,6 +176,7 @@
   bool has_lse = false;
   bool has_fp16 = false;
   bool has_dotprod = false;
+  bool has_sve = false;
 
 #if defined (__ARM_FEATURE_CRC32)
   has_crc = true;
@@ -187,12 +195,17 @@
   has_dotprod = true;
 #endif
 
+#if defined (__ARM_FEATURE_SVE)
+  has_sve = true;
+#endif
+
   return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(needs_a53_835769_fix,
                                                                 needs_a53_843419_fix,
                                                                 has_crc,
                                                                 has_lse,
                                                                 has_fp16,
-                                                                has_dotprod));
+                                                                has_dotprod,
+                                                                has_sve));
 }
 
 Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromCpuInfo() {
@@ -207,6 +220,7 @@
   bool has_lse = false;
   bool has_fp16 = false;
   bool has_dotprod = false;
+  bool has_sve = false;
 
 #if defined(ART_TARGET_ANDROID) && defined(__aarch64__)
   uint64_t hwcaps = getauxval(AT_HWCAP);
@@ -214,6 +228,7 @@
   has_lse = hwcaps & HWCAP_ATOMICS ? true : false;
   has_fp16 = hwcaps & HWCAP_FPHP ? true : false;
   has_dotprod = hwcaps & HWCAP_ASIMDDP ? true : false;
+  has_sve = hwcaps & HWCAP_SVE ? true : false;
 #endif
 
   return Arm64FeaturesUniquePtr(new Arm64InstructionSetFeatures(needs_a53_835769_fix,
@@ -221,7 +236,8 @@
                                                                 has_crc,
                                                                 has_lse,
                                                                 has_fp16,
-                                                                has_dotprod));
+                                                                has_dotprod,
+                                                                has_sve));
 }
 
 Arm64FeaturesUniquePtr Arm64InstructionSetFeatures::FromAssembly() {
@@ -239,7 +255,8 @@
       has_crc_ == other_as_arm64->has_crc_ &&
       has_lse_ == other_as_arm64->has_lse_ &&
       has_fp16_ == other_as_arm64->has_fp16_ &&
-      has_dotprod_ == other_as_arm64->has_dotprod_;
+      has_dotprod_ == other_as_arm64->has_dotprod_ &&
+      has_sve_ == other_as_arm64->has_sve_;
 }
 
 bool Arm64InstructionSetFeatures::HasAtLeast(const InstructionSetFeatures* other) const {
@@ -253,7 +270,8 @@
   return (has_crc_ || !other_as_arm64->has_crc_)
       && (has_lse_ || !other_as_arm64->has_lse_)
       && (has_fp16_ || !other_as_arm64->has_fp16_)
-      && (has_dotprod_ || !other_as_arm64->has_dotprod_);
+      && (has_dotprod_ || !other_as_arm64->has_dotprod_)
+      && (has_sve_ || !other_as_arm64->has_sve_);
 }
 
 uint32_t Arm64InstructionSetFeatures::AsBitmap() const {
@@ -261,7 +279,8 @@
       | (has_crc_ ? kCRCBitField : 0)
       | (has_lse_ ? kLSEBitField: 0)
       | (has_fp16_ ? kFP16BitField: 0)
-      | (has_dotprod_ ? kDotProdBitField : 0);
+      | (has_dotprod_ ? kDotProdBitField : 0)
+      | (has_sve_ ? kSVEBitField : 0);
 }
 
 std::string Arm64InstructionSetFeatures::GetFeatureString() const {
@@ -291,6 +310,11 @@
   } else {
     result += ",-dotprod";
   }
+  if (has_sve_) {
+    result += ",sve";
+  } else {
+    result += ",-sve";
+  }
   return result;
 }
 
@@ -315,6 +339,7 @@
   bool has_lse = has_lse_;
   bool has_fp16 = has_fp16_;
   bool has_dotprod = has_dotprod_;
+  bool has_sve = has_sve_;
   for (const std::string& feature : features) {
     DCHECK_EQ(android::base::Trim(feature), feature)
         << "Feature name is not trimmed: '" << feature << "'";
@@ -338,6 +363,10 @@
       has_dotprod = true;
     } else if (feature == "-dotprod") {
       has_dotprod = false;
+    } else if (feature == "sve") {
+      has_sve = true;
+    } else if (feature == "-sve") {
+      has_sve = false;
     } else if (feature == "armv8.1-a") {
       has_crc = true;
       has_lse = true;
@@ -365,7 +394,8 @@
                                       has_crc,
                                       has_lse,
                                       has_fp16,
-                                      has_dotprod));
+                                      has_dotprod,
+                                      has_sve));
 }
 
 std::unique_ptr<const InstructionSetFeatures>
@@ -378,7 +408,8 @@
                                       arm64_features->has_crc_,
                                       arm64_features->has_lse_,
                                       arm64_features->has_fp16_,
-                                      arm64_features->has_dotprod_));
+                                      arm64_features->has_dotprod_,
+                                      arm64_features->has_sve_));
 }
 
 }  // namespace art
diff --git a/runtime/arch/arm64/instruction_set_features_arm64.h b/runtime/arch/arm64/instruction_set_features_arm64.h
index 432b9ef..d3c127a 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64.h
+++ b/runtime/arch/arm64/instruction_set_features_arm64.h
@@ -90,6 +90,10 @@
     return has_dotprod_;
   }
 
+  bool HasSVE() const {
+    return has_sve_;
+  }
+
   virtual ~Arm64InstructionSetFeatures() {}
 
  protected:
@@ -107,14 +111,16 @@
                               bool has_crc,
                               bool has_lse,
                               bool has_fp16,
-                              bool has_dotprod)
+                              bool has_dotprod,
+                              bool has_sve)
       : InstructionSetFeatures(),
         fix_cortex_a53_835769_(needs_a53_835769_fix),
         fix_cortex_a53_843419_(needs_a53_843419_fix),
         has_crc_(has_crc),
         has_lse_(has_lse),
         has_fp16_(has_fp16),
-        has_dotprod_(has_dotprod) {
+        has_dotprod_(has_dotprod),
+        has_sve_(has_sve) {
   }
 
   // Bitmap positions for encoding features as a bitmap.
@@ -124,6 +130,7 @@
     kLSEBitField = 1 << 2,
     kFP16BitField = 1 << 3,
     kDotProdBitField = 1 << 4,
+    kSVEBitField = 1 << 5,
   };
 
   const bool fix_cortex_a53_835769_;
@@ -132,6 +139,7 @@
   const bool has_lse_;      // ARMv8.1 Large System Extensions.
   const bool has_fp16_;     // ARMv8.2 FP16 extensions.
   const bool has_dotprod_;  // optional in ARMv8.2, mandatory in ARMv8.4.
+  const bool has_sve_;      // optional in ARMv8.2.
 
   DISALLOW_COPY_AND_ASSIGN(Arm64InstructionSetFeatures);
 };
diff --git a/runtime/arch/arm64/instruction_set_features_arm64_test.cc b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
index eef8f08..0212325 100644
--- a/runtime/arch/arm64/instruction_set_features_arm64_test.cc
+++ b/runtime/arch/arm64/instruction_set_features_arm64_test.cc
@@ -28,7 +28,7 @@
   ASSERT_TRUE(arm64_features.get() != nullptr) << error_msg;
   EXPECT_EQ(arm64_features->GetInstructionSet(), InstructionSet::kArm64);
   EXPECT_TRUE(arm64_features->Equals(arm64_features.get()));
-  EXPECT_STREQ("a53,crc,-lse,-fp16,-dotprod", arm64_features->GetFeatureString().c_str());
+  EXPECT_STREQ("a53,crc,-lse,-fp16,-dotprod,-sve", arm64_features->GetFeatureString().c_str());
   EXPECT_EQ(arm64_features->AsBitmap(), 3U);
 
   std::unique_ptr<const InstructionSetFeatures> cortex_a57_features(
@@ -37,7 +37,8 @@
   EXPECT_EQ(cortex_a57_features->GetInstructionSet(), InstructionSet::kArm64);
   EXPECT_TRUE(cortex_a57_features->Equals(cortex_a57_features.get()));
   EXPECT_TRUE(cortex_a57_features->HasAtLeast(arm64_features.get()));
-  EXPECT_STREQ("a53,crc,-lse,-fp16,-dotprod", cortex_a57_features->GetFeatureString().c_str());
+  EXPECT_STREQ("a53,crc,-lse,-fp16,-dotprod,-sve",
+               cortex_a57_features->GetFeatureString().c_str());
   EXPECT_EQ(cortex_a57_features->AsBitmap(), 3U);
 
   std::unique_ptr<const InstructionSetFeatures> cortex_a73_features(
@@ -49,7 +50,9 @@
   EXPECT_FALSE(cortex_a73_features->AsArm64InstructionSetFeatures()->HasLSE());
   EXPECT_FALSE(cortex_a73_features->AsArm64InstructionSetFeatures()->HasFP16());
   EXPECT_FALSE(cortex_a73_features->AsArm64InstructionSetFeatures()->HasDotProd());
-  EXPECT_STREQ("a53,crc,-lse,-fp16,-dotprod", cortex_a73_features->GetFeatureString().c_str());
+  EXPECT_FALSE(cortex_a73_features->AsArm64InstructionSetFeatures()->HasSVE());
+  EXPECT_STREQ("a53,crc,-lse,-fp16,-dotprod,-sve",
+               cortex_a73_features->GetFeatureString().c_str());
   EXPECT_EQ(cortex_a73_features->AsBitmap(), 3U);
 
   std::unique_ptr<const InstructionSetFeatures> cortex_a35_features(
@@ -57,7 +60,8 @@
   ASSERT_TRUE(cortex_a35_features.get() != nullptr) << error_msg;
   EXPECT_EQ(cortex_a35_features->GetInstructionSet(), InstructionSet::kArm64);
   EXPECT_TRUE(cortex_a35_features->Equals(cortex_a35_features.get()));
-  EXPECT_STREQ("-a53,crc,-lse,-fp16,-dotprod", cortex_a35_features->GetFeatureString().c_str());
+  EXPECT_STREQ("-a53,crc,-lse,-fp16,-dotprod,-sve",
+               cortex_a35_features->GetFeatureString().c_str());
   EXPECT_EQ(cortex_a35_features->AsBitmap(), 2U);
 
   std::unique_ptr<const InstructionSetFeatures> kryo_features(
@@ -67,7 +71,7 @@
   EXPECT_TRUE(kryo_features->Equals(kryo_features.get()));
   EXPECT_TRUE(kryo_features->Equals(cortex_a35_features.get()));
   EXPECT_FALSE(kryo_features->Equals(cortex_a57_features.get()));
-  EXPECT_STREQ("-a53,crc,-lse,-fp16,-dotprod", kryo_features->GetFeatureString().c_str());
+  EXPECT_STREQ("-a53,crc,-lse,-fp16,-dotprod,-sve", kryo_features->GetFeatureString().c_str());
   EXPECT_EQ(kryo_features->AsBitmap(), 2U);
 
   std::unique_ptr<const InstructionSetFeatures> cortex_a55_features(
@@ -78,7 +82,7 @@
   EXPECT_FALSE(cortex_a55_features->Equals(cortex_a35_features.get()));
   EXPECT_FALSE(cortex_a55_features->Equals(cortex_a57_features.get()));
   EXPECT_TRUE(cortex_a35_features->HasAtLeast(arm64_features.get()));
-  EXPECT_STREQ("-a53,crc,lse,fp16,dotprod", cortex_a55_features->GetFeatureString().c_str());
+  EXPECT_STREQ("-a53,crc,lse,fp16,dotprod,-sve", cortex_a55_features->GetFeatureString().c_str());
   EXPECT_EQ(cortex_a55_features->AsBitmap(), 30U);
 
   std::unique_ptr<const InstructionSetFeatures> cortex_a75_features(
@@ -97,7 +101,8 @@
   EXPECT_TRUE(cortex_a75_features->AsArm64InstructionSetFeatures()->HasLSE());
   EXPECT_TRUE(cortex_a75_features->AsArm64InstructionSetFeatures()->HasFP16());
   EXPECT_TRUE(cortex_a75_features->AsArm64InstructionSetFeatures()->HasDotProd());
-  EXPECT_STREQ("-a53,crc,lse,fp16,dotprod", cortex_a75_features->GetFeatureString().c_str());
+  EXPECT_FALSE(cortex_a75_features->AsArm64InstructionSetFeatures()->HasSVE());
+  EXPECT_STREQ("-a53,crc,lse,fp16,dotprod,-sve", cortex_a75_features->GetFeatureString().c_str());
   EXPECT_EQ(cortex_a75_features->AsBitmap(), 30U);
 
   std::unique_ptr<const InstructionSetFeatures> cortex_a76_features(
@@ -117,7 +122,8 @@
   EXPECT_TRUE(cortex_a76_features->AsArm64InstructionSetFeatures()->HasLSE());
   EXPECT_TRUE(cortex_a76_features->AsArm64InstructionSetFeatures()->HasFP16());
   EXPECT_TRUE(cortex_a76_features->AsArm64InstructionSetFeatures()->HasDotProd());
-  EXPECT_STREQ("-a53,crc,lse,fp16,dotprod", cortex_a76_features->GetFeatureString().c_str());
+  EXPECT_FALSE(cortex_a76_features->AsArm64InstructionSetFeatures()->HasSVE());
+  EXPECT_STREQ("-a53,crc,lse,fp16,dotprod,-sve", cortex_a76_features->GetFeatureString().c_str());
   EXPECT_EQ(cortex_a76_features->AsBitmap(), 30U);
 }
 
@@ -139,7 +145,8 @@
   EXPECT_TRUE(a76_features->AsArm64InstructionSetFeatures()->HasLSE());
   EXPECT_TRUE(a76_features->AsArm64InstructionSetFeatures()->HasFP16());
   EXPECT_TRUE(a76_features->AsArm64InstructionSetFeatures()->HasDotProd());
-  EXPECT_STREQ("-a53,crc,lse,fp16,dotprod", a76_features->GetFeatureString().c_str());
+  EXPECT_FALSE(a76_features->AsArm64InstructionSetFeatures()->HasSVE());
+  EXPECT_STREQ("-a53,crc,lse,fp16,dotprod,-sve", a76_features->GetFeatureString().c_str());
   EXPECT_EQ(a76_features->AsBitmap(), 30U);
 
   // Build features for a default ARM64 processor.
@@ -151,7 +158,8 @@
   EXPECT_FALSE(generic_features->AsArm64InstructionSetFeatures()->HasLSE());
   EXPECT_FALSE(generic_features->AsArm64InstructionSetFeatures()->HasFP16());
   EXPECT_FALSE(generic_features->AsArm64InstructionSetFeatures()->HasDotProd());
-  EXPECT_STREQ("a53,crc,-lse,-fp16,-dotprod", generic_features->GetFeatureString().c_str());
+  EXPECT_FALSE(generic_features->AsArm64InstructionSetFeatures()->HasSVE());
+  EXPECT_STREQ("a53,crc,-lse,-fp16,-dotprod,-sve", generic_features->GetFeatureString().c_str());
   EXPECT_EQ(generic_features->AsBitmap(), 3U);
 
   // Build features for a ARM64 processor that supports up to ARMv8.2.
@@ -166,7 +174,9 @@
   EXPECT_TRUE(armv8_2a_cpu_features->AsArm64InstructionSetFeatures()->HasLSE());
   EXPECT_TRUE(armv8_2a_cpu_features->AsArm64InstructionSetFeatures()->HasFP16());
   EXPECT_FALSE(armv8_2a_cpu_features->AsArm64InstructionSetFeatures()->HasDotProd());
-  EXPECT_STREQ("-a53,crc,lse,fp16,-dotprod", armv8_2a_cpu_features->GetFeatureString().c_str());
+  EXPECT_FALSE(armv8_2a_cpu_features->AsArm64InstructionSetFeatures()->HasSVE());
+  EXPECT_STREQ("-a53,crc,lse,fp16,-dotprod,-sve",
+               armv8_2a_cpu_features->GetFeatureString().c_str());
   EXPECT_EQ(armv8_2a_cpu_features->AsBitmap(), 14U);
 }
 
diff --git a/runtime/arch/arm64/jni_entrypoints_arm64.S b/runtime/arch/arm64/jni_entrypoints_arm64.S
index 7f7d791..8a34662 100644
--- a/runtime/arch/arm64/jni_entrypoints_arm64.S
+++ b/runtime/arch/arm64/jni_entrypoints_arm64.S
@@ -20,59 +20,213 @@
      * Jni dlsym lookup stub.
      */
     .extern artFindNativeMethod
+    .extern artFindNativeMethodRunnable
 
 ENTRY art_jni_dlsym_lookup_stub
-  // spill regs.
-  stp   x29, x30, [sp, #-16]!
-  .cfi_adjust_cfa_offset 16
-  .cfi_rel_offset x29, 0
-  .cfi_rel_offset x30, 8
-  mov   x29, sp
-  stp   d6, d7,   [sp, #-16]!
-  .cfi_adjust_cfa_offset 16
-  stp   d4, d5,   [sp, #-16]!
-  .cfi_adjust_cfa_offset 16
-  stp   d2, d3,   [sp, #-16]!
-  .cfi_adjust_cfa_offset 16
-  stp   d0, d1,   [sp, #-16]!
-  .cfi_adjust_cfa_offset 16
-  stp   x6, x7,   [sp, #-16]!
-  .cfi_adjust_cfa_offset 16
-  stp   x4, x5,   [sp, #-16]!
-  .cfi_adjust_cfa_offset 16
-  stp   x2, x3,   [sp, #-16]!
-  .cfi_adjust_cfa_offset 16
-  stp   x0, x1,   [sp, #-16]!
-  .cfi_adjust_cfa_offset 16
+    // spill regs.
+    stp   x29, x30, [sp, #-16]!
+    .cfi_adjust_cfa_offset 16
+    .cfi_rel_offset x29, 0
+    .cfi_rel_offset x30, 8
+    mov   x29, sp
+    stp   d6, d7,   [sp, #-16]!
+    .cfi_adjust_cfa_offset 16
+    stp   d4, d5,   [sp, #-16]!
+    .cfi_adjust_cfa_offset 16
+    stp   d2, d3,   [sp, #-16]!
+    .cfi_adjust_cfa_offset 16
+    stp   d0, d1,   [sp, #-16]!
+    .cfi_adjust_cfa_offset 16
+    stp   x6, x7,   [sp, #-16]!
+    .cfi_adjust_cfa_offset 16
+    stp   x4, x5,   [sp, #-16]!
+    .cfi_adjust_cfa_offset 16
+    stp   x2, x3,   [sp, #-16]!
+    .cfi_adjust_cfa_offset 16
+    stp   x0, x1,   [sp, #-16]!
+    .cfi_adjust_cfa_offset 16
 
-  bl  artFindNativeMethod
-  mov  x17, x0    // store result in scratch reg.
+    mov x0, xSELF   // pass Thread::Current()
+    // Call artFindNativeMethod() for normal native and artFindNativeMethodRunnable()
+    // for @FastNative or @CriticalNative.
+    ldr   xIP0, [x0, #THREAD_TOP_QUICK_FRAME_OFFSET]      // uintptr_t tagged_quick_frame
+    bic   xIP0, xIP0, #1                                  // ArtMethod** sp
+    ldr   xIP0, [xIP0]                                    // ArtMethod* method
+    ldr   xIP0, [xIP0, #ART_METHOD_ACCESS_FLAGS_OFFSET]   // uint32_t access_flags
+    mov   xIP1, #(ACCESS_FLAGS_METHOD_IS_FAST_NATIVE | ACCESS_FLAGS_METHOD_IS_CRITICAL_NATIVE)
+    tst   xIP0, xIP1
+    b.ne  .Llookup_stub_fast_native
+    bl    artFindNativeMethod
+    b     .Llookup_stub_continue
+    .Llookup_stub_fast_native:
+    bl    artFindNativeMethodRunnable
+.Llookup_stub_continue:
+    mov   x17, x0    // store result in scratch reg.
 
-  // load spill regs.
-  ldp   x0, x1,   [sp], #16
-  .cfi_adjust_cfa_offset -16
-  ldp   x2, x3,   [sp], #16
-  .cfi_adjust_cfa_offset -16
-  ldp   x4, x5,   [sp], #16
-  .cfi_adjust_cfa_offset -16
-  ldp   x6, x7,   [sp], #16
-  .cfi_adjust_cfa_offset -16
-  ldp   d0, d1,   [sp], #16
-  .cfi_adjust_cfa_offset -16
-  ldp   d2, d3,   [sp], #16
-  .cfi_adjust_cfa_offset -16
-  ldp   d4, d5,   [sp], #16
-  .cfi_adjust_cfa_offset -16
-  ldp   d6, d7,   [sp], #16
-  .cfi_adjust_cfa_offset -16
-  ldp   x29, x30, [sp], #16
-  .cfi_adjust_cfa_offset -16
-  .cfi_restore x29
-  .cfi_restore x30
+    // load spill regs.
+    ldp   x0, x1,   [sp], #16
+    .cfi_adjust_cfa_offset -16
+    ldp   x2, x3,   [sp], #16
+    .cfi_adjust_cfa_offset -16
+    ldp   x4, x5,   [sp], #16
+    .cfi_adjust_cfa_offset -16
+    ldp   x6, x7,   [sp], #16
+    .cfi_adjust_cfa_offset -16
+    ldp   d0, d1,   [sp], #16
+    .cfi_adjust_cfa_offset -16
+    ldp   d2, d3,   [sp], #16
+    .cfi_adjust_cfa_offset -16
+    ldp   d4, d5,   [sp], #16
+    .cfi_adjust_cfa_offset -16
+    ldp   d6, d7,   [sp], #16
+    .cfi_adjust_cfa_offset -16
+    ldp   x29, x30, [sp], #16
+    .cfi_adjust_cfa_offset -16
+    .cfi_restore x29
+    .cfi_restore x30
 
-  cbz   x17, 1f   // is method code null ?
-  br    x17       // if non-null, tail call to method's code.
+    cbz   x17, 1f   // is method code null ?
+    br    x17       // if non-null, tail call to method's code.
 
 1:
-  ret             // restore regs and return to caller to handle exception.
+    ret             // restore regs and return to caller to handle exception.
 END art_jni_dlsym_lookup_stub
+
+ENTRY art_jni_dlsym_lookup_critical_stub
+    // The hidden arg holding the tagged method (bit 0 set means GenericJNI) is x15.
+    // For Generic JNI we already have a managed frame, so we reuse the art_jni_dlsym_lookup_stub.
+    tbnz  x15, #0, art_jni_dlsym_lookup_stub
+
+    // We need to create a GenericJNI managed frame above the stack args.
+
+    // GenericJNI frame is similar to SaveRegsAndArgs frame with the native method
+    // instead of runtime method saved at the bottom. Note that the runtime shall
+    // not examine the args here, otherwise we would have to move them in registers
+    // and stack to account for the difference between managed and native ABIs.
+    INCREASE_FRAME FRAME_SIZE_SAVE_REFS_AND_ARGS
+    SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL sp
+    // Save the hidden arg as method pointer, x0 in the padding.
+    // (x0 is an arg in native ABI but not considered an arg in managed ABI.)
+    SAVE_TWO_REGS x15, x0, 0
+
+    // Call artCriticalNativeOutArgsSize(method)
+    mov   x0, x15  // x0 := method (from hidden arg)
+    bl    artCriticalNativeOutArgsSize
+
+    // Check if we have any stack args.
+    cbnz  x0, .Lcritical_has_stack_args
+
+    // Without stack args, the frame is fully constructed.
+    // Place tagged managed sp in Thread::Current()->top_quick_frame.
+    mov   xIP0, sp
+    orr   xIP0, xIP0, #1  // Tag as GenericJNI frame.
+    str   xIP0, [xSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
+
+    // Call artFindNativeMethodRunnable()
+    mov   x0, xSELF   // pass Thread::Current()
+    bl    artFindNativeMethodRunnable
+
+    // Store result in scratch reg.
+    mov   xIP0, x0
+
+    // Restore frame.
+    .cfi_remember_state
+    RESTORE_TWO_REGS x15, x0, 0
+    RESTORE_SAVE_REFS_AND_ARGS_FRAME
+    REFRESH_MARKING_REGISTER
+
+    // Check for exception.
+    cbz   xIP0, .Lcritical_deliver_exception
+
+    // Do the tail call
+    br    xIP0
+    .cfi_restore_state
+    .cfi_def_cfa_offset FRAME_SIZE_SAVE_REFS_AND_ARGS
+
+.Lcritical_has_stack_args:
+    // Move the out args size to a scratch register.
+    mov   xIP0, x0
+
+    // Restore register args as we're about to move stack args.
+    RESTORE_TWO_REGS x15, x0, 0
+    RESTORE_SAVE_REFS_AND_ARGS_FRAME_INTERNAL sp
+
+    // Move out args. For simplicity include the return address at the end.
+    mov   x8, sp        // Destination.
+    add   x9, sp, xIP0  // Destination end.
+1:
+    ldp   x10, x11, [x8, #FRAME_SIZE_SAVE_REFS_AND_ARGS]
+    stp   x10, x11, [x8], #16
+    cmp   x8, x9
+    bne   1b
+
+    // Save our LR, load caller's LR and redefine CFI to take ownership of the JNI stub frame.
+    str   xLR, [x9, #-__SIZEOF_POINTER__]
+    mov   xLR, x11  // The last moved value from the loop above.
+    .cfi_def_cfa x9, FRAME_SIZE_SAVE_REFS_AND_ARGS
+
+    // Re-create the SaveRefsAndArgs frame above the args.
+    SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL x9
+    SAVE_TWO_REGS_BASE x9, x15, x0, 0
+
+    // Move the frame register to a callee-save register.
+    mov   x29, x9
+    .cfi_def_cfa_register x29
+
+    // Place tagged managed sp in Thread::Current()->top_quick_frame.
+    orr   xIP0, x29, #1  // Tag as GenericJNI frame.
+    str   xIP0, [xSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
+
+    // Call artFindNativeMethodRunnable()
+    mov   x0, xSELF   // pass Thread::Current()
+    bl    artFindNativeMethodRunnable
+
+    // Store result in scratch reg.
+    mov   xIP0, x0
+
+    // Restore the frame.
+    mov   x9, x29
+    .cfi_def_cfa_register x9
+    RESTORE_TWO_REGS_BASE x9, x15, x0, 0
+    RESTORE_SAVE_REFS_AND_ARGS_FRAME_INTERNAL x9
+    REFRESH_MARKING_REGISTER
+
+    // Check for exception.
+    cbz   xIP0, 3f
+
+    // Move stack args to their original place.
+    mov   x8, x9
+2:
+    ldp   x10, x11, [x8, #-16]!
+    stp   x10, x11, [x8, #FRAME_SIZE_SAVE_REFS_AND_ARGS]
+    cmp   sp, x8
+    bne   2b
+
+    // Replace original return address with caller's return address.
+    ldr   xIP1, [x9, #(FRAME_SIZE_SAVE_REFS_AND_ARGS - __SIZEOF_POINTER__)]
+    str   xLR, [x9, #(FRAME_SIZE_SAVE_REFS_AND_ARGS - __SIZEOF_POINTER__)]
+
+    // Restore LR and redefine CFI to release ownership of the JNI stub frame.
+    .cfi_remember_state
+    mov   xLR, xIP1
+    .cfi_def_cfa sp, FRAME_SIZE_SAVE_REFS_AND_ARGS
+
+    // Remove the frame reservation.
+    DECREASE_FRAME FRAME_SIZE_SAVE_REFS_AND_ARGS
+
+    // Do the tail call.
+    br    xIP0
+    .cfi_restore_state
+    .cfi_def_cfa x9, FRAME_SIZE_SAVE_REFS_AND_ARGS
+
+3:
+    // Drop stack args and the SaveRefsAndArgs reservation.
+    mov   sp, x9
+    add   sp, sp, #FRAME_SIZE_SAVE_REFS_AND_ARGS
+    .cfi_def_cfa sp, 0
+
+.Lcritical_deliver_exception:
+    // When delivering exception, we check that xSELF was saved but the SaveRefsAndArgs frame does
+    // not save it, so we cannot use DELIVER_PENDING_EXCEPTION_FRAME_READY with the above frames.
+    DELIVER_PENDING_EXCEPTION
+END art_jni_dlsym_lookup_critical_stub
diff --git a/runtime/arch/arm64/jni_frame_arm64.h b/runtime/arch/arm64/jni_frame_arm64.h
new file mode 100644
index 0000000..fa4d43c
--- /dev/null
+++ b/runtime/arch/arm64/jni_frame_arm64.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_ARM64_JNI_FRAME_ARM64_H_
+#define ART_RUNTIME_ARCH_ARM64_JNI_FRAME_ARM64_H_
+
+#include <string.h>
+
+#include "arch/instruction_set.h"
+#include "base/bit_utils.h"
+#include "base/globals.h"
+#include "base/logging.h"
+
+namespace art {
+namespace arm64 {
+
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);
+static_assert(kArm64PointerSize == PointerSize::k64, "Unexpected ARM64 pointer size");
+
+// The AAPCS64 requires 16-byte alignment. This is the same as the Managed ABI stack alignment.
+static constexpr size_t kAapcs64StackAlignment = 16u;
+static_assert(kAapcs64StackAlignment == kStackAlignment);
+
+// Up to how many float-like (float, double) args can be in registers.
+// The rest of the args must go on the stack.
+constexpr size_t kMaxFloatOrDoubleRegisterArguments = 8u;
+// Up to how many integer-like (pointers, objects, longs, int, short, bool, etc) args can be
+// in registers. The rest of the args must go on the stack.
+constexpr size_t kMaxIntLikeRegisterArguments = 8u;
+
+// Get the size of "out args" for @CriticalNative method stub.
+// This must match the size of the frame emitted by the JNI compiler at the native call site.
+inline size_t GetCriticalNativeOutArgsSize(const char* shorty, uint32_t shorty_len) {
+  DCHECK_EQ(shorty_len, strlen(shorty));
+
+  size_t num_fp_args = 0u;
+  for (size_t i = 1; i != shorty_len; ++i) {
+    if (shorty[i] == 'F' || shorty[i] == 'D') {
+      num_fp_args += 1u;
+    }
+  }
+  size_t num_non_fp_args = shorty_len - 1u - num_fp_args;
+
+  // Account for FP arguments passed through v0-v7.
+  size_t num_stack_fp_args =
+      num_fp_args - std::min(kMaxFloatOrDoubleRegisterArguments, num_fp_args);
+  // Account for other (integer and pointer) arguments passed through GPR (x0-x7).
+  size_t num_stack_non_fp_args =
+      num_non_fp_args - std::min(kMaxIntLikeRegisterArguments, num_non_fp_args);
+  // The size of outgoing arguments.
+  size_t size =
+      (num_stack_fp_args + num_stack_non_fp_args) * static_cast<size_t>(kArm64PointerSize);
+
+  // We can make a tail call if there are no stack args and we do not need
+  // to extend the result. Otherwise, add space for return PC.
+  if (size != 0u || shorty[0] == 'B' || shorty[0] == 'C' || shorty[0] == 'S' || shorty[0] == 'Z') {
+    size += kFramePointerSize;  // We need to spill LR with the args.
+  }
+  return RoundUp(size, kAapcs64StackAlignment);
+}
+
+}  // namespace arm64
+}  // namespace art
+
+#endif  // ART_RUNTIME_ARCH_ARM64_JNI_FRAME_ARM64_H_
+
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index cb74ee8..634c762 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -19,27 +19,6 @@
 
 #include "arch/quick_alloc_entrypoints.S"
 
-
-.macro INCREASE_FRAME frame_adjustment
-    sub sp, sp, #(\frame_adjustment)
-    .cfi_adjust_cfa_offset (\frame_adjustment)
-.endm
-
-.macro DECREASE_FRAME frame_adjustment
-    add sp, sp, #(\frame_adjustment)
-    .cfi_adjust_cfa_offset -(\frame_adjustment)
-.endm
-
-.macro SAVE_REG reg, offset
-    str \reg, [sp, #(\offset)]
-    .cfi_rel_offset \reg, (\offset)
-.endm
-
-.macro RESTORE_REG reg, offset
-    ldr \reg, [sp, #(\offset)]
-    .cfi_restore \reg
-.endm
-
 .macro SAVE_REG_INCREASE_FRAME reg, frame_adjustment
     str \reg, [sp, #-(\frame_adjustment)]!
     .cfi_adjust_cfa_offset (\frame_adjustment)
@@ -52,18 +31,6 @@
     .cfi_adjust_cfa_offset -(\frame_adjustment)
 .endm
 
-.macro SAVE_TWO_REGS reg1, reg2, offset
-    stp \reg1, \reg2, [sp, #(\offset)]
-    .cfi_rel_offset \reg1, (\offset)
-    .cfi_rel_offset \reg2, (\offset) + 8
-.endm
-
-.macro RESTORE_TWO_REGS reg1, reg2, offset
-    ldp \reg1, \reg2, [sp, #(\offset)]
-    .cfi_restore \reg1
-    .cfi_restore \reg2
-.endm
-
 .macro SAVE_TWO_REGS_INCREASE_FRAME reg1, reg2, frame_adjustment
     stp \reg1, \reg2, [sp, #-(\frame_adjustment)]!
     .cfi_adjust_cfa_offset (\frame_adjustment)
@@ -78,144 +45,10 @@
     .cfi_adjust_cfa_offset -(\frame_adjustment)
 .endm
 
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
-     */
-.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
-    // art::Runtime* xIP0 = art::Runtime::instance_;
-    // Our registers aren't intermixed - just spill in order.
-    adrp xIP0, _ZN3art7Runtime9instance_E
-    ldr xIP0, [xIP0, #:lo12:_ZN3art7Runtime9instance_E]
-
-    // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveAllCalleeSaves];
-    ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET]
-
-    INCREASE_FRAME 176
-
-    // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 176)
-#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(ARM64) size not as expected."
-#endif
-
-    // Stack alignment filler [sp, #8].
-    // FP callee-saves.
-    stp d8, d9,   [sp, #16]
-    stp d10, d11, [sp, #32]
-    stp d12, d13, [sp, #48]
-    stp d14, d15, [sp, #64]
-
-    // GP callee-saves
-    SAVE_TWO_REGS x19, x20, 80
-    SAVE_TWO_REGS x21, x22, 96
-    SAVE_TWO_REGS x23, x24, 112
-    SAVE_TWO_REGS x25, x26, 128
-    SAVE_TWO_REGS x27, x28, 144
-    SAVE_TWO_REGS x29, xLR, 160
-
-    // Store ArtMethod* Runtime::callee_save_methods_[kSaveAllCalleeSaves].
-    str xIP0, [sp]
-    // Place sp in Thread::Current()->top_quick_frame.
-    mov xIP0, sp
-    str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
-.endm
-
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly).
-     */
-.macro SETUP_SAVE_REFS_ONLY_FRAME
-    // art::Runtime* xIP0 = art::Runtime::instance_;
-    // Our registers aren't intermixed - just spill in order.
-    adrp xIP0, _ZN3art7Runtime9instance_E
-    ldr xIP0, [xIP0, #:lo12:_ZN3art7Runtime9instance_E]
-
-    // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveRefOnly];
-    ldr xIP0, [xIP0, RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET]
-
-    INCREASE_FRAME 96
-
-    // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_REFS_ONLY != 96)
-#error "FRAME_SIZE_SAVE_REFS_ONLY(ARM64) size not as expected."
-#endif
-
-    // GP callee-saves.
-    // x20 paired with ArtMethod* - see below.
-    SAVE_TWO_REGS x21, x22, 16
-    SAVE_TWO_REGS x23, x24, 32
-    SAVE_TWO_REGS x25, x26, 48
-    SAVE_TWO_REGS x27, x28, 64
-    SAVE_TWO_REGS x29, xLR, 80
-
-    // Store ArtMethod* Runtime::callee_save_methods_[kSaveRefsOnly].
-    // Note: We could avoid saving X20 in the case of Baker read
-    // barriers, as it is overwritten by REFRESH_MARKING_REGISTER
-    // later; but it's not worth handling this special case.
-    stp xIP0, x20, [sp]
-    .cfi_rel_offset x20, 8
-
-    // Place sp in Thread::Current()->top_quick_frame.
-    mov xIP0, sp
-    str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
-.endm
-
-// TODO: Probably no need to restore registers preserved by aapcs64.
-.macro RESTORE_SAVE_REFS_ONLY_FRAME
-    // Callee-saves.
-    // Note: Likewise, we could avoid restoring X20 in the case of Baker
-    // read barriers, as it is overwritten by REFRESH_MARKING_REGISTER
-    // later; but it's not worth handling this special case.
-    RESTORE_REG x20, 8
-    RESTORE_TWO_REGS x21, x22, 16
-    RESTORE_TWO_REGS x23, x24, 32
-    RESTORE_TWO_REGS x25, x26, 48
-    RESTORE_TWO_REGS x27, x28, 64
-    RESTORE_TWO_REGS x29, xLR, 80
-
-    DECREASE_FRAME 96
-.endm
-
 .macro POP_SAVE_REFS_ONLY_FRAME
     DECREASE_FRAME 96
 .endm
 
-
-.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL
-    INCREASE_FRAME 224
-
-    // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 224)
-#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(ARM64) size not as expected."
-#endif
-
-    // Stack alignment filler [sp, #8].
-    // FP args.
-    stp d0, d1, [sp, #16]
-    stp d2, d3, [sp, #32]
-    stp d4, d5, [sp, #48]
-    stp d6, d7, [sp, #64]
-
-    // Core args.
-    SAVE_TWO_REGS x1, x2, 80
-    SAVE_TWO_REGS x3, x4, 96
-    SAVE_TWO_REGS x5, x6, 112
-
-    // x7, Callee-saves.
-    // Note: We could avoid saving X20 in the case of Baker read
-    // barriers, as it is overwritten by REFRESH_MARKING_REGISTER
-    // later; but it's not worth handling this special case.
-    SAVE_TWO_REGS x7, x20, 128
-    SAVE_TWO_REGS x21, x22, 144
-    SAVE_TWO_REGS x23, x24, 160
-    SAVE_TWO_REGS x25, x26, 176
-    SAVE_TWO_REGS x27, x28, 192
-
-    // x29(callee-save) and LR.
-    SAVE_TWO_REGS x29, xLR, 208
-
-.endm
-
     /*
      * Macro that sets up the callee save frame to conform with
      * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs).
@@ -225,13 +58,13 @@
 .macro SETUP_SAVE_REFS_AND_ARGS_FRAME
     // art::Runtime* xIP0 = art::Runtime::instance_;
     // Our registers aren't intermixed - just spill in order.
-    adrp xIP0, _ZN3art7Runtime9instance_E
-    ldr xIP0, [xIP0, #:lo12:_ZN3art7Runtime9instance_E]
+    LOAD_RUNTIME_INSTANCE xIP0
 
     // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveRefAndArgs];
     ldr xIP0, [xIP0, RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET]
 
-    SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL
+    INCREASE_FRAME FRAME_SIZE_SAVE_REFS_AND_ARGS
+    SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL sp
 
     str xIP0, [sp]    // Store ArtMethod* Runtime::callee_save_methods_[kSaveRefsAndArgs].
     // Place sp in Thread::Current()->top_quick_frame.
@@ -240,42 +73,14 @@
 .endm
 
 .macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_X0
-    SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL
+    INCREASE_FRAME FRAME_SIZE_SAVE_REFS_AND_ARGS
+    SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL sp
     str x0, [sp, #0]  // Store ArtMethod* to bottom of stack.
     // Place sp in Thread::Current()->top_quick_frame.
     mov xIP0, sp
     str xIP0, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
 .endm
 
-// TODO: Probably no need to restore registers preserved by aapcs64.
-.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME
-    // FP args.
-    ldp d0, d1, [sp, #16]
-    ldp d2, d3, [sp, #32]
-    ldp d4, d5, [sp, #48]
-    ldp d6, d7, [sp, #64]
-
-    // Core args.
-    RESTORE_TWO_REGS x1, x2, 80
-    RESTORE_TWO_REGS x3, x4, 96
-    RESTORE_TWO_REGS x5, x6, 112
-
-    // x7, Callee-saves.
-    // Note: Likewise, we could avoid restoring X20 in the case of Baker
-    // read barriers, as it is overwritten by REFRESH_MARKING_REGISTER
-    // later; but it's not worth handling this special case.
-    RESTORE_TWO_REGS x7, x20, 128
-    RESTORE_TWO_REGS x21, x22, 144
-    RESTORE_TWO_REGS x23, x24, 160
-    RESTORE_TWO_REGS x25, x26, 176
-    RESTORE_TWO_REGS x27, x28, 192
-
-    // x29(callee-save) and LR.
-    RESTORE_TWO_REGS x29, xLR, 208
-
-    DECREASE_FRAME 224
-.endm
-
     /*
      * Macro that sets up the callee save frame to conform with
      * Runtime::CreateCalleeSaveMethod(kSaveEverything)
@@ -323,8 +128,7 @@
     SAVE_TWO_REGS x27, x28, 480
 
     // art::Runtime* xIP0 = art::Runtime::instance_;
-    adrp xIP0, _ZN3art7Runtime9instance_E
-    ldr xIP0, [xIP0, #:lo12:_ZN3art7Runtime9instance_E]
+    LOAD_RUNTIME_INSTANCE xIP0
 
     // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveEverything];
     ldr xIP0, [xIP0, \runtime_method_offset]
@@ -390,17 +194,6 @@
     RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
 .endm
 
-// Macro to refresh the Marking Register (W20).
-//
-// This macro must be called at the end of functions implementing
-// entrypoints that possibly (directly or indirectly) perform a
-// suspend check (before they return).
-.macro REFRESH_MARKING_REGISTER
-#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
-    ldr wMR, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
-#endif
-.endm
-
 .macro RETURN_IF_RESULT_IS_ZERO
     cbnz x0, 1f                // result non-zero branch over
     ret                        // return
@@ -413,39 +206,6 @@
 1:
 .endm
 
-    /*
-     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
-     * exception is Thread::Current()->exception_ when the runtime method frame is ready.
-     */
-.macro DELIVER_PENDING_EXCEPTION_FRAME_READY
-    mov x0, xSELF
-
-    // Point of no return.
-    bl artDeliverPendingExceptionFromCode  // artDeliverPendingExceptionFromCode(Thread*)
-    brk 0  // Unreached
-.endm
-
-    /*
-     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
-     * exception is Thread::Current()->exception_.
-     */
-.macro DELIVER_PENDING_EXCEPTION
-    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
-    DELIVER_PENDING_EXCEPTION_FRAME_READY
-.endm
-
-.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_REG reg
-    ldr \reg, [xSELF, # THREAD_EXCEPTION_OFFSET]   // Get exception field.
-    cbnz \reg, 1f
-    ret
-1:
-    DELIVER_PENDING_EXCEPTION
-.endm
-
-.macro RETURN_OR_DELIVER_PENDING_EXCEPTION
-    RETURN_OR_DELIVER_PENDING_EXCEPTION_REG xIP0
-.endm
-
 // Same as above with x1. This is helpful in stubs that want to avoid clobbering another register.
 .macro RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
     RETURN_OR_DELIVER_PENDING_EXCEPTION_REG x1
@@ -1026,29 +786,8 @@
     RESTORE_TWO_REGS xFP, xLR, 96
     RESTORE_TWO_REGS_DECREASE_FRAME x3, x4, SAVE_SIZE
 
-    // Store result (w0/x0/s0/d0) appropriately, depending on resultType.
-    ldrb w10, [x4]
-
-    // Check the return type and store the correct register into the jvalue in memory.
-
-    // Don't set anything for a void type.
-    cmp w10, #'V'
-    beq .Losr_exit
-    // Is it a double?
-    cmp w10, #'D'
-    beq .Losr_return_double
-    // Is it a float?
-    cmp w10, #'F'
-    beq .Losr_return_float
-    // Just store x0. Doesn't matter if it is 64 or 32 bits.
+    // The compiler put the result in x0. Doesn't matter if it is 64 or 32 bits.
     str x0, [x3]
-.Losr_exit:
-    ret
-.Losr_return_double:
-    str d0, [x3]
-    ret
-.Losr_return_float:
-    str s0, [x3]
     ret
 
 .Losr_entry:
@@ -1641,14 +1380,11 @@
     bhs    .Lslow_path\c_name
     ldr    w3, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET]  // Load the object size (x3)
     cmp    x3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE        // Check if the size is for a thread
-                                                              // local allocation. Also does the
-                                                              // finalizable and initialization
-                                                              // checks.
-    // When isInitialized == 0, then the class is potentially not yet initialized.
-    // If the class is not yet initialized, the object size will be very large to force the branch
-    // below to be taken.
+                                                              // local allocation.
+    // If the class is not yet visibly initialized, or it is finalizable,
+    // the object size will be very large to force the branch below to be taken.
     //
-    // See InitializeClassVisitors in class-inl.h for more details.
+    // See Class::SetStatus() in class.cc for more details.
     bhs    .Lslow_path\c_name
                                                               // Compute the rosalloc bracket index
                                                               // from the size. Since the size is
@@ -1716,19 +1452,9 @@
     str    w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
 
     mov    x0, x3                                             // Set the return value and return.
-.if \isInitialized == 0
-    // This barrier is only necessary when the allocation also requires
-    // a class initialization check.
-    //
-    // If the class is already observably initialized, then new-instance allocations are protected
+    // No barrier. The class is already observably initialized (otherwise the fast
+    // path size check above would fail) and new-instance allocations are protected
     // from publishing by the compiler which inserts its own StoreStore barrier.
-    dmb    ish
-    // Use a "dmb ish" fence here because if there are later loads of statics (e.g. class size),
-    // they should happen-after the implicit initialization check.
-    //
-    // TODO: Remove this dmb for class initialization checks (b/36692143) by introducing
-    // a new observably-initialized class state.
-.endif
     ret
 .Lslow_path\c_name:
     SETUP_SAVE_REFS_ONLY_FRAME                      // save callee saves in case of GC
@@ -1754,11 +1480,10 @@
                                                               // since the tlab pos and end are 32
                                                               // bit values.
 
-    // When isInitialized == 0, then the class is potentially not yet initialized.
-    // If the class is not yet initialized, the object size will be very large to force the branch
-    // below to be taken.
+    // If the class is not yet visibly initialized, or it is finalizable,
+    // the object size will be very large to force the branch below to be taken.
     //
-    // See InitializeClassVisitors in class-inl.h for more details.
+    // See Class::SetStatus() in class.cc for more details.
     bhi    \slowPathLabel
     str    x6, [xSELF, #THREAD_LOCAL_POS_OFFSET]              // Store new thread_local_pos.
     ldr    x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]          // Increment thread_local_objects.
@@ -1771,19 +1496,9 @@
                                                               // site will see the right values in
                                                               // the fields of the class.
     mov    x0, x4
-.if \isInitialized == 0
-    // This barrier is only necessary when the allocation also requires
-    // a class initialization check.
-    //
-    // If the class is already observably initialized, then new-instance allocations are protected
+    // No barrier. The class is already observably initialized (otherwise the fast
+    // path size check above would fail) and new-instance allocations are protected
     // from publishing by the compiler which inserts its own StoreStore barrier.
-    dmb    ish
-    // Use a "dmb ish" fence here because if there are later loads of statics (e.g. class size),
-    // they should happen-after the implicit initialization check.
-    //
-    // TODO: Remove this dmb for class initialization checks (b/36692143) by introducing
-    // a new observably-initialized class state.
-.endif
     ret
 .endm
 
@@ -2109,7 +1824,6 @@
  * | X22               |    callee save
  * | X21               |    callee save
  * | X20               |    callee save
- * | X19               |    callee save
  * | X7                |    arg7
  * | X6                |    arg6
  * | X5                |    arg5
@@ -2125,27 +1839,30 @@
  * | D2                |    float arg 3
  * | D1                |    float arg 2
  * | D0                |    float arg 1
- * | Method*           | <- X0
+ * | padding           | // 8B
+ * | Method*           | <- X0 (Managed frame similar to SaveRefsAndArgs.)
  * #-------------------#
  * | local ref cookie  | // 4B
- * | handle scope size | // 4B
+ * | padding           | // 0B or 4B to align handle scope on 8B address
+ * | handle scope      | // Size depends on number of references; multiple of 4B.
  * #-------------------#
- * | JNI Call Stack    |
- * #-------------------#    <--- SP on native call
+ * | JNI Stack Args    | // Empty if all args fit into registers x0-x7, d0-d7.
+ * #-------------------#    <--- SP on native call (1)
+ * | Free scratch      |
+ * #-------------------#
+ * | SP for JNI call   | // Pointer to (1).
+ * #-------------------#
+ * | Hidden arg        | // For @CriticalNative
+ * #-------------------#
  * |                   |
  * | Stack for Regs    |    The trampoline assembly will pop these values
  * |                   |    into registers for native call
  * #-------------------#
- * | Native code ptr   |
- * #-------------------#
- * | Free scratch      |
- * #-------------------#
- * | Ptr to (1)        |    <--- SP
- * #-------------------#
  */
     /*
      * Called to do a generic JNI down-call
      */
+    .extern artQuickGenericJniTrampoline
 ENTRY art_quick_generic_jni_trampoline
     SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_X0
 
@@ -2161,31 +1878,28 @@
     sub sp, sp, xIP0
 
     // prepare for artQuickGenericJniTrampoline call
-    // (Thread*,  SP)
-    //    x0      x1   <= C calling convention
-    //   xSELF    xFP  <= where they are
+    // (Thread*, managed_sp, reserved_area)
+    //    x0         x1            x2   <= C calling convention
+    //  xSELF       xFP            sp   <= where they are
 
     mov x0, xSELF   // Thread*
-    mov x1, xFP
+    mov x1, xFP     // SP for the managed frame.
+    mov x2, sp      // reserved area for arguments and other saved data (up to managed frame)
     bl artQuickGenericJniTrampoline  // (Thread*, sp)
 
     // The C call will have registered the complete save-frame on success.
     // The result of the call is:
-    // x0: pointer to native code, 0 on error.
-    // x1: pointer to the bottom of the used area of the alloca, can restore stack till there.
+    //     x0: pointer to native code, 0 on error.
+    //     The bottom of the reserved area contains values for arg registers,
+    //     hidden arg register and SP for out args for the call.
 
-    // Check for error = 0.
+    // Check for error (class init check or locking for synchronized native method can throw).
     cbz x0, .Lexception_in_native
 
-    // Release part of the alloca.
-    mov sp, x1
-
     // Save the code pointer
     mov xIP0, x0
 
     // Load parameters from frame into registers.
-    // TODO Check with artQuickGenericJniTrampoline.
-    //      Also, check again APPCS64 - the stack arguments are interleaved.
     ldp x0, x1, [sp]
     ldp x2, x3, [sp, #16]
     ldp x4, x5, [sp, #32]
@@ -2196,7 +1910,11 @@
     ldp d4, d5, [sp, #96]
     ldp d6, d7, [sp, #112]
 
-    add sp, sp, #128
+    // Load hidden arg (x15) for @CriticalNative and SP for out args.
+    ldp x15, xIP1, [sp, #128]
+
+    // Apply the new SP for out args, releasing unneeded reserved area.
+    mov sp, xIP1
 
     blr xIP0        // native call.
 
@@ -2468,6 +2186,17 @@
 #endif
 END art_quick_indexof
 
+    .extern artStringBuilderAppend
+ENTRY art_quick_string_builder_append
+    SETUP_SAVE_REFS_ONLY_FRAME          // save callee saves in case of GC
+    add    x1, sp, #(FRAME_SIZE_SAVE_REFS_ONLY + __SIZEOF_POINTER__)  // pass args
+    mov    x2, xSELF                    // pass Thread::Current
+    bl     artStringBuilderAppend       // (uint32_t, const uint32_t*, Thread*)
+    RESTORE_SAVE_REFS_ONLY_FRAME
+    REFRESH_MARKING_REGISTER
+    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_string_builder_append
+
     /*
      * Create a function `name` calling the ReadBarrier::Mark routine,
      * getting its argument and returning its result through W register
@@ -2798,8 +2527,7 @@
      *   art_quick_read_barrier_mark_introspection_gc_roots:
      *     GC root entrypoint code.
      */
-    .balign 512
-ENTRY art_quick_read_barrier_mark_introspection
+ENTRY_ALIGNED art_quick_read_barrier_mark_introspection, 512
     // At this point, IP0 contains the reference, IP1 can be freely used.
     // For heap poisoning, the reference is poisoned, so unpoison it first.
     UNPOISON_HEAP_REF wIP0
@@ -2883,3 +2611,75 @@
     RESTORE_TWO_REGS_DECREASE_FRAME x19, xLR, 16
     ret
 END ExecuteSwitchImplAsm
+
+// x0 contains the class, x8 contains the inline cache. x9-x15 can be used.
+ENTRY art_quick_update_inline_cache
+#if (INLINE_CACHE_SIZE != 5)
+#error "INLINE_CACHE_SIZE not as expected."
+#endif
+#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
+    // Don't update the cache if we are marking.
+    cbnz wMR, .Ldone
+#endif
+.Lentry1:
+    ldr w9, [x8, #INLINE_CACHE_CLASSES_OFFSET]
+    cmp w9, w0
+    beq .Ldone
+    cbnz w9, .Lentry2
+    add x10, x8, #INLINE_CACHE_CLASSES_OFFSET
+    ldxr w9, [x10]
+    cbnz w9, .Lentry1
+    stxr  w9, w0, [x10]
+    cbz   w9, .Ldone
+    b .Lentry1
+.Lentry2:
+    ldr w9, [x8, #INLINE_CACHE_CLASSES_OFFSET+4]
+    cmp w9, w0
+    beq .Ldone
+    cbnz w9, .Lentry3
+    add x10, x8, #INLINE_CACHE_CLASSES_OFFSET+4
+    ldxr w9, [x10]
+    cbnz w9, .Lentry2
+    stxr  w9, w0, [x10]
+    cbz   w9, .Ldone
+    b .Lentry2
+.Lentry3:
+    ldr w9, [x8, #INLINE_CACHE_CLASSES_OFFSET+8]
+    cmp w9, w0
+    beq .Ldone
+    cbnz w9, .Lentry4
+    add x10, x8, #INLINE_CACHE_CLASSES_OFFSET+8
+    ldxr w9, [x10]
+    cbnz w9, .Lentry3
+    stxr  w9, w0, [x10]
+    cbz   w9, .Ldone
+    b .Lentry3
+.Lentry4:
+    ldr w9, [x8, #INLINE_CACHE_CLASSES_OFFSET+12]
+    cmp w9, w0
+    beq .Ldone
+    cbnz w9, .Lentry5
+    add x10, x8, #INLINE_CACHE_CLASSES_OFFSET+12
+    ldxr w9, [x10]
+    cbnz w9, .Lentry4
+    stxr  w9, w0, [x10]
+    cbz   w9, .Ldone
+    b .Lentry4
+.Lentry5:
+    // Unconditionally store, the inline cache is megamorphic.
+    str  w0, [x8, #INLINE_CACHE_CLASSES_OFFSET+16]
+.Ldone:
+    ret
+END art_quick_update_inline_cache
+
+// On entry, method is at the bottom of the stack.
+ENTRY art_quick_compile_optimized
+    SETUP_SAVE_EVERYTHING_FRAME
+    ldr x0, [sp, #FRAME_SIZE_SAVE_EVERYTHING] // pass ArtMethod
+    mov x1, xSELF                             // pass Thread::Current
+    bl     artCompileOptimized                // (ArtMethod*, Thread*)
+    RESTORE_SAVE_EVERYTHING_FRAME
+    // We don't need to restore the marking register here, as
+    // artCompileOptimized doesn't allow thread suspension.
+    ret
+END art_quick_compile_optimized
diff --git a/runtime/arch/context-inl.h b/runtime/arch/context-inl.h
index ddcbbb1..cac7c43 100644
--- a/runtime/arch/context-inl.h
+++ b/runtime/arch/context-inl.h
@@ -28,12 +28,6 @@
 #elif defined(__aarch64__)
 #include "arm64/context_arm64.h"
 #define RUNTIME_CONTEXT_TYPE arm64::Arm64Context
-#elif defined(__mips__) && !defined(__LP64__)
-#include "mips/context_mips.h"
-#define RUNTIME_CONTEXT_TYPE mips::MipsContext
-#elif defined(__mips__) && defined(__LP64__)
-#include "mips64/context_mips64.h"
-#define RUNTIME_CONTEXT_TYPE mips64::Mips64Context
 #elif defined(__i386__)
 #include "x86/context_x86.h"
 #define RUNTIME_CONTEXT_TYPE x86::X86Context
diff --git a/runtime/arch/context.h b/runtime/arch/context.h
index 5980b03..be7adc7 100644
--- a/runtime/arch/context.h
+++ b/runtime/arch/context.h
@@ -88,6 +88,12 @@
   // Smashes the caller save registers. If we're throwing, we don't want to return bogus values.
   virtual void SmashCallerSaves() = 0;
 
+  // Set `new_value` to the physical register containing the dex PC pointer in
+  // an nterp frame.
+  virtual void SetNterpDexPC(uintptr_t new_value ATTRIBUTE_UNUSED) {
+    abort();
+  }
+
   // Switches execution of the executing context to this context
   NO_RETURN virtual void DoLongJump() = 0;
 
diff --git a/runtime/arch/instruction_set_features.cc b/runtime/arch/instruction_set_features.cc
index c5c2d31..2581f6e 100644
--- a/runtime/arch/instruction_set_features.cc
+++ b/runtime/arch/instruction_set_features.cc
@@ -28,8 +28,6 @@
 
 #include "arm/instruction_set_features_arm.h"
 #include "arm64/instruction_set_features_arm64.h"
-#include "mips/instruction_set_features_mips.h"
-#include "mips64/instruction_set_features_mips64.h"
 #include "x86/instruction_set_features_x86.h"
 #include "x86_64/instruction_set_features_x86_64.h"
 
@@ -43,16 +41,12 @@
       return ArmInstructionSetFeatures::FromVariant(variant, error_msg);
     case InstructionSet::kArm64:
       return Arm64InstructionSetFeatures::FromVariant(variant, error_msg);
-    case InstructionSet::kMips:
-      return MipsInstructionSetFeatures::FromVariant(variant, error_msg);
-    case InstructionSet::kMips64:
-      return Mips64InstructionSetFeatures::FromVariant(variant, error_msg);
     case InstructionSet::kX86:
       return X86InstructionSetFeatures::FromVariant(variant, error_msg);
     case InstructionSet::kX86_64:
       return X86_64InstructionSetFeatures::FromVariant(variant, error_msg);
 
-    case InstructionSet::kNone:
+    default:
       break;
   }
   UNIMPLEMENTED(FATAL) << isa;
@@ -70,12 +64,6 @@
     case InstructionSet::kArm64:
       result = Arm64InstructionSetFeatures::FromBitmap(bitmap);
       break;
-    case InstructionSet::kMips:
-      result = MipsInstructionSetFeatures::FromBitmap(bitmap);
-      break;
-    case InstructionSet::kMips64:
-      result = Mips64InstructionSetFeatures::FromBitmap(bitmap);
-      break;
     case InstructionSet::kX86:
       result = X86InstructionSetFeatures::FromBitmap(bitmap);
       break;
@@ -83,7 +71,6 @@
       result = X86_64InstructionSetFeatures::FromBitmap(bitmap);
       break;
 
-    case InstructionSet::kNone:
     default:
       UNIMPLEMENTED(FATAL) << isa;
       UNREACHABLE();
@@ -99,16 +86,12 @@
       return ArmInstructionSetFeatures::FromCppDefines();
     case InstructionSet::kArm64:
       return Arm64InstructionSetFeatures::FromCppDefines();
-    case InstructionSet::kMips:
-      return MipsInstructionSetFeatures::FromCppDefines();
-    case InstructionSet::kMips64:
-      return Mips64InstructionSetFeatures::FromCppDefines();
     case InstructionSet::kX86:
       return X86InstructionSetFeatures::FromCppDefines();
     case InstructionSet::kX86_64:
       return X86_64InstructionSetFeatures::FromCppDefines();
 
-    case InstructionSet::kNone:
+    default:
       break;
   }
   UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -133,16 +116,12 @@
       return ArmInstructionSetFeatures::FromCpuInfo();
     case InstructionSet::kArm64:
       return Arm64InstructionSetFeatures::FromCpuInfo();
-    case InstructionSet::kMips:
-      return MipsInstructionSetFeatures::FromCpuInfo();
-    case InstructionSet::kMips64:
-      return Mips64InstructionSetFeatures::FromCpuInfo();
     case InstructionSet::kX86:
       return X86InstructionSetFeatures::FromCpuInfo();
     case InstructionSet::kX86_64:
       return X86_64InstructionSetFeatures::FromCpuInfo();
 
-    case InstructionSet::kNone:
+    default:
       break;
   }
   UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -156,16 +135,12 @@
       return ArmInstructionSetFeatures::FromHwcap();
     case InstructionSet::kArm64:
       return Arm64InstructionSetFeatures::FromHwcap();
-    case InstructionSet::kMips:
-      return MipsInstructionSetFeatures::FromHwcap();
-    case InstructionSet::kMips64:
-      return Mips64InstructionSetFeatures::FromHwcap();
     case InstructionSet::kX86:
       return X86InstructionSetFeatures::FromHwcap();
     case InstructionSet::kX86_64:
       return X86_64InstructionSetFeatures::FromHwcap();
 
-    case InstructionSet::kNone:
+    default:
       break;
   }
   UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -179,16 +154,12 @@
       return ArmInstructionSetFeatures::FromAssembly();
     case InstructionSet::kArm64:
       return Arm64InstructionSetFeatures::FromAssembly();
-    case InstructionSet::kMips:
-      return MipsInstructionSetFeatures::FromAssembly();
-    case InstructionSet::kMips64:
-      return Mips64InstructionSetFeatures::FromAssembly();
     case InstructionSet::kX86:
       return X86InstructionSetFeatures::FromAssembly();
     case InstructionSet::kX86_64:
       return X86_64InstructionSetFeatures::FromAssembly();
 
-    case InstructionSet::kNone:
+    default:
       break;
   }
   UNIMPLEMENTED(FATAL) << kRuntimeISA;
@@ -259,16 +230,6 @@
   return down_cast<const Arm64InstructionSetFeatures*>(this);
 }
 
-const MipsInstructionSetFeatures* InstructionSetFeatures::AsMipsInstructionSetFeatures() const {
-  DCHECK_EQ(InstructionSet::kMips, GetInstructionSet());
-  return down_cast<const MipsInstructionSetFeatures*>(this);
-}
-
-const Mips64InstructionSetFeatures* InstructionSetFeatures::AsMips64InstructionSetFeatures() const {
-  DCHECK_EQ(InstructionSet::kMips64, GetInstructionSet());
-  return down_cast<const Mips64InstructionSetFeatures*>(this);
-}
-
 const X86InstructionSetFeatures* InstructionSetFeatures::AsX86InstructionSetFeatures() const {
   DCHECK(InstructionSet::kX86 == GetInstructionSet() ||
          InstructionSet::kX86_64 == GetInstructionSet());
diff --git a/runtime/arch/instruction_set_features.h b/runtime/arch/instruction_set_features.h
index 9222a7b..78ce580 100644
--- a/runtime/arch/instruction_set_features.h
+++ b/runtime/arch/instruction_set_features.h
@@ -28,8 +28,6 @@
 
 class ArmInstructionSetFeatures;
 class Arm64InstructionSetFeatures;
-class MipsInstructionSetFeatures;
-class Mips64InstructionSetFeatures;
 class X86InstructionSetFeatures;
 class X86_64InstructionSetFeatures;
 
@@ -114,12 +112,6 @@
   // Down cast this Arm64InstructionFeatures.
   const Arm64InstructionSetFeatures* AsArm64InstructionSetFeatures() const;
 
-  // Down cast this MipsInstructionFeatures.
-  const MipsInstructionSetFeatures* AsMipsInstructionSetFeatures() const;
-
-  // Down cast this Mips64InstructionFeatures.
-  const Mips64InstructionSetFeatures* AsMips64InstructionSetFeatures() const;
-
   // Down cast this X86InstructionFeatures.
   const X86InstructionSetFeatures* AsX86InstructionSetFeatures() const;
 
diff --git a/runtime/arch/memcmp16.h b/runtime/arch/memcmp16.h
index b051a1c..0226c4e 100644
--- a/runtime/arch/memcmp16.h
+++ b/runtime/arch/memcmp16.h
@@ -30,7 +30,7 @@
 //
 // In both cases, MemCmp16 is declared.
 
-#if defined(__aarch64__) || defined(__arm__) || defined(__mips__) || defined(__i386__) || defined(__x86_64__)
+#if defined(__aarch64__) || defined(__arm__) || defined(__i386__) || defined(__x86_64__)
 
 extern "C" uint32_t __memcmp16(const uint16_t* s0, const uint16_t* s1, size_t count);
 #define MemCmp16 __memcmp16
diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S
deleted file mode 100644
index fa51059..0000000
--- a/runtime/arch/mips/asm_support_mips.S
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
-#define ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
-
-#include "asm_support_mips.h"
-
-// Define special registers.
-
-// Register holding suspend check count down.
-#define rSUSPEND $s0
-// Register holding Thread::Current().
-#define rSELF $s1
-
-     // Declare a function called name, doesn't set up $gp.
-.macro ENTRY_NO_GP_CUSTOM_CFA name, cfa_offset
-    .type \name, %function
-    .global \name
-    // Cache alignment for function entry.
-    .balign 16
-\name:
-    .cfi_startproc
-     // Ensure we get a sane starting CFA.
-    .cfi_def_cfa $sp, \cfa_offset
-.endm
-
-     // Declare a function called name, doesn't set up $gp.
-.macro ENTRY_NO_GP name
-    ENTRY_NO_GP_CUSTOM_CFA \name, 0
-.endm
-
-     // Declare a function called name, sets up $gp.
-.macro ENTRY name
-    ENTRY_NO_GP \name
-    // Load $gp. We expect that ".set noreorder" is in effect.
-    .cpload $t9
-    // Declare a local convenience label to be branched to when $gp is already set up.
-.L\name\()_gp_set:
-.endm
-
-.macro END name
-    .cfi_endproc
-    .size \name, .-\name
-.endm
-
-.macro UNIMPLEMENTED name
-    ENTRY \name
-    break
-    break
-    END \name
-.endm
-
-#if defined(__mips_isa_rev) && __mips_isa_rev > 2
-  /* mips32r5 & mips32r6 have mthc1 op, and have 64-bit fp regs,
-     and in FPXX abi we avoid referring to odd-numbered fp regs */
-
-/* LDu: Load 64-bit floating-point value to float reg feven,
-   from unaligned (mod-4-aligned) mem location disp(base) */
-.macro LDu feven,fodd,disp,base,temp
-  l.s   \feven, \disp(\base)
-  lw    \temp, \disp+4(\base)
-  mthc1 \temp, \feven
-.endm
-
-/* SDu: Store 64-bit floating-point value from float reg feven,
-   to unaligned (mod-4-aligned) mem location disp(base) */
-.macro SDu feven,fodd,disp,base,temp
-  mfhc1 \temp, \feven
-  s.s   \feven, \disp(\base)
-  sw    \temp, \disp+4(\base)
-.endm
-
-/* MTD: Move double, from general regpair (reven,rodd)
-        to float regpair (feven,fodd) */
-.macro MTD reven,rodd,feven,fodd
-  mtc1  \reven, \feven
-  mthc1 \rodd, \feven
-.endm
-
-#else
-  /* mips32r1 has no mthc1 op;
-     mips32r1 and mips32r2 use 32-bit floating point register mode (FR=0),
-     and always hold doubles as (feven, fodd) fp reg pair */
-
-.macro LDu feven,fodd,disp,base,temp
-  l.s   \feven, \disp(\base)
-  l.s   \fodd,  \disp+4(\base)
-.endm
-
-.macro SDu feven,fodd,disp,base,temp
-  s.s   \feven, \disp(\base)
-  s.s   \fodd,  \disp+4(\base)
-.endm
-
-.macro MTD reven,rodd,feven,fodd
-  mtc1  \reven, \feven
-  mtc1  \rodd, \fodd
-.endm
-
-#endif  /* mips_isa_rev */
-
-// Macros to poison (negate) the reference for heap poisoning.
-.macro POISON_HEAP_REF rRef
-#ifdef USE_HEAP_POISONING
-    subu \rRef, $zero, \rRef
-#endif  // USE_HEAP_POISONING
-.endm
-
-// Macros to unpoison (negate) the reference for heap poisoning.
-.macro UNPOISON_HEAP_REF rRef
-#ifdef USE_HEAP_POISONING
-    subu \rRef, $zero, \rRef
-#endif  // USE_HEAP_POISONING
-.endm
-
-// Byte size of the instructions (un)poisoning heap references.
-#ifdef USE_HEAP_POISONING
-#define HEAP_POISON_INSTR_SIZE 4
-#else
-#define HEAP_POISON_INSTR_SIZE 0
-#endif  // USE_HEAP_POISONING
-
-// Based on contents of creg select the minimum integer
-// At the end of the macro the original value of creg is lost
-.macro MINint dreg,rreg,sreg,creg
-  .set push
-  .set noat
-#if defined(_MIPS_ARCH_MIPS32R6) || defined(_MIPS_ARCH_MIPS64R6)
-  .ifc \dreg, \rreg
-  selnez \dreg, \rreg, \creg
-  seleqz \creg, \sreg, \creg
-  .else
-  seleqz \dreg, \sreg, \creg
-  selnez \creg, \rreg, \creg
-  .endif
-  or     \dreg, \dreg, \creg
-#else
-  movn   \dreg, \rreg, \creg
-  movz   \dreg, \sreg, \creg
-#endif
-  .set pop
-.endm
-
-// Find minimum of two signed registers
-.macro MINs dreg,rreg,sreg
-  .set push
-  .set noat
-  slt    $at, \rreg, \sreg
-  MINint \dreg, \rreg, \sreg, $at
-  .set pop
-.endm
-
-// Find minimum of two unsigned registers
-.macro MINu dreg,rreg,sreg
-  .set push
-  .set noat
-  sltu   $at, \rreg, \sreg
-  MINint \dreg, \rreg, \sreg, $at
-  .set pop
-.endm
-
-// This utility macro is used to check whether the address contained in
-// a register is suitably aligned. Default usage is confirm that the
-// address stored in $sp is a multiple of 16. It can be used for other
-// alignments, and for other base address registers, if needed.
-//
-// Enable this macro by running the shell command:
-//
-//    export ART_MIPS32_CHECK_ALIGNMENT=true
-//
-// NOTE: The value of alignment must be a power of 2, and must fit in an
-// unsigned 15-bit integer. The macro won't behave as expected if these
-// conditions aren't met.
-//
-.macro CHECK_ALIGNMENT ba=$sp, tmp=$at, alignment=16
-#ifdef ART_MIPS32_CHECK_ALIGNMENT
-    .set push
-    .set noat
-    .set noreorder
-    andi  \tmp, \ba, \alignment-1
-    beqz  \tmp, .+12    # Skip break instruction if base address register (ba) is aligned
-    nop
-    break
-    .set pop
-#endif
-.endm
-
-#endif  // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
diff --git a/runtime/arch/mips/asm_support_mips.h b/runtime/arch/mips/asm_support_mips.h
deleted file mode 100644
index bec5238..0000000
--- a/runtime/arch/mips/asm_support_mips.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
-#define ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
-
-#include "asm_support.h"
-
-#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVES 112
-#define FRAME_SIZE_SAVE_REFS_ONLY 48
-#define FRAME_SIZE_SAVE_REFS_AND_ARGS 112
-#define FRAME_SIZE_SAVE_EVERYTHING 256
-#define FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT FRAME_SIZE_SAVE_EVERYTHING
-#define FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK FRAME_SIZE_SAVE_EVERYTHING
-
-// &art_quick_read_barrier_mark_introspection is the first of many entry points:
-//   21 entry points for long field offsets, large array indices and variable array indices
-//     (see macro BRB_FIELD_LONG_OFFSET_ENTRY)
-//   21 entry points for short field offsets and small array indices
-//     (see macro BRB_FIELD_SHORT_OFFSET_ENTRY)
-//   21 entry points for GC roots
-//     (see macro BRB_GC_ROOT_ENTRY)
-
-// There are as many entry points of each kind as there are registers that
-// can hold a reference: V0-V1, A0-A3, T0-T7, S2-S8.
-#define BAKER_MARK_INTROSPECTION_REGISTER_COUNT 21
-
-#define BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE (8 * 4)  // 8 instructions in
-                                                                 // BRB_FIELD_*_OFFSET_ENTRY.
-
-#define BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET \
-    (2 * BAKER_MARK_INTROSPECTION_REGISTER_COUNT * BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE)
-
-#define BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE (4 * 4)  // 4 instructions in BRB_GC_ROOT_ENTRY.
-
-#endif  // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
diff --git a/runtime/arch/mips/callee_save_frame_mips.h b/runtime/arch/mips/callee_save_frame_mips.h
deleted file mode 100644
index 84ce209..0000000
--- a/runtime/arch/mips/callee_save_frame_mips.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS_CALLEE_SAVE_FRAME_MIPS_H_
-#define ART_RUNTIME_ARCH_MIPS_CALLEE_SAVE_FRAME_MIPS_H_
-
-#include "arch/instruction_set.h"
-#include "base/bit_utils.h"
-#include "base/callee_save_type.h"
-#include "base/enums.h"
-#include "quick/quick_method_frame_info.h"
-#include "registers_mips.h"
-#include "runtime_globals.h"
-
-namespace art {
-namespace mips {
-
-static constexpr uint32_t kMipsCalleeSaveAlwaysSpills =
-    (1u << art::mips::RA);
-static constexpr uint32_t kMipsCalleeSaveRefSpills =
-    (1 << art::mips::S2) | (1 << art::mips::S3) | (1 << art::mips::S4) | (1 << art::mips::S5) |
-    (1 << art::mips::S6) | (1 << art::mips::S7) | (1 << art::mips::GP) | (1 << art::mips::FP);
-static constexpr uint32_t kMipsCalleeSaveArgSpills =
-    (1 << art::mips::A1) | (1 << art::mips::A2) | (1 << art::mips::A3) | (1 << art::mips::T0) |
-    (1 << art::mips::T1);
-// We want to save all floating point register pairs at addresses
-// which are multiples of 8 so that we can eliminate use of the
-// SDu/LDu macros by using sdc1/ldc1 to store/load floating
-// register values using a single instruction. Because integer
-// registers are stored at the top of the frame, to achieve having
-// the floating point register pairs aligned on multiples of 8 the
-// number of integer registers saved must be even. Previously, the
-// only case in which we saved floating point registers beneath an
-// odd number of integer registers was when "type" is
-// CalleeSaveType::kSaveAllCalleeSaves. (There are other cases in
-// which an odd number of integer registers are saved but those
-// cases don't save any floating point registers. If no floating
-// point registers are saved we don't care if the number of integer
-// registers saved is odd or even). To save an even number of
-// integer registers in this particular case we add the ZERO
-// register to the list of registers which get saved.
-static constexpr uint32_t kMipsCalleeSaveAllSpills =
-    (1 << art::mips::ZERO) | (1 << art::mips::S0) | (1 << art::mips::S1);
-static constexpr uint32_t kMipsCalleeSaveEverythingSpills =
-    (1 << art::mips::AT) | (1 << art::mips::V0) | (1 << art::mips::V1) |
-    (1 << art::mips::A0) | (1 << art::mips::A1) | (1 << art::mips::A2) | (1 << art::mips::A3) |
-    (1 << art::mips::T0) | (1 << art::mips::T1) | (1 << art::mips::T2) | (1 << art::mips::T3) |
-    (1 << art::mips::T4) | (1 << art::mips::T5) | (1 << art::mips::T6) | (1 << art::mips::T7) |
-    (1 << art::mips::S0) | (1 << art::mips::S1) | (1 << art::mips::T8) | (1 << art::mips::T9);
-
-static constexpr uint32_t kMipsCalleeSaveFpAlwaysSpills = 0;
-static constexpr uint32_t kMipsCalleeSaveFpRefSpills = 0;
-static constexpr uint32_t kMipsCalleeSaveFpArgSpills =
-    (1 << art::mips::F8) | (1 << art::mips::F9) | (1 << art::mips::F10) | (1 << art::mips::F11) |
-    (1 << art::mips::F12) | (1 << art::mips::F13) | (1 << art::mips::F14) | (1 << art::mips::F15) |
-    (1 << art::mips::F16) | (1 << art::mips::F17) | (1 << art::mips::F18) | (1 << art::mips::F19);
-static constexpr uint32_t kMipsCalleeSaveAllFPSpills =
-    (1 << art::mips::F20) | (1 << art::mips::F21) | (1 << art::mips::F22) | (1 << art::mips::F23) |
-    (1 << art::mips::F24) | (1 << art::mips::F25) | (1 << art::mips::F26) | (1 << art::mips::F27) |
-    (1 << art::mips::F28) | (1 << art::mips::F29) | (1 << art::mips::F30) | (1u << art::mips::F31);
-static constexpr uint32_t kMipsCalleeSaveFpEverythingSpills =
-    (1 << art::mips::F0) | (1 << art::mips::F1) | (1 << art::mips::F2) | (1 << art::mips::F3) |
-    (1 << art::mips::F4) | (1 << art::mips::F5) | (1 << art::mips::F6) | (1 << art::mips::F7) |
-    (1 << art::mips::F8) | (1 << art::mips::F9) | (1 << art::mips::F10) | (1 << art::mips::F11) |
-    (1 << art::mips::F12) | (1 << art::mips::F13) | (1 << art::mips::F14) | (1 << art::mips::F15) |
-    (1 << art::mips::F16) | (1 << art::mips::F17) | (1 << art::mips::F18) | (1 << art::mips::F19) |
-    (1 << art::mips::F20) | (1 << art::mips::F21) | (1 << art::mips::F22) | (1 << art::mips::F23) |
-    (1 << art::mips::F24) | (1 << art::mips::F25) | (1 << art::mips::F26) | (1 << art::mips::F27) |
-    (1 << art::mips::F28) | (1 << art::mips::F29) | (1 << art::mips::F30) | (1u << art::mips::F31);
-
-class MipsCalleeSaveFrame {
- public:
-  static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
-    type = GetCanonicalCalleeSaveType(type);
-    return kMipsCalleeSaveAlwaysSpills | kMipsCalleeSaveRefSpills |
-        (type == CalleeSaveType::kSaveRefsAndArgs ? kMipsCalleeSaveArgSpills : 0) |
-        (type == CalleeSaveType::kSaveAllCalleeSaves ? kMipsCalleeSaveAllSpills : 0) |
-        (type == CalleeSaveType::kSaveEverything ? kMipsCalleeSaveEverythingSpills : 0);
-  }
-
-  static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
-    type = GetCanonicalCalleeSaveType(type);
-    return kMipsCalleeSaveFpAlwaysSpills | kMipsCalleeSaveFpRefSpills |
-        (type == CalleeSaveType::kSaveRefsAndArgs ? kMipsCalleeSaveFpArgSpills : 0) |
-        (type == CalleeSaveType::kSaveAllCalleeSaves ? kMipsCalleeSaveAllFPSpills : 0) |
-        (type == CalleeSaveType::kSaveEverything ? kMipsCalleeSaveFpEverythingSpills : 0);
-  }
-
-  static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
-    type = GetCanonicalCalleeSaveType(type);
-    return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
-                    POPCOUNT(GetFpSpills(type))   /* fprs */ +
-                    1 /* Method* */) * static_cast<size_t>(kMipsPointerSize), kStackAlignment);
-  }
-
-  static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
-    type = GetCanonicalCalleeSaveType(type);
-    return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
-  }
-
-  static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
-    type = GetCanonicalCalleeSaveType(type);
-    return GetFrameSize(type) -
-           (POPCOUNT(GetCoreSpills(type)) +
-            POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kMipsPointerSize);
-  }
-
-  static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
-    type = GetCanonicalCalleeSaveType(type);
-    return GetFrameSize(type) -
-           POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kMipsPointerSize);
-  }
-
-  static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
-    type = GetCanonicalCalleeSaveType(type);
-    return GetFrameSize(type) - static_cast<size_t>(kMipsPointerSize);
-  }
-};
-
-}  // namespace mips
-}  // namespace art
-
-#endif  // ART_RUNTIME_ARCH_MIPS_CALLEE_SAVE_FRAME_MIPS_H_
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
deleted file mode 100644
index 3f362de..0000000
--- a/runtime/arch/mips/context_mips.cc
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "context_mips.h"
-
-#include "base/bit_utils.h"
-#include "base/bit_utils_iterator.h"
-#include "quick/quick_method_frame_info.h"
-
-namespace art {
-namespace mips {
-
-static constexpr uint32_t gZero = 0;
-
-void MipsContext::Reset() {
-  std::fill_n(gprs_, arraysize(gprs_), nullptr);
-  std::fill_n(fprs_, arraysize(fprs_), nullptr);
-  gprs_[SP] = &sp_;
-  gprs_[T9] = &t9_;
-  gprs_[A0] = &arg0_;
-  // Initialize registers with easy to spot debug values.
-  sp_ = MipsContext::kBadGprBase + SP;
-  t9_ = MipsContext::kBadGprBase + T9;
-  arg0_ = 0;
-}
-
-void MipsContext::FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& frame_info) {
-  int spill_pos = 0;
-
-  // Core registers come first, from the highest down to the lowest.
-  for (uint32_t core_reg : HighToLowBits(frame_info.CoreSpillMask())) {
-    // If the $ZERO register shows up in the list of registers to
-    // be saved this was only done to properly align the floating
-    // point register save locations to addresses which are
-    // multiples of 8. We only store the address of a register in
-    // gprs_ if the register is not the $ZERO register.  The $ZERO
-    // register is read-only so there's never a reason to save it
-    // on the stack.
-    if (core_reg != 0u) {
-      gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
-    }
-    ++spill_pos;
-  }
-  DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()));
-
-  // FP registers come second, from the highest down to the lowest.
-  for (uint32_t fp_reg : HighToLowBits(frame_info.FpSpillMask())) {
-    fprs_[fp_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
-    ++spill_pos;
-  }
-  DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()) + POPCOUNT(frame_info.FpSpillMask()));
-}
-
-void MipsContext::SetGPR(uint32_t reg, uintptr_t value) {
-  CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
-  DCHECK(IsAccessibleGPR(reg));
-  CHECK_NE(gprs_[reg], &gZero);  // Can't overwrite this static value since they are never reset.
-  *gprs_[reg] = value;
-}
-
-void MipsContext::SetFPR(uint32_t reg, uintptr_t value) {
-  CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFRegisters));
-  DCHECK(IsAccessibleFPR(reg));
-  CHECK_NE(fprs_[reg], &gZero);  // Can't overwrite this static value since they are never reset.
-  *fprs_[reg] = value;
-}
-
-void MipsContext::SmashCallerSaves() {
-  // This needs to be 0 because we want a null/zero return value.
-  gprs_[V0] = const_cast<uint32_t*>(&gZero);
-  gprs_[V1] = const_cast<uint32_t*>(&gZero);
-  gprs_[A1] = nullptr;
-  gprs_[A2] = nullptr;
-  gprs_[A3] = nullptr;
-  gprs_[T0] = nullptr;
-  gprs_[T1] = nullptr;
-
-  fprs_[F8] = nullptr;
-  fprs_[F9] = nullptr;
-  fprs_[F10] = nullptr;
-  fprs_[F11] = nullptr;
-  fprs_[F12] = nullptr;
-  fprs_[F13] = nullptr;
-  fprs_[F14] = nullptr;
-  fprs_[F15] = nullptr;
-  fprs_[F16] = nullptr;
-  fprs_[F17] = nullptr;
-  fprs_[F18] = nullptr;
-  fprs_[F19] = nullptr;
-}
-
-extern "C" NO_RETURN void art_quick_do_long_jump(uint32_t*, uint32_t*);
-
-void MipsContext::DoLongJump() {
-  uintptr_t gprs[kNumberOfCoreRegisters];
-  // Align fprs[] so that art_quick_do_long_jump() can load FPU
-  // registers from it using the ldc1 instruction.
-  uint32_t fprs[kNumberOfFRegisters] __attribute__((aligned(8)));
-  for (size_t i = 0; i < kNumberOfCoreRegisters; ++i) {
-    gprs[i] = gprs_[i] != nullptr ? *gprs_[i] : MipsContext::kBadGprBase + i;
-  }
-  for (size_t i = 0; i < kNumberOfFRegisters; ++i) {
-    fprs[i] = fprs_[i] != nullptr ? *fprs_[i] : MipsContext::kBadFprBase + i;
-  }
-  art_quick_do_long_jump(gprs, fprs);
-}
-
-}  // namespace mips
-}  // namespace art
diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h
deleted file mode 100644
index 960aea1..0000000
--- a/runtime/arch/mips/context_mips.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS_CONTEXT_MIPS_H_
-#define ART_RUNTIME_ARCH_MIPS_CONTEXT_MIPS_H_
-
-#include <android-base/logging.h>
-
-#include "arch/context.h"
-#include "base/macros.h"
-#include "registers_mips.h"
-
-namespace art {
-namespace mips {
-
-class MipsContext : public Context {
- public:
-  MipsContext() {
-    Reset();
-  }
-  virtual ~MipsContext() {}
-
-  void Reset() override;
-
-  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
-
-  void SetSP(uintptr_t new_sp) override {
-    SetGPR(SP, new_sp);
-  }
-
-  void SetPC(uintptr_t new_pc) override {
-    SetGPR(T9, new_pc);
-  }
-
-  bool IsAccessibleGPR(uint32_t reg) override {
-    CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
-    return gprs_[reg] != nullptr;
-  }
-
-  uintptr_t* GetGPRAddress(uint32_t reg) override {
-    DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
-    return gprs_[reg];
-  }
-
-  uintptr_t GetGPR(uint32_t reg) override {
-    CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
-    DCHECK(IsAccessibleGPR(reg));
-    return *gprs_[reg];
-  }
-
-  void SetGPR(uint32_t reg, uintptr_t value) override;
-
-  bool IsAccessibleFPR(uint32_t reg) override {
-    CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFRegisters));
-    return fprs_[reg] != nullptr;
-  }
-
-  uintptr_t GetFPR(uint32_t reg) override {
-    CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFRegisters));
-    DCHECK(IsAccessibleFPR(reg));
-    return *fprs_[reg];
-  }
-
-  void SetFPR(uint32_t reg, uintptr_t value) override;
-
-  void SmashCallerSaves() override;
-  NO_RETURN void DoLongJump() override;
-
-  void SetArg0(uintptr_t new_arg0_value) override {
-    SetGPR(A0, new_arg0_value);
-  }
-
- private:
-  // Pointers to registers in the stack, initialized to null except for the special cases below.
-  uintptr_t* gprs_[kNumberOfCoreRegisters];
-  uint32_t* fprs_[kNumberOfFRegisters];
-  // Hold values for sp and t9 if they are not located within a stack frame. We use t9 for the
-  // PC (as ra is required to be valid for single-frame deopt and must not be clobbered). We
-  // also need the first argument for single-frame deopt.
-  uintptr_t sp_, t9_, arg0_;
-};
-}  // namespace mips
-}  // namespace art
-
-#endif  // ART_RUNTIME_ARCH_MIPS_CONTEXT_MIPS_H_
diff --git a/runtime/arch/mips/entrypoints_direct_mips.h b/runtime/arch/mips/entrypoints_direct_mips.h
deleted file mode 100644
index 3a6625f..0000000
--- a/runtime/arch/mips/entrypoints_direct_mips.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS_ENTRYPOINTS_DIRECT_MIPS_H_
-#define ART_RUNTIME_ARCH_MIPS_ENTRYPOINTS_DIRECT_MIPS_H_
-
-#include "entrypoints/quick/quick_entrypoints_enum.h"
-
-namespace art {
-
-/* Returns true if entrypoint contains direct reference to
-   native implementation. The list is required as direct
-   entrypoints need additional handling during invocation.*/
-static constexpr bool IsDirectEntrypoint(QuickEntrypointEnum entrypoint) {
-  return
-      entrypoint == kQuickInstanceofNonTrivial ||
-      entrypoint == kQuickA64Load ||
-      entrypoint == kQuickA64Store ||
-      entrypoint == kQuickFmod ||
-      entrypoint == kQuickFmodf ||
-      entrypoint == kQuickMemcpy ||
-      entrypoint == kQuickL2d ||
-      entrypoint == kQuickL2f ||
-      entrypoint == kQuickD2iz ||
-      entrypoint == kQuickF2iz ||
-      entrypoint == kQuickD2l ||
-      entrypoint == kQuickF2l ||
-      entrypoint == kQuickLdiv ||
-      entrypoint == kQuickLmod ||
-      entrypoint == kQuickLmul ||
-      entrypoint == kQuickCmpgDouble ||
-      entrypoint == kQuickCmpgFloat ||
-      entrypoint == kQuickCmplDouble ||
-      entrypoint == kQuickCmplFloat ||
-      entrypoint == kQuickReadBarrierJni ||
-      entrypoint == kQuickReadBarrierSlow ||
-      entrypoint == kQuickReadBarrierForRootSlow ||
-      entrypoint == kQuickCos ||
-      entrypoint == kQuickSin ||
-      entrypoint == kQuickAcos ||
-      entrypoint == kQuickAsin ||
-      entrypoint == kQuickAtan ||
-      entrypoint == kQuickAtan2 ||
-      entrypoint == kQuickPow ||
-      entrypoint == kQuickCbrt ||
-      entrypoint == kQuickCosh ||
-      entrypoint == kQuickExp ||
-      entrypoint == kQuickExpm1 ||
-      entrypoint == kQuickHypot ||
-      entrypoint == kQuickLog ||
-      entrypoint == kQuickLog10 ||
-      entrypoint == kQuickNextAfter ||
-      entrypoint == kQuickSinh ||
-      entrypoint == kQuickTan ||
-      entrypoint == kQuickTanh;
-}
-
-}  // namespace art
-
-#endif  // ART_RUNTIME_ARCH_MIPS_ENTRYPOINTS_DIRECT_MIPS_H_
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
deleted file mode 100644
index cbf5681..0000000
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ /dev/null
@@ -1,483 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <string.h>
-
-#include "arch/mips/asm_support_mips.h"
-#include "base/atomic.h"
-#include "base/logging.h"
-#include "base/quasi_atomic.h"
-#include "entrypoints/entrypoint_utils.h"
-#include "entrypoints/jni/jni_entrypoints.h"
-#include "entrypoints/math_entrypoints.h"
-#include "entrypoints/quick/quick_alloc_entrypoints.h"
-#include "entrypoints/quick/quick_default_externs.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "entrypoints/runtime_asm_entrypoints.h"
-#include "entrypoints_direct_mips.h"
-#include "interpreter/interpreter.h"
-
-namespace art {
-
-// Cast entrypoints.
-extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
-
-// Read barrier entrypoints.
-// art_quick_read_barrier_mark_regXX uses a non-standard calling
-// convention: it expects its input in register XX+1 and returns its
-// result in that same register, and saves and restores all
-// caller-save registers.
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg01(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg02(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg03(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg04(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg05(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg06(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg07(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg08(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg09(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg10(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg11(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg12(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg13(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg14(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg17(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg18(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg19(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg20(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg21(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg22(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg29(mirror::Object*);
-
-extern "C" mirror::Object* art_quick_read_barrier_mark_introspection(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_introspection_gc_roots(mirror::Object*);
-extern "C" void art_quick_read_barrier_mark_introspection_end_of_entries(void);
-
-// Math entrypoints.
-extern int32_t CmpgDouble(double a, double b);
-extern int32_t CmplDouble(double a, double b);
-extern int32_t CmpgFloat(float a, float b);
-extern int32_t CmplFloat(float a, float b);
-extern "C" int64_t artLmul(int64_t a, int64_t b);
-extern "C" int64_t artLdiv(int64_t a, int64_t b);
-extern "C" int64_t artLmod(int64_t a, int64_t b);
-
-// Math conversions.
-extern "C" int32_t __fixsfsi(float op1);      // FLOAT_TO_INT
-extern "C" int32_t __fixdfsi(double op1);     // DOUBLE_TO_INT
-extern "C" float __floatdisf(int64_t op1);    // LONG_TO_FLOAT
-extern "C" double __floatdidf(int64_t op1);   // LONG_TO_DOUBLE
-extern "C" int64_t __fixsfdi(float op1);      // FLOAT_TO_LONG
-extern "C" int64_t __fixdfdi(double op1);     // DOUBLE_TO_LONG
-
-// Single-precision FP arithmetics.
-extern "C" float fmodf(float a, float b);      // REM_FLOAT[_2ADDR]
-
-// Double-precision FP arithmetics.
-extern "C" double fmod(double a, double b);     // REM_DOUBLE[_2ADDR]
-
-// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
-extern "C" int64_t __divdi3(int64_t, int64_t);
-extern "C" int64_t __moddi3(int64_t, int64_t);
-
-void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active) {
-  intptr_t introspection_field_array_entries_size =
-      reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection_gc_roots) -
-      reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection);
-  static_assert(
-      BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET == 2 *
-          BAKER_MARK_INTROSPECTION_REGISTER_COUNT * BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE,
-      "Expecting equal");
-  DCHECK_EQ(introspection_field_array_entries_size,
-            BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET);
-  intptr_t introspection_gc_root_entries_size =
-      reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection_end_of_entries) -
-      reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection_gc_roots);
-  DCHECK_EQ(introspection_gc_root_entries_size,
-            BAKER_MARK_INTROSPECTION_REGISTER_COUNT * BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE);
-  qpoints->pReadBarrierMarkReg00 = is_active ? art_quick_read_barrier_mark_introspection : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg00),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg01 = is_active ? art_quick_read_barrier_mark_reg01 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg01),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg02 = is_active ? art_quick_read_barrier_mark_reg02 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg02),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg03 = is_active ? art_quick_read_barrier_mark_reg03 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg03),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg04 = is_active ? art_quick_read_barrier_mark_reg04 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg04),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg05 = is_active ? art_quick_read_barrier_mark_reg05 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg05),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg06 = is_active ? art_quick_read_barrier_mark_reg06 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg06),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg07 = is_active ? art_quick_read_barrier_mark_reg07 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg07),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg08 = is_active ? art_quick_read_barrier_mark_reg08 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg08),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg09 = is_active ? art_quick_read_barrier_mark_reg09 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg09),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg10 = is_active ? art_quick_read_barrier_mark_reg10 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg10),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg11 = is_active ? art_quick_read_barrier_mark_reg11 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg11),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg12 = is_active ? art_quick_read_barrier_mark_reg12 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg12),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg13 = is_active ? art_quick_read_barrier_mark_reg13 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg13),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg14 = is_active ? art_quick_read_barrier_mark_reg14 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg14),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg17 = is_active ? art_quick_read_barrier_mark_reg17 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg17),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg18 = is_active ? art_quick_read_barrier_mark_reg18 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg18),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg19 = is_active ? art_quick_read_barrier_mark_reg19 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg19),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg20 = is_active ? art_quick_read_barrier_mark_reg20 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg20),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg21 = is_active ? art_quick_read_barrier_mark_reg21 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg21),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg22 = is_active ? art_quick_read_barrier_mark_reg22 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg22),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg29 = is_active ? art_quick_read_barrier_mark_reg29 : nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg29),
-                "Non-direct C stub marked direct.");
-}
-
-void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
-  // Note: MIPS has asserts checking for the type of entrypoint. Don't move it
-  //       to InitDefaultEntryPoints().
-
-  // JNI
-  jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
-
-  // Alloc
-  ResetQuickAllocEntryPoints(qpoints, /*is_active=*/ false);
-
-  // Cast
-  qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
-  static_assert(IsDirectEntrypoint(kQuickInstanceofNonTrivial), "Direct C stub not marked direct.");
-  qpoints->pCheckInstanceOf = art_quick_check_instance_of;
-  static_assert(!IsDirectEntrypoint(kQuickCheckInstanceOf), "Non-direct C stub marked direct.");
-
-  // Resolution and initialization
-  qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
-  static_assert(!IsDirectEntrypoint(kQuickInitializeStaticStorage),
-                "Non-direct C stub marked direct.");
-  qpoints->pResolveTypeAndVerifyAccess = art_quick_resolve_type_and_verify_access;
-  static_assert(!IsDirectEntrypoint(kQuickResolveTypeAndVerifyAccess),
-                "Non-direct C stub marked direct.");
-  qpoints->pResolveType = art_quick_resolve_type;
-  static_assert(!IsDirectEntrypoint(kQuickResolveType), "Non-direct C stub marked direct.");
-  qpoints->pResolveString = art_quick_resolve_string;
-  static_assert(!IsDirectEntrypoint(kQuickResolveString), "Non-direct C stub marked direct.");
-  qpoints->pResolveMethodHandle = art_quick_resolve_method_handle;
-  static_assert(!IsDirectEntrypoint(kQuickResolveMethodHandle), "Non-direct C stub marked direct.");
-  qpoints->pResolveMethodType = art_quick_resolve_method_type;
-  static_assert(!IsDirectEntrypoint(kQuickResolveMethodType), "Non-direct C stub marked direct.");
-
-  // Field
-  qpoints->pSet8Instance = art_quick_set8_instance;
-  static_assert(!IsDirectEntrypoint(kQuickSet8Instance), "Non-direct C stub marked direct.");
-  qpoints->pSet8Static = art_quick_set8_static;
-  static_assert(!IsDirectEntrypoint(kQuickSet8Static), "Non-direct C stub marked direct.");
-  qpoints->pSet16Instance = art_quick_set16_instance;
-  static_assert(!IsDirectEntrypoint(kQuickSet16Instance), "Non-direct C stub marked direct.");
-  qpoints->pSet16Static = art_quick_set16_static;
-  static_assert(!IsDirectEntrypoint(kQuickSet16Static), "Non-direct C stub marked direct.");
-  qpoints->pSet32Instance = art_quick_set32_instance;
-  static_assert(!IsDirectEntrypoint(kQuickSet32Instance), "Non-direct C stub marked direct.");
-  qpoints->pSet32Static = art_quick_set32_static;
-  static_assert(!IsDirectEntrypoint(kQuickSet32Static), "Non-direct C stub marked direct.");
-  qpoints->pSet64Instance = art_quick_set64_instance;
-  static_assert(!IsDirectEntrypoint(kQuickSet64Instance), "Non-direct C stub marked direct.");
-  qpoints->pSet64Static = art_quick_set64_static;
-  static_assert(!IsDirectEntrypoint(kQuickSet64Static), "Non-direct C stub marked direct.");
-  qpoints->pSetObjInstance = art_quick_set_obj_instance;
-  static_assert(!IsDirectEntrypoint(kQuickSetObjInstance), "Non-direct C stub marked direct.");
-  qpoints->pSetObjStatic = art_quick_set_obj_static;
-  static_assert(!IsDirectEntrypoint(kQuickSetObjStatic), "Non-direct C stub marked direct.");
-  qpoints->pGetBooleanInstance = art_quick_get_boolean_instance;
-  static_assert(!IsDirectEntrypoint(kQuickGetBooleanInstance), "Non-direct C stub marked direct.");
-  qpoints->pGetByteInstance = art_quick_get_byte_instance;
-  static_assert(!IsDirectEntrypoint(kQuickGetByteInstance), "Non-direct C stub marked direct.");
-  qpoints->pGetCharInstance = art_quick_get_char_instance;
-  static_assert(!IsDirectEntrypoint(kQuickGetCharInstance), "Non-direct C stub marked direct.");
-  qpoints->pGetShortInstance = art_quick_get_short_instance;
-  static_assert(!IsDirectEntrypoint(kQuickGetShortInstance), "Non-direct C stub marked direct.");
-  qpoints->pGet32Instance = art_quick_get32_instance;
-  static_assert(!IsDirectEntrypoint(kQuickGet32Instance), "Non-direct C stub marked direct.");
-  qpoints->pGet64Instance = art_quick_get64_instance;
-  static_assert(!IsDirectEntrypoint(kQuickGet64Instance), "Non-direct C stub marked direct.");
-  qpoints->pGetObjInstance = art_quick_get_obj_instance;
-  static_assert(!IsDirectEntrypoint(kQuickGetObjInstance), "Non-direct C stub marked direct.");
-  qpoints->pGetBooleanStatic = art_quick_get_boolean_static;
-  static_assert(!IsDirectEntrypoint(kQuickGetBooleanStatic), "Non-direct C stub marked direct.");
-  qpoints->pGetByteStatic = art_quick_get_byte_static;
-  static_assert(!IsDirectEntrypoint(kQuickGetByteStatic), "Non-direct C stub marked direct.");
-  qpoints->pGetCharStatic = art_quick_get_char_static;
-  static_assert(!IsDirectEntrypoint(kQuickGetCharStatic), "Non-direct C stub marked direct.");
-  qpoints->pGetShortStatic = art_quick_get_short_static;
-  static_assert(!IsDirectEntrypoint(kQuickGetShortStatic), "Non-direct C stub marked direct.");
-  qpoints->pGet32Static = art_quick_get32_static;
-  static_assert(!IsDirectEntrypoint(kQuickGet32Static), "Non-direct C stub marked direct.");
-  qpoints->pGet64Static = art_quick_get64_static;
-  static_assert(!IsDirectEntrypoint(kQuickGet64Static), "Non-direct C stub marked direct.");
-  qpoints->pGetObjStatic = art_quick_get_obj_static;
-  static_assert(!IsDirectEntrypoint(kQuickGetObjStatic), "Non-direct C stub marked direct.");
-
-  // Array
-  qpoints->pAputObject = art_quick_aput_obj;
-  static_assert(!IsDirectEntrypoint(kQuickAputObject), "Non-direct C stub marked direct.");
-
-  // JNI
-  qpoints->pJniMethodStart = JniMethodStart;
-  static_assert(!IsDirectEntrypoint(kQuickJniMethodStart), "Non-direct C stub marked direct.");
-  qpoints->pJniMethodFastStart = JniMethodFastStart;
-  static_assert(!IsDirectEntrypoint(kQuickJniMethodFastStart), "Non-direct C stub marked direct.");
-  qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized;
-  static_assert(!IsDirectEntrypoint(kQuickJniMethodStartSynchronized),
-                "Non-direct C stub marked direct.");
-  qpoints->pJniMethodEnd = JniMethodEnd;
-  static_assert(!IsDirectEntrypoint(kQuickJniMethodEnd), "Non-direct C stub marked direct.");
-  qpoints->pJniMethodFastEnd = JniMethodFastEnd;
-  static_assert(!IsDirectEntrypoint(kQuickJniMethodFastEnd), "Non-direct C stub marked direct.");
-  qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
-  static_assert(!IsDirectEntrypoint(kQuickJniMethodEndSynchronized),
-                "Non-direct C stub marked direct.");
-  qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
-  static_assert(!IsDirectEntrypoint(kQuickJniMethodEndWithReference),
-                "Non-direct C stub marked direct.");
-  qpoints->pJniMethodFastEndWithReference = JniMethodFastEndWithReference;
-  static_assert(!IsDirectEntrypoint(kQuickJniMethodFastEndWithReference),
-                "Non-direct C stub marked direct.");
-  qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
-  static_assert(!IsDirectEntrypoint(kQuickJniMethodEndWithReferenceSynchronized),
-                "Non-direct C stub marked direct.");
-  qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline;
-  static_assert(!IsDirectEntrypoint(kQuickQuickGenericJniTrampoline),
-                "Non-direct C stub marked direct.");
-
-  // Locks
-  if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging))) {
-    qpoints->pLockObject = art_quick_lock_object_no_inline;
-    qpoints->pUnlockObject = art_quick_unlock_object_no_inline;
-  } else {
-    qpoints->pLockObject = art_quick_lock_object;
-    qpoints->pUnlockObject = art_quick_unlock_object;
-  }
-  static_assert(!IsDirectEntrypoint(kQuickLockObject), "Non-direct C stub marked direct.");
-  static_assert(!IsDirectEntrypoint(kQuickUnlockObject), "Non-direct C stub marked direct.");
-
-  // Math
-  qpoints->pCmpgDouble = CmpgDouble;
-  static_assert(IsDirectEntrypoint(kQuickCmpgDouble), "Direct C stub not marked direct.");
-  qpoints->pCmpgFloat = CmpgFloat;
-  static_assert(IsDirectEntrypoint(kQuickCmpgFloat), "Direct C stub not marked direct.");
-  qpoints->pCmplDouble = CmplDouble;
-  static_assert(IsDirectEntrypoint(kQuickCmplDouble), "Direct C stub not marked direct.");
-  qpoints->pCmplFloat = CmplFloat;
-  static_assert(IsDirectEntrypoint(kQuickCmplFloat), "Direct C stub not marked direct.");
-  qpoints->pFmod = fmod;
-  static_assert(IsDirectEntrypoint(kQuickFmod), "Direct C stub not marked direct.");
-  qpoints->pL2d = art_l2d;
-  static_assert(IsDirectEntrypoint(kQuickL2d), "Direct C stub not marked direct.");
-  qpoints->pFmodf = fmodf;
-  static_assert(IsDirectEntrypoint(kQuickFmodf), "Direct C stub not marked direct.");
-  qpoints->pL2f = art_l2f;
-  static_assert(IsDirectEntrypoint(kQuickL2f), "Direct C stub not marked direct.");
-  qpoints->pD2iz = art_d2i;
-  static_assert(IsDirectEntrypoint(kQuickD2iz), "Direct C stub not marked direct.");
-  qpoints->pF2iz = art_f2i;
-  static_assert(IsDirectEntrypoint(kQuickF2iz), "Direct C stub not marked direct.");
-  qpoints->pIdivmod = nullptr;
-  qpoints->pD2l = art_d2l;
-  static_assert(IsDirectEntrypoint(kQuickD2l), "Direct C stub not marked direct.");
-  qpoints->pF2l = art_f2l;
-  static_assert(IsDirectEntrypoint(kQuickF2l), "Direct C stub not marked direct.");
-  qpoints->pLdiv = artLdiv;
-  static_assert(IsDirectEntrypoint(kQuickLdiv), "Direct C stub not marked direct.");
-  qpoints->pLmod = artLmod;
-  static_assert(IsDirectEntrypoint(kQuickLmod), "Direct C stub not marked direct.");
-  qpoints->pLmul = artLmul;
-  static_assert(IsDirectEntrypoint(kQuickLmul), "Direct C stub not marked direct.");
-  qpoints->pShlLong = art_quick_shl_long;
-  static_assert(!IsDirectEntrypoint(kQuickShlLong), "Non-direct C stub marked direct.");
-  qpoints->pShrLong = art_quick_shr_long;
-  static_assert(!IsDirectEntrypoint(kQuickShrLong), "Non-direct C stub marked direct.");
-  qpoints->pUshrLong = art_quick_ushr_long;
-  static_assert(!IsDirectEntrypoint(kQuickUshrLong), "Non-direct C stub marked direct.");
-
-  // More math.
-  qpoints->pCos = cos;
-  static_assert(IsDirectEntrypoint(kQuickCos), "Direct C stub marked non-direct.");
-  qpoints->pSin = sin;
-  static_assert(IsDirectEntrypoint(kQuickSin), "Direct C stub marked non-direct.");
-  qpoints->pAcos = acos;
-  static_assert(IsDirectEntrypoint(kQuickAcos), "Direct C stub marked non-direct.");
-  qpoints->pAsin = asin;
-  static_assert(IsDirectEntrypoint(kQuickAsin), "Direct C stub marked non-direct.");
-  qpoints->pAtan = atan;
-  static_assert(IsDirectEntrypoint(kQuickAtan), "Direct C stub marked non-direct.");
-  qpoints->pAtan2 = atan2;
-  static_assert(IsDirectEntrypoint(kQuickAtan2), "Direct C stub marked non-direct.");
-  qpoints->pPow = pow;
-  static_assert(IsDirectEntrypoint(kQuickPow), "Direct C stub marked non-direct.");
-  qpoints->pCbrt = cbrt;
-  static_assert(IsDirectEntrypoint(kQuickCbrt), "Direct C stub marked non-direct.");
-  qpoints->pCosh = cosh;
-  static_assert(IsDirectEntrypoint(kQuickCosh), "Direct C stub marked non-direct.");
-  qpoints->pExp = exp;
-  static_assert(IsDirectEntrypoint(kQuickExp), "Direct C stub marked non-direct.");
-  qpoints->pExpm1 = expm1;
-  static_assert(IsDirectEntrypoint(kQuickExpm1), "Direct C stub marked non-direct.");
-  qpoints->pHypot = hypot;
-  static_assert(IsDirectEntrypoint(kQuickHypot), "Direct C stub marked non-direct.");
-  qpoints->pLog = log;
-  static_assert(IsDirectEntrypoint(kQuickLog), "Direct C stub marked non-direct.");
-  qpoints->pLog10 = log10;
-  static_assert(IsDirectEntrypoint(kQuickLog10), "Direct C stub marked non-direct.");
-  qpoints->pNextAfter = nextafter;
-  static_assert(IsDirectEntrypoint(kQuickNextAfter), "Direct C stub marked non-direct.");
-  qpoints->pSinh = sinh;
-  static_assert(IsDirectEntrypoint(kQuickSinh), "Direct C stub marked non-direct.");
-  qpoints->pTan = tan;
-  static_assert(IsDirectEntrypoint(kQuickTan), "Direct C stub marked non-direct.");
-  qpoints->pTanh = tanh;
-  static_assert(IsDirectEntrypoint(kQuickTanh), "Direct C stub marked non-direct.");
-
-  // Intrinsics
-  qpoints->pIndexOf = art_quick_indexof;
-  static_assert(!IsDirectEntrypoint(kQuickIndexOf), "Non-direct C stub marked direct.");
-  qpoints->pStringCompareTo = art_quick_string_compareto;
-  static_assert(!IsDirectEntrypoint(kQuickStringCompareTo), "Non-direct C stub marked direct.");
-  qpoints->pMemcpy = memcpy;
-
-  // Invocation
-  qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline;
-  qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
-  qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
-  qpoints->pInvokeDirectTrampolineWithAccessCheck =
-      art_quick_invoke_direct_trampoline_with_access_check;
-  static_assert(!IsDirectEntrypoint(kQuickInvokeDirectTrampolineWithAccessCheck),
-                "Non-direct C stub marked direct.");
-  qpoints->pInvokeInterfaceTrampolineWithAccessCheck =
-      art_quick_invoke_interface_trampoline_with_access_check;
-  static_assert(!IsDirectEntrypoint(kQuickInvokeInterfaceTrampolineWithAccessCheck),
-                "Non-direct C stub marked direct.");
-  qpoints->pInvokeStaticTrampolineWithAccessCheck =
-      art_quick_invoke_static_trampoline_with_access_check;
-  static_assert(!IsDirectEntrypoint(kQuickInvokeStaticTrampolineWithAccessCheck),
-                "Non-direct C stub marked direct.");
-  qpoints->pInvokeSuperTrampolineWithAccessCheck =
-      art_quick_invoke_super_trampoline_with_access_check;
-  static_assert(!IsDirectEntrypoint(kQuickInvokeSuperTrampolineWithAccessCheck),
-                "Non-direct C stub marked direct.");
-  qpoints->pInvokeVirtualTrampolineWithAccessCheck =
-      art_quick_invoke_virtual_trampoline_with_access_check;
-  static_assert(!IsDirectEntrypoint(kQuickInvokeVirtualTrampolineWithAccessCheck),
-                "Non-direct C stub marked direct.");
-  qpoints->pInvokePolymorphic = art_quick_invoke_polymorphic;
-  static_assert(!IsDirectEntrypoint(kQuickInvokePolymorphic), "Non-direct C stub marked direct.");
-  qpoints->pInvokeCustom = art_quick_invoke_custom;
-  static_assert(!IsDirectEntrypoint(kQuickInvokeCustom), "Non-direct C stub marked direct.");
-
-  // Thread
-  qpoints->pTestSuspend = art_quick_test_suspend;
-  static_assert(!IsDirectEntrypoint(kQuickTestSuspend), "Non-direct C stub marked direct.");
-
-  // Throws
-  qpoints->pDeliverException = art_quick_deliver_exception;
-  static_assert(!IsDirectEntrypoint(kQuickDeliverException), "Non-direct C stub marked direct.");
-  qpoints->pThrowArrayBounds = art_quick_throw_array_bounds;
-  static_assert(!IsDirectEntrypoint(kQuickThrowArrayBounds), "Non-direct C stub marked direct.");
-  qpoints->pThrowDivZero = art_quick_throw_div_zero;
-  static_assert(!IsDirectEntrypoint(kQuickThrowDivZero), "Non-direct C stub marked direct.");
-  qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
-  static_assert(!IsDirectEntrypoint(kQuickThrowNullPointer), "Non-direct C stub marked direct.");
-  qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
-  static_assert(!IsDirectEntrypoint(kQuickThrowStackOverflow), "Non-direct C stub marked direct.");
-  qpoints->pThrowStringBounds = art_quick_throw_string_bounds;
-  static_assert(!IsDirectEntrypoint(kQuickThrowStringBounds), "Non-direct C stub marked direct.");
-
-  // Deoptimization from compiled code.
-  qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
-  static_assert(!IsDirectEntrypoint(kQuickDeoptimize), "Non-direct C stub marked direct.");
-
-  // Atomic 64-bit load/store
-  qpoints->pA64Load = QuasiAtomic::Read64;
-  static_assert(IsDirectEntrypoint(kQuickA64Load), "Non-direct C stub marked direct.");
-  qpoints->pA64Store = QuasiAtomic::Write64;
-  static_assert(IsDirectEntrypoint(kQuickA64Store), "Non-direct C stub marked direct.");
-
-  // Read barrier.
-  qpoints->pReadBarrierJni = ReadBarrierJni;
-  static_assert(IsDirectEntrypoint(kQuickReadBarrierJni), "Direct C stub not marked direct.");
-  UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
-  // Cannot use the following registers to pass arguments:
-  // 0(ZERO), 1(AT), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA).
-  // Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8).
-  qpoints->pReadBarrierMarkReg15 = nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg15),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg16 = nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg16),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg23 = nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg23),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg24 = nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg24),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg25 = nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg25),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg26 = nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg26),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg27 = nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg27),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierMarkReg28 = nullptr;
-  static_assert(!IsDirectEntrypoint(kQuickReadBarrierMarkReg28),
-                "Non-direct C stub marked direct.");
-  qpoints->pReadBarrierSlow = artReadBarrierSlow;
-  static_assert(IsDirectEntrypoint(kQuickReadBarrierSlow), "Direct C stub not marked direct.");
-  qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
-  static_assert(IsDirectEntrypoint(kQuickReadBarrierForRootSlow),
-                "Direct C stub not marked direct.");
-}
-
-}  // namespace art
diff --git a/runtime/arch/mips/fault_handler_mips.cc b/runtime/arch/mips/fault_handler_mips.cc
deleted file mode 100644
index 0354f0c..0000000
--- a/runtime/arch/mips/fault_handler_mips.cc
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <sys/ucontext.h>
-#include "fault_handler.h"
-
-#include "arch/instruction_set.h"
-#include "arch/mips/callee_save_frame_mips.h"
-#include "art_method.h"
-#include "base/callee_save_type.h"
-#include "base/hex_dump.h"
-#include "base/logging.h"  // For VLOG.
-#include "base/macros.h"
-#include "registers_mips.h"
-#include "runtime_globals.h"
-#include "thread-current-inl.h"
-
-extern "C" void art_quick_throw_stack_overflow();
-extern "C" void art_quick_throw_null_pointer_exception_from_signal();
-
-//
-// Mips specific fault handler functions.
-//
-
-namespace art {
-
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
-                                             ArtMethod** out_method,
-                                             uintptr_t* out_return_pc, uintptr_t* out_sp) {
-  struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
-  struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
-  *out_sp = static_cast<uintptr_t>(sc->sc_regs[mips::SP]);
-  VLOG(signals) << "sp: " << *out_sp;
-  if (*out_sp == 0) {
-    return;
-  }
-
-  // In the case of a stack overflow, the stack is not valid and we can't
-  // get the method from the top of the stack.  However it's in r0.
-  uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(siginfo->si_addr);  // BVA addr
-  uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
-      reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kMips));
-  if (overflow_addr == fault_addr) {
-    *out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[mips::A0]);
-  } else {
-    // The method is at the top of the stack.
-    *out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
-  }
-
-  // Work out the return PC.  This will be the address of the instruction
-  // following the faulting ldr/str instruction.
-
-  VLOG(signals) << "pc: " << std::hex
-      << static_cast<void*>(reinterpret_cast<uint8_t*>(sc->sc_pc));
-
-  *out_return_pc = sc->sc_pc + 4;
-}
-
-bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* context) {
-  if (!IsValidImplicitCheck(info)) {
-    return false;
-  }
-  // The code that looks for the catch location needs to know the value of the
-  // PC at the point of call.  For Null checks we insert a GC map that is immediately after
-  // the load/store instruction that might cause the fault.
-
-  struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
-  struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
-
-  // Decrement $sp by the frame size of the kSaveEverything method and store
-  // the fault address in the padding right after the ArtMethod*.
-  sc->sc_regs[mips::SP] -= mips::MipsCalleeSaveFrameSize(CalleeSaveType::kSaveEverything);
-  uintptr_t* padding = reinterpret_cast<uintptr_t*>(sc->sc_regs[mips::SP]) + /* ArtMethod* */ 1;
-  *padding = reinterpret_cast<uintptr_t>(info->si_addr);
-
-  sc->sc_regs[mips::RA] = sc->sc_pc + 4;      // RA needs to point to gc map location
-  sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception_from_signal);
-  // Note: This entrypoint does not rely on T9 pointing to it, so we may as well preserve T9.
-  VLOG(signals) << "Generating null pointer exception";
-  return true;
-}
-
-bool SuspensionHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
-                               void* context ATTRIBUTE_UNUSED) {
-  return false;
-}
-
-// Stack overflow fault handler.
-//
-// This checks that the fault address is equal to the current stack pointer
-// minus the overflow region size (16K typically). The instruction that
-// generates this signal is:
-//
-// lw zero, -16384(sp)
-//
-// It will fault if sp is inside the protected region on the stack.
-//
-// If we determine this is a stack overflow we need to move the stack pointer
-// to the overflow region below the protected region.
-
-bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* context) {
-  struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
-  struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
-  VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc;
-  VLOG(signals) << "sigcontext: " << std::hex << sc;
-
-  uintptr_t sp = sc->sc_regs[mips::SP];
-  VLOG(signals) << "sp: " << std::hex << sp;
-
-  uintptr_t fault_addr = reinterpret_cast<uintptr_t>(info->si_addr);  // BVA addr
-  VLOG(signals) << "fault_addr: " << std::hex << fault_addr;
-  VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
-    ", fault_addr: " << fault_addr;
-
-  uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(InstructionSet::kMips);
-
-  // Check that the fault address is the value expected for a stack overflow.
-  if (fault_addr != overflow_addr) {
-    VLOG(signals) << "Not a stack overflow";
-    return false;
-  }
-
-  VLOG(signals) << "Stack overflow found";
-
-  // Now arrange for the signal handler to return to art_quick_throw_stack_overflow_from.
-  // The value of RA must be the same as it was when we entered the code that
-  // caused this fault.  This will be inserted into a callee save frame by
-  // the function to which this handler returns (art_quick_throw_stack_overflow).
-  sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow);
-  sc->sc_regs[mips::T9] = sc->sc_pc;          // make sure T9 points to the function
-
-  // The kernel will now return to the address in sc->arm_pc.
-  return true;
-}
-}       // namespace art
diff --git a/runtime/arch/mips/instruction_set_features_mips.cc b/runtime/arch/mips/instruction_set_features_mips.cc
deleted file mode 100644
index 99ce536..0000000
--- a/runtime/arch/mips/instruction_set_features_mips.cc
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "instruction_set_features_mips.h"
-
-#include <fstream>
-#include <sstream>
-
-#include <android-base/stringprintf.h>
-#include <android-base/strings.h>
-
-#include "base/stl_util.h"
-
-namespace art {
-
-using android::base::StringPrintf;
-
-// An enum for the Mips revision.
-enum class MipsLevel {
-  kBase,
-  kR2,
-  kR5,
-  kR6
-};
-
-#if defined(_MIPS_ARCH_MIPS32R6)
-static constexpr MipsLevel kRuntimeMipsLevel = MipsLevel::kR6;
-#elif defined(_MIPS_ARCH_MIPS32R5)
-static constexpr MipsLevel kRuntimeMipsLevel = MipsLevel::kR5;
-#elif defined(_MIPS_ARCH_MIPS32R2)
-static constexpr MipsLevel kRuntimeMipsLevel = MipsLevel::kR2;
-#else
-static constexpr MipsLevel kRuntimeMipsLevel = MipsLevel::kBase;
-#endif
-
-static void GetFlagsFromCppDefined(bool* mips_isa_gte2, bool* r6, bool* fpu_32bit, bool* msa) {
-  // Override defaults based on compiler flags.
-  if (kRuntimeMipsLevel >= MipsLevel::kR2) {
-    *mips_isa_gte2 = true;
-  } else {
-    *mips_isa_gte2 = false;
-  }
-
-  if (kRuntimeMipsLevel >= MipsLevel::kR5) {
-    *fpu_32bit = false;
-    *msa = true;
-  } else {
-    *fpu_32bit = true;
-    *msa = false;
-  }
-
-  if (kRuntimeMipsLevel >= MipsLevel::kR6) {
-    *r6 = true;
-  } else {
-    *r6 = false;
-  }
-}
-
-MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromVariant(
-    const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED) {
-
-  // Override defaults based on compiler flags.
-  // This is needed when running ART test where the variant is not defined.
-  bool fpu_32bit;
-  bool mips_isa_gte2;
-  bool r6;
-  bool msa;
-  GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit, &msa);
-
-  // Override defaults based on variant string.
-  // Only care if it is R1, R2, R5 or R6 and we assume all CPUs will have a FP unit.
-  constexpr const char* kMips32Prefix = "mips32r";
-  const size_t kPrefixLength = strlen(kMips32Prefix);
-  if (variant.compare(0, kPrefixLength, kMips32Prefix, kPrefixLength) == 0 &&
-      variant.size() > kPrefixLength) {
-    r6 = (variant[kPrefixLength] >= '6');
-    fpu_32bit = (variant[kPrefixLength] < '5');
-    mips_isa_gte2 = (variant[kPrefixLength] >= '2');
-    msa = (variant[kPrefixLength] >= '5');
-  } else if (variant == "default") {
-    // Default variant has FPU, is gte2. This is the traditional setting.
-    //
-    // Note, we get FPU bitness and R6-ness from the build (using cpp defines, see above)
-    // and don't override them because many things depend on the "default" variant being
-    // sufficient for most purposes. That is, "default" should work for both R2 and R6.
-    // Use "mips32r#" to get a specific configuration, possibly not matching the runtime
-    // ISA (e.g. for ISA-specific testing of dex2oat internals).
-    mips_isa_gte2 = true;
-  } else {
-    LOG(WARNING) << "Unexpected CPU variant for Mips32 using defaults: " << variant;
-  }
-
-  return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6, msa));
-}
-
-MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromBitmap(uint32_t bitmap) {
-  bool fpu_32bit = (bitmap & kFpu32Bitfield) != 0;
-  bool mips_isa_gte2 = (bitmap & kIsaRevGte2Bitfield) != 0;
-  bool r6 = (bitmap & kR6) != 0;
-  bool msa = (bitmap & kMsaBitfield) != 0;
-  return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6, msa));
-}
-
-MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromCppDefines() {
-  bool fpu_32bit;
-  bool mips_isa_gte2;
-  bool r6;
-  bool msa;
-  GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit, &msa);
-
-  return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6, msa));
-}
-
-MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromCpuInfo() {
-  bool fpu_32bit;
-  bool mips_isa_gte2;
-  bool r6;
-  bool msa;
-  GetFlagsFromCppDefined(&mips_isa_gte2, &r6, &fpu_32bit, &msa);
-
-  msa = false;
-
-  std::ifstream in("/proc/cpuinfo");
-  if (!in.fail()) {
-    while (!in.eof()) {
-      std::string line;
-      std::getline(in, line);
-      if (!in.eof()) {
-        LOG(INFO) << "cpuinfo line: " << line;
-        if (line.find("ASEs") != std::string::npos) {
-          LOG(INFO) << "found Application Specific Extensions";
-          if (line.find("msa") != std::string::npos) {
-            msa = true;
-          }
-        }
-      }
-    }
-    in.close();
-  } else {
-    LOG(ERROR) << "Failed to open /proc/cpuinfo";
-  }
-
-  return MipsFeaturesUniquePtr(new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6, msa));
-}
-
-MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromHwcap() {
-  UNIMPLEMENTED(WARNING);
-  return FromCppDefines();
-}
-
-MipsFeaturesUniquePtr MipsInstructionSetFeatures::FromAssembly() {
-  UNIMPLEMENTED(WARNING);
-  return FromCppDefines();
-}
-
-bool MipsInstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
-  if (InstructionSet::kMips != other->GetInstructionSet()) {
-    return false;
-  }
-  const MipsInstructionSetFeatures* other_as_mips = other->AsMipsInstructionSetFeatures();
-  return (fpu_32bit_ == other_as_mips->fpu_32bit_) &&
-      (mips_isa_gte2_ == other_as_mips->mips_isa_gte2_) &&
-      (r6_ == other_as_mips->r6_) &&
-      (msa_ == other_as_mips->msa_);
-}
-
-uint32_t MipsInstructionSetFeatures::AsBitmap() const {
-  return (fpu_32bit_ ? kFpu32Bitfield : 0) |
-      (mips_isa_gte2_ ? kIsaRevGte2Bitfield : 0) |
-      (r6_ ? kR6 : 0) |
-      (msa_ ? kMsaBitfield : 0);
-}
-
-std::string MipsInstructionSetFeatures::GetFeatureString() const {
-  std::string result;
-  if (fpu_32bit_) {
-    result += "fpu32";
-  } else {
-    result += "-fpu32";
-  }
-  if (mips_isa_gte2_) {
-    result += ",mips2";
-  } else {
-    result += ",-mips2";
-  }
-  if (r6_) {
-    result += ",r6";
-  }  // Suppress non-r6.
-  if (msa_) {
-    result += ",msa";
-  } else {
-    result += ",-msa";
-  }
-  return result;
-}
-
-std::unique_ptr<const InstructionSetFeatures>
-MipsInstructionSetFeatures::AddFeaturesFromSplitString(
-    const std::vector<std::string>& features, std::string* error_msg) const {
-  bool fpu_32bit = fpu_32bit_;
-  bool mips_isa_gte2 = mips_isa_gte2_;
-  bool r6 = r6_;
-  bool msa = msa_;
-  for (const std::string& feature : features) {
-    DCHECK_EQ(android::base::Trim(feature), feature)
-        << "Feature name is not trimmed: '" << feature << "'";
-    if (feature == "fpu32") {
-      fpu_32bit = true;
-    } else if (feature == "-fpu32") {
-      fpu_32bit = false;
-    } else if (feature == "mips2") {
-      mips_isa_gte2 = true;
-    } else if (feature == "-mips2") {
-      mips_isa_gte2 = false;
-    } else if (feature == "r6") {
-      r6 = true;
-    } else if (feature == "-r6") {
-      r6 = false;
-    } else if (feature == "msa") {
-      msa = true;
-    } else if (feature == "-msa") {
-      msa = false;
-    } else {
-      *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
-      return nullptr;
-    }
-  }
-  return std::unique_ptr<const InstructionSetFeatures>(
-      new MipsInstructionSetFeatures(fpu_32bit, mips_isa_gte2, r6, msa));
-}
-
-}  // namespace art
diff --git a/runtime/arch/mips/instruction_set_features_mips.h b/runtime/arch/mips/instruction_set_features_mips.h
deleted file mode 100644
index ab5bb3c..0000000
--- a/runtime/arch/mips/instruction_set_features_mips.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS_INSTRUCTION_SET_FEATURES_MIPS_H_
-#define ART_RUNTIME_ARCH_MIPS_INSTRUCTION_SET_FEATURES_MIPS_H_
-
-#include <android-base/logging.h>
-
-#include "arch/instruction_set_features.h"
-#include "base/macros.h"
-
-namespace art {
-
-class MipsInstructionSetFeatures;
-using MipsFeaturesUniquePtr = std::unique_ptr<const MipsInstructionSetFeatures>;
-
-// Instruction set features relevant to the MIPS architecture.
-class MipsInstructionSetFeatures final : public InstructionSetFeatures {
- public:
-  // Process a CPU variant string like "r4000" and create InstructionSetFeatures.
-  static MipsFeaturesUniquePtr FromVariant(const std::string& variant, std::string* error_msg);
-
-  // Parse a bitmap and create an InstructionSetFeatures.
-  static MipsFeaturesUniquePtr FromBitmap(uint32_t bitmap);
-
-  // Turn C pre-processor #defines into the equivalent instruction set features.
-  static MipsFeaturesUniquePtr FromCppDefines();
-
-  // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
-  static MipsFeaturesUniquePtr FromCpuInfo();
-
-  // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
-  // InstructionSetFeatures.
-  static MipsFeaturesUniquePtr FromHwcap();
-
-  // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
-  // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
-  static MipsFeaturesUniquePtr FromAssembly();
-
-  bool Equals(const InstructionSetFeatures* other) const override;
-
-  InstructionSet GetInstructionSet() const override {
-    return InstructionSet::kMips;
-  }
-
-  uint32_t AsBitmap() const override;
-
-  std::string GetFeatureString() const override;
-
-  // Is this an ISA revision greater than 2 opening up new opcodes.
-  bool IsMipsIsaRevGreaterThanEqual2() const {
-    return mips_isa_gte2_;
-  }
-
-  // Floating point double registers are encoded differently based on whether the Status.FR bit is
-  // set. When the FR bit is 0 then the FPU is 32-bit, 1 its 64-bit. Return true if the code should
-  // be generated assuming Status.FR is 0.
-  bool Is32BitFloatingPoint() const {
-    return fpu_32bit_;
-  }
-
-  bool IsR6() const {
-    return r6_;
-  }
-
-  // Does it have MSA (MIPS SIMD Architecture) support.
-  bool HasMsa() const {
-    return msa_;
-  }
-
-  virtual ~MipsInstructionSetFeatures() {}
-
- protected:
-  // Parse a vector of the form "fpu32", "mips2" adding these to a new MipsInstructionSetFeatures.
-  std::unique_ptr<const InstructionSetFeatures>
-      AddFeaturesFromSplitString(const std::vector<std::string>& features,
-                                 std::string* error_msg) const override;
-
- private:
-  MipsInstructionSetFeatures(bool fpu_32bit, bool mips_isa_gte2, bool r6, bool msa)
-      : InstructionSetFeatures(),
-        fpu_32bit_(fpu_32bit),
-        mips_isa_gte2_(mips_isa_gte2),
-        r6_(r6),
-        msa_(msa) {
-    // Sanity checks.
-    if (r6) {
-      CHECK(mips_isa_gte2);
-      CHECK(!fpu_32bit);
-    }
-    if (!mips_isa_gte2) {
-      CHECK(fpu_32bit);
-    }
-  }
-
-  // Bitmap positions for encoding features as a bitmap.
-  enum {
-    kFpu32Bitfield = 1 << 0,
-    kIsaRevGte2Bitfield = 1 << 1,
-    kR6 = 1 << 2,
-    kMsaBitfield = 1 << 3,
-  };
-
-  const bool fpu_32bit_;
-  const bool mips_isa_gte2_;
-  const bool r6_;
-  const bool msa_;
-
-  DISALLOW_COPY_AND_ASSIGN(MipsInstructionSetFeatures);
-};
-
-}  // namespace art
-
-#endif  // ART_RUNTIME_ARCH_MIPS_INSTRUCTION_SET_FEATURES_MIPS_H_
diff --git a/runtime/arch/mips/instruction_set_features_mips_test.cc b/runtime/arch/mips/instruction_set_features_mips_test.cc
deleted file mode 100644
index b7de952..0000000
--- a/runtime/arch/mips/instruction_set_features_mips_test.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "instruction_set_features_mips.h"
-
-#include <gtest/gtest.h>
-
-namespace art {
-
-TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromDefaultVariant) {
-  std::string error_msg;
-  std::unique_ptr<const InstructionSetFeatures> mips_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
-  ASSERT_TRUE(mips_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(mips_features->GetInstructionSet(), InstructionSet::kMips);
-  EXPECT_TRUE(mips_features->Equals(mips_features.get()));
-  EXPECT_STREQ("fpu32,mips2,-msa", mips_features->GetFeatureString().c_str());
-  EXPECT_EQ(mips_features->AsBitmap(), 3U);
-}
-
-TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR1Variant) {
-  std::string error_msg;
-  std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r1", &error_msg));
-  ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(mips32r1_features->GetInstructionSet(), InstructionSet::kMips);
-  EXPECT_TRUE(mips32r1_features->Equals(mips32r1_features.get()));
-  EXPECT_STREQ("fpu32,-mips2,-msa", mips32r1_features->GetFeatureString().c_str());
-  EXPECT_EQ(mips32r1_features->AsBitmap(), 1U);
-
-  std::unique_ptr<const InstructionSetFeatures> mips_default_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
-  ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
-  EXPECT_FALSE(mips32r1_features->Equals(mips_default_features.get()));
-}
-
-TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR2Variant) {
-  std::string error_msg;
-  std::unique_ptr<const InstructionSetFeatures> mips32r2_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r2", &error_msg));
-  ASSERT_TRUE(mips32r2_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(mips32r2_features->GetInstructionSet(), InstructionSet::kMips);
-  EXPECT_TRUE(mips32r2_features->Equals(mips32r2_features.get()));
-  EXPECT_STREQ("fpu32,mips2,-msa", mips32r2_features->GetFeatureString().c_str());
-  EXPECT_EQ(mips32r2_features->AsBitmap(), 3U);
-
-  std::unique_ptr<const InstructionSetFeatures> mips_default_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
-  ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
-  EXPECT_TRUE(mips32r2_features->Equals(mips_default_features.get()));
-
-  std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r1", &error_msg));
-  ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
-  EXPECT_FALSE(mips32r2_features->Equals(mips32r1_features.get()));
-}
-
-TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR5Variant) {
-  std::string error_msg;
-  std::unique_ptr<const InstructionSetFeatures> mips32r5_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r5", &error_msg));
-  ASSERT_TRUE(mips32r5_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(mips32r5_features->GetInstructionSet(), InstructionSet::kMips);
-  EXPECT_TRUE(mips32r5_features->Equals(mips32r5_features.get()));
-  EXPECT_STREQ("-fpu32,mips2,msa", mips32r5_features->GetFeatureString().c_str());
-  EXPECT_EQ(mips32r5_features->AsBitmap(), 10U);
-
-  std::unique_ptr<const InstructionSetFeatures> mips_default_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
-  ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
-  EXPECT_FALSE(mips32r5_features->Equals(mips_default_features.get()));
-
-  std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r1", &error_msg));
-  ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
-  EXPECT_FALSE(mips32r5_features->Equals(mips32r1_features.get()));
-
-  std::unique_ptr<const InstructionSetFeatures> mips32r2_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r2", &error_msg));
-  ASSERT_TRUE(mips32r2_features.get() != nullptr) << error_msg;
-  EXPECT_FALSE(mips32r5_features->Equals(mips32r2_features.get()));
-}
-
-TEST(MipsInstructionSetFeaturesTest, MipsFeaturesFromR6Variant) {
-  std::string error_msg;
-  std::unique_ptr<const InstructionSetFeatures> mips32r6_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r6", &error_msg));
-  ASSERT_TRUE(mips32r6_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(mips32r6_features->GetInstructionSet(), InstructionSet::kMips);
-  EXPECT_TRUE(mips32r6_features->Equals(mips32r6_features.get()));
-  EXPECT_STREQ("-fpu32,mips2,r6,msa", mips32r6_features->GetFeatureString().c_str());
-  EXPECT_EQ(mips32r6_features->AsBitmap(), 14U);
-
-  std::unique_ptr<const InstructionSetFeatures> mips_default_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "default", &error_msg));
-  ASSERT_TRUE(mips_default_features.get() != nullptr) << error_msg;
-  EXPECT_FALSE(mips32r6_features->Equals(mips_default_features.get()));
-
-  std::unique_ptr<const InstructionSetFeatures> mips32r1_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r1", &error_msg));
-  ASSERT_TRUE(mips32r1_features.get() != nullptr) << error_msg;
-  EXPECT_FALSE(mips32r6_features->Equals(mips32r1_features.get()));
-
-  std::unique_ptr<const InstructionSetFeatures> mips32r2_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r2", &error_msg));
-  ASSERT_TRUE(mips32r2_features.get() != nullptr) << error_msg;
-  EXPECT_FALSE(mips32r6_features->Equals(mips32r2_features.get()));
-
-  std::unique_ptr<const InstructionSetFeatures> mips32r5_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kMips, "mips32r5", &error_msg));
-  ASSERT_TRUE(mips32r5_features.get() != nullptr) << error_msg;
-  EXPECT_FALSE(mips32r6_features->Equals(mips32r5_features.get()));
-}
-
-}  // namespace art
diff --git a/runtime/arch/mips/jni_entrypoints_mips.S b/runtime/arch/mips/jni_entrypoints_mips.S
deleted file mode 100644
index 2c0e750..0000000
--- a/runtime/arch/mips/jni_entrypoints_mips.S
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "asm_support_mips.S"
-
-    .set noreorder
-    .balign 4
-
-    /*
-     * Jni dlsym lookup stub.
-     */
-    .extern artFindNativeMethod
-ENTRY art_jni_dlsym_lookup_stub
-    addiu $sp, $sp, -48         # leave room for $f12, $f13, $f14, $f15, $a0, $a1, $a2, $a3, and $ra
-    .cfi_adjust_cfa_offset 48
-    sw    $ra, 32($sp)
-    .cfi_rel_offset 31, 32
-    CHECK_ALIGNMENT $sp, $t0
-    sdc1  $f14, 24($sp)
-    sdc1  $f12, 16($sp)
-    sw    $a3, 12($sp)
-    .cfi_rel_offset 7, 12
-    sw    $a2, 8($sp)
-    .cfi_rel_offset 6, 8
-    sw    $a1, 4($sp)
-    .cfi_rel_offset 5, 4
-    sw    $a0, 0($sp)
-    .cfi_rel_offset 4, 0
-    la    $t9, artFindNativeMethod
-    jalr  $t9                   # (Thread*)
-    move  $a0, $s1              # pass Thread::Current()
-    lw    $a0, 0($sp)           # restore registers from stack
-    lw    $a1, 4($sp)
-    lw    $a2, 8($sp)
-    lw    $a3, 12($sp)
-    CHECK_ALIGNMENT $sp, $t0
-    ldc1  $f12, 16($sp)
-    ldc1  $f14, 24($sp)
-    lw    $ra, 32($sp)
-    beq   $v0, $zero, .Lno_native_code_found
-    addiu $sp, $sp, 48          # restore the stack
-    .cfi_adjust_cfa_offset -48
-    move  $t9, $v0              # put method code result in $t9
-    jalr  $zero, $t9            # leaf call to method's code
-    nop
-.Lno_native_code_found:
-    jalr  $zero, $ra
-    nop
-END art_jni_dlsym_lookup_stub
diff --git a/runtime/arch/mips/memcmp16_mips.S b/runtime/arch/mips/memcmp16_mips.S
deleted file mode 100644
index c8eac9b..0000000
--- a/runtime/arch/mips/memcmp16_mips.S
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS_MEMCMP16_MIPS_S_
-#define ART_RUNTIME_ARCH_MIPS_MEMCMP16_MIPS_S_
-
-#include "asm_support_mips.S"
-
-// u4 __memcmp16(const u2*, const u2*, size_t);
-ENTRY_NO_GP __memcmp16
-  li  $t0,0
-  li  $t1,0
-  beqz  $a2,done   /* 0 length string */
-  beq $a0,$a1,done    /* strings are identical */
-
-  /* Unoptimized... */
-1:  lhu $t0,0($a0)
-  lhu $t1,0($a1)
-  addu  $a1,2
-  bne $t0,$t1,done
-  addu  $a0,2
-  subu  $a2,1
-  bnez  $a2,1b
-
-done:
-  subu  $v0,$t0,$t1
-  j $ra
-END __memcmp16
-
-#endif  // ART_RUNTIME_ARCH_MIPS_MEMCMP16_MIPS_S_
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
deleted file mode 100644
index b10d1fc..0000000
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ /dev/null
@@ -1,3295 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "asm_support_mips.S"
-
-#include "arch/quick_alloc_entrypoints.S"
-
-    .set noreorder
-    .balign 4
-
-    /* Deliver the given exception */
-    .extern artDeliverExceptionFromCode
-    /* Deliver an exception pending on a thread */
-    .extern artDeliverPendingExceptionFromCode
-
-#define ARG_SLOT_SIZE   32    // space for a0-a3 plus 4 more words
-
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
-     * Callee-save: $s0-$s8 + $gp + $ra, 11 total + 1 word for Method*
-     * Clobbers $t0 and $sp
-     * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
-     * Reserves FRAME_SIZE_SAVE_ALL_CALLEE_SAVES + ARG_SLOT_SIZE bytes on the stack
-     */
-.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
-    addiu  $sp, $sp, -112
-    .cfi_adjust_cfa_offset 112
-
-     // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 112)
-#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(MIPS) size not as expected."
-#endif
-
-    sw     $ra, 108($sp)
-    .cfi_rel_offset 31, 108
-    sw     $s8, 104($sp)
-    .cfi_rel_offset 30, 104
-    sw     $gp, 100($sp)
-    .cfi_rel_offset 28, 100
-    sw     $s7, 96($sp)
-    .cfi_rel_offset 23, 96
-    sw     $s6, 92($sp)
-    .cfi_rel_offset 22, 92
-    sw     $s5, 88($sp)
-    .cfi_rel_offset 21, 88
-    sw     $s4, 84($sp)
-    .cfi_rel_offset 20, 84
-    sw     $s3, 80($sp)
-    .cfi_rel_offset 19, 80
-    sw     $s2, 76($sp)
-    .cfi_rel_offset 18, 76
-    sw     $s1, 72($sp)
-    .cfi_rel_offset 17, 72
-    sw     $s0, 68($sp)
-    .cfi_rel_offset 16, 68
-    // 4-byte placeholder for register $zero, serving for alignment
-    // of the following double precision floating point registers.
-
-    CHECK_ALIGNMENT $sp, $t1
-    sdc1   $f30, 56($sp)
-    sdc1   $f28, 48($sp)
-    sdc1   $f26, 40($sp)
-    sdc1   $f24, 32($sp)
-    sdc1   $f22, 24($sp)
-    sdc1   $f20, 16($sp)
-
-    # 1 word for holding Method* plus 12 bytes padding to keep contents of SP
-    # a multiple of 16.
-
-    lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
-    lw $t0, 0($t0)
-    lw $t0, RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET($t0)
-    sw $t0, 0($sp)                                # Place Method* at bottom of stack.
-    sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
-    addiu  $sp, $sp, -ARG_SLOT_SIZE               # reserve argument slots on the stack
-    .cfi_adjust_cfa_offset ARG_SLOT_SIZE
-.endm
-
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly). Restoration assumes non-moving GC.
-     * Does not include rSUSPEND or rSELF
-     * callee-save: $s2-$s8 + $gp + $ra, 9 total + 2 words padding + 1 word to hold Method*
-     * Clobbers $t0 and $sp
-     * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
-     * Reserves FRAME_SIZE_SAVE_REFS_ONLY + ARG_SLOT_SIZE bytes on the stack
-     */
-.macro SETUP_SAVE_REFS_ONLY_FRAME
-    addiu  $sp, $sp, -48
-    .cfi_adjust_cfa_offset 48
-
-    // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_REFS_ONLY != 48)
-#error "FRAME_SIZE_SAVE_REFS_ONLY(MIPS) size not as expected."
-#endif
-
-    sw     $ra, 44($sp)
-    .cfi_rel_offset 31, 44
-    sw     $s8, 40($sp)
-    .cfi_rel_offset 30, 40
-    sw     $gp, 36($sp)
-    .cfi_rel_offset 28, 36
-    sw     $s7, 32($sp)
-    .cfi_rel_offset 23, 32
-    sw     $s6, 28($sp)
-    .cfi_rel_offset 22, 28
-    sw     $s5, 24($sp)
-    .cfi_rel_offset 21, 24
-    sw     $s4, 20($sp)
-    .cfi_rel_offset 20, 20
-    sw     $s3, 16($sp)
-    .cfi_rel_offset 19, 16
-    sw     $s2, 12($sp)
-    .cfi_rel_offset 18, 12
-    # 2 words for alignment and bottom word will hold Method*
-
-    lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
-    lw $t0, 0($t0)
-    lw $t0, RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET($t0)
-    sw $t0, 0($sp)                                # Place Method* at bottom of stack.
-    sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
-    addiu  $sp, $sp, -ARG_SLOT_SIZE               # reserve argument slots on the stack
-    .cfi_adjust_cfa_offset ARG_SLOT_SIZE
-.endm
-
-.macro RESTORE_SAVE_REFS_ONLY_FRAME
-    addiu  $sp, $sp, ARG_SLOT_SIZE                # remove argument slots on the stack
-    .cfi_adjust_cfa_offset -ARG_SLOT_SIZE
-    lw     $ra, 44($sp)
-    .cfi_restore 31
-    lw     $s8, 40($sp)
-    .cfi_restore 30
-    lw     $gp, 36($sp)
-    .cfi_restore 28
-    lw     $s7, 32($sp)
-    .cfi_restore 23
-    lw     $s6, 28($sp)
-    .cfi_restore 22
-    lw     $s5, 24($sp)
-    .cfi_restore 21
-    lw     $s4, 20($sp)
-    .cfi_restore 20
-    lw     $s3, 16($sp)
-    .cfi_restore 19
-    lw     $s2, 12($sp)
-    .cfi_restore 18
-    addiu  $sp, $sp, 48
-    .cfi_adjust_cfa_offset -48
-.endm
-
-.macro RESTORE_SAVE_REFS_ONLY_FRAME_AND_RETURN
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    jalr   $zero, $ra
-    nop
-.endm
-
-    /*
-     * Individually usable part of macro SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY.
-     */
-.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_S4_THRU_S8
-    sw      $s8, 104($sp)
-    .cfi_rel_offset 30, 104
-    sw      $s7, 96($sp)
-    .cfi_rel_offset 23, 96
-    sw      $s6, 92($sp)
-    .cfi_rel_offset 22, 92
-    sw      $s5, 88($sp)
-    .cfi_rel_offset 21, 88
-    sw      $s4, 84($sp)
-    .cfi_rel_offset 20, 84
-.endm
-
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs).
-     * callee-save: $a1-$a3, $t0-$t1, $s2-$s8, $gp, $ra, $f8-$f19
-     *              (26 total + 1 word padding + method*)
-     */
-.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY save_s4_thru_s8=1
-    addiu   $sp, $sp, -112
-    .cfi_adjust_cfa_offset 112
-
-    // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 112)
-#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(MIPS) size not as expected."
-#endif
-
-    sw      $ra, 108($sp)
-    .cfi_rel_offset 31, 108
-    sw      $gp, 100($sp)
-    .cfi_rel_offset 28, 100
-    .if \save_s4_thru_s8
-      SETUP_SAVE_REFS_AND_ARGS_FRAME_S4_THRU_S8
-    .endif
-    sw      $s3, 80($sp)
-    .cfi_rel_offset 19, 80
-    sw      $s2, 76($sp)
-    .cfi_rel_offset 18, 76
-    sw      $t1, 72($sp)
-    .cfi_rel_offset 9, 72
-    sw      $t0, 68($sp)
-    .cfi_rel_offset 8, 68
-    sw      $a3, 64($sp)
-    .cfi_rel_offset 7, 64
-    sw      $a2, 60($sp)
-    .cfi_rel_offset 6, 60
-    sw      $a1, 56($sp)
-    .cfi_rel_offset 5, 56
-    CHECK_ALIGNMENT $sp, $t8
-    sdc1    $f18, 48($sp)
-    sdc1    $f16, 40($sp)
-    sdc1    $f14, 32($sp)
-    sdc1    $f12, 24($sp)
-    sdc1    $f10, 16($sp)
-    sdc1    $f8,   8($sp)
-    # bottom will hold Method*
-.endm
-
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs). Restoration assumes non-moving GC.
-     * callee-save: $a1-$a3, $t0-$t1, $s2-$s8, $gp, $ra, $f8-$f19
-     *              (26 total + 1 word padding + method*)
-     * Clobbers $t0 and $sp
-     * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
-     * Reserves FRAME_SIZE_SAVE_REFS_AND_ARGS + ARG_SLOT_SIZE bytes on the stack
-     */
-.macro SETUP_SAVE_REFS_AND_ARGS_FRAME save_s4_thru_s8_only=0
-    .if \save_s4_thru_s8_only
-      // It is expected that `SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY /* save_s4_thru_s8 */ 0`
-      // has been done prior to `SETUP_SAVE_REFS_AND_ARGS_FRAME /* save_s4_thru_s8_only */ 1`.
-      SETUP_SAVE_REFS_AND_ARGS_FRAME_S4_THRU_S8
-    .else
-      SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
-    .endif
-    lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
-    lw $t0, 0($t0)
-    lw $t0, RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET($t0)
-    sw $t0, 0($sp)                                # Place Method* at bottom of stack.
-    sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
-    addiu  $sp, $sp, -ARG_SLOT_SIZE               # reserve argument slots on the stack
-    .cfi_adjust_cfa_offset ARG_SLOT_SIZE
-.endm
-
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs). Restoration assumes non-moving GC.
-     * callee-save: $a1-$a3, $t0-$t1, $s2-$s8, $gp, $ra, $f8-$f19
-     *              (26 total + 1 word padding + method*)
-     * Clobbers $sp
-     * Use $a0 as the Method* and loads it into bottom of stack.
-     * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
-     * Reserves FRAME_SIZE_SAVE_REFS_AND_ARGS + ARG_SLOT_SIZE bytes on the stack
-     */
-.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
-    SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
-    sw $a0, 0($sp)                                # Place Method* at bottom of stack.
-    sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
-    addiu  $sp, $sp, -ARG_SLOT_SIZE               # reserve argument slots on the stack
-    .cfi_adjust_cfa_offset ARG_SLOT_SIZE
-.endm
-
-    /*
-     * Individually usable part of macro RESTORE_SAVE_REFS_AND_ARGS_FRAME.
-     */
-.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME_GP
-    lw      $gp, 100($sp)
-    .cfi_restore 28
-.endm
-
-    /*
-     * Individually usable part of macro RESTORE_SAVE_REFS_AND_ARGS_FRAME.
-     */
-.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME_A1
-    lw      $a1, 56($sp)
-    .cfi_restore 5
-.endm
-
-.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME restore_s4_thru_s8=1, remove_arg_slots=1
-    .if \remove_arg_slots
-      addiu $sp, $sp, ARG_SLOT_SIZE                 # Remove argument slots from the stack.
-      .cfi_adjust_cfa_offset -ARG_SLOT_SIZE
-    .endif
-    lw      $ra, 108($sp)
-    .cfi_restore 31
-    .if \restore_s4_thru_s8
-      lw    $s8, 104($sp)
-      .cfi_restore 30
-    .endif
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME_GP
-    .if \restore_s4_thru_s8
-      lw    $s7, 96($sp)
-      .cfi_restore 23
-      lw    $s6, 92($sp)
-      .cfi_restore 22
-      lw    $s5, 88($sp)
-      .cfi_restore 21
-      lw    $s4, 84($sp)
-      .cfi_restore 20
-    .endif
-    lw      $s3, 80($sp)
-    .cfi_restore 19
-    lw      $s2, 76($sp)
-    .cfi_restore 18
-    lw      $t1, 72($sp)
-    .cfi_restore 9
-    lw      $t0, 68($sp)
-    .cfi_restore 8
-    lw      $a3, 64($sp)
-    .cfi_restore 7
-    lw      $a2, 60($sp)
-    .cfi_restore 6
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME_A1
-    CHECK_ALIGNMENT $sp, $t8
-    ldc1    $f18, 48($sp)
-    ldc1    $f16, 40($sp)
-    ldc1    $f14, 32($sp)
-    ldc1    $f12, 24($sp)
-    ldc1    $f10, 16($sp)
-    ldc1    $f8,   8($sp)
-    addiu   $sp, $sp, 112                           # Pop frame.
-    .cfi_adjust_cfa_offset -112
-.endm
-
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveEverything).
-     * when the $sp has already been decremented by FRAME_SIZE_SAVE_EVERYTHING.
-     * Callee-save: $at, $v0-$v1, $a0-$a3, $t0-$t7, $s0-$s7, $t8-$t9, $gp, $fp $ra, $f0-$f31;
-     *              28(GPR)+ 32(FPR) + 3 words for padding and 1 word for Method*
-     * Clobbers $t0 and $t1.
-     * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
-     * Reserves FRAME_SIZE_SAVE_EVERYTHING + ARG_SLOT_SIZE bytes on the stack.
-     * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP.
-     */
-.macro SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP runtime_method_offset = RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
-     // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_EVERYTHING != 256)
-#error "FRAME_SIZE_SAVE_EVERYTHING(MIPS) size not as expected."
-#endif
-
-    sw     $ra, 252($sp)
-    .cfi_rel_offset 31, 252
-    sw     $fp, 248($sp)
-    .cfi_rel_offset 30, 248
-    sw     $gp, 244($sp)
-    .cfi_rel_offset 28, 244
-    sw     $t9, 240($sp)
-    .cfi_rel_offset 25, 240
-    sw     $t8, 236($sp)
-    .cfi_rel_offset 24, 236
-    sw     $s7, 232($sp)
-    .cfi_rel_offset 23, 232
-    sw     $s6, 228($sp)
-    .cfi_rel_offset 22, 228
-    sw     $s5, 224($sp)
-    .cfi_rel_offset 21, 224
-    sw     $s4, 220($sp)
-    .cfi_rel_offset 20, 220
-    sw     $s3, 216($sp)
-    .cfi_rel_offset 19, 216
-    sw     $s2, 212($sp)
-    .cfi_rel_offset 18, 212
-    sw     $s1, 208($sp)
-    .cfi_rel_offset 17, 208
-    sw     $s0, 204($sp)
-    .cfi_rel_offset 16, 204
-    sw     $t7, 200($sp)
-    .cfi_rel_offset 15, 200
-    sw     $t6, 196($sp)
-    .cfi_rel_offset 14, 196
-    sw     $t5, 192($sp)
-    .cfi_rel_offset 13, 192
-    sw     $t4, 188($sp)
-    .cfi_rel_offset 12, 188
-    sw     $t3, 184($sp)
-    .cfi_rel_offset 11, 184
-    sw     $t2, 180($sp)
-    .cfi_rel_offset 10, 180
-    sw     $t1, 176($sp)
-    .cfi_rel_offset 9, 176
-    sw     $t0, 172($sp)
-    .cfi_rel_offset 8, 172
-    sw     $a3, 168($sp)
-    .cfi_rel_offset 7, 168
-    sw     $a2, 164($sp)
-    .cfi_rel_offset 6, 164
-    sw     $a1, 160($sp)
-    .cfi_rel_offset 5, 160
-    sw     $a0, 156($sp)
-    .cfi_rel_offset 4, 156
-    sw     $v1, 152($sp)
-    .cfi_rel_offset 3, 152
-    sw     $v0, 148($sp)
-    .cfi_rel_offset 2, 148
-
-    // Set up $gp, clobbering $ra and using the branch delay slot for a useful instruction.
-    bal 1f
-    .set push
-    .set noat
-    sw     $at, 144($sp)
-    .cfi_rel_offset 1, 144
-    .set pop
-1:
-    .cpload $ra
-
-    CHECK_ALIGNMENT $sp, $t1
-    sdc1   $f30, 136($sp)
-    sdc1   $f28, 128($sp)
-    sdc1   $f26, 120($sp)
-    sdc1   $f24, 112($sp)
-    sdc1   $f22, 104($sp)
-    sdc1   $f20,  96($sp)
-    sdc1   $f18,  88($sp)
-    sdc1   $f16,  80($sp)
-    sdc1   $f14,  72($sp)
-    sdc1   $f12,  64($sp)
-    sdc1   $f10,  56($sp)
-    sdc1   $f8,   48($sp)
-    sdc1   $f6,   40($sp)
-    sdc1   $f4,   32($sp)
-    sdc1   $f2,   24($sp)
-    sdc1   $f0,   16($sp)
-
-    # 3 words padding and 1 word for holding Method*
-
-    lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
-    lw $t0, 0($t0)
-    lw $t0, \runtime_method_offset($t0)
-    sw $t0, 0($sp)                                # Place Method* at bottom of stack.
-    sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
-    addiu  $sp, $sp, -ARG_SLOT_SIZE               # reserve argument slots on the stack
-    .cfi_adjust_cfa_offset ARG_SLOT_SIZE
-.endm
-
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveEverything).
-     * Callee-save: $at, $v0-$v1, $a0-$a3, $t0-$t7, $s0-$s7, $t8-$t9, $gp, $fp $ra, $f0-$f31;
-     *              28(GPR)+ 32(FPR) + 3 words for padding and 1 word for Method*
-     * Clobbers $t0 and $t1.
-     * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
-     * Reserves FRAME_SIZE_SAVE_EVERYTHING + ARG_SLOT_SIZE bytes on the stack.
-     * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP.
-     */
-.macro SETUP_SAVE_EVERYTHING_FRAME runtime_method_offset = RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
-    addiu  $sp, $sp, -(FRAME_SIZE_SAVE_EVERYTHING)
-    .cfi_adjust_cfa_offset (FRAME_SIZE_SAVE_EVERYTHING)
-    SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP \runtime_method_offset
-.endm
-
-.macro RESTORE_SAVE_EVERYTHING_FRAME restore_a0=1
-    addiu  $sp, $sp, ARG_SLOT_SIZE                # remove argument slots on the stack
-    .cfi_adjust_cfa_offset -ARG_SLOT_SIZE
-
-    CHECK_ALIGNMENT $sp, $t1
-    ldc1   $f30, 136($sp)
-    ldc1   $f28, 128($sp)
-    ldc1   $f26, 120($sp)
-    ldc1   $f24, 112($sp)
-    ldc1   $f22, 104($sp)
-    ldc1   $f20,  96($sp)
-    ldc1   $f18,  88($sp)
-    ldc1   $f16,  80($sp)
-    ldc1   $f14,  72($sp)
-    ldc1   $f12,  64($sp)
-    ldc1   $f10,  56($sp)
-    ldc1   $f8,   48($sp)
-    ldc1   $f6,   40($sp)
-    ldc1   $f4,   32($sp)
-    ldc1   $f2,   24($sp)
-    ldc1   $f0,   16($sp)
-
-    lw     $ra, 252($sp)
-    .cfi_restore 31
-    lw     $fp, 248($sp)
-    .cfi_restore 30
-    lw     $gp, 244($sp)
-    .cfi_restore 28
-    lw     $t9, 240($sp)
-    .cfi_restore 25
-    lw     $t8, 236($sp)
-    .cfi_restore 24
-    lw     $s7, 232($sp)
-    .cfi_restore 23
-    lw     $s6, 228($sp)
-    .cfi_restore 22
-    lw     $s5, 224($sp)
-    .cfi_restore 21
-    lw     $s4, 220($sp)
-    .cfi_restore 20
-    lw     $s3, 216($sp)
-    .cfi_restore 19
-    lw     $s2, 212($sp)
-    .cfi_restore 18
-    lw     $s1, 208($sp)
-    .cfi_restore 17
-    lw     $s0, 204($sp)
-    .cfi_restore 16
-    lw     $t7, 200($sp)
-    .cfi_restore 15
-    lw     $t6, 196($sp)
-    .cfi_restore 14
-    lw     $t5, 192($sp)
-    .cfi_restore 13
-    lw     $t4, 188($sp)
-    .cfi_restore 12
-    lw     $t3, 184($sp)
-    .cfi_restore 11
-    lw     $t2, 180($sp)
-    .cfi_restore 10
-    lw     $t1, 176($sp)
-    .cfi_restore 9
-    lw     $t0, 172($sp)
-    .cfi_restore 8
-    lw     $a3, 168($sp)
-    .cfi_restore 7
-    lw     $a2, 164($sp)
-    .cfi_restore 6
-    lw     $a1, 160($sp)
-    .cfi_restore 5
-    .if \restore_a0
-    lw     $a0, 156($sp)
-    .cfi_restore 4
-    .endif
-    lw     $v1, 152($sp)
-    .cfi_restore 3
-    lw     $v0, 148($sp)
-    .cfi_restore 2
-    .set push
-    .set noat
-    lw     $at, 144($sp)
-    .cfi_restore 1
-    .set pop
-
-    addiu  $sp, $sp, 256            # pop frame
-    .cfi_adjust_cfa_offset -256
-.endm
-
-    /*
-     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
-     * exception is Thread::Current()->exception_ when the runtime method frame is ready.
-     * Requires $gp properly set up.
-     */
-.macro DELIVER_PENDING_EXCEPTION_FRAME_READY
-    la      $t9, artDeliverPendingExceptionFromCode
-    jalr    $zero, $t9                   # artDeliverPendingExceptionFromCode(Thread*)
-    move    $a0, rSELF                   # pass Thread::Current
-.endm
-
-    /*
-     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
-     * exception is Thread::Current()->exception_.
-     * Requires $gp properly set up.
-     */
-.macro DELIVER_PENDING_EXCEPTION
-    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME    # save callee saves for throw
-    DELIVER_PENDING_EXCEPTION_FRAME_READY
-.endm
-
-.macro RETURN_IF_NO_EXCEPTION
-    lw     $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    bnez   $t0, 1f                       # success if no exception is pending
-    nop
-    jalr   $zero, $ra
-    nop
-1:
-    DELIVER_PENDING_EXCEPTION
-.endm
-
-.macro RETURN_IF_ZERO
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    bnez   $v0, 1f                       # success?
-    nop
-    jalr   $zero, $ra                    # return on success
-    nop
-1:
-    DELIVER_PENDING_EXCEPTION
-.endm
-
-.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    beqz   $v0, 1f                       # success?
-    nop
-    jalr   $zero, $ra                    # return on success
-    nop
-1:
-    DELIVER_PENDING_EXCEPTION
-.endm
-
-    /*
-     * On stack replacement stub.
-     * On entry:
-     *   a0 = stack to copy
-     *   a1 = size of stack
-     *   a2 = pc to call
-     *   a3 = JValue* result
-     *   [sp + 16] = shorty
-     *   [sp + 20] = thread
-     */
-ENTRY art_quick_osr_stub
-    // Save callee general purpose registers, RA and GP.
-    addiu  $sp, $sp, -48
-    .cfi_adjust_cfa_offset 48
-    sw     $ra, 44($sp)
-    .cfi_rel_offset 31, 44
-    sw     $s8, 40($sp)
-    .cfi_rel_offset 30, 40
-    sw     $gp, 36($sp)
-    .cfi_rel_offset 28, 36
-    sw     $s7, 32($sp)
-    .cfi_rel_offset 23, 32
-    sw     $s6, 28($sp)
-    .cfi_rel_offset 22, 28
-    sw     $s5, 24($sp)
-    .cfi_rel_offset 21, 24
-    sw     $s4, 20($sp)
-    .cfi_rel_offset 20, 20
-    sw     $s3, 16($sp)
-    .cfi_rel_offset 19, 16
-    sw     $s2, 12($sp)
-    .cfi_rel_offset 18, 12
-    sw     $s1, 8($sp)
-    .cfi_rel_offset 17, 8
-    sw     $s0, 4($sp)
-    .cfi_rel_offset 16, 4
-
-    move   $s8, $sp                        # Save the stack pointer
-    move   $s7, $a1                        # Save size of stack
-    move   $s6, $a2                        # Save the pc to call
-    lw     rSELF, 48+20($sp)               # Save managed thread pointer into rSELF
-    addiu  $t0, $sp, -12                   # Reserve space for stack pointer,
-                                           #    JValue* result, and ArtMethod* slot.
-    srl    $t0, $t0, 4                     # Align stack pointer to 16 bytes
-    sll    $sp, $t0, 4                     # Update stack pointer
-    sw     $s8, 4($sp)                     # Save old stack pointer
-    sw     $a3, 8($sp)                     # Save JValue* result
-    sw     $zero, 0($sp)                   # Store null for ArtMethod* at bottom of frame
-    subu   $sp, $a1                        # Reserve space for callee stack
-    move   $a2, $a1
-    move   $a1, $a0
-    move   $a0, $sp
-    la     $t9, memcpy
-    jalr   $t9                             # memcpy (dest a0, src a1, bytes a2)
-    addiu  $sp, $sp, -16                   # make space for argument slots for memcpy
-    bal    .Losr_entry                     # Call the method
-    addiu  $sp, $sp, 16                    # restore stack after memcpy
-    lw     $a2, 8($sp)                     # Restore JValue* result
-    lw     $sp, 4($sp)                     # Restore saved stack pointer
-    lw     $a0, 48+16($sp)                 # load shorty
-    lbu    $a0, 0($a0)                     # load return type
-    li     $a1, 'D'                        # put char 'D' into a1
-    beq    $a0, $a1, .Losr_fp_result       # Test if result type char == 'D'
-    li     $a1, 'F'                        # put char 'F' into a1
-    beq    $a0, $a1, .Losr_fp_result       # Test if result type char == 'F'
-    nop
-    sw     $v0, 0($a2)
-    b      .Losr_exit
-    sw     $v1, 4($a2)                     # store v0/v1 into result
-.Losr_fp_result:
-    CHECK_ALIGNMENT $a2, $t0, 8
-    sdc1   $f0, 0($a2)                     # store f0/f1 into result
-.Losr_exit:
-    lw     $ra, 44($sp)
-    .cfi_restore 31
-    lw     $s8, 40($sp)
-    .cfi_restore 30
-    lw     $gp, 36($sp)
-    .cfi_restore 28
-    lw     $s7, 32($sp)
-    .cfi_restore 23
-    lw     $s6, 28($sp)
-    .cfi_restore 22
-    lw     $s5, 24($sp)
-    .cfi_restore 21
-    lw     $s4, 20($sp)
-    .cfi_restore 20
-    lw     $s3, 16($sp)
-    .cfi_restore 19
-    lw     $s2, 12($sp)
-    .cfi_restore 18
-    lw     $s1, 8($sp)
-    .cfi_restore 17
-    lw     $s0, 4($sp)
-    .cfi_restore 16
-    jalr   $zero, $ra
-    addiu  $sp, $sp, 48
-    .cfi_adjust_cfa_offset -48
-.Losr_entry:
-    addiu  $s7, $s7, -4
-    addu   $t0, $s7, $sp
-    move   $t9, $s6
-    jalr   $zero, $t9
-    sw     $ra, 0($t0)                     # Store RA per the compiler ABI
-END art_quick_osr_stub
-
-    /*
-     * On entry $a0 is uint32_t* gprs_ and $a1 is uint32_t* fprs_.
-     * Note that fprs_ is expected to be an address that is a multiple of 8.
-     * FIXME: just guessing about the shape of the jmpbuf.  Where will pc be?
-     */
-ENTRY art_quick_do_long_jump
-    CHECK_ALIGNMENT $a1, $t1, 8
-    ldc1    $f0,   0*8($a1)
-    ldc1    $f2,   1*8($a1)
-    ldc1    $f4,   2*8($a1)
-    ldc1    $f6,   3*8($a1)
-    ldc1    $f8,   4*8($a1)
-    ldc1    $f10,  5*8($a1)
-    ldc1    $f12,  6*8($a1)
-    ldc1    $f14,  7*8($a1)
-    ldc1    $f16,  8*8($a1)
-    ldc1    $f18,  9*8($a1)
-    ldc1    $f20, 10*8($a1)
-    ldc1    $f22, 11*8($a1)
-    ldc1    $f24, 12*8($a1)
-    ldc1    $f26, 13*8($a1)
-    ldc1    $f28, 14*8($a1)
-    ldc1    $f30, 15*8($a1)
-
-    .set push
-    .set nomacro
-    .set noat
-    lw      $at, 4($a0)
-    .set pop
-    lw      $v0, 8($a0)
-    lw      $v1, 12($a0)
-    lw      $a1, 20($a0)
-    lw      $a2, 24($a0)
-    lw      $a3, 28($a0)
-    lw      $t0, 32($a0)
-    lw      $t1, 36($a0)
-    lw      $t2, 40($a0)
-    lw      $t3, 44($a0)
-    lw      $t4, 48($a0)
-    lw      $t5, 52($a0)
-    lw      $t6, 56($a0)
-    lw      $t7, 60($a0)
-    lw      $s0, 64($a0)
-    lw      $s1, 68($a0)
-    lw      $s2, 72($a0)
-    lw      $s3, 76($a0)
-    lw      $s4, 80($a0)
-    lw      $s5, 84($a0)
-    lw      $s6, 88($a0)
-    lw      $s7, 92($a0)
-    lw      $t8, 96($a0)
-    lw      $t9, 100($a0)
-    lw      $gp, 112($a0)
-    lw      $sp, 116($a0)
-    lw      $fp, 120($a0)
-    lw      $ra, 124($a0)
-    lw      $a0, 16($a0)
-    move    $v0, $zero          # clear result registers v0 and v1 (in branch delay slot)
-    jalr    $zero, $t9          # do long jump
-    move    $v1, $zero
-END art_quick_do_long_jump
-
-    /*
-     * Called by managed code, saves most registers (forms basis of long jump context) and passes
-     * the bottom of the stack. artDeliverExceptionFromCode will place the callee save Method* at
-     * the bottom of the thread. On entry a0 holds Throwable*
-     */
-ENTRY art_quick_deliver_exception
-    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
-    la   $t9, artDeliverExceptionFromCode
-    jalr $zero, $t9                 # artDeliverExceptionFromCode(Throwable*, Thread*)
-    move $a1, rSELF                 # pass Thread::Current
-END art_quick_deliver_exception
-
-    /*
-     * Called by managed code to create and deliver a NullPointerException
-     */
-    .extern artThrowNullPointerExceptionFromCode
-ENTRY_NO_GP art_quick_throw_null_pointer_exception
-    // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK,
-    // even after clobbering any registers we don't need to preserve, such as $gp or $t0.
-    SETUP_SAVE_EVERYTHING_FRAME
-    la   $t9, artThrowNullPointerExceptionFromCode
-    jalr $zero, $t9                 # artThrowNullPointerExceptionFromCode(Thread*)
-    move $a0, rSELF                 # pass Thread::Current
-END art_quick_throw_null_pointer_exception
-
-
-    /*
-     * Call installed by a signal handler to create and deliver a NullPointerException.
-     */
-    .extern artThrowNullPointerExceptionFromSignal
-ENTRY_NO_GP_CUSTOM_CFA art_quick_throw_null_pointer_exception_from_signal, FRAME_SIZE_SAVE_EVERYTHING
-    SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP
-    # Retrieve the fault address from the padding where the signal handler stores it.
-    lw   $a0, (ARG_SLOT_SIZE + __SIZEOF_POINTER__)($sp)
-    la   $t9, artThrowNullPointerExceptionFromSignal
-    jalr $zero, $t9                 # artThrowNullPointerExceptionFromSignal(uintptr_t, Thread*)
-    move $a1, rSELF                 # pass Thread::Current
-END art_quick_throw_null_pointer_exception_from_signal
-
-    /*
-     * Called by managed code to create and deliver an ArithmeticException
-     */
-    .extern artThrowDivZeroFromCode
-ENTRY_NO_GP art_quick_throw_div_zero
-    SETUP_SAVE_EVERYTHING_FRAME
-    la   $t9, artThrowDivZeroFromCode
-    jalr $zero, $t9                 # artThrowDivZeroFromCode(Thread*)
-    move $a0, rSELF                 # pass Thread::Current
-END art_quick_throw_div_zero
-
-    /*
-     * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException
-     */
-    .extern artThrowArrayBoundsFromCode
-ENTRY_NO_GP art_quick_throw_array_bounds
-    // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK,
-    // even after clobbering any registers we don't need to preserve, such as $gp or $t0.
-    SETUP_SAVE_EVERYTHING_FRAME
-    la   $t9, artThrowArrayBoundsFromCode
-    jalr $zero, $t9                 # artThrowArrayBoundsFromCode(index, limit, Thread*)
-    move $a2, rSELF                 # pass Thread::Current
-END art_quick_throw_array_bounds
-
-    /*
-     * Called by managed code to create and deliver a StringIndexOutOfBoundsException
-     * as if thrown from a call to String.charAt().
-     */
-    .extern artThrowStringBoundsFromCode
-ENTRY_NO_GP art_quick_throw_string_bounds
-    SETUP_SAVE_EVERYTHING_FRAME
-    la   $t9, artThrowStringBoundsFromCode
-    jalr $zero, $t9                 # artThrowStringBoundsFromCode(index, limit, Thread*)
-    move $a2, rSELF                 # pass Thread::Current
-END art_quick_throw_string_bounds
-
-    /*
-     * Called by managed code to create and deliver a StackOverflowError.
-     */
-    .extern artThrowStackOverflowFromCode
-ENTRY art_quick_throw_stack_overflow
-    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
-    la   $t9, artThrowStackOverflowFromCode
-    jalr $zero, $t9                 # artThrowStackOverflowFromCode(Thread*)
-    move $a0, rSELF                 # pass Thread::Current
-END art_quick_throw_stack_overflow
-
-    /*
-     * All generated callsites for interface invokes and invocation slow paths will load arguments
-     * as usual - except instead of loading arg0/$a0 with the target Method*, arg0/$a0 will contain
-     * the method_idx.  This wrapper will save arg1-arg3, and call the appropriate C helper.
-     * NOTE: "this" is first visable argument of the target, and so can be found in arg1/$a1.
-     *
-     * The helper will attempt to locate the target and return a 64-bit result in $v0/$v1 consisting
-     * of the target Method* in $v0 and method->code_ in $v1.
-     *
-     * If unsuccessful, the helper will return null/null. There will be a pending exception in the
-     * thread and we branch to another stub to deliver it.
-     *
-     * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
-     * pointing back to the original caller.
-     */
-.macro INVOKE_TRAMPOLINE_BODY cxx_name, save_s4_thru_s8_only=0
-    .extern \cxx_name
-    SETUP_SAVE_REFS_AND_ARGS_FRAME \save_s4_thru_s8_only  # save callee saves in case
-                                                          # allocation triggers GC
-    move  $a2, rSELF                       # pass Thread::Current
-    la    $t9, \cxx_name
-    jalr  $t9                              # (method_idx, this, Thread*, $sp)
-    addiu $a3, $sp, ARG_SLOT_SIZE          # pass $sp (remove arg slots)
-    move  $a0, $v0                         # save target Method*
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME
-    beqz  $v0, 1f
-    move  $t9, $v1                         # save $v0->code_
-    jalr  $zero, $t9
-    nop
-1:
-    DELIVER_PENDING_EXCEPTION
-.endm
-.macro INVOKE_TRAMPOLINE c_name, cxx_name
-ENTRY \c_name
-    INVOKE_TRAMPOLINE_BODY \cxx_name
-END \c_name
-.endm
-
-INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
-
-INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
-INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
-INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
-INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
-
-// Each of the following macros expands into four instructions or 16 bytes.
-// They are used to build indexable "tables" of code.
-
-.macro LOAD_WORD_TO_REG reg, next_arg, index_reg, label
-    lw    $\reg, -4($\next_arg)   # next_arg points to argument after the current one (offset is 4)
-    b     \label
-    addiu $\index_reg, 16
-    .balign 16
-.endm
-
-.macro LOAD_LONG_TO_REG reg1, reg2, next_arg, index_reg, next_index, label
-    lw    $\reg1, -8($\next_arg)  # next_arg points to argument after the current one (offset is 8)
-    lw    $\reg2, -4($\next_arg)
-    b     \label
-    li    $\index_reg, \next_index
-    .balign 16
-.endm
-
-.macro LOAD_FLOAT_TO_REG reg, next_arg, index_reg, label
-    lwc1  $\reg, -4($\next_arg)   # next_arg points to argument after the current one (offset is 4)
-    b     \label
-    addiu $\index_reg, 16
-    .balign 16
-.endm
-
-#if defined(__mips_isa_rev) && __mips_isa_rev > 2
-// LDu expands into 3 instructions for 64-bit FPU, so index_reg cannot be updated here.
-.macro LOAD_DOUBLE_TO_REG reg1, reg2, next_arg, index_reg, tmp, label
-    .set reorder                                # force use of the branch delay slot
-    LDu  $\reg1, $\reg2, -8, $\next_arg, $\tmp  # next_arg points to argument after the current one
-                                                # (offset is 8)
-    b     \label
-    .set noreorder
-    .balign 16
-.endm
-#else
-// LDu expands into 2 instructions for 32-bit FPU, so index_reg is updated here.
-.macro LOAD_DOUBLE_TO_REG reg1, reg2, next_arg, index_reg, tmp, label
-    LDu  $\reg1, $\reg2, -8, $\next_arg, $\tmp  # next_arg points to argument after the current one
-                                                # (offset is 8)
-    b     \label
-    addiu $\index_reg, 16
-    .balign 16
-.endm
-#endif
-
-.macro LOAD_END index_reg, next_index, label
-    b     \label
-    li    $\index_reg, \next_index
-    .balign 16
-.endm
-
-#define SPILL_SIZE    32
-
-    /*
-     * Invocation stub for quick code.
-     * On entry:
-     *   a0 = method pointer
-     *   a1 = argument array or null for no argument methods
-     *   a2 = size of argument array in bytes
-     *   a3 = (managed) thread pointer
-     *   [sp + 16] = JValue* result
-     *   [sp + 20] = shorty
-     */
-ENTRY art_quick_invoke_stub
-    sw    $a0, 0($sp)           # save out a0
-    addiu $sp, $sp, -SPILL_SIZE # spill s0, s1, fp, ra and gp
-    .cfi_adjust_cfa_offset SPILL_SIZE
-    sw    $gp, 16($sp)
-    sw    $ra, 12($sp)
-    .cfi_rel_offset 31, 12
-    sw    $fp, 8($sp)
-    .cfi_rel_offset 30, 8
-    sw    $s1, 4($sp)
-    .cfi_rel_offset 17, 4
-    sw    $s0, 0($sp)
-    .cfi_rel_offset 16, 0
-    move  $fp, $sp              # save sp in fp
-    .cfi_def_cfa_register 30
-    move  $s1, $a3              # move managed thread pointer into s1
-    addiu $t0, $a2, 4           # create space for ArtMethod* in frame.
-    subu  $t0, $sp, $t0         # reserve & align *stack* to 16 bytes:
-    srl   $t0, $t0, 4           #   native calling convention only aligns to 8B,
-    sll   $sp, $t0, 4           #   so we have to ensure ART 16B alignment ourselves.
-    addiu $a0, $sp, 4           # pass stack pointer + ArtMethod* as dest for memcpy
-    la    $t9, memcpy
-    jalr  $t9                   # (dest, src, bytes)
-    addiu $sp, $sp, -16         # make space for argument slots for memcpy
-    addiu $sp, $sp, 16          # restore stack after memcpy
-    lw    $gp, 16($fp)          # restore $gp
-    lw    $a0, SPILL_SIZE($fp)  # restore ArtMethod*
-    lw    $a1, 4($sp)           # a1 = this*
-    addiu $t8, $sp, 8           # t8 = pointer to the current argument (skip ArtMethod* and this*)
-    li    $t6, 0                # t6 = gpr_index = 0 (corresponds to A2; A0 and A1 are skipped)
-    li    $t7, 0                # t7 = fp_index = 0
-    lw    $t9, 20 + SPILL_SIZE($fp)  # get shorty (20 is offset from the $sp on entry + SPILL_SIZE
-                                # as the $fp is SPILL_SIZE bytes below the $sp on entry)
-    addiu $t9, 1                # t9 = shorty + 1 (skip 1 for return type)
-
-    // Load the base addresses of tabInt ... tabDouble.
-    // We will use the register indices (gpr_index, fp_index) to branch.
-    // Note that the indices are scaled by 16, so they can be added to the bases directly.
-#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
-    lapc  $t2, tabInt
-    lapc  $t3, tabLong
-    lapc  $t4, tabSingle
-    lapc  $t5, tabDouble
-#else
-    bltzal $zero, tabBase       # nal
-    addiu $t2, $ra, %lo(tabInt - tabBase)
-tabBase:
-    addiu $t3, $ra, %lo(tabLong - tabBase)
-    addiu $t4, $ra, %lo(tabSingle - tabBase)
-    addiu $t5, $ra, %lo(tabDouble - tabBase)
-#endif
-
-loop:
-    lbu   $ra, 0($t9)           # ra = shorty[i]
-    beqz  $ra, loopEnd          # finish getting args when shorty[i] == '\0'
-    addiu $t9, 1
-
-    addiu $ra, -'J'
-    beqz  $ra, isLong           # branch if result type char == 'J'
-    addiu $ra, 'J' - 'D'
-    beqz  $ra, isDouble         # branch if result type char == 'D'
-    addiu $ra, 'D' - 'F'
-    beqz  $ra, isSingle         # branch if result type char == 'F'
-
-    addu  $ra, $t2, $t6
-    jalr  $zero, $ra
-    addiu $t8, 4                # next_arg = curr_arg + 4
-
-isLong:
-    addu  $ra, $t3, $t6
-    jalr  $zero, $ra
-    addiu $t8, 8                # next_arg = curr_arg + 8
-
-isSingle:
-    addu  $ra, $t4, $t7
-    jalr  $zero, $ra
-    addiu $t8, 4                # next_arg = curr_arg + 4
-
-isDouble:
-    addu  $ra, $t5, $t7
-#if defined(__mips_isa_rev) && __mips_isa_rev > 2
-    addiu $t7, 16               # fp_index += 16 didn't fit into LOAD_DOUBLE_TO_REG
-#endif
-    jalr  $zero, $ra
-    addiu $t8, 8                # next_arg = curr_arg + 8
-
-loopEnd:
-    lw    $t9, ART_METHOD_QUICK_CODE_OFFSET_32($a0)  # get pointer to the code
-    jalr  $t9                   # call the method
-    sw    $zero, 0($sp)         # store null for ArtMethod* at bottom of frame
-    move  $sp, $fp              # restore the stack
-    lw    $s0, 0($sp)
-    .cfi_restore 16
-    lw    $s1, 4($sp)
-    .cfi_restore 17
-    lw    $fp, 8($sp)
-    .cfi_restore 30
-    lw    $ra, 12($sp)
-    .cfi_restore 31
-    addiu $sp, $sp, SPILL_SIZE
-    .cfi_adjust_cfa_offset -SPILL_SIZE
-    lw    $t0, 16($sp)          # get result pointer
-    lw    $t1, 20($sp)          # get shorty
-    lb    $t1, 0($t1)           # get result type char
-    li    $t2, 'D'              # put char 'D' into t2
-    beq   $t1, $t2, 5f          # branch if result type char == 'D'
-    li    $t3, 'F'              # put char 'F' into t3
-    beq   $t1, $t3, 5f          # branch if result type char == 'F'
-    sw    $v0, 0($t0)           # store the result
-    jalr  $zero, $ra
-    sw    $v1, 4($t0)           # store the other half of the result
-5:
-    CHECK_ALIGNMENT $t0, $t1, 8
-    sdc1  $f0, 0($t0)           # store floating point result
-    jalr  $zero, $ra
-    nop
-
-    // Note that gpr_index is kept within the range of tabInt and tabLong
-    // and fp_index is kept within the range of tabSingle and tabDouble.
-    .balign 16
-tabInt:
-    LOAD_WORD_TO_REG a2, t8, t6, loop             # a2 = current argument, gpr_index += 16
-    LOAD_WORD_TO_REG a3, t8, t6, loop             # a3 = current argument, gpr_index += 16
-    LOAD_WORD_TO_REG t0, t8, t6, loop             # t0 = current argument, gpr_index += 16
-    LOAD_WORD_TO_REG t1, t8, t6, loop             # t1 = current argument, gpr_index += 16
-    LOAD_END t6, 4*16, loop                       # no more GPR args, gpr_index = 4*16
-tabLong:
-    LOAD_LONG_TO_REG a2, a3, t8, t6, 2*16, loop   # a2_a3 = curr_arg, gpr_index = 2*16
-    LOAD_LONG_TO_REG t0, t1, t8, t6, 4*16, loop   # t0_t1 = curr_arg, gpr_index = 4*16
-    LOAD_LONG_TO_REG t0, t1, t8, t6, 4*16, loop   # t0_t1 = curr_arg, gpr_index = 4*16
-    LOAD_END t6, 4*16, loop                       # no more GPR args, gpr_index = 4*16
-    LOAD_END t6, 4*16, loop                       # no more GPR args, gpr_index = 4*16
-tabSingle:
-    LOAD_FLOAT_TO_REG f8, t8, t7, loop            # f8 = curr_arg, fp_index += 16
-    LOAD_FLOAT_TO_REG f10, t8, t7, loop           # f10 = curr_arg, fp_index += 16
-    LOAD_FLOAT_TO_REG f12, t8, t7, loop           # f12 = curr_arg, fp_index += 16
-    LOAD_FLOAT_TO_REG f14, t8, t7, loop           # f14 = curr_arg, fp_index += 16
-    LOAD_FLOAT_TO_REG f16, t8, t7, loop           # f16 = curr_arg, fp_index += 16
-    LOAD_FLOAT_TO_REG f18, t8, t7, loop           # f18 = curr_arg, fp_index += 16
-    LOAD_END t7, 6*16, loop                       # no more FPR args, fp_index = 6*16
-tabDouble:
-    LOAD_DOUBLE_TO_REG f8, f9, t8, t7, ra, loop   # f8_f9 = curr_arg; if FPU32, fp_index += 16
-    LOAD_DOUBLE_TO_REG f10, f11, t8, t7, ra, loop # f10_f11 = curr_arg; if FPU32, fp_index += 16
-    LOAD_DOUBLE_TO_REG f12, f13, t8, t7, ra, loop # f12_f13 = curr_arg; if FPU32, fp_index += 16
-    LOAD_DOUBLE_TO_REG f14, f15, t8, t7, ra, loop # f14_f15 = curr_arg; if FPU32, fp_index += 16
-    LOAD_DOUBLE_TO_REG f16, f17, t8, t7, ra, loop # f16_f17 = curr_arg; if FPU32, fp_index += 16
-    LOAD_DOUBLE_TO_REG f18, f19, t8, t7, ra, loop # f18_f19 = curr_arg; if FPU32, fp_index += 16
-    LOAD_END t7, 6*16, loop                       # no more FPR args, fp_index = 6*16
-END art_quick_invoke_stub
-
-    /*
-     * Invocation static stub for quick code.
-     * On entry:
-     *   a0 = method pointer
-     *   a1 = argument array or null for no argument methods
-     *   a2 = size of argument array in bytes
-     *   a3 = (managed) thread pointer
-     *   [sp + 16] = JValue* result
-     *   [sp + 20] = shorty
-     */
-ENTRY art_quick_invoke_static_stub
-    sw    $a0, 0($sp)           # save out a0
-    addiu $sp, $sp, -SPILL_SIZE # spill s0, s1, fp, ra and gp
-    .cfi_adjust_cfa_offset SPILL_SIZE
-    sw    $gp, 16($sp)
-    sw    $ra, 12($sp)
-    .cfi_rel_offset 31, 12
-    sw    $fp, 8($sp)
-    .cfi_rel_offset 30, 8
-    sw    $s1, 4($sp)
-    .cfi_rel_offset 17, 4
-    sw    $s0, 0($sp)
-    .cfi_rel_offset 16, 0
-    move  $fp, $sp              # save sp in fp
-    .cfi_def_cfa_register 30
-    move  $s1, $a3              # move managed thread pointer into s1
-    addiu $t0, $a2, 4           # create space for ArtMethod* in frame.
-    subu  $t0, $sp, $t0         # reserve & align *stack* to 16 bytes:
-    srl   $t0, $t0, 4           #   native calling convention only aligns to 8B,
-    sll   $sp, $t0, 4           #   so we have to ensure ART 16B alignment ourselves.
-    addiu $a0, $sp, 4           # pass stack pointer + ArtMethod* as dest for memcpy
-    la    $t9, memcpy
-    jalr  $t9                   # (dest, src, bytes)
-    addiu $sp, $sp, -16         # make space for argument slots for memcpy
-    addiu $sp, $sp, 16          # restore stack after memcpy
-    lw    $gp, 16($fp)          # restore $gp
-    lw    $a0, SPILL_SIZE($fp)  # restore ArtMethod*
-    addiu $t8, $sp, 4           # t8 = pointer to the current argument (skip ArtMethod*)
-    li    $t6, 0                # t6 = gpr_index = 0 (corresponds to A1; A0 is skipped)
-    li    $t7, 0                # t7 = fp_index = 0
-    lw    $t9, 20 + SPILL_SIZE($fp)  # get shorty (20 is offset from the $sp on entry + SPILL_SIZE
-                                # as the $fp is SPILL_SIZE bytes below the $sp on entry)
-    addiu $t9, 1                # t9 = shorty + 1 (skip 1 for return type)
-
-    // Load the base addresses of tabIntS ... tabDoubleS.
-    // We will use the register indices (gpr_index, fp_index) to branch.
-    // Note that the indices are scaled by 16, so they can be added to the bases directly.
-#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
-    lapc  $t2, tabIntS
-    lapc  $t3, tabLongS
-    lapc  $t4, tabSingleS
-    lapc  $t5, tabDoubleS
-#else
-    bltzal $zero, tabBaseS      # nal
-    addiu $t2, $ra, %lo(tabIntS - tabBaseS)
-tabBaseS:
-    addiu $t3, $ra, %lo(tabLongS - tabBaseS)
-    addiu $t4, $ra, %lo(tabSingleS - tabBaseS)
-    addiu $t5, $ra, %lo(tabDoubleS - tabBaseS)
-#endif
-
-loopS:
-    lbu   $ra, 0($t9)           # ra = shorty[i]
-    beqz  $ra, loopEndS         # finish getting args when shorty[i] == '\0'
-    addiu $t9, 1
-
-    addiu $ra, -'J'
-    beqz  $ra, isLongS          # branch if result type char == 'J'
-    addiu $ra, 'J' - 'D'
-    beqz  $ra, isDoubleS        # branch if result type char == 'D'
-    addiu $ra, 'D' - 'F'
-    beqz  $ra, isSingleS        # branch if result type char == 'F'
-
-    addu  $ra, $t2, $t6
-    jalr  $zero, $ra
-    addiu $t8, 4                # next_arg = curr_arg + 4
-
-isLongS:
-    addu  $ra, $t3, $t6
-    jalr  $zero, $ra
-    addiu $t8, 8                # next_arg = curr_arg + 8
-
-isSingleS:
-    addu  $ra, $t4, $t7
-    jalr  $zero, $ra
-    addiu $t8, 4                # next_arg = curr_arg + 4
-
-isDoubleS:
-    addu  $ra, $t5, $t7
-#if defined(__mips_isa_rev) && __mips_isa_rev > 2
-    addiu $t7, 16               # fp_index += 16 didn't fit into LOAD_DOUBLE_TO_REG
-#endif
-    jalr  $zero, $ra
-    addiu $t8, 8                # next_arg = curr_arg + 8
-
-loopEndS:
-    lw    $t9, ART_METHOD_QUICK_CODE_OFFSET_32($a0)  # get pointer to the code
-    jalr  $t9                   # call the method
-    sw    $zero, 0($sp)         # store null for ArtMethod* at bottom of frame
-    move  $sp, $fp              # restore the stack
-    lw    $s0, 0($sp)
-    .cfi_restore 16
-    lw    $s1, 4($sp)
-    .cfi_restore 17
-    lw    $fp, 8($sp)
-    .cfi_restore 30
-    lw    $ra, 12($sp)
-    .cfi_restore 31
-    addiu $sp, $sp, SPILL_SIZE
-    .cfi_adjust_cfa_offset -SPILL_SIZE
-    lw    $t0, 16($sp)          # get result pointer
-    lw    $t1, 20($sp)          # get shorty
-    lb    $t1, 0($t1)           # get result type char
-    li    $t2, 'D'              # put char 'D' into t2
-    beq   $t1, $t2, 6f          # branch if result type char == 'D'
-    li    $t3, 'F'              # put char 'F' into t3
-    beq   $t1, $t3, 6f          # branch if result type char == 'F'
-    sw    $v0, 0($t0)           # store the result
-    jalr  $zero, $ra
-    sw    $v1, 4($t0)           # store the other half of the result
-6:
-    CHECK_ALIGNMENT $t0, $t1, 8
-    sdc1  $f0, 0($t0)           # store floating point result
-    jalr  $zero, $ra
-    nop
-
-    // Note that gpr_index is kept within the range of tabIntS and tabLongS
-    // and fp_index is kept within the range of tabSingleS and tabDoubleS.
-    .balign 16
-tabIntS:
-    LOAD_WORD_TO_REG a1, t8, t6, loopS             # a1 = current argument, gpr_index += 16
-    LOAD_WORD_TO_REG a2, t8, t6, loopS             # a2 = current argument, gpr_index += 16
-    LOAD_WORD_TO_REG a3, t8, t6, loopS             # a3 = current argument, gpr_index += 16
-    LOAD_WORD_TO_REG t0, t8, t6, loopS             # t0 = current argument, gpr_index += 16
-    LOAD_WORD_TO_REG t1, t8, t6, loopS             # t1 = current argument, gpr_index += 16
-    LOAD_END t6, 5*16, loopS                       # no more GPR args, gpr_index = 5*16
-tabLongS:
-    LOAD_LONG_TO_REG a2, a3, t8, t6, 3*16, loopS   # a2_a3 = curr_arg, gpr_index = 3*16
-    LOAD_LONG_TO_REG a2, a3, t8, t6, 3*16, loopS   # a2_a3 = curr_arg, gpr_index = 3*16
-    LOAD_LONG_TO_REG t0, t1, t8, t6, 5*16, loopS   # t0_t1 = curr_arg, gpr_index = 5*16
-    LOAD_LONG_TO_REG t0, t1, t8, t6, 5*16, loopS   # t0_t1 = curr_arg, gpr_index = 5*16
-    LOAD_END t6, 5*16, loopS                       # no more GPR args, gpr_index = 5*16
-    LOAD_END t6, 5*16, loopS                       # no more GPR args, gpr_index = 5*16
-tabSingleS:
-    LOAD_FLOAT_TO_REG f8, t8, t7, loopS            # f8 = curr_arg, fp_index += 16
-    LOAD_FLOAT_TO_REG f10, t8, t7, loopS           # f10 = curr_arg, fp_index += 16
-    LOAD_FLOAT_TO_REG f12, t8, t7, loopS           # f12 = curr_arg, fp_index += 16
-    LOAD_FLOAT_TO_REG f14, t8, t7, loopS           # f14 = curr_arg, fp_index += 16
-    LOAD_FLOAT_TO_REG f16, t8, t7, loopS           # f16 = curr_arg, fp_index += 16
-    LOAD_FLOAT_TO_REG f18, t8, t7, loopS           # f18 = curr_arg, fp_index += 16
-    LOAD_END t7, 6*16, loopS                       # no more FPR args, fp_index = 6*16
-tabDoubleS:
-    LOAD_DOUBLE_TO_REG f8, f9, t8, t7, ra, loopS   # f8_f9 = curr_arg; if FPU32, fp_index += 16
-    LOAD_DOUBLE_TO_REG f10, f11, t8, t7, ra, loopS # f10_f11 = curr_arg; if FPU32, fp_index += 16
-    LOAD_DOUBLE_TO_REG f12, f13, t8, t7, ra, loopS # f12_f13 = curr_arg; if FPU32, fp_index += 16
-    LOAD_DOUBLE_TO_REG f14, f15, t8, t7, ra, loopS # f14_f15 = curr_arg; if FPU32, fp_index += 16
-    LOAD_DOUBLE_TO_REG f16, f17, t8, t7, ra, loopS # f16_f17 = curr_arg; if FPU32, fp_index += 16
-    LOAD_DOUBLE_TO_REG f18, f19, t8, t7, ra, loopS # f18_f19 = curr_arg; if FPU32, fp_index += 16
-    LOAD_END t7, 6*16, loopS                       # no more FPR args, fp_index = 6*16
-END art_quick_invoke_static_stub
-
-#undef SPILL_SIZE
-
-    /*
-     * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
-     * failure.
-     */
-    .extern artHandleFillArrayDataFromCode
-ENTRY art_quick_handle_fill_data
-    lw     $a2, 0($sp)                # pass referrer's Method*
-    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case exception allocation triggers GC
-    la     $t9, artHandleFillArrayDataFromCode
-    jalr   $t9                        # (payload offset, Array*, method, Thread*)
-    move   $a3, rSELF                 # pass Thread::Current
-    RETURN_IF_ZERO
-END art_quick_handle_fill_data
-
-    /*
-     * Entry from managed code that calls artLockObjectFromCode, may block for GC.
-     */
-    .extern artLockObjectFromCode
-ENTRY art_quick_lock_object
-    beqz    $a0, art_quick_throw_null_pointer_exception
-    li      $t8, LOCK_WORD_THIN_LOCK_COUNT_ONE
-    li      $t3, LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED
-.Lretry_lock:
-    lw      $t0, THREAD_ID_OFFSET(rSELF)  # TODO: Can the thread ID really change during the loop?
-    ll      $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-    and     $t2, $t1, $t3                 # zero the gc bits
-    bnez    $t2, .Lnot_unlocked           # already thin locked
-    # Unlocked case - $t1: original lock word that's zero except for the read barrier bits.
-    or      $t2, $t1, $t0                 # $t2 holds thread id with count of 0 with preserved read barrier bits
-    sc      $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-    beqz    $t2, .Lretry_lock             # store failed, retry
-    nop
-    jalr    $zero, $ra
-    sync                                  # full (LoadLoad|LoadStore) memory barrier
-.Lnot_unlocked:
-    # $t1: original lock word, $t0: thread_id with count of 0 and zero read barrier bits
-    srl     $t2, $t1, LOCK_WORD_STATE_SHIFT
-    bnez    $t2, .Lslow_lock              # if either of the top two bits are set, go slow path
-    xor     $t2, $t1, $t0                 # lock_word.ThreadId() ^ self->ThreadId()
-    andi    $t2, $t2, 0xFFFF              # zero top 16 bits
-    bnez    $t2, .Lslow_lock              # lock word and self thread id's match -> recursive lock
-                                          # otherwise contention, go to slow path
-    and     $t2, $t1, $t3                 # zero the gc bits
-    addu    $t2, $t2, $t8                 # increment count in lock word
-    srl     $t2, $t2, LOCK_WORD_STATE_SHIFT  # if the first gc state bit is set, we overflowed.
-    bnez    $t2, .Lslow_lock              # if we overflow the count go slow path
-    addu    $t2, $t1, $t8                 # increment count for real
-    sc      $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-    beqz    $t2, .Lretry_lock             # store failed, retry
-    nop
-    jalr    $zero, $ra
-    nop
-.Lslow_lock:
-    SETUP_SAVE_REFS_ONLY_FRAME            # save callee saves in case we block
-    la      $t9, artLockObjectFromCode
-    jalr    $t9                           # (Object* obj, Thread*)
-    move    $a1, rSELF                    # pass Thread::Current
-    RETURN_IF_ZERO
-END art_quick_lock_object
-
-ENTRY art_quick_lock_object_no_inline
-    beqz    $a0, art_quick_throw_null_pointer_exception
-    nop
-    SETUP_SAVE_REFS_ONLY_FRAME            # save callee saves in case we block
-    la      $t9, artLockObjectFromCode
-    jalr    $t9                           # (Object* obj, Thread*)
-    move    $a1, rSELF                    # pass Thread::Current
-    RETURN_IF_ZERO
-END art_quick_lock_object_no_inline
-
-    /*
-     * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
-     */
-    .extern artUnlockObjectFromCode
-ENTRY art_quick_unlock_object
-    beqz    $a0, art_quick_throw_null_pointer_exception
-    li      $t8, LOCK_WORD_THIN_LOCK_COUNT_ONE
-    li      $t3, LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED
-.Lretry_unlock:
-#ifndef USE_READ_BARRIER
-    lw      $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-#else
-    ll      $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)  # Need to use atomic read-modify-write for read barrier
-#endif
-    srl     $t2, $t1, LOCK_WORD_STATE_SHIFT
-    bnez    $t2, .Lslow_unlock         # if either of the top two bits are set, go slow path
-    lw      $t0, THREAD_ID_OFFSET(rSELF)
-    and     $t2, $t1, $t3              # zero the gc bits
-    xor     $t2, $t2, $t0              # lock_word.ThreadId() ^ self->ThreadId()
-    andi    $t2, $t2, 0xFFFF           # zero top 16 bits
-    bnez    $t2, .Lslow_unlock         # do lock word and self thread id's match?
-    and     $t2, $t1, $t3              # zero the gc bits
-    bgeu    $t2, $t8, .Lrecursive_thin_unlock
-    # transition to unlocked
-    nor     $t2, $zero, $t3            # $t2 = LOCK_WORD_GC_STATE_MASK_SHIFTED
-    and     $t2, $t1, $t2              # $t2: zero except for the preserved gc bits
-    sync                               # full (LoadStore|StoreStore) memory barrier
-#ifndef USE_READ_BARRIER
-    jalr    $zero, $ra
-    sw      $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-#else
-    sc      $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-    beqz    $t2, .Lretry_unlock        # store failed, retry
-    nop
-    jalr    $zero, $ra
-    nop
-#endif
-.Lrecursive_thin_unlock:
-    # t1: original lock word
-    subu    $t2, $t1, $t8              # decrement count
-#ifndef USE_READ_BARRIER
-    jalr    $zero, $ra
-    sw      $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-#else
-    sc      $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-    beqz    $t2, .Lretry_unlock        # store failed, retry
-    nop
-    jalr    $zero, $ra
-    nop
-#endif
-.Lslow_unlock:
-    SETUP_SAVE_REFS_ONLY_FRAME         # save callee saves in case exception allocation triggers GC
-    la      $t9, artUnlockObjectFromCode
-    jalr    $t9                        # (Object* obj, Thread*)
-    move    $a1, rSELF                 # pass Thread::Current
-    RETURN_IF_ZERO
-END art_quick_unlock_object
-
-ENTRY art_quick_unlock_object_no_inline
-    beqz    $a0, art_quick_throw_null_pointer_exception
-    nop
-    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case exception allocation triggers GC
-    la      $t9, artUnlockObjectFromCode
-    jalr    $t9                       # (Object* obj, Thread*)
-    move    $a1, rSELF                # pass Thread::Current
-    RETURN_IF_ZERO
-END art_quick_unlock_object_no_inline
-
-    /*
-     * Entry from managed code that calls artInstanceOfFromCode and delivers exception on failure.
-     */
-    .extern artInstanceOfFromCode
-    .extern artThrowClassCastExceptionForObject
-ENTRY art_quick_check_instance_of
-    // Type check using the bit string passes null as the target class. In that case just throw.
-    beqz   $a1, .Lthrow_class_cast_exception_for_bitstring_check
-    nop
-
-    addiu  $sp, $sp, -32
-    .cfi_adjust_cfa_offset 32
-    sw     $gp, 16($sp)
-    sw     $ra, 12($sp)
-    .cfi_rel_offset 31, 12
-    sw     $t9, 8($sp)
-    sw     $a1, 4($sp)
-    sw     $a0, 0($sp)
-    la     $t9, artInstanceOfFromCode
-    jalr   $t9
-    addiu  $sp, $sp, -16             # reserve argument slots on the stack
-    addiu  $sp, $sp, 16
-    lw     $gp, 16($sp)
-    beqz   $v0, .Lthrow_class_cast_exception
-    lw     $ra, 12($sp)
-    jalr   $zero, $ra
-    addiu  $sp, $sp, 32
-    .cfi_adjust_cfa_offset -32
-
-.Lthrow_class_cast_exception:
-    lw     $t9, 8($sp)
-    lw     $a1, 4($sp)
-    lw     $a0, 0($sp)
-    addiu  $sp, $sp, 32
-    .cfi_adjust_cfa_offset -32
-
-.Lthrow_class_cast_exception_for_bitstring_check:
-    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
-    la   $t9, artThrowClassCastExceptionForObject
-    jalr $zero, $t9                 # artThrowClassCastException (Object*, Class*, Thread*)
-    move $a2, rSELF                 # pass Thread::Current
-END art_quick_check_instance_of
-
-    /*
-     * Restore rReg's value from offset($sp) if rReg is not the same as rExclude.
-     * nReg is the register number for rReg.
-     */
-.macro POP_REG_NE rReg, nReg, offset, rExclude
-    .ifnc \rReg, \rExclude
-        lw \rReg, \offset($sp)      # restore rReg
-        .cfi_restore \nReg
-    .endif
-.endm
-
-    /*
-     * Macro to insert read barrier, only used in art_quick_aput_obj.
-     * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET.
-     * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
-     */
-.macro READ_BARRIER rDest, rObj, offset
-#ifdef USE_READ_BARRIER
-    # saved registers used in art_quick_aput_obj: a0-a2, t0-t1, t9, ra. 8 words for 16B alignment.
-    addiu  $sp, $sp, -32
-    .cfi_adjust_cfa_offset 32
-    sw     $ra, 28($sp)
-    .cfi_rel_offset 31, 28
-    sw     $t9, 24($sp)
-    .cfi_rel_offset 25, 24
-    sw     $t1, 20($sp)
-    .cfi_rel_offset 9, 20
-    sw     $t0, 16($sp)
-    .cfi_rel_offset 8, 16
-    sw     $a2, 8($sp)              # padding slot at offset 12 (padding can be any slot in the 32B)
-    .cfi_rel_offset 6, 8
-    sw     $a1, 4($sp)
-    .cfi_rel_offset 5, 4
-    sw     $a0, 0($sp)
-    .cfi_rel_offset 4, 0
-
-    # move $a0, \rRef               # pass ref in a0 (no-op for now since parameter ref is unused)
-    .ifnc \rObj, $a1
-        move $a1, \rObj             # pass rObj
-    .endif
-    addiu  $a2, $zero, \offset      # pass offset
-    la     $t9, artReadBarrierSlow
-    jalr   $t9                      # artReadBarrierSlow(ref, rObj, offset)
-    addiu  $sp, $sp, -16            # Use branch delay slot to reserve argument slots on the stack
-                                    # before the call to artReadBarrierSlow.
-    addiu  $sp, $sp, 16             # restore stack after call to artReadBarrierSlow
-    # No need to unpoison return value in v0, artReadBarrierSlow() would do the unpoisoning.
-    move \rDest, $v0                # save return value in rDest
-                                    # (rDest cannot be v0 in art_quick_aput_obj)
-
-    lw     $a0, 0($sp)              # restore registers except rDest
-                                    # (rDest can only be t0 or t1 in art_quick_aput_obj)
-    .cfi_restore 4
-    lw     $a1, 4($sp)
-    .cfi_restore 5
-    lw     $a2, 8($sp)
-    .cfi_restore 6
-    POP_REG_NE $t0, 8, 16, \rDest
-    POP_REG_NE $t1, 9, 20, \rDest
-    lw     $t9, 24($sp)
-    .cfi_restore 25
-    lw     $ra, 28($sp)             # restore $ra
-    .cfi_restore 31
-    addiu  $sp, $sp, 32
-    .cfi_adjust_cfa_offset -32
-#else
-    lw     \rDest, \offset(\rObj)
-    UNPOISON_HEAP_REF \rDest
-#endif  // USE_READ_BARRIER
-.endm
-
-#ifdef USE_READ_BARRIER
-    .extern artReadBarrierSlow
-#endif
-ENTRY art_quick_aput_obj
-    beqz $a2, .Ldo_aput_null
-    nop
-    READ_BARRIER $t0, $a0, MIRROR_OBJECT_CLASS_OFFSET
-    READ_BARRIER $t1, $a2, MIRROR_OBJECT_CLASS_OFFSET
-    READ_BARRIER $t0, $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
-    bne $t1, $t0, .Lcheck_assignability  # value's type == array's component type - trivial assignability
-    nop
-.Ldo_aput:
-    sll $a1, $a1, 2
-    add $t0, $a0, $a1
-    POISON_HEAP_REF $a2
-    sw  $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
-    lw  $t0, THREAD_CARD_TABLE_OFFSET(rSELF)
-    srl $t1, $a0, CARD_TABLE_CARD_SHIFT
-    add $t1, $t1, $t0
-    sb  $t0, ($t1)
-    jalr $zero, $ra
-    nop
-.Ldo_aput_null:
-    sll $a1, $a1, 2
-    add $t0, $a0, $a1
-    sw  $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
-    jalr $zero, $ra
-    nop
-.Lcheck_assignability:
-    addiu  $sp, $sp, -32
-    .cfi_adjust_cfa_offset 32
-    sw     $ra, 28($sp)
-    .cfi_rel_offset 31, 28
-    sw     $gp, 16($sp)
-    sw     $t9, 12($sp)
-    sw     $a2, 8($sp)
-    sw     $a1, 4($sp)
-    sw     $a0, 0($sp)
-    move   $a1, $t1
-    move   $a0, $t0
-    la     $t9, artIsAssignableFromCode
-    jalr   $t9               # (Class*, Class*)
-    addiu  $sp, $sp, -16     # reserve argument slots on the stack
-    addiu  $sp, $sp, 16
-    lw     $ra, 28($sp)
-    lw     $gp, 16($sp)
-    lw     $t9, 12($sp)
-    lw     $a2, 8($sp)
-    lw     $a1, 4($sp)
-    lw     $a0, 0($sp)
-    addiu  $sp, 32
-    .cfi_adjust_cfa_offset -32
-    bnez   $v0, .Ldo_aput
-    nop
-    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
-    move $a1, $a2
-    la   $t9, artThrowArrayStoreException
-    jalr $zero, $t9                 # artThrowArrayStoreException(Class*, Class*, Thread*)
-    move $a2, rSELF                 # pass Thread::Current
-END art_quick_aput_obj
-
-// Macros taking opportunity of code similarities for downcalls.
-.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
-    .extern \entrypoint
-ENTRY \name
-    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case of GC
-    la      $t9, \entrypoint
-    jalr    $t9                       # (field_idx, Thread*)
-    move    $a1, rSELF                # pass Thread::Current
-    \return                           # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
-END \name
-.endm
-
-.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
-    .extern \entrypoint
-ENTRY \name
-    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case of GC
-    la      $t9, \entrypoint
-    jalr    $t9                       # (field_idx, Object*, Thread*) or
-                                      # (field_idx, new_val, Thread*)
-    move    $a2, rSELF                # pass Thread::Current
-    \return                           # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
-END \name
-.endm
-
-.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
-    .extern \entrypoint
-ENTRY \name
-    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case of GC
-    la      $t9, \entrypoint
-    jalr    $t9                       # (field_idx, Object*, new_val, Thread*)
-    move    $a3, rSELF                # pass Thread::Current
-    \return                           # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
-END \name
-.endm
-
-.macro FOUR_ARG_REF_DOWNCALL name, entrypoint, return
-    .extern \entrypoint
-ENTRY \name
-    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case of GC
-    la      $t9, \entrypoint
-    jalr    $t9                       # (field_idx, Object*, 64-bit new_val, Thread*) or
-                                      # (field_idx, 64-bit new_val, Thread*)
-                                      # Note that a 64-bit new_val needs to be aligned with
-                                      # an even-numbered register, hence A1 may be skipped
-                                      # for new_val to reside in A2-A3.
-    sw      rSELF, 16($sp)            # pass Thread::Current
-    \return                           # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
-END \name
-.endm
-
-    /*
-     * Called by managed code to resolve a static/instance field and load/store a value.
-     *
-     * Note: Functions `art{Get,Set}<Kind>{Static,Instance}FromCompiledCode` are
-     * defined with a macro in runtime/entrypoints/quick/quick_field_entrypoints.cc.
-     */
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_ZERO
-FOUR_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_ZERO
-FOUR_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_ZERO
-
-// Macro to facilitate adding new allocation entrypoints.
-.macro ONE_ARG_DOWNCALL name, entrypoint, return
-    .extern \entrypoint
-ENTRY \name
-    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case of GC
-    la      $t9, \entrypoint
-    jalr    $t9
-    move    $a1, rSELF                # pass Thread::Current
-    \return
-END \name
-.endm
-
-.macro TWO_ARG_DOWNCALL name, entrypoint, return
-    .extern \entrypoint
-ENTRY \name
-    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case of GC
-    la      $t9, \entrypoint
-    jalr    $t9
-    move    $a2, rSELF                # pass Thread::Current
-    \return
-END \name
-.endm
-
-.macro THREE_ARG_DOWNCALL name, entrypoint, return
-    .extern \entrypoint
-ENTRY \name
-    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case of GC
-    la      $t9, \entrypoint
-    jalr    $t9
-    move    $a3, rSELF                # pass Thread::Current
-    \return
-END \name
-.endm
-
-.macro FOUR_ARG_DOWNCALL name, entrypoint, return
-    .extern \entrypoint
-ENTRY \name
-    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case of GC
-    la      $t9, \entrypoint
-    jalr    $t9
-    sw      rSELF, 16($sp)            # pass Thread::Current
-    \return
-END \name
-.endm
-
-// Generate the allocation entrypoints for each allocator.
-GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
-// Comment out allocators that have mips specific asm.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_OBJECT(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
-
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_OBJECT(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
-
-// A hand-written override for:
-//   GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
-//   GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
-.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name, isInitialized
-ENTRY_NO_GP \c_name
-    # Fast path rosalloc allocation
-    # a0: type
-    # s1: Thread::Current
-    # -----------------------------
-    # t1: object size
-    # t2: rosalloc run
-    # t3: thread stack top offset
-    # t4: thread stack bottom offset
-    # v0: free list head
-    #
-    # t5, t6 : temps
-    lw    $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)        # Check if thread local allocation
-    lw    $t4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1)        # stack has any room left.
-    bgeu  $t3, $t4, .Lslow_path_\c_name
-
-    lw    $t1, MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET($a0)  # Load object size (t1).
-    li    $t5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE          # Check if size is for a thread local
-                                                               # allocation. Also does the
-                                                               # initialized and finalizable checks.
-    # When isInitialized == 0, then the class is potentially not yet initialized.
-    # If the class is not yet initialized, the object size will be very large to force the branch
-    # below to be taken.
-    #
-    # See InitializeClassVisitors in class-inl.h for more details.
-    bgtu  $t1, $t5, .Lslow_path_\c_name
-
-    # Compute the rosalloc bracket index from the size. Since the size is already aligned we can
-    # combine the two shifts together.
-    srl   $t1, $t1, (ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
-
-    addu  $t2, $t1, $s1
-    lw    $t2, (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)($t2)  # Load rosalloc run (t2).
-
-    # Load the free list head (v0).
-    # NOTE: this will be the return val.
-    lw    $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
-    beqz  $v0, .Lslow_path_\c_name
-    nop
-
-    # Load the next pointer of the head and update the list head with the next pointer.
-    lw    $t5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
-    sw    $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
-
-    # Store the class pointer in the header. This also overwrites the first pointer. The offsets are
-    # asserted to match.
-
-#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
-#error "Class pointer needs to overwrite next pointer."
-#endif
-
-    POISON_HEAP_REF $a0
-    sw    $a0, MIRROR_OBJECT_CLASS_OFFSET($v0)
-
-    # Push the new object onto the thread local allocation stack and increment the thread local
-    # allocation stack top.
-    sw    $v0, 0($t3)
-    addiu $t3, $t3, COMPRESSED_REFERENCE_SIZE
-    sw    $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)
-
-    # Decrement the size of the free list.
-    lw    $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
-    addiu $t5, $t5, -1
-    sw    $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
-
-.if \isInitialized == 0
-    # This barrier is only necessary when the allocation also requires a class initialization check.
-    #
-    # If the class is already observably initialized, then new-instance allocations are protected
-    # from publishing by the compiler which inserts its own StoreStore barrier.
-    sync                                                          # Fence.
-.endif
-    jalr  $zero, $ra
-    nop
-
-  .Lslow_path_\c_name:
-    addiu $t9, $t9, (.Lslow_path_\c_name - \c_name) + 4
-    .cpload $t9
-    SETUP_SAVE_REFS_ONLY_FRAME
-    la    $t9, \cxx_name
-    jalr  $t9
-    move  $a1, $s1                                                # Pass self as argument.
-    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END \c_name
-.endm
-
-ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc, /* isInitialized */ 0
-ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc, /* isInitialized */ 1
-
-// The common fast path code for art_quick_alloc_object_resolved/initialized_tlab
-// and art_quick_alloc_object_resolved/initialized_region_tlab.
-//
-// a0: type, s1(rSELF): Thread::Current.
-// Need to preserve a0 to the slow path.
-//
-// If isInitialized=1 then the compiler assumes the object's class has already been initialized.
-// If isInitialized=0 the compiler can only assume it's been at least resolved.
-.macro ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH slowPathLabel isInitialized
-    lw    $v0, THREAD_LOCAL_POS_OFFSET(rSELF)          # Load thread_local_pos.
-    lw    $a2, THREAD_LOCAL_END_OFFSET(rSELF)          # Load thread_local_end.
-    subu  $a3, $a2, $v0                                # Compute the remaining buffer size.
-    lw    $t0, MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET($a0)  # Load the object size.
-
-    # When isInitialized == 0, then the class is potentially not yet initialized.
-    # If the class is not yet initialized, the object size will be very large to force the branch
-    # below to be taken.
-    #
-    # See InitializeClassVisitors in class-inl.h for more details.
-    bgtu  $t0, $a3, \slowPathLabel                     # Check if it fits.
-    addu  $t1, $v0, $t0                                # Add object size to tlab pos (in branch
-                                                       # delay slot).
-    # "Point of no slow path". Won't go to the slow path from here on.
-    sw    $t1, THREAD_LOCAL_POS_OFFSET(rSELF)          # Store new thread_local_pos.
-    lw    $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF)      # Increment thread_local_objects.
-    addiu $a2, $a2, 1
-    sw    $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF)
-    POISON_HEAP_REF $a0
-    sw    $a0, MIRROR_OBJECT_CLASS_OFFSET($v0)         # Store the class pointer.
-
-.if \isInitialized == 0
-    # This barrier is only necessary when the allocation also requires a class initialization check.
-    #
-    # If the class is already observably initialized, then new-instance allocations are protected
-    # from publishing by the compiler which inserts its own StoreStore barrier.
-    sync                                               # Fence.
-.endif
-    jalr  $zero, $ra
-    nop
-.endm
-
-// The common code for art_quick_alloc_object_resolved/initialized_tlab
-// and art_quick_alloc_object_resolved/initialized_region_tlab.
-.macro GENERATE_ALLOC_OBJECT_TLAB name, entrypoint, isInitialized
-ENTRY_NO_GP \name
-    # Fast path tlab allocation.
-    # a0: type, s1(rSELF): Thread::Current.
-    ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lslow_path_\name, \isInitialized
-.Lslow_path_\name:
-    addiu $t9, $t9, (.Lslow_path_\name - \name) + 4
-    .cpload $t9
-    SETUP_SAVE_REFS_ONLY_FRAME                         # Save callee saves in case of GC.
-    la    $t9, \entrypoint
-    jalr  $t9                                          # (mirror::Class*, Thread*)
-    move  $a1, rSELF                                   # Pass Thread::Current.
-    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END \name
-.endm
-
-GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, /* isInitialized */ 0
-GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, /* isInitialized */ 1
-GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB, /* isInitialized */ 0
-GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB, /* isInitialized */ 1
-
-// The common fast path code for art_quick_alloc_array_resolved/initialized_tlab
-// and art_quick_alloc_array_resolved/initialized_region_tlab.
-//
-// a0: type, a1: component_count, a2: total_size, s1(rSELF): Thread::Current.
-// Need to preserve a0 and a1 to the slow path.
-.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE slowPathLabel
-    li    $a3, OBJECT_ALIGNMENT_MASK_TOGGLED           # Apply alignemnt mask
-    and   $a2, $a2, $a3                                # (addr + 7) & ~7.
-
-    lw    $v0, THREAD_LOCAL_POS_OFFSET(rSELF)          # Load thread_local_pos.
-    lw    $t1, THREAD_LOCAL_END_OFFSET(rSELF)          # Load thread_local_end.
-    subu  $t2, $t1, $v0                                # Compute the remaining buffer size.
-    bgtu  $a2, $t2, \slowPathLabel                     # Check if it fits.
-    addu  $a2, $v0, $a2                                # Add object size to tlab pos (in branch
-                                                       # delay slot).
-
-    # "Point of no slow path". Won't go to the slow path from here on.
-    sw    $a2, THREAD_LOCAL_POS_OFFSET(rSELF)          # Store new thread_local_pos.
-    lw    $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF)      # Increment thread_local_objects.
-    addiu $a2, $a2, 1
-    sw    $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF)
-    POISON_HEAP_REF $a0
-    sw    $a0, MIRROR_OBJECT_CLASS_OFFSET($v0)         # Store the class pointer.
-    jalr  $zero, $ra
-    sw    $a1, MIRROR_ARRAY_LENGTH_OFFSET($v0)         # Store the array length.
-.endm
-
-.macro GENERATE_ALLOC_ARRAY_TLAB name, entrypoint, size_setup
-ENTRY_NO_GP \name
-    # Fast path array allocation for region tlab allocation.
-    # a0: mirror::Class* type
-    # a1: int32_t component_count
-    # s1(rSELF): Thread::Current
-    \size_setup .Lslow_path_\name
-    ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path_\name
-.Lslow_path_\name:
-    # a0: mirror::Class* type
-    # a1: int32_t component_count
-    # a2: Thread* self
-    addiu $t9, $t9, (.Lslow_path_\name - \name) + 4
-    .cpload $t9
-    SETUP_SAVE_REFS_ONLY_FRAME                         # Save callee saves in case of GC.
-    la    $t9, \entrypoint
-    jalr  $t9
-    move  $a2, rSELF                                   # Pass Thread::Current.
-    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END \name
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_UNKNOWN slow_path
-    break                                              # We should never enter here.
-                                                       # Code below is for reference.
-                                                       # Possibly a large object, go slow.
-                                                       # Also does negative array size check.
-    li    $a2, ((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_WIDE_ARRAY_DATA_OFFSET) / 8)
-    bgtu  $a1, $a2, \slow_path
-                                                       # Array classes are never finalizable
-                                                       # or uninitialized, no need to check.
-    lw    $a3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($a0) # Load component type.
-    UNPOISON_HEAP_REF $a3
-    lw    $a3, MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET($a3)
-    srl   $a3, $a3, PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT    # Component size shift is in high 16 bits.
-    sllv  $a2, $a1, $a3                                # Calculate data size.
-                                                       # Add array data offset and alignment.
-    addiu $a2, $a2, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-#if MIRROR_WIDE_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
-#error Long array data offset must be 4 greater than int array data offset.
-#endif
-
-    addiu $a3, $a3, 1                                  # Add 4 to the length only if the component
-    andi  $a3, $a3, 4                                  # size shift is 3 (for 64 bit alignment).
-    addu  $a2, $a2, $a3
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_8 slow_path
-    # Possibly a large object, go slow.
-    # Also does negative array size check.
-    li    $a2, (MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET)
-    bgtu  $a1, $a2, \slow_path
-    # Add array data offset and alignment (in branch delay slot).
-    addiu $a2, $a1, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_16 slow_path
-    # Possibly a large object, go slow.
-    # Also does negative array size check.
-    li    $a2, ((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 2)
-    bgtu  $a1, $a2, \slow_path
-    sll   $a2, $a1, 1
-    # Add array data offset and alignment.
-    addiu $a2, $a2, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_32 slow_path
-    # Possibly a large object, go slow.
-    # Also does negative array size check.
-    li    $a2, ((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_INT_ARRAY_DATA_OFFSET) / 4)
-    bgtu  $a1, $a2, \slow_path
-    sll   $a2, $a1, 2
-    # Add array data offset and alignment.
-    addiu $a2, $a2, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_64 slow_path
-    # Possibly a large object, go slow.
-    # Also does negative array size check.
-    li    $a2, ((MIN_LARGE_OBJECT_THRESHOLD - MIRROR_LONG_ARRAY_DATA_OFFSET) / 8)
-    bgtu  $a1, $a2, \slow_path
-    sll   $a2, $a1, 3
-    # Add array data offset and alignment.
-    addiu $a2, $a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-.endm
-
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_8
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_16
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_32
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_64
-
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_8
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_16
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_32
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64
-
-    /*
-     * Macro for resolution and initialization of indexed DEX file
-     * constants such as classes and strings. $a0 is both input and
-     * output.
-     */
-.macro ONE_ARG_SAVE_EVERYTHING_DOWNCALL name, entrypoint, runtime_method_offset = RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
-    .extern \entrypoint
-ENTRY_NO_GP \name
-    SETUP_SAVE_EVERYTHING_FRAME \runtime_method_offset  # Save everything in case of GC.
-    move    $s2, $gp                  # Preserve $gp across the call for exception delivery.
-    la      $t9, \entrypoint
-    jalr    $t9                       # (uint32_t index, Thread*)
-    move    $a1, rSELF                # Pass Thread::Current (in delay slot).
-    beqz    $v0, 1f                   # Success?
-    move    $a0, $v0                  # Move result to $a0 (in delay slot).
-    RESTORE_SAVE_EVERYTHING_FRAME 0   # Restore everything except $a0.
-    jalr    $zero, $ra                # Return on success.
-    nop
-1:
-    move    $gp, $s2
-    DELIVER_PENDING_EXCEPTION_FRAME_READY
-END \name
-.endm
-
-.macro ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT name, entrypoint
-    ONE_ARG_SAVE_EVERYTHING_DOWNCALL \name, \entrypoint, RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OFFSET
-.endm
-
-    /*
-     * Entry from managed code to resolve a method handle. On entry, A0 holds the method handle
-     * index. On success the MethodHandle is returned, otherwise an exception is raised.
-     */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
-
-    /*
-     * Entry from managed code to resolve a method type. On entry, A0 holds the method type index.
-     * On success the MethodType is returned, otherwise an exception is raised.
-     */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_type, artResolveMethodTypeFromCode
-
-    /*
-     * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
-     * exception on error. On success the String is returned. A0 holds the string index. The fast
-     * path check for hit in strings cache has already been performed.
-     */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
-
-    /*
-     * Entry from managed code when uninitialized static storage, this stub will run the class
-     * initializer and deliver the exception on error. On success the static storage base is
-     * returned.
-     */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
-
-    /*
-     * Entry from managed code when dex cache misses for a type_idx.
-     */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_resolve_type, artResolveTypeFromCode
-
-    /*
-     * Entry from managed code when type_idx needs to be checked for access and dex cache may also
-     * miss.
-     */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_type_and_verify_access, artResolveTypeAndVerifyAccessFromCode
-
-    /*
-     * Called by managed code when the value in rSUSPEND has been decremented to 0.
-     */
-    .extern artTestSuspendFromCode
-ENTRY_NO_GP art_quick_test_suspend
-    SETUP_SAVE_EVERYTHING_FRAME RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET
-                                                     # save everything for stack crawl
-    la     $t9, artTestSuspendFromCode
-    jalr   $t9                                       # (Thread*)
-    move   $a0, rSELF
-    RESTORE_SAVE_EVERYTHING_FRAME
-    jalr   $zero, $ra
-    nop
-END art_quick_test_suspend
-
-    /*
-     * Called by managed code that is attempting to call a method on a proxy class. On entry
-     * a0 holds the proxy method; a1, a2 and a3 may contain arguments.
-     */
-    .extern artQuickProxyInvokeHandler
-ENTRY art_quick_proxy_invoke_handler
-    SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
-    move    $a2, rSELF                  # pass Thread::Current
-    la      $t9, artQuickProxyInvokeHandler
-    jalr    $t9                         # (Method* proxy method, receiver, Thread*, SP)
-    addiu   $a3, $sp, ARG_SLOT_SIZE     # pass $sp (remove arg slots)
-    lw      $t7, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME
-    bnez    $t7, 1f
-    # don't care if $v0 and/or $v1 are modified, when exception branch taken
-    MTD     $v0, $v1, $f0, $f1          # move float value to return value
-    jalr    $zero, $ra
-    nop
-1:
-    DELIVER_PENDING_EXCEPTION
-END art_quick_proxy_invoke_handler
-
-    /*
-     * Called to resolve an imt conflict.
-     * a0 is the conflict ArtMethod.
-     * t7 is a hidden argument that holds the target interface method's dex method index.
-     *
-     * Note that this stub writes to v0-v1, a0, t2-t9, f0-f7.
-     */
-    .extern artLookupResolvedMethod
-    .extern __atomic_load_8         # For int64_t std::atomic::load(std::memory_order).
-ENTRY art_quick_imt_conflict_trampoline
-    SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY /* save_s4_thru_s8 */ 0
-
-    lw      $t8, FRAME_SIZE_SAVE_REFS_AND_ARGS($sp)  # $t8 = referrer.
-    // If the method is obsolete, just go through the dex cache miss slow path.
-    // The obsolete flag is set with suspended threads, so we do not need an acquire operation here.
-    lw      $t9, ART_METHOD_ACCESS_FLAGS_OFFSET($t8)  # $t9 = access flags.
-    sll     $t9, $t9, 31 - ACC_OBSOLETE_METHOD_SHIFT  # Move obsolete method bit to sign bit.
-    bltz    $t9, .Limt_conflict_trampoline_dex_cache_miss
-    lw      $t8, ART_METHOD_DECLARING_CLASS_OFFSET($t8)  # $t8 = declaring class (no read barrier).
-    lw      $t8, MIRROR_CLASS_DEX_CACHE_OFFSET($t8)  # $t8 = dex cache (without read barrier).
-    UNPOISON_HEAP_REF $t8
-    la      $t9, __atomic_load_8
-    addiu   $sp, $sp, -ARG_SLOT_SIZE                # Reserve argument slots on the stack.
-    .cfi_adjust_cfa_offset ARG_SLOT_SIZE
-    lw      $t8, MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET($t8)  # $t8 = dex cache methods array.
-
-    move    $s2, $t7                                # $s2 = method index (callee-saved).
-    lw      $s3, ART_METHOD_JNI_OFFSET_32($a0)      # $s3 = ImtConflictTable (callee-saved).
-
-    sll     $t7, $t7, 32 - METHOD_DEX_CACHE_HASH_BITS  # $t7 = slot index in top bits, zeroes below.
-    srl     $t7, $t7, 32 - METHOD_DEX_CACHE_HASH_BITS - (POINTER_SIZE_SHIFT + 1)
-                                                    # $t7 = slot offset.
-
-    li      $a1, STD_MEMORY_ORDER_RELAXED           # $a1 = std::memory_order_relaxed.
-    jalr    $t9                                     # [$v0, $v1] = __atomic_load_8($a0, $a1).
-    addu    $a0, $t8, $t7                           # $a0 = DexCache method slot address.
-
-    bne     $v1, $s2, .Limt_conflict_trampoline_dex_cache_miss  # Branch if method index miss.
-    addiu   $sp, $sp, ARG_SLOT_SIZE                 # Remove argument slots from the stack.
-    .cfi_adjust_cfa_offset -ARG_SLOT_SIZE
-
-.Limt_table_iterate:
-    lw      $t8, 0($s3)                             # Load next entry in ImtConflictTable.
-    # Branch if found.
-    beq     $t8, $v0, .Limt_table_found
-    nop
-    # If the entry is null, the interface method is not in the ImtConflictTable.
-    beqz    $t8, .Lconflict_trampoline
-    nop
-    # Iterate over the entries of the ImtConflictTable.
-    b       .Limt_table_iterate
-    addiu   $s3, $s3, 2 * __SIZEOF_POINTER__        # Iterate to the next entry.
-
-.Limt_table_found:
-    # We successfully hit an entry in the table. Load the target method and jump to it.
-    .cfi_remember_state
-    lw      $a0, __SIZEOF_POINTER__($s3)
-    lw      $t9, ART_METHOD_QUICK_CODE_OFFSET_32($a0)
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME /* restore_s4_thru_s8 */ 0, /* remove_arg_slots */ 0
-    jalr    $zero, $t9
-    nop
-    .cfi_restore_state
-
-.Lconflict_trampoline:
-    # Call the runtime stub to populate the ImtConflictTable and jump to the resolved method.
-    .cfi_remember_state
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME_GP             # Restore clobbered $gp.
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME_A1             # Restore this.
-    move    $a0, $v0                                # Load interface method.
-    INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline, /* save_s4_thru_s8_only */ 1
-    .cfi_restore_state
-
-.Limt_conflict_trampoline_dex_cache_miss:
-    # We're not creating a proper runtime method frame here,
-    # artLookupResolvedMethod() is not allowed to walk the stack.
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME_GP             # Restore clobbered $gp.
-    lw      $a1, FRAME_SIZE_SAVE_REFS_AND_ARGS($sp)  # $a1 = referrer.
-    la      $t9, artLookupResolvedMethod
-    addiu   $sp, $sp, -ARG_SLOT_SIZE                # Reserve argument slots on the stack.
-    .cfi_adjust_cfa_offset ARG_SLOT_SIZE
-    jalr    $t9                                     # (uint32_t method_index, ArtMethod* referrer).
-    move    $a0, $s2                                # $a0 = method index.
-
-    # If the method wasn't resolved, skip the lookup and go to artInvokeInterfaceTrampoline().
-    beqz    $v0, .Lconflict_trampoline
-    addiu   $sp, $sp, ARG_SLOT_SIZE                 # Remove argument slots from the stack.
-    .cfi_adjust_cfa_offset -ARG_SLOT_SIZE
-
-    b       .Limt_table_iterate
-    nop
-END art_quick_imt_conflict_trampoline
-
-    .extern artQuickResolutionTrampoline
-ENTRY art_quick_resolution_trampoline
-    SETUP_SAVE_REFS_AND_ARGS_FRAME
-    move    $a2, rSELF                    # pass Thread::Current
-    la      $t9, artQuickResolutionTrampoline
-    jalr    $t9                           # (Method* called, receiver, Thread*, SP)
-    addiu   $a3, $sp, ARG_SLOT_SIZE       # pass $sp (remove arg slots)
-    beqz    $v0, 1f
-    lw      $a0, ARG_SLOT_SIZE($sp)       # load resolved method to $a0
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME
-    move    $t9, $v0               # code pointer must be in $t9 to generate the global pointer
-    jalr    $zero, $t9             # tail call to method
-    nop
-1:
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME
-    DELIVER_PENDING_EXCEPTION
-END art_quick_resolution_trampoline
-
-    .extern artQuickGenericJniTrampoline
-    .extern artQuickGenericJniEndTrampoline
-ENTRY art_quick_generic_jni_trampoline
-    SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
-    move    $s8, $sp               # save $sp to $s8
-    move    $s3, $gp               # save $gp to $s3
-
-    # prepare for call to artQuickGenericJniTrampoline(Thread*, SP)
-    move    $a0, rSELF                     # pass Thread::Current
-    addiu   $a1, $sp, ARG_SLOT_SIZE        # save $sp (remove arg slots)
-    la      $t9, artQuickGenericJniTrampoline
-    jalr    $t9                            # (Thread*, SP)
-    addiu   $sp, $sp, -5120                # reserve space on the stack
-
-    # The C call will have registered the complete save-frame on success.
-    # The result of the call is:
-    # v0: ptr to native code, 0 on error.
-    # v1: ptr to the bottom of the used area of the alloca, can restore stack till here.
-    beq     $v0, $zero, 2f         # check entry error
-    move    $t9, $v0               # save the code ptr
-    move    $sp, $v1               # release part of the alloca
-
-    # Load parameters from stack into registers
-    lw      $a0,   0($sp)
-    lw      $a1,   4($sp)
-    lw      $a2,   8($sp)
-    lw      $a3,  12($sp)
-
-    # artQuickGenericJniTrampoline sets bit 0 of the native code address to 1
-    # when the first two arguments are both single precision floats. This lets
-    # us extract them properly from the stack and load into floating point
-    # registers.
-    MTD     $a0, $a1, $f12, $f13
-    andi    $t0, $t9, 1
-    xor     $t9, $t9, $t0
-    bnez    $t0, 1f
-    mtc1    $a1, $f14
-    MTD     $a2, $a3, $f14, $f15
-
-1:
-    jalr    $t9                    # native call
-    nop
-    addiu   $sp, $sp, 16           # remove arg slots
-
-    move    $gp, $s3               # restore $gp from $s3
-
-    # result sign extension is handled in C code
-    # prepare for call to artQuickGenericJniEndTrampoline(Thread*, result, result_f)
-    move    $a0, rSELF             # pass Thread::Current
-    move    $a2, $v0               # pass result
-    move    $a3, $v1
-    addiu   $sp, $sp, -32          # reserve arg slots
-    la      $t9, artQuickGenericJniEndTrampoline
-    jalr    $t9
-    s.d     $f0, 16($sp)           # pass result_f
-
-    lw      $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
-    bne     $t0, $zero, 2f         # check for pending exceptions
-
-    move    $sp, $s8               # tear down the alloca
-
-    # tear down the callee-save frame
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME
-
-    MTD     $v0, $v1, $f0, $f1     # move float value to return value
-    jalr    $zero, $ra
-    nop
-
-2:
-    lw      $t0, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
-    addiu   $sp, $t0, -1  // Remove the GenericJNI tag.
-    move    $gp, $s3               # restore $gp from $s3
-    # This will create a new save-all frame, required by the runtime.
-    DELIVER_PENDING_EXCEPTION
-END art_quick_generic_jni_trampoline
-
-    .extern artQuickToInterpreterBridge
-ENTRY art_quick_to_interpreter_bridge
-    SETUP_SAVE_REFS_AND_ARGS_FRAME
-    move    $a1, rSELF                          # pass Thread::Current
-    la      $t9, artQuickToInterpreterBridge
-    jalr    $t9                                 # (Method* method, Thread*, SP)
-    addiu   $a2, $sp, ARG_SLOT_SIZE             # pass $sp (remove arg slots)
-    lw      $t7, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME
-    bnez    $t7, 1f
-    # don't care if $v0 and/or $v1 are modified, when exception branch taken
-    MTD     $v0, $v1, $f0, $f1                  # move float value to return value
-    jalr    $zero, $ra
-    nop
-1:
-    DELIVER_PENDING_EXCEPTION
-END art_quick_to_interpreter_bridge
-
-    .extern artInvokeObsoleteMethod
-ENTRY art_invoke_obsolete_method_stub
-    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
-    la      $t9, artInvokeObsoleteMethod
-    jalr    $t9                                 # (Method* method, Thread* self)
-    move    $a1, rSELF                          # pass Thread::Current
-END art_invoke_obsolete_method_stub
-
-    /*
-     * Routine that intercepts method calls and returns.
-     */
-    .extern artInstrumentationMethodEntryFromCode
-    .extern artInstrumentationMethodExitFromCode
-ENTRY art_quick_instrumentation_entry
-    SETUP_SAVE_REFS_AND_ARGS_FRAME
-    sw      $a0, 28($sp)    # save arg0 in free arg slot
-    addiu   $a3, $sp, ARG_SLOT_SIZE     # Pass $sp.
-    la      $t9, artInstrumentationMethodEntryFromCode
-    jalr    $t9             # (Method*, Object*, Thread*, SP)
-    move    $a2, rSELF      # pass Thread::Current
-    beqz    $v0, .Ldeliver_instrumentation_entry_exception
-    move    $t9, $v0        # $t9 holds reference to code
-    lw      $a0, 28($sp)    # restore arg0 from free arg slot
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME
-    la      $ra, art_quick_instrumentation_exit
-    jalr    $zero, $t9      # call method, returning to art_quick_instrumentation_exit
-    nop
-.Ldeliver_instrumentation_entry_exception:
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME
-    DELIVER_PENDING_EXCEPTION
-END art_quick_instrumentation_entry
-
-ENTRY_NO_GP art_quick_instrumentation_exit
-    move    $ra, $zero      # RA points here, so clobber with 0 for later checks.
-    SETUP_SAVE_EVERYTHING_FRAME  # Allocates ARG_SLOT_SIZE bytes at the bottom of the stack.
-    move    $s2, $gp             # Preserve $gp across the call for exception delivery.
-
-    addiu   $a3, $sp, ARG_SLOT_SIZE+16  # Pass fpr_res pointer ($f0 in SAVE_EVERYTHING_FRAME).
-    addiu   $a2, $sp, ARG_SLOT_SIZE+148 # Pass gpr_res pointer ($v0 in SAVE_EVERYTHING_FRAME).
-    addiu   $a1, $sp, ARG_SLOT_SIZE     # Pass $sp.
-    la      $t9, artInstrumentationMethodExitFromCode
-    jalr    $t9                         # (Thread*, SP, gpr_res*, fpr_res*)
-    move    $a0, rSELF                  # Pass Thread::Current.
-
-    beqz    $v0, .Ldo_deliver_instrumentation_exception
-    move    $gp, $s2        # Deliver exception if we got nullptr as function.
-    bnez    $v1, .Ldeoptimize
-
-    # Normal return.
-    sw      $v0, (ARG_SLOT_SIZE+FRAME_SIZE_SAVE_EVERYTHING-4)($sp)  # Set return pc.
-    RESTORE_SAVE_EVERYTHING_FRAME
-    jalr    $zero, $ra
-    nop
-.Ldo_deliver_instrumentation_exception:
-    DELIVER_PENDING_EXCEPTION_FRAME_READY
-.Ldeoptimize:
-    b       art_quick_deoptimize
-    sw      $v1, (ARG_SLOT_SIZE+FRAME_SIZE_SAVE_EVERYTHING-4)($sp)
-                            # Fake a call from instrumentation return pc.
-END art_quick_instrumentation_exit
-
-    /*
-     * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
-     * will long jump to the upcall with a special exception of -1.
-     */
-    .extern artDeoptimize
-ENTRY_NO_GP_CUSTOM_CFA art_quick_deoptimize, ARG_SLOT_SIZE+FRAME_SIZE_SAVE_EVERYTHING
-    # SETUP_SAVE_EVERYTHING_FRAME has been done by art_quick_instrumentation_exit.
-    .cfi_rel_offset 31, ARG_SLOT_SIZE+252
-    .cfi_rel_offset 30, ARG_SLOT_SIZE+248
-    .cfi_rel_offset 28, ARG_SLOT_SIZE+244
-    .cfi_rel_offset 25, ARG_SLOT_SIZE+240
-    .cfi_rel_offset 24, ARG_SLOT_SIZE+236
-    .cfi_rel_offset 23, ARG_SLOT_SIZE+232
-    .cfi_rel_offset 22, ARG_SLOT_SIZE+228
-    .cfi_rel_offset 21, ARG_SLOT_SIZE+224
-    .cfi_rel_offset 20, ARG_SLOT_SIZE+220
-    .cfi_rel_offset 19, ARG_SLOT_SIZE+216
-    .cfi_rel_offset 18, ARG_SLOT_SIZE+212
-    .cfi_rel_offset 17, ARG_SLOT_SIZE+208
-    .cfi_rel_offset 16, ARG_SLOT_SIZE+204
-    .cfi_rel_offset 15, ARG_SLOT_SIZE+200
-    .cfi_rel_offset 14, ARG_SLOT_SIZE+196
-    .cfi_rel_offset 13, ARG_SLOT_SIZE+192
-    .cfi_rel_offset 12, ARG_SLOT_SIZE+188
-    .cfi_rel_offset 11, ARG_SLOT_SIZE+184
-    .cfi_rel_offset 10, ARG_SLOT_SIZE+180
-    .cfi_rel_offset 9, ARG_SLOT_SIZE+176
-    .cfi_rel_offset 8, ARG_SLOT_SIZE+172
-    .cfi_rel_offset 7, ARG_SLOT_SIZE+168
-    .cfi_rel_offset 6, ARG_SLOT_SIZE+164
-    .cfi_rel_offset 5, ARG_SLOT_SIZE+160
-    .cfi_rel_offset 4, ARG_SLOT_SIZE+156
-    .cfi_rel_offset 3, ARG_SLOT_SIZE+152
-    .cfi_rel_offset 2, ARG_SLOT_SIZE+148
-    .cfi_rel_offset 1, ARG_SLOT_SIZE+144
-
-    la      $t9, artDeoptimize
-    jalr    $t9             # (Thread*)
-    move    $a0, rSELF      # pass Thread::current
-    break
-END art_quick_deoptimize
-
-    /*
-     * Compiled code has requested that we deoptimize into the interpreter. The deoptimization
-     * will long jump to the upcall with a special exception of -1.
-     */
-    .extern artDeoptimizeFromCompiledCode
-ENTRY_NO_GP art_quick_deoptimize_from_compiled_code
-    SETUP_SAVE_EVERYTHING_FRAME
-    la       $t9, artDeoptimizeFromCompiledCode
-    jalr     $t9                            # (DeoptimizationKind, Thread*)
-    move     $a1, rSELF                     # pass Thread::current
-END art_quick_deoptimize_from_compiled_code
-
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
-     * 6 bits.
-     * On entry:
-     *   $a0: low word
-     *   $a1: high word
-     *   $a2: shift count
-     */
-ENTRY_NO_GP art_quick_shl_long
-    /* shl-long vAA, vBB, vCC */
-    sll     $v0, $a0, $a2                    #  rlo<- alo << (shift&31)
-    not     $v1, $a2                         #  rhi<- 31-shift  (shift is 5b)
-    srl     $a0, 1
-    srl     $a0, $v1                         #  alo<- alo >> (32-(shift&31))
-    sll     $v1, $a1, $a2                    #  rhi<- ahi << (shift&31)
-    andi    $a2, 0x20                        #  shift< shift & 0x20
-    beqz    $a2, 1f
-    or      $v1, $a0                         #  rhi<- rhi | alo
-
-    move    $v1, $v0                         #  rhi<- rlo (if shift&0x20)
-    move    $v0, $zero                       #  rlo<- 0 (if shift&0x20)
-
-1:  jalr    $zero, $ra
-    nop
-END art_quick_shl_long
-
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
-     * 6 bits.
-     * On entry:
-     *   $a0: low word
-     *   $a1: high word
-     *   $a2: shift count
-     */
-ENTRY_NO_GP art_quick_shr_long
-    sra     $v1, $a1, $a2                    #  rhi<- ahi >> (shift&31)
-    srl     $v0, $a0, $a2                    #  rlo<- alo >> (shift&31)
-    sra     $a3, $a1, 31                     #  $a3<- sign(ah)
-    not     $a0, $a2                         #  alo<- 31-shift (shift is 5b)
-    sll     $a1, 1
-    sll     $a1, $a0                         #  ahi<- ahi << (32-(shift&31))
-    andi    $a2, 0x20                        #  shift & 0x20
-    beqz    $a2, 1f
-    or      $v0, $a1                         #  rlo<- rlo | ahi
-
-    move    $v0, $v1                         #  rlo<- rhi (if shift&0x20)
-    move    $v1, $a3                         #  rhi<- sign(ahi) (if shift&0x20)
-
-1:  jalr    $zero, $ra
-    nop
-END art_quick_shr_long
-
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
-     * 6 bits.
-     * On entry:
-     *   $a0: low word
-     *   $a1: high word
-     *   $a2: shift count
-     */
-    /* ushr-long vAA, vBB, vCC */
-ENTRY_NO_GP art_quick_ushr_long
-    srl     $v1, $a1, $a2                    #  rhi<- ahi >> (shift&31)
-    srl     $v0, $a0, $a2                    #  rlo<- alo >> (shift&31)
-    not     $a0, $a2                         #  alo<- 31-shift (shift is 5b)
-    sll     $a1, 1
-    sll     $a1, $a0                         #  ahi<- ahi << (32-(shift&31))
-    andi    $a2, 0x20                        #  shift & 0x20
-    beqz    $a2, 1f
-    or      $v0, $a1                         #  rlo<- rlo | ahi
-
-    move    $v0, $v1                         #  rlo<- rhi (if shift&0x20)
-    move    $v1, $zero                       #  rhi<- 0 (if shift&0x20)
-
-1:  jalr    $zero, $ra
-    nop
-END art_quick_ushr_long
-
-/* java.lang.String.indexOf(int ch, int fromIndex=0) */
-ENTRY_NO_GP art_quick_indexof
-/* $a0 holds address of "this" */
-/* $a1 holds "ch" */
-/* $a2 holds "fromIndex" */
-#if (STRING_COMPRESSION_FEATURE)
-    lw    $a3, MIRROR_STRING_COUNT_OFFSET($a0)    # 'count' field of this
-#else
-    lw    $t0, MIRROR_STRING_COUNT_OFFSET($a0)    # this.length()
-#endif
-    slt   $t1, $a2, $zero # if fromIndex < 0
-#if defined(_MIPS_ARCH_MIPS32R6)
-    seleqz $a2, $a2, $t1  #     fromIndex = 0;
-#else
-    movn   $a2, $zero, $t1 #    fromIndex = 0;
-#endif
-
-#if (STRING_COMPRESSION_FEATURE)
-    srl   $t0, $a3, 1     # $a3 holds count (with flag) and $t0 holds actual length
-#endif
-    subu  $t0, $t0, $a2   # this.length() - fromIndex
-    blez  $t0, 6f         # if this.length()-fromIndex <= 0
-    li    $v0, -1         #     return -1;
-
-#if (STRING_COMPRESSION_FEATURE)
-    sll   $a3, $a3, 31    # Extract compression flag.
-    beqz  $a3, .Lstring_indexof_compressed
-    move  $t2, $a0        # Save a copy in $t2 to later compute result (in branch delay slot).
-#endif
-    sll   $v0, $a2, 1     # $a0 += $a2 * 2
-    addu  $a0, $a0, $v0   #  "  ditto  "
-    move  $v0, $a2        # Set i to fromIndex.
-
-1:
-    lhu   $t3, MIRROR_STRING_VALUE_OFFSET($a0)    # if this.charAt(i) == ch
-    beq   $t3, $a1, 6f                            #     return i;
-    addu  $a0, $a0, 2     # i++
-    subu  $t0, $t0, 1     # this.length() - i
-    bnez  $t0, 1b         # while this.length() - i > 0
-    addu  $v0, $v0, 1     # i++
-
-    li    $v0, -1         # if this.length() - i <= 0
-                          #     return -1;
-
-6:
-    j     $ra
-    nop
-
-#if (STRING_COMPRESSION_FEATURE)
-.Lstring_indexof_compressed:
-    addu  $a0, $a0, $a2   # $a0 += $a2
-
-.Lstring_indexof_compressed_loop:
-    lbu   $t3, MIRROR_STRING_VALUE_OFFSET($a0)
-    beq   $t3, $a1, .Lstring_indexof_compressed_matched
-    subu  $t0, $t0, 1
-    bgtz  $t0, .Lstring_indexof_compressed_loop
-    addu  $a0, $a0, 1
-
-.Lstring_indexof_nomatch:
-    jalr  $zero, $ra
-    li    $v0, -1         # return -1;
-
-.Lstring_indexof_compressed_matched:
-    jalr  $zero, $ra
-    subu  $v0, $a0, $t2   # return (current - start);
-#endif
-END art_quick_indexof
-
-/* java.lang.String.compareTo(String anotherString) */
-ENTRY_NO_GP art_quick_string_compareto
-/* $a0 holds address of "this" */
-/* $a1 holds address of "anotherString" */
-    beq    $a0, $a1, .Lstring_compareto_length_diff   # this and anotherString are the same object
-    move   $a3, $a2                                   # trick to return 0 (it returns a2 - a3)
-
-#if (STRING_COMPRESSION_FEATURE)
-    lw     $t0, MIRROR_STRING_COUNT_OFFSET($a0)   # 'count' field of this
-    lw     $t1, MIRROR_STRING_COUNT_OFFSET($a1)   # 'count' field of anotherString
-    sra    $a2, $t0, 1                            # this.length()
-    sra    $a3, $t1, 1                            # anotherString.length()
-#else
-    lw     $a2, MIRROR_STRING_COUNT_OFFSET($a0)   # this.length()
-    lw     $a3, MIRROR_STRING_COUNT_OFFSET($a1)   # anotherString.length()
-#endif
-
-    MINu   $t2, $a2, $a3
-    # $t2 now holds min(this.length(),anotherString.length())
-
-    # while min(this.length(),anotherString.length())-i != 0
-    beqz   $t2, .Lstring_compareto_length_diff # if $t2==0
-    nop                                        #     return (this.length() - anotherString.length())
-
-#if (STRING_COMPRESSION_FEATURE)
-    # Differ cases:
-    sll    $t3, $t0, 31
-    beqz   $t3, .Lstring_compareto_this_is_compressed
-    sll    $t3, $t1, 31                           # In branch delay slot.
-    beqz   $t3, .Lstring_compareto_that_is_compressed
-    nop
-    b      .Lstring_compareto_both_not_compressed
-    nop
-
-.Lstring_compareto_this_is_compressed:
-    beqz   $t3, .Lstring_compareto_both_compressed
-    nop
-    /* If (this->IsCompressed() && that->IsCompressed() == false) */
-.Lstring_compareto_loop_comparison_this_compressed:
-    lbu    $t0, MIRROR_STRING_VALUE_OFFSET($a0)
-    lhu    $t1, MIRROR_STRING_VALUE_OFFSET($a1)
-    bne    $t0, $t1, .Lstring_compareto_char_diff
-    addiu  $a0, $a0, 1    # point at this.charAt(i++) - compressed
-    subu   $t2, $t2, 1    # new value of min(this.length(),anotherString.length())-i
-    bnez   $t2, .Lstring_compareto_loop_comparison_this_compressed
-    addiu  $a1, $a1, 2    # point at anotherString.charAt(i++) - uncompressed
-    jalr   $zero, $ra
-    subu   $v0, $a2, $a3  # return (this.length() - anotherString.length())
-
-.Lstring_compareto_that_is_compressed:
-    lhu    $t0, MIRROR_STRING_VALUE_OFFSET($a0)
-    lbu    $t1, MIRROR_STRING_VALUE_OFFSET($a1)
-    bne    $t0, $t1, .Lstring_compareto_char_diff
-    addiu  $a0, $a0, 2    # point at this.charAt(i++) - uncompressed
-    subu   $t2, $t2, 1    # new value of min(this.length(),anotherString.length())-i
-    bnez   $t2, .Lstring_compareto_that_is_compressed
-    addiu  $a1, $a1, 1    # point at anotherString.charAt(i++) - compressed
-    jalr   $zero, $ra
-    subu   $v0, $a2, $a3  # return (this.length() - anotherString.length())
-
-.Lstring_compareto_both_compressed:
-    lbu    $t0, MIRROR_STRING_VALUE_OFFSET($a0)
-    lbu    $t1, MIRROR_STRING_VALUE_OFFSET($a1)
-    bne    $t0, $t1, .Lstring_compareto_char_diff
-    addiu  $a0, $a0, 1    # point at this.charAt(i++) - compressed
-    subu   $t2, $t2, 1    # new value of min(this.length(),anotherString.length())-i
-    bnez   $t2, .Lstring_compareto_both_compressed
-    addiu  $a1, $a1, 1    # point at anotherString.charAt(i++) - compressed
-    jalr   $zero, $ra
-    subu   $v0, $a2, $a3  # return (this.length() - anotherString.length())
-#endif
-
-.Lstring_compareto_both_not_compressed:
-    lhu    $t0, MIRROR_STRING_VALUE_OFFSET($a0)   # while this.charAt(i) == anotherString.charAt(i)
-    lhu    $t1, MIRROR_STRING_VALUE_OFFSET($a1)
-    bne    $t0, $t1, .Lstring_compareto_char_diff # if this.charAt(i) != anotherString.charAt(i)
-                          #     return (this.charAt(i) - anotherString.charAt(i))
-    addiu  $a0, $a0, 2    # point at this.charAt(i++)
-    subu   $t2, $t2, 1    # new value of min(this.length(),anotherString.length())-i
-    bnez   $t2, .Lstring_compareto_both_not_compressed
-    addiu  $a1, $a1, 2    # point at anotherString.charAt(i++)
-
-.Lstring_compareto_length_diff:
-    jalr   $zero, $ra
-    subu   $v0, $a2, $a3  # return (this.length() - anotherString.length())
-
-.Lstring_compareto_char_diff:
-    jalr   $zero, $ra
-    subu   $v0, $t0, $t1  # return (this.charAt(i) - anotherString.charAt(i))
-END art_quick_string_compareto
-
-    /*
-     * Create a function `name` calling the ReadBarrier::Mark routine,
-     * getting its argument and returning its result through register
-     * `reg`, saving and restoring all caller-save registers.
-     */
-.macro READ_BARRIER_MARK_REG name, reg
-ENTRY \name
-    // Null check so that we can load the lock word.
-    bnez    \reg, .Lnot_null_\name
-    nop
-.Lret_rb_\name:
-    jalr    $zero, $ra
-    nop
-.Lnot_null_\name:
-    // Check lock word for mark bit, if marked return.
-    lw      $t9, MIRROR_OBJECT_LOCK_WORD_OFFSET(\reg)
-    .set push
-    .set noat
-    sll     $at, $t9, 31 - LOCK_WORD_MARK_BIT_SHIFT     # Move mark bit to sign bit.
-    bltz    $at, .Lret_rb_\name
-#if (LOCK_WORD_STATE_SHIFT != 30) || (LOCK_WORD_STATE_FORWARDING_ADDRESS != 3)
-    // The below code depends on the lock word state being in the highest bits
-    // and the "forwarding address" state having all bits set.
-#error "Unexpected lock word state shift or forwarding address state value."
-#endif
-    // Test that both the forwarding state bits are 1.
-    sll     $at, $t9, 1
-    and     $at, $at, $t9                               # Sign bit = 1 IFF both bits are 1.
-    bltz    $at, .Lret_forwarding_address\name
-    nop
-    .set pop
-
-    addiu   $sp, $sp, -160      # Includes 16 bytes of space for argument registers a0-a3.
-    .cfi_adjust_cfa_offset 160
-
-    sw      $ra, 156($sp)
-    .cfi_rel_offset 31, 156
-    sw      $t8, 152($sp)
-    .cfi_rel_offset 24, 152
-    sw      $t7, 148($sp)
-    .cfi_rel_offset 15, 148
-    sw      $t6, 144($sp)
-    .cfi_rel_offset 14, 144
-    sw      $t5, 140($sp)
-    .cfi_rel_offset 13, 140
-    sw      $t4, 136($sp)
-    .cfi_rel_offset 12, 136
-    sw      $t3, 132($sp)
-    .cfi_rel_offset 11, 132
-    sw      $t2, 128($sp)
-    .cfi_rel_offset 10, 128
-    sw      $t1, 124($sp)
-    .cfi_rel_offset 9, 124
-    sw      $t0, 120($sp)
-    .cfi_rel_offset 8, 120
-    sw      $a3, 116($sp)
-    .cfi_rel_offset 7, 116
-    sw      $a2, 112($sp)
-    .cfi_rel_offset 6, 112
-    sw      $a1, 108($sp)
-    .cfi_rel_offset 5, 108
-    sw      $a0, 104($sp)
-    .cfi_rel_offset 4, 104
-    sw      $v1, 100($sp)
-    .cfi_rel_offset 3, 100
-    sw      $v0, 96($sp)
-    .cfi_rel_offset 2, 96
-
-    la      $t9, artReadBarrierMark
-
-    sdc1    $f18, 88($sp)
-    sdc1    $f16, 80($sp)
-    sdc1    $f14, 72($sp)
-    sdc1    $f12, 64($sp)
-    sdc1    $f10, 56($sp)
-    sdc1    $f8,  48($sp)
-    sdc1    $f6,  40($sp)
-    sdc1    $f4,  32($sp)
-    sdc1    $f2,  24($sp)
-
-    .ifnc \reg, $a0
-      move  $a0, \reg           # pass obj from `reg` in a0
-    .endif
-    jalr    $t9                 # v0 <- artReadBarrierMark(obj)
-    sdc1    $f0,  16($sp)       # in delay slot
-
-    lw      $ra, 156($sp)
-    .cfi_restore 31
-    lw      $t8, 152($sp)
-    .cfi_restore 24
-    lw      $t7, 148($sp)
-    .cfi_restore 15
-    lw      $t6, 144($sp)
-    .cfi_restore 14
-    lw      $t5, 140($sp)
-    .cfi_restore 13
-    lw      $t4, 136($sp)
-    .cfi_restore 12
-    lw      $t3, 132($sp)
-    .cfi_restore 11
-    lw      $t2, 128($sp)
-    .cfi_restore 10
-    lw      $t1, 124($sp)
-    .cfi_restore 9
-    lw      $t0, 120($sp)
-    .cfi_restore 8
-    lw      $a3, 116($sp)
-    .cfi_restore 7
-    lw      $a2, 112($sp)
-    .cfi_restore 6
-    lw      $a1, 108($sp)
-    .cfi_restore 5
-    lw      $a0, 104($sp)
-    .cfi_restore 4
-    lw      $v1, 100($sp)
-    .cfi_restore 3
-
-    .ifnc \reg, $v0
-      move  \reg, $v0           # `reg` <- v0
-      lw    $v0, 96($sp)
-      .cfi_restore 2
-    .endif
-
-    ldc1    $f18, 88($sp)
-    ldc1    $f16, 80($sp)
-    ldc1    $f14, 72($sp)
-    ldc1    $f12, 64($sp)
-    ldc1    $f10, 56($sp)
-    ldc1    $f8,  48($sp)
-    ldc1    $f6,  40($sp)
-    ldc1    $f4,  32($sp)
-    ldc1    $f2,  24($sp)
-    ldc1    $f0,  16($sp)
-
-    jalr    $zero, $ra
-    addiu   $sp, $sp, 160
-    .cfi_adjust_cfa_offset -160
-
-.Lret_forwarding_address\name:
-    jalr    $zero, $ra
-    // Shift left by the forwarding address shift. This clears out the state bits since they are
-    // in the top 2 bits of the lock word.
-    sll     \reg, $t9, LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
-END \name
-.endm
-
-// Note that art_quick_read_barrier_mark_regXX corresponds to register XX+1.
-// ZERO (register 0) is reserved.
-// AT (register 1) is reserved as a temporary/scratch register.
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg01, $v0
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg02, $v1
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg03, $a0
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg04, $a1
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg05, $a2
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg06, $a3
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg07, $t0
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg08, $t1
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg09, $t2
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg10, $t3
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg11, $t4
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg12, $t5
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg13, $t6
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg14, $t7
-// S0 and S1 (registers 16 and 17) are reserved as suspended and thread registers.
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg17, $s2
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, $s3
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg19, $s4
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg20, $s5
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg21, $s6
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg22, $s7
-// T8 and T9 (registers 24 and 25) are reserved as temporary/scratch registers.
-// K0, K1, GP, SP (registers 26 - 29) are reserved.
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, $s8
-// RA (register 31) is reserved.
-
-// Caller code:
-// Short constant offset/index:
-// R2:                           | R6:
-//  lw      $t9, pReadBarrierMarkReg00
-//  beqz    $t9, skip_call       |  beqzc   $t9, skip_call
-//  addiu   $t9, $t9, thunk_disp |  nop
-//  jalr    $t9                  |  jialc   $t9, thunk_disp
-//  nop                          |
-// skip_call:                    | skip_call:
-//  lw      `out`, ofs(`obj`)    |  lw      `out`, ofs(`obj`)
-// [subu    `out`, $zero, `out`] | [subu    `out`, $zero, `out`]  # Unpoison reference.
-.macro BRB_FIELD_SHORT_OFFSET_ENTRY obj
-1:
-    # Explicit null check. May be redundant (for array elements or when the field
-    # offset is larger than the page size, 4KB).
-    # $ra will be adjusted to point to lw's stack map when throwing NPE.
-    beqz    \obj, .Lintrospection_throw_npe
-#if defined(_MIPS_ARCH_MIPS32R6)
-    lapc    $gp, .Lintrospection_exits                  # $gp = address of .Lintrospection_exits.
-#else
-    addiu   $gp, $t9, (.Lintrospection_exits - 1b)      # $gp = address of .Lintrospection_exits.
-#endif
-    .set push
-    .set noat
-    lw      $at, MIRROR_OBJECT_LOCK_WORD_OFFSET(\obj)
-    sll     $at, $at, 31 - LOCK_WORD_READ_BARRIER_STATE_SHIFT   # Move barrier state bit
-                                                                # to sign bit.
-    bltz    $at, .Lintrospection_field_array            # If gray, load reference, mark.
-    move    $t8, \obj                                   # Move `obj` to $t8 for common code.
-    .set pop
-    jalr    $zero, $ra                                  # Otherwise, load-load barrier and return.
-    sync
-.endm
-
-// Caller code (R2):
-// Long constant offset/index:   | Variable index:
-//  lw      $t9, pReadBarrierMarkReg00
-//  lui     $t8, ofs_hi          |  sll     $t8, `index`, 2
-//  beqz    $t9, skip_call       |  beqz    $t9, skip_call
-//  addiu   $t9, $t9, thunk_disp |  addiu   $t9, $t9, thunk_disp
-//  jalr    $t9                  |  jalr    $t9
-// skip_call:                    | skip_call:
-//  addu    $t8, $t8, `obj`      |  addu    $t8, $t8, `obj`
-//  lw      `out`, ofs_lo($t8)   |  lw      `out`, ofs($t8)
-// [subu    `out`, $zero, `out`] | [subu    `out`, $zero, `out`]  # Unpoison reference.
-//
-// Caller code (R6):
-// Long constant offset/index:   | Variable index:
-//  lw      $t9, pReadBarrierMarkReg00
-//  beqz    $t9, skip_call       |  beqz    $t9, skip_call
-//  aui     $t8, `obj`, ofs_hi   |  lsa     $t8, `index`, `obj`, 2
-//  jialc   $t9, thunk_disp      |  jialc   $t9, thunk_disp
-// skip_call:                    | skip_call:
-//  lw      `out`, ofs_lo($t8)   |  lw      `out`, ofs($t8)
-// [subu    `out`, $zero, `out`] | [subu    `out`, $zero, `out`]  # Unpoison reference.
-.macro BRB_FIELD_LONG_OFFSET_ENTRY obj
-1:
-    # No explicit null check for variable indices or large constant indices/offsets
-    # as it must have been done earlier.
-#if defined(_MIPS_ARCH_MIPS32R6)
-    lapc    $gp, .Lintrospection_exits                  # $gp = address of .Lintrospection_exits.
-#else
-    addiu   $gp, $t9, (.Lintrospection_exits - 1b)      # $gp = address of .Lintrospection_exits.
-#endif
-    .set push
-    .set noat
-    lw      $at, MIRROR_OBJECT_LOCK_WORD_OFFSET(\obj)
-    sll     $at, $at, 31 - LOCK_WORD_READ_BARRIER_STATE_SHIFT   # Move barrier state bit
-                                                                # to sign bit.
-    bltz    $at, .Lintrospection_field_array            # If gray, load reference, mark.
-    nop
-    .set pop
-    jalr    $zero, $ra                                  # Otherwise, load-load barrier and return.
-    sync
-    break                                               # Padding to 8 instructions.
-.endm
-
-.macro BRB_GC_ROOT_ENTRY root
-1:
-#if defined(_MIPS_ARCH_MIPS32R6)
-    lapc    $gp, .Lintrospection_exit_\root             # $gp = exit point address.
-#else
-    addiu   $gp, $t9, (.Lintrospection_exit_\root - 1b)  # $gp = exit point address.
-#endif
-    bnez    \root, .Lintrospection_common
-    move    $t8, \root                                  # Move reference to $t8 for common code.
-    jalr    $zero, $ra                                  # Return if null.
-    # The next instruction (from the following BRB_GC_ROOT_ENTRY) fills the delay slot.
-    # This instruction has no effect (actual NOP for the last entry; otherwise changes $gp,
-    # which is unused after that anyway).
-.endm
-
-.macro BRB_FIELD_EXIT out
-.Lintrospection_exit_\out:
-    jalr    $zero, $ra
-    move    \out, $t8                                   # Return reference in expected register.
-.endm
-
-.macro BRB_FIELD_EXIT_BREAK
-    break
-    break
-.endm
-
-ENTRY_NO_GP art_quick_read_barrier_mark_introspection
-    # Entry points for offsets/indices not fitting into int16_t and for variable indices.
-    BRB_FIELD_LONG_OFFSET_ENTRY $v0
-    BRB_FIELD_LONG_OFFSET_ENTRY $v1
-    BRB_FIELD_LONG_OFFSET_ENTRY $a0
-    BRB_FIELD_LONG_OFFSET_ENTRY $a1
-    BRB_FIELD_LONG_OFFSET_ENTRY $a2
-    BRB_FIELD_LONG_OFFSET_ENTRY $a3
-    BRB_FIELD_LONG_OFFSET_ENTRY $t0
-    BRB_FIELD_LONG_OFFSET_ENTRY $t1
-    BRB_FIELD_LONG_OFFSET_ENTRY $t2
-    BRB_FIELD_LONG_OFFSET_ENTRY $t3
-    BRB_FIELD_LONG_OFFSET_ENTRY $t4
-    BRB_FIELD_LONG_OFFSET_ENTRY $t5
-    BRB_FIELD_LONG_OFFSET_ENTRY $t6
-    BRB_FIELD_LONG_OFFSET_ENTRY $t7
-    BRB_FIELD_LONG_OFFSET_ENTRY $s2
-    BRB_FIELD_LONG_OFFSET_ENTRY $s3
-    BRB_FIELD_LONG_OFFSET_ENTRY $s4
-    BRB_FIELD_LONG_OFFSET_ENTRY $s5
-    BRB_FIELD_LONG_OFFSET_ENTRY $s6
-    BRB_FIELD_LONG_OFFSET_ENTRY $s7
-    BRB_FIELD_LONG_OFFSET_ENTRY $s8
-
-    # Entry points for offsets/indices fitting into int16_t.
-    BRB_FIELD_SHORT_OFFSET_ENTRY $v0
-    BRB_FIELD_SHORT_OFFSET_ENTRY $v1
-    BRB_FIELD_SHORT_OFFSET_ENTRY $a0
-    BRB_FIELD_SHORT_OFFSET_ENTRY $a1
-    BRB_FIELD_SHORT_OFFSET_ENTRY $a2
-    BRB_FIELD_SHORT_OFFSET_ENTRY $a3
-    BRB_FIELD_SHORT_OFFSET_ENTRY $t0
-    BRB_FIELD_SHORT_OFFSET_ENTRY $t1
-    BRB_FIELD_SHORT_OFFSET_ENTRY $t2
-    BRB_FIELD_SHORT_OFFSET_ENTRY $t3
-    BRB_FIELD_SHORT_OFFSET_ENTRY $t4
-    BRB_FIELD_SHORT_OFFSET_ENTRY $t5
-    BRB_FIELD_SHORT_OFFSET_ENTRY $t6
-    BRB_FIELD_SHORT_OFFSET_ENTRY $t7
-    BRB_FIELD_SHORT_OFFSET_ENTRY $s2
-    BRB_FIELD_SHORT_OFFSET_ENTRY $s3
-    BRB_FIELD_SHORT_OFFSET_ENTRY $s4
-    BRB_FIELD_SHORT_OFFSET_ENTRY $s5
-    BRB_FIELD_SHORT_OFFSET_ENTRY $s6
-    BRB_FIELD_SHORT_OFFSET_ENTRY $s7
-    BRB_FIELD_SHORT_OFFSET_ENTRY $s8
-
-    .global art_quick_read_barrier_mark_introspection_gc_roots
-art_quick_read_barrier_mark_introspection_gc_roots:
-    # Entry points for GC roots.
-    BRB_GC_ROOT_ENTRY $v0
-    BRB_GC_ROOT_ENTRY $v1
-    BRB_GC_ROOT_ENTRY $a0
-    BRB_GC_ROOT_ENTRY $a1
-    BRB_GC_ROOT_ENTRY $a2
-    BRB_GC_ROOT_ENTRY $a3
-    BRB_GC_ROOT_ENTRY $t0
-    BRB_GC_ROOT_ENTRY $t1
-    BRB_GC_ROOT_ENTRY $t2
-    BRB_GC_ROOT_ENTRY $t3
-    BRB_GC_ROOT_ENTRY $t4
-    BRB_GC_ROOT_ENTRY $t5
-    BRB_GC_ROOT_ENTRY $t6
-    BRB_GC_ROOT_ENTRY $t7
-    BRB_GC_ROOT_ENTRY $s2
-    BRB_GC_ROOT_ENTRY $s3
-    BRB_GC_ROOT_ENTRY $s4
-    BRB_GC_ROOT_ENTRY $s5
-    BRB_GC_ROOT_ENTRY $s6
-    BRB_GC_ROOT_ENTRY $s7
-    BRB_GC_ROOT_ENTRY $s8
-    .global art_quick_read_barrier_mark_introspection_end_of_entries
-art_quick_read_barrier_mark_introspection_end_of_entries:
-    nop                         # Fill the delay slot of the last BRB_GC_ROOT_ENTRY.
-
-.Lintrospection_throw_npe:
-    b       art_quick_throw_null_pointer_exception
-    addiu   $ra, $ra, 4         # Skip lw, make $ra point to lw's stack map.
-
-    .set push
-    .set noat
-
-    // Fields and array elements.
-
-.Lintrospection_field_array:
-    // Get the field/element address using $t8 and the offset from the lw instruction.
-    lh      $at, 0($ra)         # $ra points to lw: $at = field/element offset.
-    addiu   $ra, $ra, 4 + HEAP_POISON_INSTR_SIZE  # Skip lw(+subu).
-    addu    $t8, $t8, $at       # $t8 = field/element address.
-
-    // Calculate the address of the exit point, store it in $gp and load the reference into $t8.
-    lb      $at, (-HEAP_POISON_INSTR_SIZE - 2)($ra)   # $ra-HEAP_POISON_INSTR_SIZE-4 points to
-                                                      # "lw `out`, ...".
-    andi    $at, $at, 31        # Extract `out` from lw.
-    sll     $at, $at, 3         # Multiply `out` by the exit point size (BRB_FIELD_EXIT* macros).
-
-    lw      $t8, 0($t8)         # $t8 = reference.
-    UNPOISON_HEAP_REF $t8
-
-    // Return if null reference.
-    bnez    $t8, .Lintrospection_common
-    addu    $gp, $gp, $at       # $gp = address of the exit point.
-
-    // Early return through the exit point.
-.Lintrospection_return_early:
-    jalr    $zero, $gp          # Move $t8 to `out` and return.
-    nop
-
-    // Code common for GC roots, fields and array elements.
-
-.Lintrospection_common:
-    // Check lock word for mark bit, if marked return.
-    lw      $t9, MIRROR_OBJECT_LOCK_WORD_OFFSET($t8)
-    sll     $at, $t9, 31 - LOCK_WORD_MARK_BIT_SHIFT     # Move mark bit to sign bit.
-    bltz    $at, .Lintrospection_return_early
-#if (LOCK_WORD_STATE_SHIFT != 30) || (LOCK_WORD_STATE_FORWARDING_ADDRESS != 3)
-    // The below code depends on the lock word state being in the highest bits
-    // and the "forwarding address" state having all bits set.
-#error "Unexpected lock word state shift or forwarding address state value."
-#endif
-    // Test that both the forwarding state bits are 1.
-    sll     $at, $t9, 1
-    and     $at, $at, $t9                               # Sign bit = 1 IFF both bits are 1.
-    bgez    $at, .Lintrospection_mark
-    nop
-
-    .set pop
-
-    // Shift left by the forwarding address shift. This clears out the state bits since they are
-    // in the top 2 bits of the lock word.
-    jalr    $zero, $gp          # Move $t8 to `out` and return.
-    sll     $t8, $t9, LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
-
-.Lintrospection_mark:
-    // Partially set up the stack frame preserving only $ra.
-    addiu   $sp, $sp, -160      # Includes 16 bytes of space for argument registers $a0-$a3.
-    .cfi_adjust_cfa_offset 160
-    sw      $ra, 156($sp)
-    .cfi_rel_offset 31, 156
-
-    // Set up $gp, clobbering $ra and using the branch delay slot for a useful instruction.
-    bal     1f
-    sw      $gp, 152($sp)       # Preserve the exit point address.
-1:
-    .cpload $ra
-
-    // Finalize the stack frame and call.
-    sw      $t7, 148($sp)
-    .cfi_rel_offset 15, 148
-    sw      $t6, 144($sp)
-    .cfi_rel_offset 14, 144
-    sw      $t5, 140($sp)
-    .cfi_rel_offset 13, 140
-    sw      $t4, 136($sp)
-    .cfi_rel_offset 12, 136
-    sw      $t3, 132($sp)
-    .cfi_rel_offset 11, 132
-    sw      $t2, 128($sp)
-    .cfi_rel_offset 10, 128
-    sw      $t1, 124($sp)
-    .cfi_rel_offset 9, 124
-    sw      $t0, 120($sp)
-    .cfi_rel_offset 8, 120
-    sw      $a3, 116($sp)
-    .cfi_rel_offset 7, 116
-    sw      $a2, 112($sp)
-    .cfi_rel_offset 6, 112
-    sw      $a1, 108($sp)
-    .cfi_rel_offset 5, 108
-    sw      $a0, 104($sp)
-    .cfi_rel_offset 4, 104
-    sw      $v1, 100($sp)
-    .cfi_rel_offset 3, 100
-    sw      $v0, 96($sp)
-    .cfi_rel_offset 2, 96
-
-    la      $t9, artReadBarrierMark
-
-    sdc1    $f18, 88($sp)
-    sdc1    $f16, 80($sp)
-    sdc1    $f14, 72($sp)
-    sdc1    $f12, 64($sp)
-    sdc1    $f10, 56($sp)
-    sdc1    $f8,  48($sp)
-    sdc1    $f6,  40($sp)
-    sdc1    $f4,  32($sp)
-    sdc1    $f2,  24($sp)
-    sdc1    $f0,  16($sp)
-
-    jalr    $t9                 # $v0 <- artReadBarrierMark(reference)
-    move    $a0, $t8            # Pass reference in $a0.
-    move    $t8, $v0
-
-    lw      $ra, 156($sp)
-    .cfi_restore 31
-    lw      $gp, 152($sp)       # $gp = address of the exit point.
-    lw      $t7, 148($sp)
-    .cfi_restore 15
-    lw      $t6, 144($sp)
-    .cfi_restore 14
-    lw      $t5, 140($sp)
-    .cfi_restore 13
-    lw      $t4, 136($sp)
-    .cfi_restore 12
-    lw      $t3, 132($sp)
-    .cfi_restore 11
-    lw      $t2, 128($sp)
-    .cfi_restore 10
-    lw      $t1, 124($sp)
-    .cfi_restore 9
-    lw      $t0, 120($sp)
-    .cfi_restore 8
-    lw      $a3, 116($sp)
-    .cfi_restore 7
-    lw      $a2, 112($sp)
-    .cfi_restore 6
-    lw      $a1, 108($sp)
-    .cfi_restore 5
-    lw      $a0, 104($sp)
-    .cfi_restore 4
-    lw      $v1, 100($sp)
-    .cfi_restore 3
-    lw      $v0, 96($sp)
-    .cfi_restore 2
-
-    ldc1    $f18, 88($sp)
-    ldc1    $f16, 80($sp)
-    ldc1    $f14, 72($sp)
-    ldc1    $f12, 64($sp)
-    ldc1    $f10, 56($sp)
-    ldc1    $f8,  48($sp)
-    ldc1    $f6,  40($sp)
-    ldc1    $f4,  32($sp)
-    ldc1    $f2,  24($sp)
-    ldc1    $f0,  16($sp)
-
-    // Return through the exit point.
-    jalr    $zero, $gp          # Move $t8 to `out` and return.
-    addiu   $sp, $sp, 160
-    .cfi_adjust_cfa_offset -160
-
-.Lintrospection_exits:
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT $v0
-    BRB_FIELD_EXIT $v1
-    BRB_FIELD_EXIT $a0
-    BRB_FIELD_EXIT $a1
-    BRB_FIELD_EXIT $a2
-    BRB_FIELD_EXIT $a3
-    BRB_FIELD_EXIT $t0
-    BRB_FIELD_EXIT $t1
-    BRB_FIELD_EXIT $t2
-    BRB_FIELD_EXIT $t3
-    BRB_FIELD_EXIT $t4
-    BRB_FIELD_EXIT $t5
-    BRB_FIELD_EXIT $t6
-    BRB_FIELD_EXIT $t7
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT $s2
-    BRB_FIELD_EXIT $s3
-    BRB_FIELD_EXIT $s4
-    BRB_FIELD_EXIT $s5
-    BRB_FIELD_EXIT $s6
-    BRB_FIELD_EXIT $s7
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT $s8
-    BRB_FIELD_EXIT_BREAK
-END art_quick_read_barrier_mark_introspection
-
-    /*
-     * Polymorphic method invocation.
-     * On entry:
-     *   a0 = unused
-     *   a1 = receiver
-     */
-.extern artInvokePolymorphic
-ENTRY art_quick_invoke_polymorphic
-    SETUP_SAVE_REFS_AND_ARGS_FRAME
-    move    $a0, $a1                            # Make $a0 the receiver.
-    move    $a1, rSELF                          # Make $a1 an alias for the current Thread.
-    la      $t9, artInvokePolymorphic           # Invoke artInvokePolymorphic
-    jalr    $t9                                 # with args (receiver, Thread*, context).
-    addiu   $a2, $sp, ARG_SLOT_SIZE             # Make $a2 a pointer to the saved frame context.
-    lw      $t7, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME
-    bnez    $t7, 1f
-    # don't care if $v0 and/or $v1 are modified, when exception branch taken
-    MTD     $v0, $v1, $f0, $f1                  # move float value to return value
-    jalr    $zero, $ra
-    nop
-1:
-    DELIVER_PENDING_EXCEPTION
-END art_quick_invoke_polymorphic
-
-    /*
-     * InvokeCustom invocation.
-     * On entry:
-     *   a0 = call_site_idx
-     */
-.extern artInvokeCustom
-ENTRY art_quick_invoke_custom
-    SETUP_SAVE_REFS_AND_ARGS_FRAME
-    move    $a1, rSELF                          # Make $a1 an alias for the current Thread.
-    la      $t9, artInvokeCustom                # Invoke artInvokeCustom
-    jalr    $t9                                 # with args (call_site_idx, Thread*, context).
-    addiu   $a2, $sp, ARG_SLOT_SIZE             # Make $a2 a pointer to the saved frame context.
-    lw      $t7, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME
-    bnez    $t7, 1f
-    # don't care if $v0 and/or $v1 are modified, when exception branch taken
-    MTD     $v0, $v1, $f0, $f1                  # move float value to return value
-    jalr    $zero, $ra
-    nop
-END art_quick_invoke_custom
diff --git a/runtime/arch/mips/registers_mips.cc b/runtime/arch/mips/registers_mips.cc
deleted file mode 100644
index 92c2746..0000000
--- a/runtime/arch/mips/registers_mips.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "registers_mips.h"
-
-#include <ostream>
-
-namespace art {
-namespace mips {
-
-static const char* kRegisterNames[] = {
-  "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
-  "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
-  "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
-  "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra",
-};
-std::ostream& operator<<(std::ostream& os, const Register& rhs) {
-  if (rhs >= ZERO && rhs <= RA) {
-    os << kRegisterNames[rhs];
-  } else {
-    os << "Register[" << static_cast<int>(rhs) << "]";
-  }
-  return os;
-}
-
-std::ostream& operator<<(std::ostream& os, const FRegister& rhs) {
-  if (rhs >= F0 && rhs < kNumberOfFRegisters) {
-    os << "f" << static_cast<int>(rhs);
-  } else {
-    os << "FRegister[" << static_cast<int>(rhs) << "]";
-  }
-  return os;
-}
-
-std::ostream& operator<<(std::ostream& os, const VectorRegister& rhs) {
-  if (rhs >= W0 && rhs < kNumberOfVectorRegisters) {
-    os << "w" << static_cast<int>(rhs);
-  } else {
-    os << "VectorRegister[" << static_cast<int>(rhs) << "]";
-  }
-  return os;
-}
-
-}  // namespace mips
-}  // namespace art
diff --git a/runtime/arch/mips/registers_mips.h b/runtime/arch/mips/registers_mips.h
deleted file mode 100644
index 4900e41..0000000
--- a/runtime/arch/mips/registers_mips.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS_REGISTERS_MIPS_H_
-#define ART_RUNTIME_ARCH_MIPS_REGISTERS_MIPS_H_
-
-#include <iosfwd>
-
-#include "base/macros.h"
-
-namespace art {
-namespace mips {
-
-enum Register {
-  ZERO =  0,
-  AT   =  1,  // Assembler temporary.
-  V0   =  2,  // Values.
-  V1   =  3,
-  A0   =  4,  // Arguments.
-  A1   =  5,
-  A2   =  6,
-  A3   =  7,
-  T0   =  8,  // Two extra arguments / temporaries.
-  T1   =  9,
-  T2   = 10,  // Temporaries.
-  T3   = 11,
-  T4   = 12,
-  T5   = 13,
-  T6   = 14,
-  T7   = 15,
-  S0   = 16,  // Saved values.
-  S1   = 17,
-  S2   = 18,
-  S3   = 19,
-  S4   = 20,
-  S5   = 21,
-  S6   = 22,
-  S7   = 23,
-  T8   = 24,  // More temporaries.
-  T9   = 25,
-  K0   = 26,  // Reserved for trap handler.
-  K1   = 27,
-  GP   = 28,  // Global pointer.
-  SP   = 29,  // Stack pointer.
-  FP   = 30,  // Saved value/frame pointer.
-  RA   = 31,  // Return address.
-  TR   = S1,  // ART Thread Register
-  TMP  = T8,  // scratch register (in addition to AT)
-  kNumberOfCoreRegisters = 32,
-  kNoRegister = -1  // Signals an illegal register.
-};
-std::ostream& operator<<(std::ostream& os, const Register& rhs);
-
-// Values for single-precision floating point registers.
-enum FRegister {
-  F0  =  0,
-  F1  =  1,
-  F2  =  2,
-  F3  =  3,
-  F4  =  4,
-  F5  =  5,
-  F6  =  6,
-  F7  =  7,
-  F8  =  8,
-  F9  =  9,
-  F10 = 10,
-  F11 = 11,
-  F12 = 12,
-  F13 = 13,
-  F14 = 14,
-  F15 = 15,
-  F16 = 16,
-  F17 = 17,
-  F18 = 18,
-  F19 = 19,
-  F20 = 20,
-  F21 = 21,
-  F22 = 22,
-  F23 = 23,
-  F24 = 24,
-  F25 = 25,
-  F26 = 26,
-  F27 = 27,
-  F28 = 28,
-  F29 = 29,
-  F30 = 30,
-  F31 = 31,
-  FTMP = F6,   // scratch register
-  FTMP2 = F7,  // scratch register (in addition to FTMP, reserved for MSA instructions)
-  kNumberOfFRegisters = 32,
-  kNoFRegister = -1,
-};
-std::ostream& operator<<(std::ostream& os, const FRegister& rhs);
-
-// Values for vector registers.
-enum VectorRegister {
-  W0  =  0,
-  W1  =  1,
-  W2  =  2,
-  W3  =  3,
-  W4  =  4,
-  W5  =  5,
-  W6  =  6,
-  W7  =  7,
-  W8  =  8,
-  W9  =  9,
-  W10 = 10,
-  W11 = 11,
-  W12 = 12,
-  W13 = 13,
-  W14 = 14,
-  W15 = 15,
-  W16 = 16,
-  W17 = 17,
-  W18 = 18,
-  W19 = 19,
-  W20 = 20,
-  W21 = 21,
-  W22 = 22,
-  W23 = 23,
-  W24 = 24,
-  W25 = 25,
-  W26 = 26,
-  W27 = 27,
-  W28 = 28,
-  W29 = 29,
-  W30 = 30,
-  W31 = 31,
-  kNumberOfVectorRegisters = 32,
-  kNoVectorRegister = -1,
-};
-std::ostream& operator<<(std::ostream& os, const VectorRegister& rhs);
-
-}  // namespace mips
-}  // namespace art
-
-#endif  // ART_RUNTIME_ARCH_MIPS_REGISTERS_MIPS_H_
diff --git a/runtime/arch/mips/thread_mips.cc b/runtime/arch/mips/thread_mips.cc
deleted file mode 100644
index 0be7a7f..0000000
--- a/runtime/arch/mips/thread_mips.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "thread.h"
-
-#include <android-base/logging.h>
-
-#include "asm_support_mips.h"
-#include "base/enums.h"
-
-namespace art {
-
-void Thread::InitCpu() {
-  CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<PointerSize::k32>().Int32Value());
-  CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<PointerSize::k32>().Int32Value());
-  CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k32>().Int32Value());
-}
-
-void Thread::CleanupCpu() {
-  // Do nothing.
-}
-
-}  // namespace art
diff --git a/runtime/arch/mips64/asm_support_mips64.S b/runtime/arch/mips64/asm_support_mips64.S
deleted file mode 100644
index a6b249a..0000000
--- a/runtime/arch/mips64/asm_support_mips64.S
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_S_
-#define ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_S_
-
-#include "asm_support_mips64.h"
-
-// Define special registers.
-
-// Register holding suspend check count down.
-#define rSUSPEND $s0
-// Register holding Thread::Current().
-#define rSELF $s1
-
-
-    // Declare a function called name, doesn't set up $gp.
-.macro ENTRY_NO_GP_CUSTOM_CFA name, cfa_offset
-    .type \name, %function
-    .global \name
-    // Cache alignment for function entry.
-    .balign 16
-\name:
-    .cfi_startproc
-     // Ensure we get a sane starting CFA.
-    .cfi_def_cfa $sp, \cfa_offset
-.endm
-
-    // Declare a function called name, doesn't set up $gp.
-.macro ENTRY_NO_GP name
-    ENTRY_NO_GP_CUSTOM_CFA \name, 0
-.endm
-
-    // Declare a function called name, sets up $gp.
-    // This macro modifies t8.
-.macro ENTRY name
-    ENTRY_NO_GP \name
-    // Set up $gp and store the previous $gp value to $t8. It will be pushed to the
-    // stack after the frame has been constructed.
-    .cpsetup $t9, $t8, \name
-    // Declare a local convenience label to be branched to when $gp is already set up.
-.L\name\()_gp_set:
-.endm
-
-.macro END name
-    .cfi_endproc
-    .size \name, .-\name
-.endm
-
-.macro UNIMPLEMENTED name
-    ENTRY \name
-    break
-    break
-    END \name
-.endm
-
-// Macros to poison (negate) the reference for heap poisoning.
-.macro POISON_HEAP_REF rRef
-#ifdef USE_HEAP_POISONING
-    dsubu \rRef, $zero, \rRef
-    dext  \rRef, \rRef, 0, 32
-#endif  // USE_HEAP_POISONING
-.endm
-
-// Macros to unpoison (negate) the reference for heap poisoning.
-.macro UNPOISON_HEAP_REF rRef
-#ifdef USE_HEAP_POISONING
-    dsubu \rRef, $zero, \rRef
-    dext  \rRef, \rRef, 0, 32
-#endif  // USE_HEAP_POISONING
-.endm
-
-// Byte size of the instructions (un)poisoning heap references.
-#ifdef USE_HEAP_POISONING
-#define HEAP_POISON_INSTR_SIZE 8
-#else
-#define HEAP_POISON_INSTR_SIZE 0
-#endif  // USE_HEAP_POISONING
-
-// Based on contents of creg select the minimum integer
-// At the end of the macro the original value of creg is lost
-.macro MINint dreg,rreg,sreg,creg
-  .set push
-  .set noat
-  .ifc \dreg, \rreg
-  selnez \dreg, \rreg, \creg
-  seleqz \creg, \sreg, \creg
-  .else
-  seleqz \dreg, \sreg, \creg
-  selnez \creg, \rreg, \creg
-  .endif
-  or     \dreg, \dreg, \creg
-  .set pop
-.endm
-
-// Find minimum of two signed registers
-.macro MINs dreg,rreg,sreg
-  .set push
-  .set noat
-  slt    $at, \rreg, \sreg
-  MINint \dreg, \rreg, \sreg, $at
-  .set pop
-.endm
-
-// Find minimum of two unsigned registers
-.macro MINu dreg,rreg,sreg
-  .set push
-  .set noat
-  sltu   $at, \rreg, \sreg
-  MINint \dreg, \rreg, \sreg, $at
-  .set pop
-.endm
-
-#endif  // ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_S_
diff --git a/runtime/arch/mips64/asm_support_mips64.h b/runtime/arch/mips64/asm_support_mips64.h
deleted file mode 100644
index a8e907e..0000000
--- a/runtime/arch/mips64/asm_support_mips64.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_H_
-#define ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_H_
-
-#include "asm_support.h"
-
-// 64 ($f24-$f31) + 64 ($s0-$s7) + 8 ($gp) + 8 ($s8) + 8 ($ra) + 1x8 bytes padding
-#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVES 160
-// 48 ($s2-$s7) + 8 ($gp) + 8 ($s8) + 8 ($ra) + 1x8 bytes padding
-#define FRAME_SIZE_SAVE_REFS_ONLY 80
-// $f12-$f19, $a1-$a7, $s2-$s7 + $gp + $s8 + $ra, 16 total + 1x8 bytes padding + method*
-#define FRAME_SIZE_SAVE_REFS_AND_ARGS 208
-// $f0-$f31, $at, $v0-$v1, $a0-$a7, $t0-$t3, $s0-$s7, $t8-$t9, $gp, $s8, $ra + padding + method*
-#define FRAME_SIZE_SAVE_EVERYTHING 496
-#define FRAME_SIZE_SAVE_EVERYTHING_FOR_CLINIT FRAME_SIZE_SAVE_EVERYTHING
-#define FRAME_SIZE_SAVE_EVERYTHING_FOR_SUSPEND_CHECK FRAME_SIZE_SAVE_EVERYTHING
-
-// &art_quick_read_barrier_mark_introspection is the first of many entry points:
-//   20 entry points for long field offsets, large array indices and variable array indices
-//     (see macro BRB_FIELD_LONG_OFFSET_ENTRY)
-//   20 entry points for short field offsets and small array indices
-//     (see macro BRB_FIELD_SHORT_OFFSET_ENTRY)
-//   20 entry points for GC roots
-//     (see macro BRB_GC_ROOT_ENTRY)
-
-// There are as many entry points of each kind as there are registers that
-// can hold a reference: V0-V1, A0-A7, T0-T2, S2-S8.
-#define BAKER_MARK_INTROSPECTION_REGISTER_COUNT 20
-
-#define BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE (8 * 4)  // 8 instructions in
-                                                                 // BRB_FIELD_*_OFFSET_ENTRY.
-
-#define BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET \
-    (2 * BAKER_MARK_INTROSPECTION_REGISTER_COUNT * BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE)
-
-#define BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE (4 * 4)  // 4 instructions in BRB_GC_ROOT_ENTRY.
-
-#endif  // ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_H_
diff --git a/runtime/arch/mips64/callee_save_frame_mips64.h b/runtime/arch/mips64/callee_save_frame_mips64.h
deleted file mode 100644
index 64d6bec..0000000
--- a/runtime/arch/mips64/callee_save_frame_mips64.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS64_CALLEE_SAVE_FRAME_MIPS64_H_
-#define ART_RUNTIME_ARCH_MIPS64_CALLEE_SAVE_FRAME_MIPS64_H_
-
-#include "arch/instruction_set.h"
-#include "base/bit_utils.h"
-#include "base/callee_save_type.h"
-#include "base/enums.h"
-#include "quick/quick_method_frame_info.h"
-#include "registers_mips64.h"
-#include "runtime_globals.h"
-
-namespace art {
-namespace mips64 {
-
-static constexpr uint32_t kMips64CalleeSaveAlwaysSpills =
-    (1 << art::mips64::RA);
-static constexpr uint32_t kMips64CalleeSaveRefSpills =
-    (1 << art::mips64::S2) | (1 << art::mips64::S3) | (1 << art::mips64::S4) |
-    (1 << art::mips64::S5) | (1 << art::mips64::S6) | (1 << art::mips64::S7) |
-    (1 << art::mips64::GP) | (1 << art::mips64::S8);
-static constexpr uint32_t kMips64CalleeSaveArgSpills =
-    (1 << art::mips64::A1) | (1 << art::mips64::A2) | (1 << art::mips64::A3) |
-    (1 << art::mips64::A4) | (1 << art::mips64::A5) | (1 << art::mips64::A6) |
-    (1 << art::mips64::A7);
-static constexpr uint32_t kMips64CalleeSaveAllSpills =
-    (1 << art::mips64::S0) | (1 << art::mips64::S1);
-static constexpr uint32_t kMips64CalleeSaveEverythingSpills =
-    (1 << art::mips64::AT) | (1 << art::mips64::V0) | (1 << art::mips64::V1) |
-    (1 << art::mips64::A0) | (1 << art::mips64::A1) | (1 << art::mips64::A2) |
-    (1 << art::mips64::A3) | (1 << art::mips64::A4) | (1 << art::mips64::A5) |
-    (1 << art::mips64::A6) | (1 << art::mips64::A7) | (1 << art::mips64::T0) |
-    (1 << art::mips64::T1) | (1 << art::mips64::T2) | (1 << art::mips64::T3) |
-    (1 << art::mips64::S0) | (1 << art::mips64::S1) | (1 << art::mips64::T8) |
-    (1 << art::mips64::T9);
-
-static constexpr uint32_t kMips64CalleeSaveFpRefSpills = 0;
-static constexpr uint32_t kMips64CalleeSaveFpArgSpills =
-    (1 << art::mips64::F12) | (1 << art::mips64::F13) | (1 << art::mips64::F14) |
-    (1 << art::mips64::F15) | (1 << art::mips64::F16) | (1 << art::mips64::F17) |
-    (1 << art::mips64::F18) | (1 << art::mips64::F19);
-// F12 should not be necessary to spill, as A0 is always in use.
-static constexpr uint32_t kMips64CalleeSaveFpAllSpills =
-    (1 << art::mips64::F24) | (1 << art::mips64::F25) | (1 << art::mips64::F26) |
-    (1 << art::mips64::F27) | (1 << art::mips64::F28) | (1 << art::mips64::F29) |
-    (1 << art::mips64::F30) | (1 << art::mips64::F31);
-static constexpr uint32_t kMips64CalleeSaveFpEverythingSpills =
-    (1 << art::mips64::F0) | (1 << art::mips64::F1) | (1 << art::mips64::F2) |
-    (1 << art::mips64::F3) | (1 << art::mips64::F4) | (1 << art::mips64::F5) |
-    (1 << art::mips64::F6) | (1 << art::mips64::F7) | (1 << art::mips64::F8) |
-    (1 << art::mips64::F9) | (1 << art::mips64::F10) | (1 << art::mips64::F11) |
-    (1 << art::mips64::F12) | (1 << art::mips64::F13) | (1 << art::mips64::F14) |
-    (1 << art::mips64::F15) | (1 << art::mips64::F16) | (1 << art::mips64::F17) |
-    (1 << art::mips64::F18) | (1 << art::mips64::F19) | (1 << art::mips64::F20) |
-    (1 << art::mips64::F21) | (1 << art::mips64::F22) | (1 << art::mips64::F23) |
-    (1 << art::mips64::F24) | (1 << art::mips64::F25) | (1 << art::mips64::F26) |
-    (1 << art::mips64::F27) | (1 << art::mips64::F28) | (1 << art::mips64::F29) |
-    (1 << art::mips64::F30) | (1 << art::mips64::F31);
-
-class Mips64CalleeSaveFrame {
- public:
-  static constexpr uint32_t GetCoreSpills(CalleeSaveType type) {
-    type = GetCanonicalCalleeSaveType(type);
-    return kMips64CalleeSaveAlwaysSpills | kMips64CalleeSaveRefSpills |
-        (type == CalleeSaveType::kSaveRefsAndArgs ? kMips64CalleeSaveArgSpills : 0) |
-        (type == CalleeSaveType::kSaveAllCalleeSaves ? kMips64CalleeSaveAllSpills : 0) |
-        (type == CalleeSaveType::kSaveEverything ? kMips64CalleeSaveEverythingSpills : 0);
-  }
-
-  static constexpr uint32_t GetFpSpills(CalleeSaveType type) {
-    type = GetCanonicalCalleeSaveType(type);
-    return kMips64CalleeSaveFpRefSpills |
-        (type == CalleeSaveType::kSaveRefsAndArgs ? kMips64CalleeSaveFpArgSpills : 0) |
-        (type == CalleeSaveType::kSaveAllCalleeSaves ? kMips64CalleeSaveFpAllSpills : 0) |
-        (type == CalleeSaveType::kSaveEverything ? kMips64CalleeSaveFpEverythingSpills : 0);
-  }
-
-  static constexpr uint32_t GetFrameSize(CalleeSaveType type) {
-    type = GetCanonicalCalleeSaveType(type);
-    return RoundUp((POPCOUNT(GetCoreSpills(type)) /* gprs */ +
-                    POPCOUNT(GetFpSpills(type))   /* fprs */ +
-                    + 1 /* Method* */) * static_cast<size_t>(kMips64PointerSize), kStackAlignment);
-  }
-
-  static constexpr QuickMethodFrameInfo GetMethodFrameInfo(CalleeSaveType type) {
-    type = GetCanonicalCalleeSaveType(type);
-    return QuickMethodFrameInfo(GetFrameSize(type), GetCoreSpills(type), GetFpSpills(type));
-  }
-
-  static constexpr size_t GetFpr1Offset(CalleeSaveType type) {
-    type = GetCanonicalCalleeSaveType(type);
-    return GetFrameSize(type) -
-           (POPCOUNT(GetCoreSpills(type)) +
-            POPCOUNT(GetFpSpills(type))) * static_cast<size_t>(kMips64PointerSize);
-  }
-
-  static constexpr size_t GetGpr1Offset(CalleeSaveType type) {
-    type = GetCanonicalCalleeSaveType(type);
-    return GetFrameSize(type) -
-           POPCOUNT(GetCoreSpills(type)) * static_cast<size_t>(kMips64PointerSize);
-  }
-
-  static constexpr size_t GetReturnPcOffset(CalleeSaveType type) {
-    type = GetCanonicalCalleeSaveType(type);
-    return GetFrameSize(type) - static_cast<size_t>(kMips64PointerSize);
-  }
-};
-
-}  // namespace mips64
-}  // namespace art
-
-#endif  // ART_RUNTIME_ARCH_MIPS64_CALLEE_SAVE_FRAME_MIPS64_H_
diff --git a/runtime/arch/mips64/context_mips64.cc b/runtime/arch/mips64/context_mips64.cc
deleted file mode 100644
index b14908f..0000000
--- a/runtime/arch/mips64/context_mips64.cc
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "context_mips64.h"
-
-#include "base/bit_utils.h"
-#include "base/bit_utils_iterator.h"
-#include "quick/quick_method_frame_info.h"
-
-namespace art {
-namespace mips64 {
-
-static constexpr uintptr_t gZero = 0;
-
-void Mips64Context::Reset() {
-  std::fill_n(gprs_, arraysize(gprs_), nullptr);
-  std::fill_n(fprs_, arraysize(fprs_), nullptr);
-  gprs_[SP] = &sp_;
-  gprs_[T9] = &t9_;
-  gprs_[A0] = &arg0_;
-  // Initialize registers with easy to spot debug values.
-  sp_ = Mips64Context::kBadGprBase + SP;
-  t9_ = Mips64Context::kBadGprBase + T9;
-  arg0_ = 0;
-}
-
-void Mips64Context::FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& frame_info) {
-  int spill_pos = 0;
-
-  // Core registers come first, from the highest down to the lowest.
-  for (uint32_t core_reg : HighToLowBits(frame_info.CoreSpillMask())) {
-    gprs_[core_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
-    ++spill_pos;
-  }
-  DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()));
-
-  // FP registers come second, from the highest down to the lowest.
-  for (uint32_t fp_reg : HighToLowBits(frame_info.FpSpillMask())) {
-    fprs_[fp_reg] = CalleeSaveAddress(frame, spill_pos, frame_info.FrameSizeInBytes());
-    ++spill_pos;
-  }
-  DCHECK_EQ(spill_pos, POPCOUNT(frame_info.CoreSpillMask()) + POPCOUNT(frame_info.FpSpillMask()));
-}
-
-void Mips64Context::SetGPR(uint32_t reg, uintptr_t value) {
-  CHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
-  DCHECK(IsAccessibleGPR(reg));
-  CHECK_NE(gprs_[reg], &gZero);  // Can't overwrite this static value since they are never reset.
-  *gprs_[reg] = value;
-}
-
-void Mips64Context::SetFPR(uint32_t reg, uintptr_t value) {
-  CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFpuRegisters));
-  DCHECK(IsAccessibleFPR(reg));
-  CHECK_NE(fprs_[reg], &gZero);  // Can't overwrite this static value since they are never reset.
-  *fprs_[reg] = value;
-}
-
-void Mips64Context::SmashCallerSaves() {
-  // This needs to be 0 because we want a null/zero return value.
-  gprs_[V0] = const_cast<uintptr_t*>(&gZero);
-  gprs_[V1] = const_cast<uintptr_t*>(&gZero);
-  gprs_[A1] = nullptr;
-  gprs_[A0] = nullptr;
-  gprs_[A2] = nullptr;
-  gprs_[A3] = nullptr;
-  gprs_[A4] = nullptr;
-  gprs_[A5] = nullptr;
-  gprs_[A6] = nullptr;
-  gprs_[A7] = nullptr;
-
-  // f0-f23 are caller-saved; f24-f31 are callee-saved.
-  fprs_[F0] = nullptr;
-  fprs_[F1] = nullptr;
-  fprs_[F2] = nullptr;
-  fprs_[F3] = nullptr;
-  fprs_[F4] = nullptr;
-  fprs_[F5] = nullptr;
-  fprs_[F6] = nullptr;
-  fprs_[F7] = nullptr;
-  fprs_[F8] = nullptr;
-  fprs_[F9] = nullptr;
-  fprs_[F10] = nullptr;
-  fprs_[F11] = nullptr;
-  fprs_[F12] = nullptr;
-  fprs_[F13] = nullptr;
-  fprs_[F14] = nullptr;
-  fprs_[F15] = nullptr;
-  fprs_[F16] = nullptr;
-  fprs_[F17] = nullptr;
-  fprs_[F18] = nullptr;
-  fprs_[F19] = nullptr;
-  fprs_[F20] = nullptr;
-  fprs_[F21] = nullptr;
-  fprs_[F22] = nullptr;
-  fprs_[F23] = nullptr;
-}
-
-extern "C" NO_RETURN void art_quick_do_long_jump(uintptr_t*, uintptr_t*);
-
-void Mips64Context::DoLongJump() {
-  uintptr_t gprs[kNumberOfGpuRegisters];
-  uintptr_t fprs[kNumberOfFpuRegisters];
-  for (size_t i = 0; i < kNumberOfGpuRegisters; ++i) {
-    gprs[i] = gprs_[i] != nullptr ? *gprs_[i] : Mips64Context::kBadGprBase + i;
-  }
-  for (size_t i = 0; i < kNumberOfFpuRegisters; ++i) {
-    fprs[i] = fprs_[i] != nullptr ? *fprs_[i] : Mips64Context::kBadFprBase + i;
-  }
-  art_quick_do_long_jump(gprs, fprs);
-}
-
-}  // namespace mips64
-}  // namespace art
diff --git a/runtime/arch/mips64/context_mips64.h b/runtime/arch/mips64/context_mips64.h
deleted file mode 100644
index 857abfd..0000000
--- a/runtime/arch/mips64/context_mips64.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS64_CONTEXT_MIPS64_H_
-#define ART_RUNTIME_ARCH_MIPS64_CONTEXT_MIPS64_H_
-
-#include <android-base/logging.h>
-
-#include "arch/context.h"
-#include "base/macros.h"
-#include "registers_mips64.h"
-
-namespace art {
-namespace mips64 {
-
-class Mips64Context : public Context {
- public:
-  Mips64Context() {
-    Reset();
-  }
-  virtual ~Mips64Context() {}
-
-  void Reset() override;
-
-  void FillCalleeSaves(uint8_t* frame, const QuickMethodFrameInfo& fr) override;
-
-  void SetSP(uintptr_t new_sp) override {
-    SetGPR(SP, new_sp);
-  }
-
-  void SetPC(uintptr_t new_pc) override {
-    SetGPR(T9, new_pc);
-  }
-
-  bool IsAccessibleGPR(uint32_t reg) override {
-    DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
-    return gprs_[reg] != nullptr;
-  }
-
-  uintptr_t* GetGPRAddress(uint32_t reg) override {
-    DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
-    return gprs_[reg];
-  }
-
-  uintptr_t GetGPR(uint32_t reg) override {
-    CHECK_LT(reg, static_cast<uint32_t>(kNumberOfGpuRegisters));
-    DCHECK(IsAccessibleGPR(reg));
-    return *gprs_[reg];
-  }
-
-  void SetGPR(uint32_t reg, uintptr_t value) override;
-
-  bool IsAccessibleFPR(uint32_t reg) override {
-    CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFpuRegisters));
-    return fprs_[reg] != nullptr;
-  }
-
-  uintptr_t GetFPR(uint32_t reg) override {
-    CHECK_LT(reg, static_cast<uint32_t>(kNumberOfFpuRegisters));
-    DCHECK(IsAccessibleFPR(reg));
-    return *fprs_[reg];
-  }
-
-  void SetFPR(uint32_t reg, uintptr_t value) override;
-
-  void SmashCallerSaves() override;
-  NO_RETURN void DoLongJump() override;
-
-  void SetArg0(uintptr_t new_arg0_value) override {
-    SetGPR(A0, new_arg0_value);
-  }
-
- private:
-  // Pointers to registers in the stack, initialized to null except for the special cases below.
-  uintptr_t* gprs_[kNumberOfGpuRegisters];
-  uint64_t* fprs_[kNumberOfFpuRegisters];
-  // Hold values for sp and t9 if they are not located within a stack frame. We use t9 for the
-  // PC (as ra is required to be valid for single-frame deopt and must not be clobbered). We
-  // also need the first argument for single-frame deopt.
-  uintptr_t sp_, t9_, arg0_;
-};
-
-}  // namespace mips64
-}  // namespace art
-
-#endif  // ART_RUNTIME_ARCH_MIPS64_CONTEXT_MIPS64_H_
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
deleted file mode 100644
index 741d41a..0000000
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <math.h>
-#include <string.h>
-
-#include "arch/mips64/asm_support_mips64.h"
-#include "base/atomic.h"
-#include "base/quasi_atomic.h"
-#include "entrypoints/entrypoint_utils.h"
-#include "entrypoints/jni/jni_entrypoints.h"
-#include "entrypoints/math_entrypoints.h"
-#include "entrypoints/quick/quick_alloc_entrypoints.h"
-#include "entrypoints/quick/quick_default_externs.h"
-#include "entrypoints/quick/quick_default_init_entrypoints.h"
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "entrypoints/runtime_asm_entrypoints.h"
-#include "interpreter/interpreter.h"
-
-namespace art {
-
-// Cast entrypoints.
-extern "C" size_t artInstanceOfFromCode(mirror::Object* obj, mirror::Class* ref_class);
-
-// Read barrier entrypoints.
-// art_quick_read_barrier_mark_regXX uses a non-standard calling
-// convention: it expects its input in register XX+1 and returns its
-// result in that same register, and saves and restores all
-// caller-save registers.
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg01(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg02(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg03(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg04(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg05(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg06(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg07(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg08(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg09(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg10(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg11(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg12(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg13(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg17(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg18(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg19(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg20(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg21(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg22(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_reg29(mirror::Object*);
-
-extern "C" mirror::Object* art_quick_read_barrier_mark_introspection(mirror::Object*);
-extern "C" mirror::Object* art_quick_read_barrier_mark_introspection_gc_roots(mirror::Object*);
-extern "C" void art_quick_read_barrier_mark_introspection_end_of_entries(void);
-
-// Math entrypoints.
-extern int32_t CmpgDouble(double a, double b);
-extern int32_t CmplDouble(double a, double b);
-extern int32_t CmpgFloat(float a, float b);
-extern int32_t CmplFloat(float a, float b);
-extern "C" int64_t artLmul(int64_t a, int64_t b);
-extern "C" int64_t artLdiv(int64_t a, int64_t b);
-extern "C" int64_t artLmod(int64_t a, int64_t b);
-
-// Math conversions.
-extern "C" int32_t __fixsfsi(float op1);      // FLOAT_TO_INT
-extern "C" int32_t __fixdfsi(double op1);     // DOUBLE_TO_INT
-extern "C" float __floatdisf(int64_t op1);    // LONG_TO_FLOAT
-extern "C" double __floatdidf(int64_t op1);   // LONG_TO_DOUBLE
-extern "C" int64_t __fixsfdi(float op1);      // FLOAT_TO_LONG
-extern "C" int64_t __fixdfdi(double op1);     // DOUBLE_TO_LONG
-
-// Single-precision FP arithmetics.
-extern "C" float fmodf(float a, float b);      // REM_FLOAT[_2ADDR]
-
-// Double-precision FP arithmetics.
-extern "C" double fmod(double a, double b);     // REM_DOUBLE[_2ADDR]
-
-// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
-extern "C" int64_t __divdi3(int64_t, int64_t);
-extern "C" int64_t __moddi3(int64_t, int64_t);
-
-// No read barrier entrypoints for marking registers.
-void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active) {
-  intptr_t introspection_field_array_entries_size =
-      reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection_gc_roots) -
-      reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection);
-  static_assert(
-      BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET == 2 *
-          BAKER_MARK_INTROSPECTION_REGISTER_COUNT * BAKER_MARK_INTROSPECTION_FIELD_ARRAY_ENTRY_SIZE,
-      "Expecting equal");
-  DCHECK_EQ(introspection_field_array_entries_size,
-            BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRIES_OFFSET);
-  intptr_t introspection_gc_root_entries_size =
-      reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection_end_of_entries) -
-      reinterpret_cast<intptr_t>(&art_quick_read_barrier_mark_introspection_gc_roots);
-  DCHECK_EQ(introspection_gc_root_entries_size,
-            BAKER_MARK_INTROSPECTION_REGISTER_COUNT * BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRY_SIZE);
-  qpoints->pReadBarrierMarkReg00 = is_active ? art_quick_read_barrier_mark_introspection : nullptr;
-  qpoints->pReadBarrierMarkReg01 = is_active ? art_quick_read_barrier_mark_reg01 : nullptr;
-  qpoints->pReadBarrierMarkReg02 = is_active ? art_quick_read_barrier_mark_reg02 : nullptr;
-  qpoints->pReadBarrierMarkReg03 = is_active ? art_quick_read_barrier_mark_reg03 : nullptr;
-  qpoints->pReadBarrierMarkReg04 = is_active ? art_quick_read_barrier_mark_reg04 : nullptr;
-  qpoints->pReadBarrierMarkReg05 = is_active ? art_quick_read_barrier_mark_reg05 : nullptr;
-  qpoints->pReadBarrierMarkReg06 = is_active ? art_quick_read_barrier_mark_reg06 : nullptr;
-  qpoints->pReadBarrierMarkReg07 = is_active ? art_quick_read_barrier_mark_reg07 : nullptr;
-  qpoints->pReadBarrierMarkReg08 = is_active ? art_quick_read_barrier_mark_reg08 : nullptr;
-  qpoints->pReadBarrierMarkReg09 = is_active ? art_quick_read_barrier_mark_reg09 : nullptr;
-  qpoints->pReadBarrierMarkReg10 = is_active ? art_quick_read_barrier_mark_reg10 : nullptr;
-  qpoints->pReadBarrierMarkReg11 = is_active ? art_quick_read_barrier_mark_reg11 : nullptr;
-  qpoints->pReadBarrierMarkReg12 = is_active ? art_quick_read_barrier_mark_reg12 : nullptr;
-  qpoints->pReadBarrierMarkReg13 = is_active ? art_quick_read_barrier_mark_reg13 : nullptr;
-  qpoints->pReadBarrierMarkReg17 = is_active ? art_quick_read_barrier_mark_reg17 : nullptr;
-  qpoints->pReadBarrierMarkReg18 = is_active ? art_quick_read_barrier_mark_reg18 : nullptr;
-  qpoints->pReadBarrierMarkReg19 = is_active ? art_quick_read_barrier_mark_reg19 : nullptr;
-  qpoints->pReadBarrierMarkReg20 = is_active ? art_quick_read_barrier_mark_reg20 : nullptr;
-  qpoints->pReadBarrierMarkReg21 = is_active ? art_quick_read_barrier_mark_reg21 : nullptr;
-  qpoints->pReadBarrierMarkReg22 = is_active ? art_quick_read_barrier_mark_reg22 : nullptr;
-  qpoints->pReadBarrierMarkReg29 = is_active ? art_quick_read_barrier_mark_reg29 : nullptr;
-}
-
-void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
-  DefaultInitEntryPoints(jpoints, qpoints);
-
-  // Cast
-  qpoints->pInstanceofNonTrivial = artInstanceOfFromCode;
-  qpoints->pCheckInstanceOf = art_quick_check_instance_of;
-
-  // Math
-  qpoints->pCmpgDouble = CmpgDouble;
-  qpoints->pCmpgFloat = CmpgFloat;
-  qpoints->pCmplDouble = CmplDouble;
-  qpoints->pCmplFloat = CmplFloat;
-  qpoints->pFmod = fmod;
-  qpoints->pL2d = art_l2d;
-  qpoints->pFmodf = fmodf;
-  qpoints->pL2f = art_l2f;
-  qpoints->pD2iz = art_d2i;
-  qpoints->pF2iz = art_f2i;
-  qpoints->pIdivmod = nullptr;
-  qpoints->pD2l = art_d2l;
-  qpoints->pF2l = art_f2l;
-  qpoints->pLdiv = artLdiv;
-  qpoints->pLmod = artLmod;
-  qpoints->pLmul = artLmul;
-  qpoints->pShlLong = nullptr;
-  qpoints->pShrLong = nullptr;
-  qpoints->pUshrLong = nullptr;
-
-  // More math.
-  qpoints->pCos = cos;
-  qpoints->pSin = sin;
-  qpoints->pAcos = acos;
-  qpoints->pAsin = asin;
-  qpoints->pAtan = atan;
-  qpoints->pAtan2 = atan2;
-  qpoints->pPow = pow;
-  qpoints->pCbrt = cbrt;
-  qpoints->pCosh = cosh;
-  qpoints->pExp = exp;
-  qpoints->pExpm1 = expm1;
-  qpoints->pHypot = hypot;
-  qpoints->pLog = log;
-  qpoints->pLog10 = log10;
-  qpoints->pNextAfter = nextafter;
-  qpoints->pSinh = sinh;
-  qpoints->pTan = tan;
-  qpoints->pTanh = tanh;
-
-  // Intrinsics
-  qpoints->pIndexOf = art_quick_indexof;
-  qpoints->pStringCompareTo = art_quick_string_compareto;
-  qpoints->pMemcpy = memcpy;
-
-  // TODO - use lld/scd instructions for Mips64
-  // Atomic 64-bit load/store
-  qpoints->pA64Load = QuasiAtomic::Read64;
-  qpoints->pA64Store = QuasiAtomic::Write64;
-
-  // Read barrier.
-  qpoints->pReadBarrierJni = ReadBarrierJni;
-  UpdateReadBarrierEntrypoints(qpoints, /*is_active=*/ false);
-  // Cannot use the following registers to pass arguments:
-  // 0(ZERO), 1(AT), 15(T3), 16(S0), 17(S1), 24(T8), 25(T9), 26(K0), 27(K1), 28(GP), 29(SP), 31(RA).
-  // Note that there are 30 entry points only: 00 for register 1(AT), ..., 29 for register 30(S8).
-  qpoints->pReadBarrierMarkReg14 = nullptr;
-  qpoints->pReadBarrierMarkReg15 = nullptr;
-  qpoints->pReadBarrierMarkReg16 = nullptr;
-  qpoints->pReadBarrierMarkReg23 = nullptr;
-  qpoints->pReadBarrierMarkReg24 = nullptr;
-  qpoints->pReadBarrierMarkReg25 = nullptr;
-  qpoints->pReadBarrierMarkReg26 = nullptr;
-  qpoints->pReadBarrierMarkReg27 = nullptr;
-  qpoints->pReadBarrierMarkReg28 = nullptr;
-  qpoints->pReadBarrierSlow = artReadBarrierSlow;
-  qpoints->pReadBarrierForRootSlow = artReadBarrierForRootSlow;
-}
-
-}  // namespace art
diff --git a/runtime/arch/mips64/fault_handler_mips64.cc b/runtime/arch/mips64/fault_handler_mips64.cc
deleted file mode 100644
index 6255235..0000000
--- a/runtime/arch/mips64/fault_handler_mips64.cc
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "fault_handler.h"
-
-#include <sys/ucontext.h>
-
-#include "arch/instruction_set.h"
-#include "arch/mips64/callee_save_frame_mips64.h"
-#include "art_method.h"
-#include "base/callee_save_type.h"
-#include "base/hex_dump.h"
-#include "base/logging.h"  // For VLOG.
-#include "base/macros.h"
-#include "registers_mips64.h"
-#include "runtime_globals.h"
-#include "thread-current-inl.h"
-
-extern "C" void art_quick_throw_stack_overflow();
-extern "C" void art_quick_throw_null_pointer_exception_from_signal();
-
-//
-// Mips64 specific fault handler functions.
-//
-
-namespace art {
-
-void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
-                                             ArtMethod** out_method,
-                                             uintptr_t* out_return_pc, uintptr_t* out_sp) {
-  struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
-  struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
-  *out_sp = static_cast<uintptr_t>(sc->sc_regs[mips64::SP]);
-  VLOG(signals) << "sp: " << *out_sp;
-  if (*out_sp == 0) {
-    return;
-  }
-
-  // In the case of a stack overflow, the stack is not valid and we can't
-  // get the method from the top of the stack.  However it's in r0.
-  uintptr_t* fault_addr = reinterpret_cast<uintptr_t*>(siginfo->si_addr);  // BVA addr
-  uintptr_t* overflow_addr = reinterpret_cast<uintptr_t*>(
-      reinterpret_cast<uint8_t*>(*out_sp) - GetStackOverflowReservedBytes(InstructionSet::kMips64));
-  if (overflow_addr == fault_addr) {
-    *out_method = reinterpret_cast<ArtMethod*>(sc->sc_regs[mips64::A0]);
-  } else {
-    // The method is at the top of the stack.
-    *out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
-  }
-
-  // Work out the return PC.  This will be the address of the instruction
-  // following the faulting ldr/str instruction.
-
-  VLOG(signals) << "pc: " << std::hex
-      << static_cast<void*>(reinterpret_cast<uint8_t*>(sc->sc_pc));
-
-  *out_return_pc = sc->sc_pc + 4;
-}
-
-bool NullPointerHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* context) {
-  if (!IsValidImplicitCheck(info)) {
-    return false;
-  }
-
-  // The code that looks for the catch location needs to know the value of the
-  // PC at the point of call.  For Null checks we insert a GC map that is immediately after
-  // the load/store instruction that might cause the fault.
-
-  struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
-  struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
-
-  // Decrement $sp by the frame size of the kSaveEverything method and store
-  // the fault address in the padding right after the ArtMethod*.
-  sc->sc_regs[mips64::SP] -= mips64::Mips64CalleeSaveFrameSize(CalleeSaveType::kSaveEverything);
-  uintptr_t* padding = reinterpret_cast<uintptr_t*>(sc->sc_regs[mips64::SP]) + /* ArtMethod* */ 1;
-  *padding = reinterpret_cast<uintptr_t>(info->si_addr);
-
-  sc->sc_regs[mips64::RA] = sc->sc_pc + 4;      // RA needs to point to gc map location
-  sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_null_pointer_exception_from_signal);
-  // Note: This entrypoint does not rely on T9 pointing to it, so we may as well preserve T9.
-  VLOG(signals) << "Generating null pointer exception";
-  return true;
-}
-
-bool SuspensionHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
-                               void* context ATTRIBUTE_UNUSED) {
-  return false;
-}
-
-// Stack overflow fault handler.
-//
-// This checks that the fault address is equal to the current stack pointer
-// minus the overflow region size (16K typically). The instruction that
-// generates this signal is:
-//
-// lw zero, -16384(sp)
-//
-// It will fault if sp is inside the protected region on the stack.
-//
-// If we determine this is a stack overflow we need to move the stack pointer
-// to the overflow region below the protected region.
-
-bool StackOverflowHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* info, void* context) {
-  struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
-  struct sigcontext *sc = reinterpret_cast<struct sigcontext*>(&uc->uc_mcontext);
-  VLOG(signals) << "stack overflow handler with sp at " << std::hex << &uc;
-  VLOG(signals) << "sigcontext: " << std::hex << sc;
-
-  uintptr_t sp = sc->sc_regs[mips64::SP];
-  VLOG(signals) << "sp: " << std::hex << sp;
-
-  uintptr_t fault_addr = reinterpret_cast<uintptr_t>(info->si_addr);  // BVA addr
-  VLOG(signals) << "fault_addr: " << std::hex << fault_addr;
-  VLOG(signals) << "checking for stack overflow, sp: " << std::hex << sp <<
-    ", fault_addr: " << fault_addr;
-
-  uintptr_t overflow_addr = sp - GetStackOverflowReservedBytes(InstructionSet::kMips64);
-
-  // Check that the fault address is the value expected for a stack overflow.
-  if (fault_addr != overflow_addr) {
-    VLOG(signals) << "Not a stack overflow";
-    return false;
-  }
-
-  VLOG(signals) << "Stack overflow found";
-
-  // Now arrange for the signal handler to return to art_quick_throw_stack_overflow_from.
-  // The value of RA must be the same as it was when we entered the code that
-  // caused this fault.  This will be inserted into a callee save frame by
-  // the function to which this handler returns (art_quick_throw_stack_overflow).
-  sc->sc_pc = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow);
-  sc->sc_regs[mips64::T9] = sc->sc_pc;          // make sure T9 points to the function
-
-  // The kernel will now return to the address in sc->arm_pc.
-  return true;
-}
-}       // namespace art
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.cc b/runtime/arch/mips64/instruction_set_features_mips64.cc
deleted file mode 100644
index 2031433..0000000
--- a/runtime/arch/mips64/instruction_set_features_mips64.cc
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "instruction_set_features_mips64.h"
-
-#include <fstream>
-#include <sstream>
-
-#include "android-base/stringprintf.h"
-#include "android-base/strings.h"
-
-#include "base/logging.h"
-
-namespace art {
-
-using android::base::StringPrintf;
-
-Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromVariant(
-    const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED) {
-  bool msa = true;
-  if (variant != "default" && variant != "mips64r6") {
-    LOG(WARNING) << "Unexpected CPU variant for Mips64 using defaults: " << variant;
-  }
-  return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(msa));
-}
-
-Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromBitmap(uint32_t bitmap) {
-  bool msa = (bitmap & kMsaBitfield) != 0;
-  return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(msa));
-}
-
-Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromCppDefines() {
-#if defined(_MIPS_ARCH_MIPS64R6)
-  const bool msa = true;
-#else
-  const bool msa = false;
-#endif
-  return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(msa));
-}
-
-Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromCpuInfo() {
-  // Look in /proc/cpuinfo for features we need.  Only use this when we can guarantee that
-  // the kernel puts the appropriate feature flags in here.  Sometimes it doesn't.
-  bool msa = false;
-
-  std::ifstream in("/proc/cpuinfo");
-  if (!in.fail()) {
-    while (!in.eof()) {
-      std::string line;
-      std::getline(in, line);
-      if (!in.eof()) {
-        LOG(INFO) << "cpuinfo line: " << line;
-        if (line.find("ASEs") != std::string::npos) {
-          LOG(INFO) << "found Application Specific Extensions";
-          if (line.find("msa") != std::string::npos) {
-            msa = true;
-          }
-        }
-      }
-    }
-    in.close();
-  } else {
-    LOG(ERROR) << "Failed to open /proc/cpuinfo";
-  }
-  return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(msa));
-}
-
-Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromHwcap() {
-  UNIMPLEMENTED(WARNING);
-  return FromCppDefines();
-}
-
-Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromAssembly() {
-  UNIMPLEMENTED(WARNING);
-  return FromCppDefines();
-}
-
-bool Mips64InstructionSetFeatures::Equals(const InstructionSetFeatures* other) const {
-  if (InstructionSet::kMips64 != other->GetInstructionSet()) {
-    return false;
-  }
-  const Mips64InstructionSetFeatures* other_as_mips64 = other->AsMips64InstructionSetFeatures();
-  return msa_ == other_as_mips64->msa_;
-}
-
-uint32_t Mips64InstructionSetFeatures::AsBitmap() const {
-  return (msa_ ? kMsaBitfield : 0);
-}
-
-std::string Mips64InstructionSetFeatures::GetFeatureString() const {
-  std::string result;
-  if (msa_) {
-    result += "msa";
-  } else {
-    result += "-msa";
-  }
-  return result;
-}
-
-std::unique_ptr<const InstructionSetFeatures>
-Mips64InstructionSetFeatures::AddFeaturesFromSplitString(
-    const std::vector<std::string>& features, std::string* error_msg) const {
-  bool msa = msa_;
-  for (const std::string& feature : features) {
-    DCHECK_EQ(android::base::Trim(feature), feature)
-        << "Feature name is not trimmed: '" << feature << "'";
-    if (feature == "msa") {
-      msa = true;
-    } else if (feature == "-msa") {
-      msa = false;
-    } else {
-      *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str());
-      return nullptr;
-    }
-  }
-  return std::unique_ptr<const InstructionSetFeatures>(new Mips64InstructionSetFeatures(msa));
-}
-
-}  // namespace art
diff --git a/runtime/arch/mips64/instruction_set_features_mips64.h b/runtime/arch/mips64/instruction_set_features_mips64.h
deleted file mode 100644
index e204d9d..0000000
--- a/runtime/arch/mips64/instruction_set_features_mips64.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS64_INSTRUCTION_SET_FEATURES_MIPS64_H_
-#define ART_RUNTIME_ARCH_MIPS64_INSTRUCTION_SET_FEATURES_MIPS64_H_
-
-#include "arch/instruction_set_features.h"
-
-namespace art {
-
-class Mips64InstructionSetFeatures;
-using Mips64FeaturesUniquePtr = std::unique_ptr<const Mips64InstructionSetFeatures>;
-
-// Instruction set features relevant to the MIPS64 architecture.
-class Mips64InstructionSetFeatures final : public InstructionSetFeatures {
- public:
-  // Process a CPU variant string like "r4000" and create InstructionSetFeatures.
-  static Mips64FeaturesUniquePtr FromVariant(const std::string& variant,
-                                             std::string* error_msg);
-
-  // Parse a bitmap and create an InstructionSetFeatures.
-  static Mips64FeaturesUniquePtr FromBitmap(uint32_t bitmap);
-
-  // Turn C pre-processor #defines into the equivalent instruction set features.
-  static Mips64FeaturesUniquePtr FromCppDefines();
-
-  // Process /proc/cpuinfo and use kRuntimeISA to produce InstructionSetFeatures.
-  static Mips64FeaturesUniquePtr FromCpuInfo();
-
-  // Process the auxiliary vector AT_HWCAP entry and use kRuntimeISA to produce
-  // InstructionSetFeatures.
-  static Mips64FeaturesUniquePtr FromHwcap();
-
-  // Use assembly tests of the current runtime (ie kRuntimeISA) to determine the
-  // InstructionSetFeatures. This works around kernel bugs in AT_HWCAP and /proc/cpuinfo.
-  static Mips64FeaturesUniquePtr FromAssembly();
-
-  bool Equals(const InstructionSetFeatures* other) const override;
-
-  InstructionSet GetInstructionSet() const override {
-    return InstructionSet::kMips64;
-  }
-
-  uint32_t AsBitmap() const override;
-
-  std::string GetFeatureString() const override;
-
-  // Does it have MSA (MIPS SIMD Architecture) support.
-  bool HasMsa() const {
-    return msa_;
-  }
-
-  virtual ~Mips64InstructionSetFeatures() {}
-
- protected:
-  // Parse a vector of the form "fpu32", "mips2" adding these to a new Mips64InstructionSetFeatures.
-  std::unique_ptr<const InstructionSetFeatures>
-      AddFeaturesFromSplitString(const std::vector<std::string>& features,
-                                 std::string* error_msg) const override;
-
- private:
-  explicit Mips64InstructionSetFeatures(bool msa) : InstructionSetFeatures(), msa_(msa) {
-  }
-
-  // Bitmap positions for encoding features as a bitmap.
-  enum {
-    kMsaBitfield = 1,
-  };
-
-  const bool msa_;
-
-  DISALLOW_COPY_AND_ASSIGN(Mips64InstructionSetFeatures);
-};
-
-}  // namespace art
-
-#endif  // ART_RUNTIME_ARCH_MIPS64_INSTRUCTION_SET_FEATURES_MIPS64_H_
diff --git a/runtime/arch/mips64/instruction_set_features_mips64_test.cc b/runtime/arch/mips64/instruction_set_features_mips64_test.cc
deleted file mode 100644
index 933dc66..0000000
--- a/runtime/arch/mips64/instruction_set_features_mips64_test.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "instruction_set_features_mips64.h"
-
-#include <gtest/gtest.h>
-
-namespace art {
-
-TEST(Mips64InstructionSetFeaturesTest, Mips64FeaturesFromDefaultVariant) {
-  std::string error_msg;
-  std::unique_ptr<const InstructionSetFeatures> mips64_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kMips64, "default", &error_msg));
-  ASSERT_TRUE(mips64_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(mips64_features->GetInstructionSet(), InstructionSet::kMips64);
-  EXPECT_TRUE(mips64_features->Equals(mips64_features.get()));
-  EXPECT_STREQ("msa", mips64_features->GetFeatureString().c_str());
-  EXPECT_EQ(mips64_features->AsBitmap(), 1U);
-}
-
-TEST(Mips64InstructionSetFeaturesTest, Mips64FeaturesFromR6Variant) {
-  std::string error_msg;
-  std::unique_ptr<const InstructionSetFeatures> mips64r6_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kMips64, "mips64r6", &error_msg));
-  ASSERT_TRUE(mips64r6_features.get() != nullptr) << error_msg;
-  EXPECT_EQ(mips64r6_features->GetInstructionSet(), InstructionSet::kMips64);
-  EXPECT_TRUE(mips64r6_features->Equals(mips64r6_features.get()));
-  EXPECT_STREQ("msa", mips64r6_features->GetFeatureString().c_str());
-  EXPECT_EQ(mips64r6_features->AsBitmap(), 1U);
-
-  std::unique_ptr<const InstructionSetFeatures> mips64_default_features(
-      InstructionSetFeatures::FromVariant(InstructionSet::kMips64, "default", &error_msg));
-  ASSERT_TRUE(mips64_default_features.get() != nullptr) << error_msg;
-  EXPECT_TRUE(mips64r6_features->Equals(mips64_default_features.get()));
-}
-
-}  // namespace art
diff --git a/runtime/arch/mips64/jni_entrypoints_mips64.S b/runtime/arch/mips64/jni_entrypoints_mips64.S
deleted file mode 100644
index 70d7d97..0000000
--- a/runtime/arch/mips64/jni_entrypoints_mips64.S
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "asm_support_mips64.S"
-
-    .set noreorder
-    .balign 16
-
-    /*
-     * Jni dlsym lookup stub.
-     */
-    .extern artFindNativeMethod
-ENTRY art_jni_dlsym_lookup_stub
-    daddiu $sp, $sp, -80        # save a0-a7 and $ra
-    .cfi_adjust_cfa_offset 80
-    sd     $ra, 64($sp)
-    .cfi_rel_offset 31, 64
-    sd     $a7, 56($sp)
-    .cfi_rel_offset 11, 56
-    sd     $a6, 48($sp)
-    .cfi_rel_offset 10, 48
-    sd     $a5, 40($sp)
-    .cfi_rel_offset 9, 40
-    sd     $a4, 32($sp)
-    .cfi_rel_offset 8, 32
-    sd     $a3, 24($sp)
-    .cfi_rel_offset 7, 24
-    sd     $a2, 16($sp)
-    .cfi_rel_offset 6, 16
-    sd     $a1, 8($sp)
-    .cfi_rel_offset 5, 8
-    sd     $a0, 0($sp)
-    .cfi_rel_offset 4, 0
-    move   $a0, $s1             # pass Thread::Current()
-    jal    artFindNativeMethod  # (Thread*)
-    .cpreturn                   # Restore gp from t8 in branch delay slot. gp is not used
-                                # anymore, and t8 may be clobbered in artFindNativeMethod.
-
-    ld     $a0, 0($sp)          # restore registers from stack
-    .cfi_restore 4
-    ld     $a1, 8($sp)
-    .cfi_restore 5
-    ld     $a2, 16($sp)
-    .cfi_restore 6
-    ld     $a3, 24($sp)
-    .cfi_restore 7
-    ld     $a4, 32($sp)
-    .cfi_restore 8
-    ld     $a5, 40($sp)
-    .cfi_restore 9
-    ld     $a6, 48($sp)
-    .cfi_restore 10
-    ld     $a7, 56($sp)
-    .cfi_restore 11
-    ld     $ra, 64($sp)
-    .cfi_restore 31
-    beq    $v0, $zero, .Lno_native_code_found
-    daddiu $sp, $sp, 80         # restore the stack
-    .cfi_adjust_cfa_offset -80
-    move   $t9, $v0             # put method code result in $t9
-    jalr   $zero, $t9           # leaf call to method's code
-    nop
-.Lno_native_code_found:
-    jalr   $zero, $ra
-    nop
-END art_jni_dlsym_lookup_stub
diff --git a/runtime/arch/mips64/memcmp16_mips64.S b/runtime/arch/mips64/memcmp16_mips64.S
deleted file mode 100644
index 962977e..0000000
--- a/runtime/arch/mips64/memcmp16_mips64.S
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS64_MEMCMP16_MIPS64_S_
-#define ART_RUNTIME_ARCH_MIPS64_MEMCMP16_MIPS64_S_
-
-#include "asm_support_mips64.S"
-
-.set noreorder
-
-// u4 __memcmp16(const u2*, const u2*, size_t);
-ENTRY_NO_GP __memcmp16
-  move  $t0, $zero
-  move  $t1, $zero
-  beqz  $a2, done       /* 0 length string */
-  nop
-  beq   $a0, $a1, done  /* addresses are identical */
-  nop
-
-1:
-  lhu   $t0, 0($a0)
-  lhu   $t1, 0($a1)
-  bne   $t0, $t1, done
-  nop
-  daddu $a0, 2
-  daddu $a1, 2
-  dsubu $a2, 1
-  bnez  $a2, 1b
-  nop
-
-done:
-  dsubu $v0, $t0, $t1
-  j     $ra
-  nop
-END __memcmp16
-
-#endif  // ART_RUNTIME_ARCH_MIPS64_MEMCMP16_MIPS64_S_
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
deleted file mode 100644
index ebf1d5b..0000000
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ /dev/null
@@ -1,3096 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "asm_support_mips64.S"
-
-#include "arch/quick_alloc_entrypoints.S"
-
-    .set noreorder
-    .balign 16
-
-    /* Deliver the given exception */
-    .extern artDeliverExceptionFromCode
-    /* Deliver an exception pending on a thread */
-    .extern artDeliverPendingExceptionFromCode
-
-    /*
-     * Macro that sets up $gp and stores the previous $gp value to $t8.
-     * This macro modifies v1 and t8.
-     */
-.macro SETUP_GP
-    move $v1, $ra
-    bal 1f
-    nop
-1:
-    .cpsetup $ra, $t8, 1b
-    move $ra, $v1
-.endm
-
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
-     * callee-save: padding + $f24-$f31 + $s0-$s7 + $gp + $ra + $s8 = 19 total + 1x8 bytes padding
-     */
-.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
-    daddiu $sp, $sp, -160
-    .cfi_adjust_cfa_offset 160
-
-     // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 160)
-#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(MIPS64) size not as expected."
-#endif
-
-    sd     $ra, 152($sp)
-    .cfi_rel_offset 31, 152
-    sd     $s8, 144($sp)
-    .cfi_rel_offset 30, 144
-    sd     $t8, 136($sp)           # t8 holds caller's gp, now save it to the stack.
-    .cfi_rel_offset 28, 136        # Value from gp is pushed, so set the cfi offset accordingly.
-    sd     $s7, 128($sp)
-    .cfi_rel_offset 23, 128
-    sd     $s6, 120($sp)
-    .cfi_rel_offset 22, 120
-    sd     $s5, 112($sp)
-    .cfi_rel_offset 21, 112
-    sd     $s4, 104($sp)
-    .cfi_rel_offset 20, 104
-    sd     $s3,  96($sp)
-    .cfi_rel_offset 19, 96
-    sd     $s2,  88($sp)
-    .cfi_rel_offset 18, 88
-    sd     $s1,  80($sp)
-    .cfi_rel_offset 17, 80
-    sd     $s0,  72($sp)
-    .cfi_rel_offset 16, 72
-
-    // FP callee-saves
-    s.d    $f31, 64($sp)
-    s.d    $f30, 56($sp)
-    s.d    $f29, 48($sp)
-    s.d    $f28, 40($sp)
-    s.d    $f27, 32($sp)
-    s.d    $f26, 24($sp)
-    s.d    $f25, 16($sp)
-    s.d    $f24,  8($sp)
-
-    # load appropriate callee-save-method
-    ld      $t1, %got(_ZN3art7Runtime9instance_E)($gp)
-    ld      $t1, 0($t1)
-    ld      $t1, RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET($t1)
-    sd      $t1, 0($sp)                                # Place ArtMethod* at bottom of stack.
-    sd      $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
-.endm
-
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly). Restoration assumes
-     * non-moving GC.
-     * Does not include rSUSPEND or rSELF
-     * callee-save: padding + $s2-$s7 + $gp + $ra + $s8 = 9 total + 1x8 bytes padding
-     */
-.macro SETUP_SAVE_REFS_ONLY_FRAME
-    daddiu $sp, $sp, -80
-    .cfi_adjust_cfa_offset 80
-
-    // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_REFS_ONLY != 80)
-#error "FRAME_SIZE_SAVE_REFS_ONLY(MIPS64) size not as expected."
-#endif
-
-    sd     $ra, 72($sp)
-    .cfi_rel_offset 31, 72
-    sd     $s8, 64($sp)
-    .cfi_rel_offset 30, 64
-    sd     $t8, 56($sp)            # t8 holds caller's gp, now save it to the stack.
-    .cfi_rel_offset 28, 56         # Value from gp is pushed, so set the cfi offset accordingly.
-    sd     $s7, 48($sp)
-    .cfi_rel_offset 23, 48
-    sd     $s6, 40($sp)
-    .cfi_rel_offset 22, 40
-    sd     $s5, 32($sp)
-    .cfi_rel_offset 21, 32
-    sd     $s4, 24($sp)
-    .cfi_rel_offset 20, 24
-    sd     $s3, 16($sp)
-    .cfi_rel_offset 19, 16
-    sd     $s2, 8($sp)
-    .cfi_rel_offset 18, 8
-    # load appropriate callee-save-method
-    ld      $t1, %got(_ZN3art7Runtime9instance_E)($gp)
-    ld      $t1, 0($t1)
-    ld      $t1, RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET($t1)
-    sd      $t1, 0($sp)                                # Place Method* at bottom of stack.
-    sd      $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
-.endm
-
-.macro RESTORE_SAVE_REFS_ONLY_FRAME
-    ld     $ra, 72($sp)
-    .cfi_restore 31
-    ld     $s8, 64($sp)
-    .cfi_restore 30
-    ld     $t8, 56($sp)            # Restore gp back to it's temp storage.
-    .cfi_restore 28
-    ld     $s7, 48($sp)
-    .cfi_restore 23
-    ld     $s6, 40($sp)
-    .cfi_restore 22
-    ld     $s5, 32($sp)
-    .cfi_restore 21
-    ld     $s4, 24($sp)
-    .cfi_restore 20
-    ld     $s3, 16($sp)
-    .cfi_restore 19
-    ld     $s2, 8($sp)
-    .cfi_restore 18
-    daddiu $sp, $sp, 80
-    .cfi_adjust_cfa_offset -80
-    .cpreturn
-.endm
-
-.macro RESTORE_SAVE_REFS_ONLY_FRAME_AND_RETURN
-    ld     $ra, 72($sp)
-    .cfi_restore 31
-    ld     $s8, 64($sp)
-    .cfi_restore 30
-    ld     $t8, 56($sp)            # Restore gp back to it's temp storage.
-    .cfi_restore 28
-    ld     $s7, 48($sp)
-    .cfi_restore 23
-    ld     $s6, 40($sp)
-    .cfi_restore 22
-    ld     $s5, 32($sp)
-    .cfi_restore 21
-    ld     $s4, 24($sp)
-    .cfi_restore 20
-    ld     $s3, 16($sp)
-    .cfi_restore 19
-    ld     $s2, 8($sp)
-    .cfi_restore 18
-    .cpreturn
-    jalr   $zero, $ra
-    daddiu $sp, $sp, 80
-    .cfi_adjust_cfa_offset -80
-.endm
-
-// This assumes the top part of these stack frame types are identical.
-#define REFS_AND_ARGS_MINUS_REFS_SIZE (FRAME_SIZE_SAVE_REFS_AND_ARGS - FRAME_SIZE_SAVE_REFS_ONLY)
-
-    /*
-     * Individually usable part of macro SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL.
-     */
-.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_S4_THRU_S8
-    sd      $s8, 192($sp)
-    .cfi_rel_offset 30, 192
-    sd      $s7, 176($sp)
-    .cfi_rel_offset 23, 176
-    sd      $s6, 168($sp)
-    .cfi_rel_offset 22, 168
-    sd      $s5, 160($sp)
-    .cfi_rel_offset 21, 160
-    sd      $s4, 152($sp)
-    .cfi_rel_offset 20, 152
-.endm
-
-.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL save_s4_thru_s8=1
-    daddiu  $sp, $sp, -208
-    .cfi_adjust_cfa_offset 208
-
-    // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 208)
-#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(MIPS64) size not as expected."
-#endif
-
-    sd      $ra, 200($sp)           # = kQuickCalleeSaveFrame_RefAndArgs_LrOffset
-    .cfi_rel_offset 31, 200
-    sd      $t8, 184($sp)           # t8 holds caller's gp, now save it to the stack.
-    .cfi_rel_offset 28, 184         # Value from gp is pushed, so set the cfi offset accordingly.
-    .if \save_s4_thru_s8
-      SETUP_SAVE_REFS_AND_ARGS_FRAME_S4_THRU_S8
-    .endif
-    sd      $s3, 144($sp)
-    .cfi_rel_offset 19, 144
-    sd      $s2, 136($sp)
-    .cfi_rel_offset 18, 136
-    sd      $a7, 128($sp)
-    .cfi_rel_offset 11, 128
-    sd      $a6, 120($sp)
-    .cfi_rel_offset 10, 120
-    sd      $a5, 112($sp)
-    .cfi_rel_offset 9, 112
-    sd      $a4, 104($sp)
-    .cfi_rel_offset 8, 104
-    sd      $a3,  96($sp)
-    .cfi_rel_offset 7, 96
-    sd      $a2,  88($sp)
-    .cfi_rel_offset 6, 88
-    sd      $a1,  80($sp)           # = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset
-    .cfi_rel_offset 5, 80
-
-    s.d     $f19, 72($sp)
-    s.d     $f18, 64($sp)
-    s.d     $f17, 56($sp)
-    s.d     $f16, 48($sp)
-    s.d     $f15, 40($sp)
-    s.d     $f14, 32($sp)
-    s.d     $f13, 24($sp)           # = kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset
-    s.d     $f12, 16($sp)           # This isn't necessary to store.
-    # 1x8 bytes padding + Method*
-.endm
-
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs). Restoration assumes
-     * non-moving GC.
-     * callee-save: padding + $f12-$f19 + $a1-$a7 + $s2-$s7 + $gp + $ra + $s8 = 24 total + 1 words padding + Method*
-     */
-.macro SETUP_SAVE_REFS_AND_ARGS_FRAME save_s4_thru_s8_only=0
-    .if \save_s4_thru_s8_only
-      // It is expected that `SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL /* save_s4_thru_s8 */ 0`
-      // has been done prior to `SETUP_SAVE_REFS_AND_ARGS_FRAME /* save_s4_thru_s8_only */ 1`.
-      SETUP_SAVE_REFS_AND_ARGS_FRAME_S4_THRU_S8
-    .else
-      SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL
-    .endif
-    # load appropriate callee-save-method
-    ld      $t1, %got(_ZN3art7Runtime9instance_E)($gp)
-    ld      $t1, 0($t1)
-    ld      $t1, RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET($t1)
-    sd      $t1, 0($sp)                                # Place Method* at bottom of stack.
-    sd      $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
-.endm
-
-.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
-    SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL
-    sd      $a0, 0($sp)                                # Place Method* at bottom of stack.
-    sd      $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
-.endm
-
-    /*
-     * Individually usable part of macro RESTORE_SAVE_REFS_AND_ARGS_FRAME.
-     */
-.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME_A1
-    ld      $a1,  80($sp)
-    .cfi_restore 5
-.endm
-
-.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME restore_s4_thru_s8=1
-    ld      $ra, 200($sp)
-    .cfi_restore 31
-    .if \restore_s4_thru_s8
-      ld    $s8, 192($sp)
-      .cfi_restore 30
-    .endif
-    ld      $t8, 184($sp)           # Restore gp back to it's temp storage.
-    .cfi_restore 28
-    .if \restore_s4_thru_s8
-      ld    $s7, 176($sp)
-      .cfi_restore 23
-      ld    $s6, 168($sp)
-      .cfi_restore 22
-      ld    $s5, 160($sp)
-      .cfi_restore 21
-      ld    $s4, 152($sp)
-      .cfi_restore 20
-    .endif
-    ld      $s3, 144($sp)
-    .cfi_restore 19
-    ld      $s2, 136($sp)
-    .cfi_restore 18
-    ld      $a7, 128($sp)
-    .cfi_restore 11
-    ld      $a6, 120($sp)
-    .cfi_restore 10
-    ld      $a5, 112($sp)
-    .cfi_restore 9
-    ld      $a4, 104($sp)
-    .cfi_restore 8
-    ld      $a3,  96($sp)
-    .cfi_restore 7
-    ld      $a2,  88($sp)
-    .cfi_restore 6
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME_A1
-
-    l.d     $f19, 72($sp)
-    l.d     $f18, 64($sp)
-    l.d     $f17, 56($sp)
-    l.d     $f16, 48($sp)
-    l.d     $f15, 40($sp)
-    l.d     $f14, 32($sp)
-    l.d     $f13, 24($sp)
-    l.d     $f12, 16($sp)
-
-    .cpreturn
-    daddiu  $sp, $sp, 208
-    .cfi_adjust_cfa_offset -208
-.endm
-
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveEverything).
-     * when the $sp has already been decremented by FRAME_SIZE_SAVE_EVERYTHING.
-     * callee-save: $at + $v0-$v1 + $a0-$a7 + $t0-$t3 + $s0-$s7 + $t8-$t9 + $gp + $s8 + $ra + $s8,
-     *              $f0-$f31; 28(GPR)+ 32(FPR) + 1x8 bytes padding + method*
-     * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP.
-     */
-.macro SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP runtime_method_offset = RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
-     // Ugly compile-time check, but we only have the preprocessor.
-#if (FRAME_SIZE_SAVE_EVERYTHING != 496)
-#error "FRAME_SIZE_SAVE_EVERYTHING(MIPS64) size not as expected."
-#endif
-
-    // Save core registers.
-    sd     $ra, 488($sp)
-    .cfi_rel_offset 31, 488
-    sd     $s8, 480($sp)
-    .cfi_rel_offset 30, 480
-    sd     $t9, 464($sp)
-    .cfi_rel_offset 25, 464
-    sd     $t8, 456($sp)
-    .cfi_rel_offset 24, 456
-    sd     $s7, 448($sp)
-    .cfi_rel_offset 23, 448
-    sd     $s6, 440($sp)
-    .cfi_rel_offset 22, 440
-    sd     $s5, 432($sp)
-    .cfi_rel_offset 21, 432
-    sd     $s4, 424($sp)
-    .cfi_rel_offset 20, 424
-    sd     $s3,  416($sp)
-    .cfi_rel_offset 19, 416
-    sd     $s2,  408($sp)
-    .cfi_rel_offset 18, 408
-    sd     $s1,  400($sp)
-    .cfi_rel_offset 17, 400
-    sd     $s0,  392($sp)
-    .cfi_rel_offset 16, 392
-    sd     $t3,  384($sp)
-    .cfi_rel_offset 15, 384
-    sd     $t2,  376($sp)
-    .cfi_rel_offset 14, 376
-    sd     $t1,  368($sp)
-    .cfi_rel_offset 13, 368
-    sd     $t0,  360($sp)
-    .cfi_rel_offset 12, 360
-    sd     $a7, 352($sp)
-    .cfi_rel_offset 11, 352
-    sd     $a6, 344($sp)
-    .cfi_rel_offset 10, 344
-    sd     $a5, 336($sp)
-    .cfi_rel_offset 9, 336
-    sd     $a4, 328($sp)
-    .cfi_rel_offset 8, 328
-    sd     $a3,  320($sp)
-    .cfi_rel_offset 7, 320
-    sd     $a2,  312($sp)
-    .cfi_rel_offset 6, 312
-    sd     $a1,  304($sp)
-    .cfi_rel_offset 5, 304
-    sd     $a0,  296($sp)
-    .cfi_rel_offset 4, 296
-    sd     $v1,  288($sp)
-    .cfi_rel_offset 3, 288
-    sd     $v0,  280($sp)
-    .cfi_rel_offset 2, 280
-
-    // Set up $gp, clobbering $ra and using the branch delay slot for a useful instruction.
-    bal 1f
-    .set push
-    .set noat
-    sd     $at,  272($sp)
-    .cfi_rel_offset 1, 272
-    .set pop
-1:
-    .cpsetup $ra, 472, 1b
-
-    // Save FP registers.
-    s.d    $f31, 264($sp)
-    s.d    $f30, 256($sp)
-    s.d    $f29, 248($sp)
-    s.d    $f28, 240($sp)
-    s.d    $f27, 232($sp)
-    s.d    $f26, 224($sp)
-    s.d    $f25, 216($sp)
-    s.d    $f24, 208($sp)
-    s.d    $f23, 200($sp)
-    s.d    $f22, 192($sp)
-    s.d    $f21, 184($sp)
-    s.d    $f20, 176($sp)
-    s.d    $f19, 168($sp)
-    s.d    $f18, 160($sp)
-    s.d    $f17, 152($sp)
-    s.d    $f16, 144($sp)
-    s.d    $f15, 136($sp)
-    s.d    $f14, 128($sp)
-    s.d    $f13, 120($sp)
-    s.d    $f12, 112($sp)
-    s.d    $f11, 104($sp)
-    s.d    $f10, 96($sp)
-    s.d    $f9, 88($sp)
-    s.d    $f8, 80($sp)
-    s.d    $f7, 72($sp)
-    s.d    $f6, 64($sp)
-    s.d    $f5, 56($sp)
-    s.d    $f4, 48($sp)
-    s.d    $f3, 40($sp)
-    s.d    $f2, 32($sp)
-    s.d    $f1, 24($sp)
-    s.d    $f0, 16($sp)
-
-    # load appropriate callee-save-method
-    ld      $t1, %got(_ZN3art7Runtime9instance_E)($gp)
-    ld      $t1, 0($t1)
-    ld      $t1, \runtime_method_offset($t1)
-    sd      $t1, 0($sp)                                # Place ArtMethod* at bottom of stack.
-    # Place sp in Thread::Current()->top_quick_frame.
-    sd      $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
-.endm
-
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveEverything).
-     * callee-save: $at + $v0-$v1 + $a0-$a7 + $t0-$t3 + $s0-$s7 + $t8-$t9 + $gp + $s8 + $ra + $s8,
-     *              $f0-$f31; 28(GPR)+ 32(FPR) + 1x8 bytes padding + method*
-     * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP.
-     */
-.macro SETUP_SAVE_EVERYTHING_FRAME runtime_method_offset = RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
-    daddiu $sp, $sp, -(FRAME_SIZE_SAVE_EVERYTHING)
-    .cfi_adjust_cfa_offset (FRAME_SIZE_SAVE_EVERYTHING)
-    SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP \runtime_method_offset
-.endm
-
-.macro RESTORE_SAVE_EVERYTHING_FRAME restore_a0=1
-    // Restore FP registers.
-    l.d    $f31, 264($sp)
-    l.d    $f30, 256($sp)
-    l.d    $f29, 248($sp)
-    l.d    $f28, 240($sp)
-    l.d    $f27, 232($sp)
-    l.d    $f26, 224($sp)
-    l.d    $f25, 216($sp)
-    l.d    $f24, 208($sp)
-    l.d    $f23, 200($sp)
-    l.d    $f22, 192($sp)
-    l.d    $f21, 184($sp)
-    l.d    $f20, 176($sp)
-    l.d    $f19, 168($sp)
-    l.d    $f18, 160($sp)
-    l.d    $f17, 152($sp)
-    l.d    $f16, 144($sp)
-    l.d    $f15, 136($sp)
-    l.d    $f14, 128($sp)
-    l.d    $f13, 120($sp)
-    l.d    $f12, 112($sp)
-    l.d    $f11, 104($sp)
-    l.d    $f10, 96($sp)
-    l.d    $f9, 88($sp)
-    l.d    $f8, 80($sp)
-    l.d    $f7, 72($sp)
-    l.d    $f6, 64($sp)
-    l.d    $f5, 56($sp)
-    l.d    $f4, 48($sp)
-    l.d    $f3, 40($sp)
-    l.d    $f2, 32($sp)
-    l.d    $f1, 24($sp)
-    l.d    $f0, 16($sp)
-
-    // Restore core registers.
-    .cpreturn
-    ld     $ra, 488($sp)
-    .cfi_restore 31
-    ld     $s8, 480($sp)
-    .cfi_restore 30
-    ld     $t9, 464($sp)
-    .cfi_restore 25
-    ld     $t8, 456($sp)
-    .cfi_restore 24
-    ld     $s7, 448($sp)
-    .cfi_restore 23
-    ld     $s6, 440($sp)
-    .cfi_restore 22
-    ld     $s5, 432($sp)
-    .cfi_restore 21
-    ld     $s4, 424($sp)
-    .cfi_restore 20
-    ld     $s3,  416($sp)
-    .cfi_restore 19
-    ld     $s2,  408($sp)
-    .cfi_restore 18
-    ld     $s1,  400($sp)
-    .cfi_restore 17
-    ld     $s0,  392($sp)
-    .cfi_restore 16
-    ld     $t3,  384($sp)
-    .cfi_restore 15
-    ld     $t2,  376($sp)
-    .cfi_restore 14
-    ld     $t1,  368($sp)
-    .cfi_restore 13
-    ld     $t0,  360($sp)
-    .cfi_restore 12
-    ld     $a7, 352($sp)
-    .cfi_restore 11
-    ld     $a6, 344($sp)
-    .cfi_restore 10
-    ld     $a5, 336($sp)
-    .cfi_restore 9
-    ld     $a4, 328($sp)
-    .cfi_restore 8
-    ld     $a3,  320($sp)
-    .cfi_restore 7
-    ld     $a2,  312($sp)
-    .cfi_restore 6
-    ld     $a1,  304($sp)
-    .cfi_restore 5
-    .if \restore_a0
-    ld     $a0,  296($sp)
-    .cfi_restore 4
-    .endif
-    ld     $v1,  288($sp)
-    .cfi_restore 3
-    ld     $v0,  280($sp)
-    .cfi_restore 2
-    .set push
-    .set noat
-    ld     $at,  272($sp)
-    .cfi_restore 1
-    .set pop
-
-    daddiu $sp, $sp, 496
-    .cfi_adjust_cfa_offset -496
-.endm
-
-    /*
-     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
-     * exception is Thread::Current()->exception_ when the runtime method frame is ready.
-     * Requires $gp properly set up.
-     */
-.macro DELIVER_PENDING_EXCEPTION_FRAME_READY
-    dla     $t9, artDeliverPendingExceptionFromCode
-    jalr    $zero, $t9                   # artDeliverPendingExceptionFromCode(Thread*)
-    move    $a0, rSELF                   # pass Thread::Current
-.endm
-
-    /*
-     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
-     * exception is Thread::Current()->exception_.
-     */
-.macro DELIVER_PENDING_EXCEPTION
-    SETUP_GP
-    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME    # save callee saves for throw
-    DELIVER_PENDING_EXCEPTION_FRAME_READY
-.endm
-
-.macro RETURN_IF_NO_EXCEPTION
-    ld     $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    bne    $t0, $zero, 1f                      # success if no exception is pending
-    nop
-    jalr   $zero, $ra
-    nop
-1:
-    DELIVER_PENDING_EXCEPTION
-.endm
-
-.macro RETURN_IF_ZERO
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    bne    $v0, $zero, 1f                # success?
-    nop
-    jalr   $zero, $ra                    # return on success
-    nop
-1:
-    DELIVER_PENDING_EXCEPTION
-.endm
-
-.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    beq    $v0, $zero, 1f                # success?
-    nop
-    jalr   $zero, $ra                    # return on success
-    nop
-1:
-    DELIVER_PENDING_EXCEPTION
-.endm
-
-    /*
-     * On stack replacement stub.
-     * On entry:
-     *   a0 = stack to copy
-     *   a1 = size of stack
-     *   a2 = pc to call
-     *   a3 = JValue* result
-     *   a4 = shorty
-     *   a5 = thread
-     */
-ENTRY art_quick_osr_stub
-    move   $t0, $sp               # save stack pointer
-    daddiu $t1, $sp, -112         # reserve stack space
-    dsrl   $t1, $t1, 4            # enforce 16 byte stack alignment
-    dsll   $sp, $t1, 4            # update stack pointer
-
-    // Save callee general purpose registers, SP, T8(GP), RA, A3, and A4 (8x14 bytes)
-    sd     $ra, 104($sp)
-    .cfi_rel_offset 31, 104
-    sd     $s8, 96($sp)
-    .cfi_rel_offset 30, 96
-    sd     $t0, 88($sp)           # save original stack pointer stored in t0
-    .cfi_rel_offset 29, 88
-    sd     $t8, 80($sp)           # t8 holds caller's gp, now save it to the stack.
-    .cfi_rel_offset 28, 80        # Value from gp is pushed, so set the cfi offset accordingly.
-    sd     $s7, 72($sp)
-    .cfi_rel_offset 23, 72
-    sd     $s6, 64($sp)
-    .cfi_rel_offset 22, 64
-    sd     $s5, 56($sp)
-    .cfi_rel_offset 21, 56
-    sd     $s4, 48($sp)
-    .cfi_rel_offset 20, 48
-    sd     $s3, 40($sp)
-    .cfi_rel_offset 19, 40
-    sd     $s2, 32($sp)
-    .cfi_rel_offset 18, 32
-    sd     $s1, 24($sp)
-    .cfi_rel_offset 17, 24
-    sd     $s0, 16($sp)
-    .cfi_rel_offset 16, 16
-    sd     $a4, 8($sp)
-    .cfi_rel_offset 8, 8
-    sd     $a3, 0($sp)
-    .cfi_rel_offset 7, 0
-    move   rSELF, $a5                      # Save managed thread pointer into rSELF
-
-    daddiu $sp, $sp, -16
-    jal    .Losr_entry
-    sd     $zero, 0($sp)                   # Store null for ArtMethod* at bottom of frame
-    daddiu $sp, $sp, 16
-
-    // Restore return value address and shorty address
-    ld     $a4, 8($sp)                     # shorty address
-    .cfi_restore 8
-    ld     $a3, 0($sp)                     # result value address
-    .cfi_restore 7
-
-    lbu    $t1, 0($a4)                     # load return type
-    li     $t2, 'D'                        # put char 'D' into t2
-    beq    $t1, $t2, .Losr_fp_result       # branch if result type char == 'D'
-    li     $t2, 'F'                        # put char 'F' into t2
-    beq    $t1, $t2, .Losr_fp_result       # branch if result type char == 'F'
-    nop
-    b      .Losr_exit
-    dsrl   $v1, $v0, 32                    # put high half of result in v1
-.Losr_fp_result:
-    mfc1   $v0, $f0
-    mfhc1  $v1, $f0                        # put high half of FP result in v1
-.Losr_exit:
-    sw     $v0, 0($a3)                     # store low half of result
-    sw     $v1, 4($a3)                     # store high half of result
-
-    // Restore callee registers
-    ld     $ra, 104($sp)
-    .cfi_restore 31
-    ld     $s8, 96($sp)
-    .cfi_restore 30
-    ld     $t0, 88($sp)                    # save SP into t0 for now
-    .cfi_restore 29
-    ld     $t8, 80($sp)                    # Restore gp back to it's temp storage.
-    .cfi_restore 28
-    ld     $s7, 72($sp)
-    .cfi_restore 23
-    ld     $s6, 64($sp)
-    .cfi_restore 22
-    ld     $s5, 56($sp)
-    .cfi_restore 21
-    ld     $s4, 48($sp)
-    .cfi_restore 20
-    ld     $s3, 40($sp)
-    .cfi_restore 19
-    ld     $s2, 32($sp)
-    .cfi_restore 18
-    ld     $s1, 24($sp)
-    .cfi_restore 17
-    ld     $s0, 16($sp)
-    .cfi_restore 16
-    jalr   $zero, $ra
-    move   $sp, $t0
-
-.Losr_entry:
-    dsubu  $sp, $sp, $a1                   # Reserve space for callee stack
-    daddiu $a1, $a1, -8
-    daddu  $t0, $a1, $sp
-    sw     $ra, 0($t0)                     # Store low half of RA per compiler ABI
-    dsrl   $t1, $ra, 32
-    sw     $t1, 4($t0)                     # Store high half of RA per compiler ABI
-
-    // Copy arguments into callee stack
-    // Use simple copy routine for now.
-    // 4 bytes per slot.
-    // a0 = source address
-    // a1 = args length in bytes (does not include 8 bytes for RA)
-    // sp = destination address
-    beqz   $a1, .Losr_loop_exit
-    daddiu $a1, $a1, -4
-    daddu  $t1, $a0, $a1
-    daddu  $t2, $sp, $a1
-.Losr_loop_entry:
-    lw     $t0, 0($t1)
-    daddiu $t1, $t1, -4
-    sw     $t0, 0($t2)
-    bne    $sp, $t2, .Losr_loop_entry
-    daddiu $t2, $t2, -4
-
-.Losr_loop_exit:
-    move   $t9, $a2
-    jalr   $zero, $t9                      # Jump to the OSR entry point.
-    nop
-END art_quick_osr_stub
-
-    /*
-     * On entry $a0 is uint32_t* gprs_ and $a1 is uint32_t* fprs_
-     * FIXME: just guessing about the shape of the jmpbuf.  Where will pc be?
-     */
-ENTRY_NO_GP art_quick_do_long_jump
-    l.d     $f0, 0($a1)
-    l.d     $f1, 8($a1)
-    l.d     $f2, 16($a1)
-    l.d     $f3, 24($a1)
-    l.d     $f4, 32($a1)
-    l.d     $f5, 40($a1)
-    l.d     $f6, 48($a1)
-    l.d     $f7, 56($a1)
-    l.d     $f8, 64($a1)
-    l.d     $f9, 72($a1)
-    l.d     $f10, 80($a1)
-    l.d     $f11, 88($a1)
-    l.d     $f12, 96($a1)
-    l.d     $f13, 104($a1)
-    l.d     $f14, 112($a1)
-    l.d     $f15, 120($a1)
-    l.d     $f16, 128($a1)
-    l.d     $f17, 136($a1)
-    l.d     $f18, 144($a1)
-    l.d     $f19, 152($a1)
-    l.d     $f20, 160($a1)
-    l.d     $f21, 168($a1)
-    l.d     $f22, 176($a1)
-    l.d     $f23, 184($a1)
-    l.d     $f24, 192($a1)
-    l.d     $f25, 200($a1)
-    l.d     $f26, 208($a1)
-    l.d     $f27, 216($a1)
-    l.d     $f28, 224($a1)
-    l.d     $f29, 232($a1)
-    l.d     $f30, 240($a1)
-    l.d     $f31, 248($a1)
-    .set push
-    .set nomacro
-    .set noat
-# no need to load zero
-    ld      $at, 8($a0)
-    .set pop
-    ld      $v0, 16($a0)
-    ld      $v1, 24($a0)
-# a0 has to be loaded last
-    ld      $a1, 40($a0)
-    ld      $a2, 48($a0)
-    ld      $a3, 56($a0)
-    ld      $a4, 64($a0)
-    ld      $a5, 72($a0)
-    ld      $a6, 80($a0)
-    ld      $a7, 88($a0)
-    ld      $t0, 96($a0)
-    ld      $t1, 104($a0)
-    ld      $t2, 112($a0)
-    ld      $t3, 120($a0)
-    ld      $s0, 128($a0)
-    ld      $s1, 136($a0)
-    ld      $s2, 144($a0)
-    ld      $s3, 152($a0)
-    ld      $s4, 160($a0)
-    ld      $s5, 168($a0)
-    ld      $s6, 176($a0)
-    ld      $s7, 184($a0)
-    ld      $t8, 192($a0)
-    ld      $t9, 200($a0)
-# no need to load k0, k1
-    ld      $gp, 224($a0)
-    ld      $sp, 232($a0)
-    ld      $s8, 240($a0)
-    ld      $ra, 248($a0)
-    ld      $a0, 32($a0)
-    move    $v0, $zero          # clear result registers v0 and v1
-    jalr    $zero, $t9          # do long jump (do not use ra, it must not be clobbered)
-    move    $v1, $zero
-END art_quick_do_long_jump
-
-    /*
-     * Called by managed code, saves most registers (forms basis of long jump
-     * context) and passes the bottom of the stack.
-     * artDeliverExceptionFromCode will place the callee save Method* at
-     * the bottom of the thread. On entry a0 holds Throwable*
-     */
-ENTRY art_quick_deliver_exception
-    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
-    dla  $t9, artDeliverExceptionFromCode
-    jalr $zero, $t9                 # artDeliverExceptionFromCode(Throwable*, Thread*)
-    move $a1, rSELF                 # pass Thread::Current
-END art_quick_deliver_exception
-
-    /*
-     * Called by managed code to create and deliver a NullPointerException
-     */
-    .extern artThrowNullPointerExceptionFromCode
-ENTRY_NO_GP art_quick_throw_null_pointer_exception
-    // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK,
-    // even after clobbering any registers we don't need to preserve, such as $gp or $t0.
-    SETUP_SAVE_EVERYTHING_FRAME
-    dla  $t9, artThrowNullPointerExceptionFromCode
-    jalr $zero, $t9                 # artThrowNullPointerExceptionFromCode(Thread*)
-    move $a0, rSELF                 # pass Thread::Current
-END art_quick_throw_null_pointer_exception
-
-    /*
-     * Call installed by a signal handler to create and deliver a NullPointerException
-     */
-    .extern artThrowNullPointerExceptionFromSignal
-ENTRY_NO_GP_CUSTOM_CFA art_quick_throw_null_pointer_exception_from_signal, FRAME_SIZE_SAVE_EVERYTHING
-    SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP
-    # Retrieve the fault address from the padding where the signal handler stores it.
-    ld   $a0, (__SIZEOF_POINTER__)($sp)
-    dla  $t9, artThrowNullPointerExceptionFromSignal
-    jalr $zero, $t9                 # artThrowNullPointerExceptionFromSignal(uinptr_t, Thread*)
-    move $a1, rSELF                 # pass Thread::Current
-END art_quick_throw_null_pointer_exception_from_signal
-
-    /*
-     * Called by managed code to create and deliver an ArithmeticException
-     */
-    .extern artThrowDivZeroFromCode
-ENTRY_NO_GP art_quick_throw_div_zero
-    SETUP_SAVE_EVERYTHING_FRAME
-    dla  $t9, artThrowDivZeroFromCode
-    jalr $zero, $t9                 # artThrowDivZeroFromCode(Thread*)
-    move $a0, rSELF                 # pass Thread::Current
-END art_quick_throw_div_zero
-
-    /*
-     * Called by managed code to create and deliver an
-     * ArrayIndexOutOfBoundsException
-     */
-    .extern artThrowArrayBoundsFromCode
-ENTRY_NO_GP art_quick_throw_array_bounds
-    // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK,
-    // even after clobbering any registers we don't need to preserve, such as $gp or $t0.
-    SETUP_SAVE_EVERYTHING_FRAME
-    dla  $t9, artThrowArrayBoundsFromCode
-    jalr $zero, $t9                 # artThrowArrayBoundsFromCode(index, limit, Thread*)
-    move $a2, rSELF                 # pass Thread::Current
-END art_quick_throw_array_bounds
-
-    /*
-     * Called by managed code to create and deliver a StringIndexOutOfBoundsException
-     * as if thrown from a call to String.charAt().
-     */
-    .extern artThrowStringBoundsFromCode
-ENTRY_NO_GP art_quick_throw_string_bounds
-    SETUP_SAVE_EVERYTHING_FRAME
-    dla  $t9, artThrowStringBoundsFromCode
-    jalr $zero, $t9                 # artThrowStringBoundsFromCode(index, limit, Thread*)
-    move $a2, rSELF                 # pass Thread::Current
-END art_quick_throw_string_bounds
-
-    /*
-     * Called by managed code to create and deliver a StackOverflowError.
-     */
-    .extern artThrowStackOverflowFromCode
-ENTRY art_quick_throw_stack_overflow
-    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
-    dla  $t9, artThrowStackOverflowFromCode
-    jalr $zero, $t9                 # artThrowStackOverflowFromCode(Thread*)
-    move $a0, rSELF                 # pass Thread::Current
-END art_quick_throw_stack_overflow
-
-    /*
-     * All generated callsites for interface invokes and invocation slow paths will load arguments
-     * as usual - except instead of loading arg0/$a0 with the target Method*, arg0/$a0 will contain
-     * the method_idx.  This wrapper will save arg1-arg3, load the caller's Method*, align the
-     * stack and call the appropriate C helper.
-     * NOTE: "this" is first visable argument of the target, and so can be found in arg1/$a1.
-     *
-     * The helper will attempt to locate the target and return a 128-bit result in $v0/$v1 consisting
-     * of the target Method* in $v0 and method->code_ in $v1.
-     *
-     * If unsuccessful, the helper will return null/null. There will be a pending exception in the
-     * thread and we branch to another stub to deliver it.
-     *
-     * On success this wrapper will restore arguments and *jump* to the target, leaving the ra
-     * pointing back to the original caller.
-     */
-.macro INVOKE_TRAMPOLINE_BODY cxx_name, save_s4_thru_s8_only=0
-    .extern \cxx_name
-    SETUP_SAVE_REFS_AND_ARGS_FRAME \save_s4_thru_s8_only  # save callee saves in case
-                                                          # allocation triggers GC
-    move  $a2, rSELF                       # pass Thread::Current
-    jal   \cxx_name                        # (method_idx, this, Thread*, $sp)
-    move  $a3, $sp                         # pass $sp
-    move  $a0, $v0                         # save target Method*
-    move  $t9, $v1                         # save $v0->code_
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME
-    beq   $v0, $zero, 1f
-    nop
-    jalr  $zero, $t9
-    nop
-1:
-    DELIVER_PENDING_EXCEPTION
-.endm
-.macro INVOKE_TRAMPOLINE c_name, cxx_name
-ENTRY \c_name
-    INVOKE_TRAMPOLINE_BODY \cxx_name
-END \c_name
-.endm
-
-INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
-
-INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
-INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
-INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
-INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
-
-    # On entry:
-    #   t0 = shorty
-    #   t1 = ptr to arg_array
-    #   t2 = number of argument bytes remain
-    #   v0 = ptr to stack frame where to copy arg_array
-    # This macro modifies t3, t9 and v0
-.macro LOOP_OVER_SHORTY_LOADING_REG gpu, fpu, label
-    lbu    $t3, 0($t0)           # get argument type from shorty
-    beqz   $t3, \label
-    daddiu $t0, 1
-    li     $t9, 68               # put char 'D' into t9
-    beq    $t9, $t3, 1f          # branch if result type char == 'D'
-    li     $t9, 70               # put char 'F' into t9
-    beq    $t9, $t3, 2f          # branch if result type char == 'F'
-    li     $t9, 74               # put char 'J' into t9
-    beq    $t9, $t3, 3f          # branch if result type char == 'J'
-    nop
-    lw     $\gpu, 0($t1)
-    sw     $\gpu, 0($v0)
-    daddiu $v0, 4
-    daddiu $t1, 4
-    b      4f
-    daddiu $t2, -4               # delay slot
-
-1:  # found double
-    lwu    $t3, 0($t1)
-    mtc1   $t3, $\fpu
-    sw     $t3, 0($v0)
-    lwu    $t3, 4($t1)
-    mthc1  $t3, $\fpu
-    sw     $t3, 4($v0)
-    daddiu $v0, 8
-    daddiu $t1, 8
-    b      4f
-    daddiu $t2, -8               # delay slot
-
-2:  # found float
-    lwu    $t3, 0($t1)
-    mtc1   $t3, $\fpu
-    sw     $t3, 0($v0)
-    daddiu $v0, 4
-    daddiu $t1, 4
-    b      4f
-    daddiu $t2, -4               # delay slot
-
-3:  # found long (8 bytes)
-    lwu    $t3, 0($t1)
-    sw     $t3, 0($v0)
-    lwu    $t9, 4($t1)
-    sw     $t9, 4($v0)
-    dsll   $t9, $t9, 32
-    or     $\gpu, $t9, $t3
-    daddiu $v0, 8
-    daddiu $t1, 8
-    daddiu $t2, -8
-4:
-.endm
-
-    /*
-     * Invocation stub for quick code.
-     * On entry:
-     *   a0 = method pointer
-     *   a1 = argument array that must at least contain the this ptr.
-     *   a2 = size of argument array in bytes
-     *   a3 = (managed) thread pointer
-     *   a4 = JValue* result
-     *   a5 = shorty
-     */
-ENTRY_NO_GP art_quick_invoke_stub
-    # push a4, a5, s0(rSUSPEND), s1(rSELF), s8, ra onto the stack
-    daddiu $sp, $sp, -48
-    .cfi_adjust_cfa_offset 48
-    sd     $ra, 40($sp)
-    .cfi_rel_offset 31, 40
-    sd     $s8, 32($sp)
-    .cfi_rel_offset 30, 32
-    sd     $s1, 24($sp)
-    .cfi_rel_offset 17, 24
-    sd     $s0, 16($sp)
-    .cfi_rel_offset 16, 16
-    sd     $a5, 8($sp)
-    .cfi_rel_offset 9, 8
-    sd     $a4, 0($sp)
-    .cfi_rel_offset 8, 0
-
-    move   $s1, $a3              # move managed thread pointer into s1 (rSELF)
-    move   $s8, $sp              # save sp in s8 (fp)
-
-    daddiu $t3, $a2, 24          # add 8 for ArtMethod* and 16 for stack alignment
-    dsrl   $t3, $t3, 4           # shift the frame size right 4
-    dsll   $t3, $t3, 4           # shift the frame size left 4 to align to 16 bytes
-    dsubu  $sp, $sp, $t3         # reserve stack space for argument array
-
-    daddiu $t0, $a5, 1           # t0 = shorty[1] (skip 1 for return type)
-    daddiu $t1, $a1, 4           # t1 = ptr to arg_array[4] (skip this ptr)
-    daddiu $t2, $a2, -4          # t2 = number of argument bytes remain (skip this ptr)
-    daddiu $v0, $sp, 12          # v0 points to where to copy arg_array
-    LOOP_OVER_SHORTY_LOADING_REG a2, f14, call_fn
-    LOOP_OVER_SHORTY_LOADING_REG a3, f15, call_fn
-    LOOP_OVER_SHORTY_LOADING_REG a4, f16, call_fn
-    LOOP_OVER_SHORTY_LOADING_REG a5, f17, call_fn
-    LOOP_OVER_SHORTY_LOADING_REG a6, f18, call_fn
-    LOOP_OVER_SHORTY_LOADING_REG a7, f19, call_fn
-
-    # copy arguments onto stack (t2 should be multiples of 4)
-    ble    $t2, $zero, call_fn   # t2 = number of argument bytes remain
-1:
-    lw     $t3, 0($t1)           # load from argument array
-    daddiu $t1, $t1, 4
-    sw     $t3, 0($v0)           # save to stack
-    daddiu $t2, -4
-    bgt    $t2, $zero, 1b        # t2 = number of argument bytes remain
-    daddiu $v0, $v0, 4
-
-call_fn:
-    # call method (a0 and a1 have been untouched)
-    lwu    $a1, 0($a1)           # make a1 = this ptr
-    sw     $a1, 8($sp)           # copy this ptr (skip 8 bytes for ArtMethod*)
-    sd     $zero, 0($sp)         # store null for ArtMethod* at bottom of frame
-    ld     $t9, ART_METHOD_QUICK_CODE_OFFSET_64($a0)  # get pointer to the code
-    jalr   $t9                   # call the method
-    nop
-    move   $sp, $s8              # restore sp
-
-    # pop a4, a5, s1(rSELF), s8, ra off of the stack
-    ld     $a4, 0($sp)
-    .cfi_restore 8
-    ld     $a5, 8($sp)
-    .cfi_restore 9
-    ld     $s0, 16($sp)
-    .cfi_restore 16
-    ld     $s1, 24($sp)
-    .cfi_restore 17
-    ld     $s8, 32($sp)
-    .cfi_restore 30
-    ld     $ra, 40($sp)
-    .cfi_restore 31
-    daddiu $sp, $sp, 48
-    .cfi_adjust_cfa_offset -48
-
-    # a4 = JValue* result
-    # a5 = shorty string
-    lbu   $t1, 0($a5)           # get result type from shorty
-    li    $t2, 68               # put char 'D' into t2
-    beq   $t1, $t2, 1f          # branch if result type char == 'D'
-    li    $t3, 70               # put char 'F' into t3
-    beq   $t1, $t3, 1f          # branch if result type char == 'F'
-    sw    $v0, 0($a4)           # store the result
-    dsrl  $v1, $v0, 32
-    jalr  $zero, $ra
-    sw    $v1, 4($a4)           # store the other half of the result
-1:
-    mfc1  $v0, $f0
-    mfhc1 $v1, $f0
-    sw    $v0, 0($a4)           # store the result
-    jalr  $zero, $ra
-    sw    $v1, 4($a4)           # store the other half of the result
-END art_quick_invoke_stub
-
-    /*
-     * Invocation static stub for quick code.
-     * On entry:
-     *   a0 = method pointer
-     *   a1 = argument array that must at least contain the this ptr.
-     *   a2 = size of argument array in bytes
-     *   a3 = (managed) thread pointer
-     *   a4 = JValue* result
-     *   a5 = shorty
-     */
-ENTRY_NO_GP art_quick_invoke_static_stub
-
-    # push a4, a5, s0(rSUSPEND), s1(rSELF), s8, ra, onto the stack
-    daddiu $sp, $sp, -48
-    .cfi_adjust_cfa_offset 48
-    sd     $ra, 40($sp)
-    .cfi_rel_offset 31, 40
-    sd     $s8, 32($sp)
-    .cfi_rel_offset 30, 32
-    sd     $s1, 24($sp)
-    .cfi_rel_offset 17, 24
-    sd     $s0, 16($sp)
-    .cfi_rel_offset 16, 16
-    sd     $a5, 8($sp)
-    .cfi_rel_offset 9, 8
-    sd     $a4, 0($sp)
-    .cfi_rel_offset 8, 0
-
-    move   $s1, $a3              # move managed thread pointer into s1 (rSELF)
-    move   $s8, $sp              # save sp in s8 (fp)
-
-    daddiu $t3, $a2, 24          # add 8 for ArtMethod* and 16 for stack alignment
-    dsrl   $t3, $t3, 4           # shift the frame size right 4
-    dsll   $t3, $t3, 4           # shift the frame size left 4 to align to 16 bytes
-    dsubu  $sp, $sp, $t3         # reserve stack space for argument array
-
-    daddiu $t0, $a5, 1           # t0 = shorty[1] (skip 1 for return type)
-    move   $t1, $a1              # t1 = arg_array
-    move   $t2, $a2              # t2 = number of argument bytes remain
-    daddiu $v0, $sp, 8           # v0 points to where to copy arg_array
-    LOOP_OVER_SHORTY_LOADING_REG a1, f13, call_sfn
-    LOOP_OVER_SHORTY_LOADING_REG a2, f14, call_sfn
-    LOOP_OVER_SHORTY_LOADING_REG a3, f15, call_sfn
-    LOOP_OVER_SHORTY_LOADING_REG a4, f16, call_sfn
-    LOOP_OVER_SHORTY_LOADING_REG a5, f17, call_sfn
-    LOOP_OVER_SHORTY_LOADING_REG a6, f18, call_sfn
-    LOOP_OVER_SHORTY_LOADING_REG a7, f19, call_sfn
-
-    # copy arguments onto stack (t2 should be multiples of 4)
-    ble    $t2, $zero, call_sfn  # t2 = number of argument bytes remain
-1:
-    lw     $t3, 0($t1)           # load from argument array
-    daddiu $t1, $t1, 4
-    sw     $t3, 0($v0)           # save to stack
-    daddiu $t2, -4
-    bgt    $t2, $zero, 1b        # t2 = number of argument bytes remain
-    daddiu $v0, $v0, 4
-
-call_sfn:
-    # call method (a0 has been untouched)
-    sd     $zero, 0($sp)         # store null for ArtMethod* at bottom of frame
-    ld     $t9, ART_METHOD_QUICK_CODE_OFFSET_64($a0)  # get pointer to the code
-    jalr   $t9                   # call the method
-    nop
-    move   $sp, $s8              # restore sp
-
-    # pop a4, a5, s0(rSUSPEND), s1(rSELF), s8, ra off of the stack
-    ld     $a4, 0($sp)
-    .cfi_restore 8
-    ld     $a5, 8($sp)
-    .cfi_restore 9
-    ld     $s0, 16($sp)
-    .cfi_restore 16
-    ld     $s1, 24($sp)
-    .cfi_restore 17
-    ld     $s8, 32($sp)
-    .cfi_restore 30
-    ld     $ra, 40($sp)
-    .cfi_restore 31
-    daddiu $sp, $sp, 48
-    .cfi_adjust_cfa_offset -48
-
-    # a4 = JValue* result
-    # a5 = shorty string
-    lbu   $t1, 0($a5)           # get result type from shorty
-    li    $t2, 68               # put char 'D' into t2
-    beq   $t1, $t2, 1f          # branch if result type char == 'D'
-    li    $t3, 70               # put char 'F' into t3
-    beq   $t1, $t3, 1f          # branch if result type char == 'F'
-    sw    $v0, 0($a4)           # store the result
-    dsrl  $v1, $v0, 32
-    jalr  $zero, $ra
-    sw    $v1, 4($a4)           # store the other half of the result
-1:
-    mfc1  $v0, $f0
-    mfhc1 $v1, $f0
-    sw    $v0, 0($a4)           # store the result
-    jalr  $zero, $ra
-    sw    $v1, 4($a4)           # store the other half of the result
-END art_quick_invoke_static_stub
-
-    /*
-     * Entry from managed code that calls artHandleFillArrayDataFromCode and
-     * delivers exception on failure.
-     */
-    .extern artHandleFillArrayDataFromCode
-ENTRY art_quick_handle_fill_data
-    SETUP_SAVE_REFS_ONLY_FRAME         # save callee saves in case exception allocation triggers GC
-    ld      $a2, FRAME_SIZE_SAVE_REFS_ONLY($sp)         # pass referrer's Method*
-    jal     artHandleFillArrayDataFromCode              # (payload offset, Array*, method, Thread*)
-    move    $a3, rSELF                                  # pass Thread::Current
-    RETURN_IF_ZERO
-END art_quick_handle_fill_data
-
-    /*
-     * Entry from managed code that calls artLockObjectFromCode, may block for GC.
-     */
-    .extern artLockObjectFromCode
-ENTRY_NO_GP art_quick_lock_object
-    beqzc   $a0, art_quick_throw_null_pointer_exception
-    li      $t8, LOCK_WORD_THIN_LOCK_COUNT_ONE
-    li      $t3, LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED
-.Lretry_lock:
-    lw      $t0, THREAD_ID_OFFSET(rSELF)  # TODO: Can the thread ID really change during the loop?
-    ll      $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-    and     $t2, $t1, $t3                 # zero the gc bits
-    bnezc   $t2, .Lnot_unlocked           # already thin locked
-    # Unlocked case - $t1: original lock word that's zero except for the read barrier bits.
-    or      $t2, $t1, $t0                 # $t2 holds thread id with count of 0 with preserved read barrier bits
-    sc      $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-    beqzc   $t2, .Lretry_lock             # store failed, retry
-    sync                                  # full (LoadLoad|LoadStore) memory barrier
-    jic     $ra, 0
-.Lnot_unlocked:
-    # $t1: original lock word, $t0: thread_id with count of 0 and zero read barrier bits
-    srl     $t2, $t1, LOCK_WORD_STATE_SHIFT
-    bnezc   $t2, .Lslow_lock              # if either of the top two bits are set, go slow path
-    xor     $t2, $t1, $t0                 # lock_word.ThreadId() ^ self->ThreadId()
-    andi    $t2, $t2, 0xFFFF              # zero top 16 bits
-    bnezc   $t2, .Lslow_lock              # lock word and self thread id's match -> recursive lock
-                                          # otherwise contention, go to slow path
-    and     $t2, $t1, $t3                 # zero the gc bits
-    addu    $t2, $t2, $t8                 # increment count in lock word
-    srl     $t2, $t2, LOCK_WORD_STATE_SHIFT  # if the first gc state bit is set, we overflowed.
-    bnezc   $t2, .Lslow_lock              # if we overflow the count go slow path
-    addu    $t2, $t1, $t8                 # increment count for real
-    sc      $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-    beqzc   $t2, .Lretry_lock             # store failed, retry
-    nop
-    jic     $ra, 0
-.Lslow_lock:
-    .cpsetup $t9, $t8, art_quick_lock_object
-    SETUP_SAVE_REFS_ONLY_FRAME            # save callee saves in case we block
-    jal     artLockObjectFromCode         # (Object* obj, Thread*)
-    move    $a1, rSELF                    # pass Thread::Current
-    RETURN_IF_ZERO
-END art_quick_lock_object
-
-ENTRY_NO_GP art_quick_lock_object_no_inline
-    beq     $a0, $zero, art_quick_throw_null_pointer_exception
-    nop
-    .cpsetup $t9, $t8, art_quick_lock_object_no_inline
-    SETUP_SAVE_REFS_ONLY_FRAME            # save callee saves in case we block
-    jal     artLockObjectFromCode         # (Object* obj, Thread*)
-    move    $a1, rSELF                    # pass Thread::Current
-    RETURN_IF_ZERO
-END art_quick_lock_object_no_inline
-
-    /*
-     * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
-     */
-    .extern artUnlockObjectFromCode
-ENTRY_NO_GP art_quick_unlock_object
-    beqzc   $a0, art_quick_throw_null_pointer_exception
-    li      $t8, LOCK_WORD_THIN_LOCK_COUNT_ONE
-    li      $t3, LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED
-.Lretry_unlock:
-#ifndef USE_READ_BARRIER
-    lw      $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-#else
-    ll      $t1, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)  # Need to use atomic read-modify-write for read barrier
-#endif
-    srl     $t2, $t1, LOCK_WORD_STATE_SHIFT
-    bnezc   $t2, .Lslow_unlock         # if either of the top two bits are set, go slow path
-    lw      $t0, THREAD_ID_OFFSET(rSELF)
-    and     $t2, $t1, $t3              # zero the gc bits
-    xor     $t2, $t2, $t0              # lock_word.ThreadId() ^ self->ThreadId()
-    andi    $t2, $t2, 0xFFFF           # zero top 16 bits
-    bnezc   $t2, .Lslow_unlock         # do lock word and self thread id's match?
-    and     $t2, $t1, $t3              # zero the gc bits
-    bgeuc   $t2, $t8, .Lrecursive_thin_unlock
-    # transition to unlocked
-    nor     $t2, $zero, $t3            # $t2 = LOCK_WORD_GC_STATE_MASK_SHIFTED
-    and     $t2, $t1, $t2              # $t2: zero except for the preserved gc bits
-    sync                               # full (LoadStore|StoreStore) memory barrier
-#ifndef USE_READ_BARRIER
-    sw      $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-#else
-    sc      $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-    beqzc   $t2, .Lretry_unlock        # store failed, retry
-    nop
-#endif
-    jic     $ra, 0
-.Lrecursive_thin_unlock:
-    # t1: original lock word
-    subu    $t2, $t1, $t8              # decrement count
-#ifndef USE_READ_BARRIER
-    sw      $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-#else
-    sc      $t2, MIRROR_OBJECT_LOCK_WORD_OFFSET($a0)
-    beqzc   $t2, .Lretry_unlock        # store failed, retry
-    nop
-#endif
-    jic     $ra, 0
-.Lslow_unlock:
-    .cpsetup $t9, $t8, art_quick_unlock_object
-    SETUP_SAVE_REFS_ONLY_FRAME         # save callee saves in case exception allocation triggers GC
-    jal     artUnlockObjectFromCode    # (Object* obj, Thread*)
-    move    $a1, rSELF                 # pass Thread::Current
-    RETURN_IF_ZERO
-END art_quick_unlock_object
-
-ENTRY_NO_GP art_quick_unlock_object_no_inline
-    beq     $a0, $zero, art_quick_throw_null_pointer_exception
-    nop
-    .cpsetup $t9, $t8, art_quick_unlock_object_no_inline
-    SETUP_SAVE_REFS_ONLY_FRAME         # save callee saves in case exception allocation triggers GC
-    jal     artUnlockObjectFromCode    # (Object* obj, Thread*)
-    move    $a1, rSELF                 # pass Thread::Current
-    RETURN_IF_ZERO
-END art_quick_unlock_object_no_inline
-
-    /*
-     * Entry from managed code that calls artInstanceOfFromCode and delivers exception on failure.
-     */
-    .extern artInstanceOfFromCode
-    .extern artThrowClassCastExceptionForObject
-ENTRY art_quick_check_instance_of
-    // Type check using the bit string passes null as the target class. In that case just throw.
-    beqzc  $a1, .Lthrow_class_cast_exception_for_bitstring_check
-
-    daddiu $sp, $sp, -32
-    .cfi_adjust_cfa_offset 32
-    sd     $ra, 24($sp)
-    .cfi_rel_offset 31, 24
-    sd     $t9, 16($sp)
-    sd     $a1, 8($sp)
-    sd     $a0, 0($sp)
-    jal    artInstanceOfFromCode
-    .cpreturn                       # Restore gp from t8 in branch delay slot.
-                                    # t8 may be clobbered in artIsAssignableFromCode.
-    beq    $v0, $zero, .Lthrow_class_cast_exception
-    ld     $ra, 24($sp)
-    jalr   $zero, $ra
-    daddiu $sp, $sp, 32
-    .cfi_adjust_cfa_offset -32
-
-.Lthrow_class_cast_exception:
-    ld     $t9, 16($sp)
-    ld     $a1, 8($sp)
-    ld     $a0, 0($sp)
-    daddiu $sp, $sp, 32
-    .cfi_adjust_cfa_offset -32
-
-.Lthrow_class_cast_exception_for_bitstring_check:
-    SETUP_GP
-    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
-    dla  $t9, artThrowClassCastExceptionForObject
-    jalr $zero, $t9                 # artThrowClassCastException (Object*, Class*, Thread*)
-    move $a2, rSELF                 # pass Thread::Current
-END art_quick_check_instance_of
-
-
-    /*
-     * Restore rReg's value from offset($sp) if rReg is not the same as rExclude.
-     * nReg is the register number for rReg.
-     */
-.macro POP_REG_NE rReg, nReg, offset, rExclude
-    .ifnc \rReg, \rExclude
-        ld \rReg, \offset($sp)      # restore rReg
-        .cfi_restore \nReg
-    .endif
-.endm
-
-    /*
-     * Macro to insert read barrier, only used in art_quick_aput_obj.
-     * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET.
-     * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
-     */
-.macro READ_BARRIER rDest, rObj, offset
-#ifdef USE_READ_BARRIER
-    # saved registers used in art_quick_aput_obj: a0-a2, t0-t1, t9, ra. 16B-aligned.
-    daddiu  $sp, $sp, -64
-    .cfi_adjust_cfa_offset 64
-    sd     $ra, 56($sp)
-    .cfi_rel_offset 31, 56
-    sd     $t9, 48($sp)
-    .cfi_rel_offset 25, 48
-    sd     $t1, 40($sp)
-    .cfi_rel_offset 13, 40
-    sd     $t0, 32($sp)
-    .cfi_rel_offset 12, 32
-    sd     $a2, 16($sp)             # padding slot at offset 24 (padding can be any slot in the 64B)
-    .cfi_rel_offset 6, 16
-    sd     $a1, 8($sp)
-    .cfi_rel_offset 5, 8
-    sd     $a0, 0($sp)
-    .cfi_rel_offset 4, 0
-
-    # move $a0, \rRef               # pass ref in a0 (no-op for now since parameter ref is unused)
-    .ifnc \rObj, $a1
-        move $a1, \rObj             # pass rObj
-    .endif
-    daddiu $a2, $zero, \offset      # pass offset
-    jal artReadBarrierSlow          # artReadBarrierSlow(ref, rObj, offset)
-    .cpreturn                       # Restore gp from t8 in branch delay slot.
-                                    # t8 may be clobbered in artReadBarrierSlow.
-    # No need to unpoison return value in v0, artReadBarrierSlow() would do the unpoisoning.
-    move \rDest, $v0                # save return value in rDest
-                                    # (rDest cannot be v0 in art_quick_aput_obj)
-
-    ld     $a0, 0($sp)              # restore registers except rDest
-                                    # (rDest can only be t0 or t1 in art_quick_aput_obj)
-    .cfi_restore 4
-    ld     $a1, 8($sp)
-    .cfi_restore 5
-    ld     $a2, 16($sp)
-    .cfi_restore 6
-    POP_REG_NE $t0, 12, 32, \rDest
-    POP_REG_NE $t1, 13, 40, \rDest
-    ld     $t9, 48($sp)
-    .cfi_restore 25
-    ld     $ra, 56($sp)             # restore $ra
-    .cfi_restore 31
-    daddiu  $sp, $sp, 64
-    .cfi_adjust_cfa_offset -64
-    SETUP_GP                        # set up gp because we are not returning
-#else
-    lwu     \rDest, \offset(\rObj)
-    UNPOISON_HEAP_REF \rDest
-#endif  // USE_READ_BARRIER
-.endm
-
-ENTRY art_quick_aput_obj
-    beq  $a2, $zero, .Ldo_aput_null
-    nop
-    READ_BARRIER $t0, $a0, MIRROR_OBJECT_CLASS_OFFSET
-    READ_BARRIER $t1, $a2, MIRROR_OBJECT_CLASS_OFFSET
-    READ_BARRIER $t0, $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
-    bne $t1, $t0, .Lcheck_assignability  # value's type == array's component type - trivial assignability
-    nop
-.Ldo_aput:
-    dsll  $a1, $a1, 2
-    daddu $t0, $a0, $a1
-    POISON_HEAP_REF $a2
-    sw   $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
-    ld   $t0, THREAD_CARD_TABLE_OFFSET(rSELF)
-    dsrl  $t1, $a0, CARD_TABLE_CARD_SHIFT
-    daddu $t1, $t1, $t0
-    sb   $t0, ($t1)
-    jalr $zero, $ra
-    .cpreturn                       # Restore gp from t8 in branch delay slot.
-.Ldo_aput_null:
-    dsll  $a1, $a1, 2
-    daddu $t0, $a0, $a1
-    sw   $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
-    jalr $zero, $ra
-    .cpreturn                       # Restore gp from t8 in branch delay slot.
-.Lcheck_assignability:
-    daddiu $sp, $sp, -64
-    .cfi_adjust_cfa_offset 64
-    sd     $ra, 56($sp)
-    .cfi_rel_offset 31, 56
-    sd     $t9, 24($sp)
-    sd     $a2, 16($sp)
-    sd     $a1, 8($sp)
-    sd     $a0, 0($sp)
-    move   $a1, $t1
-    move   $a0, $t0
-    jal    artIsAssignableFromCode  # (Class*, Class*)
-    .cpreturn                       # Restore gp from t8 in branch delay slot.
-                                    # t8 may be clobbered in artIsAssignableFromCode.
-    ld     $ra, 56($sp)
-    ld     $t9, 24($sp)
-    ld     $a2, 16($sp)
-    ld     $a1, 8($sp)
-    ld     $a0, 0($sp)
-    daddiu $sp, $sp, 64
-    .cfi_adjust_cfa_offset -64
-    SETUP_GP
-    bne    $v0, $zero, .Ldo_aput
-    nop
-    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
-    move   $a1, $a2
-    dla  $t9, artThrowArrayStoreException
-    jalr $zero, $t9                 # artThrowArrayStoreException(Class*, Class*, Thread*)
-    move   $a2, rSELF               # pass Thread::Current
-END art_quick_aput_obj
-
-// Macros taking opportunity of code similarities for downcalls.
-.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return, extend=0
-    .extern \entrypoint
-ENTRY \name
-    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case of GC
-    dla     $t9, \entrypoint
-    jalr    $t9                       # (field_idx, Thread*)
-    move    $a1, rSELF                # pass Thread::Current
-    .if     \extend
-    sll     $v0, $v0, 0               # sign-extend 32-bit result
-    .endif
-    \return                           # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
-END \name
-.endm
-
-.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return, extend=0
-    .extern \entrypoint
-ENTRY \name
-    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case of GC
-    dla     $t9, \entrypoint
-    jalr    $t9                       # (field_idx, Object*, Thread*) or
-                                      # (field_idx, new_val, Thread*)
-    move    $a2, rSELF                # pass Thread::Current
-    .if     \extend
-    sll     $v0, $v0, 0               # sign-extend 32-bit result
-    .endif
-    \return                           # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
-END \name
-.endm
-
-.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return, extend=0
-    .extern \entrypoint
-ENTRY \name
-    SETUP_SAVE_REFS_ONLY_FRAME        # save callee saves in case of GC
-    dla     $t9, \entrypoint
-    jalr    $t9                       # (field_idx, Object*, new_val, Thread*)
-    move    $a3, rSELF                # pass Thread::Current
-    .if     \extend
-    sll     $v0, $v0, 0               # sign-extend 32-bit result
-    .endif
-    \return                           # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
-END \name
-.endm
-
-    /*
-     * Called by managed code to resolve a static/instance field and load/store a value.
-     *
-     * Note: Functions `art{Get,Set}<Kind>{Static,Instance}FromCompiledCode` are
-     * defined with a macro in runtime/entrypoints/quick/quick_field_entrypoints.cc.
-     */
-ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION, 1
-ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION, 1
-TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
-TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_ZERO
-TWO_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_ZERO
-THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_ZERO
-
-// Macro to facilitate adding new allocation entrypoints.
-.macro ONE_ARG_DOWNCALL name, entrypoint, return
-    .extern \entrypoint
-ENTRY \name
-    SETUP_SAVE_REFS_ONLY_FRAME         # save callee saves in case of GC
-    jal     \entrypoint
-    move    $a1, rSELF                 # pass Thread::Current
-    \return
-END \name
-.endm
-
-// Macro to facilitate adding new allocation entrypoints.
-.macro TWO_ARG_DOWNCALL name, entrypoint, return
-    .extern \entrypoint
-ENTRY \name
-    SETUP_SAVE_REFS_ONLY_FRAME         # save callee saves in case of GC
-    jal     \entrypoint
-    move    $a2, rSELF                 # pass Thread::Current
-    \return
-END \name
-.endm
-
-.macro THREE_ARG_DOWNCALL name, entrypoint, return
-    .extern \entrypoint
-ENTRY \name
-    SETUP_SAVE_REFS_ONLY_FRAME         # save callee saves in case of GC
-    jal     \entrypoint
-    move    $a3, rSELF                 # pass Thread::Current
-    \return
-END \name
-.endm
-
-.macro FOUR_ARG_DOWNCALL name, entrypoint, return
-    .extern \entrypoint
-ENTRY \name
-    SETUP_SAVE_REFS_ONLY_FRAME         # save callee saves in case of GC
-    jal     \entrypoint
-    move    $a4, rSELF                 # pass Thread::Current
-    \return
-END \name
-.endm
-
-// Generate the allocation entrypoints for each allocator.
-GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
-// Comment out allocators that have mips64 specific asm.
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_OBJECT(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_region_tlab, RegionTLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)
-
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_OBJECT(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_tlab, TLAB)
-// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)
-
-// A hand-written override for:
-//   GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
-//   GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
-.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name, isInitialized
-ENTRY_NO_GP \c_name
-    # Fast path rosalloc allocation
-    # a0: type
-    # s1: Thread::Current
-    # -----------------------------
-    # t1: object size
-    # t2: rosalloc run
-    # t3: thread stack top offset
-    # a4: thread stack bottom offset
-    # v0: free list head
-    #
-    # a5, a6 : temps
-    ld     $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)    # Check if thread local allocation stack
-    ld     $a4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1)    # has any room left.
-    bgeuc  $t3, $a4, .Lslow_path_\c_name
-
-    lwu    $t1, MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET($a0)  # Load object size (t1).
-    li     $a5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE      # Check if size is for a thread local
-                                                            # allocation. Also does the initialized
-                                                            # and finalizable checks.
-    # When isInitialized == 0, then the class is potentially not yet initialized.
-    # If the class is not yet initialized, the object size will be very large to force the branch
-    # below to be taken.
-    #
-    # See InitializeClassVisitors in class-inl.h for more details.
-    bltuc  $a5, $t1, .Lslow_path_\c_name
-
-    # Compute the rosalloc bracket index from the size. Since the size is already aligned we can
-    # combine the two shifts together.
-    dsrl   $t1, $t1, (ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
-
-    daddu  $t2, $t1, $s1
-    ld     $t2, (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)($t2)  # Load rosalloc run (t2).
-
-    # Load the free list head (v0).
-    # NOTE: this will be the return val.
-    ld     $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
-    beqzc  $v0, .Lslow_path_\c_name
-
-    # Load the next pointer of the head and update the list head with the next pointer.
-    ld     $a5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
-    sd     $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
-
-    # Store the class pointer in the header. This also overwrites the first pointer. The offsets are
-    # asserted to match.
-
-#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
-#error "Class pointer needs to overwrite next pointer."
-#endif
-
-    POISON_HEAP_REF $a0
-    sw     $a0, MIRROR_OBJECT_CLASS_OFFSET($v0)
-
-    # Push the new object onto the thread local allocation stack and increment the thread local
-    # allocation stack top.
-    sw     $v0, 0($t3)
-    daddiu $t3, $t3, COMPRESSED_REFERENCE_SIZE
-    sd     $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)
-
-    # Decrement the size of the free list.
-    lw     $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
-    addiu  $a5, $a5, -1
-    sw     $a5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
-
-.if \isInitialized == 0
-    # This barrier is only necessary when the allocation also requires a class initialization check.
-    #
-    # If the class is already observably initialized, then new-instance allocations are protected
-    # from publishing by the compiler which inserts its own StoreStore barrier.
-    sync                                         # Fence.
-.endif
-    jic    $ra, 0
-
-.Lslow_path_\c_name:
-    SETUP_GP
-    SETUP_SAVE_REFS_ONLY_FRAME
-    jal    \cxx_name
-    move   $a1 ,$s1                              # Pass self as argument.
-    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END \c_name
-.endm
-
-ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc, /* isInitialized */ 0
-ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc, /* isInitialized */ 1
-
-// The common fast path code for art_quick_alloc_object_resolved/initialized_tlab
-// and art_quick_alloc_object_resolved/initialized_region_tlab.
-//
-// a0: type, s1(rSELF): Thread::Current
-// Need to preserve a0 to the slow path.
-//
-// If isInitialized=1 then the compiler assumes the object's class has already been initialized.
-// If isInitialized=0 the compiler can only assume it's been at least resolved.
-.macro ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH slowPathLabel isInitialized
-    ld     $v0, THREAD_LOCAL_POS_OFFSET(rSELF)         # Load thread_local_pos.
-    ld     $a2, THREAD_LOCAL_END_OFFSET(rSELF)         # Load thread_local_end.
-    lwu    $t0, MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET($a0)  # Load the object size.
-    daddu  $a3, $v0, $t0                               # Add object size to tlab pos.
-
-    # When isInitialized == 0, then the class is potentially not yet initialized.
-    # If the class is not yet initialized, the object size will be very large to force the branch
-    # below to be taken.
-    #
-    # See InitializeClassVisitors in class-inl.h for more details.
-    bltuc  $a2, $a3, \slowPathLabel                    # Check if it fits, overflow works since the
-                                                       # tlab pos and end are 32 bit values.
-    # "Point of no slow path". Won't go to the slow path from here on.
-    sd     $a3, THREAD_LOCAL_POS_OFFSET(rSELF)         # Store new thread_local_pos.
-    ld     $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF)     # Increment thread_local_objects.
-    daddiu $a2, $a2, 1
-    sd     $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF)
-    POISON_HEAP_REF $a0
-    sw     $a0, MIRROR_OBJECT_CLASS_OFFSET($v0)        # Store the class pointer.
-
-.if \isInitialized == 0
-    # This barrier is only necessary when the allocation also requires a class initialization check.
-    #
-    # If the class is already observably initialized, then new-instance allocations are protected
-    # from publishing by the compiler which inserts its own StoreStore barrier.
-    sync                                               # Fence.
-.endif
-    jic    $ra, 0
-.endm
-
-// The common code for art_quick_alloc_object_resolved/initialized_tlab
-// and art_quick_alloc_object_resolved/initialized_region_tlab.
-.macro GENERATE_ALLOC_OBJECT_TLAB name, entrypoint, isInitialized
-ENTRY_NO_GP \name
-    # Fast path tlab allocation.
-    # a0: type, s1(rSELF): Thread::Current.
-    ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lslow_path_\name, \isInitialized
-.Lslow_path_\name:
-    SETUP_GP
-    SETUP_SAVE_REFS_ONLY_FRAME                         # Save callee saves in case of GC.
-    jal    \entrypoint                                 # (mirror::Class*, Thread*)
-    move   $a1, rSELF                                  # Pass Thread::Current.
-    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END \name
-.endm
-
-GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, /* isInitialized */ 0
-GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, /* isInitialized */ 1
-GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB, /* isInitialized */ 0
-GENERATE_ALLOC_OBJECT_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB, /* isInitialized */ 1
-
-// The common fast path code for art_quick_alloc_array_resolved/initialized_tlab
-// and art_quick_alloc_array_resolved/initialized_region_tlab.
-//
-// a0: type, a1: component_count, a2: total_size, s1(rSELF): Thread::Current.
-// Need to preserve a0 and a1 to the slow path.
-.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE slowPathLabel
-    dli    $a3, OBJECT_ALIGNMENT_MASK_TOGGLED64        # Apply alignemnt mask (addr + 7) & ~7.
-    and    $a2, $a2, $a3                               # The mask must be 64 bits to keep high
-                                                       # bits in case of overflow.
-    # Negative sized arrays are handled here since a1 holds a zero extended 32 bit value.
-    # Negative ints become large 64 bit unsigned ints which will always be larger than max signed
-    # 32 bit int. Since the max shift for arrays is 3, it can not become a negative 64 bit int.
-    dli    $a3, MIN_LARGE_OBJECT_THRESHOLD
-    bgeuc  $a2, $a3, \slowPathLabel                    # Possibly a large object, go slow path.
-
-    ld     $v0, THREAD_LOCAL_POS_OFFSET(rSELF)         # Load thread_local_pos.
-    ld     $t1, THREAD_LOCAL_END_OFFSET(rSELF)         # Load thread_local_end.
-    dsubu  $t2, $t1, $v0                               # Compute the remaining buffer size.
-    bltuc  $t2, $a2, \slowPathLabel                    # Check tlab for space, note that we use
-                                                       # (end - begin) to handle negative size
-                                                       # arrays. It is assumed that a negative size
-                                                       # will always be greater unsigned than region
-                                                       # size.
-
-    # "Point of no slow path". Won't go to the slow path from here on.
-    daddu  $a2, $v0, $a2                               # Add object size to tlab pos.
-    sd     $a2, THREAD_LOCAL_POS_OFFSET(rSELF)         # Store new thread_local_pos.
-    ld     $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF)     # Increment thread_local_objects.
-    daddiu $a2, $a2, 1
-    sd     $a2, THREAD_LOCAL_OBJECTS_OFFSET(rSELF)
-    POISON_HEAP_REF $a0
-    sw     $a0, MIRROR_OBJECT_CLASS_OFFSET($v0)        # Store the class pointer.
-    sw     $a1, MIRROR_ARRAY_LENGTH_OFFSET($v0)        # Store the array length.
-
-    jic    $ra, 0
-.endm
-
-.macro GENERATE_ALLOC_ARRAY_TLAB name, entrypoint, size_setup
-ENTRY_NO_GP \name
-    # Fast path array allocation for region tlab allocation.
-    # a0: mirror::Class* type
-    # a1: int32_t component_count
-    # s1(rSELF): Thread::Current
-    dext   $a4, $a1, 0, 32                             # Create zero-extended component_count. Value
-                                                       # in a1 is preserved in a case of slow path.
-    \size_setup .Lslow_path_\name
-    ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path_\name
-.Lslow_path_\name:
-    # a0: mirror::Class* type
-    # a1: int32_t component_count
-    # a2: Thread* self
-    SETUP_GP
-    SETUP_SAVE_REFS_ONLY_FRAME                         # Save callee saves in case of GC.
-    jal    \entrypoint
-    move   $a2, rSELF                                  # Pass Thread::Current.
-    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
-END \name
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_UNKNOWN slow_path
-    # Array classes are never finalizable or uninitialized, no need to check.
-    lwu    $a3, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($a0) # Load component type.
-    UNPOISON_HEAP_REF $a3
-    lw     $a3, MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET($a3)
-    dsrl   $a3, $a3, PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT   # Component size shift is in high 16 bits.
-    dsllv  $a2, $a4, $a3                               # Calculate data size.
-                                                       # Add array data offset and alignment.
-    daddiu $a2, $a2, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-#if MIRROR_WIDE_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
-#error Long array data offset must be 4 greater than int array data offset.
-#endif
-
-    daddiu $a3, $a3, 1                                 # Add 4 to the length only if the component
-    andi   $a3, $a3, 4                                 # size shift is 3 (for 64 bit alignment).
-    daddu  $a2, $a2, $a3
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_8 slow_path
-    # Add array data offset and alignment.
-    daddiu $a2, $a4, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_16 slow_path
-    dsll   $a2, $a4, 1
-    # Add array data offset and alignment.
-    daddiu $a2, $a2, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_32 slow_path
-    dsll   $a2, $a4, 2
-    # Add array data offset and alignment.
-    daddiu $a2, $a2, (MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-.endm
-
-.macro COMPUTE_ARRAY_SIZE_64 slow_path
-    dsll   $a2, $a4, 3
-    # Add array data offset and alignment.
-    daddiu $a2, $a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
-.endm
-
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_8
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_16
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_32
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_64
-
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_8
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_16
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_32
-GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64
-
-    /*
-     * Macro for resolution and initialization of indexed DEX file
-     * constants such as classes and strings. $a0 is both input and
-     * output.
-     */
-.macro ONE_ARG_SAVE_EVERYTHING_DOWNCALL name, entrypoint, runtime_method_offset = RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
-    .extern \entrypoint
-ENTRY_NO_GP \name
-    SETUP_SAVE_EVERYTHING_FRAME \runtime_method_offset  # Save everything in case of GC.
-    dla     $t9, \entrypoint
-    jalr    $t9                       # (uint32_t index, Thread*)
-    move    $a1, rSELF                # Pass Thread::Current (in delay slot).
-    beqz    $v0, 1f                   # Success?
-    move    $a0, $v0                  # Move result to $a0 (in delay slot).
-    RESTORE_SAVE_EVERYTHING_FRAME 0   # Restore everything except $a0.
-    jic     $ra, 0                    # Return on success.
-1:
-    DELIVER_PENDING_EXCEPTION_FRAME_READY
-END \name
-.endm
-
-.macro ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT name, entrypoint
-    ONE_ARG_SAVE_EVERYTHING_DOWNCALL \name, \entrypoint, RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OFFSET
-.endm
-
-    /*
-     * Entry from managed code to resolve a method handle. On entry, A0 holds the method handle
-     * index. On success the MethodHandle is returned, otherwise an exception is raised.
-     */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
-
-    /*
-     * Entry from managed code to resolve a method type. On entry, A0 holds the method type index.
-     * On success the MethodType is returned, otherwise an exception is raised.
-     */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_type, artResolveMethodTypeFromCode
-
-    /*
-     * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
-     * exception on error. On success the String is returned. A0 holds the string index. The fast
-     * path check for hit in strings cache has already been performed.
-     */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
-
-    /*
-     * Entry from managed code when uninitialized static storage, this stub will run the class
-     * initializer and deliver the exception on error. On success the static storage base is
-     * returned.
-     */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
-
-    /*
-     * Entry from managed code when dex cache misses for a type_idx.
-     */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_resolve_type, artResolveTypeFromCode
-
-    /*
-     * Entry from managed code when type_idx needs to be checked for access and dex cache may also
-     * miss.
-     */
-ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_type_and_verify_access, artResolveTypeAndVerifyAccessFromCode
-
-    /*
-     * Called by managed code when the value in rSUSPEND has been decremented to 0.
-     */
-    .extern artTestSuspendFromCode
-ENTRY_NO_GP art_quick_test_suspend
-    SETUP_SAVE_EVERYTHING_FRAME RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET
-                                              # save everything for stack crawl
-    jal    artTestSuspendFromCode             # (Thread*)
-    move   $a0, rSELF
-    RESTORE_SAVE_EVERYTHING_FRAME
-    jalr   $zero, $ra
-    nop
-END art_quick_test_suspend
-
-    /*
-     * Called by managed code that is attempting to call a method on a proxy class. On entry
-     * r0 holds the proxy method; r1, r2 and r3 may contain arguments.
-     */
-    .extern artQuickProxyInvokeHandler
-ENTRY art_quick_proxy_invoke_handler
-    SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
-    move    $a2, rSELF             # pass Thread::Current
-    jal     artQuickProxyInvokeHandler  # (Method* proxy method, receiver, Thread*, SP)
-    move    $a3, $sp               # pass $sp
-    ld      $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
-    daddiu  $sp, $sp, REFS_AND_ARGS_MINUS_REFS_SIZE  # skip a0-a7 and f12-f19
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    bne     $t0, $zero, 1f
-    dmtc1   $v0, $f0               # place return value to FP return value
-    jalr    $zero, $ra
-    dmtc1   $v1, $f1               # place return value to FP return value
-1:
-    DELIVER_PENDING_EXCEPTION
-END art_quick_proxy_invoke_handler
-
-    /*
-     * Called to resolve an imt conflict.
-     * a0 is the conflict ArtMethod.
-     * t0 is a hidden argument that holds the target interface method's dex method index.
-     *
-     * Mote that this stub writes to v0-v1, a0, t0-t3, t8-t9, f0-f11, f20-f23.
-     */
-    .extern artLookupResolvedMethod
-    .extern __atomic_load_16        # For __int128_t std::atomic::load(std::memory_order).
-ENTRY art_quick_imt_conflict_trampoline
-    SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL /* save_s4_thru_s8 */ 0
-
-    ld      $t1, FRAME_SIZE_SAVE_REFS_AND_ARGS($sp)  # $t1 = referrer.
-    // If the method is obsolete, just go through the dex cache miss slow path.
-    // The obsolete flag is set with suspended threads, so we do not need an acquire operation here.
-    lw      $t9, ART_METHOD_ACCESS_FLAGS_OFFSET($t1)  # $t9 = access flags.
-    sll     $t9, $t9, 31 - ACC_OBSOLETE_METHOD_SHIFT  # Move obsolete method bit to sign bit.
-    bltzc   $t9, .Limt_conflict_trampoline_dex_cache_miss
-    lwu     $t1, ART_METHOD_DECLARING_CLASS_OFFSET($t1)  # $t1 = declaring class (no read barrier).
-    lwu     $t1, MIRROR_CLASS_DEX_CACHE_OFFSET($t1)  # $t1 = dex cache (without read barrier).
-    UNPOISON_HEAP_REF $t1
-    dla     $t9, __atomic_load_16
-    ld      $t1, MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET($t1)  # $t1 = dex cache methods array.
-
-    dext    $s2, $t0, 0, 32                         # $s2 = zero-extended method index
-                                                    # (callee-saved).
-    ld      $s3, ART_METHOD_JNI_OFFSET_64($a0)      # $s3 = ImtConflictTable (callee-saved).
-
-    dext    $t0, $t0, 0, METHOD_DEX_CACHE_HASH_BITS  # $t0 = slot index.
-
-    li      $a1, STD_MEMORY_ORDER_RELAXED           # $a1 = std::memory_order_relaxed.
-    jalr    $t9                                     # [$v0, $v1] = __atomic_load_16($a0, $a1).
-    dlsa    $a0, $t0, $t1, POINTER_SIZE_SHIFT + 1   # $a0 = DexCache method slot address.
-
-    bnec    $v1, $s2, .Limt_conflict_trampoline_dex_cache_miss  # Branch if method index miss.
-
-.Limt_table_iterate:
-    ld      $t1, 0($s3)                             # Load next entry in ImtConflictTable.
-    # Branch if found.
-    beq     $t1, $v0, .Limt_table_found
-    nop
-    # If the entry is null, the interface method is not in the ImtConflictTable.
-    beqzc   $t1, .Lconflict_trampoline
-    # Iterate over the entries of the ImtConflictTable.
-    daddiu  $s3, $s3, 2 * __SIZEOF_POINTER__        # Iterate to the next entry.
-    bc      .Limt_table_iterate
-
-.Limt_table_found:
-    # We successfully hit an entry in the table. Load the target method and jump to it.
-    .cfi_remember_state
-    ld      $a0, __SIZEOF_POINTER__($s3)
-    ld      $t9, ART_METHOD_QUICK_CODE_OFFSET_64($a0)
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME /* restore_s4_thru_s8 */ 0
-    jic     $t9, 0
-    .cfi_restore_state
-
-.Lconflict_trampoline:
-    # Call the runtime stub to populate the ImtConflictTable and jump to the resolved method.
-    .cfi_remember_state
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME_A1             # Restore this.
-    move    $a0, $v0                                # Load interface method.
-    INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline, /* save_s4_thru_s8_only */ 1
-    .cfi_restore_state
-
-.Limt_conflict_trampoline_dex_cache_miss:
-    # We're not creating a proper runtime method frame here,
-    # artLookupResolvedMethod() is not allowed to walk the stack.
-    dla     $t9, artLookupResolvedMethod
-    ld      $a1, FRAME_SIZE_SAVE_REFS_AND_ARGS($sp)  # $a1 = referrer.
-    jalr    $t9                                     # (uint32_t method_index, ArtMethod* referrer).
-    sll     $a0, $s2, 0                             # $a0 = sign-extended method index.
-
-    # If the method wasn't resolved, skip the lookup and go to artInvokeInterfaceTrampoline().
-    beqzc   $v0, .Lconflict_trampoline
-    nop
-    bc      .Limt_table_iterate
-END art_quick_imt_conflict_trampoline
-
-    .extern artQuickResolutionTrampoline
-ENTRY art_quick_resolution_trampoline
-    SETUP_SAVE_REFS_AND_ARGS_FRAME
-    move    $a2, rSELF             # pass Thread::Current
-    jal     artQuickResolutionTrampoline  # (Method* called, receiver, Thread*, SP)
-    move    $a3, $sp               # pass $sp
-    beq     $v0, $zero, 1f
-    ld      $a0, 0($sp)            # load resolved method in $a0
-                                   # artQuickResolutionTrampoline puts resolved method in *SP
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME
-    move    $t9, $v0               # code pointer must be in $t9 to generate the global pointer
-    jalr    $zero, $t9             # tail call to method
-    nop
-1:
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME
-    DELIVER_PENDING_EXCEPTION
-END art_quick_resolution_trampoline
-
-    .extern artQuickGenericJniTrampoline
-    .extern artQuickGenericJniEndTrampoline
-ENTRY art_quick_generic_jni_trampoline
-    SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
-    move    $s8, $sp               # save $sp
-
-    # prepare for call to artQuickGenericJniTrampoline(Thread*, SP)
-    move    $a0, rSELF             # pass Thread::Current
-    move    $a1, $sp               # pass $sp
-    jal     artQuickGenericJniTrampoline   # (Thread*, SP)
-    daddiu  $sp, $sp, -5120        # reserve space on the stack
-
-    # The C call will have registered the complete save-frame on success.
-    # The result of the call is:
-    # v0: ptr to native code, 0 on error.
-    # v1: ptr to the bottom of the used area of the alloca, can restore stack till here.
-    beq     $v0, $zero, 1f         # check entry error
-    move    $t9, $v0               # save the code ptr
-    move    $sp, $v1               # release part of the alloca
-
-    # Load parameters from stack into registers
-    ld      $a0,   0($sp)
-    ld      $a1,   8($sp)
-    ld      $a2,  16($sp)
-    ld      $a3,  24($sp)
-    ld      $a4,  32($sp)
-    ld      $a5,  40($sp)
-    ld      $a6,  48($sp)
-    ld      $a7,  56($sp)
-    # Load FPRs the same as GPRs. Look at BuildNativeCallFrameStateMachine.
-    l.d     $f12,  0($sp)
-    l.d     $f13,  8($sp)
-    l.d     $f14, 16($sp)
-    l.d     $f15, 24($sp)
-    l.d     $f16, 32($sp)
-    l.d     $f17, 40($sp)
-    l.d     $f18, 48($sp)
-    l.d     $f19, 56($sp)
-    jalr    $t9                    # native call
-    daddiu  $sp, $sp, 64
-
-    # result sign extension is handled in C code
-    # prepare for call to artQuickGenericJniEndTrampoline(Thread*, result, result_f)
-    move    $a0, rSELF             # pass Thread::Current
-    move    $a1, $v0
-    jal     artQuickGenericJniEndTrampoline
-    dmfc1   $a2, $f0
-
-    ld      $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
-    bne     $t0, $zero, 1f         # check for pending exceptions
-    move    $sp, $s8               # tear down the alloca
-
-    # tear dpown the callee-save frame
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME
-
-    jalr    $zero, $ra
-    dmtc1   $v0, $f0               # place return value to FP return value
-
-1:
-    ld      $t0, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
-    daddiu  $sp, $t0, -1  // Remove the GenericJNI tag.
-    # This will create a new save-all frame, required by the runtime.
-    DELIVER_PENDING_EXCEPTION
-END art_quick_generic_jni_trampoline
-
-    .extern artQuickToInterpreterBridge
-ENTRY art_quick_to_interpreter_bridge
-    SETUP_SAVE_REFS_AND_ARGS_FRAME
-    move    $a1, rSELF             # pass Thread::Current
-    jal     artQuickToInterpreterBridge    # (Method* method, Thread*, SP)
-    move    $a2, $sp               # pass $sp
-    ld      $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
-    daddiu  $sp, $sp, REFS_AND_ARGS_MINUS_REFS_SIZE  # skip a0-a7 and f12-f19
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    bne     $t0, $zero, 1f
-    dmtc1   $v0, $f0               # place return value to FP return value
-    jalr    $zero, $ra
-    dmtc1   $v1, $f1               # place return value to FP return value
-1:
-    DELIVER_PENDING_EXCEPTION
-END art_quick_to_interpreter_bridge
-
-    .extern artInvokeObsoleteMethod
-ENTRY art_invoke_obsolete_method_stub
-    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
-    jal     artInvokeObsoleteMethod    # (Method* method, Thread* self)
-    move    $a1, rSELF                 # pass Thread::Current
-END art_invoke_obsolete_method_stub
-
-    /*
-     * Routine that intercepts method calls and returns.
-     */
-    .extern artInstrumentationMethodEntryFromCode
-    .extern artInstrumentationMethodExitFromCode
-ENTRY art_quick_instrumentation_entry
-    SETUP_SAVE_REFS_AND_ARGS_FRAME
-    # Preserve $a0 knowing there is a spare slot in kSaveRefsAndArgs.
-    sd      $a0, 8($sp)     # Save arg0.
-    move    $a3, $sp        # Pass $sp.
-    jal     artInstrumentationMethodEntryFromCode  # (Method*, Object*, Thread*, SP)
-    move    $a2, rSELF      # pass Thread::Current
-    beqzc   $v0, .Ldeliver_instrumentation_entry_exception
-                            # Deliver exception if we got nullptr as function.
-    move    $t9, $v0        # $t9 holds reference to code
-    ld      $a0, 8($sp)     # Restore arg0.
-    dla     $v0, art_quick_instrumentation_exit
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME
-    move    $ra, $v0
-    jic     $t9, 0          # call method, returning to art_quick_instrumentation_exit
-.Ldeliver_instrumentation_entry_exception:
-    RESTORE_SAVE_REFS_AND_ARGS_FRAME
-    DELIVER_PENDING_EXCEPTION
-END art_quick_instrumentation_entry
-
-ENTRY_NO_GP art_quick_instrumentation_exit
-    move    $ra, $zero      # RA points here, so clobber with 0 for later checks.
-    SETUP_SAVE_EVERYTHING_FRAME
-
-    daddiu  $a3, $sp, 16    # Pass fpr_res pointer ($f0 in SAVE_EVERYTHING_FRAME).
-    daddiu  $a2, $sp, 280   # Pass gpr_res pointer ($v0 in SAVE_EVERYTHING_FRAME).
-    move    $a1, $sp        # Pass $sp.
-    jal     artInstrumentationMethodExitFromCode  # (Thread*, SP, gpr_res*, fpr_res*)
-    move    $a0, rSELF      # pass Thread::Current
-
-    beqzc   $v0, .Ldo_deliver_instrumentation_exception
-                            # Deliver exception if we got nullptr as function.
-    nop
-    bnez    $v1, .Ldeoptimize
-
-    # Normal return.
-    sd      $v0, (FRAME_SIZE_SAVE_EVERYTHING-8)($sp)  # Set return pc.
-    RESTORE_SAVE_EVERYTHING_FRAME
-    jic     $ra, 0
-.Ldo_deliver_instrumentation_exception:
-    DELIVER_PENDING_EXCEPTION_FRAME_READY
-.Ldeoptimize:
-    b       art_quick_deoptimize
-    sd      $v1, (FRAME_SIZE_SAVE_EVERYTHING-8)($sp)
-                            # Fake a call from instrumentation return pc.
-END art_quick_instrumentation_exit
-
-    /*
-     * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
-     * will long jump to the upcall with a special exception of -1.
-     */
-    .extern artDeoptimize
-ENTRY_NO_GP_CUSTOM_CFA art_quick_deoptimize, FRAME_SIZE_SAVE_EVERYTHING
-    # SETUP_SAVE_EVERYTHING_FRAME has been done by art_quick_instrumentation_exit.
-    .cfi_rel_offset 31, 488
-    .cfi_rel_offset 30, 480
-    .cfi_rel_offset 28, 472
-    .cfi_rel_offset 25, 464
-    .cfi_rel_offset 24, 456
-    .cfi_rel_offset 23, 448
-    .cfi_rel_offset 22, 440
-    .cfi_rel_offset 21, 432
-    .cfi_rel_offset 20, 424
-    .cfi_rel_offset 19, 416
-    .cfi_rel_offset 18, 408
-    .cfi_rel_offset 17, 400
-    .cfi_rel_offset 16, 392
-    .cfi_rel_offset 15, 384
-    .cfi_rel_offset 14, 376
-    .cfi_rel_offset 13, 368
-    .cfi_rel_offset 12, 360
-    .cfi_rel_offset 11, 352
-    .cfi_rel_offset 10, 344
-    .cfi_rel_offset 9, 336
-    .cfi_rel_offset 8, 328
-    .cfi_rel_offset 7, 320
-    .cfi_rel_offset 6, 312
-    .cfi_rel_offset 5, 304
-    .cfi_rel_offset 4, 296
-    .cfi_rel_offset 3, 288
-    .cfi_rel_offset 2, 280
-    .cfi_rel_offset 1, 272
-
-    jal     artDeoptimize   # artDeoptimize(Thread*)
-    move    $a0, rSELF      # pass Thread::current
-    break
-END art_quick_deoptimize
-
-    /*
-     * Compiled code has requested that we deoptimize into the interpreter. The deoptimization
-     * will long jump to the upcall with a special exception of -1.
-     */
-    .extern artDeoptimizeFromCompiledCode
-ENTRY_NO_GP art_quick_deoptimize_from_compiled_code
-    SETUP_SAVE_EVERYTHING_FRAME
-    jal      artDeoptimizeFromCompiledCode    # (DeoptimizationKind, Thread*)
-    move     $a1, rSELF                       # pass Thread::current
-END art_quick_deoptimize_from_compiled_code
-
-  .set push
-  .set noat
-/* java.lang.String.compareTo(String anotherString) */
-ENTRY_NO_GP art_quick_string_compareto
-/* $a0 holds address of "this" */
-/* $a1 holds address of "anotherString" */
-    move   $a2, $zero
-    beq    $a0, $a1, .Lstring_compareto_length_diff # this and anotherString are the same object
-    move   $a3, $zero                               # return 0 (it returns a2 - a3)
-
-#if (STRING_COMPRESSION_FEATURE)
-    lw     $a4, MIRROR_STRING_COUNT_OFFSET($a0)     # 'count' field of this
-    lw     $a5, MIRROR_STRING_COUNT_OFFSET($a1)     # 'count' field of anotherString
-    sra    $a2, $a4, 1                              # this.length()
-    sra    $a3, $a5, 1                              # anotherString.length()
-#else
-    lw     $a2, MIRROR_STRING_COUNT_OFFSET($a0)     # this.length()
-    lw     $a3, MIRROR_STRING_COUNT_OFFSET($a1)     # anotherString.length()
-#endif
-
-    MINu   $t2, $a2, $a3
-    # $t2 now holds min(this.length(),anotherString.length())
-
-    # while min(this.length(),anotherString.length())-i != 0
-    beqzc  $t2, .Lstring_compareto_length_diff # if $t2==0
-                                               #     return (this.length() - anotherString.length())
-
-#if (STRING_COMPRESSION_FEATURE)
-    # Differ cases:
-    dext   $a6, $a4, 0, 1
-    beqz   $a6, .Lstring_compareto_this_is_compressed
-    dext   $a6, $a5, 0, 1                      # In branch delay slot.
-    beqz   $a6, .Lstring_compareto_that_is_compressed
-    nop
-    b      .Lstring_compareto_both_not_compressed
-    nop
-
-.Lstring_compareto_this_is_compressed:
-    beqzc  $a6, .Lstring_compareto_both_compressed
-    /* If (this->IsCompressed() && that->IsCompressed() == false) */
-.Lstring_compareto_loop_comparison_this_compressed:
-    lbu    $t0, MIRROR_STRING_VALUE_OFFSET($a0)
-    lhu    $t1, MIRROR_STRING_VALUE_OFFSET($a1)
-    bnec   $t0, $t1, .Lstring_compareto_char_diff
-    daddiu $a0, $a0, 1      # point at this.charAt(i++) - compressed
-    subu   $t2, $t2, 1      # new value of min(this.length(),anotherString.length())-i
-    bnez   $t2, .Lstring_compareto_loop_comparison_this_compressed
-    daddiu $a1, $a1, 2      # point at anotherString.charAt(i++) - uncompressed
-    jalr   $zero, $ra
-    subu   $v0, $a2, $a3    # return (this.length() - anotherString.length())
-
-.Lstring_compareto_that_is_compressed:
-    lhu    $t0, MIRROR_STRING_VALUE_OFFSET($a0)
-    lbu    $t1, MIRROR_STRING_VALUE_OFFSET($a1)
-    bnec   $t0, $t1, .Lstring_compareto_char_diff
-    daddiu $a0, $a0, 2      # point at this.charAt(i++) - uncompressed
-    subu   $t2, $t2, 1      # new value of min(this.length(),anotherString.length())-i
-    bnez   $t2, .Lstring_compareto_that_is_compressed
-    daddiu $a1, $a1, 1      # point at anotherString.charAt(i++) - compressed
-    jalr   $zero, $ra
-    subu   $v0, $a2, $a3    # return (this.length() - anotherString.length())
-
-.Lstring_compareto_both_compressed:
-    lbu    $t0, MIRROR_STRING_VALUE_OFFSET($a0)
-    lbu    $t1, MIRROR_STRING_VALUE_OFFSET($a1)
-    bnec   $t0, $t1, .Lstring_compareto_char_diff
-    daddiu $a0, $a0, 1      # point at this.charAt(i++) - compressed
-    subu   $t2, $t2, 1      # new value of min(this.length(),anotherString.length())-i
-    bnez   $t2, .Lstring_compareto_both_compressed
-    daddiu $a1, $a1, 1      # point at anotherString.charAt(i++) - compressed
-    jalr   $zero, $ra
-    subu   $v0, $a2, $a3    # return (this.length() - anotherString.length())
-#endif
-
-.Lstring_compareto_both_not_compressed:
-    lhu    $t0, MIRROR_STRING_VALUE_OFFSET($a0)    # while this.charAt(i) == anotherString.charAt(i)
-    lhu    $t1, MIRROR_STRING_VALUE_OFFSET($a1)
-    bnec   $t0, $t1, .Lstring_compareto_char_diff  # if this.charAt(i) != anotherString.charAt(i)
-                            #     return (this.charAt(i) - anotherString.charAt(i))
-    daddiu $a0, $a0, 2      # point at this.charAt(i++)
-    subu   $t2, $t2, 1      # new value of min(this.length(),anotherString.length())-i
-    bnez   $t2, .Lstring_compareto_both_not_compressed
-    daddiu $a1, $a1, 2      # point at anotherString.charAt(i++)
-
-.Lstring_compareto_length_diff:
-    jalr   $zero, $ra
-    subu   $v0, $a2, $a3    # return (this.length() - anotherString.length())
-
-.Lstring_compareto_char_diff:
-    jalr   $zero, $ra
-    subu   $v0, $t0, $t1    # return (this.charAt(i) - anotherString.charAt(i))
-END art_quick_string_compareto
-
-/* java.lang.String.indexOf(int ch, int fromIndex=0) */
-ENTRY_NO_GP art_quick_indexof
-/* $a0 holds address of "this" */
-/* $a1 holds "ch" */
-/* $a2 holds "fromIndex" */
-#if (STRING_COMPRESSION_FEATURE)
-    lw     $a3, MIRROR_STRING_COUNT_OFFSET($a0)     # 'count' field of this
-#else
-    lw     $t0, MIRROR_STRING_COUNT_OFFSET($a0)     # this.length()
-#endif
-    slt    $at, $a2, $zero  # if fromIndex < 0
-    seleqz $a2, $a2, $at    #     fromIndex = 0;
-#if (STRING_COMPRESSION_FEATURE)
-    srl   $t0, $a3, 1       # $a3 holds count (with flag) and $t0 holds actual length
-#endif
-    subu   $t0, $t0, $a2    # this.length() - fromIndex
-    blez   $t0, 6f          # if this.length()-fromIndex <= 0
-    li     $v0, -1          #     return -1;
-
-#if (STRING_COMPRESSION_FEATURE)
-    dext   $a3, $a3, 0, 1   # Extract compression flag.
-    beqzc  $a3, .Lstring_indexof_compressed
-#endif
-
-    sll    $v0, $a2, 1      # $a0 += $a2 * 2
-    daddu  $a0, $a0, $v0    #  "  ditto  "
-    move   $v0, $a2         # Set i to fromIndex.
-
-1:
-    lhu    $t3, MIRROR_STRING_VALUE_OFFSET($a0)     # if this.charAt(i) == ch
-    beq    $t3, $a1, 6f                             #     return i;
-    daddu  $a0, $a0, 2      # i++
-    subu   $t0, $t0, 1      # this.length() - i
-    bnez   $t0, 1b          # while this.length() - i > 0
-    addu   $v0, $v0, 1      # i++
-
-    li     $v0, -1          # if this.length() - i <= 0
-                            #     return -1;
-
-6:
-    j      $ra
-    nop
-
-#if (STRING_COMPRESSION_FEATURE)
-.Lstring_indexof_compressed:
-    move   $a4, $a0         # Save a copy in $a4 to later compute result.
-    daddu  $a0, $a0, $a2    # $a0 += $a2
-
-.Lstring_indexof_compressed_loop:
-    lbu    $t3, MIRROR_STRING_VALUE_OFFSET($a0)
-    beq    $t3, $a1, .Lstring_indexof_compressed_matched
-    subu   $t0, $t0, 1
-    bgtz   $t0, .Lstring_indexof_compressed_loop
-    daddu  $a0, $a0, 1
-
-.Lstring_indexof_nomatch:
-    jalr   $zero, $ra
-    li     $v0, -1          # return -1;
-
-.Lstring_indexof_compressed_matched:
-    jalr   $zero, $ra
-    dsubu  $v0, $a0, $a4    # return (current - start);
-#endif
-END art_quick_indexof
-
-    /*
-     * Create a function `name` calling the ReadBarrier::Mark routine,
-     * getting its argument and returning its result through register
-     * `reg`, saving and restoring all caller-save registers.
-     */
-.macro READ_BARRIER_MARK_REG name, reg
-ENTRY \name
-    // Null check so that we can load the lock word.
-    bnezc   \reg, .Lnot_null_\name
-    nop
-.Lret_rb_\name:
-    jic     $ra, 0
-.Lnot_null_\name:
-    // Check lock word for mark bit, if marked return.
-    lw      $t9, MIRROR_OBJECT_LOCK_WORD_OFFSET(\reg)
-    .set push
-    .set noat
-    sll     $at, $t9, 31 - LOCK_WORD_MARK_BIT_SHIFT     # Move mark bit to sign bit.
-    bltzc   $at, .Lret_rb_\name
-#if (LOCK_WORD_STATE_SHIFT != 30) || (LOCK_WORD_STATE_FORWARDING_ADDRESS != 3)
-    // The below code depends on the lock word state being in the highest bits
-    // and the "forwarding address" state having all bits set.
-#error "Unexpected lock word state shift or forwarding address state value."
-#endif
-    // Test that both the forwarding state bits are 1.
-    sll     $at, $t9, 1
-    and     $at, $at, $t9                               # Sign bit = 1 IFF both bits are 1.
-    bltzc   $at, .Lret_forwarding_address\name
-    .set pop
-
-    daddiu  $sp, $sp, -320
-    .cfi_adjust_cfa_offset 320
-
-    sd      $ra, 312($sp)
-    .cfi_rel_offset 31, 312
-    sd      $t8, 304($sp)       # save t8 holding caller's gp
-    .cfi_rel_offset 24, 304
-    sd      $t3, 296($sp)
-    .cfi_rel_offset 15, 296
-    sd      $t2, 288($sp)
-    .cfi_rel_offset 14, 288
-    sd      $t1, 280($sp)
-    .cfi_rel_offset 13, 280
-    sd      $t0, 272($sp)
-    .cfi_rel_offset 12, 272
-    sd      $a7, 264($sp)
-    .cfi_rel_offset 11, 264
-    sd      $a6, 256($sp)
-    .cfi_rel_offset 10, 256
-    sd      $a5, 248($sp)
-    .cfi_rel_offset 9, 248
-    sd      $a4, 240($sp)
-    .cfi_rel_offset 8, 240
-    sd      $a3, 232($sp)
-    .cfi_rel_offset 7, 232
-    sd      $a2, 224($sp)
-    .cfi_rel_offset 6, 224
-    sd      $a1, 216($sp)
-    .cfi_rel_offset 5, 216
-    sd      $a0, 208($sp)
-    .cfi_rel_offset 4, 208
-    sd      $v1, 200($sp)
-    .cfi_rel_offset 3, 200
-    sd      $v0, 192($sp)
-    .cfi_rel_offset 2, 192
-
-    dla     $t9, artReadBarrierMark
-
-    sdc1    $f23, 184($sp)
-    sdc1    $f22, 176($sp)
-    sdc1    $f21, 168($sp)
-    sdc1    $f20, 160($sp)
-    sdc1    $f19, 152($sp)
-    sdc1    $f18, 144($sp)
-    sdc1    $f17, 136($sp)
-    sdc1    $f16, 128($sp)
-    sdc1    $f15, 120($sp)
-    sdc1    $f14, 112($sp)
-    sdc1    $f13, 104($sp)
-    sdc1    $f12,  96($sp)
-    sdc1    $f11,  88($sp)
-    sdc1    $f10,  80($sp)
-    sdc1    $f9,   72($sp)
-    sdc1    $f8,   64($sp)
-    sdc1    $f7,   56($sp)
-    sdc1    $f6,   48($sp)
-    sdc1    $f5,   40($sp)
-    sdc1    $f4,   32($sp)
-    sdc1    $f3,   24($sp)
-    sdc1    $f2,   16($sp)
-    sdc1    $f1,    8($sp)
-
-    .ifnc \reg, $a0
-      move  $a0, \reg           # pass obj from `reg` in a0
-    .endif
-    jalr    $t9                 # v0 <- artReadBarrierMark(obj)
-    sdc1    $f0,    0($sp)      # in delay slot
-
-    ld      $ra, 312($sp)
-    .cfi_restore 31
-    ld      $t8, 304($sp)       # restore t8 holding caller's gp
-    .cfi_restore 24
-    ld      $t3, 296($sp)
-    .cfi_restore 15
-    ld      $t2, 288($sp)
-    .cfi_restore 14
-    ld      $t1, 280($sp)
-    .cfi_restore 13
-    ld      $t0, 272($sp)
-    .cfi_restore 12
-    ld      $a7, 264($sp)
-    .cfi_restore 11
-    ld      $a6, 256($sp)
-    .cfi_restore 10
-    ld      $a5, 248($sp)
-    .cfi_restore 9
-    ld      $a4, 240($sp)
-    .cfi_restore 8
-    ld      $a3, 232($sp)
-    .cfi_restore 7
-    ld      $a2, 224($sp)
-    .cfi_restore 6
-    ld      $a1, 216($sp)
-    .cfi_restore 5
-    ld      $a0, 208($sp)
-    .cfi_restore 4
-    ld      $v1, 200($sp)
-    .cfi_restore 3
-
-    .ifnc \reg, $v0
-      move  \reg, $v0           # `reg` <- v0
-      ld    $v0, 192($sp)
-      .cfi_restore 2
-    .endif
-
-    ldc1    $f23, 184($sp)
-    ldc1    $f22, 176($sp)
-    ldc1    $f21, 168($sp)
-    ldc1    $f20, 160($sp)
-    ldc1    $f19, 152($sp)
-    ldc1    $f18, 144($sp)
-    ldc1    $f17, 136($sp)
-    ldc1    $f16, 128($sp)
-    ldc1    $f15, 120($sp)
-    ldc1    $f14, 112($sp)
-    ldc1    $f13, 104($sp)
-    ldc1    $f12,  96($sp)
-    ldc1    $f11,  88($sp)
-    ldc1    $f10,  80($sp)
-    ldc1    $f9,   72($sp)
-    ldc1    $f8,   64($sp)
-    ldc1    $f7,   56($sp)
-    ldc1    $f6,   48($sp)
-    ldc1    $f5,   40($sp)
-    ldc1    $f4,   32($sp)
-    ldc1    $f3,   24($sp)
-    ldc1    $f2,   16($sp)
-    ldc1    $f1,    8($sp)
-    ldc1    $f0,    0($sp)
-
-    .cpreturn                   # restore caller's gp from t8
-    jalr    $zero, $ra
-    daddiu  $sp, $sp, 320
-    .cfi_adjust_cfa_offset -320
-
-.Lret_forwarding_address\name:
-    // Shift left by the forwarding address shift. This clears out the state bits since they are
-    // in the top 2 bits of the lock word.
-    sll     \reg, $t9, LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
-    jalr    $zero, $ra
-    dext    \reg, \reg, 0, 32   # Make sure the address is zero-extended.
-END \name
-.endm
-
-// Note that art_quick_read_barrier_mark_regXX corresponds to register XX+1.
-// ZERO (register 0) is reserved.
-// AT (register 1) is reserved as a temporary/scratch register.
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg01, $v0
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg02, $v1
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg03, $a0
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg04, $a1
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg05, $a2
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg06, $a3
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg07, $a4
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg08, $a5
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg09, $a6
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg10, $a7
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg11, $t0
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg12, $t1
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg13, $t2
-// T3 (register 15) is reserved as a temporary/scratch register.
-// S0 and S1 (registers 16 and 17) are reserved as suspended and thread registers.
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg17, $s2
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, $s3
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg19, $s4
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg20, $s5
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg21, $s6
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg22, $s7
-// T8 and T9 (registers 24 and 25) are reserved as temporary/scratch registers.
-// K0, K1, GP, SP (registers 26 - 29) are reserved.
-READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, $s8
-// RA (register 31) is reserved.
-
-// Caller code:
-// Short constant offset/index:
-//  ld      $t9, pReadBarrierMarkReg00
-//  beqzc   $t9, skip_call
-//  nop
-//  jialc   $t9, thunk_disp
-// skip_call:
-//  lwu     `out`, ofs(`obj`)
-// [dsubu   `out`, $zero, `out`
-//  dext    `out`, `out`, 0, 32]  # Unpoison reference.
-.macro BRB_FIELD_SHORT_OFFSET_ENTRY obj
-    # Explicit null check. May be redundant (for array elements or when the field
-    # offset is larger than the page size, 4KB).
-    # $ra will be adjusted to point to lwu's stack map when throwing NPE.
-    beqzc   \obj, .Lintrospection_throw_npe
-    lapc    $t3, .Lintrospection_exits                  # $t3 = address of .Lintrospection_exits.
-    .set push
-    .set noat
-    lw      $at, MIRROR_OBJECT_LOCK_WORD_OFFSET(\obj)
-    sll     $at, $at, 31 - LOCK_WORD_READ_BARRIER_STATE_SHIFT   # Move barrier state bit
-                                                                # to sign bit.
-    bltz    $at, .Lintrospection_field_array            # If gray, load reference, mark.
-    move    $t8, \obj                                   # Move `obj` to $t8 for common code.
-    .set pop
-    jalr    $zero, $ra                                  # Otherwise, load-load barrier and return.
-    sync
-.endm
-
-// Caller code:
-// Long constant offset/index:   | Variable index:
-//  ld      $t9, pReadBarrierMarkReg00
-//  beqz    $t9, skip_call       |  beqz    $t9, skip_call
-//  daui    $t8, `obj`, ofs_hi   |  dlsa    $t8, `index`, `obj`, 2
-//  jialc   $t9, thunk_disp      |  jialc   $t9, thunk_disp
-// skip_call:                    | skip_call:
-//  lwu     `out`, ofs_lo($t8)   |  lwu     `out`, ofs($t8)
-// [dsubu   `out`, $zero, `out`  | [dsubu   `out`, $zero, `out`
-//  dext    `out`, `out`, 0, 32] |  dext    `out`, `out`, 0, 32]  # Unpoison reference.
-.macro BRB_FIELD_LONG_OFFSET_ENTRY obj
-    # No explicit null check for variable indices or large constant indices/offsets
-    # as it must have been done earlier.
-    lapc    $t3, .Lintrospection_exits                  # $t3 = address of .Lintrospection_exits.
-    .set push
-    .set noat
-    lw      $at, MIRROR_OBJECT_LOCK_WORD_OFFSET(\obj)
-    sll     $at, $at, 31 - LOCK_WORD_READ_BARRIER_STATE_SHIFT   # Move barrier state bit
-                                                                # to sign bit.
-    bltzc   $at, .Lintrospection_field_array            # If gray, load reference, mark.
-    .set pop
-    sync                                                # Otherwise, load-load barrier and return.
-    jic     $ra, 0
-    break                                               # Padding to 8 instructions.
-    break
-.endm
-
-.macro BRB_GC_ROOT_ENTRY root
-    lapc    $t3, .Lintrospection_exit_\root             # $t3 = exit point address.
-    bnez    \root, .Lintrospection_common
-    move    $t8, \root                                  # Move reference to $t8 for common code.
-    jic     $ra, 0                                      # Return if null.
-.endm
-
-.macro BRB_FIELD_EXIT out
-.Lintrospection_exit_\out:
-    jalr    $zero, $ra
-    move    \out, $t8                                   # Return reference in expected register.
-.endm
-
-.macro BRB_FIELD_EXIT_BREAK
-    break
-    break
-.endm
-
-ENTRY_NO_GP art_quick_read_barrier_mark_introspection
-    # Entry points for offsets/indices not fitting into int16_t and for variable indices.
-    BRB_FIELD_LONG_OFFSET_ENTRY $v0
-    BRB_FIELD_LONG_OFFSET_ENTRY $v1
-    BRB_FIELD_LONG_OFFSET_ENTRY $a0
-    BRB_FIELD_LONG_OFFSET_ENTRY $a1
-    BRB_FIELD_LONG_OFFSET_ENTRY $a2
-    BRB_FIELD_LONG_OFFSET_ENTRY $a3
-    BRB_FIELD_LONG_OFFSET_ENTRY $a4
-    BRB_FIELD_LONG_OFFSET_ENTRY $a5
-    BRB_FIELD_LONG_OFFSET_ENTRY $a6
-    BRB_FIELD_LONG_OFFSET_ENTRY $a7
-    BRB_FIELD_LONG_OFFSET_ENTRY $t0
-    BRB_FIELD_LONG_OFFSET_ENTRY $t1
-    BRB_FIELD_LONG_OFFSET_ENTRY $t2
-    BRB_FIELD_LONG_OFFSET_ENTRY $s2
-    BRB_FIELD_LONG_OFFSET_ENTRY $s3
-    BRB_FIELD_LONG_OFFSET_ENTRY $s4
-    BRB_FIELD_LONG_OFFSET_ENTRY $s5
-    BRB_FIELD_LONG_OFFSET_ENTRY $s6
-    BRB_FIELD_LONG_OFFSET_ENTRY $s7
-    BRB_FIELD_LONG_OFFSET_ENTRY $s8
-
-    # Entry points for offsets/indices fitting into int16_t.
-    BRB_FIELD_SHORT_OFFSET_ENTRY $v0
-    BRB_FIELD_SHORT_OFFSET_ENTRY $v1
-    BRB_FIELD_SHORT_OFFSET_ENTRY $a0
-    BRB_FIELD_SHORT_OFFSET_ENTRY $a1
-    BRB_FIELD_SHORT_OFFSET_ENTRY $a2
-    BRB_FIELD_SHORT_OFFSET_ENTRY $a3
-    BRB_FIELD_SHORT_OFFSET_ENTRY $a4
-    BRB_FIELD_SHORT_OFFSET_ENTRY $a5
-    BRB_FIELD_SHORT_OFFSET_ENTRY $a6
-    BRB_FIELD_SHORT_OFFSET_ENTRY $a7
-    BRB_FIELD_SHORT_OFFSET_ENTRY $t0
-    BRB_FIELD_SHORT_OFFSET_ENTRY $t1
-    BRB_FIELD_SHORT_OFFSET_ENTRY $t2
-    BRB_FIELD_SHORT_OFFSET_ENTRY $s2
-    BRB_FIELD_SHORT_OFFSET_ENTRY $s3
-    BRB_FIELD_SHORT_OFFSET_ENTRY $s4
-    BRB_FIELD_SHORT_OFFSET_ENTRY $s5
-    BRB_FIELD_SHORT_OFFSET_ENTRY $s6
-    BRB_FIELD_SHORT_OFFSET_ENTRY $s7
-    BRB_FIELD_SHORT_OFFSET_ENTRY $s8
-
-    .global art_quick_read_barrier_mark_introspection_gc_roots
-art_quick_read_barrier_mark_introspection_gc_roots:
-    # Entry points for GC roots.
-    BRB_GC_ROOT_ENTRY $v0
-    BRB_GC_ROOT_ENTRY $v1
-    BRB_GC_ROOT_ENTRY $a0
-    BRB_GC_ROOT_ENTRY $a1
-    BRB_GC_ROOT_ENTRY $a2
-    BRB_GC_ROOT_ENTRY $a3
-    BRB_GC_ROOT_ENTRY $a4
-    BRB_GC_ROOT_ENTRY $a5
-    BRB_GC_ROOT_ENTRY $a6
-    BRB_GC_ROOT_ENTRY $a7
-    BRB_GC_ROOT_ENTRY $t0
-    BRB_GC_ROOT_ENTRY $t1
-    BRB_GC_ROOT_ENTRY $t2
-    BRB_GC_ROOT_ENTRY $s2
-    BRB_GC_ROOT_ENTRY $s3
-    BRB_GC_ROOT_ENTRY $s4
-    BRB_GC_ROOT_ENTRY $s5
-    BRB_GC_ROOT_ENTRY $s6
-    BRB_GC_ROOT_ENTRY $s7
-    BRB_GC_ROOT_ENTRY $s8
-    .global art_quick_read_barrier_mark_introspection_end_of_entries
-art_quick_read_barrier_mark_introspection_end_of_entries:
-
-.Lintrospection_throw_npe:
-    b       art_quick_throw_null_pointer_exception
-    daddiu  $ra, $ra, 4         # Skip lwu, make $ra point to lwu's stack map.
-
-    .set push
-    .set noat
-
-    // Fields and array elements.
-
-.Lintrospection_field_array:
-    // Get the field/element address using $t8 and the offset from the lwu instruction.
-    lh      $at, 0($ra)         # $ra points to lwu: $at = low 16 bits of field/element offset.
-    daddiu  $ra, $ra, 4 + HEAP_POISON_INSTR_SIZE   # Skip lwu(+dsubu+dext).
-    daddu   $t8, $t8, $at       # $t8 = field/element address.
-
-    // Calculate the address of the exit point, store it in $t3 and load the reference into $t8.
-    lb      $at, (-HEAP_POISON_INSTR_SIZE - 2)($ra)   # $ra-HEAP_POISON_INSTR_SIZE-4 points to
-                                                      # "lwu `out`, ...".
-    andi    $at, $at, 31        # Extract `out` from lwu.
-
-    lwu     $t8, 0($t8)         # $t8 = reference.
-    UNPOISON_HEAP_REF $t8
-
-    // Return if null reference.
-    bnez    $t8, .Lintrospection_common
-    dlsa    $t3, $at, $t3, 3    # $t3 = address of the exit point
-                                # (BRB_FIELD_EXIT* macro is 8 bytes).
-
-    // Early return through the exit point.
-.Lintrospection_return_early:
-    jic     $t3, 0              # Move $t8 to `out` and return.
-
-    // Code common for GC roots, fields and array elements.
-
-.Lintrospection_common:
-    // Check lock word for mark bit, if marked return.
-    lw      $t9, MIRROR_OBJECT_LOCK_WORD_OFFSET($t8)
-    sll     $at, $t9, 31 - LOCK_WORD_MARK_BIT_SHIFT     # Move mark bit to sign bit.
-    bltzc   $at, .Lintrospection_return_early
-#if (LOCK_WORD_STATE_SHIFT != 30) || (LOCK_WORD_STATE_FORWARDING_ADDRESS != 3)
-    // The below code depends on the lock word state being in the highest bits
-    // and the "forwarding address" state having all bits set.
-#error "Unexpected lock word state shift or forwarding address state value."
-#endif
-    // Test that both the forwarding state bits are 1.
-    sll     $at, $t9, 1
-    and     $at, $at, $t9                               # Sign bit = 1 IFF both bits are 1.
-    bgezc   $at, .Lintrospection_mark
-
-    .set pop
-
-    // Shift left by the forwarding address shift. This clears out the state bits since they are
-    // in the top 2 bits of the lock word.
-    sll     $t8, $t9, LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
-    jalr    $zero, $t3          # Move $t8 to `out` and return.
-    dext    $t8, $t8, 0, 32     # Make sure the address is zero-extended.
-
-.Lintrospection_mark:
-    // Partially set up the stack frame preserving only $ra.
-    daddiu  $sp, $sp, -320
-    .cfi_adjust_cfa_offset 320
-    sd      $ra, 312($sp)
-    .cfi_rel_offset 31, 312
-
-    // Set up $gp, clobbering $ra.
-    lapc    $ra, 1f
-1:
-    .cpsetup $ra, 304, 1b       # Save old $gp in 304($sp).
-
-    // Finalize the stack frame and call.
-    sd      $t3, 296($sp)       # Preserve the exit point address.
-    sd      $t2, 288($sp)
-    .cfi_rel_offset 14, 288
-    sd      $t1, 280($sp)
-    .cfi_rel_offset 13, 280
-    sd      $t0, 272($sp)
-    .cfi_rel_offset 12, 272
-    sd      $a7, 264($sp)
-    .cfi_rel_offset 11, 264
-    sd      $a6, 256($sp)
-    .cfi_rel_offset 10, 256
-    sd      $a5, 248($sp)
-    .cfi_rel_offset 9, 248
-    sd      $a4, 240($sp)
-    .cfi_rel_offset 8, 240
-    sd      $a3, 232($sp)
-    .cfi_rel_offset 7, 232
-    sd      $a2, 224($sp)
-    .cfi_rel_offset 6, 224
-    sd      $a1, 216($sp)
-    .cfi_rel_offset 5, 216
-    sd      $a0, 208($sp)
-    .cfi_rel_offset 4, 208
-    sd      $v1, 200($sp)
-    .cfi_rel_offset 3, 200
-    sd      $v0, 192($sp)
-    .cfi_rel_offset 2, 192
-
-    dla     $t9, artReadBarrierMark
-
-    sdc1    $f23, 184($sp)
-    sdc1    $f22, 176($sp)
-    sdc1    $f21, 168($sp)
-    sdc1    $f20, 160($sp)
-    sdc1    $f19, 152($sp)
-    sdc1    $f18, 144($sp)
-    sdc1    $f17, 136($sp)
-    sdc1    $f16, 128($sp)
-    sdc1    $f15, 120($sp)
-    sdc1    $f14, 112($sp)
-    sdc1    $f13, 104($sp)
-    sdc1    $f12,  96($sp)
-    sdc1    $f11,  88($sp)
-    sdc1    $f10,  80($sp)
-    sdc1    $f9,   72($sp)
-    sdc1    $f8,   64($sp)
-    sdc1    $f7,   56($sp)
-    sdc1    $f6,   48($sp)
-    sdc1    $f5,   40($sp)
-    sdc1    $f4,   32($sp)
-    sdc1    $f3,   24($sp)
-    sdc1    $f2,   16($sp)
-    sdc1    $f1,    8($sp)
-    sdc1    $f0,    0($sp)
-
-    jalr    $t9                 # $v0 <- artReadBarrierMark(reference)
-    move    $a0, $t8            # Pass reference in $a0.
-    move    $t8, $v0
-
-    ld      $ra, 312($sp)
-    .cfi_restore 31
-    .cpreturn                   # Restore old $gp from 304($sp).
-    ld      $t3, 296($sp)       # $t3 = address of the exit point.
-    ld      $t2, 288($sp)
-    .cfi_restore 14
-    ld      $t1, 280($sp)
-    .cfi_restore 13
-    ld      $t0, 272($sp)
-    .cfi_restore 12
-    ld      $a7, 264($sp)
-    .cfi_restore 11
-    ld      $a6, 256($sp)
-    .cfi_restore 10
-    ld      $a5, 248($sp)
-    .cfi_restore 9
-    ld      $a4, 240($sp)
-    .cfi_restore 8
-    ld      $a3, 232($sp)
-    .cfi_restore 7
-    ld      $a2, 224($sp)
-    .cfi_restore 6
-    ld      $a1, 216($sp)
-    .cfi_restore 5
-    ld      $a0, 208($sp)
-    .cfi_restore 4
-    ld      $v1, 200($sp)
-    .cfi_restore 3
-    ld      $v0, 192($sp)
-    .cfi_restore 2
-
-    ldc1    $f23, 184($sp)
-    ldc1    $f22, 176($sp)
-    ldc1    $f21, 168($sp)
-    ldc1    $f20, 160($sp)
-    ldc1    $f19, 152($sp)
-    ldc1    $f18, 144($sp)
-    ldc1    $f17, 136($sp)
-    ldc1    $f16, 128($sp)
-    ldc1    $f15, 120($sp)
-    ldc1    $f14, 112($sp)
-    ldc1    $f13, 104($sp)
-    ldc1    $f12,  96($sp)
-    ldc1    $f11,  88($sp)
-    ldc1    $f10,  80($sp)
-    ldc1    $f9,   72($sp)
-    ldc1    $f8,   64($sp)
-    ldc1    $f7,   56($sp)
-    ldc1    $f6,   48($sp)
-    ldc1    $f5,   40($sp)
-    ldc1    $f4,   32($sp)
-    ldc1    $f3,   24($sp)
-    ldc1    $f2,   16($sp)
-    ldc1    $f1,    8($sp)
-    ldc1    $f0,    0($sp)
-
-    // Return through the exit point.
-    jalr    $zero, $t3          # Move $t8 to `out` and return.
-    daddiu  $sp, $sp, 320
-    .cfi_adjust_cfa_offset -320
-
-.Lintrospection_exits:
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT $v0
-    BRB_FIELD_EXIT $v1
-    BRB_FIELD_EXIT $a0
-    BRB_FIELD_EXIT $a1
-    BRB_FIELD_EXIT $a2
-    BRB_FIELD_EXIT $a3
-    BRB_FIELD_EXIT $a4
-    BRB_FIELD_EXIT $a5
-    BRB_FIELD_EXIT $a6
-    BRB_FIELD_EXIT $a7
-    BRB_FIELD_EXIT $t0
-    BRB_FIELD_EXIT $t1
-    BRB_FIELD_EXIT $t2
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT $s2
-    BRB_FIELD_EXIT $s3
-    BRB_FIELD_EXIT $s4
-    BRB_FIELD_EXIT $s5
-    BRB_FIELD_EXIT $s6
-    BRB_FIELD_EXIT $s7
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT_BREAK
-    BRB_FIELD_EXIT $s8
-    BRB_FIELD_EXIT_BREAK
-END art_quick_read_barrier_mark_introspection
-
-    /*
-     * Polymorphic method invocation.
-     * On entry:
-     *   a0 = unused
-     *   a1 = receiver
-     */
-.extern artInvokePolymorphic
-ENTRY art_quick_invoke_polymorphic
-    SETUP_SAVE_REFS_AND_ARGS_FRAME
-    move    $a0, $a1               # Make $a0 the receiver
-    move    $a1, rSELF             # Make $a1 an alias for the current Thread.
-    jal     artInvokePolymorphic   # artInvokePolymorphic(receiver, Thread*, context)
-    move    $a2, $sp               # Make $a3 a pointer to the saved frame context.
-    ld      $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
-    daddiu  $sp, $sp, REFS_AND_ARGS_MINUS_REFS_SIZE  # skip a0-a7 and f12-f19
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    bne     $t0, $zero, 1f
-    dmtc1   $v0, $f0               # place return value to FP return value
-    jalr    $zero, $ra
-    dmtc1   $v1, $f1               # place return value to FP return value
-1:
-    DELIVER_PENDING_EXCEPTION
-END art_quick_invoke_polymorphic
-
-    /*
-     * InvokeCustom invocation.
-     * On entry:
-     *   a0 = call_site_idx
-     */
-.extern artInvokeCustom
-ENTRY art_quick_invoke_custom
-    SETUP_SAVE_REFS_AND_ARGS_FRAME
-    move    $a1, rSELF             # Make $a1 an alias for the current Thread.
-    jal     artInvokeCustom        # Call artInvokeCustom(call_site_idx, Thread*, context).
-    move    $a2, $sp               # Make $a1 a pointer to the saved frame context.
-    ld      $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
-    daddiu  $sp, $sp, REFS_AND_ARGS_MINUS_REFS_SIZE  # skip a0-a7 and f12-f19
-    RESTORE_SAVE_REFS_ONLY_FRAME
-    bne     $t0, $zero, 1f
-    dmtc1   $v0, $f0               # place return value to FP return value
-    jalr    $zero, $ra
-    dmtc1   $v1, $f1               # place return value to FP return value
-1:
-    DELIVER_PENDING_EXCEPTION
-END art_quick_invoke_polymorphic
-  .set pop
diff --git a/runtime/arch/mips64/registers_mips64.cc b/runtime/arch/mips64/registers_mips64.cc
deleted file mode 100644
index 1ee2cdd..0000000
--- a/runtime/arch/mips64/registers_mips64.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "registers_mips64.h"
-
-#include <ostream>
-
-namespace art {
-namespace mips64 {
-
-static const char* kRegisterNames[] = {
-  "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
-  "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
-  "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
-  "t8", "t9", "k0", "k1", "gp", "sp", "s8", "ra",
-};
-
-std::ostream& operator<<(std::ostream& os, const GpuRegister& rhs) {
-  if (rhs >= ZERO && rhs < kNumberOfGpuRegisters) {
-    os << kRegisterNames[rhs];
-  } else {
-    os << "GpuRegister[" << static_cast<int>(rhs) << "]";
-  }
-  return os;
-}
-
-std::ostream& operator<<(std::ostream& os, const FpuRegister& rhs) {
-  if (rhs >= F0 && rhs < kNumberOfFpuRegisters) {
-    os << "f" << static_cast<int>(rhs);
-  } else {
-    os << "FpuRegister[" << static_cast<int>(rhs) << "]";
-  }
-  return os;
-}
-
-std::ostream& operator<<(std::ostream& os, const VectorRegister& rhs) {
-  if (rhs >= W0 && rhs < kNumberOfVectorRegisters) {
-    os << "w" << static_cast<int>(rhs);
-  } else {
-    os << "VectorRegister[" << static_cast<int>(rhs) << "]";
-  }
-  return os;
-}
-
-}  // namespace mips64
-}  // namespace art
diff --git a/runtime/arch/mips64/registers_mips64.h b/runtime/arch/mips64/registers_mips64.h
deleted file mode 100644
index 1c22c07..0000000
--- a/runtime/arch/mips64/registers_mips64.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_MIPS64_REGISTERS_MIPS64_H_
-#define ART_RUNTIME_ARCH_MIPS64_REGISTERS_MIPS64_H_
-
-#include <iosfwd>
-
-#include "base/macros.h"
-
-namespace art {
-namespace mips64 {
-
-enum GpuRegister {
-  ZERO =  0,
-  AT   =  1,  // Assembler temporary.
-  V0   =  2,  // Values.
-  V1   =  3,
-  A0   =  4,  // Arguments.
-  A1   =  5,
-  A2   =  6,
-  A3   =  7,
-  A4   =  8,
-  A5   =  9,
-  A6   = 10,
-  A7   = 11,
-  T0   = 12,  // Temporaries.
-  T1   = 13,
-  T2   = 14,
-  T3   = 15,
-  S0   = 16,  // Saved values.
-  S1   = 17,
-  S2   = 18,
-  S3   = 19,
-  S4   = 20,
-  S5   = 21,
-  S6   = 22,
-  S7   = 23,
-  T8   = 24,  // More temporaries.
-  T9   = 25,
-  K0   = 26,  // Reserved for trap handler.
-  K1   = 27,
-  GP   = 28,  // Global pointer.
-  SP   = 29,  // Stack pointer.
-  S8   = 30,  // Saved value/frame pointer.
-  RA   = 31,  // Return address.
-  TR   = S1,  // ART Thread Register
-  TMP  = T8,  // scratch register (in addition to AT)
-  TMP2 = T3,  // scratch register (in addition to AT, reserved for assembler)
-  kNumberOfGpuRegisters = 32,
-  kNoGpuRegister = -1  // Signals an illegal register.
-};
-std::ostream& operator<<(std::ostream& os, const GpuRegister& rhs);
-
-// Values for floating point registers.
-enum FpuRegister {
-  F0  =  0,
-  F1  =  1,
-  F2  =  2,
-  F3  =  3,
-  F4  =  4,
-  F5  =  5,
-  F6  =  6,
-  F7  =  7,
-  F8  =  8,
-  F9  =  9,
-  F10 = 10,
-  F11 = 11,
-  F12 = 12,
-  F13 = 13,
-  F14 = 14,
-  F15 = 15,
-  F16 = 16,
-  F17 = 17,
-  F18 = 18,
-  F19 = 19,
-  F20 = 20,
-  F21 = 21,
-  F22 = 22,
-  F23 = 23,
-  F24 = 24,
-  F25 = 25,
-  F26 = 26,
-  F27 = 27,
-  F28 = 28,
-  F29 = 29,
-  F30 = 30,
-  F31 = 31,
-  FTMP = F8,   // scratch register
-  FTMP2 = F9,  // scratch register (in addition to FTMP, reserved for MSA instructions)
-  kNumberOfFpuRegisters = 32,
-  kNoFpuRegister = -1,
-};
-std::ostream& operator<<(std::ostream& os, const FpuRegister& rhs);
-
-// Values for vector registers.
-enum VectorRegister {
-  W0  =  0,
-  W1  =  1,
-  W2  =  2,
-  W3  =  3,
-  W4  =  4,
-  W5  =  5,
-  W6  =  6,
-  W7  =  7,
-  W8  =  8,
-  W9  =  9,
-  W10 = 10,
-  W11 = 11,
-  W12 = 12,
-  W13 = 13,
-  W14 = 14,
-  W15 = 15,
-  W16 = 16,
-  W17 = 17,
-  W18 = 18,
-  W19 = 19,
-  W20 = 20,
-  W21 = 21,
-  W22 = 22,
-  W23 = 23,
-  W24 = 24,
-  W25 = 25,
-  W26 = 26,
-  W27 = 27,
-  W28 = 28,
-  W29 = 29,
-  W30 = 30,
-  W31 = 31,
-  kNumberOfVectorRegisters = 32,
-  kNoVectorRegister = -1,
-};
-std::ostream& operator<<(std::ostream& os, const VectorRegister& rhs);
-
-}  // namespace mips64
-}  // namespace art
-
-#endif  // ART_RUNTIME_ARCH_MIPS64_REGISTERS_MIPS64_H_
diff --git a/runtime/arch/mips64/thread_mips64.cc b/runtime/arch/mips64/thread_mips64.cc
deleted file mode 100644
index c1c390b..0000000
--- a/runtime/arch/mips64/thread_mips64.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "thread.h"
-
-#include <android-base/logging.h>
-
-#include "asm_support_mips64.h"
-#include "base/enums.h"
-
-namespace art {
-
-void Thread::InitCpu() {
-  CHECK_EQ(THREAD_FLAGS_OFFSET, ThreadFlagsOffset<PointerSize::k64>().Int32Value());
-  CHECK_EQ(THREAD_CARD_TABLE_OFFSET, CardTableOffset<PointerSize::k64>().Int32Value());
-  CHECK_EQ(THREAD_EXCEPTION_OFFSET, ExceptionOffset<PointerSize::k64>().Int32Value());
-}
-
-void Thread::CleanupCpu() {
-  // Do nothing.
-}
-
-}  // namespace art
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index c82b445..2b47cef 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -322,138 +322,6 @@
           "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
           "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
           "memory");
-#elif defined(__mips__) && !defined(__LP64__)
-    __asm__ __volatile__ (
-        // Spill a0-a3 and t0-t7 which we say we don't clobber. May contain args.
-        "addiu $sp, $sp, -64\n\t"
-        "sw $a0, 0($sp)\n\t"
-        "sw $a1, 4($sp)\n\t"
-        "sw $a2, 8($sp)\n\t"
-        "sw $a3, 12($sp)\n\t"
-        "sw $t0, 16($sp)\n\t"
-        "sw $t1, 20($sp)\n\t"
-        "sw $t2, 24($sp)\n\t"
-        "sw $t3, 28($sp)\n\t"
-        "sw $t4, 32($sp)\n\t"
-        "sw $t5, 36($sp)\n\t"
-        "sw $t6, 40($sp)\n\t"
-        "sw $t7, 44($sp)\n\t"
-        // Spill gp register since it is caller save.
-        "sw $gp, 52($sp)\n\t"
-
-        "addiu $sp, $sp, -16\n\t"  // Reserve stack space, 16B aligned.
-        "sw %[referrer], 0($sp)\n\t"
-
-        // Push everything on the stack, so we don't rely on the order.
-        "addiu $sp, $sp, -24\n\t"
-        "sw %[arg0], 0($sp)\n\t"
-        "sw %[arg1], 4($sp)\n\t"
-        "sw %[arg2], 8($sp)\n\t"
-        "sw %[code], 12($sp)\n\t"
-        "sw %[self], 16($sp)\n\t"
-        "sw %[hidden], 20($sp)\n\t"
-
-        // Load call params into the right registers.
-        "lw $a0, 0($sp)\n\t"
-        "lw $a1, 4($sp)\n\t"
-        "lw $a2, 8($sp)\n\t"
-        "lw $t9, 12($sp)\n\t"
-        "lw $s1, 16($sp)\n\t"
-        "lw $t7, 20($sp)\n\t"
-        "addiu $sp, $sp, 24\n\t"
-
-        "jalr $t9\n\t"             // Call the stub.
-        "nop\n\t"
-        "addiu $sp, $sp, 16\n\t"   // Drop the quick "frame".
-
-        // Restore stuff not named clobbered.
-        "lw $a0, 0($sp)\n\t"
-        "lw $a1, 4($sp)\n\t"
-        "lw $a2, 8($sp)\n\t"
-        "lw $a3, 12($sp)\n\t"
-        "lw $t0, 16($sp)\n\t"
-        "lw $t1, 20($sp)\n\t"
-        "lw $t2, 24($sp)\n\t"
-        "lw $t3, 28($sp)\n\t"
-        "lw $t4, 32($sp)\n\t"
-        "lw $t5, 36($sp)\n\t"
-        "lw $t6, 40($sp)\n\t"
-        "lw $t7, 44($sp)\n\t"
-        // Restore gp.
-        "lw $gp, 52($sp)\n\t"
-        "addiu $sp, $sp, 64\n\t"   // Free stack space, now sp as on entry.
-
-        "move %[result], $v0\n\t"  // Store the call result.
-        : [result] "=r" (result)
-        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
-          [referrer] "r"(referrer), [hidden] "r"(hidden)
-        : "at", "v0", "v1", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1",
-          "fp", "ra",
-          "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
-          "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22",
-          "$f23", "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
-          "memory");  // clobber.
-#elif defined(__mips__) && defined(__LP64__)
-    __asm__ __volatile__ (
-        // Spill a0-a7 which we say we don't clobber. May contain args.
-        "daddiu $sp, $sp, -64\n\t"
-        "sd $a0, 0($sp)\n\t"
-        "sd $a1, 8($sp)\n\t"
-        "sd $a2, 16($sp)\n\t"
-        "sd $a3, 24($sp)\n\t"
-        "sd $a4, 32($sp)\n\t"
-        "sd $a5, 40($sp)\n\t"
-        "sd $a6, 48($sp)\n\t"
-        "sd $a7, 56($sp)\n\t"
-
-        "daddiu $sp, $sp, -16\n\t"  // Reserve stack space, 16B aligned.
-        "sd %[referrer], 0($sp)\n\t"
-
-        // Push everything on the stack, so we don't rely on the order.
-        "daddiu $sp, $sp, -48\n\t"
-        "sd %[arg0], 0($sp)\n\t"
-        "sd %[arg1], 8($sp)\n\t"
-        "sd %[arg2], 16($sp)\n\t"
-        "sd %[code], 24($sp)\n\t"
-        "sd %[self], 32($sp)\n\t"
-        "sd %[hidden], 40($sp)\n\t"
-
-        // Load call params into the right registers.
-        "ld $a0, 0($sp)\n\t"
-        "ld $a1, 8($sp)\n\t"
-        "ld $a2, 16($sp)\n\t"
-        "ld $t9, 24($sp)\n\t"
-        "ld $s1, 32($sp)\n\t"
-        "ld $t0, 40($sp)\n\t"
-        "daddiu $sp, $sp, 48\n\t"
-
-        "jalr $t9\n\t"              // Call the stub.
-        "nop\n\t"
-        "daddiu $sp, $sp, 16\n\t"   // Drop the quick "frame".
-
-        // Restore stuff not named clobbered.
-        "ld $a0, 0($sp)\n\t"
-        "ld $a1, 8($sp)\n\t"
-        "ld $a2, 16($sp)\n\t"
-        "ld $a3, 24($sp)\n\t"
-        "ld $a4, 32($sp)\n\t"
-        "ld $a5, 40($sp)\n\t"
-        "ld $a6, 48($sp)\n\t"
-        "ld $a7, 56($sp)\n\t"
-        "daddiu $sp, $sp, 64\n\t"
-
-        "move %[result], $v0\n\t"   // Store the call result.
-        : [result] "=r" (result)
-        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
-          [referrer] "r"(referrer), [hidden] "r"(hidden)
-        // Instead aliases t0-t3, register names $12-$15 has been used in the clobber list because
-        // t0-t3 are ambiguous.
-        : "at", "v0", "v1", "$12", "$13", "$14", "$15", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
-          "s7", "t8", "t9", "k0", "k1", "fp", "ra",
-          "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
-          "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22",
-          "$f23", "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
-          "memory");  // clobber.
 #elif defined(__x86_64__) && !defined(__APPLE__)
 #define PUSH(reg) "pushq " # reg "\n\t .cfi_adjust_cfa_offset 8\n\t"
 #define POP(reg) "popq " # reg "\n\t .cfi_adjust_cfa_offset -8\n\t"
@@ -546,7 +414,7 @@
 
 
 TEST_F(StubTest, Memcpy) {
-#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__)) || defined(__mips__)
+#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__))
   Thread* self = Thread::Current();
 
   uint32_t orig[20];
@@ -583,7 +451,7 @@
 }
 
 TEST_F(StubTest, LockObject) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   static constexpr size_t kThinLockLoops = 100;
 
@@ -657,7 +525,7 @@
 
 // NO_THREAD_SAFETY_ANALYSIS as we do not want to grab exclusive mutator lock for MonitorInfo.
 static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   static constexpr size_t kThinLockLoops = 100;
 
@@ -808,13 +676,13 @@
   TestUnlockObject(this);
 }
 
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
 extern "C" void art_quick_check_instance_of(void);
 #endif
 
 TEST_F(StubTest, CheckCast) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   Thread* self = Thread::Current();
 
@@ -913,7 +781,7 @@
 }
 
 TEST_F(StubTest, AllocObject) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   // This will lead to OOM  error messages in the log.
   ScopedLogSeverity sls(LogSeverity::FATAL);
@@ -1030,7 +898,7 @@
 }
 
 TEST_F(StubTest, AllocObjectArray) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   // TODO: Check the "Unresolved" allocation stubs
 
@@ -1095,8 +963,7 @@
 TEST_F(StubTest, StringCompareTo) {
   TEST_DISABLED_FOR_STRING_COMPRESSION();
   // There is no StringCompareTo runtime entrypoint for __arm__ or __aarch64__.
-#if defined(__i386__) || defined(__mips__) || \
-    (defined(__x86_64__) && !defined(__APPLE__))
+#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__))
   // TODO: Check the "Unresolved" allocation stubs
 
   Thread* self = Thread::Current();
@@ -1178,7 +1045,7 @@
 static void GetSetBooleanStatic(ArtField* f, Thread* self,
                                 ArtMethod* referrer, StubTest* test)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   constexpr size_t num_values = 5;
   uint8_t values[num_values] = { 0, 1, 2, 128, 0xFF };
@@ -1209,7 +1076,7 @@
 static void GetSetByteStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                              StubTest* test)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   int8_t values[] = { -128, -64, 0, 64, 127 };
 
@@ -1240,7 +1107,7 @@
 static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self,
                                   ArtMethod* referrer, StubTest* test)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   uint8_t values[] = { 0, true, 2, 128, 0xFF };
 
@@ -1275,7 +1142,7 @@
 static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f,
                              Thread* self, ArtMethod* referrer, StubTest* test)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   int8_t values[] = { -128, -64, 0, 64, 127 };
 
@@ -1310,7 +1177,7 @@
 static void GetSetCharStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                              StubTest* test)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
 
@@ -1340,7 +1207,7 @@
 static void GetSetShortStatic(ArtField* f, Thread* self,
                               ArtMethod* referrer, StubTest* test)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
 
@@ -1371,7 +1238,7 @@
 static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f,
                                Thread* self, ArtMethod* referrer, StubTest* test)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
 
@@ -1405,7 +1272,7 @@
 static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f,
                              Thread* self, ArtMethod* referrer, StubTest* test)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
 
@@ -1440,7 +1307,7 @@
 static void GetSet32Static(ArtField* f, Thread* self, ArtMethod* referrer,
                            StubTest* test)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
 
@@ -1458,11 +1325,7 @@
                                            self,
                                            referrer);
 
-#if defined(__mips__) && defined(__LP64__)
-    EXPECT_EQ(static_cast<uint32_t>(res), values[i]) << "Iteration " << i;
-#else
     EXPECT_EQ(res, values[i]) << "Iteration " << i;
-#endif
   }
 #else
   UNUSED(f, self, referrer, test);
@@ -1476,7 +1339,7 @@
 static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f,
                              Thread* self, ArtMethod* referrer, StubTest* test)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
 
@@ -1511,7 +1374,7 @@
 }
 
 
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
 
 static void set_and_check_static(uint32_t f_idx,
@@ -1543,7 +1406,7 @@
 static void GetSetObjStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                             StubTest* test)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);
 
@@ -1561,7 +1424,7 @@
 }
 
 
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
 static void set_and_check_instance(ArtField* f,
                                    ObjPtr<mirror::Object> trg,
@@ -1596,7 +1459,7 @@
 static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f,
                               Thread* self, ArtMethod* referrer, StubTest* test)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
 
@@ -1619,8 +1482,7 @@
 static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer,
                            StubTest* test)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) \
-    || defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
   uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
 
   for (size_t i = 0; i < arraysize(values); ++i) {
@@ -1652,8 +1514,7 @@
 static void GetSet64Instance(Handle<mirror::Object>* obj, ArtField* f,
                              Thread* self, ArtMethod* referrer, StubTest* test)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
-    defined(__aarch64__)
+#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
   uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
 
   for (size_t i = 0; i < arraysize(values); ++i) {
@@ -1842,7 +1703,7 @@
 // and gets a bogus OatQuickMethodHeader* pointing into our assembly code just before
 // the bridge and uses that to check for inlined frames, crashing in the process.
 TEST_F(StubTest, DISABLED_IMT) {
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
     (defined(__x86_64__) && !defined(__APPLE__))
   Thread* self = Thread::Current();
 
@@ -1981,7 +1842,7 @@
 }
 
 TEST_F(StubTest, StringIndexOf) {
-#if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
+#if defined(__arm__) || defined(__aarch64__)
   Thread* self = Thread::Current();
   ScopedObjectAccess soa(self);
   // garbage is created during ClassLinker::Init
@@ -2058,7 +1919,7 @@
 
 TEST_F(StubTest, ReadBarrier) {
 #if defined(ART_USE_READ_BARRIER) && (defined(__i386__) || defined(__arm__) || \
-      defined(__aarch64__) || defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__)))
+      defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)))
   Thread* self = Thread::Current();
 
   const uintptr_t readBarrierSlow = StubTest::GetEntrypoint(self, kQuickReadBarrierSlow);
@@ -2094,7 +1955,7 @@
 
 TEST_F(StubTest, ReadBarrierForRoot) {
 #if defined(ART_USE_READ_BARRIER) && (defined(__i386__) || defined(__arm__) || \
-      defined(__aarch64__) || defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__)))
+      defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__)))
   Thread* self = Thread::Current();
 
   const uintptr_t readBarrierForRootSlow =
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index c9514f5..8938d8b 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -77,8 +77,12 @@
     #define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg
     #define CFI_RESTORE(reg) .cfi_restore reg
     #define CFI_REL_OFFSET(reg,size) .cfi_rel_offset reg,size
-    #define CFI_RESTORE_STATE .cfi_restore_state
     #define CFI_REMEMBER_STATE .cfi_remember_state
+    // The spec is not clear whether the CFA is part of the saved state and tools
+    // differ in the behaviour, so explicitly set the CFA to avoid any ambiguity.
+    // The restored CFA state should match the CFA state during CFI_REMEMBER_STATE.
+    // `objdump -Wf libart.so | egrep "_cfa|_state"` is useful to audit the opcodes.
+    #define CFI_RESTORE_STATE_AND_DEF_CFA(reg,off) .cfi_restore_state .cfi_def_cfa reg,off
     #define CFI_ESCAPE(...) .cfi_escape __VA_ARGS__
 #else
     // Mac OS' doesn't like cfi_* directives.
@@ -89,8 +93,8 @@
     #define CFI_DEF_CFA_REGISTER(reg)
     #define CFI_RESTORE(reg)
     #define CFI_REL_OFFSET(reg,size)
-    #define CFI_RESTORE_STATE
     #define CFI_REMEMBER_STATE
+    #define CFI_RESTORE_STATE_AND_DEF_CFA(reg,off)
     #define CFI_ESCAPE(...)
 #endif
 
@@ -113,7 +117,11 @@
 
     /* Cache alignment for function entry */
 MACRO0(ALIGN_FUNCTION_ENTRY)
-    .balign 16
+    // ART-compiled functions have OatQuickMethodHeader but assembly functions do not.
+    // Prefix the assembly code with 0xFFs, which means there is no method header.
+    .byte 0xFF, 0xFF, 0xFF, 0xFF
+    // Cache alignment for function entry.
+    .balign 16, 0xFF
 END_MACRO
 
 MACRO2(DEFINE_FUNCTION_CUSTOM_CFA, c_name, cfa_offset)
@@ -148,6 +156,18 @@
     CFI_RESTORE(REG_VAR(reg))
 END_MACRO
 
+// Arguments do not need .cfi_rel_offset as they are caller-saved and
+// therefore cannot hold caller's variables or unwinding data.
+MACRO1(PUSH_ARG, reg)
+    pushl REG_VAR(reg)
+    CFI_ADJUST_CFA_OFFSET(4)
+END_MACRO
+
+MACRO1(POP_ARG, reg)
+    popl REG_VAR(reg)
+    CFI_ADJUST_CFA_OFFSET(-4)
+END_MACRO
+
 MACRO1(CFI_RESTORE_REG, reg)
     CFI_RESTORE(REG_VAR(reg))
 END_MACRO
@@ -191,5 +211,64 @@
 #endif  // USE_HEAP_POISONING
 END_MACRO
 
+    /*
+     * Macro that sets up the callee save frame to conform with
+     * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs), except for pushing the method
+     */
+MACRO0(SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY)
+    PUSH edi      // Save callee saves
+    PUSH esi
+    PUSH ebp
+    PUSH_ARG ebx  // Save args.
+    PUSH_ARG edx
+    PUSH_ARG ecx
+    // Create space for FPR args.
+    subl MACRO_LITERAL(4 * 8), %esp
+    CFI_ADJUST_CFA_OFFSET(4 * 8)
+    // Save FPRs.
+    movsd %xmm0, 0(%esp)
+    movsd %xmm1, 8(%esp)
+    movsd %xmm2, 16(%esp)
+    movsd %xmm3, 24(%esp)
+
+    // Ugly compile-time check, but we only have the preprocessor.
+    // First +4: implicit return address pushed on stack when caller made call.
+    // Last +4: we're not pushing the method on the stack here.
+#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 4 + 6*4 + 4*8 + 4)
+#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(X86) size not as expected."
+#endif
+END_MACRO
+
+MACRO0(RESTORE_SAVE_REFS_AND_ARGS_FRAME)
+    // Restore FPRs. EAX is still on the stack.
+    movsd 4(%esp), %xmm0
+    movsd 12(%esp), %xmm1
+    movsd 20(%esp), %xmm2
+    movsd 28(%esp), %xmm3
+
+    addl MACRO_LITERAL(36), %esp  // Remove FPRs and method pointer.
+    CFI_ADJUST_CFA_OFFSET(-36)
+
+    POP_ARG ecx                   // Restore args
+    POP_ARG edx
+    POP_ARG ebx
+    POP ebp                       // Restore callee saves
+    POP esi
+    POP edi
+END_MACRO
+
+    /*
+     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+     * exception is Thread::Current()->exception_ when the runtime method frame is ready.
+     */
+MACRO0(DELIVER_PENDING_EXCEPTION_FRAME_READY)
+    // Outgoing argument set up
+    subl MACRO_LITERAL(12), %esp               // alignment padding
+    CFI_ADJUST_CFA_OFFSET(12)
+    pushl %fs:THREAD_SELF_OFFSET               // pass Thread::Current()
+    CFI_ADJUST_CFA_OFFSET(4)
+    call SYMBOL(artDeliverPendingExceptionFromCode)  // artDeliverPendingExceptionFromCode(Thread*)
+    UNREACHABLE
+END_MACRO
 
 #endif  // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 26312fb..3a08ec5 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -279,7 +279,9 @@
 
 void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
                                              ArtMethod** out_method,
-                                             uintptr_t* out_return_pc, uintptr_t* out_sp) {
+                                             uintptr_t* out_return_pc,
+                                             uintptr_t* out_sp,
+                                             bool* out_is_stack_overflow) {
   struct ucontext* uc = reinterpret_cast<struct ucontext*>(context);
   *out_sp = static_cast<uintptr_t>(uc->CTX_ESP);
   VLOG(signals) << "sp: " << std::hex << *out_sp;
@@ -298,9 +300,11 @@
 #endif
   if (overflow_addr == fault_addr) {
     *out_method = reinterpret_cast<ArtMethod*>(uc->CTX_METHOD);
+    *out_is_stack_overflow = true;
   } else {
     // The method is at the top of the stack.
     *out_method = *reinterpret_cast<ArtMethod**>(*out_sp);
+    *out_is_stack_overflow = false;
   }
 
   uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP);
diff --git a/runtime/arch/x86/instruction_set_features_x86.h b/runtime/arch/x86/instruction_set_features_x86.h
index 34d908b..bf1b606 100644
--- a/runtime/arch/x86/instruction_set_features_x86.h
+++ b/runtime/arch/x86/instruction_set_features_x86.h
@@ -19,6 +19,26 @@
 
 #include "arch/instruction_set_features.h"
 
+#define GET_REX_R       0x04
+#define GET_REX_X       0x02
+#define GET_REX_B       0x01
+#define SET_VEX_R       0x80
+#define SET_VEX_X       0x40
+#define SET_VEX_B       0x20
+#define SET_VEX_M_0F    0x01
+#define SET_VEX_M_0F_38 0x02
+#define SET_VEX_M_0F_3A 0x03
+#define SET_VEX_W       0x80
+#define SET_VEX_L_128   0x00
+#define SET_VEL_L_256   0x04
+#define SET_VEX_PP_NONE 0x00
+#define SET_VEX_PP_66   0x01
+#define SET_VEX_PP_F3   0x02
+#define SET_VEX_PP_F2   0x03
+#define TWO_BYTE_VEX    0xC5
+#define THREE_BYTE_VEX  0xC4
+#define VEX_INIT        0x00
+
 namespace art {
 
 class X86InstructionSetFeatures;
@@ -69,6 +89,8 @@
 
   bool HasAVX2() const { return has_AVX2_; }
 
+  bool HasAVX() const { return has_AVX_; }
+
  protected:
   // Parse a string of the form "ssse3" adding these to a new InstructionSetFeatures.
   std::unique_ptr<const InstructionSetFeatures>
diff --git a/runtime/arch/x86/jni_entrypoints_x86.S b/runtime/arch/x86/jni_entrypoints_x86.S
index aca5a37..086e96f 100644
--- a/runtime/arch/x86/jni_entrypoints_x86.S
+++ b/runtime/arch/x86/jni_entrypoints_x86.S
@@ -24,7 +24,20 @@
     CFI_ADJUST_CFA_OFFSET(8)
     pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
     CFI_ADJUST_CFA_OFFSET(4)
+    // Call artFindNativeMethod() for normal native and artFindNativeMethodRunnable()
+    // for @FastNative or @CriticalNative.
+    movl (%esp), %eax                                // Thread* self
+    movl THREAD_TOP_QUICK_FRAME_OFFSET(%eax), %eax   // uintptr_t tagged_quick_frame
+    andl LITERAL(0xfffffffe), %eax                   // ArtMethod** sp
+    movl (%eax), %eax                                // ArtMethod* method
+    testl LITERAL(ACCESS_FLAGS_METHOD_IS_FAST_NATIVE | ACCESS_FLAGS_METHOD_IS_CRITICAL_NATIVE), \
+          ART_METHOD_ACCESS_FLAGS_OFFSET(%eax)
+    jne .Llookup_stub_fast_native
     call SYMBOL(artFindNativeMethod)  // (Thread*)
+    jmp .Llookup_stub_continue
+.Llookup_stub_fast_native:
+    call SYMBOL(artFindNativeMethodRunnable)  // (Thread*)
+.Llookup_stub_continue:
     addl LITERAL(12), %esp        // remove argument & padding
     CFI_ADJUST_CFA_OFFSET(-12)
     testl %eax, %eax              // check if returned method code is null
@@ -33,3 +46,178 @@
 .Lno_native_code_found:
     ret
 END_FUNCTION art_jni_dlsym_lookup_stub
+
+DEFINE_FUNCTION art_jni_dlsym_lookup_critical_stub
+    // The hidden arg holding the tagged method (bit 0 set means GenericJNI) is eax.
+    // For Generic JNI we already have a managed frame, so we reuse the art_jni_dlsym_lookup_stub.
+    testl LITERAL(1), %eax
+    jnz art_jni_dlsym_lookup_stub
+
+    // We need to create a GenericJNI managed frame above the stack args.
+
+    // GenericJNI frame is similar to SaveRegsAndArgs frame with the native method
+    // instead of runtime method saved at the bottom. Note that the runtime shall
+    // not examine the args here, otherwise we would have to reload them from stack
+    // to account for the difference between managed and native ABIs.
+    SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
+    pushl %eax  // Save the hidden arg as method pointer at the bottom of the stack.
+    CFI_ADJUST_CFA_OFFSET(4)
+
+    // Call artCriticalNativeOutArgsSize(method); method is conveniently at the bottom of the stack.
+    call SYMBOL(artCriticalNativeOutArgsSize)
+
+    // Check if we have any stack args other than return PC.
+    cmp LITERAL(__SIZEOF_POINTER__), %eax
+    jnz .Lcritical_has_stack_args
+
+    // Without stack args, the frame is fully constructed.
+    // Place tagged managed sp in Thread::Current()->top_quick_frame.
+    leal 1(%esp), %eax  // Tag as GenericJNI frame.
+    mov %eax, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
+
+    // Call artFindNativeMethodRunnable()
+    subl LITERAL(12), %esp         // align stack
+    CFI_ADJUST_CFA_OFFSET(12)
+    pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
+    CFI_ADJUST_CFA_OFFSET(4)
+    call SYMBOL(artFindNativeMethodRunnable)  // (Thread*)
+    addl LITERAL(16), %esp
+    CFI_ADJUST_CFA_OFFSET(-16)
+
+    // Check for exception.
+    test %eax, %eax
+    jz 1f
+
+    // Restore frame and do the tail call.
+    CFI_REMEMBER_STATE
+    RESTORE_SAVE_REFS_AND_ARGS_FRAME
+    jmp *%eax
+    CFI_RESTORE_STATE_AND_DEF_CFA(%esp, FRAME_SIZE_SAVE_REFS_AND_ARGS)
+
+1:
+    DELIVER_PENDING_EXCEPTION_FRAME_READY
+
+.Lcritical_has_stack_args:
+    // As mentioned above, the runtime shall not examine the args in the managed frame
+    // and since all args for the native call are on the stack, we can use the managed
+    // args registers as scratch registers. So, EBX, EDX and ECX are available and we
+    // do not need to restore xmm0-xmm3 either.
+
+    // Restore registers as we're about to move stack args over the current SaveRefsAndArgs frame.
+    movl (%esp), %edx   // Remember the method in EDX.
+    movl 48(%esp), %ebp
+    CFI_RESTORE(%ebp)
+    movl 52(%esp), %esi
+    CFI_RESTORE(%esi)
+    movl 56(%esp), %edi
+    CFI_RESTORE(%edi)
+
+    // Calculate the address of the end of the move destination and redefine CFI to take
+    // ownership of the JNI stub frame. EBX is conveniently callee-save in native ABI.
+    leal 0(%esp, %eax, 1), %ebx
+    CFI_DEF_CFA(%ebx, FRAME_SIZE_SAVE_REFS_AND_ARGS)
+
+    // Calculate the number of DWORDs to move.
+    shrl LITERAL(2), %eax
+    leal -1(%eax), %ecx  // Do not move the return PC.
+
+    // Load our return PC to EAX.
+    movl FRAME_SIZE_SAVE_REFS_AND_ARGS - __SIZEOF_POINTER__(%esp), %eax
+
+    // Save EDI, ESI so that we can use them for moving stack args.
+    pushl %edi  // No `CFI_ADJUST_CFA_OFFSET`, CFA register is currently EBX, not ESP.
+    pushl %esi  // ditto
+
+    // Mov the stack args.
+    leal 2 * __SIZEOF_POINTER__(%esp), %edi
+    leal FRAME_SIZE_SAVE_REFS_AND_ARGS(%edi), %esi
+    rep movsd
+
+    // Save our return PC.
+    movl %eax, (%edi)
+
+    // Restore EDI, ESI.
+    popl %esi   // No `CFI_ADJUST_CFA_OFFSET`, CFA register is currently EBX, not ESP.
+    popl %edi   // ditto
+
+    // Re-create the SaveRefsAndArgs frame above the args.
+    movl %edi, 56(%ebx)
+    CFI_REL_OFFSET(%edi, 56)
+    movl %esi, 52(%ebx)
+    CFI_REL_OFFSET(%esi, 52)
+    movl %ebp, 48(%ebx)
+    CFI_REL_OFFSET(%ebp, 48)
+    // Skip managed ABI args EBX, EDX, ECX and FPRs, see above.
+    // (We have already clobbered EBX, EDX, ECX anyway).
+    movl %edx, (%ebx)    // Save method pointer.
+
+    // Place tagged managed sp in Thread::Current()->top_quick_frame.
+    leal 1(%ebx), %eax  // Tag as GenericJNI frame.
+    movl %eax, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
+
+    // Call artFindNativeMethodRunnable()
+    subl LITERAL(12), %esp        // align stack, no `CFI_ADJUST_CFA_OFFSET`.
+    pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
+    call SYMBOL(artFindNativeMethodRunnable)  // (Thread*)
+    addl LITERAL(16), %esp        // Pop args, no `CFI_ADJUST_CFA_OFFSET`.
+
+    // Check for exception.
+    test %eax, %eax
+    jz 2f
+
+    // Restore the frame. We shall not need the method anymore.
+    CFI_REMEMBER_STATE
+    movl 48(%ebx), %ebp
+    CFI_RESTORE(%ebp)
+    movl 52(%ebx), %esi
+    CFI_RESTORE(%esi)
+    movl 56(%ebx), %edi
+    CFI_RESTORE(%edi)
+
+    // Remember our return PC in EDX.
+    movl -__SIZEOF_POINTER__(%ebx), %edx
+
+    // Calculate the number of DWORDs to move.
+    leal -__SIZEOF_POINTER__(%ebx), %ecx  // Do not move return PC.
+    subl %esp, %ecx
+    shrl LITERAL(2), %ecx
+
+    // Save EDI, ESI so that we can use them for moving stack args.
+    pushl %edi  // No `CFI_ADJUST_CFA_OFFSET`, CFA register is currently EBX, not ESP.
+    pushl %esi  // ditto
+
+    // Mov stack args to their original place.
+    leal -2 * __SIZEOF_POINTER__(%ebx), %esi
+    leal FRAME_SIZE_SAVE_REFS_AND_ARGS - 2 * __SIZEOF_POINTER__(%ebx), %edi
+    std
+    rep movsd
+    cld
+
+    // Store our return PC.
+    movl %edx, (%edi)
+
+    // Restore EDI, ESI.
+    popl %esi   // No `CFI_ADJUST_CFA_OFFSET`, CFA register is currently EBX, not ESP.
+    popl %edi   // ditto
+
+    // Redefine CFI to release ownership of the JNI stub frame.
+    CFI_DEF_CFA(%esp, FRAME_SIZE_SAVE_REFS_AND_ARGS)
+
+    // Remove the frame reservation.
+    addl LITERAL(FRAME_SIZE_SAVE_REFS_AND_ARGS - __SIZEOF_POINTER__), %esp
+    CFI_ADJUST_CFA_OFFSET(-(FRAME_SIZE_SAVE_REFS_AND_ARGS - __SIZEOF_POINTER__))
+
+    // Do the tail call.
+    jmp *%eax
+    CFI_RESTORE_STATE_AND_DEF_CFA(%ebx, FRAME_SIZE_SAVE_REFS_AND_ARGS)
+
+2:
+    // Replicate DELIVER_PENDING_EXCEPTION_FRAME_READY without CFI_ADJUST_CFA_OFFSET,
+    // CFA register is currently EBX, not ESP.
+
+    // Outgoing argument set up
+    subl MACRO_LITERAL(12), %esp               // alignment padding
+    pushl %fs:THREAD_SELF_OFFSET               // pass Thread::Current()
+    call SYMBOL(artDeliverPendingExceptionFromCode)  // artDeliverPendingExceptionFromCode(Thread*)
+    UNREACHABLE
+END_FUNCTION art_jni_dlsym_lookup_critical_stub
diff --git a/runtime/arch/x86/jni_frame_x86.h b/runtime/arch/x86/jni_frame_x86.h
new file mode 100644
index 0000000..e710179
--- /dev/null
+++ b/runtime/arch/x86/jni_frame_x86.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_JNI_FRAME_X86_H_
+#define ART_RUNTIME_ARCH_X86_JNI_FRAME_X86_H_
+
+#include <string.h>
+
+#include "arch/instruction_set.h"
+#include "base/bit_utils.h"
+#include "base/globals.h"
+#include "base/logging.h"
+
+namespace art {
+namespace x86 {
+
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k32);
+static_assert(kX86PointerSize == PointerSize::k32, "Unexpected x86 pointer size");
+
+static constexpr size_t kNativeStackAlignment = 16;  // IA-32 cdecl requires 16 byte alignment.
+static_assert(kNativeStackAlignment == kStackAlignment);
+
+// Get the size of "out args" for @CriticalNative method stub.
+// This must match the size of the frame emitted by the JNI compiler at the native call site.
+inline size_t GetCriticalNativeOutArgsSize(const char* shorty, uint32_t shorty_len) {
+  DCHECK_EQ(shorty_len, strlen(shorty));
+
+  size_t num_long_or_double_args = 0u;
+  for (size_t i = 1; i != shorty_len; ++i) {
+    if (shorty[i] == 'J' || shorty[i] == 'D') {
+      num_long_or_double_args += 1u;
+    }
+  }
+  size_t num_arg_words = shorty_len - 1u + num_long_or_double_args;
+
+  // The size of outgoing arguments.
+  size_t size = num_arg_words * static_cast<size_t>(kX86PointerSize);
+
+  // Add return address size.
+  size += kFramePointerSize;
+  // We can make a tail call if there are no stack args and the return type is not
+  // FP type (needs moving from ST0 to MMX0) and we do not need to extend the result.
+  bool return_type_ok = shorty[0] == 'I' || shorty[0] == 'J' || shorty[0] == 'V';
+  if (return_type_ok && size == kFramePointerSize) {
+    return kFramePointerSize;
+  }
+
+  return RoundUp(size, kNativeStackAlignment);
+}
+
+}  // namespace x86
+}  // namespace art
+
+#endif  // ART_RUNTIME_ARCH_X86_JNI_FRAME_X86_H_
+
diff --git a/runtime/arch/x86/memcmp16_x86.S b/runtime/arch/x86/memcmp16_x86.S
index a315a37..bd33a62 100644
--- a/runtime/arch/x86/memcmp16_x86.S
+++ b/runtime/arch/x86/memcmp16_x86.S
@@ -40,7 +40,7 @@
 #define BLK2        BLK1+4
 #define LEN        BLK2+4
 #define RETURN_END    POP (%edi); POP (%esi); POP (%ebx); ret
-#define RETURN        RETURN_END; CFI_RESTORE_STATE; CFI_REMEMBER_STATE
+#define RETURN        RETURN_END; CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16); CFI_REMEMBER_STATE
 
 DEFINE_FUNCTION MEMCMP
     movl       LEN(%esp), %ecx
@@ -131,7 +131,7 @@
     POP        (%esi)
     jmp        L(less48bytes)
 
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
     CFI_REMEMBER_STATE
     .p2align 4
 L(shr_0_gobble):
@@ -177,7 +177,7 @@
     POP        (%esi)
     jmp        L(less48bytes)
 
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
     CFI_REMEMBER_STATE
     .p2align 4
 L(shr_2):
@@ -207,7 +207,7 @@
     POP        (%esi)
     jmp        L(less48bytes)
 
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
     CFI_REMEMBER_STATE
     .p2align 4
 L(shr_2_gobble):
@@ -260,7 +260,7 @@
     POP        (%esi)
     jmp        L(less48bytes)
 
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
     CFI_REMEMBER_STATE
     .p2align 4
 L(shr_4):
@@ -290,7 +290,7 @@
     POP        (%esi)
     jmp        L(less48bytes)
 
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
     CFI_REMEMBER_STATE
     .p2align 4
 L(shr_4_gobble):
@@ -343,7 +343,7 @@
     POP        (%esi)
     jmp        L(less48bytes)
 
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
     CFI_REMEMBER_STATE
     .p2align 4
 L(shr_6):
@@ -373,7 +373,7 @@
     POP        (%esi)
     jmp        L(less48bytes)
 
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
     CFI_REMEMBER_STATE
     .p2align 4
 L(shr_6_gobble):
@@ -426,7 +426,7 @@
     POP        (%esi)
     jmp        L(less48bytes)
 
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
     CFI_REMEMBER_STATE
     .p2align 4
 L(shr_8):
@@ -456,7 +456,7 @@
     POP        (%esi)
     jmp        L(less48bytes)
 
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
     CFI_REMEMBER_STATE
     .p2align 4
 L(shr_8_gobble):
@@ -509,7 +509,7 @@
     POP        (%esi)
     jmp        L(less48bytes)
 
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
     CFI_REMEMBER_STATE
     .p2align 4
 L(shr_10):
@@ -539,7 +539,7 @@
     POP        (%esi)
     jmp        L(less48bytes)
 
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
     CFI_REMEMBER_STATE
     .p2align 4
 L(shr_10_gobble):
@@ -592,7 +592,7 @@
     POP        (%esi)
     jmp        L(less48bytes)
 
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
     CFI_REMEMBER_STATE
     .p2align 4
 L(shr_12):
@@ -622,7 +622,7 @@
     POP        (%esi)
     jmp        L(less48bytes)
 
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
     CFI_REMEMBER_STATE
     .p2align 4
 L(shr_12_gobble):
@@ -675,7 +675,7 @@
     POP        (%esi)
     jmp        L(less48bytes)
 
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
     CFI_REMEMBER_STATE
     .p2align 4
 L(shr_14):
@@ -705,7 +705,7 @@
     POP        (%esi)
     jmp        L(less48bytes)
 
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
     CFI_REMEMBER_STATE
     .p2align 4
 L(shr_14_gobble):
@@ -758,7 +758,7 @@
     POP        (%esi)
     jmp        L(less48bytes)
 
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
     CFI_REMEMBER_STATE
     .p2align 4
 L(exit):
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 306c4eb..4abdf70 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -120,20 +120,7 @@
      * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs)
      */
 MACRO2(SETUP_SAVE_REFS_AND_ARGS_FRAME, got_reg, temp_reg)
-    PUSH edi  // Save callee saves
-    PUSH esi
-    PUSH ebp
-    PUSH ebx  // Save args
-    PUSH edx
-    PUSH ecx
-    // Create space for FPR args.
-    subl MACRO_LITERAL(4 * 8), %esp
-    CFI_ADJUST_CFA_OFFSET(4 * 8)
-    // Save FPRs.
-    movsd %xmm0, 0(%esp)
-    movsd %xmm1, 8(%esp)
-    movsd %xmm2, 16(%esp)
-    movsd %xmm3, 24(%esp)
+    SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
 
     SETUP_GOT_NOSAVE RAW_VAR(got_reg)
     // Load Runtime::instance_ from GOT.
@@ -144,12 +131,6 @@
     CFI_ADJUST_CFA_OFFSET(4)
     // Store esp as the stop quick frame.
     movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
-
-    // Ugly compile-time check, but we only have the preprocessor.
-    // Last +4: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 7*4 + 4*8 + 4)
-#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(X86) size not as expected."
-#endif
 END_MACRO
 
     /*
@@ -157,47 +138,14 @@
      * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs) where the method is passed in EAX.
      */
 MACRO0(SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_EAX)
-    // Save callee and GPR args, mixed together to agree with core spills bitmap.
-    PUSH edi  // Save callee saves
-    PUSH esi
-    PUSH ebp
-    PUSH ebx  // Save args
-    PUSH edx
-    PUSH ecx
+    SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
 
-    // Create space for FPR args.
-    subl MACRO_LITERAL(32), %esp
-    CFI_ADJUST_CFA_OFFSET(32)
-
-    // Save FPRs.
-    movsd %xmm0, 0(%esp)
-    movsd %xmm1, 8(%esp)
-    movsd %xmm2, 16(%esp)
-    movsd %xmm3, 24(%esp)
-
-    PUSH eax  // Store the ArtMethod reference at the bottom of the stack.
+    pushl %eax  // Store the ArtMethod reference at the bottom of the stack.
+    CFI_ADJUST_CFA_OFFSET(4)
     // Store esp as the stop quick frame.
     movl %esp, %fs:THREAD_TOP_QUICK_FRAME_OFFSET
 END_MACRO
 
-MACRO0(RESTORE_SAVE_REFS_AND_ARGS_FRAME)
-    // Restore FPRs. EAX is still on the stack.
-    movsd 4(%esp), %xmm0
-    movsd 12(%esp), %xmm1
-    movsd 20(%esp), %xmm2
-    movsd 28(%esp), %xmm3
-
-    addl MACRO_LITERAL(36), %esp  // Remove FPRs and EAX.
-    CFI_ADJUST_CFA_OFFSET(-36)
-
-    POP ecx                       // Restore args except eax
-    POP edx
-    POP ebx
-    POP ebp                       // Restore callee saves
-    POP esi
-    POP edi
-END_MACRO
-
 // Restore register and jump to routine
 // Inputs:  EDI contains pointer to code.
 // Notes: Need to pop EAX too (restores Method*)
@@ -331,20 +279,6 @@
 
     /*
      * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
-     * exception is Thread::Current()->exception_ when the runtime method frame is ready.
-     */
-MACRO0(DELIVER_PENDING_EXCEPTION_FRAME_READY)
-    // Outgoing argument set up
-    subl MACRO_LITERAL(12), %esp               // alignment padding
-    CFI_ADJUST_CFA_OFFSET(12)
-    pushl %fs:THREAD_SELF_OFFSET               // pass Thread::Current()
-    CFI_ADJUST_CFA_OFFSET(4)
-    call SYMBOL(artDeliverPendingExceptionFromCode)  // artDeliverPendingExceptionFromCode(Thread*)
-    UNREACHABLE
-END_MACRO
-
-    /*
-     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
      * exception is Thread::Current()->exception_.
      */
 MACRO0(DELIVER_PENDING_EXCEPTION)
@@ -944,8 +878,7 @@
     CFI_REMEMBER_STATE
     RESTORE_SAVE_EVERYTHING_FRAME_KEEP_EAX            // restore frame up to return address
     ret                                               // return
-    CFI_RESTORE_STATE
-    CFI_DEF_CFA(esp, FRAME_SIZE_SAVE_EVERYTHING)      // workaround for clang bug: 31975598
+    CFI_RESTORE_STATE_AND_DEF_CFA(esp, FRAME_SIZE_SAVE_EVERYTHING)
 1:
     DELIVER_PENDING_EXCEPTION_FRAME_READY
     END_FUNCTION VAR(c_name)
@@ -1836,7 +1769,7 @@
     POP ESI
     POP EDI
     jmp *ART_METHOD_QUICK_CODE_OFFSET_32(%eax)
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
 .Limt_table_next_entry:
     // If the entry is null, the interface method is not in the ImtConflictTable.
     cmpl LITERAL(0), 0(%edi)
@@ -1852,7 +1785,7 @@
     POP ESI
     POP EDI
     INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(esp, 16)
 .Limt_conflict_trampoline_dex_cache_miss:
     // We're not creating a proper runtime method frame here,
     // artLookupResolvedMethod() is not allowed to walk the stack.
@@ -1920,30 +1853,38 @@
     CFI_DEF_CFA_REGISTER(ebp)
     subl LITERAL(5120), %esp
     // prepare for artQuickGenericJniTrampoline call
-    // (Thread*,  SP)
-    //  (esp)    4(esp)   <= C calling convention
-    //  fs:...    ebp     <= where they are
+    // (Thread*, managed_sp, reserved_area)
+    //   (esp)    4(esp)        8(esp)  <= C calling convention
+    //  fs:...      ebp           esp   <= where they are
 
-    subl LITERAL(8), %esp         // Padding for 16B alignment.
-    pushl %ebp                    // Pass SP (to ArtMethod).
+    movl %esp, %eax
+    subl LITERAL(4), %esp         // Padding for 16B alignment.
+    pushl %eax                    // Pass reserved area.
+    pushl %ebp                    // Pass managed frame SP.
     pushl %fs:THREAD_SELF_OFFSET  // Pass Thread::Current().
     call SYMBOL(artQuickGenericJniTrampoline)  // (Thread*, sp)
 
     // The C call will have registered the complete save-frame on success.
     // The result of the call is:
-    // eax: pointer to native code, 0 on error.
-    // edx: pointer to the bottom of the used area of the alloca, can restore stack till there.
+    //     eax: pointer to native code, 0 on error.
+    //     The bottom of the reserved area contains values for arg registers,
+    //     hidden arg register and SP for out args for the call.
 
-    // Check for error = 0.
+    // Check for error (class init check or locking for synchronized native method can throw).
     test %eax, %eax
     jz .Lexception_in_native
 
-    // Release part of the alloca.
-    movl %edx, %esp
+    // On x86 there are no registers passed, so no native call args to pop here.
 
-    // On x86 there are no registers passed, so nothing to pop here.
+    // Save code pointer in EDX.
+    movl %eax, %edx
+    // Load hidden arg (EAX) for @CriticalNative.
+    movl 16(%esp), %eax
+    // Load SP for out args, releasing unneeded reserved area.
+    movl 20(%esp), %esp
+
     // Native call.
-    call *%eax
+    call *%edx
 
     // result sign extension is handled in C code
     // prepare for artQuickGenericJniEndTrampoline call
@@ -2246,6 +2187,25 @@
     ret
 END_FUNCTION art_quick_string_compareto
 
+DEFINE_FUNCTION art_quick_string_builder_append
+    SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx       // save ref containing registers for GC
+    // Outgoing argument set up
+    leal FRAME_SIZE_SAVE_REFS_ONLY + __SIZEOF_POINTER__(%esp), %edi  // prepare args
+    push %eax                                 // push padding
+    CFI_ADJUST_CFA_OFFSET(4)
+    pushl %fs:THREAD_SELF_OFFSET              // pass Thread::Current()
+    CFI_ADJUST_CFA_OFFSET(4)
+    push %edi                                 // pass args
+    CFI_ADJUST_CFA_OFFSET(4)
+    push %eax                                 // pass format
+    CFI_ADJUST_CFA_OFFSET(4)
+    call SYMBOL(artStringBuilderAppend)       // (uint32_t, const uint32_t*, Thread*)
+    addl MACRO_LITERAL(16), %esp              // pop arguments
+    CFI_ADJUST_CFA_OFFSET(-16)
+    RESTORE_SAVE_REFS_ONLY_FRAME              // restore frame up to return address
+    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER   // return or deliver exception
+END_FUNCTION art_quick_string_builder_append
+
 // Create a function `name` calling the ReadBarrier::Mark routine,
 // getting its argument and returning its result through register
 // `reg`, saving and restoring all caller-save registers.
@@ -2411,23 +2371,11 @@
     POP ebx
     POP ebp
     mov 16(%esp), %ecx            // Get JValue result
-    mov %eax, (%ecx)              // Store the result assuming it is a long, int or Object*
-    mov %edx, 4(%ecx)             // Store the other half of the result
-    mov 20(%esp), %edx            // Get the shorty
-    cmpb LITERAL(68), (%edx)      // Test if result type char == 'D'
-    je .Losr_return_double_quick
-    cmpb LITERAL(70), (%edx)      // Test if result type char == 'F'
-    je .Losr_return_float_quick
-    ret
-.Losr_return_double_quick:
-    movsd %xmm0, (%ecx)           // Store the floating point result
-    ret
-.Losr_return_float_quick:
-    movss %xmm0, (%ecx)           // Store the floating point result
+    mov %eax, (%ecx)              // Store the result.
+    mov %edx, 4(%ecx)             // Store the other half of the result.
     ret
 .Losr_entry:
-    CFI_RESTORE_STATE
-    CFI_DEF_CFA(ebp, SAVE_SIZE)   // CFA = ebp + SAVE_SIZE
+    CFI_RESTORE_STATE_AND_DEF_CFA(ebp, SAVE_SIZE)  // CFA = ebp + SAVE_SIZE
     subl LITERAL(4), %ecx         // Given stack size contains pushed frame pointer, substract it.
     subl %ecx, %esp
     mov %esp, %edi                // EDI = beginning of stack
@@ -2502,5 +2450,79 @@
     ret
 END_FUNCTION ExecuteSwitchImplAsm
 
+// On entry: eax is the class, ebp is the inline cache.
+DEFINE_FUNCTION art_quick_update_inline_cache
+#if (INLINE_CACHE_SIZE != 5)
+#error "INLINE_CACHE_SIZE not as expected."
+#endif
+    // Don't update the cache if we are marking.
+    cmpl LITERAL(0), %fs:THREAD_IS_GC_MARKING_OFFSET
+    jnz .Lret
+    PUSH ecx
+    movl %eax, %ecx // eax will be used for cmpxchg
+.Lentry1:
+    movl INLINE_CACHE_CLASSES_OFFSET(%ebp), %eax
+    cmpl %ecx, %eax
+    je .Ldone
+    cmpl LITERAL(0), %eax
+    jne .Lentry2
+    lock cmpxchg %ecx, INLINE_CACHE_CLASSES_OFFSET(%ebp)
+    jz .Ldone
+    jmp .Lentry1
+.Lentry2:
+    movl (INLINE_CACHE_CLASSES_OFFSET+4)(%ebp), %eax
+    cmpl %ecx, %eax
+    je .Ldone
+    cmpl LITERAL(0), %eax
+    jne .Lentry3
+    lock cmpxchg %ecx, (INLINE_CACHE_CLASSES_OFFSET+4)(%ebp)
+    jz .Ldone
+    jmp .Lentry2
+.Lentry3:
+    movl (INLINE_CACHE_CLASSES_OFFSET+8)(%ebp), %eax
+    cmpl %ecx, %eax
+    je .Ldone
+    cmpl LITERAL(0), %eax
+    jne .Lentry4
+    lock cmpxchg %ecx, (INLINE_CACHE_CLASSES_OFFSET+8)(%ebp)
+    jz .Ldone
+    jmp .Lentry3
+.Lentry4:
+    movl (INLINE_CACHE_CLASSES_OFFSET+12)(%ebp), %eax
+    cmpl %ecx, %eax
+    je .Ldone
+    cmpl LITERAL(0), %eax
+    jne .Lentry5
+    lock cmpxchg %ecx, (INLINE_CACHE_CLASSES_OFFSET+12)(%ebp)
+    jz .Ldone
+    jmp .Lentry4
+.Lentry5:
+    // Unconditionally store, the cache is megamorphic.
+    movl %ecx, (INLINE_CACHE_CLASSES_OFFSET+16)(%ebp)
+.Ldone:
+    // Restore registers
+    movl %ecx, %eax
+    POP ecx
+.Lret:
+    ret
+END_FUNCTION art_quick_update_inline_cache
+
     // TODO: implement these!
 UNIMPLEMENTED art_quick_memcmp16
+
+// On entry, the method is at the bottom of the stack.
+DEFINE_FUNCTION art_quick_compile_optimized
+    SETUP_SAVE_EVERYTHING_FRAME ebx, ebx
+    mov FRAME_SIZE_SAVE_EVERYTHING(%esp), %eax // Fetch ArtMethod
+    sub LITERAL(8), %esp   		       // Alignment padding
+    CFI_ADJUST_CFA_OFFSET(8)
+    pushl %fs:THREAD_SELF_OFFSET               // pass Thread::Current()
+    CFI_ADJUST_CFA_OFFSET(4)
+    pushl %eax
+    CFI_ADJUST_CFA_OFFSET(4)
+    call SYMBOL(artCompileOptimized)           // (ArtMethod*, Thread*)
+    addl LITERAL(16), %esp                     // Pop arguments.
+    CFI_ADJUST_CFA_OFFSET(-16)
+    RESTORE_SAVE_EVERYTHING_FRAME
+    ret
+END_FUNCTION art_quick_compile_optimized
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index 28018c5..6a60a98 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -76,8 +76,13 @@
     #define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg
     #define CFI_RESTORE(reg) .cfi_restore reg
     #define CFI_REL_OFFSET(reg,size) .cfi_rel_offset reg,size
-    #define CFI_RESTORE_STATE .cfi_restore_state
     #define CFI_REMEMBER_STATE .cfi_remember_state
+    // The spec is not clear whether the CFA is part of the saved state and tools
+    // differ in the behaviour, so explicitly set the CFA to avoid any ambiguity.
+    // The restored CFA state should match the CFA state during CFI_REMEMBER_STATE.
+    // `objdump -Wf libart.so | egrep "_cfa|_state"` is useful to audit the opcodes.
+    #define CFI_RESTORE_STATE_AND_DEF_CFA(reg,off) .cfi_restore_state .cfi_def_cfa reg,off
+    #define CFI_RESTORE_STATE .cfi_restore_state
 #else
     // Mac OS' doesn't like cfi_* directives.
     #define CFI_STARTPROC
@@ -87,8 +92,9 @@
     #define CFI_DEF_CFA_REGISTER(reg)
     #define CFI_RESTORE(reg)
     #define CFI_REL_OFFSET(reg,size)
-    #define CFI_RESTORE_STATE
     #define CFI_REMEMBER_STATE
+    #define CFI_RESTORE_STATE_AND_DEF_CFA(reg,off)
+    #define CFI_RESTORE_STATE
 #endif
 
     // Symbols.
@@ -109,7 +115,11 @@
 
     /* Cache alignment for function entry */
 MACRO0(ALIGN_FUNCTION_ENTRY)
-    .balign 16
+    // ART-compiled functions have OatQuickMethodHeader but assembly functions do not.
+    // Prefix the assembly code with 0xFFs, which means there is no method header.
+    .byte 0xFF, 0xFF, 0xFF, 0xFF
+    // Cache alignment for function entry.
+    .balign 16, 0xFF
 END_MACRO
 
 // TODO: we might need to use SYMBOL() here to add the underscore prefix
@@ -146,6 +156,28 @@
     CFI_RESTORE(REG_VAR(reg))
 END_MACRO
 
+// Arguments do not need .cfi_rel_offset as they are caller-saved and
+// therefore cannot hold caller's variables or unwinding data.
+MACRO1(PUSH_ARG, reg)
+    pushq REG_VAR(reg)
+    CFI_ADJUST_CFA_OFFSET(8)
+END_MACRO
+
+MACRO1(POP_ARG, reg)
+    popq REG_VAR(reg)
+    CFI_ADJUST_CFA_OFFSET(-8)
+END_MACRO
+
+MACRO3(SAVE_REG_BASE, base, reg, offset)
+    movq REG_VAR(reg), RAW_VAR(offset)(REG_VAR(base))
+    CFI_REL_OFFSET(REG_VAR(reg), RAW_VAR(offset))
+END_MACRO
+
+MACRO3(RESTORE_REG_BASE, base, reg, offset)
+    movq RAW_VAR(offset)(REG_VAR(base)), REG_VAR(reg)
+    CFI_RESTORE(REG_VAR(reg))
+END_MACRO
+
 MACRO1(UNIMPLEMENTED,name)
     FUNCTION_TYPE(SYMBOL(\name))
     ASM_HIDDEN VAR(name)
@@ -181,4 +213,223 @@
 #endif  // USE_HEAP_POISONING
 END_MACRO
 
+    /*
+     * Macro that sets up the callee save frame to conform with
+     * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly)
+     */
+MACRO0(SETUP_SAVE_REFS_ONLY_FRAME)
+#if defined(__APPLE__)
+    int3
+    int3
+#else
+    // R10 := Runtime::Current()
+    movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
+    movq (%r10), %r10
+    // Save callee and GPR args, mixed together to agree with core spills bitmap.
+    PUSH r15  // Callee save.
+    PUSH r14  // Callee save.
+    PUSH r13  // Callee save.
+    PUSH r12  // Callee save.
+    PUSH rbp  // Callee save.
+    PUSH rbx  // Callee save.
+    // Create space for FPR args, plus space for ArtMethod*.
+    subq LITERAL(8 + 4 * 8), %rsp
+    CFI_ADJUST_CFA_OFFSET(8 + 4 * 8)
+    // Save FPRs.
+    movq %xmm12, 8(%rsp)
+    movq %xmm13, 16(%rsp)
+    movq %xmm14, 24(%rsp)
+    movq %xmm15, 32(%rsp)
+    // R10 := ArtMethod* for refs only callee save frame method.
+    movq RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET(%r10), %r10
+    // Store ArtMethod* to bottom of stack.
+    movq %r10, 0(%rsp)
+    // Store rsp as the stop quick frame.
+    movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
+
+    // Ugly compile-time check, but we only have the preprocessor.
+    // Last +8: implicit return address pushed on stack when caller made call.
+#if (FRAME_SIZE_SAVE_REFS_ONLY != 6 * 8 + 4 * 8 + 8 + 8)
+#error "FRAME_SIZE_SAVE_REFS_ONLY(X86_64) size not as expected."
+#endif
+#endif  // __APPLE__
+END_MACRO
+
+MACRO0(RESTORE_SAVE_REFS_ONLY_FRAME)
+    movq 8(%rsp), %xmm12
+    movq 16(%rsp), %xmm13
+    movq 24(%rsp), %xmm14
+    movq 32(%rsp), %xmm15
+    addq LITERAL(8 + 4*8), %rsp
+    CFI_ADJUST_CFA_OFFSET(-8 - 4*8)
+    // TODO: optimize by not restoring callee-saves restored by the ABI
+    POP rbx
+    POP rbp
+    POP r12
+    POP r13
+    POP r14
+    POP r15
+END_MACRO
+
+    /*
+     * Macro that sets up the callee save frame to conform with
+     * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs), except for storing the method.
+     */
+MACRO0(SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY)
+    // Save callee and GPR args, mixed together to agree with core spills bitmap.
+    PUSH r15      // Callee save.
+    PUSH r14      // Callee save.
+    PUSH r13      // Callee save.
+    PUSH r12      // Callee save.
+    PUSH_ARG r9   // Quick arg 5.
+    PUSH_ARG r8   // Quick arg 4.
+    PUSH_ARG rsi  // Quick arg 1.
+    PUSH rbp      // Callee save.
+    PUSH rbx      // Callee save.
+    PUSH_ARG rdx  // Quick arg 2.
+    PUSH_ARG rcx  // Quick arg 3.
+    // Create space for FPR args and create 2 slots for ArtMethod*.
+    subq MACRO_LITERAL(16 + 12 * 8), %rsp
+    CFI_ADJUST_CFA_OFFSET(16 + 12 * 8)
+    // Save FPRs.
+    movq %xmm0, 16(%rsp)
+    movq %xmm1, 24(%rsp)
+    movq %xmm2, 32(%rsp)
+    movq %xmm3, 40(%rsp)
+    movq %xmm4, 48(%rsp)
+    movq %xmm5, 56(%rsp)
+    movq %xmm6, 64(%rsp)
+    movq %xmm7, 72(%rsp)
+    movq %xmm12, 80(%rsp)
+    movq %xmm13, 88(%rsp)
+    movq %xmm14, 96(%rsp)
+    movq %xmm15, 104(%rsp)
+
+    // Ugly compile-time check, but we only have the preprocessor.
+    // Last +8: implicit return address pushed on stack when caller made call.
+#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 11 * 8 + 12 * 8 + 16 + 8)
+#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(X86_64) size not as expected."
+#endif
+END_MACRO
+
+MACRO0(RESTORE_SAVE_REFS_AND_ARGS_FRAME)
+    // Restore FPRs.
+    movq 16(%rsp), %xmm0
+    movq 24(%rsp), %xmm1
+    movq 32(%rsp), %xmm2
+    movq 40(%rsp), %xmm3
+    movq 48(%rsp), %xmm4
+    movq 56(%rsp), %xmm5
+    movq 64(%rsp), %xmm6
+    movq 72(%rsp), %xmm7
+    movq 80(%rsp), %xmm12
+    movq 88(%rsp), %xmm13
+    movq 96(%rsp), %xmm14
+    movq 104(%rsp), %xmm15
+    addq MACRO_LITERAL(80 + 4 * 8), %rsp
+    CFI_ADJUST_CFA_OFFSET(-(80 + 4 * 8))
+    // Restore callee and GPR args, mixed together to agree with core spills bitmap.
+    POP_ARG rcx
+    POP_ARG rdx
+    POP rbx
+    POP rbp
+    POP_ARG rsi
+    POP_ARG r8
+    POP_ARG r9
+    POP r12
+    POP r13
+    POP r14
+    POP r15
+END_MACRO
+
+    /*
+     * Macro that sets up the callee save frame to conform with
+     * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
+     */
+MACRO0(SETUP_SAVE_ALL_CALLEE_SAVES_FRAME)
+#if defined(__APPLE__)
+    int3
+    int3
+#else
+    // R10 := Runtime::Current()
+    movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
+    movq (%r10), %r10
+    // Save callee save registers to agree with core spills bitmap.
+    PUSH r15  // Callee save.
+    PUSH r14  // Callee save.
+    PUSH r13  // Callee save.
+    PUSH r12  // Callee save.
+    PUSH rbp  // Callee save.
+    PUSH rbx  // Callee save.
+    // Create space for FPR args, plus space for ArtMethod*.
+    subq MACRO_LITERAL(4 * 8 + 8), %rsp
+    CFI_ADJUST_CFA_OFFSET(4 * 8 + 8)
+    // Save FPRs.
+    movq %xmm12, 8(%rsp)
+    movq %xmm13, 16(%rsp)
+    movq %xmm14, 24(%rsp)
+    movq %xmm15, 32(%rsp)
+    // R10 := ArtMethod* for save all callee save frame method.
+    movq RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET(%r10), %r10
+    // Store ArtMethod* to bottom of stack.
+    movq %r10, 0(%rsp)
+    // Store rsp as the top quick frame.
+    movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
+
+    // Ugly compile-time check, but we only have the preprocessor.
+    // Last +8: implicit return address pushed on stack when caller made call.
+#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 6 * 8 + 4 * 8 + 8 + 8)
+#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(X86_64) size not as expected."
+#endif
+#endif  // __APPLE__
+END_MACRO
+
+MACRO0(SETUP_FP_CALLEE_SAVE_FRAME)
+    // Create space for ART FP callee-saved registers
+    subq MACRO_LITERAL(4 * 8), %rsp
+    CFI_ADJUST_CFA_OFFSET(4 * 8)
+    movq %xmm12, 0(%rsp)
+    movq %xmm13, 8(%rsp)
+    movq %xmm14, 16(%rsp)
+    movq %xmm15, 24(%rsp)
+END_MACRO
+
+MACRO0(RESTORE_FP_CALLEE_SAVE_FRAME)
+    // Restore ART FP callee-saved registers
+    movq 0(%rsp), %xmm12
+    movq 8(%rsp), %xmm13
+    movq 16(%rsp), %xmm14
+    movq 24(%rsp), %xmm15
+    addq MACRO_LITERAL(4 * 8), %rsp
+    CFI_ADJUST_CFA_OFFSET(- 4 * 8)
+END_MACRO
+
+    /*
+     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+     * exception is Thread::Current()->exception_ when the runtime method frame is ready.
+     */
+MACRO0(DELIVER_PENDING_EXCEPTION_FRAME_READY)
+    // (Thread*) setup
+    movq %gs:THREAD_SELF_OFFSET, %rdi
+    call SYMBOL(artDeliverPendingExceptionFromCode)  // artDeliverPendingExceptionFromCode(Thread*)
+    UNREACHABLE
+END_MACRO
+    /*
+     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+     * exception is Thread::Current()->exception_.
+     */
+MACRO0(DELIVER_PENDING_EXCEPTION)
+    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME        // save callee saves for throw
+    DELIVER_PENDING_EXCEPTION_FRAME_READY
+END_MACRO
+
+MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION)
+    movq %gs:THREAD_EXCEPTION_OFFSET, %rcx // get exception field
+    testq %rcx, %rcx               // rcx == 0 ?
+    jnz 1f                         // if rcx != 0 goto 1
+    ret                            // return
+1:                                 // deliver exception on current thread
+    DELIVER_PENDING_EXCEPTION
+END_MACRO
+
 #endif  // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_S_
diff --git a/runtime/arch/x86_64/context_x86_64.h b/runtime/arch/x86_64/context_x86_64.h
index ab38614..1e2658c 100644
--- a/runtime/arch/x86_64/context_x86_64.h
+++ b/runtime/arch/x86_64/context_x86_64.h
@@ -45,6 +45,10 @@
     rip_ = new_pc;
   }
 
+  void SetNterpDexPC(uintptr_t dex_pc_ptr) override {
+    SetGPR(R12, dex_pc_ptr);
+  }
+
   void SetArg0(uintptr_t new_arg0_value) override {
     SetGPR(RDI, new_arg0_value);
   }
diff --git a/runtime/arch/x86_64/jni_entrypoints_x86_64.S b/runtime/arch/x86_64/jni_entrypoints_x86_64.S
index f6736df..e1b8e52 100644
--- a/runtime/arch/x86_64/jni_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/jni_entrypoints_x86_64.S
@@ -20,16 +20,16 @@
      * Jni dlsym lookup stub.
      */
 DEFINE_FUNCTION art_jni_dlsym_lookup_stub
-    // Save callee and GPR args, mixed together to agree with core spills bitmap.
-    PUSH r9   // Arg.
-    PUSH r8   // Arg.
-    PUSH rdi  // JniEnv.
-    PUSH rsi  // Arg.
-    PUSH rdx  // Arg.
-    PUSH rcx  // Arg.
+    // Save callee and GPR args.
+    PUSH_ARG r9   // Arg.
+    PUSH_ARG r8   // Arg.
+    PUSH_ARG rdi  // Arg. (JniEnv for normal and @FastNative)
+    PUSH_ARG rsi  // Arg.
+    PUSH_ARG rdx  // Arg.
+    PUSH_ARG rcx  // Arg.
     // Create space for FPR args, plus padding for alignment
-    subq LITERAL(72 + 4 * 8), %rsp
-    CFI_ADJUST_CFA_OFFSET(72 + 4 * 8)
+    subq LITERAL(72), %rsp
+    CFI_ADJUST_CFA_OFFSET(72)
     // Save FPRs.
     movq %xmm0, 0(%rsp)
     movq %xmm1, 8(%rsp)
@@ -39,14 +39,21 @@
     movq %xmm5, 40(%rsp)
     movq %xmm6, 48(%rsp)
     movq %xmm7, 56(%rsp)
-    movq %xmm12, 64(%rsp)
-    movq %xmm13, 72(%rsp)
-    movq %xmm14, 80(%rsp)
-    movq %xmm15, 88(%rsp)
     // prepare call
     movq %gs:THREAD_SELF_OFFSET, %rdi      // RDI := Thread::Current()
-    // call
-    call PLT_SYMBOL(artFindNativeMethod)  // (Thread*)
+    // Call artFindNativeMethod() for normal native and artFindNativeMethodRunnable()
+    // for @FastNative or @CriticalNative.
+    movq THREAD_TOP_QUICK_FRAME_OFFSET(%rdi), %rax   // uintptr_t tagged_quick_frame
+    andq LITERAL(0xfffffffffffffffe), %rax           // ArtMethod** sp
+    movq (%rax), %rax                                // ArtMethod* method
+    testl LITERAL(ACCESS_FLAGS_METHOD_IS_FAST_NATIVE | ACCESS_FLAGS_METHOD_IS_CRITICAL_NATIVE), \
+          ART_METHOD_ACCESS_FLAGS_OFFSET(%rax)
+    jne .Llookup_stub_fast_native
+    call SYMBOL(artFindNativeMethod)  // (Thread*)
+    jmp .Llookup_stub_continue
+.Llookup_stub_fast_native:
+    call SYMBOL(artFindNativeMethodRunnable)  // (Thread*)
+.Llookup_stub_continue:
     // restore arguments
     movq 0(%rsp), %xmm0
     movq 8(%rsp), %xmm1
@@ -56,21 +63,200 @@
     movq 40(%rsp), %xmm5
     movq 48(%rsp), %xmm6
     movq 56(%rsp), %xmm7
-    movq 64(%rsp), %xmm12
-    movq 72(%rsp), %xmm13
-    movq 80(%rsp), %xmm14
-    movq 88(%rsp), %xmm15
-    addq LITERAL(72 + 4 * 8), %rsp
-    CFI_ADJUST_CFA_OFFSET(-72 - 4 * 8)
-    POP rcx  // Arg.
-    POP rdx  // Arg.
-    POP rsi  // Arg.
-    POP rdi  // JniEnv.
-    POP r8   // Arg.
-    POP r9   // Arg.
-    testq %rax, %rax         // check if returned method code is null
+    addq LITERAL(72), %rsp
+    CFI_ADJUST_CFA_OFFSET(-72)
+    POP_ARG rcx  // Arg.
+    POP_ARG rdx  // Arg.
+    POP_ARG rsi  // Arg.
+    POP_ARG rdi  // Arg. (JniEnv for normal and @FastNative)
+    POP_ARG r8   // Arg.
+    POP_ARG r9   // Arg.
+    testq %rax, %rax              // check if returned method code is null
     jz .Lno_native_code_found     // if null, jump to return to handle
     jmp *%rax                     // otherwise, tail call to intended method
 .Lno_native_code_found:
     ret
 END_FUNCTION art_jni_dlsym_lookup_stub
+
+DEFINE_FUNCTION art_jni_dlsym_lookup_critical_stub
+    // The hidden arg holding the tagged method (bit 0 set means GenericJNI) is r11.
+    // For Generic JNI we already have a managed frame, so we reuse the art_jni_dlsym_lookup_stub.
+    testq LITERAL(1), %r11
+    jnz art_jni_dlsym_lookup_stub
+
+    // We need to create a GenericJNI managed frame above the stack args.
+
+    // GenericJNI frame is similar to SaveRegsAndArgs frame with the native method
+    // instead of runtime method saved at the bottom.
+
+    // As we always have "stack args" on x86-64 (due to xmm12-xmm15 being callee-save
+    // in managed ABI but caller-save in native ABI), do not create a proper frame yet
+    // as we do on other architectures where it's useful for no stack args case.
+
+    // Reserve space for the frame (return PC is on stack).
+    subq MACRO_LITERAL(FRAME_SIZE_SAVE_REFS_AND_ARGS - __SIZEOF_POINTER__), %rsp
+    CFI_ADJUST_CFA_OFFSET(FRAME_SIZE_SAVE_REFS_AND_ARGS - __SIZEOF_POINTER__)
+
+    // Save GPR args.
+    PUSH_ARG r9
+    PUSH_ARG r8
+    PUSH_ARG rdi
+    PUSH_ARG rsi
+    PUSH_ARG rdx
+    PUSH_ARG rcx
+    // Create space for FPR args.
+    subq LITERAL(64), %rsp
+    CFI_ADJUST_CFA_OFFSET(64)
+    // Save FPRs.
+    movq %xmm0, 0(%rsp)
+    movq %xmm1, 8(%rsp)
+    movq %xmm2, 16(%rsp)
+    movq %xmm3, 24(%rsp)
+    movq %xmm4, 32(%rsp)
+    movq %xmm5, 40(%rsp)
+    movq %xmm6, 48(%rsp)
+    movq %xmm7, 56(%rsp)
+
+    // Add alignment padding.
+    subq MACRO_LITERAL(__SIZEOF_POINTER__), %rsp
+    CFI_ADJUST_CFA_OFFSET(__SIZEOF_POINTER__)
+    // Save hidden arg.
+    PUSH_ARG r11
+
+    // Call artCriticalNativeOutArgsSize(method).
+    movq %r11, %rdi  // Pass the method from hidden arg.
+    call SYMBOL(artCriticalNativeOutArgsSize)
+
+    // Calculate the address of the end of the move destination and redefine CFI to take
+    // ownership of the JNI stub frame.
+    leaq 16 * __SIZEOF_POINTER__(%rsp, %rax, 1), %r10  // 16 QWORDs of registers saved above.
+    CFI_DEF_CFA(%r10, FRAME_SIZE_SAVE_REFS_AND_ARGS)
+
+    // Calculate the number of QWORDs to move.
+    shrq LITERAL(3), %rax
+    leaq -1(%rax), %rcx  // Do not move the return PC.
+
+    // Load our return PC to RAX.
+    movq FRAME_SIZE_SAVE_REFS_AND_ARGS + (16 - 1) * __SIZEOF_POINTER__(%rsp), %rax
+
+    // Mov the stack args.
+    leaq 16 * __SIZEOF_POINTER__(%rsp), %rdi
+    leaq FRAME_SIZE_SAVE_REFS_AND_ARGS(%rdi), %rsi
+    rep movsq
+
+    // Save our return PC.
+    movq %rax, (%rdi)
+
+    // Pop the hidden arg and alignment padding.
+    popq %r11    // No `.cfi_adjust_cfa_offset`, CFA register is currently R10, not RSP.
+    addq MACRO_LITERAL(__SIZEOF_POINTER__), %rsp  // ditto
+
+    // Fill the SaveRefsAndArgs frame above the args, without actual args. Note that
+    // the runtime shall not examine the args here, otherwise we would have to move them in
+    // registers and stack to account for the difference between managed and native ABIs.
+    SAVE_REG_BASE r10, r15, 192
+    SAVE_REG_BASE r10, r14, 184
+    SAVE_REG_BASE r10, r13, 176
+    SAVE_REG_BASE r10, r12, 168
+    // Skip args r9, r8, rsi.
+    SAVE_REG_BASE r10, rbp, 136
+    SAVE_REG_BASE r10, rbx, 128
+    // Skip args rdx, rcx.
+    // Skip args xmm0-xmm7.
+    // Copy managed callee-saves xmm12-xmm15 from out args to the managed frame as they
+    // may theoretically store variables or unwinding data. (The compiled stub preserves
+    // them but the artCriticalNativeOutArgsSize() call above may clobber them.)
+    movq -5 * __SIZEOF_POINTER__(%r10), %xmm12
+    movq -4 * __SIZEOF_POINTER__(%r10), %xmm13
+    movq -3 * __SIZEOF_POINTER__(%r10), %xmm14
+    movq -2 * __SIZEOF_POINTER__(%r10), %xmm15
+    movq %xmm12, 80(%r10)
+    movq %xmm13, 88(%r10)
+    movq %xmm14, 96(%r10)
+    movq %xmm15, 104(%r10)
+    // Save the hidden arg as method pointer at the bottom of the stack.
+    movq %r11, (%r10)
+
+    // Move the frame register to a callee-save register.
+    movq %r10, %rbp
+    CFI_DEF_CFA_REGISTER(%rbp)
+
+    // Place tagged managed sp in Thread::Current()->top_quick_frame.
+    leaq 1(%rbp), %rax  // Tag as GenericJNI frame.
+    movq %rax, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
+
+    // Call artFindNativeMethodRunnable()
+    movq %gs:THREAD_SELF_OFFSET, %rdi  // pass Thread::Current()
+    call SYMBOL(artFindNativeMethodRunnable)  // (Thread*)
+
+    // Check for exception.
+    test %rax, %rax
+    jz 2f
+
+    // Restore the frame. We shall not need the method anymore.
+    .cfi_remember_state
+    movq %rbp, %r10
+    CFI_DEF_CFA_REGISTER(%r10)
+    // Skip args xmm0-xmm7 and managed callee-saves xmm12-xmm15 (not needed for native call).
+    // Skip args rdx, rcx.
+    RESTORE_REG_BASE r10, rbx, 128
+    RESTORE_REG_BASE r10, rbp, 136
+    // Skip args r9, r8, rsi.
+    RESTORE_REG_BASE r10, r12, 168
+    RESTORE_REG_BASE r10, r13, 176
+    RESTORE_REG_BASE r10, r14, 184
+    RESTORE_REG_BASE r10, r15, 192
+
+    // Remember our return PC in R11.
+    movq -__SIZEOF_POINTER__(%r10), %r11
+
+    // Calculate the number of QWORDs to move.
+    leaq -(1 + 14) * __SIZEOF_POINTER__(%r10), %rcx  // Do not move return PC, 14 arg regs saved.
+    subq %rsp, %rcx
+    shrq LITERAL(3), %rcx
+
+    // Mov stack args to their original place.
+    leaq -2 * __SIZEOF_POINTER__(%r10), %rsi
+    leaq FRAME_SIZE_SAVE_REFS_AND_ARGS - 2 * __SIZEOF_POINTER__(%r10), %rdi
+    std
+    rep movsq
+    cld
+
+    // Store our return PC.
+    movq %r11, (%rdi)
+
+    // Redefine CFI to release ownership of the JNI stub frame.
+    CFI_DEF_CFA(%rsp, FRAME_SIZE_SAVE_REFS_AND_ARGS + 14 * __SIZEOF_POINTER__)
+
+    // Restore args.
+    movq 0(%rsp), %xmm0
+    movq 8(%rsp), %xmm1
+    movq 16(%rsp), %xmm2
+    movq 24(%rsp), %xmm3
+    movq 32(%rsp), %xmm4
+    movq 40(%rsp), %xmm5
+    movq 48(%rsp), %xmm6
+    movq 56(%rsp), %xmm7
+    addq LITERAL(64), %rsp
+    CFI_ADJUST_CFA_OFFSET(-64)
+    POP_ARG rcx
+    POP_ARG rdx
+    POP_ARG rsi
+    POP_ARG rdi
+    POP_ARG r8
+    POP_ARG r9
+
+    // Remove the frame reservation.
+    addq LITERAL(FRAME_SIZE_SAVE_REFS_AND_ARGS - __SIZEOF_POINTER__), %rsp
+    CFI_ADJUST_CFA_OFFSET(-(FRAME_SIZE_SAVE_REFS_AND_ARGS - __SIZEOF_POINTER__))
+
+    // Do the tail call.
+    jmp *%rax
+    CFI_RESTORE_STATE_AND_DEF_CFA(%rbp, FRAME_SIZE_SAVE_REFS_AND_ARGS)
+
+2:
+    // Drop the args from the stack (the r11 and padding was already removed).
+    addq LITERAL(14 * __SIZEOF_POINTER__), %rsp
+
+    DELIVER_PENDING_EXCEPTION_FRAME_READY
+END_FUNCTION art_jni_dlsym_lookup_critical_stub
diff --git a/runtime/arch/x86_64/jni_frame_x86_64.h b/runtime/arch/x86_64/jni_frame_x86_64.h
new file mode 100644
index 0000000..65736fe
--- /dev/null
+++ b/runtime/arch/x86_64/jni_frame_x86_64.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_64_JNI_FRAME_X86_64_H_
+#define ART_RUNTIME_ARCH_X86_64_JNI_FRAME_X86_64_H_
+
+#include <string.h>
+
+#include "arch/instruction_set.h"
+#include "base/bit_utils.h"
+#include "base/globals.h"
+#include "base/logging.h"
+
+namespace art {
+namespace x86_64 {
+
+constexpr size_t kFramePointerSize = static_cast<size_t>(PointerSize::k64);
+static_assert(kX86_64PointerSize == PointerSize::k64, "Unexpected x86_64 pointer size");
+
+static constexpr size_t kNativeStackAlignment = 16;
+static_assert(kNativeStackAlignment == kStackAlignment);
+
+// We always have to spill registers xmm12-xmm15 which are callee-save
+// in managed ABI but caller-save in native ABI.
+constexpr size_t kMmxSpillSize = 8u;
+constexpr size_t kAlwaysSpilledMmxRegisters = 4;
+
+// XMM0..XMM7 can be used to pass the first 8 floating args. The rest must go on the stack.
+// -- Managed and JNI calling conventions.
+constexpr size_t kMaxFloatOrDoubleRegisterArguments = 8u;
+// Up to how many integer-like (pointers, objects, longs, int, short, bool, etc) args can be
+// enregistered. The rest of the args must go on the stack.
+// -- JNI calling convention only (Managed excludes RDI, so it's actually 5).
+constexpr size_t kMaxIntLikeRegisterArguments = 6u;
+
+// Get the size of "out args" for @CriticalNative method stub.
+// This must match the size of the frame emitted by the JNI compiler at the native call site.
+inline size_t GetCriticalNativeOutArgsSize(const char* shorty, uint32_t shorty_len) {
+  DCHECK_EQ(shorty_len, strlen(shorty));
+
+  size_t num_fp_args = 0u;
+  for (size_t i = 1; i != shorty_len; ++i) {
+    if (shorty[i] == 'F' || shorty[i] == 'D') {
+      num_fp_args += 1u;
+    }
+  }
+  size_t num_non_fp_args = shorty_len - 1u - num_fp_args;
+
+  // Account for FP arguments passed through Xmm0..Xmm7.
+  size_t num_stack_fp_args =
+      num_fp_args - std::min(kMaxFloatOrDoubleRegisterArguments, num_fp_args);
+  // Account for other (integer) arguments passed through GPR (RDI, RSI, RDX, RCX, R8, R9).
+  size_t num_stack_non_fp_args =
+      num_non_fp_args - std::min(kMaxIntLikeRegisterArguments, num_non_fp_args);
+  // The size of outgoing arguments.
+  static_assert(kFramePointerSize == kMmxSpillSize);
+  size_t size = (num_stack_fp_args + num_stack_non_fp_args) * kFramePointerSize;
+
+  // We always need to spill xmm12-xmm15 as they are managed callee-saves
+  // but not native callee-saves.
+  size += kAlwaysSpilledMmxRegisters * kMmxSpillSize;
+  // Add return address size.
+  size += kFramePointerSize;
+
+  return RoundUp(size, kNativeStackAlignment);
+}
+
+}  // namespace x86_64
+}  // namespace art
+
+#endif  // ART_RUNTIME_ARCH_X86_64_JNI_FRAME_X86_64_H_
+
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 39bf6e8..abc3a8a 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -26,127 +26,8 @@
 #endif
 END_MACRO
 
-MACRO0(SETUP_FP_CALLEE_SAVE_FRAME)
-    // Create space for ART FP callee-saved registers
-    subq MACRO_LITERAL(4 * 8), %rsp
-    CFI_ADJUST_CFA_OFFSET(4 * 8)
-    movq %xmm12, 0(%rsp)
-    movq %xmm13, 8(%rsp)
-    movq %xmm14, 16(%rsp)
-    movq %xmm15, 24(%rsp)
-END_MACRO
-
-MACRO0(RESTORE_FP_CALLEE_SAVE_FRAME)
-    // Restore ART FP callee-saved registers
-    movq 0(%rsp), %xmm12
-    movq 8(%rsp), %xmm13
-    movq 16(%rsp), %xmm14
-    movq 24(%rsp), %xmm15
-    addq MACRO_LITERAL(4 * 8), %rsp
-    CFI_ADJUST_CFA_OFFSET(- 4 * 8)
-END_MACRO
-
 // For x86, the CFA is esp+4, the address above the pushed return address on the stack.
 
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
-     */
-MACRO0(SETUP_SAVE_ALL_CALLEE_SAVES_FRAME)
-#if defined(__APPLE__)
-    int3
-    int3
-#else
-    // R10 := Runtime::Current()
-    movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
-    movq (%r10), %r10
-    // Save callee save registers to agree with core spills bitmap.
-    PUSH r15  // Callee save.
-    PUSH r14  // Callee save.
-    PUSH r13  // Callee save.
-    PUSH r12  // Callee save.
-    PUSH rbp  // Callee save.
-    PUSH rbx  // Callee save.
-    // Create space for FPR args, plus space for ArtMethod*.
-    subq MACRO_LITERAL(4 * 8 + 8), %rsp
-    CFI_ADJUST_CFA_OFFSET(4 * 8 + 8)
-    // Save FPRs.
-    movq %xmm12, 8(%rsp)
-    movq %xmm13, 16(%rsp)
-    movq %xmm14, 24(%rsp)
-    movq %xmm15, 32(%rsp)
-    // R10 := ArtMethod* for save all callee save frame method.
-    movq RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET(%r10), %r10
-    // Store ArtMethod* to bottom of stack.
-    movq %r10, 0(%rsp)
-    // Store rsp as the top quick frame.
-    movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
-
-    // Ugly compile-time check, but we only have the preprocessor.
-    // Last +8: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 6 * 8 + 4 * 8 + 8 + 8)
-#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(X86_64) size not as expected."
-#endif
-#endif  // __APPLE__
-END_MACRO
-
-    /*
-     * Macro that sets up the callee save frame to conform with
-     * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly)
-     */
-MACRO0(SETUP_SAVE_REFS_ONLY_FRAME)
-#if defined(__APPLE__)
-    int3
-    int3
-#else
-    // R10 := Runtime::Current()
-    movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
-    movq (%r10), %r10
-    // Save callee and GPR args, mixed together to agree with core spills bitmap.
-    PUSH r15  // Callee save.
-    PUSH r14  // Callee save.
-    PUSH r13  // Callee save.
-    PUSH r12  // Callee save.
-    PUSH rbp  // Callee save.
-    PUSH rbx  // Callee save.
-    // Create space for FPR args, plus space for ArtMethod*.
-    subq LITERAL(8 + 4 * 8), %rsp
-    CFI_ADJUST_CFA_OFFSET(8 + 4 * 8)
-    // Save FPRs.
-    movq %xmm12, 8(%rsp)
-    movq %xmm13, 16(%rsp)
-    movq %xmm14, 24(%rsp)
-    movq %xmm15, 32(%rsp)
-    // R10 := ArtMethod* for refs only callee save frame method.
-    movq RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET(%r10), %r10
-    // Store ArtMethod* to bottom of stack.
-    movq %r10, 0(%rsp)
-    // Store rsp as the stop quick frame.
-    movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
-
-    // Ugly compile-time check, but we only have the preprocessor.
-    // Last +8: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_SAVE_REFS_ONLY != 6 * 8 + 4 * 8 + 8 + 8)
-#error "FRAME_SIZE_SAVE_REFS_ONLY(X86_64) size not as expected."
-#endif
-#endif  // __APPLE__
-END_MACRO
-
-MACRO0(RESTORE_SAVE_REFS_ONLY_FRAME)
-    movq 8(%rsp), %xmm12
-    movq 16(%rsp), %xmm13
-    movq 24(%rsp), %xmm14
-    movq 32(%rsp), %xmm15
-    addq LITERAL(8 + 4*8), %rsp
-    CFI_ADJUST_CFA_OFFSET(-8 - 4*8)
-    // TODO: optimize by not restoring callee-saves restored by the ABI
-    POP rbx
-    POP rbp
-    POP r12
-    POP r13
-    POP r14
-    POP r15
-END_MACRO
 
     /*
      * Macro that sets up the callee save frame to conform with
@@ -157,117 +38,27 @@
     int3
     int3
 #else
+    SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
     // R10 := Runtime::Current()
     movq _ZN3art7Runtime9instance_E@GOTPCREL(%rip), %r10
     movq (%r10), %r10
-    // Save callee and GPR args, mixed together to agree with core spills bitmap.
-    PUSH r15  // Callee save.
-    PUSH r14  // Callee save.
-    PUSH r13  // Callee save.
-    PUSH r12  // Callee save.
-    PUSH r9   // Quick arg 5.
-    PUSH r8   // Quick arg 4.
-    PUSH rsi  // Quick arg 1.
-    PUSH rbp  // Callee save.
-    PUSH rbx  // Callee save.
-    PUSH rdx  // Quick arg 2.
-    PUSH rcx  // Quick arg 3.
-    // Create space for FPR args and create 2 slots for ArtMethod*.
-    subq MACRO_LITERAL(16 + 12 * 8), %rsp
-    CFI_ADJUST_CFA_OFFSET(16 + 12 * 8)
     // R10 := ArtMethod* for ref and args callee save frame method.
     movq RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET(%r10), %r10
-    // Save FPRs.
-    movq %xmm0, 16(%rsp)
-    movq %xmm1, 24(%rsp)
-    movq %xmm2, 32(%rsp)
-    movq %xmm3, 40(%rsp)
-    movq %xmm4, 48(%rsp)
-    movq %xmm5, 56(%rsp)
-    movq %xmm6, 64(%rsp)
-    movq %xmm7, 72(%rsp)
-    movq %xmm12, 80(%rsp)
-    movq %xmm13, 88(%rsp)
-    movq %xmm14, 96(%rsp)
-    movq %xmm15, 104(%rsp)
     // Store ArtMethod* to bottom of stack.
     movq %r10, 0(%rsp)
     // Store rsp as the top quick frame.
     movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
-
-    // Ugly compile-time check, but we only have the preprocessor.
-    // Last +8: implicit return address pushed on stack when caller made call.
-#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 11 * 8 + 12 * 8 + 16 + 8)
-#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(X86_64) size not as expected."
-#endif
 #endif  // __APPLE__
 END_MACRO
 
 MACRO0(SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_RDI)
-    // Save callee and GPR args, mixed together to agree with core spills bitmap.
-    PUSH r15  // Callee save.
-    PUSH r14  // Callee save.
-    PUSH r13  // Callee save.
-    PUSH r12  // Callee save.
-    PUSH r9   // Quick arg 5.
-    PUSH r8   // Quick arg 4.
-    PUSH rsi  // Quick arg 1.
-    PUSH rbp  // Callee save.
-    PUSH rbx  // Callee save.
-    PUSH rdx  // Quick arg 2.
-    PUSH rcx  // Quick arg 3.
-    // Create space for FPR args and create 2 slots for ArtMethod*.
-    subq LITERAL(80 + 4 * 8), %rsp
-    CFI_ADJUST_CFA_OFFSET(80 + 4 * 8)
-    // Save FPRs.
-    movq %xmm0, 16(%rsp)
-    movq %xmm1, 24(%rsp)
-    movq %xmm2, 32(%rsp)
-    movq %xmm3, 40(%rsp)
-    movq %xmm4, 48(%rsp)
-    movq %xmm5, 56(%rsp)
-    movq %xmm6, 64(%rsp)
-    movq %xmm7, 72(%rsp)
-    movq %xmm12, 80(%rsp)
-    movq %xmm13, 88(%rsp)
-    movq %xmm14, 96(%rsp)
-    movq %xmm15, 104(%rsp)
+    SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
     // Store ArtMethod to bottom of stack.
     movq %rdi, 0(%rsp)
     // Store rsp as the stop quick frame.
     movq %rsp, %gs:THREAD_TOP_QUICK_FRAME_OFFSET
 END_MACRO
 
-MACRO0(RESTORE_SAVE_REFS_AND_ARGS_FRAME)
-    // Restore FPRs.
-    movq 16(%rsp), %xmm0
-    movq 24(%rsp), %xmm1
-    movq 32(%rsp), %xmm2
-    movq 40(%rsp), %xmm3
-    movq 48(%rsp), %xmm4
-    movq 56(%rsp), %xmm5
-    movq 64(%rsp), %xmm6
-    movq 72(%rsp), %xmm7
-    movq 80(%rsp), %xmm12
-    movq 88(%rsp), %xmm13
-    movq 96(%rsp), %xmm14
-    movq 104(%rsp), %xmm15
-    addq MACRO_LITERAL(80 + 4 * 8), %rsp
-    CFI_ADJUST_CFA_OFFSET(-(80 + 4 * 8))
-    // Restore callee and GPR args, mixed together to agree with core spills bitmap.
-    POP rcx
-    POP rdx
-    POP rbx
-    POP rbp
-    POP rsi
-    POP r8
-    POP r9
-    POP r12
-    POP r13
-    POP r14
-    POP r15
-END_MACRO
-
     /*
      * Macro that sets up the callee save frame to conform with
      * Runtime::CreateCalleeSaveMethod(kSaveEverything)
@@ -408,26 +199,6 @@
     RESTORE_SAVE_EVERYTHING_FRAME_GPRS_EXCEPT_RAX
 END_MACRO
 
-    /*
-     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
-     * exception is Thread::Current()->exception_ when the runtime method frame is ready.
-     */
-MACRO0(DELIVER_PENDING_EXCEPTION_FRAME_READY)
-    // (Thread*) setup
-    movq %gs:THREAD_SELF_OFFSET, %rdi
-    call SYMBOL(artDeliverPendingExceptionFromCode)  // artDeliverPendingExceptionFromCode(Thread*)
-    UNREACHABLE
-END_MACRO
-
-    /*
-     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
-     * exception is Thread::Current()->exception_.
-     */
-MACRO0(DELIVER_PENDING_EXCEPTION)
-    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME        // save callee saves for throw
-    DELIVER_PENDING_EXCEPTION_FRAME_READY
-END_MACRO
-
 MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
     DEFINE_FUNCTION VAR(c_name)
     SETUP_SAVE_ALL_CALLEE_SAVES_FRAME  // save all registers as basis for long jump context
@@ -967,8 +738,7 @@
     CFI_REMEMBER_STATE
     RESTORE_SAVE_EVERYTHING_FRAME_KEEP_RAX        // restore frame up to return address
     ret
-    CFI_RESTORE_STATE
-    CFI_DEF_CFA(rsp, FRAME_SIZE_SAVE_EVERYTHING)  // workaround for clang bug: 31975598
+    CFI_RESTORE_STATE_AND_DEF_CFA(rsp, FRAME_SIZE_SAVE_EVERYTHING)
 1:
     DELIVER_PENDING_EXCEPTION_FRAME_READY
     END_FUNCTION VAR(c_name)
@@ -994,15 +764,6 @@
     DELIVER_PENDING_EXCEPTION
 END_MACRO
 
-MACRO0(RETURN_OR_DELIVER_PENDING_EXCEPTION)
-    movq %gs:THREAD_EXCEPTION_OFFSET, %rcx // get exception field
-    testq %rcx, %rcx               // rcx == 0 ?
-    jnz 1f                         // if rcx != 0 goto 1
-    ret                            // return
-1:                                 // deliver exception on current thread
-    DELIVER_PENDING_EXCEPTION
-END_MACRO
-
 // Generate the allocation entrypoints for each allocator.
 GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
 
@@ -1428,7 +1189,7 @@
     addq LITERAL(24), %rsp            // pop arguments
     CFI_ADJUST_CFA_OFFSET(-24)
     ret
-    CFI_RESTORE_STATE                 // Reset unwind info so following code unwinds.
+    CFI_RESTORE_STATE_AND_DEF_CFA(rsp, 64)  // Reset unwind info so following code unwinds.
 
 .Lthrow_class_cast_exception:
     RESTORE_FP_CALLEE_SAVE_FRAME
@@ -1690,7 +1451,7 @@
     CFI_REMEMBER_STATE
     POP rdx
     jmp *ART_METHOD_QUICK_CODE_OFFSET_64(%rdi)
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(rsp, 16)
 .Limt_table_next_entry:
     // If the entry is null, the interface method is not in the ImtConflictTable.
     cmpq LITERAL(0), 0(%rdi)
@@ -1705,7 +1466,7 @@
     POP rdx
     movq %rax, %rdi  // Load interface method
     INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
-    CFI_RESTORE_STATE
+    CFI_RESTORE_STATE_AND_DEF_CFA(rsp, 16)
 .Limt_conflict_trampoline_dex_cache_miss:
     // We're not creating a proper runtime method frame here,
     // artLookupResolvedMethod() is not allowed to walk the stack.
@@ -1829,28 +1590,27 @@
  * #-------------------#
  * | caller method...  |
  * #-------------------#
- * | Return            |
- * | Callee-Save Data  |
- * #-------------------#
- * | handle scope      |
- * #-------------------#
+ * | Return PC         |
+ * | Callee-Saves      |
+ * | padding           | // 8B
  * | Method*           |    <--- (1)
  * #-------------------#
  * | local ref cookie  | // 4B
- * | handle scope size | // 4B   TODO: roll into call stack alignment?
+ * | padding           | // 0B or 4B to align handle scope on 8B address
+ * | handle scope      | // Size depends on number of references; multiple of 4B.
  * #-------------------#
- * | JNI Call Stack    |
- * #-------------------#    <--- SP on native call
+ * | JNI Stack Args    | // Empty if all args fit into registers.
+ * #-------------------#    <--- SP on native call (1)
+ * | Free scratch      |
+ * #-------------------#
+ * | SP for JNI call   | // Pointer to (1).
+ * #-------------------#
+ * | Hidden arg        | // For @CriticalNative
+ * #-------------------#
  * |                   |
  * | Stack for Regs    |    The trampoline assembly will pop these values
  * |                   |    into registers for native call
  * #-------------------#
- * | Native code ptr   |
- * #-------------------#
- * | Free scratch      |
- * #-------------------#
- * | Ptr to (1)        |    <--- RSP
- * #-------------------#
  */
     /*
      * Called to do a generic JNI down-call
@@ -1880,25 +1640,24 @@
     // 5k = 5120
     subq LITERAL(5120), %rsp
     // prepare for artQuickGenericJniTrampoline call
-    // (Thread*,  SP)
-    //    rdi    rsi      <= C calling convention
-    //  gs:...   rbp      <= where they are
-    movq %gs:THREAD_SELF_OFFSET, %rdi
-    movq %rbp, %rsi
+    // (Thread*, managed_sp, reserved_area)
+    //    rdi       rsi           rdx   <= C calling convention
+    //  gs:...      rbp           rsp   <= where they are
+    movq %gs:THREAD_SELF_OFFSET, %rdi  // Pass Thread::Current().
+    movq %rbp, %rsi                    // Pass managed frame SP.
+    movq %rsp, %rdx                    // Pass reserved area.
     call SYMBOL(artQuickGenericJniTrampoline)  // (Thread*, sp)
 
     // The C call will have registered the complete save-frame on success.
     // The result of the call is:
-    // %rax: pointer to native code, 0 on error.
-    // %rdx: pointer to the bottom of the used area of the alloca, can restore stack till there.
+    //     %rax: pointer to native code, 0 on error.
+    //     The bottom of the reserved area contains values for arg registers,
+    //     hidden arg register and SP for out args for the call.
 
-    // Check for error = 0.
+    // Check for error (class init check or locking for synchronized native method can throw).
     test %rax, %rax
     jz .Lexception_in_native
 
-    // Release part of the alloca.
-    movq %rdx, %rsp
-
     // pop from the register-passing alloca region
     // what's the right layout?
     popq %rdi
@@ -1916,7 +1675,11 @@
     movq 40(%rsp), %xmm5
     movq 48(%rsp), %xmm6
     movq 56(%rsp), %xmm7
-    addq LITERAL(64), %rsp          // floating-point done
+
+    // Load hidden arg (r11) for @CriticalNative.
+    movq 64(%rsp), %r11
+    // Load SP for out args, releasing unneeded reserved area.
+    movq 72(%rsp), %rsp
 
     // native call
     call *%rax
@@ -2201,6 +1964,16 @@
     ret
 END_FUNCTION art_quick_instance_of
 
+DEFINE_FUNCTION art_quick_string_builder_append
+    SETUP_SAVE_REFS_ONLY_FRAME                // save ref containing registers for GC
+    // Outgoing argument set up
+    leaq FRAME_SIZE_SAVE_REFS_ONLY + __SIZEOF_POINTER__(%rsp), %rsi  // pass args
+    movq %gs:THREAD_SELF_OFFSET, %rdx         // pass Thread::Current()
+    call artStringBuilderAppend               // (uint32_t, const unit32_t*, Thread*)
+    RESTORE_SAVE_REFS_ONLY_FRAME              // restore frame up to return address
+    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER   // return or deliver exception
+END_FUNCTION art_quick_string_builder_append
+
 // Create a function `name` calling the ReadBarrier::Mark routine,
 // getting its argument and returning its result through register
 // `reg`, saving and restoring all caller-save registers.
@@ -2396,21 +2169,12 @@
     POP r8
     POP rcx
     POP rbp
-    cmpb LITERAL(68), (%r8)        // Test if result type char == 'D'.
-    je .Losr_return_double_quick
-    cmpb LITERAL(70), (%r8)        // Test if result type char == 'F'.
-    je .Losr_return_float_quick
-    movq %rax, (%rcx)              // Store the result assuming its a long, int or Object*
-    ret
-.Losr_return_double_quick:
-    movsd %xmm0, (%rcx)            // Store the double floating point result.
-    ret
-.Losr_return_float_quick:
-    movss %xmm0, (%rcx)            // Store the floating point result.
+    movq %rax, (%rcx)              // Store the result.
     ret
 .Losr_entry:
-    CFI_RESTORE_STATE             // Restore CFI state; however, since the call has pushed the
-    CFI_DEF_CFA_REGISTER(rbp)     // return address we need to switch the CFA register to RBP.
+    CFI_RESTORE_STATE_AND_DEF_CFA(rsp, 80)
+    // Since the call has pushed the return address we need to switch the CFA register to RBP.
+    CFI_DEF_CFA_REGISTER(rbp)
 
     subl LITERAL(8), %ecx         // Given stack size contains pushed frame pointer, substract it.
     subq %rcx, %rsp
@@ -2457,3 +2221,64 @@
     POP rbx                  // Restore RBX
     ret
 END_FUNCTION ExecuteSwitchImplAsm
+
+// On entry: edi is the class, r11 is the inline cache. r10 and rax are available.
+DEFINE_FUNCTION art_quick_update_inline_cache
+#if (INLINE_CACHE_SIZE != 5)
+#error "INLINE_CACHE_SIZE not as expected."
+#endif
+    // Don't update the cache if we are marking.
+    cmpl LITERAL(0), %gs:THREAD_IS_GC_MARKING_OFFSET
+    jnz .Ldone
+.Lentry1:
+    movl INLINE_CACHE_CLASSES_OFFSET(%r11), %eax
+    cmpl %edi, %eax
+    je .Ldone
+    cmpl LITERAL(0), %eax
+    jne .Lentry2
+    lock cmpxchg %edi, INLINE_CACHE_CLASSES_OFFSET(%r11)
+    jz .Ldone
+    jmp .Lentry1
+.Lentry2:
+    movl (INLINE_CACHE_CLASSES_OFFSET+4)(%r11), %eax
+    cmpl %edi, %eax
+    je .Ldone
+    cmpl LITERAL(0), %eax
+    jne .Lentry3
+    lock cmpxchg %edi, (INLINE_CACHE_CLASSES_OFFSET+4)(%r11)
+    jz .Ldone
+    jmp .Lentry2
+.Lentry3:
+    movl (INLINE_CACHE_CLASSES_OFFSET+8)(%r11), %eax
+    cmpl %edi, %eax
+    je .Ldone
+    cmpl LITERAL(0), %eax
+    jne .Lentry4
+    lock cmpxchg %edi, (INLINE_CACHE_CLASSES_OFFSET+8)(%r11)
+    jz .Ldone
+    jmp .Lentry3
+.Lentry4:
+    movl (INLINE_CACHE_CLASSES_OFFSET+12)(%r11), %eax
+    cmpl %edi, %eax
+    je .Ldone
+    cmpl LITERAL(0), %eax
+    jne .Lentry5
+    lock cmpxchg %edi, (INLINE_CACHE_CLASSES_OFFSET+12)(%r11)
+    jz .Ldone
+    jmp .Lentry4
+.Lentry5:
+    // Unconditionally store, the cache is megamorphic.
+    movl %edi, (INLINE_CACHE_CLASSES_OFFSET+16)(%r11)
+.Ldone:
+    ret
+END_FUNCTION art_quick_update_inline_cache
+
+// On entry, method is at the bottom of the stack.
+DEFINE_FUNCTION art_quick_compile_optimized
+    SETUP_SAVE_EVERYTHING_FRAME
+    movq FRAME_SIZE_SAVE_EVERYTHING(%rsp), %rdi // pass ArtMethod
+    movq %gs:THREAD_SELF_OFFSET, %rsi           // pass Thread::Current()
+    call SYMBOL(artCompileOptimized)            // (ArtMethod*, Thread*)
+    RESTORE_SAVE_EVERYTHING_FRAME               // restore frame up to return address
+    ret
+END_FUNCTION art_quick_compile_optimized
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index 4aeb055..5ab6d91 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -40,6 +40,17 @@
   return GetDeclaringClass<kWithoutReadBarrier>()->IsProxyClass<kVerifyNone>();
 }
 
+// We are only ever allowed to set our own final fields. We do need to be careful since if a
+// structural redefinition occurs during <clinit> we can end up trying to set the non-obsolete
+// class's fields from the obsolete class. This is something we want to allow. This is tested by
+// run-test 2002-virtual-structural-initializing.
+inline bool ArtField::CanBeChangedBy(ArtMethod* method) {
+  ObjPtr<mirror::Class> declaring_class(GetDeclaringClass());
+  ObjPtr<mirror::Class> referring_class(method->GetDeclaringClass());
+  return !IsFinal() || (declaring_class == referring_class) ||
+         UNLIKELY(referring_class->IsObsoleteVersionOf(declaring_class));
+}
+
 template<ReadBarrierOption kReadBarrierOption>
 inline ObjPtr<mirror::Class> ArtField::GetDeclaringClass() {
   GcRootSource gc_root_source(this);
@@ -348,15 +359,6 @@
   return Runtime::Current()->GetClassLinker()->ResolveString(field_id.name_idx_, this);
 }
 
-template <typename Visitor>
-inline void ArtField::UpdateObjects(const Visitor& visitor) {
-  ObjPtr<mirror::Class> old_class = DeclaringClassRoot().Read<kWithoutReadBarrier>();
-  ObjPtr<mirror::Class> new_class = visitor(old_class.Ptr());
-  if (old_class != new_class) {
-    SetDeclaringClass(new_class);
-  }
-}
-
 // If kExactOffset is true then we only find the matching offset, not the field containing the
 // offset.
 template <bool kExactOffset>
diff --git a/runtime/art_field.cc b/runtime/art_field.cc
index 6e55f9f..639ee2b 100644
--- a/runtime/art_field.cc
+++ b/runtime/art_field.cc
@@ -33,13 +33,8 @@
 
 void ArtField::SetOffset(MemberOffset num_bytes) {
   DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
-  if (kIsDebugBuild && Runtime::Current()->IsAotCompiler() &&
-      Runtime::Current()->IsCompilingBootImage()) {
-    Primitive::Type type = GetTypeAsPrimitiveType();
-    if (type == Primitive::kPrimDouble || type == Primitive::kPrimLong) {
-      DCHECK_ALIGNED(num_bytes.Uint32Value(), 8);
-    }
-  }
+  DCHECK_ALIGNED_PARAM(num_bytes.Uint32Value(),
+                       Primitive::ComponentSize(GetTypeAsPrimitiveType()));
   // Not called within a transaction.
   offset_ = num_bytes.Uint32Value();
 }
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 18132ed..86f67ad 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -17,7 +17,6 @@
 #ifndef ART_RUNTIME_ART_FIELD_H_
 #define ART_RUNTIME_ART_FIELD_H_
 
-#include "dex/dex_file_types.h"
 #include "dex/modifiers.h"
 #include "dex/primitive.h"
 #include "gc_root.h"
@@ -76,6 +75,10 @@
     return (GetAccessFlags() & kAccFinal) != 0;
   }
 
+  bool IsPrivate() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return (GetAccessFlags() & kAccPrivate) != 0;
+  }
+
   uint32_t GetDexFieldIndex() {
     return field_dex_idx_;
   }
@@ -93,10 +96,14 @@
     return MemberOffset(offset_);
   }
 
-  static MemberOffset OffsetOffset() {
+  static constexpr MemberOffset OffsetOffset() {
     return MemberOffset(OFFSETOF_MEMBER(ArtField, offset_));
   }
 
+  static constexpr MemberOffset DeclaringClassOffset() {
+    return MemberOffset(OFFSETOF_MEMBER(ArtField, declaring_class_));
+  }
+
   MemberOffset GetOffsetDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_);
 
   void SetOffset(MemberOffset num_bytes) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -226,10 +233,8 @@
   std::string PrettyField(bool with_type = true)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Update the declaring class with the passed in visitor. Does not use read barrier.
-  template <typename Visitor>
-  ALWAYS_INLINE void UpdateObjects(const Visitor& visitor)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+  // Returns true if a set-* instruction in the given method is allowable.
+  ALWAYS_INLINE inline bool CanBeChangedBy(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
   bool IsProxyField() REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index d77c608..dfadc62 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -29,6 +29,7 @@
 #include "dex/dex_file_types.h"
 #include "dex/invoke_type.h"
 #include "dex/primitive.h"
+#include "dex/signature.h"
 #include "gc_root-inl.h"
 #include "imtable-inl.h"
 #include "intrinsics_enum.h"
@@ -38,7 +39,6 @@
 #include "mirror/object-inl.h"
 #include "mirror/object_array.h"
 #include "mirror/string.h"
-#include "oat.h"
 #include "obj_ptr-inl.h"
 #include "quick/quick_method_frame_info.h"
 #include "read_barrier-inl.h"
@@ -375,15 +375,6 @@
 }
 
 template <typename Visitor>
-inline void ArtMethod::UpdateObjectsForImageRelocation(const Visitor& visitor) {
-  ObjPtr<mirror::Class> old_class = GetDeclaringClassUnchecked<kWithoutReadBarrier>();
-  ObjPtr<mirror::Class> new_class = visitor(old_class.Ptr());
-  if (old_class != new_class) {
-    SetDeclaringClass(new_class);
-  }
-}
-
-template <typename Visitor>
 inline void ArtMethod::UpdateEntrypoints(const Visitor& visitor, PointerSize pointer_size) {
   if (IsNative()) {
     const void* old_native_code = GetEntryPointFromJniPtrSize(pointer_size);
@@ -413,7 +404,7 @@
   return CodeItemDebugInfoAccessor(*GetDexFile(), GetCodeItem(), GetDexMethodIndex());
 }
 
-inline void ArtMethod::SetCounter(int16_t hotness_count) {
+inline void ArtMethod::SetCounter(uint16_t hotness_count) {
   DCHECK(!IsAbstract()) << PrettyMethod();
   hotness_count_ = hotness_count;
 }
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index 0890da8..d0b6fde 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -16,12 +16,15 @@
 
 #include "art_method.h"
 
+#include <algorithm>
 #include <cstddef>
 
 #include "android-base/stringprintf.h"
 
 #include "arch/context.h"
 #include "art_method-inl.h"
+#include "base/enums.h"
+#include "base/stl_util.h"
 #include "class_linker-inl.h"
 #include "class_root.h"
 #include "debugger.h"
@@ -65,7 +68,7 @@
               "Wrong runtime-method dex method index");
 
 ArtMethod* ArtMethod::GetCanonicalMethod(PointerSize pointer_size) {
-  if (LIKELY(!IsDefault())) {
+  if (LIKELY(!IsCopied())) {
     return this;
   } else {
     ObjPtr<mirror::Class> declaring_class = GetDeclaringClass();
@@ -106,26 +109,32 @@
 }
 
 ObjPtr<mirror::DexCache> ArtMethod::GetObsoleteDexCache() {
+  PointerSize pointer_size = kRuntimePointerSize;
   DCHECK(!Runtime::Current()->IsAotCompiler()) << PrettyMethod();
   DCHECK(IsObsolete());
   ObjPtr<mirror::ClassExt> ext(GetDeclaringClass()->GetExtData());
-  CHECK(!ext.IsNull());
-  ObjPtr<mirror::PointerArray> obsolete_methods(ext->GetObsoleteMethods());
-  CHECK(!obsolete_methods.IsNull());
-  DCHECK(ext->GetObsoleteDexCaches() != nullptr);
-  int32_t len = obsolete_methods->GetLength();
-  DCHECK_EQ(len, ext->GetObsoleteDexCaches()->GetLength());
+  ObjPtr<mirror::PointerArray> obsolete_methods(ext.IsNull() ? nullptr : ext->GetObsoleteMethods());
+  int32_t len = (obsolete_methods.IsNull() ? 0 : obsolete_methods->GetLength());
+  DCHECK(len == 0 || len == ext->GetObsoleteDexCaches()->GetLength())
+      << "len=" << len << " ext->GetObsoleteDexCaches()=" << ext->GetObsoleteDexCaches();
   // Using kRuntimePointerSize (instead of using the image's pointer size) is fine since images
   // should never have obsolete methods in them so they should always be the same.
-  PointerSize pointer_size = kRuntimePointerSize;
-  DCHECK_EQ(kRuntimePointerSize, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
+  DCHECK_EQ(pointer_size, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
   for (int32_t i = 0; i < len; i++) {
     if (this == obsolete_methods->GetElementPtrSize<ArtMethod*>(i, pointer_size)) {
       return ext->GetObsoleteDexCaches()->Get(i);
     }
   }
-  LOG(FATAL) << "This method does not appear in the obsolete map of its class!";
-  UNREACHABLE();
+  CHECK(GetDeclaringClass()->IsObsoleteObject())
+      << "This non-structurally obsolete method does not appear in the obsolete map of its class: "
+      << GetDeclaringClass()->PrettyClass() << " Searched " << len << " caches.";
+  CHECK_EQ(this,
+           std::clamp(this,
+                      &(*GetDeclaringClass()->GetMethods(pointer_size).begin()),
+                      &(*GetDeclaringClass()->GetMethods(pointer_size).end())))
+      << "class is marked as structurally obsolete method but not found in normal obsolete-map "
+      << "despite not being the original method pointer for " << GetDeclaringClass()->PrettyClass();
+  return GetDeclaringClass()->GetDexCache();
 }
 
 uint16_t ArtMethod::FindObsoleteDexClassDefIndex() {
@@ -323,8 +332,7 @@
   // Invocation by the interpreter, explicitly forcing interpretation over JIT to prevent
   // cycling around the various JIT/Interpreter methods that handle method invocation.
   if (UNLIKELY(!runtime->IsStarted() ||
-               (self->IsForceInterpreter() && !IsNative() && !IsProxyMethod() && IsInvokable()) ||
-               Dbg::IsForcedInterpreterNeededForCalling(self, this))) {
+               (self->IsForceInterpreter() && !IsNative() && !IsProxyMethod() && IsInvokable()))) {
     if (IsStatic()) {
       art::interpreter::EnterInterpreterFromInvoke(
           self, this, nullptr, args, result, /*stay_in_interpreter=*/ true);
@@ -398,7 +406,8 @@
 void ArtMethod::UnregisterNative() {
   CHECK(IsNative()) << PrettyMethod();
   // restore stub to lookup native pointer via dlsym
-  SetEntryPointFromJni(GetJniDlsymLookupStub());
+  SetEntryPointFromJni(
+      IsCriticalNative() ? GetJniDlsymLookupCriticalStub() : GetJniDlsymLookupStub());
 }
 
 bool ArtMethod::IsOverridableByDefaultMethod() {
@@ -603,6 +612,11 @@
     }
   }
 
+  if (OatQuickMethodHeader::NterpMethodHeader != nullptr &&
+      OatQuickMethodHeader::NterpMethodHeader->Contains(pc)) {
+    return OatQuickMethodHeader::NterpMethodHeader;
+  }
+
   // Check whether the pc is in the JIT code cache.
   jit::Jit* jit = runtime->GetJit();
   if (jit != nullptr) {
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 83213d5..70d8d15 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -18,6 +18,7 @@
 #define ART_RUNTIME_ART_METHOD_H_
 
 #include <cstddef>
+#include <limits>
 
 #include <android-base/logging.h>
 #include <jni.h>
@@ -28,12 +29,9 @@
 #include "base/enums.h"
 #include "base/macros.h"
 #include "base/runtime_debug.h"
-#include "dex/code_item_accessors.h"
 #include "dex/dex_file_structs.h"
-#include "dex/dex_instruction_iterator.h"
 #include "dex/modifiers.h"
 #include "dex/primitive.h"
-#include "dex/signature.h"
 #include "gc_root.h"
 #include "obj_ptr.h"
 #include "offsets.h"
@@ -41,6 +39,9 @@
 
 namespace art {
 
+class CodeItemDataAccessor;
+class CodeItemDebugInfoAccessor;
+class CodeItemInstructionAccessor;
 class DexFile;
 template<class T> class Handle;
 class ImtConflictTable;
@@ -50,6 +51,7 @@
 class ProfilingInfo;
 class ScopedObjectAccessAlreadyRunnable;
 class ShadowFrame;
+class Signature;
 
 namespace mirror {
 class Array;
@@ -108,14 +110,14 @@
     return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
   }
 
-  uint32_t GetAccessFlags() {
+  uint32_t GetAccessFlags() const {
     return access_flags_.load(std::memory_order_relaxed);
   }
 
   // This version should only be called when it's certain there is no
   // concurrency so there is no need to guarantee atomicity. For example,
   // before the method is linked.
-  void SetAccessFlags(uint32_t new_access_flags) {
+  void SetAccessFlags(uint32_t new_access_flags) REQUIRES_SHARED(Locks::mutator_lock_) {
     access_flags_.store(new_access_flags, std::memory_order_relaxed);
   }
 
@@ -127,32 +129,32 @@
   InvokeType GetInvokeType() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns true if the method is declared public.
-  bool IsPublic() {
+  bool IsPublic() const {
     return (GetAccessFlags() & kAccPublic) != 0;
   }
 
   // Returns true if the method is declared private.
-  bool IsPrivate() {
+  bool IsPrivate() const {
     return (GetAccessFlags() & kAccPrivate) != 0;
   }
 
   // Returns true if the method is declared static.
-  bool IsStatic() {
+  bool IsStatic() const {
     return (GetAccessFlags() & kAccStatic) != 0;
   }
 
   // Returns true if the method is a constructor according to access flags.
-  bool IsConstructor() {
+  bool IsConstructor() const {
     return (GetAccessFlags() & kAccConstructor) != 0;
   }
 
   // Returns true if the method is a class initializer according to access flags.
-  bool IsClassInitializer() {
+  bool IsClassInitializer() const {
     return IsConstructor() && IsStatic();
   }
 
   // Returns true if the method is static, private, or a constructor.
-  bool IsDirect() {
+  bool IsDirect() const {
     return IsDirect(GetAccessFlags());
   }
 
@@ -162,22 +164,22 @@
   }
 
   // Returns true if the method is declared synchronized.
-  bool IsSynchronized() {
+  bool IsSynchronized() const {
     constexpr uint32_t synchonized = kAccSynchronized | kAccDeclaredSynchronized;
     return (GetAccessFlags() & synchonized) != 0;
   }
 
-  bool IsFinal() {
+  bool IsFinal() const {
     return (GetAccessFlags() & kAccFinal) != 0;
   }
 
-  bool IsIntrinsic() {
+  bool IsIntrinsic() const {
     return (GetAccessFlags() & kAccIntrinsic) != 0;
   }
 
   ALWAYS_INLINE void SetIntrinsic(uint32_t intrinsic) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  uint32_t GetIntrinsic() {
+  uint32_t GetIntrinsic() const {
     static const int kAccFlagsShift = CTZ(kAccIntrinsicBits);
     static_assert(IsPowerOfTwo((kAccIntrinsicBits >> kAccFlagsShift) + 1),
                   "kAccIntrinsicBits are not continuous");
@@ -189,7 +191,7 @@
 
   void SetNotIntrinsic() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool IsCopied() {
+  bool IsCopied() const {
     static_assert((kAccCopied & (kAccIntrinsic | kAccIntrinsicBits)) == 0,
                   "kAccCopied conflicts with intrinsic modifier");
     const bool copied = (GetAccessFlags() & kAccCopied) != 0;
@@ -199,34 +201,62 @@
     return copied;
   }
 
-  bool IsMiranda() {
-    // The kAccMiranda flag value is used with a different meaning for native methods,
-    // so we need to check the kAccNative flag as well.
-    return (GetAccessFlags() & (kAccNative | kAccMiranda)) == kAccMiranda;
+  bool IsMiranda() const {
+    // The kAccMiranda flag value is used with a different meaning for native methods and methods
+    // marked kAccCompileDontBother, so we need to check these flags as well.
+    return (GetAccessFlags() & (kAccNative | kAccMiranda | kAccCompileDontBother)) == kAccMiranda;
   }
 
   // Returns true if invoking this method will not throw an AbstractMethodError or
   // IncompatibleClassChangeError.
-  bool IsInvokable() {
+  bool IsInvokable() const {
     return !IsAbstract() && !IsDefaultConflicting();
   }
 
-  bool IsCompilable() {
+  bool IsPreCompiled() const {
     if (IsIntrinsic()) {
       // kAccCompileDontBother overlaps with kAccIntrinsicBits.
+      return false;
+    }
+    uint32_t expected = (kAccPreCompiled | kAccCompileDontBother);
+    return (GetAccessFlags() & expected) == expected;
+  }
+
+  void SetPreCompiled() REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(IsInvokable());
+    DCHECK(IsCompilable());
+    AddAccessFlags(kAccPreCompiled | kAccCompileDontBother);
+  }
+
+  void ClearPreCompiled() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ClearAccessFlags(kAccPreCompiled | kAccCompileDontBother);
+  }
+
+  bool IsCompilable() const {
+    if (IsIntrinsic()) {
+      // kAccCompileDontBother overlaps with kAccIntrinsicBits.
+      return true;
+    }
+    if (IsPreCompiled()) {
       return true;
     }
     return (GetAccessFlags() & kAccCompileDontBother) == 0;
   }
 
-  void SetDontCompile() {
+  void ClearDontCompile() REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(!IsMiranda());
+    ClearAccessFlags(kAccCompileDontBother);
+  }
+
+  void SetDontCompile() REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(!IsMiranda());
     AddAccessFlags(kAccCompileDontBother);
   }
 
   // A default conflict method is a special sentinel method that stands for a conflict between
   // multiple default methods. It cannot be invoked, throwing an IncompatibleClassChangeError if one
   // attempts to do so.
-  bool IsDefaultConflicting() {
+  bool IsDefaultConflicting() const {
     if (IsIntrinsic()) {
       return false;
     }
@@ -234,26 +264,26 @@
   }
 
   // This is set by the class linker.
-  bool IsDefault() {
+  bool IsDefault() const {
     static_assert((kAccDefault & (kAccIntrinsic | kAccIntrinsicBits)) == 0,
                   "kAccDefault conflicts with intrinsic modifier");
     return (GetAccessFlags() & kAccDefault) != 0;
   }
 
-  bool IsObsolete() {
+  bool IsObsolete() const {
     return (GetAccessFlags() & kAccObsoleteMethod) != 0;
   }
 
-  void SetIsObsolete() {
+  void SetIsObsolete() REQUIRES_SHARED(Locks::mutator_lock_) {
     AddAccessFlags(kAccObsoleteMethod);
   }
 
-  bool IsNative() {
+  bool IsNative() const {
     return (GetAccessFlags() & kAccNative) != 0;
   }
 
   // Checks to see if the method was annotated with @dalvik.annotation.optimization.FastNative.
-  bool IsFastNative() {
+  bool IsFastNative() const {
     // The presence of the annotation is checked by ClassLinker and recorded in access flags.
     // The kAccFastNative flag value is used with a different meaning for non-native methods,
     // so we need to check the kAccNative flag as well.
@@ -262,7 +292,7 @@
   }
 
   // Checks to see if the method was annotated with @dalvik.annotation.optimization.CriticalNative.
-  bool IsCriticalNative() {
+  bool IsCriticalNative() const {
     // The presence of the annotation is checked by ClassLinker and recorded in access flags.
     // The kAccCriticalNative flag value is used with a different meaning for non-native methods,
     // so we need to check the kAccNative flag as well.
@@ -270,15 +300,15 @@
     return (GetAccessFlags() & mask) == mask;
   }
 
-  bool IsAbstract() {
+  bool IsAbstract() const {
     return (GetAccessFlags() & kAccAbstract) != 0;
   }
 
-  bool IsSynthetic() {
+  bool IsSynthetic() const {
     return (GetAccessFlags() & kAccSynthetic) != 0;
   }
 
-  bool IsVarargs() {
+  bool IsVarargs() const {
     return (GetAccessFlags() & kAccVarargs) != 0;
   }
 
@@ -286,36 +316,41 @@
 
   bool IsPolymorphicSignature() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool UseFastInterpreterToInterpreterInvoke() {
+  bool UseFastInterpreterToInterpreterInvoke() const {
     // The bit is applicable only if the method is not intrinsic.
     constexpr uint32_t mask = kAccFastInterpreterToInterpreterInvoke | kAccIntrinsic;
     return (GetAccessFlags() & mask) == kAccFastInterpreterToInterpreterInvoke;
   }
 
-  void SetFastInterpreterToInterpreterInvokeFlag() {
+  void SetFastInterpreterToInterpreterInvokeFlag() REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(!IsIntrinsic());
     AddAccessFlags(kAccFastInterpreterToInterpreterInvoke);
   }
 
-  void ClearFastInterpreterToInterpreterInvokeFlag() {
+  void ClearFastInterpreterToInterpreterInvokeFlag() REQUIRES_SHARED(Locks::mutator_lock_) {
     if (!IsIntrinsic()) {
       ClearAccessFlags(kAccFastInterpreterToInterpreterInvoke);
     }
   }
 
-  bool SkipAccessChecks() {
+  bool SkipAccessChecks() const {
     // The kAccSkipAccessChecks flag value is used with a different meaning for native methods,
     // so we need to check the kAccNative flag as well.
     return (GetAccessFlags() & (kAccSkipAccessChecks | kAccNative)) == kAccSkipAccessChecks;
   }
 
-  void SetSkipAccessChecks() {
+  void SetSkipAccessChecks() REQUIRES_SHARED(Locks::mutator_lock_) {
     // SkipAccessChecks() is applicable only to non-native methods.
     DCHECK(!IsNative());
     AddAccessFlags(kAccSkipAccessChecks);
   }
+  void ClearSkipAccessChecks() REQUIRES_SHARED(Locks::mutator_lock_) {
+    // SkipAccessChecks() is applicable only to non-native methods.
+    DCHECK(!IsNative());
+    ClearAccessFlags(kAccSkipAccessChecks);
+  }
 
-  bool PreviouslyWarm() {
+  bool PreviouslyWarm() const {
     if (IsIntrinsic()) {
       // kAccPreviouslyWarm overlaps with kAccIntrinsicBits.
       return true;
@@ -323,7 +358,7 @@
     return (GetAccessFlags() & kAccPreviouslyWarm) != 0;
   }
 
-  void SetPreviouslyWarm() {
+  void SetPreviouslyWarm() REQUIRES_SHARED(Locks::mutator_lock_) {
     if (IsIntrinsic()) {
       // kAccPreviouslyWarm overlaps with kAccIntrinsicBits.
       return;
@@ -333,15 +368,20 @@
 
   // Should this method be run in the interpreter and count locks (e.g., failed structured-
   // locking verification)?
-  bool MustCountLocks() {
+  bool MustCountLocks() const {
     if (IsIntrinsic()) {
       return false;
     }
     return (GetAccessFlags() & kAccMustCountLocks) != 0;
   }
 
-  void SetMustCountLocks() {
+  void ClearMustCountLocks() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ClearAccessFlags(kAccMustCountLocks);
+  }
+
+  void SetMustCountLocks() REQUIRES_SHARED(Locks::mutator_lock_) {
     AddAccessFlags(kAccMustCountLocks);
+    ClearAccessFlags(kAccSkipAccessChecks);
   }
 
   // Returns true if this method could be overridden by a default method.
@@ -375,11 +415,15 @@
     return MemberOffset(OFFSETOF_MEMBER(ArtMethod, method_index_));
   }
 
-  uint32_t GetCodeItemOffset() {
+  static constexpr MemberOffset ImtIndexOffset() {
+    return MemberOffset(OFFSETOF_MEMBER(ArtMethod, imt_index_));
+  }
+
+  uint32_t GetCodeItemOffset() const {
     return dex_code_item_offset_;
   }
 
-  void SetCodeItemOffset(uint32_t new_code_off) {
+  void SetCodeItemOffset(uint32_t new_code_off) REQUIRES_SHARED(Locks::mutator_lock_) {
     // Not called within a transaction.
     dex_code_item_offset_ = new_code_off;
   }
@@ -387,11 +431,11 @@
   // Number of 32bit registers that would be required to hold all the arguments
   static size_t NumArgRegisters(const char* shorty);
 
-  ALWAYS_INLINE uint32_t GetDexMethodIndex() {
+  ALWAYS_INLINE uint32_t GetDexMethodIndex() const {
     return dex_method_index_;
   }
 
-  void SetDexMethodIndex(uint32_t new_idx) {
+  void SetDexMethodIndex(uint32_t new_idx) REQUIRES_SHARED(Locks::mutator_lock_) {
     // Not called within a transaction.
     dex_method_index_ = new_idx;
   }
@@ -421,20 +465,23 @@
   void Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result, const char* shorty)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  const void* GetEntryPointFromQuickCompiledCode() {
+  const void* GetEntryPointFromQuickCompiledCode() const {
     return GetEntryPointFromQuickCompiledCodePtrSize(kRuntimePointerSize);
   }
-  ALWAYS_INLINE const void* GetEntryPointFromQuickCompiledCodePtrSize(PointerSize pointer_size) {
+  ALWAYS_INLINE
+  const void* GetEntryPointFromQuickCompiledCodePtrSize(PointerSize pointer_size) const {
     return GetNativePointer<const void*>(
         EntryPointFromQuickCompiledCodeOffset(pointer_size), pointer_size);
   }
 
-  void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code) {
+  void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     SetEntryPointFromQuickCompiledCodePtrSize(entry_point_from_quick_compiled_code,
                                               kRuntimePointerSize);
   }
   ALWAYS_INLINE void SetEntryPointFromQuickCompiledCodePtrSize(
-      const void* entry_point_from_quick_compiled_code, PointerSize pointer_size) {
+      const void* entry_point_from_quick_compiled_code, PointerSize pointer_size)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     SetNativePointer(EntryPointFromQuickCompiledCodeOffset(pointer_size),
                      entry_point_from_quick_compiled_code,
                      pointer_size);
@@ -464,12 +511,13 @@
             * static_cast<size_t>(pointer_size));
   }
 
-  ImtConflictTable* GetImtConflictTable(PointerSize pointer_size) {
+  ImtConflictTable* GetImtConflictTable(PointerSize pointer_size) const {
     DCHECK(IsRuntimeMethod());
     return reinterpret_cast<ImtConflictTable*>(GetDataPtrSize(pointer_size));
   }
 
-  ALWAYS_INLINE void SetImtConflictTable(ImtConflictTable* table, PointerSize pointer_size) {
+  ALWAYS_INLINE void SetImtConflictTable(ImtConflictTable* table, PointerSize pointer_size)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsRuntimeMethod());
     SetDataPtrSize(table, pointer_size);
   }
@@ -481,11 +529,12 @@
     return reinterpret_cast<ProfilingInfo*>(GetDataPtrSize(pointer_size));
   }
 
-  ALWAYS_INLINE void SetProfilingInfo(ProfilingInfo* info) {
+  ALWAYS_INLINE void SetProfilingInfo(ProfilingInfo* info) REQUIRES_SHARED(Locks::mutator_lock_) {
     SetDataPtrSize(info, kRuntimePointerSize);
   }
 
-  ALWAYS_INLINE void SetProfilingInfoPtrSize(ProfilingInfo* info, PointerSize pointer_size) {
+  ALWAYS_INLINE void SetProfilingInfoPtrSize(ProfilingInfo* info, PointerSize pointer_size)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     SetDataPtrSize(info, pointer_size);
   }
 
@@ -497,7 +546,8 @@
   template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ALWAYS_INLINE bool HasSingleImplementation() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ALWAYS_INLINE void SetHasSingleImplementation(bool single_impl) {
+  ALWAYS_INLINE void SetHasSingleImplementation(bool single_impl)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(!IsIntrinsic()) << "conflict with intrinsic bits";
     if (single_impl) {
       AddAccessFlags(kAccSingleImplementation);
@@ -506,6 +556,10 @@
     }
   }
 
+  ALWAYS_INLINE bool HasSingleImplementationFlag() const {
+    return (GetAccessFlags() & kAccSingleImplementation) != 0;
+  }
+
   // Takes a method and returns a 'canonical' one if the method is default (and therefore
   // potentially copied from some other class). For example, this ensures that the debugger does not
   // get confused as to which method we are in.
@@ -514,44 +568,48 @@
 
   ArtMethod* GetSingleImplementation(PointerSize pointer_size);
 
-  ALWAYS_INLINE void SetSingleImplementation(ArtMethod* method, PointerSize pointer_size) {
+  ALWAYS_INLINE void SetSingleImplementation(ArtMethod* method, PointerSize pointer_size)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(!IsNative());
     // Non-abstract method's single implementation is just itself.
     DCHECK(IsAbstract());
     SetDataPtrSize(method, pointer_size);
   }
 
-  void* GetEntryPointFromJni() {
+  void* GetEntryPointFromJni() const {
     DCHECK(IsNative());
     return GetEntryPointFromJniPtrSize(kRuntimePointerSize);
   }
 
-  ALWAYS_INLINE void* GetEntryPointFromJniPtrSize(PointerSize pointer_size) {
+  ALWAYS_INLINE void* GetEntryPointFromJniPtrSize(PointerSize pointer_size) const {
     return GetDataPtrSize(pointer_size);
   }
 
-  void SetEntryPointFromJni(const void* entrypoint) {
+  void SetEntryPointFromJni(const void* entrypoint)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsNative());
     SetEntryPointFromJniPtrSize(entrypoint, kRuntimePointerSize);
   }
 
-  ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, PointerSize pointer_size) {
+  ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, PointerSize pointer_size)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     SetDataPtrSize(entrypoint, pointer_size);
   }
 
-  ALWAYS_INLINE void* GetDataPtrSize(PointerSize pointer_size) {
+  ALWAYS_INLINE void* GetDataPtrSize(PointerSize pointer_size) const {
     DCHECK(IsImagePointerSize(pointer_size));
     return GetNativePointer<void*>(DataOffset(pointer_size), pointer_size);
   }
 
-  ALWAYS_INLINE void SetDataPtrSize(const void* data, PointerSize pointer_size) {
+  ALWAYS_INLINE void SetDataPtrSize(const void* data, PointerSize pointer_size)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsImagePointerSize(pointer_size));
     SetNativePointer(DataOffset(pointer_size), data, pointer_size);
   }
 
   // Is this a CalleSaveMethod or ResolutionMethod and therefore doesn't adhere to normal
   // conventions for a method of managed code. Returns false for Proxy methods.
-  ALWAYS_INLINE bool IsRuntimeMethod() {
+  ALWAYS_INLINE bool IsRuntimeMethod() const {
     return dex_method_index_ == kRuntimeMethodDexMethodIndex;
   }
 
@@ -653,10 +711,14 @@
   void CopyFrom(ArtMethod* src, PointerSize image_pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ALWAYS_INLINE void SetCounter(int16_t hotness_count) REQUIRES_SHARED(Locks::mutator_lock_);
+  ALWAYS_INLINE void SetCounter(uint16_t hotness_count) REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE uint16_t GetCounter() REQUIRES_SHARED(Locks::mutator_lock_);
 
+  ALWAYS_INLINE static constexpr uint16_t MaxCounter() {
+    return std::numeric_limits<decltype(hotness_count_)>::max();
+  }
+
   ALWAYS_INLINE uint32_t GetImtIndex() REQUIRES_SHARED(Locks::mutator_lock_);
 
   void CalculateAndSetImtIndex() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -693,21 +755,16 @@
   std::string JniLongName()
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Update heap objects and non-entrypoint pointers by the passed in visitor for image relocation.
-  // Does not use read barrier.
-  template <typename Visitor>
-  ALWAYS_INLINE void UpdateObjectsForImageRelocation(const Visitor& visitor)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   // Update entry points by passing them through the visitor.
   template <typename Visitor>
-  ALWAYS_INLINE void UpdateEntrypoints(const Visitor& visitor, PointerSize pointer_size);
+  ALWAYS_INLINE void UpdateEntrypoints(const Visitor& visitor, PointerSize pointer_size)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Visit the individual members of an ArtMethod.  Used by imgdiag.
   // As imgdiag does not support mixing instruction sets or pointer sizes (e.g., using imgdiag32
   // to inspect 64-bit images, etc.), we can go beneath the accessors directly to the class members.
   template <typename VisitorFunc>
-  void VisitMembers(VisitorFunc& visitor) {
+  void VisitMembers(VisitorFunc& visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(IsImagePointerSize(kRuntimePointerSize));
     visitor(this, &declaring_class_, "declaring_class_");
     visitor(this, &access_flags_, "access_flags_");
@@ -818,7 +875,8 @@
   }
 
   template<typename T>
-  ALWAYS_INLINE void SetNativePointer(MemberOffset offset, T new_value, PointerSize pointer_size) {
+  ALWAYS_INLINE void SetNativePointer(MemberOffset offset, T new_value, PointerSize pointer_size)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     static_assert(std::is_pointer<T>::value, "T must be a pointer type");
     const auto addr = reinterpret_cast<uintptr_t>(this) + offset.Uint32Value();
     if (pointer_size == PointerSize::k32) {
@@ -839,27 +897,16 @@
   }
 
   // This setter guarantees atomicity.
-  void AddAccessFlags(uint32_t flag) {
-    DCHECK(!IsIntrinsic() ||
-           !OverlapsIntrinsicBits(flag) ||
-           IsValidIntrinsicUpdate(flag));
-    uint32_t old_access_flags;
-    uint32_t new_access_flags;
-    do {
-      old_access_flags = access_flags_.load(std::memory_order_relaxed);
-      new_access_flags = old_access_flags | flag;
-    } while (!access_flags_.compare_exchange_weak(old_access_flags, new_access_flags));
+  void AddAccessFlags(uint32_t flag) REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(!IsIntrinsic() || !OverlapsIntrinsicBits(flag) || IsValidIntrinsicUpdate(flag));
+    // None of the readers rely on ordering.
+    access_flags_.fetch_or(flag, std::memory_order_relaxed);
   }
 
   // This setter guarantees atomicity.
-  void ClearAccessFlags(uint32_t flag) {
+  void ClearAccessFlags(uint32_t flag) REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK(!IsIntrinsic() || !OverlapsIntrinsicBits(flag) || IsValidIntrinsicUpdate(flag));
-    uint32_t old_access_flags;
-    uint32_t new_access_flags;
-    do {
-      old_access_flags = access_flags_.load(std::memory_order_relaxed);
-      new_access_flags = old_access_flags & ~flag;
-    } while (!access_flags_.compare_exchange_weak(old_access_flags, new_access_flags));
+    access_flags_.fetch_and(~flag, std::memory_order_relaxed);
   }
 
   // Used by GetName and GetNameView to share common code.
diff --git a/runtime/backtrace_helper.cc b/runtime/backtrace_helper.cc
index 21a0568..2d39270 100644
--- a/runtime/backtrace_helper.cc
+++ b/runtime/backtrace_helper.cc
@@ -18,12 +18,16 @@
 
 #if defined(__linux__)
 
-#include <backtrace/Backtrace.h>
-#include <backtrace/BacktraceMap.h>
-
-#include <unistd.h>
 #include <sys/types.h>
+#include <unistd.h>
 
+#include "unwindstack/Regs.h"
+#include "unwindstack/RegsGetLocal.h"
+#include "unwindstack/Memory.h"
+#include "unwindstack/Unwinder.h"
+
+#include "base/bit_utils.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
 #include "thread-inl.h"
 
 #else
@@ -35,46 +39,83 @@
 
 namespace art {
 
-// We only really support libbacktrace on linux which is unfortunate but since this is only for
+// We only really support libunwindstack on linux which is unfortunate but since this is only for
 // gcstress this isn't a huge deal.
 #if defined(__linux__)
 
-static const char* kBacktraceCollectorTlsKey = "BacktraceCollectorTlsKey";
+struct UnwindHelper : public TLSData {
+  static constexpr const char* kTlsKey = "UnwindHelper::kTlsKey";
 
-struct BacktraceMapHolder : public TLSData {
-  BacktraceMapHolder() : map_(BacktraceMap::Create(getpid())) {}
+  explicit UnwindHelper(size_t max_depth)
+      : memory_(unwindstack::Memory::CreateProcessMemory(getpid())),
+        jit_(memory_),
+        dex_(memory_),
+        unwinder_(max_depth, &maps_, memory_) {
+    CHECK(maps_.Parse());
+    unwinder_.SetJitDebug(&jit_, unwindstack::Regs::CurrentArch());
+    unwinder_.SetDexFiles(&dex_, unwindstack::Regs::CurrentArch());
+    unwinder_.SetResolveNames(false);
+    unwindstack::Elf::SetCachingEnabled(true);
+  }
 
-  std::unique_ptr<BacktraceMap> map_;
+  // Reparse process mmaps to detect newly loaded libraries.
+  bool Reparse() { return maps_.Reparse(); }
+
+  static UnwindHelper* Get(Thread* self, size_t max_depth) {
+    UnwindHelper* tls = reinterpret_cast<UnwindHelper*>(self->GetCustomTLS(kTlsKey));
+    if (tls == nullptr) {
+      tls = new UnwindHelper(max_depth);
+      self->SetCustomTLS(kTlsKey, tls);
+    }
+    return tls;
+  }
+
+  unwindstack::Unwinder* Unwinder() { return &unwinder_; }
+
+ private:
+  unwindstack::LocalUpdatableMaps maps_;
+  std::shared_ptr<unwindstack::Memory> memory_;
+  unwindstack::JitDebug jit_;
+  unwindstack::DexFiles dex_;
+  unwindstack::Unwinder unwinder_;
 };
 
-static BacktraceMap* GetMap(Thread* self) {
-  BacktraceMapHolder* map_holder =
-      reinterpret_cast<BacktraceMapHolder*>(self->GetCustomTLS(kBacktraceCollectorTlsKey));
-  if (map_holder == nullptr) {
-    map_holder = new BacktraceMapHolder;
-    // We don't care about the function names. Turning this off makes everything significantly
-    // faster.
-    map_holder->map_->SetResolveNames(false);
-    // Only created and queried on Thread::Current so no sync needed.
-    self->SetCustomTLS(kBacktraceCollectorTlsKey, map_holder);
+void BacktraceCollector::Collect() {
+  if (!CollectImpl()) {
+    // Reparse process mmaps to detect newly loaded libraries and retry.
+    UnwindHelper::Get(Thread::Current(), max_depth_)->Reparse();
+    if (!CollectImpl()) {
+      // Failed to unwind stack. Ignore for now.
+    }
   }
-
-  return map_holder->map_.get();
 }
 
-void BacktraceCollector::Collect() {
-  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS,
-                                                         BACKTRACE_CURRENT_THREAD,
-                                                         GetMap(Thread::Current())));
-  backtrace->SetSkipFrames(true);
-  if (!backtrace->Unwind(skip_count_, nullptr)) {
-    return;
+bool BacktraceCollector::CollectImpl() {
+  unwindstack::Unwinder* unwinder = UnwindHelper::Get(Thread::Current(), max_depth_)->Unwinder();
+  std::unique_ptr<unwindstack::Regs> regs(unwindstack::Regs::CreateFromLocal());
+  RegsGetLocal(regs.get());
+  unwinder->SetRegs(regs.get());
+  unwinder->Unwind();
+
+  num_frames_ = 0;
+  if (unwinder->NumFrames() > skip_count_) {
+    for (auto it = unwinder->frames().begin() + skip_count_; it != unwinder->frames().end(); ++it) {
+      CHECK_LT(num_frames_, max_depth_);
+      out_frames_[num_frames_++] = static_cast<uintptr_t>(it->pc);
+
+      // Expected early end: Instrumentation breaks unwinding (b/138296821).
+      size_t align = GetInstructionSetAlignment(kRuntimeISA);
+      if (RoundUp(it->pc, align) == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
+        return true;
+      }
+    }
   }
-  for (Backtrace::const_iterator it = backtrace->begin();
-       max_depth_ > num_frames_ && it != backtrace->end();
-       ++it) {
-    out_frames_[num_frames_++] = static_cast<uintptr_t>(it->pc);
+
+  if (unwinder->LastErrorCode() == unwindstack::ERROR_INVALID_MAP) {
+    return false;
   }
+
+  return true;
 }
 
 #else
diff --git a/runtime/backtrace_helper.h b/runtime/backtrace_helper.h
index 8eda3fa..2fee62c 100644
--- a/runtime/backtrace_helper.h
+++ b/runtime/backtrace_helper.h
@@ -36,6 +36,10 @@
   void Collect();
 
  private:
+  // Try to collect backtrace. Returns false on failure.
+  // It is used to retry backtrace on temporary failure.
+  bool CollectImpl();
+
   uintptr_t* const out_frames_ = nullptr;
   size_t num_frames_ = 0u;
   const size_t max_depth_ = 0u;
diff --git a/runtime/base/locks.cc b/runtime/base/locks.cc
index 4349be0..7404d0d 100644
--- a/runtime/base/locks.cc
+++ b/runtime/base/locks.cc
@@ -63,6 +63,7 @@
 Mutex* Locks::runtime_shutdown_lock_ = nullptr;
 Mutex* Locks::runtime_thread_pool_lock_ = nullptr;
 Mutex* Locks::cha_lock_ = nullptr;
+Mutex* Locks::jit_lock_ = nullptr;
 Mutex* Locks::subtype_check_lock_ = nullptr;
 Mutex* Locks::thread_list_lock_ = nullptr;
 ConditionVariable* Locks::thread_exit_cond_ = nullptr;
@@ -75,6 +76,7 @@
 Mutex* Locks::jni_weak_globals_lock_ = nullptr;
 ReaderWriterMutex* Locks::dex_lock_ = nullptr;
 Mutex* Locks::native_debug_interface_lock_ = nullptr;
+ReaderWriterMutex* Locks::jni_id_lock_ = nullptr;
 std::vector<BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_;
 Atomic<const BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_guard_;
 
@@ -147,6 +149,7 @@
     DCHECK(mutator_lock_ != nullptr);
     DCHECK(profiler_lock_ != nullptr);
     DCHECK(cha_lock_ != nullptr);
+    DCHECK(jit_lock_ != nullptr);
     DCHECK(subtype_check_lock_ != nullptr);
     DCHECK(thread_list_lock_ != nullptr);
     DCHECK(thread_suspend_count_lock_ != nullptr);
@@ -155,6 +158,7 @@
     DCHECK(user_code_suspension_lock_ != nullptr);
     DCHECK(dex_lock_ != nullptr);
     DCHECK(native_debug_interface_lock_ != nullptr);
+    DCHECK(jni_id_lock_ != nullptr);
     DCHECK(runtime_thread_pool_lock_ != nullptr);
   } else {
     // Create global locks in level order from highest lock level to lowest.
@@ -303,6 +307,10 @@
     DCHECK(custom_tls_lock_ == nullptr);
     custom_tls_lock_ = new Mutex("Thread::custom_tls_ lock", current_lock_level);
 
+    UPDATE_CURRENT_LOCK_LEVEL(kJitCodeCacheLock);
+    DCHECK(jit_lock_ == nullptr);
+    jit_lock_ = new Mutex("Jit code cache", current_lock_level);
+
     UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
     DCHECK(cha_lock_ == nullptr);
     cha_lock_ = new Mutex("CHA lock", current_lock_level);
@@ -311,6 +319,10 @@
     DCHECK(native_debug_interface_lock_ == nullptr);
     native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);
 
+    UPDATE_CURRENT_LOCK_LEVEL(kJniIdLock);
+    DCHECK(jni_id_lock_ == nullptr);
+    jni_id_lock_ = new ReaderWriterMutex("JNI id map lock", current_lock_level);
+
     UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
     DCHECK(abort_lock_ == nullptr);
     abort_lock_ = new Mutex("abort lock", current_lock_level, true);
diff --git a/runtime/base/locks.h b/runtime/base/locks.h
index b15fd32..c1667f3 100644
--- a/runtime/base/locks.h
+++ b/runtime/base/locks.h
@@ -45,6 +45,7 @@
   kUnexpectedSignalLock,
   kThreadSuspendCountLock,
   kAbortLock,
+  kJniIdLock,
   kNativeDebugInterfaceLock,
   kSignalHandlingLock,
   // A generic lock level for mutexs that should not allow any additional mutexes to be gained after
@@ -106,6 +107,9 @@
   kDexToDexCompilerLock,
   kSubtypeCheckLock,
   kBreakpointLock,
+  // This is a generic lock level for a lock meant to be gained after having a
+  // monitor lock.
+  kPostMonitorLock,
   kMonitorLock,
   kMonitorListLock,
   kJniLoadLibraryLock,
@@ -128,6 +132,9 @@
 
   kMutatorLock,
   kInstrumentEntrypointsLock,
+  // This is a generic lock level for a top-level lock meant to be gained after having the
+  // UserCodeSuspensionLock.
+  kPostUserCodeSuspensionTopLevelLock,
   kUserCodeSuspensionLock,
   kZygoteCreationLock,
 
@@ -328,8 +335,11 @@
   // GetThreadLocalStorage.
   static Mutex* custom_tls_lock_ ACQUIRED_AFTER(jni_function_table_lock_);
 
+  // Guards access to any JIT data structure.
+  static Mutex* jit_lock_ ACQUIRED_AFTER(custom_tls_lock_);
+
   // Guards Class Hierarchy Analysis (CHA).
-  static Mutex* cha_lock_ ACQUIRED_AFTER(custom_tls_lock_);
+  static Mutex* cha_lock_ ACQUIRED_AFTER(jit_lock_);
 
   // When declaring any Mutex add BOTTOM_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
   // doesn't try to acquire a higher level Mutex. NB Due to the way the annotalysis works this
@@ -350,8 +360,12 @@
   // Guards the magic global variables used by native tools (e.g. libunwind).
   static Mutex* native_debug_interface_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
 
+  // Guards the data structures responsible for keeping track of the JNI
+  // jmethodID/jfieldID <-> ArtMethod/ArtField mapping when using index-ids.
+  static ReaderWriterMutex* jni_id_lock_ ACQUIRED_AFTER(native_debug_interface_lock_);
+
   // Have an exclusive logging thread.
-  static Mutex* logging_lock_ ACQUIRED_AFTER(native_debug_interface_lock_);
+  static Mutex* logging_lock_ ACQUIRED_AFTER(jni_id_lock_);
 
   // List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
   // avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index b29ae55..821c75d 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -82,7 +82,9 @@
           // Avoid recursive death.
           level == kAbortLock ||
           // Locks at the absolute top of the stack can be locked at any time.
-          level == kTopLockLevel) << level;
+          level == kTopLockLevel ||
+          // The unexpected signal handler may be catching signals from any thread.
+          level == kUnexpectedSignalLock) << level;
   }
 }
 
@@ -204,13 +206,10 @@
     int32_t cur_state = state_.load(std::memory_order_relaxed);
     if (LIKELY(cur_state > 0)) {
       // Reduce state by 1 and impose lock release load/store ordering.
-      // Note, the relaxed loads below musn't reorder before the CompareAndSet.
-      // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
-      // a status bit into the state on contention.
+      // Note, the num_contenders_ load below mustn't reorder before the CompareAndSet.
       done = state_.CompareAndSetWeakSequentiallyConsistent(cur_state, cur_state - 1);
       if (done && (cur_state - 1) == 0) {  // Weak CAS may fail spuriously.
-        if (num_pending_writers_.load(std::memory_order_seq_cst) > 0 ||
-            num_pending_readers_.load(std::memory_order_seq_cst) > 0) {
+        if (num_contenders_.load(std::memory_order_seq_cst) > 0) {
           // Wake any exclusive waiters as there are now no readers.
           futex(state_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
         }
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index f4394f7..0b8c781 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -19,6 +19,8 @@
 #include <errno.h>
 #include <sys/time.h>
 
+#include <sstream>
+
 #include "android-base/stringprintf.h"
 
 #include "base/atomic.h"
@@ -59,6 +61,28 @@
 }
 #endif
 
+#if ART_USE_FUTEXES
+// If we wake up from a futex wake, and the runtime disappeared while we were asleep,
+// it's important to stop in our tracks before we touch deallocated memory.
+static inline void SleepIfRuntimeDeleted(Thread* self) {
+  if (self != nullptr) {
+    JNIEnvExt* const env = self->GetJniEnv();
+    if (UNLIKELY(env != nullptr && env->IsRuntimeDeleted())) {
+      DCHECK(self->IsDaemon());
+      // If the runtime has been deleted, then we cannot proceed. Just sleep forever. This may
+      // occur for user daemon threads that get a spurious wakeup. This occurs for test 132 with
+      // --host and --gdb.
+      // After we wake up, the runtime may have been shutdown, which means that this condition may
+      // have been deleted. It is not safe to retry the wait.
+      SleepForever();
+    }
+  }
+}
+#else
+// We should be doing this for pthreads too, but it seems to be impossible for something
+// like a condition variable wait. Thus we don't bother trying.
+#endif
+
 // Wait for an amount of time that roughly increases in the argument i.
 // Spin for small arguments and yield/sleep for longer ones.
 static void BackOff(uint32_t i) {
@@ -80,6 +104,34 @@
   }
 }
 
+// Wait until pred(testLoc->load(std::memory_order_relaxed)) holds, or until a
+// short time interval, on the order of kernel context-switch time, passes.
+// Return true if the predicate test succeeded, false if we timed out.
+template<typename Pred>
+static inline bool WaitBrieflyFor(AtomicInteger* testLoc, Thread* self, Pred pred) {
+  // TODO: Tune these parameters correctly. BackOff(3) should take on the order of 100 cycles. So
+  // this should result in retrying <= 10 times, usually waiting around 100 cycles each. The
+  // maximum delay should be significantly less than the expected futex() context switch time, so
+  // there should be little danger of this worsening things appreciably. If the lock was only
+  // held briefly by a running thread, this should help immensely.
+  static constexpr uint32_t kMaxBackOff = 3;  // Should probably be <= kSpinMax above.
+  static constexpr uint32_t kMaxIters = 50;
+  JNIEnvExt* const env = self == nullptr ? nullptr : self->GetJniEnv();
+  for (uint32_t i = 1; i <= kMaxIters; ++i) {
+    BackOff(std::min(i, kMaxBackOff));
+    if (pred(testLoc->load(std::memory_order_relaxed))) {
+      return true;
+    }
+    if (UNLIKELY(env != nullptr && env->IsRuntimeDeleted())) {
+      // This returns true once we've started shutting down. We then try to reach a quiescent
+      // state as soon as possible to avoid touching data that may be deallocated by the shutdown
+      // process. It currently relies on a timeout.
+      return false;
+    }
+  }
+  return false;
+}
+
 class ScopedAllMutexesLock final {
  public:
   explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
@@ -187,6 +239,7 @@
     CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
         << "Waiting on unacquired mutex: " << name_;
     bool bad_mutexes_held = false;
+    std::string error_msg;
     for (int i = kLockLevelCount - 1; i >= 0; --i) {
       if (i != level_) {
         BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
@@ -205,22 +258,28 @@
             return self->GetUserCodeSuspendCount() != 0;
           };
           if (is_suspending_for_user_code()) {
-            LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
-                      << "(level " << LockLevel(i) << ") while performing wait on "
-                      << "\"" << name_ << "\" (level " << level_ << ") "
-                      << "with SuspendReason::kForUserCode pending suspensions";
+            std::ostringstream oss;
+            oss << "Holding \"" << held_mutex->name_ << "\" "
+                << "(level " << LockLevel(i) << ") while performing wait on "
+                << "\"" << name_ << "\" (level " << level_ << ") "
+                << "with SuspendReason::kForUserCode pending suspensions";
+            error_msg = oss.str();
+            LOG(ERROR) << error_msg;
             bad_mutexes_held = true;
           }
         } else if (held_mutex != nullptr) {
-          LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
-                     << "(level " << LockLevel(i) << ") while performing wait on "
-                     << "\"" << name_ << "\" (level " << level_ << ")";
+          std::ostringstream oss;
+          oss << "Holding \"" << held_mutex->name_ << "\" "
+              << "(level " << LockLevel(i) << ") while performing wait on "
+              << "\"" << name_ << "\" (level " << level_ << ")";
+          error_msg = oss.str();
+          LOG(ERROR) << error_msg;
           bad_mutexes_held = true;
         }
       }
     }
     if (gAborting == 0) {  // Avoid recursive aborts.
-      CHECK(!bad_mutexes_held) << this;
+      CHECK(!bad_mutexes_held) << error_msg;
     }
   }
 }
@@ -372,24 +431,36 @@
       } else {
         // Failed to acquire, hang up.
         ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
-        // Increment contender count. We can't create enough threads for this to overflow.
-        increment_contenders();
-        // Make cur_state again reflect the expected value of state_and_contenders.
-        cur_state += kContenderIncrement;
-        if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
-          self->CheckEmptyCheckpointFromMutex();
-        }
-        if (futex(state_and_contenders_.Address(), FUTEX_WAIT_PRIVATE, cur_state,
-                  nullptr, nullptr, 0) != 0) {
-          // We only went to sleep after incrementing and contenders and checking that the lock
-          // is still held by someone else.
-          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
-          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
-          if ((errno != EAGAIN) && (errno != EINTR)) {
-            PLOG(FATAL) << "futex wait failed for " << name_;
+        // Empirically, it appears important to spin again each time through the loop; if we
+        // bother to go to sleep and wake up, we should be fairly persistent in trying for the
+        // lock.
+        if (!WaitBrieflyFor(&state_and_contenders_, self,
+                            [](int32_t v) { return (v & kHeldMask) == 0; })) {
+          // Increment contender count. We can't create enough threads for this to overflow.
+          increment_contenders();
+          // Make cur_state again reflect the expected value of state_and_contenders.
+          cur_state += kContenderIncrement;
+          if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
+            self->CheckEmptyCheckpointFromMutex();
           }
+          do {
+            if (futex(state_and_contenders_.Address(), FUTEX_WAIT_PRIVATE, cur_state,
+                      nullptr, nullptr, 0) != 0) {
+              // We only went to sleep after incrementing contenders and checking that the
+              // lock is still held by someone else.  EAGAIN and EINTR both indicate a spurious
+              // failure, try again from the beginning.  We don't use TEMP_FAILURE_RETRY so we can
+              // intentionally retry to acquire the lock.
+              if ((errno != EAGAIN) && (errno != EINTR)) {
+                PLOG(FATAL) << "futex wait failed for " << name_;
+              }
+            }
+            SleepIfRuntimeDeleted(self);
+            // Retry until not held. In heavy contention situations we otherwise get redundant
+            // futex wakeups as a result of repeatedly decrementing and incrementing contenders.
+            cur_state = state_and_contenders_.load(std::memory_order_relaxed);
+          } while ((cur_state & kHeldMask) != 0);
+          decrement_contenders();
         }
-        decrement_contenders();
       }
     } while (!done);
     // Confirm that lock is now held.
@@ -397,7 +468,8 @@
 #else
     CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
 #endif
-    DCHECK_EQ(GetExclusiveOwnerTid(), 0);
+    DCHECK_EQ(GetExclusiveOwnerTid(), 0) << " my tid = " << SafeGetTid(self)
+                                         << " recursive_ = " << recursive_;
     exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
     RegisterAsLocked(self);
   }
@@ -450,6 +522,48 @@
   return true;
 }
 
+bool Mutex::ExclusiveTryLockWithSpinning(Thread* self) {
+  // Spin a small number of times, since this affects our ability to respond to suspension
+  // requests. We spin repeatedly only if the mutex repeatedly becomes available and unavailable
+  // in rapid succession, and then we will typically not spin for the maximal period.
+  const int kMaxSpins = 5;
+  for (int i = 0; i < kMaxSpins; ++i) {
+    if (ExclusiveTryLock(self)) {
+      return true;
+    }
+#if ART_USE_FUTEXES
+    if (!WaitBrieflyFor(&state_and_contenders_, self,
+            [](int32_t v) { return (v & kHeldMask) == 0; })) {
+      return false;
+    }
+#endif
+  }
+  return ExclusiveTryLock(self);
+}
+
+#if ART_USE_FUTEXES
+void Mutex::ExclusiveLockUncontendedFor(Thread* new_owner) {
+  DCHECK_EQ(level_, kMonitorLock);
+  DCHECK(!recursive_);
+  state_and_contenders_.store(kHeldMask, std::memory_order_relaxed);
+  recursion_count_ = 1;
+  exclusive_owner_.store(SafeGetTid(new_owner), std::memory_order_relaxed);
+  // Don't call RegisterAsLocked(). It wouldn't register anything anyway.  And
+  // this happens as we're inflating a monitor, which doesn't logically affect
+  // held "locks"; it effectively just converts a thin lock to a mutex.  By doing
+  // this while the lock is already held, we're delaying the acquisition of a
+  // logically held mutex, which can introduce bogus lock order violations.
+}
+
+void Mutex::ExclusiveUnlockUncontended() {
+  DCHECK_EQ(level_, kMonitorLock);
+  state_and_contenders_.store(0, std::memory_order_relaxed);
+  recursion_count_ = 0;
+  exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
+  // Skip RegisterAsUnlocked(), which wouldn't do anything anyway.
+}
+#endif  // ART_USE_FUTEXES
+
 void Mutex::ExclusiveUnlock(Thread* self) {
   if (kIsDebugBuild && self != nullptr && self != Thread::Current()) {
     std::string name1 = "<null>";
@@ -542,21 +656,19 @@
 ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level)
     : BaseMutex(name, level)
 #if ART_USE_FUTEXES
-    , state_(0), num_pending_readers_(0), num_pending_writers_(0)
+    , state_(0), exclusive_owner_(0), num_contenders_(0)
 #endif
 {
 #if !ART_USE_FUTEXES
   CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr));
 #endif
-  exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
 }
 
 ReaderWriterMutex::~ReaderWriterMutex() {
 #if ART_USE_FUTEXES
   CHECK_EQ(state_.load(std::memory_order_relaxed), 0);
   CHECK_EQ(GetExclusiveOwnerTid(), 0);
-  CHECK_EQ(num_pending_readers_.load(std::memory_order_relaxed), 0);
-  CHECK_EQ(num_pending_writers_.load(std::memory_order_relaxed), 0);
+  CHECK_EQ(num_contenders_.load(std::memory_order_relaxed), 0);
 #else
   // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
   // may still be using locks.
@@ -582,18 +694,21 @@
     } else {
       // Failed to acquire, hang up.
       ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
-      ++num_pending_writers_;
-      if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
-        self->CheckEmptyCheckpointFromMutex();
-      }
-      if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
-        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
-        // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
-        if ((errno != EAGAIN) && (errno != EINTR)) {
-          PLOG(FATAL) << "futex wait failed for " << name_;
+      if (!WaitBrieflyFor(&state_, self, [](int32_t v) { return v == 0; })) {
+        num_contenders_.fetch_add(1);
+        if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
+          self->CheckEmptyCheckpointFromMutex();
         }
+        if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
+          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
+          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
+          if ((errno != EAGAIN) && (errno != EINTR)) {
+            PLOG(FATAL) << "futex wait failed for " << name_;
+          }
+        }
+        SleepIfRuntimeDeleted(self);
+        num_contenders_.fetch_sub(1);
       }
-      --num_pending_writers_;
     }
   } while (!done);
   DCHECK_EQ(state_.load(std::memory_order_relaxed), -1);
@@ -619,14 +734,11 @@
       // We're no longer the owner.
       exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
       // Change state from -1 to 0 and impose load/store ordering appropriate for lock release.
-      // Note, the relaxed loads below musn't reorder before the CompareAndSet.
-      // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
-      // a status bit into the state on contention.
+      // Note, the num_contenders_ load below mustn't reorder before the CompareAndSet.
       done = state_.CompareAndSetWeakSequentiallyConsistent(-1 /* cur_state*/, 0 /* new state */);
       if (LIKELY(done)) {  // Weak CAS may fail spuriously.
         // Wake any waiters.
-        if (UNLIKELY(num_pending_readers_.load(std::memory_order_seq_cst) > 0 ||
-                     num_pending_writers_.load(std::memory_order_seq_cst) > 0)) {
+        if (UNLIKELY(num_contenders_.load(std::memory_order_seq_cst) > 0)) {
           futex(state_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
         }
       }
@@ -661,22 +773,25 @@
         return false;  // Timed out.
       }
       ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
-      ++num_pending_writers_;
-      if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
-        self->CheckEmptyCheckpointFromMutex();
-      }
-      if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, &rel_ts, nullptr, 0) != 0) {
-        if (errno == ETIMEDOUT) {
-          --num_pending_writers_;
-          return false;  // Timed out.
-        } else if ((errno != EAGAIN) && (errno != EINTR)) {
-          // EAGAIN and EINTR both indicate a spurious failure,
-          // recompute the relative time out from now and try again.
-          // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts;
-          PLOG(FATAL) << "timed futex wait failed for " << name_;
+      if (!WaitBrieflyFor(&state_, self, [](int32_t v) { return v == 0; })) {
+        num_contenders_.fetch_add(1);
+        if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
+          self->CheckEmptyCheckpointFromMutex();
         }
+        if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, &rel_ts, nullptr, 0) != 0) {
+          if (errno == ETIMEDOUT) {
+            num_contenders_.fetch_sub(1);
+            return false;  // Timed out.
+          } else if ((errno != EAGAIN) && (errno != EINTR)) {
+            // EAGAIN and EINTR both indicate a spurious failure,
+            // recompute the relative time out from now and try again.
+            // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts.
+            PLOG(FATAL) << "timed futex wait failed for " << name_;
+          }
+        }
+        SleepIfRuntimeDeleted(self);
+        num_contenders_.fetch_sub(1);
       }
-      --num_pending_writers_;
     }
   } while (!done);
 #else
@@ -702,16 +817,19 @@
 void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_state) {
   // Owner holds it exclusively, hang up.
   ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
-  ++num_pending_readers_;
-  if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
-    self->CheckEmptyCheckpointFromMutex();
-  }
-  if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
-    if (errno != EAGAIN && errno != EINTR) {
-      PLOG(FATAL) << "futex wait failed for " << name_;
+  if (!WaitBrieflyFor(&state_, self, [](int32_t v) { return v >= 0; })) {
+    num_contenders_.fetch_add(1);
+    if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
+      self->CheckEmptyCheckpointFromMutex();
     }
+    if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
+      if (errno != EAGAIN && errno != EINTR) {
+        PLOG(FATAL) << "futex wait failed for " << name_;
+      }
+    }
+    SleepIfRuntimeDeleted(self);
+    num_contenders_.fetch_sub(1);
   }
-  --num_pending_readers_;
 }
 #endif
 
@@ -761,8 +879,7 @@
       << " owner=" << GetExclusiveOwnerTid()
 #if ART_USE_FUTEXES
       << " state=" << state_.load(std::memory_order_seq_cst)
-      << " num_pending_writers=" << num_pending_writers_.load(std::memory_order_seq_cst)
-      << " num_pending_readers=" << num_pending_readers_.load(std::memory_order_seq_cst)
+      << " num_contenders=" << num_contenders_.load(std::memory_order_seq_cst)
 #endif
       << " ";
   DumpContention(os);
@@ -782,8 +899,7 @@
 #if ART_USE_FUTEXES
   // Wake up all the waiters so they will respond to the emtpy checkpoint.
   DCHECK(should_respond_to_empty_checkpoint_request_);
-  if (UNLIKELY(num_pending_readers_.load(std::memory_order_relaxed) > 0 ||
-               num_pending_writers_.load(std::memory_order_relaxed) > 0)) {
+  if (UNLIKELY(num_contenders_.load(std::memory_order_relaxed) > 0)) {
     futex(state_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
   }
 #else
@@ -893,18 +1009,7 @@
       PLOG(FATAL) << "futex wait failed for " << name_;
     }
   }
-  if (self != nullptr) {
-    JNIEnvExt* const env = self->GetJniEnv();
-    if (UNLIKELY(env != nullptr && env->IsRuntimeDeleted())) {
-      CHECK(self->IsDaemon());
-      // If the runtime has been deleted, then we cannot proceed. Just sleep forever. This may
-      // occur for user daemon threads that get a spurious wakeup. This occurs for test 132 with
-      // --host and --gdb.
-      // After we wake up, the runtime may have been shutdown, which means that this condition may
-      // have been deleted. It is not safe to retry the wait.
-      SleepForever();
-    }
-  }
+  SleepIfRuntimeDeleted(self);
   guard_.ExclusiveLock(self);
   CHECK_GT(num_waiters_, 0);
   num_waiters_--;
@@ -946,6 +1051,7 @@
       PLOG(FATAL) << "timed futex wait failed for " << name_;
     }
   }
+  SleepIfRuntimeDeleted(self);
   guard_.ExclusiveLock(self);
   CHECK_GT(num_waiters_, 0);
   num_waiters_--;
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index d297fc4..33878e6 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -57,7 +57,7 @@
 constexpr bool kDebugLocking = kIsDebugBuild;
 
 // Record Log contention information, dumpable via SIGQUIT.
-#ifdef ART_USE_FUTEXES
+#if ART_USE_FUTEXES
 // To enable lock contention logging, set this to true.
 constexpr bool kLogLockContentions = false;
 // FUTEX_WAKE first argument:
@@ -102,7 +102,11 @@
 
   BaseMutex(const char* name, LockLevel level);
   virtual ~BaseMutex();
+
+  // Add this mutex to those owned by self, and perform appropriate checking.
+  // For this call only, self may also be another suspended thread.
   void RegisterAsLocked(Thread* self);
+
   void RegisterAsUnlocked(Thread* self);
   void CheckSafeToWait(Thread* self);
 
@@ -156,8 +160,14 @@
 // -------------------------------------------
 // Free      | Exclusive     | error
 // Exclusive | Block*        | Free
-// * Mutex is not reentrant and so an attempt to ExclusiveLock on the same thread will result in
-//   an error. Being non-reentrant simplifies Waiting on ConditionVariables.
+// * Mutex is not reentrant unless recursive is true. An attempt to ExclusiveLock on a
+// recursive=false Mutex on a thread already owning the Mutex results in an error.
+//
+// TODO(b/140590186): Remove support for recursive == true.
+//
+// Some mutexes, including those associated with Java monitors may be accessed (in particular
+// acquired) by a thread in suspended state. Suspending all threads does NOT prevent mutex state
+// from changing.
 std::ostream& operator<<(std::ostream& os, const Mutex& mu);
 class LOCKABLE Mutex : public BaseMutex {
  public:
@@ -173,6 +183,8 @@
   // Returns true if acquires exclusive access, false otherwise.
   bool ExclusiveTryLock(Thread* self) TRY_ACQUIRE(true);
   bool TryLock(Thread* self) TRY_ACQUIRE(true) { return ExclusiveTryLock(self); }
+  // Equivalent to ExclusiveTryLock, but retry for a short period before giving up.
+  bool ExclusiveTryLockWithSpinning(Thread* self) TRY_ACQUIRE(true);
 
   // Release exclusive access.
   void ExclusiveUnlock(Thread* self) RELEASE();
@@ -200,7 +212,9 @@
   // whether we hold the lock; any other information may be invalidated before we return.
   pid_t GetExclusiveOwnerTid() const;
 
-  // Returns how many times this Mutex has been locked, it is better to use AssertHeld/NotHeld.
+  // Returns how many times this Mutex has been locked, it is typically better to use
+  // AssertHeld/NotHeld. For a simply held mutex this method returns 1. Should only be called
+  // while holding the mutex or threads are suspended.
   unsigned int GetDepth() const {
     return recursion_count_;
   }
@@ -212,6 +226,18 @@
 
   void WakeupToRespondToEmptyCheckpoint() override;
 
+#if ART_USE_FUTEXES
+  // Acquire the mutex, possibly on behalf of another thread. Acquisition must be
+  // uncontended. new_owner must be current thread or suspended.
+  // Mutex must be at level kMonitorLock.
+  // Not implementable for the pthreads version, so we must avoid calling it there.
+  void ExclusiveLockUncontendedFor(Thread* new_owner);
+
+  // Undo the effect of the previous call, setting the mutex back to unheld.
+  // Still assumes no concurrent access.
+  void ExclusiveUnlockUncontended();
+#endif  // ART_USE_FUTEXES
+
  private:
 #if ART_USE_FUTEXES
   // Low order bit: 0 is unheld, 1 is held.
@@ -336,8 +362,9 @@
 
   // Assert the current thread doesn't hold this ReaderWriterMutex either in shared or exclusive
   // mode.
-  ALWAYS_INLINE void AssertNotHeld(const Thread* self) ASSERT_SHARED_CAPABILITY(!this) {
+  ALWAYS_INLINE void AssertNotHeld(const Thread* self) ASSERT_CAPABILITY(!this) {
     if (kDebugLocking && (gAborting == 0)) {
+      CHECK(!IsExclusiveHeld(self)) << *this;
       CHECK(!IsSharedHeld(self)) << *this;
     }
   }
@@ -359,14 +386,15 @@
   // Out-of-inline path for handling contention for a SharedLock.
   void HandleSharedLockContention(Thread* self, int32_t cur_state);
 
-  // -1 implies held exclusive, +ve shared held by state_ many owners.
+  // -1 implies held exclusive, >= 0: shared held by state_ many owners.
   AtomicInteger state_;
   // Exclusive owner. Modification guarded by this mutex.
   Atomic<pid_t> exclusive_owner_;
-  // Number of contenders waiting for a reader share.
-  AtomicInteger num_pending_readers_;
-  // Number of contenders waiting to be the writer.
-  AtomicInteger num_pending_writers_;
+  // Number of contenders waiting for either a reader share or exclusive access.  We only maintain
+  // the sum, since we would otherwise need to read both in all unlock operations.
+  // We keep this separate from the state, since futexes are limited to 32 bits, and obvious
+  // approaches to combining with state_ risk overflow.
+  AtomicInteger num_contenders_;
 #else
   pthread_rwlock_t rwlock_;
   Atomic<pid_t> exclusive_owner_;  // Writes guarded by rwlock_. Asynchronous reads are OK.
diff --git a/runtime/base/quasi_atomic.h b/runtime/base/quasi_atomic.h
index 0012f64..5aa4dde 100644
--- a/runtime/base/quasi_atomic.h
+++ b/runtime/base/quasi_atomic.h
@@ -46,9 +46,9 @@
 // quasiatomic operations that are performed on partially-overlapping
 // memory.
 class QuasiAtomic {
-  static constexpr bool NeedSwapMutexes(InstructionSet isa) {
-    // TODO - mips64 still need this for Cas64 ???
-    return (isa == InstructionSet::kMips) || (isa == InstructionSet::kMips64);
+  static constexpr bool NeedSwapMutexes(InstructionSet isa ATTRIBUTE_UNUSED) {
+    // TODO: Remove this function now that mips support has been removed.
+    return false;
   }
 
  public:
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 8f9f45c..f4bda90 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -40,7 +40,12 @@
       CHECK_EQ(GetDexPc(), dex::kDexNoIndex);
     }
 
-    if (m == nullptr || m->IsNative() || m->IsRuntimeMethod() || IsShadowFrame()) {
+    // If the method is not compiled, continue the stack walk.
+    if (m == nullptr ||
+        m->IsNative() ||
+        m->IsRuntimeMethod() ||
+        IsShadowFrame() ||
+        !GetCurrentOatQuickMethodHeader()->IsOptimized()) {
       return true;
     }
 
@@ -68,6 +73,12 @@
     StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
     CodeItemDataAccessor accessor(m->DexInstructionData());
     uint16_t number_of_dex_registers = accessor.RegistersSize();
+
+    if (!Runtime::Current()->IsAsyncDeoptimizeable(GetCurrentQuickFramePc())) {
+      // We can only guarantee dex register info presence for debuggable methods.
+      return;
+    }
+
     DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map);
     DCHECK_EQ(dex_register_map.size(), number_of_dex_registers);
     uint32_t register_mask = code_info.GetRegisterMaskOf(stack_map);
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 978b1ab..b3aecde 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -19,6 +19,7 @@
 
 #include <atomic>
 
+#include "android-base/thread_annotations.h"
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/mutex.h"
@@ -27,12 +28,14 @@
 #include "dex/dex_file_structs.h"
 #include "gc_root-inl.h"
 #include "handle_scope-inl.h"
+#include "jni/jni_internal.h"
 #include "mirror/class_loader.h"
 #include "mirror/dex_cache-inl.h"
 #include "mirror/iftable.h"
 #include "mirror/object_array-inl.h"
 #include "obj_ptr-inl.h"
 #include "scoped_thread_state_change-inl.h"
+#include "well_known_classes.h"
 
 namespace art {
 
@@ -365,10 +368,26 @@
                                                   type);
   } else if (kResolveMode == ResolveMode::kCheckICCEAndIAE) {
     referrer = referrer->GetInterfaceMethodIfProxy(image_pointer_size_);
+    const dex::MethodId& method_id = referrer->GetDexFile()->GetMethodId(method_idx);
+    ObjPtr<mirror::Class> cls =
+        LookupResolvedType(method_id.class_idx_,
+                           referrer->GetDexCache(),
+                           referrer->GetClassLoader());
+    if (cls == nullptr) {
+      // The verifier breaks the invariant that a resolved method must have its
+      // class in the class table, so resolve the type in case we haven't found it.
+      // b/73760543
+      StackHandleScope<2> hs(Thread::Current());
+      Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(referrer->GetDexCache()));
+      Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(referrer->GetClassLoader()));
+      cls = ResolveType(method_id.class_idx_, h_dex_cache, h_class_loader);
+      if (hs.Self()->IsExceptionPending()) {
+        return nullptr;
+      }
+    }
     // Check if the invoke type matches the class type.
-    ObjPtr<mirror::DexCache> dex_cache = referrer->GetDexCache();
-    ObjPtr<mirror::ClassLoader> class_loader = referrer->GetClassLoader();
-    if (CheckInvokeClassMismatch</* kThrow= */ true>(dex_cache, type, method_idx, class_loader)) {
+    if (CheckInvokeClassMismatch</* kThrow= */ true>(
+            referrer->GetDexCache(), type, [cls]() { return cls; })) {
       DCHECK(Thread::Current()->IsExceptionPending());
       return nullptr;
     }
@@ -376,7 +395,7 @@
     ObjPtr<mirror::Class> referring_class = referrer->GetDeclaringClass();
     if (!referring_class->CheckResolvedMethodAccess(resolved_method->GetDeclaringClass(),
                                                     resolved_method,
-                                                    dex_cache,
+                                                    referrer->GetDexCache(),
                                                     method_idx,
                                                     type)) {
       DCHECK(Thread::Current()->IsExceptionPending());
@@ -449,6 +468,18 @@
   return class_roots;
 }
 
+template <typename Visitor>
+void ClassLinker::VisitKnownDexFiles(Thread* self, Visitor visitor) {
+  ReaderMutexLock rmu(self, *Locks::dex_lock_);
+  std::for_each(dex_caches_.begin(),
+                dex_caches_.end(),
+                [&](DexCacheData& dcd) REQUIRES(Locks::mutator_lock_) {
+                  if (dcd.IsValid()) {
+                    visitor(dcd.dex_file);
+                  }
+                });
+}
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_CLASS_LINKER_INL_H_
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 9575546..c39c5be 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -20,6 +20,7 @@
 
 #include <algorithm>
 #include <deque>
+#include <forward_list>
 #include <iostream>
 #include <map>
 #include <memory>
@@ -35,10 +36,13 @@
 
 #include "art_field-inl.h"
 #include "art_method-inl.h"
+#include "barrier.h"
 #include "base/arena_allocator.h"
 #include "base/casts.h"
+#include "base/file_utils.h"
 #include "base/leb128.h"
 #include "base/logging.h"
+#include "base/mutex-inl.h"
 #include "base/os.h"
 #include "base/quasi_atomic.h"
 #include "base/scoped_arena_containers.h"
@@ -65,7 +69,7 @@
 #include "dex/dex_file_loader.h"
 #include "dex/signature-inl.h"
 #include "dex/utf.h"
-#include "entrypoints/entrypoint_utils.h"
+#include "entrypoints/entrypoint_utils-inl.h"
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "experimental_flags.h"
 #include "gc/accounting/card_table-inl.h"
@@ -109,8 +113,10 @@
 #include "mirror/method_type.h"
 #include "mirror/object-inl.h"
 #include "mirror/object-refvisitor-inl.h"
+#include "mirror/object.h"
 #include "mirror/object_array-alloc-inl.h"
 #include "mirror/object_array-inl.h"
+#include "mirror/object_array.h"
 #include "mirror/object_reference.h"
 #include "mirror/object_reference-inl.h"
 #include "mirror/proxy.h"
@@ -132,12 +138,16 @@
 #include "runtime_callbacks.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread-inl.h"
+#include "thread.h"
 #include "thread_list.h"
 #include "trace.h"
+#include "transaction.h"
 #include "utils/dex_cache_arrays_layout-inl.h"
 #include "verifier/class_verifier.h"
 #include "well_known_classes.h"
 
+#include "interpreter/interpreter_mterp_impl.h"
+
 namespace art {
 
 using android::base::StringPrintf;
@@ -219,16 +229,223 @@
 // Ensures that methods have the kAccSkipAccessChecks bit set. We use the
 // kAccVerificationAttempted bit on the class access flags to determine whether this has been done
 // before.
-template <bool kNeedsVerified = false>
 static void EnsureSkipAccessChecksMethods(Handle<mirror::Class> klass, PointerSize pointer_size)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  if (kNeedsVerified) {
-    // To not fail access-flags access checks, push a minimal state.
-    mirror::Class::SetStatus(klass, ClassStatus::kVerified, Thread::Current());
-  }
+  Runtime* runtime = Runtime::Current();
+  ClassLinker* class_linker = runtime->GetClassLinker();
   if (!klass->WasVerificationAttempted()) {
     klass->SetSkipAccessChecksFlagOnAllMethods(pointer_size);
     klass->SetVerificationAttempted();
+    // Now that the class has passed verification, try to set nterp entrypoints
+    // to methods that currently use the switch interpreter.
+    if (interpreter::CanRuntimeUseNterp()) {
+      for (ArtMethod& m : klass->GetMethods(pointer_size)) {
+        if (class_linker->IsQuickToInterpreterBridge(m.GetEntryPointFromQuickCompiledCode()) &&
+            interpreter::CanMethodUseNterp(&m)) {
+          if (klass->IsVisiblyInitialized() || !NeedsClinitCheckBeforeCall(&m)) {
+            runtime->GetInstrumentation()->UpdateMethodsCode(&m, interpreter::GetNterpEntryPoint());
+          } else {
+            // Put the resolution stub, which will initialize the class and then
+            // call the method with nterp.
+            runtime->GetInstrumentation()->UpdateMethodsCode(&m, GetQuickResolutionStub());
+          }
+        }
+      }
+    }
+  }
+}
+
+// Callback responsible for making a batch of classes visibly initialized
+// after all threads have called it from a checkpoint, ensuring visibility.
+class ClassLinker::VisiblyInitializedCallback final
+    : public Closure, public IntrusiveForwardListNode<VisiblyInitializedCallback> {
+ public:
+  explicit VisiblyInitializedCallback(ClassLinker* class_linker)
+      : class_linker_(class_linker),
+        num_classes_(0u),
+        thread_visibility_counter_(0),
+        barriers_() {
+    std::fill_n(classes_, kMaxClasses, nullptr);
+  }
+
+  bool IsEmpty() const {
+    DCHECK_LE(num_classes_, kMaxClasses);
+    return num_classes_ == 0u;
+  }
+
+  bool IsFull() const {
+    DCHECK_LE(num_classes_, kMaxClasses);
+    return num_classes_ == kMaxClasses;
+  }
+
+  void AddClass(Thread* self, ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK_EQ(klass->GetStatus(), ClassStatus::kInitialized);
+    DCHECK(!IsFull());
+    classes_[num_classes_] = self->GetJniEnv()->GetVm()->AddWeakGlobalRef(self, klass);
+    ++num_classes_;
+  }
+
+  void AddBarrier(Barrier* barrier) {
+    barriers_.push_front(barrier);
+  }
+
+  std::forward_list<Barrier*> GetAndClearBarriers() {
+    std::forward_list<Barrier*> result;
+    result.swap(barriers_);
+    result.reverse();  // Return barriers in insertion order.
+    return result;
+  }
+
+  void MakeVisible(Thread* self) {
+    DCHECK_EQ(thread_visibility_counter_.load(std::memory_order_relaxed), 0);
+    size_t count = Runtime::Current()->GetThreadList()->RunCheckpoint(this);
+    AdjustThreadVisibilityCounter(self, count);
+  }
+
+  void Run(Thread* self) override {
+    self->ClearMakeVisiblyInitializedCounter();
+    AdjustThreadVisibilityCounter(self, -1);
+  }
+
+ private:
+  void AdjustThreadVisibilityCounter(Thread* self, ssize_t adjustment) {
+    ssize_t old = thread_visibility_counter_.fetch_add(adjustment, std::memory_order_relaxed);
+    if (old + adjustment == 0) {
+      // All threads passed the checkpoint. Mark classes as visibly initialized.
+      {
+        ScopedObjectAccess soa(self);
+        StackHandleScope<1u> hs(self);
+        MutableHandle<mirror::Class> klass = hs.NewHandle<mirror::Class>(nullptr);
+        JavaVMExt* vm = self->GetJniEnv()->GetVm();
+        for (size_t i = 0, num = num_classes_; i != num; ++i) {
+          klass.Assign(ObjPtr<mirror::Class>::DownCast(self->DecodeJObject(classes_[i])));
+          vm->DeleteWeakGlobalRef(self, classes_[i]);
+          if (klass != nullptr) {
+            mirror::Class::SetStatus(klass, ClassStatus::kVisiblyInitialized, self);
+            class_linker_->FixupStaticTrampolines(klass.Get());
+          }
+        }
+        num_classes_ = 0u;
+      }
+      class_linker_->VisiblyInitializedCallbackDone(self, this);
+    }
+  }
+
+  static constexpr size_t kMaxClasses = 16;
+
+  ClassLinker* const class_linker_;
+  size_t num_classes_;
+  jweak classes_[kMaxClasses];
+
+  // The thread visibility counter starts at 0 and it is incremented by the number of
+  // threads that need to run this callback (by the thread that requests the callback
+  // to be run) and decremented once for each `Run()` execution. When it reaches 0,
+  // whether after the increment or after a decrement, we know that `Run()` was executed
+  // for all threads and therefore we can mark the classes as visibly initialized.
+  std::atomic<ssize_t> thread_visibility_counter_;
+
+  // List of barriers to `Pass()` for threads that wait for the callback to complete.
+  std::forward_list<Barrier*> barriers_;
+};
+
+void ClassLinker::MakeInitializedClassesVisiblyInitialized(Thread* self, bool wait) {
+  if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
+    return;  // Nothing to do. Thanks to the x86 memory model classes skip the initialized status.
+  }
+  std::optional<Barrier> maybe_barrier;  // Avoid constructing the Barrier for `wait == false`.
+  if (wait) {
+    maybe_barrier.emplace(0);
+  }
+  int wait_count = 0;
+  VisiblyInitializedCallback* callback = nullptr;
+  {
+    MutexLock lock(self, visibly_initialized_callback_lock_);
+    if (visibly_initialized_callback_ != nullptr && !visibly_initialized_callback_->IsEmpty()) {
+      callback = visibly_initialized_callback_.release();
+      running_visibly_initialized_callbacks_.push_front(*callback);
+    }
+    if (wait) {
+      DCHECK(maybe_barrier.has_value());
+      Barrier* barrier = std::addressof(*maybe_barrier);
+      for (VisiblyInitializedCallback& cb : running_visibly_initialized_callbacks_) {
+        cb.AddBarrier(barrier);
+        ++wait_count;
+      }
+    }
+  }
+  if (callback != nullptr) {
+    callback->MakeVisible(self);
+  }
+  if (wait_count != 0) {
+    DCHECK(maybe_barrier.has_value());
+    maybe_barrier->Increment(self, wait_count);
+  }
+}
+
+void ClassLinker::VisiblyInitializedCallbackDone(Thread* self,
+                                                 VisiblyInitializedCallback* callback) {
+  MutexLock lock(self, visibly_initialized_callback_lock_);
+  // Pass the barriers if requested.
+  for (Barrier* barrier : callback->GetAndClearBarriers()) {
+    barrier->Pass(self);
+  }
+  // Remove the callback from the list of running callbacks.
+  auto before = running_visibly_initialized_callbacks_.before_begin();
+  auto it = running_visibly_initialized_callbacks_.begin();
+  DCHECK(it != running_visibly_initialized_callbacks_.end());
+  while (std::addressof(*it) != callback) {
+    before = it;
+    ++it;
+    DCHECK(it != running_visibly_initialized_callbacks_.end());
+  }
+  running_visibly_initialized_callbacks_.erase_after(before);
+  // Reuse or destroy the callback object.
+  if (visibly_initialized_callback_ == nullptr) {
+    visibly_initialized_callback_.reset(callback);
+  } else {
+    delete callback;
+  }
+}
+
+void ClassLinker::ForceClassInitialized(Thread* self, Handle<mirror::Class> klass) {
+  ClassLinker::VisiblyInitializedCallback* cb = MarkClassInitialized(self, klass);
+  if (cb != nullptr) {
+    cb->MakeVisible(self);
+  }
+  ScopedThreadSuspension sts(self, ThreadState::kSuspended);
+  MakeInitializedClassesVisiblyInitialized(self, /*wait=*/true);
+}
+
+ClassLinker::VisiblyInitializedCallback* ClassLinker::MarkClassInitialized(
+    Thread* self, Handle<mirror::Class> klass) {
+  if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
+    // Thanks to the x86 memory model, we do not need any memory fences and
+    // we can immediately mark the class as visibly initialized.
+    mirror::Class::SetStatus(klass, ClassStatus::kVisiblyInitialized, self);
+    FixupStaticTrampolines(klass.Get());
+    return nullptr;
+  }
+  if (Runtime::Current()->IsActiveTransaction()) {
+    // Transactions are single-threaded, so we can mark the class as visibly initialized.
+    // (Otherwise we'd need to track the callback's entry in the transaction for rollback.)
+    mirror::Class::SetStatus(klass, ClassStatus::kVisiblyInitialized, self);
+    FixupStaticTrampolines(klass.Get());
+    return nullptr;
+  }
+  mirror::Class::SetStatus(klass, ClassStatus::kInitialized, self);
+  MutexLock lock(self, visibly_initialized_callback_lock_);
+  if (visibly_initialized_callback_ == nullptr) {
+    visibly_initialized_callback_.reset(new VisiblyInitializedCallback(this));
+  }
+  DCHECK(!visibly_initialized_callback_->IsFull());
+  visibly_initialized_callback_->AddClass(self, klass.Get());
+
+  if (visibly_initialized_callback_->IsFull()) {
+    VisiblyInitializedCallback* callback = visibly_initialized_callback_.release();
+    running_visibly_initialized_callbacks_.push_front(*callback);
+    return callback;
+  } else {
+    return nullptr;
   }
 }
 
@@ -412,11 +629,15 @@
       log_new_roots_(false),
       intern_table_(intern_table),
       fast_class_not_found_exceptions_(fast_class_not_found_exceptions),
+      jni_dlsym_lookup_trampoline_(nullptr),
+      jni_dlsym_lookup_critical_trampoline_(nullptr),
       quick_resolution_trampoline_(nullptr),
       quick_imt_conflict_trampoline_(nullptr),
       quick_generic_jni_trampoline_(nullptr),
       quick_to_interpreter_bridge_trampoline_(nullptr),
       image_pointer_size_(kRuntimePointerSize),
+      visibly_initialized_callback_lock_("visibly initialized callback lock"),
+      visibly_initialized_callback_(nullptr),
       cha_(Runtime::Current()->IsAotCompiler() ? nullptr : new ClassHierarchyAnalysis()) {
   // For CHA disabled during Aot, see b/34193647.
 
@@ -466,7 +687,7 @@
   // Allocate the object as non-movable so that there are no cases where Object::IsClass returns
   // the incorrect result when comparing to-space vs from-space.
   Handle<mirror::Class> java_lang_Class(hs.NewHandle(ObjPtr<mirror::Class>::DownCast(
-      heap->AllocNonMovableObject<true>(self, nullptr, class_class_size, VoidFunctor()))));
+      heap->AllocNonMovableObject(self, nullptr, class_class_size, VoidFunctor()))));
   CHECK(java_lang_Class != nullptr);
   java_lang_Class->SetClassFlags(mirror::kClassFlagClass);
   java_lang_Class->SetClass(java_lang_Class.Get());
@@ -495,10 +716,10 @@
   java_lang_Object->SetObjectSize(sizeof(mirror::Object));
   // Allocate in non-movable so that it's possible to check if a JNI weak global ref has been
   // cleared without triggering the read barrier and unintentionally mark the sentinel alive.
-  runtime->SetSentinel(heap->AllocNonMovableObject<true>(self,
-                                                         java_lang_Object.Get(),
-                                                         java_lang_Object->GetObjectSize(),
-                                                         VoidFunctor()));
+  runtime->SetSentinel(heap->AllocNonMovableObject(self,
+                                                   java_lang_Object.Get(),
+                                                   java_lang_Object->GetObjectSize(),
+                                                   VoidFunctor()));
 
   // Initialize the SubtypeCheck bitstring for java.lang.Object and java.lang.Class.
   if (kBitstringSubtypeCheckEnabled) {
@@ -558,27 +779,27 @@
   DCHECK_EQ(GetArrayIfTable(), object_array_class->GetIfTable());
 
   // Setup the primitive type classes.
-  SetClassRoot(ClassRoot::kPrimitiveBoolean, CreatePrimitiveClass(self, Primitive::kPrimBoolean));
-  SetClassRoot(ClassRoot::kPrimitiveByte, CreatePrimitiveClass(self, Primitive::kPrimByte));
-  SetClassRoot(ClassRoot::kPrimitiveChar, CreatePrimitiveClass(self, Primitive::kPrimChar));
-  SetClassRoot(ClassRoot::kPrimitiveShort, CreatePrimitiveClass(self, Primitive::kPrimShort));
-  SetClassRoot(ClassRoot::kPrimitiveInt, CreatePrimitiveClass(self, Primitive::kPrimInt));
-  SetClassRoot(ClassRoot::kPrimitiveLong, CreatePrimitiveClass(self, Primitive::kPrimLong));
-  SetClassRoot(ClassRoot::kPrimitiveFloat, CreatePrimitiveClass(self, Primitive::kPrimFloat));
-  SetClassRoot(ClassRoot::kPrimitiveDouble, CreatePrimitiveClass(self, Primitive::kPrimDouble));
-  SetClassRoot(ClassRoot::kPrimitiveVoid, CreatePrimitiveClass(self, Primitive::kPrimVoid));
+  CreatePrimitiveClass(self, Primitive::kPrimBoolean, ClassRoot::kPrimitiveBoolean);
+  CreatePrimitiveClass(self, Primitive::kPrimByte, ClassRoot::kPrimitiveByte);
+  CreatePrimitiveClass(self, Primitive::kPrimChar, ClassRoot::kPrimitiveChar);
+  CreatePrimitiveClass(self, Primitive::kPrimShort, ClassRoot::kPrimitiveShort);
+  CreatePrimitiveClass(self, Primitive::kPrimInt, ClassRoot::kPrimitiveInt);
+  CreatePrimitiveClass(self, Primitive::kPrimLong, ClassRoot::kPrimitiveLong);
+  CreatePrimitiveClass(self, Primitive::kPrimFloat, ClassRoot::kPrimitiveFloat);
+  CreatePrimitiveClass(self, Primitive::kPrimDouble, ClassRoot::kPrimitiveDouble);
+  CreatePrimitiveClass(self, Primitive::kPrimVoid, ClassRoot::kPrimitiveVoid);
 
-  // Create int array type for native pointer arrays (for example vtables) on 32-bit archs.
-  Handle<mirror::Class> int_array_class(hs.NewHandle(
-      AllocPrimitiveArrayClass(self, java_lang_Class.Get())));
-  int_array_class->SetComponentType(GetClassRoot(ClassRoot::kPrimitiveInt, this));
-  SetClassRoot(ClassRoot::kIntArrayClass, int_array_class.Get());
-
-  // Create long array type for native pointer arrays (for example vtables) on 64-bit archs.
-  Handle<mirror::Class> long_array_class(hs.NewHandle(
-      AllocPrimitiveArrayClass(self, java_lang_Class.Get())));
-  long_array_class->SetComponentType(GetClassRoot(ClassRoot::kPrimitiveLong, this));
-  SetClassRoot(ClassRoot::kLongArrayClass, long_array_class.Get());
+  // Allocate the primitive array classes. We need only the native pointer
+  // array at this point (int[] or long[], depending on architecture) but
+  // we shall perform the same setup steps for all primitive array classes.
+  AllocPrimitiveArrayClass(self, ClassRoot::kPrimitiveBoolean, ClassRoot::kBooleanArrayClass);
+  AllocPrimitiveArrayClass(self, ClassRoot::kPrimitiveByte, ClassRoot::kByteArrayClass);
+  AllocPrimitiveArrayClass(self, ClassRoot::kPrimitiveChar, ClassRoot::kCharArrayClass);
+  AllocPrimitiveArrayClass(self, ClassRoot::kPrimitiveShort, ClassRoot::kShortArrayClass);
+  AllocPrimitiveArrayClass(self, ClassRoot::kPrimitiveInt, ClassRoot::kIntArrayClass);
+  AllocPrimitiveArrayClass(self, ClassRoot::kPrimitiveLong, ClassRoot::kLongArrayClass);
+  AllocPrimitiveArrayClass(self, ClassRoot::kPrimitiveFloat, ClassRoot::kFloatArrayClass);
+  AllocPrimitiveArrayClass(self, ClassRoot::kPrimitiveDouble, ClassRoot::kDoubleArrayClass);
 
   // now that these are registered, we can use AllocClass() and AllocObjectArray
 
@@ -618,11 +839,11 @@
     return false;
   }
   for (auto& dex_file : boot_class_path) {
-    if (dex_file.get() == nullptr) {
+    if (dex_file == nullptr) {
       *error_msg = "Null dex file.";
       return false;
     }
-    AppendToBootClassPath(self, *dex_file);
+    AppendToBootClassPath(self, dex_file.get());
     boot_dex_files_.push_back(std::move(dex_file));
   }
 
@@ -633,8 +854,11 @@
   quick_generic_jni_trampoline_ = GetQuickGenericJniStub();
   if (!runtime->IsAotCompiler()) {
     // We need to set up the generic trampolines since we don't have an image.
+    jni_dlsym_lookup_trampoline_ = GetJniDlsymLookupStub();
+    jni_dlsym_lookup_critical_trampoline_ = GetJniDlsymLookupCriticalStub();
     quick_resolution_trampoline_ = GetQuickResolutionStub();
     quick_imt_conflict_trampoline_ = GetQuickImtConflictStub();
+    quick_generic_jni_trampoline_ = GetQuickGenericJniStub();
     quick_to_interpreter_bridge_trampoline_ = GetQuickToInterpreterBridge();
   }
 
@@ -651,37 +875,23 @@
   CheckSystemClass(self, dalvik_system_ClassExt, "Ldalvik/system/ClassExt;");
   CHECK_EQ(dalvik_system_ClassExt->GetObjectSize(), mirror::ClassExt::InstanceSize());
 
-  // Setup the primitive array type classes - can't be done until Object has a vtable.
-  AllocAndSetPrimitiveArrayClassRoot(self,
-                                     java_lang_Class.Get(),
-                                     ClassRoot::kBooleanArrayClass,
-                                     ClassRoot::kPrimitiveBoolean,
-                                     "[Z");
-
-  AllocAndSetPrimitiveArrayClassRoot(
-      self, java_lang_Class.Get(), ClassRoot::kByteArrayClass, ClassRoot::kPrimitiveByte, "[B");
-
-  AllocAndSetPrimitiveArrayClassRoot(
-      self, java_lang_Class.Get(), ClassRoot::kCharArrayClass, ClassRoot::kPrimitiveChar, "[C");
-
-  AllocAndSetPrimitiveArrayClassRoot(
-      self, java_lang_Class.Get(), ClassRoot::kShortArrayClass, ClassRoot::kPrimitiveShort, "[S");
-
-  CheckSystemClass(self, int_array_class, "[I");
-  CheckSystemClass(self, long_array_class, "[J");
-
-  AllocAndSetPrimitiveArrayClassRoot(
-      self, java_lang_Class.Get(), ClassRoot::kFloatArrayClass, ClassRoot::kPrimitiveFloat, "[F");
-
-  AllocAndSetPrimitiveArrayClassRoot(
-      self, java_lang_Class.Get(), ClassRoot::kDoubleArrayClass, ClassRoot::kPrimitiveDouble, "[D");
-
   // Run Class through FindSystemClass. This initializes the dex_cache_ fields and register it
   // in class_table_.
   CheckSystemClass(self, java_lang_Class, "Ljava/lang/Class;");
 
-  CheckSystemClass(self, class_array_class, "[Ljava/lang/Class;");
-  CheckSystemClass(self, object_array_class, "[Ljava/lang/Object;");
+  // Setup core array classes, i.e. Object[], String[] and Class[] and primitive
+  // arrays - can't be done until Object has a vtable and component classes are loaded.
+  FinishCoreArrayClassSetup(ClassRoot::kObjectArrayClass);
+  FinishCoreArrayClassSetup(ClassRoot::kClassArrayClass);
+  FinishCoreArrayClassSetup(ClassRoot::kJavaLangStringArrayClass);
+  FinishCoreArrayClassSetup(ClassRoot::kBooleanArrayClass);
+  FinishCoreArrayClassSetup(ClassRoot::kByteArrayClass);
+  FinishCoreArrayClassSetup(ClassRoot::kCharArrayClass);
+  FinishCoreArrayClassSetup(ClassRoot::kShortArrayClass);
+  FinishCoreArrayClassSetup(ClassRoot::kIntArrayClass);
+  FinishCoreArrayClassSetup(ClassRoot::kLongArrayClass);
+  FinishCoreArrayClassSetup(ClassRoot::kFloatArrayClass);
+  FinishCoreArrayClassSetup(ClassRoot::kDoubleArrayClass);
 
   // Setup the single, global copy of "iftable".
   auto java_lang_Cloneable = hs.NewHandle(FindSystemClass(self, "Ljava/lang/Cloneable;"));
@@ -904,7 +1114,7 @@
   // initialize the StackOverflowError class (as it might require running the verifier). Instead,
   // ensure that the class will be initialized.
   if (kMemoryToolIsAvailable && !Runtime::Current()->IsAotCompiler()) {
-    verifier::ClassVerifier::Init();  // Need to prepare the verifier.
+    verifier::ClassVerifier::Init(this);  // Need to prepare the verifier.
 
     ObjPtr<mirror::Class> soe_klass = FindSystemClass(self, "Ljava/lang/StackOverflowError;");
     if (soe_klass == nullptr || !EnsureInitialized(self, hs.NewHandle(soe_klass), true, true)) {
@@ -950,12 +1160,12 @@
   gc::Heap* const heap = runtime->GetHeap();
   std::vector<gc::space::ImageSpace*> spaces = heap->GetBootImageSpaces();
   CHECK(!spaces.empty());
-  uint32_t pointer_size_unchecked = spaces[0]->GetImageHeader().GetPointerSizeUnchecked();
+  const ImageHeader& image_header = spaces[0]->GetImageHeader();
+  uint32_t pointer_size_unchecked = image_header.GetPointerSizeUnchecked();
   if (!ValidPointerSize(pointer_size_unchecked)) {
     *error_msg = StringPrintf("Invalid image pointer size: %u", pointer_size_unchecked);
     return false;
   }
-  const ImageHeader& image_header = spaces[0]->GetImageHeader();
   image_pointer_size_ = image_header.GetPointerSize();
   if (!runtime->IsAotCompiler()) {
     // Only the Aot compiler supports having an image with a different pointer size than the
@@ -996,6 +1206,8 @@
       runtime->GetOatFileManager().RegisterImageOatFiles(spaces);
   DCHECK(!oat_files.empty());
   const OatHeader& default_oat_header = oat_files[0]->GetOatHeader();
+  jni_dlsym_lookup_trampoline_ = default_oat_header.GetJniDlsymLookupTrampoline();
+  jni_dlsym_lookup_critical_trampoline_ = default_oat_header.GetJniDlsymLookupCriticalTrampoline();
   quick_resolution_trampoline_ = default_oat_header.GetQuickResolutionTrampoline();
   quick_imt_conflict_trampoline_ = default_oat_header.GetQuickImtConflictTrampoline();
   quick_generic_jni_trampoline_ = default_oat_header.GetQuickGenericJniTrampoline();
@@ -1004,6 +1216,10 @@
     // Check that the other images use the same trampoline.
     for (size_t i = 1; i < oat_files.size(); ++i) {
       const OatHeader& ith_oat_header = oat_files[i]->GetOatHeader();
+      const void* ith_jni_dlsym_lookup_trampoline_ =
+          ith_oat_header.GetJniDlsymLookupTrampoline();
+      const void* ith_jni_dlsym_lookup_critical_trampoline_ =
+          ith_oat_header.GetJniDlsymLookupCriticalTrampoline();
       const void* ith_quick_resolution_trampoline =
           ith_oat_header.GetQuickResolutionTrampoline();
       const void* ith_quick_imt_conflict_trampoline =
@@ -1012,7 +1228,9 @@
           ith_oat_header.GetQuickGenericJniTrampoline();
       const void* ith_quick_to_interpreter_bridge_trampoline =
           ith_oat_header.GetQuickToInterpreterBridge();
-      if (ith_quick_resolution_trampoline != quick_resolution_trampoline_ ||
+      if (ith_jni_dlsym_lookup_trampoline_ != jni_dlsym_lookup_trampoline_ ||
+          ith_jni_dlsym_lookup_critical_trampoline_ != jni_dlsym_lookup_critical_trampoline_ ||
+          ith_quick_resolution_trampoline != quick_resolution_trampoline_ ||
           ith_quick_imt_conflict_trampoline != quick_imt_conflict_trampoline_ ||
           ith_quick_generic_jni_trampoline != quick_generic_jni_trampoline_ ||
           ith_quick_to_interpreter_bridge_trampoline != quick_to_interpreter_bridge_trampoline_) {
@@ -1056,37 +1274,25 @@
 
   class_roots_ = GcRoot<mirror::ObjectArray<mirror::Class>>(
       ObjPtr<mirror::ObjectArray<mirror::Class>>::DownCast(
-          spaces[0]->GetImageHeader().GetImageRoot(ImageHeader::kClassRoots)));
+          image_header.GetImageRoot(ImageHeader::kClassRoots)));
   DCHECK_EQ(GetClassRoot<mirror::Class>(this)->GetClassFlags(), mirror::kClassFlagClass);
 
-  ObjPtr<mirror::Class> java_lang_Object = GetClassRoot<mirror::Object>(this);
-  java_lang_Object->SetObjectSize(sizeof(mirror::Object));
-  // Allocate in non-movable so that it's possible to check if a JNI weak global ref has been
-  // cleared without triggering the read barrier and unintentionally mark the sentinel alive.
-  runtime->SetSentinel(heap->AllocNonMovableObject<true>(
-      self, java_lang_Object, java_lang_Object->GetObjectSize(), VoidFunctor()));
+  DCHECK_EQ(GetClassRoot<mirror::Object>(this)->GetObjectSize(), sizeof(mirror::Object));
+  ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects =
+      ObjPtr<mirror::ObjectArray<mirror::Object>>::DownCast(
+          image_header.GetImageRoot(ImageHeader::kBootImageLiveObjects));
+  runtime->SetSentinel(boot_image_live_objects->Get(ImageHeader::kClearedJniWeakSentinel));
+  DCHECK(runtime->GetSentinel().Read()->GetClass() == GetClassRoot<mirror::Object>(this));
 
-  const std::vector<std::string>& boot_class_path_locations = runtime->GetBootClassPathLocations();
-  CHECK_LE(spaces.size(), boot_class_path_locations.size());
   for (size_t i = 0u, size = spaces.size(); i != size; ++i) {
     // Boot class loader, use a null handle.
     std::vector<std::unique_ptr<const DexFile>> dex_files;
     if (!AddImageSpace(spaces[i],
                        ScopedNullHandle<mirror::ClassLoader>(),
-                       /*dex_elements=*/ nullptr,
-                       /*dex_location=*/ boot_class_path_locations[i].c_str(),
                        /*out*/&dex_files,
                        error_msg)) {
       return false;
     }
-    // Assert that if absolute boot classpath locations were provided, they were
-    // assigned to the loaded dex files.
-    if (kIsDebugBuild && IsAbsoluteLocation(boot_class_path_locations[i])) {
-      for (const auto& dex_file : dex_files) {
-        DCHECK_EQ(DexFileLoader::GetBaseLocation(dex_file->GetLocation()),
-                  boot_class_path_locations[i]);
-      }
-    }
     // Append opened dex files at the end.
     boot_dex_files_.insert(boot_dex_files_.end(),
                            std::make_move_iterator(dex_files.begin()),
@@ -1105,7 +1311,7 @@
     Thread* self,
     std::vector<std::unique_ptr<const DexFile>>&& additional_dex_files) {
   for (std::unique_ptr<const DexFile>& dex_file : additional_dex_files) {
-    AppendToBootClassPath(self, *dex_file);
+    AppendToBootClassPath(self, dex_file.get());
     boot_dex_files_.push_back(std::move(dex_file));
   }
 }
@@ -1117,194 +1323,6 @@
            class_loader->GetClass();
 }
 
-static bool GetDexPathListElementName(ObjPtr<mirror::Object> element,
-                                      ObjPtr<mirror::String>* out_name)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ArtField* const dex_file_field =
-      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
-  ArtField* const dex_file_name_field =
-      jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_fileName);
-  DCHECK(dex_file_field != nullptr);
-  DCHECK(dex_file_name_field != nullptr);
-  DCHECK(element != nullptr);
-  CHECK_EQ(dex_file_field->GetDeclaringClass(), element->GetClass()) << element->PrettyTypeOf();
-  ObjPtr<mirror::Object> dex_file = dex_file_field->GetObject(element);
-  if (dex_file == nullptr) {
-    // Null dex file means it was probably a jar with no dex files, return a null string.
-    *out_name = nullptr;
-    return true;
-  }
-  ObjPtr<mirror::Object> name_object = dex_file_name_field->GetObject(dex_file);
-  if (name_object != nullptr) {
-    *out_name = name_object->AsString();
-    return true;
-  }
-  return false;
-}
-
-static bool GetDexFileNames(ScopedObjectAccessUnchecked& soa,
-                            ObjPtr<mirror::ClassLoader> class_loader,
-                            /*out*/std::list<ObjPtr<mirror::String>>* dex_files,
-                            /*out*/std::string* error_msg)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  StackHandleScope<1> hs(soa.Self());
-  Handle<mirror::ClassLoader> handle(hs.NewHandle(class_loader));
-  // Get element names. Sets error to true on failure.
-  auto add_element_names = [&](ObjPtr<mirror::Object> element, bool* error)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (element == nullptr) {
-      *error_msg = "Null dex element";
-      *error = true;  // Null element is a critical error.
-      return false;   // Had an error, stop the visit.
-    }
-    ObjPtr<mirror::String> name;
-    if (!GetDexPathListElementName(element, &name)) {
-      *error_msg = "Invalid dex path list element";
-      *error = true;   // Invalid element, make it a critical error.
-      return false;    // Stop the visit.
-    }
-    if (name != nullptr) {
-      dex_files->push_front(name);
-    }
-    return true;  // Continue with the next Element.
-  };
-  bool error = VisitClassLoaderDexElements(soa,
-                                           handle,
-                                           add_element_names,
-                                           /*defaultReturn=*/ false);
-  return !error;
-}
-
-static bool CompareClassLoaderTypes(ScopedObjectAccessUnchecked& soa,
-                                    ObjPtr<mirror::ClassLoader> image_class_loader,
-                                    ObjPtr<mirror::ClassLoader> class_loader,
-                                    std::string* error_msg)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  if (ClassLinker::IsBootClassLoader(soa, class_loader)) {
-    if (!ClassLinker::IsBootClassLoader(soa, image_class_loader)) {
-      *error_msg = "Hierarchies don't match";
-      return false;
-    }
-  } else if (ClassLinker::IsBootClassLoader(soa, image_class_loader)) {
-    *error_msg = "Hierarchies don't match";
-    return false;
-  } else if (class_loader->GetClass() != image_class_loader->GetClass()) {
-    *error_msg = StringPrintf("Class loader types don't match %s and %s",
-                              image_class_loader->PrettyTypeOf().c_str(),
-                              class_loader->PrettyTypeOf().c_str());
-    return false;
-  } else if (soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_PathClassLoader) !=
-      class_loader->GetClass()) {
-    *error_msg = StringPrintf("Unknown class loader type %s",
-                              class_loader->PrettyTypeOf().c_str());
-    // Unsupported class loader.
-    return false;
-  }
-  return true;
-}
-
-static bool CompareDexFiles(const std::list<ObjPtr<mirror::String>>& image_dex_files,
-                            const std::list<ObjPtr<mirror::String>>& loader_dex_files,
-                            std::string* error_msg)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  bool equal = (image_dex_files.size() == loader_dex_files.size()) &&
-      std::equal(image_dex_files.begin(),
-                 image_dex_files.end(),
-                 loader_dex_files.begin(),
-                 [](ObjPtr<mirror::String> lhs, ObjPtr<mirror::String> rhs)
-                     REQUIRES_SHARED(Locks::mutator_lock_) {
-                   return lhs->Equals(rhs);
-                 });
-  if (!equal) {
-    VLOG(image) << "Image dex files " << image_dex_files.size();
-    for (ObjPtr<mirror::String> name : image_dex_files) {
-      VLOG(image) << name->ToModifiedUtf8();
-    }
-    VLOG(image) << "Loader dex files " << loader_dex_files.size();
-    for (ObjPtr<mirror::String> name : loader_dex_files) {
-      VLOG(image) << name->ToModifiedUtf8();
-    }
-    *error_msg = "Mismatch in dex files";
-  }
-  return equal;
-}
-
-static bool CompareClassLoaders(ScopedObjectAccessUnchecked& soa,
-                                ObjPtr<mirror::ClassLoader> image_class_loader,
-                                ObjPtr<mirror::ClassLoader> class_loader,
-                                bool check_dex_file_names,
-                                std::string* error_msg)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  if (!CompareClassLoaderTypes(soa, image_class_loader, class_loader, error_msg)) {
-    return false;
-  }
-
-  if (ClassLinker::IsBootClassLoader(soa, class_loader)) {
-    // No need to check further.
-    return true;
-  }
-
-  if (check_dex_file_names) {
-    std::list<ObjPtr<mirror::String>> image_dex_files;
-    if (!GetDexFileNames(soa, image_class_loader, &image_dex_files, error_msg)) {
-      return false;
-    }
-
-    std::list<ObjPtr<mirror::String>> loader_dex_files;
-    if (!GetDexFileNames(soa, class_loader, &loader_dex_files, error_msg)) {
-      return false;
-    }
-
-    if (!CompareDexFiles(image_dex_files, loader_dex_files, error_msg)) {
-      return false;
-    }
-  }
-
-  ArtField* field =
-      jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_sharedLibraryLoaders);
-  ObjPtr<mirror::Object> shared_libraries_image_loader = field->GetObject(image_class_loader.Ptr());
-  ObjPtr<mirror::Object> shared_libraries_loader = field->GetObject(class_loader.Ptr());
-  if (shared_libraries_image_loader == nullptr) {
-    if (shared_libraries_loader != nullptr) {
-      *error_msg = "Mismatch in shared libraries";
-      return false;
-    }
-  } else if (shared_libraries_loader == nullptr) {
-    *error_msg = "Mismatch in shared libraries";
-    return false;
-  } else {
-    ObjPtr<mirror::ObjectArray<mirror::ClassLoader>> array1 =
-        shared_libraries_image_loader->AsObjectArray<mirror::ClassLoader>();
-    ObjPtr<mirror::ObjectArray<mirror::ClassLoader>> array2 =
-        shared_libraries_loader->AsObjectArray<mirror::ClassLoader>();
-    if (array1->GetLength() != array2->GetLength()) {
-      *error_msg = "Mismatch in number of shared libraries";
-      return false;
-    }
-
-    for (int32_t i = 0; i < array1->GetLength(); ++i) {
-      // Do a full comparison of the class loaders, including comparing their dex files.
-      if (!CompareClassLoaders(soa,
-                               array1->Get(i),
-                               array2->Get(i),
-                               /*check_dex_file_names=*/ true,
-                               error_msg)) {
-        return false;
-      }
-    }
-  }
-
-  // Do a full comparison of the class loaders, including comparing their dex files.
-  if (!CompareClassLoaders(soa,
-                           image_class_loader->GetParent(),
-                           class_loader->GetParent(),
-                           /*check_dex_file_names=*/ true,
-                           error_msg)) {
-    return false;
-  }
-  return true;
-}
-
 class CHAOnDeleteUpdateClassVisitor {
  public:
   explicit CHAOnDeleteUpdateClassVisitor(LinearAlloc* alloc)
@@ -1325,14 +1343,16 @@
 };
 
 /*
- * A class used to ensure that all strings in an AppImage have been properly
- * interned, and is only ever run in debug mode.
+ * A class used to ensure that all references to strings interned in an AppImage have been
+ * properly recorded in the interned references list, and is only ever run in debug mode.
  */
-class VerifyStringInterningVisitor {
+class CountInternedStringReferencesVisitor {
  public:
-  explicit VerifyStringInterningVisitor(const gc::space::ImageSpace& space) :
-      space_(space),
-      intern_table_(*Runtime::Current()->GetInternTable()) {}
+  CountInternedStringReferencesVisitor(const gc::space::ImageSpace& space,
+                                       const InternTable::UnorderedSet& image_interns)
+      : space_(space),
+        image_interns_(image_interns),
+        count_(0u) {}
 
   void TestObject(ObjPtr<mirror::Object> referred_obj) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1340,15 +1360,9 @@
         space_.HasAddress(referred_obj.Ptr()) &&
         referred_obj->IsString()) {
       ObjPtr<mirror::String> referred_str = referred_obj->AsString();
-
-      if (kIsDebugBuild) {
-        // Saved to temporary variables to aid in debugging.
-        ObjPtr<mirror::String> strong_lookup_result =
-            intern_table_.LookupStrong(Thread::Current(), referred_str);
-        ObjPtr<mirror::String> weak_lookup_result =
-            intern_table_.LookupWeak(Thread::Current(), referred_str);
-
-        DCHECK((strong_lookup_result == referred_str) || (weak_lookup_result == referred_str));
+      auto it = image_interns_.find(GcRoot<mirror::String>(referred_str));
+      if (it != image_interns_.end() && it->Read() == referred_str) {
+        ++count_;
       }
     }
   }
@@ -1371,33 +1385,35 @@
                   MemberOffset offset,
                   bool is_static ATTRIBUTE_UNUSED) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    // There could be overlap between ranges, we must avoid visiting the same reference twice.
-    // Avoid the class field since we already fixed it up in FixupClassVisitor.
-    if (offset.Uint32Value() != mirror::Object::ClassOffset().Uint32Value()) {
-      // Updating images, don't do a read barrier.
-      ObjPtr<mirror::Object> referred_obj =
-          obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset);
-
-      TestObject(referred_obj);
-    }
+    // References within image or across images don't need a read barrier.
+    ObjPtr<mirror::Object> referred_obj =
+        obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset);
+    TestObject(referred_obj);
   }
 
   void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
                   ObjPtr<mirror::Reference> ref) const
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
-    operator()(ref, mirror::Reference::ReferentOffset(), false);
+    operator()(ref, mirror::Reference::ReferentOffset(), /*is_static=*/ false);
   }
 
+  size_t GetCount() const {
+    return count_;
+  }
+
+ private:
   const gc::space::ImageSpace& space_;
-  InternTable& intern_table_;
+  const InternTable::UnorderedSet& image_interns_;
+  mutable size_t count_;  // Modified from the `const` callbacks.
 };
 
 /*
- * This function verifies that string references in the AppImage have been
- * properly interned.  To be considered properly interned a reference must
- * point to the same version of the string that the intern table does.
+ * This function counts references to strings interned in the AppImage.
+ * This is used in debug build to check against the number of the recorded references.
  */
-void VerifyStringInterning(gc::space::ImageSpace& space) REQUIRES_SHARED(Locks::mutator_lock_) {
+size_t CountInternedStringReferences(gc::space::ImageSpace& space,
+                                     const InternTable::UnorderedSet& image_interns)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   const gc::accounting::ContinuousSpaceBitmap* bitmap = space.GetMarkBitmap();
   const ImageHeader& image_header = space.GetImageHeader();
   const uint8_t* target_base = space.GetMemMap()->Begin();
@@ -1406,7 +1422,7 @@
   auto objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
   auto objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
 
-  VerifyStringInterningVisitor visitor(space);
+  CountInternedStringReferencesVisitor visitor(space, image_interns);
   bitmap->VisitMarkedRange(objects_begin,
                            objects_end,
                            [&space, &visitor](mirror::Object* obj)
@@ -1426,6 +1442,126 @@
       }
     }
   });
+  return visitor.GetCount();
+}
+
+template <typename Visitor>
+static void VisitInternedStringReferences(
+    gc::space::ImageSpace* space,
+    bool use_preresolved_strings,
+    const Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+  const uint8_t* target_base = space->Begin();
+  const ImageSection& sro_section =
+      space->GetImageHeader().GetImageStringReferenceOffsetsSection();
+  const size_t num_string_offsets = sro_section.Size() / sizeof(AppImageReferenceOffsetInfo);
+
+  VLOG(image)
+      << "ClassLinker:AppImage:InternStrings:imageStringReferenceOffsetCount = "
+      << num_string_offsets;
+
+  const auto* sro_base =
+      reinterpret_cast<const AppImageReferenceOffsetInfo*>(target_base + sro_section.Offset());
+
+  for (size_t offset_index = 0; offset_index < num_string_offsets; ++offset_index) {
+    uint32_t base_offset = sro_base[offset_index].first;
+
+    if (HasDexCacheStringNativeRefTag(base_offset)) {
+      base_offset = ClearDexCacheNativeRefTags(base_offset);
+      DCHECK_ALIGNED(base_offset, 2);
+
+      ObjPtr<mirror::DexCache> dex_cache =
+          reinterpret_cast<mirror::DexCache*>(space->Begin() + base_offset);
+      uint32_t string_slot_index = sro_base[offset_index].second;
+
+      mirror::StringDexCachePair source =
+          dex_cache->GetStrings()[string_slot_index].load(std::memory_order_relaxed);
+      ObjPtr<mirror::String> referred_string = source.object.Read();
+      DCHECK(referred_string != nullptr);
+
+      ObjPtr<mirror::String> visited = visitor(referred_string);
+      if (visited != referred_string) {
+        // Because we are not using a helper function we need to mark the GC card manually.
+        WriteBarrier::ForEveryFieldWrite(dex_cache);
+        dex_cache->GetStrings()[string_slot_index].store(
+            mirror::StringDexCachePair(visited, source.index), std::memory_order_relaxed);
+      }
+    } else if (HasDexCachePreResolvedStringNativeRefTag(base_offset)) {
+      if (use_preresolved_strings) {
+        base_offset = ClearDexCacheNativeRefTags(base_offset);
+        DCHECK_ALIGNED(base_offset, 2);
+
+        ObjPtr<mirror::DexCache> dex_cache =
+            reinterpret_cast<mirror::DexCache*>(space->Begin() + base_offset);
+        uint32_t string_index = sro_base[offset_index].second;
+
+        GcRoot<mirror::String>* preresolved_strings =
+            dex_cache->GetPreResolvedStrings();
+        // Handle calls to ClearPreResolvedStrings that might occur concurrently by the profile
+        // saver that runs shortly after startup. In case the strings are cleared, there is nothing
+        // to fix up.
+        if (preresolved_strings != nullptr) {
+          ObjPtr<mirror::String> referred_string =
+              preresolved_strings[string_index].Read();
+          if (referred_string != nullptr) {
+            ObjPtr<mirror::String> visited = visitor(referred_string);
+            if (visited != referred_string) {
+              // Because we are not using a helper function we need to mark the GC card manually.
+              WriteBarrier::ForEveryFieldWrite(dex_cache);
+              preresolved_strings[string_index] = GcRoot<mirror::String>(visited);
+            }
+          }
+        }
+      }
+    } else {
+      uint32_t raw_member_offset = sro_base[offset_index].second;
+      DCHECK_ALIGNED(base_offset, 2);
+      DCHECK_ALIGNED(raw_member_offset, 2);
+
+      ObjPtr<mirror::Object> obj_ptr =
+          reinterpret_cast<mirror::Object*>(space->Begin() + base_offset);
+      MemberOffset member_offset(raw_member_offset);
+      ObjPtr<mirror::String> referred_string =
+          obj_ptr->GetFieldObject<mirror::String,
+                                  kVerifyNone,
+                                  kWithoutReadBarrier,
+                                  /* kIsVolatile= */ false>(member_offset);
+      DCHECK(referred_string != nullptr);
+
+      ObjPtr<mirror::String> visited = visitor(referred_string);
+      if (visited != referred_string) {
+        obj_ptr->SetFieldObject</* kTransactionActive= */ false,
+                                /* kCheckTransaction= */ false,
+                                kVerifyNone,
+                                /* kIsVolatile= */ false>(member_offset, visited);
+      }
+    }
+  }
+}
+
+static void VerifyInternedStringReferences(gc::space::ImageSpace* space)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  InternTable::UnorderedSet image_interns;
+  const ImageSection& section = space->GetImageHeader().GetInternedStringsSection();
+  if (section.Size() > 0) {
+    size_t read_count;
+    const uint8_t* data = space->Begin() + section.Offset();
+    InternTable::UnorderedSet image_set(data, /*make_copy_of_data=*/ false, &read_count);
+    image_set.swap(image_interns);
+  }
+  size_t num_recorded_refs = 0u;
+  VisitInternedStringReferences(
+      space,
+      /*use_preresolved_strings=*/ true,
+      [&image_interns, &num_recorded_refs](ObjPtr<mirror::String> str)
+          REQUIRES_SHARED(Locks::mutator_lock_) {
+        auto it = image_interns.find(GcRoot<mirror::String>(str));
+        CHECK(it != image_interns.end());
+        CHECK(it->Read() == str);
+        ++num_recorded_refs;
+        return str;
+      });
+  size_t num_found_refs = CountInternedStringReferences(*space, image_interns);
+  CHECK_EQ(num_recorded_refs, num_found_refs);
 }
 
 // new_class_set is the set of classes that were read from the class table section in the image.
@@ -1444,12 +1580,6 @@
 
   static void HandleAppImageStrings(gc::space::ImageSpace* space)
       REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static void UpdateInternStrings(
-      gc::space::ImageSpace* space,
-      bool use_preresolved_strings,
-      const SafeMap<mirror::String*, mirror::String*>& intern_remap)
-      REQUIRES_SHARED(Locks::mutator_lock_);
 };
 
 void AppImageLoadingHelper::Update(
@@ -1462,6 +1592,12 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ScopedTrace app_image_timing("AppImage:Updating");
 
+  if (kIsDebugBuild && ClassLinker::kAppImageMayContainStrings) {
+    // In debug build, verify the string references before applying
+    // the Runtime::LoadAppImageStartupCache() option.
+    VerifyInternedStringReferences(space);
+  }
+
   Thread* const self = Thread::Current();
   Runtime* const runtime = Runtime::Current();
   gc::Heap* const heap = runtime->GetHeap();
@@ -1470,13 +1606,11 @@
   {
     // Register dex caches with the class loader.
     WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
-    const size_t num_dex_caches = dex_caches->GetLength();
-    for (size_t i = 0; i < num_dex_caches; i++) {
-      ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
+    for (auto dex_cache : dex_caches.Iterate<mirror::DexCache>()) {
       const DexFile* const dex_file = dex_cache->GetDexFile();
       {
         WriterMutexLock mu2(self, *Locks::dex_lock_);
-        CHECK(!class_linker->FindDexCacheDataLocked(*dex_file).IsValid());
+        CHECK(class_linker->FindDexCacheDataLocked(*dex_file) == nullptr);
         class_linker->RegisterDexFileLocked(*dex_file, dex_cache, class_loader.Get());
       }
 
@@ -1534,10 +1668,6 @@
 
   if (ClassLinker::kAppImageMayContainStrings) {
     HandleAppImageStrings(space);
-
-    if (kIsDebugBuild) {
-      VerifyStringInterning(*space);
-    }
   }
 
   if (kVerifyArtMethodDeclaringClasses) {
@@ -1554,113 +1684,6 @@
   }
 }
 
-void AppImageLoadingHelper::UpdateInternStrings(
-    gc::space::ImageSpace* space,
-    bool use_preresolved_strings,
-    const SafeMap<mirror::String*, mirror::String*>& intern_remap) {
-  const uint8_t* target_base = space->Begin();
-  const ImageSection& sro_section =
-      space->GetImageHeader().GetImageStringReferenceOffsetsSection();
-  const size_t num_string_offsets = sro_section.Size() / sizeof(AppImageReferenceOffsetInfo);
-  InternTable* const intern_table = Runtime::Current()->GetInternTable();
-
-  VLOG(image)
-      << "ClassLinker:AppImage:InternStrings:imageStringReferenceOffsetCount = "
-      << num_string_offsets;
-
-  const auto* sro_base =
-      reinterpret_cast<const AppImageReferenceOffsetInfo*>(target_base + sro_section.Offset());
-
-  for (size_t offset_index = 0; offset_index < num_string_offsets; ++offset_index) {
-    uint32_t base_offset = sro_base[offset_index].first;
-
-    if (HasDexCacheStringNativeRefTag(base_offset)) {
-      base_offset = ClearDexCacheNativeRefTags(base_offset);
-      DCHECK_ALIGNED(base_offset, 2);
-
-      ObjPtr<mirror::DexCache> dex_cache =
-          reinterpret_cast<mirror::DexCache*>(space->Begin() + base_offset);
-      uint32_t string_index = sro_base[offset_index].second;
-
-      mirror::StringDexCachePair source = dex_cache->GetStrings()[string_index].load();
-      ObjPtr<mirror::String> referred_string = source.object.Read();
-      DCHECK(referred_string != nullptr);
-
-      auto it = intern_remap.find(referred_string.Ptr());
-      if (it != intern_remap.end()) {
-        // This doesn't use SetResolvedString to maintain consistency with how
-        // we load the string.  The index from the source string must be
-        // re-used due to the circular nature of the cache.  Because we are not
-        // using a helper function we need to mark the GC card manually.
-        WriteBarrier::ForEveryFieldWrite(dex_cache);
-        dex_cache->GetStrings()[string_index].store(
-            mirror::StringDexCachePair(it->second, source.index));
-      } else if (!use_preresolved_strings) {
-        dex_cache->GetStrings()[string_index].store(
-            mirror::StringDexCachePair(intern_table->InternStrong(referred_string), source.index));
-      }
-    } else if (HasDexCachePreResolvedStringNativeRefTag(base_offset)) {
-      if (use_preresolved_strings) {
-        base_offset = ClearDexCacheNativeRefTags(base_offset);
-        DCHECK_ALIGNED(base_offset, 2);
-
-        ObjPtr<mirror::DexCache> dex_cache =
-            reinterpret_cast<mirror::DexCache*>(space->Begin() + base_offset);
-        uint32_t string_index = sro_base[offset_index].second;
-
-        GcRoot<mirror::String>* preresolved_strings =
-            dex_cache->GetPreResolvedStrings();
-        // Handle calls to ClearPreResolvedStrings that might occur concurrently by the profile
-        // saver that runs shortly after startup. In case the strings are cleared, there is nothing
-        // to fix up.
-        if (preresolved_strings == nullptr) {
-          continue;
-        }
-        ObjPtr<mirror::String> referred_string =
-            preresolved_strings[string_index].Read();
-        if (referred_string == nullptr) {
-          continue;
-        }
-        auto it = intern_remap.find(referred_string.Ptr());
-        if (it != intern_remap.end()) {
-          // Because we are not using a helper function we need to mark the GC card manually.
-          WriteBarrier::ForEveryFieldWrite(dex_cache);
-          dex_cache->GetPreResolvedStrings()[string_index] = GcRoot<mirror::String>(it->second);
-        }
-      }
-    } else {
-      uint32_t raw_member_offset = sro_base[offset_index].second;
-      DCHECK_ALIGNED(base_offset, 2);
-      DCHECK_ALIGNED(raw_member_offset, 2);
-
-      ObjPtr<mirror::Object> obj_ptr =
-          reinterpret_cast<mirror::Object*>(space->Begin() + base_offset);
-      MemberOffset member_offset(raw_member_offset);
-      ObjPtr<mirror::String> referred_string =
-          obj_ptr->GetFieldObject<mirror::String,
-                                  kVerifyNone,
-                                  kWithoutReadBarrier,
-                                  /* kIsVolatile= */ false>(member_offset);
-      DCHECK(referred_string != nullptr);
-
-      auto it = intern_remap.find(referred_string.Ptr());
-      if (it != intern_remap.end()) {
-        obj_ptr->SetFieldObject</* kTransactionActive= */ false,
-                                /* kCheckTransaction= */ false,
-                                kVerifyNone,
-                                /* kIsVolatile= */ false>(member_offset, it->second);
-      } else if (!use_preresolved_strings) {
-        obj_ptr->SetFieldObject</* kTransactionActive= */ false,
-                                /* kCheckTransaction= */ false,
-                                kVerifyNone,
-                                /* kIsVolatile= */ false>(
-            member_offset,
-            intern_table->InternStrong(referred_string));
-      }
-    }
-  }
-}
-
 void AppImageLoadingHelper::HandleAppImageStrings(gc::space::ImageSpace* space) {
   // Iterate over the string reference offsets stored in the image and intern
   // the strings they point to.
@@ -1719,23 +1742,19 @@
       }
     }
   };
-
-  bool update_intern_strings;
-  if (load_startup_cache) {
-    VLOG(image) << "AppImage:load_startup_cache";
-    // Only add the intern table if we are using the startup cache. Otherwise,
-    // UpdateInternStrings adds the strings to the intern table.
-    intern_table->AddImageStringsToTable(space, func);
-    update_intern_strings = kIsDebugBuild || !intern_remap.empty();
+  intern_table->AddImageStringsToTable(space, func);
+  if (!intern_remap.empty()) {
     VLOG(image) << "AppImage:conflictingInternStrings = " << intern_remap.size();
-  } else {
-    update_intern_strings = true;
-  }
-
-  // For debug builds, always run the code below to get coverage.
-  if (update_intern_strings) {
-    // Slow path case is when there are conflicting intern strings to fix up.
-    UpdateInternStrings(space, /*use_preresolved_strings=*/ load_startup_cache, intern_remap);
+    VisitInternedStringReferences(
+        space,
+        load_startup_cache,
+        [&intern_remap](ObjPtr<mirror::String> str) REQUIRES_SHARED(Locks::mutator_lock_) {
+          auto it = intern_remap.find(str.Ptr());
+          if (it != intern_remap.end()) {
+            return ObjPtr<mirror::String>(it->second);
+          }
+          return str;
+        });
   }
 }
 
@@ -1779,8 +1798,7 @@
   ObjPtr<mirror::ObjectArray<mirror::DexCache>> dex_caches =
       dex_caches_object->AsObjectArray<mirror::DexCache>();
   const OatFile* oat_file = space->GetOatFile();
-  for (int32_t i = 0, length = dex_caches->GetLength(); i != length; ++i) {
-    ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
+  for (auto dex_cache : dex_caches->Iterate()) {
     std::string dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8());
     std::unique_ptr<const DexFile> dex_file = OpenOatDexFile(oat_file,
                                                              dex_file_location.c_str(),
@@ -1969,8 +1987,7 @@
     }
   }
   // Check that all non-primitive classes in dex caches are also in the class table.
-  for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
-    ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
+  for (auto dex_cache : dex_caches.ConstIterate<mirror::DexCache>()) {
     mirror::TypeDexCacheType* const types = dex_cache->GetResolvedTypes();
     for (int32_t j = 0, num_types = dex_cache->NumResolvedTypes(); j < num_types; j++) {
       ObjPtr<mirror::Class> klass = types[j].load(std::memory_order_relaxed).object.Read();
@@ -1985,8 +2002,6 @@
 bool ClassLinker::AddImageSpace(
     gc::space::ImageSpace* space,
     Handle<mirror::ClassLoader> class_loader,
-    jobjectArray dex_elements,
-    const char* dex_location,
     std::vector<std::unique_ptr<const DexFile>>* out_dex_files,
     std::string* error_msg) {
   DCHECK(out_dex_files != nullptr);
@@ -2044,13 +2059,8 @@
     return false;
   }
 
-  for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
-    ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
+  for (auto dex_cache : dex_caches.Iterate<mirror::DexCache>()) {
     std::string dex_file_location = dex_cache->GetLocation()->ToModifiedUtf8();
-    if (class_loader == nullptr) {
-      // For app images, we'll see the relative location. b/130666977.
-      DCHECK_EQ(dex_location, DexFileLoader::GetBaseLocation(dex_file_location));
-    }
     std::unique_ptr<const DexFile> dex_file = OpenOatDexFile(oat_file,
                                                              dex_file_location.c_str(),
                                                              error_msg);
@@ -2077,7 +2087,7 @@
                                                        dex_cache->NumResolvedMethods());
       }
       // Register dex files, keep track of existing ones that are conflicts.
-      AppendToBootClassPath(*dex_file.get(), dex_cache);
+      AppendToBootClassPath(dex_file.get(), dex_cache);
     }
     out_dex_files->push_back(std::move(dex_file));
   }
@@ -2085,63 +2095,14 @@
   if (app_image) {
     ScopedObjectAccessUnchecked soa(Thread::Current());
     ScopedAssertNoThreadSuspension sants("Checking app image", soa.Self());
-    // Check that the class loader resolves the same way as the ones in the image.
-    // Image class loader [A][B][C][image dex files]
-    // Class loader = [???][dex_elements][image dex files]
-    // Need to ensure that [???][dex_elements] == [A][B][C].
-    // For each class loader, PathClassLoader, the loader checks the parent first. Also the logic
-    // for PathClassLoader does this by looping through the array of dex files. To ensure they
-    // resolve the same way, simply flatten the hierarchy in the way the resolution order would be,
-    // and check that the dex file names are the same.
     if (IsBootClassLoader(soa, image_class_loader.Get())) {
       *error_msg = "Unexpected BootClassLoader in app image";
       return false;
     }
-    // The dex files of `class_loader` are not setup yet, so we cannot do a full comparison
-    // of `class_loader` and `image_class_loader` in `CompareClassLoaders`. Therefore, we
-    // special case the comparison of dex files of the two class loaders, but then do full
-    // comparisons for their shared libraries and parent.
-    auto elements = soa.Decode<mirror::ObjectArray<mirror::Object>>(dex_elements);
-    std::list<ObjPtr<mirror::String>> loader_dex_file_names;
-    for (size_t i = 0, num_elems = elements->GetLength(); i < num_elems; ++i) {
-      ObjPtr<mirror::Object> element = elements->GetWithoutChecks(i);
-      if (element != nullptr) {
-        // If we are somewhere in the middle of the array, there may be nulls at the end.
-        ObjPtr<mirror::String> name;
-        if (GetDexPathListElementName(element, &name) && name != nullptr) {
-          loader_dex_file_names.push_back(name);
-        }
-      }
-    }
-    std::string temp_error_msg;
-    std::list<ObjPtr<mirror::String>> image_dex_file_names;
-    bool success = GetDexFileNames(
-        soa, image_class_loader.Get(), &image_dex_file_names, &temp_error_msg);
-    if (success) {
-      // Ignore the number of image dex files since we are adding those to the class loader anyways.
-      CHECK_GE(static_cast<size_t>(image_dex_file_names.size()),
-               static_cast<size_t>(dex_caches->GetLength()));
-      size_t image_count = image_dex_file_names.size() - dex_caches->GetLength();
-      image_dex_file_names.resize(image_count);
-      success = success && CompareDexFiles(image_dex_file_names,
-                                           loader_dex_file_names,
-                                           &temp_error_msg);
-      success = success && CompareClassLoaders(soa,
-                                               image_class_loader.Get(),
-                                               class_loader.Get(),
-                                               /*check_dex_file_names=*/ false,
-                                               &temp_error_msg);
-    }
-    if (!success) {
-      *error_msg = StringPrintf("Rejecting application image due to class loader mismatch: '%s'",
-                               temp_error_msg.c_str());
-      return false;
-    }
   }
 
   if (kSanityCheckObjects) {
-    for (int32_t i = 0; i < dex_caches->GetLength(); i++) {
-      ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i);
+    for (auto dex_cache : dex_caches.Iterate<mirror::DexCache>()) {
       for (size_t j = 0; j < dex_cache->NumResolvedFields(); ++j) {
         auto* field = dex_cache->GetResolvedField(j, image_pointer_size_);
         if (field != nullptr) {
@@ -2168,6 +2129,25 @@
     }, space->Begin(), image_pointer_size_);
   }
 
+  if (interpreter::CanRuntimeUseNterp()) {
+    // Set image methods' entry point that point to the interpreter bridge to the nterp entry point.
+    header.VisitPackedArtMethods([&](ArtMethod& method) REQUIRES_SHARED(Locks::mutator_lock_) {
+      if (IsQuickToInterpreterBridge(method.GetEntryPointFromQuickCompiledCode()) &&
+          interpreter::CanMethodUseNterp(&method)) {
+        method.SetEntryPointFromQuickCompiledCodePtrSize(interpreter::GetNterpEntryPoint(),
+                                                         image_pointer_size_);
+      }
+    }, space->Begin(), image_pointer_size_);
+  }
+
+  if (runtime->IsVerificationSoftFail()) {
+    header.VisitPackedArtMethods([&](ArtMethod& method) REQUIRES_SHARED(Locks::mutator_lock_) {
+      if (!method.IsNative() && method.IsInvokable()) {
+        method.ClearSkipAccessChecks();
+      }
+    }, space->Begin(), image_pointer_size_);
+  }
+
   ClassTable* class_table = nullptr;
   {
     WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
@@ -2202,7 +2182,7 @@
         // class loader is only the initiating loader but not the defining loader.
         // Avoid read barrier since we are comparing against null.
         if (klass->GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
-          klass->SetClassLoader</*kCheckTransaction=*/ false>(loader);
+          klass->SetClassLoader(loader);
         }
       }
     }
@@ -2489,6 +2469,11 @@
     DeleteClassLoader(self, data, /*cleanup_cha=*/ false);
   }
   class_loaders_.clear();
+  while (!running_visibly_initialized_callbacks_.empty()) {
+    std::unique_ptr<VisiblyInitializedCallback> callback(
+        std::addressof(running_visibly_initialized_callbacks_.front()));
+    running_visibly_initialized_callbacks_.pop_front();
+  }
 }
 
 void ClassLinker::DeleteClassLoader(Thread* self, const ClassLoaderData& data, bool cleanup_cha) {
@@ -2534,7 +2519,9 @@
     self->AssertPendingOOMException();
     return nullptr;
   }
-  ObjPtr<mirror::String> location = intern_table_->InternStrong(dex_file.GetLocation().c_str());
+  // Use InternWeak() so that the location String can be collected when the ClassLoader
+  // with this DexCache is collected.
+  ObjPtr<mirror::String> location = intern_table_->InternWeak(dex_file.GetLocation().c_str());
   if (location == nullptr) {
     self->AssertPendingOOMException();
     return nullptr;
@@ -2561,16 +2548,16 @@
   return dex_cache;
 }
 
-template <bool kMovable>
+template <bool kMovable, typename PreFenceVisitor>
 ObjPtr<mirror::Class> ClassLinker::AllocClass(Thread* self,
                                               ObjPtr<mirror::Class> java_lang_Class,
-                                              uint32_t class_size) {
+                                              uint32_t class_size,
+                                              const PreFenceVisitor& pre_fence_visitor) {
   DCHECK_GE(class_size, sizeof(mirror::Class));
   gc::Heap* heap = Runtime::Current()->GetHeap();
-  mirror::Class::InitializeClassVisitor visitor(class_size);
   ObjPtr<mirror::Object> k = (kMovingClasses && kMovable) ?
-      heap->AllocObject<true>(self, java_lang_Class, class_size, visitor) :
-      heap->AllocNonMovableObject<true>(self, java_lang_Class, class_size, visitor);
+      heap->AllocObject(self, java_lang_Class, class_size, pre_fence_visitor) :
+      heap->AllocNonMovableObject(self, java_lang_Class, class_size, pre_fence_visitor);
   if (UNLIKELY(k == nullptr)) {
     self->AssertPendingOOMException();
     return nullptr;
@@ -2578,20 +2565,98 @@
   return k->AsClass();
 }
 
+template <bool kMovable>
+ObjPtr<mirror::Class> ClassLinker::AllocClass(Thread* self,
+                                              ObjPtr<mirror::Class> java_lang_Class,
+                                              uint32_t class_size) {
+  mirror::Class::InitializeClassVisitor visitor(class_size);
+  return AllocClass<kMovable>(self, java_lang_Class, class_size, visitor);
+}
+
 ObjPtr<mirror::Class> ClassLinker::AllocClass(Thread* self, uint32_t class_size) {
   return AllocClass(self, GetClassRoot<mirror::Class>(this), class_size);
 }
 
-ObjPtr<mirror::Class> ClassLinker::AllocPrimitiveArrayClass(Thread* self,
-                                                            ObjPtr<mirror::Class> java_lang_Class) {
+void ClassLinker::AllocPrimitiveArrayClass(Thread* self,
+                                           ClassRoot primitive_root,
+                                           ClassRoot array_root) {
   // We make this class non-movable for the unlikely case where it were to be
   // moved by a sticky-bit (minor) collection when using the Generational
   // Concurrent Copying (CC) collector, potentially creating a stale reference
   // in the `klass_` field of one of its instances allocated in the Large-Object
   // Space (LOS) -- see the comment about the dirty card scanning logic in
   // art::gc::collector::ConcurrentCopying::MarkingPhase.
-  return AllocClass</* kMovable= */ false>(
-      self, java_lang_Class, mirror::Array::ClassSize(image_pointer_size_));
+  ObjPtr<mirror::Class> array_class = AllocClass</* kMovable= */ false>(
+      self, GetClassRoot<mirror::Class>(this), mirror::Array::ClassSize(image_pointer_size_));
+  ObjPtr<mirror::Class> component_type = GetClassRoot(primitive_root, this);
+  DCHECK(component_type->IsPrimitive());
+  array_class->SetComponentType(component_type);
+  SetClassRoot(array_root, array_class);
+}
+
+void ClassLinker::FinishArrayClassSetup(ObjPtr<mirror::Class> array_class) {
+  ObjPtr<mirror::Class> java_lang_Object = GetClassRoot<mirror::Object>(this);
+  array_class->SetSuperClass(java_lang_Object);
+  array_class->SetVTable(java_lang_Object->GetVTable());
+  array_class->SetPrimitiveType(Primitive::kPrimNot);
+  ObjPtr<mirror::Class> component_type = array_class->GetComponentType();
+  array_class->SetClassFlags(component_type->IsPrimitive()
+                                 ? mirror::kClassFlagNoReferenceFields
+                                 : mirror::kClassFlagObjectArray);
+  array_class->SetClassLoader(component_type->GetClassLoader());
+  array_class->SetStatusForPrimitiveOrArray(ClassStatus::kLoaded);
+  array_class->PopulateEmbeddedVTable(image_pointer_size_);
+  ImTable* object_imt = java_lang_Object->GetImt(image_pointer_size_);
+  array_class->SetImt(object_imt, image_pointer_size_);
+  // Skip EnsureSkipAccessChecksMethods(). We can skip the verified status,
+  // the kAccVerificationAttempted flag is added below, and there are no
+  // methods that need the kAccSkipAccessChecks flag.
+  DCHECK_EQ(array_class->NumMethods(), 0u);
+
+  // don't need to set new_class->SetObjectSize(..)
+  // because Object::SizeOf delegates to Array::SizeOf
+
+  // All arrays have java/lang/Cloneable and java/io/Serializable as
+  // interfaces.  We need to set that up here, so that stuff like
+  // "instanceof" works right.
+
+  // Use the single, global copies of "interfaces" and "iftable"
+  // (remember not to free them for arrays).
+  {
+    ObjPtr<mirror::IfTable> array_iftable = GetArrayIfTable();
+    CHECK(array_iftable != nullptr);
+    array_class->SetIfTable(array_iftable);
+  }
+
+  // Inherit access flags from the component type.
+  int access_flags = component_type->GetAccessFlags();
+  // Lose any implementation detail flags; in particular, arrays aren't finalizable.
+  access_flags &= kAccJavaFlagsMask;
+  // Arrays can't be used as a superclass or interface, so we want to add "abstract final"
+  // and remove "interface".
+  access_flags |= kAccAbstract | kAccFinal;
+  access_flags &= ~kAccInterface;
+  // Arrays are access-checks-clean and preverified.
+  access_flags |= kAccVerificationAttempted;
+
+  array_class->SetAccessFlagsDuringLinking(access_flags);
+
+  // Array classes are fully initialized either during single threaded startup,
+  // or from a pre-fence visitor, so visibly initialized.
+  array_class->SetStatusForPrimitiveOrArray(ClassStatus::kVisiblyInitialized);
+}
+
+void ClassLinker::FinishCoreArrayClassSetup(ClassRoot array_root) {
+  // Do not hold lock on the array class object, the initialization of
+  // core array classes is done while the process is still single threaded.
+  ObjPtr<mirror::Class> array_class = GetClassRoot(array_root, this);
+  FinishArrayClassSetup(array_class);
+
+  std::string temp;
+  const char* descriptor = array_class->GetDescriptor(&temp);
+  size_t hash = ComputeModifiedUtf8Hash(descriptor);
+  ObjPtr<mirror::Class> existing = InsertClass(descriptor, array_class, hash);
+  CHECK(existing == nullptr);
 }
 
 ObjPtr<mirror::ObjectArray<mirror::StackTraceElement>> ClassLinker::AllocStackTraceElementArray(
@@ -2684,6 +2749,7 @@
 ClassPathEntry FindInClassPath(const char* descriptor,
                                size_t hash, const std::vector<const DexFile*>& class_path) {
   for (const DexFile* dex_file : class_path) {
+    DCHECK(dex_file != nullptr);
     const dex::ClassDef* dex_class_def = OatDexFile::FindClassDef(*dex_file, descriptor, hash);
     if (dex_class_def != nullptr) {
       return ClassPathEntry(dex_file, dex_class_def);
@@ -2709,8 +2775,8 @@
   Handle<mirror::ObjectArray<mirror::ClassLoader>> shared_libraries(
       hs.NewHandle(raw_shared_libraries->AsObjectArray<mirror::ClassLoader>()));
   MutableHandle<mirror::ClassLoader> temp_loader = hs.NewHandle<mirror::ClassLoader>(nullptr);
-  for (int32_t i = 0; i < shared_libraries->GetLength(); ++i) {
-    temp_loader.Assign(shared_libraries->Get(i));
+  for (auto loader : shared_libraries.Iterate<mirror::ClassLoader>()) {
+    temp_loader.Assign(loader);
     if (!FindClassInBaseDexClassLoader(soa, self, descriptor, hash, temp_loader, result)) {
       return false;  // One of the shared libraries is not supported.
     }
@@ -3081,15 +3147,49 @@
   return result_ptr;
 }
 
-static bool IsReservedBootClassPathDescriptor(const char* descriptor) {
-  std::string_view descriptor_sv(descriptor);
-  return
-      // Reserved conscrypt packages (includes sub-packages under these paths).
-      StartsWith(descriptor_sv, "Landroid/net/ssl/") ||
-      StartsWith(descriptor_sv, "Lcom/android/org/conscrypt/") ||
-      // Reserved updatable-media package (includes sub-packages under this path).
-      StartsWith(descriptor_sv, "Landroid/media/");
-}
+// Helper for maintaining DefineClass counting. We need to notify callbacks when we start/end a
+// define-class and how many recursive DefineClasses we are at in order to allow for doing things
+// like pausing class definition.
+struct ScopedDefiningClass {
+ public:
+  explicit ScopedDefiningClass(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_)
+      : self_(self), returned_(false) {
+    Locks::mutator_lock_->AssertSharedHeld(self_);
+    Runtime::Current()->GetRuntimeCallbacks()->BeginDefineClass();
+    self_->IncrDefineClassCount();
+  }
+  ~ScopedDefiningClass() REQUIRES_SHARED(Locks::mutator_lock_) {
+    Locks::mutator_lock_->AssertSharedHeld(self_);
+    CHECK(returned_);
+  }
+
+  ObjPtr<mirror::Class> Finish(Handle<mirror::Class> h_klass)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    CHECK(!returned_);
+    self_->DecrDefineClassCount();
+    Runtime::Current()->GetRuntimeCallbacks()->EndDefineClass();
+    Thread::PoisonObjectPointersIfDebug();
+    returned_ = true;
+    return h_klass.Get();
+  }
+
+  ObjPtr<mirror::Class> Finish(ObjPtr<mirror::Class> klass)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    StackHandleScope<1> hs(self_);
+    Handle<mirror::Class> h_klass(hs.NewHandle(klass));
+    return Finish(h_klass);
+  }
+
+  ObjPtr<mirror::Class> Finish(nullptr_t np ATTRIBUTE_UNUSED)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    ScopedNullHandle<mirror::Class> snh;
+    return Finish(snh);
+  }
+
+ private:
+  Thread* self_;
+  bool returned_;
+};
 
 ObjPtr<mirror::Class> ClassLinker::DefineClass(Thread* self,
                                                const char* descriptor,
@@ -3097,6 +3197,7 @@
                                                Handle<mirror::ClassLoader> class_loader,
                                                const DexFile& dex_file,
                                                const dex::ClassDef& dex_class_def) {
+  ScopedDefiningClass sdc(self);
   StackHandleScope<3> hs(self);
   auto klass = hs.NewHandle<mirror::Class>(nullptr);
 
@@ -3123,11 +3224,11 @@
   // with these modules as these classes could be resolved differently during execution.
   if (class_loader != nullptr &&
       Runtime::Current()->IsAotCompiler() &&
-      IsReservedBootClassPathDescriptor(descriptor)) {
+      IsUpdatableBootClassPathDescriptor(descriptor)) {
     ObjPtr<mirror::Throwable> pre_allocated =
         Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
     self->SetException(pre_allocated);
-    return nullptr;
+    return sdc.Finish(nullptr);
   }
 
   // This is to prevent the calls to ClassLoad and ClassPrepare which can cause java/user-supplied
@@ -3138,7 +3239,7 @@
     ObjPtr<mirror::Throwable> pre_allocated =
         Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
     self->SetException(pre_allocated);
-    return nullptr;
+    return sdc.Finish(nullptr);
   }
 
   if (klass == nullptr) {
@@ -3146,11 +3247,15 @@
     // Interface object should get the right size here. Regular class will
     // figure out the right size later and be replaced with one of the right
     // size when the class becomes resolved.
-    klass.Assign(AllocClass(self, SizeOfClassWithoutEmbeddedTables(dex_file, dex_class_def)));
+    if (CanAllocClass()) {
+      klass.Assign(AllocClass(self, SizeOfClassWithoutEmbeddedTables(dex_file, dex_class_def)));
+    } else {
+      return sdc.Finish(nullptr);
+    }
   }
   if (UNLIKELY(klass == nullptr)) {
     self->AssertPendingOOMException();
-    return nullptr;
+    return sdc.Finish(nullptr);
   }
   // Get the real dex file. This will return the input if there aren't any callbacks or they do
   // nothing.
@@ -3167,12 +3272,12 @@
                                                             &new_class_def);
   // Check to see if an exception happened during runtime callbacks. Return if so.
   if (self->IsExceptionPending()) {
-    return nullptr;
+    return sdc.Finish(nullptr);
   }
   ObjPtr<mirror::DexCache> dex_cache = RegisterDexFile(*new_dex_file, class_loader.Get());
   if (dex_cache == nullptr) {
     self->AssertPendingException();
-    return nullptr;
+    return sdc.Finish(nullptr);
   }
   klass->SetDexCache(dex_cache);
   SetupClass(*new_dex_file, *new_class_def, klass, class_loader.Get());
@@ -3194,7 +3299,7 @@
   if (existing != nullptr) {
     // We failed to insert because we raced with another thread. Calling EnsureResolved may cause
     // this thread to block.
-    return EnsureResolved(self, descriptor, existing);
+    return sdc.Finish(EnsureResolved(self, descriptor, existing));
   }
 
   // Load the fields and other things after we are inserted in the table. This is so that we don't
@@ -3209,7 +3314,7 @@
     if (!klass->IsErroneous()) {
       mirror::Class::SetStatus(klass, ClassStatus::kErrorUnresolved, self);
     }
-    return nullptr;
+    return sdc.Finish(nullptr);
   }
 
   // Finish loading (if necessary) by finding parents
@@ -3219,7 +3324,7 @@
     if (!klass->IsErroneous()) {
       mirror::Class::SetStatus(klass, ClassStatus::kErrorUnresolved, self);
     }
-    return nullptr;
+    return sdc.Finish(nullptr);
   }
   CHECK(klass->IsLoaded());
 
@@ -3238,7 +3343,7 @@
     if (!klass->IsErroneous()) {
       mirror::Class::SetStatus(klass, ClassStatus::kErrorUnresolved, self);
     }
-    return nullptr;
+    return sdc.Finish(nullptr);
   }
   self->AssertNoPendingException();
   CHECK(h_new_class != nullptr) << descriptor;
@@ -3272,7 +3377,7 @@
   // Notify native debugger of the new class and its layout.
   jit::Jit::NewTypeLoadedIfUsingJit(h_new_class.Get());
 
-  return h_new_class.Get();
+  return sdc.Finish(h_new_class);
 }
 
 uint32_t ClassLinker::SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file,
@@ -3339,14 +3444,28 @@
   if (method->IsProxyMethod()) {
     return GetQuickProxyInvokeHandler();
   }
-  auto* code = method->GetOatMethodQuickCode(GetImagePointerSize());
+  const void* code = method->GetOatMethodQuickCode(GetImagePointerSize());
   if (code != nullptr) {
     return code;
   }
+
+  jit::Jit* jit = Runtime::Current()->GetJit();
+  if (jit != nullptr) {
+    code = jit->GetCodeCache()->GetSavedEntryPointOfPreCompiledMethod(method);
+    if (code != nullptr) {
+      return code;
+    }
+  }
+
   if (method->IsNative()) {
     // No code and native? Use generic trampoline.
     return GetQuickGenericJniStub();
   }
+
+  if (interpreter::CanRuntimeUseNterp() && interpreter::CanMethodUseNterp(method)) {
+    return interpreter::GetNterpEntryPoint();
+  }
+
   return GetQuickToInterpreterBridge();
 }
 
@@ -3371,8 +3490,7 @@
     return true;
   }
 
-  if (Thread::Current()->IsForceInterpreter() ||
-      Dbg::IsForcedInterpreterNeededForCalling(Thread::Current(), method)) {
+  if (Thread::Current()->IsForceInterpreter()) {
     // Force the use of interpreter when it is required by the debugger.
     return true;
   }
@@ -3413,10 +3531,13 @@
 
 void ClassLinker::FixupStaticTrampolines(ObjPtr<mirror::Class> klass) {
   ScopedAssertNoThreadSuspension sants(__FUNCTION__);
-  DCHECK(klass->IsInitialized()) << klass->PrettyDescriptor();
+  DCHECK(klass->IsVisiblyInitialized()) << klass->PrettyDescriptor();
   if (klass->NumDirectMethods() == 0) {
     return;  // No direct methods => no static methods.
   }
+  if (UNLIKELY(klass->IsProxyClass())) {
+    return;
+  }
   Runtime* runtime = Runtime::Current();
   if (!runtime->IsStarted()) {
     if (runtime->IsAotCompiler() || runtime->GetHeap()->HasBootImageSpace()) {
@@ -3442,21 +3563,40 @@
       continue;
     }
     const void* quick_code = nullptr;
+
+    // In order:
+    // 1) Check if we have AOT Code.
+    // 2) Check if we have JIT Code.
+    // 3) Check if we can use Nterp.
     if (has_oat_class) {
       OatFile::OatMethod oat_method = oat_class.GetOatMethod(method_index);
       quick_code = oat_method.GetQuickCode();
     }
-    // Check if we have JIT compiled code for it.
-    if (quick_code == nullptr && Runtime::Current()->GetJit() != nullptr) {
-      quick_code = Runtime::Current()->GetJit()->GetCodeCache()->GetZygoteSavedEntryPoint(method);
+
+    jit::Jit* jit = runtime->GetJit();
+    if (quick_code == nullptr && jit != nullptr) {
+      quick_code = jit->GetCodeCache()->GetSavedEntryPointOfPreCompiledMethod(method);
     }
+
+    if (quick_code == nullptr &&
+        interpreter::CanRuntimeUseNterp() &&
+        interpreter::CanMethodUseNterp(method)) {
+      quick_code = interpreter::GetNterpEntryPoint();
+    }
+
     // Check whether the method is native, in which case it's generic JNI.
     if (quick_code == nullptr && method->IsNative()) {
       quick_code = GetQuickGenericJniStub();
     } else if (ShouldUseInterpreterEntrypoint(method, quick_code)) {
       // Use interpreter entry point.
+      if (IsQuickToInterpreterBridge(method->GetEntryPointFromQuickCompiledCode())) {
+        // If we have the trampoline or the bridge already, no need to update.
+        // This saves in not dirtying boot image memory.
+        continue;
+      }
       quick_code = GetQuickToInterpreterBridge();
     }
+    CHECK(quick_code != nullptr);
     runtime->GetInstrumentation()->UpdateMethodsCode(method, quick_code);
   }
   // Ignore virtual methods on the iterator.
@@ -3464,7 +3604,8 @@
 
 // Does anything needed to make sure that the compiler will not generate a direct invoke to this
 // method. Should only be called on non-invokable methods.
-inline void EnsureThrowsInvocationError(ClassLinker* class_linker, ArtMethod* method) {
+inline void EnsureThrowsInvocationError(ClassLinker* class_linker, ArtMethod* method)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK(method != nullptr);
   DCHECK(!method->IsInvokable());
   method->SetEntryPointFromQuickCompiledCodePtrSize(
@@ -3482,34 +3623,42 @@
     // The following code only applies to a non-compiler runtime.
     return;
   }
+
   // Method shouldn't have already been linked.
   DCHECK(method->GetEntryPointFromQuickCompiledCode() == nullptr);
-  if (oat_class != nullptr) {
-    // Every kind of method should at least get an invoke stub from the oat_method.
-    // non-abstract methods also get their code pointers.
-    const OatFile::OatMethod oat_method = oat_class->GetOatMethod(class_def_method_index);
-    oat_method.LinkMethod(method);
-  }
-
-  // Install entry point from interpreter.
-  const void* quick_code = method->GetEntryPointFromQuickCompiledCode();
-  bool enter_interpreter = class_linker->ShouldUseInterpreterEntrypoint(method, quick_code);
 
   if (!method->IsInvokable()) {
     EnsureThrowsInvocationError(class_linker, method);
     return;
   }
 
-  if (method->IsStatic() && !method->IsConstructor()) {
-    // For static methods excluding the class initializer, install the trampoline.
+  const void* quick_code = nullptr;
+  if (oat_class != nullptr) {
+    // Every kind of method should at least get an invoke stub from the oat_method.
+    // non-abstract methods also get their code pointers.
+    const OatFile::OatMethod oat_method = oat_class->GetOatMethod(class_def_method_index);
+    quick_code = oat_method.GetQuickCode();
+  }
+
+  bool enter_interpreter = class_linker->ShouldUseInterpreterEntrypoint(method, quick_code);
+
+  // Note: this mimics the logic in image_writer.cc that installs the resolution
+  // stub only if we have compiled code and the method needs a class initialization
+  // check.
+  if (quick_code == nullptr) {
+    method->SetEntryPointFromQuickCompiledCode(
+        method->IsNative() ? GetQuickGenericJniStub() : GetQuickToInterpreterBridge());
+  } else if (enter_interpreter) {
+    method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+  } else if (NeedsClinitCheckBeforeCall(method)) {
+    DCHECK(!method->GetDeclaringClass()->IsVisiblyInitialized());  // Actually ClassStatus::Idx.
+    // If we do have code but the method needs a class initialization check before calling
+    // that code, install the resolution stub that will perform the check.
     // It will be replaced by the proper entry point by ClassLinker::FixupStaticTrampolines
     // after initializing class (see ClassLinker::InitializeClass method).
     method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
-  } else if (quick_code == nullptr && method->IsNative()) {
-    method->SetEntryPointFromQuickCompiledCode(GetQuickGenericJniStub());
-  } else if (enter_interpreter) {
-    // Set entry point from compiled code if there's no code or in interpreter only mode.
-    method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+  } else {
+    method->SetEntryPointFromQuickCompiledCode(quick_code);
   }
 
   if (method->IsNative()) {
@@ -3517,12 +3666,10 @@
     method->UnregisterNative();
 
     if (enter_interpreter || quick_code == nullptr) {
-      // We have a native method here without code. Then it should have either the generic JNI
-      // trampoline as entrypoint (non-static), or the resolution trampoline (static).
+      // We have a native method here without code. Then it should have the generic JNI
+      // trampoline as entrypoint.
       // TODO: this doesn't handle all the cases where trampolines may be installed.
-      const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
-      DCHECK(class_linker->IsQuickGenericJniStub(entry_point) ||
-             class_linker->IsQuickResolutionStub(entry_point));
+      DCHECK(class_linker->IsQuickGenericJniStub(method->GetEntryPointFromQuickCompiledCode()));
     }
   }
 }
@@ -3540,7 +3687,7 @@
   klass->SetClass(GetClassRoot<mirror::Class>(this));
   uint32_t access_flags = dex_class_def.GetJavaAccessFlags();
   CHECK_EQ(access_flags & ~kAccJavaFlagsMask, 0U);
-  klass->SetAccessFlags(access_flags);
+  klass->SetAccessFlagsDuringLinking(access_flags);
   klass->SetClassLoader(class_loader);
   DCHECK_EQ(klass->GetPrimitiveType(), Primitive::kPrimNot);
   mirror::Class::SetStatus(klass, ClassStatus::kIdx, nullptr);
@@ -3791,21 +3938,22 @@
   }
 }
 
-void ClassLinker::AppendToBootClassPath(Thread* self, const DexFile& dex_file) {
+void ClassLinker::AppendToBootClassPath(Thread* self, const DexFile* dex_file) {
   ObjPtr<mirror::DexCache> dex_cache = AllocAndInitializeDexCache(
       self,
-      dex_file,
+      *dex_file,
       Runtime::Current()->GetLinearAlloc());
-  CHECK(dex_cache != nullptr) << "Failed to allocate dex cache for " << dex_file.GetLocation();
+  CHECK(dex_cache != nullptr) << "Failed to allocate dex cache for " << dex_file->GetLocation();
   AppendToBootClassPath(dex_file, dex_cache);
 }
 
-void ClassLinker::AppendToBootClassPath(const DexFile& dex_file,
+void ClassLinker::AppendToBootClassPath(const DexFile* dex_file,
                                         ObjPtr<mirror::DexCache> dex_cache) {
-  CHECK(dex_cache != nullptr) << dex_file.GetLocation();
-  boot_class_path_.push_back(&dex_file);
+  CHECK(dex_file != nullptr);
+  CHECK(dex_cache != nullptr) << dex_file->GetLocation();
+  boot_class_path_.push_back(dex_file);
   WriterMutexLock mu(Thread::Current(), *Locks::dex_lock_);
-  RegisterDexFileLocked(dex_file, dex_cache, /* class_loader= */ nullptr);
+  RegisterDexFileLocked(*dex_file, dex_cache, /* class_loader= */ nullptr);
 }
 
 void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
@@ -3870,6 +4018,8 @@
   // Make sure to hold the dex cache live in the class table. This case happens for the boot class
   // path dex caches without an image.
   data.class_table->InsertStrongRoot(dex_cache);
+  // Make sure that the dex cache holds the classloader live.
+  dex_cache->SetClassLoader(class_loader);
   if (class_loader != nullptr) {
     // Since we added a strong root to the class table, do the write barrier as required for
     // remembered sets and generational GCs.
@@ -3878,25 +4028,19 @@
   dex_caches_.push_back(data);
 }
 
-ObjPtr<mirror::DexCache> ClassLinker::DecodeDexCache(Thread* self, const DexCacheData& data) {
-  return data.IsValid()
-      ? ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root))
+ObjPtr<mirror::DexCache> ClassLinker::DecodeDexCacheLocked(Thread* self, const DexCacheData* data) {
+  return data != nullptr
+      ? ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data->weak_root))
       : nullptr;
 }
 
-ObjPtr<mirror::DexCache> ClassLinker::EnsureSameClassLoader(
-    Thread* self,
+bool ClassLinker::IsSameClassLoader(
     ObjPtr<mirror::DexCache> dex_cache,
-    const DexCacheData& data,
+    const DexCacheData* data,
     ObjPtr<mirror::ClassLoader> class_loader) {
-  DCHECK_EQ(dex_cache->GetDexFile(), data.dex_file);
-  if (data.class_table != ClassTableForClassLoader(class_loader)) {
-    self->ThrowNewExceptionF("Ljava/lang/InternalError;",
-                             "Attempt to register dex file %s with multiple class loaders",
-                             data.dex_file->GetLocation().c_str());
-    return nullptr;
-  }
-  return dex_cache;
+  CHECK(data != nullptr);
+  DCHECK_EQ(dex_cache->GetDexFile(), data->dex_file);
+  return data->class_table == ClassTableForClassLoader(class_loader);
 }
 
 void ClassLinker::RegisterExistingDexCache(ObjPtr<mirror::DexCache> dex_cache,
@@ -3909,12 +4053,9 @@
   const DexFile* dex_file = dex_cache->GetDexFile();
   DCHECK(dex_file != nullptr) << "Attempt to register uninitialized dex_cache object!";
   if (kIsDebugBuild) {
-    DexCacheData old_data;
-    {
-      ReaderMutexLock mu(self, *Locks::dex_lock_);
-      old_data = FindDexCacheDataLocked(*dex_file);
-    }
-    ObjPtr<mirror::DexCache> old_dex_cache = DecodeDexCache(self, old_data);
+    ReaderMutexLock mu(self, *Locks::dex_lock_);
+    const DexCacheData* old_data = FindDexCacheDataLocked(*dex_file);
+    ObjPtr<mirror::DexCache> old_dex_cache = DecodeDexCacheLocked(self, old_data);
     DCHECK(old_dex_cache.IsNull()) << "Attempt to manually register a dex cache thats already "
                                    << "been registered on dex file " << dex_file->GetLocation();
   }
@@ -3937,17 +4078,35 @@
   }
 }
 
+static void ThrowDexFileAlreadyRegisteredError(Thread* self, const DexFile& dex_file) REQUIRES_SHARED(Locks::mutator_lock_) {
+  self->ThrowNewExceptionF("Ljava/lang/InternalError;",
+                            "Attempt to register dex file %s with multiple class loaders",
+                            dex_file.GetLocation().c_str());
+}
+
 ObjPtr<mirror::DexCache> ClassLinker::RegisterDexFile(const DexFile& dex_file,
                                                       ObjPtr<mirror::ClassLoader> class_loader) {
   Thread* self = Thread::Current();
-  DexCacheData old_data;
+  ObjPtr<mirror::DexCache> old_dex_cache;
+  bool registered_with_another_class_loader = false;
   {
     ReaderMutexLock mu(self, *Locks::dex_lock_);
-    old_data = FindDexCacheDataLocked(dex_file);
+    const DexCacheData* old_data = FindDexCacheDataLocked(dex_file);
+    old_dex_cache = DecodeDexCacheLocked(self, old_data);
+    if (old_dex_cache != nullptr) {
+      if (IsSameClassLoader(old_dex_cache, old_data, class_loader)) {
+        return old_dex_cache;
+      } else {
+        // TODO This is not very clean looking. Should maybe try to make a way to request exceptions
+        // be thrown when it's safe to do so to simplify this.
+        registered_with_another_class_loader = true;
+      }
+    }
   }
-  ObjPtr<mirror::DexCache> old_dex_cache = DecodeDexCache(self, old_data);
-  if (old_dex_cache != nullptr) {
-    return EnsureSameClassLoader(self, old_dex_cache, old_data, class_loader);
+  // We need to have released the dex_lock_ to allocate safely.
+  if (registered_with_another_class_loader) {
+    ThrowDexFileAlreadyRegisteredError(self, dex_file);
+    return nullptr;
   }
   SCOPED_TRACE << __FUNCTION__ << " " << dex_file.GetLocation();
   LinearAlloc* const linear_alloc = GetOrCreateAllocatorForClassLoader(class_loader);
@@ -3973,8 +4132,8 @@
     // weak references access, and a thread blocking on the dex lock.
     gc::ScopedGCCriticalSection gcs(self, gc::kGcCauseClassLinker, gc::kCollectorTypeClassLinker);
     WriterMutexLock mu(self, *Locks::dex_lock_);
-    old_data = FindDexCacheDataLocked(dex_file);
-    old_dex_cache = DecodeDexCache(self, old_data);
+    const DexCacheData* old_data = FindDexCacheDataLocked(dex_file);
+    old_dex_cache = DecodeDexCacheLocked(self, old_data);
     if (old_dex_cache == nullptr && h_dex_cache != nullptr) {
       // Do InitializeDexCache while holding dex lock to make sure two threads don't call it at the
       // same time with the same dex cache. Since the .bss is shared this can cause failing DCHECK
@@ -3987,14 +4146,23 @@
                                            image_pointer_size_);
       RegisterDexFileLocked(dex_file, h_dex_cache.Get(), h_class_loader.Get());
     }
+    if (old_dex_cache != nullptr) {
+      // Another thread managed to initialize the dex cache faster, so use that DexCache.
+      // If this thread encountered OOME, ignore it.
+      DCHECK_EQ(h_dex_cache == nullptr, self->IsExceptionPending());
+      self->ClearException();
+      // We cannot call EnsureSameClassLoader() or allocate an exception while holding the
+      // dex_lock_.
+      if (IsSameClassLoader(old_dex_cache, old_data, h_class_loader.Get())) {
+        return old_dex_cache;
+      } else {
+        registered_with_another_class_loader = true;
+      }
+    }
   }
-  if (old_dex_cache != nullptr) {
-    // Another thread managed to initialize the dex cache faster, so use that DexCache.
-    // If this thread encountered OOME, ignore it.
-    DCHECK_EQ(h_dex_cache == nullptr, self->IsExceptionPending());
-    self->ClearException();
-    // We cannot call EnsureSameClassLoader() while holding the dex_lock_.
-    return EnsureSameClassLoader(self, old_dex_cache, old_data, h_class_loader.Get());
+  if (registered_with_another_class_loader) {
+    ThrowDexFileAlreadyRegisteredError(self, dex_file);
+    return nullptr;
   }
   if (h_dex_cache == nullptr) {
     self->AssertPendingOOMException();
@@ -4011,24 +4179,24 @@
 
 bool ClassLinker::IsDexFileRegistered(Thread* self, const DexFile& dex_file) {
   ReaderMutexLock mu(self, *Locks::dex_lock_);
-  return DecodeDexCache(self, FindDexCacheDataLocked(dex_file)) != nullptr;
+  return DecodeDexCacheLocked(self, FindDexCacheDataLocked(dex_file)) != nullptr;
 }
 
 ObjPtr<mirror::DexCache> ClassLinker::FindDexCache(Thread* self, const DexFile& dex_file) {
   ReaderMutexLock mu(self, *Locks::dex_lock_);
-  DexCacheData dex_cache_data = FindDexCacheDataLocked(dex_file);
-  ObjPtr<mirror::DexCache> dex_cache = DecodeDexCache(self, dex_cache_data);
+  const DexCacheData* dex_cache_data = FindDexCacheDataLocked(dex_file);
+  ObjPtr<mirror::DexCache> dex_cache = DecodeDexCacheLocked(self, dex_cache_data);
   if (dex_cache != nullptr) {
     return dex_cache;
   }
   // Failure, dump diagnostic and abort.
   for (const DexCacheData& data : dex_caches_) {
-    if (DecodeDexCache(self, data) != nullptr) {
+    if (DecodeDexCacheLocked(self, &data) != nullptr) {
       LOG(FATAL_WITHOUT_ABORT) << "Registered dex file " << data.dex_file->GetLocation();
     }
   }
   LOG(FATAL) << "Failed to find DexCache for DexFile " << dex_file.GetLocation()
-             << " " << &dex_file << " " << dex_cache_data.dex_file;
+             << " " << &dex_file << " " << dex_cache_data->dex_file;
   UNREACHABLE();
 }
 
@@ -4040,7 +4208,7 @@
   for (const DexCacheData& data : dex_caches_) {
     // Avoid decoding (and read barriers) other unrelated dex caches.
     if (data.dex_file == dex_file) {
-      ObjPtr<mirror::DexCache> registered_dex_cache = DecodeDexCache(self, data);
+      ObjPtr<mirror::DexCache> registered_dex_cache = DecodeDexCacheLocked(self, &data);
       if (registered_dex_cache != nullptr) {
         CHECK_EQ(registered_dex_cache, dex_cache) << dex_file->GetLocation();
         return data.class_table;
@@ -4050,39 +4218,41 @@
   return nullptr;
 }
 
-ClassLinker::DexCacheData ClassLinker::FindDexCacheDataLocked(const DexFile& dex_file) {
+const ClassLinker::DexCacheData* ClassLinker::FindDexCacheDataLocked(const DexFile& dex_file) {
   // Search assuming unique-ness of dex file.
   for (const DexCacheData& data : dex_caches_) {
     // Avoid decoding (and read barriers) other unrelated dex caches.
     if (data.dex_file == &dex_file) {
-      return data;
+      return &data;
     }
   }
-  return DexCacheData();
+  return nullptr;
 }
 
-ObjPtr<mirror::Class> ClassLinker::CreatePrimitiveClass(Thread* self, Primitive::Type type) {
+void ClassLinker::CreatePrimitiveClass(Thread* self,
+                                       Primitive::Type type,
+                                       ClassRoot primitive_root) {
   ObjPtr<mirror::Class> primitive_class =
       AllocClass(self, mirror::Class::PrimitiveClassSize(image_pointer_size_));
-  if (UNLIKELY(primitive_class == nullptr)) {
-    self->AssertPendingOOMException();
-    return nullptr;
-  }
-  // Must hold lock on object when initializing.
-  StackHandleScope<1> hs(self);
-  Handle<mirror::Class> h_class(hs.NewHandle(primitive_class));
-  ObjectLock<mirror::Class> lock(self, h_class);
-  h_class->SetAccessFlags(kAccPublic | kAccFinal | kAccAbstract);
-  h_class->SetPrimitiveType(type);
-  h_class->SetIfTable(GetClassRoot<mirror::Object>(this)->GetIfTable());
-  EnsureSkipAccessChecksMethods</* kNeedsVerified= */ true>(h_class, image_pointer_size_);
-  mirror::Class::SetStatus(h_class, ClassStatus::kInitialized, self);
+  CHECK(primitive_class != nullptr) << "OOM for primitive class " << type;
+  // Do not hold lock on the primitive class object, the initialization of
+  // primitive classes is done while the process is still single threaded.
+  primitive_class->SetAccessFlagsDuringLinking(
+      kAccPublic | kAccFinal | kAccAbstract | kAccVerificationAttempted);
+  primitive_class->SetPrimitiveType(type);
+  primitive_class->SetIfTable(GetClassRoot<mirror::Object>(this)->GetIfTable());
+  // Skip EnsureSkipAccessChecksMethods(). We can skip the verified status,
+  // the kAccVerificationAttempted flag was added above, and there are no
+  // methods that need the kAccSkipAccessChecks flag.
+  DCHECK_EQ(primitive_class->NumMethods(), 0u);
+  // Primitive classes are initialized during single threaded startup, so visibly initialized.
+  primitive_class->SetStatusForPrimitiveOrArray(ClassStatus::kVisiblyInitialized);
   const char* descriptor = Primitive::Descriptor(type);
   ObjPtr<mirror::Class> existing = InsertClass(descriptor,
-                                               h_class.Get(),
+                                               primitive_class,
                                                ComputeModifiedUtf8Hash(descriptor));
   CHECK(existing == nullptr) << "InitPrimitiveClass(" << type << ") failed";
-  return h_class.Get();
+  SetClassRoot(primitive_root, primitive_class);
 }
 
 inline ObjPtr<mirror::IfTable> ClassLinker::GetArrayIfTable() {
@@ -4163,6 +4333,14 @@
       return new_class;
     }
   }
+  // Core array classes, i.e. Object[], Class[], String[] and primitive
+  // arrays, have special initialization and they should be found above.
+  DCHECK(!component_type->IsObjectClass() ||
+         // Guard from false positives for errors before setting superclass.
+         component_type->IsErroneousUnresolved());
+  DCHECK(!component_type->IsStringClass());
+  DCHECK(!component_type->IsClassClass());
+  DCHECK(!component_type->IsPrimitive());
 
   // Fill out the fields in the Class.
   //
@@ -4172,92 +4350,27 @@
   //
   // Array classes are simple enough that we don't need to do a full
   // link step.
-  auto new_class = hs.NewHandle<mirror::Class>(nullptr);
-  if (UNLIKELY(!init_done_)) {
-    // Classes that were hand created, ie not by FindSystemClass
-    if (strcmp(descriptor, "[Ljava/lang/Class;") == 0) {
-      new_class.Assign(GetClassRoot<mirror::ObjectArray<mirror::Class>>(this));
-    } else if (strcmp(descriptor, "[Ljava/lang/Object;") == 0) {
-      new_class.Assign(GetClassRoot<mirror::ObjectArray<mirror::Object>>(this));
-    } else if (strcmp(descriptor, "[Ljava/lang/String;") == 0) {
-      new_class.Assign(GetClassRoot<mirror::ObjectArray<mirror::String>>(this));
-    } else if (strcmp(descriptor, "[Z") == 0) {
-      new_class.Assign(GetClassRoot<mirror::BooleanArray>(this));
-    } else if (strcmp(descriptor, "[B") == 0) {
-      new_class.Assign(GetClassRoot<mirror::ByteArray>(this));
-    } else if (strcmp(descriptor, "[C") == 0) {
-      new_class.Assign(GetClassRoot<mirror::CharArray>(this));
-    } else if (strcmp(descriptor, "[S") == 0) {
-      new_class.Assign(GetClassRoot<mirror::ShortArray>(this));
-    } else if (strcmp(descriptor, "[I") == 0) {
-      new_class.Assign(GetClassRoot<mirror::IntArray>(this));
-    } else if (strcmp(descriptor, "[J") == 0) {
-      new_class.Assign(GetClassRoot<mirror::LongArray>(this));
-    } else if (strcmp(descriptor, "[F") == 0) {
-      new_class.Assign(GetClassRoot<mirror::FloatArray>(this));
-    } else if (strcmp(descriptor, "[D") == 0) {
-      new_class.Assign(GetClassRoot<mirror::DoubleArray>(this));
-    }
-  }
+  size_t array_class_size = mirror::Array::ClassSize(image_pointer_size_);
+  auto visitor = [this, array_class_size, component_type](ObjPtr<mirror::Object> obj,
+                                                          size_t usable_size)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    ScopedAssertNoNewTransactionRecords sanntr("CreateArrayClass");
+    mirror::Class::InitializeClassVisitor init_class(array_class_size);
+    init_class(obj, usable_size);
+    ObjPtr<mirror::Class> klass = ObjPtr<mirror::Class>::DownCast(obj);
+    klass->SetComponentType(component_type.Get());
+    // Do not hold lock for initialization, the fence issued after the visitor
+    // returns ensures memory visibility together with the implicit consume
+    // semantics (for all supported architectures) for any thread that loads
+    // the array class reference from any memory locations afterwards.
+    FinishArrayClassSetup(klass);
+  };
+  auto new_class = hs.NewHandle<mirror::Class>(
+      AllocClass(self, GetClassRoot<mirror::Class>(this), array_class_size, visitor));
   if (new_class == nullptr) {
-    new_class.Assign(AllocClass(self, mirror::Array::ClassSize(image_pointer_size_)));
-    if (new_class == nullptr) {
-      self->AssertPendingOOMException();
-      return nullptr;
-    }
-    new_class->SetComponentType(component_type.Get());
+    self->AssertPendingOOMException();
+    return nullptr;
   }
-  ObjectLock<mirror::Class> lock(self, new_class);  // Must hold lock on object when initializing.
-  DCHECK(new_class->GetComponentType() != nullptr);
-  ObjPtr<mirror::Class> java_lang_Object = GetClassRoot<mirror::Object>(this);
-  new_class->SetSuperClass(java_lang_Object);
-  new_class->SetVTable(java_lang_Object->GetVTable());
-  new_class->SetPrimitiveType(Primitive::kPrimNot);
-  new_class->SetClassLoader(component_type->GetClassLoader());
-  if (component_type->IsPrimitive()) {
-    new_class->SetClassFlags(mirror::kClassFlagNoReferenceFields);
-  } else {
-    new_class->SetClassFlags(mirror::kClassFlagObjectArray);
-  }
-  mirror::Class::SetStatus(new_class, ClassStatus::kLoaded, self);
-  new_class->PopulateEmbeddedVTable(image_pointer_size_);
-  ImTable* object_imt = java_lang_Object->GetImt(image_pointer_size_);
-  new_class->SetImt(object_imt, image_pointer_size_);
-  EnsureSkipAccessChecksMethods</* kNeedsVerified= */ true>(new_class, image_pointer_size_);
-  mirror::Class::SetStatus(new_class, ClassStatus::kInitialized, self);
-  // don't need to set new_class->SetObjectSize(..)
-  // because Object::SizeOf delegates to Array::SizeOf
-
-  // All arrays have java/lang/Cloneable and java/io/Serializable as
-  // interfaces.  We need to set that up here, so that stuff like
-  // "instanceof" works right.
-  //
-  // Note: The GC could run during the call to FindSystemClass,
-  // so we need to make sure the class object is GC-valid while we're in
-  // there.  Do this by clearing the interface list so the GC will just
-  // think that the entries are null.
-
-
-  // Use the single, global copies of "interfaces" and "iftable"
-  // (remember not to free them for arrays).
-  {
-    ObjPtr<mirror::IfTable> array_iftable = GetArrayIfTable();
-    CHECK(array_iftable != nullptr);
-    new_class->SetIfTable(array_iftable);
-  }
-
-  // Inherit access flags from the component type.
-  int access_flags = new_class->GetComponentType()->GetAccessFlags();
-  // Lose any implementation detail flags; in particular, arrays aren't finalizable.
-  access_flags &= kAccJavaFlagsMask;
-  // Arrays can't be used as a superclass or interface, so we want to add "abstract final"
-  // and remove "interface".
-  access_flags |= kAccAbstract | kAccFinal;
-  access_flags &= ~kAccInterface;
-  // Arrays are access-checks-clean and preverified.
-  access_flags |= kAccVerificationAttempted;
-
-  new_class->SetAccessFlags(access_flags);
 
   ObjPtr<mirror::Class> existing = InsertClass(descriptor, new_class.Get(), hash);
   if (existing == nullptr) {
@@ -4458,7 +4571,9 @@
     VerifyClass(self, supertype);
   }
 
-  if (supertype->IsVerified() || supertype->ShouldVerifyAtRuntime()) {
+  if (supertype->IsVerified()
+      || supertype->ShouldVerifyAtRuntime()
+      || supertype->IsVerifiedNeedsAccessChecks()) {
     // The supertype is either verified, or we soft failed at AOT time.
     DCHECK(supertype->IsVerified() || Runtime::Current()->IsAotCompiler());
     return true;
@@ -4498,8 +4613,7 @@
 
     // Is somebody verifying this now?
     ClassStatus old_status = klass->GetStatus();
-    while (old_status == ClassStatus::kVerifying ||
-        old_status == ClassStatus::kVerifyingAtRuntime) {
+    while (old_status == ClassStatus::kVerifying) {
       lock.WaitIgnoringInterrupts();
       // WaitIgnoringInterrupts can still receive an interrupt and return early, in this
       // case we may see the same status again. b/62912904. This is why the check is
@@ -4524,20 +4638,25 @@
       return verifier::FailureKind::kNoFailure;
     }
 
+    if (klass->IsVerifiedNeedsAccessChecks()) {
+      if (!Runtime::Current()->IsAotCompiler()) {
+        // Mark the class as having a verification attempt to avoid re-running
+        // the verifier and avoid calling EnsureSkipAccessChecksMethods.
+        klass->SetVerificationAttempted();
+        mirror::Class::SetStatus(klass, ClassStatus::kVerified, self);
+      }
+      return verifier::FailureKind::kAccessChecksFailure;
+    }
+
     // For AOT, don't attempt to re-verify if we have already found we should
     // verify at runtime.
-    if (Runtime::Current()->IsAotCompiler() && klass->ShouldVerifyAtRuntime()) {
+    if (klass->ShouldVerifyAtRuntime()) {
+      CHECK(Runtime::Current()->IsAotCompiler());
       return verifier::FailureKind::kSoftFailure;
     }
 
-    if (klass->GetStatus() == ClassStatus::kResolved) {
-      mirror::Class::SetStatus(klass, ClassStatus::kVerifying, self);
-    } else {
-      CHECK_EQ(klass->GetStatus(), ClassStatus::kRetryVerificationAtRuntime)
-          << klass->PrettyClass();
-      CHECK(!Runtime::Current()->IsAotCompiler());
-      mirror::Class::SetStatus(klass, ClassStatus::kVerifyingAtRuntime, self);
-    }
+    DCHECK_EQ(klass->GetStatus(), ClassStatus::kResolved);
+    mirror::Class::SetStatus(klass, ClassStatus::kVerifying, self);
 
     // Skip verification if disabled.
     if (!Runtime::Current()->IsVerificationEnabled()) {
@@ -4610,7 +4729,8 @@
                      << klass->PrettyDescriptor()
                      << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8()
                      << ": "
-                     << preverified;
+                     << preverified
+                     << "( " << oat_file_class_status << ")";
 
   // If the oat file says the class had an error, re-run the verifier. That way we will get a
   // precise error message. To ensure a rerun, test:
@@ -4639,21 +4759,29 @@
     if (verifier_failure == verifier::FailureKind::kNoFailure) {
       // Even though there were no verifier failures we need to respect whether the super-class and
       // super-default-interfaces were verified or requiring runtime reverification.
-      if (supertype == nullptr || supertype->IsVerified()) {
+      if (supertype == nullptr
+          || supertype->IsVerified()
+          || supertype->IsVerifiedNeedsAccessChecks()) {
         mirror::Class::SetStatus(klass, ClassStatus::kVerified, self);
       } else {
+        CHECK(Runtime::Current()->IsAotCompiler());
         CHECK_EQ(supertype->GetStatus(), ClassStatus::kRetryVerificationAtRuntime);
         mirror::Class::SetStatus(klass, ClassStatus::kRetryVerificationAtRuntime, self);
         // Pretend a soft failure occurred so that we don't consider the class verified below.
         verifier_failure = verifier::FailureKind::kSoftFailure;
       }
     } else {
-      CHECK_EQ(verifier_failure, verifier::FailureKind::kSoftFailure);
+      CHECK(verifier_failure == verifier::FailureKind::kSoftFailure ||
+            verifier_failure == verifier::FailureKind::kAccessChecksFailure);
       // Soft failures at compile time should be retried at runtime. Soft
       // failures at runtime will be handled by slow paths in the generated
       // code. Set status accordingly.
       if (Runtime::Current()->IsAotCompiler()) {
-        mirror::Class::SetStatus(klass, ClassStatus::kRetryVerificationAtRuntime, self);
+        if (verifier_failure == verifier::FailureKind::kSoftFailure) {
+          mirror::Class::SetStatus(klass, ClassStatus::kRetryVerificationAtRuntime, self);
+        } else {
+          mirror::Class::SetStatus(klass, ClassStatus::kVerifiedNeedsAccessChecks, self);
+        }
       } else {
         mirror::Class::SetStatus(klass, ClassStatus::kVerified, self);
         // As this is a fake verified status, make sure the methods are _not_ marked
@@ -4670,18 +4798,18 @@
     mirror::Class::SetStatus(klass, ClassStatus::kErrorResolved, self);
   }
   if (preverified || verifier_failure == verifier::FailureKind::kNoFailure) {
-    // Class is verified so we don't need to do any access check on its methods.
-    // Let the interpreter know it by setting the kAccSkipAccessChecks flag onto each
-    // method.
-    // Note: we're going here during compilation and at runtime. When we set the
-    // kAccSkipAccessChecks flag when compiling image classes, the flag is recorded
-    // in the image and is set when loading the image.
-
-    if (UNLIKELY(Runtime::Current()->IsVerificationSoftFail())) {
+    if (oat_file_class_status == ClassStatus::kVerifiedNeedsAccessChecks ||
+        UNLIKELY(Runtime::Current()->IsVerificationSoftFail())) {
       // Never skip access checks if the verification soft fail is forced.
       // Mark the class as having a verification attempt to avoid re-running the verifier.
       klass->SetVerificationAttempted();
     } else {
+      // Class is verified so we don't need to do any access check on its methods.
+      // Let the interpreter know it by setting the kAccSkipAccessChecks flag onto each
+      // method.
+      // Note: we're going here during compilation and at runtime. When we set the
+      // kAccSkipAccessChecks flag when compiling image classes, the flag is recorded
+      // in the image and is set when loading the image.
       EnsureSkipAccessChecksMethods(klass, image_pointer_size_);
     }
   }
@@ -4717,10 +4845,6 @@
   // tell us about the latter.
   if (Runtime::Current()->IsAotCompiler()) {
     CompilerCallbacks* callbacks = Runtime::Current()->GetCompilerCallbacks();
-    // Are we compiling the bootclasspath?
-    if (callbacks->IsBootImage()) {
-      return false;
-    }
     // We are compiling an app (not the image).
     if (!callbacks->CanUseOatStatusForVerification(klass.Ptr())) {
       return false;
@@ -4730,20 +4854,6 @@
   const OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
   // In case we run without an image there won't be a backing oat file.
   if (oat_dex_file == nullptr || oat_dex_file->GetOatFile() == nullptr) {
-    if (!kIsDebugBuild && klass->GetClassLoader() == nullptr) {
-      // For boot classpath classes in the case we're not using a default boot image:
-      // we don't have the infrastructure yet to query verification data on individual
-      // boot vdex files, so it's simpler for now to consider all boot classpath classes
-      // verified. This should be taken into account when measuring boot time and app
-      // startup compare to the (current) production system where both:
-      // 1) updatable boot classpath classes, and
-      // 2) classes in /system referencing updatable classes
-      // will be verified at runtime.
-      if (Runtime::Current()->IsUsingApexBootImageLocation()) {
-        oat_file_class_status = ClassStatus::kVerified;
-        return true;
-      }
-    }
     return false;
   }
 
@@ -4752,31 +4862,20 @@
   if (oat_file_class_status >= ClassStatus::kVerified) {
     return true;
   }
+  if (oat_file_class_status >= ClassStatus::kVerifiedNeedsAccessChecks) {
+    // We return that the class has already been verified, and the caller should
+    // check the class status to ensure we run with access checks.
+    return true;
+  }
   // If we only verified a subset of the classes at compile time, we can end up with classes that
   // were resolved by the verifier.
   if (oat_file_class_status == ClassStatus::kResolved) {
     return false;
   }
-  if (oat_file_class_status == ClassStatus::kRetryVerificationAtRuntime) {
-    // Compile time verification failed with a soft error. Compile time verification can fail
-    // because we have incomplete type information. Consider the following:
-    // class ... {
-    //   Foo x;
-    //   .... () {
-    //     if (...) {
-    //       v1 gets assigned a type of resolved class Foo
-    //     } else {
-    //       v1 gets assigned a type of unresolved class Bar
-    //     }
-    //     iput x = v1
-    // } }
-    // when we merge v1 following the if-the-else it results in Conflict
-    // (see verifier::RegType::Merge) as we can't know the type of Bar and we could possibly be
-    // allowing an unsafe assignment to the field x in the iput (javac may have compiled this as
-    // it knew Bar was a sub-class of Foo, but for us this may have been moved into a separate apk
-    // at compile time).
-    return false;
-  }
+  // We never expect a .oat file to have kRetryVerificationAtRuntime statuses.
+  CHECK_NE(oat_file_class_status, ClassStatus::kRetryVerificationAtRuntime)
+      << klass->PrettyClass() << " " << dex_file.GetLocation();
+
   if (mirror::Class::IsErroneous(oat_file_class_status)) {
     // Compile time verification failed with a hard error. This is caused by invalid instructions
     // in the class. These errors are unrecoverable.
@@ -4849,7 +4948,7 @@
     return nullptr;
   }
 
-  StackHandleScope<10> hs(self);
+  StackHandleScope<12> hs(self);
   MutableHandle<mirror::Class> temp_klass(hs.NewHandle(
       AllocClass(self, GetClassRoot<mirror::Class>(this), sizeof(mirror::Class))));
   if (temp_klass == nullptr) {
@@ -4860,7 +4959,8 @@
   temp_klass->SetObjectSize(sizeof(mirror::Proxy));
   // Set the class access flags incl. VerificationAttempted, so we do not try to set the flag on
   // the methods.
-  temp_klass->SetAccessFlags(kAccClassIsProxy | kAccPublic | kAccFinal | kAccVerificationAttempted);
+  temp_klass->SetAccessFlagsDuringLinking(
+      kAccClassIsProxy | kAccPublic | kAccFinal | kAccVerificationAttempted);
   temp_klass->SetClassLoader(soa.Decode<mirror::ClassLoader>(loader));
   DCHECK_EQ(temp_klass->GetPrimitiveType(), Primitive::kPrimNot);
   temp_klass->SetName(soa.Decode<mirror::String>(name));
@@ -4903,11 +5003,53 @@
   // Proxies have 1 direct method, the constructor
   const size_t num_direct_methods = 1;
 
-  // They have as many virtual methods as the array
-  auto h_methods = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Method>>(methods));
+  // The array we get passed contains all methods, including private and static
+  // ones that aren't proxied. We need to filter those out since only interface
+  // methods (non-private & virtual) are actually proxied.
+  Handle<mirror::ObjectArray<mirror::Method>> h_methods =
+      hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Method>>(methods));
   DCHECK_EQ(h_methods->GetClass(), GetClassRoot<mirror::ObjectArray<mirror::Method>>())
       << mirror::Class::PrettyClass(h_methods->GetClass());
-  const size_t num_virtual_methods = h_methods->GetLength();
+  // List of the actual virtual methods this class will have.
+  std::vector<ArtMethod*> proxied_methods;
+  std::vector<size_t> proxied_throws_idx;
+  proxied_methods.reserve(h_methods->GetLength());
+  proxied_throws_idx.reserve(h_methods->GetLength());
+  // Filter out to only the non-private virtual methods.
+  for (auto [mirror, idx] : ZipCount(h_methods.Iterate<mirror::Method>())) {
+    ArtMethod* m = mirror->GetArtMethod();
+    if (!m->IsPrivate() && !m->IsStatic()) {
+      proxied_methods.push_back(m);
+      proxied_throws_idx.push_back(idx);
+    }
+  }
+  const size_t num_virtual_methods = proxied_methods.size();
+  // We also need to filter out the 'throws'. The 'throws' are a Class[][] that
+  // contains an array of all the classes each function is declared to throw.
+  // This is used to wrap unexpected exceptions in a
+  // UndeclaredThrowableException exception. This array is in the same order as
+  // the methods array and like the methods array must be filtered to remove any
+  // non-proxied methods.
+  const bool has_filtered_methods =
+      static_cast<int32_t>(num_virtual_methods) != h_methods->GetLength();
+  MutableHandle<mirror::ObjectArray<mirror::ObjectArray<mirror::Class>>> original_proxied_throws(
+      hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class>>>(throws)));
+  MutableHandle<mirror::ObjectArray<mirror::ObjectArray<mirror::Class>>> proxied_throws(
+      hs.NewHandle<mirror::ObjectArray<mirror::ObjectArray<mirror::Class>>>(
+          (has_filtered_methods)
+              ? mirror::ObjectArray<mirror::ObjectArray<mirror::Class>>::Alloc(
+                    self, original_proxied_throws->GetClass(), num_virtual_methods)
+              : original_proxied_throws.Get()));
+  if (proxied_throws.IsNull() && !original_proxied_throws.IsNull()) {
+    self->AssertPendingOOMException();
+    return nullptr;
+  }
+  if (has_filtered_methods) {
+    for (auto [orig_idx, new_idx] : ZipCount(MakeIterationRange(proxied_throws_idx))) {
+      DCHECK_LE(new_idx, orig_idx);
+      proxied_throws->Set(new_idx, original_proxied_throws->Get(orig_idx));
+    }
+  }
 
   // Create the methods array.
   LengthPrefixedArray<ArtMethod>* proxy_class_methods = AllocArtMethodArray(
@@ -4927,7 +5069,7 @@
   // TODO These should really use the iterators.
   for (size_t i = 0; i < num_virtual_methods; ++i) {
     auto* virtual_method = temp_klass->GetVirtualMethodUnchecked(i, image_pointer_size_);
-    auto* prototype = h_methods->Get(i)->GetArtMethod();
+    auto* prototype = proxied_methods[i];
     CreateProxyMethod(temp_klass, prototype, virtual_method);
     DCHECK(virtual_method->GetDeclaringClass() != nullptr);
     DCHECK(prototype->GetDeclaringClass() != nullptr);
@@ -4966,7 +5108,7 @@
   CHECK_EQ(throws_sfield.GetDeclaringClass(), klass.Get());
   throws_sfield.SetObject<false>(
       klass.Get(),
-      soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class>>>(throws));
+      proxied_throws.Get());
 
   Runtime::Current()->GetRuntimeCallbacks()->ClassPrepare(temp_klass, klass);
 
@@ -4978,11 +5120,16 @@
     // TODO: Avoid taking subtype_check_lock_ if SubtypeCheck for j.l.r.Proxy is already assigned.
   }
 
+  VisiblyInitializedCallback* callback = nullptr;
   {
     // Lock on klass is released. Lock new class object.
     ObjectLock<mirror::Class> initialization_lock(self, klass);
     EnsureSkipAccessChecksMethods(klass, image_pointer_size_);
-    mirror::Class::SetStatus(klass, ClassStatus::kInitialized, self);
+    // Conservatively go through the ClassStatus::kInitialized state.
+    callback = MarkClassInitialized(self, klass);
+  }
+  if (callback != nullptr) {
+    callback->MakeVisible(self);
   }
 
   // sanity checks
@@ -4992,8 +5139,7 @@
 
     for (size_t i = 0; i < num_virtual_methods; ++i) {
       auto* virtual_method = klass->GetVirtualMethodUnchecked(i, image_pointer_size_);
-      auto* prototype = h_methods->Get(i++)->GetArtMethod();
-      CheckProxyMethod(virtual_method, prototype);
+      CheckProxyMethod(virtual_method, proxied_methods[i]);
     }
 
     StackHandleScope<1> hs2(self);
@@ -5009,7 +5155,7 @@
     CHECK_EQ(klass.Get()->GetProxyInterfaces(),
              soa.Decode<mirror::ObjectArray<mirror::Class>>(interfaces));
     CHECK_EQ(klass.Get()->GetProxyThrows(),
-             soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class>>>(throws));
+             proxied_throws.Get());
   }
   return klass.Get();
 }
@@ -5110,14 +5256,15 @@
         return false;
       }
     }
-    // If we are a class we need to initialize all interfaces with default methods when we are
-    // initialized. Check all of them.
-    if (!klass->IsInterface()) {
-      size_t num_interfaces = klass->GetIfTableCount();
-      for (size_t i = 0; i < num_interfaces; i++) {
-        ObjPtr<mirror::Class> iface = klass->GetIfTable()->GetInterface(i);
-        if (iface->HasDefaultMethods() &&
-            !CanWeInitializeClass(iface, can_init_statics, can_init_parents)) {
+  }
+  // If we are a class we need to initialize all interfaces with default methods when we are
+  // initialized. Check all of them.
+  if (!klass->IsInterface()) {
+    size_t num_interfaces = klass->GetIfTableCount();
+    for (size_t i = 0; i < num_interfaces; i++) {
+      ObjPtr<mirror::Class> iface = klass->GetIfTable()->GetInterface(i);
+      if (iface->HasDefaultMethods() && !iface->IsInitialized()) {
+        if (!can_init_parents || !CanWeInitializeClass(iface, can_init_statics, can_init_parents)) {
           return false;
         }
       }
@@ -5127,10 +5274,10 @@
     return true;
   }
   ObjPtr<mirror::Class> super_class = klass->GetSuperClass();
-  if (!can_init_parents && !super_class->IsInitialized()) {
-    return false;
+  if (super_class->IsInitialized()) {
+    return true;
   }
-  return CanWeInitializeClass(super_class, can_init_statics, can_init_parents);
+  return can_init_parents && CanWeInitializeClass(super_class, can_init_statics, can_init_parents);
 }
 
 bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
@@ -5192,7 +5339,7 @@
           VlogClassInitializationFailure(klass);
         } else {
           CHECK(Runtime::Current()->IsAotCompiler());
-          CHECK_EQ(klass->GetStatus(), ClassStatus::kRetryVerificationAtRuntime);
+          CHECK(klass->ShouldVerifyAtRuntime() || klass->IsVerifiedNeedsAccessChecks());
           self->AssertNoPendingException();
           self->SetException(Runtime::Current()->GetPreAllocatedNoClassDefFoundError());
         }
@@ -5259,6 +5406,8 @@
     t0 = NanoTime();
   }
 
+  uint64_t t_sub = 0;
+
   // Initialize super classes, must be done while initializing for the JLS.
   if (!klass->IsInterface() && klass->HasSuperClass()) {
     ObjPtr<mirror::Class> super_class = klass->GetSuperClass();
@@ -5267,7 +5416,9 @@
       CHECK(can_init_parents);
       StackHandleScope<1> hs(self);
       Handle<mirror::Class> handle_scope_super(hs.NewHandle(super_class));
+      uint64_t super_t0 = NanoTime();
       bool super_initialized = InitializeClass(self, handle_scope_super, can_init_statics, true);
+      uint64_t super_t1 = NanoTime();
       if (!super_initialized) {
         // The super class was verified ahead of entering initializing, we should only be here if
         // the super class became erroneous due to initialization.
@@ -5284,6 +5435,7 @@
         mirror::Class::SetStatus(klass, ClassStatus::kErrorResolved, self);
         return false;
       }
+      t_sub = super_t1 - super_t0;
     }
   }
 
@@ -5305,16 +5457,19 @@
         // We cannot just call initialize class directly because we need to ensure that ALL
         // interfaces with default methods are initialized. Non-default interface initialization
         // will not affect other non-default super-interfaces.
+        uint64_t inf_t0 = NanoTime();  // This is not very precise, misses all walking.
         bool iface_initialized = InitializeDefaultInterfaceRecursive(self,
                                                                      handle_scope_iface,
                                                                      can_init_statics,
                                                                      can_init_parents);
+        uint64_t inf_t1 = NanoTime();
         if (!iface_initialized) {
           ObjectLock<mirror::Class> lock(self, klass);
           // Initialization failed because one of our interfaces with default methods is erroneous.
           mirror::Class::SetStatus(klass, ClassStatus::kErrorResolved, self);
           return false;
         }
+        t_sub += inf_t1 - inf_t0;
       }
     }
   }
@@ -5388,6 +5543,7 @@
   self->AllowThreadSuspension();
   uint64_t t1 = NanoTime();
 
+  VisiblyInitializedCallback* callback = nullptr;
   bool success = true;
   {
     ObjectLock<mirror::Class> lock(self, klass);
@@ -5410,19 +5566,20 @@
       RuntimeStats* thread_stats = self->GetStats();
       ++global_stats->class_init_count;
       ++thread_stats->class_init_count;
-      global_stats->class_init_time_ns += (t1 - t0);
-      thread_stats->class_init_time_ns += (t1 - t0);
+      global_stats->class_init_time_ns += (t1 - t0 - t_sub);
+      thread_stats->class_init_time_ns += (t1 - t0 - t_sub);
       // Set the class as initialized except if failed to initialize static fields.
-      mirror::Class::SetStatus(klass, ClassStatus::kInitialized, self);
+      callback = MarkClassInitialized(self, klass);
       if (VLOG_IS_ON(class_linker)) {
         std::string temp;
         LOG(INFO) << "Initialized class " << klass->GetDescriptor(&temp) << " from " <<
             klass->GetLocation();
       }
-      // Opportunistically set static method trampolines to their destination.
-      FixupStaticTrampolines(klass.Get());
     }
   }
+  if (callback != nullptr) {
+    callback->MakeVisible(self);
+  }
   return success;
 }
 
@@ -5725,6 +5882,16 @@
   DCHECK(c != nullptr);
 
   if (c->IsInitialized()) {
+    // If we've seen an initialized but not visibly initialized class
+    // many times, request visible initialization.
+    if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
+      // Thanks to the x86 memory model classes skip the initialized status.
+      DCHECK(c->IsVisiblyInitialized());
+    } else if (UNLIKELY(!c->IsVisiblyInitialized())) {
+      if (self->IncrementMakeVisiblyInitializedCounter()) {
+        MakeInitializedClassesVisiblyInitialized(self, /*wait=*/ false);
+      }
+    }
     DCHECK(c->WasVerificationAttempted()) << c->PrettyClassAndClassLoader();
     return true;
   }
@@ -5899,7 +6066,7 @@
     // Update CHA info based on whether we override methods.
     // Have to do this before setting the class as resolved which allows
     // instantiation of klass.
-    if (cha_ != nullptr) {
+    if (LIKELY(descriptor != nullptr) && cha_ != nullptr) {
       cha_->UpdateAfterLoadingOf(klass);
     }
 
@@ -5911,7 +6078,8 @@
     CHECK(!klass->IsResolved());
     // Retire the temporary class and create the correctly sized resolved class.
     StackHandleScope<1> hs(self);
-    auto h_new_class = hs.NewHandle(klass->CopyOf(self, class_size, imt, image_pointer_size_));
+    Handle<mirror::Class> h_new_class =
+        hs.NewHandle(mirror::Class::CopyOf(klass, self, class_size, imt, image_pointer_size_));
     // Set arrays to null since we don't want to have multiple classes with the same ArtField or
     // ArtMethod array pointers. If this occurs, it causes bugs in remembered sets since the GC
     // may not see any references to the target space and clean the card for a class if another
@@ -5929,7 +6097,7 @@
     ObjectLock<mirror::Class> lock(self, h_new_class);
     FixupTemporaryDeclaringClass(klass.Get(), h_new_class.Get());
 
-    {
+    if (LIKELY(descriptor != nullptr)) {
       WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
       const ObjPtr<mirror::ClassLoader> class_loader = h_new_class.Get()->GetClassLoader();
       ClassTable* const table = InsertClassTableForClassLoader(class_loader);
@@ -5949,7 +6117,7 @@
     // Update CHA info based on whether we override methods.
     // Have to do this before setting the class as resolved which allows
     // instantiation of klass.
-    if (cha_ != nullptr) {
+    if (LIKELY(descriptor != nullptr) && cha_ != nullptr) {
       cha_->UpdateAfterLoadingOf(h_new_class);
     }
 
@@ -6259,6 +6427,19 @@
       ArtMethod* m = klass->GetVirtualMethodDuringLinking(i, image_pointer_size_);
       m->SetMethodIndex(i);
       if (!m->IsAbstract()) {
+        // If the dex file does not support default methods, throw ClassFormatError.
+        // This check is necessary to protect from odd cases, such as native default
+        // methods, that the dex file verifier permits for old dex file versions. b/157170505
+        // FIXME: This should be `if (!m->GetDexFile()->SupportsDefaultMethods())` but we're
+        // currently running CTS tests for default methods with dex file version 035 which
+        // does not support default methods. So, we limit this to native methods. b/157718952
+        if (m->IsNative()) {
+          DCHECK(!m->GetDexFile()->SupportsDefaultMethods());
+          ThrowClassFormatError(klass.Get(),
+                                "Dex file does not support default method '%s'",
+                                m->PrettyMethod().c_str());
+          return false;
+        }
         m->SetAccessFlags(m->GetAccessFlags() | kAccDefault);
         has_defaults = true;
       }
@@ -6273,7 +6454,7 @@
   } else if (klass->HasSuperClass()) {
     const size_t super_vtable_length = klass->GetSuperClass()->GetVTableLength();
     const size_t max_count = num_virtual_methods + super_vtable_length;
-    StackHandleScope<2> hs(self);
+    StackHandleScope<3> hs(self);
     Handle<mirror::Class> super_class(hs.NewHandle(klass->GetSuperClass()));
     MutableHandle<mirror::PointerArray> vtable;
     if (super_class->ShouldHaveEmbeddedVTable()) {
@@ -6297,16 +6478,16 @@
       }
     } else {
       DCHECK(super_class->IsAbstract() && !super_class->IsArrayClass());
-      ObjPtr<mirror::PointerArray> super_vtable = super_class->GetVTable();
+      Handle<mirror::PointerArray> super_vtable = hs.NewHandle(super_class->GetVTable());
       CHECK(super_vtable != nullptr) << super_class->PrettyClass();
       // We might need to change vtable if we have new virtual methods or new interfaces (since that
       // might give us new default methods). See comment above.
       if (num_virtual_methods == 0 && super_class->GetIfTableCount() == klass->GetIfTableCount()) {
-        klass->SetVTable(super_vtable);
+        klass->SetVTable(super_vtable.Get());
         return true;
       }
-      vtable = hs.NewHandle(
-          ObjPtr<mirror::PointerArray>::DownCast(super_vtable->CopyOf(self, max_count)));
+      vtable = hs.NewHandle(ObjPtr<mirror::PointerArray>::DownCast(
+          mirror::Array::CopyOf(super_vtable, self, max_count)));
       if (UNLIKELY(vtable == nullptr)) {
         self->AssertPendingOOMException();
         return false;
@@ -6442,7 +6623,8 @@
     // Shrink vtable if possible
     CHECK_LE(actual_count, max_count);
     if (actual_count < max_count) {
-      vtable.Assign(ObjPtr<mirror::PointerArray>::DownCast(vtable->CopyOf(self, actual_count)));
+      vtable.Assign(ObjPtr<mirror::PointerArray>::DownCast(
+          mirror::Array::CopyOf(vtable, self, actual_count)));
       if (UNLIKELY(vtable == nullptr)) {
         self->AssertPendingOOMException();
         return false;
@@ -6700,8 +6882,10 @@
         DCHECK(if_table != nullptr);
         DCHECK(if_table->GetMethodArray(i) != nullptr);
         // If we are working on a super interface, try extending the existing method array.
-        method_array = ObjPtr<mirror::PointerArray>::DownCast(
-            if_table->GetMethodArray(i)->Clone(self));
+        StackHandleScope<1u> hs(self);
+        Handle<mirror::PointerArray> old_array = hs.NewHandle(if_table->GetMethodArray(i));
+        method_array =
+            ObjPtr<mirror::PointerArray>::DownCast(mirror::Object::Clone(old_array, self));
       } else {
         method_array = AllocPointerArray(self, num_methods);
       }
@@ -7121,7 +7305,7 @@
   if (new_ifcount < ifcount) {
     DCHECK_NE(num_interfaces, 0U);
     iftable.Assign(ObjPtr<mirror::IfTable>::DownCast(
-        iftable->CopyOf(self, new_ifcount * mirror::IfTable::kMax)));
+        mirror::IfTable::CopyOf(iftable, self, new_ifcount * mirror::IfTable::kMax)));
     if (UNLIKELY(iftable == nullptr)) {
       self->AssertPendingOOMException();
       return false;
@@ -7439,7 +7623,7 @@
 
   ObjPtr<mirror::PointerArray> UpdateVtable(
       const std::unordered_map<size_t, ClassLinker::MethodTranslation>& default_translations,
-      ObjPtr<mirror::PointerArray> old_vtable) REQUIRES_SHARED(Locks::mutator_lock_);
+      Handle<mirror::PointerArray> old_vtable) REQUIRES_SHARED(Locks::mutator_lock_);
 
   void UpdateIfTable(Handle<mirror::IfTable> iftable) REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -7767,7 +7951,7 @@
 
 ObjPtr<mirror::PointerArray> ClassLinker::LinkInterfaceMethodsHelper::UpdateVtable(
     const std::unordered_map<size_t, ClassLinker::MethodTranslation>& default_translations,
-    ObjPtr<mirror::PointerArray> old_vtable) {
+    Handle<mirror::PointerArray> old_vtable) {
   // Update the vtable to the new method structures. We can skip this for interfaces since they
   // do not have vtables.
   const size_t old_vtable_count = old_vtable->GetLength();
@@ -7776,8 +7960,8 @@
                                   default_methods_.size() +
                                   default_conflict_methods_.size();
 
-  ObjPtr<mirror::PointerArray> vtable =
-      ObjPtr<mirror::PointerArray>::DownCast(old_vtable->CopyOf(self_, new_vtable_count));
+  ObjPtr<mirror::PointerArray> vtable = ObjPtr<mirror::PointerArray>::DownCast(
+      mirror::Array::CopyOf(old_vtable, self_, new_vtable_count));
   if (UNLIKELY(vtable == nullptr)) {
     self_->AssertPendingOOMException();
     return nullptr;
@@ -7963,7 +8147,7 @@
         // If we are overwriting a super class interface, try to only virtual methods instead of the
         // whole vtable.
         using_virtuals = true;
-        input_virtual_methods = klass->GetDeclaredMethodsSlice(image_pointer_size_);
+        input_virtual_methods = klass->GetDeclaredVirtualMethodsSlice(image_pointer_size_);
         input_array_length = input_virtual_methods.size();
       } else {
         // For a new interface, however, we need the whole vtable in case a new
@@ -7999,6 +8183,7 @@
               input_vtable_array->GetElementPtrSize<ArtMethod*>(k, image_pointer_size_);
           ArtMethod* vtable_method_for_name_comparison =
               vtable_method->GetInterfaceMethodIfProxy(image_pointer_size_);
+          DCHECK(!vtable_method->IsStatic()) << vtable_method->PrettyMethod();
           if (interface_name_comparator.HasSameNameAndSignature(
               vtable_method_for_name_comparison)) {
             if (!vtable_method->IsAbstract() && !vtable_method->IsPublic()) {
@@ -8111,7 +8296,7 @@
     self->EndAssertNoThreadSuspension(old_cause);
 
     if (fill_tables) {
-      vtable.Assign(helper.UpdateVtable(default_translations, vtable.Get()));
+      vtable.Assign(helper.UpdateVtable(default_translations, vtable));
       if (UNLIKELY(vtable == nullptr)) {
         // The helper has already called self->AssertPendingOOMException();
         return false;
@@ -8456,8 +8641,8 @@
   return type;
 }
 
-template <typename T>
-ObjPtr<mirror::Class> ClassLinker::DoResolveType(dex::TypeIndex type_idx, T referrer) {
+template <typename RefType>
+ObjPtr<mirror::Class> ClassLinker::DoResolveType(dex::TypeIndex type_idx, RefType referrer) {
   StackHandleScope<2> hs(Thread::Current());
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(referrer->GetClassLoader()));
@@ -8603,6 +8788,13 @@
       // We normaly should not end up here. However the verifier currently doesn't guarantee
       // the invariant of having the klass in the class table. b/73760543
       klass = ResolveType(method_id.class_idx_, dex_cache, class_loader);
+      if (klass == nullptr) {
+        // This can only happen if the current thread is not allowed to load
+        // classes.
+        DCHECK(!Thread::Current()->CanLoadClasses());
+        DCHECK(Thread::Current()->IsExceptionPending());
+        return nullptr;
+      }
     }
   } else {
     // The method was not in the DexCache, resolve the declaring class.
@@ -9240,7 +9432,13 @@
 }
 
 bool ClassLinker::IsJniDlsymLookupStub(const void* entry_point) const {
-  return entry_point == GetJniDlsymLookupStub();
+  return entry_point == GetJniDlsymLookupStub() ||
+      (jni_dlsym_lookup_trampoline_ == entry_point);
+}
+
+bool ClassLinker::IsJniDlsymLookupCriticalStub(const void* entry_point) const {
+  return entry_point == GetJniDlsymLookupCriticalStub() ||
+      (jni_dlsym_lookup_critical_trampoline_ == entry_point);
 }
 
 const void* ClassLinker::GetRuntimeQuickGenericJniStub() const {
@@ -9312,6 +9510,9 @@
     }
   }
   os << "Done dumping class loaders\n";
+  Runtime* runtime = Runtime::Current();
+  os << "Classes initialized: " << runtime->GetStat(KIND_GLOBAL_CLASS_INIT_COUNT) << " in "
+     << PrettyDuration(runtime->GetStat(KIND_GLOBAL_CLASS_INIT_TIME)) << "\n";
 }
 
 class CountClassesVisitor : public ClassLoaderVisitor {
@@ -9371,19 +9572,6 @@
   class_roots->Set<false>(index, klass);
 }
 
-void ClassLinker::AllocAndSetPrimitiveArrayClassRoot(Thread* self,
-                                                     ObjPtr<mirror::Class> java_lang_Class,
-                                                     ClassRoot primitive_array_class_root,
-                                                     ClassRoot primitive_class_root,
-                                                     const char* descriptor) {
-  StackHandleScope<1> hs(self);
-  Handle<mirror::Class> primitive_array_class(hs.NewHandle(
-      AllocPrimitiveArrayClass(self, java_lang_Class)));
-  primitive_array_class->SetComponentType(GetClassRoot(primitive_class_root, this));
-  SetClassRoot(primitive_array_class_root, primitive_array_class.Get());
-  CheckSystemClass(self, primitive_array_class, descriptor);
-}
-
 ObjPtr<mirror::ClassLoader> ClassLinker::CreateWellKnownClassLoader(
     Thread* self,
     const std::vector<const DexFile*>& dex_files,
@@ -9628,99 +9816,6 @@
   }
 }
 
-class GetResolvedClassesVisitor : public ClassVisitor {
- public:
-  GetResolvedClassesVisitor(std::set<DexCacheResolvedClasses>* result, bool ignore_boot_classes)
-      : result_(result),
-        ignore_boot_classes_(ignore_boot_classes),
-        last_resolved_classes_(result->end()),
-        last_dex_file_(nullptr),
-        vlog_is_on_(VLOG_IS_ON(class_linker)),
-        extra_stats_(),
-        last_extra_stats_(extra_stats_.end()) { }
-
-  bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (!klass->IsProxyClass() &&
-        !klass->IsArrayClass() &&
-        klass->IsResolved() &&
-        !klass->IsErroneousResolved() &&
-        (!ignore_boot_classes_ || klass->GetClassLoader() != nullptr)) {
-      const DexFile& dex_file = klass->GetDexFile();
-      if (&dex_file != last_dex_file_) {
-        last_dex_file_ = &dex_file;
-        DexCacheResolvedClasses resolved_classes(
-            dex_file.GetLocation(),
-            DexFileLoader::GetBaseLocation(dex_file.GetLocation()),
-            dex_file.GetLocationChecksum(),
-            dex_file.NumMethodIds());
-        last_resolved_classes_ = result_->find(resolved_classes);
-        if (last_resolved_classes_ == result_->end()) {
-          last_resolved_classes_ = result_->insert(resolved_classes).first;
-        }
-      }
-      bool added = last_resolved_classes_->AddClass(klass->GetDexTypeIndex());
-      if (UNLIKELY(vlog_is_on_) && added) {
-        const DexCacheResolvedClasses* resolved_classes = std::addressof(*last_resolved_classes_);
-        if (last_extra_stats_ == extra_stats_.end() ||
-            last_extra_stats_->first != resolved_classes) {
-          last_extra_stats_ = extra_stats_.find(resolved_classes);
-          if (last_extra_stats_ == extra_stats_.end()) {
-            last_extra_stats_ =
-                extra_stats_.emplace(resolved_classes, ExtraStats(dex_file.NumClassDefs())).first;
-          }
-        }
-      }
-    }
-    return true;
-  }
-
-  void PrintStatistics() const {
-    if (vlog_is_on_) {
-      for (const DexCacheResolvedClasses& resolved_classes : *result_) {
-        auto it = extra_stats_.find(std::addressof(resolved_classes));
-        DCHECK(it != extra_stats_.end());
-        const ExtraStats& extra_stats = it->second;
-        LOG(INFO) << "Dex location " << resolved_classes.GetDexLocation()
-                  << " has " << resolved_classes.GetClasses().size() << " / "
-                  << extra_stats.number_of_class_defs_ << " resolved classes";
-      }
-    }
-  }
-
- private:
-  struct ExtraStats {
-    explicit ExtraStats(uint32_t number_of_class_defs)
-        : number_of_class_defs_(number_of_class_defs) {}
-    uint32_t number_of_class_defs_;
-  };
-
-  std::set<DexCacheResolvedClasses>* result_;
-  bool ignore_boot_classes_;
-  std::set<DexCacheResolvedClasses>::iterator last_resolved_classes_;
-  const DexFile* last_dex_file_;
-
-  // Statistics.
-  bool vlog_is_on_;
-  std::map<const DexCacheResolvedClasses*, ExtraStats> extra_stats_;
-  std::map<const DexCacheResolvedClasses*, ExtraStats>::iterator last_extra_stats_;
-};
-
-std::set<DexCacheResolvedClasses> ClassLinker::GetResolvedClasses(bool ignore_boot_classes) {
-  ScopedTrace trace(__PRETTY_FUNCTION__);
-  ScopedObjectAccess soa(Thread::Current());
-  ScopedAssertNoThreadSuspension ants(__FUNCTION__);
-  std::set<DexCacheResolvedClasses> ret;
-  VLOG(class_linker) << "Collecting resolved classes";
-  const uint64_t start_time = NanoTime();
-  GetResolvedClassesVisitor visitor(&ret, ignore_boot_classes);
-  VisitClasses(&visitor);
-  if (VLOG_IS_ON(class_linker)) {
-    visitor.PrintStatistics();
-    LOG(INFO) << "Collecting class profile took " << PrettyDuration(NanoTime() - start_time);
-  }
-  return ret;
-}
-
 class ClassLinker::FindVirtualMethodHolderVisitor : public ClassVisitor {
  public:
   FindVirtualMethodHolderVisitor(const ArtMethod* method, PointerSize pointer_size)
@@ -9755,6 +9850,12 @@
                              ifcount * mirror::IfTable::kMax)));
 }
 
+bool ClassLinker::IsUpdatableBootClassPathDescriptor(const char* descriptor ATTRIBUTE_UNUSED) {
+  // Should not be called on ClassLinker, only on AotClassLinker that overrides this.
+  LOG(FATAL) << "UNREACHABLE";
+  UNREACHABLE();
+}
+
 // Instantiate ClassLinker::ResolveMethod.
 template ArtMethod* ClassLinker::ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
     uint32_t method_idx,
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index f3c3ef8..4731203 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -20,16 +20,18 @@
 #include <list>
 #include <set>
 #include <string>
+#include <type_traits>
 #include <unordered_map>
 #include <unordered_set>
 #include <utility>
 #include <vector>
 
 #include "base/enums.h"
+#include "base/mutex.h"
+#include "base/intrusive_forward_list.h"
 #include "base/locks.h"
 #include "base/macros.h"
 #include "dex/class_accessor.h"
-#include "dex/dex_cache_resolved_classes.h"
 #include "dex/dex_file_types.h"
 #include "gc_root.h"
 #include "handle.h"
@@ -101,6 +103,18 @@
   virtual bool operator()(ObjPtr<mirror::Class> klass) = 0;
 };
 
+template <typename Func>
+class ClassFuncVisitor final : public ClassVisitor {
+ public:
+  explicit ClassFuncVisitor(Func func) : func_(func) {}
+  bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
+    return func_(klass);
+  }
+
+ private:
+  Func func_;
+};
+
 class ClassLoaderVisitor {
  public:
   virtual ~ClassLoaderVisitor() {}
@@ -108,6 +122,18 @@
       REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) = 0;
 };
 
+template <typename Func>
+class ClassLoaderFuncVisitor final : public ClassLoaderVisitor {
+ public:
+  explicit ClassLoaderFuncVisitor(Func func) : func_(func) {}
+  void Visit(ObjPtr<mirror::ClassLoader> cl) override REQUIRES_SHARED(Locks::mutator_lock_) {
+    func_(cl);
+  }
+
+ private:
+  Func func_;
+};
+
 class AllocatorVisitor {
  public:
   virtual ~AllocatorVisitor() {}
@@ -148,8 +174,6 @@
   // properly handle read barriers and object marking.
   bool AddImageSpace(gc::space::ImageSpace* space,
                      Handle<mirror::ClassLoader> class_loader,
-                     jobjectArray dex_elements,
-                     const char* dex_location,
                      std::vector<std::unique_ptr<const DexFile>>* out_dex_files,
                      std::string* error_msg)
       REQUIRES(!Locks::dex_lock_)
@@ -458,6 +482,9 @@
   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
       REQUIRES(!Locks::dex_lock_, !Locks::classlinker_classes_lock_, !Locks::trace_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
+  // Visits all dex-files accessible by any class-loader or the BCP.
+  template<typename Visitor>
+  void VisitKnownDexFiles(Thread* self, Visitor visitor) REQUIRES(Locks::mutator_lock_);
 
   bool IsDexFileRegistered(Thread* self, const DexFile& dex_file)
       REQUIRES(!Locks::dex_lock_)
@@ -477,6 +504,38 @@
                                                       LinearAlloc* allocator,
                                                       size_t length);
 
+  // Convenience AllocClass() overload that uses mirror::Class::InitializeClassVisitor
+  // for the class initialization and uses the `java_lang_Class` from class roots
+  // instead of an explicit argument.
+  ObjPtr<mirror::Class> AllocClass(Thread* self, uint32_t class_size)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!Roles::uninterruptible_);
+
+  // Setup the classloader, class def index, type idx so that we can insert this class in the class
+  // table.
+  void SetupClass(const DexFile& dex_file,
+                  const dex::ClassDef& dex_class_def,
+                  Handle<mirror::Class> klass,
+                  ObjPtr<mirror::ClassLoader> class_loader)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  void LoadClass(Thread* self,
+                 const DexFile& dex_file,
+                 const dex::ClassDef& dex_class_def,
+                 Handle<mirror::Class> klass)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Link the class and place it into the class-table using the given descriptor. NB if the
+  // descriptor is null the class will not be placed in any class-table. This is useful implementing
+  // obsolete classes and should not be used otherwise.
+  bool LinkClass(Thread* self,
+                 const char* descriptor,
+                 Handle<mirror::Class> klass,
+                 Handle<mirror::ObjectArray<mirror::Class>> interfaces,
+                 MutableHandle<mirror::Class>* h_new_class_out)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!Locks::classlinker_classes_lock_);
+
   ObjPtr<mirror::PointerArray> AllocPointerArray(Thread* self, size_t length)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
@@ -535,6 +594,9 @@
   // Is the given entry point the JNI dlsym lookup stub?
   bool IsJniDlsymLookupStub(const void* entry_point) const;
 
+  // Is the given entry point the JNI dlsym lookup critical stub?
+  bool IsJniDlsymLookupCriticalStub(const void* entry_point) const;
+
   const void* GetQuickToInterpreterBridgeTrampoline() const {
     return quick_to_interpreter_bridge_trampoline_;
   }
@@ -647,9 +709,6 @@
   static bool ShouldUseInterpreterEntrypoint(ArtMethod* method, const void* quick_code)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  std::set<DexCacheResolvedClasses> GetResolvedClasses(bool ignore_boot_classes)
-      REQUIRES(!Locks::dex_lock_);
-
   static bool IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
                                 ObjPtr<mirror::ClassLoader> class_loader)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -703,7 +762,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       NO_THREAD_SAFETY_ANALYSIS;
 
-  void AppendToBootClassPath(Thread* self, const DexFile& dex_file)
+  void AppendToBootClassPath(Thread* self, const DexFile* dex_file)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_);
 
@@ -719,6 +778,8 @@
     return cha_.get();
   }
 
+  void MakeInitializedClassesVisiblyInitialized(Thread* self, bool wait);
+
   struct DexCacheData {
     // Construct an invalid data object.
     DexCacheData()
@@ -744,6 +805,12 @@
     ClassTable* class_table;
   };
 
+  // Forces a class to be marked as initialized without actually running initializers. Should only
+  // be used by plugin code when creating new classes directly.
+  void ForceClassInitialized(Thread* self, Handle<mirror::Class> klass)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
+
  protected:
   virtual bool InitializeClass(Thread* self,
                                Handle<mirror::Class> klass,
@@ -758,8 +825,15 @@
                                                          std::string* error_msg)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  virtual bool CanAllocClass() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::dex_lock_) {
+    return true;
+  }
+
+  virtual bool IsUpdatableBootClassPathDescriptor(const char* descriptor);
+
  private:
   class LinkInterfaceMethodsHelper;
+  class VisiblyInitializedCallback;
 
   struct ClassLoaderData {
     jweak weak_root;  // Weak root to enable class unloading.
@@ -767,6 +841,10 @@
     LinearAlloc* allocator;
   };
 
+  void VisiblyInitializedCallbackDone(Thread* self, VisiblyInitializedCallback* callback);
+  VisiblyInitializedCallback* MarkClassInitialized(Thread* self, Handle<mirror::Class> klass)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Ensures that the supertype of 'klass' ('supertype') is verified. Returns false and throws
   // appropriate exceptions if verification failed hard. Returns true for successful verification or
   // soft-failures.
@@ -796,10 +874,19 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
 
-  // For early bootstrapping by Init.
   // If we do not allow moving classes (`art::kMovingClass` is false) or if
   // parameter `kMovable` is false (or both), the class object is allocated in
   // the non-moving space.
+  template <bool kMovable = true, class PreFenceVisitor>
+  ObjPtr<mirror::Class> AllocClass(Thread* self,
+                                   ObjPtr<mirror::Class> java_lang_Class,
+                                   uint32_t class_size,
+                                   const PreFenceVisitor& pre_fence_visitor)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!Roles::uninterruptible_);
+
+  // Convenience AllocClass() overload that uses mirror::Class::InitializeClassVisitor
+  // for the class initialization.
   template <bool kMovable = true>
   ObjPtr<mirror::Class> AllocClass(Thread* self,
                                    ObjPtr<mirror::Class> java_lang_Class,
@@ -807,16 +894,21 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
-  // Alloc* convenience functions to avoid needing to pass in ObjPtr<mirror::Class>
-  // values that are known to the ClassLinker such as classes corresponding to
-  // ClassRoot::kObjectArrayClass and ClassRoot::kJavaLangString etc.
-  ObjPtr<mirror::Class> AllocClass(Thread* self, uint32_t class_size)
+  // Allocate a primitive array class and store it in appropriate class root.
+  void AllocPrimitiveArrayClass(Thread* self,
+                                ClassRoot primitive_root,
+                                ClassRoot array_root)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
-  // Allocate a primitive array class.
-  ObjPtr<mirror::Class> AllocPrimitiveArrayClass(Thread* self,
-                                                 ObjPtr<mirror::Class> java_lang_Class)
+  // Finish setup of an array class.
+  void FinishArrayClassSetup(ObjPtr<mirror::Class> array_class)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!Roles::uninterruptible_);
+
+  // Finish setup of a core array class (Object[], Class[], String[] and
+  // primitive arrays) and insert it into the class table.
+  void FinishCoreArrayClassSetup(ClassRoot array_root)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
@@ -834,7 +926,8 @@
       REQUIRES(!Locks::dex_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
-  ObjPtr<mirror::Class> CreatePrimitiveClass(Thread* self, Primitive::Type type)
+  // Create a primitive class and store it in the appropriate class root.
+  void CreatePrimitiveClass(Thread* self, Primitive::Type type, ClassRoot primitive_root)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
@@ -845,7 +938,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
 
-  void AppendToBootClassPath(const DexFile& dex_file, ObjPtr<mirror::DexCache> dex_cache)
+  void AppendToBootClassPath(const DexFile* dex_file, ObjPtr<mirror::DexCache> dex_cache)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_);
 
@@ -854,20 +947,6 @@
   uint32_t SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file,
                                             const dex::ClassDef& dex_class_def);
 
-  // Setup the classloader, class def index, type idx so that we can insert this class in the class
-  // table.
-  void SetupClass(const DexFile& dex_file,
-                  const dex::ClassDef& dex_class_def,
-                  Handle<mirror::Class> klass,
-                  ObjPtr<mirror::ClassLoader> class_loader)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  void LoadClass(Thread* self,
-                 const DexFile& dex_file,
-                 const dex::ClassDef& dex_class_def,
-                 Handle<mirror::Class> klass)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   void LoadField(const ClassAccessor::Field& field, Handle<mirror::Class> klass, ArtField* dst)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -945,9 +1024,10 @@
                                         ObjPtr<mirror::DexCache> dex_cache)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Implementation of ResolveType() called when the type was not found in the dex cache.
-  template <typename T>
-  ObjPtr<mirror::Class> DoResolveType(dex::TypeIndex type_idx, T referrer)
+  // Implementation of ResolveType() called when the type was not found in the dex cache. May be
+  // used with ArtField*, ArtMethod* or ObjPtr<Class>.
+  template <typename RefType>
+  ObjPtr<mirror::Class> DoResolveType(dex::TypeIndex type_idx, RefType referrer)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::dex_lock_, !Roles::uninterruptible_);
   ObjPtr<mirror::Class> DoResolveType(dex::TypeIndex type_idx,
@@ -977,19 +1057,15 @@
                              ObjPtr<mirror::ClassLoader> class_loader)
       REQUIRES(Locks::dex_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  DexCacheData FindDexCacheDataLocked(const DexFile& dex_file)
+  const DexCacheData* FindDexCacheDataLocked(const DexFile& dex_file)
       REQUIRES(Locks::dex_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
-  static ObjPtr<mirror::DexCache> DecodeDexCache(Thread* self, const DexCacheData& data)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  // Called to ensure that the dex cache has been registered with the same class loader.
-  // If yes, returns the dex cache, otherwise throws InternalError and returns null.
-  ObjPtr<mirror::DexCache> EnsureSameClassLoader(Thread* self,
-                                                 ObjPtr<mirror::DexCache> dex_cache,
-                                                 const DexCacheData& data,
-                                                 ObjPtr<mirror::ClassLoader> class_loader)
-      REQUIRES(!Locks::dex_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+  static ObjPtr<mirror::DexCache> DecodeDexCacheLocked(Thread* self, const DexCacheData* data)
+      REQUIRES_SHARED(Locks::dex_lock_, Locks::mutator_lock_);
+  bool IsSameClassLoader(ObjPtr<mirror::DexCache> dex_cache,
+                         const DexCacheData* data,
+                         ObjPtr<mirror::ClassLoader> class_loader)
+      REQUIRES_SHARED(Locks::dex_lock_, Locks::mutator_lock_);
 
   bool InitializeDefaultInterfaceRecursive(Thread* self,
                                            Handle<mirror::Class> klass,
@@ -1013,14 +1089,6 @@
                                                      ObjPtr<mirror::Class> klass2)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool LinkClass(Thread* self,
-                 const char* descriptor,
-                 Handle<mirror::Class> klass,
-                 Handle<mirror::ObjectArray<mirror::Class>> interfaces,
-                 MutableHandle<mirror::Class>* h_new_class_out)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!Locks::classlinker_classes_lock_);
-
   bool LinkSuperClass(Handle<mirror::Class> klass)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -1246,20 +1314,6 @@
   void SetClassRoot(ClassRoot class_root, ObjPtr<mirror::Class> klass)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Allocate primitive array class for primitive with class root
-  // `primitive_class_root`, and associate it to class root
-  // `primitive_array_class_root`.
-  //
-  // Also check this class returned when searching system classes for
-  // `descriptor` matches the allocated class.
-  void AllocAndSetPrimitiveArrayClassRoot(Thread* self,
-                                          ObjPtr<mirror::Class> java_lang_Class,
-                                          ClassRoot primitive_array_class_root,
-                                          ClassRoot primitive_class_root,
-                                          const char* descriptor)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!Roles::uninterruptible_);
-
   // Return the quick generic JNI stub for testing.
   const void* GetRuntimeQuickGenericJniStub() const;
 
@@ -1372,6 +1426,8 @@
 
   // Trampolines within the image the bounce to runtime entrypoints. Done so that there is a single
   // patch point within the image. TODO: make these proper relocations.
+  const void* jni_dlsym_lookup_trampoline_;
+  const void* jni_dlsym_lookup_critical_trampoline_;
   const void* quick_resolution_trampoline_;
   const void* quick_imt_conflict_trampoline_;
   const void* quick_generic_jni_trampoline_;
@@ -1380,6 +1436,13 @@
   // Image pointer size.
   PointerSize image_pointer_size_;
 
+  // Classes to transition from ClassStatus::kInitialized to ClassStatus::kVisiblyInitialized.
+  Mutex visibly_initialized_callback_lock_;
+  std::unique_ptr<VisiblyInitializedCallback> visibly_initialized_callback_
+      GUARDED_BY(visibly_initialized_callback_lock_);
+  IntrusiveForwardList<VisiblyInitializedCallback> running_visibly_initialized_callbacks_
+      GUARDED_BY(visibly_initialized_callback_lock_);
+
   std::unique_ptr<ClassHierarchyAnalysis> cha_;
 
   class FindVirtualMethodHolderVisitor;
@@ -1401,6 +1464,10 @@
  public:
   virtual ~ClassLoadCallback() {}
 
+  // Called immediately before beginning class-definition and immediately before returning from it.
+  virtual void BeginDefineClass() REQUIRES_SHARED(Locks::mutator_lock_) {}
+  virtual void EndDefineClass() REQUIRES_SHARED(Locks::mutator_lock_) {}
+
   // If set we will replace initial_class_def & initial_dex_file with the final versions. The
   // callback author is responsible for ensuring these are allocated in such a way they can be
   // cleaned up if another transformation occurs. Note that both must be set or null/unchanged on
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 1a91abe..931f6df 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -92,7 +92,7 @@
     EXPECT_TRUE(primitive->GetSuperClass() == nullptr);
     EXPECT_FALSE(primitive->HasSuperClass());
     EXPECT_TRUE(primitive->GetClassLoader() == nullptr);
-    EXPECT_EQ(ClassStatus::kInitialized, primitive->GetStatus());
+    EXPECT_EQ(ClassStatus::kVisiblyInitialized, primitive->GetStatus());
     EXPECT_FALSE(primitive->IsErroneous());
     EXPECT_TRUE(primitive->IsLoaded());
     EXPECT_TRUE(primitive->IsResolved());
@@ -131,7 +131,8 @@
     EXPECT_TRUE(JavaLangObject->GetSuperClass() == nullptr);
     EXPECT_FALSE(JavaLangObject->HasSuperClass());
     EXPECT_TRUE(JavaLangObject->GetClassLoader() == nullptr);
-    EXPECT_EQ(ClassStatus::kInitialized, JavaLangObject->GetStatus());
+    class_linker_->MakeInitializedClassesVisiblyInitialized(Thread::Current(), /*wait=*/ true);
+    EXPECT_EQ(ClassStatus::kVisiblyInitialized, JavaLangObject->GetStatus());
     EXPECT_FALSE(JavaLangObject->IsErroneous());
     EXPECT_TRUE(JavaLangObject->IsLoaded());
     EXPECT_TRUE(JavaLangObject->IsResolved());
@@ -207,7 +208,7 @@
     EXPECT_TRUE(array->HasSuperClass());
     ASSERT_TRUE(array->GetComponentType() != nullptr);
     ASSERT_GT(strlen(array->GetComponentType()->GetDescriptor(&temp)), 0U);
-    EXPECT_EQ(ClassStatus::kInitialized, array->GetStatus());
+    EXPECT_EQ(ClassStatus::kVisiblyInitialized, array->GetStatus());
     EXPECT_FALSE(array->IsErroneous());
     EXPECT_TRUE(array->IsLoaded());
     EXPECT_TRUE(array->IsResolved());
@@ -478,6 +479,7 @@
     ObjPtr<mirror::Class> klass =
         Runtime::Current()->GetClassLinker()->FindSystemClass(self, class_descriptor.c_str());
     CHECK(klass != nullptr) << class_descriptor;
+    CHECK(klass->IsMirrored()) << class_descriptor;
 
     bool error = false;
 
@@ -610,6 +612,9 @@
 
 struct ClassExtOffsets : public CheckOffsets<mirror::ClassExt> {
   ClassExtOffsets() : CheckOffsets<mirror::ClassExt>(false, "Ldalvik/system/ClassExt;") {
+    addOffset(OFFSETOF_MEMBER(mirror::ClassExt, instance_jfield_ids_), "instanceJfieldIDs");
+    addOffset(OFFSETOF_MEMBER(mirror::ClassExt, jmethod_ids_), "jmethodIDs");
+    addOffset(OFFSETOF_MEMBER(mirror::ClassExt, obsolete_class_), "obsoleteClass");
     addOffset(OFFSETOF_MEMBER(mirror::ClassExt, obsolete_dex_caches_), "obsoleteDexCaches");
     addOffset(OFFSETOF_MEMBER(mirror::ClassExt, obsolete_methods_), "obsoleteMethods");
     addOffset(OFFSETOF_MEMBER(mirror::ClassExt, original_dex_file_), "originalDexFile");
@@ -617,6 +622,7 @@
               "preRedefineClassDefIndex");
     addOffset(OFFSETOF_MEMBER(mirror::ClassExt, pre_redefine_dex_file_ptr_),
               "preRedefineDexFilePtr");
+    addOffset(OFFSETOF_MEMBER(mirror::ClassExt, static_jfield_ids_), "staticJfieldIDs");
     addOffset(OFFSETOF_MEMBER(mirror::ClassExt, verify_error_), "verifyError");
   }
 };
@@ -666,6 +672,7 @@
 
 struct DexCacheOffsets : public CheckOffsets<mirror::DexCache> {
   DexCacheOffsets() : CheckOffsets<mirror::DexCache>(false, "Ljava/lang/DexCache;") {
+    addOffset(OFFSETOF_MEMBER(mirror::DexCache, class_loader_), "classLoader");
     addOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_file_), "dexFile");
     addOffset(OFFSETOF_MEMBER(mirror::DexCache, location_), "location");
     addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_preresolved_strings_), "numPreResolvedStrings");
@@ -713,8 +720,8 @@
 struct FieldOffsets : public CheckOffsets<mirror::Field> {
   FieldOffsets() : CheckOffsets<mirror::Field>(false, "Ljava/lang/reflect/Field;") {
     addOffset(OFFSETOF_MEMBER(mirror::Field, access_flags_), "accessFlags");
+    addOffset(OFFSETOF_MEMBER(mirror::Field, art_field_index_), "artFieldIndex");
     addOffset(OFFSETOF_MEMBER(mirror::Field, declaring_class_), "declaringClass");
-    addOffset(OFFSETOF_MEMBER(mirror::Field, dex_field_index_), "dexFieldIndex");
     addOffset(OFFSETOF_MEMBER(mirror::Field, offset_), "offset");
     addOffset(OFFSETOF_MEMBER(mirror::Field, type_), "type");
   }
@@ -1522,7 +1529,7 @@
     ASSERT_TRUE(dex_cache != nullptr);
   }
   // Make a copy of the dex cache and change the name.
-  dex_cache.Assign(dex_cache->Clone(soa.Self())->AsDexCache());
+  dex_cache.Assign(mirror::Object::Clone(dex_cache, soa.Self())->AsDexCache());
   const uint16_t data[] = { 0x20AC, 0x20A1 };
   Handle<mirror::String> location(hs.NewHandle(mirror::String::AllocFromUtf16(soa.Self(),
                                                                               arraysize(data),
diff --git a/runtime/class_loader_context.cc b/runtime/class_loader_context.cc
index 925f25e..7c723d3 100644
--- a/runtime/class_loader_context.cc
+++ b/runtime/class_loader_context.cc
@@ -16,12 +16,15 @@
 
 #include "class_loader_context.h"
 
+#include <algorithm>
+
 #include <android-base/parseint.h>
 #include <android-base/strings.h>
 
 #include "art_field-inl.h"
 #include "base/casts.h"
 #include "base/dchecked_vector.h"
+#include "base/file_utils.h"
 #include "base/stl_util.h"
 #include "class_linker.h"
 #include "class_loader_utils.h"
@@ -32,6 +35,7 @@
 #include "handle_scope-inl.h"
 #include "jni/jni_internal.h"
 #include "mirror/class_loader-inl.h"
+#include "mirror/object.h"
 #include "mirror/object_array-alloc-inl.h"
 #include "nativehelper/scoped_local_ref.h"
 #include "oat_file_assistant.h"
@@ -355,7 +359,7 @@
       // The class loader spec contains shared libraries. Find the matching closing
       // shared library marker for it.
 
-      uint32_t shared_library_close =
+      size_t shared_library_close =
           FindMatchingSharedLibraryCloseMarker(remaining, first_shared_library_open);
       if (shared_library_close == std::string::npos) {
         LOG(ERROR) << "Invalid class loader spec: " << class_loader_spec;
@@ -453,10 +457,12 @@
       std::string error_msg;
       // When opening the dex files from the context we expect their checksum to match their
       // contents. So pass true to verify_checksum.
+      // We don't need to do structural dex file verification, we only need to
+      // check the checksum, so pass false to verify.
       if (fd < 0) {
         if (!dex_file_loader.Open(location.c_str(),
                                   location.c_str(),
-                                  Runtime::Current()->IsVerificationEnabled(),
+                                  /*verify=*/ false,
                                   /*verify_checksum=*/ true,
                                   &error_msg,
                                   &info->opened_dex_files)) {
@@ -482,7 +488,7 @@
         }
       } else if (!dex_file_loader.Open(fd,
                                        location.c_str(),
-                                       Runtime::Current()->IsVerificationEnabled(),
+                                       /*verify=*/ false,
                                        /*verify_checksum=*/ true,
                                        &error_msg,
                                        &info->opened_dex_files)) {
@@ -567,6 +573,46 @@
   return EncodeContext(base_dir, /*for_dex2oat=*/ false, stored_context);
 }
 
+std::map<std::string, std::string>
+ClassLoaderContext::EncodeClassPathContexts(const std::string& base_dir) const {
+  CheckDexFilesOpened("EncodeClassPathContexts");
+  if (class_loader_chain_ == nullptr) {
+    return std::map<std::string, std::string>{};
+  }
+
+  std::map<std::string, std::string> results;
+  std::vector<std::string> dex_locations;
+  std::vector<uint32_t> checksums;
+  dex_locations.reserve(class_loader_chain_->original_classpath.size());
+
+  std::ostringstream encoded_libs_and_parent_stream;
+  EncodeSharedLibAndParent(*class_loader_chain_,
+                           base_dir,
+                           /*for_dex2oat=*/true,
+                           /*stored_info=*/nullptr,
+                           encoded_libs_and_parent_stream);
+  std::string encoded_libs_and_parent(encoded_libs_and_parent_stream.str());
+
+  std::set<std::string> seen_locations;
+  for (const std::string& path : class_loader_chain_->classpath) {
+    // The classpath will contain multiple entries for multidex files, so make sure this is the
+    // first time we're seeing this file.
+    const std::string base_location(DexFileLoader::GetBaseLocation(path));
+    if (!seen_locations.insert(base_location).second) {
+      continue;
+    }
+
+    std::ostringstream out;
+    EncodeClassPath(base_dir, dex_locations, checksums, class_loader_chain_->type, out);
+    out << encoded_libs_and_parent;
+    results.emplace(base_location, out.str());
+
+    dex_locations.push_back(base_location);
+  }
+
+  return results;
+}
+
 std::string ClassLoaderContext::EncodeContext(const std::string& base_dir,
                                               bool for_dex2oat,
                                               ClassLoaderContext* stored_context) const {
@@ -598,13 +644,44 @@
   return out.str();
 }
 
+void ClassLoaderContext::EncodeClassPath(const std::string& base_dir,
+                                         const std::vector<std::string>& dex_locations,
+                                         const std::vector<uint32_t>& checksums,
+                                         ClassLoaderType type,
+                                         std::ostringstream& out) const {
+  CHECK(checksums.empty() || dex_locations.size() == checksums.size());
+  out << GetClassLoaderTypeName(type);
+  out << kClassLoaderOpeningMark;
+  const size_t len = dex_locations.size();
+  for (size_t k = 0; k < len; k++) {
+    std::string location = dex_locations[k];
+    if (k > 0) {
+      out << kClasspathSeparator;
+    }
+    if (type == kInMemoryDexClassLoader) {
+      out << kInMemoryDexClassLoaderDexLocationMagic;
+    } else if (!base_dir.empty()
+               && location.substr(0, base_dir.length()) == base_dir) {
+      // Find paths that were relative and convert them back from absolute.
+      out << location.substr(base_dir.length() + 1).c_str();
+    } else {
+      out << location.c_str();
+    }
+    if (!checksums.empty()) {
+      out << kDexFileChecksumSeparator;
+      out << checksums[k];
+    }
+  }
+  out << kClassLoaderClosingMark;
+}
+
 void ClassLoaderContext::EncodeContextInternal(const ClassLoaderInfo& info,
                                                const std::string& base_dir,
                                                bool for_dex2oat,
                                                ClassLoaderInfo* stored_info,
                                                std::ostringstream& out) const {
-  out << GetClassLoaderTypeName(info.type);
-  out << kClassLoaderOpeningMark;
+  std::vector<std::string> locations;
+  std::vector<uint32_t> checksums;
   std::set<std::string> seen_locations;
   SafeMap<std::string, std::string> remap;
   if (stored_info != nullptr) {
@@ -613,6 +690,7 @@
       remap.Put(info.original_classpath[k], stored_info->classpath[k]);
     }
   }
+
   for (size_t k = 0; k < info.opened_dex_files.size(); k++) {
     const std::unique_ptr<const DexFile>& dex_file = info.opened_dex_files[k];
     if (for_dex2oat) {
@@ -624,6 +702,7 @@
         continue;
       }
     }
+
     std::string location = dex_file->GetLocation();
     // If there is a stored class loader remap, fix up the multidex strings.
     if (!remap.empty()) {
@@ -632,25 +711,22 @@
       CHECK(it != remap.end()) << base_dex_location;
       location = it->second + DexFileLoader::GetMultiDexSuffix(location);
     }
-    if (k > 0) {
-      out << kClasspathSeparator;
-    }
-    if (info.type == kInMemoryDexClassLoader) {
-      out << kInMemoryDexClassLoaderDexLocationMagic;
-    } else if (!base_dir.empty() && location.substr(0, base_dir.length()) == base_dir) {
-      // Find paths that were relative and convert them back from absolute.
-      out << location.substr(base_dir.length() + 1).c_str();
-    } else {
-      out << location.c_str();
-    }
+    locations.emplace_back(std::move(location));
+
     // dex2oat does not need the checksums.
     if (!for_dex2oat) {
-      out << kDexFileChecksumSeparator;
-      out << dex_file->GetLocationChecksum();
+      checksums.push_back(dex_file->GetLocationChecksum());
     }
   }
-  out << kClassLoaderClosingMark;
+  EncodeClassPath(base_dir, locations, checksums, info.type, out);
+  EncodeSharedLibAndParent(info, base_dir, for_dex2oat, stored_info, out);
+}
 
+void ClassLoaderContext::EncodeSharedLibAndParent(const ClassLoaderInfo& info,
+                                                  const std::string& base_dir,
+                                                  bool for_dex2oat,
+                                                  ClassLoaderInfo* stored_info,
+                                                  std::ostringstream& out) const {
   if (!info.shared_libraries.empty()) {
     out << kClassLoaderSharedLibraryOpeningMark;
     for (uint32_t i = 0; i < info.shared_libraries.size(); ++i) {
@@ -905,9 +981,7 @@
                                                     Handle<mirror::ClassLoader> class_loader,
                                                     std::vector<const DexFile*>* out_dex_files)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-  CHECK(IsPathOrDexClassLoader(soa, class_loader) ||
-        IsDelegateLastClassLoader(soa, class_loader) ||
-        IsInMemoryDexClassLoader(soa, class_loader));
+  CHECK(IsInstanceOfBaseDexClassLoader(soa, class_loader));
 
   // All supported class loaders inherit from BaseDexClassLoader.
   // We need to get the DexPathList and loop through it.
@@ -939,8 +1013,7 @@
     StackHandleScope<1> hs(soa.Self());
     Handle<mirror::ObjectArray<mirror::Object>> dex_elements(
         hs.NewHandle(dex_elements_obj->AsObjectArray<mirror::Object>()));
-    for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
-      ObjPtr<mirror::Object> element = dex_elements->GetWithoutChecks(i);
+    for (auto element : dex_elements.Iterate<mirror::Object>()) {
       if (element == nullptr) {
         // Should never happen, log an error and break.
         // TODO(calin): It's unclear if we should just assert here.
@@ -974,8 +1047,7 @@
   const ObjPtr<mirror::Class> dexfile_class = soa.Decode<mirror::Class>(
       WellKnownClasses::dalvik_system_DexFile);
 
-  for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
-    ObjPtr<mirror::Object> element = dex_elements->GetWithoutChecks(i);
+  for (auto element : dex_elements.Iterate<mirror::Object>()) {
     // We can hit a null element here because this is invoked with a partially filled dex_elements
     // array from DexPathList. DexPathList will open each dex sequentially, each time passing the
     // list of dex files which were opened before.
@@ -1085,8 +1157,8 @@
     Handle<mirror::ObjectArray<mirror::ClassLoader>> shared_libraries =
         hs.NewHandle(raw_shared_libraries->AsObjectArray<mirror::ClassLoader>());
     MutableHandle<mirror::ClassLoader> temp_loader = hs.NewHandle<mirror::ClassLoader>(nullptr);
-    for (int32_t i = 0; i < shared_libraries->GetLength(); ++i) {
-      temp_loader.Assign(shared_libraries->Get(i));
+    for (auto library : shared_libraries.Iterate<mirror::ClassLoader>()) {
+      temp_loader.Assign(library);
       if (!CreateInfoFromClassLoader(
               soa, temp_loader, null_dex_elements, info, /*is_shared_library=*/ true)) {
         return false;
@@ -1122,6 +1194,38 @@
   return result;
 }
 
+std::map<std::string, std::string>
+ClassLoaderContext::EncodeClassPathContextsForClassLoader(jobject class_loader) {
+  std::unique_ptr<ClassLoaderContext> clc =
+      ClassLoaderContext::CreateContextForClassLoader(class_loader, nullptr);
+  if (clc != nullptr) {
+    return clc->EncodeClassPathContexts("");
+  }
+
+  ScopedObjectAccess soa(Thread::Current());
+  StackHandleScope<1> hs(soa.Self());
+  Handle<mirror::ClassLoader> h_class_loader =
+      hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader));
+  if (!IsInstanceOfBaseDexClassLoader(soa, h_class_loader)) {
+    return std::map<std::string, std::string>{};
+  }
+
+  std::vector<const DexFile*> dex_files_loaded;
+  CollectDexFilesFromSupportedClassLoader(soa, h_class_loader, &dex_files_loaded);
+
+  std::map<std::string, std::string> results;
+  for (const DexFile* dex_file : dex_files_loaded) {
+    results.emplace(DexFileLoader::GetBaseLocation(dex_file->GetLocation()),
+                    ClassLoaderContext::kUnsupportedClassLoaderContextEncoding);
+  }
+  return results;
+}
+
+bool ClassLoaderContext::IsValidEncoding(const std::string& possible_encoded_class_loader_context) {
+  return ClassLoaderContext::Create(possible_encoded_class_loader_context.c_str()) != nullptr
+      || possible_encoded_class_loader_context == kUnsupportedClassLoaderContextEncoding;
+}
+
 ClassLoaderContext::VerificationResult ClassLoaderContext::VerifyClassLoaderContextMatch(
     const std::string& context_spec,
     bool verify_names,
@@ -1175,6 +1279,43 @@
          (std::string_view(path).substr(/*pos*/ path.size() - suffix.size()) == suffix);
 }
 
+// Returns true if the given dex names are mathing, false otherwise.
+static bool AreDexNameMatching(const std::string& actual_dex_name,
+                               const std::string& expected_dex_name) {
+  // Compute the dex location that must be compared.
+  // We shouldn't do a naive comparison `actual_dex_name == expected_dex_name`
+  // because even if they refer to the same file, one could be encoded as a relative location
+  // and the other as an absolute one.
+  bool is_dex_name_absolute = IsAbsoluteLocation(actual_dex_name);
+  bool is_expected_dex_name_absolute = IsAbsoluteLocation(expected_dex_name);
+  bool dex_names_match = false;
+
+  if (is_dex_name_absolute == is_expected_dex_name_absolute) {
+    // If both locations are absolute or relative then compare them as they are.
+    // This is usually the case for: shared libraries and secondary dex files.
+    dex_names_match = (actual_dex_name == expected_dex_name);
+  } else if (is_dex_name_absolute) {
+    // The runtime name is absolute but the compiled name (the expected one) is relative.
+    // This is the case for split apks which depend on base or on other splits.
+    dex_names_match =
+        AbsolutePathHasRelativeSuffix(actual_dex_name, expected_dex_name);
+  } else if (is_expected_dex_name_absolute) {
+    // The runtime name is relative but the compiled name is absolute.
+    // There is no expected use case that would end up here as dex files are always loaded
+    // with their absolute location. However, be tolerant and do the best effort (in case
+    // there are unexpected new use case...).
+    dex_names_match =
+        AbsolutePathHasRelativeSuffix(expected_dex_name, actual_dex_name);
+  } else {
+    // Both locations are relative. In this case there's not much we can be sure about
+    // except that the names are the same. The checksum will ensure that the files are
+    // are same. This should not happen outside testing and manual invocations.
+    dex_names_match = (actual_dex_name == expected_dex_name);
+  }
+
+  return dex_names_match;
+}
+
 bool ClassLoaderContext::ClassLoaderInfoMatch(
     const ClassLoaderInfo& info,
     const ClassLoaderInfo& expected_info,
@@ -1203,37 +1344,7 @@
 
   if (verify_names) {
     for (size_t k = 0; k < info.classpath.size(); k++) {
-      // Compute the dex location that must be compared.
-      // We shouldn't do a naive comparison `info.classpath[k] == expected_info.classpath[k]`
-      // because even if they refer to the same file, one could be encoded as a relative location
-      // and the other as an absolute one.
-      bool is_dex_name_absolute = IsAbsoluteLocation(info.classpath[k]);
-      bool is_expected_dex_name_absolute = IsAbsoluteLocation(expected_info.classpath[k]);
-      bool dex_names_match = false;
-
-
-      if (is_dex_name_absolute == is_expected_dex_name_absolute) {
-        // If both locations are absolute or relative then compare them as they are.
-        // This is usually the case for: shared libraries and secondary dex files.
-        dex_names_match = (info.classpath[k] == expected_info.classpath[k]);
-      } else if (is_dex_name_absolute) {
-        // The runtime name is absolute but the compiled name (the expected one) is relative.
-        // This is the case for split apks which depend on base or on other splits.
-        dex_names_match =
-            AbsolutePathHasRelativeSuffix(info.classpath[k], expected_info.classpath[k]);
-      } else if (is_expected_dex_name_absolute) {
-        // The runtime name is relative but the compiled name is absolute.
-        // There is no expected use case that would end up here as dex files are always loaded
-        // with their absolute location. However, be tolerant and do the best effort (in case
-        // there are unexpected new use case...).
-        dex_names_match =
-            AbsolutePathHasRelativeSuffix(expected_info.classpath[k], info.classpath[k]);
-      } else {
-        // Both locations are relative. In this case there's not much we can be sure about
-        // except that the names are the same. The checksum will ensure that the files are
-        // are same. This should not happen outside testing and manual invocations.
-        dex_names_match = (info.classpath[k] == expected_info.classpath[k]);
-      }
+      bool dex_names_match = AreDexNameMatching(info.classpath[k], expected_info.classpath[k]);
 
       // Compare the locations.
       if (!dex_names_match) {
@@ -1291,4 +1402,35 @@
   }
 }
 
+std::set<const DexFile*> ClassLoaderContext::CheckForDuplicateDexFiles(
+    const std::vector<const DexFile*>& dex_files_to_check) {
+  DCHECK(dex_files_open_attempted_);
+  DCHECK(dex_files_open_result_);
+
+  std::set<const DexFile*> result;
+
+  // If we are the special shared library or the chain is null there's nothing
+  // we can check, return an empty list;
+  // The class loader chain can be null if there were issues when creating the
+  // class loader context (e.g. tests).
+  if (special_shared_library_ || class_loader_chain_ == nullptr) {
+    return result;
+  }
+
+  // We only check the current Class Loader which the first one in the chain.
+  // Cross class-loader duplicates may be a valid scenario (though unlikely
+  // in the Android world) - and as such we decide not to warn on them.
+  ClassLoaderInfo* info = class_loader_chain_.get();
+  for (size_t k = 0; k < info->classpath.size(); k++) {
+    for (const DexFile* dex_file : dex_files_to_check) {
+      if (info->checksums[k] == dex_file->GetLocationChecksum()
+          && AreDexNameMatching(info->classpath[k], dex_file->GetLocation())) {
+        result.insert(dex_file);
+      }
+    }
+  }
+
+  return result;
+}
+
 }  // namespace art
diff --git a/runtime/class_loader_context.h b/runtime/class_loader_context.h
index 224b4d6..d564ec8 100644
--- a/runtime/class_loader_context.h
+++ b/runtime/class_loader_context.h
@@ -19,6 +19,7 @@
 
 #include <string>
 #include <vector>
+#include <set>
 
 #include "arch/instruction_set.h"
 #include "base/dchecked_vector.h"
@@ -49,6 +50,12 @@
     kInMemoryDexClassLoader = 3
   };
 
+  // Special encoding used to denote a foreign ClassLoader was found when trying to encode class
+  // loader contexts for each classpath element in a ClassLoader. See
+  // EncodeClassPathContextsForClassLoader. Keep in sync with PackageDexUsage in the framework.
+  static constexpr const char* kUnsupportedClassLoaderContextEncoding =
+      "=UnsupportedClassLoaderContext=";
+
   ~ClassLoaderContext();
 
   // Opens requested class path files and appends them to ClassLoaderInfo::opened_dex_files.
@@ -121,6 +128,25 @@
   // Should only be called if OpenDexFiles() returned true.
   std::string EncodeContextForDex2oat(const std::string& base_dir) const;
 
+  // Encodes the contexts for each of the classpath elements in the child-most
+  // classloader. Under the hood EncodeContextForDex2oat is used, so no checksums
+  // will be encoded.
+  // Should only be called if the dex files are opened (either via OpenDexFiles() or by creating the
+  // context from a live class loader).
+  // Notably, for each classpath element the encoded classloader context will contain only the
+  // elements that appear before it in the containing classloader. E.g. if `this` contains
+  // (from child to parent):
+  //
+  // PathClassLoader { multidex.apk!classes.dex, multidex.apk!classes2.dex, foo.dex, bar.dex } ->
+  //    PathClassLoader { baz.dex } -> BootClassLoader
+  //
+  // then the return value will look like:
+  //
+  // `{ "multidex.apk": "PCL[];PCL[baz.dex]",
+  //    "foo.dex"     : "PCL[multidex.apk];PCL[baz.dex]",
+  //    "bar.dex"     : "PCL[multidex.apk:foo.dex];PCL[baz.dex]" }`
+  std::map<std::string, std::string> EncodeClassPathContexts(const std::string& base_dir) const;
+
   // Flattens the opened dex files into the given vector.
   // Should only be called if OpenDexFiles() returned true.
   std::vector<const DexFile*> FlattenOpenedDexFiles() const;
@@ -141,6 +167,12 @@
                                                    bool verify_names = true,
                                                    bool verify_checksums = true) const;
 
+  // Checks if any of the given dex files is already loaded in the current class loader context.
+  // It only checks the first class loader.
+  // Returns the list of duplicate dex files (empty if there are no duplicates).
+  std::set<const DexFile*> CheckForDuplicateDexFiles(
+      const std::vector<const DexFile*>& dex_files);
+
   // Creates the class loader context from the given string.
   // The format: ClassLoaderType1[ClasspathElem1:ClasspathElem2...];ClassLoaderType2[...]...
   // ClassLoaderType is either "PCL" (PathClassLoader) or "DLC" (DelegateLastClassLoader).
@@ -168,6 +200,19 @@
   // This will return a context with a single and empty PathClassLoader.
   static std::unique_ptr<ClassLoaderContext> Default();
 
+  // Encodes the contexts for each of the classpath elements in `class_loader`. See
+  // ClassLoaderContext::EncodeClassPathContexts for more information about the return value.
+  //
+  // If `class_loader` does not derive from BaseDexClassLoader then an empty map is returned.
+  // Otherwise if a foreign ClassLoader is found in the class loader chain then the results values
+  // will all be ClassLoaderContext::kUnsupportedClassLoaderContextEncoding.
+  static std::map<std::string, std::string> EncodeClassPathContextsForClassLoader(
+      jobject class_loader);
+
+  // Returns whether `encoded_class_loader_context` is a valid encoded ClassLoaderContext or
+  // EncodedUnsupportedClassLoaderContext.
+  static bool IsValidEncoding(const std::string& possible_encoded_class_loader_context);
+
   struct ClassLoaderInfo {
     // The type of this class loader.
     ClassLoaderType type;
@@ -266,6 +311,21 @@
                              ClassLoaderInfo* stored_info,
                              std::ostringstream& out) const;
 
+  // Encodes e.g. PCL[foo.dex:bar.dex]
+  void EncodeClassPath(const std::string& base_dir,
+                       const std::vector<std::string>& dex_locations,
+                       const std::vector<uint32_t>& checksums,
+                       ClassLoaderType type,
+                       std::ostringstream& out) const;
+
+  // Encodes the shared libraries classloaders and the parent classloader if
+  // either are present in info, e.g. {PCL[foo.dex]#PCL[bar.dex]};PCL[baz.dex]
+  void EncodeSharedLibAndParent(const ClassLoaderInfo& info,
+                                const std::string& base_dir,
+                                bool for_dex2oat,
+                                ClassLoaderInfo* stored_info,
+                                std::ostringstream& out) const;
+
   bool ClassLoaderInfoMatch(const ClassLoaderInfo& info,
                             const ClassLoaderInfo& expected_info,
                             const std::string& context_spec,
diff --git a/runtime/class_loader_context_test.cc b/runtime/class_loader_context_test.cc
index 0083278..035c513 100644
--- a/runtime/class_loader_context_test.cc
+++ b/runtime/class_loader_context_test.cc
@@ -1136,6 +1136,200 @@
   ASSERT_EQ(expected_encoding, context->EncodeContextForDex2oat(""));
 }
 
+TEST_F(ClassLoaderContextTest, EncodeContextsSinglePath) {
+  jobject class_loader = LoadDexInPathClassLoader("Main", nullptr);
+  std::unique_ptr<ClassLoaderContext> context =
+      CreateContextForClassLoader(class_loader);
+
+  std::map<std::string, std::string> encodings = context->EncodeClassPathContexts("");
+  ASSERT_EQ(1u, encodings.size());
+  ASSERT_EQ("PCL[]", encodings.at(GetTestDexFileName("Main")));
+}
+
+TEST_F(ClassLoaderContextTest, EncodeContextsMultiDex) {
+  jobject class_loader = LoadDexInPathClassLoader("MultiDex", nullptr);
+  std::unique_ptr<ClassLoaderContext> context =
+      CreateContextForClassLoader(class_loader);
+
+  std::map<std::string, std::string> encodings = context->EncodeClassPathContexts("");
+  ASSERT_EQ(1u, encodings.size());
+  ASSERT_EQ("PCL[]", encodings.at(GetTestDexFileName("MultiDex")));
+}
+
+TEST_F(ClassLoaderContextTest, EncodeContextsRepeatedMultiDex) {
+  jobject top_class_loader = LoadDexInPathClassLoader("MultiDex", nullptr);
+  jobject middle_class_loader =
+      LoadDexInPathClassLoader("Main", top_class_loader);
+  jobject bottom_class_loader =
+      LoadDexInPathClassLoader("MultiDex", middle_class_loader);
+  std::unique_ptr<ClassLoaderContext> context =
+      CreateContextForClassLoader(bottom_class_loader);
+
+  std::map<std::string, std::string> encodings = context->EncodeClassPathContexts("");
+  ASSERT_EQ(1u, encodings.size());
+
+  std::string main_dex_name = GetTestDexFileName("Main");
+  std::string multidex_dex_name = GetTestDexFileName("MultiDex");
+  ASSERT_EQ(
+      "PCL[];PCL[" + main_dex_name + "];PCL[" + multidex_dex_name + "]",
+      encodings.at(multidex_dex_name));
+}
+
+TEST_F(ClassLoaderContextTest, EncodeContextsSinglePathWithShared) {
+  jobject class_loader_a = LoadDexInPathClassLoader("MyClass", nullptr);
+
+  ScopedObjectAccess soa(Thread::Current());
+  StackHandleScope<1> hs(soa.Self());
+  Handle<mirror::ObjectArray<mirror::ClassLoader>> libraries = hs.NewHandle(
+    mirror::ObjectArray<mirror::ClassLoader>::Alloc(
+        soa.Self(),
+        GetClassRoot<mirror::ObjectArray<mirror::ClassLoader>>(),
+        1));
+  libraries->Set(0, soa.Decode<mirror::ClassLoader>(class_loader_a));
+
+  jobject class_loader_b = LoadDexInPathClassLoader(
+      "Main", nullptr, soa.AddLocalReference<jobject>(libraries.Get()));
+
+  std::unique_ptr<ClassLoaderContext> context = CreateContextForClassLoader(class_loader_b);
+
+  std::map<std::string, std::string> encodings = context->EncodeClassPathContexts("");
+  ASSERT_EQ(1u, encodings.size());
+  ASSERT_EQ(
+      "PCL[]{PCL[" + GetTestDexFileName("MyClass") + "]}",
+      encodings.at(GetTestDexFileName("Main")));
+}
+
+TEST_F(ClassLoaderContextTest, EncodeContextsMultiplePaths) {
+  jobject class_loader = LoadDexInPathClassLoader(
+      std::vector<std::string>{ "Main", "MultiDex"}, nullptr);
+
+  std::unique_ptr<ClassLoaderContext> context =
+      CreateContextForClassLoader(class_loader);
+
+  std::map<std::string, std::string> encodings = context->EncodeClassPathContexts("");
+  ASSERT_EQ(2u, encodings.size());
+  ASSERT_EQ("PCL[]", encodings.at(GetTestDexFileName("Main")));
+  ASSERT_EQ(
+      "PCL[" + GetTestDexFileName("Main") + "]", encodings.at(GetTestDexFileName("MultiDex")));
+}
+
+TEST_F(ClassLoaderContextTest, EncodeContextsMultiplePathsWithShared) {
+  jobject class_loader_a = LoadDexInPathClassLoader("MyClass", nullptr);
+
+  ScopedObjectAccess soa(Thread::Current());
+  StackHandleScope<1> hs(soa.Self());
+  Handle<mirror::ObjectArray<mirror::ClassLoader>> libraries = hs.NewHandle(
+    mirror::ObjectArray<mirror::ClassLoader>::Alloc(
+        soa.Self(),
+        GetClassRoot<mirror::ObjectArray<mirror::ClassLoader>>(),
+        1));
+  libraries->Set(0, soa.Decode<mirror::ClassLoader>(class_loader_a));
+
+  jobject class_loader_b = LoadDexInPathClassLoader(
+      std::vector<std::string> { "Main", "MultiDex" },
+      nullptr, soa.AddLocalReference<jobject>(libraries.Get()));
+
+  std::unique_ptr<ClassLoaderContext> context =
+      CreateContextForClassLoader(class_loader_b);
+
+  std::map<std::string, std::string> encodings = context->EncodeClassPathContexts("");
+  ASSERT_EQ(2u, encodings.size());
+  const std::string context_suffix =
+      "{PCL[" + GetTestDexFileName("MyClass") + "]}";
+  ASSERT_EQ("PCL[]" + context_suffix, encodings.at(GetTestDexFileName("Main")));
+  ASSERT_EQ(
+      "PCL[" + GetTestDexFileName("Main") + "]" + context_suffix,
+      encodings.at(GetTestDexFileName("MultiDex")));
+}
+
+TEST_F(ClassLoaderContextTest, EncodeContextsIMC) {
+  jobject class_loader_a = LoadDexInPathClassLoader("Main", nullptr);
+  jobject class_loader_b =
+      LoadDexInInMemoryDexClassLoader("MyClass", class_loader_a);
+
+  std::unique_ptr<ClassLoaderContext> context =
+      CreateContextForClassLoader(class_loader_b);
+
+  std::map<std::string, std::string> encodings = context->EncodeClassPathContexts("");
+  ASSERT_EQ(1u, encodings.size());
+  ASSERT_EQ(
+      "IMC[];PCL[" + GetTestDexFileName("Main") + "]",
+      encodings.at("<unknown>"));
+}
+
+TEST_F(ClassLoaderContextTest, EncodeContextsForSingleDex) {
+  jobject class_loader = LoadDexInPathClassLoader("Main", nullptr);
+  std::map<std::string, std::string> encodings =
+      ClassLoaderContext::EncodeClassPathContextsForClassLoader(class_loader);
+  ASSERT_EQ(1u, encodings.size());
+  ASSERT_EQ("PCL[]", encodings.at(GetTestDexFileName("Main")));
+}
+
+static jobject CreateForeignClassLoader() {
+  ScopedObjectAccess soa(Thread::Current());
+  JNIEnv* env = soa.Env();
+
+  // We cannot instantiate a ClassLoader directly, so instead we allocate an Object to represent
+  // our foreign ClassLoader (this works because the runtime does proper instanceof checks before
+  // operating on this object.
+  jmethodID ctor = env->GetMethodID(WellKnownClasses::java_lang_Object, "<init>", "()V");
+  return env->NewObject(WellKnownClasses::java_lang_Object, ctor);
+}
+
+TEST_F(ClassLoaderContextTest, EncodeContextsForUnsupportedBase) {
+  std::map<std::string, std::string> empty;
+  ASSERT_EQ(
+      empty, ClassLoaderContext::EncodeClassPathContextsForClassLoader(CreateForeignClassLoader()));
+}
+
+TEST_F(ClassLoaderContextTest, EncodeContextsForUnsupportedChain) {
+  jobject class_loader = LoadDexInPathClassLoader("Main", CreateForeignClassLoader());
+  std::map<std::string, std::string> encodings =
+      ClassLoaderContext::EncodeClassPathContextsForClassLoader(class_loader);
+  ASSERT_EQ(1u, encodings.size());
+  ASSERT_EQ(
+      ClassLoaderContext::kUnsupportedClassLoaderContextEncoding,
+      encodings.at(GetTestDexFileName("Main")));
+}
+
+TEST_F(ClassLoaderContextTest, EncodeContextsForUnsupportedChainMultiPath) {
+  jobject class_loader = LoadDexInPathClassLoader(std::vector<std::string> { "Main", "MyClass" },
+                                                  CreateForeignClassLoader());
+  std::map<std::string, std::string> encodings =
+      ClassLoaderContext::EncodeClassPathContextsForClassLoader(class_loader);
+  ASSERT_EQ(2u, encodings.size());
+  ASSERT_EQ(
+      ClassLoaderContext::kUnsupportedClassLoaderContextEncoding,
+      encodings.at(GetTestDexFileName("Main")));
+  ASSERT_EQ(
+      ClassLoaderContext::kUnsupportedClassLoaderContextEncoding,
+      encodings.at(GetTestDexFileName("MyClass")));
+}
+
+TEST_F(ClassLoaderContextTest, EncodeContextsForUnsupportedChainMultiDex) {
+  jobject class_loader = LoadDexInPathClassLoader("MultiDex", CreateForeignClassLoader());
+  std::map<std::string, std::string> encodings =
+      ClassLoaderContext::EncodeClassPathContextsForClassLoader(class_loader);
+  ASSERT_EQ(1u, encodings.size());
+  ASSERT_EQ(
+      ClassLoaderContext::kUnsupportedClassLoaderContextEncoding,
+      encodings.at(GetTestDexFileName("MultiDex")));
+}
+
+TEST_F(ClassLoaderContextTest, IsValidEncoding) {
+  ASSERT_TRUE(ClassLoaderContext::IsValidEncoding("PCL[]"));
+  ASSERT_TRUE(ClassLoaderContext::IsValidEncoding("PCL[foo.dex]"));
+  ASSERT_TRUE(ClassLoaderContext::IsValidEncoding("PCL[foo.dex];PCL[bar.dex]"));
+  ASSERT_TRUE(ClassLoaderContext::IsValidEncoding("DLC[];PCL[bar.dex]"));
+  ASSERT_TRUE(
+      ClassLoaderContext::IsValidEncoding(
+        ClassLoaderContext::kUnsupportedClassLoaderContextEncoding));
+  ASSERT_FALSE(ClassLoaderContext::IsValidEncoding("not_valid"));
+  ASSERT_FALSE(ClassLoaderContext::IsValidEncoding("[]"));
+  ASSERT_FALSE(ClassLoaderContext::IsValidEncoding("FCL[]"));
+  ASSERT_FALSE(ClassLoaderContext::IsValidEncoding("foo.dex:bar.dex"));
+}
+
 // TODO(calin) add a test which creates the context for a class loader together with dex_elements.
 TEST_F(ClassLoaderContextTest, CreateContextForClassLoader) {
   // The chain is
@@ -1443,4 +1637,44 @@
             ClassLoaderContext::VerificationResult::kVerifies);
 }
 
+TEST_F(ClassLoaderContextTest, CheckForDuplicateDexFilesNotFoundSingleCL) {
+  jobject class_loader = LoadDexInPathClassLoader("Main", nullptr);
+
+  std::unique_ptr<ClassLoaderContext> context = CreateContextForClassLoader(class_loader);
+
+  std::set<const DexFile*> result = context->CheckForDuplicateDexFiles(
+      std::vector<const DexFile*>());
+  ASSERT_EQ(0u, result.size());
+
+  std::vector<std::unique_ptr<const DexFile>> dex1 = OpenTestDexFiles("ForClassLoaderA");
+  std::vector<const DexFile*> dex1_raw = MakeNonOwningPointerVector(dex1);
+  result = context->CheckForDuplicateDexFiles(dex1_raw);
+  ASSERT_EQ(0u, result.size());
+}
+
+TEST_F(ClassLoaderContextTest, CheckForDuplicateDexFilesFound) {
+  jobject class_loader = LoadDexInPathClassLoader(std::vector<std::string> { "Main", "Main" }, nullptr);
+
+  std::unique_ptr<ClassLoaderContext> context = CreateContextForClassLoader(class_loader);
+
+  std::vector<std::unique_ptr<const DexFile>> dex1 = OpenTestDexFiles("Main");
+  std::vector<const DexFile*> dex1_raw = MakeNonOwningPointerVector(dex1);
+  std::set<const DexFile*> result = context->CheckForDuplicateDexFiles(dex1_raw);
+  ASSERT_EQ(1u, result.size()) << context->EncodeContextForOatFile("");
+  ASSERT_EQ(dex1_raw[0], *(result.begin()));
+}
+
+
+TEST_F(ClassLoaderContextTest, CheckForDuplicateCrossCLNotFound) {
+  jobject class_loader_a = LoadDexInPathClassLoader("ForClassLoaderA", nullptr);
+  jobject class_loader_b = LoadDexInInMemoryDexClassLoader("ForClassLoaderB", class_loader_a);
+
+  std::unique_ptr<ClassLoaderContext> context = CreateContextForClassLoader(class_loader_b);
+
+  std::vector<std::unique_ptr<const DexFile>> dex1 = OpenTestDexFiles("ForClassLoaderA");
+  std::vector<const DexFile*> dex1_raw = MakeNonOwningPointerVector(dex1);
+  std::set<const DexFile*> result = context->CheckForDuplicateDexFiles(dex1_raw);
+  ASSERT_EQ(0u, result.size());
+}
+
 }  // namespace art
diff --git a/runtime/class_loader_utils.h b/runtime/class_loader_utils.h
index 2e85043..934c92b 100644
--- a/runtime/class_loader_utils.h
+++ b/runtime/class_loader_utils.h
@@ -23,12 +23,21 @@
 #include "jni/jni_internal.h"
 #include "mirror/class_loader.h"
 #include "mirror/object-inl.h"
+#include "mirror/object.h"
 #include "native/dalvik_system_DexFile.h"
 #include "scoped_thread_state_change-inl.h"
 #include "well_known_classes.h"
 
 namespace art {
 
+// Returns true if the given class loader derives from BaseDexClassLoader.
+inline bool IsInstanceOfBaseDexClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
+                                           Handle<mirror::ClassLoader> class_loader)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  return class_loader->InstanceOf(
+      soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_BaseDexClassLoader));
+}
+
 // Returns true if the given class loader is either a PathClassLoader or a DexClassLoader.
 // (they both have the same behaviour with respect to class lookup order)
 inline bool IsPathOrDexClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
@@ -86,8 +95,7 @@
       StackHandleScope<1> hs(self);
       Handle<mirror::ObjectArray<mirror::Object>> dex_elements =
           hs.NewHandle(dex_elements_obj->AsObjectArray<mirror::Object>());
-      for (int32_t i = 0; i < dex_elements->GetLength(); ++i) {
-        ObjPtr<mirror::Object> element = dex_elements->GetWithoutChecks(i);
+      for (auto element : dex_elements.Iterate<mirror::Object>()) {
         if (element == nullptr) {
           // Should never happen, fail.
           break;
diff --git a/runtime/class_status.h b/runtime/class_status.h
index ada2863..b194ffa 100644
--- a/runtime/class_status.h
+++ b/runtime/class_status.h
@@ -68,7 +68,15 @@
 // this state if it encounters a soft failure at compile time. This
 // often happens when there are unresolved classes in other dex
 // files, and this status marks a class as needing to be verified
-// again at runtime.
+// again at runtime. This status is only set and seen during AOT
+// compilation, and the compiler will mark the class as resolved in the
+// image and/or oat file.
+//
+// kVerifiedNeedsAccessChecks: The verifier sets a class to
+// this state if it encounters access-checks only soft failure at compile
+// time. This happens when there are unresolved classes in other dex
+// files, and this status marks a class as verified but that will need to run
+// with access checks enabled in the interpreter.
 //
 // TODO: Explain the other states
 enum class ClassStatus : uint8_t {
@@ -82,12 +90,13 @@
   kResolved = 7,  // Part of linking.
   kVerifying = 8,  // In the process of being verified.
   kRetryVerificationAtRuntime = 9,  // Compile time verification failed, retry at runtime.
-  kVerifyingAtRuntime = 10,  // Retrying verification at runtime.
+  kVerifiedNeedsAccessChecks = 10,  // Compile time verification only failed for access checks.
   kVerified = 11,  // Logically part of linking; done pre-init.
   kSuperclassValidated = 12,  // Superclass validation part of init done.
   kInitializing = 13,  // Class init in progress.
   kInitialized = 14,  // Ready to go.
-  kLast = kInitialized
+  kVisiblyInitialized = 15,  // Initialized and visible to all threads.
+  kLast = kVisiblyInitialized
 };
 
 std::ostream& operator<<(std::ostream& os, const ClassStatus& rhs);
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
index 5f8a0b0..d043af3 100644
--- a/runtime/class_table-inl.h
+++ b/runtime/class_table-inl.h
@@ -23,6 +23,7 @@
 #include "gc_root-inl.h"
 #include "mirror/class.h"
 #include "oat_file.h"
+#include "obj_ptr-inl.h"
 
 namespace art {
 
@@ -88,8 +89,12 @@
   return true;
 }
 
+inline bool ClassTable::TableSlot::IsNull() const {
+  return Read<kWithoutReadBarrier>() == nullptr;
+}
+
 template<ReadBarrierOption kReadBarrierOption>
-inline mirror::Class* ClassTable::TableSlot::Read() const {
+inline ObjPtr<mirror::Class> ClassTable::TableSlot::Read() const {
   const uint32_t before = data_.load(std::memory_order_relaxed);
   const ObjPtr<mirror::Class> before_ptr(ExtractPtr(before));
   const ObjPtr<mirror::Class> after_ptr(
@@ -99,7 +104,7 @@
     // one.
     data_.CompareAndSetStrongRelease(before, Encode(after_ptr, MaskHash(before)));
   }
-  return after_ptr.Ptr();
+  return after_ptr;
 }
 
 template<typename Visitor>
@@ -127,11 +132,7 @@
 
 inline ClassTable::TableSlot::TableSlot(ObjPtr<mirror::Class> klass, uint32_t descriptor_hash)
     : data_(Encode(klass, MaskHash(descriptor_hash))) {
-  if (kIsDebugBuild) {
-    std::string temp;
-    const uint32_t hash = ComputeModifiedUtf8Hash(klass->GetDescriptor(&temp));
-    CHECK_EQ(descriptor_hash, hash);
-  }
+  DCHECK_EQ(descriptor_hash, HashDescriptor(klass));
 }
 
 template <typename Filter>
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index 8d8e93a..b7d37e2 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -34,18 +34,10 @@
 }
 
 bool ClassTable::Contains(ObjPtr<mirror::Class> klass) {
-  ReaderMutexLock mu(Thread::Current(), lock_);
-  TableSlot slot(klass);
-  for (ClassSet& class_set : classes_) {
-    auto it = class_set.find(slot);
-    if (it != class_set.end()) {
-      return it->Read() == klass;
-    }
-  }
-  return false;
+  return LookupByDescriptor(klass) == klass;
 }
 
-mirror::Class* ClassTable::LookupByDescriptor(ObjPtr<mirror::Class> klass) {
+ObjPtr<mirror::Class> ClassTable::LookupByDescriptor(ObjPtr<mirror::Class> klass) {
   ReaderMutexLock mu(Thread::Current(), lock_);
   TableSlot slot(klass);
   for (ClassSet& class_set : classes_) {
@@ -57,7 +49,9 @@
   return nullptr;
 }
 
-mirror::Class* ClassTable::UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash) {
+ObjPtr<mirror::Class> ClassTable::UpdateClass(const char* descriptor,
+                                              ObjPtr<mirror::Class> klass,
+                                              size_t hash) {
   WriterMutexLock mu(Thread::Current(), lock_);
   // Should only be updating latest table.
   DescriptorHashPair pair(descriptor, hash);
@@ -70,7 +64,7 @@
     }
     LOG(FATAL) << "Updating class not found " << descriptor;
   }
-  mirror::Class* const existing = existing_it->Read();
+  const ObjPtr<mirror::Class> existing = existing_it->Read();
   CHECK_NE(existing, klass) << descriptor;
   CHECK(!existing->IsResolved()) << descriptor;
   CHECK_EQ(klass->GetStatus(), ClassStatus::kResolving) << descriptor;
@@ -121,7 +115,7 @@
   return classes_.back().size();
 }
 
-mirror::Class* ClassTable::Lookup(const char* descriptor, size_t hash) {
+ObjPtr<mirror::Class> ClassTable::Lookup(const char* descriptor, size_t hash) {
   DescriptorHashPair pair(descriptor, hash);
   ReaderMutexLock mu(Thread::Current(), lock_);
   for (ClassSet& class_set : classes_) {
@@ -191,27 +185,35 @@
 uint32_t ClassTable::ClassDescriptorHashEquals::operator()(const TableSlot& slot)
     const {
   std::string temp;
-  return ComputeModifiedUtf8Hash(slot.Read()->GetDescriptor(&temp));
+  // No read barrier needed, we're reading a chain of constant references for comparison
+  // with null and retrieval of constant primitive data. See ReadBarrierOption.
+  return ComputeModifiedUtf8Hash(slot.Read<kWithoutReadBarrier>()->GetDescriptor(&temp));
 }
 
 bool ClassTable::ClassDescriptorHashEquals::operator()(const TableSlot& a,
                                                        const TableSlot& b) const {
+  // No read barrier needed, we're reading a chain of constant references for comparison
+  // with null and retrieval of constant primitive data. See ReadBarrierOption.
   if (a.Hash() != b.Hash()) {
     std::string temp;
-    DCHECK(!a.Read()->DescriptorEquals(b.Read()->GetDescriptor(&temp)));
+    DCHECK(!a.Read<kWithoutReadBarrier>()->DescriptorEquals(
+        b.Read<kWithoutReadBarrier>()->GetDescriptor(&temp)));
     return false;
   }
   std::string temp;
-  return a.Read()->DescriptorEquals(b.Read()->GetDescriptor(&temp));
+  return a.Read<kWithoutReadBarrier>()->DescriptorEquals(
+      b.Read<kWithoutReadBarrier>()->GetDescriptor(&temp));
 }
 
 bool ClassTable::ClassDescriptorHashEquals::operator()(const TableSlot& a,
                                                        const DescriptorHashPair& b) const {
+  // No read barrier needed, we're reading a chain of constant references for comparison
+  // with null and retrieval of constant primitive data. See ReadBarrierOption.
   if (!a.MaskedHashEquals(b.second)) {
-    DCHECK(!a.Read()->DescriptorEquals(b.first));
+    DCHECK(!a.Read<kWithoutReadBarrier>()->DescriptorEquals(b.first));
     return false;
   }
-  return a.Read()->DescriptorEquals(b.first);
+  return a.Read<kWithoutReadBarrier>()->DescriptorEquals(b.first);
 }
 
 uint32_t ClassTable::ClassDescriptorHashEquals::operator()(const DescriptorHashPair& pair) const {
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 0b08041..810c09c 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -64,9 +64,7 @@
       return *this;
     }
 
-    bool IsNull() const REQUIRES_SHARED(Locks::mutator_lock_) {
-      return Read<kWithoutReadBarrier>() == nullptr;
-    }
+    bool IsNull() const REQUIRES_SHARED(Locks::mutator_lock_);
 
     uint32_t Hash() const {
       return MaskHash(data_.load(std::memory_order_relaxed));
@@ -84,7 +82,7 @@
         REQUIRES_SHARED(Locks::mutator_lock_);
 
     template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-    mirror::Class* Read() const REQUIRES_SHARED(Locks::mutator_lock_);
+    ObjPtr<mirror::Class> Read() const REQUIRES_SHARED(Locks::mutator_lock_);
 
     // NO_THREAD_SAFETY_ANALYSIS since the visitor may require heap bitmap lock.
     template<typename Visitor>
@@ -172,7 +170,9 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Update a class in the table with the new class. Returns the existing class which was replaced.
-  mirror::Class* UpdateClass(const char* descriptor, mirror::Class* new_klass, size_t hash)
+  ObjPtr<mirror::Class> UpdateClass(const char* descriptor,
+                                    ObjPtr<mirror::Class> new_klass,
+                                    size_t hash)
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -200,12 +200,12 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Return the first class that matches the descriptor. Returns null if there are none.
-  mirror::Class* Lookup(const char* descriptor, size_t hash)
+  ObjPtr<mirror::Class> Lookup(const char* descriptor, size_t hash)
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Return the first class that matches the descriptor of klass. Returns null if there are none.
-  mirror::Class* LookupByDescriptor(ObjPtr<mirror::Class> klass)
+  ObjPtr<mirror::Class> LookupByDescriptor(ObjPtr<mirror::Class> klass)
       REQUIRES(!lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/class_table_test.cc b/runtime/class_table_test.cc
index 2270662..5275c7e 100644
--- a/runtime/class_table_test.cc
+++ b/runtime/class_table_test.cc
@@ -87,9 +87,9 @@
 
   // Add h_X to the class table.
   table.Insert(h_X.Get());
-  EXPECT_EQ(table.LookupByDescriptor(h_X.Get()), h_X.Get());
-  EXPECT_EQ(table.Lookup(descriptor_x, ComputeModifiedUtf8Hash(descriptor_x)), h_X.Get());
-  EXPECT_EQ(table.Lookup("NOT_THERE", ComputeModifiedUtf8Hash("NOT_THERE")), nullptr);
+  EXPECT_OBJ_PTR_EQ(table.LookupByDescriptor(h_X.Get()), h_X.Get());
+  EXPECT_OBJ_PTR_EQ(table.Lookup(descriptor_x, ComputeModifiedUtf8Hash(descriptor_x)), h_X.Get());
+  EXPECT_TRUE(table.Lookup("NOT_THERE", ComputeModifiedUtf8Hash("NOT_THERE")) == nullptr);
   EXPECT_EQ(table.NumZygoteClasses(class_loader.Get()), 0u);
   EXPECT_EQ(table.NumNonZygoteClasses(class_loader.Get()), 1u);
 
@@ -99,11 +99,11 @@
   EXPECT_EQ(table.NumNonZygoteClasses(class_loader.Get()), 0u);
 
   // Test inserting and related lookup functions.
-  EXPECT_EQ(table.LookupByDescriptor(h_Y.Get()), nullptr);
+  EXPECT_TRUE(table.LookupByDescriptor(h_Y.Get()) == nullptr);
   EXPECT_FALSE(table.Contains(h_Y.Get()));
   table.Insert(h_Y.Get());
-  EXPECT_EQ(table.LookupByDescriptor(h_X.Get()), h_X.Get());
-  EXPECT_EQ(table.LookupByDescriptor(h_Y.Get()), h_Y.Get());
+  EXPECT_OBJ_PTR_EQ(table.LookupByDescriptor(h_X.Get()), h_X.Get());
+  EXPECT_OBJ_PTR_EQ(table.LookupByDescriptor(h_Y.Get()), h_Y.Get());
   EXPECT_TRUE(table.Contains(h_X.Get()));
   EXPECT_TRUE(table.Contains(h_Y.Get()));
 
diff --git a/runtime/common_dex_operations.h b/runtime/common_dex_operations.h
index 1c95622..882e3ce 100644
--- a/runtime/common_dex_operations.h
+++ b/runtime/common_dex_operations.h
@@ -35,6 +35,8 @@
 #include "mirror/class.h"
 #include "mirror/object.h"
 #include "obj_ptr-inl.h"
+#include "reflective_handle.h"
+#include "reflective_handle_scope.h"
 #include "runtime.h"
 #include "stack.h"
 #include "thread.h"
@@ -100,14 +102,16 @@
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
   if (UNLIKELY(instrumentation->HasFieldReadListeners())) {
     StackHandleScope<1> hs(self);
+    StackArtFieldHandleScope<1> rhs(self);
     // Wrap in handle wrapper in case the listener does thread suspension.
     HandleWrapperObjPtr<mirror::Object> h(hs.NewHandleWrapper(&obj));
+    ReflectiveHandleWrapper<ArtField> fh(rhs.NewReflectiveHandleWrapper(&field));
     ObjPtr<mirror::Object> this_object;
     if (!field->IsStatic()) {
       this_object = obj;
     }
     instrumentation->FieldReadEvent(self,
-                                    this_object.Ptr(),
+                                    this_object,
                                     shadow_frame.GetMethod(),
                                     shadow_frame.GetDexPC(),
                                     field);
@@ -159,14 +163,16 @@
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
   if (UNLIKELY(instrumentation->HasFieldWriteListeners())) {
     StackHandleScope<2> hs(self);
+    StackArtFieldHandleScope<1> rhs(self);
     // Save this and return value (if needed) in case the instrumentation causes a suspend.
     HandleWrapperObjPtr<mirror::Object> h(hs.NewHandleWrapper(&obj));
+    ReflectiveHandleWrapper<ArtField> fh(rhs.NewReflectiveHandleWrapper(&field));
     ObjPtr<mirror::Object> this_object = field->IsStatic() ? nullptr : obj;
     mirror::Object* fake_root = nullptr;
     HandleWrapper<mirror::Object> ret(hs.NewHandleWrapper<mirror::Object>(
         field_type == Primitive::kPrimNot ? value.GetGCRoot() : &fake_root));
     instrumentation->FieldWriteEvent(self,
-                                     this_object.Ptr(),
+                                     this_object,
                                      shadow_frame.GetMethod(),
                                      shadow_frame.GetDexPC(),
                                      field,
@@ -179,7 +185,6 @@
       // actual field write. If one pops the stack we should not modify the field.  The next
       // instruction will force a pop. Return true.
       DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
-      DCHECK(interpreter::PrevFrameWillRetry(self, shadow_frame));
       return true;
     }
   }
@@ -211,8 +216,10 @@
         ObjPtr<mirror::Class> field_class;
         {
           StackHandleScope<2> hs(self);
+          StackArtFieldHandleScope<1> rhs(self);
           HandleWrapperObjPtr<mirror::Object> h_reg(hs.NewHandleWrapper(&reg));
           HandleWrapperObjPtr<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
+          ReflectiveHandleWrapper<ArtField> fh(rhs.NewReflectiveHandleWrapper(&field));
           field_class = field->ResolveType();
         }
         // ArtField::ResolveType() may fail as evidenced with a dexing bug (b/78788577).
@@ -220,7 +227,7 @@
           Thread::Current()->AssertPendingException();
           return false;
         }
-        if (UNLIKELY(!reg->VerifierInstanceOf(field_class.Ptr()))) {
+        if (UNLIKELY(!reg->VerifierInstanceOf(field_class))) {
           // This should never happen.
           std::string temp1, temp2, temp3;
           self->ThrowNewExceptionF("Ljava/lang/InternalError;",
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index f4cc161..ffb8c77 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -41,7 +41,9 @@
 #include "dex/art_dex_file_loader.h"
 #include "dex/dex_file-inl.h"
 #include "dex/dex_file_loader.h"
+#include "dex/method_reference.h"
 #include "dex/primitive.h"
+#include "dex/type_reference.h"
 #include "gc/heap.h"
 #include "gc/space/image_space.h"
 #include "gc_root-inl.h"
@@ -56,6 +58,7 @@
 #include "mirror/object_array-alloc-inl.h"
 #include "native/dalvik_system_DexFile.h"
 #include "noop_compiler_callbacks.h"
+#include "profile/profile_compilation_info.h"
 #include "runtime-inl.h"
 #include "scoped_thread_state_change-inl.h"
 #include "thread.h"
@@ -93,12 +96,7 @@
       return GetAndroidToolsDir("prebuilts/gcc/linux-x86/x86",
                                 "x86_64-linux-android",
                                 "x86_64-linux-android");
-    case InstructionSet::kMips:
-    case InstructionSet::kMips64:
-      return GetAndroidToolsDir("prebuilts/gcc/linux-x86/mips",
-                                "mips64el-linux-android",
-                                "mips64el-linux-android");
-    case InstructionSet::kNone:
+    default:
       break;
   }
   ADD_FAILURE() << "Invalid isa " << isa;
@@ -122,6 +120,8 @@
   options.push_back(std::make_pair("-Xcheck:jni", nullptr));
   options.push_back(std::make_pair(min_heap_string, nullptr));
   options.push_back(std::make_pair(max_heap_string, nullptr));
+
+  // Technically this is redundant w/ common_art_test, but still check.
   options.push_back(std::make_pair("-XX:SlowDebug=true", nullptr));
   static bool gSlowDebugTestFlag = false;
   RegisterRuntimeDebugFlag(&gSlowDebugTestFlag);
@@ -270,16 +270,19 @@
   return class_loader;
 }
 
-jobject CommonRuntimeTestImpl::LoadDexInWellKnownClassLoader(const std::string& dex_name,
-                                                             jclass loader_class,
-                                                             jobject parent_loader,
-                                                             jobject shared_libraries) {
-  std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles(dex_name.c_str());
+jobject
+CommonRuntimeTestImpl::LoadDexInWellKnownClassLoader(const std::vector<std::string>& dex_names,
+                                                     jclass loader_class,
+                                                     jobject parent_loader,
+                                                     jobject shared_libraries) {
   std::vector<const DexFile*> class_path;
-  CHECK_NE(0U, dex_files.size());
-  for (auto& dex_file : dex_files) {
-    class_path.push_back(dex_file.get());
-    loaded_dex_files_.push_back(std::move(dex_file));
+  for (const std::string& dex_name : dex_names) {
+    std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles(dex_name.c_str());
+    CHECK_NE(0U, dex_files.size());
+    for (auto& dex_file : dex_files) {
+      class_path.push_back(dex_file.get());
+      loaded_dex_files_.push_back(std::move(dex_file));
+    }
   }
   Thread* self = Thread::Current();
   ScopedObjectAccess soa(self);
@@ -318,7 +321,15 @@
 jobject CommonRuntimeTestImpl::LoadDexInPathClassLoader(const std::string& dex_name,
                                                         jobject parent_loader,
                                                         jobject shared_libraries) {
-  return LoadDexInWellKnownClassLoader(dex_name,
+  return LoadDexInPathClassLoader(std::vector<std::string>{ dex_name },
+                                  parent_loader,
+                                  shared_libraries);
+}
+
+jobject CommonRuntimeTestImpl::LoadDexInPathClassLoader(const std::vector<std::string>& names,
+                                                        jobject parent_loader,
+                                                        jobject shared_libraries) {
+  return LoadDexInWellKnownClassLoader(names,
                                        WellKnownClasses::dalvik_system_PathClassLoader,
                                        parent_loader,
                                        shared_libraries);
@@ -326,14 +337,14 @@
 
 jobject CommonRuntimeTestImpl::LoadDexInDelegateLastClassLoader(const std::string& dex_name,
                                                                 jobject parent_loader) {
-  return LoadDexInWellKnownClassLoader(dex_name,
+  return LoadDexInWellKnownClassLoader({ dex_name },
                                        WellKnownClasses::dalvik_system_DelegateLastClassLoader,
                                        parent_loader);
 }
 
 jobject CommonRuntimeTestImpl::LoadDexInInMemoryDexClassLoader(const std::string& dex_name,
                                                                jobject parent_loader) {
-  return LoadDexInWellKnownClassLoader(dex_name,
+  return LoadDexInWellKnownClassLoader({ dex_name },
                                        WellKnownClasses::dalvik_system_InMemoryDexClassLoader,
                                        parent_loader);
 }
@@ -400,18 +411,16 @@
 }
 
 bool CommonRuntimeTestImpl::StartDex2OatCommandLine(/*out*/std::vector<std::string>* argv,
-                                                    /*out*/std::string* error_msg) {
+                                                    /*out*/std::string* error_msg,
+                                                    bool use_runtime_bcp_and_image) {
   DCHECK(argv != nullptr);
   DCHECK(argv->empty());
 
   Runtime* runtime = Runtime::Current();
-  const std::vector<gc::space::ImageSpace*>& image_spaces =
-      runtime->GetHeap()->GetBootImageSpaces();
-  if (image_spaces.empty()) {
+  if (use_runtime_bcp_and_image && runtime->GetHeap()->GetBootImageSpaces().empty()) {
     *error_msg = "No image location found for Dex2Oat.";
     return false;
   }
-  std::string image_location = image_spaces[0]->GetImageLocation();
 
   argv->push_back(runtime->GetCompilerExecutable());
   if (runtime->IsJavaDebuggable()) {
@@ -419,18 +428,202 @@
   }
   runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(argv);
 
-  argv->push_back("--runtime-arg");
-  argv->push_back(GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames()));
-  argv->push_back("--runtime-arg");
-  argv->push_back(GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations()));
+  if (use_runtime_bcp_and_image) {
+    argv->push_back("--runtime-arg");
+    argv->push_back(GetClassPathOption("-Xbootclasspath:", GetLibCoreDexFileNames()));
+    argv->push_back("--runtime-arg");
+    argv->push_back(GetClassPathOption("-Xbootclasspath-locations:", GetLibCoreDexLocations()));
 
-  argv->push_back("--boot-image=" + image_location);
+    const std::vector<gc::space::ImageSpace*>& image_spaces =
+        runtime->GetHeap()->GetBootImageSpaces();
+    DCHECK(!image_spaces.empty());
+    argv->push_back("--boot-image=" + image_spaces[0]->GetImageLocation());
+  }
 
   std::vector<std::string> compiler_options = runtime->GetCompilerOptions();
   argv->insert(argv->end(), compiler_options.begin(), compiler_options.end());
   return true;
 }
 
+bool CommonRuntimeTestImpl::CompileBootImage(const std::vector<std::string>& extra_args,
+                                             const std::string& image_file_name_prefix,
+                                             ArrayRef<const std::string> dex_files,
+                                             ArrayRef<const std::string> dex_locations,
+                                             std::string* error_msg,
+                                             const std::string& use_fd_prefix) {
+  Runtime* const runtime = Runtime::Current();
+  std::vector<std::string> argv {
+    runtime->GetCompilerExecutable(),
+    "--runtime-arg",
+    "-Xms64m",
+    "--runtime-arg",
+    "-Xmx64m",
+    "--runtime-arg",
+    "-Xverify:softfail",
+  };
+  CHECK_EQ(dex_files.size(), dex_locations.size());
+  for (const std::string& dex_file : dex_files) {
+    argv.push_back("--dex-file=" + dex_file);
+  }
+  for (const std::string& dex_location : dex_locations) {
+    argv.push_back("--dex-location=" + dex_location);
+  }
+  if (runtime->IsJavaDebuggable()) {
+    argv.push_back("--debuggable");
+  }
+  runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
+
+  if (!kIsTargetBuild) {
+    argv.push_back("--host");
+  }
+
+  std::unique_ptr<File> art_file;
+  std::unique_ptr<File> vdex_file;
+  std::unique_ptr<File> oat_file;
+  if (!use_fd_prefix.empty()) {
+    art_file.reset(OS::CreateEmptyFile((use_fd_prefix + ".art").c_str()));
+    vdex_file.reset(OS::CreateEmptyFile((use_fd_prefix + ".vdex").c_str()));
+    oat_file.reset(OS::CreateEmptyFile((use_fd_prefix + ".oat").c_str()));
+    argv.push_back("--image-fd=" + std::to_string(art_file->Fd()));
+    argv.push_back("--output-vdex-fd=" + std::to_string(vdex_file->Fd()));
+    argv.push_back("--oat-fd=" + std::to_string(oat_file->Fd()));
+    argv.push_back("--oat-location=" + image_file_name_prefix + ".oat");
+  } else {
+    argv.push_back("--image=" + image_file_name_prefix + ".art");
+    argv.push_back("--oat-file=" + image_file_name_prefix + ".oat");
+    argv.push_back("--oat-location=" + image_file_name_prefix + ".oat");
+  }
+
+  std::vector<std::string> compiler_options = runtime->GetCompilerOptions();
+  argv.insert(argv.end(), compiler_options.begin(), compiler_options.end());
+
+  // We must set --android-root.
+  const char* android_root = getenv("ANDROID_ROOT");
+  CHECK(android_root != nullptr);
+  argv.push_back("--android-root=" + std::string(android_root));
+  argv.insert(argv.end(), extra_args.begin(), extra_args.end());
+
+  bool result = RunDex2Oat(argv, error_msg);
+  if (art_file != nullptr) {
+    CHECK_EQ(0, art_file->FlushClose());
+  }
+  if (vdex_file != nullptr) {
+    CHECK_EQ(0, vdex_file->FlushClose());
+  }
+  if (oat_file != nullptr) {
+    CHECK_EQ(0, oat_file->FlushClose());
+  }
+  return result;
+}
+
+bool CommonRuntimeTestImpl::RunDex2Oat(const std::vector<std::string>& args,
+                                       std::string* error_msg) {
+  // We only want fatal logging for the error message.
+  auto post_fork_fn = []() { return setenv("ANDROID_LOG_TAGS", "*:f", 1) == 0; };
+  ForkAndExecResult res = ForkAndExec(args, post_fork_fn, error_msg);
+  if (res.stage != ForkAndExecResult::kFinished) {
+    *error_msg = strerror(errno);
+    return false;
+  }
+  return res.StandardSuccess();
+}
+
+std::string CommonRuntimeTestImpl::GetImageDirectory() {
+  if (IsHost()) {
+    const char* host_dir = getenv("ANDROID_HOST_OUT");
+    CHECK(host_dir != nullptr);
+    return std::string(host_dir) + "/framework";
+  } else {
+    return std::string("/apex/com.android.art/javalib");
+  }
+}
+
+std::string CommonRuntimeTestImpl::GetImageLocation() {
+  return GetImageDirectory() + (IsHost() ? "/core.art" : "/boot.art");
+}
+
+std::string CommonRuntimeTestImpl::GetSystemImageFile() {
+  std::string isa = GetInstructionSetString(kRuntimeISA);
+  return GetImageDirectory() + "/" + isa + (IsHost() ? "/core.art" : "/boot.art");
+}
+
+void CommonRuntimeTestImpl::EnterTransactionMode() {
+  CHECK(!Runtime::Current()->IsActiveTransaction());
+  Runtime::Current()->EnterTransactionMode(/*strict=*/ false, /*root=*/ nullptr);
+}
+
+void CommonRuntimeTestImpl::ExitTransactionMode() {
+  Runtime::Current()->ExitTransactionMode();
+  CHECK(!Runtime::Current()->IsActiveTransaction());
+}
+
+void CommonRuntimeTestImpl::RollbackAndExitTransactionMode() {
+  Runtime::Current()->RollbackAndExitTransactionMode();
+  CHECK(!Runtime::Current()->IsActiveTransaction());
+}
+
+bool CommonRuntimeTestImpl::IsTransactionAborted() {
+  return Runtime::Current()->IsTransactionAborted();
+}
+
+void CommonRuntimeTestImpl::VisitDexes(ArrayRef<const std::string> dexes,
+                                       const std::function<void(MethodReference)>& method_visitor,
+                                       const std::function<void(TypeReference)>& class_visitor,
+                                       size_t method_frequency,
+                                       size_t class_frequency) {
+  size_t method_counter = 0;
+  size_t class_counter = 0;
+  for (const std::string& dex : dexes) {
+    std::vector<std::unique_ptr<const DexFile>> dex_files;
+    std::string error_msg;
+    const ArtDexFileLoader dex_file_loader;
+    CHECK(dex_file_loader.Open(dex.c_str(),
+                               dex,
+                               /*verify*/ true,
+                               /*verify_checksum*/ false,
+                               &error_msg,
+                               &dex_files))
+        << error_msg;
+    for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
+      for (size_t i = 0; i < dex_file->NumMethodIds(); ++i) {
+        if (++method_counter % method_frequency == 0) {
+          method_visitor(MethodReference(dex_file.get(), i));
+        }
+      }
+      for (size_t i = 0; i < dex_file->NumTypeIds(); ++i) {
+        if (++class_counter % class_frequency == 0) {
+          class_visitor(TypeReference(dex_file.get(), dex::TypeIndex(i)));
+        }
+      }
+    }
+  }
+}
+
+void CommonRuntimeTestImpl::GenerateProfile(ArrayRef<const std::string> dexes,
+                                            File* out_file,
+                                            size_t method_frequency,
+                                            size_t type_frequency) {
+  ProfileCompilationInfo profile;
+  VisitDexes(
+      dexes,
+      [&profile](MethodReference ref) {
+        uint32_t flags = ProfileCompilationInfo::MethodHotness::kFlagHot |
+            ProfileCompilationInfo::MethodHotness::kFlagStartup;
+        EXPECT_TRUE(profile.AddMethod(
+            ProfileMethodInfo(ref),
+            static_cast<ProfileCompilationInfo::MethodHotness::Flag>(flags)));
+      },
+      [&profile](TypeReference ref) {
+        std::set<dex::TypeIndex> classes;
+        classes.insert(ref.TypeIndex());
+        EXPECT_TRUE(profile.AddClassesForDex(ref.dex_file, classes.begin(), classes.end()));
+      },
+      method_frequency,
+      type_frequency);
+  profile.Save(out_file->Fd());
+  EXPECT_EQ(out_file->Flush(), 0);
+}
+
 CheckJniAbortCatcher::CheckJniAbortCatcher() : vm_(Runtime::Current()->GetJavaVM()) {
   vm_->SetCheckJniAbortHook(Hook, &actual_);
 }
@@ -463,7 +656,6 @@
 extern "C"
 __attribute__((visibility("default"))) __attribute__((weak))
 void ArtTestGlobalInit() {
-  LOG(ERROR) << "ArtTestGlobalInit in common_runtime_test";
 }
 
 int main(int argc, char **argv) {
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index fb3eae7..711bc59 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -20,6 +20,7 @@
 #include <gtest/gtest.h>
 #include <jni.h>
 
+#include <functional>
 #include <string>
 
 #include <android-base/logging.h>
@@ -38,9 +39,28 @@
 
 namespace art {
 
+class MethodReference;
+class TypeReference;
+
 using LogSeverity = android::base::LogSeverity;
 using ScopedLogSeverity = android::base::ScopedLogSeverity;
 
+template<class MirrorType>
+static inline ObjPtr<MirrorType> MakeObjPtr(MirrorType* ptr) {
+  return ptr;
+}
+
+template<class MirrorType>
+static inline ObjPtr<MirrorType> MakeObjPtr(ObjPtr<MirrorType> ptr) {
+  return ptr;
+}
+
+// OBJ pointer helpers to avoid needing .Decode everywhere.
+#define EXPECT_OBJ_PTR_EQ(a, b) EXPECT_EQ(MakeObjPtr(a).Ptr(), MakeObjPtr(b).Ptr())
+#define ASSERT_OBJ_PTR_EQ(a, b) ASSERT_EQ(MakeObjPtr(a).Ptr(), MakeObjPtr(b).Ptr())
+#define EXPECT_OBJ_PTR_NE(a, b) EXPECT_NE(MakeObjPtr(a).Ptr(), MakeObjPtr(b).Ptr())
+#define ASSERT_OBJ_PTR_NE(a, b) ASSERT_NE(MakeObjPtr(a).Ptr(), MakeObjPtr(b).Ptr())
+
 class ClassLinker;
 class CompilerCallbacks;
 class DexFile;
@@ -95,7 +115,26 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool StartDex2OatCommandLine(/*out*/std::vector<std::string>* argv,
-                               /*out*/std::string* error_msg);
+                               /*out*/std::string* error_msg,
+                               bool use_runtime_bcp_and_image = true);
+
+  bool CompileBootImage(const std::vector<std::string>& extra_args,
+                        const std::string& image_file_name_prefix,
+                        ArrayRef<const std::string> dex_files,
+                        ArrayRef<const std::string> dex_locations,
+                        std::string* error_msg,
+                        const std::string& use_fd_prefix = "");
+
+  bool CompileBootImage(const std::vector<std::string>& extra_args,
+                        const std::string& image_file_name_prefix,
+                        ArrayRef<const std::string> dex_files,
+                        std::string* error_msg,
+                        const std::string& use_fd_prefix = "") {
+    return CompileBootImage(
+        extra_args, image_file_name_prefix, dex_files, dex_files, error_msg, use_fd_prefix);
+  }
+
+  bool RunDex2Oat(const std::vector<std::string>& args, std::string* error_msg);
 
  protected:
   // Allow subclases such as CommonCompilerTest to add extra options.
@@ -118,13 +157,27 @@
   jobject LoadDexInPathClassLoader(const std::string& dex_name,
                                    jobject parent_loader,
                                    jobject shared_libraries = nullptr);
+  jobject LoadDexInPathClassLoader(const std::vector<std::string>& dex_names,
+                                   jobject parent_loader,
+                                   jobject shared_libraries = nullptr);
   jobject LoadDexInDelegateLastClassLoader(const std::string& dex_name, jobject parent_loader);
   jobject LoadDexInInMemoryDexClassLoader(const std::string& dex_name, jobject parent_loader);
-  jobject LoadDexInWellKnownClassLoader(const std::string& dex_name,
+  jobject LoadDexInWellKnownClassLoader(const std::vector<std::string>& dex_names,
                                         jclass loader_class,
                                         jobject parent_loader,
                                         jobject shared_libraries = nullptr);
 
+  void VisitDexes(ArrayRef<const std::string> dexes,
+                  const std::function<void(MethodReference)>& method_visitor,
+                  const std::function<void(TypeReference)>& class_visitor,
+                  size_t method_frequency = 1u,
+                  size_t class_frequency = 1u);
+
+  void GenerateProfile(ArrayRef<const std::string> dexes,
+                       File* out_file,
+                       size_t method_frequency = 1u,
+                       size_t type_frequency = 1u);
+
   std::unique_ptr<Runtime> runtime_;
 
   // The class_linker_, java_lang_dex_file_, and boot_class_path_ are all
@@ -152,6 +205,16 @@
   // Called to finish up runtime creation and filling test fields. By default runs root
   // initializers, initialize well-known classes, and creates the heap thread pool.
   virtual void FinalizeSetup();
+
+  // Returns the directory where the pre-compiled core.art can be found.
+  static std::string GetImageDirectory();
+  static std::string GetImageLocation();
+  static std::string GetSystemImageFile();
+
+  static void EnterTransactionMode();
+  static void ExitTransactionMode();
+  static void RollbackAndExitTransactionMode() REQUIRES_SHARED(Locks::mutator_lock_);
+  static bool IsTransactionAborted();
 };
 
 template <typename TestType>
@@ -195,6 +258,12 @@
   DISALLOW_COPY_AND_ASSIGN(CheckJniAbortCatcher);
 };
 
+#define TEST_DISABLED() \
+  do { \
+    printf("WARNING: TEST DISABLED\n"); \
+    return; \
+  } while (false)
+
 #define TEST_DISABLED_FOR_ARM() \
   if (kRuntimeISA == InstructionSet::kArm || kRuntimeISA == InstructionSet::kThumb2) { \
     printf("WARNING: TEST DISABLED FOR ARM\n"); \
@@ -207,24 +276,18 @@
     return; \
   }
 
-#define TEST_DISABLED_FOR_MIPS() \
-  if (kRuntimeISA == InstructionSet::kMips) { \
-    printf("WARNING: TEST DISABLED FOR MIPS\n"); \
-    return; \
-  }
-
-#define TEST_DISABLED_FOR_MIPS64() \
-  if (kRuntimeISA == InstructionSet::kMips64) { \
-    printf("WARNING: TEST DISABLED FOR MIPS64\n"); \
-    return; \
-  }
-
 #define TEST_DISABLED_FOR_X86() \
   if (kRuntimeISA == InstructionSet::kX86) { \
     printf("WARNING: TEST DISABLED FOR X86\n"); \
     return; \
   }
 
+#define TEST_DISABLED_FOR_X86_64() \
+  if (kRuntimeISA == InstructionSet::kX86_64) { \
+    printf("WARNING: TEST DISABLED FOR X86_64\n"); \
+    return; \
+  }
+
 #define TEST_DISABLED_FOR_STRING_COMPRESSION() \
   if (mirror::kUseStringCompression) { \
     printf("WARNING: TEST DISABLED FOR STRING COMPRESSION\n"); \
@@ -249,6 +312,12 @@
     return; \
   }
 
+#define TEST_DISABLED_FOR_KERNELS_WITH_CACHE_SEGFAULT() \
+  if (CacheOperationsMaySegFault()) { \
+    printf("WARNING: TEST DISABLED ON KERNEL THAT SEGFAULT ON CACHE OPERATIONS\n"); \
+    return; \
+  }
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_COMMON_RUNTIME_TEST_H_
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index a1168af..1c9cf18 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -475,6 +475,8 @@
     case Instruction::INVOKE_INTERFACE_RANGE:
     case Instruction::INVOKE_POLYMORPHIC:
     case Instruction::INVOKE_POLYMORPHIC_RANGE:
+    case Instruction::INVOKE_SUPER:
+    case Instruction::INVOKE_SUPER_RANGE:
     case Instruction::INVOKE_VIRTUAL_QUICK:
     case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
       // Without inlining, we could just check that the offset is the class offset.
@@ -596,6 +598,12 @@
     case Instruction::INVOKE_VIRTUAL_RANGE:
       ThrowNullPointerExceptionForMethodAccess(instr.VRegB_3rc(), kVirtual);
       break;
+    case Instruction::INVOKE_SUPER:
+      ThrowNullPointerExceptionForMethodAccess(instr.VRegB_35c(), kSuper);
+      break;
+    case Instruction::INVOKE_SUPER_RANGE:
+      ThrowNullPointerExceptionForMethodAccess(instr.VRegB_3rc(), kSuper);
+      break;
     case Instruction::INVOKE_INTERFACE:
       ThrowNullPointerExceptionForMethodAccess(instr.VRegB_35c(), kInterface);
       break;
@@ -728,6 +736,10 @@
   ThrowException("Ljava/lang/NullPointerException;", nullptr, msg);
 }
 
+void ThrowNullPointerException() {
+  ThrowException("Ljava/lang/NullPointerException;");
+}
+
 // ReadOnlyBufferException
 
 void ThrowReadOnlyBufferException() {
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index c167d1b..832eac6 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -234,6 +234,9 @@
 void ThrowNullPointerException(const char* msg)
     REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
 
+void ThrowNullPointerException()
+    REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
+
 // ReadOnlyBufferException
 
 void ThrowReadOnlyBufferException() REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index c5b111f..f218c4e 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -23,12 +23,16 @@
 #include <set>
 #include <vector>
 
+#include "android-base/macros.h"
 #include "android-base/stringprintf.h"
 
 #include "arch/context.h"
 #include "art_field-inl.h"
 #include "art_method-inl.h"
+#include "base/endian_utils.h"
 #include "base/enums.h"
+#include "base/logging.h"
+#include "base/memory_tool.h"
 #include "base/safe_map.h"
 #include "base/strlcpy.h"
 #include "base/time_utils.h"
@@ -49,8 +53,7 @@
 #include "gc/space/large_object_space.h"
 #include "gc/space/space-inl.h"
 #include "handle_scope-inl.h"
-#include "jdwp/jdwp_priv.h"
-#include "jdwp/object_registry.h"
+#include "instrumentation.h"
 #include "jni/jni_internal.h"
 #include "jvalue-inl.h"
 #include "mirror/array-alloc-inl.h"
@@ -68,19 +71,22 @@
 #include "oat_file.h"
 #include "obj_ptr-inl.h"
 #include "reflection.h"
+#include "reflective_handle.h"
+#include "reflective_handle_scope-inl.h"
 #include "runtime-inl.h"
+#include "runtime_callbacks.h"
 #include "scoped_thread_state_change-inl.h"
+#include "scoped_thread_state_change.h"
 #include "stack.h"
+#include "thread.h"
 #include "thread_list.h"
+#include "thread_pool.h"
 #include "well_known_classes.h"
 
 namespace art {
 
 using android::base::StringPrintf;
 
-// The key identifying the debugger to update instrumentation.
-static constexpr const char* kDbgInstrumentationKey = "Debugger";
-
 // Limit alloc_record_count to the 2BE value (64k-1) that is the limit of the current protocol.
 static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
   const size_t cap = 0xffff;
@@ -90,222 +96,9 @@
   return alloc_record_count;
 }
 
-class Breakpoint : public ValueObject {
- public:
-  Breakpoint(ArtMethod* method, uint32_t dex_pc, DeoptimizationRequest::Kind deoptimization_kind)
-    : method_(method->GetCanonicalMethod(kRuntimePointerSize)),
-      dex_pc_(dex_pc),
-      deoptimization_kind_(deoptimization_kind) {
-    CHECK(deoptimization_kind_ == DeoptimizationRequest::kNothing ||
-          deoptimization_kind_ == DeoptimizationRequest::kSelectiveDeoptimization ||
-          deoptimization_kind_ == DeoptimizationRequest::kFullDeoptimization);
-  }
-
-  Breakpoint(const Breakpoint& other) REQUIRES_SHARED(Locks::mutator_lock_)
-    : method_(other.method_),
-      dex_pc_(other.dex_pc_),
-      deoptimization_kind_(other.deoptimization_kind_) {}
-
-  // Method() is called from root visiting, do not use ScopedObjectAccess here or it can cause
-  // GC to deadlock if another thread tries to call SuspendAll while the GC is in a runnable state.
-  ArtMethod* Method() const {
-    return method_;
-  }
-
-  uint32_t DexPc() const {
-    return dex_pc_;
-  }
-
-  DeoptimizationRequest::Kind GetDeoptimizationKind() const {
-    return deoptimization_kind_;
-  }
-
-  // Returns true if the method of this breakpoint and the passed in method should be considered the
-  // same. That is, they are either the same method or they are copied from the same method.
-  bool IsInMethod(ArtMethod* m) const REQUIRES_SHARED(Locks::mutator_lock_) {
-    return method_ == m->GetCanonicalMethod(kRuntimePointerSize);
-  }
-
- private:
-  // The location of this breakpoint.
-  ArtMethod* method_;
-  uint32_t dex_pc_;
-
-  // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
-  DeoptimizationRequest::Kind deoptimization_kind_;
-};
-
-static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  os << StringPrintf("Breakpoint[%s @%#x]", ArtMethod::PrettyMethod(rhs.Method()).c_str(),
-                     rhs.DexPc());
-  return os;
-}
-
-class DebugInstrumentationListener final : public instrumentation::InstrumentationListener {
- public:
-  DebugInstrumentationListener() {}
-  virtual ~DebugInstrumentationListener() {}
-
-  void MethodEntered(Thread* thread,
-                     Handle<mirror::Object> this_object,
-                     ArtMethod* method,
-                     uint32_t dex_pc)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (method->IsNative()) {
-      // TODO: post location events is a suspension point and native method entry stubs aren't.
-      return;
-    }
-    if (IsListeningToDexPcMoved()) {
-      // We also listen to kDexPcMoved instrumentation event so we know the DexPcMoved method is
-      // going to be called right after us. To avoid sending JDWP events twice for this location,
-      // we report the event in DexPcMoved. However, we must remind this is method entry so we
-      // send the METHOD_ENTRY event. And we can also group it with other events for this location
-      // like BREAKPOINT or SINGLE_STEP (or even METHOD_EXIT if this is a RETURN instruction).
-      thread->SetDebugMethodEntry();
-    } else if (IsListeningToMethodExit() && IsReturn(method, dex_pc)) {
-      // We also listen to kMethodExited instrumentation event and the current instruction is a
-      // RETURN so we know the MethodExited method is going to be called right after us. To avoid
-      // sending JDWP events twice for this location, we report the event(s) in MethodExited.
-      // However, we must remind this is method entry so we send the METHOD_ENTRY event. And we can
-      // also group it with other events for this location like BREAKPOINT or SINGLE_STEP.
-      thread->SetDebugMethodEntry();
-    } else {
-      Dbg::UpdateDebugger(thread, this_object.Get(), method, 0, Dbg::kMethodEntry, nullptr);
-    }
-  }
-
-  void MethodExited(Thread* thread,
-                    Handle<mirror::Object> this_object,
-                    ArtMethod* method,
-                    uint32_t dex_pc,
-                    const JValue& return_value)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (method->IsNative()) {
-      // TODO: post location events is a suspension point and native method entry stubs aren't.
-      return;
-    }
-    uint32_t events = Dbg::kMethodExit;
-    if (thread->IsDebugMethodEntry()) {
-      // It is also the method entry.
-      DCHECK(IsReturn(method, dex_pc));
-      events |= Dbg::kMethodEntry;
-      thread->ClearDebugMethodEntry();
-    }
-    Dbg::UpdateDebugger(thread, this_object.Get(), method, dex_pc, events, &return_value);
-  }
-
-  void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED,
-                    Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
-                    ArtMethod* method,
-                    uint32_t dex_pc)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
-    // We're not recorded to listen to this kind of event, so complain.
-    LOG(ERROR) << "Unexpected method unwind event in debugger " << ArtMethod::PrettyMethod(method)
-               << " " << dex_pc;
-  }
-
-  void DexPcMoved(Thread* thread,
-                  Handle<mirror::Object> this_object,
-                  ArtMethod* method,
-                  uint32_t new_dex_pc)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
-      // We also listen to kMethodExited instrumentation event and the current instruction is a
-      // RETURN so we know the MethodExited method is going to be called right after us. Like in
-      // MethodEntered, we delegate event reporting to MethodExited.
-      // Besides, if this RETURN instruction is the only one in the method, we can send multiple
-      // JDWP events in the same packet: METHOD_ENTRY, METHOD_EXIT, BREAKPOINT and/or SINGLE_STEP.
-      // Therefore, we must not clear the debug method entry flag here.
-    } else {
-      uint32_t events = 0;
-      if (thread->IsDebugMethodEntry()) {
-        // It is also the method entry.
-        events = Dbg::kMethodEntry;
-        thread->ClearDebugMethodEntry();
-      }
-      Dbg::UpdateDebugger(thread, this_object.Get(), method, new_dex_pc, events, nullptr);
-    }
-  }
-
-  void FieldRead(Thread* thread ATTRIBUTE_UNUSED,
-                 Handle<mirror::Object> this_object,
-                 ArtMethod* method,
-                 uint32_t dex_pc,
-                 ArtField* field)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
-    Dbg::PostFieldAccessEvent(method, dex_pc, this_object.Get(), field);
-  }
-
-  void FieldWritten(Thread* thread ATTRIBUTE_UNUSED,
-                    Handle<mirror::Object> this_object,
-                    ArtMethod* method,
-                    uint32_t dex_pc,
-                    ArtField* field,
-                    const JValue& field_value)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
-    Dbg::PostFieldModificationEvent(method, dex_pc, this_object.Get(), field, &field_value);
-  }
-
-  void ExceptionThrown(Thread* thread ATTRIBUTE_UNUSED,
-                       Handle<mirror::Throwable> exception_object)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
-    Dbg::PostException(exception_object.Get());
-  }
-
-  // We only care about branches in the Jit.
-  void Branch(Thread* /*thread*/, ArtMethod* method, uint32_t dex_pc, int32_t dex_pc_offset)
-      override REQUIRES_SHARED(Locks::mutator_lock_) {
-    LOG(ERROR) << "Unexpected branch event in debugger " << ArtMethod::PrettyMethod(method)
-               << " " << dex_pc << ", " << dex_pc_offset;
-  }
-
-  // TODO Might be worth it to post ExceptionCatch event.
-  void ExceptionHandled(Thread* thread ATTRIBUTE_UNUSED,
-                        Handle<mirror::Throwable> throwable ATTRIBUTE_UNUSED) override {
-    LOG(ERROR) << "Unexpected exception handled event in debugger";
-  }
-
-  // TODO Might be worth it to implement this.
-  void WatchedFramePop(Thread* thread ATTRIBUTE_UNUSED,
-                       const ShadowFrame& frame ATTRIBUTE_UNUSED) override {
-    LOG(ERROR) << "Unexpected WatchedFramePop event in debugger";
-  }
-
- private:
-  static bool IsReturn(ArtMethod* method, uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) {
-    return method->DexInstructions().InstructionAt(dex_pc).IsReturn();
-  }
-
-  static bool IsListeningToDexPcMoved() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return IsListeningTo(instrumentation::Instrumentation::kDexPcMoved);
-  }
-
-  static bool IsListeningToMethodExit() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return IsListeningTo(instrumentation::Instrumentation::kMethodExited);
-  }
-
-  static bool IsListeningTo(instrumentation::Instrumentation::InstrumentationEvent event)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    return (Dbg::GetInstrumentationEvents() & event) != 0;
-  }
-
-  DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
-} gDebugInstrumentationListener;
-
 // JDWP is allowed unless the Zygote forbids it.
 static bool gJdwpAllowed = true;
 
-// Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line?
-static bool gJdwpConfigured = false;
-
-// JDWP options for debugging. Only valid if IsJdwpConfigured() is true.
-static JDWP::JdwpOptions gJdwpOptions;
-
-// Runtime JDWP state.
-static JDWP::JdwpState* gJdwpState = nullptr;
-static bool gDebuggerConnected;  // debugger or DDMS is connected.
-
 static bool gDdmThreadNotification = false;
 
 // DDMS GC-related settings.
@@ -315,287 +108,7 @@
 static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
 static Dbg::HpsgWhat gDdmNhsgWhat;
 
-bool Dbg::gDebuggerActive = false;
-bool Dbg::gDisposed = false;
-ObjectRegistry* Dbg::gRegistry = nullptr;
-DebuggerActiveMethodInspectionCallback Dbg::gDebugActiveCallback;
-DebuggerDdmCallback Dbg::gDebugDdmCallback;
-InternalDebuggerControlCallback Dbg::gDebuggerControlCallback;
-
-// Deoptimization support.
-std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
-size_t Dbg::full_deoptimization_event_count_ = 0;
-
-// Instrumentation event reference counters.
-size_t Dbg::dex_pc_change_event_ref_count_ = 0;
-size_t Dbg::method_enter_event_ref_count_ = 0;
-size_t Dbg::method_exit_event_ref_count_ = 0;
-size_t Dbg::field_read_event_ref_count_ = 0;
-size_t Dbg::field_write_event_ref_count_ = 0;
-size_t Dbg::exception_catch_event_ref_count_ = 0;
-uint32_t Dbg::instrumentation_events_ = 0;
-
 Dbg::DbgThreadLifecycleCallback Dbg::thread_lifecycle_callback_;
-Dbg::DbgClassLoadCallback Dbg::class_load_callback_;
-
-void DebuggerDdmCallback::DdmPublishChunk(uint32_t type, const ArrayRef<const uint8_t>& data) {
-  if (gJdwpState == nullptr) {
-    VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
-  } else {
-    iovec vec[1];
-    vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(data.data()));
-    vec[0].iov_len = data.size();
-    gJdwpState->DdmSendChunkV(type, vec, 1);
-  }
-}
-
-bool DebuggerActiveMethodInspectionCallback::IsMethodBeingInspected(ArtMethod* m ATTRIBUTE_UNUSED) {
-  return Dbg::IsDebuggerActive();
-}
-
-bool DebuggerActiveMethodInspectionCallback::IsMethodSafeToJit(ArtMethod* m) {
-  return !Dbg::MethodHasAnyBreakpoints(m);
-}
-
-bool DebuggerActiveMethodInspectionCallback::MethodNeedsDebugVersion(
-    ArtMethod* m ATTRIBUTE_UNUSED) {
-  return Dbg::IsDebuggerActive();
-}
-
-void InternalDebuggerControlCallback::StartDebugger() {
-  // Release the mutator lock.
-  ScopedThreadStateChange stsc(art::Thread::Current(), kNative);
-  Dbg::StartJdwp();
-}
-
-void InternalDebuggerControlCallback::StopDebugger() {
-  Dbg::StopJdwp();
-}
-
-bool InternalDebuggerControlCallback::IsDebuggerConfigured() {
-  return Dbg::IsJdwpConfigured();
-}
-
-// Breakpoints.
-static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
-
-void DebugInvokeReq::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
-  receiver.VisitRootIfNonNull(visitor, root_info);  // null for static method call.
-  klass.VisitRoot(visitor, root_info);
-}
-
-void SingleStepControl::AddDexPc(uint32_t dex_pc) {
-  dex_pcs_.insert(dex_pc);
-}
-
-bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const {
-  return dex_pcs_.find(dex_pc) == dex_pcs_.end();
-}
-
-static bool IsBreakpoint(ArtMethod* m, uint32_t dex_pc)
-    REQUIRES(!Locks::breakpoint_lock_)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
-  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
-    if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].IsInMethod(m)) {
-      VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
-      return true;
-    }
-  }
-  return false;
-}
-
-static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
-    REQUIRES(!Locks::thread_suspend_count_lock_) {
-  MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
-  // A thread may be suspended for GC; in this code, we really want to know whether
-  // there's a debugger suspension active.
-  return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0;
-}
-
-static ObjPtr<mirror::Array> DecodeNonNullArray(JDWP::RefTypeId id, JDWP::JdwpError* error)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjPtr<mirror::Object> o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
-  if (o == nullptr) {
-    *error = JDWP::ERR_INVALID_OBJECT;
-    return nullptr;
-  }
-  if (!o->IsArrayInstance()) {
-    *error = JDWP::ERR_INVALID_ARRAY;
-    return nullptr;
-  }
-  *error = JDWP::ERR_NONE;
-  return o->AsArray();
-}
-
-static ObjPtr<mirror::Class> DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError* error)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjPtr<mirror::Object> o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
-  if (o == nullptr) {
-    *error = JDWP::ERR_INVALID_OBJECT;
-    return nullptr;
-  }
-  if (!o->IsClass()) {
-    *error = JDWP::ERR_INVALID_CLASS;
-    return nullptr;
-  }
-  *error = JDWP::ERR_NONE;
-  return o->AsClass();
-}
-
-static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id,
-                            JDWP::JdwpError* error)
-    REQUIRES_SHARED(Locks::mutator_lock_)
-    REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) {
-  ObjPtr<mirror::Object> thread_peer =
-      Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id, error);
-  if (thread_peer == nullptr) {
-    // This isn't even an object.
-    *error = JDWP::ERR_INVALID_OBJECT;
-    return nullptr;
-  }
-
-  ObjPtr<mirror::Class> java_lang_Thread =
-      soa.Decode<mirror::Class>(WellKnownClasses::java_lang_Thread);
-  if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) {
-    // This isn't a thread.
-    *error = JDWP::ERR_INVALID_THREAD;
-    return nullptr;
-  }
-
-  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
-  Thread* thread = Thread::FromManagedThread(soa, thread_peer);
-  // If thread is null then this a java.lang.Thread without a Thread*. Must be a un-started or a
-  // zombie.
-  *error = (thread == nullptr) ? JDWP::ERR_THREAD_NOT_ALIVE : JDWP::ERR_NONE;
-  return thread;
-}
-
-static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) {
-  // JDWP deliberately uses the descriptor characters' ASCII values for its enum.
-  // Note that by "basic" we mean that we don't get more specific than JT_OBJECT.
-  return static_cast<JDWP::JdwpTag>(descriptor[0]);
-}
-
-static JDWP::JdwpTag BasicTagFromClass(ObjPtr<mirror::Class> klass)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  std::string temp;
-  const char* descriptor = klass->GetDescriptor(&temp);
-  return BasicTagFromDescriptor(descriptor);
-}
-
-static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  CHECK(c != nullptr);
-  if (c->IsArrayClass()) {
-    return JDWP::JT_ARRAY;
-  }
-  if (c->IsStringClass()) {
-    return JDWP::JT_STRING;
-  }
-  if (c->IsClassClass()) {
-    return JDWP::JT_CLASS_OBJECT;
-  }
-  {
-    ObjPtr<mirror::Class> thread_class =
-        soa.Decode<mirror::Class>(WellKnownClasses::java_lang_Thread);
-    if (thread_class->IsAssignableFrom(c)) {
-      return JDWP::JT_THREAD;
-    }
-  }
-  {
-    ObjPtr<mirror::Class> thread_group_class =
-        soa.Decode<mirror::Class>(WellKnownClasses::java_lang_ThreadGroup);
-    if (thread_group_class->IsAssignableFrom(c)) {
-      return JDWP::JT_THREAD_GROUP;
-    }
-  }
-  {
-    ObjPtr<mirror::Class> class_loader_class =
-        soa.Decode<mirror::Class>(WellKnownClasses::java_lang_ClassLoader);
-    if (class_loader_class->IsAssignableFrom(c)) {
-      return JDWP::JT_CLASS_LOADER;
-    }
-  }
-  return JDWP::JT_OBJECT;
-}
-
-/*
- * Objects declared to hold Object might actually hold a more specific
- * type.  The debugger may take a special interest in these (e.g. it
- * wants to display the contents of Strings), so we want to return an
- * appropriate tag.
- *
- * Null objects are tagged JT_OBJECT.
- */
-JDWP::JdwpTag Dbg::TagFromObject(const ScopedObjectAccessUnchecked& soa, ObjPtr<mirror::Object> o) {
-  return (o == nullptr) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass());
-}
-
-static bool IsPrimitiveTag(JDWP::JdwpTag tag) {
-  switch (tag) {
-  case JDWP::JT_BOOLEAN:
-  case JDWP::JT_BYTE:
-  case JDWP::JT_CHAR:
-  case JDWP::JT_FLOAT:
-  case JDWP::JT_DOUBLE:
-  case JDWP::JT_INT:
-  case JDWP::JT_LONG:
-  case JDWP::JT_SHORT:
-  case JDWP::JT_VOID:
-    return true;
-  default:
-    return false;
-  }
-}
-
-void Dbg::StartJdwp() {
-  if (!gJdwpAllowed || !IsJdwpConfigured()) {
-    // No JDWP for you!
-    return;
-  }
-
-  CHECK(gRegistry == nullptr);
-  gRegistry = new ObjectRegistry;
-
-  {
-    // Setup the Ddm listener
-    ScopedObjectAccess soa(Thread::Current());
-    Runtime::Current()->GetRuntimeCallbacks()->AddDdmCallback(&gDebugDdmCallback);
-  }
-
-  // Init JDWP if the debugger is enabled. This may connect out to a
-  // debugger, passively listen for a debugger, or block waiting for a
-  // debugger.
-  gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
-  if (gJdwpState == nullptr) {
-    // We probably failed because some other process has the port already, which means that
-    // if we don't abort the user is likely to think they're talking to us when they're actually
-    // talking to that other process.
-    LOG(FATAL) << "Debugger thread failed to initialize";
-  }
-
-  // If a debugger has already attached, send the "welcome" message.
-  // This may cause us to suspend all threads.
-  if (gJdwpState->IsActive()) {
-    ScopedObjectAccess soa(Thread::Current());
-    gJdwpState->PostVMStart();
-  }
-}
-
-void Dbg::StopJdwp() {
-  // Post VM_DEATH event before the JDWP connection is closed (either by the JDWP thread or the
-  // destruction of gJdwpState).
-  if (gJdwpState != nullptr && gJdwpState->IsActive()) {
-    gJdwpState->PostVMDeath();
-  }
-  // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
-  Dispose();
-  delete gJdwpState;
-  gJdwpState = nullptr;
-  delete gRegistry;
-  gRegistry = nullptr;
-}
 
 void Dbg::GcDidFinish() {
   if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
@@ -623,2967 +136,8 @@
   return gJdwpAllowed;
 }
 
-DebugInvokeReq* Dbg::GetInvokeReq() {
-  return Thread::Current()->GetInvokeReq();
-}
-
-Thread* Dbg::GetDebugThread() {
-  return (gJdwpState != nullptr) ? gJdwpState->GetDebugThread() : nullptr;
-}
-
-void Dbg::ClearWaitForEventThread() {
-  gJdwpState->ReleaseJdwpTokenForEvent();
-}
-
-void Dbg::Connected() {
-  CHECK(!gDebuggerConnected);
-  VLOG(jdwp) << "JDWP has attached";
-  gDebuggerConnected = true;
-  gDisposed = false;
-}
-
-bool Dbg::RequiresDeoptimization() {
-  // We don't need deoptimization if everything runs with interpreter after
-  // enabling -Xint mode.
-  return !Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly();
-}
-
-void Dbg::GoActive() {
-  // Enable all debugging features, including scans for breakpoints.
-  // This is a no-op if we're already active.
-  // Only called from the JDWP handler thread.
-  if (IsDebuggerActive()) {
-    return;
-  }
-
-  Thread* const self = Thread::Current();
-  {
-    // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected?
-    ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
-    CHECK_EQ(gBreakpoints.size(), 0U);
-  }
-
-  {
-    MutexLock mu(self, *Locks::deoptimization_lock_);
-    CHECK_EQ(deoptimization_requests_.size(), 0U);
-    CHECK_EQ(full_deoptimization_event_count_, 0U);
-    CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
-    CHECK_EQ(method_enter_event_ref_count_, 0U);
-    CHECK_EQ(method_exit_event_ref_count_, 0U);
-    CHECK_EQ(field_read_event_ref_count_, 0U);
-    CHECK_EQ(field_write_event_ref_count_, 0U);
-    CHECK_EQ(exception_catch_event_ref_count_, 0U);
-  }
-
-  Runtime* runtime = Runtime::Current();
-  // Best effort deoptimization if the runtime is non-Java debuggable. This happens when
-  // ro.debuggable is set, but the application is not debuggable, or when a standalone
-  // dalvikvm invocation is not passed the debuggable option (-Xcompiler-option --debuggable).
-  //
-  // The performance cost of this is non-negligible during native-debugging due to the
-  // forced JIT, so we keep the AOT code in that case in exchange for limited native debugging.
-  ScopedSuspendAll ssa(__FUNCTION__);
-  if (!runtime->IsJavaDebuggable() &&
-      !runtime->GetInstrumentation()->IsForcedInterpretOnly() &&
-      !runtime->IsNativeDebuggable()) {
-    runtime->DeoptimizeBootImage();
-  }
-
-  if (RequiresDeoptimization()) {
-    runtime->GetInstrumentation()->EnableDeoptimization();
-  }
-  instrumentation_events_ = 0;
-  Runtime::DoAndMaybeSwitchInterpreter([=](){ gDebuggerActive = true; });
-  Runtime::Current()->GetRuntimeCallbacks()->AddMethodInspectionCallback(&gDebugActiveCallback);
-  LOG(INFO) << "Debugger is active";
-}
-
-void Dbg::Disconnected() {
-  CHECK(gDebuggerConnected);
-
-  LOG(INFO) << "Debugger is no longer active";
-
-  // Suspend all threads and exclusively acquire the mutator lock. Remove the debugger as a listener
-  // and clear the object registry.
-  Runtime* runtime = Runtime::Current();
-  Thread* self = Thread::Current();
-  {
-    // Required for DisableDeoptimization.
-    gc::ScopedGCCriticalSection gcs(self,
-                                    gc::kGcCauseInstrumentation,
-                                    gc::kCollectorTypeInstrumentation);
-    ScopedSuspendAll ssa(__FUNCTION__);
-    // Debugger may not be active at this point.
-    if (IsDebuggerActive()) {
-      {
-        // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
-        // This prevents us from having any pending deoptimization request when the debugger attaches
-        // to us again while no event has been requested yet.
-        MutexLock mu(self, *Locks::deoptimization_lock_);
-        deoptimization_requests_.clear();
-        full_deoptimization_event_count_ = 0U;
-      }
-      if (instrumentation_events_ != 0) {
-        runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
-                                                      instrumentation_events_);
-        instrumentation_events_ = 0;
-      }
-      if (RequiresDeoptimization()) {
-        runtime->GetInstrumentation()->DisableDeoptimization(kDbgInstrumentationKey);
-      }
-      Runtime::DoAndMaybeSwitchInterpreter([=](){ gDebuggerActive = false; });
-      Runtime::Current()->GetRuntimeCallbacks()->RemoveMethodInspectionCallback(
-          &gDebugActiveCallback);
-    }
-  }
-
-  {
-    ScopedObjectAccess soa(self);
-    gRegistry->Clear();
-  }
-
-  gDebuggerConnected = false;
-}
-
-void Dbg::ConfigureJdwp(const JDWP::JdwpOptions& jdwp_options) {
-  CHECK_NE(jdwp_options.transport, JDWP::kJdwpTransportUnknown);
-  gJdwpOptions = jdwp_options;
-  gJdwpConfigured = true;
-  Runtime::Current()->GetRuntimeCallbacks()->AddDebuggerControlCallback(&gDebuggerControlCallback);
-}
-
-bool Dbg::IsJdwpConfigured() {
-  return gJdwpConfigured;
-}
-
-int64_t Dbg::LastDebuggerActivity() {
-  return gJdwpState->LastDebuggerActivity();
-}
-
-void Dbg::UndoDebuggerSuspensions() {
-  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
-}
-
-std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
-  JDWP::JdwpError error;
-  ObjPtr<mirror::Object> o = gRegistry->Get<mirror::Object*>(class_id, &error);
-  if (o == nullptr) {
-    if (error == JDWP::ERR_NONE) {
-      return "null";
-    } else {
-      return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
-    }
-  }
-  if (!o->IsClass()) {
-    return StringPrintf("non-class %p", o.Ptr());  // This is only used for debugging output anyway.
-  }
-  return GetClassName(o->AsClass());
-}
-
-std::string Dbg::GetClassName(ObjPtr<mirror::Class> klass) {
-  if (klass == nullptr) {
-    return "null";
-  }
-  std::string temp;
-  return DescriptorToName(klass->GetDescriptor(&temp));
-}
-
-JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId* class_object_id) {
-  JDWP::JdwpError status;
-  ObjPtr<mirror::Class> c = DecodeClass(id, &status);
-  if (c == nullptr) {
-    *class_object_id = 0;
-    return status;
-  }
-  *class_object_id = gRegistry->Add(c);
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId* superclass_id) {
-  JDWP::JdwpError status;
-  ObjPtr<mirror::Class> c = DecodeClass(id, &status);
-  if (c == nullptr) {
-    *superclass_id = 0;
-    return status;
-  }
-  if (c->IsInterface()) {
-    // http://code.google.com/p/android/issues/detail?id=20856
-    *superclass_id = 0;
-  } else {
-    *superclass_id = gRegistry->Add(c->GetSuperClass());
-  }
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
-  JDWP::JdwpError error;
-  ObjPtr<mirror::Class> c = DecodeClass(id, &error);
-  if (c == nullptr) {
-    return error;
-  }
-  expandBufAddObjectId(pReply, gRegistry->Add(c->GetClassLoader()));
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
-  JDWP::JdwpError error;
-  ObjPtr<mirror::Class> c = DecodeClass(id, &error);
-  if (c == nullptr) {
-    return error;
-  }
-
-  uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask;
-
-  // Set ACC_SUPER. Dex files don't contain this flag but only classes are supposed to have it set,
-  // not interfaces.
-  // Class.getModifiers doesn't return it, but JDWP does, so we set it here.
-  if ((access_flags & kAccInterface) == 0) {
-    access_flags |= kAccSuper;
-  }
-
-  expandBufAdd4BE(pReply, access_flags);
-
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply) {
-  JDWP::JdwpError error;
-  Thread* self = Thread::Current();
-  StackHandleScope<1u> hs(self);
-  Handle<mirror::Object> o = hs.NewHandle(gRegistry->Get<mirror::Object*>(object_id, &error));
-  if (o == nullptr) {
-    return JDWP::ERR_INVALID_OBJECT;
-  }
-
-  // Ensure all threads are suspended while we read objects' lock words.
-  CHECK_EQ(self->GetState(), kRunnable);
-
-  MonitorInfo monitor_info;
-  {
-    ScopedThreadSuspension sts(self, kSuspended);
-    ScopedSuspendAll ssa(__FUNCTION__);
-    monitor_info = MonitorInfo(o.Get());
-  }
-  if (monitor_info.owner_ != nullptr) {
-    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeerFromOtherThread()));
-  } else {
-    expandBufAddObjectId(reply, gRegistry->Add(nullptr));
-  }
-  expandBufAdd4BE(reply, monitor_info.entry_count_);
-  expandBufAdd4BE(reply, monitor_info.waiters_.size());
-  for (size_t i = 0; i < monitor_info.waiters_.size(); ++i) {
-    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters_[i]->GetPeerFromOtherThread()));
-  }
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
-                                      std::vector<JDWP::ObjectId>* monitors,
-                                      std::vector<uint32_t>* stack_depths) {
-  struct OwnedMonitorVisitor : public StackVisitor {
-    OwnedMonitorVisitor(Thread* thread, Context* context,
-                        std::vector<JDWP::ObjectId>* monitor_vector,
-                        std::vector<uint32_t>* stack_depth_vector)
-        REQUIRES_SHARED(Locks::mutator_lock_)
-      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-        current_stack_depth(0),
-        monitors(monitor_vector),
-        stack_depths(stack_depth_vector) {}
-
-    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
-    // annotalysis.
-    bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
-      if (!GetMethod()->IsRuntimeMethod()) {
-        Monitor::VisitLocks(this, AppendOwnedMonitors, this);
-        ++current_stack_depth;
-      }
-      return true;
-    }
-
-    static void AppendOwnedMonitors(ObjPtr<mirror::Object> owned_monitor, void* arg)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
-      visitor->monitors->push_back(gRegistry->Add(owned_monitor));
-      visitor->stack_depths->push_back(visitor->current_stack_depth);
-    }
-
-    size_t current_stack_depth;
-    std::vector<JDWP::ObjectId>* const monitors;
-    std::vector<uint32_t>* const stack_depths;
-  };
-
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  JDWP::JdwpError error;
-  Thread* thread = DecodeThread(soa, thread_id, &error);
-  if (thread == nullptr) {
-    return error;
-  }
-  if (!IsSuspendedForDebugger(soa, thread)) {
-    return JDWP::ERR_THREAD_NOT_SUSPENDED;
-  }
-  std::unique_ptr<Context> context(Context::Create());
-  OwnedMonitorVisitor visitor(thread, context.get(), monitors, stack_depths);
-  visitor.WalkStack();
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
-                                         JDWP::ObjectId* contended_monitor) {
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  *contended_monitor = 0;
-  JDWP::JdwpError error;
-  Thread* thread = DecodeThread(soa, thread_id, &error);
-  if (thread == nullptr) {
-    return error;
-  }
-  if (!IsSuspendedForDebugger(soa, thread)) {
-    return JDWP::ERR_THREAD_NOT_SUSPENDED;
-  }
-  ObjPtr<mirror::Object> contended_monitor_obj = Monitor::GetContendedMonitor(thread);
-  // Add() requires the thread_list_lock_ not held to avoid the lock
-  // level violation.
-  *contended_monitor = gRegistry->Add(contended_monitor_obj);
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
-                                       std::vector<uint64_t>* counts) {
-  gc::Heap* heap = Runtime::Current()->GetHeap();
-  heap->CollectGarbage(/* clear_soft_references= */ false, gc::GcCause::kGcCauseDebugger);
-  VariableSizedHandleScope hs(Thread::Current());
-  std::vector<Handle<mirror::Class>> classes;
-  counts->clear();
-  for (size_t i = 0; i < class_ids.size(); ++i) {
-    JDWP::JdwpError error;
-    ObjPtr<mirror::Class> c = DecodeClass(class_ids[i], &error);
-    if (c == nullptr) {
-      return error;
-    }
-    classes.push_back(hs.NewHandle(c));
-    counts->push_back(0);
-  }
-  heap->CountInstances(classes, false, &(*counts)[0]);
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
-                                  std::vector<JDWP::ObjectId>* instances) {
-  gc::Heap* heap = Runtime::Current()->GetHeap();
-  // We only want reachable instances, so do a GC.
-  heap->CollectGarbage(/* clear_soft_references= */ false, gc::GcCause::kGcCauseDebugger);
-  JDWP::JdwpError error;
-  ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
-  if (c == nullptr) {
-    return error;
-  }
-  VariableSizedHandleScope hs(Thread::Current());
-  std::vector<Handle<mirror::Object>> raw_instances;
-  Runtime::Current()->GetHeap()->GetInstances(hs,
-                                              hs.NewHandle(c),
-                                              /* use_is_assignable_from= */ false,
-                                              max_count,
-                                              raw_instances);
-  for (size_t i = 0; i < raw_instances.size(); ++i) {
-    instances->push_back(gRegistry->Add(raw_instances[i].Get()));
-  }
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
-                                         std::vector<JDWP::ObjectId>* referring_objects) {
-  gc::Heap* heap = Runtime::Current()->GetHeap();
-  heap->CollectGarbage(/* clear_soft_references= */ false, gc::GcCause::kGcCauseDebugger);
-  JDWP::JdwpError error;
-  ObjPtr<mirror::Object> o = gRegistry->Get<mirror::Object*>(object_id, &error);
-  if (o == nullptr) {
-    return JDWP::ERR_INVALID_OBJECT;
-  }
-  VariableSizedHandleScope hs(Thread::Current());
-  std::vector<Handle<mirror::Object>> raw_instances;
-  heap->GetReferringObjects(hs, hs.NewHandle(o), max_count, raw_instances);
-  for (size_t i = 0; i < raw_instances.size(); ++i) {
-    referring_objects->push_back(gRegistry->Add(raw_instances[i].Get()));
-  }
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id) {
-  JDWP::JdwpError error;
-  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
-  if (o == nullptr) {
-    return JDWP::ERR_INVALID_OBJECT;
-  }
-  gRegistry->DisableCollection(object_id);
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id) {
-  JDWP::JdwpError error;
-  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
-  // Unlike DisableCollection, JDWP specs do not state an invalid object causes an error. The RI
-  // also ignores these cases and never return an error. However it's not obvious why this command
-  // should behave differently from DisableCollection and IsCollected commands. So let's be more
-  // strict and return an error if this happens.
-  if (o == nullptr) {
-    return JDWP::ERR_INVALID_OBJECT;
-  }
-  gRegistry->EnableCollection(object_id);
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool* is_collected) {
-  *is_collected = true;
-  if (object_id == 0) {
-    // Null object id is invalid.
-    return JDWP::ERR_INVALID_OBJECT;
-  }
-  // JDWP specs state an INVALID_OBJECT error is returned if the object ID is not valid. However
-  // the RI seems to ignore this and assume object has been collected.
-  JDWP::JdwpError error;
-  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
-  if (o != nullptr) {
-    *is_collected = gRegistry->IsCollected(object_id);
-  }
-  return JDWP::ERR_NONE;
-}
-
-void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count) {
-  gRegistry->DisposeObject(object_id, reference_count);
-}
-
-JDWP::JdwpTypeTag Dbg::GetTypeTag(ObjPtr<mirror::Class> klass) {
-  DCHECK(klass != nullptr);
-  if (klass->IsArrayClass()) {
-    return JDWP::TT_ARRAY;
-  } else if (klass->IsInterface()) {
-    return JDWP::TT_INTERFACE;
-  } else {
-    return JDWP::TT_CLASS;
-  }
-}
-
-JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
-  JDWP::JdwpError error;
-  ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
-  if (c == nullptr) {
-    return error;
-  }
-
-  JDWP::JdwpTypeTag type_tag = GetTypeTag(c);
-  expandBufAdd1(pReply, type_tag);
-  expandBufAddRefTypeId(pReply, class_id);
-  return JDWP::ERR_NONE;
-}
-
-// Get the complete list of reference classes (i.e. all classes except
-// the primitive types).
-// Returns a newly-allocated buffer full of RefTypeId values.
-class ClassListCreator : public ClassVisitor {
- public:
-  explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes_(classes) {}
-
-  bool operator()(ObjPtr<mirror::Class> c) override REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (!c->IsPrimitive()) {
-      classes_->push_back(Dbg::GetObjectRegistry()->AddRefType(c));
-    }
-    return true;
-  }
-
- private:
-  std::vector<JDWP::RefTypeId>* const classes_;
-};
-
-void Dbg::GetClassList(std::vector<JDWP::RefTypeId>* classes) {
-  ClassListCreator clc(classes);
-  Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&clc);
-}
-
-JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
-                                  uint32_t* pStatus, std::string* pDescriptor) {
-  JDWP::JdwpError error;
-  ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
-  if (c == nullptr) {
-    return error;
-  }
-
-  if (c->IsArrayClass()) {
-    *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
-    *pTypeTag = JDWP::TT_ARRAY;
-  } else {
-    if (c->IsErroneous()) {
-      *pStatus = JDWP::CS_ERROR;
-    } else {
-      *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED;
-    }
-    *pTypeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
-  }
-
-  if (pDescriptor != nullptr) {
-    std::string temp;
-    *pDescriptor = c->GetDescriptor(&temp);
-  }
-  return JDWP::ERR_NONE;
-}
-
-void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>* ids) {
-  std::vector<ObjPtr<mirror::Class>> classes;
-  Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
-  ids->clear();
-  for (ObjPtr<mirror::Class> c : classes) {
-    ids->push_back(gRegistry->Add(c));
-  }
-}
-
-JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply) {
-  JDWP::JdwpError error;
-  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
-  if (o == nullptr) {
-    return JDWP::ERR_INVALID_OBJECT;
-  }
-
-  JDWP::JdwpTypeTag type_tag = GetTypeTag(o->GetClass());
-  JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass());
-
-  expandBufAdd1(pReply, type_tag);
-  expandBufAddRefTypeId(pReply, type_id);
-
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) {
-  JDWP::JdwpError error;
-  ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
-  if (c == nullptr) {
-    return error;
-  }
-  std::string temp;
-  *signature = c->GetDescriptor(&temp);
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::GetSourceDebugExtension(JDWP::RefTypeId class_id,
-                                             std::string* extension_data) {
-  JDWP::JdwpError error;
-  ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
-  if (c == nullptr) {
-    return error;
-  }
-  StackHandleScope<1> hs(Thread::Current());
-  Handle<mirror::Class> klass(hs.NewHandle(c));
-  const char* data = annotations::GetSourceDebugExtension(klass);
-  if (data == nullptr) {
-    return JDWP::ERR_ABSENT_INFORMATION;
-  }
-  *extension_data = data;
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string* result) {
-  JDWP::JdwpError error;
-  ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
-  if (c == nullptr) {
-    return error;
-  }
-  const char* source_file = c->GetSourceFile();
-  if (source_file == nullptr) {
-    return JDWP::ERR_ABSENT_INFORMATION;
-  }
-  *result = source_file;
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t* tag) {
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  JDWP::JdwpError error;
-  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
-  if (error != JDWP::ERR_NONE) {
-    *tag = JDWP::JT_VOID;
-    return error;
-  }
-  *tag = TagFromObject(soa, o);
-  return JDWP::ERR_NONE;
-}
-
-size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) {
-  switch (tag) {
-  case JDWP::JT_VOID:
-    return 0;
-  case JDWP::JT_BYTE:
-  case JDWP::JT_BOOLEAN:
-    return 1;
-  case JDWP::JT_CHAR:
-  case JDWP::JT_SHORT:
-    return 2;
-  case JDWP::JT_FLOAT:
-  case JDWP::JT_INT:
-    return 4;
-  case JDWP::JT_ARRAY:
-  case JDWP::JT_OBJECT:
-  case JDWP::JT_STRING:
-  case JDWP::JT_THREAD:
-  case JDWP::JT_THREAD_GROUP:
-  case JDWP::JT_CLASS_LOADER:
-  case JDWP::JT_CLASS_OBJECT:
-    return sizeof(JDWP::ObjectId);
-  case JDWP::JT_DOUBLE:
-  case JDWP::JT_LONG:
-    return 8;
-  default:
-    LOG(FATAL) << "Unknown tag " << tag;
-    UNREACHABLE();
-  }
-}
-
-JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int32_t* length) {
-  JDWP::JdwpError error;
-  ObjPtr<mirror::Array> a = DecodeNonNullArray(array_id, &error);
-  if (a == nullptr) {
-    return error;
-  }
-  *length = a->GetLength();
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id,
-                                 int offset,
-                                 int count,
-                                 JDWP::ExpandBuf* pReply) {
-  JDWP::JdwpError error;
-  ObjPtr<mirror::Array> a = DecodeNonNullArray(array_id, &error);
-  if (a == nullptr) {
-    return error;
-  }
-
-  if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
-    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
-    return JDWP::ERR_INVALID_LENGTH;
-  }
-  JDWP::JdwpTag element_tag = BasicTagFromClass(a->GetClass()->GetComponentType());
-  expandBufAdd1(pReply, element_tag);
-  expandBufAdd4BE(pReply, count);
-
-  if (IsPrimitiveTag(element_tag)) {
-    size_t width = GetTagWidth(element_tag);
-    uint8_t* dst = expandBufAddSpace(pReply, count * width);
-    if (width == 8) {
-      const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
-      for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
-    } else if (width == 4) {
-      const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0));
-      for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
-    } else if (width == 2) {
-      const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0));
-      for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
-    } else {
-      const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0));
-      memcpy(dst, &src[offset * width], count * width);
-    }
-  } else {
-    ScopedObjectAccessUnchecked soa(Thread::Current());
-    ObjPtr<mirror::ObjectArray<mirror::Object>> oa = a->AsObjectArray<mirror::Object>();
-    for (int i = 0; i < count; ++i) {
-      ObjPtr<mirror::Object> element = oa->Get(offset + i);
-      JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element)
-                                                        : element_tag;
-      expandBufAdd1(pReply, specific_tag);
-      expandBufAddObjectId(pReply, gRegistry->Add(element));
-    }
-  }
-
-  return JDWP::ERR_NONE;
-}
-
-template <typename T>
-static void CopyArrayData(ObjPtr<mirror::Array> a, JDWP::Request* src, int offset, int count)
-    NO_THREAD_SAFETY_ANALYSIS {
-  // TODO: fix when annotalysis correctly handles non-member functions.
-  DCHECK(a->GetClass()->IsPrimitiveArray());
-
-  T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset));
-  for (int i = 0; i < count; ++i) {
-    *dst++ = src->ReadValue(sizeof(T));
-  }
-}
-
-JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
-                                      JDWP::Request* request) {
-  JDWP::JdwpError error;
-  ObjPtr<mirror::Array> dst = DecodeNonNullArray(array_id, &error);
-  if (dst == nullptr) {
-    return error;
-  }
-
-  if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) {
-    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
-    return JDWP::ERR_INVALID_LENGTH;
-  }
-  JDWP::JdwpTag element_tag = BasicTagFromClass(dst->GetClass()->GetComponentType());
-
-  if (IsPrimitiveTag(element_tag)) {
-    size_t width = GetTagWidth(element_tag);
-    if (width == 8) {
-      CopyArrayData<uint64_t>(dst, request, offset, count);
-    } else if (width == 4) {
-      CopyArrayData<uint32_t>(dst, request, offset, count);
-    } else if (width == 2) {
-      CopyArrayData<uint16_t>(dst, request, offset, count);
-    } else {
-      CopyArrayData<uint8_t>(dst, request, offset, count);
-    }
-  } else {
-    ObjPtr<mirror::ObjectArray<mirror::Object>> oa = dst->AsObjectArray<mirror::Object>();
-    for (int i = 0; i < count; ++i) {
-      JDWP::ObjectId id = request->ReadObjectId();
-      ObjPtr<mirror::Object> o = gRegistry->Get<mirror::Object*>(id, &error);
-      if (error != JDWP::ERR_NONE) {
-        return error;
-      }
-      // Check if the object's type is compatible with the array's type.
-      if (o != nullptr && !o->InstanceOf(oa->GetClass()->GetComponentType())) {
-        return JDWP::ERR_TYPE_MISMATCH;
-      }
-      oa->Set<false>(offset + i, o);
-    }
-  }
-
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::CreateString(const std::string& str, JDWP::ObjectId* new_string_id) {
-  Thread* self = Thread::Current();
-  ObjPtr<mirror::String> new_string = mirror::String::AllocFromModifiedUtf8(self, str.c_str());
-  if (new_string == nullptr) {
-    DCHECK(self->IsExceptionPending());
-    self->ClearException();
-    LOG(ERROR) << "Could not allocate string";
-    *new_string_id = 0;
-    return JDWP::ERR_OUT_OF_MEMORY;
-  }
-  *new_string_id = gRegistry->Add(new_string);
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_object_id) {
-  JDWP::JdwpError error;
-  ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
-  if (c == nullptr) {
-    *new_object_id = 0;
-    return error;
-  }
-  Thread* self = Thread::Current();
-  ObjPtr<mirror::Object> new_object;
-  if (c->IsStringClass()) {
-    // Special case for java.lang.String.
-    gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
-    new_object = mirror::String::AllocEmptyString<true>(self, allocator_type);
-  } else {
-    new_object = c->AllocObject(self);
-  }
-  if (new_object == nullptr) {
-    DCHECK(self->IsExceptionPending());
-    self->ClearException();
-    LOG(ERROR) << "Could not allocate object of type " << mirror::Class::PrettyDescriptor(c);
-    *new_object_id = 0;
-    return JDWP::ERR_OUT_OF_MEMORY;
-  }
-  *new_object_id = gRegistry->Add(new_object);
-  return JDWP::ERR_NONE;
-}
-
-/*
- * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]".
- */
-JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
-                                       JDWP::ObjectId* new_array_id) {
-  JDWP::JdwpError error;
-  ObjPtr<mirror::Class> c = DecodeClass(array_class_id, &error);
-  if (c == nullptr) {
-    *new_array_id = 0;
-    return error;
-  }
-  Thread* self = Thread::Current();
-  gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
-  ObjPtr<mirror::Array> new_array =
-      mirror::Array::Alloc<true>(self, c, length, c->GetComponentSizeShift(), allocator_type);
-  if (new_array == nullptr) {
-    DCHECK(self->IsExceptionPending());
-    self->ClearException();
-    LOG(ERROR) << "Could not allocate array of type " << mirror::Class::PrettyDescriptor(c);
-    *new_array_id = 0;
-    return JDWP::ERR_OUT_OF_MEMORY;
-  }
-  *new_array_id = gRegistry->Add(new_array);
-  return JDWP::ERR_NONE;
-}
-
-JDWP::FieldId Dbg::ToFieldId(const ArtField* f) {
-  return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
-}
-
-static JDWP::MethodId ToMethodId(ArtMethod* m)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  return static_cast<JDWP::MethodId>(
-      reinterpret_cast<uintptr_t>(m->GetCanonicalMethod(kRuntimePointerSize)));
-}
-
-static ArtField* FromFieldId(JDWP::FieldId fid)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  return reinterpret_cast<ArtField*>(static_cast<uintptr_t>(fid));
-}
-
-static ArtMethod* FromMethodId(JDWP::MethodId mid)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(mid));
-}
-
-bool Dbg::MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread) {
-  CHECK(event_thread != nullptr);
-  JDWP::JdwpError error;
-  mirror::Object* expected_thread_peer = gRegistry->Get<mirror::Object*>(
-      expected_thread_id, &error);
-  return expected_thread_peer == event_thread->GetPeerFromOtherThread();
-}
-
-bool Dbg::MatchLocation(const JDWP::JdwpLocation& expected_location,
-                        const JDWP::EventLocation& event_location) {
-  if (expected_location.dex_pc != event_location.dex_pc) {
-    return false;
-  }
-  ArtMethod* m = FromMethodId(expected_location.method_id);
-  return m == event_location.method;
-}
-
-bool Dbg::MatchType(ObjPtr<mirror::Class> event_class, JDWP::RefTypeId class_id) {
-  if (event_class == nullptr) {
-    return false;
-  }
-  JDWP::JdwpError error;
-  ObjPtr<mirror::Class> expected_class = DecodeClass(class_id, &error);
-  CHECK(expected_class != nullptr);
-  return expected_class->IsAssignableFrom(event_class);
-}
-
-bool Dbg::MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
-                     ArtField* event_field) {
-  ArtField* expected_field = FromFieldId(expected_field_id);
-  if (expected_field != event_field) {
-    return false;
-  }
-  return Dbg::MatchType(event_field->GetDeclaringClass(), expected_type_id);
-}
-
-bool Dbg::MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance) {
-  JDWP::JdwpError error;
-  mirror::Object* modifier_instance = gRegistry->Get<mirror::Object*>(expected_instance_id, &error);
-  return modifier_instance == event_instance;
-}
-
-void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc) {
-  if (m == nullptr) {
-    memset(location, 0, sizeof(*location));
-  } else {
-    ObjPtr<mirror::Class> c = m->GetDeclaringClass();
-    location->type_tag = GetTypeTag(c);
-    location->class_id = gRegistry->AddRefType(c);
-    // The RI Seems to return 0 for all obsolete methods. For compatibility we shall do the same.
-    location->method_id = m->IsObsolete() ? 0 : ToMethodId(m);
-    location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc;
-  }
-}
-
-std::string Dbg::GetMethodName(JDWP::MethodId method_id) {
-  ArtMethod* m = FromMethodId(method_id);
-  if (m == nullptr) {
-    return "null";
-  }
-  return m->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName();
-}
-
-bool Dbg::IsMethodObsolete(JDWP::MethodId method_id) {
-  ArtMethod* m = FromMethodId(method_id);
-  if (m == nullptr) {
-    // NB Since we return 0 as MID for obsolete methods we want to default to true here.
-    return true;
-  }
-  return m->IsObsolete();
-}
-
-std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
-  ArtField* f = FromFieldId(field_id);
-  if (f == nullptr) {
-    return "null";
-  }
-  return f->GetName();
-}
-
-/*
- * Augment the access flags for synthetic methods and fields by setting
- * the (as described by the spec) "0xf0000000 bit".  Also, strip out any
- * flags not specified by the Java programming language.
- */
-static uint32_t MangleAccessFlags(uint32_t accessFlags) {
-  accessFlags &= kAccJavaFlagsMask;
-  if ((accessFlags & kAccSynthetic) != 0) {
-    accessFlags |= 0xf0000000;
-  }
-  return accessFlags;
-}
-
-/*
- * Circularly shifts registers so that arguments come first. Debuggers
- * expect slots to begin with arguments, but dex code places them at
- * the end.
- */
-static uint16_t MangleSlot(uint16_t slot, ArtMethod* m)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  CodeItemDataAccessor accessor(m->DexInstructionData());
-  if (!accessor.HasCodeItem()) {
-    // We should not get here for a method without code (native, proxy or abstract). Log it and
-    // return the slot as is since all registers are arguments.
-    LOG(WARNING) << "Trying to mangle slot for method without code " << m->PrettyMethod();
-    return slot;
-  }
-  uint16_t ins_size = accessor.InsSize();
-  uint16_t locals_size = accessor.RegistersSize() - ins_size;
-  if (slot >= locals_size) {
-    return slot - locals_size;
-  } else {
-    return slot + ins_size;
-  }
-}
-
-static size_t GetMethodNumArgRegistersIncludingThis(ArtMethod* method)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  uint32_t num_registers = ArtMethod::NumArgRegisters(method->GetShorty());
-  if (!method->IsStatic()) {
-    ++num_registers;
-  }
-  return num_registers;
-}
-
-/*
- * Circularly shifts registers so that arguments come last. Reverts
- * slots to dex style argument placement.
- */
-static uint16_t DemangleSlot(uint16_t slot, ArtMethod* m, JDWP::JdwpError* error)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  CodeItemDataAccessor accessor(m->DexInstructionData());
-  if (!accessor.HasCodeItem()) {
-    // We should not get here for a method without code (native, proxy or abstract). Log it and
-    // return the slot as is since all registers are arguments.
-    LOG(WARNING) << "Trying to demangle slot for method without code "
-                 << m->PrettyMethod();
-    uint16_t vreg_count = GetMethodNumArgRegistersIncludingThis(m);
-    if (slot < vreg_count) {
-      *error = JDWP::ERR_NONE;
-      return slot;
-    }
-  } else {
-    if (slot < accessor.RegistersSize()) {
-      uint16_t ins_size = accessor.InsSize();
-      uint16_t locals_size = accessor.RegistersSize() - ins_size;
-      *error = JDWP::ERR_NONE;
-      return (slot < ins_size) ? slot + locals_size : slot - ins_size;
-    }
-  }
-
-  // Slot is invalid in the method.
-  LOG(ERROR) << "Invalid local slot " << slot << " for method " << m->PrettyMethod();
-  *error = JDWP::ERR_INVALID_SLOT;
-  return DexFile::kDexNoIndex16;
-}
-
-JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic,
-                                          JDWP::ExpandBuf* pReply) {
-  JDWP::JdwpError error;
-  ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
-  if (c == nullptr) {
-    return error;
-  }
-
-  size_t instance_field_count = c->NumInstanceFields();
-  size_t static_field_count = c->NumStaticFields();
-
-  expandBufAdd4BE(pReply, instance_field_count + static_field_count);
-
-  for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
-    ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) :
-        c->GetStaticField(i - instance_field_count);
-    expandBufAddFieldId(pReply, ToFieldId(f));
-    expandBufAddUtf8String(pReply, f->GetName());
-    expandBufAddUtf8String(pReply, f->GetTypeDescriptor());
-    if (with_generic) {
-      static const char genericSignature[1] = "";
-      expandBufAddUtf8String(pReply, genericSignature);
-    }
-    expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags()));
-  }
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic,
-                                           JDWP::ExpandBuf* pReply) {
-  JDWP::JdwpError error;
-  ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
-  if (c == nullptr) {
-    return error;
-  }
-
-  expandBufAdd4BE(pReply, c->NumMethods());
-
-  auto* cl = Runtime::Current()->GetClassLinker();
-  auto ptr_size = cl->GetImagePointerSize();
-  for (ArtMethod& m : c->GetMethods(ptr_size)) {
-    expandBufAddMethodId(pReply, ToMethodId(&m));
-    expandBufAddUtf8String(pReply, m.GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName());
-    expandBufAddUtf8String(
-        pReply, m.GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetSignature().ToString());
-    if (with_generic) {
-      const char* generic_signature = "";
-      expandBufAddUtf8String(pReply, generic_signature);
-    }
-    expandBufAdd4BE(pReply, MangleAccessFlags(m.GetAccessFlags()));
-  }
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
-  JDWP::JdwpError error;
-  Thread* self = Thread::Current();
-  ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
-  if (c == nullptr) {
-    return error;
-  }
-  size_t interface_count = c->NumDirectInterfaces();
-  expandBufAdd4BE(pReply, interface_count);
-  for (size_t i = 0; i < interface_count; ++i) {
-    ObjPtr<mirror::Class> interface = mirror::Class::GetDirectInterface(self, c, i);
-    DCHECK(interface != nullptr);
-    expandBufAddRefTypeId(pReply, gRegistry->AddRefType(interface));
-  }
-  return JDWP::ERR_NONE;
-}
-
-void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply) {
-  ArtMethod* m = FromMethodId(method_id);
-  CodeItemDebugInfoAccessor accessor(m->DexInstructionDebugInfo());
-  uint64_t start, end;
-  if (!accessor.HasCodeItem()) {
-    DCHECK(m->IsNative() || m->IsProxyMethod());
-    start = -1;
-    end = -1;
-  } else {
-    start = 0;
-    // Return the index of the last instruction
-    end = accessor.InsnsSizeInCodeUnits() - 1;
-  }
-
-  expandBufAdd8BE(pReply, start);
-  expandBufAdd8BE(pReply, end);
-
-  // Add numLines later
-  size_t numLinesOffset = expandBufGetLength(pReply);
-  expandBufAdd4BE(pReply, 0);
-
-  int numItems = 0;
-  accessor.DecodeDebugPositionInfo([&](const DexFile::PositionInfo& entry) {
-    expandBufAdd8BE(pReply, entry.address_);
-    expandBufAdd4BE(pReply, entry.line_);
-    numItems++;
-    return false;
-  });
-
-  JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, numItems);
-}
-
-void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic,
-                              JDWP::ExpandBuf* pReply) {
-  ArtMethod* m = FromMethodId(method_id);
-  CodeItemDebugInfoAccessor accessor(m->DexInstructionDebugInfo());
-
-  // arg_count considers doubles and longs to take 2 units.
-  // variable_count considers everything to take 1 unit.
-  expandBufAdd4BE(pReply, GetMethodNumArgRegistersIncludingThis(m));
-
-  // We don't know the total number of variables yet, so leave a blank and update it later.
-  size_t variable_count_offset = expandBufGetLength(pReply);
-  expandBufAdd4BE(pReply, 0);
-
-  size_t variable_count = 0;
-
-  if (accessor.HasCodeItem()) {
-    accessor.DecodeDebugLocalInfo(m->IsStatic(),
-                                  m->GetDexMethodIndex(),
-                                  [&](const DexFile::LocalInfo& entry)
-        REQUIRES_SHARED(Locks::mutator_lock_) {
-      uint16_t slot = entry.reg_;
-      VLOG(jdwp) << StringPrintf("    %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
-                                 variable_count,
-                                 entry.start_address_,
-                                 entry.end_address_ - entry.start_address_,
-                                 entry.name_,
-                                 entry.descriptor_, entry.signature_,
-                                 slot,
-                                 MangleSlot(slot, m));
-
-      slot = MangleSlot(slot, m);
-
-      expandBufAdd8BE(pReply, entry.start_address_);
-      expandBufAddUtf8String(pReply, entry.name_);
-      expandBufAddUtf8String(pReply, entry.descriptor_);
-      if (with_generic) {
-        expandBufAddUtf8String(pReply, entry.signature_);
-      }
-      expandBufAdd4BE(pReply, entry.end_address_- entry.start_address_);
-      expandBufAdd4BE(pReply, slot);
-
-      ++variable_count;
-    });
-  }
-
-  JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, variable_count);
-}
-
-void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
-                                  JDWP::ExpandBuf* pReply) {
-  ArtMethod* m = FromMethodId(method_id);
-  JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty());
-  OutputJValue(tag, return_value, pReply);
-}
-
-void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
-                           JDWP::ExpandBuf* pReply) {
-  ArtField* f = FromFieldId(field_id);
-  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
-  OutputJValue(tag, field_value, pReply);
-}
-
-JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
-                                  std::vector<uint8_t>* bytecodes) {
-  ArtMethod* m = FromMethodId(method_id);
-  if (m == nullptr) {
-    return JDWP::ERR_INVALID_METHODID;
-  }
-  CodeItemDataAccessor accessor(m->DexInstructionData());
-  size_t byte_count = accessor.InsnsSizeInCodeUnits() * 2;
-  const uint8_t* begin = reinterpret_cast<const uint8_t*>(accessor.Insns());
-  const uint8_t* end = begin + byte_count;
-  for (const uint8_t* p = begin; p != end; ++p) {
-    bytecodes->push_back(*p);
-  }
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) {
-  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
-}
-
-JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) {
-  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
-}
-
-static JValue GetArtFieldValue(ArtField* f, mirror::Object* o)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
-  JValue field_value;
-  switch (fieldType) {
-    case Primitive::kPrimBoolean:
-      field_value.SetZ(f->GetBoolean(o));
-      return field_value;
-
-    case Primitive::kPrimByte:
-      field_value.SetB(f->GetByte(o));
-      return field_value;
-
-    case Primitive::kPrimChar:
-      field_value.SetC(f->GetChar(o));
-      return field_value;
-
-    case Primitive::kPrimShort:
-      field_value.SetS(f->GetShort(o));
-      return field_value;
-
-    case Primitive::kPrimInt:
-    case Primitive::kPrimFloat:
-      // Int and Float must be treated as 32-bit values in JDWP.
-      field_value.SetI(f->GetInt(o));
-      return field_value;
-
-    case Primitive::kPrimLong:
-    case Primitive::kPrimDouble:
-      // Long and Double must be treated as 64-bit values in JDWP.
-      field_value.SetJ(f->GetLong(o));
-      return field_value;
-
-    case Primitive::kPrimNot:
-      field_value.SetL(f->GetObject(o));
-      return field_value;
-
-    case Primitive::kPrimVoid:
-      LOG(FATAL) << "Attempt to read from field of type 'void'";
-      UNREACHABLE();
-  }
-  LOG(FATAL) << "Attempt to read from field of unknown type";
-  UNREACHABLE();
-}
-
-static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
-                                         JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
-                                         bool is_static)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  JDWP::JdwpError error;
-  ObjPtr<mirror::Class> c = DecodeClass(ref_type_id, &error);
-  if (ref_type_id != 0 && c == nullptr) {
-    return error;
-  }
-
-  Thread* self = Thread::Current();
-  StackHandleScope<2> hs(self);
-  MutableHandle<mirror::Object>
-      o(hs.NewHandle(Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error)));
-  if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
-    return JDWP::ERR_INVALID_OBJECT;
-  }
-  ArtField* f = FromFieldId(field_id);
-
-  ObjPtr<mirror::Class> receiver_class = c;
-  if (receiver_class == nullptr && o != nullptr) {
-    receiver_class = o->GetClass();
-  }
-
-  // TODO: should we give up now if receiver_class is null?
-  if (receiver_class != nullptr && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
-    LOG(INFO) << "ERR_INVALID_FIELDID: " << f->PrettyField() << " "
-              << receiver_class->PrettyClass();
-    return JDWP::ERR_INVALID_FIELDID;
-  }
-
-  // Ensure the field's class is initialized.
-  Handle<mirror::Class> klass(hs.NewHandle(f->GetDeclaringClass()));
-  if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, klass, true, false)) {
-    LOG(WARNING) << "Not able to initialize class for SetValues: "
-                 << mirror::Class::PrettyClass(klass.Get());
-  }
-
-  // The RI only enforces the static/non-static mismatch in one direction.
-  // TODO: should we change the tests and check both?
-  if (is_static) {
-    if (!f->IsStatic()) {
-      return JDWP::ERR_INVALID_FIELDID;
-    }
-  } else {
-    if (f->IsStatic()) {
-      LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.GetValues"
-                   << " on static field " << f->PrettyField();
-    }
-  }
-  if (f->IsStatic()) {
-    o.Assign(f->GetDeclaringClass());
-  }
-
-  JValue field_value(GetArtFieldValue(f, o.Get()));
-  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
-  Dbg::OutputJValue(tag, &field_value, pReply);
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
-                                   JDWP::ExpandBuf* pReply) {
-  return GetFieldValueImpl(0, object_id, field_id, pReply, false);
-}
-
-JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id,
-                                         JDWP::ExpandBuf* pReply) {
-  return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
-}
-
-static JDWP::JdwpError SetArtFieldValue(ArtField* f, mirror::Object* o, uint64_t value, int width)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
-  // Debugging only happens at runtime so we know we are not running in a transaction.
-  static constexpr bool kNoTransactionMode = false;
-  switch (fieldType) {
-    case Primitive::kPrimBoolean:
-      CHECK_EQ(width, 1);
-      f->SetBoolean<kNoTransactionMode>(o, static_cast<uint8_t>(value));
-      return JDWP::ERR_NONE;
-
-    case Primitive::kPrimByte:
-      CHECK_EQ(width, 1);
-      f->SetByte<kNoTransactionMode>(o, static_cast<uint8_t>(value));
-      return JDWP::ERR_NONE;
-
-    case Primitive::kPrimChar:
-      CHECK_EQ(width, 2);
-      f->SetChar<kNoTransactionMode>(o, static_cast<uint16_t>(value));
-      return JDWP::ERR_NONE;
-
-    case Primitive::kPrimShort:
-      CHECK_EQ(width, 2);
-      f->SetShort<kNoTransactionMode>(o, static_cast<int16_t>(value));
-      return JDWP::ERR_NONE;
-
-    case Primitive::kPrimInt:
-    case Primitive::kPrimFloat:
-      CHECK_EQ(width, 4);
-      // Int and Float must be treated as 32-bit values in JDWP.
-      f->SetInt<kNoTransactionMode>(o, static_cast<int32_t>(value));
-      return JDWP::ERR_NONE;
-
-    case Primitive::kPrimLong:
-    case Primitive::kPrimDouble:
-      CHECK_EQ(width, 8);
-      // Long and Double must be treated as 64-bit values in JDWP.
-      f->SetLong<kNoTransactionMode>(o, value);
-      return JDWP::ERR_NONE;
-
-    case Primitive::kPrimNot: {
-      JDWP::JdwpError error;
-      mirror::Object* v = Dbg::GetObjectRegistry()->Get<mirror::Object*>(value, &error);
-      if (error != JDWP::ERR_NONE) {
-        return JDWP::ERR_INVALID_OBJECT;
-      }
-      if (v != nullptr) {
-        ObjPtr<mirror::Class> field_type;
-        {
-          StackHandleScope<2> hs(Thread::Current());
-          HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
-          HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
-          field_type = f->ResolveType();
-        }
-        if (!field_type->IsAssignableFrom(v->GetClass())) {
-          return JDWP::ERR_INVALID_OBJECT;
-        }
-      }
-      f->SetObject<kNoTransactionMode>(o, v);
-      return JDWP::ERR_NONE;
-    }
-
-    case Primitive::kPrimVoid:
-      LOG(FATAL) << "Attempt to write to field of type 'void'";
-      UNREACHABLE();
-  }
-  LOG(FATAL) << "Attempt to write to field of unknown type";
-  UNREACHABLE();
-}
-
-static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
-                                         uint64_t value, int width, bool is_static)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  JDWP::JdwpError error;
-  Thread* self = Thread::Current();
-  StackHandleScope<2> hs(self);
-  MutableHandle<mirror::Object>
-      o(hs.NewHandle(Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error)));
-  if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
-    return JDWP::ERR_INVALID_OBJECT;
-  }
-  ArtField* f = FromFieldId(field_id);
-
-  // Ensure the field's class is initialized.
-  Handle<mirror::Class> klass(hs.NewHandle(f->GetDeclaringClass()));
-  if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, klass, true, false)) {
-    LOG(WARNING) << "Not able to initialize class for SetValues: "
-                 << mirror::Class::PrettyClass(klass.Get());
-  }
-
-  // The RI only enforces the static/non-static mismatch in one direction.
-  // TODO: should we change the tests and check both?
-  if (is_static) {
-    if (!f->IsStatic()) {
-      return JDWP::ERR_INVALID_FIELDID;
-    }
-  } else {
-    if (f->IsStatic()) {
-      LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.SetValues"
-                   << " on static field " << f->PrettyField();
-    }
-  }
-  if (f->IsStatic()) {
-    o.Assign(f->GetDeclaringClass());
-  }
-  return SetArtFieldValue(f, o.Get(), value, width);
-}
-
-JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value,
-                                   int width) {
-  return SetFieldValueImpl(object_id, field_id, value, width, false);
-}
-
-JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) {
-  return SetFieldValueImpl(0, field_id, value, width, true);
-}
-
-JDWP::JdwpError Dbg::StringToUtf8(JDWP::ObjectId string_id, std::string* str) {
-  JDWP::JdwpError error;
-  mirror::Object* obj = gRegistry->Get<mirror::Object*>(string_id, &error);
-  if (error != JDWP::ERR_NONE) {
-    return error;
-  }
-  if (obj == nullptr) {
-    return JDWP::ERR_INVALID_OBJECT;
-  }
-  {
-    ScopedObjectAccessUnchecked soa(Thread::Current());
-    ObjPtr<mirror::Class> java_lang_String =
-        soa.Decode<mirror::Class>(WellKnownClasses::java_lang_String);
-    if (!java_lang_String->IsAssignableFrom(obj->GetClass())) {
-      // This isn't a string.
-      return JDWP::ERR_INVALID_STRING;
-    }
-  }
-  *str = obj->AsString()->ToModifiedUtf8();
-  return JDWP::ERR_NONE;
-}
-
-void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) {
-  if (IsPrimitiveTag(tag)) {
-    expandBufAdd1(pReply, tag);
-    if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) {
-      expandBufAdd1(pReply, return_value->GetI());
-    } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) {
-      expandBufAdd2BE(pReply, return_value->GetI());
-    } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) {
-      expandBufAdd4BE(pReply, return_value->GetI());
-    } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
-      expandBufAdd8BE(pReply, return_value->GetJ());
-    } else {
-      CHECK_EQ(tag, JDWP::JT_VOID);
-    }
-  } else {
-    ScopedObjectAccessUnchecked soa(Thread::Current());
-    mirror::Object* value = return_value->GetL();
-    expandBufAdd1(pReply, TagFromObject(soa, value));
-    expandBufAddObjectId(pReply, gRegistry->Add(value));
-  }
-}
-
-JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string* name) {
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  JDWP::JdwpError error;
-  DecodeThread(soa, thread_id, &error);
-  if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
-    return error;
-  }
-
-  // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName.
-  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
-  CHECK(thread_object != nullptr) << error;
-  ArtField* java_lang_Thread_name_field =
-      jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name);
-  ObjPtr<mirror::String> s(java_lang_Thread_name_field->GetObject(thread_object)->AsString());
-  if (s != nullptr) {
-    *name = s->ToModifiedUtf8();
-  }
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  JDWP::JdwpError error;
-  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
-  if (error != JDWP::ERR_NONE) {
-    return JDWP::ERR_INVALID_OBJECT;
-  }
-  ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroup");
-  // Okay, so it's an object, but is it actually a thread?
-  DecodeThread(soa, thread_id, &error);
-  if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
-    // Zombie threads are in the null group.
-    expandBufAddObjectId(pReply, JDWP::ObjectId(0));
-    error = JDWP::ERR_NONE;
-  } else if (error == JDWP::ERR_NONE) {
-    ObjPtr<mirror::Class> c = soa.Decode<mirror::Class>(WellKnownClasses::java_lang_Thread);
-    CHECK(c != nullptr);
-    ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group);
-    CHECK(f != nullptr);
-    ObjPtr<mirror::Object> group = f->GetObject(thread_object);
-    CHECK(group != nullptr);
-    JDWP::ObjectId thread_group_id = gRegistry->Add(group);
-    expandBufAddObjectId(pReply, thread_group_id);
-  }
-  return error;
-}
-
-static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa,
-                                         JDWP::ObjectId thread_group_id, JDWP::JdwpError* error)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  mirror::Object* thread_group = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_group_id,
-                                                                                error);
-  if (*error != JDWP::ERR_NONE) {
-    return nullptr;
-  }
-  if (thread_group == nullptr) {
-    *error = JDWP::ERR_INVALID_OBJECT;
-    return nullptr;
-  }
-  ObjPtr<mirror::Class> c =
-      soa.Decode<mirror::Class>(WellKnownClasses::java_lang_ThreadGroup);
-  CHECK(c != nullptr);
-  if (!c->IsAssignableFrom(thread_group->GetClass())) {
-    // This is not a java.lang.ThreadGroup.
-    *error = JDWP::ERR_INVALID_THREAD_GROUP;
-    return nullptr;
-  }
-  *error = JDWP::ERR_NONE;
-  return thread_group;
-}
-
-JDWP::JdwpError Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  JDWP::JdwpError error;
-  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
-  if (error != JDWP::ERR_NONE) {
-    return error;
-  }
-  ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroupName");
-  ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_name);
-  CHECK(f != nullptr);
-  ObjPtr<mirror::String> s = f->GetObject(thread_group)->AsString();
-
-  std::string thread_group_name(s->ToModifiedUtf8());
-  expandBufAddUtf8String(pReply, thread_group_name);
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  JDWP::JdwpError error;
-  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
-  if (error != JDWP::ERR_NONE) {
-    return error;
-  }
-  ObjPtr<mirror::Object> parent;
-  {
-    ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroupParent");
-    ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_parent);
-    CHECK(f != nullptr);
-    parent = f->GetObject(thread_group);
-  }
-  JDWP::ObjectId parent_group_id = gRegistry->Add(parent);
-  expandBufAddObjectId(pReply, parent_group_id);
-  return JDWP::ERR_NONE;
-}
-
-static void GetChildThreadGroups(mirror::Object* thread_group,
-                                 std::vector<JDWP::ObjectId>* child_thread_group_ids)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  CHECK(thread_group != nullptr);
-
-  // Get the int "ngroups" count of this thread group...
-  ArtField* ngroups_field = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_ngroups);
-  CHECK(ngroups_field != nullptr);
-  const int32_t size = ngroups_field->GetInt(thread_group);
-  if (size == 0) {
-    return;
-  }
-
-  // Get the ThreadGroup[] "groups" out of this thread group...
-  ArtField* groups_field = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_groups);
-  ObjPtr<mirror::Object> groups_array = groups_field->GetObject(thread_group);
-
-  CHECK(groups_array != nullptr);
-  CHECK(groups_array->IsObjectArray());
-
-  ObjPtr<mirror::ObjectArray<mirror::Object>> groups_array_as_array =
-      groups_array->AsObjectArray<mirror::Object>();
-
-  // Copy the first 'size' elements out of the array into the result.
-  ObjectRegistry* registry = Dbg::GetObjectRegistry();
-  for (int32_t i = 0; i < size; ++i) {
-    child_thread_group_ids->push_back(registry->Add(groups_array_as_array->Get(i)));
-  }
-}
-
-JDWP::JdwpError Dbg::GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
-                                            JDWP::ExpandBuf* pReply) {
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  JDWP::JdwpError error;
-  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
-  if (error != JDWP::ERR_NONE) {
-    return error;
-  }
-
-  // Add child threads.
-  {
-    std::vector<JDWP::ObjectId> child_thread_ids;
-    GetThreads(thread_group, &child_thread_ids);
-    expandBufAdd4BE(pReply, child_thread_ids.size());
-    for (JDWP::ObjectId child_thread_id : child_thread_ids) {
-      expandBufAddObjectId(pReply, child_thread_id);
-    }
-  }
-
-  // Add child thread groups.
-  {
-    std::vector<JDWP::ObjectId> child_thread_groups_ids;
-    GetChildThreadGroups(thread_group, &child_thread_groups_ids);
-    expandBufAdd4BE(pReply, child_thread_groups_ids.size());
-    for (JDWP::ObjectId child_thread_group_id : child_thread_groups_ids) {
-      expandBufAddObjectId(pReply, child_thread_group_id);
-    }
-  }
-
-  return JDWP::ERR_NONE;
-}
-
-JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
-  ObjPtr<mirror::Object> group = f->GetObject(f->GetDeclaringClass());
-  return gRegistry->Add(group);
-}
-
-JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
-  switch (state) {
-    case kBlocked:
-      return JDWP::TS_MONITOR;
-    case kNative:
-    case kRunnable:
-    case kSuspended:
-      return JDWP::TS_RUNNING;
-    case kSleeping:
-      return JDWP::TS_SLEEPING;
-    case kStarting:
-    case kTerminated:
-      return JDWP::TS_ZOMBIE;
-    case kTimedWaiting:
-    case kWaitingForTaskProcessor:
-    case kWaitingForLockInflation:
-    case kWaitingForCheckPointsToRun:
-    case kWaitingForDebuggerSend:
-    case kWaitingForDebuggerSuspension:
-    case kWaitingForDebuggerToAttach:
-    case kWaitingForDeoptimization:
-    case kWaitingForGcToComplete:
-    case kWaitingForGetObjectsAllocated:
-    case kWaitingForJniOnLoad:
-    case kWaitingForMethodTracingStart:
-    case kWaitingForSignalCatcherOutput:
-    case kWaitingForVisitObjects:
-    case kWaitingInMainDebuggerLoop:
-    case kWaitingInMainSignalCatcherLoop:
-    case kWaitingPerformingGc:
-    case kWaitingWeakGcRootRead:
-    case kWaitingForGcThreadFlip:
-    case kNativeForAbort:
-    case kWaiting:
-      return JDWP::TS_WAIT;
-      // Don't add a 'default' here so the compiler can spot incompatible enum changes.
-  }
-  LOG(FATAL) << "Unknown thread state: " << state;
-  UNREACHABLE();
-}
-
-JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus,
-                                     JDWP::JdwpSuspendStatus* pSuspendStatus) {
-  ScopedObjectAccess soa(Thread::Current());
-
-  *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED;
-
-  JDWP::JdwpError error;
-  Thread* thread = DecodeThread(soa, thread_id, &error);
-  if (error != JDWP::ERR_NONE) {
-    if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
-      *pThreadStatus = JDWP::TS_ZOMBIE;
-      return JDWP::ERR_NONE;
-    }
-    return error;
-  }
-
-  if (IsSuspendedForDebugger(soa, thread)) {
-    *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED;
-  }
-
-  *pThreadStatus = ToJdwpThreadStatus(thread->GetState());
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
-  ScopedObjectAccess soa(Thread::Current());
-  JDWP::JdwpError error;
-  Thread* thread = DecodeThread(soa, thread_id, &error);
-  if (error != JDWP::ERR_NONE) {
-    return error;
-  }
-  MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
-  expandBufAdd4BE(pReply, thread->GetDebugSuspendCount());
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) {
-  ScopedObjectAccess soa(Thread::Current());
-  JDWP::JdwpError error;
-  Thread* thread = DecodeThread(soa, thread_id, &error);
-  if (error != JDWP::ERR_NONE) {
-    return error;
-  }
-  thread->Interrupt(soa.Self());
-  return JDWP::ERR_NONE;
-}
-
-static bool IsInDesiredThreadGroup(mirror::Object* desired_thread_group, mirror::Object* peer)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  // Do we want threads from all thread groups?
-  if (desired_thread_group == nullptr) {
-    return true;
-  }
-  ArtField* thread_group_field = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group);
-  DCHECK(thread_group_field != nullptr);
-  ObjPtr<mirror::Object> group = thread_group_field->GetObject(peer);
-  return (group == desired_thread_group);
-}
-
-void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids) {
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  std::list<Thread*> all_threads_list;
-  {
-    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
-    all_threads_list = Runtime::Current()->GetThreadList()->GetList();
-  }
-  for (Thread* t : all_threads_list) {
-    if (t == Dbg::GetDebugThread()) {
-      // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
-      // query all threads, so it's easier if we just don't tell them about this thread.
-      continue;
-    }
-    if (t->IsStillStarting()) {
-      // This thread is being started (and has been registered in the thread list). However, it is
-      // not completely started yet so we must ignore it.
-      continue;
-    }
-    mirror::Object* peer = t->GetPeerFromOtherThread();
-    if (peer == nullptr) {
-      // peer might be null if the thread is still starting up. We can't tell the debugger about
-      // this thread yet.
-      // TODO: if we identified threads to the debugger by their Thread*
-      // rather than their peer's mirror::Object*, we could fix this.
-      // Doing so might help us report ZOMBIE threads too.
-      continue;
-    }
-    if (IsInDesiredThreadGroup(thread_group, peer)) {
-      thread_ids->push_back(gRegistry->Add(peer));
-    }
-  }
-}
-
-static int GetStackDepth(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) {
-  size_t depth = 0u;
-  StackVisitor::WalkStack(
-      [&depth](const StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
-        if (!visitor->GetMethod()->IsRuntimeMethod()) {
-          ++depth;
-        }
-        return true;
-      },
-      thread,
-      /* context= */ nullptr,
-      StackVisitor::StackWalkKind::kIncludeInlinedFrames);
-  return depth;
-}
-
-JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result) {
-  ScopedObjectAccess soa(Thread::Current());
-  JDWP::JdwpError error;
-  *result = 0;
-  Thread* thread = DecodeThread(soa, thread_id, &error);
-  if (error != JDWP::ERR_NONE) {
-    return error;
-  }
-  if (!IsSuspendedForDebugger(soa, thread)) {
-    return JDWP::ERR_THREAD_NOT_SUSPENDED;
-  }
-  *result = GetStackDepth(thread);
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id,
-                                     const size_t start_frame,
-                                     const size_t frame_count,
-                                     JDWP::ExpandBuf* buf) {
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  JDWP::JdwpError error;
-  Thread* thread = DecodeThread(soa, thread_id, &error);
-  if (error != JDWP::ERR_NONE) {
-    return error;
-  }
-  if (!IsSuspendedForDebugger(soa, thread)) {
-    return JDWP::ERR_THREAD_NOT_SUSPENDED;
-  }
-
-  expandBufAdd4BE(buf, frame_count);
-
-  size_t depth = 0u;
-  StackVisitor::WalkStack(
-      [&](StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
-        if (visitor->GetMethod()->IsRuntimeMethod()) {
-          return true;  // The debugger can't do anything useful with a frame that has no Method*.
-        }
-        if (depth >= start_frame + frame_count) {
-          return false;
-        }
-        if (depth >= start_frame) {
-          JDWP::FrameId frame_id(visitor->GetFrameId());
-          JDWP::JdwpLocation location;
-          SetJdwpLocation(&location, visitor->GetMethod(), visitor->GetDexPc());
-          VLOG(jdwp)
-              << StringPrintf("    Frame %3zd: id=%3" PRIu64 " ", depth, frame_id) << location;
-          expandBufAdd8BE(buf, frame_id);
-          expandBufAddLocation(buf, location);
-        }
-        ++depth;
-        return true;
-      },
-      thread,
-      /* context= */ nullptr,
-      StackVisitor::StackWalkKind::kIncludeInlinedFrames);
-
-  return JDWP::ERR_NONE;
-}
-
-JDWP::ObjectId Dbg::GetThreadSelfId() {
-  return GetThreadId(Thread::Current());
-}
-
-JDWP::ObjectId Dbg::GetThreadId(Thread* thread) {
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  return gRegistry->Add(thread->GetPeerFromOtherThread());
-}
-
-void Dbg::SuspendVM() {
-  // Avoid a deadlock between GC and debugger where GC gets suspended during GC. b/25800335.
-  gc::ScopedGCCriticalSection gcs(Thread::Current(),
-                                  gc::kGcCauseDebugger,
-                                  gc::kCollectorTypeDebugger);
-  Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
-}
-
-void Dbg::ResumeVM() {
-  Runtime::Current()->GetThreadList()->ResumeAllForDebugger();
-}
-
-JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
-  Thread* self = Thread::Current();
-  ScopedLocalRef<jobject> peer(self->GetJniEnv(), nullptr);
-  {
-    ScopedObjectAccess soa(self);
-    JDWP::JdwpError error;
-    peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id, &error)));
-  }
-  if (peer.get() == nullptr) {
-    return JDWP::ERR_THREAD_NOT_ALIVE;
-  }
-  // Suspend thread to build stack trace.
-  bool timed_out;
-  ThreadList* thread_list = Runtime::Current()->GetThreadList();
-  Thread* thread = thread_list->SuspendThreadByPeer(peer.get(),
-                                                    request_suspension,
-                                                    SuspendReason::kForDebugger,
-                                                    &timed_out);
-  if (thread != nullptr) {
-    return JDWP::ERR_NONE;
-  } else if (timed_out) {
-    return JDWP::ERR_INTERNAL;
-  } else {
-    return JDWP::ERR_THREAD_NOT_ALIVE;
-  }
-}
-
-void Dbg::ResumeThread(JDWP::ObjectId thread_id) {
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  JDWP::JdwpError error;
-  mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id, &error);
-  CHECK(peer != nullptr) << error;
-  Thread* thread;
-  {
-    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
-    thread = Thread::FromManagedThread(soa, peer);
-  }
-  if (thread == nullptr) {
-    LOG(WARNING) << "No such thread for resume: " << peer;
-    return;
-  }
-  bool needs_resume;
-  {
-    MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
-    needs_resume = thread->GetDebugSuspendCount() > 0;
-  }
-  if (needs_resume) {
-    bool resumed = Runtime::Current()->GetThreadList()->Resume(thread, SuspendReason::kForDebugger);
-    DCHECK(resumed);
-  }
-}
-
-void Dbg::SuspendSelf() {
-  Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
-}
-
-JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
-                                   JDWP::ObjectId* result) {
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  JDWP::JdwpError error;
-  Thread* thread = DecodeThread(soa, thread_id, &error);
-  if (error != JDWP::ERR_NONE) {
-    return error;
-  }
-  if (!IsSuspendedForDebugger(soa, thread)) {
-    return JDWP::ERR_THREAD_NOT_SUSPENDED;
-  }
-  std::unique_ptr<Context> context(Context::Create());
-  mirror::Object* this_object = nullptr;
-  StackVisitor::WalkStack(
-      [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
-        if (frame_id != stack_visitor->GetFrameId()) {
-          return true;  // continue
-        } else {
-          this_object = stack_visitor->GetThisObject();
-          return false;
-        }
-      },
-      thread,
-      context.get(),
-      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
-  *result = gRegistry->Add(this_object);
-  return JDWP::ERR_NONE;
-}
-
-template <typename FrameHandler>
-static JDWP::JdwpError FindAndHandleNonNativeFrame(Thread* thread,
-                                                   JDWP::FrameId frame_id,
-                                                   const FrameHandler& handler)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  JDWP::JdwpError result = JDWP::ERR_INVALID_FRAMEID;
-  std::unique_ptr<Context> context(Context::Create());
-  StackVisitor::WalkStack(
-      [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
-        if (stack_visitor->GetFrameId() != frame_id) {
-          return true;  // Not our frame, carry on.
-        }
-        ArtMethod* m = stack_visitor->GetMethod();
-        if (m->IsNative()) {
-          // We can't read/write local value from/into native method.
-          result = JDWP::ERR_OPAQUE_FRAME;
-        } else {
-          // We found our frame.
-          result = handler(stack_visitor);
-        }
-        return false;
-      },
-      thread,
-      context.get(),
-      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
-  return result;
-}
-
-JDWP::JdwpError Dbg::GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply) {
-  JDWP::ObjectId thread_id = request->ReadThreadId();
-  JDWP::FrameId frame_id = request->ReadFrameId();
-
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  JDWP::JdwpError error;
-  Thread* thread = DecodeThread(soa, thread_id, &error);
-  if (error != JDWP::ERR_NONE) {
-    return error;
-  }
-  if (!IsSuspendedForDebugger(soa, thread)) {
-    return JDWP::ERR_THREAD_NOT_SUSPENDED;
-  }
-
-  return FindAndHandleNonNativeFrame(
-      thread,
-      frame_id,
-      [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
-        // Read the values from visitor's context.
-        int32_t slot_count = request->ReadSigned32("slot count");
-        expandBufAdd4BE(pReply, slot_count);     /* "int values" */
-        for (int32_t i = 0; i < slot_count; ++i) {
-          uint32_t slot = request->ReadUnsigned32("slot");
-          JDWP::JdwpTag reqSigByte = request->ReadTag();
-
-          VLOG(jdwp) << "    --> slot " << slot << " " << reqSigByte;
-
-          size_t width = Dbg::GetTagWidth(reqSigByte);
-          uint8_t* ptr = expandBufAddSpace(pReply, width + 1);
-          error = Dbg::GetLocalValue(*stack_visitor, soa, slot, reqSigByte, ptr, width);
-          if (error != JDWP::ERR_NONE) {
-            return error;
-          }
-        }
-        return JDWP::ERR_NONE;
-      });
-}
-
-constexpr JDWP::JdwpError kStackFrameLocalAccessError = JDWP::ERR_ABSENT_INFORMATION;
-
-static std::string GetStackContextAsString(const StackVisitor& visitor)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  return StringPrintf(" at DEX pc 0x%08x in method %s", visitor.GetDexPc(false),
-                      ArtMethod::PrettyMethod(visitor.GetMethod()).c_str());
-}
-
-static JDWP::JdwpError FailGetLocalValue(const StackVisitor& visitor, uint16_t vreg,
-                                         JDWP::JdwpTag tag)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  LOG(ERROR) << "Failed to read " << tag << " local from register v" << vreg
-             << GetStackContextAsString(visitor);
-  return kStackFrameLocalAccessError;
-}
-
-JDWP::JdwpError Dbg::GetLocalValue(const StackVisitor& visitor, ScopedObjectAccessUnchecked& soa,
-                                   int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
-  ArtMethod* m = visitor.GetMethod();
-  JDWP::JdwpError error = JDWP::ERR_NONE;
-  uint16_t vreg = DemangleSlot(slot, m, &error);
-  if (error != JDWP::ERR_NONE) {
-    return error;
-  }
-  // TODO: check that the tag is compatible with the actual type of the slot!
-  switch (tag) {
-    case JDWP::JT_BOOLEAN: {
-      CHECK_EQ(width, 1U);
-      uint32_t intVal;
-      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
-        return FailGetLocalValue(visitor, vreg, tag);
-      }
-      VLOG(jdwp) << "get boolean local " << vreg << " = " << intVal;
-      JDWP::Set1(buf + 1, intVal != 0);
-      break;
-    }
-    case JDWP::JT_BYTE: {
-      CHECK_EQ(width, 1U);
-      uint32_t intVal;
-      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
-        return FailGetLocalValue(visitor, vreg, tag);
-      }
-      VLOG(jdwp) << "get byte local " << vreg << " = " << intVal;
-      JDWP::Set1(buf + 1, intVal);
-      break;
-    }
-    case JDWP::JT_SHORT:
-    case JDWP::JT_CHAR: {
-      CHECK_EQ(width, 2U);
-      uint32_t intVal;
-      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
-        return FailGetLocalValue(visitor, vreg, tag);
-      }
-      VLOG(jdwp) << "get short/char local " << vreg << " = " << intVal;
-      JDWP::Set2BE(buf + 1, intVal);
-      break;
-    }
-    case JDWP::JT_INT: {
-      CHECK_EQ(width, 4U);
-      uint32_t intVal;
-      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
-        return FailGetLocalValue(visitor, vreg, tag);
-      }
-      VLOG(jdwp) << "get int local " << vreg << " = " << intVal;
-      JDWP::Set4BE(buf + 1, intVal);
-      break;
-    }
-    case JDWP::JT_FLOAT: {
-      CHECK_EQ(width, 4U);
-      uint32_t intVal;
-      if (!visitor.GetVReg(m, vreg, kFloatVReg, &intVal)) {
-        return FailGetLocalValue(visitor, vreg, tag);
-      }
-      VLOG(jdwp) << "get float local " << vreg << " = " << intVal;
-      JDWP::Set4BE(buf + 1, intVal);
-      break;
-    }
-    case JDWP::JT_ARRAY:
-    case JDWP::JT_CLASS_LOADER:
-    case JDWP::JT_CLASS_OBJECT:
-    case JDWP::JT_OBJECT:
-    case JDWP::JT_STRING:
-    case JDWP::JT_THREAD:
-    case JDWP::JT_THREAD_GROUP: {
-      CHECK_EQ(width, sizeof(JDWP::ObjectId));
-      uint32_t intVal;
-      if (!visitor.GetVReg(m, vreg, kReferenceVReg, &intVal)) {
-        return FailGetLocalValue(visitor, vreg, tag);
-      }
-      mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal);
-      VLOG(jdwp) << "get " << tag << " object local " << vreg << " = " << o;
-      if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
-        LOG(FATAL) << StringPrintf("Found invalid object %#" PRIxPTR " in register v%u",
-                                   reinterpret_cast<uintptr_t>(o), vreg)
-                                   << GetStackContextAsString(visitor);
-        UNREACHABLE();
-      }
-      tag = TagFromObject(soa, o);
-      JDWP::SetObjectId(buf + 1, gRegistry->Add(o));
-      break;
-    }
-    case JDWP::JT_DOUBLE: {
-      CHECK_EQ(width, 8U);
-      uint64_t longVal;
-      if (!visitor.GetVRegPair(m, vreg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
-        return FailGetLocalValue(visitor, vreg, tag);
-      }
-      VLOG(jdwp) << "get double local " << vreg << " = " << longVal;
-      JDWP::Set8BE(buf + 1, longVal);
-      break;
-    }
-    case JDWP::JT_LONG: {
-      CHECK_EQ(width, 8U);
-      uint64_t longVal;
-      if (!visitor.GetVRegPair(m, vreg, kLongLoVReg, kLongHiVReg, &longVal)) {
-        return FailGetLocalValue(visitor, vreg, tag);
-      }
-      VLOG(jdwp) << "get long local " << vreg << " = " << longVal;
-      JDWP::Set8BE(buf + 1, longVal);
-      break;
-    }
-    default:
-      LOG(FATAL) << "Unknown tag " << tag;
-      UNREACHABLE();
-  }
-
-  // Prepend tag, which may have been updated.
-  JDWP::Set1(buf, tag);
-  return JDWP::ERR_NONE;
-}
-
-JDWP::JdwpError Dbg::SetLocalValues(JDWP::Request* request) {
-  JDWP::ObjectId thread_id = request->ReadThreadId();
-  JDWP::FrameId frame_id = request->ReadFrameId();
-
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  JDWP::JdwpError error;
-  Thread* thread = DecodeThread(soa, thread_id, &error);
-  if (error != JDWP::ERR_NONE) {
-    return error;
-  }
-  if (!IsSuspendedForDebugger(soa, thread)) {
-    return JDWP::ERR_THREAD_NOT_SUSPENDED;
-  }
-
-  return FindAndHandleNonNativeFrame(
-      thread,
-      frame_id,
-      [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
-        // Writes the values into visitor's context.
-        int32_t slot_count = request->ReadSigned32("slot count");
-        for (int32_t i = 0; i < slot_count; ++i) {
-          uint32_t slot = request->ReadUnsigned32("slot");
-          JDWP::JdwpTag sigByte = request->ReadTag();
-          size_t width = Dbg::GetTagWidth(sigByte);
-          uint64_t value = request->ReadValue(width);
-
-          VLOG(jdwp) << "    --> slot " << slot << " " << sigByte << " " << value;
-          error = Dbg::SetLocalValue(thread, *stack_visitor, slot, sigByte, value, width);
-          if (error != JDWP::ERR_NONE) {
-            return error;
-          }
-        }
-        return JDWP::ERR_NONE;
-      });
-}
-
-template<typename T>
-static JDWP::JdwpError FailSetLocalValue(const StackVisitor& visitor, uint16_t vreg,
-                                         JDWP::JdwpTag tag, T value)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  LOG(ERROR) << "Failed to write " << tag << " local " << value
-             << " (0x" << std::hex << value << ") into register v" << vreg
-             << GetStackContextAsString(visitor);
-  return kStackFrameLocalAccessError;
-}
-
-JDWP::JdwpError Dbg::SetLocalValue(Thread* thread, StackVisitor& visitor, int slot,
-                                   JDWP::JdwpTag tag, uint64_t value, size_t width) {
-  ArtMethod* m = visitor.GetMethod();
-  JDWP::JdwpError error = JDWP::ERR_NONE;
-  uint16_t vreg = DemangleSlot(slot, m, &error);
-  if (error != JDWP::ERR_NONE) {
-    return error;
-  }
-  // TODO: check that the tag is compatible with the actual type of the slot!
-  switch (tag) {
-    case JDWP::JT_BOOLEAN:
-    case JDWP::JT_BYTE:
-      CHECK_EQ(width, 1U);
-      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
-        return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
-      }
-      break;
-    case JDWP::JT_SHORT:
-    case JDWP::JT_CHAR:
-      CHECK_EQ(width, 2U);
-      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
-        return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
-      }
-      break;
-    case JDWP::JT_INT:
-      CHECK_EQ(width, 4U);
-      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
-        return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
-      }
-      break;
-    case JDWP::JT_FLOAT:
-      CHECK_EQ(width, 4U);
-      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kFloatVReg)) {
-        return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
-      }
-      break;
-    case JDWP::JT_ARRAY:
-    case JDWP::JT_CLASS_LOADER:
-    case JDWP::JT_CLASS_OBJECT:
-    case JDWP::JT_OBJECT:
-    case JDWP::JT_STRING:
-    case JDWP::JT_THREAD:
-    case JDWP::JT_THREAD_GROUP: {
-      CHECK_EQ(width, sizeof(JDWP::ObjectId));
-      mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value),
-                                                          &error);
-      if (error != JDWP::ERR_NONE) {
-        VLOG(jdwp) << tag << " object " << o << " is an invalid object";
-        return JDWP::ERR_INVALID_OBJECT;
-      }
-      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
-                                 kReferenceVReg)) {
-        return FailSetLocalValue(visitor, vreg, tag, reinterpret_cast<uintptr_t>(o));
-      }
-      break;
-    }
-    case JDWP::JT_DOUBLE: {
-      CHECK_EQ(width, 8U);
-      if (!visitor.SetVRegPair(m, vreg, value, kDoubleLoVReg, kDoubleHiVReg)) {
-        return FailSetLocalValue(visitor, vreg, tag, value);
-      }
-      break;
-    }
-    case JDWP::JT_LONG: {
-      CHECK_EQ(width, 8U);
-      if (!visitor.SetVRegPair(m, vreg, value, kLongLoVReg, kLongHiVReg)) {
-        return FailSetLocalValue(visitor, vreg, tag, value);
-      }
-      break;
-    }
-    default:
-      LOG(FATAL) << "Unknown tag " << tag;
-      UNREACHABLE();
-  }
-
-  // If we set the local variable in a compiled frame, we need to trigger a deoptimization of
-  // the stack so we continue execution with the interpreter using the new value(s) of the updated
-  // local variable(s). To achieve this, we install instrumentation exit stub on each method of the
-  // thread's stack. The stub will cause the deoptimization to happen.
-  if (!visitor.IsShadowFrame() && thread->HasDebuggerShadowFrames()) {
-    Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(thread);
-  }
-
-  return JDWP::ERR_NONE;
-}
-
-static void SetEventLocation(JDWP::EventLocation* location, ArtMethod* m, uint32_t dex_pc)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  DCHECK(location != nullptr);
-  if (m == nullptr) {
-    memset(location, 0, sizeof(*location));
-  } else {
-    location->method = m->GetCanonicalMethod(kRuntimePointerSize);
-    location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint32_t>(-1) : dex_pc;
-  }
-}
-
-void Dbg::PostLocationEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object,
-                            int event_flags, const JValue* return_value) {
-  if (!IsDebuggerActive()) {
-    return;
-  }
-  DCHECK(m != nullptr);
-  DCHECK_EQ(m->IsStatic(), this_object == nullptr);
-  JDWP::EventLocation location;
-  SetEventLocation(&location, m, dex_pc);
-
-  // We need to be sure no exception is pending when calling JdwpState::PostLocationEvent.
-  // This is required to be able to call JNI functions to create JDWP ids. To achieve this,
-  // we temporarily clear the current thread's exception (if any) and will restore it after
-  // the call.
-  // Note: the only way to get a pending exception here is to suspend on a move-exception
-  // instruction.
-  Thread* const self = Thread::Current();
-  StackHandleScope<1> hs(self);
-  Handle<mirror::Throwable> pending_exception(hs.NewHandle(self->GetException()));
-  self->ClearException();
-  if (kIsDebugBuild && pending_exception != nullptr) {
-    const Instruction& instr = location.method->DexInstructions().InstructionAt(location.dex_pc);
-    CHECK_EQ(Instruction::MOVE_EXCEPTION, instr.Opcode());
-  }
-
-  gJdwpState->PostLocationEvent(&location, this_object, event_flags, return_value);
-
-  if (pending_exception != nullptr) {
-    self->SetException(pending_exception.Get());
-  }
-}
-
-void Dbg::PostFieldAccessEvent(ArtMethod* m, int dex_pc,
-                               mirror::Object* this_object, ArtField* f) {
-  // TODO We should send events for native methods.
-  if (!IsDebuggerActive() || m->IsNative()) {
-    return;
-  }
-  DCHECK(m != nullptr);
-  DCHECK(f != nullptr);
-  JDWP::EventLocation location;
-  SetEventLocation(&location, m, dex_pc);
-
-  gJdwpState->PostFieldEvent(&location, f, this_object, nullptr, false);
-}
-
-void Dbg::PostFieldModificationEvent(ArtMethod* m, int dex_pc,
-                                     mirror::Object* this_object, ArtField* f,
-                                     const JValue* field_value) {
-  // TODO We should send events for native methods.
-  if (!IsDebuggerActive() || m->IsNative()) {
-    return;
-  }
-  DCHECK(m != nullptr);
-  DCHECK(f != nullptr);
-  DCHECK(field_value != nullptr);
-  JDWP::EventLocation location;
-  SetEventLocation(&location, m, dex_pc);
-
-  gJdwpState->PostFieldEvent(&location, f, this_object, field_value, true);
-}
-
-void Dbg::PostException(mirror::Throwable* exception_object) {
-  if (!IsDebuggerActive()) {
-    return;
-  }
-  Thread* const self = Thread::Current();
-  StackHandleScope<2> handle_scope(self);
-  Handle<mirror::Throwable> h_exception(handle_scope.NewHandle(exception_object));
-  MutableHandle<mirror::Object> this_at_throw = handle_scope.NewHandle<mirror::Object>(nullptr);
-  std::unique_ptr<Context> context(Context::Create());
-
-  ArtMethod* catch_method = nullptr;
-  ArtMethod* throw_method = nullptr;
-  uint32_t catch_dex_pc = dex::kDexNoIndex;
-  uint32_t throw_dex_pc = dex::kDexNoIndex;
-  StackVisitor::WalkStack(
-      /**
-       * Finds the location where this exception will be caught. We search until we reach the top
-       * frame, in which case this exception is considered uncaught.
-       */
-      [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
-        ArtMethod* method = stack_visitor->GetMethod();
-        DCHECK(method != nullptr);
-        if (method->IsRuntimeMethod()) {
-          // Ignore callee save method.
-          DCHECK(method->IsCalleeSaveMethod());
-          return true;
-        }
-
-        uint32_t dex_pc = stack_visitor->GetDexPc();
-        if (throw_method == nullptr) {
-          // First Java method found. It is either the method that threw the exception,
-          // or the Java native method that is reporting an exception thrown by
-          // native code.
-          this_at_throw.Assign(stack_visitor->GetThisObject());
-          throw_method = method;
-          throw_dex_pc = dex_pc;
-        }
-
-        if (dex_pc != dex::kDexNoIndex) {
-          StackHandleScope<1> hs(stack_visitor->GetThread());
-          uint32_t found_dex_pc;
-          Handle<mirror::Class> exception_class(hs.NewHandle(h_exception->GetClass()));
-          bool unused_clear_exception;
-          found_dex_pc = method->FindCatchBlock(exception_class, dex_pc, &unused_clear_exception);
-          if (found_dex_pc != dex::kDexNoIndex) {
-            catch_method = method;
-            catch_dex_pc = found_dex_pc;
-            return false;  // End stack walk.
-          }
-        }
-        return true;  // Continue stack walk.
-      },
-      self,
-      context.get(),
-      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames);
-
-  JDWP::EventLocation exception_throw_location;
-  SetEventLocation(&exception_throw_location, throw_method, throw_dex_pc);
-  JDWP::EventLocation exception_catch_location;
-  SetEventLocation(&exception_catch_location, catch_method, catch_dex_pc);
-
-  gJdwpState->PostException(&exception_throw_location,
-                            h_exception.Get(),
-                            &exception_catch_location,
-                            this_at_throw.Get());
-}
-
-void Dbg::PostClassPrepare(mirror::Class* c) {
-  if (!IsDebuggerActive()) {
-    return;
-  }
-  gJdwpState->PostClassPrepare(c);
-}
-
-void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
-                         ArtMethod* m, uint32_t dex_pc,
-                         int event_flags, const JValue* return_value) {
-  if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
-    return;
-  }
-
-  if (IsBreakpoint(m, dex_pc)) {
-    event_flags |= kBreakpoint;
-  }
-
-  // If the debugger is single-stepping one of our threads, check to
-  // see if we're that thread and we've reached a step point.
-  const SingleStepControl* single_step_control = thread->GetSingleStepControl();
-  if (single_step_control != nullptr) {
-    CHECK(!m->IsNative());
-    if (single_step_control->GetStepDepth() == JDWP::SD_INTO) {
-      // Step into method calls.  We break when the line number
-      // or method pointer changes.  If we're in SS_MIN mode, we
-      // always stop.
-      if (single_step_control->GetMethod() != m) {
-        event_flags |= kSingleStep;
-        VLOG(jdwp) << "SS new method";
-      } else if (single_step_control->GetStepSize() == JDWP::SS_MIN) {
-        event_flags |= kSingleStep;
-        VLOG(jdwp) << "SS new instruction";
-      } else if (single_step_control->ContainsDexPc(dex_pc)) {
-        event_flags |= kSingleStep;
-        VLOG(jdwp) << "SS new line";
-      }
-    } else if (single_step_control->GetStepDepth() == JDWP::SD_OVER) {
-      // Step over method calls.  We break when the line number is
-      // different and the frame depth is <= the original frame
-      // depth.  (We can't just compare on the method, because we
-      // might get unrolled past it by an exception, and it's tricky
-      // to identify recursion.)
-
-      int stack_depth = GetStackDepth(thread);
-
-      if (stack_depth < single_step_control->GetStackDepth()) {
-        // Popped up one or more frames, always trigger.
-        event_flags |= kSingleStep;
-        VLOG(jdwp) << "SS method pop";
-      } else if (stack_depth == single_step_control->GetStackDepth()) {
-        // Same depth, see if we moved.
-        if (single_step_control->GetStepSize() == JDWP::SS_MIN) {
-          event_flags |= kSingleStep;
-          VLOG(jdwp) << "SS new instruction";
-        } else if (single_step_control->ContainsDexPc(dex_pc)) {
-          event_flags |= kSingleStep;
-          VLOG(jdwp) << "SS new line";
-        }
-      }
-    } else {
-      CHECK_EQ(single_step_control->GetStepDepth(), JDWP::SD_OUT);
-      // Return from the current method.  We break when the frame
-      // depth pops up.
-
-      // This differs from the "method exit" break in that it stops
-      // with the PC at the next instruction in the returned-to
-      // function, rather than the end of the returning function.
-
-      int stack_depth = GetStackDepth(thread);
-      if (stack_depth < single_step_control->GetStackDepth()) {
-        event_flags |= kSingleStep;
-        VLOG(jdwp) << "SS method pop";
-      }
-    }
-  }
-
-  // If there's something interesting going on, see if it matches one
-  // of the debugger filters.
-  if (event_flags != 0) {
-    Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, return_value);
-  }
-}
-
-size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
-  switch (instrumentation_event) {
-    case instrumentation::Instrumentation::kMethodEntered:
-      return &method_enter_event_ref_count_;
-    case instrumentation::Instrumentation::kMethodExited:
-      return &method_exit_event_ref_count_;
-    case instrumentation::Instrumentation::kDexPcMoved:
-      return &dex_pc_change_event_ref_count_;
-    case instrumentation::Instrumentation::kFieldRead:
-      return &field_read_event_ref_count_;
-    case instrumentation::Instrumentation::kFieldWritten:
-      return &field_write_event_ref_count_;
-    case instrumentation::Instrumentation::kExceptionThrown:
-      return &exception_catch_event_ref_count_;
-    default:
-      return nullptr;
-  }
-}
-
-// Process request while all mutator threads are suspended.
-void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
-  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
-  switch (request.GetKind()) {
-    case DeoptimizationRequest::kNothing:
-      LOG(WARNING) << "Ignoring empty deoptimization request.";
-      break;
-    case DeoptimizationRequest::kRegisterForEvent:
-      VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
-                                 request.InstrumentationEvent());
-      instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent());
-      instrumentation_events_ |= request.InstrumentationEvent();
-      break;
-    case DeoptimizationRequest::kUnregisterForEvent:
-      VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
-                                 request.InstrumentationEvent());
-      instrumentation->RemoveListener(&gDebugInstrumentationListener,
-                                      request.InstrumentationEvent());
-      instrumentation_events_ &= ~request.InstrumentationEvent();
-      break;
-    case DeoptimizationRequest::kFullDeoptimization:
-      VLOG(jdwp) << "Deoptimize the world ...";
-      instrumentation->DeoptimizeEverything(kDbgInstrumentationKey);
-      VLOG(jdwp) << "Deoptimize the world DONE";
-      break;
-    case DeoptimizationRequest::kFullUndeoptimization:
-      VLOG(jdwp) << "Undeoptimize the world ...";
-      instrumentation->UndeoptimizeEverything(kDbgInstrumentationKey);
-      VLOG(jdwp) << "Undeoptimize the world DONE";
-      break;
-    case DeoptimizationRequest::kSelectiveDeoptimization:
-      VLOG(jdwp) << "Deoptimize method " << ArtMethod::PrettyMethod(request.Method()) << " ...";
-      instrumentation->Deoptimize(request.Method());
-      VLOG(jdwp) << "Deoptimize method " << ArtMethod::PrettyMethod(request.Method()) << " DONE";
-      break;
-    case DeoptimizationRequest::kSelectiveUndeoptimization:
-      VLOG(jdwp) << "Undeoptimize method " << ArtMethod::PrettyMethod(request.Method()) << " ...";
-      instrumentation->Undeoptimize(request.Method());
-      VLOG(jdwp) << "Undeoptimize method " << ArtMethod::PrettyMethod(request.Method()) << " DONE";
-      break;
-    default:
-      LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind();
-      UNREACHABLE();
-  }
-}
-
-void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
-  if (req.GetKind() == DeoptimizationRequest::kNothing) {
-    // Nothing to do.
-    return;
-  }
-  MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
-  RequestDeoptimizationLocked(req);
-}
-
-void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
-  switch (req.GetKind()) {
-    case DeoptimizationRequest::kRegisterForEvent: {
-      DCHECK_NE(req.InstrumentationEvent(), 0u);
-      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
-      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
-                                                req.InstrumentationEvent());
-      if (*counter == 0) {
-        VLOG(jdwp) << StringPrintf("Queue request #%zd to start listening to instrumentation event 0x%x",
-                                   deoptimization_requests_.size(), req.InstrumentationEvent());
-        deoptimization_requests_.push_back(req);
-      }
-      *counter = *counter + 1;
-      break;
-    }
-    case DeoptimizationRequest::kUnregisterForEvent: {
-      DCHECK_NE(req.InstrumentationEvent(), 0u);
-      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
-      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
-                                                req.InstrumentationEvent());
-      *counter = *counter - 1;
-      if (*counter == 0) {
-        VLOG(jdwp) << StringPrintf("Queue request #%zd to stop listening to instrumentation event 0x%x",
-                                   deoptimization_requests_.size(), req.InstrumentationEvent());
-        deoptimization_requests_.push_back(req);
-      }
-      break;
-    }
-    case DeoptimizationRequest::kFullDeoptimization: {
-      DCHECK(req.Method() == nullptr);
-      if (full_deoptimization_event_count_ == 0) {
-        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
-                   << " for full deoptimization";
-        deoptimization_requests_.push_back(req);
-      }
-      ++full_deoptimization_event_count_;
-      break;
-    }
-    case DeoptimizationRequest::kFullUndeoptimization: {
-      DCHECK(req.Method() == nullptr);
-      DCHECK_GT(full_deoptimization_event_count_, 0U);
-      --full_deoptimization_event_count_;
-      if (full_deoptimization_event_count_ == 0) {
-        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
-                   << " for full undeoptimization";
-        deoptimization_requests_.push_back(req);
-      }
-      break;
-    }
-    case DeoptimizationRequest::kSelectiveDeoptimization: {
-      DCHECK(req.Method() != nullptr);
-      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
-                 << " for deoptimization of " << req.Method()->PrettyMethod();
-      deoptimization_requests_.push_back(req);
-      break;
-    }
-    case DeoptimizationRequest::kSelectiveUndeoptimization: {
-      DCHECK(req.Method() != nullptr);
-      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
-                 << " for undeoptimization of " << req.Method()->PrettyMethod();
-      deoptimization_requests_.push_back(req);
-      break;
-    }
-    default: {
-      LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind();
-      UNREACHABLE();
-    }
-  }
-}
-
-void Dbg::ManageDeoptimization() {
-  Thread* const self = Thread::Current();
-  {
-    // Avoid suspend/resume if there is no pending request.
-    MutexLock mu(self, *Locks::deoptimization_lock_);
-    if (deoptimization_requests_.empty()) {
-      return;
-    }
-  }
-  CHECK_EQ(self->GetState(), kRunnable);
-  ScopedThreadSuspension sts(self, kWaitingForDeoptimization);
-  // Required for ProcessDeoptimizationRequest.
-  gc::ScopedGCCriticalSection gcs(self,
-                                  gc::kGcCauseInstrumentation,
-                                  gc::kCollectorTypeInstrumentation);
-  // We need to suspend mutator threads first.
-  ScopedSuspendAll ssa(__FUNCTION__);
-  const ThreadState old_state = self->SetStateUnsafe(kRunnable);
-  {
-    MutexLock mu(self, *Locks::deoptimization_lock_);
-    size_t req_index = 0;
-    for (DeoptimizationRequest& request : deoptimization_requests_) {
-      VLOG(jdwp) << "Process deoptimization request #" << req_index++;
-      ProcessDeoptimizationRequest(request);
-    }
-    deoptimization_requests_.clear();
-  }
-  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
-}
-
-static const Breakpoint* FindFirstBreakpointForMethod(ArtMethod* m)
-    REQUIRES_SHARED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
-  for (Breakpoint& breakpoint : gBreakpoints) {
-    if (breakpoint.IsInMethod(m)) {
-      return &breakpoint;
-    }
-  }
-  return nullptr;
-}
-
-bool Dbg::MethodHasAnyBreakpoints(ArtMethod* method) {
-  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
-  return FindFirstBreakpointForMethod(method) != nullptr;
-}
-
-// Sanity checks all existing breakpoints on the same method.
-static void SanityCheckExistingBreakpoints(ArtMethod* m,
-                                           DeoptimizationRequest::Kind deoptimization_kind)
-    REQUIRES_SHARED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
-  for (const Breakpoint& breakpoint : gBreakpoints) {
-    if (breakpoint.IsInMethod(m)) {
-      CHECK_EQ(deoptimization_kind, breakpoint.GetDeoptimizationKind());
-    }
-  }
-  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
-  if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
-    // We should have deoptimized everything but not "selectively" deoptimized this method.
-    CHECK(instrumentation->AreAllMethodsDeoptimized());
-    CHECK(!instrumentation->IsDeoptimized(m));
-  } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
-    // We should have "selectively" deoptimized this method.
-    // Note: while we have not deoptimized everything for this method, we may have done it for
-    // another event.
-    CHECK(instrumentation->IsDeoptimized(m));
-  } else {
-    // This method does not require deoptimization.
-    CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
-    CHECK(!instrumentation->IsDeoptimized(m));
-  }
-}
-
-// Returns the deoptimization kind required to set a breakpoint in a method.
-// If a breakpoint has already been set, we also return the first breakpoint
-// through the given 'existing_brkpt' pointer.
-static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
-                                                                 ArtMethod* m,
-                                                                 const Breakpoint** existing_brkpt)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  if (!Dbg::RequiresDeoptimization()) {
-    // We already run in interpreter-only mode so we don't need to deoptimize anything.
-    VLOG(jdwp) << "No need for deoptimization when fully running with interpreter for method "
-               << ArtMethod::PrettyMethod(m);
-    return DeoptimizationRequest::kNothing;
-  }
-  const Breakpoint* first_breakpoint;
-  {
-    ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
-    first_breakpoint = FindFirstBreakpointForMethod(m);
-    *existing_brkpt = first_breakpoint;
-  }
-
-  if (first_breakpoint == nullptr) {
-    // There is no breakpoint on this method yet: we need to deoptimize. If this method is default,
-    // we deoptimize everything; otherwise we deoptimize only this method. We
-    // deoptimize with defaults because we do not know everywhere they are used. It is possible some
-    // of the copies could be missed.
-    // TODO Deoptimizing on default methods might not be necessary in all cases.
-    bool need_full_deoptimization = m->IsDefault();
-    if (need_full_deoptimization) {
-      VLOG(jdwp) << "Need full deoptimization because of copying of method "
-                 << ArtMethod::PrettyMethod(m);
-      return DeoptimizationRequest::kFullDeoptimization;
-    } else {
-      // We don't need to deoptimize if the method has not been compiled.
-      const bool is_compiled = m->HasAnyCompiledCode();
-      if (is_compiled) {
-        VLOG(jdwp) << "Need selective deoptimization for compiled method "
-                   << ArtMethod::PrettyMethod(m);
-        return DeoptimizationRequest::kSelectiveDeoptimization;
-      } else {
-        // Method is not compiled: we don't need to deoptimize.
-        VLOG(jdwp) << "No need for deoptimization for non-compiled method "
-                   << ArtMethod::PrettyMethod(m);
-        return DeoptimizationRequest::kNothing;
-      }
-    }
-  } else {
-    // There is at least one breakpoint for this method: we don't need to deoptimize.
-    // Let's check that all breakpoints are configured the same way for deoptimization.
-    VLOG(jdwp) << "Breakpoint already set: no deoptimization is required";
-    DeoptimizationRequest::Kind deoptimization_kind = first_breakpoint->GetDeoptimizationKind();
-    if (kIsDebugBuild) {
-      ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
-      SanityCheckExistingBreakpoints(m, deoptimization_kind);
-    }
-    return DeoptimizationRequest::kNothing;
-  }
-}
-
-// Installs a breakpoint at the specified location. Also indicates through the deoptimization
-// request if we need to deoptimize.
-void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
-  Thread* const self = Thread::Current();
-  ArtMethod* m = FromMethodId(location->method_id);
-  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
-
-  const Breakpoint* existing_breakpoint = nullptr;
-  const DeoptimizationRequest::Kind deoptimization_kind =
-      GetRequiredDeoptimizationKind(self, m, &existing_breakpoint);
-  req->SetKind(deoptimization_kind);
-  if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
-    req->SetMethod(m);
-  } else {
-    CHECK(deoptimization_kind == DeoptimizationRequest::kNothing ||
-          deoptimization_kind == DeoptimizationRequest::kFullDeoptimization);
-    req->SetMethod(nullptr);
-  }
-
-  {
-    WriterMutexLock mu(self, *Locks::breakpoint_lock_);
-    // If there is at least one existing breakpoint on the same method, the new breakpoint
-    // must have the same deoptimization kind than the existing breakpoint(s).
-    DeoptimizationRequest::Kind breakpoint_deoptimization_kind;
-    if (existing_breakpoint != nullptr) {
-      breakpoint_deoptimization_kind = existing_breakpoint->GetDeoptimizationKind();
-    } else {
-      breakpoint_deoptimization_kind = deoptimization_kind;
-    }
-    gBreakpoints.push_back(Breakpoint(m, location->dex_pc, breakpoint_deoptimization_kind));
-    VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
-               << gBreakpoints[gBreakpoints.size() - 1];
-  }
-}
-
-// Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
-// request if we need to undeoptimize.
-void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
-  WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
-  ArtMethod* m = FromMethodId(location->method_id);
-  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
-  DeoptimizationRequest::Kind deoptimization_kind = DeoptimizationRequest::kNothing;
-  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
-    if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].IsInMethod(m)) {
-      VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
-      deoptimization_kind = gBreakpoints[i].GetDeoptimizationKind();
-      DCHECK_EQ(deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization,
-                Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
-      gBreakpoints.erase(gBreakpoints.begin() + i);
-      break;
-    }
-  }
-  const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
-  if (existing_breakpoint == nullptr) {
-    // There is no more breakpoint on this method: we need to undeoptimize.
-    if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
-      // This method required full deoptimization: we need to undeoptimize everything.
-      req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
-      req->SetMethod(nullptr);
-    } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
-      // This method required selective deoptimization: we need to undeoptimize only that method.
-      req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
-      req->SetMethod(m);
-    } else {
-      // This method had no need for deoptimization: do nothing.
-      CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
-      req->SetKind(DeoptimizationRequest::kNothing);
-      req->SetMethod(nullptr);
-    }
-  } else {
-    // There is at least one breakpoint for this method: we don't need to undeoptimize.
-    req->SetKind(DeoptimizationRequest::kNothing);
-    req->SetMethod(nullptr);
-    if (kIsDebugBuild) {
-      SanityCheckExistingBreakpoints(m, deoptimization_kind);
-    }
-  }
-}
-
-bool Dbg::IsForcedInterpreterNeededForCallingImpl(Thread* thread, ArtMethod* m) {
-  const SingleStepControl* const ssc = thread->GetSingleStepControl();
-  if (ssc == nullptr) {
-    // If we are not single-stepping, then we don't have to force interpreter.
-    return false;
-  }
-  if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
-    // If we are in interpreter only mode, then we don't have to force interpreter.
-    return false;
-  }
-
-  if (!m->IsNative() && !m->IsProxyMethod()) {
-    // If we want to step into a method, then we have to force interpreter on that call.
-    if (ssc->GetStepDepth() == JDWP::SD_INTO) {
-      return true;
-    }
-  }
-  return false;
-}
-
-bool Dbg::IsForcedInterpreterNeededForResolutionImpl(Thread* thread, ArtMethod* m) {
-  instrumentation::Instrumentation* const instrumentation =
-      Runtime::Current()->GetInstrumentation();
-  // If we are in interpreter only mode, then we don't have to force interpreter.
-  if (instrumentation->InterpretOnly()) {
-    return false;
-  }
-  // We can only interpret pure Java method.
-  if (m->IsNative() || m->IsProxyMethod()) {
-    return false;
-  }
-  const SingleStepControl* const ssc = thread->GetSingleStepControl();
-  if (ssc != nullptr) {
-    // If we want to step into a method, then we have to force interpreter on that call.
-    if (ssc->GetStepDepth() == JDWP::SD_INTO) {
-      return true;
-    }
-    // If we are stepping out from a static initializer, by issuing a step
-    // in or step over, that was implicitly invoked by calling a static method,
-    // then we need to step into that method. Having a lower stack depth than
-    // the one the single step control has indicates that the step originates
-    // from the static initializer.
-    if (ssc->GetStepDepth() != JDWP::SD_OUT &&
-        ssc->GetStackDepth() > GetStackDepth(thread)) {
-      return true;
-    }
-  }
-  // There are cases where we have to force interpreter on deoptimized methods,
-  // because in some cases the call will not be performed by invoking an entry
-  // point that has been replaced by the deoptimization, but instead by directly
-  // invoking the compiled code of the method, for example.
-  return instrumentation->IsDeoptimized(m);
-}
-
-bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, ArtMethod* m) {
-  // The upcall can be null and in that case we don't need to do anything.
-  if (m == nullptr) {
-    return false;
-  }
-  instrumentation::Instrumentation* const instrumentation =
-      Runtime::Current()->GetInstrumentation();
-  // If we are in interpreter only mode, then we don't have to force interpreter.
-  if (instrumentation->InterpretOnly()) {
-    return false;
-  }
-  // We can only interpret pure Java method.
-  if (m->IsNative() || m->IsProxyMethod()) {
-    return false;
-  }
-  const SingleStepControl* const ssc = thread->GetSingleStepControl();
-  if (ssc != nullptr) {
-    // If we are stepping out from a static initializer, by issuing a step
-    // out, that was implicitly invoked by calling a static method, then we
-    // need to step into the caller of that method. Having a lower stack
-    // depth than the one the single step control has indicates that the
-    // step originates from the static initializer.
-    if (ssc->GetStepDepth() == JDWP::SD_OUT &&
-        ssc->GetStackDepth() > GetStackDepth(thread)) {
-      return true;
-    }
-  }
-  // If we are returning from a static intializer, that was implicitly
-  // invoked by calling a static method and the caller is deoptimized,
-  // then we have to deoptimize the stack without forcing interpreter
-  // on the static method that was called originally. This problem can
-  // be solved easily by forcing instrumentation on the called method,
-  // because the instrumentation exit hook will recognise the need of
-  // stack deoptimization by calling IsForcedInterpreterNeededForUpcall.
-  return instrumentation->IsDeoptimized(m);
-}
-
-bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m) {
-  // The upcall can be null and in that case we don't need to do anything.
-  if (m == nullptr) {
-    return false;
-  }
-  instrumentation::Instrumentation* const instrumentation =
-      Runtime::Current()->GetInstrumentation();
-  // If we are in interpreter only mode, then we don't have to force interpreter.
-  if (instrumentation->InterpretOnly()) {
-    return false;
-  }
-  // We can only interpret pure Java method.
-  if (m->IsNative() || m->IsProxyMethod()) {
-    return false;
-  }
-  const SingleStepControl* const ssc = thread->GetSingleStepControl();
-  if (ssc != nullptr) {
-    // The debugger is not interested in what is happening under the level
-    // of the step, thus we only force interpreter when we are not below of
-    // the step.
-    if (ssc->GetStackDepth() >= GetStackDepth(thread)) {
-      return true;
-    }
-  }
-  if (thread->HasDebuggerShadowFrames()) {
-    // We need to deoptimize the stack for the exception handling flow so that
-    // we don't miss any deoptimization that should be done when there are
-    // debugger shadow frames.
-    return true;
-  }
-  // We have to require stack deoptimization if the upcall is deoptimized.
-  return instrumentation->IsDeoptimized(m);
-}
-
 // Do we need to deoptimize the stack to handle an exception?
 bool Dbg::IsForcedInterpreterNeededForExceptionImpl(Thread* thread) {
-  const SingleStepControl* const ssc = thread->GetSingleStepControl();
-  if (ssc != nullptr) {
-    // We deopt to step into the catch handler.
-    return true;
-  }
   // Deoptimization is required if at least one method in the stack needs it. However we
   // skip frames that will be unwound (thus not executed).
   bool needs_deoptimization = false;
@@ -3627,569 +181,6 @@
   return needs_deoptimization;
 }
 
-// Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
-// cause suspension if the thread is the current thread.
-class ScopedDebuggerThreadSuspension {
- public:
-  ScopedDebuggerThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
-      REQUIRES(!Locks::thread_list_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_) :
-      thread_(nullptr),
-      error_(JDWP::ERR_NONE),
-      self_suspend_(false),
-      other_suspend_(false) {
-    ScopedObjectAccessUnchecked soa(self);
-    thread_ = DecodeThread(soa, thread_id, &error_);
-    if (error_ == JDWP::ERR_NONE) {
-      if (thread_ == soa.Self()) {
-        self_suspend_ = true;
-      } else {
-        Thread* suspended_thread;
-        {
-          ScopedThreadSuspension sts(self, kWaitingForDebuggerSuspension);
-          jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id);
-          bool timed_out;
-          ThreadList* const thread_list = Runtime::Current()->GetThreadList();
-          suspended_thread = thread_list->SuspendThreadByPeer(thread_peer,
-                                                              /* request_suspension= */ true,
-                                                              SuspendReason::kForDebugger,
-                                                              &timed_out);
-        }
-        if (suspended_thread == nullptr) {
-          // Thread terminated from under us while suspending.
-          error_ = JDWP::ERR_INVALID_THREAD;
-        } else {
-          CHECK_EQ(suspended_thread, thread_);
-          other_suspend_ = true;
-        }
-      }
-    }
-  }
-
-  Thread* GetThread() const {
-    return thread_;
-  }
-
-  JDWP::JdwpError GetError() const {
-    return error_;
-  }
-
-  ~ScopedDebuggerThreadSuspension() {
-    if (other_suspend_) {
-      bool resumed = Runtime::Current()->GetThreadList()->Resume(thread_,
-                                                                 SuspendReason::kForDebugger);
-      DCHECK(resumed);
-    }
-  }
-
- private:
-  Thread* thread_;
-  JDWP::JdwpError error_;
-  bool self_suspend_;
-  bool other_suspend_;
-};
-
-JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size,
-                                   JDWP::JdwpStepDepth step_depth) {
-  Thread* self = Thread::Current();
-  ScopedDebuggerThreadSuspension sts(self, thread_id);
-  if (sts.GetError() != JDWP::ERR_NONE) {
-    return sts.GetError();
-  }
-
-  // Work out what ArtMethod* we're in, the current line number, and how deep the stack currently
-  // is for step-out.
-  struct SingleStepStackVisitor : public StackVisitor {
-    explicit SingleStepStackVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
-        : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
-          stack_depth(0),
-          method(nullptr),
-          line_number(-1) {}
-
-    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
-    // annotalysis.
-    bool VisitFrame() override NO_THREAD_SAFETY_ANALYSIS {
-      ArtMethod* m = GetMethod();
-      if (!m->IsRuntimeMethod()) {
-        ++stack_depth;
-        if (method == nullptr) {
-          const DexFile* dex_file = m->GetDexFile();
-          method = m;
-          if (dex_file != nullptr) {
-            line_number = annotations::GetLineNumFromPC(dex_file, m, GetDexPc());
-          }
-        }
-      }
-      return true;
-    }
-
-    int stack_depth;
-    ArtMethod* method;
-    int32_t line_number;
-  };
-
-  Thread* const thread = sts.GetThread();
-  SingleStepStackVisitor visitor(thread);
-  visitor.WalkStack();
-
-  // Allocate single step.
-  SingleStepControl* single_step_control =
-      new (std::nothrow) SingleStepControl(step_size, step_depth,
-                                           visitor.stack_depth, visitor.method);
-  if (single_step_control == nullptr) {
-    LOG(ERROR) << "Failed to allocate SingleStepControl";
-    return JDWP::ERR_OUT_OF_MEMORY;
-  }
-
-  ArtMethod* m = single_step_control->GetMethod();
-  const int32_t line_number = visitor.line_number;
-  // Note: if the thread is not running Java code (pure native thread), there is no "current"
-  // method on the stack (and no line number either).
-  if (m != nullptr && !m->IsNative()) {
-    CodeItemDebugInfoAccessor accessor(m->DexInstructionDebugInfo());
-    bool last_pc_valid = false;
-    uint32_t last_pc = 0u;
-    // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
-    accessor.DecodeDebugPositionInfo([&](const DexFile::PositionInfo& entry) {
-      if (static_cast<int32_t>(entry.line_) == line_number) {
-        if (!last_pc_valid) {
-          // Everything from this address until the next line change is ours.
-          last_pc = entry.address_;
-          last_pc_valid = true;
-        }
-        // Otherwise, if we're already in a valid range for this line,
-        // just keep going (shouldn't really happen)...
-      } else if (last_pc_valid) {  // and the line number is new
-        // Add everything from the last entry up until here to the set
-        for (uint32_t dex_pc = last_pc; dex_pc < entry.address_; ++dex_pc) {
-          single_step_control->AddDexPc(dex_pc);
-        }
-        last_pc_valid = false;
-      }
-      return false;  // There may be multiple entries for any given line.
-    });
-    // If the line number was the last in the position table...
-    if (last_pc_valid) {
-      for (uint32_t dex_pc = last_pc; dex_pc < accessor.InsnsSizeInCodeUnits(); ++dex_pc) {
-        single_step_control->AddDexPc(dex_pc);
-      }
-    }
-  }
-
-  // Activate single-step in the thread.
-  thread->ActivateSingleStepControl(single_step_control);
-
-  if (VLOG_IS_ON(jdwp)) {
-    VLOG(jdwp) << "Single-step thread: " << *thread;
-    VLOG(jdwp) << "Single-step step size: " << single_step_control->GetStepSize();
-    VLOG(jdwp) << "Single-step step depth: " << single_step_control->GetStepDepth();
-    VLOG(jdwp) << "Single-step current method: "
-               << ArtMethod::PrettyMethod(single_step_control->GetMethod());
-    VLOG(jdwp) << "Single-step current line: " << line_number;
-    VLOG(jdwp) << "Single-step current stack depth: " << single_step_control->GetStackDepth();
-    VLOG(jdwp) << "Single-step dex_pc values:";
-    for (uint32_t dex_pc : single_step_control->GetDexPcs()) {
-      VLOG(jdwp) << StringPrintf(" %#x", dex_pc);
-    }
-  }
-
-  return JDWP::ERR_NONE;
-}
-
-void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) {
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  JDWP::JdwpError error;
-  Thread* thread = DecodeThread(soa, thread_id, &error);
-  if (error == JDWP::ERR_NONE) {
-    thread->DeactivateSingleStepControl();
-  }
-}
-
-static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
-  switch (tag) {
-    default:
-      LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag);
-      UNREACHABLE();
-
-    // Primitives.
-    case JDWP::JT_BYTE:    return 'B';
-    case JDWP::JT_CHAR:    return 'C';
-    case JDWP::JT_FLOAT:   return 'F';
-    case JDWP::JT_DOUBLE:  return 'D';
-    case JDWP::JT_INT:     return 'I';
-    case JDWP::JT_LONG:    return 'J';
-    case JDWP::JT_SHORT:   return 'S';
-    case JDWP::JT_VOID:    return 'V';
-    case JDWP::JT_BOOLEAN: return 'Z';
-
-    // Reference types.
-    case JDWP::JT_ARRAY:
-    case JDWP::JT_OBJECT:
-    case JDWP::JT_STRING:
-    case JDWP::JT_THREAD:
-    case JDWP::JT_THREAD_GROUP:
-    case JDWP::JT_CLASS_LOADER:
-    case JDWP::JT_CLASS_OBJECT:
-      return 'L';
-  }
-}
-
-JDWP::JdwpError Dbg::PrepareInvokeMethod(uint32_t request_id, JDWP::ObjectId thread_id,
-                                         JDWP::ObjectId object_id, JDWP::RefTypeId class_id,
-                                         JDWP::MethodId method_id, uint32_t arg_count,
-                                         uint64_t arg_values[], JDWP::JdwpTag* arg_types,
-                                         uint32_t options) {
-  Thread* const self = Thread::Current();
-  CHECK_EQ(self, GetDebugThread()) << "This must be called by the JDWP thread";
-  const bool resume_all_threads = ((options & JDWP::INVOKE_SINGLE_THREADED) == 0);
-
-  ThreadList* thread_list = Runtime::Current()->GetThreadList();
-  Thread* targetThread = nullptr;
-  {
-    ScopedObjectAccessUnchecked soa(self);
-    JDWP::JdwpError error;
-    targetThread = DecodeThread(soa, thread_id, &error);
-    if (error != JDWP::ERR_NONE) {
-      LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id;
-      return error;
-    }
-    if (targetThread->GetInvokeReq() != nullptr) {
-      // Thread is already invoking a method on behalf of the debugger.
-      LOG(ERROR) << "InvokeMethod request for thread already invoking a method: " << *targetThread;
-      return JDWP::ERR_ALREADY_INVOKING;
-    }
-    if (!targetThread->IsReadyForDebugInvoke()) {
-      // Thread is not suspended by an event so it cannot invoke a method.
-      LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
-      return JDWP::ERR_INVALID_THREAD;
-    }
-
-    /*
-     * According to the JDWP specs, we are expected to resume all threads (or only the
-     * target thread) once. So if a thread has been suspended more than once (either by
-     * the debugger for an event or by the runtime for GC), it will remain suspended before
-     * the invoke is executed. This means the debugger is responsible to properly resume all
-     * the threads it has suspended so the target thread can execute the method.
-     *
-     * However, for compatibility reason with older versions of debuggers (like Eclipse), we
-     * fully resume all threads (by canceling *all* debugger suspensions) when the debugger
-     * wants us to resume all threads. This is to avoid ending up in deadlock situation.
-     *
-     * On the other hand, if we are asked to only resume the target thread, then we follow the
-     * JDWP specs by resuming that thread only once. This means the thread will remain suspended
-     * if it has been suspended more than once before the invoke (and again, this is the
-     * responsibility of the debugger to properly resume that thread before invoking a method).
-     */
-    int suspend_count;
-    {
-      MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
-      suspend_count = targetThread->GetSuspendCount();
-    }
-    if (suspend_count > 1 && resume_all_threads) {
-      // The target thread will remain suspended even after we resume it. Let's emit a warning
-      // to indicate the invoke won't be executed until the thread is resumed.
-      LOG(WARNING) << *targetThread << " suspended more than once (suspend count == "
-                   << suspend_count << "). This thread will invoke the method only once "
-                   << "it is fully resumed.";
-    }
-
-    ObjPtr<mirror::Object> receiver = gRegistry->Get<mirror::Object*>(object_id, &error);
-    if (error != JDWP::ERR_NONE) {
-      return JDWP::ERR_INVALID_OBJECT;
-    }
-
-    gRegistry->Get<mirror::Object*>(thread_id, &error);
-    if (error != JDWP::ERR_NONE) {
-      return JDWP::ERR_INVALID_OBJECT;
-    }
-
-    ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
-    if (c == nullptr) {
-      return error;
-    }
-
-    ArtMethod* m = FromMethodId(method_id);
-    if (m->IsStatic() != (receiver == nullptr)) {
-      return JDWP::ERR_INVALID_METHODID;
-    }
-    if (m->IsStatic()) {
-      if (m->GetDeclaringClass() != c) {
-        return JDWP::ERR_INVALID_METHODID;
-      }
-    } else {
-      if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
-        return JDWP::ERR_INVALID_METHODID;
-      }
-    }
-
-    // Check the argument list matches the method.
-    uint32_t shorty_len = 0;
-    const char* shorty = m->GetShorty(&shorty_len);
-    if (shorty_len - 1 != arg_count) {
-      return JDWP::ERR_ILLEGAL_ARGUMENT;
-    }
-
-    {
-      StackHandleScope<2> hs(soa.Self());
-      HandleWrapperObjPtr<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver));
-      HandleWrapperObjPtr<mirror::Class> h_klass(hs.NewHandleWrapper(&c));
-      const dex::TypeList* types = m->GetParameterTypeList();
-      for (size_t i = 0; i < arg_count; ++i) {
-        if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
-          return JDWP::ERR_ILLEGAL_ARGUMENT;
-        }
-
-        if (shorty[i + 1] == 'L') {
-          // Did we really get an argument of an appropriate reference type?
-          ObjPtr<mirror::Class> parameter_type =
-              m->ResolveClassFromTypeIndex(types->GetTypeItem(i).type_idx_);
-          mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i], &error);
-          if (error != JDWP::ERR_NONE) {
-            return JDWP::ERR_INVALID_OBJECT;
-          }
-          if (argument != nullptr && !argument->InstanceOf(parameter_type)) {
-            return JDWP::ERR_ILLEGAL_ARGUMENT;
-          }
-
-          // Turn the on-the-wire ObjectId into a jobject.
-          jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]);
-          v.l = gRegistry->GetJObject(arg_values[i]);
-        }
-      }
-    }
-
-    // Allocates a DebugInvokeReq.
-    DebugInvokeReq* req = new (std::nothrow) DebugInvokeReq(
-        request_id, thread_id, receiver, c, m, options, arg_values, arg_count);
-    if (req == nullptr) {
-      LOG(ERROR) << "Failed to allocate DebugInvokeReq";
-      return JDWP::ERR_OUT_OF_MEMORY;
-    }
-
-    // Attaches the DebugInvokeReq to the target thread so it executes the method when
-    // it is resumed. Once the invocation completes, the target thread will delete it before
-    // suspending itself (see ThreadList::SuspendSelfForDebugger).
-    targetThread->SetDebugInvokeReq(req);
-  }
-
-  // The fact that we've released the thread list lock is a bit risky --- if the thread goes
-  // away we're sitting high and dry -- but we must release this before the UndoDebuggerSuspensions
-  // call.
-  if (resume_all_threads) {
-    VLOG(jdwp) << "      Resuming all threads";
-    thread_list->UndoDebuggerSuspensions();
-  } else {
-    VLOG(jdwp) << "      Resuming event thread only";
-    bool resumed = thread_list->Resume(targetThread, SuspendReason::kForDebugger);
-    DCHECK(resumed);
-  }
-
-  return JDWP::ERR_NONE;
-}
-
-void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
-  Thread* const self = Thread::Current();
-  CHECK_NE(self, GetDebugThread()) << "This must be called by the event thread";
-
-  ScopedObjectAccess soa(self);
-
-  // We can be called while an exception is pending. We need
-  // to preserve that across the method invocation.
-  StackHandleScope<1> hs(soa.Self());
-  Handle<mirror::Throwable> old_exception = hs.NewHandle(soa.Self()->GetException());
-  soa.Self()->ClearException();
-
-  // Execute the method then sends reply to the debugger.
-  ExecuteMethodWithoutPendingException(soa, pReq);
-
-  // If an exception was pending before the invoke, restore it now.
-  if (old_exception != nullptr) {
-    soa.Self()->SetException(old_exception.Get());
-  }
-}
-
-// Helper function: write a variable-width value into the output input buffer.
-static void WriteValue(JDWP::ExpandBuf* pReply, int width, uint64_t value) {
-  switch (width) {
-    case 1:
-      expandBufAdd1(pReply, value);
-      break;
-    case 2:
-      expandBufAdd2BE(pReply, value);
-      break;
-    case 4:
-      expandBufAdd4BE(pReply, value);
-      break;
-    case 8:
-      expandBufAdd8BE(pReply, value);
-      break;
-    default:
-      LOG(FATAL) << width;
-      UNREACHABLE();
-  }
-}
-
-void Dbg::ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq) {
-  soa.Self()->AssertNoPendingException();
-
-  // Translate the method through the vtable, unless the debugger wants to suppress it.
-  ArtMethod* m = pReq->method;
-  PointerSize image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
-  if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver.Read() != nullptr) {
-    ArtMethod* actual_method =
-        pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m, image_pointer_size);
-    if (actual_method != m) {
-      VLOG(jdwp) << "ExecuteMethod translated " << ArtMethod::PrettyMethod(m)
-                 << " to " << ArtMethod::PrettyMethod(actual_method);
-      m = actual_method;
-    }
-  }
-  VLOG(jdwp) << "ExecuteMethod " << ArtMethod::PrettyMethod(m)
-             << " receiver=" << pReq->receiver.Read()
-             << " arg_count=" << pReq->arg_count;
-  CHECK(m != nullptr);
-
-  static_assert(sizeof(jvalue) == sizeof(uint64_t), "jvalue and uint64_t have different sizes.");
-
-  // Invoke the method.
-  ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(pReq->receiver.Read()));
-  JValue result = InvokeWithJValues(soa, ref.get(), jni::EncodeArtMethod(m),
-                                    reinterpret_cast<jvalue*>(pReq->arg_values.get()));
-
-  // Prepare JDWP ids for the reply.
-  JDWP::JdwpTag result_tag = BasicTagFromDescriptor(m->GetShorty());
-  const bool is_object_result = (result_tag == JDWP::JT_OBJECT);
-  StackHandleScope<3> hs(soa.Self());
-  Handle<mirror::Object> object_result = hs.NewHandle(is_object_result ? result.GetL() : nullptr);
-  Handle<mirror::Throwable> exception = hs.NewHandle(soa.Self()->GetException());
-  soa.Self()->ClearException();
-
-  if (!IsDebuggerActive()) {
-    // The debugger detached: we must not re-suspend threads. We also don't need to fill the reply
-    // because it won't be sent either.
-    return;
-  }
-
-  JDWP::ObjectId exceptionObjectId = gRegistry->Add(exception);
-  uint64_t result_value = 0;
-  if (exceptionObjectId != 0) {
-    VLOG(jdwp) << "  JDWP invocation returning with exception=" << exception.Get()
-               << " " << exception->Dump();
-    result_value = 0;
-  } else if (is_object_result) {
-    /* if no exception was thrown, examine object result more closely */
-    JDWP::JdwpTag new_tag = TagFromObject(soa, object_result.Get());
-    if (new_tag != result_tag) {
-      VLOG(jdwp) << "  JDWP promoted result from " << result_tag << " to " << new_tag;
-      result_tag = new_tag;
-    }
-
-    // Register the object in the registry and reference its ObjectId. This ensures
-    // GC safety and prevents from accessing stale reference if the object is moved.
-    result_value = gRegistry->Add(object_result.Get());
-  } else {
-    // Primitive result.
-    DCHECK(IsPrimitiveTag(result_tag));
-    result_value = result.GetJ();
-  }
-  const bool is_constructor = m->IsConstructor() && !m->IsStatic();
-  if (is_constructor) {
-    // If we invoked a constructor (which actually returns void), return the receiver,
-    // unless we threw, in which case we return null.
-    DCHECK_EQ(JDWP::JT_VOID, result_tag);
-    if (exceptionObjectId == 0) {
-      if (m->GetDeclaringClass()->IsStringClass()) {
-        // For string constructors, the new string is remapped to the receiver (stored in ref).
-        Handle<mirror::Object> decoded_ref = hs.NewHandle(soa.Self()->DecodeJObject(ref.get()));
-        result_value = gRegistry->Add(decoded_ref);
-        result_tag = TagFromObject(soa, decoded_ref.Get());
-      } else {
-        // TODO we could keep the receiver ObjectId in the DebugInvokeReq to avoid looking into the
-        // object registry.
-        result_value = GetObjectRegistry()->Add(pReq->receiver.Read());
-        result_tag = TagFromObject(soa, pReq->receiver.Read());
-      }
-    } else {
-      result_value = 0;
-      result_tag = JDWP::JT_OBJECT;
-    }
-  }
-
-  // Suspend other threads if the invoke is not single-threaded.
-  if ((pReq->options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
-    ScopedThreadSuspension sts(soa.Self(), kWaitingForDebuggerSuspension);
-    // Avoid a deadlock between GC and debugger where GC gets suspended during GC. b/25800335.
-    gc::ScopedGCCriticalSection gcs(soa.Self(), gc::kGcCauseDebugger, gc::kCollectorTypeDebugger);
-    VLOG(jdwp) << "      Suspending all threads";
-    Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
-  }
-
-  VLOG(jdwp) << "  --> returned " << result_tag
-             << StringPrintf(" %#" PRIx64 " (except=%#" PRIx64 ")", result_value,
-                             exceptionObjectId);
-
-  // Show detailed debug output.
-  if (result_tag == JDWP::JT_STRING && exceptionObjectId == 0) {
-    if (result_value != 0) {
-      if (VLOG_IS_ON(jdwp)) {
-        std::string result_string;
-        JDWP::JdwpError error = Dbg::StringToUtf8(result_value, &result_string);
-        CHECK_EQ(error, JDWP::ERR_NONE);
-        VLOG(jdwp) << "      string '" << result_string << "'";
-      }
-    } else {
-      VLOG(jdwp) << "      string (null)";
-    }
-  }
-
-  // Attach the reply to DebugInvokeReq so it can be sent to the debugger when the event thread
-  // is ready to suspend.
-  BuildInvokeReply(pReq->reply, pReq->request_id, result_tag, result_value, exceptionObjectId);
-}
-
-void Dbg::BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id, JDWP::JdwpTag result_tag,
-                           uint64_t result_value, JDWP::ObjectId exception) {
-  // Make room for the JDWP header since we do not know the size of the reply yet.
-  JDWP::expandBufAddSpace(pReply, kJDWPHeaderLen);
-
-  size_t width = GetTagWidth(result_tag);
-  JDWP::expandBufAdd1(pReply, result_tag);
-  if (width != 0) {
-    WriteValue(pReply, width, result_value);
-  }
-  JDWP::expandBufAdd1(pReply, JDWP::JT_OBJECT);
-  JDWP::expandBufAddObjectId(pReply, exception);
-
-  // Now we know the size, we can complete the JDWP header.
-  uint8_t* buf = expandBufGetBuffer(pReply);
-  JDWP::Set4BE(buf + kJDWPHeaderSizeOffset, expandBufGetLength(pReply));
-  JDWP::Set4BE(buf + kJDWPHeaderIdOffset, request_id);
-  JDWP::Set1(buf + kJDWPHeaderFlagsOffset, kJDWPFlagReply);  // flags
-  JDWP::Set2BE(buf + kJDWPHeaderErrorCodeOffset, JDWP::ERR_NONE);
-}
-
-void Dbg::FinishInvokeMethod(DebugInvokeReq* pReq) {
-  CHECK_NE(Thread::Current(), GetDebugThread()) << "This must be called by the event thread";
-
-  JDWP::ExpandBuf* const pReply = pReq->reply;
-  CHECK(pReply != nullptr) << "No reply attached to DebugInvokeReq";
-
-  // We need to prevent other threads (including JDWP thread) from interacting with the debugger
-  // while we send the reply but are not yet suspended. The JDWP token will be released just before
-  // we suspend ourself again (see ThreadList::SuspendSelfForDebugger).
-  gJdwpState->AcquireJdwpTokenForEvent(pReq->thread_id);
-
-  // Send the reply unless the debugger detached before the completion of the method.
-  if (IsDebuggerActive()) {
-    const size_t replyDataLength = expandBufGetLength(pReply) - kJDWPHeaderLen;
-    VLOG(jdwp) << StringPrintf("REPLY INVOKE id=0x%06x (length=%zu)",
-                               pReq->request_id, replyDataLength);
-
-    gJdwpState->SendRequest(pReply);
-  } else {
-    VLOG(jdwp) << "Not sending invoke reply because debugger detached";
-  }
-}
 
 bool Dbg::DdmHandleChunk(JNIEnv* env,
                          uint32_t type,
@@ -4273,52 +264,6 @@
   return true;
 }
 
-/*
- * "request" contains a full JDWP packet, possibly with multiple chunks.  We
- * need to process each, accumulate the replies, and ship the whole thing
- * back.
- *
- * Returns "true" if we have a reply.  The reply buffer is newly allocated,
- * and includes the chunk type/length, followed by the data.
- *
- * OLD-TODO: we currently assume that the request and reply include a single
- * chunk.  If this becomes inconvenient we will need to adapt.
- */
-bool Dbg::DdmHandlePacket(JDWP::Request* request, uint8_t** pReplyBuf, int* pReplyLen) {
-  Thread* self = Thread::Current();
-  JNIEnv* env = self->GetJniEnv();
-
-  uint32_t type = request->ReadUnsigned32("type");
-  uint32_t length = request->ReadUnsigned32("length");
-
-  // Create a byte[] corresponding to 'request'.
-  size_t request_length = request->size();
-  // Run through and find all chunks.  [Currently just find the first.]
-  if (length != request_length) {
-    LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zd)", length, request_length);
-    return false;
-  }
-
-  ArrayRef<const jbyte> data(reinterpret_cast<const jbyte*>(request->data()), request_length);
-  std::vector<uint8_t> out_data;
-  uint32_t out_type = 0;
-  request->Skip(request_length);
-  if (!DdmHandleChunk(env, type, data, &out_type, &out_data) || out_data.empty()) {
-    return false;
-  }
-  const uint32_t kDdmHeaderSize = 8;
-  *pReplyLen = out_data.size() + kDdmHeaderSize;
-  *pReplyBuf = new uint8_t[out_data.size() + kDdmHeaderSize];
-  memcpy((*pReplyBuf) + kDdmHeaderSize, out_data.data(), out_data.size());
-  JDWP::Set4BE(*pReplyBuf, out_type);
-  JDWP::Set4BE((*pReplyBuf) + 4, static_cast<uint32_t>(out_data.size()));
-  VLOG(jdwp)
-      << StringPrintf("dvmHandleDdm returning type=%.4s", reinterpret_cast<char*>(*pReplyBuf))
-      << "0x" << std::hex << reinterpret_cast<uintptr_t>(*pReplyBuf) << std::dec
-      << " len= " << out_data.size();
-  return true;
-}
-
 void Dbg::DdmBroadcast(bool connect) {
   VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";
 
@@ -4349,6 +294,7 @@
   gDdmThreadNotification = false;
 }
 
+
 /*
  * Send a notification when a thread starts, stops, or changes its name.
  *
@@ -4356,6 +302,7 @@
  * first enabled, it's possible for "thread" to be actively executing.
  */
 void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
+  Locks::mutator_lock_->AssertNotExclusiveHeld(Thread::Current());
   if (!gDdmThreadNotification) {
     return;
   }
@@ -4363,24 +310,23 @@
   RuntimeCallbacks* cb = Runtime::Current()->GetRuntimeCallbacks();
   if (type == CHUNK_TYPE("THDE")) {
     uint8_t buf[4];
-    JDWP::Set4BE(&buf[0], t->GetThreadId());
+    Set4BE(&buf[0], t->GetThreadId());
     cb->DdmPublishChunk(CHUNK_TYPE("THDE"), ArrayRef<const uint8_t>(buf));
   } else {
     CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
-    ScopedObjectAccessUnchecked soa(Thread::Current());
-    StackHandleScope<1> hs(soa.Self());
+    StackHandleScope<1> hs(Thread::Current());
     Handle<mirror::String> name(hs.NewHandle(t->GetThreadName()));
     size_t char_count = (name != nullptr) ? name->GetLength() : 0;
     const jchar* chars = (name != nullptr) ? name->GetValue() : nullptr;
     bool is_compressed = (name != nullptr) ? name->IsCompressed() : false;
 
     std::vector<uint8_t> bytes;
-    JDWP::Append4BE(bytes, t->GetThreadId());
+    Append4BE(bytes, t->GetThreadId());
     if (is_compressed) {
       const uint8_t* chars_compressed = name->GetValueCompressed();
-      JDWP::AppendUtf16CompressedBE(bytes, chars_compressed, char_count);
+      AppendUtf16CompressedBE(bytes, chars_compressed, char_count);
     } else {
-      JDWP::AppendUtf16BE(bytes, chars, char_count);
+      AppendUtf16BE(bytes, chars, char_count);
     }
     CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2);
     cb->DdmPublishChunk(type, ArrayRef<const uint8_t>(bytes));
@@ -4391,30 +337,25 @@
   // Enable/disable thread notifications.
   gDdmThreadNotification = enable;
   if (enable) {
-    // Suspend the VM then post thread start notifications for all threads. Threads attaching will
-    // see a suspension in progress and block until that ends. They then post their own start
-    // notification.
-    SuspendVM();
-    std::list<Thread*> threads;
+    // Use a Checkpoint to cause every currently running thread to send their own notification when
+    // able. We then wait for every thread thread active at the time to post the creation
+    // notification. Threads created later will send this themselves.
     Thread* self = Thread::Current();
-    {
-      MutexLock mu(self, *Locks::thread_list_lock_);
-      threads = Runtime::Current()->GetThreadList()->GetList();
-    }
-    {
-      ScopedObjectAccess soa(self);
-      for (Thread* thread : threads) {
-        Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
-      }
-    }
-    ResumeVM();
+    ScopedObjectAccess soa(self);
+    Barrier finish_barrier(0);
+    FunctionClosure fc([&](Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) {
+      Thread* cls_self = Thread::Current();
+      Locks::mutator_lock_->AssertSharedHeld(cls_self);
+      Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
+      finish_barrier.Pass(cls_self);
+    });
+    size_t checkpoints = Runtime::Current()->GetThreadList()->RunCheckpoint(&fc);
+    ScopedThreadSuspension sts(self, ThreadState::kWaitingForCheckPointsToRun);
+    finish_barrier.Increment(self, checkpoints);
   }
 }
 
 void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
-  if (IsDebuggerActive()) {
-    gJdwpState->PostThreadChange(t, type == CHUNK_TYPE("THCR"));
-  }
   Dbg::DdmSendThreadNotification(t, type);
 }
 
@@ -4426,23 +367,19 @@
   Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
 }
 
-JDWP::JdwpState* Dbg::GetJdwpState() {
-  return gJdwpState;
-}
-
 int Dbg::DdmHandleHpifChunk(HpifWhen when) {
   if (when == HPIF_WHEN_NOW) {
     DdmSendHeapInfo(when);
-    return true;
+    return 1;
   }
 
   if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
     LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
-    return false;
+    return 0;
   }
 
   gDdmHpifWhen = when;
-  return true;
+  return 1;
 }
 
 bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
@@ -4494,14 +431,14 @@
   uint8_t heap_count = 1;
   gc::Heap* heap = Runtime::Current()->GetHeap();
   std::vector<uint8_t> bytes;
-  JDWP::Append4BE(bytes, heap_count);
-  JDWP::Append4BE(bytes, 1);  // Heap id (bogus; we only have one heap).
-  JDWP::Append8BE(bytes, MilliTime());
-  JDWP::Append1BE(bytes, reason);
-  JDWP::Append4BE(bytes, heap->GetMaxMemory());  // Max allowed heap size in bytes.
-  JDWP::Append4BE(bytes, heap->GetTotalMemory());  // Current heap size in bytes.
-  JDWP::Append4BE(bytes, heap->GetBytesAllocated());
-  JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
+  Append4BE(bytes, heap_count);
+  Append4BE(bytes, 1);  // Heap id (bogus; we only have one heap).
+  Append8BE(bytes, MilliTime());
+  Append1BE(bytes, reason);
+  Append4BE(bytes, heap->GetMaxMemory());  // Max allowed heap size in bytes.
+  Append4BE(bytes, heap->GetTotalMemory());  // Current heap size in bytes.
+  Append4BE(bytes, heap->GetBytesAllocated());
+  Append4BE(bytes, heap->GetObjectsAllocated());
   CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
   Runtime::Current()->GetRuntimeCallbacks()->DdmPublishChunk(CHUNK_TYPE("HPIF"),
                                                              ArrayRef<const uint8_t>(bytes));
@@ -4567,15 +504,15 @@
     }
 
     // Start a new HPSx chunk.
-    JDWP::Write4BE(&p_, 1);  // Heap id (bogus; we only have one heap).
-    JDWP::Write1BE(&p_, 8);  // Size of allocation unit, in bytes.
+    Write4BE(&p_, 1);  // Heap id (bogus; we only have one heap).
+    Write1BE(&p_, 8);  // Size of allocation unit, in bytes.
 
-    JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // virtual address of segment start.
-    JDWP::Write4BE(&p_, 0);  // offset of this piece (relative to the virtual address).
+    Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // virtual address of segment start.
+    Write4BE(&p_, 0);  // offset of this piece (relative to the virtual address).
     // [u4]: length of piece, in allocation units
     // We won't know this until we're done, so save the offset and stuff in a dummy value.
     pieceLenField_ = p_;
-    JDWP::Write4BE(&p_, 0x55555555);
+    Write4BE(&p_, 0x55555555);
     needHeader_ = false;
   }
 
@@ -4588,7 +525,7 @@
     // Patch the "length of piece" field.
     CHECK_LE(&buf_[0], pieceLenField_);
     CHECK_LE(pieceLenField_, p_);
-    JDWP::Set4BE(pieceLenField_, totalAllocationUnits_);
+    Set4BE(pieceLenField_, totalAllocationUnits_);
 
     ArrayRef<const uint8_t> out(&buf_[0], p_ - &buf_[0]);
     Runtime::Current()->GetRuntimeCallbacks()->DdmPublishChunk(type_, out);
@@ -4717,7 +654,7 @@
     return p == nullptr ? HPSG_STATE(SOLIDITY_FREE, 0) : HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
   }
 
-  uint8_t ExamineJavaObject(mirror::Object* o)
+  uint8_t ExamineJavaObject(ObjPtr<mirror::Object> o)
       REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     if (o == nullptr) {
       return HPSG_STATE(SOLIDITY_FREE, 0);
@@ -4767,6 +704,7 @@
   DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
 };
 
+
 void Dbg::DdmSendHeapSegments(bool native) {
   Dbg::HpsgWhen when = native ? gDdmNhsgWhen : gDdmHpsgWhen;
   Dbg::HpsgWhat what = native ? gDdmNhsgWhat : gDdmHpsgWhat;
@@ -4780,7 +718,7 @@
 
   // First, send a heap start chunk.
   uint8_t heap_id[4];
-  JDWP::Set4BE(&heap_id[0], 1);  // Heap id (bogus; we only have one heap).
+  Set4BE(&heap_id[0], 1);  // Heap id (bogus; we only have one heap).
   cb->DdmPublishChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"),
                       ArrayRef<const uint8_t>(heap_id));
   Thread* self = Thread::Current();
@@ -4849,41 +787,6 @@
   gc::AllocRecordObjectMap::SetAllocTrackingEnabled(enable);
 }
 
-void Dbg::DumpRecentAllocations() {
-  ScopedObjectAccess soa(Thread::Current());
-  MutexLock mu(soa.Self(), *Locks::alloc_tracker_lock_);
-  if (!Runtime::Current()->GetHeap()->IsAllocTrackingEnabled()) {
-    LOG(INFO) << "Not recording tracked allocations";
-    return;
-  }
-  gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords();
-  CHECK(records != nullptr);
-
-  const uint16_t capped_count = CappedAllocRecordCount(records->GetRecentAllocationSize());
-  uint16_t count = capped_count;
-
-  LOG(INFO) << "Tracked allocations, (count=" << count << ")";
-  for (auto it = records->RBegin(), end = records->REnd();
-      count > 0 && it != end; count--, it++) {
-    const gc::AllocRecord* record = &it->second;
-
-    LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->GetTid(), record->ByteCount())
-              << mirror::Class::PrettyClass(record->GetClass());
-
-    for (size_t stack_frame = 0, depth = record->GetDepth(); stack_frame < depth; ++stack_frame) {
-      const gc::AllocRecordStackTraceElement& stack_element = record->StackElement(stack_frame);
-      ArtMethod* m = stack_element.GetMethod();
-      LOG(INFO) << "    " << ArtMethod::PrettyMethod(m) << " line "
-                << stack_element.ComputeLineNumber();
-    }
-
-    // pause periodically to help logcat catch up
-    if ((count % 5) == 0) {
-      usleep(40000);
-    }
-  }
-}
-
 class StringTable {
  private:
   struct Entry {
@@ -4975,7 +878,7 @@
       size_t s_len = CountModifiedUtf8Chars(entry.data);
       std::unique_ptr<uint16_t[]> s_utf16(new uint16_t[s_len]);
       ConvertModifiedUtf8ToUtf16(s_utf16.get(), entry.data);
-      JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
+      AppendUtf16BE(bytes, s_utf16.get(), s_len);
     }
   }
 
@@ -4988,6 +891,7 @@
   DISALLOW_COPY_AND_ASSIGN(StringTable);
 };
 
+
 static const char* GetMethodSourceFile(ArtMethod* method)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   DCHECK(method != nullptr);
@@ -5112,21 +1016,21 @@
     const int kMessageHeaderLen = 15;
     const int kEntryHeaderLen = 9;
     const int kStackFrameLen = 8;
-    JDWP::Append1BE(bytes, kMessageHeaderLen);
-    JDWP::Append1BE(bytes, kEntryHeaderLen);
-    JDWP::Append1BE(bytes, kStackFrameLen);
+    Append1BE(bytes, kMessageHeaderLen);
+    Append1BE(bytes, kEntryHeaderLen);
+    Append1BE(bytes, kStackFrameLen);
 
     // (2b) number of entries
     // (4b) offset to string table from start of message
     // (2b) number of class name strings
     // (2b) number of method name strings
     // (2b) number of source file name strings
-    JDWP::Append2BE(bytes, capped_count);
+    Append2BE(bytes, capped_count);
     size_t string_table_offset = bytes.size();
-    JDWP::Append4BE(bytes, 0);  // We'll patch this later...
-    JDWP::Append2BE(bytes, class_names.Size());
-    JDWP::Append2BE(bytes, method_names.Size());
-    JDWP::Append2BE(bytes, filenames.Size());
+    Append4BE(bytes, 0);  // We'll patch this later...
+    Append2BE(bytes, class_names.Size());
+    Append2BE(bytes, method_names.Size());
+    Append2BE(bytes, filenames.Size());
 
     VLOG(jdwp) << "Dumping allocations with stacks";
 
@@ -5149,10 +1053,10 @@
       size_t stack_depth = record->GetDepth();
       size_t allocated_object_class_name_index =
           class_names.IndexOf(record->GetClassDescriptor(&temp));
-      JDWP::Append4BE(bytes, record->ByteCount());
-      JDWP::Append2BE(bytes, static_cast<uint16_t>(record->GetTid()));
-      JDWP::Append2BE(bytes, allocated_object_class_name_index);
-      JDWP::Append1BE(bytes, stack_depth);
+      Append4BE(bytes, record->ByteCount());
+      Append2BE(bytes, static_cast<uint16_t>(record->GetTid()));
+      Append2BE(bytes, allocated_object_class_name_index);
+      Append1BE(bytes, stack_depth);
 
       for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
         // For each stack frame:
@@ -5164,10 +1068,10 @@
         size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
         size_t method_name_index = method_names.IndexOf(m->GetName());
         size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
-        JDWP::Append2BE(bytes, class_name_index);
-        JDWP::Append2BE(bytes, method_name_index);
-        JDWP::Append2BE(bytes, file_name_index);
-        JDWP::Append2BE(bytes, record->StackElement(stack_frame).ComputeLineNumber());
+        Append2BE(bytes, class_name_index);
+        Append2BE(bytes, method_name_index);
+        Append2BE(bytes, file_name_index);
+        Append2BE(bytes, record->StackElement(stack_frame).ComputeLineNumber());
       }
     }
 
@@ -5177,7 +1081,7 @@
     // (xb) class name strings
     // (xb) method name strings
     // (xb) source file strings
-    JDWP::Set4BE(&bytes[string_table_offset], bytes.size());
+    Set4BE(&bytes[string_table_offset], bytes.size());
     class_names.WriteTo(bytes);
     method_names.WriteTo(bytes);
     filenames.WriteTo(bytes);
@@ -5192,23 +1096,6 @@
   return result;
 }
 
-ArtMethod* DeoptimizationRequest::Method() const {
-  return jni::DecodeArtMethod(method_);
-}
-
-void DeoptimizationRequest::SetMethod(ArtMethod* m) {
-  method_ = jni::EncodeArtMethod(m);
-}
-
-void Dbg::VisitRoots(RootVisitor* visitor) {
-  // Visit breakpoint roots, used to prevent unloading of methods with breakpoints.
-  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
-  BufferedRootVisitor<128> root_visitor(visitor, RootInfo(kRootVMInternal));
-  for (Breakpoint& breakpoint : gBreakpoints) {
-    breakpoint.Method()->VisitRoots(root_visitor, kRuntimePointerSize);
-  }
-}
-
 void Dbg::DbgThreadLifecycleCallback::ThreadStart(Thread* self) {
   Dbg::PostThreadStart(self);
 }
@@ -5217,12 +1104,4 @@
   Dbg::PostThreadDeath(self);
 }
 
-void Dbg::DbgClassLoadCallback::ClassLoad(Handle<mirror::Class> klass ATTRIBUTE_UNUSED) {
-  // Ignore ClassLoad;
-}
-void Dbg::DbgClassLoadCallback::ClassPrepare(Handle<mirror::Class> temp_klass ATTRIBUTE_UNUSED,
-                                             Handle<mirror::Class> klass) {
-  Dbg::PostClassPrepare(klass.Get());
-}
-
 }  // namespace art
diff --git a/runtime/debugger.h b/runtime/debugger.h
index de44b14..65dc13d 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -14,10 +14,6 @@
  * limitations under the License.
  */
 
-/*
- * Dalvik-specific side of debugger support.  (The JDWP code is intended to
- * be relatively generic.)
- */
 #ifndef ART_RUNTIME_DEBUGGER_H_
 #define ART_RUNTIME_DEBUGGER_H_
 
@@ -27,589 +23,38 @@
 #include <string>
 #include <vector>
 
+#include "art_method.h"
 #include "base/array_ref.h"
-#include "class_linker.h"
-#include "gc_root.h"
-#include "handle.h"
-#include "jdwp/jdwp.h"
+#include "base/locks.h"
+#include "base/logging.h"
 #include "jni.h"
-#include "jvalue.h"
-#include "obj_ptr.h"
 #include "runtime_callbacks.h"
 #include "thread.h"
 #include "thread_state.h"
 
 namespace art {
-namespace mirror {
-class Class;
-class Object;
-class Throwable;
-}  // namespace mirror
-class ArtField;
-class ArtMethod;
-class ObjectRegistry;
-class ScopedObjectAccess;
-class ScopedObjectAccessUnchecked;
-class StackVisitor;
-class Thread;
-
-struct DebuggerActiveMethodInspectionCallback : public MethodInspectionCallback {
-  bool IsMethodBeingInspected(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_);
-  bool IsMethodSafeToJit(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_);
-  bool MethodNeedsDebugVersion(ArtMethod* method) override REQUIRES_SHARED(Locks::mutator_lock_);
-};
-
-struct DebuggerDdmCallback : public DdmCallback {
-  void DdmPublishChunk(uint32_t type, const ArrayRef<const uint8_t>& data)
-      override REQUIRES_SHARED(Locks::mutator_lock_);
-};
-
-struct InternalDebuggerControlCallback : public DebuggerControlCallback {
-  void StartDebugger() override;
-  void StopDebugger() override;
-  bool IsDebuggerConfigured() override;
-};
-
-/*
- * Invoke-during-breakpoint support.
- */
-struct DebugInvokeReq {
-  DebugInvokeReq(uint32_t invoke_request_id,
-                 JDWP::ObjectId invoke_thread_id,
-                 ObjPtr<mirror::Object> invoke_receiver,
-                 ObjPtr<mirror::Class> invoke_class,
-                 ArtMethod* invoke_method,
-                 uint32_t invoke_options,
-                 uint64_t args[],
-                 uint32_t args_count)
-      : request_id(invoke_request_id),
-        thread_id(invoke_thread_id),
-        receiver(invoke_receiver),
-        klass(invoke_class),
-        method(invoke_method),
-        arg_count(args_count),
-        arg_values(args),
-        options(invoke_options),
-        reply(JDWP::expandBufAlloc()) {
-  }
-
-  ~DebugInvokeReq() {
-    JDWP::expandBufFree(reply);
-  }
-
-  // Request
-  const uint32_t request_id;
-  const JDWP::ObjectId thread_id;
-  GcRoot<mirror::Object> receiver;      // not used for ClassType.InvokeMethod.
-  GcRoot<mirror::Class> klass;
-  ArtMethod* const method;
-  const uint32_t arg_count;
-  std::unique_ptr<uint64_t[]> arg_values;   // will be null if arg_count_ == 0. We take ownership
-                                            // of this array so we must delete it upon destruction.
-  const uint32_t options;
-
-  // Reply
-  JDWP::ExpandBuf* const reply;
-
-  void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(DebugInvokeReq);
-};
-
-// Thread local data-structure that holds fields for controlling single-stepping.
-class SingleStepControl {
- public:
-  SingleStepControl(JDWP::JdwpStepSize step_size, JDWP::JdwpStepDepth step_depth,
-                    int stack_depth, ArtMethod* method)
-      : step_size_(step_size), step_depth_(step_depth),
-        stack_depth_(stack_depth), method_(method) {
-  }
-
-  JDWP::JdwpStepSize GetStepSize() const {
-    return step_size_;
-  }
-
-  JDWP::JdwpStepDepth GetStepDepth() const {
-    return step_depth_;
-  }
-
-  int GetStackDepth() const {
-    return stack_depth_;
-  }
-
-  ArtMethod* GetMethod() const {
-    return method_;
-  }
-
-  const std::set<uint32_t>& GetDexPcs() const {
-    return dex_pcs_;
-  }
-
-  void AddDexPc(uint32_t dex_pc);
-
-  bool ContainsDexPc(uint32_t dex_pc) const;
-
- private:
-  // See JdwpStepSize and JdwpStepDepth for details.
-  const JDWP::JdwpStepSize step_size_;
-  const JDWP::JdwpStepDepth step_depth_;
-
-  // The stack depth when this single-step was initiated. This is used to support SD_OVER and SD_OUT
-  // single-step depth.
-  const int stack_depth_;
-
-  // The location this single-step was initiated from.
-  // A single-step is initiated in a suspended thread. We save here the current method and the
-  // set of DEX pcs associated to the source line number where the suspension occurred.
-  // This is used to support SD_INTO and SD_OVER single-step depths so we detect when a single-step
-  // causes the execution of an instruction in a different method or at a different line number.
-  ArtMethod* method_;
-
-  std::set<uint32_t> dex_pcs_;
-
-  DISALLOW_COPY_AND_ASSIGN(SingleStepControl);
-};
-
-// TODO rename to InstrumentationRequest.
-class DeoptimizationRequest {
- public:
-  enum Kind {
-    kNothing,                   // no action.
-    kRegisterForEvent,          // start listening for instrumentation event.
-    kUnregisterForEvent,        // stop listening for instrumentation event.
-    kFullDeoptimization,        // deoptimize everything.
-    kFullUndeoptimization,      // undeoptimize everything.
-    kSelectiveDeoptimization,   // deoptimize one method.
-    kSelectiveUndeoptimization  // undeoptimize one method.
-  };
-
-  DeoptimizationRequest() : kind_(kNothing), instrumentation_event_(0), method_(nullptr) {}
-
-  DeoptimizationRequest(const DeoptimizationRequest& other)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      : kind_(other.kind_), instrumentation_event_(other.instrumentation_event_) {
-    // Create a new JNI global reference for the method.
-    SetMethod(other.Method());
-  }
-
-  ArtMethod* Method() const REQUIRES_SHARED(Locks::mutator_lock_);
-
-  void SetMethod(ArtMethod* m) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  // Name 'Kind()' would collide with the above enum name.
-  Kind GetKind() const {
-    return kind_;
-  }
-
-  void SetKind(Kind kind) {
-    kind_ = kind;
-  }
-
-  uint32_t InstrumentationEvent() const {
-    return instrumentation_event_;
-  }
-
-  void SetInstrumentationEvent(uint32_t instrumentation_event) {
-    instrumentation_event_ = instrumentation_event;
-  }
-
- private:
-  Kind kind_;
-
-  // TODO we could use a union to hold the instrumentation_event and the method since they
-  // respectively have sense only for kRegisterForEvent/kUnregisterForEvent and
-  // kSelectiveDeoptimization/kSelectiveUndeoptimization.
-
-  // Event to start or stop listening to. Only for kRegisterForEvent and kUnregisterForEvent.
-  uint32_t instrumentation_event_;
-
-  // Method for selective deoptimization.
-  jmethodID method_;
-};
-std::ostream& operator<<(std::ostream& os, const DeoptimizationRequest::Kind& rhs);
 
 class Dbg {
  public:
   static void SetJdwpAllowed(bool allowed);
   static bool IsJdwpAllowed();
 
-  static void StartJdwp();
-  static void StopJdwp();
-
   // Invoked by the GC in case we need to keep DDMS informed.
   static void GcDidFinish() REQUIRES(!Locks::mutator_lock_);
 
-  // Return the DebugInvokeReq for the current thread.
-  static DebugInvokeReq* GetInvokeReq();
-
-  static Thread* GetDebugThread();
-  static void ClearWaitForEventThread();
-
-  /*
-   * Enable/disable breakpoints and step modes.  Used to provide a heads-up
-   * when the debugger attaches.
-   */
-  static void Connected();
-  static void GoActive()
-      REQUIRES(!Locks::breakpoint_lock_, !Locks::deoptimization_lock_, !Locks::mutator_lock_);
-  static void Disconnected() REQUIRES(!Locks::deoptimization_lock_, !Locks::mutator_lock_);
-  static void Dispose() {
-    gDisposed = true;
-  }
-
-  // Returns true if we're actually debugging with a real debugger, false if it's
-  // just DDMS (or nothing at all).
-  static bool IsDebuggerActive() {
-    return gDebuggerActive;
-  }
-
-  // Configures JDWP with parsed command-line options.
-  static void ConfigureJdwp(const JDWP::JdwpOptions& jdwp_options)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  // Returns true if we had -Xrunjdwp or -agentlib:jdwp= on the command line.
-  static bool IsJdwpConfigured();
-
-  // Returns true if a method has any breakpoints.
-  static bool MethodHasAnyBreakpoints(ArtMethod* method)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::breakpoint_lock_);
-
-  static bool IsDisposed() {
-    return gDisposed;
-  }
-
-  /*
-   * Time, in milliseconds, since the last debugger activity.  Does not
-   * include DDMS activity.  Returns -1 if there has been no activity.
-   * Returns 0 if we're in the middle of handling a debugger request.
-   */
-  static int64_t LastDebuggerActivity();
-
-  static void UndoDebuggerSuspensions()
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
-
-  /*
-   * Class, Object, Array
-   */
-  static std::string GetClassName(JDWP::RefTypeId id)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static std::string GetClassName(ObjPtr<mirror::Class> klass)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId* class_object_id)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId* superclass_id)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static void GetClassList(std::vector<JDWP::RefTypeId>* classes)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
-                                      uint32_t* pStatus, std::string* pDescriptor)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static void FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>* ids)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetSignature(JDWP::RefTypeId ref_type_id, std::string* signature)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetSourceDebugExtension(JDWP::RefTypeId ref_type_id,
-                                                 std::string* extension_data)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetSourceFile(JDWP::RefTypeId ref_type_id, std::string* source_file)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetObjectTag(JDWP::ObjectId object_id, uint8_t* tag)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static size_t GetTagWidth(JDWP::JdwpTag tag);
-
-  static JDWP::JdwpError GetArrayLength(JDWP::ObjectId array_id, int32_t* length)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError OutputArray(JDWP::ObjectId array_id,
-                                     int offset,
-                                     int count,
-                                     JDWP::ExpandBuf* pReply)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
-                                          JDWP::Request* request)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static JDWP::JdwpError CreateString(const std::string& str, JDWP::ObjectId* new_string_id)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_object_id)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
-                                           JDWP::ObjectId* new_array_id)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  //
-  // Event filtering.
-  //
-  static bool MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static bool MatchLocation(const JDWP::JdwpLocation& expected_location,
-                            const JDWP::EventLocation& event_location)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static bool MatchType(ObjPtr<mirror::Class> event_class, JDWP::RefTypeId class_id)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static bool MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
-                         ArtField* event_field)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static bool MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  //
-  // Monitors.
-  //
-  static JDWP::JdwpError GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetOwnedMonitors(JDWP::ObjectId thread_id,
-                                          std::vector<JDWP::ObjectId>* monitors,
-                                          std::vector<uint32_t>* stack_depths)
-      REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetContendedMonitor(JDWP::ObjectId thread_id,
-                                             JDWP::ObjectId* contended_monitor)
-      REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  //
-  // Heap.
-  //
-  static JDWP::JdwpError GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
-                                           std::vector<uint64_t>* counts)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
-                                      std::vector<JDWP::ObjectId>* instances)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
-                                             std::vector<JDWP::ObjectId>* referring_objects)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError DisableCollection(JDWP::ObjectId object_id)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError EnableCollection(JDWP::ObjectId object_id)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError IsCollected(JDWP::ObjectId object_id, bool* is_collected)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static void DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  //
-  // Methods and fields.
-  //
-  static std::string GetMethodName(JDWP::MethodId method_id)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static bool IsMethodObsolete(JDWP::MethodId method_id)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError OutputDeclaredFields(JDWP::RefTypeId ref_type_id, bool with_generic,
-                                              JDWP::ExpandBuf* pReply)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError OutputDeclaredMethods(JDWP::RefTypeId ref_type_id, bool with_generic,
-                                               JDWP::ExpandBuf* pReply)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError OutputDeclaredInterfaces(JDWP::RefTypeId ref_type_id,
-                                                  JDWP::ExpandBuf* pReply)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static void OutputLineTable(JDWP::RefTypeId ref_type_id, JDWP::MethodId method_id,
-                              JDWP::ExpandBuf* pReply)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static void OutputVariableTable(JDWP::RefTypeId ref_type_id, JDWP::MethodId id, bool with_generic,
-                                  JDWP::ExpandBuf* pReply)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static void OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
-                                      JDWP::ExpandBuf* pReply)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static void OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
-                               JDWP::ExpandBuf* pReply)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetBytecodes(JDWP::RefTypeId class_id, JDWP::MethodId method_id,
-                                      std::vector<uint8_t>* bytecodes)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static std::string GetFieldName(JDWP::FieldId field_id)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpTag GetFieldBasicTag(JDWP::FieldId field_id)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpTag GetStaticFieldBasicTag(JDWP::FieldId field_id)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
-                                       JDWP::ExpandBuf* pReply)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
-                                       uint64_t value, int width)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id,
-                                             JDWP::ExpandBuf* pReply)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static JDWP::JdwpError StringToUtf8(JDWP::ObjectId string_id, std::string* str)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static void OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  /*
-   * Thread, ThreadGroup, Frame
-   */
-  static JDWP::JdwpError GetThreadName(JDWP::ObjectId thread_id, std::string* name)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_);
-  static JDWP::JdwpError GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_);
-  static JDWP::JdwpError GetThreadGroupName(JDWP::ObjectId thread_group_id,
-                                            JDWP::ExpandBuf* pReply)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetThreadGroupParent(JDWP::ObjectId thread_group_id,
-                                              JDWP::ExpandBuf* pReply)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
-                                                JDWP::ExpandBuf* pReply)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::ObjectId GetSystemThreadGroupId()
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static JDWP::JdwpThreadStatus ToJdwpThreadStatus(ThreadState state);
-  static JDWP::JdwpError GetThreadStatus(JDWP::ObjectId thread_id,
-                                         JDWP::JdwpThreadStatus* pThreadStatus,
-                                         JDWP::JdwpSuspendStatus* pSuspendStatus)
-      REQUIRES(!Locks::thread_list_lock_);
-  static JDWP::JdwpError GetThreadDebugSuspendCount(JDWP::ObjectId thread_id,
-                                                    JDWP::ExpandBuf* pReply)
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
-  // static void WaitForSuspend(JDWP::ObjectId thread_id);
-
-  // Fills 'thread_ids' with the threads in the given thread group. If thread_group_id == 0,
-  // returns all threads.
-  static void GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids)
-      REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static JDWP::JdwpError GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result)
-      REQUIRES(!Locks::thread_list_lock_);
-  static JDWP::JdwpError GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
-                                         size_t frame_count, JDWP::ExpandBuf* buf)
-      REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static JDWP::ObjectId GetThreadSelfId() REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::ObjectId GetThreadId(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static void SuspendVM()
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
-  static void ResumeVM()
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
-  static JDWP::JdwpError SuspendThread(JDWP::ObjectId thread_id, bool request_suspension = true)
-      REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_,
-               !Locks::thread_suspend_count_lock_);
-
-  static void ResumeThread(JDWP::ObjectId thread_id)
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static void SuspendSelf();
-
-  static JDWP::JdwpError GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
-                                       JDWP::ObjectId* result)
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply)
-      REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError SetLocalValues(JDWP::Request* request)
-      REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static JDWP::JdwpError Interrupt(JDWP::ObjectId thread_id)
-      REQUIRES(!Locks::thread_list_lock_);
-
-  /*
-   * Debugger notification
-   */
-  enum EventFlag {
-    kBreakpoint     = 0x01,
-    kSingleStep     = 0x02,
-    kMethodEntry    = 0x04,
-    kMethodExit     = 0x08,
-  };
-  static void PostFieldAccessEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object,
-                                   ArtField* f)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static void PostFieldModificationEvent(ArtMethod* m, int dex_pc,
-                                         mirror::Object* this_object, ArtField* f,
-                                         const JValue* field_value)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static void PostException(mirror::Throwable* exception)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static void UpdateDebugger(Thread* thread, mirror::Object* this_object,
-                             ArtMethod* method, uint32_t new_dex_pc,
-                             int event_flags, const JValue* return_value)
-      REQUIRES(!Locks::breakpoint_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  // Indicates whether we need deoptimization for debugging.
-  static bool RequiresDeoptimization();
-
-  // Records deoptimization request in the queue.
-  static void RequestDeoptimization(const DeoptimizationRequest& req)
-      REQUIRES(!Locks::deoptimization_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  // Manage deoptimization after updating JDWP events list. Suspends all threads, processes each
-  // request and finally resumes all threads.
-  static void ManageDeoptimization()
-      REQUIRES(!Locks::deoptimization_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  // Breakpoints.
-  static void WatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req)
-      REQUIRES(!Locks::breakpoint_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-  static void UnwatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req)
-      REQUIRES(!Locks::breakpoint_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  /*
-   * Forced interpreter checkers for single-step and continue support.
-   */
-
-  // Indicates whether we need to force the use of interpreter to invoke a method.
-  // This allows to single-step or continue into the called method.
-  static bool IsForcedInterpreterNeededForCalling(Thread* thread, ArtMethod* m)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (!IsDebuggerActive()) {
-      return false;
-    }
-    return IsForcedInterpreterNeededForCallingImpl(thread, m);
-  }
-
-  // Indicates whether we need to force the use of interpreter entrypoint when calling a
-  // method through the resolution trampoline. This allows to single-step or continue into
-  // the called method.
-  static bool IsForcedInterpreterNeededForResolution(Thread* thread, ArtMethod* m)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (!IsDebuggerActive()) {
-      return false;
-    }
-    return IsForcedInterpreterNeededForResolutionImpl(thread, m);
-  }
-
-  // Indicates whether we need to force the use of instrumentation entrypoint when calling
-  // a method through the resolution trampoline. This allows to deoptimize the stack for
-  // debugging when we returned from the called method.
-  static bool IsForcedInstrumentationNeededForResolution(Thread* thread, ArtMethod* m)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (!IsDebuggerActive()) {
-      return false;
-    }
-    return IsForcedInstrumentationNeededForResolutionImpl(thread, m);
-  }
+  static uint8_t ToJdwpThreadStatus(ThreadState state);
 
   // Indicates whether we need to force the use of interpreter when returning from the
   // interpreter into the runtime. This allows to deoptimize the stack and continue
   // execution with interpreter for debugging.
   static bool IsForcedInterpreterNeededForUpcall(Thread* thread, ArtMethod* m)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (!IsDebuggerActive() && !thread->HasDebuggerShadowFrames()) {
+    if (LIKELY(!thread->HasDebuggerShadowFrames())) {
       return false;
     }
-    return IsForcedInterpreterNeededForUpcallImpl(thread, m);
+    // If we have debugger stack frames we always need to go back to interpreter unless we are
+    // native or a proxy.
+    return m != nullptr && !m->IsProxyMethod() && !m->IsNative();
   }
 
   // Indicates whether we need to force the use of interpreter when handling an
@@ -619,50 +64,12 @@
   // the deoptimized frames.
   static bool IsForcedInterpreterNeededForException(Thread* thread)
       REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (!IsDebuggerActive() && !thread->HasDebuggerShadowFrames()) {
+    if (LIKELY(!thread->HasDebuggerShadowFrames())) {
       return false;
     }
     return IsForcedInterpreterNeededForExceptionImpl(thread);
   }
 
-  // Single-stepping.
-  static JDWP::JdwpError ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize size,
-                                       JDWP::JdwpStepDepth depth)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  static void UnconfigureStep(JDWP::ObjectId thread_id)
-      REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  /*
-   * Invoke support
-   */
-
-  // Called by the JDWP thread to prepare invocation in the event thread (suspended on an event).
-  // If the information sent by the debugger is incorrect, it will send a reply with the
-  // appropriate error code. Otherwise, it will attach a DebugInvokeReq object to the event thread
-  // and resume it (and possibly other threads depending on the invoke options).
-  // Unlike other commands, the JDWP thread will not send the reply to the debugger (see
-  // JdwpState::ProcessRequest). The reply will be sent by the event thread itself after method
-  // invocation completes (see FinishInvokeMethod). This is required to allow the JDWP thread to
-  // process incoming commands from the debugger while the invocation is still in progress in the
-  // event thread, especially if it gets suspended by a debug event occurring in another thread.
-  static JDWP::JdwpError PrepareInvokeMethod(uint32_t request_id, JDWP::ObjectId thread_id,
-                                             JDWP::ObjectId object_id, JDWP::RefTypeId class_id,
-                                             JDWP::MethodId method_id, uint32_t arg_count,
-                                             uint64_t arg_values[], JDWP::JdwpTag* arg_types,
-                                             uint32_t options)
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  // Called by the event thread to execute a method prepared by the JDWP thread in the given
-  // DebugInvokeReq object. Once the invocation completes, the event thread attaches a reply
-  // to that DebugInvokeReq object so it can be sent to the debugger only when the event thread
-  // is ready to suspend (see FinishInvokeMethod).
-  static void ExecuteMethod(DebugInvokeReq* pReq);
-
-  // Called by the event thread to send the reply of the invoke (created in ExecuteMethod)
-  // before suspending itself. This is to ensure the thread is ready to suspend before the
-  // debugger receives the reply.
-  static void FinishInvokeMethod(DebugInvokeReq* pReq);
 
   /*
    * DDM support.
@@ -677,14 +84,10 @@
       const ArrayRef<const jbyte>& data,
       /*out*/uint32_t* out_type,
       /*out*/std::vector<uint8_t>* out_data);
-  static bool DdmHandlePacket(JDWP::Request* request, uint8_t** pReplyBuf, int* pReplyLen);
+
   static void DdmConnected() REQUIRES_SHARED(Locks::mutator_lock_);
   static void DdmDisconnected() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Visit breakpoint roots, used to prevent unloading of methods with breakpoints.
-  static void VisitRoots(RootVisitor* visitor)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   /*
    * Allocation tracking support.
    */
@@ -717,54 +120,11 @@
   static void DdmSendHeapSegments(bool native)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static ObjectRegistry* GetObjectRegistry() {
-    return gRegistry;
-  }
-
-  static JDWP::JdwpTag TagFromObject(const ScopedObjectAccessUnchecked& soa,
-                                     ObjPtr<mirror::Object> o)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static JDWP::JdwpTypeTag GetTypeTag(ObjPtr<mirror::Class> klass)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static JDWP::FieldId ToFieldId(const ArtField* f)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static void SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
-
-  static JDWP::JdwpState* GetJdwpState();
-
-  static uint32_t GetInstrumentationEvents() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return instrumentation_events_;
-  }
-
   static ThreadLifecycleCallback* GetThreadLifecycleCallback() {
     return &thread_lifecycle_callback_;
   }
-  static ClassLoadCallback* GetClassLoadCallback() {
-    return &class_load_callback_;
-  }
 
  private:
-  static void ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static void BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id,
-                               JDWP::JdwpTag result_tag, uint64_t result_value,
-                               JDWP::ObjectId exception)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static JDWP::JdwpError GetLocalValue(const StackVisitor& visitor,
-                                       ScopedObjectAccessUnchecked& soa, int slot,
-                                       JDWP::JdwpTag tag, uint8_t* buf, size_t width)
-      REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-  static JDWP::JdwpError SetLocalValue(Thread* thread, StackVisitor& visitor, int slot,
-                                       JDWP::JdwpTag tag, uint64_t value, size_t width)
-      REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-
   static void DdmBroadcast(bool connect) REQUIRES_SHARED(Locks::mutator_lock_);
 
   static void PostThreadStart(Thread* t)
@@ -774,89 +134,16 @@
   static void PostThreadStartOrStop(Thread*, uint32_t)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static void PostClassPrepare(mirror::Class* c)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static void PostLocationEvent(ArtMethod* method, int pcOffset,
-                                mirror::Object* thisPtr, int eventFlags,
-                                const JValue* return_value)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static void ProcessDeoptimizationRequest(const DeoptimizationRequest& request)
-      REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_);
-
-  static void RequestDeoptimizationLocked(const DeoptimizationRequest& req)
-      REQUIRES(Locks::deoptimization_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static bool IsForcedInterpreterNeededForCallingImpl(Thread* thread, ArtMethod* m)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static bool IsForcedInterpreterNeededForResolutionImpl(Thread* thread, ArtMethod* m)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static bool IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, ArtMethod* m)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  static bool IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   static bool IsForcedInterpreterNeededForExceptionImpl(Thread* thread)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Indicates whether the debugger is making requests.
-  static bool gDebuggerActive;
-
-  static DebuggerActiveMethodInspectionCallback gDebugActiveCallback;
-  static DebuggerDdmCallback gDebugDdmCallback;
-  static InternalDebuggerControlCallback gDebuggerControlCallback;
-
-  // Indicates whether we should drop the JDWP connection because the runtime stops or the
-  // debugger called VirtualMachine.Dispose.
-  static bool gDisposed;
-
-  // The registry mapping objects to JDWP ids.
-  static ObjectRegistry* gRegistry;
-
-  // Deoptimization requests to be processed each time the event list is updated. This is used when
-  // registering and unregistering events so we do not deoptimize while holding the event list
-  // lock.
-  // TODO rename to instrumentation_requests.
-  static std::vector<DeoptimizationRequest> deoptimization_requests_ GUARDED_BY(Locks::deoptimization_lock_);
-
-  // Count the number of events requiring full deoptimization. When the counter is > 0, everything
-  // is deoptimized, otherwise everything is undeoptimized.
-  // Note: we fully deoptimize on the first event only (when the counter is set to 1). We fully
-  // undeoptimize when the last event is unregistered (when the counter is set to 0).
-  static size_t full_deoptimization_event_count_ GUARDED_BY(Locks::deoptimization_lock_);
-
-  static size_t* GetReferenceCounterForEvent(uint32_t instrumentation_event);
-
-  // Instrumentation event reference counters.
-  // TODO we could use an array instead of having all these dedicated counters. Instrumentation
-  // events are bits of a mask so we could convert them to array index.
-  static size_t dex_pc_change_event_ref_count_ GUARDED_BY(Locks::deoptimization_lock_);
-  static size_t method_enter_event_ref_count_ GUARDED_BY(Locks::deoptimization_lock_);
-  static size_t method_exit_event_ref_count_ GUARDED_BY(Locks::deoptimization_lock_);
-  static size_t field_read_event_ref_count_ GUARDED_BY(Locks::deoptimization_lock_);
-  static size_t field_write_event_ref_count_ GUARDED_BY(Locks::deoptimization_lock_);
-  static size_t exception_catch_event_ref_count_ GUARDED_BY(Locks::deoptimization_lock_);
-  static uint32_t instrumentation_events_ GUARDED_BY(Locks::mutator_lock_);
-
   class DbgThreadLifecycleCallback : public ThreadLifecycleCallback {
    public:
     void ThreadStart(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_);
     void ThreadDeath(Thread* self) override REQUIRES_SHARED(Locks::mutator_lock_);
   };
 
-  class DbgClassLoadCallback : public ClassLoadCallback {
-   public:
-    void ClassLoad(Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_);
-    void ClassPrepare(Handle<mirror::Class> temp_klass,
-                      Handle<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_);
-  };
-
   static DbgThreadLifecycleCallback thread_lifecycle_callback_;
-  static DbgClassLoadCallback class_load_callback_;
 
   DISALLOW_COPY_AND_ASSIGN(Dbg);
 };
diff --git a/runtime/deoptimization_kind.h b/runtime/deoptimization_kind.h
index 14e189c..5be6f3d 100644
--- a/runtime/deoptimization_kind.h
+++ b/runtime/deoptimization_kind.h
@@ -17,6 +17,8 @@
 #ifndef ART_RUNTIME_DEOPTIMIZATION_KIND_H_
 #define ART_RUNTIME_DEOPTIMIZATION_KIND_H_
 
+#include "base/logging.h"
+
 namespace art {
 
 enum class DeoptimizationKind {
diff --git a/runtime/dex/dex_file_annotations.cc b/runtime/dex/dex_file_annotations.cc
index 050be4a..24b3a3e 100644
--- a/runtime/dex/dex_file_annotations.cc
+++ b/runtime/dex/dex_file_annotations.cc
@@ -605,7 +605,7 @@
         StackHandleScope<2> hs(self);
         uint32_t size = DecodeUnsignedLeb128(&annotation);
         Handle<mirror::Class> component_type(hs.NewHandle(array_class->GetComponentType()));
-        Handle<mirror::Array> new_array(hs.NewHandle(mirror::Array::Alloc<true>(
+        Handle<mirror::Array> new_array(hs.NewHandle(mirror::Array::Alloc(
             self, array_class.Get(), size, array_class->GetComponentSizeShift(),
             Runtime::Current()->GetHeap()->GetCurrentAllocator())));
         if (new_array == nullptr) {
diff --git a/runtime/dex2oat_environment_test.h b/runtime/dex2oat_environment_test.h
index fbcee39..fb8a760 100644
--- a/runtime/dex2oat_environment_test.h
+++ b/runtime/dex2oat_environment_test.h
@@ -137,36 +137,6 @@
     dst_stream << src_stream.rdbuf();
   }
 
-  // Returns the directory where the pre-compiled core.art can be found.
-  // TODO: We should factor out this into common tests somewhere rather than
-  // re-hardcoding it here (This was copied originally from the elf writer
-  // test).
-  std::string GetImageDirectory() const {
-    if (IsHost()) {
-      const char* host_dir = getenv("ANDROID_HOST_OUT");
-      CHECK(host_dir != nullptr);
-      return std::string(host_dir) + "/framework";
-    } else {
-      return std::string("/data/art-test");
-    }
-  }
-
-  std::string GetImageLocation() const {
-    return GetImageDirectory() + "/core.art";
-  }
-
-  std::string GetSystemImageFile() const {
-    return GetImageDirectory() + "/" + GetInstructionSetString(kRuntimeISA)
-      + "/core.art";
-  }
-
-  // Returns the path to an image location whose contents differ from the
-  // image at GetImageLocation(). This is used for testing mismatched
-  // image checksums in the oat_file_assistant_tests.
-  std::string GetImageLocation2() const {
-    return GetImageDirectory() + "/core-interpreter.art";
-  }
-
   std::string GetDexSrc1() const {
     return GetTestDexFileName("Main");
   }
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index b67014a..8ba6f3e 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -30,6 +30,7 @@
 #include "dexopt_test.h"
 #include "gc/space/image_space.h"
 #include "hidden_api.h"
+#include "oat.h"
 
 namespace art {
 void DexoptTest::SetUp() {
@@ -68,15 +69,34 @@
   return Exec(argv, error_msg);
 }
 
+std::string DexoptTest::GenerateAlternateImage(const std::string& scratch_dir) {
+  std::vector<std::string> libcore_dex_files = GetLibCoreDexFileNames();
+  std::vector<std::string> libcore_dex_locations = GetLibCoreDexLocations();
+
+  std::string image_dir = scratch_dir + GetInstructionSetString(kRuntimeISA);
+  int mkdir_result = mkdir(image_dir.c_str(), 0700);
+  CHECK_EQ(0, mkdir_result) << image_dir.c_str();
+
+  std::vector<std::string> extra_args {
+    "--compiler-filter=verify",
+    android::base::StringPrintf("--base=0x%08x", ART_BASE_ADDRESS),
+  };
+  std::string filename_prefix = image_dir + "/boot-interpreter";
+  ArrayRef<const std::string> dex_files(libcore_dex_files);
+  ArrayRef<const std::string> dex_locations(libcore_dex_locations);
+  std::string error_msg;
+  bool ok = CompileBootImage(extra_args, filename_prefix, dex_files, dex_locations, &error_msg);
+  EXPECT_TRUE(ok) << error_msg;
+
+  return scratch_dir + "boot-interpreter.art";
+}
+
 void DexoptTest::GenerateOatForTest(const std::string& dex_location,
                                     const std::string& oat_location,
                                     CompilerFilter::Filter filter,
                                     bool with_alternate_image,
                                     const char* compilation_reason,
                                     const std::vector<std::string>& extra_args) {
-  std::string dalvik_cache = GetDalvikCache(GetInstructionSetString(kRuntimeISA));
-  std::string dalvik_cache_tmp = dalvik_cache + ".redirected";
-
   std::vector<std::string> args;
   args.push_back("--dex-file=" + dex_location);
   args.push_back("--oat-file=" + oat_location);
@@ -94,8 +114,11 @@
   }
 
   std::string image_location = GetImageLocation();
+  std::optional<ScratchDir> scratch;
   if (with_alternate_image) {
-    args.push_back("--boot-image=" + GetImageLocation2());
+    scratch.emplace();  // Create the scratch directory for the generated boot image.
+    std::string alternate_image_location = GenerateAlternateImage(scratch->GetPath());
+    args.push_back("--boot-image=" + alternate_image_location);
   }
 
   if (compilation_reason != nullptr) {
@@ -113,30 +136,29 @@
                                                    oat_location.c_str(),
                                                    /*executable=*/ false,
                                                    /*low_4gb=*/ false,
-                                                   dex_location.c_str(),
-                                                   /*reservation=*/ nullptr,
+                                                   dex_location,
                                                    &error_msg));
   ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
   EXPECT_EQ(filter, odex_file->GetCompilerFilter());
 
-  std::string boot_image_checksums = gc::space::ImageSpace::GetBootClassPathChecksums(
-      ArrayRef<const std::string>(Runtime::Current()->GetBootClassPath()),
-      image_location,
-      kRuntimeISA,
-      gc::space::ImageSpaceLoadingOrder::kSystemFirst,
-      &error_msg);
-  ASSERT_FALSE(boot_image_checksums.empty()) << error_msg;
-
-  const OatHeader& oat_header = odex_file->GetOatHeader();
-
   if (CompilerFilter::DependsOnImageChecksum(filter)) {
+    const OatHeader& oat_header = odex_file->GetOatHeader();
+    const char* oat_bcp = oat_header.GetStoreValueByKey(OatHeader::kBootClassPathKey);
+    ASSERT_TRUE(oat_bcp != nullptr);
+    ASSERT_EQ(oat_bcp, android::base::Join(Runtime::Current()->GetBootClassPathLocations(), ':'));
     const char* checksums = oat_header.GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey);
     ASSERT_TRUE(checksums != nullptr);
-    if (with_alternate_image) {
-      EXPECT_NE(boot_image_checksums, checksums);
-    } else {
-      EXPECT_EQ(boot_image_checksums, checksums);
-    }
+
+    bool match = gc::space::ImageSpace::VerifyBootClassPathChecksums(
+        checksums,
+        oat_bcp,
+        image_location,
+        ArrayRef<const std::string>(Runtime::Current()->GetBootClassPathLocations()),
+        ArrayRef<const std::string>(Runtime::Current()->GetBootClassPath()),
+        kRuntimeISA,
+        gc::space::ImageSpaceLoadingOrder::kSystemFirst,
+        &error_msg);
+    ASSERT_EQ(!with_alternate_image, match) << error_msg;
   }
 }
 
@@ -174,8 +196,6 @@
   MemMap::Init();
 
   // Ensure a chunk of memory is reserved for the image space.
-  // The reservation_end includes room for the main space that has to come
-  // right after the image in case of the GSS collector.
   uint64_t reservation_start = ART_BASE_ADDRESS;
   uint64_t reservation_end = ART_BASE_ADDRESS + 384 * MB;
 
diff --git a/runtime/dexopt_test.h b/runtime/dexopt_test.h
index bfae8a1..a236393 100644
--- a/runtime/dexopt_test.h
+++ b/runtime/dexopt_test.h
@@ -32,6 +32,8 @@
 
   void PostRuntimeCreate() override;
 
+  std::string GenerateAlternateImage(const std::string& scratch_dir);
+
   // Generate an oat file for the purposes of test.
   // The oat file will be generated for dex_location in the given oat_location
   // with the following configuration:
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 12c33de..6bd1c8f 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -1076,7 +1076,8 @@
   return true;
 }
 
-static InstructionSet GetInstructionSetFromELF(uint16_t e_machine, uint32_t e_flags) {
+static InstructionSet GetInstructionSetFromELF(uint16_t e_machine,
+                                               uint32_t e_flags ATTRIBUTE_UNUSED) {
   switch (e_machine) {
     case EM_ARM:
       return InstructionSet::kArm;
@@ -1086,15 +1087,6 @@
       return InstructionSet::kX86;
     case EM_X86_64:
       return InstructionSet::kX86_64;
-    case EM_MIPS: {
-      if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R2 ||
-          (e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R6) {
-        return InstructionSet::kMips;
-      } else if ((e_flags & EF_MIPS_ARCH) == EF_MIPS_ARCH_64R6) {
-        return InstructionSet::kMips64;
-      }
-      break;
-    }
   }
   return InstructionSet::kNone;
 }
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 53dea72..a31be00 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -39,6 +39,7 @@
 #include "mirror/object-inl.h"
 #include "mirror/throwable.h"
 #include "nth_caller_visitor.h"
+#include "reflective_handle_scope-inl.h"
 #include "runtime.h"
 #include "stack_map.h"
 #include "thread.h"
@@ -114,6 +115,35 @@
   return method;
 }
 
+ALWAYS_INLINE
+inline ObjPtr<mirror::Class> CheckClassInitializedForObjectAlloc(ObjPtr<mirror::Class> klass,
+                                                                 Thread* self,
+                                                                 bool* slow_path)
+    REQUIRES_SHARED(Locks::mutator_lock_)
+    REQUIRES(!Roles::uninterruptible_) {
+  if (UNLIKELY(!klass->IsVisiblyInitialized())) {
+    StackHandleScope<1> hs(self);
+    Handle<mirror::Class> h_class(hs.NewHandle(klass));
+    // EnsureInitialized (the class initializer) might cause a GC.
+    // may cause us to suspend meaning that another thread may try to
+    // change the allocator while we are stuck in the entrypoints of
+    // an old allocator. Also, the class initialization may fail. To
+    // handle these cases we mark the slow path boolean as true so
+    // that the caller knows to check the allocator type to see if it
+    // has changed and to null-check the return value in case the
+    // initialization fails.
+    *slow_path = true;
+    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
+      DCHECK(self->IsExceptionPending());
+      return nullptr;  // Failure
+    } else {
+      DCHECK(!self->IsExceptionPending());
+    }
+    return h_class.Get();
+  }
+  return klass;
+}
+
 ALWAYS_INLINE inline ObjPtr<mirror::Class> CheckObjectAlloc(ObjPtr<mirror::Class> klass,
                                                             Thread* self,
                                                             bool* slow_path)
@@ -130,54 +160,7 @@
     *slow_path = true;
     return nullptr;  // Failure
   }
-  if (UNLIKELY(!klass->IsInitialized())) {
-    StackHandleScope<1> hs(self);
-    Handle<mirror::Class> h_klass(hs.NewHandle(klass));
-    // EnsureInitialized (the class initializer) might cause a GC.
-    // may cause us to suspend meaning that another thread may try to
-    // change the allocator while we are stuck in the entrypoints of
-    // an old allocator. Also, the class initialization may fail. To
-    // handle these cases we mark the slow path boolean as true so
-    // that the caller knows to check the allocator type to see if it
-    // has changed and to null-check the return value in case the
-    // initialization fails.
-    *slow_path = true;
-    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_klass, true, true)) {
-      DCHECK(self->IsExceptionPending());
-      return nullptr;  // Failure
-    } else {
-      DCHECK(!self->IsExceptionPending());
-    }
-    return h_klass.Get();
-  }
-  return klass;
-}
-
-ALWAYS_INLINE
-inline ObjPtr<mirror::Class> CheckClassInitializedForObjectAlloc(ObjPtr<mirror::Class> klass,
-                                                                 Thread* self,
-                                                                 bool* slow_path)
-    REQUIRES_SHARED(Locks::mutator_lock_)
-    REQUIRES(!Roles::uninterruptible_) {
-  if (UNLIKELY(!klass->IsInitialized())) {
-    StackHandleScope<1> hs(self);
-    Handle<mirror::Class> h_class(hs.NewHandle(klass));
-    // EnsureInitialized (the class initializer) might cause a GC.
-    // may cause us to suspend meaning that another thread may try to
-    // change the allocator while we are stuck in the entrypoints of
-    // an old allocator. Also, the class initialization may fail. To
-    // handle these cases we mark the slow path boolean as true so
-    // that the caller knows to check the allocator type to see if it
-    // has changed and to null-check the return value in case the
-    // initialization fails.
-    *slow_path = true;
-    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
-      DCHECK(self->IsExceptionPending());
-      return nullptr;  // Failure
-    }
-    return h_class.Get();
-  }
-  return klass;
+  return CheckClassInitializedForObjectAlloc(klass, self, slow_path);
 }
 
 // Allocate an instance of klass. Throws InstantationError if klass is not instantiable,
@@ -216,13 +199,15 @@
       return nullptr;
     }
     gc::Heap* heap = Runtime::Current()->GetHeap();
-    // Pass in false since the object cannot be finalizable.
+    // Pass in kNoAddFinalizer since the object cannot be finalizable.
     // CheckClassInitializedForObjectAlloc can cause thread suspension which means we may now be
     // instrumented.
-    return klass->Alloc</*kInstrumented=*/true, false>(self, heap->GetCurrentAllocator());
+    return klass->Alloc</*kInstrumented=*/true, mirror::Class::AddFinalizer::kNoAddFinalizer>(
+        self, heap->GetCurrentAllocator());
   }
-  // Pass in false since the object cannot be finalizable.
-  return klass->Alloc<kInstrumented, false>(self, allocator_type);
+  // Pass in kNoAddFinalizer since the object cannot be finalizable.
+  return klass->Alloc<kInstrumented,
+                      mirror::Class::AddFinalizer::kNoAddFinalizer>(self, allocator_type);
 }
 
 // Given the context of a calling Method and an initialized class, create an instance.
@@ -232,8 +217,9 @@
                                                              Thread* self,
                                                              gc::AllocatorType allocator_type) {
   DCHECK(klass != nullptr);
-  // Pass in false since the object cannot be finalizable.
-  return klass->Alloc<kInstrumented, false>(self, allocator_type);
+  // Pass in kNoAddFinalizer since the object cannot be finalizable.
+  return klass->Alloc<kInstrumented,
+                      mirror::Class::AddFinalizer::kNoAddFinalizer>(self, allocator_type);
 }
 
 
@@ -296,8 +282,11 @@
                                                         klass->GetComponentSizeShift(),
                                                         heap->GetCurrentAllocator());
   }
-  return mirror::Array::Alloc<kInstrumented>(self, klass, component_count,
-                                             klass->GetComponentSizeShift(), allocator_type);
+  return mirror::Array::Alloc<kInstrumented>(self,
+                                             klass,
+                                             component_count,
+                                             klass->GetComponentSizeShift(),
+                                             allocator_type);
 }
 
 template <bool kInstrumented>
@@ -313,8 +302,11 @@
   }
   // No need to retry a slow-path allocation as the above code won't cause a GC or thread
   // suspension.
-  return mirror::Array::Alloc<kInstrumented>(self, klass, component_count,
-                                             klass->GetComponentSizeShift(), allocator_type);
+  return mirror::Array::Alloc<kInstrumented>(self,
+                                             klass,
+                                             component_count,
+                                             klass->GetComponentSizeShift(),
+                                             allocator_type);
 }
 
 template<FindFieldType type, bool access_check>
@@ -368,7 +360,7 @@
       DCHECK(self->IsExceptionPending());  // Throw exception and unwind.
       return nullptr;  // Failure.
     }
-    if (UNLIKELY(is_set && resolved_field->IsFinal() && (fields_class != referring_class))) {
+    if (UNLIKELY(is_set && !resolved_field->CanBeChangedBy(referrer))) {
       ThrowIllegalAccessErrorFinalField(referrer, resolved_field);
       return nullptr;  // Failure.
     } else {
@@ -388,13 +380,15 @@
     return resolved_field;
   } else {
     // If the class is initialized we're done.
-    if (LIKELY(fields_class->IsInitialized())) {
+    if (LIKELY(fields_class->IsVisiblyInitialized())) {
       return resolved_field;
     } else {
       StackHandleScope<1> hs(self);
+      StackArtFieldHandleScope<1> rhs(self);
+      ReflectiveHandle<ArtField> resolved_field_handle(rhs.NewHandle(resolved_field));
       if (LIKELY(class_linker->EnsureInitialized(self, hs.NewHandle(fields_class), true, true))) {
         // Otherwise let's ensure the class is initialized before resolving the field.
-        return resolved_field;
+        return resolved_field_handle.Get();
       }
       DCHECK(self->IsExceptionPending());  // Throw exception and unwind
       return nullptr;  // Failure.
@@ -630,14 +624,14 @@
   if (is_static) {
     // Check class is initialized else fail so that we can contend to initialize the class with
     // other threads that may be racing to do this.
-    if (UNLIKELY(!fields_class->IsInitialized())) {
+    if (UNLIKELY(!fields_class->IsVisiblyInitialized())) {
       return nullptr;
     }
   }
   ObjPtr<mirror::Class> referring_class = referrer->GetDeclaringClass();
   if (UNLIKELY(!referring_class->CanAccess(fields_class) ||
                !referring_class->CanAccessMember(fields_class, resolved_field->GetAccessFlags()) ||
-               (is_set && resolved_field->IsFinal() && (fields_class != referring_class)))) {
+               (is_set && !resolved_field->CanBeChangedBy(referrer)))) {
     // Illegal access.
     return nullptr;
   }
@@ -741,27 +735,6 @@
   return h_class.Get();
 }
 
-inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) {
-  // Save any pending exception over monitor exit call.
-  ObjPtr<mirror::Throwable> saved_exception = nullptr;
-  if (UNLIKELY(self->IsExceptionPending())) {
-    saved_exception = self->GetException();
-    self->ClearException();
-  }
-  // Decode locked object and unlock, before popping local references.
-  self->DecodeJObject(locked)->MonitorExit(self);
-  if (UNLIKELY(self->IsExceptionPending())) {
-    LOG(FATAL) << "Synchronized JNI code returning with an exception:\n"
-        << saved_exception->Dump()
-        << "\nEncountered second exception during implicit MonitorExit:\n"
-        << self->GetException()->Dump();
-  }
-  // Restore pending exception.
-  if (saved_exception != nullptr) {
-    self->SetException(saved_exception);
-  }
-}
-
 template <typename INT_TYPE, typename FLOAT_TYPE>
 inline INT_TYPE art_float_to_integral(FLOAT_TYPE f) {
   const INT_TYPE kMaxInt = static_cast<INT_TYPE>(std::numeric_limits<INT_TYPE>::max());
@@ -779,6 +752,21 @@
   }
 }
 
+inline bool NeedsClinitCheckBeforeCall(ArtMethod* method) {
+  // The class needs to be visibly initialized before we can use entrypoints to
+  // compiled code for static methods. See b/18161648 . The class initializer is
+  // special as it is invoked during initialization and does not need the check.
+  return method->IsStatic() && !method->IsConstructor();
+}
+
+inline HandleScope* GetGenericJniHandleScope(ArtMethod** managed_sp,
+                                             size_t num_handle_scope_references) {
+  // The HandleScope is just below the cookie and padding to align as uintptr_t.
+  const size_t offset =
+      RoundUp(HandleScope::SizeOf(num_handle_scope_references) + kJniCookieSize, sizeof(uintptr_t));
+  return reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(managed_sp) - offset);
+}
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_INL_H_
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index ee2ab56..849a967 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -202,14 +202,16 @@
     if (outer_method != nullptr) {
       const OatQuickMethodHeader* current_code = outer_method->GetOatQuickMethodHeader(caller_pc);
       DCHECK(current_code != nullptr);
-      DCHECK(current_code->IsOptimized());
-      uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
-      CodeInfo code_info(current_code, CodeInfo::DecodeFlags::InlineInfoOnly);
-      StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
-      DCHECK(stack_map.IsValid());
-      BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
-      if (!inline_infos.empty()) {
-        caller = GetResolvedMethod(outer_method, code_info, inline_infos);
+      if (current_code->IsOptimized() &&
+          CodeInfo::HasInlineInfo(current_code->GetOptimizedCodeInfoPtr())) {
+        uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
+        CodeInfo code_info = CodeInfo::DecodeInlineInfoOnly(current_code);
+        StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+        DCHECK(stack_map.IsValid());
+        BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
+        if (!inline_infos.empty()) {
+          caller = GetResolvedMethod(outer_method, code_info, inline_infos);
+        }
       }
     }
     if (kIsDebugBuild && do_caller_check) {
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index a8618bd..85082d3 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -42,6 +42,7 @@
 
 class ArtField;
 class ArtMethod;
+class HandleScope;
 enum InvokeType : uint32_t;
 class OatQuickMethodHeader;
 class ScopedObjectAccessAlreadyRunnable;
@@ -49,7 +50,7 @@
 
 // Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
 // cannot be resolved, throw an error. If it can, use it to create an instance.
-template <bool kInstrumented>
+template <bool kInstrumented = true>
 ALWAYS_INLINE inline ObjPtr<mirror::Object> AllocObjectFromCode(ObjPtr<mirror::Class> klass,
                                                                 Thread* self,
                                                                 gc::AllocatorType allocator_type)
@@ -87,7 +88,7 @@
 // it cannot be resolved, throw an error. If it can, use it to create an array.
 // When verification/compiler hasn't been able to verify access, optionally perform an access
 // check.
-template <bool kAccessCheck, bool kInstrumented>
+template <bool kAccessCheck, bool kInstrumented = true>
 ALWAYS_INLINE inline ObjPtr<mirror::Array> AllocArrayFromCode(dex::TypeIndex type_idx,
                                                               int32_t component_count,
                                                               ArtMethod* method,
@@ -173,10 +174,6 @@
     REQUIRES_SHARED(Locks::mutator_lock_)
     REQUIRES(!Roles::uninterruptible_);
 
-// TODO: annotalysis disabled as monitor semantics are maintained in Java code.
-inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self)
-    NO_THREAD_SAFETY_ANALYSIS REQUIRES(!Roles::uninterruptible_);
-
 void CheckReferenceResult(Handle<mirror::Object> o, Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_)
     REQUIRES(!Roles::uninterruptible_);
@@ -212,6 +209,15 @@
 ArtMethod* GetCalleeSaveOuterMethod(Thread* self, CalleeSaveType type)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
+// Returns whether we need to do class initialization check before invoking the method.
+// The caller is responsible for performing that check.
+bool NeedsClinitCheckBeforeCall(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+
+constexpr size_t kJniCookieSize = sizeof(uint32_t);
+
+inline HandleScope* GetGenericJniHandleScope(ArtMethod** managed_sp,
+                                             size_t num_handle_scope_references);
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index a4083a4..f1e5772 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -16,6 +16,11 @@
 
 #include <android-base/logging.h>
 
+#include "arch/arm/jni_frame_arm.h"
+#include "arch/arm64/jni_frame_arm64.h"
+#include "arch/instruction_set.h"
+#include "arch/x86/jni_frame_x86.h"
+#include "arch/x86_64/jni_frame_x86_64.h"
 #include "art_method-inl.h"
 #include "entrypoints/entrypoint_utils.h"
 #include "jni/java_vm_ext.h"
@@ -26,22 +31,16 @@
 namespace art {
 
 // Used by the JNI dlsym stub to find the native method to invoke if none is registered.
-#if defined(__arm__) || defined(__aarch64__)
-extern "C" const void* artFindNativeMethod() {
-  Thread* self = Thread::Current();
-#else
-extern "C" const void* artFindNativeMethod(Thread* self) {
-  DCHECK_EQ(self, Thread::Current());
-#endif
-  Locks::mutator_lock_->AssertNotHeld(self);  // We come here as Native.
-  ScopedObjectAccess soa(self);
-
+extern "C" const void* artFindNativeMethodRunnable(Thread* self)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  Locks::mutator_lock_->AssertSharedHeld(self);  // We come here as Runnable.
   ArtMethod* method = self->GetCurrentMethod(nullptr);
   DCHECK(method != nullptr);
 
   // Lookup symbol address for method, on failure we'll return null with an exception set,
   // otherwise we return the address of the method we found.
-  void* native_code = soa.Vm()->FindCodeForNativeMethod(method);
+  JavaVMExt* vm = down_cast<JNIEnvExt*>(self->GetJniEnv())->GetVm();
+  void* native_code = vm->FindCodeForNativeMethod(method);
   if (native_code == nullptr) {
     self->AssertPendingException();
     return nullptr;
@@ -50,4 +49,32 @@
   return method->RegisterNative(native_code);
 }
 
+// Used by the JNI dlsym stub to find the native method to invoke if none is registered.
+extern "C" const void* artFindNativeMethod(Thread* self) {
+  DCHECK_EQ(self, Thread::Current());
+  Locks::mutator_lock_->AssertNotHeld(self);  // We come here as Native.
+  ScopedObjectAccess soa(self);
+  return artFindNativeMethodRunnable(self);
+}
+
+extern "C" size_t artCriticalNativeOutArgsSize(ArtMethod* method)
+    REQUIRES_SHARED(Locks::mutator_lock_)  {
+  uint32_t shorty_len;
+  const char* shorty = method->GetShorty(&shorty_len);
+  switch (kRuntimeISA) {
+    case InstructionSet::kArm:
+    case InstructionSet::kThumb2:
+      return arm::GetCriticalNativeOutArgsSize(shorty, shorty_len);
+    case InstructionSet::kArm64:
+      return arm64::GetCriticalNativeOutArgsSize(shorty, shorty_len);
+    case InstructionSet::kX86:
+      return x86::GetCriticalNativeOutArgsSize(shorty, shorty_len);
+    case InstructionSet::kX86_64:
+      return x86_64::GetCriticalNativeOutArgsSize(shorty, shorty_len);
+    default:
+      UNIMPLEMENTED(FATAL) << kRuntimeISA;
+      UNREACHABLE();
+  }
+}
+
 }  // namespace art
diff --git a/runtime/entrypoints/jni/jni_entrypoints.h b/runtime/entrypoints/jni/jni_entrypoints.h
index 9c1b0dc..0aabed0 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.h
+++ b/runtime/entrypoints/jni/jni_entrypoints.h
@@ -29,8 +29,10 @@
 
 // Pointers to functions that are called by JNI trampolines via thread-local storage.
 struct PACKED(4) JniEntryPoints {
-  // Called when the JNI method isn't registered.
+  // Called when the JNI method isn't registered for normal native and @FastNative methods.
   void* (*pDlsymLookup)(JNIEnv* env, jobject);
+  // Called when the JNI method isn't registered for @CriticalNative methods.
+  void* (*pDlsymLookupCritical)(JNIEnv* env, jobject);
 };
 
 }  // namespace art
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index e555d68..1baccee 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -28,8 +28,6 @@
 // specialize the code.
 #include "arch/arm/callee_save_frame_arm.h"
 #include "arch/arm64/callee_save_frame_arm64.h"
-#include "arch/mips/callee_save_frame_mips.h"
-#include "arch/mips64/callee_save_frame_mips64.h"
 #include "arch/x86/callee_save_frame_x86.h"
 #include "arch/x86_64/callee_save_frame_x86_64.h"
 
@@ -79,10 +77,6 @@
 template <>
 struct CSFSelector<InstructionSet::kArm64> { using type = arm64::Arm64CalleeSaveFrame; };
 template <>
-struct CSFSelector<InstructionSet::kMips> { using type = mips::MipsCalleeSaveFrame; };
-template <>
-struct CSFSelector<InstructionSet::kMips64> { using type = mips64::Mips64CalleeSaveFrame; };
-template <>
 struct CSFSelector<InstructionSet::kX86> { using type = x86::X86CalleeSaveFrame; };
 template <>
 struct CSFSelector<InstructionSet::kX86_64> { using type = x86_64::X86_64CalleeSaveFrame; };
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index ecf6f67..dba4ecc 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -41,22 +41,21 @@
   ScopedQuickEntrypointChecks sqec(self);
   DCHECK(klass != nullptr);
   if (kUseTlabFastPath && !kInstrumented && allocator_type == gc::kAllocatorTypeTLAB) {
-    if (kInitialized || klass->IsInitialized()) {
-      if (!kFinalize || !klass->IsFinalizable()) {
-        size_t byte_count = klass->GetObjectSize();
-        byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment);
-        mirror::Object* obj;
-        if (LIKELY(byte_count < self->TlabSize())) {
-          obj = self->AllocTlab(byte_count);
-          DCHECK(obj != nullptr) << "AllocTlab can't fail";
-          obj->SetClass(klass);
-          if (kUseBakerReadBarrier) {
-            obj->AssertReadBarrierState();
-          }
-          QuasiAtomic::ThreadFenceForConstructor();
-          return obj;
-        }
+    // The "object size alloc fast path" is set when the class is
+    // visibly initialized, objects are fixed size and non-finalizable.
+    // Otherwise, the value is too large for the size check to succeed.
+    size_t byte_count = klass->GetObjectSizeAllocFastPath();
+    if (LIKELY(byte_count < self->TlabSize())) {
+      static_assert(kObjectAlignment == gc::space::BumpPointerSpace::kAlignment, "Alignment check");
+      DCHECK_ALIGNED(byte_count, gc::space::BumpPointerSpace::kAlignment);
+      mirror::Object* obj = self->AllocTlab(byte_count);
+      DCHECK(obj != nullptr) << "AllocTlab can't fail";
+      obj->SetClass(klass);
+      if (kUseBakerReadBarrier) {
+        obj->AssertReadBarrierState();
       }
+      QuasiAtomic::ThreadFenceForConstructor();
+      return obj;
     }
   }
   if (kInitialized) {
diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h
index aa32113..42f962e 100644
--- a/runtime/entrypoints/quick/quick_default_externs.h
+++ b/runtime/entrypoints/quick/quick_default_externs.h
@@ -130,4 +130,7 @@
 extern "C" void art_quick_throw_stack_overflow(void*);
 extern "C" void art_quick_throw_string_bounds(int32_t index, int32_t limit);
 
+// Inline cache.
+extern "C" void art_quick_update_inline_cache();
+
 #endif  // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_DEFAULT_EXTERNS_H_
diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
index ce12fde..a77bb85 100644
--- a/runtime/entrypoints/quick/quick_default_init_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h
@@ -29,6 +29,7 @@
 static void DefaultInitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
   // JNI
   jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
+  jpoints->pDlsymLookupCritical = art_jni_dlsym_lookup_critical_stub;
 
   // Alloc
   ResetQuickAllocEntryPoints(qpoints, /* is_marking= */ true);
@@ -121,6 +122,13 @@
 
   // Deoptimize
   qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
+
+  // StringBuilder append
+  qpoints->pStringBuilderAppend = art_quick_string_builder_append;
+
+  // Tiered JIT support
+  qpoints->pUpdateInlineCache = art_quick_update_inline_cache;
+  qpoints->pCompileOptimized = art_quick_compile_optimized;
 }
 
 }  // namespace art
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index e939982..838b5b5 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -53,7 +53,10 @@
   DCHECK_LT(slot, oat_file->GetBssGcRoots().data() + oat_file->GetBssGcRoots().size());
   if (slot->IsNull()) {
     // This may race with another thread trying to store the very same value but that's OK.
-    *slot = GcRoot<mirror::Object>(object);
+    std::atomic<GcRoot<mirror::Object>>* atomic_slot =
+        reinterpret_cast<std::atomic<GcRoot<mirror::Object>>*>(slot);
+    static_assert(sizeof(*slot) == sizeof(*atomic_slot), "Size check");
+    atomic_slot->store(GcRoot<mirror::Object>(object), std::memory_order_release);
     // We need a write barrier for the class loader that holds the GC roots in the .bss.
     ObjPtr<mirror::ClassLoader> class_loader = outer_method->GetClassLoader();
     Runtime* runtime = Runtime::Current();
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 243f7ec..d75893d 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -34,6 +34,7 @@
 class Class;
 template<class MirrorType> class CompressedReference;
 class Object;
+class String;
 }  // namespace mirror
 
 class ArtMethod;
@@ -78,6 +79,11 @@
                                                              jobject locked, Thread* self)
     NO_THREAD_SAFETY_ANALYSIS HOT_ATTR;
 
+extern "C" mirror::String* artStringBuilderAppend(uint32_t format,
+                                                  const uint32_t* args,
+                                                  Thread* self)
+    REQUIRES_SHARED(Locks::mutator_lock_) HOT_ATTR;
+
 extern void ReadBarrierJni(mirror::CompressedReference<mirror::Object>* handle_on_stack,
                            Thread* self)
     NO_THREAD_SAFETY_ANALYSIS HOT_ATTR;
@@ -85,7 +91,7 @@
 
 // Read barrier entrypoints.
 //
-// Compilers for ARM, ARM64, MIPS, MIPS64 can insert a call to these
+// Compilers for ARM, ARM64 can insert a call to these
 // functions directly.  For x86 and x86-64, compilers need a wrapper
 // assembly function, to handle mismatch in ABI.
 
diff --git a/runtime/entrypoints/quick/quick_entrypoints_enum.cc b/runtime/entrypoints/quick/quick_entrypoints_enum.cc
index 81f152b..5387e44 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_enum.cc
+++ b/runtime/entrypoints/quick/quick_entrypoints_enum.cc
@@ -61,6 +61,7 @@
     case kQuickUshrLong:
       return false;
 
+    // TODO: Remove these entrypoints now that MIPS support was removed.
     /* Used by mips for 64bit volatile load/stores. */
     case kQuickA64Load:
     case kQuickA64Store:
@@ -112,6 +113,7 @@
     case kQuickUshrLong:
       return false;
 
+    // TODO: Remove these entrypoints now that MIPS support was removed.
     /* Used by mips for 64bit volatile load/stores. */
     case kQuickA64Load:
     case kQuickA64Store:
diff --git a/runtime/entrypoints/quick/quick_entrypoints_enum.h b/runtime/entrypoints/quick/quick_entrypoints_enum.h
index 1cf7f8d..6240a7b 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_enum.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_enum.h
@@ -36,7 +36,7 @@
 
 // Translate a QuickEntrypointEnum value to the corresponding ThreadOffset.
 template <PointerSize pointer_size>
-static ThreadOffset<pointer_size> GetThreadOffset(QuickEntrypointEnum trampoline) {
+static constexpr ThreadOffset<pointer_size> GetThreadOffset(QuickEntrypointEnum trampoline) {
   switch (trampoline)
   {  // NOLINT(whitespace/braces)
   #define ENTRYPOINT_ENUM(name, rettype, ...) case kQuick ## name : \
diff --git a/runtime/entrypoints/quick/quick_entrypoints_list.h b/runtime/entrypoints/quick/quick_entrypoints_list.h
index 42b680e..e031b21 100644
--- a/runtime/entrypoints/quick/quick_entrypoints_list.h
+++ b/runtime/entrypoints/quick/quick_entrypoints_list.h
@@ -169,6 +169,11 @@
   V(NewStringFromStringBuffer, void, void) \
   V(NewStringFromStringBuilder, void, void) \
 \
+  V(StringBuilderAppend, void*, uint32_t) \
+\
+  V(UpdateInlineCache, void, void) \
+  V(CompileOptimized, void, ArtMethod*, Thread*) \
+\
   V(ReadBarrierJni, void, mirror::CompressedReference<mirror::Object>*, Thread*) \
   V(ReadBarrierMarkReg00, mirror::Object*, mirror::Object*) \
   V(ReadBarrierMarkReg01, mirror::Object*, mirror::Object*) \
diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
index d22f180..5b7fe0c 100644
--- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
@@ -24,13 +24,11 @@
 /*
  * Handle fill array data by copying appropriate part of dex file into array.
  */
-extern "C" int artHandleFillArrayDataFromCode(uint32_t payload_offset, mirror::Array* array,
-                                              ArtMethod* method, Thread* self)
+extern "C" int artHandleFillArrayDataFromCode(const Instruction::ArrayDataPayload* payload,
+                                              mirror::Array* array,
+                                              Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ScopedQuickEntrypointChecks sqec(self);
-  const uint16_t* const insns = method->DexInstructions().Insns();
-  const Instruction::ArrayDataPayload* payload =
-      reinterpret_cast<const Instruction::ArrayDataPayload*>(insns + payload_offset);
   bool success = FillArrayData(array, payload);
   return success ? 0 : -1;
 }
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 5c86bbb..38c6d3c 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -123,6 +123,29 @@
   self->PopHandleScope();
 }
 
+// TODO: annotalysis disabled as monitor semantics are maintained in Java code.
+static inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self)
+    NO_THREAD_SAFETY_ANALYSIS REQUIRES(!Roles::uninterruptible_) {
+  // Save any pending exception over monitor exit call.
+  ObjPtr<mirror::Throwable> saved_exception = nullptr;
+  if (UNLIKELY(self->IsExceptionPending())) {
+    saved_exception = self->GetException();
+    self->ClearException();
+  }
+  // Decode locked object and unlock, before popping local references.
+  self->DecodeJObject(locked)->MonitorExit(self);
+  if (UNLIKELY(self->IsExceptionPending())) {
+    LOG(FATAL) << "Synchronized JNI code returning with an exception:\n"
+        << saved_exception->Dump()
+        << "\nEncountered second exception during implicit MonitorExit:\n"
+        << self->GetException()->Dump();
+  }
+  // Restore pending exception.
+  if (saved_exception != nullptr) {
+    self->SetException(saved_exception);
+  }
+}
+
 // TODO: These should probably be templatized or macro-ized.
 // Otherwise there's just too much repetitive boilerplate.
 
@@ -193,8 +216,7 @@
                                     uint32_t saved_local_ref_cookie,
                                     jvalue result,
                                     uint64_t result_f,
-                                    ArtMethod* called,
-                                    HandleScope* handle_scope)
+                                    ArtMethod* called)
     // TODO: NO_THREAD_SAFETY_ANALYSIS as GoToRunnable() is NO_THREAD_SAFETY_ANALYSIS
     NO_THREAD_SAFETY_ANALYSIS {
   bool critical_native = called->IsCriticalNative();
@@ -207,22 +229,20 @@
   }
   // We need the mutator lock (i.e., calling GoToRunnable()) before accessing the shorty or the
   // locked object.
-  jobject locked = called->IsSynchronized() ? handle_scope->GetHandle(0).ToJObject() : nullptr;
+  if (called->IsSynchronized()) {
+    DCHECK(normal_native) << "@FastNative/@CriticalNative and synchronize is not supported";
+    HandleScope* handle_scope = down_cast<HandleScope*>(self->GetTopHandleScope());
+    jobject lock = handle_scope->GetHandle(0).ToJObject();
+    DCHECK(lock != nullptr);
+    UnlockJniSynchronizedMethod(lock, self);
+  }
   char return_shorty_char = called->GetShorty()[0];
   if (return_shorty_char == 'L') {
-    if (locked != nullptr) {
-      DCHECK(normal_native) << " @FastNative and synchronize is not supported";
-      UnlockJniSynchronizedMethod(locked, self);
-    }
     return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceHandleResult(
         result.l, saved_local_ref_cookie, self));
   } else {
-    if (locked != nullptr) {
-      DCHECK(normal_native) << " @FastNative and synchronize is not supported";
-      UnlockJniSynchronizedMethod(locked, self);  // Must decode before pop.
-    }
     if (LIKELY(!critical_native)) {
-      PopLocalReferences(saved_local_ref_cookie, self);
+      PopLocalReferences(saved_local_ref_cookie, self);  // Invalidates top handle scope.
     }
     switch (return_shorty_char) {
       case 'F': {
diff --git a/runtime/entrypoints/quick/quick_string_builder_append_entrypoints.cc b/runtime/entrypoints/quick/quick_string_builder_append_entrypoints.cc
new file mode 100644
index 0000000..9afaf43
--- /dev/null
+++ b/runtime/entrypoints/quick/quick_string_builder_append_entrypoints.cc
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "quick_entrypoints.h"
+
+#include "string_builder_append.h"
+#include "obj_ptr-inl.h"
+
+namespace art {
+
+extern "C" mirror::String* artStringBuilderAppend(uint32_t format,
+                                                  const uint32_t* args,
+                                                  Thread* self) {
+  return StringBuilderAppend::AppendF(format, args, self).Ptr();
+}
+
+}  // namespace art
diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
index 0838059..64be926 100644
--- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
@@ -15,6 +15,8 @@
  */
 
 #include "callee_save_frame.h"
+#include "jit/jit.h"
+#include "runtime.h"
 #include "thread-inl.h"
 
 namespace art {
@@ -25,4 +27,11 @@
   self->CheckSuspend();
 }
 
+extern "C" void artCompileOptimized(ArtMethod* method, Thread* self)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ScopedQuickEntrypointChecks sqec(self);
+  ScopedAssertNoThreadSuspension sants("Enqueuing optimized compilation");
+  Runtime::Current()->GetJit()->EnqueueOptimizedCompilation(method, self);
+}
+
 }  // namespace art
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 2e447ec..202b031 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -54,7 +54,7 @@
    */
   ScopedQuickEntrypointChecks sqec(self);
   if (exception == nullptr) {
-    self->ThrowNewException("Ljava/lang/NullPointerException;", "throw with null exception");
+    self->ThrowNewException("Ljava/lang/NullPointerException;", nullptr);
   } else {
     self->SetException(exception);
   }
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index a2420af..5356637 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -48,6 +48,7 @@
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/var_handle.h"
+#include "oat.h"
 #include "oat_file.h"
 #include "oat_quick_method_header.h"
 #include "quick_exception_handler.h"
@@ -138,90 +139,6 @@
   static size_t GprIndexToGprOffset(uint32_t gpr_index) {
     return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
   }
-#elif defined(__mips__) && !defined(__LP64__)
-  // The callee save frame is pointed to by SP.
-  // | argN       |  |
-  // | ...        |  |
-  // | arg4       |  |
-  // | arg3 spill |  |  Caller's frame
-  // | arg2 spill |  |
-  // | arg1 spill |  |
-  // | Method*    | ---
-  // | RA         |
-  // | ...        |    callee saves
-  // | T1         |    arg5
-  // | T0         |    arg4
-  // | A3         |    arg3
-  // | A2         |    arg2
-  // | A1         |    arg1
-  // | F19        |
-  // | F18        |    f_arg5
-  // | F17        |
-  // | F16        |    f_arg4
-  // | F15        |
-  // | F14        |    f_arg3
-  // | F13        |
-  // | F12        |    f_arg2
-  // | F11        |
-  // | F10        |    f_arg1
-  // | F9         |
-  // | F8         |    f_arg0
-  // |            |    padding
-  // | A0/Method* |  <- sp
-  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
-  static constexpr bool kAlignPairRegister = true;
-  static constexpr bool kQuickSoftFloatAbi = false;
-  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
-  static constexpr bool kQuickSkipOddFpRegisters = true;
-  static constexpr size_t kNumQuickGprArgs = 5;   // 5 arguments passed in GPRs.
-  static constexpr size_t kNumQuickFprArgs = 12;  // 6 arguments passed in FPRs. Floats can be
-                                                  // passed only in even numbered registers and each
-                                                  // double occupies two registers.
-  static constexpr bool kGprFprLockstep = false;
-  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
-    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
-  }
-#elif defined(__mips__) && defined(__LP64__)
-  // The callee save frame is pointed to by SP.
-  // | argN       |  |
-  // | ...        |  |
-  // | arg4       |  |
-  // | arg3 spill |  |  Caller's frame
-  // | arg2 spill |  |
-  // | arg1 spill |  |
-  // | Method*    | ---
-  // | RA         |
-  // | ...        |    callee saves
-  // | A7         |    arg7
-  // | A6         |    arg6
-  // | A5         |    arg5
-  // | A4         |    arg4
-  // | A3         |    arg3
-  // | A2         |    arg2
-  // | A1         |    arg1
-  // | F19        |    f_arg7
-  // | F18        |    f_arg6
-  // | F17        |    f_arg5
-  // | F16        |    f_arg4
-  // | F15        |    f_arg3
-  // | F14        |    f_arg2
-  // | F13        |    f_arg1
-  // | F12        |    f_arg0
-  // |            |    padding
-  // | A0/Method* |  <- sp
-  // NOTE: for Mip64, when A0 is skipped, F12 is also skipped.
-  static constexpr bool kSplitPairAcrossRegisterAndStack = false;
-  static constexpr bool kAlignPairRegister = false;
-  static constexpr bool kQuickSoftFloatAbi = false;
-  static constexpr bool kQuickDoubleRegAlignedFloatBackFilled = false;
-  static constexpr bool kQuickSkipOddFpRegisters = false;
-  static constexpr size_t kNumQuickGprArgs = 7;  // 7 arguments passed in GPRs.
-  static constexpr size_t kNumQuickFprArgs = 7;  // 7 arguments passed in FPRs.
-  static constexpr bool kGprFprLockstep = true;
-
-  static size_t GprIndexToGprOffset(uint32_t gpr_index) {
-    return gpr_index * GetBytesPerGprSpillLocation(kRuntimeISA);
-  }
 #elif defined(__i386__)
   // The callee save frame is pointed to by SP.
   // | argN        |  |
@@ -343,7 +260,7 @@
     uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc);
 
     if (current_code->IsOptimized()) {
-      CodeInfo code_info(current_code, CodeInfo::DecodeFlags::InlineInfoOnly);
+      CodeInfo code_info = CodeInfo::DecodeInlineInfoOnly(current_code);
       StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset);
       DCHECK(stack_map.IsValid());
       BitTableRange<InlineInfo> inline_infos = code_info.GetInlineInfosOf(stack_map);
@@ -353,16 +270,20 @@
         return stack_map.GetDexPc();
       }
     } else {
-      return current_code->ToDexPc(*caller_sp, outer_pc);
+      return current_code->ToDexPc(caller_sp, outer_pc);
     }
   }
 
+  static uint8_t* GetCallingPcAddr(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK((*sp)->IsCalleeSaveMethod());
+    uint8_t* return_address_spill =
+        reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_ReturnPcOffset;
+    return return_address_spill;
+  }
+
   // For the given quick ref and args quick frame, return the caller's PC.
   static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
-    DCHECK((*sp)->IsCalleeSaveMethod());
-    uint8_t* return_adress_spill =
-        reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_ReturnPcOffset;
-    return *reinterpret_cast<uintptr_t*>(return_adress_spill);
+    return *reinterpret_cast<uintptr_t*>(GetCallingPcAddr(sp));
   }
 
   QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
@@ -515,15 +436,10 @@
         case Primitive::kPrimLong:
           if (kQuickSoftFloatAbi || (cur_type_ == Primitive::kPrimLong)) {
             if (cur_type_ == Primitive::kPrimLong &&
-#if defined(__mips__) && !defined(__LP64__)
-                (gpr_index_ == 0 || gpr_index_ == 2) &&
-#else
                 gpr_index_ == 0 &&
-#endif
                 kAlignPairRegister) {
-              // Currently, this is only for ARM and MIPS, where we align long parameters with
-              // even-numbered registers by skipping R1 (on ARM) or A1(A3) (on MIPS) and using
-              // R2 (on ARM) or A2(T0) (on MIPS) instead.
+              // Currently, this is only for ARM, where we align long parameters with
+              // even-numbered registers by skipping R1 and using R2 instead.
               IncGprIndex();
             }
             is_split_long_or_double_ = (GetBytesPerGprSpillLocation(kRuntimeISA) == 4) &&
@@ -769,22 +685,22 @@
     BuildQuickShadowFrameVisitor shadow_frame_builder(sp, method->IsStatic(), shorty, shorty_len,
                                                       shadow_frame, first_arg_reg);
     shadow_frame_builder.VisitArguments();
-    const bool needs_initialization =
-        method->IsStatic() && !method->GetDeclaringClass()->IsInitialized();
     // Push a transition back into managed code onto the linked list in thread.
     self->PushManagedStackFragment(&fragment);
     self->PushShadowFrame(shadow_frame);
     self->EndAssertNoThreadSuspension(old_cause);
 
-    if (needs_initialization) {
-      // Ensure static method's class is initialized.
-      StackHandleScope<1> hs(self);
-      Handle<mirror::Class> h_class(hs.NewHandle(shadow_frame->GetMethod()->GetDeclaringClass()));
-      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
-        DCHECK(Thread::Current()->IsExceptionPending())
-            << shadow_frame->GetMethod()->PrettyMethod();
-        self->PopManagedStackFragment(fragment);
-        return 0;
+    if (NeedsClinitCheckBeforeCall(method)) {
+      ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
+      if (UNLIKELY(!declaring_class->IsVisiblyInitialized())) {
+        // Ensure static method's class is initialized.
+        StackHandleScope<1> hs(self);
+        Handle<mirror::Class> h_class(hs.NewHandle(declaring_class));
+        if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
+          DCHECK(Thread::Current()->IsExceptionPending()) << method->PrettyMethod();
+          self->PopManagedStackFragment(fragment);
+          return 0;
+        }
       }
     }
 
@@ -941,12 +857,12 @@
   instrumentation::Instrumentation* instr = Runtime::Current()->GetInstrumentation();
   if (instr->HasMethodEntryListeners()) {
     instr->MethodEnterEvent(soa.Self(),
-                            soa.Decode<mirror::Object>(rcvr_jobj).Ptr(),
+                            soa.Decode<mirror::Object>(rcvr_jobj),
                             proxy_method,
                             0);
     if (soa.Self()->IsExceptionPending()) {
       instr->MethodUnwindEvent(self,
-                               soa.Decode<mirror::Object>(rcvr_jobj).Ptr(),
+                               soa.Decode<mirror::Object>(rcvr_jobj),
                                proxy_method,
                                0);
       return 0;
@@ -956,15 +872,16 @@
   if (soa.Self()->IsExceptionPending()) {
     if (instr->HasMethodUnwindListeners()) {
       instr->MethodUnwindEvent(self,
-                               soa.Decode<mirror::Object>(rcvr_jobj).Ptr(),
+                               soa.Decode<mirror::Object>(rcvr_jobj),
                                proxy_method,
                                0);
     }
   } else if (instr->HasMethodExitListeners()) {
     instr->MethodExitEvent(self,
-                           soa.Decode<mirror::Object>(rcvr_jobj).Ptr(),
+                           soa.Decode<mirror::Object>(rcvr_jobj),
                            proxy_method,
                            0,
+                           {},
                            result);
   }
   return result.GetJ();
@@ -1154,6 +1071,8 @@
   instrumentation->PushInstrumentationStackFrame(self,
                                                  is_static ? nullptr : this_object,
                                                  method,
+                                                 reinterpret_cast<uintptr_t>(
+                                                     QuickArgumentVisitor::GetCallingPcAddr(sp)),
                                                  QuickArgumentVisitor::GetCallingPc(sp),
                                                  interpreter_entry);
 
@@ -1179,9 +1098,9 @@
   // Compute address of return PC and sanity check that it currently holds 0.
   constexpr size_t return_pc_offset =
       RuntimeCalleeSaveFrame::GetReturnPcOffset(CalleeSaveType::kSaveEverything);
-  uintptr_t* return_pc = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) +
-                                                      return_pc_offset);
-  CHECK_EQ(*return_pc, 0U);
+  uintptr_t* return_pc_addr = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(sp) +
+                                                           return_pc_offset);
+  CHECK_EQ(*return_pc_addr, 0U);
 
   // Pop the frame filling in the return pc. The low half of the return value is 0 when
   // deoptimization shouldn't be performed with the high-half having the return address. When
@@ -1189,7 +1108,7 @@
   // deoptimization entry point.
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
   TwoWordReturn return_or_deoptimize_pc = instrumentation->PopInstrumentationStackFrame(
-      self, return_pc, gpr_result, fpr_result);
+      self, return_pc_addr, gpr_result, fpr_result);
   if (self->IsExceptionPending() || self->ObserveAsyncException()) {
     return GetTwoWordFailureValue();
   }
@@ -1415,7 +1334,10 @@
         DCHECK_GE(method_entry, oat_file->GetBssMethods().data());
         DCHECK_LT(method_entry,
                   oat_file->GetBssMethods().data() + oat_file->GetBssMethods().size());
-        *method_entry = called;
+        std::atomic<ArtMethod*>* atomic_entry =
+            reinterpret_cast<std::atomic<ArtMethod*>*>(method_entry);
+        static_assert(sizeof(*method_entry) == sizeof(*atomic_entry), "Size check.");
+        atomic_entry->store(called, std::memory_order_release);
       }
     }
   }
@@ -1452,50 +1374,32 @@
                                << invoke_type << " " << orig_called->GetVtableIndex();
     }
 
-    // Ensure that the called method's class is initialized.
-    StackHandleScope<1> hs(soa.Self());
-    Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
-    linker->EnsureInitialized(soa.Self(), called_class, true, true);
+    ObjPtr<mirror::Class> called_class = called->GetDeclaringClass();
+    if (NeedsClinitCheckBeforeCall(called) && !called_class->IsVisiblyInitialized()) {
+      // Ensure that the called method's class is initialized.
+      StackHandleScope<1> hs(soa.Self());
+      HandleWrapperObjPtr<mirror::Class> h_called_class(hs.NewHandleWrapper(&called_class));
+      linker->EnsureInitialized(soa.Self(), h_called_class, true, true);
+    }
     bool force_interpreter = self->IsForceInterpreter() && !called->IsNative();
-    if (LIKELY(called_class->IsInitialized())) {
-      if (UNLIKELY(force_interpreter ||
-                   Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
+    if (called_class->IsInitialized() || called_class->IsInitializing()) {
+      if (UNLIKELY(force_interpreter)) {
         // If we are single-stepping or the called method is deoptimized (by a
         // breakpoint, for example), then we have to execute the called method
         // with the interpreter.
         code = GetQuickToInterpreterBridge();
-      } else if (UNLIKELY(Dbg::IsForcedInstrumentationNeededForResolution(self, caller))) {
-        // If the caller is deoptimized (by a breakpoint, for example), we have to
-        // continue its execution with interpreter when returning from the called
-        // method. Because we do not want to execute the called method with the
-        // interpreter, we wrap its execution into the instrumentation stubs.
-        // When the called method returns, it will execute the instrumentation
-        // exit hook that will determine the need of the interpreter with a call
-        // to Dbg::IsForcedInterpreterNeededForUpcall and deoptimize the stack if
-        // it is needed.
-        code = GetQuickInstrumentationEntryPoint();
       } else {
         code = called->GetEntryPointFromQuickCompiledCode();
-      }
-    } else if (called_class->IsInitializing()) {
-      if (UNLIKELY(force_interpreter ||
-                   Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
-        // If we are single-stepping or the called method is deoptimized (by a
-        // breakpoint, for example), then we have to execute the called method
-        // with the interpreter.
-        code = GetQuickToInterpreterBridge();
-      } else if (invoke_type == kStatic) {
-        // Class is still initializing, go to JIT or oat and grab code (trampoline must be
-        // left in place until class is initialized to stop races between threads).
-        if (Runtime::Current()->GetJit() != nullptr) {
-          code = Runtime::Current()->GetJit()->GetCodeCache()->GetZygoteSavedEntryPoint(called);
-        }
-        if (code == nullptr) {
+        if (linker->IsQuickResolutionStub(code)) {
+          DCHECK_EQ(invoke_type, kStatic);
+          // Go to JIT or oat and grab code.
           code = linker->GetQuickOatCodeFor(called);
+          if (called_class->IsInitialized()) {
+            // Only update the entrypoint once the class is initialized. Other
+            // threads still need to go through the resolution stub.
+            Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(called, code);
+          }
         }
-      } else {
-        // No trampoline for non-static methods.
-        code = called->GetEntryPointFromQuickCompiledCode();
       }
     } else {
       DCHECK(called_class->IsErroneous());
@@ -1567,7 +1471,7 @@
   static constexpr bool kAlignDoubleOnStack = true;
 #elif defined(__aarch64__)
   static constexpr bool kNativeSoftFloatAbi = false;  // This is a hard float ABI.
-  static constexpr size_t kNumNativeGprArgs = 8;  // 6 arguments passed in GPRs.
+  static constexpr size_t kNumNativeGprArgs = 8;  // 8 arguments passed in GPRs.
   static constexpr size_t kNumNativeFprArgs = 8;  // 8 arguments passed in FPRs.
 
   static constexpr size_t kRegistersNeededForLong = 1;
@@ -1577,36 +1481,11 @@
   static constexpr bool kMultiGPRegistersWidened = false;
   static constexpr bool kAlignLongOnStack = false;
   static constexpr bool kAlignDoubleOnStack = false;
-#elif defined(__mips__) && !defined(__LP64__)
-  static constexpr bool kNativeSoftFloatAbi = true;  // This is a hard float ABI.
-  static constexpr size_t kNumNativeGprArgs = 4;  // 4 arguments passed in GPRs.
-  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
-
-  static constexpr size_t kRegistersNeededForLong = 2;
-  static constexpr size_t kRegistersNeededForDouble = 2;
-  static constexpr bool kMultiRegistersAligned = true;
-  static constexpr bool kMultiFPRegistersWidened = true;
-  static constexpr bool kMultiGPRegistersWidened = false;
-  static constexpr bool kAlignLongOnStack = true;
-  static constexpr bool kAlignDoubleOnStack = true;
-#elif defined(__mips__) && defined(__LP64__)
-  // Let the code prepare GPRs only and we will load the FPRs with same data.
-  static constexpr bool kNativeSoftFloatAbi = true;
-  static constexpr size_t kNumNativeGprArgs = 8;
-  static constexpr size_t kNumNativeFprArgs = 0;
-
-  static constexpr size_t kRegistersNeededForLong = 1;
-  static constexpr size_t kRegistersNeededForDouble = 1;
-  static constexpr bool kMultiRegistersAligned = false;
-  static constexpr bool kMultiFPRegistersWidened = false;
-  static constexpr bool kMultiGPRegistersWidened = true;
-  static constexpr bool kAlignLongOnStack = false;
-  static constexpr bool kAlignDoubleOnStack = false;
 #elif defined(__i386__)
   // TODO: Check these!
   static constexpr bool kNativeSoftFloatAbi = false;  // Not using int registers for fp
-  static constexpr size_t kNumNativeGprArgs = 0;  // 6 arguments passed in GPRs.
-  static constexpr size_t kNumNativeFprArgs = 0;  // 8 arguments passed in FPRs.
+  static constexpr size_t kNumNativeGprArgs = 0;  // 0 arguments passed in GPRs.
+  static constexpr size_t kNumNativeFprArgs = 0;  // 0 arguments passed in FPRs.
 
   static constexpr size_t kRegistersNeededForLong = 2;
   static constexpr size_t kRegistersNeededForDouble = 2;
@@ -1878,38 +1757,13 @@
     return num_stack_entries_ * sizeof(uintptr_t);
   }
 
-  uint8_t* LayoutCallStack(uint8_t* sp8) const {
+  uint8_t* LayoutStackArgs(uint8_t* sp8) const {
     sp8 -= GetStackSize();
-    // Align by kStackAlignment.
+    // Align by kStackAlignment; it is at least as strict as native stack alignment.
     sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
     return sp8;
   }
 
-  uint8_t* LayoutCallRegisterStacks(uint8_t* sp8, uintptr_t** start_gpr, uint32_t** start_fpr)
-      const {
-    // Assumption is OK right now, as we have soft-float arm
-    size_t fregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
-    sp8 -= fregs * sizeof(uintptr_t);
-    *start_fpr = reinterpret_cast<uint32_t*>(sp8);
-    size_t iregs = BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
-    sp8 -= iregs * sizeof(uintptr_t);
-    *start_gpr = reinterpret_cast<uintptr_t*>(sp8);
-    return sp8;
-  }
-
-  uint8_t* LayoutNativeCall(uint8_t* sp8, uintptr_t** start_stack, uintptr_t** start_gpr,
-                            uint32_t** start_fpr) const {
-    // Native call stack.
-    sp8 = LayoutCallStack(sp8);
-    *start_stack = reinterpret_cast<uintptr_t*>(sp8);
-
-    // Put fprs and gprs below.
-    sp8 = LayoutCallRegisterStacks(sp8, start_gpr, start_fpr);
-
-    // Return the new bottom.
-    return sp8;
-  }
-
   virtual void WalkHeader(
       BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED)
       REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1983,80 +1837,53 @@
   explicit ComputeGenericJniFrameSize(bool critical_native)
     : num_handle_scope_references_(0), critical_native_(critical_native) {}
 
-  // Lays out the callee-save frame. Assumes that the incorrect frame corresponding to RefsAndArgs
-  // is at *m = sp. Will update to point to the bottom of the save frame.
-  //
-  // Note: assumes ComputeAll() has been run before.
-  void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    ArtMethod* method = **m;
-
+  uintptr_t* ComputeLayout(Thread* self,
+                           ArtMethod** managed_sp,
+                           const char* shorty,
+                           uint32_t shorty_len,
+                           HandleScope** handle_scope) REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
 
-    uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
-
-    // First, fix up the layout of the callee-save frame.
-    // We have to squeeze in the HandleScope, and relocate the method pointer.
-
-    // "Free" the slot for the method.
-    sp8 += sizeof(void*);  // In the callee-save frame we use a full pointer.
-
-    // Under the callee saves put handle scope and new method stack reference.
-    size_t handle_scope_size = HandleScope::SizeOf(num_handle_scope_references_);
-    size_t scope_and_method = handle_scope_size + sizeof(ArtMethod*);
-
-    sp8 -= scope_and_method;
-    // Align by kStackAlignment.
-    sp8 = reinterpret_cast<uint8_t*>(RoundDown(reinterpret_cast<uintptr_t>(sp8), kStackAlignment));
-
-    uint8_t* sp8_table = sp8 + sizeof(ArtMethod*);
-    *handle_scope = HandleScope::Create(sp8_table, self->GetTopHandleScope(),
-                                        num_handle_scope_references_);
-
-    // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
-    uint8_t* method_pointer = sp8;
-    auto** new_method_ref = reinterpret_cast<ArtMethod**>(method_pointer);
-    *new_method_ref = method;
-    *m = new_method_ref;
-  }
-
-  // Adds space for the cookie. Note: may leave stack unaligned.
-  void LayoutCookie(uint8_t** sp) const {
-    // Reference cookie and padding
-    *sp -= 8;
-  }
-
-  // Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
-  // Returns the new bottom. Note: this may be unaligned.
-  uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    // First, fix up the layout of the callee-save frame.
-    // We have to squeeze in the HandleScope, and relocate the method pointer.
-    LayoutCalleeSaveFrame(self, m, sp, handle_scope);
-
-    // The bottom of the callee-save frame is now where the method is, *m.
-    uint8_t* sp8 = reinterpret_cast<uint8_t*>(*m);
-
-    // Add space for cookie.
-    LayoutCookie(&sp8);
-
-    return sp8;
-  }
-
-  // WARNING: After this, *sp won't be pointing to the method anymore!
-  uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len,
-                         HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr,
-                         uint32_t** start_fpr)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
     Walk(shorty, shorty_len);
 
-    // JNI part.
-    uint8_t* sp8 = LayoutJNISaveFrame(self, m, reinterpret_cast<void*>(*m), handle_scope);
+    // Add space for cookie and HandleScope.
+    void* storage = GetGenericJniHandleScope(managed_sp, num_handle_scope_references_);
+    DCHECK_ALIGNED(storage, sizeof(uintptr_t));
+    *handle_scope =
+        HandleScope::Create(storage, self->GetTopHandleScope(), num_handle_scope_references_);
+    DCHECK_EQ(*handle_scope, storage);
+    uint8_t* sp8 = reinterpret_cast<uint8_t*>(*handle_scope);
+    DCHECK_GE(static_cast<size_t>(reinterpret_cast<uint8_t*>(managed_sp) - sp8),
+              HandleScope::SizeOf(num_handle_scope_references_) + kJniCookieSize);
 
-    sp8 = LayoutNativeCall(sp8, start_stack, start_gpr, start_fpr);
+    // Layout stack arguments.
+    sp8 = LayoutStackArgs(sp8);
 
     // Return the new bottom.
-    return sp8;
+    DCHECK_ALIGNED(sp8, sizeof(uintptr_t));
+    return reinterpret_cast<uintptr_t*>(sp8);
+  }
+
+  static uintptr_t* GetStartGprRegs(uintptr_t* reserved_area) {
+    return reserved_area;
+  }
+
+  static uint32_t* GetStartFprRegs(uintptr_t* reserved_area) {
+    constexpr size_t num_gprs =
+        BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeGprArgs;
+    return reinterpret_cast<uint32_t*>(GetStartGprRegs(reserved_area) + num_gprs);
+  }
+
+  static uintptr_t* GetHiddenArgSlot(uintptr_t* reserved_area) {
+    // Note: `num_fprs` is 0 on architectures where sizeof(uintptr_t) does not match the
+    // FP register size (it is actually 0 on all supported 32-bit architectures).
+    constexpr size_t num_fprs =
+        BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>::kNumNativeFprArgs;
+    return reinterpret_cast<uintptr_t*>(GetStartFprRegs(reserved_area)) + num_fprs;
+  }
+
+  static uintptr_t* GetOutArgsSpSlot(uintptr_t* reserved_area) {
+    return GetHiddenArgSlot(reserved_area) + 1;
   }
 
   uintptr_t PushHandle(mirror::Object* /* ptr */) override;
@@ -2145,20 +1972,33 @@
                               bool critical_native,
                               const char* shorty,
                               uint32_t shorty_len,
-                              ArtMethod*** sp)
-     : QuickArgumentVisitor(*sp, is_static, shorty, shorty_len),
+                              ArtMethod** managed_sp,
+                              uintptr_t* reserved_area)
+     : QuickArgumentVisitor(managed_sp, is_static, shorty, shorty_len),
        jni_call_(nullptr, nullptr, nullptr, nullptr, critical_native),
        sm_(&jni_call_) {
-    ComputeGenericJniFrameSize fsc(critical_native);
-    uintptr_t* start_gpr_reg;
-    uint32_t* start_fpr_reg;
-    uintptr_t* start_stack_arg;
-    bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len,
-                                             &handle_scope_,
-                                             &start_stack_arg,
-                                             &start_gpr_reg, &start_fpr_reg);
+    DCHECK_ALIGNED(managed_sp, kStackAlignment);
+    DCHECK_ALIGNED(reserved_area, sizeof(uintptr_t));
 
-    jni_call_.Reset(start_gpr_reg, start_fpr_reg, start_stack_arg, handle_scope_);
+    ComputeGenericJniFrameSize fsc(critical_native);
+    uintptr_t* out_args_sp =
+        fsc.ComputeLayout(self, managed_sp, shorty, shorty_len, &handle_scope_);
+
+    // Store hidden argument for @CriticalNative.
+    uintptr_t* hidden_arg_slot = fsc.GetHiddenArgSlot(reserved_area);
+    constexpr uintptr_t kGenericJniTag = 1u;
+    ArtMethod* method = *managed_sp;
+    *hidden_arg_slot = critical_native ? (reinterpret_cast<uintptr_t>(method) | kGenericJniTag)
+                                       : 0xebad6a89u;  // Bad value.
+
+    // Set out args SP.
+    uintptr_t* out_args_sp_slot = fsc.GetOutArgsSpSlot(reserved_area);
+    *out_args_sp_slot = reinterpret_cast<uintptr_t>(out_args_sp);
+
+    jni_call_.Reset(fsc.GetStartGprRegs(reserved_area),
+                    fsc.GetStartFprRegs(reserved_area),
+                    out_args_sp,
+                    handle_scope_);
 
     // First 2 parameters are always excluded for CriticalNative methods.
     if (LIKELY(!critical_native)) {
@@ -2166,7 +2006,7 @@
       sm_.AdvancePointer(self->GetJniEnv());
 
       if (is_static) {
-        sm_.AdvanceHandleScope((**sp)->GetDeclaringClass().Ptr());
+        sm_.AdvanceHandleScope(method->GetDeclaringClass().Ptr());
       }  // else "this" reference is already handled by QuickArgumentVisitor.
     }
   }
@@ -2183,10 +2023,6 @@
     return handle_scope_->GetHandle(0).ToJObject();
   }
 
-  void* GetBottomOfUsedArea() const {
-    return bottom_of_used_area_;
-  }
-
  private:
   // A class to fill a JNI call. Adds reference/handle-scope management to FillNativeCall.
   class FillJniCall final : public FillNativeCall {
@@ -2231,7 +2067,6 @@
 
   HandleScope* handle_scope_;
   FillJniCall jni_call_;
-  void* bottom_of_used_area_;
 
   BuildNativeCallFrameStateMachine<FillJniCall> sm_;
 
@@ -2302,57 +2137,28 @@
   }
 }
 
-#if defined(__arm__) || defined(__aarch64__)
-extern "C" const void* artFindNativeMethod();
-#else
-extern "C" const void* artFindNativeMethod(Thread* self);
-#endif
-
-static uint64_t artQuickGenericJniEndJNIRef(Thread* self,
-                                            uint32_t cookie,
-                                            bool fast_native ATTRIBUTE_UNUSED,
-                                            jobject l,
-                                            jobject lock) {
-  // TODO: add entrypoints for @FastNative returning objects.
-  if (lock != nullptr) {
-    return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(l, cookie, lock, self));
-  } else {
-    return reinterpret_cast<uint64_t>(JniMethodEndWithReference(l, cookie, self));
-  }
-}
-
-static void artQuickGenericJniEndJNINonRef(Thread* self,
-                                           uint32_t cookie,
-                                           bool fast_native,
-                                           jobject lock) {
-  if (lock != nullptr) {
-    JniMethodEndSynchronized(cookie, lock, self);
-    // Ignore "fast_native" here because synchronized functions aren't very fast.
-  } else {
-    if (UNLIKELY(fast_native)) {
-      JniMethodFastEnd(cookie, self);
-    } else {
-      JniMethodEnd(cookie, self);
-    }
-  }
-}
-
 /*
- * Initializes an alloca region assumed to be directly below sp for a native call:
- * Create a HandleScope and call stack and fill a mini stack with values to be pushed to registers.
- * The final element on the stack is a pointer to the native code.
+ * Initializes the reserved area assumed to be directly below `managed_sp` for a native call:
  *
- * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
- * We need to fix this, as the handle scope needs to go into the callee-save frame.
+ * On entry, the stack has a standard callee-save frame above `managed_sp`,
+ * and the reserved area below it. Starting below `managed_sp`, we reserve space
+ * for local reference cookie (not present for @CriticalNative), HandleScope
+ * (not present for @CriticalNative) and stack args (if args do not fit into
+ * registers). At the bottom of the reserved area, there is space for register
+ * arguments, hidden arg (for @CriticalNative) and the SP for the native call
+ * (i.e. pointer to the stack args area), which the calling stub shall load
+ * to perform the native call. We fill all these fields, perform class init
+ * check (for static methods) and/or locking (for synchronized methods) if
+ * needed and return to the stub.
  *
- * The return of this function denotes:
- * 1) How many bytes of the alloca can be released, if the value is non-negative.
- * 2) An error, if the value is negative.
+ * The return value is the pointer to the native code, null on failure.
  */
-extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp)
+extern "C" const void* artQuickGenericJniTrampoline(Thread* self,
+                                                    ArtMethod** managed_sp,
+                                                    uintptr_t* reserved_area)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   // Note: We cannot walk the stack properly until fixed up below.
-  ArtMethod* called = *sp;
+  ArtMethod* called = *managed_sp;
   DCHECK(called->IsNative()) << called->PrettyMethod(true);
   Runtime* runtime = Runtime::Current();
   uint32_t shorty_len = 0;
@@ -2367,7 +2173,8 @@
                                       critical_native,
                                       shorty,
                                       shorty_len,
-                                      &sp);
+                                      managed_sp,
+                                      reserved_area);
   {
     ScopedAssertNoThreadSuspension sants(__FUNCTION__);
     visitor.VisitArguments();
@@ -2376,7 +2183,7 @@
   }
 
   // Fix up managed-stack things in Thread. After this we can walk the stack.
-  self->SetTopOfStackTagged(sp);
+  self->SetTopOfStackTagged(managed_sp);
 
   self->VerifyStack();
 
@@ -2386,6 +2193,23 @@
     jit->MethodEntered(self, called);
   }
 
+  // We can set the entrypoint of a native method to generic JNI even when the
+  // class hasn't been initialized, so we need to do the initialization check
+  // before invoking the native code.
+  if (NeedsClinitCheckBeforeCall(called)) {
+    ObjPtr<mirror::Class> declaring_class = called->GetDeclaringClass();
+    if (UNLIKELY(!declaring_class->IsVisiblyInitialized())) {
+      // Ensure static method's class is initialized.
+      StackHandleScope<1> hs(self);
+      Handle<mirror::Class> h_class(hs.NewHandle(declaring_class));
+      if (!runtime->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) {
+        DCHECK(Thread::Current()->IsExceptionPending()) << called->PrettyMethod();
+        self->PopHandleScope();
+        return nullptr;  // Report error.
+      }
+    }
+  }
+
   uint32_t cookie;
   uint32_t* sp32;
   // Skip calling JniMethodStart for @CriticalNative.
@@ -2396,8 +2220,7 @@
       cookie = JniMethodStartSynchronized(visitor.GetFirstHandleScopeJObject(), self);
       if (self->IsExceptionPending()) {
         self->PopHandleScope();
-        // A negative value denotes an error.
-        return GetTwoWordFailureValue();
+        return nullptr;  // Report error.
       }
     } else {
       if (fast_native) {
@@ -2407,94 +2230,32 @@
         cookie = JniMethodStart(self);
       }
     }
-    sp32 = reinterpret_cast<uint32_t*>(sp);
+    sp32 = reinterpret_cast<uint32_t*>(managed_sp);
     *(sp32 - 1) = cookie;
   }
 
   // Retrieve the stored native code.
+  // Note that it may point to the lookup stub or trampoline.
+  // FIXME: This is broken for @CriticalNative as the art_jni_dlsym_lookup_stub
+  // does not handle that case. Calls from compiled stubs are also broken.
   void const* nativeCode = called->GetEntryPointFromJni();
 
-  // There are two cases for the content of nativeCode:
-  // 1) Pointer to the native function.
-  // 2) Pointer to the trampoline for native code binding.
-  // In the second case, we need to execute the binding and continue with the actual native function
-  // pointer.
-  DCHECK(nativeCode != nullptr);
-  if (nativeCode == GetJniDlsymLookupStub()) {
-#if defined(__arm__) || defined(__aarch64__)
-    nativeCode = artFindNativeMethod();
-#else
-    nativeCode = artFindNativeMethod(self);
-#endif
-
-    if (nativeCode == nullptr) {
-      DCHECK(self->IsExceptionPending());    // There should be an exception pending now.
-
-      // @CriticalNative calls do not need to call back into JniMethodEnd.
-      if (LIKELY(!critical_native)) {
-        // End JNI, as the assembly will move to deliver the exception.
-        jobject lock = called->IsSynchronized() ? visitor.GetFirstHandleScopeJObject() : nullptr;
-        if (shorty[0] == 'L') {
-          artQuickGenericJniEndJNIRef(self, cookie, fast_native, nullptr, lock);
-        } else {
-          artQuickGenericJniEndJNINonRef(self, cookie, fast_native, lock);
-        }
-      }
-
-      return GetTwoWordFailureValue();
-    }
-    // Note that the native code pointer will be automatically set by artFindNativeMethod().
-  }
-
-#if defined(__mips__) && !defined(__LP64__)
-  // On MIPS32 if the first two arguments are floating-point, we need to know their types
-  // so that art_quick_generic_jni_trampoline can correctly extract them from the stack
-  // and load into floating-point registers.
-  // Possible arrangements of first two floating-point arguments on the stack (32-bit FPU
-  // view):
-  // (1)
-  //  |     DOUBLE    |     DOUBLE    | other args, if any
-  //  |  F12  |  F13  |  F14  |  F15  |
-  //  |  SP+0 |  SP+4 |  SP+8 | SP+12 | SP+16
-  // (2)
-  //  |     DOUBLE    | FLOAT | (PAD) | other args, if any
-  //  |  F12  |  F13  |  F14  |       |
-  //  |  SP+0 |  SP+4 |  SP+8 | SP+12 | SP+16
-  // (3)
-  //  | FLOAT | (PAD) |     DOUBLE    | other args, if any
-  //  |  F12  |       |  F14  |  F15  |
-  //  |  SP+0 |  SP+4 |  SP+8 | SP+12 | SP+16
-  // (4)
-  //  | FLOAT | FLOAT | other args, if any
-  //  |  F12  |  F14  |
-  //  |  SP+0 |  SP+4 | SP+8
-  // As you can see, only the last case (4) is special. In all others we can just
-  // load F12/F13 and F14/F15 in the same manner.
-  // Set bit 0 of the native code address to 1 in this case (valid code addresses
-  // are always a multiple of 4 on MIPS32, so we have 2 spare bits available).
-  if (nativeCode != nullptr &&
-      shorty != nullptr &&
-      shorty_len >= 3 &&
-      shorty[1] == 'F' &&
-      shorty[2] == 'F') {
-    nativeCode = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(nativeCode) | 1);
-  }
-#endif
-
   VLOG(third_party_jni) << "GenericJNI: "
                         << called->PrettyMethod()
                         << " -> "
                         << std::hex << reinterpret_cast<uintptr_t>(nativeCode);
 
-  // Return native code addr(lo) and bottom of alloca address(hi).
-  return GetTwoWordSuccessValue(reinterpret_cast<uintptr_t>(visitor.GetBottomOfUsedArea()),
-                                reinterpret_cast<uintptr_t>(nativeCode));
+  // Return native code.
+  return nativeCode;
 }
 
 // Defined in quick_jni_entrypoints.cc.
-extern uint64_t GenericJniMethodEnd(Thread* self, uint32_t saved_local_ref_cookie,
-                                    jvalue result, uint64_t result_f, ArtMethod* called,
-                                    HandleScope* handle_scope);
+extern uint64_t GenericJniMethodEnd(Thread* self,
+                                    uint32_t saved_local_ref_cookie,
+                                    jvalue result,
+                                    uint64_t result_f,
+                                    ArtMethod* called);
+
 /*
  * Is called after the native JNI code. Responsible for cleanup (handle scope, saved state) and
  * unlocking.
@@ -2511,8 +2272,15 @@
   uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
   ArtMethod* called = *sp;
   uint32_t cookie = *(sp32 - 1);
-  HandleScope* table = reinterpret_cast<HandleScope*>(reinterpret_cast<uint8_t*>(sp) + sizeof(*sp));
-  return GenericJniMethodEnd(self, cookie, result, result_f, called, table);
+  if (kIsDebugBuild && !called->IsCriticalNative()) {
+    BaseHandleScope* handle_scope = self->GetTopHandleScope();
+    DCHECK(handle_scope != nullptr);
+    DCHECK(!handle_scope->IsVariableSized());
+    // Note: We do not hold mutator lock here for normal JNI, so we cannot use the method's shorty
+    // to determine the number of references. Instead rely on the value from the HandleScope.
+    DCHECK_EQ(handle_scope, GetGenericJniHandleScope(sp, handle_scope->NumberOfReferences()));
+  }
+  return GenericJniMethodEnd(self, cookie, result, result_f, called);
 }
 
 // We use TwoWordReturn to optimize scalar returns. We use the hi value for code, and the lo value
@@ -2685,6 +2453,11 @@
     }
   }
 
+  // The compiler and interpreter make sure the conflict trampoline is never
+  // called on a method that resolves to j.l.Object.
+  CHECK(!interface_method->GetDeclaringClass()->IsObjectClass());
+  CHECK(interface_method->GetDeclaringClass()->IsInterface());
+
   DCHECK(!interface_method->IsRuntimeMethod());
   // Look whether we have a match in the ImtConflictTable.
   uint32_t imt_index = interface_method->GetImtIndex();
diff --git a/runtime/entrypoints/runtime_asm_entrypoints.h b/runtime/entrypoints/runtime_asm_entrypoints.h
index fa287cb..9f47034 100644
--- a/runtime/entrypoints/runtime_asm_entrypoints.h
+++ b/runtime/entrypoints/runtime_asm_entrypoints.h
@@ -19,8 +19,13 @@
 
 #include "deoptimization_kind.h"
 
+#include "jni.h"
+
 namespace art {
 
+class ArtMethod;
+class Thread;
+
 #ifndef BUILDING_LIBART
 #error "File and symbols only for use within libart."
 #endif
@@ -30,6 +35,11 @@
   return reinterpret_cast<const void*>(art_jni_dlsym_lookup_stub);
 }
 
+extern "C" void* art_jni_dlsym_lookup_critical_stub(JNIEnv*, jobject);
+static inline const void* GetJniDlsymLookupCriticalStub() {
+  return reinterpret_cast<const void*>(art_jni_dlsym_lookup_critical_stub);
+}
+
 // Return the address of quick stub code for handling IMT conflicts.
 extern "C" void art_quick_imt_conflict_trampoline(ArtMethod*);
 static inline const void* GetQuickImtConflictStub() {
@@ -87,6 +97,9 @@
   return reinterpret_cast<const void*>(art_quick_instrumentation_exit);
 }
 
+extern "C" void* art_quick_string_builder_append(uint32_t format);
+extern "C" void art_quick_compile_optimized(ArtMethod*, Thread*);
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_ENTRYPOINTS_RUNTIME_ASM_ENTRYPOINTS_H_
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 040a8c5..52c4142 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -100,9 +100,7 @@
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, top_handle_scope, class_loader_override, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, class_loader_override, long_jump_context, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, long_jump_context, instrumentation_stack, sizeof(void*));
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, instrumentation_stack, debug_invoke_req, sizeof(void*));
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, debug_invoke_req, single_step_control, sizeof(void*));
-    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, single_step_control, stacked_shadow_frame_record,
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, instrumentation_stack, stacked_shadow_frame_record,
                         sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stacked_shadow_frame_record,
                         deoptimization_context_stack, sizeof(void*));
@@ -136,18 +134,25 @@
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, flip_function, method_verifier, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, method_verifier, thread_local_mark_stack, sizeof(void*));
     EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, thread_local_mark_stack, async_exception, sizeof(void*));
+    EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, async_exception, top_reflective_handle_scope,
+                        sizeof(void*));
     // The first field after tlsPtr_ is forced to a 16 byte alignment so it might have some space.
     auto offset_tlsptr_end = OFFSETOF_MEMBER(Thread, tlsPtr_) +
         sizeof(decltype(reinterpret_cast<Thread*>(16)->tlsPtr_));
-    CHECKED(offset_tlsptr_end - OFFSETOF_MEMBER(Thread, tlsPtr_.async_exception) == sizeof(void*),
+    CHECKED(offset_tlsptr_end - OFFSETOF_MEMBER(Thread, tlsPtr_.top_reflective_handle_scope) ==
+                sizeof(void*),
             "async_exception last field");
   }
 
   void CheckJniEntryPoints() {
     CHECKED(OFFSETOF_MEMBER(JniEntryPoints, pDlsymLookup) == 0,
             JniEntryPoints_start_with_dlsymlookup);
-    CHECKED(OFFSETOF_MEMBER(JniEntryPoints, pDlsymLookup)
-            + sizeof(void*) == sizeof(JniEntryPoints), JniEntryPoints_all);
+    CHECKED(OFFSETOF_MEMBER(JniEntryPoints, pDlsymLookup) + sizeof(void*) ==
+                OFFSETOF_MEMBER(JniEntryPoints, pDlsymLookupCritical),
+            JniEntryPoints_dlsymlookup_critical);
+    CHECKED(OFFSETOF_MEMBER(JniEntryPoints, pDlsymLookupCritical) + sizeof(void*) ==
+                sizeof(JniEntryPoints),
+            JniEntryPoints_all);
   }
 
   void CheckQuickEntryPoints() {
@@ -331,9 +336,16 @@
                          sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromStringBuffer, pNewStringFromStringBuilder,
                          sizeof(void*));
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromStringBuilder, pReadBarrierJni,
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pNewStringFromStringBuilder, pStringBuilderAppend,
                          sizeof(void*));
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierJni, pReadBarrierMarkReg00, sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pStringBuilderAppend, pUpdateInlineCache,
+                         sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pUpdateInlineCache, pCompileOptimized,
+                         sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pCompileOptimized, pReadBarrierJni,
+                         sizeof(void*));
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierJni, pReadBarrierMarkReg00,
+                         sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierMarkReg00, pReadBarrierMarkReg01,
                          sizeof(void*));
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pReadBarrierMarkReg01, pReadBarrierMarkReg02,
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 5c2830d..cae7deb 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -291,10 +291,11 @@
   ArtMethod* method_obj = nullptr;
   uintptr_t return_pc = 0;
   uintptr_t sp = 0;
+  bool is_stack_overflow = false;
 
   // Get the architecture specific method address and return address.  These
   // are in architecture specific files in arch/<arch>/fault_handler_<arch>.
-  GetMethodAndReturnPcAndSp(siginfo, context, &method_obj, &return_pc, &sp);
+  GetMethodAndReturnPcAndSp(siginfo, context, &method_obj, &return_pc, &sp, &is_stack_overflow);
 
   // If we don't have a potential method, we're outta here.
   VLOG(signals) << "potential method: " << method_obj;
@@ -336,7 +337,15 @@
         reinterpret_cast<uintptr_t>(method_header->GetEntryPoint());
     VLOG(signals) << "pc offset: " << std::hex << sought_offset;
   }
-  uint32_t dexpc = method_header->ToDexPc(method_obj, return_pc, false);
+  uint32_t dexpc = dex::kDexNoIndex;
+  if (is_stack_overflow) {
+    // If it's an implicit stack overflow check, the frame is not setup, so we
+    // just infer the dex PC as zero.
+    dexpc = 0;
+  } else {
+    CHECK_EQ(*reinterpret_cast<ArtMethod**>(sp), method_obj);
+    dexpc = method_header->ToDexPc(reinterpret_cast<ArtMethod**>(sp), return_pc, false);
+  }
   VLOG(signals) << "dexpc: " << dexpc;
   return !check_dex_pc || dexpc != dex::kDexNoIndex;
 }
@@ -380,9 +389,11 @@
     ArtMethod* method = nullptr;
     uintptr_t return_pc = 0;
     uintptr_t sp = 0;
+    bool is_stack_overflow = false;
     Thread* self = Thread::Current();
 
-    manager_->GetMethodAndReturnPcAndSp(siginfo, context, &method, &return_pc, &sp);
+    manager_->GetMethodAndReturnPcAndSp(
+        siginfo, context, &method, &return_pc, &sp, &is_stack_overflow);
     // Inside of generated code, sp[0] is the method, so sp is the frame.
     self->SetTopOfStack(reinterpret_cast<ArtMethod**>(sp));
     self->DumpJavaStack(LOG_STREAM(ERROR));
diff --git a/runtime/fault_handler.h b/runtime/fault_handler.h
index f6cf2d7..8b89c22 100644
--- a/runtime/fault_handler.h
+++ b/runtime/fault_handler.h
@@ -55,8 +55,12 @@
   // The IsInGeneratedCode() function checks that the mutator lock is held before it
   // calls GetMethodAndReturnPCAndSP().
   // TODO: think about adding lock assertions and fake lock and unlock functions.
-  void GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context, ArtMethod** out_method,
-                                 uintptr_t* out_return_pc, uintptr_t* out_sp)
+  void GetMethodAndReturnPcAndSp(siginfo_t* siginfo,
+                                 void* context,
+                                 ArtMethod** out_method,
+                                 uintptr_t* out_return_pc,
+                                 uintptr_t* out_sp,
+                                 bool* out_is_stack_overflow)
                                  NO_THREAD_SAFETY_ANALYSIS;
   bool IsInGeneratedCode(siginfo_t* siginfo, void *context, bool check_dex_pc)
                          NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index bd1a326..37646b3 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -21,7 +21,7 @@
 #include "base/bit_utils.h"
 #include "base/mem_map.h"
 #include "card_table.h"
-#include "jit/jit_code_cache.h"
+#include "jit/jit_memory_region.h"
 
 namespace art {
 namespace gc {
diff --git a/runtime/gc/accounting/heap_bitmap.cc b/runtime/gc/accounting/heap_bitmap.cc
index 1d729ff..4a3902e 100644
--- a/runtime/gc/accounting/heap_bitmap.cc
+++ b/runtime/gc/accounting/heap_bitmap.cc
@@ -23,23 +23,6 @@
 namespace gc {
 namespace accounting {
 
-void HeapBitmap::ReplaceBitmap(ContinuousSpaceBitmap* old_bitmap,
-                               ContinuousSpaceBitmap* new_bitmap) {
-  auto it = std::find(continuous_space_bitmaps_.begin(), continuous_space_bitmaps_.end(),
-                      old_bitmap);
-  CHECK(it != continuous_space_bitmaps_.end()) << " continuous space bitmap " << old_bitmap
-      << " not found";
-  *it = new_bitmap;
-}
-
-void HeapBitmap::ReplaceLargeObjectBitmap(LargeObjectBitmap* old_bitmap,
-                                          LargeObjectBitmap* new_bitmap) {
-  auto it = std::find(large_object_bitmaps_.begin(), large_object_bitmaps_.end(), old_bitmap);
-  CHECK(it != large_object_bitmaps_.end()) << " large object bitmap " << old_bitmap
-      << " not found";
-  *it = new_bitmap;
-}
-
 void HeapBitmap::AddContinuousSpaceBitmap(accounting::ContinuousSpaceBitmap* bitmap) {
   DCHECK(bitmap != nullptr);
   // Check that there is no bitmap overlap.
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index e477556..a5f4499 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -55,14 +55,6 @@
       REQUIRES(Locks::heap_bitmap_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Find and replace a bitmap pointer, this is used by for the bitmap swapping in the GC.
-  void ReplaceBitmap(ContinuousSpaceBitmap* old_bitmap, ContinuousSpaceBitmap* new_bitmap)
-      REQUIRES(Locks::heap_bitmap_lock_);
-
-  // Find and replace a object set pointer, this is used by for the bitmap swapping in the GC.
-  void ReplaceLargeObjectBitmap(LargeObjectBitmap* old_bitmap, LargeObjectBitmap* new_bitmap)
-      REQUIRES(Locks::heap_bitmap_lock_);
-
   explicit HeapBitmap(Heap* heap) : heap_(heap) {}
 
  private:
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 4029057..3c5688d 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -48,13 +48,12 @@
 }
 
 template<size_t kAlignment>
-SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::CreateFromMemMap(
+SpaceBitmap<kAlignment> SpaceBitmap<kAlignment>::CreateFromMemMap(
     const std::string& name, MemMap&& mem_map, uint8_t* heap_begin, size_t heap_capacity) {
   CHECK(mem_map.IsValid());
   uintptr_t* bitmap_begin = reinterpret_cast<uintptr_t*>(mem_map.Begin());
   const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
-  return new SpaceBitmap(
-      name, std::move(mem_map), bitmap_begin, bitmap_size, heap_begin, heap_capacity);
+  return { name, std::move(mem_map), bitmap_begin, bitmap_size, heap_begin, heap_capacity };
 }
 
 template<size_t kAlignment>
@@ -78,7 +77,7 @@
 SpaceBitmap<kAlignment>::~SpaceBitmap() {}
 
 template<size_t kAlignment>
-SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create(
+SpaceBitmap<kAlignment> SpaceBitmap<kAlignment>::Create(
     const std::string& name, uint8_t* heap_begin, size_t heap_capacity) {
   // Round up since `heap_capacity` is not necessarily a multiple of `kAlignment * kBitsPerIntPtrT`
   // (we represent one word as an `intptr_t`).
@@ -91,7 +90,7 @@
                                         &error_msg);
   if (UNLIKELY(!mem_map.IsValid())) {
     LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
-    return nullptr;
+    return SpaceBitmap<kAlignment>();
   }
   return CreateFromMemMap(name, std::move(mem_map), heap_begin, heap_capacity);
 }
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 6ca254a..fe98741 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -45,15 +45,15 @@
 
   // Initialize a space bitmap so that it points to a bitmap large enough to cover a heap at
   // heap_begin of heap_capacity bytes, where objects are guaranteed to be kAlignment-aligned.
-  static SpaceBitmap* Create(const std::string& name, uint8_t* heap_begin, size_t heap_capacity);
+  static SpaceBitmap Create(const std::string& name, uint8_t* heap_begin, size_t heap_capacity);
 
   // Initialize a space bitmap using the provided mem_map as the live bits. Takes ownership of the
   // mem map. The address range covered starts at heap_begin and is of size equal to heap_capacity.
   // Objects are kAlignement-aligned.
-  static SpaceBitmap* CreateFromMemMap(const std::string& name,
-                                       MemMap&& mem_map,
-                                       uint8_t* heap_begin,
-                                       size_t heap_capacity);
+  static SpaceBitmap CreateFromMemMap(const std::string& name,
+                                      MemMap&& mem_map,
+                                      uint8_t* heap_begin,
+                                      size_t heap_capacity);
 
   ~SpaceBitmap();
 
@@ -124,19 +124,6 @@
     return index < bitmap_size_ / sizeof(intptr_t);
   }
 
-  class ClearVisitor {
-   public:
-    explicit ClearVisitor(SpaceBitmap* const bitmap)
-        : bitmap_(bitmap) {
-    }
-
-    void operator()(mirror::Object* obj) const {
-      bitmap_->Clear(obj);
-    }
-   private:
-    SpaceBitmap* const bitmap_;
-  };
-
   template <typename Visitor>
   void VisitRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const {
     for (; visit_begin < visit_end; visit_begin += kAlignment) {
@@ -219,6 +206,26 @@
   static size_t ComputeBitmapSize(uint64_t capacity);
   static size_t ComputeHeapSize(uint64_t bitmap_bytes);
 
+  // A default-constructed bitmap is invalid (IsValid() returns false) until it is
+  // move-assigned from a bitmap produced by Create() or CreateFromMemMap().
+
+  SpaceBitmap() = default;
+  SpaceBitmap(SpaceBitmap&&) = default;
+  SpaceBitmap& operator=(SpaceBitmap&&) = default;
+
+  bool IsValid() const {
+    return bitmap_begin_ != nullptr;
+  }
+
+  // Copy a view of the other bitmap without taking ownership of the underlying data.
+  void CopyView(SpaceBitmap& other) {
+    bitmap_begin_ = other.bitmap_begin_;
+    bitmap_size_ = other.bitmap_size_;
+    heap_begin_ = other.heap_begin_;
+    heap_limit_ = other.heap_limit_;
+    name_ = other.name_;
+  }
+
  private:
   // TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
   // however, we document that this is expected on heap_end_
@@ -238,17 +245,17 @@
   MemMap mem_map_;
 
   // This bitmap itself, word sized for efficiency in scanning.
-  Atomic<uintptr_t>* const bitmap_begin_;
+  Atomic<uintptr_t>* bitmap_begin_ = nullptr;
 
   // Size of this bitmap.
-  size_t bitmap_size_;
+  size_t bitmap_size_ = 0u;
 
   // The start address of the memory covered by the bitmap, which corresponds to the word
   // containing the first bit in the bitmap.
-  const uintptr_t heap_begin_;
+  uintptr_t heap_begin_ = 0u;
 
   // The end address of the memory covered by the bitmap. This may not be on a word boundary.
-  uintptr_t heap_limit_;
+  uintptr_t heap_limit_ = 0u;
 
   // Name of this bitmap.
   std::string name_;
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 9f355e3..3a69865 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -33,9 +33,9 @@
 TEST_F(SpaceBitmapTest, Init) {
   uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x10000000);
   size_t heap_capacity = 16 * MB;
-  std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
+  ContinuousSpaceBitmap space_bitmap(
       ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
-  EXPECT_TRUE(space_bitmap.get() != nullptr);
+  EXPECT_TRUE(space_bitmap.IsValid());
 }
 
 class BitmapVerify {
@@ -61,16 +61,16 @@
   uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x10000000);
   size_t heap_capacity = 16 * MB;
 
-  std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
+  ContinuousSpaceBitmap space_bitmap(
       ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
-  EXPECT_TRUE(space_bitmap != nullptr);
+  EXPECT_TRUE(space_bitmap.IsValid());
 
   // Set all the odd bits in the first BitsPerIntPtrT * 3 to one.
   for (size_t j = 0; j < kBitsPerIntPtrT * 3; ++j) {
     const mirror::Object* obj =
         reinterpret_cast<mirror::Object*>(heap_begin + j * kObjectAlignment);
     if (reinterpret_cast<uintptr_t>(obj) & 0xF) {
-      space_bitmap->Set(obj);
+      space_bitmap.Set(obj);
     }
   }
   // Try every possible starting bit in the first word. Then for each starting bit, try each
@@ -83,7 +83,7 @@
     for (size_t j = 0; j < static_cast<size_t>(kBitsPerIntPtrT * 2); ++j) {
       mirror::Object* end =
           reinterpret_cast<mirror::Object*>(heap_begin + (i + j) * kObjectAlignment);
-      BitmapVerify(space_bitmap.get(), start, end);
+      BitmapVerify(&space_bitmap, start, end);
     }
   }
 }
@@ -92,14 +92,14 @@
   uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x10000000);
   size_t heap_capacity = 16 * MB;
 
-  std::unique_ptr<ContinuousSpaceBitmap> bitmap(
+  ContinuousSpaceBitmap bitmap(
       ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
-  EXPECT_TRUE(bitmap != nullptr);
+  EXPECT_TRUE(bitmap.IsValid());
 
   // Set all of the bits in the bitmap.
   for (size_t j = 0; j < heap_capacity; j += kObjectAlignment) {
     const mirror::Object* obj = reinterpret_cast<mirror::Object*>(heap_begin + j);
-    bitmap->Set(obj);
+    bitmap.Set(obj);
   }
 
   std::vector<std::pair<uintptr_t, uintptr_t>> ranges = {
@@ -113,18 +113,18 @@
   for (const std::pair<uintptr_t, uintptr_t>& range : ranges) {
     const mirror::Object* obj_begin = reinterpret_cast<mirror::Object*>(heap_begin + range.first);
     const mirror::Object* obj_end = reinterpret_cast<mirror::Object*>(heap_begin + range.second);
-    bitmap->ClearRange(obj_begin, obj_end);
+    bitmap.ClearRange(obj_begin, obj_end);
     // Boundaries should still be marked.
     for (uintptr_t i = 0; i < range.first; i += kObjectAlignment) {
-      EXPECT_TRUE(bitmap->Test(reinterpret_cast<mirror::Object*>(heap_begin + i)));
+      EXPECT_TRUE(bitmap.Test(reinterpret_cast<mirror::Object*>(heap_begin + i)));
     }
     for (uintptr_t i = range.second; i < range.second + kPageSize; i += kObjectAlignment) {
-      EXPECT_TRUE(bitmap->Test(reinterpret_cast<mirror::Object*>(heap_begin + i)));
+      EXPECT_TRUE(bitmap.Test(reinterpret_cast<mirror::Object*>(heap_begin + i)));
     }
     // Everything inside should be cleared.
     for (uintptr_t i = range.first; i < range.second; i += kObjectAlignment) {
-      EXPECT_FALSE(bitmap->Test(reinterpret_cast<mirror::Object*>(heap_begin + i)));
-      bitmap->Set(reinterpret_cast<mirror::Object*>(heap_begin + i));
+      EXPECT_FALSE(bitmap.Test(reinterpret_cast<mirror::Object*>(heap_begin + i)));
+      bitmap.Set(reinterpret_cast<mirror::Object*>(heap_begin + i));
     }
   }
 }
@@ -162,7 +162,7 @@
   RandGen r(0x1234);
 
   for (int i = 0; i < 5 ; ++i) {
-    std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
+    ContinuousSpaceBitmap space_bitmap(
         ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
 
     for (int j = 0; j < 10000; ++j) {
@@ -170,9 +170,9 @@
       bool set = r.next() % 2 == 1;
 
       if (set) {
-        space_bitmap->Set(reinterpret_cast<mirror::Object*>(heap_begin + offset));
+        space_bitmap.Set(reinterpret_cast<mirror::Object*>(heap_begin + offset));
       } else {
-        space_bitmap->Clear(reinterpret_cast<mirror::Object*>(heap_begin + offset));
+        space_bitmap.Clear(reinterpret_cast<mirror::Object*>(heap_begin + offset));
       }
     }
 
@@ -183,7 +183,7 @@
 
       size_t manual = 0;
       for (uintptr_t k = offset; k < end; k += kAlignment) {
-        if (space_bitmap->Test(reinterpret_cast<mirror::Object*>(heap_begin + k))) {
+        if (space_bitmap.Test(reinterpret_cast<mirror::Object*>(heap_begin + k))) {
           manual++;
         }
       }
@@ -191,7 +191,7 @@
       uintptr_t range_begin = reinterpret_cast<uintptr_t>(heap_begin) + offset;
       uintptr_t range_end = reinterpret_cast<uintptr_t>(heap_begin) + end;
 
-      fn(space_bitmap.get(), range_begin, range_end, manual);
+      fn(&space_bitmap, range_begin, range_end, manual);
     }
   }
 }
diff --git a/runtime/gc/allocation_listener.h b/runtime/gc/allocation_listener.h
index a578252..376b524 100644
--- a/runtime/gc/allocation_listener.h
+++ b/runtime/gc/allocation_listener.h
@@ -23,11 +23,13 @@
 #include "base/locks.h"
 #include "base/macros.h"
 #include "gc_root.h"
+#include "handle.h"
 #include "obj_ptr.h"
 
 namespace art {
 
 namespace mirror {
+class Class;
 class Object;
 }  // namespace mirror
 
@@ -39,6 +41,26 @@
  public:
   virtual ~AllocationListener() {}
 
+  // An event to allow a listener to intercept and modify an allocation before it takes place.
+  // The listener can change the byte_count and type as they see fit. Extreme caution should be used
+  // when doing so. This can also be used to control allocation occurring on another thread.
+  //
+  // Concurrency guarantees: This might be called multiple times for each single allocation. It's
+  // guaranteed that, between the final call to the callback and the object being visible to
+  // heap-walks there are no suspensions. If a suspension was allowed between these events the
+  // callback will be invoked again after passing the suspend point.
+  //
+  // If the alloc succeeds it is guaranteed there are no suspend-points between the last return of
+  // PreObjectAllocated and the newly allocated object being visible to heap-walks.
+  //
+  // This can also be used to make any last-minute changes to the type or size of the allocation.
+  virtual void PreObjectAllocated(Thread* self ATTRIBUTE_UNUSED,
+                                  MutableHandle<mirror::Class> type ATTRIBUTE_UNUSED,
+                                  size_t* byte_count ATTRIBUTE_UNUSED)
+      REQUIRES(!Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_) {}
+  // Fast check if we want to get the PreObjectAllocated callback, to avoid the expense of creating
+  // handles. Defaults to false.
+  virtual bool HasPreAlloc() const { return false; }
   virtual void ObjectAllocated(Thread* self, ObjPtr<mirror::Object>* obj, size_t byte_count)
       REQUIRES_SHARED(Locks::mutator_lock_) = 0;
 };
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 4900a9a..60fb71d 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -153,11 +153,6 @@
       }
       CHECK(records != nullptr);
       records->SetMaxStackDepth(heap->GetAllocTrackerStackDepth());
-      std::string self_name;
-      self->GetThreadName(self_name);
-      if (self_name == "JDWP") {
-        records->alloc_ddm_thread_id_ = self->GetTid();
-      }
       size_t sz = sizeof(AllocRecordStackTraceElement) * records->max_stack_depth_ +
                   sizeof(AllocRecord) + sizeof(AllocRecordStackTrace);
       LOG(INFO) << "Enabling alloc tracker (" << records->alloc_record_max_ << " entries of "
@@ -222,10 +217,9 @@
     return;
   }
 
-  // Do not record for DDM thread.
-  if (alloc_ddm_thread_id_ == self->GetTid()) {
-    return;
-  }
+  // TODO Skip recording allocations associated with DDMS. This was a feature of the old debugger
+  // but when we switched to the JVMTI based debugger the feature was (unintentionally) broken.
+  // Since nobody seemed to really notice or care it might not be worth the trouble.
 
   // Wait for GC's sweeping to complete and allow new records.
   while (UNLIKELY((!kUseReadBarrier && !allow_new_record_) ||
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index 7c4181c..405d060 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -299,7 +299,6 @@
   size_t alloc_record_max_ GUARDED_BY(Locks::alloc_tracker_lock_) = kDefaultNumAllocRecords;
   size_t recent_record_max_ GUARDED_BY(Locks::alloc_tracker_lock_) = kDefaultNumRecentRecords;
   size_t max_stack_depth_ = kDefaultAllocStackDepth;
-  pid_t alloc_ddm_thread_id_  GUARDED_BY(Locks::alloc_tracker_lock_) = 0;
   bool allow_new_record_ GUARDED_BY(Locks::alloc_tracker_lock_) = true;
   ConditionVariable new_record_condition_ GUARDED_BY(Locks::alloc_tracker_lock_);
   // see the comment in typedef of EntryList
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 2faa7e5..f1572cd 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -890,7 +890,7 @@
   DCHECK(IsThreadLocal());
   // Merge the thread local free list into the free list and clear the thread local free list.
   const uint8_t idx = size_bracket_idx_;
-  bool thread_local_free_list_size = thread_local_free_list_.Size();
+  size_t thread_local_free_list_size = thread_local_free_list_.Size();
   const size_t size_before = free_list_.Size();
   free_list_.Merge(&thread_local_free_list_);
   const size_t size_after = free_list_.Size();
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 0906295..c4bc76f 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -352,7 +352,8 @@
     uint8_t magic_num_;                 // The magic number used for debugging.
     uint8_t size_bracket_idx_;          // The index of the size bracket of this run.
     uint8_t is_thread_local_;           // True if this run is used as a thread-local run.
-    uint8_t to_be_bulk_freed_;          // Used within BulkFree() to flag a run that's involved with a bulk free.
+    bool to_be_bulk_freed_;             // Used within BulkFree() to flag a run that's involved with
+                                        // a bulk free.
     uint32_t padding_ ATTRIBUTE_UNUSED;
     // Use a tailless free list for free_list_ so that the alloc fast path does not manage the tail.
     SlotFreeList<false> free_list_;
diff --git a/runtime/gc/allocator_type.h b/runtime/gc/allocator_type.h
index 992c32a..cd9f5d4 100644
--- a/runtime/gc/allocator_type.h
+++ b/runtime/gc/allocator_type.h
@@ -24,7 +24,7 @@
 
 // Different types of allocators.
 // Those marked with * have fast path entrypoints callable from generated code.
-enum AllocatorType {
+enum AllocatorType : char {
   // BumpPointer spaces are currently only used for ZygoteSpace construction.
   kAllocatorTypeBumpPointer,  // Use global CAS-based BumpPointer allocator. (*)
   kAllocatorTypeTLAB,  // Use TLAB allocator within BumpPointer space. (*)
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 9428a0b..1f50c27 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -101,8 +101,6 @@
       weak_ref_access_enabled_(true),
       copied_live_bytes_ratio_sum_(0.f),
       gc_count_(0),
-      region_space_inter_region_bitmap_(nullptr),
-      non_moving_space_inter_region_bitmap_(nullptr),
       reclaimed_bytes_ratio_sum_(0.f),
       skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
       measure_read_barrier_slow_path_(measure_read_barrier_slow_path),
@@ -294,24 +292,24 @@
 
 void ConcurrentCopying::CreateInterRegionRefBitmaps() {
   DCHECK(use_generational_cc_);
-  DCHECK(region_space_inter_region_bitmap_ == nullptr);
-  DCHECK(non_moving_space_inter_region_bitmap_ == nullptr);
+  DCHECK(!region_space_inter_region_bitmap_.IsValid());
+  DCHECK(!non_moving_space_inter_region_bitmap_.IsValid());
   DCHECK(region_space_ != nullptr);
   DCHECK(heap_->non_moving_space_ != nullptr);
   // Region-space
-  region_space_inter_region_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
+  region_space_inter_region_bitmap_ = accounting::ContinuousSpaceBitmap::Create(
       "region-space inter region ref bitmap",
       reinterpret_cast<uint8_t*>(region_space_->Begin()),
-      region_space_->Limit() - region_space_->Begin()));
-  CHECK(region_space_inter_region_bitmap_ != nullptr)
+      region_space_->Limit() - region_space_->Begin());
+  CHECK(region_space_inter_region_bitmap_.IsValid())
       << "Couldn't allocate region-space inter region ref bitmap";
 
   // non-moving-space
-  non_moving_space_inter_region_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
+  non_moving_space_inter_region_bitmap_ = accounting::ContinuousSpaceBitmap::Create(
       "non-moving-space inter region ref bitmap",
       reinterpret_cast<uint8_t*>(heap_->non_moving_space_->Begin()),
-      heap_->non_moving_space_->Limit() - heap_->non_moving_space_->Begin()));
-  CHECK(non_moving_space_inter_region_bitmap_ != nullptr)
+      heap_->non_moving_space_->Limit() - heap_->non_moving_space_->Begin());
+  CHECK(non_moving_space_inter_region_bitmap_.IsValid())
       << "Couldn't allocate non-moving-space inter region ref bitmap";
 }
 
@@ -446,15 +444,17 @@
         << thread->GetState() << " thread " << thread << " self " << self;
     thread->SetIsGcMarkingAndUpdateEntrypoints(true);
     if (use_tlab_ && thread->HasTlab()) {
+      // We should not reuse the partially utilized TLABs revoked here as they
+      // are going to be part of from-space.
       if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
         // This must come before the revoke.
         size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
-        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
+        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread, /*reuse=*/ false);
         reinterpret_cast<Atomic<size_t>*>(
             &concurrent_copying_->from_space_num_objects_at_first_pause_)->
                 fetch_add(thread_local_objects, std::memory_order_relaxed);
       } else {
-        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
+        concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread, /*reuse=*/ false);
       }
     }
     if (kUseThreadLocalAllocationStack) {
@@ -946,6 +946,43 @@
   Thread* const self_;
 };
 
+void ConcurrentCopying::RemoveThreadMarkStackMapping(Thread* thread,
+                                                     accounting::ObjectStack* tl_mark_stack) {
+  CHECK(tl_mark_stack != nullptr);
+  auto it = thread_mark_stack_map_.find(thread);
+  CHECK(it != thread_mark_stack_map_.end());
+  CHECK(it->second == tl_mark_stack);
+  thread_mark_stack_map_.erase(it);
+}
+
+void ConcurrentCopying::AssertEmptyThreadMarkStackMap() {
+  std::ostringstream oss;
+  auto capture_mappings = [this, &oss] () REQUIRES(mark_stack_lock_) {
+    for (const auto & iter : thread_mark_stack_map_) {
+      oss << "thread:" << iter.first << " mark-stack:" << iter.second << "\n";
+    }
+    return oss.str();
+  };
+  CHECK(thread_mark_stack_map_.empty()) << "thread_mark_stack_map not empty. size:"
+                                        << thread_mark_stack_map_.size()
+                                        << "Mappings:\n"
+                                        << capture_mappings()
+                                        << "pooled_mark_stacks size:"
+                                        << pooled_mark_stacks_.size();
+}
+
+void ConcurrentCopying::AssertNoThreadMarkStackMapping(Thread* thread) {
+  MutexLock mu(Thread::Current(), mark_stack_lock_);
+  CHECK(thread_mark_stack_map_.find(thread) == thread_mark_stack_map_.end());
+}
+
+void ConcurrentCopying::AddThreadMarkStackMapping(Thread* thread,
+                                                  accounting::ObjectStack* tl_mark_stack) {
+  CHECK(tl_mark_stack != nullptr);
+  CHECK(thread_mark_stack_map_.find(thread) == thread_mark_stack_map_.end());
+  thread_mark_stack_map_.insert({thread, tl_mark_stack});
+}
+
 class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
  public:
   RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
@@ -960,11 +997,14 @@
     CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
         << thread->GetState() << " thread " << thread << " self " << self;
     // Revoke thread local mark stacks.
-    accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
-    if (tl_mark_stack != nullptr) {
+    {
       MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
-      concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
-      thread->SetThreadLocalMarkStack(nullptr);
+      accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
+      if (tl_mark_stack != nullptr) {
+        concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
+        thread->SetThreadLocalMarkStack(nullptr);
+        concurrent_copying_->RemoveThreadMarkStackMapping(thread, tl_mark_stack);
+      }
     }
     // Disable weak ref access.
     if (disable_weak_ref_access_) {
@@ -996,6 +1036,9 @@
     // only.
     CaptureRootsForMarkingVisitor</*kAtomicTestAndSet*/ true> visitor(concurrent_copying_, self);
     thread->VisitRoots(&visitor, kVisitRootFlagAllRoots);
+    // If thread_running_gc_ performed the root visit then its thread-local
+    // mark-stack should be null as we directly push to gc_mark_stack_.
+    CHECK(self == thread || self->GetThreadLocalMarkStack() == nullptr);
     // Barrier handling is done in the base class' Run() below.
     RevokeThreadLocalMarkStackCheckpoint::Run(thread);
   }
@@ -1138,9 +1181,9 @@
       // only class object reference, which is either in some immune-space, or
       // in non-moving-space.
       DCHECK(heap_->non_moving_space_->HasAddress(ref));
-      non_moving_space_inter_region_bitmap_->Set(ref);
+      non_moving_space_inter_region_bitmap_.Set(ref);
     } else {
-      region_space_inter_region_bitmap_->Set(ref);
+      region_space_inter_region_bitmap_.Set(ref);
     }
   }
 }
@@ -1222,6 +1265,12 @@
                                    REQUIRES_SHARED(Locks::mutator_lock_) {
                                  AddLiveBytesAndScanRef(ref);
                                });
+  {
+    MutexLock mu(thread_running_gc_, mark_stack_lock_);
+    CHECK(revoked_mark_stacks_.empty());
+    AssertEmptyThreadMarkStackMap();
+    CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
+  }
 
   while (!gc_mark_stack_->IsEmpty()) {
     mirror::Object* ref = gc_mark_stack_->PopBack();
@@ -1241,7 +1290,7 @@
   }
 
   static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
-    reinterpret_cast<ImmuneSpaceScanObjVisitor*>(arg)->operator()(obj);
+    reinterpret_cast<ImmuneSpaceCaptureRefsVisitor*>(arg)->operator()(obj);
   }
 
  private:
@@ -1316,6 +1365,7 @@
   }
   accounting::CardTable* const card_table = heap_->GetCardTable();
   Thread* const self = Thread::Current();
+  CHECK_EQ(self, thread_running_gc_);
   // Clear live_bytes_ of every non-free region, except the ones that are newly
   // allocated.
   region_space_->SetAllRegionLiveBytesZero();
@@ -1459,10 +1509,10 @@
               // We need to process un-evac references as they may be unprocessed,
               // if they skipped the marking phase due to heap mutation.
               ScanDirtyObject</*kNoUnEvac*/ false>(obj);
-              non_moving_space_inter_region_bitmap_->Clear(obj);
+              non_moving_space_inter_region_bitmap_.Clear(obj);
             } else if (region_space_->IsInUnevacFromSpace(obj)) {
               ScanDirtyObject</*kNoUnEvac*/ false>(obj);
-              region_space_inter_region_bitmap_->Clear(obj);
+              region_space_inter_region_bitmap_.Clear(obj);
             }
           },
           accounting::CardTable::kCardAged);
@@ -1474,10 +1524,10 @@
                          ScanDirtyObject</*kNoUnEvac*/ true>(obj);
                        };
         if (space == region_space_) {
-          region_space_->ScanUnevacFromSpace(region_space_inter_region_bitmap_.get(), visitor);
+          region_space_->ScanUnevacFromSpace(&region_space_inter_region_bitmap_, visitor);
         } else {
           DCHECK(space == heap_->non_moving_space_);
-          non_moving_space_inter_region_bitmap_->VisitMarkedRange(
+          non_moving_space_inter_region_bitmap_.VisitMarkedRange(
               reinterpret_cast<uintptr_t>(space->Begin()),
               reinterpret_cast<uintptr_t>(space->End()),
               visitor);
@@ -1781,7 +1831,9 @@
         if (tl_mark_stack != nullptr) {
           // Store the old full stack into a vector.
           revoked_mark_stacks_.push_back(tl_mark_stack);
+          RemoveThreadMarkStackMapping(self, tl_mark_stack);
         }
+        AddThreadMarkStackMapping(self, new_tl_mark_stack);
       } else {
         tl_mark_stack->PushBack(to_ref);
       }
@@ -2003,11 +2055,12 @@
 void ConcurrentCopying::RevokeThreadLocalMarkStack(Thread* thread) {
   Thread* self = Thread::Current();
   CHECK_EQ(self, thread);
+  MutexLock mu(self, mark_stack_lock_);
   accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
   if (tl_mark_stack != nullptr) {
     CHECK(is_marking_);
-    MutexLock mu(self, mark_stack_lock_);
     revoked_mark_stacks_.push_back(tl_mark_stack);
+    RemoveThreadMarkStackMapping(thread, tl_mark_stack);
     thread->SetThreadLocalMarkStack(nullptr);
   }
 }
@@ -2057,6 +2110,8 @@
     {
       MutexLock mu(thread_running_gc_, mark_stack_lock_);
       CHECK(revoked_mark_stacks_.empty());
+      AssertEmptyThreadMarkStackMap();
+      CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
     }
     while (true) {
       std::vector<mirror::Object*> refs;
@@ -2083,6 +2138,8 @@
     {
       MutexLock mu(thread_running_gc_, mark_stack_lock_);
       CHECK(revoked_mark_stacks_.empty());
+      AssertEmptyThreadMarkStackMap();
+      CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
     }
     // Process the GC mark stack in the exclusive mode. No need to take the lock.
     while (!gc_mark_stack_->IsEmpty()) {
@@ -2103,6 +2160,14 @@
                                                        const Processor& processor) {
   // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
   RevokeThreadLocalMarkStacks(disable_weak_ref_access, checkpoint_callback);
+  if (disable_weak_ref_access) {
+    CHECK_EQ(static_cast<uint32_t>(mark_stack_mode_.load(std::memory_order_relaxed)),
+             static_cast<uint32_t>(kMarkStackModeShared));
+    // From this point onwards no mutator should require a thread-local mark
+    // stack.
+    MutexLock mu(thread_running_gc_, mark_stack_lock_);
+    AssertEmptyThreadMarkStackMap();
+  }
   size_t count = 0;
   std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
   {
@@ -2129,6 +2194,11 @@
       }
     }
   }
+  if (disable_weak_ref_access) {
+    MutexLock mu(thread_running_gc_, mark_stack_lock_);
+    CHECK(revoked_mark_stacks_.empty());
+    CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
+  }
   return count;
 }
 
@@ -2369,6 +2439,8 @@
     MutexLock mu(thread_running_gc_, mark_stack_lock_);
     CHECK(gc_mark_stack_->IsEmpty());
     CHECK(revoked_mark_stacks_.empty());
+    AssertEmptyThreadMarkStackMap();
+    CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
   }
 }
 
@@ -2584,11 +2656,11 @@
     // inter-region refs
     if (use_generational_cc_ && !young_gen_) {
       // region space
-      add_gc_range(region_space_inter_region_bitmap_->Begin(),
-                   region_space_inter_region_bitmap_->Size());
+      add_gc_range(region_space_inter_region_bitmap_.Begin(),
+                   region_space_inter_region_bitmap_.Size());
       // non-moving space
-      add_gc_range(non_moving_space_inter_region_bitmap_->Begin(),
-                   non_moving_space_inter_region_bitmap_->Size());
+      add_gc_range(non_moving_space_inter_region_bitmap_.Begin(),
+                   non_moving_space_inter_region_bitmap_.Size());
     }
     // Extract RSS using mincore(). Updates the cummulative RSS counter.
     ExtractRssFromMincore(&gc_ranges);
@@ -2627,6 +2699,20 @@
   // the biggest memory range, thereby reducing the cost of this function.
   CaptureRssAtPeak();
 
+  // Sweep the malloc spaces before clearing the from space since the memory tool mode might
+  // access the object classes in the from space for dead objects.
+  {
+    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    Sweep(/* swap_bitmaps= */ false);
+    SwapBitmaps();
+    heap_->UnBindBitmaps();
+
+    // The bitmap was cleared at the start of the GC, there is nothing we need to do here.
+    DCHECK(region_space_bitmap_ != nullptr);
+    region_space_bitmap_ = nullptr;
+  }
+
+
   {
     // Record freed objects.
     TimingLogger::ScopedTiming split2("RecordFree", GetTimings());
@@ -2689,17 +2775,6 @@
     reclaimed_bytes_ratio_sum_ += reclaimed_bytes_ratio;
   }
 
-  {
-    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-    Sweep(/* swap_bitmaps= */ false);
-    SwapBitmaps();
-    heap_->UnBindBitmaps();
-
-    // The bitmap was cleared at the start of the GC, there is nothing we need to do here.
-    DCHECK(region_space_bitmap_ != nullptr);
-    region_space_bitmap_ = nullptr;
-  }
-
   CheckEmptyMarkStack();
 
   if (heap_->dump_region_info_after_gc_) {
@@ -3627,6 +3702,8 @@
   Thread* const self = Thread::Current();
   {
     MutexLock mu(self, mark_stack_lock_);
+    CHECK(revoked_mark_stacks_.empty());
+    AssertEmptyThreadMarkStackMap();
     CHECK_EQ(pooled_mark_stacks_.size(), kMarkStackPoolSize);
   }
   // kVerifyNoMissingCardMarks relies on the region space cards not being cleared to avoid false
@@ -3636,8 +3713,8 @@
     // We do not currently use the region space cards at all, madvise them away to save ram.
     heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit());
   } else if (use_generational_cc_ && !young_gen_) {
-    region_space_inter_region_bitmap_->Clear();
-    non_moving_space_inter_region_bitmap_->Clear();
+    region_space_inter_region_bitmap_.Clear();
+    non_moving_space_inter_region_bitmap_.Clear();
   }
   {
     MutexLock mu(self, skipped_blocks_lock_);
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 2e5752b..6482ff7 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -18,11 +18,13 @@
 #define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
 
 #include "garbage_collector.h"
+#include "gc/accounting/space_bitmap.h"
 #include "immune_spaces.h"
 #include "offsets.h"
 
 #include <map>
 #include <memory>
+#include <unordered_map>
 #include <vector>
 
 namespace art {
@@ -148,12 +150,13 @@
   bool IsWeakRefAccessEnabled() REQUIRES(Locks::thread_list_lock_) {
     return weak_ref_access_enabled_;
   }
-  void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!mark_stack_lock_);
+  void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES(!mark_stack_lock_);
 
   mirror::Object* IsMarked(mirror::Object* from_ref) override
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  void AssertNoThreadMarkStackMapping(Thread* thread) REQUIRES(!mark_stack_lock_);
+
  private:
   void PushOntoMarkStack(Thread* const self, mirror::Object* obj)
       REQUIRES_SHARED(Locks::mutator_lock_)
@@ -321,6 +324,12 @@
   void ProcessMarkStackForMarkingAndComputeLiveBytes() REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_);
 
+  void RemoveThreadMarkStackMapping(Thread* thread, accounting::ObjectStack* tl_mark_stack)
+      REQUIRES(mark_stack_lock_);
+  void AddThreadMarkStackMapping(Thread* thread, accounting::ObjectStack* tl_mark_stack)
+      REQUIRES(mark_stack_lock_);
+  void AssertEmptyThreadMarkStackMap() REQUIRES(mark_stack_lock_);
+
   space::RegionSpace* region_space_;      // The underlying region space.
   std::unique_ptr<Barrier> gc_barrier_;
   std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
@@ -351,6 +360,10 @@
   // (see use case in ConcurrentCopying::MarkFromReadBarrier).
   bool rb_mark_bit_stack_full_;
 
+  // Guards access to pooled_mark_stacks_ and revoked_mark_stacks_ vectors.
+  // Also guards destruction and revocations of thread-local mark-stacks.
+  // Clearing thread-local mark-stack (by other threads or during destruction)
+  // should be guarded by it.
   Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   std::vector<accounting::ObjectStack*> revoked_mark_stacks_
       GUARDED_BY(mark_stack_lock_);
@@ -358,6 +371,9 @@
   static constexpr size_t kMarkStackPoolSize = 256;
   std::vector<accounting::ObjectStack*> pooled_mark_stacks_
       GUARDED_BY(mark_stack_lock_);
+  // TODO(lokeshgidra b/140119552): remove this after bug fix.
+  std::unordered_map<Thread*, accounting::ObjectStack*> thread_mark_stack_map_
+      GUARDED_BY(mark_stack_lock_);
   Thread* thread_running_gc_;
   bool is_marking_;                       // True while marking is ongoing.
   // True while we might dispatch on the read barrier entrypoints.
@@ -409,8 +425,8 @@
   size_t gc_count_;
   // Bit is set if the corresponding object has inter-region references that
   // were found during the marking phase of two-phase full-heap GC cycle.
-  std::unique_ptr<accounting::ContinuousSpaceBitmap> region_space_inter_region_bitmap_;
-  std::unique_ptr<accounting::ContinuousSpaceBitmap> non_moving_space_inter_region_bitmap_;
+  accounting::ContinuousSpaceBitmap region_space_inter_region_bitmap_;
+  accounting::ContinuousSpaceBitmap non_moving_space_inter_region_bitmap_;
 
   // reclaimed_bytes_ratio = reclaimed_bytes/num_allocated_bytes per GC cycle
   float reclaimed_bytes_ratio_sum_;
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 1785a77..bb1a146 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -195,23 +195,14 @@
     if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect ||
         (gc_type == kGcTypeFull &&
          space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect)) {
-      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
-      accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
-      if (live_bitmap != nullptr && live_bitmap != mark_bitmap) {
-        heap_->GetLiveBitmap()->ReplaceBitmap(live_bitmap, mark_bitmap);
-        heap_->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
+      if (space->GetLiveBitmap() != nullptr && !space->HasBoundBitmaps()) {
         CHECK(space->IsContinuousMemMapAllocSpace());
         space->AsContinuousMemMapAllocSpace()->SwapBitmaps();
       }
     }
   }
   for (const auto& disc_space : GetHeap()->GetDiscontinuousSpaces()) {
-    space::LargeObjectSpace* space = disc_space->AsLargeObjectSpace();
-    accounting::LargeObjectBitmap* live_set = space->GetLiveBitmap();
-    accounting::LargeObjectBitmap* mark_set = space->GetMarkBitmap();
-    heap_->GetLiveBitmap()->ReplaceLargeObjectBitmap(live_set, mark_set);
-    heap_->GetMarkBitmap()->ReplaceLargeObjectBitmap(mark_set, live_set);
-    space->SwapBitmaps();
+    disc_space->AsLargeObjectSpace()->SwapBitmaps();
   }
 }
 
@@ -288,7 +279,7 @@
   }
   os << Dumpable<CumulativeLogger>(logger);
   const uint64_t total_ns = logger.GetTotalNs();
-  double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
+  const double seconds = NsToMs(total_ns) / 1000.0;
   const uint64_t freed_bytes = GetTotalFreedBytes();
   const uint64_t freed_objects = GetTotalFreedObjects();
   {
@@ -319,7 +310,7 @@
     freed_bytes_histogram_.DumpBins(os);
     os << "\n";
   }
-  double cpu_seconds = NsToMs(GetTotalCpuTime()) / 1000.0;
+  const double cpu_seconds = NsToMs(GetTotalCpuTime()) / 1000.0;
   os << GetName() << " total time: " << PrettyDuration(total_ns)
      << " mean time: " << PrettyDuration(total_ns / iterations) << "\n"
      << GetName() << " freed: " << freed_objects
diff --git a/runtime/gc/collector/immune_spaces.cc b/runtime/gc/collector/immune_spaces.cc
index 3c20e51..84fcc3f 100644
--- a/runtime/gc/collector/immune_spaces.cc
+++ b/runtime/gc/collector/immune_spaces.cc
@@ -107,7 +107,7 @@
 void ImmuneSpaces::AddSpace(space::ContinuousSpace* space) {
   DCHECK(spaces_.find(space) == spaces_.end()) << *space;
   // Bind live to mark bitmap if necessary.
-  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
+  if (space->GetLiveBitmap() != nullptr && !space->HasBoundBitmaps()) {
     CHECK(space->IsContinuousMemMapAllocSpace());
     space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
   }
diff --git a/runtime/gc/collector/immune_spaces_test.cc b/runtime/gc/collector/immune_spaces_test.cc
index b0d09ba..b1a21d4 100644
--- a/runtime/gc/collector/immune_spaces_test.cc
+++ b/runtime/gc/collector/immune_spaces_test.cc
@@ -41,11 +41,12 @@
 class DummyImageSpace : public space::ImageSpace {
  public:
   DummyImageSpace(MemMap&& map,
-                  std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap,
+                  accounting::ContinuousSpaceBitmap&& live_bitmap,
                   std::unique_ptr<DummyOatFile>&& oat_file,
                   MemMap&& oat_map)
       : ImageSpace("DummyImageSpace",
                    /*image_location=*/"",
+                   /*profile_file=*/"",
                    std::move(map),
                    std::move(live_bitmap),
                    map.End()),
@@ -68,11 +69,11 @@
     // Create a bunch of dummy bitmaps since these are required to create image spaces. The bitmaps
     // do not need to cover the image spaces though.
     for (size_t i = 0; i < kMaxBitmaps; ++i) {
-      std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
+      accounting::ContinuousSpaceBitmap bitmap(
           accounting::ContinuousSpaceBitmap::Create("bitmap",
                                                     reinterpret_cast<uint8_t*>(kPageSize),
                                                     kPageSize));
-      CHECK(bitmap != nullptr);
+      CHECK(bitmap.IsValid());
       live_bitmaps_.push_back(std::move(bitmap));
     }
   }
@@ -96,7 +97,7 @@
       return nullptr;
     }
     CHECK(!live_bitmaps_.empty());
-    std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap(std::move(live_bitmaps_.back()));
+    accounting::ContinuousSpaceBitmap live_bitmap(std::move(live_bitmaps_.back()));
     live_bitmaps_.pop_back();
     MemMap oat_map = MemMap::MapAnonymous("OatMap",
                                           oat_size,
@@ -126,6 +127,8 @@
         /*oat_file_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
         /*boot_image_begin=*/ 0u,
         /*boot_image_size=*/ 0u,
+        /*boot_image_component_count=*/ 0u,
+        /*boot_image_checksum=*/ 0u,
         /*pointer_size=*/ sizeof(void*));
     return new DummyImageSpace(std::move(image_map),
                                std::move(live_bitmap),
@@ -136,7 +139,7 @@
  private:
   // Bitmap pool for pre-allocated dummy bitmaps. We need to pre-allocate them since we don't want
   // them to randomly get placed somewhere where we want an image space.
-  std::vector<std::unique_ptr<accounting::ContinuousSpaceBitmap>> live_bitmaps_;
+  std::vector<accounting::ContinuousSpaceBitmap> live_bitmaps_;
 };
 
 class DummySpace : public space::ContinuousSpace {
@@ -156,11 +159,11 @@
     return false;
   }
 
-  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
+  accounting::ContinuousSpaceBitmap* GetLiveBitmap() override {
     return nullptr;
   }
 
-  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
+  accounting::ContinuousSpaceBitmap* GetMarkBitmap() override {
     return nullptr;
   }
 };
diff --git a/runtime/gc/collector/semi_space-inl.h b/runtime/gc/collector/semi_space-inl.h
index 7db5d2ca..065a125 100644
--- a/runtime/gc/collector/semi_space-inl.h
+++ b/runtime/gc/collector/semi_space-inl.h
@@ -58,7 +58,7 @@
       MarkStackPush(forward_address);
     }
     obj_ptr->Assign(forward_address);
-  } else if (!collect_from_space_only_ && !immune_spaces_.IsInImmuneRegion(obj)) {
+  } else if (!immune_spaces_.IsInImmuneRegion(obj)) {
     DCHECK(!to_space_->HasAddress(obj)) << "Tried to mark " << obj << " in to-space";
     auto slow_path = [this](const mirror::Object* ref) {
       CHECK(!to_space_->HasAddress(ref)) << "Marking " << ref << " in to_space_";
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 15e0711..c93410e 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -58,8 +58,6 @@
 
 static constexpr bool kProtectFromSpace = true;
 static constexpr bool kStoreStackTraces = false;
-static constexpr size_t kBytesPromotedThreshold = 4 * MB;
-static constexpr size_t kLargeObjectBytesAllocatedThreshold = 16 * MB;
 
 void SemiSpace::BindBitmaps() {
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
@@ -71,41 +69,23 @@
       immune_spaces_.AddSpace(space);
     } else if (space->GetLiveBitmap() != nullptr) {
       // TODO: We can probably also add this space to the immune region.
-      if (space == to_space_ || collect_from_space_only_) {
-        if (collect_from_space_only_) {
-          // Bind the bitmaps of the main free list space and the non-moving space we are doing a
-          // bump pointer space only collection.
-          CHECK(space == GetHeap()->GetPrimaryFreeListSpace() ||
-                space == GetHeap()->GetNonMovingSpace());
-        }
+      if (space == to_space_) {
         CHECK(space->IsContinuousMemMapAllocSpace());
         space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
       }
     }
   }
-  if (collect_from_space_only_) {
-    // We won't collect the large object space if a bump pointer space only collection.
-    is_large_object_space_immune_ = true;
-  }
 }
 
-SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
+SemiSpace::SemiSpace(Heap* heap, const std::string& name_prefix)
     : GarbageCollector(heap,
                        name_prefix + (name_prefix.empty() ? "" : " ") + "semispace"),
       mark_stack_(nullptr),
-      is_large_object_space_immune_(false),
       to_space_(nullptr),
       to_space_live_bitmap_(nullptr),
       from_space_(nullptr),
       mark_bitmap_(nullptr),
       self_(nullptr),
-      generational_(generational),
-      last_gc_to_space_end_(nullptr),
-      bytes_promoted_(0),
-      bytes_promoted_since_last_whole_heap_collection_(0),
-      large_object_bytes_allocated_at_last_whole_heap_collection_(0),
-      collect_from_space_only_(generational),
-      promo_dest_space_(nullptr),
       fallback_space_(nullptr),
       bytes_moved_(0U),
       objects_moved_(0U),
@@ -148,7 +128,6 @@
   mark_stack_ = heap_->GetMarkStack();
   DCHECK(mark_stack_ != nullptr);
   immune_spaces_.Reset();
-  is_large_object_space_immune_ = false;
   saved_bytes_ = 0;
   bytes_moved_ = 0;
   objects_moved_ = 0;
@@ -161,9 +140,6 @@
     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
     mark_bitmap_ = heap_->GetMarkBitmap();
   }
-  if (generational_) {
-    promo_dest_space_ = GetHeap()->GetPrimaryFreeListSpace();
-  }
   fallback_space_ = GetHeap()->GetNonMovingSpace();
 }
 
@@ -191,44 +167,14 @@
   // Revoke the thread local buffers since the GC may allocate into a RosAllocSpace and this helps
   // to prevent fragmentation.
   RevokeAllThreadLocalBuffers();
-  if (generational_) {
-    if (GetCurrentIteration()->GetGcCause() == kGcCauseExplicit ||
-        GetCurrentIteration()->GetGcCause() == kGcCauseForNativeAlloc ||
-        GetCurrentIteration()->GetClearSoftReferences()) {
-      // If an explicit, native allocation-triggered, or last attempt
-      // collection, collect the whole heap.
-      collect_from_space_only_ = false;
-    }
-    if (!collect_from_space_only_) {
-      VLOG(heap) << "Whole heap collection";
-      name_ = collector_name_ + " whole";
-    } else {
-      VLOG(heap) << "Bump pointer space only collection";
-      name_ = collector_name_ + " bps";
-    }
-  }
 
-  if (!collect_from_space_only_) {
-    // If non-generational, always clear soft references.
-    // If generational, clear soft references if a whole heap collection.
-    GetCurrentIteration()->SetClearSoftReferences(true);
-  }
+  // Always clear soft references.
+  GetCurrentIteration()->SetClearSoftReferences(true);
   Locks::mutator_lock_->AssertExclusiveHeld(self_);
-  if (generational_) {
-    // If last_gc_to_space_end_ is out of the bounds of the from-space
-    // (the to-space from last GC), then point it to the beginning of
-    // the from-space. For example, the very first GC or the
-    // pre-zygote compaction.
-    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
-      last_gc_to_space_end_ = from_space_->Begin();
-    }
-    // Reset this before the marking starts below.
-    bytes_promoted_ = 0;
-  }
   // Assume the cleared space is already empty.
   BindBitmaps();
   // Process dirty cards and add dirty cards to mod-union tables.
-  heap_->ProcessCards(GetTimings(), kUseRememberedSet && generational_, false, true);
+  heap_->ProcessCards(GetTimings(), /*use_rem_sets=*/false, false, true);
   // Clear the whole card table since we cannot get any additional dirty cards during the
   // paused GC. This saves memory but only works for pause the world collectors.
   t.NewTiming("ClearCardTable");
@@ -256,7 +202,7 @@
   // Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
   // before they are properly counted.
   RevokeAllThreadLocalBuffers();
-  GetHeap()->RecordFreeRevoke();  // this is for the non-moving rosalloc space used by GSS.
+  GetHeap()->RecordFreeRevoke();  // This is for the non-moving rosalloc space.
   // Record freed memory.
   const int64_t from_bytes = from_space_->GetBytesAllocated();
   const int64_t to_bytes = bytes_moved_;
@@ -349,8 +295,7 @@
                                    GetTimings());
       table->UpdateAndMarkReferences(this);
       DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
-    } else if ((space->IsImageSpace() || collect_from_space_only_) &&
-               space->GetLiveBitmap() != nullptr) {
+    } else if (space->IsImageSpace() && space->GetLiveBitmap() != nullptr) {
       // If the space has no mod union table (the non-moving space, app image spaces, main spaces
       // when the bump pointer space only collection is enabled,) then we need to scan its live
       // bitmap or dirty cards as roots (including the objects on the live stack which have just
@@ -358,11 +303,8 @@
       accounting::RememberedSet* rem_set = GetHeap()->FindRememberedSetFromSpace(space);
       if (!space->IsImageSpace()) {
         DCHECK(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace())
-            << "Space " << space->GetName() << " "
-            << "generational_=" << generational_ << " "
-            << "collect_from_space_only_=" << collect_from_space_only_;
+            << "Space " << space->GetName();
         // App images currently do not have remembered sets.
-        DCHECK_EQ(kUseRememberedSet, rem_set != nullptr);
       } else {
         DCHECK(rem_set == nullptr);
       }
@@ -395,30 +337,6 @@
       }
     }
   }
-
-  CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_);
-  space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
-  if (is_large_object_space_immune_ && los != nullptr) {
-    TimingLogger::ScopedTiming t2("VisitLargeObjects", GetTimings());
-    DCHECK(collect_from_space_only_);
-    // Delay copying the live set to the marked set until here from
-    // BindBitmaps() as the large objects on the allocation stack may
-    // be newly added to the live set above in MarkAllocStackAsLive().
-    los->CopyLiveToMarked();
-
-    // When the large object space is immune, we need to scan the
-    // large object space as roots as they contain references to their
-    // classes (primitive array classes) that could move though they
-    // don't contain any other references.
-    accounting::LargeObjectBitmap* large_live_bitmap = los->GetLiveBitmap();
-    std::pair<uint8_t*, uint8_t*> range = los->GetBeginEndAtomic();
-    large_live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(range.first),
-                                        reinterpret_cast<uintptr_t>(range.second),
-                                        [this](mirror::Object* obj)
-        REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
-      ScanObject(obj);
-    });
-  }
   // Recursively process the mark stack.
   ProcessMarkStack();
 }
@@ -437,12 +355,6 @@
   if (saved_bytes_ > 0) {
     VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
   }
-  if (generational_) {
-    // Record the end (top) of the to space so we can distinguish
-    // between objects that were allocated since the last GC and the
-    // older objects.
-    last_gc_to_space_end_ = to_space_->End();
-  }
 }
 
 void SemiSpace::ResizeMarkStack(size_t new_size) {
@@ -515,66 +427,15 @@
 mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
   const size_t object_size = obj->SizeOf();
   size_t bytes_allocated, dummy;
-  mirror::Object* forward_address = nullptr;
-  if (generational_ && reinterpret_cast<uint8_t*>(obj) < last_gc_to_space_end_) {
-    // If it's allocated before the last GC (older), move
-    // (pseudo-promote) it to the main free list space (as sort
-    // of an old generation.)
-    forward_address = promo_dest_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
-                                                           nullptr, &dummy);
-    if (UNLIKELY(forward_address == nullptr)) {
-      // If out of space, fall back to the to-space.
-      forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr,
-                                                     &dummy);
-      // No logic for marking the bitmap, so it must be null.
-      DCHECK(to_space_live_bitmap_ == nullptr);
-    } else {
-      bytes_promoted_ += bytes_allocated;
-      // Dirty the card at the destionation as it may contain
-      // references (including the class pointer) to the bump pointer
-      // space.
-      WriteBarrier::ForEveryFieldWrite(forward_address);
-      // Handle the bitmaps marking.
-      accounting::ContinuousSpaceBitmap* live_bitmap = promo_dest_space_->GetLiveBitmap();
-      DCHECK(live_bitmap != nullptr);
-      accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
-      DCHECK(mark_bitmap != nullptr);
-      DCHECK(!live_bitmap->Test(forward_address));
-      if (collect_from_space_only_) {
-        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
-        DCHECK_EQ(live_bitmap, mark_bitmap);
+  // Copy it to the to-space.
+  mirror::Object* forward_address = to_space_->AllocThreadUnsafe(self_,
+                                                                 object_size,
+                                                                 &bytes_allocated,
+                                                                 nullptr,
+                                                                 &dummy);
 
-        // If a bump pointer space only collection, delay the live
-        // bitmap marking of the promoted object until it's popped off
-        // the mark stack (ProcessMarkStack()). The rationale: we may
-        // be in the middle of scanning the objects in the promo
-        // destination space for
-        // non-moving-space-to-bump-pointer-space references by
-        // iterating over the marked bits of the live bitmap
-        // (MarkReachableObjects()). If we don't delay it (and instead
-        // mark the promoted object here), the above promo destination
-        // space scan could encounter the just-promoted object and
-        // forward the references in the promoted object's fields even
-        // through it is pushed onto the mark stack. If this happens,
-        // the promoted object would be in an inconsistent state, that
-        // is, it's on the mark stack (gray) but its fields are
-        // already forwarded (black), which would cause a
-        // DCHECK(!to_space_->HasAddress(obj)) failure below.
-      } else {
-        // Mark forward_address on the live bit map.
-        live_bitmap->Set(forward_address);
-        // Mark forward_address on the mark bit map.
-        DCHECK(!mark_bitmap->Test(forward_address));
-        mark_bitmap->Set(forward_address);
-      }
-    }
-  } else {
-    // If it's allocated after the last GC (younger), copy it to the to-space.
-    forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr,
-                                                   &dummy);
-    if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
-      to_space_live_bitmap_->Set(forward_address);
-    }
+  if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
+    to_space_live_bitmap_->Set(forward_address);
   }
   // If it's still null, attempt to use the fallback space.
   if (UNLIKELY(forward_address == nullptr)) {
@@ -596,9 +457,7 @@
     obj->AssertReadBarrierState();
     forward_address->AssertReadBarrierState();
   }
-  DCHECK(to_space_->HasAddress(forward_address) ||
-         fallback_space_->HasAddress(forward_address) ||
-         (generational_ && promo_dest_space_->HasAddress(forward_address)))
+  DCHECK(to_space_->HasAddress(forward_address) || fallback_space_->HasAddress(forward_address))
       << forward_address << "\n" << GetHeap()->DumpSpaces();
   return forward_address;
 }
@@ -664,13 +523,10 @@
       RecordFree(alloc_space->Sweep(swap_bitmaps));
     }
   }
-  if (!is_large_object_space_immune_) {
-    SweepLargeObjects(swap_bitmaps);
-  }
+  SweepLargeObjects(swap_bitmaps);
 }
 
 void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
-  DCHECK(!is_large_object_space_immune_);
   space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
   if (los != nullptr) {
     TimingLogger::ScopedTiming split("SweepLargeObjects", GetTimings());
@@ -735,26 +591,8 @@
 // Scan anything that's on the mark stack.
 void SemiSpace::ProcessMarkStack() {
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  accounting::ContinuousSpaceBitmap* live_bitmap = nullptr;
-  const bool collect_from_space_only = collect_from_space_only_;
-  if (collect_from_space_only) {
-    // If a bump pointer space only collection (and the promotion is
-    // enabled,) we delay the live-bitmap marking of promoted objects
-    // from MarkObject() until this function.
-    live_bitmap = promo_dest_space_->GetLiveBitmap();
-    DCHECK(live_bitmap != nullptr);
-    accounting::ContinuousSpaceBitmap* mark_bitmap = promo_dest_space_->GetMarkBitmap();
-    DCHECK(mark_bitmap != nullptr);
-    DCHECK_EQ(live_bitmap, mark_bitmap);
-  }
   while (!mark_stack_->IsEmpty()) {
     Object* obj = mark_stack_->PopBack();
-    if (collect_from_space_only && promo_dest_space_->HasAddress(obj)) {
-      // obj has just been promoted. Mark the live bitmap for it,
-      // which is delayed from MarkObject().
-      DCHECK(!live_bitmap->Test(obj));
-      live_bitmap->Set(obj);
-    }
     ScanObject(obj);
   }
 }
@@ -764,9 +602,7 @@
   if (from_space_->HasAddress(obj)) {
     // Returns either the forwarding address or null.
     return GetForwardingAddressInFromSpace(obj);
-  } else if (collect_from_space_only_ ||
-             immune_spaces_.IsInImmuneRegion(obj) ||
-             to_space_->HasAddress(obj)) {
+  } else if (immune_spaces_.IsInImmuneRegion(obj) || to_space_->HasAddress(obj)) {
     return obj;  // Already forwarded, must be marked.
   }
   return mark_bitmap_->Test(obj) ? obj : nullptr;
@@ -817,35 +653,6 @@
   from_space_ = nullptr;
   CHECK(mark_stack_->IsEmpty());
   mark_stack_->Reset();
-  space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace();
-  if (generational_) {
-    // Decide whether to do a whole heap collection or a bump pointer
-    // only space collection at the next collection by updating
-    // collect_from_space_only_.
-    if (collect_from_space_only_) {
-      // Disable collect_from_space_only_ if the bytes promoted since the
-      // last whole heap collection or the large object bytes
-      // allocated exceeds a threshold.
-      bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
-      bool bytes_promoted_threshold_exceeded =
-          bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold;
-      uint64_t current_los_bytes_allocated = los != nullptr ? los->GetBytesAllocated() : 0U;
-      uint64_t last_los_bytes_allocated =
-          large_object_bytes_allocated_at_last_whole_heap_collection_;
-      bool large_object_bytes_threshold_exceeded =
-          current_los_bytes_allocated >=
-          last_los_bytes_allocated + kLargeObjectBytesAllocatedThreshold;
-      if (bytes_promoted_threshold_exceeded || large_object_bytes_threshold_exceeded) {
-        collect_from_space_only_ = false;
-      }
-    } else {
-      // Reset the counters.
-      bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
-      large_object_bytes_allocated_at_last_whole_heap_collection_ =
-          los != nullptr ? los->GetBytesAllocated() : 0U;
-      collect_from_space_only_ = true;
-    }
-  }
   // Clear all of the spaces' mark bitmaps.
   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
   heap_->ClearMarkedObjects();
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index f23d416..9f2939f 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -59,7 +59,7 @@
   // If true, use remembered sets in the generational mode.
   static constexpr bool kUseRememberedSet = true;
 
-  explicit SemiSpace(Heap* heap, bool generational = false, const std::string& name_prefix = "");
+  explicit SemiSpace(Heap* heap, const std::string& name_prefix = "");
 
   ~SemiSpace() {}
 
@@ -76,7 +76,7 @@
     return kGcTypePartial;
   }
   CollectorType GetCollectorType() const override {
-    return generational_ ? kCollectorTypeGSS : kCollectorTypeSS;
+    return kCollectorTypeSS;
   }
 
   // Sets which space we will be copying objects to.
@@ -208,9 +208,6 @@
   // Every object inside the immune spaces is assumed to be marked.
   ImmuneSpaces immune_spaces_;
 
-  // If true, the large object space is immune.
-  bool is_large_object_space_immune_;
-
   // Destination and source spaces (can be any type of ContinuousMemMapAllocSpace which either has
   // a live bitmap or doesn't).
   space::ContinuousMemMapAllocSpace* to_space_;
@@ -222,35 +219,6 @@
 
   Thread* self_;
 
-  // When true, the generational mode (promotion and the bump pointer
-  // space only collection) is enabled. TODO: move these to a new file
-  // as a new garbage collector?
-  const bool generational_;
-
-  // Used for the generational mode. the end/top of the bump
-  // pointer space at the end of the last collection.
-  uint8_t* last_gc_to_space_end_;
-
-  // Used for the generational mode. During a collection, keeps track
-  // of how many bytes of objects have been copied so far from the
-  // bump pointer space to the non-moving space.
-  uint64_t bytes_promoted_;
-
-  // Used for the generational mode. Keeps track of how many bytes of
-  // objects have been copied so far from the bump pointer space to
-  // the non-moving space, since the last whole heap collection.
-  uint64_t bytes_promoted_since_last_whole_heap_collection_;
-
-  // Used for the generational mode. Keeps track of how many bytes of
-  // large objects were allocated at the last whole heap collection.
-  uint64_t large_object_bytes_allocated_at_last_whole_heap_collection_;
-
-  // Used for generational mode. When true, we only collect the from_space_.
-  bool collect_from_space_only_;
-
-  // The space which we are promoting into, only used for GSS.
-  space::ContinuousMemMapAllocSpace* promo_dest_space_;
-
   // The space which we copy to if the to_space_ is full.
   space::ContinuousMemMapAllocSpace* fallback_space_;
 
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index 4759fca..62527e2 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -32,8 +32,6 @@
   kCollectorTypeCMS,
   // Semi-space / mark-sweep hybrid, enables compaction.
   kCollectorTypeSS,
-  // A generational variant of kCollectorTypeSS.
-  kCollectorTypeGSS,
   // Heap trimming collector, doesn't do any actual collecting.
   kCollectorTypeHeapTrim,
   // A (mostly) concurrent copying collector.
@@ -69,8 +67,6 @@
     kCollectorTypeCMS
 #elif ART_DEFAULT_GC_TYPE_IS_SS
     kCollectorTypeSS
-#elif ART_DEFAULT_GC_TYPE_IS_GSS
-    kCollectorTypeGSS
 #else
     kCollectorTypeCMS
 #error "ART default GC type must be set"
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
index 8b4bac2..b197a99 100644
--- a/runtime/gc/gc_cause.cc
+++ b/runtime/gc/gc_cause.cc
@@ -46,6 +46,7 @@
     case kGcCauseHprof: return "Hprof";
     case kGcCauseGetObjectsAllocated: return "ObjectsAllocated";
     case kGcCauseProfileSaver: return "ProfileSaver";
+    case kGcCauseRunEmptyCheckpoint: return "RunEmptyCheckpoint";
   }
   LOG(FATAL) << "Unreachable";
   UNREACHABLE();
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
index 81781ce..4dae585 100644
--- a/runtime/gc/gc_cause.h
+++ b/runtime/gc/gc_cause.h
@@ -62,6 +62,8 @@
   kGcCauseGetObjectsAllocated,
   // GC cause for the profile saver.
   kGcCauseProfileSaver,
+  // GC cause for running an empty checkpoint.
+  kGcCauseRunEmptyCheckpoint,
 };
 
 const char* PrettyCause(GcCause cause);
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 1c09b5c..4499342 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -47,6 +47,12 @@
                                                       size_t byte_count,
                                                       AllocatorType allocator,
                                                       const PreFenceVisitor& pre_fence_visitor) {
+  auto no_suspend_pre_fence_visitor =
+      [&pre_fence_visitor](auto... x) REQUIRES_SHARED(Locks::mutator_lock_) {
+        ScopedAssertNoThreadSuspension sants("No thread suspension during pre-fence visitor");
+        pre_fence_visitor(x...);
+      };
+
   if (kIsDebugBuild) {
     CheckPreconditionsForAllocObject(klass, byte_count);
     // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
@@ -59,110 +65,137 @@
     HandleWrapperObjPtr<mirror::Class> h = hs.NewHandleWrapper(&klass);
     self->PoisonObjectPointers();
   }
-  // Need to check that we aren't the large object allocator since the large object allocation code
-  // path includes this function. If we didn't check we would have an infinite loop.
-  ObjPtr<mirror::Object> obj;
-  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
-    obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
-                                                           pre_fence_visitor);
-    if (obj != nullptr) {
-      return obj.Ptr();
-    } else {
-      // There should be an OOM exception, since we are retrying, clear it.
-      self->ClearException();
+  auto pre_object_allocated = [&]() REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!Roles::uninterruptible_) {
+    if constexpr (kInstrumented) {
+      AllocationListener* l = alloc_listener_.load(std::memory_order_seq_cst);
+      if (UNLIKELY(l != nullptr) && UNLIKELY(l->HasPreAlloc())) {
+        StackHandleScope<1> hs(self);
+        HandleWrapperObjPtr<mirror::Class> h_klass(hs.NewHandleWrapper(&klass));
+        l->PreObjectAllocated(self, h_klass, &byte_count);
+      }
     }
-    // If the large object allocation failed, try to use the normal spaces (main space,
-    // non moving space). This can happen if there is significant virtual address space
-    // fragmentation.
-  }
+  };
+  ObjPtr<mirror::Object> obj;
   // bytes allocated for the (individual) object.
   size_t bytes_allocated;
   size_t usable_size;
   size_t new_num_bytes_allocated = 0;
-  if (IsTLABAllocator(allocator)) {
-    byte_count = RoundUp(byte_count, space::BumpPointerSpace::kAlignment);
-  }
-  // If we have a thread local allocation we don't need to update bytes allocated.
-  if (IsTLABAllocator(allocator) && byte_count <= self->TlabSize()) {
-    obj = self->AllocTlab(byte_count);
-    DCHECK(obj != nullptr) << "AllocTlab can't fail";
-    obj->SetClass(klass);
-    if (kUseBakerReadBarrier) {
-      obj->AssertReadBarrierState();
-    }
-    bytes_allocated = byte_count;
-    usable_size = bytes_allocated;
-    pre_fence_visitor(obj, usable_size);
-    QuasiAtomic::ThreadFenceForConstructor();
-  } else if (
-      !kInstrumented && allocator == kAllocatorTypeRosAlloc &&
-      (obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) != nullptr &&
-      LIKELY(obj != nullptr)) {
-    DCHECK(!is_running_on_memory_tool_);
-    obj->SetClass(klass);
-    if (kUseBakerReadBarrier) {
-      obj->AssertReadBarrierState();
-    }
-    usable_size = bytes_allocated;
-    pre_fence_visitor(obj, usable_size);
-    QuasiAtomic::ThreadFenceForConstructor();
-  } else {
-    // Bytes allocated that includes bulk thread-local buffer allocations in addition to direct
-    // non-TLAB object allocations.
-    size_t bytes_tl_bulk_allocated = 0u;
-    obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
-                                              &usable_size, &bytes_tl_bulk_allocated);
-    if (UNLIKELY(obj == nullptr)) {
-      // AllocateInternalWithGc can cause thread suspension, if someone instruments the entrypoints
-      // or changes the allocator in a suspend point here, we need to retry the allocation.
-      obj = AllocateInternalWithGc(self,
-                                   allocator,
-                                   kInstrumented,
-                                   byte_count,
-                                   &bytes_allocated,
-                                   &usable_size,
-                                   &bytes_tl_bulk_allocated, &klass);
-      if (obj == nullptr) {
-        // The only way that we can get a null return if there is no pending exception is if the
-        // allocator or instrumentation changed.
-        if (!self->IsExceptionPending()) {
-          // AllocObject will pick up the new allocator type, and instrumented as true is the safe
-          // default.
-          return AllocObject</*kInstrumented=*/true>(self,
-                                                     klass,
-                                                     byte_count,
-                                                     pre_fence_visitor);
-        }
-        return nullptr;
+  {
+    // Do the initial pre-alloc
+    pre_object_allocated();
+    ScopedAssertNoThreadSuspension ants("Called PreObjectAllocated, no suspend until alloc");
+
+    // Need to check that we aren't the large object allocator since the large object allocation
+    // code path includes this function. If we didn't check we would have an infinite loop.
+    if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
+      // AllocLargeObject can suspend and will recall PreObjectAllocated if needed.
+      ScopedAllowThreadSuspension ats;
+      obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
+                                                             pre_fence_visitor);
+      if (obj != nullptr) {
+        return obj.Ptr();
       }
+      // There should be an OOM exception, since we are retrying, clear it.
+      self->ClearException();
+
+      // If the large object allocation failed, try to use the normal spaces (main space,
+      // non moving space). This can happen if there is significant virtual address space
+      // fragmentation.
+      pre_object_allocated();
     }
-    DCHECK_GT(bytes_allocated, 0u);
-    DCHECK_GT(usable_size, 0u);
-    obj->SetClass(klass);
-    if (kUseBakerReadBarrier) {
-      obj->AssertReadBarrierState();
+    if (IsTLABAllocator(allocator)) {
+      byte_count = RoundUp(byte_count, space::BumpPointerSpace::kAlignment);
     }
-    if (collector::SemiSpace::kUseRememberedSet && UNLIKELY(allocator == kAllocatorTypeNonMoving)) {
-      // (Note this if statement will be constant folded away for the
-      // fast-path quick entry points.) Because SetClass() has no write
-      // barrier, if a non-moving space allocation, we need a write
-      // barrier as the class pointer may point to the bump pointer
-      // space (where the class pointer is an "old-to-young" reference,
-      // though rare) under the GSS collector with the remembered set
-      // enabled. We don't need this for kAllocatorTypeRosAlloc/DlMalloc
-      // cases because we don't directly allocate into the main alloc
-      // space (besides promotions) under the SS/GSS collector.
-      WriteBarrier::ForFieldWrite(obj, mirror::Object::ClassOffset(), klass);
-    }
-    pre_fence_visitor(obj, usable_size);
-    QuasiAtomic::ThreadFenceForConstructor();
-    if (bytes_tl_bulk_allocated > 0) {
-      size_t num_bytes_allocated_before =
-          num_bytes_allocated_.fetch_add(bytes_tl_bulk_allocated, std::memory_order_relaxed);
-      new_num_bytes_allocated = num_bytes_allocated_before + bytes_tl_bulk_allocated;
-      // Only trace when we get an increase in the number of bytes allocated. This happens when
-      // obtaining a new TLAB and isn't often enough to hurt performance according to golem.
-      TraceHeapSize(new_num_bytes_allocated);
+    // If we have a thread local allocation we don't need to update bytes allocated.
+    if (IsTLABAllocator(allocator) && byte_count <= self->TlabSize()) {
+      obj = self->AllocTlab(byte_count);
+      DCHECK(obj != nullptr) << "AllocTlab can't fail";
+      obj->SetClass(klass);
+      if (kUseBakerReadBarrier) {
+        obj->AssertReadBarrierState();
+      }
+      bytes_allocated = byte_count;
+      usable_size = bytes_allocated;
+      no_suspend_pre_fence_visitor(obj, usable_size);
+      QuasiAtomic::ThreadFenceForConstructor();
+    } else if (
+        !kInstrumented && allocator == kAllocatorTypeRosAlloc &&
+        (obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) != nullptr &&
+        LIKELY(obj != nullptr)) {
+      DCHECK(!is_running_on_memory_tool_);
+      obj->SetClass(klass);
+      if (kUseBakerReadBarrier) {
+        obj->AssertReadBarrierState();
+      }
+      usable_size = bytes_allocated;
+      no_suspend_pre_fence_visitor(obj, usable_size);
+      QuasiAtomic::ThreadFenceForConstructor();
+    } else {
+      // Bytes allocated that includes bulk thread-local buffer allocations in addition to direct
+      // non-TLAB object allocations.
+      size_t bytes_tl_bulk_allocated = 0u;
+      obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
+                                                &usable_size, &bytes_tl_bulk_allocated);
+      if (UNLIKELY(obj == nullptr)) {
+        // AllocateInternalWithGc can cause thread suspension, if someone instruments the
+        // entrypoints or changes the allocator in a suspend point here, we need to retry the
+        // allocation. It will send the pre-alloc event again.
+        obj = AllocateInternalWithGc(self,
+                                     allocator,
+                                     kInstrumented,
+                                     byte_count,
+                                     &bytes_allocated,
+                                     &usable_size,
+                                     &bytes_tl_bulk_allocated,
+                                     &klass);
+        if (obj == nullptr) {
+          // The only way that we can get a null return if there is no pending exception is if the
+          // allocator or instrumentation changed.
+          if (!self->IsExceptionPending()) {
+            // Since we are restarting, allow thread suspension.
+            ScopedAllowThreadSuspension ats;
+            // AllocObject will pick up the new allocator type, and instrumented as true is the safe
+            // default.
+            return AllocObject</*kInstrumented=*/true>(self,
+                                                       klass,
+                                                       byte_count,
+                                                       pre_fence_visitor);
+          }
+          return nullptr;
+        }
+      }
+      DCHECK_GT(bytes_allocated, 0u);
+      DCHECK_GT(usable_size, 0u);
+      obj->SetClass(klass);
+      if (kUseBakerReadBarrier) {
+        obj->AssertReadBarrierState();
+      }
+      if (collector::SemiSpace::kUseRememberedSet &&
+          UNLIKELY(allocator == kAllocatorTypeNonMoving)) {
+        // (Note this if statement will be constant folded away for the fast-path quick entry
+        // points.) Because SetClass() has no write barrier, the GC may need a write barrier in the
+        // case the object is non movable and points to a recently allocated movable class.
+        WriteBarrier::ForFieldWrite(obj, mirror::Object::ClassOffset(), klass);
+      }
+      no_suspend_pre_fence_visitor(obj, usable_size);
+      QuasiAtomic::ThreadFenceForConstructor();
+      if (bytes_tl_bulk_allocated > 0) {
+        size_t num_bytes_allocated_before =
+            num_bytes_allocated_.fetch_add(bytes_tl_bulk_allocated, std::memory_order_relaxed);
+        new_num_bytes_allocated = num_bytes_allocated_before + bytes_tl_bulk_allocated;
+        // Only trace when we get an increase in the number of bytes allocated. This happens when
+        // obtaining a new TLAB and isn't often enough to hurt performance according to golem.
+        if (region_space_) {
+          // With CC collector, during a GC cycle, the heap usage increases as
+          // there are two copies of evacuated objects. Therefore, add evac-bytes
+          // to the heap size. When the GC cycle is not running, evac-bytes
+          // are 0, as required.
+          TraceHeapSize(new_num_bytes_allocated + region_space_->EvacBytes());
+        } else {
+          TraceHeapSize(new_num_bytes_allocated);
+        }
+      }
     }
   }
   if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index ff53f78..be3b7f8 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -17,6 +17,7 @@
 #include "heap.h"
 
 #include <limits>
+#include "android-base/thread_annotations.h"
 #if defined(__BIONIC__) || defined(__GLIBC__)
 #include <malloc.h>  // For mallinfo()
 #endif
@@ -41,6 +42,7 @@
 #include "base/systrace.h"
 #include "base/time_utils.h"
 #include "base/utils.h"
+#include "class_root.h"
 #include "common_throws.h"
 #include "debugger.h"
 #include "dex/dex_file-inl.h"
@@ -80,10 +82,14 @@
 #include "jit/jit_code_cache.h"
 #include "jni/java_vm_ext.h"
 #include "mirror/class-inl.h"
+#include "mirror/executable-inl.h"
+#include "mirror/field.h"
+#include "mirror/method_handle_impl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object-refvisitor-inl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/reference-inl.h"
+#include "mirror/var_handle.h"
 #include "nativehelper/scoped_local_ref.h"
 #include "obj_ptr-inl.h"
 #include "reflection.h"
@@ -97,9 +103,6 @@
 
 namespace gc {
 
-static constexpr size_t kCollectorTransitionStressIterations = 0;
-static constexpr size_t kCollectorTransitionStressWait = 10 * 1000;  // Microseconds
-
 DEFINE_RUNTIME_DEBUG_FLAG(Heap, kStressCollectorTransition);
 
 // Minimum amount of remaining bytes before a concurrent GC is triggered.
@@ -124,7 +127,6 @@
 static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
 static const char* kNonMovingSpaceName = "non moving space";
 static const char* kZygoteSpaceName = "zygote space";
-static constexpr size_t kGSSBumpPointerSpaceCapacity = 32 * MB;
 static constexpr bool kGCALotMode = false;
 // GC alot mode uses a small allocation stack to stress test a lot of GC.
 static constexpr size_t kGcAlotAllocationStackSize = 4 * KB /
@@ -145,10 +147,6 @@
 // If true, we log all GCs in the both the foreground and background. Used for debugging.
 static constexpr bool kLogAllGCs = false;
 
-// How much we grow the TLAB if we can do it.
-static constexpr size_t kPartialTlabSize = 16 * KB;
-static constexpr bool kUsePartialTlabs = true;
-
 // Use Max heap for 2 seconds, this is smaller than the usual 5s window since we don't want to leave
 // allocate with relaxed ergonomics for that long.
 static constexpr size_t kPostForkMaxHeapDurationMS = 2000;
@@ -171,12 +169,51 @@
   return Runtime::Current()->InJankPerceptibleProcessState();
 }
 
+static void VerifyBootImagesContiguity(const std::vector<gc::space::ImageSpace*>& image_spaces) {
+  uint32_t boot_image_size = 0u;
+  for (size_t i = 0u, num_spaces = image_spaces.size(); i != num_spaces; ) {
+    const ImageHeader& image_header = image_spaces[i]->GetImageHeader();
+    uint32_t reservation_size = image_header.GetImageReservationSize();
+    uint32_t image_count = image_header.GetImageSpaceCount();
+
+    CHECK_NE(image_count, 0u);
+    CHECK_LE(image_count, num_spaces - i);
+    CHECK_NE(reservation_size, 0u);
+    for (size_t j = 1u; j != image_count; ++j) {
+      CHECK_EQ(image_spaces[i + j]->GetImageHeader().GetComponentCount(), 0u);
+      CHECK_EQ(image_spaces[i + j]->GetImageHeader().GetImageReservationSize(), 0u);
+    }
+
+    // Check the start of the heap.
+    CHECK_EQ(image_spaces[0]->Begin() + boot_image_size, image_spaces[i]->Begin());
+    // Check contiguous layout of images and oat files.
+    const uint8_t* current_heap = image_spaces[i]->Begin();
+    const uint8_t* current_oat = image_spaces[i]->GetImageHeader().GetOatFileBegin();
+    for (size_t j = 0u; j != image_count; ++j) {
+      const ImageHeader& current_header = image_spaces[i + j]->GetImageHeader();
+      CHECK_EQ(current_heap, image_spaces[i + j]->Begin());
+      CHECK_EQ(current_oat, current_header.GetOatFileBegin());
+      current_heap += RoundUp(current_header.GetImageSize(), kPageSize);
+      CHECK_GT(current_header.GetOatFileEnd(), current_header.GetOatFileBegin());
+      current_oat = current_header.GetOatFileEnd();
+    }
+    // Check that oat files start at the end of images.
+    CHECK_EQ(current_heap, image_spaces[i]->GetImageHeader().GetOatFileBegin());
+    // Check that the reservation size equals the size of images and oat files.
+    CHECK_EQ(reservation_size, static_cast<size_t>(current_oat - image_spaces[i]->Begin()));
+
+    boot_image_size += reservation_size;
+    i += image_count;
+  }
+}
+
 Heap::Heap(size_t initial_size,
            size_t growth_limit,
            size_t min_free,
            size_t max_free,
            double target_utilization,
            double foreground_heap_growth_multiplier,
+           size_t stop_for_native_allocs,
            size_t capacity,
            size_t non_moving_space_capacity,
            const std::vector<std::string>& boot_class_path,
@@ -241,6 +278,10 @@
       capacity_(capacity),
       growth_limit_(growth_limit),
       target_footprint_(initial_size),
+      // Using kPostMonitorLock as a lock at kDefaultMutexLevel is acquired after
+      // this one.
+      process_state_update_lock_("process state update lock", kPostMonitorLock),
+      min_foreground_target_footprint_(0),
       concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
       total_bytes_freed_ever_(0),
       total_objects_freed_ever_(0),
@@ -276,6 +317,7 @@
       max_free_(max_free),
       target_utilization_(target_utilization),
       foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
+      stop_for_native_allocs_(stop_for_native_allocs),
       total_wait_time_(0),
       verify_object_mode_(kVerifyObjectModeDisabled),
       disable_moving_gc_count_(0),
@@ -310,13 +352,20 @@
       unique_backtrace_count_(0u),
       gc_disabled_for_shutdown_(false),
       dump_region_info_before_gc_(dump_region_info_before_gc),
-      dump_region_info_after_gc_(dump_region_info_after_gc) {
+      dump_region_info_after_gc_(dump_region_info_after_gc),
+      boot_image_spaces_(),
+      boot_images_start_address_(0u),
+      boot_images_size_(0u) {
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Heap() entering";
   }
   if (kUseReadBarrier) {
     CHECK_EQ(foreground_collector_type_, kCollectorTypeCC);
     CHECK_EQ(background_collector_type_, kCollectorTypeCCBackground);
+  } else if (background_collector_type_ != gc::kCollectorTypeHomogeneousSpaceCompact) {
+    CHECK_EQ(IsMovingGc(foreground_collector_type_), IsMovingGc(background_collector_type_))
+        << "Changing from " << foreground_collector_type_ << " to "
+        << background_collector_type_ << " (or visa versa) is not supported.";
   }
   verification_.reset(new Verification(this));
   CHECK_GE(large_object_threshold, kMinLargeObjectThreshold);
@@ -336,9 +385,8 @@
   live_bitmap_.reset(new accounting::HeapBitmap(this));
   mark_bitmap_.reset(new accounting::HeapBitmap(this));
 
-  // We don't have hspace compaction enabled with GSS or CC.
-  if (foreground_collector_type_ == kCollectorTypeGSS ||
-      foreground_collector_type_ == kCollectorTypeCC) {
+  // We don't have hspace compaction enabled with CC.
+  if (foreground_collector_type_ == kCollectorTypeCC) {
     use_homogeneous_space_compaction_for_oom_ = false;
   }
   bool support_homogeneous_space_compaction =
@@ -351,9 +399,6 @@
   bool separate_non_moving_space = is_zygote ||
       support_homogeneous_space_compaction || IsMovingGc(foreground_collector_type_) ||
       IsMovingGc(background_collector_type_);
-  if (foreground_collector_type_ == kCollectorTypeGSS) {
-    separate_non_moving_space = false;
-  }
 
   // Requested begin for the alloc space, to follow the mapped image and oat files
   uint8_t* request_begin = nullptr;
@@ -361,8 +406,7 @@
   size_t heap_reservation_size = 0u;
   if (separate_non_moving_space) {
     heap_reservation_size = non_moving_space_capacity;
-  } else if ((foreground_collector_type_ != kCollectorTypeCC) &&
-             (is_zygote || foreground_collector_type_ == kCollectorTypeGSS)) {
+  } else if (foreground_collector_type_ != kCollectorTypeCC && is_zygote) {
     heap_reservation_size = capacity_;
   }
   heap_reservation_size = RoundUp(heap_reservation_size, kPageSize);
@@ -390,6 +434,13 @@
       boot_image_spaces_.push_back(space.get());
       AddSpace(space.release());
     }
+    boot_images_start_address_ = PointerToLowMemUInt32(boot_image_spaces_.front()->Begin());
+    uint32_t boot_images_end =
+        PointerToLowMemUInt32(boot_image_spaces_.back()->GetImageHeader().GetOatFileEnd());
+    boot_images_size_ = boot_images_end - boot_images_start_address_;
+    if (kIsDebugBuild) {
+      VerifyBootImagesContiguity(boot_image_spaces_);
+    }
   } else {
     if (foreground_collector_type_ == kCollectorTypeCC) {
       // Need to use a low address so that we can allocate a contiguous 2 * Xmx space
@@ -447,14 +498,13 @@
   // Attempt to create 2 mem maps at or after the requested begin.
   if (foreground_collector_type_ != kCollectorTypeCC) {
     ScopedTrace trace2("Create main mem map");
-    if (separate_non_moving_space ||
-        !(is_zygote || foreground_collector_type_ == kCollectorTypeGSS)) {
+    if (separate_non_moving_space || !is_zygote) {
       main_mem_map_1 = MapAnonymousPreferredAddress(
           kMemMapSpaceName[0], request_begin, capacity_, &error_str);
     } else {
-      // If no separate non-moving space and we are the zygote or the collector type is GSS,
-      // the main space must come right after the image space to avoid a gap.
-      // This is required since we want the zygote space to be adjacent to the image space.
+      // If no separate non-moving space and we are the zygote, the main space must come right after
+      // the image space to avoid a gap. This is required since we want the zygote space to be
+      // adjacent to the image space.
       DCHECK_EQ(heap_reservation.IsValid(), !boot_image_spaces_.empty());
       main_mem_map_1 = MemMap::MapAnonymous(
           kMemMapSpaceName[0],
@@ -507,8 +557,7 @@
     region_space_ = space::RegionSpace::Create(
         kRegionSpaceName, std::move(region_space_mem_map), use_generational_cc_);
     AddSpace(region_space_);
-  } else if (IsMovingGc(foreground_collector_type_) &&
-      foreground_collector_type_ != kCollectorTypeGSS) {
+  } else if (IsMovingGc(foreground_collector_type_)) {
     // Create bump pointer spaces.
     // We only to create the bump pointer if the foreground collector is a compacting GC.
     // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
@@ -529,19 +578,7 @@
       non_moving_space_ = main_space_;
       CHECK(!non_moving_space_->CanMoveObjects());
     }
-    if (foreground_collector_type_ == kCollectorTypeGSS) {
-      CHECK_EQ(foreground_collector_type_, background_collector_type_);
-      // Create bump pointer spaces instead of a backup space.
-      main_mem_map_2.Reset();
-      bump_pointer_space_ = space::BumpPointerSpace::Create(
-          "Bump pointer space 1", kGSSBumpPointerSpaceCapacity);
-      CHECK(bump_pointer_space_ != nullptr);
-      AddSpace(bump_pointer_space_);
-      temp_space_ = space::BumpPointerSpace::Create(
-          "Bump pointer space 2", kGSSBumpPointerSpaceCapacity);
-      CHECK(temp_space_ != nullptr);
-      AddSpace(temp_space_);
-    } else if (main_mem_map_2.IsValid()) {
+    if (main_mem_map_2.IsValid()) {
       const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
       main_space_backup_.reset(CreateMallocSpaceFromMemMap(std::move(main_mem_map_2),
                                                            initial_size,
@@ -651,13 +688,10 @@
     }
   }
   if (kMovingCollector) {
-    if (MayUseCollector(kCollectorTypeSS) || MayUseCollector(kCollectorTypeGSS) ||
+    if (MayUseCollector(kCollectorTypeSS) ||
         MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
         use_homogeneous_space_compaction_for_oom_) {
-      // TODO: Clean this up.
-      const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
-      semi_space_collector_ = new collector::SemiSpace(this, generational,
-                                                       generational ? "generational" : "");
+      semi_space_collector_ = new collector::SemiSpace(this);
       garbage_collectors_.push_back(semi_space_collector_);
     }
     if (MayUseCollector(kCollectorTypeCC)) {
@@ -690,10 +724,10 @@
     }
   }
   if (!GetBootImageSpaces().empty() && non_moving_space_ != nullptr &&
-      (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
+      (is_zygote || separate_non_moving_space)) {
     // Check that there's no gap between the image space and the non moving space so that the
     // immune region won't break (eg. due to a large object allocated in the gap). This is only
-    // required when we're the zygote or using GSS.
+    // required when we're the zygote.
     // Space with smallest Begin().
     space::ImageSpace* first_space = nullptr;
     for (space::ImageSpace* space : boot_image_spaces_) {
@@ -796,8 +830,7 @@
   if (kCompactZygote && Runtime::Current()->IsZygote() && !can_move_objects) {
     // After the zygote we want this to be false if we don't have background compaction enabled so
     // that getting primitive array elements is faster.
-    // We never have homogeneous compaction with GSS and don't need a space with movable objects.
-    can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
+    can_move_objects = !HasZygoteSpace();
   }
   if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
     RemoveRememberedSet(main_space_);
@@ -824,34 +857,6 @@
   }
 }
 
-void Heap::DisableMovingGc() {
-  CHECK(!kUseReadBarrier);
-  if (IsMovingGc(foreground_collector_type_)) {
-    foreground_collector_type_ = kCollectorTypeCMS;
-  }
-  if (IsMovingGc(background_collector_type_)) {
-    background_collector_type_ = foreground_collector_type_;
-  }
-  TransitionCollector(foreground_collector_type_);
-  Thread* const self = Thread::Current();
-  ScopedThreadStateChange tsc(self, kSuspended);
-  ScopedSuspendAll ssa(__FUNCTION__);
-  // Something may have caused the transition to fail.
-  if (!IsMovingGc(collector_type_) && non_moving_space_ != main_space_) {
-    CHECK(main_space_ != nullptr);
-    // The allocation stack may have non movable objects in it. We need to flush it since the GC
-    // can't only handle marking allocation stack objects of one non moving space and one main
-    // space.
-    {
-      WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-      FlushAllocStack();
-    }
-    main_space_->DisableMovingObjects();
-    non_moving_space_ = main_space_;
-    CHECK(!non_moving_space_->CanMoveObjects());
-  }
-}
-
 bool Heap::IsCompilingBoot() const {
   if (!Runtime::Current()->IsAotCompiler()) {
     return false;
@@ -971,20 +976,24 @@
   thread_flip_cond_->Broadcast(self);
 }
 
+void Heap::GrowHeapOnJankPerceptibleSwitch() {
+  MutexLock mu(Thread::Current(), process_state_update_lock_);
+  size_t orig_target_footprint = target_footprint_.load(std::memory_order_relaxed);
+  if (orig_target_footprint < min_foreground_target_footprint_) {
+    target_footprint_.compare_exchange_strong(orig_target_footprint,
+                                              min_foreground_target_footprint_,
+                                              std::memory_order_relaxed);
+  }
+  min_foreground_target_footprint_ = 0;
+}
+
 void Heap::UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state) {
   if (old_process_state != new_process_state) {
     const bool jank_perceptible = new_process_state == kProcessStateJankPerceptible;
-    for (size_t i = 1; i <= kCollectorTransitionStressIterations; ++i) {
-      // Start at index 1 to avoid "is always false" warning.
-      // Have iteration 1 always transition the collector.
-      TransitionCollector((((i & 1) == 1) == jank_perceptible)
-          ? foreground_collector_type_
-          : background_collector_type_);
-      usleep(kCollectorTransitionStressWait);
-    }
     if (jank_perceptible) {
       // Transition back to foreground right away to prevent jank.
       RequestCollectorTransition(foreground_collector_type_, 0);
+      GrowHeapOnJankPerceptibleSwitch();
     } else {
       // Don't delay for debug builds since we may want to stress test the GC.
       // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
@@ -1139,10 +1148,13 @@
     collector->DumpPerformanceInfo(os);
   }
   if (total_duration != 0) {
-    const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
+    const double total_seconds = total_duration / 1.0e9;
+    const double total_cpu_seconds = GetTotalGcCpuTime() / 1.0e9;
     os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
     os << "Mean GC size throughput: "
-       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s\n";
+       << PrettySize(GetBytesFreedEver() / total_seconds) << "/s"
+       << " per cpu-time: "
+       << PrettySize(GetBytesFreedEver() / total_cpu_seconds) << "/s\n";
     os << "Mean GC object throughput: "
        << (GetObjectsFreedEver() / total_seconds) << " objects/s\n";
   }
@@ -1206,8 +1218,8 @@
   post_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
   post_gc_weighted_allocated_bytes_ = 0u;
 
-  total_bytes_freed_ever_ = 0;
-  total_objects_freed_ever_ = 0;
+  total_bytes_freed_ever_.store(0);
+  total_objects_freed_ever_.store(0);
   total_wait_time_ = 0;
   blocking_gc_count_ = 0;
   blocking_gc_time_ = 0;
@@ -1409,7 +1421,7 @@
       VLOG(gc) << "CC background compaction ignored due to jank perceptible process state";
     }
   } else {
-    TransitionCollector(desired_collector_type);
+    CHECK_EQ(desired_collector_type, collector_type_) << "Unsupported collector transition";
   }
 }
 
@@ -1729,11 +1741,31 @@
   // Make sure there is no pending exception since we may need to throw an OOME.
   self->AssertNoPendingException();
   DCHECK(klass != nullptr);
+
   StackHandleScope<1> hs(self);
-  HandleWrapperObjPtr<mirror::Class> h(hs.NewHandleWrapper(klass));
+  HandleWrapperObjPtr<mirror::Class> h_klass(hs.NewHandleWrapper(klass));
+
+  auto send_object_pre_alloc =
+      [&]() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_) {
+        if (UNLIKELY(instrumented)) {
+          AllocationListener* l = alloc_listener_.load(std::memory_order_seq_cst);
+          if (UNLIKELY(l != nullptr) && UNLIKELY(l->HasPreAlloc())) {
+            l->PreObjectAllocated(self, h_klass, &alloc_size);
+          }
+        }
+      };
+#define PERFORM_SUSPENDING_OPERATION(op)                                          \
+  [&]() REQUIRES(Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_) { \
+    ScopedAllowThreadSuspension ats;                                              \
+    auto res = (op);                                                              \
+    send_object_pre_alloc();                                                      \
+    return res;                                                                   \
+  }()
+
   // The allocation failed. If the GC is running, block until it completes, and then retry the
   // allocation.
-  collector::GcType last_gc = WaitForGcToComplete(kGcCauseForAlloc, self);
+  collector::GcType last_gc =
+      PERFORM_SUSPENDING_OPERATION(WaitForGcToComplete(kGcCauseForAlloc, self));
   // If we were the default allocator but the allocator changed while we were suspended,
   // abort the allocation.
   if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
@@ -1750,8 +1782,9 @@
   }
 
   collector::GcType tried_type = next_gc_type_;
-  const bool gc_ran =
-      CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
+  const bool gc_ran = PERFORM_SUSPENDING_OPERATION(
+      CollectGarbageInternal(tried_type, kGcCauseForAlloc, false) != collector::kGcTypeNone);
+
   if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
       (!instrumented && EntrypointsInstrumented())) {
     return nullptr;
@@ -1770,8 +1803,8 @@
       continue;
     }
     // Attempt to run the collector, if we succeed, re-try the allocation.
-    const bool plan_gc_ran =
-        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone;
+    const bool plan_gc_ran = PERFORM_SUSPENDING_OPERATION(
+        CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone);
     if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
         (!instrumented && EntrypointsInstrumented())) {
       return nullptr;
@@ -1801,7 +1834,7 @@
   // TODO: Run finalization, but this may cause more allocations to occur.
   // We don't need a WaitForGcToComplete here either.
   DCHECK(!gc_plan_.empty());
-  CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true);
+  PERFORM_SUSPENDING_OPERATION(CollectGarbageInternal(gc_plan_.back(), kGcCauseForAlloc, true));
   if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
       (!instrumented && EntrypointsInstrumented())) {
     return nullptr;
@@ -1818,7 +1851,8 @@
             current_time - last_time_homogeneous_space_compaction_by_oom_ >
             min_interval_homogeneous_space_compaction_by_oom_) {
           last_time_homogeneous_space_compaction_by_oom_ = current_time;
-          HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
+          HomogeneousSpaceCompactResult result =
+              PERFORM_SUSPENDING_OPERATION(PerformHomogeneousSpaceCompact());
           // Thread suspension could have occurred.
           if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
               (!instrumented && EntrypointsInstrumented())) {
@@ -1858,42 +1892,15 @@
         }
         break;
       }
-      case kAllocatorTypeNonMoving: {
-        if (kUseReadBarrier) {
-          // DisableMovingGc() isn't compatible with CC.
-          break;
-        }
-        // Try to transition the heap if the allocation failure was due to the space being full.
-        if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow=*/ false)) {
-          // If we aren't out of memory then the OOM was probably from the non moving space being
-          // full. Attempt to disable compaction and turn the main space into a non moving space.
-          DisableMovingGc();
-          // Thread suspension could have occurred.
-          if ((was_default_allocator && allocator != GetCurrentAllocator()) ||
-              (!instrumented && EntrypointsInstrumented())) {
-            return nullptr;
-          }
-          // If we are still a moving GC then something must have caused the transition to fail.
-          if (IsMovingGc(collector_type_)) {
-            MutexLock mu(self, *gc_complete_lock_);
-            // If we couldn't disable moving GC, just throw OOME and return null.
-            LOG(WARNING) << "Couldn't disable moving GC with disable GC count "
-                         << disable_moving_gc_count_;
-          } else {
-            LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
-            ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
-                                            usable_size, bytes_tl_bulk_allocated);
-          }
-        }
-        break;
-      }
       default: {
         // Do nothing for others allocators.
       }
     }
   }
+#undef PERFORM_SUSPENDING_OPERATION
   // If the allocation hasn't succeeded by this point, throw an OOM error.
   if (ptr == nullptr) {
+    ScopedAllowThreadSuspension ats;
     ThrowOutOfMemoryError(self, alloc_size, allocator);
   }
   return ptr;
@@ -1933,7 +1940,21 @@
 }
 
 uint64_t Heap::GetBytesAllocatedEver() const {
-  return GetBytesFreedEver() + GetBytesAllocated();
+  // Force the returned value to be monotonically increasing, in the sense that if this is called
+  // at A and B, such that A happens-before B, then the call at B returns a value no smaller than
+  // that at A. This is not otherwise guaranteed, since num_bytes_allocated_ is decremented first,
+  // and total_bytes_freed_ever_ is incremented later.
+  static std::atomic<uint64_t> max_bytes_so_far(0);
+  uint64_t so_far = max_bytes_so_far.load(std::memory_order_relaxed);
+  uint64_t current_bytes = GetBytesFreedEver(std::memory_order_acquire);
+  current_bytes += GetBytesAllocated();
+  do {
+    if (current_bytes <= so_far) {
+      return so_far;
+    }
+  } while (!max_bytes_so_far.compare_exchange_weak(so_far /* updated */,
+                                                   current_bytes, std::memory_order_relaxed));
+  return current_bytes;
 }
 
 // Check whether the given object is an instance of the given class.
@@ -2040,10 +2061,7 @@
   count_requested_homogeneous_space_compaction_++;
   // Store performed homogeneous space compaction at a new request arrival.
   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
-  // TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
-  // exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
-  // http://b/71769596
-  // Locks::mutator_lock_->AssertNotHeld(self);
+  Locks::mutator_lock_->AssertNotHeld(self);
   {
     ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
     MutexLock mu(self, *gc_complete_lock_);
@@ -2112,166 +2130,6 @@
   return HomogeneousSpaceCompactResult::kSuccess;
 }
 
-void Heap::TransitionCollector(CollectorType collector_type) {
-  if (collector_type == collector_type_) {
-    return;
-  }
-  // Collector transition must not happen with CC
-  CHECK(!kUseReadBarrier);
-  VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
-             << " -> " << static_cast<int>(collector_type);
-  uint64_t start_time = NanoTime();
-  uint32_t before_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
-  Runtime* const runtime = Runtime::Current();
-  Thread* const self = Thread::Current();
-  ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
-  // TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
-  // exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
-  // http://b/71769596
-  // Locks::mutator_lock_->AssertNotHeld(self);
-  // Busy wait until we can GC (StartGC can fail if we have a non-zero
-  // compacting_gc_disable_count_, this should rarely occurs).
-  for (;;) {
-    {
-      ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
-      MutexLock mu(self, *gc_complete_lock_);
-      // Ensure there is only one GC at a time.
-      WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self);
-      // Currently we only need a heap transition if we switch from a moving collector to a
-      // non-moving one, or visa versa.
-      const bool copying_transition = IsMovingGc(collector_type_) != IsMovingGc(collector_type);
-      // If someone else beat us to it and changed the collector before we could, exit.
-      // This is safe to do before the suspend all since we set the collector_type_running_ before
-      // we exit the loop. If another thread attempts to do the heap transition before we exit,
-      // then it would get blocked on WaitForGcToCompleteLocked.
-      if (collector_type == collector_type_) {
-        return;
-      }
-      // GC can be disabled if someone has a used GetPrimitiveArrayCritical but not yet released.
-      if (!copying_transition || disable_moving_gc_count_ == 0) {
-        // TODO: Not hard code in semi-space collector?
-        collector_type_running_ = copying_transition ? kCollectorTypeSS : collector_type;
-        break;
-      }
-    }
-    usleep(1000);
-  }
-  if (runtime->IsShuttingDown(self)) {
-    // Don't allow heap transitions to happen if the runtime is shutting down since these can
-    // cause objects to get finalized.
-    FinishGC(self, collector::kGcTypeNone);
-    return;
-  }
-  collector::GarbageCollector* collector = nullptr;
-  {
-    ScopedSuspendAll ssa(__FUNCTION__);
-    switch (collector_type) {
-      case kCollectorTypeSS: {
-        if (!IsMovingGc(collector_type_)) {
-          // Create the bump pointer space from the backup space.
-          CHECK(main_space_backup_ != nullptr);
-          MemMap mem_map = main_space_backup_->ReleaseMemMap();
-          // We are transitioning from non moving GC -> moving GC, since we copied from the bump
-          // pointer space last transition it will be protected.
-          CHECK(mem_map.IsValid());
-          mem_map.Protect(PROT_READ | PROT_WRITE);
-          bump_pointer_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space",
-                                                                          std::move(mem_map));
-          AddSpace(bump_pointer_space_);
-          collector = Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
-          // Use the now empty main space mem map for the bump pointer temp space.
-          mem_map = main_space_->ReleaseMemMap();
-          // Unset the pointers just in case.
-          if (dlmalloc_space_ == main_space_) {
-            dlmalloc_space_ = nullptr;
-          } else if (rosalloc_space_ == main_space_) {
-            rosalloc_space_ = nullptr;
-          }
-          // Remove the main space so that we don't try to trim it, this doens't work for debug
-          // builds since RosAlloc attempts to read the magic number from a protected page.
-          RemoveSpace(main_space_);
-          RemoveRememberedSet(main_space_);
-          delete main_space_;  // Delete the space since it has been removed.
-          main_space_ = nullptr;
-          RemoveRememberedSet(main_space_backup_.get());
-          main_space_backup_.reset(nullptr);  // Deletes the space.
-          temp_space_ = space::BumpPointerSpace::CreateFromMemMap("Bump pointer space 2",
-                                                                  std::move(mem_map));
-          AddSpace(temp_space_);
-        }
-        break;
-      }
-      case kCollectorTypeMS:
-        // Fall through.
-      case kCollectorTypeCMS: {
-        if (IsMovingGc(collector_type_)) {
-          CHECK(temp_space_ != nullptr);
-          MemMap mem_map = temp_space_->ReleaseMemMap();
-          RemoveSpace(temp_space_);
-          temp_space_ = nullptr;
-          mem_map.Protect(PROT_READ | PROT_WRITE);
-          CreateMainMallocSpace(std::move(mem_map),
-                                kDefaultInitialSize,
-                                std::min(mem_map.Size(), growth_limit_),
-                                mem_map.Size());
-          // Compact to the main space from the bump pointer space, don't need to swap semispaces.
-          AddSpace(main_space_);
-          collector = Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
-          mem_map = bump_pointer_space_->ReleaseMemMap();
-          RemoveSpace(bump_pointer_space_);
-          bump_pointer_space_ = nullptr;
-          const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
-          // Temporarily unprotect the backup mem map so rosalloc can write the debug magic number.
-          if (kIsDebugBuild && kUseRosAlloc) {
-            mem_map.Protect(PROT_READ | PROT_WRITE);
-          }
-          main_space_backup_.reset(CreateMallocSpaceFromMemMap(
-              std::move(mem_map),
-              kDefaultInitialSize,
-              std::min(mem_map.Size(), growth_limit_),
-              mem_map.Size(),
-              name,
-              true));
-          if (kIsDebugBuild && kUseRosAlloc) {
-            main_space_backup_->GetMemMap()->Protect(PROT_NONE);
-          }
-        }
-        break;
-      }
-      default: {
-        LOG(FATAL) << "Attempted to transition to invalid collector type "
-                   << static_cast<size_t>(collector_type);
-        UNREACHABLE();
-      }
-    }
-    ChangeCollector(collector_type);
-  }
-  // Can't call into java code with all threads suspended or the GC ongoing.
-  SelfDeletingTask* clear = reference_processor_->CollectClearedReferences(self);
-  uint64_t duration = NanoTime() - start_time;
-  GrowForUtilization(semi_space_collector_);
-  DCHECK(collector != nullptr);
-  LogGC(kGcCauseCollectorTransition, collector);
-  FinishGC(self, collector::kGcTypeFull);
-  // Now call into java and enqueue the references.
-  clear->Run(self);
-  clear->Finalize();
-  {
-    ScopedObjectAccess soa(self);
-    soa.Vm()->UnloadNativeLibraries();
-  }
-  int32_t after_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
-  int32_t delta_allocated = before_allocated - after_allocated;
-  std::string saved_str;
-  if (delta_allocated >= 0) {
-    saved_str = " saved at least " + PrettySize(delta_allocated);
-  } else {
-    saved_str = " expanded " + PrettySize(-delta_allocated);
-  }
-  VLOG(heap) << "Collector transition to " << collector_type << " took "
-             << PrettyDuration(duration) << saved_str;
-}
-
 void Heap::ChangeCollector(CollectorType collector_type) {
   // TODO: Only do this with all mutators suspended to avoid races.
   if (collector_type != collector_type_) {
@@ -2290,8 +2148,7 @@
         }
         break;
       }
-      case kCollectorTypeSS:  // Fall-through.
-      case kCollectorTypeGSS: {
+      case kCollectorTypeSS: {
         gc_plan_.push_back(collector::kGcTypeFull);
         if (use_tlab_) {
           ChangeAllocator(kAllocatorTypeTLAB);
@@ -2333,7 +2190,7 @@
 class ZygoteCompactingCollector final : public collector::SemiSpace {
  public:
   ZygoteCompactingCollector(gc::Heap* heap, bool is_running_on_memory_tool)
-      : SemiSpace(heap, false, "zygote collector"),
+      : SemiSpace(heap, "zygote collector"),
         bin_live_bitmap_(nullptr),
         bin_mark_bitmap_(nullptr),
         is_running_on_memory_tool_(is_running_on_memory_tool) {}
@@ -2426,13 +2283,32 @@
   for (const auto& space : GetContinuousSpaces()) {
     if (space->IsContinuousMemMapAllocSpace()) {
       space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
-      if (alloc_space->HasBoundBitmaps()) {
+      if (alloc_space->GetLiveBitmap() != nullptr && alloc_space->HasBoundBitmaps()) {
         alloc_space->UnBindBitmaps();
       }
     }
   }
 }
 
+void Heap::IncrementFreedEver() {
+  // Counters are updated only by us, but may be read concurrently.
+  // The updates should become visible after the corresponding live object info.
+  total_objects_freed_ever_.store(total_objects_freed_ever_.load(std::memory_order_relaxed)
+                                  + GetCurrentGcIteration()->GetFreedObjects()
+                                  + GetCurrentGcIteration()->GetFreedLargeObjects(),
+                                  std::memory_order_release);
+  total_bytes_freed_ever_.store(total_bytes_freed_ever_.load(std::memory_order_relaxed)
+                                + GetCurrentGcIteration()->GetFreedBytes()
+                                + GetCurrentGcIteration()->GetFreedLargeObjectBytes(),
+                                std::memory_order_release);
+}
+
+#pragma clang diagnostic push
+#if !ART_USE_FUTEXES
+// Frame gets too large, perhaps due to Bionic pthread_mutex_lock size. We don't care.
+#  pragma clang diagnostic ignored "-Wframe-larger-than="
+#endif
+// This has a large frame, but shouldn't be run anywhere near the stack limit.
 void Heap::PreZygoteFork() {
   if (!HasZygoteSpace()) {
     // We still want to GC in case there is some unreachable non moving objects that could cause a
@@ -2507,8 +2383,7 @@
     if (temp_space_ != nullptr) {
       CHECK(temp_space_->IsEmpty());
     }
-    total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
-    total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
+    IncrementFreedEver();
     // Update the end and write out image.
     non_moving_space_->SetEnd(target_space.End());
     non_moving_space_->SetLimit(target_space.Limit());
@@ -2546,15 +2421,12 @@
   AddSpace(zygote_space_);
   non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
   AddSpace(non_moving_space_);
-  if (kUseBakerReadBarrier && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects) {
+  constexpr bool set_mark_bit = kUseBakerReadBarrier
+                                && gc::collector::ConcurrentCopying::kGrayDirtyImmuneObjects;
+  if (set_mark_bit) {
     // Treat all of the objects in the zygote as marked to avoid unnecessary dirty pages. This is
     // safe since we mark all of the objects that may reference non immune objects as gray.
-    zygote_space_->GetLiveBitmap()->VisitMarkedRange(
-        reinterpret_cast<uintptr_t>(zygote_space_->Begin()),
-        reinterpret_cast<uintptr_t>(zygote_space_->Limit()),
-        [](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
-      CHECK(obj->AtomicSetMarkBit(0, 1));
-    });
+    zygote_space_->SetMarkBitInLiveObjects();
   }
 
   // Create the zygote space mod union table.
@@ -2586,7 +2458,7 @@
     }
   }
   AddModUnionTable(mod_union_table);
-  large_object_space_->SetAllLargeObjectsAsZygoteObjects(self);
+  large_object_space_->SetAllLargeObjectsAsZygoteObjects(self, set_mark_bit);
   if (collector::SemiSpace::kUseRememberedSet) {
     // Add a new remembered set for the post-zygote non-moving space.
     accounting::RememberedSet* post_zygote_non_moving_space_rem_set =
@@ -2597,6 +2469,7 @@
     AddRememberedSet(post_zygote_non_moving_space_rem_set);
   }
 }
+#pragma clang diagnostic pop
 
 void Heap::FlushAllocStack() {
   MarkAllocStackAsLive(allocation_stack_.get());
@@ -2651,10 +2524,16 @@
   ATraceIntegerValue("Heap size (KB)", heap_size / KB);
 }
 
+#if defined(__GLIBC__)
+# define IF_GLIBC(x) x
+#else
+# define IF_GLIBC(x)
+#endif
+
 size_t Heap::GetNativeBytes() {
   size_t malloc_bytes;
 #if defined(__BIONIC__) || defined(__GLIBC__)
-  size_t mmapped_bytes;
+  IF_GLIBC(size_t mmapped_bytes;)
   struct mallinfo mi = mallinfo();
   // In spite of the documentation, the jemalloc version of this call seems to do what we want,
   // and it is thread-safe.
@@ -2662,17 +2541,24 @@
     // Shouldn't happen, but glibc declares uordblks as int.
     // Avoiding sign extension gets us correct behavior for another 2 GB.
     malloc_bytes = (unsigned int)mi.uordblks;
-    mmapped_bytes = (unsigned int)mi.hblkhd;
+    IF_GLIBC(mmapped_bytes = (unsigned int)mi.hblkhd;)
   } else {
     malloc_bytes = mi.uordblks;
-    mmapped_bytes = mi.hblkhd;
+    IF_GLIBC(mmapped_bytes = mi.hblkhd;)
   }
-  // From the spec, we clearly have mmapped_bytes <= malloc_bytes. Reality is sometimes
-  // dramatically different. (b/119580449) If so, fudge it.
+  // From the spec, it appeared mmapped_bytes <= malloc_bytes. Reality was sometimes
+  // dramatically different. (b/119580449 was an early bug.) If so, we try to fudge it.
+  // However, malloc implementations seem to interpret hblkhd differently, namely as
+  // mapped blocks backing the entire heap (e.g. jemalloc) vs. large objects directly
+  // allocated via mmap (e.g. glibc). Thus we now only do this for glibc, where it
+  // previously helped, and which appears to use a reading of the spec compatible
+  // with our adjustment.
+#if defined(__GLIBC__)
   if (mmapped_bytes > malloc_bytes) {
     malloc_bytes = mmapped_bytes;
   }
-#else
+#endif  // GLIBC
+#else  // Neither Bionic nor Glibc
   // We should hit this case only in contexts in which GC triggering is not critical. Effectively
   // disable GC triggering based on malloc().
   malloc_bytes = 1000;
@@ -2703,10 +2589,7 @@
     }
   }
   ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
-  // TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
-  // exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
-  // http://b/71769596
-  // Locks::mutator_lock_->AssertNotHeld(self);
+  Locks::mutator_lock_->AssertNotHeld(self);
   if (self->IsHandlingStackOverflow()) {
     // If we are throwing a stack overflow error we probably don't have enough remaining stack
     // space to run the GC.
@@ -2748,8 +2631,6 @@
            current_allocator_ == kAllocatorTypeRegionTLAB);
     switch (collector_type_) {
       case kCollectorTypeSS:
-        // Fall-through.
-      case kCollectorTypeGSS:
         semi_space_collector_->SetFromSpace(bump_pointer_space_);
         semi_space_collector_->SetToSpace(temp_space_);
         semi_space_collector_->SetSwapSemiSpaces(true);
@@ -2788,8 +2669,7 @@
       << "Could not find garbage collector with collector_type="
       << static_cast<size_t>(collector_type_) << " and gc_type=" << gc_type;
   collector->Run(gc_cause, clear_soft_references || runtime->IsZygote());
-  total_objects_freed_ever_ += GetCurrentGcIteration()->GetFreedObjects();
-  total_bytes_freed_ever_ += GetCurrentGcIteration()->GetFreedBytes();
+  IncrementFreedEver();
   RequestTrim(self);
   // Collect cleared references.
   SelfDeletingTask* clear = reference_processor_->CollectClearedReferences(self);
@@ -3379,8 +3259,7 @@
       TimingLogger::ScopedTiming t2(name, timings);
       table->ProcessCards();
     } else if (use_rem_sets && rem_set != nullptr) {
-      DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS)
-          << static_cast<int>(collector_type_);
+      DCHECK(collector::SemiSpace::kUseRememberedSet) << static_cast<int>(collector_type_);
       TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings);
       rem_set->ClearCards();
     } else if (process_alloc_space_cards) {
@@ -3639,23 +3518,19 @@
   const size_t bytes_allocated = GetBytesAllocated();
   // Trace the new heap size after the GC is finished.
   TraceHeapSize(bytes_allocated);
-  uint64_t target_size;
+  uint64_t target_size, grow_bytes;
   collector::GcType gc_type = collector_ran->GetGcType();
+  MutexLock mu(Thread::Current(), process_state_update_lock_);
   // Use the multiplier to grow more for foreground.
-  const double multiplier = HeapGrowthMultiplier();  // Use the multiplier to grow more for
-  // foreground.
-  const size_t adjusted_min_free = static_cast<size_t>(min_free_ * multiplier);
-  const size_t adjusted_max_free = static_cast<size_t>(max_free_ * multiplier);
+  const double multiplier = HeapGrowthMultiplier();
   if (gc_type != collector::kGcTypeSticky) {
     // Grow the heap for non sticky GC.
     uint64_t delta = bytes_allocated * (1.0 / GetTargetHeapUtilization() - 1.0);
     DCHECK_LE(delta, std::numeric_limits<size_t>::max()) << "bytes_allocated=" << bytes_allocated
         << " target_utilization_=" << target_utilization_;
-    target_size = bytes_allocated + delta * multiplier;
-    target_size = std::min(target_size,
-                           static_cast<uint64_t>(bytes_allocated + adjusted_max_free));
-    target_size = std::max(target_size,
-                           static_cast<uint64_t>(bytes_allocated + adjusted_min_free));
+    grow_bytes = std::min(delta, static_cast<uint64_t>(max_free_));
+    grow_bytes = std::max(grow_bytes, static_cast<uint64_t>(min_free_));
+    target_size = bytes_allocated + static_cast<uint64_t>(grow_bytes * multiplier);
     next_gc_type_ = collector::kGcTypeSticky;
   } else {
     collector::GcType non_sticky_gc_type = NonStickyGcType();
@@ -3685,15 +3560,28 @@
       next_gc_type_ = non_sticky_gc_type;
     }
     // If we have freed enough memory, shrink the heap back down.
+    const size_t adjusted_max_free = static_cast<size_t>(max_free_ * multiplier);
     if (bytes_allocated + adjusted_max_free < target_footprint) {
       target_size = bytes_allocated + adjusted_max_free;
+      grow_bytes = max_free_;
     } else {
       target_size = std::max(bytes_allocated, target_footprint);
+      // The same whether jank perceptible or not; just avoid the adjustment.
+      grow_bytes = 0;
     }
   }
   CHECK_LE(target_size, std::numeric_limits<size_t>::max());
   if (!ignore_target_footprint_) {
     SetIdealFootprint(target_size);
+    // Store target size (computed with foreground heap growth multiplier) for updating
+    // target_footprint_ when process state switches to foreground.
+    // target_size = 0 ensures that target_footprint_ is not updated on
+    // process-state switch.
+    min_foreground_target_footprint_ =
+        (multiplier <= 1.0 && grow_bytes > 0)
+        ? bytes_allocated + static_cast<size_t>(grow_bytes * foreground_heap_growth_multiplier_)
+        : 0;
+
     if (IsGcConcurrent()) {
       const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
           current_gc_iteration_.GetFreedLargeObjectBytes() +
@@ -4002,15 +3890,9 @@
 static constexpr size_t kNewNativeDiscountFactor = 2;
 
 // If weighted java + native memory use exceeds our target by kStopForNativeFactor, and
-// newly allocated memory exceeds kHugeNativeAlloc, we wait for GC to complete to avoid
+// newly allocated memory exceeds stop_for_native_allocs_, we wait for GC to complete to avoid
 // running out of memory.
 static constexpr float kStopForNativeFactor = 4.0;
-// TODO: Allow this to be tuned. We want this much smaller for some apps, like Calculator.
-// But making it too small can cause jank in apps like launcher that intentionally allocate
-// large amounts of memory in rapid succession. (b/122099093)
-// For now, we punt, and use a value that should be easily large enough to disable this in all
-// questionable setting, but that is clearly too large to be effective for small memory devices.
-static constexpr size_t kHugeNativeAllocs = 1 * GB;
 
 // Return the ratio of the weighted native + java allocated bytes to its target value.
 // A return value > 1.0 means we should collect. Significantly larger values mean we're falling
@@ -4050,7 +3932,7 @@
     if (is_gc_concurrent) {
       RequestConcurrentGC(self, kGcCauseForNativeAlloc, /*force_full=*/true);
       if (gc_urgency > kStopForNativeFactor
-          && current_native_bytes > kHugeNativeAllocs) {
+          && current_native_bytes > stop_for_native_allocs_) {
         // We're in danger of running out of memory due to rampant native allocation.
         if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
           LOG(INFO) << "Stopping for native allocation, urgency: " << gc_urgency;
@@ -4073,6 +3955,8 @@
 // This should only be done for large allocations of non-malloc memory, which we wouldn't
 // otherwise see.
 void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
+  // Cautiously check for a wrapped negative bytes argument.
+  DCHECK(sizeof(size_t) < 8 || bytes < (std::numeric_limits<size_t>::max() / 2));
   native_bytes_registered_.fetch_add(bytes, std::memory_order_relaxed);
   uint32_t objects_notified =
       native_objects_notified_.fetch_add(1, std::memory_order_relaxed);
@@ -4142,9 +4026,8 @@
 void Heap::ClearMarkedObjects() {
   // Clear all of the spaces' mark bitmaps.
   for (const auto& space : GetContinuousSpaces()) {
-    accounting::ContinuousSpaceBitmap* mark_bitmap = space->GetMarkBitmap();
-    if (space->GetLiveBitmap() != mark_bitmap) {
-      mark_bitmap->Clear();
+    if (space->GetLiveBitmap() != nullptr && !space->HasBoundBitmaps()) {
+      space->GetMarkBitmap()->Clear();
     }
   }
   // Clear the marked objects in the discontinous space object sets.
@@ -4212,10 +4095,10 @@
     bool new_backtrace = false;
     {
       static constexpr size_t kMaxFrames = 16u;
+      MutexLock mu(self, *backtrace_lock_);
       FixedSizeBacktrace<kMaxFrames> backtrace;
       backtrace.Collect(/* skip_count= */ 2);
       uint64_t hash = backtrace.Hash();
-      MutexLock mu(self, *backtrace_lock_);
       new_backtrace = seen_backtraces_.find(hash) == seen_backtraces_.end();
       if (new_backtrace) {
         seen_backtraces_.insert(hash);
@@ -4240,50 +4123,23 @@
 }
 
 bool Heap::ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const {
-  for (gc::space::ImageSpace* space : boot_image_spaces_) {
-    if (space->HasAddress(obj.Ptr())) {
-      return true;
-    }
-  }
-  return false;
+  DCHECK_EQ(IsBootImageAddress(obj.Ptr()),
+            any_of(boot_image_spaces_.begin(),
+                   boot_image_spaces_.end(),
+                   [obj](gc::space::ImageSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
+                     return space->HasAddress(obj.Ptr());
+                   }));
+  return IsBootImageAddress(obj.Ptr());
 }
 
 bool Heap::IsInBootImageOatFile(const void* p) const {
-  for (gc::space::ImageSpace* space : boot_image_spaces_) {
-    if (space->GetOatFile()->Contains(p)) {
-      return true;
-    }
-  }
-  return false;
-}
-
-void Heap::GetBootImagesSize(uint32_t* boot_image_begin,
-                             uint32_t* boot_image_end,
-                             uint32_t* boot_oat_begin,
-                             uint32_t* boot_oat_end) {
-  DCHECK(boot_image_begin != nullptr);
-  DCHECK(boot_image_end != nullptr);
-  DCHECK(boot_oat_begin != nullptr);
-  DCHECK(boot_oat_end != nullptr);
-  *boot_image_begin = 0u;
-  *boot_image_end = 0u;
-  *boot_oat_begin = 0u;
-  *boot_oat_end = 0u;
-  for (gc::space::ImageSpace* space_ : GetBootImageSpaces()) {
-    const uint32_t image_begin = PointerToLowMemUInt32(space_->Begin());
-    const uint32_t image_size = space_->GetImageHeader().GetImageSize();
-    if (*boot_image_begin == 0 || image_begin < *boot_image_begin) {
-      *boot_image_begin = image_begin;
-    }
-    *boot_image_end = std::max(*boot_image_end, image_begin + image_size);
-    const OatFile* boot_oat_file = space_->GetOatFile();
-    const uint32_t oat_begin = PointerToLowMemUInt32(boot_oat_file->Begin());
-    const uint32_t oat_size = boot_oat_file->Size();
-    if (*boot_oat_begin == 0 || oat_begin < *boot_oat_begin) {
-      *boot_oat_begin = oat_begin;
-    }
-    *boot_oat_end = std::max(*boot_oat_end, oat_begin + oat_size);
-  }
+  DCHECK_EQ(IsBootImageAddress(p),
+            any_of(boot_image_spaces_.begin(),
+                   boot_image_spaces_.end(),
+                   [p](gc::space::ImageSpace* space) REQUIRES_SHARED(Locks::mutator_lock_) {
+                     return space->GetOatFile()->Contains(p);
+                   }));
+  return IsBootImageAddress(p);
 }
 
 void Heap::SetAllocationListener(AllocationListener* l) {
@@ -4355,14 +4211,13 @@
             ? std::max(alloc_size, kPartialTlabSize)
             : gc::space::RegionSpace::kRegionSize;
         // Try to allocate a tlab.
-        if (!region_space_->AllocNewTlab(self, new_tlab_size)) {
+        if (!region_space_->AllocNewTlab(self, new_tlab_size, bytes_tl_bulk_allocated)) {
           // Failed to allocate a tlab. Try non-tlab.
           return region_space_->AllocNonvirtual<false>(alloc_size,
                                                        bytes_allocated,
                                                        usable_size,
                                                        bytes_tl_bulk_allocated);
         }
-        *bytes_tl_bulk_allocated = new_tlab_size;
         // Fall-through to using the TLAB below.
       } else {
         // Check OOME for a non-tlab allocation.
@@ -4431,5 +4286,36 @@
   }
 }
 
+void Heap::VisitReflectiveTargets(ReflectiveValueVisitor *visit) {
+  VisitObjectsPaused([&visit](mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
+    art::ObjPtr<mirror::Class> klass(ref->GetClass());
+    // All these classes are in the BootstrapClassLoader.
+    if (!klass->IsBootStrapClassLoaded()) {
+      return;
+    }
+    if (GetClassRoot<mirror::Method>()->IsAssignableFrom(klass) ||
+        GetClassRoot<mirror::Constructor>()->IsAssignableFrom(klass)) {
+      down_cast<mirror::Executable*>(ref)->VisitTarget(visit);
+    } else if (art::GetClassRoot<art::mirror::Field>() == klass) {
+      down_cast<mirror::Field*>(ref)->VisitTarget(visit);
+    } else if (art::GetClassRoot<art::mirror::MethodHandle>()->IsAssignableFrom(klass)) {
+      down_cast<mirror::MethodHandle*>(ref)->VisitTarget(visit);
+    } else if (art::GetClassRoot<art::mirror::FieldVarHandle>()->IsAssignableFrom(klass)) {
+      down_cast<mirror::FieldVarHandle*>(ref)->VisitTarget(visit);
+    } else if (art::GetClassRoot<art::mirror::DexCache>()->IsAssignableFrom(klass)) {
+      down_cast<mirror::DexCache*>(ref)->VisitReflectiveTargets(visit);
+    }
+  });
+}
+
+bool Heap::AddHeapTask(gc::HeapTask* task) {
+  Thread* const self = Thread::Current();
+  if (!CanAddHeapTask(self)) {
+    return false;
+  }
+  GetTaskProcessor()->AddTask(self, task);
+  return true;
+}
+
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 5cf1978..ebbb843 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -51,6 +51,7 @@
 enum class InstructionSet;
 class IsMarkedVisitor;
 class Mutex;
+class ReflectiveValueVisitor;
 class RootVisitor;
 class StackVisitor;
 class Thread;
@@ -68,6 +69,7 @@
 class AllocationListener;
 class AllocRecordObjectMap;
 class GcPauseListener;
+class HeapTask;
 class ReferenceProcessor;
 class TaskProcessor;
 class Verification;
@@ -127,6 +129,10 @@
 
 class Heap {
  public:
+  // How much we grow the TLAB if we can do it.
+  static constexpr size_t kPartialTlabSize = 16 * KB;
+  static constexpr bool kUsePartialTlabs = true;
+
   static constexpr size_t kDefaultStartingSize = kPageSize;
   static constexpr size_t kDefaultInitialSize = 2 * MB;
   static constexpr size_t kDefaultMaximumSize = 256 * MB;
@@ -136,7 +142,7 @@
   static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
   static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
   static constexpr size_t kDefaultTLABSize = 32 * KB;
-  static constexpr double kDefaultTargetUtilization = 0.5;
+  static constexpr double kDefaultTargetUtilization = 0.75;
   static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
   // Primitive arrays larger than this size are put in the large object space.
   static constexpr size_t kMinLargeObjectThreshold = 3 * kPageSize;
@@ -162,7 +168,7 @@
   static constexpr uint32_t kNotifyNativeInterval = 32;
 #else
   // Some host mallinfo() implementations are slow. And memory is less scarce.
-  static constexpr uint32_t kNotifyNativeInterval = 128;
+  static constexpr uint32_t kNotifyNativeInterval = 512;
 #endif
 
   // RegisterNativeAllocation checks immediately whether GC is needed if size exceeds the
@@ -187,6 +193,7 @@
        size_t max_free,
        double target_utilization,
        double foreground_heap_growth_multiplier,
+       size_t stop_for_native_allocs,
        size_t capacity,
        size_t non_moving_space_capacity,
        const std::vector<std::string>& boot_class_path,
@@ -222,7 +229,7 @@
   ~Heap();
 
   // Allocates and initializes storage for an object instance.
-  template <bool kInstrumented, typename PreFenceVisitor>
+  template <bool kInstrumented = true, typename PreFenceVisitor>
   mirror::Object* AllocObject(Thread* self,
                               ObjPtr<mirror::Class> klass,
                               size_t num_bytes,
@@ -231,15 +238,16 @@
       REQUIRES(!*gc_complete_lock_,
                !*pending_task_lock_,
                !*backtrace_lock_,
+               !process_state_update_lock_,
                !Roles::uninterruptible_) {
-    return AllocObjectWithAllocator<kInstrumented, true>(self,
-                                                         klass,
-                                                         num_bytes,
-                                                         GetCurrentAllocator(),
-                                                         pre_fence_visitor);
+    return AllocObjectWithAllocator<kInstrumented>(self,
+                                                   klass,
+                                                   num_bytes,
+                                                   GetCurrentAllocator(),
+                                                   pre_fence_visitor);
   }
 
-  template <bool kInstrumented, typename PreFenceVisitor>
+  template <bool kInstrumented = true, typename PreFenceVisitor>
   mirror::Object* AllocNonMovableObject(Thread* self,
                                         ObjPtr<mirror::Class> klass,
                                         size_t num_bytes,
@@ -248,15 +256,16 @@
       REQUIRES(!*gc_complete_lock_,
                !*pending_task_lock_,
                !*backtrace_lock_,
+               !process_state_update_lock_,
                !Roles::uninterruptible_) {
-    return AllocObjectWithAllocator<kInstrumented, true>(self,
-                                                         klass,
-                                                         num_bytes,
-                                                         GetCurrentNonMovingAllocator(),
-                                                         pre_fence_visitor);
+    return AllocObjectWithAllocator<kInstrumented>(self,
+                                                   klass,
+                                                   num_bytes,
+                                                   GetCurrentNonMovingAllocator(),
+                                                   pre_fence_visitor);
   }
 
-  template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
+  template <bool kInstrumented = true, bool kCheckLargeObject = true, typename PreFenceVisitor>
   ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(Thread* self,
                                                          ObjPtr<mirror::Class> klass,
                                                          size_t byte_count,
@@ -266,6 +275,7 @@
       REQUIRES(!*gc_complete_lock_,
                !*pending_task_lock_,
                !*backtrace_lock_,
+               !process_state_update_lock_,
                !Roles::uninterruptible_);
 
   AllocatorType GetCurrentAllocator() const {
@@ -285,20 +295,23 @@
   ALWAYS_INLINE void VisitObjectsPaused(Visitor&& visitor)
       REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
 
+  void VisitReflectiveTargets(ReflectiveValueVisitor* visitor)
+      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
+
   void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Inform the garbage collector of a non-malloc allocated native memory that might become
   // reclaimable in the future as a result of Java garbage collection.
   void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
-      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
   void RegisterNativeFree(JNIEnv* env, size_t bytes);
 
   // Notify the garbage collector of malloc allocations that might be reclaimable
   // as a result of Java garbage collection. Each such call represents approximately
   // kNotifyNativeInterval such allocations.
   void NotifyNativeAllocations(JNIEnv* env)
-      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
 
   uint32_t GetNotifyNativeInterval() {
     return kNotifyNativeInterval;
@@ -308,9 +321,6 @@
   void ChangeAllocator(AllocatorType allocator)
       REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_);
 
-  // Transition the garbage collector during runtime, may copy objects from one space to another.
-  void TransitionCollector(CollectorType collector_type) REQUIRES(!*gc_complete_lock_);
-
   // Change the collector to be one of the possible options (MS, CMS, SS).
   void ChangeCollector(CollectorType collector_type)
       REQUIRES(Locks::mutator_lock_);
@@ -367,12 +377,13 @@
 
   // Initiates an explicit garbage collection.
   void CollectGarbage(bool clear_soft_references, GcCause cause = kGcCauseExplicit)
-      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
 
   // Does a concurrent GC, should only be called by the GC daemon thread
   // through runtime.
   void ConcurrentGC(Thread* self, GcCause cause, bool force_full)
-      REQUIRES(!Locks::runtime_shutdown_lock_, !*gc_complete_lock_, !*pending_task_lock_);
+      REQUIRES(!Locks::runtime_shutdown_lock_, !*gc_complete_lock_,
+               !*pending_task_lock_, !process_state_update_lock_);
 
   // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
   // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
@@ -462,7 +473,7 @@
 
   // Update the heap's process state to a new value, may cause compaction to occur.
   void UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state)
-      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
+      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_, !process_state_update_lock_);
 
   bool HaveContinuousSpaces() const NO_THREAD_SAFETY_ANALYSIS {
     // No lock since vector empty is thread safe.
@@ -550,13 +561,15 @@
   uint64_t GetBytesAllocatedEver() const;
 
   // Returns the total number of objects freed since the heap was created.
-  uint64_t GetObjectsFreedEver() const {
-    return total_objects_freed_ever_;
+  // With default memory order, this should be viewed only as a hint.
+  uint64_t GetObjectsFreedEver(std::memory_order mo = std::memory_order_relaxed) const {
+    return total_objects_freed_ever_.load(mo);
   }
 
   // Returns the total number of bytes freed since the heap was created.
-  uint64_t GetBytesFreedEver() const {
-    return total_bytes_freed_ever_;
+  // With default memory order, this should be viewed only as a hint.
+  uint64_t GetBytesFreedEver(std::memory_order mo = std::memory_order_relaxed) const {
+    return total_bytes_freed_ever_.load(mo);
   }
 
   space::RegionSpace* GetRegionSpace() const {
@@ -620,7 +633,8 @@
   void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);
 
   // Do a pending collector transition.
-  void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
+  void DoPendingCollectorTransition()
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
 
   // Deflate monitors, ... and trim the spaces.
   void Trim(Thread* self) REQUIRES(!*gc_complete_lock_);
@@ -686,13 +700,20 @@
   bool IsInBootImageOatFile(const void* p) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void GetBootImagesSize(uint32_t* boot_image_begin,
-                         uint32_t* boot_image_end,
-                         uint32_t* boot_oat_begin,
-                         uint32_t* boot_oat_end);
+  // Get the start address of the boot images if any; otherwise returns 0.
+  uint32_t GetBootImagesStartAddress() const {
+    return boot_images_start_address_;
+  }
 
-  // Permenantly disable moving garbage collection.
-  void DisableMovingGc() REQUIRES(!*gc_complete_lock_);
+  // Get the size of all boot images, including the heap and oat areas.
+  uint32_t GetBootImagesSize() const {
+    return boot_images_size_;
+  }
+
+  // Check if a pointer points to a boot image.
+  bool IsBootImageAddress(const void* p) const {
+    return reinterpret_cast<uintptr_t>(p) - boot_images_start_address_ < boot_images_size_;
+  }
 
   space::DlMallocSpace* GetDlMallocSpace() const {
     return dlmalloc_space_;
@@ -874,7 +895,8 @@
   void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
 
   // Create a new alloc space and compact default alloc space to it.
-  HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact() REQUIRES(!*gc_complete_lock_);
+  HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact()
+      REQUIRES(!*gc_complete_lock_, !process_state_update_lock_);
   bool SupportHomogeneousSpaceCompactAndCollectorTransitions() const;
 
   // Install an allocation listener.
@@ -897,6 +919,10 @@
 
   void PostForkChildAction(Thread* self);
 
+  void TraceHeapSize(size_t heap_size);
+
+  bool AddHeapTask(gc::HeapTask* task);
+
  private:
   class ConcurrentGCTask;
   class CollectorTransitionTask;
@@ -956,7 +982,6 @@
     return
         collector_type == kCollectorTypeCC ||
         collector_type == kCollectorTypeSS ||
-        collector_type == kCollectorTypeGSS ||
         collector_type == kCollectorTypeCCBackground ||
         collector_type == kCollectorTypeHomogeneousSpaceCompact;
   }
@@ -972,7 +997,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
   void CheckGCForNative(Thread* self)
-      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
+      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_, !process_state_update_lock_);
 
   accounting::ObjectStack* GetMarkStack() {
     return mark_stack_.get();
@@ -985,7 +1010,8 @@
                                    size_t byte_count,
                                    const PreFenceVisitor& pre_fence_visitor)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_,
+               !*backtrace_lock_, !process_state_update_lock_);
 
   // Handles Allocate()'s slow allocation path with GC involved after
   // an initial allocation attempt failed.
@@ -998,6 +1024,7 @@
                                          size_t* bytes_tl_bulk_allocated,
                                          ObjPtr<mirror::Class>* klass)
       REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
+      REQUIRES(Roles::uninterruptible_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Allocate into a specific space.
@@ -1064,7 +1091,7 @@
                                            GcCause gc_cause,
                                            bool clear_soft_references)
       REQUIRES(!*gc_complete_lock_, !Locks::heap_bitmap_lock_, !Locks::thread_suspend_count_lock_,
-               !*pending_task_lock_);
+               !*pending_task_lock_, !process_state_update_lock_);
 
   void PreGcVerification(collector::GarbageCollector* gc)
       REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
@@ -1101,7 +1128,8 @@
   // collection. bytes_allocated_before_gc is used to measure bytes / second for the period which
   // the GC was run.
   void GrowForUtilization(collector::GarbageCollector* collector_ran,
-                          size_t bytes_allocated_before_gc = 0);
+                          size_t bytes_allocated_before_gc = 0)
+      REQUIRES(!process_state_update_lock_);
 
   size_t GetPercentFree();
 
@@ -1120,13 +1148,13 @@
   // Push an object onto the allocation stack.
   void PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
   void PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
   void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, ObjPtr<mirror::Object>* obj)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !process_state_update_lock_);
 
   void ClearConcurrentGCRequest();
   void ClearPendingTrim(Thread* self) REQUIRES(!*pending_task_lock_);
@@ -1159,7 +1187,8 @@
   // GC stress mode attempts to do one GC per unique backtrace.
   void CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
+      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_,
+               !*backtrace_lock_, !process_state_update_lock_);
 
   collector::GcType NonStickyGcType() const {
     return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
@@ -1176,7 +1205,12 @@
 
   ALWAYS_INLINE void IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke);
 
-  void TraceHeapSize(size_t heap_size);
+  // On switching app from background to foreground, grow the heap size
+  // to incorporate foreground heap growth multiplier.
+  void GrowHeapOnJankPerceptibleSwitch() REQUIRES(!process_state_update_lock_);
+
+  // Update *_freed_ever_ counters to reflect current GC values.
+  void IncrementFreedEver();
 
   // Remove a vlog code from heap-inl.h which is transitively included in half the world.
   static void VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size);
@@ -1324,6 +1358,12 @@
   // concurrent GC case.
   Atomic<size_t> target_footprint_;
 
+  // Computed with foreground-multiplier in GrowForUtilization() when run in
+  // jank non-perceptible state. On update to process state from background to
+  // foreground we set target_footprint_ to this value.
+  Mutex process_state_update_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  size_t min_foreground_target_footprint_ GUARDED_BY(process_state_update_lock_);
+
   // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
   // it completes ahead of an allocation failing.
   // A multiple of this is also used to determine when to trigger a GC in response to native
@@ -1331,10 +1371,10 @@
   size_t concurrent_start_bytes_;
 
   // Since the heap was created, how many bytes have been freed.
-  uint64_t total_bytes_freed_ever_;
+  std::atomic<uint64_t> total_bytes_freed_ever_;
 
   // Since the heap was created, how many objects have been freed.
-  uint64_t total_objects_freed_ever_;
+  std::atomic<uint64_t> total_objects_freed_ever_;
 
   // Number of bytes currently allocated and not yet reclaimed. Includes active
   // TLABS in their entirety, even if they have not yet been parceled out.
@@ -1447,6 +1487,13 @@
   // How much more we grow the heap when we are a foreground app instead of background.
   double foreground_heap_growth_multiplier_;
 
+  // The amount of native memory allocation since the last GC required to cause us to wait for a
+  // collection as a result of native allocation. Very large values can cause the device to run
+  // out of memory, due to lack of finalization to reclaim native memory.  Making it too small can
+  // cause jank in apps like launcher that intentionally allocate large amounts of memory in rapid
+  // succession. (b/122099093) 1/4 to 1/3 of physical memory seems to be a good number.
+  const size_t stop_for_native_allocs_;
+
   // Total time which mutators are paused or waiting for GC to complete.
   uint64_t total_wait_time_;
 
@@ -1551,6 +1598,10 @@
   // Boot image spaces.
   std::vector<space::ImageSpace*> boot_image_spaces_;
 
+  // Boot image address range. Includes images and oat files.
+  uint32_t boot_images_start_address_;
+  uint32_t boot_images_size_;
+
   // An installed allocation listener.
   Atomic<AllocationListener*> alloc_listener_;
   // An installed GC Pause listener.
@@ -1566,6 +1617,7 @@
   friend class GCCriticalSection;
   friend class ReferenceQueue;
   friend class ScopedGCCriticalSection;
+  friend class ScopedInterruptibleGCCriticalSection;
   friend class VerifyReferenceCardVisitor;
   friend class VerifyReferenceVisitor;
   friend class VerifyObjectVisitor;
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 5f4621e..817c876 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -87,11 +87,11 @@
 TEST_F(HeapTest, HeapBitmapCapacityTest) {
   uint8_t* heap_begin = reinterpret_cast<uint8_t*>(0x1000);
   const size_t heap_capacity = kObjectAlignment * (sizeof(intptr_t) * 8 + 1);
-  std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
+  accounting::ContinuousSpaceBitmap bitmap(
       accounting::ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
   mirror::Object* fake_end_of_heap_object =
       reinterpret_cast<mirror::Object*>(&heap_begin[heap_capacity - kObjectAlignment]);
-  bitmap->Set(fake_end_of_heap_object);
+  bitmap.Set(fake_end_of_heap_object);
 }
 
 TEST_F(HeapTest, DumpGCPerformanceOnShutdown) {
diff --git a/runtime/gc/scoped_gc_critical_section.cc b/runtime/gc/scoped_gc_critical_section.cc
index 7a0a6e8..eaede43 100644
--- a/runtime/gc/scoped_gc_critical_section.cc
+++ b/runtime/gc/scoped_gc_critical_section.cc
@@ -58,5 +58,17 @@
   critical_section_.Exit(old_no_suspend_reason_);
 }
 
+ScopedInterruptibleGCCriticalSection::ScopedInterruptibleGCCriticalSection(
+    Thread* self,
+    GcCause cause,
+    CollectorType type) : self_(self) {
+  DCHECK(self != nullptr);
+  Runtime::Current()->GetHeap()->StartGC(self_, cause, type);
+}
+
+ScopedInterruptibleGCCriticalSection::~ScopedInterruptibleGCCriticalSection() {
+  Runtime::Current()->GetHeap()->FinishGC(self_, collector::kGcTypeNone);
+}
+
 }  // namespace gc
 }  // namespace art
diff --git a/runtime/gc/scoped_gc_critical_section.h b/runtime/gc/scoped_gc_critical_section.h
index 8ad0158..b3a897c 100644
--- a/runtime/gc/scoped_gc_critical_section.h
+++ b/runtime/gc/scoped_gc_critical_section.h
@@ -59,6 +59,19 @@
   const char* old_no_suspend_reason_;
 };
 
+// The use of ScopedGCCriticalSection should be preferred whenever possible.
+// This class allows thread suspension but should never be used with allocations because of the
+// deadlock risk. TODO: Add a new thread role for "no allocations" that still allows suspension.
+class ScopedInterruptibleGCCriticalSection {
+ public:
+  ScopedInterruptibleGCCriticalSection(Thread* self, GcCause cause, CollectorType type);
+  ~ScopedInterruptibleGCCriticalSection();
+
+ private:
+  Thread* const self_;
+};
+
+
 }  // namespace gc
 }  // namespace art
 
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 609ccee..c4fda14 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -206,7 +206,7 @@
 void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
   objects_allocated_.fetch_add(thread->GetThreadLocalObjectsAllocated(), std::memory_order_relaxed);
   bytes_allocated_.fetch_add(thread->GetThreadLocalBytesAllocated(), std::memory_order_relaxed);
-  thread->SetTlab(nullptr, nullptr, nullptr);
+  thread->ResetTlab();
 }
 
 bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes) {
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 3e4961a..559fae8 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -96,11 +96,11 @@
     return GetMemMap()->Size();
   }
 
-  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
+  accounting::ContinuousSpaceBitmap* GetLiveBitmap() override {
     return nullptr;
   }
 
-  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
+  accounting::ContinuousSpaceBitmap* GetMarkBitmap() override {
     return nullptr;
   }
 
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 7955ff9..f3fccbb 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -154,7 +154,7 @@
   // create mspace using our backing storage starting at begin and with a footprint of
   // morecore_start. Don't use an internal dlmalloc lock (as we already hold heap lock). When
   // morecore_start bytes of memory is exhaused morecore will be called.
-  void* msp = create_mspace_with_base(begin, morecore_start, false /*locked*/);
+  void* msp = create_mspace_with_base(begin, morecore_start, 0 /*locked*/);
   if (msp != nullptr) {
     // Do not allow morecore requests to succeed beyond the initial size of the heap
     mspace_set_footprint_limit(msp, initial_size);
@@ -337,8 +337,8 @@
 void DlMallocSpace::Clear() {
   size_t footprint_limit = GetFootprintLimit();
   madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
-  live_bitmap_->Clear();
-  mark_bitmap_->Clear();
+  live_bitmap_.Clear();
+  mark_bitmap_.Clear();
   SetEnd(Begin() + starting_size_);
   mspace_ = CreateMspace(mem_map_.Begin(), starting_size_, initial_size_);
   SetFootprintLimit(footprint_limit);
@@ -384,8 +384,8 @@
   ::art::gc::space::DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace();
   // Support for multiple DlMalloc provided by a slow path.
   if (UNLIKELY(dlmalloc_space == nullptr || dlmalloc_space->GetMspace() != mspace)) {
-    if (LIKELY(runtime->GetJit() != nullptr)) {
-      jit::JitCodeCache* code_cache = runtime->GetJit()->GetCodeCache();
+    if (LIKELY(runtime->GetJitCodeCache() != nullptr)) {
+      jit::JitCodeCache* code_cache = runtime->GetJitCodeCache();
       if (code_cache->OwnsSpace(mspace)) {
         return code_cache->MoreCore(mspace, increment);
       }
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 4a4fac5..bda90dd 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -24,6 +24,7 @@
 
 #include "android-base/stringprintf.h"
 #include "android-base/strings.h"
+#include "android-base/unique_fd.h"
 
 #include "arch/instruction_set.h"
 #include "art_field-inl.h"
@@ -34,9 +35,11 @@
 #include "base/enums.h"
 #include "base/file_utils.h"
 #include "base/macros.h"
+#include "base/memfd.h"
 #include "base/os.h"
 #include "base/scoped_flock.h"
 #include "base/stl_util.h"
+#include "base/string_view_cpp20.h"
 #include "base/systrace.h"
 #include "base/time_utils.h"
 #include "base/utils.h"
@@ -53,7 +56,9 @@
 #include "mirror/executable-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object-refvisitor-inl.h"
+#include "oat.h"
 #include "oat_file.h"
+#include "profile/profile_compilation_info.h"
 #include "runtime.h"
 #include "space-inl.h"
 
@@ -61,15 +66,21 @@
 namespace gc {
 namespace space {
 
+using android::base::Join;
 using android::base::StringAppendF;
 using android::base::StringPrintf;
 
+// We do not allow the boot image and extensions to take more than 1GiB. They are
+// supposed to be much smaller and allocating more that this would likely fail anyway.
+static constexpr size_t kMaxTotalImageReservationSize = 1 * GB;
+
 Atomic<uint32_t> ImageSpace::bitmap_index_(0);
 
 ImageSpace::ImageSpace(const std::string& image_filename,
                        const char* image_location,
+                       const char* profile_file,
                        MemMap&& mem_map,
-                       std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap,
+                       accounting::ContinuousSpaceBitmap&& live_bitmap,
                        uint8_t* end)
     : MemMapSpace(image_filename,
                   std::move(mem_map),
@@ -79,8 +90,9 @@
                   kGcRetentionPolicyNeverCollect),
       live_bitmap_(std::move(live_bitmap)),
       oat_file_non_owned_(nullptr),
-      image_location_(image_location) {
-  DCHECK(live_bitmap_ != nullptr);
+      image_location_(image_location),
+      profile_file_(profile_file) {
+  DCHECK(live_bitmap_.IsValid());
 }
 
 static int32_t ChooseRelocationOffsetDelta(int32_t min_delta, int32_t max_delta) {
@@ -171,12 +183,21 @@
     arg_vector.push_back("--host");
   }
 
+  // Check if there is a boot profile, and pass it to dex2oat.
+  if (OS::FileExists("/system/etc/boot-image.prof")) {
+    arg_vector.push_back("--profile-file=/system/etc/boot-image.prof");
+  } else {
+    // We will compile the boot image with compiler filter "speed" unless overridden below.
+    LOG(WARNING) << "Missing boot-image.prof file, /system/etc/boot-image.prof not found: "
+                 << strerror(errno);
+  }
+
   const std::vector<std::string>& compiler_options = Runtime::Current()->GetImageCompilerOptions();
   for (size_t i = 0; i < compiler_options.size(); ++i) {
     arg_vector.push_back(compiler_options[i].c_str());
   }
 
-  std::string command_line(android::base::Join(arg_vector, ' '));
+  std::string command_line(Join(arg_vector, ' '));
   LOG(INFO) << "GenerateImage: " << command_line;
   return Exec(arg_vector, error_msg);
 }
@@ -251,67 +272,41 @@
                                cache_filename);
 }
 
-static bool ReadSpecificImageHeader(const char* filename, ImageHeader* image_header) {
-    std::unique_ptr<File> image_file(OS::OpenFileForReading(filename));
-    if (image_file.get() == nullptr) {
+static bool ReadSpecificImageHeader(File* image_file,
+                                    const char* file_description,
+                                    /*out*/ImageHeader* image_header,
+                                    /*out*/std::string* error_msg) {
+    if (!image_file->ReadFully(image_header, sizeof(ImageHeader))) {
+      *error_msg = StringPrintf("Unable to read image header from \"%s\"", file_description);
       return false;
     }
-    const bool success = image_file->ReadFully(image_header, sizeof(ImageHeader));
-    if (!success || !image_header->IsValid()) {
+    if (!image_header->IsValid()) {
+      *error_msg = StringPrintf("Image header from \"%s\" is invalid", file_description);
       return false;
     }
     return true;
 }
 
+static bool ReadSpecificImageHeader(const char* filename,
+                                    /*out*/ImageHeader* image_header,
+                                    /*out*/std::string* error_msg) {
+  std::unique_ptr<File> image_file(OS::OpenFileForReading(filename));
+  if (image_file.get() == nullptr) {
+    *error_msg = StringPrintf("Unable to open file \"%s\" for reading image header", filename);
+    return false;
+  }
+  return ReadSpecificImageHeader(image_file.get(), filename, image_header, error_msg);
+}
+
 static std::unique_ptr<ImageHeader> ReadSpecificImageHeader(const char* filename,
                                                             std::string* error_msg) {
   std::unique_ptr<ImageHeader> hdr(new ImageHeader);
-  if (!ReadSpecificImageHeader(filename, hdr.get())) {
-    *error_msg = StringPrintf("Unable to read image header for %s", filename);
+  if (!ReadSpecificImageHeader(filename, hdr.get(), error_msg)) {
     return nullptr;
   }
   return hdr;
 }
 
-std::unique_ptr<ImageHeader> ImageSpace::ReadImageHeader(const char* image_location,
-                                                         const InstructionSet image_isa,
-                                                         ImageSpaceLoadingOrder order,
-                                                         std::string* error_msg) {
-  std::string system_filename;
-  bool has_system = false;
-  std::string cache_filename;
-  bool has_cache = false;
-  bool dalvik_cache_exists = false;
-  bool is_global_cache = false;
-  if (FindImageFilename(image_location,
-                        image_isa,
-                        &system_filename,
-                        &has_system,
-                        &cache_filename,
-                        &dalvik_cache_exists,
-                        &has_cache,
-                        &is_global_cache)) {
-    if (order == ImageSpaceLoadingOrder::kSystemFirst) {
-      if (has_system) {
-        return ReadSpecificImageHeader(system_filename.c_str(), error_msg);
-      }
-      if (has_cache) {
-        return ReadSpecificImageHeader(cache_filename.c_str(), error_msg);
-      }
-    } else {
-      if (has_cache) {
-        return ReadSpecificImageHeader(cache_filename.c_str(), error_msg);
-      }
-      if (has_system) {
-        return ReadSpecificImageHeader(system_filename.c_str(), error_msg);
-      }
-    }
-  }
-
-  *error_msg = StringPrintf("Unable to find image file for %s", image_location);
-  return nullptr;
-}
-
 static bool CanWriteToDalvikCache(const InstructionSet isa) {
   const std::string dalvik_cache = GetDalvikCache(GetInstructionSetString(isa));
   if (access(dalvik_cache.c_str(), O_RDWR) == 0) {
@@ -348,7 +343,7 @@
     CHECK_ALIGNED(current, kObjectAlignment);
     auto* obj = reinterpret_cast<mirror::Object*>(current);
     CHECK(obj->GetClass() != nullptr) << "Image object at address " << obj << " has null class";
-    CHECK(live_bitmap_->Test(obj)) << obj->PrettyTypeOf();
+    CHECK(live_bitmap_.Test(obj)) << obj->PrettyTypeOf();
     if (kUseBakerReadBarrier) {
       obj->AssertReadBarrierState();
     }
@@ -384,6 +379,11 @@
     return address + Delta();
   }
 
+  template <typename T>
+  T* ToDest(T* src) const {
+    return reinterpret_cast<T*>(ToDest(reinterpret_cast<uintptr_t>(src)));
+  }
+
   // Returns the delta between the dest from the source.
   uintptr_t Delta() const {
     return dest_ - source_;
@@ -420,7 +420,8 @@
   explicit PatchObjectVisitor(HeapVisitor heap_visitor, NativeVisitor native_visitor)
       : heap_visitor_(heap_visitor), native_visitor_(native_visitor) {}
 
-  void VisitClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
+  void VisitClass(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Class> class_class)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     // A mirror::Class object consists of
     //  - instance fields inherited from j.l.Object,
     //  - instance fields inherited from j.l.Class,
@@ -431,16 +432,20 @@
     // fields and the first reference of the subclass due to alignment, it can be filled
     // with smaller fields - but that's not the case for j.l.Object and j.l.Class).
 
-    DCHECK_ALIGNED(klass, kObjectAlignment);
+    DCHECK_ALIGNED(klass.Ptr(), kObjectAlignment);
     static_assert(IsAligned<kHeapReferenceSize>(kObjectAlignment), "Object alignment check.");
     // First, patch the `klass->klass_`, known to be a reference to the j.l.Class.class.
     // This should be the only reference field in j.l.Object and we assert that below.
-    PatchReferenceField</*kMayBeNull=*/ false>(klass, mirror::Object::ClassOffset());
+    DCHECK_EQ(class_class,
+              heap_visitor_(klass->GetClass<kVerifyNone, kWithoutReadBarrier>()));
+    klass->SetFieldObjectWithoutWriteBarrier<
+        /*kTransactionActive=*/ false,
+        /*kCheckTransaction=*/ true,
+        kVerifyNone>(mirror::Object::ClassOffset(), class_class);
     // Then patch the reference instance fields described by j.l.Class.class.
     // Use the sizeof(Object) to determine where these reference fields start;
     // this is the same as `class_class->GetFirstReferenceInstanceFieldOffset()`
     // after patching but the j.l.Class may not have been patched yet.
-    mirror::Class* class_class = klass->GetClass<kVerifyNone, kWithoutReadBarrier>();
     size_t num_reference_instance_fields = class_class->NumReferenceInstanceFields<kVerifyNone>();
     DCHECK_NE(num_reference_instance_fields, 0u);
     static_assert(IsAligned<kHeapReferenceSize>(sizeof(mirror::Object)), "Size alignment check.");
@@ -474,7 +479,7 @@
       }
     }
     // Then patch native pointers.
-    klass->FixupNativePointers<kVerifyNone>(klass, kPointerSize, *this);
+    klass->FixupNativePointers<kVerifyNone>(klass.Ptr(), kPointerSize, *this);
   }
 
   template <typename T>
@@ -525,6 +530,7 @@
 
   void VisitDexCacheArrays(ObjPtr<mirror::DexCache> dex_cache)
       REQUIRES_SHARED(Locks::mutator_lock_) {
+    ScopedTrace st("VisitDexCacheArrays");
     FixupDexCacheArray<mirror::StringDexCacheType>(dex_cache,
                                                    mirror::DexCache::StringsOffset(),
                                                    dex_cache->NumStrings<kVerifyNone>());
@@ -669,6 +675,53 @@
   ReferenceVisitor reference_visitor_;
 };
 
+class ImageSpace::RemapInternedStringsVisitor {
+ public:
+  explicit RemapInternedStringsVisitor(
+      const SafeMap<mirror::String*, mirror::String*>& intern_remap)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      : intern_remap_(intern_remap),
+        string_class_(GetStringClass()) {}
+
+  // Visitor for VisitReferences().
+  ALWAYS_INLINE void operator()(ObjPtr<mirror::Object> object,
+                                MemberOffset field_offset,
+                                bool is_static ATTRIBUTE_UNUSED)
+      const REQUIRES_SHARED(Locks::mutator_lock_) {
+    ObjPtr<mirror::Object> old_value =
+        object->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(field_offset);
+    if (old_value != nullptr &&
+        old_value->GetClass<kVerifyNone, kWithoutReadBarrier>() == string_class_) {
+      auto it = intern_remap_.find(old_value->AsString().Ptr());
+      if (it != intern_remap_.end()) {
+        mirror::String* new_value = it->second;
+        object->SetFieldObjectWithoutWriteBarrier</*kTransactionActive=*/ false,
+                                                  /*kCheckTransaction=*/ true,
+                                                  kVerifyNone>(field_offset, new_value);
+      }
+    }
+  }
+  // Visitor for VisitReferences(), java.lang.ref.Reference case.
+  ALWAYS_INLINE void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(klass->IsTypeOfReferenceClass());
+    this->operator()(ref, mirror::Reference::ReferentOffset(), /*is_static=*/ false);
+  }
+  // Ignore class native roots; not called from VisitReferences() for kVisitNativeRoots == false.
+  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED)
+      const {}
+  void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
+
+ private:
+  mirror::Class* GetStringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(!intern_remap_.empty());
+    return intern_remap_.begin()->first->GetClass<kVerifyNone, kWithoutReadBarrier>();
+  }
+
+  const SafeMap<mirror::String*, mirror::String*>& intern_remap_;
+  mirror::Class* const string_class_;
+};
+
 // Helper class encapsulating loading, so we can access private ImageSpace members (this is a
 // nested class), but not declare functions in the header.
 class ImageSpace::Loader {
@@ -676,64 +729,91 @@
   static std::unique_ptr<ImageSpace> InitAppImage(const char* image_filename,
                                                   const char* image_location,
                                                   const OatFile* oat_file,
-                                                  /*inout*/MemMap* image_reservation,
+                                                  ArrayRef<ImageSpace* const> boot_image_spaces,
                                                   /*out*/std::string* error_msg)
       REQUIRES_SHARED(Locks::mutator_lock_) {
     TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
 
     std::unique_ptr<ImageSpace> space = Init(image_filename,
                                              image_location,
-                                             oat_file,
                                              &logger,
-                                             image_reservation,
+                                             /*image_reservation=*/ nullptr,
                                              error_msg);
     if (space != nullptr) {
-      uint32_t expected_reservation_size =
-          RoundUp(space->GetImageHeader().GetImageSize(), kPageSize);
+      space->oat_file_non_owned_ = oat_file;
+      const ImageHeader& image_header = space->GetImageHeader();
+
+      // Check the oat file checksum.
+      const uint32_t oat_checksum = oat_file->GetOatHeader().GetChecksum();
+      const uint32_t image_oat_checksum = image_header.GetOatChecksum();
+      if (oat_checksum != image_oat_checksum) {
+        *error_msg = StringPrintf("Oat checksum 0x%x does not match the image one 0x%x in image %s",
+                                  oat_checksum,
+                                  image_oat_checksum,
+                                  image_filename);
+        return nullptr;
+      }
+      size_t boot_image_space_dependencies;
+      if (!ValidateBootImageChecksum(image_filename,
+                                     image_header,
+                                     oat_file,
+                                     boot_image_spaces,
+                                     &boot_image_space_dependencies,
+                                     error_msg)) {
+        DCHECK(!error_msg->empty());
+        return nullptr;
+      }
+
+      uint32_t expected_reservation_size = RoundUp(image_header.GetImageSize(), kPageSize);
       if (!CheckImageReservationSize(*space, expected_reservation_size, error_msg) ||
           !CheckImageComponentCount(*space, /*expected_component_count=*/ 1u, error_msg)) {
         return nullptr;
       }
 
-      TimingLogger::ScopedTiming timing("RelocateImage", &logger);
-      ImageHeader* image_header = reinterpret_cast<ImageHeader*>(space->GetMemMap()->Begin());
-      const PointerSize pointer_size = image_header->GetPointerSize();
-      bool result;
-      if (pointer_size == PointerSize::k64) {
-        result = RelocateInPlace<PointerSize::k64>(*image_header,
-                                                   space->GetMemMap()->Begin(),
-                                                   space->GetLiveBitmap(),
-                                                   oat_file,
-                                                   error_msg);
-      } else {
-        result = RelocateInPlace<PointerSize::k32>(*image_header,
-                                                   space->GetMemMap()->Begin(),
-                                                   space->GetLiveBitmap(),
-                                                   oat_file,
-                                                   error_msg);
+      {
+        TimingLogger::ScopedTiming timing("RelocateImage", &logger);
+        const PointerSize pointer_size = image_header.GetPointerSize();
+        uint32_t boot_image_begin =
+            reinterpret_cast32<uint32_t>(boot_image_spaces.front()->Begin());
+        bool result;
+        if (pointer_size == PointerSize::k64) {
+          result = RelocateInPlace<PointerSize::k64>(boot_image_begin,
+                                                     space->GetMemMap()->Begin(),
+                                                     space->GetLiveBitmap(),
+                                                     oat_file,
+                                                     error_msg);
+        } else {
+          result = RelocateInPlace<PointerSize::k32>(boot_image_begin,
+                                                     space->GetMemMap()->Begin(),
+                                                     space->GetLiveBitmap(),
+                                                     oat_file,
+                                                     error_msg);
+        }
+        if (!result) {
+          return nullptr;
+        }
       }
-      if (!result) {
-        return nullptr;
+
+      DCHECK_LE(boot_image_space_dependencies, boot_image_spaces.size());
+      if (boot_image_space_dependencies != boot_image_spaces.size()) {
+        TimingLogger::ScopedTiming timing("DeduplicateInternedStrings", &logger);
+        // There shall be no duplicates with boot image spaces this app image depends on.
+        ArrayRef<ImageSpace* const> old_spaces =
+            boot_image_spaces.SubArray(/*pos=*/ boot_image_space_dependencies);
+        SafeMap<mirror::String*, mirror::String*> intern_remap;
+        RemoveInternTableDuplicates(old_spaces, space.get(), &intern_remap);
+        if (!intern_remap.empty()) {
+          RemapInternedStringDuplicates(intern_remap, space.get());
+        }
       }
-      Runtime* runtime = Runtime::Current();
-      CHECK_EQ(runtime->GetResolutionMethod(),
-               image_header->GetImageMethod(ImageHeader::kResolutionMethod));
-      CHECK_EQ(runtime->GetImtConflictMethod(),
-               image_header->GetImageMethod(ImageHeader::kImtConflictMethod));
-      CHECK_EQ(runtime->GetImtUnimplementedMethod(),
-               image_header->GetImageMethod(ImageHeader::kImtUnimplementedMethod));
-      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveAllCalleeSaves),
-               image_header->GetImageMethod(ImageHeader::kSaveAllCalleeSavesMethod));
-      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsOnly),
-               image_header->GetImageMethod(ImageHeader::kSaveRefsOnlyMethod));
-      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs),
-               image_header->GetImageMethod(ImageHeader::kSaveRefsAndArgsMethod));
-      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverything),
-               image_header->GetImageMethod(ImageHeader::kSaveEverythingMethod));
-      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForClinit),
-               image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForClinit));
-      CHECK_EQ(runtime->GetCalleeSaveMethod(CalleeSaveType::kSaveEverythingForSuspendCheck),
-               image_header->GetImageMethod(ImageHeader::kSaveEverythingMethodForSuspendCheck));
+
+      const ImageHeader& primary_header = boot_image_spaces.front()->GetImageHeader();
+      static_assert(static_cast<size_t>(ImageHeader::kResolutionMethod) == 0u);
+      for (size_t i = 0u; i != static_cast<size_t>(ImageHeader::kImageMethodsCount); ++i) {
+        ImageHeader::ImageMethod method = static_cast<ImageHeader::ImageMethod>(i);
+        CHECK_EQ(primary_header.GetImageMethod(method), image_header.GetImageMethod(method))
+            << method;
+      }
 
       VLOG(image) << "ImageSpace::Loader::InitAppImage exiting " << *space.get();
     }
@@ -745,7 +825,6 @@
 
   static std::unique_ptr<ImageSpace> Init(const char* image_filename,
                                           const char* image_location,
-                                          const OatFile* oat_file,
                                           TimingLogger* logger,
                                           /*inout*/MemMap* image_reservation,
                                           /*out*/std::string* error_msg)
@@ -753,8 +832,6 @@
     CHECK(image_filename != nullptr);
     CHECK(image_location != nullptr);
 
-    VLOG(image) << "ImageSpace::Init entering image_filename=" << image_filename;
-
     std::unique_ptr<File> file;
     {
       TimingLogger::ScopedTiming timing("OpenImageFile", logger);
@@ -764,56 +841,65 @@
         return nullptr;
       }
     }
-    ImageHeader temp_image_header;
-    ImageHeader* image_header = &temp_image_header;
+    return Init(file.get(),
+                image_filename,
+                image_location,
+                /* profile_file=*/ "",
+                /*allow_direct_mapping=*/ true,
+                logger,
+                image_reservation,
+                error_msg);
+  }
+
+  static std::unique_ptr<ImageSpace> Init(File* file,
+                                          const char* image_filename,
+                                          const char* image_location,
+                                          const char* profile_file,
+                                          bool allow_direct_mapping,
+                                          TimingLogger* logger,
+                                          /*inout*/MemMap* image_reservation,
+                                          /*out*/std::string* error_msg)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    CHECK(image_filename != nullptr);
+    CHECK(image_location != nullptr);
+
+    VLOG(image) << "ImageSpace::Init entering image_filename=" << image_filename;
+
+    ImageHeader image_header;
     {
       TimingLogger::ScopedTiming timing("ReadImageHeader", logger);
-      bool success = file->ReadFully(image_header, sizeof(*image_header));
-      if (!success || !image_header->IsValid()) {
+      bool success = file->PreadFully(&image_header, sizeof(image_header), /*offset=*/ 0u);
+      if (!success || !image_header.IsValid()) {
         *error_msg = StringPrintf("Invalid image header in '%s'", image_filename);
         return nullptr;
       }
     }
     // Check that the file is larger or equal to the header size + data size.
     const uint64_t image_file_size = static_cast<uint64_t>(file->GetLength());
-    if (image_file_size < sizeof(ImageHeader) + image_header->GetDataSize()) {
+    if (image_file_size < sizeof(ImageHeader) + image_header.GetDataSize()) {
       *error_msg = StringPrintf(
           "Image file truncated: %" PRIu64 " vs. %" PRIu64 ".",
            image_file_size,
-           static_cast<uint64_t>(sizeof(ImageHeader) + image_header->GetDataSize()));
+           static_cast<uint64_t>(sizeof(ImageHeader) + image_header.GetDataSize()));
       return nullptr;
     }
 
-    if (oat_file != nullptr) {
-      // If we have an oat file (i.e. for app image), check the oat file checksum.
-      // Otherwise, we open the oat file after the image and check the checksum there.
-      const uint32_t oat_checksum = oat_file->GetOatHeader().GetChecksum();
-      const uint32_t image_oat_checksum = image_header->GetOatChecksum();
-      if (oat_checksum != image_oat_checksum) {
-        *error_msg = StringPrintf("Oat checksum 0x%x does not match the image one 0x%x in image %s",
-                                  oat_checksum,
-                                  image_oat_checksum,
-                                  image_filename);
-        return nullptr;
-      }
-    }
-
     if (VLOG_IS_ON(startup)) {
       LOG(INFO) << "Dumping image sections";
       for (size_t i = 0; i < ImageHeader::kSectionCount; ++i) {
         const auto section_idx = static_cast<ImageHeader::ImageSections>(i);
-        auto& section = image_header->GetImageSection(section_idx);
+        auto& section = image_header.GetImageSection(section_idx);
         LOG(INFO) << section_idx << " start="
-            << reinterpret_cast<void*>(image_header->GetImageBegin() + section.Offset()) << " "
+            << reinterpret_cast<void*>(image_header.GetImageBegin() + section.Offset()) << " "
             << section;
       }
     }
 
-    const auto& bitmap_section = image_header->GetImageBitmapSection();
+    const auto& bitmap_section = image_header.GetImageBitmapSection();
     // The location we want to map from is the first aligned page after the end of the stored
     // (possibly compressed) data.
-    const size_t image_bitmap_offset = RoundUp(sizeof(ImageHeader) + image_header->GetDataSize(),
-                                               kPageSize);
+    const size_t image_bitmap_offset =
+        RoundUp(sizeof(ImageHeader) + image_header.GetDataSize(), kPageSize);
     const size_t end_of_bitmap = image_bitmap_offset + bitmap_section.Size();
     if (end_of_bitmap != image_file_size) {
       *error_msg = StringPrintf(
@@ -832,8 +918,9 @@
     MemMap map = LoadImageFile(
         image_filename,
         image_location,
-        *image_header,
+        image_header,
         file->Fd(),
+        allow_direct_mapping,
         logger,
         image_reservation,
         error_msg);
@@ -841,7 +928,7 @@
       DCHECK(!error_msg->empty());
       return nullptr;
     }
-    DCHECK_EQ(0, memcmp(image_header, map.Begin(), sizeof(ImageHeader)));
+    DCHECK_EQ(0, memcmp(&image_header, map.Begin(), sizeof(ImageHeader)));
 
     MemMap image_bitmap_map = MemMap::MapFile(bitmap_section.Size(),
                                               PROT_READ,
@@ -855,28 +942,24 @@
       *error_msg = StringPrintf("Failed to map image bitmap: %s", error_msg->c_str());
       return nullptr;
     }
-    // Loaded the map, use the image header from the file now in case we patch it with
-    // RelocateInPlace.
-    image_header = reinterpret_cast<ImageHeader*>(map.Begin());
     const uint32_t bitmap_index = ImageSpace::bitmap_index_.fetch_add(1);
     std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u",
                                          image_filename,
                                          bitmap_index));
     // Bitmap only needs to cover until the end of the mirror objects section.
-    const ImageSection& image_objects = image_header->GetObjectsSection();
+    const ImageSection& image_objects = image_header.GetObjectsSection();
     // We only want the mirror object, not the ArtFields and ArtMethods.
     uint8_t* const image_end = map.Begin() + image_objects.End();
-    std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap;
+    accounting::ContinuousSpaceBitmap bitmap;
     {
       TimingLogger::ScopedTiming timing("CreateImageBitmap", logger);
-      bitmap.reset(
-          accounting::ContinuousSpaceBitmap::CreateFromMemMap(
-              bitmap_name,
-              std::move(image_bitmap_map),
-              reinterpret_cast<uint8_t*>(map.Begin()),
-              // Make sure the bitmap is aligned to card size instead of just bitmap word size.
-              RoundUp(image_objects.End(), gc::accounting::CardTable::kCardSize)));
-      if (bitmap == nullptr) {
+      bitmap = accounting::ContinuousSpaceBitmap::CreateFromMemMap(
+          bitmap_name,
+          std::move(image_bitmap_map),
+          reinterpret_cast<uint8_t*>(map.Begin()),
+          // Make sure the bitmap is aligned to card size instead of just bitmap word size.
+          RoundUp(image_objects.End(), gc::accounting::CardTable::kCardSize));
+      if (!bitmap.IsValid()) {
         *error_msg = StringPrintf("Could not create bitmap '%s'", bitmap_name.c_str());
         return nullptr;
       }
@@ -884,10 +967,10 @@
     // We only want the mirror object, not the ArtFields and ArtMethods.
     std::unique_ptr<ImageSpace> space(new ImageSpace(image_filename,
                                                      image_location,
+                                                     profile_file,
                                                      std::move(map),
                                                      std::move(bitmap),
                                                      image_end));
-    space->oat_file_non_owned_ = oat_file;
     return space;
   }
 
@@ -919,11 +1002,160 @@
     return true;
   }
 
+  template <typename Container>
+  static void RemoveInternTableDuplicates(
+      const Container& old_spaces,
+      /*inout*/ImageSpace* new_space,
+      /*inout*/SafeMap<mirror::String*, mirror::String*>* intern_remap)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    const ImageSection& new_interns = new_space->GetImageHeader().GetInternedStringsSection();
+    if (new_interns.Size() != 0u) {
+      const uint8_t* new_data = new_space->Begin() + new_interns.Offset();
+      size_t new_read_count;
+      InternTable::UnorderedSet new_set(new_data, /*make_copy_of_data=*/ false, &new_read_count);
+      for (const auto& old_space : old_spaces) {
+        const ImageSection& old_interns = old_space->GetImageHeader().GetInternedStringsSection();
+        if (old_interns.Size() != 0u) {
+          const uint8_t* old_data = old_space->Begin() + old_interns.Offset();
+          size_t old_read_count;
+          InternTable::UnorderedSet old_set(
+              old_data, /*make_copy_of_data=*/ false, &old_read_count);
+          RemoveDuplicates(old_set, &new_set, intern_remap);
+        }
+      }
+    }
+  }
+
+  static void RemapInternedStringDuplicates(
+      const SafeMap<mirror::String*, mirror::String*>& intern_remap,
+      ImageSpace* new_space) REQUIRES_SHARED(Locks::mutator_lock_) {
+    RemapInternedStringsVisitor visitor(intern_remap);
+    static_assert(IsAligned<kObjectAlignment>(sizeof(ImageHeader)), "Header alignment check");
+    uint32_t objects_end = new_space->GetImageHeader().GetObjectsSection().Size();
+    DCHECK_ALIGNED(objects_end, kObjectAlignment);
+    for (uint32_t pos = sizeof(ImageHeader); pos != objects_end; ) {
+      mirror::Object* object = reinterpret_cast<mirror::Object*>(new_space->Begin() + pos);
+      object->VisitReferences</*kVisitNativeRoots=*/ false,
+                              kVerifyNone,
+                              kWithoutReadBarrier>(visitor, visitor);
+      pos += RoundUp(object->SizeOf<kVerifyNone>(), kObjectAlignment);
+    }
+  }
+
  private:
+  // Remove duplicates found in the `old_set` from the `new_set`.
+  // Record the removed Strings for remapping. No read barriers are needed as the
+  // tables are either just being loaded and not yet a part of the heap, or boot
+  // image intern tables with non-moveable Strings used when loading an app image.
+  static void RemoveDuplicates(const InternTable::UnorderedSet& old_set,
+                               /*inout*/InternTable::UnorderedSet* new_set,
+                               /*inout*/SafeMap<mirror::String*, mirror::String*>* intern_remap)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (old_set.size() < new_set->size()) {
+      for (const GcRoot<mirror::String>& old_s : old_set) {
+        auto new_it = new_set->find(old_s);
+        if (UNLIKELY(new_it != new_set->end())) {
+          intern_remap->Put(new_it->Read<kWithoutReadBarrier>(), old_s.Read<kWithoutReadBarrier>());
+          new_set->erase(new_it);
+        }
+      }
+    } else {
+      for (auto new_it = new_set->begin(), end = new_set->end(); new_it != end; ) {
+        auto old_it = old_set.find(*new_it);
+        if (UNLIKELY(old_it != old_set.end())) {
+          intern_remap->Put(new_it->Read<kWithoutReadBarrier>(),
+                            old_it->Read<kWithoutReadBarrier>());
+          new_it = new_set->erase(new_it);
+        } else {
+          ++new_it;
+        }
+      }
+    }
+  }
+
+  static bool ValidateBootImageChecksum(const char* image_filename,
+                                        const ImageHeader& image_header,
+                                        const OatFile* oat_file,
+                                        ArrayRef<ImageSpace* const> boot_image_spaces,
+                                        /*out*/size_t* boot_image_space_dependencies,
+                                        /*out*/std::string* error_msg) {
+    // Use the boot image component count to calculate the checksum from
+    // the appropriate number of boot image chunks.
+    uint32_t boot_image_component_count = image_header.GetBootImageComponentCount();
+    size_t boot_image_spaces_size = boot_image_spaces.size();
+    if (boot_image_component_count > boot_image_spaces_size) {
+      *error_msg = StringPrintf("Too many boot image dependencies (%u > %zu) in image %s",
+                                boot_image_component_count,
+                                boot_image_spaces_size,
+                                image_filename);
+      return false;
+    }
+    uint32_t checksum = 0u;
+    size_t chunk_count = 0u;
+    size_t space_pos = 0u;
+    uint64_t boot_image_size = 0u;
+    for (size_t component_count = 0u; component_count != boot_image_component_count; ) {
+      const ImageHeader& current_header = boot_image_spaces[space_pos]->GetImageHeader();
+      if (current_header.GetComponentCount() > boot_image_component_count - component_count) {
+        *error_msg = StringPrintf("Boot image component count in %s ends in the middle of a chunk, "
+                                      "%u is between %zu and %zu",
+                                  image_filename,
+                                  boot_image_component_count,
+                                  component_count,
+                                  component_count + current_header.GetComponentCount());
+        return false;
+      }
+      component_count += current_header.GetComponentCount();
+      checksum ^= current_header.GetImageChecksum();
+      chunk_count += 1u;
+      space_pos += current_header.GetImageSpaceCount();
+      boot_image_size += current_header.GetImageReservationSize();
+    }
+    if (image_header.GetBootImageChecksum() != checksum) {
+      *error_msg = StringPrintf("Boot image checksum mismatch (0x%08x != 0x%08x) in image %s",
+                                image_header.GetBootImageChecksum(),
+                                checksum,
+                                image_filename);
+      return false;
+    }
+    if (image_header.GetBootImageSize() != boot_image_size) {
+      *error_msg = StringPrintf("Boot image size mismatch (0x%08x != 0x%08" PRIx64 ") in image %s",
+                                image_header.GetBootImageSize(),
+                                boot_image_size,
+                                image_filename);
+      return false;
+    }
+    // Oat checksums, if present, have already been validated, so we know that
+    // they match the loaded image spaces. Therefore, we just verify that they
+    // are consistent in the number of boot image chunks they list by looking
+    // for the kImageChecksumPrefix at the start of each component.
+    const char* oat_boot_class_path_checksums =
+        oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey);
+    if (oat_boot_class_path_checksums != nullptr) {
+      size_t oat_bcp_chunk_count = 0u;
+      while (*oat_boot_class_path_checksums == kImageChecksumPrefix) {
+        oat_bcp_chunk_count += 1u;
+        // Find the start of the next component if any.
+        const char* separator = strchr(oat_boot_class_path_checksums, ':');
+        oat_boot_class_path_checksums = (separator != nullptr) ? separator + 1u : "";
+      }
+      if (oat_bcp_chunk_count != chunk_count) {
+        *error_msg = StringPrintf("Boot image chunk count mismatch (%zu != %zu) in image %s",
+                                  oat_bcp_chunk_count,
+                                  chunk_count,
+                                  image_filename);
+        return false;
+      }
+    }
+    *boot_image_space_dependencies = space_pos;
+    return true;
+  }
+
   static MemMap LoadImageFile(const char* image_filename,
                               const char* image_location,
                               const ImageHeader& image_header,
                               int fd,
+                              bool allow_direct_mapping,
                               TimingLogger* logger,
                               /*inout*/MemMap* image_reservation,
                               /*out*/std::string* error_msg)
@@ -931,7 +1163,7 @@
     TimingLogger::ScopedTiming timing("MapImageFile", logger);
     std::string temp_error_msg;
     const bool is_compressed = image_header.HasCompressedBlock();
-    if (!is_compressed) {
+    if (!is_compressed && allow_direct_mapping) {
       uint8_t* address = (image_reservation != nullptr) ? image_reservation->Begin() : nullptr;
       return MemMap::MapFileAtAddress(address,
                                       image_header.GetImageSize(),
@@ -946,7 +1178,7 @@
                                       error_msg);
     }
 
-    // Reserve output and decompress into it.
+    // Reserve output and copy/decompress into it.
     MemMap map = MemMap::MapAnonymous(image_location,
                                       image_header.GetImageSize(),
                                       PROT_READ | PROT_WRITE,
@@ -967,44 +1199,63 @@
         DCHECK(error_msg == nullptr || !error_msg->empty());
         return MemMap::Invalid();
       }
-      memcpy(map.Begin(), &image_header, sizeof(ImageHeader));
 
-      Runtime::ScopedThreadPoolUsage stpu;
-      ThreadPool* const pool = stpu.GetThreadPool();
-      const uint64_t start = NanoTime();
-      Thread* const self = Thread::Current();
-      static constexpr size_t kMinBlocks = 2u;
-      const bool use_parallel = pool != nullptr && image_header.GetBlockCount() >= kMinBlocks;
-      for (const ImageHeader::Block& block : image_header.GetBlocks(temp_map.Begin())) {
-        auto function = [&](Thread*) {
-          const uint64_t start2 = NanoTime();
-          ScopedTrace trace("LZ4 decompress block");
-          bool result = block.Decompress(/*out_ptr=*/map.Begin(),
-                                         /*in_ptr=*/temp_map.Begin(),
-                                         error_msg);
-          if (!result && error_msg != nullptr) {
-            *error_msg = "Failed to decompress image block " + *error_msg;
+      if (is_compressed) {
+        memcpy(map.Begin(), &image_header, sizeof(ImageHeader));
+
+        Runtime::ScopedThreadPoolUsage stpu;
+        ThreadPool* const pool = stpu.GetThreadPool();
+        const uint64_t start = NanoTime();
+        Thread* const self = Thread::Current();
+        static constexpr size_t kMinBlocks = 2u;
+        const bool use_parallel = pool != nullptr && image_header.GetBlockCount() >= kMinBlocks;
+        for (const ImageHeader::Block& block : image_header.GetBlocks(temp_map.Begin())) {
+          auto function = [&](Thread*) {
+            const uint64_t start2 = NanoTime();
+            ScopedTrace trace("LZ4 decompress block");
+            bool result = block.Decompress(/*out_ptr=*/map.Begin(),
+                                           /*in_ptr=*/temp_map.Begin(),
+                                           error_msg);
+            if (!result && error_msg != nullptr) {
+              *error_msg = "Failed to decompress image block " + *error_msg;
+            }
+            VLOG(image) << "Decompress block " << block.GetDataSize() << " -> "
+                        << block.GetImageSize() << " in " << PrettyDuration(NanoTime() - start2);
+          };
+          if (use_parallel) {
+            pool->AddTask(self, new FunctionTask(std::move(function)));
+          } else {
+            function(self);
           }
-          VLOG(image) << "Decompress block " << block.GetDataSize() << " -> "
-                      << block.GetImageSize() << " in " << PrettyDuration(NanoTime() - start2);
-        };
-        if (use_parallel) {
-          pool->AddTask(self, new FunctionTask(std::move(function)));
-        } else {
-          function(self);
         }
+        if (use_parallel) {
+          ScopedTrace trace("Waiting for workers");
+          // Go to native since we don't want to suspend while holding the mutator lock.
+          ScopedThreadSuspension sts(Thread::Current(), kNative);
+          pool->Wait(self, true, false);
+        }
+        const uint64_t time = NanoTime() - start;
+        // Add one 1 ns to prevent possible divide by 0.
+        VLOG(image) << "Decompressing image took " << PrettyDuration(time) << " ("
+                    << PrettySize(static_cast<uint64_t>(map.Size()) * MsToNs(1000) / (time + 1))
+                    << "/s)";
+      } else {
+        DCHECK(!allow_direct_mapping);
+        // We do not allow direct mapping for boot image extensions compiled to a memfd.
+        // This prevents wasting memory by kernel keeping the contents of the file alive
+        // despite these contents being unreachable once the file descriptor is closed
+        // and mmapped memory is copied for all existing mappings.
+        //
+        // Most pages would be copied during relocation while there is only one mapping.
+        // We could use MAP_SHARED for relocation and then msync() and remap MAP_PRIVATE
+        // as required for forking from zygote, but there would still be some pages
+        // wasted anyway and we want to avoid that. (For example, static synchronized
+        // methods use the class object for locking and thus modify its lockword.)
+
+        // No other process should race to overwrite the extension in memfd.
+        DCHECK_EQ(memcmp(temp_map.Begin(), &image_header, sizeof(ImageHeader)), 0);
+        memcpy(map.Begin(), temp_map.Begin(), temp_map.Size());
       }
-      if (use_parallel) {
-        ScopedTrace trace("Waiting for workers");
-        // Go to native since we don't want to suspend while holding the mutator lock.
-        ScopedThreadSuspension sts(Thread::Current(), kNative);
-        pool->Wait(self, true, false);
-      }
-      const uint64_t time = NanoTime() - start;
-      // Add one 1 ns to prevent possible divide by 0.
-      VLOG(image) << "Decompressing image took " << PrettyDuration(time) << " ("
-                  << PrettySize(static_cast<uint64_t>(map.Size()) * MsToNs(1000) / (time + 1))
-                  << "/s)";
     }
 
     return map;
@@ -1020,9 +1271,9 @@
   template <typename Range0, typename Range1 = EmptyRange, typename Range2 = EmptyRange>
   class ForwardAddress {
    public:
-    ForwardAddress(const Range0& range0 = Range0(),
-                   const Range1& range1 = Range1(),
-                   const Range2& range2 = Range2())
+    explicit ForwardAddress(const Range0& range0 = Range0(),
+                            const Range1& range1 = Range1(),
+                            const Range2& range2 = Range2())
         : range0_(range0), range1_(range1), range2_(range2) {}
 
     // Return the relocated address of a heap object.
@@ -1105,15 +1356,10 @@
     }
 
     // java.lang.ref.Reference visitor.
-    void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
-                    ObjPtr<mirror::Reference> ref) const
+    ALWAYS_INLINE void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
         REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
-      mirror::Object* obj = ref->GetReferent<kWithoutReadBarrier>();
-      if (obj != nullptr) {
-        ref->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
-            mirror::Reference::ReferentOffset(),
-            forward_(obj));
-      }
+      DCHECK(klass->IsTypeOfReferenceClass());
+      this->operator()(ref, mirror::Reference::ReferentOffset(), /*is_static=*/ false);
     }
 
     void operator()(mirror::Object* obj) const
@@ -1136,58 +1382,37 @@
   // address. In place means modifying a single ImageSpace in place rather than relocating from
   // one ImageSpace to another.
   template <PointerSize kPointerSize>
-  static bool RelocateInPlace(ImageHeader& image_header,
+  static bool RelocateInPlace(uint32_t boot_image_begin,
                               uint8_t* target_base,
                               accounting::ContinuousSpaceBitmap* bitmap,
                               const OatFile* app_oat_file,
                               std::string* error_msg) {
     DCHECK(error_msg != nullptr);
     // Set up sections.
-    uint32_t boot_image_begin = 0;
-    uint32_t boot_image_end = 0;
-    uint32_t boot_oat_begin = 0;
-    uint32_t boot_oat_end = 0;
-    gc::Heap* const heap = Runtime::Current()->GetHeap();
-    heap->GetBootImagesSize(&boot_image_begin, &boot_image_end, &boot_oat_begin, &boot_oat_end);
-    if (boot_image_begin == boot_image_end) {
-      *error_msg = "Can not relocate app image without boot image space";
-      return false;
-    }
-    if (boot_oat_begin == boot_oat_end) {
-      *error_msg = "Can not relocate app image without boot oat file";
-      return false;
-    }
-    const uint32_t boot_image_size = boot_oat_end - boot_image_begin;
-    const uint32_t image_header_boot_image_size = image_header.GetBootImageSize();
-    if (boot_image_size != image_header_boot_image_size) {
-      *error_msg = StringPrintf("Boot image size %" PRIu64 " does not match expected size %"
-                                    PRIu64,
-                                static_cast<uint64_t>(boot_image_size),
-                                static_cast<uint64_t>(image_header_boot_image_size));
-      return false;
-    }
-    const ImageSection& objects_section = image_header.GetObjectsSection();
+    ImageHeader* image_header = reinterpret_cast<ImageHeader*>(target_base);
+    const uint32_t boot_image_size = image_header->GetBootImageSize();
+    const ImageSection& objects_section = image_header->GetObjectsSection();
     // Where the app image objects are mapped to.
     uint8_t* objects_location = target_base + objects_section.Offset();
     TimingLogger logger(__FUNCTION__, true, false);
-    RelocationRange boot_image(image_header.GetBootImageBegin(),
+    RelocationRange boot_image(image_header->GetBootImageBegin(),
                                boot_image_begin,
                                boot_image_size);
     // Metadata is everything after the objects section, use exclusion to be safe.
     RelocationRange app_image_metadata(
-        reinterpret_cast<uintptr_t>(image_header.GetImageBegin()) + objects_section.End(),
+        reinterpret_cast<uintptr_t>(image_header->GetImageBegin()) + objects_section.End(),
         reinterpret_cast<uintptr_t>(target_base) + objects_section.End(),
-        image_header.GetImageSize() - objects_section.End());
+        image_header->GetImageSize() - objects_section.End());
     // App image heap objects, may be mapped in the heap.
     RelocationRange app_image_objects(
-        reinterpret_cast<uintptr_t>(image_header.GetImageBegin()) + objects_section.Offset(),
+        reinterpret_cast<uintptr_t>(image_header->GetImageBegin()) + objects_section.Offset(),
         reinterpret_cast<uintptr_t>(objects_location),
         objects_section.Size());
     // Use the oat data section since this is where the OatFile::Begin is.
-    RelocationRange app_oat(reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin()),
+    RelocationRange app_oat(reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin()),
                             // Not necessarily in low 4GB.
                             reinterpret_cast<uintptr_t>(app_oat_file->Begin()),
-                            image_header.GetOatDataEnd() - image_header.GetOatDataBegin());
+                            image_header->GetOatDataEnd() - image_header->GetOatDataBegin());
     VLOG(image) << "App image metadata " << app_image_metadata;
     VLOG(image) << "App image objects " << app_image_objects;
     VLOG(image) << "App oat " << app_oat;
@@ -1212,13 +1437,23 @@
     if (fixup_image) {
       // Two pass approach, fix up all classes first, then fix up non class-objects.
       // The visited bitmap is used to ensure that pointer arrays are not forwarded twice.
-      std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> visited_bitmap(
+      gc::accounting::ContinuousSpaceBitmap visited_bitmap(
           gc::accounting::ContinuousSpaceBitmap::Create("Relocate bitmap",
                                                         target_base,
-                                                        image_header.GetImageSize()));
+                                                        image_header->GetImageSize()));
       {
         TimingLogger::ScopedTiming timing("Fixup classes", &logger);
-        const auto& class_table_section = image_header.GetClassTableSection();
+        ObjPtr<mirror::Class> class_class = [&]() NO_THREAD_SAFETY_ANALYSIS {
+          ObjPtr<mirror::ObjectArray<mirror::Object>> image_roots = app_image_objects.ToDest(
+              image_header->GetImageRoots<kWithoutReadBarrier>().Ptr());
+          int32_t class_roots_index = enum_cast<int32_t>(ImageHeader::kClassRoots);
+          DCHECK_LT(class_roots_index, image_roots->GetLength<kVerifyNone>());
+          ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
+              ObjPtr<mirror::ObjectArray<mirror::Class>>::DownCast(boot_image.ToDest(
+                  image_roots->GetWithoutChecks<kVerifyNone>(class_roots_index).Ptr()));
+          return GetClassRoot<mirror::Class, kWithoutReadBarrier>(class_roots);
+        }();
+        const auto& class_table_section = image_header->GetClassTableSection();
         if (class_table_section.Size() > 0u) {
           ScopedObjectAccess soa(Thread::Current());
           ClassTableVisitor class_table_visitor(forward_object);
@@ -1229,19 +1464,19 @@
           ClassTable::ClassSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
           for (ClassTable::TableSlot& slot : temp_set) {
             slot.VisitRoot(class_table_visitor);
-            mirror::Class* klass = slot.Read<kWithoutReadBarrier>();
-            if (!app_image_objects.InDest(klass)) {
+            ObjPtr<mirror::Class> klass = slot.Read<kWithoutReadBarrier>();
+            if (!app_image_objects.InDest(klass.Ptr())) {
               continue;
             }
-            const bool already_marked = visited_bitmap->Set(klass);
+            const bool already_marked = visited_bitmap.Set(klass.Ptr());
             CHECK(!already_marked) << "App image class already visited";
-            patch_object_visitor.VisitClass(klass);
+            patch_object_visitor.VisitClass(klass, class_class);
             // Then patch the non-embedded vtable and iftable.
             ObjPtr<mirror::PointerArray> vtable =
                 klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
             if (vtable != nullptr &&
                 app_image_objects.InDest(vtable.Ptr()) &&
-                !visited_bitmap->Set(vtable.Ptr())) {
+                !visited_bitmap.Set(vtable.Ptr())) {
               patch_object_visitor.VisitPointerArray(vtable);
             }
             ObjPtr<mirror::IfTable> iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
@@ -1256,7 +1491,7 @@
                   // The iftable has not been patched, so we need to explicitly adjust the pointer.
                   ObjPtr<mirror::PointerArray> ifarray = forward_object(unpatched_ifarray.Ptr());
                   if (app_image_objects.InDest(ifarray.Ptr()) &&
-                      !visited_bitmap->Set(ifarray.Ptr())) {
+                      !visited_bitmap.Set(ifarray.Ptr())) {
                     patch_object_visitor.VisitPointerArray(ifarray);
                   }
                 }
@@ -1273,16 +1508,17 @@
       // Need to update the image to be at the target base.
       uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
       uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
-      FixupObjectVisitor<ForwardObject> fixup_object_visitor(visited_bitmap.get(), forward_object);
+      FixupObjectVisitor<ForwardObject> fixup_object_visitor(&visited_bitmap, forward_object);
       bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_object_visitor);
       // Fixup image roots.
       CHECK(app_image_objects.InSource(reinterpret_cast<uintptr_t>(
-          image_header.GetImageRoots<kWithoutReadBarrier>().Ptr())));
-      image_header.RelocateImageObjects(app_image_objects.Delta());
-      CHECK_EQ(image_header.GetImageBegin(), target_base);
+          image_header->GetImageRoots<kWithoutReadBarrier>().Ptr())));
+      image_header->RelocateImageReferences(app_image_objects.Delta());
+      image_header->RelocateBootImageReferences(boot_image.Delta());
+      CHECK_EQ(image_header->GetImageBegin(), target_base);
       // Fix up dex cache DexFile pointers.
       ObjPtr<mirror::ObjectArray<mirror::DexCache>> dex_caches =
-          image_header.GetImageRoot<kWithoutReadBarrier>(ImageHeader::kDexCaches)
+          image_header->GetImageRoot<kWithoutReadBarrier>(ImageHeader::kDexCaches)
               ->AsObjectArray<mirror::DexCache, kVerifyNone>();
       for (int32_t i = 0, count = dex_caches->GetLength(); i < count; ++i) {
         ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get<kVerifyNone, kWithoutReadBarrier>(i);
@@ -1293,7 +1529,7 @@
     {
       // Only touches objects in the app image, no need for mutator lock.
       TimingLogger::ScopedTiming timing("Fixup methods", &logger);
-      image_header.VisitPackedArtMethods([&](ArtMethod& method) NO_THREAD_SAFETY_ANALYSIS {
+      image_header->VisitPackedArtMethods([&](ArtMethod& method) NO_THREAD_SAFETY_ANALYSIS {
         // TODO: Consider a separate visitor for runtime vs normal methods.
         if (UNLIKELY(method.IsRuntimeMethod())) {
           ImtConflictTable* table = method.GetImtConflictTable(kPointerSize);
@@ -1309,7 +1545,7 @@
             method.SetEntryPointFromQuickCompiledCodePtrSize(new_code, kPointerSize);
           }
         } else {
-          method.UpdateObjectsForImageRelocation(forward_object);
+          patch_object_visitor.PatchGcRoot(&method.DeclaringClassRoot());
           method.UpdateEntrypoints(forward_code, kPointerSize);
         }
       }, target_base, kPointerSize);
@@ -1318,22 +1554,21 @@
       {
         // Only touches objects in the app image, no need for mutator lock.
         TimingLogger::ScopedTiming timing("Fixup fields", &logger);
-        image_header.VisitPackedArtFields([&](ArtField& field) NO_THREAD_SAFETY_ANALYSIS {
-          field.UpdateObjects(forward_object);
+        image_header->VisitPackedArtFields([&](ArtField& field) NO_THREAD_SAFETY_ANALYSIS {
+          patch_object_visitor.template PatchGcRoot</*kMayBeNull=*/ false>(
+              &field.DeclaringClassRoot());
         }, target_base);
       }
       {
         TimingLogger::ScopedTiming timing("Fixup imt", &logger);
-        image_header.VisitPackedImTables(forward_metadata, target_base, kPointerSize);
+        image_header->VisitPackedImTables(forward_metadata, target_base, kPointerSize);
       }
       {
         TimingLogger::ScopedTiming timing("Fixup conflict tables", &logger);
-        image_header.VisitPackedImtConflictTables(forward_metadata, target_base, kPointerSize);
+        image_header->VisitPackedImtConflictTables(forward_metadata, target_base, kPointerSize);
       }
-      // In the app image case, the image methods are actually in the boot image.
-      image_header.RelocateImageMethods(boot_image.Delta());
       // Fix up the intern table.
-      const auto& intern_table_section = image_header.GetInternedStringsSection();
+      const auto& intern_table_section = image_header->GetInternedStringsSection();
       if (intern_table_section.Size() > 0u) {
         TimingLogger::ScopedTiming timing("Fixup intern table", &logger);
         ScopedObjectAccess soa(Thread::Current());
@@ -1357,6 +1592,899 @@
   }
 };
 
+static void AppendImageChecksum(uint32_t component_count,
+                                uint32_t checksum,
+                                /*inout*/std::string* checksums) {
+  static_assert(ImageSpace::kImageChecksumPrefix == 'i', "Format prefix check.");
+  StringAppendF(checksums, "i;%u/%08x", component_count, checksum);
+}
+
+static bool CheckAndRemoveImageChecksum(uint32_t component_count,
+                                        uint32_t checksum,
+                                        /*inout*/std::string_view* oat_checksums,
+                                        /*out*/std::string* error_msg) {
+  std::string image_checksum;
+  AppendImageChecksum(component_count, checksum, &image_checksum);
+  if (!StartsWith(*oat_checksums, image_checksum)) {
+    *error_msg = StringPrintf("Image checksum mismatch, expected %s to start with %s",
+                              std::string(*oat_checksums).c_str(),
+                              image_checksum.c_str());
+    return false;
+  }
+  oat_checksums->remove_prefix(image_checksum.size());
+  return true;
+}
+
+// Helper class to find the primary boot image and boot image extensions
+// and determine the boot image layout.
+class ImageSpace::BootImageLayout {
+ public:
+  // Description of a "chunk" of the boot image, i.e. either primary boot image
+  // or a boot image extension, used in conjunction with the boot class path to
+  // load boot image components.
+  struct ImageChunk {
+    std::string base_location;
+    std::string base_filename;
+    std::string profile_file;
+    size_t start_index;
+    uint32_t component_count;
+    uint32_t image_space_count;
+    uint32_t reservation_size;
+    uint32_t checksum;
+    uint32_t boot_image_component_count;
+    uint32_t boot_image_checksum;
+    uint32_t boot_image_size;
+
+    // The following file descriptors hold the memfd files for extensions compiled
+    // in memory and described by the above fields. We want to use them to mmap()
+    // the contents and then close them while treating the ImageChunk description
+    // as immutable (const), so make these fields explicitly mutable.
+    mutable android::base::unique_fd art_fd;
+    mutable android::base::unique_fd vdex_fd;
+    mutable android::base::unique_fd oat_fd;
+  };
+
+  BootImageLayout(const std::string& image_location,
+                  ArrayRef<const std::string> boot_class_path,
+                  ArrayRef<const std::string> boot_class_path_locations)
+     : image_location_(image_location),
+       boot_class_path_(boot_class_path),
+       boot_class_path_locations_(boot_class_path_locations) {}
+
+  std::string GetPrimaryImageLocation();
+
+  bool LoadFromSystem(InstructionSet image_isa, /*out*/std::string* error_msg) {
+    return LoadOrValidateFromSystem(image_isa, /*oat_checksums=*/ nullptr, error_msg);
+  }
+
+  bool ValidateFromSystem(InstructionSet image_isa,
+                          /*inout*/std::string_view* oat_checksums,
+                          /*out*/std::string* error_msg) {
+    DCHECK(oat_checksums != nullptr);
+    return LoadOrValidateFromSystem(image_isa, oat_checksums, error_msg);
+  }
+
+  bool LoadFromDalvikCache(const std::string& dalvik_cache, /*out*/std::string* error_msg) {
+    return LoadOrValidateFromDalvikCache(dalvik_cache, /*oat_checksums=*/ nullptr, error_msg);
+  }
+
+  bool ValidateFromDalvikCache(const std::string& dalvik_cache,
+                               /*inout*/std::string_view* oat_checksums,
+                               /*out*/std::string* error_msg) {
+    DCHECK(oat_checksums != nullptr);
+    return LoadOrValidateFromDalvikCache(dalvik_cache, oat_checksums, error_msg);
+  }
+
+  ArrayRef<const ImageChunk> GetChunks() const {
+    return ArrayRef<const ImageChunk>(chunks_);
+  }
+
+  uint32_t GetBaseAddress() const {
+    return base_address_;
+  }
+
+  size_t GetNextBcpIndex() const {
+    return next_bcp_index_;
+  }
+
+  size_t GetTotalComponentCount() const {
+    return total_component_count_;
+  }
+
+  size_t GetTotalReservationSize() const {
+    return total_reservation_size_;
+  }
+
+ private:
+  struct NamedComponentLocation {
+    std::string base_location;
+    size_t bcp_index;
+    std::string profile_filename;
+  };
+
+  std::string ExpandLocationImpl(const std::string& location,
+                                 size_t bcp_index,
+                                 bool boot_image_extension) {
+    std::vector<std::string> expanded = ExpandMultiImageLocations(
+        ArrayRef<const std::string>(boot_class_path_).SubArray(bcp_index, 1u),
+        location,
+        boot_image_extension);
+    DCHECK_EQ(expanded.size(), 1u);
+    return expanded[0];
+  }
+
+  std::string ExpandLocation(const std::string& location, size_t bcp_index) {
+    if (bcp_index == 0u) {
+      DCHECK_EQ(location, ExpandLocationImpl(location, bcp_index, /*boot_image_extension=*/ false));
+      return location;
+    } else {
+      return ExpandLocationImpl(location, bcp_index, /*boot_image_extension=*/ true);
+    }
+  }
+
+  std::string GetBcpComponentPath(size_t bcp_index) {
+    DCHECK_LE(bcp_index, boot_class_path_.size());
+    size_t bcp_slash_pos = boot_class_path_[bcp_index].rfind('/');
+    DCHECK_NE(bcp_slash_pos, std::string::npos);
+    return boot_class_path_[bcp_index].substr(0u, bcp_slash_pos + 1u);
+  }
+
+  bool VerifyImageLocation(const std::vector<std::string>& components,
+                           /*out*/size_t* named_components_count,
+                           /*out*/std::string* error_msg);
+
+  bool MatchNamedComponents(
+      ArrayRef<const std::string> named_components,
+      /*out*/std::vector<NamedComponentLocation>* named_component_locations,
+      /*out*/std::string* error_msg);
+
+  bool ValidateBootImageChecksum(const char* file_description,
+                                 const ImageHeader& header,
+                                 /*out*/std::string* error_msg);
+
+  bool ValidateHeader(const ImageHeader& header,
+                      size_t bcp_index,
+                      const char* file_description,
+                      /*out*/std::string* error_msg);
+
+  bool ReadHeader(const std::string& base_location,
+                  const std::string& base_filename,
+                  size_t bcp_index,
+                  /*out*/std::string* error_msg);
+
+  bool CompileExtension(const std::string& base_location,
+                        const std::string& base_filename,
+                        size_t bcp_index,
+                        const std::string& profile_filename,
+                        ArrayRef<std::string> dependencies,
+                        /*out*/std::string* error_msg);
+
+  bool CheckAndRemoveLastChunkChecksum(/*inout*/std::string_view* oat_checksums,
+                                       /*out*/std::string* error_msg);
+
+  template <typename FilenameFn>
+  bool LoadOrValidate(FilenameFn&& filename_fn,
+                      /*inout*/std::string_view* oat_checksums,
+                      /*out*/std::string* error_msg);
+
+  bool LoadOrValidateFromSystem(InstructionSet image_isa,
+                                /*inout*/std::string_view* oat_checksums,
+                                /*out*/std::string* error_msg);
+
+  bool LoadOrValidateFromDalvikCache(const std::string& dalvik_cache,
+                                     /*inout*/std::string_view* oat_checksums,
+                                     /*out*/std::string* error_msg);
+
+  const std::string& image_location_;
+  ArrayRef<const std::string> boot_class_path_;
+  ArrayRef<const std::string> boot_class_path_locations_;
+
+  std::vector<ImageChunk> chunks_;
+  uint32_t base_address_ = 0u;
+  size_t next_bcp_index_ = 0u;
+  size_t total_component_count_ = 0u;
+  size_t total_reservation_size_ = 0u;
+};
+
+std::string ImageSpace::BootImageLayout::GetPrimaryImageLocation() {
+  size_t location_start = 0u;
+  size_t location_end = image_location_.find(kComponentSeparator);
+  while (location_end == location_start) {
+    ++location_start;
+    location_end = image_location_.find(location_start, kComponentSeparator);
+  }
+  std::string location = (location_end == std::string::npos)
+      ? image_location_.substr(location_start)
+      : image_location_.substr(location_start, location_end - location_start);
+  if (location.find('/') == std::string::npos) {
+    // No path, so use the path from the first boot class path component.
+    size_t slash_pos = boot_class_path_.empty()
+        ? std::string::npos
+        : boot_class_path_[0].rfind('/');
+    if (slash_pos == std::string::npos) {
+      return std::string();
+    }
+    location.insert(0u, boot_class_path_[0].substr(0u, slash_pos + 1u));
+  }
+  return location;
+}
+
+bool ImageSpace::BootImageLayout::VerifyImageLocation(
+    const std::vector<std::string>& components,
+    /*out*/size_t* named_components_count,
+    /*out*/std::string* error_msg) {
+  DCHECK(named_components_count != nullptr);
+
+  // Validate boot class path. Require a path and non-empty name in each component.
+  for (const std::string& bcp_component : boot_class_path_) {
+    size_t bcp_slash_pos = bcp_component.rfind('/');
+    if (bcp_slash_pos == std::string::npos || bcp_slash_pos == bcp_component.size() - 1u) {
+      *error_msg = StringPrintf("Invalid boot class path component: %s", bcp_component.c_str());
+      return false;
+    }
+  }
+
+  // Validate the format of image location components.
+  size_t components_size = components.size();
+  if (components_size == 0u) {
+    *error_msg = "Empty image location.";
+    return false;
+  }
+  size_t wildcards_start = components_size;  // No wildcards.
+  for (size_t i = 0; i != components_size; ++i) {
+    const std::string& component = components[i];
+    DCHECK(!component.empty());  // Guaranteed by Split().
+    const size_t profile_separator_pos = component.find(kProfileSeparator);
+    size_t wildcard_pos = component.find('*');
+    if (wildcard_pos == std::string::npos) {
+      if (wildcards_start != components.size()) {
+        *error_msg =
+            StringPrintf("Image component without wildcard after component with wildcard: %s",
+                         component.c_str());
+        return false;
+      }
+      if (profile_separator_pos != std::string::npos) {
+        if (component.find(kProfileSeparator, profile_separator_pos + 1u) != std::string::npos) {
+          *error_msg = StringPrintf("Multiple profile delimiters in %s", component.c_str());
+          return false;
+        }
+        if (profile_separator_pos == 0u || profile_separator_pos + 1u == component.size()) {
+          *error_msg = StringPrintf("Missing component and/or profile name in %s",
+                                    component.c_str());
+          return false;
+        }
+        if (component.back() == '/') {
+          *error_msg = StringPrintf("Profile name ends with path separator: %s",
+                                    component.c_str());
+          return false;
+        }
+      }
+      size_t component_name_length =
+          profile_separator_pos != std::string::npos ? profile_separator_pos : component.size();
+      if (component[component_name_length - 1u] == '/') {
+        *error_msg = StringPrintf("Image component ends with path separator: %s",
+                                  component.c_str());
+        return false;
+      }
+    } else {
+      if (profile_separator_pos != std::string::npos) {
+        *error_msg = StringPrintf("Unsupproted wildcard (*) and profile delimiter (!) in %s",
+                                  component.c_str());
+        return false;
+      }
+      if (wildcards_start == components_size) {
+        wildcards_start = i;
+      }
+      // Wildcard must be the last character.
+      if (wildcard_pos != component.size() - 1u) {
+        *error_msg = StringPrintf("Unsupported wildcard (*) position in %s", component.c_str());
+        return false;
+      }
+      // And it must be either plain wildcard or preceded by a path separator.
+      if (component.size() != 1u && component[wildcard_pos - 1u] != '/') {
+        *error_msg = StringPrintf("Non-plain wildcard (*) not preceded by path separator '/': %s",
+                                  component.c_str());
+        return false;
+      }
+      if (i == 0) {
+        *error_msg = StringPrintf("Primary component contains wildcard (*): %s", component.c_str());
+        return false;
+      }
+    }
+  }
+
+  *named_components_count = wildcards_start;
+  return true;
+}
+
+// Matches the named (non-wildcard) image location components against boot
+// class path (BCP) entries, in order, producing one NamedComponentLocation
+// per component: the base image location, the BCP index it matched, and the
+// profile filename (empty unless the component carried a "!profile" suffix).
+// Returns false with `*error_msg` set if a component does not match any
+// remaining BCP entry.
+bool ImageSpace::BootImageLayout::MatchNamedComponents(
+    ArrayRef<const std::string> named_components,
+    /*out*/std::vector<NamedComponentLocation>* named_component_locations,
+    /*out*/std::string* error_msg) {
+  DCHECK(!named_components.empty());
+  DCHECK(named_component_locations->empty());
+  named_component_locations->reserve(named_components.size());
+  size_t bcp_component_count = boot_class_path_.size();
+  size_t bcp_pos = 0;
+  std::string base_name;
+  for (size_t i = 0, size = named_components.size(); i != size; ++i) {
+    std::string component = named_components[i];
+    std::string profile_filename;  // Empty.
+    // Split off the "!profile" suffix, if any; well-formedness was already
+    // checked by VerifyImageLocation().
+    const size_t profile_separator_pos = component.find(kProfileSeparator);
+    if (profile_separator_pos != std::string::npos) {
+      profile_filename = component.substr(profile_separator_pos + 1u);
+      DCHECK(!profile_filename.empty());  // Checked by VerifyImageLocation()
+      component.resize(profile_separator_pos);
+      DCHECK(!component.empty());  // Checked by VerifyImageLocation()
+    }
+    size_t slash_pos = component.rfind('/');
+    std::string base_location;
+    if (i == 0u) {
+      // The primary boot image name is taken as provided. It forms the base
+      // for expanding the extension filenames.
+      if (slash_pos != std::string::npos) {
+        base_name = component.substr(slash_pos + 1u);
+        base_location = component;
+      } else {
+        // No path given; resolve relative to the first BCP component's path.
+        base_name = component;
+        base_location = GetBcpComponentPath(0u) + component;
+      }
+    } else {
+      std::string to_match;
+      if (slash_pos != std::string::npos) {
+        // If we have the full path, we just need to match the filename to the BCP component.
+        base_location = component.substr(0u, slash_pos + 1u) + base_name;
+        to_match = component;
+      }
+      // Advance `bcp_pos` until the expanded base location matches this
+      // component; since `bcp_pos` never moves backwards, named components
+      // must appear in BCP order.
+      while (true) {
+        if (slash_pos == std::string::npos) {
+          // If we do not have a full path, we need to update the path based on the BCP location.
+          std::string path = GetBcpComponentPath(bcp_pos);
+          to_match = path + component;
+          base_location = path + base_name;
+        }
+        if (ExpandLocation(base_location, bcp_pos) == to_match) {
+          break;
+        }
+        ++bcp_pos;
+        if (bcp_pos == bcp_component_count) {
+          *error_msg = StringPrintf("Image component %s does not match a boot class path component",
+                                    component.c_str());
+          return false;
+        }
+      }
+    }
+    // A profile given without a path is resolved next to the matched BCP component.
+    if (!profile_filename.empty() && profile_filename.find('/') == std::string::npos) {
+      profile_filename.insert(/*pos*/ 0u, GetBcpComponentPath(bcp_pos));
+    }
+    NamedComponentLocation location;
+    location.base_location = base_location;
+    location.bcp_index = bcp_pos;
+    location.profile_filename = profile_filename;
+    named_component_locations->push_back(location);
+    ++bcp_pos;
+  }
+  return true;
+}
+
+// Verifies that the boot image reference recorded in `header` (component
+// count, composite XOR checksum, total reservation size) matches the
+// contiguous prefix of chunks loaded so far. Returns false with `*error_msg`
+// set on any mismatch.
+bool ImageSpace::BootImageLayout::ValidateBootImageChecksum(const char* file_description,
+                                                            const ImageHeader& header,
+                                                            /*out*/std::string* error_msg) {
+  uint32_t boot_image_component_count = header.GetBootImageComponentCount();
+  // A primary image (no chunks loaded yet) must reference no boot image;
+  // an extension must reference a non-empty one.
+  if (chunks_.empty() != (boot_image_component_count == 0u)) {
+    *error_msg = StringPrintf("Unexpected boot image component count in %s: %u, %s",
+                              file_description,
+                              boot_image_component_count,
+                              chunks_.empty() ? "should be 0" : "should not be 0");
+    return false;
+  }
+  uint32_t component_count = 0u;
+  uint32_t composite_checksum = 0u;
+  uint64_t boot_image_size = 0u;
+  // Accumulate the composite checksum and total size over the contiguous
+  // leading chunks covered by `boot_image_component_count`.
+  for (const ImageChunk& chunk : chunks_) {
+    if (component_count == boot_image_component_count) {
+      break;  // Hit the component count.
+    }
+    if (chunk.start_index != component_count) {
+      break;  // End of contiguous chunks, fail below; same as reaching end of `chunks_`.
+    }
+    if (chunk.component_count > boot_image_component_count - component_count) {
+      *error_msg = StringPrintf("Boot image component count in %s ends in the middle of a chunk, "
+                                    "%u is between %u and %u",
+                                file_description,
+                                boot_image_component_count,
+                                component_count,
+                                component_count + chunk.component_count);
+      return false;
+    }
+    component_count += chunk.component_count;
+    composite_checksum ^= chunk.checksum;
+    boot_image_size += chunk.reservation_size;
+  }
+  DCHECK_LE(component_count, boot_image_component_count);
+  if (component_count != boot_image_component_count) {
+    *error_msg = StringPrintf("Missing boot image components for checksum in %s: %u > %u",
+                              file_description,
+                              boot_image_component_count,
+                              component_count);
+    return false;
+  }
+  if (composite_checksum != header.GetBootImageChecksum()) {
+    *error_msg = StringPrintf("Boot image checksum mismatch in %s: 0x%08x != 0x%08x",
+                              file_description,
+                              header.GetBootImageChecksum(),
+                              composite_checksum);
+    return false;
+  }
+  if (boot_image_size != header.GetBootImageSize()) {
+    *error_msg = StringPrintf("Boot image size mismatch in %s: 0x%08x != 0x%08" PRIx64,
+                              file_description,
+                              header.GetBootImageSize(),
+                              boot_image_size);
+    return false;
+  }
+  return true;
+}
+
+// Validates `header` for an image whose components start at BCP index
+// `bcp_index`: the component count must be non-zero and fit within the
+// remaining BCP entries, the reservation must fit within the remaining
+// allowance under kMaxTotalImageReservationSize, and the referenced boot
+// image checksum must match the previously loaded chunks.
+bool ImageSpace::BootImageLayout::ValidateHeader(const ImageHeader& header,
+                                                 size_t bcp_index,
+                                                 const char* file_description,
+                                                 /*out*/std::string* error_msg) {
+  size_t bcp_component_count = boot_class_path_.size();
+  DCHECK_LT(bcp_index, bcp_component_count);
+  size_t allowed_component_count = bcp_component_count - bcp_index;
+  DCHECK_LE(total_reservation_size_, kMaxTotalImageReservationSize);
+  size_t allowed_reservation_size = kMaxTotalImageReservationSize - total_reservation_size_;
+
+  if (header.GetComponentCount() == 0u ||
+      header.GetComponentCount() > allowed_component_count) {
+    *error_msg = StringPrintf("Unexpected component count in %s, received %u, "
+                                  "expected non-zero and <= %zu",
+                              file_description,
+                              header.GetComponentCount(),
+                              allowed_component_count);
+    return false;
+  }
+  if (header.GetImageReservationSize() > allowed_reservation_size) {
+    *error_msg = StringPrintf("Reservation size too big in %s: %u > %zu",
+                              file_description,
+                              header.GetImageReservationSize(),
+                              allowed_reservation_size);
+    return false;
+  }
+  if (!ValidateBootImageChecksum(file_description, header, error_msg)) {
+    return false;
+  }
+
+  return true;
+}
+
+// Reads and validates the image header for the component at `bcp_index`
+// from the expanded `base_filename`, then records it as a new chunk and
+// advances the layout bookkeeping (next BCP index, total component count,
+// total reservation size). Returns false with `*error_msg` set on failure.
+bool ImageSpace::BootImageLayout::ReadHeader(const std::string& base_location,
+                                             const std::string& base_filename,
+                                             size_t bcp_index,
+                                             /*out*/std::string* error_msg) {
+  DCHECK_LE(next_bcp_index_, bcp_index);
+  DCHECK_LT(bcp_index, boot_class_path_.size());
+
+  std::string actual_filename = ExpandLocation(base_filename, bcp_index);
+  ImageHeader header;
+  if (!ReadSpecificImageHeader(actual_filename.c_str(), &header, error_msg)) {
+    return false;
+  }
+  const char* file_description = actual_filename.c_str();
+  if (!ValidateHeader(header, bcp_index, file_description, error_msg)) {
+    return false;
+  }
+
+  // The first chunk establishes the base address for the whole layout.
+  if (chunks_.empty()) {
+    base_address_ = reinterpret_cast32<uint32_t>(header.GetImageBegin());
+  }
+  // Record the header data as a chunk; no file descriptors are kept here
+  // (unlike CompileExtension(), which transfers memfd ownership).
+  ImageChunk chunk;
+  chunk.base_location = base_location;
+  chunk.base_filename = base_filename;
+  chunk.start_index = bcp_index;
+  chunk.component_count = header.GetComponentCount();
+  chunk.image_space_count = header.GetImageSpaceCount();
+  chunk.reservation_size = header.GetImageReservationSize();
+  chunk.checksum = header.GetImageChecksum();
+  chunk.boot_image_component_count = header.GetBootImageComponentCount();
+  chunk.boot_image_checksum = header.GetBootImageChecksum();
+  chunk.boot_image_size = header.GetBootImageSize();
+  chunks_.push_back(std::move(chunk));
+  next_bcp_index_ = bcp_index + header.GetComponentCount();
+  total_component_count_ += header.GetComponentCount();
+  total_reservation_size_ += header.GetImageReservationSize();
+  return true;
+}
+
+// Compiles a boot image extension for BCP components starting at `bcp_index`,
+// driven by `profile_filename`, by running dex2oat with in-memory (memfd)
+// output files. On success, appends the resulting image as a new chunk that
+// owns the art/vdex/oat file descriptors. Returns false with `*error_msg`
+// set on any failure (missing dependencies, dex2oat disabled, profile scan
+// failure, memfd creation failure, or compilation/validation errors).
+bool ImageSpace::BootImageLayout::CompileExtension(const std::string& base_location,
+                                                   const std::string& base_filename,
+                                                   size_t bcp_index,
+                                                   const std::string& profile_filename,
+                                                   ArrayRef<std::string> dependencies,
+                                                   /*out*/std::string* error_msg) {
+  DCHECK_LE(total_component_count_, next_bcp_index_);
+  DCHECK_LE(next_bcp_index_, bcp_index);
+  size_t bcp_component_count = boot_class_path_.size();
+  DCHECK_LT(bcp_index, bcp_component_count);
+  DCHECK(!profile_filename.empty());
+  if (total_component_count_ != bcp_index) {
+    // We require all previous BCP components to have a boot image space (primary or extension).
+    *error_msg = "Cannot compile extension because of missing dependencies.";
+    return false;
+  }
+  Runtime* runtime = Runtime::Current();
+  if (!runtime->IsImageDex2OatEnabled()) {
+    *error_msg = "Cannot compile extension because dex2oat for image compilation is disabled.";
+    return false;
+  }
+
+  // Check dependencies.
+  DCHECK(!dependencies.empty());
+  size_t dependency_component_count = 0;
+  for (size_t i = 0, size = dependencies.size(); i != size; ++i) {
+    // Each dependency must be backed by an already-loaded contiguous chunk.
+    if (chunks_.size() == i || chunks_[i].start_index != dependency_component_count) {
+      *error_msg = StringPrintf("Missing extension dependency \"%s\"", dependencies[i].c_str());
+      return false;
+    }
+    dependency_component_count += chunks_[i].component_count;
+  }
+
+  // Collect locations from the profile.
+  std::set<std::string> dex_locations;
+  {
+    std::unique_ptr<File> profile_file(OS::OpenFileForReading(profile_filename.c_str()));
+    if (profile_file == nullptr) {
+      *error_msg = StringPrintf("Failed to open profile file \"%s\" for reading, error: %s",
+                                profile_filename.c_str(),
+                                strerror(errno));
+      return false;
+    }
+
+    // TODO: Rewrite ProfileCompilationInfo to provide a better interface and
+    // to store the dex locations in uncompressed section of the file.
+    auto collect_fn = [&dex_locations](const std::string& dex_location,
+                                       uint32_t checksum ATTRIBUTE_UNUSED) {
+      dex_locations.insert(dex_location);  // Just collect locations.
+      return false;                        // Do not read the profile data.
+    };
+    ProfileCompilationInfo info(/*for_boot_image=*/ true);
+    if (!info.Load(profile_file->Fd(), /*merge_classes=*/ true, collect_fn)) {
+      *error_msg = StringPrintf("Failed to scan profile from %s", profile_filename.c_str());
+      return false;
+    }
+  }
+
+  // Match boot class path components to locations from profile.
+  // Note that the profile records only filenames without paths.
+  size_t bcp_end = bcp_index;
+  for (; bcp_end != bcp_component_count; ++bcp_end) {
+    const std::string& bcp_component = boot_class_path_locations_[bcp_end];
+    size_t slash_pos = bcp_component.rfind('/');
+    DCHECK_NE(slash_pos, std::string::npos);
+    std::string bcp_component_name = bcp_component.substr(slash_pos + 1u);
+    if (dex_locations.count(bcp_component_name) == 0u) {
+      break;  // Did not find the current location in the profile.
+    }
+  }
+
+  if (bcp_end == bcp_index) {
+    // No data for the first (requested) component.
+    *error_msg = StringPrintf("The profile does not contain data for %s",
+                              boot_class_path_locations_[bcp_index].c_str());
+    return false;
+  }
+
+  // Create in-memory files.
+  std::string art_filename = ExpandLocation(base_filename, bcp_index);
+  std::string vdex_filename = ImageHeader::GetVdexLocationFromImageLocation(art_filename);
+  std::string oat_filename = ImageHeader::GetOatLocationFromImageLocation(art_filename);
+  android::base::unique_fd art_fd(memfd_create_compat(art_filename.c_str(), /*flags=*/ 0));
+  android::base::unique_fd vdex_fd(memfd_create_compat(vdex_filename.c_str(), /*flags=*/ 0));
+  android::base::unique_fd oat_fd(memfd_create_compat(oat_filename.c_str(), /*flags=*/ 0));
+  if (art_fd.get() == -1 || vdex_fd.get() == -1 || oat_fd.get() == -1) {
+    *error_msg = StringPrintf("Failed to create memfd handles for compiling extension for %s",
+                              boot_class_path_locations_[bcp_index].c_str());
+    return false;
+  }
+
+  // Construct the dex2oat command line.
+  std::string dex2oat = runtime->GetCompilerExecutable();
+  // The BCP passed to dex2oat is the dependency prefix plus the components
+  // being compiled ([bcp_index, bcp_end)); intermediate uncovered components
+  // are excluded.
+  ArrayRef<const std::string> head_bcp =
+      boot_class_path_.SubArray(/*pos=*/ 0u, /*length=*/ dependency_component_count);
+  ArrayRef<const std::string> head_bcp_locations =
+      boot_class_path_locations_.SubArray(/*pos=*/ 0u, /*length=*/ dependency_component_count);
+  ArrayRef<const std::string> extension_bcp =
+      boot_class_path_.SubArray(/*pos=*/ bcp_index, /*length=*/ bcp_end - bcp_index);
+  ArrayRef<const std::string> extension_bcp_locations =
+      boot_class_path_locations_.SubArray(/*pos=*/ bcp_index, /*length=*/ bcp_end - bcp_index);
+  std::string boot_class_path = Join(head_bcp, ':') + ':' + Join(extension_bcp, ':');
+  std::string boot_class_path_locations =
+      Join(head_bcp_locations, ':') + ':' + Join(extension_bcp_locations, ':');
+
+  std::vector<std::string> args;
+  args.push_back(dex2oat);
+  args.push_back("--runtime-arg");
+  args.push_back("-Xbootclasspath:" + boot_class_path);
+  args.push_back("--runtime-arg");
+  args.push_back("-Xbootclasspath-locations:" + boot_class_path_locations);
+  args.push_back("--boot-image=" + Join(dependencies, kComponentSeparator));
+  for (size_t i = bcp_index; i != bcp_end; ++i) {
+    args.push_back("--dex-file=" + boot_class_path_[i]);
+    args.push_back("--dex-location=" + boot_class_path_locations_[i]);
+  }
+  args.push_back("--image-fd=" + std::to_string(art_fd.get()));
+  args.push_back("--output-vdex-fd=" + std::to_string(vdex_fd.get()));
+  args.push_back("--oat-fd=" + std::to_string(oat_fd.get()));
+  args.push_back("--oat-location=" + ImageHeader::GetOatLocationFromImageLocation(base_filename));
+  args.push_back("--single-image");
+  args.push_back("--image-format=uncompressed");
+
+  // We currently cannot guarantee that the boot class path has no verification failures.
+  // And we do not want to compile anything, compilation should be done by JIT in zygote.
+  args.push_back("--compiler-filter=verify");
+
+  // Pass the profile.
+  args.push_back("--profile-file=" + profile_filename);
+
+  // Do not let the file descriptor numbers change the compilation output.
+  args.push_back("--avoid-storing-invocation");
+
+  runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&args);
+
+  if (!kIsTargetBuild) {
+    args.push_back("--host");
+  }
+
+  // Image compiler options go last to allow overriding above args, such as --compiler-filter.
+  for (const std::string& compiler_option : runtime->GetImageCompilerOptions()) {
+    args.push_back(compiler_option);
+  }
+
+  // Compile the extension.
+  VLOG(image) << "Compiling boot image extension for " << (bcp_end - bcp_index)
+              << " components, starting from " << boot_class_path_locations_[bcp_index];
+  if (!Exec(args, error_msg)) {
+    return false;
+  }
+
+  // Read and validate the image header.
+  ImageHeader header;
+  {
+    // Temporarily wrap the fd in a File for reading; release it back after.
+    File image_file(art_fd.release(), /*check_usage=*/ false);
+    if (!ReadSpecificImageHeader(&image_file, "compiled image file", &header, error_msg)) {
+      return false;
+    }
+    art_fd.reset(image_file.Release());
+  }
+  const char* file_description = "compiled image file";
+  if (!ValidateHeader(header, bcp_index, file_description, error_msg)) {
+    return false;
+  }
+
+  // Record the compiled extension as a chunk, transferring memfd ownership.
+  DCHECK(!chunks_.empty());
+  ImageChunk chunk;
+  chunk.base_location = base_location;
+  chunk.base_filename = base_filename;
+  chunk.profile_file = profile_filename;
+  chunk.start_index = bcp_index;
+  chunk.component_count = header.GetComponentCount();
+  chunk.image_space_count = header.GetImageSpaceCount();
+  chunk.reservation_size = header.GetImageReservationSize();
+  chunk.checksum = header.GetImageChecksum();
+  chunk.boot_image_component_count = header.GetBootImageComponentCount();
+  chunk.boot_image_checksum = header.GetBootImageChecksum();
+  chunk.boot_image_size = header.GetBootImageSize();
+  chunk.art_fd.reset(art_fd.release());
+  chunk.vdex_fd.reset(vdex_fd.release());
+  chunk.oat_fd.reset(oat_fd.release());
+  chunks_.push_back(std::move(chunk));
+  next_bcp_index_ = bcp_index + header.GetComponentCount();
+  total_component_count_ += header.GetComponentCount();
+  total_reservation_size_ += header.GetImageReservationSize();
+  return true;
+}
+
+// Consumes the checksum of the most recently added chunk from the front of
+// `*oat_checksums`, including the following ':' separator when more
+// checksums remain. Returns false if the checksum does not match, if the
+// string ends while BCP components are still uncovered, or if the separator
+// is missing/trailing.
+bool ImageSpace::BootImageLayout::CheckAndRemoveLastChunkChecksum(
+    /*inout*/std::string_view* oat_checksums,
+    /*out*/std::string* error_msg) {
+  DCHECK(oat_checksums != nullptr);
+  DCHECK(!chunks_.empty());
+  const ImageChunk& chunk = chunks_.back();
+  size_t component_count = chunk.component_count;
+  size_t checksum = chunk.checksum;
+  if (!CheckAndRemoveImageChecksum(component_count, checksum, oat_checksums, error_msg)) {
+    DCHECK(!error_msg->empty());
+    return false;
+  }
+  if (oat_checksums->empty()) {
+    // An exhausted checksum string is OK only if the whole BCP is covered.
+    if (next_bcp_index_ != boot_class_path_.size()) {
+      *error_msg = StringPrintf("Checksum too short, missing %zu components.",
+                                boot_class_path_.size() - next_bcp_index_);
+      return false;
+    }
+    return true;
+  }
+  if (!StartsWith(*oat_checksums, ":")) {
+    *error_msg = StringPrintf("Missing ':' separator at start of %s",
+                              std::string(*oat_checksums).c_str());
+    return false;
+  }
+  oat_checksums->remove_prefix(1u);
+  if (oat_checksums->empty()) {
+    *error_msg = "Missing checksums after the ':' separator.";
+    return false;
+  }
+  return true;
+}
+
+// Core implementation shared by LoadOrValidateFromSystem() and
+// LoadOrValidateFromDalvikCache(). Loads (oat_checksums == nullptr) or
+// validates against checksums (oat_checksums != nullptr) the boot image
+// layout: parses `image_location_` into components, matches the named ones
+// to BCP entries, reads their headers (falling back to compiling an
+// extension when a profile is given), then expands trailing wildcard
+// components against the remaining BCP entries. `filename_fn` maps an image
+// location to a filename.
+template <typename FilenameFn>
+bool ImageSpace::BootImageLayout::LoadOrValidate(FilenameFn&& filename_fn,
+                                                 /*inout*/std::string_view* oat_checksums,
+                                                 /*out*/std::string* error_msg) {
+  DCHECK(GetChunks().empty());
+  DCHECK_EQ(GetBaseAddress(), 0u);
+  bool validate = (oat_checksums != nullptr);
+  static_assert(ImageSpace::kImageChecksumPrefix == 'i', "Format prefix check.");
+  DCHECK(!validate || StartsWith(*oat_checksums, "i"));
+
+  std::vector<std::string> components;
+  Split(image_location_, kComponentSeparator, &components);
+  size_t named_components_count = 0u;
+  if (!VerifyImageLocation(components, &named_components_count, error_msg)) {
+    return false;
+  }
+
+  ArrayRef<const std::string> named_components =
+      ArrayRef<const std::string>(components).SubArray(/*pos=*/ 0u, named_components_count);
+
+  std::vector<NamedComponentLocation> named_component_locations;
+  if (!MatchNamedComponents(named_components, &named_component_locations, error_msg)) {
+    return false;
+  }
+
+  // Load the image headers of named components.
+  DCHECK_EQ(named_component_locations.size(), named_components.size());
+  const size_t bcp_component_count = boot_class_path_.size();
+  size_t bcp_pos = 0u;
+  ArrayRef<std::string> extension_dependencies;
+  for (size_t i = 0, size = named_components.size(); i != size; ++i) {
+    const std::string& base_location = named_component_locations[i].base_location;
+    size_t bcp_index = named_component_locations[i].bcp_index;
+    const std::string& profile_filename = named_component_locations[i].profile_filename;
+    if (extension_dependencies.empty() && !profile_filename.empty()) {
+      // Each extension is compiled against the same dependencies, namely the leading
+      // named components that were specified without providing the profile filename.
+      extension_dependencies =
+          ArrayRef<std::string>(components).SubArray(/*pos=*/ 0, /*length=*/ i);
+    }
+    if (bcp_index < bcp_pos) {
+      DCHECK_NE(i, 0u);
+      LOG(ERROR) << "Named image component already covered by previous image: " << base_location;
+      continue;
+    }
+    if (validate && bcp_index > bcp_pos) {
+      // Validation requires the images to cover the BCP without gaps.
+      *error_msg = StringPrintf("End of contiguous boot class path images, remaining checksum: %s",
+                                std::string(*oat_checksums).c_str());
+      return false;
+    }
+    // Failures on the primary image or during validation are fatal; other
+    // failures are logged and may fall back to compiling an extension.
+    std::string local_error_msg;
+    std::string* err_msg = (i == 0 || validate) ? error_msg : &local_error_msg;
+    std::string base_filename;
+    if (!filename_fn(base_location, &base_filename, err_msg) ||
+        !ReadHeader(base_location, base_filename, bcp_index, err_msg)) {
+      if (i == 0u || validate) {
+        return false;
+      }
+      VLOG(image) << "Error reading named image component header for " << base_location
+                  << ", error: " << local_error_msg;
+      if (profile_filename.empty() ||
+          !CompileExtension(base_location,
+                            base_filename,
+                            bcp_index,
+                            profile_filename,
+                            extension_dependencies,
+                            &local_error_msg)) {
+        if (!profile_filename.empty()) {
+          VLOG(image) << "Error compiling extension for " << boot_class_path_[bcp_index]
+                      << " error: " << local_error_msg;
+        }
+        bcp_pos = bcp_index + 1u;  // Skip at least this component.
+        DCHECK_GT(bcp_pos, GetNextBcpIndex());
+        continue;
+      }
+    }
+    if (validate) {
+      if (!CheckAndRemoveLastChunkChecksum(oat_checksums, error_msg)) {
+        return false;
+      }
+      if (oat_checksums->empty() || !StartsWith(*oat_checksums, "i")) {
+        return true;  // Let the caller deal with the dex file checksums if any.
+      }
+    }
+    bcp_pos = GetNextBcpIndex();
+  }
+
+  // Look for remaining components if there are any wildcard specifications.
+  ArrayRef<const std::string> search_paths =
+      ArrayRef<const std::string>(components).SubArray(/*pos=*/ named_components_count);
+  if (!search_paths.empty()) {
+    // The extension filenames are derived from the primary image's base name.
+    const std::string& primary_base_location = named_component_locations[0].base_location;
+    size_t base_slash_pos = primary_base_location.rfind('/');
+    DCHECK_NE(base_slash_pos, std::string::npos);
+    std::string base_name = primary_base_location.substr(base_slash_pos + 1u);
+    DCHECK(!base_name.empty());
+    while (bcp_pos != bcp_component_count) {
+      const std::string& bcp_component =  boot_class_path_[bcp_pos];
+      bool found = false;
+      for (const std::string& path : search_paths) {
+        std::string base_location;
+        if (path.size() == 1u) {
+          // Plain "*": search next to the BCP component itself.
+          DCHECK_EQ(path, "*");
+          size_t slash_pos = bcp_component.rfind('/');
+          DCHECK_NE(slash_pos, std::string::npos);
+          base_location = bcp_component.substr(0u, slash_pos + 1u) + base_name;
+        } else {
+          // "dir/*": search in the given directory.
+          DCHECK(EndsWith(path, "/*"));
+          base_location = path.substr(0u, path.size() - 1u) + base_name;
+        }
+        std::string err_msg;  // Ignored.
+        std::string base_filename;
+        if (filename_fn(base_location, &base_filename, &err_msg) &&
+            ReadHeader(base_location, base_filename, bcp_pos, &err_msg)) {
+          VLOG(image) << "Found image extension for " << ExpandLocation(base_location, bcp_pos);
+          bcp_pos = GetNextBcpIndex();
+          found = true;
+          if (validate) {
+            if (!CheckAndRemoveLastChunkChecksum(oat_checksums, error_msg)) {
+              return false;
+            }
+            if (oat_checksums->empty() || !StartsWith(*oat_checksums, "i")) {
+              return true;  // Let the caller deal with the dex file checksums if any.
+            }
+          }
+          break;
+        }
+      }
+      if (!found) {
+        // Missing extensions are fatal only when validating against checksums.
+        if (validate) {
+          *error_msg = StringPrintf("Missing extension for %s, remaining checksum: %s",
+                                    bcp_component.c_str(),
+                                    std::string(*oat_checksums).c_str());
+          return false;
+        }
+        ++bcp_pos;
+      }
+    }
+  }
+
+  return true;
+}
+
+// Loads or validates boot images from the system partition. Filenames are
+// derived with GetSystemImageFilename() for `image_isa`; the mapping itself
+// cannot fail, so the filename function always returns true.
+bool ImageSpace::BootImageLayout::LoadOrValidateFromSystem(InstructionSet image_isa,
+                                                           /*inout*/std::string_view* oat_checksums,
+                                                           /*out*/std::string* error_msg) {
+  auto filename_fn = [image_isa](const std::string& location,
+                                 /*out*/std::string* filename,
+                                 /*out*/std::string* err_msg ATTRIBUTE_UNUSED) {
+    *filename = GetSystemImageFilename(location.c_str(), image_isa);
+    return true;
+  };
+  return LoadOrValidate(filename_fn, oat_checksums, error_msg);
+}
+
+// Loads or validates boot images from the given dalvik cache directory.
+// Filenames are derived with GetDalvikCacheFilename(), which may fail and
+// report an error through `err_msg`.
+bool ImageSpace::BootImageLayout::LoadOrValidateFromDalvikCache(
+    const std::string& dalvik_cache,
+    /*inout*/std::string_view* oat_checksums,
+    /*out*/std::string* error_msg) {
+  auto filename_fn = [&dalvik_cache](const std::string& location,
+                                     /*out*/std::string* filename,
+                                     /*out*/std::string* err_msg) {
+    return GetDalvikCacheFilename(location.c_str(), dalvik_cache.c_str(), filename, err_msg);
+  };
+  return LoadOrValidate(filename_fn, oat_checksums, error_msg);
+}
+
 class ImageSpace::BootImageLoader {
  public:
   BootImageLoader(const std::vector<std::string>& boot_class_path,
@@ -1384,8 +2512,10 @@
   bool IsZygote() const { return is_zygote_; }
 
   void FindImageFiles() {
+    BootImageLayout layout(image_location_, boot_class_path_, boot_class_path_locations_);
+    std::string image_location = layout.GetPrimaryImageLocation();
     std::string system_filename;
-    bool found_image = FindImageFilenameImpl(image_location_.c_str(),
+    bool found_image = FindImageFilenameImpl(image_location.c_str(),
                                              image_isa_,
                                              &has_system_,
                                              &system_filename,
@@ -1414,149 +2544,207 @@
 
   bool LoadFromSystem(bool validate_oat_file,
                       size_t extra_reservation_size,
-                      /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+                      /*out*/std::vector<std::unique_ptr<ImageSpace>>* boot_image_spaces,
                       /*out*/MemMap* extra_reservation,
-                      /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
-    TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
-    std::string filename = GetSystemImageFilename(image_location_.c_str(), image_isa_);
-
-    if (!LoadFromFile(filename,
-                      validate_oat_file,
-                      extra_reservation_size,
-                      &logger,
-                      boot_image_spaces,
-                      extra_reservation,
-                      error_msg)) {
-      return false;
-    }
-
-    if (VLOG_IS_ON(image)) {
-      LOG(INFO) << "ImageSpace::BootImageLoader::LoadFromSystem exiting "
-          << boot_image_spaces->front();
-      logger.Dump(LOG_STREAM(INFO));
-    }
-    return true;
-  }
+                      /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool LoadFromDalvikCache(
       bool validate_oat_file,
       size_t extra_reservation_size,
-      /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+      /*out*/std::vector<std::unique_ptr<ImageSpace>>* boot_image_spaces,
       /*out*/MemMap* extra_reservation,
-      /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
-    TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
-    DCHECK(DalvikCacheExists());
-
-    if (!LoadFromFile(cache_filename_,
-                      validate_oat_file,
-                      extra_reservation_size,
-                      &logger,
-                      boot_image_spaces,
-                      extra_reservation,
-                      error_msg)) {
-      return false;
-    }
-
-    if (VLOG_IS_ON(image)) {
-      LOG(INFO) << "ImageSpace::BootImageLoader::LoadFromDalvikCache exiting "
-          << boot_image_spaces->front();
-      logger.Dump(LOG_STREAM(INFO));
-    }
-    return true;
-  }
+      /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
-  bool LoadFromFile(
-      const std::string& filename,
+  bool LoadImage(
+      const BootImageLayout& layout,
       bool validate_oat_file,
       size_t extra_reservation_size,
       TimingLogger* logger,
-      /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+      /*out*/std::vector<std::unique_ptr<ImageSpace>>* boot_image_spaces,
       /*out*/MemMap* extra_reservation,
       /*out*/std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_) {
-    ImageHeader system_hdr;
-    if (!ReadSpecificImageHeader(filename.c_str(), &system_hdr)) {
-      *error_msg = StringPrintf("Cannot read header of %s", filename.c_str());
-      return false;
-    }
-    if (system_hdr.GetComponentCount() == 0u ||
-        system_hdr.GetComponentCount() > boot_class_path_.size()) {
-      *error_msg = StringPrintf("Unexpected component count in %s, received %u, "
-                                    "expected non-zero and <= %zu",
-                                filename.c_str(),
-                                system_hdr.GetComponentCount(),
-                                boot_class_path_.size());
-      return false;
-    }
-    MemMap image_reservation;
-    MemMap local_extra_reservation;
-    if (!ReserveBootImageMemory(system_hdr.GetImageReservationSize(),
-                                reinterpret_cast32<uint32_t>(system_hdr.GetImageBegin()),
-                                extra_reservation_size,
-                                &image_reservation,
-                                &local_extra_reservation,
-                                error_msg)) {
+    ArrayRef<const BootImageLayout::ImageChunk> chunks = layout.GetChunks();
+    DCHECK(!chunks.empty());
+    const uint32_t base_address = layout.GetBaseAddress();
+    const size_t image_component_count = layout.GetTotalComponentCount();
+    const size_t image_reservation_size = layout.GetTotalReservationSize();
+
+    DCHECK_LE(image_reservation_size, kMaxTotalImageReservationSize);
+    static_assert(kMaxTotalImageReservationSize < std::numeric_limits<uint32_t>::max());
+    if (extra_reservation_size > std::numeric_limits<uint32_t>::max() - image_reservation_size) {
+      // Since the `image_reservation_size` is limited to kMaxTotalImageReservationSize,
+      // the `extra_reservation_size` would have to be really excessive to fail this check.
+      *error_msg = StringPrintf("Excessive extra reservation size: %zu", extra_reservation_size);
       return false;
     }
 
-    ArrayRef<const std::string> provided_locations(boot_class_path_locations_.data(),
-                                                   system_hdr.GetComponentCount());
-    std::vector<std::string> locations =
-        ExpandMultiImageLocations(provided_locations, image_location_);
-    std::vector<std::string> filenames =
-        ExpandMultiImageLocations(provided_locations, filename);
-    DCHECK_EQ(locations.size(), filenames.size());
+    // Reserve address space. If relocating, choose a random address for ASLR.
+    uint8_t* addr = reinterpret_cast<uint8_t*>(
+        relocate_ ? ART_BASE_ADDRESS + ChooseRelocationOffsetDelta() : base_address);
+    MemMap image_reservation =
+        ReserveBootImageMemory(addr, image_reservation_size + extra_reservation_size, error_msg);
+    if (!image_reservation.IsValid()) {
+      return false;
+    }
+
+    // Load components.
     std::vector<std::unique_ptr<ImageSpace>> spaces;
-    spaces.reserve(locations.size());
-    for (std::size_t i = 0u, size = locations.size(); i != size; ++i) {
-      spaces.push_back(Load(locations[i], filenames[i], logger, &image_reservation, error_msg));
-      const ImageSpace* space = spaces.back().get();
-      if (space == nullptr) {
-        return false;
+    spaces.reserve(image_component_count);
+    size_t max_image_space_dependencies = 0u;
+    for (size_t i = 0, num_chunks = chunks.size(); i != num_chunks; ++i) {
+      const BootImageLayout::ImageChunk& chunk = chunks[i];
+      std::string extension_error_msg;
+      uint8_t* old_reservation_begin = image_reservation.Begin();
+      size_t old_reservation_size = image_reservation.Size();
+      DCHECK_LE(chunk.reservation_size, old_reservation_size);
+      if (!LoadComponents(chunk,
+                          validate_oat_file,
+                          max_image_space_dependencies,
+                          logger,
+                          &spaces,
+                          &image_reservation,
+                          (i == 0) ? error_msg : &extension_error_msg)) {
+        // Failed to load the chunk. If this is the primary boot image, report the error.
+        if (i == 0) {
+          return false;
+        }
+        // For extension, shrink the reservation (and remap if needed, see below).
+        size_t new_reservation_size = old_reservation_size - chunk.reservation_size;
+        if (new_reservation_size == 0u) {
+          DCHECK_EQ(extra_reservation_size, 0u);
+          DCHECK_EQ(i + 1u, num_chunks);
+          image_reservation.Reset();
+        } else if (old_reservation_begin != image_reservation.Begin()) {
+          // Part of the image reservation has been used and then unmapped when
+          // rolling back the partial boot image extension load. Try to remap
+          // the image reservation. As this should be running single-threaded,
+          // the address range should still be available to mmap().
+          image_reservation.Reset();
+          std::string remap_error_msg;
+          image_reservation = ReserveBootImageMemory(old_reservation_begin,
+                                                     new_reservation_size,
+                                                     &remap_error_msg);
+          if (!image_reservation.IsValid()) {
+            *error_msg = StringPrintf("Failed to remap boot image reservation after failing "
+                                          "to load boot image extension (%s: %s): %s",
+                                      boot_class_path_locations_[chunk.start_index].c_str(),
+                                      extension_error_msg.c_str(),
+                                      remap_error_msg.c_str());
+            return false;
+          }
+        } else {
+          DCHECK_EQ(old_reservation_size, image_reservation.Size());
+          image_reservation.SetSize(new_reservation_size);
+        }
+        LOG(ERROR) << "Failed to load boot image extension "
+            << boot_class_path_locations_[chunk.start_index] << ": " << extension_error_msg;
       }
-      uint32_t expected_component_count = (i == 0u) ? system_hdr.GetComponentCount() : 0u;
-      uint32_t expected_reservation_size = (i == 0u) ? system_hdr.GetImageReservationSize() : 0u;
-      if (!Loader::CheckImageReservationSize(*space, expected_reservation_size, error_msg) ||
-          !Loader::CheckImageComponentCount(*space, expected_component_count, error_msg)) {
-        return false;
+      // Update `max_image_space_dependencies` if all previous BCP components
+      // were covered and loading the current chunk succeeded.
+      if (max_image_space_dependencies == chunk.start_index &&
+          spaces.size() == chunk.start_index + chunk.component_count) {
+        max_image_space_dependencies = chunk.start_index + chunk.component_count;
       }
     }
-    for (size_t i = 0u, size = spaces.size(); i != size; ++i) {
-      std::string expected_boot_class_path =
-          (i == 0u) ? android::base::Join(provided_locations, ':') : std::string();
-      if (!OpenOatFile(spaces[i].get(),
-                       boot_class_path_[i],
-                       expected_boot_class_path,
-                       validate_oat_file,
-                       logger,
-                       &image_reservation,
-                       error_msg)) {
-        return false;
-      }
-    }
-    if (!CheckReservationExhausted(image_reservation, error_msg)) {
+
+    MemMap local_extra_reservation;
+    if (!RemapExtraReservation(extra_reservation_size,
+                               &image_reservation,
+                               &local_extra_reservation,
+                               error_msg)) {
       return false;
     }
 
     MaybeRelocateSpaces(spaces, logger);
+    DeduplicateInternedStrings(ArrayRef<const std::unique_ptr<ImageSpace>>(spaces), logger);
     boot_image_spaces->swap(spaces);
     *extra_reservation = std::move(local_extra_reservation);
     return true;
   }
 
  private:
-  class RelocateVisitor {
+  class SimpleRelocateVisitor {
    public:
-    explicit RelocateVisitor(uint32_t diff) : diff_(diff) {}
+    SimpleRelocateVisitor(uint32_t diff, uint32_t begin, uint32_t size)
+        : diff_(diff), begin_(begin), size_(size) {}
+
+    // Adapter taking the same arguments as SplitRangeRelocateVisitor
+    // to simplify constructing the various visitors in DoRelocateSpaces().
+    SimpleRelocateVisitor(uint32_t base_diff,
+                          uint32_t current_diff,
+                          uint32_t bound,
+                          uint32_t begin,
+                          uint32_t size)
+        : SimpleRelocateVisitor(base_diff, begin, size) {
+      // Check arguments unused by this class.
+      DCHECK_EQ(base_diff, current_diff);
+      DCHECK_EQ(bound, begin);
+    }
 
     template <typename T>
     ALWAYS_INLINE T* operator()(T* src) const {
-      DCHECK(src != nullptr);
-      return reinterpret_cast32<T*>(reinterpret_cast32<uint32_t>(src) + diff_);
+      DCHECK(InSource(src));
+      uint32_t raw_src = reinterpret_cast32<uint32_t>(src);
+      return reinterpret_cast32<T*>(raw_src + diff_);
+    }
+
+    template <typename T>
+    ALWAYS_INLINE bool InSource(T* ptr) const {
+      uint32_t raw_ptr = reinterpret_cast32<uint32_t>(ptr);
+      return raw_ptr - begin_ < size_;
+    }
+
+    template <typename T>
+    ALWAYS_INLINE bool InDest(T* ptr) const {
+      uint32_t raw_ptr = reinterpret_cast32<uint32_t>(ptr);
+      uint32_t src_ptr = raw_ptr - diff_;
+      return src_ptr - begin_ < size_;
     }
 
    private:
     const uint32_t diff_;
+    const uint32_t begin_;
+    const uint32_t size_;
+  };
+
+  class SplitRangeRelocateVisitor {
+   public:
+    SplitRangeRelocateVisitor(uint32_t base_diff,
+                              uint32_t current_diff,
+                              uint32_t bound,
+                              uint32_t begin,
+                              uint32_t size)
+        : base_diff_(base_diff),
+          current_diff_(current_diff),
+          bound_(bound),
+          begin_(begin),
+          size_(size) {
+      DCHECK_NE(begin_, bound_);
+      // The bound separates the boot image range and the extension range.
+      DCHECK_LT(bound_ - begin_, size_);
+    }
+
+    template <typename T>
+    ALWAYS_INLINE T* operator()(T* src) const {
+      DCHECK(InSource(src));
+      uint32_t raw_src = reinterpret_cast32<uint32_t>(src);
+      uint32_t diff = (raw_src < bound_) ? base_diff_ : current_diff_;
+      return reinterpret_cast32<T*>(raw_src + diff);
+    }
+
+    template <typename T>
+    ALWAYS_INLINE bool InSource(T* ptr) const {
+      uint32_t raw_ptr = reinterpret_cast32<uint32_t>(ptr);
+      return raw_ptr - begin_ < size_;
+    }
+
+   private:
+    const uint32_t base_diff_;
+    const uint32_t current_diff_;
+    const uint32_t bound_;
+    const uint32_t begin_;
+    const uint32_t size_;
   };
 
   static void** PointerAddress(ArtMethod* method, MemberOffset offset) {
@@ -1564,43 +2752,135 @@
   }
 
   template <PointerSize kPointerSize>
-  static void DoRelocateSpaces(const std::vector<std::unique_ptr<ImageSpace>>& spaces,
-                               uint32_t diff) REQUIRES_SHARED(Locks::mutator_lock_) {
-    std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> patched_objects(
+  static void DoRelocateSpaces(ArrayRef<const std::unique_ptr<ImageSpace>>& spaces,
+                               int64_t base_diff64) REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(!spaces.empty());
+    gc::accounting::ContinuousSpaceBitmap patched_objects(
         gc::accounting::ContinuousSpaceBitmap::Create(
             "Marked objects",
             spaces.front()->Begin(),
             spaces.back()->End() - spaces.front()->Begin()));
-    using PatchRelocateVisitor = PatchObjectVisitor<kPointerSize, RelocateVisitor, RelocateVisitor>;
-    RelocateVisitor relocate_visitor(diff);
-    PatchRelocateVisitor patch_object_visitor(relocate_visitor, relocate_visitor);
+    const ImageHeader& base_header = spaces[0]->GetImageHeader();
+    size_t base_image_space_count = base_header.GetImageSpaceCount();
+    DCHECK_LE(base_image_space_count, spaces.size());
+    DoRelocateSpaces<kPointerSize, /*kExtension=*/ false>(
+        spaces.SubArray(/*pos=*/ 0u, base_image_space_count),
+        base_diff64,
+        &patched_objects);
 
-    mirror::Class* dcheck_class_class = nullptr;  // Used only for a DCHECK().
+    for (size_t i = base_image_space_count, size = spaces.size(); i != size; ) {
+      const ImageHeader& ext_header = spaces[i]->GetImageHeader();
+      size_t ext_image_space_count = ext_header.GetImageSpaceCount();
+      DCHECK_LE(ext_image_space_count, size - i);
+      DoRelocateSpaces<kPointerSize, /*kExtension=*/ true>(
+          spaces.SubArray(/*pos=*/ i, ext_image_space_count),
+          base_diff64,
+          &patched_objects);
+      i += ext_image_space_count;
+    }
+  }
+
+  template <PointerSize kPointerSize, bool kExtension>
+  static void DoRelocateSpaces(ArrayRef<const std::unique_ptr<ImageSpace>> spaces,
+                               int64_t base_diff64,
+                               gc::accounting::ContinuousSpaceBitmap* patched_objects)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(!spaces.empty());
+    const ImageHeader& first_header = spaces.front()->GetImageHeader();
+    uint32_t image_begin = reinterpret_cast32<uint32_t>(first_header.GetImageBegin());
+    uint32_t image_size = first_header.GetImageReservationSize();
+    DCHECK_NE(image_size, 0u);
+    uint32_t source_begin = kExtension ? first_header.GetBootImageBegin() : image_begin;
+    uint32_t source_size = kExtension ? first_header.GetBootImageSize() + image_size : image_size;
+    if (kExtension) {
+      DCHECK_EQ(first_header.GetBootImageBegin() + first_header.GetBootImageSize(), image_begin);
+    }
+    int64_t current_diff64 = kExtension
+        ? static_cast<int64_t>(reinterpret_cast32<uint32_t>(spaces.front()->Begin())) -
+              static_cast<int64_t>(image_begin)
+        : base_diff64;
+    if (base_diff64 == 0 && current_diff64 == 0) {
+      return;
+    }
+    uint32_t base_diff = static_cast<uint32_t>(base_diff64);
+    uint32_t current_diff = static_cast<uint32_t>(current_diff64);
+
+    // For boot image the main visitor is a SimpleRelocateVisitor. For the boot image extension we
+    // mostly use a SplitRangeRelocateVisitor but some work can still use the SimpleRelocateVisitor.
+    using MainRelocateVisitor = typename std::conditional<
+        kExtension, SplitRangeRelocateVisitor, SimpleRelocateVisitor>::type;
+    SimpleRelocateVisitor simple_relocate_visitor(current_diff, image_begin, image_size);
+    MainRelocateVisitor main_relocate_visitor(
+        base_diff, current_diff, /*bound=*/ image_begin, source_begin, source_size);
+
+    using MainPatchRelocateVisitor =
+        PatchObjectVisitor<kPointerSize, MainRelocateVisitor, MainRelocateVisitor>;
+    using SimplePatchRelocateVisitor =
+        PatchObjectVisitor<kPointerSize, SimpleRelocateVisitor, SimpleRelocateVisitor>;
+    MainPatchRelocateVisitor main_patch_object_visitor(main_relocate_visitor,
+                                                       main_relocate_visitor);
+    SimplePatchRelocateVisitor simple_patch_object_visitor(simple_relocate_visitor,
+                                                           simple_relocate_visitor);
+
+    // Retrieve the Class.class, Method.class and Constructor.class needed in the loops below.
+    ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots;
+    ObjPtr<mirror::Class> class_class;
+    ObjPtr<mirror::Class> method_class;
+    ObjPtr<mirror::Class> constructor_class;
+    {
+      ObjPtr<mirror::ObjectArray<mirror::Object>> image_roots =
+          simple_relocate_visitor(first_header.GetImageRoots<kWithoutReadBarrier>().Ptr());
+      DCHECK(!patched_objects->Test(image_roots.Ptr()));
+
+      SimpleRelocateVisitor base_relocate_visitor(
+          base_diff,
+          source_begin,
+          kExtension ? source_size - image_size : image_size);
+      int32_t class_roots_index = enum_cast<int32_t>(ImageHeader::kClassRoots);
+      DCHECK_LT(class_roots_index, image_roots->GetLength<kVerifyNone>());
+      class_roots = ObjPtr<mirror::ObjectArray<mirror::Class>>::DownCast(base_relocate_visitor(
+          image_roots->GetWithoutChecks<kVerifyNone>(class_roots_index).Ptr()));
+      if (kExtension) {
+        // Class roots must have been visited if we relocated the primary boot image.
+        DCHECK(base_diff == 0 || patched_objects->Test(class_roots.Ptr()));
+        class_class = GetClassRoot<mirror::Class, kWithoutReadBarrier>(class_roots);
+        method_class = GetClassRoot<mirror::Method, kWithoutReadBarrier>(class_roots);
+        constructor_class = GetClassRoot<mirror::Constructor, kWithoutReadBarrier>(class_roots);
+      } else {
+        DCHECK(!patched_objects->Test(class_roots.Ptr()));
+        class_class = simple_relocate_visitor(
+            GetClassRoot<mirror::Class, kWithoutReadBarrier>(class_roots).Ptr());
+        method_class = simple_relocate_visitor(
+            GetClassRoot<mirror::Method, kWithoutReadBarrier>(class_roots).Ptr());
+        constructor_class = simple_relocate_visitor(
+            GetClassRoot<mirror::Constructor, kWithoutReadBarrier>(class_roots).Ptr());
+      }
+    }
+
     for (const std::unique_ptr<ImageSpace>& space : spaces) {
-      // First patch the image header. The `diff` is OK for patching 32-bit fields but
-      // the 64-bit method fields in the ImageHeader may need a negative `delta`.
-      reinterpret_cast<ImageHeader*>(space->Begin())->RelocateImage(
-          (reinterpret_cast32<uint32_t>(space->Begin()) >= -diff)  // Would `begin+diff` overflow?
-              ? -static_cast<int64_t>(-diff) : static_cast<int64_t>(diff));
+      // First patch the image header.
+      reinterpret_cast<ImageHeader*>(space->Begin())->RelocateImageReferences(current_diff64);
+      reinterpret_cast<ImageHeader*>(space->Begin())->RelocateBootImageReferences(base_diff64);
 
       // Patch fields and methods.
       const ImageHeader& image_header = space->GetImageHeader();
       image_header.VisitPackedArtFields([&](ArtField& field) REQUIRES_SHARED(Locks::mutator_lock_) {
-        patch_object_visitor.template PatchGcRoot</*kMayBeNull=*/ false>(
+        // Fields always reference class in the current image.
+        simple_patch_object_visitor.template PatchGcRoot</*kMayBeNull=*/ false>(
             &field.DeclaringClassRoot());
       }, space->Begin());
       image_header.VisitPackedArtMethods([&](ArtMethod& method)
           REQUIRES_SHARED(Locks::mutator_lock_) {
-        patch_object_visitor.PatchGcRoot(&method.DeclaringClassRoot());
+        main_patch_object_visitor.PatchGcRoot(&method.DeclaringClassRoot());
         void** data_address = PointerAddress(&method, ArtMethod::DataOffset(kPointerSize));
-        patch_object_visitor.PatchNativePointer(data_address);
+        main_patch_object_visitor.PatchNativePointer(data_address);
         void** entrypoint_address =
             PointerAddress(&method, ArtMethod::EntryPointFromQuickCompiledCodeOffset(kPointerSize));
-        patch_object_visitor.PatchNativePointer(entrypoint_address);
+        main_patch_object_visitor.PatchNativePointer(entrypoint_address);
       }, space->Begin(), kPointerSize);
       auto method_table_visitor = [&](ArtMethod* method) {
         DCHECK(method != nullptr);
-        return relocate_visitor(method);
+        return main_relocate_visitor(method);
       };
       image_header.VisitPackedImTables(method_table_visitor, space->Begin(), kPointerSize);
       image_header.VisitPackedImtConflictTables(method_table_visitor, space->Begin(), kPointerSize);
@@ -1611,7 +2891,8 @@
         size_t read_count;
         InternTable::UnorderedSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
         for (GcRoot<mirror::String>& slot : temp_set) {
-          patch_object_visitor.template PatchGcRoot</*kMayBeNull=*/ false>(&slot);
+          // The intern table contains only strings in the current image.
+          simple_patch_object_visitor.template PatchGcRoot</*kMayBeNull=*/ false>(&slot);
         }
       }
 
@@ -1622,26 +2903,21 @@
         size_t read_count;
         ClassTable::ClassSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
         DCHECK(!temp_set.empty());
-        ClassTableVisitor class_table_visitor(relocate_visitor);
+        // The class table contains only classes in the current image.
+        ClassTableVisitor class_table_visitor(simple_relocate_visitor);
         for (ClassTable::TableSlot& slot : temp_set) {
           slot.VisitRoot(class_table_visitor);
-          mirror::Class* klass = slot.Read<kWithoutReadBarrier>();
+          ObjPtr<mirror::Class> klass = slot.Read<kWithoutReadBarrier>();
           DCHECK(klass != nullptr);
-          patched_objects->Set(klass);
-          patch_object_visitor.VisitClass(klass);
-          if (kIsDebugBuild) {
-            mirror::Class* class_class = klass->GetClass<kVerifyNone, kWithoutReadBarrier>();
-            if (dcheck_class_class == nullptr) {
-              dcheck_class_class = class_class;
-            } else {
-              CHECK_EQ(class_class, dcheck_class_class);
-            }
-          }
+          DCHECK(!patched_objects->Test(klass.Ptr()));
+          patched_objects->Set(klass.Ptr());
+          main_patch_object_visitor.VisitClass(klass, class_class);
           // Then patch the non-embedded vtable and iftable.
           ObjPtr<mirror::PointerArray> vtable =
               klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
-          if (vtable != nullptr && !patched_objects->Set(vtable.Ptr())) {
-            patch_object_visitor.VisitPointerArray(vtable);
+          if ((kExtension ? simple_relocate_visitor.InDest(vtable.Ptr()) : vtable != nullptr) &&
+              !patched_objects->Set(vtable.Ptr())) {
+            main_patch_object_visitor.VisitPointerArray(vtable);
           }
           ObjPtr<mirror::IfTable> iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
           if (iftable != nullptr) {
@@ -1649,11 +2925,13 @@
             for (int32_t i = 0; i != ifcount; ++i) {
               ObjPtr<mirror::PointerArray> unpatched_ifarray =
                   iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i);
-              if (unpatched_ifarray != nullptr) {
+              if (kExtension ? simple_relocate_visitor.InSource(unpatched_ifarray.Ptr())
+                             : unpatched_ifarray != nullptr) {
                 // The iftable has not been patched, so we need to explicitly adjust the pointer.
-                ObjPtr<mirror::PointerArray> ifarray = relocate_visitor(unpatched_ifarray.Ptr());
+                ObjPtr<mirror::PointerArray> ifarray =
+                    simple_relocate_visitor(unpatched_ifarray.Ptr());
                 if (!patched_objects->Set(ifarray.Ptr())) {
-                  patch_object_visitor.VisitPointerArray(ifarray);
+                  main_patch_object_visitor.VisitPointerArray(ifarray);
                 }
               }
             }
@@ -1662,30 +2940,7 @@
       }
     }
 
-    // Patch class roots now, so that we can recognize mirror::Method and mirror::Constructor.
-    ObjPtr<mirror::Class> method_class;
-    ObjPtr<mirror::Class> constructor_class;
-    {
-      const ImageSpace* space = spaces.front().get();
-      const ImageHeader& image_header = space->GetImageHeader();
-
-      ObjPtr<mirror::ObjectArray<mirror::Object>> image_roots =
-          image_header.GetImageRoots<kWithoutReadBarrier>();
-      patched_objects->Set(image_roots.Ptr());
-      patch_object_visitor.VisitObject(image_roots.Ptr());
-
-      ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
-          ObjPtr<mirror::ObjectArray<mirror::Class>>::DownCast(
-              image_header.GetImageRoot<kWithoutReadBarrier>(ImageHeader::kClassRoots));
-      patched_objects->Set(class_roots.Ptr());
-      patch_object_visitor.VisitObject(class_roots.Ptr());
-
-      method_class = GetClassRoot<mirror::Method, kWithoutReadBarrier>(class_roots);
-      constructor_class = GetClassRoot<mirror::Constructor, kWithoutReadBarrier>(class_roots);
-    }
-
-    for (size_t s = 0u, size = spaces.size(); s != size; ++s) {
-      const ImageSpace* space = spaces[s].get();
+    for (const std::unique_ptr<ImageSpace>& space : spaces) {
       const ImageHeader& image_header = space->GetImageHeader();
 
       static_assert(IsAligned<kObjectAlignment>(sizeof(ImageHeader)), "Header alignment check");
@@ -1693,21 +2948,22 @@
       DCHECK_ALIGNED(objects_end, kObjectAlignment);
       for (uint32_t pos = sizeof(ImageHeader); pos != objects_end; ) {
         mirror::Object* object = reinterpret_cast<mirror::Object*>(space->Begin() + pos);
+        // Note: use Test() rather than Set() as this is the last time we're checking this object.
         if (!patched_objects->Test(object)) {
           // This is the last pass over objects, so we do not need to Set().
-          patch_object_visitor.VisitObject(object);
+          main_patch_object_visitor.VisitObject(object);
           ObjPtr<mirror::Class> klass = object->GetClass<kVerifyNone, kWithoutReadBarrier>();
           if (klass->IsDexCacheClass<kVerifyNone>()) {
             // Patch dex cache array pointers and elements.
             ObjPtr<mirror::DexCache> dex_cache =
                 object->AsDexCache<kVerifyNone, kWithoutReadBarrier>();
-            patch_object_visitor.VisitDexCacheArrays(dex_cache);
+            main_patch_object_visitor.VisitDexCacheArrays(dex_cache);
           } else if (klass == method_class || klass == constructor_class) {
             // Patch the ArtMethod* in the mirror::Executable subobject.
             ObjPtr<mirror::Executable> as_executable =
                 ObjPtr<mirror::Executable>::DownCast(object);
             ArtMethod* unpatched_method = as_executable->GetArtMethod<kVerifyNone>();
-            ArtMethod* patched_method = relocate_visitor(unpatched_method);
+            ArtMethod* patched_method = main_relocate_visitor(unpatched_method);
             as_executable->SetArtMethod</*kTransactionActive=*/ false,
                                         /*kCheckTransaction=*/ true,
                                         kVerifyNone>(patched_method);
@@ -1716,6 +2972,12 @@
         pos += RoundUp(object->SizeOf<kVerifyNone>(), kObjectAlignment);
       }
     }
+    if (kIsDebugBuild && !kExtension) {
+      // We used just Test() instead of Set() above but we need to use Set()
+      // for class roots to satisfy a DCHECK() for extensions.
+      DCHECK(!patched_objects->Test(class_roots.Ptr()));
+      patched_objects->Set(class_roots.Ptr());
+    }
   }
 
   void MaybeRelocateSpaces(const std::vector<std::unique_ptr<ImageSpace>>& spaces,
@@ -1724,27 +2986,100 @@
     TimingLogger::ScopedTiming timing("MaybeRelocateSpaces", logger);
     ImageSpace* first_space = spaces.front().get();
     const ImageHeader& first_space_header = first_space->GetImageHeader();
-    uint32_t diff =
-        static_cast<uint32_t>(first_space->Begin() - first_space_header.GetImageBegin());
+    int64_t base_diff64 =
+        static_cast<int64_t>(reinterpret_cast32<uint32_t>(first_space->Begin())) -
+        static_cast<int64_t>(reinterpret_cast32<uint32_t>(first_space_header.GetImageBegin()));
     if (!relocate_) {
-      DCHECK_EQ(diff, 0u);
-      return;
+      DCHECK_EQ(base_diff64, 0);
     }
 
+    ArrayRef<const std::unique_ptr<ImageSpace>> spaces_ref(spaces);
     PointerSize pointer_size = first_space_header.GetPointerSize();
     if (pointer_size == PointerSize::k64) {
-      DoRelocateSpaces<PointerSize::k64>(spaces, diff);
+      DoRelocateSpaces<PointerSize::k64>(spaces_ref, base_diff64);
     } else {
-      DoRelocateSpaces<PointerSize::k32>(spaces, diff);
+      DoRelocateSpaces<PointerSize::k32>(spaces_ref, base_diff64);
+    }
+  }
+
+  void DeduplicateInternedStrings(ArrayRef<const std::unique_ptr<ImageSpace>> spaces,
+                                  TimingLogger* logger) REQUIRES_SHARED(Locks::mutator_lock_) {
+    TimingLogger::ScopedTiming timing("DeduplicateInternedStrings", logger);
+    DCHECK(!spaces.empty());
+    size_t num_spaces = spaces.size();
+    const ImageHeader& primary_header = spaces.front()->GetImageHeader();
+    size_t primary_image_count = primary_header.GetImageSpaceCount();
+    DCHECK_LE(primary_image_count, num_spaces);
+    DCHECK_EQ(primary_image_count, primary_header.GetComponentCount());
+    size_t component_count = primary_image_count;
+    size_t space_pos = primary_image_count;
+    while (space_pos != num_spaces) {
+      const ImageHeader& current_header = spaces[space_pos]->GetImageHeader();
+      size_t image_space_count = current_header.GetImageSpaceCount();
+      DCHECK_LE(image_space_count, num_spaces - space_pos);
+      size_t dependency_component_count = current_header.GetBootImageComponentCount();
+      DCHECK_LE(dependency_component_count, component_count);
+      if (dependency_component_count < component_count) {
+        // There shall be no duplicate strings with the components that this space depends on.
+        // Find the end of the dependencies, i.e. start of non-dependency images.
+        size_t start_component_count = primary_image_count;
+        size_t start_pos = primary_image_count;
+        while (start_component_count != dependency_component_count) {
+          const ImageHeader& dependency_header = spaces[start_pos]->GetImageHeader();
+          DCHECK_LE(dependency_header.GetComponentCount(),
+                    dependency_component_count - start_component_count);
+          start_component_count += dependency_header.GetComponentCount();
+          start_pos += dependency_header.GetImageSpaceCount();
+        }
+        // Remove duplicates from all intern tables belonging to the chunk.
+        ArrayRef<const std::unique_ptr<ImageSpace>> old_spaces =
+            spaces.SubArray(/*pos=*/ start_pos, space_pos - start_pos);
+        SafeMap<mirror::String*, mirror::String*> intern_remap;
+        for (size_t i = 0; i != image_space_count; ++i) {
+          ImageSpace* new_space = spaces[space_pos + i].get();
+          Loader::RemoveInternTableDuplicates(old_spaces, new_space, &intern_remap);
+        }
+        // Remap string for all spaces belonging to the chunk.
+        if (!intern_remap.empty()) {
+          for (size_t i = 0; i != image_space_count; ++i) {
+            ImageSpace* new_space = spaces[space_pos + i].get();
+            Loader::RemapInternedStringDuplicates(intern_remap, new_space);
+          }
+        }
+      }
+      component_count += current_header.GetComponentCount();
+      space_pos += image_space_count;
     }
   }
 
   std::unique_ptr<ImageSpace> Load(const std::string& image_location,
                                    const std::string& image_filename,
+                                   const std::string& profile_file,
+                                   android::base::unique_fd art_fd,
                                    TimingLogger* logger,
                                    /*inout*/MemMap* image_reservation,
                                    /*out*/std::string* error_msg)
       REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (art_fd.get() != -1) {
+      // No need to lock memfd for which we hold the only file descriptor
+      // (see locking with ScopedFlock for normal files below).
+      VLOG(startup) << "Using image file " << image_filename.c_str() << " for image location "
+                    << image_location << " for compiled extension";
+
+      File image_file(art_fd.release(), image_filename, /*check_usage=*/ false);
+      std::unique_ptr<ImageSpace> result = Loader::Init(&image_file,
+                                                        image_filename.c_str(),
+                                                        image_location.c_str(),
+                                                        profile_file.c_str(),
+                                                        /*allow_direct_mapping=*/ false,
+                                                        logger,
+                                                        image_reservation,
+                                                        error_msg);
+      // Note: We're closing the image file descriptor here when we destroy
+      // the `image_file` as we no longer need it.
+      return result;
+    }
+
     // Should this be a RDWR lock? This is only a defensive measure, as at
     // this point the image should exist.
     // However, only the zygote can write into the global dalvik-cache, so
@@ -1763,6 +3098,7 @@
 
     VLOG(startup) << "Using image file " << image_filename.c_str() << " for image location "
                   << image_location;
+
     // If we are in /system we can assume the image is good. We can also
     // assume this if we are using a relocated image (i.e. image checksum
     // matches) since this is only different by the offset. We need this to
@@ -1771,16 +3107,17 @@
     // file name.
     return Loader::Init(image_filename.c_str(),
                         image_location.c_str(),
-                        /*oat_file=*/ nullptr,
                         logger,
                         image_reservation,
                         error_msg);
   }
 
   bool OpenOatFile(ImageSpace* space,
-                   const std::string& dex_filename,
-                   const std::string& expected_boot_class_path,
+                   android::base::unique_fd vdex_fd,
+                   android::base::unique_fd oat_fd,
+                   ArrayRef<const std::string> dex_filenames,
                    bool validate_oat_file,
+                   ArrayRef<const std::unique_ptr<ImageSpace>> dependencies,
                    TimingLogger* logger,
                    /*inout*/MemMap* image_reservation,
                    /*out*/std::string* error_msg) {
@@ -1798,14 +3135,30 @@
       std::string oat_location =
           ImageHeader::GetOatLocationFromImageLocation(space->GetImageLocation());
 
-      oat_file.reset(OatFile::Open(/*zip_fd=*/ -1,
-                                   oat_filename,
-                                   oat_location,
-                                   executable_,
-                                   /*low_4gb=*/ false,
-                                   /*abs_dex_location=*/ dex_filename.c_str(),
-                                   image_reservation,
-                                   error_msg));
+      DCHECK_EQ(vdex_fd.get() != -1, oat_fd.get() != -1);
+      if (vdex_fd.get() == -1) {
+        oat_file.reset(OatFile::Open(/*zip_fd=*/ -1,
+                                     oat_filename,
+                                     oat_location,
+                                     executable_,
+                                     /*low_4gb=*/ false,
+                                     dex_filenames,
+                                     image_reservation,
+                                     error_msg));
+      } else {
+        oat_file.reset(OatFile::Open(/*zip_fd=*/ -1,
+                                     vdex_fd.get(),
+                                     oat_fd.get(),
+                                     oat_location,
+                                     executable_,
+                                     /*low_4gb=*/ false,
+                                     dex_filenames,
+                                     image_reservation,
+                                     error_msg));
+        // We no longer need the file descriptors and they will be closed by
+        // the unique_fd destructor when we leave this function.
+      }
+
       if (oat_file == nullptr) {
         *error_msg = StringPrintf("Failed to open oat file '%s' referenced from image %s: %s",
                                   oat_filename.c_str(),
@@ -1827,13 +3180,47 @@
       const char* oat_boot_class_path =
           oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kBootClassPathKey);
       oat_boot_class_path = (oat_boot_class_path != nullptr) ? oat_boot_class_path : "";
-      if (expected_boot_class_path != oat_boot_class_path) {
-        *error_msg = StringPrintf("Failed to match oat boot class path %s to expected "
-                                  "boot class path %s in image %s",
-                                  oat_boot_class_path,
-                                  expected_boot_class_path.c_str(),
-                                  space->GetName());
-        return false;
+      const char* oat_boot_class_path_checksums =
+          oat_file->GetOatHeader().GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey);
+      oat_boot_class_path_checksums =
+          (oat_boot_class_path_checksums != nullptr) ? oat_boot_class_path_checksums : "";
+      size_t component_count = image_header.GetComponentCount();
+      if (component_count == 0u) {
+        if (oat_boot_class_path[0] != 0 || oat_boot_class_path_checksums[0] != 0) {
+          *error_msg = StringPrintf("Unexpected non-empty boot class path %s and/or checksums %s"
+                                    " in image %s",
+                                    oat_boot_class_path,
+                                    oat_boot_class_path_checksums,
+                                    space->GetName());
+          return false;
+        }
+      } else if (dependencies.empty()) {
+        std::string expected_boot_class_path = Join(ArrayRef<const std::string>(
+              boot_class_path_locations_).SubArray(0u, component_count), ':');
+        if (expected_boot_class_path != oat_boot_class_path) {
+          *error_msg = StringPrintf("Failed to match oat boot class path %s to expected "
+                                    "boot class path %s in image %s",
+                                    oat_boot_class_path,
+                                    expected_boot_class_path.c_str(),
+                                    space->GetName());
+          return false;
+        }
+      } else {
+        std::string local_error_msg;
+        if (!VerifyBootClassPathChecksums(
+                 oat_boot_class_path_checksums,
+                 oat_boot_class_path,
+                 dependencies,
+                 ArrayRef<const std::string>(boot_class_path_locations_),
+                 ArrayRef<const std::string>(boot_class_path_),
+                 &local_error_msg)) {
+          *error_msg = StringPrintf("Failed to verify BCP %s with checksums %s in image %s: %s",
+                                    oat_boot_class_path,
+                                    oat_boot_class_path_checksums,
+                                    space->GetName(),
+                                    local_error_msg.c_str());
+          return false;
+        }
       }
       ptrdiff_t relocation_diff = space->Begin() - image_header.GetImageBegin();
       CHECK(image_header.GetOatDataBegin() != nullptr);
@@ -1860,37 +3247,152 @@
     return true;
   }
 
-  bool ReserveBootImageMemory(uint32_t reservation_size,
-                              uint32_t image_start,
-                              size_t extra_reservation_size,
-                              /*out*/MemMap* image_reservation,
-                              /*out*/MemMap* extra_reservation,
-                              /*out*/std::string* error_msg) {
-    DCHECK_ALIGNED(reservation_size, kPageSize);
-    DCHECK_ALIGNED(image_start, kPageSize);
-    DCHECK(!image_reservation->IsValid());
-    DCHECK_LT(extra_reservation_size, std::numeric_limits<uint32_t>::max() - reservation_size);
-    size_t total_size = reservation_size + extra_reservation_size;
-    // If relocating, choose a random address for ALSR.
-    uint32_t addr = relocate_ ? ART_BASE_ADDRESS + ChooseRelocationOffsetDelta() : image_start;
-    *image_reservation =
-        MemMap::MapAnonymous("Boot image reservation",
-                             reinterpret_cast32<uint8_t*>(addr),
-                             total_size,
-                             PROT_NONE,
-                             /*low_4gb=*/ true,
-                             /*reuse=*/ false,
-                             /*reservation=*/ nullptr,
-                             error_msg);
-    if (!image_reservation->IsValid()) {
+  bool LoadComponents(const BootImageLayout::ImageChunk& chunk,
+                      bool validate_oat_file,
+                      size_t max_image_space_dependencies,
+                      TimingLogger* logger,
+                      /*inout*/std::vector<std::unique_ptr<ImageSpace>>* spaces,
+                      /*inout*/MemMap* image_reservation,
+                      /*out*/std::string* error_msg)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Make sure we destroy the spaces we created if we're returning an error.
+    // Note that this can unmap part of the original `image_reservation`.
+    class Guard {
+     public:
+      explicit Guard(std::vector<std::unique_ptr<ImageSpace>>* spaces_in)
+          : spaces_(spaces_in), committed_(spaces_->size()) {}
+      void Commit() {
+        DCHECK_LT(committed_, spaces_->size());
+        committed_ = spaces_->size();
+      }
+      ~Guard() {
+        DCHECK_LE(committed_, spaces_->size());
+        spaces_->resize(committed_);
+      }
+     private:
+      std::vector<std::unique_ptr<ImageSpace>>* const spaces_;
+      size_t committed_;
+    };
+    Guard guard(spaces);
+
+    bool is_extension = (chunk.start_index != 0u);
+    DCHECK_NE(spaces->empty(), is_extension);
+    if (max_image_space_dependencies < chunk.boot_image_component_count) {
+      DCHECK(is_extension);
+      *error_msg = StringPrintf("Missing dependencies for extension component %s, %zu < %u",
+                                boot_class_path_locations_[chunk.start_index].c_str(),
+                                max_image_space_dependencies,
+                                chunk.boot_image_component_count);
       return false;
     }
+    ArrayRef<const std::string> requested_bcp_locations =
+        ArrayRef<const std::string>(boot_class_path_locations_).SubArray(
+            chunk.start_index, chunk.image_space_count);
+    std::vector<std::string> locations =
+        ExpandMultiImageLocations(requested_bcp_locations, chunk.base_location, is_extension);
+    std::vector<std::string> filenames =
+        ExpandMultiImageLocations(requested_bcp_locations, chunk.base_filename, is_extension);
+    DCHECK_EQ(locations.size(), filenames.size());
+    for (size_t i = 0u, size = locations.size(); i != size; ++i) {
+      spaces->push_back(Load(locations[i],
+                             filenames[i],
+                             chunk.profile_file,
+                             std::move(chunk.art_fd),
+                             logger,
+                             image_reservation,
+                             error_msg));
+      const ImageSpace* space = spaces->back().get();
+      if (space == nullptr) {
+        return false;
+      }
+      uint32_t expected_component_count = (i == 0u) ? chunk.component_count : 0u;
+      uint32_t expected_reservation_size = (i == 0u) ? chunk.reservation_size : 0u;
+      if (!Loader::CheckImageReservationSize(*space, expected_reservation_size, error_msg) ||
+          !Loader::CheckImageComponentCount(*space, expected_component_count, error_msg)) {
+        return false;
+      }
+      const ImageHeader& header = space->GetImageHeader();
+      if (i == 0 && (chunk.checksum != header.GetImageChecksum() ||
+                     chunk.image_space_count != header.GetImageSpaceCount() ||
+                     chunk.boot_image_component_count != header.GetBootImageComponentCount() ||
+                     chunk.boot_image_checksum != header.GetBootImageChecksum() ||
+                     chunk.boot_image_size != header.GetBootImageSize())) {
+        *error_msg = StringPrintf("Image header modified since previously read from %s; "
+                                      "checksum: 0x%08x -> 0x%08x,"
+                                      "image_space_count: %u -> %u"
+                                      "boot_image_component_count: %u -> %u, "
+                                      "boot_image_checksum: 0x%08x -> 0x%08x"
+                                      "boot_image_size: 0x%08x -> 0x%08x",
+                                  space->GetImageFilename().c_str(),
+                                  chunk.checksum,
+                                  chunk.image_space_count,
+                                  header.GetImageSpaceCount(),
+                                  header.GetImageChecksum(),
+                                  chunk.boot_image_component_count,
+                                  header.GetBootImageComponentCount(),
+                                  chunk.boot_image_checksum,
+                                  header.GetBootImageChecksum(),
+                                  chunk.boot_image_size,
+                                  header.GetBootImageSize());
+        return false;
+      }
+    }
+    DCHECK_GE(max_image_space_dependencies, chunk.boot_image_component_count);
+    ArrayRef<const std::unique_ptr<ImageSpace>> dependencies =
+        ArrayRef<const std::unique_ptr<ImageSpace>>(*spaces).SubArray(
+            /*pos=*/ 0u, chunk.boot_image_component_count);
+    for (size_t i = 0u, size = locations.size(); i != size; ++i) {
+      ImageSpace* space = (*spaces)[spaces->size() - chunk.image_space_count + i].get();
+      size_t bcp_chunk_size = (chunk.image_space_count == 1u) ? chunk.component_count : 1u;
+      if (!OpenOatFile(space,
+                       std::move(chunk.vdex_fd),
+                       std::move(chunk.oat_fd),
+                       boot_class_path_.SubArray(/*pos=*/ chunk.start_index + i, bcp_chunk_size),
+                       validate_oat_file,
+                       dependencies,
+                       logger,
+                       image_reservation,
+                       error_msg)) {
+        return false;
+      }
+    }
+
+    guard.Commit();
+    return true;
+  }
+
+  MemMap ReserveBootImageMemory(uint8_t* addr,
+                                uint32_t reservation_size,
+                                /*out*/std::string* error_msg) {
+    DCHECK_ALIGNED(reservation_size, kPageSize);
+    DCHECK_ALIGNED(addr, kPageSize);
+    return MemMap::MapAnonymous("Boot image reservation",
+                                addr,
+                                reservation_size,
+                                PROT_NONE,
+                                /*low_4gb=*/ true,
+                                /*reuse=*/ false,
+                                /*reservation=*/ nullptr,
+                                error_msg);
+  }
+
+  bool RemapExtraReservation(size_t extra_reservation_size,
+                             /*inout*/MemMap* image_reservation,
+                             /*out*/MemMap* extra_reservation,
+                             /*out*/std::string* error_msg) {
+    DCHECK_ALIGNED(extra_reservation_size, kPageSize);
     DCHECK(!extra_reservation->IsValid());
+    size_t expected_size = image_reservation->IsValid() ? image_reservation->Size() : 0u;
+    if (extra_reservation_size != expected_size) {
+      *error_msg = StringPrintf("Image reservation mismatch after loading boot image: %zu != %zu",
+                                extra_reservation_size,
+                                expected_size);
+      return false;
+    }
     if (extra_reservation_size != 0u) {
-      DCHECK_ALIGNED(extra_reservation_size, kPageSize);
-      DCHECK_LT(extra_reservation_size, image_reservation->Size());
-      uint8_t* split = image_reservation->End() - extra_reservation_size;
-      *extra_reservation = image_reservation->RemapAtEnd(split,
+      DCHECK(image_reservation->IsValid());
+      DCHECK_EQ(extra_reservation_size, image_reservation->Size());
+      *extra_reservation = image_reservation->RemapAtEnd(image_reservation->Begin(),
                                                          "Boot image extra reservation",
                                                          PROT_NONE,
                                                          error_msg);
@@ -1898,27 +3400,17 @@
         return false;
       }
     }
-
+    DCHECK(!image_reservation->IsValid());
     return true;
   }
 
-  bool CheckReservationExhausted(const MemMap& image_reservation, /*out*/std::string* error_msg) {
-    if (image_reservation.IsValid()) {
-      *error_msg = StringPrintf("Excessive image reservation after loading boot image: %p-%p",
-                                image_reservation.Begin(),
-                                image_reservation.End());
-      return false;
-    }
-    return true;
-  }
-
-  const std::vector<std::string>& boot_class_path_;
-  const std::vector<std::string>& boot_class_path_locations_;
-  const std::string& image_location_;
-  InstructionSet image_isa_;
-  bool relocate_;
-  bool executable_;
-  bool is_zygote_;
+  const ArrayRef<const std::string> boot_class_path_;
+  const ArrayRef<const std::string> boot_class_path_locations_;
+  const std::string image_location_;
+  const InstructionSet image_isa_;
+  const bool relocate_;
+  const bool executable_;
+  const bool is_zygote_;
   bool has_system_;
   bool has_cache_;
   bool is_global_cache_;
@@ -1927,6 +3419,102 @@
   std::string cache_filename_;
 };
 
+bool ImageSpace::BootImageLoader::LoadFromSystem(
+    bool validate_oat_file,
+    size_t extra_reservation_size,
+    /*out*/std::vector<std::unique_ptr<ImageSpace>>* boot_image_spaces,
+    /*out*/MemMap* extra_reservation,
+    /*out*/std::string* error_msg) {
+  TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
+
+  BootImageLayout layout(image_location_, boot_class_path_, boot_class_path_locations_);
+  if (!layout.LoadFromSystem(image_isa_, error_msg)) {
+    return false;
+  }
+
+  if (!LoadImage(layout,
+                 validate_oat_file,
+                 extra_reservation_size,
+                 &logger,
+                 boot_image_spaces,
+                 extra_reservation,
+                 error_msg)) {
+    return false;
+  }
+
+  if (VLOG_IS_ON(image)) {
+    LOG(INFO) << "ImageSpace::BootImageLoader::LoadFromSystem exiting "
+        << boot_image_spaces->front();
+    logger.Dump(LOG_STREAM(INFO));
+  }
+  return true;
+}
+
+bool ImageSpace::BootImageLoader::LoadFromDalvikCache(
+    bool validate_oat_file,
+    size_t extra_reservation_size,
+    /*out*/std::vector<std::unique_ptr<ImageSpace>>* boot_image_spaces,
+    /*out*/MemMap* extra_reservation,
+    /*out*/std::string* error_msg) {
+  TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
+  DCHECK(DalvikCacheExists());
+
+  BootImageLayout layout(image_location_, boot_class_path_, boot_class_path_locations_);
+  if (!layout.LoadFromDalvikCache(dalvik_cache_, error_msg)) {
+    return false;
+  }
+  if (!LoadImage(layout,
+                 validate_oat_file,
+                 extra_reservation_size,
+                 &logger,
+                 boot_image_spaces,
+                 extra_reservation,
+                 error_msg)) {
+    return false;
+  }
+
+  if (VLOG_IS_ON(image)) {
+    LOG(INFO) << "ImageSpace::BootImageLoader::LoadFromDalvikCache exiting "
+        << boot_image_spaces->front();
+    logger.Dump(LOG_STREAM(INFO));
+  }
+  return true;
+}
+
+bool ImageSpace::IsBootClassPathOnDisk(InstructionSet image_isa) {
+  Runtime* runtime = Runtime::Current();
+  BootImageLayout layout(runtime->GetImageLocation(),
+                         ArrayRef<const std::string>(runtime->GetBootClassPath()),
+                         ArrayRef<const std::string>(runtime->GetBootClassPathLocations()));
+  const std::string image_location = layout.GetPrimaryImageLocation();
+  ImageSpaceLoadingOrder order = runtime->GetImageSpaceLoadingOrder();
+  std::unique_ptr<ImageHeader> image_header;
+  std::string error_msg;
+
+  std::string system_filename;
+  bool has_system = false;
+  std::string cache_filename;
+  bool has_cache = false;
+  bool dalvik_cache_exists = false;
+  bool is_global_cache = false;
+  if (FindImageFilename(image_location.c_str(),
+                        image_isa,
+                        &system_filename,
+                        &has_system,
+                        &cache_filename,
+                        &dalvik_cache_exists,
+                        &has_cache,
+                        &is_global_cache)) {
+    DCHECK(has_system || has_cache);
+    const std::string& filename = (order == ImageSpaceLoadingOrder::kSystemFirst)
+        ? (has_system ? system_filename : cache_filename)
+        : (has_cache ? cache_filename : system_filename);
+    image_header = ReadSpecificImageHeader(filename.c_str(), &error_msg);
+  }
+
+  return image_header != nullptr;
+}
+
 static constexpr uint64_t kLowSpaceValue = 50 * MB;
 static constexpr uint64_t kTmpFsSentinelValue = 384 * MB;
 
@@ -1972,7 +3560,7 @@
     bool executable,
     bool is_zygote,
     size_t extra_reservation_size,
-    /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+    /*out*/std::vector<std::unique_ptr<ImageSpace>>* boot_image_spaces,
     /*out*/MemMap* extra_reservation) {
   ScopedTrace trace(__FUNCTION__);
 
@@ -2042,10 +3630,17 @@
   };
 
   auto try_load_from_system = [&]() {
-    return try_load_from(&BootImageLoader::HasSystem, &BootImageLoader::LoadFromSystem, false);
+    // Validate the oat files if the loading order checks data first. Otherwise assume system
+    // integrity.
+    return try_load_from(&BootImageLoader::HasSystem,
+                         &BootImageLoader::LoadFromSystem,
+                         /*validate_oat_file=*/ order != ImageSpaceLoadingOrder::kSystemFirst);
   };
   auto try_load_from_cache = [&]() {
-    return try_load_from(&BootImageLoader::HasCache, &BootImageLoader::LoadFromDalvikCache, true);
+    // Always validate oat files from the dalvik cache.
+    return try_load_from(&BootImageLoader::HasCache,
+                         &BootImageLoader::LoadFromDalvikCache,
+                         /*validate_oat_file=*/ true);
   };
 
   auto invoke_sequentially = [](auto first, auto second) {
@@ -2119,10 +3714,23 @@
                                                            const OatFile* oat_file,
                                                            std::string* error_msg) {
   // Note: The oat file has already been validated.
+  const std::vector<ImageSpace*>& boot_image_spaces =
+      Runtime::Current()->GetHeap()->GetBootImageSpaces();
+  return CreateFromAppImage(image,
+                            oat_file,
+                            ArrayRef<ImageSpace* const>(boot_image_spaces),
+                            error_msg);
+}
+
+std::unique_ptr<ImageSpace> ImageSpace::CreateFromAppImage(
+    const char* image,
+    const OatFile* oat_file,
+    ArrayRef<ImageSpace* const> boot_image_spaces,
+    std::string* error_msg) {
   return Loader::InitAppImage(image,
                               image,
                               oat_file,
-                              /*image_reservation=*/ nullptr,
+                              boot_image_spaces,
                               error_msg);
 }
 
@@ -2203,53 +3811,162 @@
   return true;
 }
 
-std::string ImageSpace::GetBootClassPathChecksums(ArrayRef<const std::string> boot_class_path,
-                                                  const std::string& image_location,
-                                                  InstructionSet image_isa,
-                                                  ImageSpaceLoadingOrder order,
-                                                  /*out*/std::string* error_msg) {
-  std::string system_filename;
-  bool has_system = false;
-  std::string cache_filename;
-  bool has_cache = false;
-  bool dalvik_cache_exists = false;
-  bool is_global_cache = false;
-  if (!FindImageFilename(image_location.c_str(),
-                         image_isa,
-                         &system_filename,
-                         &has_system,
-                         &cache_filename,
-                         &dalvik_cache_exists,
-                         &has_cache,
-                         &is_global_cache)) {
-    *error_msg = StringPrintf("Unable to find image file for %s and %s",
-                              image_location.c_str(),
-                              GetInstructionSetString(image_isa));
-    return std::string();
+std::string ImageSpace::GetBootClassPathChecksums(
+    ArrayRef<ImageSpace* const> image_spaces,
+    ArrayRef<const DexFile* const> boot_class_path) {
+  DCHECK(!boot_class_path.empty());
+  size_t bcp_pos = 0u;
+  std::string boot_image_checksum;
+
+  for (size_t image_pos = 0u, size = image_spaces.size(); image_pos != size; ) {
+    const ImageSpace* main_space = image_spaces[image_pos];
+    // Caller must make sure that the image spaces correspond to the head of the BCP.
+    DCHECK_NE(main_space->oat_file_non_owned_->GetOatDexFiles().size(), 0u);
+    DCHECK_EQ(main_space->oat_file_non_owned_->GetOatDexFiles()[0]->GetDexFileLocation(),
+              boot_class_path[bcp_pos]->GetLocation());
+    const ImageHeader& current_header = main_space->GetImageHeader();
+    uint32_t image_space_count = current_header.GetImageSpaceCount();
+    DCHECK_NE(image_space_count, 0u);
+    DCHECK_LE(image_space_count, image_spaces.size() - image_pos);
+    if (image_pos != 0u) {
+      boot_image_checksum += ':';
+    }
+    uint32_t component_count = current_header.GetComponentCount();
+    AppendImageChecksum(component_count, current_header.GetImageChecksum(), &boot_image_checksum);
+    for (size_t space_index = 0; space_index != image_space_count; ++space_index) {
+      const ImageSpace* space = image_spaces[image_pos + space_index];
+      const OatFile* oat_file = space->oat_file_non_owned_;
+      size_t num_dex_files = oat_file->GetOatDexFiles().size();
+      if (kIsDebugBuild) {
+        CHECK_NE(num_dex_files, 0u);
+        CHECK_LE(oat_file->GetOatDexFiles().size(), boot_class_path.size() - bcp_pos);
+        for (size_t i = 0; i != num_dex_files; ++i) {
+          CHECK_EQ(oat_file->GetOatDexFiles()[i]->GetDexFileLocation(),
+                   boot_class_path[bcp_pos + i]->GetLocation());
+        }
+      }
+      bcp_pos += num_dex_files;
+    }
+    image_pos += image_space_count;
   }
 
-  DCHECK(has_system || has_cache);
-  const std::string& filename = (order == ImageSpaceLoadingOrder::kSystemFirst)
-      ? (has_system ? system_filename : cache_filename)
-      : (has_cache ? cache_filename : system_filename);
-  std::unique_ptr<ImageHeader> header = ReadSpecificImageHeader(filename.c_str(), error_msg);
-  if (header == nullptr) {
-    return std::string();
+  ArrayRef<const DexFile* const> boot_class_path_tail =
+      ArrayRef<const DexFile* const>(boot_class_path).SubArray(bcp_pos);
+  DCHECK(boot_class_path_tail.empty() ||
+         !DexFileLoader::IsMultiDexLocation(boot_class_path_tail.front()->GetLocation().c_str()));
+  for (const DexFile* dex_file : boot_class_path_tail) {
+    if (!DexFileLoader::IsMultiDexLocation(dex_file->GetLocation().c_str())) {
+      if (!boot_image_checksum.empty()) {
+        boot_image_checksum += ':';
+      }
+      boot_image_checksum += kDexFileChecksumPrefix;
+    }
+    StringAppendF(&boot_image_checksum, "/%08x", dex_file->GetLocationChecksum());
   }
-  if (header->GetComponentCount() == 0u || header->GetComponentCount() > boot_class_path.size()) {
-    *error_msg = StringPrintf("Unexpected component count in %s, received %u, "
-                                  "expected non-zero and <= %zu",
-                              filename.c_str(),
-                              header->GetComponentCount(),
-                              boot_class_path.size());
-    return std::string();
+  return boot_image_checksum;
+}
+
+static size_t CheckAndCountBCPComponents(std::string_view oat_boot_class_path,
+                                         ArrayRef<const std::string> boot_class_path,
+                                         /*out*/std::string* error_msg) {
+  // Check that the oat BCP is a prefix of current BCP locations and count components.
+  size_t component_count = 0u;
+  std::string_view remaining_bcp(oat_boot_class_path);
+  bool bcp_ok = false;
+  for (const std::string& location : boot_class_path) {
+    if (!StartsWith(remaining_bcp, location)) {
+      break;
+    }
+    remaining_bcp.remove_prefix(location.size());
+    ++component_count;
+    if (remaining_bcp.empty()) {
+      bcp_ok = true;
+      break;
+    }
+    if (!StartsWith(remaining_bcp, ":")) {
+      break;
+    }
+    remaining_bcp.remove_prefix(1u);
+  }
+  if (!bcp_ok) {
+    *error_msg = StringPrintf("Oat boot class path (%s) is not a prefix of"
+                              " runtime boot class path (%s)",
+                              std::string(oat_boot_class_path).c_str(),
+                              Join(boot_class_path, ':').c_str());
+    return static_cast<size_t>(-1);
+  }
+  return component_count;
+}
+
+bool ImageSpace::VerifyBootClassPathChecksums(std::string_view oat_checksums,
+                                              std::string_view oat_boot_class_path,
+                                              const std::string& image_location,
+                                              ArrayRef<const std::string> boot_class_path_locations,
+                                              ArrayRef<const std::string> boot_class_path,
+                                              InstructionSet image_isa,
+                                              ImageSpaceLoadingOrder order,
+                                              /*out*/std::string* error_msg) {
+  if (oat_checksums.empty() || oat_boot_class_path.empty()) {
+    *error_msg = oat_checksums.empty() ? "Empty checksums." : "Empty boot class path.";
+    return false;
   }
 
-  std::string boot_image_checksum =
-      StringPrintf("i;%d/%08x", header->GetComponentCount(), header->GetImageChecksum());
-  ArrayRef<const std::string> boot_class_path_tail =
-      ArrayRef<const std::string>(boot_class_path).SubArray(header->GetComponentCount());
-  for (const std::string& bcp_filename : boot_class_path_tail) {
+  DCHECK_EQ(boot_class_path_locations.size(), boot_class_path.size());
+  size_t bcp_size =
+      CheckAndCountBCPComponents(oat_boot_class_path, boot_class_path_locations, error_msg);
+  if (bcp_size == static_cast<size_t>(-1)) {
+    DCHECK(!error_msg->empty());
+    return false;
+  }
+
+  size_t bcp_pos = 0u;
+  if (StartsWith(oat_checksums, "i")) {
+    // Use only the matching part of the BCP for validation.
+    BootImageLayout layout(image_location,
+                           boot_class_path.SubArray(/*pos=*/ 0u, bcp_size),
+                           boot_class_path_locations.SubArray(/*pos=*/ 0u, bcp_size));
+    std::string primary_image_location = layout.GetPrimaryImageLocation();
+    std::string system_filename;
+    bool has_system = false;
+    std::string cache_filename;
+    bool has_cache = false;
+    bool dalvik_cache_exists = false;
+    bool is_global_cache = false;
+    if (!FindImageFilename(primary_image_location.c_str(),
+                           image_isa,
+                           &system_filename,
+                           &has_system,
+                           &cache_filename,
+                           &dalvik_cache_exists,
+                           &has_cache,
+                           &is_global_cache)) {
+      *error_msg = StringPrintf("Unable to find image file for %s and %s",
+                                image_location.c_str(),
+                                GetInstructionSetString(image_isa));
+      return false;
+    }
+
+    DCHECK(has_system || has_cache);
+    bool use_system = (order == ImageSpaceLoadingOrder::kSystemFirst) ? has_system : !has_cache;
+    bool image_checksums_ok = use_system
+        ? layout.ValidateFromSystem(image_isa, &oat_checksums, error_msg)
+        : layout.ValidateFromDalvikCache(cache_filename, &oat_checksums, error_msg);
+    if (!image_checksums_ok) {
+      return false;
+    }
+    bcp_pos = layout.GetNextBcpIndex();
+  }
+
+  for ( ; bcp_pos != bcp_size; ++bcp_pos) {
+    static_assert(ImageSpace::kDexFileChecksumPrefix == 'd', "Format prefix check.");
+    if (!StartsWith(oat_checksums, "d")) {
+      *error_msg = StringPrintf("Missing dex checksums, expected %s to start with 'd'",
+                                std::string(oat_checksums).c_str());
+      return false;
+    }
+    oat_checksums.remove_prefix(1u);
+
+    const std::string& bcp_filename = boot_class_path[bcp_pos];
     std::vector<std::unique_ptr<const DexFile>> dex_files;
     const ArtDexFileLoader dex_file_loader;
     if (!dex_file_loader.Open(bcp_filename.c_str(),
@@ -2258,65 +3975,132 @@
                               /*verify_checksum=*/ false,
                               error_msg,
                               &dex_files)) {
-      return std::string();
+      return false;
     }
     DCHECK(!dex_files.empty());
-    StringAppendF(&boot_image_checksum, ":d");
     for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
-      StringAppendF(&boot_image_checksum, "/%08x", dex_file->GetLocationChecksum());
+      std::string dex_file_checksum = StringPrintf("/%08x", dex_file->GetLocationChecksum());
+      if (!StartsWith(oat_checksums, dex_file_checksum)) {
+        *error_msg = StringPrintf("Dex checksum mismatch, expected %s to start with %s",
+                                  std::string(oat_checksums).c_str(),
+                                  dex_file_checksum.c_str());
+        return false;
+      }
+      oat_checksums.remove_prefix(dex_file_checksum.size());
+    }
+    if (bcp_pos + 1u != bcp_size) {
+      if (!StartsWith(oat_checksums, ":")) {
+        *error_msg = StringPrintf("Missing ':' separator at start of %s",
+                                  std::string(oat_checksums).c_str());
+        return false;
+      }
+      oat_checksums.remove_prefix(1u);
     }
   }
-  return boot_image_checksum;
+  if (!oat_checksums.empty()) {
+    *error_msg = StringPrintf("Checksum too long, unexpected tail %s",
+                              std::string(oat_checksums).c_str());
+    return false;
+  }
+  return true;
 }
 
-std::string ImageSpace::GetBootClassPathChecksums(
-    const std::vector<ImageSpace*>& image_spaces,
-    const std::vector<const DexFile*>& boot_class_path) {
-  size_t pos = 0u;
-  std::string boot_image_checksum;
+bool ImageSpace::VerifyBootClassPathChecksums(
+    std::string_view oat_checksums,
+    std::string_view oat_boot_class_path,
+    ArrayRef<const std::unique_ptr<ImageSpace>> image_spaces,
+    ArrayRef<const std::string> boot_class_path_locations,
+    ArrayRef<const std::string> boot_class_path,
+    /*out*/std::string* error_msg) {
+  DCHECK_EQ(boot_class_path.size(), boot_class_path_locations.size());
+  DCHECK_GE(boot_class_path_locations.size(), image_spaces.size());
+  if (oat_checksums.empty() || oat_boot_class_path.empty()) {
+    *error_msg = oat_checksums.empty() ? "Empty checksums." : "Empty boot class path.";
+    return false;
+  }
 
-  if (!image_spaces.empty()) {
-    const ImageHeader& primary_header = image_spaces.front()->GetImageHeader();
-    uint32_t component_count = primary_header.GetComponentCount();
-    DCHECK_EQ(component_count, image_spaces.size());
-    boot_image_checksum =
-        StringPrintf("i;%d/%08x", component_count, primary_header.GetImageChecksum());
-    for (const ImageSpace* space : image_spaces) {
-      size_t num_dex_files = space->oat_file_non_owned_->GetOatDexFiles().size();
-      if (kIsDebugBuild) {
+  size_t oat_bcp_size =
+      CheckAndCountBCPComponents(oat_boot_class_path, boot_class_path_locations, error_msg);
+  if (oat_bcp_size == static_cast<size_t>(-1)) {
+    DCHECK(!error_msg->empty());
+    return false;
+  }
+  const size_t num_image_spaces = image_spaces.size();
+  if (num_image_spaces != oat_bcp_size) {
+    *error_msg = StringPrintf("Image header records more dependencies (%zu) than BCP (%zu)",
+                              num_image_spaces,
+                              oat_bcp_size);
+    return false;
+  }
+
+  // Verify image checksums.
+  size_t bcp_pos = 0u;
+  size_t image_pos = 0u;
+  while (image_pos != num_image_spaces && StartsWith(oat_checksums, "i")) {
+    // Verify the current image checksum.
+    const ImageHeader& current_header = image_spaces[image_pos]->GetImageHeader();
+    uint32_t image_space_count = current_header.GetImageSpaceCount();
+    DCHECK_NE(image_space_count, 0u);
+    DCHECK_LE(image_space_count, image_spaces.size() - image_pos);
+    uint32_t component_count = current_header.GetComponentCount();
+    uint32_t checksum = current_header.GetImageChecksum();
+    if (!CheckAndRemoveImageChecksum(component_count, checksum, &oat_checksums, error_msg)) {
+      DCHECK(!error_msg->empty());
+      return false;
+    }
+
+    if (kIsDebugBuild) {
+      for (size_t space_index = 0; space_index != image_space_count; ++space_index) {
+        const OatFile* oat_file = image_spaces[image_pos + space_index]->oat_file_non_owned_;
+        size_t num_dex_files = oat_file->GetOatDexFiles().size();
         CHECK_NE(num_dex_files, 0u);
-        CHECK_LE(space->oat_file_non_owned_->GetOatDexFiles().size(), boot_class_path.size() - pos);
-        for (size_t i = 0; i != num_dex_files; ++i) {
-          CHECK_EQ(space->oat_file_non_owned_->GetOatDexFiles()[i]->GetDexFileLocation(),
-                   boot_class_path[pos + i]->GetLocation());
+        const std::string main_location = oat_file->GetOatDexFiles()[0]->GetDexFileLocation();
+        CHECK_EQ(main_location, boot_class_path_locations[bcp_pos + space_index]);
+        CHECK(!DexFileLoader::IsMultiDexLocation(main_location.c_str()));
+        size_t num_base_locations = 1u;
+        for (size_t i = 1u; i != num_dex_files; ++i) {
+          if (DexFileLoader::IsMultiDexLocation(
+                  oat_file->GetOatDexFiles()[i]->GetDexFileLocation().c_str())) {
+            CHECK_EQ(image_space_count, 1u);  // We can find base locations only for --single-image.
+            ++num_base_locations;
+          }
+        }
+        if (image_space_count == 1u) {
+          CHECK_EQ(num_base_locations, component_count);
         }
       }
-      pos += num_dex_files;
     }
+
+    image_pos += image_space_count;
+    bcp_pos += component_count;
+
+    if (!StartsWith(oat_checksums, ":")) {
+      // Check that we've reached the end of checksums and BCP.
+      if (!oat_checksums.empty()) {
+         *error_msg = StringPrintf("Expected ':' separator or end of checksums, remaining %s.",
+                                   std::string(oat_checksums).c_str());
+         return false;
+      }
+      if (image_pos != oat_bcp_size) {
+        *error_msg = StringPrintf("Component count mismatch between checksums (%zu) and BCP (%zu)",
+                                  image_pos,
+                                  oat_bcp_size);
+        return false;
+      }
+      return true;
+    }
+    oat_checksums.remove_prefix(1u);
   }
 
-  ArrayRef<const DexFile* const> boot_class_path_tail =
-      ArrayRef<const DexFile* const>(boot_class_path).SubArray(pos);
-  DCHECK(boot_class_path_tail.empty() ||
-         !DexFileLoader::IsMultiDexLocation(boot_class_path_tail.front()->GetLocation().c_str()));
-  for (const DexFile* dex_file : boot_class_path_tail) {
-    if (!DexFileLoader::IsMultiDexLocation(dex_file->GetLocation().c_str())) {
-      StringAppendF(&boot_image_checksum, boot_image_checksum.empty() ? "d" : ":d");
-    }
-    StringAppendF(&boot_image_checksum, "/%08x", dex_file->GetLocationChecksum());
-  }
-  return boot_image_checksum;
-}
-
-std::vector<std::string> ImageSpace::ExpandMultiImageLocations(
-    const std::vector<std::string>& dex_locations,
-    const std::string& image_location) {
-  return ExpandMultiImageLocations(ArrayRef<const std::string>(dex_locations), image_location);
+  // We do not allow dependencies of extensions on dex files. That would require
+  // interleaving the loading of the images with opening the other BCP dex files.
+  return false;
 }
 
 std::vector<std::string> ImageSpace::ExpandMultiImageLocations(
     ArrayRef<const std::string> dex_locations,
-    const std::string& image_location) {
+    const std::string& image_location,
+    bool boot_image_extension) {
   DCHECK(!dex_locations.empty());
 
   // Find the path.
@@ -2345,10 +4129,14 @@
 
   std::vector<std::string> locations;
   locations.reserve(dex_locations.size());
-  locations.push_back(image_location);
+  size_t start_index = 0u;
+  if (!boot_image_extension) {
+    start_index = 1u;
+    locations.push_back(image_location);
+  }
 
-  // Now create the other names. Use a counted loop to skip the first one.
-  for (size_t i = 1u; i < dex_locations.size(); ++i) {
+  // Now create the other names. Use a counted loop to skip the first one if needed.
+  for (size_t i = start_index; i < dex_locations.size(); ++i) {
     // Replace path with `base` (i.e. image path and prefix) and replace the original
     // extension (if any) with `extension`.
     std::string name = dex_locations[i];
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index c020dc1..4ddc519 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -39,11 +39,90 @@
     return kSpaceTypeImageSpace;
   }
 
-  // Load boot image spaces from a primary image file for a specified instruction set.
+  // The separator for boot image location components.
+  static constexpr char kComponentSeparator = ':';
+  // The separator for profile filename.
+  static constexpr char kProfileSeparator = '!';
+
+  // Load boot image spaces for specified boot class path, image location, instruction set, etc.
   //
   // On successful return, the loaded spaces are added to boot_image_spaces (which must be
   // empty on entry) and `extra_reservation` is set to the requested reservation located
   // after the end of the last loaded oat file.
+  //
+  // IMAGE LOCATION
+  //
+  // The "image location" is a colon-separated list that specifies one or more
+  // components by name and may also specify search paths for extensions
+  // corresponding to the remaining boot class path (BCP) extensions.
+  //
+  // The primary boot image can be specified as one of
+  //     <path>/<base-name>
+  //     <base-name>
+  // and the path of the first BCP component is used for the second form.
+  //
+  // Named extension specifications must correspond to an expansion of the
+  // <base-name> with a BCP component (for example boot.art with the BCP
+  // component name <jar-path>/framework.jar expands to boot-framework.art).
+  // They can be similarly specified as one of
+  //     <ext-path>/<ext-name>
+  //     <ext-name>
+  // and must be listed in the order of their corresponding BCP components.
+  // The specification may have a suffix with profile specification, one of
+  //     !<ext-path>/<ext-name>
+  //     !<ext-name>
+  // and this profile will be used to compile the extension when loading the
+  // boot image if the on-disk version is not acceptable (either not present
+  // or fails validation, presumably because it's out of date). The first
+  // extension specification that includes the profile specification also
+  // terminates the list of the boot image dependencies that each extension
+  // is compiled against.
+  //
+  // Search paths for remaining extensions can be specified after named
+  // components as one of
+  //     <search-path>/*
+  //     *
+  // where the second form means that the path of a particular BCP component
+  // should be used to search for that component's boot image extension. These
+  // paths will be searched in the specified order.
+  //
+  // The actual filename shall be derived from the specified locations using
+  // `GetSystemImageFilename()` or `GetDalvikCacheFilename()`.
+  //
+  // Example image locations:
+  //     /system/framework/boot.art
+  //         - only primary boot image with full path.
+  //     boot.art:boot-framework.art
+  //         - primary and one extension, use BCP component paths.
+  //     /apex/com.android.art/boot.art:*
+  //         - primary with exact location, search for the rest based on BCP
+  //           component paths.
+  //     boot.art:/system/framework/*
+  //         - primary based on BCP component path, search for extensions in
+  //           /system/framework.
+  //     /apex/com.android.art/boot.art:/system/framework/*:*
+  //         - primary with exact location, search for extensions first in
+  //           /system/framework, then in the corresponding BCP component path.
+  //     /apex/com.android.art/boot.art:*:/system/framework/*
+  //         - primary with exact location, search for extensions first in the
+  //           corresponding BCP component path and then in /system/framework.
+  //     /apex/com.android.art/boot.art:*:boot-framework.jar
+  //         - invalid, named components may not follow search paths.
+  //     boot.art:boot-framework.jar!/system/framework/framework.prof
+  //         - primary and one extension, use BCP component paths; if extension
+  //           is not found or broken, compile it in memory using the specified
+  //           profile file from the exact path.
+  //     boot.art:boot-framework.jar:conscrypt.jar!conscrypt.prof
+  //         - primary and two extensions, use BCP component paths; only the
+  //           second extension has a profile file and can be compiled in memory
+  //           when it is not found or broken, using the specified profile file
+  //           in the BCP component path and it is compiled against the primary
+  //           and first extension and only if the first extension is OK.
+  //     boot.art:boot-framework.jar!framework.prof:conscrypt.jar!conscrypt.prof
+  //         - primary and two extensions, use BCP component paths; if any
+  //           extension is not found or broken, compile it in memory using
+  //           the specified profile file in the BCP component path, each
+  //           extension is compiled only against the primary boot image.
   static bool LoadBootImage(
       const std::vector<std::string>& boot_class_path,
       const std::vector<std::string>& boot_class_path_locations,
@@ -54,22 +133,24 @@
       bool executable,
       bool is_zygote,
       size_t extra_reservation_size,
-      /*out*/std::vector<std::unique_ptr<space::ImageSpace>>* boot_image_spaces,
+      /*out*/std::vector<std::unique_ptr<ImageSpace>>* boot_image_spaces,
       /*out*/MemMap* extra_reservation) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Try to open an existing app image space.
+  // Try to open an existing app image space for an oat file,
+  // using the boot image spaces from the current Runtime.
   static std::unique_ptr<ImageSpace> CreateFromAppImage(const char* image,
                                                         const OatFile* oat_file,
                                                         std::string* error_msg)
       REQUIRES_SHARED(Locks::mutator_lock_);
+  // Try to open an existing app image space for the oat file and given boot image spaces.
+  static std::unique_ptr<ImageSpace> CreateFromAppImage(
+      const char* image,
+      const OatFile* oat_file,
+      ArrayRef<ImageSpace* const> boot_image_spaces,
+      std::string* error_msg) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Reads the image header from the specified image location for the
-  // instruction set image_isa. Returns null on failure, with
-  // reason in error_msg.
-  static std::unique_ptr<ImageHeader> ReadImageHeader(const char* image_location,
-                                                      InstructionSet image_isa,
-                                                      ImageSpaceLoadingOrder order,
-                                                      std::string* error_msg);
+  // Checks whether we have a primary boot image on the disk.
+  static bool IsBootClassPathOnDisk(InstructionSet image_isa);
 
   // Give access to the OatFile.
   const OatFile* GetOatFile() const;
@@ -97,14 +178,18 @@
     return image_location_;
   }
 
-  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
-    return live_bitmap_.get();
+  const std::string GetProfileFile() const {
+    return profile_file_;
   }
 
-  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
+  accounting::ContinuousSpaceBitmap* GetLiveBitmap() override {
+    return &live_bitmap_;
+  }
+
+  accounting::ContinuousSpaceBitmap* GetMarkBitmap() override {
     // ImageSpaces have the same bitmap for both live and marked. This helps reduce the number of
     // special cases to test against.
-    return live_bitmap_.get();
+    return &live_bitmap_;
   }
 
   void Dump(std::ostream& os) const override;
@@ -132,24 +217,44 @@
                                 bool* has_data,
                                 bool *is_global_cache);
 
-  // Returns the checksums for the boot image and extra boot class path dex files,
-  // based on the boot class path, image location and ISA (may differ from the ISA of an
-  // initialized Runtime). The boot image and dex files do not need to be loaded in memory.
-  static std::string GetBootClassPathChecksums(ArrayRef<const std::string> boot_class_path,
-                                               const std::string& image_location,
-                                               InstructionSet image_isa,
-                                               ImageSpaceLoadingOrder order,
-                                               /*out*/std::string* error_msg);
+  // The leading character in an image checksum part of boot class path checksums.
+  static constexpr char kImageChecksumPrefix = 'i';
+  // The leading character in a dex file checksum part of boot class path checksums.
+  static constexpr char kDexFileChecksumPrefix = 'd';
 
-  // Returns the checksums for the boot image and extra boot class path dex files,
-  // based on the boot image and boot class path dex files loaded in memory.
-  static std::string GetBootClassPathChecksums(const std::vector<ImageSpace*>& image_spaces,
-                                               const std::vector<const DexFile*>& boot_class_path);
+  // Returns the checksums for the boot image, extensions and extra boot class path dex files,
+  // based on the image spaces and boot class path dex files loaded in memory.
+  // The `image_spaces` must correspond to the head of the `boot_class_path`.
+  static std::string GetBootClassPathChecksums(ArrayRef<ImageSpace* const> image_spaces,
+                                               ArrayRef<const DexFile* const> boot_class_path);
+
+  // Returns whether the checksums are valid for the given boot class path,
+  // image location and ISA (may differ from the ISA of an initialized Runtime).
+  // The boot image and dex files do not need to be loaded in memory.
+  static bool VerifyBootClassPathChecksums(std::string_view oat_checksums,
+                                           std::string_view oat_boot_class_path,
+                                           const std::string& image_location,
+                                           ArrayRef<const std::string> boot_class_path_locations,
+                                           ArrayRef<const std::string> boot_class_path,
+                                           InstructionSet image_isa,
+                                           ImageSpaceLoadingOrder order,
+                                           /*out*/std::string* error_msg);
+
+  // Returns whether the oat checksums and boot class path description are valid
+  // for the given boot image spaces and boot class path. Used for boot image extensions.
+  static bool VerifyBootClassPathChecksums(
+      std::string_view oat_checksums,
+      std::string_view oat_boot_class_path,
+      ArrayRef<const std::unique_ptr<ImageSpace>> image_spaces,
+      ArrayRef<const std::string> boot_class_path_locations,
+      ArrayRef<const std::string> boot_class_path,
+      /*out*/std::string* error_msg);
 
   // Expand a single image location to multi-image locations based on the dex locations.
   static std::vector<std::string> ExpandMultiImageLocations(
-      const std::vector<std::string>& dex_locations,
-      const std::string& image_location);
+      ArrayRef<const std::string> dex_locations,
+      const std::string& image_location,
+      bool boot_image_extension = false);
 
   // Returns true if the dex checksums in the given oat file match the
   // checksums of the original dex files on disk. This is intended to be used
@@ -191,12 +296,13 @@
 
   static Atomic<uint32_t> bitmap_index_;
 
-  std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap_;
+  accounting::ContinuousSpaceBitmap live_bitmap_;
 
   ImageSpace(const std::string& name,
              const char* image_location,
+             const char* profile_file,
              MemMap&& mem_map,
-             std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap,
+             accounting::ContinuousSpaceBitmap&& live_bitmap,
              uint8_t* end);
 
   // The OatFile associated with the image during early startup to
@@ -209,18 +315,16 @@
   const OatFile* oat_file_non_owned_;
 
   const std::string image_location_;
+  const std::string profile_file_;
 
   friend class Space;
 
  private:
-  // Internal overload that takes ArrayRef<> instead of vector<>.
-  static std::vector<std::string> ExpandMultiImageLocations(
-      ArrayRef<const std::string> dex_locations,
-      const std::string& image_location);
-
+  class BootImageLayout;
   class BootImageLoader;
   template <typename ReferenceVisitor>
   class ClassTableVisitor;
+  class RemapInternedStringsVisitor;
   class Loader;
   template <typename PatchObjectVisitor>
   class PatchArtFieldVisitor;
diff --git a/runtime/gc/space/image_space_test.cc b/runtime/gc/space/image_space_test.cc
index 34df447..b08c680 100644
--- a/runtime/gc/space/image_space_test.cc
+++ b/runtime/gc/space/image_space_test.cc
@@ -16,16 +16,211 @@
 
 #include <gtest/gtest.h>
 
+#include "android-base/logging.h"
 #include "android-base/stringprintf.h"
+#include "android-base/strings.h"
 
 #include "base/stl_util.h"
+#include "class_linker.h"
 #include "dexopt_test.h"
+#include "dex/utf.h"
+#include "intern_table.h"
 #include "noop_compiler_callbacks.h"
+#include "oat_file.h"
 
 namespace art {
 namespace gc {
 namespace space {
 
+class ImageSpaceTest : public CommonRuntimeTest {
+ protected:
+  void SetUpRuntimeOptions(RuntimeOptions* options) override {
+    // Disable implicit dex2oat invocations when loading image spaces.
+    options->emplace_back("-Xnoimage-dex2oat", nullptr);
+    // Disable relocation.
+    options->emplace_back("-Xnorelocate", nullptr);
+  }
+
+  std::string GetFilenameBase(const std::string& full_path) {
+    size_t slash_pos = full_path.rfind('/');
+    CHECK_NE(std::string::npos, slash_pos);
+    size_t dot_pos = full_path.rfind('.');
+    CHECK_NE(std::string::npos, dot_pos);
+    CHECK_GT(dot_pos, slash_pos + 1u);
+    return full_path.substr(slash_pos + 1u, dot_pos - (slash_pos + 1u));
+  }
+};
+
+TEST_F(ImageSpaceTest, StringDeduplication) {
+  const char* const kBaseNames[] = { "Extension1", "Extension2" };
+
+  ScratchDir scratch;
+  const std::string& scratch_dir = scratch.GetPath();
+  std::string image_dir = scratch_dir + GetInstructionSetString(kRuntimeISA);
+  int mkdir_result = mkdir(image_dir.c_str(), 0700);
+  ASSERT_EQ(0, mkdir_result);
+
+  // Prepare boot class path variables, exclude conscrypt which is not in the primary boot image.
+  std::vector<std::string> bcp = GetLibCoreDexFileNames();
+  std::vector<std::string> bcp_locations = GetLibCoreDexLocations();
+  CHECK_EQ(bcp.size(), bcp_locations.size());
+  ASSERT_NE(std::string::npos, bcp.back().find("conscrypt"));
+  bcp.pop_back();
+  bcp_locations.pop_back();
+  std::string base_bcp_string = android::base::Join(bcp, ':');
+  std::string base_bcp_locations_string = android::base::Join(bcp_locations, ':');
+  std::string base_image_location = GetImageLocation();
+
+  // Compile the two extensions independently.
+  std::vector<std::string> extension_image_locations;
+  for (const char* base_name : kBaseNames) {
+    std::string jar_name = GetTestDexFileName(base_name);
+    ArrayRef<const std::string> dex_files(&jar_name, /*size=*/ 1u);
+    ScratchFile profile_file;
+    GenerateProfile(dex_files, profile_file.GetFile());
+    std::vector<std::string> extra_args = {
+        "--profile-file=" + profile_file.GetFilename(),
+        "--runtime-arg",
+        "-Xbootclasspath:" + base_bcp_string + ':' + jar_name,
+        "--runtime-arg",
+        "-Xbootclasspath-locations:" + base_bcp_locations_string + ':' + jar_name,
+        "--boot-image=" + base_image_location,
+    };
+    std::string prefix = GetFilenameBase(base_image_location);
+    std::string error_msg;
+    bool success = CompileBootImage(extra_args, image_dir + '/' + prefix, dex_files, &error_msg);
+    ASSERT_TRUE(success) << error_msg;
+    bcp.push_back(jar_name);
+    bcp_locations.push_back(jar_name);
+    extension_image_locations.push_back(
+        scratch_dir + prefix + '-' + GetFilenameBase(jar_name) + ".art");
+  }
+
+  // Also compile the second extension as an app with app image.
+  const char* app_base_name = kBaseNames[std::size(kBaseNames) - 1u];
+  std::string app_jar_name = GetTestDexFileName(app_base_name);
+  std::string app_odex_name = scratch_dir + app_base_name + ".odex";
+  std::string app_image_name = scratch_dir + app_base_name + ".art";
+  {
+    ArrayRef<const std::string> dex_files(&app_jar_name, /*size=*/ 1u);
+    ScratchFile profile_file;
+    GenerateProfile(dex_files, profile_file.GetFile());
+    std::vector<std::string> argv;
+    std::string error_msg;
+    bool success = StartDex2OatCommandLine(&argv, &error_msg, /*use_runtime_bcp_and_image=*/ false);
+    ASSERT_TRUE(success) << error_msg;
+    argv.insert(argv.end(), {
+        "--profile-file=" + profile_file.GetFilename(),
+        "--runtime-arg",
+        "-Xbootclasspath:" + base_bcp_string,
+        "--runtime-arg",
+        "-Xbootclasspath-locations:" + base_bcp_locations_string,
+        "--boot-image=" + base_image_location,
+        "--dex-file=" + app_jar_name,
+        "--dex-location=" + app_jar_name,
+        "--oat-file=" + app_odex_name,
+        "--app-image-file=" + app_image_name,
+        "--initialize-app-image-classes=true",
+    });
+    success = RunDex2Oat(argv, &error_msg);
+    ASSERT_TRUE(success) << error_msg;
+  }
+
+  std::string full_image_locations;
+  std::vector<std::unique_ptr<gc::space::ImageSpace>> boot_image_spaces;
+  MemMap extra_reservation;
+  auto load_boot_image = [&]() REQUIRES_SHARED(Locks::mutator_lock_) {
+    boot_image_spaces.clear();
+    extra_reservation = MemMap::Invalid();
+    return ImageSpace::LoadBootImage(bcp,
+                                     bcp_locations,
+                                     full_image_locations,
+                                     kRuntimeISA,
+                                     ImageSpaceLoadingOrder::kSystemFirst,
+                                     /*relocate=*/ false,
+                                     /*executable=*/ true,
+                                     /*is_zygote=*/ false,
+                                     /*extra_reservation_size=*/ 0u,
+                                     &boot_image_spaces,
+                                     &extra_reservation);
+  };
+
+  const char test_string[] = "SharedBootImageExtensionTestString";
+  size_t test_string_length = std::size(test_string) - 1u;  // Equals UTF-16 length.
+  uint32_t hash = ComputeUtf16HashFromModifiedUtf8(test_string, test_string_length);
+  InternTable::Utf8String utf8_test_string(test_string_length, test_string, hash);
+  auto contains_test_string = [utf8_test_string](ImageSpace* space)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    const ImageHeader& image_header = space->GetImageHeader();
+    if (image_header.GetInternedStringsSection().Size() != 0u) {
+      const uint8_t* data = space->Begin() + image_header.GetInternedStringsSection().Offset();
+      size_t read_count;
+      InternTable::UnorderedSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
+      return temp_set.find(utf8_test_string) != temp_set.end();
+    } else {
+      return false;
+    }
+  };
+
+  // Load extensions and test for the presence of the test string.
+  ScopedObjectAccess soa(Thread::Current());
+  ASSERT_EQ(2u, extension_image_locations.size());
+  full_image_locations = base_image_location +
+                             ImageSpace::kComponentSeparator + extension_image_locations[0] +
+                             ImageSpace::kComponentSeparator + extension_image_locations[1];
+  bool success = load_boot_image();
+  ASSERT_TRUE(success);
+  ASSERT_EQ(bcp.size(), boot_image_spaces.size());
+  EXPECT_TRUE(contains_test_string(boot_image_spaces[boot_image_spaces.size() - 2u].get()));
+  // The string in the second extension should be replaced and removed from interned string section.
+  EXPECT_FALSE(contains_test_string(boot_image_spaces[boot_image_spaces.size() - 1u].get()));
+
+  // Reload extensions in reverse order and test for the presence of the test string.
+  std::swap(bcp[bcp.size() - 2u], bcp[bcp.size() - 1u]);
+  std::swap(bcp_locations[bcp_locations.size() - 2u], bcp_locations[bcp_locations.size() - 1u]);
+  full_image_locations = base_image_location +
+                             ImageSpace::kComponentSeparator + extension_image_locations[1] +
+                             ImageSpace::kComponentSeparator + extension_image_locations[0];
+  success = load_boot_image();
+  ASSERT_TRUE(success);
+  ASSERT_EQ(bcp.size(), boot_image_spaces.size());
+  EXPECT_TRUE(contains_test_string(boot_image_spaces[boot_image_spaces.size() - 2u].get()));
+  // The string in the second extension should be replaced and removed from interned string section.
+  EXPECT_FALSE(contains_test_string(boot_image_spaces[boot_image_spaces.size() - 1u].get()));
+
+  // Reload the image without the second extension.
+  bcp.erase(bcp.end() - 2u);
+  bcp_locations.erase(bcp_locations.end() - 2u);
+  full_image_locations =
+      base_image_location + ImageSpace::kComponentSeparator + extension_image_locations[0];
+  success = load_boot_image();
+  ASSERT_TRUE(success);
+  ASSERT_EQ(bcp.size(), boot_image_spaces.size());
+  ASSERT_TRUE(contains_test_string(boot_image_spaces[boot_image_spaces.size() - 1u].get()));
+
+  // Load the app odex file and app image.
+  std::string error_msg;
+  std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
+                                                   app_odex_name.c_str(),
+                                                   app_odex_name.c_str(),
+                                                   /*executable=*/ false,
+                                                   /*low_4gb=*/ false,
+                                                   app_jar_name,
+                                                   &error_msg));
+  ASSERT_TRUE(odex_file != nullptr) << error_msg;
+  std::vector<ImageSpace*> non_owning_boot_image_spaces =
+      MakeNonOwningPointerVector(boot_image_spaces);
+  std::unique_ptr<ImageSpace> app_image_space = ImageSpace::CreateFromAppImage(
+      app_image_name.c_str(),
+      odex_file.get(),
+      ArrayRef<ImageSpace* const>(non_owning_boot_image_spaces),
+      &error_msg);
+  ASSERT_TRUE(app_image_space != nullptr) << error_msg;
+
+  // The string in the app image should be replaced and removed from interned string section.
+  EXPECT_FALSE(contains_test_string(app_image_space.get()));
+}
+
 TEST_F(DexoptTest, ValidateOatFile) {
   std::string dex1 = GetScratchDir() + "/Dex1.jar";
   std::string multidex1 = GetScratchDir() + "/MultiDex1.jar";
@@ -49,11 +244,23 @@
                                              oat_location.c_str(),
                                              /*executable=*/ false,
                                              /*low_4gb=*/ false,
-                                             /*abs_dex_location=*/ nullptr,
-                                             /*reservation=*/ nullptr,
                                              &error_msg));
   ASSERT_TRUE(oat != nullptr) << error_msg;
 
+  {
+    // Test opening the oat file also with explicit dex filenames.
+    std::vector<std::string> dex_filenames{ dex1, multidex1, dex2 };
+    std::unique_ptr<OatFile> oat2(OatFile::Open(/*zip_fd=*/ -1,
+                                                oat_location.c_str(),
+                                                oat_location.c_str(),
+                                                /*executable=*/ false,
+                                                /*low_4gb=*/ false,
+                                                ArrayRef<const std::string>(dex_filenames),
+                                                /*reservation=*/ nullptr,
+                                                &error_msg));
+    ASSERT_TRUE(oat2 != nullptr) << error_msg;
+  }
+
   // Originally all the dex checksums should be up to date.
   EXPECT_TRUE(ImageSpace::ValidateOatFile(*oat, &error_msg)) << error_msg;
 
@@ -110,14 +317,67 @@
   EXPECT_FALSE(ImageSpace::ValidateOatFile(*oat, &error_msg));
 }
 
+TEST_F(DexoptTest, Checksums) {
+  Runtime* runtime = Runtime::Current();
+  ASSERT_TRUE(runtime != nullptr);
+  ASSERT_FALSE(runtime->GetHeap()->GetBootImageSpaces().empty());
+
+  std::vector<std::string> bcp = runtime->GetBootClassPath();
+  std::vector<std::string> bcp_locations = runtime->GetBootClassPathLocations();
+  std::vector<const DexFile*> dex_files = runtime->GetClassLinker()->GetBootClassPath();
+
+  std::string error_msg;
+  auto create_and_verify = [&]() {
+    std::string checksums = gc::space::ImageSpace::GetBootClassPathChecksums(
+        ArrayRef<gc::space::ImageSpace* const>(runtime->GetHeap()->GetBootImageSpaces()),
+        ArrayRef<const DexFile* const>(dex_files));
+    return gc::space::ImageSpace::VerifyBootClassPathChecksums(
+        checksums,
+        android::base::Join(bcp_locations, ':'),
+        runtime->GetImageLocation(),
+        ArrayRef<const std::string>(bcp_locations),
+        ArrayRef<const std::string>(bcp),
+        kRuntimeISA,
+        gc::space::ImageSpaceLoadingOrder::kSystemFirst,
+        &error_msg);
+  };
+
+  ASSERT_TRUE(create_and_verify()) << error_msg;
+
+  std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
+  for (const std::string& src : { GetDexSrc1(), GetDexSrc2() }) {
+    std::vector<std::unique_ptr<const DexFile>> new_dex_files;
+    const ArtDexFileLoader dex_file_loader;
+    ASSERT_TRUE(dex_file_loader.Open(src.c_str(),
+                                     src,
+                                     /*verify=*/ true,
+                                     /*verify_checksum=*/ false,
+                                     &error_msg,
+                                     &new_dex_files))
+        << error_msg;
+
+    bcp.push_back(src);
+    bcp_locations.push_back(src);
+    for (std::unique_ptr<const DexFile>& df : new_dex_files) {
+      dex_files.push_back(df.get());
+      opened_dex_files.push_back(std::move(df));
+    }
+
+    ASSERT_TRUE(create_and_verify()) << error_msg;
+  }
+}
+
 template <bool kImage, bool kRelocate, bool kImageDex2oat>
 class ImageSpaceLoadingTest : public CommonRuntimeTest {
  protected:
   void SetUpRuntimeOptions(RuntimeOptions* options) override {
-    if (kImage) {
-      options->emplace_back(android::base::StringPrintf("-Ximage:%s", GetCoreArtLocation().c_str()),
-                            nullptr);
+    std::string image_location = GetCoreArtLocation();
+    if (!kImage) {
+      missing_image_base_ = std::make_unique<ScratchFile>();
+      image_location = missing_image_base_->GetFilename() + ".art";
     }
+    options->emplace_back(android::base::StringPrintf("-Ximage:%s", image_location.c_str()),
+                          nullptr);
     options->emplace_back(kRelocate ? "-Xrelocate" : "-Xnorelocate", nullptr);
     options->emplace_back(kImageDex2oat ? "-Ximage-dex2oat" : "-Xnoimage-dex2oat", nullptr);
 
@@ -142,9 +402,11 @@
       CHECK_EQ(result, 0);
       old_dex2oat_bcp_.reset();
     }
+    missing_image_base_.reset();
   }
 
  private:
+  std::unique_ptr<ScratchFile> missing_image_base_;
   UniqueCPtr<const char[]> old_dex2oat_bcp_;
 };
 
@@ -165,6 +427,8 @@
 
 class NoAccessAndroidDataTest : public ImageSpaceLoadingTest<false, true, true> {
  protected:
+  NoAccessAndroidDataTest() : quiet_(LogSeverity::FATAL) {}
+
   void SetUpRuntimeOptions(RuntimeOptions* options) override {
     const char* android_data = getenv("ANDROID_DATA");
     CHECK(android_data != nullptr);
@@ -186,16 +450,17 @@
   }
 
   void TearDown() override {
+    ImageSpaceLoadingTest<false, true, true>::TearDown();
     int result = unlink(bad_dalvik_cache_.c_str());
     CHECK_EQ(result, 0) << strerror(errno);
     result = rmdir(bad_android_data_.c_str());
     CHECK_EQ(result, 0) << strerror(errno);
     result = setenv("ANDROID_DATA", old_android_data_.c_str(), /* replace */ 1);
     CHECK_EQ(result, 0) << strerror(errno);
-    ImageSpaceLoadingTest<false, true, true>::TearDown();
   }
 
  private:
+  ScopedLogSeverity quiet_;
   std::string old_android_data_;
   std::string bad_android_data_;
   std::string bad_dalvik_cache_;
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 2c18888..d1b4d7c 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -31,6 +31,7 @@
 #include "gc/accounting/space_bitmap-inl.h"
 #include "gc/heap.h"
 #include "image.h"
+#include "mirror/object-readbarrier-inl.h"
 #include "scoped_thread_state_change-inl.h"
 #include "space-inl.h"
 #include "thread-current-inl.h"
@@ -101,11 +102,11 @@
 };
 
 void LargeObjectSpace::SwapBitmaps() {
-  live_bitmap_.swap(mark_bitmap_);
-  // Swap names to get more descriptive diagnostics.
-  std::string temp_name = live_bitmap_->GetName();
-  live_bitmap_->SetName(mark_bitmap_->GetName());
-  mark_bitmap_->SetName(temp_name);
+  std::swap(live_bitmap_, mark_bitmap_);
+  // Preserve names to get more descriptive diagnostics.
+  std::string temp_name = live_bitmap_.GetName();
+  live_bitmap_.SetName(mark_bitmap_.GetName());
+  mark_bitmap_.SetName(temp_name);
 }
 
 LargeObjectSpace::LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end,
@@ -118,7 +119,7 @@
 
 
 void LargeObjectSpace::CopyLiveToMarked() {
-  mark_bitmap_->CopyFrom(live_bitmap_.get());
+  mark_bitmap_.CopyFrom(&live_bitmap_);
 }
 
 LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
@@ -176,10 +177,14 @@
   return it->second.is_zygote;
 }
 
-void LargeObjectMapSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
+void LargeObjectMapSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self, bool set_mark_bit) {
   MutexLock mu(self, lock_);
   for (auto& pair : large_objects_) {
     pair.second.is_zygote = true;
+    if (set_mark_bit) {
+      bool success = pair.first->AtomicSetMarkBit(0, 1);
+      CHECK(success);
+    }
   }
 }
 
@@ -420,7 +425,6 @@
 }
 
 size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
-  MutexLock mu(self, lock_);
   DCHECK(Contains(obj)) << reinterpret_cast<void*>(Begin()) << " " << obj << " "
                         << reinterpret_cast<void*>(End());
   DCHECK_ALIGNED(obj, kAlignment);
@@ -429,6 +433,15 @@
   const size_t allocation_size = info->ByteSize();
   DCHECK_GT(allocation_size, 0U);
   DCHECK_ALIGNED(allocation_size, kAlignment);
+
+  // madvise the pages without lock
+  madvise(obj, allocation_size, MADV_DONTNEED);
+  if (kIsDebugBuild) {
+    // Can't disallow reads since we use them to find next chunks during coalescing.
+    CheckedCall(mprotect, __FUNCTION__, obj, allocation_size, PROT_READ);
+  }
+
+  MutexLock mu(self, lock_);
   info->SetByteSize(allocation_size, true);  // Mark as free.
   // Look at the next chunk.
   AllocationInfo* next_info = info->GetNextInfo();
@@ -470,11 +483,6 @@
   --num_objects_allocated_;
   DCHECK_LE(allocation_size, num_bytes_allocated_);
   num_bytes_allocated_ -= allocation_size;
-  madvise(obj, allocation_size, MADV_DONTNEED);
-  if (kIsDebugBuild) {
-    // Can't disallow reads since we use them to find next chunks during coalescing.
-    CheckedCall(mprotect, __FUNCTION__, obj, allocation_size, PROT_READ);
-  }
   return allocation_size;
 }
 
@@ -579,7 +587,7 @@
   return info->IsZygoteObject();
 }
 
-void FreeListSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
+void FreeListSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self, bool set_mark_bit) {
   MutexLock mu(self, lock_);
   uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
   for (AllocationInfo* cur_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin())),
@@ -587,6 +595,12 @@
       cur_info = cur_info->GetNextInfo()) {
     if (!cur_info->IsFree()) {
       cur_info->SetZygoteObject();
+      if (set_mark_bit) {
+        ObjPtr<mirror::Object> obj =
+            reinterpret_cast<mirror::Object*>(GetAddressForAllocationInfo(cur_info));
+        bool success = obj->AtomicSetMarkBit(0, 1);
+        CHECK(success);
+      }
     }
   }
 }
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 4d1cbc0..13251d6 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -107,8 +107,9 @@
   // Return true if the large object is a zygote large object. Potentially slow.
   virtual bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const = 0;
   // Called when we create the zygote space, mark all existing large objects as zygote large
-  // objects.
-  virtual void SetAllLargeObjectsAsZygoteObjects(Thread* self) = 0;
+  // objects. Set mark-bit if called from PreZygoteFork() for ConcurrentCopying
+  // GC to avoid dirtying the first page.
+  virtual void SetAllLargeObjectsAsZygoteObjects(Thread* self, bool set_mark_bit) = 0;
 
   virtual void ForEachMemMap(std::function<void(const MemMap&)> func) const = 0;
   // GetRangeAtomic returns Begin() and End() atomically, that is, it never returns Begin() and
@@ -173,7 +174,9 @@
   virtual ~LargeObjectMapSpace() {}
 
   bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override REQUIRES(!lock_);
-  void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);
+  void SetAllLargeObjectsAsZygoteObjects(Thread* self, bool set_mark_bit) override
+      REQUIRES(!lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   AllocationTrackingSafeMap<mirror::Object*, LargeObject, kAllocatorTagLOSMaps> large_objects_
       GUARDED_BY(lock_);
@@ -215,7 +218,9 @@
   // Removes header from the free blocks set by finding the corresponding iterator and erasing it.
   void RemoveFreePrev(AllocationInfo* info) REQUIRES(lock_);
   bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const override;
-  void SetAllLargeObjectsAsZygoteObjects(Thread* self) override REQUIRES(!lock_);
+  void SetAllLargeObjectsAsZygoteObjects(Thread* self, bool set_mark_bit) override
+      REQUIRES(!lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   class SortByPrevFree {
    public:
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index 62bc26e..9736c5d 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -85,7 +85,7 @@
           mirror::Object* obj = pair.first;
           ASSERT_FALSE(los->IsZygoteLargeObject(self, obj));
         }
-        los->SetAllLargeObjectsAsZygoteObjects(self);
+        los->SetAllLargeObjectsAsZygoteObjects(self, /*set_mark_bit=*/ false);
         for (const auto& pair : requests) {
           mirror::Object* obj = pair.first;
           ASSERT_TRUE(los->IsZygoteLargeObject(self, obj));
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index 474231b..281d9c2 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -63,16 +63,15 @@
     static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize);
     CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map_.Begin()), kGcCardSize);
     CHECK_ALIGNED(reinterpret_cast<uintptr_t>(mem_map_.End()), kGcCardSize);
-    live_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
+    live_bitmap_ = accounting::ContinuousSpaceBitmap::Create(
         StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
-        Begin(), NonGrowthLimitCapacity()));
-    CHECK(live_bitmap_.get() != nullptr) << "could not create allocspace live bitmap #"
+        Begin(), NonGrowthLimitCapacity());
+    CHECK(live_bitmap_.IsValid()) << "could not create allocspace live bitmap #"
         << bitmap_index;
-    mark_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
+    mark_bitmap_ = accounting::ContinuousSpaceBitmap::Create(
         StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)),
-        Begin(), NonGrowthLimitCapacity()));
-    CHECK(mark_bitmap_.get() != nullptr) << "could not create allocspace mark bitmap #"
-        << bitmap_index;
+        Begin(), NonGrowthLimitCapacity());
+    CHECK(mark_bitmap_.IsValid()) << "could not create allocspace mark bitmap #" << bitmap_index;
   }
   for (auto& freed : recent_freed_objects_) {
     freed.first = nullptr;
@@ -229,14 +228,16 @@
                                      growth_limit,
                                      CanMoveObjects());
   SetLimit(End());
-  live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
-  CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
-  mark_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
-  CHECK_EQ(mark_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
+  live_bitmap_.SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
+  CHECK_EQ(live_bitmap_.HeapLimit(), reinterpret_cast<uintptr_t>(End()));
+  mark_bitmap_.SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
+  CHECK_EQ(mark_bitmap_.HeapLimit(), reinterpret_cast<uintptr_t>(End()));
 
   // Create the actual zygote space.
-  ZygoteSpace* zygote_space = ZygoteSpace::Create("Zygote space", ReleaseMemMap(),
-                                                  live_bitmap_.release(), mark_bitmap_.release());
+  ZygoteSpace* zygote_space = ZygoteSpace::Create("Zygote space",
+                                                  ReleaseMemMap(),
+                                                  std::move(live_bitmap_),
+                                                  std::move(mark_bitmap_));
   if (UNLIKELY(zygote_space == nullptr)) {
     VLOG(heap) << "Failed creating zygote space from space " << GetName();
   } else {
@@ -280,9 +281,9 @@
   CHECK_LE(new_capacity, NonGrowthLimitCapacity());
   GetLiveBitmap()->SetHeapSize(new_capacity);
   GetMarkBitmap()->SetHeapSize(new_capacity);
-  if (temp_bitmap_.get() != nullptr) {
+  if (temp_bitmap_.IsValid()) {
     // If the bitmaps are clamped, then the temp bitmap is actually the mark bitmap.
-    temp_bitmap_->SetHeapSize(new_capacity);
+    temp_bitmap_.SetHeapSize(new_capacity);
   }
   GetMemMap()->SetSize(new_capacity);
   limit_ = Begin() + new_capacity;
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 86a0a6e..901568e 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -328,58 +328,53 @@
     }
   }
 
+  mirror::Object* region = nullptr;
   // Find a large enough set of contiguous free regions.
   if (kCyclicRegionAllocation) {
+    size_t next_region = -1;
     // Try to find a range of free regions within [cyclic_alloc_region_index_, num_regions_).
-    size_t next_region1 = -1;
-    mirror::Object* region1 = AllocLargeInRange<kForEvac>(cyclic_alloc_region_index_,
-                                                          num_regions_,
-                                                          num_regs_in_large_region,
-                                                          bytes_allocated,
-                                                          usable_size,
-                                                          bytes_tl_bulk_allocated,
-                                                          &next_region1);
-    if (region1 != nullptr) {
-      DCHECK_LT(0u, next_region1);
-      DCHECK_LE(next_region1, num_regions_);
-      // Move the cyclic allocation region marker to the region
-      // following the large region that was just allocated.
-      cyclic_alloc_region_index_ = next_region1 % num_regions_;
-      return region1;
+    region = AllocLargeInRange<kForEvac>(cyclic_alloc_region_index_,
+                                         num_regions_,
+                                         num_regs_in_large_region,
+                                         bytes_allocated,
+                                         usable_size,
+                                         bytes_tl_bulk_allocated,
+                                         &next_region);
+
+    if (region == nullptr) {
+      DCHECK_EQ(next_region, static_cast<size_t>(-1));
+      // If the previous attempt failed, try to find a range of free regions within
+      // [0, min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_)).
+      region = AllocLargeInRange<kForEvac>(
+          0,
+          std::min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_),
+          num_regs_in_large_region,
+          bytes_allocated,
+          usable_size,
+          bytes_tl_bulk_allocated,
+          &next_region);
     }
 
-    // If the previous attempt failed, try to find a range of free regions within
-    // [0, min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_)).
-    size_t next_region2 = -1;
-    mirror::Object* region2 = AllocLargeInRange<kForEvac>(
-            0,
-            std::min(cyclic_alloc_region_index_ + num_regs_in_large_region - 1, num_regions_),
-            num_regs_in_large_region,
-            bytes_allocated,
-            usable_size,
-            bytes_tl_bulk_allocated,
-            &next_region2);
-    if (region2 != nullptr) {
-      DCHECK_LT(0u, next_region2);
-      DCHECK_LE(next_region2, num_regions_);
+    if (region != nullptr) {
+      DCHECK_LT(0u, next_region);
+      DCHECK_LE(next_region, num_regions_);
       // Move the cyclic allocation region marker to the region
       // following the large region that was just allocated.
-      cyclic_alloc_region_index_ = next_region2 % num_regions_;
-      return region2;
+      cyclic_alloc_region_index_ = next_region % num_regions_;
     }
   } else {
     // Try to find a range of free regions within [0, num_regions_).
-    mirror::Object* region = AllocLargeInRange<kForEvac>(0,
-                                                         num_regions_,
-                                                         num_regs_in_large_region,
-                                                         bytes_allocated,
-                                                         usable_size,
-                                                         bytes_tl_bulk_allocated);
-    if (region != nullptr) {
-      return region;
-    }
+    region = AllocLargeInRange<kForEvac>(0,
+                                         num_regions_,
+                                         num_regs_in_large_region,
+                                         bytes_allocated,
+                                         usable_size,
+                                         bytes_tl_bulk_allocated);
   }
-  return nullptr;
+  if (kForEvac && region != nullptr) {
+    TraceHeapSize();
+  }
+  return region;
 }
 
 template<bool kForEvac>
@@ -503,7 +498,7 @@
     DCHECK_LE(begin_, Top());
     size_t bytes;
     if (is_a_tlab_) {
-      bytes = thread_->GetThreadLocalBytesAllocated();
+      bytes = thread_->GetTlabEnd() - begin_;
     } else {
       bytes = static_cast<size_t>(Top() - begin_);
     }
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 823043e..faeeec0 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -33,7 +33,7 @@
 static constexpr uint kEvacuateLivePercentThreshold = 75U;
 
 // Whether we protect the unused and cleared regions.
-static constexpr bool kProtectClearedRegions = true;
+static constexpr bool kProtectClearedRegions = kIsDebugBuild;
 
 // Wether we poison memory areas occupied by dead objects in unevacuated regions.
 static constexpr bool kPoisonDeadObjectsInUnevacuatedRegions = true;
@@ -125,8 +125,8 @@
   for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
     regions_[i].Init(i, region_addr, region_addr + kRegionSize);
   }
-  mark_bitmap_.reset(
-      accounting::ContinuousSpaceBitmap::Create("region space live bitmap", Begin(), Capacity()));
+  mark_bitmap_ =
+      accounting::ContinuousSpaceBitmap::Create("region space live bitmap", Begin(), Capacity());
   if (kIsDebugBuild) {
     CHECK_EQ(regions_[0].Begin(), Begin());
     for (size_t i = 0; i < num_regions_; ++i) {
@@ -337,6 +337,10 @@
     rb_table->SetAll();
   }
   MutexLock mu(Thread::Current(), region_lock_);
+  // We cannot use the partially utilized TLABs across a GC. Therefore, revoke
+  // them during the thread-flip.
+  partial_tlabs_.clear();
+
   // Counter for the number of expected large tail regions following a large region.
   size_t num_expected_large_tails = 0U;
   // Flag to store whether the previously seen large region has been evacuated.
@@ -833,17 +837,40 @@
   r->objects_allocated_.fetch_add(1, std::memory_order_relaxed);
 }
 
-bool RegionSpace::AllocNewTlab(Thread* self, size_t min_bytes) {
+bool RegionSpace::AllocNewTlab(Thread* self,
+                               const size_t tlab_size,
+                               size_t* bytes_tl_bulk_allocated) {
   MutexLock mu(self, region_lock_);
-  RevokeThreadLocalBuffersLocked(self);
-  // Retain sufficient free regions for full evacuation.
-
-  Region* r = AllocateRegion(/*for_evac=*/ false);
+  RevokeThreadLocalBuffersLocked(self, /*reuse=*/ gc::Heap::kUsePartialTlabs);
+  Region* r = nullptr;
+  uint8_t* pos = nullptr;
+  *bytes_tl_bulk_allocated = tlab_size;
+  // First attempt to get a partially used TLAB, if available.
+  if (tlab_size < kRegionSize) {
+    // Fetch the largest partial TLAB. The multimap is ordered in decreasing
+    // size.
+    auto largest_partial_tlab = partial_tlabs_.begin();
+    if (largest_partial_tlab != partial_tlabs_.end() && largest_partial_tlab->first >= tlab_size) {
+      r = largest_partial_tlab->second;
+      pos = r->End() - largest_partial_tlab->first;
+      partial_tlabs_.erase(largest_partial_tlab);
+      DCHECK_GT(r->End(), pos);
+      DCHECK_LE(r->Begin(), pos);
+      DCHECK_GE(r->Top(), pos);
+      *bytes_tl_bulk_allocated -= r->Top() - pos;
+    }
+  }
+  if (r == nullptr) {
+    // Fallback to allocating an entire region as TLAB.
+    r = AllocateRegion(/*for_evac=*/ false);
+  }
   if (r != nullptr) {
+    uint8_t* start = pos != nullptr ? pos : r->Begin();
+    DCHECK_ALIGNED(start, kObjectAlignment);
     r->is_a_tlab_ = true;
     r->thread_ = self;
     r->SetTop(r->End());
-    self->SetTlab(r->Begin(), r->Begin() + min_bytes, r->End());
+    self->SetTlab(start, start + tlab_size, r->End());
     return true;
   }
   return false;
@@ -851,24 +878,35 @@
 
 size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread) {
   MutexLock mu(Thread::Current(), region_lock_);
-  RevokeThreadLocalBuffersLocked(thread);
+  RevokeThreadLocalBuffersLocked(thread, /*reuse=*/ gc::Heap::kUsePartialTlabs);
   return 0U;
 }
 
-void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
+size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread, const bool reuse) {
+  MutexLock mu(Thread::Current(), region_lock_);
+  RevokeThreadLocalBuffersLocked(thread, reuse);
+  return 0U;
+}
+
+void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread, bool reuse) {
   uint8_t* tlab_start = thread->GetTlabStart();
   DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr);
   if (tlab_start != nullptr) {
-    DCHECK_ALIGNED(tlab_start, kRegionSize);
     Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start));
+    r->is_a_tlab_ = false;
+    r->thread_ = nullptr;
     DCHECK(r->IsAllocated());
     DCHECK_LE(thread->GetThreadLocalBytesAllocated(), kRegionSize);
     r->RecordThreadLocalAllocations(thread->GetThreadLocalObjectsAllocated(),
-                                    thread->GetThreadLocalBytesAllocated());
-    r->is_a_tlab_ = false;
-    r->thread_ = nullptr;
+                                    thread->GetTlabEnd() - r->Begin());
+    DCHECK_GE(r->End(), thread->GetTlabPos());
+    DCHECK_LE(r->Begin(), thread->GetTlabPos());
+    size_t remaining_bytes = r->End() - thread->GetTlabPos();
+    if (reuse && remaining_bytes >= gc::Heap::kPartialTlabSize) {
+      partial_tlabs_.insert(std::make_pair(remaining_bytes, r));
+    }
   }
-  thread->SetTlab(nullptr, nullptr, nullptr);
+  thread->ResetTlab();
 }
 
 size_t RegionSpace::RevokeAllThreadLocalBuffers() {
@@ -977,6 +1015,11 @@
   thread_ = nullptr;
 }
 
+void RegionSpace::TraceHeapSize() {
+  Heap* heap = Runtime::Current()->GetHeap();
+  heap->TraceHeapSize(heap->GetBytesAllocated() + EvacBytes());
+}
+
 RegionSpace::Region* RegionSpace::AllocateRegion(bool for_evac) {
   if (!for_evac && (num_non_free_regions_ + 1) * 2 > num_regions_) {
     return nullptr;
@@ -998,6 +1041,7 @@
       }
       if (for_evac) {
         ++num_evac_regions_;
+        TraceHeapSize();
         // Evac doesn't count as newly allocated.
       } else {
         r->SetNewlyAllocated();
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 26af633..f74abfb 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -22,6 +22,9 @@
 #include "space.h"
 #include "thread.h"
 
+#include <functional>
+#include <map>
+
 namespace art {
 namespace gc {
 
@@ -107,11 +110,11 @@
     UNIMPLEMENTED(FATAL);
     return 0;
   }
-  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
-    return mark_bitmap_.get();
+  accounting::ContinuousSpaceBitmap* GetLiveBitmap() override {
+    return &mark_bitmap_;
   }
-  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
-    return mark_bitmap_.get();
+  accounting::ContinuousSpaceBitmap* GetMarkBitmap() override {
+    return &mark_bitmap_;
   }
 
   void Clear() override REQUIRES(!region_lock_);
@@ -141,7 +144,7 @@
   void DumpNonFreeRegions(std::ostream& os) REQUIRES(!region_lock_);
 
   size_t RevokeThreadLocalBuffers(Thread* thread) override REQUIRES(!region_lock_);
-  void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(region_lock_);
+  size_t RevokeThreadLocalBuffers(Thread* thread, const bool reuse) REQUIRES(!region_lock_);
   size_t RevokeAllThreadLocalBuffers() override
       REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !region_lock_);
   void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!region_lock_);
@@ -189,6 +192,9 @@
   size_t GetNumRegions() const {
     return num_regions_;
   }
+  size_t GetNumNonFreeRegions() const NO_THREAD_SAFETY_ANALYSIS {
+    return num_non_free_regions_;
+  }
 
   bool CanMoveObjects() const override {
     return true;
@@ -363,12 +369,17 @@
   // Increment object allocation count for region containing ref.
   void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_);
 
-  bool AllocNewTlab(Thread* self, size_t min_bytes) REQUIRES(!region_lock_);
+  bool AllocNewTlab(Thread* self, const size_t tlab_size, size_t* bytes_tl_bulk_allocated)
+      REQUIRES(!region_lock_);
 
   uint32_t Time() {
     return time_;
   }
 
+  size_t EvacBytes() const NO_THREAD_SAFETY_ANALYSIS {
+    return num_evac_regions_ * kRegionSize;
+  }
+
  private:
   RegionSpace(const std::string& name, MemMap&& mem_map, bool use_generational_cc);
 
@@ -587,9 +598,8 @@
 
     void RecordThreadLocalAllocations(size_t num_objects, size_t num_bytes) {
       DCHECK(IsAllocated());
-      DCHECK_EQ(objects_allocated_.load(std::memory_order_relaxed), 0U);
       DCHECK_EQ(Top(), end_);
-      objects_allocated_.store(num_objects, std::memory_order_relaxed);
+      objects_allocated_.fetch_add(num_objects, std::memory_order_relaxed);
       top_.store(begin_ + num_bytes, std::memory_order_relaxed);
       DCHECK_LE(Top(), end_);
     }
@@ -635,6 +645,8 @@
     return RefToRegionLocked(ref);
   }
 
+  void TraceHeapSize() REQUIRES(region_lock_);
+
   Region* RefToRegionUnlocked(mirror::Object* ref) NO_THREAD_SAFETY_ANALYSIS {
     // For a performance reason (this is frequently called via
     // RegionSpace::IsInFromSpace, etc.) we avoid taking a lock here.
@@ -691,6 +703,7 @@
   }
 
   Region* AllocateRegion(bool for_evac) REQUIRES(region_lock_);
+  void RevokeThreadLocalBuffersLocked(Thread* thread, bool reuse) REQUIRES(region_lock_);
 
   // Scan region range [`begin`, `end`) in increasing order to try to
   // allocate a large region having a size of `num_regs_in_large_region`
@@ -739,6 +752,9 @@
   // The pointer to the region array.
   std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
 
+  // Holds partially used TLABs whose unused tail portion can later be
+  // reassigned to other threads, reducing wasted region space.
+  std::multimap<size_t, Region*, std::greater<size_t>> partial_tlabs_ GUARDED_BY(region_lock_);
   // The upper-bound index of the non-free regions. Used to avoid scanning all regions in
   // RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace.
   //
@@ -756,7 +772,7 @@
   size_t cyclic_alloc_region_index_ GUARDED_BY(region_lock_);
 
   // Mark bitmap used by the GC.
-  std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;
+  accounting::ContinuousSpaceBitmap mark_bitmap_;
 
   DISALLOW_COPY_AND_ASSIGN(RegionSpace);
 };
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index 36fd864..fc9cad0 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -427,8 +427,8 @@
 void RosAllocSpace::Clear() {
   size_t footprint_limit = GetFootprintLimit();
   madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
-  live_bitmap_->Clear();
-  mark_bitmap_->Clear();
+  live_bitmap_.Clear();
+  mark_bitmap_.Clear();
   SetEnd(begin_ + starting_size_);
   delete rosalloc_;
   rosalloc_ = CreateRosAlloc(mem_map_.Begin(),
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
index e7961eb..cae9ce8 100644
--- a/runtime/gc/space/space.cc
+++ b/runtime/gc/space/space.cc
@@ -81,12 +81,10 @@
     Space(name, gc_retention_policy) {
   // TODO: Fix this if we ever support objects not in the low 32 bit.
   const size_t capacity = static_cast<size_t>(std::numeric_limits<uint32_t>::max());
-  live_bitmap_.reset(accounting::LargeObjectBitmap::Create("large live objects", nullptr,
-                                                           capacity));
-  CHECK(live_bitmap_.get() != nullptr);
-  mark_bitmap_.reset(accounting::LargeObjectBitmap::Create("large marked objects", nullptr,
-                                                           capacity));
-  CHECK(mark_bitmap_.get() != nullptr);
+  live_bitmap_ = accounting::LargeObjectBitmap::Create("large live objects", nullptr, capacity);
+  CHECK(live_bitmap_.IsValid());
+  mark_bitmap_ = accounting::LargeObjectBitmap::Create("large marked objects", nullptr, capacity);
+  CHECK(mark_bitmap_.IsValid());
 }
 
 collector::ObjectBytePair ContinuousMemMapAllocSpace::Sweep(bool swap_bitmaps) {
@@ -109,35 +107,30 @@
 
 void ContinuousMemMapAllocSpace::BindLiveToMarkBitmap() {
   CHECK(!HasBoundBitmaps());
-  accounting::ContinuousSpaceBitmap* live_bitmap = GetLiveBitmap();
-  if (live_bitmap != mark_bitmap_.get()) {
-    accounting::ContinuousSpaceBitmap* mark_bitmap = mark_bitmap_.release();
-    Runtime::Current()->GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
-    temp_bitmap_.reset(mark_bitmap);
-    mark_bitmap_.reset(live_bitmap);
-  }
+  temp_bitmap_ = std::move(mark_bitmap_);
+  mark_bitmap_.CopyView(live_bitmap_);
 }
 
-bool ContinuousMemMapAllocSpace::HasBoundBitmaps() const {
-  return temp_bitmap_.get() != nullptr;
+bool ContinuousSpace::HasBoundBitmaps() {
+  DCHECK(GetLiveBitmap() != nullptr);
+  DCHECK(GetMarkBitmap() != nullptr);
+  // Check if the bitmaps are pointing to the same underlying data.
+  return GetLiveBitmap()->Begin() == GetMarkBitmap()->Begin();
 }
 
 void ContinuousMemMapAllocSpace::UnBindBitmaps() {
   CHECK(HasBoundBitmaps());
   // At this point, `temp_bitmap_` holds our old mark bitmap.
-  accounting::ContinuousSpaceBitmap* new_bitmap = temp_bitmap_.release();
-  Runtime::Current()->GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap_.get(), new_bitmap);
-  CHECK_EQ(mark_bitmap_.release(), live_bitmap_.get());
-  mark_bitmap_.reset(new_bitmap);
-  DCHECK(temp_bitmap_.get() == nullptr);
+  mark_bitmap_ = std::move(temp_bitmap_);
 }
 
 void ContinuousMemMapAllocSpace::SwapBitmaps() {
-  live_bitmap_.swap(mark_bitmap_);
-  // Swap names to get more descriptive diagnostics.
-  std::string temp_name(live_bitmap_->GetName());
-  live_bitmap_->SetName(mark_bitmap_->GetName());
-  mark_bitmap_->SetName(temp_name);
+  CHECK(!HasBoundBitmaps());
+  std::swap(live_bitmap_, mark_bitmap_);
+  // Preserve names to get more descriptive diagnostics.
+  std::string temp_name(live_bitmap_.GetName());
+  live_bitmap_.SetName(mark_bitmap_.GetName());
+  mark_bitmap_.SetName(temp_name);
 }
 
 AllocSpace::SweepCallbackContext::SweepCallbackContext(bool swap_bitmaps_in, space::Space* space_in)
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 6a4095c..3b7e3b7 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -295,8 +295,8 @@
     return End() - Begin();
   }
 
-  virtual accounting::ContinuousSpaceBitmap* GetLiveBitmap() const = 0;
-  virtual accounting::ContinuousSpaceBitmap* GetMarkBitmap() const = 0;
+  virtual accounting::ContinuousSpaceBitmap* GetLiveBitmap() = 0;
+  virtual accounting::ContinuousSpaceBitmap* GetMarkBitmap() = 0;
 
   // Maximum which the mapped space can grow to.
   virtual size_t Capacity() const {
@@ -318,6 +318,8 @@
     return true;
   }
 
+  bool HasBoundBitmaps() REQUIRES(Locks::heap_bitmap_lock_);
+
   virtual ~ContinuousSpace() {}
 
  protected:
@@ -344,12 +346,12 @@
 // is suitable for use for large primitive arrays.
 class DiscontinuousSpace : public Space {
  public:
-  accounting::LargeObjectBitmap* GetLiveBitmap() const {
-    return live_bitmap_.get();
+  accounting::LargeObjectBitmap* GetLiveBitmap() {
+    return &live_bitmap_;
   }
 
-  accounting::LargeObjectBitmap* GetMarkBitmap() const {
-    return mark_bitmap_.get();
+  accounting::LargeObjectBitmap* GetMarkBitmap() {
+    return &mark_bitmap_;
   }
 
   bool IsDiscontinuousSpace() const override {
@@ -361,8 +363,8 @@
  protected:
   DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);
 
-  std::unique_ptr<accounting::LargeObjectBitmap> live_bitmap_;
-  std::unique_ptr<accounting::LargeObjectBitmap> mark_bitmap_;
+  accounting::LargeObjectBitmap live_bitmap_;
+  accounting::LargeObjectBitmap mark_bitmap_;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(DiscontinuousSpace);
@@ -423,37 +425,36 @@
     return this;
   }
 
-  bool HasBoundBitmaps() const REQUIRES(Locks::heap_bitmap_lock_);
   // Make the mark bitmap an alias of the live bitmap. Save the current mark bitmap into
   // `temp_bitmap_`, so that we can restore it later in ContinuousMemMapAllocSpace::UnBindBitmaps.
   void BindLiveToMarkBitmap() REQUIRES(Locks::heap_bitmap_lock_);
   // Unalias the mark bitmap from the live bitmap and restore the old mark bitmap.
   void UnBindBitmaps() REQUIRES(Locks::heap_bitmap_lock_);
   // Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
-  void SwapBitmaps();
+  void SwapBitmaps() REQUIRES(Locks::heap_bitmap_lock_);
 
   // Clear the space back to an empty space.
   virtual void Clear() = 0;
 
-  accounting::ContinuousSpaceBitmap* GetLiveBitmap() const override {
-    return live_bitmap_.get();
+  accounting::ContinuousSpaceBitmap* GetLiveBitmap() override {
+    return &live_bitmap_;
   }
 
-  accounting::ContinuousSpaceBitmap* GetMarkBitmap() const override {
-    return mark_bitmap_.get();
+  accounting::ContinuousSpaceBitmap* GetMarkBitmap() override {
+    return &mark_bitmap_;
   }
 
-  accounting::ContinuousSpaceBitmap* GetTempBitmap() const {
-    return temp_bitmap_.get();
+  accounting::ContinuousSpaceBitmap* GetTempBitmap() {
+    return &temp_bitmap_;
   }
 
   collector::ObjectBytePair Sweep(bool swap_bitmaps);
   virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() = 0;
 
  protected:
-  std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap_;
-  std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;
-  std::unique_ptr<accounting::ContinuousSpaceBitmap> temp_bitmap_;
+  accounting::ContinuousSpaceBitmap live_bitmap_;
+  accounting::ContinuousSpaceBitmap mark_bitmap_;
+  accounting::ContinuousSpaceBitmap temp_bitmap_;
 
   ContinuousMemMapAllocSpace(const std::string& name,
                              MemMap&& mem_map,
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index f482466..66427a7 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -21,6 +21,7 @@
 #include "gc/accounting/card_table-inl.h"
 #include "gc/accounting/space_bitmap-inl.h"
 #include "gc/heap.h"
+#include "mirror/object-readbarrier-inl.h"
 #include "runtime.h"
 #include "thread-current-inl.h"
 
@@ -43,23 +44,30 @@
 
 ZygoteSpace* ZygoteSpace::Create(const std::string& name,
                                  MemMap&& mem_map,
-                                 accounting::ContinuousSpaceBitmap* live_bitmap,
-                                 accounting::ContinuousSpaceBitmap* mark_bitmap) {
-  DCHECK(live_bitmap != nullptr);
-  DCHECK(mark_bitmap != nullptr);
+                                 accounting::ContinuousSpaceBitmap&& live_bitmap,
+                                 accounting::ContinuousSpaceBitmap&& mark_bitmap) {
+  DCHECK(live_bitmap.IsValid());
+  DCHECK(mark_bitmap.IsValid());
   size_t objects_allocated = 0;
   CountObjectsAllocated visitor(&objects_allocated);
   ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
-  live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(mem_map.Begin()),
-                                reinterpret_cast<uintptr_t>(mem_map.End()), visitor);
+  live_bitmap.VisitMarkedRange(reinterpret_cast<uintptr_t>(mem_map.Begin()),
+                               reinterpret_cast<uintptr_t>(mem_map.End()), visitor);
   ZygoteSpace* zygote_space = new ZygoteSpace(name, std::move(mem_map), objects_allocated);
-  CHECK(zygote_space->live_bitmap_.get() == nullptr);
-  CHECK(zygote_space->mark_bitmap_.get() == nullptr);
-  zygote_space->live_bitmap_.reset(live_bitmap);
-  zygote_space->mark_bitmap_.reset(mark_bitmap);
+  zygote_space->live_bitmap_ = std::move(live_bitmap);
+  zygote_space->mark_bitmap_ = std::move(mark_bitmap);
   return zygote_space;
 }
 
+void ZygoteSpace::SetMarkBitInLiveObjects() {
+  GetLiveBitmap()->VisitMarkedRange(reinterpret_cast<uintptr_t>(Begin()),
+                                    reinterpret_cast<uintptr_t>(Limit()),
+                                    [](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+                                      bool success = obj->AtomicSetMarkBit(0, 1);
+                                      CHECK(success);
+                                    });
+}
+
 void ZygoteSpace::Clear() {
   UNIMPLEMENTED(FATAL);
   UNREACHABLE();
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 09db40e..631691d 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -32,10 +32,12 @@
   // Returns the remaining storage in the out_map field.
   static ZygoteSpace* Create(const std::string& name,
                              MemMap&& mem_map,
-                             accounting::ContinuousSpaceBitmap* live_bitmap,
-                             accounting::ContinuousSpaceBitmap* mark_bitmap)
+                             accounting::ContinuousSpaceBitmap&& live_bitmap,
+                             accounting::ContinuousSpaceBitmap&& mark_bitmap)
       REQUIRES_SHARED(Locks::mutator_lock_);
-
+  // In PreZygoteFork() we set the mark-bit of all live objects up front so
+  // that marking them later does not dirty the (shared) zygote pages.
+  void SetMarkBitInLiveObjects();
   void Dump(std::ostream& os) const override;
 
   SpaceType GetType() const override {
diff --git a/runtime/gc/system_weak_test.cc b/runtime/gc/system_weak_test.cc
index 4fe8027..ca11297 100644
--- a/runtime/gc/system_weak_test.cc
+++ b/runtime/gc/system_weak_test.cc
@@ -113,7 +113,6 @@
     case CollectorType::kCollectorTypeCMS:
     case CollectorType::kCollectorTypeCC:
     case CollectorType::kCollectorTypeSS:
-    case CollectorType::kCollectorTypeGSS:
       return true;
 
     default:
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 32af62d..8d8c32c 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -94,13 +94,14 @@
 // The precise flag ensures that more metadata is supplied. An example is vreg data for compiled
 // method frames.
 enum VisitRootFlags : uint8_t {
-  kVisitRootFlagAllRoots = 0x1,
-  kVisitRootFlagNewRoots = 0x2,
-  kVisitRootFlagStartLoggingNewRoots = 0x4,
-  kVisitRootFlagStopLoggingNewRoots = 0x8,
-  kVisitRootFlagClearRootLog = 0x10,
-  kVisitRootFlagClassLoader = 0x20,
-  kVisitRootFlagPrecise = 0x80,
+  kVisitRootFlagAllRoots = (1 << 0),
+  kVisitRootFlagNewRoots = (1 << 1),
+  kVisitRootFlagStartLoggingNewRoots = (1 << 2),
+  kVisitRootFlagStopLoggingNewRoots = (1 << 3),
+  kVisitRootFlagClearRootLog = (1 << 4),
+  kVisitRootFlagClassLoader = (1 << 5),
+  // There is no (1 << 6).
+  kVisitRootFlagPrecise = (1 << 7),
 };
 
 class RootVisitor {
diff --git a/runtime/handle.h b/runtime/handle.h
index 0c9c029..6de4e88 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -32,6 +32,14 @@
 class Thread;
 
 template<class T> class Handle;
+template<typename T> class IterationRange;
+
+namespace mirror {
+template<typename T> class ObjectArray;
+template<typename T, typename C> class ArrayIter;
+template<typename T> using HandleArrayIter = ArrayIter<T, Handle<ObjectArray<T>>>;
+template<typename T> using ConstHandleArrayIter = ArrayIter<T, const Handle<ObjectArray<T>>>;
+}  // namespace mirror
 
 // Handles are memory locations that contain GC roots. As the mirror::Object*s within a handle are
 // GC visible then the GC may move the references within them, something that couldn't be done with
@@ -47,6 +55,11 @@
 
   ALWAYS_INLINE Handle<T>& operator=(const Handle<T>& handle) = default;
 
+  template <typename Type,
+            typename = typename std::enable_if_t<std::is_base_of_v<T, Type>>>
+  ALWAYS_INLINE Handle(const Handle<Type>& other) : reference_(other.reference_) {
+  }
+
   ALWAYS_INLINE explicit Handle(StackReference<T>* reference) : reference_(reference) {
   }
 
@@ -62,6 +75,19 @@
     return down_cast<T*>(reference_->AsMirrorPtr());
   }
 
+  template <typename Type,
+            typename = typename std::enable_if_t<std::is_same_v<mirror::ObjectArray<Type>, T>>>
+  ALWAYS_INLINE IterationRange<mirror::ConstHandleArrayIter<Type>> ConstIterate() const
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    return T::ConstIterate(*this);
+  }
+  template <typename Type,
+            typename = typename std::enable_if_t<std::is_same_v<mirror::ObjectArray<Type>, T>>>
+  ALWAYS_INLINE IterationRange<mirror::HandleArrayIter<Type>> Iterate()
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    return T::Iterate(*this);
+  }
+
   ALWAYS_INLINE bool IsNull() const {
     // It's safe to null-check it without a read barrier.
     return reference_->IsNull();
diff --git a/runtime/handle_scope-inl.h b/runtime/handle_scope-inl.h
index 765ed7d..90cf597 100644
--- a/runtime/handle_scope-inl.h
+++ b/runtime/handle_scope-inl.h
@@ -22,6 +22,7 @@
 #include "base/mutex.h"
 #include "handle.h"
 #include "handle_wrapper.h"
+#include "mirror/object_reference-inl.h"
 #include "obj_ptr-inl.h"
 #include "thread-current-inl.h"
 #include "verify_object.h"
@@ -30,7 +31,7 @@
 
 template<size_t kNumReferences>
 inline FixedSizeHandleScope<kNumReferences>::FixedSizeHandleScope(BaseHandleScope* link,
-                                                                  mirror::Object* fill_value)
+                                                                  ObjPtr<mirror::Object> fill_value)
     : HandleScope(link, kNumReferences) {
   if (kDebugLocking) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
@@ -43,7 +44,8 @@
 }
 
 template<size_t kNumReferences>
-inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self, mirror::Object* fill_value)
+inline StackHandleScope<kNumReferences>::StackHandleScope(Thread* self,
+                                                          ObjPtr<mirror::Object> fill_value)
     : FixedSizeHandleScope<kNumReferences>(self->GetTopHandleScope(), fill_value),
       self_(self) {
   DCHECK_EQ(self, Thread::Current());
@@ -72,7 +74,7 @@
   return header_size + data_size;
 }
 
-inline mirror::Object* HandleScope::GetReference(size_t i) const {
+inline ObjPtr<mirror::Object> HandleScope::GetReference(size_t i) const {
   DCHECK_LT(i, NumberOfReferences());
   if (kDebugLocking) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
@@ -90,7 +92,7 @@
   return MutableHandle<mirror::Object>(&GetReferences()[i]);
 }
 
-inline void HandleScope::SetReference(size_t i, mirror::Object* object) {
+inline void HandleScope::SetReference(size_t i, ObjPtr<mirror::Object> object) {
   if (kDebugLocking) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
@@ -118,16 +120,16 @@
 
 template<size_t kNumReferences> template<class T>
 inline MutableHandle<T> FixedSizeHandleScope<kNumReferences>::NewHandle(T* object) {
-  SetReference(pos_, object);
-  MutableHandle<T> h(GetHandle<T>(pos_));
-  pos_++;
-  return h;
+  return NewHandle(ObjPtr<T>(object));
 }
 
 template<size_t kNumReferences> template<class MirrorType>
 inline MutableHandle<MirrorType> FixedSizeHandleScope<kNumReferences>::NewHandle(
     ObjPtr<MirrorType> object) {
-  return NewHandle(object.Ptr());
+  SetReference(pos_, object);
+  MutableHandle<MirrorType> h(GetHandle<MirrorType>(pos_));
+  ++pos_;
+  return h;
 }
 
 template<size_t kNumReferences> template<class T>
@@ -142,7 +144,8 @@
 }
 
 template<size_t kNumReferences>
-inline void FixedSizeHandleScope<kNumReferences>::SetReference(size_t i, mirror::Object* object) {
+inline void FixedSizeHandleScope<kNumReferences>::SetReference(size_t i,
+                                                               ObjPtr<mirror::Object> object) {
   if (kDebugLocking) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
@@ -194,30 +197,32 @@
 }
 
 template<class T>
-MutableHandle<T> VariableSizedHandleScope::NewHandle(T* object) {
-  if (current_scope_->RemainingSlots() == 0) {
-    current_scope_ = new LocalScopeType(current_scope_);
-  }
-  return current_scope_->NewHandle(object);
+inline MutableHandle<T> VariableSizedHandleScope::NewHandle(T* object) {
+  return NewHandle(ObjPtr<T>(object));
 }
 
 template<class MirrorType>
 inline MutableHandle<MirrorType> VariableSizedHandleScope::NewHandle(ObjPtr<MirrorType> ptr) {
-  return NewHandle(ptr.Ptr());
+  if (current_scope_->RemainingSlots() == 0) {
+    current_scope_ = new LocalScopeType(current_scope_);
+  }
+  return current_scope_->NewHandle(ptr);
 }
 
 inline VariableSizedHandleScope::VariableSizedHandleScope(Thread* const self)
     : BaseHandleScope(self->GetTopHandleScope()),
-      self_(self) {
-  current_scope_ = new LocalScopeType(/*link=*/ nullptr);
+      self_(self),
+      current_scope_(&first_scope_),
+      first_scope_(/*link=*/ nullptr) {
   self_->PushHandleScope(this);
 }
 
 inline VariableSizedHandleScope::~VariableSizedHandleScope() {
   BaseHandleScope* top_handle_scope = self_->PopHandleScope();
   DCHECK_EQ(top_handle_scope, this);
-  while (current_scope_ != nullptr) {
-    LocalScopeType* next = reinterpret_cast<LocalScopeType*>(current_scope_->GetLink());
+  // Don't delete first_scope_ since it is not heap allocated.
+  while (current_scope_ != &first_scope_) {
+    LocalScopeType* next = down_cast<LocalScopeType*>(current_scope_->GetLink());
     delete current_scope_;
     current_scope_ = next;
   }
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 5a6f1ac..b173453 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -105,7 +105,7 @@
   // Returns the size of a HandleScope containing num_references handles.
   static size_t SizeOf(PointerSize pointer_size, uint32_t num_references);
 
-  ALWAYS_INLINE mirror::Object* GetReference(size_t i) const
+  ALWAYS_INLINE ObjPtr<mirror::Object> GetReference(size_t i) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE Handle<mirror::Object> GetHandle(size_t i);
@@ -113,7 +113,7 @@
   ALWAYS_INLINE MutableHandle<mirror::Object> GetMutableHandle(size_t i)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
+  ALWAYS_INLINE void SetReference(size_t i, ObjPtr<mirror::Object> object)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const;
@@ -187,7 +187,7 @@
   ALWAYS_INLINE MutableHandle<MirrorType> NewHandle(ObjPtr<MirrorType> object)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
+  ALWAYS_INLINE void SetReference(size_t i, ObjPtr<mirror::Object> object)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   size_t RemainingSlots() const {
@@ -196,7 +196,7 @@
 
  private:
   explicit ALWAYS_INLINE FixedSizeHandleScope(BaseHandleScope* link,
-                                              mirror::Object* fill_value = nullptr);
+                                              ObjPtr<mirror::Object> fill_value = nullptr);
   ALWAYS_INLINE ~FixedSizeHandleScope() {}
 
   template<class T>
@@ -219,7 +219,8 @@
 template<size_t kNumReferences>
 class PACKED(4) StackHandleScope final : public FixedSizeHandleScope<kNumReferences> {
  public:
-  explicit ALWAYS_INLINE StackHandleScope(Thread* self, mirror::Object* fill_value = nullptr);
+  explicit ALWAYS_INLINE StackHandleScope(Thread* self,
+                                          ObjPtr<mirror::Object> fill_value = nullptr);
   ALWAYS_INLINE ~StackHandleScope();
 
   Thread* Self() const {
@@ -270,6 +271,7 @@
   using LocalScopeType = FixedSizeHandleScope<kNumReferencesPerScope>;
   static_assert(sizeof(LocalScopeType) <= kMaxLocalScopeSize, "Unexpected size of LocalScopeType");
   LocalScopeType* current_scope_;
+  LocalScopeType first_scope_;
 
   DISALLOW_COPY_AND_ASSIGN(VariableSizedHandleScope);
 };
diff --git a/runtime/hidden_api.cc b/runtime/hidden_api.cc
index 388ed33..a0dc0d2 100644
--- a/runtime/hidden_api.cc
+++ b/runtime/hidden_api.cc
@@ -34,6 +34,12 @@
 namespace art {
 namespace hiddenapi {
 
+// Should be the same as dalvik.system.VMRuntime.HIDE_MAXTARGETSDK_P_HIDDEN_APIS and
+// dalvik.system.VMRuntime.HIDE_MAXTARGETSDK_Q_HIDDEN_APIS.
+// Corresponds to bug ids.
+static constexpr uint64_t kHideMaxtargetsdkPHiddenApis = 149997251;
+static constexpr uint64_t kHideMaxtargetsdkQHiddenApis = 149994052;
+
 // Set to true if we should always print a warning in logcat for all hidden API accesses, not just
 // dark grey and black. This can be set to true for developer preview / beta builds, but should be
 // false for public release builds.
@@ -42,6 +48,14 @@
 // list.
 static constexpr bool kLogAllAccesses = false;
 
+// Exemptions for logcat warning. Following signatures do not produce a warning as app developers
+// should not be alerted on the usage of these greylisted APIs. See b/154851649.
+static const std::vector<std::string> kWarningExemptions = {
+    "Ljava/nio/Buffer;",
+    "Llibcore/io/Memory;",
+    "Lsun/misc/Unsafe;",
+};
+
 static inline std::ostream& operator<<(std::ostream& os, AccessMethod value) {
   switch (value) {
     case AccessMethod::kNone:
@@ -76,10 +90,10 @@
 static Domain DetermineDomainFromLocation(const std::string& dex_location,
                                           ObjPtr<mirror::ClassLoader> class_loader) {
   // If running with APEX, check `path` against known APEX locations.
-  // These checks will be skipped on target buildbots where ANDROID_RUNTIME_ROOT
+  // These checks will be skipped on target buildbots where ANDROID_ART_ROOT
   // is set to "/system".
-  if (RuntimeModuleRootDistinctFromAndroidRoot()) {
-    if (LocationIsOnRuntimeModule(dex_location.c_str()) ||
+  if (ArtModuleRootDistinctFromAndroidRoot()) {
+    if (LocationIsOnArtModule(dex_location.c_str()) ||
         LocationIsOnConscryptModule(dex_location.c_str())) {
       return Domain::kCorePlatform;
     }
@@ -112,6 +126,28 @@
   }
 }
 
+void InitializeCorePlatformApiPrivateFields() {
+  // The following fields in WellKnownClasses correspond to private fields in the Core Platform
+  // API that cannot be otherwise expressed and propagated through tooling (b/144502743).
+  jfieldID private_core_platform_api_fields[] = {
+    WellKnownClasses::java_io_FileDescriptor_descriptor,
+    WellKnownClasses::java_io_FileDescriptor_ownerId,
+    WellKnownClasses::java_nio_Buffer_address,
+    WellKnownClasses::java_nio_Buffer_elementSizeShift,
+    WellKnownClasses::java_nio_Buffer_limit,
+    WellKnownClasses::java_nio_Buffer_position,
+  };
+
+  ScopedObjectAccess soa(Thread::Current());
+  for (const auto private_core_platform_api_field : private_core_platform_api_fields) {
+    ArtField* field = jni::DecodeArtField(private_core_platform_api_field);
+    const uint32_t access_flags = field->GetAccessFlags();
+    uint32_t new_access_flags = access_flags | kAccCorePlatformApi;
+    DCHECK(new_access_flags != access_flags);
+    field->SetAccessFlags(new_access_flags);
+  }
+}
+
 namespace detail {
 
 // Do not change the values of items in this enum, as they are written to the
@@ -181,7 +217,7 @@
   return pos == prefix.length();
 }
 
-bool MemberSignature::IsExempted(const std::vector<std::string>& exemptions) {
+bool MemberSignature::DoesPrefixMatchAny(const std::vector<std::string>& exemptions) {
   for (const std::string& exemption : exemptions) {
     if (DoesPrefixMatch(exemption)) {
       return true;
@@ -435,19 +471,14 @@
   DCHECK(member != nullptr);
   Runtime* runtime = Runtime::Current();
 
-  EnforcementPolicy policy = runtime->GetHiddenApiEnforcementPolicy();
-  DCHECK(policy != EnforcementPolicy::kDisabled)
+  EnforcementPolicy hiddenApiPolicy = runtime->GetHiddenApiEnforcementPolicy();
+  DCHECK(hiddenApiPolicy != EnforcementPolicy::kDisabled)
       << "Should never enter this function when access checks are completely disabled";
 
-  const bool deny_access =
-      (policy == EnforcementPolicy::kEnabled) &&
-      IsSdkVersionSetAndMoreThan(runtime->GetTargetSdkVersion(),
-                                 api_list.GetMaxAllowedSdkVersion());
-
   MemberSignature member_signature(member);
 
   // Check for an exemption first. Exempted APIs are treated as white list.
-  if (member_signature.IsExempted(runtime->GetHiddenApiExemptions())) {
+  if (member_signature.DoesPrefixMatchAny(runtime->GetHiddenApiExemptions())) {
     // Avoid re-examining the exemption list next time.
     // Note this results in no warning for the member, which seems like what one would expect.
     // Exemptions effectively adds new members to the whitelist.
@@ -455,15 +486,39 @@
     return false;
   }
 
-  if (access_method != AccessMethod::kNone) {
-    // Print a log message with information about this class member access.
-    // We do this if we're about to deny access, or the app is debuggable.
-    if (kLogAllAccesses || deny_access || runtime->IsJavaDebuggable()) {
-      member_signature.WarnAboutAccess(access_method, api_list, deny_access);
-    }
+  EnforcementPolicy testApiPolicy = runtime->GetTestApiEnforcementPolicy();
 
-    // If there is a StrictMode listener, notify it about this violation.
-    member_signature.NotifyHiddenApiListener(access_method);
+  bool deny_access = false;
+  if (hiddenApiPolicy == EnforcementPolicy::kEnabled) {
+    if (testApiPolicy == EnforcementPolicy::kDisabled && api_list.IsTestApi()) {
+      deny_access = false;
+    } else {
+      switch (api_list.GetMaxAllowedSdkVersion()) {
+        case SdkVersion::kP:
+          deny_access = runtime->isChangeEnabled(kHideMaxtargetsdkPHiddenApis);
+          break;
+        case SdkVersion::kQ:
+          deny_access = runtime->isChangeEnabled(kHideMaxtargetsdkQHiddenApis);
+          break;
+        default:
+          deny_access = IsSdkVersionSetAndMoreThan(runtime->GetTargetSdkVersion(),
+                                                         api_list.GetMaxAllowedSdkVersion());
+      }
+    }
+  }
+
+  if (access_method != AccessMethod::kNone) {
+    // Warn if non-greylisted signature is being accessed or it is not exempted.
+    if (deny_access || !member_signature.DoesPrefixMatchAny(kWarningExemptions)) {
+      // Print a log message with information about this class member access.
+      // We do this if we're about to deny access, or the app is debuggable.
+      if (kLogAllAccesses || deny_access || runtime->IsJavaDebuggable()) {
+        member_signature.WarnAboutAccess(access_method, api_list, deny_access);
+      }
+
+      // If there is a StrictMode listener, notify it about this violation.
+      member_signature.NotifyHiddenApiListener(access_method);
+    }
 
     // If event log sampling is enabled, report this violation.
     if (kIsTargetBuild && !kIsTargetLinux) {
diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h
index e6a0ed3..8817c63 100644
--- a/runtime/hidden_api.h
+++ b/runtime/hidden_api.h
@@ -19,12 +19,15 @@
 
 #include "art_field.h"
 #include "art_method.h"
+#include "base/hiddenapi_domain.h"
 #include "base/hiddenapi_flags.h"
 #include "base/locks.h"
 #include "intrinsics_enum.h"
+#include "jni/jni_internal.h"
 #include "mirror/class-inl.h"
 #include "reflection.h"
 #include "runtime.h"
+#include "well_known_classes.h"
 
 namespace art {
 namespace hiddenapi {
@@ -155,6 +158,8 @@
   DISALLOW_COPY_AND_ASSIGN(ScopedHiddenApiEnforcementPolicySetting);
 };
 
+void InitializeCorePlatformApiPrivateFields() REQUIRES(!Locks::mutator_lock_);
+
 // Implementation details. DO NOT ACCESS DIRECTLY.
 namespace detail {
 
@@ -191,7 +196,7 @@
   // building the entire thing in memory and performing a simple prefix match)
   bool DoesPrefixMatch(const std::string& prefix) const;
 
-  bool IsExempted(const std::vector<std::string>& exemptions);
+  bool DoesPrefixMatchAny(const std::vector<std::string>& exemptions);
 
   void WarnAboutAccess(AccessMethod access_method, ApiList list, bool access_denied);
 
@@ -281,31 +286,6 @@
       case Intrinsics::kReferenceGetReferent:
       case Intrinsics::kMemoryPeekByte:
       case Intrinsics::kMemoryPokeByte:
-      case Intrinsics::kUnsafeCASInt:
-      case Intrinsics::kUnsafeCASLong:
-      case Intrinsics::kUnsafeCASObject:
-      case Intrinsics::kUnsafeGet:
-      case Intrinsics::kUnsafeGetAndAddInt:
-      case Intrinsics::kUnsafeGetAndAddLong:
-      case Intrinsics::kUnsafeGetAndSetInt:
-      case Intrinsics::kUnsafeGetAndSetLong:
-      case Intrinsics::kUnsafeGetAndSetObject:
-      case Intrinsics::kUnsafeGetLongVolatile:
-      case Intrinsics::kUnsafeGetObject:
-      case Intrinsics::kUnsafeGetObjectVolatile:
-      case Intrinsics::kUnsafeGetVolatile:
-      case Intrinsics::kUnsafePut:
-      case Intrinsics::kUnsafePutLong:
-      case Intrinsics::kUnsafePutLongOrdered:
-      case Intrinsics::kUnsafePutLongVolatile:
-      case Intrinsics::kUnsafePutObject:
-      case Intrinsics::kUnsafePutObjectOrdered:
-      case Intrinsics::kUnsafePutObjectVolatile:
-      case Intrinsics::kUnsafePutOrdered:
-      case Intrinsics::kUnsafePutVolatile:
-      case Intrinsics::kUnsafeLoadFence:
-      case Intrinsics::kUnsafeStoreFence:
-      case Intrinsics::kUnsafeFullFence:
       case Intrinsics::kCRC32Update:
       case Intrinsics::kCRC32UpdateBytes:
       case Intrinsics::kCRC32UpdateByteBuffer:
@@ -318,6 +298,26 @@
       case Intrinsics::kMemoryPokeIntNative:
       case Intrinsics::kMemoryPokeLongNative:
       case Intrinsics::kMemoryPokeShortNative:
+      case Intrinsics::kUnsafeCASInt:
+      case Intrinsics::kUnsafeCASLong:
+      case Intrinsics::kUnsafeCASObject:
+      case Intrinsics::kUnsafeGetAndAddInt:
+      case Intrinsics::kUnsafeGetAndAddLong:
+      case Intrinsics::kUnsafeGetAndSetInt:
+      case Intrinsics::kUnsafeGetAndSetLong:
+      case Intrinsics::kUnsafeGetAndSetObject:
+      case Intrinsics::kUnsafeGetLongVolatile:
+      case Intrinsics::kUnsafeGetObjectVolatile:
+      case Intrinsics::kUnsafeGetVolatile:
+      case Intrinsics::kUnsafePutLongOrdered:
+      case Intrinsics::kUnsafePutLongVolatile:
+      case Intrinsics::kUnsafePutObjectOrdered:
+      case Intrinsics::kUnsafePutObjectVolatile:
+      case Intrinsics::kUnsafePutOrdered:
+      case Intrinsics::kUnsafePutVolatile:
+      case Intrinsics::kUnsafeLoadFence:
+      case Intrinsics::kUnsafeStoreFence:
+      case Intrinsics::kUnsafeFullFence:
       case Intrinsics::kVarHandleFullFence:
       case Intrinsics::kVarHandleAcquireFence:
       case Intrinsics::kVarHandleReleaseFence:
@@ -355,7 +355,21 @@
       case Intrinsics::kVarHandleWeakCompareAndSetPlain:
       case Intrinsics::kVarHandleWeakCompareAndSetRelease:
         return 0u;
+      case Intrinsics::kFP16Ceil:
+      case Intrinsics::kFP16Floor:
+      case Intrinsics::kFP16Greater:
+      case Intrinsics::kFP16GreaterEquals:
+      case Intrinsics::kFP16Less:
+      case Intrinsics::kFP16LessEquals:
+      case Intrinsics::kFP16ToFloat:
+      case Intrinsics::kFP16ToHalf:
+      case Intrinsics::kFP16Rint:
+      case Intrinsics::kUnsafeGet:
       case Intrinsics::kUnsafeGetLong:
+      case Intrinsics::kUnsafeGetObject:
+      case Intrinsics::kUnsafePutLong:
+      case Intrinsics::kUnsafePut:
+      case Intrinsics::kUnsafePutObject:
         return kAccCorePlatformApi;
       default:
         // Remaining intrinsics are public API. We DCHECK that in SetIntrinsic().
diff --git a/runtime/hidden_api_test.cc b/runtime/hidden_api_test.cc
index 70fafe6..b9214ff 100644
--- a/runtime/hidden_api_test.cc
+++ b/runtime/hidden_api_test.cc
@@ -32,6 +32,11 @@
 using hiddenapi::detail::MemberSignature;
 using hiddenapi::detail::ShouldDenyAccessToMemberImpl;
 
+// Should be the same as dalvik.system.VMRuntime.HIDE_MAXTARGETSDK_P_HIDDEN_APIS and
+// dalvik.system.VMRuntime.HIDE_MAXTARGETSDK_Q_HIDDEN_APIS.
+static constexpr uint64_t kHideMaxtargetsdkPHiddenApis = 149997251;
+static constexpr uint64_t kHideMaxtargetsdkQHiddenApis = 149994052;
+
 class HiddenApiTest : public CommonRuntimeTest {
  protected:
   void SetUp() override {
@@ -75,6 +80,16 @@
     return art_field;
   }
 
+  void setChangeIdState(uint64_t change, bool enabled) {
+    std::set<uint64_t> disabled_changes = runtime_->GetDisabledCompatChanges();
+    if (enabled) {
+      disabled_changes.erase(change);
+    } else {
+      disabled_changes.insert(change);
+    }
+    runtime_->SetDisabledCompatChanges(disabled_changes);
+  }
+
   bool ShouldDenyAccess(hiddenapi::ApiList list) REQUIRES_SHARED(Locks::mutator_lock_) {
     // Choose parameters such that there are no side effects (AccessMethod::kNone)
     // and that the member is not on the exemptions list (here we choose one which
@@ -109,6 +124,7 @@
   runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kJustWarn);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Whitelist()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Greylist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxQ()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxP()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxO()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Blacklist()), false);
@@ -116,8 +132,11 @@
   runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kEnabled);
   runtime_->SetTargetSdkVersion(
       static_cast<uint32_t>(hiddenapi::ApiList::GreylistMaxO().GetMaxAllowedSdkVersion()));
+  setChangeIdState(kHideMaxtargetsdkPHiddenApis, false);
+  setChangeIdState(kHideMaxtargetsdkQHiddenApis, false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Whitelist()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Greylist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxQ()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxP()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxO()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Blacklist()), true);
@@ -125,8 +144,18 @@
   runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kEnabled);
   runtime_->SetTargetSdkVersion(
       static_cast<uint32_t>(hiddenapi::ApiList::GreylistMaxO().GetMaxAllowedSdkVersion()) + 1);
+  setChangeIdState(kHideMaxtargetsdkPHiddenApis, false);
+  setChangeIdState(kHideMaxtargetsdkQHiddenApis, false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Whitelist()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Greylist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxQ()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxP()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxO()), true);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Blacklist()), true);
+  setChangeIdState(kHideMaxtargetsdkQHiddenApis, true);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Whitelist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Greylist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxQ()), true);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxP()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxO()), true);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Blacklist()), true);
@@ -134,11 +163,64 @@
   runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kEnabled);
   runtime_->SetTargetSdkVersion(
       static_cast<uint32_t>(hiddenapi::ApiList::GreylistMaxP().GetMaxAllowedSdkVersion()) + 1);
+  setChangeIdState(kHideMaxtargetsdkPHiddenApis, true);
+  setChangeIdState(kHideMaxtargetsdkQHiddenApis, false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Whitelist()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Greylist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxQ()), false);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxP()), true);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxO()), true);
   ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Blacklist()), true);
+
+  runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kEnabled);
+  runtime_->SetTargetSdkVersion(
+      static_cast<uint32_t>(hiddenapi::ApiList::GreylistMaxQ().GetMaxAllowedSdkVersion()) + 1);
+  setChangeIdState(kHideMaxtargetsdkPHiddenApis, true);
+  setChangeIdState(kHideMaxtargetsdkQHiddenApis, true);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Whitelist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Greylist()), false);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxQ()), true);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxP()), true);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::GreylistMaxO()), true);
+  ASSERT_EQ(ShouldDenyAccess(hiddenapi::ApiList::Blacklist()), true);
+}
+
+TEST_F(HiddenApiTest, CheckTestApiEnforcement) {
+  ScopedObjectAccess soa(self_);
+
+  runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kEnabled);
+  runtime_->SetTargetSdkVersion(
+      static_cast<uint32_t>(hiddenapi::ApiList::GreylistMaxQ().GetMaxAllowedSdkVersion()) + 1);
+
+  // Default case where all TestApis are treated like non-TestApi.
+  runtime_->SetTestApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kEnabled);
+  ASSERT_EQ(
+      ShouldDenyAccess(hiddenapi::ApiList::TestApi() | hiddenapi::ApiList::Whitelist()), false);
+  ASSERT_EQ(
+      ShouldDenyAccess(hiddenapi::ApiList::TestApi() | hiddenapi::ApiList::Greylist()), false);
+  ASSERT_EQ(
+      ShouldDenyAccess(hiddenapi::ApiList::TestApi() | hiddenapi::ApiList::GreylistMaxQ()), true);
+  ASSERT_EQ(
+      ShouldDenyAccess(hiddenapi::ApiList::TestApi() | hiddenapi::ApiList::GreylistMaxP()), true);
+  ASSERT_EQ(
+      ShouldDenyAccess(hiddenapi::ApiList::TestApi() | hiddenapi::ApiList::GreylistMaxO()), true);
+  ASSERT_EQ(
+      ShouldDenyAccess(hiddenapi::ApiList::TestApi() | hiddenapi::ApiList::Blacklist()), true);
+
+  // A case where we want to allow access to TestApis.
+  runtime_->SetTestApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kDisabled);
+  ASSERT_EQ(
+      ShouldDenyAccess(hiddenapi::ApiList::TestApi() | hiddenapi::ApiList::Whitelist()), false);
+  ASSERT_EQ(
+      ShouldDenyAccess(hiddenapi::ApiList::TestApi() | hiddenapi::ApiList::Greylist()), false);
+  ASSERT_EQ(
+      ShouldDenyAccess(hiddenapi::ApiList::TestApi() | hiddenapi::ApiList::GreylistMaxQ()), false);
+  ASSERT_EQ(
+      ShouldDenyAccess(hiddenapi::ApiList::TestApi() | hiddenapi::ApiList::GreylistMaxP()), false);
+  ASSERT_EQ(
+      ShouldDenyAccess(hiddenapi::ApiList::TestApi() | hiddenapi::ApiList::GreylistMaxO()), false);
+  ASSERT_EQ(
+      ShouldDenyAccess(hiddenapi::ApiList::TestApi() | hiddenapi::ApiList::Blacklist()), false);
 }
 
 TEST_F(HiddenApiTest, CheckMembersRead) {
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 8440c41..516c435 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -42,6 +42,7 @@
 #include "art_method-inl.h"
 #include "base/array_ref.h"
 #include "base/file_utils.h"
+#include "base/logging.h"
 #include "base/macros.h"
 #include "base/mutex.h"
 #include "base/os.h"
@@ -60,8 +61,6 @@
 #include "gc/scoped_gc_critical_section.h"
 #include "gc/space/space.h"
 #include "gc_root.h"
-#include "jdwp/jdwp.h"
-#include "jdwp/jdwp_priv.h"
 #include "mirror/class-inl.h"
 #include "mirror/class.h"
 #include "mirror/object-refvisitor-inl.h"
diff --git a/runtime/image.cc b/runtime/image.cc
index b6bb0b1..782c0c6 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -29,7 +29,7 @@
 namespace art {
 
 const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '7', '4', '\0' };  // CRC32UpdateBB intrinsic
+const uint8_t ImageHeader::kImageVersion[] = { '0', '8', '5', '\0' };  // Single-image.
 
 ImageHeader::ImageHeader(uint32_t image_reservation_size,
                          uint32_t component_count,
@@ -44,6 +44,8 @@
                          uint32_t oat_file_end,
                          uint32_t boot_image_begin,
                          uint32_t boot_image_size,
+                         uint32_t boot_image_component_count,
+                         uint32_t boot_image_checksum,
                          uint32_t pointer_size)
   : image_reservation_size_(image_reservation_size),
     component_count_(component_count),
@@ -57,6 +59,8 @@
     oat_file_end_(oat_file_end),
     boot_image_begin_(boot_image_begin),
     boot_image_size_(boot_image_size),
+    boot_image_component_count_(boot_image_component_count),
+    boot_image_checksum_(boot_image_checksum),
     image_roots_(image_roots),
     pointer_size_(pointer_size) {
   CHECK_EQ(image_begin, RoundUp(image_begin, kPageSize));
@@ -72,27 +76,42 @@
   std::copy_n(sections, kSectionCount, sections_);
 }
 
-void ImageHeader::RelocateImage(int64_t delta) {
-  CHECK_ALIGNED(delta, kPageSize) << " patch delta must be page aligned";
+void ImageHeader::RelocateImageReferences(int64_t delta) {
+  CHECK_ALIGNED(delta, kPageSize) << "relocation delta must be page aligned";
   oat_file_begin_ += delta;
   oat_data_begin_ += delta;
   oat_data_end_ += delta;
   oat_file_end_ += delta;
-  RelocateImageObjects(delta);
-  RelocateImageMethods(delta);
-}
-
-void ImageHeader::RelocateImageObjects(int64_t delta) {
   image_begin_ += delta;
   image_roots_ += delta;
 }
 
-void ImageHeader::RelocateImageMethods(int64_t delta) {
+void ImageHeader::RelocateBootImageReferences(int64_t delta) {
+  CHECK_ALIGNED(delta, kPageSize) << "relocation delta must be page aligned";
+  DCHECK_EQ(boot_image_begin_ != 0u, boot_image_size_ != 0u);
+  if (boot_image_begin_ != 0u) {
+    boot_image_begin_ += delta;
+  }
   for (size_t i = 0; i < kImageMethodsCount; ++i) {
     image_methods_[i] += delta;
   }
 }
 
+bool ImageHeader::IsAppImage() const {
+  // Unlike boot image and boot image extensions which include address space for
+  // oat files in their reservation size, app images are loaded separately from oat
+  // files and their reservation size is the image size rounded up to full page.
+  return image_reservation_size_ == RoundUp(image_size_, kPageSize);
+}
+
+uint32_t ImageHeader::GetImageSpaceCount() const {
+  DCHECK(!IsAppImage());
+  DCHECK_NE(component_count_, 0u);  // Must be the header for the first component.
+  // For images compiled with --single-image, there is only one oat file. To detect
+  // that, check whether the reservation ends at the end of the first oat file.
+  return (image_begin_ + image_reservation_size_ == oat_file_end_) ? 1u : component_count_;
+}
+
 bool ImageHeader::IsValid() const {
   if (memcmp(magic_, kImageMagic, sizeof(kImageMagic)) != 0) {
     return false;
diff --git a/runtime/image.h b/runtime/image.h
index 88bba13..637bf1c 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -137,6 +137,8 @@
               uint32_t oat_file_end,
               uint32_t boot_image_begin,
               uint32_t boot_image_size,
+              uint32_t boot_image_component_count,
+              uint32_t boot_image_checksum,
               uint32_t pointer_size);
 
   bool IsValid() const;
@@ -222,10 +224,6 @@
   enum ImageRoot {
     kDexCaches,
     kClassRoots,
-    kOomeWhenThrowingException,       // Pre-allocated OOME when throwing exception.
-    kOomeWhenThrowingOome,            // Pre-allocated OOME when throwing OOME.
-    kOomeWhenHandlingStackOverflow,   // Pre-allocated OOME when handling StackOverflowError.
-    kNoClassDefFoundError,            // Pre-allocated NoClassDefFoundError.
     kSpecialRoots,                    // Different for boot image and app image, see aliases below.
     kImageRootsMax,
 
@@ -234,6 +232,15 @@
     kBootImageLiveObjects = kSpecialRoots,  // Array of boot image objects that must be kept live.
   };
 
+  enum BootImageLiveObjects {
+    kOomeWhenThrowingException,       // Pre-allocated OOME when throwing exception.
+    kOomeWhenThrowingOome,            // Pre-allocated OOME when throwing OOME.
+    kOomeWhenHandlingStackOverflow,   // Pre-allocated OOME when handling StackOverflowError.
+    kNoClassDefFoundError,            // Pre-allocated NoClassDefFoundError.
+    kClearedJniWeakSentinel,          // Pre-allocated sentinel for cleared weak JNI references.
+    kIntrinsicObjectsStart
+  };
+
   /*
    * This describes the number and ordering of sections inside of Boot
    * and App Images.  It is very important that changes to this struct
@@ -334,9 +341,8 @@
   ObjPtr<mirror::ObjectArray<mirror::Object>> GetImageRoots() const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void RelocateImage(int64_t delta);
-  void RelocateImageMethods(int64_t delta);
-  void RelocateImageObjects(int64_t delta);
+  void RelocateImageReferences(int64_t delta);
+  void RelocateBootImageReferences(int64_t delta);
 
   uint32_t GetBootImageBegin() const {
     return boot_image_begin_;
@@ -346,15 +352,21 @@
     return boot_image_size_;
   }
 
+  uint32_t GetBootImageComponentCount() const {
+    return boot_image_component_count_;
+  }
+
+  uint32_t GetBootImageChecksum() const {
+    return boot_image_checksum_;
+  }
+
   uint64_t GetDataSize() const {
     return data_size_;
   }
 
-  bool IsAppImage() const {
-    // App images currently require a boot image, if the size is non zero then it is an app image
-    // header.
-    return boot_image_size_ != 0u;
-  }
+  bool IsAppImage() const;
+
+  uint32_t GetImageSpaceCount() const;
 
   // Visit mirror::Objects in the section starting at base.
   // TODO: Delete base parameter if it is always equal to GetImageBegin.
@@ -461,10 +473,15 @@
   // .so files. Used for positioning a following alloc spaces.
   uint32_t oat_file_end_ = 0u;
 
-  // Boot image begin and end (app image headers only).
+  // Boot image begin and end (only applies to boot image extension and app image headers).
   uint32_t boot_image_begin_ = 0u;
   uint32_t boot_image_size_ = 0u;  // Includes heap (*.art) and code (.oat).
 
+  // Number of boot image components that this image depends on and their composite checksum
+  // (only applies to boot image extension and app image headers).
+  uint32_t boot_image_component_count_ = 0u;
+  uint32_t boot_image_checksum_ = 0u;
+
   // Absolute address of an Object[] of objects needed to reinitialize from an image.
   uint32_t image_roots_ = 0u;
 
@@ -562,8 +579,8 @@
   return val & ~3u;
 }
 
-std::ostream& operator<<(std::ostream& os, const ImageHeader::ImageMethod& policy);
-std::ostream& operator<<(std::ostream& os, const ImageHeader::ImageRoot& policy);
+std::ostream& operator<<(std::ostream& os, const ImageHeader::ImageMethod& method);
+std::ostream& operator<<(std::ostream& os, const ImageHeader::ImageRoot& root);
 std::ostream& operator<<(std::ostream& os, const ImageHeader::ImageSections& section);
 std::ostream& operator<<(std::ostream& os, const ImageSection& section);
 std::ostream& operator<<(std::ostream& os, const ImageHeader::StorageMode& mode);
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 315311c..60e7c9c 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -16,6 +16,8 @@
 
 #include "instrumentation.h"
 
+#include <functional>
+#include <optional>
 #include <sstream>
 
 #include <android-base/logging.h>
@@ -39,6 +41,7 @@
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
 #include "jvalue-inl.h"
+#include "jvalue.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache.h"
 #include "mirror/object-inl.h"
@@ -54,16 +57,20 @@
 
 constexpr bool kVerboseInstrumentation = false;
 
-void InstrumentationListener::MethodExited(Thread* thread,
-                                           Handle<mirror::Object> this_object,
-                                           ArtMethod* method,
-                                           uint32_t dex_pc,
-                                           Handle<mirror::Object> return_value) {
+void InstrumentationListener::MethodExited(
+    Thread* thread,
+    Handle<mirror::Object> this_object,
+    ArtMethod* method,
+    uint32_t dex_pc,
+    OptionalFrame frame,
+    MutableHandle<mirror::Object>& return_value) {
   DCHECK_EQ(method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetReturnTypePrimitive(),
             Primitive::kPrimNot);
+  const void* original_ret = return_value.Get();
   JValue v;
   v.SetL(return_value.Get());
-  MethodExited(thread, this_object, method, dex_pc, v);
+  MethodExited(thread, this_object, method, dex_pc, frame, v);
+  DCHECK(original_ret == v.GetL()) << "Return value changed";
 }
 
 void InstrumentationListener::FieldWritten(Thread* thread,
@@ -100,23 +107,23 @@
 InstrumentationStackPopper::InstrumentationStackPopper(Thread* self)
       : self_(self),
         instrumentation_(Runtime::Current()->GetInstrumentation()),
-        frames_to_remove_(0) {}
+        pop_until_(0u) {}
 
 InstrumentationStackPopper::~InstrumentationStackPopper() {
-  std::deque<instrumentation::InstrumentationStackFrame>* stack = self_->GetInstrumentationStack();
-  for (size_t i = 0; i < frames_to_remove_; i++) {
-    stack->pop_front();
+  std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* stack =
+      self_->GetInstrumentationStack();
+  for (auto i = stack->begin(); i != stack->end() && i->first <= pop_until_;) {
+    i = stack->erase(i);
   }
 }
 
-bool InstrumentationStackPopper::PopFramesTo(uint32_t desired_pops,
+bool InstrumentationStackPopper::PopFramesTo(uintptr_t stack_pointer,
                                              MutableHandle<mirror::Throwable>& exception) {
-  std::deque<instrumentation::InstrumentationStackFrame>* stack = self_->GetInstrumentationStack();
-  DCHECK_LE(frames_to_remove_, desired_pops);
-  DCHECK_GE(stack->size(), desired_pops);
+  std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* stack =
+      self_->GetInstrumentationStack();
   DCHECK(!self_->IsExceptionPending());
   if (!instrumentation_->HasMethodUnwindListeners()) {
-    frames_to_remove_ = desired_pops;
+    pop_until_ = stack_pointer;
     return true;
   }
   if (kVerboseInstrumentation) {
@@ -125,8 +132,14 @@
   // The instrumentation events expect the exception to be set.
   self_->SetException(exception.Get());
   bool new_exception_thrown = false;
-  for (; frames_to_remove_ < desired_pops && !new_exception_thrown; frames_to_remove_++) {
-    InstrumentationStackFrame frame = stack->at(frames_to_remove_);
+  auto i = stack->upper_bound(pop_until_);
+
+  // Now pop all frames until reaching stack_pointer, or a new exception is
+  // thrown. Note that `stack_pointer` doesn't need to be a return PC address
+  // (in fact the exception handling code passes the start of the frame where
+  // the catch handler is).
+  for (; i != stack->end() && i->first <= stack_pointer; i++) {
+    const InstrumentationStackFrame& frame = i->second;
     ArtMethod* method = frame.method_;
     // Notify listeners of method unwind.
     // TODO: improve the dex_pc information here.
@@ -137,19 +150,26 @@
     if (!method->IsRuntimeMethod() && !frame.interpreter_entry_) {
       instrumentation_->MethodUnwindEvent(self_, frame.this_object_, method, dex_pc);
       new_exception_thrown = self_->GetException() != exception.Get();
+      if (new_exception_thrown) {
+        pop_until_ = i->first;
+        break;
+      }
     }
   }
+  if (!new_exception_thrown) {
+    pop_until_ = stack_pointer;
+  }
   exception.Assign(self_->GetException());
   self_->ClearException();
   if (kVerboseInstrumentation && new_exception_thrown) {
-    LOG(INFO) << "Failed to pop " << (desired_pops - frames_to_remove_)
-              << " frames due to new exception";
+    LOG(INFO) << "Did partial pop of frames due to new exception";
   }
   return !new_exception_thrown;
 }
 
 Instrumentation::Instrumentation()
-    : instrumentation_stubs_installed_(false),
+    : current_force_deopt_id_(0),
+      instrumentation_stubs_installed_(false),
       entry_exit_stubs_installed_(false),
       interpreter_stubs_installed_(false),
       interpret_only_(false),
@@ -173,7 +193,7 @@
       can_use_instrumentation_trampolines_(true) {
 }
 
-void Instrumentation::InstallStubsForClass(mirror::Class* klass) {
+void Instrumentation::InstallStubsForClass(ObjPtr<mirror::Class> klass) {
   if (!klass->IsResolved()) {
     // We need the class to be resolved to install/uninstall stubs. Otherwise its methods
     // could not be initialized or linked with regards to class inheritance.
@@ -188,6 +208,15 @@
 
 static void UpdateEntrypoints(ArtMethod* method, const void* quick_code)
     REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (kIsDebugBuild) {
+    jit::Jit* jit = Runtime::Current()->GetJit();
+    if (jit != nullptr && jit->GetCodeCache()->ContainsPc(quick_code)) {
+      // Ensure we always have the thumb entrypoint for JIT on arm32.
+      if (kRuntimeISA == InstructionSet::kArm) {
+        CHECK_EQ(reinterpret_cast<uintptr_t>(quick_code) & 1, 1u);
+      }
+    }
+  }
   method->SetEntryPointFromQuickCompiledCode(quick_code);
 }
 
@@ -266,16 +295,20 @@
 // deoptimization of quick frames to interpreter frames.
 // Since we may already have done this previously, we need to push new instrumentation frame before
 // existing instrumentation frames.
-static void InstrumentationInstallStack(Thread* thread, void* arg)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
+void InstrumentationInstallStack(Thread* thread, void* arg)
+    REQUIRES(Locks::mutator_lock_) {
+  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
   struct InstallStackVisitor final : public StackVisitor {
-    InstallStackVisitor(Thread* thread_in, Context* context, uintptr_t instrumentation_exit_pc)
+    InstallStackVisitor(Thread* thread_in,
+                        Context* context,
+                        uintptr_t instrumentation_exit_pc,
+                        uint64_t force_deopt_id)
         : StackVisitor(thread_in, context, kInstrumentationStackWalk),
           instrumentation_stack_(thread_in->GetInstrumentationStack()),
           instrumentation_exit_pc_(instrumentation_exit_pc),
-          reached_existing_instrumentation_frames_(false), instrumentation_stack_depth_(0),
-          last_return_pc_(0) {
-    }
+          reached_existing_instrumentation_frames_(false),
+          last_return_pc_(0),
+          force_deopt_id_(force_deopt_id) {}
 
     bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
       ArtMethod* m = GetMethod();
@@ -288,8 +321,12 @@
       }
       if (GetCurrentQuickFrame() == nullptr) {
         bool interpreter_frame = true;
-        InstrumentationStackFrame instrumentation_frame(GetThisObject(), m, 0, GetFrameId(),
-                                                        interpreter_frame);
+        InstrumentationStackFrame instrumentation_frame(GetThisObject().Ptr(),
+                                                        m,
+                                                        /*return_pc=*/ 0,
+                                                        GetFrameId(),
+                                                        interpreter_frame,
+                                                        force_deopt_id_);
         if (kVerboseInstrumentation) {
           LOG(INFO) << "Pushing shadow frame " << instrumentation_frame.Dump();
         }
@@ -301,11 +338,10 @@
         LOG(INFO) << "  Installing exit stub in " << DescribeLocation();
       }
       if (return_pc == instrumentation_exit_pc_) {
-        CHECK_LT(instrumentation_stack_depth_, instrumentation_stack_->size());
-
+        auto it = instrumentation_stack_->find(GetReturnPcAddr());
+        CHECK(it != instrumentation_stack_->end());
+        const InstrumentationStackFrame& frame = it->second;
         if (m->IsRuntimeMethod()) {
-          const InstrumentationStackFrame& frame =
-              (*instrumentation_stack_)[instrumentation_stack_depth_];
           if (frame.interpreter_entry_) {
             // This instrumentation frame is for an interpreter bridge and is
             // pushed when executing the instrumented interpreter bridge. So method
@@ -314,7 +350,6 @@
             uint32_t dex_pc = dex::kDexNoIndex;
             dex_pcs_.push_back(dex_pc);
             last_return_pc_ = frame.return_pc_;
-            ++instrumentation_stack_depth_;
             return true;
           }
         }
@@ -323,8 +358,6 @@
         // We should have already installed instrumentation or be interpreter on previous frames.
         reached_existing_instrumentation_frames_ = true;
 
-        const InstrumentationStackFrame& frame =
-            (*instrumentation_stack_)[instrumentation_stack_depth_];
         CHECK_EQ(m->GetNonObsoleteMethod(), frame.method_->GetNonObsoleteMethod())
             << "Expected " << ArtMethod::PrettyMethod(m)
             << ", Found " << ArtMethod::PrettyMethod(frame.method_);
@@ -340,9 +373,9 @@
           std::string thread_name;
           GetThread()->GetThreadName(thread_name);
           uint32_t dex_pc = dex::kDexNoIndex;
-          if (last_return_pc_ != 0 &&
-              GetCurrentOatQuickMethodHeader() != nullptr) {
-            dex_pc = GetCurrentOatQuickMethodHeader()->ToDexPc(m, last_return_pc_);
+          if (last_return_pc_ != 0 && GetCurrentOatQuickMethodHeader() != nullptr) {
+            dex_pc = GetCurrentOatQuickMethodHeader()->ToDexPc(
+                GetCurrentQuickFrame(), last_return_pc_);
           }
           LOG(FATAL) << "While walking " << thread_name << " found unexpected non-runtime method"
                      << " without instrumentation exit return or interpreter frame."
@@ -352,44 +385,34 @@
           UNREACHABLE();
         }
         InstrumentationStackFrame instrumentation_frame(
-            m->IsRuntimeMethod() ? nullptr : GetThisObject(),
+            m->IsRuntimeMethod() ? nullptr : GetThisObject().Ptr(),
             m,
             return_pc,
             GetFrameId(),    // A runtime method still gets a frame id.
-            false);
+            false,
+            force_deopt_id_);
         if (kVerboseInstrumentation) {
           LOG(INFO) << "Pushing frame " << instrumentation_frame.Dump();
         }
 
-        // Insert frame at the right position so we do not corrupt the instrumentation stack.
-        // Instrumentation stack frames are in descending frame id order.
-        auto it = instrumentation_stack_->begin();
-        for (auto end = instrumentation_stack_->end(); it != end; ++it) {
-          const InstrumentationStackFrame& current = *it;
-          if (instrumentation_frame.frame_id_ >= current.frame_id_) {
-            break;
-          }
-        }
-        instrumentation_stack_->insert(it, instrumentation_frame);
+        instrumentation_stack_->insert({GetReturnPcAddr(), instrumentation_frame});
         SetReturnPc(instrumentation_exit_pc_);
       }
       uint32_t dex_pc = dex::kDexNoIndex;
-      if (last_return_pc_ != 0 &&
-          GetCurrentOatQuickMethodHeader() != nullptr) {
-        dex_pc = GetCurrentOatQuickMethodHeader()->ToDexPc(m, last_return_pc_);
+      if (last_return_pc_ != 0 && GetCurrentOatQuickMethodHeader() != nullptr) {
+        dex_pc = GetCurrentOatQuickMethodHeader()->ToDexPc(GetCurrentQuickFrame(), last_return_pc_);
       }
       dex_pcs_.push_back(dex_pc);
       last_return_pc_ = return_pc;
-      ++instrumentation_stack_depth_;
       return true;  // Continue.
     }
-    std::deque<InstrumentationStackFrame>* const instrumentation_stack_;
+    std::map<uintptr_t, InstrumentationStackFrame>* const instrumentation_stack_;
     std::vector<InstrumentationStackFrame> shadow_stack_;
     std::vector<uint32_t> dex_pcs_;
     const uintptr_t instrumentation_exit_pc_;
     bool reached_existing_instrumentation_frames_;
-    size_t instrumentation_stack_depth_;
     uintptr_t last_return_pc_;
+    uint64_t force_deopt_id_;
   };
   if (kVerboseInstrumentation) {
     std::string thread_name;
@@ -400,24 +423,28 @@
   Instrumentation* instrumentation = reinterpret_cast<Instrumentation*>(arg);
   std::unique_ptr<Context> context(Context::Create());
   uintptr_t instrumentation_exit_pc = reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc());
-  InstallStackVisitor visitor(thread, context.get(), instrumentation_exit_pc);
+  InstallStackVisitor visitor(
+      thread, context.get(), instrumentation_exit_pc, instrumentation->current_force_deopt_id_);
   visitor.WalkStack(true);
   CHECK_EQ(visitor.dex_pcs_.size(), thread->GetInstrumentationStack()->size());
 
   if (instrumentation->ShouldNotifyMethodEnterExitEvents()) {
     // Create method enter events for all methods currently on the thread's stack. We only do this
     // if no debugger is attached to prevent from posting events twice.
+    // TODO: This is the only place we make use of frame_id_. We should create a
+    // std::vector instead and populate it as we walk the stack.
     auto ssi = visitor.shadow_stack_.rbegin();
     for (auto isi = thread->GetInstrumentationStack()->rbegin(),
         end = thread->GetInstrumentationStack()->rend(); isi != end; ++isi) {
-      while (ssi != visitor.shadow_stack_.rend() && (*ssi).frame_id_ < (*isi).frame_id_) {
+      while (ssi != visitor.shadow_stack_.rend() && (*ssi).frame_id_ < isi->second.frame_id_) {
         instrumentation->MethodEnterEvent(thread, (*ssi).this_object_, (*ssi).method_, 0);
         ++ssi;
       }
       uint32_t dex_pc = visitor.dex_pcs_.back();
       visitor.dex_pcs_.pop_back();
-      if (!isi->interpreter_entry_ && !isi->method_->IsRuntimeMethod()) {
-        instrumentation->MethodEnterEvent(thread, (*isi).this_object_, (*isi).method_, dex_pc);
+      if (!isi->second.interpreter_entry_ && !isi->second.method_->IsRuntimeMethod()) {
+        instrumentation->MethodEnterEvent(
+            thread, isi->second.this_object_, isi->second.method_, dex_pc);
       }
     }
   }
@@ -462,35 +489,31 @@
         }
         return true;  // Ignore upcalls.
       }
-      bool removed_stub = false;
-      // TODO: make this search more efficient?
-      const size_t frameId = GetFrameId();
-      for (const InstrumentationStackFrame& instrumentation_frame : *instrumentation_stack_) {
-        if (instrumentation_frame.frame_id_ == frameId) {
-          if (kVerboseInstrumentation) {
-            LOG(INFO) << "  Removing exit stub in " << DescribeLocation();
-          }
-          if (instrumentation_frame.interpreter_entry_) {
-            CHECK(m == Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
-          } else {
-            CHECK_EQ(m->GetNonObsoleteMethod(),
-                     instrumentation_frame.method_->GetNonObsoleteMethod())
-                << ArtMethod::PrettyMethod(m);
-          }
-          SetReturnPc(instrumentation_frame.return_pc_);
-          if (instrumentation_->ShouldNotifyMethodEnterExitEvents() &&
-              !m->IsRuntimeMethod()) {
-            // Create the method exit events. As the methods didn't really exit the result is 0.
-            // We only do this if no debugger is attached to prevent from posting events twice.
-            instrumentation_->MethodExitEvent(thread_, instrumentation_frame.this_object_, m,
-                                              GetDexPc(), JValue());
-          }
-          frames_removed_++;
-          removed_stub = true;
-          break;
+      auto it = instrumentation_stack_->find(GetReturnPcAddr());
+      if (it != instrumentation_stack_->end()) {
+        const InstrumentationStackFrame& instrumentation_frame = it->second;
+        if (kVerboseInstrumentation) {
+          LOG(INFO) << "  Removing exit stub in " << DescribeLocation();
         }
-      }
-      if (!removed_stub) {
+        if (instrumentation_frame.interpreter_entry_) {
+          CHECK(m == Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs));
+        } else {
+          CHECK_EQ(m->GetNonObsoleteMethod(),
+                   instrumentation_frame.method_->GetNonObsoleteMethod())
+              << ArtMethod::PrettyMethod(m)
+              << " and " << instrumentation_frame.method_->GetNonObsoleteMethod()->PrettyMethod();
+        }
+        SetReturnPc(instrumentation_frame.return_pc_);
+        if (instrumentation_->ShouldNotifyMethodEnterExitEvents() &&
+            !m->IsRuntimeMethod()) {
+          // Create the method exit events. As the methods didn't really exit the result is 0.
+          // We only do this if no debugger is attached to prevent from posting events twice.
+          JValue val;
+          instrumentation_->MethodExitEvent(thread_, instrumentation_frame.this_object_, m,
+                                            GetDexPc(), OptionalFrame{}, val);
+        }
+        frames_removed_++;
+      } else {
         if (kVerboseInstrumentation) {
           LOG(INFO) << "  No exit stub in " << DescribeLocation();
         }
@@ -500,7 +523,7 @@
     Thread* const thread_;
     const uintptr_t instrumentation_exit_pc_;
     Instrumentation* const instrumentation_;
-    std::deque<instrumentation::InstrumentationStackFrame>* const instrumentation_stack_;
+    std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* const instrumentation_stack_;
     size_t frames_removed_;
   };
   if (kVerboseInstrumentation) {
@@ -508,7 +531,8 @@
     thread->GetThreadName(thread_name);
     LOG(INFO) << "Removing exit stubs in " << thread_name;
   }
-  std::deque<instrumentation::InstrumentationStackFrame>* stack = thread->GetInstrumentationStack();
+  std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* stack =
+      thread->GetInstrumentationStack();
   if (stack->size() > 0) {
     Instrumentation* instrumentation = reinterpret_cast<Instrumentation*>(arg);
     uintptr_t instrumentation_exit_pc =
@@ -516,12 +540,21 @@
     RestoreStackVisitor visitor(thread, instrumentation_exit_pc, instrumentation);
     visitor.WalkStack(true);
     CHECK_EQ(visitor.frames_removed_, stack->size());
-    while (stack->size() > 0) {
-      stack->pop_front();
-    }
+    stack->clear();
   }
 }
 
+void Instrumentation::DeoptimizeAllThreadFrames() {
+  Thread* self = Thread::Current();
+  MutexLock mu(self, *Locks::thread_list_lock_);
+  ThreadList* tl = Runtime::Current()->GetThreadList();
+  tl->ForEach([&](Thread* t) {
+    Locks::mutator_lock_->AssertExclusiveHeld(self);
+    InstrumentThreadStack(t);
+  });
+  current_force_deopt_id_++;
+}
+
 static bool HasEvent(Instrumentation::InstrumentationEvent expected, uint32_t events) {
   return (events & expected) != 0;
 }
@@ -783,10 +816,28 @@
     }
     if (empty) {
       MutexLock mu(self, *Locks::thread_list_lock_);
-      Runtime::Current()->GetThreadList()->ForEach(InstrumentationRestoreStack, this);
-      // Only do this after restoring, as walking the stack when restoring will see
-      // the instrumentation exit pc.
-      instrumentation_stubs_installed_ = false;
+      bool no_remaining_deopts = true;
+      // Check that there are no other forced deoptimizations. Do it here so we only need to lock
+      // thread_list_lock once.
+      // The compiler gets confused on the thread annotations, so use
+      // NO_THREAD_SAFETY_ANALYSIS. Note that we hold the mutator lock
+      // exclusively at this point.
+      Locks::mutator_lock_->AssertExclusiveHeld(self);
+      runtime->GetThreadList()->ForEach([&](Thread* t) NO_THREAD_SAFETY_ANALYSIS {
+        no_remaining_deopts =
+            no_remaining_deopts && !t->IsForceInterpreter() &&
+            std::all_of(t->GetInstrumentationStack()->cbegin(),
+                        t->GetInstrumentationStack()->cend(),
+                        [&](const auto& frame) REQUIRES_SHARED(Locks::mutator_lock_) {
+                          return frame.second.force_deopt_id_ == current_force_deopt_id_;
+                        });
+      });
+      if (no_remaining_deopts) {
+        Runtime::Current()->GetThreadList()->ForEach(InstrumentationRestoreStack, this);
+        // Only do this after restoring, as walking the stack when restoring will see
+        // the instrumentation exit pc.
+        instrumentation_stubs_installed_ = false;
+      }
     }
   }
 }
@@ -1164,35 +1215,52 @@
   }
 }
 
+template <>
 void Instrumentation::MethodExitEventImpl(Thread* thread,
                                           ObjPtr<mirror::Object> this_object,
                                           ArtMethod* method,
                                           uint32_t dex_pc,
-                                          const JValue& return_value) const {
+                                          OptionalFrame frame,
+                                          MutableHandle<mirror::Object>& return_value) const {
   if (HasMethodExitListeners()) {
     Thread* self = Thread::Current();
-    StackHandleScope<2> hs(self);
+    StackHandleScope<1> hs(self);
     Handle<mirror::Object> thiz(hs.NewHandle(this_object));
-    if (method->GetInterfaceMethodIfProxy(kRuntimePointerSize)
-              ->GetReturnTypePrimitive() != Primitive::kPrimNot) {
-      for (InstrumentationListener* listener : method_exit_listeners_) {
-        if (listener != nullptr) {
-          listener->MethodExited(thread, thiz, method, dex_pc, return_value);
-        }
-      }
-    } else {
-      Handle<mirror::Object> ret(hs.NewHandle(return_value.GetL()));
-      for (InstrumentationListener* listener : method_exit_listeners_) {
-        if (listener != nullptr) {
-          listener->MethodExited(thread, thiz, method, dex_pc, ret);
-        }
+    for (InstrumentationListener* listener : method_exit_listeners_) {
+      if (listener != nullptr) {
+        listener->MethodExited(thread, thiz, method, dex_pc, frame, return_value);
       }
     }
   }
 }
 
+template<> void Instrumentation::MethodExitEventImpl(Thread* thread,
+                                                     ObjPtr<mirror::Object> this_object,
+                                                     ArtMethod* method,
+                                                     uint32_t dex_pc,
+                                                     OptionalFrame frame,
+                                                     JValue& return_value) const {
+  if (HasMethodExitListeners()) {
+    Thread* self = Thread::Current();
+    StackHandleScope<2> hs(self);
+    Handle<mirror::Object> thiz(hs.NewHandle(this_object));
+    if (method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetReturnTypePrimitive() !=
+        Primitive::kPrimNot) {
+      for (InstrumentationListener* listener : method_exit_listeners_) {
+        if (listener != nullptr) {
+          listener->MethodExited(thread, thiz, method, dex_pc, frame, return_value);
+        }
+      }
+    } else {
+      MutableHandle<mirror::Object> ret(hs.NewHandle(return_value.GetL()));
+      MethodExitEventImpl(thread, thiz.Get(), method, dex_pc, frame, ret);
+      return_value.SetL(ret.Get());
+    }
+  }
+}
+
 void Instrumentation::MethodUnwindEvent(Thread* thread,
-                                        mirror::Object* this_object,
+                                        ObjPtr<mirror::Object> this_object,
                                         ArtMethod* method,
                                         uint32_t dex_pc) const {
   if (HasMethodUnwindListeners()) {
@@ -1281,7 +1349,7 @@
 }
 
 void Instrumentation::ExceptionThrownEvent(Thread* thread,
-                                           mirror::Throwable* exception_object) const {
+                                           ObjPtr<mirror::Throwable> exception_object) const {
   Thread* self = Thread::Current();
   StackHandleScope<1> hs(self);
   Handle<mirror::Throwable> h_exception(hs.NewHandle(exception_object));
@@ -1300,7 +1368,7 @@
 }
 
 void Instrumentation::ExceptionHandledEvent(Thread* thread,
-                                            mirror::Throwable* exception_object) const {
+                                            ObjPtr<mirror::Throwable> exception_object) const {
   Thread* self = Thread::Current();
   StackHandleScope<1> hs(self);
   Handle<mirror::Throwable> h_exception(hs.NewHandle(exception_object));
@@ -1315,32 +1383,15 @@
   }
 }
 
-// Computes a frame ID by ignoring inlined frames.
-size_t Instrumentation::ComputeFrameId(Thread* self,
-                                       size_t frame_depth,
-                                       size_t inlined_frames_before_frame) {
-  CHECK_GE(frame_depth, inlined_frames_before_frame);
-  size_t no_inline_depth = frame_depth - inlined_frames_before_frame;
-  return StackVisitor::ComputeNumFrames(self, kInstrumentationStackWalk) - no_inline_depth;
-}
-
-static void CheckStackDepth(Thread* self, const InstrumentationStackFrame& instrumentation_frame,
-                            int delta)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  size_t frame_id = StackVisitor::ComputeNumFrames(self, kInstrumentationStackWalk) + delta;
-  if (frame_id != instrumentation_frame.frame_id_) {
-    LOG(ERROR) << "Expected frame_id=" << frame_id << " but found "
-        << instrumentation_frame.frame_id_;
-    StackVisitor::DescribeStack(self);
-    CHECK_EQ(frame_id, instrumentation_frame.frame_id_);
-  }
-}
-
-void Instrumentation::PushInstrumentationStackFrame(Thread* self, mirror::Object* this_object,
+void Instrumentation::PushInstrumentationStackFrame(Thread* self,
+                                                    ObjPtr<mirror::Object> this_object,
                                                     ArtMethod* method,
-                                                    uintptr_t lr, bool interpreter_entry) {
+                                                    uintptr_t stack_ptr,
+                                                    uintptr_t lr,
+                                                    bool interpreter_entry) {
   DCHECK(!self->IsExceptionPending());
-  std::deque<instrumentation::InstrumentationStackFrame>* stack = self->GetInstrumentationStack();
+  std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* stack =
+      self->GetInstrumentationStack();
   if (kVerboseInstrumentation) {
     LOG(INFO) << "Entering " << ArtMethod::PrettyMethod(method) << " from PC "
               << reinterpret_cast<void*>(lr);
@@ -1362,9 +1413,9 @@
   DCHECK(!self->IsExceptionPending());
   size_t frame_id = StackVisitor::ComputeNumFrames(self, kInstrumentationStackWalk);
 
-  instrumentation::InstrumentationStackFrame instrumentation_frame(h_this.Get(), method, lr,
-                                                                   frame_id, interpreter_entry);
-  stack->push_front(instrumentation_frame);
+  instrumentation::InstrumentationStackFrame instrumentation_frame(
+      h_this.Get(), method, lr, frame_id, interpreter_entry, current_force_deopt_id_);
+  stack->insert({stack_ptr, instrumentation_frame});
 }
 
 DeoptimizationMethodType Instrumentation::GetDeoptimizationMethodType(ArtMethod* method) {
@@ -1446,20 +1497,24 @@
 }
 
 TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self,
-                                                            uintptr_t* return_pc,
+                                                            uintptr_t* return_pc_addr,
                                                             uint64_t* gpr_result,
                                                             uint64_t* fpr_result) {
   DCHECK(gpr_result != nullptr);
   DCHECK(fpr_result != nullptr);
   // Do the pop.
-  std::deque<instrumentation::InstrumentationStackFrame>* stack = self->GetInstrumentationStack();
+  std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* stack =
+      self->GetInstrumentationStack();
   CHECK_GT(stack->size(), 0U);
-  InstrumentationStackFrame instrumentation_frame = stack->front();
-  stack->pop_front();
+  auto it = stack->find(reinterpret_cast<uintptr_t>(return_pc_addr));
+  CHECK(it != stack->end());
+  InstrumentationStackFrame instrumentation_frame = it->second;
+  stack->erase(it);
 
   // Set return PC and check the sanity of the stack.
-  *return_pc = instrumentation_frame.return_pc_;
-  CheckStackDepth(self, instrumentation_frame, 0);
+  // We don't cache the return pc value in a local as it may change after
+  // sending a method exit event.
+  *return_pc_addr = instrumentation_frame.return_pc_;
   self->VerifyStack();
 
   ArtMethod* method = instrumentation_frame.method_;
@@ -1511,9 +1566,11 @@
   // TODO: improve the dex pc information here, requires knowledge of current PC as opposed to
   //       return_pc.
   uint32_t dex_pc = dex::kDexNoIndex;
-  mirror::Object* this_object = instrumentation_frame.this_object_;
   if (!method->IsRuntimeMethod() && !instrumentation_frame.interpreter_entry_) {
-    MethodExitEvent(self, this_object, instrumentation_frame.method_, dex_pc, return_value);
+    ObjPtr<mirror::Object> this_object = instrumentation_frame.this_object_;
+    // Note that sending the event may change the contents of *return_pc_addr.
+    MethodExitEvent(
+        self, this_object, instrumentation_frame.method_, dex_pc, OptionalFrame{}, return_value);
   }
 
   // Deoptimize if the caller needs to continue execution in the interpreter. Do nothing if we get
@@ -1523,12 +1580,19 @@
   bool deoptimize = (visitor.caller != nullptr) &&
                     (interpreter_stubs_installed_ || IsDeoptimized(visitor.caller) ||
                     self->IsForceInterpreter() ||
+                    // NB Since structurally obsolete compiled methods might have the offsets of
+                    // methods/fields compiled in we need to go back to interpreter whenever we hit
+                    // them.
+                    visitor.caller->GetDeclaringClass()->IsObsoleteObject() ||
+                    // Check if we forced all threads to deoptimize in the time between this frame
+                    // being created and now.
+                    instrumentation_frame.force_deopt_id_ != current_force_deopt_id_ ||
                     Dbg::IsForcedInterpreterNeededForUpcall(self, visitor.caller));
   if (is_ref) {
     // Restore the return value if it's a reference since it might have moved.
     *reinterpret_cast<mirror::Object**>(gpr_result) = res.Get();
   }
-  if (deoptimize && Runtime::Current()->IsAsyncDeoptimizeable(*return_pc)) {
+  if (deoptimize && Runtime::Current()->IsAsyncDeoptimizeable(*return_pc_addr)) {
     if (kVerboseInstrumentation) {
       LOG(INFO) << "Deoptimizing "
                 << visitor.caller->PrettyMethod()
@@ -1545,50 +1609,43 @@
                                     /* exception= */ nullptr ,
                                     /* from_code= */ false,
                                     deopt_method_type);
-    return GetTwoWordSuccessValue(*return_pc,
+    return GetTwoWordSuccessValue(*return_pc_addr,
                                   reinterpret_cast<uintptr_t>(GetQuickDeoptimizationEntryPoint()));
   } else {
-    if (deoptimize && !Runtime::Current()->IsAsyncDeoptimizeable(*return_pc)) {
+    if (deoptimize && !Runtime::Current()->IsAsyncDeoptimizeable(*return_pc_addr)) {
       VLOG(deopt) << "Got a deoptimization request on un-deoptimizable " << method->PrettyMethod()
-                  << " at PC " << reinterpret_cast<void*>(*return_pc);
+                  << " at PC " << reinterpret_cast<void*>(*return_pc_addr);
     }
     if (kVerboseInstrumentation) {
       LOG(INFO) << "Returning from " << method->PrettyMethod()
-                << " to PC " << reinterpret_cast<void*>(*return_pc);
+                << " to PC " << reinterpret_cast<void*>(*return_pc_addr);
     }
-    return GetTwoWordSuccessValue(0, *return_pc);
+    return GetTwoWordSuccessValue(0, *return_pc_addr);
   }
 }
 
-uintptr_t Instrumentation::PopFramesForDeoptimization(Thread* self, size_t nframes) const {
-  std::deque<instrumentation::InstrumentationStackFrame>* stack = self->GetInstrumentationStack();
-  CHECK_GE(stack->size(), nframes);
-  if (nframes == 0) {
-    return 0u;
-  }
-  // Only need to send instrumentation events if it's not for deopt (do give the log messages if we
-  // have verbose-instrumentation anyway though).
-  if (kVerboseInstrumentation) {
-    for (size_t i = 0; i < nframes; i++) {
-      LOG(INFO) << "Popping for deoptimization " << stack->at(i).method_->PrettyMethod();
+uintptr_t Instrumentation::PopFramesForDeoptimization(Thread* self, uintptr_t pop_until) const {
+  std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* stack =
+      self->GetInstrumentationStack();
+  // Pop all instrumentation frames below `pop_until`.
+  uintptr_t return_pc = 0u;
+  for (auto i = stack->begin(); i != stack->end() && i->first <= pop_until;) {
+    auto e = i;
+    ++i;
+    if (kVerboseInstrumentation) {
+      LOG(INFO) << "Popping for deoptimization " << e->second.method_->PrettyMethod();
     }
+    return_pc = e->second.return_pc_;
+    stack->erase(e);
   }
-  // Now that we've sent all the instrumentation events we can actually modify the
-  // instrumentation-stack. We cannot do this earlier since MethodUnwindEvent can re-enter java and
-  // do other things that require the instrumentation stack to be in a consistent state with the
-  // actual stack.
-  for (size_t i = 0; i < nframes - 1; i++) {
-    stack->pop_front();
-  }
-  uintptr_t return_pc = stack->front().return_pc_;
-  stack->pop_front();
   return return_pc;
 }
 
 std::string InstrumentationStackFrame::Dump() const {
   std::ostringstream os;
   os << "Frame " << frame_id_ << " " << ArtMethod::PrettyMethod(method_) << ":"
-      << reinterpret_cast<void*>(return_pc_) << " this=" << reinterpret_cast<void*>(this_object_);
+      << reinterpret_cast<void*>(return_pc_) << " this=" << reinterpret_cast<void*>(this_object_)
+      << " force_deopt_id=" << force_deopt_id_;
   return os.str();
 }
 
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 27918ea..e30fc9a 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -17,10 +17,12 @@
 #ifndef ART_RUNTIME_INSTRUMENTATION_H_
 #define ART_RUNTIME_INSTRUMENTATION_H_
 
+#include <functional>
 #include <stdint.h>
 #include <list>
 #include <memory>
 #include <unordered_set>
+#include <optional>
 
 #include "arch/instruction_set.h"
 #include "base/enums.h"
@@ -60,6 +62,11 @@
 // application's performance.
 static constexpr bool kDeoptimizeForAccurateMethodEntryExitListeners = true;
 
+// an optional frame is either Some(const ShadowFrame& current_frame) or None depending on if the
+// method being exited has a shadow-frame associed with the current stack frame. In cases where
+// there is no shadow-frame associated with this stack frame this will be None.
+using OptionalFrame = std::optional<std::reference_wrapper<const ShadowFrame>>;
+
 // Instrumentation event listener API. Registered listeners will get the appropriate call back for
 // the events they are listening for. The call backs supply the thread, method and dex_pc the event
 // occurred upon. The thread may or may not be Thread::Current().
@@ -77,7 +84,8 @@
                             Handle<mirror::Object> this_object,
                             ArtMethod* method,
                             uint32_t dex_pc,
-                            Handle<mirror::Object> return_value)
+                            OptionalFrame frame,
+                            MutableHandle<mirror::Object>& return_value)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Call-back for when a method is exited. The implementor should either handler-ize the return
@@ -87,7 +95,8 @@
                             Handle<mirror::Object> this_object,
                             ArtMethod* method,
                             uint32_t dex_pc,
-                            const JValue& return_value)
+                            OptionalFrame frame,
+                            JValue& return_value)
       REQUIRES_SHARED(Locks::mutator_lock_) = 0;
 
   // Call-back for when a method is popped due to an exception throw. A method will either cause a
@@ -160,16 +169,17 @@
   explicit InstrumentationStackPopper(Thread* self);
   ~InstrumentationStackPopper() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Increase the number of frames being popped to 'desired_pops' return true if the frames were
-  // popped without any exceptions, false otherwise. The exception that caused the pop is
-  // 'exception'.
-  bool PopFramesTo(uint32_t desired_pops, /*in-out*/MutableHandle<mirror::Throwable>& exception)
+  // Increase the number of frames being popped up to `stack_pointer`. Return true if the
+  // frames were popped without any exceptions, false otherwise. The exception that caused
+  // the pop is 'exception'.
+  bool PopFramesTo(uintptr_t stack_pointer, /*in-out*/MutableHandle<mirror::Throwable>& exception)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
   Thread* self_;
   Instrumentation* instrumentation_;
-  uint32_t frames_to_remove_;
+  // The stack pointer limit for frames to pop.
+  uintptr_t pop_until_;
 };
 
 // Instrumentation is a catch-all for when extra information is required from the runtime. The
@@ -388,8 +398,10 @@
 
   // Inform listeners that a method has been entered. A dex PC is provided as we may install
   // listeners into executing code and get method enter events for methods already on the stack.
-  void MethodEnterEvent(Thread* thread, mirror::Object* this_object,
-                        ArtMethod* method, uint32_t dex_pc) const
+  void MethodEnterEvent(Thread* thread,
+                        ObjPtr<mirror::Object> this_object,
+                        ArtMethod* method,
+                        uint32_t dex_pc) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (UNLIKELY(HasMethodEntryListeners())) {
       MethodEnterEventImpl(thread, this_object, method, dex_pc);
@@ -397,25 +409,31 @@
   }
 
   // Inform listeners that a method has been exited.
+  template<typename T>
   void MethodExitEvent(Thread* thread,
-                       mirror::Object* this_object,
+                       ObjPtr<mirror::Object> this_object,
                        ArtMethod* method,
                        uint32_t dex_pc,
-                       const JValue& return_value) const
+                       OptionalFrame frame,
+                       T& return_value) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (UNLIKELY(HasMethodExitListeners())) {
-      MethodExitEventImpl(thread, this_object, method, dex_pc, return_value);
+      MethodExitEventImpl(thread, this_object, method, dex_pc, frame, return_value);
     }
   }
 
   // Inform listeners that a method has been exited due to an exception.
-  void MethodUnwindEvent(Thread* thread, mirror::Object* this_object,
-                         ArtMethod* method, uint32_t dex_pc) const
+  void MethodUnwindEvent(Thread* thread,
+                         ObjPtr<mirror::Object> this_object,
+                         ArtMethod* method,
+                         uint32_t dex_pc) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Inform listeners that the dex pc has moved (only supported by the interpreter).
-  void DexPcMovedEvent(Thread* thread, mirror::Object* this_object,
-                       ArtMethod* method, uint32_t dex_pc) const
+  void DexPcMovedEvent(Thread* thread,
+                       ObjPtr<mirror::Object> this_object,
+                       ArtMethod* method,
+                       uint32_t dex_pc) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (UNLIKELY(HasDexPcListeners())) {
       DexPcMovedEventImpl(thread, this_object, method, dex_pc);
@@ -431,8 +449,10 @@
   }
 
   // Inform listeners that we read a field (only supported by the interpreter).
-  void FieldReadEvent(Thread* thread, mirror::Object* this_object,
-                      ArtMethod* method, uint32_t dex_pc,
+  void FieldReadEvent(Thread* thread,
+                      ObjPtr<mirror::Object> this_object,
+                      ArtMethod* method,
+                      uint32_t dex_pc,
                       ArtField* field) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (UNLIKELY(HasFieldReadListeners())) {
@@ -441,9 +461,12 @@
   }
 
   // Inform listeners that we write a field (only supported by the interpreter).
-  void FieldWriteEvent(Thread* thread, mirror::Object* this_object,
-                       ArtMethod* method, uint32_t dex_pc,
-                       ArtField* field, const JValue& field_value) const
+  void FieldWriteEvent(Thread* thread,
+                       ObjPtr<mirror::Object> this_object,
+                       ArtMethod* method,
+                       uint32_t dex_pc,
+                       ArtField* field,
+                       const JValue& field_value) const
       REQUIRES_SHARED(Locks::mutator_lock_) {
     if (UNLIKELY(HasFieldWriteListeners())) {
       FieldWriteEventImpl(thread, this_object, method, dex_pc, field, field_value);
@@ -459,18 +482,21 @@
   }
 
   // Inform listeners that an exception was thrown.
-  void ExceptionThrownEvent(Thread* thread, mirror::Throwable* exception_object) const
+  void ExceptionThrownEvent(Thread* thread, ObjPtr<mirror::Throwable> exception_object) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Inform listeners that an exception has been handled. This is not sent for native code or for
   // exceptions which reach the end of the thread's stack.
-  void ExceptionHandledEvent(Thread* thread, mirror::Throwable* exception_object) const
+  void ExceptionHandledEvent(Thread* thread, ObjPtr<mirror::Throwable> exception_object) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Called when an instrumented method is entered. The intended link register (lr) is saved so
   // that returning causes a branch to the method exit stub. Generates method enter events.
-  void PushInstrumentationStackFrame(Thread* self, mirror::Object* this_object,
-                                     ArtMethod* method, uintptr_t lr,
+  void PushInstrumentationStackFrame(Thread* self,
+                                     ObjPtr<mirror::Object> this_object,
+                                     ArtMethod* method,
+                                     uintptr_t stack_pointer,
+                                     uintptr_t lr,
                                      bool interpreter_entry)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -483,17 +509,19 @@
   // result values of the function are stored. Both pointers must always be valid but the values
   // held there will only be meaningful if interpreted as the appropriate type given the function
   // being returned from.
-  TwoWordReturn PopInstrumentationStackFrame(Thread* self, uintptr_t* return_pc,
-                                             uint64_t* gpr_result, uint64_t* fpr_result)
+  TwoWordReturn PopInstrumentationStackFrame(Thread* self,
+                                             uintptr_t* return_pc_addr,
+                                             uint64_t* gpr_result,
+                                             uint64_t* fpr_result)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!GetDeoptimizedMethodsLock());
 
   // Pops nframes instrumentation frames from the current thread. Returns the return pc for the last
   // instrumentation frame that's popped.
-  uintptr_t PopFramesForDeoptimization(Thread* self, size_t nframes) const
+  uintptr_t PopFramesForDeoptimization(Thread* self, uintptr_t stack_pointer) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Call back for configure stubs.
-  void InstallStubsForClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_)
+  void InstallStubsForClass(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!GetDeoptimizedMethodsLock());
 
   void InstallStubsForMethod(ArtMethod* method)
@@ -510,7 +538,11 @@
   // This is used by the debugger to cause a deoptimization of the thread's stack after updating
   // local variable(s).
   void InstrumentThreadStack(Thread* thread)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES(Locks::mutator_lock_);
+
+  // Force all currently running frames to be deoptimized back to interpreter. This should only be
+  // used in cases where basically all compiled code has been invalidated.
+  void DeoptimizeAllThreadFrames() REQUIRES(art::Locks::mutator_lock_);
 
   static size_t ComputeFrameId(Thread* self,
                                size_t frame_depth,
@@ -570,11 +602,13 @@
                             ArtMethod* method,
                             uint32_t dex_pc) const
       REQUIRES_SHARED(Locks::mutator_lock_);
+  template <typename T>
   void MethodExitEventImpl(Thread* thread,
                            ObjPtr<mirror::Object> this_object,
                            ArtMethod* method,
                            uint32_t dex_pc,
-                           const JValue& return_value) const
+                           OptionalFrame frame,
+                           T& return_value) const
       REQUIRES_SHARED(Locks::mutator_lock_);
   void DexPcMovedEventImpl(Thread* thread,
                            ObjPtr<mirror::Object> this_object,
@@ -617,6 +651,11 @@
     return deoptimized_methods_lock_.get();
   }
 
+  // A counter that's incremented every time a DeoptimizeAllFrames. We check each
+  // InstrumentationStackFrames creation id against this number and if they differ we deopt even if
+  // we could otherwise continue running.
+  uint64_t current_force_deopt_id_ GUARDED_BY(Locks::mutator_lock_);
+
   // Have we hijacked ArtMethod::code_ so that it calls instrumentation/interpreter code?
   bool instrumentation_stubs_installed_;
 
@@ -720,6 +759,7 @@
 
   friend class InstrumentationTest;  // For GetCurrentInstrumentationLevel and ConfigureStubs.
   friend class InstrumentationStackPopper;  // For popping instrumentation frames.
+  friend void InstrumentationInstallStack(Thread*, void*);
 
   DISALLOW_COPY_AND_ASSIGN(Instrumentation);
 };
@@ -732,12 +772,14 @@
                             ArtMethod* method,
                             uintptr_t return_pc,
                             size_t frame_id,
-                            bool interpreter_entry)
+                            bool interpreter_entry,
+                            uint64_t force_deopt_id)
       : this_object_(this_object),
         method_(method),
         return_pc_(return_pc),
         frame_id_(frame_id),
-        interpreter_entry_(interpreter_entry) {
+        interpreter_entry_(interpreter_entry),
+        force_deopt_id_(force_deopt_id) {
   }
 
   std::string Dump() const REQUIRES_SHARED(Locks::mutator_lock_);
@@ -747,6 +789,7 @@
   uintptr_t return_pc_;
   size_t frame_id_;
   bool interpreter_entry_;
+  uint64_t force_deopt_id_;
 };
 
 }  // namespace instrumentation
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index cf5d3ed..6284299 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -16,6 +16,7 @@
 
 #include "instrumentation.h"
 
+#include "android-base/macros.h"
 #include "art_method-inl.h"
 #include "base/enums.h"
 #include "class_linker-inl.h"
@@ -66,7 +67,8 @@
                     Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                     ArtMethod* method ATTRIBUTE_UNUSED,
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
-                    Handle<mirror::Object> return_value ATTRIBUTE_UNUSED)
+                    instrumentation::OptionalFrame frame ATTRIBUTE_UNUSED,
+                    MutableHandle<mirror::Object>& return_value ATTRIBUTE_UNUSED)
       override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_method_exit_object_event = true;
   }
@@ -75,7 +77,8 @@
                     Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                     ArtMethod* method ATTRIBUTE_UNUSED,
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
-                    const JValue& return_value ATTRIBUTE_UNUSED)
+                    instrumentation::OptionalFrame frame ATTRIBUTE_UNUSED,
+                    JValue& return_value ATTRIBUTE_UNUSED)
       override REQUIRES_SHARED(Locks::mutator_lock_) {
     received_method_exit_event = true;
   }
@@ -393,7 +396,7 @@
         break;
       case instrumentation::Instrumentation::kMethodExited: {
         JValue value;
-        instr->MethodExitEvent(self, obj, method, dex_pc, value);
+        instr->MethodExitEvent(self, obj, method, dex_pc, {}, value);
         break;
       }
       case instrumentation::Instrumentation::kMethodUnwind:
@@ -520,7 +523,8 @@
   Runtime* const runtime = Runtime::Current();
   ClassLinker* class_linker = runtime->GetClassLinker();
   StackHandleScope<1> hs(soa.Self());
-  Handle<mirror::ClassLoader> loader(hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader)));
+  MutableHandle<mirror::ClassLoader> loader(
+      hs.NewHandle(soa.Decode<mirror::ClassLoader>(class_loader)));
   ObjPtr<mirror::Class> klass = class_linker->FindClass(soa.Self(), "LInstrumentation;", loader);
   ASSERT_TRUE(klass != nullptr);
   ArtMethod* method =
diff --git a/runtime/intern_table-inl.h b/runtime/intern_table-inl.h
index 6fc53e9..687f5ee 100644
--- a/runtime/intern_table-inl.h
+++ b/runtime/intern_table-inl.h
@@ -19,8 +19,9 @@
 
 #include "intern_table.h"
 
-// Required for ToModifiedUtf8 below.
-#include "mirror/string-inl.h"
+#include "gc/space/image_space.h"
+#include "image.h"
+#include "mirror/string-inl.h"  // Required for ToModifiedUtf8 below.
 
 namespace art {
 
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 9ac9927..96f70d1 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -275,10 +275,25 @@
   return Insert(s, true, true);
 }
 
+void InternTable::PromoteWeakToStrong() {
+  MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
+  DCHECK_EQ(weak_interns_.tables_.size(), 1u);
+  for (GcRoot<mirror::String>& entry : weak_interns_.tables_.front().set_) {
+    DCHECK(LookupStrongLocked(entry.Read()) == nullptr);
+    InsertStrong(entry.Read());
+  }
+  weak_interns_.tables_.front().set_.clear();
+}
+
 ObjPtr<mirror::String> InternTable::InternStrong(ObjPtr<mirror::String> s) {
   return Insert(s, true, false);
 }
 
+ObjPtr<mirror::String> InternTable::InternWeak(const char* utf8_data) {
+  DCHECK(utf8_data != nullptr);
+  return InternWeak(mirror::String::AllocFromModifiedUtf8(Thread::Current(), utf8_data));
+}
+
 ObjPtr<mirror::String> InternTable::InternWeak(ObjPtr<mirror::String> s) {
   return Insert(s, false, false);
 }
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 165d56c..7065015 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -17,9 +17,6 @@
 #ifndef ART_RUNTIME_INTERN_TABLE_H_
 #define ART_RUNTIME_INTERN_TABLE_H_
 
-#include <unordered_set>
-
-#include "base/atomic.h"
 #include "base/allocator.h"
 #include "base/hash_set.h"
 #include "base/mutex.h"
@@ -119,6 +116,9 @@
   ObjPtr<mirror::String> InternStrongImageString(ObjPtr<mirror::String> s)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Only used by image writer. Promote all weak interns to strong interns.
+  void PromoteWeakToStrong() REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Interns a potentially new string in the 'strong' table. May cause thread suspension.
   ObjPtr<mirror::String> InternStrong(const char* utf8_data) REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
@@ -129,6 +129,10 @@
       REQUIRES(!Roles::uninterruptible_);
 
   // Interns a potentially new string in the 'weak' table. May cause thread suspension.
+  ObjPtr<mirror::String> InternWeak(const char* utf8_data) REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!Roles::uninterruptible_);
+
+  // Interns a potentially new string in the 'weak' table. May cause thread suspension.
   ObjPtr<mirror::String> InternWeak(ObjPtr<mirror::String> s) REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
diff --git a/runtime/interpreter/cfi_asm_support.h b/runtime/interpreter/cfi_asm_support.h
index c1e5fb5..04812e1 100644
--- a/runtime/interpreter/cfi_asm_support.h
+++ b/runtime/interpreter/cfi_asm_support.h
@@ -44,9 +44,16 @@
     0x0c /* DW_OP_const4u */, 0x44, 0x45, 0x58, 0x31, /* magic = "DEX1" */     \
     0x13 /* DW_OP_drop */,                                                     \
     0x92 /* DW_OP_bregx */, dexReg, (dexOffset & 0x7F) /* 1-byte SLEB128 */
+
+  #define CFI_DEFINE_CFA_DEREF(reg, offset, size) .cfi_escape                  \
+    0x0f /* DW_CFA_expression */, 6 /* size */,                                \
+    0x92 /* bregx */, reg, (offset & 0x7F),                                    \
+    0x06 /* DW_OP_DEREF */,                                                    \
+    0x23 /* DW_OP_plus_uconst */, size
 #else
   // Mac OS doesn't like cfi_* directives.
   #define CFI_DEFINE_DEX_PC_WITH_OFFSET(tmpReg, dexReg, dexOffset)
+  #define CFI_DEFINE_CFA_DEREF(reg, offset)
 #endif
 
 #endif  // ART_RUNTIME_INTERPRETER_CFI_ASM_SUPPORT_H_
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index db116f5..b5e5238 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -275,17 +275,38 @@
                                         method,
                                         0);
       if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
-        // The caller will retry this invoke. Just return immediately without any value.
+        // The caller will retry this invoke or ignore the result. Just return immediately without
+        // any value.
         DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
-        DCHECK(PrevFrameWillRetry(self, shadow_frame));
-        return JValue();
+        JValue ret = JValue();
+        bool res = PerformNonStandardReturn<MonitorState::kNoMonitorsLocked>(
+            self,
+            shadow_frame,
+            ret,
+            instrumentation,
+            accessor.InsSize(),
+            0);
+        DCHECK(res) << "Expected to perform non-standard return!";
+        return ret;
       }
       if (UNLIKELY(self->IsExceptionPending())) {
         instrumentation->MethodUnwindEvent(self,
                                            shadow_frame.GetThisObject(accessor.InsSize()),
                                            method,
                                            0);
-        return JValue();
+        JValue ret = JValue();
+        if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
+          DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
+          bool res = PerformNonStandardReturn<MonitorState::kNoMonitorsLocked>(
+              self,
+              shadow_frame,
+              ret,
+              instrumentation,
+              accessor.InsSize(),
+              0);
+          DCHECK(res) << "Expected to perform non-standard return!";
+        }
+        return ret;
       }
     }
 
@@ -321,6 +342,7 @@
   DCHECK(!method->SkipAccessChecks() || !method->MustCountLocks());
 
   bool transaction_active = Runtime::Current()->IsActiveTransaction();
+  VLOG(interpreter) << "Interpreting " << method->PrettyMethod();
   if (LIKELY(method->SkipAccessChecks())) {
     // Enter the "without access check" interpreter.
     if (kInterpreterImplKind == kMterpImplKind) {
@@ -368,12 +390,6 @@
   } else {
     // Enter the "with access check" interpreter.
 
-    // The boot classpath should really not have to run access checks.
-    DCHECK(method->GetDeclaringClass()->GetClassLoader() != nullptr
-           || Runtime::Current()->IsVerificationSoftFail()
-           || Runtime::Current()->IsAotCompiler())
-        << method->PrettyMethod();
-
     if (kInterpreterImplKind == kMterpImplKind) {
       // No access check variants for Mterp.  Just use the switch version.
       if (transaction_active) {
@@ -474,14 +490,18 @@
   }
   self->EndAssertNoThreadSuspension(old_cause);
   // Do this after populating the shadow frame in case EnsureInitialized causes a GC.
-  if (method->IsStatic() && UNLIKELY(!method->GetDeclaringClass()->IsInitialized())) {
-    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-    StackHandleScope<1> hs(self);
-    Handle<mirror::Class> h_class(hs.NewHandle(method->GetDeclaringClass()));
-    if (UNLIKELY(!class_linker->EnsureInitialized(self, h_class, true, true))) {
-      CHECK(self->IsExceptionPending());
-      self->PopShadowFrame();
-      return;
+  if (method->IsStatic()) {
+    ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
+    if (UNLIKELY(!declaring_class->IsVisiblyInitialized())) {
+      StackHandleScope<1> hs(self);
+      Handle<mirror::Class> h_class(hs.NewHandle(declaring_class));
+      if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
+                        self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
+        CHECK(self->IsExceptionPending());
+        self->PopShadowFrame();
+        return;
+      }
+      DCHECK(h_class->IsInitializing());
     }
   }
   if (LIKELY(!method->IsNative())) {
@@ -654,16 +674,16 @@
   const bool is_static = method->IsStatic();
   if (is_static) {
     ObjPtr<mirror::Class> declaring_class = method->GetDeclaringClass();
-    if (UNLIKELY(!declaring_class->IsInitialized())) {
+    if (UNLIKELY(!declaring_class->IsVisiblyInitialized())) {
       StackHandleScope<1> hs(self);
-      HandleWrapperObjPtr<mirror::Class> h_declaring_class(hs.NewHandleWrapper(&declaring_class));
+      Handle<mirror::Class> h_class(hs.NewHandle(declaring_class));
       if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
-          self, h_declaring_class, true, true))) {
+                        self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
         DCHECK(self->IsExceptionPending());
         self->PopShadowFrame();
         return;
       }
-      CHECK(h_declaring_class->IsInitializing());
+      DCHECK(h_class->IsInitializing());
     }
   }
 
@@ -683,6 +703,7 @@
 
 void CheckInterpreterAsmConstants() {
   CheckMterpAsmConstants();
+  CheckNterpAsmConstants();
 }
 
 void InitInterpreterTls(Thread* self) {
diff --git a/runtime/interpreter/interpreter_cache.h b/runtime/interpreter/interpreter_cache.h
index 003ea6c..0ada562 100644
--- a/runtime/interpreter/interpreter_cache.h
+++ b/runtime/interpreter/interpreter_cache.h
@@ -45,10 +45,10 @@
 // Aligned to 16-bytes to make it easier to get the address of the cache
 // from assembly (it ensures that the offset is valid immediate value).
 class ALIGNED(16) InterpreterCache {
+ public:
   // Aligned since we load the whole entry in single assembly instruction.
   typedef std::pair<const void*, size_t> Entry ALIGNED(2 * sizeof(size_t));
 
- public:
   // 2x size increase/decrease corresponds to ~0.5% interpreter performance change.
   // Value of 256 has around 75% cache hit rate.
   static constexpr size_t kSize = 256;
@@ -77,6 +77,10 @@
     data_[IndexOf(key)] = Entry{key, value};
   }
 
+  std::array<Entry, kSize>& GetArray() {
+    return data_;
+  }
+
  private:
   bool IsCalledFromOwningThread();
 
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 30c4b90..4d964f1 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -24,6 +24,7 @@
 #include "debugger.h"
 #include "dex/dex_file_types.h"
 #include "entrypoints/runtime_asm_entrypoints.h"
+#include "handle.h"
 #include "intrinsics_enum.h"
 #include "jit/jit.h"
 #include "jvalue-inl.h"
@@ -81,7 +82,7 @@
   if (method->GetDeclaringClass()->IsStringClass() && method->IsConstructor()) {
     return false;
   }
-  if (method->IsStatic() && !method->GetDeclaringClass()->IsInitialized()) {
+  if (method->IsStatic() && !method->GetDeclaringClass()->IsVisiblyInitialized()) {
     return false;
   }
   ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
@@ -91,384 +92,56 @@
   return true;
 }
 
-template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
-         bool transaction_active>
-bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
-                uint16_t inst_data) {
-  const bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead);
-  const uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
-  ArtField* f =
-      FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
-                                                    Primitive::ComponentSize(field_type));
-  if (UNLIKELY(f == nullptr)) {
-    CHECK(self->IsExceptionPending());
-    return false;
-  }
-  ObjPtr<mirror::Object> obj;
-  if (is_static) {
-    obj = f->GetDeclaringClass();
-    if (transaction_active) {
-      if (Runtime::Current()->GetTransaction()->ReadConstraint(obj.Ptr(), f)) {
-        Runtime::Current()->AbortTransactionAndThrowAbortError(self, "Can't read static fields of "
-            + obj->PrettyTypeOf() + " since it does not belong to clinit's class.");
-        return false;
-      }
+template <typename T>
+bool SendMethodExitEvents(Thread* self,
+                          const instrumentation::Instrumentation* instrumentation,
+                          ShadowFrame& frame,
+                          ObjPtr<mirror::Object> thiz,
+                          ArtMethod* method,
+                          uint32_t dex_pc,
+                          T& result) {
+  bool had_event = false;
+  // We can get additional ForcePopFrame requests during handling of these events. We should
+  // respect these and send additional instrumentation events.
+  StackHandleScope<1> hs(self);
+  Handle<mirror::Object> h_thiz(hs.NewHandle(thiz));
+  do {
+    frame.SetForcePopFrame(false);
+    if (UNLIKELY(instrumentation->HasMethodExitListeners() && !frame.GetSkipMethodExitEvents())) {
+      had_event = true;
+      instrumentation->MethodExitEvent(
+          self, h_thiz.Get(), method, dex_pc, instrumentation::OptionalFrame{ frame }, result);
     }
+    // We don't send method-exit if it's a pop-frame. We still send frame_popped though.
+    if (UNLIKELY(frame.NeedsNotifyPop() && instrumentation->HasWatchedFramePopListeners())) {
+      had_event = true;
+      instrumentation->WatchedFramePopped(self, frame);
+    }
+  } while (UNLIKELY(frame.GetForcePopFrame()));
+  if (UNLIKELY(had_event)) {
+    return !self->IsExceptionPending();
   } else {
-    obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
-    if (UNLIKELY(obj == nullptr)) {
-      ThrowNullPointerExceptionForFieldAccess(f, true);
-      return false;
-    }
+    return true;
   }
-
-  JValue result;
-  if (UNLIKELY(!DoFieldGetCommon<field_type>(self, shadow_frame, obj, f, &result))) {
-    // Instrumentation threw an error!
-    CHECK(self->IsExceptionPending());
-    return false;
-  }
-  uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
-  switch (field_type) {
-    case Primitive::kPrimBoolean:
-      shadow_frame.SetVReg(vregA, result.GetZ());
-      break;
-    case Primitive::kPrimByte:
-      shadow_frame.SetVReg(vregA, result.GetB());
-      break;
-    case Primitive::kPrimChar:
-      shadow_frame.SetVReg(vregA, result.GetC());
-      break;
-    case Primitive::kPrimShort:
-      shadow_frame.SetVReg(vregA, result.GetS());
-      break;
-    case Primitive::kPrimInt:
-      shadow_frame.SetVReg(vregA, result.GetI());
-      break;
-    case Primitive::kPrimLong:
-      shadow_frame.SetVRegLong(vregA, result.GetJ());
-      break;
-    case Primitive::kPrimNot:
-      shadow_frame.SetVRegReference(vregA, result.GetL());
-      break;
-    default:
-      LOG(FATAL) << "Unreachable: " << field_type;
-      UNREACHABLE();
-  }
-  return true;
 }
 
-// Explicitly instantiate all DoFieldGet functions.
-#define EXPLICIT_DO_FIELD_GET_TEMPLATE_DECL(_find_type, _field_type, _do_check, _transaction_active) \
-  template bool DoFieldGet<_find_type, _field_type, _do_check, _transaction_active>(Thread* self, \
-                                                               ShadowFrame& shadow_frame, \
-                                                               const Instruction* inst, \
-                                                               uint16_t inst_data)
+template
+bool SendMethodExitEvents(Thread* self,
+                          const instrumentation::Instrumentation* instrumentation,
+                          ShadowFrame& frame,
+                          ObjPtr<mirror::Object> thiz,
+                          ArtMethod* method,
+                          uint32_t dex_pc,
+                          MutableHandle<mirror::Object>& result);
 
-#define EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(_find_type, _field_type)  \
-    EXPLICIT_DO_FIELD_GET_TEMPLATE_DECL(_find_type, _field_type, false, true);  \
-    EXPLICIT_DO_FIELD_GET_TEMPLATE_DECL(_find_type, _field_type, false, false);  \
-    EXPLICIT_DO_FIELD_GET_TEMPLATE_DECL(_find_type, _field_type, true, true);  \
-    EXPLICIT_DO_FIELD_GET_TEMPLATE_DECL(_find_type, _field_type, true, false);
-
-// iget-XXX
-EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(InstancePrimitiveRead, Primitive::kPrimBoolean)
-EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(InstancePrimitiveRead, Primitive::kPrimByte)
-EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(InstancePrimitiveRead, Primitive::kPrimChar)
-EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(InstancePrimitiveRead, Primitive::kPrimShort)
-EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(InstancePrimitiveRead, Primitive::kPrimInt)
-EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(InstancePrimitiveRead, Primitive::kPrimLong)
-EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(InstanceObjectRead, Primitive::kPrimNot)
-
-// sget-XXX
-EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(StaticPrimitiveRead, Primitive::kPrimBoolean)
-EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(StaticPrimitiveRead, Primitive::kPrimByte)
-EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(StaticPrimitiveRead, Primitive::kPrimChar)
-EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(StaticPrimitiveRead, Primitive::kPrimShort)
-EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(StaticPrimitiveRead, Primitive::kPrimInt)
-EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(StaticPrimitiveRead, Primitive::kPrimLong)
-EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(StaticObjectRead, Primitive::kPrimNot)
-
-#undef EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL
-#undef EXPLICIT_DO_FIELD_GET_TEMPLATE_DECL
-
-// Handles iget-quick, iget-wide-quick and iget-object-quick instructions.
-// Returns true on success, otherwise throws an exception and returns false.
-template<Primitive::Type field_type>
-bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data) {
-  ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
-  if (UNLIKELY(obj == nullptr)) {
-    // We lost the reference to the field index so we cannot get a more
-    // precised exception message.
-    ThrowNullPointerExceptionFromDexPC();
-    return false;
-  }
-  MemberOffset field_offset(inst->VRegC_22c());
-  // Report this field access to instrumentation if needed. Since we only have the offset of
-  // the field from the base of the object, we need to look for it first.
-  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
-  if (UNLIKELY(instrumentation->HasFieldReadListeners())) {
-    ArtField* f = ArtField::FindInstanceFieldWithOffset(obj->GetClass(),
-                                                        field_offset.Uint32Value());
-    DCHECK(f != nullptr);
-    DCHECK(!f->IsStatic());
-    Thread* self = Thread::Current();
-    StackHandleScope<1> hs(self);
-    // Save obj in case the instrumentation event has thread suspension.
-    HandleWrapperObjPtr<mirror::Object> h = hs.NewHandleWrapper(&obj);
-    instrumentation->FieldReadEvent(self,
-                                    obj.Ptr(),
-                                    shadow_frame.GetMethod(),
-                                    shadow_frame.GetDexPC(),
-                                    f);
-    if (UNLIKELY(self->IsExceptionPending())) {
-      return false;
-    }
-  }
-  // Note: iget-x-quick instructions are only for non-volatile fields.
-  const uint32_t vregA = inst->VRegA_22c(inst_data);
-  switch (field_type) {
-    case Primitive::kPrimInt:
-      shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetField32(field_offset)));
-      break;
-    case Primitive::kPrimBoolean:
-      shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetFieldBoolean(field_offset)));
-      break;
-    case Primitive::kPrimByte:
-      shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetFieldByte(field_offset)));
-      break;
-    case Primitive::kPrimChar:
-      shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetFieldChar(field_offset)));
-      break;
-    case Primitive::kPrimShort:
-      shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetFieldShort(field_offset)));
-      break;
-    case Primitive::kPrimLong:
-      shadow_frame.SetVRegLong(vregA, static_cast<int64_t>(obj->GetField64(field_offset)));
-      break;
-    case Primitive::kPrimNot:
-      shadow_frame.SetVRegReference(vregA, obj->GetFieldObject<mirror::Object>(field_offset));
-      break;
-    default:
-      LOG(FATAL) << "Unreachable: " << field_type;
-      UNREACHABLE();
-  }
-  return true;
-}
-
-// Explicitly instantiate all DoIGetQuick functions.
-#define EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(_field_type) \
-  template bool DoIGetQuick<_field_type>(ShadowFrame& shadow_frame, const Instruction* inst, \
-                                         uint16_t inst_data)
-
-EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimInt);      // iget-quick.
-EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimBoolean);  // iget-boolean-quick.
-EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimByte);     // iget-byte-quick.
-EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimChar);     // iget-char-quick.
-EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimShort);    // iget-short-quick.
-EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimLong);     // iget-wide-quick.
-EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimNot);      // iget-object-quick.
-#undef EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL
-
-template<Primitive::Type field_type>
-static JValue GetFieldValue(const ShadowFrame& shadow_frame, uint32_t vreg)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  JValue field_value;
-  switch (field_type) {
-    case Primitive::kPrimBoolean:
-      field_value.SetZ(static_cast<uint8_t>(shadow_frame.GetVReg(vreg)));
-      break;
-    case Primitive::kPrimByte:
-      field_value.SetB(static_cast<int8_t>(shadow_frame.GetVReg(vreg)));
-      break;
-    case Primitive::kPrimChar:
-      field_value.SetC(static_cast<uint16_t>(shadow_frame.GetVReg(vreg)));
-      break;
-    case Primitive::kPrimShort:
-      field_value.SetS(static_cast<int16_t>(shadow_frame.GetVReg(vreg)));
-      break;
-    case Primitive::kPrimInt:
-      field_value.SetI(shadow_frame.GetVReg(vreg));
-      break;
-    case Primitive::kPrimLong:
-      field_value.SetJ(shadow_frame.GetVRegLong(vreg));
-      break;
-    case Primitive::kPrimNot:
-      field_value.SetL(shadow_frame.GetVRegReference(vreg));
-      break;
-    default:
-      LOG(FATAL) << "Unreachable: " << field_type;
-      UNREACHABLE();
-  }
-  return field_value;
-}
-
-template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
-         bool transaction_active>
-bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction* inst,
-                uint16_t inst_data) {
-  const bool do_assignability_check = do_access_check;
-  bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
-  uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
-  ArtField* f =
-      FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
-                                                    Primitive::ComponentSize(field_type));
-  if (UNLIKELY(f == nullptr)) {
-    CHECK(self->IsExceptionPending());
-    return false;
-  }
-  ObjPtr<mirror::Object> obj;
-  if (is_static) {
-    obj = f->GetDeclaringClass();
-    if (transaction_active) {
-      if (Runtime::Current()->GetTransaction()->WriteConstraint(obj.Ptr(), f)) {
-        Runtime::Current()->AbortTransactionAndThrowAbortError(
-            self, "Can't set fields of " + obj->PrettyTypeOf());
-        return false;
-      }
-    }
-
-  } else {
-    obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
-    if (UNLIKELY(obj == nullptr)) {
-      ThrowNullPointerExceptionForFieldAccess(f, false);
-      return false;
-    }
-  }
-
-  uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
-  JValue value = GetFieldValue<field_type>(shadow_frame, vregA);
-  return DoFieldPutCommon<field_type, do_assignability_check, transaction_active>(self,
-                                                                                  shadow_frame,
-                                                                                  obj,
-                                                                                  f,
-                                                                                  value);
-}
-
-// Explicitly instantiate all DoFieldPut functions.
-#define EXPLICIT_DO_FIELD_PUT_TEMPLATE_DECL(_find_type, _field_type, _do_check, _transaction_active) \
-  template bool DoFieldPut<_find_type, _field_type, _do_check, _transaction_active>(Thread* self, \
-      const ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data)
-
-#define EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(_find_type, _field_type)  \
-    EXPLICIT_DO_FIELD_PUT_TEMPLATE_DECL(_find_type, _field_type, false, false);  \
-    EXPLICIT_DO_FIELD_PUT_TEMPLATE_DECL(_find_type, _field_type, true, false);  \
-    EXPLICIT_DO_FIELD_PUT_TEMPLATE_DECL(_find_type, _field_type, false, true);  \
-    EXPLICIT_DO_FIELD_PUT_TEMPLATE_DECL(_find_type, _field_type, true, true);
-
-// iput-XXX
-EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(InstancePrimitiveWrite, Primitive::kPrimBoolean)
-EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(InstancePrimitiveWrite, Primitive::kPrimByte)
-EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(InstancePrimitiveWrite, Primitive::kPrimChar)
-EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(InstancePrimitiveWrite, Primitive::kPrimShort)
-EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(InstancePrimitiveWrite, Primitive::kPrimInt)
-EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(InstancePrimitiveWrite, Primitive::kPrimLong)
-EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(InstanceObjectWrite, Primitive::kPrimNot)
-
-// sput-XXX
-EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(StaticPrimitiveWrite, Primitive::kPrimBoolean)
-EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(StaticPrimitiveWrite, Primitive::kPrimByte)
-EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(StaticPrimitiveWrite, Primitive::kPrimChar)
-EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(StaticPrimitiveWrite, Primitive::kPrimShort)
-EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(StaticPrimitiveWrite, Primitive::kPrimInt)
-EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(StaticPrimitiveWrite, Primitive::kPrimLong)
-EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(StaticObjectWrite, Primitive::kPrimNot)
-
-#undef EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL
-#undef EXPLICIT_DO_FIELD_PUT_TEMPLATE_DECL
-
-template<Primitive::Type field_type, bool transaction_active>
-bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data) {
-  ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
-  if (UNLIKELY(obj == nullptr)) {
-    // We lost the reference to the field index so we cannot get a more
-    // precised exception message.
-    ThrowNullPointerExceptionFromDexPC();
-    return false;
-  }
-  MemberOffset field_offset(inst->VRegC_22c());
-  const uint32_t vregA = inst->VRegA_22c(inst_data);
-  // Report this field modification to instrumentation if needed. Since we only have the offset of
-  // the field from the base of the object, we need to look for it first.
-  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
-  if (UNLIKELY(instrumentation->HasFieldWriteListeners())) {
-    ArtField* f = ArtField::FindInstanceFieldWithOffset(obj->GetClass(),
-                                                        field_offset.Uint32Value());
-    DCHECK(f != nullptr);
-    DCHECK(!f->IsStatic());
-    JValue field_value = GetFieldValue<field_type>(shadow_frame, vregA);
-    Thread* self = Thread::Current();
-    StackHandleScope<2> hs(self);
-    // Save obj in case the instrumentation event has thread suspension.
-    HandleWrapperObjPtr<mirror::Object> h = hs.NewHandleWrapper(&obj);
-    mirror::Object* fake_root = nullptr;
-    HandleWrapper<mirror::Object> ret(hs.NewHandleWrapper<mirror::Object>(
-        field_type == Primitive::kPrimNot ? field_value.GetGCRoot() : &fake_root));
-    instrumentation->FieldWriteEvent(self,
-                                     obj.Ptr(),
-                                     shadow_frame.GetMethod(),
-                                     shadow_frame.GetDexPC(),
-                                     f,
-                                     field_value);
-    if (UNLIKELY(self->IsExceptionPending())) {
-      return false;
-    }
-    if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
-      // Don't actually set the field. The next instruction will force us to pop.
-      DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
-      DCHECK(PrevFrameWillRetry(self, shadow_frame));
-      return true;
-    }
-  }
-  // Note: iput-x-quick instructions are only for non-volatile fields.
-  switch (field_type) {
-    case Primitive::kPrimBoolean:
-      obj->SetFieldBoolean<transaction_active>(field_offset, shadow_frame.GetVReg(vregA));
-      break;
-    case Primitive::kPrimByte:
-      obj->SetFieldByte<transaction_active>(field_offset, shadow_frame.GetVReg(vregA));
-      break;
-    case Primitive::kPrimChar:
-      obj->SetFieldChar<transaction_active>(field_offset, shadow_frame.GetVReg(vregA));
-      break;
-    case Primitive::kPrimShort:
-      obj->SetFieldShort<transaction_active>(field_offset, shadow_frame.GetVReg(vregA));
-      break;
-    case Primitive::kPrimInt:
-      obj->SetField32<transaction_active>(field_offset, shadow_frame.GetVReg(vregA));
-      break;
-    case Primitive::kPrimLong:
-      obj->SetField64<transaction_active>(field_offset, shadow_frame.GetVRegLong(vregA));
-      break;
-    case Primitive::kPrimNot:
-      obj->SetFieldObject<transaction_active>(field_offset, shadow_frame.GetVRegReference(vregA));
-      break;
-    default:
-      LOG(FATAL) << "Unreachable: " << field_type;
-      UNREACHABLE();
-  }
-  return true;
-}
-
-// Explicitly instantiate all DoIPutQuick functions.
-#define EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL(_field_type, _transaction_active) \
-  template bool DoIPutQuick<_field_type, _transaction_active>(const ShadowFrame& shadow_frame, \
-                                                              const Instruction* inst, \
-                                                              uint16_t inst_data)
-
-#define EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(_field_type)   \
-  EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL(_field_type, false);     \
-  EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL(_field_type, true);
-
-EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimInt)      // iput-quick.
-EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimBoolean)  // iput-boolean-quick.
-EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimByte)     // iput-byte-quick.
-EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimChar)     // iput-char-quick.
-EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimShort)    // iput-short-quick.
-EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimLong)     // iput-wide-quick.
-EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimNot)      // iput-object-quick.
-#undef EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL
-#undef EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL
+template
+bool SendMethodExitEvents(Thread* self,
+                          const instrumentation::Instrumentation* instrumentation,
+                          ShadowFrame& frame,
+                          ObjPtr<mirror::Object> thiz,
+                          ArtMethod* method,
+                          uint32_t dex_pc,
+                          JValue& result);
 
 // We execute any instrumentation events that are triggered by this exception and change the
 // shadow_frame's dex_pc to that of the exception handler if there is one in the current method.
@@ -501,6 +174,12 @@
     if (instrumentation != nullptr) {
       if (shadow_frame.NeedsNotifyPop()) {
         instrumentation->WatchedFramePopped(self, shadow_frame);
+        if (shadow_frame.GetForcePopFrame()) {
+          // We will check in the caller for GetForcePopFrame again. We need to bail out early to
+          // prevent an ExceptionHandledEvent from also being sent before popping and to ensure we
+          // handle other types of non-standard-exits.
+          return true;
+        }
       }
       // Exception is not caught by the current method. We will unwind to the
       // caller. Notify any instrumentation listener.
@@ -509,7 +188,7 @@
                                          shadow_frame.GetMethod(),
                                          shadow_frame.GetDexPC());
     }
-    return false;
+    return shadow_frame.GetForcePopFrame();
   } else {
     shadow_frame.SetDexPC(found_dex_pc);
     if (instrumentation != nullptr && instrumentation->HasExceptionHandledListeners()) {
@@ -585,18 +264,18 @@
   // Ensure static methods are initialized.
   if (method->IsStatic()) {
     ObjPtr<mirror::Class> declaringClass = method->GetDeclaringClass();
-    if (UNLIKELY(!declaringClass->IsInitialized())) {
+    if (UNLIKELY(!declaringClass->IsVisiblyInitialized())) {
       self->PushShadowFrame(shadow_frame);
       StackHandleScope<1> hs(self);
       Handle<mirror::Class> h_class(hs.NewHandle(declaringClass));
-      if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true,
-                                                                            true))) {
+      if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
+                        self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
         self->PopShadowFrame();
         DCHECK(self->IsExceptionPending());
         return;
       }
       self->PopShadowFrame();
-      CHECK(h_class->IsInitializing());
+      DCHECK(h_class->IsInitializing());
       // Reload from shadow frame in case the method moved, this is faster than adding a handle.
       method = shadow_frame->GetMethod();
     }
@@ -1799,7 +1478,7 @@
     }
     return false;
   }
-  ObjPtr<mirror::Object> new_array = mirror::Array::Alloc<true>(
+  ObjPtr<mirror::Object> new_array = mirror::Array::Alloc(
       self,
       array_class,
       length,
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 6366035..c6d8569 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -17,8 +17,11 @@
 #ifndef ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_
 #define ART_RUNTIME_INTERPRETER_INTERPRETER_COMMON_H_
 
+#include "android-base/macros.h"
+#include "instrumentation.h"
 #include "interpreter.h"
 #include "interpreter_intrinsics.h"
+#include "transaction.h"
 
 #include <math.h>
 
@@ -33,6 +36,7 @@
 #include "art_method-inl.h"
 #include "base/enums.h"
 #include "base/locks.h"
+#include "base/logging.h"
 #include "base/macros.h"
 #include "class_linker-inl.h"
 #include "class_root.h"
@@ -58,6 +62,7 @@
 #include "stack.h"
 #include "thread.h"
 #include "unstarted_runtime.h"
+#include "verifier/method_verifier.h"
 #include "well_known_classes.h"
 
 namespace art {
@@ -131,6 +136,96 @@
 NO_INLINE bool CheckStackOverflow(Thread* self, size_t frame_size)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
+
+// Sends the normal method exit event.
+// Returns true if the events succeeded and false if there is a pending exception.
+template <typename T> bool SendMethodExitEvents(
+    Thread* self,
+    const instrumentation::Instrumentation* instrumentation,
+    ShadowFrame& frame,
+    ObjPtr<mirror::Object> thiz,
+    ArtMethod* method,
+    uint32_t dex_pc,
+    T& result) REQUIRES_SHARED(Locks::mutator_lock_);
+
+static inline ALWAYS_INLINE WARN_UNUSED bool
+NeedsMethodExitEvent(const instrumentation::Instrumentation* ins)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  return ins->HasMethodExitListeners() || ins->HasWatchedFramePopListeners();
+}
+
+// NO_INLINE so we won't bloat the interpreter with this very cold lock-release code.
+template <bool kMonitorCounting>
+static NO_INLINE void UnlockHeldMonitors(Thread* self, ShadowFrame* shadow_frame)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  DCHECK(shadow_frame->GetForcePopFrame());
+  // Unlock all monitors.
+  if (kMonitorCounting && shadow_frame->GetMethod()->MustCountLocks()) {
+    // Get the monitors from the shadow-frame monitor-count data.
+    shadow_frame->GetLockCountData().VisitMonitors(
+      [&](mirror::Object** obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+        // Since we don't use the 'obj' pointer after the DoMonitorExit everything should be fine
+        // WRT suspension.
+        DoMonitorExit<kMonitorCounting>(self, shadow_frame, *obj);
+      });
+  } else {
+    std::vector<verifier::MethodVerifier::DexLockInfo> locks;
+    verifier::MethodVerifier::FindLocksAtDexPc(shadow_frame->GetMethod(),
+                                                shadow_frame->GetDexPC(),
+                                                &locks,
+                                                Runtime::Current()->GetTargetSdkVersion());
+    for (const auto& reg : locks) {
+      if (UNLIKELY(reg.dex_registers.empty())) {
+        LOG(ERROR) << "Unable to determine reference locked by "
+                    << shadow_frame->GetMethod()->PrettyMethod() << " at pc "
+                    << shadow_frame->GetDexPC();
+      } else {
+        DoMonitorExit<kMonitorCounting>(
+            self, shadow_frame, shadow_frame->GetVRegReference(*reg.dex_registers.begin()));
+      }
+    }
+  }
+}
+
+enum class MonitorState {
+  kNoMonitorsLocked,
+  kCountingMonitors,
+  kNormalMonitors,
+};
+
+template<MonitorState kMonitorState>
+static inline ALWAYS_INLINE WARN_UNUSED bool PerformNonStandardReturn(
+      Thread* self,
+      ShadowFrame& frame,
+      JValue& result,
+      const instrumentation::Instrumentation* instrumentation,
+      uint16_t num_dex_inst,
+      uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) {
+  static constexpr bool kMonitorCounting = (kMonitorState == MonitorState::kCountingMonitors);
+  if (UNLIKELY(frame.GetForcePopFrame())) {
+    ObjPtr<mirror::Object> thiz(frame.GetThisObject(num_dex_inst));
+    StackHandleScope<1> hs(self);
+    Handle<mirror::Object> h_thiz(hs.NewHandle(thiz));
+    DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
+    if (UNLIKELY(self->IsExceptionPending())) {
+      LOG(WARNING) << "Suppressing exception for non-standard method exit: "
+                   << self->GetException()->Dump();
+      self->ClearException();
+    }
+    if (kMonitorState != MonitorState::kNoMonitorsLocked) {
+      UnlockHeldMonitors<kMonitorCounting>(self, &frame);
+    }
+    DoMonitorCheckOnExit<kMonitorCounting>(self, &frame);
+    result = JValue();
+    if (UNLIKELY(NeedsMethodExitEvent(instrumentation))) {
+      SendMethodExitEvents(
+          self, instrumentation, frame, h_thiz.Get(), frame.GetMethod(), dex_pc, result);
+    }
+    return true;
+  }
+  return false;
+}
+
 // Handles all invoke-XXX/range instructions except for invoke-polymorphic[/range].
 // Returns true on success, otherwise throws an exception and returns false.
 template<InvokeType type, bool is_range, bool do_access_check, bool is_mterp, bool is_quick = false>
@@ -150,13 +245,14 @@
   const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
   ArtMethod* sf_method = shadow_frame.GetMethod();
 
-  // Try to find the method in small thread-local cache first.
+  // Try to find the method in small thread-local cache first (only used when
+  // nterp is not used as mterp and nterp use the cache in an incompatible way).
   InterpreterCache* tls_cache = self->GetInterpreterCache();
   size_t tls_value;
   ArtMethod* resolved_method;
   if (is_quick) {
     resolved_method = nullptr;  // We don't know/care what the original method was.
-  } else if (LIKELY(tls_cache->Get(inst, &tls_value))) {
+  } else if (!IsNterpSupported() && LIKELY(tls_cache->Get(inst, &tls_value))) {
     resolved_method = reinterpret_cast<ArtMethod*>(tls_value);
   } else {
     ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
@@ -169,7 +265,9 @@
       result->SetJ(0);
       return false;
     }
-    tls_cache->Set(inst, reinterpret_cast<size_t>(resolved_method));
+    if (!IsNterpSupported()) {
+      tls_cache->Set(inst, reinterpret_cast<size_t>(resolved_method));
+    }
   }
 
   // Null pointer check and virtual method resolution.
@@ -238,7 +336,7 @@
     DCHECK(!called_method->IsIntrinsic());
     DCHECK(!(called_method->GetDeclaringClass()->IsStringClass() &&
         called_method->IsConstructor()));
-    DCHECK(type != kStatic || called_method->GetDeclaringClass()->IsInitialized());
+    DCHECK(type != kStatic || called_method->GetDeclaringClass()->IsVisiblyInitialized());
 
     const uint16_t number_of_inputs =
         (is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
@@ -278,6 +376,8 @@
     self->PushShadowFrame(new_shadow_frame);
     self->EndAssertNoThreadSuspension(old_cause);
 
+    VLOG(interpreter) << "Interpreting " << called_method->PrettyMethod();
+
     DCheckStaticState(self, called_method);
     while (true) {
       // Mterp does not support all instrumentation/debugging.
@@ -368,32 +468,322 @@
   }
 }
 
+template<Primitive::Type field_type>
+ALWAYS_INLINE static JValue GetFieldValue(const ShadowFrame& shadow_frame, uint32_t vreg)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  JValue field_value;
+  switch (field_type) {
+    case Primitive::kPrimBoolean:
+      field_value.SetZ(static_cast<uint8_t>(shadow_frame.GetVReg(vreg)));
+      break;
+    case Primitive::kPrimByte:
+      field_value.SetB(static_cast<int8_t>(shadow_frame.GetVReg(vreg)));
+      break;
+    case Primitive::kPrimChar:
+      field_value.SetC(static_cast<uint16_t>(shadow_frame.GetVReg(vreg)));
+      break;
+    case Primitive::kPrimShort:
+      field_value.SetS(static_cast<int16_t>(shadow_frame.GetVReg(vreg)));
+      break;
+    case Primitive::kPrimInt:
+      field_value.SetI(shadow_frame.GetVReg(vreg));
+      break;
+    case Primitive::kPrimLong:
+      field_value.SetJ(shadow_frame.GetVRegLong(vreg));
+      break;
+    case Primitive::kPrimNot:
+      field_value.SetL(shadow_frame.GetVRegReference(vreg));
+      break;
+    default:
+      LOG(FATAL) << "Unreachable: " << field_type;
+      UNREACHABLE();
+  }
+  return field_value;
+}
+
 // Handles iget-XXX and sget-XXX instructions.
 // Returns true on success, otherwise throws an exception and returns false.
 template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
          bool transaction_active = false>
-bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
-                uint16_t inst_data) REQUIRES_SHARED(Locks::mutator_lock_);
+ALWAYS_INLINE bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
+                              uint16_t inst_data) REQUIRES_SHARED(Locks::mutator_lock_) {
+  const bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead);
+  const uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
+  ArtField* f =
+      FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
+                                                    Primitive::ComponentSize(field_type));
+  if (UNLIKELY(f == nullptr)) {
+    CHECK(self->IsExceptionPending());
+    return false;
+  }
+  ObjPtr<mirror::Object> obj;
+  if (is_static) {
+    obj = f->GetDeclaringClass();
+    if (transaction_active) {
+      if (Runtime::Current()->GetTransaction()->ReadConstraint(self, obj)) {
+        Runtime::Current()->AbortTransactionAndThrowAbortError(self, "Can't read static fields of "
+            + obj->PrettyTypeOf() + " since it does not belong to clinit's class.");
+        return false;
+      }
+    }
+  } else {
+    obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+    if (UNLIKELY(obj == nullptr)) {
+      ThrowNullPointerExceptionForFieldAccess(f, true);
+      return false;
+    }
+  }
+
+  JValue result;
+  if (UNLIKELY(!DoFieldGetCommon<field_type>(self, shadow_frame, obj, f, &result))) {
+    // Instrumentation threw an error!
+    CHECK(self->IsExceptionPending());
+    return false;
+  }
+  uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
+  switch (field_type) {
+    case Primitive::kPrimBoolean:
+      shadow_frame.SetVReg(vregA, result.GetZ());
+      break;
+    case Primitive::kPrimByte:
+      shadow_frame.SetVReg(vregA, result.GetB());
+      break;
+    case Primitive::kPrimChar:
+      shadow_frame.SetVReg(vregA, result.GetC());
+      break;
+    case Primitive::kPrimShort:
+      shadow_frame.SetVReg(vregA, result.GetS());
+      break;
+    case Primitive::kPrimInt:
+      shadow_frame.SetVReg(vregA, result.GetI());
+      break;
+    case Primitive::kPrimLong:
+      shadow_frame.SetVRegLong(vregA, result.GetJ());
+      break;
+    case Primitive::kPrimNot:
+      shadow_frame.SetVRegReference(vregA, result.GetL());
+      break;
+    default:
+      LOG(FATAL) << "Unreachable: " << field_type;
+      UNREACHABLE();
+  }
+  return true;
+}
 
 // Handles iget-quick, iget-wide-quick and iget-object-quick instructions.
 // Returns true on success, otherwise throws an exception and returns false.
 template<Primitive::Type field_type>
-bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data)
-    REQUIRES_SHARED(Locks::mutator_lock_);
+ALWAYS_INLINE bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst,
+                               uint16_t inst_data) REQUIRES_SHARED(Locks::mutator_lock_) {
+  ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+  if (UNLIKELY(obj == nullptr)) {
+    // We lost the reference to the field index so we cannot get a more
+    // precised exception message.
+    ThrowNullPointerExceptionFromDexPC();
+    return false;
+  }
+  MemberOffset field_offset(inst->VRegC_22c());
+  // Report this field access to instrumentation if needed. Since we only have the offset of
+  // the field from the base of the object, we need to look for it first.
+  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+  if (UNLIKELY(instrumentation->HasFieldReadListeners())) {
+    ArtField* f = ArtField::FindInstanceFieldWithOffset(obj->GetClass(),
+                                                        field_offset.Uint32Value());
+    DCHECK(f != nullptr);
+    DCHECK(!f->IsStatic());
+    Thread* self = Thread::Current();
+    StackHandleScope<1> hs(self);
+    // Save obj in case the instrumentation event has thread suspension.
+    HandleWrapperObjPtr<mirror::Object> h = hs.NewHandleWrapper(&obj);
+    instrumentation->FieldReadEvent(self,
+                                    obj,
+                                    shadow_frame.GetMethod(),
+                                    shadow_frame.GetDexPC(),
+                                    f);
+    if (UNLIKELY(self->IsExceptionPending())) {
+      return false;
+    }
+  }
+  // Note: iget-x-quick instructions are only for non-volatile fields.
+  const uint32_t vregA = inst->VRegA_22c(inst_data);
+  switch (field_type) {
+    case Primitive::kPrimInt:
+      shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetField32(field_offset)));
+      break;
+    case Primitive::kPrimBoolean:
+      shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetFieldBoolean(field_offset)));
+      break;
+    case Primitive::kPrimByte:
+      shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetFieldByte(field_offset)));
+      break;
+    case Primitive::kPrimChar:
+      shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetFieldChar(field_offset)));
+      break;
+    case Primitive::kPrimShort:
+      shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetFieldShort(field_offset)));
+      break;
+    case Primitive::kPrimLong:
+      shadow_frame.SetVRegLong(vregA, static_cast<int64_t>(obj->GetField64(field_offset)));
+      break;
+    case Primitive::kPrimNot:
+      shadow_frame.SetVRegReference(vregA, obj->GetFieldObject<mirror::Object>(field_offset));
+      break;
+    default:
+      LOG(FATAL) << "Unreachable: " << field_type;
+      UNREACHABLE();
+  }
+  return true;
+}
+
+static inline bool CheckWriteConstraint(Thread* self, ObjPtr<mirror::Object> obj)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  Runtime* runtime = Runtime::Current();
+  if (runtime->GetTransaction()->WriteConstraint(self, obj)) {
+    DCHECK(runtime->GetHeap()->ObjectIsInBootImageSpace(obj) || obj->IsClass());
+    const char* base_msg = runtime->GetHeap()->ObjectIsInBootImageSpace(obj)
+        ? "Can't set fields of boot image "
+        : "Can't set fields of ";
+    runtime->AbortTransactionAndThrowAbortError(self, base_msg + obj->PrettyTypeOf());
+    return false;
+  }
+  return true;
+}
+
+static inline bool CheckWriteValueConstraint(Thread* self, ObjPtr<mirror::Object> value)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  Runtime* runtime = Runtime::Current();
+  if (runtime->GetTransaction()->WriteValueConstraint(self, value)) {
+    DCHECK(value != nullptr);
+    std::string msg = value->IsClass()
+        ? "Can't store reference to class " + value->AsClass()->PrettyDescriptor()
+        : "Can't store reference to instance of " + value->GetClass()->PrettyDescriptor();
+    runtime->AbortTransactionAndThrowAbortError(self, msg);
+    return false;
+  }
+  return true;
+}
 
 // Handles iput-XXX and sput-XXX instructions.
 // Returns true on success, otherwise throws an exception and returns false.
 template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
          bool transaction_active>
-bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction* inst,
-                uint16_t inst_data) REQUIRES_SHARED(Locks::mutator_lock_);
+ALWAYS_INLINE bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
+                              const Instruction* inst, uint16_t inst_data)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  const bool do_assignability_check = do_access_check;
+  bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
+  uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
+  ArtField* f =
+      FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
+                                                    Primitive::ComponentSize(field_type));
+  if (UNLIKELY(f == nullptr)) {
+    CHECK(self->IsExceptionPending());
+    return false;
+  }
+  ObjPtr<mirror::Object> obj;
+  if (is_static) {
+    obj = f->GetDeclaringClass();
+  } else {
+    obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+    if (UNLIKELY(obj == nullptr)) {
+      ThrowNullPointerExceptionForFieldAccess(f, false);
+      return false;
+    }
+  }
+  if (transaction_active && !CheckWriteConstraint(self, obj)) {
+    return false;
+  }
+
+  uint32_t vregA = is_static ? inst->VRegA_21c(inst_data) : inst->VRegA_22c(inst_data);
+  JValue value = GetFieldValue<field_type>(shadow_frame, vregA);
+
+  if (transaction_active &&
+      field_type == Primitive::kPrimNot &&
+      !CheckWriteValueConstraint(self, value.GetL())) {
+    return false;
+  }
+
+  return DoFieldPutCommon<field_type, do_assignability_check, transaction_active>(self,
+                                                                                  shadow_frame,
+                                                                                  obj,
+                                                                                  f,
+                                                                                  value);
+}
 
 // Handles iput-quick, iput-wide-quick and iput-object-quick instructions.
 // Returns true on success, otherwise throws an exception and returns false.
 template<Primitive::Type field_type, bool transaction_active>
-bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data)
-    REQUIRES_SHARED(Locks::mutator_lock_);
-
+ALWAYS_INLINE bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst,
+                               uint16_t inst_data) REQUIRES_SHARED(Locks::mutator_lock_) {
+  ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
+  if (UNLIKELY(obj == nullptr)) {
+    // We lost the reference to the field index so we cannot get a more
+    // precised exception message.
+    ThrowNullPointerExceptionFromDexPC();
+    return false;
+  }
+  MemberOffset field_offset(inst->VRegC_22c());
+  const uint32_t vregA = inst->VRegA_22c(inst_data);
+  // Report this field modification to instrumentation if needed. Since we only have the offset of
+  // the field from the base of the object, we need to look for it first.
+  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
+  if (UNLIKELY(instrumentation->HasFieldWriteListeners())) {
+    ArtField* f = ArtField::FindInstanceFieldWithOffset(obj->GetClass(),
+                                                        field_offset.Uint32Value());
+    DCHECK(f != nullptr);
+    DCHECK(!f->IsStatic());
+    JValue field_value = GetFieldValue<field_type>(shadow_frame, vregA);
+    Thread* self = Thread::Current();
+    StackHandleScope<2> hs(self);
+    // Save obj in case the instrumentation event has thread suspension.
+    HandleWrapperObjPtr<mirror::Object> h = hs.NewHandleWrapper(&obj);
+    mirror::Object* fake_root = nullptr;
+    HandleWrapper<mirror::Object> ret(hs.NewHandleWrapper<mirror::Object>(
+        field_type == Primitive::kPrimNot ? field_value.GetGCRoot() : &fake_root));
+    instrumentation->FieldWriteEvent(self,
+                                     obj,
+                                     shadow_frame.GetMethod(),
+                                     shadow_frame.GetDexPC(),
+                                     f,
+                                     field_value);
+    if (UNLIKELY(self->IsExceptionPending())) {
+      return false;
+    }
+    if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
+      // Don't actually set the field. The next instruction will force us to pop.
+      DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
+      return true;
+    }
+  }
+  // Note: iput-x-quick instructions are only for non-volatile fields.
+  switch (field_type) {
+    case Primitive::kPrimBoolean:
+      obj->SetFieldBoolean<transaction_active>(field_offset, shadow_frame.GetVReg(vregA));
+      break;
+    case Primitive::kPrimByte:
+      obj->SetFieldByte<transaction_active>(field_offset, shadow_frame.GetVReg(vregA));
+      break;
+    case Primitive::kPrimChar:
+      obj->SetFieldChar<transaction_active>(field_offset, shadow_frame.GetVReg(vregA));
+      break;
+    case Primitive::kPrimShort:
+      obj->SetFieldShort<transaction_active>(field_offset, shadow_frame.GetVReg(vregA));
+      break;
+    case Primitive::kPrimInt:
+      obj->SetField32<transaction_active>(field_offset, shadow_frame.GetVReg(vregA));
+      break;
+    case Primitive::kPrimLong:
+      obj->SetField64<transaction_active>(field_offset, shadow_frame.GetVRegLong(vregA));
+      break;
+    case Primitive::kPrimNot:
+      obj->SetFieldObject<transaction_active>(field_offset, shadow_frame.GetVRegReference(vregA));
+      break;
+    default:
+      LOG(FATAL) << "Unreachable: " << field_type;
+      UNREACHABLE();
+  }
+  return true;
+}
 
 // Handles string resolution for const-string and const-string-jumbo instructions. Also ensures the
 // java.lang.String class is initialized.
@@ -402,14 +792,15 @@
                                                    dex::StringIndex string_idx)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ObjPtr<mirror::Class> java_lang_string_class = GetClassRoot<mirror::String>();
-  if (UNLIKELY(!java_lang_string_class->IsInitialized())) {
-    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  if (UNLIKELY(!java_lang_string_class->IsVisiblyInitialized())) {
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> h_class(hs.NewHandle(java_lang_string_class));
-    if (UNLIKELY(!class_linker->EnsureInitialized(self, h_class, true, true))) {
+    if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
+                      self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
       DCHECK(self->IsExceptionPending());
       return nullptr;
     }
+    DCHECK(h_class->IsInitializing());
   }
   ArtMethod* method = shadow_frame.GetMethod();
   ObjPtr<mirror::String> string_ptr =
diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc
index c8878e1..63bd967 100644
--- a/runtime/interpreter/interpreter_intrinsics.cc
+++ b/runtime/interpreter/interpreter_intrinsics.cc
@@ -526,7 +526,19 @@
     UNIMPLEMENTED_CASE(StringBufferAppend /* (Ljava/lang/String;)Ljava/lang/StringBuffer; */)
     UNIMPLEMENTED_CASE(StringBufferLength /* ()I */)
     UNIMPLEMENTED_CASE(StringBufferToString /* ()Ljava/lang/String; */)
-    UNIMPLEMENTED_CASE(StringBuilderAppend /* (Ljava/lang/String;)Ljava/lang/StringBuilder; */)
+    UNIMPLEMENTED_CASE(
+        StringBuilderAppendObject /* (Ljava/lang/Object;)Ljava/lang/StringBuilder; */)
+    UNIMPLEMENTED_CASE(
+        StringBuilderAppendString /* (Ljava/lang/String;)Ljava/lang/StringBuilder; */)
+    UNIMPLEMENTED_CASE(
+        StringBuilderAppendCharSequence /* (Ljava/lang/CharSequence;)Ljava/lang/StringBuilder; */)
+    UNIMPLEMENTED_CASE(StringBuilderAppendCharArray /* ([C)Ljava/lang/StringBuilder; */)
+    UNIMPLEMENTED_CASE(StringBuilderAppendBoolean /* (Z)Ljava/lang/StringBuilder; */)
+    UNIMPLEMENTED_CASE(StringBuilderAppendChar /* (C)Ljava/lang/StringBuilder; */)
+    UNIMPLEMENTED_CASE(StringBuilderAppendInt /* (I)Ljava/lang/StringBuilder; */)
+    UNIMPLEMENTED_CASE(StringBuilderAppendLong /* (J)Ljava/lang/StringBuilder; */)
+    UNIMPLEMENTED_CASE(StringBuilderAppendFloat /* (F)Ljava/lang/StringBuilder; */)
+    UNIMPLEMENTED_CASE(StringBuilderAppendDouble /* (D)Ljava/lang/StringBuilder; */)
     UNIMPLEMENTED_CASE(StringBuilderLength /* ()I */)
     UNIMPLEMENTED_CASE(StringBuilderToString /* ()Ljava/lang/String; */)
     UNIMPLEMENTED_CASE(UnsafeCASInt /* (Ljava/lang/Object;JII)Z */)
@@ -561,6 +573,15 @@
     UNIMPLEMENTED_CASE(CRC32Update /* (II)I */)
     UNIMPLEMENTED_CASE(CRC32UpdateBytes /* (I[BII)I */)
     UNIMPLEMENTED_CASE(CRC32UpdateByteBuffer /* (IJII)I */)
+    UNIMPLEMENTED_CASE(FP16ToFloat /* (S)F */)
+    UNIMPLEMENTED_CASE(FP16ToHalf /* (F)S */)
+    UNIMPLEMENTED_CASE(FP16Floor /* (S)S */)
+    UNIMPLEMENTED_CASE(FP16Ceil /* (S)S */)
+    UNIMPLEMENTED_CASE(FP16Rint /* (S)S */)
+    UNIMPLEMENTED_CASE(FP16Greater /* (SS)Z */)
+    UNIMPLEMENTED_CASE(FP16GreaterEquals /* (SS)Z */)
+    UNIMPLEMENTED_CASE(FP16Less /* (SS)Z */)
+    UNIMPLEMENTED_CASE(FP16LessEquals /* (SS)Z */)
     INTRINSIC_CASE(VarHandleFullFence)
     INTRINSIC_CASE(VarHandleAcquireFence)
     INTRINSIC_CASE(VarHandleReleaseFence)
diff --git a/runtime/interpreter/interpreter_mterp_impl.h b/runtime/interpreter/interpreter_mterp_impl.h
index 177b0fd..892790b 100644
--- a/runtime/interpreter/interpreter_mterp_impl.h
+++ b/runtime/interpreter/interpreter_mterp_impl.h
@@ -36,6 +36,9 @@
                                  ShadowFrame* shadow_frame,
                                  JValue* result_register) REQUIRES_SHARED(Locks::mutator_lock_);
 
+// The entrypoint for nterp, which ArtMethods can directly point to.
+extern "C" void ExecuteNterpImpl() REQUIRES_SHARED(Locks::mutator_lock_);
+
 }  // namespace interpreter
 }  // namespace art
 
diff --git a/runtime/interpreter/interpreter_switch_impl-inl.h b/runtime/interpreter/interpreter_switch_impl-inl.h
index 36cfee4..0f15adf 100644
--- a/runtime/interpreter/interpreter_switch_impl-inl.h
+++ b/runtime/interpreter/interpreter_switch_impl-inl.h
@@ -33,6 +33,7 @@
 #include "jvalue-inl.h"
 #include "mirror/string-alloc-inl.h"
 #include "mirror/throwable.h"
+#include "monitor.h"
 #include "nth_caller_visitor.h"
 #include "safe_math.h"
 #include "shadow_frame-inl.h"
@@ -43,88 +44,41 @@
 namespace interpreter {
 
 // Short-lived helper class which executes single DEX bytecode.  It is inlined by compiler.
+// Any relevant execution information is stored in the fields - it should be kept to minimum.
+// All instance functions must be inlined so that the fields can be stored in registers.
 //
 // The function names must match the names from dex_instruction_list.h and have no arguments.
+// Return value: The handlers must return false if the instruction throws or returns (exits).
 //
-// Any relevant execution information is stored in the fields - it should be kept to minimum.
-//
-// Helper methods may return boolean value - in which case 'false' always means
-// "stop executing current opcode" (which does not necessarily exit the interpreter loop).
-//
-template<bool do_access_check, bool transaction_active>
+template<bool do_access_check, bool transaction_active, Instruction::Format kFormat>
 class InstructionHandler {
  public:
-  template <bool kMonitorCounting>
-  static NO_INLINE void UnlockHeldMonitors(Thread* self, ShadowFrame* shadow_frame)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    DCHECK(shadow_frame->GetForcePopFrame());
-    // Unlock all monitors.
-    if (kMonitorCounting && shadow_frame->GetMethod()->MustCountLocks()) {
-      // Get the monitors from the shadow-frame monitor-count data.
-      shadow_frame->GetLockCountData().VisitMonitors(
-        [&](mirror::Object** obj) REQUIRES_SHARED(Locks::mutator_lock_) {
-          // Since we don't use the 'obj' pointer after the DoMonitorExit everything should be fine
-          // WRT suspension.
-          DoMonitorExit<do_assignability_check>(self, shadow_frame, *obj);
-        });
-    } else {
-      std::vector<verifier::MethodVerifier::DexLockInfo> locks;
-      verifier::MethodVerifier::FindLocksAtDexPc(shadow_frame->GetMethod(),
-                                                  shadow_frame->GetDexPC(),
-                                                  &locks,
-                                                  Runtime::Current()->GetTargetSdkVersion());
-      for (const auto& reg : locks) {
-        if (UNLIKELY(reg.dex_registers.empty())) {
-          LOG(ERROR) << "Unable to determine reference locked by "
-                      << shadow_frame->GetMethod()->PrettyMethod() << " at pc "
-                      << shadow_frame->GetDexPC();
-        } else {
-          DoMonitorExit<do_assignability_check>(
-              self, shadow_frame, shadow_frame->GetVRegReference(*reg.dex_registers.begin()));
-        }
-      }
-    }
-  }
+#define HANDLER_ATTRIBUTES ALWAYS_INLINE FLATTEN WARN_UNUSED REQUIRES_SHARED(Locks::mutator_lock_)
 
-  ALWAYS_INLINE WARN_UNUSED bool CheckForceReturn()
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (UNLIKELY(shadow_frame.GetForcePopFrame())) {
-      DCHECK(PrevFrameWillRetry(self, shadow_frame))
-          << "Pop frame forced without previous frame ready to retry instruction!";
-      DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
-      UnlockHeldMonitors<do_assignability_check>(self, &shadow_frame);
-      DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame);
-      if (UNLIKELY(NeedsMethodExitEvent(instrumentation))) {
-        SendMethodExitEvents(self,
-                             instrumentation,
-                             shadow_frame,
-                             shadow_frame.GetThisObject(Accessor().InsSize()),
-                             shadow_frame.GetMethod(),
-                             inst->GetDexPc(Insns()),
-                             JValue());
-      }
-      ctx->result = JValue(); /* Handled in caller. */
+  HANDLER_ATTRIBUTES bool CheckForceReturn() {
+    if (PerformNonStandardReturn<kMonitorState>(self,
+                                                shadow_frame,
+                                                ctx->result,
+                                                instrumentation,
+                                                Accessor().InsSize(),
+                                                inst->GetDexPc(Insns()))) {
       exit_interpreter_loop = true;
       return false;
     }
     return true;
   }
 
-  NO_INLINE WARN_UNUSED bool HandlePendingExceptionWithInstrumentationImpl(
-      const instrumentation::Instrumentation* instr)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool HandlePendingException() {
     DCHECK(self->IsExceptionPending());
     self->AllowThreadSuspension();
     if (!CheckForceReturn()) {
       return false;
     }
-    if (!MoveToExceptionHandler(self, shadow_frame, instr)) {
+    bool skip_event = shadow_frame.GetSkipNextExceptionEvent();
+    shadow_frame.SetSkipNextExceptionEvent(false);
+    if (!MoveToExceptionHandler(self, shadow_frame, skip_event ? nullptr : instrumentation)) {
       /* Structured locking is to be enforced for abnormal termination, too. */
       DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame);
-      if (ctx->interpret_one_instruction) {
-        /* Signal mterp to return to caller */
-        shadow_frame.SetDexPC(dex::kDexNoIndex);
-      }
       ctx->result = JValue(); /* Handled in caller. */
       exit_interpreter_loop = true;
       return false;  // Return to caller.
@@ -134,38 +88,11 @@
     }
     int32_t displacement =
         static_cast<int32_t>(shadow_frame.GetDexPC()) - static_cast<int32_t>(dex_pc);
-    inst = inst->RelativeAt(displacement);
-    return false;  // Stop executing this opcode and continue in the exception handler.
+    SetNextInstruction(inst->RelativeAt(displacement));
+    return true;
   }
 
-  // Forwards the call to the NO_INLINE HandlePendingExceptionWithInstrumentationImpl.
-  ALWAYS_INLINE WARN_UNUSED bool HandlePendingExceptionWithInstrumentation(
-      const instrumentation::Instrumentation* instr)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    // We need to help the compiler a bit to make the NO_INLINE call efficient.
-    //  * All handler fields should be in registers, so we do not want to take the object
-    //    address (for 'this' argument). Make a copy of the handler just for the slow path.
-    //  * The modifiable fields should also be in registers, so we don't want to store their
-    //    address even in the handler copy. Make a copy of them just for the call as well.
-    const Instruction* inst_copy = inst;
-    bool exit_loop_copy = exit_interpreter_loop;
-    InstructionHandler<do_access_check, transaction_active> handler_copy(
-        ctx, instrumentation, self, shadow_frame, dex_pc, inst_copy, inst_data, exit_loop_copy);
-    bool result = handler_copy.HandlePendingExceptionWithInstrumentationImpl(instr);
-    inst = inst_copy;
-    exit_interpreter_loop = exit_loop_copy;
-    return result;
-  }
-
-  ALWAYS_INLINE WARN_UNUSED bool HandlePendingException()
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    return HandlePendingExceptionWithInstrumentation(instrumentation);
-  }
-
-  ALWAYS_INLINE WARN_UNUSED bool PossiblyHandlePendingExceptionOnInvokeImpl(
-      bool is_exception_pending,
-      const Instruction* next_inst)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool PossiblyHandlePendingExceptionOnInvoke(bool is_exception_pending) {
     if (UNLIKELY(shadow_frame.GetForceRetryInstruction())) {
       /* Don't need to do anything except clear the flag and exception. We leave the */
       /* instruction the same so it will be re-executed on the next go-around.       */
@@ -179,47 +106,24 @@
         }
         self->ClearException();
       }
+      SetNextInstruction(inst);
     } else if (UNLIKELY(is_exception_pending)) {
       /* Should have succeeded. */
       DCHECK(!shadow_frame.GetForceRetryInstruction());
-      if (!HandlePendingException()) {
-        return false;
-      }
-    } else {
-      inst = next_inst;
+      return false;  // Pending exception.
     }
     return true;
   }
 
-  ALWAYS_INLINE WARN_UNUSED bool PossiblyHandlePendingException(
-      bool is_exception_pending,
-      const Instruction* next_inst)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    /* Should only be on invoke instructions. */
-    DCHECK(!shadow_frame.GetForceRetryInstruction());
-    if (UNLIKELY(is_exception_pending)) {
-      if (!HandlePendingException()) {
-        return false;
-      }
-    } else {
-      inst = next_inst;
-    }
-    return true;
-  }
-
-  ALWAYS_INLINE WARN_UNUSED bool HandleMonitorChecks()
-      REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool HandleMonitorChecks() {
     if (!DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame)) {
-      if (!HandlePendingException()) {
-        return false;
-      }
+      return false;  // Pending exception.
     }
     return true;
   }
 
   // Code to run before each dex instruction.
-  ALWAYS_INLINE WARN_UNUSED bool Preamble()
-      REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool Preamble() {
     /* We need to put this before & after the instrumentation to avoid having to put in a */
     /* post-script macro.                                                                 */
     if (!CheckForceReturn()) {
@@ -235,9 +139,10 @@
                                      dex_pc,
                                      instrumentation,
                                      save_ref))) {
-        if (!HandlePendingException()) {
-          return false;
-        }
+        DCHECK(self->IsExceptionPending());
+        // Do not raise exception event if it is caused by other instrumentation event.
+        shadow_frame.SetSkipNextExceptionEvent(true);
+        return false;  // Pending exception.
       }
       if (!CheckForceReturn()) {
         return false;
@@ -246,8 +151,7 @@
     return true;
   }
 
-  ALWAYS_INLINE WARN_UNUSED bool BranchInstrumentation(int32_t offset)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool BranchInstrumentation(int32_t offset) {
     if (UNLIKELY(instrumentation->HasBranchListeners())) {
       instrumentation->Branch(self, shadow_frame.GetMethod(), dex_pc, offset);
     }
@@ -257,10 +161,6 @@
                                             dex_pc,
                                             offset,
                                             &result)) {
-      if (ctx->interpret_one_instruction) {
-        /* OSR has completed execution of the method.  Signal mterp to return to caller */
-        shadow_frame.SetDexPC(dex::kDexNoIndex);
-      }
       ctx->result = result;
       exit_interpreter_loop = true;
       return false;
@@ -276,12 +176,9 @@
     }
   }
 
-  ALWAYS_INLINE WARN_UNUSED bool HandleAsyncException()
-      REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool HandleAsyncException() {
     if (UNLIKELY(self->ObserveAsyncException())) {
-      if (!HandlePendingException()) {
-        return false;
-      }
+      return false;  // Pending exception.
     }
     return true;
   }
@@ -291,7 +188,7 @@
     if (IsBackwardBranch(offset)) {
       HotnessUpdate();
       /* Record new dex pc early to have consistent suspend point at loop header. */
-      shadow_frame.SetDexPC(inst->GetDexPc(Insns()));
+      shadow_frame.SetDexPC(next->GetDexPc(Insns()));
       self->AllowThreadSuspension();
     }
   }
@@ -324,7 +221,7 @@
       // We just let this exception replace the old one.
       // TODO It would be good to add the old exception to the
       // suppressed exceptions of the new one if possible.
-      return false;
+      return false;  // Pending exception.
     } else {
       if (UNLIKELY(!thr.IsNull())) {
         self->SetException(thr.Get());
@@ -333,279 +230,287 @@
     }
   }
 
-  static bool NeedsMethodExitEvent(const instrumentation::Instrumentation* ins)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    return ins->HasMethodExitListeners() || ins->HasWatchedFramePopListeners();
+  HANDLER_ATTRIBUTES bool HandleReturn(JValue result) {
+    self->AllowThreadSuspension();
+    if (!HandleMonitorChecks()) {
+      return false;
+    }
+    if (UNLIKELY(NeedsMethodExitEvent(instrumentation) &&
+                 !SendMethodExitEvents(self,
+                                       instrumentation,
+                                       shadow_frame,
+                                       shadow_frame.GetThisObject(Accessor().InsSize()),
+                                       shadow_frame.GetMethod(),
+                                       inst->GetDexPc(Insns()),
+                                       result))) {
+      DCHECK(self->IsExceptionPending());
+      // Do not raise exception event if it is caused by other instrumentation event.
+      shadow_frame.SetSkipNextExceptionEvent(true);
+      return false;  // Pending exception.
+    }
+    ctx->result = result;
+    exit_interpreter_loop = true;
+    return false;
   }
 
-  // Sends the normal method exit event.
-  // Returns true if the events succeeded and false if there is a pending exception.
-  NO_INLINE static bool SendMethodExitEvents(
-      Thread* self,
-      const instrumentation::Instrumentation* instrumentation,
-      const ShadowFrame& frame,
-      ObjPtr<mirror::Object> thiz,
-      ArtMethod* method,
-      uint32_t dex_pc,
-      const JValue& result)
-      REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool had_event = false;
-    // We don't send method-exit if it's a pop-frame. We still send frame_popped though.
-    if (UNLIKELY(instrumentation->HasMethodExitListeners() && !frame.GetForcePopFrame())) {
-      had_event = true;
-      instrumentation->MethodExitEvent(self, thiz.Ptr(), method, dex_pc, result);
+  HANDLER_ATTRIBUTES bool HandleGoto(int32_t offset) {
+    if (!HandleAsyncException()) {
+      return false;
     }
-    if (UNLIKELY(frame.NeedsNotifyPop() && instrumentation->HasWatchedFramePopListeners())) {
-      had_event = true;
-      instrumentation->WatchedFramePopped(self, frame);
+    if (!BranchInstrumentation(offset)) {
+      return false;
     }
-    if (UNLIKELY(had_event)) {
-      return !self->IsExceptionPending();
+    SetNextInstruction(inst->RelativeAt(offset));
+    HandleBackwardBranch(offset);
+    return true;
+  }
+
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wfloat-equal"
+
+  template<typename T>
+  HANDLER_ATTRIBUTES bool HandleCmpl(T val1, T val2) {
+    int32_t result;
+    if (val1 > val2) {
+      result = 1;
+    } else if (val1 == val2) {
+      result = 0;
     } else {
-      return true;
+      result = -1;
     }
+    SetVReg(A(), result);
+    return true;
   }
 
-#define BRANCH_INSTRUMENTATION(offset)                                                            \
-  if (!BranchInstrumentation(offset)) {                                                           \
-    return;                                                                                       \
+  // Returns the same result as the function above. It only differs for NaN values.
+  template<typename T>
+  HANDLER_ATTRIBUTES bool HandleCmpg(T val1, T val2) {
+    int32_t result;
+    if (val1 < val2) {
+      result = -1;
+    } else if (val1 == val2) {
+      result = 0;
+    } else {
+      result = 1;
+    }
+    SetVReg(A(), result);
+    return true;
   }
 
-#define HANDLE_PENDING_EXCEPTION()                                                                \
-  if (!HandlePendingException()) {                                                                \
-    return;                                                                                       \
+#pragma clang diagnostic pop
+
+  HANDLER_ATTRIBUTES bool HandleIf(bool cond, int32_t offset) {
+    if (cond) {
+      if (!BranchInstrumentation(offset)) {
+        return false;
+      }
+      SetNextInstruction(inst->RelativeAt(offset));
+      HandleBackwardBranch(offset);
+    } else {
+      if (!BranchInstrumentation(2)) {
+        return false;
+      }
+    }
+    return true;
   }
 
-#define POSSIBLY_HANDLE_PENDING_EXCEPTION(is_exception_pending, next_function)                    \
-  if (!PossiblyHandlePendingException(is_exception_pending, inst->next_function())) {             \
-    return;                                                                                       \
+  template<typename ArrayType, typename SetVRegFn>
+  HANDLER_ATTRIBUTES bool HandleAGet(SetVRegFn setVReg) {
+    ObjPtr<mirror::Object> a = GetVRegReference(B());
+    if (UNLIKELY(a == nullptr)) {
+      ThrowNullPointerExceptionFromInterpreter();
+      return false;  // Pending exception.
+    }
+    int32_t index = GetVReg(C());
+    ObjPtr<ArrayType> array = ObjPtr<ArrayType>::DownCast(a);
+    if (UNLIKELY(!array->CheckIsValidIndex(index))) {
+      return false;  // Pending exception.
+    } else {
+      (this->*setVReg)(A(), array->GetWithoutChecks(index));
+    }
+    return true;
   }
 
-#define POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_POLYMORPHIC(is_exception_pending)             \
-  if (!PossiblyHandlePendingExceptionOnInvokeImpl(is_exception_pending, inst->Next_4xx())) {      \
-    return;                                                                                       \
+  template<typename ArrayType, typename T>
+  HANDLER_ATTRIBUTES bool HandleAPut(T value) {
+    ObjPtr<mirror::Object> a = GetVRegReference(B());
+    if (UNLIKELY(a == nullptr)) {
+      ThrowNullPointerExceptionFromInterpreter();
+      return false;  // Pending exception.
+    }
+    int32_t index = GetVReg(C());
+    ObjPtr<ArrayType> array = ObjPtr<ArrayType>::DownCast(a);
+    if (UNLIKELY(!array->CheckIsValidIndex(index))) {
+      return false;  // Pending exception.
+    } else {
+      if (transaction_active && !CheckWriteConstraint(self, array)) {
+        return false;
+      }
+      array->template SetWithoutChecks<transaction_active>(index, value);
+    }
+    return true;
   }
 
-#define POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(is_exception_pending)                         \
-  if (!PossiblyHandlePendingExceptionOnInvokeImpl(is_exception_pending, inst->Next_3xx())) {      \
-    return;                                                                                       \
+  template<FindFieldType find_type, Primitive::Type field_type>
+  HANDLER_ATTRIBUTES bool HandleGet() {
+    return DoFieldGet<find_type, field_type, do_access_check, transaction_active>(
+        self, shadow_frame, inst, inst_data);
   }
 
-  ALWAYS_INLINE void NOP() REQUIRES_SHARED(Locks::mutator_lock_) {
-    inst = inst->Next_1xx();
+  template<Primitive::Type field_type>
+  HANDLER_ATTRIBUTES bool HandleGetQuick() {
+    return DoIGetQuick<field_type>(shadow_frame, inst, inst_data);
   }
 
-  ALWAYS_INLINE void MOVE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
-                         shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  template<FindFieldType find_type, Primitive::Type field_type>
+  HANDLER_ATTRIBUTES bool HandlePut() {
+    return DoFieldPut<find_type, field_type, do_access_check, transaction_active>(
+        self, shadow_frame, inst, inst_data);
   }
 
-  ALWAYS_INLINE void MOVE_FROM16() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_22x(inst_data),
-                         shadow_frame.GetVReg(inst->VRegB_22x()));
-    inst = inst->Next_2xx();
+  template<Primitive::Type field_type>
+  HANDLER_ATTRIBUTES bool HandlePutQuick() {
+    return DoIPutQuick<field_type, transaction_active>(
+        shadow_frame, inst, inst_data);
   }
 
-  ALWAYS_INLINE void MOVE_16() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_32x(),
-                         shadow_frame.GetVReg(inst->VRegB_32x()));
-    inst = inst->Next_3xx();
+  template<InvokeType type, bool is_range, bool is_quick = false>
+  HANDLER_ATTRIBUTES bool HandleInvoke() {
+    bool success = DoInvoke<type, is_range, do_access_check, /*is_mterp=*/ false, is_quick>(
+        self, shadow_frame, inst, inst_data, ResultRegister());
+    return PossiblyHandlePendingExceptionOnInvoke(!success);
   }
 
-  ALWAYS_INLINE void MOVE_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data),
-                             shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool HandleUnused() {
+    UnexpectedOpcode(inst, shadow_frame);
+    return true;
   }
 
-  ALWAYS_INLINE void MOVE_WIDE_FROM16() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(inst->VRegA_22x(inst_data),
-                             shadow_frame.GetVRegLong(inst->VRegB_22x()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool NOP() {
+    return true;
   }
 
-  ALWAYS_INLINE void MOVE_WIDE_16() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(inst->VRegA_32x(),
-                             shadow_frame.GetVRegLong(inst->VRegB_32x()));
-    inst = inst->Next_3xx();
+  HANDLER_ATTRIBUTES bool MOVE() {
+    SetVReg(A(), GetVReg(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void MOVE_OBJECT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegReference(inst->VRegA_12x(inst_data),
-                                  shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool MOVE_FROM16() {
+    SetVReg(A(), GetVReg(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void MOVE_OBJECT_FROM16() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegReference(inst->VRegA_22x(inst_data),
-                                  shadow_frame.GetVRegReference(inst->VRegB_22x()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool MOVE_16() {
+    SetVReg(A(), GetVReg(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void MOVE_OBJECT_16() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegReference(inst->VRegA_32x(),
-                                  shadow_frame.GetVRegReference(inst->VRegB_32x()));
-    inst = inst->Next_3xx();
+  HANDLER_ATTRIBUTES bool MOVE_WIDE() {
+    SetVRegLong(A(), GetVRegLong(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void MOVE_RESULT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_11x(inst_data), ResultRegister()->GetI());
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool MOVE_WIDE_FROM16() {
+    SetVRegLong(A(), GetVRegLong(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void MOVE_RESULT_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(inst->VRegA_11x(inst_data), ResultRegister()->GetJ());
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool MOVE_WIDE_16() {
+    SetVRegLong(A(), GetVRegLong(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void MOVE_RESULT_OBJECT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), ResultRegister()->GetL());
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool MOVE_OBJECT() {
+    SetVRegReference(A(), GetVRegReference(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void MOVE_EXCEPTION() REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool MOVE_OBJECT_FROM16() {
+    SetVRegReference(A(), GetVRegReference(B()));
+    return true;
+  }
+
+  HANDLER_ATTRIBUTES bool MOVE_OBJECT_16() {
+    SetVRegReference(A(), GetVRegReference(B()));
+    return true;
+  }
+
+  HANDLER_ATTRIBUTES bool MOVE_RESULT() {
+    SetVReg(A(), ResultRegister()->GetI());
+    return true;
+  }
+
+  HANDLER_ATTRIBUTES bool MOVE_RESULT_WIDE() {
+    SetVRegLong(A(), ResultRegister()->GetJ());
+    return true;
+  }
+
+  HANDLER_ATTRIBUTES bool MOVE_RESULT_OBJECT() {
+    SetVRegReference(A(), ResultRegister()->GetL());
+    return true;
+  }
+
+  HANDLER_ATTRIBUTES bool MOVE_EXCEPTION() {
     ObjPtr<mirror::Throwable> exception = self->GetException();
     DCHECK(exception != nullptr) << "No pending exception on MOVE_EXCEPTION instruction";
-    shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
+    SetVRegReference(A(), exception);
     self->ClearException();
-    inst = inst->Next_1xx();
+    return true;
   }
 
-  ALWAYS_INLINE void RETURN_VOID_NO_BARRIER() REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool RETURN_VOID_NO_BARRIER() {
     JValue result;
-    self->AllowThreadSuspension();
-    if (!HandleMonitorChecks()) {
-      return;
-    }
-    if (UNLIKELY(NeedsMethodExitEvent(instrumentation) &&
-                 !SendMethodExitEvents(self,
-                                       instrumentation,
-                                       shadow_frame,
-                                       shadow_frame.GetThisObject(Accessor().InsSize()),
-                                       shadow_frame.GetMethod(),
-                                       inst->GetDexPc(Insns()),
-                                       result))) {
-      if (!HandlePendingExceptionWithInstrumentation(nullptr)) {
-        return;
-      }
-    }
-    if (ctx->interpret_one_instruction) {
-      /* Signal mterp to return to caller */
-      shadow_frame.SetDexPC(dex::kDexNoIndex);
-    }
-    ctx->result = result;
-    exit_interpreter_loop = true;
+    return HandleReturn(result);
   }
 
-  ALWAYS_INLINE void RETURN_VOID() REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool RETURN_VOID() {
     QuasiAtomic::ThreadFenceForConstructor();
     JValue result;
-    self->AllowThreadSuspension();
-    if (!HandleMonitorChecks()) {
-      return;
-    }
-    if (UNLIKELY(NeedsMethodExitEvent(instrumentation) &&
-                 !SendMethodExitEvents(self,
-                                       instrumentation,
-                                       shadow_frame,
-                                       shadow_frame.GetThisObject(Accessor().InsSize()),
-                                       shadow_frame.GetMethod(),
-                                       inst->GetDexPc(Insns()),
-                                       result))) {
-      if (!HandlePendingExceptionWithInstrumentation(nullptr)) {
-        return;
-      }
-    }
-    if (ctx->interpret_one_instruction) {
-      /* Signal mterp to return to caller */
-      shadow_frame.SetDexPC(dex::kDexNoIndex);
-    }
-    ctx->result = result;
-    exit_interpreter_loop = true;
+    return HandleReturn(result);
   }
 
-  ALWAYS_INLINE void RETURN() REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool RETURN() {
     JValue result;
     result.SetJ(0);
-    result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
-    self->AllowThreadSuspension();
-    if (!HandleMonitorChecks()) {
-      return;
-    }
-    if (UNLIKELY(NeedsMethodExitEvent(instrumentation) &&
-                 !SendMethodExitEvents(self,
-                                       instrumentation,
-                                       shadow_frame,
-                                       shadow_frame.GetThisObject(Accessor().InsSize()),
-                                       shadow_frame.GetMethod(),
-                                       inst->GetDexPc(Insns()),
-                                       result))) {
-      if (!HandlePendingExceptionWithInstrumentation(nullptr)) {
-        return;
-      }
-    }
-    if (ctx->interpret_one_instruction) {
-      /* Signal mterp to return to caller */
-      shadow_frame.SetDexPC(dex::kDexNoIndex);
-    }
-    ctx->result = result;
-    exit_interpreter_loop = true;
+    result.SetI(GetVReg(A()));
+    return HandleReturn(result);
   }
 
-  ALWAYS_INLINE void RETURN_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool RETURN_WIDE() {
     JValue result;
-    result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
-    self->AllowThreadSuspension();
-    if (!HandleMonitorChecks()) {
-      return;
-    }
-    if (UNLIKELY(NeedsMethodExitEvent(instrumentation) &&
-                 !SendMethodExitEvents(self,
-                                       instrumentation,
-                                       shadow_frame,
-                                       shadow_frame.GetThisObject(Accessor().InsSize()),
-                                       shadow_frame.GetMethod(),
-                                       inst->GetDexPc(Insns()),
-                                       result))) {
-      if (!HandlePendingExceptionWithInstrumentation(nullptr)) {
-        return;
-      }
-    }
-    if (ctx->interpret_one_instruction) {
-      /* Signal mterp to return to caller */
-      shadow_frame.SetDexPC(dex::kDexNoIndex);
-    }
-    ctx->result = result;
-    exit_interpreter_loop = true;
+    result.SetJ(GetVRegLong(A()));
+    return HandleReturn(result);
   }
 
-  ALWAYS_INLINE void RETURN_OBJECT() REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool RETURN_OBJECT() {
     JValue result;
     self->AllowThreadSuspension();
     if (!HandleMonitorChecks()) {
-      return;
+      return false;
     }
-    const size_t ref_idx = inst->VRegA_11x(inst_data);
-    ObjPtr<mirror::Object> obj_result = shadow_frame.GetVRegReference(ref_idx);
+    const size_t ref_idx = A();
+    ObjPtr<mirror::Object> obj_result = GetVRegReference(ref_idx);
     if (do_assignability_check && obj_result != nullptr) {
       ObjPtr<mirror::Class> return_type = shadow_frame.GetMethod()->ResolveReturnType();
       // Re-load since it might have moved.
-      obj_result = shadow_frame.GetVRegReference(ref_idx);
+      obj_result = GetVRegReference(ref_idx);
       if (return_type == nullptr) {
         // Return the pending exception.
-        HANDLE_PENDING_EXCEPTION();
+        return false;  // Pending exception.
       }
       if (!obj_result->VerifierInstanceOf(return_type)) {
+        CHECK_LE(Runtime::Current()->GetTargetSdkVersion(), 29u);
         // This should never happen.
         std::string temp1, temp2;
         self->ThrowNewExceptionF("Ljava/lang/InternalError;",
                                  "Returning '%s' that is not instance of return type '%s'",
                                  obj_result->GetClass()->GetDescriptor(&temp1),
                                  return_type->GetDescriptor(&temp2));
-        HANDLE_PENDING_EXCEPTION();
+        return false;  // Pending exception.
       }
     }
+    StackHandleScope<1> hs(self);
+    MutableHandle<mirror::Object> h_result(hs.NewHandle(obj_result));
     result.SetL(obj_result);
     if (UNLIKELY(NeedsMethodExitEvent(instrumentation) &&
                  !SendMethodExitEvents(self,
@@ -614,307 +519,290 @@
                                        shadow_frame.GetThisObject(Accessor().InsSize()),
                                        shadow_frame.GetMethod(),
                                        inst->GetDexPc(Insns()),
-                                       result))) {
-      if (!HandlePendingExceptionWithInstrumentation(nullptr)) {
-        return;
-      }
+                                       h_result))) {
+      DCHECK(self->IsExceptionPending());
+      // Do not raise exception event if it is caused by other instrumentation event.
+      shadow_frame.SetSkipNextExceptionEvent(true);
+      return false;  // Pending exception.
     }
-    // Re-load since it might have moved during the MethodExitEvent.
-    result.SetL(shadow_frame.GetVRegReference(ref_idx));
-    if (ctx->interpret_one_instruction) {
-      /* Signal mterp to return to caller */
-      shadow_frame.SetDexPC(dex::kDexNoIndex);
-    }
+    // Re-load since it might have moved or been replaced during the MethodExitEvent.
+    result.SetL(h_result.Get());
     ctx->result = result;
     exit_interpreter_loop = true;
+    return false;
   }
 
-  ALWAYS_INLINE void CONST_4() REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool CONST_4() {
     uint4_t dst = inst->VRegA_11n(inst_data);
     int4_t val = inst->VRegB_11n(inst_data);
-    shadow_frame.SetVReg(dst, val);
+    SetVReg(dst, val);
     if (val == 0) {
-      shadow_frame.SetVRegReference(dst, nullptr);
+      SetVRegReference(dst, nullptr);
     }
-    inst = inst->Next_1xx();
+    return true;
   }
 
-  ALWAYS_INLINE void CONST_16() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint8_t dst = inst->VRegA_21s(inst_data);
-    int16_t val = inst->VRegB_21s();
-    shadow_frame.SetVReg(dst, val);
+  HANDLER_ATTRIBUTES bool CONST_16() {
+    uint8_t dst = A();
+    int16_t val = B();
+    SetVReg(dst, val);
     if (val == 0) {
-      shadow_frame.SetVRegReference(dst, nullptr);
+      SetVRegReference(dst, nullptr);
     }
-    inst = inst->Next_2xx();
+    return true;
   }
 
-  ALWAYS_INLINE void CONST() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint8_t dst = inst->VRegA_31i(inst_data);
-    int32_t val = inst->VRegB_31i();
-    shadow_frame.SetVReg(dst, val);
+  HANDLER_ATTRIBUTES bool CONST() {
+    uint8_t dst = A();
+    int32_t val = B();
+    SetVReg(dst, val);
     if (val == 0) {
-      shadow_frame.SetVRegReference(dst, nullptr);
+      SetVRegReference(dst, nullptr);
     }
-    inst = inst->Next_3xx();
+    return true;
   }
 
-  ALWAYS_INLINE void CONST_HIGH16() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint8_t dst = inst->VRegA_21h(inst_data);
-    int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
-    shadow_frame.SetVReg(dst, val);
+  HANDLER_ATTRIBUTES bool CONST_HIGH16() {
+    uint8_t dst = A();
+    int32_t val = static_cast<int32_t>(B() << 16);
+    SetVReg(dst, val);
     if (val == 0) {
-      shadow_frame.SetVRegReference(dst, nullptr);
+      SetVRegReference(dst, nullptr);
     }
-    inst = inst->Next_2xx();
+    return true;
   }
 
-  ALWAYS_INLINE void CONST_WIDE_16() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(inst->VRegA_21s(inst_data), inst->VRegB_21s());
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool CONST_WIDE_16() {
+    SetVRegLong(A(), B());
+    return true;
   }
 
-  ALWAYS_INLINE void CONST_WIDE_32() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(inst->VRegA_31i(inst_data), inst->VRegB_31i());
-    inst = inst->Next_3xx();
+  HANDLER_ATTRIBUTES bool CONST_WIDE_32() {
+    SetVRegLong(A(), B());
+    return true;
   }
 
-  ALWAYS_INLINE void CONST_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(inst->VRegA_51l(inst_data), inst->VRegB_51l());
-    inst = inst->Next_51l();
+  HANDLER_ATTRIBUTES bool CONST_WIDE() {
+    SetVRegLong(A(), inst->WideVRegB());
+    return true;
   }
 
-  ALWAYS_INLINE void CONST_WIDE_HIGH16() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(inst->VRegA_21h(inst_data),
-                             static_cast<uint64_t>(inst->VRegB_21h()) << 48);
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool CONST_WIDE_HIGH16() {
+    SetVRegLong(A(), static_cast<uint64_t>(B()) << 48);
+    return true;
   }
 
-  ALWAYS_INLINE void CONST_STRING() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::String> s = ResolveString(self,
-                                             shadow_frame,
-                                             dex::StringIndex(inst->VRegB_21c()));
+  HANDLER_ATTRIBUTES bool CONST_STRING() {
+    ObjPtr<mirror::String> s = ResolveString(self, shadow_frame, dex::StringIndex(B()));
     if (UNLIKELY(s == nullptr)) {
-      HANDLE_PENDING_EXCEPTION();
+      return false;  // Pending exception.
     } else {
-      shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), s);
-      inst = inst->Next_2xx();
+      SetVRegReference(A(), s);
     }
+    return true;
   }
 
-  ALWAYS_INLINE void CONST_STRING_JUMBO() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::String> s = ResolveString(self,
-                                             shadow_frame,
-                                             dex::StringIndex(inst->VRegB_31c()));
+  HANDLER_ATTRIBUTES bool CONST_STRING_JUMBO() {
+    ObjPtr<mirror::String> s = ResolveString(self, shadow_frame, dex::StringIndex(B()));
     if (UNLIKELY(s == nullptr)) {
-      HANDLE_PENDING_EXCEPTION();
+      return false;  // Pending exception.
     } else {
-      shadow_frame.SetVRegReference(inst->VRegA_31c(inst_data), s);
-      inst = inst->Next_3xx();
+      SetVRegReference(A(), s);
     }
+    return true;
   }
 
-  ALWAYS_INLINE void CONST_CLASS() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
+  HANDLER_ATTRIBUTES bool CONST_CLASS() {
+    ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(B()),
                                                      shadow_frame.GetMethod(),
                                                      self,
                                                      false,
                                                      do_access_check);
     if (UNLIKELY(c == nullptr)) {
-      HANDLE_PENDING_EXCEPTION();
+      return false;  // Pending exception.
     } else {
-      shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), c);
-      inst = inst->Next_2xx();
+      SetVRegReference(A(), c);
     }
+    return true;
   }
 
-  ALWAYS_INLINE void CONST_METHOD_HANDLE() REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool CONST_METHOD_HANDLE() {
     ClassLinker* cl = Runtime::Current()->GetClassLinker();
     ObjPtr<mirror::MethodHandle> mh = cl->ResolveMethodHandle(self,
-                                                              inst->VRegB_21c(),
+                                                              B(),
                                                               shadow_frame.GetMethod());
     if (UNLIKELY(mh == nullptr)) {
-      HANDLE_PENDING_EXCEPTION();
+      return false;  // Pending exception.
     } else {
-      shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), mh);
-      inst = inst->Next_2xx();
+      SetVRegReference(A(), mh);
     }
+    return true;
   }
 
-  ALWAYS_INLINE void CONST_METHOD_TYPE() REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool CONST_METHOD_TYPE() {
     ClassLinker* cl = Runtime::Current()->GetClassLinker();
     ObjPtr<mirror::MethodType> mt = cl->ResolveMethodType(self,
-                                                          dex::ProtoIndex(inst->VRegB_21c()),
+                                                          dex::ProtoIndex(B()),
                                                           shadow_frame.GetMethod());
     if (UNLIKELY(mt == nullptr)) {
-      HANDLE_PENDING_EXCEPTION();
+      return false;  // Pending exception.
     } else {
-      shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), mt);
-      inst = inst->Next_2xx();
+      SetVRegReference(A(), mt);
     }
+    return true;
   }
 
-  ALWAYS_INLINE void MONITOR_ENTER() REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool MONITOR_ENTER() {
     if (!HandleAsyncException()) {
-      return;
+      return false;
     }
-    ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+    ObjPtr<mirror::Object> obj = GetVRegReference(A());
     if (UNLIKELY(obj == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
-      HANDLE_PENDING_EXCEPTION();
+      return false;  // Pending exception.
     } else {
       DoMonitorEnter<do_assignability_check>(self, &shadow_frame, obj);
-      POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
+      return !self->IsExceptionPending();
     }
   }
 
-  ALWAYS_INLINE void MONITOR_EXIT() REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool MONITOR_EXIT() {
     if (!HandleAsyncException()) {
-      return;
+      return false;
     }
-    ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+    ObjPtr<mirror::Object> obj = GetVRegReference(A());
     if (UNLIKELY(obj == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
-      HANDLE_PENDING_EXCEPTION();
+      return false;  // Pending exception.
     } else {
       DoMonitorExit<do_assignability_check>(self, &shadow_frame, obj);
-      POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
+      return !self->IsExceptionPending();
     }
   }
 
-  ALWAYS_INLINE void CHECK_CAST() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
+  HANDLER_ATTRIBUTES bool CHECK_CAST() {
+    ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(B()),
                                                      shadow_frame.GetMethod(),
                                                      self,
                                                      false,
                                                      do_access_check);
     if (UNLIKELY(c == nullptr)) {
-      HANDLE_PENDING_EXCEPTION();
+      return false;  // Pending exception.
     } else {
-      ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_21c(inst_data));
+      ObjPtr<mirror::Object> obj = GetVRegReference(A());
       if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
         ThrowClassCastException(c, obj->GetClass());
-        HANDLE_PENDING_EXCEPTION();
-      } else {
-        inst = inst->Next_2xx();
+        return false;  // Pending exception.
       }
     }
+    return true;
   }
 
-  ALWAYS_INLINE void INSTANCE_OF() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegC_22c()),
+  HANDLER_ATTRIBUTES bool INSTANCE_OF() {
+    ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(C()),
                                                      shadow_frame.GetMethod(),
                                                      self,
                                                      false,
                                                      do_access_check);
     if (UNLIKELY(c == nullptr)) {
-      HANDLE_PENDING_EXCEPTION();
+      return false;  // Pending exception.
     } else {
-      ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
-      shadow_frame.SetVReg(inst->VRegA_22c(inst_data),
-                           (obj != nullptr && obj->InstanceOf(c)) ? 1 : 0);
-      inst = inst->Next_2xx();
+      ObjPtr<mirror::Object> obj = GetVRegReference(B());
+      SetVReg(A(), (obj != nullptr && obj->InstanceOf(c)) ? 1 : 0);
     }
+    return true;
   }
 
-  ALWAYS_INLINE void ARRAY_LENGTH() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::Object> array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
+  HANDLER_ATTRIBUTES bool ARRAY_LENGTH() {
+    ObjPtr<mirror::Object> array = GetVRegReference(B());
     if (UNLIKELY(array == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
-      HANDLE_PENDING_EXCEPTION();
+      return false;  // Pending exception.
     } else {
-      shadow_frame.SetVReg(inst->VRegA_12x(inst_data), array->AsArray()->GetLength());
-      inst = inst->Next_1xx();
+      SetVReg(A(), array->AsArray()->GetLength());
     }
+    return true;
   }
 
-  ALWAYS_INLINE void NEW_INSTANCE() REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool NEW_INSTANCE() {
     ObjPtr<mirror::Object> obj = nullptr;
-    ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(inst->VRegB_21c()),
+    ObjPtr<mirror::Class> c = ResolveVerifyAndClinit(dex::TypeIndex(B()),
                                                      shadow_frame.GetMethod(),
                                                      self,
                                                      false,
                                                      do_access_check);
     if (LIKELY(c != nullptr)) {
+      // Don't allow finalizable objects to be allocated during a transaction since these can't
+      // be finalized without a started runtime.
+      if (transaction_active && c->IsFinalizable()) {
+        AbortTransactionF(self,
+                          "Allocating finalizable object in transaction: %s",
+                          c->PrettyDescriptor().c_str());
+        return false;  // Pending exception.
+      }
+      gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
       if (UNLIKELY(c->IsStringClass())) {
-        gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
-        obj = mirror::String::AllocEmptyString<true>(self, allocator_type);
+        obj = mirror::String::AllocEmptyString(self, allocator_type);
       } else {
-        obj = AllocObjectFromCode<true>(
-            c.Ptr(),
-            self,
-            Runtime::Current()->GetHeap()->GetCurrentAllocator());
+        obj = AllocObjectFromCode(c, self, allocator_type);
       }
     }
     if (UNLIKELY(obj == nullptr)) {
-      HANDLE_PENDING_EXCEPTION();
+      return false;  // Pending exception.
     } else {
       obj->GetClass()->AssertInitializedOrInitializingInThread(self);
-      // Don't allow finalizable objects to be allocated during a transaction since these can't
-      // be finalized without a started runtime.
-      if (transaction_active && obj->GetClass()->IsFinalizable()) {
-        AbortTransactionF(self, "Allocating finalizable object in transaction: %s",
-                          obj->PrettyTypeOf().c_str());
-        HANDLE_PENDING_EXCEPTION();
-      }
-      shadow_frame.SetVRegReference(inst->VRegA_21c(inst_data), obj);
-      inst = inst->Next_2xx();
+      SetVRegReference(A(), obj);
     }
+    return true;
   }
 
-  ALWAYS_INLINE void NEW_ARRAY() REQUIRES_SHARED(Locks::mutator_lock_) {
-    int32_t length = shadow_frame.GetVReg(inst->VRegB_22c(inst_data));
-    ObjPtr<mirror::Object> obj = AllocArrayFromCode<do_access_check, true>(
-        dex::TypeIndex(inst->VRegC_22c()),
+  HANDLER_ATTRIBUTES bool NEW_ARRAY() {
+    int32_t length = GetVReg(B());
+    ObjPtr<mirror::Object> obj = AllocArrayFromCode<do_access_check>(
+        dex::TypeIndex(C()),
         length,
         shadow_frame.GetMethod(),
         self,
         Runtime::Current()->GetHeap()->GetCurrentAllocator());
     if (UNLIKELY(obj == nullptr)) {
-      HANDLE_PENDING_EXCEPTION();
+      return false;  // Pending exception.
     } else {
-      shadow_frame.SetVRegReference(inst->VRegA_22c(inst_data), obj);
-      inst = inst->Next_2xx();
+      SetVRegReference(A(), obj);
     }
+    return true;
   }
 
-  ALWAYS_INLINE void FILLED_NEW_ARRAY() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success =
-        DoFilledNewArray<false, do_access_check, transaction_active>(inst, shadow_frame, self,
-                                                                     ResultRegister());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+  HANDLER_ATTRIBUTES bool FILLED_NEW_ARRAY() {
+    return DoFilledNewArray<false, do_access_check, transaction_active>(
+        inst, shadow_frame, self, ResultRegister());
   }
 
-  ALWAYS_INLINE void FILLED_NEW_ARRAY_RANGE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success =
-        DoFilledNewArray<true, do_access_check, transaction_active>(inst, shadow_frame,
-                                                                    self, ResultRegister());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
+  HANDLER_ATTRIBUTES bool FILLED_NEW_ARRAY_RANGE() {
+    return DoFilledNewArray<true, do_access_check, transaction_active>(
+        inst, shadow_frame, self, ResultRegister());
   }
 
-  ALWAYS_INLINE void FILL_ARRAY_DATA() REQUIRES_SHARED(Locks::mutator_lock_) {
-    const uint16_t* payload_addr = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
+  HANDLER_ATTRIBUTES bool FILL_ARRAY_DATA() {
+    const uint16_t* payload_addr = reinterpret_cast<const uint16_t*>(inst) + B();
     const Instruction::ArrayDataPayload* payload =
         reinterpret_cast<const Instruction::ArrayDataPayload*>(payload_addr);
-    ObjPtr<mirror::Object> obj = shadow_frame.GetVRegReference(inst->VRegA_31t(inst_data));
-    bool success = FillArrayData(obj, payload);
-    if (!success) {
-      HANDLE_PENDING_EXCEPTION();
+    ObjPtr<mirror::Object> obj = GetVRegReference(A());
+    if (!FillArrayData(obj, payload)) {
+      return false;  // Pending exception.
     }
     if (transaction_active) {
       RecordArrayElementsInTransaction(obj->AsArray(), payload->element_count);
     }
-    inst = inst->Next_3xx();
+    return true;
   }
 
-  ALWAYS_INLINE void THROW() REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool THROW() {
     if (!HandleAsyncException()) {
-      return;
+      return false;
     }
-    ObjPtr<mirror::Object> exception =
-        shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
+    ObjPtr<mirror::Object> exception = GetVRegReference(A());
     if (UNLIKELY(exception == nullptr)) {
-      ThrowNullPointerException("throw with null exception");
+      ThrowNullPointerException();
     } else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
       // This should never happen.
       std::string temp;
@@ -924,1657 +812,1032 @@
     } else {
       self->SetException(exception->AsThrowable());
     }
-    HANDLE_PENDING_EXCEPTION();
+    return false;  // Pending exception.
   }
 
-  ALWAYS_INLINE void GOTO() REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (!HandleAsyncException()) {
-      return;
-    }
-    int8_t offset = inst->VRegA_10t(inst_data);
-    BRANCH_INSTRUMENTATION(offset);
-    inst = inst->RelativeAt(offset);
-    HandleBackwardBranch(offset);
+  HANDLER_ATTRIBUTES bool GOTO() {
+    return HandleGoto(A());
   }
 
-  ALWAYS_INLINE void GOTO_16() REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (!HandleAsyncException()) {
-      return;
-    }
-    int16_t offset = inst->VRegA_20t();
-    BRANCH_INSTRUMENTATION(offset);
-    inst = inst->RelativeAt(offset);
-    HandleBackwardBranch(offset);
+  HANDLER_ATTRIBUTES bool GOTO_16() {
+    return HandleGoto(A());
   }
 
-  ALWAYS_INLINE void GOTO_32() REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (!HandleAsyncException()) {
-      return;
-    }
-    int32_t offset = inst->VRegA_30t();
-    BRANCH_INSTRUMENTATION(offset);
-    inst = inst->RelativeAt(offset);
-    HandleBackwardBranch(offset);
+  HANDLER_ATTRIBUTES bool GOTO_32() {
+    return HandleGoto(A());
   }
 
-  ALWAYS_INLINE void PACKED_SWITCH() REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool PACKED_SWITCH() {
     int32_t offset = DoPackedSwitch(inst, shadow_frame, inst_data);
-    BRANCH_INSTRUMENTATION(offset);
-    inst = inst->RelativeAt(offset);
+    if (!BranchInstrumentation(offset)) {
+      return false;
+    }
+    SetNextInstruction(inst->RelativeAt(offset));
     HandleBackwardBranch(offset);
+    return true;
   }
 
-  ALWAYS_INLINE void SPARSE_SWITCH() REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool SPARSE_SWITCH() {
     int32_t offset = DoSparseSwitch(inst, shadow_frame, inst_data);
-    BRANCH_INSTRUMENTATION(offset);
-    inst = inst->RelativeAt(offset);
+    if (!BranchInstrumentation(offset)) {
+      return false;
+    }
+    SetNextInstruction(inst->RelativeAt(offset));
     HandleBackwardBranch(offset);
+    return true;
   }
 
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wfloat-equal"
-
-
-  ALWAYS_INLINE void CMPL_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
-    float val2 = shadow_frame.GetVRegFloat(inst->VRegC_23x());
-    int32_t result;
-    if (val1 > val2) {
-      result = 1;
-    } else if (val1 == val2) {
-      result = 0;
-    } else {
-      result = -1;
-    }
-    shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool CMPL_FLOAT() {
+    return HandleCmpl<float>(GetVRegFloat(B()), GetVRegFloat(C()));
   }
 
-  ALWAYS_INLINE void CMPG_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    float val1 = shadow_frame.GetVRegFloat(inst->VRegB_23x());
-    float val2 = shadow_frame.GetVRegFloat(inst->VRegC_23x());
-    int32_t result;
-    if (val1 < val2) {
-      result = -1;
-    } else if (val1 == val2) {
-      result = 0;
-    } else {
-      result = 1;
-    }
-    shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool CMPG_FLOAT() {
+    return HandleCmpg<float>(GetVRegFloat(B()), GetVRegFloat(C()));
   }
 
-  ALWAYS_INLINE void CMPL_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    double val1 = shadow_frame.GetVRegDouble(inst->VRegB_23x());
-    double val2 = shadow_frame.GetVRegDouble(inst->VRegC_23x());
-    int32_t result;
-    if (val1 > val2) {
-      result = 1;
-    } else if (val1 == val2) {
-      result = 0;
-    } else {
-      result = -1;
-    }
-    shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool CMPL_DOUBLE() {
+    return HandleCmpl<double>(GetVRegDouble(B()), GetVRegDouble(C()));
   }
 
-
-  ALWAYS_INLINE void CMPG_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    double val1 = shadow_frame.GetVRegDouble(inst->VRegB_23x());
-    double val2 = shadow_frame.GetVRegDouble(inst->VRegC_23x());
-    int32_t result;
-    if (val1 < val2) {
-      result = -1;
-    } else if (val1 == val2) {
-      result = 0;
-    } else {
-      result = 1;
-    }
-    shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool CMPG_DOUBLE() {
+    return HandleCmpg<double>(GetVRegDouble(B()), GetVRegDouble(C()));
   }
 
-#pragma clang diagnostic pop
-
-
-  ALWAYS_INLINE void CMP_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
-    int64_t val1 = shadow_frame.GetVRegLong(inst->VRegB_23x());
-    int64_t val2 = shadow_frame.GetVRegLong(inst->VRegC_23x());
-    int32_t result;
-    if (val1 > val2) {
-      result = 1;
-    } else if (val1 == val2) {
-      result = 0;
-    } else {
-      result = -1;
-    }
-    shadow_frame.SetVReg(inst->VRegA_23x(inst_data), result);
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool CMP_LONG() {
+    return HandleCmpl<int64_t>(GetVRegLong(B()), GetVRegLong(C()));
   }
 
-  ALWAYS_INLINE void IF_EQ() REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) ==
-        shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
-      int16_t offset = inst->VRegC_22t();
-      BRANCH_INSTRUMENTATION(offset);
-      inst = inst->RelativeAt(offset);
-      HandleBackwardBranch(offset);
-    } else {
-      BRANCH_INSTRUMENTATION(2);
-      inst = inst->Next_2xx();
-    }
+  HANDLER_ATTRIBUTES bool IF_EQ() {
+    return HandleIf(GetVReg(A()) == GetVReg(B()), C());
   }
 
-  ALWAYS_INLINE void IF_NE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) !=
-        shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
-      int16_t offset = inst->VRegC_22t();
-      BRANCH_INSTRUMENTATION(offset);
-      inst = inst->RelativeAt(offset);
-      HandleBackwardBranch(offset);
-    } else {
-      BRANCH_INSTRUMENTATION(2);
-      inst = inst->Next_2xx();
-    }
+  HANDLER_ATTRIBUTES bool IF_NE() {
+    return HandleIf(GetVReg(A()) != GetVReg(B()), C());
   }
 
-  ALWAYS_INLINE void IF_LT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <
-        shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
-      int16_t offset = inst->VRegC_22t();
-      BRANCH_INSTRUMENTATION(offset);
-      inst = inst->RelativeAt(offset);
-      HandleBackwardBranch(offset);
-    } else {
-      BRANCH_INSTRUMENTATION(2);
-      inst = inst->Next_2xx();
-    }
+  HANDLER_ATTRIBUTES bool IF_LT() {
+    return HandleIf(GetVReg(A()) < GetVReg(B()), C());
   }
 
-  ALWAYS_INLINE void IF_GE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >=
-        shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
-      int16_t offset = inst->VRegC_22t();
-      BRANCH_INSTRUMENTATION(offset);
-      inst = inst->RelativeAt(offset);
-      HandleBackwardBranch(offset);
-    } else {
-      BRANCH_INSTRUMENTATION(2);
-      inst = inst->Next_2xx();
-    }
+  HANDLER_ATTRIBUTES bool IF_GE() {
+    return HandleIf(GetVReg(A()) >= GetVReg(B()), C());
   }
 
-  ALWAYS_INLINE void IF_GT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) >
-    shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
-      int16_t offset = inst->VRegC_22t();
-      BRANCH_INSTRUMENTATION(offset);
-      inst = inst->RelativeAt(offset);
-      HandleBackwardBranch(offset);
-    } else {
-      BRANCH_INSTRUMENTATION(2);
-      inst = inst->Next_2xx();
-    }
+  HANDLER_ATTRIBUTES bool IF_GT() {
+    return HandleIf(GetVReg(A()) > GetVReg(B()), C());
   }
 
-  ALWAYS_INLINE void IF_LE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (shadow_frame.GetVReg(inst->VRegA_22t(inst_data)) <=
-        shadow_frame.GetVReg(inst->VRegB_22t(inst_data))) {
-      int16_t offset = inst->VRegC_22t();
-      BRANCH_INSTRUMENTATION(offset);
-      inst = inst->RelativeAt(offset);
-      HandleBackwardBranch(offset);
-    } else {
-      BRANCH_INSTRUMENTATION(2);
-      inst = inst->Next_2xx();
-    }
+  HANDLER_ATTRIBUTES bool IF_LE() {
+    return HandleIf(GetVReg(A()) <= GetVReg(B()), C());
   }
 
-  ALWAYS_INLINE void IF_EQZ() REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) == 0) {
-      int16_t offset = inst->VRegB_21t();
-      BRANCH_INSTRUMENTATION(offset);
-      inst = inst->RelativeAt(offset);
-      HandleBackwardBranch(offset);
-    } else {
-      BRANCH_INSTRUMENTATION(2);
-      inst = inst->Next_2xx();
-    }
+  HANDLER_ATTRIBUTES bool IF_EQZ() {
+    return HandleIf(GetVReg(A()) == 0, B());
   }
 
-  ALWAYS_INLINE void IF_NEZ() REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) != 0) {
-      int16_t offset = inst->VRegB_21t();
-      BRANCH_INSTRUMENTATION(offset);
-      inst = inst->RelativeAt(offset);
-      HandleBackwardBranch(offset);
-    } else {
-      BRANCH_INSTRUMENTATION(2);
-      inst = inst->Next_2xx();
-    }
+  HANDLER_ATTRIBUTES bool IF_NEZ() {
+    return HandleIf(GetVReg(A()) != 0, B());
   }
 
-  ALWAYS_INLINE void IF_LTZ() REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) < 0) {
-      int16_t offset = inst->VRegB_21t();
-      BRANCH_INSTRUMENTATION(offset);
-      inst = inst->RelativeAt(offset);
-      HandleBackwardBranch(offset);
-    } else {
-      BRANCH_INSTRUMENTATION(2);
-      inst = inst->Next_2xx();
-    }
+  HANDLER_ATTRIBUTES bool IF_LTZ() {
+    return HandleIf(GetVReg(A()) < 0, B());
   }
 
-  ALWAYS_INLINE void IF_GEZ() REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) >= 0) {
-      int16_t offset = inst->VRegB_21t();
-      BRANCH_INSTRUMENTATION(offset);
-      inst = inst->RelativeAt(offset);
-      HandleBackwardBranch(offset);
-    } else {
-      BRANCH_INSTRUMENTATION(2);
-      inst = inst->Next_2xx();
-    }
+  HANDLER_ATTRIBUTES bool IF_GEZ() {
+    return HandleIf(GetVReg(A()) >= 0, B());
   }
 
-  ALWAYS_INLINE void IF_GTZ() REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) > 0) {
-      int16_t offset = inst->VRegB_21t();
-      BRANCH_INSTRUMENTATION(offset);
-      inst = inst->RelativeAt(offset);
-      HandleBackwardBranch(offset);
-    } else {
-      BRANCH_INSTRUMENTATION(2);
-      inst = inst->Next_2xx();
-    }
+  HANDLER_ATTRIBUTES bool IF_GTZ() {
+    return HandleIf(GetVReg(A()) > 0, B());
   }
 
-  ALWAYS_INLINE void IF_LEZ() REQUIRES_SHARED(Locks::mutator_lock_) {
-    if (shadow_frame.GetVReg(inst->VRegA_21t(inst_data)) <= 0) {
-      int16_t offset = inst->VRegB_21t();
-      BRANCH_INSTRUMENTATION(offset);
-      inst = inst->RelativeAt(offset);
-      HandleBackwardBranch(offset);
-    } else {
-      BRANCH_INSTRUMENTATION(2);
-      inst = inst->Next_2xx();
-    }
+  HANDLER_ATTRIBUTES bool IF_LEZ() {
+    return HandleIf(GetVReg(A()) <= 0, B());
   }
 
-  ALWAYS_INLINE void AGET_BOOLEAN() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
+  HANDLER_ATTRIBUTES bool AGET_BOOLEAN() {
+    return HandleAGet<mirror::BooleanArray>(&InstructionHandler::SetVReg);
+  }
+
+  HANDLER_ATTRIBUTES bool AGET_BYTE() {
+    return HandleAGet<mirror::ByteArray>(&InstructionHandler::SetVReg);
+  }
+
+  HANDLER_ATTRIBUTES bool AGET_CHAR() {
+    return HandleAGet<mirror::CharArray>(&InstructionHandler::SetVReg);
+  }
+
+  HANDLER_ATTRIBUTES bool AGET_SHORT() {
+    return HandleAGet<mirror::ShortArray>(&InstructionHandler::SetVReg);
+  }
+
+  HANDLER_ATTRIBUTES bool AGET() {
+    return HandleAGet<mirror::IntArray>(&InstructionHandler::SetVReg);
+  }
+
+  HANDLER_ATTRIBUTES bool AGET_WIDE() {
+    return HandleAGet<mirror::LongArray>(&InstructionHandler::SetVRegLong);
+  }
+
+  HANDLER_ATTRIBUTES bool AGET_OBJECT() {
+    return HandleAGet<mirror::ObjectArray<mirror::Object>>(&InstructionHandler::SetVRegReference);
+  }
+
+  HANDLER_ATTRIBUTES bool APUT_BOOLEAN() {
+    return HandleAPut<mirror::BooleanArray>(GetVReg(A()));
+  }
+
+  HANDLER_ATTRIBUTES bool APUT_BYTE() {
+    return HandleAPut<mirror::ByteArray>(GetVReg(A()));
+  }
+
+  HANDLER_ATTRIBUTES bool APUT_CHAR() {
+    return HandleAPut<mirror::CharArray>(GetVReg(A()));
+  }
+
+  HANDLER_ATTRIBUTES bool APUT_SHORT() {
+    return HandleAPut<mirror::ShortArray>(GetVReg(A()));
+  }
+
+  HANDLER_ATTRIBUTES bool APUT() {
+    return HandleAPut<mirror::IntArray>(GetVReg(A()));
+  }
+
+  HANDLER_ATTRIBUTES bool APUT_WIDE() {
+    return HandleAPut<mirror::LongArray>(GetVRegLong(A()));
+  }
+
+  HANDLER_ATTRIBUTES bool APUT_OBJECT() {
+    ObjPtr<mirror::Object> a = GetVRegReference(B());
     if (UNLIKELY(a == nullptr)) {
       ThrowNullPointerExceptionFromInterpreter();
-      HANDLE_PENDING_EXCEPTION();
+      return false;  // Pending exception.
     }
-    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-    ObjPtr<mirror::BooleanArray> array = a->AsBooleanArray();
-    if (array->CheckIsValidIndex(index)) {
-      shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
-      inst = inst->Next_2xx();
-    } else {
-      HANDLE_PENDING_EXCEPTION();
-    }
-  }
-
-  ALWAYS_INLINE void AGET_BYTE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == nullptr)) {
-      ThrowNullPointerExceptionFromInterpreter();
-      HANDLE_PENDING_EXCEPTION();
-    }
-    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-    ObjPtr<mirror::ByteArray> array = a->AsByteArray();
-    if (array->CheckIsValidIndex(index)) {
-      shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
-      inst = inst->Next_2xx();
-    } else {
-      HANDLE_PENDING_EXCEPTION();
-    }
-  }
-
-  ALWAYS_INLINE void AGET_CHAR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == nullptr)) {
-      ThrowNullPointerExceptionFromInterpreter();
-      HANDLE_PENDING_EXCEPTION();
-    }
-    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-    ObjPtr<mirror::CharArray> array = a->AsCharArray();
-    if (array->CheckIsValidIndex(index)) {
-      shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
-      inst = inst->Next_2xx();
-    } else {
-      HANDLE_PENDING_EXCEPTION();
-    }
-  }
-
-  ALWAYS_INLINE void AGET_SHORT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == nullptr)) {
-      ThrowNullPointerExceptionFromInterpreter();
-      HANDLE_PENDING_EXCEPTION();
-    }
-    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-    ObjPtr<mirror::ShortArray> array = a->AsShortArray();
-    if (array->CheckIsValidIndex(index)) {
-      shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
-      inst = inst->Next_2xx();
-    } else {
-      HANDLE_PENDING_EXCEPTION();
-    }
-  }
-
-  ALWAYS_INLINE void AGET() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == nullptr)) {
-      ThrowNullPointerExceptionFromInterpreter();
-      HANDLE_PENDING_EXCEPTION();
-    }
-    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-    DCHECK(a->IsIntArray() || a->IsFloatArray()) << a->PrettyTypeOf();
-    ObjPtr<mirror::IntArray> array = ObjPtr<mirror::IntArray>::DownCast(a);
-    if (array->CheckIsValidIndex(index)) {
-      shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
-      inst = inst->Next_2xx();
-    } else {
-      HANDLE_PENDING_EXCEPTION();
-    }
-  }
-
-  ALWAYS_INLINE void AGET_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == nullptr)) {
-      ThrowNullPointerExceptionFromInterpreter();
-      HANDLE_PENDING_EXCEPTION();
-    }
-    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-    DCHECK(a->IsLongArray() || a->IsDoubleArray()) << a->PrettyTypeOf();
-    ObjPtr<mirror::LongArray> array = ObjPtr<mirror::LongArray>::DownCast(a);
-    if (array->CheckIsValidIndex(index)) {
-      shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
-      inst = inst->Next_2xx();
-    } else {
-      HANDLE_PENDING_EXCEPTION();
-    }
-  }
-
-  ALWAYS_INLINE void AGET_OBJECT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == nullptr)) {
-      ThrowNullPointerExceptionFromInterpreter();
-      HANDLE_PENDING_EXCEPTION();
-    }
-    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-    ObjPtr<mirror::ObjectArray<mirror::Object>> array = a->AsObjectArray<mirror::Object>();
-    if (array->CheckIsValidIndex(index)) {
-      shadow_frame.SetVRegReference(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
-      inst = inst->Next_2xx();
-    } else {
-      HANDLE_PENDING_EXCEPTION();
-    }
-  }
-
-  ALWAYS_INLINE void APUT_BOOLEAN() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == nullptr)) {
-      ThrowNullPointerExceptionFromInterpreter();
-      HANDLE_PENDING_EXCEPTION();
-    }
-    uint8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
-    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-    ObjPtr<mirror::BooleanArray> array = a->AsBooleanArray();
-    if (array->CheckIsValidIndex(index)) {
-      array->SetWithoutChecks<transaction_active>(index, val);
-      inst = inst->Next_2xx();
-    } else {
-      HANDLE_PENDING_EXCEPTION();
-    }
-  }
-
-  ALWAYS_INLINE void APUT_BYTE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == nullptr)) {
-      ThrowNullPointerExceptionFromInterpreter();
-      HANDLE_PENDING_EXCEPTION();
-    }
-    int8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
-    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-    ObjPtr<mirror::ByteArray> array = a->AsByteArray();
-    if (array->CheckIsValidIndex(index)) {
-      array->SetWithoutChecks<transaction_active>(index, val);
-      inst = inst->Next_2xx();
-    } else {
-      HANDLE_PENDING_EXCEPTION();
-    }
-  }
-
-  ALWAYS_INLINE void APUT_CHAR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == nullptr)) {
-      ThrowNullPointerExceptionFromInterpreter();
-      HANDLE_PENDING_EXCEPTION();
-    }
-    uint16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
-    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-    ObjPtr<mirror::CharArray> array = a->AsCharArray();
-    if (array->CheckIsValidIndex(index)) {
-      array->SetWithoutChecks<transaction_active>(index, val);
-      inst = inst->Next_2xx();
-    } else {
-      HANDLE_PENDING_EXCEPTION();
-    }
-  }
-
-  ALWAYS_INLINE void APUT_SHORT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == nullptr)) {
-      ThrowNullPointerExceptionFromInterpreter();
-      HANDLE_PENDING_EXCEPTION();
-    }
-    int16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
-    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-    ObjPtr<mirror::ShortArray> array = a->AsShortArray();
-    if (array->CheckIsValidIndex(index)) {
-      array->SetWithoutChecks<transaction_active>(index, val);
-      inst = inst->Next_2xx();
-    } else {
-      HANDLE_PENDING_EXCEPTION();
-    }
-  }
-
-  ALWAYS_INLINE void APUT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == nullptr)) {
-      ThrowNullPointerExceptionFromInterpreter();
-      HANDLE_PENDING_EXCEPTION();
-    }
-    int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
-    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-    DCHECK(a->IsIntArray() || a->IsFloatArray()) << a->PrettyTypeOf();
-    ObjPtr<mirror::IntArray> array = ObjPtr<mirror::IntArray>::DownCast(a);
-    if (array->CheckIsValidIndex(index)) {
-      array->SetWithoutChecks<transaction_active>(index, val);
-      inst = inst->Next_2xx();
-    } else {
-      HANDLE_PENDING_EXCEPTION();
-    }
-  }
-
-  ALWAYS_INLINE void APUT_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == nullptr)) {
-      ThrowNullPointerExceptionFromInterpreter();
-      HANDLE_PENDING_EXCEPTION();
-    }
-    int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data));
-    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-    DCHECK(a->IsLongArray() || a->IsDoubleArray()) << a->PrettyTypeOf();
-    ObjPtr<mirror::LongArray> array = ObjPtr<mirror::LongArray>::DownCast(a);
-    if (array->CheckIsValidIndex(index)) {
-      array->SetWithoutChecks<transaction_active>(index, val);
-      inst = inst->Next_2xx();
-    } else {
-      HANDLE_PENDING_EXCEPTION();
-    }
-  }
-
-  ALWAYS_INLINE void APUT_OBJECT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    ObjPtr<mirror::Object> a = shadow_frame.GetVRegReference(inst->VRegB_23x());
-    if (UNLIKELY(a == nullptr)) {
-      ThrowNullPointerExceptionFromInterpreter();
-      HANDLE_PENDING_EXCEPTION();
-    }
-    int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
-    ObjPtr<mirror::Object> val = shadow_frame.GetVRegReference(inst->VRegA_23x(inst_data));
+    int32_t index = GetVReg(C());
+    ObjPtr<mirror::Object> val = GetVRegReference(A());
     ObjPtr<mirror::ObjectArray<mirror::Object>> array = a->AsObjectArray<mirror::Object>();
     if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
+      if (transaction_active &&
+          (!CheckWriteConstraint(self, array) || !CheckWriteValueConstraint(self, val))) {
+        return false;
+      }
       array->SetWithoutChecks<transaction_active>(index, val);
-      inst = inst->Next_2xx();
     } else {
-      HANDLE_PENDING_EXCEPTION();
+      return false;  // Pending exception.
     }
+    return true;
   }
 
-  ALWAYS_INLINE void IGET_BOOLEAN() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(
-        self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IGET_BOOLEAN() {
+    return HandleGet<InstancePrimitiveRead, Primitive::kPrimBoolean>();
   }
 
-  ALWAYS_INLINE void IGET_BYTE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(
-        self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IGET_BYTE() {
+    return HandleGet<InstancePrimitiveRead, Primitive::kPrimByte>();
   }
 
-  ALWAYS_INLINE void IGET_CHAR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(
-        self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IGET_CHAR() {
+    return HandleGet<InstancePrimitiveRead, Primitive::kPrimChar>();
   }
 
-  ALWAYS_INLINE void IGET_SHORT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(
-        self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IGET_SHORT() {
+    return HandleGet<InstancePrimitiveRead, Primitive::kPrimShort>();
   }
 
-  ALWAYS_INLINE void IGET() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(
-        self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IGET() {
+    return HandleGet<InstancePrimitiveRead, Primitive::kPrimInt>();
   }
 
-  ALWAYS_INLINE void IGET_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(
-        self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IGET_WIDE() {
+    return HandleGet<InstancePrimitiveRead, Primitive::kPrimLong>();
   }
 
-  ALWAYS_INLINE void IGET_OBJECT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(
-        self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IGET_OBJECT() {
+    return HandleGet<InstanceObjectRead, Primitive::kPrimNot>();
   }
 
-  ALWAYS_INLINE void IGET_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIGetQuick<Primitive::kPrimInt>(shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IGET_QUICK() {
+    return HandleGetQuick<Primitive::kPrimInt>();
   }
 
-  ALWAYS_INLINE void IGET_WIDE_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIGetQuick<Primitive::kPrimLong>(shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IGET_WIDE_QUICK() {
+    return HandleGetQuick<Primitive::kPrimLong>();
   }
 
-  ALWAYS_INLINE void IGET_OBJECT_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIGetQuick<Primitive::kPrimNot>(shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IGET_OBJECT_QUICK() {
+    return HandleGetQuick<Primitive::kPrimNot>();
   }
 
-  ALWAYS_INLINE void IGET_BOOLEAN_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIGetQuick<Primitive::kPrimBoolean>(shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IGET_BOOLEAN_QUICK() {
+    return HandleGetQuick<Primitive::kPrimBoolean>();
   }
 
-  ALWAYS_INLINE void IGET_BYTE_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIGetQuick<Primitive::kPrimByte>(shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IGET_BYTE_QUICK() {
+    return HandleGetQuick<Primitive::kPrimByte>();
   }
 
-  ALWAYS_INLINE void IGET_CHAR_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIGetQuick<Primitive::kPrimChar>(shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IGET_CHAR_QUICK() {
+    return HandleGetQuick<Primitive::kPrimChar>();
   }
 
-  ALWAYS_INLINE void IGET_SHORT_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIGetQuick<Primitive::kPrimShort>(shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IGET_SHORT_QUICK() {
+    return HandleGetQuick<Primitive::kPrimShort>();
   }
 
-  ALWAYS_INLINE void SGET_BOOLEAN() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool SGET_BOOLEAN() {
+    return HandleGet<StaticPrimitiveRead, Primitive::kPrimBoolean>();
   }
 
-  ALWAYS_INLINE void SGET_BYTE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool SGET_BYTE() {
+    return HandleGet<StaticPrimitiveRead, Primitive::kPrimByte>();
   }
 
-  ALWAYS_INLINE void SGET_CHAR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool SGET_CHAR() {
+    return HandleGet<StaticPrimitiveRead, Primitive::kPrimChar>();
   }
 
-  ALWAYS_INLINE void SGET_SHORT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool SGET_SHORT() {
+    return HandleGet<StaticPrimitiveRead, Primitive::kPrimShort>();
   }
 
-  ALWAYS_INLINE void SGET() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool SGET() {
+    return HandleGet<StaticPrimitiveRead, Primitive::kPrimInt>();
   }
 
-  ALWAYS_INLINE void SGET_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool SGET_WIDE() {
+    return HandleGet<StaticPrimitiveRead, Primitive::kPrimLong>();
   }
 
-  ALWAYS_INLINE void SGET_OBJECT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool SGET_OBJECT() {
+    return HandleGet<StaticObjectRead, Primitive::kPrimNot>();
   }
 
-  ALWAYS_INLINE void IPUT_BOOLEAN() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IPUT_BOOLEAN() {
+    return HandlePut<InstancePrimitiveWrite, Primitive::kPrimBoolean>();
   }
 
-  ALWAYS_INLINE void IPUT_BYTE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IPUT_BYTE() {
+    return HandlePut<InstancePrimitiveWrite, Primitive::kPrimByte>();
   }
 
-  ALWAYS_INLINE void IPUT_CHAR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IPUT_CHAR() {
+    return HandlePut<InstancePrimitiveWrite, Primitive::kPrimChar>();
   }
 
-  ALWAYS_INLINE void IPUT_SHORT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IPUT_SHORT() {
+    return HandlePut<InstancePrimitiveWrite, Primitive::kPrimShort>();
   }
 
-  ALWAYS_INLINE void IPUT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IPUT() {
+    return HandlePut<InstancePrimitiveWrite, Primitive::kPrimInt>();
   }
 
-  ALWAYS_INLINE void IPUT_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IPUT_WIDE() {
+    return HandlePut<InstancePrimitiveWrite, Primitive::kPrimLong>();
   }
 
-  ALWAYS_INLINE void IPUT_OBJECT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IPUT_OBJECT() {
+    return HandlePut<InstanceObjectWrite, Primitive::kPrimNot>();
   }
 
-  ALWAYS_INLINE void IPUT_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIPutQuick<Primitive::kPrimInt, transaction_active>(
-        shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IPUT_QUICK() {
+    return HandlePutQuick<Primitive::kPrimInt>();
   }
 
-  ALWAYS_INLINE void IPUT_BOOLEAN_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(
-        shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IPUT_BOOLEAN_QUICK() {
+    return HandlePutQuick<Primitive::kPrimBoolean>();
   }
 
-  ALWAYS_INLINE void IPUT_BYTE_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(
-        shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IPUT_BYTE_QUICK() {
+    return HandlePutQuick<Primitive::kPrimByte>();
   }
 
-  ALWAYS_INLINE void IPUT_CHAR_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(
-        shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IPUT_CHAR_QUICK() {
+    return HandlePutQuick<Primitive::kPrimChar>();
   }
 
-  ALWAYS_INLINE void IPUT_SHORT_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(
-        shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IPUT_SHORT_QUICK() {
+    return HandlePutQuick<Primitive::kPrimShort>();
   }
 
-  ALWAYS_INLINE void IPUT_WIDE_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(
-        shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IPUT_WIDE_QUICK() {
+    return HandlePutQuick<Primitive::kPrimLong>();
   }
 
-  ALWAYS_INLINE void IPUT_OBJECT_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIPutQuick<Primitive::kPrimNot, transaction_active>(
-        shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool IPUT_OBJECT_QUICK() {
+    return HandlePutQuick<Primitive::kPrimNot>();
   }
 
-  ALWAYS_INLINE void SPUT_BOOLEAN() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool SPUT_BOOLEAN() {
+    return HandlePut<StaticPrimitiveWrite, Primitive::kPrimBoolean>();
   }
 
-  ALWAYS_INLINE void SPUT_BYTE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool SPUT_BYTE() {
+    return HandlePut<StaticPrimitiveWrite, Primitive::kPrimByte>();
   }
 
-  ALWAYS_INLINE void SPUT_CHAR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool SPUT_CHAR() {
+    return HandlePut<StaticPrimitiveWrite, Primitive::kPrimChar>();
   }
 
-  ALWAYS_INLINE void SPUT_SHORT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool SPUT_SHORT() {
+    return HandlePut<StaticPrimitiveWrite, Primitive::kPrimShort>();
   }
 
-  ALWAYS_INLINE void SPUT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool SPUT() {
+    return HandlePut<StaticPrimitiveWrite, Primitive::kPrimInt>();
   }
 
-  ALWAYS_INLINE void SPUT_WIDE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool SPUT_WIDE() {
+    return HandlePut<StaticPrimitiveWrite, Primitive::kPrimLong>();
   }
 
-  ALWAYS_INLINE void SPUT_OBJECT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check,
-        transaction_active>(self, shadow_frame, inst, inst_data);
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool SPUT_OBJECT() {
+    return HandlePut<StaticObjectWrite, Primitive::kPrimNot>();
   }
 
-  ALWAYS_INLINE void INVOKE_VIRTUAL() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoInvoke<kVirtual, false, do_access_check, /*is_mterp=*/ false>(
-        self, shadow_frame, inst, inst_data, ResultRegister());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  HANDLER_ATTRIBUTES bool INVOKE_VIRTUAL() {
+    return HandleInvoke<kVirtual, /*is_range=*/ false>();
   }
 
-  ALWAYS_INLINE void INVOKE_VIRTUAL_RANGE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoInvoke<kVirtual, true, do_access_check, /*is_mterp=*/ false>(
-        self, shadow_frame, inst, inst_data, ResultRegister());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  HANDLER_ATTRIBUTES bool INVOKE_VIRTUAL_RANGE() {
+    return HandleInvoke<kVirtual, /*is_range=*/ true>();
   }
 
-  ALWAYS_INLINE void INVOKE_SUPER() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoInvoke<kSuper, false, do_access_check, /*is_mterp=*/ false>(
-        self, shadow_frame, inst, inst_data, ResultRegister());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  HANDLER_ATTRIBUTES bool INVOKE_SUPER() {
+    return HandleInvoke<kSuper, /*is_range=*/ false>();
   }
 
-  ALWAYS_INLINE void INVOKE_SUPER_RANGE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoInvoke<kSuper, true, do_access_check, /*is_mterp=*/ false>(
-        self, shadow_frame, inst, inst_data, ResultRegister());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  HANDLER_ATTRIBUTES bool INVOKE_SUPER_RANGE() {
+    return HandleInvoke<kSuper, /*is_range=*/ true>();
   }
 
-  ALWAYS_INLINE void INVOKE_DIRECT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoInvoke<kDirect, false, do_access_check, /*is_mterp=*/ false>(
-        self, shadow_frame, inst, inst_data, ResultRegister());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  HANDLER_ATTRIBUTES bool INVOKE_DIRECT() {
+    return HandleInvoke<kDirect, /*is_range=*/ false>();
   }
 
-  ALWAYS_INLINE void INVOKE_DIRECT_RANGE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoInvoke<kDirect, true, do_access_check, /*is_mterp=*/ false>(
-        self, shadow_frame, inst, inst_data, ResultRegister());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  HANDLER_ATTRIBUTES bool INVOKE_DIRECT_RANGE() {
+    return HandleInvoke<kDirect, /*is_range=*/ true>();
   }
 
-  ALWAYS_INLINE void INVOKE_INTERFACE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoInvoke<kInterface, false, do_access_check, /*is_mterp=*/ false>(
-        self, shadow_frame, inst, inst_data, ResultRegister());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  HANDLER_ATTRIBUTES bool INVOKE_INTERFACE() {
+    return HandleInvoke<kInterface, /*is_range=*/ false>();
   }
 
-  ALWAYS_INLINE void INVOKE_INTERFACE_RANGE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoInvoke<kInterface, true, do_access_check, /*is_mterp=*/ false>(
-        self, shadow_frame, inst, inst_data, ResultRegister());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  HANDLER_ATTRIBUTES bool INVOKE_INTERFACE_RANGE() {
+    return HandleInvoke<kInterface, /*is_range=*/ true>();
   }
 
-  ALWAYS_INLINE void INVOKE_STATIC() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoInvoke<kStatic, false, do_access_check, /*is_mterp=*/ false>(
-        self, shadow_frame, inst, inst_data, ResultRegister());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  HANDLER_ATTRIBUTES bool INVOKE_STATIC() {
+    return HandleInvoke<kStatic, /*is_range=*/ false>();
   }
 
-  ALWAYS_INLINE void INVOKE_STATIC_RANGE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoInvoke<kStatic, true, do_access_check, /*is_mterp=*/ false>(
-        self, shadow_frame, inst, inst_data, ResultRegister());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  HANDLER_ATTRIBUTES bool INVOKE_STATIC_RANGE() {
+    return HandleInvoke<kStatic, /*is_range=*/ true>();
   }
 
-  ALWAYS_INLINE void INVOKE_VIRTUAL_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoInvoke<kVirtual, false, do_access_check, /*is_mterp=*/ false,
-        /*is_quick=*/ true>(self, shadow_frame, inst, inst_data, ResultRegister());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  HANDLER_ATTRIBUTES bool INVOKE_VIRTUAL_QUICK() {
+    return HandleInvoke<kVirtual, /*is_range=*/ false, /*is_quick=*/ true>();
   }
 
-  ALWAYS_INLINE void INVOKE_VIRTUAL_RANGE_QUICK() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoInvoke<kVirtual, true, do_access_check, /*is_mterp=*/ false,
-        /*is_quick=*/ true>(self, shadow_frame, inst, inst_data, ResultRegister());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+  HANDLER_ATTRIBUTES bool INVOKE_VIRTUAL_RANGE_QUICK() {
+    return HandleInvoke<kVirtual, /*is_range=*/ true, /*is_quick=*/ true>();
   }
 
-  ALWAYS_INLINE void INVOKE_POLYMORPHIC() REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool INVOKE_POLYMORPHIC() {
     DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
     bool success = DoInvokePolymorphic</* is_range= */ false>(
         self, shadow_frame, inst, inst_data, ResultRegister());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_POLYMORPHIC(!success);
+    return PossiblyHandlePendingExceptionOnInvoke(!success);
   }
 
-  ALWAYS_INLINE void INVOKE_POLYMORPHIC_RANGE() REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool INVOKE_POLYMORPHIC_RANGE() {
     DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
     bool success = DoInvokePolymorphic</* is_range= */ true>(
         self, shadow_frame, inst, inst_data, ResultRegister());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_POLYMORPHIC(!success);
+    return PossiblyHandlePendingExceptionOnInvoke(!success);
   }
 
-  ALWAYS_INLINE void INVOKE_CUSTOM() REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool INVOKE_CUSTOM() {
     DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
     bool success = DoInvokeCustom</* is_range= */ false>(
         self, shadow_frame, inst, inst_data, ResultRegister());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+    return PossiblyHandlePendingExceptionOnInvoke(!success);
   }
 
-  ALWAYS_INLINE void INVOKE_CUSTOM_RANGE() REQUIRES_SHARED(Locks::mutator_lock_) {
+  HANDLER_ATTRIBUTES bool INVOKE_CUSTOM_RANGE() {
     DCHECK(Runtime::Current()->IsMethodHandlesEnabled());
     bool success = DoInvokeCustom</* is_range= */ true>(
         self, shadow_frame, inst, inst_data, ResultRegister());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE(!success);
+    return PossiblyHandlePendingExceptionOnInvoke(!success);
   }
 
-  ALWAYS_INLINE void NEG_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(
-        inst->VRegA_12x(inst_data), -shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool NEG_INT() {
+    SetVReg(A(), -GetVReg(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void NOT_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(
-        inst->VRegA_12x(inst_data), ~shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool NOT_INT() {
+    SetVReg(A(), ~GetVReg(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void NEG_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(
-        inst->VRegA_12x(inst_data), -shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool NEG_LONG() {
+    SetVRegLong(A(), -GetVRegLong(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void NOT_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(
-        inst->VRegA_12x(inst_data), ~shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool NOT_LONG() {
+    SetVRegLong(A(), ~GetVRegLong(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void NEG_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegFloat(
-        inst->VRegA_12x(inst_data), -shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool NEG_FLOAT() {
+    SetVRegFloat(A(), -GetVRegFloat(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void NEG_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegDouble(
-        inst->VRegA_12x(inst_data), -shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool NEG_DOUBLE() {
+    SetVRegDouble(A(), -GetVRegDouble(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void INT_TO_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data),
-                             shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool INT_TO_LONG() {
+    SetVRegLong(A(), GetVReg(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void INT_TO_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data),
-                              shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool INT_TO_FLOAT() {
+    SetVRegFloat(A(), GetVReg(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void INT_TO_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data),
-                               shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool INT_TO_DOUBLE() {
+    SetVRegDouble(A(), GetVReg(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void LONG_TO_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_12x(inst_data),
-                         shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool LONG_TO_INT() {
+    SetVReg(A(), GetVRegLong(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void LONG_TO_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data),
-                              shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool LONG_TO_FLOAT() {
+    SetVRegFloat(A(), GetVRegLong(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void LONG_TO_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data),
-                               shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool LONG_TO_DOUBLE() {
+    SetVRegDouble(A(), GetVRegLong(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void FLOAT_TO_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    float val = shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data));
+  HANDLER_ATTRIBUTES bool FLOAT_TO_INT() {
+    float val = GetVRegFloat(B());
     int32_t result = art_float_to_integral<int32_t, float>(val);
-    shadow_frame.SetVReg(inst->VRegA_12x(inst_data), result);
-    inst = inst->Next_1xx();
+    SetVReg(A(), result);
+    return true;
   }
 
-  ALWAYS_INLINE void FLOAT_TO_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
-    float val = shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data));
+  HANDLER_ATTRIBUTES bool FLOAT_TO_LONG() {
+    float val = GetVRegFloat(B());
     int64_t result = art_float_to_integral<int64_t, float>(val);
-    shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), result);
-    inst = inst->Next_1xx();
+    SetVRegLong(A(), result);
+    return true;
   }
 
-  ALWAYS_INLINE void FLOAT_TO_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegDouble(inst->VRegA_12x(inst_data),
-                               shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool FLOAT_TO_DOUBLE() {
+    SetVRegDouble(A(), GetVRegFloat(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void DOUBLE_TO_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    double val = shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data));
+  HANDLER_ATTRIBUTES bool DOUBLE_TO_INT() {
+    double val = GetVRegDouble(B());
     int32_t result = art_float_to_integral<int32_t, double>(val);
-    shadow_frame.SetVReg(inst->VRegA_12x(inst_data), result);
-    inst = inst->Next_1xx();
+    SetVReg(A(), result);
+    return true;
   }
 
-  ALWAYS_INLINE void DOUBLE_TO_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
-    double val = shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data));
+  HANDLER_ATTRIBUTES bool DOUBLE_TO_LONG() {
+    double val = GetVRegDouble(B());
     int64_t result = art_float_to_integral<int64_t, double>(val);
-    shadow_frame.SetVRegLong(inst->VRegA_12x(inst_data), result);
-    inst = inst->Next_1xx();
+    SetVRegLong(A(), result);
+    return true;
   }
 
-  ALWAYS_INLINE void DOUBLE_TO_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegFloat(inst->VRegA_12x(inst_data),
-                              shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool DOUBLE_TO_FLOAT() {
+    SetVRegFloat(A(), GetVRegDouble(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void INT_TO_BYTE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<int8_t>(
-        shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool INT_TO_BYTE() {
+    SetVReg(A(), static_cast<int8_t>(GetVReg(B())));
+    return true;
   }
 
-  ALWAYS_INLINE void INT_TO_CHAR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<uint16_t>(
-        shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool INT_TO_CHAR() {
+    SetVReg(A(), static_cast<uint16_t>(GetVReg(B())));
+    return true;
   }
 
-  ALWAYS_INLINE void INT_TO_SHORT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_12x(inst_data), static_cast<int16_t>(
-        shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool INT_TO_SHORT() {
+    SetVReg(A(), static_cast<int16_t>(GetVReg(B())));
+    return true;
   }
 
-  ALWAYS_INLINE void ADD_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
-                         SafeAdd(shadow_frame.GetVReg(inst->VRegB_23x()),
-                                 shadow_frame.GetVReg(inst->VRegC_23x())));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool ADD_INT() {
+    SetVReg(A(), SafeAdd(GetVReg(B()), GetVReg(C())));
+    return true;
   }
 
-  ALWAYS_INLINE void SUB_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
-                         SafeSub(shadow_frame.GetVReg(inst->VRegB_23x()),
-                                 shadow_frame.GetVReg(inst->VRegC_23x())));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool SUB_INT() {
+    SetVReg(A(), SafeSub(GetVReg(B()), GetVReg(C())));
+    return true;
   }
 
-  ALWAYS_INLINE void MUL_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
-                         SafeMul(shadow_frame.GetVReg(inst->VRegB_23x()),
-                                 shadow_frame.GetVReg(inst->VRegC_23x())));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool MUL_INT() {
+    SetVReg(A(), SafeMul(GetVReg(B()), GetVReg(C())));
+    return true;
   }
 
-  ALWAYS_INLINE void DIV_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIntDivide(shadow_frame, inst->VRegA_23x(inst_data),
-                               shadow_frame.GetVReg(inst->VRegB_23x()),
-                               shadow_frame.GetVReg(inst->VRegC_23x()));
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool DIV_INT() {
+    return DoIntDivide(shadow_frame, A(), GetVReg(B()), GetVReg(C()));
   }
 
-  ALWAYS_INLINE void REM_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIntRemainder(shadow_frame, inst->VRegA_23x(inst_data),
-                                  shadow_frame.GetVReg(inst->VRegB_23x()),
-                                  shadow_frame.GetVReg(inst->VRegC_23x()));
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool REM_INT() {
+    return DoIntRemainder(shadow_frame, A(), GetVReg(B()), GetVReg(C()));
   }
 
-  ALWAYS_INLINE void SHL_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
-                         shadow_frame.GetVReg(inst->VRegB_23x()) <<
-                         (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool SHL_INT() {
+    SetVReg(A(), GetVReg(B()) << (GetVReg(C()) & 0x1f));
+    return true;
   }
 
-  ALWAYS_INLINE void SHR_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
-                         shadow_frame.GetVReg(inst->VRegB_23x()) >>
-                         (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool SHR_INT() {
+    SetVReg(A(), GetVReg(B()) >> (GetVReg(C()) & 0x1f));
+    return true;
   }
 
-  ALWAYS_INLINE void USHR_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
-                         static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_23x())) >>
-                         (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x1f));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool USHR_INT() {
+    SetVReg(A(), static_cast<uint32_t>(GetVReg(B())) >> (GetVReg(C()) & 0x1f));
+    return true;
   }
 
-  ALWAYS_INLINE void AND_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
-                         shadow_frame.GetVReg(inst->VRegB_23x()) &
-                         shadow_frame.GetVReg(inst->VRegC_23x()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool AND_INT() {
+    SetVReg(A(), GetVReg(B()) & GetVReg(C()));
+    return true;
   }
 
-  ALWAYS_INLINE void OR_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
-                         shadow_frame.GetVReg(inst->VRegB_23x()) |
-                         shadow_frame.GetVReg(inst->VRegC_23x()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool OR_INT() {
+    SetVReg(A(), GetVReg(B()) | GetVReg(C()));
+    return true;
   }
 
-  ALWAYS_INLINE void XOR_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_23x(inst_data),
-                         shadow_frame.GetVReg(inst->VRegB_23x()) ^
-                         shadow_frame.GetVReg(inst->VRegC_23x()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool XOR_INT() {
+    SetVReg(A(), GetVReg(B()) ^ GetVReg(C()));
+    return true;
   }
 
-  ALWAYS_INLINE void ADD_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
-                             SafeAdd(shadow_frame.GetVRegLong(inst->VRegB_23x()),
-                                     shadow_frame.GetVRegLong(inst->VRegC_23x())));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool ADD_LONG() {
+    SetVRegLong(A(), SafeAdd(GetVRegLong(B()), GetVRegLong(C())));
+    return true;
   }
 
-  ALWAYS_INLINE void SUB_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
-                             SafeSub(shadow_frame.GetVRegLong(inst->VRegB_23x()),
-                                     shadow_frame.GetVRegLong(inst->VRegC_23x())));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool SUB_LONG() {
+    SetVRegLong(A(), SafeSub(GetVRegLong(B()), GetVRegLong(C())));
+    return true;
   }
 
-  ALWAYS_INLINE void MUL_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
-                             SafeMul(shadow_frame.GetVRegLong(inst->VRegB_23x()),
-                                     shadow_frame.GetVRegLong(inst->VRegC_23x())));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool MUL_LONG() {
+    SetVRegLong(A(), SafeMul(GetVRegLong(B()), GetVRegLong(C())));
+    return true;
   }
 
-  ALWAYS_INLINE void DIV_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
-    DoLongDivide(shadow_frame, inst->VRegA_23x(inst_data),
-                 shadow_frame.GetVRegLong(inst->VRegB_23x()),
-                 shadow_frame.GetVRegLong(inst->VRegC_23x()));
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
+  HANDLER_ATTRIBUTES bool DIV_LONG() {
+    return DoLongDivide(shadow_frame, A(), GetVRegLong(B()), GetVRegLong(C()));
   }
 
-  ALWAYS_INLINE void REM_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
-    DoLongRemainder(shadow_frame, inst->VRegA_23x(inst_data),
-                    shadow_frame.GetVRegLong(inst->VRegB_23x()),
-                    shadow_frame.GetVRegLong(inst->VRegC_23x()));
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
+  HANDLER_ATTRIBUTES bool REM_LONG() {
+    return DoLongRemainder(shadow_frame, A(), GetVRegLong(B()), GetVRegLong(C()));
   }
 
-  ALWAYS_INLINE void AND_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
-                             shadow_frame.GetVRegLong(inst->VRegB_23x()) &
-                             shadow_frame.GetVRegLong(inst->VRegC_23x()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool AND_LONG() {
+    SetVRegLong(A(), GetVRegLong(B()) & GetVRegLong(C()));
+    return true;
   }
 
-  ALWAYS_INLINE void OR_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
-                             shadow_frame.GetVRegLong(inst->VRegB_23x()) |
-                             shadow_frame.GetVRegLong(inst->VRegC_23x()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool OR_LONG() {
+    SetVRegLong(A(), GetVRegLong(B()) | GetVRegLong(C()));
+    return true;
   }
 
-  ALWAYS_INLINE void XOR_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
-                             shadow_frame.GetVRegLong(inst->VRegB_23x()) ^
-                             shadow_frame.GetVRegLong(inst->VRegC_23x()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool XOR_LONG() {
+    SetVRegLong(A(), GetVRegLong(B()) ^ GetVRegLong(C()));
+    return true;
   }
 
-  ALWAYS_INLINE void SHL_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
-                             shadow_frame.GetVRegLong(inst->VRegB_23x()) <<
-                             (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool SHL_LONG() {
+    SetVRegLong(A(), GetVRegLong(B()) << (GetVReg(C()) & 0x3f));
+    return true;
   }
 
-  ALWAYS_INLINE void SHR_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
-                             shadow_frame.GetVRegLong(inst->VRegB_23x()) >>
-                             (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool SHR_LONG() {
+    SetVRegLong(A(), GetVRegLong(B()) >> (GetVReg(C()) & 0x3f));
+    return true;
   }
 
-  ALWAYS_INLINE void USHR_LONG() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data),
-                             static_cast<uint64_t>(shadow_frame.GetVRegLong(inst->VRegB_23x())) >>
-                             (shadow_frame.GetVReg(inst->VRegC_23x()) & 0x3f));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool USHR_LONG() {
+    SetVRegLong(A(), static_cast<uint64_t>(GetVRegLong(B())) >> (GetVReg(C()) & 0x3f));
+    return true;
   }
 
-  ALWAYS_INLINE void ADD_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
-                              shadow_frame.GetVRegFloat(inst->VRegB_23x()) +
-                              shadow_frame.GetVRegFloat(inst->VRegC_23x()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool ADD_FLOAT() {
+    SetVRegFloat(A(), GetVRegFloat(B()) + GetVRegFloat(C()));
+    return true;
   }
 
-  ALWAYS_INLINE void SUB_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
-                              shadow_frame.GetVRegFloat(inst->VRegB_23x()) -
-                              shadow_frame.GetVRegFloat(inst->VRegC_23x()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool SUB_FLOAT() {
+    SetVRegFloat(A(), GetVRegFloat(B()) - GetVRegFloat(C()));
+    return true;
   }
 
-  ALWAYS_INLINE void MUL_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
-                              shadow_frame.GetVRegFloat(inst->VRegB_23x()) *
-                              shadow_frame.GetVRegFloat(inst->VRegC_23x()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool MUL_FLOAT() {
+    SetVRegFloat(A(), GetVRegFloat(B()) * GetVRegFloat(C()));
+    return true;
   }
 
-  ALWAYS_INLINE void DIV_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
-                              shadow_frame.GetVRegFloat(inst->VRegB_23x()) /
-                              shadow_frame.GetVRegFloat(inst->VRegC_23x()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool DIV_FLOAT() {
+    SetVRegFloat(A(), GetVRegFloat(B()) / GetVRegFloat(C()));
+    return true;
   }
 
-  ALWAYS_INLINE void REM_FLOAT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegFloat(inst->VRegA_23x(inst_data),
-                              fmodf(shadow_frame.GetVRegFloat(inst->VRegB_23x()),
-                                    shadow_frame.GetVRegFloat(inst->VRegC_23x())));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool REM_FLOAT() {
+    SetVRegFloat(A(), fmodf(GetVRegFloat(B()), GetVRegFloat(C())));
+    return true;
   }
 
-  ALWAYS_INLINE void ADD_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
-                               shadow_frame.GetVRegDouble(inst->VRegB_23x()) +
-                               shadow_frame.GetVRegDouble(inst->VRegC_23x()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool ADD_DOUBLE() {
+    SetVRegDouble(A(), GetVRegDouble(B()) + GetVRegDouble(C()));
+    return true;
   }
 
-  ALWAYS_INLINE void SUB_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
-                               shadow_frame.GetVRegDouble(inst->VRegB_23x()) -
-                               shadow_frame.GetVRegDouble(inst->VRegC_23x()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool SUB_DOUBLE() {
+    SetVRegDouble(A(), GetVRegDouble(B()) - GetVRegDouble(C()));
+    return true;
   }
 
-  ALWAYS_INLINE void MUL_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
-                               shadow_frame.GetVRegDouble(inst->VRegB_23x()) *
-                               shadow_frame.GetVRegDouble(inst->VRegC_23x()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool MUL_DOUBLE() {
+    SetVRegDouble(A(), GetVRegDouble(B()) * GetVRegDouble(C()));
+    return true;
   }
 
-  ALWAYS_INLINE void DIV_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
-                               shadow_frame.GetVRegDouble(inst->VRegB_23x()) /
-                               shadow_frame.GetVRegDouble(inst->VRegC_23x()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool DIV_DOUBLE() {
+    SetVRegDouble(A(), GetVRegDouble(B()) / GetVRegDouble(C()));
+    return true;
   }
 
-  ALWAYS_INLINE void REM_DOUBLE() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVRegDouble(inst->VRegA_23x(inst_data),
-                               fmod(shadow_frame.GetVRegDouble(inst->VRegB_23x()),
-                                    shadow_frame.GetVRegDouble(inst->VRegC_23x())));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool REM_DOUBLE() {
+    SetVRegDouble(A(), fmod(GetVRegDouble(B()), GetVRegDouble(C())));
+    return true;
   }
 
-  ALWAYS_INLINE void ADD_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVReg(vregA, SafeAdd(shadow_frame.GetVReg(vregA),
-                                        shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool ADD_INT_2ADDR() {
+    uint4_t vregA = A();
+    SetVReg(vregA, SafeAdd(GetVReg(vregA), GetVReg(B())));
+    return true;
   }
 
-  ALWAYS_INLINE void SUB_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVReg(vregA,
-                         SafeSub(shadow_frame.GetVReg(vregA),
-                                 shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool SUB_INT_2ADDR() {
+    uint4_t vregA = A();
+    SetVReg(vregA, SafeSub(GetVReg(vregA), GetVReg(B())));
+    return true;
   }
 
-  ALWAYS_INLINE void MUL_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVReg(vregA,
-                         SafeMul(shadow_frame.GetVReg(vregA),
-                                 shadow_frame.GetVReg(inst->VRegB_12x(inst_data))));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool MUL_INT_2ADDR() {
+    uint4_t vregA = A();
+    SetVReg(vregA, SafeMul(GetVReg(vregA), GetVReg(B())));
+    return true;
   }
 
-  ALWAYS_INLINE void DIV_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    bool success = DoIntDivide(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
-                               shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx);
+  HANDLER_ATTRIBUTES bool DIV_INT_2ADDR() {
+    uint4_t vregA = A();
+    return DoIntDivide(shadow_frame, vregA, GetVReg(vregA), GetVReg(B()));
   }
 
-  ALWAYS_INLINE void REM_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    bool success = DoIntRemainder(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
-                                  shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx);
+  HANDLER_ATTRIBUTES bool REM_INT_2ADDR() {
+    uint4_t vregA = A();
+    return DoIntRemainder(shadow_frame, vregA, GetVReg(vregA), GetVReg(B()));
   }
 
-  ALWAYS_INLINE void SHL_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVReg(vregA,
-                         shadow_frame.GetVReg(vregA) <<
-                         (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool SHL_INT_2ADDR() {
+    uint4_t vregA = A();
+    SetVReg(vregA, GetVReg(vregA) << (GetVReg(B()) & 0x1f));
+    return true;
   }
 
-  ALWAYS_INLINE void SHR_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVReg(vregA,
-                         shadow_frame.GetVReg(vregA) >>
-                         (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool SHR_INT_2ADDR() {
+    uint4_t vregA = A();
+    SetVReg(vregA, GetVReg(vregA) >> (GetVReg(B()) & 0x1f));
+    return true;
   }
 
-  ALWAYS_INLINE void USHR_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVReg(vregA,
-                         static_cast<uint32_t>(shadow_frame.GetVReg(vregA)) >>
-                         (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x1f));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool USHR_INT_2ADDR() {
+    uint4_t vregA = A();
+    SetVReg(vregA, static_cast<uint32_t>(GetVReg(vregA)) >> (GetVReg(B()) & 0x1f));
+    return true;
   }
 
-  ALWAYS_INLINE void AND_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVReg(vregA,
-                         shadow_frame.GetVReg(vregA) &
-                         shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool AND_INT_2ADDR() {
+    uint4_t vregA = A();
+    SetVReg(vregA, GetVReg(vregA) & GetVReg(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void OR_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVReg(vregA,
-                         shadow_frame.GetVReg(vregA) |
-                         shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool OR_INT_2ADDR() {
+    uint4_t vregA = A();
+    SetVReg(vregA, GetVReg(vregA) | GetVReg(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void XOR_INT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVReg(vregA,
-                         shadow_frame.GetVReg(vregA) ^
-                         shadow_frame.GetVReg(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool XOR_INT_2ADDR() {
+    uint4_t vregA = A();
+    SetVReg(vregA, GetVReg(vregA) ^ GetVReg(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void ADD_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVRegLong(vregA,
-                             SafeAdd(shadow_frame.GetVRegLong(vregA),
-                                     shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool ADD_LONG_2ADDR() {
+    uint4_t vregA = A();
+    SetVRegLong(vregA, SafeAdd(GetVRegLong(vregA), GetVRegLong(B())));
+    return true;
   }
 
-  ALWAYS_INLINE void SUB_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVRegLong(vregA,
-                             SafeSub(shadow_frame.GetVRegLong(vregA),
-                                     shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool SUB_LONG_2ADDR() {
+    uint4_t vregA = A();
+    SetVRegLong(vregA, SafeSub(GetVRegLong(vregA), GetVRegLong(B())));
+    return true;
   }
 
-  ALWAYS_INLINE void MUL_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVRegLong(vregA,
-                             SafeMul(shadow_frame.GetVRegLong(vregA),
-                                     shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data))));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool MUL_LONG_2ADDR() {
+    uint4_t vregA = A();
+    SetVRegLong(vregA, SafeMul(GetVRegLong(vregA), GetVRegLong(B())));
+    return true;
   }
 
-  ALWAYS_INLINE void DIV_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    DoLongDivide(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
-                shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
+  HANDLER_ATTRIBUTES bool DIV_LONG_2ADDR() {
+    uint4_t vregA = A();
+    return DoLongDivide(shadow_frame, vregA, GetVRegLong(vregA), GetVRegLong(B()));
   }
 
-  ALWAYS_INLINE void REM_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    DoLongRemainder(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
-                    shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
+  HANDLER_ATTRIBUTES bool REM_LONG_2ADDR() {
+    uint4_t vregA = A();
+    return DoLongRemainder(shadow_frame, vregA, GetVRegLong(vregA), GetVRegLong(B()));
   }
 
-  ALWAYS_INLINE void AND_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVRegLong(vregA,
-                             shadow_frame.GetVRegLong(vregA) &
-                             shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool AND_LONG_2ADDR() {
+    uint4_t vregA = A();
+    SetVRegLong(vregA, GetVRegLong(vregA) & GetVRegLong(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void OR_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVRegLong(vregA,
-                             shadow_frame.GetVRegLong(vregA) |
-                             shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool OR_LONG_2ADDR() {
+    uint4_t vregA = A();
+    SetVRegLong(vregA, GetVRegLong(vregA) | GetVRegLong(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void XOR_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVRegLong(vregA,
-                             shadow_frame.GetVRegLong(vregA) ^
-                             shadow_frame.GetVRegLong(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool XOR_LONG_2ADDR() {
+    uint4_t vregA = A();
+    SetVRegLong(vregA, GetVRegLong(vregA) ^ GetVRegLong(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void SHL_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVRegLong(vregA,
-                             shadow_frame.GetVRegLong(vregA) <<
-                             (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool SHL_LONG_2ADDR() {
+    uint4_t vregA = A();
+    SetVRegLong(vregA, GetVRegLong(vregA) << (GetVReg(B()) & 0x3f));
+    return true;
   }
 
-  ALWAYS_INLINE void SHR_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVRegLong(vregA,
-                             shadow_frame.GetVRegLong(vregA) >>
-                             (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool SHR_LONG_2ADDR() {
+    uint4_t vregA = A();
+    SetVRegLong(vregA, GetVRegLong(vregA) >> (GetVReg(B()) & 0x3f));
+    return true;
   }
 
-  ALWAYS_INLINE void USHR_LONG_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVRegLong(vregA,
-                             static_cast<uint64_t>(shadow_frame.GetVRegLong(vregA)) >>
-                             (shadow_frame.GetVReg(inst->VRegB_12x(inst_data)) & 0x3f));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool USHR_LONG_2ADDR() {
+    uint4_t vregA = A();
+    SetVRegLong(vregA, static_cast<uint64_t>(GetVRegLong(vregA)) >> (GetVReg(B()) & 0x3f));
+    return true;
   }
 
-  ALWAYS_INLINE void ADD_FLOAT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVRegFloat(vregA,
-                              shadow_frame.GetVRegFloat(vregA) +
-                              shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool ADD_FLOAT_2ADDR() {
+    uint4_t vregA = A();
+    SetVRegFloat(vregA, GetVRegFloat(vregA) + GetVRegFloat(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void SUB_FLOAT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVRegFloat(vregA,
-                              shadow_frame.GetVRegFloat(vregA) -
-                              shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool SUB_FLOAT_2ADDR() {
+    uint4_t vregA = A();
+    SetVRegFloat(vregA, GetVRegFloat(vregA) - GetVRegFloat(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void MUL_FLOAT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVRegFloat(vregA,
-                              shadow_frame.GetVRegFloat(vregA) *
-                              shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool MUL_FLOAT_2ADDR() {
+    uint4_t vregA = A();
+    SetVRegFloat(vregA, GetVRegFloat(vregA) * GetVRegFloat(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void DIV_FLOAT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVRegFloat(vregA,
-                              shadow_frame.GetVRegFloat(vregA) /
-                              shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool DIV_FLOAT_2ADDR() {
+    uint4_t vregA = A();
+    SetVRegFloat(vregA, GetVRegFloat(vregA) / GetVRegFloat(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void REM_FLOAT_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVRegFloat(vregA,
-                              fmodf(shadow_frame.GetVRegFloat(vregA),
-                                    shadow_frame.GetVRegFloat(inst->VRegB_12x(inst_data))));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool REM_FLOAT_2ADDR() {
+    uint4_t vregA = A();
+    SetVRegFloat(vregA, fmodf(GetVRegFloat(vregA), GetVRegFloat(B())));
+    return true;
   }
 
-  ALWAYS_INLINE void ADD_DOUBLE_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVRegDouble(vregA,
-                               shadow_frame.GetVRegDouble(vregA) +
-                               shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool ADD_DOUBLE_2ADDR() {
+    uint4_t vregA = A();
+    SetVRegDouble(vregA, GetVRegDouble(vregA) + GetVRegDouble(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void SUB_DOUBLE_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVRegDouble(vregA,
-                               shadow_frame.GetVRegDouble(vregA) -
-                               shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool SUB_DOUBLE_2ADDR() {
+    uint4_t vregA = A();
+    SetVRegDouble(vregA, GetVRegDouble(vregA) - GetVRegDouble(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void MUL_DOUBLE_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVRegDouble(vregA,
-                               shadow_frame.GetVRegDouble(vregA) *
-                               shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool MUL_DOUBLE_2ADDR() {
+    uint4_t vregA = A();
+    SetVRegDouble(vregA, GetVRegDouble(vregA) * GetVRegDouble(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void DIV_DOUBLE_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVRegDouble(vregA,
-                               shadow_frame.GetVRegDouble(vregA) /
-                               shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data)));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool DIV_DOUBLE_2ADDR() {
+    uint4_t vregA = A();
+    SetVRegDouble(vregA, GetVRegDouble(vregA) / GetVRegDouble(B()));
+    return true;
   }
 
-  ALWAYS_INLINE void REM_DOUBLE_2ADDR() REQUIRES_SHARED(Locks::mutator_lock_) {
-    uint4_t vregA = inst->VRegA_12x(inst_data);
-    shadow_frame.SetVRegDouble(vregA,
-                               fmod(shadow_frame.GetVRegDouble(vregA),
-                                    shadow_frame.GetVRegDouble(inst->VRegB_12x(inst_data))));
-    inst = inst->Next_1xx();
+  HANDLER_ATTRIBUTES bool REM_DOUBLE_2ADDR() {
+    uint4_t vregA = A();
+    SetVRegDouble(vregA, fmod(GetVRegDouble(vregA), GetVRegDouble(B())));
+    return true;
   }
 
-  ALWAYS_INLINE void ADD_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
-                         SafeAdd(shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
-                                 inst->VRegC_22s()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool ADD_INT_LIT16() {
+    SetVReg(A(), SafeAdd(GetVReg(B()), C()));
+    return true;
   }
 
-  ALWAYS_INLINE void RSUB_INT() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
-                         SafeSub(inst->VRegC_22s(),
-                                 shadow_frame.GetVReg(inst->VRegB_22s(inst_data))));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool RSUB_INT() {
+    SetVReg(A(), SafeSub(C(), GetVReg(B())));
+    return true;
   }
 
-  ALWAYS_INLINE void MUL_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
-                         SafeMul(shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
-                                 inst->VRegC_22s()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool MUL_INT_LIT16() {
+    SetVReg(A(), SafeMul(GetVReg(B()), C()));
+    return true;
   }
 
-  ALWAYS_INLINE void DIV_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(inst_data),
-                               shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
-                               inst->VRegC_22s());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool DIV_INT_LIT16() {
+    return DoIntDivide(shadow_frame, A(), GetVReg(B()), C());
   }
 
-  ALWAYS_INLINE void REM_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(inst_data),
-                                  shadow_frame.GetVReg(inst->VRegB_22s(inst_data)),
-                                  inst->VRegC_22s());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool REM_INT_LIT16() {
+    return DoIntRemainder(shadow_frame, A(), GetVReg(B()), C());
   }
 
-  ALWAYS_INLINE void AND_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
-                         shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) &
-                         inst->VRegC_22s());
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool AND_INT_LIT16() {
+    SetVReg(A(), GetVReg(B()) & C());
+    return true;
   }
 
-  ALWAYS_INLINE void OR_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
-                         shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) |
-                         inst->VRegC_22s());
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool OR_INT_LIT16() {
+    SetVReg(A(), GetVReg(B()) | C());
+    return true;
   }
 
-  ALWAYS_INLINE void XOR_INT_LIT16() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_22s(inst_data),
-                         shadow_frame.GetVReg(inst->VRegB_22s(inst_data)) ^
-                         inst->VRegC_22s());
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool XOR_INT_LIT16() {
+    SetVReg(A(), GetVReg(B()) ^ C());
+    return true;
   }
 
-  ALWAYS_INLINE void ADD_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
-                         SafeAdd(shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool ADD_INT_LIT8() {
+    SetVReg(A(), SafeAdd(GetVReg(B()), C()));
+    return true;
   }
 
-  ALWAYS_INLINE void RSUB_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
-                         SafeSub(inst->VRegC_22b(), shadow_frame.GetVReg(inst->VRegB_22b())));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool RSUB_INT_LIT8() {
+    SetVReg(A(), SafeSub(C(), GetVReg(B())));
+    return true;
   }
 
-  ALWAYS_INLINE void MUL_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
-                         SafeMul(shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b()));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool MUL_INT_LIT8() {
+    SetVReg(A(), SafeMul(GetVReg(B()), C()));
+    return true;
   }
 
-  ALWAYS_INLINE void DIV_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIntDivide(shadow_frame, inst->VRegA_22b(inst_data),
-                               shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool DIV_INT_LIT8() {
+    return DoIntDivide(shadow_frame, A(), GetVReg(B()), C());
   }
 
-  ALWAYS_INLINE void REM_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
-    bool success = DoIntRemainder(shadow_frame, inst->VRegA_22b(inst_data),
-                                  shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
-    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+  HANDLER_ATTRIBUTES bool REM_INT_LIT8() {
+    return DoIntRemainder(shadow_frame, A(), GetVReg(B()), C());
   }
 
-  ALWAYS_INLINE void AND_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
-                         shadow_frame.GetVReg(inst->VRegB_22b()) &
-                         inst->VRegC_22b());
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool AND_INT_LIT8() {
+    SetVReg(A(), GetVReg(B()) & C());
+    return true;
   }
 
-  ALWAYS_INLINE void OR_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
-                         shadow_frame.GetVReg(inst->VRegB_22b()) |
-                         inst->VRegC_22b());
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool OR_INT_LIT8() {
+    SetVReg(A(), GetVReg(B()) | C());
+    return true;
   }
 
-  ALWAYS_INLINE void XOR_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
-                         shadow_frame.GetVReg(inst->VRegB_22b()) ^
-                         inst->VRegC_22b());
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool XOR_INT_LIT8() {
+    SetVReg(A(), GetVReg(B()) ^ C());
+    return true;
   }
 
-  ALWAYS_INLINE void SHL_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
-                         shadow_frame.GetVReg(inst->VRegB_22b()) <<
-                         (inst->VRegC_22b() & 0x1f));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool SHL_INT_LIT8() {
+    SetVReg(A(), GetVReg(B()) << (C() & 0x1f));
+    return true;
   }
 
-  ALWAYS_INLINE void SHR_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
-                         shadow_frame.GetVReg(inst->VRegB_22b()) >>
-                         (inst->VRegC_22b() & 0x1f));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool SHR_INT_LIT8() {
+    SetVReg(A(), GetVReg(B()) >> (C() & 0x1f));
+    return true;
   }
 
-  ALWAYS_INLINE void USHR_INT_LIT8() REQUIRES_SHARED(Locks::mutator_lock_) {
-    shadow_frame.SetVReg(inst->VRegA_22b(inst_data),
-                         static_cast<uint32_t>(shadow_frame.GetVReg(inst->VRegB_22b())) >>
-                         (inst->VRegC_22b() & 0x1f));
-    inst = inst->Next_2xx();
+  HANDLER_ATTRIBUTES bool USHR_INT_LIT8() {
+    SetVReg(A(), static_cast<uint32_t>(GetVReg(B())) >> (C() & 0x1f));
+    return true;
   }
 
-  ALWAYS_INLINE void UNUSED_3E() REQUIRES_SHARED(Locks::mutator_lock_) {
-    UnexpectedOpcode(inst, shadow_frame);
+  HANDLER_ATTRIBUTES bool UNUSED_3E() {
+    return HandleUnused();
   }
 
-  ALWAYS_INLINE void UNUSED_3F() REQUIRES_SHARED(Locks::mutator_lock_) {
-    UnexpectedOpcode(inst, shadow_frame);
+  HANDLER_ATTRIBUTES bool UNUSED_3F() {
+    return HandleUnused();
   }
 
-  ALWAYS_INLINE void UNUSED_40() REQUIRES_SHARED(Locks::mutator_lock_) {
-    UnexpectedOpcode(inst, shadow_frame);
+  HANDLER_ATTRIBUTES bool UNUSED_40() {
+    return HandleUnused();
   }
 
-  ALWAYS_INLINE void UNUSED_41() REQUIRES_SHARED(Locks::mutator_lock_) {
-    UnexpectedOpcode(inst, shadow_frame);
+  HANDLER_ATTRIBUTES bool UNUSED_41() {
+    return HandleUnused();
   }
 
-  ALWAYS_INLINE void UNUSED_42() REQUIRES_SHARED(Locks::mutator_lock_) {
-    UnexpectedOpcode(inst, shadow_frame);
+  HANDLER_ATTRIBUTES bool UNUSED_42() {
+    return HandleUnused();
   }
 
-  ALWAYS_INLINE void UNUSED_43() REQUIRES_SHARED(Locks::mutator_lock_) {
-    UnexpectedOpcode(inst, shadow_frame);
+  HANDLER_ATTRIBUTES bool UNUSED_43() {
+    return HandleUnused();
   }
 
-  ALWAYS_INLINE void UNUSED_79() REQUIRES_SHARED(Locks::mutator_lock_) {
-    UnexpectedOpcode(inst, shadow_frame);
+  HANDLER_ATTRIBUTES bool UNUSED_79() {
+    return HandleUnused();
   }
 
-  ALWAYS_INLINE void UNUSED_7A() REQUIRES_SHARED(Locks::mutator_lock_) {
-    UnexpectedOpcode(inst, shadow_frame);
+  HANDLER_ATTRIBUTES bool UNUSED_7A() {
+    return HandleUnused();
   }
 
-  ALWAYS_INLINE void UNUSED_F3() REQUIRES_SHARED(Locks::mutator_lock_) {
-    UnexpectedOpcode(inst, shadow_frame);
+  HANDLER_ATTRIBUTES bool UNUSED_F3() {
+    return HandleUnused();
   }
 
-  ALWAYS_INLINE void UNUSED_F4() REQUIRES_SHARED(Locks::mutator_lock_) {
-    UnexpectedOpcode(inst, shadow_frame);
+  HANDLER_ATTRIBUTES bool UNUSED_F4() {
+    return HandleUnused();
   }
 
-  ALWAYS_INLINE void UNUSED_F5() REQUIRES_SHARED(Locks::mutator_lock_) {
-    UnexpectedOpcode(inst, shadow_frame);
+  HANDLER_ATTRIBUTES bool UNUSED_F5() {
+    return HandleUnused();
   }
 
-  ALWAYS_INLINE void UNUSED_F6() REQUIRES_SHARED(Locks::mutator_lock_) {
-    UnexpectedOpcode(inst, shadow_frame);
+  HANDLER_ATTRIBUTES bool UNUSED_F6() {
+    return HandleUnused();
   }
 
-  ALWAYS_INLINE void UNUSED_F7() REQUIRES_SHARED(Locks::mutator_lock_) {
-    UnexpectedOpcode(inst, shadow_frame);
+  HANDLER_ATTRIBUTES bool UNUSED_F7() {
+    return HandleUnused();
   }
 
-  ALWAYS_INLINE void UNUSED_F8() REQUIRES_SHARED(Locks::mutator_lock_) {
-    UnexpectedOpcode(inst, shadow_frame);
+  HANDLER_ATTRIBUTES bool UNUSED_F8() {
+    return HandleUnused();
   }
 
-  ALWAYS_INLINE void UNUSED_F9() REQUIRES_SHARED(Locks::mutator_lock_) {
-    UnexpectedOpcode(inst, shadow_frame);
+  HANDLER_ATTRIBUTES bool UNUSED_F9() {
+    return HandleUnused();
   }
 
   ALWAYS_INLINE InstructionHandler(SwitchImplContext* ctx,
@@ -2582,8 +1845,9 @@
                                    Thread* self,
                                    ShadowFrame& shadow_frame,
                                    uint16_t dex_pc,
-                                   const Instruction*& inst,
+                                   const Instruction* inst,
                                    uint16_t inst_data,
+                                   const Instruction*& next,
                                    bool& exit_interpreter_loop)
     : ctx(ctx),
       instrumentation(instrumentation),
@@ -2592,100 +1856,152 @@
       dex_pc(dex_pc),
       inst(inst),
       inst_data(inst_data),
+      next(next),
       exit_interpreter_loop(exit_interpreter_loop) {
   }
 
  private:
   static constexpr bool do_assignability_check = do_access_check;
+  static constexpr MonitorState kMonitorState =
+      do_assignability_check ? MonitorState::kCountingMonitors : MonitorState::kNormalMonitors;
 
   const CodeItemDataAccessor& Accessor() { return ctx->accessor; }
   const uint16_t* Insns() { return ctx->accessor.Insns(); }
   JValue* ResultRegister() { return &ctx->result_register; }
 
+  ALWAYS_INLINE int32_t A() { return inst->VRegA(kFormat, inst_data); }
+  ALWAYS_INLINE int32_t B() { return inst->VRegB(kFormat, inst_data); }
+  ALWAYS_INLINE int32_t C() { return inst->VRegC(kFormat); }
+
+  int32_t GetVReg(size_t i) const { return shadow_frame.GetVReg(i); }
+  int64_t GetVRegLong(size_t i) const { return shadow_frame.GetVRegLong(i); }
+  float GetVRegFloat(size_t i) const { return shadow_frame.GetVRegFloat(i); }
+  double GetVRegDouble(size_t i) const { return shadow_frame.GetVRegDouble(i); }
+  ObjPtr<mirror::Object> GetVRegReference(size_t i) const REQUIRES_SHARED(Locks::mutator_lock_) {
+    return shadow_frame.GetVRegReference(i);
+  }
+
+  void SetVReg(size_t i, int32_t val) { shadow_frame.SetVReg(i, val); }
+  void SetVRegLong(size_t i, int64_t val) { shadow_frame.SetVRegLong(i, val); }
+  void SetVRegFloat(size_t i, float val) { shadow_frame.SetVRegFloat(i, val); }
+  void SetVRegDouble(size_t i, double val) { shadow_frame.SetVRegDouble(i, val); }
+  void SetVRegReference(size_t i, ObjPtr<mirror::Object> val)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    shadow_frame.SetVRegReference(i, val);
+  }
+
+  // Set the next instruction to be executed.  It is the 'fall-through' instruction by default.
+  ALWAYS_INLINE void SetNextInstruction(const Instruction* next_inst) {
+    DCHECK_LT(next_inst->GetDexPc(Insns()), Accessor().InsnsSizeInCodeUnits());
+    next = next_inst;
+  }
+
   SwitchImplContext* const ctx;
   const instrumentation::Instrumentation* const instrumentation;
   Thread* const self;
   ShadowFrame& shadow_frame;
   uint32_t const dex_pc;
-  const Instruction*& inst;
+  const Instruction* const inst;
   uint16_t const inst_data;
+  const Instruction*& next;
+
   bool& exit_interpreter_loop;
 };
 
-#undef BRANCH_INSTRUMENTATION
-#undef POSSIBLY_HANDLE_PENDING_EXCEPTION
-#undef POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE
-#undef POSSIBLY_HANDLE_PENDING_EXCEPTION_ON_INVOKE_POLYMORPHIC
-#undef HANDLE_PENDING_EXCEPTION
+// Don't inline in ASAN. It would create massive stack frame.
+#if defined(ADDRESS_SANITIZER) || defined(HWADDRESS_SANITIZER)
+#define ASAN_NO_INLINE NO_INLINE
+#else
+#define ASAN_NO_INLINE ALWAYS_INLINE
+#endif
 
-// TODO On ASAN builds this function gets a huge stack frame. Since normally we run in the mterp
-// this shouldn't cause any problems for stack overflow detection. Remove this once b/117341496 is
-// fixed.
+#define OPCODE_CASE(OPCODE, OPCODE_NAME, NAME, FORMAT, i, a, e, v)                                \
+template<bool do_access_check, bool transaction_active>                                           \
+ASAN_NO_INLINE static bool OP_##OPCODE_NAME(                                                      \
+    SwitchImplContext* ctx,                                                                       \
+    const instrumentation::Instrumentation* instrumentation,                                      \
+    Thread* self,                                                                                 \
+    ShadowFrame& shadow_frame,                                                                    \
+    uint16_t dex_pc,                                                                              \
+    const Instruction* inst,                                                                      \
+    uint16_t inst_data,                                                                           \
+    const Instruction*& next,                                                                     \
+    bool& exit) REQUIRES_SHARED(Locks::mutator_lock_) {                                           \
+  InstructionHandler<do_access_check, transaction_active, Instruction::FORMAT> handler(           \
+      ctx, instrumentation, self, shadow_frame, dex_pc, inst, inst_data, next, exit);             \
+  return LIKELY(handler.OPCODE_NAME());                                                           \
+}
+DEX_INSTRUCTION_LIST(OPCODE_CASE)
+#undef OPCODE_CASE
+
 template<bool do_access_check, bool transaction_active>
-ATTRIBUTE_NO_SANITIZE_ADDRESS void ExecuteSwitchImplCpp(SwitchImplContext* ctx) {
+void ExecuteSwitchImplCpp(SwitchImplContext* ctx) {
   Thread* self = ctx->self;
   const CodeItemDataAccessor& accessor = ctx->accessor;
   ShadowFrame& shadow_frame = ctx->shadow_frame;
-  if (UNLIKELY(!shadow_frame.HasReferenceArray())) {
-    LOG(FATAL) << "Invalid shadow frame for interpreter use";
-    ctx->result = JValue();
-    return;
-  }
   self->VerifyStack();
 
   uint32_t dex_pc = shadow_frame.GetDexPC();
   const auto* const instrumentation = Runtime::Current()->GetInstrumentation();
   const uint16_t* const insns = accessor.Insns();
-  const Instruction* inst = Instruction::At(insns + dex_pc);
-  uint16_t inst_data;
+  const Instruction* next = Instruction::At(insns + dex_pc);
 
   DCHECK(!shadow_frame.GetForceRetryInstruction())
       << "Entered interpreter from invoke without retry instruction being handled!";
 
   bool const interpret_one_instruction = ctx->interpret_one_instruction;
   while (true) {
+    const Instruction* const inst = next;
     dex_pc = inst->GetDexPc(insns);
     shadow_frame.SetDexPC(dex_pc);
     TraceExecution(shadow_frame, inst, dex_pc);
-    inst_data = inst->Fetch16(0);
-    {
-      bool exit_loop = false;
-      InstructionHandler<do_access_check, transaction_active> handler(
-          ctx, instrumentation, self, shadow_frame, dex_pc, inst, inst_data, exit_loop);
-      if (!handler.Preamble()) {
-        if (UNLIKELY(exit_loop)) {
-          return;
+    uint16_t inst_data = inst->Fetch16(0);
+    bool exit = false;
+    if (InstructionHandler<do_access_check, transaction_active, Instruction::kInvalidFormat>(
+            ctx, instrumentation, self, shadow_frame, dex_pc, inst, inst_data, next, exit).
+            Preamble()) {
+      switch (inst->Opcode(inst_data)) {
+#define OPCODE_CASE(OPCODE, OPCODE_NAME, NAME, FORMAT, i, a, e, v)                                \
+        case OPCODE: {                                                                            \
+          DCHECK_EQ(self->IsExceptionPending(), (OPCODE == Instruction::MOVE_EXCEPTION));         \
+          next = inst->RelativeAt(Instruction::SizeInCodeUnits(Instruction::FORMAT));             \
+          bool success = OP_##OPCODE_NAME<do_access_check, transaction_active>(                   \
+              ctx, instrumentation, self, shadow_frame, dex_pc, inst, inst_data, next, exit);     \
+          if (success && LIKELY(!interpret_one_instruction)) {                                    \
+            DCHECK(!exit) << NAME;                                                                \
+            continue;                                                                             \
+          }                                                                                       \
+          if (exit) {                                                                             \
+            shadow_frame.SetDexPC(dex::kDexNoIndex);                                              \
+            return;                                                                               \
+          }                                                                                       \
+          break;                                                                                  \
         }
-        if (UNLIKELY(interpret_one_instruction)) {
-          break;
-        }
-        continue;
-      }
-    }
-    switch (inst->Opcode(inst_data)) {
-#define OPCODE_CASE(OPCODE, OPCODE_NAME, pname, f, i, a, e, v)                                    \
-      case OPCODE: {                                                                              \
-        bool exit_loop = false;                                                                   \
-        InstructionHandler<do_access_check, transaction_active> handler(                          \
-            ctx, instrumentation, self, shadow_frame, dex_pc, inst, inst_data, exit_loop);        \
-        handler.OPCODE_NAME();                                                                    \
-        /* TODO: Advance 'inst' here, instead of explicitly in each handler */                    \
-        if (UNLIKELY(exit_loop)) {                                                                \
-          return;                                                                                 \
-        }                                                                                         \
-        break;                                                                                    \
-      }
-DEX_INSTRUCTION_LIST(OPCODE_CASE)
+  DEX_INSTRUCTION_LIST(OPCODE_CASE)
 #undef OPCODE_CASE
+      }
+    } else {
+      // Preamble returned false due to debugger event.
+      if (exit) {
+        shadow_frame.SetDexPC(dex::kDexNoIndex);
+        return;  // Return statement or debugger forced exit.
+      }
     }
-    if (UNLIKELY(interpret_one_instruction)) {
-      break;
+    if (self->IsExceptionPending()) {
+      if (!InstructionHandler<do_access_check, transaction_active, Instruction::kInvalidFormat>(
+              ctx, instrumentation, self, shadow_frame, dex_pc, inst, inst_data, next, exit).
+              HandlePendingException()) {
+        shadow_frame.SetDexPC(dex::kDexNoIndex);
+        return;  // Locally unhandled exception - return to caller.
+      }
+      // Continue execution in the catch block.
+    }
+    if (interpret_one_instruction) {
+      shadow_frame.SetDexPC(next->GetDexPc(insns));  // Record where we stopped.
+      ctx->result = ctx->result_register;
+      return;
     }
   }
-  // Record where we stopped.
-  shadow_frame.SetDexPC(inst->GetDexPc(insns));
-  ctx->result = ctx->result_register;
-  return;
 }  // NOLINT(readability/fn_size)
 
 }  // namespace interpreter
diff --git a/runtime/interpreter/mterp/arm64/array.S b/runtime/interpreter/mterp/arm64/array.S
index a023d22..628f832 100644
--- a/runtime/interpreter/mterp/arm64/array.S
+++ b/runtime/interpreter/mterp/arm64/array.S
@@ -75,7 +75,7 @@
     GET_VREG w1, w3                     // w1<- vCC (requested index)
     cbz     w0, common_errNullObject        // yes, bail
     ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]    // w3<- arrayObj->length
-    add     x0, x0, w1, lsl #3          // w0<- arrayObj + index*width
+    add     x0, x0, w1, uxtw #3         // w0<- arrayObj + index*width
     cmp     w1, w3                      // compare unsigned index, length
     bcs     common_errArrayIndex        // index >= length, bail
     FETCH_ADVANCE_INST 2                // advance rPC, load wINST
@@ -104,7 +104,7 @@
     GET_VREG w1, w3                     // w1<- vCC (requested index)
     cbz     w0, common_errNullObject    // bail if null
     ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]     // w3<- arrayObj->length
-    add     x0, x0, w1, lsl #$shift     // w0<- arrayObj + index*width
+    add     x0, x0, w1, uxtw #$shift    // w0<- arrayObj + index*width
     cmp     w1, w3                      // compare unsigned index, length
     bcs     common_errArrayIndex        // index >= length, bail
     FETCH_ADVANCE_INST 2                // advance rPC, load rINST
@@ -154,7 +154,7 @@
     GET_VREG w1, w3                     // w1<- vCC (requested index)
     cbz     w0, common_errNullObject    // bail if null
     ldr     w3, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]    // w3<- arrayObj->length
-    add     x0, x0, w1, lsl #3          // w0<- arrayObj + index*width
+    add     x0, x0, w1, uxtw #3         // w0<- arrayObj + index*width
     cmp     w1, w3                      // compare unsigned index, length
     bcs     common_errArrayIndex        // index >= length, bail
     GET_VREG_WIDE x1, w4
diff --git a/runtime/interpreter/mterp/arm64/main.S b/runtime/interpreter/mterp/arm64/main.S
index aefec61..fd745f1 100644
--- a/runtime/interpreter/mterp/arm64/main.S
+++ b/runtime/interpreter/mterp/arm64/main.S
@@ -268,23 +268,23 @@
  * Get/set the 64-bit value from a Dalvik register.
  */
 .macro GET_VREG_WIDE reg, vreg
-    add     ip2, xFP, \vreg, lsl #2
+    add     ip2, xFP, \vreg, uxtw #2
     ldr     \reg, [ip2]
 .endm
 .macro SET_VREG_WIDE reg, vreg
-    add     ip2, xFP, \vreg, lsl #2
+    add     ip2, xFP, \vreg, uxtw #2
     str     \reg, [ip2]
-    add     ip2, xREFS, \vreg, lsl #2
+    add     ip2, xREFS, \vreg, uxtw #2
     str     xzr, [ip2]
 .endm
 .macro GET_VREG_DOUBLE reg, vreg
-    add     ip2, xFP, \vreg, lsl #2
+    add     ip2, xFP, \vreg, uxtw #2
     ldr     \reg, [ip2]
 .endm
 .macro SET_VREG_DOUBLE reg, vreg
-    add     ip2, xFP, \vreg, lsl #2
+    add     ip2, xFP, \vreg, uxtw #2
     str     \reg, [ip2]
-    add     ip2, xREFS, \vreg, lsl #2
+    add     ip2, xREFS, \vreg, uxtw #2
     str     xzr, [ip2]
 .endm
 
@@ -300,7 +300,7 @@
  * Convert a virtual register index into an address.
  */
 .macro VREG_INDEX_TO_ADDR reg, vreg
-    add     \reg, xFP, \vreg, lsl #2   /* WARNING: handle shadow frame vreg zero if store */
+    add     \reg, xFP, \vreg, uxtw #2   /* WARNING: handle shadow frame vreg zero if store */
 .endm
 
 /*
@@ -418,9 +418,9 @@
     mov     xSELF, x0
     ldr     w0, [x2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
     add     xFP, x2, #SHADOWFRAME_VREGS_OFFSET     // point to vregs.
-    add     xREFS, xFP, w0, lsl #2                 // point to reference array in shadow frame
+    add     xREFS, xFP, w0, uxtw #2                // point to reference array in shadow frame
     ldr     w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET]   // Get starting dex_pc.
-    add     xPC, x1, w0, lsl #1                    // Create direct pointer to 1st dex opcode
+    add     xPC, x1, w0, uxtw #1                   // Create direct pointer to 1st dex opcode
     CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
     EXPORT_PC
 
diff --git a/runtime/interpreter/mterp/arm64/other.S b/runtime/interpreter/mterp/arm64/other.S
index f1d0ef3..eccd521 100644
--- a/runtime/interpreter/mterp/arm64/other.S
+++ b/runtime/interpreter/mterp/arm64/other.S
@@ -263,7 +263,7 @@
     ldr     x0, [xFP, #OFF_FP_RESULT_REGISTER]  // get pointer to result JType.
     ldr     x0, [x0]                    // r0 <- result.i.
     GET_INST_OPCODE ip                  // extract opcode from wINST
-    SET_VREG_WIDE x0, x2                // fp[AA]<- r0
+    SET_VREG_WIDE x0, w2                // fp[AA]<- r0
     GOTO_OPCODE ip                      // jump to next instruction
 
 %def op_move_wide():
diff --git a/runtime/interpreter/mterp/mips/arithmetic.S b/runtime/interpreter/mterp/mips/arithmetic.S
deleted file mode 100644
index 9ae10f2..0000000
--- a/runtime/interpreter/mterp/mips/arithmetic.S
+++ /dev/null
@@ -1,803 +0,0 @@
-%def binop(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG(a1, a3)                       #  a1 <- vCC
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    .if $chkzero
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    $preinstr                              #  optional op
-    $instr                                 #  $result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO($result, rOBJ, t0)       #  vAA <- $result
-
-%def binop2addr(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, rOBJ)                     #  a0 <- vA
-    GET_VREG(a1, a3)                       #  a1 <- vB
-    .if $chkzero
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    $preinstr                              #  optional op
-    $instr                                 #  $result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO($result, rOBJ, t0)       #  vA <- $result
-
-%def binopLit16(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, +CCCC */
-    FETCH_S(a1, 1)                         #  a1 <- ssssCCCC (sign-extended)
-    GET_OPB(a2)                            #  a2 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG(a0, a2)                       #  a0 <- vB
-    .if $chkzero
-    # cmp a1, 0; is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-    $preinstr                              #  optional op
-    $instr                                 #  $result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO($result, rOBJ, t0)       #  vA <- $result
-
-%def binopLit8(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, +CC */
-    FETCH_S(a3, 1)                         #  a3 <- ssssCCBB (sign-extended for CC)
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a3, 255                  #  a2 <- BB
-    GET_VREG(a0, a2)                       #  a0 <- vBB
-    sra       a1, a3, 8                    #  a1 <- ssssssCC (sign extended)
-    .if $chkzero
-    # is second operand zero?
-    beqz      a1, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-    $preinstr                              #  optional op
-    $instr                                 #  $result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO($result, rOBJ, t0)       #  vAA <- $result
-
-%def binopWide(preinstr="", result0="a0", result1="a1", chkzero="0", arg0="a0", arg1="a1", arg2="a2", arg3="a3", instr=""):
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a2-a3).  Useful for integer division and modulus.
-     *
-     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
-     *      xor-long
-     *
-     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64($arg0, $arg1, a2)               #  a0/a1 <- vBB/vBB+1
-    LOAD64($arg2, $arg3, t1)               #  a2/a3 <- vCC/vCC+1
-    .if $chkzero
-    or        t0, $arg2, $arg3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-
-    $preinstr                              #  optional op
-    $instr                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vAA/vAA+1 <- $result0/$result1
-
-%def binopWide2addr(preinstr="", result0="a0", result1="a1", chkzero="0", arg0="a0", arg1="a1", arg2="a2", arg3="a3", instr=""):
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0-a1 op a2-a3".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register pair other than a0-a1, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a2-a3).  Useful for integer division and modulus.
-     *
-     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
-     *      and-long/2addr, or-long/2addr, xor-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64($arg2, $arg3, a1)               #  a2/a3 <- vB/vB+1
-    LOAD64($arg0, $arg1, t0)               #  a0/a1 <- vA/vA+1
-    .if $chkzero
-    or        t0, $arg2, $arg3             #  second arg (a2-a3) is zero?
-    beqz      t0, common_errDivideByZero
-    .endif
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    $preinstr                              #  optional op
-    $instr                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vA/vA+1 <- $result0/$result1
-
-%def unop(preinstr="", result0="a0", instr=""):
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result0 = op a0".
-     * This could be a MIPS instruction or a function call.
-     *
-     * for: int-to-byte, int-to-char, int-to-short,
-     *      neg-int, not-int, neg-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(t0)                           #  t0 <- A+
-    GET_VREG(a0, a3)                       #  a0 <- vB
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $preinstr                              #  optional op
-    $instr                                 #  a0 <- op, a0-a3 changed
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_GOTO($result0, t0, t1)        #  vA <- result0
-
-%def unopNarrower(load="LOAD64_F(fa0, fa0f, a3)", instr=""):
-    /*
-     * Generic 64bit-to-32bit floating-point unary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = op fa0".
-     *
-     * For: double-to-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    $load
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $instr
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- fv0
-
-%def unopWide(preinstr="", result0="a0", result1="a1", instr=""):
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result0/result1 = op a0/a1".
-     * This could be MIPS instruction or a function call.
-     *
-     * For: neg-long, not-long, neg-double,
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vA
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $preinstr                              #  optional op
-    $instr                                 #  a0/a1 <- op, a2-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
-
-%def unopWider(preinstr="", result0="a0", result1="a1", instr=""):
-    /*
-     * Generic 32bit-to-64bit unary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result0/result1 = op a0".
-     *
-     * For: int-to-long
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a0, a3)                       #  a0 <- vB
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $preinstr                              #  optional op
-    $instr                                 #  result <- op, a0-a3 changed
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO($result0, $result1, rOBJ, t0)   #  vA/vA+1 <- a0/a1
-
-%def op_add_int():
-%  binop(instr="addu a0, a0, a1")
-
-%def op_add_int_2addr():
-%  binop2addr(instr="addu a0, a0, a1")
-
-%def op_add_int_lit16():
-%  binopLit16(instr="addu a0, a0, a1")
-
-%def op_add_int_lit8():
-%  binopLit8(instr="addu a0, a0, a1")
-
-%def op_add_long():
-/*
- *  The compiler generates the following sequence for
- *  [v1 v0] =  [a1 a0] + [a3 a2];
- *    addu v0,a2,a0
- *    addu a1,a3,a1
- *    sltu v1,v0,a2
- *    addu v1,v1,a1
- */
-%  binopWide(result0="v0", result1="v1", preinstr="addu v0, a2, a0", instr="addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1")
-
-%def op_add_long_2addr():
-/*
- * See op_add_long.S for details
- */
-%  binopWide2addr(result0="v0", result1="v1", preinstr="addu v0, a2, a0", instr="addu a1, a3, a1; sltu v1, v0, a2; addu v1, v1, a1")
-
-%def op_and_int():
-%  binop(instr="and a0, a0, a1")
-
-%def op_and_int_2addr():
-%  binop2addr(instr="and a0, a0, a1")
-
-%def op_and_int_lit16():
-%  binopLit16(instr="and a0, a0, a1")
-
-%def op_and_int_lit8():
-%  binopLit8(instr="and a0, a0, a1")
-
-%def op_and_long():
-%  binopWide(preinstr="and a0, a0, a2", instr="and a1, a1, a3")
-
-%def op_and_long_2addr():
-%  binopWide2addr(preinstr="and a0, a0, a2", instr="and a1, a1, a3")
-
-%def op_cmp_long():
-    /*
-     * Compare two 64-bit values
-     *    x = y     return  0
-     *    x < y     return -1
-     *    x > y     return  1
-     *
-     * I think I can improve on the ARM code by the following observation
-     *    slt   t0,  x.hi, y.hi;        # (x.hi < y.hi) ? 1:0
-     *    sgt   t1,  x.hi, y.hi;        # (y.hi > x.hi) ? 1:0
-     *    subu  v0, t0, t1              # v0= -1:1:0 for [ < > = ]
-     */
-    /* cmp-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64(a0, a1, a2)                     #  a0/a1 <- vBB/vBB+1
-    LOAD64(a2, a3, a3)                     #  a2/a3 <- vCC/vCC+1
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    slt       t0, a1, a3                   #  compare hi
-    sgt       t1, a1, a3
-    subu      v0, t1, t0                   #  v0 <- (-1, 1, 0)
-    bnez      v0, .L${opcode}_finish
-    # at this point x.hi==y.hi
-    sltu      t0, a0, a2                   #  compare lo
-    sgtu      t1, a0, a2
-    subu      v0, t1, t0                   #  v0 <- (-1, 1, 0) for [< > =]
-
-.L${opcode}_finish:
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(v0, rOBJ, t0)            #  vAA <- v0
-
-%def op_div_int():
-#ifdef MIPS32REVGE6
-%  binop(instr="div a0, a0, a1", chkzero="1")
-#else
-%  binop(preinstr="div zero, a0, a1", instr="mflo a0", chkzero="1")
-#endif
-
-%def op_div_int_2addr():
-#ifdef MIPS32REVGE6
-%  binop2addr(instr="div a0, a0, a1", chkzero="1")
-#else
-%  binop2addr(preinstr="div zero, a0, a1", instr="mflo a0", chkzero="1")
-#endif
-
-%def op_div_int_lit16():
-#ifdef MIPS32REVGE6
-%  binopLit16(instr="div a0, a0, a1", chkzero="1")
-#else
-%  binopLit16(preinstr="div zero, a0, a1", instr="mflo a0", chkzero="1")
-#endif
-
-%def op_div_int_lit8():
-#ifdef MIPS32REVGE6
-%  binopLit8(instr="div a0, a0, a1", chkzero="1")
-#else
-%  binopLit8(preinstr="div zero, a0, a1", instr="mflo a0", chkzero="1")
-#endif
-
-%def op_div_long():
-%  binopWide(result0="v0", result1="v1", instr="JAL(__divdi3)", chkzero="1")
-
-%def op_div_long_2addr():
-%  binopWide2addr(result0="v0", result1="v1", instr="JAL(__divdi3)", chkzero="1")
-
-%def op_int_to_byte():
-%  unop(instr="SEB(a0, a0)")
-
-%def op_int_to_char():
-%  unop(preinstr="", instr="and a0, 0xffff")
-
-%def op_int_to_long():
-%  unopWider(instr="sra a1, a0, 31")
-
-%def op_int_to_short():
-%  unop(instr="SEH(a0, a0)")
-
-%def op_long_to_int():
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-%  op_move()
-
-%def op_mul_int():
-%  binop(instr="mul a0, a0, a1")
-
-%def op_mul_int_2addr():
-%  binop2addr(instr="mul a0, a0, a1")
-
-%def op_mul_int_lit16():
-%  binopLit16(instr="mul a0, a0, a1")
-
-%def op_mul_int_lit8():
-%  binopLit8(instr="mul a0, a0, a1")
-
-%def op_mul_long():
-    /*
-     * Signed 64-bit integer multiply.
-     *         a1   a0
-     *   x     a3   a2
-     *   -------------
-     *       a2a1 a2a0
-     *       a3a0
-     *  a3a1 (<= unused)
-     *  ---------------
-     *         v1   v0
-     */
-    /* mul-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    and       t0, a0, 255                  #  a2 <- BB
-    srl       t1, a0, 8                    #  a3 <- CC
-    EAS2(t0, rFP, t0)                      #  t0 <- &fp[BB]
-    LOAD64(a0, a1, t0)                     #  a0/a1 <- vBB/vBB+1
-
-    EAS2(t1, rFP, t1)                      #  t0 <- &fp[CC]
-    LOAD64(a2, a3, t1)                     #  a2/a3 <- vCC/vCC+1
-
-    mul       v1, a3, a0                   #  v1= a3a0
-#ifdef MIPS32REVGE6
-    mulu      v0, a2, a0                   #  v0= a2a0
-    muhu      t1, a2, a0
-#else
-    multu     a2, a0
-    mfhi      t1
-    mflo      v0                           #  v0= a2a0
-#endif
-    mul       t0, a2, a1                   #  t0= a2a1
-    addu      v1, v1, t1                   #  v1+= hi(a2a0)
-    addu      v1, v1, t0                   #  v1= a3a0 + a2a1;
-
-    GET_OPA(a0)                            #  a0 <- AA
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    b         .L${opcode}_finish
-%def op_mul_long_helper_code():
-
-.Lop_mul_long_finish:
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(v0, v1, a0, t0)        #  vAA/vAA+1 <- v0(low)/v1(high)
-
-%def op_mul_long_2addr():
-    /*
-     * See op_mul_long.S for more details
-     */
-    /* mul-long/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64(a0, a1, t0)                     #  vAA.low / high
-
-    GET_OPB(t1)                            #  t1 <- B
-    EAS2(t1, rFP, t1)                      #  t1 <- &fp[B]
-    LOAD64(a2, a3, t1)                     #  vBB.low / high
-
-    mul       v1, a3, a0                   #  v1= a3a0
-#ifdef MIPS32REVGE6
-    mulu      v0, a2, a0                   #  v0= a2a0
-    muhu      t1, a2, a0
-#else
-    multu     a2, a0
-    mfhi      t1
-    mflo      v0                           #  v0= a2a0
- #endif
-    mul       t2, a2, a1                   #  t2= a2a1
-    addu      v1, v1, t1                   #  v1= a3a0 + hi(a2a0)
-    addu      v1, v1, t2                   #  v1= v1 + a2a1;
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(v0, v1, rOBJ, t1)      #  vA/vA+1 <- v0(low)/v1(high)
-
-%def op_neg_int():
-%  unop(instr="negu a0, a0")
-
-%def op_neg_long():
-%  unopWide(result0="v0", result1="v1", preinstr="negu v0, a0", instr="negu v1, a1; sltu a0, zero, v0; subu v1, v1, a0")
-
-%def op_not_int():
-%  unop(instr="not a0, a0")
-
-%def op_not_long():
-%  unopWide(preinstr="not a0, a0", instr="not a1, a1")
-
-%def op_or_int():
-%  binop(instr="or a0, a0, a1")
-
-%def op_or_int_2addr():
-%  binop2addr(instr="or a0, a0, a1")
-
-%def op_or_int_lit16():
-%  binopLit16(instr="or a0, a0, a1")
-
-%def op_or_int_lit8():
-%  binopLit8(instr="or a0, a0, a1")
-
-%def op_or_long():
-%  binopWide(preinstr="or a0, a0, a2", instr="or a1, a1, a3")
-
-%def op_or_long_2addr():
-%  binopWide2addr(preinstr="or a0, a0, a2", instr="or a1, a1, a3")
-
-%def op_rem_int():
-#ifdef MIPS32REVGE6
-%  binop(instr="mod a0, a0, a1", chkzero="1")
-#else
-%  binop(preinstr="div zero, a0, a1", instr="mfhi a0", chkzero="1")
-#endif
-
-%def op_rem_int_2addr():
-#ifdef MIPS32REVGE6
-%  binop2addr(instr="mod a0, a0, a1", chkzero="1")
-#else
-%  binop2addr(preinstr="div zero, a0, a1", instr="mfhi a0", chkzero="1")
-#endif
-
-%def op_rem_int_lit16():
-#ifdef MIPS32REVGE6
-%  binopLit16(instr="mod a0, a0, a1", chkzero="1")
-#else
-%  binopLit16(preinstr="div zero, a0, a1", instr="mfhi a0", chkzero="1")
-#endif
-
-%def op_rem_int_lit8():
-#ifdef MIPS32REVGE6
-%  binopLit8(instr="mod a0, a0, a1", chkzero="1")
-#else
-%  binopLit8(preinstr="div zero, a0, a1", instr="mfhi a0", chkzero="1")
-#endif
-
-%def op_rem_long():
-%  binopWide(result0="v0", result1="v1", instr="JAL(__moddi3)", chkzero="1")
-
-%def op_rem_long_2addr():
-%  binopWide2addr(result0="v0", result1="v1", instr="JAL(__moddi3)", chkzero="1")
-
-%def op_rsub_int():
-/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
-%  binopLit16(instr="subu a0, a1, a0")
-
-%def op_rsub_int_lit8():
-%  binopLit8(instr="subu a0, a1, a0")
-
-%def op_shl_int():
-%  binop(instr="sll a0, a0, a1")
-
-%def op_shl_int_2addr():
-%  binop2addr(instr="sll a0, a0, a1")
-
-%def op_shl_int_lit8():
-%  binopLit8(instr="sll a0, a0, a1")
-
-%def op_shl_long():
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.
-     */
-    /* shl-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(t2)                            #  t2 <- AA
-    and       a3, a0, 255                  #  a3 <- BB
-    srl       a0, a0, 8                    #  a0 <- CC
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
-    GET_VREG(a2, a0)                       #  a2 <- vCC
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi    v1, a2, 0x20                   #  shift< shift & 0x20
-    sll     v0, a0, a2                     #  rlo<- alo << (shift&31)
-    bnez    v1, .L${opcode}_finish
-    not     v1, a2                         #  rhi<- 31-shift  (shift is 5b)
-    srl     a0, 1
-    srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
-    sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
-    or      v1, a0                         #  rhi<- rhi | alo
-    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vAA/vAA+1 <- v0/v1
-%def op_shl_long_helper_code():
-
-.Lop_shl_long_finish:
-    SET_VREG64_GOTO(zero, v0, t2, t0)      #  vAA/vAA+1 <- rlo/rhi
-
-%def op_shl_long_2addr():
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* shl-long/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a2, a3)                       #  a2 <- vB
-    EAS2(t2, rFP, rOBJ)                    #  t2 <- &fp[A]
-    LOAD64(a0, a1, t2)                     #  a0/a1 <- vA/vA+1
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi    v1, a2, 0x20                   #  shift< shift & 0x20
-    sll     v0, a0, a2                     #  rlo<- alo << (shift&31)
-    bnez    v1, .L${opcode}_finish
-    not     v1, a2                         #  rhi<- 31-shift  (shift is 5b)
-    srl     a0, 1
-    srl     a0, v1                         #  alo<- alo >> (32-(shift&31))
-    sll     v1, a1, a2                     #  rhi<- ahi << (shift&31)
-    or      v1, a0                         #  rhi<- rhi | alo
-    SET_VREG64_GOTO(v0, v1, rOBJ, t0)      #  vA/vA+1 <- v0/v1
-%def op_shl_long_2addr_helper_code():
-
-.Lop_shl_long_2addr_finish:
-    SET_VREG64_GOTO(zero, v0, rOBJ, t0)    #  vA/vA+1 <- rlo/rhi
-
-%def op_shr_int():
-%  binop(instr="sra a0, a0, a1")
-
-%def op_shr_int_2addr():
-%  binop2addr(instr="sra a0, a0, a1")
-
-%def op_shr_int_lit8():
-%  binopLit8(instr="sra a0, a0, a1")
-
-%def op_shr_long():
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.
-     */
-    /* shr-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(t3)                            #  t3 <- AA
-    and       a3, a0, 255                  #  a3 <- BB
-    srl       a0, a0, 8                    #  a0 <- CC
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
-    GET_VREG(a2, a0)                       #  a2 <- vCC
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi    v0, a2, 0x20                   #  shift & 0x20
-    sra     v1, a1, a2                     #  rhi<- ahi >> (shift&31)
-    bnez    v0, .L${opcode}_finish
-    srl     v0, a0, a2                     #  rlo<- alo >> (shift&31)
-    not     a0, a2                         #  alo<- 31-shift (shift is 5b)
-    sll     a1, 1
-    sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
-    or      v0, a1                         #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vAA/VAA+1 <- v0/v1
-%def op_shr_long_helper_code():
-
-.Lop_shr_long_finish:
-    sra     a3, a1, 31                     #  a3<- sign(ah)
-    SET_VREG64_GOTO(v1, a3, t3, t0)        #  vAA/VAA+1 <- rlo/rhi
-
-%def op_shr_long_2addr():
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* shr-long/2addr vA, vB */
-    GET_OPA4(t2)                           #  t2 <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a2, a3)                       #  a2 <- vB
-    EAS2(t0, rFP, t2)                      #  t0 <- &fp[A]
-    LOAD64(a0, a1, t0)                     #  a0/a1 <- vA/vA+1
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi    v0, a2, 0x20                   #  shift & 0x20
-    sra     v1, a1, a2                     #  rhi<- ahi >> (shift&31)
-    bnez    v0, .L${opcode}_finish
-    srl     v0, a0, a2                     #  rlo<- alo >> (shift&31)
-    not     a0, a2                         #  alo<- 31-shift (shift is 5b)
-    sll     a1, 1
-    sll     a1, a0                         #  ahi<- ahi << (32-(shift&31))
-    or      v0, a1                         #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, t2, t0)        #  vA/vA+1 <- v0/v1
-%def op_shr_long_2addr_helper_code():
-
-.Lop_shr_long_2addr_finish:
-    sra     a3, a1, 31                     #  a3<- sign(ah)
-    SET_VREG64_GOTO(v1, a3, t2, t0)        #  vA/vA+1 <- rlo/rhi
-
-%def op_sub_int():
-%  binop(instr="subu a0, a0, a1")
-
-%def op_sub_int_2addr():
-%  binop2addr(instr="subu a0, a0, a1")
-
-%def op_sub_long():
-/*
- * For little endian the code sequence looks as follows:
- *    subu    v0,a0,a2
- *    subu    v1,a1,a3
- *    sltu    a0,a0,v0
- *    subu    v1,v1,a0
- */
-%  binopWide(result0="v0", result1="v1", preinstr="subu v0, a0, a2", instr="subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0")
-
-%def op_sub_long_2addr():
-/*
- * See op_sub_long.S for more details
- */
-%  binopWide2addr(result0="v0", result1="v1", preinstr="subu v0, a0, a2", instr="subu v1, a1, a3; sltu a0, a0, v0; subu v1, v1, a0")
-
-%def op_ushr_int():
-%  binop(instr="srl a0, a0, a1")
-
-%def op_ushr_int_2addr():
-%  binop2addr(instr="srl a0, a0, a1 ")
-
-%def op_ushr_int_lit8():
-%  binopLit8(instr="srl a0, a0, a1")
-
-%def op_ushr_long():
-    /*
-     * Long integer shift.  This is different from the generic 32/64-bit
-     * binary operations because vAA/vBB are 64-bit but vCC (the shift
-     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
-     * 6 bits of the shift distance.
-     */
-    /* ushr-long vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a3, a0, 255                  #  a3 <- BB
-    srl       a0, a0, 8                    #  a0 <- CC
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BB]
-    GET_VREG(a2, a0)                       #  a2 <- vCC
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- vBB/vBB+1
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi      v0, a2, 0x20                 #  shift & 0x20
-    srl       v1, a1, a2                   #  rhi<- ahi >> (shift&31)
-    bnez      v0, .L${opcode}_finish
-    srl       v0, a0, a2                   #  rlo<- alo >> (shift&31)
-    not       a0, a2                       #  alo<- 31-n  (shift is 5b)
-    sll       a1, 1
-    sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
-    or        v0, a1                       #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, rOBJ, t0)      #  vAA/vAA+1 <- v0/v1
-%def op_ushr_long_helper_code():
-
-.Lop_ushr_long_finish:
-    SET_VREG64_GOTO(v1, zero, rOBJ, t0)    #  vAA/vAA+1 <- rlo/rhi
-
-%def op_ushr_long_2addr():
-    /*
-     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
-     * 32-bit shift distance.
-     */
-    /* ushr-long/2addr vA, vB */
-    GET_OPA4(t3)                           #  t3 <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG(a2, a3)                       #  a2 <- vB
-    EAS2(t0, rFP, t3)                      #  t0 <- &fp[A]
-    LOAD64(a0, a1, t0)                     #  a0/a1 <- vA/vA+1
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-
-    andi      v0, a2, 0x20                 #  shift & 0x20
-    srl       v1, a1, a2                   #  rhi<- ahi >> (shift&31)
-    bnez      v0, .L${opcode}_finish
-    srl       v0, a0, a2                   #  rlo<- alo >> (shift&31)
-    not       a0, a2                       #  alo<- 31-n  (shift is 5b)
-    sll       a1, 1
-    sll       a1, a0                       #  ahi<- ahi << (32-(shift&31))
-    or        v0, a1                       #  rlo<- rlo | ahi
-    SET_VREG64_GOTO(v0, v1, t3, t0)        #  vA/vA+1 <- v0/v1
-%def op_ushr_long_2addr_helper_code():
-
-.Lop_ushr_long_2addr_finish:
-    SET_VREG64_GOTO(v1, zero, t3, t0)      #  vA/vA+1 <- rlo/rhi
-
-%def op_xor_int():
-%  binop(instr="xor a0, a0, a1")
-
-%def op_xor_int_2addr():
-%  binop2addr(instr="xor a0, a0, a1")
-
-%def op_xor_int_lit16():
-%  binopLit16(instr="xor a0, a0, a1")
-
-%def op_xor_int_lit8():
-%  binopLit8(instr="xor a0, a0, a1")
-
-%def op_xor_long():
-%  binopWide(preinstr="xor a0, a0, a2", instr="xor a1, a1, a3")
-
-%def op_xor_long_2addr():
-%  binopWide2addr(preinstr="xor a0, a0, a2", instr="xor a1, a1, a3")
diff --git a/runtime/interpreter/mterp/mips/array.S b/runtime/interpreter/mterp/mips/array.S
deleted file mode 100644
index 57ab147..0000000
--- a/runtime/interpreter/mterp/mips/array.S
+++ /dev/null
@@ -1,239 +0,0 @@
-%def op_aget(load="lw", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
-     * instructions.  We use a pair of FETCH_Bs instead.
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EASN(a0, a0, a1, $shift)               #  a0 <- arrayObj + index*width
-    # a1 >= a3; compare unsigned index
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    $load a2, $data_offset(a0)             #  a2 <- vBB[vCC]
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a2, rOBJ, t0)            #  vAA <- a2
-
-%def op_aget_boolean():
-%  op_aget(load="lbu", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
-
-%def op_aget_byte():
-%  op_aget(load="lb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
-
-%def op_aget_char():
-%  op_aget(load="lhu", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
-
-%def op_aget_object():
-    /*
-     * Array object get.  vAA <- vBB[vCC].
-     *
-     * for: aget-object
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    EXPORT_PC()
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    JAL(artAGetObjectFromMterp)            #  v0 <- GetObj(array, index)
-    lw   a1, THREAD_EXCEPTION_OFFSET(rSELF)
-    PREFETCH_INST(2)                       #  load rINST
-    bnez a1, MterpException
-    ADVANCE(2)                             #  advance rPC
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_OBJECT_GOTO(v0, rOBJ, t0)     #  vAA <- v0
-
-%def op_aget_short():
-%  op_aget(load="lh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
-
-%def op_aget_wide():
-    /*
-     * Array get, 64 bits.  vAA <- vBB[vCC].
-     *
-     * Arrays of long/double are 64-bit aligned.
-     */
-    /* aget-wide vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EAS3(a0, a0, a1)                       #  a0 <- arrayObj + index*width
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    LOAD64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET)
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a2, a3, rOBJ, t0)      #  vAA/vAA+1 <- a2/a3
-
-%def op_aput(store="sw", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
-
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    FETCH_B(a2, 1, 0)                      #  a2 <- BB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    FETCH_B(a3, 1, 1)                      #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EASN(a0, a0, a1, $shift)               #  a0 <- arrayObj + index*width
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_VREG(a2, rOBJ)                     #  a2 <- vAA
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    $store a2, $data_offset(a0)            #  vBB[vCC] <- a2
-    JR(t0)                                 #  jump to next instruction
-
-%def op_aput_boolean():
-%  op_aput(store="sb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
-
-%def op_aput_byte():
-%  op_aput(store="sb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
-
-%def op_aput_char():
-%  op_aput(store="sh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
-
-%def op_aput_object():
-    /*
-     * Store an object into an array.  vBB[vCC] <- vAA.
-     *
-     */
-    /* op vAA, vBB, vCC */
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME
-    move   a1, rPC
-    move   a2, rINST
-    JAL(MterpAputObject)
-    beqz   v0, MterpPossibleException
-    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-%def op_aput_short():
-%  op_aput(store="sh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
-
-%def op_aput_wide():
-    /*
-     * Array put, 64 bits.  vBB[vCC] <- vAA.
-     */
-    /* aput-wide vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(t0)                            #  t0 <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    GET_VREG(a0, a2)                       #  a0 <- vBB (array object)
-    GET_VREG(a1, a3)                       #  a1 <- vCC (requested index)
-    # null array object?
-    beqz      a0, common_errNullObject     #  yes, bail
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- arrayObj->length
-    EAS3(a0, a0, a1)                       #  a0 <- arrayObj + index*width
-    EAS2(rOBJ, rFP, t0)                    #  rOBJ <- &fp[AA]
-    # compare unsigned index, length
-    bgeu      a1, a3, common_errArrayIndex #  index >= length, bail
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    LOAD64(a2, a3, rOBJ)                   #  a2/a3 <- vAA/vAA+1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    STORE64_off(a2, a3, a0, MIRROR_WIDE_ARRAY_DATA_OFFSET) #  a2/a3 <- vBB[vCC]
-    JR(t0)                                 #  jump to next instruction
-
-%def op_array_length():
-    /*
-     * Return the length of an array.
-     */
-    /* array-length vA, vB */
-    GET_OPB(a1)                            #  a1 <- B
-    GET_OPA4(a2)                           #  a2 <- A+
-    GET_VREG(a0, a1)                       #  a0 <- vB (object ref)
-    # is object null?
-    beqz      a0, common_errNullObject     #  yup, fail
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    LOAD_base_offMirrorArray_length(a3, a0) #  a3 <- array length
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a3, a2, t0)              #  vA <- length
-
-%def op_fill_array_data():
-    /* fill-array-data vAA, +BBBBBBBB */
-    EXPORT_PC()
-    FETCH(a1, 1)                           #  a1 <- bbbb (lo)
-    FETCH(a0, 2)                           #  a0 <- BBBB (hi)
-    GET_OPA(a3)                            #  a3 <- AA
-    INSERT_HIGH_HALF(a1, a0)               #  a1 <- BBBBbbbb
-    GET_VREG(a0, a3)                       #  a0 <- vAA (array object)
-    EAS1(a1, rPC, a1)                      #  a1 <- PC + BBBBbbbb*2 (array data off.)
-    JAL(MterpFillArrayData)                #  v0 <- Mterp(obj, payload)
-    beqz      v0,  MterpPossibleException  #  has exception
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-%def op_filled_new_array(helper="MterpFilledNewArray"):
-    /*
-     * Create a new array with elements filled from registers.
-     *
-     * for: filled-new-array, filled-new-array/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
-    .extern $helper
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME     # a0 <- shadow frame
-    move   a1, rPC
-    move   a2, rSELF
-    JAL($helper)                           #  v0 <- helper(shadow_frame, pc, self)
-    beqz      v0,  MterpPossibleException  #  has exception
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-%def op_filled_new_array_range():
-%  op_filled_new_array(helper="MterpFilledNewArrayRange")
-
-%def op_new_array():
-    /*
-     * Allocate an array of objects, specified with the array class
-     * and a count.
-     *
-     * The verifier guarantees that this is an array class, so we don't
-     * check for it here.
-     */
-    /* new-array vA, vB, class@CCCC */
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME
-    move   a1, rPC
-    move   a2, rINST
-    move   a3, rSELF
-    JAL(MterpNewArray)
-    beqz   v0, MterpPossibleException
-    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips/control_flow.S b/runtime/interpreter/mterp/mips/control_flow.S
deleted file mode 100644
index 88e1f0e..0000000
--- a/runtime/interpreter/mterp/mips/control_flow.S
+++ /dev/null
@@ -1,214 +0,0 @@
-%def bincmp(condition=""):
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    GET_OPA4(a0)                           #  a0 <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    GET_VREG(a3, a1)                       #  a3 <- vB
-    GET_VREG(a0, a0)                       #  a0 <- vA
-    FETCH_S(rINST, 1)                      #  rINST<- branch offset, in code units
-    b${condition} a0, a3, MterpCommonTakenBranchNoFlags  #  compare (vA, vB)
-    li        t0, JIT_CHECK_OSR
-    beq       rPROFILE, t0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-%def zcmp(condition=""):
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform.
-     *
-     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    GET_OPA(a0)                            #  a0 <- AA
-    GET_VREG(a0, a0)                       #  a0 <- vAA
-    FETCH_S(rINST, 1)                      #  rINST <- branch offset, in code units
-    b${condition} a0, zero, MterpCommonTakenBranchNoFlags
-    li        t0, JIT_CHECK_OSR            # possible OSR re-entry?
-    beq       rPROFILE, t0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-%def op_goto():
-    /*
-     * Unconditional branch, 8-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto +AA */
-    sll       a0, rINST, 16                #  a0 <- AAxx0000
-    sra       rINST, a0, 24                #  rINST <- ssssssAA (sign-extended)
-    b       MterpCommonTakenBranchNoFlags
-
-%def op_goto_16():
-    /*
-     * Unconditional branch, 16-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto/16 +AAAA */
-    FETCH_S(rINST, 1)                      #  rINST <- ssssAAAA (sign-extended)
-    b       MterpCommonTakenBranchNoFlags
-
-%def op_goto_32():
-    /*
-     * Unconditional branch, 32-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     *
-     * Unlike most opcodes, this one is allowed to branch to itself, so
-     * our "backward branch" test must be "<=0" instead of "<0".
-     */
-    /* goto/32 +AAAAAAAA */
-    FETCH(rINST, 1)                        #  rINST <- aaaa (lo)
-    FETCH(a1, 2)                           #  a1 <- AAAA (hi)
-    INSERT_HIGH_HALF(rINST, a1)            #  rINST <- AAAAaaaa
-    b         MterpCommonTakenBranchNoFlags
-
-%def op_if_eq():
-%  bincmp(condition="eq")
-
-%def op_if_eqz():
-%  zcmp(condition="eq")
-
-%def op_if_ge():
-%  bincmp(condition="ge")
-
-%def op_if_gez():
-%  zcmp(condition="ge")
-
-%def op_if_gt():
-%  bincmp(condition="gt")
-
-%def op_if_gtz():
-%  zcmp(condition="gt")
-
-%def op_if_le():
-%  bincmp(condition="le")
-
-%def op_if_lez():
-%  zcmp(condition="le")
-
-%def op_if_lt():
-%  bincmp(condition="lt")
-
-%def op_if_ltz():
-%  zcmp(condition="lt")
-
-%def op_if_ne():
-%  bincmp(condition="ne")
-
-%def op_if_nez():
-%  zcmp(condition="ne")
-
-%def op_packed_switch(func="MterpDoPackedSwitch"):
-    /*
-     * Handle a packed-switch or sparse-switch instruction.  In both cases
-     * we decode it and hand it off to a helper function.
-     *
-     * We don't really expect backward branches in a switch statement, but
-     * they're perfectly legal, so we check for them here.
-     *
-     * for: packed-switch, sparse-switch
-     */
-    /* op vAA, +BBBB */
-    FETCH(a0, 1)                           #  a0 <- bbbb (lo)
-    FETCH(a1, 2)                           #  a1 <- BBBB (hi)
-    GET_OPA(a3)                            #  a3 <- AA
-    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb
-    GET_VREG(a1, a3)                       #  a1 <- vAA
-    EAS1(a0, rPC, a0)                      #  a0 <- PC + BBBBbbbb*2
-    JAL($func)                             #  a0 <- code-unit branch offset
-    move      rINST, v0
-    b         MterpCommonTakenBranchNoFlags
-
-%def op_return():
-    /*
-     * Return a 32-bit value.
-     *
-     * for: return, return-object
-     */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    JAL(MterpThreadFenceForConstructor)
-    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
-    move      a0, rSELF
-    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqz      ra, 1f
-    JAL(MterpSuspendCheck)                 # (self)
-1:
-    GET_OPA(a2)                            #  a2 <- AA
-    GET_VREG(v0, a2)                       #  v0 <- vAA
-    move      v1, zero
-    b         MterpReturn
-
-%def op_return_object():
-%  op_return()
-
-%def op_return_void():
-    .extern MterpThreadFenceForConstructor
-    JAL(MterpThreadFenceForConstructor)
-    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
-    move      a0, rSELF
-    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqz      ra, 1f
-    JAL(MterpSuspendCheck)                 # (self)
-1:
-    move      v0, zero
-    move      v1, zero
-    b         MterpReturn
-
-%def op_return_void_no_barrier():
-    lw     ra, THREAD_FLAGS_OFFSET(rSELF)
-    move   a0, rSELF
-    and    ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqz   ra, 1f
-    JAL(MterpSuspendCheck)                 # (self)
-1:
-    move   v0, zero
-    move   v1, zero
-    b      MterpReturn
-
-%def op_return_wide():
-    /*
-     * Return a 64-bit value.
-     */
-    /* return-wide vAA */
-    .extern MterpThreadFenceForConstructor
-    JAL(MterpThreadFenceForConstructor)
-    lw        ra, THREAD_FLAGS_OFFSET(rSELF)
-    move      a0, rSELF
-    and       ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqz      ra, 1f
-    JAL(MterpSuspendCheck)                 # (self)
-1:
-    GET_OPA(a2)                            #  a2 <- AA
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[AA]
-    LOAD64(v0, v1, a2)                     #  v0/v1 <- vAA/vAA+1
-    b         MterpReturn
-
-%def op_sparse_switch():
-%  op_packed_switch(func="MterpDoSparseSwitch")
-
-%def op_throw():
-    /*
-     * Throw an exception object in the current thread.
-     */
-    /* throw vAA */
-    EXPORT_PC()                              #  exception handler can throw
-    GET_OPA(a2)                              #  a2 <- AA
-    GET_VREG(a1, a2)                         #  a1 <- vAA (exception object)
-    # null object?
-    beqz  a1, common_errNullObject           #  yes, throw an NPE instead
-    sw    a1, THREAD_EXCEPTION_OFFSET(rSELF) #  thread->exception <- obj
-    b         MterpException
diff --git a/runtime/interpreter/mterp/mips/floating_point.S b/runtime/interpreter/mterp/mips/floating_point.S
deleted file mode 100644
index 20df51e..0000000
--- a/runtime/interpreter/mterp/mips/floating_point.S
+++ /dev/null
@@ -1,518 +0,0 @@
-%def fbinop(instr=""):
-    /*
-     * Generic 32-bit binary float operation.
-     *
-     * For: add-fp, sub-fp, mul-fp, div-fp, rem-fp
-     */
-
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    srl       a3, a0, 8                    #  a3 <- CC
-    and       a2, a0, 255                  #  a2 <- BB
-    GET_VREG_F(fa1, a3)                    #  a1 <- vCC
-    GET_VREG_F(fa0, a2)                    #  a0 <- vBB
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    $instr                                 #  f0 = result
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vAA <- fv0
-
-%def fbinop2addr(instr=""):
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr"
-     * that specifies an instruction that performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr,
-     *      div-float/2addr, rem-float/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG_F(fa0, rOBJ)
-    GET_VREG_F(fa1, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-    $instr
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- result
-
-%def fbinopWide(instr=""):
-    /*
-     * Generic 64-bit floating-point binary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * for: add-double, sub-double, mul-double, div-double,
-     *      rem-double
-     *
-     */
-    /* binop vAA, vBB, vCC */
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    GET_OPA(rOBJ)                          #  rOBJ <- AA
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8                    #  a3 <- CC
-    EAS2(a2, rFP, a2)                      #  a2 <- &fp[BB]
-    EAS2(t1, rFP, a3)                      #  a3 <- &fp[CC]
-    LOAD64_F(fa0, fa0f, a2)
-    LOAD64_F(fa1, fa1f, t1)
-
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    $instr
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vAA/vAA+1 <- fv0
-
-%def fbinopWide2addr(instr=""):
-    /*
-     * Generic 64-bit floating-point "/2addr" binary operation.
-     * Provide an "instr" line that specifies an instruction that
-     * performs "fv0 = fa0 op fa1".
-     * This could be an MIPS instruction or a function call.
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr,
-     *      div-double/2addr, rem-double/2addr
-     */
-    /* binop/2addr vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a1)                            #  a1 <- B
-    EAS2(a1, rFP, a1)                      #  a1 <- &fp[B]
-    EAS2(t0, rFP, rOBJ)                    #  t0 <- &fp[A]
-    LOAD64_F(fa0, fa0f, t0)
-    LOAD64_F(fa1, fa1f, a1)
-
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $instr
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0)  #  vA/vA+1 <- fv0
-
-%def funop(instr=""):
-    /*
-     * Generic 32-bit floating-point unary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = op fa0".
-     * This could be a MIPS instruction or a function call.
-     *
-     * for: int-to-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG_F(fa0, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $instr
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t1)         #  vA <- fv0
-
-%def funopWider(instr=""):
-    /*
-     * Generic 32bit-to-64bit floating-point unary operation.  Provide an "instr"
-     * line that specifies an instruction that performs "fv0 = op fa0".
-     *
-     * For: int-to-double, float-to-double
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG_F(fa0, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    $instr
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) #  vA/vA+1 <- fv0
-
-%def op_add_double():
-%  fbinopWide(instr="add.d fv0, fa0, fa1")
-
-%def op_add_double_2addr():
-%  fbinopWide2addr(instr="add.d fv0, fa0, fa1")
-
-%def op_add_float():
-%  fbinop(instr="add.s fv0, fa0, fa1")
-
-%def op_add_float_2addr():
-%  fbinop2addr(instr="add.s fv0, fa0, fa1")
-
-%def op_cmpg_double():
-%  op_cmpl_double(gt_bias="1")
-
-%def op_cmpg_float():
-%  op_cmpl_float(gt_bias="1")
-
-%def op_cmpl_double(gt_bias="0"):
-    /*
-     * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
-     * into the destination register based on the comparison results.
-     *
-     * For: cmpl-double, cmpg-double
-     */
-    /* op vAA, vBB, vCC */
-
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    and       rOBJ, a0, 255                #  rOBJ <- BB
-    srl       t0, a0, 8                    #  t0 <- CC
-    EAS2(rOBJ, rFP, rOBJ)                  #  rOBJ <- &fp[BB]
-    EAS2(t0, rFP, t0)                      #  t0 <- &fp[CC]
-    LOAD64_F(ft0, ft0f, rOBJ)
-    LOAD64_F(ft1, ft1f, t0)
-#ifdef MIPS32REVGE6
-    cmp.eq.d  ft2, ft0, ft1
-    li        rTEMP, 0
-    bc1nez    ft2, 1f                      # done if vBB == vCC (ordered)
-    .if $gt_bias
-    cmp.lt.d  ft2, ft0, ft1
-    li        rTEMP, -1
-    bc1nez    ft2, 1f                      # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    cmp.lt.d  ft2, ft1, ft0
-    li        rTEMP, 1
-    bc1nez    ft2, 1f                      # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#else
-    c.eq.d    fcc0, ft0, ft1
-    li        rTEMP, 0
-    bc1t      fcc0, 1f                     # done if vBB == vCC (ordered)
-    .if $gt_bias
-    c.olt.d   fcc0, ft0, ft1
-    li        rTEMP, -1
-    bc1t      fcc0, 1f                     # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    c.olt.d   fcc0, ft1, ft0
-    li        rTEMP, 1
-    bc1t      fcc0, 1f                     # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#endif
-1:
-    GET_OPA(rOBJ)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
-
-%def op_cmpl_float(gt_bias="0"):
-    /*
-     * Compare two floating-point values. Puts 0(==), 1(>), or -1(<)
-     * into the destination register based on the comparison results.
-     *
-     * for: cmpl-float, cmpg-float
-     */
-    /* op vAA, vBB, vCC */
-
-    FETCH(a0, 1)                           #  a0 <- CCBB
-    and       a2, a0, 255                  #  a2 <- BB
-    srl       a3, a0, 8
-    GET_VREG_F(ft0, a2)
-    GET_VREG_F(ft1, a3)
-#ifdef MIPS32REVGE6
-    cmp.eq.s  ft2, ft0, ft1
-    li        rTEMP, 0
-    bc1nez    ft2, 1f                      # done if vBB == vCC (ordered)
-    .if $gt_bias
-    cmp.lt.s  ft2, ft0, ft1
-    li        rTEMP, -1
-    bc1nez    ft2, 1f                      # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    cmp.lt.s  ft2, ft1, ft0
-    li        rTEMP, 1
-    bc1nez    ft2, 1f                      # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#else
-    c.eq.s    fcc0, ft0, ft1
-    li        rTEMP, 0
-    bc1t      fcc0, 1f                     # done if vBB == vCC (ordered)
-    .if $gt_bias
-    c.olt.s   fcc0, ft0, ft1
-    li        rTEMP, -1
-    bc1t      fcc0, 1f                     # done if vBB < vCC (ordered)
-    li        rTEMP, 1                     # vBB > vCC or unordered
-    .else
-    c.olt.s   fcc0, ft1, ft0
-    li        rTEMP, 1
-    bc1t      fcc0, 1f                     # done if vBB > vCC (ordered)
-    li        rTEMP, -1                    # vBB < vCC or unordered
-    .endif
-#endif
-1:
-    GET_OPA(rOBJ)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(rTEMP, rOBJ, t0)         #  vAA <- rTEMP
-
-%def op_div_double():
-%  fbinopWide(instr="div.d fv0, fa0, fa1")
-
-%def op_div_double_2addr():
-%  fbinopWide2addr(instr="div.d fv0, fa0, fa1")
-
-%def op_div_float():
-%  fbinop(instr="div.s fv0, fa0, fa1")
-
-%def op_div_float_2addr():
-%  fbinop2addr(instr="div.s fv0, fa0, fa1")
-
-%def op_double_to_float():
-%  unopNarrower(instr="cvt.s.d fv0, fa0")
-
-%def op_double_to_int():
-    /*
-     * double-to-int
-     *
-     * We have to clip values to int min/max per the specification.  The
-     * expected common case is a "reasonable" value that converts directly
-     * to modest integer.  The EABI convert function isn't doing this for us
-     * for pre-R6.
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64_F(fa0, fa0f, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-#ifndef MIPS32REVGE6
-    li        t0, INT_MIN_AS_DOUBLE_HIGH
-    mtc1      zero, fa1
-    MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
-    c.ole.d   fcc0, fa1, fa0
-#endif
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-#ifndef MIPS32REVGE6
-    bc1t      fcc0, 1f                     #  if INT_MIN <= vB, proceed to truncation
-    c.eq.d    fcc0, fa0, fa0
-    mtc1      zero, fa0
-    MOVE_TO_FPU_HIGH(zero, fa0, fa0f)
-    movt.d    fa0, fa1, fcc0               #  fa0 = ordered(vB) ? INT_MIN_AS_DOUBLE : 0
-1:
-#endif
-    trunc.w.d fa0, fa0
-    SET_VREG_F_GOTO(fa0, rOBJ, t1)         #  vA <- result
-
-%def op_double_to_long():
-    /*
-     * double-to-long
-     *
-     * We have to clip values to long min/max per the specification.  The
-     * expected common case is a "reasonable" value that converts directly
-     * to modest integer.  The EABI convert function isn't doing this for us
-     * for pre-R6.
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64_F(fa0, fa0f, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-#ifdef MIPS32REVGE6
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    trunc.l.d fa0, fa0
-    SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) #  vA <- result
-#else
-    c.eq.d    fcc0, fa0, fa0
-    li        rRESULT0, 0
-    li        rRESULT1, 0
-    bc1f      fcc0, .L${opcode}_get_opcode
-
-    li        t0, LONG_MIN_AS_DOUBLE_HIGH
-    mtc1      zero, fa1
-    MOVE_TO_FPU_HIGH(t0, fa1, fa1f)
-    c.ole.d   fcc0, fa0, fa1
-    li        rRESULT1, LONG_MIN_HIGH
-    bc1t      fcc0, .L${opcode}_get_opcode
-
-    neg.d     fa1, fa1
-    c.ole.d   fcc0, fa1, fa0
-    nor       rRESULT0, rRESULT0, zero
-    nor       rRESULT1, rRESULT1, zero
-    bc1t      fcc0, .L${opcode}_get_opcode
-
-    JAL(__fixdfdi)
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    b         .L${opcode}_set_vreg
-#endif
-%def op_double_to_long_helper_code():
-
-#ifndef MIPS32REVGE6
-.Lop_double_to_long_get_opcode:
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-.Lop_double_to_long_set_vreg:
-    SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1)   #  vA/vA+1 <- v0/v1
-#endif
-
-%def op_float_to_double():
-%  funopWider(instr="cvt.d.s fv0, fa0")
-
-%def op_float_to_int():
-    /*
-     * float-to-int
-     *
-     * We have to clip values to int min/max per the specification.  The
-     * expected common case is a "reasonable" value that converts directly
-     * to modest integer.  The EABI convert function isn't doing this for us
-     * for pre-R6.
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_VREG_F(fa0, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-#ifndef MIPS32REVGE6
-    li        t0, INT_MIN_AS_FLOAT
-    mtc1      t0, fa1
-    c.ole.s   fcc0, fa1, fa0
-#endif
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-#ifndef MIPS32REVGE6
-    bc1t      fcc0, 1f                     #  if INT_MIN <= vB, proceed to truncation
-    c.eq.s    fcc0, fa0, fa0
-    mtc1      zero, fa0
-    movt.s    fa0, fa1, fcc0               #  fa0 = ordered(vB) ? INT_MIN_AS_FLOAT : 0
-1:
-#endif
-    trunc.w.s fa0, fa0
-    SET_VREG_F_GOTO(fa0, rOBJ, t1)         #  vA <- result
-
-%def op_float_to_long():
-    /*
-     * float-to-long
-     *
-     * We have to clip values to long min/max per the specification.  The
-     * expected common case is a "reasonable" value that converts directly
-     * to modest integer.  The EABI convert function isn't doing this for us
-     * for pre-R6.
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    GET_VREG_F(fa0, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-
-#ifdef MIPS32REVGE6
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    trunc.l.s fa0, fa0
-    SET_VREG64_F_GOTO(fa0, fa0f, rOBJ, t1) #  vA <- result
-#else
-    c.eq.s    fcc0, fa0, fa0
-    li        rRESULT0, 0
-    li        rRESULT1, 0
-    bc1f      fcc0, .L${opcode}_get_opcode
-
-    li        t0, LONG_MIN_AS_FLOAT
-    mtc1      t0, fa1
-    c.ole.s   fcc0, fa0, fa1
-    li        rRESULT1, LONG_MIN_HIGH
-    bc1t      fcc0, .L${opcode}_get_opcode
-
-    neg.s     fa1, fa1
-    c.ole.s   fcc0, fa1, fa0
-    nor       rRESULT0, rRESULT0, zero
-    nor       rRESULT1, rRESULT1, zero
-    bc1t      fcc0, .L${opcode}_get_opcode
-
-    JAL(__fixsfdi)
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    b         .L${opcode}_set_vreg
-#endif
-%def op_float_to_long_helper_code():
-
-#ifndef MIPS32REVGE6
-.Lop_float_to_long_get_opcode:
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-.Lop_float_to_long_set_vreg:
-    SET_VREG64_GOTO(rRESULT0, rRESULT1, rOBJ, t1)   #  vA/vA+1 <- v0/v1
-#endif
-
-%def op_int_to_double():
-%  funopWider(instr="cvt.d.w fv0, fa0")
-
-%def op_int_to_float():
-%  funop(instr="cvt.s.w fv0, fa0")
-
-%def op_long_to_double():
-    /*
-     * long-to-double
-     */
-    /* unop vA, vB */
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    GET_OPB(a3)                            #  a3 <- B
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-
-#ifdef MIPS32REVGE6
-    LOAD64_F(fv0, fv0f, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    cvt.d.l   fv0, fv0
-#else
-    LOAD64(rARG0, rARG1, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    JAL(__floatdidf)                       #  a0/a1 <- op, a2-a3 changed
-#endif
-
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_F_GOTO(fv0, fv0f, rOBJ, t0) #  vA/vA+1 <- result
-
-%def op_long_to_float():
-    /*
-     * long-to-float
-     */
-    /* unop vA, vB */
-    GET_OPB(a3)                            #  a3 <- B
-    GET_OPA4(rOBJ)                         #  rOBJ <- A+
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-
-#ifdef MIPS32REVGE6
-    LOAD64_F(fv0, fv0f, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    cvt.s.l   fv0, fv0
-#else
-    LOAD64(rARG0, rARG1, a3)
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    JAL(__floatdisf)
-#endif
-
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_F_GOTO(fv0, rOBJ, t0)         #  vA <- fv0
-
-%def op_mul_double():
-%  fbinopWide(instr="mul.d fv0, fa0, fa1")
-
-%def op_mul_double_2addr():
-%  fbinopWide2addr(instr="mul.d fv0, fa0, fa1")
-
-%def op_mul_float():
-%  fbinop(instr="mul.s fv0, fa0, fa1")
-
-%def op_mul_float_2addr():
-%  fbinop2addr(instr="mul.s fv0, fa0, fa1")
-
-%def op_neg_double():
-%  unopWide(instr="addu a1, a1, 0x80000000")
-
-%def op_neg_float():
-%  unop(instr="addu a0, a0, 0x80000000")
-
-%def op_rem_double():
-%  fbinopWide(instr="JAL(fmod)")
-
-%def op_rem_double_2addr():
-%  fbinopWide2addr(instr="JAL(fmod)")
-
-%def op_rem_float():
-%  fbinop(instr="JAL(fmodf)")
-
-%def op_rem_float_2addr():
-%  fbinop2addr(instr="JAL(fmodf)")
-
-%def op_sub_double():
-%  fbinopWide(instr="sub.d fv0, fa0, fa1")
-
-%def op_sub_double_2addr():
-%  fbinopWide2addr(instr="sub.d fv0, fa0, fa1")
-
-%def op_sub_float():
-%  fbinop(instr="sub.s fv0, fa0, fa1")
-
-%def op_sub_float_2addr():
-%  fbinop2addr(instr="sub.s fv0, fa0, fa1")
diff --git a/runtime/interpreter/mterp/mips/invoke.S b/runtime/interpreter/mterp/mips/invoke.S
deleted file mode 100644
index c77d12b..0000000
--- a/runtime/interpreter/mterp/mips/invoke.S
+++ /dev/null
@@ -1,87 +0,0 @@
-%def invoke(helper="UndefinedInvokeHandler"):
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern $helper
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL($helper)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(3)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern $helper
-    EXPORT_PC()
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    JAL($helper)
-    beqz    v0, MterpException
-    FETCH_ADVANCE_INST(4)
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-
-%def op_invoke_custom():
-%  invoke(helper="MterpInvokeCustom")
-
-%def op_invoke_custom_range():
-%  invoke(helper="MterpInvokeCustomRange")
-
-%def op_invoke_direct():
-%  invoke(helper="MterpInvokeDirect")
-
-%def op_invoke_direct_range():
-%  invoke(helper="MterpInvokeDirectRange")
-
-%def op_invoke_interface():
-%  invoke(helper="MterpInvokeInterface")
-
-%def op_invoke_interface_range():
-%  invoke(helper="MterpInvokeInterfaceRange")
-
-%def op_invoke_polymorphic():
-%  invoke_polymorphic(helper="MterpInvokePolymorphic")
-
-%def op_invoke_polymorphic_range():
-%  invoke_polymorphic(helper="MterpInvokePolymorphicRange")
-
-%def op_invoke_static():
-%  invoke(helper="MterpInvokeStatic")
-
-%def op_invoke_static_range():
-%  invoke(helper="MterpInvokeStaticRange")
-
-%def op_invoke_super():
-%  invoke(helper="MterpInvokeSuper")
-
-%def op_invoke_super_range():
-%  invoke(helper="MterpInvokeSuperRange")
-
-%def op_invoke_virtual():
-%  invoke(helper="MterpInvokeVirtual")
-
-%def op_invoke_virtual_quick():
-%  invoke(helper="MterpInvokeVirtualQuick")
-
-%def op_invoke_virtual_range():
-%  invoke(helper="MterpInvokeVirtualRange")
-
-%def op_invoke_virtual_range_quick():
-%  invoke(helper="MterpInvokeVirtualQuickRange")
diff --git a/runtime/interpreter/mterp/mips/main.S b/runtime/interpreter/mterp/mips/main.S
deleted file mode 100644
index 88180cf..0000000
--- a/runtime/interpreter/mterp/mips/main.S
+++ /dev/null
@@ -1,1144 +0,0 @@
-%def header():
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
-  Art assembly interpreter notes:
-
-  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
-  handle invoke, allows higher-level code to create frame & shadow frame.
-
-  Once that's working, support direct entry code & eliminate shadow frame (and
-  excess locals allocation.
-
-  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
-  base of the vreg array within the shadow frame.  Access the other fields,
-  dex_pc_, method_ and number_of_vregs_ via negative offsets.  For now, we'll continue
-  the shadow frame mechanism of double-storing object references - via rFP &
-  number_of_vregs_.
-
- */
-
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-#if (__mips==32) && (__mips_isa_rev>=2)
-#define MIPS32REVGE2    /* mips32r2 and greater */
-#if (__mips==32) && (__mips_isa_rev>=5)
-#define FPU64           /* 64 bit FPU */
-#if (__mips==32) && (__mips_isa_rev>=6)
-#define MIPS32REVGE6    /* mips32r6 and greater */
-#endif
-#endif
-#endif
-
-/* MIPS definitions and declarations
-
-   reg  nick      purpose
-   s0   rPC       interpreted program counter, used for fetching instructions
-   s1   rFP       interpreted frame pointer, used for accessing locals and args
-   s2   rSELF     self (Thread) pointer
-   s3   rIBASE    interpreted instruction base pointer, used for computed goto
-   s4   rINST     first 16-bit code unit of current instruction
-   s5   rOBJ      object pointer
-   s6   rREFS     base of object references in shadow frame (ideally, we'll get rid of this later).
-   s7   rTEMP     used as temp storage that can survive a function call
-   s8   rPROFILE  branch profiling countdown
-
-*/
-
-/* single-purpose registers, given names for clarity */
-#define rPC s0
-#define CFI_DEX 16  // DWARF register number of the register holding dex-pc (s0).
-#define CFI_TMP 4   // DWARF register number of the first argument register (a0).
-#define rFP s1
-#define rSELF s2
-#define rIBASE s3
-#define rINST s4
-#define rOBJ s5
-#define rREFS s6
-#define rTEMP s7
-#define rPROFILE s8
-
-#define rARG0 a0
-#define rARG1 a1
-#define rARG2 a2
-#define rARG3 a3
-#define rRESULT0 v0
-#define rRESULT1 v1
-
-/* GP register definitions */
-#define zero    $$0      /* always zero */
-#define AT      $$at     /* assembler temp */
-#define v0      $$2      /* return value */
-#define v1      $$3
-#define a0      $$4      /* argument registers */
-#define a1      $$5
-#define a2      $$6
-#define a3      $$7
-#define t0      $$8      /* temp registers (not saved across subroutine calls) */
-#define t1      $$9
-#define t2      $$10
-#define t3      $$11
-#define t4      $$12
-#define t5      $$13
-#define t6      $$14
-#define t7      $$15
-#define ta0     $$12     /* alias */
-#define ta1     $$13
-#define ta2     $$14
-#define ta3     $$15
-#define s0      $$16     /* saved across subroutine calls (callee saved) */
-#define s1      $$17
-#define s2      $$18
-#define s3      $$19
-#define s4      $$20
-#define s5      $$21
-#define s6      $$22
-#define s7      $$23
-#define t8      $$24     /* two more temp registers */
-#define t9      $$25
-#define k0      $$26     /* kernel temporary */
-#define k1      $$27
-#define gp      $$28     /* global pointer */
-#define sp      $$29     /* stack pointer */
-#define s8      $$30     /* one more callee saved */
-#define ra      $$31     /* return address */
-
-/* FP register definitions */
-#define fv0    $$f0
-#define fv0f   $$f1
-#define fv1    $$f2
-#define fv1f   $$f3
-#define fa0    $$f12
-#define fa0f   $$f13
-#define fa1    $$f14
-#define fa1f   $$f15
-#define ft0    $$f4
-#define ft0f   $$f5
-#define ft1    $$f6
-#define ft1f   $$f7
-#define ft2    $$f8
-#define ft2f   $$f9
-#define ft3    $$f10
-#define ft3f   $$f11
-#define ft4    $$f16
-#define ft4f   $$f17
-#define ft5    $$f18
-#define ft5f   $$f19
-#define fs0    $$f20
-#define fs0f   $$f21
-#define fs1    $$f22
-#define fs1f   $$f23
-#define fs2    $$f24
-#define fs2f   $$f25
-#define fs3    $$f26
-#define fs3f   $$f27
-#define fs4    $$f28
-#define fs4f   $$f29
-#define fs5    $$f30
-#define fs5f   $$f31
-
-#ifndef MIPS32REVGE6
-#define fcc0   $$fcc0
-#define fcc1   $$fcc1
-#endif
-
-#ifdef MIPS32REVGE2
-#define SEB(rd, rt) \
-    seb       rd, rt
-#define SEH(rd, rt) \
-    seh       rd, rt
-#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
-    ins       rd_lo, rt_hi, 16, 16
-#else
-#define SEB(rd, rt) \
-    sll       rd, rt, 24; \
-    sra       rd, rd, 24
-#define SEH(rd, rt) \
-    sll       rd, rt, 16; \
-    sra       rd, rd, 16
-/* Clobbers rt_hi on pre-R2. */
-#define INSERT_HIGH_HALF(rd_lo, rt_hi) \
-    sll       rt_hi, rt_hi, 16; \
-    or        rd_lo, rt_hi
-#endif
-
-#ifdef FPU64
-#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
-    mthc1     r, flo
-#else
-#define MOVE_TO_FPU_HIGH(r, flo, fhi) \
-    mtc1      r, fhi
-#endif
-
-#ifdef MIPS32REVGE6
-#define JR(rt) \
-    jic       rt, 0
-#define LSA(rd, rs, rt, sa) \
-    .if sa; \
-    lsa       rd, rs, rt, sa; \
-    .else; \
-    addu      rd, rs, rt; \
-    .endif
-#else
-#define JR(rt) \
-    jalr      zero, rt
-#define LSA(rd, rs, rt, sa) \
-    .if sa; \
-    .set      push; \
-    .set      noat; \
-    sll       AT, rs, sa; \
-    addu      rd, AT, rt; \
-    .set      pop; \
-    .else; \
-    addu      rd, rs, rt; \
-    .endif
-#endif
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
- * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array.  For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-#define EXPORT_PC() \
-    sw        rPC, OFF_FP_DEX_PC_PTR(rFP)
-
-#define EXPORT_DEX_PC(tmp) \
-    lw        tmp, OFF_FP_DEX_INSTRUCTIONS(rFP); \
-    sw        rPC, OFF_FP_DEX_PC_PTR(rFP); \
-    subu      tmp, rPC, tmp; \
-    sra       tmp, tmp, 1; \
-    sw        tmp, OFF_FP_DEX_PC(rFP)
-
-/*
- * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
- */
-#define FETCH_INST() lhu rINST, (rPC)
-
-/*
- * Fetch the next instruction from the specified offset.  Advances rPC
- * to point to the next instruction.  "_count" is in 16-bit code units.
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss.  (This also implies that it must come after
- * EXPORT_PC().)
- */
-#define FETCH_ADVANCE_INST(_count) \
-    lhu       rINST, ((_count)*2)(rPC); \
-    addu      rPC, rPC, ((_count) * 2)
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
- * rINST ahead of possible exception point.  Be sure to manually advance rPC
- * later.
- */
-#define PREFETCH_INST(_count) lhu rINST, ((_count)*2)(rPC)
-
-/* Advance rPC by some number of code units. */
-#define ADVANCE(_count) addu rPC, rPC, ((_count) * 2)
-
-/*
- * Fetch the next instruction from an offset specified by rd.  Updates
- * rPC to point to the next instruction.  "rd" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value.
- */
-#define FETCH_ADVANCE_INST_RB(rd) \
-    addu      rPC, rPC, rd; \
-    lhu       rINST, (rPC)
-
-/*
- * Fetch a half-word code unit from an offset past the current PC.  The
- * "_count" value is in 16-bit code units.  Does not advance rPC.
- *
- * The "_S" variant works the same but treats the value as signed.
- */
-#define FETCH(rd, _count) lhu rd, ((_count) * 2)(rPC)
-#define FETCH_S(rd, _count) lh rd, ((_count) * 2)(rPC)
-
-/*
- * Fetch one byte from an offset past the current PC.  Pass in the same
- * "_count" as you would for FETCH, and an additional 0/1 indicating which
- * byte of the halfword you want (lo/hi).
- */
-#define FETCH_B(rd, _count, _byte) lbu rd, ((_count) * 2 + _byte)(rPC)
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-#define GET_INST_OPCODE(rd) and rd, rINST, 0xFF
-
-/*
- * Transform opcode into branch target address.
- */
-#define GET_OPCODE_TARGET(rd) \
-    sll       rd, rd, ${handler_size_bits}; \
-    addu      rd, rIBASE, rd
-
-/*
- * Begin executing the opcode in rd.
- */
-#define GOTO_OPCODE(rd) \
-    GET_OPCODE_TARGET(rd); \
-    JR(rd)
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- */
-#define GET_VREG(rd, rix) LOAD_eas2(rd, rFP, rix)
-
-#define GET_VREG_F(rd, rix) \
-    .set noat; \
-    EAS2(AT, rFP, rix); \
-    l.s       rd, (AT); \
-    .set at
-
-#ifdef MIPS32REVGE6
-#define SET_VREG(rd, rix) \
-    lsa       t8, rix, rFP, 2; \
-    sw        rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8)
-#else
-#define SET_VREG(rd, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG_OBJECT(rd, rix) \
-    lsa       t8, rix, rFP, 2; \
-    sw        rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        rd, 0(t8)
-#else
-#define SET_VREG_OBJECT(rd, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        rd, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG64(rlo, rhi, rix) \
-    lsa       t8, rix, rFP, 2; \
-    sw        rlo, 0(t8); \
-    sw        rhi, 4(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8)
-#else
-#define SET_VREG64(rlo, rhi, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rlo, 0(t8); \
-    sw        rhi, 4(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG_F(rd, rix) \
-    lsa       t8, rix, rFP, 2; \
-    s.s       rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8)
-#else
-#define SET_VREG_F(rd, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    s.s       rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8)
-#endif
-
-#ifdef MIPS32REVGE6
-#define SET_VREG64_F(rlo, rhi, rix) \
-    lsa       t8, rix, rFP, 2; \
-    .set noat; \
-    mfhc1     AT, rlo; \
-    s.s       rlo, 0(t8); \
-    sw        AT, 4(t8); \
-    .set at; \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8)
-#elif defined(FPU64)
-#define SET_VREG64_F(rlo, rhi, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rREFS, AT; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8); \
-    addu      t8, rFP, AT; \
-    mfhc1     AT, rlo; \
-    sw        AT, 4(t8); \
-    .set at; \
-    s.s       rlo, 0(t8)
-#else
-#define SET_VREG64_F(rlo, rhi, rix) \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    s.s       rlo, 0(t8); \
-    s.s       rhi, 4(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8)
-#endif
-
-/* Combination of the SET_VREG and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    sw        rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    jalr      zero, dst; \
-    sw        zero, 0(t8); \
-    .set reorder
-#else
-#define SET_VREG_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    jalr      zero, dst; \
-    sw        zero, 0(t8); \
-    .set reorder
-#endif
-
-/* Combination of the SET_VREG_OBJECT and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    sw        rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    jalr      zero, dst; \
-    sw        rd, 0(t8); \
-    .set reorder
-#else
-#define SET_VREG_OBJECT_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    jalr      zero, dst; \
-    sw        rd, 0(t8); \
-    .set reorder
-#endif
-
-/* Combination of the SET_VREG64 and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    sw        rlo, 0(t8); \
-    sw        rhi, 4(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8); \
-    jalr      zero, dst; \
-    sw        zero, 4(t8); \
-    .set reorder
-#else
-#define SET_VREG64_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    sw        rlo, 0(t8); \
-    sw        rhi, 4(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8); \
-    jalr      zero, dst; \
-    sw        zero, 4(t8); \
-    .set reorder
-#endif
-
-/* Combination of the SET_VREG_F and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG_F_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    s.s       rd, 0(t8); \
-    lsa       t8, rix, rREFS, 2; \
-    jalr      zero, dst; \
-    sw        zero, 0(t8); \
-    .set reorder
-#else
-#define SET_VREG_F_GOTO(rd, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    s.s       rd, 0(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    jalr      zero, dst; \
-    sw        zero, 0(t8); \
-    .set reorder
-#endif
-
-/* Combination of the SET_VREG64_F and GOTO_OPCODE functions to save 1 instruction */
-#ifdef MIPS32REVGE6
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    lsa       t8, rix, rFP, 2; \
-    .set noat; \
-    mfhc1     AT, rlo; \
-    s.s       rlo, 0(t8); \
-    sw        AT, 4(t8); \
-    .set at; \
-    lsa       t8, rix, rREFS, 2; \
-    sw        zero, 0(t8); \
-    jalr      zero, dst; \
-    sw        zero, 4(t8); \
-    .set reorder
-#elif defined(FPU64)
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rREFS, AT; \
-    sw        zero, 0(t8); \
-    sw        zero, 4(t8); \
-    addu      t8, rFP, AT; \
-    mfhc1     AT, rlo; \
-    sw        AT, 4(t8); \
-    .set at; \
-    jalr      zero, dst; \
-    s.s       rlo, 0(t8); \
-    .set reorder
-#else
-#define SET_VREG64_F_GOTO(rlo, rhi, rix, dst) \
-    .set noreorder; \
-    GET_OPCODE_TARGET(dst); \
-    .set noat; \
-    sll       AT, rix, 2; \
-    addu      t8, rFP, AT; \
-    s.s       rlo, 0(t8); \
-    s.s       rhi, 4(t8); \
-    addu      t8, rREFS, AT; \
-    .set at; \
-    sw        zero, 0(t8); \
-    jalr      zero, dst; \
-    sw        zero, 4(t8); \
-    .set reorder
-#endif
-
-#define GET_OPA(rd) srl rd, rINST, 8
-#ifdef MIPS32REVGE2
-#define GET_OPA4(rd) ext rd, rINST, 8, 4
-#else
-#define GET_OPA4(rd) GET_OPA(rd); and rd, 0xf
-#endif
-#define GET_OPB(rd) srl rd, rINST, 12
-
-/*
- * Form an Effective Address rd = rbase + roff<<shift;
- * Uses reg AT on pre-R6.
- */
-#define EASN(rd, rbase, roff, shift) LSA(rd, roff, rbase, shift)
-
-#define EAS1(rd, rbase, roff) EASN(rd, rbase, roff, 1)
-#define EAS2(rd, rbase, roff) EASN(rd, rbase, roff, 2)
-#define EAS3(rd, rbase, roff) EASN(rd, rbase, roff, 3)
-#define EAS4(rd, rbase, roff) EASN(rd, rbase, roff, 4)
-
-#define LOAD_eas2(rd, rbase, roff) \
-    .set noat; \
-    EAS2(AT, rbase, roff); \
-    lw        rd, 0(AT); \
-    .set at
-
-#define STORE_eas2(rd, rbase, roff) \
-    .set noat; \
-    EAS2(AT, rbase, roff); \
-    sw        rd, 0(AT); \
-    .set at
-
-#define LOAD_RB_OFF(rd, rbase, off) lw rd, off(rbase)
-#define STORE_RB_OFF(rd, rbase, off) sw rd, off(rbase)
-
-#define STORE64_off(rlo, rhi, rbase, off) \
-    sw        rlo, off(rbase); \
-    sw        rhi, (off+4)(rbase)
-#define LOAD64_off(rlo, rhi, rbase, off) \
-    lw        rlo, off(rbase); \
-    lw        rhi, (off+4)(rbase)
-
-#define STORE64(rlo, rhi, rbase) STORE64_off(rlo, rhi, rbase, 0)
-#define LOAD64(rlo, rhi, rbase) LOAD64_off(rlo, rhi, rbase, 0)
-
-#ifdef FPU64
-#define STORE64_off_F(rlo, rhi, rbase, off) \
-    s.s       rlo, off(rbase); \
-    .set noat; \
-    mfhc1     AT, rlo; \
-    sw        AT, (off+4)(rbase); \
-    .set at
-#define LOAD64_off_F(rlo, rhi, rbase, off) \
-    l.s       rlo, off(rbase); \
-    .set noat; \
-    lw        AT, (off+4)(rbase); \
-    mthc1     AT, rlo; \
-    .set at
-#else
-#define STORE64_off_F(rlo, rhi, rbase, off) \
-    s.s       rlo, off(rbase); \
-    s.s       rhi, (off+4)(rbase)
-#define LOAD64_off_F(rlo, rhi, rbase, off) \
-    l.s       rlo, off(rbase); \
-    l.s       rhi, (off+4)(rbase)
-#endif
-
-#define STORE64_F(rlo, rhi, rbase) STORE64_off_F(rlo, rhi, rbase, 0)
-#define LOAD64_F(rlo, rhi, rbase) LOAD64_off_F(rlo, rhi, rbase, 0)
-
-#define LOAD_base_offMirrorArray_length(rd, rbase) LOAD_RB_OFF(rd, rbase, MIRROR_ARRAY_LENGTH_OFFSET)
-
-#define STACK_STORE(rd, off) sw rd, off(sp)
-#define STACK_LOAD(rd, off) lw rd, off(sp)
-#define CREATE_STACK(n) subu sp, sp, n
-#define DELETE_STACK(n) addu sp, sp, n
-
-#define LOAD_ADDR(dest, addr) la dest, addr
-#define LOAD_IMM(dest, imm) li dest, imm
-#define MOVE_REG(dest, src) move dest, src
-#define STACK_SIZE 128
-
-#define STACK_OFFSET_ARG04 16
-#define STACK_OFFSET_ARG05 20
-#define STACK_OFFSET_ARG06 24
-#define STACK_OFFSET_ARG07 28
-#define STACK_OFFSET_GP    84
-
-#define JAL(n) jal n
-#define BAL(n) bal n
-
-/*
- * FP register usage restrictions:
- * 1) We don't use the callee save FP registers so we don't have to save them.
- * 2) We don't use the odd FP registers so we can share code with mips32r6.
- */
-#define STACK_STORE_FULL() CREATE_STACK(STACK_SIZE); \
-    STACK_STORE(ra, 124); \
-    STACK_STORE(s8, 120); \
-    STACK_STORE(s0, 116); \
-    STACK_STORE(s1, 112); \
-    STACK_STORE(s2, 108); \
-    STACK_STORE(s3, 104); \
-    STACK_STORE(s4, 100); \
-    STACK_STORE(s5, 96); \
-    STACK_STORE(s6, 92); \
-    STACK_STORE(s7, 88);
-
-#define STACK_LOAD_FULL() STACK_LOAD(gp, STACK_OFFSET_GP); \
-    STACK_LOAD(s7, 88); \
-    STACK_LOAD(s6, 92); \
-    STACK_LOAD(s5, 96); \
-    STACK_LOAD(s4, 100); \
-    STACK_LOAD(s3, 104); \
-    STACK_LOAD(s2, 108); \
-    STACK_LOAD(s1, 112); \
-    STACK_LOAD(s0, 116); \
-    STACK_LOAD(s8, 120); \
-    STACK_LOAD(ra, 124); \
-    DELETE_STACK(STACK_SIZE)
-
-#define REFRESH_IBASE() \
-    lw        rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-
-/* Constants for float/double_to_int/long conversions */
-#define INT_MIN                 0x80000000
-#define INT_MIN_AS_FLOAT        0xCF000000
-#define INT_MIN_AS_DOUBLE_HIGH  0xC1E00000
-#define LONG_MIN_HIGH           0x80000000
-#define LONG_MIN_AS_FLOAT       0xDF000000
-#define LONG_MIN_AS_DOUBLE_HIGH 0xC3E00000
-
-%def entry():
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Interpreter entry point.
- */
-
-    .text
-    .align 2
-    .global ExecuteMterpImpl
-    .ent    ExecuteMterpImpl
-    .frame sp, STACK_SIZE, ra
-/*
- * On entry:
- *  a0  Thread* self
- *  a1  dex_instructions
- *  a2  ShadowFrame
- *  a3  JValue* result_register
- *
- */
-
-ExecuteMterpImpl:
-    .cfi_startproc
-    .set noreorder
-    .cpload t9
-    .set reorder
-/* Save to the stack. Frame size = STACK_SIZE */
-    STACK_STORE_FULL()
-/* This directive will make sure all subsequent jal restore gp at a known offset */
-    .cprestore STACK_OFFSET_GP
-
-    /* Remember the return register */
-    sw      a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
-
-    /* Remember the dex instruction pointer */
-    sw      a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
-
-    /* set up "named" registers */
-    move    rSELF, a0
-    lw      a0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
-    addu    rFP, a2, SHADOWFRAME_VREGS_OFFSET     # point to vregs.
-    EAS2(rREFS, rFP, a0)                          # point to reference array in shadow frame
-    lw      a0, SHADOWFRAME_DEX_PC_OFFSET(a2)     # Get starting dex_pc
-    EAS1(rPC, a1, a0)                             # Create direct pointer to 1st dex opcode
-    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
-
-    EXPORT_PC()
-
-    /* Starting ibase */
-    lw      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-
-    /* Set up for backwards branches & osr profiling */
-    lw      a0, OFF_FP_METHOD(rFP)
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rSELF
-    JAL(MterpSetUpHotnessCountdown)        # (method, shadow_frame, self)
-    move    rPROFILE, v0                   # Starting hotness countdown to rPROFILE
-
-    /* start executing the instruction at rPC */
-    FETCH_INST()                           # load rINST from rPC
-    GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
-    /* NOTE: no fallthrough */
-
-%def dchecks_before_helper():
-    // Call C++ to do debug checks and return to the handler using tail call.
-    .extern MterpCheckBefore
-    move   a0, rSELF                    # arg0
-    addu   a1, rFP, OFF_FP_SHADOWFRAME  # arg1
-    move   a2, rPC
-    la     t9, MterpCheckBefore
-    jalr   zero, t9                     # Tail call to Mterp(self, shadow_frame, dex_pc_ptr)
-
-%def opcode_pre():
-%  add_helper(dchecks_before_helper, "mterp_dchecks_before_helper")
-    #if !defined(NDEBUG)
-    jal    SYMBOL(mterp_dchecks_before_helper)
-    #endif
-
-%def fallback():
-/* Transfer stub to alternate interpreter */
-    b    MterpFallback
-
-%def helpers():
-%  op_float_to_long_helper_code()
-%  op_double_to_long_helper_code()
-%  op_mul_long_helper_code()
-%  op_shl_long_helper_code()
-%  op_shr_long_helper_code()
-%  op_ushr_long_helper_code()
-%  op_shl_long_2addr_helper_code()
-%  op_shr_long_2addr_helper_code()
-%  op_ushr_long_2addr_helper_code()
-
-%def footer():
-/*
- * ===========================================================================
- *  Common subroutines and data
- * ===========================================================================
- */
-
-    .text
-    .align 2
-
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-common_errDivideByZero:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogDivideByZeroException)
-#endif
-    b MterpCommonFallback
-
-common_errArrayIndex:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogArrayIndexException)
-#endif
-    b MterpCommonFallback
-
-common_errNegativeArraySize:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogNegativeArraySizeException)
-#endif
-    b MterpCommonFallback
-
-common_errNoSuchMethod:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogNoSuchMethodException)
-#endif
-    b MterpCommonFallback
-
-common_errNullObject:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogNullObjectException)
-#endif
-    b MterpCommonFallback
-
-common_exceptionThrown:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogExceptionThrownException)
-#endif
-    b MterpCommonFallback
-
-MterpSuspendFallback:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    lw    a2, THREAD_FLAGS_OFFSET(rSELF)
-    JAL(MterpLogSuspendFallback)
-#endif
-    b MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary.  If there is a pending
- * exception, handle it.  Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
-    lw      a0, THREAD_EXCEPTION_OFFSET(rSELF)
-    beqz    a0, MterpFallback          # If exception, fall back to reference interpreter.
-    /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-MterpException:
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpHandleException)                    # (self, shadow_frame)
-    beqz    v0, MterpExceptionReturn             # no local catch, back to caller.
-    lw      a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
-    lw      a1, OFF_FP_DEX_PC(rFP)
-    lw      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-    EAS1(rPC, a0, a1)                            # generate new dex_pc_ptr
-    /* Do we need to switch interpreters? */
-    JAL(MterpShouldSwitchInterpreters)
-    bnez    v0, MterpFallback
-    /* resume execution at catch block */
-    EXPORT_PC()
-    FETCH_INST()
-    GET_INST_OPCODE(t0)
-    GOTO_OPCODE(t0)
-    /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- *    rINST          <= signed offset
- *    rPROFILE       <= signed hotness countdown (expanded to 32 bits)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- *    If profiling active, do hotness countdown and report if we hit zero.
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *    Is there a pending suspend request?  If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- */
-MterpCommonTakenBranchNoFlags:
-    bgtz    rINST, .L_forward_branch    # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-#if JIT_CHECK_OSR != -1
-#  error "JIT_CHECK_OSR must be -1."
-#endif
-    li      t0, JIT_CHECK_OSR
-    beq     rPROFILE, t0, .L_osr_check
-    blt     rPROFILE, t0, .L_resume_backward_branch
-    subu    rPROFILE, 1
-    beqz    rPROFILE, .L_add_batch      # counted down to zero - report
-.L_resume_backward_branch:
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    REFRESH_IBASE()
-    addu    a2, rINST, rINST            # a2<- byte offset
-    FETCH_ADVANCE_INST_RB(a2)           # update rPC, load rINST
-    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    bnez    ra, .L_suspend_request_pending
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-.L_suspend_request_pending:
-    EXPORT_PC()
-    move    a0, rSELF
-    JAL(MterpSuspendCheck)              # (self)
-    bnez    v0, MterpFallback
-    REFRESH_IBASE()                     # might have changed during suspend
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-.L_no_count_backwards:
-    li      t0, JIT_CHECK_OSR           # check for possible OSR re-entry
-    bne     rPROFILE, t0, .L_resume_backward_branch
-.L_osr_check:
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    EXPORT_PC()
-    JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    bnez    v0, MterpOnStackReplacement
-    b       .L_resume_backward_branch
-
-.L_forward_branch:
-    li      t0, JIT_CHECK_OSR           # check for possible OSR re-entry
-    beq     rPROFILE, t0, .L_check_osr_forward
-.L_resume_forward_branch:
-    add     a2, rINST, rINST            # a2<- byte offset
-    FETCH_ADVANCE_INST_RB(a2)           # update rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-.L_check_osr_forward:
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    EXPORT_PC()
-    JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    bnez    v0, MterpOnStackReplacement
-    b       .L_resume_forward_branch
-
-.L_add_batch:
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
-    lw      a0, OFF_FP_METHOD(rFP)
-    move    a2, rSELF
-    JAL(MterpAddHotnessBatch)           # (method, shadow_frame, self)
-    move    rPROFILE, v0                # restore new hotness countdown to rPROFILE
-    b       .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    li      a2, 2
-    EXPORT_PC()
-    JAL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
-    bnez    v0, MterpOnStackReplacement
-    FETCH_ADVANCE_INST(2)
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
-    move    a0, rSELF
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    JAL(MterpLogOSR)
-#endif
-    li      v0, 1                       # Signal normal return
-    b       MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-MterpFallback:
-    EXPORT_PC()
-#if MTERP_LOGGING
-    move  a0, rSELF
-    addu  a1, rFP, OFF_FP_SHADOWFRAME
-    JAL(MterpLogFallback)
-#endif
-MterpCommonFallback:
-    move    v0, zero                    # signal retry with reference interpreter.
-    b       MterpDone
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and LR.  Here we restore SP, restore the registers, and then restore
- * LR to PC.
- *
- * On entry:
- *  uint32_t* rFP  (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
-    li      v0, 1                       # signal return to caller.
-    b       MterpDone
-MterpReturn:
-    lw      a2, OFF_FP_RESULT_REGISTER(rFP)
-    sw      v0, 0(a2)
-    sw      v1, 4(a2)
-    li      v0, 1                       # signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
- * checking for OSR.  If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
-    blez    rPROFILE, .L_pop_and_return # if > 0, we may have some counts to report.
-
-MterpProfileActive:
-    move    rINST, v0                   # stash return value
-    /* Report cached hotness counts */
-    lw      a0, OFF_FP_METHOD(rFP)
-    addu    a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rSELF
-    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
-    JAL(MterpAddHotnessBatch)           # (method, shadow_frame, self)
-    move    v0, rINST                   # restore return value
-
-.L_pop_and_return:
-/* Restore from the stack and return. Frame size = STACK_SIZE */
-    STACK_LOAD_FULL()
-    jalr    zero, ra
-
-    .cfi_endproc
-    .end ExecuteMterpImpl
-
-%def instruction_end():
-
-    .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
-
-%def instruction_start():
-
-    .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
-    .text
-
-%def opcode_start():
-%  pass
-%def opcode_end():
-%  pass
-%def helper_start(name):
-    ENTRY ${name}
-%def helper_end(name):
-    END ${name}
diff --git a/runtime/interpreter/mterp/mips/object.S b/runtime/interpreter/mterp/mips/object.S
deleted file mode 100644
index a987789..0000000
--- a/runtime/interpreter/mterp/mips/object.S
+++ /dev/null
@@ -1,257 +0,0 @@
-%def field(helper=""):
-TODO
-
-%def op_check_cast():
-    /*
-     * Check to see if a cast from one class to another is allowed.
-     */
-    /* check-cast vAA, class@BBBB */
-    EXPORT_PC()
-    FETCH(a0, 1)                           #  a0 <- BBBB
-    GET_OPA(a1)                            #  a1 <- AA
-    EAS2(a1, rFP, a1)                      #  a1 <- &object
-    lw     a2, OFF_FP_METHOD(rFP)          #  a2 <- method
-    move   a3, rSELF                       #  a3 <- self
-    JAL(MterpCheckCast)                    #  v0 <- CheckCast(index, &obj, method, self)
-    PREFETCH_INST(2)
-    bnez   v0, MterpPossibleException
-    ADVANCE(2)
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-%def op_iget(is_object="0", helper="MterpIGetU32"):
-%  field(helper=helper)
-
-%def op_iget_boolean():
-%  op_iget(helper="MterpIGetU8")
-
-%def op_iget_boolean_quick():
-%  op_iget_quick(load="lbu")
-
-%def op_iget_byte():
-%  op_iget(helper="MterpIGetI8")
-
-%def op_iget_byte_quick():
-%  op_iget_quick(load="lb")
-
-%def op_iget_char():
-%  op_iget(helper="MterpIGetU16")
-
-%def op_iget_char_quick():
-%  op_iget_quick(load="lhu")
-
-%def op_iget_object():
-%  op_iget(is_object="1", helper="MterpIGetObj")
-
-%def op_iget_object_quick():
-    /* For: iget-object-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    EXPORT_PC()
-    GET_VREG(a0, a2)                       #  a0 <- object we're operating on
-    JAL(artIGetObjectFromMterp)            #  v0 <- GetObj(obj, offset)
-    lw   a3, THREAD_EXCEPTION_OFFSET(rSELF)
-    GET_OPA4(a2)                           #  a2<- A+
-    PREFETCH_INST(2)                       #  load rINST
-    bnez a3, MterpPossibleException        #  bail out
-    ADVANCE(2)                             #  advance rPC
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_OBJECT_GOTO(v0, a2, t0)       #  fp[A] <- v0
-
-%def op_iget_quick(load="lw"):
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    # check object for null
-    beqz      a3, common_errNullObject     #  object was null
-    addu      t0, a3, a1
-    $load     a0, 0(t0)                    #  a0 <- obj.field (8/16/32 bits)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a2, t0)              #  fp[A] <- a0
-
-%def op_iget_short():
-%  op_iget(helper="MterpIGetI16")
-
-%def op_iget_short_quick():
-%  op_iget_quick(load="lh")
-
-%def op_iget_wide():
-%  op_iget(helper="MterpIGetU64")
-
-%def op_iget_wide_quick():
-    /* iget-wide-quick vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- object we're operating on
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    # check object for null
-    beqz      a3, common_errNullObject     #  object was null
-    addu      t0, a3, a1                   #  t0 <- a3 + a1
-    LOAD64(a0, a1, t0)                     #  a0 <- obj.field (64 bits, aligned)
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[A] <- a0/a1
-
-%def op_instance_of():
-    /*
-     * Check to see if an object reference is an instance of a class.
-     *
-     * Most common situation is a non-null object, being compared against
-     * an already-resolved class.
-     */
-    /* instance-of vA, vB, class@CCCC */
-    EXPORT_PC()
-    FETCH(a0, 1)                           # a0 <- CCCC
-    GET_OPB(a1)                            # a1 <- B
-    EAS2(a1, rFP, a1)                      # a1 <- &object
-    lw    a2, OFF_FP_METHOD(rFP)           # a2 <- method
-    move  a3, rSELF                        # a3 <- self
-    GET_OPA4(rOBJ)                         # rOBJ <- A+
-    JAL(MterpInstanceOf)                   # v0 <- Mterp(index, &obj, method, self)
-    lw   a1, THREAD_EXCEPTION_OFFSET(rSELF)
-    PREFETCH_INST(2)                       # load rINST
-    bnez a1, MterpException
-    ADVANCE(2)                             # advance rPC
-    GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    SET_VREG_GOTO(v0, rOBJ, t0)            # vA <- v0
-
-%def op_iput(is_object="0", helper="MterpIPutU32"):
-%  field(helper=helper)
-
-%def op_iput_boolean():
-%  op_iput(helper="MterpIPutU8")
-
-%def op_iput_boolean_quick():
-%  op_iput_quick(store="sb")
-
-%def op_iput_byte():
-%  op_iput(helper="MterpIPutI8")
-
-%def op_iput_byte_quick():
-%  op_iput_quick(store="sb")
-
-%def op_iput_char():
-%  op_iput(helper="MterpIPutU16")
-
-%def op_iput_char_quick():
-%  op_iput_quick(store="sh")
-
-%def op_iput_object():
-%  op_iput(is_object="1", helper="MterpIPutObj")
-
-%def op_iput_object_quick():
-    /* For: iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME
-    move   a1, rPC
-    move   a2, rINST
-    JAL(MterpIputObjectQuick)
-    beqz   v0, MterpException
-    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-%def op_iput_quick(store="sw"):
-    /* For: iput-quick, iput-object-quick */
-    /* op vA, vB, offset@CCCC */
-    GET_OPB(a2)                            #  a2 <- B
-    GET_VREG(a3, a2)                       #  a3 <- fp[B], the object pointer
-    FETCH(a1, 1)                           #  a1 <- field byte offset
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    beqz      a3, common_errNullObject     #  object was null
-    GET_VREG(a0, a2)                       #  a0 <- fp[A]
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    addu      t0, a3, a1
-    GET_INST_OPCODE(t1)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t1)
-    $store    a0, 0(t0)                    #  obj.field (8/16/32 bits) <- a0
-    JR(t1)                                 #  jump to next instruction
-
-%def op_iput_short():
-%  op_iput(helper="MterpIPutI16")
-
-%def op_iput_short_quick():
-%  op_iput_quick(store="sh")
-
-%def op_iput_wide():
-%  op_iput(helper="MterpIPutU64")
-
-%def op_iput_wide_quick():
-    /* iput-wide-quick vA, vB, offset@CCCC */
-    GET_OPA4(a0)                           #  a0 <- A(+)
-    GET_OPB(a1)                            #  a1 <- B
-    GET_VREG(a2, a1)                       #  a2 <- fp[B], the object pointer
-    # check object for null
-    beqz      a2, common_errNullObject     #  object was null
-    EAS2(a3, rFP, a0)                      #  a3 <- &fp[A]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[A]
-    FETCH(a3, 1)                           #  a3 <- field byte offset
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    addu      a2, a2, a3                   #  obj.field (64 bits, aligned) <- a0/a1
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    STORE64(a0, a1, a2)                    #  obj.field (64 bits, aligned) <- a0/a1
-    JR(t0)                                 #  jump to next instruction
-
-%def op_new_instance():
-    /*
-     * Create a new instance of a class.
-     */
-    /* new-instance vAA, class@BBBB */
-    EXPORT_PC()
-    addu   a0, rFP, OFF_FP_SHADOWFRAME
-    move   a1, rSELF
-    move   a2, rINST
-    JAL(MterpNewInstance)
-    beqz   v0, MterpPossibleException
-    FETCH_ADVANCE_INST(2)               # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-%def op_sget(is_object="0", helper="MterpSGetU32"):
-%  field(helper=helper)
-
-%def op_sget_boolean():
-%  op_sget(helper="MterpSGetU8")
-
-%def op_sget_byte():
-%  op_sget(helper="MterpSGetI8")
-
-%def op_sget_char():
-%  op_sget(helper="MterpSGetU16")
-
-%def op_sget_object():
-%  op_sget(is_object="1", helper="MterpSGetObj")
-
-%def op_sget_short():
-%  op_sget(helper="MterpSGetI16")
-
-%def op_sget_wide():
-%  op_sget(helper="MterpSGetU64")
-
-%def op_sput(is_object="0", helper="MterpSPutU32"):
-%  field(helper=helper)
-
-%def op_sput_boolean():
-%  op_sput(helper="MterpSPutU8")
-
-%def op_sput_byte():
-%  op_sput(helper="MterpSPutI8")
-
-%def op_sput_char():
-%  op_sput(helper="MterpSPutU16")
-
-%def op_sput_object():
-%  op_sput(is_object="1", helper="MterpSPutObj")
-
-%def op_sput_short():
-%  op_sput(helper="MterpSPutI16")
-
-%def op_sput_wide():
-%  op_sput(helper="MterpSPutU64")
diff --git a/runtime/interpreter/mterp/mips/other.S b/runtime/interpreter/mterp/mips/other.S
deleted file mode 100644
index 5002329..0000000
--- a/runtime/interpreter/mterp/mips/other.S
+++ /dev/null
@@ -1,345 +0,0 @@
-%def const(helper="UndefinedConstHandler"):
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern $helper
-    EXPORT_PC()
-    FETCH(a0, 1)                        # a0 <- BBBB
-    GET_OPA(a1)                         # a1 <- AA
-    addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
-    move   a3, rSELF
-    JAL($helper)                        # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST(2)                    # load rINST
-    bnez   v0, MterpPossibleException
-    ADVANCE(2)                          # advance rPC
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-%def unused():
-/*
- * Bail to reference interpreter to throw.
- */
-  b MterpFallback
-
-%def op_const():
-    /* const vAA, +BBBBbbbb */
-    GET_OPA(a3)                            #  a3 <- AA
-    FETCH(a0, 1)                           #  a0 <- bbbb (low)
-    FETCH(a1, 2)                           #  a1 <- BBBB (high)
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
-
-%def op_const_16():
-    /* const/16 vAA, +BBBB */
-    FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
-    GET_OPA(a3)                            #  a3 <- AA
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
-
-%def op_const_4():
-    /* const/4 vA, +B */
-    sll       a1, rINST, 16                #  a1 <- Bxxx0000
-    GET_OPA(a0)                            #  a0 <- A+
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    sra       a1, a1, 28                   #  a1 <- sssssssB (sign-extended)
-    and       a0, a0, 15
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a1, a0, t0)              #  fp[A] <- a1
-
-%def op_const_class():
-%  const(helper="MterpConstClass")
-
-%def op_const_high16():
-    /* const/high16 vAA, +BBBB0000 */
-    FETCH(a0, 1)                           #  a0 <- 0000BBBB (zero-extended)
-    GET_OPA(a3)                            #  a3 <- AA
-    sll       a0, a0, 16                   #  a0 <- BBBB0000
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG_GOTO(a0, a3, t0)              #  vAA <- a0
-
-%def op_const_method_handle():
-%  const(helper="MterpConstMethodHandle")
-
-%def op_const_method_type():
-%  const(helper="MterpConstMethodType")
-
-%def op_const_string():
-%  const(helper="MterpConstString")
-
-%def op_const_string_jumbo():
-    /* const/string vAA, string@BBBBBBBB */
-    EXPORT_PC()
-    FETCH(a0, 1)                        # a0 <- bbbb (low)
-    FETCH(a2, 2)                        # a2 <- BBBB (high)
-    GET_OPA(a1)                         # a1 <- AA
-    INSERT_HIGH_HALF(a0, a2)            # a0 <- BBBBbbbb
-    addu   a2, rFP, OFF_FP_SHADOWFRAME  # a2 <- shadow frame
-    move   a3, rSELF
-    JAL(MterpConstString)               # v0 <- Mterp(index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST(3)                    # load rINST
-    bnez   v0, MterpPossibleException
-    ADVANCE(3)                          # advance rPC
-    GET_INST_OPCODE(t0)                 # extract opcode from rINST
-    GOTO_OPCODE(t0)                     # jump to next instruction
-
-%def op_const_wide():
-    /* const-wide vAA, +HHHHhhhhBBBBbbbb */
-    FETCH(a0, 1)                           #  a0 <- bbbb (low)
-    FETCH(a1, 2)                           #  a1 <- BBBB (low middle)
-    FETCH(a2, 3)                           #  a2 <- hhhh (high middle)
-    INSERT_HIGH_HALF(a0, a1)               #  a0 <- BBBBbbbb (low word)
-    FETCH(a3, 4)                           #  a3 <- HHHH (high)
-    GET_OPA(t1)                            #  t1 <- AA
-    INSERT_HIGH_HALF(a2, a3)               #  a2 <- HHHHhhhh (high word)
-    FETCH_ADVANCE_INST(5)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a2, t1, t0)        #  vAA/vAA+1 <- a0/a2
-
-%def op_const_wide_16():
-    /* const-wide/16 vAA, +BBBB */
-    FETCH_S(a0, 1)                         #  a0 <- ssssBBBB (sign-extended)
-    GET_OPA(a3)                            #  a3 <- AA
-    sra       a1, a0, 31                   #  a1 <- ssssssss
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
-
-%def op_const_wide_32():
-    /* const-wide/32 vAA, +BBBBbbbb */
-    FETCH(a0, 1)                           #  a0 <- 0000bbbb (low)
-    GET_OPA(a3)                            #  a3 <- AA
-    FETCH_S(a2, 2)                         #  a2 <- ssssBBBB (high)
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    INSERT_HIGH_HALF(a0, a2)               #  a0 <- BBBBbbbb
-    sra       a1, a0, 31                   #  a1 <- ssssssss
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
-
-%def op_const_wide_high16():
-    /* const-wide/high16 vAA, +BBBB000000000000 */
-    FETCH(a1, 1)                           #  a1 <- 0000BBBB (zero-extended)
-    GET_OPA(a3)                            #  a3 <- AA
-    li        a0, 0                        #  a0 <- 00000000
-    sll       a1, 16                       #  a1 <- BBBB0000
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a3, t0)        #  vAA/vAA+1 <- a0/a1
-
-%def op_monitor_enter():
-    /*
-     * Synchronize on an object.
-     */
-    /* monitor-enter vAA */
-    EXPORT_PC()
-    GET_OPA(a2)                            # a2 <- AA
-    GET_VREG(a0, a2)                       # a0 <- vAA (object)
-    move   a1, rSELF                       # a1 <- self
-    JAL(artLockObjectFromCode)             # v0 <- artLockObject(obj, self)
-    bnez v0, MterpException
-    FETCH_ADVANCE_INST(1)                  # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
-
-%def op_monitor_exit():
-    /*
-     * Unlock an object.
-     *
-     * Exceptions that occur when unlocking a monitor need to appear as
-     * if they happened at the following instruction.  See the Dalvik
-     * instruction spec.
-     */
-    /* monitor-exit vAA */
-    EXPORT_PC()
-    GET_OPA(a2)                            # a2 <- AA
-    GET_VREG(a0, a2)                       # a0 <- vAA (object)
-    move   a1, rSELF                       # a1 <- self
-    JAL(artUnlockObjectFromCode)           # v0 <- artUnlockObject(obj, self)
-    bnez v0, MterpException
-    FETCH_ADVANCE_INST(1)                  # advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    # extract opcode from rINST
-    GOTO_OPCODE(t0)                        # jump to next instruction
-
-%def op_move(is_object="0"):
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    GET_OPB(a1)                            #  a1 <- B from 15:12
-    GET_OPA4(a0)                           #  a0 <- A from 11:8
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_VREG(a2, a1)                       #  a2 <- fp[B]
-    GET_INST_OPCODE(t0)                    #  t0 <- opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[A] <- a2
-    .else
-    SET_VREG_GOTO(a2, a0, t0)              #  fp[A] <- a2
-    .endif
-
-%def op_move_16(is_object="0"):
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    FETCH(a1, 2)                           #  a1 <- BBBB
-    FETCH(a0, 1)                           #  a0 <- AAAA
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[AAAA] <- a2
-    .else
-    SET_VREG_GOTO(a2, a0, t0)              #  fp[AAAA] <- a2
-    .endif
-
-%def op_move_exception():
-    /* move-exception vAA */
-    GET_OPA(a2)                                 #  a2 <- AA
-    lw    a3, THREAD_EXCEPTION_OFFSET(rSELF)    #  get exception obj
-    FETCH_ADVANCE_INST(1)                       #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                         #  extract opcode from rINST
-    GET_OPCODE_TARGET(t0)
-    SET_VREG_OBJECT(a3, a2)                     #  fp[AA] <- exception obj
-    sw    zero, THREAD_EXCEPTION_OFFSET(rSELF)  #  clear exception
-    JR(t0)                                      #  jump to next instruction
-
-%def op_move_from16(is_object="0"):
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    FETCH(a1, 1)                           #  a1 <- BBBB
-    GET_OPA(a0)                            #  a0 <- AA
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_VREG(a2, a1)                       #  a2 <- fp[BBBB]
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT_GOTO(a2, a0, t0)       #  fp[AA] <- a2
-    .else
-    SET_VREG_GOTO(a2, a0, t0)              #  fp[AA] <- a2
-    .endif
-
-%def op_move_object():
-%  op_move(is_object="1")
-
-%def op_move_object_16():
-%  op_move_16(is_object="1")
-
-%def op_move_object_from16():
-%  op_move_from16(is_object="1")
-
-%def op_move_result(is_object="0"):
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    GET_OPA(a2)                            #  a2 <- AA
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    lw    a0, OFF_FP_RESULT_REGISTER(rFP)  #  get pointer to result JType
-    lw    a0, 0(a0)                        #  a0 <- result.i
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT_GOTO(a0, a2, t0)       #  fp[AA] <- a0
-    .else
-    SET_VREG_GOTO(a0, a2, t0)              #  fp[AA] <- a0
-    .endif
-
-%def op_move_result_object():
-%  op_move_result(is_object="1")
-
-%def op_move_result_wide():
-    /* move-result-wide vAA */
-    GET_OPA(a2)                            #  a2 <- AA
-    lw    a3, OFF_FP_RESULT_REGISTER(rFP)  #  get pointer to result JType
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- retval.j
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AA] <- a0/a1
-
-%def op_move_wide():
-    /* move-wide vA, vB */
-    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
-    GET_OPA4(a2)                           #  a2 <- A(+)
-    GET_OPB(a3)                            #  a3 <- B
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[B]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[B]
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[A] <- a0/a1
-
-%def op_move_wide_16():
-    /* move-wide/16 vAAAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
-    FETCH(a3, 2)                           #  a3 <- BBBB
-    FETCH(a2, 1)                           #  a2 <- AAAA
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
-    FETCH_ADVANCE_INST(3)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AAAA] <- a0/a1
-
-%def op_move_wide_from16():
-    /* move-wide/from16 vAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6, v7" or "move v7, v6" */
-    FETCH(a3, 1)                           #  a3 <- BBBB
-    GET_OPA(a2)                            #  a2 <- AA
-    EAS2(a3, rFP, a3)                      #  a3 <- &fp[BBBB]
-    LOAD64(a0, a1, a3)                     #  a0/a1 <- fp[BBBB]
-    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    SET_VREG64_GOTO(a0, a1, a2, t0)        #  fp[AA] <- a0/a1
-
-%def op_nop():
-    FETCH_ADVANCE_INST(1)                  #  advance rPC, load rINST
-    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
-    GOTO_OPCODE(t0)                        #  jump to next instruction
-
-%def op_unused_3e():
-%  unused()
-
-%def op_unused_3f():
-%  unused()
-
-%def op_unused_40():
-%  unused()
-
-%def op_unused_41():
-%  unused()
-
-%def op_unused_42():
-%  unused()
-
-%def op_unused_43():
-%  unused()
-
-%def op_unused_73():
-%  unused()
-
-%def op_unused_79():
-%  unused()
-
-%def op_unused_7a():
-%  unused()
-
-%def op_unused_f3():
-%  unused()
-
-%def op_unused_f4():
-%  unused()
-
-%def op_unused_f5():
-%  unused()
-
-%def op_unused_f6():
-%  unused()
-
-%def op_unused_f7():
-%  unused()
-
-%def op_unused_f8():
-%  unused()
-
-%def op_unused_f9():
-%  unused()
-
-%def op_unused_fc():
-%  unused()
-
-%def op_unused_fd():
-%  unused()
diff --git a/runtime/interpreter/mterp/mips64/arithmetic.S b/runtime/interpreter/mterp/mips64/arithmetic.S
deleted file mode 100644
index 0b03e02..0000000
--- a/runtime/interpreter/mterp/mips64/arithmetic.S
+++ /dev/null
@@ -1,458 +0,0 @@
-%def binop(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 32-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
-     *      xor-int, shl-int, shr-int, ushr-int
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG a0, a2                     # a0 <- vBB
-    GET_VREG a1, a3                     # a1 <- vCC
-    .if $chkzero
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    $preinstr                           # optional op
-    $instr                              # $result <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG $result, a4                # vAA <- $result
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def binop2addr(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (INT_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
-     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
-     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    .if $chkzero
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    $preinstr                           # optional op
-    $instr                              # $result <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG $result, a2                # vA <- $result
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def binopLit16(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CCCC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
-     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
-     */
-    /* binop/lit16 vA, vB, #+CCCC */
-    lh      a1, 2(rPC)                  # a1 <- sign-extended CCCC
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB
-    .if $chkzero
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    $preinstr                           # optional op
-    $instr                              # $result <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG $result, a2                # vA <- $result
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-%def binopLit8(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be an MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * CC (a1).  Useful for integer division and modulus.
-     *
-     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
-     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
-     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
-     */
-    /* binop/lit8 vAA, vBB, #+CC */
-    lbu     a3, 2(rPC)                  # a3 <- BB
-    lb      a1, 3(rPC)                  # a1 <- sign-extended CC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG a0, a3                     # a0 <- vBB
-    .if $chkzero
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    $preinstr                           # optional op
-    $instr                              # $result <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG $result, a2                # vAA <- $result
-    GOTO_OPCODE v0                      # jump to next instruction
-
-
-%def binopWide(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 64-bit binary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vCC (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long, sub-long, mul-long, div-long, rem-long, and-long, or-long,
-     *      xor-long, shl-long, shr-long, ushr-long
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_WIDE a0, a2                # a0 <- vBB
-    GET_VREG_WIDE a1, a3                # a1 <- vCC
-    .if $chkzero
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    $preinstr                           # optional op
-    $instr                              # $result <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE $result, a4           # vAA <- $result
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def binopWide2addr(preinstr="", result="a0", chkzero="0", instr=""):
-    /*
-     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
-     * that specifies an instruction that performs "result = a0 op a1".
-     * This could be a MIPS instruction or a function call.  (If the result
-     * comes back in a register other than a0, you can override "result".)
-     *
-     * If "chkzero" is set to 1, we perform a divide-by-zero check on
-     * vB (a1).  Useful for integer division and modulus.  Note that we
-     * *don't* check for (LONG_MIN / -1) here, because the CPU handles it
-     * correctly.
-     *
-     * For: add-long/2addr, sub-long/2addr, mul-long/2addr, div-long/2addr,
-     *      rem-long/2addr, and-long/2addr, or-long/2addr, xor-long/2addr,
-     *      shl-long/2addr, shr-long/2addr, ushr-long/2addr
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_WIDE a0, a2                # a0 <- vA
-    GET_VREG_WIDE a1, a3                # a1 <- vB
-    .if $chkzero
-    beqz    a1, common_errDivideByZero  # is second operand zero?
-    .endif
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    $preinstr                           # optional op
-    $instr                              # $result <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE $result, a2           # vA <- $result
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def unop(preinstr="", instr=""):
-    /*
-     * Generic 32-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "a0 = op a0".
-     *
-     * for: int-to-byte, int-to-char, int-to-short,
-     *      not-int, neg-int
-     */
-    /* unop vA, vB */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB
-    ext     a2, rINST, 8, 4             # a2 <- A
-    $preinstr                           # optional op
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    $instr                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def unopWide(preinstr="", instr=""):
-    /*
-     * Generic 64-bit unary operation.  Provide an "instr" line that
-     * specifies an instruction that performs "a0 = op a0".
-     *
-     * For: not-long, neg-long
-     */
-    /* unop vA, vB */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_WIDE a0, a3                # a0 <- vB
-    ext     a2, rINST, 8, 4             # a2 <- A
-    $preinstr                           # optional op
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    $instr                              # a0 <- op, a0-a3 changed
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_add_int():
-%  binop(instr="addu a0, a0, a1")
-
-%def op_add_int_2addr():
-%  binop2addr(instr="addu a0, a0, a1")
-
-%def op_add_int_lit16():
-%  binopLit16(instr="addu a0, a0, a1")
-
-%def op_add_int_lit8():
-%  binopLit8(instr="addu a0, a0, a1")
-
-%def op_add_long():
-%  binopWide(instr="daddu a0, a0, a1")
-
-%def op_add_long_2addr():
-%  binopWide2addr(instr="daddu a0, a0, a1")
-
-%def op_and_int():
-%  binop(instr="and a0, a0, a1")
-
-%def op_and_int_2addr():
-%  binop2addr(instr="and a0, a0, a1")
-
-%def op_and_int_lit16():
-%  binopLit16(instr="and a0, a0, a1")
-
-%def op_and_int_lit8():
-%  binopLit8(instr="and a0, a0, a1")
-
-%def op_and_long():
-%  binopWide(instr="and a0, a0, a1")
-
-%def op_and_long_2addr():
-%  binopWide2addr(instr="and a0, a0, a1")
-
-%def op_cmp_long():
-    /* cmp-long vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_WIDE a0, a2                # a0 <- vBB
-    GET_VREG_WIDE a1, a3                # a1 <- vCC
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    slt     a2, a0, a1
-    slt     a0, a1, a0
-    subu    a0, a0, a2
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                     # vAA <- result
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_div_int():
-%  binop(instr="div a0, a0, a1", chkzero="1")
-
-%def op_div_int_2addr():
-%  binop2addr(instr="div a0, a0, a1", chkzero="1")
-
-%def op_div_int_lit16():
-%  binopLit16(instr="div a0, a0, a1", chkzero="1")
-
-%def op_div_int_lit8():
-%  binopLit8(instr="div a0, a0, a1", chkzero="1")
-
-%def op_div_long():
-%  binopWide(instr="ddiv a0, a0, a1", chkzero="1")
-
-%def op_div_long_2addr():
-%  binopWide2addr(instr="ddiv a0, a0, a1", chkzero="1")
-
-%def op_int_to_byte():
-%  unop(instr="seb     a0, a0")
-
-%def op_int_to_char():
-%  unop(instr="and     a0, a0, 0xffff")
-
-%def op_int_to_long():
-    /* int-to-long vA, vB */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG a0, a3                     # a0 <- vB (sign-extended to 64 bits)
-    ext     a2, rINST, 8, 4             # a2 <- A
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vA <- vB
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_int_to_short():
-%  unop(instr="seh     a0, a0")
-
-%def op_long_to_int():
-/* we ignore the high word, making this equivalent to a 32-bit reg move */
-%  op_move()
-
-%def op_mul_int():
-%  binop(instr="mul a0, a0, a1")
-
-%def op_mul_int_2addr():
-%  binop2addr(instr="mul a0, a0, a1")
-
-%def op_mul_int_lit16():
-%  binopLit16(instr="mul a0, a0, a1")
-
-%def op_mul_int_lit8():
-%  binopLit8(instr="mul a0, a0, a1")
-
-%def op_mul_long():
-%  binopWide(instr="dmul a0, a0, a1")
-
-%def op_mul_long_2addr():
-%  binopWide2addr(instr="dmul a0, a0, a1")
-
-%def op_neg_int():
-%  unop(instr="subu    a0, zero, a0")
-
-%def op_neg_long():
-%  unopWide(instr="dsubu   a0, zero, a0")
-
-%def op_not_int():
-%  unop(instr="nor     a0, zero, a0")
-
-%def op_not_long():
-%  unopWide(instr="nor     a0, zero, a0")
-
-%def op_or_int():
-%  binop(instr="or a0, a0, a1")
-
-%def op_or_int_2addr():
-%  binop2addr(instr="or a0, a0, a1")
-
-%def op_or_int_lit16():
-%  binopLit16(instr="or a0, a0, a1")
-
-%def op_or_int_lit8():
-%  binopLit8(instr="or a0, a0, a1")
-
-%def op_or_long():
-%  binopWide(instr="or a0, a0, a1")
-
-%def op_or_long_2addr():
-%  binopWide2addr(instr="or a0, a0, a1")
-
-%def op_rem_int():
-%  binop(instr="mod a0, a0, a1", chkzero="1")
-
-%def op_rem_int_2addr():
-%  binop2addr(instr="mod a0, a0, a1", chkzero="1")
-
-%def op_rem_int_lit16():
-%  binopLit16(instr="mod a0, a0, a1", chkzero="1")
-
-%def op_rem_int_lit8():
-%  binopLit8(instr="mod a0, a0, a1", chkzero="1")
-
-%def op_rem_long():
-%  binopWide(instr="dmod a0, a0, a1", chkzero="1")
-
-%def op_rem_long_2addr():
-%  binopWide2addr(instr="dmod a0, a0, a1", chkzero="1")
-
-%def op_rsub_int():
-%  binopLit16(instr="subu a0, a1, a0")
-
-%def op_rsub_int_lit8():
-%  binopLit8(instr="subu a0, a1, a0")
-
-%def op_shl_int():
-%  binop(instr="sll a0, a0, a1")
-
-%def op_shl_int_2addr():
-%  binop2addr(instr="sll a0, a0, a1")
-
-%def op_shl_int_lit8():
-%  binopLit8(instr="sll a0, a0, a1")
-
-%def op_shl_long():
-%  binopWide(instr="dsll a0, a0, a1")
-
-%def op_shl_long_2addr():
-%  binopWide2addr(instr="dsll a0, a0, a1")
-
-%def op_shr_int():
-%  binop(instr="sra a0, a0, a1")
-
-%def op_shr_int_2addr():
-%  binop2addr(instr="sra a0, a0, a1")
-
-%def op_shr_int_lit8():
-%  binopLit8(instr="sra a0, a0, a1")
-
-%def op_shr_long():
-%  binopWide(instr="dsra a0, a0, a1")
-
-%def op_shr_long_2addr():
-%  binopWide2addr(instr="dsra a0, a0, a1")
-
-%def op_sub_int():
-%  binop(instr="subu a0, a0, a1")
-
-%def op_sub_int_2addr():
-%  binop2addr(instr="subu a0, a0, a1")
-
-%def op_sub_long():
-%  binopWide(instr="dsubu a0, a0, a1")
-
-%def op_sub_long_2addr():
-%  binopWide2addr(instr="dsubu a0, a0, a1")
-
-%def op_ushr_int():
-%  binop(instr="srl a0, a0, a1")
-
-%def op_ushr_int_2addr():
-%  binop2addr(instr="srl a0, a0, a1")
-
-%def op_ushr_int_lit8():
-%  binopLit8(instr="srl a0, a0, a1")
-
-%def op_ushr_long():
-%  binopWide(instr="dsrl a0, a0, a1")
-
-%def op_ushr_long_2addr():
-%  binopWide2addr(instr="dsrl a0, a0, a1")
-
-%def op_xor_int():
-%  binop(instr="xor a0, a0, a1")
-
-%def op_xor_int_2addr():
-%  binop2addr(instr="xor a0, a0, a1")
-
-%def op_xor_int_lit16():
-%  binopLit16(instr="xor a0, a0, a1")
-
-%def op_xor_int_lit8():
-%  binopLit8(instr="xor a0, a0, a1")
-
-%def op_xor_long():
-%  binopWide(instr="xor a0, a0, a1")
-
-%def op_xor_long_2addr():
-%  binopWide2addr(instr="xor a0, a0, a1")
diff --git a/runtime/interpreter/mterp/mips64/array.S b/runtime/interpreter/mterp/mips64/array.S
deleted file mode 100644
index 9d97f0a..0000000
--- a/runtime/interpreter/mterp/mips64/array.S
+++ /dev/null
@@ -1,241 +0,0 @@
-%def op_aget(load="lw", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
-    /*
-     * Array get, 32 bits or less.  vAA <- vBB[vCC].
-     *
-     * for: aget, aget-boolean, aget-byte, aget-char, aget-short
-     *
-     * NOTE: assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    .if $shift
-    # [d]lsa does not support shift count of 0.
-    dlsa    a0, a1, a0, $shift          # a0 <- arrayObj + index*width
-    .else
-    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
-    .endif
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    $load   a2, $data_offset(a0)        # a2 <- vBB[vCC]
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a2, a4                     # vAA <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_aget_boolean():
-%  op_aget(load="lbu", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
-
-%def op_aget_byte():
-%  op_aget(load="lb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
-
-%def op_aget_char():
-%  op_aget(load="lhu", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
-
-%def op_aget_object():
-    /*
-     * Array object get.  vAA <- vBB[vCC].
-     *
-     * for: aget-object
-     */
-    /* op vAA, vBB, vCC */
-    .extern artAGetObjectFromMterp
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    EXPORT_PC
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    jal     artAGetObjectFromMterp      # (array, index)
-    ld      a1, THREAD_EXCEPTION_OFFSET(rSELF)
-    srl     a4, rINST, 8                # a4 <- AA
-    PREFETCH_INST 2
-    bnez    a1, MterpException
-    SET_VREG_OBJECT v0, a4              # vAA <- v0
-    ADVANCE 2
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_aget_short():
-%  op_aget(load="lh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
-
-%def op_aget_wide():
-    /*
-     * Array get, 64 bits.  vAA <- vBB[vCC].
-     *
-     */
-    /* aget-wide vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    dlsa    a0, a1, a0, 3               # a0 <- arrayObj + index*width
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    lw      a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
-    lw      a3, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)
-    dinsu   a2, a3, 32, 32              # a2 <- vBB[vCC]
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a2, a4                # vAA <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_aput(store="sw", shift="2", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET"):
-    /*
-     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
-     *
-     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
-     *
-     * NOTE: this assumes data offset for arrays is the same for all non-wide types.
-     * If this changes, specialize.
-     */
-    /* op vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    .if $shift
-    # [d]lsa does not support shift count of 0.
-    dlsa    a0, a1, a0, $shift          # a0 <- arrayObj + index*width
-    .else
-    daddu   a0, a1, a0                  # a0 <- arrayObj + index*width
-    .endif
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_VREG a2, a4                     # a2 <- vAA
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    $store  a2, $data_offset(a0)        # vBB[vCC] <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_aput_boolean():
-%  op_aput(store="sb", shift="0", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET")
-
-%def op_aput_byte():
-%  op_aput(store="sb", shift="0", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET")
-
-%def op_aput_char():
-%  op_aput(store="sh", shift="1", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET")
-
-%def op_aput_object():
-    /*
-     * Store an object into an array.  vBB[vCC] <- vAA.
-     */
-    /* op vAA, vBB, vCC */
-    .extern MterpAputObject
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rPC
-    move    a2, rINST
-    jal     MterpAputObject
-    beqzc   v0, MterpPossibleException
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_aput_short():
-%  op_aput(store="sh", shift="1", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET")
-
-%def op_aput_wide():
-    /*
-     * Array put, 64 bits.  vBB[vCC] <- vAA.
-     *
-     */
-    /* aput-wide vAA, vBB, vCC */
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    srl     a4, rINST, 8                # a4 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vBB (array object)
-    GET_VREG a1, a3                     # a1 <- vCC (requested index)
-    beqz    a0, common_errNullObject    # bail if null array object
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- arrayObj->length
-    dlsa    a0, a1, a0, 3               # a0 <- arrayObj + index*width
-    bgeu    a1, a3, common_errArrayIndex  # unsigned compare: index >= length, bail
-    GET_VREG_WIDE a2, a4                # a2 <- vAA
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    sw      a2, MIRROR_WIDE_ARRAY_DATA_OFFSET(a0)
-    dsrl32  a2, a2, 0
-    sw      a2, (MIRROR_WIDE_ARRAY_DATA_OFFSET+4)(a0)  # vBB[vCC] <- a2
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_array_length():
-    /*
-     * Return the length of an array.
-     */
-    srl     a1, rINST, 12               # a1 <- B
-    GET_VREG_U a0, a1                   # a0 <- vB (object ref)
-    ext     a2, rINST, 8, 4             # a2 <- A
-    beqz    a0, common_errNullObject    # yup, fail
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    lw      a3, MIRROR_ARRAY_LENGTH_OFFSET(a0)  # a3 <- array length
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a3, a2                     # vB <- length
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_fill_array_data():
-    /* fill-array-data vAA, +BBBBBBBB */
-    .extern MterpFillArrayData
-    EXPORT_PC
-    lh      a1, 2(rPC)                  # a1 <- bbbb (lo)
-    lh      a0, 4(rPC)                  # a0 <- BBBB (hi)
-    srl     a3, rINST, 8                # a3 <- AA
-    ins     a1, a0, 16, 16              # a1 <- BBBBbbbb
-    GET_VREG_U a0, a3                   # a0 <- vAA (array object)
-    dlsa    a1, a1, rPC, 1              # a1 <- PC + BBBBbbbb*2 (array data off.)
-    jal     MterpFillArrayData          # (obj, payload)
-    beqzc   v0, MterpPossibleException  # exception?
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_filled_new_array(helper="MterpFilledNewArray"):
-    /*
-     * Create a new array with elements filled from registers.
-     *
-     * for: filled-new-array, filled-new-array/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class//CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, type//BBBB */
-    .extern $helper
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rPC
-    move    a2, rSELF
-    jal     $helper
-    beqzc   v0, MterpPossibleException
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_filled_new_array_range():
-%  op_filled_new_array(helper="MterpFilledNewArrayRange")
-
-%def op_new_array():
-    /*
-     * Allocate an array of objects, specified with the array class
-     * and a count.
-     *
-     * The verifier guarantees that this is an array class, so we don't
-     * check for it here.
-     */
-    /* new-array vA, vB, class//CCCC */
-    .extern MterpNewArray
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rPC
-    move    a2, rINST
-    move    a3, rSELF
-    jal     MterpNewArray
-    beqzc   v0, MterpPossibleException
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
diff --git a/runtime/interpreter/mterp/mips64/control_flow.S b/runtime/interpreter/mterp/mips64/control_flow.S
deleted file mode 100644
index 457b938..0000000
--- a/runtime/interpreter/mterp/mips64/control_flow.S
+++ /dev/null
@@ -1,217 +0,0 @@
-%def bincmp(condition=""):
-    /*
-     * Generic two-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform, e.g. for
-     * "if-le" you would use "le".
-     *
-     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
-     */
-    /* if-cmp vA, vB, +CCCC */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended CCCC)
-    GET_VREG a0, a2                     # a0 <- vA
-    GET_VREG a1, a3                     # a1 <- vB
-    b${condition}c a0, a1, MterpCommonTakenBranchNoFlags
-    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
-    beqc    rPROFILE, v0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def zcmp(condition=""):
-    /*
-     * Generic one-operand compare-and-branch operation.  Provide a "condition"
-     * fragment that specifies the comparison to perform, e.g. for
-     * "if-lez" you would use "le".
-     *
-     * For: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
-     */
-    /* if-cmp vAA, +BBBB */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended BBBB)
-    GET_VREG a0, a2                     # a0 <- vAA
-    b${condition}zc a0, MterpCommonTakenBranchNoFlags
-    li      v0, JIT_CHECK_OSR           # possible OSR re-entry?
-    beqc    rPROFILE, v0, .L_check_not_taken_osr
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_goto():
-    /*
-     * Unconditional branch, 8-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto +AA */
-    srl     rINST, rINST, 8
-    seb     rINST, rINST                # rINST <- offset (sign-extended AA)
-    b       MterpCommonTakenBranchNoFlags
-
-%def op_goto_16():
-    /*
-     * Unconditional branch, 16-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     */
-    /* goto/16 +AAAA */
-    lh      rINST, 2(rPC)               # rINST <- offset (sign-extended AAAA)
-    b       MterpCommonTakenBranchNoFlags
-
-%def op_goto_32():
-    /*
-     * Unconditional branch, 32-bit offset.
-     *
-     * The branch distance is a signed code-unit offset, which we need to
-     * double to get a byte offset.
-     *
-     * Unlike most opcodes, this one is allowed to branch to itself, so
-     * our "backward branch" test must be "<=0" instead of "<0".
-     */
-    /* goto/32 +AAAAAAAA */
-    lh      rINST, 2(rPC)               # rINST <- aaaa (low)
-    lh      a1, 4(rPC)                  # a1 <- AAAA (high)
-    ins     rINST, a1, 16, 16           # rINST <- offset (sign-extended AAAAaaaa)
-    b       MterpCommonTakenBranchNoFlags
-
-%def op_if_eq():
-%  bincmp(condition="eq")
-
-%def op_if_eqz():
-%  zcmp(condition="eq")
-
-%def op_if_ge():
-%  bincmp(condition="ge")
-
-%def op_if_gez():
-%  zcmp(condition="ge")
-
-%def op_if_gt():
-%  bincmp(condition="gt")
-
-%def op_if_gtz():
-%  zcmp(condition="gt")
-
-%def op_if_le():
-%  bincmp(condition="le")
-
-%def op_if_lez():
-%  zcmp(condition="le")
-
-%def op_if_lt():
-%  bincmp(condition="lt")
-
-%def op_if_ltz():
-%  zcmp(condition="lt")
-
-%def op_if_ne():
-%  bincmp(condition="ne")
-
-%def op_if_nez():
-%  zcmp(condition="ne")
-
-%def op_packed_switch(func="MterpDoPackedSwitch"):
-    /*
-     * Handle a packed-switch or sparse-switch instruction.  In both cases
-     * we decode it and hand it off to a helper function.
-     *
-     * We don't really expect backward branches in a switch statement, but
-     * they're perfectly legal, so we check for them here.
-     *
-     * for: packed-switch, sparse-switch
-     */
-    /* op vAA, +BBBBBBBB */
-    .extern $func
-    lh      a0, 2(rPC)                  # a0 <- bbbb (lo)
-    lh      a1, 4(rPC)                  # a1 <- BBBB (hi)
-    srl     a3, rINST, 8                # a3 <- AA
-    ins     a0, a1, 16, 16              # a0 <- BBBBbbbb
-    GET_VREG a1, a3                     # a1 <- vAA
-    dlsa    a0, a0, rPC, 1              # a0 <- PC + BBBBbbbb*2
-    jal     $func                       # v0 <- code-unit branch offset
-    move    rINST, v0
-    b       MterpCommonTakenBranchNoFlags
-
-%def op_return(instr="GET_VREG"):
-    /*
-     * Return a 32-bit value.
-     *
-     * for: return (sign-extend), return-object (zero-extend)
-     */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    .extern MterpSuspendCheck
-    jal     MterpThreadFenceForConstructor
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    move    a0, rSELF
-    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqzc   ra, 1f
-    jal     MterpSuspendCheck           # (self)
-1:
-    srl     a2, rINST, 8                # a2 <- AA
-    $instr  a0, a2                      # a0 <- vAA
-    b       MterpReturn
-
-%def op_return_object():
-%  op_return(instr="GET_VREG_U")
-
-%def op_return_void():
-    .extern MterpThreadFenceForConstructor
-    .extern MterpSuspendCheck
-    jal     MterpThreadFenceForConstructor
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    move    a0, rSELF
-    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqzc   ra, 1f
-    jal     MterpSuspendCheck           # (self)
-1:
-    li      a0, 0
-    b       MterpReturn
-
-%def op_return_void_no_barrier():
-    .extern MterpSuspendCheck
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    move    a0, rSELF
-    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqzc   ra, 1f
-    jal     MterpSuspendCheck           # (self)
-1:
-    li      a0, 0
-    b       MterpReturn
-
-%def op_return_wide():
-    /*
-     * Return a 64-bit value.
-     */
-    /* return-wide vAA */
-    /* op vAA */
-    .extern MterpThreadFenceForConstructor
-    .extern MterpSuspendCheck
-    jal     MterpThreadFenceForConstructor
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    move    a0, rSELF
-    and     ra, ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    beqzc   ra, 1f
-    jal     MterpSuspendCheck           # (self)
-1:
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_WIDE a0, a2                # a0 <- vAA
-    b       MterpReturn
-
-%def op_sparse_switch():
-%  op_packed_switch(func="MterpDoSparseSwitch")
-
-%def op_throw():
-    /*
-     * Throw an exception object in the current thread.
-     */
-    /* throw vAA */
-    EXPORT_PC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vAA (exception object)
-    beqzc   a0, common_errNullObject
-    sd      a0, THREAD_EXCEPTION_OFFSET(rSELF)  # thread->exception <- obj
-    b       MterpException
diff --git a/runtime/interpreter/mterp/mips64/floating_point.S b/runtime/interpreter/mterp/mips64/floating_point.S
deleted file mode 100644
index 1132a09..0000000
--- a/runtime/interpreter/mterp/mips64/floating_point.S
+++ /dev/null
@@ -1,382 +0,0 @@
-%def fbinop(instr=""):
-    /*:
-     * Generic 32-bit floating-point operation.
-     *
-     * For: add-float, sub-float, mul-float, div-float.
-     * form: <op> f0, f0, f1
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_FLOAT f0, a2               # f0 <- vBB
-    GET_VREG_FLOAT f1, a3               # f1 <- vCC
-    $instr                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a4               # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def fbinop2addr(instr=""):
-    /*:
-     * Generic 32-bit "/2addr" floating-point operation.
-     *
-     * For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr.
-     * form: <op> f0, f0, f1
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_FLOAT f0, a2               # f0 <- vA
-    GET_VREG_FLOAT f1, a3               # f1 <- vB
-    $instr                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a2               # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def fbinopWide(instr=""):
-    /*:
-     * Generic 64-bit floating-point operation.
-     *
-     * For: add-double, sub-double, mul-double, div-double.
-     * form: <op> f0, f0, f1
-     */
-    /* binop vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
-    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
-    $instr                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a4              # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def fbinopWide2addr(instr=""):
-    /*:
-     * Generic 64-bit "/2addr" floating-point operation.
-     *
-     * For: add-double/2addr, sub-double/2addr, mul-double/2addr, div-double/2addr.
-     * form: <op> f0, f0, f1
-     */
-    /* binop/2addr vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_DOUBLE f0, a2              # f0 <- vA
-    GET_VREG_DOUBLE f1, a3              # f1 <- vB
-    $instr                              # f0 <- f0 op f1
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a2              # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def fcmp(gt_bias=""):
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     *
-     * For: cmpl-float, cmpg-float
-     */
-    /* op vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_FLOAT f0, a2               # f0 <- vBB
-    GET_VREG_FLOAT f1, a3               # f1 <- vCC
-    cmp.eq.s f2, f0, f1
-    li      a0, 0
-    bc1nez  f2, 1f                      # done if vBB == vCC (ordered)
-    .if $gt_bias
-    cmp.lt.s f2, f0, f1
-    li      a0, -1
-    bc1nez  f2, 1f                      # done if vBB < vCC (ordered)
-    li      a0, 1                       # vBB > vCC or unordered
-    .else
-    cmp.lt.s f2, f1, f0
-    li      a0, 1
-    bc1nez  f2, 1f                      # done if vBB > vCC (ordered)
-    li      a0, -1                      # vBB < vCC or unordered
-    .endif
-1:
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                     # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def fcmpWide(gt_bias=""):
-    /*
-     * Compare two floating-point values.  Puts 0, 1, or -1 into the
-     * destination register based on the results of the comparison.
-     *
-     * For: cmpl-double, cmpg-double
-     */
-    /* op vAA, vBB, vCC */
-    srl     a4, rINST, 8                # a4 <- AA
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_DOUBLE f0, a2              # f0 <- vBB
-    GET_VREG_DOUBLE f1, a3              # f1 <- vCC
-    cmp.eq.d f2, f0, f1
-    li      a0, 0
-    bc1nez  f2, 1f                      # done if vBB == vCC (ordered)
-    .if $gt_bias
-    cmp.lt.d f2, f0, f1
-    li      a0, -1
-    bc1nez  f2, 1f                      # done if vBB < vCC (ordered)
-    li      a0, 1                       # vBB > vCC or unordered
-    .else
-    cmp.lt.d f2, f1, f0
-    li      a0, 1
-    bc1nez  f2, 1f                      # done if vBB > vCC (ordered)
-    li      a0, -1                      # vBB < vCC or unordered
-    .endif
-1:
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a4                     # vAA <- a0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def fcvtFooter(suffix="", valreg=""):
-    /*
-     * Stores a specified register containing the result of conversion
-     * from or to a floating-point type and jumps to the next instruction.
-     *
-     * Expects a1 to contain the destination Dalvik register number.
-     * a1 is set up by fcvtHeader.S.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     *
-     * Note that this file can't be included after a break in other files
-     * and in those files its contents appear as a copy.
-     * See: float-to-int, float-to-long, double-to-int, double-to-long.
-     */
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG$suffix $valreg, a1
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def fcvtHeader(suffix="", valreg=""):
-    /*
-     * Loads a specified register from vB. Used primarily for conversions
-     * from or to a floating-point type.
-     *
-     * Sets up a1 = A and a2 = B. a2 is later used by fcvtFooter.S to
-     * store the result in vA and jump to the next instruction.
-     *
-     * For: int-to-float, int-to-double, long-to-float, long-to-double,
-     *      float-to-int, float-to-long, float-to-double, double-to-int,
-     *      double-to-long, double-to-float, neg-float, neg-double.
-     */
-    ext     a1, rINST, 8, 4             # a1 <- A
-    srl     a2, rINST, 12               # a2 <- B
-    GET_VREG$suffix $valreg, a2
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-
-%def op_add_double():
-%  fbinopWide(instr="add.d f0, f0, f1")
-
-%def op_add_double_2addr():
-%  fbinopWide2addr(instr="add.d f0, f0, f1")
-
-%def op_add_float():
-%  fbinop(instr="add.s f0, f0, f1")
-
-%def op_add_float_2addr():
-%  fbinop2addr(instr="add.s f0, f0, f1")
-
-%def op_cmpg_double():
-%  fcmpWide(gt_bias="1")
-
-%def op_cmpg_float():
-%  fcmp(gt_bias="1")
-
-%def op_cmpl_double():
-%  fcmpWide(gt_bias="0")
-
-%def op_cmpl_float():
-%  fcmp(gt_bias="0")
-
-%def op_div_double():
-%  fbinopWide(instr="div.d f0, f0, f1")
-
-%def op_div_double_2addr():
-%  fbinopWide2addr(instr="div.d f0, f0, f1")
-
-%def op_div_float():
-%  fbinop(instr="div.s f0, f0, f1")
-
-%def op_div_float_2addr():
-%  fbinop2addr(instr="div.s f0, f0, f1")
-
-%def op_double_to_float():
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input and store the output into or from a
-     * floating-point register irrespective of the type.
-     */
-%  fcvtHeader(suffix="_DOUBLE", valreg="f0")
-    cvt.s.d f0, f0
-%  fcvtFooter(suffix="_FLOAT", valreg="f0")
-
-%def op_double_to_int():
-%  fcvtHeader(suffix="_DOUBLE", valreg="f0")
-    trunc.w.d f0, f0
-%  fcvtFooter(suffix="_FLOAT", valreg="f0")
-
-%def op_double_to_long():
-%  fcvtHeader(suffix="_DOUBLE", valreg="f0")
-    trunc.l.d f0, f0
-%  fcvtFooter(suffix="_DOUBLE", valreg="f0")
-
-%def op_float_to_double():
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input and store the output into or from a
-     * floating-point register irrespective of the type.
-     */
-%  fcvtHeader(suffix="_FLOAT", valreg="f0")
-    cvt.d.s f0, f0
-%  fcvtFooter(suffix="_DOUBLE", valreg="f0")
-
-%def op_float_to_int():
-%  fcvtHeader(suffix="_FLOAT", valreg="f0")
-    trunc.w.s f0, f0
-%  fcvtFooter(suffix="_FLOAT", valreg="f0")
-
-%def op_float_to_long():
-%  fcvtHeader(suffix="_FLOAT", valreg="f0")
-    trunc.l.s f0, f0
-%  fcvtFooter(suffix="_DOUBLE", valreg="f0")
-
-%def op_int_to_double():
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input and store the output into or from a
-     * floating-point register irrespective of the type.
-     */
-%  fcvtHeader(suffix="_FLOAT", valreg="f0")
-    cvt.d.w f0, f0
-%  fcvtFooter(suffix="_DOUBLE", valreg="f0")
-
-%def op_int_to_float():
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input and store the output into or from a
-     * floating-point register irrespective of the type.
-     */
-%  fcvtHeader(suffix="_FLOAT", valreg="f0")
-    cvt.s.w f0, f0
-%  fcvtFooter(suffix="_FLOAT", valreg="f0")
-
-%def op_long_to_double():
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input and store the output into or from a
-     * floating-point register irrespective of the type.
-     */
-%  fcvtHeader(suffix="_DOUBLE", valreg="f0")
-    cvt.d.l f0, f0
-%  fcvtFooter(suffix="_DOUBLE", valreg="f0")
-
-%def op_long_to_float():
-    /*
-     * Conversion from or to floating-point happens in a floating-point register.
-     * Therefore we load the input and store the output into or from a
-     * floating-point register irrespective of the type.
-     */
-%  fcvtHeader(suffix="_DOUBLE", valreg="f0")
-    cvt.s.l f0, f0
-%  fcvtFooter(suffix="_FLOAT", valreg="f0")
-
-%def op_mul_double():
-%  fbinopWide(instr="mul.d f0, f0, f1")
-
-%def op_mul_double_2addr():
-%  fbinopWide2addr(instr="mul.d f0, f0, f1")
-
-%def op_mul_float():
-%  fbinop(instr="mul.s f0, f0, f1")
-
-%def op_mul_float_2addr():
-%  fbinop2addr(instr="mul.s f0, f0, f1")
-
-%def op_neg_double():
-%  fcvtHeader(suffix="_DOUBLE", valreg="f0")
-    neg.d   f0, f0
-%  fcvtFooter(suffix="_DOUBLE", valreg="f0")
-
-%def op_neg_float():
-%  fcvtHeader(suffix="_FLOAT", valreg="f0")
-    neg.s   f0, f0
-%  fcvtFooter(suffix="_FLOAT", valreg="f0")
-
-%def op_rem_double():
-    /* rem-double vAA, vBB, vCC */
-    .extern fmod
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_DOUBLE f12, a2             # f12 <- vBB
-    GET_VREG_DOUBLE f13, a3             # f13 <- vCC
-    jal     fmod                        # f0 <- f12 op f13
-    srl     a4, rINST, 8                # a4 <- AA
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a4              # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_rem_double_2addr():
-    /* rem-double/2addr vA, vB */
-    .extern fmod
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_DOUBLE f12, a2             # f12 <- vA
-    GET_VREG_DOUBLE f13, a3             # f13 <- vB
-    jal     fmod                        # f0 <- f12 op f13
-    ext     a2, rINST, 8, 4             # a2 <- A
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_DOUBLE f0, a2              # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_rem_float():
-    /* rem-float vAA, vBB, vCC */
-    .extern fmodf
-    lbu     a2, 2(rPC)                  # a2 <- BB
-    lbu     a3, 3(rPC)                  # a3 <- CC
-    GET_VREG_FLOAT f12, a2              # f12 <- vBB
-    GET_VREG_FLOAT f13, a3              # f13 <- vCC
-    jal     fmodf                       # f0 <- f12 op f13
-    srl     a4, rINST, 8                # a4 <- AA
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a4               # vAA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_rem_float_2addr():
-    /* rem-float/2addr vA, vB */
-    .extern fmodf
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    GET_VREG_FLOAT f12, a2              # f12 <- vA
-    GET_VREG_FLOAT f13, a3              # f13 <- vB
-    jal     fmodf                       # f0 <- f12 op f13
-    ext     a2, rINST, 8, 4             # a2 <- A
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_FLOAT f0, a2               # vA <- f0
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_sub_double():
-%  fbinopWide(instr="sub.d f0, f0, f1")
-
-%def op_sub_double_2addr():
-%  fbinopWide2addr(instr="sub.d f0, f0, f1")
-
-%def op_sub_float():
-%  fbinop(instr="sub.s f0, f0, f1")
-
-%def op_sub_float_2addr():
-%  fbinop2addr(instr="sub.s f0, f0, f1")
diff --git a/runtime/interpreter/mterp/mips64/invoke.S b/runtime/interpreter/mterp/mips64/invoke.S
deleted file mode 100644
index c2967cf..0000000
--- a/runtime/interpreter/mterp/mips64/invoke.S
+++ /dev/null
@@ -1,110 +0,0 @@
-%def invoke(helper="UndefinedInvokeHandler"):
-    /*
-     * Generic invoke handler wrapper.
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-    .extern $helper
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     $helper
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 3
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-%def invoke_polymorphic(helper="UndefinedInvokeHandler"):
-    /*
-     * invoke-polymorphic handler wrapper.
-     */
-    /* op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB, proto@HHHH */
-    .extern $helper
-    .extern MterpShouldSwitchInterpreters
-    EXPORT_PC
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    move    a3, rINST
-    jal     $helper
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 4
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-%def op_invoke_custom():
-%  invoke(helper="MterpInvokeCustom")
-
-%def op_invoke_custom_range():
-%  invoke(helper="MterpInvokeCustomRange")
-
-%def op_invoke_direct():
-%  invoke(helper="MterpInvokeDirect")
-
-%def op_invoke_direct_range():
-%  invoke(helper="MterpInvokeDirectRange")
-
-%def op_invoke_interface():
-%  invoke(helper="MterpInvokeInterface")
-    /*
-     * Handle an interface method call.
-     *
-     * for: invoke-interface, invoke-interface/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-%def op_invoke_interface_range():
-%  invoke(helper="MterpInvokeInterfaceRange")
-
-%def op_invoke_polymorphic():
-%  invoke_polymorphic(helper="MterpInvokePolymorphic")
-
-%def op_invoke_polymorphic_range():
-%  invoke_polymorphic(helper="MterpInvokePolymorphicRange")
-
-%def op_invoke_static():
-%  invoke(helper="MterpInvokeStatic")
-
-%def op_invoke_static_range():
-%  invoke(helper="MterpInvokeStaticRange")
-
-%def op_invoke_super():
-%  invoke(helper="MterpInvokeSuper")
-    /*
-     * Handle a "super" method call.
-     *
-     * for: invoke-super, invoke-super/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-%def op_invoke_super_range():
-%  invoke(helper="MterpInvokeSuperRange")
-
-%def op_invoke_virtual():
-%  invoke(helper="MterpInvokeVirtual")
-    /*
-     * Handle a virtual method call.
-     *
-     * for: invoke-virtual, invoke-virtual/range
-     */
-    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
-    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
-
-%def op_invoke_virtual_quick():
-%  invoke(helper="MterpInvokeVirtualQuick")
-
-%def op_invoke_virtual_range():
-%  invoke(helper="MterpInvokeVirtualRange")
-
-%def op_invoke_virtual_range_quick():
-%  invoke(helper="MterpInvokeVirtualQuickRange")
diff --git a/runtime/interpreter/mterp/mips64/main.S b/runtime/interpreter/mterp/mips64/main.S
deleted file mode 100644
index ac3a4a3..0000000
--- a/runtime/interpreter/mterp/mips64/main.S
+++ /dev/null
@@ -1,745 +0,0 @@
-%def header():
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define zero $$0  /* always zero */
-#define AT   $$at /* assembler temp */
-#define v0   $$2  /* return value */
-#define v1   $$3
-#define a0   $$4  /* argument registers */
-#define a1   $$5
-#define a2   $$6
-#define a3   $$7
-#define a4   $$8  /* expanded register arguments */
-#define a5   $$9
-#define a6   $$10
-#define a7   $$11
-#define ta0  $$8  /* alias */
-#define ta1  $$9
-#define ta2  $$10
-#define ta3  $$11
-#define t0   $$12 /* temp registers (not saved across subroutine calls) */
-#define t1   $$13
-#define t2   $$14
-#define t3   $$15
-
-#define s0   $$16 /* saved across subroutine calls (callee saved) */
-#define s1   $$17
-#define s2   $$18
-#define s3   $$19
-#define s4   $$20
-#define s5   $$21
-#define s6   $$22
-#define s7   $$23
-#define t8   $$24 /* two more temp registers */
-#define t9   $$25
-#define k0   $$26 /* kernel temporary */
-#define k1   $$27
-#define gp   $$28 /* global pointer */
-#define sp   $$29 /* stack pointer */
-#define s8   $$30 /* one more callee saved */
-#define ra   $$31 /* return address */
-
-#define f0   $$f0
-#define f1   $$f1
-#define f2   $$f2
-#define f3   $$f3
-#define f12  $$f12
-#define f13  $$f13
-
-/*
- * It looks like the GNU assembler currently does not support the blec and bgtc
- * idioms, which should translate into bgec and bltc respectively with swapped
- * left and right register operands.
- * TODO: remove these macros when the assembler is fixed.
- */
-.macro blec lreg, rreg, target
-    bgec    \rreg, \lreg, \target
-.endm
-.macro bgtc lreg, rreg, target
-    bltc    \rreg, \lreg, \target
-.endm
-
-/*
-Mterp and MIPS64 notes:
-
-The following registers have fixed assignments:
-
-  reg nick      purpose
-  s0  rPC       interpreted program counter, used for fetching instructions
-  s1  rFP       interpreted frame pointer, used for accessing locals and args
-  s2  rSELF     self (Thread) pointer
-  s3  rINST     first 16-bit code unit of current instruction
-  s4  rIBASE    interpreted instruction base pointer, used for computed goto
-  s5  rREFS     base of object references in shadow frame  (ideally, we'll get rid of this later).
-  s6  rPROFILE  jit profile hotness countdown
-*/
-
-/* During bringup, we'll use the shadow frame model instead of rFP */
-/* single-purpose registers, given names for clarity */
-#define rPC      s0
-#define CFI_DEX  16  // DWARF register number of the register holding dex-pc (s0).
-#define CFI_TMP  4   // DWARF register number of the first argument register (a0).
-#define rFP      s1
-#define rSELF    s2
-#define rINST    s3
-#define rIBASE   s4
-#define rREFS    s5
-#define rPROFILE s6
-
-/*
- * This is a #include, not a %include, because we want the C pre-processor
- * to expand the macros into assembler assignment statements.
- */
-#include "asm_support.h"
-#include "interpreter/cfi_asm_support.h"
-
-/*
- * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
- * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
- */
-#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
-#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
-#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
-#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
-#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
-#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
-#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
-#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
-#define OFF_FP_SHADOWFRAME OFF_FP(0)
-
-#define MTERP_PROFILE_BRANCHES 1
-#define MTERP_LOGGING 0
-
-/*
- * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects.  Must
- * be done *before* something throws.
- *
- * It's okay to do this more than once.
- *
- * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
- * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
- * offset into the code_items_[] array.  For effiency, we will "export" the
- * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
- * to convert to a dex pc when needed.
- */
-.macro EXPORT_PC
-    sd      rPC, OFF_FP_DEX_PC_PTR(rFP)
-.endm
-
-/*
- * Refresh handler table.
- */
-.macro REFRESH_IBASE
-    ld      rIBASE, THREAD_CURRENT_IBASE_OFFSET(rSELF)
-.endm
-
-/*
- * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
- */
-.macro FETCH_INST
-    lhu     rINST, 0(rPC)
-.endm
-
-/* Advance rPC by some number of code units. */
-.macro ADVANCE count
-    daddu   rPC, rPC, (\count) * 2
-.endm
-
-/*
- * Fetch the next instruction from an offset specified by _reg and advance xPC.
- * xPC to point to the next instruction.  "_reg" must specify the distance
- * in bytes, *not* 16-bit code units, and may be a signed value.  Must not set flags.
- *
- */
-.macro FETCH_ADVANCE_INST_RB reg
-    daddu   rPC, rPC, \reg
-    FETCH_INST
-.endm
-
-/*
- * Fetch the next instruction from the specified offset.  Advances rPC
- * to point to the next instruction.
- *
- * This must come AFTER anything that can throw an exception, or the
- * exception catch may miss.  (This also implies that it must come after
- * EXPORT_PC.)
- */
-.macro FETCH_ADVANCE_INST count
-    ADVANCE \count
-    FETCH_INST
-.endm
-
-/*
- * Similar to FETCH_ADVANCE_INST, but does not update rPC.  Used to load
- * rINST ahead of possible exception point.  Be sure to manually advance rPC
- * later.
- */
-.macro PREFETCH_INST count
-    lhu     rINST, ((\count) * 2)(rPC)
-.endm
-
-/*
- * Put the instruction's opcode field into the specified register.
- */
-.macro GET_INST_OPCODE reg
-    and     \reg, rINST, 255
-.endm
-
-/*
- * Begin executing the opcode in _reg.
- */
-.macro GOTO_OPCODE reg
-    .set noat
-    sll     AT, \reg, 7
-    daddu   AT, rIBASE, AT
-    jic     AT, 0
-    .set at
-.endm
-
-/*
- * Get/set the 32-bit value from a Dalvik register.
- * Note, GET_VREG does sign extension to 64 bits while
- * GET_VREG_U does zero extension to 64 bits.
- * One is useful for arithmetic while the other is
- * useful for storing the result value as 64-bit.
- */
-.macro GET_VREG reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lw      \reg, 0(AT)
-    .set at
-.endm
-.macro GET_VREG_U reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lwu     \reg, 0(AT)
-    .set at
-.endm
-.macro GET_VREG_FLOAT reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lwc1    \reg, 0(AT)
-    .set at
-.endm
-.macro SET_VREG reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    sw      \reg, 0(AT)
-    dlsa    AT, \vreg, rREFS, 2
-    sw      zero, 0(AT)
-    .set at
-.endm
-.macro SET_VREG_OBJECT reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    sw      \reg, 0(AT)
-    dlsa    AT, \vreg, rREFS, 2
-    sw      \reg, 0(AT)
-    .set at
-.endm
-.macro SET_VREG_FLOAT reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    swc1    \reg, 0(AT)
-    dlsa    AT, \vreg, rREFS, 2
-    sw      zero, 0(AT)
-    .set at
-.endm
-
-/*
- * Get/set the 64-bit value from a Dalvik register.
- * Avoid unaligned memory accesses.
- * Note, SET_VREG_WIDE clobbers the register containing the value being stored.
- * Note, SET_VREG_DOUBLE clobbers the register containing the Dalvik register number.
- */
-.macro GET_VREG_WIDE reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lw      \reg, 0(AT)
-    lw      AT, 4(AT)
-    dinsu   \reg, AT, 32, 32
-    .set at
-.endm
-.macro GET_VREG_DOUBLE reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    lwc1    \reg, 0(AT)
-    lw      AT, 4(AT)
-    mthc1   AT, \reg
-    .set at
-.endm
-.macro SET_VREG_WIDE reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rFP, 2
-    sw      \reg, 0(AT)
-    drotr32 \reg, \reg, 0
-    sw      \reg, 4(AT)
-    dlsa    AT, \vreg, rREFS, 2
-    sw      zero, 0(AT)
-    sw      zero, 4(AT)
-    .set at
-.endm
-.macro SET_VREG_DOUBLE reg, vreg
-    .set noat
-    dlsa    AT, \vreg, rREFS, 2
-    sw      zero, 0(AT)
-    sw      zero, 4(AT)
-    dlsa    AT, \vreg, rFP, 2
-    swc1    \reg, 0(AT)
-    mfhc1   \vreg, \reg
-    sw      \vreg, 4(AT)
-    .set at
-.endm
-
-/*
- * On-stack offsets for spilling/unspilling callee-saved registers
- * and the frame size.
- */
-#define STACK_OFFSET_RA 0
-#define STACK_OFFSET_GP 8
-#define STACK_OFFSET_S0 16
-#define STACK_OFFSET_S1 24
-#define STACK_OFFSET_S2 32
-#define STACK_OFFSET_S3 40
-#define STACK_OFFSET_S4 48
-#define STACK_OFFSET_S5 56
-#define STACK_OFFSET_S6 64
-#define STACK_SIZE      80    /* needs 16 byte alignment */
-
-/* Constants for float/double_to_int/long conversions */
-#define INT_MIN             0x80000000
-#define INT_MIN_AS_FLOAT    0xCF000000
-#define INT_MIN_AS_DOUBLE   0xC1E0000000000000
-#define LONG_MIN            0x8000000000000000
-#define LONG_MIN_AS_FLOAT   0xDF000000
-#define LONG_MIN_AS_DOUBLE  0xC3E0000000000000
-
-%def entry():
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * Interpreter entry point.
- */
-
-    .set    reorder
-
-    .text
-    .global ExecuteMterpImpl
-    .type   ExecuteMterpImpl, %function
-    .balign 16
-/*
- * On entry:
- *  a0  Thread* self
- *  a1  dex_instructions
- *  a2  ShadowFrame
- *  a3  JValue* result_register
- *
- */
-ExecuteMterpImpl:
-    .cfi_startproc
-    .cpsetup t9, t8, ExecuteMterpImpl
-
-    .cfi_def_cfa sp, 0
-    daddu   sp, sp, -STACK_SIZE
-    .cfi_adjust_cfa_offset STACK_SIZE
-
-    sd      t8, STACK_OFFSET_GP(sp)
-    .cfi_rel_offset 28, STACK_OFFSET_GP
-    sd      ra, STACK_OFFSET_RA(sp)
-    .cfi_rel_offset 31, STACK_OFFSET_RA
-
-    sd      s0, STACK_OFFSET_S0(sp)
-    .cfi_rel_offset 16, STACK_OFFSET_S0
-    sd      s1, STACK_OFFSET_S1(sp)
-    .cfi_rel_offset 17, STACK_OFFSET_S1
-    sd      s2, STACK_OFFSET_S2(sp)
-    .cfi_rel_offset 18, STACK_OFFSET_S2
-    sd      s3, STACK_OFFSET_S3(sp)
-    .cfi_rel_offset 19, STACK_OFFSET_S3
-    sd      s4, STACK_OFFSET_S4(sp)
-    .cfi_rel_offset 20, STACK_OFFSET_S4
-    sd      s5, STACK_OFFSET_S5(sp)
-    .cfi_rel_offset 21, STACK_OFFSET_S5
-    sd      s6, STACK_OFFSET_S6(sp)
-    .cfi_rel_offset 22, STACK_OFFSET_S6
-
-    /* Remember the return register */
-    sd      a3, SHADOWFRAME_RESULT_REGISTER_OFFSET(a2)
-
-    /* Remember the dex instruction pointer */
-    sd      a1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(a2)
-
-    /* set up "named" registers */
-    move    rSELF, a0
-    daddu   rFP, a2, SHADOWFRAME_VREGS_OFFSET
-    lw      v0, SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(a2)
-    dlsa    rREFS, v0, rFP, 2
-    lw      v0, SHADOWFRAME_DEX_PC_OFFSET(a2)
-    dlsa    rPC, v0, a1, 1
-    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
-    EXPORT_PC
-
-    /* Starting ibase */
-    REFRESH_IBASE
-
-    /* Set up for backwards branches & osr profiling */
-    ld      a0, OFF_FP_METHOD(rFP)
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rSELF
-    jal     MterpSetUpHotnessCountdown
-    move    rPROFILE, v0                # Starting hotness countdown to rPROFILE
-
-    /* start executing the instruction at rPC */
-    FETCH_INST
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-
-    /* NOTE: no fallthrough */
-
-%def dchecks_before_helper():
-    // Call C++ to do debug checks and return to the handler using tail call.
-    .extern MterpCheckBefore
-    dla     t9, MterpCheckBefore
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rPC
-    jalr    zero, t9                            # (self, shadow_frame, dex_pc_ptr) Note: tail call.
-
-%def opcode_pre():
-%  add_helper(dchecks_before_helper, "mterp_dchecks_before_helper")
-    #if !defined(NDEBUG)
-    jal    SYMBOL(mterp_dchecks_before_helper)
-    #endif
-
-%def fallback():
-/* Transfer stub to alternate interpreter */
-    b       MterpFallback
-
-%def helpers():
-%  pass
-
-%def footer():
-/*
- * We've detected a condition that will result in an exception, but the exception
- * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
- * TUNING: for consistency, we may want to just go ahead and handle these here.
- */
-
-    .extern MterpLogDivideByZeroException
-common_errDivideByZero:
-    EXPORT_PC
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpLogDivideByZeroException
-#endif
-    b       MterpCommonFallback
-
-    .extern MterpLogArrayIndexException
-common_errArrayIndex:
-    EXPORT_PC
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpLogArrayIndexException
-#endif
-    b       MterpCommonFallback
-
-    .extern MterpLogNullObjectException
-common_errNullObject:
-    EXPORT_PC
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpLogNullObjectException
-#endif
-    b       MterpCommonFallback
-
-/*
- * If we're here, something is out of the ordinary.  If there is a pending
- * exception, handle it.  Otherwise, roll back and retry with the reference
- * interpreter.
- */
-MterpPossibleException:
-    ld      a0, THREAD_EXCEPTION_OFFSET(rSELF)
-    beqzc   a0, MterpFallback                       # If not, fall back to reference interpreter.
-    /* intentional fallthrough - handle pending exception. */
-/*
- * On return from a runtime helper routine, we've found a pending exception.
- * Can we handle it here - or need to bail out to caller?
- *
- */
-    .extern MterpHandleException
-    .extern MterpShouldSwitchInterpreters
-MterpException:
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpHandleException                    # (self, shadow_frame)
-    beqzc   v0, MterpExceptionReturn                # no local catch, back to caller.
-    ld      a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
-    lwu     a1, OFF_FP_DEX_PC(rFP)
-    REFRESH_IBASE
-    dlsa    rPC, a1, a0, 1                          # generate new dex_pc_ptr
-    /* Do we need to switch interpreters? */
-    jal     MterpShouldSwitchInterpreters
-    bnezc   v0, MterpFallback
-    /* resume execution at catch block */
-    EXPORT_PC
-    FETCH_INST
-    GET_INST_OPCODE v0
-    GOTO_OPCODE v0
-    /* NOTE: no fallthrough */
-
-/*
- * Common handling for branches with support for Jit profiling.
- * On entry:
- *    rINST          <= signed offset
- *    rPROFILE       <= signed hotness countdown (expanded to 64 bits)
- *
- * We have quite a few different cases for branch profiling, OSR detection and
- * suspend check support here.
- *
- * Taken backward branches:
- *    If profiling active, do hotness countdown and report if we hit zero.
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *    Is there a pending suspend request?  If so, suspend.
- *
- * Taken forward branches and not-taken backward branches:
- *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
- *
- * Our most common case is expected to be a taken backward branch with active jit profiling,
- * but no full OSR check and no pending suspend request.
- * Next most common case is not-taken branch with no full OSR check.
- *
- */
-MterpCommonTakenBranchNoFlags:
-    bgtzc   rINST, .L_forward_branch    # don't add forward branches to hotness
-/*
- * We need to subtract 1 from positive values and we should not see 0 here,
- * so we may use the result of the comparison with -1.
- */
-    li      v0, JIT_CHECK_OSR
-    beqc    rPROFILE, v0, .L_osr_check
-    bltc    rPROFILE, v0, .L_resume_backward_branch
-    dsubu   rPROFILE, 1
-    beqzc   rPROFILE, .L_add_batch      # counted down to zero - report
-.L_resume_backward_branch:
-    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
-    REFRESH_IBASE
-    daddu   a2, rINST, rINST            # a2<- byte offset
-    FETCH_ADVANCE_INST_RB a2            # update rPC, load rINST
-    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
-    bnezc   ra, .L_suspend_request_pending
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-.L_suspend_request_pending:
-    EXPORT_PC
-    move    a0, rSELF
-    jal     MterpSuspendCheck           # (self)
-    bnezc   v0, MterpFallback
-    REFRESH_IBASE                       # might have changed during suspend
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-.L_no_count_backwards:
-    li      v0, JIT_CHECK_OSR           # check for possible OSR re-entry
-    bnec    rPROFILE, v0, .L_resume_backward_branch
-.L_osr_check:
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    EXPORT_PC
-    jal MterpMaybeDoOnStackReplacement  # (self, shadow_frame, offset)
-    bnezc   v0, MterpOnStackReplacement
-    b       .L_resume_backward_branch
-
-.L_forward_branch:
-    li      v0, JIT_CHECK_OSR           # check for possible OSR re-entry
-    beqc    rPROFILE, v0, .L_check_osr_forward
-.L_resume_forward_branch:
-    daddu   a2, rINST, rINST            # a2<- byte offset
-    FETCH_ADVANCE_INST_RB a2            # update rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-.L_check_osr_forward:
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST
-    EXPORT_PC
-    jal     MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
-    bnezc   v0, MterpOnStackReplacement
-    b       .L_resume_forward_branch
-
-.L_add_batch:
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
-    ld      a0, OFF_FP_METHOD(rFP)
-    move    a2, rSELF
-    jal     MterpAddHotnessBatch        # (method, shadow_frame, self)
-    move    rPROFILE, v0                # restore new hotness countdown to rPROFILE
-    b       .L_no_count_backwards
-
-/*
- * Entered from the conditional branch handlers when OSR check request active on
- * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
- */
-.L_check_not_taken_osr:
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    li      a2, 2
-    EXPORT_PC
-    jal     MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
-    bnezc   v0, MterpOnStackReplacement
-    FETCH_ADVANCE_INST 2
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-/*
- * On-stack replacement has happened, and now we've returned from the compiled method.
- */
-MterpOnStackReplacement:
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rINST                               # rINST contains offset
-    jal     MterpLogOSR
-#endif
-    li      v0, 1                                   # Signal normal return
-    b       MterpDone
-
-/*
- * Bail out to reference interpreter.
- */
-    .extern MterpLogFallback
-MterpFallback:
-    EXPORT_PC
-#if MTERP_LOGGING
-    move    a0, rSELF
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    jal     MterpLogFallback
-#endif
-MterpCommonFallback:
-    li      v0, 0                                   # signal retry with reference interpreter.
-    b       MterpDone
-
-/*
- * We pushed some registers on the stack in ExecuteMterpImpl, then saved
- * SP and RA.  Here we restore SP, restore the registers, and then restore
- * RA to PC.
- *
- * On entry:
- *  uint32_t* rFP  (should still be live, pointer to base of vregs)
- */
-MterpExceptionReturn:
-    li      v0, 1                                   # signal return to caller.
-    b       MterpDone
-/*
- * Returned value is expected in a0 and if it's not 64-bit, the 32 most
- * significant bits of a0 must be zero-extended or sign-extended
- * depending on the return type.
- */
-MterpReturn:
-    ld      a2, OFF_FP_RESULT_REGISTER(rFP)
-    sd      a0, 0(a2)
-    li      v0, 1                                   # signal return to caller.
-MterpDone:
-/*
- * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
- * checking for OSR.  If greater than zero, we might have unreported hotness to register
- * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
- * should only reach zero immediately after a hotness decrement, and is then reset to either
- * a negative special state or the new non-zero countdown value.
- */
-    blez    rPROFILE, .L_pop_and_return # if > 0, we may have some counts to report.
-
-MterpProfileActive:
-    move    rINST, v0                   # stash return value
-    /* Report cached hotness counts */
-    ld      a0, OFF_FP_METHOD(rFP)
-    daddu   a1, rFP, OFF_FP_SHADOWFRAME
-    move    a2, rSELF
-    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
-    jal     MterpAddHotnessBatch        # (method, shadow_frame, self)
-    move    v0, rINST                   # restore return value
-
-.L_pop_and_return:
-    ld      s6, STACK_OFFSET_S6(sp)
-    .cfi_restore 22
-    ld      s5, STACK_OFFSET_S5(sp)
-    .cfi_restore 21
-    ld      s4, STACK_OFFSET_S4(sp)
-    .cfi_restore 20
-    ld      s3, STACK_OFFSET_S3(sp)
-    .cfi_restore 19
-    ld      s2, STACK_OFFSET_S2(sp)
-    .cfi_restore 18
-    ld      s1, STACK_OFFSET_S1(sp)
-    .cfi_restore 17
-    ld      s0, STACK_OFFSET_S0(sp)
-    .cfi_restore 16
-
-    ld      ra, STACK_OFFSET_RA(sp)
-    .cfi_restore 31
-
-    ld      t8, STACK_OFFSET_GP(sp)
-    .cpreturn
-    .cfi_restore 28
-
-    .set    noreorder
-    jr      ra
-    daddu   sp, sp, STACK_SIZE
-    .cfi_adjust_cfa_offset -STACK_SIZE
-
-    .cfi_endproc
-    .set    reorder
-    .size ExecuteMterpImpl, .-ExecuteMterpImpl
-
-%def instruction_end():
-
-    .global artMterpAsmInstructionEnd
-artMterpAsmInstructionEnd:
-
-%def instruction_start():
-
-    .global artMterpAsmInstructionStart
-artMterpAsmInstructionStart = .L_op_nop
-    .text
-
-%def opcode_start():
-%  pass
-%def opcode_end():
-%  pass
-%def helper_start(name):
-    ENTRY ${name}
-%def helper_end(name):
-    END ${name}
diff --git a/runtime/interpreter/mterp/mips64/object.S b/runtime/interpreter/mterp/mips64/object.S
deleted file mode 100644
index a5a2b3d..0000000
--- a/runtime/interpreter/mterp/mips64/object.S
+++ /dev/null
@@ -1,262 +0,0 @@
-%def field(helper=""):
-TODO
-
-%def op_check_cast():
-    /*
-     * Check to see if a cast from one class to another is allowed.
-     */
-    /* check-cast vAA, class//BBBB */
-    .extern MterpCheckCast
-    EXPORT_PC
-    lhu     a0, 2(rPC)                  # a0 <- BBBB
-    srl     a1, rINST, 8                # a1 <- AA
-    dlsa    a1, a1, rFP, 2              # a1 <- &object
-    ld      a2, OFF_FP_METHOD(rFP)      # a2 <- method
-    move    a3, rSELF                   # a3 <- self
-    jal     MterpCheckCast              # (index, &obj, method, self)
-    PREFETCH_INST 2
-    bnez    v0, MterpPossibleException
-    ADVANCE 2
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_iget(is_object="0", helper="MterpIGetU32"):
-%  field(helper=helper)
-
-%def op_iget_boolean():
-%  op_iget(helper="MterpIGetU8")
-
-%def op_iget_boolean_quick():
-%  op_iget_quick(load="lbu")
-
-%def op_iget_byte():
-%  op_iget(helper="MterpIGetI8")
-
-%def op_iget_byte_quick():
-%  op_iget_quick(load="lb")
-
-%def op_iget_char():
-%  op_iget(helper="MterpIGetU16")
-
-%def op_iget_char_quick():
-%  op_iget_quick(load="lhu")
-
-%def op_iget_object():
-%  op_iget(is_object="1", helper="MterpIGetObj")
-
-%def op_iget_object_quick():
-    /* For: iget-object-quick */
-    /* op vA, vB, offset//CCCC */
-    .extern artIGetObjectFromMterp
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    EXPORT_PC
-    GET_VREG_U a0, a2                   # a0 <- object we're operating on
-    jal     artIGetObjectFromMterp      # (obj, offset)
-    ld      a3, THREAD_EXCEPTION_OFFSET(rSELF)
-    ext     a2, rINST, 8, 4             # a2 <- A
-    PREFETCH_INST 2
-    bnez    a3, MterpPossibleException  # bail out
-    SET_VREG_OBJECT v0, a2              # fp[A] <- v0
-    ADVANCE 2                           # advance rPC
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_iget_quick(load="lw"):
-    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick */
-    /* op vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- object we're operating on
-    ext     a4, rINST, 8, 4             # a4 <- A
-    daddu   a1, a1, a3
-    beqz    a3, common_errNullObject    # object was null
-    $load   a0, 0(a1)                   # a0 <- obj.field
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    SET_VREG a0, a4                     # fp[A] <- a0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_iget_short():
-%  op_iget(helper="MterpIGetI16")
-
-%def op_iget_short_quick():
-%  op_iget_quick(load="lh")
-
-%def op_iget_wide():
-%  op_iget(helper="MterpIGetU64")
-
-%def op_iget_wide_quick():
-    /* iget-wide-quick vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a4, 2(rPC)                  # a4 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- object we're operating on
-    ext     a2, rINST, 8, 4             # a2 <- A
-    beqz    a3, common_errNullObject    # object was null
-    daddu   a4, a3, a4                  # create direct pointer
-    lw      a0, 0(a4)
-    lw      a1, 4(a4)
-    dinsu   a0, a1, 32, 32
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    SET_VREG_WIDE a0, a2
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_instance_of():
-    /*
-     * Check to see if an object reference is an instance of a class.
-     *
-     * Most common situation is a non-null object, being compared against
-     * an already-resolved class.
-     */
-    /* instance-of vA, vB, class//CCCC */
-    .extern MterpInstanceOf
-    EXPORT_PC
-    lhu     a0, 2(rPC)                  # a0 <- CCCC
-    srl     a1, rINST, 12               # a1 <- B
-    dlsa    a1, a1, rFP, 2              # a1 <- &object
-    ld      a2, OFF_FP_METHOD(rFP)      # a2 <- method
-    move    a3, rSELF                   # a3 <- self
-    jal     MterpInstanceOf             # (index, &obj, method, self)
-    ld      a1, THREAD_EXCEPTION_OFFSET(rSELF)
-    ext     a2, rINST, 8, 4             # a2 <- A
-    PREFETCH_INST 2
-    bnez    a1, MterpException
-    ADVANCE 2                           # advance rPC
-    SET_VREG v0, a2                     # vA <- v0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_iput(is_object="0", helper="MterpIPutU32"):
-%  field(helper=helper)
-
-%def op_iput_boolean():
-%  op_iput(helper="MterpIPutU8")
-
-%def op_iput_boolean_quick():
-%  op_iput_quick(store="sb")
-
-%def op_iput_byte():
-%  op_iput(helper="MterpIPutI8")
-
-%def op_iput_byte_quick():
-%  op_iput_quick(store="sb")
-
-%def op_iput_char():
-%  op_iput(helper="MterpIPutU16")
-
-%def op_iput_char_quick():
-%  op_iput_quick(store="sh")
-
-%def op_iput_object():
-%  op_iput(is_object="1", helper="MterpIPutObj")
-
-%def op_iput_object_quick():
-    .extern MterpIputObjectQuick
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rPC
-    move    a2, rINST
-    jal     MterpIputObjectQuick
-    beqzc   v0, MterpException
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_iput_quick(store="sw"):
-    /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
-    /* op vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a1, 2(rPC)                  # a1 <- field byte offset
-    GET_VREG_U a3, a2                   # a3 <- fp[B], the object pointer
-    ext     a2, rINST, 8, 4             # a2 <- A
-    beqz    a3, common_errNullObject    # object was null
-    GET_VREG a0, a2                     # a0 <- fp[A]
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    daddu   a1, a1, a3
-    $store  a0, 0(a1)                   # obj.field <- a0
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_iput_short():
-%  op_iput(helper="MterpIPutI16")
-
-%def op_iput_short_quick():
-%  op_iput_quick(store="sh")
-
-%def op_iput_wide():
-%  op_iput(helper="MterpIPutU64")
-
-%def op_iput_wide_quick():
-    /* iput-wide-quick vA, vB, offset//CCCC */
-    srl     a2, rINST, 12               # a2 <- B
-    lhu     a3, 2(rPC)                  # a3 <- field byte offset
-    GET_VREG_U a2, a2                   # a2 <- fp[B], the object pointer
-    ext     a0, rINST, 8, 4             # a0 <- A
-    beqz    a2, common_errNullObject    # object was null
-    GET_VREG_WIDE a0, a0                # a0 <- fp[A]
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    daddu   a1, a2, a3                  # create a direct pointer
-    sw      a0, 0(a1)
-    dsrl32  a0, a0, 0
-    sw      a0, 4(a1)
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_new_instance():
-    /*
-     * Create a new instance of a class.
-     */
-    /* new-instance vAA, class//BBBB */
-    .extern MterpNewInstance
-    EXPORT_PC
-    daddu   a0, rFP, OFF_FP_SHADOWFRAME
-    move    a1, rSELF
-    move    a2, rINST
-    jal     MterpNewInstance            # (shadow_frame, self, inst_data)
-    beqzc   v0, MterpPossibleException
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_sget(is_object="0", helper="MterpSGetU32"):
-%  field(helper=helper)
-
-%def op_sget_boolean():
-%  op_sget(helper="MterpSGetU8")
-
-%def op_sget_byte():
-%  op_sget(helper="MterpSGetI8")
-
-%def op_sget_char():
-%  op_sget(helper="MterpSGetU16")
-
-%def op_sget_object():
-%  op_sget(is_object="1", helper="MterpSGetObj")
-
-%def op_sget_short():
-%  op_sget(helper="MterpSGetI16")
-
-%def op_sget_wide():
-%  op_sget(helper="MterpSGetU64")
-
-%def op_sput(is_object="0", helper="MterpSPutU32"):
-%  field(helper=helper)
-
-%def op_sput_boolean():
-%  op_sput(helper="MterpSPutU8")
-
-%def op_sput_byte():
-%  op_sput(helper="MterpSPutI8")
-
-%def op_sput_char():
-%  op_sput(helper="MterpSPutU16")
-
-%def op_sput_object():
-%  op_sput(is_object="1", helper="MterpSPutObj")
-
-%def op_sput_short():
-%  op_sput(helper="MterpSPutI16")
-
-%def op_sput_wide():
-%  op_sput(helper="MterpSPutU64")
diff --git a/runtime/interpreter/mterp/mips64/other.S b/runtime/interpreter/mterp/mips64/other.S
deleted file mode 100644
index 789efee..0000000
--- a/runtime/interpreter/mterp/mips64/other.S
+++ /dev/null
@@ -1,355 +0,0 @@
-%def const(helper="UndefinedConstHandler"):
-    /* const/class vAA, type@BBBB */
-    /* const/method-handle vAA, method_handle@BBBB */
-    /* const/method-type vAA, proto@BBBB */
-    /* const/string vAA, string@@BBBB */
-    .extern $helper
-    EXPORT_PC
-    lhu     a0, 2(rPC)                  # a0 <- BBBB
-    srl     a1, rINST, 8                # a1 <- AA
-    daddu   a2, rFP, OFF_FP_SHADOWFRAME
-    move    a3, rSELF
-    jal     $helper                     # (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 2                     # load rINST
-    bnez    v0, MterpPossibleException  # let reference interpreter deal with it.
-    ADVANCE 2                           # advance rPC
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def unused():
-/*
- * Bail to reference interpreter to throw.
- */
-    b       MterpFallback
-
-%def op_const():
-    /* const vAA, #+BBBBbbbb */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
-    lh      a1, 4(rPC)                  # a1 <- BBBB (high)
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vAA <- +BBBBbbbb
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_const_16():
-    /* const/16 vAA, #+BBBB */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- sign-extended BBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vAA <- +BBBB
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_const_4():
-    /* const/4 vA, #+B */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    seh     a0, rINST                   # sign extend B in rINST
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    sra     a0, a0, 12                  # shift B into its final position
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vA <- +B
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_const_class():
-%  const(helper="MterpConstClass")
-
-%def op_const_high16():
-    /* const/high16 vAA, #+BBBB0000 */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- BBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    sll     a0, a0, 16                  # a0 <- BBBB0000
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG a0, a2                     # vAA <- +BBBB0000
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_const_method_handle():
-%  const(helper="MterpConstMethodHandle")
-
-%def op_const_method_type():
-%  const(helper="MterpConstMethodType")
-
-%def op_const_string():
-%  const(helper="MterpConstString")
-
-%def op_const_string_jumbo():
-    /* const/string vAA, String//BBBBBBBB */
-    .extern MterpConstString
-    EXPORT_PC
-    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
-    lh      a4, 4(rPC)                  # a4 <- BBBB (high)
-    srl     a1, rINST, 8                # a1 <- AA
-    ins     a0, a4, 16, 16              # a0 <- BBBBbbbb
-    daddu   a2, rFP, OFF_FP_SHADOWFRAME
-    move    a3, rSELF
-    jal     MterpConstString            # (index, tgt_reg, shadow_frame, self)
-    PREFETCH_INST 3                     # load rINST
-    bnez    v0, MterpPossibleException  # let reference interpreter deal with it.
-    ADVANCE 3                           # advance rPC
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_const_wide():
-    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
-    srl     a4, rINST, 8                # a4 <- AA
-    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
-    lh      a1, 4(rPC)                  # a1 <- BBBB (low middle)
-    lh      a2, 6(rPC)                  # a2 <- hhhh (high middle)
-    lh      a3, 8(rPC)                  # a3 <- HHHH (high)
-    FETCH_ADVANCE_INST 5                # advance rPC, load rINST
-    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
-    ins     a2, a3, 16, 16              # a2 = HHHHhhhh
-    dinsu   a0, a2, 32, 32              # a0 = HHHHhhhhBBBBbbbb
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a4                # vAA <- +HHHHhhhhBBBBbbbb
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_const_wide_16():
-    /* const-wide/16 vAA, #+BBBB */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- sign-extended BBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- +BBBB
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_const_wide_32():
-    /* const-wide/32 vAA, #+BBBBbbbb */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- bbbb (low)
-    lh      a1, 4(rPC)                  # a1 <- BBBB (high)
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    ins     a0, a1, 16, 16              # a0 = BBBBbbbb
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- +BBBBbbbb
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_const_wide_high16():
-    /* const-wide/high16 vAA, #+BBBB000000000000 */
-    srl     a2, rINST, 8                # a2 <- AA
-    lh      a0, 2(rPC)                  # a0 <- BBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    dsll32  a0, a0, 16                  # a0 <- BBBB000000000000
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- +BBBB000000000000
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_monitor_enter():
-    /*
-     * Synchronize on an object.
-     */
-    /* monitor-enter vAA */
-    .extern artLockObjectFromCode
-    EXPORT_PC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vAA (object)
-    move    a1, rSELF                   # a1 <- self
-    jal     artLockObjectFromCode
-    bnezc   v0, MterpException
-    FETCH_ADVANCE_INST 1
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_monitor_exit():
-    /*
-     * Unlock an object.
-     *
-     * Exceptions that occur when unlocking a monitor need to appear as
-     * if they happened at the following instruction.  See the Dalvik
-     * instruction spec.
-     */
-    /* monitor-exit vAA */
-    .extern artUnlockObjectFromCode
-    EXPORT_PC
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_U a0, a2                   # a0 <- vAA (object)
-    move    a1, rSELF                   # a1 <- self
-    jal     artUnlockObjectFromCode     # v0 <- success for unlock(self, obj)
-    bnezc   v0, MterpException
-    FETCH_ADVANCE_INST 1                # before throw: advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_move(is_object="0"):
-    /* for move, move-object, long-to-int */
-    /* op vA, vB */
-    ext     a2, rINST, 8, 4             # a2 <- A
-    ext     a3, rINST, 12, 4            # a3 <- B
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_VREG a0, a3                     # a0 <- vB
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT a0, a2              # vA <- vB
-    .else
-    SET_VREG a0, a2                     # vA <- vB
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_move_16(is_object="0"):
-    /* for: move/16, move-object/16 */
-    /* op vAAAA, vBBBB */
-    lhu     a3, 4(rPC)                  # a3 <- BBBB
-    lhu     a2, 2(rPC)                  # a2 <- AAAA
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    GET_VREG a0, a3                     # a0 <- vBBBB
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT a0, a2              # vAAAA <- vBBBB
-    .else
-    SET_VREG a0, a2                     # vAAAA <- vBBBB
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_move_exception():
-    /* move-exception vAA */
-    srl     a2, rINST, 8                # a2 <- AA
-    ld      a0, THREAD_EXCEPTION_OFFSET(rSELF)  # load exception obj
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    SET_VREG_OBJECT a0, a2              # vAA <- exception obj
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    sd      zero, THREAD_EXCEPTION_OFFSET(rSELF)  # clear exception
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_move_from16(is_object="0"):
-    /* for: move/from16, move-object/from16 */
-    /* op vAA, vBBBB */
-    lhu     a3, 2(rPC)                  # a3 <- BBBB
-    srl     a2, rINST, 8                # a2 <- AA
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_VREG a0, a3                     # a0 <- vBBBB
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT a0, a2              # vAA <- vBBBB
-    .else
-    SET_VREG a0, a2                     # vAA <- vBBBB
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_move_object():
-%  op_move(is_object="1")
-
-%def op_move_object_16():
-%  op_move_16(is_object="1")
-
-%def op_move_object_from16():
-%  op_move_from16(is_object="1")
-
-%def op_move_result(is_object="0"):
-    /* for: move-result, move-result-object */
-    /* op vAA */
-    srl     a2, rINST, 8                # a2 <- AA
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    ld      a0, OFF_FP_RESULT_REGISTER(rFP)  # get pointer to result JType
-    lw      a0, 0(a0)                   # a0 <- result.i
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    .if $is_object
-    SET_VREG_OBJECT a0, a2              # vAA <- result
-    .else
-    SET_VREG a0, a2                     # vAA <- result
-    .endif
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_move_result_object():
-%  op_move_result(is_object="1")
-
-%def op_move_result_wide():
-    /* for: move-result-wide */
-    /* op vAA */
-    srl     a2, rINST, 8                # a2 <- AA
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    ld      a0, OFF_FP_RESULT_REGISTER(rFP)  # get pointer to result JType
-    ld      a0, 0(a0)                   # a0 <- result.j
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- result
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_move_wide():
-    /* move-wide vA, vB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    ext     a3, rINST, 12, 4            # a3 <- B
-    ext     a2, rINST, 8, 4             # a2 <- A
-    GET_VREG_WIDE a0, a3                # a0 <- vB
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vA <- vB
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_move_wide_16():
-    /* move-wide/16 vAAAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    lhu     a3, 4(rPC)                  # a3 <- BBBB
-    lhu     a2, 2(rPC)                  # a2 <- AAAA
-    GET_VREG_WIDE a0, a3                # a0 <- vBBBB
-    FETCH_ADVANCE_INST 3                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAAAA <- vBBBB
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_move_wide_from16():
-    /* move-wide/from16 vAA, vBBBB */
-    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
-    lhu     a3, 2(rPC)                  # a3 <- BBBB
-    srl     a2, rINST, 8                # a2 <- AA
-    GET_VREG_WIDE a0, a3                # a0 <- vBBBB
-    FETCH_ADVANCE_INST 2                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    SET_VREG_WIDE a0, a2                # vAA <- vBBBB
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_nop():
-    FETCH_ADVANCE_INST 1                # advance rPC, load rINST
-    GET_INST_OPCODE v0                  # extract opcode from rINST
-    GOTO_OPCODE v0                      # jump to next instruction
-
-%def op_unused_3e():
-%  unused()
-
-%def op_unused_3f():
-%  unused()
-
-%def op_unused_40():
-%  unused()
-
-%def op_unused_41():
-%  unused()
-
-%def op_unused_42():
-%  unused()
-
-%def op_unused_43():
-%  unused()
-
-%def op_unused_79():
-%  unused()
-
-%def op_unused_7a():
-%  unused()
-
-%def op_unused_f3():
-%  unused()
-
-%def op_unused_f4():
-%  unused()
-
-%def op_unused_f5():
-%  unused()
-
-%def op_unused_f6():
-%  unused()
-
-%def op_unused_f7():
-%  unused()
-
-%def op_unused_f8():
-%  unused()
-
-%def op_unused_f9():
-%  unused()
-
-%def op_unused_fc():
-%  unused()
-
-%def op_unused_fd():
-%  unused()
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 6a8f864..c6d3258 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -149,7 +149,6 @@
   return
       runtime->IsStarted() &&
       !runtime->IsAotCompiler() &&
-      !Dbg::IsDebuggerActive() &&
       !runtime->GetInstrumentation()->IsActive() &&
       // mterp only knows how to deal with the normal exits. It cannot handle any of the
       // non-standard force-returns.
@@ -170,7 +169,7 @@
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   return DoInvoke<kVirtual, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
-      self, *shadow_frame, inst, inst_data, result_register);
+      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
 }
 
 extern "C" size_t MterpInvokeSuper(Thread* self,
@@ -181,7 +180,7 @@
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   return DoInvoke<kSuper, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
-      self, *shadow_frame, inst, inst_data, result_register);
+      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
 }
 
 extern "C" size_t MterpInvokeInterface(Thread* self,
@@ -192,7 +191,7 @@
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   return DoInvoke<kInterface, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
-      self, *shadow_frame, inst, inst_data, result_register);
+      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
 }
 
 extern "C" size_t MterpInvokeDirect(Thread* self,
@@ -203,7 +202,7 @@
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   return DoInvoke<kDirect, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
-      self, *shadow_frame, inst, inst_data, result_register);
+      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
 }
 
 extern "C" size_t MterpInvokeStatic(Thread* self,
@@ -214,7 +213,7 @@
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   return DoInvoke<kStatic, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true>(
-      self, *shadow_frame, inst, inst_data, result_register);
+      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
 }
 
 extern "C" size_t MterpInvokeCustom(Thread* self,
@@ -225,7 +224,7 @@
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   return DoInvokeCustom</* is_range= */ false>(
-      self, *shadow_frame, inst, inst_data, result_register);
+      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
 }
 
 extern "C" size_t MterpInvokePolymorphic(Thread* self,
@@ -236,7 +235,7 @@
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   return DoInvokePolymorphic</* is_range= */ false>(
-      self, *shadow_frame, inst, inst_data, result_register);
+      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
 }
 
 extern "C" size_t MterpInvokeVirtualRange(Thread* self,
@@ -247,7 +246,7 @@
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   return DoInvoke<kVirtual, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
-      self, *shadow_frame, inst, inst_data, result_register);
+      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
 }
 
 extern "C" size_t MterpInvokeSuperRange(Thread* self,
@@ -258,7 +257,7 @@
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   return DoInvoke<kSuper, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
-      self, *shadow_frame, inst, inst_data, result_register);
+      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
 }
 
 extern "C" size_t MterpInvokeInterfaceRange(Thread* self,
@@ -269,7 +268,7 @@
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   return DoInvoke<kInterface, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
-      self, *shadow_frame, inst, inst_data, result_register);
+      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
 }
 
 extern "C" size_t MterpInvokeDirectRange(Thread* self,
@@ -280,7 +279,7 @@
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   return DoInvoke<kDirect, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
-      self, *shadow_frame, inst, inst_data, result_register);
+      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
 }
 
 extern "C" size_t MterpInvokeStaticRange(Thread* self,
@@ -291,7 +290,7 @@
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   return DoInvoke<kStatic, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true>(
-      self, *shadow_frame, inst, inst_data, result_register);
+      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
 }
 
 extern "C" size_t MterpInvokeCustomRange(Thread* self,
@@ -301,7 +300,8 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoInvokeCustom</*is_range=*/ true>(self, *shadow_frame, inst, inst_data, result_register);
+  return DoInvokeCustom</*is_range=*/ true>(
+      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
 }
 
 extern "C" size_t MterpInvokePolymorphicRange(Thread* self,
@@ -312,7 +312,7 @@
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   return DoInvokePolymorphic</* is_range= */ true>(
-      self, *shadow_frame, inst, inst_data, result_register);
+      self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
 }
 
 extern "C" size_t MterpInvokeVirtualQuick(Thread* self,
@@ -323,7 +323,7 @@
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   return DoInvoke<kVirtual, /*is_range=*/ false, /*do_access_check=*/ false, /*is_mterp=*/ true,
-      /*is_quick=*/ true>(self, *shadow_frame, inst, inst_data, result_register);
+      /*is_quick=*/ true>(self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
 }
 
 extern "C" size_t MterpInvokeVirtualQuickRange(Thread* self,
@@ -334,7 +334,7 @@
   JValue* result_register = shadow_frame->GetResultRegister();
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   return DoInvoke<kVirtual, /*is_range=*/ true, /*do_access_check=*/ false, /*is_mterp=*/ true,
-      /*is_quick=*/ true>(self, *shadow_frame, inst, inst_data, result_register);
+      /*is_quick=*/ true>(self, *shadow_frame, inst, inst_data, result_register) ? 1u : 0u;
 }
 
 extern "C" void MterpThreadFenceForConstructor() {
@@ -348,10 +348,10 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ObjPtr<mirror::String> s = ResolveString(self, *shadow_frame, dex::StringIndex(index));
   if (UNLIKELY(s == nullptr)) {
-    return true;
+    return 1u;
   }
   shadow_frame->SetVRegReference(tgt_vreg, s);
-  return false;
+  return 0u;
 }
 
 extern "C" size_t MterpConstClass(uint32_t index,
@@ -365,10 +365,10 @@
                                                    /* can_run_clinit= */ false,
                                                    /* verify_access= */ false);
   if (UNLIKELY(c == nullptr)) {
-    return true;
+    return 1u;
   }
   shadow_frame->SetVRegReference(tgt_vreg, c);
-  return false;
+  return 0u;
 }
 
 extern "C" size_t MterpConstMethodHandle(uint32_t index,
@@ -378,10 +378,10 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ObjPtr<mirror::MethodHandle> mh = ResolveMethodHandle(self, index, shadow_frame->GetMethod());
   if (UNLIKELY(mh == nullptr)) {
-    return true;
+    return 1u;
   }
   shadow_frame->SetVRegReference(tgt_vreg, mh);
-  return false;
+  return 0u;
 }
 
 extern "C" size_t MterpConstMethodType(uint32_t index,
@@ -392,10 +392,10 @@
   ObjPtr<mirror::MethodType> mt =
       ResolveMethodType(self, dex::ProtoIndex(index), shadow_frame->GetMethod());
   if (UNLIKELY(mt == nullptr)) {
-    return true;
+    return 1u;
   }
   shadow_frame->SetVRegReference(tgt_vreg, mt);
-  return false;
+  return 0u;
 }
 
 extern "C" size_t MterpCheckCast(uint32_t index,
@@ -409,15 +409,15 @@
                                                    false,
                                                    false);
   if (UNLIKELY(c == nullptr)) {
-    return true;
+    return 1u;
   }
   // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
   ObjPtr<mirror::Object> obj = vreg_addr->AsMirrorPtr();
   if (UNLIKELY(obj != nullptr && !obj->InstanceOf(c))) {
     ThrowClassCastException(c, obj->GetClass());
-    return true;
+    return 1u;
   }
-  return false;
+  return 0u;
 }
 
 extern "C" size_t MterpInstanceOf(uint32_t index,
@@ -431,17 +431,17 @@
                                                    false,
                                                    false);
   if (UNLIKELY(c == nullptr)) {
-    return false;  // Caller will check for pending exception.  Return value unimportant.
+    return 0u;  // Caller will check for pending exception.  Return value unimportant.
   }
   // Must load obj from vreg following ResolveVerifyAndClinit due to moving gc.
   ObjPtr<mirror::Object> obj = vreg_addr->AsMirrorPtr();
-  return (obj != nullptr) && obj->InstanceOf(c);
+  return (obj != nullptr) && obj->InstanceOf(c) ? 1u : 0u;
 }
 
 extern "C" size_t MterpFillArrayData(mirror::Object* obj,
                                      const Instruction::ArrayDataPayload* payload)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  return FillArrayData(obj, payload);
+  return FillArrayData(obj, payload) ? 1u : 0u;
 }
 
 extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32_t inst_data)
@@ -456,19 +456,17 @@
   if (LIKELY(c != nullptr)) {
     if (UNLIKELY(c->IsStringClass())) {
       gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
-      obj = mirror::String::AllocEmptyString<true>(self, allocator_type);
+      obj = mirror::String::AllocEmptyString(self, allocator_type);
     } else {
-      obj = AllocObjectFromCode<true>(c,
-                                      self,
-                                      Runtime::Current()->GetHeap()->GetCurrentAllocator());
+      obj = AllocObjectFromCode(c, self, Runtime::Current()->GetHeap()->GetCurrentAllocator());
     }
   }
   if (UNLIKELY(obj == nullptr)) {
-    return false;
+    return 0u;
   }
   obj->GetClass()->AssertInitializedOrInitializingInThread(self);
   shadow_frame->SetVRegReference(inst->VRegA_21c(inst_data), obj);
-  return true;
+  return 1u;
 }
 
 extern "C" size_t MterpIputObjectQuick(ShadowFrame* shadow_frame,
@@ -476,7 +474,7 @@
                                        uint32_t inst_data)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const Instruction* inst = Instruction::At(dex_pc_ptr);
-  return DoIPutQuick<Primitive::kPrimNot, false>(*shadow_frame, inst, inst_data);
+  return DoIPutQuick<Primitive::kPrimNot, false>(*shadow_frame, inst, inst_data) ? 1u : 0u;
 }
 
 extern "C" size_t MterpAputObject(ShadowFrame* shadow_frame,
@@ -486,16 +484,16 @@
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   ObjPtr<mirror::Object> a = shadow_frame->GetVRegReference(inst->VRegB_23x());
   if (UNLIKELY(a == nullptr)) {
-    return false;
+    return 0u;
   }
   int32_t index = shadow_frame->GetVReg(inst->VRegC_23x());
   ObjPtr<mirror::Object> val = shadow_frame->GetVRegReference(inst->VRegA_23x(inst_data));
   ObjPtr<mirror::ObjectArray<mirror::Object>> array = a->AsObjectArray<mirror::Object>();
   if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
     array->SetWithoutChecks<false>(index, val);
-    return true;
+    return 1u;
   }
-  return false;
+  return 0u;
 }
 
 extern "C" size_t MterpFilledNewArray(ShadowFrame* shadow_frame,
@@ -504,7 +502,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   return DoFilledNewArray<false, false, false>(inst, *shadow_frame, self,
-                                               shadow_frame->GetResultRegister());
+                                               shadow_frame->GetResultRegister()) ? 1u : 0u;
 }
 
 extern "C" size_t MterpFilledNewArrayRange(ShadowFrame* shadow_frame,
@@ -513,7 +511,7 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   return DoFilledNewArray<true, false, false>(inst, *shadow_frame, self,
-                                              shadow_frame->GetResultRegister());
+                                              shadow_frame->GetResultRegister()) ? 1u : 0u;
 }
 
 extern "C" size_t MterpNewArray(ShadowFrame* shadow_frame,
@@ -522,14 +520,14 @@
     REQUIRES_SHARED(Locks::mutator_lock_) {
   const Instruction* inst = Instruction::At(dex_pc_ptr);
   int32_t length = shadow_frame->GetVReg(inst->VRegB_22c(inst_data));
-  ObjPtr<mirror::Object> obj = AllocArrayFromCode<false, true>(
+  ObjPtr<mirror::Object> obj = AllocArrayFromCode</*kAccessCheck=*/ false>(
       dex::TypeIndex(inst->VRegC_22c()), length, shadow_frame->GetMethod(), self,
       Runtime::Current()->GetHeap()->GetCurrentAllocator());
   if (UNLIKELY(obj == nullptr)) {
-      return false;
+      return 0u;
   }
   shadow_frame->SetVRegReference(inst->VRegA_22c(inst_data), obj);
-  return true;
+  return 1u;
 }
 
 extern "C" size_t MterpHandleException(Thread* self, ShadowFrame* shadow_frame)
@@ -537,7 +535,7 @@
   DCHECK(self->IsExceptionPending());
   const instrumentation::Instrumentation* const instrumentation =
       Runtime::Current()->GetInstrumentation();
-  return MoveToExceptionHandler(self, *shadow_frame, instrumentation);
+  return MoveToExceptionHandler(self, *shadow_frame, instrumentation) ? 1u : 0u;
 }
 
 struct MterpCheckHelper {
@@ -794,8 +792,8 @@
     uint32_t field_idx = kIsStatic ? inst->VRegB_21c() : inst->VRegC_22c();
     ArtField* field = dex_cache->GetResolvedField(field_idx, kRuntimePointerSize);
     if (LIKELY(field != nullptr)) {
-      bool initialized = !kIsStatic || field->GetDeclaringClass()->IsInitialized();
-      if (LIKELY(initialized)) {
+      bool visibly_initialized = !kIsStatic || field->GetDeclaringClass()->IsVisiblyInitialized();
+      if (LIKELY(visibly_initialized)) {
         DCHECK_EQ(field, (FindFieldFromCode<kAccessType, /* access_checks= */ false>(
             field_idx, referrer, self, sizeof(PrimType))));
         ObjPtr<mirror::Object> obj = kIsStatic
@@ -958,7 +956,7 @@
     did_osr = jit::Jit::MaybeDoOnStackReplacement(self, method, dex_pc, offset, result);
   }
   shadow_frame->SetCachedHotnessCountdown(osr_countdown);
-  return did_osr;
+  return did_osr ? 1u : 0u;
 }
 
 }  // namespace interpreter
diff --git a/runtime/interpreter/mterp/mterp.h b/runtime/interpreter/mterp/mterp.h
index af52758..7813fca 100644
--- a/runtime/interpreter/mterp/mterp.h
+++ b/runtime/interpreter/mterp/mterp.h
@@ -20,21 +20,32 @@
 #include <cstddef>
 #include <cstdint>
 
+#include "base/globals.h"
+
 /*
  * Mterp assembly handler bases
  */
 extern "C" void* artMterpAsmInstructionStart[];
 extern "C" void* artMterpAsmInstructionEnd[];
 
+extern "C" void* artNterpAsmInstructionStart[];
+extern "C" void* artNterpAsmInstructionEnd[];
+
 namespace art {
 
+class ArtMethod;
 class Thread;
 
 namespace interpreter {
 
 void InitMterpTls(Thread* self);
 void CheckMterpAsmConstants();
+void CheckNterpAsmConstants();
 bool CanUseMterp();
+bool IsNterpSupported();
+bool CanRuntimeUseNterp();
+bool CanMethodUseNterp(ArtMethod* method);
+const void* GetNterpEntryPoint();
 
 // Poison value for TestExportPC.  If we segfault with this value, it means that a mterp
 // handler for a recent opcode failed to export the Dalvik PC prior to a possible exit from
@@ -45,6 +56,9 @@
 
 constexpr size_t kMterpHandlerSize = 128;
 
+// The maximum we will allow an nterp frame to be.
+constexpr size_t kMaxNterpFrame = 3 * KB;
+
 }  // namespace interpreter
 }  // namespace art
 
diff --git a/runtime/interpreter/mterp/nterp.cc b/runtime/interpreter/mterp/nterp.cc
new file mode 100644
index 0000000..d51b0ae
--- /dev/null
+++ b/runtime/interpreter/mterp/nterp.cc
@@ -0,0 +1,606 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Mterp entry point and support functions.
+ */
+#include "mterp.h"
+
+#include "base/quasi_atomic.h"
+#include "dex/dex_instruction_utils.h"
+#include "debugger.h"
+#include "entrypoints/entrypoint_utils-inl.h"
+#include "interpreter/interpreter_common.h"
+#include "interpreter/interpreter_intrinsics.h"
+#include "interpreter/shadow_frame-inl.h"
+#include "mirror/string-alloc-inl.h"
+#include "nterp_helpers.h"
+
+namespace art {
+namespace interpreter {
+
+bool IsNterpSupported() {
+  return !kPoisonHeapReferences && kUseReadBarrier;
+}
+
+bool CanRuntimeUseNterp() REQUIRES_SHARED(Locks::mutator_lock_) {
+  // Nterp has the same restrictions as Mterp.
+  return IsNterpSupported() && CanUseMterp();
+}
+
+bool CanMethodUseNterp(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
+  return method->SkipAccessChecks() &&
+      !method->IsNative() &&
+      method->GetDexFile()->IsStandardDexFile() &&
+      NterpGetFrameSize(method) < kMaxNterpFrame;
+}
+
+const void* GetNterpEntryPoint() {
+  return reinterpret_cast<const void*>(interpreter::ExecuteNterpImpl);
+}
+
+/*
+ * Verify some constants used by the nterp interpreter.
+ */
+void CheckNterpAsmConstants() {
+  /*
+   * If we're using computed goto instruction transitions, make sure
+   * none of the handlers overflows the byte limit.  This won't tell
+   * which one did, but if any one is too big the total size will
+   * overflow.
+   */
+  const int width = kMterpHandlerSize;
+  ptrdiff_t interp_size = reinterpret_cast<uintptr_t>(artNterpAsmInstructionEnd) -
+                          reinterpret_cast<uintptr_t>(artNterpAsmInstructionStart);
+  if ((interp_size == 0) || (interp_size != (art::kNumPackedOpcodes * width))) {
+      LOG(FATAL) << "ERROR: unexpected asm interp size " << interp_size
+                 << "(did an instruction handler exceed " << width << " bytes?)";
+  }
+}
+
+template<typename T>
+inline void UpdateCache(Thread* self, uint16_t* dex_pc_ptr, T value) {
+  DCHECK(kUseReadBarrier) << "Nterp only works with read barriers";
+  // For simplicity, only update the cache if weak ref accesses are enabled. If
+  // they are disabled, this means the GC is processing the cache, and is
+  // reading it concurrently.
+  if (self->GetWeakRefAccessEnabled()) {
+    self->GetInterpreterCache()->Set(dex_pc_ptr, value);
+  }
+}
+
+template<typename T>
+inline void UpdateCache(Thread* self, uint16_t* dex_pc_ptr, T* value) {
+  UpdateCache(self, dex_pc_ptr, reinterpret_cast<size_t>(value));
+}
+
+extern "C" const dex::CodeItem* NterpGetCodeItem(ArtMethod* method)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ScopedAssertNoThreadSuspension sants("In nterp");
+  return method->GetCodeItem();
+}
+
+extern "C" const char* NterpGetShorty(ArtMethod* method)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ScopedAssertNoThreadSuspension sants("In nterp");
+  return method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetShorty();
+}
+
+extern "C" const char* NterpGetShortyFromMethodId(ArtMethod* caller, uint32_t method_index)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ScopedAssertNoThreadSuspension sants("In nterp");
+  return caller->GetDexFile()->GetMethodShorty(method_index);
+}
+
+extern "C" const char* NterpGetShortyFromInvokePolymorphic(ArtMethod* caller, uint16_t* dex_pc_ptr)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ScopedAssertNoThreadSuspension sants("In nterp");
+  const Instruction* inst = Instruction::At(dex_pc_ptr);
+  dex::ProtoIndex proto_idx(inst->Opcode() == Instruction::INVOKE_POLYMORPHIC
+      ? inst->VRegH_45cc()
+      : inst->VRegH_4rcc());
+  return caller->GetDexFile()->GetShorty(proto_idx);
+}
+
+extern "C" const char* NterpGetShortyFromInvokeCustom(ArtMethod* caller, uint16_t* dex_pc_ptr)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ScopedAssertNoThreadSuspension sants("In nterp");
+  const Instruction* inst = Instruction::At(dex_pc_ptr);
+  uint16_t call_site_index = (inst->Opcode() == Instruction::INVOKE_CUSTOM
+      ? inst->VRegB_35c()
+      : inst->VRegB_3rc());
+  const DexFile* dex_file = caller->GetDexFile();
+  dex::ProtoIndex proto_idx = dex_file->GetProtoIndexForCallSite(call_site_index);
+  return dex_file->GetShorty(proto_idx);
+}
+
+extern "C" size_t NterpGetMethod(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  const Instruction* inst = Instruction::At(dex_pc_ptr);
+  InvokeType invoke_type = kStatic;
+  uint16_t method_index = 0;
+  switch (inst->Opcode()) {
+    case Instruction::INVOKE_DIRECT: {
+      method_index = inst->VRegB_35c();
+      invoke_type = kDirect;
+      break;
+    }
+
+    case Instruction::INVOKE_INTERFACE: {
+      method_index = inst->VRegB_35c();
+      invoke_type = kInterface;
+      break;
+    }
+
+    case Instruction::INVOKE_STATIC: {
+      method_index = inst->VRegB_35c();
+      invoke_type = kStatic;
+      break;
+    }
+
+    case Instruction::INVOKE_SUPER: {
+      method_index = inst->VRegB_35c();
+      invoke_type = kSuper;
+      break;
+    }
+    case Instruction::INVOKE_VIRTUAL: {
+      method_index = inst->VRegB_35c();
+      invoke_type = kVirtual;
+      break;
+    }
+
+    case Instruction::INVOKE_DIRECT_RANGE: {
+      method_index = inst->VRegB_3rc();
+      invoke_type = kDirect;
+      break;
+    }
+
+    case Instruction::INVOKE_INTERFACE_RANGE: {
+      method_index = inst->VRegB_3rc();
+      invoke_type = kInterface;
+      break;
+    }
+
+    case Instruction::INVOKE_STATIC_RANGE: {
+      method_index = inst->VRegB_3rc();
+      invoke_type = kStatic;
+      break;
+    }
+
+    case Instruction::INVOKE_SUPER_RANGE: {
+      method_index = inst->VRegB_3rc();
+      invoke_type = kSuper;
+      break;
+    }
+
+    case Instruction::INVOKE_VIRTUAL_RANGE: {
+      method_index = inst->VRegB_3rc();
+      invoke_type = kVirtual;
+      break;
+    }
+
+    default:
+      LOG(FATAL) << "Unknown instruction " << inst->Opcode();
+  }
+
+  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+  ArtMethod* resolved_method = caller->SkipAccessChecks()
+      ? class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>(
+            self, method_index, caller, invoke_type)
+      : class_linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
+            self, method_index, caller, invoke_type);
+  if (resolved_method == nullptr) {
+    DCHECK(self->IsExceptionPending());
+    return 0;
+  }
+
+  // ResolveMethod returns the method based on the method_id. For super invokes
+  // we must use the executing class's context to find the right method.
+  if (invoke_type == kSuper) {
+    ObjPtr<mirror::Class> executing_class = caller->GetDeclaringClass();
+    ObjPtr<mirror::Class> referenced_class = class_linker->LookupResolvedType(
+        executing_class->GetDexFile().GetMethodId(method_index).class_idx_,
+        executing_class->GetDexCache(),
+        executing_class->GetClassLoader());
+    DCHECK(referenced_class != nullptr);  // We have already resolved a method from this class.
+    if (!referenced_class->IsAssignableFrom(executing_class)) {
+      // We cannot determine the target method.
+      ThrowNoSuchMethodError(invoke_type,
+                             resolved_method->GetDeclaringClass(),
+                             resolved_method->GetName(),
+                             resolved_method->GetSignature());
+      return 0;
+    }
+    if (referenced_class->IsInterface()) {
+      resolved_method = referenced_class->FindVirtualMethodForInterfaceSuper(
+          resolved_method, class_linker->GetImagePointerSize());
+    } else {
+      uint16_t vtable_index = resolved_method->GetMethodIndex();
+      ObjPtr<mirror::Class> super_class = executing_class->GetSuperClass();
+      if (super_class == nullptr ||
+          !super_class->HasVTable() ||
+          vtable_index >= static_cast<uint32_t>(super_class->GetVTableLength())) {
+        // Behavior to agree with that of the verifier.
+        ThrowNoSuchMethodError(invoke_type,
+                               resolved_method->GetDeclaringClass(),
+                               resolved_method->GetName(),
+                               resolved_method->GetSignature());
+        return 0;
+      } else {
+        resolved_method = executing_class->GetSuperClass()->GetVTableEntry(
+            vtable_index, class_linker->GetImagePointerSize());
+      }
+    }
+  }
+
+  if (invoke_type == kInterface) {
+    if (resolved_method->GetDeclaringClass()->IsObjectClass()) {
+      // Don't update the cache and return a value with high bit set to notify the
+      // interpreter it should do a vtable call instead.
+      DCHECK_LT(resolved_method->GetMethodIndex(), 0x10000);
+      return resolved_method->GetMethodIndex() | (1U << 31);
+    } else {
+      DCHECK(resolved_method->GetDeclaringClass()->IsInterface());
+      UpdateCache(self, dex_pc_ptr, resolved_method->GetImtIndex());
+      return resolved_method->GetImtIndex();
+    }
+  } else if (resolved_method->GetDeclaringClass()->IsStringClass()
+             && !resolved_method->IsStatic()
+             && resolved_method->IsConstructor()) {
+    resolved_method = WellKnownClasses::StringInitToStringFactory(resolved_method);
+    // Or the result with 1 to notify to nterp this is a string init method. We
+    // also don't cache the result as we don't want nterp to have its fast path always
+    // check for it, and we expect a lot more regular calls than string init
+    // calls.
+    return reinterpret_cast<size_t>(resolved_method) | 1;
+  } else if (invoke_type == kVirtual) {
+    UpdateCache(self, dex_pc_ptr, resolved_method->GetMethodIndex());
+    return resolved_method->GetMethodIndex();
+  } else {
+    UpdateCache(self, dex_pc_ptr, resolved_method);
+    return reinterpret_cast<size_t>(resolved_method);
+  }
+}
+
+static ArtField* ResolveFieldWithAccessChecks(Thread* self,
+                                              ClassLinker* class_linker,
+                                              uint16_t field_index,
+                                              ArtMethod* caller,
+                                              bool is_static,
+                                              bool is_put)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (caller->SkipAccessChecks()) {
+    return class_linker->ResolveField(field_index, caller, is_static);
+  }
+
+  caller = caller->GetInterfaceMethodIfProxy(kRuntimePointerSize);
+
+  StackHandleScope<2> hs(self);
+  Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(caller->GetDexCache()));
+  Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(caller->GetClassLoader()));
+
+  ArtField* resolved_field = class_linker->ResolveFieldJLS(field_index,
+                                                           h_dex_cache,
+                                                           h_class_loader);
+  if (resolved_field == nullptr) {
+    return nullptr;
+  }
+
+  ObjPtr<mirror::Class> fields_class = resolved_field->GetDeclaringClass();
+  if (UNLIKELY(resolved_field->IsStatic() != is_static)) {
+    ThrowIncompatibleClassChangeErrorField(resolved_field, is_static, caller);
+    return nullptr;
+  }
+  ObjPtr<mirror::Class> referring_class = caller->GetDeclaringClass();
+  if (UNLIKELY(!referring_class->CheckResolvedFieldAccess(fields_class,
+                                                          resolved_field,
+                                                          caller->GetDexCache(),
+                                                          field_index))) {
+    return nullptr;
+  }
+  if (UNLIKELY(is_put && resolved_field->IsFinal() && (fields_class != referring_class))) {
+    ThrowIllegalAccessErrorFinalField(caller, resolved_field);
+    return nullptr;
+  }
+  return resolved_field;
+}
+
+extern "C" size_t NterpGetStaticField(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  const Instruction* inst = Instruction::At(dex_pc_ptr);
+  uint16_t field_index = inst->VRegB_21c();
+  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+  ArtField* resolved_field = ResolveFieldWithAccessChecks(
+      self,
+      class_linker,
+      field_index,
+      caller,
+      /* is_static */ true,
+      /* is_put */ IsInstructionSPut(inst->Opcode()));
+
+  if (resolved_field == nullptr) {
+    DCHECK(self->IsExceptionPending());
+    return 0;
+  }
+  if (UNLIKELY(!resolved_field->GetDeclaringClass()->IsVisiblyInitialized())) {
+    StackHandleScope<1> hs(self);
+    Handle<mirror::Class> h_class(hs.NewHandle(resolved_field->GetDeclaringClass()));
+    if (UNLIKELY(!class_linker->EnsureInitialized(
+                      self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
+      DCHECK(self->IsExceptionPending());
+      return 0;
+    }
+    DCHECK(h_class->IsInitializing());
+  }
+  if (resolved_field->IsVolatile()) {
+    // Or the result with 1 to notify to nterp this is a volatile field. We
+    // also don't cache the result as we don't want nterp to have its fast path always
+    // check for it.
+    return reinterpret_cast<size_t>(resolved_field) | 1;
+  } else {
+    UpdateCache(self, dex_pc_ptr, resolved_field);
+    return reinterpret_cast<size_t>(resolved_field);
+  }
+}
+
+extern "C" uint32_t NterpGetInstanceFieldOffset(Thread* self,
+                                                ArtMethod* caller,
+                                                uint16_t* dex_pc_ptr)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  const Instruction* inst = Instruction::At(dex_pc_ptr);
+  uint16_t field_index = inst->VRegC_22c();
+  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+  ArtField* resolved_field = ResolveFieldWithAccessChecks(
+      self,
+      class_linker,
+      field_index,
+      caller,
+      /* is_static */ false,
+      /* is_put */ IsInstructionIPut(inst->Opcode()));
+  if (resolved_field == nullptr) {
+    DCHECK(self->IsExceptionPending());
+    return 0;
+  }
+  if (resolved_field->IsVolatile()) {
+    // Don't cache for a volatile field, and return a negative offset as marker
+    // of volatile.
+    return -resolved_field->GetOffset().Uint32Value();
+  }
+  UpdateCache(self, dex_pc_ptr, resolved_field->GetOffset().Uint32Value());
+  return resolved_field->GetOffset().Uint32Value();
+}
+
+extern "C" mirror::Object* NterpGetClassOrAllocateObject(Thread* self,
+                                                         ArtMethod* caller,
+                                                         uint16_t* dex_pc_ptr)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  const Instruction* inst = Instruction::At(dex_pc_ptr);
+  dex::TypeIndex index;
+  switch (inst->Opcode()) {
+    case Instruction::NEW_INSTANCE:
+      index = dex::TypeIndex(inst->VRegB_21c());
+      break;
+    case Instruction::CHECK_CAST:
+      index = dex::TypeIndex(inst->VRegB_21c());
+      break;
+    case Instruction::INSTANCE_OF:
+      index = dex::TypeIndex(inst->VRegC_22c());
+      break;
+    case Instruction::CONST_CLASS:
+      index = dex::TypeIndex(inst->VRegB_21c());
+      break;
+    case Instruction::NEW_ARRAY:
+      index = dex::TypeIndex(inst->VRegC_22c());
+      break;
+    default:
+      LOG(FATAL) << "Unreachable";
+  }
+  ObjPtr<mirror::Class> c =
+      ResolveVerifyAndClinit(index,
+                             caller,
+                             self,
+                             /* can_run_clinit= */ false,
+                             /* verify_access= */ !caller->SkipAccessChecks());
+  if (c == nullptr) {
+    DCHECK(self->IsExceptionPending());
+    return nullptr;
+  }
+
+  if (inst->Opcode() == Instruction::NEW_INSTANCE) {
+    gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
+    if (UNLIKELY(c->IsStringClass())) {
+      // We don't cache the class for strings as we need to special case their
+      // allocation.
+      return mirror::String::AllocEmptyString(self, allocator_type).Ptr();
+    } else {
+      if (!c->IsFinalizable() && c->IsInstantiable()) {
+        // Cache non-finalizable classes for next calls.
+        UpdateCache(self, dex_pc_ptr, c.Ptr());
+      }
+      return AllocObjectFromCode(c, self, allocator_type).Ptr();
+    }
+  } else {
+    // For all other cases, cache the class.
+    UpdateCache(self, dex_pc_ptr, c.Ptr());
+  }
+  return c.Ptr();
+}
+
+extern "C" mirror::Object* NterpLoadObject(Thread* self, ArtMethod* caller, uint16_t* dex_pc_ptr)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  const Instruction* inst = Instruction::At(dex_pc_ptr);
+  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+  switch (inst->Opcode()) {
+    case Instruction::CONST_STRING:
+    case Instruction::CONST_STRING_JUMBO: {
+      dex::StringIndex string_index(
+          (inst->Opcode() == Instruction::CONST_STRING)
+              ? inst->VRegB_21c()
+              : inst->VRegB_31c());
+      ObjPtr<mirror::String> str = class_linker->ResolveString(string_index, caller);
+      if (str == nullptr) {
+        DCHECK(self->IsExceptionPending());
+        return nullptr;
+      }
+      UpdateCache(self, dex_pc_ptr, str.Ptr());
+      return str.Ptr();
+    }
+    case Instruction::CONST_METHOD_HANDLE: {
+      // Don't cache: we don't expect this to be performance sensitive, and we
+      // don't want the cache to conflict with a performance sensitive entry.
+      return class_linker->ResolveMethodHandle(self, inst->VRegB_21c(), caller).Ptr();
+    }
+    case Instruction::CONST_METHOD_TYPE: {
+      // Don't cache: we don't expect this to be performance sensitive, and we
+      // don't want the cache to conflict with a performance sensitive entry.
+      return class_linker->ResolveMethodType(
+          self, dex::ProtoIndex(inst->VRegB_21c()), caller).Ptr();
+    }
+    default:
+      LOG(FATAL) << "Unreachable";
+  }
+  return nullptr;
+}
+
+extern "C" void NterpUnimplemented() {
+  LOG(FATAL) << "Unimplemented";
+}
+
+static mirror::Object* DoFilledNewArray(Thread* self,
+                                        ArtMethod* caller,
+                                        uint16_t* dex_pc_ptr,
+                                        int32_t* regs,
+                                        bool is_range)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  const Instruction* inst = Instruction::At(dex_pc_ptr);
+  if (kIsDebugBuild) {
+    if (is_range) {
+      DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY_RANGE);
+    } else {
+      DCHECK_EQ(inst->Opcode(), Instruction::FILLED_NEW_ARRAY);
+    }
+  }
+  const int32_t length = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
+  DCHECK_GE(length, 0);
+  if (!is_range) {
+    // Checks FILLED_NEW_ARRAY's length does not exceed 5 arguments.
+    DCHECK_LE(length, 5);
+  }
+  uint16_t type_idx = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
+  ObjPtr<mirror::Class> array_class = ResolveVerifyAndClinit(dex::TypeIndex(type_idx),
+                                                             caller,
+                                                             self,
+                                                             /* can_run_clinit= */ true,
+                                                             /* verify_access= */ false);
+  if (UNLIKELY(array_class == nullptr)) {
+    DCHECK(self->IsExceptionPending());
+    return nullptr;
+  }
+  DCHECK(array_class->IsArrayClass());
+  ObjPtr<mirror::Class> component_class = array_class->GetComponentType();
+  const bool is_primitive_int_component = component_class->IsPrimitiveInt();
+  if (UNLIKELY(component_class->IsPrimitive() && !is_primitive_int_component)) {
+    if (component_class->IsPrimitiveLong() || component_class->IsPrimitiveDouble()) {
+      ThrowRuntimeException("Bad filled array request for type %s",
+                            component_class->PrettyDescriptor().c_str());
+    } else {
+      self->ThrowNewExceptionF(
+          "Ljava/lang/InternalError;",
+          "Found type %s; filled-new-array not implemented for anything but 'int'",
+          component_class->PrettyDescriptor().c_str());
+    }
+    return nullptr;
+  }
+  ObjPtr<mirror::Object> new_array = mirror::Array::Alloc(
+      self,
+      array_class,
+      length,
+      array_class->GetComponentSizeShift(),
+      Runtime::Current()->GetHeap()->GetCurrentAllocator());
+  if (UNLIKELY(new_array == nullptr)) {
+    self->AssertPendingOOMException();
+    return nullptr;
+  }
+  uint32_t arg[Instruction::kMaxVarArgRegs];  // only used in filled-new-array.
+  uint32_t vregC = 0;   // only used in filled-new-array-range.
+  if (is_range) {
+    vregC = inst->VRegC_3rc();
+  } else {
+    inst->GetVarArgs(arg);
+  }
+  for (int32_t i = 0; i < length; ++i) {
+    size_t src_reg = is_range ? vregC + i : arg[i];
+    if (is_primitive_int_component) {
+      new_array->AsIntArray()->SetWithoutChecks</* kTransactionActive= */ false>(i, regs[src_reg]);
+    } else {
+      new_array->AsObjectArray<mirror::Object>()->SetWithoutChecks</* kTransactionActive= */ false>(
+          i, reinterpret_cast<mirror::Object*>(regs[src_reg]));
+    }
+  }
+  return new_array.Ptr();
+}
+
+extern "C" mirror::Object* NterpFilledNewArray(Thread* self,
+                                               ArtMethod* caller,
+                                               int32_t* registers,
+                                               uint16_t* dex_pc_ptr)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  return DoFilledNewArray(self, caller, dex_pc_ptr, registers, /* is_range= */ false);
+}
+
+extern "C" mirror::Object* NterpFilledNewArrayRange(Thread* self,
+                                                    ArtMethod* caller,
+                                                    int32_t* registers,
+                                                    uint16_t* dex_pc_ptr)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  return DoFilledNewArray(self, caller, dex_pc_ptr, registers, /* is_range= */ true);
+}
+
+extern "C" jit::OsrData* NterpHotMethod(ArtMethod* method, uint16_t* dex_pc_ptr, uint32_t* vregs)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ScopedAssertNoThreadSuspension sants("In nterp");
+  jit::Jit* jit = Runtime::Current()->GetJit();
+  if (jit != nullptr) {
+    // Nterp passes null on entry where we don't want to OSR.
+    if (dex_pc_ptr != nullptr) {
+      // This could be a loop back edge, check if we can OSR.
+      CodeItemInstructionAccessor accessor(method->DexInstructions());
+      uint32_t dex_pc = dex_pc_ptr - accessor.Insns();
+      jit::OsrData* osr_data = jit->PrepareForOsr(
+          method->GetInterfaceMethodIfProxy(kRuntimePointerSize), dex_pc, vregs);
+      if (osr_data != nullptr) {
+        return osr_data;
+      }
+    }
+    jit->EnqueueCompilationFromNterp(method, Thread::Current());
+  }
+  return nullptr;
+}
+
+extern "C" ssize_t MterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal);
+extern "C" ssize_t NterpDoPackedSwitch(const uint16_t* switchData, int32_t testVal)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  return MterpDoPackedSwitch(switchData, testVal);
+}
+
+extern "C" ssize_t MterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal);
+extern "C" ssize_t NterpDoSparseSwitch(const uint16_t* switchData, int32_t testVal)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  return MterpDoSparseSwitch(switchData, testVal);
+}
+
+}  // namespace interpreter
+}  // namespace art
diff --git a/runtime/interpreter/mterp/nterp_stub.cc b/runtime/interpreter/mterp/nterp_stub.cc
new file mode 100644
index 0000000..c1b1ec3
--- /dev/null
+++ b/runtime/interpreter/mterp/nterp_stub.cc
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/locks.h"
+
+/*
+ * Stub definitions for targets without nterp implementations.
+ */
+
+namespace art {
+
+class ArtMethod;
+
+namespace interpreter {
+
+bool IsNterpSupported() {
+  return false;
+}
+
+bool CanRuntimeUseNterp() {
+  return false;
+}
+
+bool CanMethodUseNterp(ArtMethod* method ATTRIBUTE_UNUSED) {
+  return false;
+}
+
+const void* GetNterpEntryPoint() {
+  return nullptr;
+}
+
+void CheckNterpAsmConstants() {
+}
+
+extern "C" void ExecuteNterpImpl() REQUIRES_SHARED(Locks::mutator_lock_) {
+  UNIMPLEMENTED(FATAL);
+}
+
+extern "C" void* artNterpAsmInstructionStart[] = { nullptr };
+extern "C" void* artNterpAsmInstructionEnd[] = { nullptr };
+
+}  // namespace interpreter
+}  // namespace art
diff --git a/runtime/interpreter/mterp/x86/floating_point.S b/runtime/interpreter/mterp/x86/floating_point.S
index bc7c59d..0b3c06c 100644
--- a/runtime/interpreter/mterp/x86/floating_point.S
+++ b/runtime/interpreter/mterp/x86/floating_point.S
@@ -56,10 +56,17 @@
     movzbl  2(rPC), %ecx                    # ecx <- BB
     movzbl  3(rPC), %eax                    # eax <- CC
     GET_VREG_XMM${suff} %xmm0, %ecx         # %xmm0 <- 1st src
+#ifdef MTERP_USE_AVX
+    v${instr}${suff} VREG_ADDRESS(%eax), %xmm0, %xmm0
+    SET_VREG_XMM${suff} %xmm0, rINST        # vAA <- %xmm0
+    vpxor    %xmm0, %xmm0, %xmm0
+    vmovs${suff}   %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
+#else
     ${instr}${suff} VREG_ADDRESS(%eax), %xmm0
     SET_VREG_XMM${suff} %xmm0, rINST        # vAA <- %xmm0
     pxor    %xmm0, %xmm0
     movs${suff}   %xmm0, VREG_REF_ADDRESS(rINST) # clear ref
+#endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 %def sseBinop2Addr(instr="", suff=""):
@@ -67,10 +74,17 @@
     andl    $$0xf, %ecx                     # ecx <- A
     GET_VREG_XMM${suff} %xmm0, %ecx         # %xmm0 <- 1st src
     sarl    $$4, rINST                      # rINST<- B
+#ifdef MTERP_USE_AVX
+    v${instr}${suff} VREG_ADDRESS(rINST), %xmm0, %xmm0
+    SET_VREG_XMM${suff} %xmm0, %ecx         # vAA<- %xmm0
+    vpxor    %xmm0, %xmm0, %xmm0
+    vmovs${suff} %xmm0, VREG_REF_ADDRESS(rINST)  # clear ref
+#else
     ${instr}${suff} VREG_ADDRESS(rINST), %xmm0
     SET_VREG_XMM${suff} %xmm0, %ecx         # vAA<- %xmm0
     pxor    %xmm0, %xmm0
     movs${suff} %xmm0, VREG_REF_ADDRESS(rINST)  # clear ref
+#endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 %def op_add_double():
diff --git a/runtime/interpreter/mterp/x86_64/arithmetic.S b/runtime/interpreter/mterp/x86_64/arithmetic.S
index ff64b53..0ef7a83 100644
--- a/runtime/interpreter/mterp/x86_64/arithmetic.S
+++ b/runtime/interpreter/mterp/x86_64/arithmetic.S
@@ -1,4 +1,4 @@
-%def bindiv(result="", second="", wide="", suffix="", rem="0", ext="cdq"):
+%def bindiv(result="", second="", tmp="", wide="", suffix="", rem="0", ext="cdq"):
 /*
  * 32-bit binary div/rem operation.  Handles special case of op1=-1.
  */
@@ -16,6 +16,8 @@
     jz      common_errDivideByZero
     cmp${suffix}  $$-1, $second
     je      2f
+    cmp${suffix}  $$2, $second
+    je 3f
     $ext                                    # rdx:rax <- sign-extended of rax
     idiv${suffix}   $second
 1:
@@ -32,8 +34,31 @@
     neg${suffix} $result
     .endif
     jmp     1b
+3:
+    .if $rem
+    mov${suffix} $tmp, $result
+    .if $wide
+    shr${suffix} $$63, $result
+    .else
+    shr${suffix} $$31, $result
+    .endif
+    add${suffix} $tmp, $result
+    and${suffix} $$-2, $result
+    sub${suffix} $result, $tmp
+    mov${suffix} $tmp, $result
+    .else
+    mov${suffix} $result, $tmp
+    .if $wide
+    shr${suffix} $$63, $tmp
+    .else
+    shr${suffix} $$31, $tmp
+    .endif
+    add${suffix} $tmp, $result
+    sar${suffix} $result
+    .endif
+    jmp     1b
 
-%def bindiv2addr(result="", second="", wide="", suffix="", rem="0", ext="cdq"):
+%def bindiv2addr(result="", second="", tmp="", wide="", suffix="", rem="0", ext="cdq"):
 /*
  * 32-bit binary div/rem operation.  Handles special case of op1=-1.
  */
@@ -52,6 +77,8 @@
     jz      common_errDivideByZero
     cmp${suffix}  $$-1, $second
     je      2f
+    cmp${suffix}  $$2, $second
+    je      3f
     $ext                                    # rdx:rax <- sign-extended of rax
     idiv${suffix}   $second
 1:
@@ -68,6 +95,29 @@
     neg${suffix} $result
     .endif
     jmp     1b
+3:
+    .if $rem
+    mov${suffix} $tmp, $result
+    .if $wide
+    shr${suffix} $$63, $result
+    .else
+    shr${suffix} $$31, $result
+    .endif
+    add${suffix} $tmp, $result
+    and${suffix} $$-2, $result
+    sub${suffix} $result, $tmp
+    mov${suffix} $tmp, $result
+    .else
+    mov${suffix} $result, $tmp
+    .if $wide
+    shr${suffix} $$63, $tmp
+    .else
+    shr${suffix} $$31, $tmp
+    .endif
+    add${suffix} $tmp, $result
+    sar${suffix} $result
+    .endif
+    jmp     1b
 
 %def bindivLit16(result="", rem="0"):
 /*
@@ -372,10 +422,10 @@
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 %def op_div_int():
-%  bindiv(result="%eax", second="%ecx", wide="0", suffix="l")
+%  bindiv(result="%eax", second="%ecx", tmp="%edx", wide="0", suffix="l")
 
 %def op_div_int_2addr():
-%  bindiv2addr(result="%eax", second="%ecx", wide="0", suffix="l")
+%  bindiv2addr(result="%eax", second="%ecx", tmp="%edx", wide="0", suffix="l")
 
 %def op_div_int_lit16():
 %  bindivLit16(result="%eax")
@@ -384,10 +434,10 @@
 %  bindivLit8(result="%eax")
 
 %def op_div_long():
-%  bindiv(result="%rax", second="%rcx", wide="1", suffix="q", ext="cqo")
+%  bindiv(result="%rax", second="%rcx", tmp="%rdx", wide="1", suffix="q", ext="cqo")
 
 %def op_div_long_2addr():
-%  bindiv2addr(result="%rax", second="%rcx", wide="1", suffix="q", ext="cqo")
+%  bindiv2addr(result="%rax", second="%rcx", tmp="%rdx", wide="1", suffix="q", ext="cqo")
 
 %def op_int_to_byte():
 %  unop(instr="movsbl  %al, %eax")
@@ -475,10 +525,10 @@
 %  binopWide2addr(instr="orq")
 
 %def op_rem_int():
-%  bindiv(result="%edx", second="%ecx", wide="0", suffix="l", rem="1")
+%  bindiv(result="%edx", second="%ecx", tmp="%eax", wide="0", suffix="l", rem="1")
 
 %def op_rem_int_2addr():
-%  bindiv2addr(result="%edx", second="%ecx", wide="0", suffix="l", rem="1")
+%  bindiv2addr(result="%edx", second="%ecx", tmp="%eax", wide="0", suffix="l", rem="1")
 
 %def op_rem_int_lit16():
 %  bindivLit16(result="%edx", rem="1")
@@ -487,10 +537,10 @@
 %  bindivLit8(result="%edx", rem="1")
 
 %def op_rem_long():
-%  bindiv(result="%rdx", second="%rcx", wide="1", suffix="q", ext="cqo", rem="1")
+%  bindiv(result="%rdx", second="%rcx", tmp="%rax", wide="1", suffix="q", ext="cqo", rem="1")
 
 %def op_rem_long_2addr():
-%  bindiv2addr(result="%rdx", second="%rcx", wide="1", suffix="q", rem="1", ext="cqo")
+%  bindiv2addr(result="%rdx", second="%rcx", tmp="%rax", wide="1", suffix="q", rem="1", ext="cqo")
 
 %def op_rsub_int():
 /* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
diff --git a/runtime/interpreter/mterp/x86_64/floating_point.S b/runtime/interpreter/mterp/x86_64/floating_point.S
index 7fcb742..599b3f4 100644
--- a/runtime/interpreter/mterp/x86_64/floating_point.S
+++ b/runtime/interpreter/mterp/x86_64/floating_point.S
@@ -56,10 +56,17 @@
     movzbq  2(rPC), %rcx                    # ecx <- BB
     movzbq  3(rPC), %rax                    # eax <- CC
     GET_VREG_XMM${suff} %xmm0, %rcx         # %xmm0 <- 1st src
+#ifdef MTERP_USE_AVX
+    v${instr}${suff} VREG_ADDRESS(%rax), %xmm0, %xmm0
+    SET_VREG_XMM${suff} %xmm0, rINSTq       # vAA <- %xmm0
+    vpxor    %xmm0, %xmm0, %xmm0
+    vmovs${suff}   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+#else
     ${instr}${suff} VREG_ADDRESS(%rax), %xmm0
     SET_VREG_XMM${suff} %xmm0, rINSTq       # vAA <- %xmm0
     pxor    %xmm0, %xmm0
     movs${suff}   %xmm0, VREG_REF_ADDRESS(rINSTq) # clear ref
+#endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 
 %def sseBinop2Addr(instr="", suff=""):
@@ -67,10 +74,17 @@
     andl    $$0xf, %ecx                     # ecx <- A
     GET_VREG_XMM${suff} %xmm0, %rcx         # %xmm0 <- 1st src
     sarl    $$4, rINST                      # rINST<- B
+#ifdef MTERP_USE_AVX
+    v${instr}${suff} VREG_ADDRESS(rINSTq), %xmm0, %xmm0
+    SET_VREG_XMM${suff} %xmm0, %rcx         # vAA <- %xmm0
+    vpxor    %xmm0, %xmm0, %xmm0
+    vmovs${suff} %xmm0, VREG_REF_ADDRESS(rINSTq)  # clear ref
+#else
     ${instr}${suff} VREG_ADDRESS(rINSTq), %xmm0
     SET_VREG_XMM${suff} %xmm0, %rcx         # vAA <- %xmm0
     pxor    %xmm0, %xmm0
     movs${suff} %xmm0, VREG_REF_ADDRESS(rINSTq)  # clear ref
+#endif
     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 
 %def op_add_double():
diff --git a/runtime/interpreter/mterp/x86_64ng/array.S b/runtime/interpreter/mterp/x86_64ng/array.S
new file mode 100644
index 0000000..baf5f30
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64ng/array.S
@@ -0,0 +1,151 @@
+%def op_aget(load="movl", shift="4", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET", wide="0", is_object="0"):
+/*
+ * Array get.  vAA <- vBB[vCC].
+ *
+ * for: aget, aget-boolean, aget-byte, aget-char, aget-short, aget-wide, aget-object
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # eax <- BB
+    movzbq  3(rPC), %rcx                    # ecx <- CC
+    GET_VREG %edi, %rax                     # eax <- vBB (array object)
+    GET_VREG %esi, %rcx                     # ecx <- vCC (requested index)
+    testl   %edi, %edi                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%edi), %esi
+    jae     common_errArrayIndex            # index >= length, bail.
+    .if $wide
+    movq    $data_offset(%rdi,%rsi,8), %rax
+    SET_WIDE_VREG %rax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+    .elseif $is_object
+    testb $$READ_BARRIER_TEST_VALUE, GRAY_BYTE_OFFSET(%edi)
+    $load   $data_offset(%rdi,%rsi,$shift), %eax
+    jnz 2f
+1:
+    SET_VREG_OBJECT %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+    // reg00 is eax
+    call art_quick_read_barrier_mark_reg00
+    jmp 1b
+    .else
+    $load   $data_offset(%rdi,%rsi,$shift), %eax
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+    .endif
+
+%def op_aget_boolean():
+%  op_aget(load="movzbl", shift="1", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET", is_object="0")
+
+%def op_aget_byte():
+%  op_aget(load="movsbl", shift="1", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET", is_object="0")
+
+%def op_aget_char():
+%  op_aget(load="movzwl", shift="2", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET", is_object="0")
+
+%def op_aget_object():
+%  op_aget(load="movl", shift="4", data_offset="MIRROR_OBJECT_ARRAY_DATA_OFFSET", is_object="1")
+
+%def op_aget_short():
+%  op_aget(load="movswl", shift="2", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET", is_object="0")
+
+%def op_aget_wide():
+%  op_aget(load="movq", shift="8", data_offset="MIRROR_WIDE_ARRAY_DATA_OFFSET", wide="1", is_object="0")
+
+%def op_aput(rINST_reg="rINST", store="movl", shift="4", data_offset="MIRROR_INT_ARRAY_DATA_OFFSET", wide="0"):
+/*
+ * Array put.  vBB[vCC] <- vAA.
+ *
+ * for: aput, aput-boolean, aput-byte, aput-char, aput-short, aput-wide
+ *
+ */
+    /* op vAA, vBB, vCC */
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG %edi, %rax                     # edi <- vBB (array object)
+    GET_VREG %esi, %rcx                     # esi <- vCC (requested index)
+    testl   %edi, %edi                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%edi), %esi
+    jae     common_errArrayIndex            # index >= length, bail.
+    .if $wide
+    GET_WIDE_VREG rINSTq, rINSTq
+    .else
+    GET_VREG rINST, rINSTq
+    .endif
+    $store    $rINST_reg, $data_offset(%rdi,%rsi,$shift)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_aput_boolean():
+%  op_aput(rINST_reg="rINSTbl", store="movb", shift="1", data_offset="MIRROR_BOOLEAN_ARRAY_DATA_OFFSET", wide="0")
+
+%def op_aput_byte():
+%  op_aput(rINST_reg="rINSTbl", store="movb", shift="1", data_offset="MIRROR_BYTE_ARRAY_DATA_OFFSET", wide="0")
+
+%def op_aput_char():
+%  op_aput(rINST_reg="rINSTw", store="movw", shift="2", data_offset="MIRROR_CHAR_ARRAY_DATA_OFFSET", wide="0")
+
+%def op_aput_short():
+%  op_aput(rINST_reg="rINSTw", store="movw", shift="2", data_offset="MIRROR_SHORT_ARRAY_DATA_OFFSET", wide="0")
+
+%def op_aput_wide():
+%  op_aput(rINST_reg="rINSTq", store="movq", shift="8", data_offset="MIRROR_WIDE_ARRAY_DATA_OFFSET", wide="1")
+
+%def op_aput_object():
+    movzbq  2(rPC), %rax                    # rax <- BB
+    movzbq  3(rPC), %rcx                    # rcx <- CC
+    GET_VREG %edi, %rax                     # edi <- vBB (array object)
+    GET_VREG %esi, %rcx                     # esi <- vCC (requested index)
+    testl   %edi, %edi                      # null array object?
+    je      common_errNullObject            # bail if so
+    cmpl    MIRROR_ARRAY_LENGTH_OFFSET(%edi), %esi
+    jae     common_errArrayIndex            # index >= length, bail.
+    GET_VREG %edx, rINSTq
+    call art_quick_aput_obj
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_array_length():
+/*
+ * Return the length of an array.
+ */
+    movl    rINST, %eax                     # eax <- BA
+    sarl    $$4, rINST                      # rINST <- B
+    GET_VREG %ecx, rINSTq                   # ecx <- vB (object ref)
+    testl   %ecx, %ecx                      # is null?
+    je      common_errNullObject
+    andb    $$0xf, %al                      # eax <- A
+    movl    MIRROR_ARRAY_LENGTH_OFFSET(%rcx), rINST
+    SET_VREG rINST, %rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_fill_array_data():
+    /* fill-array-data vAA, +BBBBBBBB */
+    EXPORT_PC
+    movslq  2(rPC), %rcx                    # rcx <- ssssssssBBBBbbbb
+    leaq    (rPC,%rcx,2), OUT_ARG0          # OUT_ARG0 <- PC + ssssssssBBBBbbbb*2
+    GET_VREG OUT_32_ARG1, rINSTq            # OUT_ARG1 <- vAA (array object)
+    call    art_quick_handle_fill_data
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_filled_new_array(helper="nterp_filled_new_array"):
+/*
+ * Create a new array with elements filled from registers.
+ *
+ * for: filled-new-array, filled-new-array/range
+ */
+    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
+    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
+    EXPORT_PC
+    movq    rSELF:THREAD_SELF_OFFSET, OUT_ARG0
+    movq    (%rsp), OUT_ARG1
+    movq    rFP, OUT_ARG2
+    movq    rPC, OUT_ARG3
+    call    SYMBOL($helper)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_filled_new_array_range():
+%  op_filled_new_array(helper="nterp_filled_new_array_range")
+
+%def op_new_array():
+  jmp NterpNewArray
diff --git a/runtime/interpreter/mterp/x86_64ng/control_flow.S b/runtime/interpreter/mterp/x86_64ng/control_flow.S
new file mode 100644
index 0000000..35276d4
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64ng/control_flow.S
@@ -0,0 +1,179 @@
+%def bincmp(revcmp=""):
+/*
+ * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
+ */
+    /* if-cmp vA, vB, +CCCC */
+    movl    rINST, %ecx                     # rcx <- A+
+    sarl    $$4, rINST                      # rINST <- B
+    andb    $$0xf, %cl                      # rcx <- A
+    GET_VREG %eax, %rcx                     # eax <- vA
+    cmpl    VREG_ADDRESS(rINSTq), %eax      # compare (vA, vB)
+    j${revcmp}   1f
+    movswq  2(rPC), rINSTq                  # Get signed branch offset
+    BRANCH
+1:
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def zcmp(revcmp=""):
+/*
+ * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
+ * fragment that specifies the *reverse* comparison to perform, e.g.
+ * for "if-le" you would use "gt".
+ *
+ * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
+ */
+    /* if-cmp vAA, +BBBB */
+    cmpl    $$0, VREG_ADDRESS(rINSTq)       # compare (vA, 0)
+    j${revcmp}   1f
+    movswq  2(rPC), rINSTq                  # fetch signed displacement
+    BRANCH
+1:
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_goto():
+/*
+ * Unconditional branch, 8-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+    /* goto +AA */
+    movsbq  rINSTbl, rINSTq                 # rINSTq <- ssssssAA
+    BRANCH
+
+%def op_goto_16():
+/*
+ * Unconditional branch, 16-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ */
+    /* goto/16 +AAAA */
+    movswq  2(rPC), rINSTq                  # rINSTq <- ssssAAAA
+    BRANCH
+
+%def op_goto_32():
+/*
+ * Unconditional branch, 32-bit offset.
+ *
+ * The branch distance is a signed code-unit offset, which we need to
+ * double to get a byte offset.
+ *
+ * Because we need the SF bit set, we'll use an adds
+ * to convert from Dalvik offset to byte offset.
+ */
+    /* goto/32 +AAAAAAAA */
+    movslq  2(rPC), rINSTq                  # rINSTq <- AAAAAAAA
+    BRANCH
+
+%def op_if_eq():
+%  bincmp(revcmp="ne")
+
+%def op_if_eqz():
+%  zcmp(revcmp="ne")
+
+%def op_if_ge():
+%  bincmp(revcmp="l")
+
+%def op_if_gez():
+%  zcmp(revcmp="l")
+
+%def op_if_gt():
+%  bincmp(revcmp="le")
+
+%def op_if_gtz():
+%  zcmp(revcmp="le")
+
+%def op_if_le():
+%  bincmp(revcmp="g")
+
+%def op_if_lez():
+%  zcmp(revcmp="g")
+
+%def op_if_lt():
+%  bincmp(revcmp="ge")
+
+%def op_if_ltz():
+%  zcmp(revcmp="ge")
+
+%def op_if_ne():
+%  bincmp(revcmp="e")
+
+%def op_if_nez():
+%  zcmp(revcmp="e")
+
+%def op_packed_switch(func="NterpDoPackedSwitch"):
+/*
+ * Handle a packed-switch or sparse-switch instruction.  In both cases
+ * we decode it and hand it off to a helper function.
+ *
+ * We don't really expect backward branches in a switch statement, but
+ * they're perfectly legal, so we check for them here.
+ *
+ * for: packed-switch, sparse-switch
+ */
+    /* op vAA, +BBBB */
+    movslq  2(rPC), OUT_ARG0                # rcx <- ssssssssBBBBbbbb
+    leaq    (rPC,OUT_ARG0,2), OUT_ARG0      # rcx <- PC + ssssssssBBBBbbbb*2
+    GET_VREG OUT_32_ARG1, rINSTq            # eax <- vAA
+    call    SYMBOL($func)
+    movslq  %eax, rINSTq
+    BRANCH
+
+/*
+ * Return a 32-bit value.
+ */
+%def op_return(is_object="0"):
+    GET_VREG %eax, rINSTq                   # eax <- vAA
+    .if !$is_object
+    // In case we're going back to compiled code, put the
+    // result also in a xmm register.
+    movd %eax, %xmm0
+    .endif
+    CFI_REMEMBER_STATE
+    movq -8(rREFS), %rsp
+    CFI_DEF_CFA(rsp, CALLEE_SAVES_SIZE)
+    RESTORE_ALL_CALLEE_SAVES
+    ret
+    CFI_RESTORE_STATE
+
+%def op_return_object():
+%  op_return(is_object="1")
+
+%def op_return_void():
+    // Thread fence for constructor is a no-op on x86_64.
+    CFI_REMEMBER_STATE
+    movq -8(rREFS), %rsp
+    CFI_DEF_CFA(rsp, CALLEE_SAVES_SIZE)
+    RESTORE_ALL_CALLEE_SAVES
+    ret
+    CFI_RESTORE_STATE
+
+%def op_return_void_no_barrier():
+%  op_return_void()
+
+%def op_return_wide():
+    GET_WIDE_VREG %rax, rINSTq   # eax <- vAA
+    // In case we're going back to compiled code, put the
+    // result also in a xmm register.
+    movq    %rax, %xmm0
+    CFI_REMEMBER_STATE
+    movq    -8(rREFS), %rsp
+    CFI_DEF_CFA(rsp, CALLEE_SAVES_SIZE)
+    RESTORE_ALL_CALLEE_SAVES
+    ret
+    CFI_RESTORE_STATE
+
+%def op_sparse_switch():
+%  op_packed_switch(func="NterpDoSparseSwitch")
+
+%def op_throw():
+  EXPORT_PC
+  GET_VREG %edi, rINSTq                   # edi<- vAA (exception object)
+  movq rSELF:THREAD_SELF_OFFSET, %rsi
+  call SYMBOL(art_quick_deliver_exception)
+  int3
diff --git a/runtime/interpreter/mterp/x86_64ng/invoke.S b/runtime/interpreter/mterp/x86_64ng/invoke.S
new file mode 100644
index 0000000..dba1caa
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64ng/invoke.S
@@ -0,0 +1,181 @@
+// Fallback invoke handler: just calls the (unimplemented) runtime helper.
+%def invoke(helper="NterpUnimplemented"):
+    call    SYMBOL($helper)
+
+%def op_invoke_custom():
+   EXPORT_PC
+   movzwl 2(rPC), %edi // call_site index, first argument of runtime call.
+   jmp NterpCommonInvokeCustom
+
+%def op_invoke_custom_range():
+   EXPORT_PC
+   movzwl 2(rPC), %edi // call_site index, first argument of runtime call.
+   jmp NterpCommonInvokeCustomRange
+
+// Shared body for invoke-direct / invoke-super (and their /range forms).
+// Fast path loads the resolved ArtMethod from the thread-local cache;
+// slow path resolves it via nterp_get_method, which may tag the result
+// (low bit set) to flag a String.<init> method needing special handling.
+%def invoke_direct_or_super(helper="", range=""):
+   EXPORT_PC
+   // Fast-path which gets the method from thread-local cache.
+   FETCH_FROM_THREAD_CACHE %rdi, 2f
+1:
+   // Load the first argument (the 'this' pointer).
+   movzwl 4(rPC), %r11d // arguments
+   .if !$range
+   andq $$0xf, %r11
+   .endif
+   movl (rFP, %r11, 4), %esi
+   // NullPointerException check.
+   // (The loaded value is discarded; a null receiver faults and is
+   // handled like the SIGSEGV cases noted in invoke_virtual below.)
+   movl (%esi), %eax
+   jmp $helper
+2:
+   movq rSELF:THREAD_SELF_OFFSET, %rdi
+   movq 0(%rsp), %rsi
+   movq rPC, %rdx
+   call nterp_get_method
+   movq %rax, %rdi
+   // Low bit set marks a String.<init> method (see the andq below).
+   testl MACRO_LITERAL(1), %eax
+   je 1b
+   andq $$-2, %rdi  // Remove the extra bit that marks it's a String.<init> method.
+   .if $range
+   jmp NterpHandleStringInitRange
+   .else
+   jmp NterpHandleStringInit
+   .endif
+
+%def op_invoke_direct():
+%  invoke_direct_or_super(helper="NterpCommonInvokeInstance", range="0")
+
+%def op_invoke_direct_range():
+%  invoke_direct_or_super(helper="NterpCommonInvokeInstanceRange", range="1")
+
+%def op_invoke_polymorphic():
+   EXPORT_PC
+   // No need to fetch the target method.
+   // Load the first argument (the 'this' pointer).
+   movzwl 4(rPC), %r11d // arguments
+   andq $$0xf, %r11
+   movl (rFP, %r11, 4), %esi
+   // NullPointerException check.
+   movl (%esi), %eax
+   jmp NterpCommonInvokePolymorphic
+
+%def op_invoke_polymorphic_range():
+   EXPORT_PC
+   // No need to fetch the target method.
+   // Load the first argument (the 'this' pointer).
+   movzwl 4(rPC), %r11d // arguments
+   movl (rFP, %r11, 4), %esi
+   // NullPointerException check.
+   movl (%esi), %eax
+   jmp NterpCommonInvokePolymorphicRange
+
+// Shared body for invoke-interface (and /range). Fast path: the cache holds
+// the IMT index in rax; the target is fetched from the receiver's IMT.
+%def invoke_interface(helper="", range=""):
+   EXPORT_PC
+   // Fast-path which gets the method from thread-local cache.
+   FETCH_FROM_THREAD_CACHE %rax, 2f
+1:
+   // First argument is the 'this' pointer.
+   movzwl 4(rPC), %r11d // arguments
+   .if !$range
+   andq $$0xf, %r11
+   .endif
+   movl (rFP, %r11, 4), %esi
+   movl MIRROR_OBJECT_CLASS_OFFSET(%esi), %edx
+   movq MIRROR_CLASS_IMT_PTR_OFFSET_64(%edx), %rdx
+   movq (%rdx, %rax, 8), %rdi
+   jmp $helper
+2:
+   movq rSELF:THREAD_SELF_OFFSET, %rdi
+   movq 0(%rsp), %rsi
+   movq rPC, %rdx
+   call nterp_get_method
+   testl %eax, %eax
+   jns 1b
+   // For j.l.Object interface calls, the high bit is set. Also the method index is 16bits.
+   andl LITERAL(0xffff), %eax
+   .if $range
+   jmp NterpHandleInvokeInterfaceOnObjectMethodRange
+   .else
+   jmp NterpHandleInvokeInterfaceOnObjectMethod
+   .endif
+
+%def op_invoke_interface():
+%  invoke_interface(helper="NterpCommonInvokeInterface", range="0")
+
+%def op_invoke_interface_range():
+%  invoke_interface(helper="NterpCommonInvokeInterfaceRange", range="1")
+
+// Shared body for invoke-static (and /range): no receiver, so the fast path
+// jumps straight to the common helper with the ArtMethod in rdi.
+%def invoke_static(helper=""):
+   EXPORT_PC
+   // Fast-path which gets the method from thread-local cache.
+   FETCH_FROM_THREAD_CACHE %rdi, 1f
+   jmp $helper
+1:
+   movq rSELF:THREAD_SELF_OFFSET, %rdi
+   movq 0(%rsp), %rsi
+   movq rPC, %rdx
+   call nterp_get_method
+   movq %rax, %rdi
+   jmp $helper
+
+%def op_invoke_static():
+%  invoke_static(helper="NterpCommonInvokeStatic")
+
+%def op_invoke_static_range():
+%  invoke_static(helper="NterpCommonInvokeStaticRange")
+
+%def op_invoke_super():
+%  invoke_direct_or_super(helper="NterpCommonInvokeInstance", range="0")
+
+%def op_invoke_super_range():
+%  invoke_direct_or_super(helper="NterpCommonInvokeInstanceRange", range="1")
+
+// Shared body for invoke-virtual (and /range). Fast path: the cache holds
+// the vtable index in rdi; the target is fetched from the receiver's vtable.
+%def invoke_virtual(helper="", range=""):
+   EXPORT_PC
+   // Fast-path which gets the method from thread-local cache.
+   FETCH_FROM_THREAD_CACHE %rdi, 2f
+1:
+   // First argument is the 'this' pointer.
+   movzwl 4(rPC), %r11d // arguments
+   .if !$range
+   andq $$0xf, %r11
+   .endif
+   movl (rFP, %r11, 4), %esi
+   // Note: if esi is null, this will be handled by our SIGSEGV handler.
+   movl MIRROR_OBJECT_CLASS_OFFSET(%esi), %edx
+   movq MIRROR_CLASS_VTABLE_OFFSET_64(%edx, %edi, 8), %rdi
+   jmp $helper
+2:
+   movq rSELF:THREAD_SELF_OFFSET, %rdi
+   movq 0(%rsp), %rsi
+   movq rPC, %rdx
+   call nterp_get_method
+   movl %eax, %edi
+   jmp 1b
+
+%def op_invoke_virtual():
+%  invoke_virtual(helper="NterpCommonInvokeInstance", range="0")
+
+// Quickened form: the vtable offset is encoded directly in the instruction.
+%def op_invoke_virtual_quick():
+   EXPORT_PC
+   movzwl 2(rPC), %eax // offset
+   // First argument is the 'this' pointer.
+   movzwl 4(rPC), %r11d // arguments
+   andq $$0xf, %r11
+   movl (rFP, %r11, 4), %esi
+   // Note: if esi is null, this will be handled by our SIGSEGV handler.
+   movl MIRROR_OBJECT_CLASS_OFFSET(%esi), %edx
+   movq MIRROR_CLASS_VTABLE_OFFSET_64(%edx, %eax, 8), %rdi
+   jmp NterpCommonInvokeInstance
+
+%def op_invoke_virtual_range():
+%  invoke_virtual(helper="NterpCommonInvokeInstanceRange", range="1")
+
+%def op_invoke_virtual_range_quick():
+   EXPORT_PC
+   movzwl 2(rPC), %eax // offset
+   // First argument is the 'this' pointer.
+   movzwl 4(rPC), %r11d // arguments
+   movl (rFP, %r11, 4), %esi
+   // Note: if esi is null, this will be handled by our SIGSEGV handler.
+   movl MIRROR_OBJECT_CLASS_OFFSET(%esi), %edx
+   movq MIRROR_CLASS_VTABLE_OFFSET_64(%edx, %eax, 8), %rdi
+   jmp NterpCommonInvokeInstanceRange
diff --git a/runtime/interpreter/mterp/x86_64ng/main.S b/runtime/interpreter/mterp/x86_64ng/main.S
new file mode 100644
index 0000000..4eaf95e
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64ng/main.S
@@ -0,0 +1,2018 @@
+%def header():
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This is a #include, not a %include, because we want the C pre-processor
+ * to expand the macros into assembler assignment statements.
+ */
+#include "asm_support.h"
+#include "arch/x86_64/asm_support_x86_64.S"
+#include "interpreter/cfi_asm_support.h"
+
+/**
+ * x86_64 ABI general notes:
+ *
+ * Caller save set:
+ *    rax, rdx, rcx, rsi, rdi, r8-r11, st(0)-st(7)
+ * Callee save set:
+ *    rbx, rbp, r12-r15
+ * Return regs:
+ *    32-bit in eax
+ *    64-bit in rax
+ *    fp on xmm0
+ *
+ * First 8 fp parameters came in xmm0-xmm7.
+ * First 6 non-fp parameters came in rdi, rsi, rdx, rcx, r8, r9.
+ * Other parameters passed on stack, pushed right-to-left.  On entry to target, first
+ * param is at 8(%esp).
+ *
+ * Stack must be 16-byte aligned to support SSE in native code.
+ */
+
+#define IN_ARG3        %rcx
+#define IN_ARG2        %rdx
+#define IN_ARG1        %rsi
+#define IN_ARG0        %rdi
+/* Out Args  */
+#define OUT_ARG3       %rcx
+#define OUT_ARG2       %rdx
+#define OUT_ARG1       %rsi
+#define OUT_ARG0       %rdi
+#define OUT_32_ARG3    %ecx
+#define OUT_32_ARG2    %edx
+#define OUT_32_ARG1    %esi
+#define OUT_32_ARG0    %edi
+#define OUT_FP_ARG1    %xmm1
+#define OUT_FP_ARG0    %xmm0
+
+/*
+ * single-purpose registers, given names for clarity
+ */
+#define rSELF    %gs
+#define rPC      %r12
+#define CFI_DEX  12 // DWARF register number of the register holding dex-pc (rPC).
+#define CFI_TMP  5  // DWARF register number of the first argument register (rdi).
+#define rFP      %r13
+#define rINST    %ebx
+#define rINSTq   %rbx
+#define rINSTw   %bx
+#define rINSTbh  %bh
+#define rINSTbl  %bl
+#define rIBASE   %r14
+#define rREFS    %r15
+#define CFI_REFS 15 // DWARF register number of the reference array (r15).
+
+// Temporary registers while setting up a frame.
+#define rNEW_FP   %r8
+#define rNEW_REFS %r9
+#define CFI_NEW_REFS 9
+
+/*
+ * Get/set the 32-bit value from a Dalvik register.
+ * The dex-register array (rFP) and the reference array (rREFS) are
+ * parallel; each vreg is 4 bytes in both.
+ */
+#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
+#define VREG_HIGH_ADDRESS(_vreg) 4(rFP,_vreg,4)
+#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
+#define VREG_REF_HIGH_ADDRESS(_vreg) 4(rREFS,_vreg,4)
+
+// Includes the return address implictly pushed on stack by 'call'.
+// 6*8: GPRs pushed by SPILL_ALL_CALLEE_SAVES; 4*8: presumably the FP
+// callee-save frame from SETUP_FP_CALLEE_SAVE_FRAME (confirm against
+// asm_support_x86_64.S); 1*8: the return address.
+#define CALLEE_SAVES_SIZE (6 * 8 + 4 * 8 + 1 * 8)
+
+// +8 for the ArtMethod of the caller.
+#define OFFSET_TO_FIRST_ARGUMENT_IN_STACK (CALLEE_SAVES_SIZE + 8)
+
+/*
+ * Refresh rINST.
+ * At enter to handler rINST does not contain the opcode number.
+ * However some utilities require the full value, so this macro
+ * restores the opcode number.
+ */
+.macro REFRESH_INST _opnum
+    movb    rINSTbl, rINSTbh
+    movb    $$\_opnum, rINSTbl
+.endm
+
+/*
+ * Fetch the next instruction from rPC into rINSTw.  Does not advance rPC.
+ */
+.macro FETCH_INST
+    movzwq  (rPC), rINSTq
+.endm
+
+/*
+ * Remove opcode from rINST, compute the address of handler and jump to it.
+ * rIBASE is the handler-table base; each handler occupies
+ * (1 << handler_size_bits) bytes.
+ */
+.macro GOTO_NEXT
+    movzx   rINSTbl,%ecx
+    movzbl  rINSTbh,rINST
+    shll    MACRO_LITERAL(${handler_size_bits}), %ecx
+    addq    rIBASE, %rcx
+    jmp     *%rcx
+.endm
+
+/*
+ * Advance rPC by instruction count.
+ */
+.macro ADVANCE_PC _count
+    leaq    2*\_count(rPC), rPC
+.endm
+
+/*
+ * Advance rPC by instruction count, fetch instruction and jump to handler.
+ */
+.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
+    ADVANCE_PC \_count
+    FETCH_INST
+    GOTO_NEXT
+.endm
+
+.macro GET_VREG _reg _vreg
+    movl    VREG_ADDRESS(\_vreg), \_reg
+.endm
+
+.macro GET_VREG_OBJECT _reg _vreg
+    movl    VREG_REF_ADDRESS(\_vreg), \_reg
+.endm
+
+/* Read wide value. */
+.macro GET_WIDE_VREG _reg _vreg
+    movq    VREG_ADDRESS(\_vreg), \_reg
+.endm
+
+// Writing a non-reference value must also clear the parallel reference slot.
+.macro SET_VREG _reg _vreg
+    movl    \_reg, VREG_ADDRESS(\_vreg)
+    movl    MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
+.endm
+
+/* Write wide value. reg is clobbered. */
+.macro SET_WIDE_VREG _reg _vreg
+    movq    \_reg, VREG_ADDRESS(\_vreg)
+    xorq    \_reg, \_reg
+    // The single 64-bit store of zero clears both reference slots of the pair.
+    movq    \_reg, VREG_REF_ADDRESS(\_vreg)
+.endm
+
+// References are stored in both the dex-register array and the reference array.
+.macro SET_VREG_OBJECT _reg _vreg
+    movl    \_reg, VREG_ADDRESS(\_vreg)
+    movl    \_reg, VREG_REF_ADDRESS(\_vreg)
+.endm
+
+.macro GET_VREG_HIGH _reg _vreg
+    movl    VREG_HIGH_ADDRESS(\_vreg), \_reg
+.endm
+
+.macro SET_VREG_HIGH _reg _vreg
+    movl    \_reg, VREG_HIGH_ADDRESS(\_vreg)
+    movl    MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
+.endm
+
+.macro CLEAR_REF _vreg
+    movl    MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
+.endm
+
+.macro CLEAR_WIDE_REF _vreg
+    movl    MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
+    movl    MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
+.endm
+
+.macro GET_VREG_XMMs _xmmreg _vreg
+    movss VREG_ADDRESS(\_vreg), \_xmmreg
+.endm
+.macro GET_VREG_XMMd _xmmreg _vreg
+    movsd VREG_ADDRESS(\_vreg), \_xmmreg
+.endm
+.macro SET_VREG_XMMs _xmmreg _vreg
+    movss \_xmmreg, VREG_ADDRESS(\_vreg)
+.endm
+.macro SET_VREG_XMMd _xmmreg _vreg
+    movsd \_xmmreg, VREG_ADDRESS(\_vreg)
+.endm
+
+// An assembly entry that has a OatQuickMethodHeader prefix.
+// The two .long fields form the method-header metadata before the code.
+.macro OAT_ENTRY name, end
+    FUNCTION_TYPE(\name)
+    ASM_HIDDEN SYMBOL(\name)
+    .global SYMBOL(\name)
+    .balign 16
+    .long 0
+    .long (SYMBOL(\end) - SYMBOL(\name))
+SYMBOL(\name):
+.endm
+
+.macro ENTRY name
+    .text
+    ASM_HIDDEN SYMBOL(\name)
+    .global SYMBOL(\name)
+    FUNCTION_TYPE(\name)
+SYMBOL(\name):
+.endm
+
+.macro END name
+    SIZE(\name)
+.endm
+
+// Macro for defining entrypoints into runtime. We don't need to save registers
+// (we're not holding references there), but there is no
+// kDontSave runtime method. So just use the kSaveRefsOnly runtime method.
+.macro NTERP_TRAMPOLINE name, helper
+DEFINE_FUNCTION \name
+  SETUP_SAVE_REFS_ONLY_FRAME
+  call \helper
+  RESTORE_SAVE_REFS_ONLY_FRAME
+  RETURN_OR_DELIVER_PENDING_EXCEPTION
+END_FUNCTION \name
+.endm
+
+.macro CLEAR_VOLATILE_MARKER reg
+  andq MACRO_LITERAL(-2), \reg
+.endm
+
+// Store the current dex pc into its frame slot at -16(rREFS).
+.macro EXPORT_PC
+    movq    rPC, -16(rREFS)
+.endm
+
+
+.macro BRANCH
+    // Update method counter and do a suspend check if the branch is negative.
+    testq rINSTq, rINSTq
+    js 3f
+2:
+    leaq    (rPC, rINSTq, 2), rPC
+    FETCH_INST
+    GOTO_NEXT
+3:
+    // Backward branch: bump the hotness counter of the executing method
+    // (ArtMethod* lives at (%rsp), stored by SETUP_STACK_FRAME).
+    movq (%rsp), %rdi
+    addw $$1, ART_METHOD_HOTNESS_COUNT_OFFSET(%rdi)
+    // If the counter overflows, handle this in the runtime.
+    jo NterpHandleHotnessOverflow
+    // Otherwise, do a suspend check.
+    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), rSELF:THREAD_FLAGS_OFFSET
+    jz      2b
+    EXPORT_PC
+    call    SYMBOL(art_quick_test_suspend)
+    jmp 2b
+.endm
+
+// Setup the stack to start executing the method. Expects:
+// - rdi to contain the ArtMethod
+// - rbx, r10, r11 to be available.
+//
+// Outputs
+// - rbx contains the dex registers size
+// - r11 contains the old stack pointer.
+.macro SETUP_STACK_FRAME code_item, refs, fp, cfi_refs
+    // Fetch dex register size.
+    movzwl CODE_ITEM_REGISTERS_SIZE_OFFSET(\code_item), %ebx
+    // Fetch outs size.
+    movzwq CODE_ITEM_OUTS_SIZE_OFFSET(\code_item), \refs
+
+    // Compute required frame size for dex registers: ((2 * ebx) + refs)
+    leaq (\refs, %rbx, 2), %r11
+    salq $$2, %r11
+
+    // Compute new stack pointer in r10: add 24 for saving the previous frame,
+    // pc, and method being executed.
+    leaq -24(%rsp), %r10
+    subq %r11, %r10
+    // Alignment
+    andq $$-16, %r10
+
+    // Set reference and dex registers.
+    leaq 24(%r10, \refs, 4), \refs
+    leaq (\refs, %rbx, 4), \fp
+
+    // Now setup the stack pointer.
+    movq %rsp, %r11
+    CFI_DEF_CFA_REGISTER(r11)
+    movq %r10, %rsp
+    // The old stack pointer is saved at -8(refs); frame teardown (op_return*)
+    // and the CFA definition below both rely on this slot.
+    movq %r11, -8(\refs)
+    CFI_DEFINE_CFA_DEREF(\cfi_refs, -8, (6 + 4 + 1) * 8)
+
+    // Put nulls in reference frame.
+    testl %ebx, %ebx
+    je 2f
+    movq \refs, %r10
+1:
+    movl $$0, (%r10)
+    addq $$4, %r10
+    cmpq %r10, \fp
+    jne 1b
+2:
+    // Save the ArtMethod.
+    movq %rdi, (%rsp)
+.endm
+
+// Puts the next floating point argument into the expected register,
+// fetching values based on a non-range invoke.
+// Uses rax as temporary.
+//
+// Shorty characters compared below: 68='D', 70='F', 74='J', 76='L'.
+// For non-range invokes, vregs C..F are packed 4 bits each in `inst`;
+// the 5th argument (vG) lives in the high nibble of 1(rPC).
+//
+// TODO: We could simplify a lot of code by loading the G argument into
+// the "inst" register. Given that we enter the handler with "1(rPC)" in
+// the rINST, we can just add rINST<<16 to the args and we don't even
+// need to pass "arg_index" around.
+.macro LOOP_OVER_SHORTY_LOADING_XMMS xmm_reg, inst, shorty, arg_index, finished
+1: // LOOP
+    movb (REG_VAR(shorty)), %al             // bl := *shorty
+    addq MACRO_LITERAL(1), REG_VAR(shorty)  // shorty++
+    cmpb MACRO_LITERAL(0), %al              // if (al == '\0') goto finished
+    je VAR(finished)
+    cmpb MACRO_LITERAL(68), %al             // if (al == 'D') goto FOUND_DOUBLE
+    je 2f
+    cmpb MACRO_LITERAL(70), %al             // if (al == 'F') goto FOUND_FLOAT
+    je 3f
+    shrq MACRO_LITERAL(4), REG_VAR(inst)
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    //  Handle extra argument in arg array taken by a long.
+    cmpb MACRO_LITERAL(74), %al   // if (al != 'J') goto LOOP
+    jne 1b
+    shrq MACRO_LITERAL(4), REG_VAR(inst)
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    jmp 1b                        // goto LOOP
+2:  // FOUND_DOUBLE
+    // Assemble the two 32-bit vreg halves in a stack scratch slot,
+    // then load the 64-bit value into the xmm register.
+    subq MACRO_LITERAL(8), %rsp
+    movq REG_VAR(inst), %rax
+    andq MACRO_LITERAL(0xf), %rax
+    GET_VREG %eax, %rax
+    movl %eax, (%rsp)
+    shrq MACRO_LITERAL(4), REG_VAR(inst)
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    cmpq MACRO_LITERAL(4), REG_VAR(arg_index)
+    je 5f
+    movq REG_VAR(inst), %rax
+    andq MACRO_LITERAL(0xf), %rax
+    shrq MACRO_LITERAL(4), REG_VAR(inst)
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    jmp 6f
+5:
+    // 5th argument (vG): encoded in 1(rPC) rather than in `inst`.
+    movzbl 1(rPC), %eax
+    andq MACRO_LITERAL(0xf), %rax
+6:
+    GET_VREG %eax, %rax
+    movl %eax, 4(%rsp)
+    movsd (%rsp), REG_VAR(xmm_reg)
+    addq MACRO_LITERAL(8), %rsp
+    jmp 4f
+3:  // FOUND_FLOAT
+    cmpq MACRO_LITERAL(4), REG_VAR(arg_index)
+    je 7f
+    movq REG_VAR(inst), %rax
+    andq MACRO_LITERAL(0xf), %rax
+    shrq MACRO_LITERAL(4), REG_VAR(inst)
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    jmp 8f
+7:
+    movzbl 1(rPC), %eax
+    andq MACRO_LITERAL(0xf), %rax
+8:
+    GET_VREG_XMMs REG_VAR(xmm_reg), %rax
+4:
+.endm
+
+// Puts the next int/long/object argument in the expected register,
+// fetching values based on a non-range invoke.
+// Uses rax as temporary.
+.macro LOOP_OVER_SHORTY_LOADING_GPRS gpr_reg64, gpr_reg32, inst, shorty, arg_index, finished
+1: // LOOP
+    movb (REG_VAR(shorty)), %al   // bl := *shorty
+    addq MACRO_LITERAL(1), REG_VAR(shorty)  // shorty++
+    cmpb MACRO_LITERAL(0), %al    // if (al == '\0') goto finished
+    je  VAR(finished)
+    cmpb MACRO_LITERAL(74), %al   // if (al == 'J') goto FOUND_LONG
+    je 2f
+    cmpb MACRO_LITERAL(70), %al   // if (al == 'F') goto SKIP_FLOAT
+    je 3f
+    cmpb MACRO_LITERAL(68), %al   // if (al == 'D') goto SKIP_DOUBLE
+    je 4f
+    cmpq MACRO_LITERAL(4), REG_VAR(arg_index)
+    je 7f
+    movq REG_VAR(inst), %rax
+    andq MACRO_LITERAL(0xf), %rax
+    shrq MACRO_LITERAL(4), REG_VAR(inst)
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    jmp 8f
+7:
+    // 5th argument (vG): encoded in 1(rPC) rather than in `inst`.
+    movzbl 1(rPC), %eax
+    andq MACRO_LITERAL(0xf), %rax
+8:
+    GET_VREG REG_VAR(gpr_reg32), %rax
+    jmp 5f
+2:  // FOUND_LONG
+    // Assemble the two 32-bit vreg halves in a stack scratch slot.
+    subq MACRO_LITERAL(8), %rsp
+    movq REG_VAR(inst), %rax
+    andq MACRO_LITERAL(0xf), %rax
+    GET_VREG %eax, %rax
+    movl %eax, (%rsp)
+    shrq MACRO_LITERAL(4), REG_VAR(inst)
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    cmpq MACRO_LITERAL(4), REG_VAR(arg_index)
+    je 9f
+    movq REG_VAR(inst), %rax
+    andq MACRO_LITERAL(0xf), %rax
+    shrq MACRO_LITERAL(4), REG_VAR(inst)
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    jmp 10f
+9:
+    movzbl 1(rPC), %eax
+    andq MACRO_LITERAL(0xf), %rax
+10:
+    GET_VREG %eax, %rax
+    movl %eax, 4(%rsp)
+    movq (%rsp), REG_VAR(gpr_reg64)
+    addq MACRO_LITERAL(8), %rsp
+    jmp 5f
+3:  // SKIP_FLOAT
+    shrq MACRO_LITERAL(4), REG_VAR(inst)
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    jmp 1b
+4:  // SKIP_DOUBLE
+    shrq MACRO_LITERAL(4), REG_VAR(inst)
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    cmpq MACRO_LITERAL(4), REG_VAR(arg_index)
+    je 1b
+    shrq MACRO_LITERAL(4), REG_VAR(inst)
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    jmp 1b
+5:
+.endm
+
+// Puts the next floating point argument into the expected register,
+// fetching values based on a range invoke.
+// Uses rax as temporary.
+// Shorty characters compared below: 68='D', 70='F', 74='J'.
+// For range invokes, arg_index is the vreg index itself (consecutive vregs).
+.macro LOOP_RANGE_OVER_SHORTY_LOADING_XMMS xmm_reg, shorty, arg_index, stack_index, finished
+1: // LOOP
+    movb (REG_VAR(shorty)), %al             // bl := *shorty
+    addq MACRO_LITERAL(1), REG_VAR(shorty)  // shorty++
+    cmpb MACRO_LITERAL(0), %al              // if (al == '\0') goto finished
+    je VAR(finished)
+    cmpb MACRO_LITERAL(68), %al             // if (al == 'D') goto FOUND_DOUBLE
+    je 2f
+    cmpb MACRO_LITERAL(70), %al             // if (al == 'F') goto FOUND_FLOAT
+    je 3f
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    addq MACRO_LITERAL(1), REG_VAR(stack_index)
+    //  Handle extra argument in arg array taken by a long.
+    cmpb MACRO_LITERAL(74), %al   // if (al != 'J') goto LOOP
+    jne 1b
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    addq MACRO_LITERAL(1), REG_VAR(stack_index)
+    jmp 1b                        // goto LOOP
+2:  // FOUND_DOUBLE
+    GET_VREG_XMMd REG_VAR(xmm_reg), REG_VAR(arg_index)
+    addq MACRO_LITERAL(2), REG_VAR(arg_index)
+    addq MACRO_LITERAL(2), REG_VAR(stack_index)
+    jmp 4f
+3:  // FOUND_FLOAT
+    GET_VREG_XMMs REG_VAR(xmm_reg), REG_VAR(arg_index)
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    addq MACRO_LITERAL(1), REG_VAR(stack_index)
+4:
+.endm
+
+// Puts the next floating point argument into the expected stack slot,
+// fetching values based on a range invoke.
+// Uses rax as temporary.
+//
+// TODO: We could just copy all the vregs to the stack slots in a simple loop
+// (or REP MOVSD) without looking at the shorty at all. (We could also drop
+// the "stack_index" from the macros for loading registers.) We could also do
+// that conditionally if argument word count > 6; otherwise we know that all
+// args fit into registers.
+.macro LOOP_RANGE_OVER_FPs shorty, arg_index, stack_index, finished
+1: // LOOP
+    movb (REG_VAR(shorty)), %al             // bl := *shorty
+    addq MACRO_LITERAL(1), REG_VAR(shorty)  // shorty++
+    cmpb MACRO_LITERAL(0), %al              // if (al == '\0') goto finished
+    je VAR(finished)
+    cmpb MACRO_LITERAL(68), %al             // if (al == 'D') goto FOUND_DOUBLE
+    je 2f
+    cmpb MACRO_LITERAL(70), %al             // if (al == 'F') goto FOUND_FLOAT
+    je 3f
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    addq MACRO_LITERAL(1), REG_VAR(stack_index)
+    //  Handle extra argument in arg array taken by a long.
+    cmpb MACRO_LITERAL(74), %al   // if (al != 'J') goto LOOP
+    jne 1b
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    addq MACRO_LITERAL(1), REG_VAR(stack_index)
+    jmp 1b                        // goto LOOP
+2:  // FOUND_DOUBLE
+    // Copy a 64-bit vreg pair straight to the outgoing stack area
+    // (8(%rsp) skips the saved ArtMethod slot).
+    movq (rFP, REG_VAR(arg_index), 4), %rax
+    movq %rax, 8(%rsp, REG_VAR(stack_index), 4)
+    addq MACRO_LITERAL(2), REG_VAR(arg_index)
+    addq MACRO_LITERAL(2), REG_VAR(stack_index)
+    jmp 1b
+3:  // FOUND_FLOAT
+    movl (rFP, REG_VAR(arg_index), 4), %eax
+    movl %eax, 8(%rsp, REG_VAR(stack_index), 4)
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    addq MACRO_LITERAL(1), REG_VAR(stack_index)
+    jmp 1b
+.endm
+
+// Puts the next int/long/object argument in the expected register,
+// fetching values based on a range invoke.
+// Uses rax as temporary.
+.macro LOOP_RANGE_OVER_SHORTY_LOADING_GPRS gpr_reg64, gpr_reg32, shorty, arg_index, stack_index, finished
+1: // LOOP
+    movb (REG_VAR(shorty)), %al             // bl := *shorty
+    addq MACRO_LITERAL(1), REG_VAR(shorty)  // shorty++
+    cmpb MACRO_LITERAL(0), %al    // if (al == '\0') goto finished
+    je  VAR(finished)
+    cmpb MACRO_LITERAL(74), %al   // if (al == 'J') goto FOUND_LONG
+    je 2f
+    cmpb MACRO_LITERAL(70), %al   // if (al == 'F') goto SKIP_FLOAT
+    je 3f
+    cmpb MACRO_LITERAL(68), %al   // if (al == 'D') goto SKIP_DOUBLE
+    je 4f
+    movl       (rFP, REG_VAR(arg_index), 4), REG_VAR(gpr_reg32)
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    addq MACRO_LITERAL(1), REG_VAR(stack_index)
+    jmp 5f
+2:  // FOUND_LONG
+    movq (rFP, REG_VAR(arg_index), 4), REG_VAR(gpr_reg64)
+    addq MACRO_LITERAL(2), REG_VAR(arg_index)
+    addq MACRO_LITERAL(2), REG_VAR(stack_index)
+    jmp 5f
+3:  // SKIP_FLOAT
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    addq MACRO_LITERAL(1), REG_VAR(stack_index)
+    jmp 1b
+4:  // SKIP_DOUBLE
+    addq MACRO_LITERAL(2), REG_VAR(arg_index)
+    addq MACRO_LITERAL(2), REG_VAR(stack_index)
+    jmp 1b
+5:
+.endm
+
+// Puts the next int/long/object argument in the expected stack slot,
+// fetching values based on a range invoke.
+// Uses rax as temporary.
+.macro LOOP_RANGE_OVER_INTs shorty, arg_index, stack_index, finished
+1: // LOOP
+    movb (REG_VAR(shorty)), %al             // al := *shorty
+    addq MACRO_LITERAL(1), REG_VAR(shorty)  // shorty++
+    cmpb MACRO_LITERAL(0), %al    // if (al == '\0') goto finished
+    je  VAR(finished)
+    cmpb MACRO_LITERAL(74), %al   // if (al == 'J') goto FOUND_LONG
+    je 2f
+    cmpb MACRO_LITERAL(70), %al   // if (al == 'F') goto SKIP_FLOAT
+    je 3f
+    cmpb MACRO_LITERAL(68), %al   // if (al == 'D') goto SKIP_DOUBLE
+    je 4f
+    movl (rFP, REG_VAR(arg_index), 4), %eax
+    movl %eax, 8(%rsp, REG_VAR(stack_index), 4)
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    addq MACRO_LITERAL(1), REG_VAR(stack_index)
+    jmp 1b
+2:  // FOUND_LONG
+    movq (rFP, REG_VAR(arg_index), 4), %rax
+    movq %rax, 8(%rsp, REG_VAR(stack_index), 4)
+    addq MACRO_LITERAL(2), REG_VAR(arg_index)
+    addq MACRO_LITERAL(2), REG_VAR(stack_index)
+    jmp 1b
+3:  // SKIP_FLOAT
+    addq MACRO_LITERAL(1), REG_VAR(arg_index)
+    addq MACRO_LITERAL(1), REG_VAR(stack_index)
+    jmp 1b
+4:  // SKIP_DOUBLE
+    addq MACRO_LITERAL(2), REG_VAR(arg_index)
+    addq MACRO_LITERAL(2), REG_VAR(stack_index)
+    jmp 1b
+.endm
+
+// Puts the next floating point parameter passed in physical register
+// in the expected dex register array entry.
+// Uses rax as temporary.
+// Shorty characters compared below: 68='D', 70='F', 74='J', 76='L'.
+// Here arg_index is a byte offset into the dex-register array.
+.macro LOOP_OVER_SHORTY_STORING_XMMS xmm_reg, shorty, arg_index, fp, finished
+1: // LOOP
+    movb (REG_VAR(shorty)), %al             // al := *shorty
+    addq MACRO_LITERAL(1), REG_VAR(shorty)  // shorty++
+    cmpb MACRO_LITERAL(0), %al              // if (al == '\0') goto finished
+    je VAR(finished)
+    cmpb MACRO_LITERAL(68), %al             // if (al == 'D') goto FOUND_DOUBLE
+    je 2f
+    cmpb MACRO_LITERAL(70), %al             // if (al == 'F') goto FOUND_FLOAT
+    je 3f
+    addq MACRO_LITERAL(4), REG_VAR(arg_index)
+    //  Handle extra argument in arg array taken by a long.
+    cmpb MACRO_LITERAL(74), %al   // if (al != 'J') goto LOOP
+    jne 1b
+    addq MACRO_LITERAL(4), REG_VAR(arg_index)
+    jmp 1b                        // goto LOOP
+2:  // FOUND_DOUBLE
+    movsd REG_VAR(xmm_reg),(REG_VAR(fp), REG_VAR(arg_index), 1)
+    addq MACRO_LITERAL(8), REG_VAR(arg_index)
+    jmp 4f
+3:  // FOUND_FLOAT
+    movss REG_VAR(xmm_reg), (REG_VAR(fp), REG_VAR(arg_index), 1)
+    addq MACRO_LITERAL(4), REG_VAR(arg_index)
+4:
+.endm
+
+// Puts the next int/long/object parameter passed in physical register
+// in the expected dex register array entry, and in case of object in the
+// expected reference array entry.
+// Uses rax as temporary.
+.macro LOOP_OVER_SHORTY_STORING_GPRS gpr_reg64, gpr_reg32, shorty, arg_index, regs, refs, finished
+1: // LOOP
+    movb (REG_VAR(shorty)), %al             // bl := *shorty
+    addq MACRO_LITERAL(1), REG_VAR(shorty)  // shorty++
+    cmpb MACRO_LITERAL(0), %al    // if (al == '\0') goto finished
+    je  VAR(finished)
+    cmpb MACRO_LITERAL(74), %al   // if (al == 'J') goto FOUND_LONG
+    je 2f
+    cmpb MACRO_LITERAL(70), %al   // if (al == 'F') goto SKIP_FLOAT
+    je 3f
+    cmpb MACRO_LITERAL(68), %al   // if (al == 'D') goto SKIP_DOUBLE
+    je 4f
+    movl REG_VAR(gpr_reg32), (REG_VAR(regs), REG_VAR(arg_index), 1)
+    // References ('L') are mirrored into the reference array.
+    cmpb MACRO_LITERAL(76), %al   // if (al != 'L') goto NOT_REFERENCE
+    jne 6f
+    movl REG_VAR(gpr_reg32), (REG_VAR(refs), REG_VAR(arg_index), 1)
+6:  // NOT_REFERENCE
+    addq MACRO_LITERAL(4), REG_VAR(arg_index)
+    jmp 5f
+2:  // FOUND_LONG
+    movq REG_VAR(gpr_reg64), (REG_VAR(regs), REG_VAR(arg_index), 1)
+    addq MACRO_LITERAL(8), REG_VAR(arg_index)
+    jmp 5f
+3:  // SKIP_FLOAT
+    addq MACRO_LITERAL(4), REG_VAR(arg_index)
+    jmp 1b
+4:  // SKIP_DOUBLE
+    addq MACRO_LITERAL(8), REG_VAR(arg_index)
+    jmp 1b
+5:
+.endm
+
+// Puts the next floating point parameter passed in stack
+// in the expected dex register array entry.
+// Uses rax as temporary.
+//
+// TODO: Or we could just spill regs to the reserved slots in the caller's
+// frame and copy all regs in a simple loop. This time, however, we would
+// need to look at the shorty anyway to look for the references.
+// (The trade-off is different for passing arguments and receiving them.)
+.macro LOOP_OVER_FPs shorty, arg_index, regs, stack_ptr, finished
+1: // LOOP
+    movb (REG_VAR(shorty)), %al             // bl := *shorty
+    addq MACRO_LITERAL(1), REG_VAR(shorty)  // shorty++
+    cmpb MACRO_LITERAL(0), %al              // if (al == '\0') goto finished
+    je VAR(finished)
+    cmpb MACRO_LITERAL(68), %al             // if (al == 'D') goto FOUND_DOUBLE
+    je 2f
+    cmpb MACRO_LITERAL(70), %al             // if (al == 'F') goto FOUND_FLOAT
+    je 3f
+    addq MACRO_LITERAL(4), REG_VAR(arg_index)
+    //  Handle extra argument in arg array taken by a long.
+    cmpb MACRO_LITERAL(74), %al   // if (al != 'J') goto LOOP
+    jne 1b
+    addq MACRO_LITERAL(4), REG_VAR(arg_index)
+    jmp 1b                        // goto LOOP
+2:  // FOUND_DOUBLE
+    movq OFFSET_TO_FIRST_ARGUMENT_IN_STACK(REG_VAR(stack_ptr), REG_VAR(arg_index), 1), %rax
+    movq %rax, (REG_VAR(regs), REG_VAR(arg_index), 1)
+    addq MACRO_LITERAL(8), REG_VAR(arg_index)
+    jmp 1b
+3:  // FOUND_FLOAT
+    movl OFFSET_TO_FIRST_ARGUMENT_IN_STACK(REG_VAR(stack_ptr), REG_VAR(arg_index), 1), %eax
+    movl %eax, (REG_VAR(regs), REG_VAR(arg_index), 1)
+    addq MACRO_LITERAL(4), REG_VAR(arg_index)
+    jmp 1b
+.endm
+
+// Puts the next int/long/object parameter passed in stack
+// in the expected dex register array entry, and in case of object in the
+// expected reference array entry.
+// Uses rax as temporary.
+.macro LOOP_OVER_INTs shorty, arg_index, regs, refs, stack_ptr, finished
+1: // LOOP
+    movb (REG_VAR(shorty)), %al             // bl := *shorty
+    addq MACRO_LITERAL(1), REG_VAR(shorty)  // shorty++
+    cmpb MACRO_LITERAL(0), %al    // if (al == '\0') goto finished
+    je  VAR(finished)
+    cmpb MACRO_LITERAL(74), %al   // if (al == 'J') goto FOUND_LONG
+    je 2f
+    cmpb MACRO_LITERAL(76), %al   // if (al == 'L') goto FOUND_REFERENCE
+    je 6f
+    cmpb MACRO_LITERAL(70), %al   // if (al == 'F') goto SKIP_FLOAT
+    je 3f
+    cmpb MACRO_LITERAL(68), %al   // if (al == 'D') goto SKIP_DOUBLE
+    je 4f
+    movl OFFSET_TO_FIRST_ARGUMENT_IN_STACK(REG_VAR(stack_ptr), REG_VAR(arg_index), 1), %eax
+    movl %eax, (REG_VAR(regs), REG_VAR(arg_index), 1)
+    addq MACRO_LITERAL(4), REG_VAR(arg_index)
+    jmp 1b
+6:  // FOUND_REFERENCE
+    // References are written to both the dex-register and reference arrays.
+    movl OFFSET_TO_FIRST_ARGUMENT_IN_STACK(REG_VAR(stack_ptr), REG_VAR(arg_index), 1), %eax
+    movl %eax, (REG_VAR(regs), REG_VAR(arg_index), 1)
+    movl %eax, (REG_VAR(refs), REG_VAR(arg_index), 1)
+    addq MACRO_LITERAL(4), REG_VAR(arg_index)
+    jmp 1b
+2:  // FOUND_LONG
+    movq OFFSET_TO_FIRST_ARGUMENT_IN_STACK(REG_VAR(stack_ptr), REG_VAR(arg_index), 1), %rax
+    movq %rax, (REG_VAR(regs), REG_VAR(arg_index), 1)
+    addq MACRO_LITERAL(8), REG_VAR(arg_index)
+    jmp 1b
+3:  // SKIP_FLOAT
+    addq MACRO_LITERAL(4), REG_VAR(arg_index)
+    jmp 1b
+4:  // SKIP_DOUBLE
+    addq MACRO_LITERAL(8), REG_VAR(arg_index)
+    jmp 1b
+.endm
+
+// Increase method hotness and do suspend check before starting executing the method.
+.macro START_EXECUTING_INSTRUCTIONS
+   // ArtMethod* is at (%rsp), stored there by SETUP_STACK_FRAME.
+   movq (%rsp), %rdi
+   addw $$1, ART_METHOD_HOTNESS_COUNT_OFFSET(%rdi)
+   jo 2f
+   testl $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), rSELF:THREAD_FLAGS_OFFSET
+   jz 1f
+   EXPORT_PC
+   call SYMBOL(art_quick_test_suspend)
+1:
+   FETCH_INST
+   GOTO_NEXT
+2:
+   // Hotness counter overflowed: report to the runtime
+   // (rdi = method, rsi = 0, rdx = dex register array) and resume.
+   movq $$0, %rsi
+   movq rFP, %rdx
+   call nterp_hot_method
+   jmp 1b
+.endm
+
+.macro SPILL_ALL_CALLEE_SAVES
+    PUSH r15
+    PUSH r14
+    PUSH r13
+    PUSH r12
+    PUSH rbp
+    PUSH rbx
+    SETUP_FP_CALLEE_SAVE_FRAME
+.endm
+
+// Exact mirror of SPILL_ALL_CALLEE_SAVES, in reverse order.
+.macro RESTORE_ALL_CALLEE_SAVES
+    RESTORE_FP_CALLEE_SAVE_FRAME
+    POP rbx
+    POP rbp
+    POP r12
+    POP r13
+    POP r14
+    POP r15
+.endm
+
+// Helper to setup the stack after doing a nterp to nterp call. This will setup:
+// - rNEW_FP: the new pointer to dex registers
+// - rNEW_REFS: the new pointer to references
+// - rPC: the new PC pointer to execute
+// - edi: number of arguments
+// - ecx: first dex register
+.macro SETUP_STACK_FOR_INVOKE
+   // We do the same stack overflow check as the compiler. See CanMethodUseNterp
+   // in how we limit the maximum nterp frame size.
+   // (The test is a probe: it faults if the guard page is hit.)
+   testq %rax, -STACK_OVERFLOW_RESERVED_BYTES(%rsp)
+
+   // Spill all callee saves to have a consistent stack frame whether we
+   // are called by compiled code or nterp.
+   SPILL_ALL_CALLEE_SAVES
+
+   // Setup the frame.
+   SETUP_STACK_FRAME %rax, rNEW_REFS, rNEW_FP, CFI_NEW_REFS
+   // Make r11 point to the top of the dex register array.
+   leaq (rNEW_FP, %rbx, 4), %r11
+
+   // Fetch instruction information before replacing rPC.
+   movzbl 1(rPC), %edi
+   movzwl 4(rPC), %ecx
+
+   // Set the dex pc pointer.
+   leaq CODE_ITEM_INSNS_OFFSET(%rax), rPC
+   CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
+.endm
+
+// Setup arguments based on a non-range nterp to nterp call, and start executing
+// the method. We expect:
+// - rNEW_FP: the new pointer to dex registers
+// - rNEW_REFS: the new pointer to references
+// - rPC: the new PC pointer to execute
+// - edi: number of arguments
+// - ecx: first dex register
+// - r11: top of dex register array
+// - esi: receiver if non-static.
+.macro SETUP_NON_RANGE_ARGUMENTS_AND_EXECUTE is_static=0, is_string_init=0
+   // Now all temporary registers (except r11 containing top of registers array)
+   // are available, copy the parameters.
+   // /* op vA, vB, {vC...vG} */
+   movl %edi, %eax
+   shrl $$4, %eax # Number of arguments
+   jz 6f  # shrl sets the Z flag
+   movq MACRO_LITERAL(-1), %r10
+   // Dispatch on the argument count; cases 5..1 below fall through,
+   // copying one argument each, from the last (vG) down to the first.
+   cmpl MACRO_LITERAL(2), %eax
+   jl 1f
+   je 2f
+   cmpl MACRO_LITERAL(4), %eax
+   jl 3f
+   je 4f
+
+  // We use a decrementing r10 to store references relative
+  // to rNEW_FP and dex registers relative to r11.
+  //
+  // TODO: We could set up r10 as the number of registers (this can be an additional output from
+  // SETUP_STACK_FOR_INVOKE) and then just decrement it by one before copying each arg to
+  // (rNEW_FP, r10, 4) and (rNEW_REFS, r10, 4).
+  // Maybe even introduce macros NEW_VREG_ADDRESS/NEW_VREG_REF_ADDRESS.
+5:
+   // 5th argument: vG is the low nibble of edi.
+   andq        MACRO_LITERAL(15), %rdi
+   GET_VREG_OBJECT %edx, %rdi
+   movl        %edx, (rNEW_FP, %r10, 4)
+   GET_VREG    %edx, %rdi
+   movl        %edx, (%r11, %r10, 4)
+   subq        MACRO_LITERAL(1), %r10
+4:
+   // 4th argument: vF is the top nibble of ecx.
+   movl        %ecx, %eax
+   shrl        MACRO_LITERAL(12), %eax
+   GET_VREG_OBJECT %edx, %rax
+   movl        %edx, (rNEW_FP, %r10, 4)
+   GET_VREG    %edx, %rax
+   movl        %edx, (%r11, %r10, 4)
+   subq        MACRO_LITERAL(1), %r10
+3:
+   // 3rd argument: vE is bits 8..11 of ecx.
+   movl        %ecx, %eax
+   shrl        MACRO_LITERAL(8), %eax
+   andl        MACRO_LITERAL(0xf), %eax
+   GET_VREG_OBJECT %edx, %rax
+   movl        %edx, (rNEW_FP, %r10, 4)
+   GET_VREG    %edx, %rax
+   movl        %edx, (%r11, %r10, 4)
+   subq        MACRO_LITERAL(1), %r10
+2:
+   // 2nd argument: vD is bits 4..7 of ecx.
+   movl        %ecx, %eax
+   shrl        MACRO_LITERAL(4), %eax
+   andl        MACRO_LITERAL(0xf), %eax
+   GET_VREG_OBJECT %edx, %rax
+   movl        %edx, (rNEW_FP, %r10, 4)
+   GET_VREG    %edx, %rax
+   movl        %edx, (%r11, %r10, 4)
+   subq        MACRO_LITERAL(1), %r10
+1:
+   // 1st argument: vC (low nibble of ecx), or the receiver in esi.
+   .if \is_string_init
+   // Ignore the first argument
+   .elseif \is_static
+   movl        %ecx, %eax
+   andq        MACRO_LITERAL(0x000f), %rax
+   GET_VREG_OBJECT %edx, %rax
+   movl        %edx, (rNEW_FP, %r10, 4)
+   GET_VREG    %edx, %rax
+   movl        %edx, (%r11, %r10, 4)
+   .else
+   movl        %esi, (rNEW_FP, %r10, 4)
+   movl        %esi, (%r11, %r10, 4)
+   .endif
+
+6:
+   // Start executing the method.
+   movq rNEW_FP, rFP
+   movq rNEW_REFS, rREFS
+   CFI_DEFINE_CFA_DEREF(CFI_REFS, -8, (6 + 4 + 1) * 8)
+   START_EXECUTING_INSTRUCTIONS
+.endm
+
+// Setup arguments based on a range nterp to nterp call, and start executing
+// the method. Arguments are copied from the caller's rREFS/rFP arrays into the
+// callee's arrays, last argument first, using a decrementing byte offset r10
+// relative to rNEW_FP (references) and r11 (dex registers).
+.macro SETUP_RANGE_ARGUMENTS_AND_EXECUTE is_static=0, is_string_init=0
+   // edi is number of arguments
+   // ecx is first register
+   movq MACRO_LITERAL(-4), %r10
+   .if \is_string_init
+   // Ignore the first argument
+   subl $$1, %edi
+   addl $$1, %ecx
+   .elseif !\is_static
+   subl $$1, %edi
+   addl $$1, %ecx
+   .endif
+
+   testl %edi, %edi
+   je 2f
+   leaq  (rREFS, %rcx, 4), %rax  # pointer to first argument in reference array
+   leaq  (%rax, %rdi, 4), %rax   # pointer to last argument in reference array
+   leaq  (rFP, %rcx, 4), %rcx    # pointer to first argument in register array
+   leaq  (%rcx, %rdi, 4), %rdi   # pointer to last argument in register array
+   // TODO: Same comment for copying arguments as in SETUP_NON_RANGE_ARGUMENTS_AND_EXECUTE.
+1:
+   // Copy one argument per iteration, walking backwards until %rdi reaches
+   // the first argument in the register array.
+   movl  -4(%rax), %edx
+   movl  %edx, (rNEW_FP, %r10, 1)
+   movl  -4(%rdi), %edx
+   movl  %edx, (%r11, %r10, 1)
+   subq  MACRO_LITERAL(4), %r10
+   subq  MACRO_LITERAL(4), %rax
+   subq  MACRO_LITERAL(4), %rdi
+   cmpq  %rcx, %rdi
+   jne 1b
+
+2:
+   // Store the receiver (esi) for instance methods.
+   .if \is_string_init
+   // Ignore first argument
+   .elseif !\is_static
+   movl        %esi, (rNEW_FP, %r10, 1)
+   movl        %esi, (%r11, %r10, 1)
+   .endif
+   movq rNEW_FP, rFP
+   movq rNEW_REFS, rREFS
+   CFI_DEFINE_CFA_DEREF(CFI_REFS, -8, (6 + 4 + 1) * 8)
+   START_EXECUTING_INSTRUCTIONS
+.endm
+
+// Fetch the callee's shorty string into \dest, choosing the runtime helper
+// that matches the invoke kind. rdi/rsi are preserved across the call via
+// push/pop; hence 16(%rsp) below addresses what was 0(%rsp) before the two
+// pushes (the caller's ArtMethod* slot at the bottom of the nterp frame).
+.macro GET_SHORTY dest, is_interface, is_polymorphic, is_custom
+   push %rdi
+   push %rsi
+   .if \is_polymorphic
+   movq 16(%rsp), %rdi
+   movq rPC, %rsi
+   call SYMBOL(NterpGetShortyFromInvokePolymorphic)
+   .elseif \is_custom
+   movq 16(%rsp), %rdi
+   movq rPC, %rsi
+   call SYMBOL(NterpGetShortyFromInvokeCustom)
+   .elseif \is_interface
+   // Pass the method index (16 bits at rPC+2) rather than the resolved method.
+   movq 16(%rsp), %rdi
+   movzwl 2(rPC), %esi
+   call SYMBOL(NterpGetShortyFromMethodId)
+   .else
+   call SYMBOL(NterpGetShorty)
+   .endif
+   pop %rsi
+   pop %rdi
+   movq %rax, \dest
+.endm
+
+// Decide between a fast nterp-to-nterp transfer and a call through the quick
+// entry point: if the callee's quick code is not ExecuteNterpImpl, branch to
+// \call_compiled_code. Otherwise fall through with the callee's code item in
+// %rax (the NterpGetCodeItem return value).
+.macro DO_ENTRY_POINT_CHECK call_compiled_code
+   // On entry, the method is %rdi, the instance is %rsi
+   leaq ExecuteNterpImpl(%rip), %rax
+   cmpq %rax, ART_METHOD_QUICK_CODE_OFFSET_64(%rdi)
+   jne  VAR(call_compiled_code)
+
+   // TODO: Get code item in a better way and remove below
+   push %rdi
+   push %rsi
+   call SYMBOL(NterpGetCodeItem)
+   pop %rsi
+   pop %rdi
+   // TODO: Get code item in a better way and remove above
+.endm
+
+// Uses r9 and r10 as temporary
+//
+// After a String.<init> call, replace every occurrence of \old_value (the
+// uninitialized "this" reference) with \new_value in the current frame:
+// r9 walks the reference array from rREFS up to rFP, while r10 walks the
+// dex register array in lockstep; both slots are rewritten on a match.
+.macro UPDATE_REGISTERS_FOR_STRING_INIT old_value, new_value
+   movq rREFS, %r9
+   movq rFP, %r10
+1:
+   cmpl (%r9), \old_value
+   jne 2f
+   movl \new_value, (%r9)
+   movl \new_value, (%r10)
+2:
+   addq $$4, %r9
+   addq $$4, %r10
+   cmpq %r9, rFP
+   jne 1b
+.endm
+
+// Common handling for non-range invokes: either transfer directly to the
+// callee's nterp implementation, or marshal the vC..vG arguments into the
+// System V calling convention registers (xmm0-4 for floats, rsi/rdx/rcx/r8/r9
+// for GPRs, driven by the shorty) and call the compiled entry point. The
+// shorty is kept in callee-save rINSTq to switch over the return type after
+// the call.
+.macro COMMON_INVOKE_NON_RANGE is_static=0, is_interface=0, suffix="", is_string_init=0, is_polymorphic=0, is_custom=0
+   .if \is_polymorphic
+   // We always go to compiled code for polymorphic calls.
+   .elseif \is_custom
+   // We always go to compiled code for custom calls.
+   .else
+     DO_ENTRY_POINT_CHECK .Lcall_compiled_code_\suffix
+     .if \is_string_init
+     call nterp_to_nterp_string_init_non_range
+     .elseif \is_static
+     call nterp_to_nterp_static_non_range
+     .else
+     call nterp_to_nterp_instance_non_range
+     .endif
+     jmp .Ldone_return_\suffix
+   .endif
+
+.Lcall_compiled_code_\suffix:
+   GET_SHORTY rINSTq, \is_interface, \is_polymorphic, \is_custom
+   // From this point:
+   // - rINSTq contains shorty (in callee-save to switch over return value after call).
+   // - rdi contains method
+   // - rsi contains 'this' pointer for instance method.
+   leaq 1(rINSTq), %r9  // shorty + 1  ; ie skip return arg character
+   movzwl 4(rPC), %r11d // arguments
+   .if \is_string_init
+   shrq MACRO_LITERAL(4), %r11
+   movq $$1, %r10       // ignore first argument
+   .elseif \is_static
+   movq $$0, %r10       // arg_index
+   .else
+   shrq MACRO_LITERAL(4), %r11
+   movq $$1, %r10       // arg_index
+   .endif
+   LOOP_OVER_SHORTY_LOADING_XMMS xmm0, r11, r9, r10, .Lxmm_setup_finished_\suffix
+   LOOP_OVER_SHORTY_LOADING_XMMS xmm1, r11, r9, r10, .Lxmm_setup_finished_\suffix
+   LOOP_OVER_SHORTY_LOADING_XMMS xmm2, r11, r9, r10, .Lxmm_setup_finished_\suffix
+   LOOP_OVER_SHORTY_LOADING_XMMS xmm3, r11, r9, r10, .Lxmm_setup_finished_\suffix
+   LOOP_OVER_SHORTY_LOADING_XMMS xmm4, r11, r9, r10, .Lxmm_setup_finished_\suffix
+.Lxmm_setup_finished_\suffix:
+   // Second pass over the shorty for the GPR arguments.
+   leaq 1(rINSTq), %r9  // shorty + 1  ; ie skip return arg character
+   movzwl 4(rPC), %r11d // arguments
+   .if \is_string_init
+   movq $$1, %r10       // ignore first argument
+   shrq MACRO_LITERAL(4), %r11
+   LOOP_OVER_SHORTY_LOADING_GPRS rsi, esi, r11, r9, r10, .Lgpr_setup_finished_\suffix
+   .elseif \is_static
+   movq $$0, %r10       // arg_index
+   LOOP_OVER_SHORTY_LOADING_GPRS rsi, esi, r11, r9, r10, .Lgpr_setup_finished_\suffix
+   .else
+   shrq MACRO_LITERAL(4), %r11
+   movq $$1, %r10       // arg_index
+   .endif
+   LOOP_OVER_SHORTY_LOADING_GPRS rdx, edx, r11, r9, r10, .Lgpr_setup_finished_\suffix
+   LOOP_OVER_SHORTY_LOADING_GPRS rcx, ecx, r11, r9, r10, .Lgpr_setup_finished_\suffix
+   LOOP_OVER_SHORTY_LOADING_GPRS r8, r8d, r11, r9, r10, .Lgpr_setup_finished_\suffix
+   LOOP_OVER_SHORTY_LOADING_GPRS r9, r9d, r11, r9, r10, .Lgpr_setup_finished_\suffix
+.Lgpr_setup_finished_\suffix:
+   .if \is_polymorphic
+   call SYMBOL(art_quick_invoke_polymorphic)
+   .elseif \is_custom
+   call SYMBOL(art_quick_invoke_custom)
+   .else
+      .if \is_interface
+      // Pass the method index in eax for the interface-call trampoline.
+      movzwl 2(rPC), %eax
+      .endif
+      call *ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
+   .endif
+   cmpb LITERAL(68), (rINSTq)       // Test if result type char == 'D'.
+   je .Lreturn_double_\suffix
+   cmpb LITERAL(70), (rINSTq)       // Test if result type char == 'F'.
+   jne .Ldone_return_\suffix
+.Lreturn_float_\suffix:
+   movd %xmm0, %eax
+   jmp .Ldone_return_\suffix
+.Lreturn_double_\suffix:
+   movq %xmm0, %rax
+.Ldone_return_\suffix:
+   /* resume execution of caller */
+   .if \is_string_init
+   movzwl 4(rPC), %r11d // arguments
+   andq $$0xf, %r11
+   GET_VREG %esi, %r11
+   UPDATE_REGISTERS_FOR_STRING_INIT %esi, %eax
+   .endif
+
+   .if \is_polymorphic
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 4
+   .else
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+   .endif
+.endm
+
+// Common handling for range invokes: either transfer directly to the callee's
+// nterp implementation, or marshal the vCCCC.. arguments into the System V
+// calling convention registers (xmm0-7 + stack for floats, rsi/rdx/rcx/r8/r9 +
+// stack for GPRs, driven by the shorty in callee-save rINSTq/%rbx) and call
+// the compiled entry point.
+//
+// Fix: the string-init/static GPR-loading branches jumped to
+// .Lgpr_setup_finished_\suffix, which belongs to the NON-range expansion of
+// the same suffix (both expansions share suffix strings, e.g. "invokeStatic"),
+// so a fully-loaded GPR set would jump into the non-range code. They must
+// target .Lgpr_setup_finished_range_\suffix like the other loads below.
+.macro COMMON_INVOKE_RANGE is_static=0, is_interface=0, suffix="", is_string_init=0, is_polymorphic=0, is_custom=0
+   .if \is_polymorphic
+   // We always go to compiled code for polymorphic calls.
+   .elseif \is_custom
+   // We always go to compiled code for custom calls.
+   .else
+     DO_ENTRY_POINT_CHECK .Lcall_compiled_code_range_\suffix
+     .if \is_string_init
+     call nterp_to_nterp_string_init_range
+     .elseif \is_static
+     call nterp_to_nterp_static_range
+     .else
+     call nterp_to_nterp_instance_range
+     .endif
+     jmp .Ldone_return_range_\suffix
+   .endif
+
+.Lcall_compiled_code_range_\suffix:
+   GET_SHORTY rINSTq, \is_interface, \is_polymorphic, \is_custom
+   // From this point:
+   // - rINSTq contains shorty (in callee-save to switch over return value after call).
+   // - rdi contains method
+   // - rsi contains 'this' pointer for instance method.
+   leaq 1(rINSTq), %r9  // shorty + 1  ; ie skip return arg character
+   movzwl 4(rPC), %r10d // arg start index
+   .if \is_string_init
+   addq $$1, %r10       // arg start index
+   movq $$1, %rbp       // index in stack
+   .elseif \is_static
+   movq $$0, %rbp       // index in stack
+   .else
+   addq $$1, %r10       // arg start index
+   movq $$1, %rbp       // index in stack
+   .endif
+   LOOP_RANGE_OVER_SHORTY_LOADING_XMMS xmm0, r9, r10, rbp, .Lxmm_setup_finished_range_\suffix
+   LOOP_RANGE_OVER_SHORTY_LOADING_XMMS xmm1, r9, r10, rbp, .Lxmm_setup_finished_range_\suffix
+   LOOP_RANGE_OVER_SHORTY_LOADING_XMMS xmm2, r9, r10, rbp, .Lxmm_setup_finished_range_\suffix
+   LOOP_RANGE_OVER_SHORTY_LOADING_XMMS xmm3, r9, r10, rbp, .Lxmm_setup_finished_range_\suffix
+   LOOP_RANGE_OVER_SHORTY_LOADING_XMMS xmm4, r9, r10, rbp, .Lxmm_setup_finished_range_\suffix
+   LOOP_RANGE_OVER_SHORTY_LOADING_XMMS xmm5, r9, r10, rbp, .Lxmm_setup_finished_range_\suffix
+   LOOP_RANGE_OVER_SHORTY_LOADING_XMMS xmm6, r9, r10, rbp, .Lxmm_setup_finished_range_\suffix
+   LOOP_RANGE_OVER_SHORTY_LOADING_XMMS xmm7, r9, r10, rbp, .Lxmm_setup_finished_range_\suffix
+   LOOP_RANGE_OVER_FPs r9, r10, rbp, .Lxmm_setup_finished_range_\suffix
+.Lxmm_setup_finished_range_\suffix:
+   // Second pass over the shorty for the GPR arguments (%rbx == rINSTq).
+   leaq 1(%rbx), %r11  // shorty + 1  ; ie skip return arg character
+   movzwl 4(rPC), %r10d // arg start index
+   .if \is_string_init
+   addq $$1, %r10       // arg start index
+   movq $$1, %rbp       // index in stack
+   LOOP_RANGE_OVER_SHORTY_LOADING_GPRS rsi, esi, r11, r10, rbp, .Lgpr_setup_finished_range_\suffix
+   .elseif \is_static
+   movq $$0, %rbp // index in stack
+   LOOP_RANGE_OVER_SHORTY_LOADING_GPRS rsi, esi, r11, r10, rbp, .Lgpr_setup_finished_range_\suffix
+   .else
+   addq $$1, %r10       // arg start index
+   movq $$1, %rbp // index in stack
+   .endif
+   LOOP_RANGE_OVER_SHORTY_LOADING_GPRS rdx, edx, r11, r10, rbp, .Lgpr_setup_finished_range_\suffix
+   LOOP_RANGE_OVER_SHORTY_LOADING_GPRS rcx, ecx, r11, r10, rbp, .Lgpr_setup_finished_range_\suffix
+   LOOP_RANGE_OVER_SHORTY_LOADING_GPRS r8, r8d, r11, r10, rbp, .Lgpr_setup_finished_range_\suffix
+   LOOP_RANGE_OVER_SHORTY_LOADING_GPRS r9, r9d, r11, r10, rbp, .Lgpr_setup_finished_range_\suffix
+   LOOP_RANGE_OVER_INTs r11, r10, rbp, .Lgpr_setup_finished_range_\suffix
+
+.Lgpr_setup_finished_range_\suffix:
+   .if \is_polymorphic
+   call SYMBOL(art_quick_invoke_polymorphic)
+   .elseif \is_custom
+   call SYMBOL(art_quick_invoke_custom)
+   .else
+     .if \is_interface
+     // Pass the method index in eax for the interface-call trampoline.
+     movzwl 2(rPC), %eax
+     .endif
+     call *ART_METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
+   .endif
+   cmpb LITERAL(68), (%rbx)       // Test if result type char == 'D'.
+   je .Lreturn_range_double_\suffix
+   cmpb LITERAL(70), (%rbx)       // Test if result type char == 'F'.
+   je .Lreturn_range_float_\suffix
+   /* resume execution of caller */
+.Ldone_return_range_\suffix:
+   .if \is_string_init
+   movzwl 4(rPC), %r11d // arguments
+   GET_VREG %esi, %r11
+   UPDATE_REGISTERS_FOR_STRING_INIT %esi, %eax
+   .endif
+
+   .if \is_polymorphic
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 4
+   .else
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+   .endif
+.Lreturn_range_double_\suffix:
+    movq %xmm0, %rax
+    jmp .Ldone_return_range_\suffix
+.Lreturn_range_float_\suffix:
+    movd %xmm0, %eax
+    jmp .Ldone_return_range_\suffix
+.endm
+
+// Fetch some information from the thread cache.
+// Uses rax, rdx, rcx as temporaries.
+//
+// The per-thread interpreter cache is an array of {dex_pc_ptr, value} pairs
+// indexed by a hash of rPC (shift + mask below). On a hit (stored key == rPC)
+// the cached value (the pointer-sized second word of the entry) is loaded into
+// \dest_reg; on a miss control branches to \slow_path.
+.macro FETCH_FROM_THREAD_CACHE dest_reg, slow_path
+   movq rSELF:THREAD_SELF_OFFSET, %rax
+   movq rPC, %rdx
+   salq MACRO_LITERAL(THREAD_INTERPRETER_CACHE_SIZE_SHIFT), %rdx
+   andq MACRO_LITERAL(THREAD_INTERPRETER_CACHE_SIZE_MASK), %rdx
+   cmpq THREAD_INTERPRETER_CACHE_OFFSET(%rax, %rdx, 1), rPC
+   jne \slow_path
+   movq __SIZEOF_POINTER__+THREAD_INTERPRETER_CACHE_OFFSET(%rax, %rdx, 1), \dest_reg
+.endm
+
+// Helper for static field get.
+//
+// Fast path: ArtField* from the thread cache; edx <- field offset,
+// eax <- declaring class (a 32-bit compressed reference, hence the 32-bit
+// addressing below), then load the value with \load (or movq when \wide).
+// Slow path resolves the field via nterp_get_static_field; the returned
+// pointer may have bit 0 set to mark a volatile field.
+.macro OP_SGET load="movl", wide="0"
+   // Fast-path which gets the field from thread-local cache.
+   FETCH_FROM_THREAD_CACHE %rax, 2f
+1:
+   movl ART_FIELD_OFFSET_OFFSET(%rax), %edx
+   movl ART_FIELD_DECLARING_CLASS_OFFSET(%rax), %eax
+   // Non-null entrypoint means the read barrier is active: mark the class.
+   cmpq $$0, rSELF:THREAD_READ_BARRIER_MARK_REG00_OFFSET
+   jne 3f
+4:
+   .if \wide
+   movq (%eax,%edx,1), %rax
+   SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
+   .else
+   \load (%eax, %edx, 1), %eax
+   SET_VREG %eax, rINSTq            # fp[A] <- value
+   .endif
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+   movq rSELF:THREAD_SELF_OFFSET, %rdi
+   movq 0(%rsp), %rsi
+   movq rPC, %rdx
+   EXPORT_PC
+   call nterp_get_static_field
+   // Clear the marker that we put for volatile fields. The x86 memory
+   // model doesn't require a barrier.
+   andq $$-2, %rax
+   jmp 1b
+3:
+   call art_quick_read_barrier_mark_reg00
+   jmp 4b
+.endm
+
+// Helper for static field put.
+//
+// Fast path: ArtField* from the thread cache; store v[A] (truncated to the
+// field width by \store) at class + offset. Slow path resolves the field via
+// nterp_get_static_field; bit 0 of the result marks a volatile field, which
+// takes the 5/6 path below and issues a store-store fence
+// (lock addl $0, (%rsp)) after the store.
+.macro OP_SPUT rINST_reg="rINST", store="movl", wide="0":
+   // Fast-path which gets the field from thread-local cache.
+   FETCH_FROM_THREAD_CACHE %rax, 2f
+1:
+   movl ART_FIELD_OFFSET_OFFSET(%rax), %edx
+   movl ART_FIELD_DECLARING_CLASS_OFFSET(%rax), %eax
+   // Non-null entrypoint means the read barrier is active: mark the class.
+   cmpq $$0, rSELF:THREAD_READ_BARRIER_MARK_REG00_OFFSET
+   jne 3f
+4:
+   .if \wide
+   GET_WIDE_VREG rINSTq, rINSTq           # rINST <- v[A]
+   .else
+   GET_VREG rINST, rINSTq                  # rINST <- v[A]
+   .endif
+   \store    \rINST_reg, (%rax,%rdx,1)
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+   movq rSELF:THREAD_SELF_OFFSET, %rdi
+   movq 0(%rsp), %rsi
+   movq rPC, %rdx
+   EXPORT_PC
+   call nterp_get_static_field
+   // Bit 0 clear -> non-volatile field: take the fast path.
+   testq MACRO_LITERAL(1), %rax
+   je 1b
+   // Clear the marker that we put for volatile fields. The x86 memory
+   // model doesn't require a barrier.
+   CLEAR_VOLATILE_MARKER %rax
+   movl ART_FIELD_OFFSET_OFFSET(%rax), %edx
+   movl ART_FIELD_DECLARING_CLASS_OFFSET(%rax), %eax
+   cmpq $$0, rSELF:THREAD_READ_BARRIER_MARK_REG00_OFFSET
+   jne 6f
+5:
+   .if \wide
+   GET_WIDE_VREG rINSTq, rINSTq           # rINST <- v[A]
+   .else
+   GET_VREG rINST, rINSTq                  # rINST <- v[A]
+   .endif
+   \store    \rINST_reg, (%rax,%rdx,1)
+   // Volatile store: full fence via a locked no-op on the stack.
+   lock addl $$0, (%rsp)
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+3:
+   call art_quick_read_barrier_mark_reg00
+   jmp 4b
+6:
+   call art_quick_read_barrier_mark_reg00
+   jmp 5b
+.endm
+
+
+.macro OP_IPUT_INTERNAL rINST_reg="rINST", store="movl", wide="0":
+   movzbq  rINSTbl, %rcx                   # rcx <- BA
+   sarl    $$4, %ecx                       # ecx <- B
+   GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+   testl   %ecx, %ecx                      # is object null?
+   je      common_errNullObject
+   andb    $$0xf, rINSTbl                  # rINST <- A
+   .if \wide
+   GET_WIDE_VREG rINSTq, rINSTq              # rax<- fp[A]/fp[A+1]
+   .else
+   GET_VREG rINST, rINSTq                  # rINST <- v[A]
+   .endif
+   \store \rINST_reg, (%rcx,%rax,1)
+.endm
+
+// Helper for instance field put.
+//
+// Fast path: field offset from the thread cache in %rax. Slow path resolves
+// via nterp_get_instance_field_offset; a negative result marks a volatile
+// field — negate to recover the offset, store, then fence
+// (lock addl $0, (%rsp)).
+.macro OP_IPUT rINST_reg="rINST", store="movl", wide="0":
+   // Fast-path which gets the field from thread-local cache.
+   FETCH_FROM_THREAD_CACHE %rax, 2f
+1:
+   OP_IPUT_INTERNAL \rINST_reg, \store, \wide
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+   movq rSELF:THREAD_SELF_OFFSET, %rdi
+   movq 0(%rsp), %rsi
+   movq rPC, %rdx
+   EXPORT_PC
+   call nterp_get_instance_field_offset
+   testl %eax, %eax
+   jns 1b
+   // Volatile field: offset was returned negated.
+   negl %eax
+   OP_IPUT_INTERNAL \rINST_reg, \store, \wide
+   lock addl $$0, (%rsp)
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+.endm
+
+// Helper for instance field get.
+//
+// Fast path: field offset from the thread cache in %rax; decode vA/vB,
+// null-check vB and load the value with \load (movq when \wide). Slow path
+// resolves via nterp_get_instance_field_offset; a negative result marks a
+// volatile field — just strip the sign, no barrier needed on x86 loads.
+.macro OP_IGET load="movl", wide="0"
+   // Fast-path which gets the field from thread-local cache.
+   FETCH_FROM_THREAD_CACHE %rax, 2f
+1:
+   movl    rINST, %ecx                     # rcx <- BA
+   sarl    $$4, %ecx                       # ecx <- B
+   GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+   testl   %ecx, %ecx                      # is object null?
+   je      common_errNullObject
+   andb    $$0xf,rINSTbl                   # rINST <- A
+   .if \wide
+   movq (%rcx,%rax,1), %rax
+   SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
+   .else
+   \load (%rcx,%rax,1), %eax
+   SET_VREG %eax, rINSTq                   # fp[A] <- value
+   .endif
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+   movq rSELF:THREAD_SELF_OFFSET, %rdi
+   movq 0(%rsp), %rsi
+   movq rPC, %rdx
+   EXPORT_PC
+   call nterp_get_instance_field_offset
+   testl %eax, %eax
+   jns 1b
+   // Volatile field: offset was returned negated.
+   negl %eax
+   jmp 1b
+.endm
+
+%def entry():
+/*
+ * ArtMethod entry point.
+ *
+ * On entry:
+ *  rdi   ArtMethod* callee
+ *  rest  method parameters
+ */
+
+OAT_ENTRY ExecuteNterpImpl, EndExecuteNterpImpl
+    .cfi_startproc
+    .cfi_def_cfa rsp, 8
+    // Stack overflow probe: reads below %rsp so we fault early if the guard
+    // region is unreachable.
+    testq %rax, -STACK_OVERFLOW_RESERVED_BYTES(%rsp)
+    /* Spill callee save regs */
+    SPILL_ALL_CALLEE_SAVES
+
+    // TODO: Get shorty in a better way and remove below
+    // Preserve all argument registers (GPR + XMM) across the two runtime
+    // calls that fetch the shorty and the code item.
+    PUSH rdi
+    PUSH rsi
+    PUSH rdx
+    PUSH rcx
+    PUSH r8
+    PUSH r9
+
+    // Save xmm registers + alignment.
+    subq MACRO_LITERAL(8 * 8 + 8), %rsp
+    CFI_ADJUST_CFA_OFFSET(8 * 8 + 8)
+    movq %xmm0, 0(%rsp)
+    movq %xmm1, 8(%rsp)
+    movq %xmm2, 16(%rsp)
+    movq %xmm3, 24(%rsp)
+    movq %xmm4, 32(%rsp)
+    movq %xmm5, 40(%rsp)
+    movq %xmm6, 48(%rsp)
+    movq %xmm7, 56(%rsp)
+
+    // Save method in callee-save rbx.
+    movq %rdi, %rbx
+    call SYMBOL(NterpGetShorty)
+    // Save shorty in callee-save rbp.
+    movq %rax, %rbp
+    movq %rbx, %rdi
+    call SYMBOL(NterpGetCodeItem)
+    movq %rax, rPC
+
+    // Restore xmm registers + alignment.
+    movq 0(%rsp), %xmm0
+    movq 8(%rsp), %xmm1
+    movq 16(%rsp), %xmm2
+    movq 24(%rsp), %xmm3
+    movq 32(%rsp), %xmm4
+    movq 40(%rsp), %xmm5
+    movq 48(%rsp), %xmm6
+    movq 56(%rsp), %xmm7
+    addq MACRO_LITERAL(8 * 8 + 8), %rsp
+    CFI_ADJUST_CFA_OFFSET(-8 * 8 - 8)
+
+    POP r9
+    POP r8
+    POP rcx
+    POP rdx
+    POP rsi
+    POP rdi
+    // TODO: Get shorty in a better way and remove above
+
+    // Setup the stack for executing the method.
+    SETUP_STACK_FRAME rPC, rREFS, rFP, CFI_REFS
+
+    // Setup the parameters
+    movzwl CODE_ITEM_INS_SIZE_OFFSET(rPC), %r14d
+    testl %r14d, %r14d
+    je .Lxmm_setup_finished
+
+    // Incoming arguments occupy the LAST "ins" dex registers: compute their
+    // byte offset into the register/reference arrays.
+    subq %r14, %rbx
+    salq $$2, %rbx // rbx is now the offset for inputs into the registers array.
+
+    testl $$ART_METHOD_IS_STATIC_FLAG, ART_METHOD_ACCESS_FLAGS_OFFSET(%rdi)
+
+    // Available: rdi, r10, r14
+    // Note the leaq below don't change the flags.
+    leaq 1(%rbp), %r10  // shorty + 1  ; ie skip return arg character
+    leaq (rFP, %rbx, 1), %rdi
+    leaq (rREFS, %rbx, 1), %rbx
+    jne .Lhandle_static_method
+    // Instance method: store the receiver (esi) in both arrays first.
+    movl %esi, (%rdi)
+    movl %esi, (%rbx)
+    addq $$4, %rdi
+    addq $$4, %rbx
+    // NOTE(review): %r11 is consumed below by LOOP_OVER_INTs as the stack
+    // argument index, but no initialization of it is visible here (and it is
+    // caller-saved across the calls above) — verify where it is set.
+    addq $$4, %r11
+    movq $$0, %r14
+    jmp .Lcontinue_setup_gprs
+.Lhandle_static_method:
+    movq $$0, %r14
+    LOOP_OVER_SHORTY_STORING_GPRS rsi, esi, r10, r14, rdi, rbx, .Lgpr_setup_finished
+.Lcontinue_setup_gprs:
+    LOOP_OVER_SHORTY_STORING_GPRS rdx, edx, r10, r14, rdi, rbx, .Lgpr_setup_finished
+    LOOP_OVER_SHORTY_STORING_GPRS rcx, ecx, r10, r14, rdi, rbx, .Lgpr_setup_finished
+    LOOP_OVER_SHORTY_STORING_GPRS r8, r8d, r10, r14, rdi, rbx, .Lgpr_setup_finished
+    LOOP_OVER_SHORTY_STORING_GPRS r9, r9d, r10, r14, rdi, rbx, .Lgpr_setup_finished
+    LOOP_OVER_INTs r10, r14, rdi, rbx, r11, .Lgpr_setup_finished
+.Lgpr_setup_finished:
+    // Second pass over the shorty for the FP arguments.
+    leaq 1(%rbp), %r10  // shorty + 1  ; ie skip return arg character
+    movq $$0, %r14 // reset counter
+    LOOP_OVER_SHORTY_STORING_XMMS xmm0, r10, r14, rdi, .Lxmm_setup_finished
+    LOOP_OVER_SHORTY_STORING_XMMS xmm1, r10, r14, rdi, .Lxmm_setup_finished
+    LOOP_OVER_SHORTY_STORING_XMMS xmm2, r10, r14, rdi, .Lxmm_setup_finished
+    LOOP_OVER_SHORTY_STORING_XMMS xmm3, r10, r14, rdi, .Lxmm_setup_finished
+    LOOP_OVER_SHORTY_STORING_XMMS xmm4, r10, r14, rdi, .Lxmm_setup_finished
+    LOOP_OVER_SHORTY_STORING_XMMS xmm5, r10, r14, rdi, .Lxmm_setup_finished
+    LOOP_OVER_SHORTY_STORING_XMMS xmm6, r10, r14, rdi, .Lxmm_setup_finished
+    LOOP_OVER_SHORTY_STORING_XMMS xmm7, r10, r14, rdi, .Lxmm_setup_finished
+    LOOP_OVER_FPs r10, r14, rdi, r11, .Lxmm_setup_finished
+.Lxmm_setup_finished:
+    // Set the dex pc pointer.
+    addq $$CODE_ITEM_INSNS_OFFSET, rPC
+    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
+
+    // Set rIBASE
+    leaq artNterpAsmInstructionStart(%rip), rIBASE
+    /* start executing the instruction at rPC */
+    START_EXECUTING_INSTRUCTIONS
+    /* NOTE: no fallthrough */
+    // cfi info continues, and covers the whole nterp implementation.
+    END ExecuteNterpImpl
+
+%def opcode_pre():
+
+%def helpers():
+
+%def footer():
+/*
+ * ===========================================================================
+ *  Common subroutines and data
+ * ===========================================================================
+ */
+
+    .text
+    .align  2
+
+// Note: mterp also uses the common_* names below for helpers, but that's OK
+// as the C compiler compiled each interpreter separately.
+
+// Exception throwers. Each exports the PC and tail-calls a quick entrypoint
+// that raises the exception; none of these return.
+common_errDivideByZero:
+    EXPORT_PC
+    call art_quick_throw_div_zero
+
+common_errArrayIndex:
+    // On entry: edi = array, esi = index. Rearrange into the
+    // (index, length) argument order expected by the entrypoint.
+    EXPORT_PC
+    movl MIRROR_ARRAY_LENGTH_OFFSET(%edi), %eax
+    movl %esi, %edi
+    movl %eax, %esi
+    call art_quick_throw_array_bounds
+
+common_errNullObject:
+    EXPORT_PC
+    call art_quick_throw_null_pointer_exception
+
+// Shared invoke targets: each expands the common invoke macro with the flags
+// for its invoke kind. Note the same suffix string is used by the non-range
+// and range expansions of a kind, so the two macros' internal labels must
+// stay distinct.
+NterpCommonInvokeStatic:
+    COMMON_INVOKE_NON_RANGE is_static=1, is_interface=0, suffix="invokeStatic"
+
+NterpCommonInvokeStaticRange:
+    COMMON_INVOKE_RANGE is_static=1, is_interface=0, suffix="invokeStatic"
+
+NterpCommonInvokeInstance:
+    COMMON_INVOKE_NON_RANGE is_static=0, is_interface=0, suffix="invokeInstance"
+
+NterpCommonInvokeInstanceRange:
+    COMMON_INVOKE_RANGE is_static=0, is_interface=0, suffix="invokeInstance"
+
+NterpCommonInvokeInterface:
+    COMMON_INVOKE_NON_RANGE is_static=0, is_interface=1, suffix="invokeInterface"
+
+NterpCommonInvokeInterfaceRange:
+    COMMON_INVOKE_RANGE is_static=0, is_interface=1, suffix="invokeInterface"
+
+NterpCommonInvokePolymorphic:
+    COMMON_INVOKE_NON_RANGE is_static=0, is_interface=0, is_string_init=0, is_polymorphic=1, suffix="invokePolymorphic"
+
+NterpCommonInvokePolymorphicRange:
+    COMMON_INVOKE_RANGE is_static=0, is_interface=0, is_polymorphic=1, suffix="invokePolymorphic"
+
+NterpCommonInvokeCustom:
+    COMMON_INVOKE_NON_RANGE is_static=1, is_interface=0, is_string_init=0, is_polymorphic=0, is_custom=1, suffix="invokeCustom"
+
+NterpCommonInvokeCustomRange:
+    COMMON_INVOKE_RANGE is_static=1, is_interface=0, is_polymorphic=0, is_custom=1, suffix="invokeCustom"
+
+NterpHandleStringInit:
+   COMMON_INVOKE_NON_RANGE is_static=0, is_interface=0, is_string_init=1, suffix="stringInit"
+
+NterpHandleStringInitRange:
+   COMMON_INVOKE_RANGE is_static=0, is_interface=0, is_string_init=1, suffix="stringInit"
+
+// new-instance vA, class@CCCC
+// Fast path: class from the thread cache into rdi (marked through the read
+// barrier if active), then allocate via the thread-local object entrypoint.
+// Slow path resolves the class — and may allocate directly — via
+// nterp_get_class_or_allocate_object (result in eax).
+NterpNewInstance:
+   EXPORT_PC
+   // Fast-path which gets the class from thread-local cache.
+   FETCH_FROM_THREAD_CACHE %rdi, 2f
+   cmpq $$0, rSELF:THREAD_READ_BARRIER_MARK_REG00_OFFSET
+   jne 3f
+4:
+   callq *rSELF:THREAD_ALLOC_OBJECT_ENTRYPOINT_OFFSET
+1:
+   SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- value
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+   movq rSELF:THREAD_SELF_OFFSET, %rdi
+   movq 0(%rsp), %rsi
+   movq rPC, %rdx
+   call nterp_get_class_or_allocate_object
+   jmp 1b
+3:
+   // 07 is %rdi
+   call art_quick_read_barrier_mark_reg07
+   jmp 4b
+
+// new-array vA, vB, class@CCCC
+// Fast path: class from the thread cache into rdi, length from vB into esi,
+// then allocate via the thread-local array entrypoint. Slow path resolves the
+// class via nterp_get_class_or_allocate_object and re-enters the fast path.
+NterpNewArray:
+   /* new-array vA, vB, class@CCCC */
+   EXPORT_PC
+   // Fast-path which gets the class from thread-local cache.
+   FETCH_FROM_THREAD_CACHE %rdi, 2f
+   cmpq $$0, rSELF:THREAD_READ_BARRIER_MARK_REG00_OFFSET
+   jne 3f
+1:
+   movzbl  rINSTbl,%esi
+   sarl    $$4,%esi                          # esi<- B
+   GET_VREG %esi %rsi                        # esi<- vB (array length)
+   andb    $$0xf,rINSTbl                     # rINST<- A
+   callq *rSELF:THREAD_ALLOC_ARRAY_ENTRYPOINT_OFFSET
+   SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- value
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+   movq rSELF:THREAD_SELF_OFFSET, %rdi
+   movq 0(%rsp), %rsi
+   movq rPC, %rdx
+   call nterp_get_class_or_allocate_object
+   movq %rax, %rdi
+   jmp 1b
+3:
+   // 07 is %rdi
+   call art_quick_read_barrier_mark_reg07
+   jmp 1b
+
+// iput-object vA, vB, field@CCCC
+// Fast path: field offset from the thread cache; null-check vB, store the
+// reference, and mark the card for the holder object when storing non-null
+// (the card value is the table base's low byte, per ART's card convention).
+// Slow path: a negative offset from nterp_get_instance_field_offset marks a
+// volatile field — negate, store, then fence with lock addl $0, (%rsp).
+// Fix: the slow path exported the PC twice; one EXPORT_PC before the runtime
+// call suffices.
+NterpPutObjectInstanceField:
+   // Fast-path which gets the field from thread-local cache.
+   FETCH_FROM_THREAD_CACHE %rax, 2f
+1:
+   movzbq  rINSTbl, %rcx                   # rcx <- BA
+   sarl    $$4, %ecx                       # ecx <- B
+   GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+   testl   %ecx, %ecx                      # is object null?
+   je      common_errNullObject
+   andb    $$0xf, rINSTbl                  # rINST <- A
+   GET_VREG rINST, rINSTq                  # rINST <- v[A]
+   movl rINST, (%rcx,%rax,1)
+   testl rINST, rINST
+   je 4f
+   // Non-null stored reference: dirty the card for the holder.
+   movq rSELF:THREAD_CARD_TABLE_OFFSET, %rax
+   shrq $$CARD_TABLE_CARD_SHIFT, %rcx
+   movb %al, (%rax, %rcx, 1)
+4:
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+   EXPORT_PC
+   movq rSELF:THREAD_SELF_OFFSET, %rdi
+   movq 0(%rsp), %rsi
+   movq rPC, %rdx
+   call nterp_get_instance_field_offset
+   testl %eax, %eax
+   jns 1b
+   // Volatile field: offset was returned negated.
+   negl %eax
+   movzbq  rINSTbl, %rcx                   # rcx <- BA
+   sarl    $$4, %ecx                       # ecx <- B
+   GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+   testl   %ecx, %ecx                      # is object null?
+   je      common_errNullObject
+   andb    $$0xf, rINSTbl                  # rINST <- A
+   GET_VREG rINST, rINSTq                  # rINST <- v[A]
+   movl rINST, (%rcx,%rax,1)
+   testl rINST, rINST
+   je 5f
+   movq rSELF:THREAD_CARD_TABLE_OFFSET, %rax
+   shrq $$CARD_TABLE_CARD_SHIFT, %rcx
+   movb %al, (%rcx, %rax, 1)
+5:
+   // Volatile store: full fence via a locked no-op on the stack.
+   lock addl $$0, (%rsp)
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+// iget-object vA, vB, field@CCCC
+// Fast path: field offset from the thread cache; null-check vB, load the
+// reference, and run it through the read-barrier mark helper if the loaded
+// object is gray. Slow path: a negative offset from
+// nterp_get_instance_field_offset marks a volatile field — strip the sign,
+// no barrier needed for x86 loads.
+// Fix: the slow path exported the PC twice; one EXPORT_PC before the runtime
+// call suffices.
+NterpGetObjectInstanceField:
+   // Fast-path which gets the field from thread-local cache.
+   FETCH_FROM_THREAD_CACHE %rax, 2f
+1:
+   movl    rINST, %ecx                     # rcx <- BA
+   sarl    $$4, %ecx                       # ecx <- B
+   GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+   testl   %ecx, %ecx                      # is object null?
+   je      common_errNullObject
+   testb $$READ_BARRIER_TEST_VALUE, GRAY_BYTE_OFFSET(%ecx)
+   movl (%rcx,%rax,1), %eax
+   jnz 3f
+4:
+   andb    $$0xf,rINSTbl                   # rINST <- A
+   SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- value
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+   EXPORT_PC
+   movq rSELF:THREAD_SELF_OFFSET, %rdi
+   movq 0(%rsp), %rsi
+   movq rPC, %rdx
+   call nterp_get_instance_field_offset
+   testl %eax, %eax
+   jns 1b
+   // For volatile fields, we return a negative offset. Remove the sign
+   // and no need for any barrier thanks to the memory model.
+   negl %eax
+   jmp 1b
+3:
+   // reg00 is eax
+   call art_quick_read_barrier_mark_reg00
+   jmp 4b
+
+// sput-object vAA, field@BBBB
+// Fast path: ArtField* from the thread cache; store v[AA] into the declaring
+// class (marked through the read barrier if active) and dirty the class's
+// card when storing a non-null reference (the card value is the table base's
+// low byte, per ART's card convention). Slow path: bit 0 of the
+// nterp_get_static_field result marks a volatile field, which stores and then
+// fences with lock addl $0, (%rsp).
+NterpPutObjectStaticField:
+   // Fast-path which gets the field from thread-local cache.
+   FETCH_FROM_THREAD_CACHE %rax, 2f
+1:
+   movl ART_FIELD_OFFSET_OFFSET(%rax), %edx
+   movl ART_FIELD_DECLARING_CLASS_OFFSET(%rax), %eax
+   cmpq $$0, rSELF:THREAD_READ_BARRIER_MARK_REG00_OFFSET
+   jne 3f
+5:
+   GET_VREG %ecx, rINSTq
+   movl %ecx, (%eax, %edx, 1)
+   testl %ecx, %ecx
+   je 4f
+   movq rSELF:THREAD_CARD_TABLE_OFFSET, %rcx
+   shrq $$CARD_TABLE_CARD_SHIFT, %rax
+   movb %cl, (%rax, %rcx, 1)
+4:
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+   movq rSELF:THREAD_SELF_OFFSET, %rdi
+   movq 0(%rsp), %rsi
+   movq rPC, %rdx
+   EXPORT_PC
+   call nterp_get_static_field
+   // Bit 0 clear -> non-volatile field: take the fast path.
+   testq MACRO_LITERAL(1), %rax
+   je 1b
+   CLEAR_VOLATILE_MARKER %rax
+   movl ART_FIELD_OFFSET_OFFSET(%rax), %edx
+   movl ART_FIELD_DECLARING_CLASS_OFFSET(%rax), %eax
+   cmpq $$0, rSELF:THREAD_READ_BARRIER_MARK_REG00_OFFSET
+   jne 7f
+6:
+   movzbl rINSTbl, %ecx
+   GET_VREG %ecx, %rcx
+   movl %ecx, (%eax, %edx, 1)
+   testl %ecx, %ecx
+   je 8f
+   movq rSELF:THREAD_CARD_TABLE_OFFSET, %rcx
+   shrq $$CARD_TABLE_CARD_SHIFT, %rax
+   movb %cl, (%rax, %rcx, 1)
+8:
+   // Volatile store: full fence via a locked no-op on the stack.
+   lock addl $$0, (%rsp)
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+3:
+   call art_quick_read_barrier_mark_reg00
+   jmp 5b
+7:
+   call art_quick_read_barrier_mark_reg00
+   jmp 6b
+
+// sget-object vAA, field@BBBB
+// Fast path: ArtField* from the thread cache; load the reference from the
+// declaring class (marked through the read barrier if active) and mark the
+// loaded value if it is gray. Slow path: nterp_get_static_field may tag
+// bit 0 for volatile fields; it is simply cleared (no barrier needed for
+// x86 loads).
+NterpGetObjectStaticField:
+   // Fast-path which gets the field from thread-local cache.
+   FETCH_FROM_THREAD_CACHE %rax, 2f
+1:
+   movl ART_FIELD_OFFSET_OFFSET(%rax), %edx
+   movl ART_FIELD_DECLARING_CLASS_OFFSET(%rax), %eax
+   cmpq $$0, rSELF:THREAD_READ_BARRIER_MARK_REG00_OFFSET
+   jne 5f
+6:
+   testb $$READ_BARRIER_TEST_VALUE, GRAY_BYTE_OFFSET(%eax)
+   movl (%eax, %edx, 1), %eax
+   jnz 3f
+4:
+   SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- value
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+   movq rSELF:THREAD_SELF_OFFSET, %rdi
+   movq 0(%rsp), %rsi
+   movq rPC, %rdx
+   EXPORT_PC
+   call nterp_get_static_field
+   // Clear the volatile marker bit; no barrier needed on x86 loads.
+   andq $$-2, %rax
+   jmp 1b
+3:
+   call art_quick_read_barrier_mark_reg00
+   jmp 4b
+5:
+   call art_quick_read_barrier_mark_reg00
+   jmp 6b
+
+// Static field getters: each expands OP_SGET with the load matching the
+// field's width and signedness.
+NterpGetBooleanStaticField:
+  // Boolean is an unsigned 8-bit value: zero-extend, consistent with
+  // NterpGetBooleanInstanceField. (movsbl would sign-extend and could
+  // materialize negative values for non-0/1 storage.)
+  OP_SGET load="movzbl", wide=0
+
+NterpGetByteStaticField:
+  OP_SGET load="movsbl", wide=0
+
+NterpGetCharStaticField:
+  OP_SGET load="movzwl", wide=0
+
+NterpGetShortStaticField:
+  OP_SGET load="movswl", wide=0
+
+NterpGetWideStaticField:
+  OP_SGET load="movq", wide=1
+
+NterpGetIntStaticField:
+  OP_SGET load="movl", wide=0
+
+NterpPutStaticField:
+  OP_SPUT rINST_reg=rINST, store="movl", wide=0
+
+NterpPutBooleanStaticField:
+NterpPutByteStaticField:
+  OP_SPUT rINST_reg=rINSTbl, store="movb", wide=0
+
+NterpPutCharStaticField:
+NterpPutShortStaticField:
+  OP_SPUT rINST_reg=rINSTw, store="movw", wide=0
+
+NterpPutWideStaticField:
+  OP_SPUT rINST_reg=rINSTq, store="movq", wide=1
+
+NterpPutInstanceField:
+  OP_IPUT rINST_reg=rINST, store="movl", wide=0
+
+NterpPutBooleanInstanceField:
+NterpPutByteInstanceField:
+  OP_IPUT rINST_reg=rINSTbl, store="movb", wide=0
+
+NterpPutCharInstanceField:
+NterpPutShortInstanceField:
+  OP_IPUT rINST_reg=rINSTw, store="movw", wide=0
+
+NterpPutWideInstanceField:
+  OP_IPUT rINST_reg=rINSTq, store="movq", wide=1
+
+NterpGetBooleanInstanceField:
+  OP_IGET load="movzbl", wide=0
+
+NterpGetByteInstanceField:
+  OP_IGET load="movsbl", wide=0
+
+NterpGetCharInstanceField:
+  OP_IGET load="movzwl", wide=0
+
+NterpGetShortInstanceField:
+  OP_IGET load="movswl", wide=0
+
+NterpGetWideInstanceField:
+  OP_IGET load="movq", wide=1
+
+NterpGetInstanceField:
+  OP_IGET load="movl", wide=0
+
+NterpInstanceOf:
+    /* instance-of vA, vB, class@CCCC */
+   // Fast-path which gets the class from thread-local cache.
+   EXPORT_PC
+   FETCH_FROM_THREAD_CACHE %rsi, 2f
+   cmpq $$0, rSELF:THREAD_READ_BARRIER_MARK_REG00_OFFSET
+   jne 5f
+1:
+   movzbl  rINSTbl,%edi
+   sarl    $$4,%edi                          # edi<- B
+   GET_VREG %edi %rdi                        # edi<- vB (object)
+   andb    $$0xf,rINSTbl                     # rINST<- A
+   testl %edi, %edi
+   je 3f
+   call art_quick_instance_of
+   SET_VREG %eax, rINSTq            # fp[A] <- value
+4:
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+3:
+   SET_VREG %edi, rINSTq            # fp[A] <-0
+   jmp 4b
+2:
+   movq rSELF:THREAD_SELF_OFFSET, %rdi
+   movq 0(%rsp), %rsi
+   movq rPC, %rdx
+   call nterp_get_class_or_allocate_object
+   movq %rax, %rsi
+   jmp 1b
+5:
+   // 06 is %rsi
+   call art_quick_read_barrier_mark_reg06
+   jmp 1b
+
+NterpCheckCast:
+   // Fast-path which gets the class from thread-local cache.
+   EXPORT_PC
+   FETCH_FROM_THREAD_CACHE %rsi, 3f
+   cmpq $$0, rSELF:THREAD_READ_BARRIER_MARK_REG00_OFFSET
+   jne 4f
+1:
+   GET_VREG %edi, rINSTq
+   testl %edi, %edi
+   je 2f
+   call art_quick_check_instance_of
+2:
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+3:
+   movq rSELF:THREAD_SELF_OFFSET, %rdi
+   movq 0(%rsp), %rsi
+   movq rPC, %rdx
+   call nterp_get_class_or_allocate_object
+   movq %rax, %rsi
+   jmp 1b
+4:
+   // 06 is %rsi
+   call art_quick_read_barrier_mark_reg06
+   jmp 1b
+
+NterpHandleHotnessOverflow:
+    leaq (rPC, rINSTq, 2), %rsi
+    movq rFP, %rdx
+    call nterp_hot_method
+    testq %rax, %rax
+    jne 1f
+    leaq    (rPC, rINSTq, 2), rPC
+    FETCH_INST
+    GOTO_NEXT
+1:
+    // Drop the current frame.
+    movq -8(rREFS), %rsp
+    CFI_DEF_CFA(rsp, CALLEE_SAVES_SIZE)
+
+    // Setup the new frame
+    movq OSR_DATA_FRAME_SIZE(%rax), %rcx
+    // Given stack size contains all callee saved registers, remove them.
+    subq $$CALLEE_SAVES_SIZE, %rcx
+
+    // Remember CFA.
+    movq %rsp, %rbp
+    CFI_DEF_CFA_REGISTER(rbp)
+
+    subq %rcx, %rsp
+    movq %rsp, %rdi               // rdi := beginning of stack
+    leaq OSR_DATA_MEMORY(%rax), %rsi  // rsi := memory to copy
+    rep movsb                     // while (rcx--) { *rdi++ = *rsi++ }
+
+    // Fetch the native PC to jump to and save it in a callee-save register.
+    movq OSR_DATA_NATIVE_PC(%rax), %rbx
+
+    // Free the memory holding OSR Data.
+    movq %rax, %rdi
+    call free
+
+    // Jump to the compiled code.
+    jmp *%rbx
+
+NterpHandleInvokeInterfaceOnObjectMethodRange:
+   // First argument is the 'this' pointer.
+   movzwl 4(rPC), %r11d // arguments
+   movl (rFP, %r11, 4), %esi
+   // Note: if esi is null, this will be handled by our SIGSEGV handler.
+   movl MIRROR_OBJECT_CLASS_OFFSET(%esi), %edx
+   movq MIRROR_CLASS_VTABLE_OFFSET_64(%edx, %eax, 8), %rdi
+   jmp NterpCommonInvokeInstanceRange
+
+NterpHandleInvokeInterfaceOnObjectMethod:
+   // First argument is the 'this' pointer.
+   movzwl 4(rPC), %r11d // arguments
+   andq MACRO_LITERAL(0xf), %r11
+   movl (rFP, %r11, 4), %esi
+   // Note: if esi is null, this will be handled by our SIGSEGV handler.
+   movl MIRROR_OBJECT_CLASS_OFFSET(%esi), %edx
+   movq MIRROR_CLASS_VTABLE_OFFSET_64(%edx, %eax, 8), %rdi
+   jmp NterpCommonInvokeInstance
+
+// This is the logical end of ExecuteNterpImpl, where the frame info applies.
+// EndExecuteNterpImpl includes the methods below as we want the runtime to
+// see them as part of the Nterp PCs.
+.cfi_endproc
+
+nterp_to_nterp_static_non_range:
+    .cfi_startproc
+    .cfi_def_cfa rsp, 8
+    SETUP_STACK_FOR_INVOKE
+    SETUP_NON_RANGE_ARGUMENTS_AND_EXECUTE is_static=1, is_string_init=0
+    .cfi_endproc
+
+nterp_to_nterp_string_init_non_range:
+    .cfi_startproc
+    .cfi_def_cfa rsp, 8
+    SETUP_STACK_FOR_INVOKE
+    SETUP_NON_RANGE_ARGUMENTS_AND_EXECUTE is_static=0, is_string_init=1
+    .cfi_endproc
+
+nterp_to_nterp_instance_non_range:
+    .cfi_startproc
+    .cfi_def_cfa rsp, 8
+    SETUP_STACK_FOR_INVOKE
+    SETUP_NON_RANGE_ARGUMENTS_AND_EXECUTE is_static=0, is_string_init=0
+    .cfi_endproc
+
+nterp_to_nterp_static_range:
+    .cfi_startproc
+    .cfi_def_cfa rsp, 8
+    SETUP_STACK_FOR_INVOKE
+    SETUP_RANGE_ARGUMENTS_AND_EXECUTE is_static=1
+    .cfi_endproc
+
+nterp_to_nterp_instance_range:
+    .cfi_startproc
+    .cfi_def_cfa rsp, 8
+    SETUP_STACK_FOR_INVOKE
+    SETUP_RANGE_ARGUMENTS_AND_EXECUTE is_static=0
+    .cfi_endproc
+
+nterp_to_nterp_string_init_range:
+    .cfi_startproc
+    .cfi_def_cfa rsp, 8
+    SETUP_STACK_FOR_INVOKE
+    SETUP_RANGE_ARGUMENTS_AND_EXECUTE is_static=0, is_string_init=1
+    .cfi_endproc
+
+// This is the end of PCs contained by the OatQuickMethodHeader created for the interpreter
+// entry point.
+    FUNCTION_TYPE(EndExecuteNterpImpl)
+    ASM_HIDDEN SYMBOL(EndExecuteNterpImpl)
+    .global SYMBOL(EndExecuteNterpImpl)
+SYMBOL(EndExecuteNterpImpl):
+
+// Entrypoints into runtime.
+NTERP_TRAMPOLINE nterp_get_static_field, NterpGetStaticField
+NTERP_TRAMPOLINE nterp_get_instance_field_offset, NterpGetInstanceFieldOffset
+NTERP_TRAMPOLINE nterp_filled_new_array, NterpFilledNewArray
+NTERP_TRAMPOLINE nterp_filled_new_array_range, NterpFilledNewArrayRange
+NTERP_TRAMPOLINE nterp_get_class_or_allocate_object, NterpGetClassOrAllocateObject
+NTERP_TRAMPOLINE nterp_get_method, NterpGetMethod
+NTERP_TRAMPOLINE nterp_hot_method, NterpHotMethod
+NTERP_TRAMPOLINE nterp_load_object, NterpLoadObject
+
+// gen_mterp.py will inline the following definitions
+// within [ExecuteNterpImpl, EndExecuteNterpImpl).
+%def instruction_end():
+
+    FUNCTION_TYPE(artNterpAsmInstructionEnd)
+    ASM_HIDDEN SYMBOL(artNterpAsmInstructionEnd)
+    .global SYMBOL(artNterpAsmInstructionEnd)
+SYMBOL(artNterpAsmInstructionEnd):
+    // artNterpAsmInstructionEnd is used as landing pad for exception handling.
+    FETCH_INST
+    GOTO_NEXT
+
+%def instruction_start():
+
+    FUNCTION_TYPE(artNterpAsmInstructionStart)
+    ASM_HIDDEN SYMBOL(artNterpAsmInstructionStart)
+    .global SYMBOL(artNterpAsmInstructionStart)
+SYMBOL(artNterpAsmInstructionStart) = .L_op_nop
+    .text
+
+%def opcode_start():
+    ENTRY nterp_${opcode}
+%def opcode_end():
+    END nterp_${opcode}
+%def helper_start(name):
+    ENTRY ${name}
+%def helper_end(name):
+    END ${name}
diff --git a/runtime/interpreter/mterp/x86_64ng/object.S b/runtime/interpreter/mterp/x86_64ng/object.S
new file mode 100644
index 0000000..cb231e3
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64ng/object.S
@@ -0,0 +1,204 @@
+%def op_check_cast():
+  jmp NterpCheckCast
+
+%def op_iget_boolean():
+   jmp NterpGetBooleanInstanceField
+
+%def op_iget_boolean_quick():
+%  op_iget_quick(load="movsbl")
+
+%def op_iget_byte():
+   jmp NterpGetByteInstanceField
+
+%def op_iget_byte_quick():
+%  op_iget_quick(load="movsbl")
+
+%def op_iget_char():
+   jmp NterpGetCharInstanceField
+
+%def op_iget_char_quick():
+%  op_iget_quick(load="movzwl")
+
+%def op_iget_object():
+    jmp NterpGetObjectInstanceField
+
+%def op_iget_object_quick():
+   movzwq  2(rPC), %rax                    # eax <- field byte offset
+   movl    rINST, %ecx                     # rcx <- BA
+   sarl    $$4, %ecx                       # ecx <- B
+   GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+   testl   %ecx, %ecx                      # is object null?
+   je      common_errNullObject
+   testb $$READ_BARRIER_TEST_VALUE, GRAY_BYTE_OFFSET(%ecx)
+   movl (%rcx,%rax,1), %eax
+   jnz 2f
+1:
+   andb    $$0xf,rINSTbl                   # rINST <- A
+   SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- value
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+2:
+   // reg00 is eax
+   call art_quick_read_barrier_mark_reg00
+   jmp 1b
+
+%def op_iget_quick(load="movl", wide="0"):
+    /* For: iget-quick, iget-boolean-quick, iget-byte-quick, iget-char-quick, iget-short-quick, iget-wide-quick */
+    /* op vA, vB, offset@CCCC */
+    movl    rINST, %ecx                     # rcx <- BA
+    sarl    $$4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+    movzwq  2(rPC), %rax                    # eax <- field byte offset
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    andb    $$0xf,rINSTbl                   # rINST <- A
+    .if $wide
+    movq (%rcx,%rax,1), %rax
+    SET_WIDE_VREG %rax, rINSTq              # fp[A] <- value
+    .else
+    ${load} (%rcx,%rax,1), %eax
+    SET_VREG %eax, rINSTq                   # fp[A] <- value
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iget_short():
+   jmp NterpGetShortInstanceField
+
+%def op_iget_short_quick():
+%  op_iget_quick(load="movswl")
+
+%def op_iget_wide():
+   jmp NterpGetWideInstanceField
+
+%def op_iget_wide_quick():
+%  op_iget_quick(load="movq", wide="1")
+
+%def op_instance_of():
+   jmp NterpInstanceOf
+
+%def op_iget():
+   jmp NterpGetInstanceField
+
+%def op_iput():
+   jmp NterpPutInstanceField
+
+%def op_iput_boolean():
+   jmp NterpPutBooleanInstanceField
+
+%def op_iput_boolean_quick():
+%  op_iput_quick(reg="rINSTbl", store="movb")
+
+%def op_iput_byte():
+   jmp NterpPutByteInstanceField
+
+%def op_iput_byte_quick():
+%  op_iput_quick(reg="rINSTbl", store="movb")
+
+%def op_iput_char():
+   jmp NterpPutCharInstanceField
+
+%def op_iput_char_quick():
+%  op_iput_quick(reg="rINSTw", store="movw")
+
+%def op_iput_object():
+    jmp NterpPutObjectInstanceField
+
+%def op_iput_object_quick():
+   movzwq  2(rPC), %rax                    # eax <- field byte offset
+   movzbq  rINSTbl, %rcx                   # rcx <- BA
+   sarl    $$4, %ecx                       # ecx <- B
+   GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+   testl   %ecx, %ecx                      # is object null?
+   je      common_errNullObject
+   andb    $$0xf, rINSTbl                  # rINST <- A
+   GET_VREG rINST, rINSTq                  # rINST <- v[A]
+   movl rINST, (%rcx,%rax,1)
+   testl rINST, rINST
+   je 1f
+   movq rSELF:THREAD_CARD_TABLE_OFFSET, %rax
+   shrq $$CARD_TABLE_CARD_SHIFT, %rcx
+   movb %al, (%rcx, %rax, 1)
+1:
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iput_quick(reg="rINST", store="movl"):
+    /* For: iput-quick, iput-boolean-quick, iput-byte-quick, iput-char-quick, iput-short-quick */
+    /* op vA, vB, offset@CCCC */
+    movzbq  rINSTbl, %rcx                   # rcx <- BA
+    sarl    $$4, %ecx                       # ecx <- B
+    GET_VREG %ecx, %rcx                     # vB (object we're operating on)
+    testl   %ecx, %ecx                      # is object null?
+    je      common_errNullObject
+    andb    $$0xf, rINSTbl                  # rINST <- A
+    GET_VREG rINST, rINSTq                  # rINST <- v[A]
+    movzwq  2(rPC), %rax                    # rax <- field byte offset
+    ${store}    ${reg}, (%rcx,%rax,1)
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_iput_short():
+   jmp NterpPutShortInstanceField
+
+%def op_iput_short_quick():
+%  op_iput_quick(reg="rINSTw", store="movw")
+
+%def op_iput_wide():
+   jmp NterpPutWideInstanceField
+
+%def op_iput_wide_quick():
+    /* iput-wide-quick vA, vB, offset@CCCC */
+    movzbq    rINSTbl, %rcx                 # rcx<- BA
+    sarl      $$4, %ecx                     # ecx<- B
+    GET_VREG  %ecx, %rcx                    # vB (object we're operating on)
+    testl     %ecx, %ecx                    # is object null?
+    je        common_errNullObject
+    movzwq    2(rPC), %rax                  # rax<- field byte offset
+    leaq      (%rcx,%rax,1), %rcx           # ecx<- Address of 64-bit target
+    andb      $$0xf, rINSTbl                # rINST<- A
+    GET_WIDE_VREG %rax, rINSTq              # rax<- fp[A]/fp[A+1]
+    movq      %rax, (%rcx)                  # obj.field <- rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_sget(load="movl", wide="0"):
+   jmp NterpGetIntStaticField
+
+%def op_sget_boolean():
+   jmp NterpGetBooleanStaticField
+
+%def op_sget_byte():
+   jmp NterpGetByteStaticField
+
+%def op_sget_char():
+   jmp NterpGetCharStaticField
+
+%def op_sget_object():
+   jmp NterpGetObjectStaticField
+
+%def op_sget_short():
+   jmp NterpGetShortStaticField
+
+%def op_sget_wide():
+   jmp NterpGetWideStaticField
+
+%def op_sput():
+   jmp NterpPutStaticField
+
+%def op_sput_boolean():
+   jmp NterpPutBooleanStaticField
+
+%def op_sput_byte():
+   jmp NterpPutByteStaticField
+
+%def op_sput_char():
+   jmp NterpPutCharStaticField
+
+%def op_sput_object():
+   jmp NterpPutObjectStaticField
+
+%def op_sput_short():
+   jmp NterpPutShortStaticField
+
+%def op_sput_wide():
+   jmp NterpPutWideStaticField
+
+%def op_new_instance():
+   // The routine is too big to fit in a handler, so jump to it.
+   jmp NterpNewInstance
diff --git a/runtime/interpreter/mterp/x86_64ng/other.S b/runtime/interpreter/mterp/x86_64ng/other.S
new file mode 100644
index 0000000..7d82c3b
--- /dev/null
+++ b/runtime/interpreter/mterp/x86_64ng/other.S
@@ -0,0 +1,273 @@
+%def unused():
+    int3
+
+%def op_const():
+    /* const vAA, #+BBBBbbbb */
+    movl    2(rPC), %eax                    # grab all 32 bits at once
+    SET_VREG %eax, rINSTq                   # vAA<- eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_const_16():
+    /* const/16 vAA, #+BBBB */
+    movswl  2(rPC), %ecx                    # ecx <- ssssBBBB
+    SET_VREG %ecx, rINSTq                   # vAA <- ssssBBBB
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_const_4():
+    /* const/4 vA, #+B */
+    movsbl  rINSTbl, %eax                   # eax <- ssssssBA (sign-extended BA byte)
+    andl    MACRO_LITERAL(0xf), rINST       # rINST <- A
+    sarl    MACRO_LITERAL(4), %eax
+    SET_VREG %eax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_const_high16():
+    /* const/high16 vAA, #+BBBB0000 */
+    movzwl  2(rPC), %eax                    # eax <- 0000BBBB
+    sall    MACRO_LITERAL(16), %eax         # eax <- BBBB0000
+    SET_VREG %eax, rINSTq                   # vAA <- eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_const_object(jumbo="0", helper="nterp_load_object"):
+   // Fast-path which gets the object from thread-local cache.
+   FETCH_FROM_THREAD_CACHE %rax, 2f
+   cmpq MACRO_LITERAL(0), rSELF:THREAD_READ_BARRIER_MARK_REG00_OFFSET
+   jne 3f
+1:
+   SET_VREG_OBJECT %eax, rINSTq            # vAA <- value
+   .if $jumbo
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+   .else
+   ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+   .endif
+2:
+   EXPORT_PC
+   movq rSELF:THREAD_SELF_OFFSET, %rdi
+   movq 0(%rsp), %rsi
+   movq rPC, %rdx
+   call SYMBOL($helper)
+   jmp 1b
+3:
+   // 00 is %rax
+   call art_quick_read_barrier_mark_reg00
+   jmp 1b
+
+%def op_const_class():
+%  op_const_object(jumbo="0", helper="nterp_get_class_or_allocate_object")
+
+%def op_const_method_handle():
+%  op_const_object(jumbo="0")
+
+%def op_const_method_type():
+%  op_const_object(jumbo="0")
+
+%def op_const_string():
+   /* const/string vAA, String@BBBB */
+%  op_const_object(jumbo="0")
+
+%def op_const_string_jumbo():
+   /* const/string vAA, String@BBBBBBBB */
+%  op_const_object(jumbo="1")
+
+%def op_const_wide():
+    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
+    movq    2(rPC), %rax                    # rax <- HHHHhhhhBBBBbbbb
+    SET_WIDE_VREG %rax, rINSTq
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 5
+
+%def op_const_wide_16():
+    /* const-wide/16 vAA, #+BBBB */
+    movswq  2(rPC), %rax                    # rax <- ssssssssssssBBBB
+    SET_WIDE_VREG %rax, rINSTq              # store
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_const_wide_32():
+    /* const-wide/32 vAA, #+BBBBbbbb */
+    movslq   2(rPC), %rax                   # eax <- ssssssssBBBBbbbb
+    SET_WIDE_VREG %rax, rINSTq              # store
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_const_wide_high16():
+    /* const-wide/high16 vAA, #+BBBB000000000000 */
+    movzwq  2(rPC), %rax                    # eax <- 000000000000BBBB
+    salq    $$48, %rax                      # rax <- BBBB000000000000
+    SET_WIDE_VREG %rax, rINSTq              # v[AA+0] <- eax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_monitor_enter():
+/*
+ * Synchronize on an object.
+ */
+    /* monitor-enter vAA */
+    EXPORT_PC
+    GET_VREG %edi, rINSTq
+    call art_quick_lock_object
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_monitor_exit():
+/*
+ * Unlock an object.
+ *
+ * Exceptions that occur when unlocking a monitor need to appear as
+ * if they happened at the following instruction.  See the Dalvik
+ * instruction spec.
+ */
+    /* monitor-exit vAA */
+    EXPORT_PC
+    GET_VREG %edi, rINSTq
+    call art_quick_unlock_object
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move(is_object="0"):
+    /* for move, move-object, long-to-int */
+    /* op vA, vB */
+    movl    rINST, %eax                     # eax <- BA
+    andb    $$0xf, %al                      # eax <- A
+    shrl    $$4, rINST                      # rINST <- B
+    GET_VREG %edx, rINSTq
+    .if $is_object
+    SET_VREG_OBJECT %edx, %rax              # fp[A] <- fp[B]
+    .else
+    SET_VREG %edx, %rax                     # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_16(is_object="0"):
+    /* for: move/16, move-object/16 */
+    /* op vAAAA, vBBBB */
+    movzwq  4(rPC), %rcx                    # ecx <- BBBB
+    movzwq  2(rPC), %rax                    # eax <- AAAA
+    GET_VREG %edx, %rcx
+    .if $is_object
+    SET_VREG_OBJECT %edx, %rax              # fp[A] <- fp[B]
+    .else
+    SET_VREG %edx, %rax                     # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_move_exception():
+    /* move-exception vAA */
+    movl    rSELF:THREAD_EXCEPTION_OFFSET, %eax
+    SET_VREG_OBJECT %eax, rINSTq            # fp[AA] <- exception object
+    movl    $$0, rSELF:THREAD_EXCEPTION_OFFSET
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_from16(is_object="0"):
+    /* for: move/from16, move-object/from16 */
+    /* op vAA, vBBBB */
+    movzwq  2(rPC), %rax                    # eax <- BBBB
+    GET_VREG %edx, %rax                     # edx <- fp[BBBB]
+    .if $is_object
+    SET_VREG_OBJECT %edx, rINSTq            # fp[A] <- fp[B]
+    .else
+    SET_VREG %edx, rINSTq                   # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_move_object():
+%  op_move(is_object="1")
+
+%def op_move_object_16():
+%  op_move_16(is_object="1")
+
+%def op_move_object_from16():
+%  op_move_from16(is_object="1")
+
+%def op_move_result(is_object="0"):
+    /* for: move-result, move-result-object */
+    /* op vAA */
+    .if $is_object
+    SET_VREG_OBJECT %eax, rINSTq            # fp[A] <- fp[B]
+    .else
+    SET_VREG %eax, rINSTq                   # fp[A] <- fp[B]
+    .endif
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_result_object():
+%  op_move_result(is_object="1")
+
+%def op_move_result_wide():
+    /* move-result-wide vAA */
+    SET_WIDE_VREG %rax, rINSTq                   # v[AA] <- rax
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_wide():
+    /* move-wide vA, vB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    movl    rINST, %ecx                     # ecx <- BA
+    sarl    $$4, rINST                      # rINST <- B
+    andb    $$0xf, %cl                      # ecx <- A
+    GET_WIDE_VREG %rdx, rINSTq              # rdx <- v[B]
+    SET_WIDE_VREG %rdx, %rcx                # v[A] <- rdx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_move_wide_16():
+    /* move-wide/16 vAAAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    movzwq  4(rPC), %rcx                    # ecx<- BBBB
+    movzwq  2(rPC), %rax                    # eax<- AAAA
+    GET_WIDE_VREG %rdx, %rcx                # rdx <- v[B]
+    SET_WIDE_VREG %rdx, %rax                # v[A] <- rdx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 3
+
+%def op_move_wide_from16():
+    /* move-wide/from16 vAA, vBBBB */
+    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
+    movzwl  2(rPC), %ecx                    # ecx <- BBBB
+    GET_WIDE_VREG %rdx, %rcx                # rdx <- v[B]
+    SET_WIDE_VREG %rdx, rINSTq              # v[A] <- rdx
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
+
+%def op_nop():
+    ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
+
+%def op_unused_3e():
+%  unused()
+
+%def op_unused_3f():
+%  unused()
+
+%def op_unused_40():
+%  unused()
+
+%def op_unused_41():
+%  unused()
+
+%def op_unused_42():
+%  unused()
+
+%def op_unused_43():
+%  unused()
+
+%def op_unused_79():
+%  unused()
+
+%def op_unused_7a():
+%  unused()
+
+%def op_unused_f3():
+%  unused()
+
+%def op_unused_f4():
+%  unused()
+
+%def op_unused_f5():
+%  unused()
+
+%def op_unused_f6():
+%  unused()
+
+%def op_unused_f7():
+%  unused()
+
+%def op_unused_f8():
+%  unused()
+
+%def op_unused_f9():
+%  unused()
+
+%def op_unused_fc():
+%  unused()
+
+%def op_unused_fd():
+%  unused()
diff --git a/runtime/interpreter/shadow_frame-inl.h b/runtime/interpreter/shadow_frame-inl.h
index 7eaad59..799b2d2 100644
--- a/runtime/interpreter/shadow_frame-inl.h
+++ b/runtime/interpreter/shadow_frame-inl.h
@@ -33,9 +33,7 @@
   ReadBarrier::MaybeAssertToSpaceInvariant(val.Ptr());
   uint32_t* vreg = &vregs_[i];
   reinterpret_cast<StackReference<mirror::Object>*>(vreg)->Assign(val);
-  if (HasReferenceArray()) {
-    References()[i].Assign(val);
-  }
+  References()[i].Assign(val);
 }
 
 }  // namespace art
diff --git a/runtime/interpreter/shadow_frame.h b/runtime/interpreter/shadow_frame.h
index 3f6b729..8cb2b33 100644
--- a/runtime/interpreter/shadow_frame.h
+++ b/runtime/interpreter/shadow_frame.h
@@ -57,6 +57,11 @@
     kForcePopFrame  = 1 << 1,
     // We have been asked to re-execute the last instruction.
     kForceRetryInst = 1 << 2,
+    // Mark that method exit events should be skipped for this frame (used by instrumentation and
+    // debuggers to keep track of required events).
+    kSkipMethodExitEvents = 1 << 3,
+    // Used to suppress exception events caused by other instrumentation events.
+    kSkipNextExceptionEvent = 1 << 4,
   };
 
  public:
@@ -92,12 +97,6 @@
 
   ~ShadowFrame() {}
 
-  // TODO(iam): Clean references array up since they're always there,
-  // we don't need to do conditionals.
-  bool HasReferenceArray() const {
-    return true;
-  }
-
   uint32_t NumberOfVRegs() const {
     return number_of_vregs_;
   }
@@ -152,7 +151,6 @@
   }
 
   uint32_t* GetShadowRefAddr(size_t i) {
-    DCHECK(HasReferenceArray());
     DCHECK_LT(i, NumberOfVRegs());
     return &vregs_[i + NumberOfVRegs()];
   }
@@ -189,7 +187,6 @@
   mirror::Object* GetVRegReference(size_t i) const REQUIRES_SHARED(Locks::mutator_lock_) {
     DCHECK_LT(i, NumberOfVRegs());
     mirror::Object* ref;
-    DCHECK(HasReferenceArray());
     ref = References()[i].AsMirrorPtr();
     ReadBarrier::MaybeAssertToSpaceInvariant(ref);
     if (kVerifyFlags & kVerifyReads) {
@@ -209,9 +206,7 @@
     *reinterpret_cast<int32_t*>(vreg) = val;
     // This is needed for moving collectors since these can update the vreg references if they
     // happen to agree with references in the reference array.
-    if (kMovingCollector && HasReferenceArray()) {
-      References()[i].Clear();
-    }
+    References()[i].Clear();
   }
 
   void SetVRegFloat(size_t i, float val) {
@@ -220,9 +215,7 @@
     *reinterpret_cast<float*>(vreg) = val;
     // This is needed for moving collectors since these can update the vreg references if they
     // happen to agree with references in the reference array.
-    if (kMovingCollector && HasReferenceArray()) {
-      References()[i].Clear();
-    }
+    References()[i].Clear();
   }
 
   void SetVRegLong(size_t i, int64_t val) {
@@ -232,10 +225,8 @@
     *reinterpret_cast<unaligned_int64*>(vreg) = val;
     // This is needed for moving collectors since these can update the vreg references if they
     // happen to agree with references in the reference array.
-    if (kMovingCollector && HasReferenceArray()) {
-      References()[i].Clear();
-      References()[i + 1].Clear();
-    }
+    References()[i].Clear();
+    References()[i + 1].Clear();
   }
 
   void SetVRegDouble(size_t i, double val) {
@@ -245,10 +236,8 @@
     *reinterpret_cast<unaligned_double*>(vreg) = val;
     // This is needed for moving collectors since these can update the vreg references if they
     // happen to agree with references in the reference array.
-    if (kMovingCollector && HasReferenceArray()) {
-      References()[i].Clear();
-      References()[i + 1].Clear();
-    }
+    References()[i].Clear();
+    References()[i + 1].Clear();
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -271,14 +260,8 @@
   mirror::Object* GetThisObject(uint16_t num_ins) const REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const {
-    if (HasReferenceArray()) {
-      return ((&References()[0] <= shadow_frame_entry_obj) &&
-              (shadow_frame_entry_obj <= (&References()[NumberOfVRegs() - 1])));
-    } else {
-      uint32_t* shadow_frame_entry = reinterpret_cast<uint32_t*>(shadow_frame_entry_obj);
-      return ((&vregs_[0] <= shadow_frame_entry) &&
-              (shadow_frame_entry <= (&vregs_[NumberOfVRegs() - 1])));
-    }
+    return ((&References()[0] <= shadow_frame_entry_obj) &&
+            (shadow_frame_entry_obj <= (&References()[NumberOfVRegs() - 1])));
   }
 
   LockCountData& GetLockCountData() {
@@ -335,7 +318,7 @@
                                             ArtMethod* method,
                                             uint32_t dex_pc,
                                             void* memory) {
-    return new (memory) ShadowFrame(num_vregs, link, method, dex_pc, true);
+    return new (memory) ShadowFrame(num_vregs, link, method, dex_pc);
   }
 
   const uint16_t* GetDexPCPtr() {
@@ -374,6 +357,22 @@
     UpdateFrameFlag(enable, FrameFlags::kForceRetryInst);
   }
 
+  bool GetSkipMethodExitEvents() const {
+    return GetFrameFlag(FrameFlags::kSkipMethodExitEvents);
+  }
+
+  void SetSkipMethodExitEvents(bool enable) {
+    UpdateFrameFlag(enable, FrameFlags::kSkipMethodExitEvents);
+  }
+
+  bool GetSkipNextExceptionEvent() const {
+    return GetFrameFlag(FrameFlags::kSkipNextExceptionEvent);
+  }
+
+  void SetSkipNextExceptionEvent(bool enable) {
+    UpdateFrameFlag(enable, FrameFlags::kSkipNextExceptionEvent);
+  }
+
   void CheckConsistentVRegs() const {
     if (kIsDebugBuild) {
       // A shadow frame visible to GC requires the following rule: for a given vreg,
@@ -386,8 +385,7 @@
   }
 
  private:
-  ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method,
-              uint32_t dex_pc, bool has_reference_array)
+  ShadowFrame(uint32_t num_vregs, ShadowFrame* link, ArtMethod* method, uint32_t dex_pc)
       : link_(link),
         method_(method),
         result_register_(nullptr),
@@ -398,13 +396,7 @@
         cached_hotness_countdown_(0),
         hotness_countdown_(0),
         frame_flags_(0) {
-    // TODO(iam): Remove this parameter, it's an an artifact of portable removal
-    DCHECK(has_reference_array);
-    if (has_reference_array) {
-      memset(vregs_, 0, num_vregs * (sizeof(uint32_t) + sizeof(StackReference<mirror::Object>)));
-    } else {
-      memset(vregs_, 0, num_vregs * sizeof(uint32_t));
-    }
+    memset(vregs_, 0, num_vregs * (sizeof(uint32_t) + sizeof(StackReference<mirror::Object>)));
   }
 
   void UpdateFrameFlag(bool enable, FrameFlags flag) {
@@ -420,7 +412,6 @@
   }
 
   const StackReference<mirror::Object>* References() const {
-    DCHECK(HasReferenceArray());
     const uint32_t* vreg_end = &vregs_[NumberOfVRegs()];
     return reinterpret_cast<const StackReference<mirror::Object>*>(vreg_end);
   }
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 9b905ee..5986982 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -839,6 +839,11 @@
     return;
   }
 
+  if (Runtime::Current()->IsActiveTransaction() && !CheckWriteConstraint(self, dst_obj)) {
+    DCHECK(self->IsExceptionPending());
+    return;
+  }
+
   // Type checking.
   ObjPtr<mirror::Class> src_type = shadow_frame->GetVRegReference(arg_offset)->GetClass()->
       GetComponentType();
@@ -1358,7 +1363,8 @@
       hs.NewHandle(shadow_frame->GetVRegReference(arg_offset + 2)->AsCharArray()));
   Runtime* runtime = Runtime::Current();
   gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
-  result->SetL(mirror::String::AllocFromCharArray<true>(self, char_count, h_char_array, offset, allocator));
+  result->SetL(
+      mirror::String::AllocFromCharArray(self, char_count, h_char_array, offset, allocator));
 }
 
 // This allows creating the new style of String objects during compilation.
@@ -1373,8 +1379,8 @@
   Handle<mirror::String> h_string(hs.NewHandle(to_copy));
   Runtime* runtime = Runtime::Current();
   gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
-  result->SetL(mirror::String::AllocFromString<true>(self, h_string->GetLength(), h_string, 0,
-                                                     allocator));
+  result->SetL(
+      mirror::String::AllocFromString(self, h_string->GetLength(), h_string, 0, allocator));
 }
 
 void UnstartedRuntime::UnstartedStringFastSubstring(
@@ -1390,19 +1396,21 @@
   DCHECK_LE(start + length, h_string->GetLength());
   Runtime* runtime = Runtime::Current();
   gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
-  result->SetL(mirror::String::AllocFromString<true>(self, length, h_string, start, allocator));
+  result->SetL(mirror::String::AllocFromString(self, length, h_string, start, allocator));
 }
 
 // This allows getting the char array for new style of String objects during compilation.
 void UnstartedRuntime::UnstartedStringToCharArray(
     Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjPtr<mirror::String> string = shadow_frame->GetVRegReference(arg_offset)->AsString();
+  StackHandleScope<1> hs(self);
+  Handle<mirror::String> string =
+      hs.NewHandle(shadow_frame->GetVRegReference(arg_offset)->AsString());
   if (string == nullptr) {
     AbortTransactionOrFail(self, "String.charAt with null object");
     return;
   }
-  result->SetL(string->ToCharArray(self));
+  result->SetL(mirror::String::ToCharArray(string, self));
 }
 
 // This allows statically initializing ConcurrentHashMap and SynchronousQueue.
@@ -1458,6 +1466,10 @@
   bool success;
   // Check whether we're in a transaction, call accordingly.
   if (Runtime::Current()->IsActiveTransaction()) {
+    if (!CheckWriteConstraint(self, obj)) {
+      DCHECK(self->IsExceptionPending());
+      return;
+    }
     success = obj->CasFieldStrongSequentiallyConsistent64<true>(MemberOffset(offset),
                                                                 expectedValue,
                                                                 newValue);
@@ -1479,7 +1491,7 @@
   }
   int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
   mirror::Object* expected_value = shadow_frame->GetVRegReference(arg_offset + 4);
-  mirror::Object* newValue = shadow_frame->GetVRegReference(arg_offset + 5);
+  mirror::Object* new_value = shadow_frame->GetVRegReference(arg_offset + 5);
 
   // Must use non transactional mode.
   if (kUseReadBarrier) {
@@ -1500,15 +1512,19 @@
   bool success;
   // Check whether we're in a transaction, call accordingly.
   if (Runtime::Current()->IsActiveTransaction()) {
+    if (!CheckWriteConstraint(self, obj) || !CheckWriteValueConstraint(self, new_value)) {
+      DCHECK(self->IsExceptionPending());
+      return;
+    }
     success = obj->CasFieldObject<true>(MemberOffset(offset),
                                         expected_value,
-                                        newValue,
+                                        new_value,
                                         CASMode::kStrong,
                                         std::memory_order_seq_cst);
   } else {
     success = obj->CasFieldObject<false>(MemberOffset(offset),
                                          expected_value,
-                                         newValue,
+                                         new_value,
                                          CASMode::kStrong,
                                          std::memory_order_seq_cst);
   }
@@ -1541,6 +1557,10 @@
   int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
   mirror::Object* value = shadow_frame->GetVRegReference(arg_offset + 4);
   if (Runtime::Current()->IsActiveTransaction()) {
+    if (!CheckWriteConstraint(self, obj) || !CheckWriteValueConstraint(self, value)) {
+      DCHECK(self->IsExceptionPending());
+      return;
+    }
     obj->SetFieldObjectVolatile<true>(MemberOffset(offset), value);
   } else {
     obj->SetFieldObjectVolatile<false>(MemberOffset(offset), value);
@@ -1557,12 +1577,16 @@
     return;
   }
   int64_t offset = shadow_frame->GetVRegLong(arg_offset + 2);
-  mirror::Object* newValue = shadow_frame->GetVRegReference(arg_offset + 4);
+  mirror::Object* new_value = shadow_frame->GetVRegReference(arg_offset + 4);
   std::atomic_thread_fence(std::memory_order_release);
   if (Runtime::Current()->IsActiveTransaction()) {
-    obj->SetFieldObject<true>(MemberOffset(offset), newValue);
+    if (!CheckWriteConstraint(self, obj) || !CheckWriteValueConstraint(self, new_value)) {
+      DCHECK(self->IsExceptionPending());
+      return;
+    }
+    obj->SetFieldObject<true>(MemberOffset(offset), new_value);
   } else {
-    obj->SetFieldObject<false>(MemberOffset(offset), newValue);
+    obj->SetFieldObject<false>(MemberOffset(offset), new_value);
   }
 }
 
@@ -1720,11 +1744,8 @@
       runtime->GetClassLinker()->FindArrayClass(self, element_class->AsClass());
   DCHECK(array_class != nullptr);
   gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
-  result->SetL(mirror::Array::Alloc<true, true>(self,
-                                                array_class,
-                                                length,
-                                                array_class->GetComponentSizeShift(),
-                                                allocator));
+  result->SetL(mirror::Array::Alloc</*kIsInstrumented=*/ true, /*kFillUsable=*/ true>(
+      self, array_class, length, array_class->GetComponentSizeShift(), allocator));
 }
 
 void UnstartedRuntime::UnstartedJNIVMStackGetCallingClassLoader(
@@ -1799,7 +1820,9 @@
 void UnstartedRuntime::UnstartedJNIObjectInternalClone(
     Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver,
     uint32_t* args ATTRIBUTE_UNUSED, JValue* result) {
-  result->SetL(receiver->Clone(self));
+  StackHandleScope<1> hs(self);
+  Handle<mirror::Object> h_receiver = hs.NewHandle(receiver);
+  result->SetL(mirror::Object::Clone(h_receiver, self));
 }
 
 void UnstartedRuntime::UnstartedJNIObjectNotifyAll(
@@ -1891,6 +1914,10 @@
   jint newValue = args[4];
   bool success;
   if (Runtime::Current()->IsActiveTransaction()) {
+    if (!CheckWriteConstraint(self, obj)) {
+      DCHECK(self->IsExceptionPending());
+      return;
+    }
     success = obj->CasField32<true>(MemberOffset(offset),
                                     expectedValue,
                                     newValue,
@@ -1932,11 +1959,15 @@
     return;
   }
   jlong offset = (static_cast<uint64_t>(args[2]) << 32) | args[1];
-  ObjPtr<mirror::Object> newValue = reinterpret_cast32<mirror::Object*>(args[3]);
+  ObjPtr<mirror::Object> new_value = reinterpret_cast32<mirror::Object*>(args[3]);
   if (Runtime::Current()->IsActiveTransaction()) {
-    obj->SetFieldObject<true>(MemberOffset(offset), newValue);
+    if (!CheckWriteConstraint(self, obj) || !CheckWriteValueConstraint(self, new_value)) {
+      DCHECK(self->IsExceptionPending());
+      return;
+    }
+    obj->SetFieldObject<true>(MemberOffset(offset), new_value);
   } else {
-    obj->SetFieldObject<false>(MemberOffset(offset), newValue);
+    obj->SetFieldObject<false>(MemberOffset(offset), new_value);
   }
 }
 
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index 495039c..4429f63 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -785,19 +785,19 @@
     {
       JValue result;
       tmp->SetVReg(0, static_cast<int32_t>(i));
-      Runtime::Current()->EnterTransactionMode();
+      EnterTransactionMode();
       UnstartedCharacterToLowerCase(self, tmp.get(), &result, 0);
-      ASSERT_TRUE(Runtime::Current()->IsTransactionAborted());
-      Runtime::Current()->ExitTransactionMode();
+      ASSERT_TRUE(IsTransactionAborted());
+      ExitTransactionMode();
       ASSERT_TRUE(self->IsExceptionPending());
     }
     {
       JValue result;
       tmp->SetVReg(0, static_cast<int32_t>(i));
-      Runtime::Current()->EnterTransactionMode();
+      EnterTransactionMode();
       UnstartedCharacterToUpperCase(self, tmp.get(), &result, 0);
-      ASSERT_TRUE(Runtime::Current()->IsTransactionAborted());
-      Runtime::Current()->ExitTransactionMode();
+      ASSERT_TRUE(IsTransactionAborted());
+      ExitTransactionMode();
       ASSERT_TRUE(self->IsExceptionPending());
     }
   }
@@ -805,19 +805,19 @@
     {
       JValue result;
       tmp->SetVReg(0, static_cast<int32_t>(i));
-      Runtime::Current()->EnterTransactionMode();
+      EnterTransactionMode();
       UnstartedCharacterToLowerCase(self, tmp.get(), &result, 0);
-      ASSERT_TRUE(Runtime::Current()->IsTransactionAborted());
-      Runtime::Current()->ExitTransactionMode();
+      ASSERT_TRUE(IsTransactionAborted());
+      ExitTransactionMode();
       ASSERT_TRUE(self->IsExceptionPending());
     }
     {
       JValue result;
       tmp->SetVReg(0, static_cast<int32_t>(i));
-      Runtime::Current()->EnterTransactionMode();
+      EnterTransactionMode();
       UnstartedCharacterToUpperCase(self, tmp.get(), &result, 0);
-      ASSERT_TRUE(Runtime::Current()->IsTransactionAborted());
-      Runtime::Current()->ExitTransactionMode();
+      ASSERT_TRUE(IsTransactionAborted());
+      ExitTransactionMode();
       ASSERT_TRUE(self->IsExceptionPending());
     }
   }
@@ -980,10 +980,10 @@
     UniqueDeoptShadowFramePtr caller_frame = CreateShadowFrame(10, nullptr, caller_method, 0);
     shadow_frame->SetLink(caller_frame.get());
 
-    Runtime::Current()->EnterTransactionMode();
+    EnterTransactionMode();
     UnstartedThreadLocalGet(self, shadow_frame.get(), &result, 0);
-    ASSERT_TRUE(Runtime::Current()->IsTransactionAborted());
-    Runtime::Current()->ExitTransactionMode();
+    ASSERT_TRUE(IsTransactionAborted());
+    ExitTransactionMode();
     ASSERT_TRUE(self->IsExceptionPending());
     self->ClearException();
 
@@ -1050,10 +1050,10 @@
   PrepareForAborts();
 
   {
-    Runtime::Current()->EnterTransactionMode();
+    EnterTransactionMode();
     UnstartedThreadCurrentThread(self, shadow_frame.get(), &result, 0);
-    ASSERT_TRUE(Runtime::Current()->IsTransactionAborted());
-    Runtime::Current()->ExitTransactionMode();
+    ASSERT_TRUE(IsTransactionAborted());
+    ExitTransactionMode();
     ASSERT_TRUE(self->IsExceptionPending());
     self->ClearException();
   }
@@ -1120,7 +1120,7 @@
       CHECK(name_string != nullptr);
 
       if (in_transaction) {
-        Runtime::Current()->EnterTransactionMode();
+        EnterTransactionMode();
       }
       CHECK(!self->IsExceptionPending());
 
@@ -1132,13 +1132,13 @@
       } else {
         CHECK(self->IsExceptionPending()) << name;
         if (in_transaction) {
-          ASSERT_TRUE(Runtime::Current()->IsTransactionAborted());
+          ASSERT_TRUE(IsTransactionAborted());
         }
         self->ClearException();
       }
 
       if (in_transaction) {
-        Runtime::Current()->ExitTransactionMode();
+        ExitTransactionMode();
       }
     }
   }
diff --git a/runtime/intrinsics_list.h b/runtime/intrinsics_list.h
index 57e81a7..fc4734e 100644
--- a/runtime/intrinsics_list.h
+++ b/runtime/intrinsics_list.h
@@ -165,6 +165,15 @@
   V(MemoryPokeIntNative, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kCanThrow, "Llibcore/io/Memory;", "pokeIntNative", "(JI)V") \
   V(MemoryPokeLongNative, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kCanThrow, "Llibcore/io/Memory;", "pokeLongNative", "(JJ)V") \
   V(MemoryPokeShortNative, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kCanThrow, "Llibcore/io/Memory;", "pokeShortNative", "(JS)V") \
+  V(FP16Ceil, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Llibcore/util/FP16;", "ceil", "(S)S") \
+  V(FP16Floor, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Llibcore/util/FP16;", "floor", "(S)S") \
+  V(FP16Rint, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Llibcore/util/FP16;", "rint", "(S)S") \
+  V(FP16ToFloat, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Llibcore/util/FP16;", "toFloat", "(S)F") \
+  V(FP16ToHalf, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Llibcore/util/FP16;", "toHalf", "(F)S") \
+  V(FP16Greater, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Llibcore/util/FP16;", "greater", "(SS)Z") \
+  V(FP16GreaterEquals, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Llibcore/util/FP16;", "greaterEquals", "(SS)Z") \
+  V(FP16Less, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Llibcore/util/FP16;", "less", "(SS)Z") \
+  V(FP16LessEquals, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Llibcore/util/FP16;", "lessEquals", "(SS)Z") \
   V(StringCharAt, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "charAt", "(I)C") \
   V(StringCompareTo, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "compareTo", "(Ljava/lang/String;)I") \
   V(StringEquals, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/lang/String;", "equals", "(Ljava/lang/Object;)Z") \
@@ -181,7 +190,16 @@
   V(StringBufferAppend, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuffer;", "append", "(Ljava/lang/String;)Ljava/lang/StringBuffer;") \
   V(StringBufferLength, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kNoThrow, "Ljava/lang/StringBuffer;", "length", "()I") \
   V(StringBufferToString, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuffer;", "toString", "()Ljava/lang/String;") \
-  V(StringBuilderAppend, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuilder;", "append", "(Ljava/lang/String;)Ljava/lang/StringBuilder;") \
+  V(StringBuilderAppendObject, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuilder;", "append", "(Ljava/lang/Object;)Ljava/lang/StringBuilder;") \
+  V(StringBuilderAppendString, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuilder;", "append", "(Ljava/lang/String;)Ljava/lang/StringBuilder;") \
+  V(StringBuilderAppendCharSequence, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuilder;", "append", "(Ljava/lang/CharSequence;)Ljava/lang/StringBuilder;") \
+  V(StringBuilderAppendCharArray, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuilder;", "append", "([C)Ljava/lang/StringBuilder;") \
+  V(StringBuilderAppendBoolean, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuilder;", "append", "(Z)Ljava/lang/StringBuilder;") \
+  V(StringBuilderAppendChar, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuilder;", "append", "(C)Ljava/lang/StringBuilder;") \
+  V(StringBuilderAppendInt, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuilder;", "append", "(I)Ljava/lang/StringBuilder;") \
+  V(StringBuilderAppendLong, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuilder;", "append", "(J)Ljava/lang/StringBuilder;") \
+  V(StringBuilderAppendFloat, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuilder;", "append", "(F)Ljava/lang/StringBuilder;") \
+  V(StringBuilderAppendDouble, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuilder;", "append", "(D)Ljava/lang/StringBuilder;") \
   V(StringBuilderLength, kVirtual, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/StringBuilder;", "length", "()I") \
   V(StringBuilderToString, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Ljava/lang/StringBuilder;", "toString", "()Ljava/lang/String;") \
   V(UnsafeCASInt, kVirtual, kNeedsEnvironmentOrCache, kAllSideEffects, kCanThrow, "Lsun/misc/Unsafe;", "compareAndSwapInt", "(Ljava/lang/Object;JII)Z") \
diff --git a/runtime/java_frame_root_info.cc b/runtime/java_frame_root_info.cc
index dd3be5d..9a0f184 100644
--- a/runtime/java_frame_root_info.cc
+++ b/runtime/java_frame_root_info.cc
@@ -24,7 +24,18 @@
   const StackVisitor* visitor = stack_visitor_;
   CHECK(visitor != nullptr);
   os << "Type=" << GetType() << " thread_id=" << GetThreadId() << " location=" <<
-      visitor->DescribeLocation() << " vreg=" << vreg_;
+      visitor->DescribeLocation() << " vreg=";
+  if (vreg_ == JavaFrameRootInfo::kUnknownVreg) {
+    os << "Unknown";
+  } else if (vreg_ == JavaFrameRootInfo::kImpreciseVreg) {
+    os << "imprecise";
+  } else if (vreg_ == JavaFrameRootInfo::kProxyReferenceArgument) {
+    os << "Proxy reference argument";
+  } else if (vreg_ == JavaFrameRootInfo::kMethodDeclaringClass) {
+    os << "method declaring class";
+  } else {
+    os << vreg_;
+  }
 }
 
 }  // namespace art
diff --git a/runtime/java_frame_root_info.h b/runtime/java_frame_root_info.h
index 8141ea2..c21eee1 100644
--- a/runtime/java_frame_root_info.h
+++ b/runtime/java_frame_root_info.h
@@ -18,6 +18,7 @@
 #define ART_RUNTIME_JAVA_FRAME_ROOT_INFO_H_
 
 #include <iosfwd>
+#include <limits>
 
 #include "base/locks.h"
 #include "base/macros.h"
@@ -29,6 +30,20 @@
 
 class JavaFrameRootInfo final : public RootInfo {
  public:
+  static_assert(std::numeric_limits<size_t>::max() > std::numeric_limits<uint16_t>::max(),
+                "No extra space in vreg to store meta-data");
+  // Unable to determine what register number the root is from.
+  static constexpr size_t kUnknownVreg = -1;
+  // The register number for the root might be determinable but we did not attempt to find that
+  // information.
+  static constexpr size_t kImpreciseVreg = -2;
+  // The root is from the declaring class of the current method.
+  static constexpr size_t kMethodDeclaringClass = -3;
+  // The root is from the argument to a Proxy invoke.
+  static constexpr size_t kProxyReferenceArgument = -4;
+  // The maximum precise vreg number
+  static constexpr size_t kMaxVReg = std::numeric_limits<uint16_t>::max();
+
   JavaFrameRootInfo(uint32_t thread_id, const StackVisitor* stack_visitor, size_t vreg)
      : RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) {
   }
diff --git a/runtime/jdwp/README.txt b/runtime/jdwp/README.txt
deleted file mode 100644
index da25fb1..0000000
--- a/runtime/jdwp/README.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Java Debug Wire Protocol support
-
-This is a reasonably complete implementation, but only messages that are
-actually generated by debuggers have been implemented.  The reasoning
-behind this is that it's better to leave a call unimplemented than have
-something that appears implemented but has never been tested.
-
-An attempt has been made to keep the JDWP implementation distinct from the
-runtime, so that the code might be useful in other projects. Once you get
-multiple simultaneous events and debugger requests with thread suspension
-bouncing around, though, it's difficult to keep things "generic".
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
deleted file mode 100644
index bf1d665..0000000
--- a/runtime/jdwp/jdwp.h
+++ /dev/null
@@ -1,511 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_JDWP_JDWP_H_
-#define ART_RUNTIME_JDWP_JDWP_H_
-
-#include "base/atomic.h"
-#include "base/logging.h"  // For VLOG.
-#include "base/mutex.h"
-#include "jdwp/jdwp_bits.h"
-#include "jdwp/jdwp_constants.h"
-#include "jdwp/jdwp_expand_buf.h"
-#include "obj_ptr.h"
-
-#include <pthread.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <string.h>
-#include <vector>
-
-struct iovec;
-
-namespace art {
-
-class ArtField;
-class ArtMethod;
-union JValue;
-class Thread;
-
-namespace mirror {
-class Class;
-class Object;
-class Throwable;
-}  // namespace mirror
-class Thread;
-
-namespace JDWP {
-
-/*
- * Fundamental types.
- *
- * ObjectId and RefTypeId must be the same size.
- * Its OK to change MethodId and FieldId sizes as long as the size is <= 8 bytes.
- * Note that ArtFields are 64 bit pointers on 64 bit targets. So this one must remain 8 bytes.
- */
-typedef uint64_t FieldId;     /* static or instance field */
-typedef uint64_t MethodId;    /* any kind of method, including constructors */
-typedef uint64_t ObjectId;    /* any object (threadID, stringID, arrayID, etc) */
-typedef uint64_t RefTypeId;   /* like ObjectID, but unique for Class objects */
-typedef uint64_t FrameId;     /* short-lived stack frame ID */
-
-ObjectId ReadObjectId(const uint8_t** pBuf);
-
-static inline void SetFieldId(uint8_t* buf, FieldId val) { return Set8BE(buf, val); }
-static inline void SetMethodId(uint8_t* buf, MethodId val) { return Set8BE(buf, val); }
-static inline void SetObjectId(uint8_t* buf, ObjectId val) { return Set8BE(buf, val); }
-static inline void SetRefTypeId(uint8_t* buf, RefTypeId val) { return Set8BE(buf, val); }
-static inline void SetFrameId(uint8_t* buf, FrameId val) { return Set8BE(buf, val); }
-static inline void expandBufAddFieldId(ExpandBuf* pReply, FieldId id) { expandBufAdd8BE(pReply, id); }
-static inline void expandBufAddMethodId(ExpandBuf* pReply, MethodId id) { expandBufAdd8BE(pReply, id); }
-static inline void expandBufAddObjectId(ExpandBuf* pReply, ObjectId id) { expandBufAdd8BE(pReply, id); }
-static inline void expandBufAddRefTypeId(ExpandBuf* pReply, RefTypeId id) { expandBufAdd8BE(pReply, id); }
-static inline void expandBufAddFrameId(ExpandBuf* pReply, FrameId id) { expandBufAdd8BE(pReply, id); }
-
-struct EventLocation {
-  ArtMethod* method;
-  uint32_t dex_pc;
-};
-
-/*
- * Holds a JDWP "location".
- */
-struct JdwpLocation {
-  JdwpTypeTag type_tag;
-  RefTypeId class_id;
-  MethodId method_id;
-  uint64_t dex_pc;
-};
-std::ostream& operator<<(std::ostream& os, const JdwpLocation& rhs)
-    REQUIRES_SHARED(Locks::mutator_lock_);
-bool operator==(const JdwpLocation& lhs, const JdwpLocation& rhs);
-bool operator!=(const JdwpLocation& lhs, const JdwpLocation& rhs);
-
-/*
- * How we talk to the debugger.
- */
-enum JdwpTransportType {
-  kJdwpTransportNone = 0,
-  kJdwpTransportUnknown,      // Unknown tranpsort
-  kJdwpTransportSocket,       // transport=dt_socket
-  kJdwpTransportAndroidAdb,   // transport=dt_android_adb
-};
-std::ostream& operator<<(std::ostream& os, const JdwpTransportType& rhs);
-
-struct JdwpOptions {
-  JdwpTransportType transport = kJdwpTransportNone;
-  bool server = false;
-  bool suspend = false;
-  std::string host = "";
-  uint16_t port = static_cast<uint16_t>(-1);
-};
-
-bool operator==(const JdwpOptions& lhs, const JdwpOptions& rhs);
-
-bool ParseJdwpOptions(const std::string& options, JdwpOptions* jdwp_options);
-
-struct JdwpEvent;
-class JdwpNetStateBase;
-struct ModBasket;
-class Request;
-
-/*
- * State for JDWP functions.
- */
-struct JdwpState {
-  /*
-   * Perform one-time initialization.
-   *
-   * Among other things, this binds to a port to listen for a connection from
-   * the debugger.
-   *
-   * Returns a newly-allocated JdwpState struct on success, or nullptr on failure.
-   *
-   * NO_THREAD_SAFETY_ANALYSIS since we can't annotate that we do not have
-   * state->thread_start_lock_ held.
-   */
-  static JdwpState* Create(const JdwpOptions* options)
-      REQUIRES(!Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
-
-  ~JdwpState();
-
-  /*
-   * Returns "true" if a debugger or DDM is connected.
-   */
-  bool IsActive();
-
-  /**
-   * Returns the Thread* for the JDWP daemon thread.
-   */
-  Thread* GetDebugThread();
-
-  /*
-   * Get time, in milliseconds, since the last debugger activity.
-   */
-  int64_t LastDebuggerActivity();
-
-  void ExitAfterReplying(int exit_status);
-
-  // Acquires/releases the JDWP synchronization token for the debugger
-  // thread (command handler) so no event thread posts an event while
-  // it processes a command. This must be called only from the debugger
-  // thread.
-  void AcquireJdwpTokenForCommand() REQUIRES(!jdwp_token_lock_);
-  void ReleaseJdwpTokenForCommand() REQUIRES(!jdwp_token_lock_);
-
-  // Acquires/releases the JDWP synchronization token for the event thread
-  // so no other thread (debugger thread or event thread) interleaves with
-  // it when posting an event. This must NOT be called from the debugger
-  // thread, only event thread.
-  void AcquireJdwpTokenForEvent(ObjectId threadId) REQUIRES(!jdwp_token_lock_);
-  void ReleaseJdwpTokenForEvent() REQUIRES(!jdwp_token_lock_);
-
-  /*
-   * These notify the debug code that something interesting has happened.  This
-   * could be a thread starting or ending, an exception, or an opportunity
-   * for a breakpoint.  These calls do not mean that an event the debugger
-   * is interested has happened, just that something has happened that the
-   * debugger *might* be interested in.
-   *
-   * The item of interest may trigger multiple events, some or all of which
-   * are grouped together in a single response.
-   *
-   * The event may cause the current thread or all threads (except the
-   * JDWP support thread) to be suspended.
-   */
-
-  /*
-   * The VM has finished initializing.  Only called when the debugger is
-   * connected at the time initialization completes.
-   */
-  void PostVMStart() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_);
-
-  /*
-   * A location of interest has been reached.  This is used for breakpoints,
-   * single-stepping, and method entry/exit.  (JDWP requires that these four
-   * events are grouped together in a single response.)
-   *
-   * In some cases "*pLoc" will just have a method and class name, e.g. when
-   * issuing a MethodEntry on a native method.
-   *
-   * "eventFlags" indicates the types of events that have occurred.
-   *
-   * "returnValue" is non-null for MethodExit events only.
-   */
-  void PostLocationEvent(const EventLocation* pLoc, mirror::Object* thisPtr, int eventFlags,
-                         const JValue* returnValue)
-     REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  /*
-   * A field of interest has been accessed or modified. This is used for field access and field
-   * modification events.
-   *
-   * "fieldValue" is non-null for field modification events only.
-   * "is_modification" is true for field modification, false for field access.
-   */
-  void PostFieldEvent(const EventLocation* pLoc, ArtField* field, mirror::Object* thisPtr,
-                      const JValue* fieldValue, bool is_modification)
-      REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  /*
-   * An exception has been thrown.
-   *
-   * Pass in a zeroed-out "*pCatchLoc" if the exception wasn't caught.
-   */
-  void PostException(const EventLocation* pThrowLoc, mirror::Throwable* exception_object,
-                     const EventLocation* pCatchLoc, mirror::Object* thisPtr)
-      REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  /*
-   * A thread has started or stopped.
-   */
-  void PostThreadChange(Thread* thread, bool start)
-      REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  /*
-   * Class has been prepared.
-   */
-  void PostClassPrepare(mirror::Class* klass)
-      REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  /*
-   * The VM is about to stop.
-   */
-  bool PostVMDeath();
-
-  // Called if/when we realize we're talking to DDMS.
-  void NotifyDdmsActive() REQUIRES_SHARED(Locks::mutator_lock_);
-
-
-  void SetupChunkHeader(uint32_t type, size_t data_len, size_t header_size, uint8_t* out_header);
-
-  /*
-   * Send up a chunk of DDM data.
-   */
-  void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  bool HandlePacket() REQUIRES(!shutdown_lock_, !jdwp_token_lock_);
-
-  void SendRequest(ExpandBuf* pReq);
-
-  void ResetState()
-      REQUIRES(!event_list_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  /* atomic ops to get next serial number */
-  uint32_t NextRequestSerial();
-  uint32_t NextEventSerial();
-
-  void Run()
-      REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_, !thread_start_lock_,
-               !attach_lock_, !event_list_lock_);
-
-  /*
-   * Register an event by adding it to the event list.
-   *
-   * "*pEvent" must be storage allocated with jdwpEventAlloc().  The caller
-   * may discard its pointer after calling this.
-   */
-  JdwpError RegisterEvent(JdwpEvent* pEvent)
-      REQUIRES(!event_list_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  /*
-   * Unregister an event, given the requestId.
-   */
-  void UnregisterEventById(uint32_t requestId)
-      REQUIRES(!event_list_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  void UnregisterLocationEventsOnClass(ObjPtr<mirror::Class> klass)
-      REQUIRES(!event_list_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  /*
-   * Unregister all events.
-   */
-  void UnregisterAll()
-      REQUIRES(!event_list_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
- private:
-  explicit JdwpState(const JdwpOptions* options);
-  size_t ProcessRequest(Request* request, ExpandBuf* pReply, bool* skip_reply)
-      REQUIRES(!jdwp_token_lock_);
-  bool InvokeInProgress();
-  bool IsConnected();
-  void SuspendByPolicy(JdwpSuspendPolicy suspend_policy, JDWP::ObjectId thread_self_id)
-      REQUIRES(!Locks::mutator_lock_);
-  void SendRequestAndPossiblySuspend(ExpandBuf* pReq, JdwpSuspendPolicy suspend_policy,
-                                     ObjectId threadId)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_);
-  void CleanupMatchList(const std::vector<JdwpEvent*>& match_list)
-      REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-  void EventFinish(ExpandBuf* pReq);
-  bool FindMatchingEvents(JdwpEventKind eventKind, const ModBasket& basket,
-                          std::vector<JdwpEvent*>* match_list)
-      REQUIRES(!event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-  void FindMatchingEventsLocked(JdwpEventKind eventKind, const ModBasket& basket,
-                                std::vector<JdwpEvent*>* match_list)
-      REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-  void UnregisterEvent(JdwpEvent* pEvent)
-      REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-  void SendBufferedRequest(uint32_t type, const std::vector<iovec>& iov);
-
-  /*
-   * When we hit a debugger event that requires suspension, it's important
-   * that we wait for the thread to suspend itself before processing any
-   * additional requests. Otherwise, if the debugger immediately sends a
-   * "resume thread" command, the resume might arrive before the thread has
-   * suspended itself.
-   *
-   * It's also important no event thread suspends while we process a command
-   * from the debugger. Otherwise we could post an event ("thread death")
-   * before sending the reply of the command being processed ("resume") and
-   * cause bad synchronization with the debugger.
-   *
-   * The thread wanting "exclusive" access to the JDWP world must call the
-   * SetWaitForJdwpToken method before processing a command from the
-   * debugger or sending an event to the debugger.
-   * Once the command is processed or the event thread has posted its event,
-   * it must call the ClearWaitForJdwpToken method to allow another thread
-   * to do JDWP stuff.
-   *
-   * Therefore the main JDWP handler loop will wait for the event thread
-   * suspension before processing the next command. Once the event thread
-   * has suspended itself and cleared the token, the JDWP handler continues
-   * processing commands. This works in the suspend-all case because the
-   * event thread doesn't suspend itself until everything else has suspended.
-   *
-   * It's possible that multiple threads could encounter thread-suspending
-   * events at the same time, so we grab a mutex in the SetWaitForJdwpToken
-   * call, and release it in the ClearWaitForJdwpToken call.
-   */
-  void SetWaitForJdwpToken(ObjectId threadId) REQUIRES(!jdwp_token_lock_);
-  void ClearWaitForJdwpToken() REQUIRES(!jdwp_token_lock_);
-
- public:  // TODO: fix privacy
-  const JdwpOptions* options_;
-
- private:
-  /* wait for creation of the JDWP thread */
-  Mutex thread_start_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  ConditionVariable thread_start_cond_ GUARDED_BY(thread_start_lock_);
-
-  pthread_t pthread_;
-  Thread* thread_;
-
-  volatile int32_t debug_thread_started_ GUARDED_BY(thread_start_lock_);
-  ObjectId debug_thread_id_;
-
- private:
-  bool run;
-
- public:  // TODO: fix privacy
-  JdwpNetStateBase* netState;
-
- private:
-  // For wait-for-debugger.
-  Mutex attach_lock_ ACQUIRED_AFTER(thread_start_lock_);
-  ConditionVariable attach_cond_ GUARDED_BY(attach_lock_);
-
-  // Time of last debugger activity, in milliseconds.
-  Atomic<int64_t> last_activity_time_ms_;
-
-  // Global counters and a mutex to protect them.
-  AtomicInteger request_serial_;
-  AtomicInteger event_serial_;
-
-  // Linked list of events requested by the debugger (breakpoints, class prep, etc).
-  Mutex event_list_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_BEFORE(Locks::breakpoint_lock_);
-  JdwpEvent* event_list_ GUARDED_BY(event_list_lock_);
-  size_t event_list_size_ GUARDED_BY(event_list_lock_);  // Number of elements in event_list_.
-
-  // Used to synchronize JDWP command handler thread and event threads so only one
-  // thread does JDWP stuff at a time. This prevent from interleaving command handling
-  // and event notification. Otherwise we could receive a "resume" command for an
-  // event thread that is not suspended yet, or post a "thread death" or event "VM death"
-  // event before sending the reply of the "resume" command that caused it.
-  Mutex jdwp_token_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  ConditionVariable jdwp_token_cond_ GUARDED_BY(jdwp_token_lock_);
-  ObjectId jdwp_token_owner_thread_id_;
-
-  bool ddm_is_active_;
-
-  // Used for VirtualMachine.Exit command handling.
-  bool should_exit_;
-  int exit_status_;
-
-  // Used to synchronize runtime shutdown with JDWP command handler thread.
-  // When the runtime shuts down, it needs to stop JDWP command handler thread by closing the
-  // JDWP connection. However, if the JDWP thread is processing a command, it needs to wait
-  // for the command to finish so we can send its reply before closing the connection.
-  Mutex shutdown_lock_ ACQUIRED_AFTER(event_list_lock_);
-  ConditionVariable shutdown_cond_ GUARDED_BY(shutdown_lock_);
-  bool processing_request_ GUARDED_BY(shutdown_lock_);
-};
-
-std::string DescribeField(const FieldId& field_id) REQUIRES_SHARED(Locks::mutator_lock_);
-std::string DescribeMethod(const MethodId& method_id) REQUIRES_SHARED(Locks::mutator_lock_);
-std::string DescribeRefTypeId(const RefTypeId& ref_type_id) REQUIRES_SHARED(Locks::mutator_lock_);
-
-class Request {
- public:
-  Request(const uint8_t* bytes, uint32_t available);
-  ~Request();
-
-  std::string ReadUtf8String();
-
-  // Helper function: read a variable-width value from the input buffer.
-  uint64_t ReadValue(size_t width);
-
-  int32_t ReadSigned32(const char* what);
-
-  uint32_t ReadUnsigned32(const char* what);
-
-  FieldId ReadFieldId() REQUIRES_SHARED(Locks::mutator_lock_);
-
-  MethodId ReadMethodId() REQUIRES_SHARED(Locks::mutator_lock_);
-
-  ObjectId ReadObjectId(const char* specific_kind);
-
-  ObjectId ReadArrayId();
-
-  ObjectId ReadObjectId();
-
-  ObjectId ReadThreadId();
-
-  ObjectId ReadThreadGroupId();
-
-  RefTypeId ReadRefTypeId() REQUIRES_SHARED(Locks::mutator_lock_);
-
-  FrameId ReadFrameId();
-
-  template <typename T> T ReadEnum1(const char* specific_kind) {
-    T value = static_cast<T>(Read1());
-    VLOG(jdwp) << "    " << specific_kind << " " << value;
-    return value;
-  }
-
-  JdwpTag ReadTag();
-
-  JdwpTypeTag ReadTypeTag();
-
-  JdwpLocation ReadLocation() REQUIRES_SHARED(Locks::mutator_lock_);
-
-  JdwpModKind ReadModKind();
-
-  //
-  // Return values from this JDWP packet's header.
-  //
-  size_t GetLength() { return byte_count_; }
-  uint32_t GetId() { return id_; }
-  uint8_t GetCommandSet() { return command_set_; }
-  uint8_t GetCommand() { return command_; }
-
-  // Returns the number of bytes remaining.
-  size_t size() { return end_ - p_; }
-
-  // Returns a pointer to the next byte.
-  const uint8_t* data() { return p_; }
-
-  void Skip(size_t count) { p_ += count; }
-
-  void CheckConsumed();
-
- private:
-  uint8_t Read1();
-  uint16_t Read2BE();
-  uint32_t Read4BE();
-  uint64_t Read8BE();
-
-  uint32_t byte_count_;
-  uint32_t id_;
-  uint8_t command_set_;
-  uint8_t command_;
-
-  const uint8_t* p_;
-  const uint8_t* end_;
-
-  DISALLOW_COPY_AND_ASSIGN(Request);
-};
-
-}  // namespace JDWP
-
-}  // namespace art
-
-#endif  // ART_RUNTIME_JDWP_JDWP_H_
diff --git a/runtime/jdwp/jdwp_adb.cc b/runtime/jdwp/jdwp_adb.cc
deleted file mode 100644
index d64f11f..0000000
--- a/runtime/jdwp/jdwp_adb.cc
+++ /dev/null
@@ -1,487 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <errno.h>
-#include <stdio.h>
-#include <sys/socket.h>
-#include <sys/un.h>
-#include <unistd.h>
-
-#include "android-base/stringprintf.h"
-
-#include "base/logging.h"  // For VLOG.
-#include "base/socket_peer_is_trusted.h"
-#include "jdwp/jdwp_priv.h"
-#include "thread-current-inl.h"
-
-/*
- * The JDWP <-> ADB transport protocol is explained in detail
- * in system/core/adb/jdwp_service.c. Here's a summary.
- *
- * 1/ when the JDWP thread starts, it tries to connect to a Unix
- *    domain stream socket (@jdwp-control) that is opened by the
- *    ADB daemon.
- *
- * 2/ it then sends the current process PID as an int32_t.
- *
- * 3/ then, it uses recvmsg to receive file descriptors from the
- *    daemon. each incoming file descriptor is a pass-through to
- *    a given JDWP debugger, that can be used to read the usual
- *    JDWP-handshake, etc...
- */
-
-static constexpr char kJdwpControlName[] = "\0jdwp-control";
-static constexpr size_t kJdwpControlNameLen = sizeof(kJdwpControlName) - 1;
-/* This timeout is for connect/send with control socket. In practice, the
- * connect should never timeout since it's just connect to a local unix domain
- * socket. But in case adb is buggy and doesn't respond to any connection, the
- * connect will block. For send, actually it would never block since we only send
- * several bytes and the kernel buffer is big enough to accept it. 10 seconds
- * should be far enough.
- */
-static constexpr int kControlSockSendTimeout = 10;
-
-namespace art {
-
-namespace JDWP {
-
-using android::base::StringPrintf;
-
-struct JdwpAdbState : public JdwpNetStateBase {
- public:
-  explicit JdwpAdbState(JdwpState* state)
-      : JdwpNetStateBase(state),
-        state_lock_("JdwpAdbState lock", kJdwpAdbStateLock) {
-    control_sock_ = -1;
-    shutting_down_ = false;
-
-    control_addr_.controlAddrUn.sun_family = AF_UNIX;
-    control_addr_len_ = sizeof(control_addr_.controlAddrUn.sun_family) + kJdwpControlNameLen;
-    memcpy(control_addr_.controlAddrUn.sun_path, kJdwpControlName, kJdwpControlNameLen);
-  }
-
-  ~JdwpAdbState() {
-    if (clientSock != -1) {
-      shutdown(clientSock, SHUT_RDWR);
-      close(clientSock);
-    }
-    if (control_sock_ != -1) {
-      shutdown(control_sock_, SHUT_RDWR);
-      close(control_sock_);
-    }
-  }
-
-  bool Accept() override REQUIRES(!state_lock_);
-
-  bool Establish(const JdwpOptions*) override {
-    return false;
-  }
-
-  void Shutdown() override REQUIRES(!state_lock_) {
-    int control_sock;
-    int local_clientSock;
-    {
-      MutexLock mu(Thread::Current(), state_lock_);
-      shutting_down_ = true;
-      control_sock = this->control_sock_;
-      local_clientSock = this->clientSock;
-      /* clear these out so it doesn't wake up and try to reuse them */
-      this->control_sock_ = this->clientSock = -1;
-    }
-
-    if (local_clientSock != -1) {
-      shutdown(local_clientSock, SHUT_RDWR);
-    }
-
-    if (control_sock != -1) {
-      shutdown(control_sock, SHUT_RDWR);
-    }
-
-    WakePipe();
-  }
-
-  bool ProcessIncoming() override REQUIRES(!state_lock_);
-
- private:
-  int ReceiveClientFd() REQUIRES(!state_lock_);
-
-  bool IsDown() REQUIRES(!state_lock_) {
-    MutexLock mu(Thread::Current(), state_lock_);
-    return shutting_down_;
-  }
-
-  int ControlSock() REQUIRES(!state_lock_) {
-    MutexLock mu(Thread::Current(), state_lock_);
-    if (shutting_down_) {
-      CHECK_EQ(control_sock_, -1);
-    }
-    return control_sock_;
-  }
-
-  int control_sock_ GUARDED_BY(state_lock_);
-  bool shutting_down_ GUARDED_BY(state_lock_);
-  Mutex state_lock_;
-
-  socklen_t control_addr_len_;
-  union {
-    sockaddr_un controlAddrUn;
-    sockaddr controlAddrPlain;
-  } control_addr_;
-};
-
-/*
- * Do initial prep work, e.g. binding to ports and opening files.  This
- * runs in the main thread, before the JDWP thread starts, so it shouldn't
- * do anything that might block forever.
- */
-bool InitAdbTransport(JdwpState* state, const JdwpOptions*) {
-  VLOG(jdwp) << "ADB transport startup";
-  state->netState = new JdwpAdbState(state);
-  return (state->netState != nullptr);
-}
-
-/*
- * Receive a file descriptor from ADB.  The fd can be used to communicate
- * directly with a debugger or DDMS.
- *
- * Returns the file descriptor on success.  On failure, returns -1 and
- * closes netState->control_sock_.
- */
-int JdwpAdbState::ReceiveClientFd() {
-  char dummy = '!';
-  union {
-    cmsghdr cm;
-    char buffer[CMSG_SPACE(sizeof(int))];
-  } cm_un;
-
-  iovec iov;
-  iov.iov_base       = &dummy;
-  iov.iov_len        = 1;
-
-  msghdr msg;
-  msg.msg_name       = nullptr;
-  msg.msg_namelen    = 0;
-  msg.msg_iov        = &iov;
-  msg.msg_iovlen     = 1;
-  msg.msg_flags      = 0;
-  msg.msg_control    = cm_un.buffer;
-  msg.msg_controllen = sizeof(cm_un.buffer);
-
-  cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
-  cmsg->cmsg_len   = msg.msg_controllen;
-  cmsg->cmsg_level = SOL_SOCKET;
-  cmsg->cmsg_type  = SCM_RIGHTS;
-  (reinterpret_cast<int*>(CMSG_DATA(cmsg)))[0] = -1;
-
-  int rc = TEMP_FAILURE_RETRY(recvmsg(ControlSock(), &msg, 0));
-
-  if (rc <= 0) {
-    if (rc == -1) {
-      PLOG(WARNING) << "Receiving file descriptor from ADB failed (socket " << ControlSock() << ")";
-    }
-    MutexLock mu(Thread::Current(), state_lock_);
-    close(control_sock_);
-    control_sock_ = -1;
-    return -1;
-  }
-
-  return (reinterpret_cast<int*>(CMSG_DATA(cmsg)))[0];
-}
-
-/*
- * Block forever, waiting for a debugger to connect to us.  Called from the
- * JDWP thread.
- *
- * This needs to un-block and return "false" if the VM is shutting down.  It
- * should return "true" when it successfully accepts a connection.
- */
-bool JdwpAdbState::Accept() {
-  int retryCount = 0;
-
-  /* first, ensure that we get a connection to the ADB daemon */
-
- retry:
-  if (IsDown()) {
-    return false;
-  }
-
-  if (ControlSock() == -1) {
-    int        sleep_ms     = 500;
-    const int  sleep_max_ms = 2*1000;
-
-    int sock = socket(AF_UNIX, SOCK_SEQPACKET, 0);
-    if (sock < 0) {
-      PLOG(ERROR) << "Could not create ADB control socket";
-      return false;
-    }
-    struct timeval timeout;
-    timeout.tv_sec = kControlSockSendTimeout;
-    timeout.tv_usec = 0;
-    setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, &timeout, sizeof(timeout));
-    {
-      MutexLock mu(Thread::Current(), state_lock_);
-      control_sock_ = sock;
-      if (shutting_down_) {
-        return false;
-      }
-      if (!MakePipe()) {
-        return false;
-      }
-    }
-
-    int32_t pid = getpid();
-
-    for (;;) {
-      /*
-       * If adbd isn't running, because USB debugging was disabled or
-       * perhaps the system is restarting it for "adb root", the
-       * connect() will fail.  We loop here forever waiting for it
-       * to come back.
-       *
-       * Waking up and polling every couple of seconds is generally a
-       * bad thing to do, but we only do this if the application is
-       * debuggable *and* adbd isn't running.  Still, for the sake
-       * of battery life, we should consider timing out and giving
-       * up after a few minutes in case somebody ships an app with
-       * the debuggable flag set.
-       */
-      int ret = connect(ControlSock(), &control_addr_.controlAddrPlain, control_addr_len_);
-      if (!ret) {
-        int control_sock = ControlSock();
-#ifdef ART_TARGET_ANDROID
-        if (control_sock < 0 || !art::SocketPeerIsTrusted(control_sock)) {
-          if (control_sock >= 0 && shutdown(control_sock, SHUT_RDWR)) {
-            PLOG(ERROR) << "trouble shutting down socket";
-          }
-          return false;
-        }
-#endif
-
-        /* now try to send our pid to the ADB daemon */
-        ret = TEMP_FAILURE_RETRY(send(control_sock, &pid, sizeof(pid), 0));
-        if (ret == sizeof(pid)) {
-          VLOG(jdwp) << "PID " << pid << " sent to ADB";
-          break;
-        }
-
-        PLOG(ERROR) << "Weird, can't send JDWP process pid to ADB";
-        return false;
-      }
-      if (VLOG_IS_ON(jdwp)) {
-        PLOG(ERROR) << "Can't connect to ADB control socket";
-      }
-
-      usleep(sleep_ms * 1000);
-
-      sleep_ms += (sleep_ms >> 1);
-      if (sleep_ms > sleep_max_ms) {
-        sleep_ms = sleep_max_ms;
-      }
-      if (IsDown()) {
-        return false;
-      }
-    }
-  }
-
-  VLOG(jdwp) << "trying to receive file descriptor from ADB";
-  /* now we can receive a client file descriptor */
-  int sock = ReceiveClientFd();
-  {
-    MutexLock mu(Thread::Current(), state_lock_);
-    clientSock = sock;
-    if (shutting_down_) {
-      return false;       // suppress logs and additional activity
-    }
-  }
-  if (clientSock == -1) {
-    if (++retryCount > 5) {
-      LOG(ERROR) << "adb connection max retries exceeded";
-      return false;
-    }
-    goto retry;
-  } else {
-    VLOG(jdwp) << "received file descriptor " << clientSock << " from ADB";
-    SetAwaitingHandshake(true);
-    input_count_ = 0;
-    return true;
-  }
-}
-
-/*
- * Process incoming data.  If no data is available, this will block until
- * some arrives.
- *
- * If we get a full packet, handle it.
- *
- * To take some of the mystery out of life, we want to reject incoming
- * connections if we already have a debugger attached.  If we don't, the
- * debugger will just mysteriously hang until it times out.  We could just
- * close the listen socket, but there's a good chance we won't be able to
- * bind to the same port again, which would confuse utilities.
- *
- * Returns "false" on error (indicating that the connection has been severed),
- * "true" if things are still okay.
- */
-bool JdwpAdbState::ProcessIncoming() {
-  int readCount;
-
-  CHECK_NE(clientSock, -1);
-
-  if (!HaveFullPacket()) {
-    /* read some more, looping until we have data */
-    errno = 0;
-    while (true) {
-      int selCount;
-      fd_set readfds;
-      int maxfd = -1;
-      int fd;
-
-      FD_ZERO(&readfds);
-
-      /* configure fds; note these may get zapped by another thread */
-      fd = ControlSock();
-      if (fd >= 0) {
-        FD_SET(fd, &readfds);
-        if (maxfd < fd) {
-          maxfd = fd;
-        }
-      }
-      fd = clientSock;
-      if (fd >= 0) {
-        FD_SET(fd, &readfds);
-        if (maxfd < fd) {
-          maxfd = fd;
-        }
-      }
-      fd = wake_pipe_[0];
-      if (fd >= 0) {
-        FD_SET(fd, &readfds);
-        if (maxfd < fd) {
-          maxfd = fd;
-        }
-      } else {
-        LOG(INFO) << "NOTE: entering select w/o wakepipe";
-      }
-
-      if (maxfd < 0) {
-        VLOG(jdwp) << "+++ all fds are closed";
-        return false;
-      }
-
-      /*
-       * Select blocks until it sees activity on the file descriptors.
-       * Closing the local file descriptor does not count as activity,
-       * so we can't rely on that to wake us up (it works for read()
-       * and accept(), but not select()).
-       *
-       * We can do one of three things: (1) send a signal and catch
-       * EINTR, (2) open an additional fd ("wake pipe") and write to
-       * it when it's time to exit, or (3) time out periodically and
-       * re-issue the select.  We're currently using #2, as it's more
-       * reliable than #1 and generally better than #3.  Wastes two fds.
-       */
-      selCount = select(maxfd + 1, &readfds, nullptr, nullptr, nullptr);
-      if (selCount < 0) {
-        if (errno == EINTR) {
-          continue;
-        }
-        PLOG(ERROR) << "select failed";
-        goto fail;
-      }
-
-      if (wake_pipe_[0] >= 0 && FD_ISSET(wake_pipe_[0], &readfds)) {
-        VLOG(jdwp) << "Got wake-up signal, bailing out of select";
-        goto fail;
-      }
-      int control_sock = ControlSock();
-      if (control_sock >= 0 && FD_ISSET(control_sock, &readfds)) {
-        int  sock = ReceiveClientFd();
-        if (sock >= 0) {
-          LOG(INFO) << "Ignoring second debugger -- accepting and dropping";
-          close(sock);
-        } else {
-          CHECK_EQ(ControlSock(), -1);
-          /*
-           * Remote side most likely went away, so our next read
-           * on clientSock will fail and throw us out of the loop.
-           */
-        }
-      }
-      if (clientSock >= 0 && FD_ISSET(clientSock, &readfds)) {
-        readCount = read(clientSock, input_buffer_ + input_count_, sizeof(input_buffer_) - input_count_);
-        if (readCount < 0) {
-          /* read failed */
-          if (errno != EINTR) {
-            goto fail;
-          }
-          VLOG(jdwp) << "+++ EINTR hit";
-          return true;
-        } else if (readCount == 0) {
-          /* EOF hit -- far end went away */
-          VLOG(jdwp) << "+++ peer disconnected";
-          goto fail;
-        } else {
-          break;
-        }
-      }
-    }
-
-    input_count_ += readCount;
-    if (!HaveFullPacket()) {
-      return true;        /* still not there yet */
-    }
-  }
-
-  /*
-   * Special-case the initial handshake.  For some bizarre reason we're
-   * expected to emulate bad tty settings by echoing the request back
-   * exactly as it was sent.  Note the handshake is always initiated by
-   * the debugger, no matter who connects to whom.
-   *
-   * Other than this one case, the protocol [claims to be] stateless.
-   */
-  if (IsAwaitingHandshake()) {
-    if (memcmp(input_buffer_, kMagicHandshake, kMagicHandshakeLen) != 0) {
-      LOG(ERROR) << StringPrintf("ERROR: bad handshake '%.14s'", input_buffer_);
-      goto fail;
-    }
-
-    errno = 0;
-    int cc = TEMP_FAILURE_RETRY(write(clientSock, input_buffer_, kMagicHandshakeLen));
-    if (cc != kMagicHandshakeLen) {
-      PLOG(ERROR) << "Failed writing handshake bytes (" << cc << " of " << kMagicHandshakeLen << ")";
-      goto fail;
-    }
-
-    ConsumeBytes(kMagicHandshakeLen);
-    SetAwaitingHandshake(false);
-    VLOG(jdwp) << "+++ handshake complete";
-    return true;
-  }
-
-  /*
-   * Handle this packet.
-   */
-  return state_->HandlePacket();
-
- fail:
-  Close();
-  return false;
-}
-
-}  // namespace JDWP
-
-}  // namespace art
diff --git a/runtime/jdwp/jdwp_bits.h b/runtime/jdwp/jdwp_bits.h
deleted file mode 100644
index 33b98f3..0000000
--- a/runtime/jdwp/jdwp_bits.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_JDWP_JDWP_BITS_H_
-#define ART_RUNTIME_JDWP_JDWP_BITS_H_
-
-#include <stddef.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <string.h>
-#include <string>
-#include <vector>
-
-namespace art {
-
-namespace JDWP {
-
-static inline uint32_t Get4BE(unsigned char const* pSrc) {
-  return (pSrc[0] << 24) | (pSrc[1] << 16) | (pSrc[2] << 8) | pSrc[3];
-}
-
-static inline void Append1BE(std::vector<uint8_t>& bytes, uint8_t value) {
-  bytes.push_back(value);
-}
-
-static inline void Append2BE(std::vector<uint8_t>& bytes, uint16_t value) {
-  bytes.push_back(static_cast<uint8_t>(value >> 8));
-  bytes.push_back(static_cast<uint8_t>(value));
-}
-
-static inline void Append4BE(std::vector<uint8_t>& bytes, uint32_t value) {
-  bytes.push_back(static_cast<uint8_t>(value >> 24));
-  bytes.push_back(static_cast<uint8_t>(value >> 16));
-  bytes.push_back(static_cast<uint8_t>(value >> 8));
-  bytes.push_back(static_cast<uint8_t>(value));
-}
-
-static inline void Append8BE(std::vector<uint8_t>& bytes, uint64_t value) {
-  bytes.push_back(static_cast<uint8_t>(value >> 56));
-  bytes.push_back(static_cast<uint8_t>(value >> 48));
-  bytes.push_back(static_cast<uint8_t>(value >> 40));
-  bytes.push_back(static_cast<uint8_t>(value >> 32));
-  bytes.push_back(static_cast<uint8_t>(value >> 24));
-  bytes.push_back(static_cast<uint8_t>(value >> 16));
-  bytes.push_back(static_cast<uint8_t>(value >> 8));
-  bytes.push_back(static_cast<uint8_t>(value));
-}
-
-static inline void AppendUtf16BE(std::vector<uint8_t>& bytes, const uint16_t* chars,
-                                 size_t char_count) {
-  Append4BE(bytes, char_count);
-  for (size_t i = 0; i < char_count; ++i) {
-    Append2BE(bytes, chars[i]);
-  }
-}
-
-static inline void AppendUtf16CompressedBE(std::vector<uint8_t>& bytes,
-                                           const uint8_t* chars, size_t char_count) {
-  Append4BE(bytes, char_count);
-  for (size_t i = 0; i < char_count; ++i) {
-    Append2BE(bytes, static_cast<uint16_t>(chars[i]));
-  }
-}
-
-// @deprecated
-static inline void Set1(uint8_t* buf, uint8_t val) {
-  *buf = val;
-}
-
-// @deprecated
-static inline void Set2BE(uint8_t* buf, uint16_t val) {
-  *buf++ = (uint8_t)(val >> 8);
-  *buf = (uint8_t)(val);
-}
-
-// @deprecated
-static inline void Set4BE(uint8_t* buf, uint32_t val) {
-  *buf++ = (uint8_t)(val >> 24);
-  *buf++ = (uint8_t)(val >> 16);
-  *buf++ = (uint8_t)(val >> 8);
-  *buf = (uint8_t)(val);
-}
-
-// @deprecated
-static inline void Set8BE(uint8_t* buf, uint64_t val) {
-  *buf++ = (uint8_t)(val >> 56);
-  *buf++ = (uint8_t)(val >> 48);
-  *buf++ = (uint8_t)(val >> 40);
-  *buf++ = (uint8_t)(val >> 32);
-  *buf++ = (uint8_t)(val >> 24);
-  *buf++ = (uint8_t)(val >> 16);
-  *buf++ = (uint8_t)(val >> 8);
-  *buf = (uint8_t)(val);
-}
-
-static inline void Write1BE(uint8_t** dst, uint8_t value) {
-  Set1(*dst, value);
-  *dst += sizeof(value);
-}
-
-static inline void Write2BE(uint8_t** dst, uint16_t value) {
-  Set2BE(*dst, value);
-  *dst += sizeof(value);
-}
-
-static inline void Write4BE(uint8_t** dst, uint32_t value) {
-  Set4BE(*dst, value);
-  *dst += sizeof(value);
-}
-
-static inline void Write8BE(uint8_t** dst, uint64_t value) {
-  Set8BE(*dst, value);
-  *dst += sizeof(value);
-}
-
-}  // namespace JDWP
-
-}  // namespace art
-
-#endif  // ART_RUNTIME_JDWP_JDWP_BITS_H_
diff --git a/runtime/jdwp/jdwp_constants.h b/runtime/jdwp/jdwp_constants.h
deleted file mode 100644
index 9fc896d..0000000
--- a/runtime/jdwp/jdwp_constants.h
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * These come out of the JDWP documentation.
- */
-#ifndef ART_RUNTIME_JDWP_JDWP_CONSTANTS_H_
-#define ART_RUNTIME_JDWP_JDWP_CONSTANTS_H_
-
-#include <iosfwd>
-
-namespace art {
-
-namespace JDWP {
-
-/*
- * Error constants.
- */
-enum JdwpError {
-  ERR_NONE                                        = 0,
-  ERR_INVALID_THREAD                              = 10,
-  ERR_INVALID_THREAD_GROUP                        = 11,
-  ERR_INVALID_PRIORITY                            = 12,
-  ERR_THREAD_NOT_SUSPENDED                        = 13,
-  ERR_THREAD_SUSPENDED                            = 14,
-  ERR_THREAD_NOT_ALIVE                            = 15,
-  ERR_INVALID_OBJECT                              = 20,
-  ERR_INVALID_CLASS                               = 21,
-  ERR_CLASS_NOT_PREPARED                          = 22,
-  ERR_INVALID_METHODID                            = 23,
-  ERR_INVALID_LOCATION                            = 24,
-  ERR_INVALID_FIELDID                             = 25,
-  ERR_INVALID_FRAMEID                             = 30,
-  ERR_NO_MORE_FRAMES                              = 31,
-  ERR_OPAQUE_FRAME                                = 32,
-  ERR_NOT_CURRENT_FRAME                           = 33,
-  ERR_TYPE_MISMATCH                               = 34,
-  ERR_INVALID_SLOT                                = 35,
-  ERR_DUPLICATE                                   = 40,
-  ERR_NOT_FOUND                                   = 41,
-  ERR_INVALID_MONITOR                             = 50,
-  ERR_NOT_MONITOR_OWNER                           = 51,
-  ERR_INTERRUPT                                   = 52,
-  ERR_INVALID_CLASS_FORMAT                        = 60,
-  ERR_CIRCULAR_CLASS_DEFINITION                   = 61,
-  ERR_FAILS_VERIFICATION                          = 62,
-  ERR_ADD_METHOD_NOT_IMPLEMENTED                  = 63,
-  ERR_SCHEMA_CHANGE_NOT_IMPLEMENTED               = 64,
-  ERR_INVALID_TYPESTATE                           = 65,
-  ERR_HIERARCHY_CHANGE_NOT_IMPLEMENTED            = 66,
-  ERR_DELETE_METHOD_NOT_IMPLEMENTED               = 67,
-  ERR_UNSUPPORTED_VERSION                         = 68,
-  ERR_NAMES_DONT_MATCH                            = 69,
-  ERR_CLASS_MODIFIERS_CHANGE_NOT_IMPLEMENTED      = 70,
-  ERR_METHOD_MODIFIERS_CHANGE_NOT_IMPLEMENTED     = 71,
-  ERR_NOT_IMPLEMENTED                             = 99,
-  ERR_NULL_POINTER                                = 100,
-  ERR_ABSENT_INFORMATION                          = 101,
-  ERR_INVALID_EVENT_TYPE                          = 102,
-  ERR_ILLEGAL_ARGUMENT                            = 103,
-  ERR_OUT_OF_MEMORY                               = 110,
-  ERR_ACCESS_DENIED                               = 111,
-  ERR_VM_DEAD                                     = 112,
-  ERR_INTERNAL                                    = 113,
-  ERR_UNATTACHED_THREAD                           = 115,
-  ERR_INVALID_TAG                                 = 500,
-  ERR_ALREADY_INVOKING                            = 502,
-  ERR_INVALID_INDEX                               = 503,
-  ERR_INVALID_LENGTH                              = 504,
-  ERR_INVALID_STRING                              = 506,
-  ERR_INVALID_CLASS_LOADER                        = 507,
-  ERR_INVALID_ARRAY                               = 508,
-  ERR_TRANSPORT_LOAD                              = 509,
-  ERR_TRANSPORT_INIT                              = 510,
-  ERR_NATIVE_METHOD                               = 511,
-  ERR_INVALID_COUNT                               = 512,
-};
-std::ostream& operator<<(std::ostream& os, const JdwpError& value);
-
-
-/*
- * ClassStatus constants.  These are bit flags that can be ORed together.
- */
-enum JdwpClassStatus {
-  CS_VERIFIED             = 0x01,
-  CS_PREPARED             = 0x02,
-  CS_INITIALIZED          = 0x04,
-  CS_ERROR                = 0x08,
-};
-std::ostream& operator<<(std::ostream& os, const JdwpClassStatus& value);
-
-/*
- * EventKind constants.
- */
-enum JdwpEventKind {
-  EK_SINGLE_STEP          = 1,
-  EK_BREAKPOINT           = 2,
-  EK_FRAME_POP            = 3,
-  EK_EXCEPTION            = 4,
-  EK_USER_DEFINED         = 5,
-  EK_THREAD_START         = 6,
-  EK_THREAD_DEATH         = 7,  // Formerly known as THREAD_END.
-  EK_CLASS_PREPARE        = 8,
-  EK_CLASS_UNLOAD         = 9,
-  EK_CLASS_LOAD           = 10,
-  EK_FIELD_ACCESS         = 20,
-  EK_FIELD_MODIFICATION   = 21,
-  EK_EXCEPTION_CATCH      = 30,
-  EK_METHOD_ENTRY         = 40,
-  EK_METHOD_EXIT          = 41,
-  EK_METHOD_EXIT_WITH_RETURN_VALUE = 42,
-  EK_MONITOR_CONTENDED_ENTER       = 43,
-  EK_MONITOR_CONTENDED_ENTERED     = 44,
-  EK_MONITOR_WAIT         = 45,
-  EK_MONITOR_WAITED       = 46,
-  EK_VM_START             = 90,  // Formerly known as VM_INIT.
-  EK_VM_DEATH             = 99,
-  EK_VM_DISCONNECTED      = 100,  // "Never sent across JDWP".
-};
-std::ostream& operator<<(std::ostream& os, const JdwpEventKind& value);
-
-/*
- * Values for "modKind" in EventRequest.Set.
- */
-enum JdwpModKind {
-  MK_COUNT                = 1,
-  MK_CONDITIONAL          = 2,
-  MK_THREAD_ONLY          = 3,
-  MK_CLASS_ONLY           = 4,
-  MK_CLASS_MATCH          = 5,
-  MK_CLASS_EXCLUDE        = 6,
-  MK_LOCATION_ONLY        = 7,
-  MK_EXCEPTION_ONLY       = 8,
-  MK_FIELD_ONLY           = 9,
-  MK_STEP                 = 10,
-  MK_INSTANCE_ONLY        = 11,
-  MK_SOURCE_NAME_MATCH    = 12,  // Since Java 6.
-};
-std::ostream& operator<<(std::ostream& os, const JdwpModKind& value);
-
-/*
- * InvokeOptions constants (bit flags).
- */
-enum JdwpInvokeOptions {
-  INVOKE_SINGLE_THREADED  = 0x01,
-  INVOKE_NONVIRTUAL       = 0x02,
-};
-std::ostream& operator<<(std::ostream& os, const JdwpInvokeOptions& value);
-
-/*
- * StepDepth constants.
- */
-enum JdwpStepDepth {
-  SD_INTO                 = 0,    // Step into method calls.
-  SD_OVER                 = 1,    // Step over method calls.
-  SD_OUT                  = 2,    // Step out of current method.
-};
-std::ostream& operator<<(std::ostream& os, const JdwpStepDepth& value);
-
-/*
- * StepSize constants.
- */
-enum JdwpStepSize {
-  SS_MIN                  = 0,    // Step by minimum (for example, one bytecode).
-  SS_LINE                 = 1,    // If possible, step to next line.
-};
-std::ostream& operator<<(std::ostream& os, const JdwpStepSize& value);
-
-/*
- * SuspendPolicy constants.
- */
-enum JdwpSuspendPolicy {
-  SP_NONE                 = 0,    // Suspend no threads.
-  SP_EVENT_THREAD         = 1,    // Suspend event thread.
-  SP_ALL                  = 2,    // Suspend all threads.
-};
-std::ostream& operator<<(std::ostream& os, const JdwpSuspendPolicy& value);
-
-/*
- * SuspendStatus constants.
- */
-enum JdwpSuspendStatus {
-  SUSPEND_STATUS_NOT_SUSPENDED = 0,
-  SUSPEND_STATUS_SUSPENDED     = 1,
-};
-std::ostream& operator<<(std::ostream& os, const JdwpSuspendStatus& value);
-
-/*
- * ThreadStatus constants.
- */
-enum JdwpThreadStatus {
-  TS_ZOMBIE               = 0,
-  TS_RUNNING              = 1,        // RUNNING
-  TS_SLEEPING             = 2,        // (in Thread.sleep())
-  TS_MONITOR              = 3,        // WAITING (monitor wait)
-  TS_WAIT                 = 4,        // (in Object.wait())
-};
-std::ostream& operator<<(std::ostream& os, const JdwpThreadStatus& value);
-
-/*
- * TypeTag constants.
- */
-enum JdwpTypeTag {
-  TT_CLASS                = 1,
-  TT_INTERFACE            = 2,
-  TT_ARRAY                = 3,
-};
-std::ostream& operator<<(std::ostream& os, const JdwpTypeTag& value);
-
-/*
- * Tag constants.
- */
-enum JdwpTag {
-  JT_ARRAY                 = '[',
-  JT_BYTE                  = 'B',
-  JT_CHAR                  = 'C',
-  JT_OBJECT                = 'L',
-  JT_FLOAT                 = 'F',
-  JT_DOUBLE                = 'D',
-  JT_INT                   = 'I',
-  JT_LONG                  = 'J',
-  JT_SHORT                 = 'S',
-  JT_VOID                  = 'V',
-  JT_BOOLEAN               = 'Z',
-  JT_STRING                = 's',
-  JT_THREAD                = 't',
-  JT_THREAD_GROUP          = 'g',
-  JT_CLASS_LOADER          = 'l',
-  JT_CLASS_OBJECT          = 'c',
-};
-std::ostream& operator<<(std::ostream& os, const JdwpTag& value);
-
-}  // namespace JDWP
-
-}  // namespace art
-
-#endif  // ART_RUNTIME_JDWP_JDWP_CONSTANTS_H_
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
deleted file mode 100644
index 7ce70cb..0000000
--- a/runtime/jdwp/jdwp_event.cc
+++ /dev/null
@@ -1,1378 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "jdwp/jdwp_event.h"
-
-#include <stddef.h>     /* for offsetof() */
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-#include "android-base/stringprintf.h"
-
-#include "art_field-inl.h"
-#include "art_method-inl.h"
-#include "base/logging.h"  // For VLOG.
-#include "debugger.h"
-#include "jdwp/jdwp_constants.h"
-#include "jdwp/jdwp_expand_buf.h"
-#include "jdwp/jdwp_priv.h"
-#include "jdwp/object_registry.h"
-#include "scoped_thread_state_change-inl.h"
-#include "thread-inl.h"
-
-#include "handle_scope-inl.h"
-
-/*
-General notes:
-
-The event add/remove stuff usually happens from the debugger thread,
-in response to requests from the debugger, but can also happen as the
-result of an event in an arbitrary thread (e.g. an event with a "count"
-mod expires).  It's important to keep the event list locked when processing
-events.
-
-Event posting can happen from any thread.  The JDWP thread will not usually
-post anything but VM start/death, but if a JDWP request causes a class
-to be loaded, the ClassPrepare event will come from the JDWP thread.
-
-
-We can have serialization issues when we post an event to the debugger.
-For example, a thread could send an "I hit a breakpoint and am suspending
-myself" message to the debugger.  Before it manages to suspend itself, the
-debugger's response ("not interested, resume thread") arrives and is
-processed.  We try to resume a thread that hasn't yet suspended.
-
-This means that, after posting an event to the debugger, we need to wait
-for the event thread to suspend itself (and, potentially, all other threads)
-before processing any additional requests from the debugger.  While doing
-so we need to be aware that multiple threads may be hitting breakpoints
-or other events simultaneously, so we either need to wait for all of them
-or serialize the events with each other.
-
-The current mechanism works like this:
-  Event thread:
-   - If I'm going to suspend, grab the "I am posting an event" token.  Wait
-     for it if it's not currently available.
-   - Post the event to the debugger.
-   - If appropriate, suspend others and then myself.  As part of suspending
-     myself, release the "I am posting" token.
-  JDWP thread:
-   - When an event arrives, see if somebody is posting an event.  If so,
-     sleep until we can acquire the "I am posting an event" token.  Release
-     it immediately and continue processing -- the event we have already
-     received should not interfere with other events that haven't yet
-     been posted.
-
-Some care must be taken to avoid deadlock:
-
- - thread A and thread B exit near-simultaneously, and post thread-death
-   events with a "suspend all" clause
- - thread A gets the event token, thread B sits and waits for it
- - thread A wants to suspend all other threads, but thread B is waiting
-   for the token and can't be suspended
-
-So we need to mark thread B in such a way that thread A doesn't wait for it.
-
-If we just bracket the "grab event token" call with a change to VMWAIT
-before sleeping, the switch back to RUNNING state when we get the token
-will cause thread B to suspend (remember, thread A's global suspend is
-still in force, even after it releases the token).  Suspending while
-holding the event token is very bad, because it prevents the JDWP thread
-from processing incoming messages.
-
-We need to change to VMWAIT state at the *start* of posting an event,
-and stay there until we either finish posting the event or decide to
-put ourselves to sleep.  That way we don't interfere with anyone else and
-don't allow anyone else to interfere with us.
-*/
-
-namespace art {
-
-namespace JDWP {
-
-using android::base::StringPrintf;
-
-/*
- * Stuff to compare against when deciding if a mod matches.  Only the
- * values for mods valid for the event being evaluated will be filled in.
- * The rest will be zeroed.
- * Must be allocated on the stack only. This is enforced by removing the
- * operator new.
- */
-struct ModBasket {
-  explicit ModBasket(Thread* self)
-    : hs(self), pLoc(nullptr), thread(self),
-      locationClass(hs.NewHandle<mirror::Class>(nullptr)),
-      exceptionClass(hs.NewHandle<mirror::Class>(nullptr)),
-      caught(false),
-      field(nullptr),
-      thisPtr(hs.NewHandle<mirror::Object>(nullptr)) { }
-
-  StackHandleScope<3> hs;
-  const EventLocation*            pLoc;             /* LocationOnly */
-  std::string                     className;        /* ClassMatch/ClassExclude */
-  Thread* const                   thread;           /* ThreadOnly */
-  MutableHandle<mirror::Class>    locationClass;    /* ClassOnly */
-  MutableHandle<mirror::Class>    exceptionClass;   /* ExceptionOnly */
-  bool                            caught;           /* ExceptionOnly */
-  ArtField*                       field;            /* FieldOnly */
-  MutableHandle<mirror::Object>   thisPtr;          /* InstanceOnly */
-  /* nothing for StepOnly -- handled differently */
-
- private:
-  DISALLOW_ALLOCATION();  // forbids allocation on the heap.
-  DISALLOW_IMPLICIT_CONSTRUCTORS(ModBasket);
-};
-
-static bool NeedsFullDeoptimization(JdwpEventKind eventKind) {
-  if (!Dbg::RequiresDeoptimization()) {
-    // We don't need deoptimization for debugging.
-    return false;
-  }
-  switch (eventKind) {
-      case EK_METHOD_ENTRY:
-      case EK_METHOD_EXIT:
-      case EK_METHOD_EXIT_WITH_RETURN_VALUE:
-      case EK_FIELD_ACCESS:
-      case EK_FIELD_MODIFICATION:
-        return true;
-      default:
-        return false;
-    }
-}
-
-// Returns the instrumentation event the DebugInstrumentationListener must
-// listen to in order to properly report the given JDWP event to the debugger.
-static uint32_t GetInstrumentationEventFor(JdwpEventKind eventKind) {
-  switch (eventKind) {
-    case EK_BREAKPOINT:
-    case EK_SINGLE_STEP:
-      return instrumentation::Instrumentation::kDexPcMoved;
-    case EK_EXCEPTION:
-    case EK_EXCEPTION_CATCH:
-      return instrumentation::Instrumentation::kExceptionThrown;
-    case EK_METHOD_ENTRY:
-      return instrumentation::Instrumentation::kMethodEntered;
-    case EK_METHOD_EXIT:
-    case EK_METHOD_EXIT_WITH_RETURN_VALUE:
-      return instrumentation::Instrumentation::kMethodExited;
-    case EK_FIELD_ACCESS:
-      return instrumentation::Instrumentation::kFieldRead;
-    case EK_FIELD_MODIFICATION:
-      return instrumentation::Instrumentation::kFieldWritten;
-    default:
-      return 0;
-  }
-}
-
-/*
- * Add an event to the list.  Ordering is not important.
- *
- * If something prevents the event from being registered, e.g. it's a
- * single-step request on a thread that doesn't exist, the event will
- * not be added to the list, and an appropriate error will be returned.
- */
-JdwpError JdwpState::RegisterEvent(JdwpEvent* pEvent) {
-  CHECK(pEvent != nullptr);
-  CHECK(pEvent->prev == nullptr);
-  CHECK(pEvent->next == nullptr);
-
-  {
-    /*
-     * If one or more "break"-type mods are used, register them with
-     * the interpreter.
-     */
-    DeoptimizationRequest req;
-    for (int i = 0; i < pEvent->modCount; i++) {
-      const JdwpEventMod* pMod = &pEvent->mods[i];
-      if (pMod->modKind == MK_LOCATION_ONLY) {
-        // Should only concern breakpoint, field access, field modification, step, and exception
-        // events.
-        // However breakpoint requires specific handling. Field access, field modification and step
-        // events need full deoptimization to be reported while exception event is reported during
-        // exception handling.
-        if (pEvent->eventKind == EK_BREAKPOINT) {
-          Dbg::WatchLocation(&pMod->locationOnly.loc, &req);
-        }
-      } else if (pMod->modKind == MK_STEP) {
-        /* should only be for EK_SINGLE_STEP; should only be one */
-        JdwpStepSize size = static_cast<JdwpStepSize>(pMod->step.size);
-        JdwpStepDepth depth = static_cast<JdwpStepDepth>(pMod->step.depth);
-        JdwpError status = Dbg::ConfigureStep(pMod->step.threadId, size, depth);
-        if (status != ERR_NONE) {
-          return status;
-        }
-      }
-    }
-    if (NeedsFullDeoptimization(pEvent->eventKind)) {
-      CHECK_EQ(req.GetKind(), DeoptimizationRequest::kNothing);
-      CHECK(req.Method() == nullptr);
-      req.SetKind(DeoptimizationRequest::kFullDeoptimization);
-    }
-    Dbg::RequestDeoptimization(req);
-  }
-  uint32_t instrumentation_event = GetInstrumentationEventFor(pEvent->eventKind);
-  if (instrumentation_event != 0) {
-    DeoptimizationRequest req;
-    req.SetKind(DeoptimizationRequest::kRegisterForEvent);
-    req.SetInstrumentationEvent(instrumentation_event);
-    Dbg::RequestDeoptimization(req);
-  }
-
-  {
-    /*
-     * Add to list.
-     */
-    MutexLock mu(Thread::Current(), event_list_lock_);
-    if (event_list_ != nullptr) {
-      pEvent->next = event_list_;
-      event_list_->prev = pEvent;
-    }
-    event_list_ = pEvent;
-    ++event_list_size_;
-  }
-
-  Dbg::ManageDeoptimization();
-
-  return ERR_NONE;
-}
-
-void JdwpState::UnregisterLocationEventsOnClass(ObjPtr<mirror::Class> klass) {
-  VLOG(jdwp) << "Removing events within " << klass->PrettyClass();
-  StackHandleScope<1> hs(Thread::Current());
-  Handle<mirror::Class> h_klass(hs.NewHandle(klass));
-  std::vector<JdwpEvent*> to_remove;
-  MutexLock mu(Thread::Current(), event_list_lock_);
-  for (JdwpEvent* cur_event = event_list_; cur_event != nullptr; cur_event = cur_event->next) {
-    // Fill in the to_remove list
-    bool found_event = false;
-    for (int i = 0; i < cur_event->modCount && !found_event; i++) {
-      JdwpEventMod& mod = cur_event->mods[i];
-      switch (mod.modKind) {
-        case MK_LOCATION_ONLY: {
-          JdwpLocation& loc = mod.locationOnly.loc;
-          JdwpError error;
-          ObjPtr<mirror::Class> breakpoint_class(
-              Dbg::GetObjectRegistry()->Get<art::mirror::Class*>(loc.class_id, &error));
-          DCHECK_EQ(error, ERR_NONE);
-          if (breakpoint_class == h_klass.Get()) {
-            to_remove.push_back(cur_event);
-            found_event = true;
-          }
-          break;
-        }
-        default:
-          // TODO Investigate how we should handle non-locationOnly events.
-          break;
-      }
-    }
-  }
-
-  for (JdwpEvent* event : to_remove) {
-    UnregisterEvent(event);
-    EventFree(event);
-  }
-}
-
-/*
- * Remove an event from the list.  This will also remove the event from
- * any optimization tables, e.g. breakpoints.
- *
- * Does not free the JdwpEvent.
- *
- * Grab the eventLock before calling here.
- */
-void JdwpState::UnregisterEvent(JdwpEvent* pEvent) {
-  if (pEvent->prev == nullptr) {
-    /* head of the list */
-    CHECK(event_list_ == pEvent);
-
-    event_list_ = pEvent->next;
-  } else {
-    pEvent->prev->next = pEvent->next;
-  }
-
-  if (pEvent->next != nullptr) {
-    pEvent->next->prev = pEvent->prev;
-    pEvent->next = nullptr;
-  }
-  pEvent->prev = nullptr;
-
-  {
-    /*
-     * Unhook us from the interpreter, if necessary.
-     */
-    DeoptimizationRequest req;
-    for (int i = 0; i < pEvent->modCount; i++) {
-      JdwpEventMod* pMod = &pEvent->mods[i];
-      if (pMod->modKind == MK_LOCATION_ONLY) {
-        // Like in RegisterEvent, we need specific handling for breakpoint only.
-        if (pEvent->eventKind == EK_BREAKPOINT) {
-          Dbg::UnwatchLocation(&pMod->locationOnly.loc, &req);
-        }
-      }
-      if (pMod->modKind == MK_STEP) {
-        /* should only be for EK_SINGLE_STEP; should only be one */
-        Dbg::UnconfigureStep(pMod->step.threadId);
-      }
-    }
-    if (NeedsFullDeoptimization(pEvent->eventKind)) {
-      CHECK_EQ(req.GetKind(), DeoptimizationRequest::kNothing);
-      CHECK(req.Method() == nullptr);
-      req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
-    }
-    Dbg::RequestDeoptimization(req);
-  }
-  uint32_t instrumentation_event = GetInstrumentationEventFor(pEvent->eventKind);
-  if (instrumentation_event != 0) {
-    DeoptimizationRequest req;
-    req.SetKind(DeoptimizationRequest::kUnregisterForEvent);
-    req.SetInstrumentationEvent(instrumentation_event);
-    Dbg::RequestDeoptimization(req);
-  }
-
-  --event_list_size_;
-  CHECK(event_list_size_ != 0 || event_list_ == nullptr);
-}
-
-/*
- * Remove the event with the given ID from the list.
- *
- */
-void JdwpState::UnregisterEventById(uint32_t requestId) {
-  bool found = false;
-  {
-    MutexLock mu(Thread::Current(), event_list_lock_);
-
-    for (JdwpEvent* pEvent = event_list_; pEvent != nullptr; pEvent = pEvent->next) {
-      if (pEvent->requestId == requestId) {
-        found = true;
-        UnregisterEvent(pEvent);
-        EventFree(pEvent);
-        break;      /* there can be only one with a given ID */
-      }
-    }
-  }
-
-  if (found) {
-    Dbg::ManageDeoptimization();
-  } else {
-    // Failure to find the event isn't really an error. For instance, it looks like Eclipse will
-    // try to be extra careful and will explicitly remove one-off single-step events (using a
-    // 'count' event modifier of 1). So the event may have already been removed as part of the
-    // event notification (see JdwpState::CleanupMatchList).
-    VLOG(jdwp) << StringPrintf("No match when removing event reqId=0x%04x", requestId);
-  }
-}
-
-/*
- * Remove all entries from the event list.
- */
-void JdwpState::UnregisterAll() {
-  MutexLock mu(Thread::Current(), event_list_lock_);
-
-  JdwpEvent* pEvent = event_list_;
-  while (pEvent != nullptr) {
-    JdwpEvent* pNextEvent = pEvent->next;
-
-    UnregisterEvent(pEvent);
-    EventFree(pEvent);
-    pEvent = pNextEvent;
-  }
-
-  event_list_ = nullptr;
-}
-
-/*
- * Allocate a JdwpEvent struct with enough space to hold the specified
- * number of mod records.
- */
-JdwpEvent* EventAlloc(int numMods) {
-  JdwpEvent* newEvent;
-  int allocSize = offsetof(JdwpEvent, mods) + numMods * sizeof(newEvent->mods[0]);
-  newEvent = reinterpret_cast<JdwpEvent*>(malloc(allocSize));
-  memset(newEvent, 0, allocSize);
-  return newEvent;
-}
-
-/*
- * Free a JdwpEvent.
- *
- * Do not call this until the event has been removed from the list.
- */
-void EventFree(JdwpEvent* pEvent) {
-  if (pEvent == nullptr) {
-    return;
-  }
-
-  /* make sure it was removed from the list */
-  CHECK(pEvent->prev == nullptr);
-  CHECK(pEvent->next == nullptr);
-  /* want to check state->event_list_ != pEvent */
-
-  /*
-   * Free any hairy bits in the mods.
-   */
-  for (int i = 0; i < pEvent->modCount; i++) {
-    if (pEvent->mods[i].modKind == MK_CLASS_MATCH) {
-      free(pEvent->mods[i].classMatch.classPattern);
-      pEvent->mods[i].classMatch.classPattern = nullptr;
-    }
-    if (pEvent->mods[i].modKind == MK_CLASS_EXCLUDE) {
-      free(pEvent->mods[i].classExclude.classPattern);
-      pEvent->mods[i].classExclude.classPattern = nullptr;
-    }
-  }
-
-  free(pEvent);
-}
-
-/*
- * Run through the list and remove any entries with an expired "count" mod
- * from the event list.
- */
-void JdwpState::CleanupMatchList(const std::vector<JdwpEvent*>& match_list) {
-  for (JdwpEvent* pEvent : match_list) {
-    for (int i = 0; i < pEvent->modCount; ++i) {
-      if (pEvent->mods[i].modKind == MK_COUNT && pEvent->mods[i].count.count == 0) {
-        VLOG(jdwp) << StringPrintf("##### Removing expired event (requestId=%#" PRIx32 ")",
-                                   pEvent->requestId);
-        UnregisterEvent(pEvent);
-        EventFree(pEvent);
-        break;
-      }
-    }
-  }
-}
-
-/*
- * Match a string against a "restricted regular expression", which is just
- * a string that may start or end with '*' (e.g. "*.Foo" or "java.*").
- *
- * ("Restricted name globbing" might have been a better term.)
- */
-static bool PatternMatch(const char* pattern, const std::string& target) {
-  size_t patLen = strlen(pattern);
-  if (pattern[0] == '*') {
-    patLen--;
-    if (target.size() < patLen) {
-      return false;
-    }
-    return strcmp(pattern+1, target.c_str() + (target.size()-patLen)) == 0;
-  } else if (pattern[patLen-1] == '*') {
-    return strncmp(pattern, target.c_str(), patLen-1) == 0;
-  } else {
-    return strcmp(pattern, target.c_str()) == 0;
-  }
-}
-
-/*
- * See if the event's mods match up with the contents of "basket".
- *
- * If we find a Count mod before rejecting an event, we decrement it.  We
- * need to do this even if later mods cause us to ignore the event.
- */
-static bool ModsMatch(JdwpEvent* pEvent, const ModBasket& basket)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  JdwpEventMod* pMod = pEvent->mods;
-
-  for (int i = pEvent->modCount; i > 0; i--, pMod++) {
-    switch (pMod->modKind) {
-    case MK_COUNT:
-      CHECK_GT(pMod->count.count, 0);
-      pMod->count.count--;
-      if (pMod->count.count > 0) {
-        return false;
-      }
-      break;
-    case MK_CONDITIONAL:
-      LOG(FATAL) << "Unexpected MK_CONDITIONAL";  // should not be getting these
-      UNREACHABLE();
-    case MK_THREAD_ONLY:
-      if (!Dbg::MatchThread(pMod->threadOnly.threadId, basket.thread)) {
-        return false;
-      }
-      break;
-    case MK_CLASS_ONLY:
-      if (!Dbg::MatchType(basket.locationClass.Get(), pMod->classOnly.refTypeId)) {
-        return false;
-      }
-      break;
-    case MK_CLASS_MATCH:
-      if (!PatternMatch(pMod->classMatch.classPattern, basket.className)) {
-        return false;
-      }
-      break;
-    case MK_CLASS_EXCLUDE:
-      if (PatternMatch(pMod->classMatch.classPattern, basket.className)) {
-        return false;
-      }
-      break;
-    case MK_LOCATION_ONLY:
-      if (!Dbg::MatchLocation(pMod->locationOnly.loc, *basket.pLoc)) {
-        return false;
-      }
-      break;
-    case MK_EXCEPTION_ONLY:
-      if (pMod->exceptionOnly.refTypeId != 0 &&
-          !Dbg::MatchType(basket.exceptionClass.Get(), pMod->exceptionOnly.refTypeId)) {
-        return false;
-      }
-      if ((basket.caught && !pMod->exceptionOnly.caught) ||
-          (!basket.caught && !pMod->exceptionOnly.uncaught)) {
-        return false;
-      }
-      break;
-    case MK_FIELD_ONLY:
-      if (!Dbg::MatchField(pMod->fieldOnly.refTypeId, pMod->fieldOnly.fieldId, basket.field)) {
-        return false;
-      }
-      break;
-    case MK_STEP:
-      if (!Dbg::MatchThread(pMod->step.threadId, basket.thread)) {
-        return false;
-      }
-      break;
-    case MK_INSTANCE_ONLY:
-      if (!Dbg::MatchInstance(pMod->instanceOnly.objectId, basket.thisPtr.Get())) {
-        return false;
-      }
-      break;
-    default:
-      LOG(FATAL) << "unknown mod kind " << pMod->modKind;
-      UNREACHABLE();
-    }
-  }
-  return true;
-}
-
-/*
- * Find all events of type "event_kind" with mods that match up with the
- * rest of the arguments while holding the event list lock. This method
- * is used by FindMatchingEvents below.
- *
- * Found events are appended to "match_list" so this may be called multiple times for grouped
- * events.
- *
- * DO NOT call this multiple times for the same eventKind, as Count mods are
- * decremented during the scan.
- */
-void JdwpState::FindMatchingEventsLocked(JdwpEventKind event_kind, const ModBasket& basket,
-                                         std::vector<JdwpEvent*>* match_list) {
-  for (JdwpEvent* pEvent = event_list_; pEvent != nullptr; pEvent = pEvent->next) {
-    if (pEvent->eventKind == event_kind && ModsMatch(pEvent, basket)) {
-      match_list->push_back(pEvent);
-    }
-  }
-}
-
-/*
- * Find all events of type "event_kind" with mods that match up with the
- * rest of the arguments and return true if at least one event matches,
- * false otherwise.
- *
- * Found events are appended to "match_list" so this may be called multiple
- * times for grouped events.
- *
- * DO NOT call this multiple times for the same eventKind, as Count mods are
- * decremented during the scan.
- */
-bool JdwpState::FindMatchingEvents(JdwpEventKind event_kind, const ModBasket& basket,
-                                   std::vector<JdwpEvent*>* match_list) {
-  MutexLock mu(Thread::Current(), event_list_lock_);
-  match_list->reserve(event_list_size_);
-  FindMatchingEventsLocked(event_kind, basket, match_list);
-  return !match_list->empty();
-}
-
-/*
- * Scan through the list of matches and determine the most severe
- * suspension policy.
- */
-static JdwpSuspendPolicy ScanSuspendPolicy(const std::vector<JdwpEvent*>& match_list) {
-  JdwpSuspendPolicy policy = SP_NONE;
-
-  for (JdwpEvent* pEvent : match_list) {
-    if (pEvent->suspend_policy > policy) {
-      policy = pEvent->suspend_policy;
-    }
-  }
-
-  return policy;
-}
-
-/*
- * Three possibilities:
- *  SP_NONE - do nothing
- *  SP_EVENT_THREAD - suspend ourselves
- *  SP_ALL - suspend everybody except JDWP support thread
- */
-void JdwpState::SuspendByPolicy(JdwpSuspendPolicy suspend_policy, JDWP::ObjectId thread_self_id) {
-  VLOG(jdwp) << "SuspendByPolicy(" << suspend_policy << ")";
-  if (suspend_policy == SP_NONE) {
-    return;
-  }
-
-  if (suspend_policy == SP_ALL) {
-    Dbg::SuspendVM();
-  } else {
-    CHECK_EQ(suspend_policy, SP_EVENT_THREAD);
-  }
-
-  /* this is rare but possible -- see CLASS_PREPARE handling */
-  if (thread_self_id == debug_thread_id_) {
-    LOG(INFO) << "NOTE: SuspendByPolicy not suspending JDWP thread";
-    return;
-  }
-
-  while (true) {
-    Dbg::SuspendSelf();
-
-    /*
-     * The JDWP thread has told us (and possibly all other threads) to
-     * resume.  See if it has left anything in our DebugInvokeReq mailbox.
-     */
-    DebugInvokeReq* const pReq = Dbg::GetInvokeReq();
-    if (pReq == nullptr) {
-      break;
-    }
-
-    // Execute method.
-    Dbg::ExecuteMethod(pReq);
-  }
-}
-
-void JdwpState::SendRequestAndPossiblySuspend(ExpandBuf* pReq, JdwpSuspendPolicy suspend_policy,
-                                              ObjectId threadId) {
-  Thread* const self = Thread::Current();
-  self->AssertThreadSuspensionIsAllowable();
-  CHECK(pReq != nullptr);
-  CHECK_EQ(threadId, Dbg::GetThreadSelfId()) << "Only the current thread can suspend itself";
-  /* send request and possibly suspend ourselves */
-  ScopedThreadSuspension sts(self, kWaitingForDebuggerSend);
-  if (suspend_policy != SP_NONE) {
-    AcquireJdwpTokenForEvent(threadId);
-  }
-  EventFinish(pReq);
-  {
-    // Before suspending, we change our state to kSuspended so the debugger sees us as RUNNING.
-    ScopedThreadStateChange stsc(self, kSuspended);
-    SuspendByPolicy(suspend_policy, threadId);
-  }
-}
-
-/*
- * Determine if there is a method invocation in progress in the current
- * thread.
- *
- * We look at the "invoke_needed" flag in the per-thread DebugInvokeReq
- * state.  If set, we're in the process of invoking a method.
- */
-bool JdwpState::InvokeInProgress() {
-  DebugInvokeReq* pReq = Dbg::GetInvokeReq();
-  return pReq != nullptr;
-}
-
-void JdwpState::AcquireJdwpTokenForCommand() {
-  CHECK_EQ(Thread::Current(), GetDebugThread()) << "Expected debugger thread";
-  SetWaitForJdwpToken(debug_thread_id_);
-}
-
-void JdwpState::ReleaseJdwpTokenForCommand() {
-  CHECK_EQ(Thread::Current(), GetDebugThread()) << "Expected debugger thread";
-  ClearWaitForJdwpToken();
-}
-
-void JdwpState::AcquireJdwpTokenForEvent(ObjectId threadId) {
-  SetWaitForJdwpToken(threadId);
-}
-
-void JdwpState::ReleaseJdwpTokenForEvent() {
-  ClearWaitForJdwpToken();
-}
-
-/*
- * We need the JDWP thread to hold off on doing stuff while we post an
- * event and then suspend ourselves.
- *
- * This could go to sleep waiting for another thread, so it's important
- * that the thread be marked as VMWAIT before calling here.
- */
-void JdwpState::SetWaitForJdwpToken(ObjectId threadId) {
-  bool waited = false;
-  Thread* const self = Thread::Current();
-  CHECK_NE(threadId, 0u);
-  CHECK_NE(self->GetState(), kRunnable);
-  Locks::mutator_lock_->AssertNotHeld(self);
-
-  /* this is held for very brief periods; contention is unlikely */
-  MutexLock mu(self, jdwp_token_lock_);
-
-  if (jdwp_token_owner_thread_id_ == threadId) {
-    // Only the debugger thread may already hold the event token. For instance, it may trigger
-    // a CLASS_PREPARE event while processing a command that initializes a class.
-    CHECK_EQ(threadId, debug_thread_id_) << "Non-debugger thread is already holding event token";
-  } else {
-    /*
-     * If another thread is already doing stuff, wait for it.  This can
-     * go to sleep indefinitely.
-     */
-
-    while (jdwp_token_owner_thread_id_ != 0) {
-      VLOG(jdwp) << StringPrintf("event in progress (%#" PRIx64 "), %#" PRIx64 " sleeping",
-                                 jdwp_token_owner_thread_id_, threadId);
-      waited = true;
-      jdwp_token_cond_.Wait(self);
-    }
-
-    if (waited || threadId != debug_thread_id_) {
-      VLOG(jdwp) << StringPrintf("event token grabbed (%#" PRIx64 ")", threadId);
-    }
-    jdwp_token_owner_thread_id_ = threadId;
-  }
-}
-
-/*
- * Clear the threadId and signal anybody waiting.
- */
-void JdwpState::ClearWaitForJdwpToken() {
-  /*
-   * Grab the mutex.  Don't try to go in/out of VMWAIT mode, as this
-   * function is called by Dbg::SuspendSelf(), and the transition back
-   * to RUNNING would confuse it.
-   */
-  Thread* const self = Thread::Current();
-  MutexLock mu(self, jdwp_token_lock_);
-
-  CHECK_NE(jdwp_token_owner_thread_id_, 0U);
-  VLOG(jdwp) << StringPrintf("cleared event token (%#" PRIx64 ")", jdwp_token_owner_thread_id_);
-
-  jdwp_token_owner_thread_id_ = 0;
-  jdwp_token_cond_.Signal(self);
-}
-
-/*
- * Prep an event.  Allocates storage for the message and leaves space for
- * the header.
- */
-static ExpandBuf* eventPrep() {
-  ExpandBuf* pReq = expandBufAlloc();
-  expandBufAddSpace(pReq, kJDWPHeaderLen);
-  return pReq;
-}
-
-/*
- * Write the header into the buffer and send the packet off to the debugger.
- *
- * Takes ownership of "pReq" (currently discards it).
- */
-void JdwpState::EventFinish(ExpandBuf* pReq) {
-  uint8_t* buf = expandBufGetBuffer(pReq);
-
-  Set4BE(buf + kJDWPHeaderSizeOffset, expandBufGetLength(pReq));
-  Set4BE(buf + kJDWPHeaderIdOffset, NextRequestSerial());
-  Set1(buf + kJDWPHeaderFlagsOffset, 0);     /* flags */
-  Set1(buf + kJDWPHeaderCmdSetOffset, kJDWPEventCmdSet);
-  Set1(buf + kJDWPHeaderCmdOffset, kJDWPEventCompositeCmd);
-
-  SendRequest(pReq);
-
-  expandBufFree(pReq);
-}
-
-
-/*
- * Tell the debugger that we have finished initializing.  This is always
- * sent, even if the debugger hasn't requested it.
- *
- * This should be sent "before the main thread is started and before
- * any application code has been executed".  The thread ID in the message
- * must be for the main thread.
- */
-void JdwpState::PostVMStart() {
-  JdwpSuspendPolicy suspend_policy = (options_->suspend) ? SP_ALL : SP_NONE;
-  ObjectId threadId = Dbg::GetThreadSelfId();
-
-  VLOG(jdwp) << "EVENT: " << EK_VM_START;
-  VLOG(jdwp) << "  suspend_policy=" << suspend_policy;
-
-  ExpandBuf* pReq = eventPrep();
-  expandBufAdd1(pReq, suspend_policy);
-  expandBufAdd4BE(pReq, 1);
-  expandBufAdd1(pReq, EK_VM_START);
-  expandBufAdd4BE(pReq, 0);       /* requestId */
-  expandBufAddObjectId(pReq, threadId);
-
-  Dbg::ManageDeoptimization();
-
-  /* send request and possibly suspend ourselves */
-  SendRequestAndPossiblySuspend(pReq, suspend_policy, threadId);
-}
-
-static void LogMatchingEventsAndThread(const std::vector<JdwpEvent*>& match_list,
-                                       ObjectId thread_id)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  for (size_t i = 0, e = match_list.size(); i < e; ++i) {
-    JdwpEvent* pEvent = match_list[i];
-    VLOG(jdwp) << "EVENT #" << i << ": " << pEvent->eventKind
-               << StringPrintf(" (requestId=%#" PRIx32 ")", pEvent->requestId);
-  }
-  std::string thread_name;
-  JdwpError error = Dbg::GetThreadName(thread_id, &thread_name);
-  if (error != JDWP::ERR_NONE) {
-    thread_name = "<unknown>";
-  }
-  VLOG(jdwp) << StringPrintf("  thread=%#" PRIx64, thread_id) << " " << thread_name;
-}
-
-static void SetJdwpLocationFromEventLocation(const JDWP::EventLocation* event_location,
-                                             JDWP::JdwpLocation* jdwp_location)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  DCHECK(event_location != nullptr);
-  DCHECK(jdwp_location != nullptr);
-  Dbg::SetJdwpLocation(jdwp_location, event_location->method, event_location->dex_pc);
-}
-
-/*
- * A location of interest has been reached.  This handles:
- *   Breakpoint
- *   SingleStep
- *   MethodEntry
- *   MethodExit
- * These four types must be grouped together in a single response.  The
- * "eventFlags" indicates the type of event(s) that have happened.
- *
- * Valid mods:
- *   Count, ThreadOnly, ClassOnly, ClassMatch, ClassExclude, InstanceOnly
- *   LocationOnly (for breakpoint/step only)
- *   Step (for step only)
- *
- * Interesting test cases:
- *  - Put a breakpoint on a native method.  Eclipse creates METHOD_ENTRY
- *    and METHOD_EXIT events with a ClassOnly mod on the method's class.
- *  - Use "run to line".  Eclipse creates a BREAKPOINT with Count=1.
- *  - Single-step to a line with a breakpoint.  Should get a single
- *    event message with both events in it.
- */
-void JdwpState::PostLocationEvent(const EventLocation* pLoc, mirror::Object* thisPtr,
-                                  int eventFlags, const JValue* returnValue) {
-  DCHECK(pLoc != nullptr);
-  DCHECK(pLoc->method != nullptr);
-  DCHECK_EQ(pLoc->method->IsStatic(), thisPtr == nullptr);
-
-  ModBasket basket(Thread::Current());
-  basket.pLoc = pLoc;
-  basket.locationClass.Assign(pLoc->method->GetDeclaringClass());
-  basket.thisPtr.Assign(thisPtr);
-  basket.className = Dbg::GetClassName(basket.locationClass.Get());
-
-  /*
-   * On rare occasions we may need to execute interpreted code in the VM
-   * while handling a request from the debugger.  Don't fire breakpoints
-   * while doing so.  (I don't think we currently do this at all, so
-   * this is mostly paranoia.)
-   */
-  if (basket.thread == GetDebugThread()) {
-    VLOG(jdwp) << "Ignoring location event in JDWP thread";
-    return;
-  }
-
-  /*
-   * The debugger variable display tab may invoke the interpreter to format
-   * complex objects.  We want to ignore breakpoints and method entry/exit
-   * traps while working on behalf of the debugger.
-   *
-   * If we don't ignore them, the VM will get hung up, because we'll
-   * suspend on a breakpoint while the debugger is still waiting for its
-   * method invocation to complete.
-   */
-  if (InvokeInProgress()) {
-    VLOG(jdwp) << "Not checking breakpoints during invoke (" << basket.className << ")";
-    return;
-  }
-
-  std::vector<JdwpEvent*> match_list;
-  {
-    // We use the locked version because we have multiple possible match events.
-    MutexLock mu(Thread::Current(), event_list_lock_);
-    match_list.reserve(event_list_size_);
-    if ((eventFlags & Dbg::kBreakpoint) != 0) {
-      FindMatchingEventsLocked(EK_BREAKPOINT, basket, &match_list);
-    }
-    if ((eventFlags & Dbg::kSingleStep) != 0) {
-      FindMatchingEventsLocked(EK_SINGLE_STEP, basket, &match_list);
-    }
-    if ((eventFlags & Dbg::kMethodEntry) != 0) {
-      FindMatchingEventsLocked(EK_METHOD_ENTRY, basket, &match_list);
-    }
-    if ((eventFlags & Dbg::kMethodExit) != 0) {
-      FindMatchingEventsLocked(EK_METHOD_EXIT, basket, &match_list);
-      FindMatchingEventsLocked(EK_METHOD_EXIT_WITH_RETURN_VALUE, basket, &match_list);
-    }
-  }
-  if (match_list.empty()) {
-    // No matching event.
-    return;
-  }
-  JdwpSuspendPolicy suspend_policy = ScanSuspendPolicy(match_list);
-
-  ObjectId thread_id = Dbg::GetThreadId(basket.thread);
-  JDWP::JdwpLocation jdwp_location;
-  SetJdwpLocationFromEventLocation(pLoc, &jdwp_location);
-
-  if (VLOG_IS_ON(jdwp)) {
-    LogMatchingEventsAndThread(match_list, thread_id);
-    VLOG(jdwp) << "  location=" << jdwp_location;
-    VLOG(jdwp) << "  suspend_policy=" << suspend_policy;
-  }
-
-  ExpandBuf* pReq = eventPrep();
-  expandBufAdd1(pReq, suspend_policy);
-  expandBufAdd4BE(pReq, match_list.size());
-
-  for (const JdwpEvent* pEvent : match_list) {
-    expandBufAdd1(pReq, pEvent->eventKind);
-    expandBufAdd4BE(pReq, pEvent->requestId);
-    expandBufAddObjectId(pReq, thread_id);
-    expandBufAddLocation(pReq, jdwp_location);
-    if (pEvent->eventKind == EK_METHOD_EXIT_WITH_RETURN_VALUE) {
-      Dbg::OutputMethodReturnValue(jdwp_location.method_id, returnValue, pReq);
-    }
-  }
-
-  {
-    MutexLock mu(Thread::Current(), event_list_lock_);
-    CleanupMatchList(match_list);
-  }
-
-  Dbg::ManageDeoptimization();
-
-  SendRequestAndPossiblySuspend(pReq, suspend_policy, thread_id);
-}
-
-void JdwpState::PostFieldEvent(const EventLocation* pLoc, ArtField* field,
-                               mirror::Object* this_object, const JValue* fieldValue,
-                               bool is_modification) {
-  DCHECK(pLoc != nullptr);
-  DCHECK(field != nullptr);
-  DCHECK_EQ(fieldValue != nullptr, is_modification);
-  DCHECK_EQ(field->IsStatic(), this_object == nullptr);
-
-  ModBasket basket(Thread::Current());
-  basket.pLoc = pLoc;
-  basket.locationClass.Assign(pLoc->method->GetDeclaringClass());
-  basket.thisPtr.Assign(this_object);
-  basket.className = Dbg::GetClassName(basket.locationClass.Get());
-  basket.field = field;
-
-  if (InvokeInProgress()) {
-    VLOG(jdwp) << "Not posting field event during invoke (" << basket.className << ")";
-    return;
-  }
-
-  std::vector<JdwpEvent*> match_list;
-  const JdwpEventKind match_kind = (is_modification) ? EK_FIELD_MODIFICATION : EK_FIELD_ACCESS;
-  if (!FindMatchingEvents(match_kind, basket, &match_list)) {
-    // No matching event.
-    return;
-  }
-
-  JdwpSuspendPolicy suspend_policy = ScanSuspendPolicy(match_list);
-  ObjectId thread_id = Dbg::GetThreadId(basket.thread);
-  ObjectRegistry* registry = Dbg::GetObjectRegistry();
-  ObjectId instance_id = registry->Add(basket.thisPtr);
-  RefTypeId field_type_id = registry->AddRefType(field->GetDeclaringClass());
-  FieldId field_id = Dbg::ToFieldId(field);
-  JDWP::JdwpLocation jdwp_location;
-  SetJdwpLocationFromEventLocation(pLoc, &jdwp_location);
-
-  if (VLOG_IS_ON(jdwp)) {
-    LogMatchingEventsAndThread(match_list, thread_id);
-    VLOG(jdwp) << "  location=" << jdwp_location;
-    VLOG(jdwp) << StringPrintf("  this=%#" PRIx64, instance_id);
-    VLOG(jdwp) << StringPrintf("  type=%#" PRIx64, field_type_id) << " "
-        << Dbg::GetClassName(field_id);
-    VLOG(jdwp) << StringPrintf("  field=%#" PRIx64, field_id) << " "
-        << Dbg::GetFieldName(field_id);
-    VLOG(jdwp) << "  suspend_policy=" << suspend_policy;
-  }
-
-  ExpandBuf* pReq = eventPrep();
-  expandBufAdd1(pReq, suspend_policy);
-  expandBufAdd4BE(pReq, match_list.size());
-
-  // Get field's reference type tag.
-  JDWP::JdwpTypeTag type_tag = Dbg::GetTypeTag(field->GetDeclaringClass());
-
-  // Get instance type tag.
-  uint8_t tag;
-  {
-    ScopedObjectAccessUnchecked soa(Thread::Current());
-    tag = Dbg::TagFromObject(soa, basket.thisPtr.Get());
-  }
-
-  for (const JdwpEvent* pEvent : match_list) {
-    expandBufAdd1(pReq, pEvent->eventKind);
-    expandBufAdd4BE(pReq, pEvent->requestId);
-    expandBufAddObjectId(pReq, thread_id);
-    expandBufAddLocation(pReq, jdwp_location);
-    expandBufAdd1(pReq, type_tag);
-    expandBufAddRefTypeId(pReq, field_type_id);
-    expandBufAddFieldId(pReq, field_id);
-    expandBufAdd1(pReq, tag);
-    expandBufAddObjectId(pReq, instance_id);
-    if (is_modification) {
-      Dbg::OutputFieldValue(field_id, fieldValue, pReq);
-    }
-  }
-
-  {
-    MutexLock mu(Thread::Current(), event_list_lock_);
-    CleanupMatchList(match_list);
-  }
-
-  Dbg::ManageDeoptimization();
-
-  SendRequestAndPossiblySuspend(pReq, suspend_policy, thread_id);
-}
-
-/*
- * A thread is starting or stopping.
- *
- * Valid mods:
- *  Count, ThreadOnly
- */
-void JdwpState::PostThreadChange(Thread* thread, bool start) {
-  CHECK_EQ(thread, Thread::Current());
-
-  /*
-   * I don't think this can happen.
-   */
-  if (InvokeInProgress()) {
-    LOG(WARNING) << "Not posting thread change during invoke";
-    return;
-  }
-
-  // We need the java.lang.Thread object associated to the starting/ending
-  // thread to get its JDWP id. Therefore we can't report event if there
-  // is no Java peer. This happens when the runtime shuts down and re-attaches
-  // the current thread without creating a Java peer.
-  if (thread->GetPeer() == nullptr) {
-    return;
-  }
-
-  ModBasket basket(thread);
-
-  std::vector<JdwpEvent*> match_list;
-  const JdwpEventKind match_kind = (start) ? EK_THREAD_START : EK_THREAD_DEATH;
-  if (!FindMatchingEvents(match_kind, basket, &match_list)) {
-    // No matching event.
-    return;
-  }
-
-  JdwpSuspendPolicy suspend_policy = ScanSuspendPolicy(match_list);
-  ObjectId thread_id = Dbg::GetThreadId(basket.thread);
-
-  if (VLOG_IS_ON(jdwp)) {
-    LogMatchingEventsAndThread(match_list, thread_id);
-    VLOG(jdwp) << "  suspend_policy=" << suspend_policy;
-  }
-
-  ExpandBuf* pReq = eventPrep();
-  expandBufAdd1(pReq, suspend_policy);
-  expandBufAdd4BE(pReq, match_list.size());
-
-  for (const JdwpEvent* pEvent : match_list) {
-    expandBufAdd1(pReq, pEvent->eventKind);
-    expandBufAdd4BE(pReq, pEvent->requestId);
-    expandBufAdd8BE(pReq, thread_id);
-  }
-
-  {
-    MutexLock mu(Thread::Current(), event_list_lock_);
-    CleanupMatchList(match_list);
-  }
-
-  Dbg::ManageDeoptimization();
-
-  SendRequestAndPossiblySuspend(pReq, suspend_policy, thread_id);
-}
-
-/*
- * Send a polite "VM is dying" message to the debugger.
- *
- * Skips the usual "event token" stuff.
- */
-bool JdwpState::PostVMDeath() {
-  VLOG(jdwp) << "EVENT: " << EK_VM_DEATH;
-
-  ExpandBuf* pReq = eventPrep();
-  expandBufAdd1(pReq, SP_NONE);
-  expandBufAdd4BE(pReq, 1);
-
-  expandBufAdd1(pReq, EK_VM_DEATH);
-  expandBufAdd4BE(pReq, 0);
-  EventFinish(pReq);
-  return true;
-}
-
-/*
- * An exception has been thrown.  It may or may not have been caught.
- *
- * Valid mods:
- *  Count, ThreadOnly, ClassOnly, ClassMatch, ClassExclude, LocationOnly,
- *    ExceptionOnly, InstanceOnly
- *
- * The "exceptionId" has not been added to the GC-visible object registry,
- * because there's a pretty good chance that we're not going to send it
- * up the debugger.
- */
-void JdwpState::PostException(const EventLocation* pThrowLoc, mirror::Throwable* exception_object,
-                              const EventLocation* pCatchLoc, mirror::Object* thisPtr) {
-  DCHECK(exception_object != nullptr);
-  DCHECK(pThrowLoc != nullptr);
-  DCHECK(pCatchLoc != nullptr);
-  if (pThrowLoc->method != nullptr) {
-    DCHECK_EQ(pThrowLoc->method->IsStatic(), thisPtr == nullptr);
-  } else {
-    VLOG(jdwp) << "Unexpected: exception event with empty throw location";
-  }
-
-  ModBasket basket(Thread::Current());
-  basket.pLoc = pThrowLoc;
-  if (pThrowLoc->method != nullptr) {
-    basket.locationClass.Assign(pThrowLoc->method->GetDeclaringClass());
-  }
-  basket.className = Dbg::GetClassName(basket.locationClass.Get());
-  basket.exceptionClass.Assign(exception_object->GetClass());
-  basket.caught = (pCatchLoc->method != nullptr);
-  basket.thisPtr.Assign(thisPtr);
-
-  /* don't try to post an exception caused by the debugger */
-  if (InvokeInProgress()) {
-    VLOG(jdwp) << "Not posting exception hit during invoke (" << basket.className << ")";
-    return;
-  }
-
-  std::vector<JdwpEvent*> match_list;
-  if (!FindMatchingEvents(EK_EXCEPTION, basket, &match_list)) {
-    // No matching event.
-    return;
-  }
-
-  JdwpSuspendPolicy suspend_policy = ScanSuspendPolicy(match_list);
-  ObjectId thread_id = Dbg::GetThreadId(basket.thread);
-  ObjectRegistry* registry = Dbg::GetObjectRegistry();
-  ObjectId exceptionId = registry->Add(exception_object);
-  JDWP::JdwpLocation jdwp_throw_location;
-  JDWP::JdwpLocation jdwp_catch_location;
-  SetJdwpLocationFromEventLocation(pThrowLoc, &jdwp_throw_location);
-  SetJdwpLocationFromEventLocation(pCatchLoc, &jdwp_catch_location);
-
-  if (VLOG_IS_ON(jdwp)) {
-    std::string exceptionClassName(mirror::Class::PrettyDescriptor(exception_object->GetClass()));
-
-    LogMatchingEventsAndThread(match_list, thread_id);
-    VLOG(jdwp) << "  throwLocation=" << jdwp_throw_location;
-    if (jdwp_catch_location.class_id == 0) {
-      VLOG(jdwp) << "  catchLocation=uncaught";
-    } else {
-      VLOG(jdwp) << "  catchLocation=" << jdwp_catch_location;
-    }
-    VLOG(jdwp) << StringPrintf("  exception=%#" PRIx64, exceptionId) << " "
-        << exceptionClassName;
-    VLOG(jdwp) << "  suspend_policy=" << suspend_policy;
-  }
-
-  ExpandBuf* pReq = eventPrep();
-  expandBufAdd1(pReq, suspend_policy);
-  expandBufAdd4BE(pReq, match_list.size());
-
-  for (const JdwpEvent* pEvent : match_list) {
-    expandBufAdd1(pReq, pEvent->eventKind);
-    expandBufAdd4BE(pReq, pEvent->requestId);
-    expandBufAddObjectId(pReq, thread_id);
-    expandBufAddLocation(pReq, jdwp_throw_location);
-    expandBufAdd1(pReq, JT_OBJECT);
-    expandBufAddObjectId(pReq, exceptionId);
-    expandBufAddLocation(pReq, jdwp_catch_location);
-  }
-
-  {
-    MutexLock mu(Thread::Current(), event_list_lock_);
-    CleanupMatchList(match_list);
-  }
-
-  Dbg::ManageDeoptimization();
-
-  SendRequestAndPossiblySuspend(pReq, suspend_policy, thread_id);
-}
-
-/*
- * Announce that a class has been loaded.
- *
- * Valid mods:
- *  Count, ThreadOnly, ClassOnly, ClassMatch, ClassExclude
- */
-void JdwpState::PostClassPrepare(mirror::Class* klass) {
-  DCHECK(klass != nullptr);
-
-  ModBasket basket(Thread::Current());
-  basket.locationClass.Assign(klass);
-  basket.className = Dbg::GetClassName(basket.locationClass.Get());
-
-  /* suppress class prep caused by debugger */
-  if (InvokeInProgress()) {
-    VLOG(jdwp) << "Not posting class prep caused by invoke (" << basket.className << ")";
-    return;
-  }
-
-  std::vector<JdwpEvent*> match_list;
-  if (!FindMatchingEvents(EK_CLASS_PREPARE, basket, &match_list)) {
-    // No matching event.
-    return;
-  }
-
-  JdwpSuspendPolicy suspend_policy = ScanSuspendPolicy(match_list);
-  ObjectId thread_id = Dbg::GetThreadId(basket.thread);
-  ObjectRegistry* registry = Dbg::GetObjectRegistry();
-  RefTypeId class_id = registry->AddRefType(basket.locationClass);
-
-  // OLD-TODO - we currently always send both "verified" and "prepared" since
-  // debuggers seem to like that.  There might be some advantage to honesty,
-  // since the class may not yet be verified.
-  int status = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
-  JDWP::JdwpTypeTag tag = Dbg::GetTypeTag(basket.locationClass.Get());
-  std::string temp;
-  std::string signature(basket.locationClass->GetDescriptor(&temp));
-
-  if (VLOG_IS_ON(jdwp)) {
-    LogMatchingEventsAndThread(match_list, thread_id);
-    VLOG(jdwp) << StringPrintf("  type=%#" PRIx64, class_id) << " " << signature;
-    VLOG(jdwp) << "  suspend_policy=" << suspend_policy;
-  }
-
-  ObjectId reported_thread_id = thread_id;
-  if (reported_thread_id == debug_thread_id_) {
-    /*
-     * JDWP says that, for a class prep in the debugger thread, we
-     * should set thread to null and if any threads were supposed
-     * to be suspended then we suspend all other threads.
-     */
-    VLOG(jdwp) << "  NOTE: class prepare in debugger thread!";
-    reported_thread_id = 0;
-    if (suspend_policy == SP_EVENT_THREAD) {
-      suspend_policy = SP_ALL;
-    }
-  }
-
-  ExpandBuf* pReq = eventPrep();
-  expandBufAdd1(pReq, suspend_policy);
-  expandBufAdd4BE(pReq, match_list.size());
-
-  for (const JdwpEvent* pEvent : match_list) {
-    expandBufAdd1(pReq, pEvent->eventKind);
-    expandBufAdd4BE(pReq, pEvent->requestId);
-    expandBufAddObjectId(pReq, reported_thread_id);
-    expandBufAdd1(pReq, tag);
-    expandBufAddRefTypeId(pReq, class_id);
-    expandBufAddUtf8String(pReq, signature);
-    expandBufAdd4BE(pReq, status);
-  }
-
-  {
-    MutexLock mu(Thread::Current(), event_list_lock_);
-    CleanupMatchList(match_list);
-  }
-
-  Dbg::ManageDeoptimization();
-
-  SendRequestAndPossiblySuspend(pReq, suspend_policy, thread_id);
-}
-
-/*
- * Setup the header for a chunk of DDM data.
- */
-void JdwpState::SetupChunkHeader(uint32_t type, size_t data_len, size_t header_size,
-                                 uint8_t* out_header) {
-  CHECK_EQ(header_size, static_cast<size_t>(kJDWPHeaderLen + 8));
-  /* form the header (JDWP plus DDMS) */
-  Set4BE(out_header, header_size + data_len);
-  Set4BE(out_header + 4, NextRequestSerial());
-  Set1(out_header + 8, 0);     /* flags */
-  Set1(out_header + 9, kJDWPDdmCmdSet);
-  Set1(out_header + 10, kJDWPDdmCmd);
-  Set4BE(out_header + 11, type);
-  Set4BE(out_header + 15, data_len);
-}
-
-/*
- * Send up a chunk of DDM data.
- *
- * While this takes the form of a JDWP "event", it doesn't interact with
- * other debugger traffic, and can't suspend the VM, so we skip all of
- * the fun event token gymnastics.
- */
-void JdwpState::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) {
-  uint8_t header[kJDWPHeaderLen + 8] = { 0 };
-  size_t dataLen = 0;
-
-  CHECK(iov != nullptr);
-  CHECK_GT(iov_count, 0);
-  CHECK_LT(iov_count, 10);
-
-  /*
-   * "Wrap" the contents of the iovec with a JDWP/DDMS header.  We do
-   * this by creating a new copy of the vector with space for the header.
-   */
-  std::vector<iovec> wrapiov;
-  wrapiov.push_back(iovec());
-  for (int i = 0; i < iov_count; i++) {
-    wrapiov.push_back(iov[i]);
-    dataLen += iov[i].iov_len;
-  }
-
-  SetupChunkHeader(type, dataLen, sizeof(header), header);
-
-  wrapiov[0].iov_base = header;
-  wrapiov[0].iov_len = sizeof(header);
-
-  // Try to avoid blocking GC during a send, but only safe when not using mutexes at a lower-level
-  // than mutator for lock ordering reasons.
-  Thread* self = Thread::Current();
-  bool safe_to_release_mutator_lock_over_send = !Locks::mutator_lock_->IsExclusiveHeld(self);
-  if (safe_to_release_mutator_lock_over_send) {
-    for (size_t i = 0; i < kMutatorLock; ++i) {
-      if (self->GetHeldMutex(static_cast<LockLevel>(i)) != nullptr) {
-        safe_to_release_mutator_lock_over_send = false;
-        break;
-      }
-    }
-  }
-  if (safe_to_release_mutator_lock_over_send) {
-    // Change state to waiting to allow GC, ... while we're sending.
-    ScopedThreadSuspension sts(self, kWaitingForDebuggerSend);
-    SendBufferedRequest(type, wrapiov);
-  } else {
-    // Send and possibly block GC...
-    SendBufferedRequest(type, wrapiov);
-  }
-}
-
-}  // namespace JDWP
-
-}  // namespace art
diff --git a/runtime/jdwp/jdwp_event.h b/runtime/jdwp/jdwp_event.h
deleted file mode 100644
index d269761..0000000
--- a/runtime/jdwp/jdwp_event.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Handle registration of events, and debugger event notification.
- */
-#ifndef ART_RUNTIME_JDWP_JDWP_EVENT_H_
-#define ART_RUNTIME_JDWP_JDWP_EVENT_H_
-
-#include "jdwp/jdwp.h"
-#include "jdwp/jdwp_constants.h"
-#include "jdwp/jdwp_expand_buf.h"
-
-namespace art {
-
-namespace JDWP {
-
-/*
- * Event modifiers.  A JdwpEvent may have zero or more of these.
- */
-union JdwpEventMod {
-  JdwpModKind modKind;
-  struct {
-    JdwpModKind modKind;
-    int         count;
-  } count;
-  struct {
-    JdwpModKind modKind;
-    uint32_t          exprId;
-  } conditional;
-  struct {
-    JdwpModKind modKind;
-    ObjectId    threadId;
-  } threadOnly;
-  struct {
-    JdwpModKind modKind;
-    RefTypeId   refTypeId;
-  } classOnly;
-  struct {
-    JdwpModKind modKind;
-    char*       classPattern;
-  } classMatch;
-  struct {
-    JdwpModKind modKind;
-    char*       classPattern;
-  } classExclude;
-  struct {
-    JdwpModKind modKind;
-    JdwpLocation loc;
-  } locationOnly;
-  struct {
-    JdwpModKind modKind;
-    uint8_t          caught;
-    uint8_t          uncaught;
-    RefTypeId   refTypeId;
-  } exceptionOnly;
-  struct {
-    JdwpModKind modKind;
-    RefTypeId   refTypeId;
-    FieldId     fieldId;
-  } fieldOnly;
-  struct {
-    JdwpModKind modKind;
-    ObjectId    threadId;
-    int         size;           /* JdwpStepSize */
-    int         depth;          /* JdwpStepDepth */
-  } step;
-  struct {
-    JdwpModKind modKind;
-    ObjectId    objectId;
-  } instanceOnly;
-};
-
-/*
- * One of these for every registered event.
- *
- * We over-allocate the struct to hold the modifiers.
- */
-struct JdwpEvent {
-  JdwpEvent* prev;           /* linked list */
-  JdwpEvent* next;
-
-  JdwpEventKind eventKind;      /* what kind of event is this? */
-  JdwpSuspendPolicy suspend_policy;  /* suspend all, none, or self? */
-  int modCount;       /* #of entries in mods[] */
-  uint32_t requestId;      /* serial#, reported to debugger */
-
-  JdwpEventMod mods[1];        /* MUST be last field in struct */
-};
-
-/*
- * Allocate an event structure with enough space.
- */
-JdwpEvent* EventAlloc(int numMods);
-void EventFree(JdwpEvent* pEvent);
-
-}  // namespace JDWP
-
-}  // namespace art
-
-#endif  // ART_RUNTIME_JDWP_JDWP_EVENT_H_
diff --git a/runtime/jdwp/jdwp_expand_buf.cc b/runtime/jdwp/jdwp_expand_buf.cc
deleted file mode 100644
index 4b4ca0e..0000000
--- a/runtime/jdwp/jdwp_expand_buf.cc
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Implementation of an expandable byte buffer.  Designed for serializing
- * primitive values, e.g. JDWP replies.
- */
-
-#include "jdwp/jdwp_expand_buf.h"
-
-#include <stdlib.h>
-#include <string.h>
-
-#include <android-base/logging.h>
-
-#include "jdwp/jdwp.h"
-#include "jdwp/jdwp_bits.h"
-
-namespace art {
-
-namespace JDWP {
-
-/*
- * Data structure used to track buffer use.
- */
-struct ExpandBuf {
-  uint8_t*     storage;
-  int     curLen;
-  int     maxLen;
-};
-
-#define kInitialStorage 64
-
-/*
- * Allocate a JdwpBuf and some initial storage.
- */
-ExpandBuf* expandBufAlloc() {
-  ExpandBuf* newBuf = new ExpandBuf;
-  newBuf->storage = reinterpret_cast<uint8_t*>(malloc(kInitialStorage));
-  newBuf->curLen = 0;
-  newBuf->maxLen = kInitialStorage;
-  return newBuf;
-}
-
-/*
- * Free a JdwpBuf and associated storage.
- */
-void expandBufFree(ExpandBuf* pBuf) {
-  if (pBuf == nullptr) {
-    return;
-  }
-
-  free(pBuf->storage);
-  delete pBuf;
-}
-
-/*
- * Get a pointer to the start of the buffer.
- */
-uint8_t* expandBufGetBuffer(ExpandBuf* pBuf) {
-  return pBuf->storage;
-}
-
-/*
- * Get the amount of data currently in the buffer.
- */
-size_t expandBufGetLength(ExpandBuf* pBuf) {
-  return pBuf->curLen;
-}
-
-/*
- * Ensure that the buffer has enough space to hold incoming data.  If it
- * doesn't, resize the buffer.
- */
-static void ensureSpace(ExpandBuf* pBuf, int newCount) {
-  if (pBuf->curLen + newCount <= pBuf->maxLen) {
-    return;
-  }
-
-  while (pBuf->curLen + newCount > pBuf->maxLen) {
-    pBuf->maxLen *= 2;
-  }
-
-  uint8_t* newPtr = reinterpret_cast<uint8_t*>(realloc(pBuf->storage, pBuf->maxLen));
-  if (newPtr == nullptr) {
-    LOG(FATAL) << "realloc(" << pBuf->maxLen << ") failed";
-  }
-
-  pBuf->storage = newPtr;
-}
-
-/*
- * Allocate some space in the buffer.
- */
-uint8_t* expandBufAddSpace(ExpandBuf* pBuf, int gapSize) {
-  uint8_t* gapStart;
-
-  ensureSpace(pBuf, gapSize);
-  gapStart = pBuf->storage + pBuf->curLen;
-  /* do we want to garbage-fill the gap for debugging? */
-  pBuf->curLen += gapSize;
-
-  return gapStart;
-}
-
-/*
- * Append a byte.
- */
-void expandBufAdd1(ExpandBuf* pBuf, uint8_t val) {
-  ensureSpace(pBuf, sizeof(val));
-  *(pBuf->storage + pBuf->curLen) = val;
-  pBuf->curLen++;
-}
-
-/*
- * Append two big-endian bytes.
- */
-void expandBufAdd2BE(ExpandBuf* pBuf, uint16_t val) {
-  ensureSpace(pBuf, sizeof(val));
-  Set2BE(pBuf->storage + pBuf->curLen, val);
-  pBuf->curLen += sizeof(val);
-}
-
-/*
- * Append four big-endian bytes.
- */
-void expandBufAdd4BE(ExpandBuf* pBuf, uint32_t val) {
-  ensureSpace(pBuf, sizeof(val));
-  Set4BE(pBuf->storage + pBuf->curLen, val);
-  pBuf->curLen += sizeof(val);
-}
-
-/*
- * Append eight big-endian bytes.
- */
-void expandBufAdd8BE(ExpandBuf* pBuf, uint64_t val) {
-  ensureSpace(pBuf, sizeof(val));
-  Set8BE(pBuf->storage + pBuf->curLen, val);
-  pBuf->curLen += sizeof(val);
-}
-
-static void SetUtf8String(uint8_t* buf, const char* str, size_t strLen) {
-  Set4BE(buf, strLen);
-  if (str != nullptr) {
-    memcpy(buf + sizeof(uint32_t), str, strLen);
-  }
-}
-
-/*
- * Add a UTF8 string as a 4-byte length followed by a non-nullptr-terminated
- * string.
- *
- * Because these strings are coming out of the VM, it's safe to assume that
- * they can be null-terminated (either they don't have null bytes or they
- * have stored null bytes in a multi-byte encoding).
- */
-void expandBufAddUtf8String(ExpandBuf* pBuf, const char* s) {
-  int strLen = (s != nullptr ? strlen(s) : 0);
-  ensureSpace(pBuf, sizeof(uint32_t) + strLen);
-  SetUtf8String(pBuf->storage + pBuf->curLen, s, strLen);
-  pBuf->curLen += sizeof(uint32_t) + strLen;
-}
-
-void expandBufAddUtf8String(ExpandBuf* pBuf, const std::string& s) {
-  ensureSpace(pBuf, sizeof(uint32_t) + s.size());
-  SetUtf8String(pBuf->storage + pBuf->curLen, s.data(), s.size());
-  pBuf->curLen += sizeof(uint32_t) + s.size();
-}
-
-void expandBufAddLocation(ExpandBuf* buf, const JdwpLocation& location) {
-  expandBufAdd1(buf, location.type_tag);
-  expandBufAddObjectId(buf, location.class_id);
-  expandBufAddMethodId(buf, location.method_id);
-  expandBufAdd8BE(buf, location.dex_pc);
-}
-
-}  // namespace JDWP
-
-}  // namespace art
diff --git a/runtime/jdwp/jdwp_expand_buf.h b/runtime/jdwp/jdwp_expand_buf.h
deleted file mode 100644
index 81e01e2..0000000
--- a/runtime/jdwp/jdwp_expand_buf.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * Expanding byte buffer, with primitives for appending basic data types.
- */
-#ifndef ART_RUNTIME_JDWP_JDWP_EXPAND_BUF_H_
-#define ART_RUNTIME_JDWP_JDWP_EXPAND_BUF_H_
-
-#include <string>
-
-#include <stddef.h>
-#include <stdint.h>
-
-namespace art {
-
-namespace JDWP {
-
-struct ExpandBuf;   /* private */
-struct JdwpLocation;
-
-/* create a new struct */
-ExpandBuf* expandBufAlloc();
-/* free storage */
-void expandBufFree(ExpandBuf* pBuf);
-
-/*
- * Accessors.  The buffer pointer and length will only be valid until more
- * data is added.
- */
-uint8_t* expandBufGetBuffer(ExpandBuf* pBuf);
-size_t expandBufGetLength(ExpandBuf* pBuf);
-
-/*
- * The "add" operations allocate additional storage and append the data.
- *
- * There are no "get" operations included with this "class", other than
- * GetBuffer().  If you want to get or set data from a position other
- * than the end, get a pointer to the buffer and use the inline functions
- * defined elsewhere.
- *
- * expandBufAddSpace() returns a pointer to the *start* of the region
- * added.
- */
-uint8_t* expandBufAddSpace(ExpandBuf* pBuf, int gapSize);
-void expandBufAdd1(ExpandBuf* pBuf, uint8_t val);
-void expandBufAdd2BE(ExpandBuf* pBuf, uint16_t val);
-void expandBufAdd4BE(ExpandBuf* pBuf, uint32_t val);
-void expandBufAdd8BE(ExpandBuf* pBuf, uint64_t val);
-void expandBufAddUtf8String(ExpandBuf* pBuf, const char* s);
-void expandBufAddUtf8String(ExpandBuf* pBuf, const std::string& s);
-void expandBufAddLocation(ExpandBuf* pReply, const JdwpLocation& location);
-
-}  // namespace JDWP
-
-}  // namespace art
-
-#endif  // ART_RUNTIME_JDWP_JDWP_EXPAND_BUF_H_
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
deleted file mode 100644
index 37365ff..0000000
--- a/runtime/jdwp/jdwp_handler.cc
+++ /dev/null
@@ -1,1714 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <memory>
-#include <string>
-
-#include "android-base/stringprintf.h"
-
-#include "base/atomic.h"
-#include "base/hex_dump.h"
-#include "base/logging.h"  // For VLOG.
-#include "base/macros.h"
-#include "debugger.h"
-#include "dex/utf.h"
-#include "jdwp/jdwp_constants.h"
-#include "jdwp/jdwp_event.h"
-#include "jdwp/jdwp_expand_buf.h"
-#include "jdwp/jdwp_priv.h"
-#include "runtime.h"
-#include "scoped_thread_state_change-inl.h"
-#include "thread-current-inl.h"
-
-namespace art {
-
-namespace JDWP {
-
-using android::base::StringPrintf;
-
-std::string DescribeField(const FieldId& field_id) {
-  return StringPrintf("%#" PRIx64 " (%s)", field_id, Dbg::GetFieldName(field_id).c_str());
-}
-
-std::string DescribeMethod(const MethodId& method_id) {
-  return StringPrintf("%#" PRIx64 " (%s)", method_id, Dbg::GetMethodName(method_id).c_str());
-}
-
-std::string DescribeRefTypeId(const RefTypeId& ref_type_id) {
-  std::string signature("unknown");
-  Dbg::GetSignature(ref_type_id, &signature);
-  return StringPrintf("%#" PRIx64 " (%s)", ref_type_id, signature.c_str());
-}
-
-static JdwpError WriteTaggedObject(ExpandBuf* reply, ObjectId object_id)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  uint8_t tag;
-  JdwpError rc = Dbg::GetObjectTag(object_id, &tag);
-  if (rc == ERR_NONE) {
-    expandBufAdd1(reply, tag);
-    expandBufAddObjectId(reply, object_id);
-  }
-  return rc;
-}
-
-static JdwpError WriteTaggedObjectList(ExpandBuf* reply, const std::vector<ObjectId>& objects)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  expandBufAdd4BE(reply, objects.size());
-  for (size_t i = 0; i < objects.size(); ++i) {
-    JdwpError rc = WriteTaggedObject(reply, objects[i]);
-    if (rc != ERR_NONE) {
-      return rc;
-    }
-  }
-  return ERR_NONE;
-}
-
-/*
- * Common code for *_InvokeMethod requests.
- *
- * If "is_constructor" is set, this returns "object_id" rather than the
- * expected-to-be-void return value of the called function.
- */
-static JdwpError RequestInvoke(JdwpState*, Request* request,
-                               ObjectId thread_id, ObjectId object_id,
-                               RefTypeId class_id, MethodId method_id, bool is_constructor)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  CHECK(!is_constructor || object_id != 0);
-
-  int32_t arg_count = request->ReadSigned32("argument count");
-
-  VLOG(jdwp) << StringPrintf("    --> thread_id=%#" PRIx64 " object_id=%#" PRIx64,
-                             thread_id, object_id);
-  VLOG(jdwp) << StringPrintf("        class_id=%#" PRIx64 " method_id=%#" PRIx64 " %s.%s",
-                             class_id, method_id, Dbg::GetClassName(class_id).c_str(),
-                             Dbg::GetMethodName(method_id).c_str());
-  VLOG(jdwp) << StringPrintf("        %d args:", arg_count);
-
-  std::unique_ptr<JdwpTag[]> argTypes(arg_count > 0 ? new JdwpTag[arg_count] : nullptr);
-  std::unique_ptr<uint64_t[]> argValues(arg_count > 0 ? new uint64_t[arg_count] : nullptr);
-  for (int32_t i = 0; i < arg_count; ++i) {
-    argTypes[i] = request->ReadTag();
-    size_t width = Dbg::GetTagWidth(argTypes[i]);
-    argValues[i] = request->ReadValue(width);
-    VLOG(jdwp) << "          " << argTypes[i] << StringPrintf("(%zd): %#" PRIx64, width,
-                                                              argValues[i]);
-  }
-
-  uint32_t options = request->ReadUnsigned32("InvokeOptions bit flags");
-  VLOG(jdwp) << StringPrintf("        options=0x%04x%s%s", options,
-                             (options & INVOKE_SINGLE_THREADED) ? " (SINGLE_THREADED)" : "",
-                             (options & INVOKE_NONVIRTUAL) ? " (NONVIRTUAL)" : "");
-
-  JDWP::JdwpError error =  Dbg::PrepareInvokeMethod(request->GetId(), thread_id, object_id,
-                                                    class_id, method_id, arg_count,
-                                                    argValues.get(), argTypes.get(), options);
-  if (error == JDWP::ERR_NONE) {
-    // We successfully requested the invoke. The event thread now owns the arguments array in its
-    // DebugInvokeReq mailbox.
-    argValues.release();
-  }
-  return error;
-}
-
-static JdwpError VM_Version(JdwpState*, Request*, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  // Text information on runtime version.
-  std::string version(StringPrintf("Android Runtime %s", Runtime::Current()->GetVersion()));
-  expandBufAddUtf8String(pReply, version);
-
-  // JDWP version numbers, major and minor.
-  expandBufAdd4BE(pReply, 1);
-  expandBufAdd4BE(pReply, 6);
-
-  // "java.version".
-  expandBufAddUtf8String(pReply, "1.6.0");
-
-  // "java.vm.name".
-  expandBufAddUtf8String(pReply, "Dalvik");
-
-  return ERR_NONE;
-}
-
-/*
- * Given a class JNI signature (e.g. "Ljava/lang/Error;"), return the
- * referenceTypeID.  We need to send back more than one if the class has
- * been loaded by multiple class loaders.
- */
-static JdwpError VM_ClassesBySignature(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  std::string classDescriptor(request->ReadUtf8String());
-
-  std::vector<RefTypeId> ids;
-  Dbg::FindLoadedClassBySignature(classDescriptor.c_str(), &ids);
-
-  expandBufAdd4BE(pReply, ids.size());
-
-  for (size_t i = 0; i < ids.size(); ++i) {
-    // Get class vs. interface and status flags.
-    JDWP::JdwpTypeTag type_tag;
-    uint32_t class_status;
-    JDWP::JdwpError status = Dbg::GetClassInfo(ids[i], &type_tag, &class_status, nullptr);
-    if (status != ERR_NONE) {
-      return status;
-    }
-
-    expandBufAdd1(pReply, type_tag);
-    expandBufAddRefTypeId(pReply, ids[i]);
-    expandBufAdd4BE(pReply, class_status);
-  }
-
-  return ERR_NONE;
-}
-
-/*
- * Handle request for the thread IDs of all running threads.
- *
- * We exclude ourselves from the list, because we don't allow ourselves
- * to be suspended, and that violates some JDWP expectations.
- */
-static JdwpError VM_AllThreads(JdwpState*, Request*, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  std::vector<ObjectId> thread_ids;
-  Dbg::GetThreads(nullptr /* all thread groups */, &thread_ids);
-
-  expandBufAdd4BE(pReply, thread_ids.size());
-  for (uint32_t i = 0; i < thread_ids.size(); ++i) {
-    expandBufAddObjectId(pReply, thread_ids[i]);
-  }
-
-  return ERR_NONE;
-}
-
-/*
- * List all thread groups that do not have a parent.
- */
-static JdwpError VM_TopLevelThreadGroups(JdwpState*, Request*, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  /*
-   * TODO: maintain a list of parentless thread groups in the VM.
-   *
-   * For now, just return "system".  Application threads are created
-   * in "main", which is a child of "system".
-   */
-  uint32_t groups = 1;
-  expandBufAdd4BE(pReply, groups);
-  ObjectId thread_group_id = Dbg::GetSystemThreadGroupId();
-  expandBufAddObjectId(pReply, thread_group_id);
-
-  return ERR_NONE;
-}
-
-/*
- * Respond with the sizes of the basic debugger types.
- */
-static JdwpError VM_IDSizes(JdwpState*, Request*, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  expandBufAdd4BE(pReply, sizeof(FieldId));
-  expandBufAdd4BE(pReply, sizeof(MethodId));
-  expandBufAdd4BE(pReply, sizeof(ObjectId));
-  expandBufAdd4BE(pReply, sizeof(RefTypeId));
-  expandBufAdd4BE(pReply, sizeof(FrameId));
-  return ERR_NONE;
-}
-
-static JdwpError VM_Dispose(JdwpState*, Request*, ExpandBuf*)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  Dbg::Dispose();
-  return ERR_NONE;
-}
-
-/*
- * Suspend the execution of the application running in the VM (i.e. suspend
- * all threads).
- *
- * This needs to increment the "suspend count" on all threads.
- */
-static JdwpError VM_Suspend(JdwpState*, Request*, ExpandBuf*)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  Thread* self = Thread::Current();
-  ScopedThreadSuspension sts(self, kWaitingForDebuggerSuspension);
-  Dbg::SuspendVM();
-  return ERR_NONE;
-}
-
-/*
- * Resume execution.  Decrements the "suspend count" of all threads.
- */
-static JdwpError VM_Resume(JdwpState*, Request*, ExpandBuf*)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  Dbg::ResumeVM();
-  return ERR_NONE;
-}
-
-static JdwpError VM_Exit(JdwpState* state, Request* request, ExpandBuf*)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  uint32_t exit_status = request->ReadUnsigned32("exit_status");
-  state->ExitAfterReplying(exit_status);
-  return ERR_NONE;
-}
-
-/*
- * Create a new string in the VM and return its ID.
- *
- * (Ctrl-Shift-I in Eclipse on an array of objects causes it to create the
- * string "java.util.Arrays".)
- */
-static JdwpError VM_CreateString(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  std::string str(request->ReadUtf8String());
-  ObjectId string_id;
-  JdwpError status = Dbg::CreateString(str, &string_id);
-  if (status != ERR_NONE) {
-    return status;
-  }
-  expandBufAddObjectId(pReply, string_id);
-  return ERR_NONE;
-}
-
-static JdwpError VM_ClassPaths(JdwpState*, Request*, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  expandBufAddUtf8String(pReply, "/");
-
-  std::vector<std::string> class_path;
-  Split(Runtime::Current()->GetClassPathString(), ':', &class_path);
-  expandBufAdd4BE(pReply, class_path.size());
-  for (const std::string& str : class_path) {
-    expandBufAddUtf8String(pReply, str);
-  }
-
-  std::vector<std::string> boot_class_path = Runtime::Current()->GetBootClassPath();
-  expandBufAdd4BE(pReply, boot_class_path.size());
-  for (const std::string& str : boot_class_path) {
-    expandBufAddUtf8String(pReply, str);
-  }
-
-  return ERR_NONE;
-}
-
-static JdwpError VM_DisposeObjects(JdwpState*, Request* request, ExpandBuf*)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  size_t object_count = request->ReadUnsigned32("object_count");
-  for (size_t i = 0; i < object_count; ++i) {
-    ObjectId object_id = request->ReadObjectId();
-    uint32_t reference_count = request->ReadUnsigned32("reference_count");
-    Dbg::DisposeObject(object_id, reference_count);
-  }
-  return ERR_NONE;
-}
-
-static JdwpError VM_Capabilities(JdwpState*, Request*, ExpandBuf* reply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  expandBufAdd1(reply, true);    // canWatchFieldModification
-  expandBufAdd1(reply, true);    // canWatchFieldAccess
-  expandBufAdd1(reply, true);    // canGetBytecodes
-  expandBufAdd1(reply, true);    // canGetSyntheticAttribute
-  expandBufAdd1(reply, true);    // canGetOwnedMonitorInfo
-  expandBufAdd1(reply, true);    // canGetCurrentContendedMonitor
-  expandBufAdd1(reply, true);    // canGetMonitorInfo
-  return ERR_NONE;
-}
-
-static JdwpError VM_CapabilitiesNew(JdwpState*, Request* request, ExpandBuf* reply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  // The first few capabilities are the same as those reported by the older call.
-  VM_Capabilities(nullptr, request, reply);
-
-  expandBufAdd1(reply, false);   // canRedefineClasses
-  expandBufAdd1(reply, false);   // canAddMethod
-  expandBufAdd1(reply, false);   // canUnrestrictedlyRedefineClasses
-  expandBufAdd1(reply, false);   // canPopFrames
-  expandBufAdd1(reply, true);    // canUseInstanceFilters
-  expandBufAdd1(reply, true);    // canGetSourceDebugExtension
-  expandBufAdd1(reply, false);   // canRequestVMDeathEvent
-  expandBufAdd1(reply, false);   // canSetDefaultStratum
-  expandBufAdd1(reply, true);    // 1.6: canGetInstanceInfo
-  expandBufAdd1(reply, false);   // 1.6: canRequestMonitorEvents
-  expandBufAdd1(reply, true);    // 1.6: canGetMonitorFrameInfo
-  expandBufAdd1(reply, false);   // 1.6: canUseSourceNameFilters
-  expandBufAdd1(reply, false);   // 1.6: canGetConstantPool
-  expandBufAdd1(reply, false);   // 1.6: canForceEarlyReturn
-
-  // Fill in reserved22 through reserved32; note count started at 1.
-  for (size_t i = 22; i <= 32; ++i) {
-    expandBufAdd1(reply, false);
-  }
-  return ERR_NONE;
-}
-
-static JdwpError VM_AllClassesImpl(ExpandBuf* pReply, bool descriptor_and_status, bool generic)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  std::vector<JDWP::RefTypeId> classes;
-  Dbg::GetClassList(&classes);
-
-  expandBufAdd4BE(pReply, classes.size());
-
-  for (size_t i = 0; i < classes.size(); ++i) {
-    static const char genericSignature[1] = "";
-    JDWP::JdwpTypeTag type_tag;
-    std::string descriptor;
-    uint32_t class_status;
-    JDWP::JdwpError status = Dbg::GetClassInfo(classes[i], &type_tag, &class_status, &descriptor);
-    if (status != ERR_NONE) {
-      return status;
-    }
-
-    expandBufAdd1(pReply, type_tag);
-    expandBufAddRefTypeId(pReply, classes[i]);
-    if (descriptor_and_status) {
-      expandBufAddUtf8String(pReply, descriptor);
-      if (generic) {
-        expandBufAddUtf8String(pReply, genericSignature);
-      }
-      expandBufAdd4BE(pReply, class_status);
-    }
-  }
-
-  return ERR_NONE;
-}
-
-static JdwpError VM_AllClasses(JdwpState*, Request*, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  return VM_AllClassesImpl(pReply, true, false);
-}
-
-static JdwpError VM_AllClassesWithGeneric(JdwpState*, Request*, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  return VM_AllClassesImpl(pReply, true, true);
-}
-
-static JdwpError VM_InstanceCounts(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  int32_t class_count = request->ReadSigned32("class count");
-  if (class_count < 0) {
-    return ERR_ILLEGAL_ARGUMENT;
-  }
-  std::vector<RefTypeId> class_ids;
-  for (int32_t i = 0; i < class_count; ++i) {
-    class_ids.push_back(request->ReadRefTypeId());
-  }
-
-  std::vector<uint64_t> counts;
-  JdwpError rc = Dbg::GetInstanceCounts(class_ids, &counts);
-  if (rc != ERR_NONE) {
-    return rc;
-  }
-
-  expandBufAdd4BE(pReply, counts.size());
-  for (size_t i = 0; i < counts.size(); ++i) {
-    expandBufAdd8BE(pReply, counts[i]);
-  }
-  return ERR_NONE;
-}
-
-static JdwpError RT_Modifiers(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId refTypeId = request->ReadRefTypeId();
-  return Dbg::GetModifiers(refTypeId, pReply);
-}
-
-/*
- * Get values from static fields in a reference type.
- */
-static JdwpError RT_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId refTypeId = request->ReadRefTypeId();
-  int32_t field_count = request->ReadSigned32("field count");
-  expandBufAdd4BE(pReply, field_count);
-  for (int32_t i = 0; i < field_count; ++i) {
-    FieldId fieldId = request->ReadFieldId();
-    JdwpError status = Dbg::GetStaticFieldValue(refTypeId, fieldId, pReply);
-    if (status != ERR_NONE) {
-      return status;
-    }
-  }
-  return ERR_NONE;
-}
-
-/*
- * Get the name of the source file in which a reference type was declared.
- */
-static JdwpError RT_SourceFile(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId refTypeId = request->ReadRefTypeId();
-  std::string source_file;
-  JdwpError status = Dbg::GetSourceFile(refTypeId, &source_file);
-  if (status != ERR_NONE) {
-    return status;
-  }
-  expandBufAddUtf8String(pReply, source_file);
-  return ERR_NONE;
-}
-
-/*
- * Return the current status of the reference type.
- */
-static JdwpError RT_Status(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId refTypeId = request->ReadRefTypeId();
-  JDWP::JdwpTypeTag type_tag;
-  uint32_t class_status;
-  JDWP::JdwpError status = Dbg::GetClassInfo(refTypeId, &type_tag, &class_status, nullptr);
-  if (status != ERR_NONE) {
-    return status;
-  }
-  expandBufAdd4BE(pReply, class_status);
-  return ERR_NONE;
-}
-
-/*
- * Return interfaces implemented directly by this class.
- */
-static JdwpError RT_Interfaces(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId refTypeId = request->ReadRefTypeId();
-  return Dbg::OutputDeclaredInterfaces(refTypeId, pReply);
-}
-
-/*
- * Return the class object corresponding to this type.
- */
-static JdwpError RT_ClassObject(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId refTypeId = request->ReadRefTypeId();
-  ObjectId class_object_id;
-  JdwpError status = Dbg::GetClassObject(refTypeId, &class_object_id);
-  if (status != ERR_NONE) {
-    return status;
-  }
-  VLOG(jdwp) << StringPrintf("    --> ObjectId %#" PRIx64, class_object_id);
-  expandBufAddObjectId(pReply, class_object_id);
-  return ERR_NONE;
-}
-
-/*
- * Returns the value of the SourceDebugExtension attribute.
- */
-static JdwpError RT_SourceDebugExtension(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  /* referenceTypeId in, string out */
-  RefTypeId refTypeId = request->ReadRefTypeId();
-  std::string extension_data;
-  JdwpError status = Dbg::GetSourceDebugExtension(refTypeId, &extension_data);
-  if (status != ERR_NONE) {
-    return status;
-  }
-  expandBufAddUtf8String(pReply, extension_data);
-  return ERR_NONE;
-}
-
-static JdwpError RT_Signature(JdwpState*, Request* request, ExpandBuf* pReply, bool with_generic)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId refTypeId = request->ReadRefTypeId();
-
-  std::string signature;
-  JdwpError status = Dbg::GetSignature(refTypeId, &signature);
-  if (status != ERR_NONE) {
-    return status;
-  }
-  expandBufAddUtf8String(pReply, signature);
-  if (with_generic) {
-    expandBufAddUtf8String(pReply, "");
-  }
-  return ERR_NONE;
-}
-
-static JdwpError RT_Signature(JdwpState* state, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  return RT_Signature(state, request, pReply, false);
-}
-
-static JdwpError RT_SignatureWithGeneric(JdwpState* state, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  return RT_Signature(state, request, pReply, true);
-}
-
-/*
- * Return the instance of java.lang.ClassLoader that loaded the specified
- * reference type, or null if it was loaded by the system loader.
- */
-static JdwpError RT_ClassLoader(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId refTypeId = request->ReadRefTypeId();
-  return Dbg::GetClassLoader(refTypeId, pReply);
-}
-
-/*
- * Given a referenceTypeId, return a block of stuff that describes the
- * fields declared by a class.
- */
-static JdwpError RT_FieldsWithGeneric(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId refTypeId = request->ReadRefTypeId();
-  return Dbg::OutputDeclaredFields(refTypeId, true, pReply);
-}
-
-// Obsolete equivalent of FieldsWithGeneric, without the generic type information.
-static JdwpError RT_Fields(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId refTypeId = request->ReadRefTypeId();
-  return Dbg::OutputDeclaredFields(refTypeId, false, pReply);
-}
-
-/*
- * Given a referenceTypeID, return a block of goodies describing the
- * methods declared by a class.
- */
-static JdwpError RT_MethodsWithGeneric(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId refTypeId = request->ReadRefTypeId();
-  return Dbg::OutputDeclaredMethods(refTypeId, true, pReply);
-}
-
-// Obsolete equivalent of MethodsWithGeneric, without the generic type information.
-static JdwpError RT_Methods(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId refTypeId = request->ReadRefTypeId();
-  return Dbg::OutputDeclaredMethods(refTypeId, false, pReply);
-}
-
-static JdwpError RT_Instances(JdwpState*, Request* request, ExpandBuf* reply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId class_id = request->ReadRefTypeId();
-  int32_t max_count = request->ReadSigned32("max count");
-  if (max_count < 0) {
-    return ERR_ILLEGAL_ARGUMENT;
-  }
-
-  std::vector<ObjectId> instances;
-  JdwpError rc = Dbg::GetInstances(class_id, max_count, &instances);
-  if (rc != ERR_NONE) {
-    return rc;
-  }
-
-  return WriteTaggedObjectList(reply, instances);
-}
-
-/*
- * Return the immediate superclass of a class.
- */
-static JdwpError CT_Superclass(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId class_id = request->ReadRefTypeId();
-  RefTypeId superClassId;
-  JdwpError status = Dbg::GetSuperclass(class_id, &superClassId);
-  if (status != ERR_NONE) {
-    return status;
-  }
-  expandBufAddRefTypeId(pReply, superClassId);
-  return ERR_NONE;
-}
-
-/*
- * Set static class values.
- */
-static JdwpError CT_SetValues(JdwpState* , Request* request, ExpandBuf*)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId class_id = request->ReadRefTypeId();
-  int32_t values_count = request->ReadSigned32("values count");
-
-  UNUSED(class_id);
-
-  for (int32_t i = 0; i < values_count; ++i) {
-    FieldId fieldId = request->ReadFieldId();
-    JDWP::JdwpTag fieldTag = Dbg::GetStaticFieldBasicTag(fieldId);
-    size_t width = Dbg::GetTagWidth(fieldTag);
-    uint64_t value = request->ReadValue(width);
-
-    VLOG(jdwp) << "    --> field=" << fieldId << " tag=" << fieldTag << " --> " << value;
-    JdwpError status = Dbg::SetStaticFieldValue(fieldId, value, width);
-    if (status != ERR_NONE) {
-      return status;
-    }
-  }
-
-  return ERR_NONE;
-}
-
-/*
- * Invoke a static method.
- *
- * Example: Eclipse sometimes uses java/lang/Class.forName(String s) on
- * values in the "variables" display.
- */
-static JdwpError CT_InvokeMethod(JdwpState* state, Request* request,
-                                 ExpandBuf* pReply ATTRIBUTE_UNUSED)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId class_id = request->ReadRefTypeId();
-  ObjectId thread_id = request->ReadThreadId();
-  MethodId method_id = request->ReadMethodId();
-
-  return RequestInvoke(state, request, thread_id, 0, class_id, method_id, false);
-}
-
-/*
- * Create a new object of the requested type, and invoke the specified
- * constructor.
- *
- * Example: in IntelliJ, create a watch on "new String(myByteArray)" to
- * see the contents of a byte[] as a string.
- */
-static JdwpError CT_NewInstance(JdwpState* state, Request* request,
-                                ExpandBuf* pReply ATTRIBUTE_UNUSED)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId class_id = request->ReadRefTypeId();
-  ObjectId thread_id = request->ReadThreadId();
-  MethodId method_id = request->ReadMethodId();
-
-  ObjectId object_id;
-  JdwpError status = Dbg::CreateObject(class_id, &object_id);
-  if (status != ERR_NONE) {
-    return status;
-  }
-  return RequestInvoke(state, request, thread_id, object_id, class_id, method_id, true);
-}
-
-/*
- * Create a new array object of the requested type and length.
- */
-static JdwpError AT_newInstance(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId arrayTypeId = request->ReadRefTypeId();
-  int32_t length = request->ReadSigned32("length");
-
-  ObjectId object_id;
-  JdwpError status = Dbg::CreateArrayObject(arrayTypeId, length, &object_id);
-  if (status != ERR_NONE) {
-    return status;
-  }
-  expandBufAdd1(pReply, JT_ARRAY);
-  expandBufAddObjectId(pReply, object_id);
-  return ERR_NONE;
-}
-
-/*
- * Invoke a static method on an interface.
- */
-static JdwpError IT_InvokeMethod(JdwpState* state, Request* request,
-                                 ExpandBuf* pReply ATTRIBUTE_UNUSED)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId class_id = request->ReadRefTypeId();
-  ObjectId thread_id = request->ReadThreadId();
-  MethodId method_id = request->ReadMethodId();
-
-  return RequestInvoke(state, request, thread_id, 0, class_id, method_id, false);
-}
-
-/*
- * Return line number information for the method, if present.
- */
-static JdwpError M_LineTable(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId refTypeId = request->ReadRefTypeId();
-  MethodId method_id = request->ReadMethodId();
-
-  Dbg::OutputLineTable(refTypeId, method_id, pReply);
-
-  return ERR_NONE;
-}
-
-static JdwpError M_VariableTable(JdwpState*, Request* request, ExpandBuf* pReply,
-                                 bool generic)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId class_id = request->ReadRefTypeId();
-  MethodId method_id = request->ReadMethodId();
-
-  // We could return ERR_ABSENT_INFORMATION here if the DEX file was built without local variable
-  // information. That will cause Eclipse to make a best-effort attempt at displaying local
-  // variables anonymously. However, the attempt isn't very good, so we're probably better off just
-  // not showing anything.
-  Dbg::OutputVariableTable(class_id, method_id, generic, pReply);
-  return ERR_NONE;
-}
-
-static JdwpError M_VariableTable(JdwpState* state, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  return M_VariableTable(state, request, pReply, false);
-}
-
-static JdwpError M_VariableTableWithGeneric(JdwpState* state, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  return M_VariableTable(state, request, pReply, true);
-}
-
-static JdwpError M_Bytecodes(JdwpState*, Request* request, ExpandBuf* reply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId class_id = request->ReadRefTypeId();
-  MethodId method_id = request->ReadMethodId();
-
-  std::vector<uint8_t> bytecodes;
-  JdwpError rc = Dbg::GetBytecodes(class_id, method_id, &bytecodes);
-  if (rc != ERR_NONE) {
-    return rc;
-  }
-
-  expandBufAdd4BE(reply, bytecodes.size());
-  for (size_t i = 0; i < bytecodes.size(); ++i) {
-    expandBufAdd1(reply, bytecodes[i]);
-  }
-
-  return ERR_NONE;
-}
-
-static JdwpError M_IsObsolete(JdwpState*, Request* request, ExpandBuf* reply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  request->ReadRefTypeId();  // unused reference type ID
-  MethodId id = request->ReadMethodId();
-  expandBufAdd1(reply, Dbg::IsMethodObsolete(id));
-  return ERR_NONE;
-}
-
-/*
- * Given an object reference, return the runtime type of the object
- * (class or array).
- *
- * This can get called on different things, e.g. thread_id gets
- * passed in here.
- */
-static JdwpError OR_ReferenceType(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId object_id = request->ReadObjectId();
-  return Dbg::GetReferenceType(object_id, pReply);
-}
-
-/*
- * Get values from the fields of an object.
- */
-static JdwpError OR_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId object_id = request->ReadObjectId();
-  int32_t field_count = request->ReadSigned32("field count");
-
-  expandBufAdd4BE(pReply, field_count);
-  for (int32_t i = 0; i < field_count; ++i) {
-    FieldId fieldId = request->ReadFieldId();
-    JdwpError status = Dbg::GetFieldValue(object_id, fieldId, pReply);
-    if (status != ERR_NONE) {
-      return status;
-    }
-  }
-
-  return ERR_NONE;
-}
-
-/*
- * Set values in the fields of an object.
- */
-static JdwpError OR_SetValues(JdwpState*, Request* request, ExpandBuf*)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId object_id = request->ReadObjectId();
-  int32_t field_count = request->ReadSigned32("field count");
-
-  for (int32_t i = 0; i < field_count; ++i) {
-    FieldId fieldId = request->ReadFieldId();
-
-    JDWP::JdwpTag fieldTag = Dbg::GetFieldBasicTag(fieldId);
-    size_t width = Dbg::GetTagWidth(fieldTag);
-    uint64_t value = request->ReadValue(width);
-
-    VLOG(jdwp) << "    --> fieldId=" << fieldId << " tag=" << fieldTag << "(" << width << ") value=" << value;
-    JdwpError status = Dbg::SetFieldValue(object_id, fieldId, value, width);
-    if (status != ERR_NONE) {
-      return status;
-    }
-  }
-
-  return ERR_NONE;
-}
-
-static JdwpError OR_MonitorInfo(JdwpState*, Request* request, ExpandBuf* reply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId object_id = request->ReadObjectId();
-  return Dbg::GetMonitorInfo(object_id, reply);
-}
-
-/*
- * Invoke an instance method.  The invocation must occur in the specified
- * thread, which must have been suspended by an event.
- *
- * The call is synchronous.  All threads in the VM are resumed, unless the
- * SINGLE_THREADED flag is set.
- *
- * If you ask Eclipse to "inspect" an object (or ask JDB to "print" an
- * object), it will try to invoke the object's toString() function.  This
- * feature becomes crucial when examining ArrayLists with Eclipse.
- */
-static JdwpError OR_InvokeMethod(JdwpState* state, Request* request,
-                                 ExpandBuf* pReply ATTRIBUTE_UNUSED)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId object_id = request->ReadObjectId();
-  ObjectId thread_id = request->ReadThreadId();
-  RefTypeId class_id = request->ReadRefTypeId();
-  MethodId method_id = request->ReadMethodId();
-
-  return RequestInvoke(state, request, thread_id, object_id, class_id, method_id, false);
-}
-
-static JdwpError OR_DisableCollection(JdwpState*, Request* request, ExpandBuf*)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId object_id = request->ReadObjectId();
-  return Dbg::DisableCollection(object_id);
-}
-
-static JdwpError OR_EnableCollection(JdwpState*, Request* request, ExpandBuf*)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId object_id = request->ReadObjectId();
-  return Dbg::EnableCollection(object_id);
-}
-
-static JdwpError OR_IsCollected(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId object_id = request->ReadObjectId();
-  bool is_collected;
-  JdwpError rc = Dbg::IsCollected(object_id, &is_collected);
-  expandBufAdd1(pReply, is_collected ? 1 : 0);
-  return rc;
-}
-
-static JdwpError OR_ReferringObjects(JdwpState*, Request* request, ExpandBuf* reply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId object_id = request->ReadObjectId();
-  int32_t max_count = request->ReadSigned32("max count");
-  if (max_count < 0) {
-    return ERR_ILLEGAL_ARGUMENT;
-  }
-
-  std::vector<ObjectId> referring_objects;
-  JdwpError rc = Dbg::GetReferringObjects(object_id, max_count, &referring_objects);
-  if (rc != ERR_NONE) {
-    return rc;
-  }
-
-  return WriteTaggedObjectList(reply, referring_objects);
-}
-
-/*
- * Return the string value in a string object.
- */
-static JdwpError SR_Value(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId stringObject = request->ReadObjectId();
-  std::string str;
-  JDWP::JdwpError error = Dbg::StringToUtf8(stringObject, &str);
-  if (error != JDWP::ERR_NONE) {
-    return error;
-  }
-
-  VLOG(jdwp) << StringPrintf("    --> %s", PrintableString(str.c_str()).c_str());
-
-  expandBufAddUtf8String(pReply, str);
-
-  return ERR_NONE;
-}
-
-/*
- * Return a thread's name.
- */
-static JdwpError TR_Name(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId thread_id = request->ReadThreadId();
-
-  std::string name;
-  JdwpError error = Dbg::GetThreadName(thread_id, &name);
-  if (error != ERR_NONE) {
-    return error;
-  }
-  VLOG(jdwp) << StringPrintf("  Name of thread %#" PRIx64 " is \"%s\"", thread_id, name.c_str());
-  expandBufAddUtf8String(pReply, name);
-
-  return ERR_NONE;
-}
-
-/*
- * Suspend the specified thread.
- *
- * It's supposed to remain suspended even if interpreted code wants to
- * resume it; only the JDI is allowed to resume it.
- */
-static JdwpError TR_Suspend(JdwpState*, Request* request, ExpandBuf*)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId thread_id = request->ReadThreadId();
-
-  if (thread_id == Dbg::GetThreadSelfId()) {
-    LOG(INFO) << "  Warning: ignoring request to suspend self";
-    return ERR_THREAD_NOT_SUSPENDED;
-  }
-
-  Thread* self = Thread::Current();
-  ScopedThreadSuspension sts(self, kWaitingForDebuggerSend);
-  JdwpError result = Dbg::SuspendThread(thread_id);
-  return result;
-}
-
-/*
- * Resume the specified thread.
- */
-static JdwpError TR_Resume(JdwpState*, Request* request, ExpandBuf*)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId thread_id = request->ReadThreadId();
-
-  if (thread_id == Dbg::GetThreadSelfId()) {
-    LOG(INFO) << "  Warning: ignoring request to resume self";
-    return ERR_NONE;
-  }
-
-  Dbg::ResumeThread(thread_id);
-  return ERR_NONE;
-}
-
-/*
- * Return status of specified thread.
- */
-static JdwpError TR_Status(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId thread_id = request->ReadThreadId();
-
-  JDWP::JdwpThreadStatus threadStatus;
-  JDWP::JdwpSuspendStatus suspendStatus;
-  JdwpError error = Dbg::GetThreadStatus(thread_id, &threadStatus, &suspendStatus);
-  if (error != ERR_NONE) {
-    return error;
-  }
-
-  VLOG(jdwp) << "    --> " << threadStatus << ", " << suspendStatus;
-
-  expandBufAdd4BE(pReply, threadStatus);
-  expandBufAdd4BE(pReply, suspendStatus);
-
-  return ERR_NONE;
-}
-
-/*
- * Return the thread group that the specified thread is a member of.
- */
-static JdwpError TR_ThreadGroup(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId thread_id = request->ReadThreadId();
-  return Dbg::GetThreadGroup(thread_id, pReply);
-}
-
-/*
- * Return the current call stack of a suspended thread.
- *
- * If the thread isn't suspended, the error code isn't defined, but should
- * be THREAD_NOT_SUSPENDED.
- */
-static JdwpError TR_Frames(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId thread_id = request->ReadThreadId();
-  uint32_t start_frame = request->ReadUnsigned32("start frame");
-  uint32_t length = request->ReadUnsigned32("length");
-
-  size_t actual_frame_count;
-  JdwpError error = Dbg::GetThreadFrameCount(thread_id, &actual_frame_count);
-  if (error != ERR_NONE) {
-    return error;
-  }
-
-  if (actual_frame_count <= 0) {
-    return ERR_THREAD_NOT_SUSPENDED;  // 0 means no managed frames (which means "in native").
-  }
-
-  if (start_frame > actual_frame_count) {
-    return ERR_INVALID_INDEX;
-  }
-  if (length == static_cast<uint32_t>(-1)) {
-    length = actual_frame_count - start_frame;
-  }
-  if (start_frame + length > actual_frame_count) {
-    return ERR_INVALID_LENGTH;
-  }
-
-  return Dbg::GetThreadFrames(thread_id, start_frame, length, pReply);
-}
-
-/*
- * Returns the #of frames on the specified thread, which must be suspended.
- */
-static JdwpError TR_FrameCount(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId thread_id = request->ReadThreadId();
-
-  size_t frame_count;
-  JdwpError rc = Dbg::GetThreadFrameCount(thread_id, &frame_count);
-  if (rc != ERR_NONE) {
-    return rc;
-  }
-  expandBufAdd4BE(pReply, static_cast<uint32_t>(frame_count));
-
-  return ERR_NONE;
-}
-
-static JdwpError TR_OwnedMonitors(Request* request, ExpandBuf* reply, bool with_stack_depths)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId thread_id = request->ReadThreadId();
-
-  std::vector<ObjectId> monitors;
-  std::vector<uint32_t> stack_depths;
-  JdwpError rc = Dbg::GetOwnedMonitors(thread_id, &monitors, &stack_depths);
-  if (rc != ERR_NONE) {
-    return rc;
-  }
-
-  expandBufAdd4BE(reply, monitors.size());
-  for (size_t i = 0; i < monitors.size(); ++i) {
-    rc = WriteTaggedObject(reply, monitors[i]);
-    if (rc != ERR_NONE) {
-      return rc;
-    }
-    if (with_stack_depths) {
-      expandBufAdd4BE(reply, stack_depths[i]);
-    }
-  }
-  return ERR_NONE;
-}
-
-static JdwpError TR_OwnedMonitors(JdwpState*, Request* request, ExpandBuf* reply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  return TR_OwnedMonitors(request, reply, false);
-}
-
-static JdwpError TR_OwnedMonitorsStackDepthInfo(JdwpState*, Request* request, ExpandBuf* reply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  return TR_OwnedMonitors(request, reply, true);
-}
-
-static JdwpError TR_CurrentContendedMonitor(JdwpState*, Request* request, ExpandBuf* reply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId thread_id = request->ReadThreadId();
-
-  ObjectId contended_monitor;
-  JdwpError rc = Dbg::GetContendedMonitor(thread_id, &contended_monitor);
-  if (rc != ERR_NONE) {
-    return rc;
-  }
-  return WriteTaggedObject(reply, contended_monitor);
-}
-
-static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply ATTRIBUTE_UNUSED)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId thread_id = request->ReadThreadId();
-  return Dbg::Interrupt(thread_id);
-}
-
-/*
- * Return the debug suspend count for the specified thread.
- *
- * (The thread *might* still be running -- it might not have examined
- * its suspend count recently.)
- */
-static JdwpError TR_DebugSuspendCount(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId thread_id = request->ReadThreadId();
-  return Dbg::GetThreadDebugSuspendCount(thread_id, pReply);
-}
-
-/*
- * Return the name of a thread group.
- *
- * The Eclipse debugger recognizes "main" and "system" as special.
- */
-static JdwpError TGR_Name(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId thread_group_id = request->ReadThreadGroupId();
-  return Dbg::GetThreadGroupName(thread_group_id, pReply);
-}
-
-/*
- * Returns the thread group -- if any -- that contains the specified
- * thread group.
- */
-static JdwpError TGR_Parent(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId thread_group_id = request->ReadThreadGroupId();
-  return Dbg::GetThreadGroupParent(thread_group_id, pReply);
-}
-
-/*
- * Return the active threads and thread groups that are part of the
- * specified thread group.
- */
-static JdwpError TGR_Children(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId thread_group_id = request->ReadThreadGroupId();
-  return Dbg::GetThreadGroupChildren(thread_group_id, pReply);
-}
-
-/*
- * Return the #of components in the array.
- */
-static JdwpError AR_Length(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId array_id = request->ReadArrayId();
-
-  int32_t length;
-  JdwpError status = Dbg::GetArrayLength(array_id, &length);
-  if (status != ERR_NONE) {
-    return status;
-  }
-  VLOG(jdwp) << "    --> " << length;
-
-  expandBufAdd4BE(pReply, length);
-
-  return ERR_NONE;
-}
-
-/*
- * Return the values from an array.
- */
-static JdwpError AR_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId array_id = request->ReadArrayId();
-  uint32_t offset = request->ReadUnsigned32("offset");
-  uint32_t length = request->ReadUnsigned32("length");
-  return Dbg::OutputArray(array_id, offset, length, pReply);
-}
-
-/*
- * Set values in an array.
- */
-static JdwpError AR_SetValues(JdwpState*, Request* request, ExpandBuf*)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId array_id = request->ReadArrayId();
-  uint32_t offset = request->ReadUnsigned32("offset");
-  uint32_t count = request->ReadUnsigned32("count");
-  return Dbg::SetArrayElements(array_id, offset, count, request);
-}
-
-static JdwpError CLR_VisibleClasses(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  request->ReadObjectId();  // classLoaderObject
-  // TODO: we should only return classes which have the given class loader as a defining or
-  // initiating loader. The former would be easy; the latter is hard, because we don't have
-  // any such notion.
-  return VM_AllClassesImpl(pReply, false, false);
-}
-
-// Delete function class to use std::unique_ptr with JdwpEvent.
-struct JdwpEventDeleter {
-  void operator()(JdwpEvent* event) {
-    EventFree(event);
-  }
-};
-
-/*
- * Set an event trigger.
- *
- * Reply with a requestID.
- */
-static JdwpError ER_Set(JdwpState* state, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  JdwpEventKind event_kind = request->ReadEnum1<JdwpEventKind>("event kind");
-  JdwpSuspendPolicy suspend_policy = request->ReadEnum1<JdwpSuspendPolicy>("suspend policy");
-  int32_t modifier_count = request->ReadSigned32("modifier count");
-
-  CHECK_LT(modifier_count, 256);    /* reasonableness check */
-
-  std::unique_ptr<JDWP::JdwpEvent, JdwpEventDeleter> pEvent(EventAlloc(modifier_count));
-  pEvent->eventKind = event_kind;
-  pEvent->suspend_policy = suspend_policy;
-  pEvent->modCount = modifier_count;
-
-  /*
-   * Read modifiers.  Ordering may be significant (see explanation of Count
-   * mods in JDWP doc).
-   */
-  for (int32_t i = 0; i < modifier_count; ++i) {
-    JdwpEventMod& mod = pEvent->mods[i];
-    mod.modKind = request->ReadModKind();
-    switch (mod.modKind) {
-    case MK_COUNT:
-      {
-        // Report once, when "--count" reaches 0.
-        uint32_t count = request->ReadUnsigned32("count");
-        if (count == 0) {
-          return ERR_INVALID_COUNT;
-        }
-        mod.count.count = count;
-      }
-      break;
-    case MK_CONDITIONAL:
-      {
-        // Conditional on expression.
-        uint32_t exprId = request->ReadUnsigned32("expr id");
-        mod.conditional.exprId = exprId;
-      }
-      break;
-    case MK_THREAD_ONLY:
-      {
-        // Only report events in specified thread.
-        ObjectId thread_id = request->ReadThreadId();
-        mod.threadOnly.threadId = thread_id;
-      }
-      break;
-    case MK_CLASS_ONLY:
-      {
-        // For ClassPrepare, MethodEntry.
-        RefTypeId class_id = request->ReadRefTypeId();
-        mod.classOnly.refTypeId = class_id;
-      }
-      break;
-    case MK_CLASS_MATCH:
-      {
-        // Restrict events to matching classes.
-        // pattern is "java.foo.*", we want "java/foo/*".
-        std::string pattern(request->ReadUtf8String());
-        std::replace(pattern.begin(), pattern.end(), '.', '/');
-        mod.classMatch.classPattern = strdup(pattern.c_str());
-      }
-      break;
-    case MK_CLASS_EXCLUDE:
-      {
-        // Restrict events to non-matching classes.
-        // pattern is "java.foo.*", we want "java/foo/*".
-        std::string pattern(request->ReadUtf8String());
-        std::replace(pattern.begin(), pattern.end(), '.', '/');
-        mod.classExclude.classPattern = strdup(pattern.c_str());
-      }
-      break;
-    case MK_LOCATION_ONLY:
-      {
-        // Restrict certain events based on location.
-        JdwpLocation location = request->ReadLocation();
-        mod.locationOnly.loc = location;
-      }
-      break;
-    case MK_EXCEPTION_ONLY:
-      {
-        // Modifies EK_EXCEPTION events,
-        mod.exceptionOnly.refTypeId = request->ReadRefTypeId();  // null => all exceptions.
-        mod.exceptionOnly.caught = request->ReadEnum1<uint8_t>("caught");
-        mod.exceptionOnly.uncaught = request->ReadEnum1<uint8_t>("uncaught");
-      }
-      break;
-    case MK_FIELD_ONLY:
-      {
-        // For field access/modification events.
-        RefTypeId declaring = request->ReadRefTypeId();
-        FieldId fieldId = request->ReadFieldId();
-        mod.fieldOnly.refTypeId = declaring;
-        mod.fieldOnly.fieldId = fieldId;
-      }
-      break;
-    case MK_STEP:
-      {
-        // For use with EK_SINGLE_STEP.
-        ObjectId thread_id = request->ReadThreadId();
-        uint32_t size = request->ReadUnsigned32("step size");
-        uint32_t depth = request->ReadUnsigned32("step depth");
-        VLOG(jdwp) << StringPrintf("    Step: thread=%#" PRIx64, thread_id)
-                     << " size=" << JdwpStepSize(size) << " depth=" << JdwpStepDepth(depth);
-
-        mod.step.threadId = thread_id;
-        mod.step.size = size;
-        mod.step.depth = depth;
-      }
-      break;
-    case MK_INSTANCE_ONLY:
-      {
-        // Report events related to a specific object.
-        ObjectId instance = request->ReadObjectId();
-        mod.instanceOnly.objectId = instance;
-      }
-      break;
-    default:
-      LOG(WARNING) << "Unsupported modifier " << mod.modKind << " for event " << pEvent->eventKind;
-      return JDWP::ERR_NOT_IMPLEMENTED;
-    }
-  }
-
-  /*
-   * We reply with an integer "requestID".
-   */
-  uint32_t requestId = state->NextEventSerial();
-  expandBufAdd4BE(pReply, requestId);
-
-  pEvent->requestId = requestId;
-
-  VLOG(jdwp) << StringPrintf("    --> event requestId=%#x", requestId);
-
-  /* add it to the list */
-  // TODO: RegisterEvent() should take std::unique_ptr<>.
-  JdwpError err = state->RegisterEvent(pEvent.get());
-  if (err != ERR_NONE) {
-    /* registration failed, probably because event is bogus */
-    LOG(WARNING) << "WARNING: event request rejected";
-    return err;
-  }
-  pEvent.release();  // NOLINT b/117926937
-  return ERR_NONE;
-}
-
-static JdwpError ER_Clear(JdwpState* state, Request* request, ExpandBuf*)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  request->ReadEnum1<JdwpEventKind>("event kind");
-  uint32_t requestId = request->ReadUnsigned32("request id");
-
-  // Failure to find an event with a matching ID is a no-op
-  // and does not return an error.
-  state->UnregisterEventById(requestId);
-  return ERR_NONE;
-}
-
-/*
- * Return the values of arguments and local variables.
- */
-static JdwpError SF_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  return Dbg::GetLocalValues(request, pReply);
-}
-
-/*
- * Set the values of arguments and local variables.
- */
-static JdwpError SF_SetValues(JdwpState*, Request* request, ExpandBuf*)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  return Dbg::SetLocalValues(request);
-}
-
-static JdwpError SF_ThisObject(JdwpState*, Request* request, ExpandBuf* reply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjectId thread_id = request->ReadThreadId();
-  FrameId frame_id = request->ReadFrameId();
-
-  ObjectId object_id;
-  JdwpError rc = Dbg::GetThisObject(thread_id, frame_id, &object_id);
-  if (rc != ERR_NONE) {
-    return rc;
-  }
-
-  return WriteTaggedObject(reply, object_id);
-}
-
-/*
- * Return the reference type reflected by this class object.
- *
- * This appears to be required because ReferenceTypeId values are NEVER
- * reused, whereas ClassIds can be recycled like any other object.  (Either
- * that, or I have no idea what this is for.)
- */
-static JdwpError COR_ReflectedType(JdwpState*, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  RefTypeId class_object_id = request->ReadRefTypeId();
-  return Dbg::GetReflectedType(class_object_id, pReply);
-}
-
-/*
- * Handle a DDM packet with a single chunk in it.
- */
-static JdwpError DDM_Chunk(JdwpState* state, Request* request, ExpandBuf* pReply)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  state->NotifyDdmsActive();
-  uint8_t* replyBuf = nullptr;
-  int replyLen = -1;
-  if (Dbg::DdmHandlePacket(request, &replyBuf, &replyLen)) {
-    // If they want to send something back, we copy it into the buffer.
-    // TODO: consider altering the JDWP stuff to hold the packet header
-    // in a separate buffer.  That would allow us to writev() DDM traffic
-    // instead of copying it into the expanding buffer.  The reduction in
-    // heap requirements is probably more valuable than the efficiency.
-    CHECK_GT(replyLen, 0);
-    memcpy(expandBufAddSpace(pReply, replyLen), replyBuf, replyLen);
-    delete[] replyBuf;
-  }
-  return ERR_NONE;
-}
-
-/*
- * Handler map decl.
- */
-using JdwpRequestHandler = JdwpError(*)(JdwpState* state, Request* request, ExpandBuf* reply);
-
-struct JdwpHandlerMap {
-  uint8_t cmdSet;
-  uint8_t cmd;
-  JdwpRequestHandler func;
-  const char* name;
-};
-
-/*
- * Map commands to functions.
- *
- * Command sets 0-63 are incoming requests, 64-127 are outbound requests,
- * and 128-256 are vendor-defined.
- */
-static const JdwpHandlerMap gHandlers[] = {
-  /* VirtualMachine command set (1) */
-  { 1,    1,  VM_Version,               "VirtualMachine.Version" },
-  { 1,    2,  VM_ClassesBySignature,    "VirtualMachine.ClassesBySignature" },
-  { 1,    3,  VM_AllClasses,            "VirtualMachine.AllClasses" },
-  { 1,    4,  VM_AllThreads,            "VirtualMachine.AllThreads" },
-  { 1,    5,  VM_TopLevelThreadGroups,  "VirtualMachine.TopLevelThreadGroups" },
-  { 1,    6,  VM_Dispose,               "VirtualMachine.Dispose" },
-  { 1,    7,  VM_IDSizes,               "VirtualMachine.IDSizes" },
-  { 1,    8,  VM_Suspend,               "VirtualMachine.Suspend" },
-  { 1,    9,  VM_Resume,                "VirtualMachine.Resume" },
-  { 1,    10, VM_Exit,                  "VirtualMachine.Exit" },
-  { 1,    11, VM_CreateString,          "VirtualMachine.CreateString" },
-  { 1,    12, VM_Capabilities,          "VirtualMachine.Capabilities" },
-  { 1,    13, VM_ClassPaths,            "VirtualMachine.ClassPaths" },
-  { 1,    14, VM_DisposeObjects,        "VirtualMachine.DisposeObjects" },
-  { 1,    15, nullptr,                  "VirtualMachine.HoldEvents" },
-  { 1,    16, nullptr,                  "VirtualMachine.ReleaseEvents" },
-  { 1,    17, VM_CapabilitiesNew,       "VirtualMachine.CapabilitiesNew" },
-  { 1,    18, nullptr,                  "VirtualMachine.RedefineClasses" },
-  { 1,    19, nullptr,                  "VirtualMachine.SetDefaultStratum" },
-  { 1,    20, VM_AllClassesWithGeneric, "VirtualMachine.AllClassesWithGeneric" },
-  { 1,    21, VM_InstanceCounts,        "VirtualMachine.InstanceCounts" },
-
-  /* ReferenceType command set (2) */
-  { 2,    1,  RT_Signature,            "ReferenceType.Signature" },
-  { 2,    2,  RT_ClassLoader,          "ReferenceType.ClassLoader" },
-  { 2,    3,  RT_Modifiers,            "ReferenceType.Modifiers" },
-  { 2,    4,  RT_Fields,               "ReferenceType.Fields" },
-  { 2,    5,  RT_Methods,              "ReferenceType.Methods" },
-  { 2,    6,  RT_GetValues,            "ReferenceType.GetValues" },
-  { 2,    7,  RT_SourceFile,           "ReferenceType.SourceFile" },
-  { 2,    8,  nullptr,                 "ReferenceType.NestedTypes" },
-  { 2,    9,  RT_Status,               "ReferenceType.Status" },
-  { 2,    10, RT_Interfaces,           "ReferenceType.Interfaces" },
-  { 2,    11, RT_ClassObject,          "ReferenceType.ClassObject" },
-  { 2,    12, RT_SourceDebugExtension, "ReferenceType.SourceDebugExtension" },
-  { 2,    13, RT_SignatureWithGeneric, "ReferenceType.SignatureWithGeneric" },
-  { 2,    14, RT_FieldsWithGeneric,    "ReferenceType.FieldsWithGeneric" },
-  { 2,    15, RT_MethodsWithGeneric,   "ReferenceType.MethodsWithGeneric" },
-  { 2,    16, RT_Instances,            "ReferenceType.Instances" },
-  { 2,    17, nullptr,                 "ReferenceType.ClassFileVersion" },
-  { 2,    18, nullptr,                 "ReferenceType.ConstantPool" },
-
-  /* ClassType command set (3) */
-  { 3,    1,  CT_Superclass,    "ClassType.Superclass" },
-  { 3,    2,  CT_SetValues,     "ClassType.SetValues" },
-  { 3,    3,  CT_InvokeMethod,  "ClassType.InvokeMethod" },
-  { 3,    4,  CT_NewInstance,   "ClassType.NewInstance" },
-
-  /* ArrayType command set (4) */
-  { 4,    1,  AT_newInstance,   "ArrayType.NewInstance" },
-
-  /* InterfaceType command set (5) */
-  { 5,    1, IT_InvokeMethod,  "InterfaceType.InvokeMethod" },
-
-  /* Method command set (6) */
-  { 6,    1,  M_LineTable,                "Method.LineTable" },
-  { 6,    2,  M_VariableTable,            "Method.VariableTable" },
-  { 6,    3,  M_Bytecodes,                "Method.Bytecodes" },
-  { 6,    4,  M_IsObsolete,               "Method.IsObsolete" },
-  { 6,    5,  M_VariableTableWithGeneric, "Method.VariableTableWithGeneric" },
-
-  /* Field command set (8) */
-
-  /* ObjectReference command set (9) */
-  { 9,    1,  OR_ReferenceType,     "ObjectReference.ReferenceType" },
-  { 9,    2,  OR_GetValues,         "ObjectReference.GetValues" },
-  { 9,    3,  OR_SetValues,         "ObjectReference.SetValues" },
-  { 9,    4,  nullptr,              "ObjectReference.UNUSED" },
-  { 9,    5,  OR_MonitorInfo,       "ObjectReference.MonitorInfo" },
-  { 9,    6,  OR_InvokeMethod,      "ObjectReference.InvokeMethod" },
-  { 9,    7,  OR_DisableCollection, "ObjectReference.DisableCollection" },
-  { 9,    8,  OR_EnableCollection,  "ObjectReference.EnableCollection" },
-  { 9,    9,  OR_IsCollected,       "ObjectReference.IsCollected" },
-  { 9,    10, OR_ReferringObjects,  "ObjectReference.ReferringObjects" },
-
-  /* StringReference command set (10) */
-  { 10,   1,  SR_Value,         "StringReference.Value" },
-
-  /* ThreadReference command set (11) */
-  { 11,   1,  TR_Name,                        "ThreadReference.Name" },
-  { 11,   2,  TR_Suspend,                     "ThreadReference.Suspend" },
-  { 11,   3,  TR_Resume,                      "ThreadReference.Resume" },
-  { 11,   4,  TR_Status,                      "ThreadReference.Status" },
-  { 11,   5,  TR_ThreadGroup,                 "ThreadReference.ThreadGroup" },
-  { 11,   6,  TR_Frames,                      "ThreadReference.Frames" },
-  { 11,   7,  TR_FrameCount,                  "ThreadReference.FrameCount" },
-  { 11,   8,  TR_OwnedMonitors,               "ThreadReference.OwnedMonitors" },
-  { 11,   9,  TR_CurrentContendedMonitor,     "ThreadReference.CurrentContendedMonitor" },
-  { 11,   10, nullptr,                        "ThreadReference.Stop" },
-  { 11,   11, TR_Interrupt,                   "ThreadReference.Interrupt" },
-  { 11,   12, TR_DebugSuspendCount,           "ThreadReference.SuspendCount" },
-  { 11,   13, TR_OwnedMonitorsStackDepthInfo, "ThreadReference.OwnedMonitorsStackDepthInfo" },
-  { 11,   14, nullptr,                        "ThreadReference.ForceEarlyReturn" },
-
-  /* ThreadGroupReference command set (12) */
-  { 12,   1,  TGR_Name,         "ThreadGroupReference.Name" },
-  { 12,   2,  TGR_Parent,       "ThreadGroupReference.Parent" },
-  { 12,   3,  TGR_Children,     "ThreadGroupReference.Children" },
-
-  /* ArrayReference command set (13) */
-  { 13,   1,  AR_Length,        "ArrayReference.Length" },
-  { 13,   2,  AR_GetValues,     "ArrayReference.GetValues" },
-  { 13,   3,  AR_SetValues,     "ArrayReference.SetValues" },
-
-  /* ClassLoaderReference command set (14) */
-  { 14,   1,  CLR_VisibleClasses, "ClassLoaderReference.VisibleClasses" },
-
-  /* EventRequest command set (15) */
-  { 15,   1,  ER_Set,           "EventRequest.Set" },
-  { 15,   2,  ER_Clear,         "EventRequest.Clear" },
-  { 15,   3,  nullptr,          "EventRequest.ClearAllBreakpoints" },
-
-  /* StackFrame command set (16) */
-  { 16,   1,  SF_GetValues,     "StackFrame.GetValues" },
-  { 16,   2,  SF_SetValues,     "StackFrame.SetValues" },
-  { 16,   3,  SF_ThisObject,    "StackFrame.ThisObject" },
-  { 16,   4,  nullptr,          "StackFrame.PopFrames" },
-
-  /* ClassObjectReference command set (17) */
-  { 17,   1,  COR_ReflectedType, "ClassObjectReference.ReflectedType" },
-
-  /* Event command set (64) */
-  { 64, 100,  nullptr, "Event.Composite" },  // sent from VM to debugger, never received by VM
-
-  { 199,  1,  DDM_Chunk,        "DDM.Chunk" },
-};
-
-static const char* GetCommandName(Request* request) {
-  for (size_t i = 0; i < arraysize(gHandlers); ++i) {
-    if (gHandlers[i].cmdSet == request->GetCommandSet() &&
-        gHandlers[i].cmd == request->GetCommand()) {
-      return gHandlers[i].name;
-    }
-  }
-  return "?UNKNOWN?";
-}
-
-static std::string DescribeCommand(Request* request) {
-  std::string result;
-  result += "REQUEST: ";
-  result += GetCommandName(request);
-  result += StringPrintf(" (length=%zu id=0x%06x)", request->GetLength(), request->GetId());
-  return result;
-}
-
-// Returns true if the given command_set and command identify an "invoke" command.
-static bool IsInvokeCommand(uint8_t command_set, uint8_t command) {
-  if (command_set == kJDWPClassTypeCmdSet) {
-    return command == kJDWPClassTypeInvokeMethodCmd || command == kJDWPClassTypeNewInstanceCmd;
-  } else if (command_set == kJDWPObjectReferenceCmdSet) {
-    return command == kJDWPObjectReferenceInvokeCmd;
-  } else if (command_set == kJDWPInterfaceTypeCmdSet) {
-    return command == kJDWPInterfaceTypeInvokeMethodCmd;
-  } else {
-    return false;
-  }
-}
-
-/*
- * Process a request from the debugger. The skip_reply flag is set to true to indicate to the
- * caller the reply must not be sent to the debugger. This is used for invoke commands where the
- * reply is sent by the event thread after completing the invoke.
- *
- * On entry, the JDWP thread is in VMWAIT.
- */
-size_t JdwpState::ProcessRequest(Request* request, ExpandBuf* pReply, bool* skip_reply) {
-  JdwpError result = ERR_NONE;
-  *skip_reply = false;
-
-  if (request->GetCommandSet() != kJDWPDdmCmdSet) {
-    /*
-     * Activity from a debugger, not merely ddms.  Mark us as having an
-     * active debugger session, and zero out the last-activity timestamp
-     * so waitForDebugger() doesn't return if we stall for a bit here.
-     */
-    Dbg::GoActive();
-    last_activity_time_ms_.store(0, std::memory_order_seq_cst);
-  }
-
-  /*
-   * If a debugger event has fired in another thread, wait until the
-   * initiating thread has suspended itself before processing commands
-   * from the debugger.  Otherwise we (the JDWP thread) could be told to
-   * resume the thread before it has suspended.
-   *
-   * Note that we MUST clear the event token before waking the event
-   * thread up, or risk waiting for the thread to suspend after we've
-   * told it to resume.
-   */
-  AcquireJdwpTokenForCommand();
-
-  /*
-   * Tell the VM that we're running and shouldn't be interrupted by GC.
-   * Do this after anything that can stall indefinitely.
-   */
-  Thread* self = Thread::Current();
-  ScopedObjectAccess soa(self);
-
-  expandBufAddSpace(pReply, kJDWPHeaderLen);
-
-  size_t i;
-  for (i = 0; i < arraysize(gHandlers); ++i) {
-    if (gHandlers[i].cmdSet == request->GetCommandSet() &&
-        gHandlers[i].cmd == request->GetCommand() &&
-        gHandlers[i].func != nullptr) {
-      VLOG(jdwp) << DescribeCommand(request);
-      result = (*gHandlers[i].func)(this, request, pReply);
-      if (result == ERR_NONE) {
-        request->CheckConsumed();
-      }
-      self->AssertNoPendingException();
-      break;
-    }
-  }
-  if (i == arraysize(gHandlers)) {
-    LOG(ERROR) << "Command not implemented: " << DescribeCommand(request);
-    LOG(ERROR) << HexDump(request->data(), request->size(), false, "");
-    result = ERR_NOT_IMPLEMENTED;
-  }
-
-  size_t replyLength = 0U;
-  if (result == ERR_NONE && IsInvokeCommand(request->GetCommandSet(), request->GetCommand())) {
-    // We successfully request an invoke in the event thread. It will send the reply once the
-    // invoke completes so we must not send it now.
-    *skip_reply = true;
-  } else {
-    /*
-     * Set up the reply header.
-     *
-     * If we encountered an error, only send the header back.
-     */
-    uint8_t* replyBuf = expandBufGetBuffer(pReply);
-    replyLength = (result == ERR_NONE) ? expandBufGetLength(pReply) : kJDWPHeaderLen;
-    Set4BE(replyBuf + kJDWPHeaderSizeOffset, replyLength);
-    Set4BE(replyBuf + kJDWPHeaderIdOffset, request->GetId());
-    Set1(replyBuf + kJDWPHeaderFlagsOffset, kJDWPFlagReply);
-    Set2BE(replyBuf + kJDWPHeaderErrorCodeOffset, result);
-
-    CHECK_GT(expandBufGetLength(pReply), 0U) << GetCommandName(request) << " " << request->GetId();
-
-    size_t respLen = expandBufGetLength(pReply) - kJDWPHeaderLen;
-    VLOG(jdwp) << "REPLY: " << GetCommandName(request) << " " << result << " (length=" << respLen << ")";
-    if (false) {
-      VLOG(jdwp) << HexDump(expandBufGetBuffer(pReply) + kJDWPHeaderLen, respLen, false, "");
-    }
-  }
-
-  VLOG(jdwp) << "----------";
-
-  /*
-   * Update last-activity timestamp.  We really only need this during
-   * the initial setup.  Only update if this is a non-DDMS packet.
-   */
-  if (request->GetCommandSet() != kJDWPDdmCmdSet) {
-    last_activity_time_ms_.store(MilliTime(), std::memory_order_seq_cst);
-  }
-
-  return replyLength;
-}
-
-}  // namespace JDWP
-
-}  // namespace art
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
deleted file mode 100644
index 447e3bf..0000000
--- a/runtime/jdwp/jdwp_main.cc
+++ /dev/null
@@ -1,784 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <errno.h>
-#include <stdlib.h>
-#include <sys/time.h>
-#include <time.h>
-#include <unistd.h>
-
-#include "android-base/stringprintf.h"
-
-#include "base/atomic.h"
-#include "base/logging.h"  // For VLOG.
-#include "base/time_utils.h"
-#include "debugger.h"
-#include "jdwp/jdwp_priv.h"
-#include "scoped_thread_state_change-inl.h"
-
-namespace art {
-
-namespace JDWP {
-
-using android::base::StringPrintf;
-
-static void* StartJdwpThread(void* arg);
-
-
-static bool ParseJdwpOption(const std::string& name,
-                            const std::string& value,
-                            JdwpOptions* jdwp_options) {
-  if (name == "transport") {
-    if (value == "dt_socket") {
-      jdwp_options->transport = JDWP::kJdwpTransportSocket;
-    } else if (value == "dt_android_adb") {
-      jdwp_options->transport = JDWP::kJdwpTransportAndroidAdb;
-    } else {
-      jdwp_options->transport = JDWP::kJdwpTransportUnknown;
-      LOG(ERROR) << "JDWP transport not supported: " << value;
-      return false;
-    }
-  } else if (name == "server") {
-    if (value == "n") {
-      jdwp_options->server = false;
-    } else if (value == "y") {
-      jdwp_options->server = true;
-    } else {
-      LOG(ERROR) << "JDWP option 'server' must be 'y' or 'n'";
-      return false;
-    }
-  } else if (name == "suspend") {
-    if (value == "n") {
-      jdwp_options->suspend = false;
-    } else if (value == "y") {
-      jdwp_options->suspend = true;
-    } else {
-      LOG(ERROR) << "JDWP option 'suspend' must be 'y' or 'n'";
-      return false;
-    }
-  } else if (name == "address") {
-    /* this is either <port> or <host>:<port> */
-    std::string port_string;
-    jdwp_options->host.clear();
-    std::string::size_type colon = value.find(':');
-    if (colon != std::string::npos) {
-      jdwp_options->host = value.substr(0, colon);
-      port_string = value.substr(colon + 1);
-    } else {
-      port_string = value;
-    }
-    if (port_string.empty()) {
-      LOG(ERROR) << "JDWP address missing port: " << value;
-      return false;
-    }
-    char* end;
-    uint64_t port = strtoul(port_string.c_str(), &end, 10);
-    if (*end != '\0' || port > 0xffff) {
-      LOG(ERROR) << "JDWP address has junk in port field: " << value;
-      return false;
-    }
-    jdwp_options->port = port;
-  } else if (name == "launch" || name == "onthrow" || name == "oncaught" || name == "timeout") {
-    /* valid but unsupported */
-    LOG(INFO) << "Ignoring JDWP option '" << name << "'='" << value << "'";
-  } else {
-    LOG(INFO) << "Ignoring unrecognized JDWP option '" << name << "'='" << value << "'";
-  }
-
-  return true;
-}
-
-bool ParseJdwpOptions(const std::string& options, JdwpOptions* jdwp_options) {
-  VLOG(jdwp) << "ParseJdwpOptions: " << options;
-
-  if (options == "help") {
-    LOG(ERROR) << "Example: -XjdwpOptions:transport=dt_socket,address=8000,server=y\n"
-               << "Example: -Xrunjdwp:transport=dt_socket,address=8000,server=y\n"
-               << "Example: -Xrunjdwp:transport=dt_socket,address=localhost:6500,server=n\n";
-    return false;
-  }
-
-  const std::string s;
-
-  std::vector<std::string> pairs;
-  Split(options, ',', &pairs);
-
-  for (const std::string& jdwp_option : pairs) {
-    std::string::size_type equals_pos = jdwp_option.find('=');
-    if (equals_pos == std::string::npos) {
-      LOG(ERROR) << s << "Can't parse JDWP option '" << jdwp_option << "' in '" << options << "'";
-      return false;
-    }
-
-    bool parse_attempt = ParseJdwpOption(jdwp_option.substr(0, equals_pos),
-                                         jdwp_option.substr(equals_pos + 1),
-                                         jdwp_options);
-    if (!parse_attempt) {
-      // We fail to parse this JDWP option.
-      return parse_attempt;
-    }
-  }
-
-  if (jdwp_options->transport == JDWP::kJdwpTransportUnknown) {
-    LOG(ERROR) << s << "Must specify JDWP transport: " << options;
-    return false;
-  }
-#if ART_TARGET_ANDROID
-  if (jdwp_options->transport == JDWP::kJdwpTransportNone) {
-    jdwp_options->transport = JDWP::kJdwpTransportAndroidAdb;
-    LOG(WARNING) << "no JDWP transport specified. Defaulting to dt_android_adb";
-  }
-#endif
-  if (!jdwp_options->server && (jdwp_options->host.empty() || jdwp_options->port == 0)) {
-    LOG(ERROR) << s << "Must specify JDWP host and port when server=n: " << options;
-    return false;
-  }
-
-  return true;
-}
-
-/*
- * JdwpNetStateBase class implementation
- */
-JdwpNetStateBase::JdwpNetStateBase(JdwpState* state)
-    : state_(state), socket_lock_("JdwpNetStateBase lock", kJdwpSocketLock) {
-  clientSock = -1;
-  wake_pipe_[0] = -1;
-  wake_pipe_[1] = -1;
-  input_count_ = 0;
-  awaiting_handshake_ = false;
-}
-
-JdwpNetStateBase::~JdwpNetStateBase() {
-  if (wake_pipe_[0] != -1) {
-    close(wake_pipe_[0]);
-    wake_pipe_[0] = -1;
-  }
-  if (wake_pipe_[1] != -1) {
-    close(wake_pipe_[1]);
-    wake_pipe_[1] = -1;
-  }
-}
-
-bool JdwpNetStateBase::MakePipe() {
-  if (pipe(wake_pipe_) == -1) {
-    PLOG(ERROR) << "pipe failed";
-    return false;
-  }
-  return true;
-}
-
-void JdwpNetStateBase::WakePipe() {
-  // If we might be sitting in select, kick us loose.
-  if (wake_pipe_[1] != -1) {
-    VLOG(jdwp) << "+++ writing to wake pipe";
-    TEMP_FAILURE_RETRY(write(wake_pipe_[1], "", 1));
-  }
-}
-
-void JdwpNetStateBase::ConsumeBytes(size_t count) {
-  CHECK_GT(count, 0U);
-  CHECK_LE(count, input_count_);
-
-  if (count == input_count_) {
-    input_count_ = 0;
-    return;
-  }
-
-  memmove(input_buffer_, input_buffer_ + count, input_count_ - count);
-  input_count_ -= count;
-}
-
-bool JdwpNetStateBase::HaveFullPacket() {
-  if (awaiting_handshake_) {
-    return (input_count_ >= kMagicHandshakeLen);
-  }
-  if (input_count_ < 4) {
-    return false;
-  }
-  uint32_t length = Get4BE(input_buffer_);
-  return (input_count_ >= length);
-}
-
-bool JdwpNetStateBase::IsAwaitingHandshake() {
-  return awaiting_handshake_;
-}
-
-void JdwpNetStateBase::SetAwaitingHandshake(bool new_state) {
-  awaiting_handshake_ = new_state;
-}
-
-bool JdwpNetStateBase::IsConnected() {
-  return clientSock >= 0;
-}
-
-// Close a connection from a debugger (which may have already dropped us).
-// Resets the state so we're ready to receive a new connection.
-// Only called from the JDWP thread.
-void JdwpNetStateBase::Close() {
-  if (clientSock < 0) {
-    return;
-  }
-
-  VLOG(jdwp) << "+++ closing JDWP connection on fd " << clientSock;
-
-  close(clientSock);
-  clientSock = -1;
-}
-
-/*
- * Write a packet of "length" bytes. Grabs a mutex to assure atomicity.
- */
-ssize_t JdwpNetStateBase::WritePacket(ExpandBuf* pReply, size_t length) {
-  DCHECK_LE(length, expandBufGetLength(pReply));
-  if (!IsConnected()) {
-    LOG(WARNING) << "Connection with debugger is closed";
-    return -1;
-  }
-  MutexLock mu(Thread::Current(), socket_lock_);
-  return TEMP_FAILURE_RETRY(write(clientSock, expandBufGetBuffer(pReply), length));
-}
-
-/*
- * Write a buffered packet. Grabs a mutex to assure atomicity.
- */
-ssize_t JdwpNetStateBase::WriteBufferedPacket(const std::vector<iovec>& iov) {
-  MutexLock mu(Thread::Current(), socket_lock_);
-  return WriteBufferedPacketLocked(iov);
-}
-
-ssize_t JdwpNetStateBase::WriteBufferedPacketLocked(const std::vector<iovec>& iov) {
-  socket_lock_.AssertHeld(Thread::Current());
-  DCHECK(IsConnected()) << "Connection with debugger is closed";
-  return TEMP_FAILURE_RETRY(writev(clientSock, &iov[0], iov.size()));
-}
-
-bool JdwpState::IsConnected() {
-  return netState != nullptr && netState->IsConnected();
-}
-
-void JdwpState::SendBufferedRequest(uint32_t type, const std::vector<iovec>& iov) {
-  if (!IsConnected()) {
-    // Can happen with some DDMS events.
-    VLOG(jdwp) << "Not sending JDWP packet: no debugger attached!";
-    return;
-  }
-
-  size_t expected = 0;
-  for (size_t i = 0; i < iov.size(); ++i) {
-    expected += iov[i].iov_len;
-  }
-
-  errno = 0;
-  ssize_t actual = netState->WriteBufferedPacket(iov);
-  if (static_cast<size_t>(actual) != expected) {
-    PLOG(ERROR) << StringPrintf("Failed to send JDWP packet %c%c%c%c to debugger (%zd of %zu)",
-                                static_cast<char>(type >> 24),
-                                static_cast<char>(type >> 16),
-                                static_cast<char>(type >> 8),
-                                static_cast<char>(type),
-                                actual, expected);
-  }
-}
-
-void JdwpState::SendRequest(ExpandBuf* pReq) {
-  if (!IsConnected()) {
-    // Can happen with some DDMS events.
-    VLOG(jdwp) << "Not sending JDWP packet: no debugger attached!";
-    return;
-  }
-
-  errno = 0;
-  ssize_t actual = netState->WritePacket(pReq, expandBufGetLength(pReq));
-  if (static_cast<size_t>(actual) != expandBufGetLength(pReq)) {
-    PLOG(ERROR) << StringPrintf("Failed to send JDWP packet to debugger (%zd of %zu)",
-                                actual, expandBufGetLength(pReq));
-  }
-}
-
-/*
- * Get the next "request" serial number.  We use this when sending
- * packets to the debugger.
- */
-uint32_t JdwpState::NextRequestSerial() {
-  return request_serial_++;
-}
-
-/*
- * Get the next "event" serial number.  We use this in the response to
- * message type EventRequest.Set.
- */
-uint32_t JdwpState::NextEventSerial() {
-  return event_serial_++;
-}
-
-JdwpState::JdwpState(const JdwpOptions* options)
-    : options_(options),
-      thread_start_lock_("JDWP thread start lock", kJdwpStartLock),
-      thread_start_cond_("JDWP thread start condition variable", thread_start_lock_),
-      pthread_(0),
-      thread_(nullptr),
-      debug_thread_started_(false),
-      debug_thread_id_(0),
-      run(false),
-      netState(nullptr),
-      attach_lock_("JDWP attach lock", kJdwpAttachLock),
-      attach_cond_("JDWP attach condition variable", attach_lock_),
-      last_activity_time_ms_(0),
-      request_serial_(0x10000000),
-      event_serial_(0x20000000),
-      event_list_lock_("JDWP event list lock", kJdwpEventListLock),
-      event_list_(nullptr),
-      event_list_size_(0),
-      jdwp_token_lock_("JDWP token lock"),
-      jdwp_token_cond_("JDWP token condition variable", jdwp_token_lock_),
-      jdwp_token_owner_thread_id_(0),
-      ddm_is_active_(false),
-      should_exit_(false),
-      exit_status_(0),
-      shutdown_lock_("JDWP shutdown lock", kJdwpShutdownLock),
-      shutdown_cond_("JDWP shutdown condition variable", shutdown_lock_),
-      processing_request_(false) {
-  Locks::AddToExpectedMutexesOnWeakRefAccess(&event_list_lock_);
-}
-
-/*
- * Initialize JDWP.
- *
- * Does not return until JDWP thread is running, but may return before
- * the thread is accepting network connections.
- */
-JdwpState* JdwpState::Create(const JdwpOptions* options) {
-  Thread* self = Thread::Current();
-  Locks::mutator_lock_->AssertNotHeld(self);
-  std::unique_ptr<JdwpState> state(new JdwpState(options));
-  switch (options->transport) {
-    case kJdwpTransportSocket:
-      InitSocketTransport(state.get(), options);
-      break;
-#ifdef ART_TARGET_ANDROID
-    case kJdwpTransportAndroidAdb:
-      InitAdbTransport(state.get(), options);
-      break;
-#endif
-    default:
-      LOG(FATAL) << "Unknown transport: " << options->transport;
-  }
-  {
-    /*
-     * Grab a mutex before starting the thread.  This ensures they
-     * won't signal the cond var before we're waiting.
-     */
-    state->thread_start_lock_.AssertNotHeld(self);
-    MutexLock thread_start_locker(self, state->thread_start_lock_);
-
-    /*
-     * We have bound to a port, or are trying to connect outbound to a
-     * debugger.  Create the JDWP thread and let it continue the mission.
-     */
-    CHECK_PTHREAD_CALL(pthread_create, (&state->pthread_, nullptr, StartJdwpThread, state.get()),
-                       "JDWP thread");
-
-    /*
-     * Wait until the thread finishes basic initialization.
-     */
-    while (!state->debug_thread_started_) {
-      state->thread_start_cond_.Wait(self);
-    }
-  }
-
-  if (options->suspend) {
-    /*
-     * For suspend=y, wait for the debugger to connect to us or for us to
-     * connect to the debugger.
-     *
-     * The JDWP thread will signal us when it connects successfully or
-     * times out (for timeout=xxx), so we have to check to see what happened
-     * when we wake up.
-     */
-    {
-      ScopedThreadStateChange tsc(self, kWaitingForDebuggerToAttach);
-      MutexLock attach_locker(self, state->attach_lock_);
-      while (state->debug_thread_id_ == 0) {
-        state->attach_cond_.Wait(self);
-      }
-    }
-    if (!state->IsActive()) {
-      LOG(ERROR) << "JDWP connection failed";
-      return nullptr;
-    }
-
-    LOG(INFO) << "JDWP connected";
-
-    /*
-     * Ordinarily we would pause briefly to allow the debugger to set
-     * breakpoints and so on, but for "suspend=y" the VM init code will
-     * pause the VM when it sends the VM_START message.
-     */
-  }
-
-  return state.release();
-}
-
-/*
- * Reset all session-related state.  There should not be an active connection
- * to the client at this point.  The rest of the VM still thinks there is
- * a debugger attached.
- *
- * This includes freeing up the debugger event list.
- */
-void JdwpState::ResetState() {
-  /* could reset the serial numbers, but no need to */
-
-  UnregisterAll();
-  {
-    MutexLock mu(Thread::Current(), event_list_lock_);
-    CHECK(event_list_ == nullptr);
-  }
-
-  /*
-   * Should not have one of these in progress.  If the debugger went away
-   * mid-request, though, we could see this.
-   */
-  if (jdwp_token_owner_thread_id_ != 0) {
-    LOG(WARNING) << "Resetting state while event in progress";
-    DCHECK(false);
-  }
-}
-
-/*
- * Tell the JDWP thread to shut down.  Frees "state".
- */
-JdwpState::~JdwpState() {
-  if (netState != nullptr) {
-    /*
-     * Close down the network to inspire the thread to halt. If a request is being processed,
-     * we need to wait for it to finish first.
-     */
-    {
-      Thread* self = Thread::Current();
-      MutexLock mu(self, shutdown_lock_);
-      while (processing_request_) {
-        VLOG(jdwp) << "JDWP command in progress: wait for it to finish ...";
-        shutdown_cond_.Wait(self);
-      }
-
-      VLOG(jdwp) << "JDWP shutting down net...";
-      netState->Shutdown();
-    }
-
-    if (debug_thread_started_) {
-      run = false;
-      void* threadReturn;
-      if (pthread_join(pthread_, &threadReturn) != 0) {
-        LOG(WARNING) << "JDWP thread join failed";
-      }
-    }
-
-    VLOG(jdwp) << "JDWP freeing netstate...";
-    delete netState;
-    netState = nullptr;
-  }
-  CHECK(netState == nullptr);
-
-  ResetState();
-
-  Locks::RemoveFromExpectedMutexesOnWeakRefAccess(&event_list_lock_);
-}
-
-/*
- * Are we talking to a debugger?
- */
-bool JdwpState::IsActive() {
-  return IsConnected();
-}
-
-// Returns "false" if we encounter a connection-fatal error.
-bool JdwpState::HandlePacket() {
-  Thread* const self = Thread::Current();
-  {
-    MutexLock mu(self, shutdown_lock_);
-    processing_request_ = true;
-  }
-  JdwpNetStateBase* netStateBase = netState;
-  CHECK(netStateBase != nullptr) << "Connection has been closed";
-  JDWP::Request request(netStateBase->input_buffer_, netStateBase->input_count_);
-
-  ExpandBuf* pReply = expandBufAlloc();
-  bool skip_reply = false;
-  size_t replyLength = ProcessRequest(&request, pReply, &skip_reply);
-  ssize_t cc = 0;
-  if (!skip_reply) {
-    cc = netStateBase->WritePacket(pReply, replyLength);
-  } else {
-    DCHECK_EQ(replyLength, 0U);
-  }
-  expandBufFree(pReply);
-
-  /*
-   * We processed this request and sent its reply so we can release the JDWP token.
-   */
-  ReleaseJdwpTokenForCommand();
-
-  if (cc != static_cast<ssize_t>(replyLength)) {
-    PLOG(ERROR) << "Failed sending reply to debugger";
-    return false;
-  }
-  netStateBase->ConsumeBytes(request.GetLength());
-  {
-    MutexLock mu(self, shutdown_lock_);
-    processing_request_ = false;
-    shutdown_cond_.Broadcast(self);
-  }
-  return true;
-}
-
-/*
- * Entry point for JDWP thread.  The thread was created through the VM
- * mechanisms, so there is a java/lang/Thread associated with us.
- */
-static void* StartJdwpThread(void* arg) {
-  JdwpState* state = reinterpret_cast<JdwpState*>(arg);
-  CHECK(state != nullptr);
-
-  state->Run();
-  return nullptr;
-}
-
-void JdwpState::Run() {
-  Runtime* runtime = Runtime::Current();
-  CHECK(runtime->AttachCurrentThread("JDWP", true, runtime->GetSystemThreadGroup(),
-                                     !runtime->IsAotCompiler()));
-
-  VLOG(jdwp) << "JDWP: thread running";
-
-  /*
-   * Finish initializing, then notify the creating thread that
-   * we're running.
-   */
-  thread_ = Thread::Current();
-  run = true;
-
-  {
-    MutexLock locker(thread_, thread_start_lock_);
-    debug_thread_started_ = true;
-    thread_start_cond_.Broadcast(thread_);
-  }
-
-  /* set the thread state to kWaitingInMainDebuggerLoop so GCs don't wait for us */
-  CHECK_EQ(thread_->GetState(), kNative);
-  Locks::mutator_lock_->AssertNotHeld(thread_);
-  thread_->SetState(kWaitingInMainDebuggerLoop);
-
-  /*
-   * Loop forever if we're in server mode, processing connections.  In
-   * non-server mode, we bail out of the thread when the debugger drops
-   * us.
-   *
-   * We broadcast a notification when a debugger attaches, after we
-   * successfully process the handshake.
-   */
-  while (run) {
-    if (options_->server) {
-      /*
-       * Block forever, waiting for a connection.  To support the
-       * "timeout=xxx" option we'll need to tweak this.
-       */
-      if (!netState->Accept()) {
-        break;
-      }
-    } else {
-      /*
-       * If we're not acting as a server, we need to connect out to the
-       * debugger.  To support the "timeout=xxx" option we need to
-       * have a timeout if the handshake reply isn't received in a
-       * reasonable amount of time.
-       */
-      if (!netState->Establish(options_)) {
-        /* wake anybody who was waiting for us to succeed */
-        MutexLock mu(thread_, attach_lock_);
-        debug_thread_id_ = static_cast<ObjectId>(-1);
-        attach_cond_.Broadcast(thread_);
-        break;
-      }
-    }
-
-    /* prep debug code to handle the new connection */
-    Dbg::Connected();
-
-    /* process requests until the debugger drops */
-    bool first = true;
-    while (!Dbg::IsDisposed()) {
-      // sanity check -- shouldn't happen?
-      CHECK_EQ(thread_->GetState(), kWaitingInMainDebuggerLoop);
-
-      if (!netState->ProcessIncoming()) {
-        /* blocking read */
-        break;
-      }
-
-      if (should_exit_) {
-        exit(exit_status_);
-      }
-
-      if (first && !netState->IsAwaitingHandshake()) {
-        /* handshake worked, tell the interpreter that we're active */
-        first = false;
-
-        /* set thread ID; requires object registry to be active */
-        {
-          ScopedObjectAccess soa(thread_);
-          debug_thread_id_ = Dbg::GetThreadSelfId();
-        }
-
-        /* wake anybody who's waiting for us */
-        MutexLock mu(thread_, attach_lock_);
-        attach_cond_.Broadcast(thread_);
-      }
-    }
-
-    netState->Close();
-
-    if (ddm_is_active_) {
-      ddm_is_active_ = false;
-
-      /* broadcast the disconnect; must be in RUNNING state */
-      ScopedObjectAccess soa(thread_);
-      Dbg::DdmDisconnected();
-    }
-
-    {
-      ScopedObjectAccess soa(thread_);
-
-      // Release session state, e.g. remove breakpoint instructions.
-      ResetState();
-    }
-    // Tell the rest of the runtime that the debugger is no longer around.
-    Dbg::Disconnected();
-
-    /* if we had threads suspended, resume them now */
-    Dbg::UndoDebuggerSuspensions();
-
-    /* if we connected out, this was a one-shot deal */
-    if (!options_->server) {
-      run = false;
-    }
-  }
-
-  /* back to native, for thread shutdown */
-  CHECK_EQ(thread_->GetState(), kWaitingInMainDebuggerLoop);
-  thread_->SetState(kNative);
-
-  VLOG(jdwp) << "JDWP: thread detaching and exiting...";
-  runtime->DetachCurrentThread();
-}
-
-void JdwpState::NotifyDdmsActive() {
-  if (!ddm_is_active_) {
-    ddm_is_active_ = true;
-    Dbg::DdmConnected();
-  }
-}
-
-Thread* JdwpState::GetDebugThread() {
-  return thread_;
-}
-
-/*
- * Support routines for waitForDebugger().
- *
- * We can't have a trivial "waitForDebugger" function that returns the
- * instant the debugger connects, because we run the risk of executing code
- * before the debugger has had a chance to configure breakpoints or issue
- * suspend calls.  It would be nice to just sit in the suspended state, but
- * most debuggers don't expect any threads to be suspended when they attach.
- *
- * There's no JDWP event we can post to tell the debugger, "we've stopped,
- * and we like it that way".  We could send a fake breakpoint, which should
- * cause the debugger to immediately send a resume, but the debugger might
- * send the resume immediately or might throw an exception of its own upon
- * receiving a breakpoint event that it didn't ask for.
- *
- * What we really want is a "wait until the debugger is done configuring
- * stuff" event.  We can approximate this with a "wait until the debugger
- * has been idle for a brief period".
- */
-
-/*
- * Return the time, in milliseconds, since the last debugger activity.
- *
- * Returns -1 if no debugger is attached, or 0 if we're in the middle of
- * processing a debugger request.
- */
-int64_t JdwpState::LastDebuggerActivity() {
-  if (!Dbg::IsDebuggerActive()) {
-    LOG(WARNING) << "no active debugger";
-    return -1;
-  }
-
-  int64_t last = last_activity_time_ms_.load(std::memory_order_seq_cst);
-
-  /* initializing or in the middle of something? */
-  if (last == 0) {
-    VLOG(jdwp) << "+++ last=busy";
-    return 0;
-  }
-
-  /* now get the current time */
-  int64_t now = MilliTime();
-  CHECK_GE(now, last);
-
-  VLOG(jdwp) << "+++ debugger interval=" << (now - last);
-  return now - last;
-}
-
-void JdwpState::ExitAfterReplying(int exit_status) {
-  LOG(WARNING) << "Debugger told VM to exit with status " << exit_status;
-  should_exit_ = true;
-  exit_status_ = exit_status;
-}
-
-std::ostream& operator<<(std::ostream& os, const JdwpLocation& rhs) {
-  os << "JdwpLocation["
-     << Dbg::GetClassName(rhs.class_id) << "." << Dbg::GetMethodName(rhs.method_id)
-     << "@" << StringPrintf("%#" PRIx64, rhs.dex_pc) << " " << rhs.type_tag << "]";
-  return os;
-}
-
-bool operator==(const JdwpLocation& lhs, const JdwpLocation& rhs) {
-  return lhs.dex_pc == rhs.dex_pc && lhs.method_id == rhs.method_id &&
-      lhs.class_id == rhs.class_id && lhs.type_tag == rhs.type_tag;
-}
-
-bool operator!=(const JdwpLocation& lhs, const JdwpLocation& rhs) {
-  return !(lhs == rhs);
-}
-
-bool operator==(const JdwpOptions& lhs, const JdwpOptions& rhs) {
-  if (&lhs == &rhs) {
-    return true;
-  }
-
-  return lhs.transport == rhs.transport &&
-      lhs.server == rhs.server &&
-      lhs.suspend == rhs.suspend &&
-      lhs.host == rhs.host &&
-      lhs.port == rhs.port;
-}
-
-}  // namespace JDWP
-
-}  // namespace art
diff --git a/runtime/jdwp/jdwp_options_test.cc b/runtime/jdwp/jdwp_options_test.cc
deleted file mode 100644
index 10c52e8..0000000
--- a/runtime/jdwp/jdwp_options_test.cc
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "jdwp.h"
-
-#include "gtest/gtest.h"
-
-namespace art {
-namespace JDWP {
-
-TEST(JdwpOptionsTest, Options) {
-  {
-    /*
-     * "Example: -Xrunjdwp:transport=dt_socket,address=8000,server=y\n"
-     */
-    JDWP::JdwpOptions opt = JDWP::JdwpOptions();
-    const char *opt_args = "transport=dt_socket,address=8000,server=y";
-
-    EXPECT_TRUE(ParseJdwpOptions(opt_args, &opt));
-    EXPECT_EQ(opt.transport, JdwpTransportType::kJdwpTransportSocket);
-    EXPECT_EQ(opt.port, 8000u);
-    EXPECT_EQ(opt.server, true);
-    EXPECT_EQ(opt.suspend, false);
-  }
-
-  {
-    /*
-     * Example: transport=dt_socket,address=localhost:6500,server=n
-     */
-    JDWP::JdwpOptions opt = JDWP::JdwpOptions();
-    const char *opt_args = "transport=dt_socket,address=localhost:6500,server=y";
-
-    EXPECT_TRUE(ParseJdwpOptions(opt_args, &opt));
-    EXPECT_EQ(opt.transport, JdwpTransportType::kJdwpTransportSocket);
-    EXPECT_EQ(opt.port, 6500u);
-    EXPECT_EQ(opt.host, "localhost");
-    EXPECT_EQ(opt.server, true);
-    EXPECT_EQ(opt.suspend, false);
-  }
-
-  {
-    /*
-     * Example: transport=dt_android_adb,server=n,suspend=y;
-     */
-    JDWP::JdwpOptions opt = JDWP::JdwpOptions();
-    const char *opt_args = "transport=dt_android_adb,server=y";
-
-    EXPECT_TRUE(ParseJdwpOptions(opt_args, &opt));
-    EXPECT_EQ(opt.transport, JdwpTransportType::kJdwpTransportAndroidAdb);
-    EXPECT_EQ(opt.port, 0xFFFF);
-    EXPECT_EQ(opt.host, "");
-    EXPECT_EQ(opt.server, true);
-    EXPECT_EQ(opt.suspend, false);
-  }
-
-  /*
-   * Test failures
-  */
-  JDWP::JdwpOptions opt = JDWP::JdwpOptions();
-  EXPECT_FALSE(ParseJdwpOptions("help", &opt));
-  EXPECT_FALSE(ParseJdwpOptions("blabla", &opt));
-  EXPECT_FALSE(ParseJdwpOptions("transport=dt_android_adb,server=n", &opt));
-}
-
-}  // namespace JDWP
-}  // namespace art
diff --git a/runtime/jdwp/jdwp_priv.h b/runtime/jdwp/jdwp_priv.h
deleted file mode 100644
index 4e1bda8..0000000
--- a/runtime/jdwp/jdwp_priv.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-/*
- * JDWP internal interfaces.
- */
-#ifndef ART_RUNTIME_JDWP_JDWP_PRIV_H_
-#define ART_RUNTIME_JDWP_JDWP_PRIV_H_
-
-#include "debugger.h"
-#include "jdwp/jdwp.h"
-#include "jdwp/jdwp_event.h"
-
-#include <pthread.h>
-#include <sys/uio.h>
-
-/*
- * JDWP constants.
- */
-static constexpr size_t kJDWPHeaderSizeOffset = 0U;
-static constexpr size_t kJDWPHeaderIdOffset = 4U;
-static constexpr size_t kJDWPHeaderFlagsOffset = 8U;
-static constexpr size_t kJDWPHeaderErrorCodeOffset = 9U;
-static constexpr size_t kJDWPHeaderCmdSetOffset = 9U;
-static constexpr size_t kJDWPHeaderCmdOffset = 10U;
-static constexpr size_t kJDWPHeaderLen = 11U;
-static constexpr uint8_t kJDWPFlagReply = 0x80;
-
-static constexpr const char kMagicHandshake[] = "JDWP-Handshake";
-static constexpr size_t kMagicHandshakeLen = sizeof(kMagicHandshake) - 1;
-
-/* Invoke commands */
-static constexpr uint8_t kJDWPClassTypeCmdSet = 3U;
-static constexpr uint8_t kJDWPClassTypeInvokeMethodCmd = 3U;
-static constexpr uint8_t kJDWPClassTypeNewInstanceCmd = 4U;
-static constexpr uint8_t kJDWPInterfaceTypeCmdSet = 5U;
-static constexpr uint8_t kJDWPInterfaceTypeInvokeMethodCmd = 1U;
-static constexpr uint8_t kJDWPObjectReferenceCmdSet = 9U;
-static constexpr uint8_t kJDWPObjectReferenceInvokeCmd = 6U;
-
-/* Event command */
-static constexpr uint8_t kJDWPEventCmdSet = 64U;
-static constexpr uint8_t kJDWPEventCompositeCmd = 100U;
-
-/* DDM support */
-static constexpr uint8_t kJDWPDdmCmdSet = 199U;  // 0xc7, or 'G'+128
-static constexpr uint8_t kJDWPDdmCmd = 1U;
-
-namespace art {
-
-namespace JDWP {
-
-struct JdwpState;
-
-bool InitSocketTransport(JdwpState*, const JdwpOptions*);
-bool InitAdbTransport(JdwpState*, const JdwpOptions*);
-
-/*
- * Base class for the adb and socket JdwpNetState implementations.
- */
-class JdwpNetStateBase {
- public:
-  explicit JdwpNetStateBase(JdwpState*);
-  virtual ~JdwpNetStateBase();
-
-  virtual bool Accept() = 0;
-  virtual bool Establish(const JdwpOptions*) = 0;
-  virtual void Shutdown() = 0;
-  virtual bool ProcessIncoming() = 0;
-
-  void ConsumeBytes(size_t byte_count);
-
-  bool IsConnected();
-
-  bool IsAwaitingHandshake();
-
-  void Close();
-
-  ssize_t WritePacket(ExpandBuf* pReply, size_t length) REQUIRES(!socket_lock_);
-  ssize_t WriteBufferedPacket(const std::vector<iovec>& iov) REQUIRES(!socket_lock_);
-  Mutex* GetSocketLock() {
-    return &socket_lock_;
-  }
-  ssize_t WriteBufferedPacketLocked(const std::vector<iovec>& iov);
-
-  int clientSock;  // Active connection to debugger.
-
-  int wake_pipe_[2];  // Used to break out of select.
-
-  uint8_t input_buffer_[8192];
-  size_t input_count_;
-
- protected:
-  bool HaveFullPacket();
-
-  bool MakePipe();
-  void WakePipe();
-
-  void SetAwaitingHandshake(bool new_state);
-
-  JdwpState* state_;
-
- private:
-  // Used to serialize writes to the socket.
-  Mutex socket_lock_;
-
-  // Are we waiting for the JDWP handshake?
-  bool awaiting_handshake_;
-};
-
-}  // namespace JDWP
-
-}  // namespace art
-
-#endif  // ART_RUNTIME_JDWP_JDWP_PRIV_H_
diff --git a/runtime/jdwp/jdwp_request.cc b/runtime/jdwp/jdwp_request.cc
deleted file mode 100644
index a77962e..0000000
--- a/runtime/jdwp/jdwp_request.cc
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "jdwp/jdwp.h"
-
-#include <inttypes.h>
-
-#include "android-base/stringprintf.h"
-
-#include "jdwp/jdwp_priv.h"
-
-namespace art {
-
-namespace JDWP {
-
-Request::Request(const uint8_t* bytes, uint32_t available) : p_(bytes) {
-  byte_count_ = Read4BE();
-  end_ =  bytes + byte_count_;
-  CHECK_LE(byte_count_, available);
-
-  id_ = Read4BE();
-  int8_t flags = Read1();
-  if ((flags & kJDWPFlagReply) != 0) {
-    LOG(FATAL) << "reply?!";
-  }
-
-  command_set_ = Read1();
-  command_ = Read1();
-}
-
-Request::~Request() {
-}
-
-void Request::CheckConsumed() {
-  if (p_ < end_) {
-    CHECK(p_ == end_) << "read too few bytes: " << (end_ - p_);
-  } else if (p_ > end_) {
-    CHECK(p_ == end_) << "read too many bytes: " << (p_ - end_);
-  }
-}
-
-std::string Request::ReadUtf8String() {
-  uint32_t length = Read4BE();
-  std::string s;
-  s.resize(length);
-  memcpy(&s[0], p_, length);
-  p_ += length;
-  VLOG(jdwp) << "    string \"" << s << "\"";
-  return s;
-}
-
-// Helper function: read a variable-width value from the input buffer.
-uint64_t Request::ReadValue(size_t width) {
-  uint64_t value = -1;
-  switch (width) {
-    case 1: value = Read1(); break;
-    case 2: value = Read2BE(); break;
-    case 4: value = Read4BE(); break;
-    case 8: value = Read8BE(); break;
-    default: LOG(FATAL) << width;
-  }
-  return value;
-}
-
-int32_t Request::ReadSigned32(const char* what) {
-  int32_t value = static_cast<int32_t>(Read4BE());
-  VLOG(jdwp) << "    " << what << " " << value;
-  return value;
-}
-
-uint32_t Request::ReadUnsigned32(const char* what) {
-  uint32_t value = Read4BE();
-  VLOG(jdwp) << "    " << what << " " << value;
-  return value;
-}
-
-FieldId Request::ReadFieldId() {
-  FieldId id = Read8BE();
-  VLOG(jdwp) << "    field id " << DescribeField(id);
-  return id;
-}
-
-MethodId Request::ReadMethodId() {
-  MethodId id = Read8BE();
-  VLOG(jdwp) << "    method id " << DescribeMethod(id);
-  return id;
-}
-
-ObjectId Request::ReadObjectId(const char* specific_kind) {
-  ObjectId id = Read8BE();
-  VLOG(jdwp) << android::base::StringPrintf("    %s id %#" PRIx64, specific_kind, id);
-  return id;
-}
-
-ObjectId Request::ReadArrayId() {
-  return ReadObjectId("array");
-}
-
-ObjectId Request::ReadObjectId() {
-  return ReadObjectId("object");
-}
-
-ObjectId Request::ReadThreadId() {
-  return ReadObjectId("thread");
-}
-
-ObjectId Request::ReadThreadGroupId() {
-  return ReadObjectId("thread group");
-}
-
-RefTypeId Request::ReadRefTypeId() {
-  RefTypeId id = Read8BE();
-  VLOG(jdwp) << "    ref type id " << DescribeRefTypeId(id);
-  return id;
-}
-
-FrameId Request::ReadFrameId() {
-  FrameId id = Read8BE();
-  VLOG(jdwp) << "    frame id " << id;
-  return id;
-}
-
-JdwpTag Request::ReadTag() {
-  return ReadEnum1<JdwpTag>("tag");
-}
-
-JdwpTypeTag Request::ReadTypeTag() {
-  return ReadEnum1<JdwpTypeTag>("type tag");
-}
-
-JdwpLocation Request::ReadLocation() {
-  JdwpLocation location;
-  memset(&location, 0, sizeof(location));  // Allows memcmp(3) later.
-  location.type_tag = ReadTypeTag();
-  location.class_id = ReadObjectId("class");
-  location.method_id = ReadMethodId();
-  location.dex_pc = Read8BE();
-  VLOG(jdwp) << "    location " << location;
-  return location;
-}
-
-JdwpModKind Request::ReadModKind() {
-  return ReadEnum1<JdwpModKind>("mod kind");
-}
-
-uint8_t Request::Read1() {
-  return *p_++;
-}
-
-uint16_t Request::Read2BE() {
-  uint16_t result = p_[0] << 8 | p_[1];
-  p_ += 2;
-  return result;
-}
-
-uint32_t Request::Read4BE() {
-  uint32_t result = p_[0] << 24;
-  result |= p_[1] << 16;
-  result |= p_[2] << 8;
-  result |= p_[3];
-  p_ += 4;
-  return result;
-}
-
-uint64_t Request::Read8BE() {
-  uint64_t high = Read4BE();
-  uint64_t low = Read4BE();
-  return (high << 32) | low;
-}
-
-}  // namespace JDWP
-
-}  // namespace art
diff --git a/runtime/jdwp/jdwp_socket.cc b/runtime/jdwp/jdwp_socket.cc
deleted file mode 100644
index b8b0e16..0000000
--- a/runtime/jdwp/jdwp_socket.cc
+++ /dev/null
@@ -1,534 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <arpa/inet.h>
-#include <errno.h>
-#include <netdb.h>
-#include <netinet/in.h>
-#include <netinet/tcp.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include "android-base/stringprintf.h"
-
-#include "base/logging.h"  // For VLOG.
-#include "jdwp/jdwp_priv.h"
-
-namespace art {
-
-namespace JDWP {
-
-static constexpr uint16_t kBasePort = 8000;
-static constexpr uint16_t kMaxPort = 8040;
-
-/*
- * JDWP network state.
- *
- * We only talk to one debugger at a time.
- */
-struct JdwpSocketState : public JdwpNetStateBase {
-  uint16_t listenPort;
-  int     listenSock;         /* listen for connection from debugger */
-
-  explicit JdwpSocketState(JdwpState* state)
-      : JdwpNetStateBase(state),
-        listenPort(0U),
-        listenSock(-1),
-        remote_port_(0U) {
-  }
-
-  bool Accept() override;
-  bool Establish(const JdwpOptions*) override;
-  void Shutdown() override;
-  bool ProcessIncoming() override;
-
- private:
-  in_addr remote_addr_;
-  uint16_t remote_port_;
-};
-
-static JdwpSocketState* SocketStartup(JdwpState* state, uint16_t port, bool probe);
-
-/*
- * Set up some stuff for transport=dt_socket.
- */
-bool InitSocketTransport(JdwpState* state, const JdwpOptions* options) {
-  uint16_t port = options->port;
-
-  if (options->server) {
-    if (options->port != 0) {
-      /* try only the specified port */
-      state->netState = SocketStartup(state, port, false);
-    } else {
-      /* scan through a range of ports, binding to the first available */
-      for (port = kBasePort; port <= kMaxPort; port++) {
-        state->netState = SocketStartup(state, port, true);
-        if (state->netState != nullptr) {
-          break;
-        }
-      }
-    }
-    if (state->netState == nullptr) {
-      LOG(ERROR) << "JDWP net startup failed (req port=" << options->port << ")";
-      return false;
-    }
-  } else {
-    state->netState = SocketStartup(state, 0, false);
-  }
-
-  if (options->suspend) {
-    LOG(INFO) << "JDWP will wait for debugger on port " << port;
-  } else {
-    LOG(INFO) << "JDWP will " << (options->server ? "listen" : "connect") << " on port " << port;
-  }
-
-  return true;
-}
-
-/*
- * Initialize JDWP stuff.
- *
- * Allocates a new state structure.  If "port" is non-zero, this also
- * tries to bind to a listen port.  If "port" is zero, we assume
- * we're preparing for an outbound connection, and return without binding
- * to anything.
- *
- * This may be called several times if we're probing for a port.
- *
- * Returns 0 on success.
- */
-static JdwpSocketState* SocketStartup(JdwpState* state, uint16_t port, bool probe) {
-  JdwpSocketState* netState = new JdwpSocketState(state);
-  if (port == 0) {
-    return netState;
-  }
-
-  netState->listenSock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
-  if (netState->listenSock < 0) {
-    PLOG(probe ? ::android::base::ERROR : ::android::base::FATAL) << "Socket create failed";
-    goto fail;
-  }
-
-  /* allow immediate re-use */
-  {
-    int one = 1;
-    if (setsockopt(netState->listenSock, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) < 0) {
-      PLOG(probe ? ::android::base::ERROR : ::android::base::FATAL)
-          << "setsockopt(SO_REUSEADDR) failed";
-      goto fail;
-    }
-  }
-
-  union {
-    sockaddr_in  addrInet;
-    sockaddr     addrPlain;
-  } addr;
-  addr.addrInet.sin_family = AF_INET;
-  addr.addrInet.sin_port = htons(port);
-  inet_aton("127.0.0.1", &addr.addrInet.sin_addr);
-
-  if (bind(netState->listenSock, &addr.addrPlain, sizeof(addr)) != 0) {
-    PLOG(probe ? ::android::base::ERROR : ::android::base::FATAL)
-        << "Attempt to bind to port " << port << " failed";
-    goto fail;
-  }
-
-  netState->listenPort = port;
-
-  if (listen(netState->listenSock, 5) != 0) {
-    PLOG(probe ? ::android::base::ERROR : ::android::base::FATAL) << "Listen failed";
-    goto fail;
-  }
-
-  return netState;
-
- fail:
-  netState->Shutdown();
-  delete netState;
-  return nullptr;
-}
-
-/*
- * Shut down JDWP listener.  Don't free state.
- *
- * This may be called from a non-JDWP thread as part of shutting the
- * JDWP thread down.
- *
- * (This is currently called several times during startup as we probe
- * for an open port.)
- */
-void JdwpSocketState::Shutdown() {
-  int local_listenSock = this->listenSock;
-  int local_clientSock = this->clientSock;
-
-  /* clear these out so it doesn't wake up and try to reuse them */
-  this->listenSock = this->clientSock = -1;
-
-  /* "shutdown" dislodges blocking read() and accept() calls */
-  if (local_listenSock != -1) {
-    shutdown(local_listenSock, SHUT_RDWR);
-    close(local_listenSock);
-  }
-  if (local_clientSock != -1) {
-    shutdown(local_clientSock, SHUT_RDWR);
-    close(local_clientSock);
-  }
-
-  WakePipe();
-}
-
-/*
- * Disable the TCP Nagle algorithm, which delays transmission of outbound
- * packets until the previous transmissions have been acked.  JDWP does a
- * lot of back-and-forth with small packets, so this may help.
- */
-static int SetNoDelay(int fd) {
-  int on = 1;
-  int cc = setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
-  CHECK_EQ(cc, 0);
-  return cc;
-}
-
-/*
- * Accept a connection.  This will block waiting for somebody to show up.
- * If that's not desirable, use checkConnection() to make sure something
- * is pending.
- */
-bool JdwpSocketState::Accept() {
-  union {
-    sockaddr_in  addrInet;
-    sockaddr     addrPlain;
-  } addr;
-  socklen_t addrlen;
-  int sock;
-
-  if (listenSock < 0) {
-    return false;       /* you're not listening! */
-  }
-
-  CHECK_EQ(clientSock, -1);      /* must not already be talking */
-
-  addrlen = sizeof(addr);
-  do {
-    sock = accept(listenSock, &addr.addrPlain, &addrlen);
-    if (sock < 0 && errno != EINTR) {
-      // When we call shutdown() on the socket, accept() returns with
-      // EINVAL.  Don't gripe about it.
-      if (errno == EINVAL) {
-        if (VLOG_IS_ON(jdwp)) {
-          PLOG(ERROR) << "accept failed";
-        }
-      } else {
-        PLOG(ERROR) << "accept failed";
-        return false;
-      }
-    }
-  } while (sock < 0);
-
-  remote_addr_ = addr.addrInet.sin_addr;
-  remote_port_ = ntohs(addr.addrInet.sin_port);
-  VLOG(jdwp) << "+++ accepted connection from " << inet_ntoa(remote_addr_) << ":" << remote_port_;
-
-  clientSock = sock;
-  SetAwaitingHandshake(true);
-  input_count_ = 0;
-
-  VLOG(jdwp) << "Setting TCP_NODELAY on accepted socket";
-  SetNoDelay(clientSock);
-
-  if (!MakePipe()) {
-    return false;
-  }
-
-  return true;
-}
-
-/*
- * Create a connection to a waiting debugger.
- */
-bool JdwpSocketState::Establish(const JdwpOptions* options) {
-  union {
-    sockaddr_in  addrInet;
-    sockaddr     addrPlain;
-  } addr;
-  hostent* pEntry;
-
-  CHECK(!options->server);
-  CHECK(!options->host.empty());
-  CHECK_NE(options->port, 0);
-
-  /*
-   * Start by resolving the host name.
-   */
-#if defined(__linux__)
-  // Initial size of the work buffer used in gethostbyname_r.
-  //
-  // The call to gethostbyname_r below requires a user-allocated buffer,
-  // the size of which depends on the system. The initial implementation
-  // used to use a 128-byte buffer, but that was not enough on some
-  // systems (maybe because of IPv6), causing failures in JDWP host
-  // testing; thus it was increased to 256.
-  //
-  // However, we should not use a fixed size: gethostbyname_r's
-  // documentation states that if the work buffer is too small (i.e. if
-  // gethostbyname_r returns `ERANGE`), then the function should be
-  // called again with a bigger buffer. Which we do now, starting with
-  // an initial 256-byte buffer, and doubling it until gethostbyname_r
-  // accepts this size.
-  static constexpr size_t kInitialAuxBufSize = 256;
-
-  std::vector<char> auxBuf(kInitialAuxBufSize);
-  hostent he;
-  int error;
-  int cc;
-  while ((cc = gethostbyname_r(
-             options->host.c_str(), &he, auxBuf.data(), auxBuf.size(), &pEntry, &error))
-         == ERANGE) {
-    // The work buffer `auxBuf` is too small; enlarge it.
-    auxBuf.resize(auxBuf.size() * 2);
-  }
-  if (cc != 0 || pEntry == nullptr) {
-    LOG(WARNING) << "gethostbyname_r('" << options->host << "') failed: " << hstrerror(error);
-    return false;
-  }
-#else
-  h_errno = 0;
-  pEntry = gethostbyname(options->host.c_str());
-  if (pEntry == nullptr) {
-    PLOG(WARNING) << "gethostbyname('" << options->host << "') failed";
-    return false;
-  }
-#endif
-
-  /* copy it out ASAP to minimize risk of multithreaded annoyances */
-  memcpy(&addr.addrInet.sin_addr, pEntry->h_addr, pEntry->h_length);
-  addr.addrInet.sin_family = pEntry->h_addrtype;
-
-  addr.addrInet.sin_port = htons(options->port);
-
-  LOG(INFO) << "Connecting out to " << inet_ntoa(addr.addrInet.sin_addr) << ":"
-            << ntohs(addr.addrInet.sin_port);
-
-  /*
-   * Create a socket.
-   */
-  clientSock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP);
-  if (clientSock < 0) {
-    PLOG(ERROR) << "Unable to create socket";
-    return false;
-  }
-
-  /*
-   * Try to connect.
-   */
-  if (connect(clientSock, &addr.addrPlain, sizeof(addr)) != 0) {
-    PLOG(ERROR) << "Unable to connect to " << inet_ntoa(addr.addrInet.sin_addr) << ":"
-                << ntohs(addr.addrInet.sin_port);
-    close(clientSock);
-    clientSock = -1;
-    return false;
-  }
-
-  LOG(INFO) << "Connection established to " << options->host << " ("
-            << inet_ntoa(addr.addrInet.sin_addr) << ":" << ntohs(addr.addrInet.sin_port) << ")";
-  SetAwaitingHandshake(true);
-  input_count_ = 0;
-
-  SetNoDelay(clientSock);
-
-  if (!MakePipe()) {
-    return false;
-  }
-
-  return true;
-}
-
-/*
- * Process incoming data.  If no data is available, this will block until
- * some arrives.
- *
- * If we get a full packet, handle it.
- *
- * To take some of the mystery out of life, we want to reject incoming
- * connections if we already have a debugger attached.  If we don't, the
- * debugger will just mysteriously hang until it times out.  We could just
- * close the listen socket, but there's a good chance we won't be able to
- * bind to the same port again, which would confuse utilities.
- *
- * Returns "false" on error (indicating that the connection has been severed),
- * "true" if things are still okay.
- */
-bool JdwpSocketState::ProcessIncoming() {
-  int readCount;
-
-  CHECK_NE(clientSock, -1);
-
-  if (!HaveFullPacket()) {
-    /* read some more, looping until we have data */
-    errno = 0;
-    while (true) {
-      int selCount;
-      fd_set readfds;
-      int maxfd = -1;
-      int fd;
-
-      FD_ZERO(&readfds);
-
-      /* configure fds; note these may get zapped by another thread */
-      fd = listenSock;
-      if (fd >= 0) {
-        FD_SET(fd, &readfds);
-        if (maxfd < fd) {
-          maxfd = fd;
-        }
-      }
-      fd = clientSock;
-      if (fd >= 0) {
-        FD_SET(fd, &readfds);
-        if (maxfd < fd) {
-          maxfd = fd;
-        }
-      }
-      fd = wake_pipe_[0];
-      if (fd >= 0) {
-        FD_SET(fd, &readfds);
-        if (maxfd < fd) {
-          maxfd = fd;
-        }
-      } else {
-        LOG(INFO) << "NOTE: entering select w/o wakepipe";
-      }
-
-      if (maxfd < 0) {
-        VLOG(jdwp) << "+++ all fds are closed";
-        return false;
-      }
-
-      /*
-       * Select blocks until it sees activity on the file descriptors.
-       * Closing the local file descriptor does not count as activity,
-       * so we can't rely on that to wake us up (it works for read()
-       * and accept(), but not select()).
-       *
-       * We can do one of three things: (1) send a signal and catch
-       * EINTR, (2) open an additional fd ("wake pipe") and write to
-       * it when it's time to exit, or (3) time out periodically and
-       * re-issue the select.  We're currently using #2, as it's more
-       * reliable than #1 and generally better than #3.  Wastes two fds.
-       */
-      selCount = select(maxfd + 1, &readfds, nullptr, nullptr, nullptr);
-      if (selCount < 0) {
-        if (errno == EINTR) {
-          continue;
-        }
-        PLOG(ERROR) << "select failed";
-        goto fail;
-      }
-
-      if (wake_pipe_[0] >= 0 && FD_ISSET(wake_pipe_[0], &readfds)) {
-        if (listenSock >= 0) {
-          LOG(ERROR) << "Exit wake set, but not exiting?";
-        } else {
-          VLOG(jdwp) << "Got wake-up signal, bailing out of select";
-        }
-        goto fail;
-      }
-      if (listenSock >= 0 && FD_ISSET(listenSock, &readfds)) {
-        LOG(INFO) << "Ignoring second debugger -- accepting and dropping";
-        union {
-          sockaddr_in   addrInet;
-          sockaddr      addrPlain;
-        } addr;
-        socklen_t addrlen;
-        int tmpSock;
-        tmpSock = accept(listenSock, &addr.addrPlain, &addrlen);
-        if (tmpSock < 0) {
-          LOG(INFO) << "Weird -- accept failed";
-        } else {
-          close(tmpSock);
-        }
-      }
-      if (clientSock >= 0 && FD_ISSET(clientSock, &readfds)) {
-        readCount =
-            read(clientSock, input_buffer_ + input_count_, sizeof(input_buffer_) - input_count_);
-        if (readCount < 0) {
-          /* read failed */
-          if (errno != EINTR) {
-            goto fail;
-          }
-          VLOG(jdwp) << "+++ EINTR hit";
-          return true;
-        } else if (readCount == 0) {
-          /* EOF hit -- far end went away */
-          VLOG(jdwp) << "+++ peer disconnected";
-          goto fail;
-        } else {
-          break;
-        }
-      }
-    }
-
-    input_count_ += readCount;
-    if (!HaveFullPacket()) {
-      return true;        /* still not there yet */
-    }
-  }
-
-  /*
-   * Special-case the initial handshake.  For some bizarre reason we're
-   * expected to emulate bad tty settings by echoing the request back
-   * exactly as it was sent.  Note the handshake is always initiated by
-   * the debugger, no matter who connects to whom.
-   *
-   * Other than this one case, the protocol [claims to be] stateless.
-   */
-  if (IsAwaitingHandshake()) {
-    if (memcmp(input_buffer_, kMagicHandshake, kMagicHandshakeLen) != 0) {
-      LOG(ERROR) << android::base::StringPrintf("ERROR: bad handshake '%.14s'", input_buffer_);
-      goto fail;
-    }
-
-    errno = 0;
-    int cc = TEMP_FAILURE_RETRY(write(clientSock, input_buffer_, kMagicHandshakeLen));
-    if (cc != kMagicHandshakeLen) {
-      PLOG(ERROR) << "Failed writing handshake bytes ("
-                  << cc << " of " << kMagicHandshakeLen << ")";
-      goto fail;
-    }
-
-    ConsumeBytes(kMagicHandshakeLen);
-    SetAwaitingHandshake(false);
-    VLOG(jdwp) << "+++ handshake complete";
-    return true;
-  }
-
-  /*
-   * Handle this packet.
-   */
-  return state_->HandlePacket();
-
- fail:
-  Close();
-  return false;
-}
-
-}  // namespace JDWP
-
-}  // namespace art
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
deleted file mode 100644
index df1eb2b..0000000
--- a/runtime/jdwp/object_registry.cc
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "object_registry.h"
-
-#include "handle_scope-inl.h"
-#include "jni/jni_internal.h"
-#include "mirror/class.h"
-#include "mirror/throwable.h"
-#include "obj_ptr-inl.h"
-#include "scoped_thread_state_change-inl.h"
-
-namespace art {
-
-std::ostream& operator<<(std::ostream& os, const ObjectRegistryEntry& rhs) {
-  os << "ObjectRegistryEntry[" << rhs.jni_reference_type
-     << ",reference=" << rhs.jni_reference
-     << ",count=" << rhs.reference_count
-     << ",id=" << rhs.id << "]";
-  return os;
-}
-
-ObjectRegistry::ObjectRegistry()
-    : lock_("ObjectRegistry lock", kJdwpObjectRegistryLock), next_id_(1) {
-  Locks::AddToExpectedMutexesOnWeakRefAccess(&lock_);
-}
-
-ObjectRegistry::~ObjectRegistry() {
-  Locks::RemoveFromExpectedMutexesOnWeakRefAccess(&lock_);
-}
-
-JDWP::RefTypeId ObjectRegistry::AddRefType(ObjPtr<mirror::Class> c) {
-  return Add(c);
-}
-
-JDWP::RefTypeId ObjectRegistry::AddRefType(Handle<mirror::Class> c_h) {
-  return Add(c_h);
-}
-
-JDWP::ObjectId ObjectRegistry::Add(ObjPtr<mirror::Object> o) {
-  if (o == nullptr) {
-    return 0;
-  }
-  Thread* const self = Thread::Current();
-  StackHandleScope<1> hs(self);
-  return InternalAdd(hs.NewHandle(o));
-}
-
-// Template instantiations must be declared below.
-template<class T>
-JDWP::ObjectId ObjectRegistry::Add(Handle<T> obj_h) {
-  if (obj_h == nullptr) {
-    return 0;
-  }
-  return InternalAdd(obj_h);
-}
-
-// Explicit template instantiation.
-template
-REQUIRES_SHARED(Locks::mutator_lock_)
-REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
-JDWP::ObjectId ObjectRegistry::Add(Handle<mirror::Object> obj_h);
-
-template
-REQUIRES_SHARED(Locks::mutator_lock_)
-REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
-JDWP::ObjectId ObjectRegistry::Add(Handle<mirror::Throwable> obj_h);
-
-template<class T>
-JDWP::ObjectId ObjectRegistry::InternalAdd(Handle<T> obj_h) {
-  CHECK(obj_h != nullptr);
-
-  Thread* const self = Thread::Current();
-  self->AssertNoPendingException();
-  // Object::IdentityHashCode may cause these locks to be held so check we do not already
-  // hold them.
-  Locks::thread_list_lock_->AssertNotHeld(self);
-  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
-
-  // Call IdentityHashCode here to avoid a lock level violation between lock_ and monitor_lock.
-  int32_t identity_hash_code = obj_h->IdentityHashCode();
-
-  ScopedObjectAccessUnchecked soa(self);
-  MutexLock mu(soa.Self(), lock_);
-  ObjectRegistryEntry* entry = nullptr;
-  if (ContainsLocked(soa.Self(), obj_h.Get(), identity_hash_code, &entry)) {
-    // This object was already in our map.
-    ++entry->reference_count;
-  } else {
-    entry = new ObjectRegistryEntry;
-    entry->jni_reference_type = JNIWeakGlobalRefType;
-    entry->jni_reference = nullptr;
-    entry->reference_count = 0;
-    entry->id = 0;
-    entry->identity_hash_code = identity_hash_code;
-    object_to_entry_.insert(std::make_pair(identity_hash_code, entry));
-
-    // This object isn't in the registry yet, so add it.
-    JNIEnv* env = soa.Env();
-
-    jobject local_reference = soa.AddLocalReference<jobject>(obj_h.Get());
-
-    entry->jni_reference_type = JNIWeakGlobalRefType;
-    entry->jni_reference = env->NewWeakGlobalRef(local_reference);
-    entry->reference_count = 1;
-    entry->id = next_id_++;
-
-    id_to_entry_.Put(entry->id, entry);
-
-    env->DeleteLocalRef(local_reference);
-  }
-  return entry->id;
-}
-
-bool ObjectRegistry::ContainsLocked(Thread* self,
-                                    ObjPtr<mirror::Object> o,
-                                    int32_t identity_hash_code,
-                                    ObjectRegistryEntry** out_entry) {
-  DCHECK(o != nullptr);
-  for (auto it = object_to_entry_.lower_bound(identity_hash_code), end = object_to_entry_.end();
-       it != end && it->first == identity_hash_code; ++it) {
-    ObjectRegistryEntry* entry = it->second;
-    if (o == self->DecodeJObject(entry->jni_reference)) {
-      if (out_entry != nullptr) {
-        *out_entry = entry;
-      }
-      return true;
-    }
-  }
-  return false;
-}
-
-void ObjectRegistry::Clear() {
-  Thread* const self = Thread::Current();
-
-  // We must not hold the mutator lock exclusively if we want to delete weak global
-  // references. Otherwise this can lead to a deadlock with a running GC:
-  // 1. GC thread disables access to weak global references, then releases
-  //    mutator lock.
-  // 2. JDWP thread takes mutator lock exclusively after suspending all
-  //    threads.
-  // 3. GC thread waits for shared mutator lock which is held by JDWP
-  //    thread.
-  // 4. JDWP thread clears weak global references but need to wait for GC
-  //    thread to re-enable access to them.
-  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
-
-  MutexLock mu(self, lock_);
-  VLOG(jdwp) << "Object registry contained " << object_to_entry_.size() << " entries";
-  // Delete all the JNI references.
-  JNIEnv* env = self->GetJniEnv();
-  for (const auto& pair : object_to_entry_) {
-    const ObjectRegistryEntry* entry = pair.second;
-    if (entry->jni_reference_type == JNIWeakGlobalRefType) {
-      env->DeleteWeakGlobalRef(entry->jni_reference);
-    } else {
-      env->DeleteGlobalRef(entry->jni_reference);
-    }
-    delete entry;
-  }
-  // Clear the maps.
-  object_to_entry_.clear();
-  id_to_entry_.clear();
-}
-
-mirror::Object* ObjectRegistry::InternalGet(JDWP::ObjectId id, JDWP::JdwpError* error) {
-  Thread* self = Thread::Current();
-  MutexLock mu(self, lock_);
-  auto it = id_to_entry_.find(id);
-  if (it == id_to_entry_.end()) {
-    *error = JDWP::ERR_INVALID_OBJECT;
-    return nullptr;
-  }
-  ObjectRegistryEntry& entry = *it->second;
-  *error = JDWP::ERR_NONE;
-  return self->DecodeJObject(entry.jni_reference).Ptr();
-}
-
-jobject ObjectRegistry::GetJObject(JDWP::ObjectId id) {
-  if (id == 0) {
-    return nullptr;
-  }
-  Thread* self = Thread::Current();
-  MutexLock mu(self, lock_);
-  auto it = id_to_entry_.find(id);
-  CHECK(it != id_to_entry_.end()) << id;
-  ObjectRegistryEntry& entry = *it->second;
-  return entry.jni_reference;
-}
-
-void ObjectRegistry::DisableCollection(JDWP::ObjectId id) {
-  Thread* self = Thread::Current();
-  MutexLock mu(self, lock_);
-  auto it = id_to_entry_.find(id);
-  CHECK(it != id_to_entry_.end());
-  Promote(*it->second);
-}
-
-void ObjectRegistry::EnableCollection(JDWP::ObjectId id) {
-  Thread* self = Thread::Current();
-  MutexLock mu(self, lock_);
-  auto it = id_to_entry_.find(id);
-  CHECK(it != id_to_entry_.end());
-  Demote(*it->second);
-}
-
-void ObjectRegistry::Demote(ObjectRegistryEntry& entry) {
-  if (entry.jni_reference_type == JNIGlobalRefType) {
-    Thread* self = Thread::Current();
-    JNIEnv* env = self->GetJniEnv();
-    jobject global = entry.jni_reference;
-    entry.jni_reference = env->NewWeakGlobalRef(entry.jni_reference);
-    entry.jni_reference_type = JNIWeakGlobalRefType;
-    env->DeleteGlobalRef(global);
-  }
-}
-
-void ObjectRegistry::Promote(ObjectRegistryEntry& entry) {
-  if (entry.jni_reference_type == JNIWeakGlobalRefType) {
-    Thread* self = Thread::Current();
-    JNIEnv* env = self->GetJniEnv();
-    jobject weak = entry.jni_reference;
-    entry.jni_reference = env->NewGlobalRef(entry.jni_reference);
-    entry.jni_reference_type = JNIGlobalRefType;
-    env->DeleteWeakGlobalRef(weak);
-  }
-}
-
-bool ObjectRegistry::IsCollected(JDWP::ObjectId id) {
-  Thread* self = Thread::Current();
-  MutexLock mu(self, lock_);
-  auto it = id_to_entry_.find(id);
-  CHECK(it != id_to_entry_.end());
-  ObjectRegistryEntry& entry = *it->second;
-  if (entry.jni_reference_type == JNIWeakGlobalRefType) {
-    JNIEnv* env = self->GetJniEnv();
-    return env->IsSameObject(entry.jni_reference, nullptr);  // Has the jweak been collected?
-  } else {
-    return false;  // We hold a strong reference, so we know this is live.
-  }
-}
-
-void ObjectRegistry::DisposeObject(JDWP::ObjectId id, uint32_t reference_count) {
-  Thread* self = Thread::Current();
-  MutexLock mu(self, lock_);
-  auto it = id_to_entry_.find(id);
-  if (it == id_to_entry_.end()) {
-    return;
-  }
-  ObjectRegistryEntry* entry = it->second;
-  entry->reference_count -= reference_count;
-  if (entry->reference_count <= 0) {
-    JNIEnv* env = self->GetJniEnv();
-    // Erase the object from the maps. Note object may be null if it's
-    // a weak ref and the GC has cleared it.
-    int32_t hash_code = entry->identity_hash_code;
-    for (auto inner_it = object_to_entry_.lower_bound(hash_code), end = object_to_entry_.end();
-         inner_it != end && inner_it->first == hash_code; ++inner_it) {
-      if (entry == inner_it->second) {
-        object_to_entry_.erase(inner_it);
-        break;
-      }
-    }
-    if (entry->jni_reference_type == JNIWeakGlobalRefType) {
-      env->DeleteWeakGlobalRef(entry->jni_reference);
-    } else {
-      env->DeleteGlobalRef(entry->jni_reference);
-    }
-    id_to_entry_.erase(id);
-    delete entry;
-  }
-}
-
-}  // namespace art
diff --git a/runtime/jdwp/object_registry.h b/runtime/jdwp/object_registry.h
deleted file mode 100644
index 1728a73..0000000
--- a/runtime/jdwp/object_registry.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_JDWP_OBJECT_REGISTRY_H_
-#define ART_RUNTIME_JDWP_OBJECT_REGISTRY_H_
-
-#include <jni.h>
-#include <stdint.h>
-
-#include <map>
-
-#include "base/casts.h"
-#include "base/safe_map.h"
-#include "handle.h"
-#include "jdwp/jdwp.h"
-#include "obj_ptr.h"
-
-namespace art {
-
-namespace mirror {
-class Object;
-class Class;
-}  // namespace mirror
-
-struct ObjectRegistryEntry {
-  // Is jni_reference a weak global or a regular global reference?
-  jobjectRefType jni_reference_type;
-
-  // The reference itself.
-  jobject jni_reference;
-
-  // A reference count, so we can implement DisposeObject.
-  int32_t reference_count;
-
-  // The corresponding id, so we only need one map lookup in Add.
-  JDWP::ObjectId id;
-
-  // The identity hash code of the object. This is the same as the key
-  // for object_to_entry_. Store this for DisposeObject().
-  int32_t identity_hash_code;
-};
-std::ostream& operator<<(std::ostream& os, const ObjectRegistryEntry& rhs);
-
-// Tracks those objects currently known to the debugger, so we can use consistent ids when
-// referring to them. Normally we keep JNI weak global references to objects, so they can
-// still be garbage collected. The debugger can ask us to retain objects, though, so we can
-// also promote references to regular JNI global references (and demote them back again if
-// the debugger tells us that's okay).
-class ObjectRegistry {
- public:
-  ObjectRegistry();
-  ~ObjectRegistry();
-
-  JDWP::ObjectId Add(ObjPtr<mirror::Object> o)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
-
-  JDWP::RefTypeId AddRefType(ObjPtr<mirror::Class> c)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
-
-  template<class T>
-  JDWP::ObjectId Add(Handle<T> obj_h)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
-
-  JDWP::RefTypeId AddRefType(Handle<mirror::Class> c_h)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
-
-  template<typename T> T Get(JDWP::ObjectId id, JDWP::JdwpError* error)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_) {
-    if (id == 0) {
-      *error = JDWP::ERR_NONE;
-      return nullptr;
-    }
-    return down_cast<T>(InternalGet(id, error));
-  }
-
-  void Clear() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
-
-  void DisableCollection(JDWP::ObjectId id)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
-
-  void EnableCollection(JDWP::ObjectId id)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
-
-  bool IsCollected(JDWP::ObjectId id)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
-
-  void DisposeObject(JDWP::ObjectId id, uint32_t reference_count)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
-
-  // This is needed to get the jobject instead of the Object*.
-  // Avoid using this and use standard Get when possible.
-  jobject GetJObject(JDWP::ObjectId id) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
-
- private:
-  template<class T>
-  JDWP::ObjectId InternalAdd(Handle<T> obj_h)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
-
-  mirror::Object* InternalGet(JDWP::ObjectId id, JDWP::JdwpError* error)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
-
-  void Demote(ObjectRegistryEntry& entry)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(lock_);
-
-  void Promote(ObjectRegistryEntry& entry)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(lock_);
-
-  bool ContainsLocked(Thread* self,
-                      ObjPtr<mirror::Object> o,
-                      int32_t identity_hash_code,
-                      ObjectRegistryEntry** out_entry)
-      REQUIRES(lock_) REQUIRES_SHARED(Locks::mutator_lock_);
-
-  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  std::multimap<int32_t, ObjectRegistryEntry*> object_to_entry_ GUARDED_BY(lock_);
-  SafeMap<JDWP::ObjectId, ObjectRegistryEntry*> id_to_entry_ GUARDED_BY(lock_);
-
-  size_t next_id_ GUARDED_BY(lock_);
-};
-
-}  // namespace art
-
-#endif  // ART_RUNTIME_JDWP_OBJECT_REGISTRY_H_
diff --git a/runtime/jdwp_provider.h b/runtime/jdwp_provider.h
index 29fbc3f..9cd3145 100644
--- a/runtime/jdwp_provider.h
+++ b/runtime/jdwp_provider.h
@@ -29,7 +29,6 @@
   // should not be used and one should always call CanonicalizeJdwpProvider which will remove this
   // value before using a JdwpProvider value.
   kUnset,
-  kInternal,
   kAdbConnection,
 
   // The current default provider. Used if you run -XjdwpProvider:default
diff --git a/runtime/jit/TEST_MAPPING b/runtime/jit/TEST_MAPPING
new file mode 100644
index 0000000..8f94589
--- /dev/null
+++ b/runtime/jit/TEST_MAPPING
@@ -0,0 +1,8 @@
+{
+  "presubmit": [
+    {
+      "name": "CtsSimpleperfTestCases",
+      "file_patterns": ["debugger_interface.cc"]
+    }
+  ]
+}
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index a69429f..cecf533 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -19,18 +19,21 @@
 #include <android-base/logging.h>
 
 #include "base/array_ref.h"
+#include "base/bit_utils.h"
 #include "base/logging.h"
 #include "base/mutex.h"
 #include "base/time_utils.h"
 #include "base/utils.h"
 #include "dex/dex_file.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
+#include "jit/jit_memory_region.h"
+#include "runtime.h"
 #include "thread-current-inl.h"
 #include "thread.h"
 
 #include <atomic>
 #include <cstddef>
-#include <deque>
-#include <map>
 
 //
 // Debug interface for native tools (gdb, lldb, libunwind, simpleperf).
@@ -43,7 +46,31 @@
 //    method, which is called after every modification of the linked list.
 //    GDB does this, but it is complex to set up and it stops the process.
 //
-// 2) Asynchronously, by monitoring the action_seqlock_.
+// 2) Asynchronously, using the entry seqlocks.
+//   * The seqlock is a monotonically increasing counter, which
+//     is even if the entry is valid and odd if it is invalid.
+//     It is set to even value after all other fields are set,
+//     and it is set to odd value before the entry is deleted.
+//   * This makes it possible to safely read the symfile data:
+//     * The reader should read the value of the seqlock both
+//       before and after reading the symfile. If the seqlock
+//       values match and are even the copy is consistent.
+//   * Entries are recycled, but never freed, which guarantees
+//     that the seqlock is not overwritten by a random value.
+//   * The linked-list is one level higher.  The next-pointer
+//     must always point to an entry with even seqlock, which
+//     ensures that entries of a crashed process can be read.
+//     This means the entry must be added after it is created
+//     and it must be removed before it is invalidated (odd).
+//   * When iterating over the linked list the reader can use
+//     the timestamps to ensure that current and next entry
+//     were not deleted using the following steps:
+//       1) Read next pointer and the next entry's seqlock.
+//       2) Read the symfile and re-read the next pointer.
+//       3) Re-read both the current and next seqlock.
+//       4) Go to step 1 with using new entry and seqlock.
+//
+// 3) Asynchronously, using the global seqlock.
 //   * The seqlock is a monotonically increasing counter which is incremented
 //     before and after every modification of the linked list. Odd value of
 //     the counter means the linked list is being modified (it is locked).
@@ -56,23 +83,9 @@
 //     * Note that the process might even free and munmap the data while
 //       it is being copied, therefore the reader should either handle
 //       SEGV or use OS calls to read the memory (e.g. process_vm_readv).
-//   * The seqlock can be used to determine the number of modifications of
-//     the linked list, which can be used to intelligently cache the data.
-//     Note the possible overflow of the seqlock.  It is intentionally
-//     32-bit, since 64-bit atomics can be tricky on some architectures.
 //   * The timestamps on the entry record the time when the entry was
 //     created which is relevant if the unwinding is not live and is
 //     postponed until much later.  All timestamps must be unique.
-//   * Memory barriers are used to make it possible to reason about
-//     the data even when it is being modified (e.g. the process crashed
-//     while that data was locked, and thus it will be never unlocked).
-//     * In particular, it should be possible to:
-//       1) read the seqlock and then the linked list head pointer.
-//       2) copy the entry and check that seqlock has not changed.
-//       3) copy the symfile and check that seqlock has not changed.
-//       4) go back to step 2 using the next pointer (if non-null).
-//       This safely creates copy of all symfiles, although other data
-//       might be inconsistent/unusable (e.g. prev_, action_timestamp_).
 //   * For full conformance with the C++ memory model, all seqlock
 //     protected accesses should be atomic. We currently do this in the
 //     more critical cases. The rest will have to be fixed before
@@ -84,6 +97,11 @@
 static Mutex g_jit_debug_lock("JIT native debug entries", kNativeDebugInterfaceLock);
 static Mutex g_dex_debug_lock("DEX native debug entries", kNativeDebugInterfaceLock);
 
+// Most loads and stores need no synchronization since all memory is protected by the global locks.
+// Some writes are synchronized so libunwindstack can read the memory safely from another process.
+constexpr std::memory_order kNonRacingRelaxed = std::memory_order_relaxed;
+
+// Public binary interface between ART and native tools (gdb, libunwind, etc).
 extern "C" {
   enum JITAction {
     JIT_NOACTION = 0,
@@ -91,39 +109,68 @@
     JIT_UNREGISTER_FN
   };
 
-  struct JITCodeEntry {
-    // Atomic to ensure the reader can always iterate over the linked list
-    // (e.g. the process could crash in the middle of writing this field).
-    std::atomic<JITCodeEntry*> next_;
-    // Non-atomic. The reader should not use it. It is only used for deletion.
-    JITCodeEntry* prev_;
-    const uint8_t* symfile_addr_;
-    uint64_t symfile_size_;  // Beware of the offset (12 on x86; but 16 on ARM32).
+  // Public/stable binary interface.
+  struct JITCodeEntryPublic {
+    std::atomic<const JITCodeEntry*> next_;  // Atomic to guarantee consistency after crash.
+    const JITCodeEntry* prev_ = nullptr;     // For linked list deletion. Unused in readers.
+    const uint8_t* symfile_addr_ = nullptr;  // Address of the in-memory ELF file.
+    uint64_t symfile_size_ = 0;              // NB: The offset is 12 on x86 but 16 on ARM32.
 
     // Android-specific fields:
-    uint64_t register_timestamp_;  // CLOCK_MONOTONIC time of entry registration.
+    uint64_t timestamp_;                     // CLOCK_MONOTONIC time of entry registration.
+    std::atomic_uint32_t seqlock_{1};        // Synchronization. Even value if entry is valid.
   };
 
-  struct JITDescriptor {
-    uint32_t version_ = 1;                      // NB: GDB supports only version 1.
-    uint32_t action_flag_ = JIT_NOACTION;       // One of the JITAction enum values.
-    JITCodeEntry* relevant_entry_ = nullptr;    // The entry affected by the action.
-    std::atomic<JITCodeEntry*> head_{nullptr};  // Head of link list of all entries.
+  // Implementation-specific fields (which can be used only in this file).
+  struct JITCodeEntry : public JITCodeEntryPublic {
+    // Unpacked entries: Code address of the symbol in the ELF file.
+    // Packed entries: The start address of the covered memory range.
+    const void* addr_ = nullptr;
+    // Allow merging of ELF files to save space.
+    // Packing drops advanced DWARF data, so it is not always desirable.
+    bool allow_packing_ = false;
+    // Whether this entry has been LZMA compressed.
+    // Compression is expensive, so we don't always do it.
+    bool is_compressed_ = false;
+  };
+
+  // Public/stable binary interface.
+  struct JITDescriptorPublic {
+    uint32_t version_ = 1;                            // NB: GDB supports only version 1.
+    uint32_t action_flag_ = JIT_NOACTION;             // One of the JITAction enum values.
+    const JITCodeEntry* relevant_entry_ = nullptr;    // The entry affected by the action.
+    std::atomic<const JITCodeEntry*> head_{nullptr};  // Head of link list of all entries.
 
     // Android-specific fields:
-    uint8_t magic_[8] = {'A', 'n', 'd', 'r', 'o', 'i', 'd', '1'};
+    uint8_t magic_[8] = {'A', 'n', 'd', 'r', 'o', 'i', 'd', '2'};
     uint32_t flags_ = 0;  // Reserved for future use. Must be 0.
-    uint32_t sizeof_descriptor = sizeof(JITDescriptor);
-    uint32_t sizeof_entry = sizeof(JITCodeEntry);
-    std::atomic_uint32_t action_seqlock_{0};  // Incremented before and after any modification.
-    uint64_t action_timestamp_ = 1;           // CLOCK_MONOTONIC time of last action.
+    uint32_t sizeof_descriptor = sizeof(JITDescriptorPublic);
+    uint32_t sizeof_entry = sizeof(JITCodeEntryPublic);
+    std::atomic_uint32_t seqlock_{0};  // Incremented before and after any modification.
+    uint64_t timestamp_ = 1;           // CLOCK_MONOTONIC time of last action.
   };
 
+  // Implementation-specific fields (which can be used only in this file).
+  struct JITDescriptor : public JITDescriptorPublic {
+    const JITCodeEntry* tail_ = nullptr;          // Tail of link list of all live entries.
+    const JITCodeEntry* free_entries_ = nullptr;  // List of deleted entries ready for reuse.
+
+    // Used for memory sharing with zygote. See NativeDebugInfoPreFork().
+    const JITCodeEntry* zygote_head_entry_ = nullptr;
+    JITCodeEntry application_tail_entry_{};
+  };
+
+  // Public interface: Can be used by reader to check the structs have the expected size.
+  uint32_t g_art_sizeof_jit_code_entry = sizeof(JITCodeEntryPublic);
+  uint32_t g_art_sizeof_jit_descriptor = sizeof(JITDescriptorPublic);
+
   // Check that std::atomic has the expected layout.
   static_assert(alignof(std::atomic_uint32_t) == alignof(uint32_t), "Weird alignment");
   static_assert(sizeof(std::atomic_uint32_t) == sizeof(uint32_t), "Weird size");
+  static_assert(std::atomic_uint32_t::is_always_lock_free, "Expected to be lock free");
   static_assert(alignof(std::atomic<void*>) == alignof(void*), "Weird alignment");
   static_assert(sizeof(std::atomic<void*>) == sizeof(void*), "Weird size");
+  static_assert(std::atomic<void*>::is_always_lock_free, "Expected to be lock free");
 
   // GDB may set breakpoint here. We must ensure it is not removed or deduplicated.
   void __attribute__((noinline)) __jit_debug_register_code() {
@@ -144,269 +191,415 @@
   JITDescriptor __dex_debug_descriptor GUARDED_BY(g_dex_debug_lock) {};
 }
 
+struct DexNativeInfo {
+  static constexpr bool kCopySymfileData = false;  // Just reference DEX files.
+  static JITDescriptor& Descriptor() { return __dex_debug_descriptor; }
+  static void NotifyNativeDebugger() { __dex_debug_register_code_ptr(); }
+  static const void* Alloc(size_t size) { return malloc(size); }
+  static void Free(const void* ptr) { free(const_cast<void*>(ptr)); }
+  template<class T> static T* Writable(const T* v) { return const_cast<T*>(v); }
+};
+
+struct JitNativeInfo {
+  static constexpr bool kCopySymfileData = true;  // Copy debug info to JIT memory.
+  static JITDescriptor& Descriptor() { return __jit_debug_descriptor; }
+  static void NotifyNativeDebugger() { __jit_debug_register_code_ptr(); }
+  static const void* Alloc(size_t size) { return Memory()->AllocateData(size); }
+  static void Free(const void* ptr) { Memory()->FreeData(reinterpret_cast<const uint8_t*>(ptr)); }
+  static void Free(void* ptr) = delete;
+
+  template<class T> static T* Writable(const T* v) {
+    // Special case: This entry is in static memory and not allocated in JIT memory.
+    if (v == reinterpret_cast<const void*>(&Descriptor().application_tail_entry_)) {
+      return const_cast<T*>(v);
+    }
+    return const_cast<T*>(Memory()->GetWritableDataAddress(v));
+  }
+
+  static jit::JitMemoryRegion* Memory() ASSERT_CAPABILITY(Locks::jit_lock_) {
+    Locks::jit_lock_->AssertHeld(Thread::Current());
+    jit::JitCodeCache* jit_code_cache = Runtime::Current()->GetJitCodeCache();
+    CHECK(jit_code_cache != nullptr);
+    jit::JitMemoryRegion* memory = jit_code_cache->GetCurrentRegion();
+    CHECK(memory->IsValid());
+    return memory;
+  }
+};
+
+ArrayRef<const uint8_t> GetJITCodeEntrySymFile(const JITCodeEntry* entry) {
+  return ArrayRef<const uint8_t>(entry->symfile_addr_, entry->symfile_size_);
+}
+
+// Ensure the timestamp is monotonically increasing even in presence of low
+// granularity system timer.  This ensures each entry has unique timestamp.
+static uint64_t GetNextTimestamp(JITDescriptor& descriptor) {
+  return std::max(descriptor.timestamp_ + 1, NanoTime());
+}
+
 // Mark the descriptor as "locked", so native tools know the data is being modified.
-static void ActionSeqlock(JITDescriptor& descriptor) {
-  DCHECK_EQ(descriptor.action_seqlock_.load() & 1, 0u) << "Already locked";
-  descriptor.action_seqlock_.fetch_add(1, std::memory_order_relaxed);
+static void Seqlock(JITDescriptor& descriptor) {
+  DCHECK_EQ(descriptor.seqlock_.load(kNonRacingRelaxed) & 1, 0u) << "Already locked";
+  descriptor.seqlock_.fetch_add(1, std::memory_order_relaxed);
   // Ensure that any writes within the locked section cannot be reordered before the increment.
   std::atomic_thread_fence(std::memory_order_release);
 }
 
 // Mark the descriptor as "unlocked", so native tools know the data is safe to read.
-static void ActionSequnlock(JITDescriptor& descriptor) {
-  DCHECK_EQ(descriptor.action_seqlock_.load() & 1, 1u) << "Already unlocked";
+static void Sequnlock(JITDescriptor& descriptor) {
+  DCHECK_EQ(descriptor.seqlock_.load(kNonRacingRelaxed) & 1, 1u) << "Already unlocked";
   // Ensure that any writes within the locked section cannot be reordered after the increment.
   std::atomic_thread_fence(std::memory_order_release);
-  descriptor.action_seqlock_.fetch_add(1, std::memory_order_relaxed);
+  descriptor.seqlock_.fetch_add(1, std::memory_order_relaxed);
 }
 
-static JITCodeEntry* CreateJITCodeEntryInternal(
-    JITDescriptor& descriptor,
-    void (*register_code_ptr)(),
-    ArrayRef<const uint8_t> symfile,
-    bool copy_symfile) {
+// Insert 'entry' in the linked list before 'next' and mark it as valid (append if 'next' is null).
+// This method must be called under global lock (g_jit_debug_lock or g_dex_debug_lock).
+template<class NativeInfo>
+static void InsertNewEntry(const JITCodeEntry* entry, const JITCodeEntry* next) {
+  CHECK_EQ(entry->seqlock_.load(kNonRacingRelaxed) & 1, 1u) << "Expected invalid entry";
+  JITDescriptor& descriptor = NativeInfo::Descriptor();
+  const JITCodeEntry* prev = (next != nullptr ? next->prev_ : descriptor.tail_);
+  JITCodeEntry* writable = NativeInfo::Writable(entry);
+  writable->next_ = next;
+  writable->prev_ = prev;
+  writable->seqlock_.fetch_add(1, std::memory_order_release);  // Mark as valid.
+  // Backward pointers should not be used by readers, so they are non-atomic.
+  if (next != nullptr) {
+    NativeInfo::Writable(next)->prev_ = entry;
+  } else {
+    descriptor.tail_ = entry;
+  }
+  // Forward pointers must be atomic and they must point to a valid entry at all times.
+  if (prev != nullptr) {
+    NativeInfo::Writable(prev)->next_.store(entry, std::memory_order_release);
+  } else {
+    descriptor.head_.store(entry, std::memory_order_release);
+  }
+}
+
+// This must be called with the appropriate lock taken (g_{jit,dex}_debug_lock).
+template<class NativeInfo>
+static const JITCodeEntry* CreateJITCodeEntryInternal(
+    ArrayRef<const uint8_t> symfile = ArrayRef<const uint8_t>(),
+    const void* addr = nullptr,
+    bool allow_packing = false,
+    bool is_compressed = false) {
+  JITDescriptor& descriptor = NativeInfo::Descriptor();
+
+  // Allocate JITCodeEntry if needed.
+  if (descriptor.free_entries_ == nullptr) {
+    const void* memory = NativeInfo::Alloc(sizeof(JITCodeEntry));
+    if (memory == nullptr) {
+      LOG(ERROR) << "Failed to allocate memory for native debug info";
+      return nullptr;
+    }
+    new (NativeInfo::Writable(memory)) JITCodeEntry();
+    descriptor.free_entries_ = reinterpret_cast<const JITCodeEntry*>(memory);
+  }
+
   // Make a copy of the buffer to shrink it and to pass ownership to JITCodeEntry.
-  if (copy_symfile) {
-    uint8_t* copy = new uint8_t[symfile.size()];
-    CHECK(copy != nullptr);
-    memcpy(copy, symfile.data(), symfile.size());
+  if (NativeInfo::kCopySymfileData && !symfile.empty()) {
+    const uint8_t* copy = reinterpret_cast<const uint8_t*>(NativeInfo::Alloc(symfile.size()));
+    if (copy == nullptr) {
+      LOG(ERROR) << "Failed to allocate memory for native debug info";
+      return nullptr;
+    }
+    memcpy(NativeInfo::Writable(copy), symfile.data(), symfile.size());
     symfile = ArrayRef<const uint8_t>(copy, symfile.size());
   }
 
-  // Ensure the timestamp is monotonically increasing even in presence of low
-  // granularity system timer.  This ensures each entry has unique timestamp.
-  uint64_t timestamp = std::max(descriptor.action_timestamp_ + 1, NanoTime());
+  uint64_t timestamp = GetNextTimestamp(descriptor);
 
-  JITCodeEntry* head = descriptor.head_.load(std::memory_order_relaxed);
-  JITCodeEntry* entry = new JITCodeEntry;
-  CHECK(entry != nullptr);
-  entry->symfile_addr_ = symfile.data();
-  entry->symfile_size_ = symfile.size();
-  entry->prev_ = nullptr;
-  entry->next_.store(head, std::memory_order_relaxed);
-  entry->register_timestamp_ = timestamp;
-
-  // We are going to modify the linked list, so take the seqlock.
-  ActionSeqlock(descriptor);
-  if (head != nullptr) {
-    head->prev_ = entry;
+  // We must insert entries at specific place.  See NativeDebugInfoPreFork().
+  const JITCodeEntry* next = descriptor.head_.load(kNonRacingRelaxed);  // Insert at the head.
+  if (descriptor.zygote_head_entry_ != nullptr && Runtime::Current()->IsZygote()) {
+    next = nullptr;  // Insert zygote entries at the tail.
   }
-  descriptor.head_.store(entry, std::memory_order_relaxed);
+
+  // Pop entry from the free list.
+  const JITCodeEntry* entry = descriptor.free_entries_;
+  descriptor.free_entries_ = descriptor.free_entries_->next_.load(kNonRacingRelaxed);
+
+  // Create the entry and set all its fields.
+  JITCodeEntry* writable_entry = NativeInfo::Writable(entry);
+  writable_entry->symfile_addr_ = symfile.data();
+  writable_entry->symfile_size_ = symfile.size();
+  writable_entry->addr_ = addr;
+  writable_entry->allow_packing_ = allow_packing;
+  writable_entry->is_compressed_ = is_compressed;
+  writable_entry->timestamp_ = timestamp;
+
+  // Add the entry to the main linked list.
+  Seqlock(descriptor);
+  InsertNewEntry<NativeInfo>(entry, next);
   descriptor.relevant_entry_ = entry;
   descriptor.action_flag_ = JIT_REGISTER_FN;
-  descriptor.action_timestamp_ = timestamp;
-  ActionSequnlock(descriptor);
+  descriptor.timestamp_ = timestamp;
+  Sequnlock(descriptor);
 
-  (*register_code_ptr)();
+  NativeInfo::NotifyNativeDebugger();
+
   return entry;
 }
 
-static void DeleteJITCodeEntryInternal(
-    JITDescriptor& descriptor,
-    void (*register_code_ptr)(),
-    JITCodeEntry* entry,
-    bool free_symfile) {
+template<class NativeInfo>
+static void DeleteJITCodeEntryInternal(const JITCodeEntry* entry) {
   CHECK(entry != nullptr);
-  const uint8_t* symfile = entry->symfile_addr_;
+  JITDescriptor& descriptor = NativeInfo::Descriptor();
 
-  // Ensure the timestamp is monotonically increasing even in presence of low
-  // granularity system timer.  This ensures each entry has unique timestamp.
-  uint64_t timestamp = std::max(descriptor.action_timestamp_ + 1, NanoTime());
-
-  // We are going to modify the linked list, so take the seqlock.
-  ActionSeqlock(descriptor);
-  JITCodeEntry* next = entry->next_.load(std::memory_order_relaxed);
-  if (entry->prev_ != nullptr) {
-    entry->prev_->next_.store(next, std::memory_order_relaxed);
+  // Remove the entry from the main linked-list.
+  Seqlock(descriptor);
+  const JITCodeEntry* next = entry->next_.load(kNonRacingRelaxed);
+  const JITCodeEntry* prev = entry->prev_;
+  if (next != nullptr) {
+    NativeInfo::Writable(next)->prev_ = prev;
+  } else {
+    descriptor.tail_ = prev;
+  }
+  if (prev != nullptr) {
+    NativeInfo::Writable(prev)->next_.store(next, std::memory_order_relaxed);
   } else {
     descriptor.head_.store(next, std::memory_order_relaxed);
   }
-  if (next != nullptr) {
-    next->prev_ = entry->prev_;
-  }
   descriptor.relevant_entry_ = entry;
   descriptor.action_flag_ = JIT_UNREGISTER_FN;
-  descriptor.action_timestamp_ = timestamp;
-  ActionSequnlock(descriptor);
+  descriptor.timestamp_ = GetNextTimestamp(descriptor);
+  Sequnlock(descriptor);
 
-  (*register_code_ptr)();
+  NativeInfo::NotifyNativeDebugger();
 
-  // Ensure that clear below can not be reordered above the unlock above.
+  // Delete the entry.
+  JITCodeEntry* writable_entry = NativeInfo::Writable(entry);
+  CHECK_EQ(writable_entry->seqlock_.load(kNonRacingRelaxed) & 1, 0u) << "Expected valid entry";
+  // Release: Ensures that "next_" points to valid entry at any time in reader.
+  writable_entry->seqlock_.fetch_add(1, std::memory_order_release);  // Mark as invalid.
+  // Release: Ensures that the entry is seen as invalid before it's data is freed.
   std::atomic_thread_fence(std::memory_order_release);
-
-  // Aggressively clear the entry as an extra check of the synchronisation.
-  memset(entry, 0, sizeof(*entry));
-
-  delete entry;
-  if (free_symfile) {
-    delete[] symfile;
+  const uint8_t* symfile = entry->symfile_addr_;
+  writable_entry->symfile_addr_ = nullptr;
+  if (NativeInfo::kCopySymfileData && symfile != nullptr) {
+    NativeInfo::Free(symfile);
   }
-}
 
-static std::map<const DexFile*, JITCodeEntry*> g_dex_debug_entries GUARDED_BY(g_dex_debug_lock);
+  // Push the entry to the free list.
+  writable_entry->next_.store(descriptor.free_entries_, kNonRacingRelaxed);
+  writable_entry->prev_ = nullptr;
+  descriptor.free_entries_ = entry;
+}
 
 void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
   MutexLock mu(self, g_dex_debug_lock);
   DCHECK(dexfile != nullptr);
-  // This is just defensive check. The class linker should not register the dex file twice.
-  if (g_dex_debug_entries.count(dexfile) == 0) {
-    const ArrayRef<const uint8_t> symfile(dexfile->Begin(), dexfile->Size());
-    JITCodeEntry* entry = CreateJITCodeEntryInternal(__dex_debug_descriptor,
-                                                     __dex_debug_register_code_ptr,
-                                                     symfile,
-                                                     /*copy_symfile=*/ false);
-    g_dex_debug_entries.emplace(dexfile, entry);
-  }
+  const ArrayRef<const uint8_t> symfile(dexfile->Begin(), dexfile->Size());
+  CreateJITCodeEntryInternal<DexNativeInfo>(symfile);
 }
 
 void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile) {
   MutexLock mu(self, g_dex_debug_lock);
-  auto it = g_dex_debug_entries.find(dexfile);
+  DCHECK(dexfile != nullptr);
   // We register dex files in the class linker and free them in DexFile_closeDexFile, but
   // there might be cases where we load the dex file without using it in the class linker.
-  if (it != g_dex_debug_entries.end()) {
-    DeleteJITCodeEntryInternal(__dex_debug_descriptor,
-                               __dex_debug_register_code_ptr,
-                               /*entry=*/ it->second,
-                               /*free_symfile=*/ false);
-    g_dex_debug_entries.erase(it);
+  // On the other hand, single dex file might also be used with different class-loaders.
+  for (const JITCodeEntry* entry = __dex_debug_descriptor.head_; entry != nullptr; ) {
+    const JITCodeEntry* next = entry->next_;  // Save next pointer before we free the memory.
+    if (entry->symfile_addr_ == dexfile->Begin()) {
+      DeleteJITCodeEntryInternal<DexNativeInfo>(entry);
+    }
+    entry = next;
   }
 }
 
-// Mapping from handle to entry. Used to manage life-time of the entries.
-static std::multimap<const void*, JITCodeEntry*> g_jit_debug_entries GUARDED_BY(g_jit_debug_lock);
-
-// Number of entries added since last packing.  Used to pack entries in bulk.
-static size_t g_jit_num_unpacked_entries GUARDED_BY(g_jit_debug_lock) = 0;
-
-// We postpone removal so that it is done in bulk.
-static std::deque<const void*> g_jit_removed_entries GUARDED_BY(g_jit_debug_lock);
-
-// Split the JIT code cache into groups of fixed size and create singe JITCodeEntry for each group.
-// The start address of method's code determines which group it belongs to.  The end is irrelevant.
-// As a consequnce, newly added mini debug infos will be merged and old ones (GCed) will be pruned.
-static void MaybePackJitMiniDebugInfo(PackElfFileForJITFunction pack,
-                                      InstructionSet isa,
-                                      const InstructionSetFeatures* features)
-    REQUIRES(g_jit_debug_lock) {
-  // Size of memory range covered by each JITCodeEntry.
-  // The number of methods per entry is variable (depending on how many fit in that range).
-  constexpr uint32_t kGroupSize = 64 * KB;
-  // Even if there are no removed entries, we want to pack new entries on regular basis.
-  constexpr uint32_t kPackFrequency = 64;
-
-  std::deque<const void*>& removed_entries = g_jit_removed_entries;
-  std::sort(removed_entries.begin(), removed_entries.end());
-  if (removed_entries.empty() && g_jit_num_unpacked_entries < kPackFrequency) {
-    return;  // Nothing to do.
+// Splits the linked linked in to two parts:
+// The first part (including the static head pointer) is owned by the application.
+// The second part is owned by zygote and might be concurrently modified by it.
+//
+// We add two empty entries at the boundary which are never removed (app_tail, zygote_head).
+// These entries are needed to preserve the next/prev pointers in the linked list,
+// since zygote can not modify the application's data and vice versa.
+//
+// <------- owned by the application memory --------> <--- owned by zygote memory --->
+//         |----------------------|------------------|-------------|-----------------|
+// head -> | application_entries* | application_tail | zygote_head | zygote_entries* |
+//         |+---------------------|------------------|-------------|----------------+|
+//          |                                                                       |
+//          \-(new application entries)                        (new zygote entries)-/
+//
+// Zygote entries are inserted at the end, which means that repacked zygote entries
+// will still be seen by single forward iteration of the linked list (avoiding race).
+//
+// Application entries are inserted at the start, which introduces a repacking race,
+// but that is ok, since it is easy to read new entries from head in further pass.
+// The benefit is that this makes it fast to read only the new entries.
+//
+void NativeDebugInfoPreFork() {
+  CHECK(Runtime::Current()->IsZygote());
+  JITDescriptor& descriptor = JitNativeInfo::Descriptor();
+  if (descriptor.zygote_head_entry_ != nullptr) {
+    return;  // Already done - we need to do this only on the first fork.
   }
 
-  std::vector<ArrayRef<const uint8_t>> added_elf_files;
-  std::vector<const void*> removed_symbols;
-  auto added_it = g_jit_debug_entries.begin();
-  auto removed_it = removed_entries.begin();
-  while (added_it != g_jit_debug_entries.end()) {
-    // Collect all entries that have been added or removed within our memory range.
-    const void* group_ptr = AlignDown(added_it->first, kGroupSize);
-    added_elf_files.clear();
-    auto added_begin = added_it;
-    while (added_it != g_jit_debug_entries.end() &&
-           AlignDown(added_it->first, kGroupSize) == group_ptr) {
-      JITCodeEntry* entry = (added_it++)->second;
-      added_elf_files.emplace_back(entry->symfile_addr_, entry->symfile_size_);
+  // Create the zygote-owned head entry (with no ELF file).
+  // The data will be allocated from the current JIT memory (owned by zygote).
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);  // Needed to alloc entry.
+  const JITCodeEntry* zygote_head =
+    reinterpret_cast<const JITCodeEntry*>(JitNativeInfo::Alloc(sizeof(JITCodeEntry)));
+  CHECK(zygote_head != nullptr);
+  new (JitNativeInfo::Writable(zygote_head)) JITCodeEntry();  // Initialize.
+  InsertNewEntry<JitNativeInfo>(zygote_head, descriptor.head_);
+  descriptor.zygote_head_entry_ = zygote_head;
+
+  // Create the child-owned tail entry (with no ELF file).
+  // The data is statically allocated since it must be owned by the forked process.
+  InsertNewEntry<JitNativeInfo>(&descriptor.application_tail_entry_, descriptor.head_);
+}
+
+void NativeDebugInfoPostFork() {
+  CHECK(!Runtime::Current()->IsZygote());
+  JITDescriptor& descriptor = JitNativeInfo::Descriptor();
+  descriptor.free_entries_ = nullptr;  // Don't reuse zygote's entries.
+}
+
+// Size of JIT code range covered by each packed JITCodeEntry.
+static constexpr uint32_t kJitRepackGroupSize = 64 * KB;
+
+// Automatically call the repack method every 'n' new entries.
+static constexpr uint32_t kJitRepackFrequency = 64;
+static uint32_t g_jit_num_unpacked_entries = 0;
+
+// Split the JIT code cache into groups of fixed size and create single JITCodeEntry for each group.
+// The start address of method's code determines which group it belongs to.  The end is irrelevant.
+// New mini debug infos will be merged if possible, and entries for GCed functions will be removed.
+static void RepackEntries(bool compress_entries, ArrayRef<const void*> removed)
+    REQUIRES(g_jit_debug_lock) {
+  DCHECK(std::is_sorted(removed.begin(), removed.end()));
+  jit::Jit* jit = Runtime::Current()->GetJit();
+  if (jit == nullptr) {
+    return;
+  }
+  JITDescriptor& descriptor = __jit_debug_descriptor;
+  bool is_zygote = Runtime::Current()->IsZygote();
+
+  // Collect entries that we want to pack.
+  std::vector<const JITCodeEntry*> entries;
+  entries.reserve(2 * kJitRepackFrequency);
+  for (const JITCodeEntry* it = descriptor.head_; it != nullptr; it = it->next_) {
+    if (it == descriptor.zygote_head_entry_ && !is_zygote) {
+      break;  // Memory owned by the zygote process (read-only for an app).
     }
-    removed_symbols.clear();
-    while (removed_it != removed_entries.end() &&
-           AlignDown(*removed_it, kGroupSize) == group_ptr) {
-      removed_symbols.push_back(*(removed_it++));
+    if (it->allow_packing_) {
+      if (!compress_entries && it->is_compressed_ && removed.empty()) {
+        continue;  // If we are not compressing, also avoid decompressing.
+      }
+      entries.push_back(it);
+    }
+  }
+  auto cmp = [](const JITCodeEntry* l, const JITCodeEntry* r) { return l->addr_ < r->addr_; };
+  std::sort(entries.begin(), entries.end(), cmp);  // Sort by address.
+
+  // Process the entries in groups (each spanning memory range of size kJitRepackGroupSize).
+  for (auto group_it = entries.begin(); group_it != entries.end();) {
+    const void* group_ptr = AlignDown((*group_it)->addr_, kJitRepackGroupSize);
+    const void* group_end = reinterpret_cast<const uint8_t*>(group_ptr) + kJitRepackGroupSize;
+
+    // Find all entries in this group (each entry is an in-memory ELF file).
+    auto begin = group_it;
+    auto end = std::find_if(begin, entries.end(), [=](auto* e) { return e->addr_ >= group_end; });
+    CHECK(end > begin);
+    ArrayRef<const JITCodeEntry*> elfs(&*begin, end - begin);
+
+    // Find all symbols that have been removed in this memory range.
+    auto removed_begin = std::lower_bound(removed.begin(), removed.end(), group_ptr);
+    auto removed_end = std::lower_bound(removed.begin(), removed.end(), group_end);
+    CHECK(removed_end >= removed_begin);
+    ArrayRef<const void*> removed_subset(&*removed_begin, removed_end - removed_begin);
+
+    // Optimization: Don't compress the last group since it will likely change again soon.
+    bool compress = compress_entries && end != entries.end();
+
+    // Bail out early if there is nothing to do for this group.
+    if (elfs.size() == 1 && removed_subset.empty() && (*begin)->is_compressed_ == compress) {
+      group_it = end;  // Go to next group.
+      continue;
     }
 
-    // Create new singe JITCodeEntry that covers this memory range.
-    if (added_elf_files.size() == 1 && removed_symbols.size() == 0) {
-      continue;  // Nothing changed in this memory range.
-    }
-    uint64_t start_time = MilliTime();
-    size_t symbols;
-    std::vector<uint8_t> packed = pack(isa, features, added_elf_files, removed_symbols, &symbols);
+    // Create new single JITCodeEntry that covers this memory range.
+    uint64_t start_time = MicroTime();
+    size_t live_symbols;
+    std::vector<uint8_t> packed = jit->GetJitCompiler()->PackElfFileForJIT(
+        elfs, removed_subset, compress, &live_symbols);
     VLOG(jit)
-        << "JIT mini-debug-info packed"
+        << "JIT mini-debug-info repacked"
         << " for " << group_ptr
-        << " in " << MilliTime() - start_time << "ms"
-        << " files=" << added_elf_files.size()
-        << " removed=" << removed_symbols.size()
-        << " symbols=" << symbols
-        << " size=" << PrettySize(packed.size());
+        << " in " << MicroTime() - start_time << "us"
+        << " elfs=" << elfs.size()
+        << " dead=" << removed_subset.size()
+        << " live=" << live_symbols
+        << " size=" << packed.size() << (compress ? "(lzma)" : "");
 
     // Replace the old entries with the new one (with their lifetime temporally overlapping).
-    JITCodeEntry* packed_entry = CreateJITCodeEntryInternal(
-        __jit_debug_descriptor,
-        __jit_debug_register_code_ptr,
-        ArrayRef<const uint8_t>(packed),
-        /*copy_symfile=*/ true);
-    for (auto it = added_begin; it != added_it; ++it) {
-      DeleteJITCodeEntryInternal(__jit_debug_descriptor,
-                                 __jit_debug_register_code_ptr,
-                                 /*entry=*/ it->second,
-                                 /*free_symfile=*/ true);
+    CreateJITCodeEntryInternal<JitNativeInfo>(ArrayRef<const uint8_t>(packed),
+                                              /*addr_=*/ group_ptr,
+                                              /*allow_packing_=*/ true,
+                                              /*is_compressed_=*/ compress);
+    for (auto it : elfs) {
+      DeleteJITCodeEntryInternal<JitNativeInfo>(/*entry=*/ it);
     }
-    g_jit_debug_entries.erase(added_begin, added_it);
-    g_jit_debug_entries.emplace(group_ptr, packed_entry);
+    group_it = end;  // Go to next group.
   }
-  CHECK(added_it == g_jit_debug_entries.end());
-  CHECK(removed_it == removed_entries.end());
-  removed_entries.clear();
   g_jit_num_unpacked_entries = 0;
 }
 
-void AddNativeDebugInfoForJit(Thread* self,
-                              const void* code_ptr,
+void AddNativeDebugInfoForJit(const void* code_ptr,
                               const std::vector<uint8_t>& symfile,
-                              PackElfFileForJITFunction pack,
-                              InstructionSet isa,
-                              const InstructionSetFeatures* features) {
-  MutexLock mu(self, g_jit_debug_lock);
+                              bool allow_packing) {
+  MutexLock mu(Thread::Current(), g_jit_debug_lock);
   DCHECK_NE(symfile.size(), 0u);
 
-  MaybePackJitMiniDebugInfo(pack, isa, features);
-
-  JITCodeEntry* entry = CreateJITCodeEntryInternal(
-      __jit_debug_descriptor,
-      __jit_debug_register_code_ptr,
-      ArrayRef<const uint8_t>(symfile),
-      /*copy_symfile=*/ true);
+  CreateJITCodeEntryInternal<JitNativeInfo>(ArrayRef<const uint8_t>(symfile),
+                                            /*addr=*/ code_ptr,
+                                            /*allow_packing=*/ allow_packing,
+                                            /*is_compressed=*/ false);
 
   VLOG(jit)
       << "JIT mini-debug-info added"
       << " for " << code_ptr
       << " size=" << PrettySize(symfile.size());
 
-  // We don't provide code_ptr for type debug info, which means we cannot free it later.
-  // (this only happens when --generate-debug-info flag is enabled for the purpose
-  // of being debugged with gdb; it does not happen for debuggable apps by default).
-  if (code_ptr != nullptr) {
-    g_jit_debug_entries.emplace(code_ptr, entry);
-    // Count how many entries we have added since the last mini-debug-info packing.
-    // We avoid g_jit_debug_entries.size() here because it can shrink during packing.
-    g_jit_num_unpacked_entries++;
+  // Automatically repack entries on regular basis to save space.
+  // Pack (but don't compress) recent entries - this is cheap and reduces memory use by ~4x.
+  // We delay compression until after GC since it is more expensive (and saves further ~4x).
+  // Always compress zygote, since it does not GC and we want to keep the high-water mark low.
+  if (++g_jit_num_unpacked_entries >= kJitRepackFrequency) {
+    bool is_zygote = Runtime::Current()->IsZygote();
+    RepackEntries(/*compress_entries=*/ is_zygote, /*removed=*/ ArrayRef<const void*>());
   }
 }
 
-void RemoveNativeDebugInfoForJit(Thread* self, const void* code_ptr) {
-  MutexLock mu(self, g_jit_debug_lock);
-  // We generate JIT native debug info only if the right runtime flags are enabled,
-  // but we try to remove it unconditionally whenever code is freed from JIT cache.
-  if (!g_jit_debug_entries.empty()) {
-    g_jit_removed_entries.push_back(code_ptr);
+void RemoveNativeDebugInfoForJit(ArrayRef<const void*> removed) {
+  MutexLock mu(Thread::Current(), g_jit_debug_lock);
+  RepackEntries(/*compress_entries=*/ true, removed);
+
+  // Remove entries which are not allowed to be packed (containing single method each).
+  for (const JITCodeEntry* it = __jit_debug_descriptor.head_; it != nullptr;) {
+    const JITCodeEntry* next = it->next_;
+    if (!it->allow_packing_ && std::binary_search(removed.begin(), removed.end(), it->addr_)) {
+      DeleteJITCodeEntryInternal<JitNativeInfo>(/*entry=*/ it);
+    }
+    it = next;
   }
 }
 
 size_t GetJitMiniDebugInfoMemUsage() {
   MutexLock mu(Thread::Current(), g_jit_debug_lock);
   size_t size = 0;
-  for (auto entry : g_jit_debug_entries) {
-    size += sizeof(JITCodeEntry) + entry.second->symfile_size_ + /*map entry*/ 4 * sizeof(void*);
+  for (const JITCodeEntry* it = __jit_debug_descriptor.head_; it != nullptr; it = it->next_) {
+    size += sizeof(JITCodeEntry) + it->symfile_size_;
   }
   return size;
 }
 
+Mutex* GetNativeDebugInfoLock() {
+  return &g_jit_debug_lock;
+}
+
 }  // namespace art
diff --git a/runtime/jit/debugger_interface.h b/runtime/jit/debugger_interface.h
index 51b7041..477d58c 100644
--- a/runtime/jit/debugger_interface.h
+++ b/runtime/jit/debugger_interface.h
@@ -17,6 +17,7 @@
 #ifndef ART_RUNTIME_JIT_DEBUGGER_INTERFACE_H_
 #define ART_RUNTIME_JIT_DEBUGGER_INTERFACE_H_
 
+#include <functional>
 #include <inttypes.h>
 #include <vector>
 
@@ -27,16 +28,18 @@
 namespace art {
 
 class DexFile;
+class Mutex;
 class Thread;
+struct JITCodeEntry;
 
-// This method is declared in the compiler library.
-// We need to pass it by pointer to be able to call it from runtime.
-typedef std::vector<uint8_t> PackElfFileForJITFunction(
-    InstructionSet isa,
-    const InstructionSetFeatures* features,
-    std::vector<ArrayRef<const uint8_t>>& added_elf_files,
-    std::vector<const void*>& removed_symbols,
-    /*out*/ size_t* num_symbols);
+// Must be called before zygote forks.
+// Used to ensure that zygote's mini-debug-info can be shared with apps.
+void NativeDebugInfoPreFork();
+
+// Must be called after zygote forks.
+void NativeDebugInfoPostFork();
+
+ArrayRef<const uint8_t> GetJITCodeEntrySymFile(const JITCodeEntry*);
 
 // Notify native tools (e.g. libunwind) that DEX file has been opened.
 void AddNativeDebugInfoForDex(Thread* self, const DexFile* dexfile);
@@ -44,20 +47,26 @@
 // Notify native tools (e.g. libunwind) that DEX file has been closed.
 void RemoveNativeDebugInfoForDex(Thread* self, const DexFile* dexfile);
 
-// Notify native tools (e.g. libunwind) that JIT has compiled a new method.
+// Notify native tools (e.g. libunwind) that JIT has compiled a single new method.
 // The method will make copy of the passed ELF file (to shrink it to the minimum size).
-void AddNativeDebugInfoForJit(Thread* self,
-                              const void* code_ptr,
+// If packing is allowed, the ELF file might be merged with others to save space
+// (however, this drops all ELF sections other than symbol names and unwinding info).
+void AddNativeDebugInfoForJit(const void* code_ptr,
                               const std::vector<uint8_t>& symfile,
-                              PackElfFileForJITFunction pack,
-                              InstructionSet isa,
-                              const InstructionSetFeatures* features);
+                              bool allow_packing)
+    REQUIRES_SHARED(Locks::jit_lock_);  // Might need JIT code cache to allocate memory.
 
 // Notify native tools (e.g. libunwind) that JIT code has been garbage collected.
-void RemoveNativeDebugInfoForJit(Thread* self, const void* code_ptr);
+void RemoveNativeDebugInfoForJit(ArrayRef<const void*> removed_code_ptrs)
+    REQUIRES_SHARED(Locks::jit_lock_);  // Might need JIT code cache to allocate memory.
 
 // Returns approximate memory used by debug info for JIT code.
-size_t GetJitMiniDebugInfoMemUsage();
+size_t GetJitMiniDebugInfoMemUsage() REQUIRES_SHARED(Locks::jit_lock_);
+
+// Get the lock which protects the native debug info.
+// Used only in tests to unwind while the JIT thread is running.
+// TODO: Unwinding should be race-free. Remove this.
+Mutex* GetNativeDebugInfoLock();
 
 }  // namespace art
 
diff --git a/runtime/jit/jit-inl.h b/runtime/jit/jit-inl.h
index 80324ad..e6b4095 100644
--- a/runtime/jit/jit-inl.h
+++ b/runtime/jit/jit-inl.h
@@ -46,14 +46,12 @@
   // NB: The method needs to see the transitions of the counter past the thresholds.
   uint32_t old_batch = RoundDown(old_count, kJitSamplesBatchSize);  // Clear lower bits.
   uint32_t new_batch = RoundDown(new_count, kJitSamplesBatchSize);  // Clear lower bits.
-  if (UNLIKELY(old_batch == 0)) {
-    // For low sample counts, we check every time (which is important for tests).
+  if (UNLIKELY(kSlowMode)) {  // Check every time in slow-debug mode.
     if (!MaybeCompileMethod(self, method, old_count, new_count, with_backedges)) {
       // Tests may check that the counter is 0 for methods that we never compile.
       return;  // Ignore the samples for now and retry later.
     }
   } else if (UNLIKELY(old_batch != new_batch)) {
-    // For high sample counts, we check only when we move past the batch boundary.
     if (!MaybeCompileMethod(self, method, old_batch, new_batch, with_backedges)) {
       // OSR compilation will ignore the samples if they don't have backedges.
       return;  // Ignore the samples for now and retry later.
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index b828aaf..8d434b8 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -22,6 +22,7 @@
 #include "base/enums.h"
 #include "base/file_utils.h"
 #include "base/logging.h"  // For VLOG.
+#include "base/memfd.h"
 #include "base/memory_tool.h"
 #include "base/runtime_debug.h"
 #include "base/scoped_flock.h"
@@ -29,7 +30,10 @@
 #include "class_root.h"
 #include "debugger.h"
 #include "dex/type_lookup_table.h"
+#include "gc/space/image_space.h"
+#include "entrypoints/entrypoint_utils-inl.h"
 #include "entrypoints/runtime_asm_entrypoints.h"
+#include "image-inl.h"
 #include "interpreter/interpreter.h"
 #include "jit-inl.h"
 #include "jit_code_cache.h"
@@ -39,6 +43,7 @@
 #include "oat_file.h"
 #include "oat_file_manager.h"
 #include "oat_quick_method_header.h"
+#include "profile/profile_boot_info.h"
 #include "profile/profile_compilation_info.h"
 #include "profile_saver.h"
 #include "runtime.h"
@@ -48,42 +53,44 @@
 #include "thread-inl.h"
 #include "thread_list.h"
 
+using android::base::unique_fd;
+
 namespace art {
 namespace jit {
 
 static constexpr bool kEnableOnStackReplacement = true;
 
+// Maximum permitted threshold value.
+static constexpr uint32_t kJitMaxThreshold = std::numeric_limits<uint16_t>::max();
+
 // Different compilation threshold constants. These can be overridden on the command line.
-static constexpr size_t kJitDefaultCompileThreshold           = 10000;  // Non-debug default.
-static constexpr size_t kJitStressDefaultCompileThreshold     = 100;    // Fast-debug build.
-static constexpr size_t kJitSlowStressDefaultCompileThreshold = 2;      // Slow-debug build.
+
+// Non-debug default
+static constexpr uint32_t kJitDefaultCompileThreshold = 20 * kJitSamplesBatchSize;
+// Fast-debug build.
+static constexpr uint32_t kJitStressDefaultCompileThreshold = 2 * kJitSamplesBatchSize;
+// Slow-debug build.
+static constexpr uint32_t kJitSlowStressDefaultCompileThreshold = 2;
+
+// Different warm-up threshold constants. These default to the equivalent compile thresholds divided
+// by 2, but can be overridden at the command-line.
+static constexpr uint32_t kJitDefaultWarmUpThreshold = kJitDefaultCompileThreshold / 2;
+static constexpr uint32_t kJitStressDefaultWarmUpThreshold = kJitStressDefaultCompileThreshold / 2;
+static constexpr uint32_t kJitSlowStressDefaultWarmUpThreshold =
+    kJitSlowStressDefaultCompileThreshold / 2;
+
+DEFINE_RUNTIME_DEBUG_FLAG(Jit, kSlowMode);
 
 // JIT compiler
 void* Jit::jit_library_handle_ = nullptr;
-void* Jit::jit_compiler_handle_ = nullptr;
-void* (*Jit::jit_load_)(void) = nullptr;
-void (*Jit::jit_unload_)(void*) = nullptr;
-bool (*Jit::jit_compile_method_)(void*, ArtMethod*, Thread*, bool, bool) = nullptr;
-void (*Jit::jit_types_loaded_)(void*, mirror::Class**, size_t count) = nullptr;
-bool (*Jit::jit_generate_debug_info_)(void*) = nullptr;
-void (*Jit::jit_update_options_)(void*) = nullptr;
-
-struct StressModeHelper {
-  DECLARE_RUNTIME_DEBUG_FLAG(kSlowMode);
-};
-DEFINE_RUNTIME_DEBUG_FLAG(StressModeHelper, kSlowMode);
-
-uint32_t JitOptions::RoundUpThreshold(uint32_t threshold) {
-  if (threshold > kJitSamplesBatchSize) {
-    threshold = RoundUp(threshold, kJitSamplesBatchSize);
-  }
-  CHECK_LE(threshold, std::numeric_limits<uint16_t>::max());
-  return threshold;
-}
+JitCompilerInterface* Jit::jit_compiler_ = nullptr;
+JitCompilerInterface* (*Jit::jit_load_)(void) = nullptr;
 
 JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
   auto* jit_options = new JitOptions;
   jit_options->use_jit_compilation_ = options.GetOrDefault(RuntimeArgumentMap::UseJitCompilation);
+  jit_options->use_tiered_jit_compilation_ =
+      options.GetOrDefault(RuntimeArgumentMap::UseTieredJitCompilation);
 
   jit_options->code_cache_initial_capacity_ =
       options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheInitialCapacity);
@@ -96,35 +103,66 @@
   jit_options->thread_pool_pthread_priority_ =
       options.GetOrDefault(RuntimeArgumentMap::JITPoolThreadPthreadPriority);
 
+  // Set default compile threshold to aid with sanity-checking defaults.
+  jit_options->compile_threshold_ =
+      kIsDebugBuild
+      ? (Jit::kSlowMode
+         ? kJitSlowStressDefaultCompileThreshold
+         : kJitStressDefaultCompileThreshold)
+      : kJitDefaultCompileThreshold;
+
+  // When not running in slow-mode, thresholds are quantized to kJitSamplesBatchSize.
+  const uint32_t kJitThresholdStep = Jit::kSlowMode ? 1u : kJitSamplesBatchSize;
+
+  // Set default warm-up threshold to aid with sanity-checking defaults.
+  jit_options->warmup_threshold_ =
+      kIsDebugBuild ? (Jit::kSlowMode
+                       ? kJitSlowStressDefaultWarmUpThreshold
+                       : kJitStressDefaultWarmUpThreshold)
+      : kJitDefaultWarmUpThreshold;
+
+  // Warmup threshold should be less than compile threshold (so long as compile threshold is not
+  // zero == JIT-on-first-use).
+  DCHECK_LT(jit_options->warmup_threshold_, jit_options->compile_threshold_);
+  DCHECK_EQ(RoundUp(jit_options->warmup_threshold_, kJitThresholdStep),
+            jit_options->warmup_threshold_);
+
   if (options.Exists(RuntimeArgumentMap::JITCompileThreshold)) {
     jit_options->compile_threshold_ = *options.Get(RuntimeArgumentMap::JITCompileThreshold);
-  } else {
-    jit_options->compile_threshold_ =
-        kIsDebugBuild
-            ? (StressModeHelper::kSlowMode
-                   ? kJitSlowStressDefaultCompileThreshold
-                   : kJitStressDefaultCompileThreshold)
-            : kJitDefaultCompileThreshold;
   }
-  jit_options->compile_threshold_ = RoundUpThreshold(jit_options->compile_threshold_);
+  jit_options->compile_threshold_ = RoundUp(jit_options->compile_threshold_, kJitThresholdStep);
 
   if (options.Exists(RuntimeArgumentMap::JITWarmupThreshold)) {
     jit_options->warmup_threshold_ = *options.Get(RuntimeArgumentMap::JITWarmupThreshold);
-  } else {
-    jit_options->warmup_threshold_ = jit_options->compile_threshold_ / 2;
   }
-  jit_options->warmup_threshold_ = RoundUpThreshold(jit_options->warmup_threshold_);
+  jit_options->warmup_threshold_ = RoundUp(jit_options->warmup_threshold_, kJitThresholdStep);
 
   if (options.Exists(RuntimeArgumentMap::JITOsrThreshold)) {
     jit_options->osr_threshold_ = *options.Get(RuntimeArgumentMap::JITOsrThreshold);
   } else {
     jit_options->osr_threshold_ = jit_options->compile_threshold_ * 2;
-    if (jit_options->osr_threshold_ > std::numeric_limits<uint16_t>::max()) {
+    if (jit_options->osr_threshold_ > kJitMaxThreshold) {
       jit_options->osr_threshold_ =
-          RoundDown(std::numeric_limits<uint16_t>::max(), kJitSamplesBatchSize);
+          RoundDown(kJitMaxThreshold, kJitThresholdStep);
     }
   }
-  jit_options->osr_threshold_ = RoundUpThreshold(jit_options->osr_threshold_);
+  jit_options->osr_threshold_ = RoundUp(jit_options->osr_threshold_, kJitThresholdStep);
+
+  // Enforce ordering constraints between thresholds if not jit-on-first-use (when the compile
+  // threshold is 0).
+  if (jit_options->compile_threshold_ != 0) {
+    // Clamp thresholds such that OSR > compile > warm-up (see Jit::MaybeCompileMethod).
+    jit_options->osr_threshold_ = std::clamp(jit_options->osr_threshold_,
+                                             2u * kJitThresholdStep,
+                                             RoundDown(kJitMaxThreshold, kJitThresholdStep));
+    jit_options->compile_threshold_ = std::clamp(jit_options->compile_threshold_,
+                                                 kJitThresholdStep,
+                                                 jit_options->osr_threshold_ - kJitThresholdStep);
+    jit_options->warmup_threshold_ =
+        std::clamp(jit_options->warmup_threshold_,
+                   0u,
+                   jit_options->compile_threshold_ - kJitThresholdStep);
+  }
 
   if (options.Exists(RuntimeArgumentMap::JITPriorityThreadWeight)) {
     jit_options->priority_thread_weight_ =
@@ -176,17 +214,21 @@
 Jit::Jit(JitCodeCache* code_cache, JitOptions* options)
     : code_cache_(code_cache),
       options_(options),
+      boot_completed_lock_("Jit::boot_completed_lock_"),
       cumulative_timings_("JIT timings"),
       memory_use_("Memory used for compilation", 16),
-      lock_("JIT memory use lock") {}
+      lock_("JIT memory use lock"),
+      zygote_mapping_methods_(),
+      fd_methods_(-1),
+      fd_methods_size_(0) {}
 
 Jit* Jit::Create(JitCodeCache* code_cache, JitOptions* options) {
   if (jit_load_ == nullptr) {
     LOG(WARNING) << "Not creating JIT: library not loaded";
     return nullptr;
   }
-  jit_compiler_handle_ = (jit_load_)();
-  if (jit_compiler_handle_ == nullptr) {
+  jit_compiler_ = (jit_load_)();
+  if (jit_compiler_ == nullptr) {
     LOG(WARNING) << "Not creating JIT: failed to allocate a compiler";
     return nullptr;
   }
@@ -197,7 +239,7 @@
   // We aren't able to keep method pointers live during the instrumentation method entry trampoline
   // so we will just disable jit-gc if we are doing that.
   if (code_cache->GetGarbageCollectCode()) {
-    code_cache->SetGarbageCollectCode(!jit_generate_debug_info_(jit_compiler_handle_) &&
+    code_cache->SetGarbageCollectCode(!jit_compiler_->GenerateDebugInfo() &&
         !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled());
   }
 
@@ -207,6 +249,15 @@
       << ", compile_threshold=" << options->GetCompileThreshold()
       << ", profile_saver_options=" << options->GetProfileSaverOptions();
 
+  // We want to know whether the compiler is compiling baseline, as this
+  // affects how we GC ProfilingInfos.
+  for (const std::string& option : Runtime::Current()->GetCompilerOptions()) {
+    if (option == "--baseline") {
+      options->SetUseBaselineCompiler();
+      break;
+    }
+  }
+
   // Notify native debugger about the classes already loaded before the creation of the jit.
   jit->DumpTypeInfoForLoadedTypes(Runtime::Current()->GetClassLinker());
   return jit.release();
@@ -231,22 +282,14 @@
     *error_msg = oss.str();
     return false;
   }
-  bool all_resolved = true;
-  all_resolved = all_resolved && LoadSymbol(&jit_load_, "jit_load", error_msg);
-  all_resolved = all_resolved && LoadSymbol(&jit_unload_, "jit_unload", error_msg);
-  all_resolved = all_resolved && LoadSymbol(&jit_compile_method_, "jit_compile_method", error_msg);
-  all_resolved = all_resolved && LoadSymbol(&jit_types_loaded_, "jit_types_loaded", error_msg);
-  all_resolved = all_resolved && LoadSymbol(&jit_update_options_, "jit_update_options", error_msg);
-  all_resolved = all_resolved &&
-      LoadSymbol(&jit_generate_debug_info_, "jit_generate_debug_info", error_msg);
-  if (!all_resolved) {
+  if (!LoadSymbol(&jit_load_, "jit_load", error_msg)) {
     dlclose(jit_library_handle_);
     return false;
   }
   return true;
 }
 
-bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr) {
+bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr, bool prejit) {
   DCHECK(Runtime::Current()->UseJitCompilation());
   DCHECK(!method->IsRuntimeMethod());
 
@@ -259,6 +302,15 @@
     return false;
   }
 
+  if (!method->IsCompilable()) {
+    DCHECK(method->GetDeclaringClass()->IsObsoleteObject() ||
+           method->IsProxyMethod()) << method->PrettyMethod();
+    VLOG(jit) << "JIT not compiling " << method->PrettyMethod() << " due to method being made "
+              << "obsolete while waiting for JIT task to run. This probably happened due to "
+              << "concurrent structural class redefinition.";
+    return false;
+  }
+
   // Don't compile the method if we are supposed to be deoptimized.
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
   if (instrumentation->AreAllMethodsDeoptimized() || instrumentation->IsDeoptimized(method)) {
@@ -266,17 +318,26 @@
     return false;
   }
 
+  JitMemoryRegion* region = GetCodeCache()->GetCurrentRegion();
+  if (osr && GetCodeCache()->IsSharedRegion(*region)) {
+    VLOG(jit) << "JIT not osr compiling "
+              << method->PrettyMethod()
+              << " due to using shared region";
+    return false;
+  }
+
   // If we get a request to compile a proxy method, we pass the actual Java method
   // of that proxy method, as the compiler does not expect a proxy method.
   ArtMethod* method_to_compile = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
-  if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr)) {
+  if (!code_cache_->NotifyCompilationOf(method_to_compile, self, osr, prejit, baseline, region)) {
     return false;
   }
 
   VLOG(jit) << "Compiling method "
             << ArtMethod::PrettyMethod(method_to_compile)
-            << " osr=" << std::boolalpha << osr;
-  bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, baseline, osr);
+            << " osr=" << std::boolalpha << osr
+            << " baseline=" << std::boolalpha << baseline;
+  bool success = jit_compiler_->CompileMethod(self, region, method_to_compile, baseline, osr);
   code_cache_->DoneCompiling(method_to_compile, self, osr);
   if (!success) {
     VLOG(jit) << "Failed to compile method "
@@ -303,7 +364,6 @@
 
 void Jit::DeleteThreadPool() {
   Thread* self = Thread::Current();
-  DCHECK(Runtime::Current()->IsShuttingDown(self));
   if (thread_pool_ != nullptr) {
     std::unique_ptr<ThreadPool> pool;
     {
@@ -353,9 +413,9 @@
     Runtime::Current()->DumpDeoptimizations(LOG_STREAM(INFO));
   }
   DeleteThreadPool();
-  if (jit_compiler_handle_ != nullptr) {
-    jit_unload_(jit_compiler_handle_);
-    jit_compiler_handle_ = nullptr;
+  if (jit_compiler_ != nullptr) {
+    delete jit_compiler_;
+    jit_compiler_ = nullptr;
   }
   if (jit_library_handle_ != nullptr) {
     dlclose(jit_library_handle_);
@@ -369,9 +429,8 @@
     return;
   }
   jit::Jit* jit = Runtime::Current()->GetJit();
-  if (jit_generate_debug_info_(jit->jit_compiler_handle_)) {
-    DCHECK(jit->jit_types_loaded_ != nullptr);
-    jit->jit_types_loaded_(jit->jit_compiler_handle_, &type, 1);
+  if (jit->jit_compiler_->GenerateDebugInfo()) {
+    jit_compiler_->TypesLoaded(&type, 1);
   }
 }
 
@@ -384,12 +443,12 @@
     std::vector<mirror::Class*> classes_;
   };
 
-  if (jit_generate_debug_info_(jit_compiler_handle_)) {
+  if (jit_compiler_->GenerateDebugInfo()) {
     ScopedObjectAccess so(Thread::Current());
 
     CollectClasses visitor;
     linker->VisitClasses(&visitor);
-    jit_types_loaded_(jit_compiler_handle_, visitor.classes_.data(), visitor.classes_.size());
+    jit_compiler_->TypesLoaded(visitor.classes_.data(), visitor.classes_.size());
   }
 }
 
@@ -400,15 +459,105 @@
                                    const char* shorty,
                                    Thread* self);
 
+OsrData* Jit::PrepareForOsr(ArtMethod* method, uint32_t dex_pc, uint32_t* vregs) {
+  if (!kEnableOnStackReplacement) {
+    return nullptr;
+  }
+
+  // Cheap check if the method has been compiled already. That's an indicator that we should
+  // osr into it.
+  if (!GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
+    return nullptr;
+  }
+
+  // Fetch some data before looking up for an OSR method. We don't want thread
+  // suspension once we hold an OSR method, as the JIT code cache could delete the OSR
+  // method while we are being suspended.
+  CodeItemDataAccessor accessor(method->DexInstructionData());
+  const size_t number_of_vregs = accessor.RegistersSize();
+  std::string method_name(VLOG_IS_ON(jit) ? method->PrettyMethod() : "");
+  OsrData* osr_data = nullptr;
+
+  {
+    ScopedAssertNoThreadSuspension sts("Holding OSR method");
+    const OatQuickMethodHeader* osr_method = GetCodeCache()->LookupOsrMethodHeader(method);
+    if (osr_method == nullptr) {
+      // No osr method yet, just return to the interpreter.
+      return nullptr;
+    }
+
+    CodeInfo code_info(osr_method);
+
+    // Find stack map starting at the target dex_pc.
+    StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc);
+    if (!stack_map.IsValid()) {
+      // There is no OSR stack map for this dex pc offset. Just return to the interpreter in the
+      // hope that the next branch has one.
+      return nullptr;
+    }
+
+    // We found a stack map, now fill the frame with dex register values from the interpreter's
+    // shadow frame.
+    DexRegisterMap vreg_map = code_info.GetDexRegisterMapOf(stack_map);
+    DCHECK_EQ(vreg_map.size(), number_of_vregs);
+
+    size_t frame_size = osr_method->GetFrameSizeInBytes();
+
+    // Allocate memory to put shadow frame values. The osr stub will copy that memory to
+    // stack.
+    // Note that we could pass the shadow frame to the stub, and let it copy the values there,
+    // but that is engineering complexity not worth the effort for something like OSR.
+    osr_data = reinterpret_cast<OsrData*>(malloc(sizeof(OsrData) + frame_size));
+    if (osr_data == nullptr) {
+      return nullptr;
+    }
+    memset(osr_data, 0, sizeof(OsrData) + frame_size);
+    osr_data->frame_size = frame_size;
+
+    // Art ABI: ArtMethod is at the bottom of the stack.
+    osr_data->memory[0] = method;
+
+    if (vreg_map.empty()) {
+      // If we don't have a dex register map, then there are no live dex registers at
+      // this dex pc.
+    } else {
+      for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
+        DexRegisterLocation::Kind location = vreg_map[vreg].GetKind();
+        if (location == DexRegisterLocation::Kind::kNone) {
+          // Dex register is dead or uninitialized.
+          continue;
+        }
+
+        if (location == DexRegisterLocation::Kind::kConstant) {
+          // We skip constants because the compiled code knows how to handle them.
+          continue;
+        }
+
+        DCHECK_EQ(location, DexRegisterLocation::Kind::kInStack);
+
+        int32_t vreg_value = vregs[vreg];
+        int32_t slot_offset = vreg_map[vreg].GetStackOffsetInBytes();
+        DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
+        DCHECK_GT(slot_offset, 0);
+        (reinterpret_cast<int32_t*>(osr_data->memory))[slot_offset / sizeof(int32_t)] = vreg_value;
+      }
+    }
+
+    osr_data->native_pc = stack_map.GetNativePcOffset(kRuntimeISA) +
+        osr_method->GetEntryPoint();
+    VLOG(jit) << "Jumping to "
+              << method_name
+              << "@"
+              << std::hex << reinterpret_cast<uintptr_t>(osr_data->native_pc);
+  }
+  return osr_data;
+}
+
 bool Jit::MaybeDoOnStackReplacement(Thread* thread,
                                     ArtMethod* method,
                                     uint32_t dex_pc,
                                     int32_t dex_pc_offset,
                                     JValue* result) {
-  if (!kEnableOnStackReplacement) {
-    return false;
-  }
-
   Jit* jit = Runtime::Current()->GetJit();
   if (jit == nullptr) {
     return false;
@@ -426,110 +575,31 @@
   // and the JIT code cache do not expect methods from proxy classes.
   method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
 
-  // Cheap check if the method has been compiled already. That's an indicator that we should
-  // osr into it.
-  if (!jit->GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
+  // Before allowing the jump, make sure no code is actively inspecting the method to avoid
+  // jumping from interpreter to OSR while e.g. single stepping. Note that we could selectively
+  // disable OSR when single stepping, but that's currently hard to know at this point.
+  if (Runtime::Current()->GetRuntimeCallbacks()->IsMethodBeingInspected(method)) {
     return false;
   }
 
-  // Fetch some data before looking up for an OSR method. We don't want thread
-  // suspension once we hold an OSR method, as the JIT code cache could delete the OSR
-  // method while we are being suspended.
-  CodeItemDataAccessor accessor(method->DexInstructionData());
-  const size_t number_of_vregs = accessor.RegistersSize();
-  const char* shorty = method->GetShorty();
-  std::string method_name(VLOG_IS_ON(jit) ? method->PrettyMethod() : "");
-  void** memory = nullptr;
-  size_t frame_size = 0;
-  ShadowFrame* shadow_frame = nullptr;
-  const uint8_t* native_pc = nullptr;
+  ShadowFrame* shadow_frame = thread->GetManagedStack()->GetTopShadowFrame();
+  OsrData* osr_data = jit->PrepareForOsr(method,
+                                         dex_pc + dex_pc_offset,
+                                         shadow_frame->GetVRegArgs(0));
 
-  {
-    ScopedAssertNoThreadSuspension sts("Holding OSR method");
-    const OatQuickMethodHeader* osr_method = jit->GetCodeCache()->LookupOsrMethodHeader(method);
-    if (osr_method == nullptr) {
-      // No osr method yet, just return to the interpreter.
-      return false;
-    }
-
-    CodeInfo code_info(osr_method);
-
-    // Find stack map starting at the target dex_pc.
-    StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc + dex_pc_offset);
-    if (!stack_map.IsValid()) {
-      // There is no OSR stack map for this dex pc offset. Just return to the interpreter in the
-      // hope that the next branch has one.
-      return false;
-    }
-
-    // Before allowing the jump, make sure no code is actively inspecting the method to avoid
-    // jumping from interpreter to OSR while e.g. single stepping. Note that we could selectively
-    // disable OSR when single stepping, but that's currently hard to know at this point.
-    if (Runtime::Current()->GetRuntimeCallbacks()->IsMethodBeingInspected(method)) {
-      return false;
-    }
-
-    // We found a stack map, now fill the frame with dex register values from the interpreter's
-    // shadow frame.
-    DexRegisterMap vreg_map = code_info.GetDexRegisterMapOf(stack_map);
-
-    frame_size = osr_method->GetFrameSizeInBytes();
-
-    // Allocate memory to put shadow frame values. The osr stub will copy that memory to
-    // stack.
-    // Note that we could pass the shadow frame to the stub, and let it copy the values there,
-    // but that is engineering complexity not worth the effort for something like OSR.
-    memory = reinterpret_cast<void**>(malloc(frame_size));
-    CHECK(memory != nullptr);
-    memset(memory, 0, frame_size);
-
-    // Art ABI: ArtMethod is at the bottom of the stack.
-    memory[0] = method;
-
-    shadow_frame = thread->PopShadowFrame();
-    if (vreg_map.empty()) {
-      // If we don't have a dex register map, then there are no live dex registers at
-      // this dex pc.
-    } else {
-      DCHECK_EQ(vreg_map.size(), number_of_vregs);
-      for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
-        DexRegisterLocation::Kind location = vreg_map[vreg].GetKind();
-        if (location == DexRegisterLocation::Kind::kNone) {
-          // Dex register is dead or uninitialized.
-          continue;
-        }
-
-        if (location == DexRegisterLocation::Kind::kConstant) {
-          // We skip constants because the compiled code knows how to handle them.
-          continue;
-        }
-
-        DCHECK_EQ(location, DexRegisterLocation::Kind::kInStack);
-
-        int32_t vreg_value = shadow_frame->GetVReg(vreg);
-        int32_t slot_offset = vreg_map[vreg].GetStackOffsetInBytes();
-        DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
-        DCHECK_GT(slot_offset, 0);
-        (reinterpret_cast<int32_t*>(memory))[slot_offset / sizeof(int32_t)] = vreg_value;
-      }
-    }
-
-    native_pc = stack_map.GetNativePcOffset(kRuntimeISA) +
-        osr_method->GetEntryPoint();
-    VLOG(jit) << "Jumping to "
-              << method_name
-              << "@"
-              << std::hex << reinterpret_cast<uintptr_t>(native_pc);
+  if (osr_data == nullptr) {
+    return false;
   }
 
   {
+    thread->PopShadowFrame();
     ManagedStack fragment;
     thread->PushManagedStackFragment(&fragment);
-    (*art_quick_osr_stub)(memory,
-                          frame_size,
-                          native_pc,
+    (*art_quick_osr_stub)(osr_data->memory,
+                          osr_data->frame_size,
+                          osr_data->native_pc,
                           result,
-                          shorty,
+                          method->GetShorty(),
                           thread);
 
     if (UNLIKELY(thread->GetException() == Thread::GetDeoptimizationException())) {
@@ -537,9 +607,9 @@
     }
     thread->PopManagedStackFragment(fragment);
   }
-  free(memory);
+  free(osr_data);
   thread->PushShadowFrame(shadow_frame);
-  VLOG(jit) << "Done running OSR code for " << method_name;
+  VLOG(jit) << "Done running OSR code for " << method->PrettyMethod();
   return true;
 }
 
@@ -554,6 +624,135 @@
   memory_use_.AddValue(bytes);
 }
 
+void Jit::NotifyZygoteCompilationDone() {
+  if (fd_methods_ == -1) {
+    return;
+  }
+
+  size_t offset = 0;
+  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
+    const ImageHeader& header = space->GetImageHeader();
+    const ImageSection& section = header.GetMethodsSection();
+    // Because mremap works at page boundaries, we can only handle methods
+    // within a page range. For methods that falls above or below the range,
+    // the child processes will copy their contents to their private mapping
+    // in `child_mapping_methods`. See `MapBootImageMethods`.
+    uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
+    uint8_t* page_end =
+        AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
+    if (page_end > page_start) {
+      uint64_t capacity = page_end - page_start;
+      memcpy(zygote_mapping_methods_.Begin() + offset, page_start, capacity);
+      offset += capacity;
+    }
+  }
+
+  // Do an msync to ensure we are not affected by writes still being in caches.
+  if (msync(zygote_mapping_methods_.Begin(), fd_methods_size_, MS_SYNC) != 0) {
+    PLOG(WARNING) << "Failed to sync boot image methods memory";
+    code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedFailure);
+    return;
+  }
+
+  // We don't need the shared mapping anymore, and we need to drop it in case
+  // the file hasn't been sealed writable.
+  zygote_mapping_methods_ = MemMap::Invalid();
+
+  // Seal writes now. Zygote and children will map the memory private in order
+  // to write to it.
+  if (fcntl(fd_methods_, F_ADD_SEALS, F_SEAL_SEAL | F_SEAL_WRITE) == -1) {
+    PLOG(WARNING) << "Failed to seal boot image methods file descriptor";
+    code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedFailure);
+    return;
+  }
+
+  std::string error_str;
+  MemMap child_mapping_methods = MemMap::MapFile(
+      fd_methods_size_,
+      PROT_READ | PROT_WRITE,
+      MAP_PRIVATE,
+      fd_methods_,
+      /* start= */ 0,
+      /* low_4gb= */ false,
+      "boot-image-methods",
+      &error_str);
+
+  if (!child_mapping_methods.IsValid()) {
+    LOG(WARNING) << "Failed to create child mapping of boot image methods: " << error_str;
+    code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedFailure);
+    return;
+  }
+
+  // Ensure the contents are the same as before: there was a window between
+  // the memcpy and the sealing where other processes could have changed the
+  // contents.
+  // Note this would not be needed if we could have used F_SEAL_FUTURE_WRITE,
+  // see b/143833776.
+  offset = 0;
+  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
+    const ImageHeader& header = space->GetImageHeader();
+    const ImageSection& section = header.GetMethodsSection();
+    // Because mremap works at page boundaries, we can only handle methods
+    // within a page range. For methods that falls above or below the range,
+    // the child processes will copy their contents to their private mapping
+    // in `child_mapping_methods`. See `MapBootImageMethods`.
+    uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
+    uint8_t* page_end =
+        AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
+    if (page_end > page_start) {
+      uint64_t capacity = page_end - page_start;
+      if (memcmp(child_mapping_methods.Begin() + offset, page_start, capacity) != 0) {
+        LOG(WARNING) << "Contents differ in boot image methods data";
+        code_cache_->GetZygoteMap()->SetCompilationState(
+            ZygoteCompilationState::kNotifiedFailure);
+        return;
+      }
+      offset += capacity;
+    }
+  }
+
+  // Future spawned processes don't need the fd anymore.
+  fd_methods_.reset();
+
+  // In order to have the zygote and children share the memory, we also remap
+  // the memory into the zygote process.
+  offset = 0;
+  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
+    const ImageHeader& header = space->GetImageHeader();
+    const ImageSection& section = header.GetMethodsSection();
+    // Because mremap works at page boundaries, we can only handle methods
+    // within a page range. For methods that falls above or below the range,
+    // the child processes will copy their contents to their private mapping
+    // in `child_mapping_methods`. See `MapBootImageMethods`.
+    uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
+    uint8_t* page_end =
+        AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
+    if (page_end > page_start) {
+      uint64_t capacity = page_end - page_start;
+      if (mremap(child_mapping_methods.Begin() + offset,
+                 capacity,
+                 capacity,
+                 MREMAP_FIXED | MREMAP_MAYMOVE,
+                 page_start) == MAP_FAILED) {
+        // Failing to remap is safe as the process will just use the old
+        // contents.
+        PLOG(WARNING) << "Failed mremap of boot image methods of " << space->GetImageFilename();
+      }
+      offset += capacity;
+    }
+  }
+
+  LOG(INFO) << "Successfully notified child processes on sharing boot image methods";
+
+  // Mark that compilation of boot classpath is done, and memory can now be
+  // shared. Other processes will pick up this information.
+  code_cache_->GetZygoteMap()->SetCompilationState(ZygoteCompilationState::kNotifiedOk);
+
+  // The private mapping created for this process has been mremaped. We can
+  // reset it.
+  child_mapping_methods.Reset();
+}
+
 class JitCompileTask final : public Task {
  public:
   enum class TaskKind {
@@ -561,13 +760,17 @@
     kCompile,
     kCompileBaseline,
     kCompileOsr,
+    kPreCompile,
   };
 
   JitCompileTask(ArtMethod* method, TaskKind kind) : method_(method), kind_(kind), klass_(nullptr) {
     ScopedObjectAccess soa(Thread::Current());
     // For a non-bootclasspath class, add a global ref to the class to prevent class unloading
     // until compilation is done.
-    if (method->GetDeclaringClass()->GetClassLoader() != nullptr) {
+    // When we precompile, this is either with boot classpath methods, or main
+    // class loader methods, so we don't need to keep a global reference.
+    if (method->GetDeclaringClass()->GetClassLoader() != nullptr &&
+        kind_ != TaskKind::kPreCompile) {
       klass_ = soa.Vm()->AddGlobalRef(soa.Self(), method_->GetDeclaringClass());
       CHECK(klass_ != nullptr);
     }
@@ -581,23 +784,27 @@
   }
 
   void Run(Thread* self) override {
-    ScopedObjectAccess soa(self);
-    switch (kind_) {
-      case TaskKind::kCompile:
-      case TaskKind::kCompileBaseline:
-      case TaskKind::kCompileOsr: {
-        Runtime::Current()->GetJit()->CompileMethod(
-            method_,
-            self,
-            /* baseline= */ (kind_ == TaskKind::kCompileBaseline),
-            /* osr= */ (kind_ == TaskKind::kCompileOsr));
-        break;
-      }
-      case TaskKind::kAllocateProfile: {
-        if (ProfilingInfo::Create(self, method_, /* retry_allocation= */ true)) {
-          VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
+    {
+      ScopedObjectAccess soa(self);
+      switch (kind_) {
+        case TaskKind::kPreCompile:
+        case TaskKind::kCompile:
+        case TaskKind::kCompileBaseline:
+        case TaskKind::kCompileOsr: {
+          Runtime::Current()->GetJit()->CompileMethod(
+              method_,
+              self,
+              /* baseline= */ (kind_ == TaskKind::kCompileBaseline),
+              /* osr= */ (kind_ == TaskKind::kCompileOsr),
+              /* prejit= */ (kind_ == TaskKind::kPreCompile));
+          break;
         }
-        break;
+        case TaskKind::kAllocateProfile: {
+          if (ProfilingInfo::Create(self, method_, /* retry_allocation= */ true)) {
+            VLOG(jit) << "Start profiling " << ArtMethod::PrettyMethod(method_);
+          }
+          break;
+        }
       }
     }
     ProfileSaver::NotifyJitActivity();
@@ -615,27 +822,145 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
 };
 
+static std::string GetProfileFile(const std::string& dex_location) {
+  // Hardcoded assumption where the profile file is.
+  // TODO(ngeoffray): this is brittle and we would need to change change if we
+  // wanted to do more eager JITting of methods in a profile. This is
+  // currently only for system server.
+  return dex_location + ".prof";
+}
+
+static std::string GetBootProfileFile(const std::string& profile) {
+  // The boot profile can be found next to the compilation profile, with a
+  // different extension.
+  return ReplaceFileExtension(profile, "bprof");
+}
+
+/**
+ * A JIT task to run after all profile compilation is done.
+ */
+class JitDoneCompilingProfileTask final : public SelfDeletingTask {
+ public:
+  explicit JitDoneCompilingProfileTask(const std::vector<const DexFile*>& dex_files)
+      : dex_files_(dex_files) {}
+
+  void Run(Thread* self ATTRIBUTE_UNUSED) override {
+    // Madvise DONTNEED dex files now that we're done compiling methods.
+    for (const DexFile* dex_file : dex_files_) {
+      if (IsAddressKnownBackedByFileOrShared(dex_file->Begin())) {
+        int result = madvise(const_cast<uint8_t*>(AlignDown(dex_file->Begin(), kPageSize)),
+                             RoundUp(dex_file->Size(), kPageSize),
+                             MADV_DONTNEED);
+        if (result == -1) {
+          PLOG(WARNING) << "Madvise failed";
+        }
+      }
+    }
+
+    if (Runtime::Current()->IsZygote()) {
+      // Record that we are done compiling the profile.
+      Runtime::Current()->GetJit()->GetCodeCache()->GetZygoteMap()->SetCompilationState(
+          ZygoteCompilationState::kDone);
+    }
+  }
+
+ private:
+  std::vector<const DexFile*> dex_files_;
+
+  DISALLOW_COPY_AND_ASSIGN(JitDoneCompilingProfileTask);
+};
+
+/**
+ * A JIT task to run Java verification of boot classpath classes that were not
+ * verified at compile-time.
+ */
+class ZygoteVerificationTask final : public Task {
+ public:
+  ZygoteVerificationTask() {}
+
+  void Run(Thread* self) override {
+    // We are going to load class and run verification, which may also need to load
+    // classes. If the thread cannot load classes (typically when the runtime is
+    // debuggable), then just return.
+    if (!self->CanLoadClasses()) {
+      return;
+    }
+    Runtime* runtime = Runtime::Current();
+    ClassLinker* linker = runtime->GetClassLinker();
+    const std::vector<const DexFile*>& boot_class_path =
+        runtime->GetClassLinker()->GetBootClassPath();
+    ScopedObjectAccess soa(self);
+    StackHandleScope<1> hs(self);
+    MutableHandle<mirror::Class> klass = hs.NewHandle<mirror::Class>(nullptr);
+    uint64_t start_ns = ThreadCpuNanoTime();
+    uint64_t number_of_classes = 0;
+    for (const DexFile* dex_file : boot_class_path) {
+      if (dex_file->GetOatDexFile() != nullptr &&
+          dex_file->GetOatDexFile()->GetOatFile() != nullptr) {
+        // If backed by an .oat file, we have already run verification at
+        // compile-time. Note that some classes may still have failed
+        // verification there if they reference updatable mainline module
+        // classes.
+        continue;
+      }
+      for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
+        const dex::ClassDef& class_def = dex_file->GetClassDef(i);
+        const char* descriptor = dex_file->GetClassDescriptor(class_def);
+        ScopedNullHandle<mirror::ClassLoader> null_loader;
+        klass.Assign(linker->FindClass(self, descriptor, null_loader));
+        if (klass == nullptr) {
+          self->ClearException();
+          LOG(WARNING) << "Could not find " << descriptor;
+          continue;
+        }
+        ++number_of_classes;
+        if (linker->VerifyClass(self, klass) == verifier::FailureKind::kHardFailure) {
+          DCHECK(self->IsExceptionPending());
+          LOG(FATAL) << "Methods in the boot classpath failed to verify: "
+                     << self->GetException()->Dump();
+        }
+        CHECK(!self->IsExceptionPending());
+      }
+    }
+    LOG(INFO) << "Verified "
+              << number_of_classes
+              << " classes from mainline modules in "
+              << PrettyDuration(ThreadCpuNanoTime() - start_ns);
+  }
+};
+
 class ZygoteTask final : public Task {
  public:
   ZygoteTask() {}
 
   void Run(Thread* self) override {
     Runtime* runtime = Runtime::Current();
-    std::string profile_file;
-    for (const std::string& option : runtime->GetImageCompilerOptions()) {
-      if (android::base::StartsWith(option, "--profile-file=")) {
-        profile_file = option.substr(strlen("--profile-file="));
-        break;
+    uint32_t added_to_queue = 0;
+    for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
+      const std::string& profile_file = space->GetProfileFile();
+      if (profile_file.empty()) {
+        continue;
       }
+      LOG(INFO) << "JIT Zygote looking at profile " << profile_file;
+
+      const std::vector<const DexFile*>& boot_class_path =
+          runtime->GetClassLinker()->GetBootClassPath();
+      ScopedNullHandle<mirror::ClassLoader> null_handle;
+      // We add to the queue for zygote so that we can fork processes in-between
+      // compilations.
+      if (Runtime::Current()->IsPrimaryZygote()) {
+        std::string boot_profile = GetBootProfileFile(profile_file);
+        // We avoid doing compilation at boot for the secondary zygote, as apps
+        // forked from it are not critical for boot.
+        added_to_queue += runtime->GetJit()->CompileMethodsFromBootProfile(
+            self, boot_class_path, boot_profile, null_handle, /* add_to_queue= */ true);
+      }
+      added_to_queue += runtime->GetJit()->CompileMethodsFromProfile(
+          self, boot_class_path, profile_file, null_handle, /* add_to_queue= */ true);
     }
 
-    const std::vector<const DexFile*>& boot_class_path =
-        runtime->GetClassLinker()->GetBootClassPath();
-    ScopedNullHandle<mirror::ClassLoader> null_handle;
-    // We add to the queue for zygote so that we can fork processes in-between
-    // compilations.
-    runtime->GetJit()->CompileMethodsFromProfile(
-        self, boot_class_path, profile_file, null_handle, /* add_to_queue= */ true);
+    JitCodeCache* code_cache = runtime->GetJit()->GetCodeCache();
+    code_cache->GetZygoteMap()->Initialize(added_to_queue);
   }
 
   void Finalize() override {
@@ -646,27 +971,23 @@
   DISALLOW_COPY_AND_ASSIGN(ZygoteTask);
 };
 
-static std::string GetProfileFile(const std::string& dex_location) {
-  // Hardcoded assumption where the profile file is.
-  // TODO(ngeoffray): this is brittle and we would need to change change if we
-  // wanted to do more eager JITting of methods in a profile. This is
-  // currently only for system server.
-  return dex_location + ".prof";
-}
-
 class JitProfileTask final : public Task {
  public:
   JitProfileTask(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
-                 ObjPtr<mirror::ClassLoader> class_loader) {
+                 jobject class_loader) {
+    ScopedObjectAccess soa(Thread::Current());
+    StackHandleScope<1> hs(soa.Self());
+    Handle<mirror::ClassLoader> h_loader(hs.NewHandle(
+        soa.Decode<mirror::ClassLoader>(class_loader)));
     ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
     for (const auto& dex_file : dex_files) {
       dex_files_.push_back(dex_file.get());
       // Register the dex file so that we can guarantee it doesn't get deleted
       // while reading it during the task.
-      class_linker->RegisterDexFile(*dex_file.get(), class_loader);
+      class_linker->RegisterDexFile(*dex_file.get(), h_loader.Get());
     }
-    ScopedObjectAccess soa(Thread::Current());
-    class_loader_ = soa.Vm()->AddGlobalRef(soa.Self(), class_loader.Ptr());
+    // We also create our own global ref to use this class loader later.
+    class_loader_ = soa.Vm()->AddGlobalRef(soa.Self(), h_loader.Get());
   }
 
   void Run(Thread* self) override {
@@ -674,18 +995,36 @@
     StackHandleScope<1> hs(self);
     Handle<mirror::ClassLoader> loader = hs.NewHandle<mirror::ClassLoader>(
         soa.Decode<mirror::ClassLoader>(class_loader_));
-    Runtime::Current()->GetJit()->CompileMethodsFromProfile(
+
+    std::string profile = GetProfileFile(dex_files_[0]->GetLocation());
+    std::string boot_profile = GetBootProfileFile(profile);
+
+    Jit* jit = Runtime::Current()->GetJit();
+
+    jit->CompileMethodsFromBootProfile(
         self,
         dex_files_,
-        GetProfileFile(dex_files_[0]->GetLocation()),
+        boot_profile,
         loader,
         /* add_to_queue= */ false);
+
+    jit->CompileMethodsFromProfile(
+        self,
+        dex_files_,
+        profile,
+        loader,
+        /* add_to_queue= */ true);
   }
 
   void Finalize() override {
     delete this;
   }
 
+  ~JitProfileTask() {
+    ScopedObjectAccess soa(Thread::Current());
+    soa.Vm()->DeleteGlobalRef(soa.Self(), class_loader_);
+  }
+
  private:
   std::vector<const DexFile*> dex_files_;
   jobject class_loader_;
@@ -693,6 +1032,167 @@
   DISALLOW_COPY_AND_ASSIGN(JitProfileTask);
 };
 
+static void CopyIfDifferent(void* s1, const void* s2, size_t n) {
+  if (memcmp(s1, s2, n) != 0) {
+    memcpy(s1, s2, n);
+  }
+}
+
+void Jit::MapBootImageMethods() {
+  if (Runtime::Current()->IsJavaDebuggable()) {
+    LOG(INFO) << "Not mapping boot image methods due to process being debuggable";
+    return;
+  }
+  CHECK_NE(fd_methods_.get(), -1);
+  if (!code_cache_->GetZygoteMap()->CanMapBootImageMethods()) {
+    LOG(WARNING) << "Not mapping boot image methods due to error from zygote";
+    // We don't need the fd anymore.
+    fd_methods_.reset();
+    return;
+  }
+
+  std::string error_str;
+  MemMap child_mapping_methods = MemMap::MapFile(
+      fd_methods_size_,
+      PROT_READ | PROT_WRITE,
+      MAP_PRIVATE,
+      fd_methods_,
+      /* start= */ 0,
+      /* low_4gb= */ false,
+      "boot-image-methods",
+      &error_str);
+
+  // We don't need the fd anymore.
+  fd_methods_.reset();
+
+  if (!child_mapping_methods.IsValid()) {
+    LOG(WARNING) << "Failed to create child mapping of boot image methods: " << error_str;
+    return;
+  }
+  size_t offset = 0;
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
+    const ImageHeader& header = space->GetImageHeader();
+    const ImageSection& section = header.GetMethodsSection();
+    uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
+    uint8_t* page_end =
+        AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
+    if (page_end <= page_start) {
+      // Section doesn't contain one aligned entire page.
+      continue;
+    }
+    uint64_t capacity = page_end - page_start;
+    // Walk over methods in the boot image, and check for ones whose class is
+    // not initialized in the process, but are in the zygote process. For
+    // such methods, we need their entrypoints to be stubs that do the
+    // initialization check.
+    header.VisitPackedArtMethods([&](ArtMethod& method) NO_THREAD_SAFETY_ANALYSIS {
+      if (method.IsRuntimeMethod()) {
+        return;
+      }
+      if (method.GetDeclaringClassUnchecked()->IsVisiblyInitialized() ||
+          !method.IsStatic() ||
+          method.IsConstructor()) {
+        // Method does not need any stub.
+        return;
+      }
+
+      //  We are going to mremap the child mapping into the image:
+      //
+      //                            ImageSection       ChildMappingMethods
+      //
+      //         section start -->  -----------
+      //                            |         |
+      //                            |         |
+      //            page_start -->  |         |   <-----   -----------
+      //                            |         |            |         |
+      //                            |         |            |         |
+      //                            |         |            |         |
+      //                            |         |            |         |
+      //                            |         |            |         |
+      //                            |         |            |         |
+      //                            |         |            |         |
+      //             page_end  -->  |         |   <-----   -----------
+      //                            |         |
+      //         section end   -->  -----------
+
+
+      uint8_t* pointer = reinterpret_cast<uint8_t*>(&method);
+      // Note: We could refactor this to only check if the ArtMethod entrypoint is inside the
+      // page region. This would remove the need for the edge case handling below.
+      if (pointer >= page_start && pointer + sizeof(ArtMethod) < page_end) {
+        // For all the methods in the mapping, put the entrypoint to the
+        // resolution stub.
+        ArtMethod* new_method = reinterpret_cast<ArtMethod*>(
+            child_mapping_methods.Begin() + offset + (pointer - page_start));
+        const void* code = new_method->GetEntryPointFromQuickCompiledCode();
+        if (!class_linker->IsQuickGenericJniStub(code) &&
+            !class_linker->IsQuickToInterpreterBridge(code) &&
+            !class_linker->IsQuickResolutionStub(code)) {
+          LOG(INFO) << "Putting back the resolution stub to an ArtMethod";
+          new_method->SetEntryPointFromQuickCompiledCode(GetQuickResolutionStub());
+        }
+      } else if (pointer < page_start && (pointer + sizeof(ArtMethod)) > page_start) {
+        LOG(INFO) << "Copying parts of the contents of an ArtMethod spanning page_start";
+        // If the method spans `page_start`, copy the contents of the child
+        // into the pages we are going to remap into the image.
+        //
+        //         section start -->  -----------
+        //                            |         |
+        //                            |         |
+        //            page_start -->  |/////////|            -----------
+        //                            |/////////| -> copy -> |/////////|
+        //                            |         |            |         |
+        //
+        CopyIfDifferent(child_mapping_methods.Begin() + offset,
+                        page_start,
+                        pointer + sizeof(ArtMethod) - page_start);
+      } else if (pointer < page_end && (pointer + sizeof(ArtMethod)) > page_end) {
+        LOG(INFO) << "Copying parts of the contents of an ArtMethod spanning page_end";
+        // If the method spans `page_end`, copy the contents of the child
+        // into the pages we are going to remap into the image.
+        //
+        //                            |         |            |         |
+        //                            |/////////| -> copy -> |/////////|
+        //             page_end  -->  |/////////|            -----------
+        //                            |         |
+        //         section end   -->  -----------
+        //
+        size_t bytes_to_copy = (page_end - pointer);
+        CopyIfDifferent(child_mapping_methods.Begin() + offset + capacity - bytes_to_copy,
+                        page_end - bytes_to_copy,
+                        bytes_to_copy);
+      }
+    }, space->Begin(), kRuntimePointerSize);
+
+    // Map the memory in the boot image range.
+    if (mremap(child_mapping_methods.Begin() + offset,
+               capacity,
+               capacity,
+               MREMAP_FIXED | MREMAP_MAYMOVE,
+               page_start) == MAP_FAILED) {
+      PLOG(WARNING) << "Fail to mremap boot image methods for " << space->GetImageFilename();
+    }
+    offset += capacity;
+  }
+
+  // The private mapping created for this process has been mremaped. We can
+  // reset it.
+  child_mapping_methods.Reset();
+  LOG(INFO) << "Successfully mapped boot image methods";
+}
+
+// Return whether a boot image has a profile. This means we'll need to pre-JIT
+// methods in that profile for performance.
+static bool HasImageWithProfile() {
+  for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
+    if (!space->GetProfileFile().empty()) {
+      return true;
+    }
+  }
+  return false;
+}
+
 void Jit::CreateThreadPool() {
   // There is a DCHECK in the 'AddSamples' method to ensure the tread pool
   // is not null when we instrument.
@@ -704,26 +1204,202 @@
   thread_pool_->SetPthreadPriority(options_->GetThreadPoolPthreadPriority());
   Start();
 
-  // If we're not using the default boot image location, request a JIT task to
-  // compile all methods in the boot image profile.
   Runtime* runtime = Runtime::Current();
-  if (runtime->IsZygote() && runtime->IsUsingApexBootImageLocation() && UseJitCompilation()) {
+  if (runtime->IsZygote()) {
+    // To speed up class lookups, generate a type lookup table for
+    // dex files not backed by oat file.
+    for (const DexFile* dex_file : runtime->GetClassLinker()->GetBootClassPath()) {
+      if (dex_file->GetOatDexFile() == nullptr) {
+        TypeLookupTable type_lookup_table = TypeLookupTable::Create(*dex_file);
+        type_lookup_tables_.push_back(
+            std::make_unique<art::OatDexFile>(std::move(type_lookup_table)));
+        dex_file->SetOatDexFile(type_lookup_tables_.back().get());
+      }
+    }
+
+    // Add a task that will verify boot classpath jars that were not
+    // pre-compiled.
+    thread_pool_->AddTask(Thread::Current(), new ZygoteVerificationTask());
+  }
+
+  if (runtime->IsZygote() && HasImageWithProfile() && UseJitCompilation()) {
+    // If we have an image with a profile, request a JIT task to
+    // compile all methods in that profile.
     thread_pool_->AddTask(Thread::Current(), new ZygoteTask());
+
+    // And create mappings to share boot image methods memory from the zygote to
+    // child processes.
+
+    // Compute the total capacity required for the boot image methods.
+    uint64_t total_capacity = 0;
+    for (gc::space::ImageSpace* space : Runtime::Current()->GetHeap()->GetBootImageSpaces()) {
+      const ImageHeader& header = space->GetImageHeader();
+      const ImageSection& section = header.GetMethodsSection();
+      // Mappings need to be at the page level.
+      uint8_t* page_start = AlignUp(header.GetImageBegin() + section.Offset(), kPageSize);
+      uint8_t* page_end =
+          AlignDown(header.GetImageBegin() + section.Offset() + section.Size(), kPageSize);
+      if (page_end > page_start) {
+        total_capacity += (page_end - page_start);
+      }
+    }
+
+    // Create the child and zygote mappings to the boot image methods.
+    if (total_capacity > 0) {
+      // Start with '/boot' and end with '.art' to match the pattern recognized
+      // by android_os_Debug.cpp for boot images.
+      const char* name = "/boot-image-methods.art";
+      unique_fd mem_fd = unique_fd(art::memfd_create(name, /* flags= */ MFD_ALLOW_SEALING));
+      if (mem_fd.get() == -1) {
+        PLOG(WARNING) << "Could not create boot image methods file descriptor";
+        return;
+      }
+      if (ftruncate(mem_fd.get(), total_capacity) != 0) {
+        PLOG(WARNING) << "Failed to truncate boot image methods file to " << total_capacity;
+        return;
+      }
+      std::string error_str;
+
+      // Create the shared mapping eagerly, as this prevents other processes
+      // from adding the writable seal.
+      zygote_mapping_methods_ = MemMap::MapFile(
+        total_capacity,
+        PROT_READ | PROT_WRITE,
+        MAP_SHARED,
+        mem_fd,
+        /* start= */ 0,
+        /* low_4gb= */ false,
+        "boot-image-methods",
+        &error_str);
+
+      if (!zygote_mapping_methods_.IsValid()) {
+        LOG(WARNING) << "Failed to create zygote mapping of boot image methods:  " << error_str;
+        return;
+      }
+      if (zygote_mapping_methods_.MadviseDontFork() != 0) {
+        LOG(WARNING) << "Failed to madvise dont fork boot image methods";
+        zygote_mapping_methods_ = MemMap();
+        return;
+      }
+
+      // We should use the F_SEAL_FUTURE_WRITE flag, but this has unexpected
+      // behavior on private mappings after fork (the mapping becomes shared between
+      // parent and children), see b/143833776.
+      // We will seal the write once we are done writing to the shared mapping.
+      if (fcntl(mem_fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW) == -1) {
+        PLOG(WARNING) << "Failed to seal boot image methods file descriptor";
+        zygote_mapping_methods_ = MemMap();
+        return;
+      }
+      fd_methods_ = unique_fd(mem_fd.release());
+      fd_methods_size_ = total_capacity;
+    }
   }
 }
 
 void Jit::RegisterDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
-                           ObjPtr<mirror::ClassLoader> class_loader) {
+                           jobject class_loader) {
   if (dex_files.empty()) {
     return;
   }
   Runtime* runtime = Runtime::Current();
-  if (runtime->IsSystemServer() && runtime->IsUsingApexBootImageLocation() && UseJitCompilation()) {
+  // If the runtime is debuggable, no need to precompile methods.
+  if (runtime->IsSystemServer() &&
+      UseJitCompilation() && HasImageWithProfile() &&
+      !runtime->IsJavaDebuggable()) {
     thread_pool_->AddTask(Thread::Current(), new JitProfileTask(dex_files, class_loader));
   }
 }
 
-void Jit::CompileMethodsFromProfile(
+bool Jit::CompileMethodFromProfile(Thread* self,
+                                   ClassLinker* class_linker,
+                                   uint32_t method_idx,
+                                   Handle<mirror::DexCache> dex_cache,
+                                   Handle<mirror::ClassLoader> class_loader,
+                                   bool add_to_queue,
+                                   bool compile_after_boot) {
+  ArtMethod* method = class_linker->ResolveMethodWithoutInvokeType(
+      method_idx, dex_cache, class_loader);
+  if (method == nullptr) {
+    self->ClearException();
+    return false;
+  }
+  if (!method->IsCompilable() || !method->IsInvokable()) {
+    return false;
+  }
+  if (method->IsPreCompiled()) {
+    // Already seen by another profile.
+    return false;
+  }
+  const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
+  if (class_linker->IsQuickToInterpreterBridge(entry_point) ||
+      class_linker->IsQuickGenericJniStub(entry_point) ||
+      // We explicitly check for the stub. The trampoline is for methods backed by
+      // a .oat file that has a compiled version of the method.
+      (entry_point == GetQuickResolutionStub())) {
+    method->SetPreCompiled();
+    if (!add_to_queue) {
+      CompileMethod(method, self, /* baseline= */ false, /* osr= */ false, /* prejit= */ true);
+    } else {
+      Task* task = new JitCompileTask(method, JitCompileTask::TaskKind::kPreCompile);
+      if (compile_after_boot) {
+        MutexLock mu(Thread::Current(), boot_completed_lock_);
+        if (!boot_completed_) {
+          tasks_after_boot_.push_back(task);
+          return true;
+        }
+        DCHECK(tasks_after_boot_.empty());
+      }
+      thread_pool_->AddTask(self, task);
+      return true;
+    }
+  }
+  return false;
+}
+
+uint32_t Jit::CompileMethodsFromBootProfile(
+    Thread* self,
+    const std::vector<const DexFile*>& dex_files,
+    const std::string& profile_file,
+    Handle<mirror::ClassLoader> class_loader,
+    bool add_to_queue) {
+  unix_file::FdFile profile(profile_file.c_str(), O_RDONLY, true);
+
+  if (profile.Fd() == -1) {
+    PLOG(WARNING) << "No boot profile: " << profile_file;
+    return 0u;
+  }
+
+  ProfileBootInfo profile_info;
+  if (!profile_info.Load(profile.Fd(), dex_files)) {
+    LOG(ERROR) << "Could not load profile file: " << profile_file;
+    return 0u;
+  }
+
+  ScopedObjectAccess soa(self);
+  VariableSizedHandleScope handles(self);
+  std::vector<Handle<mirror::DexCache>> dex_caches;
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  for (const DexFile* dex_file : profile_info.GetDexFiles()) {
+    dex_caches.push_back(handles.NewHandle(class_linker->FindDexCache(self, *dex_file)));
+  }
+
+  uint32_t added_to_queue = 0;
+  for (const std::pair<uint32_t, uint32_t>& pair : profile_info.GetMethods()) {
+    if (CompileMethodFromProfile(self,
+                                 class_linker,
+                                 pair.second,
+                                 dex_caches[pair.first],
+                                 class_loader,
+                                 add_to_queue,
+                                 /*compile_after_boot=*/false)) {
+      ++added_to_queue;
+    }
+  }
+  return added_to_queue;
+}
+
+uint32_t Jit::CompileMethodsFromProfile(
     Thread* self,
     const std::vector<const DexFile*>& dex_files,
     const std::string& profile_file,
@@ -732,41 +1408,33 @@
 
   if (profile_file.empty()) {
     LOG(WARNING) << "Expected a profile file in JIT zygote mode";
-    return;
+    return 0u;
   }
 
-  std::string error_msg;
-  ScopedFlock profile = LockedFile::Open(
-      profile_file.c_str(), O_RDONLY, /* block= */ false, &error_msg);
+  // We don't generate boot profiles on device, therefore we don't
+  // need to lock the file.
+  unix_file::FdFile profile(profile_file.c_str(), O_RDONLY, true);
 
-  // Return early if we're unable to obtain a lock on the profile.
-  if (profile.get() == nullptr) {
-    LOG(ERROR) << "Cannot lock profile: " << error_msg;
-    return;
+  if (profile.Fd() == -1) {
+    PLOG(WARNING) << "No profile: " << profile_file;
+    return 0u;
   }
 
   ProfileCompilationInfo profile_info;
-  if (!profile_info.Load(profile->Fd())) {
+  if (!profile_info.Load(profile.Fd())) {
     LOG(ERROR) << "Could not load profile file";
-    return;
+    return 0u;
   }
   ScopedObjectAccess soa(self);
   StackHandleScope<1> hs(self);
   MutableHandle<mirror::DexCache> dex_cache = hs.NewHandle<mirror::DexCache>(nullptr);
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  uint32_t added_to_queue = 0u;
   for (const DexFile* dex_file : dex_files) {
-    if (LocationIsOnRuntimeModule(dex_file->GetLocation().c_str())) {
-      // The runtime module jars are already preopted.
+    if (LocationIsOnArtModule(dex_file->GetLocation().c_str())) {
+      // The ART module jars are already preopted.
       continue;
     }
-    // To speed up class lookups, generate a type lookup table for
-    // the dex file.
-    if (dex_file->GetOatDexFile() == nullptr) {
-      TypeLookupTable type_lookup_table = TypeLookupTable::Create(*dex_file);
-      type_lookup_tables_.push_back(
-            std::make_unique<art::OatDexFile>(std::move(type_lookup_table)));
-      dex_file->SetOatDexFile(type_lookup_tables_.back().get());
-    }
 
     std::set<dex::TypeIndex> class_types;
     std::set<uint16_t> all_methods;
@@ -783,40 +1451,32 @@
     CHECK(dex_cache != nullptr) << "Could not find dex cache for " << dex_file->GetLocation();
 
     for (uint16_t method_idx : all_methods) {
-      ArtMethod* method = class_linker->ResolveMethodWithoutInvokeType(
-          method_idx, dex_cache, class_loader);
-      if (method == nullptr) {
-        self->ClearException();
-        continue;
-      }
-      if (!method->IsCompilable() || !method->IsInvokable()) {
-        continue;
-      }
-      const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
-      if (class_linker->IsQuickToInterpreterBridge(entry_point) ||
-          class_linker->IsQuickGenericJniStub(entry_point) ||
-          class_linker->IsQuickResolutionStub(entry_point)) {
-        if (!method->IsNative()) {
-          // The compiler requires a ProfilingInfo object for non-native methods.
-          ProfilingInfo::Create(self, method, /* retry_allocation= */ true);
-        }
-        // Special case ZygoteServer class so that it gets compiled before the
-        // zygote enters it. This avoids needing to do OSR during app startup.
-        // TODO: have a profile instead.
-        if (!add_to_queue || method->GetDeclaringClass()->DescriptorEquals(
-                "Lcom/android/internal/os/ZygoteServer;")) {
-          CompileMethod(method, self, /* baseline= */ false, /* osr= */ false);
-        } else {
-          thread_pool_->AddTask(self,
-              new JitCompileTask(method, JitCompileTask::TaskKind::kCompile));
-        }
+      if (CompileMethodFromProfile(self,
+                                   class_linker,
+                                   method_idx,
+                                   dex_cache,
+                                   class_loader,
+                                   add_to_queue,
+                                   /*compile_after_boot=*/true)) {
+        ++added_to_queue;
       }
     }
   }
+
+  // Add a task to run when all compilation is done.
+  JitDoneCompilingProfileTask* task = new JitDoneCompilingProfileTask(dex_files);
+  MutexLock mu(Thread::Current(), boot_completed_lock_);
+  if (!boot_completed_) {
+    tasks_after_boot_.push_back(task);
+  } else {
+    DCHECK(tasks_after_boot_.empty());
+    thread_pool_->AddTask(self, task);
+  }
+  return added_to_queue;
 }
 
 static bool IgnoreSamplesForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
-  if (method->IsClassInitializer() || !method->IsCompilable()) {
+  if (method->IsClassInitializer() || !method->IsCompilable() || method->IsPreCompiled()) {
     // We do not want to compile such methods.
     return true;
   }
@@ -826,7 +1486,7 @@
         klass == GetClassRoot<mirror::VarHandle>()) {
       // MethodHandle and VarHandle invocation methods are required to throw an
       // UnsupportedOperationException if invoked reflectively. We achieve this by having native
-      // implementations that arise the exception. We need to disable JIT compilation of these JNI
+      // implementations that raise the exception. We need to disable JIT compilation of these JNI
       // methods as it can lead to transitioning between JIT compiled JNI stubs and generic JNI
       // stubs. Since these stubs have different stack representations we can then crash in stack
       // walking (b/78151261).
@@ -842,12 +1502,19 @@
                              uint32_t new_count,
                              bool with_backedges) {
   if (thread_pool_ == nullptr) {
-    // Should only see this when shutting down, starting up, or in safe mode.
-    DCHECK(Runtime::Current()->IsShuttingDown(self) ||
-           !Runtime::Current()->IsFinishedStarting() ||
-           Runtime::Current()->IsSafeMode());
     return false;
   }
+  if (UNLIKELY(method->IsPreCompiled()) && !with_backedges /* don't check for OSR */) {
+    if (!NeedsClinitCheckBeforeCall(method) ||
+        method->GetDeclaringClass()->IsVisiblyInitialized()) {
+      const void* entry_point = code_cache_->GetSavedEntryPointOfPreCompiledMethod(method);
+      if (entry_point != nullptr) {
+        Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(method, entry_point);
+        return true;
+      }
+    }
+  }
+
   if (IgnoreSamplesForMethod(method)) {
     return false;
   }
@@ -855,7 +1522,6 @@
     // Tests might request JIT on first use (compiled synchronously in the interpreter).
     return false;
   }
-  DCHECK(thread_pool_ != nullptr);
   DCHECK_GT(WarmMethodThreshold(), 0);
   DCHECK_GT(HotMethodThreshold(), WarmMethodThreshold());
   DCHECK_GT(OSRMethodThreshold(), HotMethodThreshold());
@@ -864,7 +1530,10 @@
 
   if (old_count < WarmMethodThreshold() && new_count >= WarmMethodThreshold()) {
     // Note: Native method have no "warm" state or profiling info.
-    if (!method->IsNative() && method->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
+    if (!method->IsNative() &&
+        (method->GetProfilingInfo(kRuntimePointerSize) == nullptr) &&
+        code_cache_->CanAllocateProfilingInfo() &&
+        !options_->UseTieredJitCompilation()) {
       bool success = ProfilingInfo::Create(self, method, /* retry_allocation= */ false);
       if (success) {
         VLOG(jit) << "Start profiling " << method->PrettyMethod();
@@ -873,7 +1542,6 @@
       if (thread_pool_ == nullptr) {
         // Calling ProfilingInfo::Create might put us in a suspended state, which could
         // lead to the thread pool being deleted when we are shutting down.
-        DCHECK(Runtime::Current()->IsShuttingDown(self));
         return false;
       }
 
@@ -886,17 +1554,14 @@
     }
   }
   if (UseJitCompilation()) {
-    if (old_count == 0 &&
-        method->IsNative() &&
-        Runtime::Current()->IsUsingApexBootImageLocation()) {
-      // jitzygote: Compile JNI stub on first use to avoid the expensive generic stub.
-      CompileMethod(method, self, /* baseline= */ false, /* osr= */ false);
-      return true;
-    }
     if (old_count < HotMethodThreshold() && new_count >= HotMethodThreshold()) {
       if (!code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
         DCHECK(thread_pool_ != nullptr);
-        thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompile));
+        JitCompileTask::TaskKind kind =
+            (options_->UseTieredJitCompilation() || options_->UseBaselineCompiler())
+                ? JitCompileTask::TaskKind::kCompileBaseline
+                : JitCompileTask::TaskKind::kCompile;
+        thread_pool_->AddTask(self, new JitCompileTask(method, kind));
       }
     }
     if (old_count < OSRMethodThreshold() && new_count >= OSRMethodThreshold()) {
@@ -914,6 +1579,19 @@
   return true;
 }
 
+void Jit::EnqueueOptimizedCompilation(ArtMethod* method, Thread* self) {
+  if (thread_pool_ == nullptr) {
+    return;
+  }
+  // We arrive here after a baseline compiled code has reached its baseline
+  // hotness threshold. If tiered compilation is enabled, enqueue a compilation
+  // task that will compile optimize the method.
+  if (options_->UseTieredJitCompilation()) {
+    thread_pool_->AddTask(
+        self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompile));
+  }
+}
+
 class ScopedSetRuntimeThread {
  public:
   explicit ScopedSetRuntimeThread(Thread* self)
@@ -932,13 +1610,15 @@
 
 void Jit::MethodEntered(Thread* thread, ArtMethod* method) {
   Runtime* runtime = Runtime::Current();
-  if (UNLIKELY(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse())) {
+  if (UNLIKELY(runtime->UseJitCompilation() && JitAtFirstUse())) {
     ArtMethod* np_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
     if (np_method->IsCompilable()) {
-      if (!np_method->IsNative()) {
+      if (!np_method->IsNative() && GetCodeCache()->CanAllocateProfilingInfo()) {
         // The compiler requires a ProfilingInfo object for non-native methods.
         ProfilingInfo::Create(thread, np_method, /* retry_allocation= */ true);
       }
+      // TODO(ngeoffray): For JIT at first use, use kPreCompile. Currently we don't due to
+      // conflicts with jitzygote optimizations.
       JitCompileTask compile_task(method, JitCompileTask::TaskKind::kCompile);
       // Fake being in a runtime thread so that class-load behavior will be the same as normal jit.
       ScopedSetRuntimeThread ssrt(thread);
@@ -1007,43 +1687,82 @@
   }
 }
 
-void Jit::PostForkChildAction(bool is_system_server, bool is_zygote) {
-  if (is_zygote) {
-    // Remove potential tasks that have been inherited from the zygote. Child zygotes
-    // currently don't need the whole boot image compiled (ie webview_zygote).
-    thread_pool_->RemoveAllTasks(Thread::Current());
-    // Don't transition if this is for a child zygote.
-    return;
+static void* RunPollingThread(void* arg) {
+  Jit* jit = reinterpret_cast<Jit*>(arg);
+  do {
+    sleep(10);
+  } while (!jit->GetCodeCache()->GetZygoteMap()->IsCompilationNotified());
+
+  // We will suspend other threads: we can only do that if we're attached to the
+  // runtime.
+  Runtime* runtime = Runtime::Current();
+  bool thread_attached = runtime->AttachCurrentThread(
+      "BootImagePollingThread",
+      /* as_daemon= */ true,
+      /* thread_group= */ nullptr,
+      /* create_peer= */ false);
+  CHECK(thread_attached);
+
+  {
+    // Prevent other threads from running while we are remapping the boot image
+    // ArtMethod's. Native threads might still be running, but they cannot
+    // change the contents of ArtMethod's.
+    ScopedSuspendAll ssa(__FUNCTION__);
+    runtime->GetJit()->MapBootImageMethods();
   }
-  if (Runtime::Current()->IsSafeMode()) {
+
+  Runtime::Current()->DetachCurrentThread();
+  return nullptr;
+}
+
+void Jit::PostForkChildAction(bool is_system_server, bool is_zygote) {
+  // Clear the potential boot tasks inherited from the zygote.
+  {
+    MutexLock mu(Thread::Current(), boot_completed_lock_);
+    tasks_after_boot_.clear();
+  }
+
+  Runtime* const runtime = Runtime::Current();
+  // Check if we'll need to remap the boot image methods.
+  if (!is_zygote && fd_methods_ != -1) {
+    // Create a thread that will poll the status of zygote compilation, and map
+    // the private mapping of boot image methods.
+    // For child zygote, we instead query IsCompilationNotified() post zygote fork.
+    zygote_mapping_methods_.ResetInForkedProcess();
+    pthread_t polling_thread;
+    pthread_attr_t attr;
+    CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
+    CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED),
+                       "PTHREAD_CREATE_DETACHED");
+    CHECK_PTHREAD_CALL(
+        pthread_create,
+        (&polling_thread, &attr, RunPollingThread, reinterpret_cast<void*>(this)),
+        "Methods maps thread");
+  }
+
+  if (is_zygote || runtime->IsSafeMode()) {
     // Delete the thread pool, we are not going to JIT.
     thread_pool_.reset(nullptr);
     return;
   }
   // At this point, the compiler options have been adjusted to the particular configuration
   // of the forked child. Parse them again.
-  jit_update_options_(jit_compiler_handle_);
+  jit_compiler_->ParseCompilerOptions();
 
   // Adjust the status of code cache collection: the status from zygote was to not collect.
-  code_cache_->SetGarbageCollectCode(!jit_generate_debug_info_(jit_compiler_handle_) &&
+  code_cache_->SetGarbageCollectCode(!jit_compiler_->GenerateDebugInfo() &&
       !Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled());
 
-  if (thread_pool_ != nullptr) {
-    if (!is_system_server) {
-      // Remove potential tasks that have been inherited from the zygote.
-      // We keep the queue for system server, as not having those methods compiled
-      // impacts app startup.
-      thread_pool_->RemoveAllTasks(Thread::Current());
-    } else if (Runtime::Current()->IsUsingApexBootImageLocation() && UseJitCompilation()) {
-      // Disable garbage collection: we don't want it to delete methods we're compiling
-      // through boot and system server profiles.
-      // TODO(ngeoffray): Fix this so we still collect deoptimized and unused code.
-      code_cache_->SetGarbageCollectCode(false);
-    }
-
-    // Resume JIT compilation.
-    thread_pool_->CreateThreads();
+  if (is_system_server && HasImageWithProfile()) {
+    // Disable garbage collection: we don't want it to delete methods we're compiling
+    // through boot and system server profiles.
+    // TODO(ngeoffray): Fix this so we still collect deoptimized and unused code.
+    code_cache_->SetGarbageCollectCode(false);
   }
+
+  // We do this here instead of PostZygoteFork, as NativeDebugInfoPostFork only
+  // applies to a child.
+  NativeDebugInfoPostFork();
 }
 
 void Jit::PreZygoteFork() {
@@ -1051,14 +1770,96 @@
     return;
   }
   thread_pool_->DeleteThreads();
+
+  NativeDebugInfoPreFork();
 }
 
 void Jit::PostZygoteFork() {
   if (thread_pool_ == nullptr) {
+    // If this is a child zygote, check if we need to remap the boot image
+    // methods.
+    if (Runtime::Current()->IsZygote() &&
+        fd_methods_ != -1 &&
+        code_cache_->GetZygoteMap()->IsCompilationNotified()) {
+      ScopedSuspendAll ssa(__FUNCTION__);
+      MapBootImageMethods();
+    }
     return;
   }
+  if (Runtime::Current()->IsZygote() &&
+      code_cache_->GetZygoteMap()->IsCompilationDoneButNotNotified()) {
+    // Copy the boot image methods data to the mappings we created to share
+    // with the children. We do this here as we are the only thread running and
+    // we don't risk other threads concurrently updating the ArtMethod's.
+    CHECK_EQ(GetTaskCount(), 1);
+    NotifyZygoteCompilationDone();
+    CHECK(code_cache_->GetZygoteMap()->IsCompilationNotified());
+  }
   thread_pool_->CreateThreads();
 }
 
+void Jit::BootCompleted() {
+  Thread* self = Thread::Current();
+  std::deque<Task*> tasks;
+  {
+    MutexLock mu(self, boot_completed_lock_);
+    tasks = std::move(tasks_after_boot_);
+    boot_completed_ = true;
+  }
+  for (Task* task : tasks) {
+    thread_pool_->AddTask(self, task);
+  }
+}
+
+bool Jit::CanEncodeMethod(ArtMethod* method, bool is_for_shared_region) const {
+  return !is_for_shared_region ||
+      Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(method->GetDeclaringClass());
+}
+
+bool Jit::CanEncodeClass(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const {
+  return !is_for_shared_region || Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(cls);
+}
+
+bool Jit::CanEncodeString(ObjPtr<mirror::String> string, bool is_for_shared_region) const {
+  return !is_for_shared_region || Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(string);
+}
+
+bool Jit::CanAssumeInitialized(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const {
+  if (!is_for_shared_region) {
+    return cls->IsInitialized();
+  } else {
+    // Look up the class status in the oat file.
+    const DexFile& dex_file = *cls->GetDexCache()->GetDexFile();
+    const OatDexFile* oat_dex_file = dex_file.GetOatDexFile();
+    // In case we run without an image there won't be a backing oat file.
+    if (oat_dex_file == nullptr || oat_dex_file->GetOatFile() == nullptr) {
+      return false;
+    }
+    uint16_t class_def_index = cls->GetDexClassDefIndex();
+    return oat_dex_file->GetOatClass(class_def_index).GetStatus() >= ClassStatus::kInitialized;
+  }
+}
+
+void Jit::EnqueueCompilationFromNterp(ArtMethod* method, Thread* self) {
+  if (thread_pool_ == nullptr) {
+    return;
+  }
+  if (GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
+    // If we already have compiled code for it, nterp may be stuck in a loop.
+    // Compile OSR.
+    thread_pool_->AddTask(
+        self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompileOsr));
+    return;
+  }
+  if (GetCodeCache()->CanAllocateProfilingInfo()) {
+    ProfilingInfo::Create(self, method, /* retry_allocation= */ false);
+    thread_pool_->AddTask(
+        self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompileBaseline));
+  } else {
+    thread_pool_->AddTask(
+        self, new JitCompileTask(method, JitCompileTask::TaskKind::kCompile));
+  }
+}
+
 }  // namespace jit
 }  // namespace art
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 4b81f71..e9fd915 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -17,11 +17,17 @@
 #ifndef ART_RUNTIME_JIT_JIT_H_
 #define ART_RUNTIME_JIT_JIT_H_
 
+#include <android-base/unique_fd.h>
+
 #include "base/histogram-inl.h"
 #include "base/macros.h"
 #include "base/mutex.h"
+#include "base/runtime_debug.h"
 #include "base/timing_logger.h"
 #include "handle.h"
+#include "offsets.h"
+#include "interpreter/mterp/mterp.h"
+#include "jit/debugger_interface.h"
 #include "jit/profile_saver_options.h"
 #include "obj_ptr.h"
 #include "thread_pool.h"
@@ -39,11 +45,14 @@
 class Object;
 class Class;
 class ClassLoader;
+class DexCache;
+class String;
 }   // namespace mirror
 
 namespace jit {
 
 class JitCodeCache;
+class JitMemoryRegion;
 class JitOptions;
 
 static constexpr int16_t kJitCheckForOSR = -1;
@@ -51,7 +60,9 @@
 // At what priority to schedule jit threads. 9 is the lowest foreground priority on device.
 // See android/os/Process.java.
 static constexpr int kJitPoolThreadPthreadDefaultPriority = 9;
-static constexpr uint32_t kJitSamplesBatchSize = 32;  // Must be power of 2.
+// We check whether to jit-compile the method every Nth invoke.
+// The tests often use threshold of 1000 (and thus 500 to start profiling).
+static constexpr uint32_t kJitSamplesBatchSize = 512;  // Must be power of 2.
 
 class JitOptions {
  public:
@@ -105,6 +116,16 @@
     return use_jit_compilation_;
   }
 
+  bool UseTieredJitCompilation() const {
+    return use_tiered_jit_compilation_;
+  }
+
+  bool CanCompileBaseline() const {
+    return use_tiered_jit_compilation_ ||
+           use_baseline_compiler_ ||
+           interpreter::IsNterpSupported();
+  }
+
   void SetUseJitCompilation(bool b) {
     use_jit_compilation_ = b;
   }
@@ -117,21 +138,27 @@
     profile_saver_options_.SetWaitForJitNotificationsToSave(value);
   }
 
-  void SetProfileAOTCode(bool value) {
-    profile_saver_options_.SetProfileAOTCode(value);
-  }
-
   void SetJitAtFirstUse() {
     use_jit_compilation_ = true;
     compile_threshold_ = 0;
   }
 
+  void SetUseBaselineCompiler() {
+    use_baseline_compiler_ = true;
+  }
+
+  bool UseBaselineCompiler() const {
+    return use_baseline_compiler_;
+  }
+
  private:
   // We add the sample in batches of size kJitSamplesBatchSize.
   // This method rounds the threshold so that it is multiple of the batch size.
   static uint32_t RoundUpThreshold(uint32_t threshold);
 
   bool use_jit_compilation_;
+  bool use_tiered_jit_compilation_;
+  bool use_baseline_compiler_;
   size_t code_cache_initial_capacity_;
   size_t code_cache_max_capacity_;
   uint32_t compile_threshold_;
@@ -145,6 +172,8 @@
 
   JitOptions()
       : use_jit_compilation_(false),
+        use_tiered_jit_compilation_(false),
+        use_baseline_compiler_(false),
         code_cache_initial_capacity_(0),
         code_cache_max_capacity_(0),
         compile_threshold_(0),
@@ -158,6 +187,48 @@
   DISALLOW_COPY_AND_ASSIGN(JitOptions);
 };
 
+// Implemented and provided by the compiler library.
+class JitCompilerInterface {
+ public:
+  virtual ~JitCompilerInterface() {}
+  virtual bool CompileMethod(
+      Thread* self, JitMemoryRegion* region, ArtMethod* method, bool baseline, bool osr)
+      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+  virtual void TypesLoaded(mirror::Class**, size_t count)
+      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+  virtual bool GenerateDebugInfo() = 0;
+  virtual void ParseCompilerOptions() = 0;
+
+  virtual std::vector<uint8_t> PackElfFileForJIT(ArrayRef<const JITCodeEntry*> elf_files,
+                                                 ArrayRef<const void*> removed_symbols,
+                                                 bool compress,
+                                                 /*out*/ size_t* num_symbols) = 0;
+};
+
+// Data structure holding information to perform an OSR.
+struct OsrData {
+  // The native PC to jump to.
+  const uint8_t* native_pc;
+
+  // The frame size of the compiled code to jump to.
+  size_t frame_size;
+
+  // The dynamically allocated memory of size `frame_size` to copy to stack.
+  void* memory[0];
+
+  static constexpr MemberOffset NativePcOffset() {
+    return MemberOffset(OFFSETOF_MEMBER(OsrData, native_pc));
+  }
+
+  static constexpr MemberOffset FrameSizeOffset() {
+    return MemberOffset(OFFSETOF_MEMBER(OsrData, frame_size));
+  }
+
+  static constexpr MemberOffset MemoryOffset() {
+    return MemberOffset(OFFSETOF_MEMBER(OsrData, memory));
+  }
+};
+
 class Jit {
  public:
   static constexpr size_t kDefaultPriorityThreadWeightRatio = 1000;
@@ -165,12 +236,14 @@
   // How frequently should the interpreter check to see if OSR compilation is ready.
   static constexpr int16_t kJitRecheckOSRThreshold = 101;  // Prime number to avoid patterns.
 
+  DECLARE_RUNTIME_DEBUG_FLAG(kSlowMode);
+
   virtual ~Jit();
 
   // Create JIT itself.
   static Jit* Create(JitCodeCache* code_cache, JitOptions* options);
 
-  bool CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr)
+  bool CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr, bool prejit)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   const JitCodeCache* GetCodeCache() const {
@@ -181,6 +254,10 @@
     return code_cache_;
   }
 
+  JitCompilerInterface* GetJitCompiler() const {
+    return jit_compiler_;
+  }
+
   void CreateThreadPool();
   void DeleteThreadPool();
   void WaitForWorkersToBeCreated();
@@ -211,7 +288,8 @@
     return options_->GetPriorityThreadWeight();
   }
 
-  // Returns false if we only need to save profile information and not compile methods.
+  // Return whether we should do JIT compilation. Note this will returns false
+  // if we only need to save profile information and not compile methods.
   bool UseJitCompilation() const {
     return options_->UseJitCompilation();
   }
@@ -274,6 +352,11 @@
   // Return whether the runtime should use a priority thread weight when sampling.
   static bool ShouldUsePriorityThreadWeight(Thread* self);
 
+  // Return the information required to do an OSR jump. Return null if the OSR
+  // cannot be done.
+  OsrData* PrepareForOsr(ArtMethod* method, uint32_t dex_pc, uint32_t* vregs)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   // If an OSR compiled version is available for `method`,
   // and `dex_pc + dex_pc_offset` is an entry point of that compiled
   // version, this method will jump to the compiled code, let it run,
@@ -307,23 +390,71 @@
   // Adjust state after forking.
   void PostZygoteFork();
 
-  // Compile methods from the given profile. If `add_to_queue` is true, methods
-  // in the profile are added to the JIT queue. Otherwise they are compiled
+  // Called when system finishes booting.
+  void BootCompleted();
+
+  // Compile methods from the given profile (.prof extension). If `add_to_queue`
+  // is true, methods in the profile are added to the JIT queue. Otherwise they are compiled
   // directly.
-  void CompileMethodsFromProfile(Thread* self,
-                                 const std::vector<const DexFile*>& dex_files,
-                                 const std::string& profile_path,
-                                 Handle<mirror::ClassLoader> class_loader,
-                                 bool add_to_queue);
+  // Return the number of methods added to the queue.
+  uint32_t CompileMethodsFromProfile(Thread* self,
+                                     const std::vector<const DexFile*>& dex_files,
+                                     const std::string& profile_path,
+                                     Handle<mirror::ClassLoader> class_loader,
+                                     bool add_to_queue);
+
+  // Compile methods from the given boot profile (.bprof extension). If `add_to_queue`
+  // is true, methods in the profile are added to the JIT queue. Otherwise they are compiled
+  // directly.
+  // Return the number of methods added to the queue.
+  uint32_t CompileMethodsFromBootProfile(Thread* self,
+                                         const std::vector<const DexFile*>& dex_files,
+                                         const std::string& profile_path,
+                                         Handle<mirror::ClassLoader> class_loader,
+                                         bool add_to_queue);
 
   // Register the dex files to the JIT. This is to perform any compilation/optimization
   // at the point of loading the dex files.
   void RegisterDexFiles(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
-                        ObjPtr<mirror::ClassLoader> class_loader);
+                        jobject class_loader);
+
+  // Called by the compiler to know whether it can directly encode the
+  // method/class/string.
+  bool CanEncodeMethod(ArtMethod* method, bool is_for_shared_region) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  bool CanEncodeClass(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  bool CanEncodeString(ObjPtr<mirror::String> string, bool is_for_shared_region) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  bool CanAssumeInitialized(ObjPtr<mirror::Class> cls, bool is_for_shared_region) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Map boot image methods after all compilation in zygote has been done.
+  void MapBootImageMethods() REQUIRES(Locks::mutator_lock_);
+
+  // Notify to other processes that the zygote is done profile compiling boot
+  // class path methods.
+  void NotifyZygoteCompilationDone();
+
+  void EnqueueOptimizedCompilation(ArtMethod* method, Thread* self);
+
+  void EnqueueCompilationFromNterp(ArtMethod* method, Thread* self)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
   Jit(JitCodeCache* code_cache, JitOptions* options);
 
+  // Compile an individual method listed in a profile. If `add_to_queue` is
+  // true and the method was resolved, return true. Otherwise return false.
+  bool CompileMethodFromProfile(Thread* self,
+                                ClassLinker* linker,
+                                uint32_t method_idx,
+                                Handle<mirror::DexCache> dex_cache,
+                                Handle<mirror::ClassLoader> class_loader,
+                                bool add_to_queue,
+                                bool compile_after_boot)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Compile the method if the number of samples passes a threshold.
   // Returns false if we can not compile now - don't increment the counter and retry later.
   bool MaybeCompileMethod(Thread* self,
@@ -337,13 +468,8 @@
 
   // JIT compiler
   static void* jit_library_handle_;
-  static void* jit_compiler_handle_;
-  static void* (*jit_load_)(void);
-  static void (*jit_unload_)(void*);
-  static bool (*jit_compile_method_)(void*, ArtMethod*, Thread*, bool, bool);
-  static void (*jit_types_loaded_)(void*, mirror::Class**, size_t count);
-  static void (*jit_update_options_)(void*);
-  static bool (*jit_generate_debug_info_)(void*);
+  static JitCompilerInterface* jit_compiler_;
+  static JitCompilerInterface* (*jit_load_)(void);
   template <typename T> static bool LoadSymbol(T*, const char* symbol, std::string* error_msg);
 
   // JIT resources owned by runtime.
@@ -353,11 +479,35 @@
   std::unique_ptr<ThreadPool> thread_pool_;
   std::vector<std::unique_ptr<OatDexFile>> type_lookup_tables_;
 
+  Mutex boot_completed_lock_;
+  bool boot_completed_ GUARDED_BY(boot_completed_lock_) = false;
+  std::deque<Task*> tasks_after_boot_ GUARDED_BY(boot_completed_lock_);
+
   // Performance monitoring.
   CumulativeLogger cumulative_timings_;
   Histogram<uint64_t> memory_use_ GUARDED_BY(lock_);
   Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
+  // In the JIT zygote configuration, after all compilation is done, the zygote
+  // will copy its contents of the boot image to the zygote_mapping_methods_,
+  // which will be picked up by processes that will map the memory
+  // in-place within the boot image mapping.
+  //
+  // zygote_mapping_methods_ is shared memory only usable by the zygote and not
+  // inherited by child processes. We create it eagerly to ensure other
+  // processes cannot seal writable the file.
+  MemMap zygote_mapping_methods_;
+
+  // The file descriptor created through memfd_create pointing to memory holding
+  // boot image methods. Created by the zygote, and inherited by child
+  // processes. The descriptor will be closed in each process (including the
+  // zygote) once they don't need it.
+  android::base::unique_fd fd_methods_;
+
+  // The size of the memory pointed by `fd_methods_`. Cached here to avoid
+  // recomputing it.
+  size_t fd_methods_size_;
+
   DISALLOW_COPY_AND_ASSIGN(Jit);
 };
 
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index a6aefc4..8383002 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -19,7 +19,6 @@
 #include <sstream>
 
 #include <android-base/logging.h>
-#include <android-base/unique_fd.h>
 
 #include "arch/context.h"
 #include "art_method-inl.h"
@@ -38,6 +37,7 @@
 #include "debugger_interface.h"
 #include "dex/dex_file_loader.h"
 #include "dex/method_reference.h"
+#include "entrypoints/entrypoint_utils-inl.h"
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "gc/accounting/bitmap-inl.h"
 #include "gc/allocator/dlmalloc.h"
@@ -47,6 +47,7 @@
 #include "intern_table.h"
 #include "jit/jit.h"
 #include "jit/profiling_info.h"
+#include "jit/jit_scoped_code_cache_write.h"
 #include "linear_alloc.h"
 #include "oat_file-inl.h"
 #include "oat_quick_method_header.h"
@@ -57,37 +58,12 @@
 #include "thread-current-inl.h"
 #include "thread_list.h"
 
-using android::base::unique_fd;
-
 namespace art {
 namespace jit {
 
 static constexpr size_t kCodeSizeLogThreshold = 50 * KB;
 static constexpr size_t kStackMapSizeLogThreshold = 50 * KB;
 
-// Data cache will be half of the capacity
-// Code cache will be the other half of the capacity.
-// TODO: Make this variable?
-static constexpr size_t kCodeAndDataCapacityDivider = 2;
-
-static constexpr int kProtR = PROT_READ;
-static constexpr int kProtRW = PROT_READ | PROT_WRITE;
-static constexpr int kProtRWX = PROT_READ | PROT_WRITE | PROT_EXEC;
-static constexpr int kProtRX = PROT_READ | PROT_EXEC;
-
-namespace {
-
-// Translate an address belonging to one memory map into an address in a second. This is useful
-// when there are two virtual memory ranges for the same physical memory range.
-template <typename T>
-T* TranslateAddress(T* src_ptr, const MemMap& src, const MemMap& dst) {
-  CHECK(src.HasAddress(src_ptr));
-  uint8_t* const raw_src_ptr = reinterpret_cast<uint8_t*>(src_ptr);
-  return reinterpret_cast<T*>(raw_src_ptr - src.Begin() + dst.Begin());
-}
-
-}  // namespace
-
 class JitCodeCache::JniStubKey {
  public:
   explicit JniStubKey(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
@@ -144,6 +120,41 @@
     code_ = code;
   }
 
+  void UpdateEntryPoints(const void* entrypoint) REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(IsCompiled());
+    DCHECK(entrypoint == OatQuickMethodHeader::FromCodePointer(GetCode())->GetEntryPoint());
+    instrumentation::Instrumentation* instrum = Runtime::Current()->GetInstrumentation();
+    for (ArtMethod* m : GetMethods()) {
+      // Because `m` might be in the process of being deleted:
+      // - Call the dedicated method instead of the more generic UpdateMethodsCode
+      // - Check the class status without a full read barrier; use ReadBarrier::IsMarked().
+      bool can_set_entrypoint = true;
+      if (NeedsClinitCheckBeforeCall(m)) {
+        // To avoid resurrecting an unreachable object, we must not use a full read
+        // barrier but we do not want to miss updating an entrypoint under common
+        // circumstances, i.e. during a GC the class becomes visibly initialized,
+        // the method becomes hot, we compile the thunk and want to update the
+        // entrypoint while the method's declaring class field still points to the
+        // from-space class object with the old status. Therefore we read the
+        // declaring class without a read barrier and check if it's already marked.
+        // If yes, we check the status of the to-space class object as intended.
+        // Otherwise, there is no to-space object and the from-space class object
+        // contains the most recent value of the status field; even if this races
+        // with another thread doing a read barrier and updating the status, that's
+        // no different from a race with a thread that just updates the status.
+        // Such race can happen only for the zygote method pre-compilation, as we
+        // otherwise compile only thunks for methods of visibly initialized classes.
+        ObjPtr<mirror::Class> klass = m->GetDeclaringClass<kWithoutReadBarrier>();
+        ObjPtr<mirror::Class> marked = ReadBarrier::IsMarked(klass.Ptr());
+        ObjPtr<mirror::Class> checked_klass = (marked != nullptr) ? marked : klass;
+        can_set_entrypoint = checked_klass->IsVisiblyInitialized();
+      }
+      if (can_set_entrypoint) {
+        instrum->UpdateNativeMethodsCodeToJitCode(m, entrypoint);
+      }
+    }
+  }
+
   const void* GetCode() const {
     return code_;
   }
@@ -189,171 +200,6 @@
   std::vector<ArtMethod*> methods_;
 };
 
-bool JitCodeCache::InitializeMappings(bool rwx_memory_allowed,
-                                      bool is_zygote,
-                                      std::string* error_msg) {
-  ScopedTrace trace(__PRETTY_FUNCTION__);
-
-  const size_t capacity = max_capacity_;
-  const size_t data_capacity = capacity / kCodeAndDataCapacityDivider;
-  const size_t exec_capacity = capacity - data_capacity;
-
-  // File descriptor enabling dual-view mapping of code section.
-  unique_fd mem_fd;
-
-  // Zygote shouldn't create a shared mapping for JIT, so we cannot use dual view
-  // for it.
-  if (!is_zygote) {
-    // Bionic supports memfd_create, but the call may fail on older kernels.
-    mem_fd = unique_fd(art::memfd_create("/jit-cache", /* flags= */ 0));
-    if (mem_fd.get() < 0) {
-      std::ostringstream oss;
-      oss << "Failed to initialize dual view JIT. memfd_create() error: " << strerror(errno);
-      if (!rwx_memory_allowed) {
-        // Without using RWX page permissions, the JIT can not fallback to single mapping as it
-        // requires tranitioning the code pages to RWX for updates.
-        *error_msg = oss.str();
-        return false;
-      }
-      VLOG(jit) << oss.str();
-    }
-  }
-
-  if (mem_fd.get() >= 0 && ftruncate(mem_fd, capacity) != 0) {
-    std::ostringstream oss;
-    oss << "Failed to initialize memory file: " << strerror(errno);
-    *error_msg = oss.str();
-    return false;
-  }
-
-  std::string data_cache_name = is_zygote ? "zygote-data-code-cache" : "data-code-cache";
-  std::string exec_cache_name = is_zygote ? "zygote-jit-code-cache" : "jit-code-cache";
-
-  std::string error_str;
-  // Map name specific for android_os_Debug.cpp accounting.
-  // Map in low 4gb to simplify accessing root tables for x86_64.
-  // We could do PC-relative addressing to avoid this problem, but that
-  // would require reserving code and data area before submitting, which
-  // means more windows for the code memory to be RWX.
-  int base_flags;
-  MemMap data_pages;
-  if (mem_fd.get() >= 0) {
-    // Dual view of JIT code cache case. Create an initial mapping of data pages large enough
-    // for data and non-writable view of JIT code pages. We use the memory file descriptor to
-    // enable dual mapping - we'll create a second mapping using the descriptor below. The
-    // mappings will look like:
-    //
-    //       VA                  PA
-    //
-    //       +---------------+
-    //       | non exec code |\
-    //       +---------------+ \
-    //       :               :\ \
-    //       +---------------+.\.+---------------+
-    //       |  exec code    |  \|     code      |
-    //       +---------------+...+---------------+
-    //       |      data     |   |     data      |
-    //       +---------------+...+---------------+
-    //
-    // In this configuration code updates are written to the non-executable view of the code
-    // cache, and the executable view of the code cache has fixed RX memory protections.
-    //
-    // This memory needs to be mapped shared as the code portions will have two mappings.
-    base_flags = MAP_SHARED;
-    data_pages = MemMap::MapFile(
-        data_capacity + exec_capacity,
-        kProtRW,
-        base_flags,
-        mem_fd,
-        /* start= */ 0,
-        /* low_4gb= */ true,
-        data_cache_name.c_str(),
-        &error_str);
-  } else {
-    // Single view of JIT code cache case. Create an initial mapping of data pages large enough
-    // for data and JIT code pages. The mappings will look like:
-    //
-    //       VA                  PA
-    //
-    //       +---------------+...+---------------+
-    //       |  exec code    |   |     code      |
-    //       +---------------+...+---------------+
-    //       |      data     |   |     data      |
-    //       +---------------+...+---------------+
-    //
-    // In this configuration code updates are written to the executable view of the code cache,
-    // and the executable view of the code cache transitions RX to RWX for the update and then
-    // back to RX after the update.
-    base_flags = MAP_PRIVATE | MAP_ANON;
-    data_pages = MemMap::MapAnonymous(
-        data_cache_name.c_str(),
-        data_capacity + exec_capacity,
-        kProtRW,
-        /* low_4gb= */ true,
-        &error_str);
-  }
-
-  if (!data_pages.IsValid()) {
-    std::ostringstream oss;
-    oss << "Failed to create read write cache: " << error_str << " size=" << capacity;
-    *error_msg = oss.str();
-    return false;
-  }
-
-  MemMap exec_pages;
-  MemMap non_exec_pages;
-  if (exec_capacity > 0) {
-    uint8_t* const divider = data_pages.Begin() + data_capacity;
-    // Set initial permission for executable view to catch any SELinux permission problems early
-    // (for processes that cannot map WX pages). Otherwise, this region does not need to be
-    // executable as there is no code in the cache yet.
-    exec_pages = data_pages.RemapAtEnd(divider,
-                                       exec_cache_name.c_str(),
-                                       kProtRX,
-                                       base_flags | MAP_FIXED,
-                                       mem_fd.get(),
-                                       (mem_fd.get() >= 0) ? data_capacity : 0,
-                                       &error_str);
-    if (!exec_pages.IsValid()) {
-      std::ostringstream oss;
-      oss << "Failed to create read execute code cache: " << error_str << " size=" << capacity;
-      *error_msg = oss.str();
-      return false;
-    }
-
-    if (mem_fd.get() >= 0) {
-      // For dual view, create the secondary view of code memory used for updating code. This view
-      // is never executable.
-      std::string name = exec_cache_name + "-rw";
-      non_exec_pages = MemMap::MapFile(exec_capacity,
-                                       kProtR,
-                                       base_flags,
-                                       mem_fd,
-                                       /* start= */ data_capacity,
-                                       /* low_4GB= */ false,
-                                       name.c_str(),
-                                       &error_str);
-      if (!non_exec_pages.IsValid()) {
-        static const char* kFailedNxView = "Failed to map non-executable view of JIT code cache";
-        if (rwx_memory_allowed) {
-          // Log and continue as single view JIT (requires RWX memory).
-          VLOG(jit) << kFailedNxView;
-        } else {
-          *error_msg = kFailedNxView;
-          return false;
-        }
-      }
-    }
-  } else {
-    // Profiling only. No memory for code required.
-  }
-
-  data_pages_ = std::move(data_pages);
-  exec_pages_ = std::move(exec_pages);
-  non_exec_pages_ = std::move(non_exec_pages);
-  return true;
-}
-
 JitCodeCache* JitCodeCache::Create(bool used_only_for_profile_data,
                                    bool rwx_memory_allowed,
                                    bool is_zygote,
@@ -368,6 +214,7 @@
     }
   }
 
+  size_t initial_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheInitialCapacity();
   // Check whether the provided max capacity in options is below 1GB.
   size_t max_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheMaxCapacity();
   // We need to have 32 bit offsets from method headers in code cache which point to things
@@ -381,23 +228,24 @@
     return nullptr;
   }
 
-  size_t initial_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheInitialCapacity();
-
-  std::unique_ptr<JitCodeCache> jit_code_cache(new JitCodeCache());
-
-  MutexLock mu(Thread::Current(), jit_code_cache->lock_);
-  jit_code_cache->InitializeState(initial_capacity, max_capacity);
-
-  // Zygote should never collect code to share the memory with the children.
-  if (is_zygote) {
-    jit_code_cache->garbage_collect_code_ = false;
-  }
-
-  if (!jit_code_cache->InitializeMappings(rwx_memory_allowed, is_zygote, error_msg)) {
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+  JitMemoryRegion region;
+  if (!region.Initialize(initial_capacity,
+                         max_capacity,
+                         rwx_memory_allowed,
+                         is_zygote,
+                         error_msg)) {
     return nullptr;
   }
 
-  jit_code_cache->InitializeSpaces();
+  std::unique_ptr<JitCodeCache> jit_code_cache(new JitCodeCache());
+  if (is_zygote) {
+    // Zygote should never collect code to share the memory with the children.
+    jit_code_cache->garbage_collect_code_ = false;
+    jit_code_cache->shared_region_ = std::move(region);
+  } else {
+    jit_code_cache->private_region_ = std::move(region);
+  }
 
   VLOG(jit) << "Created jit code cache: initial capacity="
             << PrettySize(initial_capacity)
@@ -408,82 +256,29 @@
 }
 
 JitCodeCache::JitCodeCache()
-    : lock_("Jit code cache", kJitCodeCacheLock),
-      lock_cond_("Jit code cache condition variable", lock_),
+    : is_weak_access_enabled_(true),
+      inline_cache_cond_("Jit inline cache condition variable", *Locks::jit_lock_),
+      zygote_map_(&shared_region_),
+      lock_cond_("Jit code cache condition variable", *Locks::jit_lock_),
       collection_in_progress_(false),
       last_collection_increased_code_cache_(false),
       garbage_collect_code_(true),
-      used_memory_for_data_(0),
-      used_memory_for_code_(0),
       number_of_compilations_(0),
       number_of_osr_compilations_(0),
       number_of_collections_(0),
       histogram_stack_map_memory_use_("Memory used for stack maps", 16),
       histogram_code_memory_use_("Memory used for compiled code", 16),
-      histogram_profiling_info_memory_use_("Memory used for profiling info", 16),
-      is_weak_access_enabled_(true),
-      inline_cache_cond_("Jit inline cache condition variable", lock_),
-      zygote_data_pages_(),
-      zygote_exec_pages_(),
-      zygote_data_mspace_(nullptr),
-      zygote_exec_mspace_(nullptr) {
-}
-
-void JitCodeCache::InitializeState(size_t initial_capacity, size_t max_capacity) {
-  CHECK_GE(max_capacity, initial_capacity);
-  CHECK(max_capacity <= 1 * GB) << "The max supported size for JIT code cache is 1GB";
-  // Align both capacities to page size, as that's the unit mspaces use.
-  initial_capacity = RoundDown(initial_capacity, 2 * kPageSize);
-  max_capacity = RoundDown(max_capacity, 2 * kPageSize);
-
-  used_memory_for_data_ = 0;
-  used_memory_for_code_ = 0;
-  number_of_compilations_ = 0;
-  number_of_osr_compilations_ = 0;
-  number_of_collections_ = 0;
-
-  data_pages_ = MemMap();
-  exec_pages_ = MemMap();
-  non_exec_pages_ = MemMap();
-  initial_capacity_ = initial_capacity;
-  max_capacity_ = max_capacity;
-  current_capacity_ = initial_capacity,
-  data_end_ = initial_capacity / kCodeAndDataCapacityDivider;
-  exec_end_ = initial_capacity - data_end_;
-}
-
-void JitCodeCache::InitializeSpaces() {
-  // Initialize the data heap
-  data_mspace_ = create_mspace_with_base(data_pages_.Begin(), data_end_, false /*locked*/);
-  CHECK(data_mspace_ != nullptr) << "create_mspace_with_base (data) failed";
-
-  // Initialize the code heap
-  MemMap* code_heap = nullptr;
-  if (non_exec_pages_.IsValid()) {
-    code_heap = &non_exec_pages_;
-  } else if (exec_pages_.IsValid()) {
-    code_heap = &exec_pages_;
-  }
-  if (code_heap != nullptr) {
-    // Make all pages reserved for the code heap writable. The mspace allocator, that manages the
-    // heap, will take and initialize pages in create_mspace_with_base().
-    CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
-    exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
-    CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
-    SetFootprintLimit(initial_capacity_);
-    // Protect pages containing heap metadata. Updates to the code heap toggle write permission to
-    // perform the update and there are no other times write access is required.
-    CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
-  } else {
-    exec_mspace_ = nullptr;
-    SetFootprintLimit(initial_capacity_);
-  }
+      histogram_profiling_info_memory_use_("Memory used for profiling info", 16) {
 }
 
 JitCodeCache::~JitCodeCache() {}
 
+bool JitCodeCache::PrivateRegionContainsPc(const void* ptr) const {
+  return private_region_.IsInExecSpace(ptr);
+}
+
 bool JitCodeCache::ContainsPc(const void* ptr) const {
-  return exec_pages_.HasAddress(ptr) || zygote_exec_pages_.HasAddress(ptr);
+  return PrivateRegionContainsPc(ptr) || shared_region_.IsInExecSpace(ptr);
 }
 
 bool JitCodeCache::WillExecuteJitCode(ArtMethod* method) {
@@ -498,7 +293,7 @@
 }
 
 bool JitCodeCache::ContainsMethod(ArtMethod* method) {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   if (UNLIKELY(method->IsNative())) {
     auto it = jni_stubs_map_.find(JniStubKey(method));
     if (it != jni_stubs_map_.end() &&
@@ -512,13 +307,16 @@
         return true;
       }
     }
+    if (zygote_map_.ContainsMethod(method)) {
+      return true;
+    }
   }
   return false;
 }
 
 const void* JitCodeCache::GetJniStubCode(ArtMethod* method) {
   DCHECK(method->IsNative());
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   auto it = jni_stubs_map_.find(JniStubKey(method));
   if (it != jni_stubs_map_.end()) {
     JniStubData& data = it->second;
@@ -545,97 +343,26 @@
   return info->GetSavedEntryPoint();
 }
 
-const void* JitCodeCache::GetZygoteSavedEntryPoint(ArtMethod* method) {
-  if (Runtime::Current()->IsUsingApexBootImageLocation() &&
-      // Currently only applies to boot classpath
-      method->GetDeclaringClass()->GetClassLoader() == nullptr) {
-    const void* entry_point = nullptr;
-    if (method->IsNative()) {
-      const void* code_ptr = GetJniStubCode(method);
-      if (code_ptr != nullptr) {
-        entry_point = OatQuickMethodHeader::FromCodePointer(code_ptr)->GetEntryPoint();
-      }
+const void* JitCodeCache::GetSavedEntryPointOfPreCompiledMethod(ArtMethod* method) {
+  if (method->IsPreCompiled()) {
+    const void* code_ptr = nullptr;
+    if (method->GetDeclaringClass()->GetClassLoader() == nullptr) {
+      code_ptr = zygote_map_.GetCodeFor(method);
     } else {
-      ProfilingInfo* profiling_info = method->GetProfilingInfo(kRuntimePointerSize);
-      if (profiling_info != nullptr) {
-        entry_point = profiling_info->GetSavedEntryPoint();
+      MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+      auto it = saved_compiled_methods_map_.find(method);
+      if (it != saved_compiled_methods_map_.end()) {
+        code_ptr = it->second;
       }
     }
-    if (Runtime::Current()->IsZygote() || IsInZygoteExecSpace(entry_point)) {
-      return entry_point;
+    if (code_ptr != nullptr) {
+      OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+      return method_header->GetEntryPoint();
     }
   }
   return nullptr;
 }
 
-class ScopedCodeCacheWrite : ScopedTrace {
- public:
-  explicit ScopedCodeCacheWrite(const JitCodeCache* const code_cache)
-      : ScopedTrace("ScopedCodeCacheWrite"),
-        code_cache_(code_cache) {
-    ScopedTrace trace("mprotect all");
-    const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
-    if (updatable_pages != nullptr) {
-      int prot = code_cache_->HasDualCodeMapping() ? kProtRW : kProtRWX;
-      CheckedCall(mprotect, "Cache +W", updatable_pages->Begin(), updatable_pages->Size(), prot);
-    }
-  }
-
-  ~ScopedCodeCacheWrite() {
-    ScopedTrace trace("mprotect code");
-    const MemMap* const updatable_pages = code_cache_->GetUpdatableCodeMapping();
-    if (updatable_pages != nullptr) {
-      int prot = code_cache_->HasDualCodeMapping() ? kProtR : kProtRX;
-      CheckedCall(mprotect, "Cache -W", updatable_pages->Begin(), updatable_pages->Size(), prot);
-    }
-  }
-
- private:
-  const JitCodeCache* const code_cache_;
-
-  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
-};
-
-uint8_t* JitCodeCache::CommitCode(Thread* self,
-                                  ArtMethod* method,
-                                  uint8_t* stack_map,
-                                  uint8_t* roots_data,
-                                  const uint8_t* code,
-                                  size_t code_size,
-                                  size_t data_size,
-                                  bool osr,
-                                  const std::vector<Handle<mirror::Object>>& roots,
-                                  bool has_should_deoptimize_flag,
-                                  const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
-  uint8_t* result = CommitCodeInternal(self,
-                                       method,
-                                       stack_map,
-                                       roots_data,
-                                       code,
-                                       code_size,
-                                       data_size,
-                                       osr,
-                                       roots,
-                                       has_should_deoptimize_flag,
-                                       cha_single_implementation_list);
-  if (result == nullptr) {
-    // Retry.
-    GarbageCollectCache(self);
-    result = CommitCodeInternal(self,
-                                method,
-                                stack_map,
-                                roots_data,
-                                code,
-                                code_size,
-                                data_size,
-                                osr,
-                                roots,
-                                has_should_deoptimize_flag,
-                                cha_single_implementation_list);
-  }
-  return result;
-}
-
 bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
   bool in_collection = false;
   while (collection_in_progress_) {
@@ -645,43 +372,19 @@
   return in_collection;
 }
 
-static size_t GetJitCodeAlignment() {
-  if (kRuntimeISA == InstructionSet::kArm || kRuntimeISA == InstructionSet::kThumb2) {
-    // Some devices with 32-bit ARM kernels need additional JIT code alignment when using dual
-    // view JIT (b/132205399). The alignment returned here coincides with the typical ARM d-cache
-    // line (though the value should be probed ideally). Both the method header and code in the
-    // cache are aligned to this size. Anything less than 64-bytes exhibits the problem.
-    return 64;
-  }
-  return GetInstructionSetAlignment(kRuntimeISA);
-}
-
 static uintptr_t FromCodeToAllocation(const void* code) {
-  size_t alignment = GetJitCodeAlignment();
+  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
   return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
 }
 
-static uint32_t ComputeRootTableSize(uint32_t number_of_roots) {
-  return sizeof(uint32_t) + number_of_roots * sizeof(GcRoot<mirror::Object>);
-}
-
 static uint32_t GetNumberOfRoots(const uint8_t* stack_map) {
   // The length of the table is stored just before the stack map (and therefore at the end of
   // the table itself), in order to be able to fetch it from a `stack_map` pointer.
   return reinterpret_cast<const uint32_t*>(stack_map)[-1];
 }
 
-static void FillRootTableLength(uint8_t* roots_data, uint32_t length) {
-  // Store the length of the table at the end. This will allow fetching it from a `stack_map`
-  // pointer.
-  reinterpret_cast<uint32_t*>(roots_data)[length] = length;
-}
-
-static const uint8_t* FromStackMapToRoots(const uint8_t* stack_map_data) {
-  return stack_map_data - ComputeRootTableSize(GetNumberOfRoots(stack_map_data));
-}
-
-static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots)
+static void DCheckRootsAreValid(const std::vector<Handle<mirror::Object>>& roots,
+                                bool is_shared_region)
     REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
   if (!kIsDebugBuild) {
     return;
@@ -694,21 +397,14 @@
       ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
       CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
     }
+    // Ensure that we don't put movable objects in the shared region.
+    if (is_shared_region) {
+      CHECK(!Runtime::Current()->GetHeap()->IsMovableObject(object.Get()));
+    }
   }
 }
 
-void JitCodeCache::FillRootTable(uint8_t* roots_data,
-                                 const std::vector<Handle<mirror::Object>>& roots) {
-  GcRoot<mirror::Object>* gc_roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
-  const uint32_t length = roots.size();
-  // Put all roots in `roots_data`.
-  for (uint32_t i = 0; i < length; ++i) {
-    ObjPtr<mirror::Object> object = roots[i].Get();
-    gc_roots[i] = GcRoot<mirror::Object>(object);
-  }
-}
-
-static uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
+static const uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr) {
   OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
   uint8_t* data = method_header->GetOptimizedCodeInfoPtr();
   uint32_t roots = GetNumberOfRoots(data);
@@ -718,49 +414,19 @@
   return data - ComputeRootTableSize(roots);
 }
 
-// Use a sentinel for marking entries in the JIT table that have been cleared.
-// This helps diagnosing in case the compiled code tries to wrongly access such
-// entries.
-static mirror::Class* const weak_sentinel =
-    reinterpret_cast<mirror::Class*>(Context::kBadGprBase + 0xff);
-
-// Helper for the GC to process a weak class in a JIT root table.
-static inline void ProcessWeakClass(GcRoot<mirror::Class>* root_ptr,
-                                    IsMarkedVisitor* visitor,
-                                    mirror::Class* update)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  // This does not need a read barrier because this is called by GC.
-  mirror::Class* cls = root_ptr->Read<kWithoutReadBarrier>();
-  if (cls != nullptr && cls != weak_sentinel) {
-    DCHECK((cls->IsClass<kDefaultVerifyFlags>()));
-    // Look at the classloader of the class to know if it has been unloaded.
-    // This does not need a read barrier because this is called by GC.
-    ObjPtr<mirror::Object> class_loader =
-        cls->GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>();
-    if (class_loader == nullptr || visitor->IsMarked(class_loader.Ptr()) != nullptr) {
-      // The class loader is live, update the entry if the class has moved.
-      mirror::Class* new_cls = down_cast<mirror::Class*>(visitor->IsMarked(cls));
-      // Note that new_object can be null for CMS and newly allocated objects.
-      if (new_cls != nullptr && new_cls != cls) {
-        *root_ptr = GcRoot<mirror::Class>(new_cls);
-      }
-    } else {
-      // The class loader is not live, clear the entry.
-      *root_ptr = GcRoot<mirror::Class>(update);
-    }
-  }
-}
-
 void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   for (const auto& entry : method_code_map_) {
     uint32_t number_of_roots = 0;
-    uint8_t* roots_data = GetRootTable(entry.first, &number_of_roots);
+    const uint8_t* root_table = GetRootTable(entry.first, &number_of_roots);
+    uint8_t* roots_data = private_region_.IsInDataSpace(root_table)
+        ? private_region_.GetWritableDataAddress(root_table)
+        : shared_region_.GetWritableDataAddress(root_table);
     GcRoot<mirror::Object>* roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
     for (uint32_t i = 0; i < number_of_roots; ++i) {
       // This does not need a read barrier because this is called by GC.
       mirror::Object* object = roots[i].Read<kWithoutReadBarrier>();
-      if (object == nullptr || object == weak_sentinel) {
+      if (object == nullptr || object == Runtime::GetWeakClassSentinel()) {
         // entry got deleted in a previous sweep.
       } else if (object->IsString<kDefaultVerifyFlags>()) {
         mirror::Object* new_object = visitor->IsMarked(object);
@@ -775,8 +441,10 @@
           roots[i] = GcRoot<mirror::Object>(new_object);
         }
       } else {
-        ProcessWeakClass(
-            reinterpret_cast<GcRoot<mirror::Class>*>(&roots[i]), visitor, weak_sentinel);
+        Runtime::ProcessWeakClass(
+            reinterpret_cast<GcRoot<mirror::Class>*>(&roots[i]),
+            visitor,
+            Runtime::GetWeakClassSentinel());
       }
     }
   }
@@ -785,31 +453,28 @@
     for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
       InlineCache* cache = &info->cache_[i];
       for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
-        ProcessWeakClass(&cache->classes_[j], visitor, nullptr);
+        Runtime::ProcessWeakClass(&cache->classes_[j], visitor, nullptr);
       }
     }
   }
 }
 
-void JitCodeCache::FreeCodeAndData(const void* code_ptr) {
+void JitCodeCache::FreeCodeAndData(const void* code_ptr, bool free_debug_info) {
   if (IsInZygoteExecSpace(code_ptr)) {
     // No need to free, this is shared memory.
     return;
   }
   uintptr_t allocation = FromCodeToAllocation(code_ptr);
-  // Notify native debugger that we are about to remove the code.
-  // It does nothing if we are not using native debugger.
-  RemoveNativeDebugInfoForJit(Thread::Current(), code_ptr);
+  if (free_debug_info) {
+    // Remove compressed mini-debug info for the method.
+    // TODO: This is expensive, so we should always do it in the caller in bulk.
+    RemoveNativeDebugInfoForJit(ArrayRef<const void*>(&code_ptr, 1));
+  }
   if (OatQuickMethodHeader::FromCodePointer(code_ptr)->IsOptimized()) {
-    FreeData(GetRootTable(code_ptr));
+    private_region_.FreeData(GetRootTable(code_ptr));
   }  // else this is a JNI stub without any data.
 
-  uint8_t* code_allocation = reinterpret_cast<uint8_t*>(allocation);
-  if (HasDualCodeMapping()) {
-    code_allocation = TranslateAddress(code_allocation, exec_pages_, non_exec_pages_);
-  }
-
-  FreeCode(code_allocation);
+  private_region_.FreeCode(reinterpret_cast<uint8_t*>(allocation));
 }
 
 void JitCodeCache::FreeAllMethodHeaders(
@@ -818,16 +483,25 @@
   // first since once we do FreeCode() below, the memory can be reused
   // so it's possible for the same method_header to start representing
   // different compile code.
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   {
     MutexLock mu2(Thread::Current(), *Locks::cha_lock_);
     Runtime::Current()->GetClassLinker()->GetClassHierarchyAnalysis()
         ->RemoveDependentsWithMethodHeaders(method_headers);
   }
 
-  ScopedCodeCacheWrite scc(this);
+  // Remove compressed mini-debug info for the methods.
+  std::vector<const void*> removed_symbols;
+  removed_symbols.reserve(method_headers.size());
   for (const OatQuickMethodHeader* method_header : method_headers) {
-    FreeCodeAndData(method_header->GetCode());
+    removed_symbols.push_back(method_header->GetCode());
+  }
+  std::sort(removed_symbols.begin(), removed_symbols.end());
+  RemoveNativeDebugInfoForJit(ArrayRef<const void*>(removed_symbols));
+
+  ScopedCodeCacheWrite scc(private_region_);
+  for (const OatQuickMethodHeader* method_header : method_headers) {
+    FreeCodeAndData(method_header->GetCode(), /*free_debug_info=*/ false);
   }
 }
 
@@ -839,12 +513,11 @@
   // the CHA dependency map just once with an unordered_set.
   std::unordered_set<OatQuickMethodHeader*> method_headers;
   {
-    MutexLock mu(self, lock_);
+    MutexLock mu(self, *Locks::jit_lock_);
     // We do not check if a code cache GC is in progress, as this method comes
     // with the classlinker_classes_lock_ held, and suspending ourselves could
     // lead to a deadlock.
     {
-      ScopedCodeCacheWrite scc(this);
       for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
         it->second.RemoveMethodsIn(alloc);
         if (it->second.GetMethods().empty()) {
@@ -877,7 +550,7 @@
       ProfilingInfo* info = *it;
       if (alloc.ContainsUnsafe(info->GetMethod())) {
         info->GetMethod()->SetProfilingInfo(nullptr);
-        FreeData(reinterpret_cast<uint8_t*>(info));
+        private_region_.FreeWritableData(reinterpret_cast<uint8_t*>(info));
         it = profiling_infos_.erase(it);
       } else {
         ++it;
@@ -898,7 +571,7 @@
     return;
   }
   ScopedThreadSuspension sts(self, kWaitingWeakGcRootRead);
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::jit_lock_);
   while (!IsWeakAccessEnabled(self)) {
     inline_cache_cond_.Wait(self);
   }
@@ -906,7 +579,7 @@
 
 void JitCodeCache::BroadcastForInlineCacheAccess() {
   Thread* self = Thread::Current();
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::jit_lock_);
   inline_cache_cond_.Broadcast(self);
 }
 
@@ -951,141 +624,56 @@
 
 void JitCodeCache::WaitForPotentialCollectionToCompleteRunnable(Thread* self) {
   while (collection_in_progress_) {
-    lock_.Unlock(self);
+    Locks::jit_lock_->Unlock(self);
     {
       ScopedThreadSuspension sts(self, kSuspended);
-      MutexLock mu(self, lock_);
+      MutexLock mu(self, *Locks::jit_lock_);
       WaitForPotentialCollectionToComplete(self);
     }
-    lock_.Lock(self);
+    Locks::jit_lock_->Lock(self);
   }
 }
 
-const MemMap* JitCodeCache::GetUpdatableCodeMapping() const {
-  if (HasDualCodeMapping()) {
-    return &non_exec_pages_;
-  } else if (HasCodeMapping()) {
-    return &exec_pages_;
-  } else {
-    return nullptr;
-  }
-}
-
-uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
-                                          ArtMethod* method,
-                                          uint8_t* stack_map,
-                                          uint8_t* roots_data,
-                                          const uint8_t* code,
-                                          size_t code_size,
-                                          size_t data_size,
-                                          bool osr,
-                                          const std::vector<Handle<mirror::Object>>& roots,
-                                          bool has_should_deoptimize_flag,
-                                          const ArenaSet<ArtMethod*>&
-                                              cha_single_implementation_list) {
+bool JitCodeCache::Commit(Thread* self,
+                          JitMemoryRegion* region,
+                          ArtMethod* method,
+                          ArrayRef<const uint8_t> reserved_code,
+                          ArrayRef<const uint8_t> code,
+                          ArrayRef<const uint8_t> reserved_data,
+                          const std::vector<Handle<mirror::Object>>& roots,
+                          ArrayRef<const uint8_t> stack_map,
+                          bool osr,
+                          bool has_should_deoptimize_flag,
+                          const ArenaSet<ArtMethod*>& cha_single_implementation_list) {
   DCHECK(!method->IsNative() || !osr);
 
   if (!method->IsNative()) {
     // We need to do this before grabbing the lock_ because it needs to be able to see the string
     // InternTable. Native methods do not have roots.
-    DCheckRootsAreValid(roots);
+    DCheckRootsAreValid(roots, IsSharedRegion(*region));
   }
 
-  OatQuickMethodHeader* method_header = nullptr;
-  uint8_t* nox_memory = nullptr;
-  uint8_t* code_ptr = nullptr;
+  const uint8_t* roots_data = reserved_data.data();
+  size_t root_table_size = ComputeRootTableSize(roots.size());
+  const uint8_t* stack_map_data = roots_data + root_table_size;
 
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::jit_lock_);
   // We need to make sure that there will be no jit-gcs going on and wait for any ongoing one to
   // finish.
   WaitForPotentialCollectionToCompleteRunnable(self);
-  {
-    ScopedCodeCacheWrite scc(this);
-
-    size_t alignment = GetJitCodeAlignment();
-    // Ensure the header ends up at expected instruction alignment.
-    size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
-    size_t total_size = header_size + code_size;
-
-    // AllocateCode allocates memory in non-executable region for alignment header and code. The
-    // header size may include alignment padding.
-    nox_memory = AllocateCode(total_size);
-    if (nox_memory == nullptr) {
-      return nullptr;
-    }
-
-    // code_ptr points to non-executable code.
-    code_ptr = nox_memory + header_size;
-    std::copy(code, code + code_size, code_ptr);
-    method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
-
-    // From here code_ptr points to executable code.
-    if (HasDualCodeMapping()) {
-      code_ptr = TranslateAddress(code_ptr, non_exec_pages_, exec_pages_);
-    }
-
-    new (method_header) OatQuickMethodHeader(
-        (stack_map != nullptr) ? code_ptr - stack_map : 0u,
-        code_size);
-
-    DCHECK(!Runtime::Current()->IsAotCompiler());
-    if (has_should_deoptimize_flag) {
-      method_header->SetHasShouldDeoptimizeFlag();
-    }
-
-    // Update method_header pointer to executable code region.
-    if (HasDualCodeMapping()) {
-      method_header = TranslateAddress(method_header, non_exec_pages_, exec_pages_);
-    }
-
-    // Both instruction and data caches need flushing to the point of unification where both share
-    // a common view of memory. Flushing the data cache ensures the dirty cachelines from the
-    // newly added code are written out to the point of unification. Flushing the instruction
-    // cache ensures the newly written code will be fetched from the point of unification before
-    // use. Memory in the code cache is re-cycled as code is added and removed. The flushes
-    // prevent stale code from residing in the instruction cache.
-    //
-    // Caches are flushed before write permission is removed because some ARMv8 Qualcomm kernels
-    // may trigger a segfault if a page fault occurs when requesting a cache maintenance
-    // operation. This is a kernel bug that we need to work around until affected devices
-    // (e.g. Nexus 5X and 6P) stop being supported or their kernels are fixed.
-    //
-    // For reference, this behavior is caused by this commit:
-    // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
-    //
-    bool cache_flush_success = true;
-    if (HasDualCodeMapping()) {
-      // Flush the data cache lines associated with the non-executable copy of the code just added.
-      cache_flush_success = FlushCpuCaches(nox_memory, nox_memory + total_size);
-    }
-
-    // Invalidate i-cache for the executable mapping.
-    if (cache_flush_success) {
-      uint8_t* x_memory = reinterpret_cast<uint8_t*>(FromCodeToAllocation(code_ptr));
-      cache_flush_success = FlushCpuCaches(x_memory, x_memory + total_size);
-    }
-
-    // If flushing the cache has failed, reject the allocation because we can't guarantee
-    // correctness of the instructions present in the processor caches.
-    if (!cache_flush_success) {
-      PLOG(ERROR) << "Cache flush failed for JIT code, code not committed.";
-      FreeCode(nox_memory);
-      return nullptr;
-    }
-
-    // Ensure CPU instruction pipelines are flushed for all cores. This is necessary for
-    // correctness as code may still be in instruction pipelines despite the i-cache flush. It is
-    // not safe to assume that changing permissions with mprotect (RX->RWX->RX) will cause a TLB
-    // shootdown (incidentally invalidating the CPU pipelines by sending an IPI to all cores to
-    // notify them of the TLB invalidation). Some architectures, notably ARM and ARM64, have
-    // hardware support that broadcasts TLB invalidations and so their kernels have no software
-    // based TLB shootdown. The sync-core flavor of membarrier was introduced in Linux 4.16 to
-    // address this (see mbarrier(2)). The membarrier here will fail on prior kernels and on
-    // platforms lacking the appropriate support.
-    art::membarrier(art::MembarrierCommand::kPrivateExpeditedSyncCore);
-
-    number_of_compilations_++;
+  const uint8_t* code_ptr = region->CommitCode(
+      reserved_code, code, stack_map_data, has_should_deoptimize_flag);
+  if (code_ptr == nullptr) {
+    return false;
   }
+  OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+
+  // Commit roots and stack maps before updating the entry point.
+  if (!region->CommitData(reserved_data, roots, stack_map)) {
+    return false;
+  }
+
+  number_of_compilations_++;
 
   // We need to update the entry point in the runnable state for the instrumentation.
   {
@@ -1106,9 +694,9 @@
     }
 
     // Discard the code if any single-implementation assumptions are now invalid.
-    if (!single_impl_still_valid) {
+    if (UNLIKELY(!single_impl_still_valid)) {
       VLOG(jit) << "JIT discarded jitted code due to invalid single-implementation assumptions.";
-      return nullptr;
+      return false;
     }
     DCHECK(cha_single_implementation_list.empty() || !Runtime::Current()->IsJavaDebuggable())
         << "Should not be using cha on debuggable apps/runs!";
@@ -1126,47 +714,36 @@
       DCHECK(ContainsElement(data->GetMethods(), method))
           << "Entry inserted in NotifyCompilationOf() should contain this method.";
       data->SetCode(code_ptr);
-      instrumentation::Instrumentation* instrum = Runtime::Current()->GetInstrumentation();
-      for (ArtMethod* m : data->GetMethods()) {
-        if (!class_linker->IsQuickResolutionStub(m->GetEntryPointFromQuickCompiledCode())) {
-          instrum->UpdateMethodsCode(m, method_header->GetEntryPoint());
-        }
-      }
+      data->UpdateEntryPoints(method_header->GetEntryPoint());
     } else {
-      // Fill the root table before updating the entry point.
-      DCHECK_EQ(FromStackMapToRoots(stack_map), roots_data);
-      DCHECK_LE(roots_data, stack_map);
-      FillRootTable(roots_data, roots);
-      {
-        // Flush data cache, as compiled code references literals in it.
-        // TODO(oth): establish whether this is necessary.
-        if (!FlushCpuCaches(roots_data, roots_data + data_size)) {
-          PLOG(ERROR) << "Cache flush failed for JIT data, code not committed.";
-          ScopedCodeCacheWrite scc(this);
-          FreeCode(nox_memory);
-          return nullptr;
-        }
+      if (method->IsPreCompiled() && IsSharedRegion(*region)) {
+        zygote_map_.Put(code_ptr, method);
+      } else {
+        method_code_map_.Put(code_ptr, method);
       }
-      method_code_map_.Put(code_ptr, method);
       if (osr) {
         number_of_osr_compilations_++;
         osr_code_map_.Put(method, code_ptr);
-      } else if (class_linker->IsQuickResolutionStub(
-          method->GetEntryPointFromQuickCompiledCode())) {
+      } else if (NeedsClinitCheckBeforeCall(method) &&
+                 !method->GetDeclaringClass()->IsVisiblyInitialized()) {
         // This situation currently only occurs in the jit-zygote mode.
-        DCHECK(Runtime::Current()->IsZygote());
-        DCHECK(Runtime::Current()->IsUsingApexBootImageLocation());
-        DCHECK(method->GetProfilingInfo(kRuntimePointerSize) != nullptr);
-        DCHECK(method->GetDeclaringClass()->GetClassLoader() == nullptr);
-        // Save the entrypoint, so it can be fethed later once the class is
-        // initialized.
-        method->GetProfilingInfo(kRuntimePointerSize)->SetSavedEntryPoint(
-            method_header->GetEntryPoint());
+        DCHECK(!garbage_collect_code_);
+        DCHECK(method->IsPreCompiled());
+        // The shared region can easily be queried. For the private region, we
+        // use a side map.
+        if (!IsSharedRegion(*region)) {
+          saved_compiled_methods_map_.Put(method, code_ptr);
+        }
       } else {
         Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
             method, method_header->GetEntryPoint());
       }
     }
+    if (collection_in_progress_) {
+      // We need to update the live bitmap if there is a GC to ensure it sees this new
+      // code.
+      GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr));
+    }
     VLOG(jit)
         << "JIT added (osr=" << std::boolalpha << osr << std::noboolalpha << ") "
         << ArtMethod::PrettyMethod(method) << "@" << method
@@ -1175,20 +752,13 @@
         << reinterpret_cast<const void*>(method_header->GetEntryPoint()) << ","
         << reinterpret_cast<const void*>(method_header->GetEntryPoint() +
                                          method_header->GetCodeSize());
-    histogram_code_memory_use_.AddValue(code_size);
-    if (code_size > kCodeSizeLogThreshold) {
-      LOG(INFO) << "JIT allocated "
-                << PrettySize(code_size)
-                << " for compiled code of "
-                << ArtMethod::PrettyMethod(method);
-    }
   }
 
-  return reinterpret_cast<uint8_t*>(method_header);
+  return true;
 }
 
 size_t JitCodeCache::CodeCacheSize() {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   return CodeCacheSizeLocked();
 }
 
@@ -1196,7 +766,7 @@
   // This function is used only for testing and only with non-native methods.
   CHECK(!method->IsNative());
 
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
 
   bool osr = osr_code_map_.find(method) != osr_code_map_.end();
   bool in_cache = RemoveMethodLocked(method, release_memory);
@@ -1226,7 +796,7 @@
   }
 
   bool in_cache = false;
-  ScopedCodeCacheWrite ccw(this);
+  ScopedCodeCacheWrite ccw(private_region_);
   if (UNLIKELY(method->IsNative())) {
     auto it = jni_stubs_map_.find(JniStubKey(method));
     if (it != jni_stubs_map_.end() && it->second.RemoveMethod(method)) {
@@ -1266,7 +836,7 @@
 // any cached information it has on the method. All threads must be suspended before calling this
 // method. The compiled code for the method (if there is any) must not be in any threads call stack.
 void JitCodeCache::NotifyMethodRedefined(ArtMethod* method) {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   RemoveMethodLocked(method, /* release_memory= */ true);
 }
 
@@ -1277,7 +847,7 @@
 // shouldn't be used since it is no longer logically in the jit code cache.
 // TODO We should add DCHECKS that validate that the JIT is paused when this method is entered.
 void JitCodeCache::MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method) {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   if (old_method->IsNative()) {
     // Update methods in jni_stubs_map_.
     for (auto& entry : jni_stubs_map_) {
@@ -1313,88 +883,112 @@
   }
 }
 
-void JitCodeCache::ClearEntryPointsInZygoteExecSpace() {
-  MutexLock mu(Thread::Current(), lock_);
-  // Iterate over profiling infos to know which methods may have been JITted. Note that
-  // to be JITted, a method must have a profiling info.
-  for (ProfilingInfo* info : profiling_infos_) {
-    ArtMethod* method = info->GetMethod();
-    if (IsInZygoteExecSpace(method->GetEntryPointFromQuickCompiledCode())) {
-      method->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+void JitCodeCache::TransitionToDebuggable() {
+  // Check that none of our methods have an entrypoint in the zygote exec
+  // space (this should be taken care of by
+  // ClassLinker::UpdateEntryPointsClassVisitor.
+  {
+    MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+    if (kIsDebugBuild) {
+      for (const auto& it : method_code_map_) {
+        ArtMethod* method = it.second;
+        DCHECK(!method->IsPreCompiled());
+        DCHECK(!IsInZygoteExecSpace(method->GetEntryPointFromQuickCompiledCode()));
+      }
     }
-    // If zygote does method tracing, or in some configuration where
-    // the JIT zygote does GC, we also need to clear the saved entry point
-    // in the profiling info.
-    if (IsInZygoteExecSpace(info->GetSavedEntryPoint())) {
-      info->SetSavedEntryPoint(nullptr);
+    // Not strictly necessary, but this map is useless now.
+    saved_compiled_methods_map_.clear();
+  }
+  if (kIsDebugBuild) {
+    for (const auto& entry : zygote_map_) {
+      ArtMethod* method = entry.method;
+      if (method != nullptr) {
+        DCHECK(!method->IsPreCompiled());
+        DCHECK(!IsInZygoteExecSpace(method->GetEntryPointFromQuickCompiledCode()));
+      }
     }
   }
 }
 
 size_t JitCodeCache::CodeCacheSizeLocked() {
-  return used_memory_for_code_;
+  return GetCurrentRegion()->GetUsedMemoryForCode();
 }
 
 size_t JitCodeCache::DataCacheSize() {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   return DataCacheSizeLocked();
 }
 
 size_t JitCodeCache::DataCacheSizeLocked() {
-  return used_memory_for_data_;
+  return GetCurrentRegion()->GetUsedMemoryForData();
 }
 
-void JitCodeCache::ClearData(Thread* self,
-                             uint8_t* stack_map_data,
-                             uint8_t* roots_data) {
-  DCHECK_EQ(FromStackMapToRoots(stack_map_data), roots_data);
-  MutexLock mu(self, lock_);
-  FreeData(reinterpret_cast<uint8_t*>(roots_data));
-}
+bool JitCodeCache::Reserve(Thread* self,
+                           JitMemoryRegion* region,
+                           size_t code_size,
+                           size_t stack_map_size,
+                           size_t number_of_roots,
+                           ArtMethod* method,
+                           /*out*/ArrayRef<const uint8_t>* reserved_code,
+                           /*out*/ArrayRef<const uint8_t>* reserved_data) {
+  code_size = OatQuickMethodHeader::InstructionAlignedSize() + code_size;
+  size_t data_size = RoundUp(ComputeRootTableSize(number_of_roots) + stack_map_size, sizeof(void*));
 
-size_t JitCodeCache::ReserveData(Thread* self,
-                                 size_t stack_map_size,
-                                 size_t number_of_roots,
-                                 ArtMethod* method,
-                                 uint8_t** stack_map_data,
-                                 uint8_t** roots_data) {
-  size_t table_size = ComputeRootTableSize(number_of_roots);
-  size_t size = RoundUp(stack_map_size + table_size, sizeof(void*));
-  uint8_t* result = nullptr;
-
-  {
-    ScopedThreadSuspension sts(self, kSuspended);
-    MutexLock mu(self, lock_);
-    WaitForPotentialCollectionToComplete(self);
-    result = AllocateData(size);
+  const uint8_t* code;
+  const uint8_t* data;
+  // We might need to try the allocation twice (with GC in between to free up memory).
+  for (int i = 0; i < 2; i++) {
+    {
+      ScopedThreadSuspension sts(self, kSuspended);
+      MutexLock mu(self, *Locks::jit_lock_);
+      WaitForPotentialCollectionToComplete(self);
+      ScopedCodeCacheWrite ccw(*region);
+      code = region->AllocateCode(code_size);
+      data = region->AllocateData(data_size);
+    }
+    if (code == nullptr || data == nullptr) {
+      Free(self, region, code, data);
+      if (i == 0) {
+        GarbageCollectCache(self);
+        continue;  // Retry after GC.
+      } else {
+        return false;  // Fail.
+      }
+    }
+    break;  // Success.
   }
+  *reserved_code = ArrayRef<const uint8_t>(code, code_size);
+  *reserved_data = ArrayRef<const uint8_t>(data, data_size);
 
-  if (result == nullptr) {
-    // Retry.
-    GarbageCollectCache(self);
-    ScopedThreadSuspension sts(self, kSuspended);
-    MutexLock mu(self, lock_);
-    WaitForPotentialCollectionToComplete(self);
-    result = AllocateData(size);
-  }
-
-  MutexLock mu(self, lock_);
-  histogram_stack_map_memory_use_.AddValue(size);
-  if (size > kStackMapSizeLogThreshold) {
+  MutexLock mu(self, *Locks::jit_lock_);
+  histogram_code_memory_use_.AddValue(code_size);
+  if (code_size > kCodeSizeLogThreshold) {
     LOG(INFO) << "JIT allocated "
-              << PrettySize(size)
+              << PrettySize(code_size)
+              << " for compiled code of "
+              << ArtMethod::PrettyMethod(method);
+  }
+  histogram_stack_map_memory_use_.AddValue(data_size);
+  if (data_size > kStackMapSizeLogThreshold) {
+    LOG(INFO) << "JIT allocated "
+              << PrettySize(data_size)
               << " for stack maps of "
               << ArtMethod::PrettyMethod(method);
   }
-  if (result != nullptr) {
-    *roots_data = result;
-    *stack_map_data = result + table_size;
-    FillRootTableLength(*roots_data, number_of_roots);
-    return size;
-  } else {
-    *roots_data = nullptr;
-    *stack_map_data = nullptr;
-    return 0;
+  return true;
+}
+
+void JitCodeCache::Free(Thread* self,
+                        JitMemoryRegion* region,
+                        const uint8_t* code,
+                        const uint8_t* data) {
+  MutexLock mu(self, *Locks::jit_lock_);
+  ScopedCodeCacheWrite ccw(*region);
+  if (code != nullptr) {
+    region->FreeCode(code);
+  }
+  if (data != nullptr) {
+    region->FreeData(data);
   }
 }
 
@@ -1428,13 +1022,12 @@
       // The stack walking code queries the side instrumentation stack if it
       // sees an instrumentation exit pc, so the JIT code of methods in that stack
       // must have been seen. We sanity check this below.
-      for (const instrumentation::InstrumentationStackFrame& frame
-              : *thread->GetInstrumentationStack()) {
+      for (const auto& it : *thread->GetInstrumentationStack()) {
         // The 'method_' in InstrumentationStackFrame is the one that has return_pc_ in
         // its stack frame, it is not the method owning return_pc_. We just pass null to
         // LookupMethodHeader: the method is only checked against in debug builds.
         OatQuickMethodHeader* method_header =
-            code_cache_->LookupMethodHeader(frame.return_pc_, /* method= */ nullptr);
+            code_cache_->LookupMethodHeader(it.second.return_pc_, /* method= */ nullptr);
         if (method_header != nullptr) {
           const void* code = method_header->GetCode();
           CHECK(bitmap_->Test(FromCodeToAllocation(code)));
@@ -1455,40 +1048,6 @@
   lock_cond_.Broadcast(self);
 }
 
-void JitCodeCache::SetFootprintLimit(size_t new_footprint) {
-  size_t data_space_footprint = new_footprint / kCodeAndDataCapacityDivider;
-  DCHECK(IsAlignedParam(data_space_footprint, kPageSize));
-  DCHECK_EQ(data_space_footprint * kCodeAndDataCapacityDivider, new_footprint);
-  mspace_set_footprint_limit(data_mspace_, data_space_footprint);
-  if (HasCodeMapping()) {
-    ScopedCodeCacheWrite scc(this);
-    mspace_set_footprint_limit(exec_mspace_, new_footprint - data_space_footprint);
-  }
-}
-
-bool JitCodeCache::IncreaseCodeCacheCapacity() {
-  if (current_capacity_ == max_capacity_) {
-    return false;
-  }
-
-  // Double the capacity if we're below 1MB, or increase it by 1MB if
-  // we're above.
-  if (current_capacity_ < 1 * MB) {
-    current_capacity_ *= 2;
-  } else {
-    current_capacity_ += 1 * MB;
-  }
-  if (current_capacity_ > max_capacity_) {
-    current_capacity_ = max_capacity_;
-  }
-
-  VLOG(jit) << "Increasing code cache capacity to " << PrettySize(current_capacity_);
-
-  SetFootprintLimit(current_capacity_);
-
-  return true;
-}
-
 void JitCodeCache::MarkCompiledCodeOnThreadStacks(Thread* self) {
   Barrier barrier(0);
   size_t threads_running_checkpoint = 0;
@@ -1503,10 +1062,10 @@
 }
 
 bool JitCodeCache::ShouldDoFullCollection() {
-  if (current_capacity_ == max_capacity_) {
+  if (private_region_.GetCurrentCapacity() == private_region_.GetMaxCapacity()) {
     // Always do a full collection when the code cache is full.
     return true;
-  } else if (current_capacity_ < kReservedCapacity) {
+  } else if (private_region_.GetCurrentCapacity() < kReservedCapacity) {
     // Always do partial collection when the code cache size is below the reserved
     // capacity.
     return false;
@@ -1524,9 +1083,9 @@
   // Wait for an existing collection, or let everyone know we are starting one.
   {
     ScopedThreadSuspension sts(self, kSuspended);
-    MutexLock mu(self, lock_);
+    MutexLock mu(self, *Locks::jit_lock_);
     if (!garbage_collect_code_) {
-      IncreaseCodeCacheCapacity();
+      private_region_.IncreaseCodeCacheCapacity();
       return;
     } else if (WaitForPotentialCollectionToComplete(self)) {
       return;
@@ -1534,8 +1093,9 @@
       number_of_collections_++;
       live_bitmap_.reset(CodeCacheBitmap::Create(
           "code-cache-bitmap",
-          reinterpret_cast<uintptr_t>(exec_pages_.Begin()),
-          reinterpret_cast<uintptr_t>(exec_pages_.Begin() + current_capacity_ / 2)));
+          reinterpret_cast<uintptr_t>(private_region_.GetExecPages()->Begin()),
+          reinterpret_cast<uintptr_t>(
+              private_region_.GetExecPages()->Begin() + private_region_.GetCurrentCapacity() / 2)));
       collection_in_progress_ = true;
     }
   }
@@ -1546,7 +1106,7 @@
 
     bool do_full_collection = false;
     {
-      MutexLock mu(self, lock_);
+      MutexLock mu(self, *Locks::jit_lock_);
       do_full_collection = ShouldDoFullCollection();
     }
 
@@ -1563,7 +1123,7 @@
               << ", data=" << PrettySize(DataCacheSize());
 
     {
-      MutexLock mu(self, lock_);
+      MutexLock mu(self, *Locks::jit_lock_);
 
       // Increase the code cache only when we do partial collections.
       // TODO: base this strategy on how full the code cache is?
@@ -1571,30 +1131,34 @@
         last_collection_increased_code_cache_ = false;
       } else {
         last_collection_increased_code_cache_ = true;
-        IncreaseCodeCacheCapacity();
+        private_region_.IncreaseCodeCacheCapacity();
       }
 
       bool next_collection_will_be_full = ShouldDoFullCollection();
 
       // Start polling the liveness of compiled code to prepare for the next full collection.
       if (next_collection_will_be_full) {
-        // Save the entry point of methods we have compiled, and update the entry
-        // point of those methods to the interpreter. If the method is invoked, the
-        // interpreter will update its entry point to the compiled code and call it.
-        for (ProfilingInfo* info : profiling_infos_) {
-          const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
-          if (!IsInZygoteDataSpace(info) && ContainsPc(entry_point)) {
-            info->SetSavedEntryPoint(entry_point);
-            // Don't call Instrumentation::UpdateMethodsCode(), as it can check the declaring
-            // class of the method. We may be concurrently running a GC which makes accessing
-            // the class unsafe. We know it is OK to bypass the instrumentation as we've just
-            // checked that the current entry point is JIT compiled code.
-            info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+        if (Runtime::Current()->GetJITOptions()->CanCompileBaseline()) {
+          for (ProfilingInfo* info : profiling_infos_) {
+            info->SetBaselineHotnessCount(0);
+          }
+        } else {
+          // Save the entry point of methods we have compiled, and update the entry
+          // point of those methods to the interpreter. If the method is invoked, the
+          // interpreter will update its entry point to the compiled code and call it.
+          for (ProfilingInfo* info : profiling_infos_) {
+            const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+            if (!IsInZygoteDataSpace(info) && ContainsPc(entry_point)) {
+              info->SetSavedEntryPoint(entry_point);
+              // Don't call Instrumentation::UpdateMethodsCode(), as it can check the declaring
+              // class of the method. We may be concurrently running a GC which makes accessing
+              // the class unsafe. We know it is OK to bypass the instrumentation as we've just
+              // checked that the current entry point is JIT compiled code.
+              info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+            }
           }
         }
 
-        DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
-
         // Change entry points of native methods back to the GenericJNI entrypoint.
         for (const auto& entry : jni_stubs_map_) {
           const JniStubData& data = entry.second;
@@ -1625,8 +1189,7 @@
   ScopedTrace trace(__FUNCTION__);
   std::unordered_set<OatQuickMethodHeader*> method_headers;
   {
-    MutexLock mu(self, lock_);
-    ScopedCodeCacheWrite scc(this);
+    MutexLock mu(self, *Locks::jit_lock_);
     // Iterate over all compiled code and remove entries that are not marked.
     for (auto it = jni_stubs_map_.begin(); it != jni_stubs_map_.end();) {
       JniStubData* data = &it->second;
@@ -1655,13 +1218,13 @@
 }
 
 bool JitCodeCache::GetGarbageCollectCode() {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   return garbage_collect_code_;
 }
 
 void JitCodeCache::SetGarbageCollectCode(bool value) {
   Thread* self = Thread::Current();
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::jit_lock_);
   if (garbage_collect_code_ != value) {
     if (garbage_collect_code_) {
       // When dynamically disabling the garbage collection, we neee
@@ -1680,29 +1243,51 @@
 void JitCodeCache::DoCollection(Thread* self, bool collect_profiling_info) {
   ScopedTrace trace(__FUNCTION__);
   {
-    MutexLock mu(self, lock_);
-    if (collect_profiling_info) {
-      // Clear the profiling info of methods that do not have compiled code as entrypoint.
-      // Also remove the saved entry point from the ProfilingInfo objects.
-      for (ProfilingInfo* info : profiling_infos_) {
-        const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
-        if (!ContainsPc(ptr) && !info->IsInUseByCompiler() && !IsInZygoteDataSpace(info)) {
-          info->GetMethod()->SetProfilingInfo(nullptr);
-        }
+    MutexLock mu(self, *Locks::jit_lock_);
 
-        if (info->GetSavedEntryPoint() != nullptr) {
-          info->SetSavedEntryPoint(nullptr);
-          // We are going to move this method back to interpreter. Clear the counter now to
-          // give it a chance to be hot again.
-          ClearMethodCounter(info->GetMethod(), /*was_warm=*/ true);
+    if (Runtime::Current()->GetJITOptions()->CanCompileBaseline()) {
+      // Update to interpreter the methods that have baseline entrypoints and whose baseline
+      // hotness count is zero.
+      // Note that these methods may be in thread stack or concurrently revived
+      // between. That's OK, as the thread executing it will mark it.
+      for (ProfilingInfo* info : profiling_infos_) {
+        if (info->GetBaselineHotnessCount() == 0) {
+          const void* entry_point = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+          if (ContainsPc(entry_point)) {
+            OatQuickMethodHeader* method_header =
+                OatQuickMethodHeader::FromEntryPoint(entry_point);
+            if (CodeInfo::IsBaseline(method_header->GetOptimizedCodeInfoPtr())) {
+              info->GetMethod()->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+            }
+          }
         }
       }
-    } else if (kIsDebugBuild) {
-      // Sanity check that the profiling infos do not have a dangling entry point.
-      for (ProfilingInfo* info : profiling_infos_) {
-        DCHECK(!Runtime::Current()->IsZygote());
-        const void* entry_point = info->GetSavedEntryPoint();
-        DCHECK(entry_point == nullptr || IsInZygoteExecSpace(entry_point));
+      // TODO: collect profiling info
+      // TODO: collect optimized code?
+    } else {
+      if (collect_profiling_info) {
+        // Clear the profiling info of methods that do not have compiled code as entrypoint.
+        // Also remove the saved entry point from the ProfilingInfo objects.
+        for (ProfilingInfo* info : profiling_infos_) {
+          const void* ptr = info->GetMethod()->GetEntryPointFromQuickCompiledCode();
+          if (!ContainsPc(ptr) && !info->IsInUseByCompiler() && !IsInZygoteDataSpace(info)) {
+            info->GetMethod()->SetProfilingInfo(nullptr);
+          }
+
+          if (info->GetSavedEntryPoint() != nullptr) {
+            info->SetSavedEntryPoint(nullptr);
+            // We are going to move this method back to interpreter. Clear the counter now to
+            // give it a chance to be hot again.
+            ClearMethodCounter(info->GetMethod(), /*was_warm=*/ true);
+          }
+        }
+      } else if (kIsDebugBuild) {
+        // Sanity check that the profiling infos do not have a dangling entry point.
+        for (ProfilingInfo* info : profiling_infos_) {
+          DCHECK(!Runtime::Current()->IsZygote());
+          const void* entry_point = info->GetSavedEntryPoint();
+          DCHECK(entry_point == nullptr || IsInZygoteExecSpace(entry_point));
+        }
       }
     }
 
@@ -1750,7 +1335,7 @@
   RemoveUnmarkedCode(self);
 
   if (collect_profiling_info) {
-    MutexLock mu(self, lock_);
+    MutexLock mu(self, *Locks::jit_lock_);
     // Free all profiling infos of methods not compiled nor being compiled.
     auto profiling_kept_end = std::remove_if(profiling_infos_.begin(), profiling_infos_.end(),
       [this] (ProfilingInfo* info) NO_THREAD_SAFETY_ANALYSIS {
@@ -1766,36 +1351,15 @@
           info->GetMethod()->SetProfilingInfo(info);
         } else if (info->GetMethod()->GetProfilingInfo(kRuntimePointerSize) != info) {
           // No need for this ProfilingInfo object anymore.
-          FreeData(reinterpret_cast<uint8_t*>(info));
+          private_region_.FreeWritableData(reinterpret_cast<uint8_t*>(info));
           return true;
         }
         return false;
       });
     profiling_infos_.erase(profiling_kept_end, profiling_infos_.end());
-    DCHECK(CheckLiveCompiledCodeHasProfilingInfo());
   }
 }
 
-bool JitCodeCache::CheckLiveCompiledCodeHasProfilingInfo() {
-  ScopedTrace trace(__FUNCTION__);
-  // Check that methods we have compiled do have a ProfilingInfo object. We would
-  // have memory leaks of compiled code otherwise.
-  for (const auto& it : method_code_map_) {
-    ArtMethod* method = it.second;
-    if (method->GetProfilingInfo(kRuntimePointerSize) == nullptr) {
-      const void* code_ptr = it.first;
-      const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
-      if (method_header->GetEntryPoint() == method->GetEntryPointFromQuickCompiledCode()) {
-        // If the code is not dead, then we have a problem. Note that this can even
-        // happen just after a collection, as mutator threads are running in parallel
-        // and could deoptimize an existing compiled code.
-        return false;
-      }
-    }
-  }
-  return true;
-}
-
 OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
   static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
   if (kRuntimeISA == InstructionSet::kArm) {
@@ -1811,7 +1375,7 @@
     CHECK(method != nullptr);
   }
 
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   OatQuickMethodHeader* method_header = nullptr;
   ArtMethod* found_method = nullptr;  // Only for DCHECK(), not for JNI stubs.
   if (method != nullptr && UNLIKELY(method->IsNative())) {
@@ -1825,6 +1389,12 @@
       return nullptr;
     }
   } else {
+    if (shared_region_.IsInExecSpace(reinterpret_cast<const void*>(pc))) {
+      const void* code_ptr = zygote_map_.GetCodeFor(method, pc);
+      if (code_ptr != nullptr) {
+        return OatQuickMethodHeader::FromCodePointer(code_ptr);
+      }
+    }
     auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
     if (it != method_code_map_.begin()) {
       --it;
@@ -1851,24 +1421,16 @@
   }
 
   if (kIsDebugBuild && method != nullptr && !method->IsNative()) {
-    // When we are walking the stack to redefine classes and creating obsolete methods it is
-    // possible that we might have updated the method_code_map by making this method obsolete in a
-    // previous frame. Therefore we should just check that the non-obsolete version of this method
-    // is the one we expect. We change to the non-obsolete versions in the error message since the
-    // obsolete version of the method might not be fully initialized yet. This situation can only
-    // occur when we are in the process of allocating and setting up obsolete methods. Otherwise
-    // method and it->second should be identical. (See openjdkjvmti/ti_redefine.cc for more
-    // information.)
-    DCHECK_EQ(found_method->GetNonObsoleteMethod(), method->GetNonObsoleteMethod())
-        << ArtMethod::PrettyMethod(method->GetNonObsoleteMethod()) << " "
-        << ArtMethod::PrettyMethod(found_method->GetNonObsoleteMethod()) << " "
+    DCHECK_EQ(found_method, method)
+        << ArtMethod::PrettyMethod(method) << " "
+        << ArtMethod::PrettyMethod(found_method) << " "
         << std::hex << pc;
   }
   return method_header;
 }
 
 OatQuickMethodHeader* JitCodeCache::LookupOsrMethodHeader(ArtMethod* method) {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   auto it = osr_code_map_.find(method);
   if (it == osr_code_map_.end()) {
     return nullptr;
@@ -1882,23 +1444,24 @@
                                               bool retry_allocation)
     // No thread safety analysis as we are using TryLock/Unlock explicitly.
     NO_THREAD_SAFETY_ANALYSIS {
+  DCHECK(CanAllocateProfilingInfo());
   ProfilingInfo* info = nullptr;
   if (!retry_allocation) {
     // If we are allocating for the interpreter, just try to lock, to avoid
     // lock contention with the JIT.
-    if (lock_.ExclusiveTryLock(self)) {
+    if (Locks::jit_lock_->ExclusiveTryLock(self)) {
       info = AddProfilingInfoInternal(self, method, entries);
-      lock_.ExclusiveUnlock(self);
+      Locks::jit_lock_->ExclusiveUnlock(self);
     }
   } else {
     {
-      MutexLock mu(self, lock_);
+      MutexLock mu(self, *Locks::jit_lock_);
       info = AddProfilingInfoInternal(self, method, entries);
     }
 
     if (info == nullptr) {
       GarbageCollectCache(self);
-      MutexLock mu(self, lock_);
+      MutexLock mu(self, *Locks::jit_lock_);
       info = AddProfilingInfoInternal(self, method, entries);
     }
   }
@@ -1918,11 +1481,12 @@
     return info;
   }
 
-  uint8_t* data = AllocateData(profile_info_size);
+  const uint8_t* data = private_region_.AllocateData(profile_info_size);
   if (data == nullptr) {
     return nullptr;
   }
-  info = new (data) ProfilingInfo(method, entries);
+  uint8_t* writable_data = private_region_.GetWritableDataAddress(data);
+  info = new (writable_data) ProfilingInfo(method, entries);
 
   // Make sure other threads see the data in the profiling info object before the
   // store in the ArtMethod's ProfilingInfo pointer.
@@ -1934,28 +1498,17 @@
   return info;
 }
 
-// NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
-// is already held.
-void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
-  if (mspace == exec_mspace_) {
-    DCHECK(exec_mspace_ != nullptr);
-    const MemMap* const code_pages = GetUpdatableCodeMapping();
-    void* result = code_pages->Begin() + exec_end_;
-    exec_end_ += increment;
-    return result;
-  } else {
-    DCHECK_EQ(data_mspace_, mspace);
-    void* result = data_pages_.Begin() + data_end_;
-    data_end_ += increment;
-    return result;
-  }
+void* JitCodeCache::MoreCore(const void* mspace, intptr_t increment) {
+  return shared_region_.OwnsSpace(mspace)
+      ? shared_region_.MoreCore(mspace, increment)
+      : private_region_.MoreCore(mspace, increment);
 }
 
 void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                                       std::vector<ProfileMethodInfo>& methods) {
   Thread* self = Thread::Current();
   WaitUntilInlineCacheAccessible(self);
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::jit_lock_);
   ScopedTrace trace(__FUNCTION__);
   uint16_t jit_compile_threshold = Runtime::Current()->GetJITOptions()->GetCompileThreshold();
   for (const ProfilingInfo* info : profiling_infos_) {
@@ -2036,20 +1589,43 @@
 }
 
 bool JitCodeCache::IsOsrCompiled(ArtMethod* method) {
-  MutexLock mu(Thread::Current(), lock_);
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
   return osr_code_map_.find(method) != osr_code_map_.end();
 }
 
-bool JitCodeCache::NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr) {
-  if (!osr && ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
-    return false;
+bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
+                                       Thread* self,
+                                       bool osr,
+                                       bool prejit,
+                                       bool baseline,
+                                       JitMemoryRegion* region) {
+  const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
+  if (!osr && ContainsPc(existing_entry_point)) {
+    OatQuickMethodHeader* method_header =
+        OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
+    if (CodeInfo::IsBaseline(method_header->GetOptimizedCodeInfoPtr()) == baseline) {
+      VLOG(jit) << "Not compiling "
+                << method->PrettyMethod()
+                << " because it has already been compiled"
+                << " baseline=" << std::boolalpha << baseline;
+      return false;
+    }
   }
 
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  if (class_linker->IsQuickResolutionStub(method->GetEntryPointFromQuickCompiledCode())) {
-    if (!Runtime::Current()->IsUsingApexBootImageLocation() || !Runtime::Current()->IsZygote()) {
-      // Unless we're running as zygote in the jitzygote experiment, we currently don't save
-      // the JIT compiled code if we cannot update the entrypoint due to having the resolution stub.
+  if (NeedsClinitCheckBeforeCall(method) && !prejit) {
+    // We do not need a synchronization barrier for checking the visibly initialized status
+    // or checking the initialized status just for requesting visible initialization.
+    ClassStatus status = method->GetDeclaringClass()
+        ->GetStatus<kDefaultVerifyFlags, /*kWithSynchronizationBarrier=*/ false>();
+    if (status != ClassStatus::kVisiblyInitialized) {
+      // Unless we're pre-jitting, we currently don't save the JIT compiled code if we cannot
+      // update the entrypoint due to needing an initialization check.
+      if (status == ClassStatus::kInitialized) {
+        // Request visible initialization but do not block to allow compiling other methods.
+        // Hopefully, this will complete by the time the method becomes hot again.
+        Runtime::Current()->GetClassLinker()->MakeInitializedClassesVisiblyInitialized(
+            self, /*wait=*/ false);
+      }
       VLOG(jit) << "Not compiling "
                 << method->PrettyMethod()
                 << " because it has the resolution stub";
@@ -2059,12 +1635,15 @@
     }
   }
 
-  MutexLock mu(self, lock_);
-  if (osr && (osr_code_map_.find(method) != osr_code_map_.end())) {
-    return false;
+  if (osr) {
+    MutexLock mu(self, *Locks::jit_lock_);
+    if (osr_code_map_.find(method) != osr_code_map_.end()) {
+      return false;
+    }
   }
 
   if (UNLIKELY(method->IsNative())) {
+    MutexLock mu(self, *Locks::jit_lock_);
     JniStubKey key(method);
     auto it = jni_stubs_map_.find(key);
     bool new_compilation = false;
@@ -2083,14 +1662,7 @@
       // changed these entrypoints to GenericJNI in preparation for a full GC, we may
       // as well change them back as this stub shall not be collected anyway and this
       // can avoid a few expensive GenericJNI calls.
-      instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
-      for (ArtMethod* m : data->GetMethods()) {
-        // Call the dedicated method instead of the more generic UpdateMethodsCode, because
-        // `m` might be in the process of being deleted.
-        if (!class_linker->IsQuickResolutionStub(m->GetEntryPointFromQuickCompiledCode())) {
-          instrumentation->UpdateNativeMethodsCodeToJitCode(m, entrypoint);
-        }
-      }
+      data->UpdateEntryPoints(entrypoint);
       if (collection_in_progress_) {
         if (!IsInZygoteExecSpace(data->GetCode())) {
           GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(data->GetCode()));
@@ -2100,25 +1672,34 @@
     return new_compilation;
   } else {
     ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
+    if (CanAllocateProfilingInfo() && baseline && info == nullptr) {
+      // We can retry allocation here as we're the JIT thread.
+      if (ProfilingInfo::Create(self, method, /* retry_allocation= */ true)) {
+        info = method->GetProfilingInfo(kRuntimePointerSize);
+      }
+    }
     if (info == nullptr) {
-      VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
-      // Because the counter is not atomic, there are some rare cases where we may not hit the
-      // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
-      ClearMethodCounter(method, /*was_warm=*/ false);
-      return false;
+      // When prejitting, we don't allocate a profiling info.
+      if (!prejit && !IsSharedRegion(*region)) {
+        VLOG(jit) << method->PrettyMethod() << " needs a ProfilingInfo to be compiled";
+        // Because the counter is not atomic, there are some rare cases where we may not hit the
+        // threshold for creating the ProfilingInfo. Reset the counter now to "correct" this.
+        ClearMethodCounter(method, /*was_warm=*/ false);
+        return false;
+      }
+    } else {
+      MutexLock mu(self, *Locks::jit_lock_);
+      if (info->IsMethodBeingCompiled(osr)) {
+        return false;
+      }
+      info->SetIsMethodBeingCompiled(true, osr);
     }
-
-    if (info->IsMethodBeingCompiled(osr)) {
-      return false;
-    }
-
-    info->SetIsMethodBeingCompiled(true, osr);
     return true;
   }
 }
 
 ProfilingInfo* JitCodeCache::NotifyCompilerUse(ArtMethod* method, Thread* self) {
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::jit_lock_);
   ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
   if (info != nullptr) {
     if (!info->IncrementInlineUse()) {
@@ -2130,7 +1711,7 @@
 }
 
 void JitCodeCache::DoneCompilerUse(ArtMethod* method, Thread* self) {
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::jit_lock_);
   ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
   DCHECK(info != nullptr);
   info->DecrementInlineUse();
@@ -2138,7 +1719,7 @@
 
 void JitCodeCache::DoneCompiling(ArtMethod* method, Thread* self, bool osr) {
   DCHECK_EQ(Thread::Current(), self);
-  MutexLock mu(self, lock_);
+  MutexLock mu(self, *Locks::jit_lock_);
   if (UNLIKELY(method->IsNative())) {
     auto it = jni_stubs_map_.find(JniStubKey(method));
     DCHECK(it != jni_stubs_map_.end());
@@ -2147,14 +1728,38 @@
     if (UNLIKELY(!data->IsCompiled())) {
       // Failed to compile; the JNI compiler never fails, but the cache may be full.
       jni_stubs_map_.erase(it);  // Remove the entry added in NotifyCompilationOf().
-    }  // else CommitCodeInternal() updated entrypoints of all methods in the JniStubData.
+    }  // else Commit() updated entrypoints of all methods in the JniStubData.
   } else {
     ProfilingInfo* info = method->GetProfilingInfo(kRuntimePointerSize);
-    DCHECK(info->IsMethodBeingCompiled(osr));
-    info->SetIsMethodBeingCompiled(false, osr);
+    if (info != nullptr) {
+      DCHECK(info->IsMethodBeingCompiled(osr));
+      info->SetIsMethodBeingCompiled(false, osr);
+    }
   }
 }
 
+void JitCodeCache::InvalidateAllCompiledCode() {
+  art::MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+  size_t cnt = profiling_infos_.size();
+  size_t osr_size = osr_code_map_.size();
+  for (ProfilingInfo* pi : profiling_infos_) {
+    // NB Due to OSR we might run this on some methods multiple times but this should be fine.
+    ArtMethod* meth = pi->GetMethod();
+    pi->SetSavedEntryPoint(nullptr);
+    // We had a ProfilingInfo so we must be warm.
+    ClearMethodCounter(meth, /*was_warm=*/true);
+    ClassLinker* linker = Runtime::Current()->GetClassLinker();
+    if (meth->IsObsolete()) {
+      linker->SetEntryPointsForObsoleteMethod(meth);
+    } else {
+      linker->SetEntryPointsToInterpreter(meth);
+    }
+  }
+  osr_code_map_.clear();
+  VLOG(jit) << "Invalidated the compiled code of " << (cnt - osr_size) << " methods and "
+            << osr_size << " OSRs.";
+}
+
 void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
                                              const OatQuickMethodHeader* header) {
   DCHECK(!method->IsNative());
@@ -2177,58 +1782,39 @@
         method, GetQuickToInterpreterBridge());
     ClearMethodCounter(method, /*was_warm=*/ profiling_info != nullptr);
   } else {
-    MutexLock mu(Thread::Current(), lock_);
+    MutexLock mu(Thread::Current(), *Locks::jit_lock_);
     auto it = osr_code_map_.find(method);
     if (it != osr_code_map_.end() && OatQuickMethodHeader::FromCodePointer(it->second) == header) {
       // Remove the OSR method, to avoid using it again.
       osr_code_map_.erase(it);
     }
   }
-}
 
-uint8_t* JitCodeCache::AllocateCode(size_t allocation_size) {
-  // Each allocation should be on its own set of cache lines. The allocation must be large enough
-  // for header, code, and any padding.
-  size_t alignment = GetJitCodeAlignment();
-  uint8_t* result = reinterpret_cast<uint8_t*>(
-      mspace_memalign(exec_mspace_, alignment, allocation_size));
-  size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
-  // Ensure the header ends up at expected instruction alignment.
-  DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(result + header_size), alignment);
-  used_memory_for_code_ += mspace_usable_size(result);
-  return result;
-}
-
-void JitCodeCache::FreeCode(uint8_t* code) {
-  if (IsInZygoteExecSpace(code)) {
-    // No need to free, this is shared memory.
-    return;
+  // In case the method was pre-compiled, clear that information so we
+  // can recompile it ourselves.
+  if (method->IsPreCompiled()) {
+    method->ClearPreCompiled();
   }
-  used_memory_for_code_ -= mspace_usable_size(code);
-  mspace_free(exec_mspace_, code);
-}
-
-uint8_t* JitCodeCache::AllocateData(size_t data_size) {
-  void* result = mspace_malloc(data_mspace_, data_size);
-  used_memory_for_data_ += mspace_usable_size(result);
-  return reinterpret_cast<uint8_t*>(result);
-}
-
-void JitCodeCache::FreeData(uint8_t* data) {
-  if (IsInZygoteDataSpace(data)) {
-    // No need to free, this is shared memory.
-    return;
-  }
-  used_memory_for_data_ -= mspace_usable_size(data);
-  mspace_free(data_mspace_, data);
 }
 
 void JitCodeCache::Dump(std::ostream& os) {
-  MutexLock mu(Thread::Current(), lock_);
-  os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n"
-     << "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n"
-     << "Current JIT mini-debug-info size: " << PrettySize(GetJitMiniDebugInfoMemUsage()) << "\n"
-     << "Current JIT capacity: " << PrettySize(current_capacity_) << "\n"
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+  os << "Current JIT code cache size (used / resident): "
+     << GetCurrentRegion()->GetUsedMemoryForCode() / KB << "KB / "
+     << GetCurrentRegion()->GetResidentMemoryForCode() / KB << "KB\n"
+     << "Current JIT data cache size (used / resident): "
+     << GetCurrentRegion()->GetUsedMemoryForData() / KB << "KB / "
+     << GetCurrentRegion()->GetResidentMemoryForData() / KB << "KB\n";
+  if (!Runtime::Current()->IsZygote()) {
+    os << "Zygote JIT code cache size (at point of fork): "
+       << shared_region_.GetUsedMemoryForCode() / KB << "KB / "
+       << shared_region_.GetResidentMemoryForCode() / KB << "KB\n"
+       << "Zygote JIT data cache size (at point of fork): "
+       << shared_region_.GetUsedMemoryForData() / KB << "KB / "
+       << shared_region_.GetResidentMemoryForData() / KB << "KB\n";
+  }
+  os << "Current JIT mini-debug-info size: " << PrettySize(GetJitMiniDebugInfoMemUsage()) << "\n"
+     << "Current JIT capacity: " << PrettySize(GetCurrentRegion()->GetCurrentCapacity()) << "\n"
      << "Current number of JIT JNI stub entries: " << jni_stubs_map_.size() << "\n"
      << "Current number of JIT code cache entries: " << method_code_map_.size() << "\n"
      << "Total number of JIT compilations: " << number_of_compilations_ << "\n"
@@ -2241,29 +1827,144 @@
 }
 
 void JitCodeCache::PostForkChildAction(bool is_system_server, bool is_zygote) {
-  if (is_zygote) {
-    // Don't transition if this is for a child zygote.
+  Thread* self = Thread::Current();
+
+  // Remove potential tasks that have been inherited from the zygote.
+  // We do this now and not in Jit::PostForkChildAction, as system server calls
+  // JitCodeCache::PostForkChildAction first, and then does some code loading
+  // that may result in new JIT tasks that we want to keep.
+  ThreadPool* pool = Runtime::Current()->GetJit()->GetThreadPool();
+  if (pool != nullptr) {
+    pool->RemoveAllTasks(self);
+  }
+
+  MutexLock mu(self, *Locks::jit_lock_);
+
+  // Reset potential writable MemMaps inherited from the zygote. We never want
+  // to write to them.
+  shared_region_.ResetWritableMappings();
+
+  if (is_zygote || Runtime::Current()->IsSafeMode()) {
+    // Don't create a private region for a child zygote. Regions are usually map shared
+    // (to satisfy dual-view), and we don't want children of a child zygote to inherit it.
     return;
   }
-  MutexLock mu(Thread::Current(), lock_);
 
-  zygote_data_pages_ = std::move(data_pages_);
-  zygote_exec_pages_ = std::move(exec_pages_);
-  zygote_data_mspace_ = data_mspace_;
-  zygote_exec_mspace_ = exec_mspace_;
+  // Reset all statistics to be specific to this process.
+  number_of_compilations_ = 0;
+  number_of_osr_compilations_ = 0;
+  number_of_collections_ = 0;
+  histogram_stack_map_memory_use_.Reset();
+  histogram_code_memory_use_.Reset();
+  histogram_profiling_info_memory_use_.Reset();
 
   size_t initial_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheInitialCapacity();
   size_t max_capacity = Runtime::Current()->GetJITOptions()->GetCodeCacheMaxCapacity();
-
-  InitializeState(initial_capacity, max_capacity);
-
   std::string error_msg;
-  if (!InitializeMappings(/* rwx_memory_allowed= */ !is_system_server, is_zygote, &error_msg)) {
-    LOG(WARNING) << "Could not reset JIT state after zygote fork: " << error_msg;
+  if (!private_region_.Initialize(initial_capacity,
+                                  max_capacity,
+                                  /* rwx_memory_allowed= */ !is_system_server,
+                                  is_zygote,
+                                  &error_msg)) {
+    LOG(WARNING) << "Could not create private region after zygote fork: " << error_msg;
+  }
+}
+
+JitMemoryRegion* JitCodeCache::GetCurrentRegion() {
+  return Runtime::Current()->IsZygote() ? &shared_region_ : &private_region_;
+}
+
+void ZygoteMap::Initialize(uint32_t number_of_methods) {
+  MutexLock mu(Thread::Current(), *Locks::jit_lock_);
+  // Allocate for 40-80% capacity. This will offer OK lookup times, and termination
+  // cases.
+  size_t capacity = RoundUpToPowerOfTwo(number_of_methods * 100 / 80);
+  const uint8_t* memory = region_->AllocateData(
+      capacity * sizeof(Entry) + sizeof(ZygoteCompilationState));
+  if (memory == nullptr) {
+    LOG(WARNING) << "Could not allocate data for the zygote map";
     return;
   }
+  const Entry* data = reinterpret_cast<const Entry*>(memory);
+  region_->FillData(data, capacity, Entry { nullptr, nullptr });
+  map_ = ArrayRef(data, capacity);
+  compilation_state_ = reinterpret_cast<const ZygoteCompilationState*>(
+      memory + capacity * sizeof(Entry));
+  region_->WriteData(compilation_state_, ZygoteCompilationState::kInProgress);
+}
 
-  InitializeSpaces();
+const void* ZygoteMap::GetCodeFor(ArtMethod* method, uintptr_t pc) const {
+  if (map_.empty()) {
+    return nullptr;
+  }
+
+  if (method == nullptr) {
+    // Do a linear search. This should only be used in debug builds.
+    CHECK(kIsDebugBuild);
+    for (const Entry& entry : map_) {
+      const void* code_ptr = entry.code_ptr;
+      if (code_ptr != nullptr) {
+        OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+        if (method_header->Contains(pc)) {
+          return code_ptr;
+        }
+      }
+    }
+    return nullptr;
+  }
+
+  std::hash<ArtMethod*> hf;
+  size_t index = hf(method) & (map_.size() - 1u);
+  size_t original_index = index;
+  // Loop over the array: we know this loop terminates as we will either
+  // encounter the given method, or a null entry. Both terminate the loop.
+  // Note that the zygote may concurrently write new entries to the map. That's OK as the
+  // map is never resized.
+  while (true) {
+    const Entry& entry = map_[index];
+    if (entry.method == nullptr) {
+      // Not compiled yet.
+      return nullptr;
+    }
+    if (entry.method == method) {
+      if (entry.code_ptr == nullptr) {
+        // This is a race with the zygote which wrote the method, but hasn't written the
+        // code. Just bail and wait for the next time we need the method.
+        return nullptr;
+      }
+      if (pc != 0 && !OatQuickMethodHeader::FromCodePointer(entry.code_ptr)->Contains(pc)) {
+        return nullptr;
+      }
+      return entry.code_ptr;
+    }
+    index = (index + 1) & (map_.size() - 1);
+    DCHECK_NE(original_index, index);
+  }
+}
+
+void ZygoteMap::Put(const void* code, ArtMethod* method) {
+  if (map_.empty()) {
+    return;
+  }
+  CHECK(Runtime::Current()->IsZygote());
+  std::hash<ArtMethod*> hf;
+  size_t index = hf(method) & (map_.size() - 1);
+  size_t original_index = index;
+  // Because the size of the map is bigger than the number of methods that will
+  // be added, we are guaranteed to find a free slot in the array, and
+  // therefore for this loop to terminate.
+  while (true) {
+    const Entry* entry = &map_[index];
+    if (entry->method == nullptr) {
+      // Note that readers can read this memory concurrently, but that's OK as
+      // we are writing pointers.
+      region_->WriteData(entry, Entry { method, code });
+      break;
+    }
+    index = (index + 1) & (map_.size() - 1);
+    DCHECK_NE(original_index, index);
+  }
+  DCHECK_EQ(GetCodeFor(method), code);
 }
 
 }  // namespace jit
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index df58f19..f13e05c 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -25,12 +25,14 @@
 #include <vector>
 
 #include "base/arena_containers.h"
+#include "base/array_ref.h"
 #include "base/atomic.h"
 #include "base/histogram.h"
 #include "base/macros.h"
 #include "base/mem_map.h"
 #include "base/mutex.h"
 #include "base/safe_map.h"
+#include "jit_memory_region.h"
 
 namespace art {
 
@@ -72,16 +74,107 @@
 namespace jit {
 
 class MarkCodeClosure;
-class ScopedCodeCacheWrite;
-
-// Number of bytes represented by a bit in the CodeCacheBitmap. Value is reasonable for all
-// architectures.
-static constexpr int kJitCodeAccountingBytes = 16;
 
 // Type of bitmap used for tracking live functions in the JIT code cache for the purposes
 // of garbage collecting code.
 using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAccountingBytes>;
 
+// The state of profile-based compilation in the zygote.
+// - kInProgress:      JIT compilation is happening
+// - kDone:            JIT compilation is finished, and the zygote is preparing notifying
+//                     the other processes.
+// - kNotifiedOk:      the zygote has notified the other processes, which can start
+//                     sharing the boot image method mappings.
+// - kNotifiedFailure: the zygote has notified the other processes, but they
+//                     cannot share the boot image method mappings due to
+//                     unexpected errors
+enum class ZygoteCompilationState : uint8_t {
+  kInProgress = 0,
+  kDone = 1,
+  kNotifiedOk = 2,
+  kNotifiedFailure = 3,
+};
+
+// Class abstraction over a map of ArtMethod -> compiled code, where the
+// ArtMethod are compiled by the zygote, and the map acts as a communication
+// channel between the zygote and the other processes.
+// For the zygote process, this map is the only map it is placing the compiled
+// code. JitCodeCache.method_code_map_ is empty.
+//
+// This map is writable only by the zygote, and readable by all children.
+class ZygoteMap {
+ public:
+  struct Entry {
+    ArtMethod* method;
+    // Note we currently only allocate code in the low 4g, so we could just reserve 4 bytes
+    // for the code pointer. For simplicity and in the case we move to 64bit
+    // addresses for code, just keep it void* for now.
+    const void* code_ptr;
+  };
+
+  explicit ZygoteMap(JitMemoryRegion* region)
+      : map_(), region_(region), compilation_state_(nullptr) {}
+
+  // Initialize the data structure so it can hold `number_of_methods` mappings.
+  // Note that the map is fixed size and never grows.
+  void Initialize(uint32_t number_of_methods) REQUIRES(!Locks::jit_lock_);
+
+  // Add the mapping method -> code.
+  void Put(const void* code, ArtMethod* method) REQUIRES(Locks::jit_lock_);
+
+  // Return the code pointer for the given method. If pc is not zero, check that
+  // the pc falls into that code range. Return null otherwise.
+  const void* GetCodeFor(ArtMethod* method, uintptr_t pc = 0) const;
+
+  // Return whether the map has associated code for the given method.
+  bool ContainsMethod(ArtMethod* method) const {
+    return GetCodeFor(method) != nullptr;
+  }
+
+  void SetCompilationState(ZygoteCompilationState state) {
+    region_->WriteData(compilation_state_, state);
+  }
+
+  bool IsCompilationDoneButNotNotified() const {
+    return compilation_state_ != nullptr && *compilation_state_ == ZygoteCompilationState::kDone;
+  }
+
+  bool IsCompilationNotified() const {
+    return compilation_state_ != nullptr && *compilation_state_ > ZygoteCompilationState::kDone;
+  }
+
+  bool CanMapBootImageMethods() const {
+    return compilation_state_ != nullptr &&
+           *compilation_state_ == ZygoteCompilationState::kNotifiedOk;
+  }
+
+  ArrayRef<const Entry>::const_iterator cbegin() const {
+    return map_.cbegin();
+  }
+  ArrayRef<const Entry>::iterator begin() {
+    return map_.begin();
+  }
+  ArrayRef<const Entry>::const_iterator cend() const {
+    return map_.cend();
+  }
+  ArrayRef<const Entry>::iterator end() {
+    return map_.end();
+  }
+
+ private:
+  // The map allocated with `region_`.
+  ArrayRef<const Entry> map_;
+
+  // The region in which the map is allocated.
+  JitMemoryRegion* const region_;
+
+  // The current state of compilation in the zygote. Starts with kInProgress,
+  // and should end with kNotifiedOk or kNotifiedFailure.
+  const ZygoteCompilationState* compilation_state_;
+
+  DISALLOW_COPY_AND_ASSIGN(ZygoteMap);
+};
+
 class JitCodeCache {
  public:
   static constexpr size_t kMaxCapacity = 64 * MB;
@@ -100,13 +193,18 @@
                               std::string* error_msg);
   ~JitCodeCache();
 
-  bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
+  bool NotifyCompilationOf(ArtMethod* method,
+                           Thread* self,
+                           bool osr,
+                           bool prejit,
+                           bool baseline,
+                           JitMemoryRegion* region)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   void NotifyMethodRedefined(ArtMethod* method)
       REQUIRES(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   // Notify to the code cache that the compiler wants to use the
   // profiling info of `method` to drive optimizations,
@@ -114,96 +212,101 @@
   // collected.
   ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   void DoneCompilerUse(ArtMethod* method, Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
-  // Allocate and write code and its metadata to the code cache.
+  // Return true if the code cache contains this pc.
+  bool ContainsPc(const void* pc) const;
+
+  // Return true if the code cache contains this pc in the private region (i.e. not from zygote).
+  bool PrivateRegionContainsPc(const void* pc) const;
+
+  // Returns true if either the method's entrypoint is JIT compiled code or it is the
+  // instrumentation entrypoint and we can jump to jit code for this method. For testing use only.
+  bool WillExecuteJitCode(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
+
+  // Return true if the code cache contains this method.
+  bool ContainsMethod(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
+
+  // Return the code pointer for a JNI-compiled stub if the method is in the cache, null otherwise.
+  const void* GetJniStubCode(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
+
+  // Allocate a region for both code and data in the JIT code cache.
+  // The reserved memory is left completely uninitialized.
+  bool Reserve(Thread* self,
+               JitMemoryRegion* region,
+               size_t code_size,
+               size_t stack_map_size,
+               size_t number_of_roots,
+               ArtMethod* method,
+               /*out*/ArrayRef<const uint8_t>* reserved_code,
+               /*out*/ArrayRef<const uint8_t>* reserved_data)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(!Locks::jit_lock_);
+
+  // Initialize code and data of previously allocated memory.
+  //
   // `cha_single_implementation_list` needs to be registered via CHA (if it's
   // still valid), since the compiled code still needs to be invalidated if the
   // single-implementation assumptions are violated later. This needs to be done
   // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
   // guard elimination.
-  uint8_t* CommitCode(Thread* self,
-                      ArtMethod* method,
-                      uint8_t* stack_map,
-                      uint8_t* roots_data,
-                      const uint8_t* code,
-                      size_t code_size,
-                      size_t data_size,
-                      bool osr,
-                      const std::vector<Handle<mirror::Object>>& roots,
-                      bool has_should_deoptimize_flag,
-                      const ArenaSet<ArtMethod*>& cha_single_implementation_list)
+  bool Commit(Thread* self,
+              JitMemoryRegion* region,
+              ArtMethod* method,
+              ArrayRef<const uint8_t> reserved_code,  // Uninitialized destination.
+              ArrayRef<const uint8_t> code,           // Compiler output (source).
+              ArrayRef<const uint8_t> reserved_data,  // Uninitialized destination.
+              const std::vector<Handle<mirror::Object>>& roots,
+              ArrayRef<const uint8_t> stack_map,      // Compiler output (source).
+              bool osr,
+              bool has_should_deoptimize_flag,
+              const ArenaSet<ArtMethod*>& cha_single_implementation_list)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
-  // Return true if the code cache contains this pc.
-  bool ContainsPc(const void* pc) const;
-
-  // Returns true if either the method's entrypoint is JIT compiled code or it is the
-  // instrumentation entrypoint and we can jump to jit code for this method. For testing use only.
-  bool WillExecuteJitCode(ArtMethod* method) REQUIRES(!lock_);
-
-  // Return true if the code cache contains this method.
-  bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);
-
-  // Return the code pointer for a JNI-compiled stub if the method is in the cache, null otherwise.
-  const void* GetJniStubCode(ArtMethod* method) REQUIRES(!lock_);
-
-  // Allocate a region of data that contain `size` bytes, and potentially space
-  // for storing `number_of_roots` roots. Returns null if there is no more room.
-  // Return the number of bytes allocated.
-  size_t ReserveData(Thread* self,
-                     size_t stack_map_size,
-                     size_t number_of_roots,
-                     ArtMethod* method,
-                     uint8_t** stack_map_data,
-                     uint8_t** roots_data)
+  // Free the previously allocated memory regions.
+  void Free(Thread* self, JitMemoryRegion* region, const uint8_t* code, const uint8_t* data)
       REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
-
-  // Clear data from the data portion of the code cache.
-  void ClearData(Thread* self, uint8_t* stack_map_data, uint8_t* roots_data)
-      REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!lock_);
+      REQUIRES(!Locks::jit_lock_);
 
   // Perform a collection on the code cache.
   void GarbageCollectCache(Thread* self)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Given the 'pc', try to find the JIT compiled code associated with it.
   // Return null if 'pc' is not in the code cache. 'method' is passed for
   // sanity check.
   OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Removes method from the cache for testing purposes. The caller
   // must ensure that all threads are suspended and the method should
   // not be in any thread's stack.
   bool RemoveMethod(ArtMethod* method, bool release_memory)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES(Locks::mutator_lock_);
 
   // Remove all methods in our cache that were allocated by 'alloc'.
   void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Create a 'ProfileInfo' for 'method'. If 'retry_allocation' is true,
@@ -212,11 +315,11 @@
                                   ArtMethod* method,
                                   const std::vector<uint32_t>& entries,
                                   bool retry_allocation)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
-    return mspace == data_mspace_ || mspace == exec_mspace_;
+    return private_region_.OwnsSpace(mspace) || shared_region_.OwnsSpace(mspace);
   }
 
   void* MoreCore(const void* mspace, intptr_t increment);
@@ -224,290 +327,241 @@
   // Adds to `methods` all profiled methods which are part of any of the given dex locations.
   void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                           std::vector<ProfileMethodInfo>& methods)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  void InvalidateAllCompiledCode()
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  void Dump(std::ostream& os) REQUIRES(!lock_);
+  void Dump(std::ostream& os) REQUIRES(!Locks::jit_lock_);
 
-  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!lock_);
+  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!Locks::jit_lock_);
 
   void SweepRootTables(IsMarkedVisitor* visitor)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // The GC needs to disallow the reading of inline caches when it processes them,
   // to avoid having a class being used while it is being deleted.
-  void AllowInlineCacheAccess() REQUIRES(!lock_);
-  void DisallowInlineCacheAccess() REQUIRES(!lock_);
-  void BroadcastForInlineCacheAccess() REQUIRES(!lock_);
+  void AllowInlineCacheAccess() REQUIRES(!Locks::jit_lock_);
+  void DisallowInlineCacheAccess() REQUIRES(!Locks::jit_lock_);
+  void BroadcastForInlineCacheAccess() REQUIRES(!Locks::jit_lock_);
 
   // Notify the code cache that the method at the pointer 'old_method' is being moved to the pointer
   // 'new_method' since it is being made obsolete.
   void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
-      REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);
+      REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);
 
   // Dynamically change whether we want to garbage collect code.
-  void SetGarbageCollectCode(bool value) REQUIRES(!lock_);
+  void SetGarbageCollectCode(bool value) REQUIRES(!Locks::jit_lock_);
 
-  bool GetGarbageCollectCode() REQUIRES(!lock_);
+  bool GetGarbageCollectCode() REQUIRES(!Locks::jit_lock_);
 
   // Unsafe variant for debug checks.
   bool GetGarbageCollectCodeUnsafe() const NO_THREAD_SAFETY_ANALYSIS {
     return garbage_collect_code_;
   }
+  ZygoteMap* GetZygoteMap() {
+    return &zygote_map_;
+  }
 
   // If Jit-gc has been disabled (and instrumentation has been enabled) this will return the
   // jit-compiled entrypoint for this method.  Otherwise it will return null.
   const void* FindCompiledCodeForInstrumentation(ArtMethod* method)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Fetch the entrypoint that zygote may have saved for a method. The zygote saves an entrypoint
-  // only for the case when the method's declaring class is not initialized.
-  const void* GetZygoteSavedEntryPoint(ArtMethod* method)
-      REQUIRES(!lock_)
+  // Fetch the code of a method that was JITted, but the JIT could not
+  // update its entrypoint due to the resolution trampoline.
+  const void* GetSavedEntryPointOfPreCompiledMethod(ArtMethod* method)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void PostForkChildAction(bool is_system_server, bool is_zygote);
 
   // Clear the entrypoints of JIT compiled methods that belong in the zygote space.
   // This is used for removing non-debuggable JIT code at the point we realize the runtime
-  // is debuggable.
-  void ClearEntryPointsInZygoteExecSpace() REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);
+  // is debuggable. Also clear the Precompiled flag from all methods so the non-debuggable code
+  // doesn't come back.
+  void TransitionToDebuggable() REQUIRES(!Locks::jit_lock_) REQUIRES(Locks::mutator_lock_);
+
+  JitMemoryRegion* GetCurrentRegion();
+  bool IsSharedRegion(const JitMemoryRegion& region) const { return &region == &shared_region_; }
+  bool CanAllocateProfilingInfo() {
+    // If we don't have a private region, we cannot allocate a profiling info.
+    // A shared region doesn't support in general GC objects, which a profiling info
+    // can reference.
+    JitMemoryRegion* region = GetCurrentRegion();
+    return region->IsValid() && !IsSharedRegion(*region);
+  }
+
+  // Return whether the given `ptr` is in the zygote executable memory space.
+  bool IsInZygoteExecSpace(const void* ptr) const {
+    return shared_region_.IsInExecSpace(ptr);
+  }
 
  private:
   JitCodeCache();
 
-  void InitializeState(size_t initial_capacity, size_t max_capacity) REQUIRES(lock_);
-
-  bool InitializeMappings(bool rwx_memory_allowed, bool is_zygote, std::string* error_msg)
-      REQUIRES(lock_);
-
-  void InitializeSpaces() REQUIRES(lock_);
-
-  // Internal version of 'CommitCode' that will not retry if the
-  // allocation fails. Return null if the allocation fails.
-  uint8_t* CommitCodeInternal(Thread* self,
-                              ArtMethod* method,
-                              uint8_t* stack_map,
-                              uint8_t* roots_data,
-                              const uint8_t* code,
-                              size_t code_size,
-                              size_t data_size,
-                              bool osr,
-                              const std::vector<Handle<mirror::Object>>& roots,
-                              bool has_should_deoptimize_flag,
-                              const ArenaSet<ArtMethod*>& cha_single_implementation_list)
-      REQUIRES(!lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  // Adds the given roots to the roots_data. Only a member for annotalysis.
-  void FillRootTable(uint8_t* roots_data, const std::vector<Handle<mirror::Object>>& roots)
-      REQUIRES(lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
   ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                           ArtMethod* method,
                                           const std::vector<uint32_t>& entries)
-      REQUIRES(lock_)
+      REQUIRES(Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // If a collection is in progress, wait for it to finish. Must be called with the mutator lock.
   // The non-mutator lock version should be used if possible. This method will release then
   // re-acquire the mutator lock.
   void WaitForPotentialCollectionToCompleteRunnable(Thread* self)
-      REQUIRES(lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES(Locks::jit_lock_, !Roles::uninterruptible_) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // If a collection is in progress, wait for it to finish. Return
   // whether the thread actually waited.
   bool WaitForPotentialCollectionToComplete(Thread* self)
-      REQUIRES(lock_) REQUIRES(!Locks::mutator_lock_);
+      REQUIRES(Locks::jit_lock_) REQUIRES(!Locks::mutator_lock_);
 
   // Remove CHA dependents and underlying allocations for entries in `method_headers`.
   void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES(!Locks::cha_lock_);
 
   // Removes method from the cache. The caller must ensure that all threads
   // are suspended and the method should not be in any thread's stack.
   bool RemoveMethodLocked(ArtMethod* method, bool release_memory)
-      REQUIRES(lock_)
+      REQUIRES(Locks::jit_lock_)
       REQUIRES(Locks::mutator_lock_);
 
   // Free code and data allocations for `code_ptr`.
-  void FreeCodeAndData(const void* code_ptr) REQUIRES(lock_);
+  void FreeCodeAndData(const void* code_ptr, bool free_debug_info = true)
+      REQUIRES(Locks::jit_lock_);
 
   // Number of bytes allocated in the code cache.
-  size_t CodeCacheSize() REQUIRES(!lock_);
+  size_t CodeCacheSize() REQUIRES(!Locks::jit_lock_);
 
   // Number of bytes allocated in the data cache.
-  size_t DataCacheSize() REQUIRES(!lock_);
+  size_t DataCacheSize() REQUIRES(!Locks::jit_lock_);
 
   // Number of bytes allocated in the code cache.
-  size_t CodeCacheSizeLocked() REQUIRES(lock_);
+  size_t CodeCacheSizeLocked() REQUIRES(Locks::jit_lock_);
 
   // Number of bytes allocated in the data cache.
-  size_t DataCacheSizeLocked() REQUIRES(lock_);
+  size_t DataCacheSizeLocked() REQUIRES(Locks::jit_lock_);
 
   // Notify all waiting threads that a collection is done.
-  void NotifyCollectionDone(Thread* self) REQUIRES(lock_);
-
-  // Try to increase the current capacity of the code cache. Return whether we
-  // succeeded at doing so.
-  bool IncreaseCodeCacheCapacity() REQUIRES(lock_);
-
-  // Set the footprint limit of the code cache.
-  void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);
+  void NotifyCollectionDone(Thread* self) REQUIRES(Locks::jit_lock_);
 
   // Return whether we should do a full collection given the current state of the cache.
   bool ShouldDoFullCollection()
-      REQUIRES(lock_)
+      REQUIRES(Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void DoCollection(Thread* self, bool collect_profiling_info)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void RemoveUnmarkedCode(Thread* self)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void MarkCompiledCodeOnThreadStacks(Thread* self)
-      REQUIRES(!lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-
-  bool CheckLiveCompiledCodeHasProfilingInfo()
-      REQUIRES(lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   CodeCacheBitmap* GetLiveBitmap() const {
     return live_bitmap_.get();
   }
 
-  uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
-  void FreeCode(uint8_t* code) REQUIRES(lock_);
-  uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);
-  void FreeData(uint8_t* data) REQUIRES(lock_);
-
-  bool HasDualCodeMapping() const {
-    return non_exec_pages_.IsValid();
-  }
-
-  bool HasCodeMapping() const {
-    return exec_pages_.IsValid();
-  }
-
-  const MemMap* GetUpdatableCodeMapping() const;
-
   bool IsInZygoteDataSpace(const void* ptr) const {
-    return zygote_data_pages_.HasAddress(ptr);
-  }
-
-  bool IsInZygoteExecSpace(const void* ptr) const {
-    return zygote_exec_pages_.HasAddress(ptr);
+    return shared_region_.IsInDataSpace(ptr);
   }
 
   bool IsWeakAccessEnabled(Thread* self) const;
   void WaitUntilInlineCacheAccessible(Thread* self)
-      REQUIRES(!lock_)
+      REQUIRES(!Locks::jit_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   class JniStubKey;
   class JniStubData;
 
-  // Lock for guarding allocations, collections, and the method_code_map_.
-  Mutex lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
-  // Condition to wait on during collection.
-  ConditionVariable lock_cond_ GUARDED_BY(lock_);
-  // Whether there is a code cache collection in progress.
-  bool collection_in_progress_ GUARDED_BY(lock_);
-  // Mem map which holds data (stack maps and profiling info).
-  MemMap data_pages_;
-  // Mem map which holds code and has executable permission.
-  MemMap exec_pages_;
-  // Mem map which holds code with non executable permission. Only valid for dual view JIT when
-  // this is the non-executable view of code used to write updates.
-  MemMap non_exec_pages_;
-  // The opaque mspace for allocating data.
-  void* data_mspace_ GUARDED_BY(lock_);
-  // The opaque mspace for allocating code.
-  void* exec_mspace_ GUARDED_BY(lock_);
-  // Bitmap for collecting code and data.
-  std::unique_ptr<CodeCacheBitmap> live_bitmap_;
-  // Holds compiled code associated with the shorty for a JNI stub.
-  SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(lock_);
-  // Holds compiled code associated to the ArtMethod.
-  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
-  // Holds osr compiled code associated to the ArtMethod.
-  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(lock_);
-  // ProfilingInfo objects we have allocated.
-  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);
-
-  // The initial capacity in bytes this code cache starts with.
-  size_t initial_capacity_ GUARDED_BY(lock_);
-
-  // The maximum capacity in bytes this code cache can go to.
-  size_t max_capacity_ GUARDED_BY(lock_);
-
-  // The current capacity in bytes of the code cache.
-  size_t current_capacity_ GUARDED_BY(lock_);
-
-  // The current footprint in bytes of the data portion of the code cache.
-  size_t data_end_ GUARDED_BY(lock_);
-
-  // The current footprint in bytes of the code portion of the code cache.
-  size_t exec_end_ GUARDED_BY(lock_);
-
-  // Whether the last collection round increased the code cache.
-  bool last_collection_increased_code_cache_ GUARDED_BY(lock_);
-
-  // Whether we can do garbage collection. Not 'const' as tests may override this.
-  bool garbage_collect_code_ GUARDED_BY(lock_);
-
-  // The size in bytes of used memory for the data portion of the code cache.
-  size_t used_memory_for_data_ GUARDED_BY(lock_);
-
-  // The size in bytes of used memory for the code portion of the code cache.
-  size_t used_memory_for_code_ GUARDED_BY(lock_);
-
-  // Number of compilations done throughout the lifetime of the JIT.
-  size_t number_of_compilations_ GUARDED_BY(lock_);
-
-  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
-  size_t number_of_osr_compilations_ GUARDED_BY(lock_);
-
-  // Number of code cache collections done throughout the lifetime of the JIT.
-  size_t number_of_collections_ GUARDED_BY(lock_);
-
-  // Histograms for keeping track of stack map size statistics.
-  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(lock_);
-
-  // Histograms for keeping track of code size statistics.
-  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(lock_);
-
-  // Histograms for keeping track of profiling info statistics.
-  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(lock_);
-
   // Whether the GC allows accessing weaks in inline caches. Note that this
   // is not used by the concurrent collector, which uses
   // Thread::SetWeakRefAccessEnabled instead.
   Atomic<bool> is_weak_access_enabled_;
 
   // Condition to wait on for accessing inline caches.
-  ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);
+  ConditionVariable inline_cache_cond_ GUARDED_BY(Locks::jit_lock_);
 
-  // Mem map which holds zygote data (stack maps and profiling info).
-  MemMap zygote_data_pages_;
-  // Mem map which holds zygote code and has executable permission.
-  MemMap zygote_exec_pages_;
-  // The opaque mspace for allocating zygote data.
-  void* zygote_data_mspace_ GUARDED_BY(lock_);
-  // The opaque mspace for allocating zygote code.
-  void* zygote_exec_mspace_ GUARDED_BY(lock_);
+  // -------------- JIT memory regions ------------------------------------- //
+
+  // Shared region, inherited from the zygote.
+  JitMemoryRegion shared_region_;
+
+  // Process's own region.
+  JitMemoryRegion private_region_;
+
+  // -------------- Global JIT maps --------------------------------------- //
+
+  // Holds compiled code associated with the shorty for a JNI stub.
+  SafeMap<JniStubKey, JniStubData> jni_stubs_map_ GUARDED_BY(Locks::jit_lock_);
+
+  // Holds compiled code associated to the ArtMethod.
+  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(Locks::jit_lock_);
+
+  // Holds compiled code associated to the ArtMethod. Used when pre-jitting
+  // methods whose entrypoints have the resolution stub.
+  SafeMap<ArtMethod*, const void*> saved_compiled_methods_map_ GUARDED_BY(Locks::jit_lock_);
+
+  // Holds osr compiled code associated to the ArtMethod.
+  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(Locks::jit_lock_);
+
+  // ProfilingInfo objects we have allocated.
+  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(Locks::jit_lock_);
+
+  // Methods that the zygote has compiled and can be shared across processes
+  // forked from the zygote.
+  ZygoteMap zygote_map_;
+
+  // -------------- JIT GC related data structures ----------------------- //
+
+  // Condition to wait on during collection.
+  ConditionVariable lock_cond_ GUARDED_BY(Locks::jit_lock_);
+
+  // Whether there is a code cache collection in progress.
+  bool collection_in_progress_ GUARDED_BY(Locks::jit_lock_);
+
+  // Bitmap for collecting code and data.
+  std::unique_ptr<CodeCacheBitmap> live_bitmap_;
+
+  // Whether the last collection round increased the code cache.
+  bool last_collection_increased_code_cache_ GUARDED_BY(Locks::jit_lock_);
+
+  // Whether we can do garbage collection. Not 'const' as tests may override this.
+  bool garbage_collect_code_ GUARDED_BY(Locks::jit_lock_);
+
+  // ---------------- JIT statistics -------------------------------------- //
+
+  // Number of compilations done throughout the lifetime of the JIT.
+  size_t number_of_compilations_ GUARDED_BY(Locks::jit_lock_);
+
+  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
+  size_t number_of_osr_compilations_ GUARDED_BY(Locks::jit_lock_);
+
+  // Number of code cache collections done throughout the lifetime of the JIT.
+  size_t number_of_collections_ GUARDED_BY(Locks::jit_lock_);
+
+  // Histograms for keeping track of stack map size statistics.
+  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(Locks::jit_lock_);
+
+  // Histograms for keeping track of code size statistics.
+  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(Locks::jit_lock_);
+
+  // Histograms for keeping track of profiling info statistics.
+  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(Locks::jit_lock_);
 
   friend class art::JitJniStubTestHelper;
   friend class ScopedCodeCacheWrite;
diff --git a/runtime/jit/jit_memory_region.cc b/runtime/jit/jit_memory_region.cc
new file mode 100644
index 0000000..09980c8
--- /dev/null
+++ b/runtime/jit/jit_memory_region.cc
@@ -0,0 +1,600 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jit_memory_region.h"
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <android-base/unique_fd.h>
+#include "base/bit_utils.h"  // For RoundDown, RoundUp
+#include "base/globals.h"
+#include "base/logging.h"  // For VLOG.
+#include "base/membarrier.h"
+#include "base/memfd.h"
+#include "base/systrace.h"
+#include "gc/allocator/dlmalloc.h"
+#include "jit/jit_scoped_code_cache_write.h"
+#include "oat_quick_method_header.h"
+#include "palette/palette.h"
+
+using android::base::unique_fd;
+
+namespace art {
+namespace jit {
+
+// Data cache will be half of the capacity
+// Code cache will be the other half of the capacity.
+// TODO: Make this variable?
+static constexpr size_t kCodeAndDataCapacityDivider = 2;
+
+bool JitMemoryRegion::Initialize(size_t initial_capacity,
+                                 size_t max_capacity,
+                                 bool rwx_memory_allowed,
+                                 bool is_zygote,
+                                 std::string* error_msg) {
+  ScopedTrace trace(__PRETTY_FUNCTION__);
+
+  CHECK_GE(max_capacity, initial_capacity);
+  CHECK(max_capacity <= 1 * GB) << "The max supported size for JIT code cache is 1GB";
+  // Align both capacities to page size, as that's the unit mspaces use.
+  initial_capacity_ = RoundDown(initial_capacity, 2 * kPageSize);
+  max_capacity_ = RoundDown(max_capacity, 2 * kPageSize);
+  current_capacity_ = initial_capacity,
+  data_end_ = initial_capacity / kCodeAndDataCapacityDivider;
+  exec_end_ = initial_capacity - data_end_;
+
+  const size_t capacity = max_capacity_;
+  const size_t data_capacity = capacity / kCodeAndDataCapacityDivider;
+  const size_t exec_capacity = capacity - data_capacity;
+
+  // File descriptor enabling dual-view mapping of code section.
+  unique_fd mem_fd;
+
+  if (is_zygote) {
+    // Because we are not going to GC code generated by the zygote, just use all available.
+    current_capacity_ = max_capacity;
+    mem_fd = unique_fd(CreateZygoteMemory(capacity, error_msg));
+    if (mem_fd.get() < 0) {
+      return false;
+    }
+  } else {
+    // Bionic supports memfd_create, but the call may fail on older kernels.
+    mem_fd = unique_fd(art::memfd_create("jit-cache", /* flags= */ 0));
+    if (mem_fd.get() < 0) {
+      std::ostringstream oss;
+      oss << "Failed to initialize dual view JIT. memfd_create() error: " << strerror(errno);
+      if (!rwx_memory_allowed) {
+        // Without using RWX page permissions, the JIT can not fallback to single mapping as it
+        // requires transitioning the code pages to RWX for updates.
+        *error_msg = oss.str();
+        return false;
+      }
+      VLOG(jit) << oss.str();
+    } else if (ftruncate(mem_fd, capacity) != 0) {
+      std::ostringstream oss;
+      oss << "Failed to initialize memory file: " << strerror(errno);
+      *error_msg = oss.str();
+      return false;
+    }
+  }
+
+  std::string data_cache_name = is_zygote ? "zygote-data-code-cache" : "data-code-cache";
+  std::string exec_cache_name = is_zygote ? "zygote-jit-code-cache" : "jit-code-cache";
+
+  std::string error_str;
+  // Map name specific for android_os_Debug.cpp accounting.
+  // Map in low 4gb to simplify accessing root tables for x86_64.
+  // We could do PC-relative addressing to avoid this problem, but that
+  // would require reserving code and data area before submitting, which
+  // means more windows for the code memory to be RWX.
+  int base_flags;
+  MemMap data_pages;
+  if (mem_fd.get() >= 0) {
+    // Dual view of JIT code cache case. Create an initial mapping of data pages large enough
+    // for data and non-writable view of JIT code pages. We use the memory file descriptor to
+    // enable dual mapping - we'll create a second mapping using the descriptor below. The
+    // mappings will look like:
+    //
+    //       VA                  PA
+    //
+    //       +---------------+
+    //       | non exec code |\
+    //       +---------------+ \
+    //       | writable data |\ \
+    //       +---------------+ \ \
+    //       :               :\ \ \
+    //       +---------------+.\.\.+---------------+
+    //       |  exec code    |  \ \|     code      |
+    //       +---------------+...\.+---------------+
+    //       | readonly data |    \|     data      |
+    //       +---------------+.....+---------------+
+    //
+    // In this configuration code updates are written to the non-executable view of the code
+    // cache, and the executable view of the code cache has fixed RX memory protections.
+    //
+    // This memory needs to be mapped shared as the code portions will have two mappings.
+    //
+    // Additionally, the zygote will create a dual view of the data portion of
+    // the cache. This mapping will be read-only, whereas the second mapping
+    // will be writable.
+    base_flags = MAP_SHARED;
+    data_pages = MemMap::MapFile(
+        data_capacity + exec_capacity,
+        kProtR,
+        base_flags,
+        mem_fd,
+        /* start= */ 0,
+        /* low_4gb= */ true,
+        data_cache_name.c_str(),
+        &error_str);
+  } else {
+    // Single view of JIT code cache case. Create an initial mapping of data pages large enough
+    // for data and JIT code pages. The mappings will look like:
+    //
+    //       VA                  PA
+    //
+    //       +---------------+...+---------------+
+    //       |  exec code    |   |     code      |
+    //       +---------------+...+---------------+
+    //       |      data     |   |     data      |
+    //       +---------------+...+---------------+
+    //
+    // In this configuration code updates are written to the executable view of the code cache,
+    // and the executable view of the code cache transitions RX to RWX for the update and then
+    // back to RX after the update.
+    base_flags = MAP_PRIVATE | MAP_ANON;
+    data_pages = MemMap::MapAnonymous(
+        data_cache_name.c_str(),
+        data_capacity + exec_capacity,
+        kProtRW,
+        /* low_4gb= */ true,
+        &error_str);
+  }
+
+  if (!data_pages.IsValid()) {
+    std::ostringstream oss;
+    oss << "Failed to create read write cache: " << error_str << " size=" << capacity;
+    *error_msg = oss.str();
+    return false;
+  }
+
+  MemMap exec_pages;
+  MemMap non_exec_pages;
+  MemMap writable_data_pages;
+  if (exec_capacity > 0) {
+    uint8_t* const divider = data_pages.Begin() + data_capacity;
+    // Set initial permission for executable view to catch any SELinux permission problems early
+    // (for processes that cannot map WX pages). Otherwise, this region does not need to be
+    // executable as there is no code in the cache yet.
+    exec_pages = data_pages.RemapAtEnd(divider,
+                                       exec_cache_name.c_str(),
+                                       kProtRX,
+                                       base_flags | MAP_FIXED,
+                                       mem_fd.get(),
+                                       (mem_fd.get() >= 0) ? data_capacity : 0,
+                                       &error_str);
+    if (!exec_pages.IsValid()) {
+      std::ostringstream oss;
+      oss << "Failed to create read execute code cache: " << error_str << " size=" << capacity;
+      *error_msg = oss.str();
+      return false;
+    }
+
+    if (mem_fd.get() >= 0) {
+      // For dual view, create the secondary view of code memory used for updating code. This view
+      // is never executable.
+      std::string name = exec_cache_name + "-rw";
+      non_exec_pages = MemMap::MapFile(exec_capacity,
+                                       kProtR,
+                                       base_flags,
+                                       mem_fd,
+                                       /* start= */ data_capacity,
+                                       /* low_4GB= */ false,
+                                       name.c_str(),
+                                       &error_str);
+      if (!non_exec_pages.IsValid()) {
+        static const char* kFailedNxView = "Failed to map non-executable view of JIT code cache";
+        if (rwx_memory_allowed) {
+          // Log and continue as single view JIT (requires RWX memory).
+          VLOG(jit) << kFailedNxView;
+        } else {
+          *error_msg = kFailedNxView;
+          return false;
+        }
+      }
+      // Create a dual view of the data cache.
+      name = data_cache_name + "-rw";
+      writable_data_pages = MemMap::MapFile(data_capacity,
+                                            kProtRW,
+                                            base_flags,
+                                            mem_fd,
+                                            /* start= */ 0,
+                                            /* low_4GB= */ false,
+                                            name.c_str(),
+                                            &error_str);
+      if (!writable_data_pages.IsValid()) {
+        std::ostringstream oss;
+        oss << "Failed to create dual data view: " << error_str;
+        *error_msg = oss.str();
+        return false;
+      }
+      if (writable_data_pages.MadviseDontFork() != 0) {
+        *error_msg = "Failed to madvise dont fork the writable data view";
+        return false;
+      }
+      if (non_exec_pages.MadviseDontFork() != 0) {
+        *error_msg = "Failed to madvise dont fork the writable code view";
+        return false;
+      }
+      // Now that we have created the writable and executable mappings, prevent creating any new
+      // ones.
+      if (is_zygote && !ProtectZygoteMemory(mem_fd.get(), error_msg)) {
+        return false;
+      }
+    }
+  } else {
+    // Profiling only. No memory for code required.
+  }
+
+  data_pages_ = std::move(data_pages);
+  exec_pages_ = std::move(exec_pages);
+  non_exec_pages_ = std::move(non_exec_pages);
+  writable_data_pages_ = std::move(writable_data_pages);
+
+  VLOG(jit) << "Created JitMemoryRegion"
+            << ": data_pages=" << reinterpret_cast<void*>(data_pages_.Begin())
+            << ", exec_pages=" << reinterpret_cast<void*>(exec_pages_.Begin())
+            << ", non_exec_pages=" << reinterpret_cast<void*>(non_exec_pages_.Begin())
+            << ", writable_data_pages=" << reinterpret_cast<void*>(writable_data_pages_.Begin());
+
+  // Now that the pages are initialized, initialize the spaces.
+
+  // Initialize the data heap.
+  data_mspace_ = create_mspace_with_base(
+      HasDualDataMapping() ? writable_data_pages_.Begin() : data_pages_.Begin(),
+      data_end_,
+      /* locked= */ false);
+  CHECK(data_mspace_ != nullptr) << "create_mspace_with_base (data) failed";
+
+  // Allow mspace to use the full data capacity.
+  // It will still only use as little memory as possible and ask for MoreCore as needed.
+  CHECK(IsAlignedParam(data_capacity, kPageSize));
+  mspace_set_footprint_limit(data_mspace_, data_capacity);
+
+  // Initialize the code heap.
+  MemMap* code_heap = nullptr;
+  if (non_exec_pages_.IsValid()) {
+    code_heap = &non_exec_pages_;
+  } else if (exec_pages_.IsValid()) {
+    code_heap = &exec_pages_;
+  }
+  if (code_heap != nullptr) {
+    // Make all pages reserved for the code heap writable. The mspace allocator, that manages the
+    // heap, will take and initialize pages in create_mspace_with_base().
+    CheckedCall(mprotect, "create code heap", code_heap->Begin(), code_heap->Size(), kProtRW);
+    exec_mspace_ = create_mspace_with_base(code_heap->Begin(), exec_end_, false /*locked*/);
+    CHECK(exec_mspace_ != nullptr) << "create_mspace_with_base (exec) failed";
+    SetFootprintLimit(current_capacity_);
+    // Protect pages containing heap metadata. Updates to the code heap toggle write permission to
+    // perform the update and there are no other times write access is required.
+    CheckedCall(mprotect, "protect code heap", code_heap->Begin(), code_heap->Size(), kProtR);
+  } else {
+    exec_mspace_ = nullptr;
+    SetFootprintLimit(current_capacity_);
+  }
+  return true;
+}
+
+void JitMemoryRegion::SetFootprintLimit(size_t new_footprint) {
+  size_t data_space_footprint = new_footprint / kCodeAndDataCapacityDivider;
+  DCHECK(IsAlignedParam(data_space_footprint, kPageSize));
+  DCHECK_EQ(data_space_footprint * kCodeAndDataCapacityDivider, new_footprint);
+  if (HasCodeMapping()) {
+    ScopedCodeCacheWrite scc(*this);
+    mspace_set_footprint_limit(exec_mspace_, new_footprint - data_space_footprint);
+  }
+}
+
+bool JitMemoryRegion::IncreaseCodeCacheCapacity() {
+  if (current_capacity_ == max_capacity_) {
+    return false;
+  }
+
+  // Double the capacity if we're below 1MB, or increase it by 1MB if
+  // we're above.
+  if (current_capacity_ < 1 * MB) {
+    current_capacity_ *= 2;
+  } else {
+    current_capacity_ += 1 * MB;
+  }
+  if (current_capacity_ > max_capacity_) {
+    current_capacity_ = max_capacity_;
+  }
+
+  VLOG(jit) << "Increasing code cache capacity to " << PrettySize(current_capacity_);
+
+  SetFootprintLimit(current_capacity_);
+
+  return true;
+}
+
+// NO_THREAD_SAFETY_ANALYSIS as this is called from mspace code, at which point the lock
+// is already held.
+void* JitMemoryRegion::MoreCore(const void* mspace, intptr_t increment) NO_THREAD_SAFETY_ANALYSIS {
+  if (mspace == exec_mspace_) {
+    CHECK(exec_mspace_ != nullptr);
+    const MemMap* const code_pages = GetUpdatableCodeMapping();
+    void* result = code_pages->Begin() + exec_end_;
+    exec_end_ += increment;
+    return result;
+  } else {
+    CHECK_EQ(data_mspace_, mspace);
+    const MemMap* const writable_data_pages = GetWritableDataMapping();
+    void* result = writable_data_pages->Begin() + data_end_;
+    data_end_ += increment;
+    return result;
+  }
+}
+
+const uint8_t* JitMemoryRegion::CommitCode(ArrayRef<const uint8_t> reserved_code,
+                                           ArrayRef<const uint8_t> code,
+                                           const uint8_t* stack_map,
+                                           bool has_should_deoptimize_flag) {
+  DCHECK(IsInExecSpace(reserved_code.data()));
+  ScopedCodeCacheWrite scc(*this);
+
+  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+  size_t header_size = OatQuickMethodHeader::InstructionAlignedSize();
+  size_t total_size = header_size + code.size();
+
+  // Each allocation should be on its own set of cache lines.
+  // `total_size` covers the OatQuickMethodHeader, the JIT generated machine code,
+  // and any alignment padding.
+  DCHECK_GT(total_size, header_size);
+  DCHECK_LE(total_size, reserved_code.size());
+  uint8_t* x_memory = const_cast<uint8_t*>(reserved_code.data());
+  uint8_t* w_memory = const_cast<uint8_t*>(GetNonExecutableAddress(x_memory));
+  // Ensure the header ends up at expected instruction alignment.
+  DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(w_memory + header_size), alignment);
+  const uint8_t* result = x_memory + header_size;
+
+  // Write the code.
+  std::copy(code.begin(), code.end(), w_memory + header_size);
+
+  // Write the header.
+  OatQuickMethodHeader* method_header =
+      OatQuickMethodHeader::FromCodePointer(w_memory + header_size);
+  new (method_header) OatQuickMethodHeader(
+      (stack_map != nullptr) ? result - stack_map : 0u,
+      code.size());
+  if (has_should_deoptimize_flag) {
+    method_header->SetHasShouldDeoptimizeFlag();
+  }
+
+  // Both instruction and data caches need flushing to the point of unification where both share
+  // a common view of memory. Flushing the data cache ensures the dirty cachelines from the
+  // newly added code are written out to the point of unification. Flushing the instruction
+  // cache ensures the newly written code will be fetched from the point of unification before
+  // use. Memory in the code cache is re-cycled as code is added and removed. The flushes
+  // prevent stale code from residing in the instruction cache.
+  //
+  // Caches are flushed before write permission is removed because some ARMv8 Qualcomm kernels
+  // may trigger a segfault if a page fault occurs when requesting a cache maintenance
+  // operation. This is a kernel bug that we need to work around until affected devices
+  // (e.g. Nexus 5X and 6P) stop being supported or their kernels are fixed.
+  //
+  // For reference, this behavior is caused by this commit:
+  // https://android.googlesource.com/kernel/msm/+/3fbe6bc28a6b9939d0650f2f17eb5216c719950c
+  //
+  bool cache_flush_success = true;
+  if (HasDualCodeMapping()) {
+    // Flush d-cache for the non-executable mapping.
+    cache_flush_success = FlushCpuCaches(w_memory, w_memory + total_size);
+  }
+
+  // Invalidate i-cache for the executable mapping.
+  if (cache_flush_success) {
+    cache_flush_success = FlushCpuCaches(x_memory, x_memory + total_size);
+  }
+
+  // If flushing the cache has failed, reject the allocation because we can't guarantee
+  // correctness of the instructions present in the processor caches.
+  if (!cache_flush_success) {
+    PLOG(ERROR) << "Cache flush failed triggering code allocation failure";
+    return nullptr;
+  }
+
+  // Ensure CPU instruction pipelines are flushed for all cores. This is necessary for
+  // correctness as code may still be in instruction pipelines despite the i-cache flush. It is
+  // not safe to assume that changing permissions with mprotect (RX->RWX->RX) will cause a TLB
+  // shootdown (incidentally invalidating the CPU pipelines by sending an IPI to all cores to
+  // notify them of the TLB invalidation). Some architectures, notably ARM and ARM64, have
+  // hardware support that broadcasts TLB invalidations and so their kernels have no software
+  // based TLB shootdown. The sync-core flavor of membarrier was introduced in Linux 4.16 to
+  // address this (see membarrier(2)). The membarrier here will fail on prior kernels and on
+  // platforms lacking the appropriate support.
+  art::membarrier(art::MembarrierCommand::kPrivateExpeditedSyncCore);
+
+  return result;
+}
+
+static void FillRootTable(uint8_t* roots_data, const std::vector<Handle<mirror::Object>>& roots)
+    REQUIRES(Locks::jit_lock_)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  GcRoot<mirror::Object>* gc_roots = reinterpret_cast<GcRoot<mirror::Object>*>(roots_data);
+  const uint32_t length = roots.size();
+  // Put all roots in `roots_data`.
+  for (uint32_t i = 0; i < length; ++i) {
+    ObjPtr<mirror::Object> object = roots[i].Get();
+    gc_roots[i] = GcRoot<mirror::Object>(object);
+  }
+  // Store the length of the table at the end. This will allow fetching it from a stack_map
+  // pointer.
+  reinterpret_cast<uint32_t*>(roots_data)[length] = length;
+}
+
+bool JitMemoryRegion::CommitData(ArrayRef<const uint8_t> reserved_data,
+                                 const std::vector<Handle<mirror::Object>>& roots,
+                                 ArrayRef<const uint8_t> stack_map) {
+  DCHECK(IsInDataSpace(reserved_data.data()));
+  uint8_t* roots_data = GetWritableDataAddress(reserved_data.data());
+  size_t root_table_size = ComputeRootTableSize(roots.size());
+  uint8_t* stack_map_data = roots_data + root_table_size;
+  DCHECK_LE(root_table_size + stack_map.size(), reserved_data.size());
+  FillRootTable(roots_data, roots);
+  memcpy(stack_map_data, stack_map.data(), stack_map.size());
+  // Flush data cache, as compiled code references literals in it.
+  // TODO(oth): establish whether this is necessary.
+  if (UNLIKELY(!FlushCpuCaches(roots_data, roots_data + root_table_size + stack_map.size()))) {
+    VLOG(jit) << "Failed to flush data in CommitData";
+    return false;
+  }
+  return true;
+}
+
+const uint8_t* JitMemoryRegion::AllocateCode(size_t size) {
+  size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+  void* result = mspace_memalign(exec_mspace_, alignment, size);
+  if (UNLIKELY(result == nullptr)) {
+    return nullptr;
+  }
+  used_memory_for_code_ += mspace_usable_size(result);
+  return reinterpret_cast<uint8_t*>(GetExecutableAddress(result));
+}
+
+void JitMemoryRegion::FreeCode(const uint8_t* code) {
+  code = GetNonExecutableAddress(code);
+  used_memory_for_code_ -= mspace_usable_size(code);
+  mspace_free(exec_mspace_, const_cast<uint8_t*>(code));
+}
+
+const uint8_t* JitMemoryRegion::AllocateData(size_t data_size) {
+  void* result = mspace_malloc(data_mspace_, data_size);
+  if (UNLIKELY(result == nullptr)) {
+    return nullptr;
+  }
+  used_memory_for_data_ += mspace_usable_size(result);
+  return reinterpret_cast<uint8_t*>(GetNonWritableDataAddress(result));
+}
+
+void JitMemoryRegion::FreeData(const uint8_t* data) {
+  FreeWritableData(GetWritableDataAddress(data));
+}
+
+void JitMemoryRegion::FreeWritableData(uint8_t* writable_data) REQUIRES(Locks::jit_lock_) {
+  used_memory_for_data_ -= mspace_usable_size(writable_data);
+  mspace_free(data_mspace_, writable_data);
+}
+
+#if defined(__BIONIC__) && defined(ART_TARGET)
+// The code below only works on bionic on target.
+
+int JitMemoryRegion::CreateZygoteMemory(size_t capacity, std::string* error_msg) {
+  if (CacheOperationsMaySegFault()) {
+    // Zygote JIT requires dual code mappings by design. We can only do this if the cache flush
+    // and invalidate instructions work without raising faults.
+    *error_msg = "Zygote memory only works with dual mappings";
+    return -1;
+  }
+  /* Check if kernel support exists, otherwise fall back to ashmem */
+  static const char* kRegionName = "jit-zygote-cache";
+  if (art::IsSealFutureWriteSupported()) {
+    int fd = art::memfd_create(kRegionName, MFD_ALLOW_SEALING);
+    if (fd == -1) {
+      std::ostringstream oss;
+      oss << "Failed to create zygote mapping: " << strerror(errno);
+      *error_msg = oss.str();
+      return -1;
+    }
+
+    if (ftruncate(fd, capacity) != 0) {
+      std::ostringstream oss;
+      oss << "Failed to create zygote mapping: " << strerror(errno);
+      *error_msg = oss.str();
+      return -1;
+    }
+
+    return fd;
+  }
+
+  LOG(INFO) << "Falling back to ashmem implementation for JIT zygote mapping";
+
+  int fd;
+  PaletteStatus status = PaletteAshmemCreateRegion(kRegionName, capacity, &fd);
+  if (status != PaletteStatus::kOkay) {
+    CHECK_EQ(status, PaletteStatus::kCheckErrno);
+    std::ostringstream oss;
+    oss << "Failed to create zygote mapping: " << strerror(errno);
+    *error_msg = oss.str();
+    return -1;
+  }
+  return fd;
+}
+
+bool JitMemoryRegion::ProtectZygoteMemory(int fd, std::string* error_msg) {
+  if (art::IsSealFutureWriteSupported()) {
+    if (fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_SEAL | F_SEAL_FUTURE_WRITE)
+            == -1) {
+      std::ostringstream oss;
+      oss << "Failed to protect zygote mapping: " << strerror(errno);
+      *error_msg = oss.str();
+      return false;
+    }
+  } else {
+    PaletteStatus status = PaletteAshmemSetProtRegion(fd, PROT_READ);
+    if (status != PaletteStatus::kOkay) {
+      CHECK_EQ(status, PaletteStatus::kCheckErrno);
+      std::ostringstream oss;
+      oss << "Failed to protect zygote mapping: " << strerror(errno);
+      *error_msg = oss.str();
+      return false;
+    }
+  }
+  return true;
+}
+
+#else
+
+int JitMemoryRegion::CreateZygoteMemory(size_t capacity, std::string* error_msg) {
+  // To simplify host building, we don't rely on the latest memfd features.
+  LOG(WARNING) << "Returning un-sealable region on non-bionic";
+  static const char* kRegionName = "/jit-zygote-cache";
+  int fd = art::memfd_create(kRegionName, 0);
+  if (fd == -1) {
+    std::ostringstream oss;
+    oss << "Failed to create zygote mapping: " << strerror(errno);
+    *error_msg = oss.str();
+    return -1;
+  }
+  if (ftruncate(fd, capacity) != 0) {
+    std::ostringstream oss;
+    oss << "Failed to create zygote mapping: " << strerror(errno);
+    *error_msg = oss.str();
+    return -1;
+  }
+  return fd;
+}
+
+bool JitMemoryRegion::ProtectZygoteMemory(int fd ATTRIBUTE_UNUSED,
+                                          std::string* error_msg ATTRIBUTE_UNUSED) {
+  return true;
+}
+
+#endif
+
+}  // namespace jit
+}  // namespace art
diff --git a/runtime/jit/jit_memory_region.h b/runtime/jit/jit_memory_region.h
new file mode 100644
index 0000000..6db931d
--- /dev/null
+++ b/runtime/jit/jit_memory_region.h
@@ -0,0 +1,288 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_JIT_MEMORY_REGION_H_
+#define ART_RUNTIME_JIT_JIT_MEMORY_REGION_H_
+
+#include <string>
+
+#include "arch/instruction_set.h"
+#include "base/globals.h"
+#include "base/locks.h"
+#include "base/mem_map.h"
+#include "gc_root-inl.h"
+#include "handle.h"
+
+namespace art {
+
+namespace mirror {
+class Object;
+}
+
+namespace jit {
+
+class TestZygoteMemory;
+
+// Number of bytes represented by a bit in the CodeCacheBitmap. Value is reasonable for all
+// architectures.
+static constexpr int kJitCodeAccountingBytes = 16;
+
+// Helper to get the size required for emitting `number_of_roots` in the
+// data portion of a JIT memory region.
+uint32_t inline ComputeRootTableSize(uint32_t number_of_roots) {
+  return sizeof(uint32_t) + number_of_roots * sizeof(GcRoot<mirror::Object>);
+}
+
+// Represents a memory region for the JIT, where code and data are stored. This class
+// provides allocation and deallocation primitives.
+class JitMemoryRegion {
+ public:
+  JitMemoryRegion()
+      : initial_capacity_(0),
+        max_capacity_(0),
+        current_capacity_(0),
+        data_end_(0),
+        exec_end_(0),
+        used_memory_for_code_(0),
+        used_memory_for_data_(0),
+        data_pages_(),
+        writable_data_pages_(),
+        exec_pages_(),
+        non_exec_pages_(),
+        data_mspace_(nullptr),
+        exec_mspace_(nullptr) {}
+
+  bool Initialize(size_t initial_capacity,
+                  size_t max_capacity,
+                  bool rwx_memory_allowed,
+                  bool is_zygote,
+                  std::string* error_msg)
+      REQUIRES(Locks::jit_lock_);
+
+  // Try to increase the current capacity of the code cache. Return whether we
+  // succeeded at doing so.
+  bool IncreaseCodeCacheCapacity() REQUIRES(Locks::jit_lock_);
+
+  // Set the footprint limit of the code cache.
+  void SetFootprintLimit(size_t new_footprint) REQUIRES(Locks::jit_lock_);
+
+  const uint8_t* AllocateCode(size_t code_size) REQUIRES(Locks::jit_lock_);
+  void FreeCode(const uint8_t* code) REQUIRES(Locks::jit_lock_);
+  const uint8_t* AllocateData(size_t data_size) REQUIRES(Locks::jit_lock_);
+  void FreeData(const uint8_t* data) REQUIRES(Locks::jit_lock_);
+  void FreeData(uint8_t* writable_data) REQUIRES(Locks::jit_lock_) = delete;
+  void FreeWritableData(uint8_t* writable_data) REQUIRES(Locks::jit_lock_);
+
+  // Emit header and code into the memory pointed by `reserved_code` (despite it being const).
+  // Returns pointer to copied code (within reserved_code region; after OatQuickMethodHeader).
+  const uint8_t* CommitCode(ArrayRef<const uint8_t> reserved_code,
+                            ArrayRef<const uint8_t> code,
+                            const uint8_t* stack_map,
+                            bool has_should_deoptimize_flag)
+      REQUIRES(Locks::jit_lock_);
+
+  // Emit roots and stack map into the memory pointed by `roots_data` (despite it being const).
+  bool CommitData(ArrayRef<const uint8_t> reserved_data,
+                  const std::vector<Handle<mirror::Object>>& roots,
+                  ArrayRef<const uint8_t> stack_map)
+      REQUIRES(Locks::jit_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  void ResetWritableMappings() REQUIRES(Locks::jit_lock_) {
+    non_exec_pages_.ResetInForkedProcess();
+    writable_data_pages_.ResetInForkedProcess();
+    // Also clear the mspaces, which, in their implementation,
+    // point to the discarded mappings.
+    exec_mspace_ = nullptr;
+    data_mspace_ = nullptr;
+  }
+
+  bool IsValid() const NO_THREAD_SAFETY_ANALYSIS {
+    return exec_mspace_ != nullptr || data_mspace_ != nullptr;
+  }
+
+  template <typename T>
+  void FillData(const T* address, size_t n, const T& t)  REQUIRES(Locks::jit_lock_) {
+    std::fill_n(GetWritableDataAddress(address), n, t);
+  }
+
+  // Generic helper for writing abritrary data in the data portion of the
+  // region.
+  template <typename T>
+  void WriteData(const T* address, const T& value) {
+    *GetWritableDataAddress(address) = value;
+  }
+
+  bool HasDualCodeMapping() const {
+    return non_exec_pages_.IsValid();
+  }
+
+  bool HasDualDataMapping() const {
+    return writable_data_pages_.IsValid();
+  }
+
+  bool HasCodeMapping() const {
+    return exec_pages_.IsValid();
+  }
+
+  bool IsInDataSpace(const void* ptr) const {
+    return data_pages_.HasAddress(ptr);
+  }
+
+  bool IsInExecSpace(const void* ptr) const {
+    return exec_pages_.HasAddress(ptr);
+  }
+
+  const MemMap* GetExecPages() const {
+    return &exec_pages_;
+  }
+
+  void* MoreCore(const void* mspace, intptr_t increment);
+
+  bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
+    return mspace == data_mspace_ || mspace == exec_mspace_;
+  }
+
+  size_t GetCurrentCapacity() const REQUIRES(Locks::jit_lock_) {
+    return current_capacity_;
+  }
+
+  size_t GetMaxCapacity() const REQUIRES(Locks::jit_lock_) {
+    return max_capacity_;
+  }
+
+  size_t GetUsedMemoryForCode() const REQUIRES(Locks::jit_lock_) {
+    return used_memory_for_code_;
+  }
+
+  size_t GetResidentMemoryForCode() const REQUIRES(Locks::jit_lock_) {
+    return exec_end_;
+  }
+
+  size_t GetUsedMemoryForData() const REQUIRES(Locks::jit_lock_) {
+    return used_memory_for_data_;
+  }
+
+  size_t GetResidentMemoryForData() const REQUIRES(Locks::jit_lock_) {
+    return data_end_;
+  }
+
+  template <typename T> T* GetWritableDataAddress(const T* src_ptr) {
+    if (!HasDualDataMapping()) {
+      return const_cast<T*>(src_ptr);
+    }
+    return const_cast<T*>(TranslateAddress(src_ptr, data_pages_, writable_data_pages_));
+  }
+
+ private:
+  template <typename T>
+  T* TranslateAddress(T* src_ptr, const MemMap& src, const MemMap& dst) {
+    CHECK(src.HasAddress(src_ptr)) << reinterpret_cast<const void*>(src_ptr);
+    const uint8_t* const raw_src_ptr = reinterpret_cast<const uint8_t*>(src_ptr);
+    return reinterpret_cast<T*>(raw_src_ptr - src.Begin() + dst.Begin());
+  }
+
+  const MemMap* GetUpdatableCodeMapping() const {
+    if (HasDualCodeMapping()) {
+      return &non_exec_pages_;
+    } else if (HasCodeMapping()) {
+      return &exec_pages_;
+    } else {
+      return nullptr;
+    }
+  }
+
+  const MemMap* GetWritableDataMapping() const {
+    if (HasDualDataMapping()) {
+      return &writable_data_pages_;
+    } else {
+      return &data_pages_;
+    }
+  }
+
+  template <typename T> T* GetNonWritableDataAddress(T* src_ptr) {
+    if (!HasDualDataMapping()) {
+      return src_ptr;
+    }
+    return TranslateAddress(src_ptr, writable_data_pages_, data_pages_);
+  }
+
+  template <typename T> T* GetExecutableAddress(T* src_ptr) {
+    if (!HasDualCodeMapping()) {
+      return src_ptr;
+    }
+    return TranslateAddress(src_ptr, non_exec_pages_, exec_pages_);
+  }
+
+  template <typename T> T* GetNonExecutableAddress(T* src_ptr) {
+    if (!HasDualCodeMapping()) {
+      return src_ptr;
+    }
+    return TranslateAddress(src_ptr, exec_pages_, non_exec_pages_);
+  }
+
+  static int CreateZygoteMemory(size_t capacity, std::string* error_msg);
+  static bool ProtectZygoteMemory(int fd, std::string* error_msg);
+
+  // The initial capacity in bytes this code region starts with.
+  size_t initial_capacity_ GUARDED_BY(Locks::jit_lock_);
+
+  // The maximum capacity in bytes this region can go to.
+  size_t max_capacity_ GUARDED_BY(Locks::jit_lock_);
+
+  // The current capacity in bytes of the region.
+  size_t current_capacity_ GUARDED_BY(Locks::jit_lock_);
+
+  // The current footprint in bytes of the data portion of the region.
+  size_t data_end_ GUARDED_BY(Locks::jit_lock_);
+
+  // The current footprint in bytes of the code portion of the region.
+  size_t exec_end_ GUARDED_BY(Locks::jit_lock_);
+
+  // The size in bytes of used memory for the code portion of the region.
+  size_t used_memory_for_code_ GUARDED_BY(Locks::jit_lock_);
+
+  // The size in bytes of used memory for the data portion of the region.
+  size_t used_memory_for_data_ GUARDED_BY(Locks::jit_lock_);
+
+  // Mem map which holds data (stack maps and profiling info).
+  MemMap data_pages_;
+
+  // Mem map which holds data with writable permission. Only valid for dual view
+  // JIT when this is the writable view and data_pages_ is the readable view.
+  MemMap writable_data_pages_;
+
+  // Mem map which holds code and has executable permission.
+  MemMap exec_pages_;
+
+  // Mem map which holds code with non executable permission. Only valid for dual view JIT when
+  // this is the non-executable view of code used to write updates.
+  MemMap non_exec_pages_;
+
+  // The opaque mspace for allocating data.
+  void* data_mspace_ GUARDED_BY(Locks::jit_lock_);
+
+  // The opaque mspace for allocating code.
+  void* exec_mspace_ GUARDED_BY(Locks::jit_lock_);
+
+  friend class ScopedCodeCacheWrite;  // For GetUpdatableCodeMapping
+  friend class TestZygoteMemory;
+};
+
+}  // namespace jit
+}  // namespace art
+
+#endif  // ART_RUNTIME_JIT_JIT_MEMORY_REGION_H_
diff --git a/runtime/jit/jit_memory_region_test.cc b/runtime/jit/jit_memory_region_test.cc
new file mode 100644
index 0000000..2049611
--- /dev/null
+++ b/runtime/jit/jit_memory_region_test.cc
@@ -0,0 +1,516 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jit/jit_memory_region.h"
+
+#include <signal.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <android-base/unique_fd.h>
+#include <gtest/gtest.h>
+
+#include "base/globals.h"
+#include "base/memfd.h"
+#include "base/utils.h"
+#include "common_runtime_test.h"
+
+namespace art {
+namespace jit {
+
+// These tests only run on bionic.
+#if defined(__BIONIC__)
+static constexpr int kReturnFromFault = 42;
+
+// These globals are only set in child processes.
+void* gAddrToFaultOn = nullptr;
+
+void handler(int ATTRIBUTE_UNUSED, siginfo_t* info, void* ATTRIBUTE_UNUSED) {
+  CHECK_EQ(info->si_addr, gAddrToFaultOn);
+  exit(kReturnFromFault);
+}
+
+static void registerSignalHandler() {
+  struct sigaction sa;
+  sigemptyset(&sa.sa_mask);
+  sa.sa_flags = SA_SIGINFO;
+  sa.sa_sigaction = handler;
+  sigaction(SIGSEGV, &sa, nullptr);
+}
+
+class TestZygoteMemory : public testing::Test {
+ public:
+  void BasicTest() {
+    // Zygote JIT memory only works on kernels that don't segfault on flush.
+    TEST_DISABLED_FOR_KERNELS_WITH_CACHE_SEGFAULT();
+    std::string error_msg;
+    size_t size = kPageSize;
+    android::base::unique_fd fd(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
+    CHECK_NE(fd.get(), -1);
+
+    // Create a writable mapping.
+    int32_t* addr = reinterpret_cast<int32_t*>(
+        mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
+    CHECK(addr != nullptr);
+    CHECK_NE(addr, MAP_FAILED);
+
+    // Test that we can write into the mapping.
+    addr[0] = 42;
+    CHECK_EQ(addr[0], 42);
+
+    // Protect the memory.
+    bool res = JitMemoryRegion::ProtectZygoteMemory(fd.get(), &error_msg);
+    CHECK(res);
+
+    // Test that we can still write into the mapping.
+    addr[0] = 2;
+    CHECK_EQ(addr[0], 2);
+
+    // Test that we cannot create another writable mapping.
+    int32_t* addr2 = reinterpret_cast<int32_t*>(
+        mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
+    CHECK_EQ(addr2, MAP_FAILED);
+
+    // With the existing mapping, we can toggle read/write.
+    CHECK_EQ(mprotect(addr, size, PROT_READ), 0) << strerror(errno);
+    CHECK_EQ(mprotect(addr, size, PROT_READ | PROT_WRITE), 0) << strerror(errno);
+
+    // Test mremap with old_size = 0. From the man pages:
+    //    If the value of old_size is zero, and old_address refers to a shareable mapping
+    //    (see mmap(2) MAP_SHARED), then mremap() will create a new mapping of the same pages.
+    addr2 = reinterpret_cast<int32_t*>(mremap(addr, 0, kPageSize, MREMAP_MAYMOVE));
+    CHECK_NE(addr2, MAP_FAILED);
+
+    // Test that we can  write into the remapped mapping.
+    addr2[0] = 3;
+    CHECK_EQ(addr2[0], 3);
+
+    addr2 = reinterpret_cast<int32_t*>(mremap(addr, kPageSize, 2 * kPageSize, MREMAP_MAYMOVE));
+    CHECK_NE(addr2, MAP_FAILED);
+
+    // Test that we can  write into the remapped mapping.
+    addr2[0] = 4;
+    CHECK_EQ(addr2[0], 4);
+  }
+
+  void TestUnmapWritableAfterFork() {
+    // Zygote JIT memory only works on kernels that don't segfault on flush.
+    TEST_DISABLED_FOR_KERNELS_WITH_CACHE_SEGFAULT();
+    std::string error_msg;
+    size_t size = kPageSize;
+    int32_t* addr = nullptr;
+    int32_t* addr2 = nullptr;
+    {
+      android::base::unique_fd fd(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
+      CHECK_NE(fd.get(), -1);
+
+      // Create a writable mapping.
+      addr = reinterpret_cast<int32_t*>(
+          mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
+      CHECK(addr != nullptr);
+      CHECK_NE(addr, MAP_FAILED);
+
+      // Test that we can write into the mapping.
+      addr[0] = 42;
+      CHECK_EQ(addr[0], 42);
+
+      // Create a read-only mapping.
+      addr2 = reinterpret_cast<int32_t*>(
+          mmap(nullptr, kPageSize, PROT_READ, MAP_SHARED, fd.get(), 0));
+      CHECK(addr2 != nullptr);
+
+      // Protect the memory.
+      bool res = JitMemoryRegion::ProtectZygoteMemory(fd.get(), &error_msg);
+      CHECK(res);
+    }
+    // At this point, the fd has been dropped, but the memory mappings are still
+    // there.
+
+    // Create a mapping of atomic ints to communicate between processes.
+    android::base::unique_fd fd2(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
+    CHECK_NE(fd2.get(), -1);
+    std::atomic<int32_t>* shared = reinterpret_cast<std::atomic<int32_t>*>(
+        mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd2.get(), 0));
+
+    // Values used for the tests below.
+    const int32_t parent_value = 66;
+    const int32_t child_value = 33;
+    const int32_t starting_value = 22;
+
+    shared[0] = 0;
+    addr[0] = starting_value;
+    CHECK_EQ(addr[0], starting_value);
+    CHECK_EQ(addr2[0], starting_value);
+    pid_t pid = fork();
+    if (pid == 0) {
+      // Test that we can write into the mapping.
+      addr[0] = child_value;
+      CHECK_EQ(addr[0], child_value);
+      CHECK_EQ(addr2[0], child_value);
+
+      // Unmap the writable mappping.
+      munmap(addr, kPageSize);
+
+      CHECK_EQ(addr2[0], child_value);
+
+      // Notify parent process.
+      shared[0] = 1;
+
+      // Wait for parent process for a new value.
+      while (shared[0] != 2) {
+        sched_yield();
+      }
+      CHECK_EQ(addr2[0], parent_value);
+
+      // Test that we cannot write into the mapping. The signal handler will
+      // exit the process.
+      gAddrToFaultOn = addr;
+      registerSignalHandler();
+      // This write will trigger a fault, as `addr` is unmapped.
+      addr[0] = child_value + 1;
+      exit(0);
+    } else {
+      while (shared[0] != 1) {
+        sched_yield();
+      }
+      CHECK_EQ(addr[0], child_value);
+      CHECK_EQ(addr2[0], child_value);
+      addr[0] = parent_value;
+      // Notify the child if the new value.
+      shared[0] = 2;
+      int status;
+      CHECK_EQ(waitpid(pid, &status, 0), pid);
+      CHECK(WIFEXITED(status)) << strerror(errno);
+      CHECK_EQ(WEXITSTATUS(status), kReturnFromFault);
+      CHECK_EQ(addr[0], parent_value);
+      CHECK_EQ(addr2[0], parent_value);
+      munmap(addr, kPageSize);
+      munmap(addr2, kPageSize);
+      munmap(shared, kPageSize);
+    }
+  }
+
+  void TestMadviseDontFork() {
+    // Zygote JIT memory only works on kernels that don't segfault on flush.
+    TEST_DISABLED_FOR_KERNELS_WITH_CACHE_SEGFAULT();
+    std::string error_msg;
+    size_t size = kPageSize;
+    int32_t* addr = nullptr;
+    int32_t* addr2 = nullptr;
+    {
+      android::base::unique_fd fd(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
+      CHECK_NE(fd.get(), -1);
+
+      // Create a writable mapping.
+      addr = reinterpret_cast<int32_t*>(
+          mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
+      CHECK(addr != nullptr);
+      CHECK_NE(addr, MAP_FAILED);
+      CHECK_EQ(madvise(addr, kPageSize, MADV_DONTFORK), 0);
+
+      // Test that we can write into the mapping.
+      addr[0] = 42;
+      CHECK_EQ(addr[0], 42);
+
+      // Create a read-only mapping.
+      addr2 = reinterpret_cast<int32_t*>(
+          mmap(nullptr, kPageSize, PROT_READ, MAP_SHARED, fd.get(), 0));
+      CHECK(addr2 != nullptr);
+
+      // Protect the memory.
+      bool res = JitMemoryRegion::ProtectZygoteMemory(fd.get(), &error_msg);
+      CHECK(res);
+    }
+    // At this point, the fd has been dropped, but the memory mappings are still
+    // there.
+
+    // Create a mapping of atomic ints to communicate between processes.
+    android::base::unique_fd fd2(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
+    CHECK_NE(fd2.get(), -1);
+    std::atomic<int32_t>* shared = reinterpret_cast<std::atomic<int32_t>*>(
+        mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd2.get(), 0));
+
+    // Values used for the tests below.
+    const int32_t parent_value = 66;
+    const int32_t child_value = 33;
+    const int32_t starting_value = 22;
+
+    shared[0] = 0;
+    addr[0] = starting_value;
+    CHECK_EQ(addr[0], starting_value);
+    CHECK_EQ(addr2[0], starting_value);
+    pid_t pid = fork();
+    if (pid == 0) {
+      CHECK_EQ(addr2[0], starting_value);
+
+      // Notify parent process.
+      shared[0] = 1;
+
+      // Wait for parent process for new value.
+      while (shared[0] != 2) {
+        sched_yield();
+      }
+
+      CHECK_EQ(addr2[0], parent_value);
+      // Test that we cannot write into the mapping. The signal handler will
+      // exit the process.
+      gAddrToFaultOn = addr;
+      registerSignalHandler();
+      addr[0] = child_value + 1;
+      exit(0);
+    } else {
+      while (shared[0] != 1) {
+        sched_yield();
+      }
+      CHECK_EQ(addr[0], starting_value);
+      CHECK_EQ(addr2[0], starting_value);
+      addr[0] = parent_value;
+      // Notify the child of the new value.
+      shared[0] = 2;
+      int status;
+      CHECK_EQ(waitpid(pid, &status, 0), pid);
+      CHECK(WIFEXITED(status)) << strerror(errno);
+      CHECK_EQ(WEXITSTATUS(status), kReturnFromFault);
+      CHECK_EQ(addr[0], parent_value);
+      CHECK_EQ(addr2[0], parent_value);
+
+      munmap(addr, kPageSize);
+      munmap(addr2, kPageSize);
+      munmap(shared, kPageSize);
+    }
+  }
+
+  // This code is testing some behavior that ART could potentially use: get a
+  // copy-on-write mapping that can incorporate changes from a shared mapping
+  // owned by another process.
+  void TestFromSharedToPrivate() {
+    // Zygote JIT memory only works on kernels that don't segfault on flush.
+    TEST_DISABLED_FOR_KERNELS_WITH_CACHE_SEGFAULT();
+    // This test is only for memfd with future write sealing support:
+    // 1) ashmem with PROT_READ doesn't permit mapping MAP_PRIVATE | PROT_WRITE
+    // 2) ashmem mapped MAP_PRIVATE discards the contents already written.
+    if (!art::IsSealFutureWriteSupported()) {
+      return;
+    }
+    std::string error_msg;
+    size_t size = kPageSize;
+    int32_t* addr = nullptr;
+    android::base::unique_fd fd(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
+    CHECK_NE(fd.get(), -1);
+
+    // Create a writable mapping.
+    addr = reinterpret_cast<int32_t*>(
+        mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));
+    CHECK(addr != nullptr);
+    CHECK_NE(addr, MAP_FAILED);
+
+    // Test that we can write into the mapping.
+    addr[0] = 42;
+    CHECK_EQ(addr[0], 42);
+
+    // Create another mapping of atomic ints to communicate between processes.
+    android::base::unique_fd fd2(JitMemoryRegion::CreateZygoteMemory(size, &error_msg));
+    CHECK_NE(fd2.get(), -1);
+    std::atomic<int32_t>* shared = reinterpret_cast<std::atomic<int32_t>*>(
+        mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd2.get(), 0));
+
+    // Protect the memory.
+    CHECK(JitMemoryRegion::ProtectZygoteMemory(fd.get(), &error_msg));
+
+    // Values used for the tests below.
+    const int32_t parent_value = 66;
+    const int32_t child_value = 33;
+    const int32_t starting_value = 22;
+
+    // Check that updates done by a child mapping write-private are not visible
+    // to the parent.
+    addr[0] = starting_value;
+    shared[0] = 0;
+    pid_t pid = fork();
+    if (pid == 0) {
+      CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
+               addr);
+      addr[0] = child_value;
+      exit(0);
+    } else {
+      int status;
+      CHECK_EQ(waitpid(pid, &status, 0), pid);
+      CHECK(WIFEXITED(status)) << strerror(errno);
+      CHECK_EQ(addr[0], starting_value);
+    }
+
+    addr[0] = starting_value;
+    shared[0] = 0;
+
+    // Check getting back and forth on shared mapping.
+    pid = fork();
+    if (pid == 0) {
+      // Map it private with write access. MAP_FIXED will replace the existing
+      // mapping.
+      CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
+               addr);
+      addr[0] = child_value;
+      CHECK_EQ(addr[0], child_value);
+
+      // Check that mapping shared with write access fails.
+      CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd.get(), 0),
+               MAP_FAILED);
+      CHECK_EQ(errno, EPERM);
+
+      // Map shared with read access.
+      CHECK_EQ(mmap(addr, kPageSize, PROT_READ, MAP_SHARED | MAP_FIXED, fd.get(), 0), addr);
+      CHECK_NE(addr[0], child_value);
+
+      // Wait for the parent to notify.
+      while (shared[0] != 1) {
+        sched_yield();
+      }
+      CHECK_EQ(addr[0], parent_value);
+
+      // Notify the parent for getting a new update of the buffer.
+      shared[0] = 2;
+
+      // Map it private again.
+      CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
+               addr);
+      addr[0] = child_value + 1;
+      CHECK_EQ(addr[0], child_value + 1);
+
+      // And map it back shared.
+      CHECK_EQ(mmap(addr, kPageSize, PROT_READ, MAP_SHARED | MAP_FIXED, fd.get(), 0), addr);
+      while (shared[0] != 3) {
+        sched_yield();
+      }
+      CHECK_EQ(addr[0], parent_value + 1);
+      exit(0);
+    } else {
+      addr[0] = parent_value;
+      CHECK_EQ(addr[0], parent_value);
+
+      // Notify the child of the new value.
+      shared[0] = 1;
+
+      // Wait for the child to ask for a new value;
+      while (shared[0] != 2) {
+        sched_yield();
+      }
+      addr[0] = parent_value + 1;
+      CHECK_EQ(addr[0], parent_value + 1);
+
+      // Notify the child of a new value.
+      shared[0] = 3;
+      int status;
+      CHECK_EQ(waitpid(pid, &status, 0), pid);
+      CHECK(WIFEXITED(status)) << strerror(errno);
+      CHECK_EQ(addr[0], parent_value + 1);
+    }
+
+    // Check that updates done by the parent are visible after a new mmap
+    // write-private.
+    shared[0] = 0;
+    addr[0] = starting_value;
+    pid = fork();
+    if (pid == 0) {
+      CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
+               addr);
+      CHECK_EQ(addr[0], starting_value);
+      addr[0] = child_value;
+      CHECK_EQ(addr[0], child_value);
+
+      // Notify the parent to update the buffer.
+      shared[0] = 1;
+
+      // Wait for the parent update.
+      while (shared[0] != 2) {
+        sched_yield();
+      }
+      // Test the buffer still contains our own data, and not the parent's.
+      CHECK_EQ(addr[0], child_value);
+
+      // Test the buffer contains the parent data after a new mmap.
+      CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
+               addr);
+      CHECK_EQ(addr[0], parent_value);
+      exit(0);
+    } else {
+      // Wait for the child to start
+      while (shared[0] != 1) {
+        sched_yield();
+      }
+      CHECK_EQ(addr[0], starting_value);
+      addr[0] = parent_value;
+      // Notify the child that the buffer has been written.
+      shared[0] = 2;
+      int status;
+      CHECK_EQ(waitpid(pid, &status, 0), pid);
+      CHECK(WIFEXITED(status)) << strerror(errno);
+      CHECK_EQ(addr[0], parent_value);
+    }
+
+    // Check that updates done by the parent are visible for a new mmap
+    // write-private that hasn't written to the buffer yet.
+    shared[0] = 0;
+    addr[0] = starting_value;
+    pid = fork();
+    if (pid == 0) {
+      CHECK_EQ(mmap(addr, kPageSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, fd.get(), 0),
+               addr);
+      CHECK_EQ(addr[0], starting_value);
+      // Notify the parent for a new update of the buffer.
+      shared[0] = 1;
+      while (addr[0] != parent_value) {
+        sched_yield();
+      }
+      addr[0] = child_value;
+      CHECK_EQ(addr[0], child_value);
+      exit(0);
+    } else {
+      while (shared[0] != 1) {
+        sched_yield();
+      }
+      CHECK_EQ(addr[0], starting_value);
+      addr[0] = parent_value;
+      int status;
+      CHECK_EQ(waitpid(pid, &status, 0), pid);
+      CHECK(WIFEXITED(status)) << strerror(errno);
+      CHECK_EQ(addr[0], parent_value);
+    }
+    munmap(addr, kPageSize);
+    munmap(shared, kPageSize);
+  }
+};
+
+TEST_F(TestZygoteMemory, BasicTest) {
+  BasicTest();
+}
+
+TEST_F(TestZygoteMemory, TestUnmapWritableAfterFork) {
+  TestUnmapWritableAfterFork();
+}
+
+TEST_F(TestZygoteMemory, TestMadviseDontFork) {
+  TestMadviseDontFork();
+}
+
+TEST_F(TestZygoteMemory, TestFromSharedToPrivate) {
+  TestFromSharedToPrivate();
+}
+
+#endif  // defined (__BIONIC__)
+
+}  // namespace jit
+}  // namespace art
diff --git a/runtime/jit/jit_scoped_code_cache_write.h b/runtime/jit/jit_scoped_code_cache_write.h
new file mode 100644
index 0000000..ea99bdf
--- /dev/null
+++ b/runtime/jit/jit_scoped_code_cache_write.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_JIT_SCOPED_CODE_CACHE_WRITE_H_
+#define ART_RUNTIME_JIT_JIT_SCOPED_CODE_CACHE_WRITE_H_
+
+#include <sys/mman.h>
+
+#include "base/systrace.h"
+#include "base/utils.h"  // For CheckedCall
+
+namespace art {
+namespace jit {
+
+class JitMemoryRegion;
+
+static constexpr int kProtR = PROT_READ;
+static constexpr int kProtRW = PROT_READ | PROT_WRITE;
+static constexpr int kProtRWX = PROT_READ | PROT_WRITE | PROT_EXEC;
+static constexpr int kProtRX = PROT_READ | PROT_EXEC;
+
+// Helper for toggling JIT memory R <-> RW.
+class ScopedCodeCacheWrite : ScopedTrace {
+ public:
+  explicit ScopedCodeCacheWrite(const JitMemoryRegion& region)
+      : ScopedTrace("ScopedCodeCacheWrite"),
+        region_(region) {
+    ScopedTrace trace("mprotect all");
+    const MemMap* const updatable_pages = region.GetUpdatableCodeMapping();
+    if (updatable_pages != nullptr) {
+      int prot = region.HasDualCodeMapping() ? kProtRW : kProtRWX;
+      CheckedCall(mprotect, "Cache +W", updatable_pages->Begin(), updatable_pages->Size(), prot);
+    }
+  }
+
+  ~ScopedCodeCacheWrite() {
+    ScopedTrace trace("mprotect code");
+    const MemMap* const updatable_pages = region_.GetUpdatableCodeMapping();
+    if (updatable_pages != nullptr) {
+      int prot = region_.HasDualCodeMapping() ? kProtR : kProtRX;
+      CheckedCall(mprotect, "Cache -W", updatable_pages->Begin(), updatable_pages->Size(), prot);
+    }
+  }
+
+ private:
+  const JitMemoryRegion& region_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
+};
+
+}  // namespace jit
+}  // namespace art
+
+#endif  // ART_RUNTIME_JIT_JIT_SCOPED_CODE_CACHE_WRITE_H_
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 7346a2c..fe551f3b 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -37,6 +37,7 @@
 #include "gc/collector_type.h"
 #include "gc/gc_cause.h"
 #include "gc/scoped_gc_critical_section.h"
+#include "jit/jit.h"
 #include "jit/profiling_info.h"
 #include "oat_file_manager.h"
 #include "profile/profile_compilation_info.h"
@@ -44,6 +45,8 @@
 
 namespace art {
 
+using Hotness = ProfileCompilationInfo::MethodHotness;
+
 ProfileSaver* ProfileSaver::instance_ = nullptr;
 pthread_t ProfileSaver::profiler_pthread_ = 0U;
 
@@ -95,7 +98,6 @@
       total_number_of_failed_writes_(0),
       total_ms_of_sleep_(0),
       total_ns_of_work_(0),
-      max_number_of_profile_entries_cached_(0),
       total_number_of_hot_spikes_(0),
       total_number_of_wake_ups_(0),
       options_(options) {
@@ -122,6 +124,10 @@
 void ProfileSaver::Run() {
   Thread* self = Thread::Current();
 
+  // For thread annotalysis, the setup is more complicated than it should be. Run needs to start
+  // under mutex, but should drop it.
+  Locks::profiler_lock_->ExclusiveUnlock(self);
+
   // Fetch the resolved classes for the app images after sleeping for
   // options_.GetSaveResolvedClassesDelayMs().
   // TODO(calin) This only considers the case of the primary profile file.
@@ -417,9 +423,12 @@
     MutexLock mu(self, *Locks::profiler_lock_);
     profiler_pthread = profiler_pthread_;
   }
-  const uint32_t hot_method_sample_threshold = startup ?
-      options_.GetHotStartupMethodSamples(is_low_ram) :
-      std::numeric_limits<uint32_t>::max();
+  uint32_t hot_method_sample_threshold = std::numeric_limits<uint32_t>::max();
+  if (startup) {
+    hot_method_sample_threshold = options_.GetHotStartupMethodSamples(is_low_ram);
+  } else if (Runtime::Current()->GetJit() != nullptr) {
+    hot_method_sample_threshold = Runtime::Current()->GetJit()->WarmMethodThreshold();
+  }
   SampleClassesAndExecutedMethods(profiler_pthread,
                                   options_.GetProfileBootClassPath(),
                                   &allocator,
@@ -429,17 +438,15 @@
                                   &hot_methods,
                                   &sampled_methods);
   MutexLock mu(self, *Locks::profiler_lock_);
-  uint64_t total_number_of_profile_entries_cached = 0;
-  using Hotness = ProfileCompilationInfo::MethodHotness;
 
   for (const auto& it : tracked_dex_base_locations_) {
-    std::set<DexCacheResolvedClasses> resolved_classes_for_location;
     const std::string& filename = it.first;
     auto info_it = profile_cache_.find(filename);
     if (info_it == profile_cache_.end()) {
       info_it = profile_cache_.Put(
           filename,
-          new ProfileCompilationInfo(Runtime::Current()->GetArenaPool()));
+          new ProfileCompilationInfo(
+              Runtime::Current()->GetArenaPool(), options_.GetProfileBootClassPath()));
     }
     ProfileCompilationInfo* cached_info = info_it->second;
 
@@ -455,13 +462,14 @@
                      << " found=" << (locations.find(base_location) != locations.end())
                      << " indices size=" << indices.size();
       if (locations.find(base_location) != locations.end()) {
-        uint8_t flags = Hotness::kFlagHot;
+        uint32_t flags = Hotness::kFlagHot;
         flags |= startup ? Hotness::kFlagStartup : Hotness::kFlagPostStartup;
         cached_info->AddMethodsForDex(
-            static_cast<Hotness::Flag>(flags),
+            AnnotateSampleFlags(flags),
             dex_file,
             indices.begin(),
-            indices.end());
+            indices.end(),
+            GetProfileSampleAnnotation());
       }
     }
     for (const auto& pair : sampled_methods.GetMap()) {
@@ -472,10 +480,12 @@
                      << " found=" << (locations.find(base_location) != locations.end())
                      << " indices size=" << indices.size();
       if (locations.find(base_location) != locations.end()) {
-        cached_info->AddMethodsForDex(startup ? Hotness::kFlagStartup : Hotness::kFlagPostStartup,
-                                      dex_file,
-                                      indices.begin(),
-                                      indices.end());
+        cached_info->AddMethodsForDex(
+            AnnotateSampleFlags(startup ? Hotness::kFlagStartup : Hotness::kFlagPostStartup),
+            dex_file,
+            indices.begin(),
+            indices.end(),
+            GetProfileSampleAnnotation());
       }
     }
     for (const auto& pair : resolved_classes.GetMap()) {
@@ -486,16 +496,15 @@
         VLOG(profiler) << "Added " << classes.size() << " classes for location "
                        << base_location
                        << " (" << dex_file->GetLocation() << ")";
-        cached_info->AddClassesForDex(dex_file, classes.begin(), classes.end());
+        cached_info->AddClassesForDex(dex_file,
+                                      classes.begin(),
+                                      classes.end(),
+                                      GetProfileSampleAnnotation());
       } else {
         VLOG(profiler) << "Location not found " << base_location;
       }
     }
-    total_number_of_profile_entries_cached += resolved_classes_for_location.size();
   }
-  max_number_of_profile_entries_cached_ = std::max(
-      max_number_of_profile_entries_cached_,
-      total_number_of_profile_entries_cached);
   VLOG(profiler) << "Profile saver recorded " << hot_methods.NumReferences() << " hot methods and "
                  << sampled_methods.NumReferences() << " sampled methods with threshold "
                  << hot_method_sample_threshold << " in "
@@ -549,6 +558,15 @@
         LOG(WARNING) << "Could not forcefully load profile " << filename;
         continue;
       }
+      if (options_.GetProfileBootClassPath() != info.IsForBootImage()) {
+        // If we enabled boot class path profiling but the profile is a regular one,
+        // (or the opposite), clear the profile. We do not support cross-version merges.
+        LOG(WARNING) << "Adjust profile version: for_boot_classpath="
+            << options_.GetProfileBootClassPath();
+        info.ClearDataAndAdjustVersion(options_.GetProfileBootClassPath());
+        // For saving to ensure we persist the new version.
+        force_save = true;
+      }
       uint64_t last_save_number_of_methods = info.GetNumberOfMethods();
       uint64_t last_save_number_of_classes = info.GetNumberOfResolvedClasses();
       VLOG(profiler) << "last_save_number_of_methods=" << last_save_number_of_methods
@@ -558,8 +576,10 @@
       // Try to add the method data. Note this may fail is the profile loaded from disk contains
       // outdated data (e.g. the previous profiled dex files might have been updated).
       // If this happens we clear the profile data and for the save to ensure the file is cleared.
-      if (!info.AddMethods(profile_methods,
-              ProfileCompilationInfo::MethodHotness::kFlagPostStartup)) {
+      if (!info.AddMethods(
+              profile_methods,
+              AnnotateSampleFlags(Hotness::kFlagHot | Hotness::kFlagPostStartup),
+              GetProfileSampleAnnotation())) {
         LOG(WARNING) << "Could not add methods to the existing profiler. "
             << "Clearing the profile data.";
         info.ClearData();
@@ -647,8 +667,11 @@
     return nullptr;
   }
 
-  ProfileSaver* profile_saver = reinterpret_cast<ProfileSaver*>(arg);
-  profile_saver->Run();
+  {
+    Locks::profiler_lock_->ExclusiveLock(Thread::Current());
+    CHECK_EQ(reinterpret_cast<ProfileSaver*>(arg), instance_);
+    instance_->Run();
+  }
 
   runtime->DetachCurrentThread();
   VLOG(profiler) << "Profile saver shutdown";
@@ -681,7 +704,7 @@
   return true;
 }
 
-void ProfileSaver::Start(const ProfileSaverOptions& options,
+void  ProfileSaver::Start(const ProfileSaverOptions& options,
                          const std::string& output_filename,
                          jit::JitCodeCache* jit_code_cache,
                          const std::vector<std::string>& code_paths) {
@@ -706,12 +729,14 @@
   if (options.GetProfileBootClassPath()) {
     std::set<std::string> code_paths_keys;
     for (const std::string& location : code_paths) {
-      code_paths_keys.insert(ProfileCompilationInfo::GetProfileDexFileKey(location));
+      // Use the profile base key for checking file uniqueness (as it is constructed solely based
+      // on the location and ignores other metadata like origin package).
+      code_paths_keys.insert(ProfileCompilationInfo::GetProfileDexFileBaseKey(location));
     }
     for (const DexFile* dex_file : runtime->GetClassLinker()->GetBootClassPath()) {
       // Don't check ShouldProfileLocation since the boot class path may be speed compiled.
       const std::string& location = dex_file->GetLocation();
-      const std::string key = ProfileCompilationInfo::GetProfileDexFileKey(location);
+      const std::string key = ProfileCompilationInfo::GetProfileDexFileBaseKey(location);
       VLOG(profiler) << "Registering boot dex file " << location;
       if (code_paths_keys.find(key) != code_paths_keys.end()) {
         LOG(WARNING) << "Boot class path location key conflicts with code path " << location;
@@ -784,7 +809,7 @@
 
   // Force save everything before destroying the thread since we want profiler_pthread_ to remain
   // valid.
-  instance_->ProcessProfilingInfo(/*force_save=*/true, /*number_of_new_methods=*/nullptr);
+  profile_saver->ProcessProfilingInfo(/*force_save=*/true, /*number_of_new_methods=*/nullptr);
 
   // Wait for the saver thread to stop.
   CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profile saver thread shutdown");
@@ -881,8 +906,6 @@
      << "ProfileSaver total_number_of_failed_writes=" << total_number_of_failed_writes_ << '\n'
      << "ProfileSaver total_ms_of_sleep=" << total_ms_of_sleep_ << '\n'
      << "ProfileSaver total_ms_of_work=" << NsToMs(total_ns_of_work_) << '\n'
-     << "ProfileSaver max_number_profile_entries_cached="
-     << max_number_of_profile_entries_cached_ << '\n'
      << "ProfileSaver total_number_of_hot_spikes=" << total_number_of_hot_spikes_ << '\n'
      << "ProfileSaver total_number_of_wake_ups=" << total_number_of_wake_ups_ << '\n';
 }
@@ -909,11 +932,8 @@
     if (!info.Load(profile, /*clear_if_invalid=*/false)) {
       return false;
     }
-    ProfileCompilationInfo::MethodHotness hotness = info.GetMethodHotness(ref);
-    // Ignore hot parameter for now since it was causing test 595 to be flaky. TODO: Investigate.
-    // b/63635729
-    UNUSED(hot);
-    return hotness.IsInProfile();
+    const ProfileCompilationInfo::MethodHotness hotness = info.GetMethodHotness(ref);
+    return hot ? hotness.IsHot() : hotness.IsInProfile();
   }
   return false;
 }
@@ -952,4 +972,32 @@
   }
 }
 
+ProfileCompilationInfo::ProfileSampleAnnotation ProfileSaver::GetProfileSampleAnnotation() {
+  // Ideally, this would be cached in the ProfileSaver class, when we start the thread.
+  // However the profile is initialized before the process package name is set and fixing this
+  // would require unnecessary complex synchronizations.
+  std::string package_name = Runtime::Current()->GetProcessPackageName();
+  if (package_name.empty()) {
+    package_name = "unknown";
+  }
+  // We only use annotation for the boot image profiles. Regular apps do not use the extra
+  // metadata and as such there is no need to pay the cost (storage and computational)
+  // that comes with the annotations.
+  return options_.GetProfileBootClassPath()
+      ? ProfileCompilationInfo::ProfileSampleAnnotation(package_name)
+      : ProfileCompilationInfo::ProfileSampleAnnotation::kNone;
+}
+
+Hotness::Flag ProfileSaver::AnnotateSampleFlags(uint32_t flags) {
+  uint32_t extra_flags = 0;
+  // We only add the extra flags for the boot image profile because individual apps do not use
+  // this information.
+  if (options_.GetProfileBootClassPath()) {
+    extra_flags = Is64BitInstructionSet(Runtime::Current()->GetInstructionSet())
+        ? Hotness::kFlag64bit
+        : Hotness::kFlag32bit;
+  }
+  return static_cast<Hotness::Flag>(flags | extra_flags);
+}
+
 }   // namespace art
diff --git a/runtime/jit/profile_saver.h b/runtime/jit/profile_saver.h
index 97271c9..60959d2 100644
--- a/runtime/jit/profile_saver.h
+++ b/runtime/jit/profile_saver.h
@@ -34,13 +34,10 @@
                     const std::string& output_filename,
                     jit::JitCodeCache* jit_code_cache,
                     const std::vector<std::string>& code_paths)
-      REQUIRES(!Locks::profiler_lock_, !wait_lock_);
+      REQUIRES(!Locks::profiler_lock_, !instance_->wait_lock_);
 
   // Stops the profile saver thread.
-  // NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock.
-  static void Stop(bool dump_info_)
-      REQUIRES(!Locks::profiler_lock_, !wait_lock_)
-      NO_THREAD_SAFETY_ANALYSIS;
+  static void Stop(bool dump_info_) REQUIRES(!Locks::profiler_lock_, !instance_->wait_lock_);
 
   // Returns true if the profile saver is started.
   static bool IsStarted() REQUIRES(!Locks::profiler_lock_);
@@ -48,19 +45,17 @@
   // If the profile saver is running, dumps statistics to the `os`. Otherwise it does nothing.
   static void DumpInstanceInfo(std::ostream& os);
 
-  // NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock.
-  static void NotifyJitActivity()
-      REQUIRES(!Locks::profiler_lock_, !wait_lock_)
-      NO_THREAD_SAFETY_ANALYSIS;
+  static void NotifyJitActivity() REQUIRES(!Locks::profiler_lock_, !instance_->wait_lock_);
 
   // For testing or manual purposes (SIGUSR1).
-  static void ForceProcessProfiles();
+  static void ForceProcessProfiles() REQUIRES(!Locks::profiler_lock_, !Locks::mutator_lock_);
 
   // Just for testing purposes.
-  static bool HasSeenMethod(const std::string& profile, bool hot, MethodReference ref);
+  static bool HasSeenMethod(const std::string& profile, bool hot, MethodReference ref)
+      REQUIRES(!Locks::profiler_lock_);
 
   // Notify that startup has completed.
-  static void NotifyStartupCompleted();
+  static void NotifyStartupCompleted() REQUIRES(!Locks::profiler_lock_, !instance_->wait_lock_);
 
  private:
   ProfileSaver(const ProfileSaverOptions& options,
@@ -69,13 +64,13 @@
                const std::vector<std::string>& code_paths);
   ~ProfileSaver();
 
-  // NO_THREAD_SAFETY_ANALYSIS for static function calling into member function with excludes lock.
   static void* RunProfileSaverThread(void* arg)
-      REQUIRES(!Locks::profiler_lock_, !wait_lock_)
-      NO_THREAD_SAFETY_ANALYSIS;
+      REQUIRES(!Locks::profiler_lock_, !instance_->wait_lock_);
 
   // The run loop for the saver.
-  void Run() REQUIRES(!Locks::profiler_lock_, !wait_lock_);
+  void Run()
+      REQUIRES(Locks::profiler_lock_, !wait_lock_)
+      RELEASE(Locks::profiler_lock_);
 
   // Processes the existing profiling info from the jit code cache and returns
   // true if it needed to be saved to disk.
@@ -84,8 +79,8 @@
   // If force_save is true, the saver will ignore any constraints which limit IO (e.g. will write
   // the profile to disk even if it's just one new method).
   bool ProcessProfilingInfo(bool force_save, /*out*/uint16_t* number_of_new_methods)
-    REQUIRES(!Locks::profiler_lock_)
-    REQUIRES(!Locks::mutator_lock_);
+      REQUIRES(!Locks::profiler_lock_)
+      REQUIRES(!Locks::mutator_lock_);
 
   void NotifyJitActivityInternal() REQUIRES(!wait_lock_);
   void WakeUpSaver() REQUIRES(wait_lock_);
@@ -99,7 +94,7 @@
 
   // Fetches the current resolved classes and methods from the ClassLinker and stores them in the
   // profile_cache_ for later save.
-  void FetchAndCacheResolvedClassesAndMethods(bool startup);
+  void FetchAndCacheResolvedClassesAndMethods(bool startup) REQUIRES(!Locks::profiler_lock_);
 
   void DumpInfo(std::ostream& os);
 
@@ -107,6 +102,13 @@
   // and put the result in tracked_dex_base_locations_.
   void ResolveTrackedLocations() REQUIRES(!Locks::profiler_lock_);
 
+  // Get the profile metadata that should be associated with the profile session during the current
+  // profile saver session.
+  ProfileCompilationInfo::ProfileSampleAnnotation GetProfileSampleAnnotation();
+
+  // Extends the given set of flags with global flags if necessary (e.g. the running architecture).
+  ProfileCompilationInfo::MethodHotness::Flag AnnotateSampleFlags(uint32_t flags);
+
   // The only instance of the saver.
   static ProfileSaver* instance_ GUARDED_BY(Locks::profiler_lock_);
   // Profile saver thread.
@@ -148,11 +150,14 @@
   uint64_t total_ms_of_sleep_;
   uint64_t total_ns_of_work_;
   // TODO(calin): replace with an actual size.
-  uint64_t max_number_of_profile_entries_cached_;
   uint64_t total_number_of_hot_spikes_;
   uint64_t total_number_of_wake_ups_;
 
   const ProfileSaverOptions options_;
+
+  friend class ProfileSaverTest;
+  friend class ProfileSaverForBootTest;
+
   DISALLOW_COPY_AND_ASSIGN(ProfileSaver);
 };
 
diff --git a/runtime/jit/profile_saver_options.h b/runtime/jit/profile_saver_options.h
index 18f7899..1cff713 100644
--- a/runtime/jit/profile_saver_options.h
+++ b/runtime/jit/profile_saver_options.h
@@ -112,9 +112,6 @@
   bool GetProfileAOTCode() const {
     return profile_aot_code_;
   }
-  void SetProfileAOTCode(bool value) {
-    profile_aot_code_ = value;
-  }
   bool GetWaitForJitNotificationsToSave() const {
     return wait_for_jit_notifications_to_save_;
   }
diff --git a/runtime/jit/profile_saver_test.cc b/runtime/jit/profile_saver_test.cc
new file mode 100644
index 0000000..9a866a3
--- /dev/null
+++ b/runtime/jit/profile_saver_test.cc
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "common_runtime_test.h"
+#include "compiler_callbacks.h"
+#include "jit/jit.h"
+#include "profile_saver.h"
+#include "profile/profile_compilation_info.h"
+
+namespace art {
+
+using Hotness = ProfileCompilationInfo::MethodHotness;
+
+class ProfileSaverTest : public CommonRuntimeTest {
+ public:
+  void SetUpRuntimeOptions(RuntimeOptions *options) override {
+    // Reset the callbacks so that the runtime doesn't think it's for AOT.
+    callbacks_ = nullptr;
+    CommonRuntimeTest::SetUpRuntimeOptions(options);
+    // Enable profile saving and the jit.
+    options->push_back(std::make_pair("-Xjitsaveprofilinginfo", nullptr));
+    options->push_back(std::make_pair("-Xusejit:true", nullptr));
+  }
+
+  void PostRuntimeCreate() override {
+    // Create a profile saver.
+    Runtime* runtime = Runtime::Current();
+    const std::vector<std::string> code_paths;
+    const std::string fake_file = "fake_file";
+    profile_saver_ = new ProfileSaver(
+        runtime->GetJITOptions()->GetProfileSaverOptions(),
+        fake_file,
+        runtime->GetJitCodeCache(),
+        code_paths);
+  }
+
+  ~ProfileSaverTest() {
+    if (profile_saver_ != nullptr) {
+      delete profile_saver_;
+    }
+  }
+
+  ProfileCompilationInfo::ProfileSampleAnnotation GetProfileSampleAnnotation() {
+    return profile_saver_->GetProfileSampleAnnotation();
+  }
+
+  Hotness::Flag AnnotateSampleFlags(uint32_t flags) {
+    return profile_saver_->AnnotateSampleFlags(flags);
+  }
+
+ protected:
+  ProfileSaver* profile_saver_ = nullptr;
+};
+
+// Test profile saving operations for boot image.
+class ProfileSaverForBootTest : public ProfileSaverTest {
+ public:
+  void SetUpRuntimeOptions(RuntimeOptions *options) override {
+    ProfileSaverTest::SetUpRuntimeOptions(options);
+    options->push_back(std::make_pair("-Xps-profile-boot-class-path", nullptr));
+  }
+};
+
+TEST_F(ProfileSaverTest, GetProfileSampleAnnotation) {
+  ASSERT_EQ(ProfileCompilationInfo::ProfileSampleAnnotation::kNone,
+            GetProfileSampleAnnotation());
+}
+
+TEST_F(ProfileSaverForBootTest, GetProfileSampleAnnotationUnkown) {
+  ProfileCompilationInfo::ProfileSampleAnnotation expected("unknown");
+  ASSERT_EQ(expected, GetProfileSampleAnnotation());
+}
+
+TEST_F(ProfileSaverForBootTest, GetProfileSampleAnnotation) {
+  Runtime::Current()->SetProcessPackageName("test.package");
+  ProfileCompilationInfo::ProfileSampleAnnotation expected("test.package");
+  ASSERT_EQ(expected, GetProfileSampleAnnotation());
+}
+
+TEST_F(ProfileSaverForBootTest, AnnotateSampleFlags) {
+  Hotness::Flag expected_flag = Is64BitInstructionSet(Runtime::Current()->GetInstructionSet())
+        ? Hotness::kFlag64bit
+        : Hotness::kFlag32bit;
+  Hotness::Flag actual = AnnotateSampleFlags(Hotness::kFlagHot);
+
+  ASSERT_EQ(static_cast<Hotness::Flag>(expected_flag | Hotness::kFlagHot), actual);
+}
+
+TEST_F(ProfileSaverTest, AnnotateSampleFlags) {
+  Hotness::Flag actual = AnnotateSampleFlags(Hotness::kFlagHot);
+
+  ASSERT_EQ(Hotness::kFlagHot, actual);
+}
+
+}  // namespace art
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 2cb569c..8c88760 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -26,7 +26,8 @@
 namespace art {
 
 ProfilingInfo::ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries)
-      : method_(method),
+      : baseline_hotness_count_(0),
+        method_(method),
         saved_entry_point_(nullptr),
         number_of_inline_caches_(entries.size()),
         current_inline_uses_(0),
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index f6139bb..14d76d2 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -21,6 +21,7 @@
 
 #include "base/macros.h"
 #include "gc_root.h"
+#include "offsets.h"
 
 namespace art {
 
@@ -39,8 +40,13 @@
 // Once the classes_ array is full, we consider the INVOKE to be megamorphic.
 class InlineCache {
  public:
+  // This is hard coded in the assembly stub art_quick_update_inline_cache.
   static constexpr uint8_t kIndividualCacheSize = 5;
 
+  static constexpr MemberOffset ClassesOffset() {
+    return MemberOffset(OFFSETOF_MEMBER(InlineCache, classes_));
+  }
+
  private:
   uint32_t dex_pc_;
   GcRoot<mirror::Class> classes_[kIndividualCacheSize];
@@ -99,15 +105,6 @@
     return saved_entry_point_;
   }
 
-  void ClearGcRootsInInlineCaches() {
-    for (size_t i = 0; i < number_of_inline_caches_; ++i) {
-      InlineCache* cache = &cache_[i];
-      memset(&cache->classes_[0],
-             0,
-             InlineCache::kIndividualCacheSize * sizeof(GcRoot<mirror::Class>));
-    }
-  }
-
   // Increments the number of times this method is currently being inlined.
   // Returns whether it was successful, that is it could increment without
   // overflowing.
@@ -129,9 +126,26 @@
         (current_inline_uses_ > 0);
   }
 
+  static constexpr MemberOffset BaselineHotnessCountOffset() {
+    return MemberOffset(OFFSETOF_MEMBER(ProfilingInfo, baseline_hotness_count_));
+  }
+
+  void SetBaselineHotnessCount(uint16_t count) {
+    baseline_hotness_count_ = count;
+  }
+
+  uint16_t GetBaselineHotnessCount() const {
+    return baseline_hotness_count_;
+  }
+
  private:
   ProfilingInfo(ArtMethod* method, const std::vector<uint32_t>& entries);
 
+  // Hotness count for methods compiled with the JIT baseline compiler. Once
+  // a threshold is hit (currentily the maximum value of uint16_t), we will
+  // JIT compile optimized the method.
+  uint16_t baseline_hotness_count_;
+
   // Method this profiling info is for.
   // Not 'const' as JVMTI introduces obsolete methods that we implement by creating new ArtMethods.
   // See JitCodeCache::MoveObsoleteMethod.
diff --git a/runtime/jit/profiling_info_test.cc b/runtime/jit/profiling_info_test.cc
index f695c8f..319a3e1 100644
--- a/runtime/jit/profiling_info_test.cc
+++ b/runtime/jit/profiling_info_test.cc
@@ -36,8 +36,6 @@
 
 using Hotness = ProfileCompilationInfo::MethodHotness;
 
-static constexpr size_t kMaxMethodIds = 65535;
-
 class ProfileCompilationInfoTest : public CommonRuntimeTest {
  public:
   void PostRuntimeCreate() override {
@@ -63,35 +61,6 @@
     return methods;
   }
 
-  bool AddMethod(const std::string& dex_location,
-                 uint32_t checksum,
-                 uint16_t method_index,
-                 ProfileCompilationInfo* info) {
-    return info->AddMethodIndex(Hotness::kFlagHot,
-                                dex_location,
-                                checksum,
-                                method_index,
-                                kMaxMethodIds);
-  }
-
-  bool AddMethod(const std::string& dex_location,
-                 uint32_t checksum,
-                 uint16_t method_index,
-                 const ProfileCompilationInfo::OfflineProfileMethodInfo& pmi,
-                 ProfileCompilationInfo* info) {
-    return info->AddMethod(
-        dex_location, checksum, method_index, kMaxMethodIds, pmi, Hotness::kFlagPostStartup);
-  }
-
-  bool AddClass(const std::string& dex_location,
-                uint32_t checksum,
-                dex::TypeIndex type_index,
-                ProfileCompilationInfo* info) {
-    DexCacheResolvedClasses classes(dex_location, dex_location, checksum, kMaxMethodIds);
-    classes.AddClass(type_index);
-    return info->AddClasses({classes});
-  }
-
   uint32_t GetFd(const ScratchFile& file) {
     return static_cast<uint32_t>(file.GetFd());
   }
@@ -99,7 +68,6 @@
   bool SaveProfilingInfo(
       const std::string& filename,
       const std::vector<ArtMethod*>& methods,
-      const std::set<DexCacheResolvedClasses>& resolved_classes,
       Hotness::Flag flags) {
     ProfileCompilationInfo info;
     std::vector<ProfileMethodInfo> profile_methods;
@@ -108,7 +76,7 @@
       profile_methods.emplace_back(
           MethodReference(method->GetDexFile(), method->GetDexMethodIndex()));
     }
-    if (!info.AddMethods(profile_methods, flags) || !info.AddClasses(resolved_classes)) {
+    if (!info.AddMethods(profile_methods, flags)) {
       return false;
     }
     if (info.GetNumberOfMethods() != profile_methods.size()) {
@@ -204,7 +172,7 @@
         dex_pc_data.AddClass(dex_profile_index, class_ref.TypeIndex());
         if (dex_profile_index >= offline_pmi.dex_references.size()) {
           // This is a new dex.
-          const std::string& dex_key = ProfileCompilationInfo::GetProfileDexFileKey(
+          const std::string& dex_key = ProfileCompilationInfo::GetProfileDexFileBaseKey(
               class_ref.dex_file->GetLocation());
           offline_pmi.dex_references.emplace_back(dex_key,
                                                   class_ref.dex_file->GetLocationChecksum(),
@@ -240,10 +208,11 @@
   ASSERT_NE(class_loader, nullptr);
 
   // Save virtual methods from Main.
-  std::set<DexCacheResolvedClasses> resolved_classes;
   std::vector<ArtMethod*> main_methods = GetVirtualMethods(class_loader, "LMain;");
   ASSERT_TRUE(SaveProfilingInfo(
-      profile.GetFilename(), main_methods, resolved_classes, Hotness::kFlagPostStartup));
+      profile.GetFilename(),
+      main_methods,
+      static_cast<Hotness::Flag>(Hotness::kFlagHot | Hotness::kFlagPostStartup)));
 
   // Check that what we saved is in the profile.
   ProfileCompilationInfo info1;
@@ -261,7 +230,9 @@
   // Save virtual methods from Second.
   std::vector<ArtMethod*> second_methods = GetVirtualMethods(class_loader, "LSecond;");
   ASSERT_TRUE(SaveProfilingInfo(
-    profile.GetFilename(), second_methods, resolved_classes, Hotness::kFlagStartup));
+    profile.GetFilename(),
+    second_methods,
+    static_cast<Hotness::Flag>(Hotness::kFlagHot | Hotness::kFlagStartup)));
 
   // Check that what we saved is in the profile (methods form Main and Second).
   ProfileCompilationInfo info2;
@@ -295,12 +266,14 @@
   ASSERT_NE(class_loader, nullptr);
 
   // Save virtual methods from Main.
-  std::set<DexCacheResolvedClasses> resolved_classes;
   std::vector<ArtMethod*> main_methods = GetVirtualMethods(class_loader, "LMain;");
 
   SafeMap<ArtMethod*, ProfileMethodInfo> profile_methods_map;
   ASSERT_TRUE(SaveProfilingInfoWithFakeInlineCaches(
-      profile.GetFilename(), main_methods, Hotness::kFlagStartup, &profile_methods_map));
+      profile.GetFilename(),
+      main_methods,
+      static_cast<Hotness::Flag>(Hotness::kFlagHot | Hotness::kFlagStartup),
+      &profile_methods_map));
 
   // Check that what we saved is in the profile.
   ProfileCompilationInfo info;
@@ -309,14 +282,13 @@
   {
     ScopedObjectAccess soa(self);
     for (ArtMethod* m : main_methods) {
-      Hotness h = info.GetMethodHotness(MethodReference(m->GetDexFile(), m->GetDexMethodIndex()));
+      MethodReference method_ref(m->GetDexFile(), m->GetDexMethodIndex());
+      Hotness h = info.GetMethodHotness(method_ref);
       ASSERT_TRUE(h.IsHot());
       ASSERT_TRUE(h.IsStartup());
       const ProfileMethodInfo& pmi = profile_methods_map.find(m)->second;
       std::unique_ptr<ProfileCompilationInfo::OfflineProfileMethodInfo> offline_pmi =
-          info.GetMethod(m->GetDexFile()->GetLocation(),
-                         m->GetDexFile()->GetLocationChecksum(),
-                         m->GetDexMethodIndex());
+          info.GetHotMethodInfo(method_ref);
       ASSERT_TRUE(offline_pmi != nullptr);
       ProfileCompilationInfo::OfflineProfileMethodInfo converted_pmi =
           ConvertProfileMethodInfo(pmi);
diff --git a/runtime/jni/java_vm_ext.cc b/runtime/jni/java_vm_ext.cc
index 1bf88c5..e5b3d4d 100644
--- a/runtime/jni/java_vm_ext.cc
+++ b/runtime/jni/java_vm_ext.cc
@@ -385,6 +385,13 @@
       return JNI_ERR;
     }
     JavaVMExt* raw_vm = reinterpret_cast<JavaVMExt*>(vm);
+
+    // Wait for all non-dameon threads to terminate before we start destroying
+    // bits of the runtime. Thread list deletion will repeat this in case more
+    // threads are created by daemons in the meantime.
+    raw_vm->GetRuntime()->GetThreadList()
+          ->WaitForOtherNonDaemonThreadsToExit(/*check_no_birth=*/ false);
+
     delete raw_vm->GetRuntime();
     android::ResetNativeLoader();
     return JNI_OK;
diff --git a/runtime/jni/jni_env_ext.cc b/runtime/jni/jni_env_ext.cc
index 976f89b..cf6a22c 100644
--- a/runtime/jni/jni_env_ext.cc
+++ b/runtime/jni/jni_env_ext.cc
@@ -24,6 +24,7 @@
 #include "base/mutex.h"
 #include "base/to_str.h"
 #include "check_jni.h"
+#include "hidden_api.h"
 #include "indirect_reference_table.h"
 #include "java_vm_ext.h"
 #include "jni_internal.h"
@@ -89,7 +90,6 @@
 
 void JNIEnvExt::SetFunctionsToRuntimeShutdownFunctions() {
   functions = GetRuntimeShutdownNativeInterface();
-  runtime_deleted_ = true;
 }
 
 JNIEnvExt::~JNIEnvExt() {
@@ -290,11 +290,12 @@
   }
 }
 
-static void ThreadResetFunctionTable(Thread* thread, void* arg ATTRIBUTE_UNUSED)
+void ThreadResetFunctionTable(Thread* thread, void* arg ATTRIBUTE_UNUSED)
     REQUIRES(Locks::jni_function_table_lock_) {
   JNIEnvExt* env = thread->GetJniEnv();
   bool check_jni = env->IsCheckJniEnabled();
   env->functions = JNIEnvExt::GetFunctionTable(check_jni);
+  env->unchecked_functions_ = GetJniNativeInterface();
 }
 
 void JNIEnvExt::SetTableOverride(const JNINativeInterface* table_override) {
@@ -308,6 +309,9 @@
   Runtime* runtime = Runtime::Current();
   if (runtime != nullptr) {
     runtime->GetThreadList()->ForEach(ThreadResetFunctionTable, nullptr);
+    // Core Platform API checks rely on stack walking and classifying the caller. If a table
+    // override is installed do not try to guess what semantics should be.
+    runtime->SetCorePlatformApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kDisabled);
   }
 }
 
@@ -319,4 +323,12 @@
   return check_jni ? GetCheckJniNativeInterface() : GetJniNativeInterface();
 }
 
+void JNIEnvExt::ResetFunctionTable() {
+  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+  MutexLock mu2(Thread::Current(), *Locks::jni_function_table_lock_);
+  Runtime* runtime = Runtime::Current();
+  CHECK(runtime != nullptr);
+  runtime->GetThreadList()->ForEach(ThreadResetFunctionTable, nullptr);
+}
+
 }  // namespace art
diff --git a/runtime/jni/jni_env_ext.h b/runtime/jni/jni_env_ext.h
index 61de074..2c7ba3b 100644
--- a/runtime/jni/jni_env_ext.h
+++ b/runtime/jni/jni_env_ext.h
@@ -27,7 +27,10 @@
 
 namespace art {
 
+class ArtMethod;
+class ArtField;
 class JavaVMExt;
+class ScopedObjectAccessAlreadyRunnable;
 
 namespace mirror {
 class Object;
@@ -107,7 +110,8 @@
   }
   JavaVMExt* GetVm() const { return vm_; }
 
-  bool IsRuntimeDeleted() const { return runtime_deleted_; }
+  void SetRuntimeDeleted() { runtime_deleted_.store(true, std::memory_order_relaxed); }
+  bool IsRuntimeDeleted() const { return runtime_deleted_.load(std::memory_order_relaxed); }
   bool IsCheckJniEnabled() const { return check_jni_; }
 
 
@@ -131,6 +135,9 @@
   // Set the functions to the runtime shutdown functions.
   void SetFunctionsToRuntimeShutdownFunctions();
 
+  // Set the functions to the new JNI functions based on Runtime::GetJniIdType.
+  void UpdateJniFunctionsPointer();
+
   // Set the function table override. This will install the override (or original table, if null)
   // to all threads.
   // Note: JNI function table overrides are sensitive to the order of operations wrt/ CheckJNI.
@@ -143,6 +150,9 @@
   static const JNINativeInterface* GetFunctionTable(bool check_jni)
       REQUIRES(Locks::jni_function_table_lock_);
 
+  static void ResetFunctionTable()
+      REQUIRES(!Locks::thread_list_lock_, !Locks::jni_function_table_lock_);
+
  private:
   // Checking "locals" requires the mutator lock, but at creation time we're
   // really only interested in validity, which isn't changing. To avoid grabbing
@@ -179,7 +189,7 @@
   ReferenceTable monitors_;
 
   // Used by -Xcheck:jni.
-  const JNINativeInterface* unchecked_functions_;
+  JNINativeInterface const* unchecked_functions_;
 
   // All locked objects, with the (Java caller) stack frame that locked them. Used in CheckJNI
   // to ensure that only monitors locked in this native frame are being unlocked, and that at
@@ -197,11 +207,12 @@
   bool check_jni_;
 
   // If we are a JNI env for a daemon thread with a deleted runtime.
-  bool runtime_deleted_;
+  std::atomic<bool> runtime_deleted_;
 
-  friend class JNI;
+  template<bool kEnableIndexIds> friend class JNI;
   friend class ScopedJniEnvLocalRefState;
   friend class Thread;
+  friend void ThreadResetFunctionTable(Thread* thread, void* arg);
   ART_FRIEND_TEST(JniInternalTest, JNIEnvExtOffsets);
 };
 
diff --git a/runtime/jni/jni_id_manager.cc b/runtime/jni/jni_id_manager.cc
new file mode 100644
index 0000000..8070505
--- /dev/null
+++ b/runtime/jni/jni_id_manager.cc
@@ -0,0 +1,690 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_id_manager.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <type_traits>
+
+#include "android-base/macros.h"
+#include "art_field-inl.h"
+#include "art_method-inl.h"
+#include "base/enums.h"
+#include "base/globals.h"
+#include "base/locks.h"
+#include "base/mutex.h"
+#include "gc/allocation_listener.h"
+#include "gc/heap.h"
+#include "jni/jni_internal.h"
+#include "jni_id_type.h"
+#include "mirror/array-inl.h"
+#include "mirror/array.h"
+#include "mirror/class-alloc-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/class.h"
+#include "mirror/class_ext-inl.h"
+#include "mirror/object-inl.h"
+#include "obj_ptr-inl.h"
+#include "reflective_handle_scope-inl.h"
+#include "reflective_handle_scope.h"
+#include "reflective_value_visitor.h"
+#include "thread-inl.h"
+#include "thread.h"
+
+namespace art {
+namespace jni {
+
+constexpr bool kTraceIds = false;
+
+// TODO This whole thing could be done lock & wait free (since we never remove anything from the
+// ids list). It's not clear this would be worthwhile though.
+
+namespace {
+
+static constexpr size_t IdToIndex(uintptr_t id) {
+  return id >> 1;
+}
+
+static constexpr uintptr_t IndexToId(size_t index) {
+  return (index << 1) + 1;
+}
+
+template <typename ArtType>
+ObjPtr<mirror::PointerArray> GetIds(ObjPtr<mirror::Class> k, ArtType* t)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  ObjPtr<mirror::Object> ret;
+  if constexpr (std::is_same_v<ArtType, ArtField>) {
+    ret = t->IsStatic() ? k->GetStaticFieldIds() : k->GetInstanceFieldIds();
+  } else {
+    ret = t->IsObsolete() ? nullptr : k->GetMethodIds();
+  }
+  DCHECK(ret.IsNull() || ret->IsArrayInstance()) << "Should have bailed out early!";
+  if (kIsDebugBuild && !ret.IsNull()) {
+    if (kRuntimePointerSize == PointerSize::k32) {
+      CHECK(ret->IsIntArray());
+    } else {
+      CHECK(ret->IsLongArray());
+    }
+  }
+  return down_cast<mirror::PointerArray*>(ret.Ptr());
+}
+
+template <typename ArtType>
+bool ShouldReturnPointer(ObjPtr<mirror::Class> klass, ArtType* t)
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
+template <>
+bool ShouldReturnPointer(ObjPtr<mirror::Class> klass, ArtMethod* t ATTRIBUTE_UNUSED) {
+  ObjPtr<mirror::ClassExt> ext(klass->GetExtData());
+  if (ext.IsNull()) {
+    return true;
+  }
+  ObjPtr<mirror::Object> arr = ext->GetJMethodIDs();
+  return arr.IsNull() || !arr->IsArrayInstance();
+}
+
+template<>
+bool ShouldReturnPointer(ObjPtr<mirror::Class> klass, ArtField* t) {
+  ObjPtr<mirror::ClassExt> ext(klass->GetExtData());
+  if (ext.IsNull()) {
+    return true;
+  }
+  ObjPtr<mirror::Object> arr = t->IsStatic() ? ext->GetStaticJFieldIDs()
+                                             : ext->GetInstanceJFieldIDs();
+  return arr.IsNull() || !arr->IsArrayInstance();
+}
+
+
+// Forces the appropriate id array to be present if possible. Returns true if allocation was
+// attempted but failed.
+template <typename ArtType>
+bool EnsureIdsArray(Thread* self, ObjPtr<mirror::Class> k, ArtType* t)
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
+template <>
+bool EnsureIdsArray(Thread* self, ObjPtr<mirror::Class> k, ArtField* field) {
+  ScopedExceptionStorage ses(self);
+  StackHandleScope<1> hs(self);
+  Handle<mirror::Class> h_k(hs.NewHandle(k));
+  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
+    return false;
+  } else {
+    // NB This modifies the class to allocate the ClassExt and the ids array.
+    field->IsStatic() ? mirror::Class::EnsureStaticFieldIds(h_k)
+                      : mirror::Class::EnsureInstanceFieldIds(h_k);
+  }
+  if (self->IsExceptionPending()) {
+    self->AssertPendingOOMException();
+    ses.SuppressOldException("Failed to allocate maps for jmethodIDs. ");
+    return true;
+  }
+  return false;
+}
+
+template <>
+bool EnsureIdsArray(Thread* self, ObjPtr<mirror::Class> k, ArtMethod* method) {
+  if (method->IsObsolete()) {
+    if (kTraceIds) {
+      LOG(INFO) << "jmethodID for Obsolete method " << method->PrettyMethod() << " requested!";
+    }
+    // No ids array for obsolete methods. Just do a linear scan.
+    return false;
+  }
+  StackHandleScope<1> hs(self);
+  Handle<mirror::Class> h_k(hs.NewHandle(k));
+  if (Locks::mutator_lock_->IsExclusiveHeld(self) || !Locks::mutator_lock_->IsSharedHeld(self)) {
+    return false;
+  } else {
+    // NB This modifies the class to allocate the ClassExt and the ids array.
+    mirror::Class::EnsureMethodIds(h_k);
+  }
+  if (self->IsExceptionPending()) {
+    self->AssertPendingOOMException();
+    return true;
+  }
+  return false;
+}
+
+template <typename ArtType>
+size_t GetIdOffset(ObjPtr<mirror::Class> k, ArtType* t, PointerSize pointer_size)
+    REQUIRES_SHARED(Locks::mutator_lock_);
+template <>
+size_t GetIdOffset(ObjPtr<mirror::Class> k, ArtField* f, PointerSize ptr_size ATTRIBUTE_UNUSED) {
+  return f->IsStatic() ? k->GetStaticFieldIdOffset(f) : k->GetInstanceFieldIdOffset(f);
+}
+template <>
+size_t GetIdOffset(ObjPtr<mirror::Class> k, ArtMethod* method, PointerSize pointer_size) {
+  return method->IsObsolete() ? -1 : k->GetMethodIdOffset(method, pointer_size);
+}
+
+// Calls the relevant PrettyMethod/PrettyField on the input.
+template <typename ArtType>
+std::string PrettyGeneric(ArtType t) REQUIRES_SHARED(Locks::mutator_lock_);
+template <>
+std::string PrettyGeneric(ArtMethod* f) {
+  return f->PrettyMethod();
+}
+template <>
+std::string PrettyGeneric(ReflectiveHandle<ArtMethod> f) {
+  return f->PrettyMethod();
+}
+template <>
+std::string PrettyGeneric(ArtField* f) {
+  return f->PrettyField();
+}
+template <>
+std::string PrettyGeneric(ReflectiveHandle<ArtField> f) {
+  return f->PrettyField();
+}
+
+// Checks if the field or method is obsolete.
+template <typename ArtType>
+bool IsObsolete(ReflectiveHandle<ArtType> t) REQUIRES_SHARED(Locks::mutator_lock_);
+template <>
+bool IsObsolete(ReflectiveHandle<ArtField> t ATTRIBUTE_UNUSED) {
+  return false;
+}
+template <>
+bool IsObsolete(ReflectiveHandle<ArtMethod> t) {
+  return t->IsObsolete();
+}
+
+// Get the canonical (non-copied) version of the field or method. Only relevant for methods.
+template <typename ArtType>
+ArtType* Canonicalize(ReflectiveHandle<ArtType> t) REQUIRES_SHARED(Locks::mutator_lock_);
+template <>
+ArtField* Canonicalize(ReflectiveHandle<ArtField> t) {
+  return t.Get();
+}
+template <>
+ArtMethod* Canonicalize(ReflectiveHandle<ArtMethod> t) {
+  if (UNLIKELY(t->IsCopied())) {
+    return t->GetCanonicalMethod();
+  }
+  return t.Get();
+}
+
+};  // namespace
+
+// We increment the id by 2 each time to allow us to use the LSB as a flag that the ID is an index
+// and not a pointer. This gives us 2**31 unique methods that can be addressed on 32-bit art, which
+// should be more than enough.
+template <>
+uintptr_t JniIdManager::GetNextId<ArtField>(JniIdType type) {
+  DCHECK_EQ(type, JniIdType::kIndices);
+  uintptr_t res = next_field_id_;
+  next_field_id_ += 2;
+  CHECK_GT(next_field_id_, res) << "jfieldID Overflow";
+  return res;
+}
+
+template <>
+uintptr_t JniIdManager::GetNextId<ArtMethod>(JniIdType type) {
+  DCHECK_EQ(type, JniIdType::kIndices);
+  uintptr_t res = next_method_id_;
+  next_method_id_ += 2;
+  CHECK_GT(next_method_id_, res) << "jmethodID Overflow";
+  return res;
+}
+template <>
+std::vector<ArtField*>& JniIdManager::GetGenericMap<ArtField>() {
+  return field_id_map_;
+}
+
+template <>
+std::vector<ArtMethod*>& JniIdManager::GetGenericMap<ArtMethod>() {
+  return method_id_map_;
+}
+template <>
+size_t JniIdManager::GetLinearSearchStartId<ArtField>(
+    ReflectiveHandle<ArtField> t ATTRIBUTE_UNUSED) {
+  return deferred_allocation_field_id_start_;
+}
+
+template <>
+size_t JniIdManager::GetLinearSearchStartId<ArtMethod>(ReflectiveHandle<ArtMethod> m) {
+  if (m->IsObsolete()) {
+    return 1;
+  } else {
+    return deferred_allocation_method_id_start_;
+  }
+}
+
+// TODO need to fix races in here with visitors
+template <typename ArtType>
+uintptr_t JniIdManager::EncodeGenericId(ReflectiveHandle<ArtType> t) {
+  static_assert(std::is_same_v<ArtType, ArtField> || std::is_same_v<ArtType, ArtMethod>,
+                "Expected ArtField or ArtMethod");
+  Runtime* runtime = Runtime::Current();
+  JniIdType id_type = runtime->GetJniIdType();
+  if (id_type == JniIdType::kPointer || t == nullptr) {
+    return reinterpret_cast<uintptr_t>(t.Get());
+  }
+  Thread* self = Thread::Current();
+  ScopedExceptionStorage ses(self);
+  DCHECK(!t->GetDeclaringClass().IsNull()) << "Null declaring class " << PrettyGeneric(t);
+  size_t off = GetIdOffset(t->GetDeclaringClass(), Canonicalize(t), kRuntimePointerSize);
+  // Here is the earliest point we can suspend.
+  bool allocation_failure = EnsureIdsArray(self, t->GetDeclaringClass(), t.Get());
+  if (allocation_failure) {
+    self->AssertPendingOOMException();
+    ses.SuppressOldException("OOM exception while trying to allocate JNI ids.");
+    return 0u;
+  } else if (ShouldReturnPointer(t->GetDeclaringClass(), t.Get())) {
+    return reinterpret_cast<uintptr_t>(t.Get());
+  }
+  ObjPtr<mirror::Class> klass = t->GetDeclaringClass();
+  ObjPtr<mirror::PointerArray> ids(GetIds(klass, t.Get()));
+  uintptr_t cur_id = 0;
+  if (!ids.IsNull()) {
+    DCHECK_GT(ids->GetLength(), static_cast<int32_t>(off)) << " is " << PrettyGeneric(t);
+    DCHECK_LE(0, static_cast<int32_t>(off)) << " is " << PrettyGeneric(t);
+    cur_id = ids->GetElementPtrSize<uintptr_t>(off, kRuntimePointerSize);
+  }
+  if (cur_id != 0) {
+    return cur_id;
+  }
+  WriterMutexLock mu(self, *Locks::jni_id_lock_);
+  ScopedAssertNoThreadSuspension sants("EncodeJniId critical section.");
+  // Check the ids array for a racing id.
+  constexpr std::pair<size_t, size_t> counts {
+    std::is_same_v<ArtType, ArtField> ? 1 : 0,
+    std::is_same_v<ArtType, ArtField> ? 0 : 1,
+  };
+  StackReflectiveHandleScope<counts.first, counts.second> hs(self);
+  t = hs.NewHandle(Canonicalize(t));
+  if (!ids.IsNull()) {
+    // It's possible we got suspended and structurally redefined during the EnsureIdsArray. We need
+    // to get the information again.
+    ids = GetIds(klass, t.Get());
+    off = GetIdOffset(klass, Canonicalize(t), kRuntimePointerSize);
+    CHECK(!ids.IsNull());
+    cur_id = ids->GetElementPtrSize<uintptr_t>(off, kRuntimePointerSize);
+    if (cur_id != 0) {
+      // We were racing some other thread and lost.
+      return cur_id;
+    }
+  } else {
+    // We cannot allocate anything here or don't have an ids array (we might be an obsolete method).
+    DCHECK(IsObsolete(t) || deferred_allocation_refcount_ > 0u)
+        << "deferred_allocation_refcount_: " << deferred_allocation_refcount_
+        << " t: " << PrettyGeneric(t);
+    // Check to see if we raced and lost to another thread.
+    const std::vector<ArtType*>& vec = GetGenericMap<ArtType>();
+    bool found = false;
+    // simple count-while.
+    size_t search_start_index = IdToIndex(GetLinearSearchStartId(t));
+    size_t index = std::count_if(vec.cbegin() + search_start_index,
+                                 vec.cend(),
+                                 [&found, &self, t](const ArtType* candidate) {
+                                   Locks::mutator_lock_->AssertSharedHeld(self);
+                                   found = found || candidate == t.Get();
+                                   return !found;
+                                 }) +
+                   search_start_index;
+    if (found) {
+      // We were either racing some other thread and lost or this thread was asked to encode the
+      // same method multiple times while holding the mutator lock.
+      DCHECK_EQ(vec[index], t.Get())
+          << "Expected: " << PrettyGeneric(vec[index]) << " got " << PrettyGeneric(t)
+          << " at index " << index << " (id: " << IndexToId(index) << ").";
+      return IndexToId(index);
+    }
+  }
+  cur_id = GetNextId<ArtType>(id_type);
+  DCHECK_EQ(cur_id % 2, 1u);
+  size_t cur_index = IdToIndex(cur_id);
+  std::vector<ArtType*>& vec = GetGenericMap<ArtType>();
+  vec.reserve(cur_index + 1);
+  vec.resize(std::max(vec.size(), cur_index + 1), nullptr);
+  vec[cur_index] = t.Get();
+  if (ids.IsNull()) {
+    if (kIsDebugBuild && !IsObsolete(t)) {
+      CHECK_NE(deferred_allocation_refcount_, 0u)
+          << "Failed to allocate ids array despite not being forbidden from doing so!";
+      Locks::mutator_lock_->AssertExclusiveHeld(self);
+    }
+  } else {
+    ids->SetElementPtrSize(off, reinterpret_cast<void*>(cur_id), kRuntimePointerSize);
+  }
+  return cur_id;
+}
+
+jfieldID JniIdManager::EncodeFieldId(ArtField* field) {
+  StackArtFieldHandleScope<1> rhs(Thread::Current());
+  return EncodeFieldId(rhs.NewHandle(field));
+}
+
+jfieldID JniIdManager::EncodeFieldId(ReflectiveHandle<ArtField> field) {
+  auto* res = reinterpret_cast<jfieldID>(EncodeGenericId(field));
+  if (kTraceIds && field != nullptr) {
+    LOG(INFO) << "Returning " << res << " for field " << field->PrettyField();
+  }
+  return res;
+}
+
+jmethodID JniIdManager::EncodeMethodId(ArtMethod* method) {
+  StackArtMethodHandleScope<1> rhs(Thread::Current());
+  return EncodeMethodId(rhs.NewHandle(method));
+}
+
+jmethodID JniIdManager::EncodeMethodId(ReflectiveHandle<ArtMethod> method) {
+  auto* res = reinterpret_cast<jmethodID>(EncodeGenericId(method));
+  if (kTraceIds && method != nullptr) {
+    LOG(INFO) << "Returning " << res << " for method " << method->PrettyMethod();
+  }
+  return res;
+}
+
+void JniIdManager::VisitRoots(RootVisitor *visitor) {
+  pointer_marker_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
+}
+
+void JniIdManager::Init(Thread* self) {
+  // When compiling we don't want to have anything to do with any of this, which is fine since JNI
+  // ids won't be created during AOT compilation. This also means we don't need to do any
+  // complicated stuff with the image-writer.
+  if (!Runtime::Current()->IsAotCompiler()) {
+    // Allocate the marker
+    StackHandleScope<3> hs(self);
+    Handle<mirror::Object> marker_obj(
+        hs.NewHandle(GetClassRoot<mirror::Object>()->AllocObject(self)));
+    CHECK(!marker_obj.IsNull());
+    pointer_marker_ = GcRoot<mirror::Object>(marker_obj.Get());
+    // Manually mark class-ext as having all pointer-ids to avoid any annoying loops.
+    Handle<mirror::Class> class_ext_class(hs.NewHandle(GetClassRoot<mirror::ClassExt>()));
+    mirror::Class::EnsureExtDataPresent(class_ext_class, self);
+    Handle<mirror::ClassExt> class_ext_ext(hs.NewHandle(class_ext_class->GetExtData()));
+    class_ext_ext->SetIdsArraysForClassExtExtData(marker_obj.Get());
+  }
+}
+
+void JniIdManager::VisitReflectiveTargets(ReflectiveValueVisitor* rvv) {
+  art::WriterMutexLock mu(Thread::Current(), *Locks::jni_id_lock_);
+  for (auto it = field_id_map_.begin(); it != field_id_map_.end(); ++it) {
+    ArtField* old_field = *it;
+    uintptr_t id = IndexToId(std::distance(field_id_map_.begin(), it));
+    ArtField* new_field =
+        rvv->VisitField(old_field, JniIdReflectiveSourceInfo(reinterpret_cast<jfieldID>(id)));
+    if (old_field != new_field) {
+      *it = new_field;
+      ObjPtr<mirror::Class> old_class(old_field->GetDeclaringClass());
+      ObjPtr<mirror::Class> new_class(new_field->GetDeclaringClass());
+      ObjPtr<mirror::ClassExt> old_ext_data(old_class->GetExtData());
+      ObjPtr<mirror::ClassExt> new_ext_data(new_class->GetExtData());
+      if (!old_ext_data.IsNull()) {
+        CHECK(!old_ext_data->HasInstanceFieldPointerIdMarker() &&
+              !old_ext_data->HasStaticFieldPointerIdMarker())
+            << old_class->PrettyClass();
+        // Clear the old field mapping.
+        if (old_field->IsStatic()) {
+          size_t old_off = ArraySlice<ArtField>(old_class->GetSFieldsPtr()).OffsetOf(old_field);
+          ObjPtr<mirror::PointerArray> old_statics(old_ext_data->GetStaticJFieldIDsPointerArray());
+          if (!old_statics.IsNull()) {
+            old_statics->SetElementPtrSize(old_off, 0, kRuntimePointerSize);
+          }
+        } else {
+          size_t old_off = ArraySlice<ArtField>(old_class->GetIFieldsPtr()).OffsetOf(old_field);
+          ObjPtr<mirror::PointerArray> old_instances(
+              old_ext_data->GetInstanceJFieldIDsPointerArray());
+          if (!old_instances.IsNull()) {
+            old_instances->SetElementPtrSize(old_off, 0, kRuntimePointerSize);
+          }
+        }
+      }
+      if (!new_ext_data.IsNull()) {
+        CHECK(!new_ext_data->HasInstanceFieldPointerIdMarker() &&
+              !new_ext_data->HasStaticFieldPointerIdMarker())
+            << new_class->PrettyClass();
+        // Set the new field mapping.
+        if (new_field->IsStatic()) {
+          size_t new_off = ArraySlice<ArtField>(new_class->GetSFieldsPtr()).OffsetOf(new_field);
+          ObjPtr<mirror::PointerArray> new_statics(new_ext_data->GetStaticJFieldIDsPointerArray());
+          if (!new_statics.IsNull()) {
+            new_statics->SetElementPtrSize(new_off, id, kRuntimePointerSize);
+          }
+        } else {
+          size_t new_off = ArraySlice<ArtField>(new_class->GetIFieldsPtr()).OffsetOf(new_field);
+          ObjPtr<mirror::PointerArray> new_instances(
+              new_ext_data->GetInstanceJFieldIDsPointerArray());
+          if (!new_instances.IsNull()) {
+            new_instances->SetElementPtrSize(new_off, id, kRuntimePointerSize);
+          }
+        }
+      }
+    }
+  }
+  for (auto it = method_id_map_.begin(); it != method_id_map_.end(); ++it) {
+    ArtMethod* old_method = *it;
+    uintptr_t id = IndexToId(std::distance(method_id_map_.begin(), it));
+    ArtMethod* new_method =
+        rvv->VisitMethod(old_method, JniIdReflectiveSourceInfo(reinterpret_cast<jmethodID>(id)));
+    if (old_method != new_method) {
+      *it = new_method;
+      ObjPtr<mirror::Class> old_class(old_method->GetDeclaringClass());
+      ObjPtr<mirror::Class> new_class(new_method->GetDeclaringClass());
+      ObjPtr<mirror::ClassExt> old_ext_data(old_class->GetExtData());
+      ObjPtr<mirror::ClassExt> new_ext_data(new_class->GetExtData());
+      if (!old_ext_data.IsNull()) {
+        CHECK(!old_ext_data->HasMethodPointerIdMarker()) << old_class->PrettyClass();
+        // Clear the old method mapping.
+        size_t old_off = ArraySlice<ArtMethod>(old_class->GetMethodsPtr()).OffsetOf(old_method);
+        ObjPtr<mirror::PointerArray> old_methods(old_ext_data->GetJMethodIDsPointerArray());
+        if (!old_methods.IsNull()) {
+          old_methods->SetElementPtrSize(old_off, 0, kRuntimePointerSize);
+        }
+      }
+      if (!new_ext_data.IsNull()) {
+        CHECK(!new_ext_data->HasMethodPointerIdMarker()) << new_class->PrettyClass();
+        // Set the new method mapping.
+        size_t new_off = ArraySlice<ArtMethod>(new_class->GetMethodsPtr()).OffsetOf(new_method);
+        ObjPtr<mirror::PointerArray> new_methods(new_ext_data->GetJMethodIDsPointerArray());
+        if (!new_methods.IsNull()) {
+          new_methods->SetElementPtrSize(new_off, id, kRuntimePointerSize);
+        }
+      }
+    }
+  }
+}
+
+template <typename ArtType> ArtType* JniIdManager::DecodeGenericId(uintptr_t t) {
+  if (Runtime::Current()->GetJniIdType() == JniIdType::kIndices && (t % 2) == 1) {
+    ReaderMutexLock mu(Thread::Current(), *Locks::jni_id_lock_);
+    size_t index = IdToIndex(t);
+    DCHECK_GT(GetGenericMap<ArtType>().size(), index);
+    return GetGenericMap<ArtType>().at(index);
+  } else {
+    DCHECK_EQ((t % 2), 0u) << "id: " << t;
+    return reinterpret_cast<ArtType*>(t);
+  }
+}
+
+ArtMethod* JniIdManager::DecodeMethodId(jmethodID method) {
+  return DecodeGenericId<ArtMethod>(reinterpret_cast<uintptr_t>(method));
+}
+
+ArtField* JniIdManager::DecodeFieldId(jfieldID field) {
+  return DecodeGenericId<ArtField>(reinterpret_cast<uintptr_t>(field));
+}
+
+ObjPtr<mirror::Object> JniIdManager::GetPointerMarker() {
+  return pointer_marker_.Read();
+}
+
+// This whole defer system is an annoying requirement to allow us to generate IDs during heap-walks
+// such as those required for instrumentation tooling.
+//
+// The defer system works with the normal id-assignment routine to ensure that all the class-ext
+// data structures are eventually created and filled in. Basically how it works is the id-assignment
+// function will check to see if it has a strong mutator-lock. If it does not then it will try to
+// allocate the class-ext data structures normally and fail if it is unable to do so. In the case
+// where mutator-lock is being held exclusive no attempt to allocate will be made and the thread
+// will CHECK that allocations are being deferred (or that the method is obsolete, in which case
+// there is no class-ext to store the method->id map in).
+//
+// Once the thread is done holding the exclusive mutator-lock it will go back and fill-in the
+// class-ext data of all the methods that were added. We do this without the exclusive mutator-lock
+// on a copy of the maps before we decrement the deferred refcount. This ensures that any other
+// threads running at the same time know they need to perform a linear scan of the id-map. Since we
+// don't have the mutator-lock anymore other threads can allocate the class-ext data, meaning our
+// copy is fine. The only way additional methods could end up on the id-maps after our copy without
+// having class-ext data is if another thread picked up the exclusive mutator-lock and added another
+// defer, in which case that thread would fix-up the remaining ids. In this way we maintain eventual
+// consistency between the class-ext method/field->id maps and the JniIdManager id->method/field
+// maps.
+//
+// TODO It is possible for another thread to gain the mutator-lock and allocate new ids without
+// calling StartDefer. This is basically a race that we should try to catch but doing so is
+// rather difficult and since this defer system is only used in very rare circumstances unlikely to
+// be worth the trouble.
+void JniIdManager::StartDefer() {
+  Thread* self = Thread::Current();
+  WriterMutexLock mu(self, *Locks::jni_id_lock_);
+  if (deferred_allocation_refcount_++ == 0) {
+    deferred_allocation_field_id_start_ = next_field_id_;
+    deferred_allocation_method_id_start_ = next_method_id_;
+  }
+}
+
+class JniIdDeferStackReflectiveScope : public BaseReflectiveHandleScope {
+ public:
+  JniIdDeferStackReflectiveScope() REQUIRES_SHARED(art::Locks::mutator_lock_)
+      : BaseReflectiveHandleScope(), methods_(), fields_() {
+    PushScope(Thread::Current());
+  }
+
+  void Initialize(const std::vector<ArtMethod*>& methods, const std::vector<ArtField*>& fields)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Roles::uninterruptible_) {
+    methods_ = methods;
+    fields_ = fields;
+  }
+
+  ~JniIdDeferStackReflectiveScope() REQUIRES_SHARED(Locks::mutator_lock_) {
+    PopScope();
+  }
+
+  void VisitTargets(ReflectiveValueVisitor* visitor) override
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    for (auto it = methods_.begin(); it != methods_.end(); ++it) {
+      if (*it == nullptr) {
+        continue;
+      }
+      *it = visitor->VisitMethod(*it, ReflectiveHandleScopeSourceInfo(this));
+    }
+    for (auto it = fields_.begin(); it != fields_.end(); ++it) {
+      if (*it == nullptr) {
+        continue;
+      }
+      *it = visitor->VisitField(*it, ReflectiveHandleScopeSourceInfo(this));
+    }
+  }
+
+  ArtField** GetFieldPtr(size_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
+    return &fields_[idx];
+  }
+
+  ArtMethod** GetMethodPtr(size_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
+    return &methods_[idx];
+  }
+
+  size_t NumFields() const {
+    return fields_.size();
+  }
+  size_t NumMethods() const {
+    return methods_.size();
+  }
+
+ private:
+  std::vector<ArtMethod*> methods_;
+  std::vector<ArtField*> fields_;
+};
+
+void JniIdManager::EndDefer() {
+  // Fixup the method->id map.
+  Thread* self = Thread::Current();
+  auto set_id = [&](auto** t, uintptr_t id) REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (t == nullptr) {
+      return;
+    }
+    bool alloc_failure = EnsureIdsArray(self, (*t)->GetDeclaringClass(), *t);
+    ObjPtr<mirror::Class> klass((*t)->GetDeclaringClass());
+    size_t off = GetIdOffset(klass, (*t), kRuntimePointerSize);
+    ObjPtr<mirror::PointerArray> ids = GetIds(klass, (*t));
+    CHECK(!alloc_failure) << "Could not allocate jni ids array!";
+    if (ids.IsNull()) {
+      return;
+    }
+    if (kIsDebugBuild) {
+      uintptr_t old_id = ids->GetElementPtrSize<uintptr_t, kRuntimePointerSize>(off);
+      if (old_id != 0) {
+        DCHECK_EQ(old_id, id);
+      }
+    }
+    ids->SetElementPtrSize(off, reinterpret_cast<void*>(id), kRuntimePointerSize);
+  };
+  // To ensure eventual consistency this depends on the fact that the method_id_map_ and
+  // field_id_map_ are the ultimate source of truth and no id is ever reused to be valid. It also
+  // relies on all threads always calling StartDefer if they are going to be allocating jni
+  // ids while suspended. If a thread tries to do so while it doesn't have a scope we could miss
+  // ids.
+  // TODO We should use roles or something to verify that this requirement is not broken.
+  //
+  // If another thread comes along and adds more methods to the list after
+  // copying either (1) the id-maps are already present for the method and everything is fine, (2)
+  // the thread is not suspended and so can create the ext-data and id lists or, (3) the thread also
+  // suspended everything and incremented the deferred_allocation_refcount_ so it will fix up new
+  // ids when it finishes.
+  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
+  Locks::mutator_lock_->AssertSharedHeld(self);
+  JniIdDeferStackReflectiveScope jidsrs;
+  uintptr_t method_start_id;
+  uintptr_t field_start_id;
+  {
+    ReaderMutexLock mu(self, *Locks::jni_id_lock_);
+    ScopedAssertNoThreadSuspension sants(__FUNCTION__);
+    jidsrs.Initialize(method_id_map_, field_id_map_);
+    method_start_id = deferred_allocation_method_id_start_;
+    field_start_id = deferred_allocation_field_id_start_;
+  }
+
+  for (size_t index = kIsDebugBuild ? 0 : IdToIndex(method_start_id); index < jidsrs.NumMethods();
+       ++index) {
+    set_id(jidsrs.GetMethodPtr(index), IndexToId(index));
+  }
+  for (size_t index = kIsDebugBuild ? 0 : IdToIndex(field_start_id); index < jidsrs.NumFields();
+       ++index) {
+    set_id(jidsrs.GetFieldPtr(index), IndexToId(index));
+  }
+  WriterMutexLock mu(self, *Locks::jni_id_lock_);
+  DCHECK_GE(deferred_allocation_refcount_, 1u);
+  if (--deferred_allocation_refcount_ == 0) {
+    deferred_allocation_field_id_start_ = 0;
+    deferred_allocation_method_id_start_ = 0;
+  }
+}
+
+ScopedEnableSuspendAllJniIdQueries::ScopedEnableSuspendAllJniIdQueries()
+    : manager_(Runtime::Current()->GetJniIdManager()) {
+  manager_->StartDefer();
+}
+
+ScopedEnableSuspendAllJniIdQueries::~ScopedEnableSuspendAllJniIdQueries() {
+  manager_->EndDefer();
+}
+
+};  // namespace jni
+};  // namespace art
diff --git a/runtime/jni/jni_id_manager.h b/runtime/jni/jni_id_manager.h
new file mode 100644
index 0000000..c8ebfc3
--- /dev/null
+++ b/runtime/jni/jni_id_manager.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JNI_JNI_ID_MANAGER_H_
+#define ART_RUNTIME_JNI_JNI_ID_MANAGER_H_
+
+#include <jni.h>
+
+#include <atomic>
+#include <vector>
+
+#include "art_field.h"
+#include "art_method.h"
+#include "base/mutex.h"
+#include "gc_root.h"
+#include "jni_id_type.h"
+#include "reflective_value_visitor.h"
+
+namespace art {
+namespace mirror {
+class Object;
+class ClassExt;
+}  // namespace mirror
+template<typename RT> class ReflectiveHandle;
+
+namespace jni {
+
+class ScopedEnableSuspendAllJniIdQueries;
+class JniIdManager {
+ public:
+  template <typename T,
+            typename = typename std::enable_if<std::is_same_v<T, jmethodID> ||
+                                               std::is_same_v<T, jfieldID>>>
+  static constexpr bool IsIndexId(T val) {
+    return val == nullptr || reinterpret_cast<uintptr_t>(val) % 2 == 1;
+  }
+
+  void Init(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  ArtMethod* DecodeMethodId(jmethodID method) REQUIRES(!Locks::jni_id_lock_);
+  ArtField* DecodeFieldId(jfieldID field) REQUIRES(!Locks::jni_id_lock_);
+  jmethodID EncodeMethodId(ReflectiveHandle<ArtMethod> method) REQUIRES(!Locks::jni_id_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  jmethodID EncodeMethodId(ArtMethod* method) REQUIRES(!Locks::jni_id_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  jfieldID EncodeFieldId(ReflectiveHandle<ArtField> field) REQUIRES(!Locks::jni_id_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  jfieldID EncodeFieldId(ArtField* field) REQUIRES(!Locks::jni_id_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  void VisitReflectiveTargets(ReflectiveValueVisitor* rvv)
+      REQUIRES(Locks::mutator_lock_, !Locks::jni_id_lock_);
+
+  void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  ObjPtr<mirror::Object> GetPointerMarker() REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+  template <typename ArtType>
+  uintptr_t EncodeGenericId(ReflectiveHandle<ArtType> t) REQUIRES(!Locks::jni_id_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  template <typename ArtType>
+  ArtType* DecodeGenericId(uintptr_t input) REQUIRES(!Locks::jni_id_lock_);
+  template <typename ArtType> std::vector<ArtType*>& GetGenericMap()
+      REQUIRES(Locks::jni_id_lock_);
+  template <typename ArtType> uintptr_t GetNextId(JniIdType id)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      REQUIRES(Locks::jni_id_lock_);
+  template <typename ArtType>
+  size_t GetLinearSearchStartId(ReflectiveHandle<ArtType> t)
+      REQUIRES(Locks::jni_id_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  void StartDefer() REQUIRES(!Locks::jni_id_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+  void EndDefer() REQUIRES(!Locks::jni_id_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  uintptr_t next_method_id_ GUARDED_BY(Locks::jni_id_lock_) = 1u;
+  std::vector<ArtMethod*> method_id_map_ GUARDED_BY(Locks::jni_id_lock_);
+  uintptr_t next_field_id_ GUARDED_BY(Locks::jni_id_lock_) = 1u;
+  std::vector<ArtField*> field_id_map_ GUARDED_BY(Locks::jni_id_lock_);
+
+  // If non-zero indicates that some thread is trying to allocate ids without being able to update
+  // the method->id mapping (due to not being able to allocate or something). In this case decode
+  // and encode need to do a linear scan of the lists. The ScopedEnableSuspendAllJniIdQueries struct
+  // will deal with fixing everything up.
+  size_t deferred_allocation_refcount_ GUARDED_BY(Locks::jni_id_lock_) = 0;
+  // min jmethodID that might not have its method->id mapping filled in.
+  uintptr_t deferred_allocation_method_id_start_ GUARDED_BY(Locks::jni_id_lock_) = 0u;
+  // min jfieldID that might not have its field->id mapping filled in.
+  uintptr_t deferred_allocation_field_id_start_ GUARDED_BY(Locks::jni_id_lock_) = 0u;
+
+  GcRoot<mirror::Object> pointer_marker_;
+
+  friend class ScopedEnableSuspendAllJniIdQueries;
+  // For GetPointerMarker
+  friend class mirror::ClassExt;
+};
+
+// A scope that will enable using the Encode/Decode JNI id functions with all threads suspended.
+// This is required since normally we need to be able to allocate to encode new ids. This should
+// only be used when absolutely required, for example to invoke user-callbacks during heap walking
+// or similar.
+class ScopedEnableSuspendAllJniIdQueries {
+ public:
+  ScopedEnableSuspendAllJniIdQueries() REQUIRES_SHARED(Locks::mutator_lock_);
+  ~ScopedEnableSuspendAllJniIdQueries() REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+  JniIdManager* manager_;
+};
+
+}  // namespace jni
+}  // namespace art
+
+#endif  // ART_RUNTIME_JNI_JNI_ID_MANAGER_H_
diff --git a/runtime/jni/jni_internal.cc b/runtime/jni/jni_internal.cc
index 1055057..f4ef785 100644
--- a/runtime/jni/jni_internal.cc
+++ b/runtime/jni/jni_internal.cc
@@ -19,24 +19,19 @@
 #include <cstdarg>
 #include <log/log.h>
 #include <memory>
-#include <mutex>
 #include <utility>
 
-#include <link.h>
-
 #include "art_field-inl.h"
 #include "art_method-inl.h"
 #include "base/allocator.h"
 #include "base/atomic.h"
-#include "base/bit_utils.h"
 #include "base/casts.h"
 #include "base/enums.h"
+#include "base/file_utils.h"
 #include "base/logging.h"  // For VLOG.
-#include "base/memory_type_table.h"
 #include "base/mutex.h"
 #include "base/safe_map.h"
 #include "base/stl_util.h"
-#include "base/string_view_cpp20.h"
 #include "class_linker-inl.h"
 #include "class_root.h"
 #include "dex/dex_file-inl.h"
@@ -162,195 +157,15 @@
   return len;
 }
 
-static constexpr size_t kMaxReturnAddressDepth = 4;
-
-inline void* GetReturnAddress(size_t depth) {
-  DCHECK_LT(depth, kMaxReturnAddressDepth);
-  switch (depth) {
-    case 0u: return __builtin_return_address(0);
-    case 1u: return __builtin_return_address(1);
-    case 2u: return __builtin_return_address(2);
-    case 3u: return __builtin_return_address(3);
-    default:
-      return nullptr;
-  }
-}
-
-enum class SharedObjectKind {
-  kRuntime = 0,
-  kApexModule = 1,
-  kOther = 2
-};
-
-std::ostream& operator<<(std::ostream& os, SharedObjectKind kind) {
-  switch (kind) {
-    case SharedObjectKind::kRuntime:
-      os << "Runtime";
-      break;
-    case SharedObjectKind::kApexModule:
-      os << "APEX Module";
-      break;
-    case SharedObjectKind::kOther:
-      os << "Other";
-      break;
-  }
-  return os;
-}
-
-// Class holding Cached ranges of loaded shared objects to facilitate checks of field and method
-// resolutions within the Core Platform API for native callers.
-class CodeRangeCache final {
- public:
-  static CodeRangeCache& GetSingleton() {
-    static CodeRangeCache Singleton;
-    return Singleton;
-  }
-
-  SharedObjectKind GetSharedObjectKind(void* pc) {
-    uintptr_t address = reinterpret_cast<uintptr_t>(pc);
-    SharedObjectKind kind;
-    if (Find(address, &kind)) {
-      return kind;
-    }
-    return SharedObjectKind::kOther;
-  }
-
-  bool HasCache() const {
-    return memory_type_table_.Size() != 0;
-  }
-
-  void BuildCache() {
-    DCHECK(!HasCache());
-    art::MemoryTypeTable<SharedObjectKind>::Builder builder;
-    builder_ = &builder;
-    libjavacore_loaded_ = false;
-    libnativehelper_loaded_ = false;
-    libopenjdk_loaded_ = false;
-
-    // Iterate over ELF headers populating table_builder with executable ranges.
-    dl_iterate_phdr(VisitElfInfo, this);
-    memory_type_table_ = builder_->Build();
-
-    // Check expected libraries loaded when iterating headers.
-    CHECK(libjavacore_loaded_);
-    CHECK(libnativehelper_loaded_);
-    CHECK(libopenjdk_loaded_);
-    builder_ = nullptr;
-  }
-
-  void DropCache() {
-    memory_type_table_ = {};
-  }
-
- private:
-  CodeRangeCache() {}
-
-  bool Find(uintptr_t address, SharedObjectKind* kind) const {
-    const art::MemoryTypeRange<SharedObjectKind>* range = memory_type_table_.Lookup(address);
-    if (range == nullptr) {
-      return false;
-    }
-    *kind = range->Type();
-    return true;
-  }
-
-  static int VisitElfInfo(struct dl_phdr_info *info, size_t size ATTRIBUTE_UNUSED, void *data)
-      NO_THREAD_SAFETY_ANALYSIS {
-    auto cache = reinterpret_cast<CodeRangeCache*>(data);
-    art::MemoryTypeTable<SharedObjectKind>::Builder* builder = cache->builder_;
-
-    for (size_t i = 0u; i < info->dlpi_phnum; ++i) {
-      const ElfW(Phdr)& phdr = info->dlpi_phdr[i];
-      if (phdr.p_type != PT_LOAD || ((phdr.p_flags & PF_X) != PF_X)) {
-        continue;  // Skip anything other than code pages
-      }
-      uintptr_t start = info->dlpi_addr + phdr.p_vaddr;
-      const uintptr_t limit = art::RoundUp(start + phdr.p_memsz, art::kPageSize);
-      SharedObjectKind kind = GetKind(info->dlpi_name, start, limit);
-      art::MemoryTypeRange<SharedObjectKind> range{start, limit, kind};
-      if (!builder->Add(range)) {
-        LOG(WARNING) << "Overlapping/invalid range found in ELF headers: " << range;
-      }
-    }
-
-    // Update sanity check state.
-    std::string_view dlpi_name{info->dlpi_name};
-    if (!cache->libjavacore_loaded_) {
-      cache->libjavacore_loaded_ = art::EndsWith(dlpi_name, kLibjavacore);
-    }
-    if (!cache->libnativehelper_loaded_) {
-      cache->libnativehelper_loaded_ = art::EndsWith(dlpi_name, kLibnativehelper);
-    }
-    if (!cache->libopenjdk_loaded_) {
-      cache->libopenjdk_loaded_ = art::EndsWith(dlpi_name, kLibopenjdk);
-    }
-
-    return 0;
-  }
-
-  static SharedObjectKind GetKind(const char* so_name, uintptr_t start, uintptr_t limit) {
-    uintptr_t runtime_method = reinterpret_cast<uintptr_t>(art::GetJniNativeInterface);
-    if (runtime_method >= start && runtime_method < limit) {
-      return SharedObjectKind::kRuntime;
-    }
-    return art::LocationIsOnApex(so_name) ? SharedObjectKind::kApexModule
-                                          : SharedObjectKind::kOther;
-  }
-
-  art::MemoryTypeTable<SharedObjectKind> memory_type_table_;
-
-  // Table builder, only valid during BuildCache().
-  art::MemoryTypeTable<SharedObjectKind>::Builder* builder_;
-
-  // Sanity checking state.
-  bool libjavacore_loaded_;
-  bool libnativehelper_loaded_;
-  bool libopenjdk_loaded_;
-
-  static constexpr std::string_view kLibjavacore = "libjavacore.so";
-  static constexpr std::string_view kLibnativehelper = "libnativehelper.so";
-  static constexpr std::string_view kLibopenjdk = art::kIsDebugBuild ? "libopenjdkd.so"
-                                                                     : "libopenjdk.so";
-
-  DISALLOW_COPY_AND_ASSIGN(CodeRangeCache);
-};
-
-// Whitelisted native callers can resolve method and field id's via JNI. Check the first caller
-// outside of the JNI library who will have called Get(Static)?(Field|Member)ID(). The presence of
-// checked JNI means we need to walk frames as the internal methods can be called directly from an
-// external shared-object or indirectly (via checked JNI) from an external shared-object.
-static inline bool IsWhitelistedNativeCaller() {
-  if (!art::kIsTargetBuild) {
-    return false;
-  }
-  for (size_t i = 0; i < kMaxReturnAddressDepth; ++i) {
-    void* return_address = GetReturnAddress(i);
-    if (return_address == nullptr) {
-      return false;
-    }
-    SharedObjectKind kind = CodeRangeCache::GetSingleton().GetSharedObjectKind(return_address);
-    if (kind != SharedObjectKind::kRuntime) {
-      return kind == SharedObjectKind::kApexModule;
-    }
-  }
-  return false;
-}
-
 }  // namespace
 
 // Consider turning this on when there is errors which could be related to JNI array copies such as
 // things not rendering correctly. E.g. b/16858794
 static constexpr bool kWarnJniAbort = false;
 
-// Disable native JNI checking pending stack walk re-evaluation (b/136276414).
-static constexpr bool kNativeJniCheckEnabled = false;
-
 template<typename T>
 ALWAYS_INLINE static bool ShouldDenyAccessToMember(T* member, Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  if (kNativeJniCheckEnabled && IsWhitelistedNativeCaller()) {
-    return false;
-  }
   return hiddenapi::ShouldDenyAccessToMember(
       member,
       [&]() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -386,7 +201,7 @@
     JValue val;
     val.SetL(self->DecodeJObject(jval));
     instrumentation->FieldWriteEvent(self,
-                                     self->DecodeJObject(obj).Ptr(),
+                                     self->DecodeJObject(obj),
                                      cur_method,
                                      0,  // dex_pc is always 0 since this is a native method.
                                      field,
@@ -411,7 +226,7 @@
     }
     DCHECK(cur_method->IsNative());
     instrumentation->FieldWriteEvent(self,
-                                     self->DecodeJObject(obj).Ptr(),
+                                     self->DecodeJObject(obj),
                                      cur_method,
                                      0,  // dex_pc is always 0 since this is a native method.
                                      field,
@@ -435,7 +250,7 @@
     }
     DCHECK(cur_method->IsNative());
     instrumentation->FieldReadEvent(self,
-                                    self->DecodeJObject(obj).Ptr(),
+                                    self->DecodeJObject(obj),
                                     cur_method,
                                     0,  // dex_pc is always 0 since this is a native method.
                                     field);
@@ -466,21 +281,6 @@
   return result;
 }
 
-static void ThrowNoSuchMethodError(ScopedObjectAccess& soa,
-                                   ObjPtr<mirror::Class> c,
-                                   const char* name,
-                                   const char* sig,
-                                   const char* kind)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  std::string temp;
-  soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;",
-                                 "no %s method \"%s.%s%s\"",
-                                 kind,
-                                 c->GetDescriptor(&temp),
-                                 name,
-                                 sig);
-}
-
 static void ReportInvalidJNINativeMethod(const ScopedObjectAccess& soa,
                                          ObjPtr<mirror::Class> c,
                                          const char* kind,
@@ -496,48 +296,20 @@
                                  idx);
 }
 
-static ObjPtr<mirror::Class> EnsureInitialized(Thread* self, ObjPtr<mirror::Class> klass)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  if (LIKELY(klass->IsInitialized())) {
-    return klass;
-  }
-  StackHandleScope<1> hs(self);
-  Handle<mirror::Class> h_klass(hs.NewHandle(klass));
-  if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_klass, true, true)) {
-    return nullptr;
-  }
-  return h_klass.Get();
-}
-
+template<bool kEnableIndexIds>
 static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class,
                               const char* name, const char* sig, bool is_static)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  ObjPtr<mirror::Class> c = EnsureInitialized(soa.Self(), soa.Decode<mirror::Class>(jni_class));
-  if (c == nullptr) {
-    return nullptr;
-  }
-  ArtMethod* method = nullptr;
-  auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
-  if (c->IsInterface()) {
-    method = c->FindInterfaceMethod(name, sig, pointer_size);
-  } else {
-    method = c->FindClassMethod(name, sig, pointer_size);
-  }
-  if (method != nullptr && ShouldDenyAccessToMember(method, soa.Self())) {
-    method = nullptr;
-  }
-  if (method == nullptr || method->IsStatic() != is_static) {
-    ThrowNoSuchMethodError(soa, c, name, sig, is_static ? "static" : "non-static");
-    return nullptr;
-  }
-  return jni::EncodeArtMethod(method);
+  return jni::EncodeArtMethod<kEnableIndexIds>(FindMethodJNI(soa, jni_class, name, sig, is_static));
 }
 
+template<bool kEnableIndexIds>
 static ObjPtr<mirror::ClassLoader> GetClassLoader(const ScopedObjectAccess& soa)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   ArtMethod* method = soa.Self()->GetCurrentMethod(nullptr);
   // If we are running Runtime.nativeLoad, use the overriding ClassLoader it set.
-  if (method == jni::DecodeArtMethod(WellKnownClasses::java_lang_Runtime_nativeLoad)) {
+  if (method ==
+      jni::DecodeArtMethod<kEnableIndexIds>(WellKnownClasses::java_lang_Runtime_nativeLoad)) {
     return soa.Decode<mirror::ClassLoader>(soa.Self()->GetClassLoaderOverride());
   }
   // If we have a method, use its ClassLoader for context.
@@ -563,9 +335,92 @@
   return nullptr;
 }
 
+template<bool kEnableIndexIds>
 static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, const char* name,
                             const char* sig, bool is_static)
     REQUIRES_SHARED(Locks::mutator_lock_) {
+  return jni::EncodeArtField<kEnableIndexIds>(FindFieldJNI(soa, jni_class, name, sig, is_static));
+}
+
+static void ThrowAIOOBE(ScopedObjectAccess& soa,
+                        ObjPtr<mirror::Array> array,
+                        jsize start,
+                        jsize length,
+                        const char* identifier)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  std::string type(array->PrettyTypeOf());
+  soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
+                                 "%s offset=%d length=%d %s.length=%d",
+                                 type.c_str(), start, length, identifier, array->GetLength());
+}
+
+static void ThrowSIOOBE(ScopedObjectAccess& soa, jsize start, jsize length,
+                        jsize array_length)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  soa.Self()->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;",
+                                 "offset=%d length=%d string.length()=%d", start, length,
+                                 array_length);
+}
+
+static void ThrowNoSuchMethodError(const ScopedObjectAccess& soa,
+                                   ObjPtr<mirror::Class> c,
+                                   const char* name,
+                                   const char* sig,
+                                   const char* kind)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  std::string temp;
+  soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;",
+                                 "no %s method \"%s.%s%s\"",
+                                 kind,
+                                 c->GetDescriptor(&temp),
+                                 name,
+                                 sig);
+}
+
+static ObjPtr<mirror::Class> EnsureInitialized(Thread* self, ObjPtr<mirror::Class> klass)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (LIKELY(klass->IsInitialized())) {
+    return klass;
+  }
+  StackHandleScope<1> hs(self);
+  Handle<mirror::Class> h_klass(hs.NewHandle(klass));
+  if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_klass, true, true)) {
+    return nullptr;
+  }
+  return h_klass.Get();
+}
+
+ArtMethod* FindMethodJNI(const ScopedObjectAccess& soa,
+                         jclass jni_class,
+                         const char* name,
+                         const char* sig,
+                         bool is_static) {
+  ObjPtr<mirror::Class> c = EnsureInitialized(soa.Self(), soa.Decode<mirror::Class>(jni_class));
+  if (c == nullptr) {
+    return nullptr;
+  }
+  ArtMethod* method = nullptr;
+  auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
+  if (c->IsInterface()) {
+    method = c->FindInterfaceMethod(name, sig, pointer_size);
+  } else {
+    method = c->FindClassMethod(name, sig, pointer_size);
+  }
+  if (method != nullptr && ShouldDenyAccessToMember(method, soa.Self())) {
+    method = nullptr;
+  }
+  if (method == nullptr || method->IsStatic() != is_static) {
+    ThrowNoSuchMethodError(soa, c, name, sig, is_static ? "static" : "non-static");
+    return nullptr;
+  }
+  return method;
+}
+
+ArtField* FindFieldJNI(const ScopedObjectAccess& soa,
+                       jclass jni_class,
+                       const char* name,
+                       const char* sig,
+                       bool is_static) {
   StackHandleScope<2> hs(soa.Self());
   Handle<mirror::Class> c(
       hs.NewHandle(EnsureInitialized(soa.Self(), soa.Decode<mirror::Class>(jni_class))));
@@ -615,27 +470,7 @@
                                    sig, name, c->GetDescriptor(&temp));
     return nullptr;
   }
-  return jni::EncodeArtField(field);
-}
-
-static void ThrowAIOOBE(ScopedObjectAccess& soa,
-                        ObjPtr<mirror::Array> array,
-                        jsize start,
-                        jsize length,
-                        const char* identifier)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  std::string type(array->PrettyTypeOf());
-  soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
-                                 "%s offset=%d length=%d %s.length=%d",
-                                 type.c_str(), start, length, identifier, array->GetLength());
-}
-
-static void ThrowSIOOBE(ScopedObjectAccess& soa, jsize start, jsize length,
-                        jsize array_length)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  soa.Self()->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;",
-                                 "offset=%d length=%d string.length()=%d", start, length,
-                                 array_length);
+  return field;
 }
 
 int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause)
@@ -722,6 +557,7 @@
   return nullptr;
 }
 
+template <bool kEnableIndexIds>
 class JNI {
  public:
   static jint GetVersion(JNIEnv*) {
@@ -742,7 +578,7 @@
     ObjPtr<mirror::Class> c = nullptr;
     if (runtime->IsStarted()) {
       StackHandleScope<1> hs(soa.Self());
-      Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetClassLoader(soa)));
+      Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetClassLoader<kEnableIndexIds>(soa)));
       c = class_linker->FindClass(soa.Self(), descriptor.c_str(), class_loader);
     } else {
       c = class_linker->FindSystemClass(soa.Self(), descriptor.c_str());
@@ -753,7 +589,7 @@
   static jmethodID FromReflectedMethod(JNIEnv* env, jobject jlr_method) {
     CHECK_NON_NULL_ARGUMENT(jlr_method);
     ScopedObjectAccess soa(env);
-    return jni::EncodeArtMethod(ArtMethod::FromReflectedMethod(soa, jlr_method));
+    return jni::EncodeArtMethod<kEnableIndexIds>(ArtMethod::FromReflectedMethod(soa, jlr_method));
   }
 
   static jfieldID FromReflectedField(JNIEnv* env, jobject jlr_field) {
@@ -765,7 +601,7 @@
       return nullptr;
     }
     ObjPtr<mirror::Field> field = ObjPtr<mirror::Field>::DownCast(obj_field);
-    return jni::EncodeArtField(field->GetArtField());
+    return jni::EncodeArtField<kEnableIndexIds>(field->GetArtField());
   }
 
   static jobject ToReflectedMethod(JNIEnv* env, jclass, jmethodID mid, jboolean) {
@@ -989,8 +825,8 @@
     }
     if (c->IsStringClass()) {
       gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
-      return soa.AddLocalReference<jobject>(mirror::String::AllocEmptyString<true>(soa.Self(),
-                                                                              allocator_type));
+      return soa.AddLocalReference<jobject>(
+          mirror::String::AllocEmptyString(soa.Self(), allocator_type));
     }
     return soa.AddLocalReference<jobject>(c->AllocObject(soa.Self()));
   }
@@ -1016,7 +852,7 @@
     }
     if (c->IsStringClass()) {
       // Replace calls to String.<init> with equivalent StringFactory call.
-      jmethodID sf_mid = jni::EncodeArtMethod(
+      jmethodID sf_mid = jni::EncodeArtMethod<kEnableIndexIds>(
           WellKnownClasses::StringInitToStringFactory(jni::DecodeArtMethod(mid)));
       return CallStaticObjectMethodV(env, WellKnownClasses::java_lang_StringFactory, sf_mid, args);
     }
@@ -1043,7 +879,7 @@
     }
     if (c->IsStringClass()) {
       // Replace calls to String.<init> with equivalent StringFactory call.
-      jmethodID sf_mid = jni::EncodeArtMethod(
+      jmethodID sf_mid = jni::EncodeArtMethod<kEnableIndexIds>(
           WellKnownClasses::StringInitToStringFactory(jni::DecodeArtMethod(mid)));
       return CallStaticObjectMethodA(env, WellKnownClasses::java_lang_StringFactory, sf_mid, args);
     }
@@ -1064,7 +900,7 @@
     CHECK_NON_NULL_ARGUMENT(name);
     CHECK_NON_NULL_ARGUMENT(sig);
     ScopedObjectAccess soa(env);
-    return FindMethodID(soa, java_class, name, sig, false);
+    return FindMethodID<kEnableIndexIds>(soa, java_class, name, sig, false);
   }
 
   static jmethodID GetStaticMethodID(JNIEnv* env, jclass java_class, const char* name,
@@ -1073,7 +909,7 @@
     CHECK_NON_NULL_ARGUMENT(name);
     CHECK_NON_NULL_ARGUMENT(sig);
     ScopedObjectAccess soa(env);
-    return FindMethodID(soa, java_class, name, sig, true);
+    return FindMethodID<kEnableIndexIds>(soa, java_class, name, sig, true);
   }
 
   static jobject CallObjectMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
@@ -1605,7 +1441,7 @@
     CHECK_NON_NULL_ARGUMENT(name);
     CHECK_NON_NULL_ARGUMENT(sig);
     ScopedObjectAccess soa(env);
-    return FindFieldID(soa, java_class, name, sig, false);
+    return FindFieldID<kEnableIndexIds>(soa, java_class, name, sig, false);
   }
 
   static jfieldID GetStaticFieldID(JNIEnv* env, jclass java_class, const char* name,
@@ -1614,14 +1450,14 @@
     CHECK_NON_NULL_ARGUMENT(name);
     CHECK_NON_NULL_ARGUMENT(sig);
     ScopedObjectAccess soa(env);
-    return FindFieldID(soa, java_class, name, sig, true);
+    return FindFieldID<kEnableIndexIds>(soa, java_class, name, sig, true);
   }
 
   static jobject GetObjectField(JNIEnv* env, jobject obj, jfieldID fid) {
     CHECK_NON_NULL_ARGUMENT(obj);
     CHECK_NON_NULL_ARGUMENT(fid);
     ScopedObjectAccess soa(env);
-    ArtField* f = jni::DecodeArtField(fid);
+    ArtField* f = jni::DecodeArtField<kEnableIndexIds>(fid);
     NotifyGetField(f, obj);
     ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(obj);
     return soa.AddLocalReference<jobject>(f->GetObject(o));
@@ -1630,7 +1466,7 @@
   static jobject GetStaticObjectField(JNIEnv* env, jclass, jfieldID fid) {
     CHECK_NON_NULL_ARGUMENT(fid);
     ScopedObjectAccess soa(env);
-    ArtField* f = jni::DecodeArtField(fid);
+    ArtField* f = jni::DecodeArtField<kEnableIndexIds>(fid);
     NotifyGetField(f, nullptr);
     return soa.AddLocalReference<jobject>(f->GetObject(f->GetDeclaringClass()));
   }
@@ -1639,7 +1475,7 @@
     CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_object);
     CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid);
     ScopedObjectAccess soa(env);
-    ArtField* f = jni::DecodeArtField(fid);
+    ArtField* f = jni::DecodeArtField<kEnableIndexIds>(fid);
     NotifySetObjectField(f, java_object, java_value);
     ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(java_object);
     ObjPtr<mirror::Object> v = soa.Decode<mirror::Object>(java_value);
@@ -1649,7 +1485,7 @@
   static void SetStaticObjectField(JNIEnv* env, jclass, jfieldID fid, jobject java_value) {
     CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid);
     ScopedObjectAccess soa(env);
-    ArtField* f = jni::DecodeArtField(fid);
+    ArtField* f = jni::DecodeArtField<kEnableIndexIds>(fid);
     NotifySetObjectField(f, nullptr, java_value);
     ObjPtr<mirror::Object> v = soa.Decode<mirror::Object>(java_value);
     f->SetObject<false>(f->GetDeclaringClass(), v);
@@ -1659,7 +1495,7 @@
   CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(instance); \
   CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(fid); \
   ScopedObjectAccess soa(env); \
-  ArtField* f = jni::DecodeArtField(fid); \
+  ArtField* f = jni::DecodeArtField<kEnableIndexIds>(fid); \
   NotifyGetField(f, instance); \
   ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(instance); \
   return f->Get ##fn (o)
@@ -1667,7 +1503,7 @@
 #define GET_STATIC_PRIMITIVE_FIELD(fn) \
   CHECK_NON_NULL_ARGUMENT_RETURN_ZERO(fid); \
   ScopedObjectAccess soa(env); \
-  ArtField* f = jni::DecodeArtField(fid); \
+  ArtField* f = jni::DecodeArtField<kEnableIndexIds>(fid); \
   NotifyGetField(f, nullptr); \
   return f->Get ##fn (f->GetDeclaringClass())
 
@@ -1675,7 +1511,7 @@
   CHECK_NON_NULL_ARGUMENT_RETURN_VOID(instance); \
   CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid); \
   ScopedObjectAccess soa(env); \
-  ArtField* f = jni::DecodeArtField(fid); \
+  ArtField* f = jni::DecodeArtField<kEnableIndexIds>(fid); \
   NotifySetPrimitiveField(f, instance, JValue::FromPrimitive<decltype(value)>(value)); \
   ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(instance); \
   f->Set ##fn <false>(o, value)
@@ -1683,7 +1519,7 @@
 #define SET_STATIC_PRIMITIVE_FIELD(fn, value) \
   CHECK_NON_NULL_ARGUMENT_RETURN_VOID(fid); \
   ScopedObjectAccess soa(env); \
-  ArtField* f = jni::DecodeArtField(fid); \
+  ArtField* f = jni::DecodeArtField<kEnableIndexIds>(fid); \
   NotifySetPrimitiveField(f, nullptr, JValue::FromPrimitive<decltype(value)>(value)); \
   f->Set ##fn <false>(f->GetDeclaringClass(), value)
 
@@ -3016,244 +2852,253 @@
   }
 };
 
-const JNINativeInterface gJniNativeInterface = {
-  nullptr,  // reserved0.
-  nullptr,  // reserved1.
-  nullptr,  // reserved2.
-  nullptr,  // reserved3.
-  JNI::GetVersion,
-  JNI::DefineClass,
-  JNI::FindClass,
-  JNI::FromReflectedMethod,
-  JNI::FromReflectedField,
-  JNI::ToReflectedMethod,
-  JNI::GetSuperclass,
-  JNI::IsAssignableFrom,
-  JNI::ToReflectedField,
-  JNI::Throw,
-  JNI::ThrowNew,
-  JNI::ExceptionOccurred,
-  JNI::ExceptionDescribe,
-  JNI::ExceptionClear,
-  JNI::FatalError,
-  JNI::PushLocalFrame,
-  JNI::PopLocalFrame,
-  JNI::NewGlobalRef,
-  JNI::DeleteGlobalRef,
-  JNI::DeleteLocalRef,
-  JNI::IsSameObject,
-  JNI::NewLocalRef,
-  JNI::EnsureLocalCapacity,
-  JNI::AllocObject,
-  JNI::NewObject,
-  JNI::NewObjectV,
-  JNI::NewObjectA,
-  JNI::GetObjectClass,
-  JNI::IsInstanceOf,
-  JNI::GetMethodID,
-  JNI::CallObjectMethod,
-  JNI::CallObjectMethodV,
-  JNI::CallObjectMethodA,
-  JNI::CallBooleanMethod,
-  JNI::CallBooleanMethodV,
-  JNI::CallBooleanMethodA,
-  JNI::CallByteMethod,
-  JNI::CallByteMethodV,
-  JNI::CallByteMethodA,
-  JNI::CallCharMethod,
-  JNI::CallCharMethodV,
-  JNI::CallCharMethodA,
-  JNI::CallShortMethod,
-  JNI::CallShortMethodV,
-  JNI::CallShortMethodA,
-  JNI::CallIntMethod,
-  JNI::CallIntMethodV,
-  JNI::CallIntMethodA,
-  JNI::CallLongMethod,
-  JNI::CallLongMethodV,
-  JNI::CallLongMethodA,
-  JNI::CallFloatMethod,
-  JNI::CallFloatMethodV,
-  JNI::CallFloatMethodA,
-  JNI::CallDoubleMethod,
-  JNI::CallDoubleMethodV,
-  JNI::CallDoubleMethodA,
-  JNI::CallVoidMethod,
-  JNI::CallVoidMethodV,
-  JNI::CallVoidMethodA,
-  JNI::CallNonvirtualObjectMethod,
-  JNI::CallNonvirtualObjectMethodV,
-  JNI::CallNonvirtualObjectMethodA,
-  JNI::CallNonvirtualBooleanMethod,
-  JNI::CallNonvirtualBooleanMethodV,
-  JNI::CallNonvirtualBooleanMethodA,
-  JNI::CallNonvirtualByteMethod,
-  JNI::CallNonvirtualByteMethodV,
-  JNI::CallNonvirtualByteMethodA,
-  JNI::CallNonvirtualCharMethod,
-  JNI::CallNonvirtualCharMethodV,
-  JNI::CallNonvirtualCharMethodA,
-  JNI::CallNonvirtualShortMethod,
-  JNI::CallNonvirtualShortMethodV,
-  JNI::CallNonvirtualShortMethodA,
-  JNI::CallNonvirtualIntMethod,
-  JNI::CallNonvirtualIntMethodV,
-  JNI::CallNonvirtualIntMethodA,
-  JNI::CallNonvirtualLongMethod,
-  JNI::CallNonvirtualLongMethodV,
-  JNI::CallNonvirtualLongMethodA,
-  JNI::CallNonvirtualFloatMethod,
-  JNI::CallNonvirtualFloatMethodV,
-  JNI::CallNonvirtualFloatMethodA,
-  JNI::CallNonvirtualDoubleMethod,
-  JNI::CallNonvirtualDoubleMethodV,
-  JNI::CallNonvirtualDoubleMethodA,
-  JNI::CallNonvirtualVoidMethod,
-  JNI::CallNonvirtualVoidMethodV,
-  JNI::CallNonvirtualVoidMethodA,
-  JNI::GetFieldID,
-  JNI::GetObjectField,
-  JNI::GetBooleanField,
-  JNI::GetByteField,
-  JNI::GetCharField,
-  JNI::GetShortField,
-  JNI::GetIntField,
-  JNI::GetLongField,
-  JNI::GetFloatField,
-  JNI::GetDoubleField,
-  JNI::SetObjectField,
-  JNI::SetBooleanField,
-  JNI::SetByteField,
-  JNI::SetCharField,
-  JNI::SetShortField,
-  JNI::SetIntField,
-  JNI::SetLongField,
-  JNI::SetFloatField,
-  JNI::SetDoubleField,
-  JNI::GetStaticMethodID,
-  JNI::CallStaticObjectMethod,
-  JNI::CallStaticObjectMethodV,
-  JNI::CallStaticObjectMethodA,
-  JNI::CallStaticBooleanMethod,
-  JNI::CallStaticBooleanMethodV,
-  JNI::CallStaticBooleanMethodA,
-  JNI::CallStaticByteMethod,
-  JNI::CallStaticByteMethodV,
-  JNI::CallStaticByteMethodA,
-  JNI::CallStaticCharMethod,
-  JNI::CallStaticCharMethodV,
-  JNI::CallStaticCharMethodA,
-  JNI::CallStaticShortMethod,
-  JNI::CallStaticShortMethodV,
-  JNI::CallStaticShortMethodA,
-  JNI::CallStaticIntMethod,
-  JNI::CallStaticIntMethodV,
-  JNI::CallStaticIntMethodA,
-  JNI::CallStaticLongMethod,
-  JNI::CallStaticLongMethodV,
-  JNI::CallStaticLongMethodA,
-  JNI::CallStaticFloatMethod,
-  JNI::CallStaticFloatMethodV,
-  JNI::CallStaticFloatMethodA,
-  JNI::CallStaticDoubleMethod,
-  JNI::CallStaticDoubleMethodV,
-  JNI::CallStaticDoubleMethodA,
-  JNI::CallStaticVoidMethod,
-  JNI::CallStaticVoidMethodV,
-  JNI::CallStaticVoidMethodA,
-  JNI::GetStaticFieldID,
-  JNI::GetStaticObjectField,
-  JNI::GetStaticBooleanField,
-  JNI::GetStaticByteField,
-  JNI::GetStaticCharField,
-  JNI::GetStaticShortField,
-  JNI::GetStaticIntField,
-  JNI::GetStaticLongField,
-  JNI::GetStaticFloatField,
-  JNI::GetStaticDoubleField,
-  JNI::SetStaticObjectField,
-  JNI::SetStaticBooleanField,
-  JNI::SetStaticByteField,
-  JNI::SetStaticCharField,
-  JNI::SetStaticShortField,
-  JNI::SetStaticIntField,
-  JNI::SetStaticLongField,
-  JNI::SetStaticFloatField,
-  JNI::SetStaticDoubleField,
-  JNI::NewString,
-  JNI::GetStringLength,
-  JNI::GetStringChars,
-  JNI::ReleaseStringChars,
-  JNI::NewStringUTF,
-  JNI::GetStringUTFLength,
-  JNI::GetStringUTFChars,
-  JNI::ReleaseStringUTFChars,
-  JNI::GetArrayLength,
-  JNI::NewObjectArray,
-  JNI::GetObjectArrayElement,
-  JNI::SetObjectArrayElement,
-  JNI::NewBooleanArray,
-  JNI::NewByteArray,
-  JNI::NewCharArray,
-  JNI::NewShortArray,
-  JNI::NewIntArray,
-  JNI::NewLongArray,
-  JNI::NewFloatArray,
-  JNI::NewDoubleArray,
-  JNI::GetBooleanArrayElements,
-  JNI::GetByteArrayElements,
-  JNI::GetCharArrayElements,
-  JNI::GetShortArrayElements,
-  JNI::GetIntArrayElements,
-  JNI::GetLongArrayElements,
-  JNI::GetFloatArrayElements,
-  JNI::GetDoubleArrayElements,
-  JNI::ReleaseBooleanArrayElements,
-  JNI::ReleaseByteArrayElements,
-  JNI::ReleaseCharArrayElements,
-  JNI::ReleaseShortArrayElements,
-  JNI::ReleaseIntArrayElements,
-  JNI::ReleaseLongArrayElements,
-  JNI::ReleaseFloatArrayElements,
-  JNI::ReleaseDoubleArrayElements,
-  JNI::GetBooleanArrayRegion,
-  JNI::GetByteArrayRegion,
-  JNI::GetCharArrayRegion,
-  JNI::GetShortArrayRegion,
-  JNI::GetIntArrayRegion,
-  JNI::GetLongArrayRegion,
-  JNI::GetFloatArrayRegion,
-  JNI::GetDoubleArrayRegion,
-  JNI::SetBooleanArrayRegion,
-  JNI::SetByteArrayRegion,
-  JNI::SetCharArrayRegion,
-  JNI::SetShortArrayRegion,
-  JNI::SetIntArrayRegion,
-  JNI::SetLongArrayRegion,
-  JNI::SetFloatArrayRegion,
-  JNI::SetDoubleArrayRegion,
-  JNI::RegisterNatives,
-  JNI::UnregisterNatives,
-  JNI::MonitorEnter,
-  JNI::MonitorExit,
-  JNI::GetJavaVM,
-  JNI::GetStringRegion,
-  JNI::GetStringUTFRegion,
-  JNI::GetPrimitiveArrayCritical,
-  JNI::ReleasePrimitiveArrayCritical,
-  JNI::GetStringCritical,
-  JNI::ReleaseStringCritical,
-  JNI::NewWeakGlobalRef,
-  JNI::DeleteWeakGlobalRef,
-  JNI::ExceptionCheck,
-  JNI::NewDirectByteBuffer,
-  JNI::GetDirectBufferAddress,
-  JNI::GetDirectBufferCapacity,
-  JNI::GetObjectRefType,
+template<bool kEnableIndexIds>
+struct JniNativeInterfaceFunctions {
+  using JNIImpl = JNI<kEnableIndexIds>;
+  static constexpr JNINativeInterface gJniNativeInterface = {
+    nullptr,  // reserved0.
+    nullptr,  // reserved1.
+    nullptr,  // reserved2.
+    nullptr,  // reserved3.
+    JNIImpl::GetVersion,
+    JNIImpl::DefineClass,
+    JNIImpl::FindClass,
+    JNIImpl::FromReflectedMethod,
+    JNIImpl::FromReflectedField,
+    JNIImpl::ToReflectedMethod,
+    JNIImpl::GetSuperclass,
+    JNIImpl::IsAssignableFrom,
+    JNIImpl::ToReflectedField,
+    JNIImpl::Throw,
+    JNIImpl::ThrowNew,
+    JNIImpl::ExceptionOccurred,
+    JNIImpl::ExceptionDescribe,
+    JNIImpl::ExceptionClear,
+    JNIImpl::FatalError,
+    JNIImpl::PushLocalFrame,
+    JNIImpl::PopLocalFrame,
+    JNIImpl::NewGlobalRef,
+    JNIImpl::DeleteGlobalRef,
+    JNIImpl::DeleteLocalRef,
+    JNIImpl::IsSameObject,
+    JNIImpl::NewLocalRef,
+    JNIImpl::EnsureLocalCapacity,
+    JNIImpl::AllocObject,
+    JNIImpl::NewObject,
+    JNIImpl::NewObjectV,
+    JNIImpl::NewObjectA,
+    JNIImpl::GetObjectClass,
+    JNIImpl::IsInstanceOf,
+    JNIImpl::GetMethodID,
+    JNIImpl::CallObjectMethod,
+    JNIImpl::CallObjectMethodV,
+    JNIImpl::CallObjectMethodA,
+    JNIImpl::CallBooleanMethod,
+    JNIImpl::CallBooleanMethodV,
+    JNIImpl::CallBooleanMethodA,
+    JNIImpl::CallByteMethod,
+    JNIImpl::CallByteMethodV,
+    JNIImpl::CallByteMethodA,
+    JNIImpl::CallCharMethod,
+    JNIImpl::CallCharMethodV,
+    JNIImpl::CallCharMethodA,
+    JNIImpl::CallShortMethod,
+    JNIImpl::CallShortMethodV,
+    JNIImpl::CallShortMethodA,
+    JNIImpl::CallIntMethod,
+    JNIImpl::CallIntMethodV,
+    JNIImpl::CallIntMethodA,
+    JNIImpl::CallLongMethod,
+    JNIImpl::CallLongMethodV,
+    JNIImpl::CallLongMethodA,
+    JNIImpl::CallFloatMethod,
+    JNIImpl::CallFloatMethodV,
+    JNIImpl::CallFloatMethodA,
+    JNIImpl::CallDoubleMethod,
+    JNIImpl::CallDoubleMethodV,
+    JNIImpl::CallDoubleMethodA,
+    JNIImpl::CallVoidMethod,
+    JNIImpl::CallVoidMethodV,
+    JNIImpl::CallVoidMethodA,
+    JNIImpl::CallNonvirtualObjectMethod,
+    JNIImpl::CallNonvirtualObjectMethodV,
+    JNIImpl::CallNonvirtualObjectMethodA,
+    JNIImpl::CallNonvirtualBooleanMethod,
+    JNIImpl::CallNonvirtualBooleanMethodV,
+    JNIImpl::CallNonvirtualBooleanMethodA,
+    JNIImpl::CallNonvirtualByteMethod,
+    JNIImpl::CallNonvirtualByteMethodV,
+    JNIImpl::CallNonvirtualByteMethodA,
+    JNIImpl::CallNonvirtualCharMethod,
+    JNIImpl::CallNonvirtualCharMethodV,
+    JNIImpl::CallNonvirtualCharMethodA,
+    JNIImpl::CallNonvirtualShortMethod,
+    JNIImpl::CallNonvirtualShortMethodV,
+    JNIImpl::CallNonvirtualShortMethodA,
+    JNIImpl::CallNonvirtualIntMethod,
+    JNIImpl::CallNonvirtualIntMethodV,
+    JNIImpl::CallNonvirtualIntMethodA,
+    JNIImpl::CallNonvirtualLongMethod,
+    JNIImpl::CallNonvirtualLongMethodV,
+    JNIImpl::CallNonvirtualLongMethodA,
+    JNIImpl::CallNonvirtualFloatMethod,
+    JNIImpl::CallNonvirtualFloatMethodV,
+    JNIImpl::CallNonvirtualFloatMethodA,
+    JNIImpl::CallNonvirtualDoubleMethod,
+    JNIImpl::CallNonvirtualDoubleMethodV,
+    JNIImpl::CallNonvirtualDoubleMethodA,
+    JNIImpl::CallNonvirtualVoidMethod,
+    JNIImpl::CallNonvirtualVoidMethodV,
+    JNIImpl::CallNonvirtualVoidMethodA,
+    JNIImpl::GetFieldID,
+    JNIImpl::GetObjectField,
+    JNIImpl::GetBooleanField,
+    JNIImpl::GetByteField,
+    JNIImpl::GetCharField,
+    JNIImpl::GetShortField,
+    JNIImpl::GetIntField,
+    JNIImpl::GetLongField,
+    JNIImpl::GetFloatField,
+    JNIImpl::GetDoubleField,
+    JNIImpl::SetObjectField,
+    JNIImpl::SetBooleanField,
+    JNIImpl::SetByteField,
+    JNIImpl::SetCharField,
+    JNIImpl::SetShortField,
+    JNIImpl::SetIntField,
+    JNIImpl::SetLongField,
+    JNIImpl::SetFloatField,
+    JNIImpl::SetDoubleField,
+    JNIImpl::GetStaticMethodID,
+    JNIImpl::CallStaticObjectMethod,
+    JNIImpl::CallStaticObjectMethodV,
+    JNIImpl::CallStaticObjectMethodA,
+    JNIImpl::CallStaticBooleanMethod,
+    JNIImpl::CallStaticBooleanMethodV,
+    JNIImpl::CallStaticBooleanMethodA,
+    JNIImpl::CallStaticByteMethod,
+    JNIImpl::CallStaticByteMethodV,
+    JNIImpl::CallStaticByteMethodA,
+    JNIImpl::CallStaticCharMethod,
+    JNIImpl::CallStaticCharMethodV,
+    JNIImpl::CallStaticCharMethodA,
+    JNIImpl::CallStaticShortMethod,
+    JNIImpl::CallStaticShortMethodV,
+    JNIImpl::CallStaticShortMethodA,
+    JNIImpl::CallStaticIntMethod,
+    JNIImpl::CallStaticIntMethodV,
+    JNIImpl::CallStaticIntMethodA,
+    JNIImpl::CallStaticLongMethod,
+    JNIImpl::CallStaticLongMethodV,
+    JNIImpl::CallStaticLongMethodA,
+    JNIImpl::CallStaticFloatMethod,
+    JNIImpl::CallStaticFloatMethodV,
+    JNIImpl::CallStaticFloatMethodA,
+    JNIImpl::CallStaticDoubleMethod,
+    JNIImpl::CallStaticDoubleMethodV,
+    JNIImpl::CallStaticDoubleMethodA,
+    JNIImpl::CallStaticVoidMethod,
+    JNIImpl::CallStaticVoidMethodV,
+    JNIImpl::CallStaticVoidMethodA,
+    JNIImpl::GetStaticFieldID,
+    JNIImpl::GetStaticObjectField,
+    JNIImpl::GetStaticBooleanField,
+    JNIImpl::GetStaticByteField,
+    JNIImpl::GetStaticCharField,
+    JNIImpl::GetStaticShortField,
+    JNIImpl::GetStaticIntField,
+    JNIImpl::GetStaticLongField,
+    JNIImpl::GetStaticFloatField,
+    JNIImpl::GetStaticDoubleField,
+    JNIImpl::SetStaticObjectField,
+    JNIImpl::SetStaticBooleanField,
+    JNIImpl::SetStaticByteField,
+    JNIImpl::SetStaticCharField,
+    JNIImpl::SetStaticShortField,
+    JNIImpl::SetStaticIntField,
+    JNIImpl::SetStaticLongField,
+    JNIImpl::SetStaticFloatField,
+    JNIImpl::SetStaticDoubleField,
+    JNIImpl::NewString,
+    JNIImpl::GetStringLength,
+    JNIImpl::GetStringChars,
+    JNIImpl::ReleaseStringChars,
+    JNIImpl::NewStringUTF,
+    JNIImpl::GetStringUTFLength,
+    JNIImpl::GetStringUTFChars,
+    JNIImpl::ReleaseStringUTFChars,
+    JNIImpl::GetArrayLength,
+    JNIImpl::NewObjectArray,
+    JNIImpl::GetObjectArrayElement,
+    JNIImpl::SetObjectArrayElement,
+    JNIImpl::NewBooleanArray,
+    JNIImpl::NewByteArray,
+    JNIImpl::NewCharArray,
+    JNIImpl::NewShortArray,
+    JNIImpl::NewIntArray,
+    JNIImpl::NewLongArray,
+    JNIImpl::NewFloatArray,
+    JNIImpl::NewDoubleArray,
+    JNIImpl::GetBooleanArrayElements,
+    JNIImpl::GetByteArrayElements,
+    JNIImpl::GetCharArrayElements,
+    JNIImpl::GetShortArrayElements,
+    JNIImpl::GetIntArrayElements,
+    JNIImpl::GetLongArrayElements,
+    JNIImpl::GetFloatArrayElements,
+    JNIImpl::GetDoubleArrayElements,
+    JNIImpl::ReleaseBooleanArrayElements,
+    JNIImpl::ReleaseByteArrayElements,
+    JNIImpl::ReleaseCharArrayElements,
+    JNIImpl::ReleaseShortArrayElements,
+    JNIImpl::ReleaseIntArrayElements,
+    JNIImpl::ReleaseLongArrayElements,
+    JNIImpl::ReleaseFloatArrayElements,
+    JNIImpl::ReleaseDoubleArrayElements,
+    JNIImpl::GetBooleanArrayRegion,
+    JNIImpl::GetByteArrayRegion,
+    JNIImpl::GetCharArrayRegion,
+    JNIImpl::GetShortArrayRegion,
+    JNIImpl::GetIntArrayRegion,
+    JNIImpl::GetLongArrayRegion,
+    JNIImpl::GetFloatArrayRegion,
+    JNIImpl::GetDoubleArrayRegion,
+    JNIImpl::SetBooleanArrayRegion,
+    JNIImpl::SetByteArrayRegion,
+    JNIImpl::SetCharArrayRegion,
+    JNIImpl::SetShortArrayRegion,
+    JNIImpl::SetIntArrayRegion,
+    JNIImpl::SetLongArrayRegion,
+    JNIImpl::SetFloatArrayRegion,
+    JNIImpl::SetDoubleArrayRegion,
+    JNIImpl::RegisterNatives,
+    JNIImpl::UnregisterNatives,
+    JNIImpl::MonitorEnter,
+    JNIImpl::MonitorExit,
+    JNIImpl::GetJavaVM,
+    JNIImpl::GetStringRegion,
+    JNIImpl::GetStringUTFRegion,
+    JNIImpl::GetPrimitiveArrayCritical,
+    JNIImpl::ReleasePrimitiveArrayCritical,
+    JNIImpl::GetStringCritical,
+    JNIImpl::ReleaseStringCritical,
+    JNIImpl::NewWeakGlobalRef,
+    JNIImpl::DeleteWeakGlobalRef,
+    JNIImpl::ExceptionCheck,
+    JNIImpl::NewDirectByteBuffer,
+    JNIImpl::GetDirectBufferAddress,
+    JNIImpl::GetDirectBufferCapacity,
+    JNIImpl::GetObjectRefType,
+  };
 };
 
 const JNINativeInterface* GetJniNativeInterface() {
-  return &gJniNativeInterface;
+  // The template argument is passed down through the Encode/DecodeArtMethod/Field calls so if
+  // JniIdType is kPointer the calls will be a simple cast with no branches. This ensures that
+  // the normal case is still fast.
+  return Runtime::Current()->GetJniIdType() == JniIdType::kPointer
+             ? &JniNativeInterfaceFunctions<false>::gJniNativeInterface
+             : &JniNativeInterfaceFunctions<true>::gJniNativeInterface;
 }
 
 void (*gJniSleepForeverStub[])()  = {
@@ -3496,16 +3341,6 @@
   return reinterpret_cast<JNINativeInterface*>(&gJniSleepForeverStub);
 }
 
-void JniInitializeNativeCallerCheck() {
-  // This method should be called only once and before there are multiple runtime threads.
-  DCHECK(!CodeRangeCache::GetSingleton().HasCache());
-  CodeRangeCache::GetSingleton().BuildCache();
-}
-
-void JniShutdownNativeCallerCheck() {
-  CodeRangeCache::GetSingleton().DropCache();
-}
-
 }  // namespace art
 
 std::ostream& operator<<(std::ostream& os, const jobjectRefType& rhs) {
diff --git a/runtime/jni/jni_internal.h b/runtime/jni/jni_internal.h
index 4359074..1616ee5 100644
--- a/runtime/jni/jni_internal.h
+++ b/runtime/jni/jni_internal.h
@@ -20,12 +20,18 @@
 #include <jni.h>
 #include <iosfwd>
 
+#include "base/locks.h"
 #include "base/macros.h"
+#include "reflective_handle.h"
+#include "reflective_handle_scope.h"
+#include "runtime.h"
+#include "thread.h"
 
 namespace art {
 
 class ArtField;
 class ArtMethod;
+class ScopedObjectAccess;
 
 const JNINativeInterface* GetJniNativeInterface();
 const JNINativeInterface* GetRuntimeShutdownNativeInterface();
@@ -39,26 +45,100 @@
 // Removes native stack checking state.
 void JniShutdownNativeCallerCheck();
 
+// Finds the method using JNI semantics and initializes any classes. Does not encode the method in a
+// JNI id
+ArtMethod* FindMethodJNI(const ScopedObjectAccess& soa,
+                         jclass java_class,
+                         const char* name,
+                         const char* sig,
+                         bool is_static) REQUIRES_SHARED(Locks::mutator_lock_);
+
+// Finds the field using JNI semantics and initializes any classes. Does not encode the method in a
+// JNI id.
+ArtField* FindFieldJNI(const ScopedObjectAccess& soa,
+                       jclass java_class,
+                       const char* name,
+                       const char* sig,
+                       bool is_static) REQUIRES_SHARED(Locks::mutator_lock_);
+
 namespace jni {
 
+// We want to maintain a branchless fast-path for performance reasons. The JniIdManager is the
+// ultimate source of truth for how the IDs are handed out but we inline the normal non-index cases
+// here.
+
+template <bool kEnableIndexIds>
+ALWAYS_INLINE
+static bool IsIndexId(jmethodID mid) {
+  return kEnableIndexIds && ((reinterpret_cast<uintptr_t>(mid) % 2) != 0);
+}
+
+template <bool kEnableIndexIds>
+ALWAYS_INLINE
+static bool IsIndexId(jfieldID fid) {
+  return kEnableIndexIds && ((reinterpret_cast<uintptr_t>(fid) % 2) != 0);
+}
+
+template <bool kEnableIndexIds = true>
 ALWAYS_INLINE
 static inline ArtField* DecodeArtField(jfieldID fid) {
-  return reinterpret_cast<ArtField*>(fid);
+  if (IsIndexId<kEnableIndexIds>(fid)) {
+    return Runtime::Current()->GetJniIdManager()->DecodeFieldId(fid);
+  } else {
+    return reinterpret_cast<ArtField*>(fid);
+  }
 }
 
+template <bool kEnableIndexIds = true>
+ALWAYS_INLINE static inline jfieldID EncodeArtField(ReflectiveHandle<ArtField> field)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (kEnableIndexIds && Runtime::Current()->GetJniIdType() != JniIdType::kPointer) {
+    return Runtime::Current()->GetJniIdManager()->EncodeFieldId(field);
+  } else {
+    return reinterpret_cast<jfieldID>(field.Get());
+  }
+}
+
+template <bool kEnableIndexIds = true>
 ALWAYS_INLINE
-static inline jfieldID EncodeArtField(ArtField* field) {
-  return reinterpret_cast<jfieldID>(field);
+static inline jfieldID EncodeArtField(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_)  {
+  if (kEnableIndexIds && Runtime::Current()->GetJniIdType() != JniIdType::kPointer) {
+    return Runtime::Current()->GetJniIdManager()->EncodeFieldId(field);
+  } else {
+    return reinterpret_cast<jfieldID>(field);
+  }
 }
 
+template <bool kEnableIndexIds = true>
 ALWAYS_INLINE
-static inline jmethodID EncodeArtMethod(ArtMethod* art_method) {
-  return reinterpret_cast<jmethodID>(art_method);
+static inline jmethodID EncodeArtMethod(ReflectiveHandle<ArtMethod> art_method)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (kEnableIndexIds && Runtime::Current()->GetJniIdType() != JniIdType::kPointer) {
+    return Runtime::Current()->GetJniIdManager()->EncodeMethodId(art_method);
+  } else {
+    return reinterpret_cast<jmethodID>(art_method.Get());
+  }
 }
 
+template <bool kEnableIndexIds = true>
+ALWAYS_INLINE
+static inline jmethodID EncodeArtMethod(ArtMethod* art_method)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (kEnableIndexIds && Runtime::Current()->GetJniIdType() != JniIdType::kPointer) {
+    return Runtime::Current()->GetJniIdManager()->EncodeMethodId(art_method);
+  } else {
+    return reinterpret_cast<jmethodID>(art_method);
+  }
+}
+
+template <bool kEnableIndexIds = true>
 ALWAYS_INLINE
 static inline ArtMethod* DecodeArtMethod(jmethodID method_id) {
-  return reinterpret_cast<ArtMethod*>(method_id);
+  if (IsIndexId<kEnableIndexIds>(method_id)) {
+    return Runtime::Current()->GetJniIdManager()->DecodeMethodId(method_id);
+  } else {
+    return reinterpret_cast<ArtMethod*>(method_id);
+  }
 }
 
 }  // namespace jni
diff --git a/runtime/jni_id_type.h b/runtime/jni_id_type.h
new file mode 100644
index 0000000..3f952b6
--- /dev/null
+++ b/runtime/jni_id_type.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JNI_ID_TYPE_H_
+#define ART_RUNTIME_JNI_ID_TYPE_H_
+
+#include <iosfwd>
+
+namespace art {
+
+enum class JniIdType {
+  // All Jni method/field IDs are pointers to the corresponding Art{Field,Method} type
+  kPointer,
+
+  // All Jni method/field IDs are indices into a table.
+  kIndices,
+
+  // All Jni method/field IDs are pointers to the corresponding Art{Field,Method} type but we
+  // keep around extra information support changing modes to either kPointer or kIndices later.
+  kSwapablePointer,
+
+  kDefault = kPointer,
+};
+
+std::ostream& operator<<(std::ostream& os, const JniIdType& rhs);
+
+}  // namespace art
+#endif  // ART_RUNTIME_JNI_ID_TYPE_H_
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index ac7890c..30559a0 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -42,6 +42,7 @@
  *  |10|9|8|765432109876|5432109876543210|
  *  |00|m|r| lock count |thread id owner |
  *
+ * The lock count is zero, but the owner is nonzero for a simply held lock.
  * When the lock word is in the "fat" state and its bits are formatted as follows:
  *
  *  |33|2|2|2222222211111111110000000000|
@@ -72,7 +73,8 @@
     kMarkBitStateSize = 1,
     // Number of bits to encode the thin lock owner.
     kThinLockOwnerSize = 16,
-    // Remaining bits are the recursive lock count.
+    // Remaining bits are the recursive lock count. Zero means it is locked exactly once
+    // and not recursively.
     kThinLockCountSize = 32 - kThinLockOwnerSize - kStateSize - kReadBarrierStateSize -
         kMarkBitStateSize,
 
@@ -234,7 +236,8 @@
   // Return the owner thin lock thread id.
   uint32_t ThinLockOwner() const;
 
-  // Return the number of times a lock value has been locked.
+  // Return the number of times a lock value has been re-locked. Only valid in thin-locked state.
+  // If the lock is held only once the return value is zero.
   uint32_t ThinLockCount() const;
 
   // Return the Monitor encoded in a fat lock.
diff --git a/runtime/method_handles.cc b/runtime/method_handles.cc
index 5471d38..2dc9f67 100644
--- a/runtime/method_handles.cc
+++ b/runtime/method_handles.cc
@@ -20,6 +20,7 @@
 
 #include "class_root.h"
 #include "common_dex_operations.h"
+#include "common_throws.h"
 #include "interpreter/shadow_frame-inl.h"
 #include "jvalue-inl.h"
 #include "mirror/class-inl.h"
@@ -418,6 +419,7 @@
 static inline bool MethodHandleInvokeMethod(ArtMethod* called_method,
                                             Handle<mirror::MethodType> callsite_type,
                                             Handle<mirror::MethodType> target_type,
+                                            Handle<mirror::MethodType> nominal_type,
                                             Thread* self,
                                             ShadowFrame& shadow_frame,
                                             const InstructionOperands* const operands,
@@ -542,6 +544,11 @@
     return false;
   }
 
+  if (nominal_type != nullptr) {
+    return ConvertReturnValue(nominal_type, target_type, result) &&
+        ConvertReturnValue(callsite_type, nominal_type, result);
+  }
+
   return ConvertReturnValue(callsite_type, target_type, result);
 }
 
@@ -685,7 +692,14 @@
     if (referrer_class == declaring_class) {
       return target_method;
     }
-    if (!declaring_class->IsInterface()) {
+    if (declaring_class->IsInterface()) {
+      if (target_method->IsAbstract()) {
+        std::string msg =
+            "Method " + target_method->PrettyMethod() + " is abstract interface method!";
+        ThrowIllegalAccessException(msg.c_str());
+        return nullptr;
+      }
+    } else {
       ObjPtr<mirror::Class> super_class = referrer_class->GetSuperClass();
       uint16_t vtable_index = target_method->GetMethodIndex();
       DCHECK(super_class != nullptr);
@@ -706,8 +720,9 @@
                                const InstructionOperands* const operands,
                                JValue* result)
   REQUIRES_SHARED(Locks::mutator_lock_) {
-  StackHandleScope<1> hs(self);
+  StackHandleScope<2> hs(self);
   Handle<mirror::MethodType> handle_type(hs.NewHandle(method_handle->GetMethodType()));
+  Handle<mirror::MethodType> nominal_handle_type(hs.NewHandle(method_handle->GetNominalType()));
   const mirror::MethodHandle::Kind handle_kind = method_handle->GetHandleKind();
   DCHECK(IsInvoke(handle_kind));
 
@@ -753,6 +768,7 @@
     return MethodHandleInvokeMethod(called_method,
                                     callsite_type,
                                     handle_type,
+                                    nominal_handle_type,
                                     self,
                                     shadow_frame,
                                     operands,
diff --git a/runtime/mirror/array-alloc-inl.h b/runtime/mirror/array-alloc-inl.h
index 2ae4cab..c1e0175 100644
--- a/runtime/mirror/array-alloc-inl.h
+++ b/runtime/mirror/array-alloc-inl.h
@@ -25,6 +25,7 @@
 #include "base/bit_utils.h"
 #include "base/casts.h"
 #include "class.h"
+#include "gc/allocator_type.h"
 #include "gc/heap-inl.h"
 #include "obj_ptr-inl.h"
 #include "runtime.h"
@@ -143,14 +144,14 @@
   if (!kFillUsable) {
     SetLengthVisitor visitor(component_count);
     result = ObjPtr<Array>::DownCast(
-        heap->AllocObjectWithAllocator<kIsInstrumented, true>(
+        heap->AllocObjectWithAllocator<kIsInstrumented>(
             self, array_class, size, allocator_type, visitor));
   } else {
     SetLengthToUsableSizeVisitor visitor(component_count,
                                          DataOffset(1U << component_size_shift).SizeValue(),
                                          component_size_shift);
     result = ObjPtr<Array>::DownCast(
-        heap->AllocObjectWithAllocator<kIsInstrumented, true>(
+        heap->AllocObjectWithAllocator<kIsInstrumented>(
             self, array_class, size, allocator_type, visitor));
   }
   if (kIsDebugBuild && result != nullptr && Runtime::Current()->IsStarted()) {
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 34925f5..3f1cb16 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -227,22 +227,30 @@
 
 template<typename T, PointerSize kPointerSize, VerifyObjectFlags kVerifyFlags>
 inline T PointerArray::GetElementPtrSize(uint32_t idx) {
-  // C style casts here since we sometimes have T be a pointer, or sometimes an integer
-  // (for stack traces).
   if (kPointerSize == PointerSize::k64) {
-    return (T)static_cast<uintptr_t>(AsLongArray<kVerifyFlags>()->GetWithoutChecks(idx));
+    DCHECK(IsLongArray<kVerifyFlags>());
+  } else {
+    DCHECK(IsIntArray<kVerifyFlags>());
   }
-  return (T)static_cast<uintptr_t>(AsIntArray<kVerifyFlags>()->GetWithoutChecks(idx));
+  return GetElementPtrSizeUnchecked<T, kPointerSize, kVerifyFlags>(idx);
 }
+
 template<typename T, PointerSize kPointerSize, VerifyObjectFlags kVerifyFlags>
 inline T PointerArray::GetElementPtrSizeUnchecked(uint32_t idx) {
   // C style casts here since we sometimes have T be a pointer, or sometimes an integer
   // (for stack traces).
+  using ConversionType = typename std::conditional_t<std::is_pointer_v<T>, uintptr_t, T>;
   if (kPointerSize == PointerSize::k64) {
-    return (T)static_cast<uintptr_t>(AsLongArrayUnchecked<kVerifyFlags>()->GetWithoutChecks(idx));
+    uint64_t value =
+        static_cast<uint64_t>(AsLongArrayUnchecked<kVerifyFlags>()->GetWithoutChecks(idx));
+    return (T) dchecked_integral_cast<ConversionType>(value);
+  } else {
+    uint32_t value =
+        static_cast<uint32_t>(AsIntArrayUnchecked<kVerifyFlags>()->GetWithoutChecks(idx));
+    return (T) dchecked_integral_cast<ConversionType>(value);
   }
-  return (T)static_cast<uintptr_t>(AsIntArrayUnchecked<kVerifyFlags>()->GetWithoutChecks(idx));
 }
+
 template<typename T, VerifyObjectFlags kVerifyFlags>
 inline T PointerArray::GetElementPtrSize(uint32_t idx, PointerSize ptr_size) {
   if (ptr_size == PointerSize::k64) {
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index d42f5a0..e011e1c 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -54,8 +54,8 @@
   Handle<mirror::Class> h_component_type(hs.NewHandle(array_class->GetComponentType()));
   size_t component_size_shift = h_component_type->GetPrimitiveTypeSizeShift();
   gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
-  Handle<Array> new_array(hs.NewHandle(Array::Alloc<true>(
-      self, array_class.Get(), array_length, component_size_shift, allocator_type)));
+  Handle<Array> new_array(hs.NewHandle(
+      Array::Alloc(self, array_class.Get(), array_length, component_size_shift, allocator_type)));
   if (UNLIKELY(new_array == nullptr)) {
     CHECK(self->IsExceptionPending());
     return nullptr;
@@ -122,11 +122,11 @@
 template<typename T>
 ObjPtr<PrimitiveArray<T>> PrimitiveArray<T>::Alloc(Thread* self, size_t length) {
   gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
-  ObjPtr<Array> raw_array = Array::Alloc<true>(self,
-                                               GetClassRoot<PrimitiveArray<T>>(),
-                                               length,
-                                               ComponentSizeShiftWidth(sizeof(T)),
-                                               allocator_type);
+  ObjPtr<Array> raw_array = Array::Alloc(self,
+                                         GetClassRoot<PrimitiveArray<T>>(),
+                                         length,
+                                         ComponentSizeShiftWidth(sizeof(T)),
+                                         allocator_type);
   return ObjPtr<PrimitiveArray<T>>::DownCast(raw_array);
 }
 
@@ -138,20 +138,18 @@
   art::ThrowArrayStoreException(object->GetClass(), this->GetClass());
 }
 
-ObjPtr<Array> Array::CopyOf(Thread* self, int32_t new_length) {
-  ObjPtr<Class> klass = GetClass();
+ObjPtr<Array> Array::CopyOf(Handle<Array> h_this, Thread* self, int32_t new_length) {
+  ObjPtr<Class> klass = h_this->GetClass();
   CHECK(klass->IsPrimitiveArray()) << "Will miss write barriers";
   DCHECK_GE(new_length, 0);
-  // We may get copied by a compacting GC.
-  StackHandleScope<1> hs(self);
-  auto h_this(hs.NewHandle(this));
   auto* heap = Runtime::Current()->GetHeap();
-  gc::AllocatorType allocator_type = heap->IsMovableObject(this) ? heap->GetCurrentAllocator() :
-      heap->GetCurrentNonMovingAllocator();
+  gc::AllocatorType allocator_type = heap->IsMovableObject(h_this.Get())
+      ? heap->GetCurrentAllocator()
+      : heap->GetCurrentNonMovingAllocator();
   const auto component_size = klass->GetComponentSize();
   const auto component_shift = klass->GetComponentSizeShift();
   ObjPtr<Array> new_array =
-      Alloc<true>(self, klass, new_length, component_shift, allocator_type);  // Invalidates klass.
+      Alloc(self, klass, new_length, component_shift, allocator_type);  // Invalidates klass.
   if (LIKELY(new_array != nullptr)) {
     memcpy(new_array->GetRawData(component_size, 0),
            h_this->GetRawData(component_size, 0),
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 1ee4e50..19f9a92 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -19,12 +19,15 @@
 
 #include "base/bit_utils.h"
 #include "base/enums.h"
-#include "gc/allocator_type.h"
 #include "obj_ptr.h"
 #include "object.h"
 
 namespace art {
 
+namespace gc {
+enum AllocatorType : char;
+}  // namespace gc
+
 template<class T> class Handle;
 class Thread;
 
@@ -40,7 +43,7 @@
   // Allocates an array with the given properties, if kFillUsable is true the array will be of at
   // least component_count size, however, if there's usable space at the end of the allocation the
   // array will fill it.
-  template <bool kIsInstrumented, bool kFillUsable = false>
+  template <bool kIsInstrumented = true, bool kFillUsable = false>
   ALWAYS_INLINE static ObjPtr<Array> Alloc(Thread* self,
                                            ObjPtr<Class> array_class,
                                            int32_t component_count,
@@ -122,8 +125,8 @@
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ObjPtr<Array> CopyOf(Thread* self, int32_t new_length) REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!Roles::uninterruptible_);
+  static ObjPtr<Array> CopyOf(Handle<Array> h_this, Thread* self, int32_t new_length)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
  protected:
   void ThrowArrayStoreException(ObjPtr<Object> object) REQUIRES_SHARED(Locks::mutator_lock_)
diff --git a/runtime/mirror/class-alloc-inl.h b/runtime/mirror/class-alloc-inl.h
index d4a532e..5627b49 100644
--- a/runtime/mirror/class-alloc-inl.h
+++ b/runtime/mirror/class-alloc-inl.h
@@ -19,6 +19,7 @@
 
 #include "class-inl.h"
 
+#include "gc/allocator_type.h"
 #include "gc/heap-inl.h"
 #include "object-inl.h"
 #include "runtime.h"
@@ -45,21 +46,24 @@
   DCHECK_GE(this->object_size_, sizeof(Object));
 }
 
-template<bool kIsInstrumented, bool kCheckAddFinalizer>
+template<bool kIsInstrumented, Class::AddFinalizer kAddFinalizer, bool kCheckAddFinalizer>
 inline ObjPtr<Object> Class::Alloc(Thread* self, gc::AllocatorType allocator_type) {
   CheckObjectAlloc();
   gc::Heap* heap = Runtime::Current()->GetHeap();
-  const bool add_finalizer = kCheckAddFinalizer && IsFinalizable();
-  if (!kCheckAddFinalizer) {
-    DCHECK(!IsFinalizable());
+  bool add_finalizer;
+  switch (kAddFinalizer) {
+    case Class::AddFinalizer::kUseClassTag:
+      add_finalizer = IsFinalizable();
+      break;
+    case Class::AddFinalizer::kNoAddFinalizer:
+      add_finalizer = false;
+      DCHECK(!kCheckAddFinalizer || !IsFinalizable());
+      break;
   }
-  // Note that the this pointer may be invalidated after the allocation.
+  // Note that the `this` pointer may be invalidated after the allocation.
   ObjPtr<Object> obj =
-      heap->AllocObjectWithAllocator<kIsInstrumented, false>(self,
-                                                             this,
-                                                             this->object_size_,
-                                                             allocator_type,
-                                                             VoidFunctor());
+      heap->AllocObjectWithAllocator<kIsInstrumented, /*kCheckLargeObject=*/ false>(
+          self, this, this->object_size_, allocator_type, VoidFunctor());
   if (add_finalizer && LIKELY(obj != nullptr)) {
     heap->AddFinalizerReference(self, &obj);
     if (UNLIKELY(self->IsExceptionPending())) {
@@ -71,11 +75,11 @@
 }
 
 inline ObjPtr<Object> Class::AllocObject(Thread* self) {
-  return Alloc<true>(self, Runtime::Current()->GetHeap()->GetCurrentAllocator());
+  return Alloc(self, Runtime::Current()->GetHeap()->GetCurrentAllocator());
 }
 
 inline ObjPtr<Object> Class::AllocNonMovableObject(Thread* self) {
-  return Alloc<true>(self, Runtime::Current()->GetHeap()->GetCurrentNonMovingAllocator());
+  return Alloc(self, Runtime::Current()->GetHeap()->GetCurrentNonMovingAllocator());
 }
 
 }  // namespace mirror
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 3ee8bfe..6a5317c 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -24,6 +24,7 @@
 #include "base/array_slice.h"
 #include "base/iteration_range.h"
 #include "base/length_prefixed_array.h"
+#include "base/stride_iterator.h"
 #include "base/utils.h"
 #include "class_linker.h"
 #include "class_loader.h"
@@ -77,7 +78,8 @@
     DCHECK(old_super_class == nullptr || old_super_class == new_super_class);
   }
   DCHECK(new_super_class != nullptr);
-  SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class);
+  SetFieldObject</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
+      OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class);
 }
 
 inline bool Class::HasSuperClass() {
@@ -129,7 +131,7 @@
 
 template<VerifyObjectFlags kVerifyFlags>
 inline ArraySlice<ArtMethod> Class::GetDirectMethodsSlice(PointerSize pointer_size) {
-  DCHECK(IsLoaded() || IsErroneous());
+  DCHECK(IsLoaded() || IsErroneous()) << GetStatus();
   return GetDirectMethodsSliceUnchecked(pointer_size);
 }
 
@@ -142,7 +144,7 @@
 
 template<VerifyObjectFlags kVerifyFlags>
 inline ArraySlice<ArtMethod> Class::GetDeclaredMethodsSlice(PointerSize pointer_size) {
-  DCHECK(IsLoaded() || IsErroneous());
+  DCHECK(IsLoaded() || IsErroneous()) << GetStatus();
   return GetDeclaredMethodsSliceUnchecked(pointer_size);
 }
 
@@ -155,7 +157,7 @@
 
 template<VerifyObjectFlags kVerifyFlags>
 inline ArraySlice<ArtMethod> Class::GetDeclaredVirtualMethodsSlice(PointerSize pointer_size) {
-  DCHECK(IsLoaded() || IsErroneous());
+  DCHECK(IsLoaded() || IsErroneous()) << GetStatus();
   return GetDeclaredVirtualMethodsSliceUnchecked(pointer_size);
 }
 
@@ -298,7 +300,8 @@
 }
 
 inline void Class::SetVTable(ObjPtr<PointerArray> new_vtable) {
-  SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), new_vtable);
+  SetFieldObject</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
+      OFFSET_OF_OBJECT_MEMBER(Class, vtable_), new_vtable);
 }
 
 template<VerifyObjectFlags kVerifyFlags>
@@ -344,7 +347,8 @@
 }
 
 inline void Class::SetEmbeddedVTableLength(int32_t len) {
-  SetField32<false>(MemberOffset(EmbeddedVTableLengthOffset()), len);
+  SetField32</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
+      MemberOffset(EmbeddedVTableLengthOffset()), len);
 }
 
 inline ImTable* Class::GetImt(PointerSize pointer_size) {
@@ -352,7 +356,8 @@
 }
 
 inline void Class::SetImt(ImTable* imt, PointerSize pointer_size) {
-  return SetFieldPtrWithSize<false>(ImtPtrOffset(pointer_size), imt, pointer_size);
+  return SetFieldPtrWithSize</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
+      ImtPtrOffset(pointer_size), imt, pointer_size);
 }
 
 inline MemberOffset Class::EmbeddedVTableEntryOffset(uint32_t i, PointerSize pointer_size) {
@@ -366,7 +371,8 @@
 
 inline void Class::SetEmbeddedVTableEntryUnchecked(
     uint32_t i, ArtMethod* method, PointerSize pointer_size) {
-  SetFieldPtrWithSize<false>(EmbeddedVTableEntryOffset(i, pointer_size), method, pointer_size);
+  SetFieldPtrWithSize</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
+      EmbeddedVTableEntryOffset(i, pointer_size), method, pointer_size);
 }
 
 inline void Class::SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, PointerSize pointer_size) {
@@ -549,6 +555,22 @@
       access_to, method, dex_cache, method_idx, throw_invoke_type);
 }
 
+inline bool Class::IsObsoleteVersionOf(ObjPtr<Class> klass) {
+  DCHECK(!klass->IsObsoleteObject()) << klass->PrettyClass() << " is obsolete!";
+  if (LIKELY(!IsObsoleteObject())) {
+    return false;
+  }
+  ObjPtr<Class> current(klass);
+  do {
+    if (UNLIKELY(current == this)) {
+      return true;
+    } else {
+      current = current->GetObsoleteClass();
+    }
+  } while (!current.IsNull());
+  return false;
+}
+
 inline bool Class::IsSubClass(ObjPtr<Class> klass) {
   // Since the SubtypeCheck::IsSubtypeOf needs to lookup the Depth,
   // it is always O(Depth) in terms of speed to do the check.
@@ -654,7 +676,8 @@
 
 inline void Class::SetIfTable(ObjPtr<IfTable> new_iftable) {
   DCHECK(new_iftable != nullptr) << PrettyClass(this);
-  SetFieldObject<false>(IfTableOffset(), new_iftable);
+  SetFieldObject</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
+      IfTableOffset(), new_iftable);
 }
 
 inline LengthPrefixedArray<ArtField>* Class::GetIFieldsPtr() {
@@ -845,7 +868,10 @@
       return false;
     }
     ++match;
-    klass = klass->GetComponentType();
+    // No read barrier needed, we're reading a chain of constant references for comparison
+    // with null. Then we follow up below with reading constant references to read constant
+    // primitive data in both proxy and non-proxy paths. See ReadBarrierOption.
+    klass = klass->GetComponentType<kDefaultVerifyFlags, kWithoutReadBarrier>();
   }
   if (klass->IsPrimitive()) {
     return strcmp(Primitive::Descriptor(klass->GetPrimitiveType()), match) == 0;
@@ -899,8 +925,15 @@
   klass->SetDexClassDefIndex(DexFile::kDexNoIndex16);  // Default to no valid class def index.
   klass->SetDexTypeIndex(dex::TypeIndex(DexFile::kDexNoIndex16));  // Default to no valid type
                                                                    // index.
-  // Default to force slow path until initialized.
-  klass->SetObjectSizeAllocFastPath(std::numeric_limits<uint32_t>::max());
+  // Default to force slow path until visibly initialized.
+  // There is no need for release store (volatile) in pre-fence visitor.
+  klass->SetField32</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
+      ObjectSizeAllocFastPathOffset(), std::numeric_limits<uint32_t>::max());
+}
+
+inline void Class::SetAccessFlagsDuringLinking(uint32_t new_access_flags) {
+  SetField32</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
+      AccessFlagsOffset(), new_access_flags);
 }
 
 inline void Class::SetAccessFlags(uint32_t new_access_flags) {
@@ -916,11 +949,8 @@
 }
 
 inline void Class::SetClassFlags(uint32_t new_flags) {
-  if (Runtime::Current()->IsActiveTransaction()) {
-    SetField32<true>(OFFSET_OF_OBJECT_MEMBER(Class, class_flags_), new_flags);
-  } else {
-    SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, class_flags_), new_flags);
-  }
+  SetField32</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
+      OFFSET_OF_OBJECT_MEMBER(Class, class_flags_), new_flags);
 }
 
 inline uint32_t Class::NumDirectInterfaces() {
@@ -987,10 +1017,6 @@
   return MakeIterationRangeFromLengthPrefixedArray(GetSFieldsPtrUnchecked());
 }
 
-inline MemberOffset Class::EmbeddedVTableOffset(PointerSize pointer_size) {
-  return MemberOffset(ImtPtrOffset(pointer_size).Uint32Value() + static_cast<size_t>(pointer_size));
-}
-
 inline void Class::CheckPointerSize(PointerSize pointer_size) {
   DCHECK_EQ(pointer_size, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
 }
@@ -1004,7 +1030,8 @@
   DCHECK(GetComponentType() == nullptr);
   DCHECK(new_component_type != nullptr);
   // Component type is invariant: use non-transactional mode without check.
-  SetFieldObject<false, false>(ComponentTypeOffset(), new_component_type);
+  SetFieldObject</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
+      ComponentTypeOffset(), new_component_type);
 }
 
 inline size_t Class::GetComponentSize() {
@@ -1172,14 +1199,9 @@
   return component->IsPrimitive() || component->CannotBeAssignedFromOtherTypes();
 }
 
-template <bool kCheckTransaction>
 inline void Class::SetClassLoader(ObjPtr<ClassLoader> new_class_loader) {
-  if (kCheckTransaction && Runtime::Current()->IsActiveTransaction()) {
-    SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader);
-  } else {
-    DCHECK(!Runtime::Current()->IsActiveTransaction());
-    SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader);
-  }
+  SetFieldObject</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
+      OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader);
 }
 
 inline void Class::SetRecursivelyInitialized() {
@@ -1191,7 +1213,7 @@
 inline void Class::SetHasDefaultMethods() {
   DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId());
   uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
-  SetAccessFlags(flags | kAccHasDefaultMethod);
+  SetAccessFlagsDuringLinking(flags | kAccHasDefaultMethod);
 }
 
 }  // namespace mirror
diff --git a/runtime/mirror/class-refvisitor-inl.h b/runtime/mirror/class-refvisitor-inl.h
index 263b774..8c85387 100644
--- a/runtime/mirror/class-refvisitor-inl.h
+++ b/runtime/mirror/class-refvisitor-inl.h
@@ -53,20 +53,14 @@
 
 template<ReadBarrierOption kReadBarrierOption, class Visitor>
 void Class::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) {
-  for (ArtField& field : GetSFieldsUnchecked()) {
-    // Visit roots first in case the declaring class gets moved.
-    field.VisitRoots(visitor);
+  VisitFields<kReadBarrierOption>([&](ArtField* field) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    field->VisitRoots(visitor);
     if (kIsDebugBuild && IsResolved()) {
-      CHECK_EQ(field.GetDeclaringClass<kReadBarrierOption>(), this) << GetStatus();
+      CHECK_EQ(field->GetDeclaringClass<kReadBarrierOption>(), this)
+          << GetStatus() << field->GetDeclaringClass()->PrettyClass() << " != " << PrettyClass();
     }
-  }
-  for (ArtField& field : GetIFieldsUnchecked()) {
-    // Visit roots first in case the declaring class gets moved.
-    field.VisitRoots(visitor);
-    if (kIsDebugBuild && IsResolved()) {
-      CHECK_EQ(field.GetDeclaringClass<kReadBarrierOption>(), this) << GetStatus();
-    }
-  }
+  });
+  // Don't use VisitMethods because we don't want to hit the class-ext methods twice.
   for (ArtMethod& method : GetMethods(pointer_size)) {
     method.VisitRoots<kReadBarrierOption>(visitor, pointer_size);
   }
@@ -76,6 +70,27 @@
   }
 }
 
+template<ReadBarrierOption kReadBarrierOption, class Visitor>
+void Class::VisitMethods(Visitor visitor, PointerSize pointer_size) {
+  for (ArtMethod& method : GetMethods(pointer_size)) {
+    visitor(&method);
+  }
+  ObjPtr<ClassExt> ext(GetExtData<kDefaultVerifyFlags, kReadBarrierOption>());
+  if (!ext.IsNull()) {
+    ext->VisitMethods<kReadBarrierOption, Visitor>(visitor, pointer_size);
+  }
+}
+
+template<ReadBarrierOption kReadBarrierOption, class Visitor>
+void Class::VisitFields(Visitor visitor) {
+  for (ArtField& sfield : GetSFieldsUnchecked()) {
+    visitor(&sfield);
+  }
+  for (ArtField& ifield : GetIFieldsUnchecked()) {
+    visitor(&ifield);
+  }
+}
+
 }  // namespace mirror
 }  // namespace art
 
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index ec07a50..a7f013c 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -16,14 +16,20 @@
 
 #include "class.h"
 
+#include <unordered_set>
+#include <string_view>
+
+#include "android-base/macros.h"
 #include "android-base/stringprintf.h"
 
+#include "array-inl.h"
 #include "art_field-inl.h"
 #include "art_method-inl.h"
+#include "base/enums.h"
 #include "base/logging.h"  // For VLOG.
 #include "base/utils.h"
 #include "class-inl.h"
-#include "class_ext.h"
+#include "class_ext-inl.h"
 #include "class_linker-inl.h"
 #include "class_loader.h"
 #include "class_root.h"
@@ -36,6 +42,7 @@
 #include "gc/heap-inl.h"
 #include "handle_scope-inl.h"
 #include "hidden_api.h"
+#include "jni_id_type.h"
 #include "subtype_check.h"
 #include "method.h"
 #include "object-inl.h"
@@ -58,6 +65,49 @@
 
 using android::base::StringPrintf;
 
+bool Class::IsMirrored() {
+  if (LIKELY(!IsBootStrapClassLoaded())) {
+    return false;
+  }
+  if (IsPrimitive() || IsArrayClass() || IsProxyClass()) {
+    return true;
+  }
+  // TODO Have this list automatically populated.
+  std::unordered_set<std::string_view> mirror_types = {
+    "Ljava/lang/Class;",
+    "Ljava/lang/ClassLoader;",
+    "Ljava/lang/ClassNotFoundException;",
+    "Ljava/lang/DexCache;",
+    "Ljava/lang/Object;",
+    "Ljava/lang/StackTraceElement;",
+    "Ljava/lang/String;",
+    "Ljava/lang/Throwable;",
+    "Ljava/lang/invoke/ArrayElementVarHandle;",
+    "Ljava/lang/invoke/ByteArrayViewVarHandle;",
+    "Ljava/lang/invoke/ByteBufferViewVarHandle;",
+    "Ljava/lang/invoke/CallSite;",
+    "Ljava/lang/invoke/FieldVarHandle;",
+    "Ljava/lang/invoke/MethodHandle;",
+    "Ljava/lang/invoke/MethodHandleImpl;",
+    "Ljava/lang/invoke/MethodHandles$Lookup;",
+    "Ljava/lang/invoke/MethodType;",
+    "Ljava/lang/invoke/VarHandle;",
+    "Ljava/lang/ref/FinalizerReference;",
+    "Ljava/lang/ref/Reference;",
+    "Ljava/lang/reflect/AccessibleObject;",
+    "Ljava/lang/reflect/Constructor;",
+    "Ljava/lang/reflect/Executable;",
+    "Ljava/lang/reflect/Field;",
+    "Ljava/lang/reflect/Method;",
+    "Ljava/lang/reflect/Proxy;",
+    "Ldalvik/system/ClassExt;",
+    "Ldalvik/system/EmulatedStackFrame;",
+  };
+  std::string name_storage;
+  const std::string name(this->GetDescriptor(&name_storage));
+  return mirror_types.find(name) != mirror_types.end();
+}
+
 ObjPtr<mirror::Class> Class::GetPrimitiveClass(ObjPtr<mirror::String> name) {
   const char* expected_name = nullptr;
   ClassRoot class_root = ClassRoot::kJavaLangObject;  // Invalid.
@@ -94,14 +144,12 @@
   }
 }
 
-ObjPtr<ClassExt> Class::EnsureExtDataPresent(Thread* self) {
-  ObjPtr<ClassExt> existing(GetExtData());
+ObjPtr<ClassExt> Class::EnsureExtDataPresent(Handle<Class> h_this, Thread* self) {
+  ObjPtr<ClassExt> existing(h_this->GetExtData());
   if (!existing.IsNull()) {
     return existing;
   }
-  StackHandleScope<3> hs(self);
-  // Handlerize 'this' since we are allocating here.
-  Handle<Class> h_this(hs.NewHandle(this));
+  StackHandleScope<2> hs(self);
   // Clear exception so we can allocate.
   Handle<Throwable> throwable(hs.NewHandle(self->GetException()));
   self->ClearException();
@@ -140,24 +188,64 @@
   }
 }
 
+template <typename T>
+static void CheckSetStatus(Thread* self, T thiz, ClassStatus new_status, ClassStatus old_status)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (UNLIKELY(new_status <= old_status && new_status != ClassStatus::kErrorUnresolved &&
+               new_status != ClassStatus::kErrorResolved && new_status != ClassStatus::kRetired)) {
+    LOG(FATAL) << "Unexpected change back of class status for " << thiz->PrettyClass() << " "
+               << old_status << " -> " << new_status;
+  }
+  if (old_status == ClassStatus::kInitialized) {
+    // We do not hold the lock for making the class visibly initialized
+    // as this is unnecessary and could lead to deadlocks.
+    CHECK_EQ(new_status, ClassStatus::kVisiblyInitialized);
+  } else if ((new_status >= ClassStatus::kResolved || old_status >= ClassStatus::kResolved) &&
+             !Locks::mutator_lock_->IsExclusiveHeld(self)) {
+    // When classes are being resolved the resolution code should hold the
+    // lock or have everything else suspended
+    CHECK_EQ(thiz->GetLockOwnerThreadId(), self->GetThreadId())
+        << "Attempt to change status of class while not holding its lock: " << thiz->PrettyClass()
+        << " " << old_status << " -> " << new_status;
+  }
+  if (UNLIKELY(Locks::mutator_lock_->IsExclusiveHeld(self))) {
+    CHECK(!Class::IsErroneous(new_status))
+        << "status " << new_status
+        << " cannot be set while suspend-all is active. Would require allocations.";
+    CHECK(thiz->IsResolved())
+        << thiz->PrettyClass()
+        << " not resolved during suspend-all status change. Waiters might be missed!";
+  }
+}
+
+void Class::SetStatusInternal(ClassStatus new_status) {
+  if (kBitstringSubtypeCheckEnabled) {
+    // FIXME: This looks broken with respect to aborted transactions.
+    SubtypeCheck<ObjPtr<mirror::Class>>::WriteStatus(this, new_status);
+  } else {
+    // The ClassStatus is always in the 4 most-significant bits of status_.
+    static_assert(sizeof(status_) == sizeof(uint32_t), "Size of status_ not equal to uint32");
+    uint32_t new_status_value = static_cast<uint32_t>(new_status) << (32 - kClassStatusBitSize);
+    if (Runtime::Current()->IsActiveTransaction()) {
+      SetField32Volatile<true>(StatusOffset(), new_status_value);
+    } else {
+      SetField32Volatile<false>(StatusOffset(), new_status_value);
+    }
+  }
+}
+
+void Class::SetStatusLocked(ClassStatus new_status) {
+  ClassStatus old_status = GetStatus();
+  CheckSetStatus(Thread::Current(), this, new_status, old_status);
+  SetStatusInternal(new_status);
+}
+
 void Class::SetStatus(Handle<Class> h_this, ClassStatus new_status, Thread* self) {
   ClassStatus old_status = h_this->GetStatus();
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   bool class_linker_initialized = class_linker != nullptr && class_linker->IsInitialized();
   if (LIKELY(class_linker_initialized)) {
-    if (UNLIKELY(new_status <= old_status &&
-                 new_status != ClassStatus::kErrorUnresolved &&
-                 new_status != ClassStatus::kErrorResolved &&
-                 new_status != ClassStatus::kRetired)) {
-      LOG(FATAL) << "Unexpected change back of class status for " << h_this->PrettyClass()
-                 << " " << old_status << " -> " << new_status;
-    }
-    if (new_status >= ClassStatus::kResolved || old_status >= ClassStatus::kResolved) {
-      // When classes are being resolved the resolution code should hold the lock.
-      CHECK_EQ(h_this->GetLockOwnerThreadId(), self->GetThreadId())
-            << "Attempt to change status of class while not holding its lock: "
-            << h_this->PrettyClass() << " " << old_status << " -> " << new_status;
-    }
+    CheckSetStatus(self, h_this, new_status, old_status);
   }
   if (UNLIKELY(IsErroneous(new_status))) {
     CHECK(!h_this->IsErroneous())
@@ -172,7 +260,7 @@
       }
     }
 
-    ObjPtr<ClassExt> ext(h_this->EnsureExtDataPresent(self));
+    ObjPtr<ClassExt> ext(EnsureExtDataPresent(h_this, self));
     if (!ext.IsNull()) {
       self->AssertPendingException();
       ext->SetVerifyError(self->GetException());
@@ -182,25 +270,12 @@
     self->AssertPendingException();
   }
 
-  if (kBitstringSubtypeCheckEnabled) {
-    // FIXME: This looks broken with respect to aborted transactions.
-    ObjPtr<mirror::Class> h_this_ptr = h_this.Get();
-    SubtypeCheck<ObjPtr<mirror::Class>>::WriteStatus(h_this_ptr, new_status);
-  } else {
-    // The ClassStatus is always in the 4 most-significant bits of status_.
-    static_assert(sizeof(status_) == sizeof(uint32_t), "Size of status_ not equal to uint32");
-    uint32_t new_status_value = static_cast<uint32_t>(new_status) << (32 - kClassStatusBitSize);
-    if (Runtime::Current()->IsActiveTransaction()) {
-      h_this->SetField32Volatile<true>(StatusOffset(), new_status_value);
-    } else {
-      h_this->SetField32Volatile<false>(StatusOffset(), new_status_value);
-    }
-  }
+  h_this->SetStatusInternal(new_status);
 
   // Setting the object size alloc fast path needs to be after the status write so that if the
   // alloc path sees a valid object size, we would know that it's initialized as long as it has a
   // load-acquire/fake dependency.
-  if (new_status == ClassStatus::kInitialized && !h_this->IsVariableSize()) {
+  if (new_status == ClassStatus::kVisiblyInitialized && !h_this->IsVariableSize()) {
     DCHECK_EQ(h_this->GetObjectSizeAllocFastPath(), std::numeric_limits<uint32_t>::max());
     // Finalizable objects must always go slow path.
     if (!h_this->IsFinalizable()) {
@@ -226,6 +301,10 @@
       if (new_status == ClassStatus::kRetired || new_status == ClassStatus::kErrorUnresolved) {
         h_this->NotifyAll(self);
       }
+    } else if (old_status == ClassStatus::kInitialized) {
+      // Do not notify for transition from kInitialized to ClassStatus::kVisiblyInitialized.
+      // This is a hidden transition, not observable by bytecode.
+      DCHECK_EQ(new_status, ClassStatus::kVisiblyInitialized);  // Already CHECK()ed above.
     } else {
       CHECK_NE(new_status, ClassStatus::kRetired);
       if (old_status >= ClassStatus::kResolved || new_status >= ClassStatus::kResolved) {
@@ -235,6 +314,38 @@
   }
 }
 
+void Class::SetStatusForPrimitiveOrArray(ClassStatus new_status) {
+  DCHECK(IsPrimitive<kVerifyNone>() || IsArrayClass<kVerifyNone>());
+  DCHECK(!IsErroneous(new_status));
+  DCHECK(!IsErroneous(GetStatus<kVerifyNone>()));
+  DCHECK_GT(new_status, GetStatus<kVerifyNone>());
+
+  if (kBitstringSubtypeCheckEnabled) {
+    LOG(FATAL) << "Unimplemented";
+  }
+  // The ClassStatus is always in the 4 most-significant bits of status_.
+  static_assert(sizeof(status_) == sizeof(uint32_t), "Size of status_ not equal to uint32");
+  uint32_t new_status_value = static_cast<uint32_t>(new_status) << (32 - kClassStatusBitSize);
+  // Use normal store. For primitives and core arrays classes (Object[],
+  // Class[], String[] and primitive arrays), the status is set while the
+  // process is still single threaded. For other arrays classes, it is set
+  // in a pre-fence visitor which initializes all fields and the subsequent
+  // fence together with address dependency shall ensure memory visibility.
+  SetField32</*kTransactionActive=*/ false,
+             /*kCheckTransaction=*/ false,
+             kVerifyNone>(StatusOffset(), new_status_value);
+
+  // Do not update `object_alloc_fast_path_`. Arrays are variable size and
+  // instances of primitive classes cannot be created at all.
+
+  if (kIsDebugBuild && new_status >= ClassStatus::kInitialized) {
+    CHECK(WasVerificationAttempted()) << PrettyClassAndClassLoader();
+  }
+
+  // There can be no waiters to notify as these classes are initialized
+  // before another thread can see them.
+}
+
 void Class::SetDexCache(ObjPtr<DexCache> new_dex_cache) {
   SetFieldObjectTransaction(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), new_dex_cache);
 }
@@ -245,7 +356,17 @@
     LOG(FATAL_WITHOUT_ABORT) << new_class_size << " vs " << GetClassSize();
     LOG(FATAL) << "class=" << PrettyTypeOf();
   }
-  SetField32Transaction(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), new_class_size);
+  SetField32</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
+      OFFSET_OF_OBJECT_MEMBER(Class, class_size_), new_class_size);
+}
+
+ObjPtr<Class> Class::GetObsoleteClass() {
+  ObjPtr<ClassExt> ext(GetExtData());
+  if (ext.IsNull()) {
+    return nullptr;
+  } else {
+    return ext->GetObsoleteClass();
+  }
 }
 
 // Return the class' name. The exact format is bizarre, but it's the specified behavior for
@@ -989,6 +1110,33 @@
   return nullptr;
 }
 
+void Class::ClearSkipAccessChecksFlagOnAllMethods(PointerSize pointer_size) {
+  DCHECK(IsVerified());
+  for (auto& m : GetMethods(pointer_size)) {
+    if (!m.IsNative() && m.IsInvokable()) {
+      m.ClearSkipAccessChecks();
+    }
+  }
+}
+
+void Class::ClearMustCountLocksFlagOnAllMethods(PointerSize pointer_size) {
+  DCHECK(IsVerified());
+  for (auto& m : GetMethods(pointer_size)) {
+    if (!m.IsNative() && m.IsInvokable()) {
+      m.ClearMustCountLocks();
+    }
+  }
+}
+
+void Class::ClearDontCompileFlagOnAllMethods(PointerSize pointer_size) {
+  DCHECK(IsVerified());
+  for (auto& m : GetMethods(pointer_size)) {
+    if (!m.IsNative() && m.IsInvokable()) {
+      m.ClearDontCompile();
+    }
+  }
+}
+
 void Class::SetSkipAccessChecksFlagOnAllMethods(PointerSize pointer_size) {
   DCHECK(IsVerified());
   for (auto& m : GetMethods(pointer_size)) {
@@ -1205,12 +1353,13 @@
   DISALLOW_COPY_AND_ASSIGN(CopyClassVisitor);
 };
 
-ObjPtr<Class> Class::CopyOf(
-    Thread* self, int32_t new_length, ImTable* imt, PointerSize pointer_size) {
+ObjPtr<Class> Class::CopyOf(Handle<Class> h_this,
+                            Thread* self,
+                            int32_t new_length,
+                            ImTable* imt,
+                            PointerSize pointer_size) {
   DCHECK_GE(new_length, static_cast<int32_t>(sizeof(Class)));
   // We may get copied by a compacting GC.
-  StackHandleScope<1> hs(self);
-  Handle<Class> h_this(hs.NewHandle(this));
   Runtime* runtime = Runtime::Current();
   gc::Heap* heap = runtime->GetHeap();
   // The num_bytes (3rd param) is sizeof(Class) as opposed to SizeOf()
@@ -1218,8 +1367,8 @@
   CopyClassVisitor visitor(self, &h_this, new_length, sizeof(Class), imt, pointer_size);
   ObjPtr<mirror::Class> java_lang_Class = GetClassRoot<mirror::Class>(runtime->GetClassLinker());
   ObjPtr<Object> new_class = kMovingClasses ?
-      heap->AllocObject<true>(self, java_lang_Class, new_length, visitor) :
-      heap->AllocNonMovableObject<true>(self, java_lang_Class, new_length, visitor);
+      heap->AllocObject(self, java_lang_Class, new_length, visitor) :
+      heap->AllocNonMovableObject(self, java_lang_Class, new_length, visitor);
   if (UNLIKELY(new_class == nullptr)) {
     self->AssertPendingOOMException();
     return nullptr;
@@ -1496,6 +1645,12 @@
 
 std::string Class::PrettyClass() {
   std::string result;
+  if (IsObsoleteObject()) {
+    result += "(Obsolete)";
+  }
+  if (IsRetired()) {
+    result += "(Retired)";
+  }
   result += "java.lang.Class<";
   result += PrettyDescriptor();
   result += ">";
@@ -1548,5 +1703,120 @@
         (new_access_flags & kAccVerificationAttempted) != 0);
 }
 
+ObjPtr<Object> Class::GetMethodIds() {
+  ObjPtr<ClassExt> ext(GetExtData());
+  if (ext.IsNull()) {
+    return nullptr;
+  } else {
+    return ext->GetJMethodIDs();
+  }
+}
+bool Class::EnsureMethodIds(Handle<Class> h_this) {
+  DCHECK_NE(Runtime::Current()->GetJniIdType(), JniIdType::kPointer) << "JNI Ids are pointers!";
+  Thread* self = Thread::Current();
+  ObjPtr<ClassExt> ext(EnsureExtDataPresent(h_this, self));
+  if (ext.IsNull()) {
+    self->AssertPendingOOMException();
+    return false;
+  }
+  return ext->EnsureJMethodIDsArrayPresent(h_this->NumMethods());
+}
+
+ObjPtr<Object> Class::GetStaticFieldIds() {
+  ObjPtr<ClassExt> ext(GetExtData());
+  if (ext.IsNull()) {
+    return nullptr;
+  } else {
+    return ext->GetStaticJFieldIDs();
+  }
+}
+bool Class::EnsureStaticFieldIds(Handle<Class> h_this) {
+  DCHECK_NE(Runtime::Current()->GetJniIdType(), JniIdType::kPointer) << "JNI Ids are pointers!";
+  Thread* self = Thread::Current();
+  ObjPtr<ClassExt> ext(EnsureExtDataPresent(h_this, self));
+  if (ext.IsNull()) {
+    self->AssertPendingOOMException();
+    return false;
+  }
+  return ext->EnsureStaticJFieldIDsArrayPresent(h_this->NumStaticFields());
+}
+ObjPtr<Object> Class::GetInstanceFieldIds() {
+  ObjPtr<ClassExt> ext(GetExtData());
+  if (ext.IsNull()) {
+    return nullptr;
+  } else {
+    return ext->GetInstanceJFieldIDs();
+  }
+}
+bool Class::EnsureInstanceFieldIds(Handle<Class> h_this) {
+  DCHECK_NE(Runtime::Current()->GetJniIdType(), JniIdType::kPointer) << "JNI Ids are pointers!";
+  Thread* self = Thread::Current();
+  ObjPtr<ClassExt> ext(EnsureExtDataPresent(h_this, self));
+  if (ext.IsNull()) {
+    self->AssertPendingOOMException();
+    return false;
+  }
+  return ext->EnsureInstanceJFieldIDsArrayPresent(h_this->NumInstanceFields());
+}
+
+size_t Class::GetStaticFieldIdOffset(ArtField* field) {
+  DCHECK_LT(reinterpret_cast<uintptr_t>(field),
+            reinterpret_cast<uintptr_t>(&*GetSFieldsPtr()->end()))
+      << "field not part of the current class. " << field->PrettyField() << " class is "
+      << PrettyClass();
+  DCHECK_GE(reinterpret_cast<uintptr_t>(field),
+            reinterpret_cast<uintptr_t>(&*GetSFieldsPtr()->begin()))
+      << "field not part of the current class. " << field->PrettyField() << " class is "
+      << PrettyClass();
+  uintptr_t start = reinterpret_cast<uintptr_t>(&GetSFieldsPtr()->At(0));
+  uintptr_t fld = reinterpret_cast<uintptr_t>(field);
+  size_t res = (fld - start) / sizeof(ArtField);
+  DCHECK_EQ(&GetSFieldsPtr()->At(res), field)
+      << "Incorrect field computation expected: " << field->PrettyField()
+      << " got: " << GetSFieldsPtr()->At(res).PrettyField();
+  return res;
+}
+
+size_t Class::GetInstanceFieldIdOffset(ArtField* field) {
+  DCHECK_LT(reinterpret_cast<uintptr_t>(field),
+            reinterpret_cast<uintptr_t>(&*GetIFieldsPtr()->end()))
+      << "field not part of the current class. " << field->PrettyField() << " class is "
+      << PrettyClass();
+  DCHECK_GE(reinterpret_cast<uintptr_t>(field),
+            reinterpret_cast<uintptr_t>(&*GetIFieldsPtr()->begin()))
+      << "field not part of the current class. " << field->PrettyField() << " class is "
+      << PrettyClass();
+  uintptr_t start = reinterpret_cast<uintptr_t>(&GetIFieldsPtr()->At(0));
+  uintptr_t fld = reinterpret_cast<uintptr_t>(field);
+  size_t res = (fld - start) / sizeof(ArtField);
+  DCHECK_EQ(&GetIFieldsPtr()->At(res), field)
+      << "Incorrect field computation expected: " << field->PrettyField()
+      << " got: " << GetIFieldsPtr()->At(res).PrettyField();
+  return res;
+}
+
+size_t Class::GetMethodIdOffset(ArtMethod* method, PointerSize pointer_size) {
+  DCHECK(GetMethodsSlice(kRuntimePointerSize).Contains(method))
+      << "method not part of the current class. " << method->PrettyMethod() << "( " << reinterpret_cast<void*>(method) << ")" << " class is "
+      << PrettyClass() << [&]() REQUIRES_SHARED(Locks::mutator_lock_) {
+        std::ostringstream os;
+        os << " Methods are [";
+        for (ArtMethod& m : GetMethodsSlice(kRuntimePointerSize)) {
+          os << m.PrettyMethod() << "( " << reinterpret_cast<void*>(&m) << "), ";
+        }
+        os << "]";
+        return os.str();
+      }();
+  uintptr_t start = reinterpret_cast<uintptr_t>(&*GetMethodsSlice(pointer_size).begin());
+  uintptr_t fld = reinterpret_cast<uintptr_t>(method);
+  size_t art_method_size = ArtMethod::Size(pointer_size);
+  size_t art_method_align = ArtMethod::Alignment(pointer_size);
+  size_t res = (fld - start) / art_method_size;
+  DCHECK_EQ(&GetMethodsPtr()->At(res, art_method_size, art_method_align), method)
+      << "Incorrect method computation expected: " << method->PrettyMethod()
+      << " got: " << GetMethodsPtr()->At(res, art_method_size, art_method_align).PrettyMethod();
+  return res;
+}
+
 }  // namespace mirror
 }  // namespace art
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index ac5d52d..30a64b1 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -21,13 +21,11 @@
 
 #include "base/bit_utils.h"
 #include "base/casts.h"
-#include "base/stride_iterator.h"
 #include "class_flags.h"
 #include "class_status.h"
 #include "dex/dex_file_types.h"
 #include "dex/modifiers.h"
 #include "dex/primitive.h"
-#include "gc/allocator_type.h"
 #include "object.h"
 #include "object_array.h"
 #include "read_barrier_option.h"
@@ -39,10 +37,18 @@
 class TypeList;
 }  // namespace dex
 
+namespace gc {
+enum AllocatorType : char;
+}  // namespace gc
+
 namespace hiddenapi {
 class AccessContext;
 }  // namespace hiddenapi
 
+namespace linker {
+class ImageWriter;
+}  // namespace linker
+
 template<typename T> class ArraySlice;
 class ArtField;
 class ArtMethod;
@@ -55,6 +61,7 @@
 template<typename T> class LengthPrefixedArray;
 enum class PointerSize : size_t;
 class Signature;
+template<typename T> class StrideIterator;
 template<size_t kNumReferences> class PACKED(4) StackHandleScope;
 class Thread;
 
@@ -86,18 +93,28 @@
   static constexpr uint32_t kPrimitiveTypeSizeShiftShift = 16;
   static constexpr uint32_t kPrimitiveTypeMask = (1u << kPrimitiveTypeSizeShiftShift) - 1;
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           bool kWithSynchronizationBarrier = true>
   ClassStatus GetStatus() REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Reading the field without barrier is used exclusively for IsVisiblyInitialized().
+    int32_t field_value = kWithSynchronizationBarrier
+        ? GetField32Volatile<kVerifyFlags>(StatusOffset())
+        : GetField32<kVerifyFlags>(StatusOffset());
     // Avoid including "subtype_check_bits_and_status.h" to get the field.
     // The ClassStatus is always in the 4 most-significant bits of status_.
-    return enum_cast<ClassStatus>(
-        static_cast<uint32_t>(GetField32Volatile<kVerifyFlags>(StatusOffset())) >> (32 - 4));
+    return enum_cast<ClassStatus>(static_cast<uint32_t>(field_value) >> (32 - 4));
   }
 
   // This is static because 'this' may be moved by GC.
   static void SetStatus(Handle<Class> h_this, ClassStatus new_status, Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
+  // Used for structural redefinition to directly set the class-status while
+  // holding a strong mutator-lock.
+  void SetStatusLocked(ClassStatus new_status) REQUIRES(Locks::mutator_lock_);
+
+  void SetStatusForPrimitiveOrArray(ClassStatus new_status) REQUIRES_SHARED(Locks::mutator_lock_);
+
   static constexpr MemberOffset StatusOffset() {
     return MemberOffset(OFFSET_OF_OBJECT_MEMBER(Class, status_));
   }
@@ -156,6 +173,13 @@
     return GetStatus<kVerifyFlags>() == ClassStatus::kRetryVerificationAtRuntime;
   }
 
+  // Returns true if the class has been verified at compile time, but should be
+  // executed with access checks.
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  bool IsVerifiedNeedsAccessChecks() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return GetStatus<kVerifyFlags>() >= ClassStatus::kVerifiedNeedsAccessChecks;
+  }
+
   // Returns true if the class has been verified.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsVerified() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -171,9 +195,20 @@
   // Returns true if the class is initialized.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsInitialized() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return GetStatus<kVerifyFlags>() == ClassStatus::kInitialized;
+    return GetStatus<kVerifyFlags>() >= ClassStatus::kInitialized;
   }
 
+  // Returns true if the class is visibly initialized.
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  bool IsVisiblyInitialized() REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Note: Avoiding the synchronization barrier for the visibly initialized check.
+    ClassStatus status = GetStatus<kVerifyFlags, /*kWithSynchronizationBarrier=*/ false>();
+    return status == ClassStatus::kVisiblyInitialized;
+  }
+
+  // Returns true if this class is ever accessed through a C++ mirror.
+  bool IsMirrored() REQUIRES_SHARED(Locks::mutator_lock_);
+
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   ALWAYS_INLINE uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_) {
     if (kIsDebugBuild) {
@@ -192,6 +227,10 @@
   }
   void SetClassFlags(uint32_t new_flags) REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Set access flags during linking, these cannot be rolled back by a Transaction.
+  void SetAccessFlagsDuringLinking(uint32_t new_access_flags) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Set access flags, recording the change if running inside a Transaction.
   void SetAccessFlags(uint32_t new_access_flags) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Returns true if the class is an enum.
@@ -234,7 +273,7 @@
 
   ALWAYS_INLINE void SetFinalizable() REQUIRES_SHARED(Locks::mutator_lock_) {
     uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
-    SetAccessFlags(flags | kAccClassIsFinalizable);
+    SetAccessFlagsDuringLinking(flags | kAccClassIsFinalizable);
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -296,6 +335,17 @@
     }
   }
 
+  bool IsObsoleteObject() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return (GetAccessFlags() & kAccObsoleteObject) != 0;
+  }
+
+  void SetObsoleteObject() REQUIRES_SHARED(Locks::mutator_lock_) {
+    uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
+    if ((flags & kAccObsoleteObject) == 0) {
+      SetAccessFlags(flags | kAccObsoleteObject);
+    }
+  }
+
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsTypeOfReferenceClass() REQUIRES_SHARED(Locks::mutator_lock_) {
     return (GetClassFlags<kVerifyFlags>() & kClassFlagReference) != 0;
@@ -368,7 +418,8 @@
     DCHECK_EQ(v32 & kPrimitiveTypeMask, v32) << "upper 16 bits aren't zero";
     // Store the component size shift in the upper 16 bits.
     v32 |= Primitive::ComponentSizeShift(new_type) << kPrimitiveTypeSizeShiftShift;
-    SetField32Transaction(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_), v32);
+    SetField32</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
+        OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_), v32);
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -463,8 +514,21 @@
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool IsPrimitiveArray() REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Enum used to control whether we try to add a finalizer-reference for object alloc or not.
+  enum class AddFinalizer {
+    // Don't create a finalizer reference regardless of what the class-flags say.
+    kNoAddFinalizer,
+    // Use the class-flags to figure out if we should make a finalizer reference.
+    kUseClassTag,
+  };
+
   // Creates a raw object instance but does not invoke the default constructor.
-  template<bool kIsInstrumented, bool kCheckAddFinalizer = true>
+  // kCheckAddFinalizer controls whether we use a DCHECK to sanity check that we create a
+  // finalizer-reference if needed. This should only be disabled when doing structural class
+  // redefinition.
+  template <bool kIsInstrumented = true,
+            AddFinalizer kAddFinalizer = AddFinalizer::kUseClassTag,
+            bool kCheckAddFinalizer = true>
   ALWAYS_INLINE ObjPtr<Object> Alloc(Thread* self, gc::AllocatorType allocator_type)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
@@ -586,6 +650,11 @@
   // to themselves. Classes for primitive types may not assign to each other.
   ALWAYS_INLINE bool IsAssignableFrom(ObjPtr<Class> src) REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Checks if 'klass' is a redefined version of this.
+  bool IsObsoleteVersionOf(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  ObjPtr<Class> GetObsoleteClass() REQUIRES_SHARED(Locks::mutator_lock_);
+
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ALWAYS_INLINE ObjPtr<Class> GetSuperClass() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -606,7 +675,6 @@
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ObjPtr<ClassLoader> GetClassLoader() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
 
-  template <bool kCheckTransaction = true>
   void SetClassLoader(ObjPtr<ClassLoader> new_cl) REQUIRES_SHARED(Locks::mutator_lock_);
 
   static constexpr MemberOffset DexCacheOffset() {
@@ -763,6 +831,11 @@
                 static_cast<size_t>(pointer_size)));
   }
 
+  static constexpr MemberOffset EmbeddedVTableOffset(PointerSize pointer_size) {
+    return MemberOffset(
+        ImtPtrOffset(pointer_size).Uint32Value() + static_cast<size_t>(pointer_size));
+  }
+
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   bool ShouldHaveImt() REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -1064,7 +1137,7 @@
   // Returns the ExtData for this class, allocating one if necessary. This should be the only way
   // to force ext_data_ to be set. No functions are available for changing an already set ext_data_
   // since doing so is not allowed.
-  ObjPtr<ClassExt> EnsureExtDataPresent(Thread* self)
+  static ObjPtr<ClassExt> EnsureExtDataPresent(Handle<Class> h_this, Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   uint16_t GetDexClassDefIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1072,7 +1145,8 @@
   }
 
   void SetDexClassDefIndex(uint16_t class_def_idx) REQUIRES_SHARED(Locks::mutator_lock_) {
-    SetField32Transaction(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_), class_def_idx);
+    SetField32</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
+        OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_), class_def_idx);
   }
 
   dex::TypeIndex GetDexTypeIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -1081,7 +1155,8 @@
   }
 
   void SetDexTypeIndex(dex::TypeIndex type_idx) REQUIRES_SHARED(Locks::mutator_lock_) {
-    SetField32Transaction(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_), type_idx.index_);
+    SetField32</*kTransactionActive=*/ false, /*kCheckTransaction=*/ false>(
+        OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_), type_idx.index_);
   }
 
   dex::TypeIndex FindTypeIndexInOtherDexFile(const DexFile& dex_file)
@@ -1093,10 +1168,29 @@
   void VisitNativeRoots(Visitor& visitor, PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Visit ArtMethods directly owned by this class.
+  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
+  void VisitMethods(Visitor visitor, PointerSize pointer_size)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Visit ArtFields directly owned by this class.
+  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
+  void VisitFields(Visitor visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Get one of the primitive classes.
   static ObjPtr<mirror::Class> GetPrimitiveClass(ObjPtr<mirror::String> name)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Clear the kAccMustCountLocks flag on each method, for class redefinition.
+  void ClearMustCountLocksFlagOnAllMethods(PointerSize pointer_size)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  // Clear the kAccCompileDontBother flag on each method, for class redefinition.
+  void ClearDontCompileFlagOnAllMethods(PointerSize pointer_size)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Clear the kAccSkipAccessChecks flag on each method, for class redefinition.
+  void ClearSkipAccessChecksFlagOnAllMethods(PointerSize pointer_size)
+      REQUIRES_SHARED(Locks::mutator_lock_);
   // When class is verified, set the kAccSkipAccessChecks flag on each method.
   void SetSkipAccessChecksFlagOnAllMethods(PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -1137,7 +1231,11 @@
   void AssertInitializedOrInitializingInThread(Thread* self)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ObjPtr<Class> CopyOf(Thread* self, int32_t new_length, ImTable* imt, PointerSize pointer_size)
+  static ObjPtr<Class> CopyOf(Handle<Class> h_this,
+                              Thread* self,
+                              int32_t new_length,
+                              ImTable* imt,
+                              PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
   // For proxy class only.
@@ -1222,6 +1320,26 @@
   void FixupNativePointers(Class* dest, PointerSize pointer_size, const Visitor& visitor)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Get or create the various jni id arrays in a lock-less thread safe manner.
+  static bool EnsureMethodIds(Handle<Class> h_this)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  ObjPtr<Object> GetMethodIds() REQUIRES_SHARED(Locks::mutator_lock_);
+  static bool EnsureStaticFieldIds(Handle<Class> h_this)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  ObjPtr<Object> GetStaticFieldIds() REQUIRES_SHARED(Locks::mutator_lock_);
+  static bool EnsureInstanceFieldIds(Handle<Class> h_this)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  ObjPtr<Object> GetInstanceFieldIds() REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Calculate the index in the ifields_, methods_ or sfields_ arrays a method is located at. This
+  // is to be used with the above Get{,OrCreate}...Ids functions.
+  size_t GetStaticFieldIdOffset(ArtField* field)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  size_t GetInstanceFieldIdOffset(ArtField* field)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  size_t GetMethodIdOffset(ArtMethod* method, PointerSize pointer_size)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
  private:
   template <typename T, VerifyObjectFlags kVerifyFlags, typename Visitor>
   void FixupNativePointer(
@@ -1283,7 +1401,6 @@
   // Check that the pointer size matches the one in the class linker.
   ALWAYS_INLINE static void CheckPointerSize(PointerSize pointer_size);
 
-  static MemberOffset EmbeddedVTableOffset(PointerSize pointer_size);
   template <bool kVisitNativeRoots,
             VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
             ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
@@ -1291,6 +1408,10 @@
   void VisitReferences(ObjPtr<Class> klass, const Visitor& visitor)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Helper to set the status without any validity checks.
+  void SetStatusInternal(ClassStatus new_status)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+
   // 'Class' Object Fields
   // Order governed by java field ordering. See art::ClassLinker::LinkFields.
 
@@ -1445,6 +1566,7 @@
   ART_FRIEND_TEST(DexCacheTest, TestResolvedFieldAccess);  // For ResolvedFieldAccessTest
   friend struct art::ClassOffsets;  // for verifying offset information
   friend class Object;  // For VisitReferences
+  friend class linker::ImageWriter;  // For SetStatusInternal
   DISALLOW_IMPLICIT_CONSTRUCTORS(Class);
 };
 
diff --git a/runtime/mirror/class_ext-inl.h b/runtime/mirror/class_ext-inl.h
index bf51654..99f7f49 100644
--- a/runtime/mirror/class_ext-inl.h
+++ b/runtime/mirror/class_ext-inl.h
@@ -19,12 +19,133 @@
 
 #include "class_ext.h"
 
+#include "array-inl.h"
 #include "art_method-inl.h"
+#include "base/enums.h"
+#include "base/globals.h"
+#include "class_root.h"
+#include "handle_scope.h"
+#include "jni/jni_internal.h"
+#include "jni_id_type.h"
+#include "mirror/array.h"
+#include "mirror/object.h"
 #include "object-inl.h"
+#include "verify_object.h"
+#include "well_known_classes.h"
 
 namespace art {
 namespace mirror {
 
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline bool ClassExt::EnsureJniIdsArrayPresent(MemberOffset off, size_t count) {
+  ObjPtr<Object> existing(
+      GetFieldObject<Object, kVerifyFlags, kReadBarrierOption>(off));
+  if (!existing.IsNull()) {
+    return true;
+  }
+  Thread* self = Thread::Current();
+  StackHandleScope<2> hs(self);
+  Handle<ClassExt> h_this(hs.NewHandle(this));
+  MutableHandle<Object> new_arr(hs.NewHandle<Object>(nullptr));
+  if (UNLIKELY(Runtime::Current()->GetJniIdType() == JniIdType::kSwapablePointer)) {
+    new_arr.Assign(Runtime::Current()->GetJniIdManager()->GetPointerMarker());
+  } else {
+    new_arr.Assign(Runtime::Current()->GetClassLinker()->AllocPointerArray(self, count));
+  }
+  if (new_arr.IsNull()) {
+    // Fail.
+    self->AssertPendingOOMException();
+    return false;
+  }
+  bool set;
+  // Set the jni-ids array field at `off` using CAS semantics.
+  if (Runtime::Current()->IsActiveTransaction()) {
+    set = h_this->CasFieldObject<true>(
+        off, nullptr, new_arr.Get(), CASMode::kStrong, std::memory_order_seq_cst);
+  } else {
+    set = h_this->CasFieldObject<false>(
+        off, nullptr, new_arr.Get(), CASMode::kStrong, std::memory_order_seq_cst);
+  }
+  if (kIsDebugBuild) {
+    ObjPtr<Object> ret(
+        set ? new_arr.Get()
+            : h_this->GetFieldObject<PointerArray, kVerifyFlags, kReadBarrierOption>(off));
+    CHECK(!ret.IsNull());
+  }
+  return true;
+}
+
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline bool ClassExt::EnsureJMethodIDsArrayPresent(size_t count) {
+  return EnsureJniIdsArrayPresent<kVerifyFlags, kReadBarrierOption>(
+      MemberOffset(OFFSET_OF_OBJECT_MEMBER(ClassExt, jmethod_ids_)), count);
+}
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline bool ClassExt::EnsureStaticJFieldIDsArrayPresent(size_t count) {
+  return EnsureJniIdsArrayPresent<kVerifyFlags, kReadBarrierOption>(
+      MemberOffset(OFFSET_OF_OBJECT_MEMBER(ClassExt, static_jfield_ids_)), count);
+}
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline bool ClassExt::EnsureInstanceJFieldIDsArrayPresent(size_t count) {
+  return EnsureJniIdsArrayPresent<kVerifyFlags, kReadBarrierOption>(
+      MemberOffset(OFFSET_OF_OBJECT_MEMBER(ClassExt, instance_jfield_ids_)), count);
+}
+
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline ObjPtr<Object> ClassExt::GetInstanceJFieldIDs() {
+  return GetFieldObject<Object, kVerifyFlags, kReadBarrierOption>(
+      OFFSET_OF_OBJECT_MEMBER(ClassExt, instance_jfield_ids_));
+}
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline bool ClassExt::HasInstanceFieldPointerIdMarker() {
+  ObjPtr<Object> arr(GetInstanceJFieldIDs<kVerifyFlags, kReadBarrierOption>());
+  return !arr.IsNull() && !arr->IsArrayInstance();
+}
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline ObjPtr<PointerArray> ClassExt::GetInstanceJFieldIDsPointerArray() {
+  DCHECK(!HasInstanceFieldPointerIdMarker());
+  return down_cast<PointerArray*>(GetInstanceJFieldIDs<kVerifyFlags, kReadBarrierOption>().Ptr());
+}
+
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline ObjPtr<Object> ClassExt::GetStaticJFieldIDs() {
+  return GetFieldObject<Object, kVerifyFlags, kReadBarrierOption>(
+      OFFSET_OF_OBJECT_MEMBER(ClassExt, static_jfield_ids_));
+}
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline ObjPtr<PointerArray> ClassExt::GetStaticJFieldIDsPointerArray() {
+  DCHECK(!HasStaticFieldPointerIdMarker());
+  return down_cast<PointerArray*>(GetStaticJFieldIDs<kVerifyFlags, kReadBarrierOption>().Ptr());
+}
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline bool ClassExt::HasStaticFieldPointerIdMarker() {
+  ObjPtr<Object> arr(GetStaticJFieldIDs<kVerifyFlags, kReadBarrierOption>());
+  return !arr.IsNull() && !arr->IsArrayInstance();
+}
+
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline ObjPtr<Class> ClassExt::GetObsoleteClass() {
+  return GetFieldObject<Class, kVerifyFlags, kReadBarrierOption>(
+      OFFSET_OF_OBJECT_MEMBER(ClassExt, obsolete_class_));
+}
+
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline ObjPtr<Object> ClassExt::GetJMethodIDs() {
+  return GetFieldObject<Object, kVerifyFlags, kReadBarrierOption>(
+      OFFSET_OF_OBJECT_MEMBER(ClassExt, jmethod_ids_));
+}
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline ObjPtr<PointerArray> ClassExt::GetJMethodIDsPointerArray() {
+  DCHECK(!HasMethodPointerIdMarker());
+  return down_cast<PointerArray*>(GetJMethodIDs<kVerifyFlags, kReadBarrierOption>().Ptr());
+}
+template <VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline bool ClassExt::HasMethodPointerIdMarker() {
+  ObjPtr<Object> arr(GetJMethodIDs<kVerifyFlags, kReadBarrierOption>());
+  return !arr.IsNull() && !arr->IsArrayInstance();
+}
+
+
 inline ObjPtr<Object> ClassExt::GetVerifyError() {
   return GetFieldObject<ClassExt>(OFFSET_OF_OBJECT_MEMBER(ClassExt, verify_error_));
 }
@@ -47,15 +168,61 @@
 
 template<ReadBarrierOption kReadBarrierOption, class Visitor>
 void ClassExt::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) {
+  VisitMethods<kReadBarrierOption>([&](ArtMethod* method) {
+    method->VisitRoots<kReadBarrierOption>(visitor, pointer_size);
+  }, pointer_size);
+}
+
+template<ReadBarrierOption kReadBarrierOption, class Visitor>
+void ClassExt::VisitMethods(Visitor visitor, PointerSize pointer_size) {
   ObjPtr<PointerArray> arr(GetObsoleteMethods<kDefaultVerifyFlags, kReadBarrierOption>());
-  if (arr.IsNull()) {
-    return;
+  if (!arr.IsNull()) {
+    int32_t len = arr->GetLength();
+    for (int32_t i = 0; i < len; i++) {
+      ArtMethod* method = arr->GetElementPtrSize<ArtMethod*>(i, pointer_size);
+      if (method != nullptr) {
+        visitor(method);
+      }
+    }
   }
-  int32_t len = arr->GetLength();
-  for (int32_t i = 0; i < len; i++) {
-    ArtMethod* method = arr->GetElementPtrSize<ArtMethod*, kDefaultVerifyFlags>(i, pointer_size);
-    if (method != nullptr) {
-      method->VisitRoots<kReadBarrierOption>(visitor, pointer_size);
+}
+
+template<ReadBarrierOption kReadBarrierOption, class Visitor>
+void ClassExt::VisitJMethodIDs(Visitor v) {
+  ObjPtr<Object> arr(GetJMethodIDs<kDefaultVerifyFlags, kReadBarrierOption>());
+  if (!arr.IsNull() && arr->IsArrayInstance()) {
+    ObjPtr<PointerArray> marr(down_cast<PointerArray*>(arr.Ptr()));
+    int32_t len = marr->GetLength();
+    for (int32_t i = 0; i < len; i++) {
+      jmethodID id = marr->GetElementPtrSize<jmethodID>(i, kRuntimePointerSize);
+      if (id != nullptr) {
+        v(id, i);
+      }
+    }
+  }
+}
+template<ReadBarrierOption kReadBarrierOption, class Visitor>
+void ClassExt::VisitJFieldIDs(Visitor v) {
+  ObjPtr<Object> sarr_obj(GetStaticJFieldIDs<kDefaultVerifyFlags, kReadBarrierOption>());
+  if (!sarr_obj.IsNull() && sarr_obj->IsArrayInstance()) {
+    ObjPtr<PointerArray> sarr(down_cast<PointerArray*>(sarr_obj->AsArray().Ptr()));
+    int32_t len = sarr->GetLength();
+    for (int32_t i = 0; i < len; i++) {
+      jfieldID id = sarr->GetElementPtrSize<jfieldID>(i, kRuntimePointerSize);
+      if (id != nullptr) {
+        v(id, i, true);
+      }
+    }
+  }
+  ObjPtr<PointerArray> iarr_obj(GetInstanceJFieldIDs<kDefaultVerifyFlags, kReadBarrierOption>());
+  if (!iarr_obj.IsNull() && iarr_obj->IsArrayInstance()) {
+    ObjPtr<PointerArray> iarr(down_cast<PointerArray*>(iarr_obj->AsArray().Ptr()));
+    int32_t len = iarr->GetLength();
+    for (int32_t i = 0; i < len; i++) {
+      jfieldID id = iarr->GetElementPtrSize<jfieldID>(i, kRuntimePointerSize);
+      if (id != nullptr) {
+        v(id, i, false);
+      }
     }
   }
 }
diff --git a/runtime/mirror/class_ext.cc b/runtime/mirror/class_ext.cc
index d12f340..ba1ae5f 100644
--- a/runtime/mirror/class_ext.cc
+++ b/runtime/mirror/class_ext.cc
@@ -25,6 +25,8 @@
 #include "class_root.h"
 #include "dex/dex_file-inl.h"
 #include "gc/accounting/card_table-inl.h"
+#include "mirror/object.h"
+#include "mirror/object_array.h"
 #include "object-inl.h"
 #include "object_array-alloc-inl.h"
 #include "object_array-inl.h"
@@ -49,13 +51,19 @@
   SetFieldObject<false>(obsolete_methods_off, methods);
 }
 
+void ClassExt::SetIdsArraysForClassExtExtData(ObjPtr<Object> marker) {
+  CHECK(!marker.IsNull());
+  SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ClassExt, instance_jfield_ids_), marker);
+  SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ClassExt, static_jfield_ids_), marker);
+  SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ClassExt, jmethod_ids_), marker);
+}
+
 // We really need to be careful how we update this. If we ever in the future make it so that
 // these arrays are written into without all threads being suspended we have a race condition! This
 // race could cause obsolete methods to be missed.
-bool ClassExt::ExtendObsoleteArrays(Thread* self, uint32_t increase) {
+bool ClassExt::ExtendObsoleteArrays(Handle<ClassExt> h_this, Thread* self, uint32_t increase) {
   // TODO It would be good to check that we have locked the class associated with this ClassExt.
-  StackHandleScope<5> hs(self);
-  Handle<ClassExt> h_this(hs.NewHandle(this));
+  StackHandleScope<4> hs(self);
   Handle<PointerArray> old_methods(hs.NewHandle(h_this->GetObsoleteMethods()));
   Handle<ObjectArray<DexCache>> old_dex_caches(hs.NewHandle(h_this->GetObsoleteDexCaches()));
   ClassLinker* cl = Runtime::Current()->GetClassLinker();
@@ -102,6 +110,10 @@
   return true;
 }
 
+void ClassExt::SetObsoleteClass(ObjPtr<Class> klass) {
+  SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ClassExt, obsolete_class_), klass);
+}
+
 ObjPtr<ClassExt> ClassExt::Alloc(Thread* self) {
   return ObjPtr<ClassExt>::DownCast(GetClassRoot<ClassExt>()->AllocObject(self));
 }
diff --git a/runtime/mirror/class_ext.h b/runtime/mirror/class_ext.h
index 8fbbf5c..fa4e87a 100644
--- a/runtime/mirror/class_ext.h
+++ b/runtime/mirror/class_ext.h
@@ -48,10 +48,59 @@
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
            ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  bool EnsureInstanceJFieldIDsArrayPresent(size_t count)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  ObjPtr<PointerArray> GetInstanceJFieldIDsPointerArray() REQUIRES_SHARED(Locks::mutator_lock_);
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  ObjPtr<Object> GetInstanceJFieldIDs() REQUIRES_SHARED(Locks::mutator_lock_);
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  bool HasInstanceFieldPointerIdMarker() REQUIRES_SHARED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  bool EnsureStaticJFieldIDsArrayPresent(size_t count)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  ObjPtr<PointerArray> GetStaticJFieldIDsPointerArray() REQUIRES_SHARED(Locks::mutator_lock_);
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  ObjPtr<Object> GetStaticJFieldIDs() REQUIRES_SHARED(Locks::mutator_lock_);
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  bool HasStaticFieldPointerIdMarker() REQUIRES_SHARED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  bool EnsureJMethodIDsArrayPresent(size_t count)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  ObjPtr<Object> GetJMethodIDs() REQUIRES_SHARED(Locks::mutator_lock_);
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  ObjPtr<PointerArray> GetJMethodIDsPointerArray() REQUIRES_SHARED(Locks::mutator_lock_);
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  bool HasMethodPointerIdMarker() REQUIRES_SHARED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
   ObjPtr<PointerArray> GetObsoleteMethods() REQUIRES_SHARED(Locks::mutator_lock_);
 
   ObjPtr<Object> GetOriginalDexFile() REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Used to manually initialize the ext-ids arrays for the ClassExt associated
+  // with the Class<ClassExt>. This simplifies the id allocation path.
+  void SetIdsArraysForClassExtExtData(ObjPtr<Object> marker) REQUIRES_SHARED(Locks::mutator_lock_);
+
   void SetOriginalDexFile(ObjPtr<Object> bytes) REQUIRES_SHARED(Locks::mutator_lock_);
 
   uint16_t GetPreRedefineClassDefIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -72,23 +121,61 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Extend the obsolete arrays by the given amount.
-  bool ExtendObsoleteArrays(Thread* self, uint32_t increase)
+  static bool ExtendObsoleteArrays(Handle<ClassExt> h_this, Thread* self, uint32_t increase)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
   inline void VisitNativeRoots(Visitor& visitor, PointerSize pointer_size)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
+  inline void VisitMethods(Visitor visitor, PointerSize pointer_size)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   static ObjPtr<ClassExt> Alloc(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // TODO Save the obsolete class, if we have one.
+  // TODO We need this so jit-cleanup can work. The obsolete class might get cleaned up early
+  // otherwise. We should remove the need for this.
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  ObjPtr<Class> GetObsoleteClass() REQUIRES_SHARED(Locks::mutator_lock_);
+  void SetObsoleteClass(ObjPtr<Class> classes) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
+  inline void VisitJFieldIDs(Visitor v) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
+  inline void VisitJMethodIDs(Visitor v) REQUIRES_SHARED(Locks::mutator_lock_);
+
  private:
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+  bool EnsureJniIdsArrayPresent(MemberOffset off, size_t count)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
+  // An array containing the jfieldIDs assigned to each field in the corresponding position in the
+  // class's ifields_ array or '0' if no id has been assigned to that field yet.
+  HeapReference<PointerArray> instance_jfield_ids_;
+
+  // An array containing the jmethodIDs assigned to each method in the corresponding position in
+  // the class's methods_ array or '0' if no id has been assigned to that method yet.
+  HeapReference<PointerArray> jmethod_ids_;
+
+  // If set this is the Class object that was being used before a structural redefinition occurred.
+  HeapReference<Class> obsolete_class_;
+
   HeapReference<ObjectArray<DexCache>> obsolete_dex_caches_;
 
   HeapReference<PointerArray> obsolete_methods_;
 
   HeapReference<Object> original_dex_file_;
 
+  // An array containing the jfieldIDs assigned to each field in the corresponding position in the
+  // class's sfields_ array or '0' if no id has been assigned to that field yet.
+  HeapReference<PointerArray> static_jfield_ids_;
+
   // The saved verification error of this class.
   HeapReference<Object> verify_error_;
 
diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h
index 80b5a34..d5e1362 100644
--- a/runtime/mirror/dex_cache-inl.h
+++ b/runtime/mirror/dex_cache-inl.h
@@ -164,6 +164,7 @@
 
 inline void DexCache::SetResolvedType(dex::TypeIndex type_idx, ObjPtr<Class> resolved) {
   DCHECK(resolved != nullptr);
+  DCHECK(resolved->IsResolved()) << resolved->GetStatus();
   // TODO default transaction support.
   // Use a release store for SetResolvedType. This is done to prevent other threads from seeing a
   // class but not necessarily seeing the loaded members like the static fields array.
@@ -382,9 +383,11 @@
     }
 
     GcRoot<mirror::String>* const preresolved_strings = GetPreResolvedStrings();
-    const size_t num_preresolved_strings = NumPreResolvedStrings();
-    for (size_t i = 0; i != num_preresolved_strings; ++i) {
-      visitor.VisitRootIfNonNull(preresolved_strings[i].AddressWithoutBarrier());
+    if (preresolved_strings != nullptr) {
+      const size_t num_preresolved_strings = NumPreResolvedStrings();
+      for (size_t i = 0; i != num_preresolved_strings; ++i) {
+        visitor.VisitRootIfNonNull(preresolved_strings[i].AddressWithoutBarrier());
+      }
     }
   }
 }
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index f97f521..40997f6 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -25,11 +25,13 @@
 #include "object-inl.h"
 #include "object.h"
 #include "object_array-inl.h"
+#include "reflective_value_visitor.h"
 #include "runtime.h"
 #include "runtime_globals.h"
 #include "string.h"
 #include "thread.h"
 #include "utils/dex_cache_arrays_layout-inl.h"
+#include "write_barrier.h"
 
 namespace art {
 namespace mirror {
@@ -172,23 +174,66 @@
                   dex_file->NumCallSiteIds());
 }
 
+void DexCache::VisitReflectiveTargets(ReflectiveValueVisitor* visitor) {
+  bool wrote = false;
+  for (size_t i = 0; i < NumResolvedFields(); i++) {
+    auto pair(GetNativePairPtrSize(GetResolvedFields(), i, kRuntimePointerSize));
+    if (pair.index == FieldDexCachePair::InvalidIndexForSlot(i)) {
+      continue;
+    }
+    ArtField* new_val = visitor->VisitField(
+        pair.object, DexCacheSourceInfo(kSourceDexCacheResolvedField, pair.index, this));
+    if (UNLIKELY(new_val != pair.object)) {
+      if (new_val == nullptr) {
+        pair = FieldDexCachePair(nullptr, FieldDexCachePair::InvalidIndexForSlot(i));
+      } else {
+        pair.object = new_val;
+      }
+      SetNativePairPtrSize(GetResolvedFields(), i, pair, kRuntimePointerSize);
+      wrote = true;
+    }
+  }
+  for (size_t i = 0; i < NumResolvedMethods(); i++) {
+    auto pair(GetNativePairPtrSize(GetResolvedMethods(), i, kRuntimePointerSize));
+    if (pair.index == MethodDexCachePair::InvalidIndexForSlot(i)) {
+      continue;
+    }
+    ArtMethod* new_val = visitor->VisitMethod(
+        pair.object, DexCacheSourceInfo(kSourceDexCacheResolvedMethod, pair.index, this));
+    if (UNLIKELY(new_val != pair.object)) {
+      if (new_val == nullptr) {
+        pair = MethodDexCachePair(nullptr, MethodDexCachePair::InvalidIndexForSlot(i));
+      } else {
+        pair.object = new_val;
+      }
+      SetNativePairPtrSize(GetResolvedMethods(), i, pair, kRuntimePointerSize);
+      wrote = true;
+    }
+  }
+  if (wrote) {
+    WriteBarrier::ForEveryFieldWrite(this);
+  }
+}
+
 bool DexCache::AddPreResolvedStringsArray() {
   DCHECK_EQ(NumPreResolvedStrings(), 0u);
   Thread* const self = Thread::Current();
   LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc();
   const size_t num_strings = GetDexFile()->NumStringIds();
-  GcRoot<mirror::String>* strings =
-      linear_alloc->AllocArray<GcRoot<mirror::String>>(self, num_strings);
-  if (strings == nullptr) {
-    // Failed to allocate pre-resolved string array (probably due to address fragmentation), bail.
-    return false;
-  }
-  SetField32<false>(NumPreResolvedStringsOffset(), num_strings);
+  if (num_strings != 0) {
+    GcRoot<mirror::String>* strings =
+        linear_alloc->AllocArray<GcRoot<mirror::String>>(self, num_strings);
+    if (strings == nullptr) {
+      // Failed to allocate pre-resolved string array (probably due to address fragmentation), bail.
+      return false;
+    }
+    SetField32<false>(NumPreResolvedStringsOffset(), num_strings);
 
-  CHECK(strings != nullptr);
-  SetPreResolvedStrings(strings);
-  for (size_t i = 0; i < GetDexFile()->NumStringIds(); ++i) {
-    CHECK(GetPreResolvedStrings()[i].Read() == nullptr);
+    CHECK(strings != nullptr);
+    SetPreResolvedStrings(strings);
+    for (size_t i = 0; i < GetDexFile()->NumStringIds(); ++i) {
+      CHECK(GetPreResolvedStrings()[i].Read() == nullptr);
+    }
   }
   return true;
 }
@@ -236,7 +281,11 @@
   SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_), location);
 }
 
-#if !defined(__aarch64__) && !defined(__x86_64__) && !defined(__mips__)
+void DexCache::SetClassLoader(ObjPtr<ClassLoader> class_loader) {
+  SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, class_loader_), class_loader);
+}
+
+#if !defined(__aarch64__) && !defined(__x86_64__)
 static pthread_mutex_t dex_cache_slow_atomic_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 DexCache::ConversionPair64 DexCache::AtomicLoadRelaxed16B(std::atomic<ConversionPair64>* target) {
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index b41443e..6067c76 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -37,12 +37,14 @@
 class DexFile;
 union JValue;
 class LinearAlloc;
+class ReflectiveValueVisitor;
 class Thread;
 
 namespace mirror {
 
 class CallSite;
 class Class;
+class ClassLoader;
 class MethodType;
 class String;
 
@@ -476,6 +478,10 @@
   // Returns true if we succeeded in adding the pre-resolved string array.
   bool AddPreResolvedStringsArray() REQUIRES_SHARED(Locks::mutator_lock_);
 
+  void VisitReflectiveTargets(ReflectiveValueVisitor* visitor) REQUIRES(Locks::mutator_lock_);
+
+  void SetClassLoader(ObjPtr<ClassLoader> class_loader) REQUIRES_SHARED(Locks::mutator_lock_);
+
  private:
   void Init(const DexFile* dex_file,
             ObjPtr<String> location,
@@ -515,8 +521,8 @@
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
 
   // Due to lack of 16-byte atomics support, we use hand-crafted routines.
-#if defined(__aarch64__) || defined(__mips__)
-  // 16-byte atomics are supported on aarch64, mips and mips64.
+#if defined(__aarch64__)
+  // 16-byte atomics are supported on aarch64.
   ALWAYS_INLINE static ConversionPair64 AtomicLoadRelaxed16B(
       std::atomic<ConversionPair64>* target) {
     return target->load(std::memory_order_relaxed);
@@ -556,10 +562,8 @@
   static void AtomicStoreRelease16B(std::atomic<ConversionPair64>* target, ConversionPair64 value);
 #endif
 
+  HeapReference<ClassLoader> class_loader_;
   HeapReference<String> location_;
-  // Number of elements in the preresolved_strings_ array. Note that this appears here because of
-  // our packing logic for 32 bit fields.
-  uint32_t num_preresolved_strings_;
 
   uint64_t dex_file_;                // const DexFile*
   uint64_t preresolved_strings_;     // GcRoot<mirror::String*> array with num_preresolved_strings
@@ -575,6 +579,7 @@
   uint64_t strings_;                 // std::atomic<StringDexCachePair>*, array with num_strings_
                                      // elements.
 
+  uint32_t num_preresolved_strings_;    // Number of elements in the preresolved_strings_ array.
   uint32_t num_resolved_call_sites_;    // Number of elements in the call_sites_ array.
   uint32_t num_resolved_fields_;        // Number of elements in the resolved_fields_ array.
   uint32_t num_resolved_method_types_;  // Number of elements in the resolved_method_types_ array.
diff --git a/runtime/mirror/executable-inl.h b/runtime/mirror/executable-inl.h
index 6d4b46a..77669da 100644
--- a/runtime/mirror/executable-inl.h
+++ b/runtime/mirror/executable-inl.h
@@ -20,6 +20,8 @@
 #include "executable.h"
 
 #include "object-inl.h"
+#include "reflective_value_visitor.h"
+#include "verify_object.h"
 
 namespace art {
 namespace mirror {
@@ -36,6 +38,19 @@
   return GetFieldObject<mirror::Class>(DeclaringClassOffset());
 }
 
+template<VerifyObjectFlags kVerifiyFlags>
+inline void Executable::VisitTarget(ReflectiveValueVisitor* v) {
+  HeapReflectiveSourceInfo hrsi(kSourceJavaLangReflectExecutable, this);
+  ArtMethod* orig = GetArtMethod<kVerifiyFlags>();
+  ArtMethod* new_target = v->VisitMethod(orig, hrsi);
+  if (orig != new_target) {
+    SetArtMethod(new_target);
+    SetDexMethodIndex(new_target->GetDexMethodIndex());
+    SetDeclaringClass(new_target->GetDeclaringClass());
+    WriteBarrier::ForEveryFieldWrite(this);
+  }
+}
+
 }  // namespace mirror
 }  // namespace art
 
diff --git a/runtime/mirror/executable.h b/runtime/mirror/executable.h
index a99c3ec..750a167 100644
--- a/runtime/mirror/executable.h
+++ b/runtime/mirror/executable.h
@@ -25,6 +25,7 @@
 
 struct ExecutableOffsets;
 class ArtMethod;
+class ReflectiveValueVisitor;
 
 namespace mirror {
 
@@ -41,6 +42,9 @@
     return reinterpret_cast64<ArtMethod*>(GetField64<kVerifyFlags>(ArtMethodOffset()));
   }
 
+  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  inline void VisitTarget(ReflectiveValueVisitor* v) REQUIRES(Locks::mutator_lock_);
+
   template <bool kTransactionActive = false,
             bool kCheckTransaction = true,
             VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -61,6 +65,21 @@
   uint32_t access_flags_;
   uint32_t dex_method_index_;
 
+  template<bool kTransactionActive = false>
+  void SetDeclaringClass(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) {
+    SetFieldObject<kTransactionActive>(DeclaringClassOffset(), klass);
+  }
+
+  template<bool kTransactionActive = false>
+  void SetAccessFlags(uint32_t flags) REQUIRES_SHARED(Locks::mutator_lock_) {
+    SetField32<kTransactionActive>(AccessFlagsOffset(), flags);
+  }
+
+  template<bool kTransactionActive = false>
+  void SetDexMethodIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
+    SetField32<kTransactionActive>(DexMethodIndexOffset(), idx);
+  }
+
   static MemberOffset DeclaringClassOffset() {
     return MemberOffset(OFFSETOF_MEMBER(Executable, declaring_class_));
   }
diff --git a/runtime/mirror/field-inl.h b/runtime/mirror/field-inl.h
index ac11be1..8a9cec4 100644
--- a/runtime/mirror/field-inl.h
+++ b/runtime/mirror/field-inl.h
@@ -89,7 +89,12 @@
   ret->SetType<kTransactionActive>(type.Get());
   ret->SetDeclaringClass<kTransactionActive>(field->GetDeclaringClass());
   ret->SetAccessFlags<kTransactionActive>(field->GetAccessFlags());
-  ret->SetDexFieldIndex<kTransactionActive>(dex_field_index);
+  auto iter_range = field->IsStatic() ? field->GetDeclaringClass()->GetSFields()
+                                      : field->GetDeclaringClass()->GetIFields();
+  auto position = std::find_if(
+      iter_range.begin(), iter_range.end(), [&](const auto& f) { return &f == field; });
+  DCHECK(position != iter_range.end());
+  ret->SetArtFieldIndex<kTransactionActive>(std::distance(iter_range.begin(), position));
   ret->SetOffset<kTransactionActive>(field->GetOffset().Int32Value());
   return ret.Get();
 }
diff --git a/runtime/mirror/field.cc b/runtime/mirror/field.cc
index f4d1e73..e9669b8 100644
--- a/runtime/mirror/field.cc
+++ b/runtime/mirror/field.cc
@@ -20,36 +20,38 @@
 #include "dex_cache-inl.h"
 #include "object-inl.h"
 #include "object_array-inl.h"
+#include "write_barrier.h"
 
 namespace art {
 namespace mirror {
 
+void Field::VisitTarget(ReflectiveValueVisitor* v) {
+  HeapReflectiveSourceInfo hrsi(kSourceJavaLangReflectField, this);
+  ArtField* orig = GetArtField();
+  ArtField* new_value = v->VisitField(orig, hrsi);
+  if (orig != new_value) {
+    SetOffset<false>(new_value->GetOffset().Int32Value());
+    SetDeclaringClass<false>(new_value->GetDeclaringClass());
+    auto new_range =
+        IsStatic() ? GetDeclaringClass()->GetSFields() : GetDeclaringClass()->GetIFields();
+    auto position = std::find_if(
+        new_range.begin(), new_range.end(), [&](const auto& f) { return &f == new_value; });
+    DCHECK(position != new_range.end());
+    SetArtFieldIndex<false>(std::distance(new_range.begin(), position));
+    WriteBarrier::ForEveryFieldWrite(this);
+  }
+  DCHECK_EQ(new_value, GetArtField());
+}
+
 ArtField* Field::GetArtField() {
   ObjPtr<mirror::Class> declaring_class = GetDeclaringClass();
-  if (UNLIKELY(declaring_class->IsProxyClass())) {
-    DCHECK(IsStatic());
-    DCHECK_EQ(declaring_class->NumStaticFields(), 2U);
-    // 0 == Class[] interfaces; 1 == Class[][] throws;
-    if (GetDexFieldIndex() == 0) {
-      return &declaring_class->GetSFieldsPtr()->At(0);
-    } else {
-      DCHECK_EQ(GetDexFieldIndex(), 1U);
-      return &declaring_class->GetSFieldsPtr()->At(1);
-    }
+  DCHECK_LT(GetArtFieldIndex(),
+            IsStatic() ? declaring_class->NumStaticFields() : declaring_class->NumInstanceFields());
+  if (IsStatic()) {
+    return declaring_class->GetStaticField(GetArtFieldIndex());
+  } else {
+    return declaring_class->GetInstanceField(GetArtFieldIndex());
   }
-  const ObjPtr<mirror::DexCache> dex_cache = declaring_class->GetDexCache();
-  ArtField* art_field = dex_cache->GetResolvedField(GetDexFieldIndex(), kRuntimePointerSize);
-  if (UNLIKELY(art_field == nullptr)) {
-    if (IsStatic()) {
-      art_field = declaring_class->FindDeclaredStaticField(dex_cache, GetDexFieldIndex());
-    } else {
-      art_field = declaring_class->FindInstanceField(dex_cache, GetDexFieldIndex());
-    }
-    CHECK(art_field != nullptr);
-    dex_cache->SetResolvedField(GetDexFieldIndex(), art_field, kRuntimePointerSize);
-  }
-  CHECK_EQ(declaring_class, art_field->GetDeclaringClass());
-  return art_field;
 }
 
 }  // namespace mirror
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
index 6ba8dc6..dd5ee76 100644
--- a/runtime/mirror/field.h
+++ b/runtime/mirror/field.h
@@ -29,6 +29,7 @@
 
 class ArtField;
 struct FieldOffsets;
+class ReflectiveValueVisitor;
 
 namespace mirror {
 
@@ -38,9 +39,15 @@
 // C++ mirror of java.lang.reflect.Field.
 class MANAGED Field : public AccessibleObject {
  public:
-  ALWAYS_INLINE uint32_t GetDexFieldIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
-    return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_));
+  ALWAYS_INLINE uint32_t GetArtFieldIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, art_field_index_));
   }
+  // Public for use by class redefinition code.
+  template<bool kTransactionActive>
+  void SetArtFieldIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
+    SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, art_field_index_), idx);
+  }
+
 
   ObjPtr<mirror::Class> GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -68,7 +75,6 @@
     return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_));
   }
 
-  // Slow, try to use only for PrettyField and such.
   ArtField* GetArtField() REQUIRES_SHARED(Locks::mutator_lock_);
 
   template <PointerSize kPointerSize, bool kTransactionActive = false>
@@ -77,6 +83,11 @@
                                                   bool force_resolve)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
+
+  // Used to modify the target of this Field object, if required for structural redefinition or some
+  // other purpose.
+  void VisitTarget(ReflectiveValueVisitor* v) REQUIRES(Locks::mutator_lock_);
+
  private:
   // Padding required for matching alignment with the Java peer.
   uint8_t padding_[2];
@@ -84,7 +95,7 @@
   HeapReference<mirror::Class> declaring_class_;
   HeapReference<mirror::Class> type_;
   int32_t access_flags_;
-  int32_t dex_field_index_;
+  int32_t art_field_index_;
   int32_t offset_;
 
   template<bool kTransactionActive>
@@ -99,11 +110,6 @@
   }
 
   template<bool kTransactionActive>
-  void SetDexFieldIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
-    SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_), idx);
-  }
-
-  template<bool kTransactionActive>
   void SetOffset(uint32_t offset) REQUIRES_SHARED(Locks::mutator_lock_) {
     SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, offset_), offset);
   }
diff --git a/runtime/mirror/method_handle_impl-inl.h b/runtime/mirror/method_handle_impl-inl.h
index 12047d4..27ccc53 100644
--- a/runtime/mirror/method_handle_impl-inl.h
+++ b/runtime/mirror/method_handle_impl-inl.h
@@ -33,12 +33,6 @@
   return GetFieldObject<mirror::MethodType>(OFFSET_OF_OBJECT_MEMBER(MethodHandle, nominal_type_));
 }
 
-inline ObjPtr<mirror::Class> MethodHandle::GetTargetClass() {
-  Kind kind = GetHandleKind();
-  return (kind <= kLastValidKind) ?
-      GetTargetMethod()->GetDeclaringClass() : GetTargetField()->GetDeclaringClass();
-}
-
 }  // namespace mirror
 }  // namespace art
 
diff --git a/runtime/mirror/method_handle_impl.cc b/runtime/mirror/method_handle_impl.cc
index 433d4ba..dd25fc9 100644
--- a/runtime/mirror/method_handle_impl.cc
+++ b/runtime/mirror/method_handle_impl.cc
@@ -54,5 +54,20 @@
   return mh.Get();
 }
 
+void MethodHandle::VisitTarget(ReflectiveValueVisitor* v) {
+  void* target = GetTargetField();
+  void* result;
+  HeapReflectiveSourceInfo hrsi(kSourceJavaLangInvokeMethodHandle, this);
+  if (GetHandleKind() < kFirstAccessorKind) {
+    result = v->VisitMethod(GetTargetMethod(), hrsi);
+  } else {
+    result = v->VisitField(GetTargetField(), hrsi);
+  }
+  if (result != target) {
+    SetField64<false>(ArtFieldOrMethodOffset(), reinterpret_cast<uintptr_t>(result));
+  }
+}
+
+
 }  // namespace mirror
 }  // namespace art
diff --git a/runtime/mirror/method_handle_impl.h b/runtime/mirror/method_handle_impl.h
index c973a24..a0f02f6 100644
--- a/runtime/mirror/method_handle_impl.h
+++ b/runtime/mirror/method_handle_impl.h
@@ -28,6 +28,7 @@
 
 struct MethodHandleOffsets;
 struct MethodHandleImplOffsets;
+class ReflectiveValueVisitor;
 
 namespace mirror {
 
@@ -81,12 +82,14 @@
         GetField64(OFFSET_OF_OBJECT_MEMBER(MethodHandle, art_field_or_method_)));
   }
 
-  ALWAYS_INLINE ObjPtr<mirror::Class> GetTargetClass() REQUIRES_SHARED(Locks::mutator_lock_);
-
   // Gets the return type for a named invoke method, or nullptr if the invoke method is not
   // supported.
   static const char* GetReturnTypeDescriptor(const char* invoke_method_name);
 
+  // Used when classes become structurally obsolete to change the MethodHandle to refer to the new
+  // method or field.
+  void VisitTarget(ReflectiveValueVisitor* v) REQUIRES(Locks::mutator_lock_);
+
  protected:
   void Initialize(uintptr_t art_field_or_method, Kind kind, Handle<MethodType> method_type)
       REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index ee84997..8b5703e 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -116,12 +116,9 @@
   uint32_t rb_state = lw.ReadBarrierState();
   return rb_state;
 #else
-  // MIPS32/MIPS64: use a memory barrier to prevent load-load reordering.
-  LockWord lw = GetLockWord(false);
-  *fake_address_dependency = 0;
-  std::atomic_thread_fence(std::memory_order_acquire);
-  uint32_t rb_state = lw.ReadBarrierState();
-  return rb_state;
+  UNUSED(fake_address_dependency);
+  LOG(FATAL) << "Unsupported architecture.";
+  UNREACHABLE();
 #endif
 }
 
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 4afabe2..ede1c66 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -151,22 +151,17 @@
   DISALLOW_COPY_AND_ASSIGN(CopyObjectVisitor);
 };
 
-ObjPtr<Object> Object::Clone(Thread* self) {
-  CHECK(!IsClass()) << "Can't clone classes.";
+ObjPtr<Object> Object::Clone(Handle<Object> h_this, Thread* self) {
+  CHECK(!h_this->IsClass()) << "Can't clone classes.";
   // Object::SizeOf gets the right size even if we're an array. Using c->AllocObject() here would
   // be wrong.
   gc::Heap* heap = Runtime::Current()->GetHeap();
-  size_t num_bytes = SizeOf();
-  StackHandleScope<1> hs(self);
-  Handle<Object> this_object(hs.NewHandle(this));
-  ObjPtr<Object> copy;
-  CopyObjectVisitor visitor(&this_object, num_bytes);
-  if (heap->IsMovableObject(this)) {
-    copy = heap->AllocObject<true>(self, GetClass(), num_bytes, visitor);
-  } else {
-    copy = heap->AllocNonMovableObject<true>(self, GetClass(), num_bytes, visitor);
-  }
-  if (this_object->GetClass()->IsFinalizable()) {
+  size_t num_bytes = h_this->SizeOf();
+  CopyObjectVisitor visitor(&h_this, num_bytes);
+  ObjPtr<Object> copy = heap->IsMovableObject(h_this.Get())
+      ? heap->AllocObject(self, h_this->GetClass(), num_bytes, visitor)
+      : heap->AllocNonMovableObject(self, h_this->GetClass(), num_bytes, visitor);
+  if (h_this->GetClass()->IsFinalizable()) {
     heap->AddFinalizerReference(self, &copy);
   }
   return copy;
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index e6e9160..2eff560 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -33,6 +33,7 @@
 
 class ArtField;
 class ArtMethod;
+template <class T> class Handle;
 class LockWord;
 class Monitor;
 struct ObjectOffsets;
@@ -130,7 +131,8 @@
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   size_t SizeOf() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ObjPtr<Object> Clone(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_)
+  static ObjPtr<Object> Clone(Handle<Object> h_this, Thread* self)
+      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
   int32_t IdentityHashCode()
diff --git a/runtime/mirror/object_array-alloc-inl.h b/runtime/mirror/object_array-alloc-inl.h
index 8e96d9f..594b0a6 100644
--- a/runtime/mirror/object_array-alloc-inl.h
+++ b/runtime/mirror/object_array-alloc-inl.h
@@ -37,11 +37,11 @@
                                                     ObjPtr<Class> object_array_class,
                                                     int32_t length,
                                                     gc::AllocatorType allocator_type) {
-  ObjPtr<Array> array = Array::Alloc<true>(self,
-                                           object_array_class,
-                                           length,
-                                           ComponentSizeShiftWidth(kHeapReferenceSize),
-                                           allocator_type);
+  ObjPtr<Array> array = Array::Alloc(self,
+                                     object_array_class,
+                                     length,
+                                     ComponentSizeShiftWidth(kHeapReferenceSize),
+                                     allocator_type);
   if (UNLIKELY(array == nullptr)) {
     return nullptr;
   }
@@ -61,15 +61,15 @@
 }
 
 template<class T>
-inline ObjPtr<ObjectArray<T>> ObjectArray<T>::CopyOf(Thread* self, int32_t new_length) {
+inline ObjPtr<ObjectArray<T>> ObjectArray<T>::CopyOf(Handle<ObjectArray<T>> h_this,
+                                                     Thread* self,
+                                                     int32_t new_length) {
   DCHECK_GE(new_length, 0);
-  // We may get copied by a compacting GC.
-  StackHandleScope<1> hs(self);
-  Handle<ObjectArray<T>> h_this(hs.NewHandle(this));
   gc::Heap* heap = Runtime::Current()->GetHeap();
-  gc::AllocatorType allocator_type = heap->IsMovableObject(this) ? heap->GetCurrentAllocator() :
-      heap->GetCurrentNonMovingAllocator();
-  ObjPtr<ObjectArray<T>> new_array = Alloc(self, GetClass(), new_length, allocator_type);
+  gc::AllocatorType allocator_type = heap->IsMovableObject(h_this.Get())
+      ? heap->GetCurrentAllocator()
+      : heap->GetCurrentNonMovingAllocator();
+  ObjPtr<ObjectArray<T>> new_array = Alloc(self, h_this->GetClass(), new_length, allocator_type);
   if (LIKELY(new_array != nullptr)) {
     new_array->AssignableMemcpy(0, h_this.Get(), 0, std::min(h_this->GetLength(), new_length));
   }
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 154302e..e4fe03b 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -17,6 +17,7 @@
 #ifndef ART_RUNTIME_MIRROR_OBJECT_ARRAY_INL_H_
 #define ART_RUNTIME_MIRROR_OBJECT_ARRAY_INL_H_
 
+#include "base/globals.h"
 #include "object_array.h"
 
 #include <string>
@@ -78,7 +79,7 @@
 template<class T>
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
 inline void ObjectArray<T>::SetWithoutChecks(int32_t i, ObjPtr<T> object) {
-  DCHECK(CheckIsValidIndex<kVerifyFlags>(i));
+  DCHECK(CheckIsValidIndex<kVerifyFlags>(i)) << i << " vs " << GetLength();
   DCHECK(CheckAssignable<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>(object));
   SetFieldObject<kTransactionActive, kCheckTransaction, kVerifyFlags>(OffsetOfElement(i), object);
 }
@@ -86,7 +87,7 @@
 template<class T>
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
 inline void ObjectArray<T>::SetWithoutChecksAndWriteBarrier(int32_t i, ObjPtr<T> object) {
-  DCHECK(CheckIsValidIndex<kVerifyFlags>(i));
+  DCHECK(CheckIsValidIndex<kVerifyFlags>(i)) << i << " vs " << GetLength();
   // TODO:  enable this check. It fails when writing the image in ImageWriter::FixupObjectArray.
   // DCHECK(CheckAssignable(object));
   SetFieldObjectWithoutWriteBarrier<kTransactionActive, kCheckTransaction, kVerifyFlags>(
@@ -95,7 +96,7 @@
 
 template<class T> template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
 inline ObjPtr<T> ObjectArray<T>::GetWithoutChecks(int32_t i) {
-  DCHECK(CheckIsValidIndex(i));
+  DCHECK(CheckIsValidIndex(i)) << i << " vs " << GetLength();
   return GetFieldObject<T, kVerifyFlags, kReadBarrierOption>(OffsetOfElement(i));
 }
 
@@ -330,6 +331,49 @@
   }
 }
 
+template <class T>
+inline ConstObjPtrArrayIter<T> ObjectArray<T>::cbegin() const {
+  return ConstObjPtrArrayIter<T>(this, 0);
+}
+template <class T>
+inline ConstObjPtrArrayIter<T> ObjectArray<T>::cend() const {
+  return ConstObjPtrArrayIter<T>(this, GetLength());
+}
+template <class T>
+inline ConstHandleArrayIter<T> ObjectArray<T>::cbegin(const Handle<ObjectArray<T>>& h_this) {
+  return ConstHandleArrayIter<T>(h_this, 0);
+}
+template <class T>
+inline ConstHandleArrayIter<T> ObjectArray<T>::cend(const Handle<ObjectArray<T>>& h_this) {
+  return ConstHandleArrayIter<T>(h_this, h_this->GetLength());
+}
+
+template <class T>
+inline ObjPtrArrayIter<T> ObjectArray<T>::begin() {
+  return ObjPtrArrayIter<T>(this, 0);
+}
+template <class T>
+inline ObjPtrArrayIter<T> ObjectArray<T>::end() {
+  return ObjPtrArrayIter<T>(this, GetLength());
+}
+template <class T>
+inline HandleArrayIter<T> ObjectArray<T>::begin(Handle<ObjectArray<T>>& h_this) {
+  return HandleArrayIter<T>(h_this, 0);
+}
+template <class T>
+inline HandleArrayIter<T> ObjectArray<T>::end(Handle<ObjectArray<T>>& h_this) {
+  return HandleArrayIter<T>(h_this, h_this->GetLength());
+}
+
+template<typename T, typename C>
+inline void ArrayIter<T, C>::CheckIdx() const {
+  if (kIsDebugBuild) {
+    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
+  }
+  DCHECK_LE(0, idx_);
+  DCHECK_LE(idx_, array_->GetLength());
+}
+
 }  // namespace mirror
 }  // namespace art
 
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index f7046d1..e58787c 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -17,12 +17,20 @@
 #ifndef ART_RUNTIME_MIRROR_OBJECT_ARRAY_H_
 #define ART_RUNTIME_MIRROR_OBJECT_ARRAY_H_
 
+#include <iterator>
 #include "array.h"
+#include "base/iteration_range.h"
 #include "obj_ptr.h"
 
 namespace art {
 namespace mirror {
 
+template<typename T, typename Container> class ArrayIter;
+template <typename T> using ConstObjPtrArrayIter = ArrayIter<T, const ObjPtr<ObjectArray<T>>>;
+template <typename T> using ConstHandleArrayIter = ArrayIter<T, const Handle<ObjectArray<T>>>;
+template <typename T> using ObjPtrArrayIter = ArrayIter<T, ObjPtr<ObjectArray<T>>>;
+template <typename T> using HandleArrayIter = ArrayIter<T, Handle<ObjectArray<T>>>;
+
 template<class T>
 class MANAGED ObjectArray: public Array {
  public:
@@ -99,12 +107,42 @@
                                 bool throw_exception)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ObjPtr<ObjectArray<T>> CopyOf(Thread* self, int32_t new_length)
+  static ObjPtr<ObjectArray<T>> CopyOf(Handle<ObjectArray<T>> h_this,
+                                       Thread* self,
+                                       int32_t new_length)
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
   static MemberOffset OffsetOfElement(int32_t i);
 
+  inline ConstObjPtrArrayIter<T> cbegin() const REQUIRES_SHARED(Locks::mutator_lock_);
+  inline ConstObjPtrArrayIter<T> cend() const REQUIRES_SHARED(Locks::mutator_lock_);
+  inline IterationRange<ConstObjPtrArrayIter<T>> ConstIterate() const REQUIRES_SHARED(Locks::mutator_lock_) {
+    return IterationRange(cbegin(), cend());
+  }
+  inline ObjPtrArrayIter<T> begin() REQUIRES_SHARED(Locks::mutator_lock_);
+  inline ObjPtrArrayIter<T> end() REQUIRES_SHARED(Locks::mutator_lock_);
+  inline IterationRange<ObjPtrArrayIter<T>> Iterate() REQUIRES_SHARED(Locks::mutator_lock_) {
+    return IterationRange(begin(), end());
+  }
+
+  static inline ConstHandleArrayIter<T> cbegin(const Handle<ObjectArray<T>>& h_this)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  static inline ConstHandleArrayIter<T> cend(const Handle<ObjectArray<T>>& h_this)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  static inline IterationRange<ConstHandleArrayIter<T>> ConstIterate(
+      const Handle<ObjectArray<T>>& h_this) REQUIRES_SHARED(Locks::mutator_lock_) {
+    return IterationRange(cbegin(h_this), cend(h_this));
+  }
+  static inline HandleArrayIter<T> begin(Handle<ObjectArray<T>>& h_this)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  static inline HandleArrayIter<T> end(Handle<ObjectArray<T>>& h_this)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  static inline IterationRange<HandleArrayIter<T>> Iterate(Handle<ObjectArray<T>>& h_this)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    return IterationRange(begin(h_this), end(h_this));
+  }
+
  private:
   // TODO fix thread safety analysis broken by the use of template. This should be
   // REQUIRES_SHARED(Locks::mutator_lock_).
@@ -115,6 +153,65 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectArray);
 };
 
+// Everything is NO_THREAD_SAFETY_ANALYSIS to work-around STL incompat with thread-annotations.
+// Everything should have REQUIRES_SHARED(Locks::mutator_lock_).
+template <typename T, typename Container>
+class ArrayIter : public std::iterator<std::forward_iterator_tag, ObjPtr<T>> {
+ private:
+  using Iter = ArrayIter<T, Container>;
+
+ public:
+  ArrayIter(Container array, int32_t idx) NO_THREAD_SAFETY_ANALYSIS : array_(array), idx_(idx) {
+    CheckIdx();
+  }
+
+  ArrayIter(const Iter& other) = default;  // NOLINT(runtime/explicit)
+  Iter& operator=(const Iter& other) = default;
+
+  bool operator!=(const Iter& other) const NO_THREAD_SAFETY_ANALYSIS {
+    CheckIdx();
+    return !(*this == other);
+  }
+  bool operator==(const Iter& other) const NO_THREAD_SAFETY_ANALYSIS {
+    return Ptr(other.array_) == Ptr(array_) && other.idx_ == idx_;
+  }
+  Iter& operator++() NO_THREAD_SAFETY_ANALYSIS {
+    idx_++;
+    CheckIdx();
+    return *this;
+  }
+  Iter operator++(int) NO_THREAD_SAFETY_ANALYSIS {
+    Iter res(this);
+    idx_++;
+    CheckIdx();
+    return res;
+  }
+  ObjPtr<T> operator->() const NO_THREAD_SAFETY_ANALYSIS {
+    CheckIdx();
+    return array_->GetWithoutChecks(idx_);
+  }
+  ObjPtr<T> operator*() const NO_THREAD_SAFETY_ANALYSIS {
+    CheckIdx();
+    return array_->GetWithoutChecks(idx_);
+  }
+
+ private:
+  // Checks current index and that locks are properly held.
+  void CheckIdx() const REQUIRES_SHARED(Locks::mutator_lock_);
+
+  static ObjectArray<T>* Ptr(const Handle<ObjectArray<T>>& p)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    return p.Get();
+  }
+  static ObjectArray<T>* Ptr(const ObjPtr<ObjectArray<T>>& p)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    return p.Ptr();
+  }
+
+  Container array_;
+  int32_t idx_;
+};
+
 }  // namespace mirror
 }  // namespace art
 
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 1cb22f2..8ef7025 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -111,7 +111,7 @@
   StackHandleScope<2> hs(soa.Self());
   Handle<ObjectArray<Object>> a1(hs.NewHandle(AllocObjectArray<Object>(soa.Self(), 256)));
   size_t s1 = a1->SizeOf();
-  ObjPtr<Object> clone = a1->Clone(soa.Self());
+  ObjPtr<Object> clone = Object::Clone(a1, soa.Self());
   EXPECT_EQ(s1, clone->SizeOf());
   EXPECT_TRUE(clone->GetClass() == a1->GetClass());
 }
@@ -158,17 +158,17 @@
   MutableHandle<Class> c = hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[I"));
   gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
   MutableHandle<Array> a = hs.NewHandle(
-      Array::Alloc<true>(soa.Self(), c.Get(), 1, c->GetComponentSizeShift(), allocator_type));
+      Array::Alloc(soa.Self(), c.Get(), 1, c->GetComponentSizeShift(), allocator_type));
   EXPECT_TRUE(c.Get() == a->GetClass());
   EXPECT_EQ(1, a->GetLength());
 
   c.Assign(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;"));
-  a.Assign(Array::Alloc<true>(soa.Self(), c.Get(), 1, c->GetComponentSizeShift(), allocator_type));
+  a.Assign(Array::Alloc(soa.Self(), c.Get(), 1, c->GetComponentSizeShift(), allocator_type));
   EXPECT_TRUE(c.Get() == a->GetClass());
   EXPECT_EQ(1, a->GetLength());
 
   c.Assign(class_linker_->FindSystemClass(soa.Self(), "[[Ljava/lang/Object;"));
-  a.Assign(Array::Alloc<true>(soa.Self(), c.Get(), 1, c->GetComponentSizeShift(), allocator_type));
+  a.Assign(Array::Alloc(soa.Self(), c.Get(), 1, c->GetComponentSizeShift(), allocator_type));
   EXPECT_TRUE(c.Get() == a->GetClass());
   EXPECT_EQ(1, a->GetLength());
 }
@@ -179,25 +179,26 @@
   MutableHandle<Class> c = hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[B"));
   gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
   MutableHandle<Array> a = hs.NewHandle(
-      Array::Alloc<true, true>(soa.Self(), c.Get(), 1, c->GetComponentSizeShift(), allocator_type));
+      Array::Alloc</*kIsInstrumented=*/ true, /*kFillUsable=*/ true>(
+          soa.Self(), c.Get(), 1, c->GetComponentSizeShift(), allocator_type));
   EXPECT_TRUE(c.Get() == a->GetClass());
   EXPECT_LE(1, a->GetLength());
 
   c.Assign(class_linker_->FindSystemClass(soa.Self(), "[I"));
-  a.Assign(
-      Array::Alloc<true, true>(soa.Self(), c.Get(), 2, c->GetComponentSizeShift(), allocator_type));
+  a.Assign(Array::Alloc</*kIsInstrumented=*/ true, /*kFillUsable=*/ true>(
+      soa.Self(), c.Get(), 2, c->GetComponentSizeShift(), allocator_type));
   EXPECT_TRUE(c.Get() == a->GetClass());
   EXPECT_LE(2, a->GetLength());
 
   c.Assign(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;"));
-  a.Assign(
-      Array::Alloc<true, true>(soa.Self(), c.Get(), 2, c->GetComponentSizeShift(), allocator_type));
+  a.Assign(Array::Alloc</*kIsInstrumented=*/ true, /*kFillUsable=*/ true>(
+      soa.Self(), c.Get(), 2, c->GetComponentSizeShift(), allocator_type));
   EXPECT_TRUE(c.Get() == a->GetClass());
   EXPECT_LE(2, a->GetLength());
 
   c.Assign(class_linker_->FindSystemClass(soa.Self(), "[[Ljava/lang/Object;"));
-  a.Assign(
-      Array::Alloc<true, true>(soa.Self(), c.Get(), 2, c->GetComponentSizeShift(), allocator_type));
+  a.Assign(Array::Alloc</*kIsInstrumented=*/ true, /*kFillUsable=*/ true>(
+      soa.Self(), c.Get(), 2, c->GetComponentSizeShift(), allocator_type));
   EXPECT_TRUE(c.Get() == a->GetClass());
   EXPECT_LE(2, a->GetLength());
 }
@@ -252,6 +253,48 @@
   TestPrimitiveArray<ShortArray>(class_linker_);
 }
 
+TEST_F(ObjectTest, PointerArrayWriteRead) {
+  ScopedObjectAccess soa(Thread::Current());
+  StackHandleScope<2> hs(soa.Self());
+
+  Handle<PointerArray> a32 =
+      hs.NewHandle(ObjPtr<PointerArray>::DownCast<Array>(IntArray::Alloc(soa.Self(), 1)));
+  ASSERT_TRUE(a32 != nullptr);
+  ASSERT_EQ(1, a32->GetLength());
+  EXPECT_EQ(0u, (a32->GetElementPtrSize<uint32_t, PointerSize::k32>(0u)));
+  EXPECT_EQ(0u, (a32->GetElementPtrSizeUnchecked<uint32_t, PointerSize::k32>(0u)));
+  for (uint32_t value : { 0u, 1u, 0x7fffffffu, 0x80000000u, 0xffffffffu }) {
+    a32->SetElementPtrSize(0u, value, PointerSize::k32);
+    EXPECT_EQ(value, (a32->GetElementPtrSize<uint32_t, PointerSize::k32>(0u)));
+    EXPECT_EQ(value, (a32->GetElementPtrSizeUnchecked<uint32_t, PointerSize::k32>(0u)));
+    // Check that the value matches also when retrieved as `uint64_t`.
+    // This is a regression test for unintended sign-extension. b/155780442
+    // (Using `uint64_t` rather than `uintptr_t`, so that the 32-bit test checks this too.)
+    EXPECT_EQ(value, (a32->GetElementPtrSize<uint64_t, PointerSize::k32>(0u)));
+    EXPECT_EQ(value, (a32->GetElementPtrSizeUnchecked<uint64_t, PointerSize::k32>(0u)));
+  }
+
+  Handle<PointerArray> a64 =
+      hs.NewHandle(ObjPtr<PointerArray>::DownCast<Array>(LongArray::Alloc(soa.Self(), 1)));
+  ASSERT_TRUE(a64 != nullptr);
+  ASSERT_EQ(1, a64->GetLength());
+  EXPECT_EQ(0u, (a64->GetElementPtrSize<uint32_t, PointerSize::k64>(0u)));
+  EXPECT_EQ(0u, (a64->GetElementPtrSizeUnchecked<uint32_t, PointerSize::k64>(0u)));
+  for (uint64_t value : { UINT64_C(0),
+                          UINT64_C(1),
+                          UINT64_C(0x7fffffff),
+                          UINT64_C(0x80000000),
+                          UINT64_C(0xffffffff),
+                          UINT64_C(0x100000000),
+                          UINT64_C(0x7fffffffffffffff),
+                          UINT64_C(0x8000000000000000),
+                          UINT64_C(0xffffffffffffffff) }) {
+    a64->SetElementPtrSize(0u, value, PointerSize::k64);
+    EXPECT_EQ(value, (a64->GetElementPtrSize<uint64_t, PointerSize::k64>(0u)));
+    EXPECT_EQ(value, (a64->GetElementPtrSizeUnchecked<uint64_t, PointerSize::k64>(0u)));
+  }
+}
+
 TEST_F(ObjectTest, PrimitiveArray_Double_Alloc) {
   using ArrayT = DoubleArray;
   ScopedObjectAccess soa(Thread::Current());
diff --git a/runtime/mirror/string-alloc-inl.h b/runtime/mirror/string-alloc-inl.h
index 7215c39..e2b0805 100644
--- a/runtime/mirror/string-alloc-inl.h
+++ b/runtime/mirror/string-alloc-inl.h
@@ -24,6 +24,7 @@
 #include "base/bit_utils.h"
 #include "class.h"
 #include "class_root.h"
+#include "gc/allocator_type.h"
 #include "gc/heap-inl.h"
 #include "obj_ptr.h"
 #include "runtime.h"
@@ -191,11 +192,11 @@
 
   gc::Heap* heap = runtime->GetHeap();
   return ObjPtr<String>::DownCast(
-      heap->AllocObjectWithAllocator<kIsInstrumented, true>(self,
-                                                            string_class,
-                                                            alloc_size,
-                                                            allocator_type,
-                                                            pre_fence_visitor));
+      heap->AllocObjectWithAllocator<kIsInstrumented>(self,
+                                                      string_class,
+                                                      alloc_size,
+                                                      allocator_type,
+                                                      pre_fence_visitor));
 }
 
 template <bool kIsInstrumented>
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index 1881c57..0356080 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -89,7 +89,7 @@
   gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
   const int32_t length_with_flag = String::GetFlaggedCount(length, compressible);
   SetStringCountVisitor visitor(length_with_flag);
-  ObjPtr<String> string = Alloc<true>(self, length_with_flag, allocator_type, visitor);
+  ObjPtr<String> string = Alloc(self, length_with_flag, allocator_type, visitor);
   if (UNLIKELY(string == nullptr)) {
     return nullptr;
   }
@@ -130,7 +130,7 @@
   const int32_t length_with_flag = String::GetFlaggedCount(length + length2, compressible);
 
   SetStringCountVisitor visitor(length_with_flag);
-  ObjPtr<String> new_string = Alloc<true>(self, length_with_flag, allocator_type, visitor);
+  ObjPtr<String> new_string = Alloc(self, length_with_flag, allocator_type, visitor);
   if (UNLIKELY(new_string == nullptr)) {
     return nullptr;
   }
@@ -167,7 +167,7 @@
                             String::AllASCII<uint16_t>(utf16_data_in, utf16_length);
   int32_t length_with_flag = String::GetFlaggedCount(utf16_length, compressible);
   SetStringCountVisitor visitor(length_with_flag);
-  ObjPtr<String> string = Alloc<true>(self, length_with_flag, allocator_type, visitor);
+  ObjPtr<String> string = Alloc(self, length_with_flag, allocator_type, visitor);
   if (UNLIKELY(string == nullptr)) {
     return nullptr;
   }
@@ -203,7 +203,7 @@
   const bool compressible = kUseStringCompression && (utf16_length == utf8_length);
   const int32_t utf16_length_with_flag = String::GetFlaggedCount(utf16_length, compressible);
   SetStringCountVisitor visitor(utf16_length_with_flag);
-  ObjPtr<String> string = Alloc<true>(self, utf16_length_with_flag, allocator_type, visitor);
+  ObjPtr<String> string = Alloc(self, utf16_length_with_flag, allocator_type, visitor);
   if (UNLIKELY(string == nullptr)) {
     return nullptr;
   }
@@ -323,18 +323,16 @@
   return count_diff;
 }
 
-ObjPtr<CharArray> String::ToCharArray(Thread* self) {
-  StackHandleScope<1> hs(self);
-  Handle<String> string(hs.NewHandle(this));
-  ObjPtr<CharArray> result = CharArray::Alloc(self, GetLength());
+ObjPtr<CharArray> String::ToCharArray(Handle<String> h_this, Thread* self) {
+  ObjPtr<CharArray> result = CharArray::Alloc(self, h_this->GetLength());
   if (result != nullptr) {
-    if (string->IsCompressed()) {
-      int32_t length = string->GetLength();
+    if (h_this->IsCompressed()) {
+      int32_t length = h_this->GetLength();
       for (int i = 0; i < length; ++i) {
-        result->GetData()[i] = string->CharAt(i);
+        result->GetData()[i] = h_this->CharAt(i);
       }
     } else {
-      memcpy(result->GetData(), string->GetValue(), string->GetLength() * sizeof(uint16_t));
+      memcpy(result->GetData(), h_this->GetValue(), h_this->GetLength() * sizeof(uint16_t));
     }
   } else {
     self->AssertPendingOOMException();
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 07c7238..ccb1e49 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -18,15 +18,19 @@
 #define ART_RUNTIME_MIRROR_STRING_H_
 
 #include "base/bit_utils.h"
-#include "gc/allocator_type.h"
 #include "class.h"
 #include "object.h"
 #include "runtime_globals.h"
 
 namespace art {
 
+namespace gc {
+enum AllocatorType : char;
+}  // namespace gc
+
 template<class T> class Handle;
 template<class MirrorType> class ObjPtr;
+class StringBuilderAppend;
 struct StringOffsets;
 class StubTest_ReadBarrierForRoot_Test;
 
@@ -114,7 +118,7 @@
 
   ObjPtr<String> Intern() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  template <bool kIsInstrumented>
+  template <bool kIsInstrumented = true>
   ALWAYS_INLINE static ObjPtr<String> AllocFromByteArray(Thread* self,
                                                          int32_t byte_length,
                                                          Handle<ByteArray> array,
@@ -123,7 +127,7 @@
                                                          gc::AllocatorType allocator_type)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
-  template <bool kIsInstrumented>
+  template <bool kIsInstrumented = true>
   ALWAYS_INLINE static ObjPtr<String> AllocFromCharArray(Thread* self,
                                                          int32_t count,
                                                          Handle<CharArray> array,
@@ -131,7 +135,7 @@
                                                          gc::AllocatorType allocator_type)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
-  template <bool kIsInstrumented>
+  template <bool kIsInstrumented = true>
   ALWAYS_INLINE static ObjPtr<String> AllocFromString(Thread* self,
                                                       int32_t string_length,
                                                       Handle<String> string,
@@ -139,7 +143,7 @@
                                                       gc::AllocatorType allocator_type)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
-  template <bool kIsInstrumented>
+  template <bool kIsInstrumented = true>
   ALWAYS_INLINE static ObjPtr<String> AllocEmptyString(Thread* self,
                                                        gc::AllocatorType allocator_type)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
@@ -183,7 +187,8 @@
 
   int32_t CompareTo(ObjPtr<String> other) REQUIRES_SHARED(Locks::mutator_lock_);
 
-  ObjPtr<CharArray> ToCharArray(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_)
+  static ObjPtr<CharArray> ToCharArray(Handle<String> h_this, Thread* self)
+      REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
   void GetChars(int32_t start, int32_t end, Handle<CharArray> array, int32_t index)
@@ -248,7 +253,7 @@
     SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(String, hash_code_), new_hash_code);
   }
 
-  template <bool kIsInstrumented, typename PreFenceVisitor>
+  template <bool kIsInstrumented = true, typename PreFenceVisitor>
   ALWAYS_INLINE static ObjPtr<String> Alloc(Thread* self,
                                             int32_t utf16_length_with_flag,
                                             gc::AllocatorType allocator_type,
@@ -269,6 +274,7 @@
     uint8_t value_compressed_[0];
   };
 
+  friend class art::StringBuilderAppend;
   friend struct art::StringOffsets;  // for verifying offset information
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(String);
diff --git a/runtime/mirror/var_handle.cc b/runtime/mirror/var_handle.cc
index d887b5a..6d5ff2c 100644
--- a/runtime/mirror/var_handle.cc
+++ b/runtime/mirror/var_handle.cc
@@ -2032,5 +2032,16 @@
   UNREACHABLE();
 }
 
+void FieldVarHandle::VisitTarget(ReflectiveValueVisitor* v) {
+  ArtField* orig = GetField();
+  ArtField* new_value =
+      v->VisitField(orig, HeapReflectiveSourceInfo(kSourceJavaLangInvokeFieldVarHandle, this));
+  if (orig != new_value) {
+    SetField64</*kTransactionActive*/ false>(ArtFieldOffset(),
+                                             reinterpret_cast<uintptr_t>(new_value));
+  }
+}
+
+
 }  // namespace mirror
 }  // namespace art
diff --git a/runtime/mirror/var_handle.h b/runtime/mirror/var_handle.h
index a46b466..02a0d8c 100644
--- a/runtime/mirror/var_handle.h
+++ b/runtime/mirror/var_handle.h
@@ -36,6 +36,7 @@
 struct ByteArrayViewVarHandleOffsets;
 struct ByteBufferViewVarHandleOffsets;
 
+class ReflectiveValueVisitor;
 class ShadowFrameGetter;
 
 namespace mirror {
@@ -197,6 +198,9 @@
 
   ArtField* GetField() REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Used for updating var-handles to obsolete fields.
+  void VisitTarget(ReflectiveValueVisitor* v) REQUIRES(Locks::mutator_lock_);
+
  private:
   static MemberOffset ArtFieldOffset() {
     return MemberOffset(OFFSETOF_MEMBER(FieldVarHandle, art_field_));
diff --git a/runtime/module_exclusion_test.cc b/runtime/module_exclusion_test.cc
index 67b79d4..1f5f87e 100644
--- a/runtime/module_exclusion_test.cc
+++ b/runtime/module_exclusion_test.cc
@@ -16,6 +16,8 @@
 
 #include "common_compiler_test.h"
 
+#include "aot_class_linker.h"
+#include "base/casts.h"
 #include "class_linker-inl.h"
 #include "handle.h"
 #include "handle_scope-inl.h"
@@ -71,7 +73,15 @@
     }
   }
 
- private:
+ protected:
+  void SetUpRuntimeOptions(RuntimeOptions* options) override {
+    CommonCompilerTest::SetUpRuntimeOptions(options);
+
+    // Set up the image location to be used by StartDex2OatCommandLine().
+    // Using a prebuilt image also makes the test run faster.
+    options->push_back(std::make_pair("-Ximage:" + GetImageLocation(), nullptr));
+  }
+
   std::string GetModuleFileName() const {
     std::vector<std::string> filename = GetLibCoreDexFileNames({ module_ });
     CHECK_EQ(filename.size(), 1u);
@@ -124,7 +134,60 @@
 };
 
 TEST_F(ConscryptExclusionTest, Test) {
+  Runtime* runtime = Runtime::Current();
+  ASSERT_TRUE(runtime->IsAotCompiler());
+  AotClassLinker* aot_class_linker = down_cast<AotClassLinker*>(runtime->GetClassLinker());
+  const std::vector<std::string> package_list = {
+      // Reserved conscrypt packages (includes sub-packages under these paths).
+      "android.net.ssl",
+      "com.android.org.conscrypt",
+  };
+  bool list_applied = aot_class_linker->SetUpdatableBootClassPackages(package_list);
+  ASSERT_TRUE(list_applied);
   DoTest();
+
+  // Also test passing the list to dex2oat.
+  ScratchFile package_list_file;
+  for (const std::string& package : package_list) {
+    std::string data = package + '\n';
+    ASSERT_TRUE(package_list_file.GetFile()->WriteFully(data.data(), data.size()));
+  }
+  ASSERT_EQ(0, package_list_file.GetFile()->Flush());
+  ScratchDir scratch_dir;
+  std::string jar_name = GetModuleFileName();
+  std::string odex_name = scratch_dir.GetPath() + module_ + ".odex";
+  std::vector<std::string> argv;
+  std::string error_msg;
+  bool success = StartDex2OatCommandLine(&argv, &error_msg);
+  ASSERT_TRUE(success) << error_msg;
+  argv.insert(argv.end(), {
+      "--dex-file=" + jar_name,
+      "--dex-location=" + jar_name,
+      "--oat-file=" + odex_name,
+      "--compiler-filter=speed",
+      "--updatable-bcp-packages-file=" + package_list_file.GetFilename()
+  });
+  success = RunDex2Oat(argv, &error_msg);
+  ASSERT_TRUE(success) << error_msg;
+  // Load the odex file.
+  std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
+                                                   odex_name.c_str(),
+                                                   odex_name.c_str(),
+                                                   /*executable=*/ false,
+                                                   /*low_4gb=*/ false,
+                                                   jar_name,
+                                                   &error_msg));
+  ASSERT_TRUE(odex_file != nullptr) << error_msg;
+  // Check that no classes have been resolved.
+  for (const OatDexFile* oat_dex_file : odex_file->GetOatDexFiles()) {
+    std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
+    ASSERT_TRUE(dex_file != nullptr);
+    for (size_t i = 0, num_class_defs = dex_file->NumClassDefs(); i != num_class_defs; ++i) {
+      ClassStatus status = oat_dex_file->GetOatClass(i).GetStatus();
+      ASSERT_FALSE(mirror::Class::IsErroneous(status));
+      ASSERT_LT(status, ClassStatus::kResolved);
+    }
+  }
 }
 
 }  // namespace art
diff --git a/runtime/monitor-inl.h b/runtime/monitor-inl.h
index e8ffafa..f7e31a0 100644
--- a/runtime/monitor-inl.h
+++ b/runtime/monitor-inl.h
@@ -29,6 +29,57 @@
   return obj_.Read<kReadBarrierOption>();
 }
 
+// Check for request to set lock owner info.
+void Monitor::CheckLockOwnerRequest(Thread* self) {
+  DCHECK(self != nullptr);
+  Thread* request_thread = lock_owner_request_.load(std::memory_order_relaxed);
+  if (request_thread == self) {
+    SetLockingMethod(self);
+    // Only do this the first time after a request.
+    lock_owner_request_.store(nullptr, std::memory_order_relaxed);
+  }
+}
+
+uintptr_t Monitor::LockOwnerInfoChecksum(ArtMethod* m, uint32_t dex_pc, Thread* t) {
+  uintptr_t dpc_and_thread = static_cast<uintptr_t>(dex_pc << 8) ^ reinterpret_cast<uintptr_t>(t);
+  return reinterpret_cast<uintptr_t>(m) ^ dpc_and_thread
+      ^ (dpc_and_thread << (/* ptr_size / 2 */ (sizeof m) << 2));
+}
+
+void Monitor::SetLockOwnerInfo(ArtMethod* method, uint32_t dex_pc, Thread* t) {
+  lock_owner_method_.store(method, std::memory_order_relaxed);
+  lock_owner_dex_pc_.store(dex_pc, std::memory_order_relaxed);
+  lock_owner_.store(t, std::memory_order_relaxed);
+  uintptr_t sum = LockOwnerInfoChecksum(method, dex_pc, t);
+  lock_owner_sum_.store(sum, std::memory_order_relaxed);
+}
+
+void Monitor::GetLockOwnerInfo(/*out*/ArtMethod** method, /*out*/uint32_t* dex_pc,
+                               Thread* t) {
+  ArtMethod* owners_method;
+  uint32_t owners_dex_pc;
+  Thread* owner;
+  uintptr_t owners_sum;
+  DCHECK(t != nullptr);
+  do {
+    owner = lock_owner_.load(std::memory_order_relaxed);
+    if (owner == nullptr) {
+      break;
+    }
+    owners_method = lock_owner_method_.load(std::memory_order_relaxed);
+    owners_dex_pc = lock_owner_dex_pc_.load(std::memory_order_relaxed);
+    owners_sum = lock_owner_sum_.load(std::memory_order_relaxed);
+  } while (owners_sum != LockOwnerInfoChecksum(owners_method, owners_dex_pc, owner));
+  if (owner == t) {
+    *method = owners_method;
+    *dex_pc = owners_dex_pc;
+  } else {
+    *method = nullptr;
+    *dex_pc = 0;
+  }
+}
+
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_MONITOR_INL_H_
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 676bceb..8190960 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -52,7 +52,8 @@
 /*
  * Every Object has a monitor associated with it, but not every Object is actually locked.  Even
  * the ones that are locked do not need a full-fledged monitor until a) there is actual contention
- * or b) wait() is called on the Object.
+ * or b) wait() is called on the Object, or (c) we need to lock an object that also has an
+ * identity hashcode.
  *
  * For Android, we have implemented a scheme similar to the one described in Bacon et al.'s
  * "Thin locks: featherweight synchronization for Java" (ACM 1998).  Things are even easier for us,
@@ -91,7 +92,6 @@
 
 Monitor::Monitor(Thread* self, Thread* owner, ObjPtr<mirror::Object> obj, int32_t hash_code)
     : monitor_lock_("a monitor lock", kMonitorLock),
-      monitor_contenders_("monitor contenders", monitor_lock_),
       num_waiters_(0),
       owner_(owner),
       lock_count_(0),
@@ -99,8 +99,11 @@
       wait_set_(nullptr),
       wake_set_(nullptr),
       hash_code_(hash_code),
-      locking_method_(nullptr),
-      locking_dex_pc_(0),
+      lock_owner_(nullptr),
+      lock_owner_method_(nullptr),
+      lock_owner_dex_pc_(0),
+      lock_owner_sum_(0),
+      lock_owner_request_(nullptr),
       monitor_id_(MonitorPool::ComputeMonitorId(this, self)) {
 #ifdef __LP64__
   DCHECK(false) << "Should not be reached in 64b";
@@ -118,7 +121,6 @@
                  int32_t hash_code,
                  MonitorId id)
     : monitor_lock_("a monitor lock", kMonitorLock),
-      monitor_contenders_("monitor contenders", monitor_lock_),
       num_waiters_(0),
       owner_(owner),
       lock_count_(0),
@@ -126,8 +128,11 @@
       wait_set_(nullptr),
       wake_set_(nullptr),
       hash_code_(hash_code),
-      locking_method_(nullptr),
-      locking_dex_pc_(0),
+      lock_owner_(nullptr),
+      lock_owner_method_(nullptr),
+      lock_owner_dex_pc_(0),
+      lock_owner_sum_(0),
+      lock_owner_request_(nullptr),
       monitor_id_(id) {
 #ifdef __LP64__
   next_free_ = nullptr;
@@ -150,20 +155,106 @@
   return hc;
 }
 
-bool Monitor::Install(Thread* self) {
-  MutexLock mu(self, monitor_lock_);  // Uncontended mutex acquisition as monitor isn't yet public.
-  CHECK(owner_ == nullptr || owner_ == self || owner_->IsSuspended());
+void Monitor::SetLockingMethod(Thread* owner) {
+  DCHECK(owner == Thread::Current() || owner->IsSuspended());
+  // Do not abort on dex pc errors. This can easily happen when we want to dump a stack trace on
+  // abort.
+  ArtMethod* lock_owner_method;
+  uint32_t lock_owner_dex_pc;
+  lock_owner_method = owner->GetCurrentMethod(&lock_owner_dex_pc, false);
+  if (lock_owner_method != nullptr && UNLIKELY(lock_owner_method->IsProxyMethod())) {
+    // Grab another frame. Proxy methods are not helpful for lock profiling. This should be rare
+    // enough that it's OK to walk the stack twice.
+    struct NextMethodVisitor final : public StackVisitor {
+      explicit NextMethodVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
+          : StackVisitor(thread,
+                         nullptr,
+                         StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+                         false),
+            count_(0),
+            method_(nullptr),
+            dex_pc_(0) {}
+      bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
+        ArtMethod* m = GetMethod();
+        if (m->IsRuntimeMethod()) {
+          // Continue if this is a runtime method.
+          return true;
+        }
+        count_++;
+        if (count_ == 2u) {
+          method_ = m;
+          dex_pc_ = GetDexPc(false);
+          return false;
+        }
+        return true;
+      }
+      size_t count_;
+      ArtMethod* method_;
+      uint32_t dex_pc_;
+    };
+    NextMethodVisitor nmv(owner_.load(std::memory_order_relaxed));
+    nmv.WalkStack();
+    lock_owner_method = nmv.method_;
+    lock_owner_dex_pc = nmv.dex_pc_;
+  }
+  SetLockOwnerInfo(lock_owner_method, lock_owner_dex_pc, owner);
+  DCHECK(lock_owner_method == nullptr || !lock_owner_method->IsProxyMethod());
+}
+
+void Monitor::SetLockingMethodNoProxy(Thread *owner) {
+  DCHECK(owner == Thread::Current());
+  uint32_t lock_owner_dex_pc;
+  ArtMethod* lock_owner_method = owner->GetCurrentMethod(&lock_owner_dex_pc);
+  // We don't expect a proxy method here.
+  DCHECK(lock_owner_method == nullptr || !lock_owner_method->IsProxyMethod());
+  SetLockOwnerInfo(lock_owner_method, lock_owner_dex_pc, owner);
+}
+
+bool Monitor::Install(Thread* self) NO_THREAD_SAFETY_ANALYSIS {
+  // This may or may not result in acquiring monitor_lock_. Its behavior is much more complicated
+  // than what clang thread safety analysis understands.
+  // Monitor is not yet public.
+  Thread* owner = owner_.load(std::memory_order_relaxed);
+  CHECK(owner == nullptr || owner == self || (ART_USE_FUTEXES && owner->IsSuspended()));
   // Propagate the lock state.
   LockWord lw(GetObject()->GetLockWord(false));
   switch (lw.GetState()) {
     case LockWord::kThinLocked: {
-      CHECK_EQ(owner_->GetThreadId(), lw.ThinLockOwner());
+      DCHECK(owner != nullptr);
+      CHECK_EQ(owner->GetThreadId(), lw.ThinLockOwner());
+      DCHECK_EQ(monitor_lock_.GetExclusiveOwnerTid(), 0) << " my tid = " << SafeGetTid(self);
       lock_count_ = lw.ThinLockCount();
-      break;
+#if ART_USE_FUTEXES
+      monitor_lock_.ExclusiveLockUncontendedFor(owner);
+#else
+      monitor_lock_.ExclusiveLock(owner);
+#endif
+      DCHECK_EQ(monitor_lock_.GetExclusiveOwnerTid(), owner->GetTid())
+          << " my tid = " << SafeGetTid(self);
+      LockWord fat(this, lw.GCState());
+      // Publish the updated lock word, which may race with other threads.
+      bool success = GetObject()->CasLockWord(lw, fat, CASMode::kWeak, std::memory_order_release);
+      if (success) {
+        if (ATraceEnabled()) {
+          SetLockingMethod(owner);
+        }
+        return true;
+      } else {
+#if ART_USE_FUTEXES
+        monitor_lock_.ExclusiveUnlockUncontended();
+#else
+        for (uint32_t i = 0; i <= lockCount; ++i) {
+          monitor_lock_.ExclusiveUnlock(owner);
+        }
+#endif
+        return false;
+      }
     }
     case LockWord::kHashCode: {
       CHECK_EQ(hash_code_.load(std::memory_order_relaxed), static_cast<int32_t>(lw.GetHashCode()));
-      break;
+      DCHECK_EQ(monitor_lock_.GetExclusiveOwnerTid(), 0) << " my tid = " << SafeGetTid(self);
+      LockWord fat(this, lw.GCState());
+      return GetObject()->CasLockWord(lw, fat, CASMode::kWeak, std::memory_order_release);
     }
     case LockWord::kFatLocked: {
       // The owner_ is suspended but another thread beat us to install a monitor.
@@ -178,52 +269,6 @@
       UNREACHABLE();
     }
   }
-  LockWord fat(this, lw.GCState());
-  // Publish the updated lock word, which may race with other threads.
-  bool success = GetObject()->CasLockWord(lw, fat, CASMode::kWeak, std::memory_order_release);
-  // Lock profiling.
-  if (success && owner_ != nullptr && lock_profiling_threshold_ != 0) {
-    // Do not abort on dex pc errors. This can easily happen when we want to dump a stack trace on
-    // abort.
-    locking_method_ = owner_->GetCurrentMethod(&locking_dex_pc_, false);
-    if (locking_method_ != nullptr && UNLIKELY(locking_method_->IsProxyMethod())) {
-      // Grab another frame. Proxy methods are not helpful for lock profiling. This should be rare
-      // enough that it's OK to walk the stack twice.
-      struct NextMethodVisitor final : public StackVisitor {
-        explicit NextMethodVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
-            : StackVisitor(thread,
-                           nullptr,
-                           StackVisitor::StackWalkKind::kIncludeInlinedFrames,
-                           false),
-              count_(0),
-              method_(nullptr),
-              dex_pc_(0) {}
-        bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
-          ArtMethod* m = GetMethod();
-          if (m->IsRuntimeMethod()) {
-            // Continue if this is a runtime method.
-            return true;
-          }
-          count_++;
-          if (count_ == 2u) {
-            method_ = m;
-            dex_pc_ = GetDexPc(false);
-            return false;
-          }
-          return true;
-        }
-        size_t count_;
-        ArtMethod* method_;
-        uint32_t dex_pc_;
-      };
-      NextMethodVisitor nmv(owner_);
-      nmv.WalkStack();
-      locking_method_ = nmv.method_;
-      locking_dex_pc_ = nmv.dex_pc_;
-    }
-    DCHECK(locking_method_ == nullptr || !locking_method_->IsProxyMethod());
-  }
-  return success;
 }
 
 Monitor::~Monitor() {
@@ -371,226 +416,222 @@
   return oss.str();
 }
 
-bool Monitor::TryLockLocked(Thread* self) {
-  if (owner_ == nullptr) {  // Unowned.
-    owner_ = self;
-    CHECK_EQ(lock_count_, 0);
-    // When debugging, save the current monitor holder for future
-    // acquisition failures to use in sampled logging.
-    if (lock_profiling_threshold_ != 0) {
-      locking_method_ = self->GetCurrentMethod(&locking_dex_pc_);
-      // We don't expect a proxy method here.
-      DCHECK(locking_method_ == nullptr || !locking_method_->IsProxyMethod());
-    }
-  } else if (owner_ == self) {  // Recursive.
+bool Monitor::TryLock(Thread* self, bool spin) {
+  Thread *owner = owner_.load(std::memory_order_relaxed);
+  if (owner == self) {
     lock_count_++;
+    CHECK_NE(lock_count_, 0u);  // Abort on overflow.
   } else {
-    return false;
+    bool success = spin ? monitor_lock_.ExclusiveTryLockWithSpinning(self)
+        : monitor_lock_.ExclusiveTryLock(self);
+    if (!success) {
+      return false;
+    }
+    DCHECK(owner_.load(std::memory_order_relaxed) == nullptr);
+    owner_.store(self, std::memory_order_relaxed);
+    CHECK_EQ(lock_count_, 0u);
+    if (ATraceEnabled()) {
+      SetLockingMethodNoProxy(self);
+    }
   }
+  DCHECK(monitor_lock_.IsExclusiveHeld(self));
   AtraceMonitorLock(self, GetObject(), /* is_wait= */ false);
   return true;
 }
 
-bool Monitor::TryLock(Thread* self) {
-  MutexLock mu(self, monitor_lock_);
-  return TryLockLocked(self);
-}
-
-// Asserts that a mutex isn't held when the class comes into and out of scope.
-class ScopedAssertNotHeld {
- public:
-  ScopedAssertNotHeld(Thread* self, Mutex& mu) : self_(self), mu_(mu) {
-    mu_.AssertNotHeld(self_);
-  }
-
-  ~ScopedAssertNotHeld() {
-    mu_.AssertNotHeld(self_);
-  }
-
- private:
-  Thread* const self_;
-  Mutex& mu_;
-  DISALLOW_COPY_AND_ASSIGN(ScopedAssertNotHeld);
-};
-
 template <LockReason reason>
 void Monitor::Lock(Thread* self) {
-  ScopedAssertNotHeld sanh(self, monitor_lock_);
   bool called_monitors_callback = false;
-  monitor_lock_.Lock(self);
-  while (true) {
-    if (TryLockLocked(self)) {
-      break;
+  if (TryLock(self, /*spin=*/ true)) {
+    // TODO: This preserves original behavior. Correct?
+    if (called_monitors_callback) {
+      CHECK(reason == LockReason::kForLock);
+      Runtime::Current()->GetRuntimeCallbacks()->MonitorContendedLocked(this);
     }
-    // Contended.
-    const bool log_contention = (lock_profiling_threshold_ != 0);
-    uint64_t wait_start_ms = log_contention ? MilliTime() : 0;
-    ArtMethod* owners_method = locking_method_;
-    uint32_t owners_dex_pc = locking_dex_pc_;
-    // Do this before releasing the lock so that we don't get deflated.
-    size_t num_waiters = num_waiters_;
-    ++num_waiters_;
-
-    // If systrace logging is enabled, first look at the lock owner. Acquiring the monitor's
-    // lock and then re-acquiring the mutator lock can deadlock.
-    bool started_trace = false;
-    if (ATraceEnabled()) {
-      if (owner_ != nullptr) {  // Did the owner_ give the lock up?
-        std::ostringstream oss;
-        std::string name;
-        owner_->GetThreadName(name);
-        oss << PrettyContentionInfo(name,
-                                    owner_->GetTid(),
-                                    owners_method,
-                                    owners_dex_pc,
-                                    num_waiters);
-        // Add info for contending thread.
-        uint32_t pc;
-        ArtMethod* m = self->GetCurrentMethod(&pc);
-        const char* filename;
-        int32_t line_number;
-        TranslateLocation(m, pc, &filename, &line_number);
-        oss << " blocking from "
-            << ArtMethod::PrettyMethod(m) << "(" << (filename != nullptr ? filename : "null")
-            << ":" << line_number << ")";
-        ATraceBegin(oss.str().c_str());
-        started_trace = true;
-      }
-    }
-
-    monitor_lock_.Unlock(self);  // Let go of locks in order.
-    // Call the contended locking cb once and only once. Also only call it if we are locking for
-    // the first time, not during a Wait wakeup.
-    if (reason == LockReason::kForLock && !called_monitors_callback) {
-      called_monitors_callback = true;
-      Runtime::Current()->GetRuntimeCallbacks()->MonitorContendedLocking(this);
-    }
-    self->SetMonitorEnterObject(GetObject().Ptr());
-    {
-      ScopedThreadSuspension tsc(self, kBlocked);  // Change to blocked and give up mutator_lock_.
-      uint32_t original_owner_thread_id = 0u;
-      {
-        // Reacquire monitor_lock_ without mutator_lock_ for Wait.
-        MutexLock mu2(self, monitor_lock_);
-        if (owner_ != nullptr) {  // Did the owner_ give the lock up?
-          original_owner_thread_id = owner_->GetThreadId();
-          monitor_contenders_.Wait(self);  // Still contended so wait.
-        }
-      }
-      if (original_owner_thread_id != 0u) {
-        // Woken from contention.
-        if (log_contention) {
-          uint64_t wait_ms = MilliTime() - wait_start_ms;
-          uint32_t sample_percent;
-          if (wait_ms >= lock_profiling_threshold_) {
-            sample_percent = 100;
-          } else {
-            sample_percent = 100 * wait_ms / lock_profiling_threshold_;
-          }
-          if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
-            // Reacquire mutator_lock_ for logging.
-            ScopedObjectAccess soa(self);
-
-            bool owner_alive = false;
-            pid_t original_owner_tid = 0;
-            std::string original_owner_name;
-
-            const bool should_dump_stacks = stack_dump_lock_profiling_threshold_ > 0 &&
-                wait_ms > stack_dump_lock_profiling_threshold_;
-            std::string owner_stack_dump;
-
-            // Acquire thread-list lock to find thread and keep it from dying until we've got all
-            // the info we need.
-            {
-              Locks::thread_list_lock_->ExclusiveLock(Thread::Current());
-
-              // Re-find the owner in case the thread got killed.
-              Thread* original_owner = Runtime::Current()->GetThreadList()->FindThreadByThreadId(
-                  original_owner_thread_id);
-
-              if (original_owner != nullptr) {
-                owner_alive = true;
-                original_owner_tid = original_owner->GetTid();
-                original_owner->GetThreadName(original_owner_name);
-
-                if (should_dump_stacks) {
-                  // Very long contention. Dump stacks.
-                  struct CollectStackTrace : public Closure {
-                    void Run(art::Thread* thread) override
-                        REQUIRES_SHARED(art::Locks::mutator_lock_) {
-                      thread->DumpJavaStack(oss);
-                    }
-
-                    std::ostringstream oss;
-                  };
-                  CollectStackTrace owner_trace;
-                  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its
-                  // execution.
-                  original_owner->RequestSynchronousCheckpoint(&owner_trace);
-                  owner_stack_dump = owner_trace.oss.str();
-                } else {
-                  Locks::thread_list_lock_->ExclusiveUnlock(Thread::Current());
-                }
-              } else {
-                Locks::thread_list_lock_->ExclusiveUnlock(Thread::Current());
-              }
-              // This is all the data we need. Now drop the thread-list lock, it's OK for the
-              // owner to go away now.
-            }
-
-            // If we found the owner (and thus have owner data), go and log now.
-            if (owner_alive) {
-              // Give the detailed traces for really long contention.
-              if (should_dump_stacks) {
-                // This must be here (and not above) because we cannot hold the thread-list lock
-                // while running the checkpoint.
-                std::ostringstream self_trace_oss;
-                self->DumpJavaStack(self_trace_oss);
-
-                uint32_t pc;
-                ArtMethod* m = self->GetCurrentMethod(&pc);
-
-                LOG(WARNING) << "Long "
-                    << PrettyContentionInfo(original_owner_name,
-                                            original_owner_tid,
-                                            owners_method,
-                                            owners_dex_pc,
-                                            num_waiters)
-                    << " in " << ArtMethod::PrettyMethod(m) << " for "
-                    << PrettyDuration(MsToNs(wait_ms)) << "\n"
-                    << "Current owner stack:\n" << owner_stack_dump
-                    << "Contender stack:\n" << self_trace_oss.str();
-              } else if (wait_ms > kLongWaitMs && owners_method != nullptr) {
-                uint32_t pc;
-                ArtMethod* m = self->GetCurrentMethod(&pc);
-                // TODO: We should maybe check that original_owner is still a live thread.
-                LOG(WARNING) << "Long "
-                    << PrettyContentionInfo(original_owner_name,
-                                            original_owner_tid,
-                                            owners_method,
-                                            owners_dex_pc,
-                                            num_waiters)
-                    << " in " << ArtMethod::PrettyMethod(m) << " for "
-                    << PrettyDuration(MsToNs(wait_ms));
-              }
-              LogContentionEvent(self,
-                                wait_ms,
-                                sample_percent,
-                                owners_method,
-                                owners_dex_pc);
-            }
-          }
-        }
-      }
-    }
-    if (started_trace) {
-      ATraceEnd();
-    }
-    self->SetMonitorEnterObject(nullptr);
-    monitor_lock_.Lock(self);  // Reacquire locks in order.
-    --num_waiters_;
+    return;
   }
-  monitor_lock_.Unlock(self);
+  // Contended; not reentrant. We hold no locks, so tread carefully.
+  const bool log_contention = (lock_profiling_threshold_ != 0);
+  uint64_t wait_start_ms = log_contention ? MilliTime() : 0;
+
+  Thread *orig_owner = nullptr;
+  ArtMethod* owners_method;
+  uint32_t owners_dex_pc;
+
+  // Do this before releasing the mutator lock so that we don't get deflated.
+  size_t num_waiters = num_waiters_.fetch_add(1, std::memory_order_relaxed);
+
+  bool started_trace = false;
+  if (ATraceEnabled() && owner_.load(std::memory_order_relaxed) != nullptr) {
+    // Acquiring thread_list_lock_ ensures that owner doesn't disappear while
+    // we're looking at it.
+    Locks::thread_list_lock_->ExclusiveLock(self);
+    orig_owner = owner_.load(std::memory_order_relaxed);
+    if (orig_owner != nullptr) {  // Did the owner_ give the lock up?
+      const uint32_t orig_owner_thread_id = orig_owner->GetThreadId();
+      GetLockOwnerInfo(&owners_method, &owners_dex_pc, orig_owner);
+      std::ostringstream oss;
+      std::string name;
+      orig_owner->GetThreadName(name);
+      oss << PrettyContentionInfo(name,
+                                  orig_owner_thread_id,
+                                  owners_method,
+                                  owners_dex_pc,
+                                  num_waiters);
+      Locks::thread_list_lock_->ExclusiveUnlock(self);
+      // Add info for contending thread.
+      uint32_t pc;
+      ArtMethod* m = self->GetCurrentMethod(&pc);
+      const char* filename;
+      int32_t line_number;
+      TranslateLocation(m, pc, &filename, &line_number);
+      oss << " blocking from "
+          << ArtMethod::PrettyMethod(m) << "(" << (filename != nullptr ? filename : "null")
+          << ":" << line_number << ")";
+      ATraceBegin(oss.str().c_str());
+      started_trace = true;
+    } else {
+      Locks::thread_list_lock_->ExclusiveUnlock(self);
+    }
+  }
+  if (log_contention) {
+    // Request the current holder to set lock_owner_info.
+    // Do this even if tracing is enabled, so we semi-consistently get the information
+    // corresponding to MonitorExit.
+    // TODO: Consider optionally obtaining a stack trace here via a checkpoint.  That would allow
+    // us to see what the other thread is doing while we're waiting.
+    orig_owner = owner_.load(std::memory_order_relaxed);
+    lock_owner_request_.store(orig_owner, std::memory_order_relaxed);
+  }
+  // Call the contended locking cb once and only once. Also only call it if we are locking for
+  // the first time, not during a Wait wakeup.
+  if (reason == LockReason::kForLock && !called_monitors_callback) {
+    called_monitors_callback = true;
+    Runtime::Current()->GetRuntimeCallbacks()->MonitorContendedLocking(this);
+  }
+  self->SetMonitorEnterObject(GetObject().Ptr());
+  {
+    ScopedThreadSuspension tsc(self, kBlocked);  // Change to blocked and give up mutator_lock_.
+
+    // Acquire monitor_lock_ without mutator_lock_, expecting to block this time.
+    // We already tried spinning above. The shutdown procedure currently assumes we stop
+    // touching monitors shortly after we suspend, so don't spin again here.
+    monitor_lock_.ExclusiveLock(self);
+
+    if (log_contention && orig_owner != nullptr) {
+      // Woken from contention.
+      uint64_t wait_ms = MilliTime() - wait_start_ms;
+      uint32_t sample_percent;
+      if (wait_ms >= lock_profiling_threshold_) {
+        sample_percent = 100;
+      } else {
+        sample_percent = 100 * wait_ms / lock_profiling_threshold_;
+      }
+      if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) {
+        // Do this unconditionally for consistency. It's possible another thread
+        // snuck in in the middle, and tracing was enabled. In that case, we may get its
+        // MonitorEnter information. We can live with that.
+        GetLockOwnerInfo(&owners_method, &owners_dex_pc, orig_owner);
+
+        // Reacquire mutator_lock_ for logging.
+        ScopedObjectAccess soa(self);
+
+        const bool should_dump_stacks = stack_dump_lock_profiling_threshold_ > 0 &&
+            wait_ms > stack_dump_lock_profiling_threshold_;
+
+        // Acquire thread-list lock to find thread and keep it from dying until we've got all
+        // the info we need.
+        Locks::thread_list_lock_->ExclusiveLock(self);
+
+        // Is there still a thread at the same address as the original owner?
+        // We tolerate the fact that it may occasionally be the wrong one.
+        if (Runtime::Current()->GetThreadList()->Contains(orig_owner)) {
+          uint32_t original_owner_tid = orig_owner->GetTid();  // System thread id.
+          std::string original_owner_name;
+          orig_owner->GetThreadName(original_owner_name);
+          std::string owner_stack_dump;
+
+          if (should_dump_stacks) {
+            // Very long contention. Dump stacks.
+            struct CollectStackTrace : public Closure {
+              void Run(art::Thread* thread) override
+                  REQUIRES_SHARED(art::Locks::mutator_lock_) {
+                thread->DumpJavaStack(oss);
+              }
+
+              std::ostringstream oss;
+            };
+            CollectStackTrace owner_trace;
+            // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its
+            // execution.
+            orig_owner->RequestSynchronousCheckpoint(&owner_trace);
+            owner_stack_dump = owner_trace.oss.str();
+          } else {
+            Locks::thread_list_lock_->ExclusiveUnlock(self);
+          }
+
+          // This is all the data we need. We dropped the thread-list lock, it's OK for the
+          // owner to go away now.
+
+          if (should_dump_stacks) {
+            // Give the detailed traces for really long contention.
+            // This must be here (and not above) because we cannot hold the thread-list lock
+            // while running the checkpoint.
+            std::ostringstream self_trace_oss;
+            self->DumpJavaStack(self_trace_oss);
+
+            uint32_t pc;
+            ArtMethod* m = self->GetCurrentMethod(&pc);
+
+            LOG(WARNING) << "Long "
+                << PrettyContentionInfo(original_owner_name,
+                                        original_owner_tid,
+                                        owners_method,
+                                        owners_dex_pc,
+                                        num_waiters)
+                << " in " << ArtMethod::PrettyMethod(m) << " for "
+                << PrettyDuration(MsToNs(wait_ms)) << "\n"
+                << "Current owner stack:\n" << owner_stack_dump
+                << "Contender stack:\n" << self_trace_oss.str();
+          } else if (wait_ms > kLongWaitMs && owners_method != nullptr) {
+            uint32_t pc;
+            ArtMethod* m = self->GetCurrentMethod(&pc);
+            // TODO: We should maybe check that original_owner is still a live thread.
+            LOG(WARNING) << "Long "
+                << PrettyContentionInfo(original_owner_name,
+                                        original_owner_tid,
+                                        owners_method,
+                                        owners_dex_pc,
+                                        num_waiters)
+                << " in " << ArtMethod::PrettyMethod(m) << " for "
+                << PrettyDuration(MsToNs(wait_ms));
+          }
+          LogContentionEvent(self,
+                            wait_ms,
+                            sample_percent,
+                            owners_method,
+                            owners_dex_pc);
+        } else {
+          Locks::thread_list_lock_->ExclusiveUnlock(self);
+        }
+      }
+    }
+  }
+  // We've successfully acquired monitor_lock_, released thread_list_lock, and are runnable.
+
+  // We avoided touching monitor fields while suspended, so set owner_ here.
+  owner_.store(self, std::memory_order_relaxed);
+  DCHECK_EQ(lock_count_, 0u);
+
+  if (ATraceEnabled()) {
+    SetLockingMethodNoProxy(self);
+  }
+  if (started_trace) {
+    ATraceEnd();
+  }
+  self->SetMonitorEnterObject(nullptr);
+  num_waiters_.fetch_sub(1, std::memory_order_relaxed);
+  DCHECK(monitor_lock_.IsExclusiveHeld(self));
   // We need to pair this with a single contended locking call. NB we match the RI behavior and call
   // this even if MonitorEnter failed.
   if (called_monitors_callback) {
@@ -634,7 +675,6 @@
                            uint32_t expected_owner_thread_id,
                            uint32_t found_owner_thread_id,
                            Monitor* monitor) {
-  // Acquire thread list lock so threads won't disappear from under us.
   std::string current_owner_string;
   std::string expected_owner_string;
   std::string found_owner_string;
@@ -700,39 +740,44 @@
 
 bool Monitor::Unlock(Thread* self) {
   DCHECK(self != nullptr);
-  uint32_t owner_thread_id = 0u;
-  DCHECK(!monitor_lock_.IsExclusiveHeld(self));
-  monitor_lock_.Lock(self);
-  Thread* owner = owner_;
-  if (owner != nullptr) {
-    owner_thread_id = owner->GetThreadId();
-  }
+  Thread* owner = owner_.load(std::memory_order_relaxed);
   if (owner == self) {
     // We own the monitor, so nobody else can be in here.
+    CheckLockOwnerRequest(self);
     AtraceMonitorUnlock();
     if (lock_count_ == 0) {
-      owner_ = nullptr;
-      locking_method_ = nullptr;
-      locking_dex_pc_ = 0;
-      SignalContendersAndReleaseMonitorLock(self);
-      return true;
+      owner_.store(nullptr, std::memory_order_relaxed);
+      SignalWaiterAndReleaseMonitorLock(self);
     } else {
       --lock_count_;
-      monitor_lock_.Unlock(self);
-      return true;
+      DCHECK(monitor_lock_.IsExclusiveHeld(self));
+      DCHECK_EQ(owner_.load(std::memory_order_relaxed), self);
+      // Keep monitor_lock_, but pretend we released it.
+      FakeUnlockMonitorLock();
     }
+    return true;
   }
   // We don't own this, so we're not allowed to unlock it.
   // The JNI spec says that we should throw IllegalMonitorStateException in this case.
+  uint32_t owner_thread_id = 0u;
+  {
+    MutexLock mu(self, *Locks::thread_list_lock_);
+    owner = owner_.load(std::memory_order_relaxed);
+    if (owner != nullptr) {
+      owner_thread_id = owner->GetThreadId();
+    }
+  }
   FailedUnlock(GetObject(), self->GetThreadId(), owner_thread_id, this);
-  monitor_lock_.Unlock(self);
+  // Pretend to release monitor_lock_, which we should not.
+  FakeUnlockMonitorLock();
   return false;
 }
 
-void Monitor::SignalContendersAndReleaseMonitorLock(Thread* self) {
-  // We want to signal one thread to wake up, to acquire the monitor that
-  // we are releasing. This could either be a Thread waiting on its own
-  // ConditionVariable, or a thread waiting on monitor_contenders_.
+void Monitor::SignalWaiterAndReleaseMonitorLock(Thread* self) {
+  // We want to release the monitor and signal up to one thread that was waiting
+  // but has since been notified.
+  DCHECK_EQ(lock_count_, 0u);
+  DCHECK(monitor_lock_.IsExclusiveHeld(self));
   while (wake_set_ != nullptr) {
     // No risk of waking ourselves here; since monitor_lock_ is not released until we're ready to
     // return, notify can't move the current thread from wait_set_ to wake_set_ until this
@@ -740,6 +785,7 @@
     Thread* thread = wake_set_;
     wake_set_ = thread->GetWaitNext();
     thread->SetWaitNext(nullptr);
+    DCHECK(owner_.load(std::memory_order_relaxed) == nullptr);
 
     // Check to see if the thread is still waiting.
     {
@@ -764,16 +810,14 @@
         // Release the lock, so that a potentially awakened thread will not
         // immediately contend on it. The lock ordering here is:
         // monitor_lock_, self->GetWaitMutex, thread->GetWaitMutex
-        monitor_lock_.Unlock(self);
+        monitor_lock_.Unlock(self);  // Releases contenders.
         thread->GetWaitConditionVariable()->Signal(self);
         return;
       }
     }
   }
-  // If we didn't wake any threads that were originally waiting on us,
-  // wake a contender.
-  monitor_contenders_.Signal(self);
   monitor_lock_.Unlock(self);
+  DCHECK(!monitor_lock_.IsExclusiveHeld(self));
 }
 
 void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
@@ -781,11 +825,8 @@
   DCHECK(self != nullptr);
   DCHECK(why == kTimedWaiting || why == kWaiting || why == kSleeping);
 
-  monitor_lock_.Lock(self);
-
   // Make sure that we hold the lock.
-  if (owner_ != self) {
-    monitor_lock_.Unlock(self);
+  if (owner_.load(std::memory_order_relaxed) != self) {
     ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
     return;
   }
@@ -798,23 +839,19 @@
 
   // Enforce the timeout range.
   if (ms < 0 || ns < 0 || ns > 999999) {
-    monitor_lock_.Unlock(self);
     self->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
                              "timeout arguments out of range: ms=%" PRId64 " ns=%d", ms, ns);
     return;
   }
 
+  CheckLockOwnerRequest(self);
+
   /*
    * Release our hold - we need to let it go even if we're a few levels
    * deep in a recursive lock, and we need to restore that later.
    */
-  int prev_lock_count = lock_count_;
+  unsigned int prev_lock_count = lock_count_;
   lock_count_ = 0;
-  owner_ = nullptr;
-  ArtMethod* saved_method = locking_method_;
-  locking_method_ = nullptr;
-  uintptr_t saved_dex_pc = locking_dex_pc_;
-  locking_dex_pc_ = 0;
 
   AtraceMonitorUnlock();  // For the implict Unlock() just above. This will only end the deepest
                           // nesting, but that is enough for the visualization, and corresponds to
@@ -823,6 +860,9 @@
 
   bool was_interrupted = false;
   bool timed_out = false;
+  // Update monitor state now; it's not safe once we're "suspended".
+  owner_.store(nullptr, std::memory_order_relaxed);
+  num_waiters_.fetch_add(1, std::memory_order_relaxed);
   {
     // Update thread state. If the GC wakes up, it'll ignore us, knowing
     // that we won't touch any references in this state, and we'll check
@@ -840,8 +880,6 @@
      * until we've signalled contenders on this monitor.
      */
     AppendToWaitSet(self);
-    ++num_waiters_;
-
 
     // Set wait_monitor_ to the monitor object we will be waiting on. When wait_monitor_ is
     // non-null a notifying or interrupting thread must signal the thread's wait_cond_ to wake it
@@ -850,7 +888,8 @@
     self->SetWaitMonitor(this);
 
     // Release the monitor lock.
-    SignalContendersAndReleaseMonitorLock(self);
+    DCHECK(monitor_lock_.IsExclusiveHeld(self));
+    SignalWaiterAndReleaseMonitorLock(self);
 
     // Handle the case where the thread was interrupted before we called wait().
     if (self->IsInterrupted()) {
@@ -899,30 +938,18 @@
 
   // Re-acquire the monitor and lock.
   Lock<LockReason::kForWait>(self);
-  monitor_lock_.Lock(self);
+  lock_count_ = prev_lock_count;
+  DCHECK(monitor_lock_.IsExclusiveHeld(self));
   self->GetWaitMutex()->AssertNotHeld(self);
 
-  /*
-   * We remove our thread from wait set after restoring the count
-   * and owner fields so the subroutine can check that the calling
-   * thread owns the monitor. Aside from that, the order of member
-   * updates is not order sensitive as we hold the pthread mutex.
-   */
-  owner_ = self;
-  lock_count_ = prev_lock_count;
-  locking_method_ = saved_method;
-  locking_dex_pc_ = saved_dex_pc;
-  --num_waiters_;
+  num_waiters_.fetch_sub(1, std::memory_order_relaxed);
   RemoveFromWaitSet(self);
-
-  monitor_lock_.Unlock(self);
 }
 
 void Monitor::Notify(Thread* self) {
   DCHECK(self != nullptr);
-  MutexLock mu(self, monitor_lock_);
   // Make sure that we hold the lock.
-  if (owner_ != self) {
+  if (owner_.load(std::memory_order_relaxed) != self) {
     ThrowIllegalMonitorStateExceptionF("object not locked by thread before notify()");
     return;
   }
@@ -937,9 +964,8 @@
 
 void Monitor::NotifyAll(Thread* self) {
   DCHECK(self != nullptr);
-  MutexLock mu(self, monitor_lock_);
   // Make sure that we hold the lock.
-  if (owner_ != self) {
+  if (owner_.load(std::memory_order_relaxed) != self) {
     ThrowIllegalMonitorStateExceptionF("object not locked by thread before notifyAll()");
     return;
   }
@@ -968,30 +994,18 @@
   if (lw.GetState() == LockWord::kFatLocked) {
     Monitor* monitor = lw.FatLockMonitor();
     DCHECK(monitor != nullptr);
-    MutexLock mu(self, monitor->monitor_lock_);
-    // Can't deflate if we have anybody waiting on the CV.
-    if (monitor->num_waiters_ > 0) {
+    // Can't deflate if we have anybody waiting on the CV or trying to acquire the monitor.
+    if (monitor->num_waiters_.load(std::memory_order_relaxed) > 0) {
       return false;
     }
-    Thread* owner = monitor->owner_;
-    if (owner != nullptr) {
-      // Can't deflate if we are locked and have a hash code.
-      if (monitor->HasHashCode()) {
-        return false;
-      }
-      // Can't deflate if our lock count is too high.
-      if (static_cast<uint32_t>(monitor->lock_count_) > LockWord::kThinLockMaxCount) {
-        return false;
-      }
-      // Deflate to a thin lock.
-      LockWord new_lw = LockWord::FromThinLockId(owner->GetThreadId(),
-                                                 monitor->lock_count_,
-                                                 lw.GCState());
-      // Assume no concurrent read barrier state changes as mutators are suspended.
-      obj->SetLockWord(new_lw, false);
-      VLOG(monitor) << "Deflated " << obj << " to thin lock " << owner->GetTid() << " / "
-          << monitor->lock_count_;
-    } else if (monitor->HasHashCode()) {
+    if (!monitor->monitor_lock_.ExclusiveTryLock(self)) {
+      // We cannot deflate a monitor that's currently held. It's unclear whether we should if
+      // we could.
+      return false;
+    }
+    DCHECK_EQ(monitor->lock_count_, 0u);
+    DCHECK_EQ(monitor->owner_.load(std::memory_order_relaxed), static_cast<Thread*>(nullptr));
+    if (monitor->HasHashCode()) {
       LockWord new_lw = LockWord::FromHashCode(monitor->GetHashCode(), lw.GCState());
       // Assume no concurrent read barrier state changes as mutators are suspended.
       obj->SetLockWord(new_lw, false);
@@ -1003,6 +1017,8 @@
       obj->SetLockWord(new_lw, false);
       VLOG(monitor) << "Deflated" << obj << " to empty lock word";
     }
+    monitor->monitor_lock_.ExclusiveUnlock(self);
+    DCHECK(!(monitor->monitor_lock_.IsExclusiveHeld(self)));
     // The monitor is deflated, mark the object as null so that we know to delete it during the
     // next GC.
     monitor->obj_ = GcRoot<mirror::Object>(nullptr);
@@ -1088,6 +1104,10 @@
   size_t contention_count = 0;
   StackHandleScope<1> hs(self);
   Handle<mirror::Object> h_obj(hs.NewHandle(obj));
+#if !ART_USE_FUTEXES
+  // In this case we cannot inflate an unowned monitor, so we sometimes defer inflation.
+  bool should_inflate = false;
+#endif
   while (true) {
     // We initially read the lockword with ordinary Java/relaxed semantics. When stronger
     // semantics are needed, we address it below. Since GetLockWord bottoms out to a relaxed load,
@@ -1098,6 +1118,11 @@
         // No ordering required for preceding lockword read, since we retest.
         LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0, lock_word.GCState()));
         if (h_obj->CasLockWord(lock_word, thin_locked, CASMode::kWeak, std::memory_order_acquire)) {
+#if !ART_USE_FUTEXES
+          if (should_inflate) {
+            InflateThinLocked(self, h_obj, lock_word, 0);
+          }
+#endif
           AtraceMonitorLock(self, h_obj.Get(), /* is_wait= */ false);
           return h_obj.Get();  // Success!
         }
@@ -1152,9 +1177,16 @@
             // of nanoseconds or less.
             sched_yield();
           } else {
+#if ART_USE_FUTEXES
             contention_count = 0;
             // No ordering required for initial lockword read. Install rereads it anyway.
             InflateThinLocked(self, h_obj, lock_word, 0);
+#else
+            // Can't inflate from non-owning thread. Keep waiting. Bad for power, but this code
+            // isn't used on-device.
+            should_inflate = true;
+            usleep(10);
+#endif
           }
         }
         continue;  // Start from the beginning.
@@ -1168,6 +1200,7 @@
           return mon->TryLock(self) ? h_obj.Get() : nullptr;
         } else {
           mon->Lock(self);
+          DCHECK(mon->monitor_lock_.IsExclusiveHeld(self));
           return h_obj.Get();  // Success!
         }
       }
@@ -1481,6 +1514,9 @@
     bool success = false;
     for (uint32_t dex_reg : dex_lock_info.dex_registers) {
       uint32_t value;
+
+      // For optimized code we expect the DexRegisterMap to be present - monitor information
+      // should not be optimized out.
       success = stack_visitor->GetVReg(m, dex_reg, kReferenceVReg, &value);
       if (success) {
         ObjPtr<mirror::Object> o = reinterpret_cast<mirror::Object*>(value);
@@ -1528,8 +1564,7 @@
 }
 
 bool Monitor::IsLocked() REQUIRES_SHARED(Locks::mutator_lock_) {
-  MutexLock mu(Thread::Current(), monitor_lock_);
-  return owner_ != nullptr;
+  return GetOwner() != nullptr;
 }
 
 void Monitor::TranslateLocation(ArtMethod* method,
@@ -1550,8 +1585,9 @@
 }
 
 uint32_t Monitor::GetOwnerThreadId() {
-  MutexLock mu(Thread::Current(), monitor_lock_);
-  Thread* owner = owner_;
+  // Make sure owner is not deallocated during access.
+  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+  Thread* owner = GetOwner();
   if (owner != nullptr) {
     return owner->GetThreadId();
   } else {
@@ -1679,14 +1715,16 @@
       break;
     case LockWord::kFatLocked: {
       Monitor* mon = lock_word.FatLockMonitor();
-      owner_ = mon->owner_;
+      owner_ = mon->owner_.load(std::memory_order_relaxed);
       // Here it is okay for the owner to be null since we don't reset the LockWord back to
       // kUnlocked until we get a GC. In cases where this hasn't happened yet we will have a fat
       // lock without an owner.
+      // Neither owner_ nor entry_count_ is touched by threads in "suspended" state, so
+      // we must see consistent values.
       if (owner_ != nullptr) {
         entry_count_ = 1 + mon->lock_count_;
       } else {
-        DCHECK_EQ(mon->lock_count_, 0) << "Monitor is fat-locked without any owner!";
+        DCHECK_EQ(mon->lock_count_, 0u) << "Monitor is fat-locked without any owner!";
       }
       for (Thread* waiter = mon->wait_set_; waiter != nullptr; waiter = waiter->GetWaitNext()) {
         waiters_.push_back(waiter);
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 4187f27..0714da1 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -21,6 +21,7 @@
 #include <stdint.h>
 #include <stdlib.h>
 
+#include <atomic>
 #include <iosfwd>
 #include <list>
 #include <vector>
@@ -129,12 +130,14 @@
 
   void SetObject(ObjPtr<mirror::Object> object);
 
-  Thread* GetOwner() const NO_THREAD_SAFETY_ANALYSIS {
-    return owner_;
+  // Provides no memory ordering guarantees.
+  Thread* GetOwner() const {
+    return owner_.load(std::memory_order_relaxed);
   }
 
   int32_t GetHashCode();
 
+  // Is the monitor currently locked? Debug only, provides no memory ordering guarantees.
   bool IsLocked() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_lock_);
 
   bool HasHashCode() const {
@@ -176,7 +179,7 @@
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Install the monitor into its object, may fail if another thread installs a different monitor
-  // first.
+  // first. Monitor remains in the same logical state as before, i.e. held the same # of times.
   bool Install(Thread* self)
       REQUIRES(!monitor_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -189,7 +192,10 @@
   // this routine.
   void RemoveFromWaitSet(Thread* thread) REQUIRES(monitor_lock_);
 
-  void SignalContendersAndReleaseMonitorLock(Thread* self) RELEASE(monitor_lock_);
+  // Release the monitor lock and signal a waiting thread that has been notified and now needs the
+  // lock. Assumes the monitor lock is held exactly once, and the owner_ field has been reset to
+  // null. Caller may be suspended (Wait) or runnable (MonitorExit).
+  void SignalWaiterAndReleaseMonitorLock(Thread* self) RELEASE(monitor_lock_);
 
   // Changes the shape of a monitor from thin to fat, preserving the internal lock state. The
   // calling thread must own the lock or the owner must be suspended. There's a race with other
@@ -210,37 +216,33 @@
                            uint32_t expected_owner_thread_id,
                            uint32_t found_owner_thread_id,
                            Monitor* mon)
-      REQUIRES(!Locks::thread_list_lock_,
-               !monitor_lock_)
+      REQUIRES(!Locks::thread_list_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Try to lock without blocking, returns true if we acquired the lock.
-  bool TryLock(Thread* self)
-      REQUIRES(!monitor_lock_)
-      REQUIRES_SHARED(Locks::mutator_lock_);
-  // Variant for already holding the monitor lock.
-  bool TryLockLocked(Thread* self)
-      REQUIRES(monitor_lock_)
+  // If spin is true, then we spin for a short period before failing.
+  bool TryLock(Thread* self, bool spin = false)
+      TRY_ACQUIRE(true, monitor_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   template<LockReason reason = LockReason::kForLock>
   void Lock(Thread* self)
-      REQUIRES(!monitor_lock_)
+      ACQUIRE(monitor_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool Unlock(Thread* thread)
-      REQUIRES(!monitor_lock_)
+      RELEASE(monitor_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   static void DoNotify(Thread* self, ObjPtr<mirror::Object> obj, bool notify_all)
       REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;  // For mon->Notify.
 
   void Notify(Thread* self)
-      REQUIRES(!monitor_lock_)
+      REQUIRES(monitor_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void NotifyAll(Thread* self)
-      REQUIRES(!monitor_lock_)
+      REQUIRES(monitor_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   static std::string PrettyContentionInfo(const std::string& owner_name,
@@ -270,7 +272,7 @@
   // Since we're allowed to wake up "early", we clamp extremely long durations to return at the end
   // of the 32-bit time epoch.
   void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow, ThreadState why)
-      REQUIRES(!monitor_lock_)
+      REQUIRES(monitor_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Translates the provided method and pc into its declaring class' source file and line number.
@@ -279,8 +281,18 @@
                                 int32_t* line_number)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Provides no memory ordering guarantees.
   uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_);
 
+  // Set locking_method_ and locking_dex_pc_ corresponding to owner's current stack.
+  // owner is either self or suspended.
+  void SetLockingMethod(Thread* owner) REQUIRES(monitor_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // The same, but without checking for a proxy method. Currently requires owner == self.
+  void SetLockingMethodNoProxy(Thread* owner) REQUIRES(monitor_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Support for systrace output of monitor operations.
   ALWAYS_INLINE static void AtraceMonitorLock(Thread* self,
                                               ObjPtr<mirror::Object> obj,
@@ -294,19 +306,27 @@
 
   static uint32_t lock_profiling_threshold_;
   static uint32_t stack_dump_lock_profiling_threshold_;
+  static bool capture_method_eagerly_;
 
+  // Holding the monitor N times is represented by holding monitor_lock_ N times.
   Mutex monitor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 
-  ConditionVariable monitor_contenders_ GUARDED_BY(monitor_lock_);
+  // Pretend to unlock monitor lock.
+  void FakeUnlockMonitorLock() RELEASE(monitor_lock_) NO_THREAD_SAFETY_ANALYSIS {}
 
-  // Number of people waiting on the condition.
-  size_t num_waiters_ GUARDED_BY(monitor_lock_);
+  // Number of threads either waiting on the condition or waiting on a contended
+  // monitor acquisition. Prevents deflation.
+  std::atomic<size_t> num_waiters_;
 
-  // Which thread currently owns the lock?
-  Thread* volatile owner_ GUARDED_BY(monitor_lock_);
+  // Which thread currently owns the lock? monitor_lock_ only keeps the tid.
+  // Only set while holding monitor_lock_. Non-locking readers only use it to
+  // compare to self or for debugging.
+  std::atomic<Thread*> owner_;
 
-  // Owner's recursive lock depth.
-  int lock_count_ GUARDED_BY(monitor_lock_);
+  // Owner's recursive lock depth. Owner_ non-null, and lock_count_ == 0 ==> held once.
+  unsigned int lock_count_ GUARDED_BY(monitor_lock_);
+
+  // Owner's recursive lock depth is given by monitor_lock_.GetDepth().
 
   // What object are we part of. This is a weak root. Do not access
   // this directly, use GetObject() to read it so it will be guarded
@@ -322,11 +342,76 @@
   // Stored object hash code, generated lazily by GetHashCode.
   AtomicInteger hash_code_;
 
-  // Method and dex pc where the lock owner acquired the lock, used when lock
-  // sampling is enabled. locking_method_ may be null if the lock is currently
-  // unlocked, or if the lock is acquired by the system when the stack is empty.
-  ArtMethod* locking_method_ GUARDED_BY(monitor_lock_);
-  uint32_t locking_dex_pc_ GUARDED_BY(monitor_lock_);
+  // Data structure used to remember the method and dex pc of a recent holder of the
+  // lock. Used for tracing and contention reporting. Setting these is expensive, since it
+  // involves a partial stack walk. We set them only as follows, to minimize the cost:
+  // - If tracing is enabled, they are needed immediately when we first notice contention, so we
+  //   set them unconditionally when a monitor is acquired.
+  // - If contention reporting is enabled, we use the lock_owner_request_ field to have the
+  //   contending thread request them. The current owner then sets them when releasing the monitor,
+  //   making them available when the contending thread acquires the monitor.
+  // - If both are enabled, we blindly do both. This usually prevents us from switching between
+  //   reporting the end and beginning of critical sections for contention logging when tracing is
+  //   enabled.  We expect that tracing overhead is normally much higher than for contention
+  //   logging, so the added cost should be small. It also minimizes glitches when enabling and
+  //   disabling traces.
+  // We're tolerant of missing information. E.g. when tracing is initially turned on, we may
+  // not have the lock holder information if the holder acquired the lock with tracing off.
+  //
+  // We make this data unconditionally atomic; for contention logging all accesses are in fact
+  // protected by the monitor, but for tracing, reads are not. Writes are always
+  // protected by the monitor.
+  //
+  // The fields are always accessed without memory ordering. We store a checksum, and reread if
+  // the checksum doesn't correspond to the values.  This results in values that are correct with
+  // very high probability, but not certainty.
+  //
+  // If we need lock_owner information for a certain thread for contention logging, we store its
+  // tid in lock_owner_request_. To satisfy the request, we store lock_owner_tid_,
+  // lock_owner_method_, and lock_owner_dex_pc_ and the corresponding checksum while holding the
+  // monitor.
+  //
+  // At all times, either lock_owner_ is zero, the checksum is valid, or a thread is actively
+  // in the process of establishing one of those states. Only one thread at a time can be actively
+  // establishing such a state, since writes are protected by the monitor.
+  std::atomic<Thread*> lock_owner_;  // *lock_owner_ may no longer exist!
+  std::atomic<ArtMethod*> lock_owner_method_;
+  std::atomic<uint32_t> lock_owner_dex_pc_;
+  std::atomic<uintptr_t> lock_owner_sum_;
+
+  // Request lock owner save method and dex_pc. Written asynchronously.
+  std::atomic<Thread*> lock_owner_request_;
+
+  // Compute method, dex pc, and tid "checksum".
+  uintptr_t LockOwnerInfoChecksum(ArtMethod* m, uint32_t dex_pc, Thread* t);
+
+  // Set owning method, dex pc, and tid. owner_ field is set and points to current thread.
+  void SetLockOwnerInfo(ArtMethod* method, uint32_t dex_pc, Thread* t)
+      REQUIRES(monitor_lock_);
+
+  // Get owning method and dex pc for the given thread, if available.
+  void GetLockOwnerInfo(/*out*/ArtMethod** method, /*out*/uint32_t* dex_pc, Thread* t);
+
+  // Do the same, while holding the monitor. There are no concurrent updates.
+  void GetLockOwnerInfoLocked(/*out*/ArtMethod** method, /*out*/uint32_t* dex_pc,
+                              uint32_t thread_id)
+      REQUIRES(monitor_lock_);
+
+  // We never clear lock_owner method and dex pc. Since it often reflects
+  // ownership when we last detected contention, it may be inconsistent with owner_
+  // and not 100% reliable. For lock contention monitoring, in the absence of tracing,
+  // there is a small risk that the current owner may finish before noticing the request,
+  // or the information will be overwritten by another intervening request and monitor
+  // release, so it's also not 100% reliable. But if we report information at all, it
+  // should generally (modulo accidental checksum matches) pertain to an acquisition of the
+  // right monitor by the right thread, so it's extremely unlikely to be seriously misleading.
+  // Since we track threads by a pointer to the Thread structure, there is a small chance we may
+  // confuse threads allocated at the same exact address, if a contending thread dies before
+  // we inquire about it.
+
+  // Check for and act on a pending lock_owner_request_
+  void CheckLockOwnerRequest(Thread* self)
+      REQUIRES(monitor_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
 
   // The denser encoded version of this monitor as stored in the lock word.
   MonitorId monitor_id_;
diff --git a/runtime/native/dalvik_system_BaseDexClassLoader.cc b/runtime/native/dalvik_system_BaseDexClassLoader.cc
new file mode 100644
index 0000000..607395d
--- /dev/null
+++ b/runtime/native/dalvik_system_BaseDexClassLoader.cc
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dalvik_system_DexFile.h"
+
+#include <memory>
+
+#include "class_loader_context.h"
+#include "native_util.h"
+#include "nativehelper/jni_macros.h"
+#include "well_known_classes.h"
+
+namespace art {
+
+static bool append_string(JNIEnv* env, jobjectArray array, uint32_t& i, const std::string& string) {
+  ScopedLocalRef<jstring> jstring(env, env->NewStringUTF(string.c_str()));
+  if (jstring.get() == nullptr) {
+    DCHECK(env->ExceptionCheck());
+    return false;
+  }
+  env->SetObjectArrayElement(array, i++, jstring.get());
+  return true;
+}
+
+static jobjectArray BaseDexClassLoader_computeClassLoaderContextsNative(JNIEnv* env,
+                                                                        jobject class_loader) {
+  CHECK(class_loader != nullptr);
+  std::map<std::string, std::string> contextMap =
+      ClassLoaderContext::EncodeClassPathContextsForClassLoader(class_loader);
+  jobjectArray result = env->NewObjectArray(2 * contextMap.size(),
+                                            WellKnownClasses::java_lang_String,
+                                            nullptr);
+  if (result == nullptr) {
+    DCHECK(env->ExceptionCheck());
+    return nullptr;
+  }
+  uint32_t i = 0;
+  for (const auto& classpath_to_context : contextMap) {
+    const std::string& classpath = classpath_to_context.first;
+    const std::string& context = classpath_to_context.second;
+    if (!append_string(env, result, i, classpath) || !append_string(env, result, i, context)) {
+      return nullptr;
+    }
+  }
+  return result;
+}
+
+static JNINativeMethod gMethods[] = {
+  NATIVE_METHOD(BaseDexClassLoader, computeClassLoaderContextsNative,
+                "()[Ljava/lang/String;"),
+};
+
+void register_dalvik_system_BaseDexClassLoader(JNIEnv* env) {
+  REGISTER_NATIVE_METHODS("dalvik/system/BaseDexClassLoader");
+}
+
+}  // namespace art
diff --git a/runtime/native/dalvik_system_BaseDexClassLoader.h b/runtime/native/dalvik_system_BaseDexClassLoader.h
new file mode 100644
index 0000000..4ec03ef
--- /dev/null
+++ b/runtime/native/dalvik_system_BaseDexClassLoader.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_DALVIK_SYSTEM_BASEDEXCLASSLOADER_H_
+#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_BASEDEXCLASSLOADER_H_
+
+#include <jni.h>
+#include <unistd.h>
+
+namespace art {
+
+void register_dalvik_system_BaseDexClassLoader(JNIEnv* env);
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_NATIVE_DALVIK_SYSTEM_BASEDEXCLASSLOADER_H_
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index eee8cfc..d2b5edf 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -22,6 +22,7 @@
 
 #include "base/casts.h"
 #include "base/file_utils.h"
+#include "base/hiddenapi_domain.h"
 #include "base/logging.h"
 #include "base/os.h"
 #include "base/stl_util.h"
@@ -575,10 +576,12 @@
     return OatFileAssistant::kNoDexOptNeeded;
   }
 
+  std::vector<int> context_fds;
   return oat_file_assistant.GetDexOptNeeded(filter,
+                                            context.get(),
+                                            context_fds,
                                             profile_changed,
-                                            downgrade,
-                                            context.get());
+                                            downgrade);
 }
 
 static jstring DexFile_getDexFileStatus(JNIEnv* env,
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 83398ec..de43c4f 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -83,7 +83,7 @@
 }
 
 static jint VMDebug_getAllocCount(JNIEnv*, jclass, jint kind) {
-  return Runtime::Current()->GetStat(kind);
+  return static_cast<jint>(Runtime::Current()->GetStat(kind));
 }
 
 static void VMDebug_resetAllocCount(JNIEnv*, jclass, jint kinds) {
@@ -172,7 +172,9 @@
 }
 
 static jboolean VMDebug_isDebuggerConnected(JNIEnv*, jclass) {
-  return Dbg::IsDebuggerActive();
+  // This function will be replaced by the debugger when it's connected. See
+  // external/oj-libjdwp/src/share/vmDebug.c for implementation when debugger is connected.
+  return false;
 }
 
 static jboolean VMDebug_isDebuggingEnabled(JNIEnv* env, jclass) {
@@ -181,7 +183,9 @@
 }
 
 static jlong VMDebug_lastDebuggerActivity(JNIEnv*, jclass) {
-  return Dbg::LastDebuggerActivity();
+  // This function will be replaced by the debugger when it's connected. See
+  // external/oj-libjdwp/src/share/vmDebug.c for implementation when debugger is connected.
+  return -1;
 }
 
 static void ThrowUnsupportedOperationException(JNIEnv* env) {
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 399813c..efaa3d9 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -34,6 +34,7 @@
 #include "base/enums.h"
 #include "base/sdk_version.h"
 #include "class_linker-inl.h"
+#include "class_loader_context.h"
 #include "common_throws.h"
 #include "debugger.h"
 #include "dex/class_accessor-inl.h"
@@ -46,6 +47,7 @@
 #include "gc/space/image_space.h"
 #include "gc/task_processor.h"
 #include "intern_table.h"
+#include "jit/jit.h"
 #include "jni/java_vm_ext.h"
 #include "jni/jni_internal.h"
 #include "mirror/array-alloc-inl.h"
@@ -118,11 +120,11 @@
     return nullptr;
   }
   gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentNonMovingAllocator();
-  ObjPtr<mirror::Array> result = mirror::Array::Alloc<true>(soa.Self(),
-                                                            array_class,
-                                                            length,
-                                                            array_class->GetComponentSizeShift(),
-                                                            allocator);
+  ObjPtr<mirror::Array> result = mirror::Array::Alloc(soa.Self(),
+                                                      array_class,
+                                                      length,
+                                                      array_class->GetComponentSizeShift(),
+                                                      allocator);
   return soa.AddLocalReference<jobject>(result);
 }
 
@@ -145,12 +147,13 @@
     return nullptr;
   }
   gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
-  ObjPtr<mirror::Array> result = mirror::Array::Alloc<true, true>(
-      soa.Self(),
-      array_class,
-      length,
-      array_class->GetComponentSizeShift(),
-      allocator);
+  ObjPtr<mirror::Array> result =
+      mirror::Array::Alloc</*kIsInstrumented=*/ true, /*kFillUsable=*/ true>(
+          soa.Self(),
+          array_class,
+          length,
+          array_class->GetComponentSizeShift(),
+          allocator);
   return soa.AddLocalReference<jobject>(result);
 }
 
@@ -179,10 +182,6 @@
   Runtime::Current()->GetHeap()->ClampGrowthLimit();
 }
 
-static jboolean VMRuntime_isDebuggerActive(JNIEnv*, jobject) {
-  return Dbg::IsDebuggerActive();
-}
-
 static jboolean VMRuntime_isNativeDebuggable(JNIEnv*, jobject) {
   return Runtime::Current()->IsNativeDebuggable();
 }
@@ -273,6 +272,20 @@
 #endif
 }
 
+static void VMRuntime_setDisabledCompatChangesNative(JNIEnv* env, jobject,
+    jlongArray disabled_compat_changes) {
+  if (disabled_compat_changes == nullptr) {
+    return;
+  }
+  std::set<uint64_t> disabled_compat_changes_set;
+  int length = env->GetArrayLength(disabled_compat_changes);
+  jlong* elements = env->GetLongArrayElements(disabled_compat_changes, /*isCopy*/nullptr);
+  for (int i = 0; i < length; i++) {
+    disabled_compat_changes_set.insert(static_cast<uint64_t>(elements[i]));
+  }
+  Runtime::Current()->SetDisabledCompatChanges(disabled_compat_changes_set);
+}
+
 static inline size_t clamp_to_size_t(jlong n) {
   if (sizeof(jlong) > sizeof(size_t)
       && UNLIKELY(n > static_cast<jlong>(std::numeric_limits<size_t>::max()))) {
@@ -386,7 +399,6 @@
   if (string == nullptr) {
     return;
   }
-  // LOG(INFO) << "VMRuntime.preloadDexCaches resolved string=" << utf8;
   dex_cache->SetResolvedString(string_idx, string);
 }
 
@@ -406,17 +418,10 @@
   ObjPtr<mirror::Class> klass = (class_name[1] == '\0')
       ? linker->LookupPrimitiveClass(class_name[0])
       : linker->LookupClass(self, class_name, nullptr);
-  if (klass == nullptr) {
+  if (klass == nullptr || !klass->IsResolved()) {
     return;
   }
-  // LOG(INFO) << "VMRuntime.preloadDexCaches resolved klass=" << class_name;
   dex_cache->SetResolvedType(type_idx, klass);
-  // Skip uninitialized classes because filled static storage entry implies it is initialized.
-  if (!klass->IsInitialized()) {
-    // LOG(INFO) << "VMRuntime.preloadDexCaches uninitialized klass=" << class_name;
-    return;
-  }
-  // LOG(INFO) << "VMRuntime.preloadDexCaches static storage klass=" << class_name;
 }
 
 // Based on ClassLinker::ResolveField.
@@ -655,6 +660,9 @@
   Runtime::Current()->RegisterAppInfo(code_paths_vec, profile_file_str);
 }
 
+static void VMRuntime_doNotInitializeInAot() {
+}
+
 static jboolean VMRuntime_isBootClassPathOnDisk(JNIEnv* env, jclass, jstring java_instruction_set) {
   ScopedUtfChars instruction_set(env, java_instruction_set);
   if (instruction_set.c_str() == nullptr) {
@@ -667,11 +675,7 @@
     env->ThrowNew(iae.get(), message.c_str());
     return JNI_FALSE;
   }
-  std::string error_msg;
-  Runtime* runtime = Runtime::Current();
-  std::unique_ptr<ImageHeader> image_header(gc::space::ImageSpace::ReadImageHeader(
-      runtime->GetImageLocation().c_str(), isa, runtime->GetImageSpaceLoadingOrder(), &error_msg));
-  return image_header.get() != nullptr;
+  return gc::space::ImageSpace::IsBootClassPathOnDisk(isa);
 }
 
 static jstring VMRuntime_getCurrentInstructionSet(JNIEnv* env, jclass) {
@@ -723,6 +727,54 @@
   return Runtime::Current()->GetHeap()->HasBootImageSpace() ? JNI_TRUE : JNI_FALSE;
 }
 
+static void VMRuntime_bootCompleted(JNIEnv* env ATTRIBUTE_UNUSED,
+                                    jclass klass ATTRIBUTE_UNUSED) {
+  jit::Jit* jit = Runtime::Current()->GetJit();
+  if (jit != nullptr) {
+    jit->BootCompleted();
+  }
+}
+
+class ClearJitCountersVisitor : public ClassVisitor {
+ public:
+  bool operator()(ObjPtr<mirror::Class> klass) override REQUIRES_SHARED(Locks::mutator_lock_) {
+    // Avoid some types of classes that don't need their methods visited.
+    if (klass->IsProxyClass() ||
+        klass->IsArrayClass() ||
+        klass->IsPrimitive() ||
+        !klass->IsResolved() ||
+        klass->IsErroneousResolved()) {
+      return true;
+    }
+    for (ArtMethod& m : klass->GetMethods(kRuntimePointerSize)) {
+      if (!m.IsAbstract()) {
+        if (m.GetCounter() != 0) {
+          m.SetCounter(0);
+        }
+      }
+    }
+    return true;
+  }
+};
+
+static void VMRuntime_resetJitCounters(JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
+  ScopedObjectAccess soa(env);
+  ClearJitCountersVisitor visitor;
+  Runtime::Current()->GetClassLinker()->VisitClasses(&visitor);
+}
+
+static jboolean VMRuntime_isValidClassLoaderContext(JNIEnv* env,
+                                                    jclass klass ATTRIBUTE_UNUSED,
+                                                    jstring jencoded_class_loader_context) {
+  if (UNLIKELY(jencoded_class_loader_context == nullptr)) {
+    ScopedFastNativeObjectAccess soa(env);
+    ThrowNullPointerException("encoded_class_loader_context == null");
+    return false;
+  }
+  ScopedUtfChars encoded_class_loader_context(env, jencoded_class_loader_context);
+  return ClassLoaderContext::IsValidEncoding(encoded_class_loader_context.c_str());
+}
+
 static JNINativeMethod gMethods[] = {
   FAST_NATIVE_METHOD(VMRuntime, addressOf, "(Ljava/lang/Object;)J"),
   NATIVE_METHOD(VMRuntime, bootClassPath, "()Ljava/lang/String;"),
@@ -735,7 +787,6 @@
   NATIVE_METHOD(VMRuntime, setHiddenApiExemptions, "([Ljava/lang/String;)V"),
   NATIVE_METHOD(VMRuntime, setHiddenApiAccessLogSamplingRate, "(I)V"),
   NATIVE_METHOD(VMRuntime, getTargetHeapUtilization, "()F"),
-  FAST_NATIVE_METHOD(VMRuntime, isDebuggerActive, "()Z"),
   FAST_NATIVE_METHOD(VMRuntime, isNativeDebuggable, "()Z"),
   NATIVE_METHOD(VMRuntime, isJavaDebuggable, "()Z"),
   NATIVE_METHOD(VMRuntime, nativeSetTargetHeapUtilization, "(F)V"),
@@ -743,6 +794,7 @@
   FAST_NATIVE_METHOD(VMRuntime, newUnpaddedArray, "(Ljava/lang/Class;I)Ljava/lang/Object;"),
   NATIVE_METHOD(VMRuntime, properties, "()[Ljava/lang/String;"),
   NATIVE_METHOD(VMRuntime, setTargetSdkVersionNative, "(I)V"),
+  NATIVE_METHOD(VMRuntime, setDisabledCompatChangesNative, "([J)V"),
   NATIVE_METHOD(VMRuntime, registerNativeAllocation, "(J)V"),
   NATIVE_METHOD(VMRuntime, registerNativeFree, "(J)V"),
   NATIVE_METHOD(VMRuntime, getNotifyNativeInterval, "()I"),
@@ -765,6 +817,7 @@
   FAST_NATIVE_METHOD(VMRuntime, isCheckJniEnabled, "()Z"),
   NATIVE_METHOD(VMRuntime, preloadDexCaches, "()V"),
   NATIVE_METHOD(VMRuntime, registerAppInfo, "(Ljava/lang/String;[Ljava/lang/String;)V"),
+  CRITICAL_NATIVE_METHOD(VMRuntime, doNotInitializeInAot, "()V"),
   NATIVE_METHOD(VMRuntime, isBootClassPathOnDisk, "(Ljava/lang/String;)Z"),
   NATIVE_METHOD(VMRuntime, getCurrentInstructionSet, "()Ljava/lang/String;"),
   NATIVE_METHOD(VMRuntime, didPruneDalvikCache, "()Z"),
@@ -772,6 +825,9 @@
   NATIVE_METHOD(VMRuntime, setDedupeHiddenApiWarnings, "(Z)V"),
   NATIVE_METHOD(VMRuntime, setProcessPackageName, "(Ljava/lang/String;)V"),
   NATIVE_METHOD(VMRuntime, setProcessDataDirectory, "(Ljava/lang/String;)V"),
+  NATIVE_METHOD(VMRuntime, bootCompleted, "()V"),
+  NATIVE_METHOD(VMRuntime, resetJitCounters, "()V"),
+  NATIVE_METHOD(VMRuntime, isValidClassLoaderContext, "(Ljava/lang/String;)Z"),
 };
 
 void register_dalvik_system_VMRuntime(JNIEnv* env) {
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index de28c28..c37b8bb 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -134,22 +134,25 @@
 
 // Must match values in com.android.internal.os.Zygote.
 enum {
-  DEBUG_ENABLE_JDWP                  = 1,
-  DEBUG_ENABLE_CHECKJNI              = 1 << 1,
-  DEBUG_ENABLE_ASSERT                = 1 << 2,
-  DEBUG_ENABLE_SAFEMODE              = 1 << 3,
-  DEBUG_ENABLE_JNI_LOGGING           = 1 << 4,
-  DEBUG_GENERATE_DEBUG_INFO          = 1 << 5,
-  DEBUG_ALWAYS_JIT                   = 1 << 6,
-  DEBUG_NATIVE_DEBUGGABLE            = 1 << 7,
-  DEBUG_JAVA_DEBUGGABLE              = 1 << 8,
-  DISABLE_VERIFIER                   = 1 << 9,
-  ONLY_USE_SYSTEM_OAT_FILES          = 1 << 10,
-  DEBUG_GENERATE_MINI_DEBUG_INFO     = 1 << 11,
-  HIDDEN_API_ENFORCEMENT_POLICY_MASK = (1 << 12)
-                                     | (1 << 13),
-  PROFILE_SYSTEM_SERVER              = 1 << 14,
-  USE_APP_IMAGE_STARTUP_CACHE        = 1 << 16,
+  DEBUG_ENABLE_JDWP                   = 1,
+  DEBUG_ENABLE_CHECKJNI               = 1 << 1,
+  DEBUG_ENABLE_ASSERT                 = 1 << 2,
+  DEBUG_ENABLE_SAFEMODE               = 1 << 3,
+  DEBUG_ENABLE_JNI_LOGGING            = 1 << 4,
+  DEBUG_GENERATE_DEBUG_INFO           = 1 << 5,
+  DEBUG_ALWAYS_JIT                    = 1 << 6,
+  DEBUG_NATIVE_DEBUGGABLE             = 1 << 7,
+  DEBUG_JAVA_DEBUGGABLE               = 1 << 8,
+  DISABLE_VERIFIER                    = 1 << 9,
+  ONLY_USE_SYSTEM_OAT_FILES           = 1 << 10,
+  DEBUG_GENERATE_MINI_DEBUG_INFO      = 1 << 11,
+  HIDDEN_API_ENFORCEMENT_POLICY_MASK  = (1 << 12)
+                                      | (1 << 13),
+  PROFILE_SYSTEM_SERVER               = 1 << 14,
+  PROFILE_FROM_SHELL                  = 1 << 15,
+  USE_APP_IMAGE_STARTUP_CACHE         = 1 << 16,
+  DEBUG_IGNORE_APP_SIGNAL_HANDLER     = 1 << 17,
+  DISABLE_TEST_API_ENFORCEMENT_POLICY = 1 << 18,
 
   // bits to shift (flags & HIDDEN_API_ENFORCEMENT_POLICY_MASK) by to get a value
   // corresponding to hiddenapi::EnforcementPolicy
@@ -235,6 +238,14 @@
     runtime_flags &= ~DEBUG_GENERATE_DEBUG_INFO;
   }
 
+  if ((runtime_flags & DEBUG_IGNORE_APP_SIGNAL_HANDLER) != 0) {
+    runtime->SetSignalHookDebuggable(true);
+    runtime_flags &= ~DEBUG_IGNORE_APP_SIGNAL_HANDLER;
+  }
+
+  runtime->SetProfileableFromShell((runtime_flags & PROFILE_FROM_SHELL) != 0);
+  runtime_flags &= ~PROFILE_FROM_SHELL;
+
   return runtime_flags;
 }
 
@@ -249,15 +260,16 @@
 }
 
 static void ZygoteHooks_nativePostZygoteFork(JNIEnv*, jclass) {
-  Runtime* runtime = Runtime::Current();
-  if (runtime->IsZygote()) {
-    runtime->PostZygoteFork();
-  }
+  Runtime::Current()->PostZygoteFork();
 }
 
 static void ZygoteHooks_nativePostForkSystemServer(JNIEnv* env ATTRIBUTE_UNUSED,
-                                                   jclass klass ATTRIBUTE_UNUSED) {
-  Runtime::Current()->SetSystemServer(true);
+                                                   jclass klass ATTRIBUTE_UNUSED,
+                                                   jint runtime_flags) {
+  // Set the runtime state as the first thing, in case JIT and other services
+  // start querying it.
+  Runtime::Current()->SetAsSystemServer();
+
   // This JIT code cache for system server is created whilst the runtime is still single threaded.
   // System server has a window where it can create executable pages for this purpose, but this is
   // turned off after this hook. Consequently, the only JIT mode supported is the dual-view JIT
@@ -266,10 +278,11 @@
     Runtime::Current()->GetJit()->GetCodeCache()->PostForkChildAction(
         /* is_system_server= */ true, /* is_zygote= */ false);
   }
-  // Allow picking up verity-protected files from the dalvik cache for pre-caching. This window will
-  // be closed in the common nativePostForkChild below.
-  Runtime::Current()->GetOatFileManager().SetOnlyUseSystemOatFiles(
-      /*enforce=*/false, /*assert_no_files_loaded=*/false);
+  // Enable profiling if required based on the flags. This is done here instead of in
+  // nativePostForkChild since nativePostForkChild is called after loading the system server oat
+  // files.
+  bool profile_system_server = (runtime_flags & PROFILE_SYSTEM_SERVER) == PROFILE_SYSTEM_SERVER;
+  Runtime::Current()->GetJITOptions()->SetSaveProfilingInfo(profile_system_server);
 }
 
 static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
@@ -280,6 +293,9 @@
                                             jboolean is_zygote,
                                             jstring instruction_set) {
   DCHECK(!(is_system_server && is_zygote));
+  // Set the runtime state as the first thing, in case JIT and other services
+  // start querying it.
+  Runtime::Current()->SetAsZygoteChild(is_system_server, is_zygote);
 
   Thread* thread = reinterpret_cast<Thread*>(token);
   // Our system thread ID, etc, has changed so reset Thread state.
@@ -294,18 +310,22 @@
     runtime_flags &= ~DISABLE_VERIFIER;
   }
 
-  bool only_use_system_oat_files = false;
   if ((runtime_flags & ONLY_USE_SYSTEM_OAT_FILES) != 0 || is_system_server) {
-    only_use_system_oat_files = true;
+    runtime->GetOatFileManager().SetOnlyUseSystemOatFiles();
     runtime_flags &= ~ONLY_USE_SYSTEM_OAT_FILES;
   }
-  runtime->GetOatFileManager().SetOnlyUseSystemOatFiles(only_use_system_oat_files,
-                                                        !is_system_server);
 
   api_enforcement_policy = hiddenapi::EnforcementPolicyFromInt(
       (runtime_flags & HIDDEN_API_ENFORCEMENT_POLICY_MASK) >> API_ENFORCEMENT_POLICY_SHIFT);
   runtime_flags &= ~HIDDEN_API_ENFORCEMENT_POLICY_MASK;
 
+  if ((runtime_flags & DISABLE_TEST_API_ENFORCEMENT_POLICY) != 0u) {
+    runtime->SetTestApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kDisabled);
+  } else {
+    runtime->SetTestApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kEnabled);
+  }
+  runtime_flags &= ~DISABLE_TEST_API_ENFORCEMENT_POLICY;
+
   bool profile_system_server = (runtime_flags & PROFILE_SYSTEM_SERVER) == PROFILE_SYSTEM_SERVER;
   runtime_flags &= ~PROFILE_SYSTEM_SERVER;
 
@@ -385,13 +405,6 @@
     std::srand(static_cast<uint32_t>(NanoTime()));
   }
 
-  if (is_zygote) {
-    // If creating a child-zygote, do not call into the runtime's post-fork logic.
-    // Doing so would spin up threads for Binder and JDWP. Instead, the Java side
-    // of the child process will call a static main in a class specified by the parent.
-    return;
-  }
-
   if (instruction_set != nullptr && !is_system_server) {
     ScopedUtfChars isa_string(env, instruction_set);
     InstructionSet isa = GetInstructionSetFromString(isa_string.c_str());
@@ -399,11 +412,12 @@
     if (isa != InstructionSet::kNone && isa != kRuntimeISA) {
       action = Runtime::NativeBridgeAction::kInitialize;
     }
-    runtime->InitNonZygoteOrPostFork(env, is_system_server, action, isa_string.c_str());
+    runtime->InitNonZygoteOrPostFork(env, is_system_server, is_zygote, action, isa_string.c_str());
   } else {
     runtime->InitNonZygoteOrPostFork(
         env,
         is_system_server,
+        is_zygote,
         Runtime::NativeBridgeAction::kUnload,
         /*isa=*/ nullptr,
         profile_system_server);
@@ -423,7 +437,7 @@
 static JNINativeMethod gMethods[] = {
   NATIVE_METHOD(ZygoteHooks, nativePreFork, "()J"),
   NATIVE_METHOD(ZygoteHooks, nativePostZygoteFork, "()V"),
-  NATIVE_METHOD(ZygoteHooks, nativePostForkSystemServer, "()V"),
+  NATIVE_METHOD(ZygoteHooks, nativePostForkSystemServer, "(I)V"),
   NATIVE_METHOD(ZygoteHooks, nativePostForkChild, "(JIZZLjava/lang/String;)V"),
   NATIVE_METHOD(ZygoteHooks, startZygoteNoThreadCreation, "()V"),
   NATIVE_METHOD(ZygoteHooks, stopZygoteNoThreadCreation, "()V"),
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 2b75c59..2c537c6 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -39,6 +39,7 @@
 #include "mirror/object-inl.h"
 #include "mirror/object_array-alloc-inl.h"
 #include "mirror/object_array-inl.h"
+#include "mirror/proxy.h"
 #include "mirror/string-alloc-inl.h"
 #include "mirror/string-inl.h"
 #include "native_util.h"
@@ -48,18 +49,24 @@
 #include "nth_caller_visitor.h"
 #include "obj_ptr-inl.h"
 #include "reflection.h"
+#include "reflective_handle_scope-inl.h"
 #include "scoped_fast_native_object_access-inl.h"
 #include "scoped_thread_state_change-inl.h"
 #include "well_known_classes.h"
 
 namespace art {
 
+// Should be the same as dalvik.system.VMRuntime.PREVENT_META_REFLECTION_BLACKLIST_ACCESS.
+// Corresponds to a bug id.
+static constexpr uint64_t kPreventMetaReflectionBlacklistAccess = 142365358;
+
 // Walks the stack, finds the caller of this reflective call and returns
 // a hiddenapi AccessContext formed from its declaring class.
 static hiddenapi::AccessContext GetReflectionCaller(Thread* self)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  // Walk the stack and find the first frame not from java.lang.Class and not
-  // from java.lang.invoke. This is very expensive. Save this till the last.
+  // Walk the stack and find the first frame not from java.lang.Class,
+  // java.lang.invoke or java.lang.reflect. This is very expensive.
+  // Save this till the last.
   struct FirstExternalCallerVisitor : public StackVisitor {
     explicit FirstExternalCallerVisitor(Thread* thread)
         : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
@@ -92,6 +99,16 @@
             && !m->IsClassInitializer()) {
           return true;
         }
+        // Check for classes in the java.lang.reflect package, except for java.lang.reflect.Proxy.
+        // java.lang.reflect.Proxy does its own hidden api checks (https://r.android.com/915496),
+        // and walking over this frame would cause a null pointer dereference
+        // (e.g. in 691-hiddenapi-proxy).
+        ObjPtr<mirror::Class> proxy_class = GetClassRoot<mirror::Proxy>();
+        if (declaring_class->IsInSamePackage(proxy_class) && declaring_class != proxy_class) {
+          if (Runtime::Current()->isChangeEnabled(kPreventMetaReflectionBlacklistAccess)) {
+            return true;
+          }
+        }
       }
 
       caller = m;
@@ -218,9 +235,17 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass = hs.NewHandle(DecodeClass(soa, javaThis));
+  if (klass->IsObsoleteObject()) {
+    ThrowRuntimeException("Obsolete Object!");
+    return nullptr;
+  }
 
   if (klass->IsProxyClass()) {
-    return soa.AddLocalReference<jobjectArray>(klass->GetProxyInterfaces()->Clone(soa.Self()));
+    StackHandleScope<1> hs2(soa.Self());
+    Handle<mirror::ObjectArray<mirror::Class>> interfaces =
+        hs2.NewHandle(klass->GetProxyInterfaces());
+    return soa.AddLocalReference<jobjectArray>(
+        mirror::ObjectArray<mirror::Class>::Clone(interfaces, soa.Self()));
   }
 
   const dex::TypeList* iface_list = klass->GetInterfaceTypeList();
@@ -258,6 +283,10 @@
     ObjPtr<mirror::Class> klass,
     bool public_only,
     bool force_resolve) REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (UNLIKELY(klass->IsObsoleteObject())) {
+    ThrowRuntimeException("Obsolete Object!");
+    return nullptr;
+  }
   StackHandleScope<1> hs(self);
   IterationRange<StrideIterator<ArtField>> ifields = klass->GetIFields();
   IterationRange<StrideIterator<ArtField>> sfields = klass->GetSFields();
@@ -382,6 +411,10 @@
                                                                    ObjPtr<mirror::Class> c,
                                                                    ObjPtr<mirror::String> name)
     REQUIRES_SHARED(Locks::mutator_lock_) {
+  if (UNLIKELY(c->IsObsoleteObject())) {
+    ThrowRuntimeException("Obsolete Object!");
+    return nullptr;
+  }
   ArtField* art_field = FindFieldByName(name, c->GetIFieldsPtr());
   if (art_field != nullptr) {
     return mirror::Field::CreateFromArtField<kRuntimePointerSize>(self, art_field, true);
@@ -400,6 +433,10 @@
   DCHECK(name != nullptr);
   DCHECK(self != nullptr);
 
+  if (UNLIKELY(clazz->IsObsoleteObject())) {
+    ThrowRuntimeException("Obsolete Object!");
+    return nullptr;
+  }
   StackHandleScope<2> hs(self);
   MutableHandle<mirror::Class> h_clazz(hs.NewHandle(clazz));
   Handle<mirror::String> h_name(hs.NewHandle(name));
@@ -497,10 +534,15 @@
   DCHECK(!Runtime::Current()->IsActiveTransaction());
 
   StackHandleScope<1> hs(soa.Self());
+  ObjPtr<mirror::Class> klass = DecodeClass(soa, javaThis);
+  if (UNLIKELY(klass->IsObsoleteObject())) {
+    ThrowRuntimeException("Obsolete Object!");
+    return nullptr;
+  }
   Handle<mirror::Constructor> result = hs.NewHandle(
       mirror::Class::GetDeclaredConstructorInternal<kRuntimePointerSize, false>(
       soa.Self(),
-      DecodeClass(soa, javaThis),
+      klass,
       soa.Decode<mirror::ObjectArray<mirror::Class>>(args)));
   if (result == nullptr || ShouldDenyAccessToMember(result->GetArtMethod(), soa.Self())) {
     return nullptr;
@@ -525,6 +567,10 @@
   bool public_only = (publicOnly != JNI_FALSE);
   hiddenapi::AccessContext hiddenapi_context = GetReflectionCaller(soa.Self());
   Handle<mirror::Class> h_klass = hs.NewHandle(DecodeClass(soa, javaThis));
+  if (UNLIKELY(h_klass->IsObsoleteObject())) {
+    ThrowRuntimeException("Obsolete Object!");
+    return nullptr;
+  }
   size_t constructor_count = 0;
   // Two pass approach for speed.
   for (auto& m : h_klass->GetDirectMethods(kRuntimePointerSize)) {
@@ -559,10 +605,15 @@
   StackHandleScope<1> hs(soa.Self());
   DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
   DCHECK(!Runtime::Current()->IsActiveTransaction());
+  ObjPtr<mirror::Class> klass = DecodeClass(soa, javaThis);
+  if (UNLIKELY(klass->IsObsoleteObject())) {
+    ThrowRuntimeException("Obsolete Object!");
+    return nullptr;
+  }
   Handle<mirror::Method> result = hs.NewHandle(
       mirror::Class::GetDeclaredMethodInternal<kRuntimePointerSize, false>(
           soa.Self(),
-          DecodeClass(soa, javaThis),
+          klass,
           soa.Decode<mirror::String>(name),
           soa.Decode<mirror::ObjectArray<mirror::Class>>(args),
           GetHiddenapiAccessContextFunction(soa.Self())));
@@ -581,6 +632,10 @@
   bool public_only = (publicOnly != JNI_FALSE);
 
   Handle<mirror::Class> klass = hs.NewHandle(DecodeClass(soa, javaThis));
+  if (klass->IsObsoleteObject()) {
+    ThrowRuntimeException("Obsolete Object!");
+    return nullptr;
+  }
   size_t num_methods = 0;
   for (ArtMethod& m : klass->GetDeclaredMethods(kRuntimePointerSize)) {
     uint32_t modifiers = m.GetAccessFlags();
@@ -619,6 +674,10 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<2> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  if (klass->IsObsoleteObject()) {
+    ThrowRuntimeException("Obsolete Object!");
+    return nullptr;
+  }
 
   // Handle public contract to throw NPE if the "annotationClass" argument was null.
   if (UNLIKELY(annotationClass == nullptr)) {
@@ -638,6 +697,10 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  if (klass->IsObsoleteObject()) {
+    ThrowRuntimeException("Obsolete Object!");
+    return nullptr;
+  }
   if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     // Return an empty array instead of a null pointer.
     ObjPtr<mirror::Class>  annotation_array_class =
@@ -655,6 +718,10 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  if (klass->IsObsoleteObject()) {
+    ThrowRuntimeException("Obsolete Object!");
+    return nullptr;
+  }
   ObjPtr<mirror::ObjectArray<mirror::Class>> classes = nullptr;
   if (!klass->IsProxyClass() && klass->GetDexCache() != nullptr) {
     classes = annotations::GetDeclaredClasses(klass);
@@ -678,6 +745,10 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  if (klass->IsObsoleteObject()) {
+    ThrowRuntimeException("Obsolete Object!");
+    return nullptr;
+  }
   if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return nullptr;
   }
@@ -688,6 +759,10 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  if (klass->IsObsoleteObject()) {
+    ThrowRuntimeException("Obsolete Object!");
+    return nullptr;
+  }
   if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return nullptr;
   }
@@ -704,6 +779,10 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  if (klass->IsObsoleteObject()) {
+    ThrowRuntimeException("Obsolete Object!");
+    return nullptr;
+  }
   if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return nullptr;
   }
@@ -720,6 +799,10 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  if (klass->IsObsoleteObject()) {
+    ThrowRuntimeException("Obsolete Object!");
+    return 0;
+  }
   return mirror::Class::GetInnerClassFlags(klass, defaultValue);
 }
 
@@ -727,6 +810,10 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  if (klass->IsObsoleteObject()) {
+    ThrowRuntimeException("Obsolete Object!");
+    return nullptr;
+  }
   if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return nullptr;
   }
@@ -741,6 +828,10 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  if (klass->IsObsoleteObject()) {
+    ThrowRuntimeException("Obsolete Object!");
+    return nullptr;
+  }
   if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return nullptr;
   }
@@ -752,6 +843,10 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  if (klass->IsObsoleteObject()) {
+    ThrowRuntimeException("Obsolete Object!");
+    return 0;
+  }
   if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return false;
   }
@@ -767,6 +862,10 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<2> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  if (klass->IsObsoleteObject()) {
+    ThrowRuntimeException("Obsolete Object!");
+    return false;
+  }
   if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return false;
   }
@@ -778,6 +877,10 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
+  if (klass->IsObsoleteObject()) {
+    ThrowRuntimeException("Obsolete Object!");
+    return nullptr;
+  }
   if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return nullptr;
   }
@@ -792,6 +895,10 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<4> hs(soa.Self());
   Handle<mirror::Class> klass = hs.NewHandle(DecodeClass(soa, javaThis));
+  if (klass->IsObsoleteObject()) {
+    ThrowRuntimeException("Obsolete Object!");
+    return nullptr;
+  }
   if (UNLIKELY(klass->GetPrimitiveType() != 0 || klass->IsInterface() || klass->IsArrayClass() ||
                klass->IsAbstract())) {
     soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;",
@@ -810,11 +917,10 @@
       return nullptr;
     }
   }
-  ArtMethod* constructor = klass->GetDeclaredConstructor(
-      soa.Self(),
-      ScopedNullHandle<mirror::ObjectArray<mirror::Class>>(),
-      kRuntimePointerSize);
-  if (UNLIKELY(constructor == nullptr) || ShouldDenyAccessToMember(constructor, soa.Self())) {
+  StackArtMethodHandleScope<1> mhs(soa.Self());
+  ReflectiveHandle<ArtMethod> constructor(mhs.NewMethodHandle(klass->GetDeclaredConstructor(
+      soa.Self(), ScopedNullHandle<mirror::ObjectArray<mirror::Class>>(), kRuntimePointerSize)));
+  if (UNLIKELY(constructor == nullptr) || ShouldDenyAccessToMember(constructor.Get(), soa.Self())) {
     soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;",
                                    "%s has no zero argument constructor",
                                    klass->PrettyClass().c_str());
@@ -823,7 +929,7 @@
   // Invoke the string allocator to return an empty string for the string class.
   if (klass->IsStringClass()) {
     gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
-    ObjPtr<mirror::Object> obj = mirror::String::AllocEmptyString<true>(soa.Self(), allocator_type);
+    ObjPtr<mirror::Object> obj = mirror::String::AllocEmptyString(soa.Self(), allocator_type);
     if (UNLIKELY(soa.Self()->IsExceptionPending())) {
       return nullptr;
     } else {
@@ -842,9 +948,9 @@
       caller.Assign(GetCallingClass(soa.Self(), 1));
     }
     if (UNLIKELY(caller != nullptr && !VerifyAccess(receiver.Get(),
-                                                          declaring_class,
-                                                          constructor->GetAccessFlags(),
-                                                          caller.Get()))) {
+                                                    declaring_class,
+                                                    constructor->GetAccessFlags(),
+                                                    caller.Get()))) {
       soa.Self()->ThrowNewExceptionF(
           "Ljava/lang/IllegalAccessException;", "%s is not accessible from %s",
           constructor->PrettyMethod().c_str(), caller->PrettyClass().c_str());
@@ -852,12 +958,15 @@
     }
   }
   // Ensure that we are initialized.
-  if (UNLIKELY(!declaring_class->IsInitialized())) {
-    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(
-        soa.Self(), hs.NewHandle(declaring_class), true, true)) {
-      soa.Self()->AssertPendingException();
+  if (UNLIKELY(!declaring_class->IsVisiblyInitialized())) {
+    Thread* self = soa.Self();
+    Handle<mirror::Class> h_class = hs.NewHandle(declaring_class);
+    if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
+                      self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
+      DCHECK(self->IsExceptionPending());
       return nullptr;
     }
+    DCHECK(h_class->IsInitializing());
   }
   // Invoke the constructor.
   JValue result;
diff --git a/runtime/native/java_lang_Object.cc b/runtime/native/java_lang_Object.cc
index 48540f8..8fc10d1 100644
--- a/runtime/native/java_lang_Object.cc
+++ b/runtime/native/java_lang_Object.cc
@@ -18,6 +18,7 @@
 
 #include "nativehelper/jni_macros.h"
 
+#include "handle_scope-inl.h"
 #include "jni/jni_internal.h"
 #include "mirror/object-inl.h"
 #include "native_util.h"
@@ -27,8 +28,9 @@
 
 static jobject Object_internalClone(JNIEnv* env, jobject java_this) {
   ScopedFastNativeObjectAccess soa(env);
-  ObjPtr<mirror::Object> o = soa.Decode<mirror::Object>(java_this);
-  return soa.AddLocalReference<jobject>(o->Clone(soa.Self()));
+  StackHandleScope<1u> hs(soa.Self());
+  Handle<mirror::Object> o = hs.NewHandle(soa.Decode<mirror::Object>(java_this));
+  return soa.AddLocalReference<jobject>(mirror::Class::Clone(o, soa.Self()));
 }
 
 static void Object_notify(JNIEnv* env, jobject java_this) {
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index 83498f6..7c7c553 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -73,11 +73,11 @@
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::String> string_this(hs.NewHandle(soa.Decode<mirror::String>(java_this)));
   gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
-  ObjPtr<mirror::String> result = mirror::String::AllocFromString<true>(soa.Self(),
-                                                                        length,
-                                                                        string_this,
-                                                                        start,
-                                                                        allocator_type);
+  ObjPtr<mirror::String> result = mirror::String::AllocFromString(soa.Self(),
+                                                                  length,
+                                                                  string_this,
+                                                                  start,
+                                                                  allocator_type);
   return soa.AddLocalReference<jstring>(result);
 }
 
@@ -105,8 +105,9 @@
 
 static jcharArray String_toCharArray(JNIEnv* env, jobject java_this) {
   ScopedFastNativeObjectAccess soa(env);
-  ObjPtr<mirror::String> s = soa.Decode<mirror::String>(java_this);
-  return soa.AddLocalReference<jcharArray>(s->ToCharArray(soa.Self()));
+  StackHandleScope<1u> hs(soa.Self());
+  Handle<mirror::String> s = hs.NewHandle(soa.Decode<mirror::String>(java_this));
+  return soa.AddLocalReference<jcharArray>(mirror::String::ToCharArray(s, soa.Self()));
 }
 
 static JNINativeMethod gMethods[] = {
diff --git a/runtime/native/java_lang_StringFactory.cc b/runtime/native/java_lang_StringFactory.cc
index 13f8d5b..178d5da 100644
--- a/runtime/native/java_lang_StringFactory.cc
+++ b/runtime/native/java_lang_StringFactory.cc
@@ -47,12 +47,12 @@
     return nullptr;
   }
   gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
-  ObjPtr<mirror::String> result = mirror::String::AllocFromByteArray<true>(soa.Self(),
-                                                                           byte_count,
-                                                                           byte_array,
-                                                                           offset,
-                                                                           high,
-                                                                           allocator_type);
+  ObjPtr<mirror::String> result = mirror::String::AllocFromByteArray(soa.Self(),
+                                                                     byte_count,
+                                                                     byte_array,
+                                                                     offset,
+                                                                     high,
+                                                                     allocator_type);
   return soa.AddLocalReference<jstring>(result);
 }
 
@@ -64,11 +64,11 @@
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::CharArray> char_array(hs.NewHandle(soa.Decode<mirror::CharArray>(java_data)));
   gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
-  ObjPtr<mirror::String> result = mirror::String::AllocFromCharArray<true>(soa.Self(),
-                                                                           char_count,
-                                                                           char_array,
-                                                                           offset,
-                                                                           allocator_type);
+  ObjPtr<mirror::String> result = mirror::String::AllocFromCharArray(soa.Self(),
+                                                                     char_count,
+                                                                     char_array,
+                                                                     offset,
+                                                                     allocator_type);
   return soa.AddLocalReference<jstring>(result);
 }
 
@@ -81,11 +81,11 @@
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::String> string(hs.NewHandle(soa.Decode<mirror::String>(to_copy)));
   gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
-  ObjPtr<mirror::String> result = mirror::String::AllocFromString<true>(soa.Self(),
-                                                                        string->GetLength(),
-                                                                        string,
-                                                                        0,
-                                                                        allocator_type);
+  ObjPtr<mirror::String> result = mirror::String::AllocFromString(soa.Self(),
+                                                                  string->GetLength(),
+                                                                  string,
+                                                                  /*offset=*/ 0,
+                                                                  allocator_type);
   return soa.AddLocalReference<jstring>(result);
 }
 
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index 37b3fe6..5f21998 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -112,6 +112,13 @@
   return -1;  // Unreachable.
 }
 
+static jint Thread_getNativeTid(JNIEnv* env, jobject java_thread) {
+  ScopedFastNativeObjectAccess soa(env);
+  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
+  Thread* thread = Thread::FromManagedThread(soa, java_thread);
+  return (thread != nullptr) ? thread->GetTid() : 0;
+}
+
 static jboolean Thread_holdsLock(JNIEnv* env, jclass, jobject java_object) {
   ScopedObjectAccess soa(env);
   ObjPtr<mirror::Object> object = soa.Decode<mirror::Object>(java_object);
@@ -198,6 +205,7 @@
   FAST_NATIVE_METHOD(Thread, currentThread, "()Ljava/lang/Thread;"),
   FAST_NATIVE_METHOD(Thread, interrupted, "()Z"),
   FAST_NATIVE_METHOD(Thread, isInterrupted, "()Z"),
+  FAST_NATIVE_METHOD(Thread, getNativeTid, "()I"),
   NATIVE_METHOD(Thread, nativeCreate, "(Ljava/lang/Thread;JZ)V"),
   NATIVE_METHOD(Thread, nativeGetStatus, "(Z)I"),
   NATIVE_METHOD(Thread, holdsLock, "(Ljava/lang/Object;)Z"),
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index f21ded9..42b6c22 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -129,15 +129,17 @@
   soa.Self()->AssertThreadSuspensionIsAllowable();
   ObjPtr<mirror::Class> declaring_class = (*f)->GetDeclaringClass();
   if ((*f)->IsStatic()) {
-    if (UNLIKELY(!declaring_class->IsInitialized())) {
-      StackHandleScope<2> hs(soa.Self());
+    if (UNLIKELY(!declaring_class->IsVisiblyInitialized())) {
+      Thread* self = soa.Self();
+      StackHandleScope<2> hs(self);
       HandleWrapperObjPtr<mirror::Field> h_f(hs.NewHandleWrapper(f));
       HandleWrapperObjPtr<mirror::Class> h_klass(hs.NewHandleWrapper(&declaring_class));
-      ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
-      if (UNLIKELY(!class_linker->EnsureInitialized(soa.Self(), h_klass, true, true))) {
-        DCHECK(soa.Self()->IsExceptionPending());
+      if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
+                        self, h_klass, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
+        DCHECK(self->IsExceptionPending());
         return false;
       }
+      DCHECK(h_klass->IsInitializing());
     }
     *class_or_rcvr = declaring_class;
     return true;
@@ -256,7 +258,7 @@
                                                bool allow_references,
                                                const JValue& new_value)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  DCHECK(f->GetDeclaringClass()->IsInitialized());
+  DCHECK(f->GetDeclaringClass()->IsInitializing());
   MemberOffset offset(f->GetOffset());
   const bool is_volatile = f->IsVolatile();
   switch (field_type) {
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 4525157..0d9a257 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -59,9 +59,11 @@
       ++i;
     }
     CHECK_NE(throws_index, -1);
-    ObjPtr<mirror::ObjectArray<mirror::Class>> declared_exceptions =
-        klass->GetProxyThrows()->Get(throws_index);
-    return soa.AddLocalReference<jobjectArray>(declared_exceptions->Clone(soa.Self()));
+    StackHandleScope<1u> hs(soa.Self());
+    Handle<mirror::ObjectArray<mirror::Class>> declared_exceptions =
+        hs.NewHandle(klass->GetProxyThrows()->Get(throws_index));
+    return soa.AddLocalReference<jobjectArray>(
+        mirror::ObjectArray<mirror::Class>::Clone(declared_exceptions, soa.Self()));
   } else {
     ObjPtr<mirror::ObjectArray<mirror::Class>> result_array =
         annotations::GetExceptionTypesForMethod(method);
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 028675d..d405735 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -20,6 +20,7 @@
 
 #include "base/file_utils.h"
 #include "base/mutex.h"
+#include "base/endian_utils.h"
 #include "debugger.h"
 #include "gc/heap.h"
 #include "jni/jni_internal.h"
@@ -101,6 +102,57 @@
 static const int kThstBytesPerEntry = 18;
 static const int kThstHeaderLen = 4;
 
+static constexpr uint8_t ToJdwpThreadStatus(ThreadState state) {
+  /*
+  * ThreadStatus constants.
+  */
+  enum JdwpThreadStatus : uint8_t {
+    TS_ZOMBIE   = 0,
+    TS_RUNNING  = 1,  // RUNNING
+    TS_SLEEPING = 2,  // (in Thread.sleep())
+    TS_MONITOR  = 3,  // WAITING (monitor wait)
+    TS_WAIT     = 4,  // (in Object.wait())
+  };
+  switch (state) {
+    case kBlocked:
+      return TS_MONITOR;
+    case kNative:
+    case kRunnable:
+    case kSuspended:
+      return TS_RUNNING;
+    case kSleeping:
+      return TS_SLEEPING;
+    case kStarting:
+    case kTerminated:
+      return TS_ZOMBIE;
+    case kTimedWaiting:
+    case kWaitingForTaskProcessor:
+    case kWaitingForLockInflation:
+    case kWaitingForCheckPointsToRun:
+    case kWaitingForDebuggerSend:
+    case kWaitingForDebuggerSuspension:
+    case kWaitingForDebuggerToAttach:
+    case kWaitingForDeoptimization:
+    case kWaitingForGcToComplete:
+    case kWaitingForGetObjectsAllocated:
+    case kWaitingForJniOnLoad:
+    case kWaitingForMethodTracingStart:
+    case kWaitingForSignalCatcherOutput:
+    case kWaitingForVisitObjects:
+    case kWaitingInMainDebuggerLoop:
+    case kWaitingInMainSignalCatcherLoop:
+    case kWaitingPerformingGc:
+    case kWaitingWeakGcRootRead:
+    case kWaitingForGcThreadFlip:
+    case kNativeForAbort:
+    case kWaiting:
+      return TS_WAIT;
+      // Don't add a 'default' here so the compiler can spot incompatible enum changes.
+  }
+  LOG(FATAL) << "Unknown thread state: " << state;
+  UNREACHABLE();
+}
+
 static void ThreadStatsGetterCallback(Thread* t, void* context) {
   /*
    * Generate the contents of a THST chunk.  The data encompasses all known
@@ -130,12 +182,12 @@
   GetTaskStats(t->GetTid(), &native_thread_state, &utime, &stime, &task_cpu);
 
   std::vector<uint8_t>& bytes = *reinterpret_cast<std::vector<uint8_t>*>(context);
-  JDWP::Append4BE(bytes, t->GetThreadId());
-  JDWP::Append1BE(bytes, Dbg::ToJdwpThreadStatus(t->GetState()));
-  JDWP::Append4BE(bytes, t->GetTid());
-  JDWP::Append4BE(bytes, utime);
-  JDWP::Append4BE(bytes, stime);
-  JDWP::Append1BE(bytes, t->IsDaemon());
+  Append4BE(bytes, t->GetThreadId());
+  Append1BE(bytes, ToJdwpThreadStatus(t->GetState()));
+  Append4BE(bytes, t->GetTid());
+  Append4BE(bytes, utime);
+  Append4BE(bytes, stime);
+  Append1BE(bytes, t->IsDaemon());
 }
 
 static jbyteArray DdmVmInternal_getThreadStats(JNIEnv* env, jclass) {
@@ -148,9 +200,9 @@
     uint16_t thread_count = 0;
     thread_list->ForEach(ThreadCountCallback, &thread_count);
 
-    JDWP::Append1BE(bytes, kThstHeaderLen);
-    JDWP::Append1BE(bytes, kThstBytesPerEntry);
-    JDWP::Append2BE(bytes, thread_count);
+    Append1BE(bytes, kThstHeaderLen);
+    Append1BE(bytes, kThstBytesPerEntry);
+    Append2BE(bytes, thread_count);
 
     thread_list->ForEach(ThreadStatsGetterCallback, &bytes);
   }
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index def48e8..0651f0c 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -110,7 +110,16 @@
 #endif
 }
 
+void PreZygoteForkNativeBridge() {
+  android::PreZygoteForkNativeBridge();
+}
+
 void InitializeNativeBridge(JNIEnv* env, const char* instruction_set) {
+  if (android::NativeBridgeInitialized()) {
+    // This happens in apps forked from app-zygote, since native bridge
+    // is initialized in the zygote.
+    return;
+  }
   if (android::InitializeNativeBridge(env, instruction_set)) {
     if (android::NativeBridgeGetVersion() >= 2U) {
 #ifdef _NSIG  // Undefined on Apple, but we don't support running on Mac, anyways.
diff --git a/runtime/native_bridge_art_interface.h b/runtime/native_bridge_art_interface.h
index c86e5da..873cd1f 100644
--- a/runtime/native_bridge_art_interface.h
+++ b/runtime/native_bridge_art_interface.h
@@ -31,6 +31,8 @@
 // This is mostly for testing purposes, as in a full system this is called by Zygote code.
 void PreInitializeNativeBridge(const std::string& dir);
 
+void PreZygoteForkNativeBridge();
+
 void InitializeNativeBridge(JNIEnv* env, const char* instruction_set);
 
 void UnloadNativeBridge();
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index 150fa78..74d9033 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -39,6 +39,7 @@
 #include <sys/time.h>
 #include <sys/types.h>
 
+#include "android-base/file.h"
 #include "android-base/stringprintf.h"
 #include "android-base/strings.h"
 
@@ -233,12 +234,17 @@
                       std::ostream& os,
                       const char* prefix,
                       std::unique_ptr<Addr2linePipe>* pipe /* inout */) {
-  DCHECK(pipe != nullptr);
-
-  if (map_src == "[vdso]" || android::base::EndsWith(map_src, ".vdex")) {
+  std::array<const char*, 3> kIgnoreSuffixes{ ".dex", ".jar", ".vdex" };
+  for (const char* ignore_suffix : kIgnoreSuffixes) {
+    if (android::base::EndsWith(map_src, ignore_suffix)) {
+      // Ignore file names that do not have map information addr2line can consume. e.g. vdex
+      // files are special frames injected for the interpreter so they don't have any line
+      // number information available.
+      return;
+    }
+  }
+  if (map_src == "[vdso]") {
     // addr2line will not work on the vdso.
-    // vdex files are special frames injected for the interpreter
-    // so they don't have any line number information available.
     return;
   }
 
@@ -410,44 +416,6 @@
   }
 }
 
-void DumpKernelStack(std::ostream& os, pid_t tid, const char* prefix, bool include_count) {
-  if (tid == GetTid()) {
-    // There's no point showing that we're reading our stack out of /proc!
-    return;
-  }
-
-  std::string kernel_stack_filename(StringPrintf("/proc/self/task/%d/stack", tid));
-  std::string kernel_stack;
-  if (!ReadFileToString(kernel_stack_filename, &kernel_stack)) {
-    os << prefix << "(couldn't read " << kernel_stack_filename << ")\n";
-    return;
-  }
-
-  std::vector<std::string> kernel_stack_frames;
-  Split(kernel_stack, '\n', &kernel_stack_frames);
-  if (kernel_stack_frames.empty()) {
-    os << prefix << "(" << kernel_stack_filename << " is empty)\n";
-    return;
-  }
-  // We skip the last stack frame because it's always equivalent to "[<ffffffff>] 0xffffffff",
-  // which looking at the source appears to be the kernel's way of saying "that's all, folks!".
-  kernel_stack_frames.pop_back();
-  for (size_t i = 0; i < kernel_stack_frames.size(); ++i) {
-    // Turn "[<ffffffff8109156d>] futex_wait_queue_me+0xcd/0x110"
-    // into "futex_wait_queue_me+0xcd/0x110".
-    const char* text = kernel_stack_frames[i].c_str();
-    const char* close_bracket = strchr(text, ']');
-    if (close_bracket != nullptr) {
-      text = close_bracket + 2;
-    }
-    os << prefix;
-    if (include_count) {
-      os << StringPrintf("#%02zd ", i);
-    }
-    os << text << std::endl;
-  }
-}
-
 #elif defined(__APPLE__)
 
 void DumpNativeStack(std::ostream& os ATTRIBUTE_UNUSED,
@@ -459,12 +427,6 @@
                      bool skip_frames ATTRIBUTE_UNUSED) {
 }
 
-void DumpKernelStack(std::ostream& os ATTRIBUTE_UNUSED,
-                     pid_t tid ATTRIBUTE_UNUSED,
-                     const char* prefix ATTRIBUTE_UNUSED,
-                     bool include_count ATTRIBUTE_UNUSED) {
-}
-
 #else
 #error "Unsupported architecture for native stack dumps."
 #endif
diff --git a/runtime/native_stack_dump.h b/runtime/native_stack_dump.h
index ad4bfab..4d4b36b 100644
--- a/runtime/native_stack_dump.h
+++ b/runtime/native_stack_dump.h
@@ -39,12 +39,6 @@
                      bool skip_frames = true)
     NO_THREAD_SAFETY_ANALYSIS;
 
-// Dumps the kernel stack for thread 'tid' to 'os'. Note that this is only available on linux-x86.
-void DumpKernelStack(std::ostream& os,
-                     pid_t tid,
-                     const char* prefix = "",
-                     bool include_count = true);
-
 }  // namespace art
 
 #endif  // ART_RUNTIME_NATIVE_STACK_DUMP_H_
diff --git a/runtime/nterp_helpers.cc b/runtime/nterp_helpers.cc
new file mode 100644
index 0000000..a2ec882
--- /dev/null
+++ b/runtime/nterp_helpers.cc
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "art_method-inl.h"
+#include "dex/code_item_accessors.h"
+#include "entrypoints/quick/callee_save_frame.h"
+#include "interpreter/interpreter_mterp_impl.h"
+#include "nterp_helpers.h"
+#include "oat_quick_method_header.h"
+#include "quick/quick_method_frame_info.h"
+
+namespace art {
+
+/**
+ * An nterp frame follows the optimizing compiler's ABI conventions, with
+ * int/long/reference parameters being passed in core registers / stack and
+ * float/double parameters being passed in floating point registers / stack.
+ *
+ * There are no ManagedStack transitions between compiler and nterp frames.
+ *
+ * On entry, nterp will copy its parameters to a dex register array allocated on
+ * the stack. There is a fast path when calling from nterp to nterp to not
+ * follow the ABI but just copy the parameters from the caller's dex registers
+ * to the callee's dex registers.
+ *
+ * The stack layout of an nterp frame is:
+ *    ----------------
+ *    |              |      All callee save registers of the platform
+ *    | callee-save  |      (core and floating point).
+ *    | registers    |      On x86 and x64 this includes the return address,
+ *    |              |      already spilled on entry.
+ *    ----------------
+    |  alignment   |      Stack alignment of kStackAlignment.
+ *    ----------------
+ *    |              |      Contains `registers_size` entries (of size 4) from
+ *    |    dex       |      the code item information of the method.
+ *    |  registers   |
+ *    |              |
+ *    ----------------
+ *    |              |      A copy of the dex registers above, but only
+ *    |  reference   |      containing references, used for GC.
+ *    |  registers   |
+ *    |              |
+ *    ----------------
+ *    |  caller fp   |      Frame pointer of caller. Stored below the reference
+ *    ----------------      registers array for easy access from nterp when returning.
+ *    |  dex_pc_ptr  |      Pointer to the dex instruction being executed.
+ *    ----------------      Stored whenever nterp goes into the runtime.
+ *    |              |      In case nterp calls compiled code, we reserve space
+ *    |     out      |      for out registers. This space will be used for
+ *    |   registers  |      arguments passed on stack.
+ *    |              |
+ *    ----------------
+ *    |  ArtMethod*  |      The method being currently executed.
+ *    ----------------
+ *
+ *    Exception handling:
+ *    Nterp follows the same convention as the compiler,
+ *    with the addition of:
+ *    - All catch handlers have the same landing pad.
+ *    - Before doing the longjmp for exception delivery, the register containing the
+ *      dex PC pointer must be updated.
+ *
+ *    Stack walking:
+ *    An nterp frame is walked like a compiled code frame. We add an
+ *    OatQuickMethodHeader prefix to the nterp entry point, which contains:
+ *    - vmap_table_offset=0 (nterp doesn't need one).
+ *    - code_size=NterpEnd-NterpStart
+ */
+
+static constexpr size_t kPointerSize = static_cast<size_t>(kRuntimePointerSize);
+
+static constexpr size_t NterpGetFrameEntrySize() {
+  uint32_t core_spills =
+      RuntimeCalleeSaveFrame::GetCoreSpills(CalleeSaveType::kSaveAllCalleeSaves);
+  uint32_t fp_spills =
+      RuntimeCalleeSaveFrame::GetFpSpills(CalleeSaveType::kSaveAllCalleeSaves);
+  // Note: the return address is considered part of the callee saves.
+  return (POPCOUNT(core_spills) + POPCOUNT(fp_spills)) * kPointerSize;
+}
+
+size_t NterpGetFrameSize(ArtMethod* method) {
+  CodeItemDataAccessor accessor(method->DexInstructionData());
+  const uint16_t num_regs = accessor.RegistersSize();
+  const uint16_t out_regs = accessor.OutsSize();
+
+  size_t frame_size =
+      NterpGetFrameEntrySize() +
+      (num_regs * kVRegSize) * 2 +  // dex registers and reference registers
+      kPointerSize +  // previous frame
+      kPointerSize +  // saved dex pc
+      (out_regs * kVRegSize) +  // out arguments
+      kPointerSize;  // method
+  return RoundUp(frame_size, kStackAlignment);
+}
+
+QuickMethodFrameInfo NterpFrameInfo(ArtMethod** frame) {
+  uint32_t core_spills =
+      RuntimeCalleeSaveFrame::GetCoreSpills(CalleeSaveType::kSaveAllCalleeSaves);
+  uint32_t fp_spills =
+      RuntimeCalleeSaveFrame::GetFpSpills(CalleeSaveType::kSaveAllCalleeSaves);
+  return QuickMethodFrameInfo(NterpGetFrameSize(*frame), core_spills, fp_spills);
+}
+
+uintptr_t NterpGetRegistersArray(ArtMethod** frame) {
+  CodeItemDataAccessor accessor((*frame)->DexInstructionData());
+  const uint16_t num_regs = accessor.RegistersSize();
+  // The registers array is just above the reference array.
+  return NterpGetReferenceArray(frame) + (num_regs * kVRegSize);
+}
+
+uintptr_t NterpGetReferenceArray(ArtMethod** frame) {
+  CodeItemDataAccessor accessor((*frame)->DexInstructionData());
+  const uint16_t out_regs = accessor.OutsSize();
+  // The references array is just above the saved frame pointer.
+  return reinterpret_cast<uintptr_t>(frame) +
+      kPointerSize +  // method
+      (out_regs * kVRegSize) +  // out arguments
+      kPointerSize +  // saved dex pc
+      kPointerSize;  // previous frame.
+}
+
+uint32_t NterpGetDexPC(ArtMethod** frame) {
+  CodeItemDataAccessor accessor((*frame)->DexInstructionData());
+  const uint16_t out_regs = accessor.OutsSize();
+  uintptr_t dex_pc_ptr = reinterpret_cast<uintptr_t>(frame) +
+      kPointerSize +  // method
+      (out_regs * kVRegSize);  // out arguments
+  CodeItemInstructionAccessor instructions((*frame)->DexInstructions());
+  return *reinterpret_cast<const uint16_t**>(dex_pc_ptr) - instructions.Insns();
+}
+
+uint32_t NterpGetVReg(ArtMethod** frame, uint16_t vreg) {
+  return reinterpret_cast<uint32_t*>(NterpGetRegistersArray(frame))[vreg];
+}
+
+uint32_t NterpGetVRegReference(ArtMethod** frame, uint16_t vreg) {
+  return reinterpret_cast<uint32_t*>(NterpGetReferenceArray(frame))[vreg];
+}
+
+uintptr_t NterpGetCatchHandler() {
+  // Nterp uses the same landing pad for all exceptions. The dex_pc_ptr set before
+  // longjmp will actually be used to jmp to the catch handler.
+  return reinterpret_cast<uintptr_t>(artNterpAsmInstructionEnd);
+}
+
+}  // namespace art
diff --git a/runtime/nterp_helpers.h b/runtime/nterp_helpers.h
new file mode 100644
index 0000000..aacd178
--- /dev/null
+++ b/runtime/nterp_helpers.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NTERP_HELPERS_H_
+#define ART_RUNTIME_NTERP_HELPERS_H_
+
+#include "quick/quick_method_frame_info.h"
+
+namespace art {
+
+class ArtMethod;
+
+/**
+ * The frame size nterp will use for the given method.
+ */
+size_t NterpGetFrameSize(ArtMethod* method)
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
+/**
+ * Returns the QuickMethodFrameInfo of the given frame corresponding to the
+ * given method.
+ */
+QuickMethodFrameInfo NterpFrameInfo(ArtMethod** frame)
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
+/**
+ * Returns the dex PC at which the given nterp frame is executing.
+ */
+uint32_t NterpGetDexPC(ArtMethod** frame)
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
+/**
+ * Returns the reference array to be used by the GC to visit references in an
+ * nterp frame.
+ */
+uintptr_t NterpGetReferenceArray(ArtMethod** frame)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+/**
+ * Returns the dex register array to be used by the GC to update references in
+ * an nterp frame.
+ */
+uintptr_t NterpGetRegistersArray(ArtMethod** frame)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+/**
+ * Returns the nterp landing pad for catching an exception.
+ */
+uintptr_t NterpGetCatchHandler();
+
+/**
+ * Returns the value of dex register number `vreg` in the given frame.
+ */
+uint32_t NterpGetVReg(ArtMethod** frame, uint16_t vreg)
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
+/**
+ * Returns the value of dex register number `vreg` in the given frame if it is a
+ * reference. Returns 0 otherwise.
+ */
+uint32_t NterpGetVRegReference(ArtMethod** frame, uint16_t vreg)
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_NTERP_HELPERS_H_
diff --git a/runtime/oat.cc b/runtime/oat.cc
index db6cda5..17c797a 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -72,7 +72,8 @@
       dex_file_count_(dex_file_count),
       oat_dex_files_offset_(0),
       executable_offset_(0),
-      jni_dlsym_lookup_offset_(0),
+      jni_dlsym_lookup_trampoline_offset_(0),
+      jni_dlsym_lookup_critical_trampoline_offset_(0),
       quick_generic_jni_trampoline_offset_(0),
       quick_imt_conflict_trampoline_offset_(0),
       quick_resolution_trampoline_offset_(0),
@@ -112,13 +113,13 @@
 std::string OatHeader::GetValidationErrorMessage() const {
   if (magic_ != kOatMagic) {
     static_assert(sizeof(kOatMagic) == 4, "kOatMagic has unexpected length");
-    return StringPrintf("Invalid oat magic, expected 0x%x%x%x%x, got 0x%x%x%x%x.",
+    return StringPrintf("Invalid oat magic, expected 0x%02x%02x%02x%02x, got 0x%02x%02x%02x%02x.",
                         kOatMagic[0], kOatMagic[1], kOatMagic[2], kOatMagic[3],
                         magic_[0], magic_[1], magic_[2], magic_[3]);
   }
   if (version_ != kOatVersion) {
     static_assert(sizeof(kOatVersion) == 4, "kOatVersion has unexpected length");
-    return StringPrintf("Invalid oat version, expected 0x%x%x%x%x, got 0x%x%x%x%x.",
+    return StringPrintf("Invalid oat version, expected 0x%02x%02x%02x%02x, got 0x%02x%02x%02x%02x.",
                         kOatVersion[0], kOatVersion[1], kOatVersion[2], kOatVersion[3],
                         version_[0], version_[1], version_[2], version_[3]);
   }
@@ -136,7 +137,8 @@
 void OatHeader::CheckOatVersion(std::array<uint8_t, 4> version) {
   constexpr std::array<uint8_t, 4> expected = kOatVersion;  // Runtime oat version.
   if (version != kOatVersion) {
-    LOG(FATAL) << StringPrintf("Invalid oat version, expected 0x%x%x%x%x, got 0x%x%x%x%x.",
+    LOG(FATAL) << StringPrintf("Invalid oat version, expected 0x%02x%02x%02x%02x, "
+                                   "got 0x%02x%02x%02x%02x.",
                                expected[0], expected[1], expected[2], expected[3],
                                version[0], version[1], version[2], version[3]);
   }
@@ -200,20 +202,36 @@
   return (offset != 0u) ? reinterpret_cast<const uint8_t*>(&header) + offset : nullptr;
 }
 
-const void* OatHeader::GetJniDlsymLookup() const {
-  return GetTrampoline(*this, GetJniDlsymLookupOffset());
+const void* OatHeader::GetJniDlsymLookupTrampoline() const {
+  return GetTrampoline(*this, GetJniDlsymLookupTrampolineOffset());
 }
 
-uint32_t OatHeader::GetJniDlsymLookupOffset() const {
+uint32_t OatHeader::GetJniDlsymLookupTrampolineOffset() const {
   DCHECK(IsValid());
-  return jni_dlsym_lookup_offset_;
+  return jni_dlsym_lookup_trampoline_offset_;
 }
 
-void OatHeader::SetJniDlsymLookupOffset(uint32_t offset) {
+void OatHeader::SetJniDlsymLookupTrampolineOffset(uint32_t offset) {
   DCHECK(IsValid());
-  DCHECK_EQ(jni_dlsym_lookup_offset_, 0U) << offset;
+  DCHECK_EQ(jni_dlsym_lookup_trampoline_offset_, 0U) << offset;
 
-  jni_dlsym_lookup_offset_ = offset;
+  jni_dlsym_lookup_trampoline_offset_ = offset;
+}
+
+const void* OatHeader::GetJniDlsymLookupCriticalTrampoline() const {
+  return GetTrampoline(*this, GetJniDlsymLookupCriticalTrampolineOffset());
+}
+
+uint32_t OatHeader::GetJniDlsymLookupCriticalTrampolineOffset() const {
+  DCHECK(IsValid());
+  return jni_dlsym_lookup_critical_trampoline_offset_;
+}
+
+void OatHeader::SetJniDlsymLookupCriticalTrampolineOffset(uint32_t offset) {
+  DCHECK(IsValid());
+  DCHECK_EQ(jni_dlsym_lookup_critical_trampoline_offset_, 0U) << offset;
+
+  jni_dlsym_lookup_critical_trampoline_offset_ = offset;
 }
 
 const void* OatHeader::GetQuickGenericJniTrampoline() const {
@@ -222,12 +240,12 @@
 
 uint32_t OatHeader::GetQuickGenericJniTrampolineOffset() const {
   DCHECK(IsValid());
-  CHECK_GE(quick_generic_jni_trampoline_offset_, jni_dlsym_lookup_offset_);
+  CHECK_GE(quick_generic_jni_trampoline_offset_, jni_dlsym_lookup_trampoline_offset_);
   return quick_generic_jni_trampoline_offset_;
 }
 
 void OatHeader::SetQuickGenericJniTrampolineOffset(uint32_t offset) {
-  CHECK(offset == 0 || offset >= jni_dlsym_lookup_offset_);
+  CHECK(offset == 0 || offset >= jni_dlsym_lookup_trampoline_offset_);
   DCHECK(IsValid());
   DCHECK_EQ(quick_generic_jni_trampoline_offset_, 0U) << offset;
 
@@ -411,9 +429,4 @@
   key_value_store_size_ = data_ptr - reinterpret_cast<char*>(&key_value_store_);
 }
 
-OatMethodOffsets::OatMethodOffsets(uint32_t code_offset) : code_offset_(code_offset) {
-}
-
-OatMethodOffsets::~OatMethodOffsets() {}
-
 }  // namespace art
diff --git a/runtime/oat.h b/runtime/oat.h
index 15059a8..080a706 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,8 +32,8 @@
 class PACKED(4) OatHeader {
  public:
   static constexpr std::array<uint8_t, 4> kOatMagic { { 'o', 'a', 't', '\n' } };
-  // Last oat version changed reason: Remove unused trampoline entrypoints.
-  static constexpr std::array<uint8_t, 4> kOatVersion { { '1', '7', '0', '\0' } };
+  // Last oat version changed reason: Change ClassStatus bits with kVerifiedNeedsAccessChecks.
+  static constexpr std::array<uint8_t, 4> kOatVersion { { '1', '8', '3', '\0' } };
 
   static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
   static constexpr const char* kDebuggableKey = "debuggable";
@@ -69,9 +69,12 @@
   uint32_t GetExecutableOffset() const;
   void SetExecutableOffset(uint32_t executable_offset);
 
-  const void* GetJniDlsymLookup() const;
-  uint32_t GetJniDlsymLookupOffset() const;
-  void SetJniDlsymLookupOffset(uint32_t offset);
+  const void* GetJniDlsymLookupTrampoline() const;
+  uint32_t GetJniDlsymLookupTrampolineOffset() const;
+  void SetJniDlsymLookupTrampolineOffset(uint32_t offset);
+  const void* GetJniDlsymLookupCriticalTrampoline() const;
+  uint32_t GetJniDlsymLookupCriticalTrampolineOffset() const;
+  void SetJniDlsymLookupCriticalTrampolineOffset(uint32_t offset);
 
   const void* GetQuickGenericJniTrampoline() const;
   uint32_t GetQuickGenericJniTrampolineOffset() const;
@@ -122,7 +125,8 @@
   uint32_t dex_file_count_;
   uint32_t oat_dex_files_offset_;
   uint32_t executable_offset_;
-  uint32_t jni_dlsym_lookup_offset_;
+  uint32_t jni_dlsym_lookup_trampoline_offset_;
+  uint32_t jni_dlsym_lookup_critical_trampoline_offset_;
   uint32_t quick_generic_jni_trampoline_offset_;
   uint32_t quick_imt_conflict_trampoline_offset_;
   uint32_t quick_resolution_trampoline_offset_;
@@ -134,31 +138,6 @@
   DISALLOW_COPY_AND_ASSIGN(OatHeader);
 };
 
-// OatMethodOffsets are currently 5x32-bits=160-bits long, so if we can
-// save even one OatMethodOffsets struct, the more complicated encoding
-// using a bitmap pays for itself since few classes will have 160
-// methods.
-enum OatClassType {
-  kOatClassAllCompiled = 0,   // OatClass is followed by an OatMethodOffsets for each method.
-  kOatClassSomeCompiled = 1,  // A bitmap of which OatMethodOffsets are present follows the OatClass.
-  kOatClassNoneCompiled = 2,  // All methods are interpreted so no OatMethodOffsets are necessary.
-  kOatClassMax = 3,
-};
-
-std::ostream& operator<<(std::ostream& os, const OatClassType& rhs);
-
-class PACKED(4) OatMethodOffsets {
- public:
-  explicit OatMethodOffsets(uint32_t code_offset = 0);
-
-  ~OatMethodOffsets();
-
-  OatMethodOffsets(const OatMethodOffsets&) = default;
-  OatMethodOffsets& operator=(const OatMethodOffsets&) = default;
-
-  uint32_t code_offset_;
-};
-
 }  // namespace art
 
 #endif  // ART_RUNTIME_OAT_H_
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 54dae10..d72fc7e 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -45,6 +45,7 @@
 #include "base/mem_map.h"
 #include "base/os.h"
 #include "base/stl_util.h"
+#include "base/string_view_cpp20.h"
 #include "base/systrace.h"
 #include "base/unix_file/fd_file.h"
 #include "base/utils.h"
@@ -110,7 +111,7 @@
                                   bool writable,
                                   bool executable,
                                   bool low_4gb,
-                                  const char* abs_dex_location,
+                                  ArrayRef<const std::string> dex_filenames,
                                   /*inout*/MemMap* reservation,  // Where to load if not null.
                                   /*out*/std::string* error_msg);
 
@@ -123,7 +124,7 @@
                                   bool writable,
                                   bool executable,
                                   bool low_4gb,
-                                  const char* abs_dex_location,
+                                  ArrayRef<const std::string> dex_filenames,
                                   /*inout*/MemMap* reservation,  // Where to load if not null.
                                   /*out*/std::string* error_msg);
 
@@ -164,7 +165,7 @@
 
   virtual void PreSetup(const std::string& elf_filename) = 0;
 
-  bool Setup(int zip_fd, const char* abs_dex_location, std::string* error_msg);
+  bool Setup(int zip_fd, ArrayRef<const std::string> dex_filenames, std::string* error_msg);
   bool Setup(const std::vector<const DexFile*>& dex_files);
 
   // Setters exposed for ElfOatFile.
@@ -182,6 +183,10 @@
   }
 
  private:
+  // Returns true if we want to remove quickened opcodes before loading the VDEX file, false
+  // otherwise.
+  bool ShouldUnquickenVDex() const;
+
   DISALLOW_COPY_AND_ASSIGN(OatFileBase);
 };
 
@@ -193,7 +198,7 @@
                                       bool writable,
                                       bool executable,
                                       bool low_4gb,
-                                      const char* abs_dex_location,
+                                      ArrayRef<const std::string> dex_filenames,
                                       /*inout*/MemMap* reservation,
                                       /*out*/std::string* error_msg) {
   std::unique_ptr<OatFileBase> ret(new kOatFileBaseSubType(location, executable));
@@ -219,7 +224,7 @@
     return nullptr;
   }
 
-  if (!ret->Setup(zip_fd, abs_dex_location, error_msg)) {
+  if (!ret->Setup(zip_fd, dex_filenames, error_msg)) {
     return nullptr;
   }
 
@@ -235,7 +240,7 @@
                                       bool writable,
                                       bool executable,
                                       bool low_4gb,
-                                      const char* abs_dex_location,
+                                      ArrayRef<const std::string> dex_filenames,
                                       /*inout*/MemMap* reservation,
                                       /*out*/std::string* error_msg) {
   std::unique_ptr<OatFileBase> ret(new kOatFileBaseSubType(oat_location, executable));
@@ -259,13 +264,20 @@
     return nullptr;
   }
 
-  if (!ret->Setup(zip_fd, abs_dex_location, error_msg)) {
+  if (!ret->Setup(zip_fd, dex_filenames, error_msg)) {
     return nullptr;
   }
 
   return ret.release();
 }
 
+bool OatFileBase::ShouldUnquickenVDex() const {
+  // We sometimes load oat files without a runtime (eg oatdump) and don't want to do anything in
+  // that case. If we are debuggable there are no -quick opcodes to unquicken. If the runtime is not
+  // debuggable we don't care whether there are -quick opcodes or not so no need to do anything.
+  return Runtime::Current() != nullptr && !IsDebuggable() && Runtime::Current()->IsJavaDebuggable();
+}
+
 bool OatFileBase::LoadVdex(const std::string& vdex_filename,
                            bool writable,
                            bool low_4gb,
@@ -276,7 +288,7 @@
                                   vdex_filename,
                                   writable,
                                   low_4gb,
-                                  /* unquicken=*/ false,
+                                  ShouldUnquickenVDex(),
                                   error_msg);
   if (vdex_.get() == nullptr) {
     *error_msg = StringPrintf("Failed to load vdex file '%s' %s",
@@ -298,16 +310,17 @@
     if (rc == -1) {
       PLOG(WARNING) << "Failed getting length of vdex file";
     } else {
-      vdex_ = VdexFile::OpenAtAddress(vdex_begin_,
-                                      vdex_end_ - vdex_begin_,
-                                      /*mmap_reuse=*/ vdex_begin_ != nullptr,
-                                      vdex_fd,
-                                      s.st_size,
-                                      vdex_filename,
-                                      writable,
-                                      low_4gb,
-                                      /*unquicken=*/ false,
-                                      error_msg);
+      vdex_ = VdexFile::OpenAtAddress(
+          vdex_begin_,
+          vdex_end_ - vdex_begin_,
+          /*mmap_reuse=*/ vdex_begin_ != nullptr,
+          vdex_fd,
+          s.st_size,
+          vdex_filename,
+          writable,
+          low_4gb,
+          ShouldUnquickenVDex(),
+          error_msg);
       if (vdex_.get() == nullptr) {
         *error_msg = "Failed opening vdex file.";
         return false;
@@ -472,7 +485,9 @@
   return true;
 }
 
-bool OatFileBase::Setup(int zip_fd, const char* abs_dex_location, std::string* error_msg) {
+bool OatFileBase::Setup(int zip_fd,
+                        ArrayRef<const std::string> dex_filenames,
+                        std::string* error_msg) {
   if (!GetOatHeader().IsValid()) {
     std::string cause = GetOatHeader().GetValidationErrorMessage();
     *error_msg = StringPrintf("Invalid oat header for '%s': %s",
@@ -553,6 +568,9 @@
     return false;
   }
 
+  std::string_view primary_location;
+  std::string_view primary_location_replacement;
+  size_t dex_filenames_pos = 0u;
   uint32_t dex_file_count = GetOatHeader().GetDexFileCount();
   oat_dex_files_storage_.reserve(dex_file_count);
   for (size_t i = 0; i < dex_file_count; i++) {
@@ -580,19 +598,61 @@
     const char* dex_file_location_data = reinterpret_cast<const char*>(oat);
     oat += dex_file_location_size;
 
-    // Location encoded in the oat file. We will use this for multidex naming,
-    // see ResolveRelativeEncodedDexLocation.
-    std::string oat_dex_file_location(dex_file_location_data, dex_file_location_size);
-    // If `oat_dex_file_location` is relative (so that the oat file can be moved to
-    // a different folder), resolve to absolute location. Also resolve the file name
-    // in case dex files need to be opened from disk. The file name and location
-    // differ when cross-compiling on host for target.
-    std::string dex_file_name;
-    std::string dex_file_location;
-    ResolveRelativeEncodedDexLocation(abs_dex_location,
-                                      oat_dex_file_location,
-                                      &dex_file_location,
-                                      &dex_file_name);
+    // Location encoded in the oat file. We will use this for multidex naming.
+    std::string_view oat_dex_file_location(dex_file_location_data, dex_file_location_size);
+    std::string dex_file_location(oat_dex_file_location);
+    bool is_multidex = DexFileLoader::IsMultiDexLocation(dex_file_location.c_str());
+    // Check that `is_multidex` does not clash with other indicators. The first dex location
+    // must be primary location and, if we're opening external dex files, the location must
+    // be multi-dex if and only if we already have a dex file opened for it.
+    if ((i == 0 && is_multidex) ||
+        (!external_dex_files_.empty() && (is_multidex != (i < external_dex_files_.size())))) {
+      *error_msg = StringPrintf("In oat file '%s' found unexpected %s location '%s'",
+                                GetLocation().c_str(),
+                                is_multidex ? "multi-dex" : "primary",
+                                dex_file_location.c_str());
+      return false;
+    }
+    // Remember the primary location and, if provided, the replacement from `dex_filenames`.
+    if (!is_multidex) {
+      primary_location = oat_dex_file_location;
+      if (!dex_filenames.empty()) {
+        if (dex_filenames_pos == dex_filenames.size()) {
+          *error_msg = StringPrintf("In oat file '%s' found excessive primary location '%s'"
+                                        ", expected only %zu primary locations",
+                                    GetLocation().c_str(),
+                                    dex_file_location.c_str(),
+                                    dex_filenames.size());
+          return false;
+        }
+        primary_location_replacement = dex_filenames[dex_filenames_pos];
+        ++dex_filenames_pos;
+      }
+    }
+    // Check that the base location of a multidex location matches the last seen primary location.
+    if (is_multidex &&
+        (!StartsWith(dex_file_location, primary_location) ||
+             dex_file_location[primary_location.size()] != DexFileLoader::kMultiDexSeparator)) {
+      *error_msg = StringPrintf("In oat file '%s' found unexpected multidex location '%s',"
+                                    " unrelated to '%s'",
+                                GetLocation().c_str(),
+                                dex_file_location.c_str(),
+                                std::string(primary_location).c_str());
+      return false;
+    }
+    std::string dex_file_name = dex_file_location;
+    if (!dex_filenames.empty()) {
+      dex_file_name.replace(/*pos*/ 0u, primary_location.size(), primary_location_replacement);
+      // If the location does not contain path and matches the file name component,
+      // use the provided file name also as the location.
+      // TODO: Do we need this for anything other than tests?
+      if (dex_file_location.find('/') == std::string::npos &&
+          dex_file_name.size() > dex_file_location.size() &&
+          dex_file_name[dex_file_name.size() - dex_file_location.size() - 1u] == '/' &&
+          EndsWith(dex_file_name, dex_file_location)) {
+        dex_file_location = dex_file_name;
+      }
+    }
 
     uint32_t dex_file_checksum;
     if (UNLIKELY(!ReadOatDexFileData(*this, &oat, &dex_file_checksum))) {
@@ -625,17 +685,18 @@
     }
     const uint8_t* dex_file_pointer = nullptr;
     if (UNLIKELY(dex_file_offset == 0U)) {
-      if (uncompressed_dex_files_ == nullptr) {
-        // Do not support mixed-mode oat files.
-        if (i > 0) {
-          *error_msg = StringPrintf("In oat file '%s', unsupported uncompressed-dex-file for dex "
-                                        "file %zu (%s)",
-                                    GetLocation().c_str(),
-                                    i,
-                                    dex_file_location.c_str());
-          return false;
-        }
-        uncompressed_dex_files_.reset(new std::vector<std::unique_ptr<const DexFile>>());
+      // Do not support mixed-mode oat files.
+      if (i != 0u && external_dex_files_.empty()) {
+        *error_msg = StringPrintf("In oat file '%s', unsupported uncompressed-dex-file for dex "
+                                      "file %zu (%s)",
+                                  GetLocation().c_str(),
+                                  i,
+                                  dex_file_location.c_str());
+        return false;
+      }
+      DCHECK_LE(i, external_dex_files_.size());
+      if (i == external_dex_files_.size()) {
+        std::vector<std::unique_ptr<const DexFile>> new_dex_files;
         // No dex files, load it from location.
         const ArtDexFileLoader dex_file_loader;
         bool loaded = false;
@@ -645,14 +706,14 @@
                                            /*verify=*/ false,
                                            /*verify_checksum=*/ false,
                                            error_msg,
-                                           uncompressed_dex_files_.get());
+                                           &new_dex_files);
         } else {
           loaded = dex_file_loader.Open(dex_file_name.c_str(),
                                         dex_file_location,
                                         /*verify=*/ false,
                                         /*verify_checksum=*/ false,
                                         error_msg,
-                                        uncompressed_dex_files_.get());
+                                        &new_dex_files);
         }
         if (!loaded) {
           if (Runtime::Current() == nullptr) {
@@ -667,22 +728,32 @@
         }
         // The oat file may be out of date wrt/ the dex-file location. We need to be defensive
         // here and ensure that at least the number of dex files still matches.
+        // If we have a zip_fd, or reached the end of provided `dex_filenames`, we must
+        // load all dex files from that file, otherwise we may open multiple files.
         // Note: actual checksum comparisons are the duty of the OatFileAssistant and will be
         //       done after loading the OatFile.
-        if (uncompressed_dex_files_->size() != dex_file_count) {
-          *error_msg = StringPrintf("In oat file '%s', expected %u uncompressed dex files, but "
+        size_t max_dex_files = dex_file_count - external_dex_files_.size();
+        bool expect_all =
+            (zip_fd != -1) || (!dex_filenames.empty() && dex_filenames_pos == dex_filenames.size());
+        if (expect_all ? new_dex_files.size() != max_dex_files
+                       : new_dex_files.size() > max_dex_files) {
+          *error_msg = StringPrintf("In oat file '%s', expected %s%zu uncompressed dex files, but "
                                         "found %zu in '%s'",
                                     GetLocation().c_str(),
-                                    dex_file_count,
-                                    uncompressed_dex_files_->size(),
+                                    (expect_all ? "" : "<="),
+                                    max_dex_files,
+                                    new_dex_files.size(),
                                     dex_file_location.c_str());
           return false;
         }
+        for (std::unique_ptr<const DexFile>& dex_file : new_dex_files) {
+          external_dex_files_.push_back(std::move(dex_file));
+        }
       }
-      dex_file_pointer = (*uncompressed_dex_files_)[i]->Begin();
+      dex_file_pointer = external_dex_files_[i]->Begin();
     } else {
       // Do not support mixed-mode oat files.
-      if (uncompressed_dex_files_ != nullptr) {
+      if (!external_dex_files_.empty()) {
         *error_msg = StringPrintf("In oat file '%s', unsupported embedded dex-file for dex file "
                                       "%zu (%s)",
                                   GetLocation().c_str(),
@@ -837,35 +908,44 @@
     oat_dex_files_storage_.push_back(oat_dex_file);
 
     // Add the location and canonical location (if different) to the oat_dex_files_ table.
-    // Note: we use the dex_file_location_data storage for the view, as oat_dex_file_location
-    // is just a temporary string.
-    std::string_view key(dex_file_location_data, dex_file_location_size);
+    // Note: We do not add the non-canonical `dex_file_name`. If it is different from both
+    // the location and canonical location, GetOatDexFile() shall canonicalize it when
+    // requested and match the canonical path.
+    std::string_view key = oat_dex_file_location;  // References oat file data.
     std::string_view canonical_key(oat_dex_file->GetCanonicalDexFileLocation());
     oat_dex_files_.Put(key, oat_dex_file);
     if (canonical_key != key) {
       oat_dex_files_.Put(canonical_key, oat_dex_file);
     }
   }
-
-  Runtime* runtime = Runtime::Current();
+  if (!dex_filenames.empty() && dex_filenames_pos != dex_filenames.size()) {
+    *error_msg = StringPrintf("Oat file '%s' contains only %zu primary dex locations, expected %zu",
+                              GetLocation().c_str(),
+                              dex_filenames_pos,
+                              dex_filenames.size());
+    return false;
+  }
 
   if (DataBimgRelRoBegin() != nullptr) {
-    // Make .data.bimg.rel.ro read only. ClassLinker shall make it writable for relocation.
+    // Make .data.bimg.rel.ro read only. ClassLinker shall temporarily make it writable for
+    // relocation when we register a dex file from this oat file. We do not do the relocation
+    // here to avoid dirtying the pages if the code is never actually ready to be executed.
     uint8_t* reloc_begin = const_cast<uint8_t*>(DataBimgRelRoBegin());
     CheckedCall(mprotect, "protect relocations", reloc_begin, DataBimgRelRoSize(), PROT_READ);
-    if (UNLIKELY(runtime == nullptr)) {
-      // This must be oatdump without boot image.
-    } else if (!IsExecutable()) {
-      // Do not check whether we have a boot image if the oat file is not executable.
-    } else if (UNLIKELY(runtime->GetHeap()->GetBootImageSpaces().empty())) {
-      *error_msg = StringPrintf("Cannot load oat file '%s' with .data.bimg.rel.ro as executable "
-                                    "without boot image.",
-                                GetLocation().c_str());
-      return false;
-    } else {
-      // ClassLinker shall perform the relocation when we register a dex file from
-      // this oat file. We do not do the relocation here to avoid dirtying the pages
-      // if the code is never actually ready to be executed.
+    // Make sure the file lists a boot image dependency, otherwise the .data.bimg.rel.ro
+    // section is bogus. The full dependency is checked before the code is executed.
+    // We cannot do this check if we do not have a key-value store, i.e. for secondary
+    // oat files for boot image extensions.
+    if (GetOatHeader().GetKeyValueStoreSize() != 0u) {
+      const char* boot_class_path_checksum =
+          GetOatHeader().GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey);
+      if (boot_class_path_checksum == nullptr ||
+          boot_class_path_checksum[0] != gc::space::ImageSpace::kImageChecksumPrefix) {
+        *error_msg = StringPrintf("Oat file '%s' contains .data.bimg.rel.ro section "
+                                      "without boot image dependency.",
+                                  GetLocation().c_str());
+        return false;
+      }
     }
   }
 
@@ -1151,6 +1231,11 @@
   LOG(FATAL) << "Should not reach here.";
   UNREACHABLE();
 #else
+  struct DummyMapData {
+    const char* name;
+    uint8_t* vaddr;
+    size_t memsz;
+  };
   struct dl_iterate_context {
     static int callback(dl_phdr_info* info, size_t size ATTRIBUTE_UNUSED, void* data) {
       auto* context = reinterpret_cast<dl_iterate_context*>(data);
@@ -1185,8 +1270,18 @@
             uint8_t* vaddr = reinterpret_cast<uint8_t*>(info->dlpi_addr +
                 info->dlpi_phdr[i].p_vaddr);
             size_t memsz = info->dlpi_phdr[i].p_memsz;
-            MemMap mmap = MemMap::MapDummy(info->dlpi_name, vaddr, memsz);
-            context->dlopen_mmaps_->push_back(std::move(mmap));
+            size_t name_size = strlen(info->dlpi_name) + 1u;
+            std::vector<char>* dummy_maps_names = context->dummy_maps_names_;
+            // We must not allocate any memory in the callback, see b/156312036 .
+            if (name_size < dummy_maps_names->capacity() - dummy_maps_names->size() &&
+                context->dummy_maps_data_->size() < context->dummy_maps_data_->capacity()) {
+              dummy_maps_names->insert(
+                  dummy_maps_names->end(), info->dlpi_name, info->dlpi_name + name_size);
+              const char* name = &(*dummy_maps_names)[dummy_maps_names->size() - name_size];
+              context->dummy_maps_data_->push_back({ name, vaddr, memsz });
+            }
+            context->num_dummy_maps_ += 1u;
+            context->dummy_maps_names_size_ += name_size;
           }
         }
         return 1;  // Stop iteration and return 1 from dl_iterate_phdr.
@@ -1194,24 +1289,71 @@
       return 0;  // Continue iteration and return 0 from dl_iterate_phdr when finished.
     }
     const uint8_t* const begin_;
-    std::vector<MemMap>* const dlopen_mmaps_;
-    const size_t shared_objects_before;
+    std::vector<DummyMapData>* dummy_maps_data_;
+    size_t num_dummy_maps_;
+    std::vector<char>* dummy_maps_names_;
+    size_t dummy_maps_names_size_;
+    size_t shared_objects_before;
     size_t shared_objects_seen;
   };
-  dl_iterate_context context = { Begin(), &dlopen_mmaps_, shared_objects_before_, 0};
+
+  // We must not allocate any memory in the callback, see b/156312036 .
+  // Therefore we pre-allocate storage for the data we need for creating the dummy maps.
+  std::vector<DummyMapData> dummy_maps_data;
+  dummy_maps_data.reserve(32);  // 32 should be enough. If not, we'll retry.
+  std::vector<char> dummy_maps_names;
+  dummy_maps_names.reserve(4 * KB);  // 4KiB should be enough. If not, we'll retry.
+
+  dl_iterate_context context = {
+      Begin(),
+      &dummy_maps_data,
+      /*num_dummy_maps_*/ 0u,
+      &dummy_maps_names,
+      /*dummy_maps_names_size_*/ 0u,
+      shared_objects_before_,
+      /*shared_objects_seen*/ 0u
+  };
 
   if (dl_iterate_phdr(dl_iterate_context::callback, &context) == 0) {
     // Hm. Maybe our optimization went wrong. Try another time with shared_objects_before == 0
     // before giving up. This should be unusual.
     VLOG(oat) << "Need a second run in PreSetup, didn't find with shared_objects_before="
               << shared_objects_before_;
-    dl_iterate_context context0 = { Begin(), &dlopen_mmaps_, 0, 0};
-    if (dl_iterate_phdr(dl_iterate_context::callback, &context0) == 0) {
+    DCHECK(dummy_maps_data.empty());
+    DCHECK_EQ(context.num_dummy_maps_, 0u);
+    DCHECK(dummy_maps_names.empty());
+    DCHECK_EQ(context.dummy_maps_names_size_, 0u);
+    context.shared_objects_before = 0u;
+    context.shared_objects_seen = 0u;
+    if (dl_iterate_phdr(dl_iterate_context::callback, &context) == 0) {
       // OK, give up and print an error.
       PrintFileToLog("/proc/self/maps", android::base::LogSeverity::WARNING);
       LOG(ERROR) << "File " << elf_filename << " loaded with dlopen but cannot find its mmaps.";
     }
   }
+
+  if (dummy_maps_data.size() < context.num_dummy_maps_) {
+    // Insufficient capacity. Reserve more space and retry.
+    dummy_maps_data.clear();
+    dummy_maps_data.reserve(context.num_dummy_maps_);
+    context.num_dummy_maps_ = 0u;
+    dummy_maps_names.clear();
+    dummy_maps_names.reserve(context.dummy_maps_names_size_);
+    context.dummy_maps_names_size_ = 0u;
+    context.shared_objects_before = 0u;
+    context.shared_objects_seen = 0u;
+    bool success = (dl_iterate_phdr(dl_iterate_context::callback, &context) != 0);
+    CHECK(success);
+  }
+
+  CHECK_EQ(dummy_maps_data.size(), context.num_dummy_maps_);
+  CHECK_EQ(dummy_maps_names.size(), context.dummy_maps_names_size_);
+  DCHECK_EQ(static_cast<size_t>(std::count(dummy_maps_names.begin(), dummy_maps_names.end(), '\0')),
+            context.num_dummy_maps_);
+  for (const DummyMapData& data : dummy_maps_data) {
+    MemMap mmap = MemMap::MapDummy(data.name, data.vaddr, data.memsz);
+    dlopen_mmaps_.push_back(std::move(mmap));
+  }
 #endif
 }
 
@@ -1223,20 +1365,10 @@
  public:
   ElfOatFile(const std::string& filename, bool executable) : OatFileBase(filename, executable) {}
 
-  static ElfOatFile* OpenElfFile(int zip_fd,
-                                 File* file,
-                                 const std::string& location,
-                                 bool writable,
-                                 bool executable,
-                                 bool low_4gb,
-                                 const char* abs_dex_location,
-                                 /*inout*/MemMap* reservation,  // Where to load if not null.
-                                 /*out*/std::string* error_msg);
-
   bool InitializeFromElfFile(int zip_fd,
                              ElfFile* elf_file,
                              VdexFile* vdex_file,
-                             const char* abs_dex_location,
+                             ArrayRef<const std::string> dex_filenames,
                              std::string* error_msg);
 
  protected:
@@ -1284,44 +1416,10 @@
   DISALLOW_COPY_AND_ASSIGN(ElfOatFile);
 };
 
-ElfOatFile* ElfOatFile::OpenElfFile(int zip_fd,
-                                    File* file,
-                                    const std::string& location,
-                                    bool writable,
-                                    bool executable,
-                                    bool low_4gb,
-                                    const char* abs_dex_location,
-                                    /*inout*/MemMap* reservation,  // Where to load if not null.
-                                    /*out*/std::string* error_msg) {
-  ScopedTrace trace("Open elf file " + location);
-  std::unique_ptr<ElfOatFile> oat_file(new ElfOatFile(location, executable));
-  bool success = oat_file->ElfFileOpen(file,
-                                       writable,
-                                       low_4gb,
-                                       executable,
-                                       reservation,
-                                       error_msg);
-  if (!success) {
-    CHECK(!error_msg->empty());
-    return nullptr;
-  }
-
-  // Complete the setup.
-  if (!oat_file->ComputeFields(file->GetPath(), error_msg)) {
-    return nullptr;
-  }
-
-  if (!oat_file->Setup(zip_fd, abs_dex_location, error_msg)) {
-    return nullptr;
-  }
-
-  return oat_file.release();
-}
-
 bool ElfOatFile::InitializeFromElfFile(int zip_fd,
                                        ElfFile* elf_file,
                                        VdexFile* vdex_file,
-                                       const char* abs_dex_location,
+                                       ArrayRef<const std::string> dex_filenames,
                                        std::string* error_msg) {
   ScopedTrace trace(__PRETTY_FUNCTION__);
   if (IsExecutable()) {
@@ -1336,7 +1434,7 @@
   SetBegin(elf_file->Begin() + offset);
   SetEnd(elf_file->Begin() + size + offset);
   // Ignore the optional .bss section when opening non-executable.
-  return Setup(zip_fd, abs_dex_location, error_msg);
+  return Setup(zip_fd, dex_filenames, error_msg);
 }
 
 bool ElfOatFile::Load(const std::string& elf_filename,
@@ -1501,94 +1599,16 @@
 // General OatFile code //
 //////////////////////////
 
-static bool IsLocationSuffix(const char* abs_dex_location, const std::string& rel_dex_location) {
-  std::string_view abs_location(abs_dex_location);
-  std::string target_suffix = "/" + DexFileLoader::GetBaseLocation(rel_dex_location);
-  if (abs_location.size() <= target_suffix.size()) {
-    return false;
-  }
-  size_t pos = abs_location.size() - target_suffix.size();
-  return abs_location.compare(pos, std::string::npos, target_suffix) == 0;
-}
-
-static void MaybeResolveDexPath(const char* abs_dex_location,
-                                const std::string& rel_dex_location,
-                                bool resolve,
-                                /* out */ std::string* out_location) {
-  DCHECK(!resolve || abs_dex_location != nullptr);
-  if (out_location != nullptr) {
-    *out_location = resolve
-        ? std::string(abs_dex_location) + DexFileLoader::GetMultiDexSuffix(rel_dex_location)
-        : rel_dex_location;
-  }
-}
-
-void OatFile::ResolveRelativeEncodedDexLocation(const char* abs_dex_location,
-                                                const std::string& rel_dex_location,
-                                                /* out */ std::string* dex_file_location,
-                                                /* out */ std::string* dex_file_name) {
-  // Note that in this context `abs_dex_location` may not always be absolute
-  // and `rel_dex_location` may not always be relative. It simply means that
-  // we will try to resolve `rel_dex_location` into an absolute location using
-  // `abs_dex_location` for the base directory if needed.
-
-  bool resolve_location = false;
-  bool resolve_filename = false;
-
-  if (abs_dex_location != nullptr) {
-    if (!IsAbsoluteLocation(rel_dex_location) &&
-        IsLocationSuffix(abs_dex_location, rel_dex_location)) {
-      // The base location (w/o multidex suffix) of the relative `rel_dex_location` is a suffix
-      // of `abs_dex_location`. This typically happens for oat files which only encode the
-      // basename() so the oat and dex files can move to different directories.
-      // Example:
-      //   abs_dex_location = "/data/app/myapp/MyApplication.apk"
-      //   rel_dex_location = "MyApplication.apk!classes2.dex"
-      resolve_location = true;
-      resolve_filename = true;
-    } else {
-      // Case 1: `rel_dex_location` is absolute
-      //   On target always use `rel_dex_location` for both dex file name and dex location.
-      //   On host assume we're cross-compiling and use `abs_dex_location` as a file name
-      //   (for loading files) and `rel_dex_location` as the dex location. If we're not
-      //   cross-compiling, the two paths should be equal.
-      // Case 2: `rel_dex_location` is relative and not suffix of `abs_location`
-      //   This should never happen outside of tests. On target always use `rel_dex_location`. On
-      //   host use `abs_dex_location` with the appropriate multidex suffix because
-      //   `rel_dex_location` might be the target path.
-      resolve_location = false;
-      resolve_filename = !kIsTargetBuild;
-    }
-  }
-
-  // Construct dex file location and dex file name if the correspoding out-param pointers
-  // were provided by the caller.
-  MaybeResolveDexPath(abs_dex_location, rel_dex_location, resolve_location, dex_file_location);
-  MaybeResolveDexPath(abs_dex_location, rel_dex_location, resolve_filename, dex_file_name);
-}
-
 static void CheckLocation(const std::string& location) {
   CHECK(!location.empty());
 }
 
-OatFile* OatFile::OpenWithElfFile(int zip_fd,
-                                  ElfFile* elf_file,
-                                  VdexFile* vdex_file,
-                                  const std::string& location,
-                                  const char* abs_dex_location,
-                                  std::string* error_msg) {
-  std::unique_ptr<ElfOatFile> oat_file(new ElfOatFile(location, /*executable=*/ false));
-  return oat_file->InitializeFromElfFile(zip_fd, elf_file, vdex_file, abs_dex_location, error_msg)
-      ? oat_file.release()
-      : nullptr;
-}
-
 OatFile* OatFile::Open(int zip_fd,
                        const std::string& oat_filename,
                        const std::string& oat_location,
                        bool executable,
                        bool low_4gb,
-                       const char* abs_dex_location,
+                       ArrayRef<const std::string> dex_filenames,
                        /*inout*/MemMap* reservation,
                        /*out*/std::string* error_msg) {
   ScopedTrace trace("Open oat file " + oat_location);
@@ -1615,7 +1635,7 @@
                                                                  /*writable=*/ false,
                                                                  executable,
                                                                  low_4gb,
-                                                                 abs_dex_location,
+                                                                 dex_filenames,
                                                                  reservation,
                                                                  error_msg);
   if (with_dlopen != nullptr) {
@@ -1632,7 +1652,7 @@
   // open a generated dex file by name, remove the file, then open
   // another generated dex file with the same name. http://b/10614658
   //
-  // On host, dlopen is expected to fail when cross compiling, so fall back to OpenElfFile.
+  // On host, dlopen is expected to fail when cross compiling, so fall back to ElfOatFile.
   //
   //
   // Another independent reason is the absolute placement of boot.oat. dlopen on the host usually
@@ -1644,7 +1664,7 @@
                                                                 /*writable=*/ false,
                                                                 executable,
                                                                 low_4gb,
-                                                                abs_dex_location,
+                                                                dex_filenames,
                                                                 reservation,
                                                                 error_msg);
   return with_internal;
@@ -1656,7 +1676,7 @@
                        const std::string& oat_location,
                        bool executable,
                        bool low_4gb,
-                       const char* abs_dex_location,
+                       ArrayRef<const std::string> dex_filenames,
                        /*inout*/MemMap* reservation,
                        /*out*/std::string* error_msg) {
   CHECK(!oat_location.empty()) << oat_location;
@@ -1671,46 +1691,12 @@
                                                                 /*writable=*/ false,
                                                                 executable,
                                                                 low_4gb,
-                                                                abs_dex_location,
+                                                                dex_filenames,
                                                                 reservation,
                                                                 error_msg);
   return with_internal;
 }
 
-OatFile* OatFile::OpenWritable(int zip_fd,
-                               File* file,
-                               const std::string& location,
-                               const char* abs_dex_location,
-                               std::string* error_msg) {
-  CheckLocation(location);
-  return ElfOatFile::OpenElfFile(zip_fd,
-                                 file,
-                                 location,
-                                 /*writable=*/ true,
-                                 /*executable=*/ false,
-                                 /*low_4gb=*/false,
-                                 abs_dex_location,
-                                 /*reservation=*/ nullptr,
-                                 error_msg);
-}
-
-OatFile* OatFile::OpenReadable(int zip_fd,
-                               File* file,
-                               const std::string& location,
-                               const char* abs_dex_location,
-                               std::string* error_msg) {
-  CheckLocation(location);
-  return ElfOatFile::OpenElfFile(zip_fd,
-                                 file,
-                                 location,
-                                 /*writable=*/ false,
-                                 /*executable=*/ false,
-                                 /*low_4gb=*/false,
-                                 abs_dex_location,
-                                 /*reservation=*/ nullptr,
-                                 error_msg);
-}
-
 OatFile* OatFile::OpenFromVdex(const std::vector<const DexFile*>& dex_files,
                                std::unique_ptr<VdexFile>&& vdex_file,
                                const std::string& location) {
@@ -1978,12 +1964,13 @@
   const uint8_t* status_pointer = oat_class_pointer;
   CHECK_LT(status_pointer, oat_file_->End()) << oat_file_->GetLocation();
   ClassStatus status = enum_cast<ClassStatus>(*reinterpret_cast<const int16_t*>(status_pointer));
-  CHECK_LE(status, ClassStatus::kLast);
+  CHECK_LE(status, ClassStatus::kLast) << static_cast<uint32_t>(status)
+      << " at " << oat_file_->GetLocation();
 
   const uint8_t* type_pointer = status_pointer + sizeof(uint16_t);
   CHECK_LT(type_pointer, oat_file_->End()) << oat_file_->GetLocation();
   OatClassType type = static_cast<OatClassType>(*reinterpret_cast<const uint16_t*>(type_pointer));
-  CHECK_LT(type, kOatClassMax);
+  CHECK_LT(type, kOatClassMax) << oat_file_->GetLocation();
 
   const uint8_t* after_type_pointer = type_pointer + sizeof(int16_t);
   CHECK_LE(after_type_pointer, oat_file_->End()) << oat_file_->GetLocation();
@@ -2160,11 +2147,6 @@
   return OatMethod(oat_file_->Begin(), 0);
 }
 
-void OatFile::OatMethod::LinkMethod(ArtMethod* method) const {
-  CHECK(method != nullptr);
-  method->SetEntryPointFromQuickCompiledCode(GetQuickCode());
-}
-
 bool OatFile::IsDebuggable() const {
   return GetOatHeader().IsDebuggable();
 }
@@ -2229,8 +2211,7 @@
                 reloc_begin,
                 DataBimgRelRoSize(),
                 PROT_READ | PROT_WRITE);
-    uint32_t boot_image_begin = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(
-        Runtime::Current()->GetHeap()->GetBootImageSpaces().front()->Begin()));
+    uint32_t boot_image_begin = Runtime::Current()->GetHeap()->GetBootImagesStartAddress();
     for (const uint32_t& relocation : GetBootImageRelocations()) {
       const_cast<uint32_t&>(relocation) += boot_image_begin;
     }
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 47032d4..dce34d9 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -34,8 +34,6 @@
 #include "dex/utf.h"
 #include "index_bss_mapping.h"
 #include "mirror/object.h"
-#include "oat.h"
-#include "runtime.h"
 
 namespace art {
 
@@ -61,6 +59,31 @@
 }  // namespace collector
 }  // namespace gc
 
+// OatMethodOffsets are currently 5x32-bits=160-bits long, so if we can
+// save even one OatMethodOffsets struct, the more complicated encoding
+// using a bitmap pays for itself since few classes will have 160
+// methods.
+enum OatClassType {
+  kOatClassAllCompiled = 0,   // OatClass is followed by an OatMethodOffsets for each method.
+  kOatClassSomeCompiled = 1,  // A bitmap of OatMethodOffsets that are present follows the OatClass.
+  kOatClassNoneCompiled = 2,  // All methods are interpreted so no OatMethodOffsets are necessary.
+  kOatClassMax = 3,
+};
+
+std::ostream& operator<<(std::ostream& os, const OatClassType& rhs);
+
+class PACKED(4) OatMethodOffsets {
+ public:
+  explicit OatMethodOffsets(uint32_t code_offset = 0) : code_offset_(code_offset) {}
+
+  ~OatMethodOffsets() {}
+
+  OatMethodOffsets(const OatMethodOffsets&) = default;
+  OatMethodOffsets& operator=(const OatMethodOffsets&) = default;
+
+  uint32_t code_offset_;
+};
+
 // Runtime representation of the OAT file format which holds compiler output.
 // The class opens an OAT file from storage and maps it to memory, typically with
 // dlopen and provides access to its internal data structures (see OatWriter for
@@ -74,26 +97,52 @@
   // Special classpath that skips shared library check.
   static constexpr const char* kSpecialSharedLibrary = "&";
 
-  // Opens an oat file contained within the given elf file. This is always opened as
-  // non-executable at the moment.
-  static OatFile* OpenWithElfFile(int zip_fd,
-                                  ElfFile* elf_file,
-                                  VdexFile* vdex_file,
-                                  const std::string& location,
-                                  const char* abs_dex_location,
-                                  std::string* error_msg);
-  // Open an oat file. Returns null on failure.  Requested base can
-  // optionally be used to request where the file should be loaded.
-  // See the ResolveRelativeEncodedDexLocation for a description of how the
-  // abs_dex_location argument is used.
+  // Open an oat file. Returns null on failure.
+  // The `dex_filenames` argument, if provided, overrides the dex locations
+  // from oat file when opening the dex files if they are not embedded in the
+  // vdex file. These may differ for cross-compilation (the dex file name is
+  // the host path and dex location is the future path on target) and testing.
   static OatFile* Open(int zip_fd,
                        const std::string& filename,
                        const std::string& location,
                        bool executable,
                        bool low_4gb,
-                       const char* abs_dex_location,
+                       ArrayRef<const std::string> dex_filenames,
                        /*inout*/MemMap* reservation,  // Where to load if not null.
                        /*out*/std::string* error_msg);
+  // Helper overload that takes a single dex filename and no reservation.
+  static OatFile* Open(int zip_fd,
+                       const std::string& filename,
+                       const std::string& location,
+                       bool executable,
+                       bool low_4gb,
+                       const std::string& dex_filename,
+                       /*out*/std::string* error_msg) {
+    return Open(zip_fd,
+                filename,
+                location,
+                executable,
+                low_4gb,
+                ArrayRef<const std::string>(&dex_filename, /*size=*/ 1u),
+                /*reservation=*/ nullptr,
+                error_msg);
+  }
+  // Helper overload that takes no dex filename and no reservation.
+  static OatFile* Open(int zip_fd,
+                       const std::string& filename,
+                       const std::string& location,
+                       bool executable,
+                       bool low_4gb,
+                       /*out*/std::string* error_msg) {
+    return Open(zip_fd,
+                filename,
+                location,
+                executable,
+                low_4gb,
+                ArrayRef<const std::string>(),
+                /*reservation=*/ nullptr,
+                error_msg);
+  }
 
   // Similar to OatFile::Open(const std::string...), but accepts input vdex and
   // odex files as file descriptors. We also take zip_fd in case the vdex does not
@@ -104,27 +153,10 @@
                        const std::string& oat_location,
                        bool executable,
                        bool low_4gb,
-                       const char* abs_dex_location,
+                       ArrayRef<const std::string> dex_filenames,
                        /*inout*/MemMap* reservation,  // Where to load if not null.
                        /*out*/std::string* error_msg);
 
-  // Open an oat file from an already opened File.
-  // Does not use dlopen underneath so cannot be used for runtime use
-  // where relocations may be required. Currently used from
-  // ImageWriter which wants to open a writable version from an existing
-  // file descriptor for patching.
-  static OatFile* OpenWritable(int zip_fd,
-                               File* file,
-                               const std::string& location,
-                               const char* abs_dex_location,
-                               std::string* error_msg);
-  // Open an oat file from an already opened File. Maps it PROT_READ, MAP_PRIVATE.
-  static OatFile* OpenReadable(int zip_fd,
-                               File* file,
-                               const std::string& location,
-                               const char* abs_dex_location,
-                               std::string* error_msg);
-
   // Initialize OatFile instance from an already loaded VdexFile. This assumes
   // the vdex does not have a dex section and accepts a vector of DexFiles separately.
   static OatFile* OpenFromVdex(const std::vector<const DexFile*>& dex_files,
@@ -154,8 +186,6 @@
 
   class OatMethod final {
    public:
-    void LinkMethod(ArtMethod* method) const;
-
     uint32_t GetCodeOffset() const;
 
     const void* GetQuickCode() const;
@@ -335,28 +365,6 @@
   // Initialize relocation sections (.data.bimg.rel.ro and .bss).
   void InitializeRelocations() const;
 
-  // Constructs the absolute dex location and/or dex file name for the relative dex
-  // location (`rel_dex_location`) in the oat file, using the `abs_dex_location` of
-  // the dex file this oat belongs to.
-  //
-  // The dex file name and dex location differ when cross compiling where the dex file
-  // name is the host path (for opening files) and dex location is the future path on target.
-  //
-  // If not null, abs_dex_location is used to resolve the absolute dex
-  // location of relative dex locations encoded in the oat file.
-  // For example, given absolute location "/data/app/foo/base.apk", encoded
-  // dex locations "base.apk", "base.apk!classes2.dex", etc. would be resolved
-  // to "/data/app/foo/base.apk", "/data/app/foo/base.apk!classes2.dex", etc.
-  // Relative encoded dex locations that don't match the given abs_dex_location
-  // are left unchanged.
-  //
-  // Computation of both `dex_file_location` and `dex_file_name` can be skipped
-  // by setting the corresponding out parameter to `nullptr`.
-  static void ResolveRelativeEncodedDexLocation(const char* abs_dex_location,
-                                                const std::string& rel_dex_location,
-                                                /* out */ std::string* dex_file_location,
-                                                /* out */ std::string* dex_file_name = nullptr);
-
   // Finds the associated oat class for a dex_file and descriptor. Returns an invalid OatClass on
   // error and sets found to false.
   static OatClass FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, bool* found);
@@ -367,7 +375,7 @@
 
   // Whether the OatFile embeds the Dex code.
   bool ContainsDexCode() const {
-    return uncompressed_dex_files_ == nullptr;
+    return external_dex_files_.empty();
   }
 
  protected:
@@ -448,9 +456,9 @@
   // elements. std::list<> and std::deque<> satisfy this requirement, std::vector<> doesn't.
   mutable std::list<std::string> string_cache_ GUARDED_BY(secondary_lookup_lock_);
 
-  // Cache of dex files mapped directly from a location, in case the OatFile does
-  // not embed the dex code.
-  std::unique_ptr<std::vector<std::unique_ptr<const DexFile>>> uncompressed_dex_files_;
+  // Dex files opened directly from a file referenced from the oat file or specified
+  // by the `dex_filenames` parameter, in case the OatFile does not embed the dex code.
+  std::vector<std::unique_ptr<const DexFile>> external_dex_files_;
 
   friend class gc::collector::DummyOatFile;  // For modifying begin_ and end_.
   friend class OatClass;
@@ -472,12 +480,6 @@
   // May return null if the OatDexFile only contains a type lookup table. This case only happens
   // for the compiler to speed up compilation, or in jitzygote.
   const OatFile* GetOatFile() const {
-    // Avoid pulling in runtime.h in the header file.
-    if (kIsDebugBuild && oat_file_ == nullptr) {
-      if (!Runtime::Current()->IsUsingApexBootImageLocation()) {
-        AssertAotCompiler();
-      }
-    }
     return oat_file_;
   }
 
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 9bb1203..f374883 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -30,6 +30,7 @@
 #include "base/os.h"
 #include "base/stl_util.h"
 #include "base/string_view_cpp20.h"
+#include "base/systrace.h"
 #include "base/utils.h"
 #include "class_linker.h"
 #include "class_loader_context.h"
@@ -184,16 +185,16 @@
 }
 
 int OatFileAssistant::GetDexOptNeeded(CompilerFilter::Filter target,
-                                      bool profile_changed,
-                                      bool downgrade,
                                       ClassLoaderContext* class_loader_context,
-                                      const std::vector<int>& context_fds) {
+                                      const std::vector<int>& context_fds,
+                                      bool profile_changed,
+                                      bool downgrade) {
   OatFileInfo& info = GetBestInfo();
   DexOptNeeded dexopt_needed = info.GetDexOptNeeded(target,
-                                                    profile_changed,
-                                                    downgrade,
                                                     class_loader_context,
-                                                    context_fds);
+                                                    context_fds,
+                                                    profile_changed,
+                                                    downgrade);
   if (info.IsOatLocation() || dexopt_needed == kDex2OatFromScratch) {
     return dexopt_needed;
   }
@@ -305,6 +306,7 @@
 }
 
 bool OatFileAssistant::HasOriginalDexFiles() {
+  ScopedTrace trace("HasOriginalDexFiles");
   // Ensure GetRequiredDexChecksums has been run so that
   // has_original_dex_files_ is initialized. We don't care about the result of
   // GetRequiredDexChecksums.
@@ -321,6 +323,7 @@
 }
 
 bool OatFileAssistant::DexChecksumUpToDate(const VdexFile& file, std::string* error_msg) {
+  ScopedTrace trace("DexChecksumUpToDate(vdex)");
   const std::vector<uint32_t>* required_dex_checksums = GetRequiredDexChecksums();
   if (required_dex_checksums == nullptr) {
     LOG(WARNING) << "Required dex checksums not found. Assuming dex checksums are up to date.";
@@ -353,6 +356,7 @@
 }
 
 bool OatFileAssistant::DexChecksumUpToDate(const OatFile& file, std::string* error_msg) {
+  ScopedTrace trace("DexChecksumUpToDate(oat)");
   const std::vector<uint32_t>* required_dex_checksums = GetRequiredDexChecksums();
   if (required_dex_checksums == nullptr) {
     LOG(WARNING) << "Required dex checksums not found. Assuming dex checksums are up to date.";
@@ -600,81 +604,59 @@
 }
 
 bool OatFileAssistant::ValidateBootClassPathChecksums(const OatFile& oat_file) {
-  // Get the BCP from the oat file.
-  const char* oat_boot_class_path =
-      oat_file.GetOatHeader().GetStoreValueByKey(OatHeader::kBootClassPathKey);
-  if (oat_boot_class_path == nullptr) {
-    return false;
-  }
-
-  // Check that the oat BCP is a prefix of current BCP locations and count components.
-  Runtime* runtime = Runtime::Current();
-  size_t component_count = 0u;
-  std::string_view remaining_bcp(oat_boot_class_path);
-  bool bcp_ok = false;
-  for (const std::string& location : runtime->GetBootClassPathLocations()) {
-    if (!StartsWith(remaining_bcp, location)) {
-      break;
-    }
-    remaining_bcp.remove_prefix(location.size());
-    ++component_count;
-    if (remaining_bcp.empty()) {
-      bcp_ok = true;
-      break;
-    }
-    if (!StartsWith(remaining_bcp, ":")) {
-      break;
-    }
-    remaining_bcp.remove_prefix(1u);
-  }
-  if (!bcp_ok) {
-    return false;
-  }
-
-  // Get the checksums.
+  // Get the checksums and the BCP from the oat file.
   const char* oat_boot_class_path_checksums =
       oat_file.GetOatHeader().GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey);
-  if (oat_boot_class_path_checksums == nullptr) {
+  const char* oat_boot_class_path =
+      oat_file.GetOatHeader().GetStoreValueByKey(OatHeader::kBootClassPathKey);
+  if (oat_boot_class_path_checksums == nullptr || oat_boot_class_path == nullptr) {
     return false;
   }
-
-  // Retrieve checksums for this portion of the BCP if we do not have them cached.
-  if (cached_boot_class_path_checksum_component_count_ != component_count) {
-    ArrayRef<const std::string> boot_class_path(runtime->GetBootClassPath());
-    std::string error_msg;
-    std::string boot_class_path_checksums = gc::space::ImageSpace::GetBootClassPathChecksums(
-        boot_class_path.SubArray(/* pos= */ 0u, component_count),
-        runtime->GetImageLocation(),
-        isa_,
-        runtime->GetImageSpaceLoadingOrder(),
-        &error_msg);
-    if (boot_class_path_checksums.empty()) {
-      VLOG(oat) << "No image for oat image checksum to match against.";
-
-      if (HasOriginalDexFiles()) {
-        return false;
-      }
-
-      // If there is no original dex file to fall back to, grudgingly accept
-      // the oat file. This could technically lead to crashes, but there's no
-      // way we could find a better oat file to use for this dex location,
-      // and it's better than being stuck in a boot loop with no way out.
-      // The problem will hopefully resolve itself the next time the runtime
-      // starts up.
-      LOG(WARNING) << "Dex location " << dex_location_ << " does not seem to include dex file. "
-          << "Allow oat file use. This is potentially dangerous.";
-
-      return true;
-    }
-    cached_boot_class_path_checksum_component_count_ = component_count;
-    cached_boot_class_path_checksums_ = boot_class_path_checksums;
+  std::string_view oat_boot_class_path_checksums_view(oat_boot_class_path_checksums);
+  std::string_view oat_boot_class_path_view(oat_boot_class_path);
+  if (oat_boot_class_path_view == cached_boot_class_path_ &&
+      oat_boot_class_path_checksums_view == cached_boot_class_path_checksums_) {
+    return true;
   }
 
-  // Compare the checksums.
-  return cached_boot_class_path_checksums_ == oat_boot_class_path_checksums;
+  Runtime* runtime = Runtime::Current();
+  std::string error_msg;
+  bool result = gc::space::ImageSpace::VerifyBootClassPathChecksums(
+      oat_boot_class_path_checksums_view,
+      oat_boot_class_path_view,
+      runtime->GetImageLocation(),
+      ArrayRef<const std::string>(runtime->GetBootClassPathLocations()),
+      ArrayRef<const std::string>(runtime->GetBootClassPath()),
+      isa_,
+      runtime->GetImageSpaceLoadingOrder(),
+      &error_msg);
+  if (!result) {
+    VLOG(oat) << "Failed to verify checksums of oat file " << oat_file.GetLocation()
+        << " error: " << error_msg;
+
+    if (HasOriginalDexFiles()) {
+      return false;
+    }
+
+    // If there is no original dex file to fall back to, grudgingly accept
+    // the oat file. This could technically lead to crashes, but there's no
+    // way we could find a better oat file to use for this dex location,
+    // and it's better than being stuck in a boot loop with no way out.
+    // The problem will hopefully resolve itself the next time the runtime
+    // starts up.
+    LOG(WARNING) << "Dex location " << dex_location_ << " does not seem to include dex file. "
+        << "Allow oat file use. This is potentially dangerous.";
+    return true;
+  }
+
+  // This checksum has been validated, so save it.
+  cached_boot_class_path_ = oat_boot_class_path_view;
+  cached_boot_class_path_checksums_ = oat_boot_class_path_checksums_view;
+  return true;
 }
 
 OatFileAssistant::OatFileInfo& OatFileAssistant::GetBestInfo() {
+  ScopedTrace trace("GetBestInfo");
   // TODO(calin): Document the side effects of class loading when
   // running dalvikvm command line.
   if (dex_parent_writable_ || UseFdToReadFiles()) {
@@ -746,6 +728,7 @@
 }
 
 bool OatFileAssistant::OatFileInfo::IsUseable() {
+  ScopedTrace trace("IsUseable");
   switch (Status()) {
     case kOatCannotOpen:
     case kOatDexOutOfDate:
@@ -757,6 +740,7 @@
 }
 
 OatFileAssistant::OatStatus OatFileAssistant::OatFileInfo::Status() {
+  ScopedTrace trace("Status");
   if (!status_attempted_) {
     status_attempted_ = true;
     const OatFile* file = GetFile();
@@ -813,10 +797,10 @@
 
 OatFileAssistant::DexOptNeeded OatFileAssistant::OatFileInfo::GetDexOptNeeded(
     CompilerFilter::Filter target,
-    bool profile_changed,
-    bool downgrade,
     ClassLoaderContext* context,
-    const std::vector<int>& context_fds) {
+    const std::vector<int>& context_fds,
+    bool profile_changed,
+    bool downgrade) {
 
   bool filter_okay = CompilerFilterIsOkay(target, profile_changed, downgrade);
   bool class_loader_context_okay = ClassLoaderContextIsOkay(context, context_fds);
@@ -859,13 +843,15 @@
       std::string error_msg;
       if (use_fd_) {
         if (oat_fd_ >= 0 && vdex_fd_ >= 0) {
+          ArrayRef<const std::string> dex_locations(&oat_file_assistant_->dex_location_,
+                                                    /*size=*/ 1u);
           file_.reset(OatFile::Open(zip_fd_,
                                     vdex_fd_,
                                     oat_fd_,
                                     filename_.c_str(),
                                     executable,
                                     /*low_4gb=*/ false,
-                                    oat_file_assistant_->dex_location_.c_str(),
+                                    dex_locations,
                                     /*reservation=*/ nullptr,
                                     &error_msg));
         }
@@ -875,8 +861,7 @@
                                   filename_.c_str(),
                                   executable,
                                   /*low_4gb=*/ false,
-                                  oat_file_assistant_->dex_location_.c_str(),
-                                  /*reservation=*/ nullptr,
+                                  oat_file_assistant_->dex_location_,
                                   &error_msg));
       }
       if (file_.get() == nullptr) {
@@ -908,17 +893,26 @@
 
 bool OatFileAssistant::OatFileInfo::ClassLoaderContextIsOkay(ClassLoaderContext* context,
                                                              const std::vector<int>& context_fds) {
-  if (context == nullptr) {
-    VLOG(oat) << "ClassLoaderContext check ignored: null context";
-    return true;
-  }
-
   const OatFile* file = GetFile();
   if (file == nullptr) {
     // No oat file means we have nothing to verify.
     return true;
   }
 
+  if (!CompilerFilter::IsVerificationEnabled(file->GetCompilerFilter())) {
+    // If verification is not enabled we don't need to verify the class loader context and we
+    // assume it's ok.
+    return true;
+  }
+
+
+  if (context == nullptr) {
+    // TODO(calin): stop using null for the unknown contexts.
+    // b/148494302 introduces runtime encoding for unknown context which will make this possible.
+    VLOG(oat) << "ClassLoaderContext check failed: unknown(null) context";
+    return false;
+  }
+
   size_t dir_index = oat_file_assistant_->dex_location_.rfind('/');
   std::string classpath_dir = (dir_index != std::string::npos)
       ? oat_file_assistant_->dex_location_.substr(0, dir_index)
@@ -970,6 +964,7 @@
 }
 
 std::unique_ptr<OatFile> OatFileAssistant::OatFileInfo::ReleaseFileForUse() {
+  ScopedTrace trace("ReleaseFileForUse");
   if (Status() == kOatUpToDate) {
     return ReleaseFile();
   }
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 1f3f74f..aa4d83b 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -144,10 +144,10 @@
   // the oat location. Returns a negative status code if the status refers to
   // the oat file in the odex location.
   int GetDexOptNeeded(CompilerFilter::Filter target_compiler_filter,
+                      ClassLoaderContext* context,
+                      const std::vector<int>& context_fds,
                       bool profile_changed = false,
-                      bool downgrade = false,
-                      ClassLoaderContext* context = nullptr,
-                      const std::vector<int>& context_fds = std::vector<int>());
+                      bool downgrade = false);
 
   // Returns true if there is up-to-date code for this dex location,
   // irrespective of the compiler filter of the up-to-date code.
@@ -292,10 +292,10 @@
     // downgrade should be true if the purpose of dexopt is to downgrade the
     // compiler filter.
     DexOptNeeded GetDexOptNeeded(CompilerFilter::Filter target_compiler_filter,
-                                 bool profile_changed,
-                                 bool downgrade,
                                  ClassLoaderContext* context,
-                                 const std::vector<int>& context_fds);
+                                 const std::vector<int>& context_fds,
+                                 bool profile_changed,
+                                 bool downgrade);
 
     // Returns the loaded file.
     // Loads the file if needed. Returns null if the file failed to load.
@@ -439,7 +439,7 @@
   // File descriptor corresponding to apk, dex file, or zip.
   int zip_fd_;
 
-  size_t cached_boot_class_path_checksum_component_count_ = 0u;
+  std::string cached_boot_class_path_;
   std::string cached_boot_class_path_checksums_;
 
   friend class OatFileAssistantTest;
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 9a5409f..ed47ca3 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -34,6 +34,7 @@
 #include "common_runtime_test.h"
 #include "dexopt_test.h"
 #include "hidden_api.h"
+#include "oat.h"
 #include "oat_file.h"
 #include "oat_file_manager.h"
 #include "scoped_thread_state_change-inl.h"
@@ -61,6 +62,7 @@
       VerifyOptimizationStatus(
           file, CompilerFilter::NameOfFilter(expected_filter), expected_reason);
   }
+
   void InsertNewBootClasspathEntry() {
     std::string extra_dex_filename = GetMultiDexSrc1();
     Runtime* runtime = Runtime::Current();
@@ -69,6 +71,33 @@
       runtime->boot_class_path_locations_.push_back(extra_dex_filename);
     }
   }
+
+  int GetDexOptNeeded(OatFileAssistant* assistant,
+                      CompilerFilter::Filter compiler_filter,
+                      bool profile_changed) {
+    std::vector<int> context_fds;
+    return GetDexOptNeeded(assistant,
+        compiler_filter,
+        ClassLoaderContext::Default(),
+        context_fds,
+        profile_changed,
+        /*downgrade=*/ false);
+  }
+
+  int GetDexOptNeeded(
+      OatFileAssistant* assistant,
+      CompilerFilter::Filter compiler_filter,
+      const std::unique_ptr<ClassLoaderContext>& context = ClassLoaderContext::Default(),
+      const std::vector<int>& context_fds = std::vector<int>(),
+      bool profile_changed = false,
+      bool downgrade = false) {
+    return assistant->GetDexOptNeeded(
+        compiler_filter,
+        context.get(),
+        context_fds,
+        profile_changed,
+        downgrade);
+  }
 };
 
 class ScopedNonWritable {
@@ -188,11 +217,9 @@
   std::unique_ptr<ClassLoaderContext> relative_context =
       ClassLoaderContext::Create("PCL[ContextDex.jar]");
   EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-            oat_file_assistant.GetDexOptNeeded(
+            GetDexOptNeeded(&oat_file_assistant,
                 CompilerFilter::kDefaultCompilerFilter,
-                /* profile_changed= */ false,
-                /* downgrade= */ false,
-                relative_context.get()));
+                relative_context));
 }
 
 // Case: We have a DEX file, but no OAT file for it.
@@ -204,13 +231,13 @@
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
 
   EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kExtract));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
   EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kQuicken));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kQuicken));
   EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeedProfile));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeedProfile));
   EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
@@ -228,7 +255,7 @@
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
 
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
   EXPECT_FALSE(oat_file_assistant.HasOriginalDexFiles());
 
   // Trying to get the best oat file should fail, but not crash.
@@ -249,13 +276,13 @@
       dex_location.c_str(), kRuntimeISA, /*load_executable=*/ false);
 
   EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-            oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+            GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
   EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-            oat_file_assistant.GetDexOptNeeded(CompilerFilter::kQuicken));
+            GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kQuicken));
   EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-            oat_file_assistant.GetDexOptNeeded(CompilerFilter::kExtract));
+            GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
   EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
-            oat_file_assistant.GetDexOptNeeded(CompilerFilter::kEverything));
+            GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kEverything));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
@@ -281,13 +308,13 @@
       dex_location.c_str(), kRuntimeISA, /*load_executable=*/ false);
 
   EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-            oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+            GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
   EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-            oat_file_assistant.GetDexOptNeeded(CompilerFilter::kQuicken));
+            GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kQuicken));
   EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-            oat_file_assistant.GetDexOptNeeded(CompilerFilter::kExtract));
+            GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
   EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
-            oat_file_assistant.GetDexOptNeeded(CompilerFilter::kEverything));
+            GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kEverything));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
@@ -316,13 +343,13 @@
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
 
   EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
   EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kQuicken));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kQuicken));
   EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kExtract));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
   EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kEverything));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kEverything));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
@@ -350,13 +377,13 @@
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
 
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kQuicken));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kQuicken));
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kExtract));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
   EXPECT_EQ(OatFileAssistant::kDex2OatForFilter,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kEverything));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kEverything));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
@@ -391,13 +418,13 @@
                                       odex_fd.get(),
                                       zip_fd.get());
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kQuicken));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kQuicken));
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kExtract));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
   EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kEverything));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kEverything));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
@@ -429,9 +456,9 @@
                                       /* oat_fd= */ -1,
                                       zip_fd.get());
   EXPECT_EQ(-OatFileAssistant::kDex2OatForBootImage,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
   EXPECT_EQ(-OatFileAssistant::kDex2OatForBootImage,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kEverything));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kEverything));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatBootImageOutOfDate, oat_file_assistant.OdexFileStatus());
@@ -463,7 +490,7 @@
                                       zip_fd.get());
 
   EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
@@ -486,7 +513,7 @@
                                       /* oat_fd= */ -1,
                                       zip_fd);
   EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
 }
@@ -512,7 +539,7 @@
   // depends on the boot image and is out of date with respect to the boot
   // image.
   EXPECT_EQ(-OatFileAssistant::kDex2OatForBootImage,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
 
   // Make sure we don't crash in this case when we dump the status. We don't
   // care what the actual dumped value is.
@@ -533,7 +560,7 @@
 
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
   EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
 }
 
 // Case: We have a DEX file and up-to-date (OAT) VDEX file for it, but no OAT
@@ -565,7 +592,7 @@
   // depends on the boot image and is out of date with respect to the boot
   // image.
   EXPECT_EQ(OatFileAssistant::kDex2OatForBootImage,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
 }
 
 // Case: We have a DEX file and speed-profile OAT file for it.
@@ -588,13 +615,13 @@
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
 
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeedProfile, false));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeedProfile, false));
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kQuicken, false));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kQuicken, false));
   EXPECT_EQ(OatFileAssistant::kDex2OatForFilter,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeedProfile, true));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeedProfile, true));
   EXPECT_EQ(OatFileAssistant::kDex2OatForFilter,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kQuicken, true));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kQuicken, true));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
@@ -620,7 +647,7 @@
 
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed, false));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
   EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles());
 
   // Verify we can load both dex files.
@@ -656,7 +683,7 @@
 
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
   EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed, false));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
   EXPECT_TRUE(oat_file_assistant.HasOriginalDexFiles());
 }
 
@@ -713,9 +740,9 @@
 
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
   EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kExtract));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
   EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
@@ -737,7 +764,7 @@
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
 
   EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
 }
 
 // Case: We have a MultiDEX (ODEX) VDEX file where the non-main multidex entry
@@ -754,7 +781,7 @@
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
 
   EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
 }
 
 // Case: We have a DEX file and an OAT file out of date with respect to the
@@ -778,11 +805,11 @@
 
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
   EXPECT_EQ(OatFileAssistant::kDex2OatForBootImage,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kExtract));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
   EXPECT_EQ(OatFileAssistant::kDex2OatForBootImage,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kQuicken));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kQuicken));
   EXPECT_EQ(OatFileAssistant::kDex2OatForBootImage,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
@@ -813,9 +840,9 @@
 
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kExtract));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
   EXPECT_EQ(OatFileAssistant::kDex2OatForFilter,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kQuicken));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kQuicken));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
@@ -836,9 +863,9 @@
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
 
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kExtract));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
@@ -866,7 +893,7 @@
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
 
   EXPECT_EQ(-OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
@@ -902,11 +929,11 @@
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
 
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kExtract));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
   EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,  // Compiling from the .vdex file
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kEverything));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kEverything));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
@@ -933,11 +960,11 @@
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
 
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kExtract));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kQuicken));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kQuicken));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
@@ -945,7 +972,7 @@
   EXPECT_FALSE(oat_file_assistant.HasOriginalDexFiles());
 
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
@@ -968,7 +995,7 @@
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
 
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-            oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+            GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
@@ -998,9 +1025,9 @@
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
 
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kExtract));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kExtract));
   EXPECT_EQ(-OatFileAssistant::kDex2OatForFilter,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatUpToDate, oat_file_assistant.OdexFileStatus());
@@ -1140,7 +1167,7 @@
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
 }
@@ -1154,7 +1181,7 @@
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OatFileStatus());
   EXPECT_FALSE(oat_file_assistant.HasOriginalDexFiles());
@@ -1169,7 +1196,7 @@
   OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
 
   EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
-      oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
+      GetDexOptNeeded(&oat_file_assistant, CompilerFilter::kSpeed));
 
   EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
   EXPECT_EQ(OatFileAssistant::kOatCannotOpen, oat_file_assistant.OdexFileStatus());
@@ -1359,29 +1386,50 @@
 
 TEST_F(OatFileAssistantTest, GetDexOptNeededWithOutOfDateContext) {
   std::string dex_location = GetScratchDir() + "/TestDex.jar";
+  std::string odex_location = GetOdexDir() + "/TestDex.odex";
+
   std::string context_location = GetScratchDir() + "/ContextDex.jar";
   Copy(GetDexSrc1(), dex_location);
   Copy(GetDexSrc2(), context_location);
 
-  OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
-
-  std::string error_msg;
   std::string context_str = "PCL[" + context_location + "]";
+
   std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(context_str);
   ASSERT_TRUE(context != nullptr);
   ASSERT_TRUE(context->OpenDexFiles(kRuntimeISA, ""));
 
+  std::string error_msg;
+  std::vector<std::string> args;
+  args.push_back("--dex-file=" + dex_location);
+  args.push_back("--oat-file=" + odex_location);
+  args.push_back("--class-loader-context=" + context_str);
+  ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
+
   // Update the context by overriding the jar file.
   Copy(GetMultiDexSrc2(), context_location);
-  std::unique_ptr<ClassLoaderContext> updated_context = ClassLoaderContext::Create(context_str);
-  ASSERT_TRUE(updated_context != nullptr);
-  // DexOptNeeded should advise compilation from scratch.
-  EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
-            oat_file_assistant.GetDexOptNeeded(
-                  CompilerFilter::kDefaultCompilerFilter,
-                  /* profile_changed= */ false,
-                  /* downgrade= */ false,
-                  updated_context.get()));
+
+  {
+    std::unique_ptr<ClassLoaderContext> updated_context = ClassLoaderContext::Create(context_str);
+    ASSERT_TRUE(updated_context != nullptr);
+    OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+    // DexOptNeeded should advise compilation from scratch when the context changes.
+    EXPECT_EQ(OatFileAssistant::kDex2OatFromScratch,
+              GetDexOptNeeded(&oat_file_assistant,
+                    CompilerFilter::kDefaultCompilerFilter,
+                    updated_context));
+  }
+  {
+    std::unique_ptr<ClassLoaderContext> updated_context = ClassLoaderContext::Create(context_str);
+    ASSERT_TRUE(updated_context != nullptr);
+    OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+    // Now check that DexOptNeeded does not advise compilation if we only extracted the file.
+    args.push_back("--compiler-filter=extract");
+    ASSERT_TRUE(Dex2Oat(args, &error_msg)) << error_msg;
+    EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
+              GetDexOptNeeded(&oat_file_assistant,
+                    CompilerFilter::kExtract,
+                    updated_context));
+  }
 }
 
 // Test that GetLocation of a dex file is the same whether the dex
@@ -1406,7 +1454,7 @@
       /*dex_elements=*/nullptr,
       &oat_file,
       &error_msgs);
-  EXPECT_EQ(dex_files.size(), 1u);
+  ASSERT_EQ(dex_files.size(), 1u) << android::base::Join(error_msgs, "\n");
   EXPECT_EQ(oat_file, nullptr);
   std::string stored_dex_location = dex_files[0]->GetLocation();
   {
@@ -1424,8 +1472,8 @@
       /*dex_elements=*/nullptr,
       &oat_file,
       &error_msgs);
-  EXPECT_EQ(dex_files.size(), 1u);
-  EXPECT_NE(oat_file, nullptr);
+  ASSERT_EQ(dex_files.size(), 1u) << android::base::Join(error_msgs, "\n");
+  ASSERT_NE(oat_file, nullptr);
   std::string oat_stored_dex_location = dex_files[0]->GetLocation();
   EXPECT_EQ(oat_stored_dex_location, stored_dex_location);
 }
@@ -1460,7 +1508,7 @@
       /*dex_elements=*/nullptr,
       &oat_file,
       &error_msgs);
-  EXPECT_EQ(dex_files_first.size(), 1u);
+  ASSERT_EQ(dex_files_first.size(), 1u) << android::base::Join(error_msgs, "\n");
   EXPECT_EQ(oat_file, nullptr) << dex_location;
   EXPECT_EQ(dex_files_first[0]->GetOatDexFile(), nullptr);
 
@@ -1488,8 +1536,8 @@
       /*dex_elements=*/nullptr,
       &oat_file,
       &error_msgs);
-  EXPECT_EQ(dex_files_second.size(), 1u);
-  EXPECT_NE(oat_file, nullptr);
+  ASSERT_EQ(dex_files_second.size(), 1u) << android::base::Join(error_msgs, "\n");
+  ASSERT_NE(oat_file, nullptr);
   EXPECT_NE(dex_files_second[0]->GetOatDexFile(), nullptr);
   EXPECT_NE(dex_files_second[0]->GetOatDexFile()->GetOatFile(), nullptr);
 
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index ed55110..118dd01 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -393,6 +393,12 @@
     return CheckCollisionResult::kSkippedUnsupportedClassLoader;
   }
 
+  if (!CompilerFilter::IsVerificationEnabled(oat_file->GetCompilerFilter())) {
+    // If verification is not enabled we don't need to check for collisions as the oat file
+    // is either extracted or assumed verified.
+    return CheckCollisionResult::kSkippedVerificationDisabled;
+  }
+
   // If the oat file loading context matches the context used during compilation then we accept
   // the oat file without addition checks
   ClassLoaderContext::VerificationResult result = context->VerifyClassLoaderContextMatch(
@@ -406,7 +412,7 @@
       // Mismatched context, do the actual collision check.
       break;
     case ClassLoaderContext::VerificationResult::kVerifies:
-      return CheckCollisionResult::kNoCollisions;
+      return CheckCollisionResult::kClassLoaderContextMatches;
   }
 
   // The class loader context does not match. Perform a full duplicate classes check.
@@ -428,7 +434,8 @@
   if (kEnableAppImage && (!runtime->IsJavaDebuggable() || source_oat_file->IsDebuggable())) {
     // If we verified the class loader context (skipping due to the special marker doesn't
     // count), then also avoid the collision check.
-    bool load_image = check_collision_result == CheckCollisionResult::kNoCollisions;
+    bool load_image = check_collision_result == CheckCollisionResult::kNoCollisions
+        || check_collision_result == CheckCollisionResult::kClassLoaderContextMatches;
     // If we skipped the collision check, we need to reverify to be sure its OK to load the
     // image.
     if (!load_image &&
@@ -473,7 +480,7 @@
 
   OatFileAssistant oat_file_assistant(dex_location,
                                       kRuntimeISA,
-                                      !runtime->IsAotCompiler(),
+                                      runtime->GetOatFilesExecutable(),
                                       only_use_system_oat_files_);
 
   // Get the oat file on disk.
@@ -571,8 +578,6 @@
             ScopedTrace trace2(StringPrintf("Adding image space for location %s", dex_location));
             added_image_space = runtime->GetClassLinker()->AddImageSpace(image_space.get(),
                                                                          h_loader,
-                                                                         dex_elements,
-                                                                         dex_location,
                                                                          /*out*/&dex_files,
                                                                          /*out*/&temp_error_msg);
           }
@@ -641,17 +646,52 @@
         error_msgs->push_back("Fallback mode disabled, skipping dex files.");
       }
     } else {
-      error_msgs->push_back("No original dex files found for dex location "
-          + std::string(dex_location));
+      std::string msg = StringPrintf("No original dex files found for dex location (%s) %s",
+          GetInstructionSetString(kRuntimeISA), dex_location);
+      error_msgs->push_back(msg);
     }
   }
 
   if (Runtime::Current()->GetJit() != nullptr) {
-    ScopedObjectAccess soa(self);
-    Runtime::Current()->GetJit()->RegisterDexFiles(
-        dex_files, soa.Decode<mirror::ClassLoader>(class_loader));
+    Runtime::Current()->GetJit()->RegisterDexFiles(dex_files, class_loader);
   }
 
+  // Verify if any of the dex files being loaded is already in the class path.
+  // If so, report an error with the current stack trace.
+  // Most likely the developer didn't intend to do this because it will waste
+  // performance and memory.
+  // We perform the check only if the class loader context match failed or did
+  // not run (e.g. not that we do not run the check if we don't have an oat file).
+  if (context != nullptr
+          && check_collision_result != CheckCollisionResult::kClassLoaderContextMatches) {
+    std::set<const DexFile*> already_exists_in_classpath =
+        context->CheckForDuplicateDexFiles(MakeNonOwningPointerVector(dex_files));
+    if (!already_exists_in_classpath.empty()) {
+      auto duplicate_it = already_exists_in_classpath.begin();
+      std::string duplicates = (*duplicate_it)->GetLocation();
+      for (duplicate_it++ ; duplicate_it != already_exists_in_classpath.end(); duplicate_it++) {
+        duplicates += "," + (*duplicate_it)->GetLocation();
+      }
+
+      std::ostringstream out;
+      out << "Trying to load dex files which is already loaded in the same ClassLoader hierarchy.\n"
+        << "This is a strong indication of bad ClassLoader construct which leads to poor "
+        << "performance and wastes memory.\n"
+        << "The list of duplicate dex files is: " << duplicates << "\n"
+        << "The current class loader context is: " << context->EncodeContextForOatFile("") << "\n"
+        << "Java stack trace:\n";
+
+      {
+        ScopedObjectAccess soa(self);
+        self->DumpJavaStack(out);
+      }
+
+      // We log this as an ERROR to stress the fact that this is most likely unintended.
+      // Note that ART cannot do anything about it. It is up to the app to fix their logic.
+      // Here we are trying to give a heads up on why the app might have performance issues.
+      LOG(ERROR) << out.str();
+    }
+  }
   return dex_files;
 }
 
@@ -1034,21 +1074,20 @@
   }
 }
 
-void OatFileManager::SetOnlyUseSystemOatFiles(bool enforce, bool assert_no_files_loaded) {
+void OatFileManager::SetOnlyUseSystemOatFiles() {
   ReaderMutexLock mu(Thread::Current(), *Locks::oat_file_manager_lock_);
-  if (!only_use_system_oat_files_ && enforce && assert_no_files_loaded) {
-    // Make sure all files that were loaded up to this point are on /system. Skip the image
-    // files.
-    std::vector<const OatFile*> boot_vector = GetBootOatFiles();
-    std::unordered_set<const OatFile*> boot_set(boot_vector.begin(), boot_vector.end());
+  // Make sure all files that were loaded up to this point are on /system.
+  // Skip the image files as they can encode locations that don't exist (eg not
+  // containing the arch in the path, or for JIT zygote /nonx/existent).
+  std::vector<const OatFile*> boot_vector = GetBootOatFiles();
+  std::unordered_set<const OatFile*> boot_set(boot_vector.begin(), boot_vector.end());
 
-    for (const std::unique_ptr<const OatFile>& oat_file : oat_files_) {
-      if (boot_set.find(oat_file.get()) == boot_set.end()) {
-        CHECK(LocationIsOnSystem(oat_file->GetLocation().c_str())) << oat_file->GetLocation();
-      }
+  for (const std::unique_ptr<const OatFile>& oat_file : oat_files_) {
+    if (boot_set.find(oat_file.get()) == boot_set.end()) {
+      CHECK(LocationIsOnSystem(oat_file->GetLocation().c_str())) << oat_file->GetLocation();
     }
   }
-  only_use_system_oat_files_ = enforce;
+  only_use_system_oat_files_ = true;
 }
 
 void OatFileManager::DumpForSigQuit(std::ostream& os) {
diff --git a/runtime/oat_file_manager.h b/runtime/oat_file_manager.h
index d09b6d6..9c3a38a 100644
--- a/runtime/oat_file_manager.h
+++ b/runtime/oat_file_manager.h
@@ -120,7 +120,7 @@
 
   void DumpForSigQuit(std::ostream& os);
 
-  void SetOnlyUseSystemOatFiles(bool enforce, bool assert_no_files_loaded);
+  void SetOnlyUseSystemOatFiles();
 
   // Spawn a background thread which verifies all classes in the given dex files.
   void RunBackgroundVerification(const std::vector<const DexFile*>& dex_files,
@@ -144,8 +144,10 @@
   enum class CheckCollisionResult {
     kSkippedUnsupportedClassLoader,
     kSkippedClassLoaderContextSharedLibrary,
+    kSkippedVerificationDisabled,
     kNoCollisions,
     kPerformedHasCollisions,
+    kClassLoaderContextMatches
   };
 
   std::vector<std::unique_ptr<const DexFile>> OpenDexFilesFromOat_Impl(
diff --git a/runtime/oat_file_test.cc b/runtime/oat_file_test.cc
index 7a122ba..8222a8aa 100644
--- a/runtime/oat_file_test.cc
+++ b/runtime/oat_file_test.cc
@@ -30,107 +30,6 @@
 class OatFileTest : public DexoptTest {
 };
 
-TEST_F(OatFileTest, ResolveRelativeEncodedDexLocation_NullAbsLocation) {
-  std::string dex_location;
-  std::string dex_file_name;
-  OatFile::ResolveRelativeEncodedDexLocation(nullptr,
-                                             "/data/app/foo/base.apk",
-                                             &dex_location,
-                                             &dex_file_name);
-  ASSERT_EQ("/data/app/foo/base.apk", dex_file_name);
-  ASSERT_EQ("/data/app/foo/base.apk", dex_location);
-}
-
-TEST_F(OatFileTest, ResolveRelativeEncodedDexLocation_NullAbsLocation_Multidex) {
-  std::string dex_location;
-  std::string dex_file_name;
-  OatFile::ResolveRelativeEncodedDexLocation(nullptr,
-                                             "/data/app/foo/base.apk!classes2.dex",
-                                             &dex_location,
-                                             &dex_file_name);
-  ASSERT_EQ("/data/app/foo/base.apk!classes2.dex", dex_file_name);
-  ASSERT_EQ("/data/app/foo/base.apk!classes2.dex", dex_location);
-}
-
-TEST_F(OatFileTest, ResolveRelativeEncodedDexLocation_RelLocationAbsolute) {
-  std::string dex_location;
-  std::string dex_file_name;
-  OatFile::ResolveRelativeEncodedDexLocation("base.apk",
-                                             "/system/framework/base.apk",
-                                             &dex_location,
-                                             &dex_file_name);
-  ASSERT_EQ(kIsTargetBuild ? "/system/framework/base.apk" : "base.apk", dex_file_name);
-  ASSERT_EQ("/system/framework/base.apk", dex_location);
-}
-
-TEST_F(OatFileTest, ResolveRelativeEncodedDexLocation_BothAbsoluteLocations) {
-  std::string dex_location;
-  std::string dex_file_name;
-  OatFile::ResolveRelativeEncodedDexLocation("/data/app/foo/base.apk",
-                                             "/system/framework/base.apk",
-                                             &dex_location,
-                                             &dex_file_name);
-  ASSERT_EQ(kIsTargetBuild ? "/system/framework/base.apk" : "/data/app/foo/base.apk",
-            dex_file_name);
-  ASSERT_EQ("/system/framework/base.apk", dex_location);
-}
-
-TEST_F(OatFileTest, ResolveRelativeEncodedDexLocation_RelSuffixOfAbsLocation1) {
-  std::string dex_location;
-  std::string dex_file_name;
-  OatFile::ResolveRelativeEncodedDexLocation("/data/app/foo/base.apk",
-                                             "base.apk",
-                                             &dex_location,
-                                             &dex_file_name);
-  ASSERT_EQ("/data/app/foo/base.apk", dex_file_name);
-  ASSERT_EQ("/data/app/foo/base.apk", dex_location);
-}
-
-TEST_F(OatFileTest, ResolveRelativeEncodedDexLocation_RelSuffixOfAbsLocation2) {
-  std::string dex_location;
-  std::string dex_file_name;
-  OatFile::ResolveRelativeEncodedDexLocation("/data/app/foo/base.apk",
-                                             "foo/base.apk",
-                                             &dex_location,
-                                             &dex_file_name);
-  ASSERT_EQ("/data/app/foo/base.apk", dex_file_name);
-  ASSERT_EQ("/data/app/foo/base.apk", dex_location);
-}
-
-TEST_F(OatFileTest, ResolveRelativeEncodedDexLocation_RelSuffixOfAbsLocation_Multidex) {
-  std::string dex_location;
-  std::string dex_file_name;
-  OatFile::ResolveRelativeEncodedDexLocation("/data/app/foo/base.apk",
-                                             "base.apk!classes11.dex",
-                                             &dex_location,
-                                             &dex_file_name);
-  ASSERT_EQ("/data/app/foo/base.apk!classes11.dex", dex_file_name);
-  ASSERT_EQ("/data/app/foo/base.apk!classes11.dex", dex_location);
-}
-
-TEST_F(OatFileTest, ResolveRelativeEncodedDexLocation_RelNotSuffixOfAbsLocation1) {
-  std::string dex_location;
-  std::string dex_file_name;
-  OatFile::ResolveRelativeEncodedDexLocation("/data/app/foo/sludge.apk",
-                                             "base.apk!classes2.dex",
-                                             &dex_location,
-                                             &dex_file_name);
-  ASSERT_EQ(kIsTargetBuild ? "base.apk!classes2.dex" : "/data/app/foo/sludge.apk!classes2.dex",
-            dex_file_name);
-  ASSERT_EQ("base.apk!classes2.dex", dex_location);
-}
-
-TEST_F(OatFileTest, ResolveRelativeEncodedDexLocation_RelNotSuffixOfAbsLocation2) {
-  std::string dex_location;
-  std::string dex_file_name;
-  OatFile::ResolveRelativeEncodedDexLocation("/data/app/foo/sludge.apk",
-                                             "o/base.apk",
-                                             &dex_location,
-                                             &dex_file_name);
-  ASSERT_EQ(kIsTargetBuild ? "o/base.apk" : "/data/app/foo/sludge.apk", dex_file_name);
-  ASSERT_EQ("o/base.apk", dex_location);
-}
-
 TEST_F(OatFileTest, LoadOat) {
   std::string dex_location = GetScratchDir() + "/LoadOat.jar";
 
@@ -146,8 +45,7 @@
                                                    oat_location.c_str(),
                                                    /*executable=*/ false,
                                                    /*low_4gb=*/ false,
-                                                   dex_location.c_str(),
-                                                   /*reservation=*/ nullptr,
+                                                   dex_location,
                                                    &error_msg));
   ASSERT_TRUE(odex_file.get() != nullptr);
 
@@ -156,9 +54,9 @@
 }
 
 TEST_F(OatFileTest, ChangingMultiDexUncompressed) {
-  std::string dex_location = GetScratchDir() + "/MultiDexUncompressed.jar";
+  std::string dex_location = GetScratchDir() + "/MultiDexUncompressedAligned.jar";
 
-  Copy(GetTestDexFileName("MultiDexUncompressed"), dex_location);
+  Copy(GetTestDexFileName("MultiDexUncompressedAligned"), dex_location);
   GenerateOatForTest(dex_location.c_str(), CompilerFilter::kQuicken);
 
   std::string oat_location;
@@ -173,15 +71,14 @@
                                                      oat_location.c_str(),
                                                      /*executable=*/ false,
                                                      /*low_4gb=*/ false,
-                                                     dex_location.c_str(),
-                                                     /*reservation=*/ nullptr,
+                                                     dex_location,
                                                      &error_msg));
     ASSERT_TRUE(odex_file != nullptr);
     ASSERT_EQ(2u, odex_file->GetOatDexFiles().size());
   }
 
   // Now replace the source.
-  Copy(GetTestDexFileName("MainUncompressed"), dex_location);
+  Copy(GetTestDexFileName("MainUncompressedAligned"), dex_location);
 
   // And try to load again.
   std::unique_ptr<OatFile> odex_file(OatFile::Open(/*zip_fd=*/ -1,
@@ -189,8 +86,7 @@
                                                    oat_location,
                                                    /*executable=*/ false,
                                                    /*low_4gb=*/ false,
-                                                   dex_location.c_str(),
-                                                   /*reservation=*/ nullptr,
+                                                   dex_location,
                                                    &error_msg));
   EXPECT_TRUE(odex_file == nullptr);
   EXPECT_NE(std::string::npos, error_msg.find("expected 2 uncompressed dex files, but found 1"))
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
index 3ed2a91..ebb868b 100644
--- a/runtime/oat_quick_method_header.cc
+++ b/runtime/oat_quick_method_header.cc
@@ -18,29 +18,34 @@
 
 #include "art_method.h"
 #include "dex/dex_file_types.h"
+#include "interpreter/interpreter_mterp_impl.h"
+#include "interpreter/mterp/mterp.h"
+#include "nterp_helpers.h"
 #include "scoped_thread_state_change-inl.h"
 #include "stack_map.h"
 #include "thread.h"
 
 namespace art {
 
-uint32_t OatQuickMethodHeader::ToDexPc(ArtMethod* method,
+uint32_t OatQuickMethodHeader::ToDexPc(ArtMethod** frame,
                                        const uintptr_t pc,
                                        bool abort_on_failure) const {
+  ArtMethod* method = *frame;
   const void* entry_point = GetEntryPoint();
   uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
   if (method->IsNative()) {
     return dex::kDexNoIndex;
+  } else if (IsNterpMethodHeader()) {
+    return NterpGetDexPC(frame);
   } else {
     DCHECK(IsOptimized());
-    CodeInfo code_info(this, CodeInfo::DecodeFlags::InlineInfoOnly);
+    CodeInfo code_info = CodeInfo::DecodeInlineInfoOnly(this);
     StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset);
     if (stack_map.IsValid()) {
       return stack_map.GetDexPc();
     }
   }
   if (abort_on_failure) {
-    ScopedObjectAccess soa(Thread::Current());
     LOG(FATAL) << "Failed to find Dex offset for PC offset "
            << reinterpret_cast<void*>(sought_offset)
            << "(PC " << reinterpret_cast<void*>(pc) << ", entry_point=" << entry_point
@@ -56,9 +61,14 @@
                                                 bool abort_on_failure) const {
   const void* entry_point = GetEntryPoint();
   DCHECK(!method->IsNative());
+  if (IsNterpMethodHeader()) {
+    // This should only be called on an nterp frame for getting a catch handler.
+    CHECK(is_for_catch_handler);
+    return NterpGetCatchHandler();
+  }
   DCHECK(IsOptimized());
   // Search for the dex-to-pc mapping in stack maps.
-  CodeInfo code_info(this, CodeInfo::DecodeFlags::InlineInfoOnly);
+  CodeInfo code_info = CodeInfo::DecodeInlineInfoOnly(this);
 
   // All stack maps are stored in the same CodeItem section, safepoint stack
   // maps first, then catch stack maps. We use `is_for_catch_handler` to select
@@ -78,4 +88,15 @@
   return UINTPTR_MAX;
 }
 
+OatQuickMethodHeader* OatQuickMethodHeader::NterpMethodHeader =
+    (interpreter::IsNterpSupported()
+        ? reinterpret_cast<OatQuickMethodHeader*>(
+              reinterpret_cast<uintptr_t>(interpreter::GetNterpEntryPoint()) -
+                  sizeof(OatQuickMethodHeader))
+        : nullptr);
+
+bool OatQuickMethodHeader::IsNterpMethodHeader() const {
+  return interpreter::IsNterpSupported() ? (this == NterpMethodHeader) : false;
+}
+
 }  // namespace art
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 8798c69..9a1133e 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -18,6 +18,7 @@
 #define ART_RUNTIME_OAT_QUICK_METHOD_HEADER_H_
 
 #include "arch/instruction_set.h"
+#include "base/locks.h"
 #include "base/macros.h"
 #include "base/utils.h"
 #include "quick/quick_method_frame_info.h"
@@ -37,6 +38,10 @@
         code_size_(code_size) {
   }
 
+  static OatQuickMethodHeader* NterpMethodHeader;
+
+  bool IsNterpMethodHeader() const;
+
   static OatQuickMethodHeader* FromCodePointer(const void* code_ptr) {
     uintptr_t code = reinterpret_cast<uintptr_t>(code_ptr);
     uintptr_t header = code - OFFSETOF_MEMBER(OatQuickMethodHeader, code_);
@@ -50,6 +55,10 @@
     return FromCodePointer(EntryPointToCodePointer(entry_point));
   }
 
+  static size_t InstructionAlignedSize() {
+    return RoundUp(sizeof(OatQuickMethodHeader), GetInstructionSetAlignment(kRuntimeISA));
+  }
+
   OatQuickMethodHeader(const OatQuickMethodHeader&) = default;
   OatQuickMethodHeader& operator=(const OatQuickMethodHeader&) = default;
 
@@ -76,6 +85,10 @@
   }
 
   uint32_t GetCodeSize() const {
+    // ART compiled method are prefixed with header, but we can also easily
+    // accidentally use a function pointer to one of the stubs/trampolines.
+    // We prefix those with 0xFF in the aseembly so that we can do DCHECKs.
+    CHECK_NE(code_size_, 0xFFFFFFFF) << code_size_;
     return code_size_ & kCodeSizeMask;
   }
 
@@ -101,7 +114,8 @@
   }
 
   bool Contains(uintptr_t pc) const {
-    uintptr_t code_start = reinterpret_cast<uintptr_t>(code_);
+    // Remove hwasan tag to make comparison below valid. The PC from the stack does not have it.
+    uintptr_t code_start = reinterpret_cast<uintptr_t>(HWASanUntag(code_));
     static_assert(kRuntimeISA != InstructionSet::kThumb2, "kThumb2 cannot be a runtime ISA");
     if (kRuntimeISA == InstructionSet::kArm) {
       // On Thumb-2, the pc is offset by one.
@@ -140,7 +154,10 @@
                             bool is_for_catch_handler,
                             bool abort_on_failure = true) const;
 
-  uint32_t ToDexPc(ArtMethod* method, const uintptr_t pc, bool abort_on_failure = true) const;
+  uint32_t ToDexPc(ArtMethod** frame,
+                   const uintptr_t pc,
+                   bool abort_on_failure = true) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   void SetHasShouldDeoptimizeFlag() {
     DCHECK_EQ(code_size_ & kShouldDeoptimizeMask, 0u);
diff --git a/runtime/offsets.h b/runtime/offsets.h
index 6d1a8e0..2f36fe6 100644
--- a/runtime/offsets.h
+++ b/runtime/offsets.h
@@ -37,6 +37,9 @@
   constexpr size_t SizeValue() const {
     return val_;
   }
+  constexpr bool operator==(Offset o) const {
+    return SizeValue() == o.SizeValue();
+  }
 
  protected:
   size_t val_;
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 7117e93..c62caa9 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -27,6 +27,7 @@
 #include "base/utils.h"
 #include "debugger.h"
 #include "gc/heap.h"
+#include "jni_id_type.h"
 #include "monitor.h"
 #include "runtime.h"
 #include "ti/agent.h"
@@ -82,6 +83,8 @@
   parser_builder->
        Define("-Xzygote")
           .IntoKey(M::Zygote)
+      .Define("-Xprimaryzygote")
+          .IntoKey(M::PrimaryZygote)
       .Define("-help")
           .IntoKey(M::Help)
       .Define("-showversion")
@@ -110,7 +113,7 @@
       .Define("-XjdwpProvider:_")
           .WithType<JdwpProvider>()
           .IntoKey(M::JdwpProvider)
-      .Define({"-Xrunjdwp:_", "-agentlib:jdwp=_", "-XjdwpOptions:_"})
+      .Define("-XjdwpOptions:_")
           .WithType<std::string>()
           .IntoKey(M::JdwpOptions)
       // TODO Re-enable -agentlib: once I have a good way to transform the values.
@@ -138,6 +141,9 @@
       .Define("-XX:NonMovingSpaceCapacity=_")
           .WithType<MemoryKiB>()
           .IntoKey(M::NonMovingSpaceCapacity)
+      .Define("-XX:StopForNativeAllocs=_")
+          .WithType<MemoryKiB>()
+          .IntoKey(M::StopForNativeAllocs)
       .Define("-XX:HeapTargetUtilization=_")
           .WithType<double>().WithRange(0.1, 0.9)
           .IntoKey(M::HeapTargetUtilization)
@@ -195,6 +201,10 @@
           .WithType<bool>()
           .WithValueMap({{"false", false}, {"true", true}})
           .IntoKey(M::UseJitCompilation)
+      .Define("-Xusetieredjit:_")
+          .WithType<bool>()
+          .WithValueMap({{"false", false}, {"true", true}})
+          .IntoKey(M::UseTieredJitCompilation)
       .Define("-Xjitinitialsize:_")
           .WithType<MemoryKiB>()
           .IntoKey(M::JITCodeCacheInitialCapacity)
@@ -362,6 +372,27 @@
           .WithType<bool>()
           .WithValueMap({{"false", false}, {"true", true}})
           .IntoKey(M::FastClassNotFoundException)
+      .Define("-Xopaque-jni-ids:_")
+          .WithType<JniIdType>()
+          .WithValueMap({{"true", JniIdType::kIndices},
+                         {"false", JniIdType::kPointer},
+                         {"swapable", JniIdType::kSwapablePointer},
+                         {"pointer", JniIdType::kPointer},
+                         {"indices", JniIdType::kIndices},
+                         {"default", JniIdType::kDefault}})
+          .IntoKey(M::OpaqueJniIds)
+      .Define("-Xauto-promote-opaque-jni-ids:_")
+          .WithType<bool>()
+          .WithValueMap({{"true", true}, {"false", false}})
+          .IntoKey(M::AutoPromoteOpaqueJniIds)
+      .Define("-XX:VerifierMissingKThrowFatal=_")
+          .WithType<bool>()
+          .WithValueMap({{"false", false}, {"true", true}})
+          .IntoKey(M::VerifierMissingKThrowFatal)
+      .Define("-XX:PerfettoHprof=_")
+          .WithType<bool>()
+          .WithValueMap({{"false", false}, {"true", true}})
+          .IntoKey(M::PerfettoHprof)
       .Ignore({
           "-ea", "-da", "-enableassertions", "-disableassertions", "--runtime-arg", "-esa",
           "-dsa", "-enablesystemassertions", "-disablesystemassertions", "-Xrs", "-Xint:_",
@@ -478,6 +509,7 @@
   //  gLogVerbosity.deopt = true;  // TODO: don't check this in!
   //  gLogVerbosity.gc = true;  // TODO: don't check this in!
   //  gLogVerbosity.heap = true;  // TODO: don't check this in!
+  //  gLogVerbosity.interpreter = true;  // TODO: don't check this in!
   //  gLogVerbosity.jdwp = true;  // TODO: don't check this in!
   //  gLogVerbosity.jit = true;  // TODO: don't check this in!
   //  gLogVerbosity.jni = true;  // TODO: don't check this in!
@@ -577,7 +609,7 @@
 
   MaybeOverrideVerbosity();
 
-  SetRuntimeDebugFlagsEnabled(args.Get(M::SlowDebug));
+  SetRuntimeDebugFlagsEnabled(args.GetOrDefault(M::SlowDebug));
 
   // -Xprofile:
   Trace::SetDefaultClockSource(args.GetOrDefault(M::ProfileClock));
@@ -588,7 +620,6 @@
 
   {
     // If not set, background collector type defaults to homogeneous compaction.
-    // If foreground is GSS, use GSS as background collector.
     // If not low memory mode, semispace otherwise.
 
     gc::CollectorType background_collector_type_;
@@ -604,12 +635,8 @@
     }
 
     if (background_collector_type_ == gc::kCollectorTypeNone) {
-      if (collector_type_ != gc::kCollectorTypeGSS) {
-        background_collector_type_ = low_memory_mode_ ?
-            gc::kCollectorTypeSS : gc::kCollectorTypeHomogeneousSpaceCompact;
-      } else {
-        background_collector_type_ = collector_type_;
-      }
+      background_collector_type_ = low_memory_mode_ ?
+          gc::kCollectorTypeSS : gc::kCollectorTypeHomogeneousSpaceCompact;
     }
 
     args.Set(M::BackgroundGc, BackgroundGcOption { background_collector_type_ });
@@ -683,19 +710,21 @@
   UsageMessage(stream, "The following standard options are supported:\n");
   UsageMessage(stream, "  -classpath classpath (-cp classpath)\n");
   UsageMessage(stream, "  -Dproperty=value\n");
-  UsageMessage(stream, "  -verbose:tag ('gc', 'jit', 'jni', or 'class')\n");
+  UsageMessage(stream, "  -verbose:tag[,tag...] (currently valid tags: 'agents', 'class',\n"
+                       "    'collector', 'compiler', 'deopt', 'dex', 'gc', 'heap', 'image',\n"
+                       "    'interpreter', 'jdwp', 'jit', 'jni', 'monitor', 'oat', 'profiler',\n"
+                       "    'signals', 'simulator', 'startup', 'systrace-locks',\n"
+                       "    'third-party-jni', 'threads', 'verifier', 'verifier-debug')\n");
   UsageMessage(stream, "  -showversion\n");
   UsageMessage(stream, "  -help\n");
-  UsageMessage(stream, "  -agentlib:jdwp=options\n");
   // TODO add back in once -agentlib actually does something.
   // UsageMessage(stream, "  -agentlib:library=options (Experimental feature, "
   //                      "requires -Xexperimental:agent, some features might not be supported)\n");
-  UsageMessage(stream, "  -agentpath:library_path=options (Experimental feature, "
-                       "requires -Xexperimental:agent, some features might not be supported)\n");
+  UsageMessage(stream, "  -agentpath:library_path=options (Experimental feature, requires\n"
+                       "    -Xexperimental:agent, some features might not be supported)\n");
   UsageMessage(stream, "\n");
 
   UsageMessage(stream, "The following extended options are supported:\n");
-  UsageMessage(stream, "  -Xrunjdwp:<options>\n");
   UsageMessage(stream, "  -Xbootclasspath:bootclasspath\n");
   UsageMessage(stream, "  -Xcheck:tag  (e.g. 'jni')\n");
   UsageMessage(stream, "  -XmsN (min heap, must be multiple of 1K, >= 1MB)\n");
@@ -744,11 +773,12 @@
   UsageMessage(stream, "  -XX:BackgroundGC=none\n");
   UsageMessage(stream, "  -XX:LargeObjectSpace={disabled,map,freelist}\n");
   UsageMessage(stream, "  -XX:LargeObjectThreshold=N\n");
+  UsageMessage(stream, "  -XX:StopForNativeAllocs=N\n");
   UsageMessage(stream, "  -XX:DumpNativeStackOnSigQuit=booleanvalue\n");
   UsageMessage(stream, "  -XX:MadviseRandomAccess:booleanvalue\n");
   UsageMessage(stream, "  -XX:SlowDebug={false,true}\n");
   UsageMessage(stream, "  -Xmethod-trace\n");
-  UsageMessage(stream, "  -Xmethod-trace-file:filename");
+  UsageMessage(stream, "  -Xmethod-trace-file:filename\n");
   UsageMessage(stream, "  -Xmethod-trace-file-size:integervalue\n");
   UsageMessage(stream, "  -Xps-min-save-period-ms:integervalue\n");
   UsageMessage(stream, "  -Xps-save-resolved-classes-delay-ms:integervalue\n");
@@ -778,6 +808,8 @@
                        "(Enable new and experimental agent support)\n");
   UsageMessage(stream, "  -Xexperimental:agents"
                        "(Enable new and experimental agent support)\n");
+  UsageMessage(stream, "  -Xopaque-jni-ids:{true,false,swapable}");
+  UsageMessage(stream, "(Use opauque integers for jni ids, yes, no or punt for later)\n");
   UsageMessage(stream, "\n");
 
   UsageMessage(stream, "The following previously supported Dalvik options are ignored:\n");
diff --git a/runtime/parsed_options_test.cc b/runtime/parsed_options_test.cc
index ca2a4ea..8873eb9 100644
--- a/runtime/parsed_options_test.cc
+++ b/runtime/parsed_options_test.cc
@@ -64,6 +64,7 @@
   options.push_back(std::make_pair("-Xmx4k", nullptr));
   options.push_back(std::make_pair("-Xss1m", nullptr));
   options.push_back(std::make_pair("-XX:HeapTargetUtilization=0.75", nullptr));
+  options.push_back(std::make_pair("-XX:StopForNativeAllocs=200m", nullptr));
   options.push_back(std::make_pair("-Dfoo=bar", nullptr));
   options.push_back(std::make_pair("-Dbaz=qux", nullptr));
   options.push_back(std::make_pair("-verbose:gc,class,jni", nullptr));
@@ -90,6 +91,7 @@
   EXPECT_PARSED_EQ(2048U, Opt::MemoryInitialSize);
   EXPECT_PARSED_EQ(4 * KB, Opt::MemoryMaximumSize);
   EXPECT_PARSED_EQ(1 * MB, Opt::StackSize);
+  EXPECT_PARSED_EQ(200 * MB, Opt::StopForNativeAllocs);
   EXPECT_DOUBLE_EQ(0.75, map.GetOrDefault(Opt::HeapTargetUtilization));
   EXPECT_TRUE(test_vfprintf == map.GetOrDefault(Opt::HookVfprintf));
   EXPECT_TRUE(test_exit == map.GetOrDefault(Opt::HookExit));
@@ -98,6 +100,7 @@
   EXPECT_FALSE(VLOG_IS_ON(compiler));
   EXPECT_FALSE(VLOG_IS_ON(heap));
   EXPECT_TRUE(VLOG_IS_ON(gc));
+  EXPECT_FALSE(VLOG_IS_ON(interpreter));
   EXPECT_FALSE(VLOG_IS_ON(jdwp));
   EXPECT_TRUE(VLOG_IS_ON(jni));
   EXPECT_FALSE(VLOG_IS_ON(monitor));
@@ -160,13 +163,11 @@
     EXPECT_EQ(kRuntimeISA, isa);
   }
 
-  const char* isa_strings[] = { "arm", "arm64", "x86", "x86_64", "mips", "mips64" };
+  const char* isa_strings[] = { "arm", "arm64", "x86", "x86_64" };
   InstructionSet ISAs[] = { InstructionSet::kArm,
                             InstructionSet::kArm64,
                             InstructionSet::kX86,
-                            InstructionSet::kX86_64,
-                            InstructionSet::kMips,
-                            InstructionSet::kMips64 };
+                            InstructionSet::kX86_64 };
   static_assert(arraysize(isa_strings) == arraysize(ISAs), "Need same amount.");
 
   for (size_t i = 0; i < arraysize(isa_strings); ++i) {
diff --git a/runtime/plugin.cc b/runtime/plugin.cc
index 6f1c517..6b9e008 100644
--- a/runtime/plugin.cc
+++ b/runtime/plugin.cc
@@ -19,6 +19,9 @@
 #include <dlfcn.h>
 
 #include "android-base/stringprintf.h"
+#include "base/locks.h"
+#include "base/mutex.h"
+#include "thread-current-inl.h"
 
 namespace art {
 
@@ -32,6 +35,7 @@
 }
 
 bool Plugin::Load(/*out*/std::string* error_msg) {
+  Locks::mutator_lock_->AssertNotHeld(Thread::Current());
   DCHECK(!IsLoaded());
   void* res = dlopen(library_.c_str(), RTLD_LAZY);
   if (res == nullptr) {
@@ -55,6 +59,7 @@
 }
 
 bool Plugin::Unload() {
+  Locks::mutator_lock_->AssertNotHeld(Thread::Current());
   DCHECK(IsLoaded());
   bool ret = true;
   void* handle = dlopen_handle_;
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 3bc718b..90732e1 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -33,6 +33,7 @@
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
 #include "mirror/throwable.h"
+#include "nterp_helpers.h"
 #include "oat_quick_method_header.h"
 #include "stack.h"
 #include "stack_map.h"
@@ -52,7 +53,6 @@
       handler_quick_frame_pc_(0),
       handler_method_header_(nullptr),
       handler_quick_arg0_(0),
-      handler_method_(nullptr),
       handler_dex_pc_(0),
       clear_exception_(false),
       handler_frame_depth_(kInvalidFrameDepth),
@@ -82,19 +82,6 @@
       // This is the upcall, we remember the frame and last pc so that we may long jump to them.
       exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
       exception_handler_->SetHandlerQuickFrame(GetCurrentQuickFrame());
-      exception_handler_->SetHandlerMethodHeader(GetCurrentOatQuickMethodHeader());
-      uint32_t next_dex_pc;
-      ArtMethod* next_art_method;
-      bool has_next = GetNextMethodAndDexPc(&next_art_method, &next_dex_pc);
-      // Report the method that did the down call as the handler.
-      exception_handler_->SetHandlerDexPc(next_dex_pc);
-      exception_handler_->SetHandlerMethod(next_art_method);
-      if (!has_next) {
-        // No next method? Check exception handler is set up for the unhandled exception handler
-        // case.
-        DCHECK_EQ(0U, exception_handler_->GetHandlerDexPc());
-        DCHECK(nullptr == exception_handler_->GetHandlerMethod());
-      }
       return false;  // End stack walk.
     }
     if (skip_frames_ != 0) {
@@ -123,7 +110,6 @@
       uint32_t found_dex_pc = method->FindCatchBlock(to_find, dex_pc, &clear_exception);
       exception_handler_->SetClearException(clear_exception);
       if (found_dex_pc != dex::kDexNoIndex) {
-        exception_handler_->SetHandlerMethod(method);
         exception_handler_->SetHandlerDexPc(found_dex_pc);
         exception_handler_->SetHandlerQuickFramePc(
             GetCurrentOatQuickMethodHeader()->ToNativeQuickPc(
@@ -155,37 +141,6 @@
   DISALLOW_COPY_AND_ASSIGN(CatchBlockStackVisitor);
 };
 
-static size_t GetInstrumentationFramesToPop(Thread* self, size_t frame_depth)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  CHECK_NE(frame_depth, kInvalidFrameDepth);
-  size_t instrumentation_frames_to_pop = 0;
-  StackVisitor::WalkStack(
-      [&](art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
-        size_t current_frame_depth = stack_visitor->GetFrameDepth();
-        if (current_frame_depth < frame_depth) {
-          CHECK(stack_visitor->GetMethod() != nullptr);
-          if (UNLIKELY(reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) ==
-                  stack_visitor->GetReturnPc())) {
-            if (!stack_visitor->IsInInlinedFrame()) {
-              // We do not count inlined frames, because we do not instrument them. The reason we
-              // include them in the stack walking is the check against `frame_depth_`, which is
-              // given to us by a visitor that visits inlined frames.
-              ++instrumentation_frames_to_pop;
-            }
-          }
-          return true;
-        }
-        // We reached the frame of the catch handler or the upcall.
-        return false;
-      },
-      self,
-      /* context= */ nullptr,
-      art::StackVisitor::StackWalkKind::kIncludeInlinedFrames,
-      /* check_suspended */ true,
-      /* include_transitions */ true);
-  return instrumentation_frames_to_pop;
-}
-
 // Finds the appropriate exception catch after calling all method exit instrumentation functions.
 // Note that this might change the exception being thrown.
 void QuickExceptionHandler::FindCatch(ObjPtr<mirror::Throwable> exception) {
@@ -218,23 +173,17 @@
     DCHECK_GE(new_pop_count, already_popped);
     already_popped = new_pop_count;
 
-    // Figure out how many of those frames have instrumentation we need to remove (Should be the
-    // exact same as number of new_pop_count if there aren't inlined frames).
-    size_t instrumentation_frames_to_pop =
-        GetInstrumentationFramesToPop(self_, handler_frame_depth_);
-
     if (kDebugExceptionDelivery) {
       if (*handler_quick_frame_ == nullptr) {
         LOG(INFO) << "Handler is upcall";
       }
-      if (handler_method_ != nullptr) {
-        const DexFile* dex_file = handler_method_->GetDexFile();
-        int line_number = annotations::GetLineNumFromPC(dex_file, handler_method_, handler_dex_pc_);
-        LOG(INFO) << "Handler: " << handler_method_->PrettyMethod() << " (line: "
+      if (GetHandlerMethod() != nullptr) {
+        const DexFile* dex_file = GetHandlerMethod()->GetDexFile();
+        int line_number =
+            annotations::GetLineNumFromPC(dex_file, GetHandlerMethod(), handler_dex_pc_);
+        LOG(INFO) << "Handler: " << GetHandlerMethod()->PrettyMethod() << " (line: "
                   << line_number << ")";
       }
-      LOG(INFO) << "Will attempt to pop " << instrumentation_frames_to_pop
-                << " off of the instrumentation stack";
     }
     // Exception was cleared as part of delivery.
     DCHECK(!self_->IsExceptionPending());
@@ -244,7 +193,8 @@
         handler_method_header_->IsOptimized()) {
       SetCatchEnvironmentForOptimizedHandler(&visitor);
     }
-    popped_to_top = popper.PopFramesTo(instrumentation_frames_to_pop, exception_ref);
+    popped_to_top =
+        popper.PopFramesTo(reinterpret_cast<uintptr_t>(handler_quick_frame_), exception_ref);
   } while (!popped_to_top);
   if (!clear_exception_) {
     // Put exception back in root set with clear throw location.
@@ -287,13 +237,13 @@
 void QuickExceptionHandler::SetCatchEnvironmentForOptimizedHandler(StackVisitor* stack_visitor) {
   DCHECK(!is_deoptimization_);
   DCHECK(*handler_quick_frame_ != nullptr) << "Method should not be called on upcall exceptions";
-  DCHECK(handler_method_ != nullptr && handler_method_header_->IsOptimized());
+  DCHECK(GetHandlerMethod() != nullptr && handler_method_header_->IsOptimized());
 
   if (kDebugExceptionDelivery) {
     self_->DumpStack(LOG_STREAM(INFO) << "Setting catch phis: ");
   }
 
-  CodeItemDataAccessor accessor(handler_method_->DexInstructionData());
+  CodeItemDataAccessor accessor(GetHandlerMethod()->DexInstructionData());
   const size_t number_of_vregs = accessor.RegistersSize();
   CodeInfo code_info(handler_method_header_);
 
@@ -301,10 +251,11 @@
   StackMap catch_stack_map = code_info.GetCatchStackMapForDexPc(GetHandlerDexPc());
   DCHECK(catch_stack_map.IsValid());
   DexRegisterMap catch_vreg_map = code_info.GetDexRegisterMapOf(catch_stack_map);
+  DCHECK_EQ(catch_vreg_map.size(), number_of_vregs);
+
   if (!catch_vreg_map.HasAnyLiveDexRegisters()) {
     return;
   }
-  DCHECK_EQ(catch_vreg_map.size(), number_of_vregs);
 
   // Find stack map of the throwing instruction.
   StackMap throw_stack_map =
@@ -324,10 +275,12 @@
     // Get vreg value from its current location.
     uint32_t vreg_value;
     VRegKind vreg_kind = ToVRegKind(throw_vreg_map[vreg].GetKind());
-    bool get_vreg_success = stack_visitor->GetVReg(stack_visitor->GetMethod(),
-                                                   vreg,
-                                                   vreg_kind,
-                                                   &vreg_value);
+    bool get_vreg_success =
+        stack_visitor->GetVReg(stack_visitor->GetMethod(),
+                               vreg,
+                               vreg_kind,
+                               &vreg_value,
+                               throw_vreg_map[vreg]);
     CHECK(get_vreg_success) << "VReg " << vreg << " was optimized out ("
                             << "method=" << ArtMethod::PrettyMethod(stack_visitor->GetMethod())
                             << ", dex_pc=" << stack_visitor->GetDexPc() << ", "
@@ -433,7 +386,11 @@
         updated_vregs = GetThread()->GetUpdatedVRegFlags(frame_id);
         DCHECK(updated_vregs != nullptr);
       }
-      HandleOptimizingDeoptimization(method, new_frame, updated_vregs);
+      if (GetCurrentOatQuickMethodHeader()->IsNterpMethodHeader()) {
+        HandleNterpDeoptimization(method, new_frame, updated_vregs);
+      } else {
+        HandleOptimizingDeoptimization(method, new_frame, updated_vregs);
+      }
       if (updated_vregs != nullptr) {
         // Calling Thread::RemoveDebuggerShadowFrameMapping will also delete the updated_vregs
         // array so this must come after we processed the frame.
@@ -463,6 +420,35 @@
   }
 
  private:
+  void HandleNterpDeoptimization(ArtMethod* m,
+                                 ShadowFrame* new_frame,
+                                 const bool* updated_vregs)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
+    StackReference<mirror::Object>* vreg_ref_base =
+        reinterpret_cast<StackReference<mirror::Object>*>(NterpGetReferenceArray(cur_quick_frame));
+    int32_t* vreg_int_base =
+        reinterpret_cast<int32_t*>(NterpGetRegistersArray(cur_quick_frame));
+    CodeItemDataAccessor accessor(m->DexInstructionData());
+    const uint16_t num_regs = accessor.RegistersSize();
+    // An nterp frame has two arrays: a dex register array and a reference array
+    // that shadows the dex register array but only containing references
+    // (non-reference dex registers have nulls). See nterp_helpers.cc.
+    for (size_t reg = 0; reg < num_regs; ++reg) {
+      if (updated_vregs != nullptr && updated_vregs[reg]) {
+        // Keep the value set by debugger.
+        continue;
+      }
+      StackReference<mirror::Object>* ref_addr = vreg_ref_base + reg;
+      mirror::Object* ref = ref_addr->AsMirrorPtr();
+      if (ref != nullptr) {
+        new_frame->SetVRegReference(reg, ref);
+      } else {
+        new_frame->SetVReg(reg, vreg_int_base[reg]);
+      }
+    }
+  }
+
   void HandleOptimizingDeoptimization(ArtMethod* m,
                                       ShadowFrame* new_frame,
                                       const bool* updated_vregs)
@@ -478,10 +464,11 @@
     DexRegisterMap vreg_map = IsInInlinedFrame()
         ? code_info.GetInlineDexRegisterMapOf(stack_map, GetCurrentInlinedFrame())
         : code_info.GetDexRegisterMapOf(stack_map);
+
+    DCHECK_EQ(vreg_map.size(), number_of_vregs);
     if (vreg_map.empty()) {
       return;
     }
-    DCHECK_EQ(vreg_map.size(), number_of_vregs);
 
     for (uint16_t vreg = 0; vreg < number_of_vregs; ++vreg) {
       if (updated_vregs != nullptr && updated_vregs[vreg]) {
@@ -641,10 +628,9 @@
   DCHECK(is_deoptimization_) << "Non-deoptimization handlers should use FindCatch";
   uintptr_t return_pc = 0;
   if (method_tracing_active_) {
-    size_t instrumentation_frames_to_pop =
-        GetInstrumentationFramesToPop(self_, handler_frame_depth_);
     instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
-    return_pc = instrumentation->PopFramesForDeoptimization(self_, instrumentation_frames_to_pop);
+    return_pc = instrumentation->PopFramesForDeoptimization(
+        self_, reinterpret_cast<uintptr_t>(handler_quick_frame_));
   }
   return return_pc;
 }
@@ -659,6 +645,12 @@
   if (smash_caller_saves) {
     context_->SmashCallerSaves();
   }
+  if (!is_deoptimization_ &&
+      handler_method_header_ != nullptr &&
+      handler_method_header_->IsNterpMethodHeader()) {
+    context_->SetNterpDexPC(reinterpret_cast<uintptr_t>(
+        GetHandlerMethod()->DexInstructions().Insns() + handler_dex_pc_));
+  }
   context_->DoLongJump();
   UNREACHABLE();
 }
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index 5579d36..4ff981d 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -99,11 +99,7 @@
   }
 
   ArtMethod* GetHandlerMethod() const {
-    return handler_method_;
-  }
-
-  void SetHandlerMethod(ArtMethod* handler_quick_method) {
-    handler_method_ = handler_quick_method;
+    return *handler_quick_frame_;
   }
 
   uint32_t GetHandlerDexPc() const {
@@ -154,8 +150,6 @@
   const OatQuickMethodHeader* handler_method_header_;
   // The value for argument 0.
   uintptr_t handler_quick_arg0_;
-  // The handler method to report to the debugger.
-  ArtMethod* handler_method_;
   // The handler's dex PC, zero implies an uncaught exception.
   uint32_t handler_dex_pc_;
   // Should the exception be cleared as the catch block has no move-exception?
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 0dbec85..469d329 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -497,14 +497,10 @@
     // If we get another exception when we are trying to wrap, then just use that instead.
     ScopedLocalRef<jthrowable> th(soa.Env(), soa.Env()->ExceptionOccurred());
     soa.Self()->ClearException();
-    jclass exception_class = soa.Env()->FindClass("java/lang/reflect/InvocationTargetException");
-    if (exception_class == nullptr) {
-      soa.Self()->AssertPendingException();
-      return false;
-    }
-    jmethodID mid = soa.Env()->GetMethodID(exception_class, "<init>", "(Ljava/lang/Throwable;)V");
-    CHECK(mid != nullptr);
-    jobject exception_instance = soa.Env()->NewObject(exception_class, mid, th.get());
+    jobject exception_instance =
+        soa.Env()->NewObject(WellKnownClasses::java_lang_reflect_InvocationTargetException,
+                             WellKnownClasses::java_lang_reflect_InvocationTargetException_init,
+                             th.get());
     if (exception_instance == nullptr) {
       soa.Self()->AssertPendingException();
       return false;
@@ -518,9 +514,11 @@
 
 }  // anonymous namespace
 
-JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
-                         va_list args)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
+template <>
+JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
+                         jobject obj,
+                         ArtMethod* method,
+                         va_list args) REQUIRES_SHARED(Locks::mutator_lock_) {
   // We want to make sure that the stack is not within a small distance from the
   // protected region in case we are calling into a leaf function whose stack
   // check has been elided.
@@ -528,8 +526,6 @@
     ThrowStackOverflowError(soa.Self());
     return JValue();
   }
-
-  ArtMethod* method = jni::DecodeArtMethod(mid);
   bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
   if (is_string_init) {
     // Replace calls to String.<init> with equivalent StringFactory call.
@@ -550,7 +546,19 @@
   return result;
 }
 
-JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
+template <>
+JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
+                         jobject obj,
+                         jmethodID mid,
+                         va_list args) REQUIRES_SHARED(Locks::mutator_lock_) {
+  DCHECK(mid != nullptr) << "Called with null jmethodID";
+  return InvokeWithVarArgs(soa, obj, jni::DecodeArtMethod(mid), args);
+}
+
+template <>
+JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa,
+                         jobject obj,
+                         ArtMethod* method,
                          const jvalue* args) {
   // We want to make sure that the stack is not within a small distance from the
   // protected region in case we are calling into a leaf function whose stack
@@ -559,8 +567,6 @@
     ThrowStackOverflowError(soa.Self());
     return JValue();
   }
-
-  ArtMethod* method = jni::DecodeArtMethod(mid);
   bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
   if (is_string_init) {
     // Replace calls to String.<init> with equivalent StringFactory call.
@@ -581,8 +587,20 @@
   return result;
 }
 
+template <>
+JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa,
+                         jobject obj,
+                         jmethodID mid,
+                         const jvalue* args) {
+  DCHECK(mid != nullptr) << "Called with null jmethodID";
+  return InvokeWithJValues(soa, obj, jni::DecodeArtMethod(mid), args);
+}
+
+template <>
 JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnable& soa,
-                                           jobject obj, jmethodID mid, const jvalue* args) {
+                                           jobject obj,
+                                           ArtMethod* interface_method,
+                                           const jvalue* args) {
   // We want to make sure that the stack is not within a small distance from the
   // protected region in case we are calling into a leaf function whose stack
   // check has been elided.
@@ -590,9 +608,8 @@
     ThrowStackOverflowError(soa.Self());
     return JValue();
   }
-
   ObjPtr<mirror::Object> receiver = soa.Decode<mirror::Object>(obj);
-  ArtMethod* method = FindVirtualMethod(receiver, jni::DecodeArtMethod(mid));
+  ArtMethod* method = FindVirtualMethod(receiver, interface_method);
   bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
   if (is_string_init) {
     // Replace calls to String.<init> with equivalent StringFactory call.
@@ -613,8 +630,20 @@
   return result;
 }
 
+template <>
+JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnable& soa,
+                                           jobject obj,
+                                           jmethodID mid,
+                                           const jvalue* args) {
+  DCHECK(mid != nullptr) << "Called with null jmethodID";
+  return InvokeVirtualOrInterfaceWithJValues(soa, obj, jni::DecodeArtMethod(mid), args);
+}
+
+template <>
 JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
-                                           jobject obj, jmethodID mid, va_list args) {
+                                           jobject obj,
+                                           ArtMethod* interface_method,
+                                           va_list args) {
   // We want to make sure that the stack is not within a small distance from the
   // protected region in case we are calling into a leaf function whose stack
   // check has been elided.
@@ -624,7 +653,7 @@
   }
 
   ObjPtr<mirror::Object> receiver = soa.Decode<mirror::Object>(obj);
-  ArtMethod* method = FindVirtualMethod(receiver, jni::DecodeArtMethod(mid));
+  ArtMethod* method = FindVirtualMethod(receiver, interface_method);
   bool is_string_init = method->GetDeclaringClass()->IsStringClass() && method->IsConstructor();
   if (is_string_init) {
     // Replace calls to String.<init> with equivalent StringFactory call.
@@ -645,6 +674,15 @@
   return result;
 }
 
+template <>
+JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
+                                           jobject obj,
+                                           jmethodID mid,
+                                           va_list args) {
+  DCHECK(mid != nullptr) << "Called with null jmethodID";
+  return InvokeVirtualOrInterfaceWithVarArgs(soa, obj, jni::DecodeArtMethod(mid), args);
+}
+
 jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaMethod,
                      jobject javaReceiver, jobject javaArgs, size_t num_frames) {
   // We want to make sure that the stack is not within a small distance from the
@@ -661,12 +699,16 @@
   ArtMethod* m = executable->GetArtMethod();
 
   ObjPtr<mirror::Class> declaring_class = m->GetDeclaringClass();
-  if (UNLIKELY(!declaring_class->IsInitialized())) {
-    StackHandleScope<1> hs(soa.Self());
+  if (UNLIKELY(!declaring_class->IsVisiblyInitialized())) {
+    Thread* self = soa.Self();
+    StackHandleScope<1> hs(self);
     HandleWrapperObjPtr<mirror::Class> h_class(hs.NewHandleWrapper(&declaring_class));
-    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(soa.Self(), h_class, true, true)) {
+    if (UNLIKELY(!Runtime::Current()->GetClassLinker()->EnsureInitialized(
+                      self, h_class, /*can_init_fields=*/ true, /*can_init_parents=*/ true))) {
+      DCHECK(self->IsExceptionPending());
       return nullptr;
     }
+    DCHECK(h_class->IsInitializing());
   }
 
   ObjPtr<mirror::Object> receiver;
@@ -738,7 +780,7 @@
     CHECK(constructor->IsConstructor());
 
     ObjPtr<mirror::Class> declaring_class = constructor->GetDeclaringClass();
-    CHECK(declaring_class->IsInitialized());
+    CHECK(declaring_class->IsInitializing());
 
     // Calls to String.<init> should have been repplaced with with equivalent StringFactory calls.
     CHECK(!declaring_class->IsStringClass());
diff --git a/runtime/reflection.h b/runtime/reflection.h
index 574e302..5a2da35 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -60,27 +60,39 @@
                                          JValue* dst)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
+// Invokes the given method (either an ArtMethod or a jmethodID) with direct/static semantics.
+template<typename MethodType>
 JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
                          jobject obj,
-                         jmethodID mid,
+                         MethodType mid,
                          va_list args)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
+// Invokes the given method (either an ArtMethod or a jmethodID) with reflection semantics.
+template<typename MethodType>
 JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa,
                          jobject obj,
-                         jmethodID mid,
+                         MethodType mid,
                          const jvalue* args)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
+// Invokes the given method (either an ArtMethod or a jmethodID) with virtual/interface semantics.
+// Note this will perform lookup based on the 'obj' to determine which implementation of the given
+// method should be invoked.
+template<typename MethodType>
 JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnable& soa,
                                            jobject obj,
-                                           jmethodID mid,
+                                           MethodType mid,
                                            const jvalue* args)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
+// Invokes the given method (either an ArtMethod or a jmethodID) with virtual/interface semantics.
+// Note this will perform lookup based on the 'obj' to determine which implementation of the given
+// method should be invoked.
+template<typename MethodType>
 JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
                                            jobject obj,
-                                           jmethodID mid,
+                                           MethodType mid,
                                            va_list args)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
diff --git a/runtime/reflective_handle.h b/runtime/reflective_handle.h
new file mode 100644
index 0000000..014d976
--- /dev/null
+++ b/runtime/reflective_handle.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_REFLECTIVE_HANDLE_H_
+#define ART_RUNTIME_REFLECTIVE_HANDLE_H_
+
+#include "base/value_object.h"
+#include "reflective_reference.h"
+
+namespace art {
+
+// This is a holder similar to Handle<T> that is used to hold reflective references to ArtField and
+// ArtMethod structures. A reflective reference is one that must be updated if the underlying class
+// or instances are replaced due to structural redefinition or some other process. In general these
+// don't need to be used. It's only when it's important that a reference to a field not become
+// obsolete and it needs to be held over a suspend point that this should be used.
+template <typename T>
+class ReflectiveHandle : public ValueObject {
+ public:
+  static_assert(std::is_same_v<T, ArtField> || std::is_same_v<T, ArtMethod>,
+                "Expected ArtField or ArtMethod");
+
+  ReflectiveHandle() : reference_(nullptr) {}
+
+  ALWAYS_INLINE ReflectiveHandle(const ReflectiveHandle<T>& handle) = default;
+  ALWAYS_INLINE ReflectiveHandle<T>& operator=(const ReflectiveHandle<T>& handle) = default;
+
+  ALWAYS_INLINE explicit ReflectiveHandle(ReflectiveReference<T>* reference)
+      : reference_(reference) {}
+
+  ALWAYS_INLINE T& operator*() const REQUIRES_SHARED(Locks::mutator_lock_) {
+    return *Get();
+  }
+
+  ALWAYS_INLINE T* operator->() const REQUIRES_SHARED(Locks::mutator_lock_) {
+    return Get();
+  }
+
+  ALWAYS_INLINE T* Get() const REQUIRES_SHARED(Locks::mutator_lock_) {
+    return reference_->Ptr();
+  }
+
+  ALWAYS_INLINE bool IsNull() const {
+    // It's safe to null-check it without a read barrier.
+    return reference_->IsNull();
+  }
+
+  ALWAYS_INLINE bool operator!=(std::nullptr_t) const REQUIRES_SHARED(Locks::mutator_lock_) {
+    return !IsNull();
+  }
+
+  ALWAYS_INLINE bool operator==(std::nullptr_t) const REQUIRES_SHARED(Locks::mutator_lock_) {
+    return IsNull();
+  }
+
+ protected:
+  ReflectiveReference<T>* reference_;
+
+ private:
+  friend class BaseReflectiveHandleScope;
+  template <size_t kNumFieldReferences, size_t kNumMethodReferences>
+  friend class StackReflectiveHandleScope;
+};
+
+// Handles that support assignment.
+template <typename T>
+class MutableReflectiveHandle : public ReflectiveHandle<T> {
+ public:
+  MutableReflectiveHandle() {}
+
+  ALWAYS_INLINE MutableReflectiveHandle(const MutableReflectiveHandle<T>& handle)
+      REQUIRES_SHARED(Locks::mutator_lock_) = default;
+
+  ALWAYS_INLINE MutableReflectiveHandle<T>& operator=(const MutableReflectiveHandle<T>& handle)
+      REQUIRES_SHARED(Locks::mutator_lock_) = default;
+
+  ALWAYS_INLINE explicit MutableReflectiveHandle(ReflectiveReference<T>* reference)
+      REQUIRES_SHARED(Locks::mutator_lock_)
+      : ReflectiveHandle<T>(reference) {}
+
+  ALWAYS_INLINE T* Assign(T* reference) REQUIRES_SHARED(Locks::mutator_lock_) {
+    ReflectiveReference<T>* ref = ReflectiveHandle<T>::reference_;
+    T* old = ref->Ptr();
+    ref->Assign(reference);
+    return old;
+  }
+
+ private:
+  friend class BaseReflectiveHandleScope;
+  template <size_t kNumFieldReferences, size_t kNumMethodReferences>
+  friend class StackReflectiveHandleScope;
+};
+
+template<typename T>
+class ReflectiveHandleWrapper : public MutableReflectiveHandle<T> {
+ public:
+  ReflectiveHandleWrapper(T** obj, const MutableReflectiveHandle<T>& handle)
+     : MutableReflectiveHandle<T>(handle), obj_(obj) {
+  }
+
+  ReflectiveHandleWrapper(const ReflectiveHandleWrapper&) = default;
+
+  ~ReflectiveHandleWrapper() {
+    *obj_ = MutableReflectiveHandle<T>::Get();
+  }
+
+ private:
+  T** const obj_;
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_REFLECTIVE_HANDLE_H_
diff --git a/runtime/reflective_handle_scope-inl.h b/runtime/reflective_handle_scope-inl.h
new file mode 100644
index 0000000..64ea9f9
--- /dev/null
+++ b/runtime/reflective_handle_scope-inl.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_REFLECTIVE_HANDLE_SCOPE_INL_H_
+#define ART_RUNTIME_REFLECTIVE_HANDLE_SCOPE_INL_H_
+
+#include "android-base/thread_annotations.h"
+#include "base/mutex.h"
+#include "reflective_handle.h"
+#include "reflective_handle_scope.h"
+#include "thread-current-inl.h"
+
+namespace art {
+
+template <size_t kNumFields, size_t kNumMethods>
+StackReflectiveHandleScope<kNumFields, kNumMethods>::StackReflectiveHandleScope(Thread* self) : field_pos_(0), method_pos_(0) {
+  DCHECK_EQ(self, Thread::Current());
+  PushScope(self);
+}
+
+template <size_t kNumFields, size_t kNumMethods>
+void StackReflectiveHandleScope<kNumFields, kNumMethods>::VisitTargets(
+    ReflectiveValueVisitor* visitor) {
+  Thread* self = Thread::Current();
+  DCHECK(GetThread() == self ||
+         Locks::mutator_lock_->IsExclusiveHeld(self))
+      << *GetThread() << " on thread " << *self;
+  auto visit_one = [&](auto& rv) NO_THREAD_SAFETY_ANALYSIS {
+    Locks::mutator_lock_->AssertSharedHeld(self);
+    if (!rv.IsNull()) {
+      rv.Assign((*visitor)(rv.Ptr(), ReflectiveHandleScopeSourceInfo(this)));
+    }
+  };
+  std::for_each(fields_.begin(), fields_.begin() + field_pos_, visit_one);
+  std::for_each(methods_.begin(), methods_.begin() + method_pos_, visit_one);
+}
+
+template <size_t kNumFields, size_t kNumMethods>
+StackReflectiveHandleScope<kNumFields, kNumMethods>::~StackReflectiveHandleScope() {
+  PopScope();
+}
+
+void BaseReflectiveHandleScope::PushScope(Thread* self) {
+  DCHECK_EQ(self, Thread::Current());
+  self_ = self;
+  link_ = self_->GetTopReflectiveHandleScope();
+  self_->PushReflectiveHandleScope(this);
+}
+
+void BaseReflectiveHandleScope::PopScope() {
+  auto* prev = self_->PopReflectiveHandleScope();
+  CHECK_EQ(prev, this);
+  link_ = nullptr;
+}
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_REFLECTIVE_HANDLE_SCOPE_INL_H_
diff --git a/runtime/reflective_handle_scope.cc b/runtime/reflective_handle_scope.cc
new file mode 100644
index 0000000..2c3ae5e
--- /dev/null
+++ b/runtime/reflective_handle_scope.cc
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "reflective_handle_scope.h"
+
+#include <ostream>
+
+#include "thread.h"
+
+namespace art {
+
+
+void BaseReflectiveHandleScope::Describe(std::ostream& os) const {
+  os << "[BaseReflectiveHandleScope self_=" << *self_ << ", link_=" << link_ << "]";
+}
+
+std::ostream& operator<<(std::ostream& os, const BaseReflectiveHandleScope& brhs) {
+  brhs.Describe(os);
+  return os;
+}
+
+}  // namespace art
diff --git a/runtime/reflective_handle_scope.h b/runtime/reflective_handle_scope.h
new file mode 100644
index 0000000..46cff8b
--- /dev/null
+++ b/runtime/reflective_handle_scope.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_REFLECTIVE_HANDLE_SCOPE_H_
+#define ART_RUNTIME_REFLECTIVE_HANDLE_SCOPE_H_
+
+#include <android-base/logging.h>
+
+#include <array>
+#include <compare>
+#include <functional>
+#include <stack>
+
+#include "android-base/macros.h"
+#include "base/enums.h"
+#include "base/globals.h"
+#include "base/locks.h"
+#include "base/macros.h"
+#include "base/value_object.h"
+#include "reflective_handle.h"
+#include "reflective_reference.h"
+#include "reflective_value_visitor.h"
+
+namespace art {
+
+class ArtField;
+class ArtMethod;
+class BaseReflectiveHandleScope;
+class Thread;
+
+// This is a holder similar to StackHandleScope that is used to hold reflective references to
+// ArtField and ArtMethod structures. A reflective reference is one that must be updated if the
+// underlying class or instances are replaced due to structural redefinition or some other process.
+// In general these don't need to be used. It's only when it's important that a reference to a field
+// not become obsolete and it needs to be held over a suspend point that this should be used. This
+// takes care of the book-keeping to allow the runtime to visit and update ReflectiveHandles when
+// structural redefinition occurs.
+class BaseReflectiveHandleScope {
+ public:
+  template <typename Visitor>
+  ALWAYS_INLINE void VisitTargets(Visitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
+    FunctionReflectiveValueVisitor v(&visitor);
+    VisitTargets(&v);
+  }
+
+  ALWAYS_INLINE virtual ~BaseReflectiveHandleScope() {
+    DCHECK(link_ == nullptr);
+  }
+
+  virtual void VisitTargets(ReflectiveValueVisitor* visitor)
+      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+
+  BaseReflectiveHandleScope* GetLink() {
+    return link_;
+  }
+
+  Thread* GetThread() {
+    return self_;
+  }
+
+  void Describe(std::ostream& os) const;
+
+ protected:
+  ALWAYS_INLINE BaseReflectiveHandleScope() : self_(nullptr), link_(nullptr) {}
+
+  ALWAYS_INLINE inline void PushScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+  ALWAYS_INLINE inline void PopScope() REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Thread this node is rooted in.
+  Thread* self_;
+  // Next node in the handle-scope linked list. Root is held by Thread.
+  BaseReflectiveHandleScope* link_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BaseReflectiveHandleScope);
+};
+std::ostream& operator<<(std::ostream& os, const BaseReflectiveHandleScope& brhs);
+
+template <size_t kNumFields, size_t kNumMethods>
+class StackReflectiveHandleScope : public BaseReflectiveHandleScope {
+ private:
+  static constexpr bool kHasFields = kNumFields > 0;
+  static constexpr bool kHasMethods = kNumMethods > 0;
+
+ public:
+  ALWAYS_INLINE explicit StackReflectiveHandleScope(Thread* self)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  ALWAYS_INLINE ~StackReflectiveHandleScope() REQUIRES_SHARED(Locks::mutator_lock_);
+
+  void VisitTargets(ReflectiveValueVisitor* visitor) override REQUIRES_SHARED(Locks::mutator_lock_);
+
+  template <typename T,
+            typename = typename std::enable_if_t<(kHasFields && std::is_same_v<T, ArtField>) ||
+                                                 (kHasMethods && std::is_same_v<T, ArtMethod>)>>
+  ALWAYS_INLINE MutableReflectiveHandle<T> NewHandle(T* t) REQUIRES_SHARED(Locks::mutator_lock_) {
+    if constexpr (std::is_same_v<T, ArtField>) {
+      return NewFieldHandle(t);
+    } else {
+      static_assert(std::is_same_v<T, ArtMethod>, "Expected ArtField or ArtMethod");
+      return NewMethodHandle(t);
+    }
+  }
+  template<typename T>
+  ALWAYS_INLINE ReflectiveHandleWrapper<T> NewReflectiveHandleWrapper(T** t)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return ReflectiveHandleWrapper<T>(t, NewHandle(*t));
+  }
+
+  ALWAYS_INLINE MutableReflectiveHandle<ArtField> NewFieldHandle(ArtField* f)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    static_assert(kHasFields, "No fields");
+    DCHECK_LT(field_pos_, kNumFields);
+    MutableReflectiveHandle<ArtField> fh(GetMutableFieldHandle(field_pos_++));
+    fh.Assign(f);
+    return fh;
+  }
+  ALWAYS_INLINE ReflectiveHandleWrapper<ArtField> NewReflectiveFieldHandleWrapper(ArtField** f)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return ReflectiveHandleWrapper<ArtField>(f, NewFieldHandle(*f));  // field handle, not method
+  }
+
+  ALWAYS_INLINE ArtField* GetField(size_t i) {
+    static_assert(kHasFields, "No fields");
+    return GetFieldReference(i)->Ptr();
+  }
+  ALWAYS_INLINE ReflectiveHandle<ArtField> GetFieldHandle(size_t i) {
+    static_assert(kHasFields, "No fields");
+    return ReflectiveHandle<ArtField>(GetFieldReference(i));
+  }
+  ALWAYS_INLINE MutableReflectiveHandle<ArtField> GetMutableFieldHandle(size_t i) {
+    static_assert(kHasFields, "No fields");
+    return MutableReflectiveHandle<ArtField>(GetFieldReference(i));
+  }
+
+  ALWAYS_INLINE MutableReflectiveHandle<ArtMethod> NewMethodHandle(ArtMethod* m)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    static_assert(kHasMethods, "No methods");
+    DCHECK_LT(method_pos_, kNumMethods);
+    MutableReflectiveHandle<ArtMethod> mh(GetMutableMethodHandle(method_pos_++));
+    mh.Assign(m);
+    return mh;
+  }
+  ALWAYS_INLINE ReflectiveHandleWrapper<ArtMethod> NewReflectiveMethodHandleWrapper(ArtMethod** m)
+      REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    return ReflectiveHandleWrapper<ArtMethod>(m, NewMethodHandle(*m));
+  }
+
+  ALWAYS_INLINE ArtMethod* GetMethod(size_t i) {
+    static_assert(kHasMethods, "No methods");
+    return GetMethodReference(i)->Ptr();
+  }
+  ALWAYS_INLINE ReflectiveHandle<ArtMethod> GetMethodHandle(size_t i) {
+    static_assert(kHasMethods, "No methods");
+    return ReflectiveHandle<ArtMethod>(GetMethodReference(i));
+  }
+  ALWAYS_INLINE MutableReflectiveHandle<ArtMethod> GetMutableMethodHandle(size_t i) {
+    static_assert(kHasMethods, "No methods");
+    return MutableReflectiveHandle<ArtMethod>(GetMethodReference(i));
+  }
+
+  size_t RemainingFieldSlots() const {
+    return kNumFields - field_pos_;
+  }
+
+  size_t RemainingMethodSlots() const {
+    return kNumMethods - method_pos_;
+  }
+
+ private:
+  ReflectiveReference<ArtMethod>* GetMethodReference(size_t i) {
+    DCHECK_LT(i, method_pos_);
+    return &methods_[i];
+  }
+
+  ReflectiveReference<ArtField>* GetFieldReference(size_t i) {
+    DCHECK_LT(i, field_pos_);
+    return &fields_[i];
+  }
+
+  size_t field_pos_;
+  size_t method_pos_;
+  std::array<ReflectiveReference<ArtField>, kNumFields> fields_;
+  std::array<ReflectiveReference<ArtMethod>, kNumMethods> methods_;
+};
+
+template <size_t kNumMethods>
+using StackArtMethodHandleScope = StackReflectiveHandleScope</*kNumFields=*/0, kNumMethods>;
+
+template <size_t kNumFields>
+using StackArtFieldHandleScope = StackReflectiveHandleScope<kNumFields, /*kNumMethods=*/0>;
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_REFLECTIVE_HANDLE_SCOPE_H_
diff --git a/runtime/reflective_reference.h b/runtime/reflective_reference.h
new file mode 100644
index 0000000..f57c030
--- /dev/null
+++ b/runtime/reflective_reference.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_REFLECTIVE_REFERENCE_H_
+#define ART_RUNTIME_REFLECTIVE_REFERENCE_H_
+
+#include "android-base/macros.h"
+#include "base/macros.h"
+#include "mirror/object_reference.h"
+
+namespace art {
+
+class ArtField;
+class ArtMethod;
+// A reference to an ArtField or an ArtMethod.
+template <class ReflectiveType>
+class ReflectiveReference {
+ public:
+  static_assert(std::is_same_v<ReflectiveType, ArtMethod> ||
+                    std::is_same_v<ReflectiveType, ArtField>,
+                "Unknown type!");
+  ReflectiveReference() : val_(nullptr) {}
+  explicit ReflectiveReference(ReflectiveType* r) : val_(r) {}
+  ReflectiveReference<ReflectiveType>& operator=(const ReflectiveReference<ReflectiveType>& t) =
+      default;
+
+  ReflectiveType* Ptr() {
+    return val_;
+  }
+
+  void Assign(ReflectiveType* r) {
+    val_ = r;
+  }
+
+  bool IsNull() const {
+    return val_ == nullptr;
+  }
+
+  bool operator==(const ReflectiveReference<ReflectiveType>& rr) const {
+    return val_ == rr.val_;
+  }
+  bool operator!=(const ReflectiveReference<ReflectiveType>& rr) const {
+    return !operator==(rr);
+  }
+  bool operator==(std::nullptr_t) const {
+    return IsNull();
+  }
+  bool operator!=(std::nullptr_t) const {
+    return !IsNull();
+  }
+
+ private:
+  ReflectiveType* val_;
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_REFLECTIVE_REFERENCE_H_
diff --git a/runtime/reflective_value_visitor.cc b/runtime/reflective_value_visitor.cc
new file mode 100644
index 0000000..5a288d3
--- /dev/null
+++ b/runtime/reflective_value_visitor.cc
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "reflective_value_visitor.h"
+#include <sstream>
+
+#include "base/locks.h"
+#include "base/mutex-inl.h"
+#include "mirror/class.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+void HeapReflectiveSourceInfo::Describe(std::ostream& os) const {
+  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+  ReflectionSourceInfo::Describe(os);
+  os << " Class=" << src_->GetClass()->PrettyClass();
+}
+
+template<>
+void JniIdReflectiveSourceInfo<jfieldID>::Describe(std::ostream& os) const {
+  ReflectionSourceInfo::Describe(os);
+  os << " jfieldID=" << reinterpret_cast<uintptr_t>(id_);
+}
+
+template<>
+void JniIdReflectiveSourceInfo<jmethodID>::Describe(std::ostream& os) const {
+  ReflectionSourceInfo::Describe(os);
+  os << " jmethodID=" << reinterpret_cast<uintptr_t>(id_);
+}
+
+void ReflectiveHandleScopeSourceInfo::Describe(std::ostream& os) const {
+  ReflectionSourceInfo::Describe(os);
+  os << " source= (" << source_ << ") ";
+  if (source_ == nullptr) {
+    os << "nullptr";
+  } else {
+    os << *source_;
+  }
+}
+}  // namespace art
diff --git a/runtime/reflective_value_visitor.h b/runtime/reflective_value_visitor.h
new file mode 100644
index 0000000..3a72760
--- /dev/null
+++ b/runtime/reflective_value_visitor.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_REFLECTIVE_VALUE_VISITOR_H_
+#define ART_RUNTIME_REFLECTIVE_VALUE_VISITOR_H_
+
+#include <android-base/logging.h>
+
+#include <array>
+#include <compare>
+#include <functional>
+#include <stack>
+
+#include "android-base/macros.h"
+#include "base/enums.h"
+#include "base/globals.h"
+#include "base/locks.h"
+#include "base/macros.h"
+#include "base/value_object.h"
+#include "dex/dex_file.h"
+#include "jni.h"
+#include "mirror/dex_cache.h"
+#include "obj_ptr.h"
+
+namespace art {
+
+class ArtField;
+class ArtMethod;
+class BaseReflectiveHandleScope;
+class Thread;
+
+class ReflectionSourceInfo;
+
+class ReflectiveValueVisitor : public ValueObject {
+ public:
+  virtual ~ReflectiveValueVisitor() {}
+
+  virtual ArtMethod* VisitMethod(ArtMethod* in, const ReflectionSourceInfo& info)
+      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+  virtual ArtField* VisitField(ArtField* in, const ReflectionSourceInfo& info)
+      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+
+  // Give it an entrypoint through operator() to interact with things that expect lambda-like things
+  template <typename T,
+            typename = typename std::enable_if<std::is_same_v<T, ArtField> ||
+                                               std::is_same_v<T, ArtMethod>>>
+  T* operator()(T* t, const ReflectionSourceInfo& info) REQUIRES_SHARED(Locks::mutator_lock_) {
+    if constexpr (std::is_same_v<T, ArtField>) {
+      return VisitField(t, info);
+    } else {
+      static_assert(std::is_same_v<T, ArtMethod>, "Expected ArtField or ArtMethod");
+      return VisitMethod(t, info);
+    }
+  }
+};
+
+template <typename FieldVis, typename MethodVis>
+class FunctionReflectiveValueVisitor : public ReflectiveValueVisitor {
+ public:
+  FunctionReflectiveValueVisitor(FieldVis fv, MethodVis mv) : fv_(fv), mv_(mv) {}
+  ArtField* VisitField(ArtField* in, const ReflectionSourceInfo& info) override
+      REQUIRES(Locks::mutator_lock_) {
+    return fv_(in, info);
+  }
+  ArtMethod* VisitMethod(ArtMethod* in, const ReflectionSourceInfo& info) override
+      REQUIRES(Locks::mutator_lock_) {
+    return mv_(in, info);
+  }
+
+ private:
+  FieldVis fv_;
+  MethodVis mv_;
+};
+
+enum ReflectionSourceType {
+  kSourceUnknown = 0,
+  kSourceJavaLangReflectExecutable,
+  kSourceJavaLangReflectField,
+  kSourceJavaLangInvokeMethodHandle,
+  kSourceJavaLangInvokeFieldVarHandle,
+  kSourceThreadHandleScope,
+  kSourceJniFieldId,
+  kSourceJniMethodId,
+  kSourceDexCacheResolvedMethod,
+  kSourceDexCacheResolvedField,
+  kSourceMiscInternal,
+};
+std::ostream& operator<<(std::ostream& os, const ReflectionSourceType& type);
+
+class ReflectionSourceInfo : public ValueObject {
+ public:
+  virtual ~ReflectionSourceInfo() {}
+  // Thread id 0 is for non thread roots.
+  explicit ReflectionSourceInfo(ReflectionSourceType type) : type_(type) {}
+  virtual void Describe(std::ostream& os) const {
+    os << "Type=" << type_;
+  }
+
+  ReflectionSourceType GetType() const {
+    return type_;
+  }
+
+ private:
+  const ReflectionSourceType type_;
+
+  DISALLOW_COPY_AND_ASSIGN(ReflectionSourceInfo);
+};
+inline std::ostream& operator<<(std::ostream& os, const ReflectionSourceInfo& info) {
+  info.Describe(os);
+  return os;
+}
+
+class ReflectiveHandleScopeSourceInfo : public ReflectionSourceInfo {
+ public:
+  explicit ReflectiveHandleScopeSourceInfo(BaseReflectiveHandleScope* source)
+      : ReflectionSourceInfo(kSourceThreadHandleScope), source_(source) {}
+
+  void Describe(std::ostream& os) const override;
+
+ private:
+  BaseReflectiveHandleScope* source_;
+};
+
+// TODO Maybe give this the ability to retrieve the type and ref, if it's useful.
+class HeapReflectiveSourceInfo : public ReflectionSourceInfo {
+ public:
+  HeapReflectiveSourceInfo(ReflectionSourceType t, mirror::Object* src)
+      : ReflectionSourceInfo(t), src_(src) {}
+  void Describe(std::ostream& os) const override;
+
+ private:
+  ObjPtr<mirror::Object> src_;
+};
+
+// TODO Maybe give this the ability to retrieve the id if it's useful.
+template <typename T,
+          typename = typename std::enable_if_t<std::is_same_v<T, jmethodID> ||
+                                               std::is_same_v<T, jfieldID>>>
+class JniIdReflectiveSourceInfo : public ReflectionSourceInfo {
+ public:
+  explicit JniIdReflectiveSourceInfo(T id)
+      : ReflectionSourceInfo(std::is_same_v<T, jmethodID> ? kSourceJniMethodId : kSourceJniFieldId),
+        id_(id) {}
+  void Describe(std::ostream& os) const override;
+
+ private:
+  T id_;
+};
+
+class DexCacheSourceInfo : public ReflectionSourceInfo {
+ public:
+  explicit DexCacheSourceInfo(ReflectionSourceType type,
+                              size_t index,
+                              ObjPtr<mirror::DexCache> cache)
+      : ReflectionSourceInfo(type), index_(index), cache_(cache) {}
+
+  void Describe(std::ostream& os) const override REQUIRES(Locks::mutator_lock_) {
+    ReflectionSourceInfo::Describe(os);
+    os << " index=" << index_ << " cache_=" << cache_.PtrUnchecked()
+       << " files=" << *cache_->GetDexFile();
+  }
+
+ private:
+  size_t index_;
+  ObjPtr<mirror::DexCache> cache_;
+};
+}  // namespace art
+
+#endif  // ART_RUNTIME_REFLECTIVE_VALUE_VISITOR_H_
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 51a40e7..7c6e34c 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -35,6 +35,7 @@
 #include <cstdlib>
 #include <limits>
 #include <thread>
+#include <unordered_set>
 #include <vector>
 
 #include "android-base/strings.h"
@@ -44,8 +45,6 @@
 #include "arch/arm64/registers_arm64.h"
 #include "arch/context.h"
 #include "arch/instruction_set_features.h"
-#include "arch/mips/registers_mips.h"
-#include "arch/mips64/registers_mips64.h"
 #include "arch/x86/registers_x86.h"
 #include "arch/x86_64/registers_x86_64.h"
 #include "art_field-inl.h"
@@ -84,6 +83,7 @@
 #include "gc/space/image_space.h"
 #include "gc/space/space-inl.h"
 #include "gc/system_weak.h"
+#include "gc/task_processor.h"
 #include "handle_scope-inl.h"
 #include "hidden_api.h"
 #include "image-inl.h"
@@ -94,14 +94,15 @@
 #include "jit/jit_code_cache.h"
 #include "jit/profile_saver.h"
 #include "jni/java_vm_ext.h"
-#include "jni/jni_internal.h"
+#include "jni/jni_id_manager.h"
+#include "jni_id_type.h"
 #include "linear_alloc.h"
 #include "memory_representation.h"
 #include "mirror/array.h"
 #include "mirror/class-alloc-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/class_ext.h"
-#include "mirror/class_loader.h"
+#include "mirror/class_loader-inl.h"
 #include "mirror/emulated_stack_frame.h"
 #include "mirror/field.h"
 #include "mirror/method.h"
@@ -113,6 +114,7 @@
 #include "mirror/var_handle.h"
 #include "monitor.h"
 #include "native/dalvik_system_DexFile.h"
+#include "native/dalvik_system_BaseDexClassLoader.h"
 #include "native/dalvik_system_VMDebug.h"
 #include "native/dalvik_system_VMRuntime.h"
 #include "native/dalvik_system_VMStack.h"
@@ -143,13 +145,16 @@
 #include "native_bridge_art_interface.h"
 #include "native_stack_dump.h"
 #include "nativehelper/scoped_local_ref.h"
+#include "oat.h"
 #include "oat_file.h"
 #include "oat_file_manager.h"
+#include "oat_quick_method_header.h"
 #include "object_callbacks.h"
 #include "parsed_options.h"
 #include "quick/quick_method_frame_info.h"
 #include "reflection.h"
 #include "runtime_callbacks.h"
+#include "runtime_common.h"
 #include "runtime_intrinsics.h"
 #include "runtime_options.h"
 #include "scoped_thread_state_change-inl.h"
@@ -189,8 +194,6 @@
 // barrier config.
 static constexpr double kExtraDefaultHeapGrowthMultiplier = kUseReadBarrier ? 1.0 : 0.0;
 
-static constexpr const char* kApexBootImageLocation = "/system/framework/apex.art";
-
 Runtime* Runtime::instance_ = nullptr;
 
 struct TraceConfig {
@@ -229,6 +232,7 @@
       instruction_set_(InstructionSet::kNone),
       compiler_callbacks_(nullptr),
       is_zygote_(false),
+      is_primary_zygote_(false),
       is_system_server_(false),
       must_relocate_(false),
       is_concurrent_gc_enabled_(true),
@@ -283,6 +287,7 @@
       safe_mode_(false),
       hidden_api_policy_(hiddenapi::EnforcementPolicy::kDisabled),
       core_platform_api_policy_(hiddenapi::EnforcementPolicy::kDisabled),
+      test_api_policy_(hiddenapi::EnforcementPolicy::kDisabled),
       dedupe_hidden_api_warnings_(true),
       hidden_api_access_event_log_rate_(0),
       dump_native_stack_on_sig_quit_(true),
@@ -290,7 +295,9 @@
       // Initially assume we perceive jank in case the process state is never updated.
       process_state_(kProcessStateJankPerceptible),
       zygote_no_threads_(false),
-      verifier_logging_threshold_ms_(100) {
+      verifier_logging_threshold_ms_(100),
+      verifier_missing_kthrow_fatal_(false),
+      perfetto_hprof_enabled_(false) {
   static_assert(Runtime::kCalleeSaveSize ==
                     static_cast<uint32_t>(CalleeSaveType::kLastCalleeSaveType), "Unexpected size");
   CheckConstants();
@@ -376,6 +383,10 @@
     // The saver will try to dump the profiles before being sopped and that
     // requires holding the mutator lock.
     jit_->StopProfileSaver();
+    // Delete thread pool before the thread list since we don't want to wait forever on the
+    // JIT compiler threads. Also this should be run before marking the runtime
+    // as shutting down as some tasks may require mutator access.
+    jit_->DeleteThreadPool();
   }
   if (oat_file_manager_ != nullptr) {
     oat_file_manager_->WaitForWorkersToBeCreated();
@@ -402,7 +413,7 @@
   // Shutdown any trace running.
   Trace::Shutdown();
 
-  // Report death. Clients me require a working thread, still, so do it before GC completes and
+  // Report death. Clients may require a working thread, still, so do it before GC completes and
   // all non-daemon threads are done.
   {
     ScopedObjectAccess soa(self);
@@ -417,13 +428,6 @@
   // Make sure to let the GC complete if it is running.
   heap_->WaitForGcToComplete(gc::kGcCauseBackground, self);
   heap_->DeleteThreadPool();
-  if (jit_ != nullptr) {
-    ScopedTrace trace2("Delete jit");
-    VLOG(jit) << "Deleting jit thread pool";
-    // Delete thread pool before the thread list since we don't want to wait forever on the
-    // JIT compiler threads.
-    jit_->DeleteThreadPool();
-  }
   if (oat_file_manager_ != nullptr) {
     oat_file_manager_->DeleteThreadPool();
   }
@@ -432,9 +436,14 @@
 
   // Make sure our internal threads are dead before we start tearing down things they're using.
   GetRuntimeCallbacks()->StopDebugger();
+  // Deletion ordering is tricky. Null out everything we've deleted.
   delete signal_catcher_;
+  signal_catcher_ = nullptr;
 
   // Make sure all other non-daemon threads have terminated, and all daemon threads are suspended.
+  // Also wait for daemon threads to quiesce, so that in addition to being "suspended", they
+  // no longer access monitor and thread list data structures. We leak user daemon threads
+  // themselves, since we have no mechanism for shutting them down.
   {
     ScopedTrace trace2("Delete thread list");
     thread_list_->ShutDown();
@@ -451,7 +460,10 @@
   }
 
   // Finally delete the thread list.
+  // Thread_list_ can be accessed by "suspended" threads, e.g. in InflateThinLocked.
+  // We assume that by this point, we've waited long enough for things to quiesce.
   delete thread_list_;
+  thread_list_ = nullptr;
 
   // Delete the JIT after thread list to ensure that there is no remaining threads which could be
   // accessing the instrumentation when we delete it.
@@ -466,11 +478,17 @@
 
   ScopedTrace trace2("Delete state");
   delete monitor_list_;
+  monitor_list_ = nullptr;
   delete monitor_pool_;
+  monitor_pool_ = nullptr;
   delete class_linker_;
+  class_linker_ = nullptr;
   delete heap_;
+  heap_ = nullptr;
   delete intern_table_;
+  intern_table_ = nullptr;
   delete oat_file_manager_;
+  oat_file_manager_ = nullptr;
   Thread::Shutdown();
   QuasiAtomic::Shutdown();
   verifier::ClassVerifier::Shutdown();
@@ -492,8 +510,6 @@
   // instance. We rely on a small initialization order issue in Runtime::Start() that requires
   // elements of WellKnownClasses to be null, see b/65500943.
   WellKnownClasses::Clear();
-
-  JniShutdownNativeCallerCheck();
 }
 
 struct AbortState {
@@ -519,7 +535,6 @@
 
     if (self == nullptr) {
       os << "(Aborting thread was not attached to runtime!)\n";
-      DumpKernelStack(os, GetTid(), "  kernel: ", false);
       DumpNativeStack(os, GetTid(), nullptr, "  native: ", nullptr);
     } else {
       os << "Aborting thread:\n";
@@ -609,6 +624,16 @@
 #endif
   }
 
+  // May be coming from an unattached thread.
+  if (Thread::Current() == nullptr) {
+    Runtime* current = Runtime::Current();
+    if (current != nullptr && current->IsStarted() && !current->IsShuttingDown(nullptr)) {
+      // We do not flag this to the unexpected-signal handler so that that may dump the stack.
+      abort();
+      UNREACHABLE();
+    }
+  }
+
   {
     // Ensure that we don't have multiple threads trying to abort at once,
     // which would result in significantly worse diagnostics.
@@ -638,6 +663,8 @@
     LOG(FATAL_WITHOUT_ABORT) << msg;
   }
 
+  FlagRuntimeAbort();
+
   // Call the abort hook if we have one.
   if (Runtime::Current() != nullptr && Runtime::Current()->abort_ != nullptr) {
     LOG(FATAL_WITHOUT_ABORT) << "Calling abort hook...";
@@ -655,12 +682,15 @@
     GetJit()->PreZygoteFork();
   }
   heap_->PreZygoteFork();
+  PreZygoteForkNativeBridge();
 }
 
 void Runtime::PostZygoteFork() {
   if (GetJit() != nullptr) {
     GetJit()->PostZygoteFork();
   }
+  // Reset all stats.
+  ResetStats(0xFFFFFFFF);
 }
 
 void Runtime::CallExitHook(jint status) {
@@ -684,6 +714,7 @@
     // from mutators. See b/32167580.
     GetJit()->GetCodeCache()->SweepRootTables(visitor);
   }
+  thread_list_->SweepInterpreterCaches(visitor);
 
   // All other generic system-weak holders.
   for (gc::AbstractSystemWeakHolder* holder : system_weak_holders_) {
@@ -756,7 +787,7 @@
 
   JValue result = InvokeWithJValues(soa,
                                     nullptr,
-                                    jni::EncodeArtMethod(getSystemClassLoader),
+                                    getSystemClassLoader,
                                     nullptr);
   JNIEnv* env = soa.Self()->GetJniEnv();
   ScopedLocalRef<jobject> system_class_loader(env, soa.AddLocalReference<jobject>(result.GetL()));
@@ -784,8 +815,13 @@
   if (!compiler_executable_.empty()) {
     return compiler_executable_;
   }
-  std::string compiler_executable(GetAndroidRoot());
-  compiler_executable += (kIsDebugBuild ? "/bin/dex2oatd" : "/bin/dex2oat");
+  std::string compiler_executable = GetArtBinDir() + "/dex2oat";
+  if (kIsDebugBuild) {
+    compiler_executable += 'd';
+  }
+  if (kIsTargetBuild) {
+    compiler_executable += Is64BitInstructionSet(kRuntimeISA) ? "64" : "32";
+  }
   return compiler_executable;
 }
 
@@ -815,7 +851,9 @@
   // Only 64-bit as prctl() may fail in 32 bit userspace on a 64-bit kernel.
 #if defined(__linux__) && !defined(ART_TARGET_ANDROID) && defined(__x86_64__)
   if (kIsDebugBuild) {
-    CHECK_EQ(prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY), 0);
+    if (prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY) != 0) {
+      PLOG(WARNING) << "Failed setting PR_SET_PTRACER to PR_SET_PTRACER_ANY";
+    }
   }
 #endif
 
@@ -828,13 +866,15 @@
 
   if (!IsImageDex2OatEnabled() || !GetHeap()->HasBootImageSpace()) {
     ScopedObjectAccess soa(self);
-    StackHandleScope<2> hs(soa.Self());
+    StackHandleScope<3> hs(soa.Self());
 
     ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots = GetClassLinker()->GetClassRoots();
     auto class_class(hs.NewHandle<mirror::Class>(GetClassRoot<mirror::Class>(class_roots)));
+    auto string_class(hs.NewHandle<mirror::Class>(GetClassRoot<mirror::String>(class_roots)));
     auto field_class(hs.NewHandle<mirror::Class>(GetClassRoot<mirror::Field>(class_roots)));
 
     class_linker_->EnsureInitialized(soa.Self(), class_class, true, true);
+    class_linker_->EnsureInitialized(soa.Self(), string_class, true, true);
     self->AssertNoPendingException();
     // Field class is needed for register_java_net_InetAddress in libcore, b/28153851.
     class_linker_->EnsureInitialized(soa.Self(), field_class, true, true);
@@ -853,6 +893,10 @@
   // needs the SignaturePolymorphic annotation class which is initialized in WellKnownClasses::Init.
   InitializeIntrinsics();
 
+  // InitializeCorePlatformApiPrivateFields() needs to be called after well known class
+  // initializtion in InitNativeMethods().
+  art::hiddenapi::InitializeCorePlatformApiPrivateFields();
+
   // Initialize well known thread group values that may be accessed threads while attaching.
   InitThreadGroups(self);
 
@@ -893,6 +937,7 @@
         : NativeBridgeAction::kUnload;
     InitNonZygoteOrPostFork(self->GetJniEnv(),
                             /* is_system_server= */ false,
+                            /* is_child_zygote= */ false,
                             action,
                             GetInstructionSetString(kRuntimeISA));
   }
@@ -957,35 +1002,49 @@
 void Runtime::InitNonZygoteOrPostFork(
     JNIEnv* env,
     bool is_system_server,
+    // This is true when we are initializing a child-zygote. It requires
+    // native bridge initialization to be able to run guest native code in
+    // doPreload().
+    bool is_child_zygote,
     NativeBridgeAction action,
     const char* isa,
     bool profile_system_server) {
-  is_zygote_ = false;
-
   if (is_native_bridge_loaded_) {
     switch (action) {
       case NativeBridgeAction::kUnload:
         UnloadNativeBridge();
         is_native_bridge_loaded_ = false;
         break;
-
       case NativeBridgeAction::kInitialize:
         InitializeNativeBridge(env, isa);
         break;
     }
   }
 
-  if (is_system_server) {
-    jit_options_->SetSaveProfilingInfo(profile_system_server);
-    if (profile_system_server) {
-      jit_options_->SetWaitForJitNotificationsToSaveProfile(false);
-      VLOG(profiler) << "Enabling system server profiles";
-    }
+  if (is_child_zygote) {
+    // If creating a child-zygote we only initialize native bridge. The rest of
+    // runtime post-fork logic would spin up threads for Binder and JDWP.
+    // Instead, the Java side of the child process will call a static main in a
+    // class specified by the parent.
+    return;
+  }
+
+  DCHECK(!IsZygote());
+
+  if (is_system_server && profile_system_server) {
+    // Set the system server package name to "android".
+    // This is used to tell the difference between samples provided by system server
+    // and samples generated by other apps when processing boot image profiles.
+    SetProcessPackageName("android");
+    jit_options_->SetWaitForJitNotificationsToSaveProfile(false);
+    VLOG(profiler) << "Enabling system server profiles";
   }
 
   // Create the thread pools.
   heap_->CreateThreadPool();
-  {
+  // Avoid creating the runtime thread pool for system server since it will not be used and would
+  // waste memory.
+  if (!is_system_server) {
     ScopedTrace timing("CreateThreadPool");
     constexpr size_t kStackSize = 64 * KB;
     constexpr size_t kMaxRuntimeWorkers = 4u;
@@ -1003,10 +1062,27 @@
 
   StartSignalCatcher();
 
+  ScopedObjectAccess soa(Thread::Current());
+  if (IsPerfettoHprofEnabled() &&
+      (Dbg::IsJdwpAllowed() || IsProfileableFromShell() || IsJavaDebuggable() ||
+       Runtime::Current()->IsSystemServer())) {
+    std::string err;
+    ScopedTrace tr("perfetto_hprof init.");
+    ScopedThreadSuspension sts(Thread::Current(), ThreadState::kNative);
+    if (!EnsurePerfettoPlugin(&err)) {
+      LOG(WARNING) << "Failed to load perfetto_hprof: " << err;
+    }
+  }
+  if (LIKELY(automatically_set_jni_ids_indirection_) && CanSetJniIdType()) {
+    if (IsJavaDebuggable()) {
+      SetJniIdType(JniIdType::kIndices);
+    } else {
+      SetJniIdType(JniIdType::kPointer);
+    }
+  }
   // Start the JDWP thread. If the command-line debugger flags specified "suspend=y",
   // this will pause the runtime (in the internal debugger implementation), so we probably want
   // this to come last.
-  ScopedObjectAccess soa(Thread::Current());
   GetRuntimeCallbacks()->StartDebugger();
 }
 
@@ -1057,13 +1133,6 @@
       continue;
     }
     bool verify = Runtime::Current()->IsVerificationEnabled();
-    // In the case we're using the apex boot image, we don't have support yet
-    // on reading vdex files of boot classpath. So just assume all boot classpath
-    // dex files have been verified (this should always be the case as the default boot
-    // image has been generated at build time).
-    if (Runtime::Current()->IsUsingApexBootImageLocation() && !kIsDebugBuild) {
-      verify = false;
-    }
     if (!dex_file_loader.Open(dex_filename,
                               dex_location,
                               verify,
@@ -1077,7 +1146,7 @@
   return failure_count;
 }
 
-void Runtime::SetSentinel(mirror::Object* sentinel) {
+void Runtime::SetSentinel(ObjPtr<mirror::Object> sentinel) {
   CHECK(sentinel_.Read() == nullptr);
   CHECK(sentinel != nullptr);
   CHECK(!heap_->IsMovableObject(sentinel));
@@ -1101,7 +1170,7 @@
   CHECK(klass != nullptr);
   gc::AllocatorType allocator_type = runtime->GetHeap()->GetCurrentAllocator();
   ObjPtr<mirror::Throwable> exception_object = ObjPtr<mirror::Throwable>::DownCast(
-      klass->Alloc</* kIsInstrumented= */ true>(self, allocator_type));
+      klass->Alloc(self, allocator_type));
   CHECK(exception_object != nullptr);
   *exception = GcRoot<mirror::Throwable>(exception_object);
   // Initialize the "detailMessage" field.
@@ -1131,6 +1200,9 @@
 
   MemMap::Init();
 
+  verifier_missing_kthrow_fatal_ = runtime_options.GetOrDefault(Opt::VerifierMissingKThrowFatal);
+  perfetto_hprof_enabled_ = runtime_options.GetOrDefault(Opt::PerfettoHprof);
+
   // Try to reserve a dedicated fault page. This is allocated for clobbered registers and sentinels.
   // If we cannot reserve it, log a warning.
   // Note: We allocate this first to have a good chance of grabbing the page. The address (0xebad..)
@@ -1162,15 +1234,13 @@
 
   oat_file_manager_ = new OatFileManager;
 
+  jni_id_manager_.reset(new jni::JniIdManager);
+
   Thread::SetSensitiveThreadHook(runtime_options.GetOrDefault(Opt::HookIsSensitiveThread));
   Monitor::Init(runtime_options.GetOrDefault(Opt::LockProfThreshold),
                 runtime_options.GetOrDefault(Opt::StackDumpLockProfThreshold));
 
   image_location_ = runtime_options.GetOrDefault(Opt::Image);
-  {
-    std::string error_msg;
-    is_using_apex_boot_image_location_ = (image_location_ == kApexBootImageLocation);
-  }
 
   SetInstructionSet(runtime_options.GetOrDefault(Opt::ImageInstructionSet));
   boot_class_path_ = runtime_options.ReleaseOrDefault(Opt::BootClassPath);
@@ -1192,8 +1262,6 @@
                                                     system_oat_location,
                                                     /*executable=*/ false,
                                                     /*low_4gb=*/ false,
-                                                    /*abs_dex_location=*/ nullptr,
-                                                    /*reservation=*/ nullptr,
                                                     &error_msg));
     if (oat_file == nullptr) {
       LOG(ERROR) << "Could not open boot oat file for extracting boot class path: " << error_msg;
@@ -1216,6 +1284,7 @@
   compiler_callbacks_ = runtime_options.GetOrDefault(Opt::CompilerCallbacksPtr);
   must_relocate_ = runtime_options.GetOrDefault(Opt::Relocate);
   is_zygote_ = runtime_options.Exists(Opt::Zygote);
+  is_primary_zygote_ = runtime_options.Exists(Opt::PrimaryZygote);
   is_explicit_gc_disabled_ = runtime_options.Exists(Opt::DisableExplicitGC);
   image_dex2oat_enabled_ = runtime_options.GetOrDefault(Opt::ImageDex2Oat);
   dump_native_stack_on_sig_quit_ = runtime_options.GetOrDefault(Opt::DumpNativeStackOnSigQuit);
@@ -1282,6 +1351,10 @@
   is_low_memory_mode_ = runtime_options.Exists(Opt::LowMemoryMode);
   madvise_random_access_ = runtime_options.GetOrDefault(Opt::MadviseRandomAccess);
 
+  jni_ids_indirection_ = runtime_options.GetOrDefault(Opt::OpaqueJniIds);
+  automatically_set_jni_ids_indirection_ =
+      runtime_options.GetOrDefault(Opt::AutoPromoteOpaqueJniIds);
+
   plugins_ = runtime_options.ReleaseOrDefault(Opt::Plugins);
   agent_specs_ = runtime_options.ReleaseOrDefault(Opt::AgentPath);
   // TODO Add back in -agentlib
@@ -1311,6 +1384,7 @@
                        runtime_options.GetOrDefault(Opt::HeapMaxFree),
                        runtime_options.GetOrDefault(Opt::HeapTargetUtilization),
                        foreground_heap_growth_multiplier,
+                       runtime_options.GetOrDefault(Opt::StopForNativeAllocs),
                        runtime_options.GetOrDefault(Opt::MemoryMaximumSize),
                        runtime_options.GetOrDefault(Opt::NonMovingSpaceCapacity),
                        GetBootClassPath(),
@@ -1360,30 +1434,16 @@
       VLOG(jdwp) << "Disabling all JDWP support.";
       if (!jdwp_options_.empty()) {
         bool has_transport = jdwp_options_.find("transport") != std::string::npos;
-        const char* transport_internal = !has_transport ? "transport=dt_android_adb," : "";
         std::string adb_connection_args =
             std::string("  -XjdwpProvider:adbconnection -XjdwpOptions:") + jdwp_options_;
         LOG(WARNING) << "Jdwp options given when jdwp is disabled! You probably want to enable "
                      << "jdwp with one of:" << std::endl
-                     << "  -XjdwpProvider:internal "
-                     << "-XjdwpOptions:" << transport_internal << jdwp_options_ << std::endl
                      << "  -Xplugin:libopenjdkjvmti" << (kIsDebugBuild ? "d" : "") << ".so "
                      << "-agentpath:libjdwp.so=" << jdwp_options_ << std::endl
                      << (has_transport ? "" : adb_connection_args);
       }
       break;
     }
-    case JdwpProvider::kInternal: {
-      if (runtime_options.Exists(Opt::JdwpOptions)) {
-        JDWP::JdwpOptions ops;
-        if (!JDWP::ParseJdwpOptions(runtime_options.GetOrDefault(Opt::JdwpOptions), &ops)) {
-          LOG(ERROR) << "failed to parse jdwp options!";
-          return false;
-        }
-        Dbg::ConfigureJdwp(ops);
-      }
-      break;
-    }
     case JdwpProvider::kAdbConnection: {
       constexpr const char* plugin_name = kIsDebugBuild ? "libadbconnectiond.so"
                                                         : "libadbconnection.so";
@@ -1395,7 +1455,6 @@
     }
   }
   callbacks_->AddThreadLifecycleCallback(Dbg::GetThreadLifecycleCallback());
-  callbacks_->AddClassLoadCallback(Dbg::GetClassLoadCallback());
 
   jit_options_.reset(jit::JitOptions::CreateFromRuntimeArguments(runtime_options));
   if (IsAotCompiler()) {
@@ -1434,8 +1493,6 @@
     case InstructionSet::kX86:
     case InstructionSet::kArm64:
     case InstructionSet::kX86_64:
-    case InstructionSet::kMips:
-    case InstructionSet::kMips64:
       implicit_null_checks_ = true;
       // Historical note: Installing stack protection was not playing well with Valgrind.
       implicit_so_checks_ = true;
@@ -1544,9 +1601,9 @@
       }
       class_linker_->AddExtraBootDexFiles(self, std::move(extra_boot_class_path));
     }
-    if (IsJavaDebuggable()) {
-      // Now that we have loaded the boot image, deoptimize its methods if we are running
-      // debuggable, as the code may have been compiled non-debuggable.
+    if (IsJavaDebuggable() || jit_options_->GetProfileSaverOptions().GetProfileBootClassPath()) {
+      // Deoptimize the boot image if debuggable  as the code may have been compiled non-debuggable.
+      // Also deoptimize if we are profiling the boot class path.
       ScopedThreadSuspension sts(self, ThreadState::kNative);
       ScopedSuspendAll ssa(__FUNCTION__);
       DeoptimizeBootImage();
@@ -1577,7 +1634,7 @@
 
   CHECK(class_linker_ != nullptr);
 
-  verifier::ClassVerifier::Init();
+  verifier::ClassVerifier::Init(class_linker_);
 
   if (runtime_options.Exists(Opt::MethodTrace)) {
     trace_config_.reset(new TraceConfig());
@@ -1594,20 +1651,23 @@
 
   if (GetHeap()->HasBootImageSpace()) {
     const ImageHeader& image_header = GetHeap()->GetBootImageSpaces()[0]->GetImageHeader();
+    ObjPtr<mirror::ObjectArray<mirror::Object>> boot_image_live_objects =
+        ObjPtr<mirror::ObjectArray<mirror::Object>>::DownCast(
+            image_header.GetImageRoot(ImageHeader::kBootImageLiveObjects));
     pre_allocated_OutOfMemoryError_when_throwing_exception_ = GcRoot<mirror::Throwable>(
-        image_header.GetImageRoot(ImageHeader::kOomeWhenThrowingException)->AsThrowable());
+        boot_image_live_objects->Get(ImageHeader::kOomeWhenThrowingException)->AsThrowable());
     DCHECK(pre_allocated_OutOfMemoryError_when_throwing_exception_.Read()->GetClass()
                ->DescriptorEquals("Ljava/lang/OutOfMemoryError;"));
     pre_allocated_OutOfMemoryError_when_throwing_oome_ = GcRoot<mirror::Throwable>(
-        image_header.GetImageRoot(ImageHeader::kOomeWhenThrowingOome)->AsThrowable());
+        boot_image_live_objects->Get(ImageHeader::kOomeWhenThrowingOome)->AsThrowable());
     DCHECK(pre_allocated_OutOfMemoryError_when_throwing_oome_.Read()->GetClass()
                ->DescriptorEquals("Ljava/lang/OutOfMemoryError;"));
     pre_allocated_OutOfMemoryError_when_handling_stack_overflow_ = GcRoot<mirror::Throwable>(
-        image_header.GetImageRoot(ImageHeader::kOomeWhenHandlingStackOverflow)->AsThrowable());
+        boot_image_live_objects->Get(ImageHeader::kOomeWhenHandlingStackOverflow)->AsThrowable());
     DCHECK(pre_allocated_OutOfMemoryError_when_handling_stack_overflow_.Read()->GetClass()
                ->DescriptorEquals("Ljava/lang/OutOfMemoryError;"));
     pre_allocated_NoClassDefFoundError_ = GcRoot<mirror::Throwable>(
-        image_header.GetImageRoot(ImageHeader::kNoClassDefFoundError)->AsThrowable());
+        boot_image_live_objects->Get(ImageHeader::kNoClassDefFoundError)->AsThrowable());
     DCHECK(pre_allocated_NoClassDefFoundError_.Read()->GetClass()
                ->DescriptorEquals("Ljava/lang/NoClassDefFoundError;"));
   } else {
@@ -1645,6 +1705,9 @@
                                     "no stack trace available");
   }
 
+  // Class-roots are setup, we can now finish initializing the JniIdManager.
+  GetJniIdManager()->Init(self);
+
   // Runtime initialization is largely done now.
   // We load plugins first since that can modify the runtime state slightly.
   // Load all plugins
@@ -1725,29 +1788,48 @@
     callbacks_->NextRuntimePhase(RuntimePhaseCallback::RuntimePhase::kInitialAgents);
   }
 
+  if (IsZygote() && IsPerfettoHprofEnabled()) {
+    constexpr const char* plugin_name = kIsDebugBuild ?
+    "libperfetto_hprofd.so" : "libperfetto_hprof.so";
+    // Load eagerly in Zygote to improve app startup times. This will make
+    // subsequent dlopens for the library no-ops.
+    dlopen(plugin_name, RTLD_NOW | RTLD_LOCAL);
+  }
+
   VLOG(startup) << "Runtime::Init exiting";
 
   // Set OnlyUseSystemOatFiles only after boot classpath has been set up.
-  if (is_zygote_ || runtime_options.Exists(Opt::OnlyUseSystemOatFiles)) {
-    oat_file_manager_->SetOnlyUseSystemOatFiles(/*enforce=*/ true,
-                                                /*assert_no_files_loaded=*/ true);
+  if (runtime_options.Exists(Opt::OnlyUseSystemOatFiles)) {
+    oat_file_manager_->SetOnlyUseSystemOatFiles();
   }
 
   return true;
 }
 
-static bool EnsureJvmtiPlugin(Runtime* runtime,
-                              std::vector<Plugin>* plugins,
-                              std::string* error_msg) {
-  constexpr const char* plugin_name = kIsDebugBuild ? "libopenjdkjvmtid.so" : "libopenjdkjvmti.so";
-
+bool Runtime::EnsurePluginLoaded(const char* plugin_name, std::string* error_msg) {
   // Is the plugin already loaded?
-  for (const Plugin& p : *plugins) {
+  for (const Plugin& p : plugins_) {
     if (p.GetLibrary() == plugin_name) {
       return true;
     }
   }
+  Plugin new_plugin = Plugin::Create(plugin_name);
 
+  if (!new_plugin.Load(error_msg)) {
+    return false;
+  }
+  plugins_.push_back(std::move(new_plugin));
+  return true;
+}
+
+bool Runtime::EnsurePerfettoPlugin(std::string* error_msg) {
+  constexpr const char* plugin_name = kIsDebugBuild ?
+    "libperfetto_hprofd.so" : "libperfetto_hprof.so";
+  return EnsurePluginLoaded(plugin_name, error_msg);
+}
+
+static bool EnsureJvmtiPlugin(Runtime* runtime,
+                              std::string* error_msg) {
   // TODO Rename Dbg::IsJdwpAllowed is IsDebuggingAllowed.
   DCHECK(Dbg::IsJdwpAllowed() || !runtime->IsJavaDebuggable())
       << "Being debuggable requires that jdwp (i.e. debugging) is allowed.";
@@ -1758,14 +1840,8 @@
     return false;
   }
 
-  Plugin new_plugin = Plugin::Create(plugin_name);
-
-  if (!new_plugin.Load(error_msg)) {
-    return false;
-  }
-
-  plugins->push_back(std::move(new_plugin));
-  return true;
+  constexpr const char* plugin_name = kIsDebugBuild ? "libopenjdkjvmtid.so" : "libopenjdkjvmti.so";
+  return runtime->EnsurePluginLoaded(plugin_name, error_msg);
 }
 
 // Attach a new agent and add it to the list of runtime agents
@@ -1776,7 +1852,7 @@
 //
 void Runtime::AttachAgent(JNIEnv* env, const std::string& agent_arg, jobject class_loader) {
   std::string error_msg;
-  if (!EnsureJvmtiPlugin(this, &plugins_, &error_msg)) {
+  if (!EnsureJvmtiPlugin(this, &error_msg)) {
     LOG(WARNING) << "Could not load plugin: " << error_msg;
     ScopedObjectAccess soa(Thread::Current());
     ThrowIOException("%s", error_msg.c_str());
@@ -1813,9 +1889,20 @@
   // methods to be loaded first.
   WellKnownClasses::Init(env);
 
-  // Then set up libjavacore / libopenjdk, which are just a regular JNI libraries with
-  // a regular JNI_OnLoad. Most JNI libraries can just use System.loadLibrary, but
-  // libcore can't because it's the library that implements System.loadLibrary!
+  // Then set up libjavacore / libopenjdk / libicu_jni, which are just
+  // regular JNI libraries with a regular JNI_OnLoad. Most JNI libraries can
+  // just use System.loadLibrary, but libcore can't because it's the library
+  // that implements System.loadLibrary!
+
+  // libicu_jni has to be initialized before libopenjdk{d} due to runtime dependency from
+  // libopenjdk{d} to Icu4cMetadata native methods in libicu_jni. See http://b/143888405
+  {
+    std::string error_msg;
+    if (!java_vm_->LoadNativeLibrary(
+          env, "libicu_jni.so", nullptr, WellKnownClasses::java_lang_Object, &error_msg)) {
+      LOG(FATAL) << "LoadNativeLibrary failed for \"libicu_jni.so\": " << error_msg;
+    }
+  }
   {
     std::string error_msg;
     if (!java_vm_->LoadNativeLibrary(
@@ -1837,10 +1924,6 @@
   // Initialize well known classes that may invoke runtime native methods.
   WellKnownClasses::LateInit(env);
 
-  // Having loaded native libraries for Managed Core library, enable field and
-  // method resolution checks via JNI from native code.
-  JniInitializeNativeCallerCheck();
-
   VLOG(startup) << "Runtime::InitNativeMethods exiting";
 }
 
@@ -1880,6 +1963,7 @@
 
 void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) {
   register_dalvik_system_DexFile(env);
+  register_dalvik_system_BaseDexClassLoader(env);
   register_dalvik_system_VMDebug(env);
   register_dalvik_system_VMRuntime(env);
   register_dalvik_system_VMStack(env);
@@ -1986,7 +2070,7 @@
   Thread::Current()->GetStats()->Clear(kinds >> 16);
 }
 
-int32_t Runtime::GetStat(int kind) {
+uint64_t Runtime::GetStat(int kind) {
   RuntimeStats* stats;
   if (kind < (1<<16)) {
     stats = GetStats();
@@ -2008,8 +2092,7 @@
   case KIND_CLASS_INIT_COUNT:
     return stats->class_init_count;
   case KIND_CLASS_INIT_TIME:
-    // Convert ns to us, reduce to 32 bits.
-    return static_cast<int>(stats->class_init_time_ns / 1000);
+    return stats->class_init_time_ns;
   case KIND_EXT_ALLOCATED_OBJECTS:
   case KIND_EXT_ALLOCATED_BYTES:
   case KIND_EXT_FREED_OBJECTS:
@@ -2112,12 +2195,12 @@
 void Runtime::VisitConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags) {
   intern_table_->VisitRoots(visitor, flags);
   class_linker_->VisitRoots(visitor, flags);
+  jni_id_manager_->VisitRoots(visitor);
   heap_->VisitAllocationRecords(visitor);
   if ((flags & kVisitRootFlagNewRoots) == 0) {
     // Guaranteed to have no new roots in the constant roots.
     VisitConstantRoots(visitor);
   }
-  Dbg::VisitRoots(visitor);
 }
 
 void Runtime::VisitTransactionRoots(RootVisitor* visitor) {
@@ -2155,6 +2238,13 @@
   VisitConcurrentRoots(visitor, flags);
 }
 
+void Runtime::VisitReflectiveTargets(ReflectiveValueVisitor *visitor) {
+  thread_list_->VisitReflectiveTargets(visitor);
+  heap_->VisitReflectiveTargets(visitor);
+  jni_id_manager_->VisitReflectiveTargets(visitor);
+  callbacks_->VisitReflectiveTargets(visitor);
+}
+
 void Runtime::VisitImageRoots(RootVisitor* visitor) {
   for (auto* space : GetHeap()->GetContinuousSpaces()) {
     if (space->IsImageSpace()) {
@@ -2173,7 +2263,8 @@
   }
 }
 
-static ArtMethod* CreateRuntimeMethod(ClassLinker* class_linker, LinearAlloc* linear_alloc) {
+static ArtMethod* CreateRuntimeMethod(ClassLinker* class_linker, LinearAlloc* linear_alloc)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
   const PointerSize image_pointer_size = class_linker->GetImagePointerSize();
   const size_t method_alignment = ArtMethod::Alignment(image_pointer_size);
   const size_t method_size = ArtMethod::Size(image_pointer_size);
@@ -2290,8 +2381,6 @@
       break;
     case InstructionSet::kArm:
     case InstructionSet::kArm64:
-    case InstructionSet::kMips:
-    case InstructionSet::kMips64:
     case InstructionSet::kX86:
     case InstructionSet::kX86_64:
       break;
@@ -2332,7 +2421,7 @@
     return;
   }
   if (!OS::FileExists(profile_output_filename.c_str(), /*check_file_type=*/ false)) {
-    LOG(WARNING) << "JIT profile information will not be recorded: profile file does not exits.";
+    LOG(WARNING) << "JIT profile information will not be recorded: profile file does not exist.";
     return;
   }
   if (code_paths.empty()) {
@@ -2348,14 +2437,14 @@
   return !preinitialization_transactions_.empty() && !GetTransaction()->IsRollingBack();
 }
 
-void Runtime::EnterTransactionMode() {
-  DCHECK(IsAotCompiler());
-  DCHECK(!IsActiveTransaction());
-  preinitialization_transactions_.push_back(std::make_unique<Transaction>());
-}
-
 void Runtime::EnterTransactionMode(bool strict, mirror::Class* root) {
   DCHECK(IsAotCompiler());
+  if (preinitialization_transactions_.empty()) {  // Top-level transaction?
+    // Make initialized classes visibly initialized now. If that happened during the transaction
+    // and then the transaction was aborted, we would roll back the status update but not the
+    // ClassLinker's bookkeeping structures, so these classes would never be visibly initialized.
+    GetClassLinker()->MakeInitializedClassesVisiblyInitialized(Thread::Current(), /*wait=*/ true);
+  }
   preinitialization_transactions_.push_back(std::make_unique<Transaction>(strict, root));
 }
 
@@ -2650,14 +2739,20 @@
 }
 
 bool Runtime::IsAsyncDeoptimizeable(uintptr_t code) const {
+  if (OatQuickMethodHeader::NterpMethodHeader != nullptr) {
+    if (OatQuickMethodHeader::NterpMethodHeader->Contains(code)) {
+      return true;
+    }
+  }
   // We only support async deopt (ie the compiled code is not explicitly asking for
   // deopt, but something else like the debugger) in debuggable JIT code.
   // We could look at the oat file where `code` is being defined,
   // and check whether it's been compiled debuggable, but we decided to
   // only rely on the JIT for debuggable apps.
-  return IsJavaDebuggable() &&
-      GetJit() != nullptr &&
-      GetJit()->GetCodeCache()->ContainsPc(reinterpret_cast<const void*>(code));
+  // The JIT-zygote is not debuggable so we need to be sure to exclude code from the non-private
+  // region as well.
+  return IsJavaDebuggable() && GetJit() != nullptr &&
+         GetJit()->GetCodeCache()->PrivateRegionContainsPc(reinterpret_cast<const void*>(code));
 }
 
 LinearAlloc* Runtime::CreateLinearAlloc() {
@@ -2749,6 +2844,20 @@
           !m.IsProxyMethod()) {
         instrumentation_->UpdateMethodsCodeForJavaDebuggable(&m, GetQuickToInterpreterBridge());
       }
+
+      if (Runtime::Current()->GetJit() != nullptr &&
+          Runtime::Current()->GetJit()->GetCodeCache()->IsInZygoteExecSpace(code) &&
+          !m.IsNative()) {
+        DCHECK(!m.IsProxyMethod());
+        instrumentation_->UpdateMethodsCodeForJavaDebuggable(&m, GetQuickToInterpreterBridge());
+      }
+
+      if (m.IsPreCompiled()) {
+        // Precompilation is incompatible with debuggable, so clear the flag
+        // and update the entrypoint in case it has been compiled.
+        m.ClearPreCompiled();
+        instrumentation_->UpdateMethodsCodeForJavaDebuggable(&m, GetQuickToInterpreterBridge());
+      }
     }
     return true;
   }
@@ -2771,10 +2880,31 @@
     GetClassLinker()->VisitClasses(&visitor);
     jit::Jit* jit = GetJit();
     if (jit != nullptr) {
-      // Code JITted by the zygote is not compiled debuggable.
-      jit->GetCodeCache()->ClearEntryPointsInZygoteExecSpace();
+      // Code previously compiled may not be compiled debuggable.
+      jit->GetCodeCache()->TransitionToDebuggable();
     }
   }
+  // Also de-quicken all -quick opcodes. We do this for both BCP and non-bcp so if we are swapping
+  // debuggable during startup by a plugin (eg JVMTI) even non-BCP code has its vdex files deopted.
+  std::unordered_set<const VdexFile*> vdexs;
+  GetClassLinker()->VisitKnownDexFiles(Thread::Current(), [&](const art::DexFile* df) {
+    const OatDexFile* odf = df->GetOatDexFile();
+    if (odf == nullptr) {
+      return;
+    }
+    const OatFile* of = odf->GetOatFile();
+    if (of == nullptr || of->IsDebuggable()) {
+      // no Oat or already debuggable so no -quick.
+      return;
+    }
+    vdexs.insert(of->GetVdexFile());
+  });
+  LOG(INFO) << "Unquickening " << vdexs.size() << " vdex files!";
+  for (const VdexFile* vf : vdexs) {
+    vf->AllowWriting(true);
+    vf->UnquickenInPlace(/*decompile_return_instruction=*/true);
+    vf->AllowWriting(false);
+  }
 }
 
 Runtime::ScopedThreadPoolUsage::ScopedThreadPoolUsage()
@@ -2817,6 +2947,58 @@
   }
 }
 
+void Runtime::ResetStartupCompleted() {
+  startup_completed_.store(false, std::memory_order_seq_cst);
+}
+
+class Runtime::NotifyStartupCompletedTask : public gc::HeapTask {
+ public:
+  NotifyStartupCompletedTask() : gc::HeapTask(/*target_run_time=*/ NanoTime()) {}
+
+  void Run(Thread* self) override {
+    VLOG(startup) << "NotifyStartupCompletedTask running";
+    Runtime* const runtime = Runtime::Current();
+    {
+      ScopedTrace trace("Releasing app image spaces metadata");
+      ScopedObjectAccess soa(Thread::Current());
+      for (gc::space::ContinuousSpace* space : runtime->GetHeap()->GetContinuousSpaces()) {
+        if (space->IsImageSpace()) {
+          gc::space::ImageSpace* image_space = space->AsImageSpace();
+          if (image_space->GetImageHeader().IsAppImage()) {
+            image_space->DisablePreResolvedStrings();
+          }
+        }
+      }
+      // Request empty checkpoints to make sure no threads are accessing the image space metadata
+      // section when we madvise it. Use GC exclusion to prevent deadlocks that may happen if
+      // multiple threads are attempting to run empty checkpoints at the same time.
+      {
+        // Avoid using ScopedGCCriticalSection since that does not allow thread suspension. This is
+        // not allowed to prevent allocations, but it's still safe to suspend temporarily for the
+        // checkpoint.
+        gc::ScopedInterruptibleGCCriticalSection sigcs(self,
+                                                       gc::kGcCauseRunEmptyCheckpoint,
+                                                       gc::kCollectorTypeCriticalSection);
+        runtime->GetThreadList()->RunEmptyCheckpoint();
+      }
+      for (gc::space::ContinuousSpace* space : runtime->GetHeap()->GetContinuousSpaces()) {
+        if (space->IsImageSpace()) {
+          gc::space::ImageSpace* image_space = space->AsImageSpace();
+          if (image_space->GetImageHeader().IsAppImage()) {
+            image_space->ReleaseMetadata();
+          }
+        }
+      }
+    }
+
+    {
+      // Delete the thread pool used for app image loading since startup is assumed to be completed.
+      ScopedTrace trace2("Delete thread pool");
+      runtime->DeleteThreadPool();
+    }
+  }
+};
+
 void Runtime::NotifyStartupCompleted() {
   bool expected = false;
   if (!startup_completed_.compare_exchange_strong(expected, true, std::memory_order_seq_cst)) {
@@ -2824,66 +3006,63 @@
     // once externally. For this reason there are no asserts.
     return;
   }
-  VLOG(startup) << "Startup completed notified";
 
-  {
-    ScopedTrace trace("Releasing app image spaces metadata");
-    ScopedObjectAccess soa(Thread::Current());
-    for (gc::space::ContinuousSpace* space : GetHeap()->GetContinuousSpaces()) {
-      if (space->IsImageSpace()) {
-        gc::space::ImageSpace* image_space = space->AsImageSpace();
-        if (image_space->GetImageHeader().IsAppImage()) {
-          image_space->DisablePreResolvedStrings();
-        }
-      }
-    }
-    // Request empty checkpoint to make sure no threads are accessing the section when we madvise
-    // it. Avoid using RunEmptyCheckpoint since only one concurrent caller is supported. We could
-    // add a GC critical section here but that may cause significant jank if the GC is running.
-    {
-      class EmptyClosure : public Closure {
-       public:
-        explicit EmptyClosure(Barrier* barrier) : barrier_(barrier) {}
-        void Run(Thread* thread ATTRIBUTE_UNUSED) override {
-          barrier_->Pass(Thread::Current());
-        }
-
-       private:
-        Barrier* const barrier_;
-      };
-      Barrier barrier(0);
-      EmptyClosure closure(&barrier);
-      size_t threads_running_checkpoint = GetThreadList()->RunCheckpoint(&closure);
-      // Now that we have run our checkpoint, move to a suspended state and wait
-      // for other threads to run the checkpoint.
-      Thread* self = Thread::Current();
-      ScopedThreadSuspension sts(self, kSuspended);
-      if (threads_running_checkpoint != 0) {
-        barrier.Increment(self, threads_running_checkpoint);
-      }
-    }
-    for (gc::space::ContinuousSpace* space : GetHeap()->GetContinuousSpaces()) {
-      if (space->IsImageSpace()) {
-        gc::space::ImageSpace* image_space = space->AsImageSpace();
-        if (image_space->GetImageHeader().IsAppImage()) {
-          image_space->ReleaseMetadata();
-        }
-      }
-    }
+  VLOG(startup) << "Adding NotifyStartupCompleted task";
+  // Use the heap task processor since we want to be exclusive with the GC and we don't want to
+  // block the caller if the GC is running.
+  if (!GetHeap()->AddHeapTask(new NotifyStartupCompletedTask)) {
+    VLOG(startup) << "Failed to add NotifyStartupCompletedTask";
   }
 
   // Notify the profiler saver that startup is now completed.
   ProfileSaver::NotifyStartupCompleted();
-
-  {
-    // Delete the thread pool used for app image loading startup is completed.
-    ScopedTrace trace2("Delete thread pool");
-    DeleteThreadPool();
-  }
 }
 
 bool Runtime::GetStartupCompleted() const {
   return startup_completed_.load(std::memory_order_seq_cst);
 }
 
+void Runtime::SetSignalHookDebuggable(bool value) {
+  SkipAddSignalHandler(value);
+}
+
+void Runtime::SetJniIdType(JniIdType t) {
+  CHECK(CanSetJniIdType()) << "Not allowed to change id type!";
+  if (t == GetJniIdType()) {
+    return;
+  }
+  jni_ids_indirection_ = t;
+  JNIEnvExt::ResetFunctionTable();
+  WellKnownClasses::HandleJniIdTypeChange(Thread::Current()->GetJniEnv());
+}
+
+bool Runtime::GetOatFilesExecutable() const {
+  return !IsAotCompiler() && !(IsSystemServer() && jit_options_->GetSaveProfilingInfo());
+}
+
+void Runtime::ProcessWeakClass(GcRoot<mirror::Class>* root_ptr,
+                               IsMarkedVisitor* visitor,
+                               mirror::Class* update) {
+  // This does not need a read barrier because this is called by GC.
+  mirror::Class* cls = root_ptr->Read<kWithoutReadBarrier>();
+  if (cls != nullptr && cls != GetWeakClassSentinel()) {
+    DCHECK((cls->IsClass<kDefaultVerifyFlags>()));
+    // Look at the classloader of the class to know if it has been unloaded.
+    // This does not need a read barrier because this is called by GC.
+    ObjPtr<mirror::Object> class_loader =
+        cls->GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>();
+    if (class_loader == nullptr || visitor->IsMarked(class_loader.Ptr()) != nullptr) {
+      // The class loader is live, update the entry if the class has moved.
+      mirror::Class* new_cls = down_cast<mirror::Class*>(visitor->IsMarked(cls));
+      // Note that new_object can be null for CMS and newly allocated objects.
+      if (new_cls != nullptr && new_cls != cls) {
+        *root_ptr = GcRoot<mirror::Class>(new_cls);
+      }
+    } else {
+      // The class loader is not live, clear the entry.
+      *root_ptr = GcRoot<mirror::Class>(update);
+    }
+  }
+}
+
 }  // namespace art
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 6df9e3e..822c0ac 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -27,10 +27,10 @@
 #include <memory>
 #include <vector>
 
-#include "base/file_utils.h"
 #include "base/locks.h"
 #include "base/macros.h"
 #include "base/mem_map.h"
+#include "base/string_view_cpp20.h"
 #include "deoptimization_kind.h"
 #include "dex/dex_file_types.h"
 #include "experimental_flags.h"
@@ -38,10 +38,13 @@
 #include "gc_root.h"
 #include "instrumentation.h"
 #include "jdwp_provider.h"
+#include "jni/jni_id_manager.h"
+#include "jni_id_type.h"
 #include "obj_ptr.h"
 #include "offsets.h"
 #include "process_state.h"
 #include "quick/quick_method_frame_info.h"
+#include "reflective_value_visitor.h"
 #include "runtime_stats.h"
 
 namespace art {
@@ -84,6 +87,7 @@
 enum class CalleeSaveType: uint32_t;
 class ClassLinker;
 class CompilerCallbacks;
+class Dex2oatImageTest;
 class DexFile;
 enum class InstructionSet;
 class InternTable;
@@ -124,6 +128,9 @@
   static bool Create(const RuntimeOptions& raw_options, bool ignore_unrecognized)
       SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
 
+  bool EnsurePluginLoaded(const char* plugin_name, std::string* error_msg);
+  bool EnsurePerfettoPlugin(std::string* error_msg);
+
   // IsAotCompiler for compilers that don't have a running runtime. Only dex2oat currently.
   bool IsAotCompiler() const {
     return !UseJitCompilation() && IsCompiler();
@@ -164,12 +171,25 @@
     return is_zygote_;
   }
 
+  bool IsPrimaryZygote() const {
+    return is_primary_zygote_;
+  }
+
   bool IsSystemServer() const {
     return is_system_server_;
   }
 
-  void SetSystemServer(bool value) {
-    is_system_server_ = value;
+  void SetAsSystemServer() {
+    is_system_server_ = true;
+    is_zygote_ = false;
+    is_primary_zygote_ = false;
+  }
+
+  void SetAsZygoteChild(bool is_system_server, bool is_zygote) {
+    // System server should have been set earlier in SetAsSystemServer.
+    CHECK_EQ(is_system_server_, is_system_server);
+    is_zygote_ = is_zygote;
+    is_primary_zygote_ = false;
   }
 
   bool IsExplicitGcDisabled() const {
@@ -194,10 +214,6 @@
     return image_location_;
   }
 
-  bool IsUsingApexBootImageLocation() const {
-    return is_using_apex_boot_image_location_;
-  }
-
   // Starts a runtime, which may cause threads to be started and code to run.
   bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);
 
@@ -276,6 +292,10 @@
     return class_linker_;
   }
 
+  jni::JniIdManager* GetJniIdManager() const {
+    return jni_id_manager_.get();
+  }
+
   size_t GetDefaultStackSize() const {
     return default_stack_size_;
   }
@@ -376,6 +396,17 @@
   void SweepSystemWeaks(IsMarkedVisitor* visitor)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Walk all reflective objects and visit their targets as well as any method/fields held by the
+  // runtime threads that are marked as being reflective.
+  void VisitReflectiveTargets(ReflectiveValueVisitor* visitor) REQUIRES(Locks::mutator_lock_);
+  // Helper for visiting reflective targets with lambdas for both field and method reflective
+  // targets.
+  template <typename FieldVis, typename MethodVis>
+  void VisitReflectiveTargets(FieldVis&& fv, MethodVis&& mv) REQUIRES(Locks::mutator_lock_) {
+    FunctionReflectiveValueVisitor frvv(fv, mv);
+    VisitReflectiveTargets(&frvv);
+  }
+
   // Returns a special method that calls into a trampoline for runtime method resolution
   ArtMethod* GetResolutionMethod();
 
@@ -402,7 +433,7 @@
     imt_conflict_method_ = nullptr;
   }
 
-  void FixupConflictTables();
+  void FixupConflictTables() REQUIRES_SHARED(Locks::mutator_lock_);
   void SetImtConflictMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
   void SetImtUnimplementedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -442,7 +473,7 @@
 
   ArtMethod* CreateCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);
 
-  int32_t GetStat(int kind);
+  uint64_t GetStat(int kind);
 
   RuntimeStats* GetStats() {
     return &stats_;
@@ -466,6 +497,10 @@
     return jit_.get();
   }
 
+  jit::JitCodeCache* GetJitCodeCache() const {
+    return jit_code_cache_.get();
+  }
+
   // Returns true if JIT compilations are enabled. GetJit() will be not null in this case.
   bool UseJitCompilation() const;
 
@@ -474,6 +509,7 @@
   void InitNonZygoteOrPostFork(
       JNIEnv* env,
       bool is_system_server,
+      bool is_child_zygote,
       NativeBridgeAction action,
       const char* isa,
       bool profile_system_server = false);
@@ -491,7 +527,6 @@
 
   // Transaction support.
   bool IsActiveTransaction() const;
-  void EnterTransactionMode();
   void EnterTransactionMode(bool strict, mirror::Class* root);
   void ExitTransactionMode();
   void RollbackAllTransactions() REQUIRES_SHARED(Locks::mutator_lock_);
@@ -565,6 +600,14 @@
     return core_platform_api_policy_;
   }
 
+  void SetTestApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
+    test_api_policy_ = policy;
+  }
+
+  hiddenapi::EnforcementPolicy GetTestApiEnforcementPolicy() const {
+    return test_api_policy_;
+  }
+
   void SetHiddenApiExemptions(const std::vector<std::string>& exemptions) {
     hidden_api_exemptions_ = exemptions;
   }
@@ -633,6 +676,19 @@
     return target_sdk_version_;
   }
 
+  void SetDisabledCompatChanges(const std::set<uint64_t>& disabled_changes) {
+    disabled_compat_changes_ = disabled_changes;
+  }
+
+  std::set<uint64_t> GetDisabledCompatChanges() const {
+    return disabled_compat_changes_;
+  }
+
+  bool isChangeEnabled(uint64_t change_id) const {
+    // TODO(145743810): add an up call to java to log to statsd
+    return disabled_compat_changes_.count(change_id) == 0;
+  }
+
   uint32_t GetZygoteMaxFailedBoots() const {
     return zygote_max_failed_boots_;
   }
@@ -670,6 +726,14 @@
     return is_java_debuggable_;
   }
 
+  void SetProfileableFromShell(bool value) {
+    is_profileable_from_shell_ = value;
+  }
+
+  bool IsProfileableFromShell() const {
+    return is_profileable_from_shell_;
+  }
+
   void SetJavaDebuggable(bool value);
 
   // Deoptimize the boot image, called for Java debuggable apps.
@@ -683,6 +747,8 @@
     is_native_debuggable_ = value;
   }
 
+  void SetSignalHookDebuggable(bool value);
+
   bool AreNonStandardExitsEnabled() const {
     return non_standard_exits_enabled_;
   }
@@ -716,11 +782,25 @@
   }
 
   // Called from class linker.
-  void SetSentinel(mirror::Object* sentinel) REQUIRES_SHARED(Locks::mutator_lock_);
+  void SetSentinel(ObjPtr<mirror::Object> sentinel) REQUIRES_SHARED(Locks::mutator_lock_);
   // For testing purpose only.
   // TODO: Remove this when this is no longer needed (b/116087961).
   GcRoot<mirror::Object> GetSentinel() REQUIRES_SHARED(Locks::mutator_lock_);
 
+
+  // Use a sentinel for marking entries in a table that have been cleared.
+  // This helps diagnosing in case code tries to wrongly access such
+  // entries.
+  static mirror::Class* GetWeakClassSentinel() {
+    return reinterpret_cast<mirror::Class*>(0xebadbeef);
+  }
+
+  // Helper for the GC to process a weak class in a table.
+  static void ProcessWeakClass(GcRoot<mirror::Class>* root_ptr,
+                               IsMarkedVisitor* visitor,
+                               mirror::Class* update)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   // Create a normal LinearAlloc or low 4gb version if we are 64 bit AOT compiler.
   LinearAlloc* CreateLinearAlloc();
 
@@ -831,6 +911,18 @@
     return jdwp_provider_;
   }
 
+  JniIdType GetJniIdType() const {
+    return jni_ids_indirection_;
+  }
+
+  bool CanSetJniIdType() const {
+    return GetJniIdType() == JniIdType::kSwapablePointer;
+  }
+
+  // Changes the JniIdType to the given type. Only allowed if CanSetJniIdType(). All threads must be
+  // suspended to call this function.
+  void SetJniIdType(JniIdType t);
+
   uint32_t GetVerifierLoggingThresholdMs() const {
     return verifier_logging_threshold_ms_;
   }
@@ -865,6 +957,10 @@
     load_app_image_startup_cache_ = enabled;
   }
 
+  // Reset the startup completed status so that we can call NotifyStartupCompleted again. Should
+  // only be used for testing.
+  void ResetStartupCompleted();
+
   // Notify the runtime that application startup is considered completed. Only has effect for the
   // first call.
   void NotifyStartupCompleted();
@@ -876,6 +972,17 @@
     return image_space_loading_order_;
   }
 
+  bool IsVerifierMissingKThrowFatal() const {
+    return verifier_missing_kthrow_fatal_;
+  }
+
+  bool IsPerfettoHprofEnabled() const {
+    return perfetto_hprof_enabled_;
+  }
+
+  // Return true if we should load oat files as executable or not.
+  bool GetOatFilesExecutable() const;
+
  private:
   static void InitPlatformSignalHandlers();
 
@@ -945,6 +1052,7 @@
 
   CompilerCallbacks* compiler_callbacks_;
   bool is_zygote_;
+  bool is_primary_zygote_;
   bool is_system_server_;
   bool must_relocate_;
   bool is_concurrent_gc_enabled_;
@@ -955,7 +1063,6 @@
   std::vector<std::string> compiler_options_;
   std::vector<std::string> image_compiler_options_;
   std::string image_location_;
-  bool is_using_apex_boot_image_location_;
 
   std::vector<std::string> boot_class_path_;
   std::vector<std::string> boot_class_path_locations_;
@@ -997,6 +1104,8 @@
 
   SignalCatcher* signal_catcher_;
 
+  std::unique_ptr<jni::JniIdManager> jni_id_manager_;
+
   std::unique_ptr<JavaVMExt> java_vm_;
 
   std::unique_ptr<jit::Jit> jit_;
@@ -1073,6 +1182,9 @@
   // Specifies target SDK version to allow workarounds for certain API levels.
   uint32_t target_sdk_version_;
 
+  // A set of disabled compat changes for the running app, all other changes are enabled.
+  std::set<uint64_t> disabled_compat_changes_;
+
   // Implicit checks flags.
   bool implicit_null_checks_;       // NullPointer checks are implicit.
   bool implicit_so_checks_;         // StackOverflow checks are implicit.
@@ -1111,6 +1223,8 @@
   // Whether Java code needs to be debuggable.
   bool is_java_debuggable_;
 
+  bool is_profileable_from_shell_ = false;
+
   // The maximum number of failed boots we allow before pruning the dalvik cache
   // and trying again. This option is only inspected when we're running as a
   // zygote.
@@ -1144,6 +1258,9 @@
   // Whether access checks on core platform API should be performed.
   hiddenapi::EnforcementPolicy core_platform_api_policy_;
 
+  // Whether access checks on test API should be performed.
+  hiddenapi::EnforcementPolicy test_api_policy_;
+
   // List of signature prefixes of methods that have been removed from the blacklist, and treated
   // as if whitelisted.
   std::vector<std::string> hidden_api_exemptions_;
@@ -1180,6 +1297,14 @@
   // The jdwp provider we were configured with.
   JdwpProvider jdwp_provider_;
 
+  // True if jmethodID and jfieldID are opaque indices. When false (the default) these are simply
+  // pointers. This is set by -Xopaque-jni-ids:{true,false}.
+  JniIdType jni_ids_indirection_;
+
+  // Set to false in cases where we want to directly control when jni-id
+  // indirection is changed. This is intended only for testing JNI id swapping.
+  bool automatically_set_jni_ids_indirection_;
+
   // Saved environment.
   class EnvSnapshot {
    public:
@@ -1214,10 +1339,15 @@
   gc::space::ImageSpaceLoadingOrder image_space_loading_order_ =
       gc::space::ImageSpaceLoadingOrder::kSystemFirst;
 
+  bool verifier_missing_kthrow_fatal_;
+  bool perfetto_hprof_enabled_;
+
   // Note: See comments on GetFaultMessage.
   friend std::string GetFaultMessageForAbortLogging();
+  friend class Dex2oatImageTest;
   friend class ScopedThreadPoolUsage;
   friend class OatFileAssistantTest;
+  class NotifyStartupCompletedTask;
 
   DISALLOW_COPY_AND_ASSIGN(Runtime);
 };
diff --git a/runtime/runtime_callbacks.cc b/runtime/runtime_callbacks.cc
index 40976c2..e0f57c0 100644
--- a/runtime/runtime_callbacks.cc
+++ b/runtime/runtime_callbacks.cc
@@ -228,6 +228,19 @@
   }
 }
 
+void RuntimeCallbacks::EndDefineClass() {
+  for (ClassLoadCallback* cb : COPY(class_callbacks_)) {
+    cb->EndDefineClass();
+  }
+}
+
+void RuntimeCallbacks::BeginDefineClass() {
+  for (ClassLoadCallback* cb : COPY(class_callbacks_)) {
+    cb->BeginDefineClass();
+  }
+}
+
+
 void RuntimeCallbacks::ClassPreDefine(const char* descriptor,
                                       Handle<mirror::Class> temp_class,
                                       Handle<mirror::ClassLoader> loader,
@@ -319,4 +332,20 @@
   }
 }
 
+void RuntimeCallbacks::AddReflectiveValueVisitCallback(ReflectiveValueVisitCallback *cb) {
+  WriterMutexLock mu(Thread::Current(), *callback_lock_);
+  reflective_value_visit_callbacks_.push_back(cb);
+}
+
+void RuntimeCallbacks::RemoveReflectiveValueVisitCallback(ReflectiveValueVisitCallback *cb) {
+  WriterMutexLock mu(Thread::Current(), *callback_lock_);
+  Remove(cb, &reflective_value_visit_callbacks_);
+}
+
+void RuntimeCallbacks::VisitReflectiveTargets(ReflectiveValueVisitor *visitor) {
+  for (ReflectiveValueVisitCallback* cb : COPY(reflective_value_visit_callbacks_)) {
+    cb->VisitReflectiveTargets(visitor);
+  }
+}
+
 }  // namespace art
diff --git a/runtime/runtime_callbacks.h b/runtime/runtime_callbacks.h
index fe7bb0c..3cadd97 100644
--- a/runtime/runtime_callbacks.h
+++ b/runtime/runtime_callbacks.h
@@ -44,6 +44,7 @@
 class Monitor;
 class ReaderWriterMutex;
 class ThreadLifecycleCallback;
+class ReflectiveValueVisitor;
 
 // Note: RuntimeCallbacks uses the mutator lock to synchronize the callback lists. A thread must
 //       hold the exclusive lock to add or remove a listener. A thread must hold the shared lock
@@ -156,6 +157,17 @@
   virtual bool MethodNeedsDebugVersion(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
 };
 
+// Callback to let something request to be notified when reflective objects are being visited and
+// updated to update any bare ArtMethod/ArtField pointers it might have.
+class ReflectiveValueVisitCallback {
+ public:
+  virtual ~ReflectiveValueVisitCallback() {}
+
+  // Called when something visits all reflective values with the update visitor.
+  virtual void VisitReflectiveTargets(ReflectiveValueVisitor* visitor)
+      REQUIRES(Locks::mutator_lock_) = 0;
+};
+
 class RuntimeCallbacks {
  public:
   RuntimeCallbacks();
@@ -169,6 +181,8 @@
   void AddClassLoadCallback(ClassLoadCallback* cb) REQUIRES(Locks::mutator_lock_);
   void RemoveClassLoadCallback(ClassLoadCallback* cb) REQUIRES(Locks::mutator_lock_);
 
+  void BeginDefineClass() REQUIRES_SHARED(Locks::mutator_lock_);
+  void EndDefineClass() REQUIRES_SHARED(Locks::mutator_lock_);
   void ClassLoad(Handle<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
   void ClassPrepare(Handle<mirror::Class> temp_klass, Handle<mirror::Class> klass)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -257,6 +271,13 @@
   void RemoveDebuggerControlCallback(DebuggerControlCallback* cb)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  void VisitReflectiveTargets(ReflectiveValueVisitor* visitor) REQUIRES(Locks::mutator_lock_);
+
+  void AddReflectiveValueVisitCallback(ReflectiveValueVisitCallback* cb)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+  void RemoveReflectiveValueVisitCallback(ReflectiveValueVisitCallback* cb)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
  private:
   std::unique_ptr<ReaderWriterMutex> callback_lock_ BOTTOM_MUTEX_ACQUIRED_AFTER;
 
@@ -280,6 +301,8 @@
       GUARDED_BY(callback_lock_);
   std::vector<DebuggerControlCallback*> debugger_control_callbacks_
       GUARDED_BY(callback_lock_);
+  std::vector<ReflectiveValueVisitCallback*> reflective_value_visit_callbacks_
+      GUARDED_BY(callback_lock_);
 };
 
 }  // namespace art
diff --git a/runtime/runtime_common.cc b/runtime/runtime_common.cc
index 5676577..c4a695f 100644
--- a/runtime/runtime_common.cc
+++ b/runtime/runtime_common.cc
@@ -281,7 +281,6 @@
   DumpArmStatusRegister(os, context.pstate);
   os << '\n';
 #else
-  // TODO: Add support for MIPS32 and MIPS64.
   os << "Unknown architecture/word size/OS in ucontext dump";
 #endif
 }
@@ -376,6 +375,12 @@
   return  (runtime != nullptr) ? runtime->GetFaultMessage() : "";
 }
 
+static std::atomic<bool> gIsRuntimeAbort = false;
+
+void FlagRuntimeAbort() {
+  gIsRuntimeAbort = true;
+}
+
 static void HandleUnexpectedSignalCommonDump(int signal_number,
                                              siginfo_t* info,
                                              void* raw_context,
@@ -444,6 +449,11 @@
                                   void* raw_context,
                                   bool handle_timeout_signal,
                                   bool dump_on_stderr) {
+  bool runtime_abort = gIsRuntimeAbort.exchange(false);
+  if (runtime_abort) {
+    return;
+  }
+
   // Local _static_ storing the currently handled signal (or -1).
   static int handling_unexpected_signal = -1;
 
diff --git a/runtime/runtime_common.h b/runtime/runtime_common.h
index 698d060..925594e 100644
--- a/runtime/runtime_common.h
+++ b/runtime/runtime_common.h
@@ -77,6 +77,8 @@
                                       struct sigaction* oldact,
                                       bool handle_timeout_signal);
 
+void FlagRuntimeAbort();
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_RUNTIME_COMMON_H_
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 4488680..5707a33 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -35,6 +35,7 @@
 
 // Parse-able keys from the command line.
 RUNTIME_OPTIONS_KEY (Unit,                Zygote)
+RUNTIME_OPTIONS_KEY (Unit,                PrimaryZygote)
 RUNTIME_OPTIONS_KEY (Unit,                Help)
 RUNTIME_OPTIONS_KEY (Unit,                ShowVersion)
 RUNTIME_OPTIONS_KEY (ParseStringList<':'>,BootClassPath)           // std::vector<std::string>
@@ -51,6 +52,7 @@
 RUNTIME_OPTIONS_KEY (MemoryKiB,           HeapMinFree,                    gc::Heap::kDefaultMinFree)
 RUNTIME_OPTIONS_KEY (MemoryKiB,           HeapMaxFree,                    gc::Heap::kDefaultMaxFree)
 RUNTIME_OPTIONS_KEY (MemoryKiB,           NonMovingSpaceCapacity,         gc::Heap::kDefaultNonMovingSpaceCapacity)
+RUNTIME_OPTIONS_KEY (MemoryKiB,           StopForNativeAllocs,            1 * GB)
 RUNTIME_OPTIONS_KEY (double,              HeapTargetUtilization,          gc::Heap::kDefaultTargetUtilization)
 RUNTIME_OPTIONS_KEY (double,              ForegroundHeapGrowthMultiplier, gc::Heap::kDefaultHeapGrowthMultiplier)
 RUNTIME_OPTIONS_KEY (unsigned int,        ParallelGCThreads,              0u)
@@ -73,8 +75,11 @@
 RUNTIME_OPTIONS_KEY (bool,                UseTLAB,                        (kUseTlab || kUseReadBarrier))
 RUNTIME_OPTIONS_KEY (bool,                EnableHSpaceCompactForOOM,      true)
 RUNTIME_OPTIONS_KEY (bool,                UseJitCompilation,              true)
+RUNTIME_OPTIONS_KEY (bool,                UseTieredJitCompilation,        interpreter::IsNterpSupported())
 RUNTIME_OPTIONS_KEY (bool,                DumpNativeStackOnSigQuit,       true)
 RUNTIME_OPTIONS_KEY (bool,                MadviseRandomAccess,            false)
+RUNTIME_OPTIONS_KEY (JniIdType,           OpaqueJniIds,                   JniIdType::kDefault)  // -Xopaque-jni-ids:{true, false, swapable}
+RUNTIME_OPTIONS_KEY (bool,                AutoPromoteOpaqueJniIds,        true)  // testing use only. -Xauto-promote-opaque-jni-ids:{true, false}
 RUNTIME_OPTIONS_KEY (unsigned int,        JITCompileThreshold)
 RUNTIME_OPTIONS_KEY (unsigned int,        JITWarmupThreshold)
 RUNTIME_OPTIONS_KEY (unsigned int,        JITOsrThreshold)
@@ -141,7 +146,7 @@
 RUNTIME_OPTIONS_KEY (std::vector<std::unique_ptr<const DexFile>>*, \
                                           BootClassPathDexList)
 RUNTIME_OPTIONS_KEY (InstructionSet,      ImageInstructionSet,            kRuntimeISA)
-RUNTIME_OPTIONS_KEY (CompilerCallbacks*,  CompilerCallbacksPtr)  // TDOO: make unique_ptr
+RUNTIME_OPTIONS_KEY (CompilerCallbacks*,  CompilerCallbacksPtr)  // TODO: make unique_ptr
 RUNTIME_OPTIONS_KEY (bool (*)(),          HookIsSensitiveThread)
 RUNTIME_OPTIONS_KEY (int32_t (*)(FILE* stream, const char* format, va_list ap), \
                                           HookVfprintf,                   vfprintf)
@@ -166,5 +171,19 @@
                      gc::space::ImageSpaceLoadingOrder::kSystemFirst)
 
 RUNTIME_OPTIONS_KEY (bool,                FastClassNotFoundException,     true)
+RUNTIME_OPTIONS_KEY (bool,                VerifierMissingKThrowFatal,     true)
+
+// Whether to allow loading of the perfetto hprof plugin.
+// Even with this option set, we will still only actually load the plugin
+// if we are on a userdebug build or the app is debuggable or profileable.
+//
+// We do not want to enable this by default because PerfettoHprof does not
+// work on host, and we do not want to enable it in tests.
+//
+// Switching this on adds ~500us to the startup on userdebug builds, or for
+// profileable / debuggable apps.
+//
+// This is set to true in frameworks/base/core/jni/AndroidRuntime.cpp.
+RUNTIME_OPTIONS_KEY (bool,                PerfettoHprof,                  false)
 
 #undef RUNTIME_OPTIONS_KEY
diff --git a/runtime/runtime_options.h b/runtime/runtime_options.h
index 19ec75e..abc1fc0 100644
--- a/runtime/runtime_options.h
+++ b/runtime/runtime_options.h
@@ -29,7 +29,6 @@
 #include "gc/space/image_space_loading_order.h"
 #include "gc/space/large_object_space.h"
 #include "hidden_api.h"
-#include "jdwp/jdwp.h"
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
 #include "jit/profile_saver_options.h"
diff --git a/runtime/runtime_test.cc b/runtime/runtime_test.cc
index 282e430..3fe281b 100644
--- a/runtime/runtime_test.cc
+++ b/runtime/runtime_test.cc
@@ -14,10 +14,13 @@
  * limitations under the License.
  */
 
+#include "common_runtime_test.h"
+
+#include <thread>
+
 #include "android-base/logging.h"
 #include "base/locks.h"
 #include "base/mutex.h"
-#include "common_runtime_test.h"
 #include "runtime.h"
 #include "thread-current-inl.h"
 
@@ -54,4 +57,22 @@
   }, kDeathRegex);
 }
 
+TEST_F(RuntimeTest, AbortFromUnattachedThread) {
+  // This assumes the test is run single-threaded: do not start the runtime to avoid daemon threads.
+
+  constexpr const char* kDeathRegex = "Going down";
+  ASSERT_EXIT({
+    // The regex only works if we can ensure output goes to stderr.
+    android::base::SetLogger(android::base::StderrLogger);
+
+    Thread::Current()->TransitionFromSuspendedToRunnable();
+    runtime_->Start();
+
+    std::thread t([]() {
+      LOG(FATAL) << "Going down";
+    });
+    t.join();
+  }, ::testing::KilledBySignal(SIGABRT), kDeathRegex);
+}
+
 }  // namespace art
diff --git a/runtime/scoped_thread_state_change-inl.h b/runtime/scoped_thread_state_change-inl.h
index 2541ab5..04be224 100644
--- a/runtime/scoped_thread_state_change-inl.h
+++ b/runtime/scoped_thread_state_change-inl.h
@@ -57,11 +57,7 @@
 
 inline ScopedThreadStateChange::~ScopedThreadStateChange() {
   if (UNLIKELY(self_ == nullptr)) {
-    if (!expected_has_no_thread_) {
-      Runtime* runtime = Runtime::Current();
-      bool shutting_down = (runtime == nullptr) || runtime->IsShuttingDown(nullptr);
-      CHECK(shutting_down);
-    }
+    ScopedThreadChangeDestructorCheck();
   } else {
     if (old_thread_state_ != thread_state_) {
       if (old_thread_state_ == kRunnable) {
diff --git a/runtime/scoped_thread_state_change.cc b/runtime/scoped_thread_state_change.cc
index ae833b4..4051c31 100644
--- a/runtime/scoped_thread_state_change.cc
+++ b/runtime/scoped_thread_state_change.cc
@@ -18,6 +18,7 @@
 
 #include <type_traits>
 
+#include "base/aborting.h"
 #include "base/casts.h"
 #include "jni/java_vm_ext.h"
 #include "mirror/object-inl.h"
@@ -38,4 +39,12 @@
   return vm_->ForceCopy();
 }
 
+void ScopedThreadStateChange::ScopedThreadChangeDestructorCheck() {
+  if (!expected_has_no_thread_) {
+    Runtime* runtime = Runtime::Current();
+    bool shutting_down = (runtime == nullptr) || runtime->IsShuttingDown(nullptr) || gAborting > 0;
+    CHECK(shutting_down);
+  }
+}
+
 }  // namespace art
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index b2ad90a..08cb5b4 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -58,6 +58,8 @@
   const ThreadState thread_state_ = kTerminated;
 
  private:
+  void ScopedThreadChangeDestructorCheck();
+
   ThreadState old_thread_state_ = kTerminated;
   const bool expected_has_no_thread_ = true;
 
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index 8da5fee..9b3de2a 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -27,6 +27,7 @@
 
 #include <sstream>
 
+#include <android-base/file.h>
 #include <android-base/stringprintf.h>
 
 #include "arch/instruction_set.h"
@@ -52,7 +53,7 @@
   // On Android, /proc/self/cmdline will have been rewritten to something like "system_server".
   // Note: The string "Cmd line:" is chosen to match the format used by debuggerd.
   std::string current_cmd_line;
-  if (ReadFileToString("/proc/self/cmdline", &current_cmd_line)) {
+  if (android::base::ReadFileToString("/proc/self/cmdline", &current_cmd_line)) {
     current_cmd_line.resize(current_cmd_line.find_last_not_of('\0') + 1);  // trim trailing '\0's
     std::replace(current_cmd_line.begin(), current_cmd_line.end(), '\0', ' ');
 
@@ -133,7 +134,7 @@
 
   if ((false)) {
     std::string maps;
-    if (ReadFileToString("/proc/self/maps", &maps)) {
+    if (android::base::ReadFileToString("/proc/self/maps", &maps)) {
       os << "/proc/self/maps:\n" << maps;
     }
   }
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 80a563b..58a73cc 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -15,6 +15,7 @@
  */
 
 #include "stack.h"
+#include <limits>
 
 #include "android-base/stringprintf.h"
 
@@ -37,7 +38,9 @@
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
+#include "nterp_helpers.h"
 #include "oat_quick_method_header.h"
+#include "obj_ptr-inl.h"
 #include "quick/quick_method_frame_info.h"
 #include "runtime.h"
 #include "thread.h"
@@ -68,6 +71,8 @@
       cur_oat_quick_method_header_(nullptr),
       num_frames_(num_frames),
       cur_depth_(0),
+      cur_inline_info_(nullptr, CodeInfo()),
+      cur_stack_map_(0, StackMap()),
       context_(context),
       check_suspended_(check_suspended) {
   if (check_suspended_) {
@@ -75,15 +80,34 @@
   }
 }
 
+CodeInfo* StackVisitor::GetCurrentInlineInfo() const {
+  DCHECK(!(*cur_quick_frame_)->IsNative());
+  const OatQuickMethodHeader* header = GetCurrentOatQuickMethodHeader();
+  if (cur_inline_info_.first != header) {
+    cur_inline_info_ = std::make_pair(header, CodeInfo::DecodeInlineInfoOnly(header));
+  }
+  return &cur_inline_info_.second;
+}
+
+StackMap* StackVisitor::GetCurrentStackMap() const {
+  DCHECK(!(*cur_quick_frame_)->IsNative());
+  const OatQuickMethodHeader* header = GetCurrentOatQuickMethodHeader();
+  if (cur_stack_map_.first != cur_quick_frame_pc_) {
+    uint32_t pc = header->NativeQuickPcOffset(cur_quick_frame_pc_);
+    cur_stack_map_ = std::make_pair(cur_quick_frame_pc_,
+                                    GetCurrentInlineInfo()->GetStackMapForNativePcOffset(pc));
+  }
+  return &cur_stack_map_.second;
+}
+
 ArtMethod* StackVisitor::GetMethod() const {
   if (cur_shadow_frame_ != nullptr) {
     return cur_shadow_frame_->GetMethod();
   } else if (cur_quick_frame_ != nullptr) {
     if (IsInInlinedFrame()) {
-      const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
-      CodeInfo code_info(method_header);
+      CodeInfo* code_info = GetCurrentInlineInfo();
       DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
-      return GetResolvedMethod(*GetCurrentQuickFrame(), code_info, current_inline_frames_);
+      return GetResolvedMethod(*GetCurrentQuickFrame(), *code_info, current_inline_frames_);
     } else {
       return *cur_quick_frame_;
     }
@@ -99,9 +123,16 @@
       return current_inline_frames_.back().GetDexPc();
     } else if (cur_oat_quick_method_header_ == nullptr) {
       return dex::kDexNoIndex;
-    } else {
+    } else if ((*GetCurrentQuickFrame())->IsNative()) {
       return cur_oat_quick_method_header_->ToDexPc(
-          GetMethod(), cur_quick_frame_pc_, abort_on_failure);
+          GetCurrentQuickFrame(), cur_quick_frame_pc_, abort_on_failure);
+    } else if (cur_oat_quick_method_header_->IsOptimized()) {
+      StackMap* stack_map = GetCurrentStackMap();
+      DCHECK(stack_map->IsValid());
+      return stack_map->GetDexPc();
+    } else {
+      DCHECK(cur_oat_quick_method_header_->IsNterpMethodHeader());
+      return NterpGetDexPC(cur_quick_frame_);
     }
   } else {
     return 0;
@@ -111,15 +142,25 @@
 extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
     REQUIRES_SHARED(Locks::mutator_lock_);
 
-mirror::Object* StackVisitor::GetThisObject() const {
+ObjPtr<mirror::Object> StackVisitor::GetThisObject() const {
   DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
   ArtMethod* m = GetMethod();
   if (m->IsStatic()) {
     return nullptr;
   } else if (m->IsNative()) {
     if (cur_quick_frame_ != nullptr) {
-      HandleScope* hs = reinterpret_cast<HandleScope*>(
-          reinterpret_cast<char*>(cur_quick_frame_) + sizeof(ArtMethod*));
+      HandleScope* hs;
+      if (cur_oat_quick_method_header_ != nullptr) {
+        hs = reinterpret_cast<HandleScope*>(
+            reinterpret_cast<char*>(cur_quick_frame_) + sizeof(ArtMethod*));
+      } else {
+        // GenericJNI frames have the HandleScope under the managed frame.
+        uint32_t shorty_len;
+        const char* shorty = m->GetShorty(&shorty_len);
+        const size_t num_handle_scope_references =
+            /* this */ 1u + std::count(shorty + 1, shorty + shorty_len, 'L');
+        hs = GetGenericJniHandleScope(cur_quick_frame_, num_handle_scope_references);
+      }
       return hs->GetReference(0);
     } else {
       return cur_shadow_frame_->GetVRegReference(0);
@@ -175,7 +216,11 @@
   return false;
 }
 
-bool StackVisitor::GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const {
+bool StackVisitor::GetVReg(ArtMethod* m,
+                           uint16_t vreg,
+                           VRegKind kind,
+                           uint32_t* val,
+                           std::optional<DexRegisterLocation> location) const {
   if (cur_quick_frame_ != nullptr) {
     DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
     DCHECK(m == GetMethod());
@@ -183,8 +228,34 @@
     if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) {
       return true;
     }
-    DCHECK(cur_oat_quick_method_header_->IsOptimized());
-    return GetVRegFromOptimizedCode(m, vreg, kind, val);
+    bool result = false;
+    if (cur_oat_quick_method_header_->IsNterpMethodHeader()) {
+      result = true;
+      *val = (kind == kReferenceVReg)
+          ? NterpGetVRegReference(cur_quick_frame_, vreg)
+          : NterpGetVReg(cur_quick_frame_, vreg);
+    } else {
+      DCHECK(cur_oat_quick_method_header_->IsOptimized());
+      if (location.has_value() && kind != kReferenceVReg) {
+        uint32_t val2 = *val;
+        // The caller already knows the register location, so we can use the faster overload
+        // which does not decode the stack maps.
+        result = GetVRegFromOptimizedCode(location.value(), kind, val);
+        // Compare to the slower overload.
+        DCHECK_EQ(result, GetVRegFromOptimizedCode(m, vreg, kind, &val2));
+        DCHECK_EQ(*val, val2);
+      } else {
+        result = GetVRegFromOptimizedCode(m, vreg, kind, val);
+      }
+    }
+    if (kind == kReferenceVReg) {
+      // Perform a read barrier in case we are in a different thread and GC is ongoing.
+      mirror::Object* out = reinterpret_cast<mirror::Object*>(static_cast<uintptr_t>(*val));
+      uintptr_t ptr_out = reinterpret_cast<uintptr_t>(GcRoot<mirror::Object>(out).Read());
+      DCHECK_LT(ptr_out, std::numeric_limits<uint32_t>::max());
+      *val = static_cast<uint32_t>(ptr_out);
+    }
+    return result;
   } else {
     DCHECK(cur_shadow_frame_ != nullptr);
     if (kind == kReferenceVReg) {
@@ -264,6 +335,32 @@
   }
 }
 
+bool StackVisitor::GetVRegFromOptimizedCode(DexRegisterLocation location,
+                                            VRegKind kind,
+                                            uint32_t* val) const {
+  switch (location.GetKind()) {
+    case DexRegisterLocation::Kind::kInvalid:
+      break;
+    case DexRegisterLocation::Kind::kInStack: {
+      const uint8_t* sp = reinterpret_cast<const uint8_t*>(cur_quick_frame_);
+      *val = *reinterpret_cast<const uint32_t*>(sp + location.GetStackOffsetInBytes());
+      return true;
+    }
+    case DexRegisterLocation::Kind::kInRegister:
+    case DexRegisterLocation::Kind::kInRegisterHigh:
+    case DexRegisterLocation::Kind::kInFpuRegister:
+    case DexRegisterLocation::Kind::kInFpuRegisterHigh:
+      return GetRegisterIfAccessible(location.GetMachineRegister(), kind, val);
+    case DexRegisterLocation::Kind::kConstant:
+      *val = location.GetConstant();
+      return true;
+    case DexRegisterLocation::Kind::kNone:
+      return false;
+  }
+  LOG(FATAL) << "Unexpected location kind " << location.GetKind();
+  UNREACHABLE();
+}
+
 bool StackVisitor::GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const {
   const bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
 
@@ -273,14 +370,6 @@
     reg = (kind == kDoubleHiVReg) ? (2 * reg + 1) : (2 * reg);
   }
 
-  // MIPS32 float registers are used as 64-bit (for MIPS32r2 it is pair
-  // F(2n)-F(2n+1), and for MIPS32r6 it is 64-bit register F(2n)). When
-  // accessing upper 32-bits from double, reg + 1 should be used.
-  if ((kRuntimeISA == InstructionSet::kMips) && (kind == kDoubleHiVReg)) {
-    DCHECK_ALIGNED(reg, 2);
-    reg++;
-  }
-
   if (!IsAccessibleRegister(reg, is_float)) {
     return false;
   }
@@ -328,16 +417,22 @@
   if (GetVRegPairFromDebuggerShadowFrame(vreg, kind_lo, kind_hi, val)) {
     return true;
   }
-  if (cur_quick_frame_ != nullptr) {
-    DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
-    DCHECK(m == GetMethod());
-    DCHECK(cur_oat_quick_method_header_->IsOptimized());
-    return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
-  } else {
+  if (cur_quick_frame_ == nullptr) {
     DCHECK(cur_shadow_frame_ != nullptr);
     *val = cur_shadow_frame_->GetVRegLong(vreg);
     return true;
   }
+  if (cur_oat_quick_method_header_->IsNterpMethodHeader()) {
+    uint64_t val_lo = NterpGetVReg(cur_quick_frame_, vreg);
+    uint64_t val_hi = NterpGetVReg(cur_quick_frame_, vreg + 1);
+    *val = (val_hi << 32) + val_lo;
+    return true;
+  }
+
+  DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
+  DCHECK(m == GetMethod());
+  DCHECK(cur_oat_quick_method_header_->IsOptimized());
+  return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
 }
 
 bool StackVisitor::GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
@@ -372,13 +467,10 @@
   return true;
 }
 
-bool StackVisitor::SetVReg(ArtMethod* m,
-                           uint16_t vreg,
-                           uint32_t new_value,
-                           VRegKind kind) {
+ShadowFrame* StackVisitor::PrepareSetVReg(ArtMethod* m, uint16_t vreg, bool wide) {
   CodeItemDataAccessor accessor(m->DexInstructionData());
   if (!accessor.HasCodeItem()) {
-    return false;
+    return nullptr;
   }
   ShadowFrame* shadow_frame = GetCurrentShadowFrame();
   if (shadow_frame == nullptr) {
@@ -388,15 +480,32 @@
     const uint16_t num_regs = accessor.RegistersSize();
     shadow_frame = thread_->FindOrCreateDebuggerShadowFrame(frame_id, num_regs, m, GetDexPc());
     CHECK(shadow_frame != nullptr);
-    // Remember the vreg has been set for debugging and must not be overwritten by the
+    // Remember the vreg(s) has been set for debugging and must not be overwritten by the
     // original value during deoptimization of the stack.
     thread_->GetUpdatedVRegFlags(frame_id)[vreg] = true;
+    if (wide) {
+      thread_->GetUpdatedVRegFlags(frame_id)[vreg + 1] = true;
+    }
   }
-  if (kind == kReferenceVReg) {
-    shadow_frame->SetVRegReference(vreg, reinterpret_cast<mirror::Object*>(new_value));
-  } else {
-    shadow_frame->SetVReg(vreg, new_value);
+  return shadow_frame;
+}
+
+bool StackVisitor::SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind) {
+  DCHECK(kind == kIntVReg || kind == kFloatVReg);
+  ShadowFrame* shadow_frame = PrepareSetVReg(m, vreg, /* wide= */ false);
+  if (shadow_frame == nullptr) {
+    return false;
   }
+  shadow_frame->SetVReg(vreg, new_value);
+  return true;
+}
+
+bool StackVisitor::SetVRegReference(ArtMethod* m, uint16_t vreg, ObjPtr<mirror::Object> new_value) {
+  ShadowFrame* shadow_frame = PrepareSetVReg(m, vreg, /* wide= */ false);
+  if (shadow_frame == nullptr) {
+    return false;
+  }
+  shadow_frame->SetVRegReference(vreg, new_value);
   return true;
 }
 
@@ -413,21 +522,9 @@
     LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
     UNREACHABLE();
   }
-  CodeItemDataAccessor accessor(m->DexInstructionData());
-  if (!accessor.HasCodeItem()) {
-    return false;
-  }
-  ShadowFrame* shadow_frame = GetCurrentShadowFrame();
+  ShadowFrame* shadow_frame = PrepareSetVReg(m, vreg, /* wide= */ true);
   if (shadow_frame == nullptr) {
-    // This is a compiled frame: we must prepare for deoptimization (see SetVRegFromDebugger).
-    const size_t frame_id = GetFrameId();
-    const uint16_t num_regs = accessor.RegistersSize();
-    shadow_frame = thread_->FindOrCreateDebuggerShadowFrame(frame_id, num_regs, m, GetDexPc());
-    CHECK(shadow_frame != nullptr);
-    // Remember the vreg pair has been set for debugging and must not be overwritten by the
-    // original value during deoptimization of the stack.
-    thread_->GetUpdatedVRegFlags(frame_id)[vreg] = true;
-    thread_->GetUpdatedVRegFlags(frame_id)[vreg + 1] = true;
+    return false;
   }
   shadow_frame->SetVRegLong(vreg, new_value);
   return true;
@@ -461,18 +558,18 @@
   return context_->GetFPR(reg);
 }
 
+uintptr_t StackVisitor::GetReturnPcAddr() const {
+  uintptr_t sp = reinterpret_cast<uintptr_t>(GetCurrentQuickFrame());
+  DCHECK_NE(sp, 0u);
+  return sp + GetCurrentQuickFrameInfo().GetReturnPcOffset();
+}
+
 uintptr_t StackVisitor::GetReturnPc() const {
-  uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
-  DCHECK(sp != nullptr);
-  uint8_t* pc_addr = sp + GetCurrentQuickFrameInfo().GetReturnPcOffset();
-  return *reinterpret_cast<uintptr_t*>(pc_addr);
+  return *reinterpret_cast<uintptr_t*>(GetReturnPcAddr());
 }
 
 void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
-  uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
-  CHECK(sp != nullptr);
-  uint8_t* pc_addr = sp + GetCurrentQuickFrameInfo().GetReturnPcOffset();
-  *reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
+  *reinterpret_cast<uintptr_t*>(GetReturnPcAddr()) = new_ret_pc;
 }
 
 size_t StackVisitor::ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) {
@@ -633,7 +730,8 @@
       // Check class linker linear allocs.
       // We get the canonical method as copied methods may have their declaring
       // class from another class loader.
-      ArtMethod* canonical = method->GetCanonicalMethod();
+      const PointerSize ptrSize = runtime->GetClassLinker()->GetImagePointerSize();
+      ArtMethod* canonical = method->GetCanonicalMethod(ptrSize);
       ObjPtr<mirror::Class> klass = canonical->GetDeclaringClass();
       LinearAlloc* const class_linear_alloc = (klass != nullptr)
           ? runtime->GetClassLinker()->GetAllocatorForClassLoader(klass->GetClassLoader())
@@ -662,14 +760,13 @@
       // Frame sanity.
       size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
       CHECK_NE(frame_size, 0u);
-      // A rough guess at an upper size we expect to see for a frame.
+      // For compiled code, we could try to have a rough guess at an upper size we expect
+      // to see for a frame:
       // 256 registers
       // 2 words HandleScope overhead
       // 3+3 register spills
-      // TODO: this seems architecture specific for the case of JNI frames.
-      // TODO: 083-compiler-regressions ManyFloatArgs shows this estimate is wrong.
       // const size_t kMaxExpectedFrameSize = (256 + 2 + 3 + 3) * sizeof(word);
-      const size_t kMaxExpectedFrameSize = 2 * KB;
+      const size_t kMaxExpectedFrameSize = interpreter::kMaxNterpFrame;
       CHECK_LE(frame_size, kMaxExpectedFrameSize) << method->PrettyMethod();
       size_t return_pc_offset = GetCurrentQuickFrameInfo().GetReturnPcOffset();
       CHECK_LT(return_pc_offset, frame_size);
@@ -677,24 +774,14 @@
   }
 }
 
-// Counts the number of references in the parameter list of the corresponding method.
-// Note: Thus does _not_ include "this" for non-static methods.
-static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  uint32_t shorty_len;
-  const char* shorty = method->GetShorty(&shorty_len);
-  uint32_t refs = 0;
-  for (uint32_t i = 1; i < shorty_len ; ++i) {
-    if (shorty[i] == 'L') {
-      refs++;
-    }
-  }
-  return refs;
-}
-
 QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
   if (cur_oat_quick_method_header_ != nullptr) {
-    return cur_oat_quick_method_header_->GetFrameInfo();
+    if (cur_oat_quick_method_header_->IsOptimized()) {
+      return cur_oat_quick_method_header_->GetFrameInfo();
+    } else {
+      DCHECK(cur_oat_quick_method_header_->IsNterpMethodHeader());
+      return NterpFrameInfo(cur_quick_frame_);
+    }
   }
 
   ArtMethod* method = GetMethod();
@@ -718,10 +805,12 @@
     return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
   }
 
-  // The only remaining case is if the method is native and uses the generic JNI stub,
-  // called either directly or through some (resolution, instrumentation) trampoline.
+  // The only remaining cases are for native methods that either
+  //   - use the Generic JNI stub, called either directly or through some
+  //     (resolution, instrumentation) trampoline; or
+  //   - fake a Generic JNI frame in art_jni_dlsym_lookup_critical_stub.
   DCHECK(method->IsNative());
-  if (kIsDebugBuild) {
+  if (kIsDebugBuild && !method->IsCriticalNative()) {
     ClassLinker* class_linker = runtime->GetClassLinker();
     const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method,
                                                                              kRuntimePointerSize);
@@ -731,18 +820,9 @@
           (runtime->GetJit() != nullptr &&
            runtime->GetJit()->GetCodeCache()->ContainsPc(entry_point))) << method->PrettyMethod();
   }
-  // Generic JNI frame.
-  uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method) + 1;
-  size_t scope_size = HandleScope::SizeOf(handle_refs);
-  constexpr QuickMethodFrameInfo callee_info =
-      RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
-
-  // Callee saves + handle scope + method ref + alignment
-  // Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
-  size_t frame_size = RoundUp(
-      callee_info.FrameSizeInBytes() - sizeof(void*) + sizeof(ArtMethod*) + scope_size,
-      kStackAlignment);
-  return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
+  // Generic JNI frame is just like the SaveRefsAndArgs frame.
+  // Note that HandleScope, if any, is below the frame.
+  return RuntimeCalleeSaveFrame::GetMethodFrameInfo(CalleeSaveType::kSaveRefsAndArgs);
 }
 
 template <StackVisitor::CountTransitions kCount>
@@ -751,8 +831,6 @@
     DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
   }
   CHECK_EQ(cur_depth_, 0U);
-  bool exit_stubs_installed = Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
-  uint32_t instrumentation_stack_depth = 0;
   size_t inlined_frames_count = 0;
 
   for (const ManagedStack* current_fragment = thread_->GetManagedStack();
@@ -760,8 +838,7 @@
     cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
     cur_quick_frame_ = current_fragment->GetTopQuickFrame();
     cur_quick_frame_pc_ = 0;
-    cur_oat_quick_method_header_ = nullptr;
-
+    DCHECK(cur_oat_quick_method_header_ == nullptr);
     if (cur_quick_frame_ != nullptr) {  // Handle quick stack frames.
       // Can't be both a shadow and a quick fragment.
       DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
@@ -813,17 +890,14 @@
         if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
             && (cur_oat_quick_method_header_ != nullptr)
             && cur_oat_quick_method_header_->IsOptimized()
-            // JNI methods cannot have any inlined frames.
-            && !method->IsNative()) {
+            && !method->IsNative()  // JNI methods cannot have any inlined frames.
+            && CodeInfo::HasInlineInfo(cur_oat_quick_method_header_->GetOptimizedCodeInfoPtr())) {
           DCHECK_NE(cur_quick_frame_pc_, 0u);
-          current_code_info_ = CodeInfo(cur_oat_quick_method_header_,
-                                        CodeInfo::DecodeFlags::InlineInfoOnly);
-          uint32_t native_pc_offset =
-              cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
-          StackMap stack_map = current_code_info_.GetStackMapForNativePcOffset(native_pc_offset);
-          if (stack_map.IsValid() && stack_map.HasInlineInfo()) {
+          CodeInfo* code_info = GetCurrentInlineInfo();
+          StackMap* stack_map = GetCurrentStackMap();
+          if (stack_map->IsValid() && stack_map->HasInlineInfo()) {
             DCHECK_EQ(current_inline_frames_.size(), 0u);
-            for (current_inline_frames_ = current_code_info_.GetInlineInfosOf(stack_map);
+            for (current_inline_frames_ = code_info->GetInlineInfosOf(*stack_map);
                  !current_inline_frames_.empty();
                  current_inline_frames_.pop_back()) {
               bool should_continue = VisitFrame();
@@ -847,47 +921,37 @@
         }
         // Compute PC for next stack frame from return PC.
         size_t frame_size = frame_info.FrameSizeInBytes();
-        size_t return_pc_offset = frame_size - sizeof(void*);
-        uint8_t* return_pc_addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + return_pc_offset;
+        uintptr_t return_pc_addr = GetReturnPcAddr();
         uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);
 
-        if (UNLIKELY(exit_stubs_installed ||
-                     reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == return_pc)) {
+        if (UNLIKELY(reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == return_pc)) {
           // While profiling, the return pc is restored from the side stack, except when walking
           // the stack for an exception where the side stack will be unwound in VisitFrame.
-          if (reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == return_pc) {
-            CHECK_LT(instrumentation_stack_depth, thread_->GetInstrumentationStack()->size());
-            const instrumentation::InstrumentationStackFrame& instrumentation_frame =
-                (*thread_->GetInstrumentationStack())[instrumentation_stack_depth];
-            instrumentation_stack_depth++;
-            if (GetMethod() ==
-                Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveAllCalleeSaves)) {
-              // Skip runtime save all callee frames which are used to deliver exceptions.
-            } else if (instrumentation_frame.interpreter_entry_) {
-              ArtMethod* callee =
-                  Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs);
-              CHECK_EQ(GetMethod(), callee) << "Expected: " << ArtMethod::PrettyMethod(callee)
-                                            << " Found: " << ArtMethod::PrettyMethod(GetMethod());
-            } else {
-              // Instrumentation generally doesn't distinguish between a method's obsolete and
-              // non-obsolete version.
-              CHECK_EQ(instrumentation_frame.method_->GetNonObsoleteMethod(),
-                       GetMethod()->GetNonObsoleteMethod())
-                  << "Expected: "
-                  << ArtMethod::PrettyMethod(instrumentation_frame.method_->GetNonObsoleteMethod())
-                  << " Found: " << ArtMethod::PrettyMethod(GetMethod()->GetNonObsoleteMethod());
-            }
-            if (num_frames_ != 0) {
-              // Check agreement of frame Ids only if num_frames_ is computed to avoid infinite
-              // recursion.
-              size_t frame_id = instrumentation::Instrumentation::ComputeFrameId(
-                  thread_,
-                  cur_depth_,
-                  inlined_frames_count);
-              CHECK_EQ(instrumentation_frame.frame_id_, frame_id);
-            }
-            return_pc = instrumentation_frame.return_pc_;
+          const std::map<uintptr_t, instrumentation::InstrumentationStackFrame>&
+              instrumentation_stack = *thread_->GetInstrumentationStack();
+          auto it = instrumentation_stack.find(return_pc_addr);
+          CHECK(it != instrumentation_stack.end());
+          const instrumentation::InstrumentationStackFrame& instrumentation_frame = it->second;
+          if (GetMethod() ==
+              Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveAllCalleeSaves)) {
+            // Skip runtime save all callee frames which are used to deliver exceptions.
+          } else if (instrumentation_frame.interpreter_entry_) {
+            ArtMethod* callee =
+                Runtime::Current()->GetCalleeSaveMethod(CalleeSaveType::kSaveRefsAndArgs);
+            CHECK_EQ(GetMethod(), callee) << "Expected: " << ArtMethod::PrettyMethod(callee)
+                                          << " Found: " << ArtMethod::PrettyMethod(GetMethod());
+          } else if (!instrumentation_frame.method_->IsRuntimeMethod()) {
+            // Trampolines get replaced with their actual method in the stack,
+            // so don't do the check below for runtime methods.
+            // Instrumentation generally doesn't distinguish between a method's obsolete and
+            // non-obsolete version.
+            CHECK_EQ(instrumentation_frame.method_->GetNonObsoleteMethod(),
+                     GetMethod()->GetNonObsoleteMethod())
+                << "Expected: "
+                << ArtMethod::PrettyMethod(instrumentation_frame.method_->GetNonObsoleteMethod())
+                << " Found: " << ArtMethod::PrettyMethod(GetMethod()->GetNonObsoleteMethod());
           }
+          return_pc = instrumentation_frame.return_pc_;
         }
 
         cur_quick_frame_pc_ = return_pc;
@@ -911,6 +975,8 @@
         }
         method = *cur_quick_frame_;
       }
+      // We reached a transition frame, it doesn't have a method header.
+      cur_oat_quick_method_header_ = nullptr;
     } else if (cur_shadow_frame_ != nullptr) {
       do {
         SanityCheckFrame();
diff --git a/runtime/stack.h b/runtime/stack.h
index 1f305d2..af33e6c 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -17,11 +17,13 @@
 #ifndef ART_RUNTIME_STACK_H_
 #define ART_RUNTIME_STACK_H_
 
+#include <optional>
 #include <stdint.h>
 #include <string>
 
 #include "base/locks.h"
 #include "base/macros.h"
+#include "obj_ptr.h"
 #include "quick/quick_method_frame_info.h"
 #include "stack_map.h"
 
@@ -193,7 +195,7 @@
 
   uint32_t GetDexPc(bool abort_on_failure = true) const REQUIRES_SHARED(Locks::mutator_lock_);
 
-  mirror::Object* GetThisObject() const REQUIRES_SHARED(Locks::mutator_lock_);
+  ObjPtr<mirror::Object> GetThisObject() const REQUIRES_SHARED(Locks::mutator_lock_);
 
   size_t GetNativePcOffset() const REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -222,7 +224,12 @@
   bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
+  bool GetVReg(ArtMethod* m,
+               uint16_t vreg,
+               VRegKind kind,
+               uint32_t* val,
+               std::optional<DexRegisterLocation> location =
+                   std::optional<DexRegisterLocation>()) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   bool GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
@@ -236,6 +243,11 @@
 
   // Values will be set in debugger shadow frames. Debugger will make sure deoptimization
   // is triggered to make the values effective.
+  bool SetVRegReference(ArtMethod* m, uint16_t vreg, ObjPtr<mirror::Object> new_value)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  // Values will be set in debugger shadow frames. Debugger will make sure deoptimization
+  // is triggered to make the values effective.
   bool SetVRegPair(ArtMethod* m,
                    uint16_t vreg,
                    uint64_t new_value,
@@ -246,6 +258,7 @@
   uintptr_t* GetGPRAddress(uint32_t reg) const;
 
   uintptr_t GetReturnPc() const REQUIRES_SHARED(Locks::mutator_lock_);
+  uintptr_t GetReturnPcAddr() const REQUIRES_SHARED(Locks::mutator_lock_);
 
   void SetReturnPc(uintptr_t new_ret_pc) REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -324,12 +337,20 @@
                                     VRegKind kind_lo, VRegKind kind_hi,
                                     uint64_t* val) const
       REQUIRES_SHARED(Locks::mutator_lock_);
+  bool GetVRegFromOptimizedCode(DexRegisterLocation location, VRegKind kind, uint32_t* val) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
   bool GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, VRegKind kind_lo,
                                    uint64_t* val) const
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  ShadowFrame* PrepareSetVReg(ArtMethod* m, uint16_t vreg, bool wide)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   void SanityCheckFrame() const REQUIRES_SHARED(Locks::mutator_lock_);
 
+  ALWAYS_INLINE CodeInfo* GetCurrentInlineInfo() const;
+  ALWAYS_INLINE StackMap* GetCurrentStackMap() const;
+
   Thread* const thread_;
   const StackWalkKind walk_kind_;
   ShadowFrame* cur_shadow_frame_;
@@ -342,9 +363,14 @@
   size_t cur_depth_;
   // Current inlined frames of the method we are currently at.
   // We keep poping frames from the end as we visit the frames.
-  CodeInfo current_code_info_;
   BitTableRange<InlineInfo> current_inline_frames_;
 
+  // Cache the most recently decoded inline info data.
+  // The 'current_inline_frames_' refers to this data, so we need to keep it alive anyway.
+  // Marked mutable since the cache fields are updated from const getters.
+  mutable std::pair<const OatQuickMethodHeader*, CodeInfo> cur_inline_info_;
+  mutable std::pair<uintptr_t, StackMap> cur_stack_map_;
+
  protected:
   Context* const context_;
   const bool check_suspended_;
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index 62dec15..d813fd5 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -27,69 +27,112 @@
 
 namespace art {
 
-CodeInfo::CodeInfo(const OatQuickMethodHeader* header, DecodeFlags flags)
-  : CodeInfo(header->GetOptimizedCodeInfoPtr(), flags) {
-}
-
-// Returns true if the decoded table was deduped.
-template<typename Accessor>
-ALWAYS_INLINE static bool DecodeTable(BitTable<Accessor>& table, BitMemoryReader& reader) {
-  bool is_deduped = reader.ReadBit();
-  if (is_deduped) {
-    ssize_t bit_offset = reader.NumberOfReadBits() - reader.ReadVarint();
-    BitMemoryReader reader2(reader.data(), bit_offset);  // The offset is negative.
-    table.Decode(reader2);
-  } else {
-    table.Decode(reader);
-  }
-  return is_deduped;
-}
-
-void CodeInfo::Decode(const uint8_t* data, DecodeFlags flags) {
+// The callback is used to inform the caller about memory bounds of the bit-tables.
+template<typename DecodeCallback>
+CodeInfo::CodeInfo(const uint8_t* data, size_t* num_read_bits, DecodeCallback callback) {
   BitMemoryReader reader(data);
-  ForEachHeaderField([this, &reader](auto member_pointer) {
-    this->*member_pointer = reader.ReadVarint();
+  std::array<uint32_t, kNumHeaders> header = reader.ReadInterleavedVarints<kNumHeaders>();
+  ForEachHeaderField([this, &header](size_t i, auto member_pointer) {
+    this->*member_pointer = header[i];
   });
-  ForEachBitTableField([this, &reader](auto member_pointer) {
-    DecodeTable(this->*member_pointer, reader);
-  }, flags);
-  size_in_bits_ = reader.NumberOfReadBits();
+  ForEachBitTableField([this, &reader, &callback](size_t i, auto member_pointer) {
+    auto& table = this->*member_pointer;
+    if (LIKELY(HasBitTable(i))) {
+      if (UNLIKELY(IsBitTableDeduped(i))) {
+        ssize_t bit_offset = reader.NumberOfReadBits() - reader.ReadVarint();
+        BitMemoryReader reader2(reader.data(), bit_offset);  // The offset is negative.
+        table.Decode(reader2);
+        callback(i, &table, reader2.GetReadRegion());
+      } else {
+        ssize_t bit_offset = reader.NumberOfReadBits();
+        table.Decode(reader);
+        callback(i, &table, reader.GetReadRegion().Subregion(bit_offset));
+      }
+    }
+  });
+  if (num_read_bits != nullptr) {
+    *num_read_bits = reader.NumberOfReadBits();
+  }
+}
+
+CodeInfo::CodeInfo(const uint8_t* data, size_t* num_read_bits)
+    : CodeInfo(data, num_read_bits, [](size_t, auto*, BitMemoryRegion){}) {}
+
+CodeInfo::CodeInfo(const OatQuickMethodHeader* header)
+    : CodeInfo(header->GetOptimizedCodeInfoPtr()) {}
+
+QuickMethodFrameInfo CodeInfo::DecodeFrameInfo(const uint8_t* data) {
+  CodeInfo code_info(data);
+  return QuickMethodFrameInfo(code_info.packed_frame_size_ * kStackAlignment,
+                              code_info.core_spill_mask_,
+                              code_info.fp_spill_mask_);
+}
+
+CodeInfo CodeInfo::DecodeGcMasksOnly(const OatQuickMethodHeader* header) {
+  CodeInfo code_info(header->GetOptimizedCodeInfoPtr());
+  CodeInfo copy;  // Copy to dead-code-eliminate all fields that we do not need.
+  copy.stack_maps_ = code_info.stack_maps_;
+  copy.register_masks_ = code_info.register_masks_;
+  copy.stack_masks_ = code_info.stack_masks_;
+  return copy;
+}
+
+CodeInfo CodeInfo::DecodeInlineInfoOnly(const OatQuickMethodHeader* header) {
+  CodeInfo code_info(header->GetOptimizedCodeInfoPtr());
+  CodeInfo copy;  // Copy to dead-code-eliminate all fields that we do not need.
+  copy.number_of_dex_registers_ = code_info.number_of_dex_registers_;
+  copy.stack_maps_ = code_info.stack_maps_;
+  copy.inline_infos_ = code_info.inline_infos_;
+  copy.method_infos_ = code_info.method_infos_;
+  return copy;
 }
 
 size_t CodeInfo::Deduper::Dedupe(const uint8_t* code_info_data) {
   writer_.ByteAlign();
   size_t deduped_offset = writer_.NumberOfWrittenBits() / kBitsPerByte;
-  BitMemoryReader reader(code_info_data);
-  CodeInfo code_info;  // Temporary storage for decoded data.
-  ForEachHeaderField([this, &reader, &code_info](auto member_pointer) {
-    code_info.*member_pointer = reader.ReadVarint();
-    writer_.WriteVarint(code_info.*member_pointer);
+
+  // The back-reference offset takes space so dedupe is not worth it for tiny tables.
+  constexpr size_t kMinDedupSize = 32;  // Assume 32-bit offset on average.
+
+  // Read the existing code info and find (and keep) dedup-map iterator for each table.
+  // The iterator stores BitMemoryRegion and bit_offset of previous identical BitTable.
+  std::map<BitMemoryRegion, uint32_t, BitMemoryRegion::Less>::iterator it[kNumBitTables];
+  CodeInfo code_info(code_info_data, nullptr, [&](size_t i, auto*, BitMemoryRegion region) {
+    it[i] = dedupe_map_.emplace(region, /*bit_offset=*/0).first;
+    if (it[i]->second != 0 && region.size_in_bits() > kMinDedupSize) {  // Seen before and large?
+      code_info.SetBitTableDeduped(i);  // Mark as deduped before we write header.
+    }
   });
-  ForEachBitTableField([this, &reader, &code_info](auto member_pointer) {
-    bool is_deduped = reader.ReadBit();
-    DCHECK(!is_deduped);
-    size_t bit_table_start = reader.NumberOfReadBits();
-    (code_info.*member_pointer).Decode(reader);
-    BitMemoryRegion region = reader.GetReadRegion().Subregion(bit_table_start);
-    auto it = dedupe_map_.insert(std::make_pair(region, /* placeholder */ 0));
-    if (it.second /* new bit table */ || region.size_in_bits() < 32) {
-      writer_.WriteBit(false);  // Is not deduped.
-      it.first->second = writer_.NumberOfWrittenBits();
-      writer_.WriteRegion(region);
-    } else {
-      writer_.WriteBit(true);  // Is deduped.
-      size_t bit_offset = writer_.NumberOfWrittenBits();
-      writer_.WriteVarint(bit_offset - it.first->second);
+
+  // Write the code info back, but replace deduped tables with relative offsets.
+  std::array<uint32_t, kNumHeaders> header;
+  ForEachHeaderField([&code_info, &header](size_t i, auto member_pointer) {
+    header[i] = code_info.*member_pointer;
+  });
+  writer_.WriteInterleavedVarints(header);
+  ForEachBitTableField([this, &code_info, &it](size_t i, auto) {
+    if (code_info.HasBitTable(i)) {
+      uint32_t& bit_offset = it[i]->second;
+      if (code_info.IsBitTableDeduped(i)) {
+        DCHECK_NE(bit_offset, 0u);
+        writer_.WriteVarint(writer_.NumberOfWrittenBits() - bit_offset);
+      } else {
+        bit_offset = writer_.NumberOfWrittenBits();  // Store offset in dedup map.
+        writer_.WriteRegion(it[i]->first);
+      }
     }
   });
 
   if (kIsDebugBuild) {
     CodeInfo old_code_info(code_info_data);
     CodeInfo new_code_info(writer_.data() + deduped_offset);
-    ForEachHeaderField([&old_code_info, &new_code_info](auto member_pointer) {
-      DCHECK_EQ(old_code_info.*member_pointer, new_code_info.*member_pointer);
+    ForEachHeaderField([&old_code_info, &new_code_info](size_t, auto member_pointer) {
+      if (member_pointer != &CodeInfo::bit_table_flags_) {  // Expected to differ.
+        DCHECK_EQ(old_code_info.*member_pointer, new_code_info.*member_pointer);
+      }
     });
-    ForEachBitTableField([&old_code_info, &new_code_info](auto member_pointer) {
+    ForEachBitTableField([&old_code_info, &new_code_info](size_t i, auto member_pointer) {
+      DCHECK_EQ(old_code_info.HasBitTable(i), new_code_info.HasBitTable(i));
       DCHECK((old_code_info.*member_pointer).Equals(new_code_info.*member_pointer));
     });
   }
@@ -97,17 +140,15 @@
   return deduped_offset;
 }
 
-BitTable<StackMap>::const_iterator CodeInfo::BinarySearchNativePc(uint32_t packed_pc) const {
-  return std::partition_point(
+StackMap CodeInfo::GetStackMapForNativePcOffset(uint32_t pc, InstructionSet isa) const {
+  uint32_t packed_pc = StackMap::PackNativePc(pc, isa);
+  // Binary search.  All catch stack maps are stored separately at the end.
+  auto it = std::partition_point(
       stack_maps_.begin(),
       stack_maps_.end(),
       [packed_pc](const StackMap& sm) {
         return sm.GetPackedNativePc() < packed_pc && sm.GetKind() != StackMap::Kind::Catch;
       });
-}
-
-StackMap CodeInfo::GetStackMapForNativePcOffset(uint32_t pc, InstructionSet isa) const {
-  auto it = BinarySearchNativePc(StackMap::PackNativePc(pc, isa));
   // Start at the lower bound and iterate over all stack maps with the given native pc.
   for (; it != stack_maps_.end() && (*it).GetNativePcOffset(isa) == pc; ++it) {
     StackMap::Kind kind = static_cast<StackMap::Kind>((*it).GetKind());
@@ -185,28 +226,23 @@
 void CodeInfo::CollectSizeStats(const uint8_t* code_info_data, /*out*/ Stats* parent) {
   Stats* codeinfo_stats = parent->Child("CodeInfo");
   BitMemoryReader reader(code_info_data);
-  ForEachHeaderField([&reader](auto) { reader.ReadVarint(); });
+  reader.ReadInterleavedVarints<kNumHeaders>();
   codeinfo_stats->Child("Header")->AddBits(reader.NumberOfReadBits());
-  CodeInfo code_info;  // Temporary storage for decoded tables.
-  ForEachBitTableField([codeinfo_stats, &reader, &code_info](auto member_pointer) {
-    auto& table = code_info.*member_pointer;
-    size_t bit_offset = reader.NumberOfReadBits();
-    bool deduped = DecodeTable(table, reader);
-    if (deduped) {
-      codeinfo_stats->Child("DedupeOffset")->AddBits(reader.NumberOfReadBits() - bit_offset);
-    } else {
-      Stats* table_stats = codeinfo_stats->Child(table.GetName());
-      table_stats->AddBits(reader.NumberOfReadBits() - bit_offset);
-      const char* const* column_names = table.GetColumnNames();
-      for (size_t c = 0; c < table.NumColumns(); c++) {
-        if (table.NumColumnBits(c) > 0) {
+  size_t num_bits;
+  CodeInfo code_info(code_info_data, &num_bits, [&](size_t i, auto* table, BitMemoryRegion region) {
+    if (!code_info.IsBitTableDeduped(i)) {
+      Stats* table_stats = codeinfo_stats->Child(table->GetName());
+      table_stats->AddBits(region.size_in_bits());
+      const char* const* column_names = table->GetColumnNames();
+      for (size_t c = 0; c < table->NumColumns(); c++) {
+        if (table->NumColumnBits(c) > 0) {
           Stats* column_stats = table_stats->Child(column_names[c]);
-          column_stats->AddBits(table.NumRows() * table.NumColumnBits(c), table.NumRows());
+          column_stats->AddBits(table->NumRows() * table->NumColumnBits(c), table->NumRows());
         }
       }
     }
   });
-  codeinfo_stats->AddBytes(BitsToBytesRoundUp(reader.NumberOfReadBits()));
+  codeinfo_stats->AddBytes(BitsToBytesRoundUp(num_bits));
 }
 
 void DexRegisterMap::Dump(VariableIndentationOutputStream* vios) const {
@@ -226,14 +262,14 @@
                     uint32_t code_offset,
                     bool verbose,
                     InstructionSet instruction_set) const {
-  vios->Stream() << "CodeInfo BitSize=" << size_in_bits_
+  vios->Stream() << "CodeInfo "
     << " FrameSize:" << packed_frame_size_ * kStackAlignment
     << " CoreSpillMask:" << std::hex << core_spill_mask_
     << " FpSpillMask:" << std::hex << fp_spill_mask_
     << " NumberOfDexRegisters:" << std::dec << number_of_dex_registers_
     << "\n";
   ScopedIndentation indent1(vios);
-  ForEachBitTableField([this, &vios, verbose](auto member_pointer) {
+  ForEachBitTableField([this, &vios, verbose](size_t, auto member_pointer) {
     const auto& table = this->*member_pointer;
     if (table.NumRows() != 0) {
       vios->Stream() << table.GetName() << " BitSize=" << table.DataBitSize();
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 87133cf..2065a79 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -181,7 +181,6 @@
   BIT_TABLE_COLUMN(3, ArtMethodHi)  // High bits of ArtMethod*.
   BIT_TABLE_COLUMN(4, ArtMethodLo)  // Low bits of ArtMethod*.
   BIT_TABLE_COLUMN(5, NumberOfDexRegisters)  // Includes outer levels and the main method.
-  BIT_TABLE_COLUMN(6, DexRegisterMapIndex)
 
   static constexpr uint32_t kLast = -1;
   static constexpr uint32_t kMore = 0;
@@ -292,26 +291,14 @@
     std::map<BitMemoryRegion, uint32_t, BitMemoryRegion::Less> dedupe_map_;
   };
 
-  enum DecodeFlags {
-    AllTables = 0,
-    // Limits the decoding only to the data needed by GC.
-    GcMasksOnly = 1,
-    // Limits the decoding only to the main stack map table and inline info table.
-    // This is sufficient for many use cases and makes the header decoding faster.
-    InlineInfoOnly = 2,
-  };
+  ALWAYS_INLINE CodeInfo() {}
+  ALWAYS_INLINE explicit CodeInfo(const uint8_t* data, size_t* num_read_bits = nullptr);
+  ALWAYS_INLINE explicit CodeInfo(const OatQuickMethodHeader* header);
 
-  CodeInfo() {}
-
-  explicit CodeInfo(const uint8_t* data, DecodeFlags flags = AllTables) {
-    Decode(reinterpret_cast<const uint8_t*>(data), flags);
-  }
-
-  explicit CodeInfo(const OatQuickMethodHeader* header, DecodeFlags flags = AllTables);
-
-  size_t Size() const {
-    return BitsToBytesRoundUp(size_in_bits_);
-  }
+  // The following methods decode only part of the data.
+  static QuickMethodFrameInfo DecodeFrameInfo(const uint8_t* data);
+  static CodeInfo DecodeGcMasksOnly(const OatQuickMethodHeader* header);
+  static CodeInfo DecodeInlineInfoOnly(const OatQuickMethodHeader* header);
 
   ALWAYS_INLINE const BitTable<StackMap>& GetStackMaps() const {
     return stack_maps_;
@@ -438,58 +425,72 @@
   // Accumulate code info size statistics into the given Stats tree.
   static void CollectSizeStats(const uint8_t* code_info, /*out*/ Stats* parent);
 
-  ALWAYS_INLINE static QuickMethodFrameInfo DecodeFrameInfo(const uint8_t* data) {
-    BitMemoryReader reader(data);
-    return QuickMethodFrameInfo(
-        reader.ReadVarint() * kStackAlignment,  // Decode packed_frame_size_ and unpack.
-        reader.ReadVarint(),  // core_spill_mask_.
-        reader.ReadVarint());  // fp_spill_mask_.
+  ALWAYS_INLINE static bool HasInlineInfo(const uint8_t* code_info_data) {
+    return (*code_info_data & kHasInlineInfo) != 0;
+  }
+
+  ALWAYS_INLINE static bool IsBaseline(const uint8_t* code_info_data) {
+    return (*code_info_data & kIsBaseline) != 0;
   }
 
  private:
-  // Returns lower bound (fist stack map which has pc greater or equal than the desired one).
-  // It ignores catch stack maps at the end (it is the same as if they had maximum pc value).
-  BitTable<StackMap>::const_iterator BinarySearchNativePc(uint32_t packed_pc) const;
-
   // Scan backward to determine dex register locations at given stack map.
   void DecodeDexRegisterMap(uint32_t stack_map_index,
                             uint32_t first_dex_register,
                             /*out*/ DexRegisterMap* map) const;
 
-  void Decode(const uint8_t* data, DecodeFlags flags);
+  template<typename DecodeCallback>  // (size_t index, BitTable<...>*, BitMemoryRegion).
+  ALWAYS_INLINE CodeInfo(const uint8_t* data, size_t* num_read_bits, DecodeCallback callback);
 
-  // Invokes the callback with member pointer of each header field.
+  // Invokes the callback with index and member pointer of each header field.
   template<typename Callback>
   ALWAYS_INLINE static void ForEachHeaderField(Callback callback) {
-    callback(&CodeInfo::packed_frame_size_);
-    callback(&CodeInfo::core_spill_mask_);
-    callback(&CodeInfo::fp_spill_mask_);
-    callback(&CodeInfo::number_of_dex_registers_);
+    size_t index = 0;
+    callback(index++, &CodeInfo::flags_);
+    callback(index++, &CodeInfo::packed_frame_size_);
+    callback(index++, &CodeInfo::core_spill_mask_);
+    callback(index++, &CodeInfo::fp_spill_mask_);
+    callback(index++, &CodeInfo::number_of_dex_registers_);
+    callback(index++, &CodeInfo::bit_table_flags_);
+    DCHECK_EQ(index, kNumHeaders);
   }
 
-  // Invokes the callback with member pointer of each BitTable field.
+  // Invokes the callback with index and member pointer of each BitTable field.
   template<typename Callback>
-  ALWAYS_INLINE static void ForEachBitTableField(Callback callback, DecodeFlags flags = AllTables) {
-    callback(&CodeInfo::stack_maps_);
-    callback(&CodeInfo::register_masks_);
-    callback(&CodeInfo::stack_masks_);
-    if (flags & DecodeFlags::GcMasksOnly) {
-      return;
-    }
-    callback(&CodeInfo::inline_infos_);
-    callback(&CodeInfo::method_infos_);
-    if (flags & DecodeFlags::InlineInfoOnly) {
-      return;
-    }
-    callback(&CodeInfo::dex_register_masks_);
-    callback(&CodeInfo::dex_register_maps_);
-    callback(&CodeInfo::dex_register_catalog_);
+  ALWAYS_INLINE static void ForEachBitTableField(Callback callback) {
+    size_t index = 0;
+    callback(index++, &CodeInfo::stack_maps_);
+    callback(index++, &CodeInfo::register_masks_);
+    callback(index++, &CodeInfo::stack_masks_);
+    callback(index++, &CodeInfo::inline_infos_);
+    callback(index++, &CodeInfo::method_infos_);
+    callback(index++, &CodeInfo::dex_register_masks_);
+    callback(index++, &CodeInfo::dex_register_maps_);
+    callback(index++, &CodeInfo::dex_register_catalog_);
+    DCHECK_EQ(index, kNumBitTables);
   }
 
+  bool HasBitTable(size_t i) { return ((bit_table_flags_ >> i) & 1) != 0; }
+  bool IsBitTableDeduped(size_t i) { return ((bit_table_flags_ >> (kNumBitTables + i)) & 1) != 0; }
+  void SetBitTableDeduped(size_t i) { bit_table_flags_ |= 1 << (kNumBitTables + i); }
+
+  enum Flags {
+    kHasInlineInfo = 1 << 0,
+    kIsBaseline = 1 << 1,
+  };
+
+  // The CodeInfo starts with sequence of variable-length bit-encoded integers.
+  static constexpr size_t kNumHeaders = 6;
+  uint32_t flags_ = 0;
   uint32_t packed_frame_size_ = 0;  // Frame size in kStackAlignment units.
   uint32_t core_spill_mask_ = 0;
   uint32_t fp_spill_mask_ = 0;
   uint32_t number_of_dex_registers_ = 0;
+  uint32_t bit_table_flags_ = 0;
+
+  // The encoded bit-tables follow the header.  Based on the above flags field,
+  // bit-tables might be omitted or replaced by relative bit-offset if deduped.
+  static constexpr size_t kNumBitTables = 8;
   BitTable<StackMap> stack_maps_;
   BitTable<RegisterMask> register_masks_;
   BitTable<StackMask> stack_masks_;
@@ -498,7 +499,8 @@
   BitTable<DexRegisterMask> dex_register_masks_;
   BitTable<DexRegisterMapInfo> dex_register_maps_;
   BitTable<DexRegisterInfo> dex_register_catalog_;
-  uint32_t size_in_bits_ = 0;
+
+  friend class StackMapStream;
 };
 
 #undef ELEMENT_BYTE_OFFSET_AFTER
diff --git a/runtime/string_builder_append.cc b/runtime/string_builder_append.cc
new file mode 100644
index 0000000..85b70eb
--- /dev/null
+++ b/runtime/string_builder_append.cc
@@ -0,0 +1,364 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "string_builder_append.h"
+
+#include "base/casts.h"
+#include "base/logging.h"
+#include "common_throws.h"
+#include "gc/heap.h"
+#include "mirror/string-alloc-inl.h"
+#include "obj_ptr-inl.h"
+#include "runtime.h"
+
+namespace art {
+
+class StringBuilderAppend::Builder {
+ public:
+  Builder(uint32_t format, const uint32_t* args, Thread* self)
+      : format_(format),
+        args_(args),
+        hs_(self) {}
+
+  int32_t CalculateLengthWithFlag() REQUIRES_SHARED(Locks::mutator_lock_);
+
+  void operator()(ObjPtr<mirror::Object> obj, size_t usable_size) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+  static size_t Uint64Length(uint64_t value);
+
+  static size_t Int64Length(int64_t value) {
+    uint64_t v = static_cast<uint64_t>(value);
+    return (value >= 0) ? Uint64Length(v) : 1u + Uint64Length(-v);
+  }
+
+  static size_t RemainingSpace(ObjPtr<mirror::String> new_string, const uint8_t* data)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(new_string->IsCompressed());
+    DCHECK_GE(new_string->GetLength(), data - new_string->GetValueCompressed());
+    return new_string->GetLength() - (data - new_string->GetValueCompressed());
+  }
+
+  static size_t RemainingSpace(ObjPtr<mirror::String> new_string, const uint16_t* data)
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(!new_string->IsCompressed());
+    DCHECK_GE(new_string->GetLength(), data - new_string->GetValue());
+    return new_string->GetLength() - (data - new_string->GetValue());
+  }
+
+  template <typename CharType, size_t size>
+  static CharType* AppendLiteral(ObjPtr<mirror::String> new_string,
+                                 CharType* data,
+                                 const char (&literal)[size]) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  template <typename CharType>
+  static CharType* AppendString(ObjPtr<mirror::String> new_string,
+                                CharType* data,
+                                ObjPtr<mirror::String> str) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  template <typename CharType>
+  static CharType* AppendInt64(ObjPtr<mirror::String> new_string,
+                               CharType* data,
+                               int64_t value) REQUIRES_SHARED(Locks::mutator_lock_);
+
+  template <typename CharType>
+  void StoreData(ObjPtr<mirror::String> new_string, CharType* data) const
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  static constexpr char kNull[] = "null";
+  static constexpr size_t kNullLength = sizeof(kNull) - 1u;
+  static constexpr char kTrue[] = "true";
+  static constexpr size_t kTrueLength = sizeof(kTrue) - 1u;
+  static constexpr char kFalse[] = "false";
+  static constexpr size_t kFalseLength = sizeof(kFalse) - 1u;
+
+  // The format and arguments to append.
+  const uint32_t format_;
+  const uint32_t* const args_;
+
+  // References are moved to the handle scope during CalculateLengthWithFlag().
+  StackHandleScope<kMaxArgs> hs_;
+
+  // The length and flag to store when the Builder is used as a pre-fence visitor.
+  int32_t length_with_flag_ = 0u;
+};
+
+inline size_t StringBuilderAppend::Builder::Uint64Length(uint64_t value)  {
+  if (value == 0u) {
+    return 1u;
+  }
+  // Calculate floor(log2(value)).
+  size_t log2_value = BitSizeOf<uint64_t>() - 1u - CLZ(value);
+  // Calculate an estimate of floor(log10(value)).
+  //   log10(2) = 0.301029996 > 0.296875 = 19/64
+  //   floor(log10(v)) == floor(log2(v) * log10(2))
+  //                   >= floor(log2(v) * 19/64)
+  //                   >= floor(floor(log2(v)) * 19/64)
+  // This estimate is no more than one off from the actual value because log2(value) < 64 and thus
+  //   log2(v) * log10(2) - log2(v) * 19/64 < 64*(log10(2) - 19/64)
+  // for the first approximation and
+  //   log2(v) * 19/64 - floor(log2(v)) * 19/64 < 19/64
+  // for the second one. Together,
+  //   64*(log10(2) - 19/64) + 19/64 = 0.56278 < 1 .
+  size_t log10_value_estimate = log2_value * 19u / 64u;
+  static constexpr uint64_t bounds[] = {
+      UINT64_C(9),
+      UINT64_C(99),
+      UINT64_C(999),
+      UINT64_C(9999),
+      UINT64_C(99999),
+      UINT64_C(999999),
+      UINT64_C(9999999),
+      UINT64_C(99999999),
+      UINT64_C(999999999),
+      UINT64_C(9999999999),
+      UINT64_C(99999999999),
+      UINT64_C(999999999999),
+      UINT64_C(9999999999999),
+      UINT64_C(99999999999999),
+      UINT64_C(999999999999999),
+      UINT64_C(9999999999999999),
+      UINT64_C(99999999999999999),
+      UINT64_C(999999999999999999),
+      UINT64_C(9999999999999999999),
+  };
+  // Add 1 for the lowest digit, add another 1 if the estimate was too low.
+  DCHECK_LT(log10_value_estimate, std::size(bounds));
+  size_t adjustment = (value > bounds[log10_value_estimate]) ? 2u : 1u;
+  return log10_value_estimate + adjustment;
+}
+
+template <typename CharType, size_t size>
+inline CharType* StringBuilderAppend::Builder::AppendLiteral(ObjPtr<mirror::String> new_string,
+                                                             CharType* data,
+                                                             const char (&literal)[size]) {
+  static_assert(size >= 2, "We need something to append.");
+
+  // Literals are zero-terminated.
+  constexpr size_t length = size - 1u;
+  DCHECK_EQ(literal[length], '\0');
+
+  DCHECK_LE(length, RemainingSpace(new_string, data));
+  for (size_t i = 0; i != length; ++i) {
+    data[i] = literal[i];
+  }
+  return data + length;
+}
+
+template <typename CharType>
+inline CharType* StringBuilderAppend::Builder::AppendString(ObjPtr<mirror::String> new_string,
+                                                            CharType* data,
+                                                            ObjPtr<mirror::String> str) {
+  size_t length = dchecked_integral_cast<size_t>(str->GetLength());
+  DCHECK_LE(length, RemainingSpace(new_string, data));
+  if (sizeof(CharType) == sizeof(uint8_t) || str->IsCompressed()) {
+    DCHECK(str->IsCompressed());
+    const uint8_t* value = str->GetValueCompressed();
+    for (size_t i = 0; i != length; ++i) {
+      data[i] = value[i];
+    }
+  } else {
+    const uint16_t* value = str->GetValue();
+    for (size_t i = 0; i != length; ++i) {
+      data[i] = dchecked_integral_cast<CharType>(value[i]);
+    }
+  }
+  return data + length;
+}
+
+template <typename CharType>
+inline CharType* StringBuilderAppend::Builder::AppendInt64(ObjPtr<mirror::String> new_string,
+                                                           CharType* data,
+                                                           int64_t value) {
+  DCHECK_GE(RemainingSpace(new_string, data), Int64Length(value));
+  uint64_t v = static_cast<uint64_t>(value);
+  if (value < 0) {
+    *data = '-';
+    ++data;
+    v = -v;
+  }
+  size_t length = Uint64Length(v);
+  // Write the digits from the end, do not write the most significant digit
+  // in the loop to avoid an unnecessary division.
+  for (size_t i = 1; i != length; ++i) {
+    uint64_t digit = v % UINT64_C(10);
+    v /= UINT64_C(10);
+    data[length - i] = '0' + static_cast<char>(digit);
+  }
+  DCHECK_LE(v, 10u);
+  *data = '0' + static_cast<char>(v);
+  return data + length;
+}
+
+inline int32_t StringBuilderAppend::Builder::CalculateLengthWithFlag() {
+  static_assert(static_cast<size_t>(Argument::kEnd) == 0u, "kEnd must be 0.");
+  bool compressible = mirror::kUseStringCompression;
+  uint64_t length = 0u;
+  const uint32_t* current_arg = args_;
+  for (uint32_t f = format_; f != 0u; f >>= kBitsPerArg) {
+    DCHECK_LE(f & kArgMask, static_cast<uint32_t>(Argument::kLast));
+    switch (static_cast<Argument>(f & kArgMask)) {
+      case Argument::kString: {
+        Handle<mirror::String> str =
+            hs_.NewHandle(reinterpret_cast32<mirror::String*>(*current_arg));
+        if (str != nullptr) {
+          length += str->GetLength();
+          compressible = compressible && str->IsCompressed();
+        } else {
+          length += kNullLength;
+        }
+        break;
+      }
+      case Argument::kBoolean: {
+        length += (*current_arg != 0u) ? kTrueLength : kFalseLength;
+        break;
+      }
+      case Argument::kChar: {
+        length += 1u;
+        compressible = compressible &&
+            mirror::String::IsASCII(reinterpret_cast<const uint16_t*>(current_arg)[0]);
+        break;
+      }
+      case Argument::kInt: {
+        length += Int64Length(static_cast<int32_t>(*current_arg));
+        break;
+      }
+      case Argument::kLong: {
+        current_arg = AlignUp(current_arg, sizeof(int64_t));
+        length += Int64Length(*reinterpret_cast<const int64_t*>(current_arg));
+        ++current_arg;  // Skip the low word, let the common code skip the high word.
+        break;
+      }
+
+      case Argument::kStringBuilder:
+      case Argument::kCharArray:
+      case Argument::kObject:
+      case Argument::kFloat:
+      case Argument::kDouble:
+        LOG(FATAL) << "Unimplemented arg format: 0x" << std::hex
+            << (f & kArgMask) << " full format: 0x" << std::hex << format_;
+        UNREACHABLE();
+      default:
+        LOG(FATAL) << "Unexpected arg format: 0x" << std::hex
+            << (f & kArgMask) << " full format: 0x" << std::hex << format_;
+        UNREACHABLE();
+    }
+    ++current_arg;
+    DCHECK_LE(hs_.NumberOfReferences(), kMaxArgs);
+  }
+
+  if (length > std::numeric_limits<int32_t>::max()) {
+    // We cannot allocate memory for the entire result.
+    hs_.Self()->ThrowNewException("Ljava/lang/OutOfMemoryError;",
+                                  "Out of memory for StringBuilder append.");
+    return -1;
+  }
+
+  length_with_flag_ = mirror::String::GetFlaggedCount(length, compressible);
+  return length_with_flag_;
+}
+
+template <typename CharType>
+inline void StringBuilderAppend::Builder::StoreData(ObjPtr<mirror::String> new_string,
+                                                    CharType* data) const {
+  size_t handle_index = 0u;
+  const uint32_t* current_arg = args_;
+  for (uint32_t f = format_; f != 0u; f >>= kBitsPerArg) {
+    DCHECK_LE(f & kArgMask, static_cast<uint32_t>(Argument::kLast));
+    switch (static_cast<Argument>(f & kArgMask)) {
+      case Argument::kString: {
+        ObjPtr<mirror::String> str =
+            ObjPtr<mirror::String>::DownCast(hs_.GetReference(handle_index));
+        ++handle_index;
+        if (str != nullptr) {
+          data = AppendString(new_string, data, str);
+        } else {
+          data = AppendLiteral(new_string, data, kNull);
+        }
+        break;
+      }
+      case Argument::kBoolean: {
+        if (*current_arg != 0u) {
+          data = AppendLiteral(new_string, data, kTrue);
+        } else {
+          data = AppendLiteral(new_string, data, kFalse);
+        }
+        break;
+      }
+      case Argument::kChar: {
+        DCHECK_GE(RemainingSpace(new_string, data), 1u);
+        *data = *reinterpret_cast<const CharType*>(current_arg);
+        ++data;
+        break;
+      }
+      case Argument::kInt: {
+        data = AppendInt64(new_string, data, static_cast<int32_t>(*current_arg));
+        break;
+      }
+      case Argument::kLong: {
+        current_arg = AlignUp(current_arg, sizeof(int64_t));
+        data = AppendInt64(new_string, data, *reinterpret_cast<const int64_t*>(current_arg));
+        ++current_arg;  // Skip the low word, let the common code skip the high word.
+        break;
+      }
+
+      case Argument::kStringBuilder:
+      case Argument::kCharArray:
+      case Argument::kFloat:
+      case Argument::kDouble:
+        LOG(FATAL) << "Unimplemented arg format: 0x" << std::hex
+            << (f & kArgMask) << " full format: 0x" << std::hex << format_;
+        UNREACHABLE();
+      default:
+        LOG(FATAL) << "Unexpected arg format: 0x" << std::hex
+            << (f & kArgMask) << " full format: 0x" << std::hex << format_;
+        UNREACHABLE();
+    }
+    ++current_arg;
+    DCHECK_LE(handle_index, hs_.NumberOfReferences());
+  }
+  DCHECK_EQ(RemainingSpace(new_string, data), 0u) << std::hex << format_;
+}
+
+inline void StringBuilderAppend::Builder::operator()(ObjPtr<mirror::Object> obj,
+                                                     size_t usable_size ATTRIBUTE_UNUSED) const {
+  ObjPtr<mirror::String> new_string = ObjPtr<mirror::String>::DownCast(obj);
+  new_string->SetCount(length_with_flag_);
+  if (mirror::String::IsCompressed(length_with_flag_)) {
+    StoreData(new_string, new_string->GetValueCompressed());
+  } else {
+    StoreData(new_string, new_string->GetValue());
+  }
+}
+
+ObjPtr<mirror::String> StringBuilderAppend::AppendF(uint32_t format,
+                                                    const uint32_t* args,
+                                                    Thread* self) {
+  Builder builder(format, args, self);
+  self->AssertNoPendingException();
+  int32_t length_with_flag = builder.CalculateLengthWithFlag();
+  if (self->IsExceptionPending()) {
+    return nullptr;
+  }
+  gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
+  ObjPtr<mirror::String> result = mirror::String::Alloc(
+      self, length_with_flag, allocator_type, builder);
+
+  return result;
+}
+
+}  // namespace art
diff --git a/runtime/string_builder_append.h b/runtime/string_builder_append.h
new file mode 100644
index 0000000..fee6419
--- /dev/null
+++ b/runtime/string_builder_append.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_STRING_BUILDER_APPEND_H_
+#define ART_RUNTIME_STRING_BUILDER_APPEND_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/bit_utils.h"
+#include "base/locks.h"
+#include "obj_ptr.h"
+
+namespace art {
+
+class Thread;
+
+namespace mirror {
+class String;
+}  // namespace mirror
+
+class StringBuilderAppend {
+ public:
+  enum class Argument : uint8_t {
+    kEnd = 0u,
+    kObject,
+    kStringBuilder,
+    kString,
+    kCharArray,
+    kBoolean,
+    kChar,
+    kInt,
+    kLong,
+    kFloat,
+    kDouble,
+    kLast = kDouble
+  };
+
+  static constexpr size_t kBitsPerArg =
+      MinimumBitsToStore(static_cast<size_t>(Argument::kLast));
+  static constexpr size_t kMaxArgs = BitSizeOf<uint32_t>() / kBitsPerArg;
+  static_assert(kMaxArgs * kBitsPerArg == BitSizeOf<uint32_t>(), "Expecting no extra bits.");
+  static constexpr uint32_t kArgMask = MaxInt<uint32_t>(kBitsPerArg);
+
+  static ObjPtr<mirror::String> AppendF(uint32_t format, const uint32_t* args, Thread* self)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+  class Builder;
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_STRING_BUILDER_APPEND_H_
diff --git a/runtime/subtype_check_bits.h b/runtime/subtype_check_bits.h
index 23d8ac3..7e73afb 100644
--- a/runtime/subtype_check_bits.h
+++ b/runtime/subtype_check_bits.h
@@ -57,8 +57,8 @@
  * See subtype_check.h and subtype_check_info.h for more details.
  */
 BITSTRUCT_DEFINE_START(SubtypeCheckBits, /*size=*/ BitString::BitStructSizeOf() + 1u)
-  BitStructField<BitString, /*lsb=*/ 0> bitstring_;
-  BitStructUint</*lsb=*/ BitString::BitStructSizeOf(), /*width=*/ 1> overflow_;
+  BITSTRUCT_FIELD(BitString, /*lsb=*/ 0, /*width=*/ BitString::BitStructSizeOf()) bitstring_;
+  BITSTRUCT_UINT(/*lsb=*/ BitString::BitStructSizeOf(), /*width=*/ 1) overflow_;
 BITSTRUCT_DEFINE_END(SubtypeCheckBits);
 
 }  // namespace art
diff --git a/runtime/subtype_check_bits_and_status.h b/runtime/subtype_check_bits_and_status.h
index eec6e21..e774955 100644
--- a/runtime/subtype_check_bits_and_status.h
+++ b/runtime/subtype_check_bits_and_status.h
@@ -68,11 +68,13 @@
 static constexpr size_t kClassStatusBitSize = MinimumBitsToStore(enum_cast<>(ClassStatus::kLast));
 static_assert(kClassStatusBitSize == 4u, "ClassStatus should need 4 bits.");
 BITSTRUCT_DEFINE_START(SubtypeCheckBitsAndStatus, BitSizeOf<BitString::StorageType>())
-  BitStructField<SubtypeCheckBits, /*lsb=*/ 0> subtype_check_info_;
-  BitStructField<ClassStatus,
-                 /*lsb=*/ SubtypeCheckBits::BitStructSizeOf(),
-                 /*width=*/ kClassStatusBitSize> status_;
-  BitStructInt</*lsb=*/ 0, /*width=*/ BitSizeOf<BitString::StorageType>()> int32_alias_;
+  BITSTRUCT_FIELD(SubtypeCheckBits,
+                  /*lsb=*/ 0,
+                  /*width=*/ SubtypeCheckBits::BitStructSizeOf()) subtype_check_info_;
+  BITSTRUCT_FIELD(ClassStatus,
+                  /*lsb=*/ SubtypeCheckBits::BitStructSizeOf(),
+                  /*width=*/ kClassStatusBitSize) status_;
+  BITSTRUCT_INT(/*lsb=*/ 0, /*width=*/ BitSizeOf<BitString::StorageType>()) int32_alias_;
 BITSTRUCT_DEFINE_END(SubtypeCheckBitsAndStatus);
 
 // Use the spare alignment from "ClassStatus" to store all the new SubtypeCheckInfo data.
diff --git a/runtime/suspend_reason.h b/runtime/suspend_reason.h
index af2be10..7f377d5 100644
--- a/runtime/suspend_reason.h
+++ b/runtime/suspend_reason.h
@@ -22,12 +22,10 @@
 namespace art {
 
 // The various reasons that we might be suspending a thread.
-enum class SuspendReason {
+enum class SuspendReason : char {
   // Suspending for internal reasons (e.g. GC, stack trace, etc.).
   // TODO Split this into more descriptive sections.
   kInternal,
-  // Suspending for debugger (code in Dbg::*, runtime/jdwp/, etc.).
-  kForDebugger,
   // Suspending due to non-runtime, user controlled, code. (For example Thread#Suspend()).
   kForUserCode,
 };
diff --git a/runtime/thread-current-inl.h b/runtime/thread-current-inl.h
index 9241b1f..d12a45c 100644
--- a/runtime/thread-current-inl.h
+++ b/runtime/thread-current-inl.h
@@ -19,8 +19,8 @@
 
 #include "thread.h"
 
-#ifdef ART_TARGET_ANDROID
-#include <bionic_tls.h>  // Access to our own TLS slot.
+#ifdef __BIONIC__
+#include <bionic/tls.h>  // Access to our own TLS slot.
 #endif
 
 #include <pthread.h>
@@ -33,10 +33,10 @@
   if (!is_started_) {
     return nullptr;
   } else {
-#ifdef ART_TARGET_ANDROID
+#ifdef __BIONIC__
     void* thread = __get_tls()[TLS_SLOT_ART_THREAD_SELF];
 #else
-    void* thread = pthread_getspecific(Thread::pthread_key_self_);
+    Thread* thread = Thread::self_tls_;
 #endif
     return reinterpret_cast<Thread*>(thread);
   }
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 00f882e..a6da16f 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -27,6 +27,7 @@
 #include "jni/jni_env_ext.h"
 #include "managed_stack-inl.h"
 #include "obj_ptr.h"
+#include "suspend_reason.h"
 #include "thread-current-inl.h"
 #include "thread_pool.h"
 
@@ -113,7 +114,8 @@
   }
   union StateAndFlags old_state_and_flags;
   old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
-  CHECK_NE(old_state_and_flags.as_struct.state, kRunnable);
+  CHECK_NE(old_state_and_flags.as_struct.state, kRunnable) << new_state << " " << *this << " "
+      << *Thread::Current();
   tls32_.state_and_flags.as_struct.state = new_state;
   return static_cast<ThreadState>(old_state_and_flags.as_struct.state);
 }
diff --git a/runtime/thread.cc b/runtime/thread.cc
index be0e30a..77b9f4f 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -35,6 +35,7 @@
 #include <list>
 #include <sstream>
 
+#include "android-base/file.h"
 #include "android-base/stringprintf.h"
 #include "android-base/strings.h"
 
@@ -88,6 +89,7 @@
 #include "native_stack_dump.h"
 #include "nativehelper/scoped_local_ref.h"
 #include "nativehelper/scoped_utf_chars.h"
+#include "nterp_helpers.h"
 #include "nth_caller_visitor.h"
 #include "oat_quick_method_header.h"
 #include "obj_ptr-inl.h"
@@ -97,6 +99,7 @@
 #include "quick_exception_handler.h"
 #include "read_barrier-inl.h"
 #include "reflection.h"
+#include "reflective_handle_scope-inl.h"
 #include "runtime-inl.h"
 #include "runtime.h"
 #include "runtime_callbacks.h"
@@ -130,6 +133,9 @@
 const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
 bool (*Thread::is_sensitive_thread_hook_)() = nullptr;
 Thread* Thread::jit_sensitive_thread_ = nullptr;
+#ifndef __BIONIC__
+thread_local Thread* Thread::self_tls_ = nullptr;
+#endif
 
 static constexpr bool kVerifyImageObjectsMarked = kIsDebugBuild;
 
@@ -580,6 +586,14 @@
   InitTid();
 }
 
+void Thread::DeleteJPeer(JNIEnv* env) {
+  // Make sure nothing can observe both opeer and jpeer set at the same time.
+  jobject old_jpeer = tlsPtr_.jpeer;
+  CHECK(old_jpeer != nullptr);
+  tlsPtr_.jpeer = nullptr;
+  env->DeleteGlobalRef(old_jpeer);
+}
+
 void* Thread::CreateCallback(void* arg) {
   Thread* self = reinterpret_cast<Thread*>(arg);
   Runtime* runtime = Runtime::Current();
@@ -609,8 +623,8 @@
     // Copy peer into self, deleting global reference when done.
     CHECK(self->tlsPtr_.jpeer != nullptr);
     self->tlsPtr_.opeer = soa.Decode<mirror::Object>(self->tlsPtr_.jpeer).Ptr();
-    self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer);
-    self->tlsPtr_.jpeer = nullptr;
+    // Make sure nothing can observe both opeer and jpeer set at the same time.
+    self->DeleteJPeer(self->GetJniEnv());
     self->SetThreadName(self->GetThreadName()->ToModifiedUtf8().c_str());
 
     ArtField* priorityField = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority);
@@ -776,7 +790,9 @@
 #else
           1u;
 #endif
-      volatile char space[kPageSize - (kAsanMultiplier * 256)];
+      // Keep space uninitialized as it can overflow the stack otherwise (should Clang actually
+      // auto-initialize this local variable).
+      volatile char space[kPageSize - (kAsanMultiplier * 256)] __attribute__((uninitialized));
       char sink ATTRIBUTE_UNUSED = space[zero];  // NOLINT
       // Remove tag from the pointer. Nop in non-hwasan builds.
       uintptr_t addr = reinterpret_cast<uintptr_t>(__hwasan_tag_pointer(space, 0));
@@ -887,9 +903,9 @@
     MutexLock mu(self, *Locks::runtime_shutdown_lock_);
     runtime->EndThreadBirth();
   }
-  // Manually delete the global reference since Thread::Init will not have been run.
-  env->DeleteGlobalRef(child_thread->tlsPtr_.jpeer);
-  child_thread->tlsPtr_.jpeer = nullptr;
+  // Manually delete the global reference since Thread::Init will not have been run. Make sure
+  // nothing can observe both opeer and jpeer set at the same time.
+  child_thread->DeleteJPeer(env);
   delete child_thread;
   child_thread = nullptr;
   // TODO: remove from thread group?
@@ -932,10 +948,11 @@
     interpreter::InitInterpreterTls(this);
   }
 
-#ifdef ART_TARGET_ANDROID
+#ifdef __BIONIC__
   __get_tls()[TLS_SLOT_ART_THREAD_SELF] = this;
 #else
   CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
+  Thread::self_tls_ = this;
 #endif
   DCHECK_EQ(Thread::Current(), this);
 
@@ -1447,9 +1464,6 @@
 
   tls32_.suspend_count += delta;
   switch (reason) {
-    case SuspendReason::kForDebugger:
-      tls32_.debug_suspend_count += delta;
-      break;
     case SuspendReason::kForUserCode:
       tls32_.user_code_suspend_count += delta;
       break;
@@ -1773,7 +1787,8 @@
   // 1:cpuacct,cpu:/
   // We want the third field from the line whose second field contains the "cpu" token.
   std::string cgroup_file;
-  if (!ReadFileToString(StringPrintf("/proc/self/task/%d/cgroup", tid), &cgroup_file)) {
+  if (!android::base::ReadFileToString(StringPrintf("/proc/self/task/%d/cgroup", tid),
+                                       &cgroup_file)) {
     return "";
   }
   std::vector<std::string> cgroup_lines;
@@ -1909,7 +1924,8 @@
 
   // Grab the scheduler stats for this thread.
   std::string scheduler_stats;
-  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)
+  if (android::base::ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid),
+                                      &scheduler_stats)
       && !scheduler_stats.empty()) {
     scheduler_stats = android::base::Trim(scheduler_stats);  // Lose the trailing '\n'.
   } else {
@@ -2141,23 +2157,12 @@
   // assumption that there is no exception pending on entry. Thus, stash any pending exception.
   // Thread::Current() instead of this in case a thread is dumping the stack of another suspended
   // thread.
-  StackHandleScope<1> scope(Thread::Current());
-  Handle<mirror::Throwable> exc;
-  bool have_exception = false;
-  if (IsExceptionPending()) {
-    exc = scope.NewHandle(GetException());
-    const_cast<Thread*>(this)->ClearException();
-    have_exception = true;
-  }
+  ScopedExceptionStorage ses(Thread::Current());
 
   std::unique_ptr<Context> context(Context::Create());
   StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
                           !tls32_.throwing_OutOfMemoryError, check_suspended, dump_locks);
   dumper.WalkStack();
-
-  if (have_exception) {
-    const_cast<Thread*>(this)->SetException(exc.Get());
-  }
 }
 
 void Thread::DumpStack(std::ostream& os,
@@ -2177,7 +2182,6 @@
   if (safe_to_dump || force_dump_stack) {
     // If we're currently in native code, dump that stack before dumping the managed stack.
     if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) {
-      DumpKernelStack(os, GetTid(), "  kernel: ", false);
       ArtMethod* method =
           GetCurrentMethod(nullptr,
                            /*check_suspended=*/ !force_dump_stack,
@@ -2198,10 +2202,11 @@
     LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's "
         "going to use a pthread_key_create destructor?): " << *self;
     CHECK(is_started_);
-#ifdef ART_TARGET_ANDROID
+#ifdef __BIONIC__
     __get_tls()[TLS_SLOT_ART_THREAD_SELF] = self;
 #else
     CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
+    Thread::self_tls_ = self;
 #endif
     self->tls32_.thread_exit_check_count = 1;
   } else {
@@ -2232,6 +2237,9 @@
   if (pthread_getspecific(pthread_key_self_) != nullptr) {
     LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr";
   }
+#ifndef __BIONIC__
+  CHECK(Thread::self_tls_ == nullptr);
+#endif
 }
 
 void Thread::FinishStartup() {
@@ -2290,7 +2298,8 @@
       is_runtime_thread_(false) {
   wait_mutex_ = new Mutex("a thread wait mutex", LockLevel::kThreadWaitLock);
   wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
-  tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
+  tlsPtr_.instrumentation_stack =
+      new std::map<uintptr_t, instrumentation::InstrumentationStackFrame>;
   tlsPtr_.name = new std::string(kThreadNameDuringStartup);
 
   static_assert((sizeof(Thread) % 4) == 0U,
@@ -2313,6 +2322,7 @@
   tlsPtr_.thread_local_mark_stack = nullptr;
   tls32_.is_transitioning_to_runnable = false;
   tls32_.use_mterp = false;
+  ResetTlab();
 }
 
 void Thread::NotifyInTheadList() {
@@ -2439,9 +2449,11 @@
   {
     ScopedObjectAccess soa(self);
     Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
-    if (kUseReadBarrier) {
-      Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this);
-    }
+  }
+  // Mark-stack revocation must be performed at the very end. No
+  // checkpoint/flip-function or read-barrier should be called after this.
+  if (kUseReadBarrier) {
+    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this);
   }
 }
 
@@ -2462,6 +2474,12 @@
   CHECK(tlsPtr_.flip_function == nullptr);
   CHECK_EQ(tls32_.is_transitioning_to_runnable, false);
 
+  if (kUseReadBarrier) {
+    Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()
+        ->AssertNoThreadMarkStackMapping(this);
+    gc::accounting::AtomicStack<mirror::Object>* tl_mark_stack = GetThreadLocalMarkStack();
+    CHECK(tl_mark_stack == nullptr) << "mark-stack: " << tl_mark_stack;
+  }
   // Make sure we processed all deoptimization requests.
   CHECK(tlsPtr_.deoptimization_context_stack == nullptr) << "Missed deoptimization";
   CHECK(tlsPtr_.frame_id_to_shadow_frame == nullptr) <<
@@ -2481,9 +2499,6 @@
     CleanupCpu();
   }
 
-  if (tlsPtr_.single_step_control != nullptr) {
-    delete tlsPtr_.single_step_control;
-  }
   delete tlsPtr_.instrumentation_stack;
   delete tlsPtr_.name;
   delete tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
@@ -3284,7 +3299,7 @@
       ++i;
     }
     ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(exception.Get()));
-    InvokeWithJValues(soa, ref.get(), jni::EncodeArtMethod(exception_init_method), jv_args);
+    InvokeWithJValues(soa, ref.get(), exception_init_method, jv_args);
     if (LIKELY(!IsExceptionPending())) {
       SetException(exception.Get());
     }
@@ -3356,6 +3371,7 @@
       return; \
     }
   JNI_ENTRY_POINT_INFO(pDlsymLookup)
+  JNI_ENTRY_POINT_INFO(pDlsymLookupCritical)
 #undef JNI_ENTRY_POINT_INFO
 
 #define QUICK_ENTRY_POINT_INFO(x) \
@@ -3547,7 +3563,7 @@
     // Instrumentation may cause GC so keep the exception object safe.
     StackHandleScope<1> hs(this);
     HandleWrapperObjPtr<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception));
-    instrumentation->ExceptionThrownEvent(this, exception.Ptr());
+    instrumentation->ExceptionThrownEvent(this, exception);
   }
   // Does instrumentation need to deoptimize the stack or otherwise go to interpreter for something?
   // Note: we do this *after* reporting the exception to instrumentation in case it now requires
@@ -3559,8 +3575,8 @@
   // instrumentation trampolines (for example with DDMS tracing). That forces us to do deopt later
   // and see every frame being popped. We don't need to handle it any differently.
   ShadowFrame* cf;
-  bool force_deopt;
-  {
+  bool force_deopt = false;
+  if (Runtime::Current()->AreNonStandardExitsEnabled() || kIsDebugBuild) {
     NthCallerVisitor visitor(this, 0, false);
     visitor.WalkStack();
     cf = visitor.GetCurrentShadowFrame();
@@ -3570,16 +3586,16 @@
     bool force_frame_pop = cf != nullptr && cf->GetForcePopFrame();
     bool force_retry_instr = cf != nullptr && cf->GetForceRetryInstruction();
     if (kIsDebugBuild && force_frame_pop) {
+      DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
       NthCallerVisitor penultimate_visitor(this, 1, false);
       penultimate_visitor.WalkStack();
       ShadowFrame* penultimate_frame = penultimate_visitor.GetCurrentShadowFrame();
       if (penultimate_frame == nullptr) {
         penultimate_frame = FindDebuggerShadowFrame(penultimate_visitor.GetFrameId());
       }
-      DCHECK(penultimate_frame != nullptr &&
-             penultimate_frame->GetForceRetryInstruction())
-          << "Force pop frame without retry instruction found. penultimate frame is null: "
-          << (penultimate_frame == nullptr ? "true" : "false");
+    }
+    if (force_retry_instr) {
+      DCHECK(Runtime::Current()->AreNonStandardExitsEnabled());
     }
     force_deopt = force_frame_pop || force_retry_instr;
   }
@@ -3607,7 +3623,7 @@
           method_type);
       artDeoptimize(this);
       UNREACHABLE();
-    } else {
+    } else if (visitor.caller != nullptr) {
       LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
                    << visitor.caller->PrettyMethod();
     }
@@ -3697,6 +3713,8 @@
     ShadowFrame* shadow_frame = GetCurrentShadowFrame();
     if (shadow_frame != nullptr) {
       VisitShadowFrame(shadow_frame);
+    } else if (GetCurrentOatQuickMethodHeader()->IsNterpMethodHeader()) {
+      VisitNterpFrame();
     } else {
       VisitQuickFrame();
     }
@@ -3708,7 +3726,6 @@
     VisitDeclaringClass(m);
     DCHECK(m != nullptr);
     size_t num_regs = shadow_frame->NumberOfVRegs();
-    DCHECK(m->IsNative() || shadow_frame->HasReferenceArray());
     // handle scope for JNI or References for interpreter.
     for (size_t reg = 0; reg < num_regs; ++reg) {
       mirror::Object* ref = shadow_frame->GetVRegReference(reg);
@@ -3759,13 +3776,39 @@
         }
       }
       mirror::Object* new_ref = klass.Ptr();
-      visitor_(&new_ref, /* vreg= */ -1, this);
+      visitor_(&new_ref, /* vreg= */ JavaFrameRootInfo::kMethodDeclaringClass, this);
       if (new_ref != klass) {
         method->CASDeclaringClass(klass.Ptr(), new_ref->AsClass());
       }
     }
   }
 
+  void VisitNterpFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
+    ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
+    StackReference<mirror::Object>* vreg_ref_base =
+        reinterpret_cast<StackReference<mirror::Object>*>(NterpGetReferenceArray(cur_quick_frame));
+    StackReference<mirror::Object>* vreg_int_base =
+        reinterpret_cast<StackReference<mirror::Object>*>(NterpGetRegistersArray(cur_quick_frame));
+    CodeItemDataAccessor accessor((*cur_quick_frame)->DexInstructionData());
+    const uint16_t num_regs = accessor.RegistersSize();
+    // An nterp frame has two arrays: a dex register array and a reference array
+    // that shadows the dex register array but only containing references
+    // (non-reference dex registers have nulls). See nterp_helpers.cc.
+    for (size_t reg = 0; reg < num_regs; ++reg) {
+      StackReference<mirror::Object>* ref_addr = vreg_ref_base + reg;
+      mirror::Object* ref = ref_addr->AsMirrorPtr();
+      if (ref != nullptr) {
+        mirror::Object* new_ref = ref;
+        visitor_(&new_ref, reg, this);
+        if (new_ref != ref) {
+          ref_addr->Assign(new_ref);
+          StackReference<mirror::Object>* int_addr = vreg_int_base + reg;
+          int_addr->Assign(new_ref);
+        }
+      }
+    }
+  }
+
   template <typename T>
   ALWAYS_INLINE
   inline void VisitQuickFrameWithVregCallback() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -3781,9 +3824,9 @@
       StackReference<mirror::Object>* vreg_base =
           reinterpret_cast<StackReference<mirror::Object>*>(cur_quick_frame);
       uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
-      CodeInfo code_info(method_header, kPrecise
-          ? CodeInfo::DecodeFlags::AllTables  // We will need dex register maps.
-          : CodeInfo::DecodeFlags::GcMasksOnly);
+      CodeInfo code_info = kPrecise
+          ? CodeInfo(method_header)  // We will need dex register maps.
+          : CodeInfo::DecodeGcMasksOnly(method_header);
       StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
       DCHECK(map.IsValid());
 
@@ -3832,7 +3875,7 @@
         mirror::Object* ref = ref_addr->AsMirrorPtr();
         if (ref != nullptr) {
           mirror::Object* new_ref = ref;
-          visitor_(&new_ref, /* vreg= */ -1, this);
+          visitor_(&new_ref, /* vreg= */ JavaFrameRootInfo::kProxyReferenceArgument, this);
           if (ref != new_ref) {
             ref_addr->Assign(new_ref);
           }
@@ -3863,7 +3906,7 @@
                       size_t stack_index ATTRIBUTE_UNUSED,
                       const StackVisitor* stack_visitor)
           REQUIRES_SHARED(Locks::mutator_lock_) {
-        visitor(ref, -1, stack_visitor);
+        visitor(ref, JavaFrameRootInfo::kImpreciseVreg, stack_visitor);
       }
 
       ALWAYS_INLINE
@@ -3871,7 +3914,7 @@
                          size_t register_index ATTRIBUTE_UNUSED,
                          const StackVisitor* stack_visitor)
           REQUIRES_SHARED(Locks::mutator_lock_) {
-        visitor(ref, -1, stack_visitor);
+        visitor(ref, JavaFrameRootInfo::kImpreciseVreg, stack_visitor);
       }
 
       RootVisitor& visitor;
@@ -3889,6 +3932,7 @@
             code_info(_code_info),
             dex_register_map(code_info.GetDexRegisterMapOf(map)),
             visitor(_visitor) {
+        DCHECK_EQ(dex_register_map.size(), number_of_dex_registers);
       }
 
       // TODO: If necessary, we should consider caching a reverse map instead of the linear
@@ -3908,8 +3952,8 @@
         }
 
         if (!found) {
-          // If nothing found, report with -1.
-          visitor(ref, -1, stack_visitor);
+          // If nothing found, report with unknown.
+          visitor(ref, JavaFrameRootInfo::kUnknownVreg, stack_visitor);
         }
       }
 
@@ -3958,6 +4002,14 @@
   const uint32_t tid_;
 };
 
+void Thread::VisitReflectiveTargets(ReflectiveValueVisitor* visitor) {
+  for (BaseReflectiveHandleScope* brhs = GetTopReflectiveHandleScope();
+       brhs != nullptr;
+       brhs = brhs->GetLink()) {
+    brhs->VisitTargets(visitor);
+  }
+}
+
 template <bool kPrecise>
 void Thread::VisitRoots(RootVisitor* visitor) {
   const pid_t thread_id = GetThreadId();
@@ -3974,9 +4026,6 @@
   tlsPtr_.jni_env->VisitJniLocalRoots(visitor, RootInfo(kRootJNILocal, thread_id));
   tlsPtr_.jni_env->VisitMonitorRoots(visitor, RootInfo(kRootJNIMonitor, thread_id));
   HandleScopeVisitRoots(visitor, thread_id);
-  if (tlsPtr_.debug_invoke_req != nullptr) {
-    tlsPtr_.debug_invoke_req->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id));
-  }
   // Visit roots for deoptimization.
   if (tlsPtr_.stacked_shadow_frame_record != nullptr) {
     RootCallbackVisitor visitor_to_callback(visitor, thread_id);
@@ -4018,8 +4067,43 @@
   RootCallbackVisitor visitor_to_callback(visitor, thread_id);
   ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, &context, visitor_to_callback);
   mapper.template WalkStack<StackVisitor::CountTransitions::kNo>(false);
-  for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
-    visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id));
+  for (auto& entry : *GetInstrumentationStack()) {
+    visitor->VisitRootIfNonNull(&entry.second.this_object_, RootInfo(kRootVMInternal, thread_id));
+  }
+}
+
+void Thread::SweepInterpreterCache(IsMarkedVisitor* visitor) {
+  for (InterpreterCache::Entry& entry : GetInterpreterCache()->GetArray()) {
+    const Instruction* inst = reinterpret_cast<const Instruction*>(entry.first);
+    if (inst != nullptr) {
+      if (inst->Opcode() == Instruction::NEW_INSTANCE ||
+          inst->Opcode() == Instruction::CHECK_CAST ||
+          inst->Opcode() == Instruction::INSTANCE_OF ||
+          inst->Opcode() == Instruction::NEW_ARRAY ||
+          inst->Opcode() == Instruction::CONST_CLASS) {
+        mirror::Class* cls = reinterpret_cast<mirror::Class*>(entry.second);
+        if (cls == nullptr || cls == Runtime::GetWeakClassSentinel()) {
+          // Entry got deleted in a previous sweep.
+          continue;
+        }
+        Runtime::ProcessWeakClass(
+            reinterpret_cast<GcRoot<mirror::Class>*>(&entry.second),
+            visitor,
+            Runtime::GetWeakClassSentinel());
+      } else if (inst->Opcode() == Instruction::CONST_STRING ||
+                 inst->Opcode() == Instruction::CONST_STRING_JUMBO) {
+        mirror::Object* object = reinterpret_cast<mirror::Object*>(entry.second);
+        mirror::Object* new_object = visitor->IsMarked(object);
+        // We know the string is marked because it's a strongly-interned string that
+        // is always alive (see b/117621117 for trying to make those strings weak).
+        // The IsMarked implementation of the CMS collector returns
+        // null for newly allocated objects, but we know those haven't moved. Therefore,
+        // only update the entry if we get a different non-null string.
+        if (new_object != nullptr && new_object != object) {
+          entry.second = reinterpret_cast<size_t>(new_object);
+        }
+      }
+    }
   }
 }
 
@@ -4081,8 +4165,12 @@
   tlsPtr_.thread_local_objects = 0;
 }
 
+void Thread::ResetTlab() {
+  SetTlab(nullptr, nullptr, nullptr);
+}
+
 bool Thread::HasTlab() const {
-  bool has_tlab = tlsPtr_.thread_local_pos != nullptr;
+  const bool has_tlab = tlsPtr_.thread_local_pos != nullptr;
   if (has_tlab) {
     DCHECK(tlsPtr_.thread_local_start != nullptr && tlsPtr_.thread_local_end != nullptr);
   } else {
@@ -4116,37 +4204,6 @@
   return mprotect(pregion, kStackOverflowProtectedSize, PROT_READ|PROT_WRITE) == 0;
 }
 
-void Thread::ActivateSingleStepControl(SingleStepControl* ssc) {
-  CHECK(Dbg::IsDebuggerActive());
-  CHECK(GetSingleStepControl() == nullptr) << "Single step already active in thread " << *this;
-  CHECK(ssc != nullptr);
-  tlsPtr_.single_step_control = ssc;
-}
-
-void Thread::DeactivateSingleStepControl() {
-  CHECK(Dbg::IsDebuggerActive());
-  CHECK(GetSingleStepControl() != nullptr) << "Single step not active in thread " << *this;
-  SingleStepControl* ssc = GetSingleStepControl();
-  tlsPtr_.single_step_control = nullptr;
-  delete ssc;
-}
-
-void Thread::SetDebugInvokeReq(DebugInvokeReq* req) {
-  CHECK(Dbg::IsDebuggerActive());
-  CHECK(GetInvokeReq() == nullptr) << "Debug invoke req already active in thread " << *this;
-  CHECK(Thread::Current() != this) << "Debug invoke can't be dispatched by the thread itself";
-  CHECK(req != nullptr);
-  tlsPtr_.debug_invoke_req = req;
-}
-
-void Thread::ClearDebugInvokeReq() {
-  CHECK(GetInvokeReq() != nullptr) << "Debug invoke req not active in thread " << *this;
-  CHECK(Thread::Current() == this) << "Debug invoke must be finished by the thread itself";
-  DebugInvokeReq* req = tlsPtr_.debug_invoke_req;
-  tlsPtr_.debug_invoke_req = nullptr;
-  delete req;
-}
-
 void Thread::PushVerifier(verifier::MethodVerifier* verifier) {
   verifier->link_ = tlsPtr_.method_verifier;
   tlsPtr_.method_verifier = verifier;
@@ -4288,4 +4345,24 @@
       WellKnownClasses::java_lang_Thread_systemDaemon)->GetBoolean(GetPeer());
 }
 
+ScopedExceptionStorage::ScopedExceptionStorage(art::Thread* self)
+    : self_(self), hs_(self_), excp_(hs_.NewHandle<art::mirror::Throwable>(self_->GetException())) {
+  self_->ClearException();
+}
+
+void ScopedExceptionStorage::SuppressOldException(const char* message) {
+  CHECK(self_->IsExceptionPending()) << *self_;
+  ObjPtr<mirror::Throwable> old_suppressed(excp_.Get());
+  excp_.Assign(self_->GetException());
+  LOG(WARNING) << message << "Suppressing old exception: " << old_suppressed->Dump();
+  self_->ClearException();
+}
+
+ScopedExceptionStorage::~ScopedExceptionStorage() {
+  CHECK(!self_->IsExceptionPending()) << *self_;
+  if (!excp_.IsNull()) {
+    self_->SetException(excp_.Get());
+  }
+}
+
 }  // namespace art
diff --git a/runtime/thread.h b/runtime/thread.h
index ae04600..7129526 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -33,15 +33,16 @@
 #include "base/value_object.h"
 #include "entrypoints/jni/jni_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints.h"
+#include "handle.h"
 #include "handle_scope.h"
 #include "interpreter/interpreter_cache.h"
 #include "jvalue.h"
 #include "managed_stack.h"
 #include "offsets.h"
 #include "read_barrier_config.h"
+#include "reflective_handle_scope.h"
 #include "runtime_globals.h"
 #include "runtime_stats.h"
-#include "suspend_reason.h"
 #include "thread_state.h"
 
 class BacktraceMap;
@@ -84,18 +85,18 @@
 class ClassLinker;
 class Closure;
 class Context;
-struct DebugInvokeReq;
 class DeoptimizationContextRecord;
 class DexFile;
 class FrameIdToShadowFrame;
+class IsMarkedVisitor;
 class JavaVMExt;
 class JNIEnvExt;
 class Monitor;
 class RootVisitor;
 class ScopedObjectAccessAlreadyRunnable;
 class ShadowFrame;
-class SingleStepControl;
 class StackedShadowFrameRecord;
+enum class SuspendReason : char;
 class Thread;
 class ThreadList;
 enum VisitRootFlags : uint8_t;
@@ -261,6 +262,17 @@
         (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
   }
 
+  void DecrDefineClassCount() {
+    tls32_.define_class_counter--;
+  }
+
+  void IncrDefineClassCount() {
+    tls32_.define_class_counter++;
+  }
+  uint32_t GetDefineClassCount() const {
+    return tls32_.define_class_counter;
+  }
+
   // If delta > 0 and (this != self or suspend_barrier is not null), this function may temporarily
   // release thread_suspend_count_lock_ internally.
   ALWAYS_INLINE
@@ -353,6 +365,21 @@
     Roles::uninterruptible_.Release();  // No-op.
   }
 
+  // End region where no thread suspension is expected. Returns the current open region in case we
+  // want to reopen it. Used for ScopedAllowThreadSuspension. Not supported if no_thread_suspension
+  // is larger than one.
+  const char* EndAssertNoThreadSuspension() RELEASE(Roles::uninterruptible_) WARN_UNUSED {
+    const char* ret = nullptr;
+    if (kIsDebugBuild) {
+      CHECK_EQ(tls32_.no_thread_suspension, 1u);
+      tls32_.no_thread_suspension--;
+      ret = tlsPtr_.last_no_thread_suspension_cause;
+      tlsPtr_.last_no_thread_suspension_cause = nullptr;
+    }
+    Roles::uninterruptible_.Release();  // No-op.
+    return ret;
+  }
+
   void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
 
   // Return true if thread suspension is allowable.
@@ -634,6 +661,9 @@
   void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  void VisitReflectiveTargets(ReflectiveValueVisitor* visitor)
+      REQUIRES(Locks::mutator_lock_);
+
   void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_) {
     if (kVerifyStack) {
       VerifyStackImpl();
@@ -698,8 +728,15 @@
   }
 
  public:
-  static uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
-                                                PointerSize pointer_size) {
+  template<PointerSize pointer_size>
+  static constexpr ThreadOffset<pointer_size> QuickEntryPointOffset(
+      size_t quick_entrypoint_offset) {
+    return ThreadOffsetFromTlsPtr<pointer_size>(
+        OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
+  }
+
+  static constexpr uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
+                                                          PointerSize pointer_size) {
     if (pointer_size == PointerSize::k32) {
       return QuickEntryPointOffset<PointerSize::k32>(quick_entrypoint_offset).
           Uint32Value();
@@ -710,12 +747,6 @@
   }
 
   template<PointerSize pointer_size>
-  static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
-    return ThreadOffsetFromTlsPtr<pointer_size>(
-        OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
-  }
-
-  template<PointerSize pointer_size>
   static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
     return ThreadOffsetFromTlsPtr<pointer_size>(
         OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
@@ -723,7 +754,7 @@
 
   // Return the entry point offset integer value for ReadBarrierMarkRegX, where X is `reg`.
   template <PointerSize pointer_size>
-  static int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
+  static constexpr int32_t ReadBarrierMarkEntryPointsOffset(size_t reg) {
     // The entry point list defines 30 ReadBarrierMarkRegX entry points.
     DCHECK_LT(reg, 30u);
     // The ReadBarrierMarkRegX entry points are ordered by increasing
@@ -891,12 +922,21 @@
                                                                 top_handle_scope));
   }
 
-  DebugInvokeReq* GetInvokeReq() const {
-    return tlsPtr_.debug_invoke_req;
+  BaseReflectiveHandleScope* GetTopReflectiveHandleScope() {
+    return tlsPtr_.top_reflective_handle_scope;
   }
 
-  SingleStepControl* GetSingleStepControl() const {
-    return tlsPtr_.single_step_control;
+  void PushReflectiveHandleScope(BaseReflectiveHandleScope* scope) {
+    DCHECK_EQ(scope->GetLink(), tlsPtr_.top_reflective_handle_scope);
+    DCHECK_EQ(scope->GetThread(), this);
+    tlsPtr_.top_reflective_handle_scope = scope;
+  }
+
+  BaseReflectiveHandleScope* PopReflectiveHandleScope() {
+    BaseReflectiveHandleScope* handle_scope = tlsPtr_.top_reflective_handle_scope;
+    DCHECK(handle_scope != nullptr);
+    tlsPtr_.top_reflective_handle_scope = tlsPtr_.top_reflective_handle_scope->GetLink();
+    return handle_scope;
   }
 
   // Indicates whether this thread is ready to invoke a method for debugging. This
@@ -963,28 +1003,17 @@
     is_runtime_thread_ = is_runtime_thread;
   }
 
+  uint32_t CorePlatformApiCookie() {
+    return core_platform_api_cookie_;
+  }
+
+  void SetCorePlatformApiCookie(uint32_t cookie) {
+    core_platform_api_cookie_ = cookie;
+  }
+
   // Returns true if the thread is allowed to load java classes.
   bool CanLoadClasses() const;
 
-  // Activates single step control for debugging. The thread takes the
-  // ownership of the given SingleStepControl*. It is deleted by a call
-  // to DeactivateSingleStepControl or upon thread destruction.
-  void ActivateSingleStepControl(SingleStepControl* ssc);
-
-  // Deactivates single step control for debugging.
-  void DeactivateSingleStepControl();
-
-  // Sets debug invoke request for debugging. When the thread is resumed,
-  // it executes the method described by this request then sends the reply
-  // before suspending itself. The thread takes the ownership of the given
-  // DebugInvokeReq*. It is deleted by a call to ClearDebugInvokeReq.
-  void SetDebugInvokeReq(DebugInvokeReq* req);
-
-  // Clears debug invoke request for debugging. When the thread completes
-  // method invocation, it deletes its debug invoke request and suspends
-  // itself.
-  void ClearDebugInvokeReq();
-
   // Returns the fake exception used to activate deoptimization.
   static mirror::Throwable* GetDeoptimizationException() {
     // Note that the mirror::Throwable must be aligned to kObjectAlignment or else it cannot be
@@ -1035,7 +1064,19 @@
   void RemoveDebuggerShadowFrameMapping(size_t frame_id)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
+  // While getting this map requires shared the mutator lock, manipulating it
+  // should actually follow these rules:
+  // (1) The owner of this map (the thread) can change it with its mutator lock.
+  // (2) Other threads can read this map when the owner is suspended and they
+  //     hold the mutator lock.
+  // (3) Other threads can change this map when owning the mutator lock exclusively.
+  //
+  // The reason why (3) needs the mutator lock exclusively (and not just having
+  // the owner suspended) is that we don't want other threads to concurrently read the map.
+  //
+  // TODO: Add a class abstraction to express these rules.
+  std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* GetInstrumentationStack()
+      REQUIRES_SHARED(Locks::mutator_lock_) {
     return tlsPtr_.instrumentation_stack;
   }
 
@@ -1124,13 +1165,16 @@
   mirror::Object* AllocTlab(size_t bytes);
   void SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit);
   bool HasTlab() const;
+  void ResetTlab();
   uint8_t* GetTlabStart() {
     return tlsPtr_.thread_local_start;
   }
   uint8_t* GetTlabPos() {
     return tlsPtr_.thread_local_pos;
   }
-
+  uint8_t* GetTlabEnd() {
+    return tlsPtr_.thread_local_end;
+  }
   // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
   // equal to a valid pointer.
   // TODO: does this need to atomic?  I don't think so.
@@ -1220,6 +1264,15 @@
     return tls32_.force_interpreter_count != 0;
   }
 
+  bool IncrementMakeVisiblyInitializedCounter() {
+    tls32_.make_visibly_initialized_counter += 1u;
+    return tls32_.make_visibly_initialized_counter == kMakeVisiblyInitializedCounterTriggerCount;
+  }
+
+  void ClearMakeVisiblyInitializedCounter() {
+    tls32_.make_visibly_initialized_counter = 0u;
+  }
+
   void PushVerifier(verifier::MethodVerifier* verifier);
   void PopVerifier(verifier::MethodVerifier* verifier);
 
@@ -1294,6 +1347,10 @@
   ~Thread() REQUIRES(!Locks::mutator_lock_, !Locks::thread_suspend_count_lock_);
   void Destroy();
 
+  // Deletes and clears the tlsPtr_.jpeer field. Done in a way so that both it and opeer cannot be
+  // observed to be set at the same time by instrumentation.
+  void DeleteJPeer(JNIEnv* env);
+
   void NotifyInTheadList()
       REQUIRES_SHARED(Locks::thread_list_lock_);
 
@@ -1315,8 +1372,7 @@
                        jint thread_priority)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  // Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
-  // Dbg::ManageDeoptimization.
+  // Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit and, ~Thread
   ThreadState SetStateUnsafe(ThreadState new_state) {
     ThreadState old_state = GetState();
     if (old_state == kRunnable && new_state != kRunnable) {
@@ -1415,6 +1471,8 @@
   template <bool kPrecise>
   void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
 
+  void SweepInterpreterCache(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
+
   static bool IsAotCompiler();
 
   void ReleaseLongJumpContextInternal();
@@ -1464,6 +1522,8 @@
   // Stores the jit sensitive thread (which for now is the UI thread).
   static Thread* jit_sensitive_thread_;
 
+  static constexpr uint32_t kMakeVisiblyInitializedCounterTriggerCount = 128;
+
   /***********************************************************************************************/
   // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
   // pointer size differences. To encourage shorter encoding, more frequently used values appear
@@ -1475,14 +1535,27 @@
     // to be 4-byte quantities.
     typedef uint32_t bool32_t;
 
-    explicit tls_32bit_sized_values(bool is_daemon) :
-      suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
-      daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
-      thread_exit_check_count(0), handling_signal_(false),
-      is_transitioning_to_runnable(false), ready_for_debug_invoke(false),
-      debug_method_entry_(false), is_gc_marking(false), weak_ref_access_enabled(true),
-      disable_thread_flip_count(0), user_code_suspend_count(0), force_interpreter_count(0) {
-    }
+    explicit tls_32bit_sized_values(bool is_daemon)
+        : suspend_count(0),
+          debug_suspend_count(0),
+          thin_lock_thread_id(0),
+          tid(0),
+          daemon(is_daemon),
+          throwing_OutOfMemoryError(false),
+          no_thread_suspension(0),
+          thread_exit_check_count(0),
+          handling_signal_(false),
+          is_transitioning_to_runnable(false),
+          ready_for_debug_invoke(false),
+          debug_method_entry_(false),
+          is_gc_marking(false),
+          weak_ref_access_enabled(true),
+          disable_thread_flip_count(0),
+          user_code_suspend_count(0),
+          force_interpreter_count(0),
+          use_mterp(0),
+          make_visibly_initialized_counter(0),
+          define_class_counter(0) {}
 
     union StateAndFlags state_and_flags;
     static_assert(sizeof(union StateAndFlags) == sizeof(int32_t),
@@ -1572,6 +1645,18 @@
     // True if everything is in the ideal state for fast interpretation.
     // False if we need to switch to the C++ interpreter to handle special cases.
     std::atomic<bool32_t> use_mterp;
+
+    // Counter for calls to initialize a class that's initialized but not visibly initialized.
+    // When this reaches kMakeVisiblyInitializedCounterTriggerCount, we call the runtime to
+    // make initialized classes visibly initialized. This is needed because we usually make
+    // classes visibly initialized in batches but we do not want to be stuck with a class
+    // initialized but not visibly initialized for a long time even if no more classes are
+    // being initialized anymore.
+    uint32_t make_visibly_initialized_counter;
+
+    // Counter for how many nested define-classes are ongoing in this thread. Used to allow waiting
+    // for threads to be done with class-definition work.
+    uint32_t define_class_counter;
   } tls32_;
 
   struct PACKED(8) tls_64bit_sized_values {
@@ -1590,7 +1675,7 @@
       self(nullptr), opeer(nullptr), jpeer(nullptr), stack_begin(nullptr), stack_size(0),
       deps_or_stack_trace_sample(), wait_next(nullptr), monitor_enter_object(nullptr),
       top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
-      instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
+      instrumentation_stack(nullptr),
       stacked_shadow_frame_record(nullptr), deoptimization_context_stack(nullptr),
       frame_id_to_shadow_frame(nullptr), name(nullptr), pthread_self(0),
       last_no_thread_suspension_cause(nullptr), checkpoint_function(nullptr),
@@ -1599,7 +1684,7 @@
       thread_local_objects(0), mterp_current_ibase(nullptr), thread_local_alloc_stack_top(nullptr),
       thread_local_alloc_stack_end(nullptr),
       flip_function(nullptr), method_verifier(nullptr), thread_local_mark_stack(nullptr),
-      async_exception(nullptr) {
+      async_exception(nullptr), top_reflective_handle_scope(nullptr) {
       std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
     }
 
@@ -1673,14 +1758,12 @@
     Context* long_jump_context;
 
     // Additional stack used by method instrumentation to store method and return pc values.
-    // Stored as a pointer since std::deque is not PACKED.
-    std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack;
-
-    // JDWP invoke-during-breakpoint support.
-    DebugInvokeReq* debug_invoke_req;
-
-    // JDWP single-stepping support.
-    SingleStepControl* single_step_control;
+    // Stored as a pointer since std::map is not PACKED.
+    // !DO NOT CHANGE! to std::unordered_map: the users of this map require an
+    // ordered iteration on the keys (which are stack addresses).
+    // Also see Thread::GetInstrumentationStack for the requirements on
+    // manipulating and reading this map.
+    std::map<uintptr_t, instrumentation::InstrumentationStackFrame>* instrumentation_stack;
 
     // For gc purpose, a shadow frame record stack that keeps track of:
     // 1) shadow frames under construction.
@@ -1757,6 +1840,9 @@
 
     // The pending async-exception or null.
     mirror::Throwable* async_exception;
+
+    // Top of the linked-list for reflective-handle scopes or null if none.
+    BaseReflectiveHandleScope* top_reflective_handle_scope;
   } tlsPtr_;
 
   // Small thread-local cache to be used from the interpreter.
@@ -1788,10 +1874,18 @@
   // compiled code or entrypoints.
   SafeMap<std::string, std::unique_ptr<TLSData>> custom_tls_ GUARDED_BY(Locks::custom_tls_lock_);
 
+#ifndef __BIONIC__
+  __attribute__((tls_model("initial-exec")))
+  static thread_local Thread* self_tls_;
+#endif
+
   // True if the thread is some form of runtime thread (ex, GC or JIT).
   bool is_runtime_thread_;
 
-  friend class Dbg;  // For SetStateUnsafe.
+  // Set during execution of JNI methods that get field and method id's as part of determining if
+  // the caller is allowed to access all fields and methods in the Core Platform API.
+  uint32_t core_platform_api_cookie_ = 0;
+
   friend class gc::collector::SemiSpace;  // For getting stack traces.
   friend class Runtime;  // For CreatePeer.
   friend class QuickExceptionHandler;  // For dumping the stack.
@@ -1837,6 +1931,30 @@
   const char* old_cause_;
 };
 
+class ScopedAllowThreadSuspension {
+ public:
+  ALWAYS_INLINE ScopedAllowThreadSuspension() RELEASE(Roles::uninterruptible_) {
+    if (kIsDebugBuild) {
+      self_ = Thread::Current();
+      old_cause_ = self_->EndAssertNoThreadSuspension();
+    } else {
+      Roles::uninterruptible_.Release();  // No-op.
+    }
+  }
+  ALWAYS_INLINE ~ScopedAllowThreadSuspension() ACQUIRE(Roles::uninterruptible_) {
+    if (kIsDebugBuild) {
+      CHECK(self_->StartAssertNoThreadSuspension(old_cause_) == nullptr);
+    } else {
+      Roles::uninterruptible_.Acquire();  // No-op.
+    }
+  }
+
+ private:
+  Thread* self_;
+  const char* old_cause_;
+};
+
+
 class ScopedStackedShadowFramePusher {
  public:
   ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type)
@@ -1896,6 +2014,19 @@
   virtual void ThreadDeath(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
 };
 
+// Store an exception from the thread and suppress it for the duration of this object.
+class ScopedExceptionStorage {
+ public:
+  explicit ScopedExceptionStorage(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+  void SuppressOldException(const char* message = "") REQUIRES_SHARED(Locks::mutator_lock_);
+  ~ScopedExceptionStorage() REQUIRES_SHARED(Locks::mutator_lock_);
+
+ private:
+  Thread* self_;
+  StackHandleScope<1> hs_;
+  MutableHandle<mirror::Throwable> excp_;
+};
+
 std::ostream& operator<<(std::ostream& os, const Thread& thread);
 std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& thread);
 
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index ed6b2c9..5d1dd55 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -73,7 +73,6 @@
 
 ThreadList::ThreadList(uint64_t thread_suspend_timeout_ns)
     : suspend_all_count_(0),
-      debug_suspend_all_count_(0),
       unregistering_count_(0),
       suspend_all_historam_("suspend all histogram", 16, 64),
       long_suspend_(false),
@@ -162,7 +161,6 @@
   // TODO: No thread safety analysis as DumpState with a null thread won't access fields, should
   // refactor DumpState to avoid skipping analysis.
   Thread::DumpState(os, nullptr, tid);
-  DumpKernelStack(os, tid, "  kernel: ", false);
   if (dump_native_stack) {
     DumpNativeStack(os, tid, nullptr, "  native: ");
   }
@@ -335,23 +333,44 @@
     count = list_.size();
     for (const auto& thread : list_) {
       if (thread != self) {
+        bool requested_suspend = false;
         while (true) {
           if (thread->RequestCheckpoint(checkpoint_function)) {
             // This thread will run its checkpoint some time in the near future.
+            if (requested_suspend) {
+              // The suspend request is now unnecessary.
+              bool updated =
+                  thread->ModifySuspendCount(self, -1, nullptr, SuspendReason::kInternal);
+              DCHECK(updated);
+              requested_suspend = false;
+            }
             break;
           } else {
-            // We are probably suspended, try to make sure that we stay suspended.
-            // The thread switched back to runnable.
+            // The thread is probably suspended, try to make sure that it stays suspended.
             if (thread->GetState() == kRunnable) {
               // Spurious fail, try again.
               continue;
             }
-            bool updated = thread->ModifySuspendCount(self, +1, nullptr, SuspendReason::kInternal);
-            DCHECK(updated);
-            suspended_count_modified_threads.push_back(thread);
-            break;
+            if (!requested_suspend) {
+              bool updated =
+                  thread->ModifySuspendCount(self, +1, nullptr, SuspendReason::kInternal);
+              DCHECK(updated);
+              requested_suspend = true;
+              if (thread->IsSuspended()) {
+                break;
+              }
+              // The thread raced us to become Runnable. Try to RequestCheckpoint() again.
+            } else {
+              // The thread previously raced our suspend request to become Runnable but
+              // since it is suspended again, it must honor that suspend request now.
+              DCHECK(thread->IsSuspended());
+              break;
+            }
           }
         }
+        if (requested_suspend) {
+          suspended_count_modified_threads.push_back(thread);
+        }
       }
     }
     // Run the callback to be called inside this critical section.
@@ -365,26 +384,8 @@
 
   // Run the checkpoint on the suspended threads.
   for (const auto& thread : suspended_count_modified_threads) {
-    if (!thread->IsSuspended()) {
-      ScopedTrace trace([&]() {
-        std::ostringstream oss;
-        thread->ShortDump(oss);
-        return std::string("Waiting for suspension of thread ") + oss.str();
-      });
-      // Busy wait until the thread is suspended.
-      const uint64_t start_time = NanoTime();
-      do {
-        ThreadSuspendSleep(kThreadSuspendInitialSleepUs);
-      } while (!thread->IsSuspended());
-      const uint64_t total_delay = NanoTime() - start_time;
-      // Shouldn't need to wait for longer than 1000 microseconds.
-      constexpr uint64_t kLongWaitThreshold = MsToNs(1);
-      if (UNLIKELY(total_delay > kLongWaitThreshold)) {
-        LOG(WARNING) << "Long wait of " << PrettyDuration(total_delay) << " for "
-            << *thread << " suspension!";
-      }
-    }
     // We know for sure that the thread is suspended at this point.
+    DCHECK(thread->IsSuspended());
     checkpoint_function->Run(thread);
     {
       MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
@@ -667,9 +668,6 @@
 }
 
 // Ensures all threads running Java suspend and that those not running Java don't start.
-// Debugger thread might be set to kRunnable for a short period of time after the
-// SuspendAllInternal. This is safe because it will be set back to suspended state before
-// the SuspendAll returns.
 void ThreadList::SuspendAllInternal(Thread* self,
                                     Thread* ignore1,
                                     Thread* ignore2,
@@ -703,9 +701,6 @@
     MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
     // Update global suspend all state for attaching threads.
     ++suspend_all_count_;
-    if (reason == SuspendReason::kForDebugger) {
-      ++debug_suspend_all_count_;
-    }
     pending_threads.store(list_.size() - num_ignored, std::memory_order_relaxed);
     // Increment everybody's suspend count (except those that should be ignored).
     for (const auto& thread : list_) {
@@ -1115,198 +1110,27 @@
   return nullptr;
 }
 
-void ThreadList::SuspendAllForDebugger() {
-  Thread* self = Thread::Current();
-  Thread* debug_thread = Dbg::GetDebugThread();
-
-  VLOG(threads) << *self << " SuspendAllForDebugger starting...";
-
-  SuspendAllInternal(self, self, debug_thread, SuspendReason::kForDebugger);
-  // Block on the mutator lock until all Runnable threads release their share of access then
-  // immediately unlock again.
-#if HAVE_TIMED_RWLOCK
-  // Timeout if we wait more than 30 seconds.
-  if (!Locks::mutator_lock_->ExclusiveLockWithTimeout(self, 30 * 1000, 0)) {
-    UnsafeLogFatalForThreadSuspendAllTimeout();
-  } else {
-    Locks::mutator_lock_->ExclusiveUnlock(self);
-  }
-#else
-  Locks::mutator_lock_->ExclusiveLock(self);
-  Locks::mutator_lock_->ExclusiveUnlock(self);
-#endif
-  // Disabled for the following race condition:
-  // Thread 1 calls SuspendAllForDebugger, gets preempted after pulsing the mutator lock.
-  // Thread 2 calls SuspendAll and SetStateUnsafe (perhaps from Dbg::Disconnected).
-  // Thread 1 fails assertion that all threads are suspended due to thread 2 being in a runnable
-  // state (from SetStateUnsafe).
-  // AssertThreadsAreSuspended(self, self, debug_thread);
-
-  VLOG(threads) << *self << " SuspendAllForDebugger complete";
-}
-
-void ThreadList::SuspendSelfForDebugger() {
-  Thread* const self = Thread::Current();
-  self->SetReadyForDebugInvoke(true);
-
-  // The debugger thread must not suspend itself due to debugger activity!
-  Thread* debug_thread = Dbg::GetDebugThread();
-  CHECK(self != debug_thread);
-  CHECK_NE(self->GetState(), kRunnable);
-  Locks::mutator_lock_->AssertNotHeld(self);
-
-  // The debugger may have detached while we were executing an invoke request. In that case, we
-  // must not suspend ourself.
-  DebugInvokeReq* pReq = self->GetInvokeReq();
-  const bool skip_thread_suspension = (pReq != nullptr && !Dbg::IsDebuggerActive());
-  if (!skip_thread_suspension) {
-    // Collisions with other suspends aren't really interesting. We want
-    // to ensure that we're the only one fiddling with the suspend count
-    // though.
-    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
-    bool updated = self->ModifySuspendCount(self, +1, nullptr, SuspendReason::kForDebugger);
-    DCHECK(updated);
-    CHECK_GT(self->GetSuspendCount(), 0);
-
-    VLOG(threads) << *self << " self-suspending (debugger)";
-  } else {
-    // We must no longer be subject to debugger suspension.
-    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
-    CHECK_EQ(self->GetDebugSuspendCount(), 0) << "Debugger detached without resuming us";
-
-    VLOG(threads) << *self << " not self-suspending because debugger detached during invoke";
-  }
-
-  // If the debugger requested an invoke, we need to send the reply and clear the request.
-  if (pReq != nullptr) {
-    Dbg::FinishInvokeMethod(pReq);
-    self->ClearDebugInvokeReq();
-    pReq = nullptr;  // object has been deleted, clear it for safety.
-  }
-
-  // Tell JDWP that we've completed suspension. The JDWP thread can't
-  // tell us to resume before we're fully asleep because we hold the
-  // suspend count lock.
-  Dbg::ClearWaitForEventThread();
-
-  {
-    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
-    while (self->GetSuspendCount() != 0) {
-      Thread::resume_cond_->Wait(self);
-      if (self->GetSuspendCount() != 0) {
-        // The condition was signaled but we're still suspended. This
-        // can happen when we suspend then resume all threads to
-        // update instrumentation or compute monitor info. This can
-        // also happen if the debugger lets go while a SIGQUIT thread
-        // dump event is pending (assuming SignalCatcher was resumed for
-        // just long enough to try to grab the thread-suspend lock).
-        VLOG(jdwp) << *self << " still suspended after undo "
-                   << "(suspend count=" << self->GetSuspendCount() << ", "
-                   << "debug suspend count=" << self->GetDebugSuspendCount() << ")";
-      }
-    }
-    CHECK_EQ(self->GetSuspendCount(), 0);
-  }
-
-  self->SetReadyForDebugInvoke(false);
-  VLOG(threads) << *self << " self-reviving (debugger)";
-}
-
-void ThreadList::ResumeAllForDebugger() {
-  Thread* self = Thread::Current();
-  Thread* debug_thread = Dbg::GetDebugThread();
-
-  VLOG(threads) << *self << " ResumeAllForDebugger starting...";
-
-  // Threads can't resume if we exclusively hold the mutator lock.
-  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
-
-  {
-    MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
-    {
-      MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
-      // Update global suspend all state for attaching threads.
-      DCHECK_GE(suspend_all_count_, debug_suspend_all_count_);
-      if (debug_suspend_all_count_ > 0) {
-        --suspend_all_count_;
-        --debug_suspend_all_count_;
-      } else {
-        // We've been asked to resume all threads without being asked to
-        // suspend them all before. That may happen if a debugger tries
-        // to resume some suspended threads (with suspend count == 1)
-        // at once with a VirtualMachine.Resume command. Let's print a
-        // warning.
-        LOG(WARNING) << "Debugger attempted to resume all threads without "
-                     << "having suspended them all before.";
-      }
-      // Decrement everybody's suspend count (except our own).
-      for (const auto& thread : list_) {
-        if (thread == self || thread == debug_thread) {
-          continue;
-        }
-        if (thread->GetDebugSuspendCount() == 0) {
-          // This thread may have been individually resumed with ThreadReference.Resume.
-          continue;
-        }
-        VLOG(threads) << "requesting thread resume: " << *thread;
-        bool updated = thread->ModifySuspendCount(self, -1, nullptr, SuspendReason::kForDebugger);
-        DCHECK(updated);
-      }
-    }
-  }
-
-  {
-    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
-    Thread::resume_cond_->Broadcast(self);
-  }
-
-  VLOG(threads) << *self << " ResumeAllForDebugger complete";
-}
-
-void ThreadList::UndoDebuggerSuspensions() {
-  Thread* self = Thread::Current();
-
-  VLOG(threads) << *self << " UndoDebuggerSuspensions starting";
-
-  {
-    MutexLock mu(self, *Locks::thread_list_lock_);
-    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
-    // Update global suspend all state for attaching threads.
-    suspend_all_count_ -= debug_suspend_all_count_;
-    debug_suspend_all_count_ = 0;
-    // Update running threads.
-    for (const auto& thread : list_) {
-      if (thread == self || thread->GetDebugSuspendCount() == 0) {
-        continue;
-      }
-      bool suspended = thread->ModifySuspendCount(self,
-                                                  -thread->GetDebugSuspendCount(),
-                                                  nullptr,
-                                                  SuspendReason::kForDebugger);
-      DCHECK(suspended);
-    }
-  }
-
-  {
-    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
-    Thread::resume_cond_->Broadcast(self);
-  }
-
-  VLOG(threads) << "UndoDebuggerSuspensions(" << *self << ") complete";
-}
-
-void ThreadList::WaitForOtherNonDaemonThreadsToExit() {
+void ThreadList::WaitForOtherNonDaemonThreadsToExit(bool check_no_birth) {
   ScopedTrace trace(__PRETTY_FUNCTION__);
   Thread* self = Thread::Current();
   Locks::mutator_lock_->AssertNotHeld(self);
   while (true) {
-    {
+    Locks::runtime_shutdown_lock_->Lock(self);
+    if (check_no_birth) {
       // No more threads can be born after we start to shutdown.
-      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
       CHECK(Runtime::Current()->IsShuttingDownLocked());
       CHECK_EQ(Runtime::Current()->NumberOfThreadsBeingBorn(), 0U);
+    } else {
+      if (Runtime::Current()->NumberOfThreadsBeingBorn() != 0U) {
+        // Awkward. Shutdown_cond_ is private, but the only live thread may not be registered yet.
+        // Fortunately, this is used mostly for testing, and not performance-critical.
+        Locks::runtime_shutdown_lock_->Unlock(self);
+        usleep(1000);
+        continue;
+      }
     }
     MutexLock mu(self, *Locks::thread_list_lock_);
+    Locks::runtime_shutdown_lock_->Unlock(self);
     // Also wait for any threads that are unregistering to finish. This is required so that no
     // threads access the thread list after it is deleted. TODO: This may not work for user daemon
     // threads since they could unregister at the wrong time.
@@ -1349,20 +1173,27 @@
       thread->GetJniEnv()->SetFunctionsToRuntimeShutdownFunctions();
     }
   }
-  // If we have any daemons left, wait 200ms to ensure they are not stuck in a place where they
-  // are about to access runtime state and are not in a runnable state. Examples: Monitor code
-  // or waking up from a condition variable. TODO: Try and see if there is a better way to wait
-  // for daemon threads to be in a blocked state.
-  if (daemons_left > 0) {
-    static constexpr size_t kDaemonSleepTime = 200 * 1000;
-    usleep(kDaemonSleepTime);
+  if (daemons_left == 0) {
+    // No threads left; safe to shut down.
+    return;
   }
-  // Give the threads a chance to suspend, complaining if they're slow.
+  // There is not a clean way to shut down if we have daemons left. We have no mechanism for
+  // killing them and reclaiming thread stacks. We also have no mechanism for waiting until they
+  // have truly finished touching the memory we are about to deallocate. We do the best we can with
+  // timeouts.
+  //
+  // If we have any daemons left, wait until they are (a) suspended and (b) they are not stuck
+  // in a place where they are about to access runtime state and are not in a runnable state.
+  // We attempt to do the latter by just waiting long enough for things to
+  // quiesce. Examples: Monitor code or waking up from a condition variable.
+  //
+  // Give the threads a chance to suspend, complaining if they're slow. (a)
   bool have_complained = false;
   static constexpr size_t kTimeoutMicroseconds = 2000 * 1000;
   static constexpr size_t kSleepMicroseconds = 1000;
-  for (size_t i = 0; i < kTimeoutMicroseconds / kSleepMicroseconds; ++i) {
-    bool all_suspended = true;
+  bool all_suspended = false;
+  for (size_t i = 0; !all_suspended && i < kTimeoutMicroseconds / kSleepMicroseconds; ++i) {
+    bool found_running = false;
     {
       MutexLock mu(self, *Locks::thread_list_lock_);
       for (const auto& thread : list_) {
@@ -1371,16 +1202,53 @@
             LOG(WARNING) << "daemon thread not yet suspended: " << *thread;
             have_complained = true;
           }
-          all_suspended = false;
+          found_running = true;
         }
       }
     }
-    if (all_suspended) {
-      return;
+    if (found_running) {
+      // Sleep briefly before checking again. Max total sleep time is kTimeoutMicroseconds.
+      usleep(kSleepMicroseconds);
+    } else {
+      all_suspended = true;
     }
-    usleep(kSleepMicroseconds);
   }
-  LOG(WARNING) << "timed out suspending all daemon threads";
+  if (!all_suspended) {
+    // We can get here if a daemon thread executed a fastnative native call, so that it
+    // remained in runnable state, and then made a JNI call after we called
+    // SetFunctionsToRuntimeShutdownFunctions(), causing it to permanently stay in a harmless
+    // but runnable state. See b/147804269 .
+    LOG(WARNING) << "timed out suspending all daemon threads";
+  }
+  // Assume all threads are either suspended or somehow wedged.
+  // Wait again for all the now "suspended" threads to actually quiesce. (b)
+  static constexpr size_t kDaemonSleepTime = 200 * 1000;
+  usleep(kDaemonSleepTime);
+  std::list<Thread*> list_copy;
+  {
+    MutexLock mu(self, *Locks::thread_list_lock_);
+    // Half-way through the wait, set the "runtime deleted" flag, causing any newly awoken
+    // threads to immediately go back to sleep without touching memory. This prevents us from
+    // touching deallocated memory, but it also prevents mutexes from getting released. Thus we
+    // only do this once we're reasonably sure that no system mutexes are still held.
+    for (const auto& thread : list_) {
+      DCHECK(thread == self || !all_suspended || thread->GetState() != kRunnable);
+      // In the !all_suspended case, the target is probably sleeping.
+      thread->GetJniEnv()->SetRuntimeDeleted();
+      // Possibly contended Mutex acquisitions are unsafe after this.
+      // Releasing thread_list_lock_ is OK, since it can't block.
+    }
+  }
+  // Finally wait for any threads woken before we set the "runtime deleted" flags to finish
+  // touching memory.
+  usleep(kDaemonSleepTime);
+#if defined(__has_feature)
+#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
+  // Sleep a bit longer with -fsanitize=address, since everything is slower.
+  usleep(2 * kDaemonSleepTime);
+#endif
+#endif
+  // At this point no threads should be touching our data structures anymore.
 }
 
 void ThreadList::Register(Thread* self) {
@@ -1397,14 +1265,9 @@
   // SuspendAll requests.
   MutexLock mu(self, *Locks::thread_list_lock_);
   MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
-  CHECK_GE(suspend_all_count_, debug_suspend_all_count_);
   // Modify suspend count in increments of 1 to maintain invariants in ModifySuspendCount. While
   // this isn't particularly efficient the suspend counts are most commonly 0 or 1.
-  for (int delta = debug_suspend_all_count_; delta > 0; delta--) {
-    bool updated = self->ModifySuspendCount(self, +1, nullptr, SuspendReason::kForDebugger);
-    DCHECK(updated);
-  }
-  for (int delta = suspend_all_count_ - debug_suspend_all_count_; delta > 0; delta--) {
+  for (int delta = suspend_all_count_; delta > 0; delta--) {
     bool updated = self->ModifySuspendCount(self, +1, nullptr, SuspendReason::kInternal);
     DCHECK(updated);
   }
@@ -1481,10 +1344,11 @@
 
   // Clear the TLS data, so that the underlying native thread is recognizably detached.
   // (It may wish to reattach later.)
-#ifdef ART_TARGET_ANDROID
+#ifdef __BIONIC__
   __get_tls()[TLS_SLOT_ART_THREAD_SELF] = nullptr;
 #else
   CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, nullptr), "detach self");
+  Thread::self_tls_ = nullptr;
 #endif
 
   // Signal that a thread just detached.
@@ -1542,6 +1406,20 @@
   }
 }
 
+void ThreadList::SweepInterpreterCaches(IsMarkedVisitor* visitor) const {
+  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+  for (const auto& thread : list_) {
+    thread->SweepInterpreterCache(visitor);
+  }
+}
+
+void ThreadList::VisitReflectiveTargets(ReflectiveValueVisitor *visitor) const {
+  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+  for (const auto& thread : list_) {
+    thread->VisitReflectiveTargets(visitor);
+  }
+}
+
 uint32_t ThreadList::AllocThreadId(Thread* self) {
   MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
   for (size_t i = 0; i < allocated_ids_.size(); ++i) {
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index cdfb934..8fc219b 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -20,9 +20,9 @@
 #include "barrier.h"
 #include "base/histogram.h"
 #include "base/mutex.h"
-#include "base/time_utils.h"
 #include "base/value_object.h"
 #include "jni.h"
+#include "reflective_handle_scope.h"
 #include "suspend_reason.h"
 
 #include <bitset>
@@ -37,6 +37,7 @@
 class GcPauseListener;
 }  // namespace gc
 class Closure;
+class IsMarkedVisitor;
 class RootVisitor;
 class Thread;
 class TimingLogger;
@@ -47,7 +48,8 @@
   static constexpr uint32_t kMaxThreadId = 0xFFFF;
   static constexpr uint32_t kInvalidThreadId = 0;
   static constexpr uint32_t kMainThreadId = 1;
-  static constexpr uint64_t kDefaultThreadSuspendTimeout = MsToNs(kIsDebugBuild ? 50000 : 10000);
+  static constexpr uint64_t kDefaultThreadSuspendTimeout =
+      kIsDebugBuild ? 50'000'000'000ull : 10'000'000'000ull;
 
   explicit ThreadList(uint64_t thread_suspend_timeout_ns);
   ~ThreadList();
@@ -102,6 +104,10 @@
   // Find an existing thread (or self) by its thread id (not tid).
   Thread* FindThreadByThreadId(uint32_t thread_id) REQUIRES(Locks::thread_list_lock_);
 
+  // Does the thread list still contain the given thread, or one at the same address?
+  // Used by Monitor to provide (mostly accurate) debugging information.
+  bool Contains(Thread* thread) REQUIRES(Locks::thread_list_lock_);
+
   // Run a checkpoint on threads, running threads are not suspended but run the checkpoint inside
   // of the suspend check. Returns how many checkpoints that are expected to run, including for
   // already suspended threads for b/24191051. Run the callback, if non-null, inside the
@@ -128,26 +134,17 @@
                !Locks::thread_list_lock_,
                !Locks::thread_suspend_count_lock_);
 
-  // Suspends all threads
-  void SuspendAllForDebugger()
-      REQUIRES(!Locks::mutator_lock_,
-               !Locks::thread_list_lock_,
-               !Locks::thread_suspend_count_lock_);
-
-  void SuspendSelfForDebugger()
-      REQUIRES(!Locks::thread_suspend_count_lock_);
-
-  // Resume all threads
-  void ResumeAllForDebugger()
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
-
-  void UndoDebuggerSuspensions()
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
-
   // Iterates over all the threads.
   void ForEach(void (*callback)(Thread*, void*), void* context)
       REQUIRES(Locks::thread_list_lock_);
 
+  template<typename CallBack>
+  void ForEach(CallBack cb) REQUIRES(Locks::thread_list_lock_) {
+    ForEach([](Thread* t, void* ctx) REQUIRES(Locks::thread_list_lock_) {
+      (*reinterpret_cast<CallBack*>(ctx))(t);
+    }, &cb);
+  }
+
   // Add/remove current thread from list.
   void Register(Thread* self)
       REQUIRES(Locks::runtime_shutdown_lock_)
@@ -166,6 +163,8 @@
       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  void VisitReflectiveTargets(ReflectiveValueVisitor* visitor) const REQUIRES(Locks::mutator_lock_);
+
   // Return a copy of the thread list.
   std::list<Thread*> GetList() REQUIRES(Locks::thread_list_lock_) {
     return list_;
@@ -178,11 +177,18 @@
     return empty_checkpoint_barrier_.get();
   }
 
+  void SweepInterpreterCaches(IsMarkedVisitor* visitor) const
+      REQUIRES(!Locks::thread_list_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  void WaitForOtherNonDaemonThreadsToExit(bool check_no_birth = true)
+      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
+               !Locks::mutator_lock_);
+
  private:
   uint32_t AllocThreadId(Thread* self);
   void ReleaseThreadId(Thread* self, uint32_t id) REQUIRES(!Locks::allocated_thread_ids_lock_);
 
-  bool Contains(Thread* thread) REQUIRES(Locks::thread_list_lock_);
   bool Contains(pid_t tid) REQUIRES(Locks::thread_list_lock_);
   size_t RunCheckpoint(Closure* checkpoint_function, bool includeSuspended)
       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
@@ -192,8 +198,6 @@
 
   void SuspendAllDaemonThreadsForShutdown()
       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
-  void WaitForOtherNonDaemonThreadsToExit()
-      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
   void SuspendAllInternal(Thread* self,
                           Thread* ignore1,
@@ -211,7 +215,6 @@
 
   // Ongoing suspend all requests, used to ensure threads added to list_ respect SuspendAll.
   int suspend_all_count_ GUARDED_BY(Locks::thread_suspend_count_lock_);
-  int debug_suspend_all_count_ GUARDED_BY(Locks::thread_suspend_count_lock_);
 
   // Number of threads unregistering, ~ThreadList blocks until this hits 0.
   int unregistering_count_ GUARDED_BY(Locks::thread_list_lock_);
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index e1c756d..2bca5a9 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -97,10 +97,17 @@
 void* ThreadPoolWorker::Callback(void* arg) {
   ThreadPoolWorker* worker = reinterpret_cast<ThreadPoolWorker*>(arg);
   Runtime* runtime = Runtime::Current();
-  CHECK(runtime->AttachCurrentThread(worker->name_.c_str(),
-                                     true,
-                                     nullptr,
-                                     worker->thread_pool_->create_peers_));
+  CHECK(runtime->AttachCurrentThread(
+      worker->name_.c_str(),
+      true,
+      // Thread-groups are only tracked by the peer j.l.Thread objects. If we aren't creating peers
+      // we don't need to specify the thread group. We want to place these threads in the System
+      // thread group because that thread group is where important threads that debuggers and
+      // similar tools should not mess with are placed. As this is an internal-thread-pool we might
+      // rely on being able to (for example) wait for all threads to finish some task. If debuggers
+      // are suspending these threads that might not be possible.
+      worker->thread_pool_->create_peers_ ? runtime->GetSystemThreadGroup() : nullptr,
+      worker->thread_pool_->create_peers_));
   worker->thread_ = Thread::Current();
   // Mark thread pool workers as runtime-threads.
   worker->thread_->SetIsRuntimeThread(true);
@@ -120,6 +127,12 @@
 }
 
 void ThreadPool::RemoveAllTasks(Thread* self) {
+  // The ThreadPool is responsible for calling Finalize (which usually deletes
+  // the task memory) on all the tasks.
+  Task* task = nullptr;
+  while ((task = TryGetTask(self)) != nullptr) {
+    task->Finalize();
+  }
   MutexLock mu(self, task_queue_lock_);
   tasks_.clear();
 }
@@ -195,6 +208,7 @@
 
 ThreadPool::~ThreadPool() {
   DeleteThreads();
+  RemoveAllTasks(Thread::Current());
 }
 
 void ThreadPool::StartWorkers(Thread* self) {
diff --git a/runtime/thread_state.h b/runtime/thread_state.h
index c8f3826..f36bc80 100644
--- a/runtime/thread_state.h
+++ b/runtime/thread_state.h
@@ -21,7 +21,12 @@
 
 namespace art {
 
+// State stored in our C++ class Thread.
+// When we refer to "a suspended state", or when function names mention "ToSuspended" or
+// "FromSuspended", we mean any state other than kRunnable, i.e. any state in which the thread is
+// guaranteed not to access the Java heap. The kSuspended state is merely one of these.
 enum ThreadState {
+  //                                   Java
   //                                   Thread.State   JDWP state
   kTerminated = 66,                 // TERMINATED     TS_ZOMBIE    Thread.run has returned, but Thread* still around
   kRunnable,                        // RUNNABLE       TS_RUNNING   runnable
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 0e28f29..63d72d8 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -19,6 +19,7 @@
 #include <sys/uio.h>
 #include <unistd.h>
 
+#include "android-base/macros.h"
 #include "android-base/stringprintf.h"
 
 #include "art_method-inl.h"
@@ -336,7 +337,7 @@
     Thread::Current()->ThrowNewException("Ljava/lang/RuntimeException;", msg.c_str());
     return;
   }
-  std::unique_ptr<File> file(new File(trace_fd, "tracefile"));
+  std::unique_ptr<File> file(new File(trace_fd, /* path= */ "tracefile", /* check_usage= */ true));
   Start(std::move(file), buffer_size, flags, output_mode, trace_mode, interval_us);
 }
 
@@ -649,9 +650,9 @@
   os << StringPrintf("vm=art\n");
   os << StringPrintf("pid=%d\n", getpid());
   if ((flags_ & kTraceCountAllocs) != 0) {
-    os << StringPrintf("alloc-count=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_OBJECTS));
-    os << StringPrintf("alloc-size=%d\n", Runtime::Current()->GetStat(KIND_ALLOCATED_BYTES));
-    os << StringPrintf("gc-count=%d\n", Runtime::Current()->GetStat(KIND_GC_INVOCATIONS));
+    os << "alloc-count=" << Runtime::Current()->GetStat(KIND_ALLOCATED_OBJECTS) << "\n";
+    os << "alloc-size=" << Runtime::Current()->GetStat(KIND_ALLOCATED_BYTES) << "\n";
+    os << "gc-count=" <<  Runtime::Current()->GetStat(KIND_GC_INVOCATIONS) << "\n";
   }
   os << StringPrintf("%cthreads\n", kTraceTokenChar);
   DumpThreadList(os);
@@ -746,12 +747,16 @@
                          Handle<mirror::Object> this_object ATTRIBUTE_UNUSED,
                          ArtMethod* method,
                          uint32_t dex_pc ATTRIBUTE_UNUSED,
-                         const JValue& return_value ATTRIBUTE_UNUSED) {
+                         instrumentation::OptionalFrame frame ATTRIBUTE_UNUSED,
+                         JValue& return_value ATTRIBUTE_UNUSED) {
   uint32_t thread_clock_diff = 0;
   uint32_t wall_clock_diff = 0;
   ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
-  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodExited,
-                      thread_clock_diff, wall_clock_diff);
+  LogMethodTraceEvent(thread,
+                      method,
+                      instrumentation::Instrumentation::kMethodExited,
+                      thread_clock_diff,
+                      wall_clock_diff);
 }
 
 void Trace::MethodUnwind(Thread* thread,
diff --git a/runtime/trace.h b/runtime/trace.h
index 567f6ed..eccf157 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -185,7 +185,8 @@
                     Handle<mirror::Object> this_object,
                     ArtMethod* method,
                     uint32_t dex_pc,
-                    const JValue& return_value)
+                    instrumentation::OptionalFrame frame,
+                    JValue& return_value)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
       override;
   void MethodUnwind(Thread* thread,
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index 62482fd..5d9456b 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -18,15 +18,19 @@
 
 #include <android-base/logging.h>
 
+#include "aot_class_linker.h"
 #include "base/mutex-inl.h"
 #include "base/stl_util.h"
 #include "gc/accounting/card_table-inl.h"
+#include "gc/heap.h"
 #include "gc_root-inl.h"
 #include "intern_table.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
+#include "obj_ptr-inl.h"
+#include "runtime.h"
 
 #include <list>
 
@@ -35,17 +39,15 @@
 // TODO: remove (only used for debugging purpose).
 static constexpr bool kEnableTransactionStats = false;
 
-Transaction::Transaction()
-  : log_lock_("transaction log lock", kTransactionLogLock),
-    aborted_(false),
-    rolling_back_(false),
-    strict_(false) {
-  CHECK(Runtime::Current()->IsAotCompiler());
-}
-
-Transaction::Transaction(bool strict, mirror::Class* root) : Transaction() {
-  strict_ = strict;
-  root_ = root;
+Transaction::Transaction(bool strict, mirror::Class* root)
+    : log_lock_("transaction log lock", kTransactionLogLock),
+      aborted_(false),
+      rolling_back_(false),
+      heap_(Runtime::Current()->GetHeap()),
+      strict_(strict),
+      root_(root),
+      assert_no_new_records_reason_(nullptr) {
+  DCHECK(Runtime::Current()->IsAotCompiler());
 }
 
 Transaction::~Transaction() {
@@ -111,35 +113,56 @@
   return rolling_back_;
 }
 
-bool Transaction::IsStrict() {
-  MutexLock mu(Thread::Current(), log_lock_);
-  return strict_;
-}
-
 const std::string& Transaction::GetAbortMessage() {
   MutexLock mu(Thread::Current(), log_lock_);
   return abort_message_;
 }
 
-bool Transaction::WriteConstraint(mirror::Object* obj, ArtField* field) {
-  MutexLock mu(Thread::Current(), log_lock_);
-  if (strict_  // no constraint for boot image
-      && field->IsStatic()  // no constraint instance updating
-      && obj != root_) {  // modifying other classes' static field, fail
+bool Transaction::WriteConstraint(Thread* self, ObjPtr<mirror::Object> obj) {
+  DCHECK(obj != nullptr);
+  MutexLock mu(self, log_lock_);
+
+  // Prevent changes in boot image spaces for app or boot image extension.
+  // For boot image there are no boot image spaces and this condition evaluates to false.
+  if (heap_->ObjectIsInBootImageSpace(obj)) {
     return true;
   }
-  return false;
+
+  // For apps, also prevent writing to other classes.
+  return IsStrict() &&
+         obj->IsClass() &&  // no constraint updating instances or arrays
+         obj != root_;  // modifying other classes' static field, fail
 }
 
-bool Transaction::ReadConstraint(mirror::Object* obj, ArtField* field) {
-  DCHECK(field->IsStatic());
+bool Transaction::WriteValueConstraint(Thread* self, ObjPtr<mirror::Object> value) {
+  if (value == nullptr) {
+    return false;  // We can always store null values.
+  }
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  MutexLock mu(self, log_lock_);
+  if (IsStrict()) {
+    // TODO: Should we restrict writes the same way as for boot image extension?
+    return false;
+  } else if (heap->GetBootImageSpaces().empty()) {
+    return false;  // No constraints for boot image.
+  } else {
+    // Boot image extension.
+    ObjPtr<mirror::Class> klass = value->IsClass() ? value->AsClass() : value->GetClass();
+    return !AotClassLinker::CanReferenceInBootImageExtension(klass, heap);
+  }
+}
+
+bool Transaction::ReadConstraint(Thread* self, ObjPtr<mirror::Object> obj) {
+  // Read constraints are checked only for static field reads as there are
+  // no constraints on reading instance fields and array elements.
   DCHECK(obj->IsClass());
-  MutexLock mu(Thread::Current(), log_lock_);
-  if (!strict_ ||   // no constraint for boot image
-      obj == root_) {  // self-updating, pass
+  MutexLock mu(self, log_lock_);
+  if (IsStrict()) {
+    return obj != root_;  // fail if not self-updating
+  } else {
+    // For boot image and boot image extension, allow reading any field.
     return false;
   }
-  return true;
 }
 
 void Transaction::RecordWriteFieldBoolean(mirror::Object* obj,
@@ -148,6 +171,7 @@
                                           bool is_volatile) {
   DCHECK(obj != nullptr);
   MutexLock mu(Thread::Current(), log_lock_);
+  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
   ObjectLog& object_log = object_logs_[obj];
   object_log.LogBooleanValue(field_offset, value, is_volatile);
 }
@@ -158,6 +182,7 @@
                                        bool is_volatile) {
   DCHECK(obj != nullptr);
   MutexLock mu(Thread::Current(), log_lock_);
+  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
   ObjectLog& object_log = object_logs_[obj];
   object_log.LogByteValue(field_offset, value, is_volatile);
 }
@@ -168,6 +193,7 @@
                                        bool is_volatile) {
   DCHECK(obj != nullptr);
   MutexLock mu(Thread::Current(), log_lock_);
+  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
   ObjectLog& object_log = object_logs_[obj];
   object_log.LogCharValue(field_offset, value, is_volatile);
 }
@@ -179,6 +205,7 @@
                                         bool is_volatile) {
   DCHECK(obj != nullptr);
   MutexLock mu(Thread::Current(), log_lock_);
+  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
   ObjectLog& object_log = object_logs_[obj];
   object_log.LogShortValue(field_offset, value, is_volatile);
 }
@@ -190,6 +217,7 @@
                                      bool is_volatile) {
   DCHECK(obj != nullptr);
   MutexLock mu(Thread::Current(), log_lock_);
+  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
   ObjectLog& object_log = object_logs_[obj];
   object_log.Log32BitsValue(field_offset, value, is_volatile);
 }
@@ -200,6 +228,7 @@
                                      bool is_volatile) {
   DCHECK(obj != nullptr);
   MutexLock mu(Thread::Current(), log_lock_);
+  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
   ObjectLog& object_log = object_logs_[obj];
   object_log.Log64BitsValue(field_offset, value, is_volatile);
 }
@@ -210,6 +239,7 @@
                                             bool is_volatile) {
   DCHECK(obj != nullptr);
   MutexLock mu(Thread::Current(), log_lock_);
+  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
   ObjectLog& object_log = object_logs_[obj];
   object_log.LogReferenceValue(field_offset, value, is_volatile);
 }
@@ -219,6 +249,7 @@
   DCHECK(array->IsArrayInstance());
   DCHECK(!array->IsObjectArray());
   MutexLock mu(Thread::Current(), log_lock_);
+  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
   auto it = array_logs_.find(array);
   if (it == array_logs_.end()) {
     ArrayLog log;
@@ -232,6 +263,7 @@
   DCHECK(dex_cache != nullptr);
   DCHECK_LT(string_idx.index_, dex_cache->GetDexFile()->NumStringIds());
   MutexLock mu(Thread::Current(), log_lock_);
+  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
   resolve_string_logs_.emplace_back(dex_cache, string_idx);
 }
 
@@ -258,6 +290,7 @@
 void Transaction::LogInternedString(InternStringLog&& log) {
   Locks::intern_table_lock_->AssertExclusiveHeld(Thread::Current());
   MutexLock mu(Thread::Current(), log_lock_);
+  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
   intern_string_logs_.push_front(std::move(log));
 }
 
@@ -460,11 +493,11 @@
       if (UNLIKELY(field_value.is_volatile)) {
         obj->SetFieldBooleanVolatile<false, kCheckTransaction>(
             field_offset,
-            static_cast<bool>(field_value.value));
+            field_value.value);
       } else {
         obj->SetFieldBoolean<false, kCheckTransaction>(
             field_offset,
-            static_cast<bool>(field_value.value));
+            field_value.value);
       }
       break;
     case kByte:
@@ -676,4 +709,27 @@
   }
 }
 
+Transaction* ScopedAssertNoNewTransactionRecords::InstallAssertion(const char* reason) {
+  Transaction* transaction = nullptr;
+  if (kIsDebugBuild && Runtime::Current()->IsActiveTransaction()) {
+    transaction = Runtime::Current()->GetTransaction().get();
+    if (transaction != nullptr) {
+      MutexLock mu(Thread::Current(), transaction->log_lock_);
+      CHECK(transaction->assert_no_new_records_reason_ == nullptr)
+          << "old: " << transaction->assert_no_new_records_reason_ << " new: " << reason;
+      transaction->assert_no_new_records_reason_ = reason;
+    }
+  }
+  return transaction;
+}
+
+void ScopedAssertNoNewTransactionRecords::RemoveAssertion(Transaction* transaction) {
+  if (kIsDebugBuild) {
+    CHECK(Runtime::Current()->GetTransaction().get() == transaction);
+    MutexLock mu(Thread::Current(), transaction->log_lock_);
+    CHECK(transaction->assert_no_new_records_reason_ != nullptr);
+    transaction->assert_no_new_records_reason_ = nullptr;
+  }
+}
+
 }  // namespace art
diff --git a/runtime/transaction.h b/runtime/transaction.h
index de6edd2..184280f 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -30,6 +30,9 @@
 #include <map>
 
 namespace art {
+namespace gc {
+class Heap;
+}  // namespace gc
 namespace mirror {
 class Array;
 class Class;
@@ -38,14 +41,14 @@
 class String;
 }  // namespace mirror
 class InternTable;
+template<class MirrorType> class ObjPtr;
 
 class Transaction final {
  public:
   static constexpr const char* kAbortExceptionDescriptor = "dalvik.system.TransactionAbortError";
   static constexpr const char* kAbortExceptionSignature = "Ldalvik/system/TransactionAbortError;";
 
-  Transaction();
-  explicit Transaction(bool strict, mirror::Class* root);
+  Transaction(bool strict, mirror::Class* root);
   ~Transaction();
 
   void Abort(const std::string& abort_message)
@@ -63,7 +66,9 @@
   // If the transaction is in strict mode, then all access of static fields will be constrained,
   // one class's clinit will not be allowed to read or modify another class's static fields, unless
   // the transaction is aborted.
-  bool IsStrict() REQUIRES(!log_lock_);
+  bool IsStrict() {
+    return strict_;
+  }
 
   // Record object field changes.
   void RecordWriteFieldBoolean(mirror::Object* obj,
@@ -135,11 +140,15 @@
       REQUIRES(!log_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool ReadConstraint(mirror::Object* obj, ArtField* field)
+  bool ReadConstraint(Thread* self, ObjPtr<mirror::Object> obj)
       REQUIRES(!log_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  bool WriteConstraint(mirror::Object* obj, ArtField* field)
+  bool WriteConstraint(Thread* self, ObjPtr<mirror::Object> obj)
+      REQUIRES(!log_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  bool WriteValueConstraint(Thread* self, ObjPtr<mirror::Object> value)
       REQUIRES(!log_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -307,13 +316,35 @@
   std::list<ResolveStringLog> resolve_string_logs_ GUARDED_BY(log_lock_);
   bool aborted_ GUARDED_BY(log_lock_);
   bool rolling_back_;  // Single thread, no race.
-  bool strict_ GUARDED_BY(log_lock_);
+  gc::Heap* const heap_;
+  const bool strict_;
   std::string abort_message_ GUARDED_BY(log_lock_);
   mirror::Class* root_ GUARDED_BY(log_lock_);
+  const char* assert_no_new_records_reason_ GUARDED_BY(log_lock_);
+
+  friend class ScopedAssertNoNewTransactionRecords;
 
   DISALLOW_COPY_AND_ASSIGN(Transaction);
 };
 
+class ScopedAssertNoNewTransactionRecords {
+ public:
+  explicit ScopedAssertNoNewTransactionRecords(const char* reason)
+    : transaction_(kIsDebugBuild ? InstallAssertion(reason) : nullptr) {}
+
+  ~ScopedAssertNoNewTransactionRecords() {
+    if (kIsDebugBuild && transaction_ != nullptr) {
+      RemoveAssertion(transaction_);
+    }
+  }
+
+ private:
+  static Transaction* InstallAssertion(const char* reason);
+  static void RemoveAssertion(Transaction* transaction);
+
+  Transaction* transaction_;
+};
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_TRANSACTION_H_
diff --git a/runtime/transaction_test.cc b/runtime/transaction_test.cc
index aea2211..74725c2 100644
--- a/runtime/transaction_test.cc
+++ b/runtime/transaction_test.cc
@@ -28,7 +28,12 @@
 namespace art {
 
 class TransactionTest : public CommonRuntimeTest {
- public:
+ protected:
+  void SetUpRuntimeOptions(/*out*/RuntimeOptions* options) override {
+    // Set up the image location.
+    options->emplace_back("-Ximage:" + GetImageLocation(), nullptr);
+  }
+
   // Tests failing class initialization due to native call with transaction rollback.
   void testTransactionAbort(const char* tested_class_signature) {
     ScopedObjectAccess soa(Thread::Current());
@@ -70,9 +75,9 @@
     ClassStatus old_status = h_klass->GetStatus();
     LockWord old_lock_word = h_klass->GetLockWord(false);
 
-    Runtime::Current()->EnterTransactionMode();
+    EnterTransactionMode();
     bool success = class_linker_->EnsureInitialized(soa.Self(), h_klass, true, true);
-    ASSERT_TRUE(Runtime::Current()->IsTransactionAborted());
+    ASSERT_TRUE(IsTransactionAborted());
     ASSERT_FALSE(success);
     ASSERT_TRUE(h_klass->IsErroneous());
     ASSERT_TRUE(soa.Self()->IsExceptionPending());
@@ -83,7 +88,7 @@
 
     // Check class status is rolled back properly.
     soa.Self()->ClearException();
-    Runtime::Current()->RollbackAndExitTransactionMode();
+    RollbackAndExitTransactionMode();
     ASSERT_EQ(old_status, h_klass->GetStatus());
   }
 };
@@ -96,12 +101,12 @@
       hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
   ASSERT_TRUE(h_klass != nullptr);
 
-  Runtime::Current()->EnterTransactionMode();
+  EnterTransactionMode();
   Handle<mirror::Object> h_obj(hs.NewHandle(h_klass->AllocObject(soa.Self())));
   ASSERT_TRUE(h_obj != nullptr);
   ASSERT_OBJ_PTR_EQ(h_obj->GetClass(), h_klass.Get());
   // Rolling back transaction's changes must not clear the Object::class field.
-  Runtime::Current()->RollbackAndExitTransactionMode();
+  RollbackAndExitTransactionMode();
   EXPECT_OBJ_PTR_EQ(h_obj->GetClass(), h_klass.Get());
 }
 
@@ -120,12 +125,12 @@
   h_obj->MonitorEnter(soa.Self());
   LockWord old_lock_word = h_obj->GetLockWord(false);
 
-  Runtime::Current()->EnterTransactionMode();
+  EnterTransactionMode();
   // Unlock object's monitor inside the transaction.
   h_obj->MonitorExit(soa.Self());
   LockWord new_lock_word = h_obj->GetLockWord(false);
   // Rolling back transaction's changes must not change monitor's state.
-  Runtime::Current()->RollbackAndExitTransactionMode();
+  RollbackAndExitTransactionMode();
 
   LockWord aborted_lock_word = h_obj->GetLockWord(false);
   EXPECT_FALSE(LockWord::Equal<false>(old_lock_word, new_lock_word));
@@ -142,17 +147,18 @@
 
   constexpr int32_t kArraySize = 2;
 
-  Runtime::Current()->EnterTransactionMode();
+  EnterTransactionMode();
 
   // Allocate an array during transaction.
-  Handle<mirror::Array> h_obj(
-      hs.NewHandle(
-          mirror::Array::Alloc<true>(soa.Self(), h_klass.Get(), kArraySize,
-                                     h_klass->GetComponentSizeShift(),
-                                     Runtime::Current()->GetHeap()->GetCurrentAllocator())));
+  Handle<mirror::Array> h_obj = hs.NewHandle(
+      mirror::Array::Alloc(soa.Self(),
+                           h_klass.Get(),
+                           kArraySize,
+                           h_klass->GetComponentSizeShift(),
+                           Runtime::Current()->GetHeap()->GetCurrentAllocator()));
   ASSERT_TRUE(h_obj != nullptr);
   ASSERT_OBJ_PTR_EQ(h_obj->GetClass(), h_klass.Get());
-  Runtime::Current()->RollbackAndExitTransactionMode();
+  RollbackAndExitTransactionMode();
 
   // Rolling back transaction's changes must not reset array's length.
   EXPECT_EQ(h_obj->GetLength(), kArraySize);
@@ -230,7 +236,7 @@
   ASSERT_OBJ_PTR_EQ(h_obj->GetClass(), h_klass.Get());
 
   // Modify fields inside transaction then rollback changes.
-  Runtime::Current()->EnterTransactionMode();
+  EnterTransactionMode();
   booleanField->SetBoolean<true>(h_klass.Get(), true);
   byteField->SetByte<true>(h_klass.Get(), 1);
   charField->SetChar<true>(h_klass.Get(), 1u);
@@ -240,7 +246,7 @@
   floatField->SetFloat<true>(h_klass.Get(), 1.0);
   doubleField->SetDouble<true>(h_klass.Get(), 1.0);
   objectField->SetObject<true>(h_klass.Get(), h_obj.Get());
-  Runtime::Current()->RollbackAndExitTransactionMode();
+  RollbackAndExitTransactionMode();
 
   // Check values have properly been restored to their original (default) value.
   EXPECT_EQ(booleanField->GetBoolean(h_klass.Get()), false);
@@ -330,7 +336,7 @@
   ASSERT_OBJ_PTR_EQ(h_obj->GetClass(), h_klass.Get());
 
   // Modify fields inside transaction then rollback changes.
-  Runtime::Current()->EnterTransactionMode();
+  EnterTransactionMode();
   booleanField->SetBoolean<true>(h_instance.Get(), true);
   byteField->SetByte<true>(h_instance.Get(), 1);
   charField->SetChar<true>(h_instance.Get(), 1u);
@@ -340,7 +346,7 @@
   floatField->SetFloat<true>(h_instance.Get(), 1.0);
   doubleField->SetDouble<true>(h_instance.Get(), 1.0);
   objectField->SetObject<true>(h_instance.Get(), h_obj.Get());
-  Runtime::Current()->RollbackAndExitTransactionMode();
+  RollbackAndExitTransactionMode();
 
   // Check values have properly been restored to their original (default) value.
   EXPECT_EQ(booleanField->GetBoolean(h_instance.Get()), false);
@@ -453,7 +459,7 @@
   ASSERT_OBJ_PTR_EQ(h_obj->GetClass(), h_klass.Get());
 
   // Modify fields inside transaction then rollback changes.
-  Runtime::Current()->EnterTransactionMode();
+  EnterTransactionMode();
   booleanArray->SetWithoutChecks<true>(0, true);
   byteArray->SetWithoutChecks<true>(0, 1);
   charArray->SetWithoutChecks<true>(0, 1u);
@@ -463,7 +469,7 @@
   floatArray->SetWithoutChecks<true>(0, 1.0);
   doubleArray->SetWithoutChecks<true>(0, 1.0);
   objectArray->SetWithoutChecks<true>(0, h_obj.Get());
-  Runtime::Current()->RollbackAndExitTransactionMode();
+  RollbackAndExitTransactionMode();
 
   // Check values have properly been restored to their original (default) value.
   EXPECT_EQ(booleanArray->GetWithoutChecks(0), false);
@@ -505,7 +511,7 @@
   EXPECT_TRUE(class_linker_->LookupString(string_idx, h_dex_cache.Get()) == nullptr);
   EXPECT_TRUE(h_dex_cache->GetResolvedString(string_idx) == nullptr);
   // Do the transaction, then roll back.
-  Runtime::Current()->EnterTransactionMode();
+  EnterTransactionMode();
   bool success = class_linker_->EnsureInitialized(soa.Self(), h_klass, true, true);
   ASSERT_TRUE(success);
   ASSERT_TRUE(h_klass->IsInitialized());
@@ -517,7 +523,7 @@
     EXPECT_STREQ(s->ToModifiedUtf8().c_str(), kResolvedString);
     EXPECT_OBJ_PTR_EQ(s, h_dex_cache->GetResolvedString(string_idx));
   }
-  Runtime::Current()->RollbackAndExitTransactionMode();
+  RollbackAndExitTransactionMode();
   // Check that the string did not stay resolved.
   EXPECT_TRUE(class_linker_->LookupString(string_idx, h_dex_cache.Get()) == nullptr);
   EXPECT_TRUE(h_dex_cache->GetResolvedString(string_idx) == nullptr);
@@ -540,9 +546,9 @@
   class_linker_->VerifyClass(soa.Self(), h_klass);
   ASSERT_TRUE(h_klass->IsVerified());
 
-  Runtime::Current()->EnterTransactionMode();
+  EnterTransactionMode();
   bool success = class_linker_->EnsureInitialized(soa.Self(), h_klass, true, true);
-  Runtime::Current()->ExitTransactionMode();
+  ExitTransactionMode();
   ASSERT_TRUE(success);
   ASSERT_TRUE(h_klass->IsInitialized());
   ASSERT_FALSE(soa.Self()->IsExceptionPending());
@@ -563,9 +569,9 @@
   class_linker_->VerifyClass(soa.Self(), h_klass);
   ASSERT_TRUE(h_klass->IsVerified());
 
-  Runtime::Current()->EnterTransactionMode();
+  EnterTransactionMode();
   bool success = class_linker_->EnsureInitialized(soa.Self(), h_klass, true, true);
-  Runtime::Current()->ExitTransactionMode();
+  ExitTransactionMode();
   ASSERT_TRUE(success);
   ASSERT_TRUE(h_klass->IsInitialized());
   ASSERT_FALSE(soa.Self()->IsExceptionPending());
@@ -597,4 +603,138 @@
 TEST_F(TransactionTest, FinalizableAbortClass) {
   testTransactionAbort("LTransaction$FinalizableAbortClass;");
 }
+
+TEST_F(TransactionTest, Constraints) {
+  ScopedObjectAccess soa(Thread::Current());
+  StackHandleScope<11> hs(soa.Self());
+  Handle<mirror::ClassLoader> class_loader(
+      hs.NewHandle(soa.Decode<mirror::ClassLoader>(LoadDex("Transaction"))));
+
+  gc::Heap* heap = Runtime::Current()->GetHeap();
+  Handle<mirror::Class> boolean_class = hs.NewHandle(
+      class_linker_->FindClass(soa.Self(), "Ljava/lang/Boolean;", class_loader));
+  ASSERT_TRUE(boolean_class != nullptr);
+  ASSERT_TRUE(heap->ObjectIsInBootImageSpace(boolean_class.Get()));
+  ArtField* true_field =
+      mirror::Class::FindField(soa.Self(), boolean_class.Get(), "TRUE", "Ljava/lang/Boolean;");
+  ASSERT_TRUE(true_field != nullptr);
+  ASSERT_TRUE(true_field->IsStatic());
+  Handle<mirror::Object> true_value = hs.NewHandle(true_field->GetObject(boolean_class.Get()));
+  ASSERT_TRUE(true_value != nullptr);
+  ASSERT_TRUE(heap->ObjectIsInBootImageSpace(true_value.Get()));
+  ArtField* value_field =
+      mirror::Class::FindField(soa.Self(), boolean_class.Get(), "value", "Z");
+  ASSERT_TRUE(value_field != nullptr);
+  ASSERT_FALSE(value_field->IsStatic());
+
+  Handle<mirror::Class> static_field_class(hs.NewHandle(
+      class_linker_->FindClass(soa.Self(), "LTransaction$StaticFieldClass;", class_loader)));
+  ASSERT_TRUE(static_field_class != nullptr);
+  ASSERT_FALSE(heap->ObjectIsInBootImageSpace(static_field_class.Get()));
+  ArtField* int_field =
+      mirror::Class::FindField(soa.Self(), static_field_class.Get(), "intField", "I");
+  ASSERT_TRUE(int_field != nullptr);
+
+  Handle<mirror::Class> static_fields_test_class(hs.NewHandle(
+      class_linker_->FindClass(soa.Self(), "LStaticFieldsTest;", class_loader)));
+  ASSERT_TRUE(static_fields_test_class != nullptr);
+  ASSERT_FALSE(heap->ObjectIsInBootImageSpace(static_fields_test_class.Get()));
+  ArtField* static_fields_test_int_field =
+      mirror::Class::FindField(soa.Self(), static_fields_test_class.Get(), "intField", "I");
+  ASSERT_TRUE(static_fields_test_int_field != nullptr);
+
+  Handle<mirror::Class> instance_fields_test_class(hs.NewHandle(
+      class_linker_->FindClass(soa.Self(), "LInstanceFieldsTest;", class_loader)));
+  ASSERT_TRUE(instance_fields_test_class != nullptr);
+  ASSERT_FALSE(heap->ObjectIsInBootImageSpace(instance_fields_test_class.Get()));
+  ArtField* instance_fields_test_int_field =
+      mirror::Class::FindField(soa.Self(), instance_fields_test_class.Get(), "intField", "I");
+  ASSERT_TRUE(instance_fields_test_int_field != nullptr);
+  Handle<mirror::Object> instance_fields_test_object = hs.NewHandle(
+      instance_fields_test_class->Alloc(soa.Self(), heap->GetCurrentAllocator()));
+  ASSERT_TRUE(instance_fields_test_object != nullptr);
+  ASSERT_FALSE(heap->ObjectIsInBootImageSpace(instance_fields_test_object.Get()));
+
+  Handle<mirror::Class> long_array_dim2_class = hs.NewHandle(
+      class_linker_->FindClass(soa.Self(), "[[J", class_loader));
+  ASSERT_TRUE(long_array_dim2_class != nullptr);
+  ASSERT_FALSE(heap->ObjectIsInBootImageSpace(long_array_dim2_class.Get()));
+  ASSERT_TRUE(heap->ObjectIsInBootImageSpace(long_array_dim2_class->GetComponentType()));
+  Handle<mirror::Array> long_array_dim2 = hs.NewHandle(mirror::Array::Alloc(
+      soa.Self(),
+      long_array_dim2_class.Get(),
+      /*component_count=*/ 1,
+      long_array_dim2_class->GetComponentSizeShift(),
+      heap->GetCurrentAllocator()));
+  ASSERT_TRUE(long_array_dim2 != nullptr);
+  ASSERT_FALSE(heap->ObjectIsInBootImageSpace(long_array_dim2.Get()));
+  Handle<mirror::Array> long_array = hs.NewHandle(mirror::Array::Alloc(
+      soa.Self(),
+      long_array_dim2_class->GetComponentType(),
+      /*component_count=*/ 1,
+      long_array_dim2_class->GetComponentType()->GetComponentSizeShift(),
+      heap->GetCurrentAllocator()));
+  ASSERT_TRUE(long_array != nullptr);
+  ASSERT_FALSE(heap->ObjectIsInBootImageSpace(long_array.Get()));
+
+  // Use the Array's IfTable as an array from the boot image.
+  Handle<mirror::ObjectArray<mirror::Object>> array_iftable =
+      hs.NewHandle(long_array_dim2_class->GetIfTable());
+  ASSERT_TRUE(array_iftable != nullptr);
+  ASSERT_TRUE(heap->ObjectIsInBootImageSpace(array_iftable.Get()));
+
+  // Test non-strict transaction.
+  Transaction transaction(/*strict=*/ false, /*root=*/ nullptr);
+  // Static field in boot image.
+  EXPECT_TRUE(transaction.WriteConstraint(soa.Self(), boolean_class.Get()));
+  EXPECT_FALSE(transaction.ReadConstraint(soa.Self(), boolean_class.Get()));
+  // Instance field or array element in boot image.
+  // Do not check ReadConstraint(), it expects only static fields (checks for class object).
+  EXPECT_TRUE(transaction.WriteConstraint(soa.Self(), true_value.Get()));
+  EXPECT_TRUE(transaction.WriteConstraint(soa.Self(), array_iftable.Get()));
+  // Static field not in boot image.
+  EXPECT_FALSE(transaction.WriteConstraint(soa.Self(), static_fields_test_class.Get()));
+  EXPECT_FALSE(transaction.ReadConstraint(soa.Self(), static_fields_test_class.Get()));
+  // Instance field or array element not in boot image.
+  // Do not check ReadConstraint(), it expects only static fields (checks for class object).
+  EXPECT_FALSE(transaction.WriteConstraint(soa.Self(), instance_fields_test_object.Get()));
+  EXPECT_FALSE(transaction.WriteConstraint(soa.Self(), long_array_dim2.Get()));
+  // Write value constraints.
+  EXPECT_FALSE(transaction.WriteValueConstraint(soa.Self(), static_fields_test_class.Get()));
+  EXPECT_FALSE(transaction.WriteValueConstraint(soa.Self(), instance_fields_test_object.Get()));
+  EXPECT_TRUE(transaction.WriteValueConstraint(soa.Self(), long_array_dim2->GetClass()));
+  EXPECT_TRUE(transaction.WriteValueConstraint(soa.Self(), long_array_dim2.Get()));
+  EXPECT_FALSE(transaction.WriteValueConstraint(soa.Self(), long_array->GetClass()));
+  EXPECT_FALSE(transaction.WriteValueConstraint(soa.Self(), long_array.Get()));
+
+  // Test strict transaction.
+  Transaction strict_transaction(/*strict=*/ true, /*root=*/ static_field_class.Get());
+  // Static field in boot image.
+  EXPECT_TRUE(strict_transaction.WriteConstraint(soa.Self(), boolean_class.Get()));
+  EXPECT_TRUE(strict_transaction.ReadConstraint(soa.Self(), boolean_class.Get()));
+  // Instance field or array element in boot image.
+  // Do not check ReadConstraint(), it expects only static fields (checks for class object).
+  EXPECT_TRUE(strict_transaction.WriteConstraint(soa.Self(), true_value.Get()));
+  EXPECT_TRUE(strict_transaction.WriteConstraint(soa.Self(), array_iftable.Get()));
+  // Static field in another class not in boot image.
+  EXPECT_TRUE(strict_transaction.WriteConstraint(soa.Self(), static_fields_test_class.Get()));
+  EXPECT_TRUE(strict_transaction.ReadConstraint(soa.Self(), static_fields_test_class.Get()));
+  // Instance field or array element not in boot image.
+  // Do not check ReadConstraint(), it expects only static fields (checks for class object).
+  EXPECT_FALSE(strict_transaction.WriteConstraint(soa.Self(), instance_fields_test_object.Get()));
+  EXPECT_FALSE(strict_transaction.WriteConstraint(soa.Self(), long_array_dim2.Get()));
+  // Static field in the same class.
+  EXPECT_FALSE(strict_transaction.WriteConstraint(soa.Self(), static_field_class.Get()));
+  EXPECT_FALSE(strict_transaction.ReadConstraint(soa.Self(), static_field_class.Get()));
+  // Write value constraints.
+  EXPECT_FALSE(strict_transaction.WriteValueConstraint(soa.Self(), static_fields_test_class.Get()));
+  EXPECT_FALSE(
+      strict_transaction.WriteValueConstraint(soa.Self(), instance_fields_test_object.Get()));
+  // TODO: The following may be revised, see a TODO in Transaction::WriteValueConstraint().
+  EXPECT_FALSE(strict_transaction.WriteValueConstraint(soa.Self(), long_array_dim2->GetClass()));
+  EXPECT_FALSE(strict_transaction.WriteValueConstraint(soa.Self(), long_array_dim2.Get()));
+  EXPECT_FALSE(strict_transaction.WriteValueConstraint(soa.Self(), long_array->GetClass()));
+  EXPECT_FALSE(strict_transaction.WriteValueConstraint(soa.Self(), long_array.Get()));
+}
+
 }  // namespace art
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index cd60fab..d67a968 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -27,6 +27,7 @@
 #include "base/bit_utils.h"
 #include "base/leb128.h"
 #include "base/stl_util.h"
+#include "base/systrace.h"
 #include "base/unix_file/fd_file.h"
 #include "class_linker.h"
 #include "class_loader_context.h"
@@ -102,6 +103,7 @@
                                                   bool low_4gb,
                                                   bool unquicken,
                                                   std::string* error_msg) {
+  ScopedTrace trace(("VdexFile::OpenAtAddress " + vdex_filename).c_str());
   if (!OS::FileExists(vdex_filename.c_str())) {
     *error_msg = "File " + vdex_filename + " does not exist.";
     return nullptr;
@@ -153,11 +155,13 @@
     mmap_reuse = false;
   }
   CHECK(!mmap_reuse || mmap_addr != nullptr);
+  CHECK(!(writable && unquicken)) << "We don't want to be writing unquickened files out to disk!";
+  // Start as PROT_WRITE so we can mprotect back to it if we want to.
   MemMap mmap = MemMap::MapFileAtAddress(
       mmap_addr,
       vdex_length,
-      (writable || unquicken) ? PROT_READ | PROT_WRITE : PROT_READ,
-      unquicken ? MAP_PRIVATE : MAP_SHARED,
+      PROT_READ | PROT_WRITE,
+      writable ? MAP_SHARED : MAP_PRIVATE,
       file_fd,
       /* start= */ 0u,
       low_4gb,
@@ -181,13 +185,19 @@
     if (!vdex->OpenAllDexFiles(&unique_ptr_dex_files, error_msg)) {
       return nullptr;
     }
+    // TODO: It would be nice to avoid doing the return-instruction stuff but then we end up not
+    // being able to tell if we need dequickening later. Instead just get rid of that too.
     vdex->Unquicken(MakeNonOwningPointerVector(unique_ptr_dex_files),
-                    /* decompile_return_instruction= */ false);
+                    /* decompile_return_instruction= */ true);
     // Update the quickening info size to pretend there isn't any.
     size_t offset = vdex->GetDexSectionHeaderOffset();
     reinterpret_cast<DexSectionHeader*>(vdex->mmap_.Begin() + offset)->quickening_info_size_ = 0;
   }
 
+  if (!writable) {
+    vdex->AllowWriting(false);
+  }
+
   return vdex;
 }
 
@@ -207,8 +217,12 @@
   }
 }
 
+void VdexFile::AllowWriting(bool val) const {
+  CHECK(mmap_.Protect(val ? (PROT_READ | PROT_WRITE) : PROT_READ));
+}
+
 bool VdexFile::OpenAllDexFiles(std::vector<std::unique_ptr<const DexFile>>* dex_files,
-                               std::string* error_msg) {
+                               std::string* error_msg) const {
   const ArtDexFileLoader dex_file_loader;
   size_t i = 0;
   for (const uint8_t* dex_file_start = GetNextDexFileData(nullptr);
@@ -237,6 +251,23 @@
   return true;
 }
 
+void VdexFile::UnquickenInPlace(bool decompile_return_instruction) const {
+  CHECK_NE(mmap_.GetProtect() & PROT_WRITE, 0)
+      << "File not mapped writable. Cannot unquicken! " << mmap_;
+  if (HasDexSection()) {
+    std::vector<std::unique_ptr<const DexFile>> unique_ptr_dex_files;
+    std::string error_msg;
+    if (!OpenAllDexFiles(&unique_ptr_dex_files, &error_msg)) {
+      return;
+    }
+    Unquicken(MakeNonOwningPointerVector(unique_ptr_dex_files),
+              decompile_return_instruction);
+    // Update the quickening info size to pretend there isn't any.
+    size_t offset = GetDexSectionHeaderOffset();
+    reinterpret_cast<DexSectionHeader*>(mmap_.Begin() + offset)->quickening_info_size_ = 0;
+  }
+}
+
 void VdexFile::Unquicken(const std::vector<const DexFile*>& target_dex_files,
                          bool decompile_return_instruction) const {
   const uint8_t* source_dex = GetNextDexFileData(nullptr);
@@ -277,7 +308,8 @@
 void VdexFile::UnquickenDexFile(const DexFile& target_dex_file,
                                 const DexFile& source_dex_file,
                                 bool decompile_return_instruction) const {
-  UnquickenDexFile(target_dex_file, source_dex_file.Begin(), decompile_return_instruction);
+  UnquickenDexFile(
+      target_dex_file, source_dex_file.Begin(), decompile_return_instruction);
 }
 
 void VdexFile::UnquickenDexFile(const DexFile& target_dex_file,
@@ -329,9 +361,16 @@
 
 static std::string ComputeBootClassPathChecksumString() {
   Runtime* const runtime = Runtime::Current();
+  // Do not include boot image extension checksums, use their dex file checksums instead. Unlike
+  // oat files, vdex files do not reference anything in image spaces, so there is no reason why
+  // loading or not loading a boot image extension would affect the validity of the vdex file.
+  // Note: Update of a boot class path module such as conscrypt invalidates the vdex file anyway.
+  ArrayRef<gc::space::ImageSpace* const> image_spaces(runtime->GetHeap()->GetBootImageSpaces());
+  size_t boot_image_components =
+      image_spaces.empty() ? 0u : image_spaces[0]->GetImageHeader().GetComponentCount();
   return gc::space::ImageSpace::GetBootClassPathChecksums(
-          runtime->GetHeap()->GetBootImageSpaces(),
-          runtime->GetClassLinker()->GetBootClassPath());
+          image_spaces.SubArray(/*pos=*/ 0u, boot_image_components),
+          ArrayRef<const DexFile* const>(runtime->GetClassLinker()->GetBootClassPath()));
 }
 
 static bool CreateDirectories(const std::string& child_path, /* out */ std::string* error_msg) {
diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h
index 102b73f..d205904 100644
--- a/runtime/vdex_file.h
+++ b/runtime/vdex_file.h
@@ -309,7 +309,7 @@
 
   // Open all the dex files contained in this vdex file.
   bool OpenAllDexFiles(std::vector<std::unique_ptr<const DexFile>>* dex_files,
-                       std::string* error_msg);
+                       std::string* error_msg) const;
 
   // In-place unquicken the given `dex_files` based on `quickening_info`.
   // `decompile_return_instruction` controls if RETURN_VOID_BARRIER instructions are
@@ -319,6 +319,8 @@
   void Unquicken(const std::vector<const DexFile*>& target_dex_files,
                  bool decompile_return_instruction) const;
 
+  void UnquickenInPlace(bool decompile_return_instruction) const;
+
   // Fully unquicken `target_dex_file` based on `quickening_info`.
   void UnquickenDexFile(const DexFile& target_dex_file,
                         const DexFile& source_dex_file,
@@ -354,6 +356,10 @@
   // Returns true if the class loader context stored in the vdex matches `context`.
   bool MatchesClassLoaderContext(const ClassLoaderContext& context) const;
 
+  // Make the Vdex file & underlying dex-files RW or RO. Should only be used for in-place
+  // dequickening.
+  void AllowWriting(bool value) const;
+
  private:
   uint32_t GetQuickeningInfoTableOffset(const uint8_t* source_dex_begin) const;
 
@@ -382,7 +388,8 @@
     return DexBegin() + GetDexSectionHeader().GetDexSize();
   }
 
-  MemMap mmap_;
+  // mutable for AllowWriting()
+  mutable MemMap mmap_;
 
   DISALLOW_COPY_AND_ASSIGN(VdexFile);
 };
diff --git a/runtime/verifier/class_verifier.cc b/runtime/verifier/class_verifier.cc
index 649fb11..ed83652 100644
--- a/runtime/verifier/class_verifier.cc
+++ b/runtime/verifier/class_verifier.cc
@@ -20,6 +20,9 @@
 #include <android-base/stringprintf.h>
 
 #include "art_method-inl.h"
+#include "base/enums.h"
+#include "base/locks.h"
+#include "base/logging.h"
 #include "base/systrace.h"
 #include "base/utils.h"
 #include "class_linker.h"
@@ -28,11 +31,15 @@
 #include "dex/class_reference.h"
 #include "dex/descriptors_names.h"
 #include "dex/dex_file-inl.h"
+#include "handle.h"
 #include "handle_scope-inl.h"
 #include "method_verifier-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache.h"
 #include "runtime.h"
+#include "thread.h"
+#include "verifier/method_verifier.h"
+#include "verifier/reg_type_cache.h"
 
 namespace art {
 namespace verifier {
@@ -43,6 +50,83 @@
 // sure we only print this once.
 static bool gPrintedDxMonitorText = false;
 
+class StandardVerifyCallback : public VerifierCallback {
+ public:
+  void SetDontCompile(ArtMethod* m, bool value) override REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (value) {
+      m->SetDontCompile();
+    }
+  }
+  void SetMustCountLocks(ArtMethod* m, bool value) override REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (value) {
+      m->SetMustCountLocks();
+    }
+  }
+};
+
+FailureKind ClassVerifier::ReverifyClass(Thread* self,
+                                         ObjPtr<mirror::Class> klass,
+                                         HardFailLogMode log_level,
+                                         uint32_t api_level,
+                                         std::string* error) {
+  DCHECK(!Runtime::Current()->IsAotCompiler());
+  StackHandleScope<1> hs(self);
+  Handle<mirror::Class> h_klass(hs.NewHandle(klass));
+  // We don't want to mess with these while other mutators are possibly looking at them. Instead we
+  // will wait until we can update them while everything is suspended.
+  class DelayedVerifyCallback : public VerifierCallback {
+   public:
+    void SetDontCompile(ArtMethod* m, bool value) override REQUIRES_SHARED(Locks::mutator_lock_) {
+      dont_compiles_.push_back({ m, value });
+    }
+    void SetMustCountLocks(ArtMethod* m, bool value) override
+        REQUIRES_SHARED(Locks::mutator_lock_) {
+      count_locks_.push_back({ m, value });
+    }
+    void UpdateFlags(bool skip_access_checks) REQUIRES(Locks::mutator_lock_) {
+      for (auto it : count_locks_) {
+        VLOG(verifier_debug) << "Setting " << it.first->PrettyMethod() << " count locks to "
+                             << it.second;
+        if (it.second) {
+          it.first->SetMustCountLocks();
+        } else {
+          it.first->ClearMustCountLocks();
+        }
+        if (skip_access_checks && it.first->IsInvokable() && !it.first->IsNative()) {
+          it.first->SetSkipAccessChecks();
+        }
+      }
+      for (auto it : dont_compiles_) {
+        VLOG(verifier_debug) << "Setting " << it.first->PrettyMethod() << " dont-compile to "
+                             << it.second;
+        if (it.second) {
+          it.first->SetDontCompile();
+        } else {
+          it.first->ClearDontCompile();
+        }
+      }
+    }
+
+   private:
+    std::vector<std::pair<ArtMethod*, bool>> dont_compiles_;
+    std::vector<std::pair<ArtMethod*, bool>> count_locks_;
+  };
+  DelayedVerifyCallback dvc;
+  FailureKind res = CommonVerifyClass(self,
+                                      h_klass.Get(),
+                                      /*callbacks=*/nullptr,
+                                      &dvc,
+                                      /*allow_soft_failures=*/false,
+                                      log_level,
+                                      api_level,
+                                      error);
+  DCHECK_NE(res, FailureKind::kHardFailure);
+  ScopedThreadSuspension sts(Thread::Current(), ThreadState::kSuspended);
+  ScopedSuspendAll ssa("Update method flags for reverify");
+  dvc.UpdateFlags(res == FailureKind::kNoFailure);
+  return res;
+}
+
 FailureKind ClassVerifier::VerifyClass(Thread* self,
                                        ObjPtr<mirror::Class> klass,
                                        CompilerCallbacks* callbacks,
@@ -53,6 +137,25 @@
   if (klass->IsVerified()) {
     return FailureKind::kNoFailure;
   }
+  StandardVerifyCallback svc;
+  return CommonVerifyClass(self,
+                           klass,
+                           callbacks,
+                           &svc,
+                           allow_soft_failures,
+                           log_level,
+                           api_level,
+                           error);
+}
+
+FailureKind ClassVerifier::CommonVerifyClass(Thread* self,
+                                             ObjPtr<mirror::Class> klass,
+                                             CompilerCallbacks* callbacks,
+                                             VerifierCallback* verifier_callback,
+                                             bool allow_soft_failures,
+                                             HardFailLogMode log_level,
+                                             uint32_t api_level,
+                                             std::string* error) {
   bool early_failure = false;
   std::string failure_message;
   const DexFile& dex_file = klass->GetDexFile();
@@ -86,6 +189,32 @@
                      class_loader,
                      *class_def,
                      callbacks,
+                     verifier_callback,
+                     allow_soft_failures,
+                     log_level,
+                     api_level,
+                     error);
+}
+
+
+FailureKind ClassVerifier::VerifyClass(Thread* self,
+                                       const DexFile* dex_file,
+                                       Handle<mirror::DexCache> dex_cache,
+                                       Handle<mirror::ClassLoader> class_loader,
+                                       const dex::ClassDef& class_def,
+                                       CompilerCallbacks* callbacks,
+                                       bool allow_soft_failures,
+                                       HardFailLogMode log_level,
+                                       uint32_t api_level,
+                                       std::string* error) {
+  StandardVerifyCallback svc;
+  return VerifyClass(self,
+                     dex_file,
+                     dex_cache,
+                     class_loader,
+                     class_def,
+                     callbacks,
+                     &svc,
                      allow_soft_failures,
                      log_level,
                      api_level,
@@ -98,6 +227,7 @@
                                        Handle<mirror::ClassLoader> class_loader,
                                        const dex::ClassDef& class_def,
                                        CompilerCallbacks* callbacks,
+                                       VerifierCallback* verifier_callback,
                                        bool allow_soft_failures,
                                        HardFailLogMode log_level,
                                        uint32_t api_level,
@@ -140,6 +270,8 @@
     std::string hard_failure_msg;
     MethodVerifier::FailureData result =
         MethodVerifier::VerifyMethod(self,
+                                     linker,
+                                     Runtime::Current()->GetArenaPool(),
                                      method_idx,
                                      dex_file,
                                      dex_cache,
@@ -149,10 +281,12 @@
                                      resolved_method,
                                      method.GetAccessFlags(),
                                      callbacks,
+                                     verifier_callback,
                                      allow_soft_failures,
                                      log_level,
                                      /*need_precise_constants=*/ false,
                                      api_level,
+                                     Runtime::Current()->IsAotCompiler(),
                                      &hard_failure_msg);
     if (result.kind == FailureKind::kHardFailure) {
       if (failure_data.kind == FailureKind::kHardFailure) {
@@ -190,8 +324,8 @@
   }
 }
 
-void ClassVerifier::Init() {
-  MethodVerifier::Init();
+void ClassVerifier::Init(ClassLinker* class_linker) {
+  MethodVerifier::Init(class_linker);
 }
 
 void ClassVerifier::Shutdown() {
diff --git a/runtime/verifier/class_verifier.h b/runtime/verifier/class_verifier.h
index b7d3850..0b22966 100644
--- a/runtime/verifier/class_verifier.h
+++ b/runtime/verifier/class_verifier.h
@@ -25,10 +25,13 @@
 #include "base/locks.h"
 #include "handle.h"
 #include "obj_ptr.h"
+#include "verifier/method_verifier.h"
+#include "verifier/reg_type_cache.h"
 #include "verifier_enums.h"
 
 namespace art {
 
+class ClassLinker;
 class CompilerCallbacks;
 class DexFile;
 class RootVisitor;
@@ -49,6 +52,16 @@
 // Verifier that ensures the complete class is OK.
 class ClassVerifier {
  public:
+  // Redo verification on a loaded class. This is for use by class redefinition. This must be called
+  // with all methods already having all of kAccDontCompile and kAccCountLocks and not having
+  // kAccSkipAccessChecks. This will remove some of these flags from the method. The caller must
+  // ensure this cannot race with other changes to the verification class flags.
+  static FailureKind ReverifyClass(Thread* self,
+                                   ObjPtr<mirror::Class> klass,
+                                   HardFailLogMode log_level,
+                                   uint32_t api_level,
+                                   std::string* error)
+      REQUIRES_SHARED(Locks::mutator_lock_);
   // Verify a class. Returns "kNoFailure" on success.
   static FailureKind VerifyClass(Thread* self,
                                  ObjPtr<mirror::Class> klass,
@@ -70,13 +83,35 @@
                                  std::string* error)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static void Init() REQUIRES_SHARED(Locks::mutator_lock_);
+  static void Init(ClassLinker* class_linker) REQUIRES_SHARED(Locks::mutator_lock_);
   static void Shutdown();
 
   static void VisitStaticRoots(RootVisitor* visitor)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
  private:
+  static FailureKind CommonVerifyClass(Thread* self,
+                                       ObjPtr<mirror::Class> klass,
+                                       CompilerCallbacks* callbacks,
+                                       VerifierCallback* verifier_callback,
+                                       bool allow_soft_failures,
+                                       HardFailLogMode log_level,
+                                       uint32_t api_level,
+                                       std::string* error)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
+  static FailureKind VerifyClass(Thread* self,
+                                 const DexFile* dex_file,
+                                 Handle<mirror::DexCache> dex_cache,
+                                 Handle<mirror::ClassLoader> class_loader,
+                                 const dex::ClassDef& class_def,
+                                 CompilerCallbacks* callbacks,
+                                 VerifierCallback* verifier_callback,
+                                 bool allow_soft_failures,
+                                 HardFailLogMode log_level,
+                                 uint32_t api_level,
+                                 std::string* error)
+      REQUIRES_SHARED(Locks::mutator_lock_);
   DISALLOW_COPY_AND_ASSIGN(ClassVerifier);
 };
 
diff --git a/runtime/verifier/instruction_flags.h b/runtime/verifier/instruction_flags.h
index e5e71a4..6cd2865 100644
--- a/runtime/verifier/instruction_flags.h
+++ b/runtime/verifier/instruction_flags.h
@@ -102,11 +102,6 @@
     return (flags_ & (1 << kReturn)) != 0;
   }
 
-  void SetCompileTimeInfoPointAndReturn() {
-    SetCompileTimeInfoPoint();
-    SetReturn();
-  }
-
   std::string ToString() const;
 
  private:
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 32cd47a..d695e08 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -63,6 +63,7 @@
 #include "scoped_thread_state_change-inl.h"
 #include "stack.h"
 #include "vdex_file.h"
+#include "verifier/method_verifier.h"
 #include "verifier_compiler_binding.h"
 #include "verifier_deps.h"
 
@@ -96,8 +97,6 @@
       case kTrackRegsBranches:
         interesting = flags[i].IsBranchTarget();
         break;
-      default:
-        break;
     }
     if (interesting) {
       register_lines_[i].reset(RegisterLine::Create(registers_size, allocator, reg_types));
@@ -111,8 +110,9 @@
 namespace {
 
 enum class CheckAccess {
-  kYes,
   kNo,
+  kOnResolvedClass,
+  kYes,
 };
 
 enum class FieldAccessType {
@@ -120,6 +120,18 @@
   kAccPut
 };
 
+// Instruction types that are not marked as throwing (because they normally would not), but for
+// historical reasons may do so. These instructions cannot be marked kThrow as that would introduce
+// a general flow that is unwanted.
+//
+// Note: Not implemented as Instruction::Flags value as that set is full and we'd need to increase
+//       the struct size (making it a non-power-of-two) for a single element.
+//
+// Note: This should eventually be removed.
+constexpr bool IsCompatThrow(Instruction::Code opcode) {
+  return opcode == Instruction::Code::RETURN_OBJECT || opcode == Instruction::Code::MOVE_EXCEPTION;
+}
+
 template <bool kVerifierDebug>
 class MethodVerifier final : public ::art::verifier::MethodVerifier {
  public:
@@ -139,21 +151,50 @@
 
  private:
   MethodVerifier(Thread* self,
+                 ClassLinker* class_linker,
+                 ArenaPool* arena_pool,
                  const DexFile* dex_file,
+                 const dex::CodeItem* code_item,
+                 uint32_t method_idx,
+                 bool can_load_classes,
+                 bool allow_thread_suspension,
+                 bool allow_soft_failures,
+                 bool aot_mode,
                  Handle<mirror::DexCache> dex_cache,
                  Handle<mirror::ClassLoader> class_loader,
                  const dex::ClassDef& class_def,
-                 const dex::CodeItem* code_item,
-                 uint32_t method_idx,
                  ArtMethod* method,
                  uint32_t access_flags,
-                 bool can_load_classes,
-                 bool allow_soft_failures,
                  bool need_precise_constants,
                  bool verify_to_dump,
-                 bool allow_thread_suspension,
-                 uint32_t api_level)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+                 bool fill_register_lines,
+                 uint32_t api_level) REQUIRES_SHARED(Locks::mutator_lock_)
+     : art::verifier::MethodVerifier(self,
+                                     class_linker,
+                                     arena_pool,
+                                     dex_file,
+                                     code_item,
+                                     method_idx,
+                                     can_load_classes,
+                                     allow_thread_suspension,
+                                     allow_soft_failures,
+                                     aot_mode),
+       method_being_verified_(method),
+       method_access_flags_(access_flags),
+       return_type_(nullptr),
+       dex_cache_(dex_cache),
+       class_loader_(class_loader),
+       class_def_(class_def),
+       declaring_class_(nullptr),
+       interesting_dex_pc_(-1),
+       monitor_enter_dex_pcs_(nullptr),
+       need_precise_constants_(need_precise_constants),
+       verify_to_dump_(verify_to_dump),
+       allow_thread_suspension_(allow_thread_suspension),
+       is_constructor_(false),
+       fill_register_lines_(fill_register_lines),
+       api_level_(api_level == 0 ? std::numeric_limits<uint32_t>::max() : api_level) {
+  }
 
   void UninstantiableError(const char* descriptor) {
     Fail(VerifyError::VERIFY_ERROR_NO_CLASS) << "Could not create precise reference for "
@@ -175,10 +216,22 @@
   }
 
   // Adds the given string to the beginning of the last failure message.
-  void PrependToLastFailMessage(std::string);
+  void PrependToLastFailMessage(std::string prepend) {
+    size_t failure_num = failure_messages_.size();
+    DCHECK_NE(failure_num, 0U);
+    std::ostringstream* last_fail_message = failure_messages_[failure_num - 1];
+    prepend += last_fail_message->str();
+    failure_messages_[failure_num - 1] = new std::ostringstream(prepend, std::ostringstream::ate);
+    delete last_fail_message;
+  }
 
   // Adds the given string to the end of the last failure message.
-  void AppendToLastFailMessage(const std::string& append);
+  void AppendToLastFailMessage(const std::string& append) {
+    size_t failure_num = failure_messages_.size();
+    DCHECK_NE(failure_num, 0U);
+    std::ostringstream* last_fail_message = failure_messages_[failure_num - 1];
+    (*last_fail_message) << append;
+  }
 
   /*
    * Compute the width of the instruction at each address in the instruction stream, and store it in
@@ -253,26 +306,70 @@
   bool VerifyInstruction(const Instruction* inst, uint32_t code_offset);
 
   /* Ensure that the register index is valid for this code item. */
-  bool CheckRegisterIndex(uint32_t idx);
+  bool CheckRegisterIndex(uint32_t idx) {
+    if (UNLIKELY(idx >= code_item_accessor_.RegistersSize())) {
+      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register index out of range (" << idx << " >= "
+                                        << code_item_accessor_.RegistersSize() << ")";
+      return false;
+    }
+    return true;
+  }
 
   /* Ensure that the wide register index is valid for this code item. */
-  bool CheckWideRegisterIndex(uint32_t idx);
+  bool CheckWideRegisterIndex(uint32_t idx) {
+    if (UNLIKELY(idx + 1 >= code_item_accessor_.RegistersSize())) {
+      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register index out of range (" << idx
+                                        << "+1 >= " << code_item_accessor_.RegistersSize() << ")";
+      return false;
+    }
+    return true;
+  }
 
   // Perform static checks on an instruction referencing a CallSite. All we do here is ensure that
   // the call site index is in the valid range.
-  bool CheckCallSiteIndex(uint32_t idx);
+  bool CheckCallSiteIndex(uint32_t idx) {
+    uint32_t limit = dex_file_->NumCallSiteIds();
+    if (UNLIKELY(idx >= limit)) {
+      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad call site index " << idx << " (max "
+                                        << limit << ")";
+      return false;
+    }
+    return true;
+  }
 
   // Perform static checks on a field Get or set instruction. All we do here is ensure that the
   // field index is in the valid range.
-  bool CheckFieldIndex(uint32_t idx);
+  bool CheckFieldIndex(uint32_t idx) {
+    if (UNLIKELY(idx >= dex_file_->GetHeader().field_ids_size_)) {
+      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad field index " << idx << " (max "
+                                        << dex_file_->GetHeader().field_ids_size_ << ")";
+      return false;
+    }
+    return true;
+  }
 
   // Perform static checks on a method invocation instruction. All we do here is ensure that the
   // method index is in the valid range.
-  bool CheckMethodIndex(uint32_t idx);
+  bool CheckMethodIndex(uint32_t idx) {
+    if (UNLIKELY(idx >= dex_file_->GetHeader().method_ids_size_)) {
+      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad method index " << idx << " (max "
+                                        << dex_file_->GetHeader().method_ids_size_ << ")";
+      return false;
+    }
+    return true;
+  }
 
   // Perform static checks on an instruction referencing a constant method handle. All we do here
   // is ensure that the method index is in the valid range.
-  bool CheckMethodHandleIndex(uint32_t idx);
+  bool CheckMethodHandleIndex(uint32_t idx) {
+    uint32_t limit = dex_file_->NumMethodHandles();
+    if (UNLIKELY(idx >= limit)) {
+      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad method handle index " << idx << " (max "
+                                        << limit << ")";
+      return false;
+    }
+    return true;
+  }
 
   // Perform static checks on a "new-instance" instruction. Specifically, make sure the class
   // reference isn't for an array class.
@@ -280,14 +377,35 @@
 
   // Perform static checks on a prototype indexing instruction. All we do here is ensure that the
   // prototype index is in the valid range.
-  bool CheckPrototypeIndex(uint32_t idx);
+  bool CheckPrototypeIndex(uint32_t idx) {
+    if (UNLIKELY(idx >= dex_file_->GetHeader().proto_ids_size_)) {
+      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad prototype index " << idx << " (max "
+                                        << dex_file_->GetHeader().proto_ids_size_ << ")";
+      return false;
+    }
+    return true;
+  }
 
   /* Ensure that the string index is in the valid range. */
-  bool CheckStringIndex(uint32_t idx);
+  bool CheckStringIndex(uint32_t idx) {
+    if (UNLIKELY(idx >= dex_file_->GetHeader().string_ids_size_)) {
+      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad string index " << idx << " (max "
+                                        << dex_file_->GetHeader().string_ids_size_ << ")";
+      return false;
+    }
+    return true;
+  }
 
   // Perform static checks on an instruction that takes a class constant. Ensure that the class
   // index is in the valid range.
-  bool CheckTypeIndex(dex::TypeIndex idx);
+  bool CheckTypeIndex(dex::TypeIndex idx) {
+    if (UNLIKELY(idx.index_ >= dex_file_->GetHeader().type_ids_size_)) {
+      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad type index " << idx.index_ << " (max "
+                                        << dex_file_->GetHeader().type_ids_size_ << ")";
+      return false;
+    }
+    return true;
+  }
 
   // Perform static checks on a "new-array" instruction. Specifically, make sure they aren't
   // creating an array of arrays that causes the number of dimensions to exceed 255.
@@ -313,12 +431,33 @@
   // There are some tests we don't do here, e.g. we don't try to verify that invoking a method that
   // takes a double is done with consecutive registers. This requires parsing the target method
   // signature, which we will be doing later on during the code flow analysis.
-  bool CheckVarArgRegs(uint32_t vA, uint32_t arg[]);
+  bool CheckVarArgRegs(uint32_t vA, uint32_t arg[]) {
+    uint16_t registers_size = code_item_accessor_.RegistersSize();
+    for (uint32_t idx = 0; idx < vA; idx++) {
+      if (UNLIKELY(arg[idx] >= registers_size)) {
+        Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid reg index (" << arg[idx]
+                                          << ") in non-range invoke (>= " << registers_size << ")";
+        return false;
+      }
+    }
+
+    return true;
+  }
 
   // Check the register indices used in a "vararg/range" instruction, such as invoke-virtual/range
   // or filled-new-array/range.
   // - vA holds word count, vC holds index of first reg.
-  bool CheckVarArgRangeRegs(uint32_t vA, uint32_t vC);
+  bool CheckVarArgRangeRegs(uint32_t vA, uint32_t vC) {
+    uint16_t registers_size = code_item_accessor_.RegistersSize();
+    // vA/vC are unsigned 8-bit/16-bit quantities for /range instructions, so there's no risk of
+    // integer overflow when adding them here.
+    if (UNLIKELY(vA + vC > registers_size)) {
+      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid reg index " << vA << "+" << vC
+                                        << " in range invoke (> " << registers_size << ")";
+      return false;
+    }
+    return true;
+  }
 
   // Checks the method matches the expectations required to be signature polymorphic.
   bool CheckSignaturePolymorphicMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
@@ -384,6 +523,7 @@
    * reordering by specifying that you can't execute the new-instance instruction if a register
    * contains an uninitialized instance created by that same instruction.
    */
+  template <bool kMonitorDexPCs>
   bool CodeFlowVerifyMethod() REQUIRES_SHARED(Locks::mutator_lock_);
 
   /*
@@ -545,7 +685,20 @@
   const RegType& GetMethodReturnType() REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Get a type representing the declaring class of the method.
-  const RegType& GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
+  const RegType& GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (declaring_class_ == nullptr) {
+      const dex::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+      const char* descriptor
+          = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
+      if (method_being_verified_ != nullptr) {
+        ObjPtr<mirror::Class> klass = method_being_verified_->GetDeclaringClass();
+        declaring_class_ = &FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes());
+      } else {
+        declaring_class_ = &reg_types_.FromDescriptor(class_loader_.Get(), descriptor, false);
+      }
+    }
+    return *declaring_class_;
+  }
 
   InstructionFlags* CurrentInsnFlags() {
     return &GetModifiableInstructionFlags(work_insn_idx_);
@@ -560,7 +713,15 @@
   // Note: we reuse NO_CLASS as this will throw an exception at runtime, when the failing class is
   //       actually touched.
   const RegType& FromClass(const char* descriptor, ObjPtr<mirror::Class> klass, bool precise)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    DCHECK(klass != nullptr);
+    if (precise && !klass->IsInstantiable() && !klass->IsPrimitive()) {
+      Fail(VerifyError::VERIFY_ERROR_NO_CLASS) << "Could not create precise reference for "
+          << "non-instantiable klass " << descriptor;
+      precise = false;
+    }
+    return reg_types_.FromClass(descriptor, klass, precise);
+  }
 
   ALWAYS_INLINE bool FailOrAbort(bool condition, const char* error_msg, uint32_t work_insn_idx);
 
@@ -570,17 +731,50 @@
 
   // Returns the method index of an invoke instruction.
   uint16_t GetMethodIdxOfInvoke(const Instruction* inst)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    switch (inst->Opcode()) {
+      case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
+      case Instruction::INVOKE_VIRTUAL_QUICK: {
+        DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_)
+            << dex_file_->PrettyMethod(dex_method_idx_, true) << "@" << work_insn_idx_;
+        DCHECK(method_being_verified_ != nullptr);
+        uint16_t method_idx = method_being_verified_->GetIndexFromQuickening(work_insn_idx_);
+        CHECK_NE(method_idx, DexFile::kDexNoIndex16);
+        return method_idx;
+      }
+      default: {
+        return inst->VRegB();
+      }
+    }
+  }
   // Returns the field index of a field access instruction.
   uint16_t GetFieldIdxOfFieldAccess(const Instruction* inst, bool is_static)
-      REQUIRES_SHARED(Locks::mutator_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (is_static) {
+      return inst->VRegB_21c();
+    } else if (inst->IsQuickened()) {
+      DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_);
+      DCHECK(method_being_verified_ != nullptr);
+      uint16_t field_idx = method_being_verified_->GetIndexFromQuickening(work_insn_idx_);
+      CHECK_NE(field_idx, DexFile::kDexNoIndex16);
+      return field_idx;
+    } else {
+      return inst->VRegC_22c();
+    }
+  }
 
   // Run verification on the method. Returns true if verification completes and false if the input
   // has an irrecoverable corruption.
   bool Verify() override REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Dump the failures encountered by the verifier.
-  std::ostream& DumpFailures(std::ostream& os);
+  std::ostream& DumpFailures(std::ostream& os) {
+    DCHECK_EQ(failures_.size(), failure_messages_.size());
+    for (const auto* stream : failure_messages_) {
+        os << stream->str() << "\n";
+    }
+    return os;
+  }
 
   // Dump the state of the verifier, namely each instruction, what flags are set on it, register
   // information
@@ -590,6 +784,8 @@
   }
   void Dump(VariableIndentationOutputStream* vios) REQUIRES_SHARED(Locks::mutator_lock_);
 
+  bool HandleMoveException(const Instruction* inst) REQUIRES_SHARED(Locks::mutator_lock_);
+
   ArtMethod* method_being_verified_;  // Its ArtMethod representation if known.
   const uint32_t method_access_flags_;  // Method's access flags.
   const RegType* return_type_;  // Lazily computed return type of the method.
@@ -630,6 +826,9 @@
   // Note: this flag is only valid once Verify() has started.
   bool is_constructor_;
 
+  // Whether to attempt to fill all register lines for (ex) debugger use.
+  bool fill_register_lines_;
+
   // API level, for dependent checks. Note: we do not use '0' for unset here, to simplify checks.
   // Instead, unset level should correspond to max().
   const uint32_t api_level_;
@@ -681,45 +880,6 @@
 }
 
 template <bool kVerifierDebug>
-MethodVerifier<kVerifierDebug>::MethodVerifier(Thread* self,
-                                               const DexFile* dex_file,
-                                               Handle<mirror::DexCache> dex_cache,
-                                               Handle<mirror::ClassLoader> class_loader,
-                                               const dex::ClassDef& class_def,
-                                               const dex::CodeItem* code_item,
-                                               uint32_t dex_method_idx,
-                                               ArtMethod* method,
-                                               uint32_t method_access_flags,
-                                               bool can_load_classes,
-                                               bool allow_soft_failures,
-                                               bool need_precise_constants,
-                                               bool verify_to_dump,
-                                               bool allow_thread_suspension,
-                                               uint32_t api_level)
-    : art::verifier::MethodVerifier(self,
-                                    dex_file,
-                                    code_item,
-                                    dex_method_idx,
-                                    can_load_classes,
-                                    allow_thread_suspension,
-                                    allow_soft_failures),
-      method_being_verified_(method),
-      method_access_flags_(method_access_flags),
-      return_type_(nullptr),
-      dex_cache_(dex_cache),
-      class_loader_(class_loader),
-      class_def_(class_def),
-      declaring_class_(nullptr),
-      interesting_dex_pc_(-1),
-      monitor_enter_dex_pcs_(nullptr),
-      need_precise_constants_(need_precise_constants),
-      verify_to_dump_(verify_to_dump),
-      allow_thread_suspension_(allow_thread_suspension),
-      is_constructor_(false),
-      api_level_(api_level == 0 ? std::numeric_limits<uint32_t>::max() : api_level) {
-}
-
-template <bool kVerifierDebug>
 void MethodVerifier<kVerifierDebug>::FindLocksAtDexPc() {
   CHECK(monitor_enter_dex_pcs_ != nullptr);
   CHECK(code_item_accessor_.HasCodeItem());  // This only makes sense for methods with code.
@@ -893,7 +1053,7 @@
                             InstructionFlags());
   // Run through the instructions and see if the width checks out.
   bool result = ComputeWidthsAndCountOps();
-  bool allow_runtime_only_instructions = !Runtime::Current()->IsAotCompiler() || verify_to_dump_;
+  bool allow_runtime_only_instructions = !IsAotMode() || verify_to_dump_;
   // Flag instructions guarded by a "try" block and check exception handlers.
   result = result && ScanTryCatchBlocks();
   // Perform static instruction verification.
@@ -907,24 +1067,6 @@
 }
 
 template <bool kVerifierDebug>
-void MethodVerifier<kVerifierDebug>::PrependToLastFailMessage(std::string prepend) {
-  size_t failure_num = failure_messages_.size();
-  DCHECK_NE(failure_num, 0U);
-  std::ostringstream* last_fail_message = failure_messages_[failure_num - 1];
-  prepend += last_fail_message->str();
-  failure_messages_[failure_num - 1] = new std::ostringstream(prepend, std::ostringstream::ate);
-  delete last_fail_message;
-}
-
-template <bool kVerifierDebug>
-void MethodVerifier<kVerifierDebug>::AppendToLastFailMessage(const std::string& append) {
-  size_t failure_num = failure_messages_.size();
-  DCHECK_NE(failure_num, 0U);
-  std::ostringstream* last_fail_message = failure_messages_[failure_num - 1];
-  (*last_fail_message) << append;
-}
-
-template <bool kVerifierDebug>
 bool MethodVerifier<kVerifierDebug>::ComputeWidthsAndCountOps() {
   // We can't assume the instruction is well formed, handle the case where calculating the size
   // goes past the end of the code item.
@@ -954,6 +1096,7 @@
                                       << it.DexPc() << " vs. " << insns_size << ")";
     return false;
   }
+  DCHECK(GetInstructionFlags(0).IsOpcode());
 
   return true;
 }
@@ -986,7 +1129,7 @@
   // Iterate over each of the handlers to verify target addresses.
   const uint8_t* handlers_ptr = code_item_accessor_.GetCatchHandlerData();
   const uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
-  ClassLinker* linker = Runtime::Current()->GetClassLinker();
+  ClassLinker* linker = GetClassLinker();
   for (uint32_t idx = 0; idx < handlers_size; idx++) {
     CatchHandlerIterator iterator(handlers_ptr);
     for (; iterator.HasNext(); iterator.Next()) {
@@ -1021,31 +1164,20 @@
 template <bool kVerifierDebug>
 template <bool kAllowRuntimeOnlyInstructions>
 bool MethodVerifier<kVerifierDebug>::VerifyInstructions() {
-  /* Flag the start of the method as a branch target, and a GC point due to stack overflow errors */
+  // Flag the start of the method as a branch target.
   GetModifiableInstructionFlags(0).SetBranchTarget();
-  GetModifiableInstructionFlags(0).SetCompileTimeInfoPoint();
   for (const DexInstructionPcPair& inst : code_item_accessor_) {
     const uint32_t dex_pc = inst.DexPc();
     if (!VerifyInstruction<kAllowRuntimeOnlyInstructions>(&inst.Inst(), dex_pc)) {
       DCHECK_NE(failures_.size(), 0U);
       return false;
     }
-    /* Flag instructions that are garbage collection points */
-    // All invoke points are marked as "Throw" points already.
-    // We are relying on this to also count all the invokes as interesting.
-    if (inst->IsBranch()) {
+    // Flag some interesting instructions.
+    if (inst->IsReturn()) {
+      GetModifiableInstructionFlags(dex_pc).SetReturn();
+    } else if (inst->Opcode() == Instruction::CHECK_CAST) {
+      // The dex-to-dex compiler wants type information to elide check-casts.
       GetModifiableInstructionFlags(dex_pc).SetCompileTimeInfoPoint();
-      // The compiler also needs safepoints for fall-through to loop heads.
-      // Such a loop head must be a target of a branch.
-      int32_t offset = 0;
-      bool cond, self_ok;
-      bool target_ok = GetBranchOffset(dex_pc, &offset, &cond, &self_ok);
-      DCHECK(target_ok);
-      GetModifiableInstructionFlags(dex_pc + offset).SetCompileTimeInfoPoint();
-    } else if (inst->IsSwitch() || inst->IsThrow()) {
-      GetModifiableInstructionFlags(dex_pc).SetCompileTimeInfoPoint();
-    } else if (inst->IsReturn()) {
-      GetModifiableInstructionFlags(dex_pc).SetCompileTimeInfoPointAndReturn();
     }
   }
   return true;
@@ -1062,7 +1194,7 @@
     // the data flow analysis will fail.
     Fail(VERIFY_ERROR_FORCE_INTERPRETER)
         << "experimental instruction is not supported by verifier; skipping verification";
-    have_pending_experimental_failure_ = true;
+    flags_.have_pending_experimental_failure_ = true;
     return false;
   }
 
@@ -1180,68 +1312,6 @@
 }
 
 template <bool kVerifierDebug>
-inline bool MethodVerifier<kVerifierDebug>::CheckRegisterIndex(uint32_t idx) {
-  if (UNLIKELY(idx >= code_item_accessor_.RegistersSize())) {
-    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "register index out of range (" << idx << " >= "
-                                      << code_item_accessor_.RegistersSize() << ")";
-    return false;
-  }
-  return true;
-}
-
-template <bool kVerifierDebug>
-inline bool MethodVerifier<kVerifierDebug>::CheckWideRegisterIndex(uint32_t idx) {
-  if (UNLIKELY(idx + 1 >= code_item_accessor_.RegistersSize())) {
-    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "wide register index out of range (" << idx
-                                      << "+1 >= " << code_item_accessor_.RegistersSize() << ")";
-    return false;
-  }
-  return true;
-}
-
-template <bool kVerifierDebug>
-inline bool MethodVerifier<kVerifierDebug>::CheckCallSiteIndex(uint32_t idx) {
-  uint32_t limit = dex_file_->NumCallSiteIds();
-  if (UNLIKELY(idx >= limit)) {
-    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad call site index " << idx << " (max "
-                                      << limit << ")";
-    return false;
-  }
-  return true;
-}
-
-template <bool kVerifierDebug>
-inline bool MethodVerifier<kVerifierDebug>::CheckFieldIndex(uint32_t idx) {
-  if (UNLIKELY(idx >= dex_file_->GetHeader().field_ids_size_)) {
-    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad field index " << idx << " (max "
-                                      << dex_file_->GetHeader().field_ids_size_ << ")";
-    return false;
-  }
-  return true;
-}
-
-template <bool kVerifierDebug>
-inline bool MethodVerifier<kVerifierDebug>::CheckMethodIndex(uint32_t idx) {
-  if (UNLIKELY(idx >= dex_file_->GetHeader().method_ids_size_)) {
-    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad method index " << idx << " (max "
-                                      << dex_file_->GetHeader().method_ids_size_ << ")";
-    return false;
-  }
-  return true;
-}
-
-template <bool kVerifierDebug>
-inline bool MethodVerifier<kVerifierDebug>::CheckMethodHandleIndex(uint32_t idx) {
-  uint32_t limit = dex_file_->NumMethodHandles();
-  if (UNLIKELY(idx >= limit)) {
-    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad method handle index " << idx << " (max "
-                                      << limit << ")";
-    return false;
-  }
-  return true;
-}
-
-template <bool kVerifierDebug>
 inline bool MethodVerifier<kVerifierDebug>::CheckNewInstance(dex::TypeIndex idx) {
   if (UNLIKELY(idx.index_ >= dex_file_->GetHeader().type_ids_size_)) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad type index " << idx.index_ << " (max "
@@ -1262,36 +1332,6 @@
 }
 
 template <bool kVerifierDebug>
-inline bool MethodVerifier<kVerifierDebug>::CheckPrototypeIndex(uint32_t idx) {
-  if (UNLIKELY(idx >= dex_file_->GetHeader().proto_ids_size_)) {
-    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad prototype index " << idx << " (max "
-                                      << dex_file_->GetHeader().proto_ids_size_ << ")";
-    return false;
-  }
-  return true;
-}
-
-template <bool kVerifierDebug>
-inline bool MethodVerifier<kVerifierDebug>::CheckStringIndex(uint32_t idx) {
-  if (UNLIKELY(idx >= dex_file_->GetHeader().string_ids_size_)) {
-    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad string index " << idx << " (max "
-                                      << dex_file_->GetHeader().string_ids_size_ << ")";
-    return false;
-  }
-  return true;
-}
-
-template <bool kVerifierDebug>
-inline bool MethodVerifier<kVerifierDebug>::CheckTypeIndex(dex::TypeIndex idx) {
-  if (UNLIKELY(idx.index_ >= dex_file_->GetHeader().type_ids_size_)) {
-    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad type index " << idx.index_ << " (max "
-                                      << dex_file_->GetHeader().type_ids_size_ << ")";
-    return false;
-  }
-  return true;
-}
-
-template <bool kVerifierDebug>
 bool MethodVerifier<kVerifierDebug>::CheckNewArray(dex::TypeIndex idx) {
   if (UNLIKELY(idx.index_ >= dex_file_->GetHeader().type_ids_size_)) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "bad type index " << idx.index_ << " (max "
@@ -1547,38 +1587,14 @@
 }
 
 template <bool kVerifierDebug>
-bool MethodVerifier<kVerifierDebug>::CheckVarArgRegs(uint32_t vA, uint32_t arg[]) {
-  uint16_t registers_size = code_item_accessor_.RegistersSize();
-  for (uint32_t idx = 0; idx < vA; idx++) {
-    if (UNLIKELY(arg[idx] >= registers_size)) {
-      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid reg index (" << arg[idx]
-                                        << ") in non-range invoke (>= " << registers_size << ")";
-      return false;
-    }
-  }
-
-  return true;
-}
-
-template <bool kVerifierDebug>
-bool MethodVerifier<kVerifierDebug>::CheckVarArgRangeRegs(uint32_t vA, uint32_t vC) {
-  uint16_t registers_size = code_item_accessor_.RegistersSize();
-  // vA/vC are unsigned 8-bit/16-bit quantities for /range instructions, so there's no risk of
-  // integer overflow when adding them here.
-  if (UNLIKELY(vA + vC > registers_size)) {
-    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid reg index " << vA << "+" << vC
-                                      << " in range invoke (> " << registers_size << ")";
-    return false;
-  }
-  return true;
-}
-
-template <bool kVerifierDebug>
 bool MethodVerifier<kVerifierDebug>::VerifyCodeFlow() {
   const uint16_t registers_size = code_item_accessor_.RegistersSize();
 
   /* Create and initialize table holding register status */
-  reg_table_.Init(kTrackCompilerInterestPoints,
+  RegisterTrackingMode base_mode = IsAotMode()
+                                       ? kTrackCompilerInterestPoints
+                                       : kTrackRegsBranches;
+  reg_table_.Init(fill_register_lines_ ? kTrackRegsAll : base_mode,
                   insn_flags_.get(),
                   code_item_accessor_.InsnsSizeInCodeUnits(),
                   registers_size,
@@ -1597,10 +1613,13 @@
     return false;
   }
   // We may have a runtime failure here, clear.
-  have_pending_runtime_throw_failure_ = false;
+  flags_.have_pending_runtime_throw_failure_ = false;
 
   /* Perform code flow verification. */
-  if (!CodeFlowVerifyMethod()) {
+  bool res = LIKELY(monitor_enter_dex_pcs_ == nullptr)
+                 ? CodeFlowVerifyMethod</*kMonitorDexPCs=*/ false>()
+                 : CodeFlowVerifyMethod</*kMonitorDexPCs=*/ true>();
+  if (UNLIKELY(!res)) {
     DCHECK_NE(failures_.size(), 0U);
     return false;
   }
@@ -1608,15 +1627,6 @@
 }
 
 template <bool kVerifierDebug>
-std::ostream& MethodVerifier<kVerifierDebug>::DumpFailures(std::ostream& os) {
-  DCHECK_EQ(failures_.size(), failure_messages_.size());
-  for (size_t i = 0; i < failures_.size(); ++i) {
-      os << failure_messages_[i]->str() << "\n";
-  }
-  return os;
-}
-
-template <bool kVerifierDebug>
 void MethodVerifier<kVerifierDebug>::Dump(VariableIndentationOutputStream* vios) {
   if (!code_item_accessor_.HasCodeItem()) {
     vios->Stream() << "Native method\n";
@@ -1826,7 +1836,30 @@
   return result;
 }
 
+COLD_ATTR
+void HandleMonitorDexPcsWorkLine(
+    std::vector<::art::verifier::MethodVerifier::DexLockInfo>* monitor_enter_dex_pcs,
+    RegisterLine* work_line) {
+  monitor_enter_dex_pcs->clear();  // The new work line is more accurate than the previous one.
+
+  std::map<uint32_t, ::art::verifier::MethodVerifier::DexLockInfo> depth_to_lock_info;
+  auto collector = [&](uint32_t dex_reg, uint32_t depth) {
+    auto insert_pair = depth_to_lock_info.emplace(
+        depth, ::art::verifier::MethodVerifier::DexLockInfo(depth));
+    auto it = insert_pair.first;
+    auto set_insert_pair = it->second.dex_registers.insert(dex_reg);
+    DCHECK(set_insert_pair.second);
+  };
+  work_line->IterateRegToLockDepths(collector);
+  for (auto& pair : depth_to_lock_info) {
+    monitor_enter_dex_pcs->push_back(pair.second);
+    // Map depth to dex PC.
+    monitor_enter_dex_pcs->back().dex_pc = work_line->GetMonitorEnterDexPc(pair.second.dex_pc);
+  }
+}
+
 template <bool kVerifierDebug>
+template <bool kMonitorDexPCs>
 bool MethodVerifier<kVerifierDebug>::CodeFlowVerifyMethod() {
   const uint16_t* insns = code_item_accessor_.Insns();
   const uint32_t insns_size = code_item_accessor_.InsnsSizeInCodeUnits();
@@ -1882,6 +1915,15 @@
         }
       }
     }
+
+    // If we're doing FindLocksAtDexPc, check whether we're at the dex pc we care about.
+    // We want the state _before_ the instruction, for the case where the dex pc we're
+    // interested in is itself a monitor-enter instruction (which is a likely place
+    // for a thread to be suspended).
+    if (kMonitorDexPCs && UNLIKELY(work_insn_idx_ == interesting_dex_pc_)) {
+      HandleMonitorDexPcsWorkLine(monitor_enter_dex_pcs_, work_line_.get());
+    }
+
     if (!CodeFlowVerifyInstruction(&start_guess)) {
       std::string prepend(dex_file_->PrettyMethod(dex_method_idx_));
       prepend += " failed to verify: ";
@@ -1992,29 +2034,6 @@
 
 template <bool kVerifierDebug>
 bool MethodVerifier<kVerifierDebug>::CodeFlowVerifyInstruction(uint32_t* start_guess) {
-  // If we're doing FindLocksAtDexPc, check whether we're at the dex pc we care about.
-  // We want the state _before_ the instruction, for the case where the dex pc we're
-  // interested in is itself a monitor-enter instruction (which is a likely place
-  // for a thread to be suspended).
-  if (monitor_enter_dex_pcs_ != nullptr && work_insn_idx_ == interesting_dex_pc_) {
-    monitor_enter_dex_pcs_->clear();  // The new work line is more accurate than the previous one.
-
-    std::map<uint32_t, DexLockInfo> depth_to_lock_info;
-    auto collector = [&](uint32_t dex_reg, uint32_t depth) {
-      auto insert_pair = depth_to_lock_info.emplace(depth, DexLockInfo(depth));
-      auto it = insert_pair.first;
-      auto set_insert_pair = it->second.dex_registers.insert(dex_reg);
-      DCHECK(set_insert_pair.second);
-    };
-    work_line_->IterateRegToLockDepths(collector);
-    for (auto& pair : depth_to_lock_info) {
-      monitor_enter_dex_pcs_->push_back(pair.second);
-      // Map depth to dex PC.
-      (*monitor_enter_dex_pcs_)[monitor_enter_dex_pcs_->size() - 1].dex_pc =
-          work_line_->GetMonitorEnterDexPc(pair.second.dex_pc);
-    }
-  }
-
   /*
    * Once we finish decoding the instruction, we need to figure out where
    * we can go from here. There are three possible ways to transfer
@@ -2052,12 +2071,15 @@
    * from the "successful" code path (e.g. a check-cast that "improves"
    * a type) to be visible to the exception handler.
    */
-  if ((opcode_flags & Instruction::kThrow) != 0 && CurrentInsnFlags()->IsInTry()) {
+  if (((opcode_flags & Instruction::kThrow) != 0 || IsCompatThrow(inst->Opcode())) &&
+      CurrentInsnFlags()->IsInTry()) {
     saved_line_->CopyFromLine(work_line_.get());
   } else if (kIsDebugBuild) {
     saved_line_->FillWithGarbage();
   }
-  DCHECK(!have_pending_runtime_throw_failure_);  // Per-instruction flag, should not be set here.
+  // Per-instruction flag, should not be set here.
+  DCHECK(!flags_.have_pending_runtime_throw_failure_);
+  bool exc_handler_unreachable = false;
 
 
   // We need to ensure the work line is consistent while performing validation. When we spot a
@@ -2127,21 +2149,12 @@
       work_line_->CopyResultRegister1(this, inst->VRegA_11x(), true);
       break;
 
-    case Instruction::MOVE_EXCEPTION: {
-      // We do not allow MOVE_EXCEPTION as the first instruction in a method. This is a simple case
-      // where one entrypoint to the catch block is not actually an exception path.
-      if (work_insn_idx_ == 0) {
-        Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "move-exception at pc 0x0";
-        break;
+    case Instruction::MOVE_EXCEPTION:
+      if (!HandleMoveException(inst)) {
+        exc_handler_unreachable = true;
       }
-      /*
-       * This statement can only appear as the first instruction in an exception handler. We verify
-       * that as part of extracting the exception type from the catch block list.
-       */
-      const RegType& res_type = GetCaughtExceptionType();
-      work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_11x(), res_type);
       break;
-    }
+
     case Instruction::RETURN_VOID:
       if (!IsInstanceConstructor() || work_line_->CheckConstructorReturn(this)) {
         if (!GetMethodReturnType().IsConflict()) {
@@ -2216,8 +2229,8 @@
                                               << reg_type;
           } else if (!return_type.IsAssignableFrom(reg_type, this)) {
             if (reg_type.IsUnresolvedTypes() || return_type.IsUnresolvedTypes()) {
-              Fail(VERIFY_ERROR_NO_CLASS) << " can't resolve returned type '" << return_type
-                  << "' or '" << reg_type << "'";
+              Fail(api_level_ > 29u ? VERIFY_ERROR_BAD_CLASS_SOFT : VERIFY_ERROR_NO_CLASS)
+                  << " can't resolve returned type '" << return_type << "' or '" << reg_type << "'";
             } else {
               bool soft_error = false;
               // Check whether arrays are involved. They will show a valid class status, even
@@ -2426,7 +2439,7 @@
       const RegType& res_type = ResolveClass<CheckAccess::kYes>(type_idx);
       if (res_type.IsConflict()) {
         // If this is a primitive type, fail HARD.
-        ObjPtr<mirror::Class> klass = Runtime::Current()->GetClassLinker()->LookupResolvedType(
+        ObjPtr<mirror::Class> klass = GetClassLinker()->LookupResolvedType(
             type_idx, dex_cache_.Get(), class_loader_.Get());
         if (klass != nullptr && klass->IsPrimitive()) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "using primitive type "
@@ -2663,20 +2676,15 @@
       }
 
       // Find previous instruction - its existence is a precondition to peephole optimization.
-      uint32_t instance_of_idx = 0;
-      if (0 != work_insn_idx_) {
-        instance_of_idx = work_insn_idx_ - 1;
-        while (0 != instance_of_idx && !GetInstructionFlags(instance_of_idx).IsOpcode()) {
-          instance_of_idx--;
-        }
-        if (FailOrAbort(GetInstructionFlags(instance_of_idx).IsOpcode(),
-                        "Unable to get previous instruction of if-eqz/if-nez for work index ",
-                        work_insn_idx_)) {
-          break;
-        }
-      } else {
+      if (UNLIKELY(0 == work_insn_idx_)) {
         break;
       }
+      uint32_t instance_of_idx = work_insn_idx_ - 1;
+      while (0 != instance_of_idx && !GetInstructionFlags(instance_of_idx).IsOpcode()) {
+        instance_of_idx--;
+      }
+      // Dex index 0 must be an opcode.
+      DCHECK(GetInstructionFlags(instance_of_idx).IsOpcode());
 
       const Instruction& instance_of_inst = code_item_accessor_.InstructionAt(instance_of_idx);
 
@@ -2706,8 +2714,11 @@
         // type is assignable to the original then allow optimization. This check is performed to
         // ensure that subsequent merges don't lose type information - such as becoming an
         // interface from a class that would lose information relevant to field checks.
+        //
+        // Note: do not do an access check. This may mark this with a runtime throw that actually
+        //       happens at the instanceof, not the branch (and branches aren't flagged to throw).
         const RegType& orig_type = work_line_->GetRegisterType(this, instance_of_inst.VRegB_22c());
-        const RegType& cast_type = ResolveClass<CheckAccess::kYes>(
+        const RegType& cast_type = ResolveClass<CheckAccess::kNo>(
             dex::TypeIndex(instance_of_inst.VRegC_22c()));
 
         if (!orig_type.Equals(cast_type) &&
@@ -2738,11 +2749,7 @@
             while (0 != move_idx && !GetInstructionFlags(move_idx).IsOpcode()) {
               move_idx--;
             }
-            if (FailOrAbort(GetInstructionFlags(move_idx).IsOpcode(),
-                            "Unable to get previous instruction of if-eqz/if-nez for work index ",
-                            work_insn_idx_)) {
-              break;
-            }
+            DCHECK(GetInstructionFlags(move_idx).IsOpcode());
             auto maybe_update_fn = [&instance_of_inst, update_line, this, &cast_type](
                 uint16_t move_src,
                 uint16_t move_trg)
@@ -3465,8 +3472,8 @@
      */
   }  // end - switch (dec_insn.opcode)
 
-  if (have_pending_hard_failure_) {
-    if (Runtime::Current()->IsAotCompiler()) {
+  if (flags_.have_pending_hard_failure_) {
+    if (IsAotMode()) {
       /* When AOT compiling, check that the last failure is a hard failure */
       if (failures_[failures_.size() - 1] != VERIFY_ERROR_BAD_CLASS_HARD) {
         LOG(ERROR) << "Pending failures:";
@@ -3482,7 +3489,8 @@
     /* immediate failure, reject class */
     info_messages_ << "Rejecting opcode " << inst->DumpString(dex_file_);
     return false;
-  } else if (have_pending_runtime_throw_failure_) {
+  } else if (flags_.have_pending_runtime_throw_failure_) {
+    LogVerifyInfo() << "Elevating opcode flags from " << opcode_flags << " to Throw";
     /* checking interpreter will throw, mark following code as unreachable */
     opcode_flags = Instruction::kThrow;
     // Note: the flag must be reset as it is only global to decouple Fail and is semantically per
@@ -3585,7 +3593,7 @@
     CatchHandlerIterator iterator(code_item_accessor_, *try_item);
 
     // Need the linker to try and resolve the handled class to check if it's Throwable.
-    ClassLinker* linker = Runtime::Current()->GetClassLinker();
+    ClassLinker* linker = GetClassLinker();
 
     for (; iterator.HasNext(); iterator.Next()) {
       dex::TypeIndex handler_type_idx = iterator.GetHandlerTypeIndex();
@@ -3642,7 +3650,7 @@
    *        because it changes work_line_ when performing peephole optimization
    *        and this change should not be used in those cases.
    */
-  if ((opcode_flags & Instruction::kContinue) != 0) {
+  if ((opcode_flags & Instruction::kContinue) != 0 && !exc_handler_unreachable) {
     DCHECK_EQ(&code_item_accessor_.InstructionAt(work_insn_idx_), inst);
     uint32_t next_insn_idx = work_insn_idx_ + inst->SizeInCodeUnits();
     if (next_insn_idx >= code_item_accessor_.InsnsSizeInCodeUnits()) {
@@ -3702,10 +3710,10 @@
   DCHECK_LT(*start_guess, code_item_accessor_.InsnsSizeInCodeUnits());
   DCHECK(GetInstructionFlags(*start_guess).IsOpcode());
 
-  if (have_pending_runtime_throw_failure_) {
-    have_any_pending_runtime_throw_failure_ = true;
+  if (flags_.have_pending_runtime_throw_failure_) {
+    flags_.have_any_pending_runtime_throw_failure_ = true;
     // Reset the pending_runtime_throw flag now.
-    have_pending_runtime_throw_failure_ = false;
+    flags_.have_pending_runtime_throw_failure_ = false;
   }
 
   return true;
@@ -3714,7 +3722,7 @@
 template <bool kVerifierDebug>
 template <CheckAccess C>
 const RegType& MethodVerifier<kVerifierDebug>::ResolveClass(dex::TypeIndex class_idx) {
-  ClassLinker* linker = Runtime::Current()->GetClassLinker();
+  ClassLinker* linker = GetClassLinker();
   ObjPtr<mirror::Class> klass = can_load_classes_
       ? linker->ResolveType(class_idx, dex_cache_, class_loader_)
       : linker->LookupResolvedType(class_idx, dex_cache_.Get(), class_loader_.Get());
@@ -3755,9 +3763,10 @@
   // the access-checks interpreter. If result is primitive, skip the access check.
   //
   // Note: we do this for unresolved classes to trigger re-verification at runtime.
-  if (C == CheckAccess::kYes &&
+  if (C != CheckAccess::kNo &&
       result->IsNonZeroReferenceTypes() &&
-      (IsSdkVersionSetAndAtLeast(api_level_, SdkVersion::kP) || !result->IsUnresolvedTypes())) {
+      ((C == CheckAccess::kYes && IsSdkVersionSetAndAtLeast(api_level_, SdkVersion::kP))
+          || !result->IsUnresolvedTypes())) {
     const RegType& referrer = GetDeclaringClass();
     if ((IsSdkVersionSetAndAtLeast(api_level_, SdkVersion::kP) || !referrer.IsUnresolvedTypes()) &&
         !referrer.CanAccess(*result)) {
@@ -3769,55 +3778,94 @@
 }
 
 template <bool kVerifierDebug>
-const RegType& MethodVerifier<kVerifierDebug>::GetCaughtExceptionType() {
-  const RegType* common_super = nullptr;
-  if (code_item_accessor_.TriesSize() != 0) {
-    const uint8_t* handlers_ptr = code_item_accessor_.GetCatchHandlerData();
-    uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
-    for (uint32_t i = 0; i < handlers_size; i++) {
-      CatchHandlerIterator iterator(handlers_ptr);
-      for (; iterator.HasNext(); iterator.Next()) {
-        if (iterator.GetHandlerAddress() == (uint32_t) work_insn_idx_) {
-          if (!iterator.GetHandlerTypeIndex().IsValid()) {
-            common_super = &reg_types_.JavaLangThrowable(false);
-          } else {
-            const RegType& exception =
-                ResolveClass<CheckAccess::kYes>(iterator.GetHandlerTypeIndex());
-            if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(exception, this)) {
-              DCHECK(!exception.IsUninitializedTypes());  // Comes from dex, shouldn't be uninit.
-              if (exception.IsUnresolvedTypes()) {
-                // We don't know enough about the type. Fail here and let runtime handle it.
-                Fail(VERIFY_ERROR_NO_CLASS) << "unresolved exception class " << exception;
-                return exception;
-              } else {
-                Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "unexpected non-exception class " << exception;
-                return reg_types_.Conflict();
-              }
-            } else if (common_super == nullptr) {
-              common_super = &exception;
-            } else if (common_super->Equals(exception)) {
-              // odd case, but nothing to do
+bool MethodVerifier<kVerifierDebug>::HandleMoveException(const Instruction* inst)  {
+  // We do not allow MOVE_EXCEPTION as the first instruction in a method. This is a simple case
+  // where one entrypoint to the catch block is not actually an exception path.
+  if (work_insn_idx_ == 0) {
+    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "move-exception at pc 0x0";
+    return true;
+  }
+  /*
+   * This statement can only appear as the first instruction in an exception handler. We verify
+   * that as part of extracting the exception type from the catch block list.
+   */
+  auto caught_exc_type_fn = [&]() REQUIRES_SHARED(Locks::mutator_lock_) ->
+      std::pair<bool, const RegType*> {
+    const RegType* common_super = nullptr;
+    if (code_item_accessor_.TriesSize() != 0) {
+      const uint8_t* handlers_ptr = code_item_accessor_.GetCatchHandlerData();
+      uint32_t handlers_size = DecodeUnsignedLeb128(&handlers_ptr);
+      const RegType* unresolved = nullptr;
+      for (uint32_t i = 0; i < handlers_size; i++) {
+        CatchHandlerIterator iterator(handlers_ptr);
+        for (; iterator.HasNext(); iterator.Next()) {
+          if (iterator.GetHandlerAddress() == (uint32_t) work_insn_idx_) {
+            if (!iterator.GetHandlerTypeIndex().IsValid()) {
+              common_super = &reg_types_.JavaLangThrowable(false);
             } else {
-              common_super = &common_super->Merge(exception, &reg_types_, this);
-              if (FailOrAbort(reg_types_.JavaLangThrowable(false).IsAssignableFrom(
-                                  *common_super, this),
-                              "java.lang.Throwable is not assignable-from common_super at ",
-                              work_insn_idx_)) {
-                break;
+              // Do access checks only on resolved exception classes.
+              const RegType& exception =
+                  ResolveClass<CheckAccess::kOnResolvedClass>(iterator.GetHandlerTypeIndex());
+              if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(exception, this)) {
+                DCHECK(!exception.IsUninitializedTypes());  // Comes from dex, shouldn't be uninit.
+                if (exception.IsUnresolvedTypes()) {
+                  if (unresolved == nullptr) {
+                    unresolved = &exception;
+                  } else {
+                    unresolved = &unresolved->SafeMerge(exception, &reg_types_, this);
+                  }
+                } else {
+                  Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "unexpected non-exception class "
+                                                    << exception;
+                  return std::make_pair(true, &reg_types_.Conflict());
+                }
+              } else if (common_super == nullptr) {
+                common_super = &exception;
+              } else if (common_super->Equals(exception)) {
+                // odd case, but nothing to do
+              } else {
+                common_super = &common_super->Merge(exception, &reg_types_, this);
+                if (FailOrAbort(reg_types_.JavaLangThrowable(false).IsAssignableFrom(
+                    *common_super, this),
+                    "java.lang.Throwable is not assignable-from common_super at ",
+                    work_insn_idx_)) {
+                  break;
+                }
               }
             }
           }
         }
+        handlers_ptr = iterator.EndDataPointer();
       }
-      handlers_ptr = iterator.EndDataPointer();
+      if (unresolved != nullptr) {
+        if (!IsAotMode() && common_super == nullptr) {
+          // This is an unreachable handler.
+
+          // We need to post a failure. The compiler currently does not handle unreachable
+          // code correctly.
+          Fail(VERIFY_ERROR_SKIP_COMPILER, /*pending_exc=*/ false)
+              << "Unresolved catch handler, fail for compiler";
+
+          return std::make_pair(false, unresolved);
+        }
+        // Soft-fail, but do not handle this with a synthetic throw.
+        Fail(VERIFY_ERROR_NO_CLASS, /*pending_exc=*/ false) << "Unresolved catch handler";
+        if (common_super != nullptr) {
+          unresolved = &unresolved->Merge(*common_super, &reg_types_, this);
+        }
+        return std::make_pair(true, unresolved);
+      }
     }
-  }
-  if (common_super == nullptr) {
-    /* no catch blocks, or no catches with classes we can find */
-    Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "unable to find exception handler";
-    return reg_types_.Conflict();
-  }
-  return *common_super;
+    if (common_super == nullptr) {
+      /* no catch blocks, or no catches with classes we can find */
+      Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "unable to find exception handler";
+      return std::make_pair(true, &reg_types_.Conflict());
+    }
+    return std::make_pair(true, common_super);
+  };
+  auto result = caught_exc_type_fn();
+  work_line_->SetRegisterType<LockOp::kClear>(this, inst->VRegA_11x(), *result.second);
+  return result.first;
 }
 
 template <bool kVerifierDebug>
@@ -3836,7 +3884,7 @@
   }
   ObjPtr<mirror::Class> klass = klass_type.GetClass();
   const RegType& referrer = GetDeclaringClass();
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  ClassLinker* class_linker = GetClassLinker();
   PointerSize pointer_size = class_linker->GetImagePointerSize();
 
   ArtMethod* res_method = dex_cache_->GetResolvedMethod(dex_method_idx, pointer_size);
@@ -3993,7 +4041,7 @@
   if (method_type != METHOD_STATIC) {
     const RegType& actual_arg_type = work_line_->GetInvocationThis(this, inst);
     if (actual_arg_type.IsConflict()) {  // GetInvocationThis failed.
-      CHECK(have_pending_hard_failure_);
+      CHECK(flags_.have_pending_hard_failure_);
       return nullptr;
     }
     bool is_init = false;
@@ -4042,7 +4090,7 @@
             << *res_method_class << "'";
         // Continue on soft failures. We need to find possible hard failures to avoid problems in
         // the compiler.
-        if (have_pending_hard_failure_) {
+        if (flags_.have_pending_hard_failure_) {
           return nullptr;
         }
       }
@@ -4085,7 +4133,7 @@
       if (!work_line_->VerifyRegisterType(this, get_reg, reg_type)) {
         // Continue on soft failures. We need to find possible hard failures to avoid problems in
         // the compiler.
-        if (have_pending_hard_failure_) {
+        if (flags_.have_pending_hard_failure_) {
           return nullptr;
         }
       } else if (reg_type.IsLongOrDoubleTypes()) {
@@ -4136,7 +4184,7 @@
   }
 
   CallSiteArrayValueIterator it(*dex_file_, dex_file_->GetCallSiteId(call_site_idx));
-  // Check essential arguments are provided. The dex file verifier has verified indicies of the
+  // Check essential arguments are provided. The dex file verifier has verified indices of the
   // main values (method handle, name, method_type).
   static const size_t kRequiredArguments = 3;
   if (it.Size() < kRequiredArguments) {
@@ -4217,7 +4265,7 @@
   ArtMethod* res_method = ResolveMethodAndCheckAccess(method_idx, method_type);
   if (res_method == nullptr) {  // error or class is unresolved
     // Check what we can statically.
-    if (!have_pending_hard_failure_) {
+    if (!flags_.have_pending_hard_failure_) {
       VerifyInvocationArgsUnresolvedMethod(inst, method_type, is_range);
     }
     return nullptr;
@@ -4236,13 +4284,6 @@
       return nullptr;
     }
     if (reference_type.GetClass()->IsInterface()) {
-      // TODO Can we verify anything else.
-      if (class_idx == class_def_.class_idx_) {
-        Fail(VERIFY_ERROR_CLASS_CHANGE) << "Cannot invoke-super on self as interface";
-        return nullptr;
-      }
-      // TODO Revisit whether we want to allow invoke-super on direct interfaces only like the JLS
-      // does.
       if (!GetDeclaringClass().HasClass()) {
         Fail(VERIFY_ERROR_NO_CLASS) << "Unable to resolve the full class of 'this' used in an"
                                     << "interface invoke-super";
@@ -4294,8 +4335,7 @@
   const char* method_name = method->GetName();
 
   const char* expected_return_descriptor;
-  ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
-      Runtime::Current()->GetClassLinker()->GetClassRoots();
+  ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots = GetClassLinker()->GetClassRoots();
   if (klass == GetClassRoot<mirror::MethodHandle>(class_roots)) {
     expected_return_descriptor = mirror::MethodHandle::GetReturnTypeDescriptor(method_name);
   } else if (klass == GetClassRoot<mirror::VarHandle>(class_roots)) {
@@ -4360,8 +4400,7 @@
         << this_type;
     return false;
   } else {
-    ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots =
-        Runtime::Current()->GetClassLinker()->GetClassRoots();
+    ObjPtr<mirror::ObjectArray<mirror::Class>> class_roots = GetClassLinker()->GetClassRoots();
     if (!this_type.GetClass()->IsSubClass(GetClassRoot<mirror::MethodHandle>(class_roots)) &&
         !this_type.GetClass()->IsSubClass(GetClassRoot<mirror::VarHandle>(class_roots))) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD)
@@ -4374,40 +4413,6 @@
 }
 
 template <bool kVerifierDebug>
-uint16_t MethodVerifier<kVerifierDebug>::GetMethodIdxOfInvoke(const Instruction* inst) {
-  switch (inst->Opcode()) {
-    case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
-    case Instruction::INVOKE_VIRTUAL_QUICK: {
-      DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_)
-          << dex_file_->PrettyMethod(dex_method_idx_, true) << "@" << work_insn_idx_;
-      DCHECK(method_being_verified_ != nullptr);
-      uint16_t method_idx = method_being_verified_->GetIndexFromQuickening(work_insn_idx_);
-      CHECK_NE(method_idx, DexFile::kDexNoIndex16);
-      return method_idx;
-    }
-    default: {
-      return inst->VRegB();
-    }
-  }
-}
-
-template <bool kVerifierDebug>
-uint16_t MethodVerifier<kVerifierDebug>::GetFieldIdxOfFieldAccess(const Instruction* inst,
-                                                                  bool is_static) {
-  if (is_static) {
-    return inst->VRegB_21c();
-  } else if (inst->IsQuickened()) {
-    DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_);
-    DCHECK(method_being_verified_ != nullptr);
-    uint16_t field_idx = method_being_verified_->GetIndexFromQuickening(work_insn_idx_);
-    CHECK_NE(field_idx, DexFile::kDexNoIndex16);
-    return field_idx;
-  } else {
-    return inst->VRegC_22c();
-  }
-}
-
-template <bool kVerifierDebug>
 void MethodVerifier<kVerifierDebug>::VerifyNewArray(const Instruction* inst,
                                                     bool is_filled,
                                                     bool is_range) {
@@ -4665,7 +4670,7 @@
 
     return nullptr;  // Can't resolve Class so no more to do here, will do checking at runtime.
   }
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  ClassLinker* class_linker = GetClassLinker();
   ArtField* field = class_linker->ResolveFieldJLS(field_idx, dex_cache_, class_loader_);
 
   // Record result of the field resolution attempt.
@@ -4692,6 +4697,12 @@
 
 template <bool kVerifierDebug>
 ArtField* MethodVerifier<kVerifierDebug>::GetInstanceField(const RegType& obj_type, int field_idx) {
+  if (!obj_type.IsZeroOrNull() && !obj_type.IsReferenceTypes()) {
+    // Trying to read a field from something that isn't a reference.
+    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "instance field access on object that has "
+        << "non-reference type " << obj_type;
+    return nullptr;
+  }
   const dex::FieldId& field_id = dex_file_->GetFieldId(field_idx);
   // Check access to class.
   const RegType& klass_type = ResolveClass<CheckAccess::kYes>(field_id.class_idx_);
@@ -4709,7 +4720,7 @@
 
     return nullptr;  // Can't resolve Class so no more to do here
   }
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  ClassLinker* class_linker = GetClassLinker();
   ArtField* field = class_linker->ResolveFieldJLS(field_idx, dex_cache_, class_loader_);
 
   // Record result of the field resolution attempt.
@@ -4725,11 +4736,6 @@
   } else if (obj_type.IsZeroOrNull()) {
     // Cannot infer and check type, however, access will cause null pointer exception.
     // Fall through into a few last soft failure checks below.
-  } else if (!obj_type.IsReferenceTypes()) {
-    // Trying to read a field from something that isn't a reference.
-    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "instance field access on object that has "
-                                      << "non-reference type " << obj_type;
-    return nullptr;
   } else {
     std::string temp;
     ObjPtr<mirror::Class> klass = field->GetDeclaringClass();
@@ -4753,7 +4759,7 @@
       // of C1. For resolution to occur the declared class of the field must be compatible with
       // obj_type, we've discovered this wasn't so, so report the field didn't exist.
       VerifyError type;
-      bool is_aot = Runtime::Current()->IsAotCompiler();
+      bool is_aot = IsAotMode();
       if (is_aot && (field_klass.IsUnresolvedTypes() || obj_type.IsUnresolvedTypes())) {
         // Compiler & unresolved types involved, retry at runtime.
         type = VerifyError::VERIFY_ERROR_NO_CLASS;
@@ -4807,7 +4813,7 @@
                                        ? GetRegTypeCache()->FromUninitialized(object_type)
                                        : object_type;
     field = GetInstanceField(adjusted_type, field_idx);
-    if (UNLIKELY(have_pending_hard_failure_)) {
+    if (UNLIKELY(flags_.have_pending_hard_failure_)) {
       return;
     }
     if (should_adjust) {
@@ -4964,7 +4970,7 @@
       const Instruction* ret_inst = &code_item_accessor_.InstructionAt(next_insn);
       AdjustReturnLine(this, ret_inst, target_line);
       // Directly bail if a hard failure was found.
-      if (have_pending_hard_failure_) {
+      if (flags_.have_pending_hard_failure_) {
         return false;
       }
     }
@@ -4975,7 +4981,7 @@
       copy->CopyFromLine(target_line);
     }
     changed = target_line->MergeRegisters(this, merge_line);
-    if (have_pending_hard_failure_) {
+    if (flags_.have_pending_hard_failure_) {
       return false;
     }
     if (kVerifierDebug && changed) {
@@ -5023,22 +5029,6 @@
 }
 
 template <bool kVerifierDebug>
-const RegType& MethodVerifier<kVerifierDebug>::GetDeclaringClass() {
-  if (declaring_class_ == nullptr) {
-    const dex::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
-    const char* descriptor
-        = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
-    if (method_being_verified_ != nullptr) {
-      ObjPtr<mirror::Class> klass = method_being_verified_->GetDeclaringClass();
-      declaring_class_ = &FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes());
-    } else {
-      declaring_class_ = &reg_types_.FromDescriptor(class_loader_.Get(), descriptor, false);
-    }
-  }
-  return *declaring_class_;
-}
-
-template <bool kVerifierDebug>
 const RegType& MethodVerifier<kVerifierDebug>::DetermineCat1Constant(int32_t value, bool precise) {
   if (precise) {
     // Precise constant type.
@@ -5067,46 +5057,35 @@
   }
 }
 
-template <bool kVerifierDebug>
-const RegType& MethodVerifier<kVerifierDebug>::FromClass(const char* descriptor,
-                                                         ObjPtr<mirror::Class> klass,
-                                                         bool precise) {
-  DCHECK(klass != nullptr);
-  if (precise && !klass->IsInstantiable() && !klass->IsPrimitive()) {
-    Fail(VerifyError::VERIFY_ERROR_NO_CLASS) << "Could not create precise reference for "
-        << "non-instantiable klass " << descriptor;
-    precise = false;
-  }
-  return reg_types_.FromClass(descriptor, klass, precise);
-}
-
 }  // namespace
 }  // namespace impl
 
 MethodVerifier::MethodVerifier(Thread* self,
+                               ClassLinker* class_linker,
+                               ArenaPool* arena_pool,
                                const DexFile* dex_file,
                                const dex::CodeItem* code_item,
                                uint32_t dex_method_idx,
                                bool can_load_classes,
                                bool allow_thread_suspension,
-                               bool allow_soft_failures)
+                               bool allow_soft_failures,
+                               bool aot_mode)
     : self_(self),
-      arena_stack_(Runtime::Current()->GetArenaPool()),
+      arena_stack_(arena_pool),
       allocator_(&arena_stack_),
-      reg_types_(can_load_classes, allocator_, allow_thread_suspension),
+      reg_types_(class_linker, can_load_classes, allocator_, allow_thread_suspension),
       reg_table_(allocator_),
       work_insn_idx_(dex::kDexNoIndex),
       dex_method_idx_(dex_method_idx),
       dex_file_(dex_file),
       code_item_accessor_(*dex_file, code_item),
-      have_pending_hard_failure_(false),
-      have_pending_runtime_throw_failure_(false),
-      have_pending_experimental_failure_(false),
-      have_any_pending_runtime_throw_failure_(false),
+      // TODO: make it designated initialization when we compile as C++20.
+      flags_({false, false, false, false, aot_mode}),
       encountered_failure_types_(0),
       can_load_classes_(can_load_classes),
       allow_soft_failures_(allow_soft_failures),
       has_check_casts_(false),
+      class_linker_(class_linker),
       link_(nullptr) {
   self->PushVerifier(this);
 }
@@ -5117,6 +5096,8 @@
 }
 
 MethodVerifier::FailureData MethodVerifier::VerifyMethod(Thread* self,
+                                                         ClassLinker* class_linker,
+                                                         ArenaPool* arena_pool,
                                                          uint32_t method_idx,
                                                          const DexFile* dex_file,
                                                          Handle<mirror::DexCache> dex_cache,
@@ -5126,13 +5107,17 @@
                                                          ArtMethod* method,
                                                          uint32_t method_access_flags,
                                                          CompilerCallbacks* callbacks,
+                                                         VerifierCallback* verifier_callback,
                                                          bool allow_soft_failures,
                                                          HardFailLogMode log_level,
                                                          bool need_precise_constants,
                                                          uint32_t api_level,
+                                                         bool aot_mode,
                                                          std::string* hard_failure_msg) {
   if (VLOG_IS_ON(verifier_debug)) {
     return VerifyMethod<true>(self,
+                              class_linker,
+                              arena_pool,
                               method_idx,
                               dex_file,
                               dex_cache,
@@ -5142,13 +5127,17 @@
                               method,
                               method_access_flags,
                               callbacks,
+                              verifier_callback,
                               allow_soft_failures,
                               log_level,
                               need_precise_constants,
                               api_level,
+                              aot_mode,
                               hard_failure_msg);
   } else {
     return VerifyMethod<false>(self,
+                               class_linker,
+                               arena_pool,
                                method_idx,
                                dex_file,
                                dex_cache,
@@ -5158,16 +5147,33 @@
                                method,
                                method_access_flags,
                                callbacks,
+                               verifier_callback,
                                allow_soft_failures,
                                log_level,
                                need_precise_constants,
                                api_level,
+                               aot_mode,
                                hard_failure_msg);
   }
 }
 
+// Return whether the runtime knows how to execute a method without needing to
+// re-verify it at runtime (and therefore save on first use of the class). We
+// currently only support it for access checks, where the runtime will mark the
+// methods as needing access checks and have the interpreter execute with them.
+// The AOT/JIT compiled code is not affected.
+static inline bool CanRuntimeHandleVerificationFailure(uint32_t encountered_failure_types) {
+  constexpr uint32_t unresolved_mask =
+      verifier::VerifyError::VERIFY_ERROR_ACCESS_CLASS |
+      verifier::VerifyError::VERIFY_ERROR_ACCESS_FIELD |
+      verifier::VerifyError::VERIFY_ERROR_ACCESS_METHOD;
+  return (encountered_failure_types & (~unresolved_mask)) == 0;
+}
+
 template <bool kVerifierDebug>
 MethodVerifier::FailureData MethodVerifier::VerifyMethod(Thread* self,
+                                                         ClassLinker* class_linker,
+                                                         ArenaPool* arena_pool,
                                                          uint32_t method_idx,
                                                          const DexFile* dex_file,
                                                          Handle<mirror::DexCache> dex_cache,
@@ -5177,39 +5183,46 @@
                                                          ArtMethod* method,
                                                          uint32_t method_access_flags,
                                                          CompilerCallbacks* callbacks,
+                                                         VerifierCallback* verifier_callback,
                                                          bool allow_soft_failures,
                                                          HardFailLogMode log_level,
                                                          bool need_precise_constants,
                                                          uint32_t api_level,
+                                                         bool aot_mode,
                                                          std::string* hard_failure_msg) {
   MethodVerifier::FailureData result;
   uint64_t start_ns = kTimeVerifyMethod ? NanoTime() : 0;
 
   impl::MethodVerifier<kVerifierDebug> verifier(self,
+                                                class_linker,
+                                                arena_pool,
                                                 dex_file,
+                                                code_item,
+                                                method_idx,
+                                                /* can_load_classes= */ true,
+                                                /* allow_thread_suspension= */ true,
+                                                allow_soft_failures,
+                                                aot_mode,
                                                 dex_cache,
                                                 class_loader,
                                                 class_def,
-                                                code_item,
-                                                method_idx,
                                                 method,
                                                 method_access_flags,
-                                                /* can_load_classes= */ true,
-                                                allow_soft_failures,
                                                 need_precise_constants,
                                                 /* verify to dump */ false,
-                                                /* allow_thread_suspension= */ true,
+                                                /* fill_register_lines= */ false,
                                                 api_level);
   if (verifier.Verify()) {
     // Verification completed, however failures may be pending that didn't cause the verification
     // to hard fail.
-    CHECK(!verifier.have_pending_hard_failure_);
+    CHECK(!verifier.flags_.have_pending_hard_failure_);
 
     if (code_item != nullptr && callbacks != nullptr) {
       // Let the interested party know that the method was verified.
       callbacks->MethodVerified(&verifier);
     }
 
+    bool set_dont_compile = false;
     if (verifier.failures_.size() != 0) {
       if (VLOG_IS_ON(verifier)) {
         verifier.DumpFailures(VLOG_STREAM(verifier) << "Soft verification failures in "
@@ -5219,17 +5232,20 @@
         LOG(INFO) << verifier.info_messages_.str();
         verifier.Dump(LOG_STREAM(INFO));
       }
-      result.kind = FailureKind::kSoftFailure;
+      if (CanRuntimeHandleVerificationFailure(verifier.encountered_failure_types_)) {
+        result.kind = FailureKind::kAccessChecksFailure;
+      } else {
+        result.kind = FailureKind::kSoftFailure;
+      }
       if (method != nullptr &&
           !CanCompilerHandleVerificationFailure(verifier.encountered_failure_types_)) {
-        method->SetDontCompile();
+        set_dont_compile = true;
       }
     }
     if (method != nullptr) {
       if (verifier.HasInstructionThatWillThrow()) {
-        method->SetDontCompile();
-        if (Runtime::Current()->IsAotCompiler() &&
-            (callbacks != nullptr) && !callbacks->IsBootImage()) {
+        set_dont_compile = true;
+        if (aot_mode && (callbacks != nullptr) && !callbacks->IsBootImage()) {
           // When compiling apps, make HasInstructionThatWillThrow a soft error to trigger
           // re-verification at runtime.
           // The dead code after the throw is not verified and might be invalid. This may cause
@@ -5242,20 +5258,23 @@
           result.kind = FailureKind::kSoftFailure;
         }
       }
+      bool must_count_locks = false;
       if ((verifier.encountered_failure_types_ & VerifyError::VERIFY_ERROR_LOCKING) != 0) {
-        method->SetMustCountLocks();
+        must_count_locks = true;
       }
+      verifier_callback->SetDontCompile(method, set_dont_compile);
+      verifier_callback->SetMustCountLocks(method, must_count_locks);
     }
   } else {
     // Bad method data.
     CHECK_NE(verifier.failures_.size(), 0U);
 
-    if (UNLIKELY(verifier.have_pending_experimental_failure_)) {
+    if (UNLIKELY(verifier.flags_.have_pending_experimental_failure_)) {
       // Failed due to being forced into interpreter. This is ok because
       // we just want to skip verification.
       result.kind = FailureKind::kSoftFailure;
     } else {
-      CHECK(verifier.have_pending_hard_failure_);
+      CHECK(verifier.flags_.have_pending_hard_failure_);
       if (VLOG_IS_ON(verifier)) {
         log_level = std::max(HardFailLogMode::kLogVerbose, log_level);
       }
@@ -5309,15 +5328,61 @@
   if (kTimeVerifyMethod) {
     uint64_t duration_ns = NanoTime() - start_ns;
     if (duration_ns > MsToNs(Runtime::Current()->GetVerifierLoggingThresholdMs())) {
+      double bytecodes_per_second =
+          verifier.code_item_accessor_.InsnsSizeInCodeUnits() / (duration_ns * 1e-9);
       LOG(WARNING) << "Verification of " << dex_file->PrettyMethod(method_idx)
                    << " took " << PrettyDuration(duration_ns)
-                   << (impl::IsLargeMethod(verifier.CodeItem()) ? " (large method)" : "");
+                   << (impl::IsLargeMethod(verifier.CodeItem()) ? " (large method)" : "")
+                   << " (" << StringPrintf("%.2f", bytecodes_per_second) << " bytecodes/s)"
+                   << " (" << verifier.allocator_.ApproximatePeakBytes()
+                   << "B approximate peak alloc)";
     }
   }
   result.types = verifier.encountered_failure_types_;
   return result;
 }
 
+MethodVerifier* MethodVerifier::CalculateVerificationInfo(
+      Thread* self,
+      ArtMethod* method,
+      Handle<mirror::DexCache> dex_cache,
+      Handle<mirror::ClassLoader> class_loader) {
+  std::unique_ptr<impl::MethodVerifier<false>> verifier(
+      new impl::MethodVerifier<false>(self,
+                                      Runtime::Current()->GetClassLinker(),
+                                      Runtime::Current()->GetArenaPool(),
+                                      method->GetDexFile(),
+                                      method->GetCodeItem(),
+                                      method->GetDexMethodIndex(),
+                                      /* can_load_classes= */ false,
+                                      /* allow_thread_suspension= */ false,
+                                      /* allow_soft_failures= */ true,
+                                      Runtime::Current()->IsAotCompiler(),
+                                      dex_cache,
+                                      class_loader,
+                                      *method->GetDeclaringClass()->GetClassDef(),
+                                      method,
+                                      method->GetAccessFlags(),
+                                      /* need_precise_constants= */ true,
+                                      /* verify_to_dump= */ false,
+                                      /* fill_register_lines= */ true,
+                                      // Just use the verifier at the current skd-version.
+                                      // This might affect what soft-verifier errors are reported.
+                                      // Callers can then filter out relevant errors if needed.
+                                      Runtime::Current()->GetTargetSdkVersion()));
+  verifier->Verify();
+  if (VLOG_IS_ON(verifier)) {
+    verifier->DumpFailures(VLOG_STREAM(verifier));
+    VLOG(verifier) << verifier->info_messages_.str();
+    verifier->Dump(VLOG_STREAM(verifier));
+  }
+  if (verifier->flags_.have_pending_hard_failure_) {
+    return nullptr;
+  } else {
+    return verifier.release();
+  }
+}
+
 MethodVerifier* MethodVerifier::VerifyMethodAndDump(Thread* self,
                                                     VariableIndentationOutputStream* vios,
                                                     uint32_t dex_method_idx,
@@ -5331,26 +5396,30 @@
                                                     uint32_t api_level) {
   impl::MethodVerifier<false>* verifier = new impl::MethodVerifier<false>(
       self,
+      Runtime::Current()->GetClassLinker(),
+      Runtime::Current()->GetArenaPool(),
       dex_file,
+      code_item,
+      dex_method_idx,
+      /* can_load_classes= */ true,
+      /* allow_thread_suspension= */ true,
+      /* allow_soft_failures= */ true,
+      Runtime::Current()->IsAotCompiler(),
       dex_cache,
       class_loader,
       class_def,
-      code_item,
-      dex_method_idx,
       method,
       method_access_flags,
-      /* can_load_classes= */ true,
-      /* allow_soft_failures= */ true,
       /* need_precise_constants= */ true,
       /* verify_to_dump= */ true,
-      /* allow_thread_suspension= */ true,
+      /* fill_register_lines= */ false,
       api_level);
   verifier->Verify();
   verifier->DumpFailures(vios->Stream());
   vios->Stream() << verifier->info_messages_.str();
   // Only dump and return if no hard failures. Otherwise the verifier may be not fully initialized
   // and querying any info is dangerous/can abort.
-  if (verifier->have_pending_hard_failure_) {
+  if (verifier->flags_.have_pending_hard_failure_) {
     delete verifier;
     return nullptr;
   } else {
@@ -5368,19 +5437,23 @@
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache()));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader()));
   impl::MethodVerifier<false> verifier(hs.Self(),
+                                       Runtime::Current()->GetClassLinker(),
+                                       Runtime::Current()->GetArenaPool(),
                                        m->GetDexFile(),
+                                       m->GetCodeItem(),
+                                       m->GetDexMethodIndex(),
+                                       /* can_load_classes= */ false,
+                                       /* allow_thread_suspension= */ false,
+                                       /* allow_soft_failures= */ true,
+                                       Runtime::Current()->IsAotCompiler(),
                                        dex_cache,
                                        class_loader,
                                        m->GetClassDef(),
-                                       m->GetCodeItem(),
-                                       m->GetDexMethodIndex(),
                                        m,
                                        m->GetAccessFlags(),
-                                       /* can_load_classes= */ false,
-                                       /* allow_soft_failures= */ true,
                                        /* need_precise_constants= */ false,
                                        /* verify_to_dump= */ false,
-                                       /* allow_thread_suspension= */ false,
+                                       /* fill_register_lines= */ false,
                                        api_level);
   verifier.interesting_dex_pc_ = dex_pc;
   verifier.monitor_enter_dex_pcs_ = monitor_enter_dex_pcs;
@@ -5403,24 +5476,28 @@
                                                bool allow_thread_suspension,
                                                uint32_t api_level) {
   return new impl::MethodVerifier<false>(self,
+                                         Runtime::Current()->GetClassLinker(),
+                                         Runtime::Current()->GetArenaPool(),
                                          dex_file,
+                                         code_item,
+                                         method_idx,
+                                         can_load_classes,
+                                         allow_thread_suspension,
+                                         allow_soft_failures,
+                                         Runtime::Current()->IsAotCompiler(),
                                          dex_cache,
                                          class_loader,
                                          class_def,
-                                         code_item,
-                                         method_idx,
                                          method,
                                          access_flags,
-                                         can_load_classes,
-                                         allow_soft_failures,
                                          need_precise_constants,
                                          verify_to_dump,
-                                         allow_thread_suspension,
+                                         /* fill_register_lines= */ false,
                                          api_level);
 }
 
-void MethodVerifier::Init() {
-  art::verifier::RegTypeCache::Init();
+void MethodVerifier::Init(ClassLinker* class_linker) {
+  art::verifier::RegTypeCache::Init(class_linker);
 }
 
 void MethodVerifier::Shutdown() {
@@ -5435,66 +5512,85 @@
   reg_types_.VisitRoots(visitor, root_info);
 }
 
-std::ostream& MethodVerifier::Fail(VerifyError error) {
+std::ostream& MethodVerifier::Fail(VerifyError error, bool pending_exc) {
   // Mark the error type as encountered.
   encountered_failure_types_ |= static_cast<uint32_t>(error);
 
-  switch (error) {
-    case VERIFY_ERROR_NO_CLASS:
-    case VERIFY_ERROR_NO_FIELD:
-    case VERIFY_ERROR_NO_METHOD:
-    case VERIFY_ERROR_ACCESS_CLASS:
-    case VERIFY_ERROR_ACCESS_FIELD:
-    case VERIFY_ERROR_ACCESS_METHOD:
-    case VERIFY_ERROR_INSTANTIATION:
-    case VERIFY_ERROR_CLASS_CHANGE:
-    case VERIFY_ERROR_FORCE_INTERPRETER:
-    case VERIFY_ERROR_LOCKING:
-      if (Runtime::Current()->IsAotCompiler() || !can_load_classes_) {
-        // If we're optimistically running verification at compile time, turn NO_xxx, ACCESS_xxx,
-        // class change and instantiation errors into soft verification errors so that we re-verify
-        // at runtime. We may fail to find or to agree on access because of not yet available class
-        // loaders, or class loaders that will differ at runtime. In these cases, we don't want to
-        // affect the soundness of the code being compiled. Instead, the generated code runs "slow
-        // paths" that dynamically perform the verification and cause the behavior to be that akin
-        // to an interpreter.
-        error = VERIFY_ERROR_BAD_CLASS_SOFT;
-      } else {
-        // If we fail again at runtime, mark that this instruction would throw and force this
-        // method to be executed using the interpreter with checks.
-        have_pending_runtime_throw_failure_ = true;
-
-        // We need to save the work_line if the instruction wasn't throwing before. Otherwise we'll
-        // try to merge garbage.
-        // Note: this assumes that Fail is called before we do any work_line modifications.
-        // Note: this can fail before we touch any instruction, for the signature of a method. So
-        //       add a check.
+  if (pending_exc) {
+    switch (error) {
+      case VERIFY_ERROR_NO_CLASS:
+      case VERIFY_ERROR_NO_FIELD:
+      case VERIFY_ERROR_NO_METHOD:
+      case VERIFY_ERROR_ACCESS_CLASS:
+      case VERIFY_ERROR_ACCESS_FIELD:
+      case VERIFY_ERROR_ACCESS_METHOD:
+      case VERIFY_ERROR_INSTANTIATION:
+      case VERIFY_ERROR_CLASS_CHANGE:
+      case VERIFY_ERROR_FORCE_INTERPRETER:
+      case VERIFY_ERROR_LOCKING:
+        if (IsAotMode() || !can_load_classes_) {
+          if (error != VERIFY_ERROR_ACCESS_CLASS &&
+              error != VERIFY_ERROR_ACCESS_FIELD &&
+              error != VERIFY_ERROR_ACCESS_METHOD) {
+            // If we're optimistically running verification at compile time, turn NO_xxx,
+            // class change and instantiation errors into soft verification errors so that we
+            // re-verify at runtime. We may fail to find or to agree on access because of not yet
+            // available class loaders, or class loaders that will differ at runtime. In these
+            // cases, we don't want to affect the soundness of the code being compiled. Instead, the
+            // generated code runs "slow paths" that dynamically perform the verification and cause
+            // the behavior to be that akin to an interpreter.
+            error = VERIFY_ERROR_BAD_CLASS_SOFT;
+          }
+        } else {
+          // If we fail again at runtime, mark that this instruction would throw and force this
+          // method to be executed using the interpreter with checks.
+          flags_.have_pending_runtime_throw_failure_ = true;
+        }
+        // How to handle runtime failures for instructions that are not flagged kThrow.
+        //
+        // The verifier may fail before we touch any instruction, for the signature of a method. So
+        // add a check.
         if (work_insn_idx_ < dex::kDexNoIndex) {
           const Instruction& inst = code_item_accessor_.InstructionAt(work_insn_idx_);
-          int opcode_flags = Instruction::FlagsOf(inst.Opcode());
-
-          if ((opcode_flags & Instruction::kThrow) == 0 &&
+          Instruction::Code opcode = inst.Opcode();
+          if ((Instruction::FlagsOf(opcode) & Instruction::kThrow) == 0 &&
+              !impl::IsCompatThrow(opcode) &&
               GetInstructionFlags(work_insn_idx_).IsInTry()) {
+            if (Runtime::Current()->IsVerifierMissingKThrowFatal()) {
+              LOG(FATAL) << "Unexpected throw: " << std::hex << work_insn_idx_ << " " << opcode;
+              UNREACHABLE();
+            }
+            // We need to save the work_line if the instruction wasn't throwing before. Otherwise
+            // we'll try to merge garbage.
+            // Note: this assumes that Fail is called before we do any work_line modifications.
             saved_line_->CopyFromLine(work_line_.get());
           }
         }
-      }
-      break;
+        break;
 
-      // Indication that verification should be retried at runtime.
-    case VERIFY_ERROR_BAD_CLASS_SOFT:
-      if (!allow_soft_failures_) {
-        have_pending_hard_failure_ = true;
-      }
-      break;
+        // Indication that verification should be retried at runtime.
+      case VERIFY_ERROR_BAD_CLASS_SOFT:
+        if (!allow_soft_failures_) {
+          flags_.have_pending_hard_failure_ = true;
+        }
+        break;
 
-      // Hard verification failures at compile time will still fail at runtime, so the class is
-      // marked as rejected to prevent it from being compiled.
-    case VERIFY_ERROR_BAD_CLASS_HARD: {
-      have_pending_hard_failure_ = true;
-      break;
+        // Hard verification failures at compile time will still fail at runtime, so the class is
+        // marked as rejected to prevent it from being compiled.
+      case VERIFY_ERROR_BAD_CLASS_HARD: {
+        flags_.have_pending_hard_failure_ = true;
+        break;
+      }
+
+      case VERIFY_ERROR_SKIP_COMPILER:
+        // Nothing to do, just remember the failure type.
+        break;
     }
+  } else if (kIsDebugBuild) {
+    CHECK_NE(error, VERIFY_ERROR_BAD_CLASS_SOFT);
+    CHECK_NE(error, VERIFY_ERROR_BAD_CLASS_HARD);
   }
+
   failures_.push_back(error);
   std::string location(StringPrintf("%s: [0x%X] ", dex_file_->PrettyMethod(dex_method_idx_).c_str(),
                                     work_insn_idx_));
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 0af09c3..83dafd3 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -72,6 +72,16 @@
   kTrackRegsAll,
 };
 
+// A class used by the verifier to tell users about what options need to be set for given methods.
+class VerifierCallback {
+ public:
+  virtual ~VerifierCallback() {}
+  virtual void SetDontCompile(ArtMethod* method, bool value)
+      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+  virtual void SetMustCountLocks(ArtMethod* method, bool value)
+      REQUIRES_SHARED(Locks::mutator_lock_) = 0;
+};
+
 // A mapping from a dex pc to the register line statuses as they are immediately prior to the
 // execution of that instruction.
 class PcToRegisterLineTable {
@@ -118,6 +128,16 @@
                                              uint32_t api_level)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  // Calculates the verification information for every instruction of the given method. The given
+  // dex-cache and class-loader will be used for lookups. No classes will be loaded. If verification
+  // fails hard nullptr will be returned. This should only be used if one needs to examine what the
+  // verifier believes about the registers of a given method.
+  static MethodVerifier* CalculateVerificationInfo(Thread* self,
+                                                   ArtMethod* method,
+                                                   Handle<mirror::DexCache> dex_cache,
+                                                   Handle<mirror::ClassLoader> class_loader)
+      REQUIRES_SHARED(Locks::mutator_lock_);
+
   const DexFile& GetDexFile() const {
     DCHECK(dex_file_ != nullptr);
     return *dex_file_;
@@ -128,7 +148,7 @@
   }
 
   // Log a verification failure.
-  std::ostream& Fail(VerifyError error);
+  std::ostream& Fail(VerifyError error, bool pending_exc = true);
 
   // Log for verification information.
   ScopedNewLine LogVerifyInfo();
@@ -153,7 +173,7 @@
                                uint32_t api_level)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
-  static void Init() REQUIRES_SHARED(Locks::mutator_lock_);
+  static void Init(ClassLinker* class_linker) REQUIRES_SHARED(Locks::mutator_lock_);
   static void Shutdown();
 
   virtual ~MethodVerifier();
@@ -174,7 +194,7 @@
   bool HasCheckCasts() const;
   bool HasFailures() const;
   bool HasInstructionThatWillThrow() const {
-    return have_any_pending_runtime_throw_failure_;
+    return flags_.have_any_pending_runtime_throw_failure_;
   }
 
   virtual const RegType& ResolveCheckedClass(dex::TypeIndex class_idx)
@@ -184,14 +204,25 @@
     return encountered_failure_types_;
   }
 
+  ClassLinker* GetClassLinker() {
+    return class_linker_;
+  }
+
+  bool IsAotMode() const {
+    return flags_.aot_mode_;
+  }
+
  protected:
   MethodVerifier(Thread* self,
+                 ClassLinker* class_linker,
+                 ArenaPool* arena_pool,
                  const DexFile* dex_file,
                  const dex::CodeItem* code_item,
                  uint32_t dex_method_idx,
                  bool can_load_classes,
                  bool allow_thread_suspension,
-                 bool allow_soft_failures)
+                 bool allow_soft_failures,
+                 bool aot_mode)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   // Verification result for method(s). Includes a (maximum) failure kind, and (the union of)
@@ -216,6 +247,8 @@
    *      for code flow problems.
    */
   static FailureData VerifyMethod(Thread* self,
+                                  ClassLinker* class_linker,
+                                  ArenaPool* arena_pool,
                                   uint32_t method_idx,
                                   const DexFile* dex_file,
                                   Handle<mirror::DexCache> dex_cache,
@@ -225,15 +258,19 @@
                                   ArtMethod* method,
                                   uint32_t method_access_flags,
                                   CompilerCallbacks* callbacks,
+                                  VerifierCallback* verifier_callback,
                                   bool allow_soft_failures,
                                   HardFailLogMode log_level,
                                   bool need_precise_constants,
                                   uint32_t api_level,
+                                  bool aot_mode,
                                   std::string* hard_failure_msg)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   template <bool kVerifierDebug>
   static FailureData VerifyMethod(Thread* self,
+                                  ClassLinker* class_linker,
+                                  ArenaPool* arena_pool,
                                   uint32_t method_idx,
                                   const DexFile* dex_file,
                                   Handle<mirror::DexCache> dex_cache,
@@ -243,10 +280,12 @@
                                   ArtMethod* method,
                                   uint32_t method_access_flags,
                                   CompilerCallbacks* callbacks,
+                                  VerifierCallback* verifier_callback,
                                   bool allow_soft_failures,
                                   HardFailLogMode log_level,
                                   bool need_precise_constants,
                                   uint32_t api_level,
+                                  bool aot_mode,
                                   std::string* hard_failure_msg)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
@@ -305,19 +344,27 @@
   std::vector<VerifyError> failures_;
   // Error messages associated with failures.
   std::vector<std::ostringstream*> failure_messages_;
-  // Is there a pending hard failure?
-  bool have_pending_hard_failure_;
-  // Is there a pending runtime throw failure? A runtime throw failure is when an instruction
-  // would fail at runtime throwing an exception. Such an instruction causes the following code
-  // to be unreachable. This is set by Fail and used to ensure we don't process unreachable
-  // instructions that would hard fail the verification.
-  // Note: this flag is reset after processing each instruction.
-  bool have_pending_runtime_throw_failure_;
-  // Is there a pending experimental failure?
-  bool have_pending_experimental_failure_;
+  struct {
+    // Is there a pending hard failure?
+    bool have_pending_hard_failure_ : 1;
 
-  // A version of the above that is not reset and thus captures if there were *any* throw failures.
-  bool have_any_pending_runtime_throw_failure_;
+    // Is there a pending runtime throw failure? A runtime throw failure is when an instruction
+    // would fail at runtime throwing an exception. Such an instruction causes the following code
+    // to be unreachable. This is set by Fail and used to ensure we don't process unreachable
+    // instructions that would hard fail the verification.
+    // Note: this flag is reset after processing each instruction.
+    bool have_pending_runtime_throw_failure_ : 1;
+
+    // Is there a pending experimental failure?
+    bool have_pending_experimental_failure_ : 1;
+
+    // A version of the above that is not reset and thus captures if there were *any* throw
+    // failures.
+    bool have_any_pending_runtime_throw_failure_ : 1;
+
+    // Verify in AoT mode?
+    bool aot_mode_ : 1;
+  } flags_;
 
   // Info message log use primarily for verifier diagnostics.
   std::ostringstream info_messages_;
@@ -336,6 +383,9 @@
   // check-cast.
   bool has_check_casts_;
 
+  // Classlinker to use when resolving.
+  ClassLinker* class_linker_;
+
   // Link, for the method verifier root linked list.
   MethodVerifier* link_;
 
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 150d35c..2d17030 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -602,6 +602,148 @@
   return a.IsConstantTypes() ? (b.IsZero() ? a : b) : a;
 }
 
+
+namespace {
+
+ObjPtr<mirror::Class> ArrayClassJoin(ObjPtr<mirror::Class> s,
+                                     ObjPtr<mirror::Class> t,
+                                     ClassLinker* class_linker)
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
+ObjPtr<mirror::Class> InterfaceClassJoin(ObjPtr<mirror::Class> s, ObjPtr<mirror::Class> t)
+    REQUIRES_SHARED(Locks::mutator_lock_);
+
+/*
+ * A basic Join operation on classes. For a pair of types S and T the Join, written S v T = J, is
+ * S <: J, T <: J and for-all U such that S <: U, T <: U then J <: U. That is J is the parent of
+ * S and T such that there isn't a parent of both S and T that isn't also the parent of J (ie J
+ * is the deepest (lowest upper bound) parent of S and T).
+ *
+ * This operation applies for regular classes and arrays, however, for interface types there
+ * needn't be a partial ordering on the types. We could solve the problem of a lack of a partial
+ * order by introducing sets of types, however, the only operation permissible on an interface is
+ * invoke-interface. In the tradition of Java verifiers [1] we defer the verification of interface
+ * types until an invoke-interface call on the interface typed reference at runtime and allow
+ * the perversion of Object being assignable to an interface type (note, however, that we don't
+ * allow assignment of Object or Interface to any concrete class and are therefore type safe).
+ *
+ * Note: This may return null in case of internal errors, e.g., OOME when a new class would have
+ *       to be created but there is no heap space. The exception will stay pending, and it is
+ *       the job of the caller to handle it.
+ *
+ * [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy
+ */
+ObjPtr<mirror::Class> ClassJoin(ObjPtr<mirror::Class> s,
+                                ObjPtr<mirror::Class> t,
+                                ClassLinker* class_linker)
+    REQUIRES_SHARED(Locks::mutator_lock_) {
+  DCHECK(!s->IsPrimitive()) << s->PrettyClass();
+  DCHECK(!t->IsPrimitive()) << t->PrettyClass();
+  if (s == t) {
+    return s;
+  } else if (s->IsAssignableFrom(t)) {
+    return s;
+  } else if (t->IsAssignableFrom(s)) {
+    return t;
+  } else if (s->IsArrayClass() && t->IsArrayClass()) {
+    return ArrayClassJoin(s, t, class_linker);
+  } else if (s->IsInterface() || t->IsInterface()) {
+    return InterfaceClassJoin(s, t);
+  } else {
+    size_t s_depth = s->Depth();
+    size_t t_depth = t->Depth();
+    // Get s and t to the same depth in the hierarchy
+    if (s_depth > t_depth) {
+      while (s_depth > t_depth) {
+        s = s->GetSuperClass();
+        s_depth--;
+      }
+    } else {
+      while (t_depth > s_depth) {
+        t = t->GetSuperClass();
+        t_depth--;
+      }
+    }
+    // Go up the hierarchy until we get to the common parent
+    while (s != t) {
+      s = s->GetSuperClass();
+      t = t->GetSuperClass();
+    }
+    return s;
+  }
+}
+
+ObjPtr<mirror::Class> ArrayClassJoin(ObjPtr<mirror::Class> s,
+                                     ObjPtr<mirror::Class> t,
+                                     ClassLinker* class_linker) {
+  ObjPtr<mirror::Class> s_ct = s->GetComponentType();
+  ObjPtr<mirror::Class> t_ct = t->GetComponentType();
+  if (s_ct->IsPrimitive() || t_ct->IsPrimitive()) {
+    // Given the types aren't the same, if either array is of primitive types then the only
+    // common parent is java.lang.Object
+    ObjPtr<mirror::Class> result = s->GetSuperClass();  // short-cut to java.lang.Object
+    DCHECK(result->IsObjectClass());
+    return result;
+  }
+  Thread* self = Thread::Current();
+  ObjPtr<mirror::Class> common_elem = ClassJoin(s_ct, t_ct, class_linker);
+  if (UNLIKELY(common_elem == nullptr)) {
+    self->AssertPendingException();
+    return nullptr;
+  }
+  // Note: The following lookup invalidates existing ObjPtr<>s.
+  ObjPtr<mirror::Class> array_class = class_linker->FindArrayClass(self, common_elem);
+  if (UNLIKELY(array_class == nullptr)) {
+    self->AssertPendingException();
+    return nullptr;
+  }
+  return array_class;
+}
+
+ObjPtr<mirror::Class> InterfaceClassJoin(ObjPtr<mirror::Class> s, ObjPtr<mirror::Class> t) {
+  // This is expensive, as we do not have good data structures to do this even halfway
+  // efficiently.
+  //
+  // We're not following JVMS for interface verification (not everything is assignable to an
+  // interface, we trade this for IMT dispatch). We also don't have set types to make up for
+  // it. So we choose one arbitrary common ancestor interface by walking the interface tables
+  // backwards.
+  //
+  // For comparison, runtimes following the JVMS will punt all interface type checking to
+  // runtime.
+  ObjPtr<mirror::IfTable> s_if = s->GetIfTable();
+  int32_t s_if_count = s->GetIfTableCount();
+  ObjPtr<mirror::IfTable> t_if = t->GetIfTable();
+  int32_t t_if_count = t->GetIfTableCount();
+
+  // Note: we'll be using index == count to stand for the argument itself.
+  for (int32_t s_it = s_if_count; s_it >= 0; --s_it) {
+    ObjPtr<mirror::Class> s_cl = s_it == s_if_count ? s : s_if->GetInterface(s_it);
+    if (!s_cl->IsInterface()) {
+      continue;
+    }
+
+    for (int32_t t_it = t_if_count; t_it >= 0; --t_it) {
+      ObjPtr<mirror::Class> t_cl = t_it == t_if_count ? t : t_if->GetInterface(t_it);
+      if (!t_cl->IsInterface()) {
+        continue;
+      }
+
+      if (s_cl == t_cl) {
+        // Found something arbitrary in common.
+        return s_cl;
+      }
+    }
+  }
+
+  // Return java.lang.Object.
+  ObjPtr<mirror::Class> obj_class = s->IsInterface() ? s->GetSuperClass() : t->GetSuperClass();
+  DCHECK(obj_class->IsObjectClass());
+  return obj_class;
+}
+
+}  // namespace
+
 const RegType& RegType::Merge(const RegType& incoming_type,
                               RegTypeCache* reg_types,
                               MethodVerifier* verifier) const {
@@ -730,7 +872,9 @@
       // Do not cache the classes as ClassJoin() can suspend and invalidate ObjPtr<>s.
       DCHECK(GetClass() != nullptr && !GetClass()->IsPrimitive());
       DCHECK(incoming_type.GetClass() != nullptr && !incoming_type.GetClass()->IsPrimitive());
-      ObjPtr<mirror::Class> join_class = ClassJoin(GetClass(), incoming_type.GetClass());
+      ObjPtr<mirror::Class> join_class = ClassJoin(GetClass(),
+                                                   incoming_type.GetClass(),
+                                                   reg_types->GetClassLinker());
       if (UNLIKELY(join_class == nullptr)) {
         // Internal error joining the classes (e.g., OOME). Report an unresolved reference type.
         // We cannot report an unresolved merge type, as that will attempt to merge the resolved
@@ -743,7 +887,7 @@
 
         // When compiling on the host, we rather want to abort to ensure determinism for preopting.
         // (In that case, it is likely a misconfiguration of dex2oat.)
-        if (!kIsTargetBuild && Runtime::Current()->IsAotCompiler()) {
+        if (!kIsTargetBuild && (verifier != nullptr && verifier->IsAotMode())) {
           LOG(FATAL) << "Could not create class join of "
                      << GetClass()->PrettyClass()
                      << " & "
@@ -783,64 +927,6 @@
   }
 }
 
-// See comment in reg_type.h
-ObjPtr<mirror::Class> RegType::ClassJoin(ObjPtr<mirror::Class> s, ObjPtr<mirror::Class> t) {
-  DCHECK(!s->IsPrimitive()) << s->PrettyClass();
-  DCHECK(!t->IsPrimitive()) << t->PrettyClass();
-  if (s == t) {
-    return s;
-  } else if (s->IsAssignableFrom(t)) {
-    return s;
-  } else if (t->IsAssignableFrom(s)) {
-    return t;
-  } else if (s->IsArrayClass() && t->IsArrayClass()) {
-    ObjPtr<mirror::Class> s_ct = s->GetComponentType();
-    ObjPtr<mirror::Class> t_ct = t->GetComponentType();
-    if (s_ct->IsPrimitive() || t_ct->IsPrimitive()) {
-      // Given the types aren't the same, if either array is of primitive types then the only
-      // common parent is java.lang.Object
-      ObjPtr<mirror::Class> result = s->GetSuperClass();  // short-cut to java.lang.Object
-      DCHECK(result->IsObjectClass());
-      return result;
-    }
-    Thread* self = Thread::Current();
-    ObjPtr<mirror::Class> common_elem = ClassJoin(s_ct, t_ct);
-    if (UNLIKELY(common_elem == nullptr)) {
-      self->AssertPendingException();
-      return nullptr;
-    }
-    // Note: The following lookup invalidates existing ObjPtr<>s.
-    ObjPtr<mirror::Class> array_class =
-        Runtime::Current()->GetClassLinker()->FindArrayClass(self, common_elem);
-    if (UNLIKELY(array_class == nullptr)) {
-      self->AssertPendingException();
-      return nullptr;
-    }
-    return array_class;
-  } else {
-    size_t s_depth = s->Depth();
-    size_t t_depth = t->Depth();
-    // Get s and t to the same depth in the hierarchy
-    if (s_depth > t_depth) {
-      while (s_depth > t_depth) {
-        s = s->GetSuperClass();
-        s_depth--;
-      }
-    } else {
-      while (t_depth > s_depth) {
-        t = t->GetSuperClass();
-        t_depth--;
-      }
-    }
-    // Go up the hierarchy until we get to the common parent
-    while (s != t) {
-      s = s->GetSuperClass();
-      t = t->GetSuperClass();
-    }
-    return s;
-  }
-}
-
 void RegType::CheckInvariants() const {
   if (IsConstant() || IsConstantLo() || IsConstantHi()) {
     CHECK(descriptor_.empty()) << *this;
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 56073db..f634645 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -345,28 +345,6 @@
  private:
   virtual void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
 
-  /*
-   * A basic Join operation on classes. For a pair of types S and T the Join, written S v T = J, is
-   * S <: J, T <: J and for-all U such that S <: U, T <: U then J <: U. That is J is the parent of
-   * S and T such that there isn't a parent of both S and T that isn't also the parent of J (ie J
-   * is the deepest (lowest upper bound) parent of S and T).
-   *
-   * This operation applies for regular classes and arrays, however, for interface types there
-   * needn't be a partial ordering on the types. We could solve the problem of a lack of a partial
-   * order by introducing sets of types, however, the only operation permissible on an interface is
-   * invoke-interface. In the tradition of Java verifiers [1] we defer the verification of interface
-   * types until an invoke-interface call on the interface typed reference at runtime and allow
-   * the perversion of Object being assignable to an interface type (note, however, that we don't
-   * allow assignment of Object or Interface to any concrete class and are therefore type safe).
-   *
-   * Note: This may return null in case of internal errors, e.g., OOME when a new class would have
-   *       to be created but there is no heap space. The exception will stay pending, and it is
-   *       the job of the caller to handle it.
-   *
-   * [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy
-   */
-  static ObjPtr<mirror::Class> ClassJoin(ObjPtr<mirror::Class> s, ObjPtr<mirror::Class> t)
-      REQUIRES_SHARED(Locks::mutator_lock_);
 
   static bool AssignableFrom(const RegType& lhs,
                              const RegType& rhs,
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 1553017..2edb0f1 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -39,6 +39,12 @@
 const PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant -
                                                                kMinSmallConstant + 1];
 
+namespace {
+
+ClassLinker* gInitClassLinker = nullptr;
+
+}  // namespace
+
 ALWAYS_INLINE static inline bool MatchingPrecisionForClass(const RegType* entry, bool precise)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   if (entry->IsPreciseReference() == precise) {
@@ -153,15 +159,14 @@
                                                  ObjPtr<mirror::ClassLoader> loader) {
   // Class was not found, must create new type.
   // Try resolving class
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   Thread* self = Thread::Current();
   StackHandleScope<1> hs(self);
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(loader));
   ObjPtr<mirror::Class> klass = nullptr;
   if (can_load_classes_) {
-    klass = class_linker->FindClass(self, descriptor, class_loader);
+    klass = class_linker_->FindClass(self, descriptor, class_loader);
   } else {
-    klass = class_linker->LookupClass(self, descriptor, loader);
+    klass = class_linker_->LookupClass(self, descriptor, loader);
     if (klass != nullptr && !klass->IsResolved()) {
       // We found the class but without it being loaded its not safe for use.
       klass = nullptr;
@@ -277,11 +282,16 @@
   return *reg_type;
 }
 
-RegTypeCache::RegTypeCache(bool can_load_classes, ScopedArenaAllocator& allocator, bool can_suspend)
+RegTypeCache::RegTypeCache(ClassLinker* class_linker,
+                           bool can_load_classes,
+                           ScopedArenaAllocator& allocator,
+                           bool can_suspend)
     : entries_(allocator.Adapter(kArenaAllocVerifier)),
       klass_entries_(allocator.Adapter(kArenaAllocVerifier)),
-      can_load_classes_(can_load_classes),
-      allocator_(allocator) {
+      allocator_(allocator),
+      class_linker_(class_linker),
+      can_load_classes_(can_load_classes) {
+  DCHECK_EQ(class_linker, gInitClassLinker);
   DCHECK(can_suspend || !can_load_classes) << "Cannot load classes if suspension is disabled!";
   if (kIsDebugBuild && can_suspend) {
     Thread::Current()->AssertThreadSuspensionIsAllowable(gAborting == 0);
@@ -337,7 +347,9 @@
 };
 }  // namespace
 
-void RegTypeCache::CreatePrimitiveAndSmallConstantTypes() {
+void RegTypeCache::CreatePrimitiveAndSmallConstantTypes(ClassLinker* class_linker) {
+  gInitClassLinker = class_linker;
+
   // Note: this must have the same order as FillPrimitiveAndSmallConstantTypes.
 
   // It is acceptable to pass on the const char* in type to CreateInstance, as all calls below are
@@ -349,8 +361,7 @@
     // Try loading the class from linker.
     DCHECK(type.descriptor != nullptr);
     if (strlen(type.descriptor) > 0) {
-      klass = art::Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(),
-                                                                         type.descriptor);
+      klass = class_linker->FindSystemClass(Thread::Current(), type.descriptor);
       DCHECK(klass != nullptr);
     }
     const Type* entry = Type::CreateInstance(klass,
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index a9a8116..a6d226a 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -28,10 +28,13 @@
 #include "gc_root.h"
 
 namespace art {
+
 namespace mirror {
 class Class;
 class ClassLoader;
 }  // namespace mirror
+
+class ClassLinker;
 class ScopedArenaAllocator;
 
 namespace verifier {
@@ -62,12 +65,15 @@
 
 class RegTypeCache {
  public:
-  RegTypeCache(bool can_load_classes, ScopedArenaAllocator& allocator, bool can_suspend = true);
+  RegTypeCache(ClassLinker* class_linker,
+               bool can_load_classes,
+               ScopedArenaAllocator& allocator,
+               bool can_suspend = true);
   ~RegTypeCache();
-  static void Init() REQUIRES_SHARED(Locks::mutator_lock_) {
+  static void Init(ClassLinker* class_linker) REQUIRES_SHARED(Locks::mutator_lock_) {
     if (!RegTypeCache::primitive_initialized_) {
       CHECK_EQ(RegTypeCache::primitive_count_, 0);
-      CreatePrimitiveAndSmallConstantTypes();
+      CreatePrimitiveAndSmallConstantTypes(class_linker);
       CHECK_EQ(RegTypeCache::primitive_count_, kNumPrimitivesAndSmallConstants);
       RegTypeCache::primitive_initialized_ = true;
     }
@@ -160,6 +166,10 @@
   static void VisitStaticRoots(RootVisitor* visitor)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
+  ClassLinker* GetClassLinker() {
+    return class_linker_;
+  }
+
  private:
   void FillPrimitiveAndSmallConstantTypes() REQUIRES_SHARED(Locks::mutator_lock_);
   ObjPtr<mirror::Class> ResolveClass(const char* descriptor, ObjPtr<mirror::ClassLoader> loader)
@@ -177,7 +187,8 @@
   // verifier and return a string view.
   std::string_view AddString(const std::string_view& str);
 
-  static void CreatePrimitiveAndSmallConstantTypes() REQUIRES_SHARED(Locks::mutator_lock_);
+  static void CreatePrimitiveAndSmallConstantTypes(ClassLinker* class_linker)
+      REQUIRES_SHARED(Locks::mutator_lock_);
 
   // A quick look up for popular small constants.
   static constexpr int32_t kMinSmallConstant = -1;
@@ -200,12 +211,14 @@
   // Fast lookup for quickly finding entries that have a matching class.
   ScopedArenaVector<std::pair<GcRoot<mirror::Class>, const RegType*>> klass_entries_;
 
-  // Whether or not we're allowed to load classes.
-  const bool can_load_classes_;
-
   // Arena allocator.
   ScopedArenaAllocator& allocator_;
 
+  ClassLinker* class_linker_;
+
+  // Whether or not we're allowed to load classes.
+  const bool can_load_classes_;
+
   DISALLOW_COPY_AND_ASSIGN(RegTypeCache);
 };
 
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 3224385..9cac5fb 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -38,7 +38,7 @@
   ArenaStack stack(Runtime::Current()->GetArenaPool());
   ScopedArenaAllocator allocator(&stack);
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true, allocator);
+  RegTypeCache cache(Runtime::Current()->GetClassLinker(), true, allocator);
   const RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
   const RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
   const RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
@@ -62,7 +62,7 @@
   ArenaStack stack(Runtime::Current()->GetArenaPool());
   ScopedArenaAllocator allocator(&stack);
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true, allocator);
+  RegTypeCache cache(Runtime::Current()->GetClassLinker(), true, allocator);
   int64_t val = static_cast<int32_t>(1234);
   const RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
   const RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
@@ -88,7 +88,7 @@
   ArenaStack stack(Runtime::Current()->GetArenaPool());
   ScopedArenaAllocator allocator(&stack);
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true, allocator);
+  RegTypeCache cache(Runtime::Current()->GetClassLinker(), true, allocator);
 
   const RegType& bool_reg_type = cache.Boolean();
   EXPECT_FALSE(bool_reg_type.IsUndefined());
@@ -363,7 +363,7 @@
   ArenaStack stack(Runtime::Current()->GetArenaPool());
   ScopedArenaAllocator allocator(&stack);
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true, allocator);
+  RegTypeCache cache(Runtime::Current()->GetClassLinker(), true, allocator);
   const RegType& imprecise_obj = cache.JavaLangObject(false);
   const RegType& precise_obj = cache.JavaLangObject(true);
   const RegType& precise_obj_2 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true);
@@ -380,7 +380,7 @@
   ArenaStack stack(Runtime::Current()->GetArenaPool());
   ScopedArenaAllocator allocator(&stack);
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true, allocator);
+  RegTypeCache cache(Runtime::Current()->GetClassLinker(), true, allocator);
   const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
   EXPECT_TRUE(ref_type_0.IsNonZeroReferenceTypes());
@@ -398,7 +398,7 @@
   ArenaStack stack(Runtime::Current()->GetArenaPool());
   ScopedArenaAllocator allocator(&stack);
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true, allocator);
+  RegTypeCache cache(Runtime::Current()->GetClassLinker(), true, allocator);
   const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
   const RegType& ref_type = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
@@ -422,7 +422,7 @@
   ArenaStack stack(Runtime::Current()->GetArenaPool());
   ScopedArenaAllocator allocator(&stack);
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true, allocator);
+  RegTypeCache cache(Runtime::Current()->GetClassLinker(), true, allocator);
   const RegType& unresolved_ref = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   const RegType& unresolved_ref_another = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExistEither;", true);
   const RegType& resolved_ref = cache.JavaLangString();
@@ -450,7 +450,7 @@
   ArenaStack stack(Runtime::Current()->GetArenaPool());
   ScopedArenaAllocator allocator(&stack);
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true, allocator);
+  RegTypeCache cache(Runtime::Current()->GetClassLinker(), true, allocator);
   const RegType& ref_type = cache.JavaLangString();
   const RegType& ref_type_2 = cache.JavaLangString();
   const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/String;", true);
@@ -472,7 +472,7 @@
   ArenaStack stack(Runtime::Current()->GetArenaPool());
   ScopedArenaAllocator allocator(&stack);
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true, allocator);
+  RegTypeCache cache(Runtime::Current()->GetClassLinker(), true, allocator);
   const RegType& ref_type = cache.JavaLangObject(true);
   const RegType& ref_type_2 = cache.JavaLangObject(true);
   const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true);
@@ -487,7 +487,7 @@
   ScopedObjectAccess soa(Thread::Current());
   ArenaStack stack(Runtime::Current()->GetArenaPool());
   ScopedArenaAllocator allocator(&stack);
-  RegTypeCache cache_new(true, allocator);
+  RegTypeCache cache_new(Runtime::Current()->GetClassLinker(), true, allocator);
   const RegType& string = cache_new.JavaLangString();
   const RegType& Object = cache_new.JavaLangObject(true);
   EXPECT_TRUE(string.Merge(Object, &cache_new, /* verifier= */ nullptr).IsJavaLangObject());
@@ -512,7 +512,7 @@
   ArenaStack stack(Runtime::Current()->GetArenaPool());
   ScopedArenaAllocator allocator(&stack);
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache_new(true, allocator);
+  RegTypeCache cache_new(Runtime::Current()->GetClassLinker(), true, allocator);
 
   constexpr int32_t kTestConstantValue = 10;
   const RegType& float_type = cache_new.Float();
@@ -545,7 +545,7 @@
   ArenaStack stack(Runtime::Current()->GetArenaPool());
   ScopedArenaAllocator allocator(&stack);
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache_new(true, allocator);
+  RegTypeCache cache_new(Runtime::Current()->GetClassLinker(), true, allocator);
 
   constexpr int32_t kTestConstantValue = 10;
   const RegType& long_lo_type = cache_new.LongLo();
@@ -605,7 +605,7 @@
   ArenaStack stack(Runtime::Current()->GetArenaPool());
   ScopedArenaAllocator allocator(&stack);
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache_new(true, allocator);
+  RegTypeCache cache_new(Runtime::Current()->GetClassLinker(), true, allocator);
 
   constexpr int32_t kTestConstantValue = 10;
   const RegType& double_lo_type = cache_new.DoubleLo();
@@ -664,6 +664,23 @@
   }
 }
 
+// Without a running MethodVerifier, the class-bearing register types may become stale as the GC
+// will not visit them. It is easiest to disable moving GC.
+//
+// For some of the tests we need (or want) a working RegTypeCache that can load classes. So it is
+// not generally possible to disable GC using ScopedGCCriticalSection (as it blocks GC and
+// suspension completely).
+struct ScopedDisableMovingGC {
+  explicit ScopedDisableMovingGC(Thread* t) : self(t) {
+    Runtime::Current()->GetHeap()->IncrementDisableMovingGC(self);
+  }
+  ~ScopedDisableMovingGC() {
+    Runtime::Current()->GetHeap()->DecrementDisableMovingGC(self);
+  }
+
+  Thread* self;
+};
+
 TEST_F(RegTypeTest, MergeSemiLatticeRef) {
   //  (Incomplete) semilattice:
   //
@@ -700,12 +717,9 @@
   ScopedArenaAllocator allocator(&stack);
   ScopedObjectAccess soa(Thread::Current());
 
-  // We cannot allow moving GC. Otherwise we'd have to ensure the reg types are updated (reference
-  // reg types store a class pointer in a GCRoot, which is normally updated through active verifiers
-  // being registered with their thread), which is unnecessarily complex.
-  Runtime::Current()->GetHeap()->IncrementDisableMovingGC(soa.Self());
+  ScopedDisableMovingGC no_gc(soa.Self());
 
-  RegTypeCache cache(true, allocator);
+  RegTypeCache cache(Runtime::Current()->GetClassLinker(), true, allocator);
 
   const RegType& conflict = cache.Conflict();
   const RegType& zero = cache.Zero();
@@ -1022,8 +1036,6 @@
     check(triple.in1, triple.in2, triple.out);
     check(triple.in2, triple.in1, triple.out);
   }
-
-  Runtime::Current()->GetHeap()->DecrementDisableMovingGC(soa.Self());
 }
 
 TEST_F(RegTypeTest, ConstPrecision) {
@@ -1031,7 +1043,7 @@
   ArenaStack stack(Runtime::Current()->GetArenaPool());
   ScopedArenaAllocator allocator(&stack);
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache_new(true, allocator);
+  RegTypeCache cache_new(Runtime::Current()->GetClassLinker(), true, allocator);
   const RegType& imprecise_const = cache_new.FromCat1Const(10, false);
   const RegType& precise_const = cache_new.FromCat1Const(10, true);
 
@@ -1060,10 +1072,7 @@
   ScopedArenaAllocator allocator(&stack);
   ScopedObjectAccess soa(Thread::Current());
 
-  // We cannot allow moving GC. Otherwise we'd have to ensure the reg types are updated (reference
-  // reg types store a class pointer in a GCRoot, which is normally updated through active verifiers
-  // being registered with their thread), which is unnecessarily complex.
-  Runtime::Current()->GetHeap()->IncrementDisableMovingGC(soa.Self());
+  ScopedDisableMovingGC no_gc(soa.Self());
 
   // We merge nested array of primitive wrappers. These have a join type of an array of Number of
   // the same depth. We start with depth five, as we want at least two newly created classes to
@@ -1073,7 +1082,7 @@
   constexpr const char* kNumberArrayFour = "[[[[Ljava/lang/Number;";
   constexpr const char* kNumberArrayFive = "[[[[[Ljava/lang/Number;";
 
-  RegTypeCache cache(true, allocator);
+  RegTypeCache cache(Runtime::Current()->GetClassLinker(), true, allocator);
   const RegType& int_array_array = cache.From(nullptr, kIntArrayFive, false);
   ASSERT_TRUE(int_array_array.HasClass());
   const RegType& float_array_array = cache.From(nullptr, kFloatArrayFive, false);
@@ -1090,8 +1099,51 @@
 
   const RegType& join_type = int_array_array.Merge(float_array_array, &cache, nullptr);
   ASSERT_TRUE(join_type.IsUnresolvedReference());
+}
 
-  Runtime::Current()->GetHeap()->DecrementDisableMovingGC(soa.Self());
+class RegTypeClassJoinTest : public RegTypeTest {
+ protected:
+  void TestClassJoin(const char* in1, const char* in2, const char* out) {
+    ArenaStack stack(Runtime::Current()->GetArenaPool());
+    ScopedArenaAllocator allocator(&stack);
+
+    ScopedObjectAccess soa(Thread::Current());
+    jobject jclass_loader = LoadDex("Interfaces");
+    StackHandleScope<4> hs(soa.Self());
+    Handle<mirror::ClassLoader> class_loader(
+        hs.NewHandle(soa.Decode<mirror::ClassLoader>(jclass_loader)));
+
+    Handle<mirror::Class> c1(hs.NewHandle(
+        class_linker_->FindClass(soa.Self(), in1, class_loader)));
+    Handle<mirror::Class> c2(hs.NewHandle(
+        class_linker_->FindClass(soa.Self(), in2, class_loader)));
+    ASSERT_TRUE(c1 != nullptr);
+    ASSERT_TRUE(c2 != nullptr);
+
+    ScopedDisableMovingGC no_gc(soa.Self());
+
+    RegTypeCache cache(Runtime::Current()->GetClassLinker(), true, allocator);
+    const RegType& c1_reg_type = *cache.InsertClass(in1, c1.Get(), false);
+    const RegType& c2_reg_type = *cache.InsertClass(in2, c2.Get(), false);
+
+    const RegType& join_type = c1_reg_type.Merge(c2_reg_type, &cache, nullptr);
+    EXPECT_TRUE(join_type.HasClass());
+    EXPECT_EQ(join_type.GetDescriptor(), std::string_view(out));
+  }
+};
+
+TEST_F(RegTypeClassJoinTest, ClassJoinInterfaces) {
+  TestClassJoin("LInterfaces$K;", "LInterfaces$L;", "LInterfaces$J;");
+}
+
+TEST_F(RegTypeClassJoinTest, ClassJoinInterfaceClass) {
+  TestClassJoin("LInterfaces$B;", "LInterfaces$L;", "LInterfaces$J;");
+}
+
+TEST_F(RegTypeClassJoinTest, ClassJoinClassClass) {
+  // This test codifies that we prefer the class hierarchy over interfaces. It's a mostly
+  // arbitrary choice, optimally we'd have set types and could handle multi-inheritance precisely.
+  TestClassJoin("LInterfaces$A;", "LInterfaces$B;", "Ljava/lang/Object;");
 }
 
 }  // namespace verifier
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index bf40fbe..1e3416a 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -175,7 +175,7 @@
 
 inline void RegisterLine::VerifyMonitorStackEmpty(MethodVerifier* verifier) const {
   if (MonitorStackDepth() != 0) {
-    verifier->Fail(VERIFY_ERROR_LOCKING);
+    verifier->Fail(VERIFY_ERROR_LOCKING, /*pending_exc=*/ false);
     if (kDumpLockFailures) {
       VLOG(verifier) << "expected empty monitor stack in "
                      << verifier->GetMethodReference().PrettyMethod();
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 1bbf5a6..b69267c 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -442,7 +442,7 @@
   }
   if (monitors_.size() > 0 || incoming_line->monitors_.size() > 0) {
     if (monitors_.size() != incoming_line->monitors_.size()) {
-      verifier->Fail(VERIFY_ERROR_LOCKING);
+      verifier->Fail(VERIFY_ERROR_LOCKING, /*pending_exc=*/ false);
       if (kDumpLockFailures) {
         VLOG(verifier) << "mismatched stack depths (depth=" << MonitorStackDepth()
                        << ", incoming depth=" << incoming_line->MonitorStackDepth() << ") in "
@@ -476,7 +476,7 @@
               !FindLockAliasedRegister(idx,
                                        incoming_line->reg_to_lock_depths_,
                                        reg_to_lock_depths_)) {
-            verifier->Fail(VERIFY_ERROR_LOCKING);
+            verifier->Fail(VERIFY_ERROR_LOCKING, /*pending_exc=*/ false);
             if (kDumpLockFailures) {
               VLOG(verifier) << "mismatched stack depths for register v" << idx
                              << ": " << depths  << " != " << incoming_depths << " in "
@@ -517,7 +517,7 @@
                                          incoming_line->reg_to_lock_depths_,
                                          reg_to_lock_depths_)) {
               // No aliases for both current and incoming, we'll lose information.
-              verifier->Fail(VERIFY_ERROR_LOCKING);
+              verifier->Fail(VERIFY_ERROR_LOCKING, /*pending_exc=*/ false);
               if (kDumpLockFailures) {
                 VLOG(verifier) << "mismatched lock levels for register v" << idx << ": "
                                << std::hex << locked_levels << std::dec  << " != "
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index ed5488c..219627f 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -830,7 +830,20 @@
 }
 
 void VerifierDeps::Dump(VariableIndentationOutputStream* vios) const {
+  // Sort dex files by their location to ensure deterministic ordering.
+  using DepsEntry = std::pair<const DexFile*, const DexFileDeps*>;
+  std::vector<DepsEntry> dex_deps;
+  dex_deps.reserve(dex_deps_.size());
   for (const auto& dep : dex_deps_) {
+    dex_deps.emplace_back(dep.first, dep.second.get());
+  }
+  std::sort(
+      dex_deps.begin(),
+      dex_deps.end(),
+      [](const DepsEntry& lhs, const DepsEntry& rhs) {
+        return lhs.first->GetLocation() < rhs.first->GetLocation();
+      });
+  for (const auto& dep : dex_deps) {
     const DexFile& dex_file = *dep.first;
     vios->Stream()
         << "Dependencies of "
diff --git a/runtime/verifier/verifier_enums.h b/runtime/verifier/verifier_enums.h
index bbdd45d..33eca4d 100644
--- a/runtime/verifier/verifier_enums.h
+++ b/runtime/verifier/verifier_enums.h
@@ -32,6 +32,7 @@
 // The outcome of verification.
 enum class FailureKind {
   kNoFailure,
+  kAccessChecksFailure,
   kSoftFailure,
   kHardFailure,
 };
@@ -72,27 +73,33 @@
  * the class to remain uncompiled. Other errors denote verification errors that cause bytecode
  * to be rewritten to fail at runtime.
  */
-enum VerifyError {
-  VERIFY_ERROR_BAD_CLASS_HARD = 1,        // VerifyError; hard error that skips compilation.
-  VERIFY_ERROR_BAD_CLASS_SOFT = 2,        // VerifyError; soft error that verifies again at runtime.
+enum VerifyError : uint32_t {
+  VERIFY_ERROR_BAD_CLASS_HARD =    1 << 0,   // VerifyError; hard error that skips compilation.
+  VERIFY_ERROR_BAD_CLASS_SOFT =    1 << 1,   // VerifyError; soft error that verifies again at
+                                             // runtime.
 
-  VERIFY_ERROR_NO_CLASS = 4,              // NoClassDefFoundError.
-  VERIFY_ERROR_NO_FIELD = 8,              // NoSuchFieldError.
-  VERIFY_ERROR_NO_METHOD = 16,            // NoSuchMethodError.
-  VERIFY_ERROR_ACCESS_CLASS = 32,         // IllegalAccessError.
-  VERIFY_ERROR_ACCESS_FIELD = 64,         // IllegalAccessError.
-  VERIFY_ERROR_ACCESS_METHOD = 128,       // IllegalAccessError.
-  VERIFY_ERROR_CLASS_CHANGE = 256,        // IncompatibleClassChangeError.
-  VERIFY_ERROR_INSTANTIATION = 512,       // InstantiationError.
+  VERIFY_ERROR_NO_CLASS =          1 << 2,   // NoClassDefFoundError.
+  VERIFY_ERROR_NO_FIELD =          1 << 3,   // NoSuchFieldError.
+  VERIFY_ERROR_NO_METHOD =         1 << 4,   // NoSuchMethodError.
+  VERIFY_ERROR_ACCESS_CLASS =      1 << 5,   // IllegalAccessError.
+  VERIFY_ERROR_ACCESS_FIELD =      1 << 6,   // IllegalAccessError.
+  VERIFY_ERROR_ACCESS_METHOD =     1 << 7,   // IllegalAccessError.
+  VERIFY_ERROR_CLASS_CHANGE =      1 << 8,   // IncompatibleClassChangeError.
+  VERIFY_ERROR_INSTANTIATION =     1 << 9,   // InstantiationError.
   // For opcodes that don't have complete verifier support,  we need a way to continue
   // execution at runtime without attempting to re-verify (since we know it will fail no
   // matter what). Instead, run as the interpreter in a special "do access checks" mode
   // which will perform verifier-like checking on the fly.
-  VERIFY_ERROR_FORCE_INTERPRETER = 1024,  // Skip the verification phase at runtime;
-                                          // force the interpreter to do access checks.
-                                          // (sets a soft fail at compile time).
-  VERIFY_ERROR_LOCKING = 2048,            // Could not guarantee balanced locking. This should be
-                                          // punted to the interpreter with access checks.
+  VERIFY_ERROR_FORCE_INTERPRETER = 1 << 10,  // Skip the verification phase at runtime;
+                                             // force the interpreter to do access checks.
+                                             // (sets a soft fail at compile time).
+  VERIFY_ERROR_LOCKING =           1 << 11,  // Could not guarantee balanced locking. This should be
+                                             // punted to the interpreter with access checks.
+  VERIFY_ERROR_SKIP_COMPILER =    1u << 31,  // Flag to note that the failure should preclude
+                                             // optimization. Meant as a signal from the verifier
+                                             // to the compiler that there is unreachable unverified
+                                             // code. May be removed once the compiler handles
+                                             // unreachable code correctly.
 };
 std::ostream& operator<<(std::ostream& os, const VerifyError& rhs);
 
diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc
index db90ae8..fabd4db 100644
--- a/runtime/well_known_classes.cc
+++ b/runtime/well_known_classes.cc
@@ -29,12 +29,14 @@
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "hidden_api.h"
 #include "jni/jni_internal.h"
+#include "jni_id_type.h"
 #include "mirror/class.h"
 #include "mirror/throwable.h"
 #include "nativehelper/scoped_local_ref.h"
 #include "obj_ptr-inl.h"
 #include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
+#include "scoped_thread_state_change.h"
 #include "thread-current-inl.h"
 
 namespace art {
@@ -61,6 +63,7 @@
 jclass WellKnownClasses::java_lang_NoClassDefFoundError;
 jclass WellKnownClasses::java_lang_Object;
 jclass WellKnownClasses::java_lang_OutOfMemoryError;
+jclass WellKnownClasses::java_lang_reflect_InvocationTargetException;
 jclass WellKnownClasses::java_lang_reflect_Parameter;
 jclass WellKnownClasses::java_lang_reflect_Parameter__array;
 jclass WellKnownClasses::java_lang_reflect_Proxy;
@@ -101,6 +104,7 @@
 jmethodID WellKnownClasses::java_lang_Long_valueOf;
 jmethodID WellKnownClasses::java_lang_ref_FinalizerReference_add;
 jmethodID WellKnownClasses::java_lang_ref_ReferenceQueue_add;
+jmethodID WellKnownClasses::java_lang_reflect_InvocationTargetException_init;
 jmethodID WellKnownClasses::java_lang_reflect_Parameter_init;
 jmethodID WellKnownClasses::java_lang_reflect_Proxy_init;
 jmethodID WellKnownClasses::java_lang_reflect_Proxy_invoke;
@@ -126,6 +130,8 @@
 jfieldID WellKnownClasses::dalvik_system_DexPathList_dexElements;
 jfieldID WellKnownClasses::dalvik_system_DexPathList__Element_dexFile;
 jfieldID WellKnownClasses::dalvik_system_VMRuntime_nonSdkApiUsageConsumer;
+jfieldID WellKnownClasses::java_io_FileDescriptor_descriptor;
+jfieldID WellKnownClasses::java_io_FileDescriptor_ownerId;
 jfieldID WellKnownClasses::java_lang_Thread_parkBlocker;
 jfieldID WellKnownClasses::java_lang_Thread_daemon;
 jfieldID WellKnownClasses::java_lang_Thread_group;
@@ -146,6 +152,10 @@
 jfieldID WellKnownClasses::java_lang_Throwable_stackTrace;
 jfieldID WellKnownClasses::java_lang_Throwable_stackState;
 jfieldID WellKnownClasses::java_lang_Throwable_suppressedExceptions;
+jfieldID WellKnownClasses::java_nio_Buffer_address;
+jfieldID WellKnownClasses::java_nio_Buffer_elementSizeShift;
+jfieldID WellKnownClasses::java_nio_Buffer_limit;
+jfieldID WellKnownClasses::java_nio_Buffer_position;
 jfieldID WellKnownClasses::java_nio_ByteBuffer_address;
 jfieldID WellKnownClasses::java_nio_ByteBuffer_hb;
 jfieldID WellKnownClasses::java_nio_ByteBuffer_isReadOnly;
@@ -170,8 +180,17 @@
 
 static jfieldID CacheField(JNIEnv* env, jclass c, bool is_static,
                            const char* name, const char* signature) {
-  jfieldID fid = is_static ? env->GetStaticFieldID(c, name, signature) :
-      env->GetFieldID(c, name, signature);
+  jfieldID fid;
+  {
+    ScopedObjectAccess soa(env);
+    if (Runtime::Current()->GetJniIdType() != JniIdType::kSwapablePointer) {
+      fid = jni::EncodeArtField</*kEnableIndexIds*/ true>(
+          FindFieldJNI(soa, c, name, signature, is_static));
+    } else {
+      fid = jni::EncodeArtField</*kEnableIndexIds*/ false>(
+          FindFieldJNI(soa, c, name, signature, is_static));
+    }
+  }
   if (fid == nullptr) {
     ScopedObjectAccess soa(env);
     if (soa.Self()->IsExceptionPending()) {
@@ -187,8 +206,17 @@
 
 static jmethodID CacheMethod(JNIEnv* env, jclass c, bool is_static,
                              const char* name, const char* signature) {
-  jmethodID mid = is_static ? env->GetStaticMethodID(c, name, signature) :
-      env->GetMethodID(c, name, signature);
+  jmethodID mid;
+  {
+    ScopedObjectAccess soa(env);
+    if (Runtime::Current()->GetJniIdType() != JniIdType::kSwapablePointer) {
+      mid = jni::EncodeArtMethod</*kEnableIndexIds*/ true>(
+          FindMethodJNI(soa, c, name, signature, is_static));
+    } else {
+      mid = jni::EncodeArtMethod</*kEnableIndexIds*/ false>(
+          FindMethodJNI(soa, c, name, signature, is_static));
+    }
+  }
   if (mid == nullptr) {
     ScopedObjectAccess soa(env);
     if (soa.Self()->IsExceptionPending()) {
@@ -324,6 +352,7 @@
   java_lang_Error = CacheClass(env, "java/lang/Error");
   java_lang_IllegalAccessError = CacheClass(env, "java/lang/IllegalAccessError");
   java_lang_NoClassDefFoundError = CacheClass(env, "java/lang/NoClassDefFoundError");
+  java_lang_reflect_InvocationTargetException = CacheClass(env, "java/lang/reflect/InvocationTargetException");
   java_lang_reflect_Parameter = CacheClass(env, "java/lang/reflect/Parameter");
   java_lang_reflect_Parameter__array = CacheClass(env, "[Ljava/lang/reflect/Parameter;");
   java_lang_reflect_Proxy = CacheClass(env, "java/lang/reflect/Proxy");
@@ -345,9 +374,17 @@
   org_apache_harmony_dalvik_ddmc_Chunk = CacheClass(env, "org/apache/harmony/dalvik/ddmc/Chunk");
   org_apache_harmony_dalvik_ddmc_DdmServer = CacheClass(env, "org/apache/harmony/dalvik/ddmc/DdmServer");
 
+  InitFieldsAndMethodsOnly(env);
+}
+
+void WellKnownClasses::InitFieldsAndMethodsOnly(JNIEnv* env) {
+  hiddenapi::ScopedHiddenApiEnforcementPolicySetting hiddenapi_exemption(
+      hiddenapi::EnforcementPolicy::kDisabled);
+
   dalvik_system_BaseDexClassLoader_getLdLibraryPath = CacheMethod(env, dalvik_system_BaseDexClassLoader, false, "getLdLibraryPath", "()Ljava/lang/String;");
   dalvik_system_VMRuntime_runFinalization = CacheMethod(env, dalvik_system_VMRuntime, true, "runFinalization", "(J)V");
   dalvik_system_VMRuntime_hiddenApiUsed = CacheMethod(env, dalvik_system_VMRuntime, true, "hiddenApiUsed", "(ILjava/lang/String;Ljava/lang/String;IZ)V");
+
   java_lang_ClassNotFoundException_init = CacheMethod(env, java_lang_ClassNotFoundException, false, "<init>", "(Ljava/lang/String;Ljava/lang/Throwable;)V");
   java_lang_ClassLoader_loadClass = CacheMethod(env, java_lang_ClassLoader, false, "loadClass", "(Ljava/lang/String;)Ljava/lang/Class;");
 
@@ -360,6 +397,7 @@
   java_lang_ref_FinalizerReference_add = CacheMethod(env, "java/lang/ref/FinalizerReference", true, "add", "(Ljava/lang/Object;)V");
   java_lang_ref_ReferenceQueue_add = CacheMethod(env, "java/lang/ref/ReferenceQueue", true, "add", "(Ljava/lang/ref/Reference;)V");
 
+  java_lang_reflect_InvocationTargetException_init = CacheMethod(env, java_lang_reflect_InvocationTargetException, false, "<init>", "(Ljava/lang/Throwable;)V");
   java_lang_reflect_Parameter_init = CacheMethod(env, java_lang_reflect_Parameter, false, "<init>", "(Ljava/lang/String;ILjava/lang/reflect/Executable;I)V");
   java_lang_String_charAt = CacheMethod(env, java_lang_String, false, "charAt", "(I)C");
   java_lang_Thread_dispatchUncaughtException = CacheMethod(env, java_lang_Thread, false, "dispatchUncaughtException", "(Ljava/lang/Throwable;)V");
@@ -381,6 +419,11 @@
   dalvik_system_DexPathList_dexElements = CacheField(env, dalvik_system_DexPathList, false, "dexElements", "[Ldalvik/system/DexPathList$Element;");
   dalvik_system_DexPathList__Element_dexFile = CacheField(env, dalvik_system_DexPathList__Element, false, "dexFile", "Ldalvik/system/DexFile;");
   dalvik_system_VMRuntime_nonSdkApiUsageConsumer = CacheField(env, dalvik_system_VMRuntime, true, "nonSdkApiUsageConsumer", "Ljava/util/function/Consumer;");
+
+  ScopedLocalRef<jclass> java_io_FileDescriptor(env, env->FindClass("java/io/FileDescriptor"));
+  java_io_FileDescriptor_descriptor = CacheField(env, java_io_FileDescriptor.get(), false, "descriptor", "I");
+  java_io_FileDescriptor_ownerId = CacheField(env, java_io_FileDescriptor.get(), false, "ownerId", "J");
+
   java_lang_Thread_parkBlocker = CacheField(env, java_lang_Thread, false, "parkBlocker", "Ljava/lang/Object;");
   java_lang_Thread_daemon = CacheField(env, java_lang_Thread, false, "daemon", "Z");
   java_lang_Thread_group = CacheField(env, java_lang_Thread, false, "group", "Ljava/lang/ThreadGroup;");
@@ -401,6 +444,13 @@
   java_lang_Throwable_stackTrace = CacheField(env, java_lang_Throwable, false, "stackTrace", "[Ljava/lang/StackTraceElement;");
   java_lang_Throwable_stackState = CacheField(env, java_lang_Throwable, false, "backtrace", "Ljava/lang/Object;");
   java_lang_Throwable_suppressedExceptions = CacheField(env, java_lang_Throwable, false, "suppressedExceptions", "Ljava/util/List;");
+
+  ScopedLocalRef<jclass> java_nio_Buffer(env, env->FindClass("java/nio/Buffer"));
+  java_nio_Buffer_address = CacheField(env, java_nio_Buffer.get(), false, "address", "J");
+  java_nio_Buffer_elementSizeShift = CacheField(env, java_nio_Buffer.get(), false, "_elementSizeShift", "I");
+  java_nio_Buffer_limit = CacheField(env, java_nio_Buffer.get(), false, "limit", "I");
+  java_nio_Buffer_position = CacheField(env, java_nio_Buffer.get(), false, "position", "I");
+
   java_nio_ByteBuffer_address = CacheField(env, java_nio_ByteBuffer, false, "address", "J");
   java_nio_ByteBuffer_hb = CacheField(env, java_nio_ByteBuffer, false, "hb", "[B");
   java_nio_ByteBuffer_isReadOnly = CacheField(env, java_nio_ByteBuffer, false, "isReadOnly", "Z");
@@ -426,10 +476,10 @@
 }
 
 void WellKnownClasses::LateInit(JNIEnv* env) {
-  ScopedLocalRef<jclass> java_lang_Runtime(env, env->FindClass("java/lang/Runtime"));
   // CacheField and CacheMethod will initialize their classes. Classes below
   // have clinit sections that call JNI methods. Late init is required
   // to make sure these JNI methods are available.
+  ScopedLocalRef<jclass> java_lang_Runtime(env, env->FindClass("java/lang/Runtime"));
   java_lang_Runtime_nativeLoad =
       CacheMethod(env, java_lang_Runtime.get(), true, "nativeLoad",
                   "(Ljava/lang/String;Ljava/lang/ClassLoader;Ljava/lang/Class;)"
@@ -448,6 +498,11 @@
                     "[Ljava/lang/Object;)Ljava/lang/Object;");
 }
 
+void WellKnownClasses::HandleJniIdTypeChange(JNIEnv* env) {
+  WellKnownClasses::InitFieldsAndMethodsOnly(env);
+  WellKnownClasses::LateInit(env);
+}
+
 void WellKnownClasses::Clear() {
   dalvik_annotation_optimization_CriticalNative = nullptr;
   dalvik_annotation_optimization_FastNative = nullptr;
@@ -470,6 +525,7 @@
   java_lang_NoClassDefFoundError = nullptr;
   java_lang_Object = nullptr;
   java_lang_OutOfMemoryError = nullptr;
+  java_lang_reflect_InvocationTargetException = nullptr;
   java_lang_reflect_Parameter = nullptr;
   java_lang_reflect_Parameter__array = nullptr;
   java_lang_reflect_Proxy = nullptr;
@@ -493,6 +549,8 @@
   dalvik_system_BaseDexClassLoader_getLdLibraryPath = nullptr;
   dalvik_system_VMRuntime_runFinalization = nullptr;
   dalvik_system_VMRuntime_hiddenApiUsed = nullptr;
+  java_io_FileDescriptor_descriptor = nullptr;
+  java_io_FileDescriptor_ownerId = nullptr;
   java_lang_Boolean_valueOf = nullptr;
   java_lang_Byte_valueOf = nullptr;
   java_lang_Character_valueOf = nullptr;
@@ -508,6 +566,7 @@
   java_lang_Long_valueOf = nullptr;
   java_lang_ref_FinalizerReference_add = nullptr;
   java_lang_ref_ReferenceQueue_add = nullptr;
+  java_lang_reflect_InvocationTargetException_init = nullptr;
   java_lang_reflect_Parameter_init = nullptr;
   java_lang_reflect_Proxy_init = nullptr;
   java_lang_reflect_Proxy_invoke = nullptr;
@@ -549,6 +608,10 @@
   java_lang_Throwable_stackTrace = nullptr;
   java_lang_Throwable_stackState = nullptr;
   java_lang_Throwable_suppressedExceptions = nullptr;
+  java_nio_Buffer_address = nullptr;
+  java_nio_Buffer_elementSizeShift = nullptr;
+  java_nio_Buffer_limit = nullptr;
+  java_nio_Buffer_position = nullptr;
   java_nio_ByteBuffer_address = nullptr;
   java_nio_ByteBuffer_hb = nullptr;
   java_nio_ByteBuffer_isReadOnly = nullptr;
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 3c5144f..6f67fde6 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -29,17 +29,19 @@
 class Class;
 }  // namespace mirror
 
-// Various classes used in JNI. We cache them so we don't have to keep looking
-// them up. Similar to libcore's JniConstants (except there's no overlap, so
-// we keep them separate).
+// Various classes used in JNI. We cache them so we don't have to keep looking them up.
 
 struct WellKnownClasses {
  public:
-  static void Init(JNIEnv* env);  // Run before native methods are registered.
-  static void LateInit(JNIEnv* env);  // Run after native methods are registered.
+  // Run before native methods are registered.
+  static void Init(JNIEnv* env);
+  // Run after native methods are registered.
+  static void LateInit(JNIEnv* env);
 
   static void Clear();
 
+  static void HandleJniIdTypeChange(JNIEnv* env);
+
   static void InitStringInit(ObjPtr<mirror::Class> string_class,
                              ObjPtr<mirror::Class> string_builder_class)
       REQUIRES_SHARED(Locks::mutator_lock_);
@@ -48,6 +50,10 @@
 
   static ObjPtr<mirror::Class> ToClass(jclass global_jclass) REQUIRES_SHARED(Locks::mutator_lock_);
 
+ private:
+  static void InitFieldsAndMethodsOnly(JNIEnv* env);
+
+ public:
   static jclass dalvik_annotation_optimization_CriticalNative;
   static jclass dalvik_annotation_optimization_FastNative;
   static jclass dalvik_system_BaseDexClassLoader;
@@ -70,6 +76,7 @@
   static jclass java_lang_NoClassDefFoundError;
   static jclass java_lang_Object;
   static jclass java_lang_OutOfMemoryError;
+  static jclass java_lang_reflect_InvocationTargetException;
   static jclass java_lang_reflect_Parameter;
   static jclass java_lang_reflect_Parameter__array;
   static jclass java_lang_reflect_Proxy;
@@ -81,10 +88,10 @@
   static jclass java_lang_Thread;
   static jclass java_lang_ThreadGroup;
   static jclass java_lang_Throwable;
-  static jclass java_util_Collections;
-  static jclass java_util_function_Consumer;
   static jclass java_nio_ByteBuffer;
   static jclass java_nio_DirectByteBuffer;
+  static jclass java_util_Collections;
+  static jclass java_util_function_Consumer;
   static jclass libcore_reflect_AnnotationFactory;
   static jclass libcore_reflect_AnnotationMember;
   static jclass libcore_util_EmptyArray;
@@ -110,6 +117,7 @@
   static jmethodID java_lang_Long_valueOf;
   static jmethodID java_lang_ref_FinalizerReference_add;
   static jmethodID java_lang_ref_ReferenceQueue_add;
+  static jmethodID java_lang_reflect_InvocationTargetException_init;
   static jmethodID java_lang_reflect_Parameter_init;
   static jmethodID java_lang_reflect_Proxy_init;
   static jmethodID java_lang_reflect_Proxy_invoke;
@@ -135,6 +143,8 @@
   static jfieldID dalvik_system_DexPathList_dexElements;
   static jfieldID dalvik_system_DexPathList__Element_dexFile;
   static jfieldID dalvik_system_VMRuntime_nonSdkApiUsageConsumer;
+  static jfieldID java_io_FileDescriptor_descriptor;
+  static jfieldID java_io_FileDescriptor_ownerId;
   static jfieldID java_lang_Thread_parkBlocker;
   static jfieldID java_lang_Thread_daemon;
   static jfieldID java_lang_Thread_group;
@@ -155,6 +165,10 @@
   static jfieldID java_lang_Throwable_stackTrace;
   static jfieldID java_lang_Throwable_stackState;
   static jfieldID java_lang_Throwable_suppressedExceptions;
+  static jfieldID java_nio_Buffer_address;
+  static jfieldID java_nio_Buffer_elementSizeShift;
+  static jfieldID java_nio_Buffer_limit;
+  static jfieldID java_nio_Buffer_position;
   static jfieldID java_nio_ByteBuffer_address;
   static jfieldID java_nio_ByteBuffer_hb;
   static jfieldID java_nio_ByteBuffer_isReadOnly;
diff --git a/sigchainlib/Android.bp b/sigchainlib/Android.bp
index 3209478..ac909a6 100644
--- a/sigchainlib/Android.bp
+++ b/sigchainlib/Android.bp
@@ -16,9 +16,13 @@
 
 cc_library {
     name: "libsigchain",
+    defaults: ["art_defaults"],
+    visibility: [
+        // TODO(b/133140750): Clean this up.
+        "//frameworks/base/cmds/app_process",
+    ],
 
     host_supported: true,
-    defaults: ["art_defaults"],
     target: {
         linux: {
             shared: {
@@ -37,6 +41,15 @@
             whole_static_libs: ["libasync_safe"],
         },
     },
+
+    export_include_dirs: ["."],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+        // TODO(b/142944931) Clean this up. This is due to the dependency from
+        // app_process
+        "//apex_available:platform",
+    ],
 }
 
 // Create a dummy version of libsigchain which expose the necessary symbols
@@ -52,6 +65,8 @@
             whole_static_libs: ["libasync_safe"],
         },
     },
+
+    export_include_dirs: ["."],
 }
 
 art_cc_test {
@@ -65,10 +80,18 @@
 
 filegroup {
     name: "art_sigchain_version_script32.txt",
+    visibility: [
+        // TODO(b/133140750): Clean this up.
+        "//frameworks/base/cmds/app_process",
+    ],
     srcs: ["version-script32.txt"],
 }
 
 filegroup {
     name: "art_sigchain_version_script64.txt",
+    visibility: [
+        // TODO(b/133140750): Clean this up.
+        "//frameworks/base/cmds/app_process",
+    ],
     srcs: ["version-script64.txt"],
 }
diff --git a/sigchainlib/sigchain.cc b/sigchainlib/sigchain.cc
index 08ee690..f58fe21 100644
--- a/sigchainlib/sigchain.cc
+++ b/sigchainlib/sigchain.cc
@@ -59,7 +59,7 @@
 //               doesn't have SA_RESTART, and raise the signal to avoid restarting syscalls that are
 //               expected to be interrupted?
 
-#if defined(__BIONIC__) && !defined(__LP64__) && !defined(__mips__)
+#if defined(__BIONIC__) && !defined(__LP64__)
 static int sigismember(const sigset64_t* sigset, int signum) {
   return sigismember64(sigset, signum);
 }
@@ -98,9 +98,28 @@
 static decltype(&sigprocmask64) linked_sigprocmask64;
 #endif
 
-template<typename T>
-static void lookup_next_symbol(T* output, T wrapper, const char* name) {
-  void* sym = dlsym(RTLD_NEXT, name);  // NOLINT glibc triggers cert-dcl16-c with RTLD_NEXT.
+template <typename T>
+static void lookup_libc_symbol(T* output, T wrapper, const char* name) {
+#if defined(__BIONIC__)
+  constexpr const char* libc_name = "libc.so";
+#elif defined(__GLIBC__)
+#if __GNU_LIBRARY__ != 6
+#error unsupported glibc version
+#endif
+  constexpr const char* libc_name = "libc.so.6";
+#else
+#error unsupported libc: not bionic or glibc?
+#endif
+
+  static void* libc = []() {
+    void* result = dlopen(libc_name, RTLD_LOCAL | RTLD_LAZY);
+    if (!result) {
+      fatal("failed to dlopen %s: %s", libc_name, dlerror());
+    }
+    return result;
+  }();
+
+  void* sym = dlsym(libc, name);  // NOLINT glibc triggers cert-dcl16-c with RTLD_NEXT.
   if (sym == nullptr) {
     sym = dlsym(RTLD_DEFAULT, name);
     if (sym == wrapper || sym == sigaction) {
@@ -113,12 +132,12 @@
 __attribute__((constructor)) static void InitializeSignalChain() {
   static std::once_flag once;
   std::call_once(once, []() {
-    lookup_next_symbol(&linked_sigaction, sigaction, "sigaction");
-    lookup_next_symbol(&linked_sigprocmask, sigprocmask, "sigprocmask");
+    lookup_libc_symbol(&linked_sigaction, sigaction, "sigaction");
+    lookup_libc_symbol(&linked_sigprocmask, sigprocmask, "sigprocmask");
 
 #if defined(__BIONIC__)
-    lookup_next_symbol(&linked_sigaction64, sigaction64, "sigaction64");
-    lookup_next_symbol(&linked_sigprocmask64, sigprocmask64, "sigprocmask64");
+    lookup_libc_symbol(&linked_sigaction64, sigaction64, "sigaction64");
+    lookup_libc_symbol(&linked_sigprocmask64, sigprocmask64, "sigprocmask64");
 #endif
   });
 }
@@ -271,6 +290,8 @@
 // Leave an empty element at index 0 for convenience.
 static SignalChain chains[_NSIG + 1];
 
+static bool is_signal_hook_debuggable = false;
+
 void SignalChain::Handler(int signo, siginfo_t* siginfo, void* ucontext_raw) {
   // Try the special handlers first.
   // If one of them crashes, we'll reenter this handler and pass that crash onto the user handler.
@@ -339,6 +360,10 @@
                        SigactionType* old_action,
                        int (*linked)(int, const SigactionType*,
                                      SigactionType*)) {
+  if (is_signal_hook_debuggable) {
+    return 0;
+  }
+
   // If this signal has been claimed as a signal chain, record the user's
   // action but don't pass it on to the kernel.
   // Note that we check that the signal number is in range here.  An out of range signal
@@ -506,5 +531,9 @@
   }
 }
 
+extern "C" void SkipAddSignalHandler(bool value) {
+  is_signal_hook_debuggable = value;
+}
+
 }   // namespace art
 
diff --git a/sigchainlib/sigchain.h b/sigchainlib/sigchain.h
index 23fba03..9c24a6f 100644
--- a/sigchainlib/sigchain.h
+++ b/sigchainlib/sigchain.h
@@ -35,6 +35,7 @@
 extern "C" void RemoveSpecialSignalHandlerFn(int signal, bool (*fn)(int, siginfo_t*, void*));
 
 extern "C" void EnsureFrontOfChain(int signal);
+extern "C" void SkipAddSignalHandler(bool value);
 
 }  // namespace art
 
diff --git a/sigchainlib/sigchain_dummy.cc b/sigchainlib/sigchain_dummy.cc
index c274530..db72b58 100644
--- a/sigchainlib/sigchain_dummy.cc
+++ b/sigchainlib/sigchain_dummy.cc
@@ -46,6 +46,11 @@
   abort();
 }
 
+extern "C" void SkipAddSignalHandler(bool value ATTRIBUTE_UNUSED) {
+  log("SkipAddSignalHandler is not exported by the main executable.");
+  abort();
+}
+
 #pragma GCC diagnostic pop
 
 }  // namespace art
diff --git a/sigchainlib/version-script32.txt b/sigchainlib/version-script32.txt
index e8a18e7..70810e0 100644
--- a/sigchainlib/version-script32.txt
+++ b/sigchainlib/version-script32.txt
@@ -3,6 +3,7 @@
   EnsureFrontOfChain;
   AddSpecialSignalHandlerFn;
   RemoveSpecialSignalHandlerFn;
+  SkipAddSignalHandler;
   bsd_signal;
   sigaction;
   sigaction64;
diff --git a/sigchainlib/version-script64.txt b/sigchainlib/version-script64.txt
index 72c86a1..7bcd76b 100644
--- a/sigchainlib/version-script64.txt
+++ b/sigchainlib/version-script64.txt
@@ -3,6 +3,7 @@
   EnsureFrontOfChain;
   AddSpecialSignalHandlerFn;
   RemoveSpecialSignalHandlerFn;
+  SkipAddSignalHandler;
   sigaction;
   sigaction64;
   signal;
diff --git a/simulator/Android.bp b/simulator/Android.bp
index 223c891..1410444 100644
--- a/simulator/Android.bp
+++ b/simulator/Android.bp
@@ -97,4 +97,7 @@
         "libartbased",
         "libartd",
     ],
+    apex_available: [
+        "com.android.art.debug",
+    ],
 }
diff --git a/test.py b/test.py
index bc15736..9e54363 100755
--- a/test.py
+++ b/test.py
@@ -62,14 +62,9 @@
   if options.target or not options.host:
     build_target += ' test-art-target-gtest'
 
-  build_command = 'make'
-  build_command += ' -j' + str(options.n_threads)
-  build_command += ' -C ' + ANDROID_BUILD_TOP
-  build_command += ' ' + build_target
-
+  build_command = 'm -j' + str(options.n_threads) + ' ' + build_target
   print build_command
-
-  if subprocess.call(build_command.split()):
+  if subprocess.call(build_command.split(), cwd=ANDROID_BUILD_TOP):
       sys.exit(1)
 
 sys.exit(0)
diff --git a/test/004-SignalTest/signaltest.cc b/test/004-SignalTest/signaltest.cc
index 49fe369..6f97b2a 100644
--- a/test/004-SignalTest/signaltest.cc
+++ b/test/004-SignalTest/signaltest.cc
@@ -118,7 +118,7 @@
   sigfillset(&action.sa_mask);
   sigdelset(&action.sa_mask, UNBLOCKED_SIGNAL);
   action.sa_flags = SA_SIGINFO | SA_ONSTACK;
-#if !defined(__APPLE__) && !defined(__mips__)
+#if !defined(__APPLE__)
   action.sa_restorer = nullptr;
 #endif
 
diff --git a/test/004-ThreadStress/run b/test/004-ThreadStress/run
index 067e0d0..8004036 100755
--- a/test/004-ThreadStress/run
+++ b/test/004-ThreadStress/run
@@ -15,29 +15,7 @@
 # limitations under the License.
 
 # Enable lock contention logging.
-if [[ "x$ART_DEFAULT_GC_TYPE" = xGSS ]]; then
-  # NonMovingAlloc operations fail an assertion with the Generational
-  # Semi-Space (GSS) collector (see b/72738921); disable them for now
-  # by explicitly assigning frequencies to operations when the GSS
-  # collector is used.
-  #
-  # Note: The trick to use command substitution to have comments within
-  # a multi-line command is from https://stackoverflow.com/a/12797512.
-  ${RUN} --runtime-option -Xlockprofthreshold:10 "${@}" Main \
-    -oom:0.005           `#   1/200` \
-    -sigquit:0.095       `#  19/200` \
-    -alloc:0.225         `#  45/200` \
-    -largealloc:0.05     `#  10/200` \
-    -nonmovingalloc:0.0  `#   0/200` \
-    -stacktrace:0.1      `#  20/200` \
-    -exit:0.225          `#  45/200` \
-    -sleep:0.125         `#  25/200` \
-    -timedwait:0.05      `#  10/200` \
-    -wait:0.075          `#  15/200` \
-    -queuedwait:0.05     `#  10/200`
-else
-  ${RUN} --runtime-option -Xlockprofthreshold:10 "${@}"
-fi
+${RUN} --runtime-option -Xlockprofthreshold:10 "${@}"
 return_status1=$?
 
 # Run locks-only mode with stack-dump lock profiling. Reduce the number of total operations from
diff --git a/test/005-annotations/build b/test/005-annotations/build
index 5342eea..dad23b2 100644
--- a/test/005-annotations/build
+++ b/test/005-annotations/build
@@ -41,4 +41,6 @@
 
 ######################################################################
 
+# Build intermediate object to preserve class-retention annotations.
+export D8_FLAGS=--intermediate
 ./default-build "$@"
diff --git a/test/018-stack-overflow/expected.txt b/test/018-stack-overflow/expected.txt
index cc10c0c..dbe8112 100644
--- a/test/018-stack-overflow/expected.txt
+++ b/test/018-stack-overflow/expected.txt
@@ -1,9 +1,3 @@
-libartd run.
-caught SOE3 in testSelfRecursion
-caught SOE10 in testSelfRecursion
-caught SOE in testMutualRecursion
-SOE test done
-libart run.
 caught SOE3 in testSelfRecursion
 caught SOE10 in testSelfRecursion
 caught SOE in testMutualRecursion
diff --git a/test/018-stack-overflow/run b/test/018-stack-overflow/run
deleted file mode 100755
index 7443bd7..0000000
--- a/test/018-stack-overflow/run
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Run normal. This will be the debug build.
-echo "libartd run."
-${RUN} "${@}"
-return_status1=$?
-
-# Run non-debug.
-echo "libart run."
-${RUN} "${@/#libartd.so/libart.so}"
-return_status2=$?
-
-# Make sure we don't silently ignore an early failure.
-(exit $return_status1) && (exit $return_status2)
diff --git a/test/030-bad-finalizer/check b/test/030-bad-finalizer/check
deleted file mode 100755
index e5d5c4e..0000000
--- a/test/030-bad-finalizer/check
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Strip timeout logging. These are "E/System" messages.
-sed -e '/^E\/System/d' "$2" > "$2.tmp"
-
-diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/030-bad-finalizer/run b/test/030-bad-finalizer/run
index 7a0d0d0..54747ee 100755
--- a/test/030-bad-finalizer/run
+++ b/test/030-bad-finalizer/run
@@ -14,6 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# The test logs error messages which is expected, discard them.
+export ANDROID_LOG_TAGS='*:f'
+
 # Squash the exit status and put it in expected
-./default-run "$@"
+./default-run --external-log-tags "${@}"
 echo "exit status:" $?
diff --git a/test/059-finalizer-throw/check b/test/059-finalizer-throw/check
deleted file mode 100755
index 8bc59c6..0000000
--- a/test/059-finalizer-throw/check
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Strip uncaught exception logging. These are "E/System" messages.
-sed -e '/^E\/System/d' "$2" > "$2.tmp"
-
-diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/059-finalizer-throw/run b/test/059-finalizer-throw/run
new file mode 100644
index 0000000..dda4159
--- /dev/null
+++ b/test/059-finalizer-throw/run
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The test logs error messages which is expected, discard them.
+export ANDROID_LOG_TAGS='*:f'
+exec ${RUN} --external-log-tags "${@}"
diff --git a/test/070-nio-buffer/src/Main.java b/test/070-nio-buffer/src/Main.java
index 86eb553..a3eeb3f 100644
--- a/test/070-nio-buffer/src/Main.java
+++ b/test/070-nio-buffer/src/Main.java
@@ -14,7 +14,6 @@
  * limitations under the License.
  */
 
-import java.nio.Buffer;
 import java.nio.BufferOverflowException;
 import java.nio.ByteBuffer;
 import java.nio.ByteOrder;
@@ -51,9 +50,9 @@
             1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031
         };
 
-        ((Buffer) shortBuf).position(0);
+        shortBuf.position(0);
         shortBuf.put(myShorts, 0, 32);      // should work
-        ((Buffer) shortBuf).position(0);
+        shortBuf.position(0);
         shortBuf.put(myShorts, 16, 16);     // should work
         shortBuf.put(myShorts, 16, 16);     // advance to end
 
@@ -65,7 +64,7 @@
         }
 
         try {
-            ((Buffer) shortBuf).position(0);
+            shortBuf.position(0);
             shortBuf.put(myShorts, 0, 33);     // should fail
             System.out.println("ERROR: out-of-bounds put succeeded\n");
         } catch (IndexOutOfBoundsException ioobe) {
@@ -73,7 +72,7 @@
         }
 
         try {
-            ((Buffer) shortBuf).position(16);
+            shortBuf.position(16);
             shortBuf.put(myShorts, 0, 17);     // should fail
             System.out.println("ERROR: out-of-bounds put succeeded\n");
         } catch (BufferOverflowException boe) {
@@ -94,13 +93,13 @@
         int data[] = new int[25];
         //FloatBuffer int1 = direct.asFloatBuffer();
         //float data[] = new float[25];
-        ((Buffer) int1).clear();
-        int1.put(data);
-        ((Buffer) int1).position(0);
-
-        ((Buffer) int1).clear();
+        int1.clear ();
         int1.put (data);
-        ((Buffer) int1).position(0);
+        int1.position (0);
+
+        int1.clear ();
+        int1.put (data);
+        int1.position (0);
     }
 
     /*
@@ -120,7 +119,7 @@
     }
 
     static void storeValues(ByteBuffer directBuf) {
-        ((Buffer) directBuf).position(0);
+        directBuf.position(0);
         ShortBuffer shortBuf = directBuf.asShortBuffer();
         CharBuffer charBuf = directBuf.asCharBuffer();
         IntBuffer intBuf = directBuf.asIntBuffer();
@@ -158,7 +157,7 @@
             throw new RuntimeException("double get/store failed");
         }
 
-        ((Buffer) directBuf).position(0);
+        directBuf.position(0);
         char[] outBuf = new char[directBuf.limit() * 2];
         for (int i = 0; i < directBuf.limit(); i++) {
             byte b = directBuf.get();
diff --git a/test/100-reflect2/src/Main.java b/test/100-reflect2/src/Main.java
index 5f6ffa8..56b4a82 100644
--- a/test/100-reflect2/src/Main.java
+++ b/test/100-reflect2/src/Main.java
@@ -308,11 +308,73 @@
     }
   }
 
+  private static void testReflectFieldSetDuringClinit() {
+    try {
+      int value = ReflectFieldSetDuringClinit.intField;
+      int expected = 42;
+      if (value != expected) {
+        System.out.println("Unexpected value: " + value + ", expected: " + expected);
+      }
+    } catch (Exception e) {
+      // Error.
+      e.printStackTrace(System.out);
+    }
+  }
+
+  private static void testReflectNewInstanceDuringClinit() {
+    try {
+      int value = ReflectNewInstanceDuringClinit.instance.intField;
+      int expected = 42;
+      if (value != expected) {
+        System.out.println("Unexpected value: " + value + ", expected: " + expected);
+      }
+    } catch (Exception e) {
+      // Error.
+      e.printStackTrace(System.out);
+    }
+  }
+
   public static void main(String[] args) throws Exception {
     testFieldReflection();
     testMethodReflection();
     testConstructorReflection();
     testPackagePrivateConstructor();
     testPackagePrivateAccessibleConstructor();
+    testReflectFieldSetDuringClinit();
+    testReflectNewInstanceDuringClinit();
+  }
+}
+
+class ReflectFieldSetDuringClinit {
+  public static int intField;
+
+  static {
+    try {
+      Field f = ReflectFieldSetDuringClinit.class.getDeclaredField("intField");
+      f.setInt(null, 42);
+    } catch (Exception e) {
+      // Error.
+      e.printStackTrace(System.out);
+    }
+  }
+}
+
+class ReflectNewInstanceDuringClinit {
+  public int intField;
+
+  public ReflectNewInstanceDuringClinit() {
+    intField = 42;
+  }
+
+  public static ReflectNewInstanceDuringClinit instance;
+
+  static {
+    try {
+      Constructor<?> ctor = ReflectNewInstanceDuringClinit.class.getConstructor();
+      instance = (ReflectNewInstanceDuringClinit) ctor.newInstance();
+    } catch (Exception e) {
+      // Error.
+      e.printStackTrace(System.out);
+    }
   }
 }
diff --git a/test/1002-notify-startup/check b/test/1002-notify-startup/check
new file mode 100644
index 0000000..9d8f464
--- /dev/null
+++ b/test/1002-notify-startup/check
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Oat file manager will complain about duplicate dex files. Ignore.
+sed -e '/.*oat_file_manager.*/d' "$2" > "$2.tmp"
+
+diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/1002-notify-startup/src-art/Main.java b/test/1002-notify-startup/src-art/Main.java
index 9a1e442..8951af8 100644
--- a/test/1002-notify-startup/src-art/Main.java
+++ b/test/1002-notify-startup/src-art/Main.java
@@ -14,15 +14,62 @@
  * limitations under the License.
  */
 
+import dalvik.system.PathClassLoader;
 import dalvik.system.VMRuntime;
 
-public class Main {
-  public static void main(String[] args) {
-    System.loadLibrary(args[0]);
-    System.out.println("Startup completed: " + hasStartupCompleted());
-    VMRuntime.getRuntime().notifyStartupCompleted();
-    System.out.println("Startup completed: " + hasStartupCompleted());
-  }
+import java.lang.ref.WeakReference;
+import java.lang.reflect.Constructor;
+import java.util.concurrent.atomic.AtomicBoolean;
 
-  private static native boolean hasStartupCompleted();
+public class Main {
+    static final String DEX_FILE = System.getenv("DEX_LOCATION") + "/1002-notify-startup.jar";
+    static final String LIBRARY_SEARCH_PATH = System.getProperty("java.library.path");
+    static AtomicBoolean completed = new AtomicBoolean(false);
+
+    public static void main(String[] args) {
+        System.loadLibrary(args[0]);
+        System.out.println("Startup completed: " + hasStartupCompleted());
+        Thread workerThread = new WorkerThread();
+        workerThread.start();
+        do {
+            resetStartupCompleted();
+            VMRuntime.getRuntime().notifyStartupCompleted();
+            Thread.yield();
+        } while (!completed.get());
+        try {
+            workerThread.join();
+        } catch (Throwable e) {
+            System.err.println(e);
+        }
+        System.out.println("Startup completed: " + hasStartupCompleted());
+    }
+
+    private static class WorkerThread extends Thread {
+        static final int NUM_ITERATIONS = 100;
+
+        private WeakReference<Class<?>> $noinline$loadClassInLoader() throws Exception {
+            ClassLoader loader = new PathClassLoader(
+                    DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader());
+            Class ret = loader.loadClass("Main");
+            return new WeakReference(ret);
+        }
+
+        public void run() {
+            for (int i = 0; i < NUM_ITERATIONS; ++i) {
+                try {
+                    WeakReference<Class<?>> ref = $noinline$loadClassInLoader();
+                    Runtime.getRuntime().gc();
+                    Thread.yield();
+                    // Don't validate the unloading since app images will keep classes live (for now).
+                } catch (Throwable e) {
+                    System.err.println(e);
+                    break;
+                }
+            }
+            completed.set(true);
+        }
+    }
+
+    private static native boolean hasStartupCompleted();
+    private static native void resetStartupCompleted();
 }
diff --git a/test/1002-notify-startup/startup_interface.cc b/test/1002-notify-startup/startup_interface.cc
index 8705bb2..c9a238a 100644
--- a/test/1002-notify-startup/startup_interface.cc
+++ b/test/1002-notify-startup/startup_interface.cc
@@ -24,5 +24,9 @@
   return Runtime::Current()->GetStartupCompleted();
 }
 
+extern "C" void JNICALL Java_Main_resetStartupCompleted(JNIEnv*, jclass) {
+  Runtime::Current()->ResetStartupCompleted();
+}
+
 }  // namespace
 }  // namespace art
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index cc7e806..754cb00 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -211,7 +211,7 @@
   struct sigaction tmp;
   sigemptyset(&tmp.sa_mask);
   tmp.sa_sigaction = test_sigaction_handler;
-#if !defined(__APPLE__) && !defined(__mips__)
+#if !defined(__APPLE__)
   tmp.sa_restorer = nullptr;
 #endif
 
diff --git a/test/115-native-bridge/run b/test/115-native-bridge/run
index 22f5c67..8ce44a9 100644
--- a/test/115-native-bridge/run
+++ b/test/115-native-bridge/run
@@ -16,11 +16,16 @@
 
 ARGS=${@}
 
+BRIDGE_SO=libnativebridgetestd.so
+if echo ${ARGS} | grep -q " -O"; then
+  BRIDGE_SO=libnativebridgetest.so
+fi
+
 # Use libnativebridgetest as a native bridge, start NativeBridgeMain (Main is JniTest main file).
 LIBPATH=$(echo ${ARGS} | sed -r 's/.*Djava.library.path=([^ ]*) .*/\1/')
 # Trim all but the last entry in LIBPATH, which will be nativetest[64]
 LIBPATH=${LIBPATH##*:}
-ln -sf ${LIBPATH}/libnativebridgetest.so .
+ln -sf ${LIBPATH}/$BRIDGE_SO .
 touch libarttest.so
 touch libarttestd.so
 touch libinvalid.so
@@ -31,4 +36,4 @@
 LEFT=$(echo ${ARGS} | sed -r 's/-Djava.library.path.*//')
 RIGHT=$(echo ${ARGS} | sed -r 's/.*Djava.library.path[^ ]* //')
 MODARGS="${LEFT} -Djava.library.path=`pwd` ${RIGHT}"
-exec ${RUN} --runtime-option -Xforce-nb-testing --runtime-option -XX:NativeBridge=libnativebridgetest.so ${MODARGS} NativeBridgeMain
+exec ${RUN} --runtime-option -Xforce-nb-testing --runtime-option -XX:NativeBridge=$BRIDGE_SO ${MODARGS} NativeBridgeMain
diff --git a/test/1336-short-finalizer-timeout/check b/test/1336-short-finalizer-timeout/check
deleted file mode 100755
index e5d5c4e..0000000
--- a/test/1336-short-finalizer-timeout/check
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Strip timeout logging. These are "E/System" messages.
-sed -e '/^E\/System/d' "$2" > "$2.tmp"
-
-diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/1336-short-finalizer-timeout/run b/test/1336-short-finalizer-timeout/run
index 3e33524..4a07034 100755
--- a/test/1336-short-finalizer-timeout/run
+++ b/test/1336-short-finalizer-timeout/run
@@ -14,6 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# The test logs error messages which is expected, discard them.
+export ANDROID_LOG_TAGS='*:f'
+
 # Squash the exit status and put it in expected
-./default-run "$@" --runtime-option -XX:FinalizerTimeoutMs=500
+./default-run --external-log-tags "$@" --runtime-option -XX:FinalizerTimeoutMs=500
 echo "exit status:" $?
diff --git a/test/1337-gc-coverage/gc_coverage.cc b/test/1337-gc-coverage/gc_coverage.cc
index ac959f6..eb8ec0e 100644
--- a/test/1337-gc-coverage/gc_coverage.cc
+++ b/test/1337-gc-coverage/gc_coverage.cc
@@ -46,19 +46,5 @@
   return reinterpret_cast<jlong>(soa.Decode<mirror::Object>(object).Ptr());
 }
 
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_supportCollectorTransition(JNIEnv*, jclass) {
-  // Same as supportHomogeneousSpaceCompact for now.
-  return Runtime::Current()->GetHeap()->SupportHomogeneousSpaceCompactAndCollectorTransitions() ?
-      JNI_TRUE : JNI_FALSE;
-}
-
-extern "C" JNIEXPORT void JNICALL Java_Main_transitionToSS(JNIEnv*, jclass) {
-  Runtime::Current()->GetHeap()->TransitionCollector(gc::kCollectorTypeSS);
-}
-
-extern "C" JNIEXPORT void JNICALL Java_Main_transitionToCMS(JNIEnv*, jclass) {
-  Runtime::Current()->GetHeap()->TransitionCollector(gc::kCollectorTypeCMS);
-}
-
 }  // namespace
 }  // namespace art
diff --git a/test/1337-gc-coverage/src/Main.java b/test/1337-gc-coverage/src/Main.java
index 7875eb1..128ad4d 100644
--- a/test/1337-gc-coverage/src/Main.java
+++ b/test/1337-gc-coverage/src/Main.java
@@ -22,7 +22,6 @@
   public static void main(String[] args) {
     System.loadLibrary(args[0]);
     testHomogeneousCompaction();
-    testCollectorTransitions();
     System.out.println("Done.");
   }
 
@@ -68,40 +67,10 @@
     }
   }
 
-  private static void testCollectorTransitions() {
-    if (supportCollectorTransition()) {
-      Object o = new Object();
-      // Transition to semi-space collector.
-      allocateStuff();
-      transitionToSS();
-      allocateStuff();
-      long addressBefore = objectAddress(o);
-      Runtime.getRuntime().gc();
-      long addressAfter = objectAddress(o);
-      if (addressBefore == addressAfter) {
-        System.out.println("error: Expected different adddress " + addressBefore + " vs " +
-            addressAfter);
-      }
-      // Transition back to CMS.
-      transitionToCMS();
-      allocateStuff();
-      addressBefore = objectAddress(o);
-      Runtime.getRuntime().gc();
-      addressAfter = objectAddress(o);
-      if (addressBefore != addressAfter) {
-        System.out.println("error: Expected same adddress " + addressBefore + " vs " +
-            addressAfter);
-      }
-    }
-  }
-
   // Methods to get access to ART internals.
   private static native boolean supportHomogeneousSpaceCompact();
   private static native boolean performHomogeneousSpaceCompact();
   private static native void incrementDisableMovingGC();
   private static native void decrementDisableMovingGC();
   private static native long objectAddress(Object object);
-  private static native boolean supportCollectorTransition();
-  private static native void transitionToSS();
-  private static native void transitionToCMS();
 }
diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc
index 4d886f5..aeb996c 100644
--- a/test/137-cfi/cfi.cc
+++ b/test/137-cfi/cfi.cc
@@ -25,6 +25,7 @@
 
 #include "jni.h"
 
+#include <android-base/file.h>
 #include <android-base/logging.h>
 #include <android-base/stringprintf.h>
 #include <backtrace/Backtrace.h>
@@ -32,9 +33,11 @@
 #include "base/file_utils.h"
 #include "base/logging.h"
 #include "base/macros.h"
+#include "base/mutex.h"
 #include "base/utils.h"
 #include "gc/heap.h"
 #include "gc/space/image_space.h"
+#include "jit/debugger_interface.h"
 #include "oat_file.h"
 #include "runtime.h"
 
@@ -56,10 +59,11 @@
 }
 
 extern "C" JNIEXPORT jint JNICALL Java_Main_startSecondaryProcess(JNIEnv*, jclass) {
+  printf("Java_Main_startSecondaryProcess\n");
 #if __linux__
   // Get our command line so that we can use it to start identical process.
   std::string cmdline;  // null-separated and null-terminated arguments.
-  ReadFileToString("/proc/self/cmdline", &cmdline);
+  android::base::ReadFileToString("/proc/self/cmdline", &cmdline);
   cmdline = cmdline + "--secondary" + '\0';  // Let the child know it is a helper.
 
   // Split the string into individual arguments suitable for execv.
@@ -83,7 +87,9 @@
 }
 
 extern "C" JNIEXPORT jboolean JNICALL Java_Main_sigstop(JNIEnv*, jclass) {
+  printf("Java_Main_sigstop\n");
 #if __linux__
+  MutexLock mu(Thread::Current(), *GetNativeDebugInfoLock());  // Avoid races with the JIT thread.
   raise(SIGSTOP);
 #endif
   return true;  // Prevent the compiler from tail-call optimizing this method away.
@@ -118,8 +124,6 @@
 }
 
 static void MoreErrorInfo(pid_t pid, bool sig_quit_on_fail) {
-  printf("Secondary pid is %d\n", pid);
-
   PrintFileToLog(android::base::StringPrintf("/proc/%d/maps", pid), ::android::base::ERROR);
 
   if (sig_quit_on_fail) {
@@ -132,7 +136,10 @@
 #endif
 
 extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindInProcess(JNIEnv*, jclass) {
+  printf("Java_Main_unwindInProcess\n");
 #if __linux__
+  MutexLock mu(Thread::Current(), *GetNativeDebugInfoLock());  // Avoid races with the JIT thread.
+
   std::unique_ptr<Backtrace> bt(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, GetTid()));
   if (!bt->Unwind(0, nullptr)) {
     printf("Cannot unwind in process.\n");
@@ -203,6 +210,7 @@
 #endif
 
 extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindOtherProcess(JNIEnv*, jclass, jint pid_int) {
+  printf("Java_Main_unwindOtherProcess\n");
 #if __linux__
   pid_t pid = static_cast<pid_t>(pid_int);
 
@@ -219,7 +227,7 @@
   int total_sleep_time_usec = 0;
   int signal = wait_for_sigstop(pid, &total_sleep_time_usec, &detach_failed);
   if (signal != SIGSTOP) {
-    LOG(WARNING) << "wait_for_sigstop failed.";
+    printf("wait_for_sigstop failed.\n");
     return JNI_FALSE;
   }
 
@@ -248,10 +256,12 @@
 
   constexpr bool kSigQuitOnFail = true;
   if (!result) {
+    printf("Failed to unwind secondary with pid %d\n", pid);
     MoreErrorInfo(pid, kSigQuitOnFail);
   }
 
   if (ptrace(PTRACE_DETACH, pid, 0, 0) != 0) {
+    printf("Detach failed\n");
     PLOG(ERROR) << "Detach failed";
   }
 
@@ -265,6 +275,7 @@
 
   return result ? JNI_TRUE : JNI_FALSE;
 #else
+  printf("Remote unwind supported only on linux\n");
   UNUSED(pid_int);
   return JNI_FALSE;
 #endif
diff --git a/test/137-cfi/expected.txt b/test/137-cfi/expected.txt
index eedae8f..9411154 100644
--- a/test/137-cfi/expected.txt
+++ b/test/137-cfi/expected.txt
@@ -1,7 +1,18 @@
+args: --test-local --test-remote
 JNI_OnLoad called
-Unwind in process: PASS
+Java_Main_unwindInProcess
+PASS
+Java_Main_startSecondaryProcess
+Java_Main_unwindOtherProcess
+args: --test-local --test-remote --secondary
 JNI_OnLoad called
-Unwind other process: PASS
+Java_Main_sigstop
+PASS
+args: --test-remote
 JNI_OnLoad called
+Java_Main_startSecondaryProcess
+Java_Main_unwindOtherProcess
+args: --test-remote --secondary
 JNI_OnLoad called
-Unwind other process: PASS
+Java_Main_sigstop
+PASS
diff --git a/test/137-cfi/src/Main.java b/test/137-cfi/src/Main.java
index ed5f332..4633303 100644
--- a/test/137-cfi/src/Main.java
+++ b/test/137-cfi/src/Main.java
@@ -31,6 +31,8 @@
   private static boolean secondary;
 
   public static void main(String[] args) throws Exception {
+      System.out.println("args: " + String.join(" ", Arrays.copyOfRange(args, 1, args.length)));
+
       System.loadLibrary(args[0]);
       for (int i = 1; i < args.length; i++) {
           if (args[i].equals("--test-local")) {
@@ -69,16 +71,14 @@
       }
 
       if (testLocal) {
-          String result = unwindInProcess() ? "PASS" : "FAIL";
-          System.out.println("Unwind in process: " + result);
+          System.out.println(unwindInProcess() ? "PASS" : "FAIL");
       }
 
       if (testRemote) {
           // Start a secondary helper process. It will stop itself when it is ready.
           int pid = startSecondaryProcess();
           // Wait for the secondary process to stop and then unwind it remotely.
-          String result = unwindOtherProcess(pid) ? "PASS" : "FAIL";
-          System.out.println("Unwind other process: " + result);
+          System.out.println(unwindOtherProcess(pid) ? "PASS" : "FAIL");
       }
   }
 
diff --git a/test/146-bad-interface/check b/test/146-bad-interface/check
new file mode 100644
index 0000000..9d8f464
--- /dev/null
+++ b/test/146-bad-interface/check
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Oat file manager will complain about duplicate dex files. Ignore.
+sed -e '/.*oat_file_manager.*/d' "$2" > "$2.tmp"
+
+diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/151-OpenFileLimit/run b/test/151-OpenFileLimit/run
index 5c83fd0..6faeb0d 100755
--- a/test/151-OpenFileLimit/run
+++ b/test/151-OpenFileLimit/run
@@ -14,8 +14,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# Filter out expected error messages, which happen on device.
+export ANDROID_LOG_TAGS='*:f'
+
 flags="$@"
 
 # Reduce the file descriptor limit so the test will reach the limit sooner.
 ulimit -n 512
-${RUN} ${flags}
+${RUN} --external-log-tags ${flags}
diff --git a/test/159-app-image-fields/run b/test/159-app-image-fields/run
index 7cc107a..74facb0 100644
--- a/test/159-app-image-fields/run
+++ b/test/159-app-image-fields/run
@@ -16,5 +16,6 @@
 
 # Use a profile to put specific classes in the app image.
 # Also run the compiler with -j1 to ensure specific class verification order.
+# And limit the managed heap to 16 MiB to speed up GCs.
 exec ${RUN} $@ --profile -Xcompiler-option --compiler-filter=speed-profile \
-    -Xcompiler-option -j1
+    -Xcompiler-option -j1 --runtime-option -Xmx16m
diff --git a/test/163-app-image-methods/run b/test/163-app-image-methods/run
index 7cc107a..74facb0 100644
--- a/test/163-app-image-methods/run
+++ b/test/163-app-image-methods/run
@@ -16,5 +16,6 @@
 
 # Use a profile to put specific classes in the app image.
 # Also run the compiler with -j1 to ensure specific class verification order.
+# And limit the managed heap to 16 MiB to speed up GCs.
 exec ${RUN} $@ --profile -Xcompiler-option --compiler-filter=speed-profile \
-    -Xcompiler-option -j1
+    -Xcompiler-option -j1 --runtime-option -Xmx16m
diff --git a/test/176-app-image-string/expected.txt b/test/176-app-image-string/expected.txt
new file mode 100644
index 0000000..2ae2839
--- /dev/null
+++ b/test/176-app-image-string/expected.txt
@@ -0,0 +1 @@
+pass
diff --git a/test/176-app-image-string/info.txt b/test/176-app-image-string/info.txt
new file mode 100644
index 0000000..ca0951c
--- /dev/null
+++ b/test/176-app-image-string/info.txt
@@ -0,0 +1 @@
+Regression test for strings being wrongly interned in images.
diff --git a/test/176-app-image-string/profile b/test/176-app-image-string/profile
new file mode 100644
index 0000000..6d9e6c8
--- /dev/null
+++ b/test/176-app-image-string/profile
@@ -0,0 +1 @@
+LMain;
diff --git a/test/176-app-image-string/run b/test/176-app-image-string/run
new file mode 100644
index 0000000..52d2b5f
--- /dev/null
+++ b/test/176-app-image-string/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+exec ${RUN} $@ --profile
diff --git a/test/176-app-image-string/src/Main.java b/test/176-app-image-string/src/Main.java
new file mode 100644
index 0000000..f842c10
--- /dev/null
+++ b/test/176-app-image-string/src/Main.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+    public static final String internedString = "test-string";
+    public static final String nonInternedCopy = new String("test-string");
+
+    public static void main(String args[]) throws Exception {
+        if (internedString == nonInternedCopy) {
+            throw new AssertionError();
+        }
+        System.out.println("pass");
+    }
+}
diff --git a/test/177-visibly-initialized-deadlock/expected.txt b/test/177-visibly-initialized-deadlock/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/177-visibly-initialized-deadlock/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/177-visibly-initialized-deadlock/info.txt b/test/177-visibly-initialized-deadlock/info.txt
new file mode 100644
index 0000000..9ede3d5
--- /dev/null
+++ b/test/177-visibly-initialized-deadlock/info.txt
@@ -0,0 +1,2 @@
+Regression test for deadlock when trying to make class visibly initialized.
+b/138561860
diff --git a/test/177-visibly-initialized-deadlock/src/Main.java b/test/177-visibly-initialized-deadlock/src/Main.java
new file mode 100644
index 0000000..1755f54
--- /dev/null
+++ b/test/177-visibly-initialized-deadlock/src/Main.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Main {
+  private static final boolean DEBUG = false;
+
+  public static void main(String[] args) throws Exception {
+    System.loadLibrary(args[0]);
+    makeVisiblyInitialized();
+    Class<?> testClass = Class.forName("TestClass");  // Request initialized class.
+    boolean is_visibly_initialized = isVisiblyInitialized(testClass);
+    if (DEBUG) {
+      System.out.println((is_visibly_initialized ? "Already" : "Not yet") + " visibly initialized");
+    }
+    if (!is_visibly_initialized) {
+      synchronized(testClass) {
+        Thread t = new Thread() {
+          public void run() {
+            // Regression test: This would have previously deadlocked
+            // trying to lock on testClass. b/138561860
+            makeVisiblyInitialized();
+          }
+        };
+        t.start();
+        t.join();
+      }
+      if (!isVisiblyInitialized(testClass)) {
+        throw new Error("Should be visibly initialized now.");
+      }
+    }
+  }
+
+  public static native void makeVisiblyInitialized();
+  public static native boolean isVisiblyInitialized(Class<?> klass);
+}
+
+class TestClass {
+  static {
+    // Add a static constructor that prevents initialization at compile time (app images).
+    Main.isVisiblyInitialized(TestClass.class);  // Native call, discard result.
+  }
+}
diff --git a/test/177-visibly-initialized-deadlock/visibly_initialized.cc b/test/177-visibly-initialized-deadlock/visibly_initialized.cc
new file mode 100644
index 0000000..44bb318
--- /dev/null
+++ b/test/177-visibly-initialized-deadlock/visibly_initialized.cc
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "jni.h"
+
+#include "class_linker.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-current-inl.h"
+
+namespace art {
+
+extern "C" JNIEXPORT void JNICALL Java_Main_makeVisiblyInitialized(JNIEnv*, jclass) {
+  Runtime::Current()->GetClassLinker()->MakeInitializedClassesVisiblyInitialized(
+      Thread::Current(), /*wait=*/ true);
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isVisiblyInitialized(JNIEnv*, jclass, jclass c) {
+  ScopedObjectAccess soa(Thread::Current());
+  ObjPtr<mirror::Class> klass = soa.Decode<mirror::Class>(c);
+  return klass->IsVisiblyInitialized() ? JNI_TRUE : JNI_FALSE;
+}
+
+}  // namespace art
diff --git a/test/178-app-image-native-method/check b/test/178-app-image-native-method/check
new file mode 100755
index 0000000..5336295
--- /dev/null
+++ b/test/178-app-image-native-method/check
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Filter out error messages for missing native methods.
+grep -v 'No implementation found for ' "$2" | diff -q "$1" - >/dev/null
diff --git a/test/178-app-image-native-method/expected.txt b/test/178-app-image-native-method/expected.txt
new file mode 100644
index 0000000..30cc336
--- /dev/null
+++ b/test/178-app-image-native-method/expected.txt
@@ -0,0 +1,14 @@
+JNI_OnLoad called
+test
+testFast
+testCritical
+testMissing
+testMissingFast
+testMissingCritical
+JNI_OnLoad called
+test
+testFast
+testCritical
+testMissing
+testMissingFast
+testMissingCritical
diff --git a/test/178-app-image-native-method/info.txt b/test/178-app-image-native-method/info.txt
new file mode 100644
index 0000000..4cf01fe
--- /dev/null
+++ b/test/178-app-image-native-method/info.txt
@@ -0,0 +1 @@
+Tests that native methods in app image using compiled stubs or Generic JNI work correctly.
diff --git a/test/178-app-image-native-method/native_methods.cc b/test/178-app-image-native-method/native_methods.cc
new file mode 100644
index 0000000..794a78a
--- /dev/null
+++ b/test/178-app-image-native-method/native_methods.cc
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni.h"
+
+namespace art {
+
+static inline bool VerifyManyParameters(
+    jint i1, jlong l1, jfloat f1, jdouble d1,
+    jint i2, jlong l2, jfloat f2, jdouble d2,
+    jint i3, jlong l3, jfloat f3, jdouble d3,
+    jint i4, jlong l4, jfloat f4, jdouble d4,
+    jint i5, jlong l5, jfloat f5, jdouble d5,
+    jint i6, jlong l6, jfloat f6, jdouble d6,
+    jint i7, jlong l7, jfloat f7, jdouble d7,
+    jint i8, jlong l8, jfloat f8, jdouble d8) {
+  return
+      (i1 == 11) && (l1 == 12) && (f1 == 13.0) && (d1 == 14.0) &&
+      (i2 == 21) && (l2 == 22) && (f2 == 23.0) && (d2 == 24.0) &&
+      (i3 == 31) && (l3 == 32) && (f3 == 33.0) && (d3 == 34.0) &&
+      (i4 == 41) && (l4 == 42) && (f4 == 43.0) && (d4 == 44.0) &&
+      (i5 == 51) && (l5 == 52) && (f5 == 53.0) && (d5 == 54.0) &&
+      (i6 == 61) && (l6 == 62) && (f6 == 63.0) && (d6 == 64.0) &&
+      (i7 == 71) && (l7 == 72) && (f7 == 73.0) && (d7 == 74.0) &&
+      (i8 == 81) && (l8 == 82) && (f8 == 83.0) && (d8 == 84.0);
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Test_nativeMethodVoid(JNIEnv*, jclass) {
+  return 42;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Test_nativeMethod(JNIEnv*, jclass, jint i) {
+  return i;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_Test_nativeMethodWithManyParameters(
+    JNIEnv*, jclass,
+    jint i1, jlong l1, jfloat f1, jdouble d1,
+    jint i2, jlong l2, jfloat f2, jdouble d2,
+    jint i3, jlong l3, jfloat f3, jdouble d3,
+    jint i4, jlong l4, jfloat f4, jdouble d4,
+    jint i5, jlong l5, jfloat f5, jdouble d5,
+    jint i6, jlong l6, jfloat f6, jdouble d6,
+    jint i7, jlong l7, jfloat f7, jdouble d7,
+    jint i8, jlong l8, jfloat f8, jdouble d8) {
+  bool ok = VerifyManyParameters(
+      i1, l1, f1, d1,
+      i2, l2, f2, d2,
+      i3, l3, f3, d3,
+      i4, l4, f4, d4,
+      i5, l5, f5, d5,
+      i6, l6, f6, d6,
+      i7, l7, f7, d7,
+      i8, l8, f8, d8);
+  return ok ? 42 : -1;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_TestFast_nativeMethodVoid(JNIEnv*, jclass) {
+  return 42;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_TestFast_nativeMethod(JNIEnv*, jclass, jint i) {
+  return i;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_TestFast_nativeMethodWithManyParameters(
+    JNIEnv*, jclass,
+    jint i1, jlong l1, jfloat f1, jdouble d1,
+    jint i2, jlong l2, jfloat f2, jdouble d2,
+    jint i3, jlong l3, jfloat f3, jdouble d3,
+    jint i4, jlong l4, jfloat f4, jdouble d4,
+    jint i5, jlong l5, jfloat f5, jdouble d5,
+    jint i6, jlong l6, jfloat f6, jdouble d6,
+    jint i7, jlong l7, jfloat f7, jdouble d7,
+    jint i8, jlong l8, jfloat f8, jdouble d8) {
+  bool ok = VerifyManyParameters(
+      i1, l1, f1, d1,
+      i2, l2, f2, d2,
+      i3, l3, f3, d3,
+      i4, l4, f4, d4,
+      i5, l5, f5, d5,
+      i6, l6, f6, d6,
+      i7, l7, f7, d7,
+      i8, l8, f8, d8);
+  return ok ? 42 : -1;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_TestCritical_nativeMethodVoid() {
+  return 42;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_TestCritical_nativeMethod(jint i) {
+  return i;
+}
+
+extern "C" JNIEXPORT jint JNICALL Java_TestCritical_nativeMethodWithManyParameters(
+    jint i1, jlong l1, jfloat f1, jdouble d1,
+    jint i2, jlong l2, jfloat f2, jdouble d2,
+    jint i3, jlong l3, jfloat f3, jdouble d3,
+    jint i4, jlong l4, jfloat f4, jdouble d4,
+    jint i5, jlong l5, jfloat f5, jdouble d5,
+    jint i6, jlong l6, jfloat f6, jdouble d6,
+    jint i7, jlong l7, jfloat f7, jdouble d7,
+    jint i8, jlong l8, jfloat f8, jdouble d8) {
+  bool ok = VerifyManyParameters(
+      i1, l1, f1, d1,
+      i2, l2, f2, d2,
+      i3, l3, f3, d3,
+      i4, l4, f4, d4,
+      i5, l5, f5, d5,
+      i6, l6, f6, d6,
+      i7, l7, f7, d7,
+      i8, l8, f8, d8);
+  return ok ? 42 : -1;
+}
+
+}  // namespace art
diff --git a/test/178-app-image-native-method/profile b/test/178-app-image-native-method/profile
new file mode 100644
index 0000000..597dde1
--- /dev/null
+++ b/test/178-app-image-native-method/profile
@@ -0,0 +1,8 @@
+LMain;
+LTest;
+HSPLMain;->test()V
+HSPLMain;->testFast()V
+HSPLMain;->testCritical()V
+HSPLMain;->testMissing()V
+HSPLMain;->testMissingFast()V
+HSPLMain;->testMissingCritical()V
diff --git a/test/178-app-image-native-method/run b/test/178-app-image-native-method/run
new file mode 100644
index 0000000..f4b07f0
--- /dev/null
+++ b/test/178-app-image-native-method/run
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Use a profile to put specific classes in the app image.
+${RUN} $@ --profile -Xcompiler-option --compiler-filter=speed-profile
+return_status1=$?
+
+# Also run with the verify filter to avoid compiling JNI stubs.
+${RUN} ${@} --profile -Xcompiler-option --compiler-filter=verify
+return_status2=$?
+
+(exit ${return_status1}) && (exit ${return_status2})
diff --git a/test/178-app-image-native-method/src/Main.java b/test/178-app-image-native-method/src/Main.java
new file mode 100644
index 0000000..07990cb
--- /dev/null
+++ b/test/178-app-image-native-method/src/Main.java
@@ -0,0 +1,283 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import dalvik.annotation.optimization.FastNative;
+import dalvik.annotation.optimization.CriticalNative;
+
+public class Main {
+
+  public static void main(String[] args) throws Exception {
+    System.loadLibrary(args[0]);
+
+    // To avoid going through resolution trampoline, make test classes visibly initialized.
+    new Test();
+    new TestFast();
+    new TestCritical();
+    new TestMissing();
+    new TestMissingFast();
+    new TestMissingCritical();
+    makeVisiblyInitialized();  // Make sure they are visibly initialized.
+
+    test();
+    testFast();
+    testCritical();
+    testMissing();
+    testMissingFast();
+    testMissingCritical();
+  }
+
+  static void test() {
+    System.out.println("test");
+    assertEquals(42, Test.nativeMethodVoid());
+    assertEquals(42, Test.nativeMethod(42));
+    assertEquals(42, Test.nativeMethodWithManyParameters(
+        11, 12L, 13.0f, 14.0d,
+        21, 22L, 23.0f, 24.0d,
+        31, 32L, 33.0f, 34.0d,
+        41, 42L, 43.0f, 44.0d,
+        51, 52L, 53.0f, 54.0d,
+        61, 62L, 63.0f, 64.0d,
+        71, 72L, 73.0f, 74.0d,
+        81, 82L, 83.0f, 84.0d));
+  }
+
+  static void testFast() {
+    System.out.println("testFast");
+    assertEquals(42, TestFast.nativeMethodVoid());
+    assertEquals(42, TestFast.nativeMethod(42));
+    assertEquals(42, TestFast.nativeMethodWithManyParameters(
+        11, 12L, 13.0f, 14.0d,
+        21, 22L, 23.0f, 24.0d,
+        31, 32L, 33.0f, 34.0d,
+        41, 42L, 43.0f, 44.0d,
+        51, 52L, 53.0f, 54.0d,
+        61, 62L, 63.0f, 64.0d,
+        71, 72L, 73.0f, 74.0d,
+        81, 82L, 83.0f, 84.0d));
+  }
+
+  static void testCritical() {
+    System.out.println("testCritical");
+    assertEquals(42, TestCritical.nativeMethodVoid());
+    assertEquals(42, TestCritical.nativeMethod(42));
+    assertEquals(42, TestCritical.nativeMethodWithManyParameters(
+        11, 12L, 13.0f, 14.0d,
+        21, 22L, 23.0f, 24.0d,
+        31, 32L, 33.0f, 34.0d,
+        41, 42L, 43.0f, 44.0d,
+        51, 52L, 53.0f, 54.0d,
+        61, 62L, 63.0f, 64.0d,
+        71, 72L, 73.0f, 74.0d,
+        81, 82L, 83.0f, 84.0d));
+  }
+
+  static void testMissing() {
+    System.out.println("testMissing");
+
+    try {
+      TestMissing.nativeMethodVoid();
+      throw new Error("UNREACHABLE");
+    } catch (LinkageError expected) {}
+
+    try {
+      TestMissing.nativeMethod(42);
+      throw new Error("UNREACHABLE");
+    } catch (LinkageError expected) {}
+
+    try {
+      TestMissing.nativeMethodWithManyParameters(
+          11, 12L, 13.0f, 14.0d,
+          21, 22L, 23.0f, 24.0d,
+          31, 32L, 33.0f, 34.0d,
+          41, 42L, 43.0f, 44.0d,
+          51, 52L, 53.0f, 54.0d,
+          61, 62L, 63.0f, 64.0d,
+          71, 72L, 73.0f, 74.0d,
+          81, 82L, 83.0f, 84.0d);
+      throw new Error("UNREACHABLE");
+    } catch (LinkageError expected) {}
+  }
+
+  static void testMissingFast() {
+    System.out.println("testMissingFast");
+
+    try {
+      TestMissingFast.nativeMethodVoid();
+      throw new Error("UNREACHABLE");
+    } catch (LinkageError expected) {}
+
+    try {
+      TestMissingFast.nativeMethod(42);
+      throw new Error("UNREACHABLE");
+    } catch (LinkageError expected) {}
+
+    try {
+      TestMissingFast.nativeMethodWithManyParameters(
+          11, 12L, 13.0f, 14.0d,
+          21, 22L, 23.0f, 24.0d,
+          31, 32L, 33.0f, 34.0d,
+          41, 42L, 43.0f, 44.0d,
+          51, 52L, 53.0f, 54.0d,
+          61, 62L, 63.0f, 64.0d,
+          71, 72L, 73.0f, 74.0d,
+          81, 82L, 83.0f, 84.0d);
+      throw new Error("UNREACHABLE");
+    } catch (LinkageError expected) {}
+  }
+
+  static void testMissingCritical() {
+    System.out.println("testMissingCritical");
+
+    try {
+      TestMissingCritical.nativeMethodVoid();
+      throw new Error("UNREACHABLE");
+    } catch (LinkageError expected) {}
+
+    try {
+      TestMissingCritical.nativeMethod(42);
+      throw new Error("UNREACHABLE");
+    } catch (LinkageError expected) {}
+
+    try {
+      TestMissingCritical.nativeMethodWithManyParameters(
+          11, 12L, 13.0f, 14.0d,
+          21, 22L, 23.0f, 24.0d,
+          31, 32L, 33.0f, 34.0d,
+          41, 42L, 43.0f, 44.0d,
+          51, 52L, 53.0f, 54.0d,
+          61, 62L, 63.0f, 64.0d,
+          71, 72L, 73.0f, 74.0d,
+          81, 82L, 83.0f, 84.0d);
+      throw new Error("UNREACHABLE");
+    } catch (LinkageError expected) {}
+  }
+
+  static void assertEquals(int expected, int actual) {
+    if (expected != actual) {
+      throw new AssertionError("Expected " + expected + " got " + actual);
+    }
+  }
+
+  public static native void makeVisiblyInitialized();
+}
+
+class Test {
+  public static native int nativeMethodVoid();
+
+  public static native int nativeMethod(int i);
+
+  public static native int nativeMethodWithManyParameters(
+      int i1, long l1, float f1, double d1,
+      int i2, long l2, float f2, double d2,
+      int i3, long l3, float f3, double d3,
+      int i4, long l4, float f4, double d4,
+      int i5, long l5, float f5, double d5,
+      int i6, long l6, float f6, double d6,
+      int i7, long l7, float f7, double d7,
+      int i8, long l8, float f8, double d8);
+}
+
+class TestFast {
+  @FastNative
+  public static native int nativeMethodVoid();
+
+  @FastNative
+  public static native int nativeMethod(int i);
+
+  @FastNative
+  public static native int nativeMethodWithManyParameters(
+      int i1, long l1, float f1, double d1,
+      int i2, long l2, float f2, double d2,
+      int i3, long l3, float f3, double d3,
+      int i4, long l4, float f4, double d4,
+      int i5, long l5, float f5, double d5,
+      int i6, long l6, float f6, double d6,
+      int i7, long l7, float f7, double d7,
+      int i8, long l8, float f8, double d8);
+}
+
+class TestCritical {
+  @CriticalNative
+  public static native int nativeMethodVoid();
+
+  @CriticalNative
+  public static native int nativeMethod(int i);
+
+  @CriticalNative
+  public static native int nativeMethodWithManyParameters(
+      int i1, long l1, float f1, double d1,
+      int i2, long l2, float f2, double d2,
+      int i3, long l3, float f3, double d3,
+      int i4, long l4, float f4, double d4,
+      int i5, long l5, float f5, double d5,
+      int i6, long l6, float f6, double d6,
+      int i7, long l7, float f7, double d7,
+      int i8, long l8, float f8, double d8);
+}
+
+class TestMissing {
+  public static native int nativeMethodVoid();
+
+  public static native int nativeMethod(int i);
+
+  public static native int nativeMethodWithManyParameters(
+      int i1, long l1, float f1, double d1,
+      int i2, long l2, float f2, double d2,
+      int i3, long l3, float f3, double d3,
+      int i4, long l4, float f4, double d4,
+      int i5, long l5, float f5, double d5,
+      int i6, long l6, float f6, double d6,
+      int i7, long l7, float f7, double d7,
+      int i8, long l8, float f8, double d8);
+}
+
+class TestMissingFast {
+  @FastNative
+  public static native int nativeMethodVoid();
+
+  @FastNative
+  public static native int nativeMethod(int i);
+
+  @FastNative
+  public static native int nativeMethodWithManyParameters(
+      int i1, long l1, float f1, double d1,
+      int i2, long l2, float f2, double d2,
+      int i3, long l3, float f3, double d3,
+      int i4, long l4, float f4, double d4,
+      int i5, long l5, float f5, double d5,
+      int i6, long l6, float f6, double d6,
+      int i7, long l7, float f7, double d7,
+      int i8, long l8, float f8, double d8);
+}
+
+class TestMissingCritical {
+  @CriticalNative
+  public static native int nativeMethodVoid();
+
+  @CriticalNative
+  public static native int nativeMethod(int i);
+
+  @CriticalNative
+  public static native int nativeMethodWithManyParameters(
+      int i1, long l1, float f1, double d1,
+      int i2, long l2, float f2, double d2,
+      int i3, long l3, float f3, double d3,
+      int i4, long l4, float f4, double d4,
+      int i5, long l5, float f5, double d5,
+      int i6, long l6, float f6, double d6,
+      int i7, long l7, float f7, double d7,
+      int i8, long l8, float f8, double d8);
+}
diff --git a/test/180-native-default-method/build b/test/180-native-default-method/build
new file mode 100644
index 0000000..3963fd3
--- /dev/null
+++ b/test/180-native-default-method/build
@@ -0,0 +1,30 @@
+#!/bin/bash
+#
+# Copyright 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+./default-build "$@"
+
+if [[ $@ != *"--jvm"* ]]; then
+  # Change the generated dex file to have a v35 magic number if it is version 39
+  if test -f classes.dex && head -c 7 classes.dex | grep -q 039; then
+    # place ascii value '035' into the classes.dex file starting at byte 4.
+    printf '035' | dd status=none conv=notrunc of=classes.dex bs=1 seek=4 count=3
+    rm -f $TEST_NAME.jar
+    zip $TEST_NAME.jar classes.dex
+  fi
+fi
diff --git a/test/640-checker-byte-simd/expected.txt b/test/180-native-default-method/expected.txt
similarity index 100%
copy from test/640-checker-byte-simd/expected.txt
copy to test/180-native-default-method/expected.txt
diff --git a/test/180-native-default-method/info.txt b/test/180-native-default-method/info.txt
new file mode 100644
index 0000000..0cba4eb
--- /dev/null
+++ b/test/180-native-default-method/info.txt
@@ -0,0 +1,3 @@
+Regression test for DCHECK() failure for copying a default native method from
+an interface to a class implementing that interface. The default native method
+should result in ClassFormatError before we reach that DCHECK().
diff --git a/test/180-native-default-method/jasmin/TestClass.j b/test/180-native-default-method/jasmin/TestClass.j
new file mode 100644
index 0000000..fddd99b
--- /dev/null
+++ b/test/180-native-default-method/jasmin/TestClass.j
@@ -0,0 +1,25 @@
+; Copyright (C) 2020 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class                   public TestClass
+.super                   java/lang/Object
+.implements              TestInterface
+
+.method                  public <init>()V
+   .limit stack          1
+   .limit locals         1
+   aload_0
+   invokespecial         java/lang/Object/<init>()V
+   return
+.end method
diff --git a/test/180-native-default-method/jasmin/TestInterface.j b/test/180-native-default-method/jasmin/TestInterface.j
new file mode 100644
index 0000000..080474e
--- /dev/null
+++ b/test/180-native-default-method/jasmin/TestInterface.j
@@ -0,0 +1,19 @@
+; Copyright (C) 2020 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.interface               public TestInterface
+.super                   java/lang/Object
+
+.method                  public native foo()V
+.end method
diff --git a/test/180-native-default-method/src/Main.java b/test/180-native-default-method/src/Main.java
new file mode 100644
index 0000000..4b2704b
--- /dev/null
+++ b/test/180-native-default-method/src/Main.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String args[]) {
+    try {
+      // Regression test for default native methods that should cause ClassFormatError
+      // if they pass the dex file verification, i.e. for old dex file versions.
+      // We previously did not handle this case properly and failed a DCHECK() for
+      // a non-interface class creating a copied method that was native. b/157170505
+      Class.forName("TestClass");
+      throw new Error("UNREACHABLE");
+    } catch (ClassFormatError expected) {
+      System.out.println("passed");
+    } catch (Throwable unexpected) {
+      unexpected.printStackTrace();
+    }
+  }
+}
diff --git a/test/1900-track-alloc/src/art/Main.java b/test/1900-track-alloc/src/art/Main.java
deleted file mode 100644
index aa5498b..0000000
--- a/test/1900-track-alloc/src/art/Main.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-// Binder class so the agent's C code has something that can be bound and exposed to tests.
-// In a package to separate cleanly and work around CTS reference issues (though this class
-// should be replaced in the CTS version).
-public class Main {
-  // Load the given class with the given classloader, and bind all native methods to corresponding
-  // C methods in the agent. Will abort if any of the steps fail.
-  public static native void bindAgentJNI(String className, ClassLoader classLoader);
-  // Same as above, giving the class directly.
-  public static native void bindAgentJNIForClass(Class<?> klass);
-
-  // Common infrastructure.
-  public static native void setTag(Object o, long tag);
-  public static native long getTag(Object o);
-}
diff --git a/test/1900-track-alloc/src/art/Main.java b/test/1900-track-alloc/src/art/Main.java
new file mode 120000
index 0000000..84ae4ac
--- /dev/null
+++ b/test/1900-track-alloc/src/art/Main.java
@@ -0,0 +1 @@
+../../../jvmti-common/Main.java
\ No newline at end of file
diff --git a/test/1902-suspend/src/art/Suspension.java b/test/1902-suspend/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1902-suspend/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1903-suspend-self/src/art/Suspension.java b/test/1903-suspend-self/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1903-suspend-self/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1903-suspend-self/src/art/Suspension.java b/test/1903-suspend-self/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1903-suspend-self/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1904-double-suspend/src/art/Suspension.java b/test/1904-double-suspend/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1904-double-suspend/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1904-double-suspend/src/art/Suspension.java b/test/1904-double-suspend/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1904-double-suspend/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1905-suspend-native/src/art/Suspension.java b/test/1905-suspend-native/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1905-suspend-native/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1905-suspend-native/src/art/Suspension.java b/test/1905-suspend-native/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1905-suspend-native/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1906-suspend-list-me-first/src/art/Suspension.java b/test/1906-suspend-list-me-first/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1906-suspend-list-me-first/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1906-suspend-list-me-first/src/art/Suspension.java b/test/1906-suspend-list-me-first/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1906-suspend-list-me-first/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1907-suspend-list-self-twice/src/art/Suspension.java b/test/1907-suspend-list-self-twice/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1907-suspend-list-self-twice/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1907-suspend-list-self-twice/src/art/Suspension.java b/test/1907-suspend-list-self-twice/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1907-suspend-list-self-twice/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1908-suspend-native-resume-self/src/art/Suspension.java b/test/1908-suspend-native-resume-self/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1908-suspend-native-resume-self/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1908-suspend-native-resume-self/src/art/Suspension.java b/test/1908-suspend-native-resume-self/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1908-suspend-native-resume-self/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1909-per-agent-tls/src/art/Main.java b/test/1909-per-agent-tls/src/art/Main.java
deleted file mode 100644
index aa5498b..0000000
--- a/test/1909-per-agent-tls/src/art/Main.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-// Binder class so the agent's C code has something that can be bound and exposed to tests.
-// In a package to separate cleanly and work around CTS reference issues (though this class
-// should be replaced in the CTS version).
-public class Main {
-  // Load the given class with the given classloader, and bind all native methods to corresponding
-  // C methods in the agent. Will abort if any of the steps fail.
-  public static native void bindAgentJNI(String className, ClassLoader classLoader);
-  // Same as above, giving the class directly.
-  public static native void bindAgentJNIForClass(Class<?> klass);
-
-  // Common infrastructure.
-  public static native void setTag(Object o, long tag);
-  public static native long getTag(Object o);
-}
diff --git a/test/1909-per-agent-tls/src/art/Main.java b/test/1909-per-agent-tls/src/art/Main.java
new file mode 120000
index 0000000..84ae4ac
--- /dev/null
+++ b/test/1909-per-agent-tls/src/art/Main.java
@@ -0,0 +1 @@
+../../../jvmti-common/Main.java
\ No newline at end of file
diff --git a/test/1910-transform-with-default/src/art/Redefinition.java b/test/1910-transform-with-default/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/1910-transform-with-default/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/1910-transform-with-default/src/art/Redefinition.java b/test/1910-transform-with-default/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1910-transform-with-default/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1911-get-local-var-table/src/art/Breakpoint.java b/test/1911-get-local-var-table/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1911-get-local-var-table/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1911-get-local-var-table/src/art/Breakpoint.java b/test/1911-get-local-var-table/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1911-get-local-var-table/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1911-get-local-var-table/src/art/Locals.java b/test/1911-get-local-var-table/src/art/Locals.java
deleted file mode 100644
index 22e21be..0000000
--- a/test/1911-get-local-var-table/src/art/Locals.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.Objects;
-
-public class Locals {
-  public static native void EnableLocalVariableAccess();
-
-  public static class VariableDescription {
-    public final long start_location;
-    public final int length;
-    public final String name;
-    public final String signature;
-    public final String generic_signature;
-    public final int slot;
-
-    public VariableDescription(
-        long start, int length, String name, String sig, String gen_sig, int slot) {
-      this.start_location = start;
-      this.length = length;
-      this.name = name;
-      this.signature = sig;
-      this.generic_signature = gen_sig;
-      this.slot = slot;
-    }
-
-    @Override
-    public String toString() {
-      return String.format(
-          "VariableDescription { " +
-            "Sig: '%s', Name: '%s', Gen_sig: '%s', slot: %d, start: %d, len: %d" +
-          "}",
-          this.signature,
-          this.name,
-          this.generic_signature,
-          this.slot,
-          this.start_location,
-          this.length);
-    }
-    public boolean equals(Object other) {
-      if (!(other instanceof VariableDescription)) {
-        return false;
-      } else {
-        VariableDescription v = (VariableDescription)other;
-        return Objects.equals(v.signature, signature) &&
-            Objects.equals(v.name, name) &&
-            Objects.equals(v.generic_signature, generic_signature) &&
-            v.slot == slot &&
-            v.start_location == start_location &&
-            v.length == length;
-      }
-    }
-    public int hashCode() {
-      return Objects.hash(this.signature, this.name, this.generic_signature, this.slot,
-          this.start_location, this.length);
-    }
-  }
-
-  public static native VariableDescription[] GetLocalVariableTable(Executable e);
-
-  public static VariableDescription GetVariableAtLine(
-      Executable e, String name, String sig, int line) throws Exception {
-    return GetVariableAtLocation(e, name, sig, Breakpoint.lineToLocation(e, line));
-  }
-
-  public static VariableDescription GetVariableAtLocation(
-      Executable e, String name, String sig, long loc) {
-    VariableDescription[] vars = GetLocalVariableTable(e);
-    for (VariableDescription var : vars) {
-      if (var.start_location <= loc &&
-          var.length + var.start_location > loc &&
-          var.name.equals(name) &&
-          var.signature.equals(sig)) {
-        return var;
-      }
-    }
-    throw new Error(
-        "Unable to find variable " + name + " (sig: " + sig + ") in " + e + " at loc " + loc);
-  }
-
-  public static native int GetLocalVariableInt(Thread thr, int depth, int slot);
-  public static native long GetLocalVariableLong(Thread thr, int depth, int slot);
-  public static native float GetLocalVariableFloat(Thread thr, int depth, int slot);
-  public static native double GetLocalVariableDouble(Thread thr, int depth, int slot);
-  public static native Object GetLocalVariableObject(Thread thr, int depth, int slot);
-  public static native Object GetLocalInstance(Thread thr, int depth);
-
-  public static void SetLocalVariableInt(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableInt(thr, depth, slot, ((Number)val).intValue());
-  }
-  public static void SetLocalVariableLong(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableLong(thr, depth, slot, ((Number)val).longValue());
-  }
-  public static void SetLocalVariableFloat(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableFloat(thr, depth, slot, ((Number)val).floatValue());
-  }
-  public static void SetLocalVariableDouble(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableDouble(thr, depth, slot, ((Number)val).doubleValue());
-  }
-  public static native void SetLocalVariableInt(Thread thr, int depth, int slot, int val);
-  public static native void SetLocalVariableLong(Thread thr, int depth, int slot, long val);
-  public static native void SetLocalVariableFloat(Thread thr, int depth, int slot, float val);
-  public static native void SetLocalVariableDouble(Thread thr, int depth, int slot, double val);
-  public static native void SetLocalVariableObject(Thread thr, int depth, int slot, Object val);
-}
diff --git a/test/1911-get-local-var-table/src/art/Locals.java b/test/1911-get-local-var-table/src/art/Locals.java
new file mode 120000
index 0000000..2998386
--- /dev/null
+++ b/test/1911-get-local-var-table/src/art/Locals.java
@@ -0,0 +1 @@
+../../../jvmti-common/Locals.java
\ No newline at end of file
diff --git a/test/1911-get-local-var-table/src/art/Suspension.java b/test/1911-get-local-var-table/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1911-get-local-var-table/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1911-get-local-var-table/src/art/Suspension.java b/test/1911-get-local-var-table/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1911-get-local-var-table/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1912-get-set-local-primitive/expected.txt b/test/1912-get-set-local-primitive/expected.txt
index f2c5ce8..8a03566 100644
--- a/test/1912-get-set-local-primitive/expected.txt
+++ b/test/1912-get-set-local-primitive/expected.txt
@@ -10,6 +10,9 @@
 Running public static void art.Test1912.IntMethod(java.lang.Runnable) with "GetDouble" on remote thread.
 "GetDouble" on public static void art.Test1912.IntMethod(java.lang.Runnable) failed due to JVMTI_ERROR_TYPE_MISMATCH
 	Value is '42' (class: class java.lang.Integer)
+Running public static void art.Test1912.IntMethod(java.lang.Runnable) with "GetObject" on remote thread.
+"GetObject" on public static void art.Test1912.IntMethod(java.lang.Runnable) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '42' (class: class java.lang.Integer)
 Running public static void art.Test1912.IntMethod(java.lang.Runnable) with "SetInt" on remote thread.
 "SetInt" on public static void art.Test1912.IntMethod(java.lang.Runnable) set value: 2147483647
 	Value is '2147483647' (class: class java.lang.Integer)
@@ -22,6 +25,12 @@
 Running public static void art.Test1912.IntMethod(java.lang.Runnable) with "SetDouble" on remote thread.
 "SetDouble" on public static void art.Test1912.IntMethod(java.lang.Runnable) failed to set value 12.4 due to JVMTI_ERROR_TYPE_MISMATCH
 	Value is '42' (class: class java.lang.Integer)
+Running public static void art.Test1912.IntMethod(java.lang.Runnable) with "SetObject" on remote thread.
+"SetObject" on public static void art.Test1912.IntMethod(java.lang.Runnable) failed to set value NEW_VALUE_FOR_SET due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art.Test1912.IntMethod(java.lang.Runnable) with "SetNullObject" on remote thread.
+"SetNullObject" on public static void art.Test1912.IntMethod(java.lang.Runnable) failed to set value null due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '42' (class: class java.lang.Integer)
 Running public static void art.Test1912.LongMethod(java.lang.Runnable) with "GetInt" on remote thread.
 "GetInt" on public static void art.Test1912.LongMethod(java.lang.Runnable) failed due to JVMTI_ERROR_TYPE_MISMATCH
 	Value is '9001' (class: class java.lang.Long)
@@ -34,6 +43,9 @@
 Running public static void art.Test1912.LongMethod(java.lang.Runnable) with "GetDouble" on remote thread.
 "GetDouble" on public static void art.Test1912.LongMethod(java.lang.Runnable) failed due to JVMTI_ERROR_TYPE_MISMATCH
 	Value is '9001' (class: class java.lang.Long)
+Running public static void art.Test1912.LongMethod(java.lang.Runnable) with "GetObject" on remote thread.
+"GetObject" on public static void art.Test1912.LongMethod(java.lang.Runnable) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '9001' (class: class java.lang.Long)
 Running public static void art.Test1912.LongMethod(java.lang.Runnable) with "SetInt" on remote thread.
 "SetInt" on public static void art.Test1912.LongMethod(java.lang.Runnable) failed to set value 2147483647 due to JVMTI_ERROR_TYPE_MISMATCH
 	Value is '9001' (class: class java.lang.Long)
@@ -46,6 +58,12 @@
 Running public static void art.Test1912.LongMethod(java.lang.Runnable) with "SetDouble" on remote thread.
 "SetDouble" on public static void art.Test1912.LongMethod(java.lang.Runnable) failed to set value 12.4 due to JVMTI_ERROR_TYPE_MISMATCH
 	Value is '9001' (class: class java.lang.Long)
+Running public static void art.Test1912.LongMethod(java.lang.Runnable) with "SetObject" on remote thread.
+"SetObject" on public static void art.Test1912.LongMethod(java.lang.Runnable) failed to set value NEW_VALUE_FOR_SET due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art.Test1912.LongMethod(java.lang.Runnable) with "SetNullObject" on remote thread.
+"SetNullObject" on public static void art.Test1912.LongMethod(java.lang.Runnable) failed to set value null due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '9001' (class: class java.lang.Long)
 Running public static void art.Test1912.FloatMethod(java.lang.Runnable) with "GetInt" on remote thread.
 "GetInt" on public static void art.Test1912.FloatMethod(java.lang.Runnable) failed due to JVMTI_ERROR_TYPE_MISMATCH
 	Value is '1.618' (class: class java.lang.Float)
@@ -58,6 +76,9 @@
 Running public static void art.Test1912.FloatMethod(java.lang.Runnable) with "GetDouble" on remote thread.
 "GetDouble" on public static void art.Test1912.FloatMethod(java.lang.Runnable) failed due to JVMTI_ERROR_TYPE_MISMATCH
 	Value is '1.618' (class: class java.lang.Float)
+Running public static void art.Test1912.FloatMethod(java.lang.Runnable) with "GetObject" on remote thread.
+"GetObject" on public static void art.Test1912.FloatMethod(java.lang.Runnable) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '1.618' (class: class java.lang.Float)
 Running public static void art.Test1912.FloatMethod(java.lang.Runnable) with "SetInt" on remote thread.
 "SetInt" on public static void art.Test1912.FloatMethod(java.lang.Runnable) failed to set value 2147483647 due to JVMTI_ERROR_TYPE_MISMATCH
 	Value is '1.618' (class: class java.lang.Float)
@@ -70,6 +91,12 @@
 Running public static void art.Test1912.FloatMethod(java.lang.Runnable) with "SetDouble" on remote thread.
 "SetDouble" on public static void art.Test1912.FloatMethod(java.lang.Runnable) failed to set value 12.4 due to JVMTI_ERROR_TYPE_MISMATCH
 	Value is '1.618' (class: class java.lang.Float)
+Running public static void art.Test1912.FloatMethod(java.lang.Runnable) with "SetObject" on remote thread.
+"SetObject" on public static void art.Test1912.FloatMethod(java.lang.Runnable) failed to set value NEW_VALUE_FOR_SET due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '1.618' (class: class java.lang.Float)
+Running public static void art.Test1912.FloatMethod(java.lang.Runnable) with "SetNullObject" on remote thread.
+"SetNullObject" on public static void art.Test1912.FloatMethod(java.lang.Runnable) failed to set value null due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '1.618' (class: class java.lang.Float)
 Running public static void art.Test1912.DoubleMethod(java.lang.Runnable) with "GetInt" on remote thread.
 "GetInt" on public static void art.Test1912.DoubleMethod(java.lang.Runnable) failed due to JVMTI_ERROR_TYPE_MISMATCH
 	Value is '3.1415' (class: class java.lang.Double)
@@ -82,6 +109,9 @@
 Running public static void art.Test1912.DoubleMethod(java.lang.Runnable) with "GetDouble" on remote thread.
 "GetDouble" on public static void art.Test1912.DoubleMethod(java.lang.Runnable) got value: 3.1415
 	Value is '3.1415' (class: class java.lang.Double)
+Running public static void art.Test1912.DoubleMethod(java.lang.Runnable) with "GetObject" on remote thread.
+"GetObject" on public static void art.Test1912.DoubleMethod(java.lang.Runnable) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '3.1415' (class: class java.lang.Double)
 Running public static void art.Test1912.DoubleMethod(java.lang.Runnable) with "SetInt" on remote thread.
 "SetInt" on public static void art.Test1912.DoubleMethod(java.lang.Runnable) failed to set value 2147483647 due to JVMTI_ERROR_TYPE_MISMATCH
 	Value is '3.1415' (class: class java.lang.Double)
@@ -94,6 +124,78 @@
 Running public static void art.Test1912.DoubleMethod(java.lang.Runnable) with "SetDouble" on remote thread.
 "SetDouble" on public static void art.Test1912.DoubleMethod(java.lang.Runnable) set value: 12.4
 	Value is '12.4' (class: class java.lang.Double)
+Running public static void art.Test1912.DoubleMethod(java.lang.Runnable) with "SetObject" on remote thread.
+"SetObject" on public static void art.Test1912.DoubleMethod(java.lang.Runnable) failed to set value NEW_VALUE_FOR_SET due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '3.1415' (class: class java.lang.Double)
+Running public static void art.Test1912.DoubleMethod(java.lang.Runnable) with "SetNullObject" on remote thread.
+"SetNullObject" on public static void art.Test1912.DoubleMethod(java.lang.Runnable) failed to set value null due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '3.1415' (class: class java.lang.Double)
+Running public static void art.Test1912.ObjectMethod(java.lang.Runnable) with "GetInt" on remote thread.
+"GetInt" on public static void art.Test1912.ObjectMethod(java.lang.Runnable) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1912.ObjectMethod(java.lang.Runnable) with "GetLong" on remote thread.
+"GetLong" on public static void art.Test1912.ObjectMethod(java.lang.Runnable) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1912.ObjectMethod(java.lang.Runnable) with "GetFloat" on remote thread.
+"GetFloat" on public static void art.Test1912.ObjectMethod(java.lang.Runnable) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1912.ObjectMethod(java.lang.Runnable) with "GetDouble" on remote thread.
+"GetDouble" on public static void art.Test1912.ObjectMethod(java.lang.Runnable) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1912.ObjectMethod(java.lang.Runnable) with "GetObject" on remote thread.
+"GetObject" on public static void art.Test1912.ObjectMethod(java.lang.Runnable) got value: TARGET OBJECT
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1912.ObjectMethod(java.lang.Runnable) with "SetInt" on remote thread.
+"SetInt" on public static void art.Test1912.ObjectMethod(java.lang.Runnable) failed to set value 2147483647 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1912.ObjectMethod(java.lang.Runnable) with "SetLong" on remote thread.
+"SetLong" on public static void art.Test1912.ObjectMethod(java.lang.Runnable) failed to set value 9223372036854775807 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1912.ObjectMethod(java.lang.Runnable) with "SetFloat" on remote thread.
+"SetFloat" on public static void art.Test1912.ObjectMethod(java.lang.Runnable) failed to set value 9.2 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1912.ObjectMethod(java.lang.Runnable) with "SetDouble" on remote thread.
+"SetDouble" on public static void art.Test1912.ObjectMethod(java.lang.Runnable) failed to set value 12.4 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1912.ObjectMethod(java.lang.Runnable) with "SetObject" on remote thread.
+"SetObject" on public static void art.Test1912.ObjectMethod(java.lang.Runnable) set value: NEW_VALUE_FOR_SET
+	Value is 'NEW_VALUE_FOR_SET' (class: class java.lang.String)
+Running public static void art.Test1912.ObjectMethod(java.lang.Runnable) with "SetNullObject" on remote thread.
+"SetNullObject" on public static void art.Test1912.ObjectMethod(java.lang.Runnable) set value: null
+	Value is 'null' (class: null)
+Running public static void art.Test1912.NullObjectMethod(java.lang.Runnable) with "GetInt" on remote thread.
+"GetInt" on public static void art.Test1912.NullObjectMethod(java.lang.Runnable) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'null' (class: null)
+Running public static void art.Test1912.NullObjectMethod(java.lang.Runnable) with "GetLong" on remote thread.
+"GetLong" on public static void art.Test1912.NullObjectMethod(java.lang.Runnable) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'null' (class: null)
+Running public static void art.Test1912.NullObjectMethod(java.lang.Runnable) with "GetFloat" on remote thread.
+"GetFloat" on public static void art.Test1912.NullObjectMethod(java.lang.Runnable) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'null' (class: null)
+Running public static void art.Test1912.NullObjectMethod(java.lang.Runnable) with "GetDouble" on remote thread.
+"GetDouble" on public static void art.Test1912.NullObjectMethod(java.lang.Runnable) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'null' (class: null)
+Running public static void art.Test1912.NullObjectMethod(java.lang.Runnable) with "GetObject" on remote thread.
+"GetObject" on public static void art.Test1912.NullObjectMethod(java.lang.Runnable) got value: null
+	Value is 'null' (class: null)
+Running public static void art.Test1912.NullObjectMethod(java.lang.Runnable) with "SetInt" on remote thread.
+"SetInt" on public static void art.Test1912.NullObjectMethod(java.lang.Runnable) failed to set value 2147483647 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'null' (class: null)
+Running public static void art.Test1912.NullObjectMethod(java.lang.Runnable) with "SetLong" on remote thread.
+"SetLong" on public static void art.Test1912.NullObjectMethod(java.lang.Runnable) failed to set value 9223372036854775807 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'null' (class: null)
+Running public static void art.Test1912.NullObjectMethod(java.lang.Runnable) with "SetFloat" on remote thread.
+"SetFloat" on public static void art.Test1912.NullObjectMethod(java.lang.Runnable) failed to set value 9.2 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'null' (class: null)
+Running public static void art.Test1912.NullObjectMethod(java.lang.Runnable) with "SetDouble" on remote thread.
+"SetDouble" on public static void art.Test1912.NullObjectMethod(java.lang.Runnable) failed to set value 12.4 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'null' (class: null)
+Running public static void art.Test1912.NullObjectMethod(java.lang.Runnable) with "SetObject" on remote thread.
+"SetObject" on public static void art.Test1912.NullObjectMethod(java.lang.Runnable) set value: NEW_VALUE_FOR_SET
+	Value is 'NEW_VALUE_FOR_SET' (class: class java.lang.String)
+Running public static void art.Test1912.NullObjectMethod(java.lang.Runnable) with "SetNullObject" on remote thread.
+"SetNullObject" on public static void art.Test1912.NullObjectMethod(java.lang.Runnable) set value: null
+	Value is 'null' (class: null)
 Running public static void art.Test1912.BooleanMethod(java.lang.Runnable) with "SetIntBoolSize" on remote thread.
 "SetIntBoolSize" on public static void art.Test1912.BooleanMethod(java.lang.Runnable) set value: 1
 	Value is 'true' (class: class java.lang.Boolean)
diff --git a/test/1912-get-set-local-primitive/src/art/Breakpoint.java b/test/1912-get-set-local-primitive/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1912-get-set-local-primitive/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1912-get-set-local-primitive/src/art/Breakpoint.java b/test/1912-get-set-local-primitive/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1912-get-set-local-primitive/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1912-get-set-local-primitive/src/art/Locals.java b/test/1912-get-set-local-primitive/src/art/Locals.java
deleted file mode 100644
index 22e21be..0000000
--- a/test/1912-get-set-local-primitive/src/art/Locals.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.Objects;
-
-public class Locals {
-  public static native void EnableLocalVariableAccess();
-
-  public static class VariableDescription {
-    public final long start_location;
-    public final int length;
-    public final String name;
-    public final String signature;
-    public final String generic_signature;
-    public final int slot;
-
-    public VariableDescription(
-        long start, int length, String name, String sig, String gen_sig, int slot) {
-      this.start_location = start;
-      this.length = length;
-      this.name = name;
-      this.signature = sig;
-      this.generic_signature = gen_sig;
-      this.slot = slot;
-    }
-
-    @Override
-    public String toString() {
-      return String.format(
-          "VariableDescription { " +
-            "Sig: '%s', Name: '%s', Gen_sig: '%s', slot: %d, start: %d, len: %d" +
-          "}",
-          this.signature,
-          this.name,
-          this.generic_signature,
-          this.slot,
-          this.start_location,
-          this.length);
-    }
-    public boolean equals(Object other) {
-      if (!(other instanceof VariableDescription)) {
-        return false;
-      } else {
-        VariableDescription v = (VariableDescription)other;
-        return Objects.equals(v.signature, signature) &&
-            Objects.equals(v.name, name) &&
-            Objects.equals(v.generic_signature, generic_signature) &&
-            v.slot == slot &&
-            v.start_location == start_location &&
-            v.length == length;
-      }
-    }
-    public int hashCode() {
-      return Objects.hash(this.signature, this.name, this.generic_signature, this.slot,
-          this.start_location, this.length);
-    }
-  }
-
-  public static native VariableDescription[] GetLocalVariableTable(Executable e);
-
-  public static VariableDescription GetVariableAtLine(
-      Executable e, String name, String sig, int line) throws Exception {
-    return GetVariableAtLocation(e, name, sig, Breakpoint.lineToLocation(e, line));
-  }
-
-  public static VariableDescription GetVariableAtLocation(
-      Executable e, String name, String sig, long loc) {
-    VariableDescription[] vars = GetLocalVariableTable(e);
-    for (VariableDescription var : vars) {
-      if (var.start_location <= loc &&
-          var.length + var.start_location > loc &&
-          var.name.equals(name) &&
-          var.signature.equals(sig)) {
-        return var;
-      }
-    }
-    throw new Error(
-        "Unable to find variable " + name + " (sig: " + sig + ") in " + e + " at loc " + loc);
-  }
-
-  public static native int GetLocalVariableInt(Thread thr, int depth, int slot);
-  public static native long GetLocalVariableLong(Thread thr, int depth, int slot);
-  public static native float GetLocalVariableFloat(Thread thr, int depth, int slot);
-  public static native double GetLocalVariableDouble(Thread thr, int depth, int slot);
-  public static native Object GetLocalVariableObject(Thread thr, int depth, int slot);
-  public static native Object GetLocalInstance(Thread thr, int depth);
-
-  public static void SetLocalVariableInt(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableInt(thr, depth, slot, ((Number)val).intValue());
-  }
-  public static void SetLocalVariableLong(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableLong(thr, depth, slot, ((Number)val).longValue());
-  }
-  public static void SetLocalVariableFloat(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableFloat(thr, depth, slot, ((Number)val).floatValue());
-  }
-  public static void SetLocalVariableDouble(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableDouble(thr, depth, slot, ((Number)val).doubleValue());
-  }
-  public static native void SetLocalVariableInt(Thread thr, int depth, int slot, int val);
-  public static native void SetLocalVariableLong(Thread thr, int depth, int slot, long val);
-  public static native void SetLocalVariableFloat(Thread thr, int depth, int slot, float val);
-  public static native void SetLocalVariableDouble(Thread thr, int depth, int slot, double val);
-  public static native void SetLocalVariableObject(Thread thr, int depth, int slot, Object val);
-}
diff --git a/test/1912-get-set-local-primitive/src/art/Locals.java b/test/1912-get-set-local-primitive/src/art/Locals.java
new file mode 120000
index 0000000..2998386
--- /dev/null
+++ b/test/1912-get-set-local-primitive/src/art/Locals.java
@@ -0,0 +1 @@
+../../../jvmti-common/Locals.java
\ No newline at end of file
diff --git a/test/1912-get-set-local-primitive/src/art/StackTrace.java b/test/1912-get-set-local-primitive/src/art/StackTrace.java
deleted file mode 100644
index 2ea2f20..0000000
--- a/test/1912-get-set-local-primitive/src/art/StackTrace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Executable;
-
-public class StackTrace {
-  public static class StackFrameData {
-    public final Thread thr;
-    public final Executable method;
-    public final long current_location;
-    public final int depth;
-
-    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
-      this.thr = thr;
-      this.method = e;
-      this.current_location = loc;
-      this.depth = depth;
-    }
-    @Override
-    public String toString() {
-      return String.format(
-          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
-          this.thr,
-          this.method,
-          this.current_location,
-          this.depth);
-    }
-  }
-
-  public static native int GetStackDepth(Thread thr);
-
-  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
-
-  public static StackFrameData[] GetStackTrace(Thread thr) {
-    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
-    // suspended. The spec says that not being suspended is fine but since we want this to be
-    // consistent we will suspend for the RI.
-    boolean suspend_thread =
-        !System.getProperty("java.vm.name").equals("Dalvik") &&
-        !thr.equals(Thread.currentThread()) &&
-        !Suspension.isSuspended(thr);
-    if (suspend_thread) {
-      Suspension.suspend(thr);
-    }
-    StackFrameData[] out = nativeGetStackTrace(thr);
-    if (suspend_thread) {
-      Suspension.resume(thr);
-    }
-    return out;
-  }
-}
-
diff --git a/test/1912-get-set-local-primitive/src/art/StackTrace.java b/test/1912-get-set-local-primitive/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1912-get-set-local-primitive/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1912-get-set-local-primitive/src/art/Suspension.java b/test/1912-get-set-local-primitive/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1912-get-set-local-primitive/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1912-get-set-local-primitive/src/art/Suspension.java b/test/1912-get-set-local-primitive/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1912-get-set-local-primitive/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1912-get-set-local-primitive/src/art/Test1912.java b/test/1912-get-set-local-primitive/src/art/Test1912.java
index 24149f4..f0a6065 100644
--- a/test/1912-get-set-local-primitive/src/art/Test1912.java
+++ b/test/1912-get-set-local-primitive/src/art/Test1912.java
@@ -20,29 +20,39 @@
 import java.lang.reflect.Executable;
 import java.lang.reflect.Method;
 import java.nio.ByteBuffer;
-import java.util.concurrent.Semaphore;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.Semaphore;
+import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.function.Predicate;
 import java.util.function.Supplier;
-import java.util.function.Consumer;
 
 // TODO Rename test to set-get-local-prim
 
 public class Test1912 {
   public static final String TARGET_VAR = "TARGET";
 
-
   public static void reportValue(Object val) {
     if (val instanceof Character) {
       val = "<Char: " + Character.getNumericValue(((Character)val).charValue()) + ">";
     }
-    System.out.println("\tValue is '" + val + "' (class: " + val.getClass() + ")");
+    System.out.println("\tValue is '" + val +
+                       "' (class: " + (val != null ? val.getClass().toString() : "null") + ")");
   }
 
+  public static void NullObjectMethod(Runnable safepoint) {
+    Object TARGET = null;
+    safepoint.run();
+    reportValue(TARGET);
+  }
+  public static void ObjectMethod(Runnable safepoint) {
+    Object TARGET = "TARGET OBJECT";
+    safepoint.run();
+    reportValue(TARGET);
+  }
   public static void BooleanMethod(Runnable safepoint) {
     boolean TARGET = false;
     safepoint.run();
@@ -85,31 +95,27 @@
   }
 
   public static interface SafepointFunction {
-    public void invoke(
-        Thread thread,
-        Method target,
-        Locals.VariableDescription TARGET_desc,
-        int depth) throws Exception;
+    public void
+    invoke(Thread thread, Method target, Locals.VariableDescription TARGET_desc, int depth)
+        throws Exception;
   }
 
   public static interface SetterFunction {
     public void SetVar(Thread t, int depth, int slot, Object v);
   }
 
-  public static interface GetterFunction {
-    public Object GetVar(Thread t, int depth, int slot);
-  }
+  public static interface GetterFunction { public Object GetVar(Thread t, int depth, int slot); }
 
-  public static SafepointFunction NamedSet(
-      final String type, final SetterFunction get, final Object v) {
+  public static SafepointFunction
+  NamedSet(final String type, final SetterFunction get, final Object v) {
     return new SafepointFunction() {
       public void invoke(Thread t, Method method, Locals.VariableDescription desc, int depth) {
         try {
           get.SetVar(t, depth, desc.slot, v);
           System.out.println(this + " on " + method + " set value: " + v);
         } catch (Exception e) {
-          System.out.println(
-              this + " on " + method + " failed to set value " + v + " due to " + e.getMessage());
+          System.out.println(this + " on " + method + " failed to set value " + v + " due to " +
+                             e.getMessage());
         }
       }
       public String toString() {
@@ -171,15 +177,13 @@
     public void exec(final SafepointFunction safepoint) throws Exception {
       System.out.println("Running " + target + " with " + safepoint + " on remote thread.");
       final ThreadPauser pause = new ThreadPauser();
-      Thread remote = new Thread(
-          () -> {
-            try {
-              target.invoke(null, pause);
-            } catch (Exception e) {
-              throw new Error("Error invoking remote thread " + Thread.currentThread(), e);
-            }
-          },
-          "remote thread for " + target + " with " + safepoint);
+      Thread remote = new Thread(() -> {
+        try {
+          target.invoke(null, pause);
+        } catch (Exception e) {
+          throw new Error("Error invoking remote thread " + Thread.currentThread(), e);
+        }
+      }, "remote thread for " + target + " with " + safepoint);
       remote.start();
       pause.waitForOtherThreadToPause();
       try {
@@ -196,14 +200,12 @@
 
     private Locals.VariableDescription findTargetVar(long loc) {
       for (Locals.VariableDescription var : Locals.GetLocalVariableTable(target)) {
-        if (var.start_location <= loc &&
-            var.length + var.start_location > loc &&
+        if (var.start_location <= loc && var.length + var.start_location > loc &&
             var.name.equals(TARGET_VAR)) {
           return var;
         }
       }
-      throw new Error(
-          "Unable to find variable " + TARGET_VAR + " in " + target + " at loc " + loc);
+      throw new Error("Unable to find variable " + TARGET_VAR + " in " + target + " at loc " + loc);
     }
 
     private StackTrace.StackFrameData findStackFrame(Thread thr) {
@@ -222,10 +224,9 @@
   public static void run() throws Exception {
     Locals.EnableLocalVariableAccess();
     final TestCase[] MAIN_TEST_CASES = new TestCase[] {
-      new TestCase(getMethod("IntMethod")),
-      new TestCase(getMethod("LongMethod")),
-      new TestCase(getMethod("FloatMethod")),
-      new TestCase(getMethod("DoubleMethod")),
+      new TestCase(getMethod("IntMethod")),    new TestCase(getMethod("LongMethod")),
+      new TestCase(getMethod("FloatMethod")),  new TestCase(getMethod("DoubleMethod")),
+      new TestCase(getMethod("ObjectMethod")), new TestCase(getMethod("NullObjectMethod")),
     };
 
     final SafepointFunction[] SAFEPOINTS = new SafepointFunction[] {
@@ -233,28 +234,30 @@
       NamedGet("Long", Locals::GetLocalVariableLong),
       NamedGet("Float", Locals::GetLocalVariableFloat),
       NamedGet("Double", Locals::GetLocalVariableDouble),
+      NamedGet("Object", Locals::GetLocalVariableObject),
       NamedSet("Int", Locals::SetLocalVariableInt, Integer.MAX_VALUE),
       NamedSet("Long", Locals::SetLocalVariableLong, Long.MAX_VALUE),
       NamedSet("Float", Locals::SetLocalVariableFloat, 9.2f),
       NamedSet("Double", Locals::SetLocalVariableDouble, 12.4d),
+      NamedSet("Object", Locals::SetLocalVariableObject, "NEW_VALUE_FOR_SET"),
+      NamedSet("NullObject", Locals::SetLocalVariableObject, null),
     };
 
-    for (TestCase t: MAIN_TEST_CASES) {
+    for (TestCase t : MAIN_TEST_CASES) {
       for (SafepointFunction s : SAFEPOINTS) {
         t.exec(s);
       }
     }
 
     // Test int for small values.
-    new TestCase(getMethod("BooleanMethod")).exec(
-        NamedSet("IntBoolSize", Locals::SetLocalVariableInt, 1));
-    new TestCase(getMethod("ByteMethod")).exec(
-      NamedSet("IntByteSize", Locals::SetLocalVariableInt, Byte.MAX_VALUE - 1));
+    new TestCase(getMethod("BooleanMethod"))
+        .exec(NamedSet("IntBoolSize", Locals::SetLocalVariableInt, 1));
+    new TestCase(getMethod("ByteMethod"))
+        .exec(NamedSet("IntByteSize", Locals::SetLocalVariableInt, Byte.MAX_VALUE - 1));
 
-    new TestCase(getMethod("CharMethod")).exec(
-      NamedSet("IntCharSize", Locals::SetLocalVariableInt, Character.MAX_VALUE - 1));
-    new TestCase(getMethod("ShortMethod")).exec(
-      NamedSet("IntShortSize", Locals::SetLocalVariableInt, Short.MAX_VALUE - 1));
+    new TestCase(getMethod("CharMethod"))
+        .exec(NamedSet("IntCharSize", Locals::SetLocalVariableInt, Character.MAX_VALUE - 1));
+    new TestCase(getMethod("ShortMethod"))
+        .exec(NamedSet("IntShortSize", Locals::SetLocalVariableInt, Short.MAX_VALUE - 1));
   }
 }
-
diff --git a/test/1913-get-set-local-objects/expected.txt b/test/1913-get-set-local-objects/expected.txt
index 23f4992..2338915 100644
--- a/test/1913-get-set-local-objects/expected.txt
+++ b/test/1913-get-set-local-objects/expected.txt
@@ -70,3 +70,57 @@
 Running public static void art.Test1913.PrimitiveMethod(java.lang.Runnable) with "SetTestClass2impl" on remote thread.
 "SetTestClass2impl" on public static void art.Test1913.PrimitiveMethod(java.lang.Runnable) failed to set value TestClass2impl("TestClass2("Set TestClass2impl")") due to JVMTI_ERROR_TYPE_MISMATCH
 	Value is '42' (class: class java.lang.Integer)
+Running public static void art.Test1913.NullObjectMethod(java.lang.Runnable) with "GetGetObject" on remote thread.
+"GetGetObject" on public static void art.Test1913.NullObjectMethod(java.lang.Runnable) got value: null
+	Value is 'null' (class: NULL)
+Running public static void art.Test1913.NullObjectMethod(java.lang.Runnable) with "SetNull" on remote thread.
+"SetNull" on public static void art.Test1913.NullObjectMethod(java.lang.Runnable) set value: null
+	Value is 'null' (class: NULL)
+Running public static void art.Test1913.NullObjectMethod(java.lang.Runnable) with "SetTestClass1" on remote thread.
+"SetTestClass1" on public static void art.Test1913.NullObjectMethod(java.lang.Runnable) set value: TestClass1("Set TestClass1")
+	Value is 'TestClass1("Set TestClass1")' (class: class art.Test1913$TestClass1)
+Running public static void art.Test1913.NullObjectMethod(java.lang.Runnable) with "SetTestClass1ext" on remote thread.
+"SetTestClass1ext" on public static void art.Test1913.NullObjectMethod(java.lang.Runnable) set value: TestClass1ext("TestClass1("Set TestClass1ext")")
+	Value is 'TestClass1ext("TestClass1("Set TestClass1ext")")' (class: class art.Test1913$TestClass1ext)
+Running public static void art.Test1913.NullObjectMethod(java.lang.Runnable) with "SetTestClass2" on remote thread.
+"SetTestClass2" on public static void art.Test1913.NullObjectMethod(java.lang.Runnable) set value: TestClass2("Set TestClass2")
+	Value is 'TestClass2("Set TestClass2")' (class: class art.Test1913$TestClass2)
+Running public static void art.Test1913.NullObjectMethod(java.lang.Runnable) with "SetTestClass2impl" on remote thread.
+"SetTestClass2impl" on public static void art.Test1913.NullObjectMethod(java.lang.Runnable) set value: TestClass2impl("TestClass2("Set TestClass2impl")")
+	Value is 'TestClass2impl("TestClass2("Set TestClass2impl")")' (class: class art.Test1913$TestClass2impl)
+Running public static void art.Test1913.NullInterfaceMethod(java.lang.Runnable) with "GetGetObject" on remote thread.
+"GetGetObject" on public static void art.Test1913.NullInterfaceMethod(java.lang.Runnable) got value: null
+	Value is 'null' (class: NULL)
+Running public static void art.Test1913.NullInterfaceMethod(java.lang.Runnable) with "SetNull" on remote thread.
+"SetNull" on public static void art.Test1913.NullInterfaceMethod(java.lang.Runnable) set value: null
+	Value is 'null' (class: NULL)
+Running public static void art.Test1913.NullInterfaceMethod(java.lang.Runnable) with "SetTestClass1" on remote thread.
+"SetTestClass1" on public static void art.Test1913.NullInterfaceMethod(java.lang.Runnable) set value: TestClass1("Set TestClass1")
+	Value is 'TestClass1("Set TestClass1")' (class: class art.Test1913$TestClass1)
+Running public static void art.Test1913.NullInterfaceMethod(java.lang.Runnable) with "SetTestClass1ext" on remote thread.
+"SetTestClass1ext" on public static void art.Test1913.NullInterfaceMethod(java.lang.Runnable) set value: TestClass1ext("TestClass1("Set TestClass1ext")")
+	Value is 'TestClass1ext("TestClass1("Set TestClass1ext")")' (class: class art.Test1913$TestClass1ext)
+Running public static void art.Test1913.NullInterfaceMethod(java.lang.Runnable) with "SetTestClass2" on remote thread.
+"SetTestClass2" on public static void art.Test1913.NullInterfaceMethod(java.lang.Runnable) failed to set value TestClass2("Set TestClass2") due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'null' (class: NULL)
+Running public static void art.Test1913.NullInterfaceMethod(java.lang.Runnable) with "SetTestClass2impl" on remote thread.
+"SetTestClass2impl" on public static void art.Test1913.NullInterfaceMethod(java.lang.Runnable) set value: TestClass2impl("TestClass2("Set TestClass2impl")")
+	Value is 'TestClass2impl("TestClass2("Set TestClass2impl")")' (class: class art.Test1913$TestClass2impl)
+Running public static void art.Test1913.NullSpecificClassMethod(java.lang.Runnable) with "GetGetObject" on remote thread.
+"GetGetObject" on public static void art.Test1913.NullSpecificClassMethod(java.lang.Runnable) got value: null
+	Value is 'null' (class: NULL)
+Running public static void art.Test1913.NullSpecificClassMethod(java.lang.Runnable) with "SetNull" on remote thread.
+"SetNull" on public static void art.Test1913.NullSpecificClassMethod(java.lang.Runnable) set value: null
+	Value is 'null' (class: NULL)
+Running public static void art.Test1913.NullSpecificClassMethod(java.lang.Runnable) with "SetTestClass1" on remote thread.
+"SetTestClass1" on public static void art.Test1913.NullSpecificClassMethod(java.lang.Runnable) set value: TestClass1("Set TestClass1")
+	Value is 'TestClass1("Set TestClass1")' (class: class art.Test1913$TestClass1)
+Running public static void art.Test1913.NullSpecificClassMethod(java.lang.Runnable) with "SetTestClass1ext" on remote thread.
+"SetTestClass1ext" on public static void art.Test1913.NullSpecificClassMethod(java.lang.Runnable) set value: TestClass1ext("TestClass1("Set TestClass1ext")")
+	Value is 'TestClass1ext("TestClass1("Set TestClass1ext")")' (class: class art.Test1913$TestClass1ext)
+Running public static void art.Test1913.NullSpecificClassMethod(java.lang.Runnable) with "SetTestClass2" on remote thread.
+"SetTestClass2" on public static void art.Test1913.NullSpecificClassMethod(java.lang.Runnable) failed to set value TestClass2("Set TestClass2") due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'null' (class: NULL)
+Running public static void art.Test1913.NullSpecificClassMethod(java.lang.Runnable) with "SetTestClass2impl" on remote thread.
+"SetTestClass2impl" on public static void art.Test1913.NullSpecificClassMethod(java.lang.Runnable) failed to set value TestClass2impl("TestClass2("Set TestClass2impl")") due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'null' (class: NULL)
diff --git a/test/1913-get-set-local-objects/src/art/Breakpoint.java b/test/1913-get-set-local-objects/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1913-get-set-local-objects/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1913-get-set-local-objects/src/art/Breakpoint.java b/test/1913-get-set-local-objects/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1913-get-set-local-objects/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1913-get-set-local-objects/src/art/Locals.java b/test/1913-get-set-local-objects/src/art/Locals.java
deleted file mode 100644
index 22e21be..0000000
--- a/test/1913-get-set-local-objects/src/art/Locals.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.Objects;
-
-public class Locals {
-  public static native void EnableLocalVariableAccess();
-
-  public static class VariableDescription {
-    public final long start_location;
-    public final int length;
-    public final String name;
-    public final String signature;
-    public final String generic_signature;
-    public final int slot;
-
-    public VariableDescription(
-        long start, int length, String name, String sig, String gen_sig, int slot) {
-      this.start_location = start;
-      this.length = length;
-      this.name = name;
-      this.signature = sig;
-      this.generic_signature = gen_sig;
-      this.slot = slot;
-    }
-
-    @Override
-    public String toString() {
-      return String.format(
-          "VariableDescription { " +
-            "Sig: '%s', Name: '%s', Gen_sig: '%s', slot: %d, start: %d, len: %d" +
-          "}",
-          this.signature,
-          this.name,
-          this.generic_signature,
-          this.slot,
-          this.start_location,
-          this.length);
-    }
-    public boolean equals(Object other) {
-      if (!(other instanceof VariableDescription)) {
-        return false;
-      } else {
-        VariableDescription v = (VariableDescription)other;
-        return Objects.equals(v.signature, signature) &&
-            Objects.equals(v.name, name) &&
-            Objects.equals(v.generic_signature, generic_signature) &&
-            v.slot == slot &&
-            v.start_location == start_location &&
-            v.length == length;
-      }
-    }
-    public int hashCode() {
-      return Objects.hash(this.signature, this.name, this.generic_signature, this.slot,
-          this.start_location, this.length);
-    }
-  }
-
-  public static native VariableDescription[] GetLocalVariableTable(Executable e);
-
-  public static VariableDescription GetVariableAtLine(
-      Executable e, String name, String sig, int line) throws Exception {
-    return GetVariableAtLocation(e, name, sig, Breakpoint.lineToLocation(e, line));
-  }
-
-  public static VariableDescription GetVariableAtLocation(
-      Executable e, String name, String sig, long loc) {
-    VariableDescription[] vars = GetLocalVariableTable(e);
-    for (VariableDescription var : vars) {
-      if (var.start_location <= loc &&
-          var.length + var.start_location > loc &&
-          var.name.equals(name) &&
-          var.signature.equals(sig)) {
-        return var;
-      }
-    }
-    throw new Error(
-        "Unable to find variable " + name + " (sig: " + sig + ") in " + e + " at loc " + loc);
-  }
-
-  public static native int GetLocalVariableInt(Thread thr, int depth, int slot);
-  public static native long GetLocalVariableLong(Thread thr, int depth, int slot);
-  public static native float GetLocalVariableFloat(Thread thr, int depth, int slot);
-  public static native double GetLocalVariableDouble(Thread thr, int depth, int slot);
-  public static native Object GetLocalVariableObject(Thread thr, int depth, int slot);
-  public static native Object GetLocalInstance(Thread thr, int depth);
-
-  public static void SetLocalVariableInt(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableInt(thr, depth, slot, ((Number)val).intValue());
-  }
-  public static void SetLocalVariableLong(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableLong(thr, depth, slot, ((Number)val).longValue());
-  }
-  public static void SetLocalVariableFloat(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableFloat(thr, depth, slot, ((Number)val).floatValue());
-  }
-  public static void SetLocalVariableDouble(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableDouble(thr, depth, slot, ((Number)val).doubleValue());
-  }
-  public static native void SetLocalVariableInt(Thread thr, int depth, int slot, int val);
-  public static native void SetLocalVariableLong(Thread thr, int depth, int slot, long val);
-  public static native void SetLocalVariableFloat(Thread thr, int depth, int slot, float val);
-  public static native void SetLocalVariableDouble(Thread thr, int depth, int slot, double val);
-  public static native void SetLocalVariableObject(Thread thr, int depth, int slot, Object val);
-}
diff --git a/test/1913-get-set-local-objects/src/art/Locals.java b/test/1913-get-set-local-objects/src/art/Locals.java
new file mode 120000
index 0000000..2998386
--- /dev/null
+++ b/test/1913-get-set-local-objects/src/art/Locals.java
@@ -0,0 +1 @@
+../../../jvmti-common/Locals.java
\ No newline at end of file
diff --git a/test/1913-get-set-local-objects/src/art/StackTrace.java b/test/1913-get-set-local-objects/src/art/StackTrace.java
deleted file mode 100644
index 2ea2f20..0000000
--- a/test/1913-get-set-local-objects/src/art/StackTrace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Executable;
-
-public class StackTrace {
-  public static class StackFrameData {
-    public final Thread thr;
-    public final Executable method;
-    public final long current_location;
-    public final int depth;
-
-    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
-      this.thr = thr;
-      this.method = e;
-      this.current_location = loc;
-      this.depth = depth;
-    }
-    @Override
-    public String toString() {
-      return String.format(
-          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
-          this.thr,
-          this.method,
-          this.current_location,
-          this.depth);
-    }
-  }
-
-  public static native int GetStackDepth(Thread thr);
-
-  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
-
-  public static StackFrameData[] GetStackTrace(Thread thr) {
-    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
-    // suspended. The spec says that not being suspended is fine but since we want this to be
-    // consistent we will suspend for the RI.
-    boolean suspend_thread =
-        !System.getProperty("java.vm.name").equals("Dalvik") &&
-        !thr.equals(Thread.currentThread()) &&
-        !Suspension.isSuspended(thr);
-    if (suspend_thread) {
-      Suspension.suspend(thr);
-    }
-    StackFrameData[] out = nativeGetStackTrace(thr);
-    if (suspend_thread) {
-      Suspension.resume(thr);
-    }
-    return out;
-  }
-}
-
diff --git a/test/1913-get-set-local-objects/src/art/StackTrace.java b/test/1913-get-set-local-objects/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1913-get-set-local-objects/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1913-get-set-local-objects/src/art/Suspension.java b/test/1913-get-set-local-objects/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1913-get-set-local-objects/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1913-get-set-local-objects/src/art/Suspension.java b/test/1913-get-set-local-objects/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1913-get-set-local-objects/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1913-get-set-local-objects/src/art/Test1913.java b/test/1913-get-set-local-objects/src/art/Test1913.java
index 417138a..df86493 100644
--- a/test/1913-get-set-local-objects/src/art/Test1913.java
+++ b/test/1913-get-set-local-objects/src/art/Test1913.java
@@ -20,15 +20,15 @@
 import java.lang.reflect.Executable;
 import java.lang.reflect.Method;
 import java.nio.ByteBuffer;
-import java.util.concurrent.Semaphore;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.Semaphore;
+import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.function.Predicate;
 import java.util.function.Supplier;
-import java.util.function.Consumer;
 
 public class Test1913 {
   public static final String TARGET_VAR = "TARGET";
@@ -38,27 +38,43 @@
   }
   public static class TestClass1 implements TestInterface {
     public String id;
-    public TestClass1(String id) { this.id = id; }
-    public String toString() { return String.format("TestClass1(\"%s\")", id); }
+    public TestClass1(String id) {
+      this.id = id;
+    }
+    public String toString() {
+      return String.format("TestClass1(\"%s\")", id);
+    }
   }
 
   public static class TestClass1ext extends TestClass1 {
-    public TestClass1ext(String id) { super(id); }
-    public String toString() { return String.format("TestClass1ext(\"%s\")", super.toString()); }
+    public TestClass1ext(String id) {
+      super(id);
+    }
+    public String toString() {
+      return String.format("TestClass1ext(\"%s\")", super.toString());
+    }
   }
   public static class TestClass2 {
     public String id;
-    public TestClass2(String id) { this.id = id; }
-    public String toString() { return String.format("TestClass2(\"%s\")", id); }
+    public TestClass2(String id) {
+      this.id = id;
+    }
+    public String toString() {
+      return String.format("TestClass2(\"%s\")", id);
+    }
   }
   public static class TestClass2impl extends TestClass2 implements TestInterface {
-    public TestClass2impl(String id) { super(id); }
-    public String toString() { return String.format("TestClass2impl(\"%s\")", super.toString()); }
+    public TestClass2impl(String id) {
+      super(id);
+    }
+    public String toString() {
+      return String.format("TestClass2impl(\"%s\")", super.toString());
+    }
   }
 
   public static void reportValue(Object val) {
-    System.out.println("\tValue is '" + val + "' (class: "
-        + (val != null ? val.getClass() : "NULL") + ")");
+    System.out.println("\tValue is '" + val +
+                       "' (class: " + (val != null ? val.getClass() : "NULL") + ")");
   }
 
   public static void PrimitiveMethod(Runnable safepoint) {
@@ -68,7 +84,28 @@
   }
 
   // b/64115302: Needed to make sure that DX doesn't change the type of TARGET to TestClass1.
-  private static Object AsObject(Object o) { return o; }
+  private static Object AsObject(Object o) {
+    return o;
+  }
+
+  public static void NullObjectMethod(Runnable safepoint) {
+    Object TARGET = null;
+    safepoint.run();
+    reportValue(TARGET);
+  }
+
+  public static void NullInterfaceMethod(Runnable safepoint) {
+    TestInterface TARGET = null;
+    safepoint.run();
+    reportValue(TARGET);
+  }
+
+  public static void NullSpecificClassMethod(Runnable safepoint) {
+    TestClass1 TARGET = null;
+    safepoint.run();
+    reportValue(TARGET);
+  }
+
   public static void ObjectMethod(Runnable safepoint) {
     Object TARGET = AsObject(new TestClass1("ObjectMethod"));
     safepoint.run();
@@ -88,31 +125,27 @@
   }
 
   public static interface SafepointFunction {
-    public void invoke(
-        Thread thread,
-        Method target,
-        Locals.VariableDescription TARGET_desc,
-        int depth) throws Exception;
+    public void
+    invoke(Thread thread, Method target, Locals.VariableDescription TARGET_desc, int depth)
+        throws Exception;
   }
 
   public static interface SetterFunction {
     public void SetVar(Thread t, int depth, int slot, Object v);
   }
 
-  public static interface GetterFunction {
-    public Object GetVar(Thread t, int depth, int slot);
-  }
+  public static interface GetterFunction { public Object GetVar(Thread t, int depth, int slot); }
 
-  public static SafepointFunction NamedSet(
-      final String type, final SetterFunction get, final Object v) {
+  public static SafepointFunction
+  NamedSet(final String type, final SetterFunction get, final Object v) {
     return new SafepointFunction() {
       public void invoke(Thread t, Method method, Locals.VariableDescription desc, int depth) {
         try {
           get.SetVar(t, depth, desc.slot, v);
           System.out.println(this + " on " + method + " set value: " + v);
         } catch (Exception e) {
-          System.out.println(
-              this + " on " + method + " failed to set value " + v + " due to " + e.getMessage());
+          System.out.println(this + " on " + method + " failed to set value " + v + " due to " +
+                             e.getMessage());
         }
       }
       public String toString() {
@@ -174,15 +207,13 @@
     public void exec(final SafepointFunction safepoint) throws Exception {
       System.out.println("Running " + target + " with " + safepoint + " on remote thread.");
       final ThreadPauser pause = new ThreadPauser();
-      Thread remote = new Thread(
-          () -> {
-            try {
-              target.invoke(null, pause);
-            } catch (Exception e) {
-              throw new Error("Error invoking remote thread " + Thread.currentThread(), e);
-            }
-          },
-          "remote thread for " + target + " with " + safepoint);
+      Thread remote = new Thread(() -> {
+        try {
+          target.invoke(null, pause);
+        } catch (Exception e) {
+          throw new Error("Error invoking remote thread " + Thread.currentThread(), e);
+        }
+      }, "remote thread for " + target + " with " + safepoint);
       remote.start();
       pause.waitForOtherThreadToPause();
       try {
@@ -199,14 +230,12 @@
 
     private Locals.VariableDescription findTargetVar(long loc) {
       for (Locals.VariableDescription var : Locals.GetLocalVariableTable(target)) {
-        if (var.start_location <= loc &&
-            var.length + var.start_location > loc &&
+        if (var.start_location <= loc && var.length + var.start_location > loc &&
             var.name.equals(TARGET_VAR)) {
           return var;
         }
       }
-      throw new Error(
-          "Unable to find variable " + TARGET_VAR + " in " + target + " at loc " + loc);
+      throw new Error("Unable to find variable " + TARGET_VAR + " in " + target + " at loc " + loc);
     }
 
     private StackTrace.StackFrameData findStackFrame(Thread thr) {
@@ -225,27 +254,27 @@
   public static void run() throws Exception {
     Locals.EnableLocalVariableAccess();
     final TestCase[] MAIN_TEST_CASES = new TestCase[] {
-      new TestCase(getMethod("ObjectMethod")),
-      new TestCase(getMethod("InterfaceMethod")),
-      new TestCase(getMethod("SpecificClassMethod")),
-      new TestCase(getMethod("PrimitiveMethod")),
+      new TestCase(getMethod("ObjectMethod")),        new TestCase(getMethod("InterfaceMethod")),
+      new TestCase(getMethod("SpecificClassMethod")), new TestCase(getMethod("PrimitiveMethod")),
+      new TestCase(getMethod("NullObjectMethod")),
+      new TestCase(getMethod("NullInterfaceMethod")),
+      new TestCase(getMethod("NullSpecificClassMethod")),
     };
 
     final SetterFunction set_obj = Locals::SetLocalVariableObject;
     final SafepointFunction[] SAFEPOINTS = new SafepointFunction[] {
-      NamedGet("GetObject",      Locals::GetLocalVariableObject),
-      NamedSet("Null",           set_obj, null),
-      NamedSet("TestClass1",     set_obj, new TestClass1("Set TestClass1")),
-      NamedSet("TestClass1ext",  set_obj, new TestClass1ext("Set TestClass1ext")),
-      NamedSet("TestClass2",     set_obj, new TestClass2("Set TestClass2")),
+      NamedGet("GetObject", Locals::GetLocalVariableObject),
+      NamedSet("Null", set_obj, null),
+      NamedSet("TestClass1", set_obj, new TestClass1("Set TestClass1")),
+      NamedSet("TestClass1ext", set_obj, new TestClass1ext("Set TestClass1ext")),
+      NamedSet("TestClass2", set_obj, new TestClass2("Set TestClass2")),
       NamedSet("TestClass2impl", set_obj, new TestClass2impl("Set TestClass2impl")),
     };
 
-    for (TestCase t: MAIN_TEST_CASES) {
+    for (TestCase t : MAIN_TEST_CASES) {
       for (SafepointFunction s : SAFEPOINTS) {
         t.exec(s);
       }
     }
   }
 }
-
diff --git a/test/1914-get-local-instance/src/art/Breakpoint.java b/test/1914-get-local-instance/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1914-get-local-instance/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1914-get-local-instance/src/art/Breakpoint.java b/test/1914-get-local-instance/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1914-get-local-instance/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1914-get-local-instance/src/art/Locals.java b/test/1914-get-local-instance/src/art/Locals.java
deleted file mode 100644
index 22e21be..0000000
--- a/test/1914-get-local-instance/src/art/Locals.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.Objects;
-
-public class Locals {
-  public static native void EnableLocalVariableAccess();
-
-  public static class VariableDescription {
-    public final long start_location;
-    public final int length;
-    public final String name;
-    public final String signature;
-    public final String generic_signature;
-    public final int slot;
-
-    public VariableDescription(
-        long start, int length, String name, String sig, String gen_sig, int slot) {
-      this.start_location = start;
-      this.length = length;
-      this.name = name;
-      this.signature = sig;
-      this.generic_signature = gen_sig;
-      this.slot = slot;
-    }
-
-    @Override
-    public String toString() {
-      return String.format(
-          "VariableDescription { " +
-            "Sig: '%s', Name: '%s', Gen_sig: '%s', slot: %d, start: %d, len: %d" +
-          "}",
-          this.signature,
-          this.name,
-          this.generic_signature,
-          this.slot,
-          this.start_location,
-          this.length);
-    }
-    public boolean equals(Object other) {
-      if (!(other instanceof VariableDescription)) {
-        return false;
-      } else {
-        VariableDescription v = (VariableDescription)other;
-        return Objects.equals(v.signature, signature) &&
-            Objects.equals(v.name, name) &&
-            Objects.equals(v.generic_signature, generic_signature) &&
-            v.slot == slot &&
-            v.start_location == start_location &&
-            v.length == length;
-      }
-    }
-    public int hashCode() {
-      return Objects.hash(this.signature, this.name, this.generic_signature, this.slot,
-          this.start_location, this.length);
-    }
-  }
-
-  public static native VariableDescription[] GetLocalVariableTable(Executable e);
-
-  public static VariableDescription GetVariableAtLine(
-      Executable e, String name, String sig, int line) throws Exception {
-    return GetVariableAtLocation(e, name, sig, Breakpoint.lineToLocation(e, line));
-  }
-
-  public static VariableDescription GetVariableAtLocation(
-      Executable e, String name, String sig, long loc) {
-    VariableDescription[] vars = GetLocalVariableTable(e);
-    for (VariableDescription var : vars) {
-      if (var.start_location <= loc &&
-          var.length + var.start_location > loc &&
-          var.name.equals(name) &&
-          var.signature.equals(sig)) {
-        return var;
-      }
-    }
-    throw new Error(
-        "Unable to find variable " + name + " (sig: " + sig + ") in " + e + " at loc " + loc);
-  }
-
-  public static native int GetLocalVariableInt(Thread thr, int depth, int slot);
-  public static native long GetLocalVariableLong(Thread thr, int depth, int slot);
-  public static native float GetLocalVariableFloat(Thread thr, int depth, int slot);
-  public static native double GetLocalVariableDouble(Thread thr, int depth, int slot);
-  public static native Object GetLocalVariableObject(Thread thr, int depth, int slot);
-  public static native Object GetLocalInstance(Thread thr, int depth);
-
-  public static void SetLocalVariableInt(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableInt(thr, depth, slot, ((Number)val).intValue());
-  }
-  public static void SetLocalVariableLong(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableLong(thr, depth, slot, ((Number)val).longValue());
-  }
-  public static void SetLocalVariableFloat(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableFloat(thr, depth, slot, ((Number)val).floatValue());
-  }
-  public static void SetLocalVariableDouble(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableDouble(thr, depth, slot, ((Number)val).doubleValue());
-  }
-  public static native void SetLocalVariableInt(Thread thr, int depth, int slot, int val);
-  public static native void SetLocalVariableLong(Thread thr, int depth, int slot, long val);
-  public static native void SetLocalVariableFloat(Thread thr, int depth, int slot, float val);
-  public static native void SetLocalVariableDouble(Thread thr, int depth, int slot, double val);
-  public static native void SetLocalVariableObject(Thread thr, int depth, int slot, Object val);
-}
diff --git a/test/1914-get-local-instance/src/art/Locals.java b/test/1914-get-local-instance/src/art/Locals.java
new file mode 120000
index 0000000..2998386
--- /dev/null
+++ b/test/1914-get-local-instance/src/art/Locals.java
@@ -0,0 +1 @@
+../../../jvmti-common/Locals.java
\ No newline at end of file
diff --git a/test/1914-get-local-instance/src/art/StackTrace.java b/test/1914-get-local-instance/src/art/StackTrace.java
deleted file mode 100644
index 2ea2f20..0000000
--- a/test/1914-get-local-instance/src/art/StackTrace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Executable;
-
-public class StackTrace {
-  public static class StackFrameData {
-    public final Thread thr;
-    public final Executable method;
-    public final long current_location;
-    public final int depth;
-
-    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
-      this.thr = thr;
-      this.method = e;
-      this.current_location = loc;
-      this.depth = depth;
-    }
-    @Override
-    public String toString() {
-      return String.format(
-          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
-          this.thr,
-          this.method,
-          this.current_location,
-          this.depth);
-    }
-  }
-
-  public static native int GetStackDepth(Thread thr);
-
-  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
-
-  public static StackFrameData[] GetStackTrace(Thread thr) {
-    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
-    // suspended. The spec says that not being suspended is fine but since we want this to be
-    // consistent we will suspend for the RI.
-    boolean suspend_thread =
-        !System.getProperty("java.vm.name").equals("Dalvik") &&
-        !thr.equals(Thread.currentThread()) &&
-        !Suspension.isSuspended(thr);
-    if (suspend_thread) {
-      Suspension.suspend(thr);
-    }
-    StackFrameData[] out = nativeGetStackTrace(thr);
-    if (suspend_thread) {
-      Suspension.resume(thr);
-    }
-    return out;
-  }
-}
-
diff --git a/test/1914-get-local-instance/src/art/StackTrace.java b/test/1914-get-local-instance/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1914-get-local-instance/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1914-get-local-instance/src/art/Suspension.java b/test/1914-get-local-instance/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1914-get-local-instance/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1914-get-local-instance/src/art/Suspension.java b/test/1914-get-local-instance/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1914-get-local-instance/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1915-get-set-local-current-thread/src/art/Breakpoint.java b/test/1915-get-set-local-current-thread/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1915-get-set-local-current-thread/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1915-get-set-local-current-thread/src/art/Breakpoint.java b/test/1915-get-set-local-current-thread/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1915-get-set-local-current-thread/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1915-get-set-local-current-thread/src/art/Locals.java b/test/1915-get-set-local-current-thread/src/art/Locals.java
deleted file mode 100644
index 22e21be..0000000
--- a/test/1915-get-set-local-current-thread/src/art/Locals.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.Objects;
-
-public class Locals {
-  public static native void EnableLocalVariableAccess();
-
-  public static class VariableDescription {
-    public final long start_location;
-    public final int length;
-    public final String name;
-    public final String signature;
-    public final String generic_signature;
-    public final int slot;
-
-    public VariableDescription(
-        long start, int length, String name, String sig, String gen_sig, int slot) {
-      this.start_location = start;
-      this.length = length;
-      this.name = name;
-      this.signature = sig;
-      this.generic_signature = gen_sig;
-      this.slot = slot;
-    }
-
-    @Override
-    public String toString() {
-      return String.format(
-          "VariableDescription { " +
-            "Sig: '%s', Name: '%s', Gen_sig: '%s', slot: %d, start: %d, len: %d" +
-          "}",
-          this.signature,
-          this.name,
-          this.generic_signature,
-          this.slot,
-          this.start_location,
-          this.length);
-    }
-    public boolean equals(Object other) {
-      if (!(other instanceof VariableDescription)) {
-        return false;
-      } else {
-        VariableDescription v = (VariableDescription)other;
-        return Objects.equals(v.signature, signature) &&
-            Objects.equals(v.name, name) &&
-            Objects.equals(v.generic_signature, generic_signature) &&
-            v.slot == slot &&
-            v.start_location == start_location &&
-            v.length == length;
-      }
-    }
-    public int hashCode() {
-      return Objects.hash(this.signature, this.name, this.generic_signature, this.slot,
-          this.start_location, this.length);
-    }
-  }
-
-  public static native VariableDescription[] GetLocalVariableTable(Executable e);
-
-  public static VariableDescription GetVariableAtLine(
-      Executable e, String name, String sig, int line) throws Exception {
-    return GetVariableAtLocation(e, name, sig, Breakpoint.lineToLocation(e, line));
-  }
-
-  public static VariableDescription GetVariableAtLocation(
-      Executable e, String name, String sig, long loc) {
-    VariableDescription[] vars = GetLocalVariableTable(e);
-    for (VariableDescription var : vars) {
-      if (var.start_location <= loc &&
-          var.length + var.start_location > loc &&
-          var.name.equals(name) &&
-          var.signature.equals(sig)) {
-        return var;
-      }
-    }
-    throw new Error(
-        "Unable to find variable " + name + " (sig: " + sig + ") in " + e + " at loc " + loc);
-  }
-
-  public static native int GetLocalVariableInt(Thread thr, int depth, int slot);
-  public static native long GetLocalVariableLong(Thread thr, int depth, int slot);
-  public static native float GetLocalVariableFloat(Thread thr, int depth, int slot);
-  public static native double GetLocalVariableDouble(Thread thr, int depth, int slot);
-  public static native Object GetLocalVariableObject(Thread thr, int depth, int slot);
-  public static native Object GetLocalInstance(Thread thr, int depth);
-
-  public static void SetLocalVariableInt(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableInt(thr, depth, slot, ((Number)val).intValue());
-  }
-  public static void SetLocalVariableLong(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableLong(thr, depth, slot, ((Number)val).longValue());
-  }
-  public static void SetLocalVariableFloat(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableFloat(thr, depth, slot, ((Number)val).floatValue());
-  }
-  public static void SetLocalVariableDouble(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableDouble(thr, depth, slot, ((Number)val).doubleValue());
-  }
-  public static native void SetLocalVariableInt(Thread thr, int depth, int slot, int val);
-  public static native void SetLocalVariableLong(Thread thr, int depth, int slot, long val);
-  public static native void SetLocalVariableFloat(Thread thr, int depth, int slot, float val);
-  public static native void SetLocalVariableDouble(Thread thr, int depth, int slot, double val);
-  public static native void SetLocalVariableObject(Thread thr, int depth, int slot, Object val);
-}
diff --git a/test/1915-get-set-local-current-thread/src/art/Locals.java b/test/1915-get-set-local-current-thread/src/art/Locals.java
new file mode 120000
index 0000000..2998386
--- /dev/null
+++ b/test/1915-get-set-local-current-thread/src/art/Locals.java
@@ -0,0 +1 @@
+../../../jvmti-common/Locals.java
\ No newline at end of file
diff --git a/test/1915-get-set-local-current-thread/src/art/StackTrace.java b/test/1915-get-set-local-current-thread/src/art/StackTrace.java
deleted file mode 100644
index 2ea2f20..0000000
--- a/test/1915-get-set-local-current-thread/src/art/StackTrace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Executable;
-
-public class StackTrace {
-  public static class StackFrameData {
-    public final Thread thr;
-    public final Executable method;
-    public final long current_location;
-    public final int depth;
-
-    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
-      this.thr = thr;
-      this.method = e;
-      this.current_location = loc;
-      this.depth = depth;
-    }
-    @Override
-    public String toString() {
-      return String.format(
-          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
-          this.thr,
-          this.method,
-          this.current_location,
-          this.depth);
-    }
-  }
-
-  public static native int GetStackDepth(Thread thr);
-
-  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
-
-  public static StackFrameData[] GetStackTrace(Thread thr) {
-    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
-    // suspended. The spec says that not being suspended is fine but since we want this to be
-    // consistent we will suspend for the RI.
-    boolean suspend_thread =
-        !System.getProperty("java.vm.name").equals("Dalvik") &&
-        !thr.equals(Thread.currentThread()) &&
-        !Suspension.isSuspended(thr);
-    if (suspend_thread) {
-      Suspension.suspend(thr);
-    }
-    StackFrameData[] out = nativeGetStackTrace(thr);
-    if (suspend_thread) {
-      Suspension.resume(thr);
-    }
-    return out;
-  }
-}
-
diff --git a/test/1915-get-set-local-current-thread/src/art/StackTrace.java b/test/1915-get-set-local-current-thread/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1915-get-set-local-current-thread/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1915-get-set-local-current-thread/src/art/Suspension.java b/test/1915-get-set-local-current-thread/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1915-get-set-local-current-thread/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1915-get-set-local-current-thread/src/art/Suspension.java b/test/1915-get-set-local-current-thread/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1915-get-set-local-current-thread/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1916-get-set-current-frame/src/art/Breakpoint.java b/test/1916-get-set-current-frame/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1916-get-set-current-frame/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1916-get-set-current-frame/src/art/Breakpoint.java b/test/1916-get-set-current-frame/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1916-get-set-current-frame/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1916-get-set-current-frame/src/art/Locals.java b/test/1916-get-set-current-frame/src/art/Locals.java
deleted file mode 100644
index 22e21be..0000000
--- a/test/1916-get-set-current-frame/src/art/Locals.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.Objects;
-
-public class Locals {
-  public static native void EnableLocalVariableAccess();
-
-  public static class VariableDescription {
-    public final long start_location;
-    public final int length;
-    public final String name;
-    public final String signature;
-    public final String generic_signature;
-    public final int slot;
-
-    public VariableDescription(
-        long start, int length, String name, String sig, String gen_sig, int slot) {
-      this.start_location = start;
-      this.length = length;
-      this.name = name;
-      this.signature = sig;
-      this.generic_signature = gen_sig;
-      this.slot = slot;
-    }
-
-    @Override
-    public String toString() {
-      return String.format(
-          "VariableDescription { " +
-            "Sig: '%s', Name: '%s', Gen_sig: '%s', slot: %d, start: %d, len: %d" +
-          "}",
-          this.signature,
-          this.name,
-          this.generic_signature,
-          this.slot,
-          this.start_location,
-          this.length);
-    }
-    public boolean equals(Object other) {
-      if (!(other instanceof VariableDescription)) {
-        return false;
-      } else {
-        VariableDescription v = (VariableDescription)other;
-        return Objects.equals(v.signature, signature) &&
-            Objects.equals(v.name, name) &&
-            Objects.equals(v.generic_signature, generic_signature) &&
-            v.slot == slot &&
-            v.start_location == start_location &&
-            v.length == length;
-      }
-    }
-    public int hashCode() {
-      return Objects.hash(this.signature, this.name, this.generic_signature, this.slot,
-          this.start_location, this.length);
-    }
-  }
-
-  public static native VariableDescription[] GetLocalVariableTable(Executable e);
-
-  public static VariableDescription GetVariableAtLine(
-      Executable e, String name, String sig, int line) throws Exception {
-    return GetVariableAtLocation(e, name, sig, Breakpoint.lineToLocation(e, line));
-  }
-
-  public static VariableDescription GetVariableAtLocation(
-      Executable e, String name, String sig, long loc) {
-    VariableDescription[] vars = GetLocalVariableTable(e);
-    for (VariableDescription var : vars) {
-      if (var.start_location <= loc &&
-          var.length + var.start_location > loc &&
-          var.name.equals(name) &&
-          var.signature.equals(sig)) {
-        return var;
-      }
-    }
-    throw new Error(
-        "Unable to find variable " + name + " (sig: " + sig + ") in " + e + " at loc " + loc);
-  }
-
-  public static native int GetLocalVariableInt(Thread thr, int depth, int slot);
-  public static native long GetLocalVariableLong(Thread thr, int depth, int slot);
-  public static native float GetLocalVariableFloat(Thread thr, int depth, int slot);
-  public static native double GetLocalVariableDouble(Thread thr, int depth, int slot);
-  public static native Object GetLocalVariableObject(Thread thr, int depth, int slot);
-  public static native Object GetLocalInstance(Thread thr, int depth);
-
-  public static void SetLocalVariableInt(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableInt(thr, depth, slot, ((Number)val).intValue());
-  }
-  public static void SetLocalVariableLong(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableLong(thr, depth, slot, ((Number)val).longValue());
-  }
-  public static void SetLocalVariableFloat(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableFloat(thr, depth, slot, ((Number)val).floatValue());
-  }
-  public static void SetLocalVariableDouble(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableDouble(thr, depth, slot, ((Number)val).doubleValue());
-  }
-  public static native void SetLocalVariableInt(Thread thr, int depth, int slot, int val);
-  public static native void SetLocalVariableLong(Thread thr, int depth, int slot, long val);
-  public static native void SetLocalVariableFloat(Thread thr, int depth, int slot, float val);
-  public static native void SetLocalVariableDouble(Thread thr, int depth, int slot, double val);
-  public static native void SetLocalVariableObject(Thread thr, int depth, int slot, Object val);
-}
diff --git a/test/1916-get-set-current-frame/src/art/Locals.java b/test/1916-get-set-current-frame/src/art/Locals.java
new file mode 120000
index 0000000..2998386
--- /dev/null
+++ b/test/1916-get-set-current-frame/src/art/Locals.java
@@ -0,0 +1 @@
+../../../jvmti-common/Locals.java
\ No newline at end of file
diff --git a/test/1916-get-set-current-frame/src/art/StackTrace.java b/test/1916-get-set-current-frame/src/art/StackTrace.java
deleted file mode 100644
index 2ea2f20..0000000
--- a/test/1916-get-set-current-frame/src/art/StackTrace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Executable;
-
-public class StackTrace {
-  public static class StackFrameData {
-    public final Thread thr;
-    public final Executable method;
-    public final long current_location;
-    public final int depth;
-
-    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
-      this.thr = thr;
-      this.method = e;
-      this.current_location = loc;
-      this.depth = depth;
-    }
-    @Override
-    public String toString() {
-      return String.format(
-          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
-          this.thr,
-          this.method,
-          this.current_location,
-          this.depth);
-    }
-  }
-
-  public static native int GetStackDepth(Thread thr);
-
-  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
-
-  public static StackFrameData[] GetStackTrace(Thread thr) {
-    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
-    // suspended. The spec says that not being suspended is fine but since we want this to be
-    // consistent we will suspend for the RI.
-    boolean suspend_thread =
-        !System.getProperty("java.vm.name").equals("Dalvik") &&
-        !thr.equals(Thread.currentThread()) &&
-        !Suspension.isSuspended(thr);
-    if (suspend_thread) {
-      Suspension.suspend(thr);
-    }
-    StackFrameData[] out = nativeGetStackTrace(thr);
-    if (suspend_thread) {
-      Suspension.resume(thr);
-    }
-    return out;
-  }
-}
-
diff --git a/test/1916-get-set-current-frame/src/art/StackTrace.java b/test/1916-get-set-current-frame/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1916-get-set-current-frame/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1916-get-set-current-frame/src/art/Suspension.java b/test/1916-get-set-current-frame/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1916-get-set-current-frame/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1916-get-set-current-frame/src/art/Suspension.java b/test/1916-get-set-current-frame/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1916-get-set-current-frame/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1917-get-stack-frame/src/art/Breakpoint.java b/test/1917-get-stack-frame/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1917-get-stack-frame/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1917-get-stack-frame/src/art/Breakpoint.java b/test/1917-get-stack-frame/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1917-get-stack-frame/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1917-get-stack-frame/src/art/StackTrace.java b/test/1917-get-stack-frame/src/art/StackTrace.java
deleted file mode 100644
index 2ea2f20..0000000
--- a/test/1917-get-stack-frame/src/art/StackTrace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Executable;
-
-public class StackTrace {
-  public static class StackFrameData {
-    public final Thread thr;
-    public final Executable method;
-    public final long current_location;
-    public final int depth;
-
-    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
-      this.thr = thr;
-      this.method = e;
-      this.current_location = loc;
-      this.depth = depth;
-    }
-    @Override
-    public String toString() {
-      return String.format(
-          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
-          this.thr,
-          this.method,
-          this.current_location,
-          this.depth);
-    }
-  }
-
-  public static native int GetStackDepth(Thread thr);
-
-  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
-
-  public static StackFrameData[] GetStackTrace(Thread thr) {
-    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
-    // suspended. The spec says that not being suspended is fine but since we want this to be
-    // consistent we will suspend for the RI.
-    boolean suspend_thread =
-        !System.getProperty("java.vm.name").equals("Dalvik") &&
-        !thr.equals(Thread.currentThread()) &&
-        !Suspension.isSuspended(thr);
-    if (suspend_thread) {
-      Suspension.suspend(thr);
-    }
-    StackFrameData[] out = nativeGetStackTrace(thr);
-    if (suspend_thread) {
-      Suspension.resume(thr);
-    }
-    return out;
-  }
-}
-
diff --git a/test/1917-get-stack-frame/src/art/StackTrace.java b/test/1917-get-stack-frame/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1917-get-stack-frame/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1917-get-stack-frame/src/art/Suspension.java b/test/1917-get-stack-frame/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1917-get-stack-frame/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1917-get-stack-frame/src/art/Suspension.java b/test/1917-get-stack-frame/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1917-get-stack-frame/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1919-vminit-thread-start-timing/src/art/Main.java b/test/1919-vminit-thread-start-timing/src/art/Main.java
deleted file mode 100644
index 8b01920..0000000
--- a/test/1919-vminit-thread-start-timing/src/art/Main.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-// Binder class so the agent's C code has something that can be bound and exposed to tests.
-// In a package to separate cleanly and work around CTS reference issues (though this class
-// should be replaced in the CTS version).
-public class Main {
-  // Load the given class with the given classloader, and bind all native methods to corresponding
-  // C methods in the agent. Will abort if any of the steps fail.
-  public static native void bindAgentJNI(String className, ClassLoader classLoader);
-  // Same as above, giving the class directly.
-  public static native void bindAgentJNIForClass(Class<?> klass);
-}
diff --git a/test/1919-vminit-thread-start-timing/src/art/Main.java b/test/1919-vminit-thread-start-timing/src/art/Main.java
new file mode 120000
index 0000000..84ae4ac
--- /dev/null
+++ b/test/1919-vminit-thread-start-timing/src/art/Main.java
@@ -0,0 +1 @@
+../../../jvmti-common/Main.java
\ No newline at end of file
diff --git a/test/1920-suspend-native-monitor/src/art/Suspension.java b/test/1920-suspend-native-monitor/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1920-suspend-native-monitor/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1920-suspend-native-monitor/src/art/Suspension.java b/test/1920-suspend-native-monitor/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1920-suspend-native-monitor/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1921-suspend-native-recursive-monitor/src/art/Suspension.java b/test/1921-suspend-native-recursive-monitor/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1921-suspend-native-recursive-monitor/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1921-suspend-native-recursive-monitor/src/art/Suspension.java b/test/1921-suspend-native-recursive-monitor/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1921-suspend-native-recursive-monitor/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1922-owned-monitors-info/src/art/Suspension.java b/test/1922-owned-monitors-info/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1922-owned-monitors-info/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1922-owned-monitors-info/src/art/Suspension.java b/test/1922-owned-monitors-info/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1922-owned-monitors-info/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1923-frame-pop/src/art/Breakpoint.java b/test/1923-frame-pop/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1923-frame-pop/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1923-frame-pop/src/art/FramePop.java b/test/1923-frame-pop/src/art/FramePop.java
new file mode 120000
index 0000000..3e573af
--- /dev/null
+++ b/test/1923-frame-pop/src/art/FramePop.java
@@ -0,0 +1 @@
+../../../jvmti-common/FramePop.java
\ No newline at end of file
diff --git a/test/1923-frame-pop/src/art/Locals.java b/test/1923-frame-pop/src/art/Locals.java
new file mode 120000
index 0000000..2998386
--- /dev/null
+++ b/test/1923-frame-pop/src/art/Locals.java
@@ -0,0 +1 @@
+../../../jvmti-common/Locals.java
\ No newline at end of file
diff --git a/test/1923-frame-pop/src/art/StackTrace.java b/test/1923-frame-pop/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1923-frame-pop/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1923-frame-pop/src/art/Suspension.java b/test/1923-frame-pop/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1923-frame-pop/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1923-frame-pop/src/art/Suspension.java b/test/1923-frame-pop/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1923-frame-pop/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1923-frame-pop/src/art/Trace.java b/test/1923-frame-pop/src/art/Trace.java
new file mode 120000
index 0000000..5d9b44b
--- /dev/null
+++ b/test/1923-frame-pop/src/art/Trace.java
@@ -0,0 +1 @@
+../../../jvmti-common/Trace.java
\ No newline at end of file
diff --git a/test/1924-frame-pop-toggle/src/art/Breakpoint.java b/test/1924-frame-pop-toggle/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1924-frame-pop-toggle/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1924-frame-pop-toggle/src/art/Breakpoint.java b/test/1924-frame-pop-toggle/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1924-frame-pop-toggle/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1924-frame-pop-toggle/src/art/FramePop.java b/test/1924-frame-pop-toggle/src/art/FramePop.java
deleted file mode 100644
index 86bf226..0000000
--- a/test/1924-frame-pop-toggle/src/art/FramePop.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Method;
-
-public class FramePop {
-  public static native void enableFramePopEvent(Class klass, Method method, Thread thr)
-      throws Exception;
-  public static native void notifyFramePop(Thread target, int depth) throws Exception;
-}
diff --git a/test/1924-frame-pop-toggle/src/art/FramePop.java b/test/1924-frame-pop-toggle/src/art/FramePop.java
new file mode 120000
index 0000000..3e573af
--- /dev/null
+++ b/test/1924-frame-pop-toggle/src/art/FramePop.java
@@ -0,0 +1 @@
+../../../jvmti-common/FramePop.java
\ No newline at end of file
diff --git a/test/1924-frame-pop-toggle/src/art/Locals.java b/test/1924-frame-pop-toggle/src/art/Locals.java
deleted file mode 100644
index 22e21be..0000000
--- a/test/1924-frame-pop-toggle/src/art/Locals.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.Objects;
-
-public class Locals {
-  public static native void EnableLocalVariableAccess();
-
-  public static class VariableDescription {
-    public final long start_location;
-    public final int length;
-    public final String name;
-    public final String signature;
-    public final String generic_signature;
-    public final int slot;
-
-    public VariableDescription(
-        long start, int length, String name, String sig, String gen_sig, int slot) {
-      this.start_location = start;
-      this.length = length;
-      this.name = name;
-      this.signature = sig;
-      this.generic_signature = gen_sig;
-      this.slot = slot;
-    }
-
-    @Override
-    public String toString() {
-      return String.format(
-          "VariableDescription { " +
-            "Sig: '%s', Name: '%s', Gen_sig: '%s', slot: %d, start: %d, len: %d" +
-          "}",
-          this.signature,
-          this.name,
-          this.generic_signature,
-          this.slot,
-          this.start_location,
-          this.length);
-    }
-    public boolean equals(Object other) {
-      if (!(other instanceof VariableDescription)) {
-        return false;
-      } else {
-        VariableDescription v = (VariableDescription)other;
-        return Objects.equals(v.signature, signature) &&
-            Objects.equals(v.name, name) &&
-            Objects.equals(v.generic_signature, generic_signature) &&
-            v.slot == slot &&
-            v.start_location == start_location &&
-            v.length == length;
-      }
-    }
-    public int hashCode() {
-      return Objects.hash(this.signature, this.name, this.generic_signature, this.slot,
-          this.start_location, this.length);
-    }
-  }
-
-  public static native VariableDescription[] GetLocalVariableTable(Executable e);
-
-  public static VariableDescription GetVariableAtLine(
-      Executable e, String name, String sig, int line) throws Exception {
-    return GetVariableAtLocation(e, name, sig, Breakpoint.lineToLocation(e, line));
-  }
-
-  public static VariableDescription GetVariableAtLocation(
-      Executable e, String name, String sig, long loc) {
-    VariableDescription[] vars = GetLocalVariableTable(e);
-    for (VariableDescription var : vars) {
-      if (var.start_location <= loc &&
-          var.length + var.start_location > loc &&
-          var.name.equals(name) &&
-          var.signature.equals(sig)) {
-        return var;
-      }
-    }
-    throw new Error(
-        "Unable to find variable " + name + " (sig: " + sig + ") in " + e + " at loc " + loc);
-  }
-
-  public static native int GetLocalVariableInt(Thread thr, int depth, int slot);
-  public static native long GetLocalVariableLong(Thread thr, int depth, int slot);
-  public static native float GetLocalVariableFloat(Thread thr, int depth, int slot);
-  public static native double GetLocalVariableDouble(Thread thr, int depth, int slot);
-  public static native Object GetLocalVariableObject(Thread thr, int depth, int slot);
-  public static native Object GetLocalInstance(Thread thr, int depth);
-
-  public static void SetLocalVariableInt(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableInt(thr, depth, slot, ((Number)val).intValue());
-  }
-  public static void SetLocalVariableLong(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableLong(thr, depth, slot, ((Number)val).longValue());
-  }
-  public static void SetLocalVariableFloat(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableFloat(thr, depth, slot, ((Number)val).floatValue());
-  }
-  public static void SetLocalVariableDouble(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableDouble(thr, depth, slot, ((Number)val).doubleValue());
-  }
-  public static native void SetLocalVariableInt(Thread thr, int depth, int slot, int val);
-  public static native void SetLocalVariableLong(Thread thr, int depth, int slot, long val);
-  public static native void SetLocalVariableFloat(Thread thr, int depth, int slot, float val);
-  public static native void SetLocalVariableDouble(Thread thr, int depth, int slot, double val);
-  public static native void SetLocalVariableObject(Thread thr, int depth, int slot, Object val);
-}
diff --git a/test/1924-frame-pop-toggle/src/art/Locals.java b/test/1924-frame-pop-toggle/src/art/Locals.java
new file mode 120000
index 0000000..2998386
--- /dev/null
+++ b/test/1924-frame-pop-toggle/src/art/Locals.java
@@ -0,0 +1 @@
+../../../jvmti-common/Locals.java
\ No newline at end of file
diff --git a/test/1924-frame-pop-toggle/src/art/StackTrace.java b/test/1924-frame-pop-toggle/src/art/StackTrace.java
deleted file mode 100644
index 2ea2f20..0000000
--- a/test/1924-frame-pop-toggle/src/art/StackTrace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Executable;
-
-public class StackTrace {
-  public static class StackFrameData {
-    public final Thread thr;
-    public final Executable method;
-    public final long current_location;
-    public final int depth;
-
-    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
-      this.thr = thr;
-      this.method = e;
-      this.current_location = loc;
-      this.depth = depth;
-    }
-    @Override
-    public String toString() {
-      return String.format(
-          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
-          this.thr,
-          this.method,
-          this.current_location,
-          this.depth);
-    }
-  }
-
-  public static native int GetStackDepth(Thread thr);
-
-  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
-
-  public static StackFrameData[] GetStackTrace(Thread thr) {
-    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
-    // suspended. The spec says that not being suspended is fine but since we want this to be
-    // consistent we will suspend for the RI.
-    boolean suspend_thread =
-        !System.getProperty("java.vm.name").equals("Dalvik") &&
-        !thr.equals(Thread.currentThread()) &&
-        !Suspension.isSuspended(thr);
-    if (suspend_thread) {
-      Suspension.suspend(thr);
-    }
-    StackFrameData[] out = nativeGetStackTrace(thr);
-    if (suspend_thread) {
-      Suspension.resume(thr);
-    }
-    return out;
-  }
-}
-
diff --git a/test/1924-frame-pop-toggle/src/art/StackTrace.java b/test/1924-frame-pop-toggle/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1924-frame-pop-toggle/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1924-frame-pop-toggle/src/art/Suspension.java b/test/1924-frame-pop-toggle/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1924-frame-pop-toggle/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1924-frame-pop-toggle/src/art/Suspension.java b/test/1924-frame-pop-toggle/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1924-frame-pop-toggle/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1924-frame-pop-toggle/src/art/Trace.java b/test/1924-frame-pop-toggle/src/art/Trace.java
deleted file mode 100644
index 8999bb1..0000000
--- a/test/1924-frame-pop-toggle/src/art/Trace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-
-public class Trace {
-  public static native void enableTracing(Class<?> methodClass,
-                                          Method entryMethod,
-                                          Method exitMethod,
-                                          Method fieldAccess,
-                                          Method fieldModify,
-                                          Method singleStep,
-                                          Thread thr);
-  public static native void disableTracing(Thread thr);
-
-  public static void enableFieldTracing(Class<?> methodClass,
-                                        Method fieldAccess,
-                                        Method fieldModify,
-                                        Thread thr) {
-    enableTracing(methodClass, null, null, fieldAccess, fieldModify, null, thr);
-  }
-
-  public static void enableMethodTracing(Class<?> methodClass,
-                                         Method entryMethod,
-                                         Method exitMethod,
-                                         Thread thr) {
-    enableTracing(methodClass, entryMethod, exitMethod, null, null, null, thr);
-  }
-
-  public static void enableSingleStepTracing(Class<?> methodClass,
-                                             Method singleStep,
-                                             Thread thr) {
-    enableTracing(methodClass, null, null, null, null, singleStep, thr);
-  }
-
-  public static native void watchFieldAccess(Field f);
-  public static native void watchFieldModification(Field f);
-  public static native void watchAllFieldAccesses();
-  public static native void watchAllFieldModifications();
-
-  // the names, arguments, and even line numbers of these functions are embedded in the tests so we
-  // need to add to the bottom and not modify old ones to maintain compat.
-  public static native void enableTracing2(Class<?> methodClass,
-                                           Method entryMethod,
-                                           Method exitMethod,
-                                           Method fieldAccess,
-                                           Method fieldModify,
-                                           Method singleStep,
-                                           Method ThreadStart,
-                                           Method ThreadEnd,
-                                           Thread thr);
-}
diff --git a/test/1924-frame-pop-toggle/src/art/Trace.java b/test/1924-frame-pop-toggle/src/art/Trace.java
new file mode 120000
index 0000000..5d9b44b
--- /dev/null
+++ b/test/1924-frame-pop-toggle/src/art/Trace.java
@@ -0,0 +1 @@
+../../../jvmti-common/Trace.java
\ No newline at end of file
diff --git a/test/1925-self-frame-pop/src/art/Breakpoint.java b/test/1925-self-frame-pop/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1925-self-frame-pop/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1925-self-frame-pop/src/art/Breakpoint.java b/test/1925-self-frame-pop/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1925-self-frame-pop/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1925-self-frame-pop/src/art/FramePop.java b/test/1925-self-frame-pop/src/art/FramePop.java
deleted file mode 100644
index 86bf226..0000000
--- a/test/1925-self-frame-pop/src/art/FramePop.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Method;
-
-public class FramePop {
-  public static native void enableFramePopEvent(Class klass, Method method, Thread thr)
-      throws Exception;
-  public static native void notifyFramePop(Thread target, int depth) throws Exception;
-}
diff --git a/test/1925-self-frame-pop/src/art/FramePop.java b/test/1925-self-frame-pop/src/art/FramePop.java
new file mode 120000
index 0000000..3e573af
--- /dev/null
+++ b/test/1925-self-frame-pop/src/art/FramePop.java
@@ -0,0 +1 @@
+../../../jvmti-common/FramePop.java
\ No newline at end of file
diff --git a/test/1925-self-frame-pop/src/art/Locals.java b/test/1925-self-frame-pop/src/art/Locals.java
deleted file mode 100644
index 22e21be..0000000
--- a/test/1925-self-frame-pop/src/art/Locals.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.Objects;
-
-public class Locals {
-  public static native void EnableLocalVariableAccess();
-
-  public static class VariableDescription {
-    public final long start_location;
-    public final int length;
-    public final String name;
-    public final String signature;
-    public final String generic_signature;
-    public final int slot;
-
-    public VariableDescription(
-        long start, int length, String name, String sig, String gen_sig, int slot) {
-      this.start_location = start;
-      this.length = length;
-      this.name = name;
-      this.signature = sig;
-      this.generic_signature = gen_sig;
-      this.slot = slot;
-    }
-
-    @Override
-    public String toString() {
-      return String.format(
-          "VariableDescription { " +
-            "Sig: '%s', Name: '%s', Gen_sig: '%s', slot: %d, start: %d, len: %d" +
-          "}",
-          this.signature,
-          this.name,
-          this.generic_signature,
-          this.slot,
-          this.start_location,
-          this.length);
-    }
-    public boolean equals(Object other) {
-      if (!(other instanceof VariableDescription)) {
-        return false;
-      } else {
-        VariableDescription v = (VariableDescription)other;
-        return Objects.equals(v.signature, signature) &&
-            Objects.equals(v.name, name) &&
-            Objects.equals(v.generic_signature, generic_signature) &&
-            v.slot == slot &&
-            v.start_location == start_location &&
-            v.length == length;
-      }
-    }
-    public int hashCode() {
-      return Objects.hash(this.signature, this.name, this.generic_signature, this.slot,
-          this.start_location, this.length);
-    }
-  }
-
-  public static native VariableDescription[] GetLocalVariableTable(Executable e);
-
-  public static VariableDescription GetVariableAtLine(
-      Executable e, String name, String sig, int line) throws Exception {
-    return GetVariableAtLocation(e, name, sig, Breakpoint.lineToLocation(e, line));
-  }
-
-  public static VariableDescription GetVariableAtLocation(
-      Executable e, String name, String sig, long loc) {
-    VariableDescription[] vars = GetLocalVariableTable(e);
-    for (VariableDescription var : vars) {
-      if (var.start_location <= loc &&
-          var.length + var.start_location > loc &&
-          var.name.equals(name) &&
-          var.signature.equals(sig)) {
-        return var;
-      }
-    }
-    throw new Error(
-        "Unable to find variable " + name + " (sig: " + sig + ") in " + e + " at loc " + loc);
-  }
-
-  public static native int GetLocalVariableInt(Thread thr, int depth, int slot);
-  public static native long GetLocalVariableLong(Thread thr, int depth, int slot);
-  public static native float GetLocalVariableFloat(Thread thr, int depth, int slot);
-  public static native double GetLocalVariableDouble(Thread thr, int depth, int slot);
-  public static native Object GetLocalVariableObject(Thread thr, int depth, int slot);
-  public static native Object GetLocalInstance(Thread thr, int depth);
-
-  public static void SetLocalVariableInt(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableInt(thr, depth, slot, ((Number)val).intValue());
-  }
-  public static void SetLocalVariableLong(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableLong(thr, depth, slot, ((Number)val).longValue());
-  }
-  public static void SetLocalVariableFloat(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableFloat(thr, depth, slot, ((Number)val).floatValue());
-  }
-  public static void SetLocalVariableDouble(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableDouble(thr, depth, slot, ((Number)val).doubleValue());
-  }
-  public static native void SetLocalVariableInt(Thread thr, int depth, int slot, int val);
-  public static native void SetLocalVariableLong(Thread thr, int depth, int slot, long val);
-  public static native void SetLocalVariableFloat(Thread thr, int depth, int slot, float val);
-  public static native void SetLocalVariableDouble(Thread thr, int depth, int slot, double val);
-  public static native void SetLocalVariableObject(Thread thr, int depth, int slot, Object val);
-}
diff --git a/test/1925-self-frame-pop/src/art/Locals.java b/test/1925-self-frame-pop/src/art/Locals.java
new file mode 120000
index 0000000..2998386
--- /dev/null
+++ b/test/1925-self-frame-pop/src/art/Locals.java
@@ -0,0 +1 @@
+../../../jvmti-common/Locals.java
\ No newline at end of file
diff --git a/test/1925-self-frame-pop/src/art/StackTrace.java b/test/1925-self-frame-pop/src/art/StackTrace.java
deleted file mode 100644
index 2ea2f20..0000000
--- a/test/1925-self-frame-pop/src/art/StackTrace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Executable;
-
-public class StackTrace {
-  public static class StackFrameData {
-    public final Thread thr;
-    public final Executable method;
-    public final long current_location;
-    public final int depth;
-
-    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
-      this.thr = thr;
-      this.method = e;
-      this.current_location = loc;
-      this.depth = depth;
-    }
-    @Override
-    public String toString() {
-      return String.format(
-          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
-          this.thr,
-          this.method,
-          this.current_location,
-          this.depth);
-    }
-  }
-
-  public static native int GetStackDepth(Thread thr);
-
-  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
-
-  public static StackFrameData[] GetStackTrace(Thread thr) {
-    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
-    // suspended. The spec says that not being suspended is fine but since we want this to be
-    // consistent we will suspend for the RI.
-    boolean suspend_thread =
-        !System.getProperty("java.vm.name").equals("Dalvik") &&
-        !thr.equals(Thread.currentThread()) &&
-        !Suspension.isSuspended(thr);
-    if (suspend_thread) {
-      Suspension.suspend(thr);
-    }
-    StackFrameData[] out = nativeGetStackTrace(thr);
-    if (suspend_thread) {
-      Suspension.resume(thr);
-    }
-    return out;
-  }
-}
-
diff --git a/test/1925-self-frame-pop/src/art/StackTrace.java b/test/1925-self-frame-pop/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1925-self-frame-pop/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1925-self-frame-pop/src/art/Suspension.java b/test/1925-self-frame-pop/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1925-self-frame-pop/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1925-self-frame-pop/src/art/Suspension.java b/test/1925-self-frame-pop/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1925-self-frame-pop/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1925-self-frame-pop/src/art/Trace.java b/test/1925-self-frame-pop/src/art/Trace.java
deleted file mode 100644
index 8999bb1..0000000
--- a/test/1925-self-frame-pop/src/art/Trace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-
-public class Trace {
-  public static native void enableTracing(Class<?> methodClass,
-                                          Method entryMethod,
-                                          Method exitMethod,
-                                          Method fieldAccess,
-                                          Method fieldModify,
-                                          Method singleStep,
-                                          Thread thr);
-  public static native void disableTracing(Thread thr);
-
-  public static void enableFieldTracing(Class<?> methodClass,
-                                        Method fieldAccess,
-                                        Method fieldModify,
-                                        Thread thr) {
-    enableTracing(methodClass, null, null, fieldAccess, fieldModify, null, thr);
-  }
-
-  public static void enableMethodTracing(Class<?> methodClass,
-                                         Method entryMethod,
-                                         Method exitMethod,
-                                         Thread thr) {
-    enableTracing(methodClass, entryMethod, exitMethod, null, null, null, thr);
-  }
-
-  public static void enableSingleStepTracing(Class<?> methodClass,
-                                             Method singleStep,
-                                             Thread thr) {
-    enableTracing(methodClass, null, null, null, null, singleStep, thr);
-  }
-
-  public static native void watchFieldAccess(Field f);
-  public static native void watchFieldModification(Field f);
-  public static native void watchAllFieldAccesses();
-  public static native void watchAllFieldModifications();
-
-  // the names, arguments, and even line numbers of these functions are embedded in the tests so we
-  // need to add to the bottom and not modify old ones to maintain compat.
-  public static native void enableTracing2(Class<?> methodClass,
-                                           Method entryMethod,
-                                           Method exitMethod,
-                                           Method fieldAccess,
-                                           Method fieldModify,
-                                           Method singleStep,
-                                           Method ThreadStart,
-                                           Method ThreadEnd,
-                                           Thread thr);
-}
diff --git a/test/1925-self-frame-pop/src/art/Trace.java b/test/1925-self-frame-pop/src/art/Trace.java
new file mode 120000
index 0000000..5d9b44b
--- /dev/null
+++ b/test/1925-self-frame-pop/src/art/Trace.java
@@ -0,0 +1 @@
+../../../jvmti-common/Trace.java
\ No newline at end of file
diff --git a/test/1926-missed-frame-pop/src/art/Breakpoint.java b/test/1926-missed-frame-pop/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1926-missed-frame-pop/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1926-missed-frame-pop/src/art/Breakpoint.java b/test/1926-missed-frame-pop/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1926-missed-frame-pop/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1926-missed-frame-pop/src/art/FramePop.java b/test/1926-missed-frame-pop/src/art/FramePop.java
deleted file mode 100644
index 86bf226..0000000
--- a/test/1926-missed-frame-pop/src/art/FramePop.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Method;
-
-public class FramePop {
-  public static native void enableFramePopEvent(Class klass, Method method, Thread thr)
-      throws Exception;
-  public static native void notifyFramePop(Thread target, int depth) throws Exception;
-}
diff --git a/test/1926-missed-frame-pop/src/art/FramePop.java b/test/1926-missed-frame-pop/src/art/FramePop.java
new file mode 120000
index 0000000..3e573af
--- /dev/null
+++ b/test/1926-missed-frame-pop/src/art/FramePop.java
@@ -0,0 +1 @@
+../../../jvmti-common/FramePop.java
\ No newline at end of file
diff --git a/test/1926-missed-frame-pop/src/art/Locals.java b/test/1926-missed-frame-pop/src/art/Locals.java
deleted file mode 100644
index 22e21be..0000000
--- a/test/1926-missed-frame-pop/src/art/Locals.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.Objects;
-
-public class Locals {
-  public static native void EnableLocalVariableAccess();
-
-  public static class VariableDescription {
-    public final long start_location;
-    public final int length;
-    public final String name;
-    public final String signature;
-    public final String generic_signature;
-    public final int slot;
-
-    public VariableDescription(
-        long start, int length, String name, String sig, String gen_sig, int slot) {
-      this.start_location = start;
-      this.length = length;
-      this.name = name;
-      this.signature = sig;
-      this.generic_signature = gen_sig;
-      this.slot = slot;
-    }
-
-    @Override
-    public String toString() {
-      return String.format(
-          "VariableDescription { " +
-            "Sig: '%s', Name: '%s', Gen_sig: '%s', slot: %d, start: %d, len: %d" +
-          "}",
-          this.signature,
-          this.name,
-          this.generic_signature,
-          this.slot,
-          this.start_location,
-          this.length);
-    }
-    public boolean equals(Object other) {
-      if (!(other instanceof VariableDescription)) {
-        return false;
-      } else {
-        VariableDescription v = (VariableDescription)other;
-        return Objects.equals(v.signature, signature) &&
-            Objects.equals(v.name, name) &&
-            Objects.equals(v.generic_signature, generic_signature) &&
-            v.slot == slot &&
-            v.start_location == start_location &&
-            v.length == length;
-      }
-    }
-    public int hashCode() {
-      return Objects.hash(this.signature, this.name, this.generic_signature, this.slot,
-          this.start_location, this.length);
-    }
-  }
-
-  public static native VariableDescription[] GetLocalVariableTable(Executable e);
-
-  public static VariableDescription GetVariableAtLine(
-      Executable e, String name, String sig, int line) throws Exception {
-    return GetVariableAtLocation(e, name, sig, Breakpoint.lineToLocation(e, line));
-  }
-
-  public static VariableDescription GetVariableAtLocation(
-      Executable e, String name, String sig, long loc) {
-    VariableDescription[] vars = GetLocalVariableTable(e);
-    for (VariableDescription var : vars) {
-      if (var.start_location <= loc &&
-          var.length + var.start_location > loc &&
-          var.name.equals(name) &&
-          var.signature.equals(sig)) {
-        return var;
-      }
-    }
-    throw new Error(
-        "Unable to find variable " + name + " (sig: " + sig + ") in " + e + " at loc " + loc);
-  }
-
-  public static native int GetLocalVariableInt(Thread thr, int depth, int slot);
-  public static native long GetLocalVariableLong(Thread thr, int depth, int slot);
-  public static native float GetLocalVariableFloat(Thread thr, int depth, int slot);
-  public static native double GetLocalVariableDouble(Thread thr, int depth, int slot);
-  public static native Object GetLocalVariableObject(Thread thr, int depth, int slot);
-  public static native Object GetLocalInstance(Thread thr, int depth);
-
-  public static void SetLocalVariableInt(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableInt(thr, depth, slot, ((Number)val).intValue());
-  }
-  public static void SetLocalVariableLong(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableLong(thr, depth, slot, ((Number)val).longValue());
-  }
-  public static void SetLocalVariableFloat(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableFloat(thr, depth, slot, ((Number)val).floatValue());
-  }
-  public static void SetLocalVariableDouble(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableDouble(thr, depth, slot, ((Number)val).doubleValue());
-  }
-  public static native void SetLocalVariableInt(Thread thr, int depth, int slot, int val);
-  public static native void SetLocalVariableLong(Thread thr, int depth, int slot, long val);
-  public static native void SetLocalVariableFloat(Thread thr, int depth, int slot, float val);
-  public static native void SetLocalVariableDouble(Thread thr, int depth, int slot, double val);
-  public static native void SetLocalVariableObject(Thread thr, int depth, int slot, Object val);
-}
diff --git a/test/1926-missed-frame-pop/src/art/Locals.java b/test/1926-missed-frame-pop/src/art/Locals.java
new file mode 120000
index 0000000..2998386
--- /dev/null
+++ b/test/1926-missed-frame-pop/src/art/Locals.java
@@ -0,0 +1 @@
+../../../jvmti-common/Locals.java
\ No newline at end of file
diff --git a/test/1926-missed-frame-pop/src/art/StackTrace.java b/test/1926-missed-frame-pop/src/art/StackTrace.java
deleted file mode 100644
index 2ea2f20..0000000
--- a/test/1926-missed-frame-pop/src/art/StackTrace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Executable;
-
-public class StackTrace {
-  public static class StackFrameData {
-    public final Thread thr;
-    public final Executable method;
-    public final long current_location;
-    public final int depth;
-
-    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
-      this.thr = thr;
-      this.method = e;
-      this.current_location = loc;
-      this.depth = depth;
-    }
-    @Override
-    public String toString() {
-      return String.format(
-          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
-          this.thr,
-          this.method,
-          this.current_location,
-          this.depth);
-    }
-  }
-
-  public static native int GetStackDepth(Thread thr);
-
-  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
-
-  public static StackFrameData[] GetStackTrace(Thread thr) {
-    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
-    // suspended. The spec says that not being suspended is fine but since we want this to be
-    // consistent we will suspend for the RI.
-    boolean suspend_thread =
-        !System.getProperty("java.vm.name").equals("Dalvik") &&
-        !thr.equals(Thread.currentThread()) &&
-        !Suspension.isSuspended(thr);
-    if (suspend_thread) {
-      Suspension.suspend(thr);
-    }
-    StackFrameData[] out = nativeGetStackTrace(thr);
-    if (suspend_thread) {
-      Suspension.resume(thr);
-    }
-    return out;
-  }
-}
-
diff --git a/test/1926-missed-frame-pop/src/art/StackTrace.java b/test/1926-missed-frame-pop/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1926-missed-frame-pop/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1926-missed-frame-pop/src/art/Suspension.java b/test/1926-missed-frame-pop/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1926-missed-frame-pop/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1926-missed-frame-pop/src/art/Suspension.java b/test/1926-missed-frame-pop/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1926-missed-frame-pop/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1926-missed-frame-pop/src/art/Trace.java b/test/1926-missed-frame-pop/src/art/Trace.java
deleted file mode 100644
index 8999bb1..0000000
--- a/test/1926-missed-frame-pop/src/art/Trace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-
-public class Trace {
-  public static native void enableTracing(Class<?> methodClass,
-                                          Method entryMethod,
-                                          Method exitMethod,
-                                          Method fieldAccess,
-                                          Method fieldModify,
-                                          Method singleStep,
-                                          Thread thr);
-  public static native void disableTracing(Thread thr);
-
-  public static void enableFieldTracing(Class<?> methodClass,
-                                        Method fieldAccess,
-                                        Method fieldModify,
-                                        Thread thr) {
-    enableTracing(methodClass, null, null, fieldAccess, fieldModify, null, thr);
-  }
-
-  public static void enableMethodTracing(Class<?> methodClass,
-                                         Method entryMethod,
-                                         Method exitMethod,
-                                         Thread thr) {
-    enableTracing(methodClass, entryMethod, exitMethod, null, null, null, thr);
-  }
-
-  public static void enableSingleStepTracing(Class<?> methodClass,
-                                             Method singleStep,
-                                             Thread thr) {
-    enableTracing(methodClass, null, null, null, null, singleStep, thr);
-  }
-
-  public static native void watchFieldAccess(Field f);
-  public static native void watchFieldModification(Field f);
-  public static native void watchAllFieldAccesses();
-  public static native void watchAllFieldModifications();
-
-  // the names, arguments, and even line numbers of these functions are embedded in the tests so we
-  // need to add to the bottom and not modify old ones to maintain compat.
-  public static native void enableTracing2(Class<?> methodClass,
-                                           Method entryMethod,
-                                           Method exitMethod,
-                                           Method fieldAccess,
-                                           Method fieldModify,
-                                           Method singleStep,
-                                           Method ThreadStart,
-                                           Method ThreadEnd,
-                                           Thread thr);
-}
diff --git a/test/1926-missed-frame-pop/src/art/Trace.java b/test/1926-missed-frame-pop/src/art/Trace.java
new file mode 120000
index 0000000..5d9b44b
--- /dev/null
+++ b/test/1926-missed-frame-pop/src/art/Trace.java
@@ -0,0 +1 @@
+../../../jvmti-common/Trace.java
\ No newline at end of file
diff --git a/test/1927-exception-event/src/art/Breakpoint.java b/test/1927-exception-event/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1927-exception-event/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1927-exception-event/src/art/Breakpoint.java b/test/1927-exception-event/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1927-exception-event/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1927-exception-event/src/art/Exceptions.java b/test/1927-exception-event/src/art/Exceptions.java
new file mode 120000
index 0000000..b8450fe
--- /dev/null
+++ b/test/1927-exception-event/src/art/Exceptions.java
@@ -0,0 +1 @@
+../../../jvmti-common/Exceptions.java
\ No newline at end of file
diff --git a/test/1927-exception-event/src/art/StackTrace.java b/test/1927-exception-event/src/art/StackTrace.java
deleted file mode 100644
index 2ea2f20..0000000
--- a/test/1927-exception-event/src/art/StackTrace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Executable;
-
-public class StackTrace {
-  public static class StackFrameData {
-    public final Thread thr;
-    public final Executable method;
-    public final long current_location;
-    public final int depth;
-
-    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
-      this.thr = thr;
-      this.method = e;
-      this.current_location = loc;
-      this.depth = depth;
-    }
-    @Override
-    public String toString() {
-      return String.format(
-          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
-          this.thr,
-          this.method,
-          this.current_location,
-          this.depth);
-    }
-  }
-
-  public static native int GetStackDepth(Thread thr);
-
-  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
-
-  public static StackFrameData[] GetStackTrace(Thread thr) {
-    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
-    // suspended. The spec says that not being suspended is fine but since we want this to be
-    // consistent we will suspend for the RI.
-    boolean suspend_thread =
-        !System.getProperty("java.vm.name").equals("Dalvik") &&
-        !thr.equals(Thread.currentThread()) &&
-        !Suspension.isSuspended(thr);
-    if (suspend_thread) {
-      Suspension.suspend(thr);
-    }
-    StackFrameData[] out = nativeGetStackTrace(thr);
-    if (suspend_thread) {
-      Suspension.resume(thr);
-    }
-    return out;
-  }
-}
-
diff --git a/test/1927-exception-event/src/art/StackTrace.java b/test/1927-exception-event/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1927-exception-event/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1927-exception-event/src/art/Suspension.java b/test/1927-exception-event/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1927-exception-event/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1927-exception-event/src/art/Suspension.java b/test/1927-exception-event/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1927-exception-event/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1928-exception-event-exception/src/art/Breakpoint.java b/test/1928-exception-event-exception/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1928-exception-event-exception/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1928-exception-event-exception/src/art/Breakpoint.java b/test/1928-exception-event-exception/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1928-exception-event-exception/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1928-exception-event-exception/src/art/Exceptions.java b/test/1928-exception-event-exception/src/art/Exceptions.java
deleted file mode 100644
index 2c959ec..0000000
--- a/test/1928-exception-event-exception/src/art/Exceptions.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-
-public class Exceptions {
-  public static native void setupExceptionTracing(
-      Class<?> methodClass,
-      Class<?> exceptionClass,
-      Method exceptionEventMethod,
-      Method exceptionCaughtEventMethod);
-
-  public static native void enableExceptionCatchEvent(Thread thr);
-  public static native void enableExceptionEvent(Thread thr);
-  public static native void disableExceptionCatchEvent(Thread thr);
-  public static native void disableExceptionEvent(Thread thr);
-}
diff --git a/test/1928-exception-event-exception/src/art/Exceptions.java b/test/1928-exception-event-exception/src/art/Exceptions.java
new file mode 120000
index 0000000..b8450fe
--- /dev/null
+++ b/test/1928-exception-event-exception/src/art/Exceptions.java
@@ -0,0 +1 @@
+../../../jvmti-common/Exceptions.java
\ No newline at end of file
diff --git a/test/1928-exception-event-exception/src/art/StackTrace.java b/test/1928-exception-event-exception/src/art/StackTrace.java
deleted file mode 100644
index 2ea2f20..0000000
--- a/test/1928-exception-event-exception/src/art/StackTrace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Executable;
-
-public class StackTrace {
-  public static class StackFrameData {
-    public final Thread thr;
-    public final Executable method;
-    public final long current_location;
-    public final int depth;
-
-    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
-      this.thr = thr;
-      this.method = e;
-      this.current_location = loc;
-      this.depth = depth;
-    }
-    @Override
-    public String toString() {
-      return String.format(
-          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
-          this.thr,
-          this.method,
-          this.current_location,
-          this.depth);
-    }
-  }
-
-  public static native int GetStackDepth(Thread thr);
-
-  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
-
-  public static StackFrameData[] GetStackTrace(Thread thr) {
-    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
-    // suspended. The spec says that not being suspended is fine but since we want this to be
-    // consistent we will suspend for the RI.
-    boolean suspend_thread =
-        !System.getProperty("java.vm.name").equals("Dalvik") &&
-        !thr.equals(Thread.currentThread()) &&
-        !Suspension.isSuspended(thr);
-    if (suspend_thread) {
-      Suspension.suspend(thr);
-    }
-    StackFrameData[] out = nativeGetStackTrace(thr);
-    if (suspend_thread) {
-      Suspension.resume(thr);
-    }
-    return out;
-  }
-}
-
diff --git a/test/1928-exception-event-exception/src/art/StackTrace.java b/test/1928-exception-event-exception/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1928-exception-event-exception/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1928-exception-event-exception/src/art/Suspension.java b/test/1928-exception-event-exception/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1928-exception-event-exception/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1928-exception-event-exception/src/art/Suspension.java b/test/1928-exception-event-exception/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1928-exception-event-exception/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1929-exception-catch-exception/src/art/Breakpoint.java b/test/1929-exception-catch-exception/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1929-exception-catch-exception/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1929-exception-catch-exception/src/art/Breakpoint.java b/test/1929-exception-catch-exception/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1929-exception-catch-exception/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1929-exception-catch-exception/src/art/Exceptions.java b/test/1929-exception-catch-exception/src/art/Exceptions.java
deleted file mode 100644
index 2c959ec..0000000
--- a/test/1929-exception-catch-exception/src/art/Exceptions.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-
-public class Exceptions {
-  public static native void setupExceptionTracing(
-      Class<?> methodClass,
-      Class<?> exceptionClass,
-      Method exceptionEventMethod,
-      Method exceptionCaughtEventMethod);
-
-  public static native void enableExceptionCatchEvent(Thread thr);
-  public static native void enableExceptionEvent(Thread thr);
-  public static native void disableExceptionCatchEvent(Thread thr);
-  public static native void disableExceptionEvent(Thread thr);
-}
diff --git a/test/1929-exception-catch-exception/src/art/Exceptions.java b/test/1929-exception-catch-exception/src/art/Exceptions.java
new file mode 120000
index 0000000..b8450fe
--- /dev/null
+++ b/test/1929-exception-catch-exception/src/art/Exceptions.java
@@ -0,0 +1 @@
+../../../jvmti-common/Exceptions.java
\ No newline at end of file
diff --git a/test/1929-exception-catch-exception/src/art/StackTrace.java b/test/1929-exception-catch-exception/src/art/StackTrace.java
deleted file mode 100644
index 2ea2f20..0000000
--- a/test/1929-exception-catch-exception/src/art/StackTrace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Executable;
-
-public class StackTrace {
-  public static class StackFrameData {
-    public final Thread thr;
-    public final Executable method;
-    public final long current_location;
-    public final int depth;
-
-    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
-      this.thr = thr;
-      this.method = e;
-      this.current_location = loc;
-      this.depth = depth;
-    }
-    @Override
-    public String toString() {
-      return String.format(
-          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
-          this.thr,
-          this.method,
-          this.current_location,
-          this.depth);
-    }
-  }
-
-  public static native int GetStackDepth(Thread thr);
-
-  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
-
-  public static StackFrameData[] GetStackTrace(Thread thr) {
-    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
-    // suspended. The spec says that not being suspended is fine but since we want this to be
-    // consistent we will suspend for the RI.
-    boolean suspend_thread =
-        !System.getProperty("java.vm.name").equals("Dalvik") &&
-        !thr.equals(Thread.currentThread()) &&
-        !Suspension.isSuspended(thr);
-    if (suspend_thread) {
-      Suspension.suspend(thr);
-    }
-    StackFrameData[] out = nativeGetStackTrace(thr);
-    if (suspend_thread) {
-      Suspension.resume(thr);
-    }
-    return out;
-  }
-}
-
diff --git a/test/1929-exception-catch-exception/src/art/StackTrace.java b/test/1929-exception-catch-exception/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1929-exception-catch-exception/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1929-exception-catch-exception/src/art/Suspension.java b/test/1929-exception-catch-exception/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1929-exception-catch-exception/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1929-exception-catch-exception/src/art/Suspension.java b/test/1929-exception-catch-exception/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1929-exception-catch-exception/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1930-monitor-info/src/art/Monitors.java b/test/1930-monitor-info/src/art/Monitors.java
new file mode 120000
index 0000000..61e8367
--- /dev/null
+++ b/test/1930-monitor-info/src/art/Monitors.java
@@ -0,0 +1 @@
+../../../jvmti-common/Monitors.java
\ No newline at end of file
diff --git a/test/1930-monitor-info/src/art/Suspension.java b/test/1930-monitor-info/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1930-monitor-info/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1930-monitor-info/src/art/Suspension.java b/test/1930-monitor-info/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1930-monitor-info/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1931-monitor-events/src/art/Monitors.java b/test/1931-monitor-events/src/art/Monitors.java
deleted file mode 100644
index 7fe2b60..0000000
--- a/test/1931-monitor-events/src/art/Monitors.java
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Method;
-import java.util.concurrent.atomic.*;
-import java.util.function.Function;
-import java.util.stream.Stream;
-import java.util.Arrays;
-import java.util.Objects;
-
-public class Monitors {
-  public native static void setupMonitorEvents(
-      Class<?> method_klass,
-      Method monitor_contended_enter_event,
-      Method monitor_contended_entered_event,
-      Method monitor_wait_event,
-      Method monitor_waited_event,
-      Class<?> lock_klass,
-      Thread thr);
-  public native static void stopMonitorEvents();
-
-  public static class NamedLock {
-    public final String name;
-    private volatile int calledNotify;
-    public NamedLock(String name) {
-      this.name = name;
-      calledNotify = 0;
-    }
-
-    public String toString() {
-      return String.format("NamedLock[%s]", name);
-    }
-
-    public final void DoWait() throws Exception {
-      final int v = calledNotify;
-      while (v == calledNotify) {
-        wait();
-      }
-    }
-
-    public final void DoWait(long t) throws Exception {
-      final int v = calledNotify;
-      final long target = System.currentTimeMillis() + (t / 2);
-      while (v == calledNotify && (t < 0 || System.currentTimeMillis() < target)) {
-        wait(t);
-      }
-    }
-
-    public final void DoNotifyAll() throws Exception {
-      calledNotify++;
-      notifyAll();
-    }
-
-    public final void DoNotify() throws Exception {
-      calledNotify++;
-      notify();
-    }
-  }
-
-  public static final class MonitorUsage {
-    public final Object monitor;
-    public final Thread owner;
-    public final int entryCount;
-    public final Thread[] waiters;
-    public final Thread[] notifyWaiters;
-
-    public MonitorUsage(
-        Object monitor,
-        Thread owner,
-        int entryCount,
-        Thread[] waiters,
-        Thread[] notifyWaiters) {
-      this.monitor = monitor;
-      this.entryCount = entryCount;
-      this.owner = owner;
-      this.waiters = waiters;
-      this.notifyWaiters = notifyWaiters;
-    }
-
-    private static String toNameList(Thread[] ts) {
-      return Arrays.toString(Arrays.stream(ts).map((Thread t) -> t.getName()).toArray());
-    }
-
-    public String toString() {
-      return String.format(
-          "MonitorUsage{ monitor: %s, owner: %s, entryCount: %d, waiters: %s, notify_waiters: %s }",
-          monitor,
-          (owner != null) ? owner.getName() : "<NULL>",
-          entryCount,
-          toNameList(waiters),
-          toNameList(notifyWaiters));
-    }
-  }
-
-  public static native MonitorUsage getObjectMonitorUsage(Object monitor);
-  public static native Object getCurrentContendedMonitor(Thread thr);
-
-  public static class TestException extends Error {
-    public TestException() { super(); }
-    public TestException(String s) { super(s); }
-    public TestException(String s, Throwable c) { super(s, c); }
-  }
-
-  public static class LockController {
-    private static enum Action { HOLD, RELEASE, NOTIFY, NOTIFY_ALL, WAIT, TIMED_WAIT }
-
-    public final NamedLock lock;
-    public final long timeout;
-    private final AtomicStampedReference<Action> action;
-    private volatile Thread runner = null;
-    private volatile boolean started = false;
-    private volatile boolean held = false;
-    private static final AtomicInteger cnt = new AtomicInteger(0);
-    private volatile Throwable exe;
-
-    public LockController(NamedLock lock) {
-      this(lock, 10 * 1000);
-    }
-    public LockController(NamedLock lock, long timeout) {
-      this.lock = lock;
-      this.timeout = timeout;
-      this.action = new AtomicStampedReference(Action.HOLD, 0);
-      this.exe = null;
-    }
-
-    public boolean IsWorkerThread(Thread thd) {
-      return Objects.equals(runner, thd);
-    }
-
-    public boolean IsLocked() {
-      checkException();
-      return held;
-    }
-
-    public void checkException() {
-      if (exe != null) {
-        throw new TestException("Exception thrown by other thread!", exe);
-      }
-    }
-
-    private void setAction(Action a) {
-      int stamp = action.getStamp();
-      // Wait for it to be HOLD before updating.
-      while (!action.compareAndSet(Action.HOLD, a, stamp, stamp + 1)) {
-        stamp = action.getStamp();
-      }
-    }
-
-    public synchronized void suspendWorker() throws Exception {
-      checkException();
-      if (runner == null) {
-        throw new TestException("We don't have any runner holding  " + lock);
-      }
-      Suspension.suspend(runner);
-    }
-
-    public Object getWorkerContendedMonitor() throws Exception {
-      checkException();
-      if (runner == null) {
-        return null;
-      }
-      return getCurrentContendedMonitor(runner);
-    }
-
-    public synchronized void DoLock() {
-      if (IsLocked()) {
-        throw new Error("lock is already acquired or being acquired.");
-      }
-      if (runner != null) {
-        throw new Error("Already have thread!");
-      }
-      runner = new Thread(() -> {
-        started = true;
-        try {
-          synchronized (lock) {
-            held = true;
-            int[] stamp_h = new int[] { -1 };
-            Action cur_action = Action.HOLD;
-            try {
-              while (true) {
-                cur_action = action.get(stamp_h);
-                int stamp = stamp_h[0];
-                if (cur_action == Action.RELEASE) {
-                  // The other thread will deal with reseting action.
-                  break;
-                }
-                try {
-                  switch (cur_action) {
-                    case HOLD:
-                      Thread.yield();
-                      break;
-                    case NOTIFY:
-                      lock.DoNotify();
-                      break;
-                    case NOTIFY_ALL:
-                      lock.DoNotifyAll();
-                      break;
-                    case TIMED_WAIT:
-                      lock.DoWait(timeout);
-                      break;
-                    case WAIT:
-                      lock.DoWait();
-                      break;
-                    default:
-                      throw new Error("Unknown action " + action);
-                  }
-                } finally {
-                  // reset action back to hold if it isn't something else.
-                  action.compareAndSet(cur_action, Action.HOLD, stamp, stamp+1);
-                }
-              }
-            } catch (Exception e) {
-              throw new TestException("Got an error while performing action " + cur_action, e);
-            }
-          }
-        } finally {
-          held = false;
-          started = false;
-        }
-      }, "Locker thread " + cnt.getAndIncrement() + " for " + lock);
-      // Make sure we can get any exceptions this throws.
-      runner.setUncaughtExceptionHandler((t, e) -> { exe = e; });
-      runner.start();
-    }
-
-    public void waitForLockToBeHeld() throws Exception {
-      while (true) {
-        if (IsLocked() && Objects.equals(runner, Monitors.getObjectMonitorUsage(lock).owner)) {
-          return;
-        }
-      }
-    }
-
-    public synchronized void waitForNotifySleep() throws Exception {
-      if (runner == null) {
-        throw new Error("No thread trying to lock!");
-      }
-      do {
-        checkException();
-      } while (!started ||
-          !Arrays.asList(Monitors.getObjectMonitorUsage(lock).notifyWaiters).contains(runner));
-    }
-
-    public synchronized void waitForContendedSleep() throws Exception {
-      if (runner == null) {
-        throw new Error("No thread trying to lock!");
-      }
-      do {
-        checkException();
-      } while (!started ||
-          runner.getState() != Thread.State.BLOCKED ||
-          !Arrays.asList(Monitors.getObjectMonitorUsage(lock).waiters).contains(runner));
-    }
-
-    public synchronized void DoNotify() {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      setAction(Action.NOTIFY);
-    }
-
-    public synchronized void DoNotifyAll() {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      setAction(Action.NOTIFY_ALL);
-    }
-
-    public synchronized void DoTimedWait() throws Exception {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      setAction(Action.TIMED_WAIT);
-    }
-
-    public synchronized void DoWait() throws Exception {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      setAction(Action.WAIT);
-    }
-
-    public synchronized void interruptWorker() throws Exception {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      runner.interrupt();
-    }
-
-    public synchronized void waitForActionToFinish() throws Exception {
-      checkException();
-      while (action.getReference() != Action.HOLD) { checkException(); }
-    }
-
-    public synchronized void DoUnlock() throws Exception {
-      Error throwing = null;
-      if (!IsLocked()) {
-        // We might just be racing some exception that was thrown by the worker thread. Cache the
-        // exception, we will throw one from the worker before this one.
-        throwing = new Error("Not locked!");
-      }
-      setAction(Action.RELEASE);
-      Thread run = runner;
-      runner = null;
-      while (held) {}
-      run.join();
-      action.set(Action.HOLD, 0);
-      // Make sure to throw any exception that occurred since it might not have unlocked due to our
-      // request.
-      checkException();
-      DoCleanup();
-      if (throwing != null) {
-        throw throwing;
-      }
-    }
-
-    public synchronized void DoCleanup() throws Exception {
-      if (runner != null) {
-        Thread run = runner;
-        runner = null;
-        while (held) {}
-        run.join();
-      }
-      action.set(Action.HOLD, 0);
-      exe = null;
-    }
-  }
-}
-
diff --git a/test/1931-monitor-events/src/art/Monitors.java b/test/1931-monitor-events/src/art/Monitors.java
new file mode 120000
index 0000000..61e8367
--- /dev/null
+++ b/test/1931-monitor-events/src/art/Monitors.java
@@ -0,0 +1 @@
+../../../jvmti-common/Monitors.java
\ No newline at end of file
diff --git a/test/1931-monitor-events/src/art/Suspension.java b/test/1931-monitor-events/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1931-monitor-events/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1931-monitor-events/src/art/Suspension.java b/test/1931-monitor-events/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1931-monitor-events/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1932-monitor-events-misc/src/art/Monitors.java b/test/1932-monitor-events-misc/src/art/Monitors.java
deleted file mode 100644
index 7fe2b60..0000000
--- a/test/1932-monitor-events-misc/src/art/Monitors.java
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Method;
-import java.util.concurrent.atomic.*;
-import java.util.function.Function;
-import java.util.stream.Stream;
-import java.util.Arrays;
-import java.util.Objects;
-
-public class Monitors {
-  public native static void setupMonitorEvents(
-      Class<?> method_klass,
-      Method monitor_contended_enter_event,
-      Method monitor_contended_entered_event,
-      Method monitor_wait_event,
-      Method monitor_waited_event,
-      Class<?> lock_klass,
-      Thread thr);
-  public native static void stopMonitorEvents();
-
-  public static class NamedLock {
-    public final String name;
-    private volatile int calledNotify;
-    public NamedLock(String name) {
-      this.name = name;
-      calledNotify = 0;
-    }
-
-    public String toString() {
-      return String.format("NamedLock[%s]", name);
-    }
-
-    public final void DoWait() throws Exception {
-      final int v = calledNotify;
-      while (v == calledNotify) {
-        wait();
-      }
-    }
-
-    public final void DoWait(long t) throws Exception {
-      final int v = calledNotify;
-      final long target = System.currentTimeMillis() + (t / 2);
-      while (v == calledNotify && (t < 0 || System.currentTimeMillis() < target)) {
-        wait(t);
-      }
-    }
-
-    public final void DoNotifyAll() throws Exception {
-      calledNotify++;
-      notifyAll();
-    }
-
-    public final void DoNotify() throws Exception {
-      calledNotify++;
-      notify();
-    }
-  }
-
-  public static final class MonitorUsage {
-    public final Object monitor;
-    public final Thread owner;
-    public final int entryCount;
-    public final Thread[] waiters;
-    public final Thread[] notifyWaiters;
-
-    public MonitorUsage(
-        Object monitor,
-        Thread owner,
-        int entryCount,
-        Thread[] waiters,
-        Thread[] notifyWaiters) {
-      this.monitor = monitor;
-      this.entryCount = entryCount;
-      this.owner = owner;
-      this.waiters = waiters;
-      this.notifyWaiters = notifyWaiters;
-    }
-
-    private static String toNameList(Thread[] ts) {
-      return Arrays.toString(Arrays.stream(ts).map((Thread t) -> t.getName()).toArray());
-    }
-
-    public String toString() {
-      return String.format(
-          "MonitorUsage{ monitor: %s, owner: %s, entryCount: %d, waiters: %s, notify_waiters: %s }",
-          monitor,
-          (owner != null) ? owner.getName() : "<NULL>",
-          entryCount,
-          toNameList(waiters),
-          toNameList(notifyWaiters));
-    }
-  }
-
-  public static native MonitorUsage getObjectMonitorUsage(Object monitor);
-  public static native Object getCurrentContendedMonitor(Thread thr);
-
-  public static class TestException extends Error {
-    public TestException() { super(); }
-    public TestException(String s) { super(s); }
-    public TestException(String s, Throwable c) { super(s, c); }
-  }
-
-  public static class LockController {
-    private static enum Action { HOLD, RELEASE, NOTIFY, NOTIFY_ALL, WAIT, TIMED_WAIT }
-
-    public final NamedLock lock;
-    public final long timeout;
-    private final AtomicStampedReference<Action> action;
-    private volatile Thread runner = null;
-    private volatile boolean started = false;
-    private volatile boolean held = false;
-    private static final AtomicInteger cnt = new AtomicInteger(0);
-    private volatile Throwable exe;
-
-    public LockController(NamedLock lock) {
-      this(lock, 10 * 1000);
-    }
-    public LockController(NamedLock lock, long timeout) {
-      this.lock = lock;
-      this.timeout = timeout;
-      this.action = new AtomicStampedReference(Action.HOLD, 0);
-      this.exe = null;
-    }
-
-    public boolean IsWorkerThread(Thread thd) {
-      return Objects.equals(runner, thd);
-    }
-
-    public boolean IsLocked() {
-      checkException();
-      return held;
-    }
-
-    public void checkException() {
-      if (exe != null) {
-        throw new TestException("Exception thrown by other thread!", exe);
-      }
-    }
-
-    private void setAction(Action a) {
-      int stamp = action.getStamp();
-      // Wait for it to be HOLD before updating.
-      while (!action.compareAndSet(Action.HOLD, a, stamp, stamp + 1)) {
-        stamp = action.getStamp();
-      }
-    }
-
-    public synchronized void suspendWorker() throws Exception {
-      checkException();
-      if (runner == null) {
-        throw new TestException("We don't have any runner holding  " + lock);
-      }
-      Suspension.suspend(runner);
-    }
-
-    public Object getWorkerContendedMonitor() throws Exception {
-      checkException();
-      if (runner == null) {
-        return null;
-      }
-      return getCurrentContendedMonitor(runner);
-    }
-
-    public synchronized void DoLock() {
-      if (IsLocked()) {
-        throw new Error("lock is already acquired or being acquired.");
-      }
-      if (runner != null) {
-        throw new Error("Already have thread!");
-      }
-      runner = new Thread(() -> {
-        started = true;
-        try {
-          synchronized (lock) {
-            held = true;
-            int[] stamp_h = new int[] { -1 };
-            Action cur_action = Action.HOLD;
-            try {
-              while (true) {
-                cur_action = action.get(stamp_h);
-                int stamp = stamp_h[0];
-                if (cur_action == Action.RELEASE) {
-                  // The other thread will deal with reseting action.
-                  break;
-                }
-                try {
-                  switch (cur_action) {
-                    case HOLD:
-                      Thread.yield();
-                      break;
-                    case NOTIFY:
-                      lock.DoNotify();
-                      break;
-                    case NOTIFY_ALL:
-                      lock.DoNotifyAll();
-                      break;
-                    case TIMED_WAIT:
-                      lock.DoWait(timeout);
-                      break;
-                    case WAIT:
-                      lock.DoWait();
-                      break;
-                    default:
-                      throw new Error("Unknown action " + action);
-                  }
-                } finally {
-                  // reset action back to hold if it isn't something else.
-                  action.compareAndSet(cur_action, Action.HOLD, stamp, stamp+1);
-                }
-              }
-            } catch (Exception e) {
-              throw new TestException("Got an error while performing action " + cur_action, e);
-            }
-          }
-        } finally {
-          held = false;
-          started = false;
-        }
-      }, "Locker thread " + cnt.getAndIncrement() + " for " + lock);
-      // Make sure we can get any exceptions this throws.
-      runner.setUncaughtExceptionHandler((t, e) -> { exe = e; });
-      runner.start();
-    }
-
-    public void waitForLockToBeHeld() throws Exception {
-      while (true) {
-        if (IsLocked() && Objects.equals(runner, Monitors.getObjectMonitorUsage(lock).owner)) {
-          return;
-        }
-      }
-    }
-
-    public synchronized void waitForNotifySleep() throws Exception {
-      if (runner == null) {
-        throw new Error("No thread trying to lock!");
-      }
-      do {
-        checkException();
-      } while (!started ||
-          !Arrays.asList(Monitors.getObjectMonitorUsage(lock).notifyWaiters).contains(runner));
-    }
-
-    public synchronized void waitForContendedSleep() throws Exception {
-      if (runner == null) {
-        throw new Error("No thread trying to lock!");
-      }
-      do {
-        checkException();
-      } while (!started ||
-          runner.getState() != Thread.State.BLOCKED ||
-          !Arrays.asList(Monitors.getObjectMonitorUsage(lock).waiters).contains(runner));
-    }
-
-    public synchronized void DoNotify() {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      setAction(Action.NOTIFY);
-    }
-
-    public synchronized void DoNotifyAll() {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      setAction(Action.NOTIFY_ALL);
-    }
-
-    public synchronized void DoTimedWait() throws Exception {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      setAction(Action.TIMED_WAIT);
-    }
-
-    public synchronized void DoWait() throws Exception {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      setAction(Action.WAIT);
-    }
-
-    public synchronized void interruptWorker() throws Exception {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      runner.interrupt();
-    }
-
-    public synchronized void waitForActionToFinish() throws Exception {
-      checkException();
-      while (action.getReference() != Action.HOLD) { checkException(); }
-    }
-
-    public synchronized void DoUnlock() throws Exception {
-      Error throwing = null;
-      if (!IsLocked()) {
-        // We might just be racing some exception that was thrown by the worker thread. Cache the
-        // exception, we will throw one from the worker before this one.
-        throwing = new Error("Not locked!");
-      }
-      setAction(Action.RELEASE);
-      Thread run = runner;
-      runner = null;
-      while (held) {}
-      run.join();
-      action.set(Action.HOLD, 0);
-      // Make sure to throw any exception that occurred since it might not have unlocked due to our
-      // request.
-      checkException();
-      DoCleanup();
-      if (throwing != null) {
-        throw throwing;
-      }
-    }
-
-    public synchronized void DoCleanup() throws Exception {
-      if (runner != null) {
-        Thread run = runner;
-        runner = null;
-        while (held) {}
-        run.join();
-      }
-      action.set(Action.HOLD, 0);
-      exe = null;
-    }
-  }
-}
-
diff --git a/test/1932-monitor-events-misc/src/art/Monitors.java b/test/1932-monitor-events-misc/src/art/Monitors.java
new file mode 120000
index 0000000..61e8367
--- /dev/null
+++ b/test/1932-monitor-events-misc/src/art/Monitors.java
@@ -0,0 +1 @@
+../../../jvmti-common/Monitors.java
\ No newline at end of file
diff --git a/test/1932-monitor-events-misc/src/art/Suspension.java b/test/1932-monitor-events-misc/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1932-monitor-events-misc/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1932-monitor-events-misc/src/art/Suspension.java b/test/1932-monitor-events-misc/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1932-monitor-events-misc/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1933-monitor-current-contended/src/art/Monitors.java b/test/1933-monitor-current-contended/src/art/Monitors.java
deleted file mode 100644
index 7fe2b60..0000000
--- a/test/1933-monitor-current-contended/src/art/Monitors.java
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Method;
-import java.util.concurrent.atomic.*;
-import java.util.function.Function;
-import java.util.stream.Stream;
-import java.util.Arrays;
-import java.util.Objects;
-
-public class Monitors {
-  public native static void setupMonitorEvents(
-      Class<?> method_klass,
-      Method monitor_contended_enter_event,
-      Method monitor_contended_entered_event,
-      Method monitor_wait_event,
-      Method monitor_waited_event,
-      Class<?> lock_klass,
-      Thread thr);
-  public native static void stopMonitorEvents();
-
-  public static class NamedLock {
-    public final String name;
-    private volatile int calledNotify;
-    public NamedLock(String name) {
-      this.name = name;
-      calledNotify = 0;
-    }
-
-    public String toString() {
-      return String.format("NamedLock[%s]", name);
-    }
-
-    public final void DoWait() throws Exception {
-      final int v = calledNotify;
-      while (v == calledNotify) {
-        wait();
-      }
-    }
-
-    public final void DoWait(long t) throws Exception {
-      final int v = calledNotify;
-      final long target = System.currentTimeMillis() + (t / 2);
-      while (v == calledNotify && (t < 0 || System.currentTimeMillis() < target)) {
-        wait(t);
-      }
-    }
-
-    public final void DoNotifyAll() throws Exception {
-      calledNotify++;
-      notifyAll();
-    }
-
-    public final void DoNotify() throws Exception {
-      calledNotify++;
-      notify();
-    }
-  }
-
-  public static final class MonitorUsage {
-    public final Object monitor;
-    public final Thread owner;
-    public final int entryCount;
-    public final Thread[] waiters;
-    public final Thread[] notifyWaiters;
-
-    public MonitorUsage(
-        Object monitor,
-        Thread owner,
-        int entryCount,
-        Thread[] waiters,
-        Thread[] notifyWaiters) {
-      this.monitor = monitor;
-      this.entryCount = entryCount;
-      this.owner = owner;
-      this.waiters = waiters;
-      this.notifyWaiters = notifyWaiters;
-    }
-
-    private static String toNameList(Thread[] ts) {
-      return Arrays.toString(Arrays.stream(ts).map((Thread t) -> t.getName()).toArray());
-    }
-
-    public String toString() {
-      return String.format(
-          "MonitorUsage{ monitor: %s, owner: %s, entryCount: %d, waiters: %s, notify_waiters: %s }",
-          monitor,
-          (owner != null) ? owner.getName() : "<NULL>",
-          entryCount,
-          toNameList(waiters),
-          toNameList(notifyWaiters));
-    }
-  }
-
-  public static native MonitorUsage getObjectMonitorUsage(Object monitor);
-  public static native Object getCurrentContendedMonitor(Thread thr);
-
-  public static class TestException extends Error {
-    public TestException() { super(); }
-    public TestException(String s) { super(s); }
-    public TestException(String s, Throwable c) { super(s, c); }
-  }
-
-  public static class LockController {
-    private static enum Action { HOLD, RELEASE, NOTIFY, NOTIFY_ALL, WAIT, TIMED_WAIT }
-
-    public final NamedLock lock;
-    public final long timeout;
-    private final AtomicStampedReference<Action> action;
-    private volatile Thread runner = null;
-    private volatile boolean started = false;
-    private volatile boolean held = false;
-    private static final AtomicInteger cnt = new AtomicInteger(0);
-    private volatile Throwable exe;
-
-    public LockController(NamedLock lock) {
-      this(lock, 10 * 1000);
-    }
-    public LockController(NamedLock lock, long timeout) {
-      this.lock = lock;
-      this.timeout = timeout;
-      this.action = new AtomicStampedReference(Action.HOLD, 0);
-      this.exe = null;
-    }
-
-    public boolean IsWorkerThread(Thread thd) {
-      return Objects.equals(runner, thd);
-    }
-
-    public boolean IsLocked() {
-      checkException();
-      return held;
-    }
-
-    public void checkException() {
-      if (exe != null) {
-        throw new TestException("Exception thrown by other thread!", exe);
-      }
-    }
-
-    private void setAction(Action a) {
-      int stamp = action.getStamp();
-      // Wait for it to be HOLD before updating.
-      while (!action.compareAndSet(Action.HOLD, a, stamp, stamp + 1)) {
-        stamp = action.getStamp();
-      }
-    }
-
-    public synchronized void suspendWorker() throws Exception {
-      checkException();
-      if (runner == null) {
-        throw new TestException("We don't have any runner holding  " + lock);
-      }
-      Suspension.suspend(runner);
-    }
-
-    public Object getWorkerContendedMonitor() throws Exception {
-      checkException();
-      if (runner == null) {
-        return null;
-      }
-      return getCurrentContendedMonitor(runner);
-    }
-
-    public synchronized void DoLock() {
-      if (IsLocked()) {
-        throw new Error("lock is already acquired or being acquired.");
-      }
-      if (runner != null) {
-        throw new Error("Already have thread!");
-      }
-      runner = new Thread(() -> {
-        started = true;
-        try {
-          synchronized (lock) {
-            held = true;
-            int[] stamp_h = new int[] { -1 };
-            Action cur_action = Action.HOLD;
-            try {
-              while (true) {
-                cur_action = action.get(stamp_h);
-                int stamp = stamp_h[0];
-                if (cur_action == Action.RELEASE) {
-                  // The other thread will deal with reseting action.
-                  break;
-                }
-                try {
-                  switch (cur_action) {
-                    case HOLD:
-                      Thread.yield();
-                      break;
-                    case NOTIFY:
-                      lock.DoNotify();
-                      break;
-                    case NOTIFY_ALL:
-                      lock.DoNotifyAll();
-                      break;
-                    case TIMED_WAIT:
-                      lock.DoWait(timeout);
-                      break;
-                    case WAIT:
-                      lock.DoWait();
-                      break;
-                    default:
-                      throw new Error("Unknown action " + action);
-                  }
-                } finally {
-                  // reset action back to hold if it isn't something else.
-                  action.compareAndSet(cur_action, Action.HOLD, stamp, stamp+1);
-                }
-              }
-            } catch (Exception e) {
-              throw new TestException("Got an error while performing action " + cur_action, e);
-            }
-          }
-        } finally {
-          held = false;
-          started = false;
-        }
-      }, "Locker thread " + cnt.getAndIncrement() + " for " + lock);
-      // Make sure we can get any exceptions this throws.
-      runner.setUncaughtExceptionHandler((t, e) -> { exe = e; });
-      runner.start();
-    }
-
-    public void waitForLockToBeHeld() throws Exception {
-      while (true) {
-        if (IsLocked() && Objects.equals(runner, Monitors.getObjectMonitorUsage(lock).owner)) {
-          return;
-        }
-      }
-    }
-
-    public synchronized void waitForNotifySleep() throws Exception {
-      if (runner == null) {
-        throw new Error("No thread trying to lock!");
-      }
-      do {
-        checkException();
-      } while (!started ||
-          !Arrays.asList(Monitors.getObjectMonitorUsage(lock).notifyWaiters).contains(runner));
-    }
-
-    public synchronized void waitForContendedSleep() throws Exception {
-      if (runner == null) {
-        throw new Error("No thread trying to lock!");
-      }
-      do {
-        checkException();
-      } while (!started ||
-          runner.getState() != Thread.State.BLOCKED ||
-          !Arrays.asList(Monitors.getObjectMonitorUsage(lock).waiters).contains(runner));
-    }
-
-    public synchronized void DoNotify() {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      setAction(Action.NOTIFY);
-    }
-
-    public synchronized void DoNotifyAll() {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      setAction(Action.NOTIFY_ALL);
-    }
-
-    public synchronized void DoTimedWait() throws Exception {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      setAction(Action.TIMED_WAIT);
-    }
-
-    public synchronized void DoWait() throws Exception {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      setAction(Action.WAIT);
-    }
-
-    public synchronized void interruptWorker() throws Exception {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      runner.interrupt();
-    }
-
-    public synchronized void waitForActionToFinish() throws Exception {
-      checkException();
-      while (action.getReference() != Action.HOLD) { checkException(); }
-    }
-
-    public synchronized void DoUnlock() throws Exception {
-      Error throwing = null;
-      if (!IsLocked()) {
-        // We might just be racing some exception that was thrown by the worker thread. Cache the
-        // exception, we will throw one from the worker before this one.
-        throwing = new Error("Not locked!");
-      }
-      setAction(Action.RELEASE);
-      Thread run = runner;
-      runner = null;
-      while (held) {}
-      run.join();
-      action.set(Action.HOLD, 0);
-      // Make sure to throw any exception that occurred since it might not have unlocked due to our
-      // request.
-      checkException();
-      DoCleanup();
-      if (throwing != null) {
-        throw throwing;
-      }
-    }
-
-    public synchronized void DoCleanup() throws Exception {
-      if (runner != null) {
-        Thread run = runner;
-        runner = null;
-        while (held) {}
-        run.join();
-      }
-      action.set(Action.HOLD, 0);
-      exe = null;
-    }
-  }
-}
-
diff --git a/test/1933-monitor-current-contended/src/art/Monitors.java b/test/1933-monitor-current-contended/src/art/Monitors.java
new file mode 120000
index 0000000..61e8367
--- /dev/null
+++ b/test/1933-monitor-current-contended/src/art/Monitors.java
@@ -0,0 +1 @@
+../../../jvmti-common/Monitors.java
\ No newline at end of file
diff --git a/test/1933-monitor-current-contended/src/art/Suspension.java b/test/1933-monitor-current-contended/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1933-monitor-current-contended/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1933-monitor-current-contended/src/art/Suspension.java b/test/1933-monitor-current-contended/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1933-monitor-current-contended/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1934-jvmti-signal-thread/src/art/Monitors.java b/test/1934-jvmti-signal-thread/src/art/Monitors.java
deleted file mode 100644
index 7fe2b60..0000000
--- a/test/1934-jvmti-signal-thread/src/art/Monitors.java
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Method;
-import java.util.concurrent.atomic.*;
-import java.util.function.Function;
-import java.util.stream.Stream;
-import java.util.Arrays;
-import java.util.Objects;
-
-public class Monitors {
-  public native static void setupMonitorEvents(
-      Class<?> method_klass,
-      Method monitor_contended_enter_event,
-      Method monitor_contended_entered_event,
-      Method monitor_wait_event,
-      Method monitor_waited_event,
-      Class<?> lock_klass,
-      Thread thr);
-  public native static void stopMonitorEvents();
-
-  public static class NamedLock {
-    public final String name;
-    private volatile int calledNotify;
-    public NamedLock(String name) {
-      this.name = name;
-      calledNotify = 0;
-    }
-
-    public String toString() {
-      return String.format("NamedLock[%s]", name);
-    }
-
-    public final void DoWait() throws Exception {
-      final int v = calledNotify;
-      while (v == calledNotify) {
-        wait();
-      }
-    }
-
-    public final void DoWait(long t) throws Exception {
-      final int v = calledNotify;
-      final long target = System.currentTimeMillis() + (t / 2);
-      while (v == calledNotify && (t < 0 || System.currentTimeMillis() < target)) {
-        wait(t);
-      }
-    }
-
-    public final void DoNotifyAll() throws Exception {
-      calledNotify++;
-      notifyAll();
-    }
-
-    public final void DoNotify() throws Exception {
-      calledNotify++;
-      notify();
-    }
-  }
-
-  public static final class MonitorUsage {
-    public final Object monitor;
-    public final Thread owner;
-    public final int entryCount;
-    public final Thread[] waiters;
-    public final Thread[] notifyWaiters;
-
-    public MonitorUsage(
-        Object monitor,
-        Thread owner,
-        int entryCount,
-        Thread[] waiters,
-        Thread[] notifyWaiters) {
-      this.monitor = monitor;
-      this.entryCount = entryCount;
-      this.owner = owner;
-      this.waiters = waiters;
-      this.notifyWaiters = notifyWaiters;
-    }
-
-    private static String toNameList(Thread[] ts) {
-      return Arrays.toString(Arrays.stream(ts).map((Thread t) -> t.getName()).toArray());
-    }
-
-    public String toString() {
-      return String.format(
-          "MonitorUsage{ monitor: %s, owner: %s, entryCount: %d, waiters: %s, notify_waiters: %s }",
-          monitor,
-          (owner != null) ? owner.getName() : "<NULL>",
-          entryCount,
-          toNameList(waiters),
-          toNameList(notifyWaiters));
-    }
-  }
-
-  public static native MonitorUsage getObjectMonitorUsage(Object monitor);
-  public static native Object getCurrentContendedMonitor(Thread thr);
-
-  public static class TestException extends Error {
-    public TestException() { super(); }
-    public TestException(String s) { super(s); }
-    public TestException(String s, Throwable c) { super(s, c); }
-  }
-
-  public static class LockController {
-    private static enum Action { HOLD, RELEASE, NOTIFY, NOTIFY_ALL, WAIT, TIMED_WAIT }
-
-    public final NamedLock lock;
-    public final long timeout;
-    private final AtomicStampedReference<Action> action;
-    private volatile Thread runner = null;
-    private volatile boolean started = false;
-    private volatile boolean held = false;
-    private static final AtomicInteger cnt = new AtomicInteger(0);
-    private volatile Throwable exe;
-
-    public LockController(NamedLock lock) {
-      this(lock, 10 * 1000);
-    }
-    public LockController(NamedLock lock, long timeout) {
-      this.lock = lock;
-      this.timeout = timeout;
-      this.action = new AtomicStampedReference(Action.HOLD, 0);
-      this.exe = null;
-    }
-
-    public boolean IsWorkerThread(Thread thd) {
-      return Objects.equals(runner, thd);
-    }
-
-    public boolean IsLocked() {
-      checkException();
-      return held;
-    }
-
-    public void checkException() {
-      if (exe != null) {
-        throw new TestException("Exception thrown by other thread!", exe);
-      }
-    }
-
-    private void setAction(Action a) {
-      int stamp = action.getStamp();
-      // Wait for it to be HOLD before updating.
-      while (!action.compareAndSet(Action.HOLD, a, stamp, stamp + 1)) {
-        stamp = action.getStamp();
-      }
-    }
-
-    public synchronized void suspendWorker() throws Exception {
-      checkException();
-      if (runner == null) {
-        throw new TestException("We don't have any runner holding  " + lock);
-      }
-      Suspension.suspend(runner);
-    }
-
-    public Object getWorkerContendedMonitor() throws Exception {
-      checkException();
-      if (runner == null) {
-        return null;
-      }
-      return getCurrentContendedMonitor(runner);
-    }
-
-    public synchronized void DoLock() {
-      if (IsLocked()) {
-        throw new Error("lock is already acquired or being acquired.");
-      }
-      if (runner != null) {
-        throw new Error("Already have thread!");
-      }
-      runner = new Thread(() -> {
-        started = true;
-        try {
-          synchronized (lock) {
-            held = true;
-            int[] stamp_h = new int[] { -1 };
-            Action cur_action = Action.HOLD;
-            try {
-              while (true) {
-                cur_action = action.get(stamp_h);
-                int stamp = stamp_h[0];
-                if (cur_action == Action.RELEASE) {
-                  // The other thread will deal with reseting action.
-                  break;
-                }
-                try {
-                  switch (cur_action) {
-                    case HOLD:
-                      Thread.yield();
-                      break;
-                    case NOTIFY:
-                      lock.DoNotify();
-                      break;
-                    case NOTIFY_ALL:
-                      lock.DoNotifyAll();
-                      break;
-                    case TIMED_WAIT:
-                      lock.DoWait(timeout);
-                      break;
-                    case WAIT:
-                      lock.DoWait();
-                      break;
-                    default:
-                      throw new Error("Unknown action " + action);
-                  }
-                } finally {
-                  // reset action back to hold if it isn't something else.
-                  action.compareAndSet(cur_action, Action.HOLD, stamp, stamp+1);
-                }
-              }
-            } catch (Exception e) {
-              throw new TestException("Got an error while performing action " + cur_action, e);
-            }
-          }
-        } finally {
-          held = false;
-          started = false;
-        }
-      }, "Locker thread " + cnt.getAndIncrement() + " for " + lock);
-      // Make sure we can get any exceptions this throws.
-      runner.setUncaughtExceptionHandler((t, e) -> { exe = e; });
-      runner.start();
-    }
-
-    public void waitForLockToBeHeld() throws Exception {
-      while (true) {
-        if (IsLocked() && Objects.equals(runner, Monitors.getObjectMonitorUsage(lock).owner)) {
-          return;
-        }
-      }
-    }
-
-    public synchronized void waitForNotifySleep() throws Exception {
-      if (runner == null) {
-        throw new Error("No thread trying to lock!");
-      }
-      do {
-        checkException();
-      } while (!started ||
-          !Arrays.asList(Monitors.getObjectMonitorUsage(lock).notifyWaiters).contains(runner));
-    }
-
-    public synchronized void waitForContendedSleep() throws Exception {
-      if (runner == null) {
-        throw new Error("No thread trying to lock!");
-      }
-      do {
-        checkException();
-      } while (!started ||
-          runner.getState() != Thread.State.BLOCKED ||
-          !Arrays.asList(Monitors.getObjectMonitorUsage(lock).waiters).contains(runner));
-    }
-
-    public synchronized void DoNotify() {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      setAction(Action.NOTIFY);
-    }
-
-    public synchronized void DoNotifyAll() {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      setAction(Action.NOTIFY_ALL);
-    }
-
-    public synchronized void DoTimedWait() throws Exception {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      setAction(Action.TIMED_WAIT);
-    }
-
-    public synchronized void DoWait() throws Exception {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      setAction(Action.WAIT);
-    }
-
-    public synchronized void interruptWorker() throws Exception {
-      if (!IsLocked()) {
-        throw new Error("Not locked");
-      }
-      runner.interrupt();
-    }
-
-    public synchronized void waitForActionToFinish() throws Exception {
-      checkException();
-      while (action.getReference() != Action.HOLD) { checkException(); }
-    }
-
-    public synchronized void DoUnlock() throws Exception {
-      Error throwing = null;
-      if (!IsLocked()) {
-        // We might just be racing some exception that was thrown by the worker thread. Cache the
-        // exception, we will throw one from the worker before this one.
-        throwing = new Error("Not locked!");
-      }
-      setAction(Action.RELEASE);
-      Thread run = runner;
-      runner = null;
-      while (held) {}
-      run.join();
-      action.set(Action.HOLD, 0);
-      // Make sure to throw any exception that occurred since it might not have unlocked due to our
-      // request.
-      checkException();
-      DoCleanup();
-      if (throwing != null) {
-        throw throwing;
-      }
-    }
-
-    public synchronized void DoCleanup() throws Exception {
-      if (runner != null) {
-        Thread run = runner;
-        runner = null;
-        while (held) {}
-        run.join();
-      }
-      action.set(Action.HOLD, 0);
-      exe = null;
-    }
-  }
-}
-
diff --git a/test/1934-jvmti-signal-thread/src/art/Monitors.java b/test/1934-jvmti-signal-thread/src/art/Monitors.java
new file mode 120000
index 0000000..61e8367
--- /dev/null
+++ b/test/1934-jvmti-signal-thread/src/art/Monitors.java
@@ -0,0 +1 @@
+../../../jvmti-common/Monitors.java
\ No newline at end of file
diff --git a/test/1934-jvmti-signal-thread/src/art/Suspension.java b/test/1934-jvmti-signal-thread/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1934-jvmti-signal-thread/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1934-jvmti-signal-thread/src/art/Suspension.java b/test/1934-jvmti-signal-thread/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1934-jvmti-signal-thread/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1934-jvmti-signal-thread/src/art/Threads.java b/test/1934-jvmti-signal-thread/src/art/Threads.java
new file mode 120000
index 0000000..0749743
--- /dev/null
+++ b/test/1934-jvmti-signal-thread/src/art/Threads.java
@@ -0,0 +1 @@
+../../../jvmti-common/Threads.java
\ No newline at end of file
diff --git a/test/1935-get-set-current-frame-jit/src/art/Breakpoint.java b/test/1935-get-set-current-frame-jit/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1935-get-set-current-frame-jit/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1935-get-set-current-frame-jit/src/art/Breakpoint.java b/test/1935-get-set-current-frame-jit/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1935-get-set-current-frame-jit/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1935-get-set-current-frame-jit/src/art/Locals.java b/test/1935-get-set-current-frame-jit/src/art/Locals.java
deleted file mode 100644
index 22e21be..0000000
--- a/test/1935-get-set-current-frame-jit/src/art/Locals.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.Objects;
-
-public class Locals {
-  public static native void EnableLocalVariableAccess();
-
-  public static class VariableDescription {
-    public final long start_location;
-    public final int length;
-    public final String name;
-    public final String signature;
-    public final String generic_signature;
-    public final int slot;
-
-    public VariableDescription(
-        long start, int length, String name, String sig, String gen_sig, int slot) {
-      this.start_location = start;
-      this.length = length;
-      this.name = name;
-      this.signature = sig;
-      this.generic_signature = gen_sig;
-      this.slot = slot;
-    }
-
-    @Override
-    public String toString() {
-      return String.format(
-          "VariableDescription { " +
-            "Sig: '%s', Name: '%s', Gen_sig: '%s', slot: %d, start: %d, len: %d" +
-          "}",
-          this.signature,
-          this.name,
-          this.generic_signature,
-          this.slot,
-          this.start_location,
-          this.length);
-    }
-    public boolean equals(Object other) {
-      if (!(other instanceof VariableDescription)) {
-        return false;
-      } else {
-        VariableDescription v = (VariableDescription)other;
-        return Objects.equals(v.signature, signature) &&
-            Objects.equals(v.name, name) &&
-            Objects.equals(v.generic_signature, generic_signature) &&
-            v.slot == slot &&
-            v.start_location == start_location &&
-            v.length == length;
-      }
-    }
-    public int hashCode() {
-      return Objects.hash(this.signature, this.name, this.generic_signature, this.slot,
-          this.start_location, this.length);
-    }
-  }
-
-  public static native VariableDescription[] GetLocalVariableTable(Executable e);
-
-  public static VariableDescription GetVariableAtLine(
-      Executable e, String name, String sig, int line) throws Exception {
-    return GetVariableAtLocation(e, name, sig, Breakpoint.lineToLocation(e, line));
-  }
-
-  public static VariableDescription GetVariableAtLocation(
-      Executable e, String name, String sig, long loc) {
-    VariableDescription[] vars = GetLocalVariableTable(e);
-    for (VariableDescription var : vars) {
-      if (var.start_location <= loc &&
-          var.length + var.start_location > loc &&
-          var.name.equals(name) &&
-          var.signature.equals(sig)) {
-        return var;
-      }
-    }
-    throw new Error(
-        "Unable to find variable " + name + " (sig: " + sig + ") in " + e + " at loc " + loc);
-  }
-
-  public static native int GetLocalVariableInt(Thread thr, int depth, int slot);
-  public static native long GetLocalVariableLong(Thread thr, int depth, int slot);
-  public static native float GetLocalVariableFloat(Thread thr, int depth, int slot);
-  public static native double GetLocalVariableDouble(Thread thr, int depth, int slot);
-  public static native Object GetLocalVariableObject(Thread thr, int depth, int slot);
-  public static native Object GetLocalInstance(Thread thr, int depth);
-
-  public static void SetLocalVariableInt(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableInt(thr, depth, slot, ((Number)val).intValue());
-  }
-  public static void SetLocalVariableLong(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableLong(thr, depth, slot, ((Number)val).longValue());
-  }
-  public static void SetLocalVariableFloat(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableFloat(thr, depth, slot, ((Number)val).floatValue());
-  }
-  public static void SetLocalVariableDouble(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableDouble(thr, depth, slot, ((Number)val).doubleValue());
-  }
-  public static native void SetLocalVariableInt(Thread thr, int depth, int slot, int val);
-  public static native void SetLocalVariableLong(Thread thr, int depth, int slot, long val);
-  public static native void SetLocalVariableFloat(Thread thr, int depth, int slot, float val);
-  public static native void SetLocalVariableDouble(Thread thr, int depth, int slot, double val);
-  public static native void SetLocalVariableObject(Thread thr, int depth, int slot, Object val);
-}
diff --git a/test/1935-get-set-current-frame-jit/src/art/Locals.java b/test/1935-get-set-current-frame-jit/src/art/Locals.java
new file mode 120000
index 0000000..2998386
--- /dev/null
+++ b/test/1935-get-set-current-frame-jit/src/art/Locals.java
@@ -0,0 +1 @@
+../../../jvmti-common/Locals.java
\ No newline at end of file
diff --git a/test/1935-get-set-current-frame-jit/src/art/StackTrace.java b/test/1935-get-set-current-frame-jit/src/art/StackTrace.java
deleted file mode 100644
index 2ea2f20..0000000
--- a/test/1935-get-set-current-frame-jit/src/art/StackTrace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Executable;
-
-public class StackTrace {
-  public static class StackFrameData {
-    public final Thread thr;
-    public final Executable method;
-    public final long current_location;
-    public final int depth;
-
-    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
-      this.thr = thr;
-      this.method = e;
-      this.current_location = loc;
-      this.depth = depth;
-    }
-    @Override
-    public String toString() {
-      return String.format(
-          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
-          this.thr,
-          this.method,
-          this.current_location,
-          this.depth);
-    }
-  }
-
-  public static native int GetStackDepth(Thread thr);
-
-  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
-
-  public static StackFrameData[] GetStackTrace(Thread thr) {
-    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
-    // suspended. The spec says that not being suspended is fine but since we want this to be
-    // consistent we will suspend for the RI.
-    boolean suspend_thread =
-        !System.getProperty("java.vm.name").equals("Dalvik") &&
-        !thr.equals(Thread.currentThread()) &&
-        !Suspension.isSuspended(thr);
-    if (suspend_thread) {
-      Suspension.suspend(thr);
-    }
-    StackFrameData[] out = nativeGetStackTrace(thr);
-    if (suspend_thread) {
-      Suspension.resume(thr);
-    }
-    return out;
-  }
-}
-
diff --git a/test/1935-get-set-current-frame-jit/src/art/StackTrace.java b/test/1935-get-set-current-frame-jit/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1935-get-set-current-frame-jit/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1935-get-set-current-frame-jit/src/art/Suspension.java b/test/1935-get-set-current-frame-jit/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1935-get-set-current-frame-jit/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1935-get-set-current-frame-jit/src/art/Suspension.java b/test/1935-get-set-current-frame-jit/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1935-get-set-current-frame-jit/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1936-thread-end-events/src/art/Trace.java b/test/1936-thread-end-events/src/art/Trace.java
deleted file mode 100644
index 8999bb1..0000000
--- a/test/1936-thread-end-events/src/art/Trace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-
-public class Trace {
-  public static native void enableTracing(Class<?> methodClass,
-                                          Method entryMethod,
-                                          Method exitMethod,
-                                          Method fieldAccess,
-                                          Method fieldModify,
-                                          Method singleStep,
-                                          Thread thr);
-  public static native void disableTracing(Thread thr);
-
-  public static void enableFieldTracing(Class<?> methodClass,
-                                        Method fieldAccess,
-                                        Method fieldModify,
-                                        Thread thr) {
-    enableTracing(methodClass, null, null, fieldAccess, fieldModify, null, thr);
-  }
-
-  public static void enableMethodTracing(Class<?> methodClass,
-                                         Method entryMethod,
-                                         Method exitMethod,
-                                         Thread thr) {
-    enableTracing(methodClass, entryMethod, exitMethod, null, null, null, thr);
-  }
-
-  public static void enableSingleStepTracing(Class<?> methodClass,
-                                             Method singleStep,
-                                             Thread thr) {
-    enableTracing(methodClass, null, null, null, null, singleStep, thr);
-  }
-
-  public static native void watchFieldAccess(Field f);
-  public static native void watchFieldModification(Field f);
-  public static native void watchAllFieldAccesses();
-  public static native void watchAllFieldModifications();
-
-  // the names, arguments, and even line numbers of these functions are embedded in the tests so we
-  // need to add to the bottom and not modify old ones to maintain compat.
-  public static native void enableTracing2(Class<?> methodClass,
-                                           Method entryMethod,
-                                           Method exitMethod,
-                                           Method fieldAccess,
-                                           Method fieldModify,
-                                           Method singleStep,
-                                           Method ThreadStart,
-                                           Method ThreadEnd,
-                                           Thread thr);
-}
diff --git a/test/1936-thread-end-events/src/art/Trace.java b/test/1936-thread-end-events/src/art/Trace.java
new file mode 120000
index 0000000..5d9b44b
--- /dev/null
+++ b/test/1936-thread-end-events/src/art/Trace.java
@@ -0,0 +1 @@
+../../../jvmti-common/Trace.java
\ No newline at end of file
diff --git a/test/1937-transform-soft-fail/src/art/Redefinition.java b/test/1937-transform-soft-fail/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/1937-transform-soft-fail/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/1937-transform-soft-fail/src/art/Redefinition.java b/test/1937-transform-soft-fail/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1937-transform-soft-fail/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1938-transform-abstract-single-impl/src/art/Redefinition.java b/test/1938-transform-abstract-single-impl/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/1938-transform-abstract-single-impl/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/1938-transform-abstract-single-impl/src/art/Redefinition.java b/test/1938-transform-abstract-single-impl/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1938-transform-abstract-single-impl/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1939-proxy-frames/src/art/Breakpoint.java b/test/1939-proxy-frames/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1939-proxy-frames/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1939-proxy-frames/src/art/Breakpoint.java b/test/1939-proxy-frames/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1939-proxy-frames/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1939-proxy-frames/src/art/Locals.java b/test/1939-proxy-frames/src/art/Locals.java
deleted file mode 100644
index 22e21be..0000000
--- a/test/1939-proxy-frames/src/art/Locals.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.Objects;
-
-public class Locals {
-  public static native void EnableLocalVariableAccess();
-
-  public static class VariableDescription {
-    public final long start_location;
-    public final int length;
-    public final String name;
-    public final String signature;
-    public final String generic_signature;
-    public final int slot;
-
-    public VariableDescription(
-        long start, int length, String name, String sig, String gen_sig, int slot) {
-      this.start_location = start;
-      this.length = length;
-      this.name = name;
-      this.signature = sig;
-      this.generic_signature = gen_sig;
-      this.slot = slot;
-    }
-
-    @Override
-    public String toString() {
-      return String.format(
-          "VariableDescription { " +
-            "Sig: '%s', Name: '%s', Gen_sig: '%s', slot: %d, start: %d, len: %d" +
-          "}",
-          this.signature,
-          this.name,
-          this.generic_signature,
-          this.slot,
-          this.start_location,
-          this.length);
-    }
-    public boolean equals(Object other) {
-      if (!(other instanceof VariableDescription)) {
-        return false;
-      } else {
-        VariableDescription v = (VariableDescription)other;
-        return Objects.equals(v.signature, signature) &&
-            Objects.equals(v.name, name) &&
-            Objects.equals(v.generic_signature, generic_signature) &&
-            v.slot == slot &&
-            v.start_location == start_location &&
-            v.length == length;
-      }
-    }
-    public int hashCode() {
-      return Objects.hash(this.signature, this.name, this.generic_signature, this.slot,
-          this.start_location, this.length);
-    }
-  }
-
-  public static native VariableDescription[] GetLocalVariableTable(Executable e);
-
-  public static VariableDescription GetVariableAtLine(
-      Executable e, String name, String sig, int line) throws Exception {
-    return GetVariableAtLocation(e, name, sig, Breakpoint.lineToLocation(e, line));
-  }
-
-  public static VariableDescription GetVariableAtLocation(
-      Executable e, String name, String sig, long loc) {
-    VariableDescription[] vars = GetLocalVariableTable(e);
-    for (VariableDescription var : vars) {
-      if (var.start_location <= loc &&
-          var.length + var.start_location > loc &&
-          var.name.equals(name) &&
-          var.signature.equals(sig)) {
-        return var;
-      }
-    }
-    throw new Error(
-        "Unable to find variable " + name + " (sig: " + sig + ") in " + e + " at loc " + loc);
-  }
-
-  public static native int GetLocalVariableInt(Thread thr, int depth, int slot);
-  public static native long GetLocalVariableLong(Thread thr, int depth, int slot);
-  public static native float GetLocalVariableFloat(Thread thr, int depth, int slot);
-  public static native double GetLocalVariableDouble(Thread thr, int depth, int slot);
-  public static native Object GetLocalVariableObject(Thread thr, int depth, int slot);
-  public static native Object GetLocalInstance(Thread thr, int depth);
-
-  public static void SetLocalVariableInt(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableInt(thr, depth, slot, ((Number)val).intValue());
-  }
-  public static void SetLocalVariableLong(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableLong(thr, depth, slot, ((Number)val).longValue());
-  }
-  public static void SetLocalVariableFloat(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableFloat(thr, depth, slot, ((Number)val).floatValue());
-  }
-  public static void SetLocalVariableDouble(Thread thr, int depth, int slot, Object val) {
-    SetLocalVariableDouble(thr, depth, slot, ((Number)val).doubleValue());
-  }
-  public static native void SetLocalVariableInt(Thread thr, int depth, int slot, int val);
-  public static native void SetLocalVariableLong(Thread thr, int depth, int slot, long val);
-  public static native void SetLocalVariableFloat(Thread thr, int depth, int slot, float val);
-  public static native void SetLocalVariableDouble(Thread thr, int depth, int slot, double val);
-  public static native void SetLocalVariableObject(Thread thr, int depth, int slot, Object val);
-}
diff --git a/test/1939-proxy-frames/src/art/Locals.java b/test/1939-proxy-frames/src/art/Locals.java
new file mode 120000
index 0000000..2998386
--- /dev/null
+++ b/test/1939-proxy-frames/src/art/Locals.java
@@ -0,0 +1 @@
+../../../jvmti-common/Locals.java
\ No newline at end of file
diff --git a/test/1939-proxy-frames/src/art/StackTrace.java b/test/1939-proxy-frames/src/art/StackTrace.java
deleted file mode 100644
index 2ea2f20..0000000
--- a/test/1939-proxy-frames/src/art/StackTrace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Executable;
-
-public class StackTrace {
-  public static class StackFrameData {
-    public final Thread thr;
-    public final Executable method;
-    public final long current_location;
-    public final int depth;
-
-    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
-      this.thr = thr;
-      this.method = e;
-      this.current_location = loc;
-      this.depth = depth;
-    }
-    @Override
-    public String toString() {
-      return String.format(
-          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
-          this.thr,
-          this.method,
-          this.current_location,
-          this.depth);
-    }
-  }
-
-  public static native int GetStackDepth(Thread thr);
-
-  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
-
-  public static StackFrameData[] GetStackTrace(Thread thr) {
-    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
-    // suspended. The spec says that not being suspended is fine but since we want this to be
-    // consistent we will suspend for the RI.
-    boolean suspend_thread =
-        !System.getProperty("java.vm.name").equals("Dalvik") &&
-        !thr.equals(Thread.currentThread()) &&
-        !Suspension.isSuspended(thr);
-    if (suspend_thread) {
-      Suspension.suspend(thr);
-    }
-    StackFrameData[] out = nativeGetStackTrace(thr);
-    if (suspend_thread) {
-      Suspension.resume(thr);
-    }
-    return out;
-  }
-}
-
diff --git a/test/1939-proxy-frames/src/art/StackTrace.java b/test/1939-proxy-frames/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1939-proxy-frames/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1939-proxy-frames/src/art/Suspension.java b/test/1939-proxy-frames/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1939-proxy-frames/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1939-proxy-frames/src/art/Suspension.java b/test/1939-proxy-frames/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1939-proxy-frames/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1941-dispose-stress/src/art/Breakpoint.java b/test/1941-dispose-stress/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1941-dispose-stress/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1941-dispose-stress/src/art/Breakpoint.java b/test/1941-dispose-stress/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1941-dispose-stress/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1941-dispose-stress/src/art/Test1941.java b/test/1941-dispose-stress/src/art/Test1941.java
index 2cffa56..d0d76b6 100644
--- a/test/1941-dispose-stress/src/art/Test1941.java
+++ b/test/1941-dispose-stress/src/art/Test1941.java
@@ -24,6 +24,7 @@
 public class Test1941 {
   public static final boolean PRINT_CNT = false;
   public static long CNT = 0;
+  public static final long MAX_ITERS = 100_000;
 
   // Method with multiple paths we can break on.
   public static long fib(long f) {
@@ -42,9 +43,14 @@
     // Don't bother actually doing anything.
   }
 
-  public static void LoopAllocFreeEnv(Semaphore sem) {
+  public static void LoopAllocFreeEnv(Semaphore sem, Semaphore delay) {
     sem.release();
-    while (!Thread.interrupted()) {
+    try {
+      delay.acquire();
+    } catch (Exception e) {
+      throw new Error("exception occurred!", e);
+    }
+    while (!Thread.interrupted() && CNT < MAX_ITERS) {
       CNT++;
       long env = AllocEnv();
       FreeEnv(env);
@@ -59,7 +65,8 @@
 
   public static void run() throws Exception {
     final Semaphore sem = new Semaphore(0);
-    Thread thr = new Thread(() -> { LoopAllocFreeEnv(sem); }, "LoopNative");
+    final Semaphore delay = new Semaphore(0);
+    Thread thr = new Thread(() -> { LoopAllocFreeEnv(sem, delay); }, "LoopNative");
     thr.start();
     // Make sure the other thread is actually started.
     sem.acquire();
@@ -68,6 +75,10 @@
             "notifySingleStep", Thread.class, Executable.class, Long.TYPE),
         thr);
     setTracingOn(Thread.currentThread(), true);
+    // Don't let the other thread start actually running until we've started
+    // tracing this thread too in order to ensure that the (formerly) racy
+    // behavior can happen.
+    delay.release();
 
     System.out.println("fib(20) is " + fib(20));
 
diff --git a/test/1941-dispose-stress/src/art/Trace.java b/test/1941-dispose-stress/src/art/Trace.java
deleted file mode 100644
index 8999bb1..0000000
--- a/test/1941-dispose-stress/src/art/Trace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-
-public class Trace {
-  public static native void enableTracing(Class<?> methodClass,
-                                          Method entryMethod,
-                                          Method exitMethod,
-                                          Method fieldAccess,
-                                          Method fieldModify,
-                                          Method singleStep,
-                                          Thread thr);
-  public static native void disableTracing(Thread thr);
-
-  public static void enableFieldTracing(Class<?> methodClass,
-                                        Method fieldAccess,
-                                        Method fieldModify,
-                                        Thread thr) {
-    enableTracing(methodClass, null, null, fieldAccess, fieldModify, null, thr);
-  }
-
-  public static void enableMethodTracing(Class<?> methodClass,
-                                         Method entryMethod,
-                                         Method exitMethod,
-                                         Thread thr) {
-    enableTracing(methodClass, entryMethod, exitMethod, null, null, null, thr);
-  }
-
-  public static void enableSingleStepTracing(Class<?> methodClass,
-                                             Method singleStep,
-                                             Thread thr) {
-    enableTracing(methodClass, null, null, null, null, singleStep, thr);
-  }
-
-  public static native void watchFieldAccess(Field f);
-  public static native void watchFieldModification(Field f);
-  public static native void watchAllFieldAccesses();
-  public static native void watchAllFieldModifications();
-
-  // the names, arguments, and even line numbers of these functions are embedded in the tests so we
-  // need to add to the bottom and not modify old ones to maintain compat.
-  public static native void enableTracing2(Class<?> methodClass,
-                                           Method entryMethod,
-                                           Method exitMethod,
-                                           Method fieldAccess,
-                                           Method fieldModify,
-                                           Method singleStep,
-                                           Method ThreadStart,
-                                           Method ThreadEnd,
-                                           Thread thr);
-}
diff --git a/test/1941-dispose-stress/src/art/Trace.java b/test/1941-dispose-stress/src/art/Trace.java
new file mode 120000
index 0000000..5d9b44b
--- /dev/null
+++ b/test/1941-dispose-stress/src/art/Trace.java
@@ -0,0 +1 @@
+../../../jvmti-common/Trace.java
\ No newline at end of file
diff --git a/test/1942-suspend-raw-monitor-exit/src/art/Suspension.java b/test/1942-suspend-raw-monitor-exit/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1942-suspend-raw-monitor-exit/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1942-suspend-raw-monitor-exit/src/art/Suspension.java b/test/1942-suspend-raw-monitor-exit/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1942-suspend-raw-monitor-exit/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1943-suspend-raw-monitor-wait/src/art/Suspension.java b/test/1943-suspend-raw-monitor-wait/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1943-suspend-raw-monitor-wait/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1943-suspend-raw-monitor-wait/src/art/Suspension.java b/test/1943-suspend-raw-monitor-wait/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1943-suspend-raw-monitor-wait/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1944-sudden-exit/check b/test/1944-sudden-exit/check
deleted file mode 100755
index 591fbb8..0000000
--- a/test/1944-sudden-exit/check
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# The number of paused background threads (and therefore InterruptedExceptions)
-# can change so we will just delete their lines from the log.
-
-# Pure virtual function can be printed because sudden exits are not really
-# supported. It is an error message but the test is to make sure that we exit
-# with the right exit code.
-cat "$2" \
-  | sed "/Pure virtual function called!/d" \
-  | diff --strip-trailing-cr -q "$1" - >/dev/null
diff --git a/test/1944-sudden-exit/expected.txt b/test/1944-sudden-exit/expected.txt
deleted file mode 100644
index 4c6eb47..0000000
--- a/test/1944-sudden-exit/expected.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-All threads started
-Exiting suddenly
-exit status:  12
diff --git a/test/1944-sudden-exit/info.txt b/test/1944-sudden-exit/info.txt
deleted file mode 100644
index d575ce5..0000000
--- a/test/1944-sudden-exit/info.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Test to make sure the runtime will not crash if an agent calls exit(3) while
-other threads are performing operations.
-
-In this case we have multiple threads all performing single stepping when we
-call exit(3).
diff --git a/test/1944-sudden-exit/run b/test/1944-sudden-exit/run
deleted file mode 100755
index eb601fd..0000000
--- a/test/1944-sudden-exit/run
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Ask for stack traces to be dumped to a file rather than to stdout.
-./default-run "$@" --jvmti
-echo "exit status: " $?
diff --git a/test/1944-sudden-exit/src/Main.java b/test/1944-sudden-exit/src/Main.java
deleted file mode 100644
index 1644c6e..0000000
--- a/test/1944-sudden-exit/src/Main.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class Main {
-  public static void main(String[] args) throws Exception {
-    art.Test1944.run();
-  }
-}
diff --git a/test/1944-sudden-exit/src/art/Test1944.java b/test/1944-sudden-exit/src/art/Test1944.java
deleted file mode 100644
index 36cbb2b..0000000
--- a/test/1944-sudden-exit/src/art/Test1944.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.Arrays;
-import java.lang.reflect.Executable;
-import java.lang.reflect.Method;
-import java.util.concurrent.Semaphore;
-
-public class Test1944 {
-  // Just calculate fib forever.
-  public static void fib(Semaphore started) {
-    started.release();
-    long a = 1;
-    long b = 1;
-    while (true) {
-      long c = a + b;
-      a = b;
-      b = c;
-    }
-  }
-
-  // Don't bother actually doing anything.
-  public static void notifySingleStep(Thread thr, Executable e, long loc) { }
-
-  public static native void exitNow();
-
-  private static int num_threads = 10;
-
-  public static void run() throws Exception {
-    final Semaphore started = new Semaphore(-(num_threads - 1));
-
-    Trace.enableSingleStepTracing(Test1944.class,
-        Test1944.class.getDeclaredMethod(
-            "notifySingleStep", Thread.class, Executable.class, Long.TYPE),
-        null);
-
-    Thread[] threads = new Thread[num_threads];
-    for (int i = 0; i < num_threads; i++) {
-      threads[i] = new Thread(() -> { fib(started); });
-      // Make half daemons.
-      threads[i].setDaemon(i % 2 == 0);
-      threads[i].start();
-    }
-    // Wait for all threads to start.
-    started.acquire();
-    System.out.println("All threads started");
-    // sleep a little
-    Thread.sleep(10);
-    // Die.
-    System.out.println("Exiting suddenly");
-    exitNow();
-    System.out.println("FAILED: Should not reach here!");
-  }
-}
diff --git a/test/1944-sudden-exit/src/art/Trace.java b/test/1944-sudden-exit/src/art/Trace.java
deleted file mode 100644
index 8999bb1..0000000
--- a/test/1944-sudden-exit/src/art/Trace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-
-public class Trace {
-  public static native void enableTracing(Class<?> methodClass,
-                                          Method entryMethod,
-                                          Method exitMethod,
-                                          Method fieldAccess,
-                                          Method fieldModify,
-                                          Method singleStep,
-                                          Thread thr);
-  public static native void disableTracing(Thread thr);
-
-  public static void enableFieldTracing(Class<?> methodClass,
-                                        Method fieldAccess,
-                                        Method fieldModify,
-                                        Thread thr) {
-    enableTracing(methodClass, null, null, fieldAccess, fieldModify, null, thr);
-  }
-
-  public static void enableMethodTracing(Class<?> methodClass,
-                                         Method entryMethod,
-                                         Method exitMethod,
-                                         Thread thr) {
-    enableTracing(methodClass, entryMethod, exitMethod, null, null, null, thr);
-  }
-
-  public static void enableSingleStepTracing(Class<?> methodClass,
-                                             Method singleStep,
-                                             Thread thr) {
-    enableTracing(methodClass, null, null, null, null, singleStep, thr);
-  }
-
-  public static native void watchFieldAccess(Field f);
-  public static native void watchFieldModification(Field f);
-  public static native void watchAllFieldAccesses();
-  public static native void watchAllFieldModifications();
-
-  // the names, arguments, and even line numbers of these functions are embedded in the tests so we
-  // need to add to the bottom and not modify old ones to maintain compat.
-  public static native void enableTracing2(Class<?> methodClass,
-                                           Method entryMethod,
-                                           Method exitMethod,
-                                           Method fieldAccess,
-                                           Method fieldModify,
-                                           Method singleStep,
-                                           Method ThreadStart,
-                                           Method ThreadEnd,
-                                           Thread thr);
-}
diff --git a/test/1944-sudden-exit/sudden_exit.cc b/test/1944-sudden-exit/sudden_exit.cc
deleted file mode 100644
index e0a076e..0000000
--- a/test/1944-sudden-exit/sudden_exit.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdlib.h>
-#include "jni.h"
-
-namespace art {
-namespace Test1944SuddenExit {
-
-extern "C" JNIEXPORT void JNICALL Java_art_Test1944_exitNow(JNIEnv*, jclass)
-    __attribute__((noreturn));
-
-extern "C" JNIEXPORT void JNICALL Java_art_Test1944_exitNow(JNIEnv*, jclass) {
-  exit(12);
-}
-
-}  // namespace Test1944SuddenExit
-}  // namespace art
-
diff --git a/test/1946-list-descriptors/src-art/art/Test1946.java b/test/1946-list-descriptors/src-art/art/Test1946.java
index 3e5ec65..19032c8 100644
--- a/test/1946-list-descriptors/src-art/art/Test1946.java
+++ b/test/1946-list-descriptors/src-art/art/Test1946.java
@@ -17,6 +17,7 @@
 package art;
 
 import java.util.*;
+import java.util.function.*;
 import java.lang.reflect.*;
 import java.nio.ByteBuffer;
 import dalvik.system.InMemoryDexClassLoader;
@@ -49,22 +50,24 @@
   public class TMP2 {}
   public class TMP3 extends ArrayList {}
 
-  private static void check(boolean b, String msg) {
+  private static void check(boolean b, Supplier<String> msg) {
     if (!b) {
-      throw new Error("Test failed! " + msg);
+      throw new Error("Test failed! " + msg.get());
     }
   }
 
-  private static <T> void checkEq(T[] full, T[] sub, String msg) {
+  private static <T> void checkEq(T[] full, T[] sub, final String msg) {
     List<T> f = Arrays.asList(full);
-    check(full.length == sub.length, "not equal length");
-    msg = Arrays.toString(full) + " is not same as " + Arrays.toString(sub) + ": " + msg;
-    check(Arrays.asList(full).containsAll(Arrays.asList(sub)), msg);
+    check(full.length == sub.length, () -> "not equal length");
+    Supplier<String> msgGen =
+      () -> Arrays.toString(full) + " is not same as " + Arrays.toString(sub) + ": " + msg;
+    check(new HashSet<T>(Arrays.asList(full)).containsAll(Arrays.asList(sub)), msgGen);
   }
 
-  private static <T> void checkSubset(T[] full, T[] sub, String msg) {
-    msg = Arrays.toString(full) + " does not contain all of " + Arrays.toString(sub) + ": " + msg;
-    check(Arrays.asList(full).containsAll(Arrays.asList(sub)), msg);
+  private static <T> void checkSubset(T[] full, T[] sub, final String msg) {
+    Supplier<String> msgGen =
+      () -> Arrays.toString(full) + " does not contain all of " + Arrays.toString(sub) + ": " + msg;
+    check(new HashSet<T>(Arrays.asList(full)).containsAll(Arrays.asList(sub)), msgGen);
   }
 
   public static void run() throws Exception {
diff --git a/test/1947-breakpoint-redefine-deopt/src/art/Breakpoint.java b/test/1947-breakpoint-redefine-deopt/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1947-breakpoint-redefine-deopt/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1947-breakpoint-redefine-deopt/src/art/Breakpoint.java b/test/1947-breakpoint-redefine-deopt/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1947-breakpoint-redefine-deopt/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1947-breakpoint-redefine-deopt/src/art/Redefinition.java b/test/1947-breakpoint-redefine-deopt/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/1947-breakpoint-redefine-deopt/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/1947-breakpoint-redefine-deopt/src/art/Redefinition.java b/test/1947-breakpoint-redefine-deopt/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1947-breakpoint-redefine-deopt/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1948-obsolete-const-method-handle/util-src/build-classes b/test/1948-obsolete-const-method-handle/util-src/build-classes
index 1b2d79a..af22dd4 100755
--- a/test/1948-obsolete-const-method-handle/util-src/build-classes
+++ b/test/1948-obsolete-const-method-handle/util-src/build-classes
@@ -38,7 +38,7 @@
 fi
 
 # Build the initial class files.
-(cd "${SRC_PATH}" && javac -cp "${ASM_CLASSPATH}" -d "${BUILD_PATH}" Main.java art/*.java art/constmethodhandle/*.java) || fail "javac error"
+(cd "${SRC_PATH}" && javac -source 8 -target 8 -cp "${ASM_CLASSPATH}" -d "${BUILD_PATH}" Main.java art/*.java art/constmethodhandle/*.java) || fail "javac error"
 # Modify the class files using ASM
 (cd "${SCRIPT_PATH}" && java -cp "${ASM_CLASSPATH}:${BUILD_PATH}" art.constmethodhandle.TestGenerator "${BUILD_PATH}" "$D8") || fail "generator failure"
 # Remove the modification classes. We don't need nor want them for the actual test.
diff --git a/test/1949-short-dex-file/src/art/Redefinition.java b/test/1949-short-dex-file/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/1949-short-dex-file/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/1949-short-dex-file/src/art/Redefinition.java b/test/1949-short-dex-file/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1949-short-dex-file/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1950-unprepared-transform/src/art/Redefinition.java b/test/1950-unprepared-transform/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/1950-unprepared-transform/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/1950-unprepared-transform/src/art/Redefinition.java b/test/1950-unprepared-transform/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1950-unprepared-transform/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1951-monitor-enter-no-suspend/src/art/Main.java b/test/1951-monitor-enter-no-suspend/src/art/Main.java
deleted file mode 100644
index aa5498b..0000000
--- a/test/1951-monitor-enter-no-suspend/src/art/Main.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-// Binder class so the agent's C code has something that can be bound and exposed to tests.
-// In a package to separate cleanly and work around CTS reference issues (though this class
-// should be replaced in the CTS version).
-public class Main {
-  // Load the given class with the given classloader, and bind all native methods to corresponding
-  // C methods in the agent. Will abort if any of the steps fail.
-  public static native void bindAgentJNI(String className, ClassLoader classLoader);
-  // Same as above, giving the class directly.
-  public static native void bindAgentJNIForClass(Class<?> klass);
-
-  // Common infrastructure.
-  public static native void setTag(Object o, long tag);
-  public static native long getTag(Object o);
-}
diff --git a/test/1951-monitor-enter-no-suspend/src/art/Main.java b/test/1951-monitor-enter-no-suspend/src/art/Main.java
new file mode 120000
index 0000000..84ae4ac
--- /dev/null
+++ b/test/1951-monitor-enter-no-suspend/src/art/Main.java
@@ -0,0 +1 @@
+../../../jvmti-common/Main.java
\ No newline at end of file
diff --git a/test/1951-monitor-enter-no-suspend/src/art/Suspension.java b/test/1951-monitor-enter-no-suspend/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1951-monitor-enter-no-suspend/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1951-monitor-enter-no-suspend/src/art/Suspension.java b/test/1951-monitor-enter-no-suspend/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1951-monitor-enter-no-suspend/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1953-pop-frame/pop_frame.cc b/test/1953-pop-frame/pop_frame.cc
index 1c2d2a1..86345d6 100644
--- a/test/1953-pop-frame/pop_frame.cc
+++ b/test/1953-pop-frame/pop_frame.cc
@@ -36,898 +36,11 @@
 #include "test_env.h"
 #include "ti_macros.h"
 
+#include "suspend_event_helper.h"
+
 namespace art {
 namespace Test1953PopFrame {
 
-struct TestData {
-  jlocation target_loc;
-  jmethodID target_method;
-  jclass target_klass;
-  jfieldID target_field;
-  jrawMonitorID notify_monitor;
-  jint frame_pop_offset;
-  jmethodID frame_pop_setup_method;
-  std::vector<std::string> interesting_classes;
-  bool hit_location;
-
-  TestData(jvmtiEnv* jvmti,
-           JNIEnv* env,
-           jlocation loc,
-           jobject meth,
-           jclass klass,
-           jobject field,
-           jobject setup_meth,
-           jint pop_offset,
-           const std::vector<std::string>&& interesting)
-      : target_loc(loc),
-        target_method(meth != nullptr ? env->FromReflectedMethod(meth) : nullptr),
-        target_klass(reinterpret_cast<jclass>(env->NewGlobalRef(klass))),
-        target_field(field != nullptr ? env->FromReflectedField(field) : nullptr),
-        frame_pop_offset(pop_offset),
-        frame_pop_setup_method(setup_meth != nullptr ? env->FromReflectedMethod(setup_meth)
-                                                     : nullptr),
-        interesting_classes(interesting),
-        hit_location(false) {
-    JvmtiErrorToException(env, jvmti, jvmti->CreateRawMonitor("SuspendStopMonitor",
-                                                              &notify_monitor));
-  }
-
-  void PerformSuspend(jvmtiEnv* jvmti, JNIEnv* env) {
-    // Wake up the waiting thread.
-    JvmtiErrorToException(env, jvmti, jvmti->RawMonitorEnter(notify_monitor));
-    hit_location = true;
-    JvmtiErrorToException(env, jvmti, jvmti->RawMonitorNotifyAll(notify_monitor));
-    JvmtiErrorToException(env, jvmti, jvmti->RawMonitorExit(notify_monitor));
-    // Suspend ourself
-    jvmti->SuspendThread(nullptr);
-  }
-};
-
-void JNICALL cbSingleStep(jvmtiEnv* jvmti,
-                          JNIEnv* env,
-                          jthread thr,
-                          jmethodID meth,
-                          jlocation loc) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti,
-                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  if (meth != data->target_method || loc != data->target_loc) {
-    return;
-  }
-  data->PerformSuspend(jvmti, env);
-}
-
-void JNICALL cbExceptionCatch(jvmtiEnv *jvmti,
-                              JNIEnv* env,
-                              jthread thr,
-                              jmethodID method,
-                              jlocation location ATTRIBUTE_UNUSED,
-                              jobject exception ATTRIBUTE_UNUSED) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti,
-                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  if (method != data->target_method) {
-    return;
-  }
-  data->PerformSuspend(jvmti, env);
-}
-
-void JNICALL cbException(jvmtiEnv *jvmti,
-                         JNIEnv* env,
-                         jthread thr,
-                         jmethodID method,
-                         jlocation location ATTRIBUTE_UNUSED,
-                         jobject exception ATTRIBUTE_UNUSED,
-                         jmethodID catch_method ATTRIBUTE_UNUSED,
-                         jlocation catch_location ATTRIBUTE_UNUSED) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti,
-                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  if (method != data->target_method) {
-    return;
-  }
-  data->PerformSuspend(jvmti, env);
-}
-
-void JNICALL cbMethodEntry(jvmtiEnv *jvmti,
-                           JNIEnv* env,
-                           jthread thr,
-                           jmethodID method) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti,
-                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  if (method != data->target_method) {
-    return;
-  }
-  data->PerformSuspend(jvmti, env);
-}
-
-void JNICALL cbMethodExit(jvmtiEnv *jvmti,
-                          JNIEnv* env,
-                          jthread thr,
-                          jmethodID method,
-                          jboolean was_popped_by_exception ATTRIBUTE_UNUSED,
-                          jvalue return_value ATTRIBUTE_UNUSED) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti,
-                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  if (method != data->target_method) {
-    return;
-  }
-  data->PerformSuspend(jvmti, env);
-}
-
-void JNICALL cbFieldModification(jvmtiEnv* jvmti,
-                                 JNIEnv* env,
-                                 jthread thr,
-                                 jmethodID method ATTRIBUTE_UNUSED,
-                                 jlocation location ATTRIBUTE_UNUSED,
-                                 jclass field_klass ATTRIBUTE_UNUSED,
-                                 jobject object ATTRIBUTE_UNUSED,
-                                 jfieldID field,
-                                 char signature_type ATTRIBUTE_UNUSED,
-                                 jvalue new_value ATTRIBUTE_UNUSED) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti,
-                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  if (field != data->target_field) {
-    // TODO What to do here.
-    LOG(FATAL) << "Strange, shouldn't get here!";
-  }
-  data->PerformSuspend(jvmti, env);
-}
-
-void JNICALL cbFieldAccess(jvmtiEnv* jvmti,
-                           JNIEnv* env,
-                           jthread thr,
-                           jmethodID method ATTRIBUTE_UNUSED,
-                           jlocation location ATTRIBUTE_UNUSED,
-                           jclass field_klass,
-                           jobject object ATTRIBUTE_UNUSED,
-                           jfieldID field) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti,
-                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  if (field != data->target_field || !env->IsSameObject(field_klass, data->target_klass)) {
-    // TODO What to do here.
-    LOG(FATAL) << "Strange, shouldn't get here!";
-  }
-  data->PerformSuspend(jvmti, env);
-}
-
-void JNICALL cbBreakpointHit(jvmtiEnv* jvmti,
-                             JNIEnv* env,
-                             jthread thr,
-                             jmethodID method,
-                             jlocation loc) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti,
-                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  if (data->frame_pop_setup_method == method) {
-    CHECK(loc == 0) << "We should have stopped at location 0";
-    if (JvmtiErrorToException(env,
-                              jvmti,
-                              jvmti->NotifyFramePop(thr, data->frame_pop_offset))) {
-      return;
-    }
-    return;
-  }
-  if (method != data->target_method || loc != data->target_loc) {
-    // TODO What to do here.
-    LOG(FATAL) << "Strange, shouldn't get here!";
-  }
-  data->PerformSuspend(jvmti, env);
-}
-
-void JNICALL cbFramePop(jvmtiEnv* jvmti,
-                        JNIEnv* env,
-                        jthread thr,
-                        jmethodID method ATTRIBUTE_UNUSED,
-                        jboolean was_popped_by_exception ATTRIBUTE_UNUSED) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti,
-                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  data->PerformSuspend(jvmti, env);
-}
-
-void JNICALL cbClassLoadOrPrepare(jvmtiEnv* jvmti,
-                                  JNIEnv* env,
-                                  jthread thr,
-                                  jclass klass) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti,
-                            jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  char* name;
-  if (JvmtiErrorToException(env, jvmti, jvmti->GetClassSignature(klass, &name, nullptr))) {
-    return;
-  }
-  std::string name_str(name);
-  if (JvmtiErrorToException(env,
-                            jvmti,
-                            jvmti->Deallocate(reinterpret_cast<unsigned char*>(name)))) {
-    return;
-  }
-  if (std::find(data->interesting_classes.cbegin(),
-                data->interesting_classes.cend(),
-                name_str) != data->interesting_classes.cend()) {
-    data->PerformSuspend(jvmti, env);
-  }
-}
-
-extern "C" JNIEXPORT
-void JNICALL Java_art_Test1953_setupTest(JNIEnv* env, jclass klass ATTRIBUTE_UNUSED) {
-  jvmtiCapabilities caps;
-  memset(&caps, 0, sizeof(caps));
-  // Most of these will already be there but might as well be complete.
-  caps.can_pop_frame                          = 1;
-  caps.can_generate_single_step_events        = 1;
-  caps.can_generate_breakpoint_events         = 1;
-  caps.can_suspend                            = 1;
-  caps.can_generate_method_entry_events       = 1;
-  caps.can_generate_method_exit_events        = 1;
-  caps.can_generate_monitor_events            = 1;
-  caps.can_generate_exception_events          = 1;
-  caps.can_generate_frame_pop_events          = 1;
-  caps.can_generate_field_access_events       = 1;
-  caps.can_generate_field_modification_events = 1;
-  caps.can_redefine_classes                   = 1;
-  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->AddCapabilities(&caps))) {
-    return;
-  }
-  jvmtiEventCallbacks cb;
-  memset(&cb, 0, sizeof(cb));
-  // TODO Add the rest of these.
-  cb.Breakpoint        = cbBreakpointHit;
-  cb.SingleStep        = cbSingleStep;
-  cb.FieldAccess       = cbFieldAccess;
-  cb.FieldModification = cbFieldModification;
-  cb.MethodEntry       = cbMethodEntry;
-  cb.MethodExit        = cbMethodExit;
-  cb.Exception         = cbException;
-  cb.ExceptionCatch    = cbExceptionCatch;
-  cb.FramePop          = cbFramePop;
-  cb.ClassLoad         = cbClassLoadOrPrepare;
-  cb.ClassPrepare      = cbClassLoadOrPrepare;
-  JvmtiErrorToException(env, jvmti_env, jvmti_env->SetEventCallbacks(&cb, sizeof(cb)));
-}
-
-static bool DeleteTestData(JNIEnv* env, jthread thr, TestData* data) {
-  env->DeleteGlobalRef(data->target_klass);
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetThreadLocalStorage(thr, nullptr))) {
-    return false;
-  }
-  return JvmtiErrorToException(env,
-                               jvmti_env,
-                               jvmti_env->Deallocate(reinterpret_cast<uint8_t*>(data)));
-}
-
-static TestData* SetupTestData(JNIEnv* env,
-                               jobject meth,
-                               jlocation loc,
-                               jclass target_klass,
-                               jobject field,
-                               jobject setup_meth,
-                               jint pop_offset,
-                               const std::vector<std::string>&& interesting_names) {
-  void* data_ptr;
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->Allocate(sizeof(TestData),
-                                                reinterpret_cast<uint8_t**>(&data_ptr)))) {
-    return nullptr;
-  }
-  data = new (data_ptr) TestData(jvmti_env,
-                                 env,
-                                 loc,
-                                 meth,
-                                 target_klass,
-                                 field,
-                                 setup_meth,
-                                 pop_offset,
-                                 std::move(interesting_names));
-  if (env->ExceptionCheck()) {
-    env->DeleteGlobalRef(data->target_klass);
-    jvmti_env->Deallocate(reinterpret_cast<uint8_t*>(data));
-    return nullptr;
-  }
-  return data;
-}
-
-static TestData* SetupTestData(JNIEnv* env,
-                               jobject meth,
-                               jlocation loc,
-                               jclass target_klass,
-                               jobject field,
-                               jobject setup_meth,
-                               jint pop_offset) {
-  std::vector<std::string> empty;
-  return SetupTestData(
-      env, meth, loc, target_klass, field, setup_meth, pop_offset, std::move(empty));
-}
-
-extern "C" JNIEXPORT
-void JNICALL Java_art_Test1953_setupSuspendClassEvent(JNIEnv* env,
-                                                      jclass klass ATTRIBUTE_UNUSED,
-                                                      jint event_num,
-                                                      jobjectArray interesting_names,
-                                                      jthread thr) {
-  CHECK(event_num == JVMTI_EVENT_CLASS_LOAD || event_num == JVMTI_EVENT_CLASS_PREPARE);
-  std::vector<std::string> names;
-  jint cnt = env->GetArrayLength(interesting_names);
-  for (jint i = 0; i < cnt; i++) {
-    env->PushLocalFrame(1);
-    jstring name_obj = reinterpret_cast<jstring>(env->GetObjectArrayElement(interesting_names, i));
-    const char* name_chr = env->GetStringUTFChars(name_obj, nullptr);
-    names.push_back(std::string(name_chr));
-    env->ReleaseStringUTFChars(name_obj, name_chr);
-    env->PopLocalFrame(nullptr);
-  }
-  TestData* data;
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->GetThreadLocalStorage(thr,
-                                                             reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data == nullptr) << "Data was not cleared!";
-  data = SetupTestData(env, nullptr, 0, nullptr, nullptr, nullptr, 0, std::move(names));
-  if (data == nullptr) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetThreadLocalStorage(thr, data))) {
-    return;
-  }
-  JvmtiErrorToException(env,
-                        jvmti_env,
-                        jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
-                                                            static_cast<jvmtiEvent>(event_num),
-                                                            thr));
-}
-
-extern "C" JNIEXPORT
-void JNICALL Java_art_Test1953_clearSuspendClassEvent(JNIEnv* env,
-                                                      jclass klass ATTRIBUTE_UNUSED,
-                                                      jthread thr) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->GetThreadLocalStorage(thr,
-                                                             reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
-                                                                JVMTI_EVENT_CLASS_LOAD,
-                                                                thr))) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
-                                                                JVMTI_EVENT_CLASS_PREPARE,
-                                                                thr))) {
-    return;
-  }
-  DeleteTestData(env, thr, data);
-}
-
-extern "C" JNIEXPORT
-void JNICALL Java_art_Test1953_setupSuspendSingleStepAt(JNIEnv* env,
-                                                        jclass klass ATTRIBUTE_UNUSED,
-                                                        jobject meth,
-                                                        jlocation loc,
-                                                        jthread thr) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->GetThreadLocalStorage(thr,
-                                                             reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data == nullptr) << "Data was not cleared!";
-  data = SetupTestData(env, meth, loc, nullptr, nullptr, nullptr, 0);
-  if (data == nullptr) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetThreadLocalStorage(thr, data))) {
-    return;
-  }
-  JvmtiErrorToException(env, jvmti_env, jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
-                                                                            JVMTI_EVENT_SINGLE_STEP,
-                                                                            thr));
-}
-
-extern "C" JNIEXPORT
-void JNICALL Java_art_Test1953_clearSuspendSingleStepFor(JNIEnv* env,
-                                                         jclass klass ATTRIBUTE_UNUSED,
-                                                         jthread thr) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->GetThreadLocalStorage(thr,
-                                                             reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
-                                                                JVMTI_EVENT_SINGLE_STEP,
-                                                                thr))) {
-    return;
-  }
-  DeleteTestData(env, thr, data);
-}
-
-extern "C" JNIEXPORT
-void JNICALL Java_art_Test1953_setupSuspendPopFrameEvent(JNIEnv* env,
-                                                         jclass klass ATTRIBUTE_UNUSED,
-                                                         jint offset,
-                                                         jobject breakpoint_func,
-                                                         jthread thr) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->GetThreadLocalStorage(thr,
-                                                             reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data == nullptr) << "Data was not cleared!";
-  data = SetupTestData(env, nullptr, 0, nullptr, nullptr, breakpoint_func, offset);
-  CHECK(data != nullptr);
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetThreadLocalStorage(thr, data))) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
-                                                                JVMTI_EVENT_FRAME_POP,
-                                                                thr))) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
-                                                                JVMTI_EVENT_BREAKPOINT,
-                                                                thr))) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetBreakpoint(data->frame_pop_setup_method, 0))) {
-    return;
-  }
-}
-
-extern "C" JNIEXPORT
-void JNICALL Java_art_Test1953_clearSuspendPopFrameEvent(JNIEnv* env,
-                                                         jclass klass ATTRIBUTE_UNUSED,
-                                                         jthread thr) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->GetThreadLocalStorage(thr,
-                                                             reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
-                                                                JVMTI_EVENT_FRAME_POP,
-                                                                thr))) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
-                                                                JVMTI_EVENT_BREAKPOINT,
-                                                                thr))) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->ClearBreakpoint(data->frame_pop_setup_method, 0))) {
-    return;
-  }
-  DeleteTestData(env, thr, data);
-}
-
-extern "C" JNIEXPORT
-void JNICALL Java_art_Test1953_setupSuspendBreakpointFor(JNIEnv* env,
-                                                         jclass klass ATTRIBUTE_UNUSED,
-                                                         jobject meth,
-                                                         jlocation loc,
-                                                         jthread thr) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->GetThreadLocalStorage(thr,
-                                                             reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data == nullptr) << "Data was not cleared!";
-  data = SetupTestData(env, meth, loc, nullptr, nullptr, nullptr, 0);
-  if (data == nullptr) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetThreadLocalStorage(thr, data))) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetEventNotificationMode(JVMTI_ENABLE,
-                                                                JVMTI_EVENT_BREAKPOINT,
-                                                                thr))) {
-    return;
-  }
-  JvmtiErrorToException(env, jvmti_env, jvmti_env->SetBreakpoint(data->target_method,
-                                                                 data->target_loc));
-}
-
-extern "C" JNIEXPORT
-void JNICALL Java_art_Test1953_clearSuspendBreakpointFor(JNIEnv* env,
-                                                         jclass klass ATTRIBUTE_UNUSED,
-                                                         jthread thr) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->GetThreadLocalStorage(thr,
-                                                             reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
-                                                                JVMTI_EVENT_BREAKPOINT,
-                                                                thr))) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->ClearBreakpoint(data->target_method,
-                                                       data->target_loc))) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetThreadLocalStorage(thr, nullptr))) {
-    return;
-  }
-  DeleteTestData(env, thr, data);
-}
-
-extern "C" JNIEXPORT
-void JNICALL Java_art_Test1953_setupSuspendExceptionEvent(JNIEnv* env,
-                                                          jclass klass ATTRIBUTE_UNUSED,
-                                                          jobject method,
-                                                          jboolean is_catch,
-                                                          jthread thr) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->GetThreadLocalStorage(
-                                thr, reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data == nullptr) << "Data was not cleared!";
-  data = SetupTestData(env, method, 0, nullptr, nullptr, nullptr, 0);
-  if (data == nullptr) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetThreadLocalStorage(thr, data))) {
-    return;
-  }
-  JvmtiErrorToException(env,
-                        jvmti_env,
-                        jvmti_env->SetEventNotificationMode(
-                            JVMTI_ENABLE,
-                            is_catch ? JVMTI_EVENT_EXCEPTION_CATCH : JVMTI_EVENT_EXCEPTION,
-                            thr));
-}
-
-extern "C" JNIEXPORT
-void JNICALL Java_art_Test1953_clearSuspendExceptionEvent(JNIEnv* env,
-                                                          jclass klass ATTRIBUTE_UNUSED,
-                                                          jthread thr) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->GetThreadLocalStorage(thr,
-                                                             reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
-                                                                JVMTI_EVENT_EXCEPTION_CATCH,
-                                                                thr))) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
-                                                                JVMTI_EVENT_EXCEPTION,
-                                                                thr))) {
-    return;
-  }
-  DeleteTestData(env, thr, data);
-}
-
-extern "C" JNIEXPORT
-void JNICALL Java_art_Test1953_setupSuspendMethodEvent(JNIEnv* env,
-                                                       jclass klass ATTRIBUTE_UNUSED,
-                                                       jobject method,
-                                                       jboolean enter,
-                                                       jthread thr) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->GetThreadLocalStorage(
-                                thr, reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data == nullptr) << "Data was not cleared!";
-  data = SetupTestData(env, method, 0, nullptr, nullptr, nullptr, 0);
-  if (data == nullptr) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetThreadLocalStorage(thr, data))) {
-    return;
-  }
-  JvmtiErrorToException(env,
-                        jvmti_env,
-                        jvmti_env->SetEventNotificationMode(
-                            JVMTI_ENABLE,
-                            enter ? JVMTI_EVENT_METHOD_ENTRY : JVMTI_EVENT_METHOD_EXIT,
-                            thr));
-}
-
-extern "C" JNIEXPORT
-void JNICALL Java_art_Test1953_clearSuspendMethodEvent(JNIEnv* env,
-                                                       jclass klass ATTRIBUTE_UNUSED,
-                                                       jthread thr) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->GetThreadLocalStorage(thr,
-                                                             reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
-                                                                JVMTI_EVENT_METHOD_EXIT,
-                                                                thr))) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
-                                                                JVMTI_EVENT_METHOD_ENTRY,
-                                                                thr))) {
-    return;
-  }
-  DeleteTestData(env, thr, data);
-}
-
-extern "C" JNIEXPORT
-void JNICALL Java_art_Test1953_setupFieldSuspendFor(JNIEnv* env,
-                                                    jclass klass ATTRIBUTE_UNUSED,
-                                                    jclass target_klass,
-                                                    jobject field,
-                                                    jboolean access,
-                                                    jthread thr) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->GetThreadLocalStorage(
-                                thr, reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data == nullptr) << "Data was not cleared!";
-  data = SetupTestData(env, nullptr, 0, target_klass, field, nullptr, 0);
-  if (data == nullptr) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetThreadLocalStorage(thr, data))) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetEventNotificationMode(
-                                JVMTI_ENABLE,
-                                access ? JVMTI_EVENT_FIELD_ACCESS : JVMTI_EVENT_FIELD_MODIFICATION,
-                                thr))) {
-    return;
-  }
-  if (access) {
-    JvmtiErrorToException(env, jvmti_env, jvmti_env->SetFieldAccessWatch(data->target_klass,
-                                                                         data->target_field));
-  } else {
-    JvmtiErrorToException(env, jvmti_env, jvmti_env->SetFieldModificationWatch(data->target_klass,
-                                                                               data->target_field));
-  }
-}
-
-extern "C" JNIEXPORT
-void JNICALL Java_art_Test1953_clearFieldSuspendFor(JNIEnv* env,
-                                                    jclass klass ATTRIBUTE_UNUSED,
-                                                    jthread thr) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->GetThreadLocalStorage(thr,
-                                                             reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
-                                                                JVMTI_EVENT_FIELD_ACCESS,
-                                                                thr))) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetEventNotificationMode(JVMTI_DISABLE,
-                                                                JVMTI_EVENT_FIELD_MODIFICATION,
-                                                                thr))) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->ClearFieldModificationWatch(
-                                data->target_klass, data->target_field)) &&
-      JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->ClearFieldAccessWatch(
-                                data->target_klass, data->target_field))) {
-    return;
-  } else {
-    env->ExceptionClear();
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetThreadLocalStorage(thr, nullptr))) {
-    return;
-  }
-  DeleteTestData(env, thr, data);
-}
-
-extern "C" JNIEXPORT
-void JNICALL Java_art_Test1953_setupWaitForNativeCall(JNIEnv* env,
-                                                      jclass klass ATTRIBUTE_UNUSED,
-                                                      jthread thr) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->GetThreadLocalStorage(
-                                thr, reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data == nullptr) << "Data was not cleared!";
-  data = SetupTestData(env, nullptr, 0, nullptr, nullptr, nullptr, 0);
-  if (data == nullptr) {
-    return;
-  }
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetThreadLocalStorage(thr, data))) {
-    return;
-  }
-}
-
-extern "C" JNIEXPORT
-void JNICALL Java_art_Test1953_clearWaitForNativeCall(JNIEnv* env,
-                                                      jclass klass ATTRIBUTE_UNUSED,
-                                                      jthread thr) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->GetThreadLocalStorage(thr,
-                                                             reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->SetThreadLocalStorage(thr, nullptr))) {
-    return;
-  }
-  DeleteTestData(env, thr, data);
-}
-
-extern "C" JNIEXPORT
-void JNICALL Java_art_Test1953_waitForSuspendHit(JNIEnv* env,
-                                                 jclass klass ATTRIBUTE_UNUSED,
-                                                 jthread thr) {
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->GetThreadLocalStorage(thr,
-                                                             reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorEnter(data->notify_monitor))) {
-    return;
-  }
-  while (!data->hit_location) {
-    if (JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorWait(data->notify_monitor, -1))) {
-      return;
-    }
-  }
-  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorExit(data->notify_monitor))) {
-    return;
-  }
-  jint state = 0;
-  while (!JvmtiErrorToException(env, jvmti_env, jvmti_env->GetThreadState(thr, &state)) &&
-         (state & JVMTI_THREAD_STATE_SUSPENDED) == 0) { }
-}
 
 extern "C" JNIEXPORT
 void JNICALL Java_art_Test1953_popFrame(JNIEnv* env,
@@ -944,15 +57,7 @@
   jfieldID cnt = env->GetFieldID(klass, "cnt", "I");
   env->SetIntField(thiz, cnt, env->GetIntField(thiz, cnt) + 1);
   env->PopLocalFrame(nullptr);
-  TestData *data;
-  if (JvmtiErrorToException(env,
-                            jvmti_env,
-                            jvmti_env->GetThreadLocalStorage(/* thread */ nullptr,
-                                                             reinterpret_cast<void**>(&data)))) {
-    return;
-  }
-  CHECK(data != nullptr);
-  data->PerformSuspend(jvmti_env, env);
+  art::common_suspend_event::PerformSuspension(jvmti_env, env);
 }
 
 extern "C" JNIEXPORT
diff --git a/test/1953-pop-frame/src/art/Breakpoint.java b/test/1953-pop-frame/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1953-pop-frame/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1953-pop-frame/src/art/Breakpoint.java b/test/1953-pop-frame/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1953-pop-frame/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1953-pop-frame/src/art/Redefinition.java b/test/1953-pop-frame/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/1953-pop-frame/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/1953-pop-frame/src/art/Redefinition.java b/test/1953-pop-frame/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1953-pop-frame/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1953-pop-frame/src/art/StackTrace.java b/test/1953-pop-frame/src/art/StackTrace.java
deleted file mode 100644
index 2ea2f20..0000000
--- a/test/1953-pop-frame/src/art/StackTrace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Executable;
-
-public class StackTrace {
-  public static class StackFrameData {
-    public final Thread thr;
-    public final Executable method;
-    public final long current_location;
-    public final int depth;
-
-    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
-      this.thr = thr;
-      this.method = e;
-      this.current_location = loc;
-      this.depth = depth;
-    }
-    @Override
-    public String toString() {
-      return String.format(
-          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
-          this.thr,
-          this.method,
-          this.current_location,
-          this.depth);
-    }
-  }
-
-  public static native int GetStackDepth(Thread thr);
-
-  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
-
-  public static StackFrameData[] GetStackTrace(Thread thr) {
-    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
-    // suspended. The spec says that not being suspended is fine but since we want this to be
-    // consistent we will suspend for the RI.
-    boolean suspend_thread =
-        !System.getProperty("java.vm.name").equals("Dalvik") &&
-        !thr.equals(Thread.currentThread()) &&
-        !Suspension.isSuspended(thr);
-    if (suspend_thread) {
-      Suspension.suspend(thr);
-    }
-    StackFrameData[] out = nativeGetStackTrace(thr);
-    if (suspend_thread) {
-      Suspension.resume(thr);
-    }
-    return out;
-  }
-}
-
diff --git a/test/1953-pop-frame/src/art/StackTrace.java b/test/1953-pop-frame/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1953-pop-frame/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1953-pop-frame/src/art/SuspendEvents.java b/test/1953-pop-frame/src/art/SuspendEvents.java
new file mode 120000
index 0000000..f7a5f7e
--- /dev/null
+++ b/test/1953-pop-frame/src/art/SuspendEvents.java
@@ -0,0 +1 @@
+../../../jvmti-common/SuspendEvents.java
\ No newline at end of file
diff --git a/test/1953-pop-frame/src/art/Suspension.java b/test/1953-pop-frame/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1953-pop-frame/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1953-pop-frame/src/art/Suspension.java b/test/1953-pop-frame/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1953-pop-frame/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1953-pop-frame/src/art/Test1953.java b/test/1953-pop-frame/src/art/Test1953.java
index c0eba8e..ff41d24 100644
--- a/test/1953-pop-frame/src/art/Test1953.java
+++ b/test/1953-pop-frame/src/art/Test1953.java
@@ -24,6 +24,18 @@
 import java.util.concurrent.CountDownLatch;
 import java.util.function.Consumer;
 
+import static art.SuspendEvents.setupTest;
+import static art.SuspendEvents.setupSuspendBreakpointFor;
+import static art.SuspendEvents.clearSuspendBreakpointFor;
+import static art.SuspendEvents.setupSuspendSingleStepAt;
+import static art.SuspendEvents.setupFieldSuspendFor;
+import static art.SuspendEvents.setupSuspendMethodEvent;
+import static art.SuspendEvents.setupSuspendExceptionEvent;
+import static art.SuspendEvents.setupSuspendPopFrameEvent;
+import static art.SuspendEvents.EVENT_TYPE_CLASS_LOAD;
+import static art.SuspendEvents.EVENT_TYPE_CLASS_PREPARE;
+import static art.SuspendEvents.setupSuspendClassEvent;
+
 public class Test1953 {
   public final boolean canRunClassLoadTests;
   public static void doNothing() {}
@@ -46,7 +58,7 @@
   public static TestSuspender makeSuspend(final ThreadRunnable setup, final ThreadRunnable clean) {
     return new TestSuspender() {
       public void setup(Thread thr) { setup.run(thr); }
-      public void waitForSuspend(Thread thr) { Test1953.waitForSuspendHit(thr); }
+      public void waitForSuspend(Thread thr) { SuspendEvents.waitForSuspendHit(thr); }
       public void cleanup(Thread thr) { clean.run(thr); }
     };
   }
@@ -712,7 +724,7 @@
     System.out.println("Test stopped using breakpoint");
     runTestOn(new StandardTestObject(),
         (thr) -> setupSuspendBreakpointFor(calledFunction, loc, thr),
-        Test1953::clearSuspendBreakpointFor);
+        SuspendEvents::clearSuspendBreakpointFor);
 
     final Method syncFunctionCalledFunction =
         SynchronizedFunctionTestObject.class.getDeclaredMethod("calledFunction");
@@ -725,7 +737,7 @@
     System.out.println("Test stopped using breakpoint with declared synchronized function");
     runTestOn(new SynchronizedFunctionTestObject(),
         (thr) -> setupSuspendBreakpointFor(syncFunctionCalledFunction, syncFunctionLoc, thr),
-        Test1953::clearSuspendBreakpointFor);
+        SuspendEvents::clearSuspendBreakpointFor);
 
     final Method syncCalledFunction =
         SynchronizedTestObject.class.getDeclaredMethod("calledFunction");
@@ -737,87 +749,87 @@
     synchronized (lock) {}
     runTestOn(new SynchronizedTestObject(lock),
         (thr) -> setupSuspendBreakpointFor(syncCalledFunction, syncLoc, thr),
-        Test1953::clearSuspendBreakpointFor);
+        SuspendEvents::clearSuspendBreakpointFor);
     synchronized (lock) {}
 
     System.out.println("Test stopped on single step");
     runTestOn(new StandardTestObject(),
         (thr) -> setupSuspendSingleStepAt(calledFunction, loc, thr),
-        Test1953::clearSuspendSingleStepFor);
+        SuspendEvents::clearSuspendSingleStepFor);
 
     final Field target_field = FieldBasedTestObject.class.getDeclaredField("TARGET_FIELD");
     System.out.println("Test stopped on field access");
     runTestOn(new FieldBasedTestObject(),
         (thr) -> setupFieldSuspendFor(FieldBasedTestObject.class, target_field, true, thr),
-        Test1953::clearFieldSuspendFor);
+        SuspendEvents::clearFieldSuspendFor);
 
     System.out.println("Test stopped on field modification");
     runTestOn(new FieldBasedTestObject(),
         (thr) -> setupFieldSuspendFor(FieldBasedTestObject.class, target_field, false, thr),
-        Test1953::clearFieldSuspendFor);
+        SuspendEvents::clearFieldSuspendFor);
 
     System.out.println("Test stopped during Method Exit of doNothing");
     runTestOn(new StandardTestObject(false),
         (thr) -> setupSuspendMethodEvent(doNothingMethod, /*enter*/ false, thr),
-        Test1953::clearSuspendMethodEvent);
+        SuspendEvents::clearSuspendMethodEvent);
 
     // NB We need another test to make sure the MethodEntered event is triggered twice.
     System.out.println("Test stopped during Method Enter of doNothing");
     runTestOn(new StandardTestObject(false),
         (thr) -> setupSuspendMethodEvent(doNothingMethod, /*enter*/ true, thr),
-        Test1953::clearSuspendMethodEvent);
+        SuspendEvents::clearSuspendMethodEvent);
 
     System.out.println("Test stopped during Method Exit of calledFunction");
     runTestOn(new StandardTestObject(false),
         (thr) -> setupSuspendMethodEvent(calledFunction, /*enter*/ false, thr),
-        Test1953::clearSuspendMethodEvent);
+        SuspendEvents::clearSuspendMethodEvent);
 
     System.out.println("Test stopped during Method Enter of calledFunction");
     runTestOn(new StandardTestObject(false),
         (thr) -> setupSuspendMethodEvent(calledFunction, /*enter*/ true, thr),
-        Test1953::clearSuspendMethodEvent);
+        SuspendEvents::clearSuspendMethodEvent);
 
     final Method exceptionOnceCalledMethod =
         ExceptionOnceObject.class.getDeclaredMethod("calledFunction");
     System.out.println("Test stopped during Method Exit due to exception thrown in same function");
     runTestOn(new ExceptionOnceObject(/*throwInSub*/ false),
         (thr) -> setupSuspendMethodEvent(exceptionOnceCalledMethod, /*enter*/ false, thr),
-        Test1953::clearSuspendMethodEvent);
+        SuspendEvents::clearSuspendMethodEvent);
 
     System.out.println("Test stopped during Method Exit due to exception thrown in subroutine");
     runTestOn(new ExceptionOnceObject(/*throwInSub*/ true),
         (thr) -> setupSuspendMethodEvent(exceptionOnceCalledMethod, /*enter*/ false, thr),
-        Test1953::clearSuspendMethodEvent);
+        SuspendEvents::clearSuspendMethodEvent);
 
     System.out.println("Test stopped during notifyFramePop without exception on pop of calledFunction");
     runTestOn(new StandardTestObject(false),
         (thr) -> setupSuspendPopFrameEvent(1, doNothingMethod, thr),
-        Test1953::clearSuspendPopFrameEvent);
+        SuspendEvents::clearSuspendPopFrameEvent);
 
     System.out.println("Test stopped during notifyFramePop without exception on pop of doNothing");
     runTestOn(new StandardTestObject(false),
         (thr) -> setupSuspendPopFrameEvent(0, doNothingMethod, thr),
-        Test1953::clearSuspendPopFrameEvent);
+        SuspendEvents::clearSuspendPopFrameEvent);
 
     final Method exceptionThrowCalledMethod =
         ExceptionThrowTestObject.class.getDeclaredMethod("calledFunction");
     System.out.println("Test stopped during notifyFramePop with exception on pop of calledFunction");
     runTestOn(new ExceptionThrowTestObject(false),
         (thr) -> setupSuspendPopFrameEvent(0, exceptionThrowCalledMethod, thr),
-        Test1953::clearSuspendPopFrameEvent);
+        SuspendEvents::clearSuspendPopFrameEvent);
 
     final Method exceptionCatchThrowMethod =
         ExceptionCatchTestObject.class.getDeclaredMethod("doThrow");
     System.out.println("Test stopped during notifyFramePop with exception on pop of doThrow");
     runTestOn(new ExceptionCatchTestObject(),
         (thr) -> setupSuspendPopFrameEvent(0, exceptionCatchThrowMethod, thr),
-        Test1953::clearSuspendPopFrameEvent);
+        SuspendEvents::clearSuspendPopFrameEvent);
 
     System.out.println("Test stopped during ExceptionCatch event of calledFunction " +
         "(catch in called function, throw in called function)");
     runTestOn(new ExceptionThrowTestObject(true),
         (thr) -> setupSuspendExceptionEvent(exceptionThrowCalledMethod, /*catch*/ true, thr),
-        Test1953::clearSuspendExceptionEvent);
+        SuspendEvents::clearSuspendExceptionEvent);
 
     final Method exceptionCatchCalledMethod =
         ExceptionCatchTestObject.class.getDeclaredMethod("calledFunction");
@@ -825,19 +837,19 @@
         "(catch in called function, throw in subroutine)");
     runTestOn(new ExceptionCatchTestObject(),
         (thr) -> setupSuspendExceptionEvent(exceptionCatchCalledMethod, /*catch*/ true, thr),
-        Test1953::clearSuspendExceptionEvent);
+        SuspendEvents::clearSuspendExceptionEvent);
 
     System.out.println("Test stopped during Exception event of calledFunction " +
         "(catch in calling function)");
     runTestOn(new ExceptionThrowTestObject(false),
         (thr) -> setupSuspendExceptionEvent(exceptionThrowCalledMethod, /*catch*/ false, thr),
-        Test1953::clearSuspendExceptionEvent);
+        SuspendEvents::clearSuspendExceptionEvent);
 
     System.out.println("Test stopped during Exception event of calledFunction " +
         "(catch in called function)");
     runTestOn(new ExceptionThrowTestObject(true),
         (thr) -> setupSuspendExceptionEvent(exceptionThrowCalledMethod, /*catch*/ false, thr),
-        Test1953::clearSuspendExceptionEvent);
+        SuspendEvents::clearSuspendExceptionEvent);
 
     final Method exceptionThrowFarCalledMethod =
         ExceptionThrowFarTestObject.class.getDeclaredMethod("calledFunction");
@@ -845,13 +857,13 @@
         "(catch in parent of calling function)");
     runTestOn(new ExceptionThrowFarTestObject(false),
         (thr) -> setupSuspendExceptionEvent(exceptionThrowFarCalledMethod, /*catch*/ false, thr),
-        Test1953::clearSuspendExceptionEvent);
+        SuspendEvents::clearSuspendExceptionEvent);
 
     System.out.println("Test stopped during Exception event of calledFunction " +
         "(catch in called function)");
     runTestOn(new ExceptionThrowFarTestObject(true),
         (thr) -> setupSuspendExceptionEvent(exceptionThrowFarCalledMethod, /*catch*/ false, thr),
-        Test1953::clearSuspendExceptionEvent);
+        SuspendEvents::clearSuspendExceptionEvent);
 
     // These tests are disabled for either the RI (b/116003018) or for jvmti-stress. For the
     // later it is due to the additional agent causing classes to be loaded earlier as it forces
@@ -864,7 +876,7 @@
       System.out.println("Test stopped during a ClassLoad event.");
       runTestOn(new ClassLoadObject(),
           (thr) -> setupSuspendClassEvent(EVENT_TYPE_CLASS_LOAD, ClassLoadObject.CLASS_NAMES, thr),
-          Test1953::clearSuspendClassEvent);
+          SuspendEvents::clearSuspendClassEvent);
 
       // The RI handles a PopFrame during a ClassPrepare event incorrectly. See b/116003018 for
       // more information.
@@ -873,7 +885,7 @@
           (thr) -> setupSuspendClassEvent(EVENT_TYPE_CLASS_PREPARE,
                                           ClassLoadObject.CLASS_NAMES,
                                           thr),
-          Test1953::clearSuspendClassEvent);
+          SuspendEvents::clearSuspendClassEvent);
     }
     System.out.println("Test stopped during random Suspend.");
     final SuspendSuddenlyObject sso = new SuspendSuddenlyObject();
@@ -906,14 +918,15 @@
 
     System.out.println("Test stopped during a native method fails");
     runTestOn(new NativeCalledObject(),
-        Test1953::setupWaitForNativeCall,
-        Test1953::clearWaitForNativeCall);
+        SuspendEvents::setupWaitForNativeCall,
+        SuspendEvents::clearWaitForNativeCall);
 
     System.out.println("Test stopped in a method called by native fails");
     final Method nativeCallerMethod = NativeCallerObject.class.getDeclaredMethod("calledFunction");
     runTestOn(new NativeCallerObject(),
         (thr) -> setupSuspendMethodEvent(nativeCallerMethod, /*enter*/ false, thr),
-        Test1953::clearSuspendMethodEvent);
+        SuspendEvents::clearSuspendMethodEvent);
+
 
     final Object lock2 = new Object();
     synchronized (lock2) {}
@@ -927,7 +940,7 @@
           }
         },
         (thr) -> setupSuspendBreakpointFor(calledFunction, loc, thr),
-        Test1953::clearSuspendBreakpointFor);
+        SuspendEvents::clearSuspendBreakpointFor);
     synchronized (lock2) {}
   }
 
@@ -961,38 +974,5 @@
   }
 
   public static native boolean isClassLoaded(String name);
-
-  public static native void setupTest();
   public static native void popFrame(Thread thr);
-
-  public static native void setupSuspendBreakpointFor(Executable meth, long loc, Thread thr);
-  public static native void clearSuspendBreakpointFor(Thread thr);
-
-  public static native void setupSuspendSingleStepAt(Executable meth, long loc, Thread thr);
-  public static native void clearSuspendSingleStepFor(Thread thr);
-
-  public static native void setupFieldSuspendFor(Class klass, Field f, boolean access, Thread thr);
-  public static native void clearFieldSuspendFor(Thread thr);
-
-  public static native void setupSuspendMethodEvent(Executable meth, boolean enter, Thread thr);
-  public static native void clearSuspendMethodEvent(Thread thr);
-
-  public static native void setupSuspendExceptionEvent(
-      Executable meth, boolean is_catch, Thread thr);
-  public static native void clearSuspendExceptionEvent(Thread thr);
-
-  public static native void setupSuspendPopFrameEvent(
-      int offset, Executable breakpointFunction, Thread thr);
-  public static native void clearSuspendPopFrameEvent(Thread thr);
-
-  public static final int EVENT_TYPE_CLASS_LOAD = 55;
-  public static final int EVENT_TYPE_CLASS_PREPARE = 56;
-  public static native void setupSuspendClassEvent(
-      int eventType, String[] interestingNames, Thread thr);
-  public static native void clearSuspendClassEvent(Thread thr);
-
-  public static native void setupWaitForNativeCall(Thread thr);
-  public static native void clearWaitForNativeCall(Thread thr);
-
-  public static native void waitForSuspendHit(Thread thr);
 }
diff --git a/test/1954-pop-frame-jit/src/art/Breakpoint.java b/test/1954-pop-frame-jit/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1954-pop-frame-jit/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1954-pop-frame-jit/src/art/Breakpoint.java b/test/1954-pop-frame-jit/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1954-pop-frame-jit/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1954-pop-frame-jit/src/art/Redefinition.java b/test/1954-pop-frame-jit/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/1954-pop-frame-jit/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/1954-pop-frame-jit/src/art/Redefinition.java b/test/1954-pop-frame-jit/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1954-pop-frame-jit/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1954-pop-frame-jit/src/art/StackTrace.java b/test/1954-pop-frame-jit/src/art/StackTrace.java
deleted file mode 100644
index 2ea2f20..0000000
--- a/test/1954-pop-frame-jit/src/art/StackTrace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Executable;
-
-public class StackTrace {
-  public static class StackFrameData {
-    public final Thread thr;
-    public final Executable method;
-    public final long current_location;
-    public final int depth;
-
-    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
-      this.thr = thr;
-      this.method = e;
-      this.current_location = loc;
-      this.depth = depth;
-    }
-    @Override
-    public String toString() {
-      return String.format(
-          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
-          this.thr,
-          this.method,
-          this.current_location,
-          this.depth);
-    }
-  }
-
-  public static native int GetStackDepth(Thread thr);
-
-  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
-
-  public static StackFrameData[] GetStackTrace(Thread thr) {
-    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
-    // suspended. The spec says that not being suspended is fine but since we want this to be
-    // consistent we will suspend for the RI.
-    boolean suspend_thread =
-        !System.getProperty("java.vm.name").equals("Dalvik") &&
-        !thr.equals(Thread.currentThread()) &&
-        !Suspension.isSuspended(thr);
-    if (suspend_thread) {
-      Suspension.suspend(thr);
-    }
-    StackFrameData[] out = nativeGetStackTrace(thr);
-    if (suspend_thread) {
-      Suspension.resume(thr);
-    }
-    return out;
-  }
-}
-
diff --git a/test/1954-pop-frame-jit/src/art/StackTrace.java b/test/1954-pop-frame-jit/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1954-pop-frame-jit/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1954-pop-frame-jit/src/art/SuspendEvents.java b/test/1954-pop-frame-jit/src/art/SuspendEvents.java
new file mode 120000
index 0000000..ee0632b
--- /dev/null
+++ b/test/1954-pop-frame-jit/src/art/SuspendEvents.java
@@ -0,0 +1 @@
+../../../1953-pop-frame/src/art/SuspendEvents.java
\ No newline at end of file
diff --git a/test/1954-pop-frame-jit/src/art/Suspension.java b/test/1954-pop-frame-jit/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1954-pop-frame-jit/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1954-pop-frame-jit/src/art/Suspension.java b/test/1954-pop-frame-jit/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1954-pop-frame-jit/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1955-pop-frame-jit-called/src/art/Breakpoint.java b/test/1955-pop-frame-jit-called/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1955-pop-frame-jit-called/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1955-pop-frame-jit-called/src/art/Breakpoint.java b/test/1955-pop-frame-jit-called/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1955-pop-frame-jit-called/src/art/Redefinition.java b/test/1955-pop-frame-jit-called/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/1955-pop-frame-jit-called/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/1955-pop-frame-jit-called/src/art/Redefinition.java b/test/1955-pop-frame-jit-called/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1955-pop-frame-jit-called/src/art/StackTrace.java b/test/1955-pop-frame-jit-called/src/art/StackTrace.java
deleted file mode 100644
index 2ea2f20..0000000
--- a/test/1955-pop-frame-jit-called/src/art/StackTrace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Executable;
-
-public class StackTrace {
-  public static class StackFrameData {
-    public final Thread thr;
-    public final Executable method;
-    public final long current_location;
-    public final int depth;
-
-    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
-      this.thr = thr;
-      this.method = e;
-      this.current_location = loc;
-      this.depth = depth;
-    }
-    @Override
-    public String toString() {
-      return String.format(
-          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
-          this.thr,
-          this.method,
-          this.current_location,
-          this.depth);
-    }
-  }
-
-  public static native int GetStackDepth(Thread thr);
-
-  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
-
-  public static StackFrameData[] GetStackTrace(Thread thr) {
-    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
-    // suspended. The spec says that not being suspended is fine but since we want this to be
-    // consistent we will suspend for the RI.
-    boolean suspend_thread =
-        !System.getProperty("java.vm.name").equals("Dalvik") &&
-        !thr.equals(Thread.currentThread()) &&
-        !Suspension.isSuspended(thr);
-    if (suspend_thread) {
-      Suspension.suspend(thr);
-    }
-    StackFrameData[] out = nativeGetStackTrace(thr);
-    if (suspend_thread) {
-      Suspension.resume(thr);
-    }
-    return out;
-  }
-}
-
diff --git a/test/1955-pop-frame-jit-called/src/art/StackTrace.java b/test/1955-pop-frame-jit-called/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1955-pop-frame-jit-called/src/art/SuspendEvents.java b/test/1955-pop-frame-jit-called/src/art/SuspendEvents.java
new file mode 120000
index 0000000..ee0632b
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/src/art/SuspendEvents.java
@@ -0,0 +1 @@
+../../../1953-pop-frame/src/art/SuspendEvents.java
\ No newline at end of file
diff --git a/test/1955-pop-frame-jit-called/src/art/Suspension.java b/test/1955-pop-frame-jit-called/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1955-pop-frame-jit-called/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1955-pop-frame-jit-called/src/art/Suspension.java b/test/1955-pop-frame-jit-called/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1955-pop-frame-jit-called/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1956-pop-frame-jit-calling/src/art/Breakpoint.java b/test/1956-pop-frame-jit-calling/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1956-pop-frame-jit-calling/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1956-pop-frame-jit-calling/src/art/Breakpoint.java b/test/1956-pop-frame-jit-calling/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1956-pop-frame-jit-calling/src/art/Redefinition.java b/test/1956-pop-frame-jit-calling/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/1956-pop-frame-jit-calling/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/1956-pop-frame-jit-calling/src/art/Redefinition.java b/test/1956-pop-frame-jit-calling/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1956-pop-frame-jit-calling/src/art/StackTrace.java b/test/1956-pop-frame-jit-calling/src/art/StackTrace.java
deleted file mode 100644
index 2ea2f20..0000000
--- a/test/1956-pop-frame-jit-calling/src/art/StackTrace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Executable;
-
-public class StackTrace {
-  public static class StackFrameData {
-    public final Thread thr;
-    public final Executable method;
-    public final long current_location;
-    public final int depth;
-
-    public StackFrameData(Thread thr, Executable e, long loc, int depth) {
-      this.thr = thr;
-      this.method = e;
-      this.current_location = loc;
-      this.depth = depth;
-    }
-    @Override
-    public String toString() {
-      return String.format(
-          "StackFrameData { thr: '%s', method: '%s', loc: %d, depth: %d }",
-          this.thr,
-          this.method,
-          this.current_location,
-          this.depth);
-    }
-  }
-
-  public static native int GetStackDepth(Thread thr);
-
-  private static native StackFrameData[] nativeGetStackTrace(Thread thr);
-
-  public static StackFrameData[] GetStackTrace(Thread thr) {
-    // The RI seems to give inconsistent (and sometimes nonsensical) results if the thread is not
-    // suspended. The spec says that not being suspended is fine but since we want this to be
-    // consistent we will suspend for the RI.
-    boolean suspend_thread =
-        !System.getProperty("java.vm.name").equals("Dalvik") &&
-        !thr.equals(Thread.currentThread()) &&
-        !Suspension.isSuspended(thr);
-    if (suspend_thread) {
-      Suspension.suspend(thr);
-    }
-    StackFrameData[] out = nativeGetStackTrace(thr);
-    if (suspend_thread) {
-      Suspension.resume(thr);
-    }
-    return out;
-  }
-}
-
diff --git a/test/1956-pop-frame-jit-calling/src/art/StackTrace.java b/test/1956-pop-frame-jit-calling/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1956-pop-frame-jit-calling/src/art/SuspendEvents.java b/test/1956-pop-frame-jit-calling/src/art/SuspendEvents.java
new file mode 120000
index 0000000..ee0632b
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/src/art/SuspendEvents.java
@@ -0,0 +1 @@
+../../../1953-pop-frame/src/art/SuspendEvents.java
\ No newline at end of file
diff --git a/test/1956-pop-frame-jit-calling/src/art/Suspension.java b/test/1956-pop-frame-jit-calling/src/art/Suspension.java
deleted file mode 100644
index 16e62cc..0000000
--- a/test/1956-pop-frame-jit-calling/src/art/Suspension.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-public class Suspension {
-  // Suspends a thread using jvmti.
-  public native static void suspend(Thread thr);
-
-  // Resumes a thread using jvmti.
-  public native static void resume(Thread thr);
-
-  public native static boolean isSuspended(Thread thr);
-
-  public native static int[] suspendList(Thread... threads);
-  public native static int[] resumeList(Thread... threads);
-}
diff --git a/test/1956-pop-frame-jit-calling/src/art/Suspension.java b/test/1956-pop-frame-jit-calling/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1956-pop-frame-jit-calling/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1957-error-ext/src/art/Redefinition.java b/test/1957-error-ext/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/1957-error-ext/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/1957-error-ext/src/art/Redefinition.java b/test/1957-error-ext/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1957-error-ext/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1958-transform-try-jit/src/art/Redefinition.java b/test/1958-transform-try-jit/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/1958-transform-try-jit/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/1958-transform-try-jit/src/art/Redefinition.java b/test/1958-transform-try-jit/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1958-transform-try-jit/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1959-redefine-object-instrument/src/art/Breakpoint.java b/test/1959-redefine-object-instrument/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/1959-redefine-object-instrument/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/1959-redefine-object-instrument/src/art/Breakpoint.java b/test/1959-redefine-object-instrument/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1959-redefine-object-instrument/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/640-checker-byte-simd/expected.txt b/test/1960-checker-bounds-codegen/expected.txt
similarity index 100%
copy from test/640-checker-byte-simd/expected.txt
copy to test/1960-checker-bounds-codegen/expected.txt
diff --git a/test/1960-checker-bounds-codegen/info.txt b/test/1960-checker-bounds-codegen/info.txt
new file mode 100644
index 0000000..1a4f84f
--- /dev/null
+++ b/test/1960-checker-bounds-codegen/info.txt
@@ -0,0 +1 @@
+Test code generation for BoundsCheck.
diff --git a/test/1960-checker-bounds-codegen/src/Main.java b/test/1960-checker-bounds-codegen/src/Main.java
new file mode 100644
index 0000000..a84d67f
--- /dev/null
+++ b/test/1960-checker-bounds-codegen/src/Main.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Test code generation for BoundsCheck.
+ */
+public class Main {
+  // Constant index, variable length.
+  /// CHECK-START-ARM64: int Main.constantIndex(int[]) disassembly (after)
+  /// CHECK:                     BoundsCheck
+  /// CHECK:                     cmp {{w\d+}}, #0x0
+  /// CHECK:                     b.ls #+0x{{[0-9a-f]+}} (addr 0x<<SLOW:[0-9a-f]+>>)
+  /// CHECK:                     BoundsCheckSlowPathARM64
+  /// CHECK-NEXT:                0x{{0*}}<<SLOW>>:
+  /// CHECK-START-ARM: int Main.constantIndex(int[]) disassembly (after)
+  /// CHECK:                     BoundsCheck
+  /// CHECK:                     cmp {{r\d+}}, #0
+  /// CHECK:                     bls.w <<SLOW:0x[0-9a-f]+>>
+  /// CHECK:                     BoundsCheckSlowPathARMVIXL
+  /// CHECK-NEXT:                <<SLOW>>:
+  public static int constantIndex(int[] a) {
+    try {
+      a[0] = 42;
+    } catch (ArrayIndexOutOfBoundsException expected) {
+      return -1;
+    }
+    return a.length;
+  }
+
+  // Constant length, variable index.
+  /// CHECK-START-ARM64: int Main.constantLength(int) disassembly (after)
+  /// CHECK:                     BoundsCheck
+  /// CHECK:                     cmp {{w\d+}}, #0xa
+  /// CHECK:                     b.hs #+0x{{[0-9a-f]+}} (addr 0x<<SLOW:[0-9a-f]+>>)
+  /// CHECK:                     BoundsCheckSlowPathARM64
+  /// CHECK-NEXT:                0x{{0*}}<<SLOW>>:
+  /// CHECK-START-ARM: int Main.constantLength(int) disassembly (after)
+  /// CHECK:                     BoundsCheck
+  /// CHECK:                     cmp {{r\d+}}, #10
+  /// CHECK:                     bcs.w <<SLOW:0x[0-9a-f]+>>
+  /// CHECK:                     BoundsCheckSlowPathARMVIXL
+  /// CHECK-NEXT:                <<SLOW>>:
+  public static int constantLength(int index) {
+    int[] a = new int[10];
+    try {
+      a[index] = 1;
+    } catch (ArrayIndexOutOfBoundsException expected) {
+      return -1;
+    }
+    return index;
+  }
+
+  // Constant index and length, out of bounds access. Check that we only have
+  // the slow path.
+  /// CHECK-START-ARM64: int Main.constantIndexAndLength() disassembly (after)
+  /// CHECK:                     BoundsCheck
+  /// CHECK-NOT:                 cmp
+  /// CHECK:                     b #+0x{{[0-9a-f]+}} (addr 0x<<SLOW:[0-9a-f]+>>)
+  /// CHECK:                     BoundsCheckSlowPathARM64
+  /// CHECK-NEXT:                0x{{0*}}<<SLOW>>:
+  /// CHECK-START-ARM: int Main.constantIndexAndLength() disassembly (after)
+  /// CHECK:                     BoundsCheck
+  /// CHECK-NOT:                 cmp
+  /// CHECK:                     b <<SLOW:0x[0-9a-f]+>>
+  /// CHECK:                     BoundsCheckSlowPathARMVIXL
+  /// CHECK-NEXT:                <<SLOW>>:
+  public static int constantIndexAndLength() {
+    try {
+      int[] a = new int[5];
+      a[10] = 42;
+    } catch (ArrayIndexOutOfBoundsException expected) {
+      return -1;
+    }
+    return 0;
+  }
+
+  public static void main(String[] args) {
+    int[] a = new int[10];
+    int[] b = new int[0];
+    expectEquals(a.length, constantIndex(a));
+    expectEquals(-1, constantIndex(b));
+    expectEquals(0, constantLength(0));
+    expectEquals(9, constantLength(9));
+    expectEquals(-1, constantLength(10));
+    expectEquals(-1, constantLength(-2));
+    expectEquals(-1, constantIndexAndLength());
+    System.out.println("passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/1960-obsolete-jit-multithread-native/src/Main.java b/test/1960-obsolete-jit-multithread-native/src/Main.java
index 1ae5fb7..dbe11a5 100644
--- a/test/1960-obsolete-jit-multithread-native/src/Main.java
+++ b/test/1960-obsolete-jit-multithread-native/src/Main.java
@@ -31,7 +31,7 @@
   // class Transform {
   //   public native void nativeSayHi(Consumer<Consumer<String>> r, Consumer<String> rep);
   //   public void sayHi(Consumer<Consumer<String>> r, Consumer<String> reporter) {
-  //    reporter.accept("goodbye - Start method sayHi");
+  //     reporter.accept("goodbye - Start method sayHi");
   //     r.accept(reporter);
   //     reporter.accept("goodbye - End method sayHi");
   //   }
@@ -156,8 +156,6 @@
     }
 
     public void run() {
-      // Figure out if we can even JIT at all.
-      final boolean has_jit = hasJit();
       try {
         this.arrivalLatch.await();
         maybePrint("REDEFINITION THREAD: redefining something!");
@@ -201,7 +199,5 @@
     }
   }
 
-  private static native boolean hasJit();
-
   private static native void ensureJitCompiled(Class c, String name);
 }
diff --git a/test/1960-obsolete-jit-multithread-native/src/art/Redefinition.java b/test/1960-obsolete-jit-multithread-native/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/1960-obsolete-jit-multithread-native/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/1960-obsolete-jit-multithread-native/src/art/Redefinition.java b/test/1960-obsolete-jit-multithread-native/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1960-obsolete-jit-multithread-native/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1961-checker-loop-vectorizer/expected.txt b/test/1961-checker-loop-vectorizer/expected.txt
new file mode 100644
index 0000000..e25e266
--- /dev/null
+++ b/test/1961-checker-loop-vectorizer/expected.txt
@@ -0,0 +1,3 @@
+DivZeroCheck
+CheckCast
+BoundsCheck
diff --git a/test/1961-checker-loop-vectorizer/info.txt b/test/1961-checker-loop-vectorizer/info.txt
new file mode 100644
index 0000000..ab768d9
--- /dev/null
+++ b/test/1961-checker-loop-vectorizer/info.txt
@@ -0,0 +1 @@
+Test loop vectorizer corner cases.
diff --git a/test/1961-checker-loop-vectorizer/src/Main.java b/test/1961-checker-loop-vectorizer/src/Main.java
new file mode 100644
index 0000000..4d521a1
--- /dev/null
+++ b/test/1961-checker-loop-vectorizer/src/Main.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Test corner cases for loop vectorizer.
+ */
+public class Main {
+  /// CHECK-START: void Main.$noinline$testDivZeroCheck() loop_optimization (before)
+  /// CHECK: DivZeroCheck
+  /// CHECK-NOT: DivZeroCheck
+  /// CHECK-START: void Main.$noinline$testDivZeroCheck() loop_optimization (after)
+  /// CHECK: DivZeroCheck
+  public static void $noinline$testDivZeroCheck() {
+    int[] a = new int[10];
+    for (int i = 0; i < a.length; ++i) {
+      int x = 42 / 0;  // unused but throwing
+      a[i] = 42;
+    }
+  }
+
+  static class Base {}
+  static class Foo extends Base {}
+  static class Bar extends Base {}
+
+  /// CHECK-START: void Main.$noinline$testCheckCast() loop_optimization (before)
+  /// CHECK: CheckCast
+  /// CHECK-NOT: CheckCast
+  /// CHECK-START: void Main.$noinline$testCheckCast() loop_optimization (after)
+  /// CHECK: CheckCast
+  public static void $noinline$testCheckCast() {
+    Base base = new Foo();
+    int[] a = new int[10];
+    for (int i = 0; i < a.length; ++i) {
+      Bar bar = (Bar) base;  // unused but throwing
+      a[i] = 42;
+    }
+  }
+
+  /// CHECK-START: void Main.$noinline$testBoundsCheck() loop_optimization (before)
+  /// CHECK: BoundsCheck
+  /// CHECK-NOT: BoundsCheck
+  /// CHECK-START: void Main.$noinline$testBoundsCheck() loop_optimization (after)
+  /// CHECK: BoundsCheck
+  public static void $noinline$testBoundsCheck() {
+    int[] a = new int[10];
+    for (int i = 0; i < a.length; ++i) {
+      int x = a[11];  // unused but throwing
+      a[i] = 42;
+    }
+  }
+
+  public static void main(String[] args) {
+    // We must not optimize any of the exceptions away.
+    try {
+      $noinline$testDivZeroCheck();
+    } catch (java.lang.ArithmeticException e) {
+      System.out.println("DivZeroCheck");
+    }
+    try {
+      $noinline$testCheckCast();
+    } catch (java.lang.ClassCastException e) {
+      System.out.println("CheckCast");
+    }
+    try {
+      $noinline$testBoundsCheck();
+    } catch (java.lang.ArrayIndexOutOfBoundsException e) {
+      System.out.println("BoundsCheck");
+    }
+  }
+}
diff --git a/test/1961-obsolete-jit-multithread/src/Main.java b/test/1961-obsolete-jit-multithread/src/Main.java
index 81bb936..390a9de 100644
--- a/test/1961-obsolete-jit-multithread/src/Main.java
+++ b/test/1961-obsolete-jit-multithread/src/Main.java
@@ -30,7 +30,7 @@
   //
   // class Transform {
   //   public void sayHi(Consumer<Consumer<String>> r, Consumer<String> reporter) {
-  //    reporter.accept("goodbye - Start method sayHi");
+  //     reporter.accept("goodbye - Start method sayHi");
   //     r.accept(reporter);
   //     reporter.accept("goodbye - End method sayHi");
   //   }
@@ -154,8 +154,6 @@
     }
 
     public void run() {
-      // Figure out if we can even JIT at all.
-      final boolean has_jit = hasJit();
       try {
         this.arrivalLatch.await();
         maybePrint("REDEFINITION THREAD: redefining something!");
@@ -199,7 +197,5 @@
     }
   }
 
-  private static native boolean hasJit();
-
   private static native void ensureJitCompiled(Class c, String name);
 }
diff --git a/test/1961-obsolete-jit-multithread/src/art/Redefinition.java b/test/1961-obsolete-jit-multithread/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/1961-obsolete-jit-multithread/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/1961-obsolete-jit-multithread/src/art/Redefinition.java b/test/1961-obsolete-jit-multithread/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1961-obsolete-jit-multithread/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1963-add-to-dex-classloader-in-memory/add_to_loader.cc b/test/1963-add-to-dex-classloader-in-memory/add_to_loader.cc
new file mode 100644
index 0000000..1c3f36d
--- /dev/null
+++ b/test/1963-add-to-dex-classloader-in-memory/add_to_loader.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <atomic>
+
+#include "jvmti.h"
+
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "scoped_local_ref.h"
+#include "test_env.h"
+
+namespace art {
+namespace Test1963AddToDexClassLoaderInMemory {
+
+using AddToDexClassLoaderInMemory = jvmtiError (*)(jvmtiEnv* env,
+                                                   jobject loader,
+                                                   const unsigned char* dex_file,
+                                                   jint dex_file_length);
+
+template <typename T> static void Dealloc(T* t) {
+  jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(t));
+}
+
+template <typename T, typename... Rest> static void Dealloc(T* t, Rest... rs) {
+  Dealloc(t);
+  Dealloc(rs...);
+}
+static void DeallocParams(jvmtiParamInfo* params, jint n_params) {
+  for (jint i = 0; i < n_params; i++) {
+    Dealloc(params[i].name);
+  }
+}
+
+AddToDexClassLoaderInMemory GetAddFunction(JNIEnv* env) {
+  // Get the extensions.
+  jint n_ext = 0;
+  jvmtiExtensionFunctionInfo* infos = nullptr;
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetExtensionFunctions(&n_ext, &infos))) {
+    return nullptr;
+  }
+  AddToDexClassLoaderInMemory result = nullptr;
+  for (jint i = 0; i < n_ext; i++) {
+    jvmtiExtensionFunctionInfo* cur_info = &infos[i];
+    if (strcmp("com.android.art.classloader.add_to_dex_class_loader_in_memory", cur_info->id) ==
+        0) {
+      result = reinterpret_cast<AddToDexClassLoaderInMemory>(cur_info->func);
+    }
+    // Cleanup the cur_info
+    DeallocParams(cur_info->params, cur_info->param_count);
+    Dealloc(cur_info->id, cur_info->short_description, cur_info->params, cur_info->errors);
+  }
+  // Cleanup the array.
+  Dealloc(infos);
+  return result;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test1963_addToClassLoaderNative(JNIEnv* env,
+                                                                           jclass,
+                                                                           jobject loader,
+                                                                           jobject bytebuffer) {
+  AddToDexClassLoaderInMemory add_func = GetAddFunction(env);
+  if (add_func == nullptr) {
+    env->ThrowNew(env->FindClass("java/lang/RuntimeError"), "Failed to find extension function");
+    return;
+  }
+  JvmtiErrorToException(
+      env,
+      jvmti_env,
+      add_func(jvmti_env,
+               loader,
+               reinterpret_cast<unsigned char*>(env->GetDirectBufferAddress(bytebuffer)),
+               env->GetDirectBufferCapacity(bytebuffer)));
+}
+
+}  // namespace Test1963AddToDexClassLoaderInMemory
+}  // namespace art
diff --git a/test/1963-add-to-dex-classloader-in-memory/check b/test/1963-add-to-dex-classloader-in-memory/check
new file mode 100755
index 0000000..ae1ebf3
--- /dev/null
+++ b/test/1963-add-to-dex-classloader-in-memory/check
@@ -0,0 +1,26 @@
+#!/bin/bash
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Some of our test devices are so old that they don't have memfd_create and are setup in such a way
+# that tmpfile() doesn't work. In these cases this test cannot complete successfully.
+
+if grep -q  -- '---NO memfd_create---' $@; then
+  echo "The test device doesn't have memfd_create. Cannot verify test!" >&2
+  exit 0
+fi
+
+
+./default-check "$@"
diff --git a/test/1963-add-to-dex-classloader-in-memory/check_memfd_create.cc b/test/1963-add-to-dex-classloader-in-memory/check_memfd_create.cc
new file mode 100644
index 0000000..70a64d7
--- /dev/null
+++ b/test/1963-add-to-dex-classloader-in-memory/check_memfd_create.cc
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <string>
+#include <iostream>
+#include <sstream>
+
+#include "jvmti.h"
+
+#include "base/logging.h"
+#include "base/globals.h"
+#include "base/memfd.h"
+
+#ifdef __linux__
+#include <sys/utsname.h>
+#endif
+
+namespace art {
+namespace Test1963AddToDexClassLoaderInMemory {
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasWorkingMemfdCreate(JNIEnv*, jclass) {
+  // We should always have a working version if we're on normal buildbots.
+  if (!art::kIsTargetBuild) {
+    return true;
+  }
+#ifdef __linux__
+  struct utsname name;
+  if (uname(&name) >= 0) {
+    std::istringstream version(name.release);
+    std::string major_str;
+    std::string minor_str;
+    std::getline(version, major_str, '.');
+    std::getline(version, minor_str, '.');
+    int major = std::stoi(major_str);
+    int minor = std::stoi(minor_str);
+    if (major >= 4 || (major == 3 && minor >= 17)) {
+      // memfd_create syscall was added in 3.17
+      return true;
+    }
+  }
+#endif
+  int res = memfd_create_compat("TEST THAT MEMFD CREATE WORKS", 0);
+  if (res < 0) {
+    PLOG(ERROR) << "Unable to call memfd_create_compat successfully!";
+    return false;
+  } else {
+    close(res);
+    return true;
+  }
+}
+
+}  // namespace Test1963AddToDexClassLoaderInMemory
+}  // namespace art
diff --git a/test/1963-add-to-dex-classloader-in-memory/expected.txt b/test/1963-add-to-dex-classloader-in-memory/expected.txt
new file mode 100644
index 0000000..c3cc448
--- /dev/null
+++ b/test/1963-add-to-dex-classloader-in-memory/expected.txt
@@ -0,0 +1,19 @@
+ - Run while adding new referenced class.
+ -- Running sayHi before redefinition
+Hello from TestClass sayHi function
+Goodbye from TestClass!
+ -- Adding NewClass to classloader!
+ -- Redefine the TestClass
+ -- call TestClass again, now with NewClass refs
+Hello again from TestClass sayHi function
+Hello from NewClass sayHi function
+Goodbye again from TestClass!
+ - Run without adding new referenced class.
+ -- Running sayHi before redefinition
+Hello from TestClass sayHi function
+Goodbye from TestClass!
+ -- Redefine the TestClass
+ -- call TestClass again, now with NewClass refs
+Hello again from TestClass sayHi function
+ -- Exception caught when running test without new class added! java.lang.NoClassDefFoundError
+ --- java.lang.NoClassDefFoundError At foobar.TestClass.sayHi(TestClass.java:5)
diff --git a/test/1963-add-to-dex-classloader-in-memory/info.txt b/test/1963-add-to-dex-classloader-in-memory/info.txt
new file mode 100644
index 0000000..48df982
--- /dev/null
+++ b/test/1963-add-to-dex-classloader-in-memory/info.txt
@@ -0,0 +1 @@
+Tests we can add dex-file buffers to an existing classloader and the old classes can see them.
\ No newline at end of file
diff --git a/test/1963-add-to-dex-classloader-in-memory/run b/test/1963-add-to-dex-classloader-in-memory/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/1963-add-to-dex-classloader-in-memory/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/1963-add-to-dex-classloader-in-memory/src/Main.java b/test/1963-add-to-dex-classloader-in-memory/src/Main.java
new file mode 100644
index 0000000..1825e4f
--- /dev/null
+++ b/test/1963-add-to-dex-classloader-in-memory/src/Main.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    try {
+      if (!hasWorkingMemfdCreate()) {
+        System.out.println("---NO memfd_create---");
+      }
+      art.Test1963.run();
+    } catch (Throwable t) {
+      System.out.println(t);
+      t.printStackTrace(System.out);
+      return;
+    }
+  }
+
+  public static native boolean hasWorkingMemfdCreate();
+}
diff --git a/test/1963-add-to-dex-classloader-in-memory/src/art/Redefinition.java b/test/1963-add-to-dex-classloader-in-memory/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1963-add-to-dex-classloader-in-memory/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1963-add-to-dex-classloader-in-memory/src/art/Test1963.java b/test/1963-add-to-dex-classloader-in-memory/src/art/Test1963.java
new file mode 100644
index 0000000..ecefa6d
--- /dev/null
+++ b/test/1963-add-to-dex-classloader-in-memory/src/art/Test1963.java
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Base64;
+
+public final class Test1963 {
+  private static boolean IS_ART = System.getProperty("java.vm.name").equals("Dalvik");
+
+  private static String TEST_CLASS_NAME = "foobar.TestClass";
+  private static String NEW_CLASS_NAME = "foobar.NewClass";
+
+  /**
+   * base64 encoded class/dex file for
+   * package foobar;
+   * public class NewClass {
+   *   static void sayHi() {
+   *    System.out.println("Hello from NewClass sayHi function");
+   *    TestClass.sayBye();
+   *   }
+   * }
+   */
+  private static byte[] NEW_CLASS_BYTES = Base64.getDecoder().decode(
+      "yv66vgAAADUAIQoABwAPCQAQABEIABIKABMAFAoAFQAWBwAXBwAYAQAGPGluaXQ+AQADKClWAQAE"
+      + "Q29kZQEAD0xpbmVOdW1iZXJUYWJsZQEABXNheUhpAQAKU291cmNlRmlsZQEADU5ld0NsYXNzLmph"
+      + "dmEMAAgACQcAGQwAGgAbAQAiSGVsbG8gZnJvbSBOZXdDbGFzcyBzYXlIaSBmdW5jdGlvbgcAHAwA"
+      + "HQAeBwAfDAAgAAkBAA9mb29iYXIvTmV3Q2xhc3MBABBqYXZhL2xhbmcvT2JqZWN0AQAQamF2YS9s"
+      + "YW5nL1N5c3RlbQEAA291dAEAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwEAE2phdmEvaW8vUHJpbnRT"
+      + "dHJlYW0BAAdwcmludGxuAQAVKExqYXZhL2xhbmcvU3RyaW5nOylWAQAQZm9vYmFyL1Rlc3RDbGFz"
+      + "cwEABnNheUJ5ZQAhAAYABwAAAAAAAgABAAgACQABAAoAAAAdAAEAAQAAAAUqtwABsQAAAAEACwAA"
+      + "AAYAAQAAAAIACAAMAAkAAQAKAAAALAACAAAAAAAMsgACEgO2AAS4AAWxAAAAAQALAAAADgADAAAA"
+      + "BAAIAAUACwAGAAEADQAAAAIADg==");
+  private static byte[] NEW_DEX_BYTES = Base64.getDecoder().decode(
+      "ZGV4CjAzNQA8kzH5IALCWT88v716WlU7OfqukCT2o6WQAwAAcAAAAHhWNBIAAAAAAAAAAOQCAAAQ"
+      + "AAAAcAAAAAcAAACwAAAAAgAAAMwAAAABAAAA5AAAAAUAAADsAAAAAQAAABQBAABcAgAANAEAAIIB"
+      + "AACKAQAArgEAAMEBAADVAQAA7AEAAAACAAAUAgAAKAIAADcCAAA6AgAAPgIAAEMCAABMAgAAVAIA"
+      + "AFsCAAACAAAAAwAAAAQAAAAFAAAABgAAAAcAAAAJAAAACQAAAAYAAAAAAAAACgAAAAYAAAB8AQAA"
+      + "BQACAAsAAAAAAAAAAAAAAAAAAAAOAAAAAQAAAA0AAAACAAEADAAAAAMAAAAAAAAAAAAAAAEAAAAD"
+      + "AAAAAAAAAAgAAAAAAAAA0gIAAAAAAAABAAEAAQAAAHIBAAAEAAAAcBAEAAAADgACAAAAAgAAAHYB"
+      + "AAALAAAAYgAAABoBAQBuIAMAEABxAAIAAAAOAAIADgAEAA54PAABAAAABAAGPGluaXQ+ACJIZWxs"
+      + "byBmcm9tIE5ld0NsYXNzIHNheUhpIGZ1bmN0aW9uABFMZm9vYmFyL05ld0NsYXNzOwASTGZvb2Jh"
+      + "ci9UZXN0Q2xhc3M7ABVMamF2YS9pby9QcmludFN0cmVhbTsAEkxqYXZhL2xhbmcvT2JqZWN0OwAS"
+      + "TGphdmEvbGFuZy9TdHJpbmc7ABJMamF2YS9sYW5nL1N5c3RlbTsADU5ld0NsYXNzLmphdmEAAVYA"
+      + "AlZMAANvdXQAB3ByaW50bG4ABnNheUJ5ZQAFc2F5SGkAdX5+RDh7ImNvbXBpbGF0aW9uLW1vZGUi"
+      + "OiJkZWJ1ZyIsIm1pbi1hcGkiOjEsInNoYS0xIjoiZDMyODJiOGY1NDdjMjM0YzRlNGM5MzA5YzM2"
+      + "Yzc5NWEyOTg1NmVhYiIsInZlcnNpb24iOiIxLjYuMS1kZXYifQAAAAIAAIGABLQCAQjMAgAAAAAO"
+      + "AAAAAAAAAAEAAAAAAAAAAQAAABAAAABwAAAAAgAAAAcAAACwAAAAAwAAAAIAAADMAAAABAAAAAEA"
+      + "AADkAAAABQAAAAUAAADsAAAABgAAAAEAAAAUAQAAASAAAAIAAAA0AQAAAyAAAAIAAAByAQAAARAA"
+      + "AAEAAAB8AQAAAiAAABAAAACCAQAAACAAAAEAAADSAgAAAxAAAAEAAADgAgAAABAAAAEAAADkAgAA");
+  /**
+   * base64 encoded class/dex file for
+   * package foobar;
+   * public class TestClass {
+   *   public static void sayHi() {
+   *    System.out.println("Hello again from TestClass sayHi function");
+   *    TestClass.sayBye();
+   *   }
+   *   static void sayBye() {
+   *    System.out.println("Goodbye from TestClass!");
+   *   }
+   * }
+   */
+  private static byte[] CLASS_BYTES = Base64.getDecoder().decode(
+      "yv66vgAAADUAIQoACAARCQASABMIABQKABUAFgoABwAXCAAYBwAZBwAaAQAGPGluaXQ+AQADKClW"
+      + "AQAEQ29kZQEAD0xpbmVOdW1iZXJUYWJsZQEABXNheUhpAQAGc2F5QnllAQAKU291cmNlRmlsZQEA"
+      + "DlRlc3RDbGFzcy5qYXZhDAAJAAoHABsMABwAHQEAI0hlbGxvIGZyb20gVGVzdENsYXNzIHNheUhp"
+      + "IGZ1bmN0aW9uBwAeDAAfACAMAA4ACgEAF0dvb2RieWUgZnJvbSBUZXN0Q2xhc3MhAQAQZm9vYmFy"
+      + "L1Rlc3RDbGFzcwEAEGphdmEvbGFuZy9PYmplY3QBABBqYXZhL2xhbmcvU3lzdGVtAQADb3V0AQAV"
+      + "TGphdmEvaW8vUHJpbnRTdHJlYW07AQATamF2YS9pby9QcmludFN0cmVhbQEAB3ByaW50bG4BABUo"
+      + "TGphdmEvbGFuZy9TdHJpbmc7KVYAIQAHAAgAAAAAAAMAAQAJAAoAAQALAAAAHQABAAEAAAAFKrcA"
+      + "AbEAAAABAAwAAAAGAAEAAAACAAkADQAKAAEACwAAACwAAgAAAAAADLIAAhIDtgAEuAAFsQAAAAEA"
+      + "DAAAAA4AAwAAAAQACAAFAAsABgAIAA4ACgABAAsAAAAlAAIAAAAAAAmyAAISBrYABLEAAAABAAwA"
+      + "AAAKAAIAAAAIAAgACQABAA8AAAACABA=");
+
+  private static byte[] DEX_BYTES = Base64.getDecoder().decode(
+      "ZGV4CjAzNQARmtFTPdWXebnrTNy5b71tEiJKC96qIPXAAwAAcAAAAHhWNBIAAAAAAAAAABQDAAAQ"
+      + "AAAAcAAAAAYAAACwAAAAAgAAAMgAAAABAAAA4AAAAAUAAADoAAAAAQAAABABAACQAgAAMAEAAKYB"
+      + "AACuAQAAxwEAAOwBAAAAAgAAFwIAACsCAAA/AgAAUwIAAGMCAABmAgAAagIAAG8CAAB4AgAAgAIA"
+      + "AIcCAAADAAAABAAAAAUAAAAGAAAABwAAAAkAAAAJAAAABQAAAAAAAAAKAAAABQAAAKABAAAEAAEA"
+      + "CwAAAAAAAAAAAAAAAAAAAA0AAAAAAAAADgAAAAEAAQAMAAAAAgAAAAAAAAAAAAAAAQAAAAIAAAAA"
+      + "AAAACAAAAAAAAAD+AgAAAAAAAAEAAQABAAAAjgEAAAQAAABwEAQAAAAOAAIAAAACAAAAkgEAAAgA"
+      + "AABiAAAAGgEBAG4gAwAQAA4AAgAAAAIAAACXAQAACwAAAGIAAAAaAQIAbiADABAAcQABAAAADgAC"
+      + "AA4ACAAOeAAEAA54PAAAAAABAAAAAwAGPGluaXQ+ABdHb29kYnllIGZyb20gVGVzdENsYXNzIQAj"
+      + "SGVsbG8gZnJvbSBUZXN0Q2xhc3Mgc2F5SGkgZnVuY3Rpb24AEkxmb29iYXIvVGVzdENsYXNzOwAV"
+      + "TGphdmEvaW8vUHJpbnRTdHJlYW07ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3Ry"
+      + "aW5nOwASTGphdmEvbGFuZy9TeXN0ZW07AA5UZXN0Q2xhc3MuamF2YQABVgACVkwAA291dAAHcHJp"
+      + "bnRsbgAGc2F5QnllAAVzYXlIaQB1fn5EOHsiY29tcGlsYXRpb24tbW9kZSI6ImRlYnVnIiwibWlu"
+      + "LWFwaSI6MSwic2hhLTEiOiJkMzI4MmI4ZjU0N2MyMzRjNGU0YzkzMDljMzZjNzk1YTI5ODU2ZWFi"
+      + "IiwidmVyc2lvbiI6IjEuNi4xLWRldiJ9AAAAAwAAgYAEsAIBCMgCAQnoAgAAAAAOAAAAAAAAAAEA"
+      + "AAAAAAAAAQAAABAAAABwAAAAAgAAAAYAAACwAAAAAwAAAAIAAADIAAAABAAAAAEAAADgAAAABQAA"
+      + "AAUAAADoAAAABgAAAAEAAAAQAQAAASAAAAMAAAAwAQAAAyAAAAMAAACOAQAAARAAAAEAAACgAQAA"
+      + "AiAAABAAAACmAQAAACAAAAEAAAD+AgAAAxAAAAEAAAAQAwAAABAAAAEAAAAUAwAA");
+  /**
+   * base64 encoded class/dex file for
+   * package foobar;
+   * public class TestClass {
+   *   public static void sayHi() {
+   *    System.out.println("Hello again from TestClass sayHi function");
+   *    NewClass.sayHi();
+   *   }
+   *   static void sayBye() {
+   *    System.out.println("Goodbye again from TestClass!");
+   *   }
+   * }
+   */
+  private static byte[] REDEF_CLASS_BYTES = Base64.getDecoder().decode(
+      "yv66vgAAADUAIwoACAARCQASABMIABQKABUAFgoAFwAYCAAZBwAaBwAbAQAGPGluaXQ+AQADKClW"
+      + "AQAEQ29kZQEAD0xpbmVOdW1iZXJUYWJsZQEABXNheUhpAQAGc2F5QnllAQAKU291cmNlRmlsZQEA"
+      + "DlRlc3RDbGFzcy5qYXZhDAAJAAoHABwMAB0AHgEAKUhlbGxvIGFnYWluIGZyb20gVGVzdENsYXNz"
+      + "IHNheUhpIGZ1bmN0aW9uBwAfDAAgACEHACIMAA0ACgEAHUdvb2RieWUgYWdhaW4gZnJvbSBUZXN0"
+      + "Q2xhc3MhAQAQZm9vYmFyL1Rlc3RDbGFzcwEAEGphdmEvbGFuZy9PYmplY3QBABBqYXZhL2xhbmcv"
+      + "U3lzdGVtAQADb3V0AQAVTGphdmEvaW8vUHJpbnRTdHJlYW07AQATamF2YS9pby9QcmludFN0cmVh"
+      + "bQEAB3ByaW50bG4BABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAA9mb29iYXIvTmV3Q2xhc3MAIQAH"
+      + "AAgAAAAAAAMAAQAJAAoAAQALAAAAHQABAAEAAAAFKrcAAbEAAAABAAwAAAAGAAEAAAACAAkADQAK"
+      + "AAEACwAAACwAAgAAAAAADLIAAhIDtgAEuAAFsQAAAAEADAAAAA4AAwAAAAQACAAFAAsABgAIAA4A"
+      + "CgABAAsAAAAlAAIAAAAAAAmyAAISBrYABLEAAAABAAwAAAAKAAIAAAAIAAgACQABAA8AAAACABA=");
+
+  private static byte[] REDEF_DEX_BYTES = Base64.getDecoder().decode(
+      "ZGV4CjAzNQA2plEeYRH4vl6wJgnAZOVcZ537QN9NXB3wAwAAcAAAAHhWNBIAAAAAAAAAAEQDAAAR"
+      + "AAAAcAAAAAcAAAC0AAAAAgAAANAAAAABAAAA6AAAAAYAAADwAAAAAQAAACABAACwAgAAQAEAALYB"
+      + "AAC+AQAA3QEAAAgCAAAbAgAALwIAAEYCAABaAgAAbgIAAIICAACSAgAAlQIAAJkCAACeAgAApwIA"
+      + "AK8CAAC2AgAAAwAAAAQAAAAFAAAABgAAAAcAAAAIAAAACgAAAAoAAAAGAAAAAAAAAAsAAAAGAAAA"
+      + "sAEAAAUAAgAMAAAAAAAAAA8AAAABAAAAAAAAAAEAAAAOAAAAAQAAAA8AAAACAAEADQAAAAMAAAAA"
+      + "AAAAAQAAAAEAAAADAAAAAAAAAAkAAAAAAAAALQMAAAAAAAABAAEAAQAAAJ4BAAAEAAAAcBAFAAAA"
+      + "DgACAAAAAgAAAKIBAAAIAAAAYgAAABoBAQBuIAQAEAAOAAIAAAACAAAApwEAAAsAAABiAAAAGgEC"
+      + "AG4gBAAQAHEAAAAAAA4AAgAOAAgADngABAAOeDwAAAAAAQAAAAQABjxpbml0PgAdR29vZGJ5ZSBh"
+      + "Z2FpbiBmcm9tIFRlc3RDbGFzcyEAKUhlbGxvIGFnYWluIGZyb20gVGVzdENsYXNzIHNheUhpIGZ1"
+      + "bmN0aW9uABFMZm9vYmFyL05ld0NsYXNzOwASTGZvb2Jhci9UZXN0Q2xhc3M7ABVMamF2YS9pby9Q"
+      + "cmludFN0cmVhbTsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABJMamF2"
+      + "YS9sYW5nL1N5c3RlbTsADlRlc3RDbGFzcy5qYXZhAAFWAAJWTAADb3V0AAdwcmludGxuAAZzYXlC"
+      + "eWUABXNheUhpAHV+fkQ4eyJjb21waWxhdGlvbi1tb2RlIjoiZGVidWciLCJtaW4tYXBpIjoxLCJz"
+      + "aGEtMSI6ImQzMjgyYjhmNTQ3YzIzNGM0ZTRjOTMwOWMzNmM3OTVhMjk4NTZlYWIiLCJ2ZXJzaW9u"
+      + "IjoiMS42LjEtZGV2In0AAAADAAGBgATAAgEI2AIBCfgCAAAAAAAOAAAAAAAAAAEAAAAAAAAAAQAA"
+      + "ABEAAABwAAAAAgAAAAcAAAC0AAAAAwAAAAIAAADQAAAABAAAAAEAAADoAAAABQAAAAYAAADwAAAA"
+      + "BgAAAAEAAAAgAQAAASAAAAMAAABAAQAAAyAAAAMAAACeAQAAARAAAAEAAACwAQAAAiAAABEAAAC2"
+      + "AQAAACAAAAEAAAAtAwAAAxAAAAEAAABAAwAAABAAAAEAAABEAwAA");
+
+  public static void SafePrintCause(Throwable t) {
+    StackTraceElement cause = t.getStackTrace()[0];
+    System.out.println(" --- " + t.getClass().getName() + " At " + cause.getClassName() + "." +
+                       cause.getMethodName() + "(" + cause.getFileName() + ":" +
+                       cause.getLineNumber() + ")");
+  }
+
+  public static void run() throws Exception {
+    System.out.println(" - Run while adding new referenced class.");
+    // No exception expected.
+    run(true);
+    System.out.println(" - Run without adding new referenced class.");
+    try {
+      run(false);
+    } catch (Exception e) {
+      if (e.getCause() == null || !(e.getCause() instanceof NoClassDefFoundError)) {
+        throw new Exception("Unexpected error from test!", e);
+      }
+      // Unfortunately art and RI have different messages here so just return the type.
+      System.out.println(" -- Exception caught when running test without new class added! " +
+                         e.getCause().getClass().getName());
+      SafePrintCause(e.getCause());
+    }
+  }
+
+  public static void run(boolean add_new) throws Exception {
+    ClassLoader cl = getClassLoader();
+    Class<?> target = cl.loadClass(TEST_CLASS_NAME);
+    Method sayHi = target.getDeclaredMethod("sayHi");
+    System.out.println(" -- Running sayHi before redefinition");
+    sayHi.invoke(null);
+    if (add_new) {
+      System.out.println(" -- Adding NewClass to classloader!");
+      addToClassLoader(cl, NEW_CLASS_BYTES, NEW_DEX_BYTES);
+    }
+    System.out.println(" -- Redefine the TestClass");
+    Redefinition.doCommonClassRedefinition(target, REDEF_CLASS_BYTES, REDEF_DEX_BYTES);
+    System.out.println(" -- call TestClass again, now with NewClass refs");
+    sayHi.invoke(null);
+  }
+
+  public static class ExtensibleClassLoader extends ClassLoader {
+    private byte[] new_class = null;
+    public ExtensibleClassLoader() {
+      super(ExtensibleClassLoader.class.getClassLoader());
+    }
+
+    public void addSingleClass(byte[] bb) {
+      new_class = bb;
+    }
+
+    protected Class<?> findClass(String name) throws ClassNotFoundException {
+      if (name.equals(TEST_CLASS_NAME)) {
+        return this.defineClass(TEST_CLASS_NAME, CLASS_BYTES, 0, CLASS_BYTES.length);
+      }
+      if (name.equals(NEW_CLASS_NAME) && new_class != null) {
+        return this.defineClass(name, new_class, 0, new_class.length);
+      } else {
+        return super.findClass(name);
+      }
+    }
+  }
+
+  public static ClassLoader getClassLoader() throws Exception {
+    if (!IS_ART) {
+      return new ExtensibleClassLoader();
+    } else {
+      Class<?> class_loader_class = Class.forName("dalvik.system.InMemoryDexClassLoader");
+      Constructor<?> ctor = class_loader_class.getConstructor(ByteBuffer.class, ClassLoader.class);
+      return (ClassLoader)ctor.newInstance(ByteBuffer.wrap(DEX_BYTES),
+                                           Test1963.class.getClassLoader());
+    }
+  }
+
+  public static void addToClassLoader(ClassLoader cl, byte[] class_bytes, byte[] dex_bytes) {
+    if (IS_ART) {
+      addToClassLoaderNative(cl, ByteBuffer.allocateDirect(dex_bytes.length).put(dex_bytes));
+    } else {
+      ((ExtensibleClassLoader)cl).addSingleClass(class_bytes);
+    }
+  }
+
+  public static native void addToClassLoaderNative(ClassLoader loader, ByteBuffer buff);
+}
diff --git a/test/1964-add-to-dex-classloader-file/add_to_loader.cc b/test/1964-add-to-dex-classloader-file/add_to_loader.cc
new file mode 100644
index 0000000..9fbea97
--- /dev/null
+++ b/test/1964-add-to-dex-classloader-file/add_to_loader.cc
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <atomic>
+
+#include "jvmti.h"
+
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "scoped_local_ref.h"
+#include "test_env.h"
+
+namespace art {
+namespace Test1964AddToDexClassLoader {
+
+using AddToDexClassLoader = jvmtiError (*)(jvmtiEnv* env,
+                                                   jobject loader,
+                                                   const char* segment);
+
+template <typename T> static void Dealloc(T* t) {
+  jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(t));
+}
+
+template <typename T, typename... Rest> static void Dealloc(T* t, Rest... rs) {
+  Dealloc(t);
+  Dealloc(rs...);
+}
+static void DeallocParams(jvmtiParamInfo* params, jint n_params) {
+  for (jint i = 0; i < n_params; i++) {
+    Dealloc(params[i].name);
+  }
+}
+
+AddToDexClassLoader GetAddFunction(JNIEnv* env) {
+  // Get the extensions.
+  jint n_ext = 0;
+  jvmtiExtensionFunctionInfo* infos = nullptr;
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetExtensionFunctions(&n_ext, &infos))) {
+    return nullptr;
+  }
+  AddToDexClassLoader result = nullptr;
+  for (jint i = 0; i < n_ext; i++) {
+    jvmtiExtensionFunctionInfo* cur_info = &infos[i];
+    if (strcmp("com.android.art.classloader.add_to_dex_class_loader", cur_info->id) ==
+        0) {
+      result = reinterpret_cast<AddToDexClassLoader>(cur_info->func);
+    }
+    // Cleanup the cur_info
+    DeallocParams(cur_info->params, cur_info->param_count);
+    Dealloc(cur_info->id, cur_info->short_description, cur_info->params, cur_info->errors);
+  }
+  // Cleanup the array.
+  Dealloc(infos);
+  return result;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_addToClassLoaderNative(JNIEnv* env,
+                                                                           jclass,
+                                                                           jobject loader,
+                                                                           jstring segment) {
+  AddToDexClassLoader add_func = GetAddFunction(env);
+  if (add_func == nullptr) {
+    env->ThrowNew(env->FindClass("java/lang/RuntimeError"), "Failed to find extension function");
+    return;
+  }
+  const char* chars = env->GetStringUTFChars(segment, nullptr);
+  JvmtiErrorToException(
+      env,
+      jvmti_env,
+      add_func(jvmti_env,
+               loader,
+               chars));
+  env->ReleaseStringUTFChars(segment, chars);
+}
+
+}  // namespace Test1964AddToDexClassLoader
+}  // namespace art
diff --git a/test/1964-add-to-dex-classloader-file/expected.txt b/test/1964-add-to-dex-classloader-file/expected.txt
new file mode 100644
index 0000000..58b86ef
--- /dev/null
+++ b/test/1964-add-to-dex-classloader-file/expected.txt
@@ -0,0 +1,23 @@
+ - Run while adding new referenced class.
+ -- Running sayHi before redefinition
+Hello from TestClass sayHi function
+Goodbye from TestClass!
+ -- Adding NewClass to classloader!
+ -- Redefine the TestClass
+ -- call TestClass again, now with NewClass refs
+Hello again from TestClass sayHi function
+Hello from NewClass sayHi function
+Nearby stack:
+	private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread)(line: -1)
+	public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread)(line: 61)
+	static void foobar.NewClass.sayHi() throws java.lang.Exception(line: 27)
+	public static void foobar.TestClass.sayHi()(line: 5)
+ - Run without adding new referenced class.
+ -- Running sayHi before redefinition
+Hello from TestClass sayHi function
+Goodbye from TestClass!
+ -- Redefine the TestClass
+ -- call TestClass again, now with NewClass refs
+Hello again from TestClass sayHi function
+ -- Exception caught when running test without new class added! java.lang.NoClassDefFoundError
+ --- java.lang.NoClassDefFoundError At foobar.TestClass.sayHi(TestClass.java:5)
diff --git a/test/1964-add-to-dex-classloader-file/info.txt b/test/1964-add-to-dex-classloader-file/info.txt
new file mode 100644
index 0000000..48df982
--- /dev/null
+++ b/test/1964-add-to-dex-classloader-file/info.txt
@@ -0,0 +1 @@
+Tests we can add dex files to an existing classloader and the old classes can see them.
\ No newline at end of file
diff --git a/test/1964-add-to-dex-classloader-file/run b/test/1964-add-to-dex-classloader-file/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/1964-add-to-dex-classloader-file/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Run with the jvmti test agent attached so the native test code can use the
+# JVMTI extension API (needed by Main.addToClassLoaderNative).
+./default-run "$@" --jvmti
diff --git a/test/1964-add-to-dex-classloader-file/src-ex/foobar/NewClass.java b/test/1964-add-to-dex-classloader-file/src-ex/foobar/NewClass.java
new file mode 100644
index 0000000..a27d5d3
--- /dev/null
+++ b/test/1964-add-to-dex-classloader-file/src-ex/foobar/NewClass.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package foobar;
+import art.Breakpoint;
+import art.StackTrace;
+
+/**
+ * Class made visible to the test's class-loader only at runtime; the
+ * redefined TestClass resolves and calls it to prove the loader can see
+ * classes added after TestClass was first loaded.
+ */
+public class NewClass {
+  /**
+   * Prints a greeting, dumps the nearby Java stack (only frames declared in
+   * the art. or foobar. packages), and reports if this class and the last
+   * such frame's class were loaded by different class-loaders.
+   */
+  static void sayHi() throws Exception {
+    System.out.println("Hello from NewClass sayHi function");
+    // Doing this would be nice but it would make compiling the test more tricky. Just use
+    // reflection and check the classloader is the same.
+    // TestClass.sayBye();
+    StackTrace.StackFrameData[] stack = StackTrace.GetStackTrace(Thread.currentThread());
+    StackTrace.StackFrameData caller = null;
+    System.out.println("Nearby stack:");
+    for (StackTrace.StackFrameData sfd : stack) {
+      String caller_name = sfd.method.getDeclaringClass().getName();
+      if (caller_name.startsWith("art.") || caller_name.startsWith("foobar.")) {
+        System.out.println("\t" + sfd.method + "(line: " +
+                           Breakpoint.locationToLine(sfd.method, sfd.current_location) + ")");
+        caller = sfd;
+      } else {
+        break;
+      }
+    }
+    // NOTE(review): assumes at least one art./foobar. frame was seen so
+    // 'caller' is non-null; an NPE here would fail the test anyway.
+    if (NewClass.class.getClassLoader() != caller.method.getDeclaringClass().getClassLoader()) {
+      System.out.println("Different classloader for TestClass and my class.");
+    }
+  }
+}
\ No newline at end of file
diff --git a/test/1964-add-to-dex-classloader-file/src/Main.java b/test/1964-add-to-dex-classloader-file/src/Main.java
new file mode 100644
index 0000000..2293d42
--- /dev/null
+++ b/test/1964-add-to-dex-classloader-file/src/Main.java
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import art.Redefinition;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Base64;
+
+/**
+ * Test 1964: verifies a dex file can be appended to an existing class-loader
+ * (via the ART JVMTI extension function on ART, or by extending a
+ * URLClassLoader on the reference implementation) so that a redefined class
+ * can resolve classes that were added after it was first loaded.
+ */
+public class Main {
+  private static String TEST_NAME = "1964-add-to-dex-classloader-file";
+  // "Dalvik" is the java.vm.name ART reports; selects the native/JVMTI path
+  // versus the URLClassLoader path below.
+  private static boolean IS_ART = System.getProperty("java.vm.name").equals("Dalvik");
+
+  private static String TEST_CLASS_NAME = "foobar.TestClass";
+  private static String NEW_CLASS_NAME = "foobar.NewClass";
+
+  /**
+   * base64 encoded class/dex file for
+   * package foobar;
+   * public class TestClass {
+   *   public static void sayHi() {
+   *    System.out.println("Hello from TestClass sayHi function");
+   *    TestClass.sayBye();
+   *   }
+   *   static void sayBye() {
+   *    System.out.println("Goodbye from TestClass!");
+   *   }
+   * }
+   */
+  private static byte[] CLASS_BYTES = Base64.getDecoder().decode(
+      "yv66vgAAADUAIQoACAARCQASABMIABQKABUAFgoABwAXCAAYBwAZBwAaAQAGPGluaXQ+AQADKClW"
+      + "AQAEQ29kZQEAD0xpbmVOdW1iZXJUYWJsZQEABXNheUhpAQAGc2F5QnllAQAKU291cmNlRmlsZQEA"
+      + "DlRlc3RDbGFzcy5qYXZhDAAJAAoHABsMABwAHQEAI0hlbGxvIGZyb20gVGVzdENsYXNzIHNheUhp"
+      + "IGZ1bmN0aW9uBwAeDAAfACAMAA4ACgEAF0dvb2RieWUgZnJvbSBUZXN0Q2xhc3MhAQAQZm9vYmFy"
+      + "L1Rlc3RDbGFzcwEAEGphdmEvbGFuZy9PYmplY3QBABBqYXZhL2xhbmcvU3lzdGVtAQADb3V0AQAV"
+      + "TGphdmEvaW8vUHJpbnRTdHJlYW07AQATamF2YS9pby9QcmludFN0cmVhbQEAB3ByaW50bG4BABUo"
+      + "TGphdmEvbGFuZy9TdHJpbmc7KVYAIQAHAAgAAAAAAAMAAQAJAAoAAQALAAAAHQABAAEAAAAFKrcA"
+      + "AbEAAAABAAwAAAAGAAEAAAACAAkADQAKAAEACwAAACwAAgAAAAAADLIAAhIDtgAEuAAFsQAAAAEA"
+      + "DAAAAA4AAwAAAAQACAAFAAsABgAIAA4ACgABAAsAAAAlAAIAAAAAAAmyAAISBrYABLEAAAABAAwA"
+      + "AAAKAAIAAAAIAAgACQABAA8AAAACABA=");
+
+  private static byte[] DEX_BYTES = Base64.getDecoder().decode(
+      "ZGV4CjAzNQARmtFTPdWXebnrTNy5b71tEiJKC96qIPXAAwAAcAAAAHhWNBIAAAAAAAAAABQDAAAQ"
+      + "AAAAcAAAAAYAAACwAAAAAgAAAMgAAAABAAAA4AAAAAUAAADoAAAAAQAAABABAACQAgAAMAEAAKYB"
+      + "AACuAQAAxwEAAOwBAAAAAgAAFwIAACsCAAA/AgAAUwIAAGMCAABmAgAAagIAAG8CAAB4AgAAgAIA"
+      + "AIcCAAADAAAABAAAAAUAAAAGAAAABwAAAAkAAAAJAAAABQAAAAAAAAAKAAAABQAAAKABAAAEAAEA"
+      + "CwAAAAAAAAAAAAAAAAAAAA0AAAAAAAAADgAAAAEAAQAMAAAAAgAAAAAAAAAAAAAAAQAAAAIAAAAA"
+      + "AAAACAAAAAAAAAD+AgAAAAAAAAEAAQABAAAAjgEAAAQAAABwEAQAAAAOAAIAAAACAAAAkgEAAAgA"
+      + "AABiAAAAGgEBAG4gAwAQAA4AAgAAAAIAAACXAQAACwAAAGIAAAAaAQIAbiADABAAcQABAAAADgAC"
+      + "AA4ACAAOeAAEAA54PAAAAAABAAAAAwAGPGluaXQ+ABdHb29kYnllIGZyb20gVGVzdENsYXNzIQAj"
+      + "SGVsbG8gZnJvbSBUZXN0Q2xhc3Mgc2F5SGkgZnVuY3Rpb24AEkxmb29iYXIvVGVzdENsYXNzOwAV"
+      + "TGphdmEvaW8vUHJpbnRTdHJlYW07ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3Ry"
+      + "aW5nOwASTGphdmEvbGFuZy9TeXN0ZW07AA5UZXN0Q2xhc3MuamF2YQABVgACVkwAA291dAAHcHJp"
+      + "bnRsbgAGc2F5QnllAAVzYXlIaQB1fn5EOHsiY29tcGlsYXRpb24tbW9kZSI6ImRlYnVnIiwibWlu"
+      + "LWFwaSI6MSwic2hhLTEiOiJkMzI4MmI4ZjU0N2MyMzRjNGU0YzkzMDljMzZjNzk1YTI5ODU2ZWFi"
+      + "IiwidmVyc2lvbiI6IjEuNi4xLWRldiJ9AAAAAwAAgYAEsAIBCMgCAQnoAgAAAAAOAAAAAAAAAAEA"
+      + "AAAAAAAAAQAAABAAAABwAAAAAgAAAAYAAACwAAAAAwAAAAIAAADIAAAABAAAAAEAAADgAAAABQAA"
+      + "AAUAAADoAAAABgAAAAEAAAAQAQAAASAAAAMAAAAwAQAAAyAAAAMAAACOAQAAARAAAAEAAACgAQAA"
+      + "AiAAABAAAACmAQAAACAAAAEAAAD+AgAAAxAAAAEAAAAQAwAAABAAAAEAAAAUAwAA");
+  /**
+   * base64 encoded class/dex file for
+   * package foobar;
+   * public class TestClass {
+   *   public static void sayHi() {
+   *    System.out.println("Hello again from TestClass sayHi function");
+   *    NewClass.sayHi();
+   *   }
+   *   static void sayBye() {
+   *    System.out.println("Goodbye again from TestClass!");
+   *   }
+   * }
+   */
+  private static byte[] REDEF_CLASS_BYTES = Base64.getDecoder().decode(
+      "yv66vgAAADUAIwoACAARCQASABMIABQKABUAFgoAFwAYCAAZBwAaBwAbAQAGPGluaXQ+AQADKClW"
+      + "AQAEQ29kZQEAD0xpbmVOdW1iZXJUYWJsZQEABXNheUhpAQAGc2F5QnllAQAKU291cmNlRmlsZQEA"
+      + "DlRlc3RDbGFzcy5qYXZhDAAJAAoHABwMAB0AHgEAKUhlbGxvIGFnYWluIGZyb20gVGVzdENsYXNz"
+      + "IHNheUhpIGZ1bmN0aW9uBwAfDAAgACEHACIMAA0ACgEAHUdvb2RieWUgYWdhaW4gZnJvbSBUZXN0"
+      + "Q2xhc3MhAQAQZm9vYmFyL1Rlc3RDbGFzcwEAEGphdmEvbGFuZy9PYmplY3QBABBqYXZhL2xhbmcv"
+      + "U3lzdGVtAQADb3V0AQAVTGphdmEvaW8vUHJpbnRTdHJlYW07AQATamF2YS9pby9QcmludFN0cmVh"
+      + "bQEAB3ByaW50bG4BABUoTGphdmEvbGFuZy9TdHJpbmc7KVYBAA9mb29iYXIvTmV3Q2xhc3MAIQAH"
+      + "AAgAAAAAAAMAAQAJAAoAAQALAAAAHQABAAEAAAAFKrcAAbEAAAABAAwAAAAGAAEAAAACAAkADQAK"
+      + "AAEACwAAACwAAgAAAAAADLIAAhIDtgAEuAAFsQAAAAEADAAAAA4AAwAAAAQACAAFAAsABgAIAA4A"
+      + "CgABAAsAAAAlAAIAAAAAAAmyAAISBrYABLEAAAABAAwAAAAKAAIAAAAIAAgACQABAA8AAAACABA=");
+
+  private static byte[] REDEF_DEX_BYTES = Base64.getDecoder().decode(
+      "ZGV4CjAzNQA2plEeYRH4vl6wJgnAZOVcZ537QN9NXB3wAwAAcAAAAHhWNBIAAAAAAAAAAEQDAAAR"
+      + "AAAAcAAAAAcAAAC0AAAAAgAAANAAAAABAAAA6AAAAAYAAADwAAAAAQAAACABAACwAgAAQAEAALYB"
+      + "AAC+AQAA3QEAAAgCAAAbAgAALwIAAEYCAABaAgAAbgIAAIICAACSAgAAlQIAAJkCAACeAgAApwIA"
+      + "AK8CAAC2AgAAAwAAAAQAAAAFAAAABgAAAAcAAAAIAAAACgAAAAoAAAAGAAAAAAAAAAsAAAAGAAAA"
+      + "sAEAAAUAAgAMAAAAAAAAAA8AAAABAAAAAAAAAAEAAAAOAAAAAQAAAA8AAAACAAEADQAAAAMAAAAA"
+      + "AAAAAQAAAAEAAAADAAAAAAAAAAkAAAAAAAAALQMAAAAAAAABAAEAAQAAAJ4BAAAEAAAAcBAFAAAA"
+      + "DgACAAAAAgAAAKIBAAAIAAAAYgAAABoBAQBuIAQAEAAOAAIAAAACAAAApwEAAAsAAABiAAAAGgEC"
+      + "AG4gBAAQAHEAAAAAAA4AAgAOAAgADngABAAOeDwAAAAAAQAAAAQABjxpbml0PgAdR29vZGJ5ZSBh"
+      + "Z2FpbiBmcm9tIFRlc3RDbGFzcyEAKUhlbGxvIGFnYWluIGZyb20gVGVzdENsYXNzIHNheUhpIGZ1"
+      + "bmN0aW9uABFMZm9vYmFyL05ld0NsYXNzOwASTGZvb2Jhci9UZXN0Q2xhc3M7ABVMamF2YS9pby9Q"
+      + "cmludFN0cmVhbTsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABJMamF2"
+      + "YS9sYW5nL1N5c3RlbTsADlRlc3RDbGFzcy5qYXZhAAFWAAJWTAADb3V0AAdwcmludGxuAAZzYXlC"
+      + "eWUABXNheUhpAHV+fkQ4eyJjb21waWxhdGlvbi1tb2RlIjoiZGVidWciLCJtaW4tYXBpIjoxLCJz"
+      + "aGEtMSI6ImQzMjgyYjhmNTQ3YzIzNGM0ZTRjOTMwOWMzNmM3OTVhMjk4NTZlYWIiLCJ2ZXJzaW9u"
+      + "IjoiMS42LjEtZGV2In0AAAADAAGBgATAAgEI2AIBCfgCAAAAAAAOAAAAAAAAAAEAAAAAAAAAAQAA"
+      + "ABEAAABwAAAAAgAAAAcAAAC0AAAAAwAAAAIAAADQAAAABAAAAAEAAADoAAAABQAAAAYAAADwAAAA"
+      + "BgAAAAEAAAAgAQAAASAAAAMAAABAAQAAAyAAAAMAAACeAQAAARAAAAEAAACwAQAAAiAAABEAAAC2"
+      + "AQAAACAAAAEAAAAtAwAAAxAAAAEAAABAAwAAABAAAAEAAABEAwAA");
+
+  // Prints only the top frame of |t|'s stack trace in a fixed format so the
+  // output is stable across ART and the reference implementation.
+  public static void SafePrintCause(Throwable t) {
+    StackTraceElement cause = t.getStackTrace()[0];
+    System.out.println(" --- " + t.getClass().getName() + " At " + cause.getClassName() + "." +
+                       cause.getMethodName() + "(" + cause.getFileName() + ":" +
+                       cause.getLineNumber() + ")");
+  }
+
+  // Runs the scenario twice: with NewClass added to the loader (expected to
+  // succeed) and without (expected to fail with NoClassDefFoundError).
+  public static void run() throws Exception {
+    System.out.println(" - Run while adding new referenced class.");
+    try {
+      run(true);
+    } catch (Exception e) {
+      // Unfortunately art and RI have different messages here so just return the type.
+      System.out.println(" -- Exception caught when running test with new class added! " +
+                         e.getCause().getClass().getName());
+      SafePrintCause(e.getCause());
+      // NOTE(review): the two prints below look like leftover debugging; in
+      // the passing case this catch never fires (see expected.txt).
+      System.out.println(e);
+      e.printStackTrace();
+    }
+    System.out.println(" - Run without adding new referenced class.");
+    try {
+      run(false);
+    } catch (Exception e) {
+      // Unfortunately art and RI have different messages here so just return the type.
+      System.out.println(" -- Exception caught when running test without new class added! " +
+                         e.getCause().getClass().getName());
+      SafePrintCause(e.getCause());
+    }
+  }
+
+  // Loads TestClass in a fresh loader, optionally makes NewClass visible to
+  // that loader, then redefines TestClass to a version calling NewClass.
+  public static void run(boolean add_new) throws Exception {
+    ClassLoader cl = getClassLoader();
+    Class<?> target = cl.loadClass(TEST_CLASS_NAME);
+    Method sayHi = target.getDeclaredMethod("sayHi");
+    System.out.println(" -- Running sayHi before redefinition");
+    sayHi.invoke(null);
+    if (add_new) {
+      System.out.println(" -- Adding NewClass to classloader!");
+      addToClassLoader(cl);
+    }
+    System.out.println(" -- Redefine the TestClass");
+    Redefinition.doCommonClassRedefinition(target, REDEF_CLASS_BYTES, REDEF_DEX_BYTES);
+    System.out.println(" -- call TestClass again, now with NewClass refs");
+    sayHi.invoke(null);
+  }
+
+  // RI-only loader: starts with no URLs and lets the test append one later,
+  // mirroring what the JVMTI extension does for ART class-loaders.
+  public static class ExtensibleClassLoader extends URLClassLoader {
+    public ExtensibleClassLoader() {
+      // Initially we don't have any URLs
+      super(new URL[] {}, ExtensibleClassLoader.class.getClassLoader());
+    }
+
+    // Exposes the protected URLClassLoader.addURL for the test.
+    public void addSingleUrl(String file) throws Exception {
+      this.addURL(new URL("file://" + file));
+    }
+
+    protected Class<?> findClass(String name) throws ClassNotFoundException {
+      // Just define the TestClass without other jars.
+      if (name.equals(TEST_CLASS_NAME)) {
+        return this.defineClass(TEST_CLASS_NAME, CLASS_BYTES, 0, CLASS_BYTES.length);
+      } else {
+        return super.findClass(name);
+      }
+    }
+  }
+
+  // Builds the loader holding TestClass: an InMemoryDexClassLoader over
+  // DEX_BYTES on ART (looked up reflectively since the class only exists
+  // there), or an ExtensibleClassLoader on the RI.
+  public static ClassLoader getClassLoader() throws Exception {
+    if (!IS_ART) {
+      return new ExtensibleClassLoader();
+    } else {
+      Class<?> class_loader_class = Class.forName("dalvik.system.InMemoryDexClassLoader");
+      Constructor<?> ctor = class_loader_class.getConstructor(ByteBuffer.class, ClassLoader.class);
+      return (ClassLoader)ctor.newInstance(ByteBuffer.wrap(DEX_BYTES), Main.class.getClassLoader());
+    }
+  }
+
+  // Makes NewClass visible to |cl|: via the native JVMTI extension and the
+  // -ex.jar on ART, or by appending the classes-ex directory URL on the RI.
+  public static void addToClassLoader(ClassLoader cl) throws Exception {
+    if (IS_ART) {
+      addToClassLoaderNative(cl, System.getenv("DEX_LOCATION") + "/" + TEST_NAME + "-ex.jar");
+    } else {
+      ((ExtensibleClassLoader)cl).addSingleUrl(System.getenv("DEX_LOCATION") + "/classes-ex/");
+    }
+  }
+
+  // Implemented in the test's native library (see the .cc in this patch).
+  public static native void addToClassLoaderNative(ClassLoader loader, String segment);
+  public static void main(String[] args) throws Exception {
+    run();
+  }
+}
diff --git a/test/1964-add-to-dex-classloader-file/src/art/Breakpoint.java b/test/1964-add-to-dex-classloader-file/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1964-add-to-dex-classloader-file/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1964-add-to-dex-classloader-file/src/art/Redefinition.java b/test/1964-add-to-dex-classloader-file/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1964-add-to-dex-classloader-file/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1964-add-to-dex-classloader-file/src/art/StackTrace.java b/test/1964-add-to-dex-classloader-file/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1964-add-to-dex-classloader-file/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1964-add-to-dex-classloader-file/src/art/Suspension.java b/test/1964-add-to-dex-classloader-file/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1964-add-to-dex-classloader-file/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1965-get-set-local-primitive-no-tables/build b/test/1965-get-set-local-primitive-no-tables/build
new file mode 100644
index 0000000..6631df9
--- /dev/null
+++ b/test/1965-get-set-local-primitive-no-tables/build
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+# The test carries both jasmin (JVM bytecode) and smali (dex) sources for the
+# same classes; rename away the set that does not apply to the current target
+# so default-build only assembles one of them.
+if [[ $@ != *"--jvm"* ]]; then
+  mv jasmin jasmin-unused
+else
+  mv smali smali-unused
+fi
+./default-build "$@" 
diff --git a/test/1965-get-set-local-primitive-no-tables/expected.txt b/test/1965-get-set-local-primitive-no-tables/expected.txt
new file mode 100644
index 0000000..97f8ab0
--- /dev/null
+++ b/test/1965-get-set-local-primitive-no-tables/expected.txt
@@ -0,0 +1,210 @@
+Running public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetInt" on remote thread.
+"GetInt" on public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) got value: 42
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetLong" on remote thread.
+"GetLong" on public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetFloat" on remote thread.
+"GetFloat" on public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) got value: 5.9E-44
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetDouble" on remote thread.
+"GetDouble" on public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetObject" on remote thread.
+"GetObject" on public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetInt" on remote thread.
+"SetInt" on public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) set value: 2147483647
+	Value is '2147483647' (class: class java.lang.Integer)
+Running public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetLong" on remote thread.
+"SetLong" on public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value 9223372036854775807 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetFloat" on remote thread.
+"SetFloat" on public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) set value: 9.2
+	Value is '1091777331' (class: class java.lang.Integer)
+Running public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetDouble" on remote thread.
+"SetDouble" on public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value 12.4 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetObject" on remote thread.
+"SetObject" on public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value NEW_VALUE_FOR_SET due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetNullObject" on remote thread.
+"SetNullObject" on public static void art_test.TestCases1965.IntMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value null due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetInt" on remote thread.
+"GetInt" on public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetLong" on remote thread.
+"GetLong" on public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) got value: 9001
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetFloat" on remote thread.
+"GetFloat" on public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetDouble" on remote thread.
+"GetDouble" on public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) got value: 4.447E-320
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetObject" on remote thread.
+"GetObject" on public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetInt" on remote thread.
+"SetInt" on public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value 2147483647 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetLong" on remote thread.
+"SetLong" on public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) set value: 9223372036854775807
+	Value is '9223372036854775807' (class: class java.lang.Long)
+Running public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetFloat" on remote thread.
+"SetFloat" on public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value 9.2 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetDouble" on remote thread.
+"SetDouble" on public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) set value: 12.4
+	Value is '4623170197477182669' (class: class java.lang.Long)
+Running public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetObject" on remote thread.
+"SetObject" on public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value NEW_VALUE_FOR_SET due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetNullObject" on remote thread.
+"SetNullObject" on public static void art_test.TestCases1965.LongMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value null due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetInt" on remote thread.
+"GetInt" on public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) got value: 1070537376
+	Value is '1.618' (class: class java.lang.Float)
+Running public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetLong" on remote thread.
+"GetLong" on public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '1.618' (class: class java.lang.Float)
+Running public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetFloat" on remote thread.
+"GetFloat" on public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) got value: 1.618
+	Value is '1.618' (class: class java.lang.Float)
+Running public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetDouble" on remote thread.
+"GetDouble" on public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '1.618' (class: class java.lang.Float)
+Running public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetObject" on remote thread.
+"GetObject" on public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '1.618' (class: class java.lang.Float)
+Running public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetInt" on remote thread.
+"SetInt" on public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) set value: 2147483647
+	Value is 'NaN' (class: class java.lang.Float)
+Running public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetLong" on remote thread.
+"SetLong" on public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value 9223372036854775807 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '1.618' (class: class java.lang.Float)
+Running public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetFloat" on remote thread.
+"SetFloat" on public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) set value: 9.2
+	Value is '9.2' (class: class java.lang.Float)
+Running public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetDouble" on remote thread.
+"SetDouble" on public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value 12.4 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '1.618' (class: class java.lang.Float)
+Running public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetObject" on remote thread.
+"SetObject" on public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value NEW_VALUE_FOR_SET due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '1.618' (class: class java.lang.Float)
+Running public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetNullObject" on remote thread.
+"SetNullObject" on public static void art_test.TestCases1965.FloatMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value null due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '1.618' (class: class java.lang.Float)
+Running public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetInt" on remote thread.
+"GetInt" on public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '3.1415' (class: class java.lang.Double)
+Running public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetLong" on remote thread.
+"GetLong" on public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) got value: 4614256447914709615
+	Value is '3.1415' (class: class java.lang.Double)
+Running public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetFloat" on remote thread.
+"GetFloat" on public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '3.1415' (class: class java.lang.Double)
+Running public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetDouble" on remote thread.
+"GetDouble" on public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) got value: 3.1415
+	Value is '3.1415' (class: class java.lang.Double)
+Running public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetObject" on remote thread.
+"GetObject" on public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '3.1415' (class: class java.lang.Double)
+Running public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetInt" on remote thread.
+"SetInt" on public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value 2147483647 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '3.1415' (class: class java.lang.Double)
+Running public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetLong" on remote thread.
+"SetLong" on public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) set value: 9223372036854775807
+	Value is 'NaN' (class: class java.lang.Double)
+Running public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetFloat" on remote thread.
+"SetFloat" on public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value 9.2 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '3.1415' (class: class java.lang.Double)
+Running public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetDouble" on remote thread.
+"SetDouble" on public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) set value: 12.4
+	Value is '12.4' (class: class java.lang.Double)
+Running public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetObject" on remote thread.
+"SetObject" on public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value NEW_VALUE_FOR_SET due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '3.1415' (class: class java.lang.Double)
+Running public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetNullObject" on remote thread.
+"SetNullObject" on public static void art_test.TestCases1965.DoubleMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value null due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '3.1415' (class: class java.lang.Double)
+Running public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetInt" on remote thread.
+"GetInt" on public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TARGET_VALUE' (class: class java.lang.String)
+Running public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetLong" on remote thread.
+"GetLong" on public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TARGET_VALUE' (class: class java.lang.String)
+Running public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetFloat" on remote thread.
+"GetFloat" on public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TARGET_VALUE' (class: class java.lang.String)
+Running public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetDouble" on remote thread.
+"GetDouble" on public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TARGET_VALUE' (class: class java.lang.String)
+Running public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetObject" on remote thread.
+"GetObject" on public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) got value: TARGET_VALUE
+	Value is 'TARGET_VALUE' (class: class java.lang.String)
+Running public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetInt" on remote thread.
+"SetInt" on public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value 2147483647 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TARGET_VALUE' (class: class java.lang.String)
+Running public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetLong" on remote thread.
+"SetLong" on public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value 9223372036854775807 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TARGET_VALUE' (class: class java.lang.String)
+Running public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetFloat" on remote thread.
+"SetFloat" on public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value 9.2 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TARGET_VALUE' (class: class java.lang.String)
+Running public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetDouble" on remote thread.
+"SetDouble" on public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value 12.4 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TARGET_VALUE' (class: class java.lang.String)
+Running public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetObject" on remote thread.
+"SetObject" on public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) set value: NEW_VALUE_FOR_SET
+	Value is 'NEW_VALUE_FOR_SET' (class: class java.lang.String)
+Running public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetNullObject" on remote thread.
+"SetNullObject" on public static void art_test.TestCases1965.ObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) set value: null
+	Value is 'null' (class: null)
+Running public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetInt" on remote thread.
+"GetInt" on public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) got value: 0
+	Value is 'null' (class: null)
+Running public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetLong" on remote thread.
+"GetLong" on public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'null' (class: null)
+Running public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetFloat" on remote thread.
+"GetFloat" on public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) got value: 0.0
+	Value is 'null' (class: null)
+Running public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetDouble" on remote thread.
+"GetDouble" on public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'null' (class: null)
+Running public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "GetObject" on remote thread.
+"GetObject" on public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) got value: null
+	Value is 'null' (class: null)
+Running public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetInt" on remote thread.
+"SetInt" on public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value 2147483647 due to JVMTI_ERROR_INTERNAL
+	Value is 'null' (class: null)
+Running public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetLong" on remote thread.
+"SetLong" on public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value 9223372036854775807 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'null' (class: null)
+Running public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetFloat" on remote thread.
+"SetFloat" on public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value 9.2 due to JVMTI_ERROR_INTERNAL
+	Value is 'null' (class: null)
+Running public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetDouble" on remote thread.
+"SetDouble" on public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value 12.4 due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'null' (class: null)
+Running public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetObject" on remote thread.
+"SetObject" on public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value NEW_VALUE_FOR_SET due to JVMTI_ERROR_INTERNAL
+	Value is 'null' (class: null)
+Running public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetNullObject" on remote thread.
+"SetNullObject" on public static void art_test.TestCases1965.NullObjectMethod(java.util.function.IntConsumer,java.util.function.Consumer) set value: null
+	Value is 'null' (class: null)
+Running public static void art_test.TestCases1965.BooleanMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetIntBoolSize" on remote thread.
+"SetIntBoolSize" on public static void art_test.TestCases1965.BooleanMethod(java.util.function.IntConsumer,java.util.function.Consumer) failed to set value 1 due to JVMTI_ERROR_INTERNAL
+	Value is 'false' (class: class java.lang.Boolean)
+Running public static void art_test.TestCases1965.ByteMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetIntByteSize" on remote thread.
+"SetIntByteSize" on public static void art_test.TestCases1965.ByteMethod(java.util.function.IntConsumer,java.util.function.Consumer) set value: 126
+	Value is '126' (class: class java.lang.Byte)
+Running public static void art_test.TestCases1965.CharMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetIntCharSize" on remote thread.
+"SetIntCharSize" on public static void art_test.TestCases1965.CharMethod(java.util.function.IntConsumer,java.util.function.Consumer) set value: 65534
+	Value is '<Char: -1>' (class: class java.lang.String)
+Running public static void art_test.TestCases1965.ShortMethod(java.util.function.IntConsumer,java.util.function.Consumer) with "SetIntShortSize" on remote thread.
+"SetIntShortSize" on public static void art_test.TestCases1965.ShortMethod(java.util.function.IntConsumer,java.util.function.Consumer) set value: 32766
+	Value is '32766' (class: class java.lang.Short)
diff --git a/test/1965-get-set-local-primitive-no-tables/info.txt b/test/1965-get-set-local-primitive-no-tables/info.txt
new file mode 100644
index 0000000..87a7b35
--- /dev/null
+++ b/test/1965-get-set-local-primitive-no-tables/info.txt
@@ -0,0 +1,2 @@
+Tests for jvmti get/set Local variable primitives.
+
diff --git a/test/1965-get-set-local-primitive-no-tables/jasmin/TestCases1965.j b/test/1965-get-set-local-primitive-no-tables/jasmin/TestCases1965.j
new file mode 100644
index 0000000..6374262
--- /dev/null
+++ b/test/1965-get-set-local-primitive-no-tables/jasmin/TestCases1965.j
@@ -0,0 +1,173 @@
+; Copyright (C) 2019 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public final art_test/TestCases1965
+.super java/lang/Object
+
+.method public <init>()V
+  .limit stack 1
+  .limit locals 1
+  0: aload_0
+  1: invokespecial java/lang/Object/<init>()V
+  4: return
+.end method
+
+; NB We limit locals 4 so that every method has space to fit a long/double in it.
+.method public static NullObjectMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+  .limit stack 2
+  .limit locals 4
+  0: aconst_null
+  1: astore_2
+  2: aload_0
+  3: sipush 2
+  6: invokeinterface java/util/function/IntConsumer/accept(I)V 2
+  11: aload_1
+  12: aload_2
+  13: invokeinterface java/util/function/Consumer/accept(Ljava/lang/Object;)V 2
+  18: return
+.end method
+
+.method public static ObjectMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+  .limit stack 2
+  .limit locals 4
+  0: ldc "TARGET_VALUE"
+  2: astore_2
+  3: aload_0
+  4: sipush 2
+  7: invokeinterface java/util/function/IntConsumer/accept(I)V 2
+  12: aload_1
+  13: aload_2
+  14: invokeinterface java/util/function/Consumer/accept(Ljava/lang/Object;)V 2
+  19: return
+.end method
+
+.method public static BooleanMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+  .limit stack 2
+  .limit locals 4
+  0: iconst_0
+  1: istore_2
+  2: aload_0
+  3: sipush 2
+  6: invokeinterface java/util/function/IntConsumer/accept(I)V 2
+  11: aload_1
+  12: iload_2
+  13: invokestatic java/lang/Boolean/valueOf(Z)Ljava/lang/Boolean;
+  16: invokeinterface java/util/function/Consumer/accept(Ljava/lang/Object;)V 2
+  21: return
+.end method
+
+.method public static ByteMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+  .limit stack 2
+  .limit locals 4
+  0: bipush 8
+  2: istore_2
+  3: aload_0
+  4: sipush 2
+  7: invokeinterface java/util/function/IntConsumer/accept(I)V 2
+  12: aload_1
+  13: iload_2
+  14: invokestatic java/lang/Byte/valueOf(B)Ljava/lang/Byte;
+  17: invokeinterface java/util/function/Consumer/accept(Ljava/lang/Object;)V 2
+  22: return
+.end method
+
+.method public static CharMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+  .limit stack 2
+  .limit locals 4
+  0: bipush 113
+  2: istore_2
+  3: aload_0
+  4: sipush 2
+  7: invokeinterface java/util/function/IntConsumer/accept(I)V 2
+  12: aload_1
+  13: iload_2
+  14: invokestatic java/lang/Character/valueOf(C)Ljava/lang/Character;
+  17: invokeinterface java/util/function/Consumer/accept(Ljava/lang/Object;)V 2
+  22: return
+.end method
+
+.method public static ShortMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+  .limit stack 2
+  .limit locals 4
+  0: sipush 321
+  3: istore_2
+  4: aload_0
+  5: sipush 2
+  8: invokeinterface java/util/function/IntConsumer/accept(I)V 2
+  13: aload_1
+  14: iload_2
+  15: invokestatic java/lang/Short/valueOf(S)Ljava/lang/Short;
+  18: invokeinterface java/util/function/Consumer/accept(Ljava/lang/Object;)V 2
+  23: return
+.end method
+
+.method public static IntMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+  .limit stack 2
+  .limit locals 4
+  0: bipush 42
+  2: istore_2
+  3: aload_0
+  4: sipush 2
+  7: invokeinterface java/util/function/IntConsumer/accept(I)V 2
+  12: aload_1
+  13: iload_2
+  14: invokestatic java/lang/Integer/valueOf(I)Ljava/lang/Integer;
+  17: invokeinterface java/util/function/Consumer/accept(Ljava/lang/Object;)V 2
+  22: return
+.end method
+
+.method public static LongMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+  .limit stack 3
+  .limit locals 4
+  0: ldc2_w 9001
+  3: lstore_2
+  4: aload_0
+  5: sipush 2
+  8: invokeinterface java/util/function/IntConsumer/accept(I)V 2
+  13: aload_1
+  14: lload_2
+  15: invokestatic java/lang/Long/valueOf(J)Ljava/lang/Long;
+  18: invokeinterface java/util/function/Consumer/accept(Ljava/lang/Object;)V 2
+  23: return
+.end method
+
+.method public static FloatMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+  .limit stack 2
+  .limit locals 4
+  0: ldc 1.618
+  2: fstore_2
+  3: aload_0
+  4: sipush 2
+  7: invokeinterface java/util/function/IntConsumer/accept(I)V 2
+  12: aload_1
+  13: fload_2
+  14: invokestatic java/lang/Float/valueOf(F)Ljava/lang/Float;
+  17: invokeinterface java/util/function/Consumer/accept(Ljava/lang/Object;)V 2
+  22: return
+.end method
+
+.method public static DoubleMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+  .limit stack 3
+  .limit locals 4
+  0: ldc2_w 3.1415
+  3: dstore_2
+  4: aload_0
+  5: sipush 2
+  8: invokeinterface java/util/function/IntConsumer/accept(I)V 2
+  13: aload_1
+  14: dload_2
+  15: invokestatic java/lang/Double/valueOf(D)Ljava/lang/Double;
+  18: invokeinterface java/util/function/Consumer/accept(Ljava/lang/Object;)V 2
+  23: return
+.end method
diff --git a/test/1965-get-set-local-primitive-no-tables/run b/test/1965-get-set-local-primitive-no-tables/run
new file mode 100755
index 0000000..9b741ee
--- /dev/null
+++ b/test/1965-get-set-local-primitive-no-tables/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# If we compile the .oat files non-debuggable we could end up with dex2dex running over the files
+# which will cause some instructions to be removed from smali/TestCases1966.smali. This test relies
+# on the instructions being exactly as written so pass --debuggable to 'dex2oat' only to prevent
+# this from happening.
+./default-run "$@" --jvmti --compiler-only-option --debuggable
diff --git a/test/1965-get-set-local-primitive-no-tables/smali/TestCases1965.smali b/test/1965-get-set-local-primitive-no-tables/smali/TestCases1965.smali
new file mode 100644
index 0000000..29aa08c
--- /dev/null
+++ b/test/1965-get-set-local-primitive-no-tables/smali/TestCases1965.smali
@@ -0,0 +1,140 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public final Lart_test/TestCases1965;
+.super Ljava/lang/Object;
+
+
+# direct methods
+.method public constructor <init>()V
+    .registers 1
+    invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+    return-void
+.end method
+
+.method public static BooleanMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+    .registers 4
+    const/4 v0, 0x0
+    # Slot for value.
+    const/16 v1, 0x0
+    invoke-interface {p0, v1}, Ljava/util/function/IntConsumer;->accept(I)V
+    invoke-static {v0}, Ljava/lang/Boolean;->valueOf(Z)Ljava/lang/Boolean;
+    move-result-object v1
+    invoke-interface {p1, v1}, Ljava/util/function/Consumer;->accept(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public static ByteMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+    .registers 4
+    const/16 v0, 0x8
+    # Slot for value.
+    const/16 v1, 0x0
+    invoke-interface {p0, v1}, Ljava/util/function/IntConsumer;->accept(I)V
+    invoke-static {v0}, Ljava/lang/Byte;->valueOf(B)Ljava/lang/Byte;
+    move-result-object v1
+    invoke-interface {p1, v1}, Ljava/util/function/Consumer;->accept(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public static CharMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+    .registers 4
+    const/16 v0, 0x71
+    # Slot for value
+    const/16 v1, 0x0
+    invoke-interface {p0, v1}, Ljava/util/function/IntConsumer;->accept(I)V
+    invoke-static {v0}, Ljava/lang/Character;->valueOf(C)Ljava/lang/Character;
+    move-result-object v1
+    invoke-interface {p1, v1}, Ljava/util/function/Consumer;->accept(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public static DoubleMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+    .registers 5
+    const-wide v0, 0x400921cac083126fL    # 3.1415
+    # Slot for value
+    const/16 v2, 0x0
+    invoke-interface {p0, v2}, Ljava/util/function/IntConsumer;->accept(I)V
+    invoke-static {v0, v1}, Ljava/lang/Double;->valueOf(D)Ljava/lang/Double;
+    move-result-object v2
+    invoke-interface {p1, v2}, Ljava/util/function/Consumer;->accept(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public static FloatMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+    .registers 4
+    const v0, 0x3fcf1aa0    # 1.618f
+    # Slot for value
+    const/16 v1, 0x0
+    invoke-interface {p0, v1}, Ljava/util/function/IntConsumer;->accept(I)V
+    invoke-static {v0}, Ljava/lang/Float;->valueOf(F)Ljava/lang/Float;
+    move-result-object v1
+    invoke-interface {p1, v1}, Ljava/util/function/Consumer;->accept(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public static IntMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+    .registers 4
+    const/16 v0, 0x2a
+    # Slot for value
+    const/16 v1, 0x0
+    invoke-interface {p0, v1}, Ljava/util/function/IntConsumer;->accept(I)V
+    invoke-static {v0}, Ljava/lang/Integer;->valueOf(I)Ljava/lang/Integer;
+    move-result-object v1
+    invoke-interface {p1, v1}, Ljava/util/function/Consumer;->accept(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public static LongMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+    .registers 5
+    const-wide/16 v0, 0x2329
+    # Slot for value
+    const/16 v2, 0x0
+    invoke-interface {p0, v2}, Ljava/util/function/IntConsumer;->accept(I)V
+    invoke-static {v0, v1}, Ljava/lang/Long;->valueOf(J)Ljava/lang/Long;
+    move-result-object v2
+    invoke-interface {p1, v2}, Ljava/util/function/Consumer;->accept(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public static NullObjectMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+    .registers 4
+    const/4 v0, 0x0
+    # Slot for value
+    const/16 v1, 0x0
+    invoke-interface {p0, v1}, Ljava/util/function/IntConsumer;->accept(I)V
+    invoke-interface {p1, v0}, Ljava/util/function/Consumer;->accept(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public static ObjectMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+    .registers 4
+    const-string v0, "TARGET_VALUE"
+    # Slot for value
+    const/16 v1, 0x0
+    invoke-interface {p0, v1}, Ljava/util/function/IntConsumer;->accept(I)V
+    invoke-interface {p1, v0}, Ljava/util/function/Consumer;->accept(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public static ShortMethod(Ljava/util/function/IntConsumer;Ljava/util/function/Consumer;)V
+    .registers 4
+    const/16 v0, 0x141
+    # slot for value
+    const/16 v1, 0x0
+    invoke-interface {p0, v1}, Ljava/util/function/IntConsumer;->accept(I)V
+    invoke-static {v0}, Ljava/lang/Short;->valueOf(S)Ljava/lang/Short;
+    move-result-object v1
+    invoke-interface {p1, v1}, Ljava/util/function/Consumer;->accept(Ljava/lang/Object;)V
+    return-void
+.end method
diff --git a/test/1965-get-set-local-primitive-no-tables/src/Main.java b/test/1965-get-set-local-primitive-no-tables/src/Main.java
new file mode 100644
index 0000000..73ce85f
--- /dev/null
+++ b/test/1965-get-set-local-primitive-no-tables/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1965.run();
+  }
+}
diff --git a/test/1965-get-set-local-primitive-no-tables/src/art/Breakpoint.java b/test/1965-get-set-local-primitive-no-tables/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1965-get-set-local-primitive-no-tables/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1965-get-set-local-primitive-no-tables/src/art/Locals.java b/test/1965-get-set-local-primitive-no-tables/src/art/Locals.java
new file mode 120000
index 0000000..2998386
--- /dev/null
+++ b/test/1965-get-set-local-primitive-no-tables/src/art/Locals.java
@@ -0,0 +1 @@
+../../../jvmti-common/Locals.java
\ No newline at end of file
diff --git a/test/1965-get-set-local-primitive-no-tables/src/art/StackTrace.java b/test/1965-get-set-local-primitive-no-tables/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1965-get-set-local-primitive-no-tables/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1965-get-set-local-primitive-no-tables/src/art/Suspension.java b/test/1965-get-set-local-primitive-no-tables/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1965-get-set-local-primitive-no-tables/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1965-get-set-local-primitive-no-tables/src/art/Test1965.java b/test/1965-get-set-local-primitive-no-tables/src/art/Test1965.java
new file mode 100644
index 0000000..f516d18
--- /dev/null
+++ b/test/1965-get-set-local-primitive-no-tables/src/art/Test1965.java
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Executable;
+import java.lang.reflect.Method;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Semaphore;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.function.IntConsumer;
+import java.util.function.IntFunction;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+
+public class Test1965 {
+  public static final String TARGET_VAR = "TARGET";
+
+  public static void reportValue(Object val) {
+    if (val instanceof Character) {
+      val = "<Char: " + Character.getNumericValue(((Character)val).charValue()) + ">";
+    }
+    System.out.println("\tValue is '" + val +
+                       "' (class: " + (val != null ? val.getClass().toString() : "null") + ")");
+  }
+
+  public static interface SafepointFunction {
+    public void invoke(Thread thread, Method target, int slot, int depth) throws Exception;
+  }
+
+  public static interface SetterFunction {
+    public void SetVar(Thread t, int depth, int slot, Object v);
+  }
+
+  public static interface GetterFunction { public Object GetVar(Thread t, int depth, int slot); }
+
+  public static SafepointFunction
+  NamedSet(final String type, final SetterFunction get, final Object v) {
+    return new SafepointFunction() {
+      public void invoke(Thread t, Method method, int slot, int depth) {
+        try {
+          get.SetVar(t, depth, slot, v);
+          System.out.println(this + " on " + method + " set value: " + v);
+        } catch (Exception e) {
+          System.out.println(this + " on " + method + " failed to set value " + v + " due to " +
+                             e.getMessage());
+        }
+      }
+      public String toString() {
+        return "\"Set" + type + "\"";
+      }
+    };
+  }
+
+  public static SafepointFunction NamedGet(final String type, final GetterFunction get) {
+    return new SafepointFunction() {
+      public void invoke(Thread t, Method method, int slot, int depth) {
+        try {
+          Object res = get.GetVar(t, depth, slot);
+          System.out.println(this + " on " + method + " got value: " + res);
+        } catch (Exception e) {
+          System.out.println(this + " on " + method + " failed due to " + e.getMessage());
+        }
+      }
+      public String toString() {
+        return "\"Get" + type + "\"";
+      }
+    };
+  }
+
+  public static class TestCase {
+    public final Method target;
+
+    public TestCase(Method target) {
+      this.target = target;
+    }
+
+    public static class ThreadPauser implements IntConsumer {
+      public final Semaphore sem_wakeup_main;
+      public final Semaphore sem_wait;
+      public int slot = -1;
+
+      public ThreadPauser() {
+        sem_wakeup_main = new Semaphore(0);
+        sem_wait = new Semaphore(0);
+      }
+
+      public void accept(int v) {
+        try {
+          slot = v;
+          sem_wakeup_main.release();
+          sem_wait.acquire();
+        } catch (Exception e) {
+          throw new Error("Error with semaphores!", e);
+        }
+      }
+
+      public void waitForOtherThreadToPause() throws Exception {
+        sem_wakeup_main.acquire();
+      }
+
+      public void wakeupOtherThread() throws Exception {
+        sem_wait.release();
+      }
+    }
+
+    public void exec(final SafepointFunction safepoint) throws Exception {
+      System.out.println("Running " + target + " with " + safepoint + " on remote thread.");
+      final ThreadPauser pause = new ThreadPauser();
+      final Consumer<?> reporter = Test1965::reportValue;
+      Thread remote = new Thread(() -> {
+        try {
+          target.invoke(null, pause, reporter);
+        } catch (Exception e) {
+          throw new Error("Error invoking remote thread " + Thread.currentThread(), e);
+        }
+      }, "remote thread for " + target + " with " + safepoint);
+      remote.start();
+      pause.waitForOtherThreadToPause();
+      try {
+        Suspension.suspend(remote);
+        StackTrace.StackFrameData frame = findStackFrame(remote);
+        safepoint.invoke(remote, target, pause.slot, frame.depth);
+      } finally {
+        Suspension.resume(remote);
+        pause.wakeupOtherThread();
+        remote.join();
+      }
+    }
+
+    private Locals.VariableDescription findTargetVar(long loc) {
+      for (Locals.VariableDescription var : Locals.GetLocalVariableTable(target)) {
+        if (var.start_location <= loc && var.length + var.start_location > loc &&
+            var.name.equals(TARGET_VAR)) {
+          return var;
+        }
+      }
+      throw new Error("Unable to find variable " + TARGET_VAR + " in " + target + " at loc " + loc);
+    }
+
+    private StackTrace.StackFrameData findStackFrame(Thread thr) {
+      for (StackTrace.StackFrameData frame : StackTrace.GetStackTrace(thr)) {
+        if (frame.method.equals(target)) {
+          return frame;
+        }
+      }
+      throw new Error("Unable to find stack frame in method " + target + " on thread " + thr);
+    }
+  }
+  public static Method getMethod(String name) throws Exception {
+    return Class.forName("art_test.TestCases1965")
+        .getDeclaredMethod(name, IntConsumer.class, Consumer.class);
+  }
+
+  public static void run() throws Exception {
+    Locals.EnableLocalVariableAccess();
+    final TestCase[] MAIN_TEST_CASES = new TestCase[] {
+      new TestCase(getMethod("IntMethod")),    new TestCase(getMethod("LongMethod")),
+      new TestCase(getMethod("FloatMethod")),  new TestCase(getMethod("DoubleMethod")),
+      new TestCase(getMethod("ObjectMethod")), new TestCase(getMethod("NullObjectMethod")),
+    };
+
+    final SafepointFunction[] SAFEPOINTS = new SafepointFunction[] {
+      NamedGet("Int", Locals::GetLocalVariableInt),
+      NamedGet("Long", Locals::GetLocalVariableLong),
+      NamedGet("Float", Locals::GetLocalVariableFloat),
+      NamedGet("Double", Locals::GetLocalVariableDouble),
+      NamedGet("Object", Locals::GetLocalVariableObject),
+      NamedSet("Int", Locals::SetLocalVariableInt, Integer.MAX_VALUE),
+      NamedSet("Long", Locals::SetLocalVariableLong, Long.MAX_VALUE),
+      NamedSet("Float", Locals::SetLocalVariableFloat, 9.2f),
+      NamedSet("Double", Locals::SetLocalVariableDouble, 12.4d),
+      NamedSet("Object", Locals::SetLocalVariableObject, "NEW_VALUE_FOR_SET"),
+      NamedSet("NullObject", Locals::SetLocalVariableObject, null),
+    };
+
+    for (TestCase t : MAIN_TEST_CASES) {
+      for (SafepointFunction s : SAFEPOINTS) {
+        t.exec(s);
+      }
+    }
+
+    // Test int for small values.
+    new TestCase(getMethod("BooleanMethod"))
+        .exec(NamedSet("IntBoolSize", Locals::SetLocalVariableInt, 1));
+    new TestCase(getMethod("ByteMethod"))
+        .exec(NamedSet("IntByteSize", Locals::SetLocalVariableInt, Byte.MAX_VALUE - 1));
+
+    new TestCase(getMethod("CharMethod"))
+        .exec(NamedSet("IntCharSize", Locals::SetLocalVariableInt, Character.MAX_VALUE - 1));
+    new TestCase(getMethod("ShortMethod"))
+        .exec(NamedSet("IntShortSize", Locals::SetLocalVariableInt, Short.MAX_VALUE - 1));
+  }
+}
diff --git a/test/1966-get-set-local-objects-no-table/build b/test/1966-get-set-local-objects-no-table/build
new file mode 100644
index 0000000..6631df9
--- /dev/null
+++ b/test/1966-get-set-local-objects-no-table/build
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# make us exit on a failure
+set -e
+
+if [[ $@ != *"--jvm"* ]]; then
+  mv jasmin jasmin-unused
+else
+  mv smali smali-unused
+fi
+./default-build "$@" 
diff --git a/test/1966-get-set-local-objects-no-table/expected.txt b/test/1966-get-set-local-objects-no-table/expected.txt
new file mode 100644
index 0000000..f24dfe7
--- /dev/null
+++ b/test/1966-get-set-local-objects-no-table/expected.txt
@@ -0,0 +1,162 @@
+Running public static void art_test.TestCases1966.ObjectMethod(java.util.function.IntConsumer) with "GetGetObject" on remote thread.
+"GetGetObject" on public static void art_test.TestCases1966.ObjectMethod(java.util.function.IntConsumer) got value: TestClass1("ObjectMethod")
+	Value is 'TestClass1("ObjectMethod")' (class: class art.Test1966$TestClass1)
+Running public static void art_test.TestCases1966.ObjectMethod(java.util.function.IntConsumer) with "SetNull" on remote thread.
+"SetNull" on public static void art_test.TestCases1966.ObjectMethod(java.util.function.IntConsumer) set value: null
+	Value is 'null' (class: NULL)
+Running public static void art_test.TestCases1966.ObjectMethod(java.util.function.IntConsumer) with "SetTestClass1" on remote thread.
+"SetTestClass1" on public static void art_test.TestCases1966.ObjectMethod(java.util.function.IntConsumer) set value: TestClass1("Set TestClass1")
+	Value is 'TestClass1("Set TestClass1")' (class: class art.Test1966$TestClass1)
+Running public static void art_test.TestCases1966.ObjectMethod(java.util.function.IntConsumer) with "SetTestClass1ext" on remote thread.
+"SetTestClass1ext" on public static void art_test.TestCases1966.ObjectMethod(java.util.function.IntConsumer) set value: TestClass1ext("TestClass1("Set TestClass1ext")")
+	Value is 'TestClass1ext("TestClass1("Set TestClass1ext")")' (class: class art.Test1966$TestClass1ext)
+Running public static void art_test.TestCases1966.ObjectMethod(java.util.function.IntConsumer) with "SetTestClass2" on remote thread.
+"SetTestClass2" on public static void art_test.TestCases1966.ObjectMethod(java.util.function.IntConsumer) set value: TestClass2("Set TestClass2")
+	Value is 'TestClass2("Set TestClass2")' (class: class art.Test1966$TestClass2)
+Running public static void art_test.TestCases1966.ObjectMethod(java.util.function.IntConsumer) with "SetTestClass2impl" on remote thread.
+"SetTestClass2impl" on public static void art_test.TestCases1966.ObjectMethod(java.util.function.IntConsumer) set value: TestClass2impl("TestClass2("Set TestClass2impl")")
+	Value is 'TestClass2impl("TestClass2("Set TestClass2impl")")' (class: class art.Test1966$TestClass2impl)
+Running public static void art_test.TestCases1966.CastInterfaceMethod(java.util.function.IntConsumer) with "GetGetObject" on remote thread.
+"GetGetObject" on public static void art_test.TestCases1966.CastInterfaceMethod(java.util.function.IntConsumer) got value: TestClass1("ObjectMethod")
+	Value is 'TestClass1("ObjectMethod")' (class: class art.Test1966$TestClass1)
+Running public static void art_test.TestCases1966.CastInterfaceMethod(java.util.function.IntConsumer) with "SetNull" on remote thread.
+"SetNull" on public static void art_test.TestCases1966.CastInterfaceMethod(java.util.function.IntConsumer) set value: null
+	Value is 'null' (class: NULL)
+Running public static void art_test.TestCases1966.CastInterfaceMethod(java.util.function.IntConsumer) with "SetTestClass1" on remote thread.
+"SetTestClass1" on public static void art_test.TestCases1966.CastInterfaceMethod(java.util.function.IntConsumer) set value: TestClass1("Set TestClass1")
+	Value is 'TestClass1("Set TestClass1")' (class: class art.Test1966$TestClass1)
+Running public static void art_test.TestCases1966.CastInterfaceMethod(java.util.function.IntConsumer) with "SetTestClass1ext" on remote thread.
+"SetTestClass1ext" on public static void art_test.TestCases1966.CastInterfaceMethod(java.util.function.IntConsumer) set value: TestClass1ext("TestClass1("Set TestClass1ext")")
+	Value is 'TestClass1ext("TestClass1("Set TestClass1ext")")' (class: class art.Test1966$TestClass1ext)
+Running public static void art_test.TestCases1966.CastInterfaceMethod(java.util.function.IntConsumer) with "SetTestClass2" on remote thread.
+"SetTestClass2" on public static void art_test.TestCases1966.CastInterfaceMethod(java.util.function.IntConsumer) failed to set value TestClass2("Set TestClass2") due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TestClass1("ObjectMethod")' (class: class art.Test1966$TestClass1)
+Running public static void art_test.TestCases1966.CastInterfaceMethod(java.util.function.IntConsumer) with "SetTestClass2impl" on remote thread.
+"SetTestClass2impl" on public static void art_test.TestCases1966.CastInterfaceMethod(java.util.function.IntConsumer) failed to set value TestClass2impl("TestClass2("Set TestClass2impl")") due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TestClass1("ObjectMethod")' (class: class art.Test1966$TestClass1)
+Running public static void art_test.TestCases1966.CastExactMethod(java.util.function.IntConsumer) with "GetGetObject" on remote thread.
+"GetGetObject" on public static void art_test.TestCases1966.CastExactMethod(java.util.function.IntConsumer) got value: TestClass1("ObjectMethod")
+	Value is 'TestClass1("ObjectMethod")' (class: class art.Test1966$TestClass1)
+Running public static void art_test.TestCases1966.CastExactMethod(java.util.function.IntConsumer) with "SetNull" on remote thread.
+"SetNull" on public static void art_test.TestCases1966.CastExactMethod(java.util.function.IntConsumer) set value: null
+	Value is 'null' (class: NULL)
+Running public static void art_test.TestCases1966.CastExactMethod(java.util.function.IntConsumer) with "SetTestClass1" on remote thread.
+"SetTestClass1" on public static void art_test.TestCases1966.CastExactMethod(java.util.function.IntConsumer) set value: TestClass1("Set TestClass1")
+	Value is 'TestClass1("Set TestClass1")' (class: class art.Test1966$TestClass1)
+Running public static void art_test.TestCases1966.CastExactMethod(java.util.function.IntConsumer) with "SetTestClass1ext" on remote thread.
+"SetTestClass1ext" on public static void art_test.TestCases1966.CastExactMethod(java.util.function.IntConsumer) set value: TestClass1ext("TestClass1("Set TestClass1ext")")
+	Value is 'TestClass1ext("TestClass1("Set TestClass1ext")")' (class: class art.Test1966$TestClass1ext)
+Running public static void art_test.TestCases1966.CastExactMethod(java.util.function.IntConsumer) with "SetTestClass2" on remote thread.
+"SetTestClass2" on public static void art_test.TestCases1966.CastExactMethod(java.util.function.IntConsumer) failed to set value TestClass2("Set TestClass2") due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TestClass1("ObjectMethod")' (class: class art.Test1966$TestClass1)
+Running public static void art_test.TestCases1966.CastExactMethod(java.util.function.IntConsumer) with "SetTestClass2impl" on remote thread.
+"SetTestClass2impl" on public static void art_test.TestCases1966.CastExactMethod(java.util.function.IntConsumer) failed to set value TestClass2impl("TestClass2("Set TestClass2impl")") due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TestClass1("ObjectMethod")' (class: class art.Test1966$TestClass1)
+Running public static void art_test.TestCases1966.InterfaceMethod(java.util.function.IntConsumer) with "GetGetObject" on remote thread.
+"GetGetObject" on public static void art_test.TestCases1966.InterfaceMethod(java.util.function.IntConsumer) got value: TestClass1("InterfaceMethod")
+	Value is 'TestClass1("InterfaceMethod")' (class: class art.Test1966$TestClass1)
+Running public static void art_test.TestCases1966.InterfaceMethod(java.util.function.IntConsumer) with "SetNull" on remote thread.
+"SetNull" on public static void art_test.TestCases1966.InterfaceMethod(java.util.function.IntConsumer) set value: null
+	Value is 'null' (class: NULL)
+Running public static void art_test.TestCases1966.InterfaceMethod(java.util.function.IntConsumer) with "SetTestClass1" on remote thread.
+"SetTestClass1" on public static void art_test.TestCases1966.InterfaceMethod(java.util.function.IntConsumer) set value: TestClass1("Set TestClass1")
+	Value is 'TestClass1("Set TestClass1")' (class: class art.Test1966$TestClass1)
+Running public static void art_test.TestCases1966.InterfaceMethod(java.util.function.IntConsumer) with "SetTestClass1ext" on remote thread.
+"SetTestClass1ext" on public static void art_test.TestCases1966.InterfaceMethod(java.util.function.IntConsumer) set value: TestClass1ext("TestClass1("Set TestClass1ext")")
+	Value is 'TestClass1ext("TestClass1("Set TestClass1ext")")' (class: class art.Test1966$TestClass1ext)
+Running public static void art_test.TestCases1966.InterfaceMethod(java.util.function.IntConsumer) with "SetTestClass2" on remote thread.
+"SetTestClass2" on public static void art_test.TestCases1966.InterfaceMethod(java.util.function.IntConsumer) failed to set value TestClass2("Set TestClass2") due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TestClass1("InterfaceMethod")' (class: class art.Test1966$TestClass1)
+Running public static void art_test.TestCases1966.InterfaceMethod(java.util.function.IntConsumer) with "SetTestClass2impl" on remote thread.
+"SetTestClass2impl" on public static void art_test.TestCases1966.InterfaceMethod(java.util.function.IntConsumer) set value: TestClass2impl("TestClass2("Set TestClass2impl")")
+	Value is 'TestClass2impl("TestClass2("Set TestClass2impl")")' (class: class art.Test1966$TestClass2impl)
+Running public static void art_test.TestCases1966.ExactClassMethod(java.util.function.IntConsumer) with "GetGetObject" on remote thread.
+"GetGetObject" on public static void art_test.TestCases1966.ExactClassMethod(java.util.function.IntConsumer) got value: TestClass1("SpecificClassMethod")
+	Value is 'TestClass1("SpecificClassMethod")' (class: class art.Test1966$TestClass1)
+Running public static void art_test.TestCases1966.ExactClassMethod(java.util.function.IntConsumer) with "SetNull" on remote thread.
+"SetNull" on public static void art_test.TestCases1966.ExactClassMethod(java.util.function.IntConsumer) set value: null
+	Value is 'null' (class: NULL)
+Running public static void art_test.TestCases1966.ExactClassMethod(java.util.function.IntConsumer) with "SetTestClass1" on remote thread.
+"SetTestClass1" on public static void art_test.TestCases1966.ExactClassMethod(java.util.function.IntConsumer) set value: TestClass1("Set TestClass1")
+	Value is 'TestClass1("Set TestClass1")' (class: class art.Test1966$TestClass1)
+Running public static void art_test.TestCases1966.ExactClassMethod(java.util.function.IntConsumer) with "SetTestClass1ext" on remote thread.
+"SetTestClass1ext" on public static void art_test.TestCases1966.ExactClassMethod(java.util.function.IntConsumer) set value: TestClass1ext("TestClass1("Set TestClass1ext")")
+	Value is 'TestClass1ext("TestClass1("Set TestClass1ext")")' (class: class art.Test1966$TestClass1ext)
+Running public static void art_test.TestCases1966.ExactClassMethod(java.util.function.IntConsumer) with "SetTestClass2" on remote thread.
+"SetTestClass2" on public static void art_test.TestCases1966.ExactClassMethod(java.util.function.IntConsumer) failed to set value TestClass2("Set TestClass2") due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TestClass1("SpecificClassMethod")' (class: class art.Test1966$TestClass1)
+Running public static void art_test.TestCases1966.ExactClassMethod(java.util.function.IntConsumer) with "SetTestClass2impl" on remote thread.
+"SetTestClass2impl" on public static void art_test.TestCases1966.ExactClassMethod(java.util.function.IntConsumer) failed to set value TestClass2impl("TestClass2("Set TestClass2impl")") due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'TestClass1("SpecificClassMethod")' (class: class art.Test1966$TestClass1)
+Running public static void art_test.TestCases1966.PrimitiveMethod(java.util.function.IntConsumer) with "GetGetObject" on remote thread.
+"GetGetObject" on public static void art_test.TestCases1966.PrimitiveMethod(java.util.function.IntConsumer) failed due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art_test.TestCases1966.PrimitiveMethod(java.util.function.IntConsumer) with "SetNull" on remote thread.
+"SetNull" on public static void art_test.TestCases1966.PrimitiveMethod(java.util.function.IntConsumer) failed to set value null due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art_test.TestCases1966.PrimitiveMethod(java.util.function.IntConsumer) with "SetTestClass1" on remote thread.
+"SetTestClass1" on public static void art_test.TestCases1966.PrimitiveMethod(java.util.function.IntConsumer) failed to set value TestClass1("Set TestClass1") due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art_test.TestCases1966.PrimitiveMethod(java.util.function.IntConsumer) with "SetTestClass1ext" on remote thread.
+"SetTestClass1ext" on public static void art_test.TestCases1966.PrimitiveMethod(java.util.function.IntConsumer) failed to set value TestClass1ext("TestClass1("Set TestClass1ext")") due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art_test.TestCases1966.PrimitiveMethod(java.util.function.IntConsumer) with "SetTestClass2" on remote thread.
+"SetTestClass2" on public static void art_test.TestCases1966.PrimitiveMethod(java.util.function.IntConsumer) failed to set value TestClass2("Set TestClass2") due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art_test.TestCases1966.PrimitiveMethod(java.util.function.IntConsumer) with "SetTestClass2impl" on remote thread.
+"SetTestClass2impl" on public static void art_test.TestCases1966.PrimitiveMethod(java.util.function.IntConsumer) failed to set value TestClass2impl("TestClass2("Set TestClass2impl")") due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art_test.TestCases1966.NullMethod(java.util.function.IntConsumer) with "GetGetObject" on remote thread.
+"GetGetObject" on public static void art_test.TestCases1966.NullMethod(java.util.function.IntConsumer) got value: null
+	Value is 'null' (class: NULL)
+Running public static void art_test.TestCases1966.NullMethod(java.util.function.IntConsumer) with "SetNull" on remote thread.
+"SetNull" on public static void art_test.TestCases1966.NullMethod(java.util.function.IntConsumer) set value: null
+	Value is 'null' (class: NULL)
+Running public static void art_test.TestCases1966.NullMethod(java.util.function.IntConsumer) with "SetTestClass1" on remote thread.
+"SetTestClass1" on public static void art_test.TestCases1966.NullMethod(java.util.function.IntConsumer) failed to set value TestClass1("Set TestClass1") due to JVMTI_ERROR_INTERNAL
+	Value is 'null' (class: NULL)
+Running public static void art_test.TestCases1966.NullMethod(java.util.function.IntConsumer) with "SetTestClass1ext" on remote thread.
+"SetTestClass1ext" on public static void art_test.TestCases1966.NullMethod(java.util.function.IntConsumer) failed to set value TestClass1ext("TestClass1("Set TestClass1ext")") due to JVMTI_ERROR_INTERNAL
+	Value is 'null' (class: NULL)
+Running public static void art_test.TestCases1966.NullMethod(java.util.function.IntConsumer) with "SetTestClass2" on remote thread.
+"SetTestClass2" on public static void art_test.TestCases1966.NullMethod(java.util.function.IntConsumer) failed to set value TestClass2("Set TestClass2") due to JVMTI_ERROR_INTERNAL
+	Value is 'null' (class: NULL)
+Running public static void art_test.TestCases1966.NullMethod(java.util.function.IntConsumer) with "SetTestClass2impl" on remote thread.
+"SetTestClass2impl" on public static void art_test.TestCases1966.NullMethod(java.util.function.IntConsumer) failed to set value TestClass2impl("TestClass2("Set TestClass2impl")") due to JVMTI_ERROR_INTERNAL
+	Value is 'null' (class: NULL)
+Running public static void art_test.TestCases1966.CastExactNullMethod(java.util.function.IntConsumer) with "GetGetObject" on remote thread.
+"GetGetObject" on public static void art_test.TestCases1966.CastExactNullMethod(java.util.function.IntConsumer) got value: null
+	Value is 'null' (class: NULL)
+Running public static void art_test.TestCases1966.CastExactNullMethod(java.util.function.IntConsumer) with "SetNull" on remote thread.
+"SetNull" on public static void art_test.TestCases1966.CastExactNullMethod(java.util.function.IntConsumer) set value: null
+	Value is 'null' (class: NULL)
+Running public static void art_test.TestCases1966.CastExactNullMethod(java.util.function.IntConsumer) with "SetTestClass1" on remote thread.
+"SetTestClass1" on public static void art_test.TestCases1966.CastExactNullMethod(java.util.function.IntConsumer) set value: TestClass1("Set TestClass1")
+	Value is 'TestClass1("Set TestClass1")' (class: class art.Test1966$TestClass1)
+Running public static void art_test.TestCases1966.CastExactNullMethod(java.util.function.IntConsumer) with "SetTestClass1ext" on remote thread.
+"SetTestClass1ext" on public static void art_test.TestCases1966.CastExactNullMethod(java.util.function.IntConsumer) set value: TestClass1ext("TestClass1("Set TestClass1ext")")
+	Value is 'TestClass1ext("TestClass1("Set TestClass1ext")")' (class: class art.Test1966$TestClass1ext)
+Running public static void art_test.TestCases1966.CastExactNullMethod(java.util.function.IntConsumer) with "SetTestClass2" on remote thread.
+"SetTestClass2" on public static void art_test.TestCases1966.CastExactNullMethod(java.util.function.IntConsumer) failed to set value TestClass2("Set TestClass2") due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'null' (class: NULL)
+Running public static void art_test.TestCases1966.CastExactNullMethod(java.util.function.IntConsumer) with "SetTestClass2impl" on remote thread.
+"SetTestClass2impl" on public static void art_test.TestCases1966.CastExactNullMethod(java.util.function.IntConsumer) failed to set value TestClass2impl("TestClass2("Set TestClass2impl")") due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'null' (class: NULL)
+Running public static void art_test.TestCases1966.CastInterfaceNullMethod(java.util.function.IntConsumer) with "GetGetObject" on remote thread.
+"GetGetObject" on public static void art_test.TestCases1966.CastInterfaceNullMethod(java.util.function.IntConsumer) got value: null
+	Value is 'null' (class: NULL)
+Running public static void art_test.TestCases1966.CastInterfaceNullMethod(java.util.function.IntConsumer) with "SetNull" on remote thread.
+"SetNull" on public static void art_test.TestCases1966.CastInterfaceNullMethod(java.util.function.IntConsumer) set value: null
+	Value is 'null' (class: NULL)
+Running public static void art_test.TestCases1966.CastInterfaceNullMethod(java.util.function.IntConsumer) with "SetTestClass1" on remote thread.
+"SetTestClass1" on public static void art_test.TestCases1966.CastInterfaceNullMethod(java.util.function.IntConsumer) set value: TestClass1("Set TestClass1")
+	Value is 'TestClass1("Set TestClass1")' (class: class art.Test1966$TestClass1)
+Running public static void art_test.TestCases1966.CastInterfaceNullMethod(java.util.function.IntConsumer) with "SetTestClass1ext" on remote thread.
+"SetTestClass1ext" on public static void art_test.TestCases1966.CastInterfaceNullMethod(java.util.function.IntConsumer) set value: TestClass1ext("TestClass1("Set TestClass1ext")")
+	Value is 'TestClass1ext("TestClass1("Set TestClass1ext")")' (class: class art.Test1966$TestClass1ext)
+Running public static void art_test.TestCases1966.CastInterfaceNullMethod(java.util.function.IntConsumer) with "SetTestClass2" on remote thread.
+"SetTestClass2" on public static void art_test.TestCases1966.CastInterfaceNullMethod(java.util.function.IntConsumer) failed to set value TestClass2("Set TestClass2") due to JVMTI_ERROR_TYPE_MISMATCH
+	Value is 'null' (class: NULL)
+Running public static void art_test.TestCases1966.CastInterfaceNullMethod(java.util.function.IntConsumer) with "SetTestClass2impl" on remote thread.
+"SetTestClass2impl" on public static void art_test.TestCases1966.CastInterfaceNullMethod(java.util.function.IntConsumer) set value: TestClass2impl("TestClass2("Set TestClass2impl")")
+	Value is 'TestClass2impl("TestClass2("Set TestClass2impl")")' (class: class art.Test1966$TestClass2impl)
diff --git a/test/1966-get-set-local-objects-no-table/info.txt b/test/1966-get-set-local-objects-no-table/info.txt
new file mode 100644
index 0000000..86ac743
--- /dev/null
+++ b/test/1966-get-set-local-objects-no-table/info.txt
@@ -0,0 +1,2 @@
+Tests for jvmti get and set local variable object.
+
diff --git a/test/1966-get-set-local-objects-no-table/jasmin/TestCases1966.j b/test/1966-get-set-local-objects-no-table/jasmin/TestCases1966.j
new file mode 100644
index 0000000..721f7ce
--- /dev/null
+++ b/test/1966-get-set-local-objects-no-table/jasmin/TestCases1966.j
@@ -0,0 +1,161 @@
+; Copyright (C) 2019 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public art_test/TestCases1966
+.super java/lang/Object
+.inner class public static TestClass1 inner art/Test1966$TestClass1 outer art/Test1966
+.inner interface public static abstract TestInterface inner art/Test1966$TestInterface outer art/Test1966
+
+.method public <init>()V
+  .limit stack 1
+  .limit locals 1
+  0: aload_0
+  1: invokespecial java/lang/Object/<init>()V
+  4: return
+.end method
+
+.method public static PrimitiveMethod(Ljava/util/function/IntConsumer;)V
+  .limit stack 2
+  .limit locals 2
+  0: bipush 42
+  2: istore_1
+  3: aload_0
+  4: sipush 1
+  7: invokeinterface java/util/function/IntConsumer/accept(I)V 2
+  12: iload_1
+  13: invokestatic java/lang/Integer/valueOf(I)Ljava/lang/Integer;
+  16: invokestatic art/Test1966/reportValue(Ljava/lang/Object;)V
+  19: return
+.end method
+
+.method public static CastInterfaceMethod(Ljava/util/function/IntConsumer;)V
+  .limit stack 2
+  .limit locals 3
+  0: ldc "ObjectMethod"
+  2: invokestatic art/Test1966$TestClass1/create(Ljava/lang/String;)Ljava/lang/Object;
+  5: astore_1
+  6: aload_1
+  7: checkcast art/Test1966$TestClass1
+  10: astore_2
+  11: aload_0
+  12: sipush 2
+  15: invokeinterface java/util/function/IntConsumer/accept(I)V 2
+  20: aload_2
+  21: invokestatic art/Test1966/reportValue(Ljava/lang/Object;)V
+  24: return
+.end method
+
+.method public static CastExactMethod(Ljava/util/function/IntConsumer;)V
+  .limit stack 2
+  .limit locals 3
+  0: ldc "ObjectMethod"
+  2: invokestatic art/Test1966$TestClass1/create(Ljava/lang/String;)Ljava/lang/Object;
+  5: astore_1
+  6: aload_1
+  7: checkcast art/Test1966$TestClass1
+  10: astore_2
+  11: aload_0
+  12: sipush 2
+  15: invokeinterface java/util/function/IntConsumer/accept(I)V 2
+  20: aload_2
+  21: invokestatic art/Test1966/reportValue(Ljava/lang/Object;)V
+  24: return
+.end method
+
+.method public static ObjectMethod(Ljava/util/function/IntConsumer;)V
+  .limit stack 2
+  .limit locals 2
+  0: ldc "ObjectMethod"
+  2: invokestatic art/Test1966$TestClass1/create(Ljava/lang/String;)Ljava/lang/Object;
+  5: astore_1
+  6: aload_0
+  7: sipush 1
+  10: invokeinterface java/util/function/IntConsumer/accept(I)V 2
+  15: aload_1
+  16: invokestatic art/Test1966/reportValue(Ljava/lang/Object;)V
+  19: return
+.end method
+
+.method public static InterfaceMethod(Ljava/util/function/IntConsumer;)V
+  .limit stack 2
+  .limit locals 2
+  0: ldc "InterfaceMethod"
+  2: invokestatic art/Test1966$TestClass1/createInterface(Ljava/lang/String;)Lart/Test1966$TestInterface;
+  5: astore_1
+  6: aload_0
+  7: sipush 1
+  10: invokeinterface java/util/function/IntConsumer/accept(I)V 2
+  15: aload_1
+  16: invokestatic art/Test1966/reportValue(Ljava/lang/Object;)V
+  19: return
+.end method
+
+.method public static ExactClassMethod(Ljava/util/function/IntConsumer;)V
+  .limit stack 2
+  .limit locals 2
+  0: ldc "SpecificClassMethod"
+  2: invokestatic art/Test1966$TestClass1/createExact(Ljava/lang/String;)Lart/Test1966$TestClass1;
+  5: astore_1
+  6: aload_0
+  7: sipush 1
+  10: invokeinterface java/util/function/IntConsumer/accept(I)V 2
+  15: aload_1
+  16: invokestatic art/Test1966/reportValue(Ljava/lang/Object;)V
+  19: return
+.end method
+
+.method public static CastExactNullMethod(Ljava/util/function/IntConsumer;)V
+  .limit stack 2
+  .limit locals 3
+  0: aconst_null
+  1: astore_1
+  2: aload_1
+  3: checkcast art/Test1966$TestClass1
+  6: astore_2
+  7: aload_0
+  8: sipush 2
+  11: invokeinterface java/util/function/IntConsumer/accept(I)V 2
+  16: aload_2
+  17: invokestatic art/Test1966/reportValue(Ljava/lang/Object;)V
+  20: return
+.end method
+
+.method public static CastInterfaceNullMethod(Ljava/util/function/IntConsumer;)V
+  .limit stack 2
+  .limit locals 3
+  0: aconst_null
+  1: astore_1
+  2: aload_1
+  3: checkcast art/Test1966$TestInterface
+  6: astore_2
+  7: aload_0
+  8: sipush 2
+  11: invokeinterface java/util/function/IntConsumer/accept(I)V 2
+  16: aload_2
+  17: invokestatic art/Test1966/reportValue(Ljava/lang/Object;)V
+  20: return
+.end method
+
+.method public static NullMethod(Ljava/util/function/IntConsumer;)V
+  .limit stack 2
+  .limit locals 2
+  0: aconst_null
+  1: astore_1
+  2: aload_0
+  3: sipush 1
+  6: invokeinterface java/util/function/IntConsumer/accept(I)V 2
+  11: aload_1
+  12: invokestatic art/Test1966/reportValue(Ljava/lang/Object;)V
+  15: return
+.end method
diff --git a/test/1966-get-set-local-objects-no-table/run b/test/1966-get-set-local-objects-no-table/run
new file mode 100755
index 0000000..9b741ee
--- /dev/null
+++ b/test/1966-get-set-local-objects-no-table/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# If we compile the .oat files non-debuggable we could end up with dex2dex running over the files
+# which will cause some instructions to be removed from smali/TestCases1966.smali. This test relies
+# on the instructions being exactly as written so pass --debuggable to 'dex2oat' only to prevent
+# this from happening.
+./default-run "$@" --jvmti --compiler-only-option --debuggable
diff --git a/test/1966-get-set-local-objects-no-table/smali/TestCases1966.smali b/test/1966-get-set-local-objects-no-table/smali/TestCases1966.smali
new file mode 100644
index 0000000..d460dcd
--- /dev/null
+++ b/test/1966-get-set-local-objects-no-table/smali/TestCases1966.smali
@@ -0,0 +1,121 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+.class public Lart_test/TestCases1966;
+.super Ljava/lang/Object;
+
+# direct methods
+.method public constructor <init>()V
+    .registers 1
+    invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+    return-void
+.end method
+
+.method public static CastExactMethod(Ljava/util/function/IntConsumer;)V
+    .registers 3
+    const-string v0, "ObjectMethod"
+    invoke-static {v0}, Lart/Test1966$TestClass1;->create(Ljava/lang/String;)Ljava/lang/Object;
+    move-result-object v0
+    check-cast v0, Lart/Test1966$TestClass1;
+    const/16 v1, 0x0
+    invoke-interface {p0, v1}, Ljava/util/function/IntConsumer;->accept(I)V
+    invoke-static {v0}, Lart/Test1966;->reportValue(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public static CastInterfaceMethod(Ljava/util/function/IntConsumer;)V
+    .registers 3
+    const-string v0, "ObjectMethod"
+    invoke-static {v0}, Lart/Test1966$TestClass1;->create(Ljava/lang/String;)Ljava/lang/Object;
+    move-result-object v0
+    check-cast v0, Lart/Test1966$TestClass1;
+    const/16 v1, 0x0
+    invoke-interface {p0, v1}, Ljava/util/function/IntConsumer;->accept(I)V
+    invoke-static {v0}, Lart/Test1966;->reportValue(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public static ExactClassMethod(Ljava/util/function/IntConsumer;)V
+    .registers 3
+    const-string v0, "SpecificClassMethod"
+    invoke-static {v0}, Lart/Test1966$TestClass1;->createExact(Ljava/lang/String;)Lart/Test1966$TestClass1;
+    move-result-object v0
+    const/16 v1, 0x0
+    invoke-interface {p0, v1}, Ljava/util/function/IntConsumer;->accept(I)V
+    invoke-static {v0}, Lart/Test1966;->reportValue(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public static InterfaceMethod(Ljava/util/function/IntConsumer;)V
+    .registers 3
+    const-string v0, "InterfaceMethod"
+    invoke-static {v0}, Lart/Test1966$TestClass1;->createInterface(Ljava/lang/String;)Lart/Test1966$TestInterface;
+    move-result-object v0
+    const/16 v1, 0x0
+    invoke-interface {p0, v1}, Ljava/util/function/IntConsumer;->accept(I)V
+    invoke-static {v0}, Lart/Test1966;->reportValue(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public static ObjectMethod(Ljava/util/function/IntConsumer;)V
+    .registers 3
+    const-string v0, "ObjectMethod"
+    invoke-static {v0}, Lart/Test1966$TestClass1;->create(Ljava/lang/String;)Ljava/lang/Object;
+    move-result-object v0
+    const/16 v1, 0x0
+    invoke-interface {p0, v1}, Ljava/util/function/IntConsumer;->accept(I)V
+    invoke-static {v0}, Lart/Test1966;->reportValue(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public static PrimitiveMethod(Ljava/util/function/IntConsumer;)V
+    .registers 3
+    const/16 v0, 0x2a
+    const/16 v1, 0x0
+    invoke-interface {p0, v1}, Ljava/util/function/IntConsumer;->accept(I)V
+    invoke-static {v0}, Ljava/lang/Integer;->valueOf(I)Ljava/lang/Integer;
+    move-result-object p0
+    invoke-static {p0}, Lart/Test1966;->reportValue(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public static NullMethod(Ljava/util/function/IntConsumer;)V
+    .registers 3
+    const/4 v0, 0x0
+    const/16 v1, 0x0
+    invoke-interface {p0, v1}, Ljava/util/function/IntConsumer;->accept(I)V
+    invoke-static {v0}, Lart/Test1966;->reportValue(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public static CastInterfaceNullMethod(Ljava/util/function/IntConsumer;)V
+    .registers 3
+    const/4 v0, 0x0
+    check-cast v0, Lart/Test1966$TestInterface;
+    const/16 v1, 0x0
+    invoke-interface {p0, v1}, Ljava/util/function/IntConsumer;->accept(I)V
+    invoke-static {v0}, Lart/Test1966;->reportValue(Ljava/lang/Object;)V
+    return-void
+.end method
+
+.method public static CastExactNullMethod(Ljava/util/function/IntConsumer;)V
+    .registers 3
+    const/4 v0, 0x0
+    check-cast v0, Lart/Test1966$TestClass1;
+    const/16 v1, 0x0
+    invoke-interface {p0, v1}, Ljava/util/function/IntConsumer;->accept(I)V
+    invoke-static {v0}, Lart/Test1966;->reportValue(Ljava/lang/Object;)V
+    return-void
+.end method
\ No newline at end of file
diff --git a/test/1966-get-set-local-objects-no-table/src/Main.java b/test/1966-get-set-local-objects-no-table/src/Main.java
new file mode 100644
index 0000000..198f319
--- /dev/null
+++ b/test/1966-get-set-local-objects-no-table/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1966.run();
+  }
+}
diff --git a/test/1966-get-set-local-objects-no-table/src/art/Breakpoint.java b/test/1966-get-set-local-objects-no-table/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1966-get-set-local-objects-no-table/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1966-get-set-local-objects-no-table/src/art/Locals.java b/test/1966-get-set-local-objects-no-table/src/art/Locals.java
new file mode 120000
index 0000000..2998386
--- /dev/null
+++ b/test/1966-get-set-local-objects-no-table/src/art/Locals.java
@@ -0,0 +1 @@
+../../../jvmti-common/Locals.java
\ No newline at end of file
diff --git a/test/1966-get-set-local-objects-no-table/src/art/StackTrace.java b/test/1966-get-set-local-objects-no-table/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1966-get-set-local-objects-no-table/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1966-get-set-local-objects-no-table/src/art/Suspension.java b/test/1966-get-set-local-objects-no-table/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1966-get-set-local-objects-no-table/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1966-get-set-local-objects-no-table/src/art/Test1966.java b/test/1966-get-set-local-objects-no-table/src/art/Test1966.java
new file mode 100644
index 0000000..00f3c4e
--- /dev/null
+++ b/test/1966-get-set-local-objects-no-table/src/art/Test1966.java
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Executable;
+import java.lang.reflect.Method;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Semaphore;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.function.IntConsumer;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+
+public class Test1966 {
+  public static final String TARGET_VAR = "TARGET";
+
+  public static interface TestInterface {
+    public default void doNothing() {}
+  }
+  public static class TestClass1 implements TestInterface {
+    public String id;
+    public TestClass1(String id) {
+      this.id = id;
+    }
+    public String toString() {
+      return String.format("TestClass1(\"%s\")", id);
+    }
+
+    public static TestInterface createInterface(String id) {
+      return new TestClass1(id);
+    }
+    public static TestClass1 createExact(String id) {
+      return new TestClass1(id);
+    }
+    public static Object create(String id) {
+      return new TestClass1(id);
+    }
+  }
+
+  public static class TestClass1ext extends TestClass1 {
+    public TestClass1ext(String id) {
+      super(id);
+    }
+    public String toString() {
+      return String.format("TestClass1ext(\"%s\")", super.toString());
+    }
+  }
+  public static class TestClass2 {
+    public String id;
+    public TestClass2(String id) {
+      this.id = id;
+    }
+    public String toString() {
+      return String.format("TestClass2(\"%s\")", id);
+    }
+  }
+  public static class TestClass2impl extends TestClass2 implements TestInterface {
+    public TestClass2impl(String id) {
+      super(id);
+    }
+    public String toString() {
+      return String.format("TestClass2impl(\"%s\")", super.toString());
+    }
+  }
+
+  public static void reportValue(Object val) {
+    System.out.println("\tValue is '" + val +
+                       "' (class: " + (val != null ? val.getClass() : "NULL") + ")");
+  }
+
+  public static interface SafepointFunction {
+    public void invoke(Thread thread, Method target, int slot, int depth) throws Exception;
+  }
+
+  public static interface SetterFunction {
+    public void SetVar(Thread t, int depth, int slot, Object v);
+  }
+
+  public static interface GetterFunction { public Object GetVar(Thread t, int depth, int slot); }
+
+  public static SafepointFunction
+  NamedSet(final String type, final SetterFunction get, final Object v) {
+    return new SafepointFunction() {
+      public void invoke(Thread t, Method method, int slot, int depth) {
+        try {
+          get.SetVar(t, depth, slot, v);
+          System.out.println(this + " on " + method + " set value: " + v);
+        } catch (Exception e) {
+          System.out.println(this + " on " + method + " failed to set value " + v + " due to " +
+                             e.getMessage());
+        }
+      }
+      public String toString() {
+        return "\"Set" + type + "\"";
+      }
+    };
+  }
+
+  public static SafepointFunction NamedGet(final String type, final GetterFunction get) {
+    return new SafepointFunction() {
+      public void invoke(Thread t, Method method, int slot, int depth) {
+        try {
+          Object res = get.GetVar(t, depth, slot);
+          System.out.println(this + " on " + method + " got value: " + res);
+        } catch (Exception e) {
+          System.out.println(this + " on " + method + " failed due to " + e.getMessage());
+        }
+      }
+      public String toString() {
+        return "\"Get" + type + "\"";
+      }
+    };
+  }
+
+  public static class TestCase {
+    public final Method target;
+
+    public TestCase(Method target) {
+      this.target = target;
+    }
+
+    public static class ThreadPauser implements IntConsumer {
+      public final Semaphore sem_wakeup_main;
+      public final Semaphore sem_wait;
+      public int slot = -1;
+
+      public ThreadPauser() {
+        sem_wakeup_main = new Semaphore(0);
+        sem_wait = new Semaphore(0);
+      }
+
+      public void accept(int i) {
+        try {
+          slot = i;
+          sem_wakeup_main.release();
+          sem_wait.acquire();
+        } catch (Exception e) {
+          throw new Error("Error with semaphores!", e);
+        }
+      }
+
+      public void waitForOtherThreadToPause() throws Exception {
+        sem_wakeup_main.acquire();
+      }
+
+      public void wakeupOtherThread() throws Exception {
+        sem_wait.release();
+      }
+    }
+
+    public void exec(final SafepointFunction safepoint) throws Exception {
+      System.out.println("Running " + target + " with " + safepoint + " on remote thread.");
+      final ThreadPauser pause = new ThreadPauser();
+      Thread remote = new Thread(() -> {
+        try {
+          target.invoke(null, pause);
+        } catch (Exception e) {
+          throw new Error("Error invoking remote thread " + Thread.currentThread(), e);
+        }
+      }, "remote thread for " + target + " with " + safepoint);
+      remote.start();
+      pause.waitForOtherThreadToPause();
+      try {
+        Suspension.suspend(remote);
+        StackTrace.StackFrameData frame = findStackFrame(remote);
+        safepoint.invoke(remote, target, pause.slot, frame.depth);
+      } finally {
+        Suspension.resume(remote);
+        pause.wakeupOtherThread();
+        remote.join();
+      }
+    }
+
+    private StackTrace.StackFrameData findStackFrame(Thread thr) {
+      for (StackTrace.StackFrameData frame : StackTrace.GetStackTrace(thr)) {
+        if (frame.method.equals(target)) {
+          return frame;
+        }
+      }
+      throw new Error("Unable to find stack frame in method " + target + " on thread " + thr);
+    }
+  }
+  public static Method getMethod(String name) throws Exception {
+    return Class.forName("art_test.TestCases1966").getDeclaredMethod(name, IntConsumer.class);
+  }
+
+  public static void run() throws Exception {
+    Locals.EnableLocalVariableAccess();
+    final TestCase[] MAIN_TEST_CASES = new TestCase[] {
+      new TestCase(getMethod("ObjectMethod")),
+      new TestCase(getMethod("CastInterfaceMethod")),
+      new TestCase(getMethod("CastExactMethod")),
+      new TestCase(getMethod("InterfaceMethod")),
+      new TestCase(getMethod("ExactClassMethod")),
+      new TestCase(getMethod("PrimitiveMethod")),
+      new TestCase(getMethod("NullMethod")),
+      new TestCase(getMethod("CastExactNullMethod")),
+      new TestCase(getMethod("CastInterfaceNullMethod")),
+    };
+
+    final SetterFunction set_obj = Locals::SetLocalVariableObject;
+    final SafepointFunction[] SAFEPOINTS = new SafepointFunction[] {
+      NamedGet("GetObject", Locals::GetLocalVariableObject),
+      NamedSet("Null", set_obj, null),
+      NamedSet("TestClass1", set_obj, new TestClass1("Set TestClass1")),
+      NamedSet("TestClass1ext", set_obj, new TestClass1ext("Set TestClass1ext")),
+      NamedSet("TestClass2", set_obj, new TestClass2("Set TestClass2")),
+      NamedSet("TestClass2impl", set_obj, new TestClass2impl("Set TestClass2impl")),
+    };
+
+    for (TestCase t : MAIN_TEST_CASES) {
+      for (SafepointFunction s : SAFEPOINTS) {
+        t.exec(s);
+      }
+    }
+  }
+}
diff --git a/test/1967-get-set-local-bad-slot/expected.txt b/test/1967-get-set-local-bad-slot/expected.txt
new file mode 100644
index 0000000..e2b3ff5
--- /dev/null
+++ b/test/1967-get-set-local-bad-slot/expected.txt
@@ -0,0 +1,108 @@
+Running public static void art.Test1967.IntMethod(java.lang.Runnable) with "GetInt_at_too_high" on remote thread.
+"GetInt_at_too_high" on public static void art.Test1967.IntMethod(java.lang.Runnable) failed due to JVMTI_ERROR_INVALID_SLOT
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art.Test1967.IntMethod(java.lang.Runnable) with "GetLong_at_too_high" on remote thread.
+"GetLong_at_too_high" on public static void art.Test1967.IntMethod(java.lang.Runnable) failed due to JVMTI_ERROR_INVALID_SLOT
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art.Test1967.IntMethod(java.lang.Runnable) with "GetObject_at_too_high" on remote thread.
+"GetObject_at_too_high" on public static void art.Test1967.IntMethod(java.lang.Runnable) failed due to JVMTI_ERROR_INVALID_SLOT
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art.Test1967.IntMethod(java.lang.Runnable) with "SetInt_at_too_high" on remote thread.
+"SetInt_at_too_high" on public static void art.Test1967.IntMethod(java.lang.Runnable) failed to set value 2147483647 due to JVMTI_ERROR_INVALID_SLOT
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art.Test1967.IntMethod(java.lang.Runnable) with "SetLong_at_too_high" on remote thread.
+"SetLong_at_too_high" on public static void art.Test1967.IntMethod(java.lang.Runnable) failed to set value 9223372036854775807 due to JVMTI_ERROR_INVALID_SLOT
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art.Test1967.IntMethod(java.lang.Runnable) with "SetObject_at_too_high" on remote thread.
+"SetObject_at_too_high" on public static void art.Test1967.IntMethod(java.lang.Runnable) failed to set value NEW_FOR_SET due to JVMTI_ERROR_INVALID_SLOT
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art.Test1967.IntMethod(java.lang.Runnable) with "GetInt_at_negative" on remote thread.
+"GetInt_at_negative" on public static void art.Test1967.IntMethod(java.lang.Runnable) failed due to JVMTI_ERROR_INVALID_SLOT
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art.Test1967.IntMethod(java.lang.Runnable) with "GetLong_at_negative" on remote thread.
+"GetLong_at_negative" on public static void art.Test1967.IntMethod(java.lang.Runnable) failed due to JVMTI_ERROR_INVALID_SLOT
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art.Test1967.IntMethod(java.lang.Runnable) with "GetObject_at_negative" on remote thread.
+"GetObject_at_negative" on public static void art.Test1967.IntMethod(java.lang.Runnable) failed due to JVMTI_ERROR_INVALID_SLOT
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art.Test1967.IntMethod(java.lang.Runnable) with "SetInt_at_negative" on remote thread.
+"SetInt_at_negative" on public static void art.Test1967.IntMethod(java.lang.Runnable) failed to set value 2147483647 due to JVMTI_ERROR_INVALID_SLOT
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art.Test1967.IntMethod(java.lang.Runnable) with "SetLong_at_negative" on remote thread.
+"SetLong_at_negative" on public static void art.Test1967.IntMethod(java.lang.Runnable) failed to set value 9223372036854775807 due to JVMTI_ERROR_INVALID_SLOT
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art.Test1967.IntMethod(java.lang.Runnable) with "SetObject_at_negative" on remote thread.
+"SetObject_at_negative" on public static void art.Test1967.IntMethod(java.lang.Runnable) failed to set value NEW_FOR_SET due to JVMTI_ERROR_INVALID_SLOT
+	Value is '42' (class: class java.lang.Integer)
+Running public static void art.Test1967.LongMethod(java.lang.Runnable) with "GetInt_at_too_high" on remote thread.
+"GetInt_at_too_high" on public static void art.Test1967.LongMethod(java.lang.Runnable) failed due to JVMTI_ERROR_INVALID_SLOT
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art.Test1967.LongMethod(java.lang.Runnable) with "GetLong_at_too_high" on remote thread.
+"GetLong_at_too_high" on public static void art.Test1967.LongMethod(java.lang.Runnable) failed due to JVMTI_ERROR_INVALID_SLOT
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art.Test1967.LongMethod(java.lang.Runnable) with "GetObject_at_too_high" on remote thread.
+"GetObject_at_too_high" on public static void art.Test1967.LongMethod(java.lang.Runnable) failed due to JVMTI_ERROR_INVALID_SLOT
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art.Test1967.LongMethod(java.lang.Runnable) with "SetInt_at_too_high" on remote thread.
+"SetInt_at_too_high" on public static void art.Test1967.LongMethod(java.lang.Runnable) failed to set value 2147483647 due to JVMTI_ERROR_INVALID_SLOT
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art.Test1967.LongMethod(java.lang.Runnable) with "SetLong_at_too_high" on remote thread.
+"SetLong_at_too_high" on public static void art.Test1967.LongMethod(java.lang.Runnable) failed to set value 9223372036854775807 due to JVMTI_ERROR_INVALID_SLOT
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art.Test1967.LongMethod(java.lang.Runnable) with "SetObject_at_too_high" on remote thread.
+"SetObject_at_too_high" on public static void art.Test1967.LongMethod(java.lang.Runnable) failed to set value NEW_FOR_SET due to JVMTI_ERROR_INVALID_SLOT
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art.Test1967.LongMethod(java.lang.Runnable) with "GetInt_at_negative" on remote thread.
+"GetInt_at_negative" on public static void art.Test1967.LongMethod(java.lang.Runnable) failed due to JVMTI_ERROR_INVALID_SLOT
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art.Test1967.LongMethod(java.lang.Runnable) with "GetLong_at_negative" on remote thread.
+"GetLong_at_negative" on public static void art.Test1967.LongMethod(java.lang.Runnable) failed due to JVMTI_ERROR_INVALID_SLOT
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art.Test1967.LongMethod(java.lang.Runnable) with "GetObject_at_negative" on remote thread.
+"GetObject_at_negative" on public static void art.Test1967.LongMethod(java.lang.Runnable) failed due to JVMTI_ERROR_INVALID_SLOT
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art.Test1967.LongMethod(java.lang.Runnable) with "SetInt_at_negative" on remote thread.
+"SetInt_at_negative" on public static void art.Test1967.LongMethod(java.lang.Runnable) failed to set value 2147483647 due to JVMTI_ERROR_INVALID_SLOT
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art.Test1967.LongMethod(java.lang.Runnable) with "SetLong_at_negative" on remote thread.
+"SetLong_at_negative" on public static void art.Test1967.LongMethod(java.lang.Runnable) failed to set value 9223372036854775807 due to JVMTI_ERROR_INVALID_SLOT
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art.Test1967.LongMethod(java.lang.Runnable) with "SetObject_at_negative" on remote thread.
+"SetObject_at_negative" on public static void art.Test1967.LongMethod(java.lang.Runnable) failed to set value NEW_FOR_SET due to JVMTI_ERROR_INVALID_SLOT
+	Value is '9001' (class: class java.lang.Long)
+Running public static void art.Test1967.ObjectMethod(java.lang.Runnable) with "GetInt_at_too_high" on remote thread.
+"GetInt_at_too_high" on public static void art.Test1967.ObjectMethod(java.lang.Runnable) failed due to JVMTI_ERROR_INVALID_SLOT
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1967.ObjectMethod(java.lang.Runnable) with "GetLong_at_too_high" on remote thread.
+"GetLong_at_too_high" on public static void art.Test1967.ObjectMethod(java.lang.Runnable) failed due to JVMTI_ERROR_INVALID_SLOT
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1967.ObjectMethod(java.lang.Runnable) with "GetObject_at_too_high" on remote thread.
+"GetObject_at_too_high" on public static void art.Test1967.ObjectMethod(java.lang.Runnable) failed due to JVMTI_ERROR_INVALID_SLOT
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1967.ObjectMethod(java.lang.Runnable) with "SetInt_at_too_high" on remote thread.
+"SetInt_at_too_high" on public static void art.Test1967.ObjectMethod(java.lang.Runnable) failed to set value 2147483647 due to JVMTI_ERROR_INVALID_SLOT
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1967.ObjectMethod(java.lang.Runnable) with "SetLong_at_too_high" on remote thread.
+"SetLong_at_too_high" on public static void art.Test1967.ObjectMethod(java.lang.Runnable) failed to set value 9223372036854775807 due to JVMTI_ERROR_INVALID_SLOT
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1967.ObjectMethod(java.lang.Runnable) with "SetObject_at_too_high" on remote thread.
+"SetObject_at_too_high" on public static void art.Test1967.ObjectMethod(java.lang.Runnable) failed to set value NEW_FOR_SET due to JVMTI_ERROR_INVALID_SLOT
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1967.ObjectMethod(java.lang.Runnable) with "GetInt_at_negative" on remote thread.
+"GetInt_at_negative" on public static void art.Test1967.ObjectMethod(java.lang.Runnable) failed due to JVMTI_ERROR_INVALID_SLOT
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1967.ObjectMethod(java.lang.Runnable) with "GetLong_at_negative" on remote thread.
+"GetLong_at_negative" on public static void art.Test1967.ObjectMethod(java.lang.Runnable) failed due to JVMTI_ERROR_INVALID_SLOT
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1967.ObjectMethod(java.lang.Runnable) with "GetObject_at_negative" on remote thread.
+"GetObject_at_negative" on public static void art.Test1967.ObjectMethod(java.lang.Runnable) failed due to JVMTI_ERROR_INVALID_SLOT
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1967.ObjectMethod(java.lang.Runnable) with "SetInt_at_negative" on remote thread.
+"SetInt_at_negative" on public static void art.Test1967.ObjectMethod(java.lang.Runnable) failed to set value 2147483647 due to JVMTI_ERROR_INVALID_SLOT
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1967.ObjectMethod(java.lang.Runnable) with "SetLong_at_negative" on remote thread.
+"SetLong_at_negative" on public static void art.Test1967.ObjectMethod(java.lang.Runnable) failed to set value 9223372036854775807 due to JVMTI_ERROR_INVALID_SLOT
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
+Running public static void art.Test1967.ObjectMethod(java.lang.Runnable) with "SetObject_at_negative" on remote thread.
+"SetObject_at_negative" on public static void art.Test1967.ObjectMethod(java.lang.Runnable) failed to set value NEW_FOR_SET due to JVMTI_ERROR_INVALID_SLOT
+	Value is 'TARGET OBJECT' (class: class java.lang.String)
diff --git a/test/1967-get-set-local-bad-slot/info.txt b/test/1967-get-set-local-bad-slot/info.txt
new file mode 100644
index 0000000..87a7b35
--- /dev/null
+++ b/test/1967-get-set-local-bad-slot/info.txt
@@ -0,0 +1,2 @@
+Tests for jvmti get/set Local variable primitives.
+
diff --git a/test/1967-get-set-local-bad-slot/run b/test/1967-get-set-local-bad-slot/run
new file mode 100755
index 0000000..51875a7
--- /dev/null
+++ b/test/1967-get-set-local-bad-slot/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Ask for stack traces to be dumped to a file rather than to stdout.
+./default-run "$@" --jvmti
diff --git a/test/1967-get-set-local-bad-slot/src/Main.java b/test/1967-get-set-local-bad-slot/src/Main.java
new file mode 100644
index 0000000..561edbe
--- /dev/null
+++ b/test/1967-get-set-local-bad-slot/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1967.run();
+  }
+}
diff --git a/test/1967-get-set-local-bad-slot/src/art/Breakpoint.java b/test/1967-get-set-local-bad-slot/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1967-get-set-local-bad-slot/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1967-get-set-local-bad-slot/src/art/Locals.java b/test/1967-get-set-local-bad-slot/src/art/Locals.java
new file mode 120000
index 0000000..2998386
--- /dev/null
+++ b/test/1967-get-set-local-bad-slot/src/art/Locals.java
@@ -0,0 +1 @@
+../../../jvmti-common/Locals.java
\ No newline at end of file
diff --git a/test/1967-get-set-local-bad-slot/src/art/StackTrace.java b/test/1967-get-set-local-bad-slot/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1967-get-set-local-bad-slot/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1967-get-set-local-bad-slot/src/art/Suspension.java b/test/1967-get-set-local-bad-slot/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1967-get-set-local-bad-slot/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1967-get-set-local-bad-slot/src/art/Test1967.java b/test/1967-get-set-local-bad-slot/src/art/Test1967.java
new file mode 100644
index 0000000..49e375d
--- /dev/null
+++ b/test/1967-get-set-local-bad-slot/src/art/Test1967.java
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Method;
+import java.util.concurrent.Semaphore;
+import java.util.function.ToIntFunction;
+
+public class Test1967 {
+  public static final String TARGET_VAR = "TARGET";
+
+  public static void reportValue(Object val) {
+    if (val instanceof Character) {
+      val = "<Char: " + Character.getNumericValue(((Character) val).charValue()) + ">";
+    }
+    System.out.println(
+        "\tValue is '"
+            + val
+            + "' (class: "
+            + (val != null ? val.getClass().toString() : "null")
+            + ")");
+  }
+
+  public static void ObjectMethod(Runnable safepoint) {
+    Object TARGET = "TARGET OBJECT";
+    safepoint.run();
+    reportValue(TARGET);
+  }
+
+  public static void IntMethod(Runnable safepoint) {
+    int TARGET = 42;
+    safepoint.run();
+    reportValue(TARGET);
+  }
+
+  public static void LongMethod(Runnable safepoint) {
+    long TARGET = 9001;
+    safepoint.run();
+    reportValue(TARGET);
+  }
+
+  public static interface SafepointFunction {
+    public void invoke(
+        Thread thread, Method target, Locals.VariableDescription TARGET_desc, int depth)
+        throws Exception;
+  }
+
+  public static interface SetterFunction {
+    public void SetVar(Thread t, int depth, int slot, Object v);
+  }
+
+  public static interface GetterFunction {
+    public Object GetVar(Thread t, int depth, int slot);
+  }
+
+  public static SafepointFunction BadSet(
+      final String type,
+      final SetterFunction get,
+      final Object v,
+      final ToIntFunction<Integer> transform) {
+    return new SafepointFunction() {
+      public void invoke(Thread t, Method method, Locals.VariableDescription desc, int depth) {
+        int real_slot = transform.applyAsInt(desc.slot);
+        try {
+          get.SetVar(t, depth, real_slot, v);
+          System.out.println(this + " on " + method + " set value: " + v);
+        } catch (Exception e) {
+          System.out.println(
+              this + " on " + method + " failed to set value " + v + " due to " + e.getMessage());
+        }
+      }
+
+      public String toString() {
+        return "\"Set" + type + "\"";
+      }
+    };
+  }
+
+  public static SafepointFunction BadGet(
+      final String type, final GetterFunction get, final ToIntFunction<Integer> transform) {
+    return new SafepointFunction() {
+      public void invoke(Thread t, Method method, Locals.VariableDescription desc, int depth) {
+        int real_slot = transform.applyAsInt(desc.slot);
+        try {
+          Object res = get.GetVar(t, depth, real_slot);
+          System.out.println(this + " on " + method + " got value: " + res);
+        } catch (Exception e) {
+          System.out.println(this + " on " + method + " failed due to " + e.getMessage());
+        }
+      }
+
+      public String toString() {
+        return "\"Get" + type + "\"";
+      }
+    };
+  }
+
+  public static class TestCase {
+    public final Method target;
+
+    public TestCase(Method target) {
+      this.target = target;
+    }
+
+    public static class ThreadPauser implements Runnable {
+      public final Semaphore sem_wakeup_main;
+      public final Semaphore sem_wait;
+
+      public ThreadPauser() {
+        sem_wakeup_main = new Semaphore(0);
+        sem_wait = new Semaphore(0);
+      }
+
+      public void run() {
+        try {
+          sem_wakeup_main.release();
+          sem_wait.acquire();
+        } catch (Exception e) {
+          throw new Error("Error with semaphores!", e);
+        }
+      }
+
+      public void waitForOtherThreadToPause() throws Exception {
+        sem_wakeup_main.acquire();
+      }
+
+      public void wakeupOtherThread() throws Exception {
+        sem_wait.release();
+      }
+    }
+
+    public void exec(final SafepointFunction safepoint) throws Exception {
+      System.out.println("Running " + target + " with " + safepoint + " on remote thread.");
+      final ThreadPauser pause = new ThreadPauser();
+      Thread remote =
+          new Thread(
+              () -> {
+                try {
+                  target.invoke(null, pause);
+                } catch (Exception e) {
+                  throw new Error("Error invoking remote thread " + Thread.currentThread(), e);
+                }
+              },
+              "remote thread for " + target + " with " + safepoint);
+      remote.start();
+      pause.waitForOtherThreadToPause();
+      try {
+        Suspension.suspend(remote);
+        StackTrace.StackFrameData frame = findStackFrame(remote);
+        Locals.VariableDescription desc = findTargetVar(frame.current_location);
+        safepoint.invoke(remote, target, desc, frame.depth);
+      } finally {
+        Suspension.resume(remote);
+        pause.wakeupOtherThread();
+        remote.join();
+      }
+    }
+
+    private Locals.VariableDescription findTargetVar(long loc) {
+      for (Locals.VariableDescription var : Locals.GetLocalVariableTable(target)) {
+        if (var.start_location <= loc
+            && var.length + var.start_location > loc
+            && var.name.equals(TARGET_VAR)) {
+          return var;
+        }
+      }
+      throw new Error("Unable to find variable " + TARGET_VAR + " in " + target + " at loc " + loc);
+    }
+
+    private StackTrace.StackFrameData findStackFrame(Thread thr) {
+      for (StackTrace.StackFrameData frame : StackTrace.GetStackTrace(thr)) {
+        if (frame.method.equals(target)) {
+          return frame;
+        }
+      }
+      throw new Error("Unable to find stack frame in method " + target + " on thread " + thr);
+    }
+  }
+
+  public static Method getMethod(String name) throws Exception {
+    return Test1967.class.getDeclaredMethod(name, Runnable.class);
+  }
+
+  public static void run() throws Exception {
+    Locals.EnableLocalVariableAccess();
+    final TestCase[] MAIN_TEST_CASES =
+        new TestCase[] {
+          new TestCase(getMethod("IntMethod")),
+          new TestCase(getMethod("LongMethod")),
+          new TestCase(getMethod("ObjectMethod")),
+        };
+
+    final SafepointFunction[] SAFEPOINTS =
+        new SafepointFunction[] {
+          BadGet("Int_at_too_high", Locals::GetLocalVariableInt, (i) -> i + 100),
+          BadGet("Long_at_too_high", Locals::GetLocalVariableLong, (i) -> i + 100),
+          BadGet("Object_at_too_high", Locals::GetLocalVariableObject, (i) -> i + 100),
+          BadSet("Int_at_too_high", Locals::SetLocalVariableInt, Integer.MAX_VALUE, (i) -> i + 100),
+          BadSet("Long_at_too_high", Locals::SetLocalVariableLong, Long.MAX_VALUE, (i) -> i + 100),
+          BadSet(
+              "Object_at_too_high", Locals::SetLocalVariableObject, "NEW_FOR_SET", (i) -> i + 100),
+          BadGet("Int_at_negative", Locals::GetLocalVariableInt, (i) -> -1),
+          BadGet("Long_at_negative", Locals::GetLocalVariableLong, (i) -> -1),
+          BadGet("Object_at_negative", Locals::GetLocalVariableObject, (i) -> -1),
+          BadSet("Int_at_negative", Locals::SetLocalVariableInt, Integer.MAX_VALUE, (i) -> -1),
+          BadSet("Long_at_negative", Locals::SetLocalVariableLong, Long.MAX_VALUE, (i) -> -1),
+          BadSet("Object_at_negative", Locals::SetLocalVariableObject, "NEW_FOR_SET", (i) -> -1),
+        };
+
+    for (TestCase t : MAIN_TEST_CASES) {
+      for (SafepointFunction s : SAFEPOINTS) {
+        t.exec(s);
+      }
+    }
+  }
+}
diff --git a/test/1968-force-early-return/expected.txt b/test/1968-force-early-return/expected.txt
new file mode 100644
index 0000000..bd38590
--- /dev/null
+++ b/test/1968-force-early-return/expected.txt
@@ -0,0 +1,195 @@
+Test stopped using breakpoint
+NORMAL RUN: Single call with no interference on (ID: 0) StandardTestObject { cnt: 0 }
+NORMAL RUN: result for (ID: 0) StandardTestObject { cnt: 2 } is IntContainer { value: 1 }
+Single call with force-early-return on (ID: 1) StandardTestObject { cnt: 0 }
+Will force return of OveriddenReturnValue { id: 0 }
+result for (ID: 1) StandardTestObject { cnt: 1 } is OveriddenReturnValue { id: 0 }
+Test stopped using breakpoint with declared synchronized function
+NORMAL RUN: Single call with no interference on (ID: 2) SynchronizedFunctionTestObject { cnt: 0 }
+NORMAL RUN: result for (ID: 2) SynchronizedFunctionTestObject { cnt: 2 } is IntContainer { value: 1 }
+Single call with force-early-return on (ID: 3) SynchronizedFunctionTestObject { cnt: 0 }
+Will force return of OveriddenReturnValue { id: 1 }
+result for (ID: 3) SynchronizedFunctionTestObject { cnt: 1 } is OveriddenReturnValue { id: 1 }
+Test stopped using breakpoint with synchronized block
+NORMAL RUN: Single call with no interference on (ID: 4) SynchronizedTestObject { cnt: 0 }
+NORMAL RUN: result for (ID: 4) SynchronizedTestObject { cnt: 2 } is IntContainer { value: 1 }
+Single call with force-early-return on (ID: 5) SynchronizedTestObject { cnt: 0 }
+Will force return of OveriddenReturnValue { id: 2 }
+result for (ID: 5) SynchronizedTestObject { cnt: 1 } is OveriddenReturnValue { id: 2 }
+Test stopped on single step
+NORMAL RUN: Single call with no interference on (ID: 6) StandardTestObject { cnt: 0 }
+NORMAL RUN: result for (ID: 6) StandardTestObject { cnt: 2 } is IntContainer { value: 1 }
+Single call with force-early-return on (ID: 7) StandardTestObject { cnt: 0 }
+Will force return of OveriddenReturnValue { id: 3 }
+result for (ID: 7) StandardTestObject { cnt: 1 } is OveriddenReturnValue { id: 3 }
+Test stopped on field access
+NORMAL RUN: Single call with no interference on (ID: 8) FieldBasedTestObject { TARGET_FIELD: 0 }
+NORMAL RUN: result for (ID: 8) FieldBasedTestObject { TARGET_FIELD: 10 } is IntContainer { value: 10 }
+Single call with force-early-return on (ID: 9) FieldBasedTestObject { TARGET_FIELD: 0 }
+Will force return of OveriddenReturnValue { id: 4 }
+result for (ID: 9) FieldBasedTestObject { TARGET_FIELD: 0 } is OveriddenReturnValue { id: 4 }
+Test stopped on field modification
+NORMAL RUN: Single call with no interference on (ID: 10) FieldBasedTestObject { TARGET_FIELD: 0 }
+NORMAL RUN: result for (ID: 10) FieldBasedTestObject { TARGET_FIELD: 10 } is IntContainer { value: 10 }
+Single call with force-early-return on (ID: 11) FieldBasedTestObject { TARGET_FIELD: 0 }
+Will force return of OveriddenReturnValue { id: 5 }
+result for (ID: 11) FieldBasedTestObject { TARGET_FIELD: 0 } is OveriddenReturnValue { id: 5 }
+Test stopped during Method Exit of calledFunction
+NORMAL RUN: Single call with no interference on (ID: 12) StandardTestObject { cnt: 0 }
+NORMAL RUN: result for (ID: 12) StandardTestObject { cnt: 2 } is IntContainer { value: 1 }
+Single call with force-early-return on (ID: 13) StandardTestObject { cnt: 0 }
+Will force return of OveriddenReturnValue { id: 6 }
+result for (ID: 13) StandardTestObject { cnt: 2 } is OveriddenReturnValue { id: 6 }
+Test stopped during Method Enter of calledFunction
+NORMAL RUN: Single call with no interference on (ID: 14) StandardTestObject { cnt: 0 }
+NORMAL RUN: result for (ID: 14) StandardTestObject { cnt: 2 } is IntContainer { value: 1 }
+Single call with force-early-return on (ID: 15) StandardTestObject { cnt: 0 }
+Will force return of OveriddenReturnValue { id: 7 }
+result for (ID: 15) StandardTestObject { cnt: 0 } is OveriddenReturnValue { id: 7 }
+Test stopped during Method Exit due to exception thrown in same function
+NORMAL RUN: Single call with no interference on (ID: 16) ExceptionOnceObject { cnt: 0, throwInSub: false }
+Uncaught exception in thread Thread[Test1968 target thread - 16,5,main] - art.Test1968$ExceptionOnceObject$TestError: null
+	art.Test1968$ExceptionOnceObject.calledFunction(Test1968.java)
+	art.Test1968$AbstractTestObject.run(Test1968.java)
+	art.Test1968$2.run(Test1968.java)
+	java.lang.Thread.run(Thread.java)
+
+NORMAL RUN: result for (ID: 16) ExceptionOnceObject { cnt: 1, throwInSub: false } is null
+Single call with force-early-return on (ID: 17) ExceptionOnceObject { cnt: 0, throwInSub: false }
+Will force return of OveriddenReturnValue { id: 8 }
+result for (ID: 17) ExceptionOnceObject { cnt: 1, throwInSub: false } is OveriddenReturnValue { id: 8 }
+Test stopped during Method Exit due to exception thrown in subroutine
+NORMAL RUN: Single call with no interference on (ID: 18) ExceptionOnceObject { cnt: 0, throwInSub: true }
+Uncaught exception in thread Thread[Test1968 target thread - 18,5,main] - art.Test1968$ExceptionOnceObject$TestError: null
+	art.Test1968$ExceptionOnceObject.doThrow(Test1968.java)
+	art.Test1968$ExceptionOnceObject.calledFunction(Test1968.java)
+	art.Test1968$AbstractTestObject.run(Test1968.java)
+	art.Test1968$2.run(Test1968.java)
+	java.lang.Thread.run(Thread.java)
+
+NORMAL RUN: result for (ID: 18) ExceptionOnceObject { cnt: 1, throwInSub: true } is null
+Single call with force-early-return on (ID: 19) ExceptionOnceObject { cnt: 0, throwInSub: true }
+Will force return of OveriddenReturnValue { id: 9 }
+result for (ID: 19) ExceptionOnceObject { cnt: 1, throwInSub: true } is OveriddenReturnValue { id: 9 }
+Test stopped during notifyFramePop with exception on pop of calledFunction
+NORMAL RUN: Single call with no interference on (ID: 20) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+art.Test1968$ExceptionThrowTestObject$TestError thrown and caught!
+NORMAL RUN: result for (ID: 20) ExceptionThrowTestObject { cnt: 2, baseCnt: 2 } is null
+Single call with force-early-return on (ID: 21) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+Will force return of OveriddenReturnValue { id: 10 }
+result for (ID: 21) ExceptionThrowTestObject { cnt: 2, baseCnt: 2 } is OveriddenReturnValue { id: 10 }
+Test stopped during notifyFramePop with exception on pop of doThrow
+NORMAL RUN: Single call with no interference on (ID: 22) ExceptionCatchTestObject { cnt: 0 }
+art.Test1968$ExceptionCatchTestObject$TestError caught in called function.
+NORMAL RUN: result for (ID: 22) ExceptionCatchTestObject { cnt: 2 } is IntContainer { value: 1 }
+Single call with force-early-return on (ID: 23) ExceptionCatchTestObject { cnt: 0 }
+Will force return of OveriddenReturnValue { id: 11 }
+result for (ID: 23) ExceptionCatchTestObject { cnt: 101 } is IntContainer { value: 1 }
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in called function)
+NORMAL RUN: Single call with no interference on (ID: 24) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+art.Test1968$ExceptionThrowTestObject$TestError caught in same function.
+NORMAL RUN: result for (ID: 24) ExceptionThrowTestObject { cnt: 111, baseCnt: 2 } is IntContainer { value: 11 }
+Single call with force-early-return on (ID: 25) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+Will force return of OveriddenReturnValue { id: 12 }
+result for (ID: 25) ExceptionThrowTestObject { cnt: 11, baseCnt: 2 } is OveriddenReturnValue { id: 12 }
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in subroutine)
+NORMAL RUN: Single call with no interference on (ID: 26) ExceptionCatchTestObject { cnt: 0 }
+art.Test1968$ExceptionCatchTestObject$TestError caught in called function.
+NORMAL RUN: result for (ID: 26) ExceptionCatchTestObject { cnt: 2 } is IntContainer { value: 1 }
+Single call with force-early-return on (ID: 27) ExceptionCatchTestObject { cnt: 0 }
+Will force return of OveriddenReturnValue { id: 13 }
+result for (ID: 27) ExceptionCatchTestObject { cnt: 1 } is OveriddenReturnValue { id: 13 }
+Test stopped during Exception event of calledFunction (catch in calling function)
+NORMAL RUN: Single call with no interference on (ID: 28) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+art.Test1968$ExceptionThrowTestObject$TestError thrown and caught!
+NORMAL RUN: result for (ID: 28) ExceptionThrowTestObject { cnt: 2, baseCnt: 2 } is null
+Single call with force-early-return on (ID: 29) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+Will force return of OveriddenReturnValue { id: 14 }
+result for (ID: 29) ExceptionThrowTestObject { cnt: 2, baseCnt: 2 } is OveriddenReturnValue { id: 14 }
+Test stopped during Exception event of calledFunction (catch in called function)
+NORMAL RUN: Single call with no interference on (ID: 30) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+art.Test1968$ExceptionThrowTestObject$TestError caught in same function.
+NORMAL RUN: result for (ID: 30) ExceptionThrowTestObject { cnt: 111, baseCnt: 2 } is IntContainer { value: 11 }
+Single call with force-early-return on (ID: 31) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+Will force return of OveriddenReturnValue { id: 15 }
+result for (ID: 31) ExceptionThrowTestObject { cnt: 11, baseCnt: 2 } is OveriddenReturnValue { id: 15 }
+Test stopped during Exception event of calledFunction (catch in parent of calling function)
+NORMAL RUN: Single call with no interference on (ID: 32) ExceptionThrowFarTestObject { cnt: 0, baseCnt: 0 }
+art.Test1968$ExceptionThrowFarTestObject$TestError thrown and caught!
+NORMAL RUN: result for (ID: 32) ExceptionThrowFarTestObject { cnt: 2, baseCnt: 2 } is null
+Single call with force-early-return on (ID: 33) ExceptionThrowFarTestObject { cnt: 0, baseCnt: 0 }
+Will force return of OveriddenReturnValue { id: 16 }
+result for (ID: 33) ExceptionThrowFarTestObject { cnt: 2, baseCnt: 2 } is OveriddenReturnValue { id: 16 }
+Test stopped during Exception event of calledFunction (catch in called function)
+NORMAL RUN: Single call with no interference on (ID: 34) ExceptionThrowFarTestObject { cnt: 0, baseCnt: 0 }
+art.Test1968$ExceptionThrowFarTestObject$TestError caught in same function.
+NORMAL RUN: result for (ID: 34) ExceptionThrowFarTestObject { cnt: 111, baseCnt: 2 } is IntContainer { value: 101 }
+Single call with force-early-return on (ID: 35) ExceptionThrowFarTestObject { cnt: 0, baseCnt: 0 }
+Will force return of OveriddenReturnValue { id: 17 }
+result for (ID: 35) ExceptionThrowFarTestObject { cnt: 101, baseCnt: 2 } is OveriddenReturnValue { id: 17 }
+Test stopped during random Suspend.
+NORMAL RUN: Single call with no interference on (ID: 36) SuspendSuddenlyObject { cnt: 0, spun: false }
+NORMAL RUN: result for (ID: 36) SuspendSuddenlyObject { cnt: 2, spun: true } is IntContainer { value: 1 }
+Single call with force-early-return on (ID: 37) SuspendSuddenlyObject { cnt: 0, spun: false }
+Will force return of OveriddenReturnValue { id: 18 }
+result for (ID: 37) SuspendSuddenlyObject { cnt: 1, spun: true } is OveriddenReturnValue { id: 18 }
+Test stopped during a native method fails
+NORMAL RUN: Single call with no interference on (ID: 38) NativeCalledObject { cnt: 0 }
+NORMAL RUN: result for (ID: 38) NativeCalledObject { cnt: 2 } is IntContainer { value: 1 }
+Single call with force-early-return on (ID: 39) NativeCalledObject { cnt: 0 }
+Will force return of OveriddenReturnValue { id: 19 }
+Failed to force-return due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+	art.NonStandardExit.forceEarlyReturnObject(Native Method)
+	art.NonStandardExit.forceEarlyReturn(NonStandardExit.java)
+	art.Test1968$TestSuspender.performForceReturn(Test1968.java)
+	art.Test1968.runTestOn(Test1968.java)
+	art.Test1968.runTestOn(Test1968.java)
+	art.Test1968.runTestOn(Test1968.java)
+	art.Test1968.runTests(Test1968.java)
+	<Additional frames hidden>
+
+result for (ID: 39) NativeCalledObject { cnt: 2 } is IntContainer { value: 1 }
+Test stopped in a method called by native succeeds
+NORMAL RUN: Single call with no interference on (ID: 40) NativeCallerObject { cnt: 0 }
+NORMAL RUN: result for (ID: 40) NativeCallerObject { cnt: 2 } is IntContainer { value: 1 }
+Single call with force-early-return on (ID: 41) NativeCallerObject { cnt: 0 }
+Will force return of OveriddenReturnValue { id: 20 }
+result for (ID: 41) NativeCallerObject { cnt: 2 } is OveriddenReturnValue { id: 20 }
+Test stopped in a static method
+NORMAL RUN: Single call with no interference on (ID: 42) StaticMethodObject { cnt: 0 }
+NORMAL RUN: result for (ID: 42) StaticMethodObject { cnt: 2 } is IntContainer { value: 1 }
+Single call with force-early-return on (ID: 43) StaticMethodObject { cnt: 0 }
+Will force return of OveriddenReturnValue { id: 21 }
+result for (ID: 43) StaticMethodObject { cnt: 1 } is OveriddenReturnValue { id: 21 }
+Test force-return of void function fails!
+NORMAL RUN: Single call with no interference on (ID: 44) BadForceVoidObject { cnt: 0 }
+NORMAL RUN: result for (ID: 44) BadForceVoidObject { cnt: 2 } is null
+Single call with force-early-return on (ID: 45) BadForceVoidObject { cnt: 0 }
+Will force return of OveriddenReturnValue { id: 22 }
+Failed to force-return due to java.lang.RuntimeException: JVMTI_ERROR_TYPE_MISMATCH
+	art.NonStandardExit.forceEarlyReturnObject(Native Method)
+	art.NonStandardExit.forceEarlyReturn(NonStandardExit.java)
+	art.Test1968$TestSuspender.performForceReturn(Test1968.java)
+	art.Test1968.runTestOn(Test1968.java)
+	art.Test1968.runTestOn(Test1968.java)
+	art.Test1968.runTestOn(Test1968.java)
+	art.Test1968.runTests(Test1968.java)
+	<Additional frames hidden>
+
+result for (ID: 45) BadForceVoidObject { cnt: 2 } is null
+Test force-return of int function fails!
+NORMAL RUN: Single call with no interference on (ID: 46) BadForceIntObject { cnt: 0 }
+NORMAL RUN: result for (ID: 46) BadForceIntObject { cnt: 2 } is 1
+Single call with force-early-return on (ID: 47) BadForceIntObject { cnt: 0 }
+Will force return of OveriddenReturnValue { id: 23 }
+Failed to force-return due to java.lang.RuntimeException: JVMTI_ERROR_TYPE_MISMATCH
+	art.NonStandardExit.forceEarlyReturnObject(Native Method)
+	art.NonStandardExit.forceEarlyReturn(NonStandardExit.java)
+	art.Test1968$TestSuspender.performForceReturn(Test1968.java)
+	art.Test1968.runTestOn(Test1968.java)
+	art.Test1968.runTestOn(Test1968.java)
+	art.Test1968.runTestOn(Test1968.java)
+	art.Test1968.runTests(Test1968.java)
+	<Additional frames hidden>
+
+result for (ID: 47) BadForceIntObject { cnt: 2 } is 1
diff --git a/test/1968-force-early-return/force_early_return.cc b/test/1968-force-early-return/force_early_return.cc
new file mode 100644
index 0000000..6742165
--- /dev/null
+++ b/test/1968-force-early-return/force_early_return.cc
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+#include <cstdio>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+
+#include "jni.h"
+#include "jvmti.h"
+#include "scoped_local_ref.h"
+#include "scoped_utf_chars.h"
+
+// Test infrastructure
+#include "jni_binder.h"
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
+
+#include "suspend_event_helper.h"
+
+namespace art {
+namespace Test1968ForceEarlyReturn {
+
+extern "C" JNIEXPORT
+jobject JNICALL Java_art_Test1968_00024NativeCalledObject_calledFunction(
+    JNIEnv* env, jobject thiz) {
+  env->PushLocalFrame(4);
+  jclass klass = env->GetObjectClass(thiz);
+  jfieldID cnt = env->GetFieldID(klass, "cnt", "I");
+  env->SetIntField(thiz, cnt, env->GetIntField(thiz, cnt) + 1);
+  jclass int_container_klass = env->FindClass("art/Test1968$IntContainer");
+  jmethodID int_cont_new = env->GetMethodID(int_container_klass, "<init>", "(I)V");
+  jobject res = env->NewObject(int_container_klass, int_cont_new, env->GetIntField(thiz, cnt));
+  env->SetIntField(thiz, cnt, env->GetIntField(thiz, cnt) + 1);
+  void *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(/* thread */ nullptr,
+                                                             reinterpret_cast<void**>(&data)))) {
+    env->PopLocalFrame(nullptr);
+    return nullptr;
+  }
+  if (data != nullptr) {
+    art::common_suspend_event::PerformSuspension(jvmti_env, env);
+  }
+  return env->PopLocalFrame(res);
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1968_00024NativeCallerObject_run(
+    JNIEnv* env, jobject thiz) {
+  env->PushLocalFrame(1);
+  jclass klass = env->GetObjectClass(thiz);
+  jfieldID ret = env->GetFieldID(klass, "returnValue", "Ljava/lang/Object;");
+  jmethodID called = env->GetMethodID(klass, "calledFunction", "()Ljava/lang/Object;");
+  env->SetObjectField(thiz, ret, env->CallObjectMethod(thiz, called));
+  env->PopLocalFrame(nullptr);
+}
+
+}  // namespace Test1968ForceEarlyReturn
+}  // namespace art
+
diff --git a/test/1968-force-early-return/info.txt b/test/1968-force-early-return/info.txt
new file mode 100644
index 0000000..621d881
--- /dev/null
+++ b/test/1968-force-early-return/info.txt
@@ -0,0 +1,4 @@
+Test JVMTI ForceEarlyReturnObject functionality
+
+Checks that we can call the ForceEarlyReturn functions successfully and force
+returns of objects. It also checks some of the basic error modes.
diff --git a/test/1968-force-early-return/run b/test/1968-force-early-return/run
new file mode 100755
index 0000000..d16d4e6
--- /dev/null
+++ b/test/1968-force-early-return/run
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# On RI we need to turn class-load tests off since those events are buggy around
+# pop-frame (see b/116003018).
+ARGS=""
+if [[ "$TEST_RUNTIME" == "jvm" ]]; then
+  ARGS="--args DISABLE_CLASS_LOAD_TESTS"
+fi
+
+./default-run "$@" --jvmti $ARGS
diff --git a/test/1968-force-early-return/src/Main.java b/test/1968-force-early-return/src/Main.java
new file mode 100644
index 0000000..2aa26bf
--- /dev/null
+++ b/test/1968-force-early-return/src/Main.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+import java.util.Arrays;
+import java.util.List;
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1968.run();
+  }
+}
diff --git a/test/1968-force-early-return/src/art/Breakpoint.java b/test/1968-force-early-return/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1968-force-early-return/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1968-force-early-return/src/art/NonStandardExit.java b/test/1968-force-early-return/src/art/NonStandardExit.java
new file mode 120000
index 0000000..d542a3c
--- /dev/null
+++ b/test/1968-force-early-return/src/art/NonStandardExit.java
@@ -0,0 +1 @@
+../../../jvmti-common/NonStandardExit.java
\ No newline at end of file
diff --git a/test/1968-force-early-return/src/art/StackTrace.java b/test/1968-force-early-return/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1968-force-early-return/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1968-force-early-return/src/art/SuspendEvents.java b/test/1968-force-early-return/src/art/SuspendEvents.java
new file mode 120000
index 0000000..f7a5f7e
--- /dev/null
+++ b/test/1968-force-early-return/src/art/SuspendEvents.java
@@ -0,0 +1 @@
+../../../jvmti-common/SuspendEvents.java
\ No newline at end of file
diff --git a/test/1968-force-early-return/src/art/Suspension.java b/test/1968-force-early-return/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1968-force-early-return/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1968-force-early-return/src/art/Test1968.java b/test/1968-force-early-return/src/art/Test1968.java
new file mode 100644
index 0000000..a6aea86
--- /dev/null
+++ b/test/1968-force-early-return/src/art/Test1968.java
@@ -0,0 +1,903 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package art;
+
+import static art.SuspendEvents.setupFieldSuspendFor;
+import static art.SuspendEvents.setupSuspendBreakpointFor;
+import static art.SuspendEvents.setupSuspendExceptionEvent;
+import static art.SuspendEvents.setupSuspendMethodEvent;
+import static art.SuspendEvents.setupSuspendPopFrameEvent;
+import static art.SuspendEvents.setupSuspendSingleStepAt;
+import static art.SuspendEvents.setupTest;
+import static art.SuspendEvents.waitForSuspendHit;
+
+import java.io.*;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.concurrent.CountDownLatch;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+
+public class Test1968 {
+  public static final boolean PRINT_STACK_TRACE = false;
+  public static long OVERRIDE_ID = 0;
+
+  public static final class OveriddenReturnValue {
+    public final Thread target;
+    public final Thread.State state;
+    public final StackTraceElement stack[];
+    public final long id;
+
+    public OveriddenReturnValue(Thread thr) {
+      target = thr;
+      state = thr.getState();
+      stack = thr.getStackTrace();
+      id = OVERRIDE_ID++;
+    }
+
+    public String toString() {
+      String stackTrace =
+          PRINT_STACK_TRACE
+              ? ",\n\tthread: "
+                  + target.toString()
+                  + ",\n\tstate: "
+                  + state
+                  + ",\n\tstack:\n"
+                  + safeDumpStackTrace(stack, "\t\t")
+                  + ",\n\t"
+              : "";
+      return "OveriddenReturnValue { id: " + id + stackTrace + " }";
+    }
+  }
+
+  // Returns a value to be used for the return value of the given thread.
+  public static Object getOveriddenReturnValue(Thread thr) {
+    return new OveriddenReturnValue(thr);
+  }
+
+  public static void doNothing() {}
+
+  public interface TestRunnable extends Runnable {
+    public Object getReturnValue();
+  }
+
+  public static interface TestSuspender {
+    public void setupForceReturnRun(Thread thr);
+
+    public void waitForSuspend(Thread thr);
+
+    public void cleanup(Thread thr);
+
+    public default void performForceReturn(Thread thr) {
+      Object ret = getOveriddenReturnValue(thr);
+      System.out.println("Will force return of " + ret);
+      NonStandardExit.forceEarlyReturn(thr, ret);
+    }
+
+    public default void setupNormalRun(Thread thr) {}
+  }
+
+  public static interface ThreadRunnable {
+    public void run(Thread thr);
+  }
+
+  public static TestSuspender makeSuspend(final ThreadRunnable setup, final ThreadRunnable clean) {
+    return new TestSuspender() {
+      public void setupForceReturnRun(Thread thr) {
+        setup.run(thr);
+      }
+
+      public void waitForSuspend(Thread thr) {
+        waitForSuspendHit(thr);
+      }
+
+      public void cleanup(Thread thr) {
+        clean.run(thr);
+      }
+    };
+  }
+
+  public void runTestOn(Supplier<TestRunnable> testObj, ThreadRunnable su, ThreadRunnable cl)
+      throws Exception {
+    runTestOn(testObj, makeSuspend(su, cl));
+  }
+
+  private static void SafePrintStackTrace(StackTraceElement st[]) {
+    System.out.println(safeDumpStackTrace(st, "\t"));
+  }
+
+  private static String safeDumpStackTrace(StackTraceElement st[], String prefix) {
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream os = new PrintStream(baos);
+    for (StackTraceElement e : st) {
+      os.println(
+          prefix
+              + e.getClassName()
+              + "."
+              + e.getMethodName()
+              + "("
+              + (e.isNativeMethod() ? "Native Method" : e.getFileName())
+              + ")");
+      if (e.getClassName().equals("art.Test1968") && e.getMethodName().equals("runTests")) {
+        os.println(prefix + "<Additional frames hidden>");
+        break;
+      }
+    }
+    os.flush();
+    return baos.toString();
+  }
+
+  static long ID_COUNTER = 0;
+
+  public TestRunnable Id(final TestRunnable tr) {
+    final long my_id = ID_COUNTER++;
+    return new TestRunnable() {
+      public void run() {
+        tr.run();
+      }
+
+      public Object getReturnValue() {
+        return tr.getReturnValue();
+      }
+
+      public String toString() {
+        return "(ID: " + my_id + ") " + tr.toString();
+      }
+    };
+  }
+
+  public static long THREAD_COUNT = 0;
+
+  public Thread mkThread(Runnable r) {
+    Thread t = new Thread(r, "Test1968 target thread - " + THREAD_COUNT++);
+    t.setUncaughtExceptionHandler(
+        (thr, e) -> {
+          System.out.println(
+              "Uncaught exception in thread "
+                  + thr
+                  + " - "
+                  + e.getClass().getName()
+                  + ": "
+                  + e.getLocalizedMessage());
+          SafePrintStackTrace(e.getStackTrace());
+        });
+    return t;
+  }
+
+  final class TestConfig {
+    public final TestRunnable testObj;
+    public final TestSuspender suspender;
+
+    public TestConfig(TestRunnable obj, TestSuspender su) {
+      this.testObj = obj;
+      this.suspender = su;
+    }
+  }
+
+  public void runTestOn(Supplier<TestRunnable> testObjGen, TestSuspender su) throws Exception {
+    runTestOn(() -> new TestConfig(testObjGen.get(), su));
+  }
+
+  public void runTestOn(Supplier<TestConfig> config) throws Exception {
+    TestConfig normal_config = config.get();
+    TestRunnable normal_run = Id(normal_config.testObj);
+    try {
+      System.out.println("NORMAL RUN: Single call with no interference on " + normal_run);
+      Thread normal_thread = mkThread(normal_run);
+      normal_config.suspender.setupNormalRun(normal_thread);
+      normal_thread.start();
+      normal_thread.join();
+      System.out.println(
+          "NORMAL RUN: result for " + normal_run + " is " + normal_run.getReturnValue());
+    } catch (Exception e) {
+      System.out.println("NORMAL RUN: Ended with exception for " + normal_run + "!");
+      e.printStackTrace(System.out);
+    }
+
+    TestConfig force_return_config = config.get();
+    TestRunnable testObj = Id(force_return_config.testObj);
+    TestSuspender su = force_return_config.suspender;
+    System.out.println("Single call with force-early-return on " + testObj);
+    final CountDownLatch continue_latch = new CountDownLatch(1);
+    final CountDownLatch startup_latch = new CountDownLatch(1);
+    Runnable await =
+        () -> {
+          try {
+            startup_latch.countDown();
+            continue_latch.await();
+          } catch (Exception e) {
+            throw new Error("Failed to await latch", e);
+          }
+        };
+    Thread thr =
+        mkThread(
+            () -> {
+              await.run();
+              testObj.run();
+            });
+    thr.start();
+
+    // Wait until the other thread is started.
+    startup_latch.await();
+
+    // Do any final setup.
+    preTest.accept(testObj);
+
+    // Setup suspension method on the thread.
+    su.setupForceReturnRun(thr);
+
+    // Let the other thread go.
+    continue_latch.countDown();
+
+    // Wait for the other thread to hit the breakpoint/watchpoint/whatever and
+    // suspend itself
+    // (without re-entering java)
+    su.waitForSuspend(thr);
+
+    // Cleanup the breakpoint/watchpoint/etc.
+    su.cleanup(thr);
+
+    try {
+      // Pop the frame.
+      su.performForceReturn(thr);
+    } catch (Exception e) {
+      System.out.println("Failed to force-return due to " + e);
+      SafePrintStackTrace(e.getStackTrace());
+    }
+
+    // Start the other thread going again.
+    Suspension.resume(thr);
+
+    // Wait for the other thread to finish.
+    thr.join();
+
+    // See how many times calledFunction was called.
+    System.out.println("result for " + testObj + " is " + testObj.getReturnValue());
+  }
+
+  public abstract static class AbstractTestObject implements TestRunnable {
+    private Object resultObject;
+
+    public AbstractTestObject() {
+      resultObject = null;
+    }
+
+    public Object getReturnValue() {
+      return resultObject;
+    }
+
+    public void run() {
+      // This function should have it's return-value replaced by force-early-return.
+      resultObject = calledFunction();
+    }
+
+    public abstract Object calledFunction();
+  }
+
+  public static class IntContainer {
+    private final int value;
+
+    public IntContainer(int i) {
+      value = i;
+    }
+
+    public String toString() {
+      return "IntContainer { value: " + value + " }";
+    }
+  }
+
+  public static class FieldBasedTestObject extends AbstractTestObject implements Runnable {
+    public int TARGET_FIELD;
+
+    public FieldBasedTestObject() {
+      super();
+      TARGET_FIELD = 0;
+    }
+
+    public Object calledFunction() {
+      // We put a watchpoint here and force-early-return when we are at it.
+      TARGET_FIELD += 10;
+      return new IntContainer(TARGET_FIELD);
+    }
+
+    public String toString() {
+      return "FieldBasedTestObject { TARGET_FIELD: " + TARGET_FIELD + " }";
+    }
+  }
+
+  public static class StandardTestObject extends AbstractTestObject implements Runnable {
+    public int cnt;
+
+    public StandardTestObject() {
+      super();
+      cnt = 0;
+    }
+
+    public Object calledFunction() {
+      cnt++; // line +0
+      // We put a breakpoint here and PopFrame when we are at it.
+      Object result = new IntContainer(cnt); // line +2
+      cnt++; // line +3
+      return result; // line +4
+    }
+
+    public String toString() {
+      return "StandardTestObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class SynchronizedFunctionTestObject extends AbstractTestObject
+      implements Runnable {
+    public int cnt;
+
+    public SynchronizedFunctionTestObject() {
+      super();
+      cnt = 0;
+    }
+
+    public synchronized Object calledFunction() {
+      cnt++; // line +0
+      // We put a breakpoint here and PopFrame when we are at it.
+      Object result = new IntContainer(cnt); // line +2
+      cnt++; // line +3
+      return result;
+    }
+
+    public String toString() {
+      return "SynchronizedFunctionTestObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class SynchronizedTestObject extends AbstractTestObject implements Runnable {
+    public final Object lock;
+    public int cnt;
+
+    public SynchronizedTestObject() {
+      this(new Object());
+    }
+
+    public SynchronizedTestObject(Object lock) {
+      super();
+      this.lock = lock;
+      cnt = 0;
+    }
+
+    public Object calledFunction() {
+      synchronized (lock) { // line +0
+        cnt++; // line +1
+        // We put a breakpoint here and PopFrame when we are at it.
+        Object result = new IntContainer(cnt); // line +3
+        cnt++; // line +4
+        return result; // line +5
+      }
+    }
+
+    public String toString() {
+      return "SynchronizedTestObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class ExceptionCatchTestObject extends AbstractTestObject implements Runnable {
+    public static class TestError extends Error {}
+
+    public int cnt;
+
+    public ExceptionCatchTestObject() {
+      super();
+      cnt = 0;
+    }
+
+    public Object calledFunction() {
+      cnt++;
+      Object result = new IntContainer(cnt);
+      try {
+        doThrow();
+        cnt += 100;
+      } catch (TestError e) {
+        System.out.println(e.getClass().getName() + " caught in called function.");
+        cnt++;
+      }
+      return result;
+    }
+
+    public Object doThrow() {
+      throw new TestError();
+    }
+
+    public String toString() {
+      return "ExceptionCatchTestObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class ExceptionThrowFarTestObject implements TestRunnable {
+    public static class TestError extends Error {}
+
+    public int cnt;
+    public int baseCallCnt;
+    public final boolean catchInCalled;
+    public Object result;
+
+    public ExceptionThrowFarTestObject(boolean catchInCalled) {
+      super();
+      cnt = 0;
+      baseCallCnt = 0;
+      this.catchInCalled = catchInCalled;
+    }
+
+    public void run() {
+      baseCallCnt++;
+      try {
+        result = callingFunction();
+      } catch (TestError e) {
+        System.out.println(e.getClass().getName() + " thrown and caught!");
+      }
+      baseCallCnt++;
+    }
+
+    public Object callingFunction() {
+      return calledFunction();
+    }
+
+    public Object calledFunction() {
+      cnt++;
+      if (catchInCalled) {
+        try {
+          cnt += 100;
+          throw new TestError(); // We put a watch here.
+        } catch (TestError e) {
+          System.out.println(e.getClass().getName() + " caught in same function.");
+          Object result = new IntContainer(cnt);
+          cnt += 10;
+          return result;
+        }
+      } else {
+        cnt++;
+        throw new TestError(); // We put a watch here.
+      }
+    }
+
+    public String toString() {
+      return "ExceptionThrowFarTestObject { cnt: " + cnt + ", baseCnt: " + baseCallCnt + " }";
+    }
+
+    @Override
+    public Object getReturnValue() {
+      return result;
+    }
+  }
+
+  public static class ExceptionOnceObject extends AbstractTestObject {
+    public static final class TestError extends Error {}
+
+    public int cnt;
+    public final boolean throwInSub;
+
+    public ExceptionOnceObject(boolean throwInSub) {
+      super();
+      cnt = 0;
+      this.throwInSub = throwInSub;
+    }
+
+    public Object calledFunction() {
+      cnt++;
+      if (cnt == 1) {
+        if (throwInSub) {
+          return doThrow();
+        } else {
+          throw new TestError();
+        }
+      }
+      return new IntContainer(cnt++);
+    }
+
+    public Object doThrow() {
+      throw new TestError();
+    }
+
+    public String toString() {
+      return "ExceptionOnceObject { cnt: " + cnt + ", throwInSub: " + throwInSub + " }";
+    }
+  }
+
+  public static class ExceptionThrowTestObject implements TestRunnable {
+    public static class TestError extends Error {}
+
+    public Object getReturnValue() {
+      return result;
+    }
+
+    public int cnt;
+    public int baseCallCnt;
+    public final boolean catchInCalled;
+    public Object result;
+
+    public ExceptionThrowTestObject(boolean catchInCalled) {
+      super();
+      cnt = 0;
+      baseCallCnt = 0;
+      this.catchInCalled = catchInCalled;
+    }
+
+    public void run() {
+      baseCallCnt++;
+      try {
+        result = calledFunction();
+      } catch (TestError e) {
+        System.out.println(e.getClass().getName() + " thrown and caught!");
+      }
+      baseCallCnt++;
+    }
+
+    public Object calledFunction() {
+      cnt++;
+      if (catchInCalled) {
+        try {
+          cnt += 10;
+          throw new TestError(); // We put a watch here.
+        } catch (TestError e) {
+          System.out.println(e.getClass().getName() + " caught in same function.");
+          Object result = new IntContainer(cnt);
+          cnt += 100;
+          return result;
+        }
+      } else {
+        cnt += 1;
+        throw new TestError(); // We put a watch here.
+      }
+    }
+
+    public String toString() {
+      return "ExceptionThrowTestObject { cnt: " + cnt + ", baseCnt: " + baseCallCnt + " }";
+    }
+  }
+
+  public static class NativeCalledObject extends AbstractTestObject {
+    public int cnt = 0;
+
+    public native Object calledFunction();
+
+    public String toString() {
+      return "NativeCalledObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class NativeCallerObject implements TestRunnable {
+    public Object returnValue = null;
+    public int cnt = 0;
+
+    public Object getReturnValue() {
+      return returnValue;
+    }
+
+    public native void run();
+
+    public Object calledFunction() {
+      cnt++;
+      // We will stop using a MethodExit event.
+      Object res = new IntContainer(cnt);
+      cnt++;
+      return res;
+    }
+
+    public String toString() {
+      return "NativeCallerObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class StaticMethodObject implements TestRunnable {
+    public int cnt = 0;
+    public Object result = null;
+    public Object getReturnValue() {
+      return result;
+    }
+
+    public static Object calledFunction(Supplier<Object> incr) {
+      Object res = incr.get(); // line +0
+      // We put a breakpoint here to force the return.
+      doNothing();  // line +2
+      incr.get();   // line +3
+      return res;   // line +4
+    }
+
+    public void run() {
+      result = calledFunction(() -> new IntContainer(++cnt));
+    }
+
+    public String toString() {
+      return "StaticMethodObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class SuspendSuddenlyObject extends AbstractTestObject {
+    public volatile boolean should_spin = true;
+    public volatile boolean is_spinning = false;
+    public int cnt = 0;
+
+    public Object calledFunction() {
+      cnt++;
+      do {
+        is_spinning = true;
+      } while (should_spin);
+      return new IntContainer(cnt++);
+    }
+
+    public String toString() {
+      return "SuspendSuddenlyObject { cnt: " + cnt + ", spun: " + is_spinning + " }";
+    }
+  }
+
+  public static class BadForceVoidObject implements TestRunnable {
+    public int cnt = 0;
+    public Object getReturnValue() {
+      return null;
+    }
+    public void run() {
+      incrCnt();
+    }
+    public void incrCnt() {
+      ++cnt;  // line +0
+      // We set a breakpoint here and try to force-early-return.
+      doNothing(); // line +2
+      ++cnt;  // line +3
+    }
+    public String toString() {
+      return "BadForceVoidObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class BadForceIntObject implements TestRunnable {
+    public int cnt = 0;
+    public int result = 0;
+    public Object getReturnValue() {
+      return Integer.valueOf(result);
+    }
+    public void run() {
+      result = incrCnt();
+    }
+    public int incrCnt() {
+      ++cnt;  // line +0
+      // We set a breakpoint here and try to force-early-return.
+      int res = cnt;  // line +2
+      ++cnt;  // line +3
+      return res;
+    }
+    public String toString() {
+      return "BadForceIntObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static void run() throws Exception {
+    new Test1968((x) -> {}).runTests();
+  }
+
+  public Test1968(Consumer<TestRunnable> preTest) {
+    this.preTest = preTest;
+  }
+
+  private Consumer<TestRunnable> preTest;
+
+  public static void no_runTestOn(Supplier<Object> a, ThreadRunnable b, ThreadRunnable c) {}
+
+  public void runTests() throws Exception {
+    setupTest();
+
+    final Method calledFunction = StandardTestObject.class.getDeclaredMethod("calledFunction");
+    // Add a breakpoint on the second line after the start of the function
+    final int line = Breakpoint.locationToLine(calledFunction, 0) + 2;
+    final long loc = Breakpoint.lineToLocation(calledFunction, line);
+    System.out.println("Test stopped using breakpoint");
+    runTestOn(
+        StandardTestObject::new,
+        (thr) -> setupSuspendBreakpointFor(calledFunction, loc, thr),
+        SuspendEvents::clearSuspendBreakpointFor);
+
+    final Method syncFunctionCalledFunction =
+        SynchronizedFunctionTestObject.class.getDeclaredMethod("calledFunction");
+    // Add a breakpoint on the second line after the start of the function Annoyingly r8 generally
+    // has the first instruction (a monitor enter) not be marked as being on any line but javac has
+    // it marked as being on the first line of the function. Just use the second entry on the
+    // line-number table to get the breakpoint. This should be good for both.
+    final long syncFunctionLoc =
+        Breakpoint.getLineNumberTable(syncFunctionCalledFunction)[1].location;
+    System.out.println("Test stopped using breakpoint with declared synchronized function");
+    runTestOn(
+        SynchronizedFunctionTestObject::new,
+        (thr) -> setupSuspendBreakpointFor(syncFunctionCalledFunction, syncFunctionLoc, thr),
+        SuspendEvents::clearSuspendBreakpointFor);
+
+    final Method syncCalledFunction =
+        SynchronizedTestObject.class.getDeclaredMethod("calledFunction");
+    // Add a breakpoint on the second line after the start of the function
+    final int syncLine = Breakpoint.locationToLine(syncCalledFunction, 0) + 3;
+    final long syncLoc = Breakpoint.lineToLocation(syncCalledFunction, syncLine);
+    System.out.println("Test stopped using breakpoint with synchronized block");
+    final Object lockObj = new Object();
+    runTestOn(
+        () -> new SynchronizedTestObject(lockObj),
+        (thr) -> setupSuspendBreakpointFor(syncCalledFunction, syncLoc, thr),
+        SuspendEvents::clearSuspendBreakpointFor);
+    // Make sure we can still lock the object.
+    synchronized (lockObj) { }
+
+    System.out.println("Test stopped on single step");
+    runTestOn(
+        StandardTestObject::new,
+        (thr) -> setupSuspendSingleStepAt(calledFunction, loc, thr),
+        SuspendEvents::clearSuspendSingleStepFor);
+
+    final Field target_field = FieldBasedTestObject.class.getDeclaredField("TARGET_FIELD");
+    System.out.println("Test stopped on field access");
+    runTestOn(
+        FieldBasedTestObject::new,
+        (thr) -> setupFieldSuspendFor(FieldBasedTestObject.class, target_field, true, thr),
+        SuspendEvents::clearFieldSuspendFor);
+
+    System.out.println("Test stopped on field modification");
+    runTestOn(
+        FieldBasedTestObject::new,
+        (thr) -> setupFieldSuspendFor(FieldBasedTestObject.class, target_field, false, thr),
+        SuspendEvents::clearFieldSuspendFor);
+
+    System.out.println("Test stopped during Method Exit of calledFunction");
+    runTestOn(
+        StandardTestObject::new,
+        (thr) -> setupSuspendMethodEvent(calledFunction, /* enter */ false, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    System.out.println("Test stopped during Method Enter of calledFunction");
+    runTestOn(
+        StandardTestObject::new,
+        (thr) -> setupSuspendMethodEvent(calledFunction, /* enter */ true, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    final Method exceptionOnceCalledMethod =
+        ExceptionOnceObject.class.getDeclaredMethod("calledFunction");
+    System.out.println("Test stopped during Method Exit due to exception thrown in same function");
+    runTestOn(
+        () -> new ExceptionOnceObject(/* throwInSub */ false),
+        (thr) -> setupSuspendMethodEvent(exceptionOnceCalledMethod, /* enter */ false, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    System.out.println("Test stopped during Method Exit due to exception thrown in subroutine");
+    runTestOn(
+        () -> new ExceptionOnceObject(/* throwInSub */ true),
+        (thr) -> setupSuspendMethodEvent(exceptionOnceCalledMethod, /* enter */ false, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    final Method exceptionThrowCalledMethod =
+        ExceptionThrowTestObject.class.getDeclaredMethod("calledFunction");
+    System.out.println(
+        "Test stopped during notifyFramePop with exception on pop of calledFunction");
+    runTestOn(
+        () -> new ExceptionThrowTestObject(false),
+        (thr) -> setupSuspendPopFrameEvent(0, exceptionThrowCalledMethod, thr),
+        SuspendEvents::clearSuspendPopFrameEvent);
+
+    final Method exceptionCatchThrowMethod =
+        ExceptionCatchTestObject.class.getDeclaredMethod("doThrow");
+    System.out.println("Test stopped during notifyFramePop with exception on pop of doThrow");
+    runTestOn(
+        ExceptionCatchTestObject::new,
+        (thr) -> setupSuspendPopFrameEvent(0, exceptionCatchThrowMethod, thr),
+        SuspendEvents::clearSuspendPopFrameEvent);
+
+    System.out.println(
+        "Test stopped during ExceptionCatch event of calledFunction "
+            + "(catch in called function, throw in called function)");
+    runTestOn(
+        () -> new ExceptionThrowTestObject(true),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowCalledMethod, /* catch */ true, thr),
+        SuspendEvents::clearSuspendExceptionEvent);
+
+    final Method exceptionCatchCalledMethod =
+        ExceptionCatchTestObject.class.getDeclaredMethod("calledFunction");
+    System.out.println(
+        "Test stopped during ExceptionCatch event of calledFunction "
+            + "(catch in called function, throw in subroutine)");
+    runTestOn(
+        ExceptionCatchTestObject::new,
+        (thr) -> setupSuspendExceptionEvent(exceptionCatchCalledMethod, /* catch */ true, thr),
+        SuspendEvents::clearSuspendExceptionEvent);
+
+    System.out.println(
+        "Test stopped during Exception event of calledFunction " + "(catch in calling function)");
+    runTestOn(
+        () -> new ExceptionThrowTestObject(false),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowCalledMethod, /* catch */ false, thr),
+        SuspendEvents::clearSuspendExceptionEvent);
+
+    System.out.println(
+        "Test stopped during Exception event of calledFunction (catch in called function)");
+    runTestOn(
+        () -> new ExceptionThrowTestObject(true),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowCalledMethod, /* catch */ false, thr),
+        SuspendEvents::clearSuspendExceptionEvent);
+
+    final Method exceptionThrowFarCalledMethod =
+        ExceptionThrowFarTestObject.class.getDeclaredMethod("calledFunction");
+    System.out.println(
+        "Test stopped during Exception event of calledFunction "
+            + "(catch in parent of calling function)");
+    runTestOn(
+        () -> new ExceptionThrowFarTestObject(false),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowFarCalledMethod, /* catch */ false, thr),
+        SuspendEvents::clearSuspendExceptionEvent);
+
+    System.out.println(
+        "Test stopped during Exception event of calledFunction " + "(catch in called function)");
+    runTestOn(
+        () -> new ExceptionThrowFarTestObject(true),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowFarCalledMethod, /* catch */ false, thr),
+        SuspendEvents::clearSuspendExceptionEvent);
+
+    System.out.println("Test stopped during random Suspend.");
+    runTestOn(() -> {
+      final SuspendSuddenlyObject sso = new SuspendSuddenlyObject();
+      return new TestConfig(sso, new TestSuspender() {
+        public void setupForceReturnRun(Thread thr) { }
+        public void setupNormalRun(Thread thr) {
+          sso.should_spin = false;
+        }
+
+        public void waitForSuspend(Thread thr) {
+          while (!sso.is_spinning) { }
+          Suspension.suspend(thr);
+        }
+
+        public void cleanup(Thread thr) { }
+      });
+    });
+
+    System.out.println("Test stopped during a native method fails");
+    runTestOn(
+        NativeCalledObject::new,
+        SuspendEvents::setupWaitForNativeCall,
+        SuspendEvents::clearWaitForNativeCall);
+
+    System.out.println("Test stopped in a method called by native succeeds");
+    final Method nativeCallerMethod = NativeCallerObject.class.getDeclaredMethod("calledFunction");
+    runTestOn(
+        NativeCallerObject::new,
+        (thr) -> setupSuspendMethodEvent(nativeCallerMethod, /* enter */ false, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    System.out.println("Test stopped in a static method");
+    final Method staticCalledMethod = StaticMethodObject.class.getDeclaredMethod("calledFunction", Supplier.class);
+    final int staticFunctionLine= Breakpoint.locationToLine(staticCalledMethod, 0) + 2;
+    final long staticFunctionLoc = Breakpoint.lineToLocation(staticCalledMethod, staticFunctionLine);
+    runTestOn(
+        StaticMethodObject::new,
+        (thr) -> setupSuspendBreakpointFor(staticCalledMethod, staticFunctionLoc, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    System.out.println("Test force-return of void function fails!");
+    final Method voidFunction = BadForceVoidObject.class.getDeclaredMethod("incrCnt");
+    final int voidLine = Breakpoint.locationToLine(voidFunction, 0) + 2;
+    final long voidLoc = Breakpoint.lineToLocation(voidFunction, voidLine);
+    runTestOn(
+        BadForceVoidObject::new,
+        (thr) -> setupSuspendBreakpointFor(voidFunction, voidLoc, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    System.out.println("Test force-return of int function fails!");
+    final Method intFunction = BadForceIntObject.class.getDeclaredMethod("incrCnt");
+    final int intLine = Breakpoint.locationToLine(intFunction, 0) + 2;
+    final long intLoc = Breakpoint.lineToLocation(intFunction, intLine);
+    runTestOn(
+        BadForceIntObject::new,
+        (thr) -> setupSuspendBreakpointFor(intFunction, intLoc, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+  }
+}
diff --git a/test/1969-force-early-return-void/check b/test/1969-force-early-return-void/check
new file mode 100755
index 0000000..d552272
--- /dev/null
+++ b/test/1969-force-early-return-void/check
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The RI has restrictions and bugs around some PopFrame behavior that ART lacks.
+# See b/116003018. Some configurations cannot handle the class load events in
+# quite the right way so they are disabled there too.
+./default-check "$@" || \
+  (patch -p0 expected.txt < class-loading-expected.patch >/dev/null && ./default-check "$@")
diff --git a/test/1969-force-early-return-void/class-loading-expected.patch b/test/1969-force-early-return-void/class-loading-expected.patch
new file mode 100644
index 0000000..5e13595
--- /dev/null
+++ b/test/1969-force-early-return-void/class-loading-expected.patch
@@ -0,0 +1,35 @@
+178a179,212
+> Test stopped during class-load.
+> NORMAL RUN: Single call with no interference on (ID: 46) ClassLoadObject { cnt: 0, curClass: 0}
+> TC0.foo == 100
+> NORMAL RUN: result for (ID: 46) ClassLoadObject { cnt: 1, curClass: 1} on Test1969 target thread - 46
+> Single call with force-early-return on (ID: 47) ClassLoadObject { cnt: 0, curClass: 1}
+> Will force return of Test1969 target thread - 47
+> Failed to force-return due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+> 	art.NonStandardExit.forceEarlyReturnVoid(Native Method)
+> 	art.Test1969$TestSuspender.performForceReturn(Test1969.java)
+> 	art.Test1969.runTestOn(Test1969.java)
+> 	art.Test1969.runTestOn(Test1969.java)
+> 	art.Test1969.runTestOn(Test1969.java)
+> 	art.Test1969.runTests(Test1969.java)
+> 	<Additional frames hidden>
+> 
+> TC1.foo == 201
+> result for (ID: 47) ClassLoadObject { cnt: 1, curClass: 2} on Test1969 target thread - 47
+> Test stopped during class-load.
+> NORMAL RUN: Single call with no interference on (ID: 48) ClassLoadObject { cnt: 0, curClass: 2}
+> TC2.foo == 302
+> NORMAL RUN: result for (ID: 48) ClassLoadObject { cnt: 1, curClass: 3} on Test1969 target thread - 48
+> Single call with force-early-return on (ID: 49) ClassLoadObject { cnt: 0, curClass: 3}
+> Will force return of Test1969 target thread - 49
+> Failed to force-return due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+> 	art.NonStandardExit.forceEarlyReturnVoid(Native Method)
+> 	art.Test1969$TestSuspender.performForceReturn(Test1969.java)
+> 	art.Test1969.runTestOn(Test1969.java)
+> 	art.Test1969.runTestOn(Test1969.java)
+> 	art.Test1969.runTestOn(Test1969.java)
+> 	art.Test1969.runTests(Test1969.java)
+> 	<Additional frames hidden>
+> 
+> TC3.foo == 403
+> result for (ID: 49) ClassLoadObject { cnt: 1, curClass: 4} on Test1969 target thread - 49
diff --git a/test/1969-force-early-return-void/expected.txt b/test/1969-force-early-return-void/expected.txt
new file mode 100644
index 0000000..fc685b4
--- /dev/null
+++ b/test/1969-force-early-return-void/expected.txt
@@ -0,0 +1,178 @@
+Test stopped using breakpoint
+NORMAL RUN: Single call with no interference on (ID: 0) StandardTestObject { cnt: 0 }
+NORMAL RUN: result for (ID: 0) StandardTestObject { cnt: 2 } on Test1969 target thread - 0
+Single call with force-early-return on (ID: 1) StandardTestObject { cnt: 0 }
+Will force return of Test1969 target thread - 1
+result for (ID: 1) StandardTestObject { cnt: 1 } on Test1969 target thread - 1
+Test stopped using breakpoint with declared synchronized function
+NORMAL RUN: Single call with no interference on (ID: 2) SynchronizedFunctionTestObject { cnt: 0 }
+NORMAL RUN: result for (ID: 2) SynchronizedFunctionTestObject { cnt: 2 } on Test1969 target thread - 2
+Single call with force-early-return on (ID: 3) SynchronizedFunctionTestObject { cnt: 0 }
+Will force return of Test1969 target thread - 3
+result for (ID: 3) SynchronizedFunctionTestObject { cnt: 1 } on Test1969 target thread - 3
+Test stopped using breakpoint with synchronized block
+NORMAL RUN: Single call with no interference on (ID: 4) SynchronizedTestObject { cnt: 0 }
+NORMAL RUN: result for (ID: 4) SynchronizedTestObject { cnt: 2 } on Test1969 target thread - 4
+Single call with force-early-return on (ID: 5) SynchronizedTestObject { cnt: 0 }
+Will force return of Test1969 target thread - 5
+result for (ID: 5) SynchronizedTestObject { cnt: 1 } on Test1969 target thread - 5
+Test stopped on single step
+NORMAL RUN: Single call with no interference on (ID: 6) StandardTestObject { cnt: 0 }
+NORMAL RUN: result for (ID: 6) StandardTestObject { cnt: 2 } on Test1969 target thread - 6
+Single call with force-early-return on (ID: 7) StandardTestObject { cnt: 0 }
+Will force return of Test1969 target thread - 7
+result for (ID: 7) StandardTestObject { cnt: 1 } on Test1969 target thread - 7
+Test stopped on field access
+NORMAL RUN: Single call with no interference on (ID: 8) FieldBasedTestObject { TARGET_FIELD: 0, cnt: 0 }
+NORMAL RUN: result for (ID: 8) FieldBasedTestObject { TARGET_FIELD: 10, cnt: 2 } on Test1969 target thread - 8
+Single call with force-early-return on (ID: 9) FieldBasedTestObject { TARGET_FIELD: 0, cnt: 0 }
+Will force return of Test1969 target thread - 9
+result for (ID: 9) FieldBasedTestObject { TARGET_FIELD: 0, cnt: 1 } on Test1969 target thread - 9
+Test stopped on field modification
+NORMAL RUN: Single call with no interference on (ID: 10) FieldBasedTestObject { TARGET_FIELD: 0, cnt: 0 }
+NORMAL RUN: result for (ID: 10) FieldBasedTestObject { TARGET_FIELD: 10, cnt: 2 } on Test1969 target thread - 10
+Single call with force-early-return on (ID: 11) FieldBasedTestObject { TARGET_FIELD: 0, cnt: 0 }
+Will force return of Test1969 target thread - 11
+result for (ID: 11) FieldBasedTestObject { TARGET_FIELD: 0, cnt: 1 } on Test1969 target thread - 11
+Test stopped during Method Exit of calledFunction
+NORMAL RUN: Single call with no interference on (ID: 12) StandardTestObject { cnt: 0 }
+NORMAL RUN: result for (ID: 12) StandardTestObject { cnt: 2 } on Test1969 target thread - 12
+Single call with force-early-return on (ID: 13) StandardTestObject { cnt: 0 }
+Will force return of Test1969 target thread - 13
+result for (ID: 13) StandardTestObject { cnt: 2 } on Test1969 target thread - 13
+Test stopped during Method Enter of calledFunction
+NORMAL RUN: Single call with no interference on (ID: 14) StandardTestObject { cnt: 0 }
+NORMAL RUN: result for (ID: 14) StandardTestObject { cnt: 2 } on Test1969 target thread - 14
+Single call with force-early-return on (ID: 15) StandardTestObject { cnt: 0 }
+Will force return of Test1969 target thread - 15
+result for (ID: 15) StandardTestObject { cnt: 0 } on Test1969 target thread - 15
+Test stopped during Method Exit due to exception thrown in same function
+NORMAL RUN: Single call with no interference on (ID: 16) ExceptionOnceObject { cnt: 0, throwInSub: false }
+Uncaught exception in thread Thread[Test1969 target thread - 16,5,main] - art.Test1969$ExceptionOnceObject$TestError: null
+	art.Test1969$ExceptionOnceObject.calledFunction(Test1969.java)
+	art.Test1969$AbstractTestObject.run(Test1969.java)
+	art.Test1969$2.run(Test1969.java)
+	java.lang.Thread.run(Thread.java)
+
+NORMAL RUN: result for (ID: 16) ExceptionOnceObject { cnt: 1, throwInSub: false } on Test1969 target thread - 16
+Single call with force-early-return on (ID: 17) ExceptionOnceObject { cnt: 0, throwInSub: false }
+Will force return of Test1969 target thread - 17
+result for (ID: 17) ExceptionOnceObject { cnt: 1, throwInSub: false } on Test1969 target thread - 17
+Test stopped during Method Exit due to exception thrown in subroutine
+NORMAL RUN: Single call with no interference on (ID: 18) ExceptionOnceObject { cnt: 0, throwInSub: true }
+Uncaught exception in thread Thread[Test1969 target thread - 18,5,main] - art.Test1969$ExceptionOnceObject$TestError: null
+	art.Test1969$ExceptionOnceObject.doThrow(Test1969.java)
+	art.Test1969$ExceptionOnceObject.calledFunction(Test1969.java)
+	art.Test1969$AbstractTestObject.run(Test1969.java)
+	art.Test1969$2.run(Test1969.java)
+	java.lang.Thread.run(Thread.java)
+
+NORMAL RUN: result for (ID: 18) ExceptionOnceObject { cnt: 1, throwInSub: true } on Test1969 target thread - 18
+Single call with force-early-return on (ID: 19) ExceptionOnceObject { cnt: 0, throwInSub: true }
+Will force return of Test1969 target thread - 19
+result for (ID: 19) ExceptionOnceObject { cnt: 1, throwInSub: true } on Test1969 target thread - 19
+Test stopped during notifyFramePop with exception on pop of calledFunction
+NORMAL RUN: Single call with no interference on (ID: 20) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+art.Test1969$ExceptionThrowTestObject$TestError thrown and caught!
+NORMAL RUN: result for (ID: 20) ExceptionThrowTestObject { cnt: 2, baseCnt: 2 } on Test1969 target thread - 20
+Single call with force-early-return on (ID: 21) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+Will force return of Test1969 target thread - 21
+result for (ID: 21) ExceptionThrowTestObject { cnt: 2, baseCnt: 2 } on Test1969 target thread - 21
+Test stopped during notifyFramePop with exception on pop of doThrow
+NORMAL RUN: Single call with no interference on (ID: 22) ExceptionCatchTestObject { cnt: 0 }
+art.Test1969$ExceptionCatchTestObject$TestError caught in called function.
+NORMAL RUN: result for (ID: 22) ExceptionCatchTestObject { cnt: 2 } on Test1969 target thread - 22
+Single call with force-early-return on (ID: 23) ExceptionCatchTestObject { cnt: 0 }
+Will force return of Test1969 target thread - 23
+Failed to force-return due to java.lang.RuntimeException: JVMTI_ERROR_TYPE_MISMATCH
+	art.NonStandardExit.forceEarlyReturnVoid(Native Method)
+	art.Test1969$TestSuspender.performForceReturn(Test1969.java)
+	art.Test1969.runTestOn(Test1969.java)
+	art.Test1969.runTestOn(Test1969.java)
+	art.Test1969.runTestOn(Test1969.java)
+	art.Test1969.runTests(Test1969.java)
+	<Additional frames hidden>
+
+art.Test1969$ExceptionCatchTestObject$TestError caught in called function.
+result for (ID: 23) ExceptionCatchTestObject { cnt: 2 } on Test1969 target thread - 23
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in called function)
+NORMAL RUN: Single call with no interference on (ID: 24) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+art.Test1969$ExceptionThrowTestObject$TestError caught in same function.
+NORMAL RUN: result for (ID: 24) ExceptionThrowTestObject { cnt: 111, baseCnt: 2 } on Test1969 target thread - 24
+Single call with force-early-return on (ID: 25) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+Will force return of Test1969 target thread - 25
+result for (ID: 25) ExceptionThrowTestObject { cnt: 11, baseCnt: 2 } on Test1969 target thread - 25
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in subroutine)
+NORMAL RUN: Single call with no interference on (ID: 26) ExceptionCatchTestObject { cnt: 0 }
+art.Test1969$ExceptionCatchTestObject$TestError caught in called function.
+NORMAL RUN: result for (ID: 26) ExceptionCatchTestObject { cnt: 2 } on Test1969 target thread - 26
+Single call with force-early-return on (ID: 27) ExceptionCatchTestObject { cnt: 0 }
+Will force return of Test1969 target thread - 27
+result for (ID: 27) ExceptionCatchTestObject { cnt: 1 } on Test1969 target thread - 27
+Test stopped during Exception event of calledFunction (catch in calling function)
+NORMAL RUN: Single call with no interference on (ID: 28) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+art.Test1969$ExceptionThrowTestObject$TestError thrown and caught!
+NORMAL RUN: result for (ID: 28) ExceptionThrowTestObject { cnt: 2, baseCnt: 2 } on Test1969 target thread - 28
+Single call with force-early-return on (ID: 29) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+Will force return of Test1969 target thread - 29
+result for (ID: 29) ExceptionThrowTestObject { cnt: 2, baseCnt: 2 } on Test1969 target thread - 29
+Test stopped during Exception event of calledFunction (catch in called function)
+NORMAL RUN: Single call with no interference on (ID: 30) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+art.Test1969$ExceptionThrowTestObject$TestError caught in same function.
+NORMAL RUN: result for (ID: 30) ExceptionThrowTestObject { cnt: 111, baseCnt: 2 } on Test1969 target thread - 30
+Single call with force-early-return on (ID: 31) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+Will force return of Test1969 target thread - 31
+result for (ID: 31) ExceptionThrowTestObject { cnt: 11, baseCnt: 2 } on Test1969 target thread - 31
+Test stopped during Exception event of calledFunction (catch in parent of calling function)
+NORMAL RUN: Single call with no interference on (ID: 32) ExceptionThrowFarTestObject { cnt: 0, baseCnt: 0 }
+art.Test1969$ExceptionThrowFarTestObject$TestError thrown and caught!
+NORMAL RUN: result for (ID: 32) ExceptionThrowFarTestObject { cnt: 2, baseCnt: 2 } on Test1969 target thread - 32
+Single call with force-early-return on (ID: 33) ExceptionThrowFarTestObject { cnt: 0, baseCnt: 0 }
+Will force return of Test1969 target thread - 33
+result for (ID: 33) ExceptionThrowFarTestObject { cnt: 2, baseCnt: 2 } on Test1969 target thread - 33
+Test stopped during Exception event of calledFunction (catch in called function)
+NORMAL RUN: Single call with no interference on (ID: 34) ExceptionThrowFarTestObject { cnt: 0, baseCnt: 0 }
+art.Test1969$ExceptionThrowFarTestObject$TestError caught in same function.
+NORMAL RUN: result for (ID: 34) ExceptionThrowFarTestObject { cnt: 111, baseCnt: 2 } on Test1969 target thread - 34
+Single call with force-early-return on (ID: 35) ExceptionThrowFarTestObject { cnt: 0, baseCnt: 0 }
+Will force return of Test1969 target thread - 35
+result for (ID: 35) ExceptionThrowFarTestObject { cnt: 101, baseCnt: 2 } on Test1969 target thread - 35
+Test stopped during random Suspend.
+NORMAL RUN: Single call with no interference on (ID: 36) SuspendSuddenlyObject { cnt: 0, spun: false }
+NORMAL RUN: result for (ID: 36) SuspendSuddenlyObject { cnt: 2, spun: true } on Test1969 target thread - 36
+Single call with force-early-return on (ID: 37) SuspendSuddenlyObject { cnt: 0, spun: false }
+Will force return of Test1969 target thread - 37
+result for (ID: 37) SuspendSuddenlyObject { cnt: 1, spun: true } on Test1969 target thread - 37
+Test stopped during a native method fails
+NORMAL RUN: Single call with no interference on (ID: 38) NativeCalledObject { cnt: 0 }
+NORMAL RUN: result for (ID: 38) NativeCalledObject { cnt: 2 } on Test1969 target thread - 38
+Single call with force-early-return on (ID: 39) NativeCalledObject { cnt: 0 }
+Will force return of Test1969 target thread - 39
+Failed to force-return due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+	art.NonStandardExit.forceEarlyReturnVoid(Native Method)
+	art.Test1969$TestSuspender.performForceReturn(Test1969.java)
+	art.Test1969.runTestOn(Test1969.java)
+	art.Test1969.runTestOn(Test1969.java)
+	art.Test1969.runTestOn(Test1969.java)
+	art.Test1969.runTests(Test1969.java)
+	<Additional frames hidden>
+
+result for (ID: 39) NativeCalledObject { cnt: 2 } on Test1969 target thread - 39
+Test stopped in a method called by native succeeds
+NORMAL RUN: Single call with no interference on (ID: 40) NativeCallerObject { cnt: 0 }
+NORMAL RUN: result for (ID: 40) NativeCallerObject { cnt: 2 } on Test1969 target thread - 40
+Single call with force-early-return on (ID: 41) NativeCallerObject { cnt: 0 }
+Will force return of Test1969 target thread - 41
+result for (ID: 41) NativeCallerObject { cnt: 2 } on Test1969 target thread - 41
+Test stopped in a static method
+NORMAL RUN: Single call with no interference on (ID: 42) StaticMethodObject { cnt: 0 }
+NORMAL RUN: result for (ID: 42) StaticMethodObject { cnt: 2 } on Test1969 target thread - 42
+Single call with force-early-return on (ID: 43) StaticMethodObject { cnt: 0 }
+Will force return of Test1969 target thread - 43
+result for (ID: 43) StaticMethodObject { cnt: 1 } on Test1969 target thread - 43
+Test stopped in a Object <init> method
+NORMAL RUN: Single call with no interference on (ID: 44) ObjectInitTestObject { cnt: 0 }
+NORMAL RUN: result for (ID: 44) ObjectInitTestObject { cnt: 2 } on Test1969 target thread - 44
+Single call with force-early-return on (ID: 45) ObjectInitTestObject { cnt: 0 }
+Will force return of Test1969 target thread - 45
+result for (ID: 45) ObjectInitTestObject { cnt: 1 } on Test1969 target thread - 45
diff --git a/test/1969-force-early-return-void/force_early_return_void.cc b/test/1969-force-early-return-void/force_early_return_void.cc
new file mode 100644
index 0000000..2935362
--- /dev/null
+++ b/test/1969-force-early-return-void/force_early_return_void.cc
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+#include <cstdio>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+
+#include "jni.h"
+#include "jvmti.h"
+#include "scoped_local_ref.h"
+#include "scoped_utf_chars.h"
+
+// Test infrastructure
+#include "jni_binder.h"
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
+
+#include "suspend_event_helper.h"
+
+namespace art {
+namespace Test1969ForceEarlyReturnVoid {
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1969_00024NativeCalledObject_calledFunction(
+    JNIEnv* env, jobject thiz) {
+  jclass klass = env->GetObjectClass(thiz);
+  jfieldID cnt = env->GetFieldID(klass, "cnt", "I");
+  env->SetIntField(thiz, cnt, env->GetIntField(thiz, cnt) + 1);
+  env->SetIntField(thiz, cnt, env->GetIntField(thiz, cnt) + 1);
+  void *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(/* thread */ nullptr,
+                                                             reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  if (data != nullptr) {
+    art::common_suspend_event::PerformSuspension(jvmti_env, env);
+  }
+  return;
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1969_00024NativeCallerObject_run(
+    JNIEnv* env, jobject thiz) {
+  env->PushLocalFrame(1);
+  jclass klass = env->GetObjectClass(thiz);
+  jmethodID called = env->GetMethodID(klass, "calledFunction", "()V");
+  env->CallVoidMethod(thiz, called);
+  env->PopLocalFrame(nullptr);
+}
+
+extern "C" JNIEXPORT
+jboolean JNICALL Java_art_Test1969_isClassLoaded(JNIEnv* env, jclass, jstring name) {
+  ScopedUtfChars chr(env, name);
+  if (env->ExceptionCheck()) {
+    return false;
+  }
+  jint cnt = 0;
+  jclass* klasses = nullptr;
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetLoadedClasses(&cnt, &klasses))) {
+    return false;
+  }
+  bool res = false;
+  for (jint i = 0; !res && i < cnt; i++) {
+    char* sig;
+    if (JvmtiErrorToException(env,
+                              jvmti_env,
+                              jvmti_env->GetClassSignature(klasses[i], &sig, nullptr))) {
+      return false;
+    }
+    res = (strcmp(sig, chr.c_str()) == 0);
+    jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(sig));
+  }
+  jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(klasses));
+  return res;
+}
+
+}  // namespace Test1969ForceEarlyReturnVoid
+}  // namespace art
+
diff --git a/test/1969-force-early-return-void/info.txt b/test/1969-force-early-return-void/info.txt
new file mode 100644
index 0000000..19fdb1a
--- /dev/null
+++ b/test/1969-force-early-return-void/info.txt
@@ -0,0 +1,4 @@
+Test JVMTI ForceEarlyReturnVoid functionality
+
+Checks that we can call the ForceEarlyReturn functions successfully and force
+returns of objects. It also checks some of the basic error modes.
diff --git a/test/1969-force-early-return-void/run b/test/1969-force-early-return-void/run
new file mode 100755
index 0000000..e92b873
--- /dev/null
+++ b/test/1969-force-early-return-void/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/1969-force-early-return-void/src/Main.java b/test/1969-force-early-return-void/src/Main.java
new file mode 100644
index 0000000..e37c910
--- /dev/null
+++ b/test/1969-force-early-return-void/src/Main.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+import java.util.Arrays;
+import java.util.List;
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1969.run(!Arrays.asList(args).contains("DISABLE_CLASS_LOAD_TESTS"));
+  }
+}
diff --git a/test/1969-force-early-return-void/src/art/Breakpoint.java b/test/1969-force-early-return-void/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1969-force-early-return-void/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1969-force-early-return-void/src/art/NonStandardExit.java b/test/1969-force-early-return-void/src/art/NonStandardExit.java
new file mode 120000
index 0000000..d542a3c
--- /dev/null
+++ b/test/1969-force-early-return-void/src/art/NonStandardExit.java
@@ -0,0 +1 @@
+../../../jvmti-common/NonStandardExit.java
\ No newline at end of file
diff --git a/test/1969-force-early-return-void/src/art/StackTrace.java b/test/1969-force-early-return-void/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1969-force-early-return-void/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1969-force-early-return-void/src/art/SuspendEvents.java b/test/1969-force-early-return-void/src/art/SuspendEvents.java
new file mode 120000
index 0000000..f7a5f7e
--- /dev/null
+++ b/test/1969-force-early-return-void/src/art/SuspendEvents.java
@@ -0,0 +1 @@
+../../../jvmti-common/SuspendEvents.java
\ No newline at end of file
diff --git a/test/1969-force-early-return-void/src/art/Suspension.java b/test/1969-force-early-return-void/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1969-force-early-return-void/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1969-force-early-return-void/src/art/Test1969.java b/test/1969-force-early-return-void/src/art/Test1969.java
new file mode 100644
index 0000000..898da27
--- /dev/null
+++ b/test/1969-force-early-return-void/src/art/Test1969.java
@@ -0,0 +1,973 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package art;
+
+import static art.SuspendEvents.EVENT_TYPE_CLASS_LOAD;
+import static art.SuspendEvents.setupFieldSuspendFor;
+import static art.SuspendEvents.setupSuspendBreakpointFor;
+import static art.SuspendEvents.setupSuspendClassEvent;
+import static art.SuspendEvents.setupSuspendExceptionEvent;
+import static art.SuspendEvents.setupSuspendMethodEvent;
+import static art.SuspendEvents.setupSuspendPopFrameEvent;
+import static art.SuspendEvents.setupSuspendSingleStepAt;
+import static art.SuspendEvents.setupTest;
+import static art.SuspendEvents.waitForSuspendHit;
+
+import java.io.*;
+import java.lang.reflect.Executable;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.concurrent.CountDownLatch;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+
+public class Test1969 {
+  public static final boolean PRINT_STACK_TRACE = false;
+
+  public final boolean canRunClassLoadTests;
+
+  public static void doNothing() {}
+
+  public static interface TestSuspender {
+    public void setupForceReturnRun(Thread thr);
+
+    public void waitForSuspend(Thread thr);
+
+    public void cleanup(Thread thr);
+
+    public default void performForceReturn(Thread thr) {
+      System.out.println("Will force return of " + thr.getName());
+      NonStandardExit.forceEarlyReturnVoid(thr);
+    }
+
+    public default void setupNormalRun(Thread thr) {}
+  }
+
+  public static interface ThreadRunnable {
+    public void run(Thread thr);
+  }
+
+  public static TestSuspender makeSuspend(final ThreadRunnable setup, final ThreadRunnable clean) {
+    return new TestSuspender() {
+      public void setupForceReturnRun(Thread thr) {
+        setup.run(thr);
+      }
+
+      public void waitForSuspend(Thread thr) {
+        waitForSuspendHit(thr);
+      }
+
+      public void cleanup(Thread thr) {
+        clean.run(thr);
+      }
+    };
+  }
+
+  public void runTestOn(Supplier<Runnable> testObj, ThreadRunnable su, ThreadRunnable cl)
+      throws Exception {
+    runTestOn(testObj, makeSuspend(su, cl));
+  }
+
+  private static void SafePrintStackTrace(StackTraceElement st[]) {
+    System.out.println(safeDumpStackTrace(st, "\t"));
+  }
+
+  private static String safeDumpStackTrace(StackTraceElement st[], String prefix) {
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream os = new PrintStream(baos);
+    for (StackTraceElement e : st) {
+      os.println(
+          prefix
+              + e.getClassName()
+              + "."
+              + e.getMethodName()
+              + "("
+              + (e.isNativeMethod() ? "Native Method" : e.getFileName())
+              + ")");
+      if (e.getClassName().equals("art.Test1969") && e.getMethodName().equals("runTests")) {
+        os.println(prefix + "<Additional frames hidden>");
+        break;
+      }
+    }
+    os.flush();
+    return baos.toString();
+  }
+
+  static long ID_COUNTER = 0;
+
+  public Runnable Id(final Runnable tr) {
+    final long my_id = ID_COUNTER++;
+    return new Runnable() {
+      public void run() {
+        tr.run();
+      }
+
+      public String toString() {
+        return "(ID: " + my_id + ") " + tr.toString();
+      }
+    };
+  }
+
+  public static long THREAD_COUNT = 0;
+
+  public Thread mkThread(Runnable r) {
+    Thread t = new Thread(r, "Test1969 target thread - " + THREAD_COUNT++);
+    t.setUncaughtExceptionHandler(
+        (thr, e) -> {
+          System.out.println(
+              "Uncaught exception in thread "
+                  + thr
+                  + " - "
+                  + e.getClass().getName()
+                  + ": "
+                  + e.getLocalizedMessage());
+          SafePrintStackTrace(e.getStackTrace());
+        });
+    return t;
+  }
+
+  final class TestConfig {
+    public final Runnable testObj;
+    public final TestSuspender suspender;
+
+    public TestConfig(Runnable obj, TestSuspender su) {
+      this.testObj = obj;
+      this.suspender = su;
+    }
+  }
+
+  public void runTestOn(Supplier<Runnable> testObjGen, TestSuspender su) throws Exception {
+    runTestOn(() -> new TestConfig(testObjGen.get(), su));
+  }
+
+  public void runTestOn(Supplier<TestConfig> config) throws Exception {
+    TestConfig normal_config = config.get();
+    Runnable normal_run = Id(normal_config.testObj);
+    try {
+      System.out.println("NORMAL RUN: Single call with no interference on " + normal_run);
+      Thread normal_thread = mkThread(normal_run);
+      normal_config.suspender.setupNormalRun(normal_thread);
+      normal_thread.start();
+      normal_thread.join();
+      System.out.println("NORMAL RUN: result for " + normal_run + " on " + normal_thread.getName());
+    } catch (Exception e) {
+      System.out.println("NORMAL RUN: Ended with exception for " + normal_run + "!");
+      e.printStackTrace(System.out);
+    }
+
+    TestConfig force_return_config = config.get();
+    Runnable testObj = Id(force_return_config.testObj);
+    TestSuspender su = force_return_config.suspender;
+    System.out.println("Single call with force-early-return on " + testObj);
+    final CountDownLatch continue_latch = new CountDownLatch(1);
+    final CountDownLatch startup_latch = new CountDownLatch(1);
+    Runnable await =
+        () -> {
+          try {
+            startup_latch.countDown();
+            continue_latch.await();
+          } catch (Exception e) {
+            throw new Error("Failed to await latch", e);
+          }
+        };
+    Thread thr =
+        mkThread(
+            () -> {
+              await.run();
+              testObj.run();
+            });
+    thr.start();
+
+    // Wait until the other thread is started.
+    startup_latch.await();
+
+    // Setup suspension method on the thread.
+    su.setupForceReturnRun(thr);
+
+    // Let the other thread go.
+    continue_latch.countDown();
+
+    // Wait for the other thread to hit the breakpoint/watchpoint/whatever and
+    // suspend itself
+    // (without re-entering java)
+    su.waitForSuspend(thr);
+
+    // Cleanup the breakpoint/watchpoint/etc.
+    su.cleanup(thr);
+
+    try {
+      // Pop the frame.
+      su.performForceReturn(thr);
+    } catch (Exception e) {
+      System.out.println("Failed to force-return due to " + e);
+      SafePrintStackTrace(e.getStackTrace());
+    }
+
+    // Start the other thread going again.
+    Suspension.resume(thr);
+
+    // Wait for the other thread to finish.
+    thr.join();
+
+    // See how many times calledFunction was called.
+    System.out.println("result for " + testObj + " on " + thr.getName());
+  }
+
+  /**
+   * Base class for most test objects: {@code run()} invokes {@code calledFunction()},
+   * which is the frame the test suspends in and then force-early-returns from.
+   * Subclasses implement {@code calledFunction()} with the event target (breakpoint,
+   * watched field, thrown exception, ...).
+   */
+  public abstract static class AbstractTestObject implements Runnable {
+    public AbstractTestObject() {}
+
+    public void run() {
+      // This function should be force-early-returned.
+      calledFunction();
+    }
+
+    public abstract void calledFunction();
+  }
+
+  /**
+   * Test object whose {@code calledFunction} reads/writes {@code TARGET_FIELD} so the
+   * harness can stop on a field-access or field-modification watchpoint. {@code cnt}
+   * tracks progress: 1 means we stopped at the field op, 2 means the method ran to
+   * completion. (Note: {@code implements Runnable} is redundant — the superclass
+   * already implements it — but is kept as-is since this is patch text.)
+   */
+  public static class FieldBasedTestObject extends AbstractTestObject implements Runnable {
+    public int TARGET_FIELD;
+    public int cnt = 0;
+
+    public FieldBasedTestObject() {
+      super();
+      TARGET_FIELD = 0;
+    }
+
+    public void calledFunction() {
+      cnt++;
+      // We put a watchpoint here and force-early-return when we are at it.
+      TARGET_FIELD += 10;
+      cnt++;
+    }
+
+    public String toString() {
+      return "FieldBasedTestObject { TARGET_FIELD: " + TARGET_FIELD + ", cnt: " + cnt + " }";
+    }
+  }
+
+  /**
+   * Basic test object stopped via a breakpoint. The {@code // line +N} comments in
+   * {@code calledFunction} are load-bearing: runTests computes the breakpoint location
+   * as {@code locationToLine(calledFunction, 0) + 2}, so the line layout of that method
+   * body must not change. cnt == 1 means the force-return skipped the second increment;
+   * cnt == 2 means a normal full run.
+   */
+  public static class StandardTestObject extends AbstractTestObject implements Runnable {
+    public int cnt;
+
+    public StandardTestObject() {
+      super();
+      cnt = 0;
+    }
+
+    public void calledFunction() {
+      cnt++; // line +0
+      // We put a breakpoint here and force-early-return when we are at it.
+      doNothing(); // line +2
+      cnt++; // line +3
+      return; // line +4
+    }
+
+    public String toString() {
+      return "StandardTestObject { cnt: " + cnt + " }";
+    }
+  }
+
+  /**
+   * Variant with a declared-synchronized {@code calledFunction}, to check that the
+   * implicit method-level monitor is correctly released on early return. runTests
+   * takes the breakpoint from the *second* line-number-table entry (javac and r8
+   * attribute the monitor-enter differently), so the body's line layout must not
+   * change. NOTE(review): the inner comment says "PopFrame" while the harness calls
+   * performForceReturn — looks like a copy-paste leftover; confirm before changing.
+   */
+  public static class SynchronizedFunctionTestObject extends AbstractTestObject
+      implements Runnable {
+    public int cnt;
+
+    public SynchronizedFunctionTestObject() {
+      super();
+      cnt = 0;
+    }
+
+    public synchronized void calledFunction() {
+      cnt++; // line +0
+      // We put a breakpoint here and PopFrame when we are at it.
+      doNothing(); // line +2
+      cnt++; // line +3
+      return;
+    }
+
+    public String toString() {
+      return "SynchronizedFunctionTestObject { cnt: " + cnt + " }";
+    }
+  }
+
+  /**
+   * Variant with an explicit {@code synchronized (lock)} block, to check that the
+   * monitor taken by monitor-enter is released on early return. The {@code // line +N}
+   * comments are load-bearing (runTests uses locationToLine(...) + 3); do not change
+   * the line layout of {@code calledFunction}.
+   */
+  public static class SynchronizedTestObject extends AbstractTestObject implements Runnable {
+    public final Object lock;
+    public int cnt;
+
+    public SynchronizedTestObject() {
+      super();
+      lock = new Object();
+      cnt = 0;
+    }
+
+    public void calledFunction() {
+      synchronized (lock) { // line +0
+        cnt++; // line +1
+        // We put a breakpoint here and PopFrame when we are at it.
+        doNothing(); // line +3
+        cnt++; // line +4
+        return; // line +5
+      }
+    }
+
+    public String toString() {
+      return "SynchronizedTestObject { cnt: " + cnt + " }";
+    }
+  }
+
+  /**
+   * Test object where {@code calledFunction} catches a TestError thrown by the
+   * {@code doThrow} subroutine. runTests pops/force-returns the {@code doThrow}
+   * frame (via notifyFramePop) and also stops on the ExceptionCatch event in
+   * {@code calledFunction}. cnt encodes the path taken: +100 only if doThrow
+   * returned normally (i.e. the throw was suppressed by the forced return).
+   */
+  public static class ExceptionCatchTestObject extends AbstractTestObject implements Runnable {
+    public static class TestError extends Error {}
+
+    public int cnt;
+
+    public ExceptionCatchTestObject() {
+      super();
+      cnt = 0;
+    }
+
+    public void calledFunction() {
+      cnt++;
+      try {
+        doThrow();
+        cnt += 100;
+      } catch (TestError e) {
+        System.out.println(e.getClass().getName() + " caught in called function.");
+        cnt++;
+      }
+      return;
+    }
+
+    public Object doThrow() {
+      throw new TestError();
+    }
+
+    public String toString() {
+      return "ExceptionCatchTestObject { cnt: " + cnt + " }";
+    }
+  }
+
+  /**
+   * Exception test with an extra stack frame: run() -> callingFunction() ->
+   * calledFunction(), so a TestError thrown in calledFunction unwinds through
+   * callingFunction before run()'s catch sees it ("far" catch). Deliberately does
+   * NOT extend AbstractTestObject — it needs its own run() with the indirection.
+   *
+   * @param catchInCalled (ctor) true: the throw is caught inside calledFunction
+   *        itself (cnt path +100/+10); false: it propagates out (cnt path +1).
+   */
+  public static class ExceptionThrowFarTestObject implements Runnable {
+    public static class TestError extends Error {}
+
+    public int cnt;
+    public int baseCallCnt;
+    public final boolean catchInCalled;
+
+    public ExceptionThrowFarTestObject(boolean catchInCalled) {
+      super();
+      cnt = 0;
+      baseCallCnt = 0;
+      this.catchInCalled = catchInCalled;
+    }
+
+    public void run() {
+      // baseCallCnt reaching 2 shows run() itself completed both sides of the call.
+      baseCallCnt++;
+      try {
+        callingFunction();
+      } catch (TestError e) {
+        System.out.println(e.getClass().getName() + " thrown and caught!");
+      }
+      baseCallCnt++;
+    }
+
+    public void callingFunction() {
+      calledFunction();
+    }
+
+    public void calledFunction() {
+      cnt++;
+      if (catchInCalled) {
+        try {
+          cnt += 100;
+          throw new TestError(); // We put a watch here.
+        } catch (TestError e) {
+          System.out.println(e.getClass().getName() + " caught in same function.");
+          doNothing();
+          cnt += 10;
+          return;
+        }
+      } else {
+        cnt++;
+        throw new TestError(); // We put a watch here.
+      }
+    }
+
+    public String toString() {
+      return "ExceptionThrowFarTestObject { cnt: " + cnt + ", baseCnt: " + baseCallCnt + " }";
+    }
+  }
+
+  /**
+   * Throws a TestError only on the FIRST invocation (cnt == 1); later calls return
+   * normally. Used for the MethodExit-with-exception tests, where the harness stops
+   * on the exceptional exit and the forced return must not re-trigger.
+   *
+   * @param throwInSub (ctor) true: the throw happens one frame deeper, in doThrow();
+   *        false: calledFunction throws directly.
+   */
+  public static class ExceptionOnceObject extends AbstractTestObject {
+    public static final class TestError extends Error {}
+
+    public int cnt;
+    public final boolean throwInSub;
+
+    public ExceptionOnceObject(boolean throwInSub) {
+      super();
+      cnt = 0;
+      this.throwInSub = throwInSub;
+    }
+
+    public void calledFunction() {
+      cnt++;
+      if (cnt == 1) {
+        if (throwInSub) {
+          doThrow();
+        } else {
+          throw new TestError();
+        }
+      }
+      return;
+    }
+
+    public void doThrow() {
+      throw new TestError();
+    }
+
+    public String toString() {
+      return "ExceptionOnceObject { cnt: " + cnt + ", throwInSub: " + throwInSub + " }";
+    }
+  }
+
+  /**
+   * Exception test with a direct call chain: run() -> calledFunction(), the throw
+   * unwinding at most one frame. Compare ExceptionThrowFarTestObject, which inserts
+   * an intermediate frame. The cnt deltas (+10 before throw, +100 after catch when
+   * catchInCalled; +1 when not) let expected.txt distinguish where execution stopped.
+   *
+   * @param catchInCalled (ctor) true: catch inside calledFunction; false: the
+   *        TestError propagates to run()'s catch.
+   */
+  public static class ExceptionThrowTestObject implements Runnable {
+    public static class TestError extends Error {}
+
+    public int cnt;
+    public int baseCallCnt;
+    public final boolean catchInCalled;
+
+    public ExceptionThrowTestObject(boolean catchInCalled) {
+      super();
+      cnt = 0;
+      baseCallCnt = 0;
+      this.catchInCalled = catchInCalled;
+    }
+
+    public void run() {
+      baseCallCnt++;
+      try {
+        calledFunction();
+      } catch (TestError e) {
+        System.out.println(e.getClass().getName() + " thrown and caught!");
+      }
+      baseCallCnt++;
+    }
+
+    public void calledFunction() {
+      cnt++;
+      if (catchInCalled) {
+        try {
+          cnt += 10;
+          throw new TestError(); // We put a watch here.
+        } catch (TestError e) {
+          System.out.println(e.getClass().getName() + " caught in same function.");
+          doNothing();
+          cnt += 100;
+          return;
+        }
+      } else {
+        cnt += 1;
+        throw new TestError(); // We put a watch here.
+      }
+    }
+
+    public String toString() {
+      return "ExceptionThrowTestObject { cnt: " + cnt + ", baseCnt: " + baseCallCnt + " }";
+    }
+  }
+
+  /**
+   * Test object whose {@code calledFunction} is native: used to verify that forcing
+   * an early return out of a native frame FAILS cleanly ("Test stopped during a
+   * native method fails" in runTests). NOTE(review): cnt is presumably mutated by
+   * the native implementation — confirm against the accompanying .cc file.
+   */
+  public static class NativeCalledObject extends AbstractTestObject {
+    public int cnt = 0;
+
+    public native void calledFunction();
+
+    public String toString() {
+      return "NativeCalledObject { cnt: " + cnt + " }";
+    }
+  }
+
+  /**
+   * Inverse of NativeCalledObject: {@code run()} is native and (per the runTests
+   * MethodExit setup on calledFunction) presumably calls back into the Java
+   * {@code calledFunction}, whose frame CAN be force-returned even though its
+   * caller is native. NOTE(review): returnValue is never assigned in the visible
+   * Java — it is presumably written by the native run(); confirm in the .cc file.
+   */
+  public static class NativeCallerObject implements Runnable {
+    public Object returnValue = null;
+    public int cnt = 0;
+
+    public Object getReturnValue() {
+      return returnValue;
+    }
+
+    public native void run();
+
+    public void calledFunction() {
+      cnt++;
+      // We will stop using a MethodExit event.
+      doNothing();
+      cnt++;
+      return;
+    }
+
+    public String toString() {
+      return "NativeCallerObject { cnt: " + cnt + " }";
+    }
+  }
+
+  /**
+   * Test object for the class-load-event tests. Each calledFunctionN touches TCn.foo
+   * for the first time, which triggers TCn's static initializer and therefore a
+   * ClassLoad/ClassPrepare event while this thread is the one loading. CLASS_NAMES
+   * lists the descriptors of all ten candidate classes (note they are rooted at
+   * Test1969, matching the enclosing class). Each foo value encodes both which TC
+   * class ran (hundreds digit) and the curClass value at init time.
+   */
+  public static class ClassLoadObject implements Runnable {
+    public int cnt;
+
+    public static final String[] CLASS_NAMES =
+        new String[] {
+          "Lart/Test1969$ClassLoadObject$TC0;",
+          "Lart/Test1969$ClassLoadObject$TC1;",
+          "Lart/Test1969$ClassLoadObject$TC2;",
+          "Lart/Test1969$ClassLoadObject$TC3;",
+          "Lart/Test1969$ClassLoadObject$TC4;",
+          "Lart/Test1969$ClassLoadObject$TC5;",
+          "Lart/Test1969$ClassLoadObject$TC6;",
+          "Lart/Test1969$ClassLoadObject$TC7;",
+          "Lart/Test1969$ClassLoadObject$TC8;",
+          "Lart/Test1969$ClassLoadObject$TC9;",
+        };
+
+    // Index of the next TC class to load. Static, so it advances across every
+    // ClassLoadObject instance and across runs — each run() loads a fresh class.
+    private static int curClass = 0;
+
+    private static class TC0 { public static int foo; static { foo = 100 + curClass; } }
+
+    private static class TC1 { public static int foo; static { foo = 200 + curClass; } }
+
+    private static class TC2 { public static int foo; static { foo = 300 + curClass; } }
+
+    private static class TC3 { public static int foo; static { foo = 400 + curClass; } }
+
+    private static class TC4 { public static int foo; static { foo = 500 + curClass; } }
+
+    private static class TC5 { public static int foo; static { foo = 600 + curClass; } }
+
+    private static class TC6 { public static int foo; static { foo = 700 + curClass; } }
+
+    private static class TC7 { public static int foo; static { foo = 800 + curClass; } }
+
+    private static class TC8 { public static int foo; static { foo = 900 + curClass; } }
+
+    private static class TC9 { public static int foo; static { foo = 1000 + curClass; } }
+
+    public ClassLoadObject() {
+      super();
+      cnt = 0;
+    }
+
+    // Dispatches to calledFunctionN for the current curClass (does nothing once
+    // curClass exceeds 9), then advances curClass.
+    public void run() {
+      if (curClass == 0) {
+        calledFunction0();
+      } else if (curClass == 1) {
+        calledFunction1();
+      } else if (curClass == 2) {
+        calledFunction2();
+      } else if (curClass == 3) {
+        calledFunction3();
+      } else if (curClass == 4) {
+        calledFunction4();
+      } else if (curClass == 5) {
+        calledFunction5();
+      } else if (curClass == 6) {
+        calledFunction6();
+      } else if (curClass == 7) {
+        calledFunction7();
+      } else if (curClass == 8) {
+        calledFunction8();
+      } else if (curClass == 9) {
+        calledFunction9();
+      }
+      curClass++;
+    }
+
+    public void calledFunction0() {
+      cnt++;
+      System.out.println("TC0.foo == " + TC0.foo);
+    }
+
+    public void calledFunction1() {
+      cnt++;
+      System.out.println("TC1.foo == " + TC1.foo);
+    }
+
+    public void calledFunction2() {
+      cnt++;
+      System.out.println("TC2.foo == " + TC2.foo);
+    }
+
+    public void calledFunction3() {
+      cnt++;
+      System.out.println("TC3.foo == " + TC3.foo);
+    }
+
+    public void calledFunction4() {
+      cnt++;
+      System.out.println("TC4.foo == " + TC4.foo);
+    }
+
+    public void calledFunction5() {
+      cnt++;
+      System.out.println("TC5.foo == " + TC5.foo);
+    }
+
+    public void calledFunction6() {
+      cnt++;
+      System.out.println("TC6.foo == " + TC6.foo);
+    }
+
+    public void calledFunction7() {
+      cnt++;
+      System.out.println("TC7.foo == " + TC7.foo);
+    }
+
+    public void calledFunction8() {
+      cnt++;
+      System.out.println("TC8.foo == " + TC8.foo);
+    }
+
+    public void calledFunction9() {
+      cnt++;
+      System.out.println("TC9.foo == " + TC9.foo);
+    }
+
+    public String toString() {
+      return "ClassLoadObject { cnt: " + cnt + ", curClass: " + curClass + "}";
+    }
+  }
+
+  /**
+   * Test object whose breakpoint sits inside a constructor ({@code <init>}), to
+   * verify force-early-return out of an object initializer. The {@code // line +N}
+   * comments in ObjectInitTarget's constructor are load-bearing (runTests uses
+   * locationToLine(..., 0) + 3); do not change its line layout. cnt counts how many
+   * of the two r.run() callbacks executed.
+   */
+  public static class ObjectInitTestObject implements Runnable {
+    // TODO How do we do this for <clinit>
+    public int cnt = 0;
+    public static final class ObjectInitTarget {
+      public ObjectInitTarget(Runnable r) {
+        super();  // line +0
+        r.run(); // line +1
+        // We set a breakpoint here and force-early-return
+        doNothing();  // line +3
+        r.run();  // line +4
+      }
+    }
+
+    public void run() {
+      new ObjectInitTarget(() -> cnt++);
+    }
+
+    public String toString() {
+      return "ObjectInitTestObject { cnt: " + cnt + " }";
+    }
+  }
+
+  /**
+   * Test object for the "random suspend" case: calledFunction spins until
+   * should_spin is cleared, publishing is_spinning so the harness knows it is safe
+   * to Suspension.suspend the thread mid-loop (see the anonymous TestSuspender in
+   * runTests). Both flags are volatile because they are written and read from
+   * different threads.
+   */
+  public static class SuspendSuddenlyObject extends AbstractTestObject {
+    public volatile boolean should_spin = true;
+    public volatile boolean is_spinning = false;
+    public int cnt = 0;
+
+    public void calledFunction() {
+      cnt++;
+      do {
+        is_spinning = true;
+      } while (should_spin);
+      cnt++;
+      return;
+    }
+
+    public String toString() {
+      return "SuspendSuddenlyObject { cnt: " + cnt + ", spun: " + is_spinning + " }";
+    }
+  }
+
+  /**
+   * Test object whose target frame is a STATIC method (no receiver). The
+   * {@code // line +N} comments in calledFunction are load-bearing (runTests uses
+   * locationToLine(..., 0) + 2); do not change its line layout. cnt counts how many
+   * of the two incr callbacks ran.
+   */
+  public static final class StaticMethodObject implements Runnable {
+    public int cnt = 0;
+
+    public static void calledFunction(Runnable incr) {
+      incr.run();   // line +0
+      // We put a breakpoint here to force the return.
+      doNothing();  // line +2
+      incr.run();   // line +3
+      return;       // line +4
+    }
+
+    public void run() {
+      calledFunction(() -> cnt++);
+    }
+
+    public final String toString() {
+      return "StaticMethodObject { cnt: " + cnt + " }";
+    }
+  }
+
+  // Only used by CTS to run without class-load tests (class-load events are not
+  // reliable there — see CanRunClassLoadingTests).
+  public static void run() throws Exception {
+    new Test1969(false).runTests();
+  }
+  /** Entry point: runs the full suite; pass true to also attempt the class-load tests. */
+  public static void run(boolean canRunClassLoadTests) throws Exception {
+    new Test1969(canRunClassLoadTests).runTests();
+  }
+
+  /** Records whether the caller permits the class-load test cases. */
+  public Test1969(boolean canRunClassLoadTests) {
+    this.canRunClassLoadTests = canRunClassLoadTests;
+  }
+
+  // Intentional no-op with the same shape as runTestOn — presumably kept so a test
+  // case can be disabled by renaming the call site; confirm before removing.
+  public static void no_runTestOn(Supplier<Object> a, ThreadRunnable b, ThreadRunnable c) {}
+
+  /**
+   * Drives every test permutation. For each test object, runTestOn performs one
+   * normal run and one run where the thread is suspended at the given event
+   * (breakpoint, single-step, field access/modification, method entry/exit,
+   * frame-pop, exception, exception-catch, unsolicited suspend, native frames,
+   * and optionally class-load) and then force-early-returned. The ordering and
+   * println text here must stay in sync with expected.txt.
+   */
+  public void runTests() throws Exception {
+    setupTest();
+
+    final Method calledFunction = StandardTestObject.class.getDeclaredMethod("calledFunction");
+    // Add a breakpoint on the second line after the start of the function
+    final int line = Breakpoint.locationToLine(calledFunction, 0) + 2;
+    final long loc = Breakpoint.lineToLocation(calledFunction, line);
+    System.out.println("Test stopped using breakpoint");
+    runTestOn(
+        StandardTestObject::new,
+        (thr) -> setupSuspendBreakpointFor(calledFunction, loc, thr),
+        SuspendEvents::clearSuspendBreakpointFor);
+
+    final Method syncFunctionCalledFunction =
+        SynchronizedFunctionTestObject.class.getDeclaredMethod("calledFunction");
+    // Add a breakpoint on the second line after the start of the function.
+    // Annoyingly r8 generally has the first instruction (a monitor enter) not be
+    // marked as being on any line, but javac has it marked as being on the first
+    // line of the function. Just use the second entry on the line-number table to
+    // get the breakpoint. This should be good for both.
+    final long syncFunctionLoc =
+        Breakpoint.getLineNumberTable(syncFunctionCalledFunction)[1].location;
+    System.out.println("Test stopped using breakpoint with declared synchronized function");
+    runTestOn(
+        SynchronizedFunctionTestObject::new,
+        (thr) -> setupSuspendBreakpointFor(syncFunctionCalledFunction, syncFunctionLoc, thr),
+        SuspendEvents::clearSuspendBreakpointFor);
+
+    final Method syncCalledFunction =
+        SynchronizedTestObject.class.getDeclaredMethod("calledFunction");
+    // Add a breakpoint on the second line after the start of the function
+    final int syncLine = Breakpoint.locationToLine(syncCalledFunction, 0) + 3;
+    final long syncLoc = Breakpoint.lineToLocation(syncCalledFunction, syncLine);
+    System.out.println("Test stopped using breakpoint with synchronized block");
+    runTestOn(
+        SynchronizedTestObject::new,
+        (thr) -> setupSuspendBreakpointFor(syncCalledFunction, syncLoc, thr),
+        SuspendEvents::clearSuspendBreakpointFor);
+
+    System.out.println("Test stopped on single step");
+    runTestOn(
+        StandardTestObject::new,
+        (thr) -> setupSuspendSingleStepAt(calledFunction, loc, thr),
+        SuspendEvents::clearSuspendSingleStepFor);
+
+    final Field target_field = FieldBasedTestObject.class.getDeclaredField("TARGET_FIELD");
+    System.out.println("Test stopped on field access");
+    runTestOn(
+        FieldBasedTestObject::new,
+        (thr) -> setupFieldSuspendFor(FieldBasedTestObject.class, target_field, true, thr),
+        SuspendEvents::clearFieldSuspendFor);
+
+    System.out.println("Test stopped on field modification");
+    runTestOn(
+        FieldBasedTestObject::new,
+        (thr) -> setupFieldSuspendFor(FieldBasedTestObject.class, target_field, false, thr),
+        SuspendEvents::clearFieldSuspendFor);
+
+    System.out.println("Test stopped during Method Exit of calledFunction");
+    runTestOn(
+        StandardTestObject::new,
+        (thr) -> setupSuspendMethodEvent(calledFunction, /* enter */ false, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    System.out.println("Test stopped during Method Enter of calledFunction");
+    runTestOn(
+        StandardTestObject::new,
+        (thr) -> setupSuspendMethodEvent(calledFunction, /* enter */ true, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    final Method exceptionOnceCalledMethod =
+        ExceptionOnceObject.class.getDeclaredMethod("calledFunction");
+    System.out.println("Test stopped during Method Exit due to exception thrown in same function");
+    runTestOn(
+        () -> new ExceptionOnceObject(/* throwInSub */ false),
+        (thr) -> setupSuspendMethodEvent(exceptionOnceCalledMethod, /* enter */ false, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    System.out.println("Test stopped during Method Exit due to exception thrown in subroutine");
+    runTestOn(
+        () -> new ExceptionOnceObject(/* throwInSub */ true),
+        (thr) -> setupSuspendMethodEvent(exceptionOnceCalledMethod, /* enter */ false, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    final Method exceptionThrowCalledMethod =
+        ExceptionThrowTestObject.class.getDeclaredMethod("calledFunction");
+    System.out.println(
+        "Test stopped during notifyFramePop with exception on pop of calledFunction");
+    runTestOn(
+        () -> new ExceptionThrowTestObject(false),
+        (thr) -> setupSuspendPopFrameEvent(0, exceptionThrowCalledMethod, thr),
+        SuspendEvents::clearSuspendPopFrameEvent);
+
+    final Method exceptionCatchThrowMethod =
+        ExceptionCatchTestObject.class.getDeclaredMethod("doThrow");
+    System.out.println("Test stopped during notifyFramePop with exception on pop of doThrow");
+    runTestOn(
+        ExceptionCatchTestObject::new,
+        (thr) -> setupSuspendPopFrameEvent(0, exceptionCatchThrowMethod, thr),
+        SuspendEvents::clearSuspendPopFrameEvent);
+
+    System.out.println(
+        "Test stopped during ExceptionCatch event of calledFunction "
+            + "(catch in called function, throw in called function)");
+    runTestOn(
+        () -> new ExceptionThrowTestObject(true),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowCalledMethod, /* catch */ true, thr),
+        SuspendEvents::clearSuspendExceptionEvent);
+
+    final Method exceptionCatchCalledMethod =
+        ExceptionCatchTestObject.class.getDeclaredMethod("calledFunction");
+    System.out.println(
+        "Test stopped during ExceptionCatch event of calledFunction "
+            + "(catch in called function, throw in subroutine)");
+    runTestOn(
+        ExceptionCatchTestObject::new,
+        (thr) -> setupSuspendExceptionEvent(exceptionCatchCalledMethod, /* catch */ true, thr),
+        SuspendEvents::clearSuspendExceptionEvent);
+
+    System.out.println(
+        "Test stopped during Exception event of calledFunction " + "(catch in calling function)");
+    runTestOn(
+        () -> new ExceptionThrowTestObject(false),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowCalledMethod, /* catch */ false, thr),
+        SuspendEvents::clearSuspendExceptionEvent);
+
+    System.out.println(
+        "Test stopped during Exception event of calledFunction (catch in called function)");
+    runTestOn(
+        () -> new ExceptionThrowTestObject(true),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowCalledMethod, /* catch */ false, thr),
+        SuspendEvents::clearSuspendExceptionEvent);
+
+    final Method exceptionThrowFarCalledMethod =
+        ExceptionThrowFarTestObject.class.getDeclaredMethod("calledFunction");
+    System.out.println(
+        "Test stopped during Exception event of calledFunction "
+            + "(catch in parent of calling function)");
+    runTestOn(
+        () -> new ExceptionThrowFarTestObject(false),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowFarCalledMethod, /* catch */ false, thr),
+        SuspendEvents::clearSuspendExceptionEvent);
+
+    System.out.println(
+        "Test stopped during Exception event of calledFunction " + "(catch in called function)");
+    runTestOn(
+        () -> new ExceptionThrowFarTestObject(true),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowFarCalledMethod, /* catch */ false, thr),
+        SuspendEvents::clearSuspendExceptionEvent);
+
+    System.out.println("Test stopped during random Suspend.");
+    runTestOn(
+        () -> {
+          final SuspendSuddenlyObject sso = new SuspendSuddenlyObject();
+          return new TestConfig(
+              sso,
+              new TestSuspender() {
+                // Force-return run: no event to install — waitForSuspend below
+                // suspends the thread directly once it is spinning.
+                public void setupForceReturnRun(Thread thr) {}
+
+                public void setupNormalRun(Thread thr) {
+                  sso.should_spin = false;
+                }
+
+                public void waitForSuspend(Thread thr) {
+                  // Busy-wait until the target publishes that it is inside the loop.
+                  while (!sso.is_spinning) {}
+                  Suspension.suspend(thr);
+                }
+
+                public void cleanup(Thread thr) {}
+              });
+        });
+
+    System.out.println("Test stopped during a native method fails");
+    runTestOn(
+        NativeCalledObject::new,
+        SuspendEvents::setupWaitForNativeCall,
+        SuspendEvents::clearWaitForNativeCall);
+
+    System.out.println("Test stopped in a method called by native succeeds");
+    final Method nativeCallerMethod = NativeCallerObject.class.getDeclaredMethod("calledFunction");
+    runTestOn(
+        NativeCallerObject::new,
+        (thr) -> setupSuspendMethodEvent(nativeCallerMethod, /* enter */ false, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+
+    System.out.println("Test stopped in a static method");
+    final Method staticCalledMethod = StaticMethodObject.class.getDeclaredMethod("calledFunction", Runnable.class);
+    final int staticFunctionLine= Breakpoint.locationToLine(staticCalledMethod, 0) + 2;
+    final long staticFunctionLoc = Breakpoint.lineToLocation(staticCalledMethod, staticFunctionLine);
+    runTestOn(
+        StaticMethodObject::new,
+        (thr) -> setupSuspendBreakpointFor(staticCalledMethod, staticFunctionLoc, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    System.out.println("Test stopped in a Object <init> method");
+    final Executable initCalledMethod = ObjectInitTestObject.ObjectInitTarget.class.getConstructor(Runnable.class);
+    final int initFunctionLine= Breakpoint.locationToLine(initCalledMethod, 0) + 3;
+    final long initFunctionLoc = Breakpoint.lineToLocation(initCalledMethod, initFunctionLine);
+    runTestOn(
+        ObjectInitTestObject::new,
+        (thr) -> setupSuspendBreakpointFor(initCalledMethod, initFunctionLoc, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    if (canRunClassLoadTests && CanRunClassLoadingTests()) {
+      System.out.println("Test stopped during class-load.");
+      runTestOn(
+          ClassLoadObject::new,
+          (thr) -> setupSuspendClassEvent(EVENT_TYPE_CLASS_LOAD, ClassLoadObject.CLASS_NAMES, thr),
+          SuspendEvents::clearSuspendClassEvent);
+      // NOTE(review): this block is deliberately run twice back-to-back;
+      // ClassLoadObject.curClass is static, so the second pass loads the next TC
+      // classes rather than repeating the first — confirm against expected.txt.
+      System.out.println("Test stopped during class-load.");
+      runTestOn(
+          ClassLoadObject::new,
+          (thr) -> setupSuspendClassEvent(EVENT_TYPE_CLASS_LOAD, ClassLoadObject.CLASS_NAMES, thr),
+          SuspendEvents::clearSuspendClassEvent);
+    }
+  }
+
+
+  // Volatile is to prevent any future optimizations that could invalidate this test by doing
+  // constant propagation and eliminating the failing paths before the verifier is able to load the
+  // class.
+  static volatile boolean ranClassLoadTest = false;
+  // Cached result of the probe below; only valid once ranClassLoadTest is true.
+  static boolean classesPreverified = false;
+  // Probe classes: only RCLT0 is ever instantiated. If the verifier eagerly loads
+  // RCLT1 (the dead branch) while verifying this method, classes are NOT preverified.
+  private static final class RCLT0 { public void foo() {} }
+  private static final class RCLT1 { public void foo() {} }
+  // If classes are not preverified for some reason (interp-ac, no-image, etc) the verifier will
+  // actually load classes as it runs. This means that we cannot use the class-load tests as they
+  // are written. TODO Support this.
+  public boolean CanRunClassLoadingTests() {
+    if (ranClassLoadTest) {
+      return classesPreverified;
+    }
+    if (!ranClassLoadTest) {
+      // Only this will ever be executed.
+      new RCLT0().foo();
+    } else {
+      // This will never be executed. If classes are not preverified the verifier will load RCLT1
+      // when the enclosing method is run. This behavior makes the class-load/prepare test cases
+      // impossible to successfully run (they will deadlock).
+      new RCLT1().foo();
+      System.out.println("FAILURE: UNREACHABLE Location!");
+    }
+    classesPreverified = !isClassLoaded("Lart/Test1969$RCLT1;");
+    ranClassLoadTest = true;
+    return classesPreverified;
+  }
+
+  /** Native hook: returns whether the class with the given descriptor is already loaded. */
+  public static native boolean isClassLoaded(String name);
+}
diff --git a/test/1970-force-early-return-long/expected.txt b/test/1970-force-early-return-long/expected.txt
new file mode 100644
index 0000000..ab79587
--- /dev/null
+++ b/test/1970-force-early-return-long/expected.txt
@@ -0,0 +1,222 @@
+Test stopped using breakpoint
+NORMAL RUN: Single call with no interference on (ID: 0) StandardTestObject { cnt: 0 }
+NORMAL RUN: result for (ID: 0) StandardTestObject { cnt: 2 } is 1
+Single call with force-early-return on (ID: 1) StandardTestObject { cnt: 0 }
+Will force return of 987000
+result for (ID: 1) StandardTestObject { cnt: 1 } is 987000
+Test stopped using breakpoint with declared synchronized function
+NORMAL RUN: Single call with no interference on (ID: 2) SynchronizedFunctionTestObject { cnt: 0 }
+NORMAL RUN: result for (ID: 2) SynchronizedFunctionTestObject { cnt: 2 } is 1
+Single call with force-early-return on (ID: 3) SynchronizedFunctionTestObject { cnt: 0 }
+Will force return of 987001
+result for (ID: 3) SynchronizedFunctionTestObject { cnt: 1 } is 987001
+Test stopped using breakpoint with synchronized block
+NORMAL RUN: Single call with no interference on (ID: 4) SynchronizedTestObject { cnt: 0 }
+NORMAL RUN: result for (ID: 4) SynchronizedTestObject { cnt: 2 } is 1
+Single call with force-early-return on (ID: 5) SynchronizedTestObject { cnt: 0 }
+Will force return of 987002
+result for (ID: 5) SynchronizedTestObject { cnt: 1 } is 987002
+Test stopped on single step
+NORMAL RUN: Single call with no interference on (ID: 6) StandardTestObject { cnt: 0 }
+NORMAL RUN: result for (ID: 6) StandardTestObject { cnt: 2 } is 1
+Single call with force-early-return on (ID: 7) StandardTestObject { cnt: 0 }
+Will force return of 987003
+result for (ID: 7) StandardTestObject { cnt: 1 } is 987003
+Test stopped on field access
+NORMAL RUN: Single call with no interference on (ID: 8) FieldBasedTestObject { TARGET_FIELD: 0 }
+NORMAL RUN: result for (ID: 8) FieldBasedTestObject { TARGET_FIELD: 10 } is 10
+Single call with force-early-return on (ID: 9) FieldBasedTestObject { TARGET_FIELD: 0 }
+Will force return of 987004
+result for (ID: 9) FieldBasedTestObject { TARGET_FIELD: 0 } is 987004
+Test stopped on field modification
+NORMAL RUN: Single call with no interference on (ID: 10) FieldBasedTestObject { TARGET_FIELD: 0 }
+NORMAL RUN: result for (ID: 10) FieldBasedTestObject { TARGET_FIELD: 10 } is 10
+Single call with force-early-return on (ID: 11) FieldBasedTestObject { TARGET_FIELD: 0 }
+Will force return of 987005
+result for (ID: 11) FieldBasedTestObject { TARGET_FIELD: 0 } is 987005
+Test stopped during Method Exit of calledFunction
+NORMAL RUN: Single call with no interference on (ID: 12) StandardTestObject { cnt: 0 }
+NORMAL RUN: result for (ID: 12) StandardTestObject { cnt: 2 } is 1
+Single call with force-early-return on (ID: 13) StandardTestObject { cnt: 0 }
+Will force return of 987006
+result for (ID: 13) StandardTestObject { cnt: 2 } is 987006
+Test stopped during Method Enter of calledFunction
+NORMAL RUN: Single call with no interference on (ID: 14) StandardTestObject { cnt: 0 }
+NORMAL RUN: result for (ID: 14) StandardTestObject { cnt: 2 } is 1
+Single call with force-early-return on (ID: 15) StandardTestObject { cnt: 0 }
+Will force return of 987007
+result for (ID: 15) StandardTestObject { cnt: 0 } is 987007
+Test stopped during Method Exit due to exception thrown in same function
+NORMAL RUN: Single call with no interference on (ID: 16) ExceptionOnceObject { cnt: 0, throwInSub: false }
+Uncaught exception in thread Thread[Test1970 target thread - 16,5,main] - art.Test1970$ExceptionOnceObject$TestError: null
+	art.Test1970$ExceptionOnceObject.calledFunction(Test1970.java)
+	art.Test1970$AbstractTestObject.run(Test1970.java)
+	art.Test1970$2.run(Test1970.java)
+	java.lang.Thread.run(Thread.java)
+
+NORMAL RUN: result for (ID: 16) ExceptionOnceObject { cnt: 1, throwInSub: false } is 0
+Single call with force-early-return on (ID: 17) ExceptionOnceObject { cnt: 0, throwInSub: false }
+Will force return of 987008
+result for (ID: 17) ExceptionOnceObject { cnt: 1, throwInSub: false } is 987008
+Test stopped during Method Exit due to exception thrown in subroutine
+NORMAL RUN: Single call with no interference on (ID: 18) ExceptionOnceObject { cnt: 0, throwInSub: true }
+Uncaught exception in thread Thread[Test1970 target thread - 18,5,main] - art.Test1970$ExceptionOnceObject$TestError: null
+	art.Test1970$ExceptionOnceObject.doThrow(Test1970.java)
+	art.Test1970$ExceptionOnceObject.calledFunction(Test1970.java)
+	art.Test1970$AbstractTestObject.run(Test1970.java)
+	art.Test1970$2.run(Test1970.java)
+	java.lang.Thread.run(Thread.java)
+
+NORMAL RUN: result for (ID: 18) ExceptionOnceObject { cnt: 1, throwInSub: true } is 0
+Single call with force-early-return on (ID: 19) ExceptionOnceObject { cnt: 0, throwInSub: true }
+Will force return of 987009
+result for (ID: 19) ExceptionOnceObject { cnt: 1, throwInSub: true } is 987009
+Test stopped during notifyFramePop with exception on pop of calledFunction
+NORMAL RUN: Single call with no interference on (ID: 20) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+art.Test1970$ExceptionThrowTestObject$TestError thrown and caught!
+NORMAL RUN: result for (ID: 20) ExceptionThrowTestObject { cnt: 2, baseCnt: 2 } is 0
+Single call with force-early-return on (ID: 21) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+Will force return of 987010
+result for (ID: 21) ExceptionThrowTestObject { cnt: 2, baseCnt: 2 } is 987010
+Test stopped during notifyFramePop with exception on pop of doThrow
+NORMAL RUN: Single call with no interference on (ID: 22) ExceptionCatchTestObject { cnt: 0 }
+art.Test1970$ExceptionCatchTestObject$TestError caught in called function.
+NORMAL RUN: result for (ID: 22) ExceptionCatchTestObject { cnt: 2 } is 1
+Single call with force-early-return on (ID: 23) ExceptionCatchTestObject { cnt: 0 }
+Will force return of 987011
+Failed to force-return due to java.lang.RuntimeException: JVMTI_ERROR_TYPE_MISMATCH
+	art.NonStandardExit.forceEarlyReturnLong(Native Method)
+	art.NonStandardExit.forceEarlyReturn(NonStandardExit.java)
+	art.Test1970$TestSuspender.performForceReturn(Test1970.java)
+	art.Test1970.runTestOn(Test1970.java)
+	art.Test1970.runTestOn(Test1970.java)
+	art.Test1970.runTestOn(Test1970.java)
+	art.Test1970.runTests(Test1970.java)
+	<Additional frames hidden>
+
+art.Test1970$ExceptionCatchTestObject$TestError caught in called function.
+result for (ID: 23) ExceptionCatchTestObject { cnt: 2 } is 1
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in called function)
+NORMAL RUN: Single call with no interference on (ID: 24) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+art.Test1970$ExceptionThrowTestObject$TestError caught in same function.
+NORMAL RUN: result for (ID: 24) ExceptionThrowTestObject { cnt: 111, baseCnt: 2 } is 11
+Single call with force-early-return on (ID: 25) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+Will force return of 987012
+result for (ID: 25) ExceptionThrowTestObject { cnt: 11, baseCnt: 2 } is 987012
+Test stopped during ExceptionCatch event of calledFunction (catch in called function, throw in subroutine)
+NORMAL RUN: Single call with no interference on (ID: 26) ExceptionCatchTestObject { cnt: 0 }
+art.Test1970$ExceptionCatchTestObject$TestError caught in called function.
+NORMAL RUN: result for (ID: 26) ExceptionCatchTestObject { cnt: 2 } is 1
+Single call with force-early-return on (ID: 27) ExceptionCatchTestObject { cnt: 0 }
+Will force return of 987013
+result for (ID: 27) ExceptionCatchTestObject { cnt: 1 } is 987013
+Test stopped during Exception event of calledFunction (catch in calling function)
+NORMAL RUN: Single call with no interference on (ID: 28) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+art.Test1970$ExceptionThrowTestObject$TestError thrown and caught!
+NORMAL RUN: result for (ID: 28) ExceptionThrowTestObject { cnt: 2, baseCnt: 2 } is 0
+Single call with force-early-return on (ID: 29) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+Will force return of 987014
+result for (ID: 29) ExceptionThrowTestObject { cnt: 2, baseCnt: 2 } is 987014
+Test stopped during Exception event of calledFunction (catch in called function)
+NORMAL RUN: Single call with no interference on (ID: 30) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+art.Test1970$ExceptionThrowTestObject$TestError caught in same function.
+NORMAL RUN: result for (ID: 30) ExceptionThrowTestObject { cnt: 111, baseCnt: 2 } is 11
+Single call with force-early-return on (ID: 31) ExceptionThrowTestObject { cnt: 0, baseCnt: 0 }
+Will force return of 987015
+result for (ID: 31) ExceptionThrowTestObject { cnt: 11, baseCnt: 2 } is 987015
+Test stopped during Exception event of calledFunction (catch in parent of calling function)
+NORMAL RUN: Single call with no interference on (ID: 32) ExceptionThrowFarTestObject { cnt: 0, baseCnt: 0 }
+art.Test1970$ExceptionThrowFarTestObject$TestError thrown and caught!
+NORMAL RUN: result for (ID: 32) ExceptionThrowFarTestObject { cnt: 2, baseCnt: 2 } is 0
+Single call with force-early-return on (ID: 33) ExceptionThrowFarTestObject { cnt: 0, baseCnt: 0 }
+Will force return of 987016
+result for (ID: 33) ExceptionThrowFarTestObject { cnt: 2, baseCnt: 2 } is 987016
+Test stopped during Exception event of calledFunction (catch in called function)
+NORMAL RUN: Single call with no interference on (ID: 34) ExceptionThrowFarTestObject { cnt: 0, baseCnt: 0 }
+art.Test1970$ExceptionThrowFarTestObject$TestError caught in same function.
+NORMAL RUN: result for (ID: 34) ExceptionThrowFarTestObject { cnt: 111, baseCnt: 2 } is 101
+Single call with force-early-return on (ID: 35) ExceptionThrowFarTestObject { cnt: 0, baseCnt: 0 }
+Will force return of 987017
+result for (ID: 35) ExceptionThrowFarTestObject { cnt: 101, baseCnt: 2 } is 987017
+Test stopped during random Suspend.
+NORMAL RUN: Single call with no interference on (ID: 36) SuspendSuddenlyObject { cnt: 0, spun: false }
+NORMAL RUN: result for (ID: 36) SuspendSuddenlyObject { cnt: 2, spun: true } is 1
+Single call with force-early-return on (ID: 37) SuspendSuddenlyObject { cnt: 0, spun: false }
+Will force return of 987018
+result for (ID: 37) SuspendSuddenlyObject { cnt: 1, spun: true } is 987018
+Test stopped during a native method fails
+NORMAL RUN: Single call with no interference on (ID: 38) NativeCalledObject { cnt: 0 }
+NORMAL RUN: result for (ID: 38) NativeCalledObject { cnt: 2 } is 1
+Single call with force-early-return on (ID: 39) NativeCalledObject { cnt: 0 }
+Will force return of 987019
+Failed to force-return due to java.lang.RuntimeException: JVMTI_ERROR_OPAQUE_FRAME
+	art.NonStandardExit.forceEarlyReturnLong(Native Method)
+	art.NonStandardExit.forceEarlyReturn(NonStandardExit.java)
+	art.Test1970$TestSuspender.performForceReturn(Test1970.java)
+	art.Test1970.runTestOn(Test1970.java)
+	art.Test1970.runTestOn(Test1970.java)
+	art.Test1970.runTestOn(Test1970.java)
+	art.Test1970.runTests(Test1970.java)
+	<Additional frames hidden>
+
+result for (ID: 39) NativeCalledObject { cnt: 2 } is 1
+Test stopped in a method called by native succeeds
+NORMAL RUN: Single call with no interference on (ID: 40) NativeCallerObject { cnt: 0 }
+NORMAL RUN: result for (ID: 40) NativeCallerObject { cnt: 2 } is 1
+Single call with force-early-return on (ID: 41) NativeCallerObject { cnt: 0 }
+Will force return of 987020
+result for (ID: 41) NativeCallerObject { cnt: 2 } is 987020
+Test stopped in a static method
+NORMAL RUN: Single call with no interference on (ID: 42) StaticMethodObject { cnt: 0 }
+NORMAL RUN: result for (ID: 42) StaticMethodObject { cnt: 2 } is 1
+Single call with force-early-return on (ID: 43) StaticMethodObject { cnt: 0 }
+Will force return of 987021
+result for (ID: 43) StaticMethodObject { cnt: 1 } is 987021
+Test force-return of void function fails!
+NORMAL RUN: Single call with no interference on (ID: 44) BadForceVoidObject { cnt: 0 }
+NORMAL RUN: result for (ID: 44) BadForceVoidObject { cnt: 2 } is -1
+Single call with force-early-return on (ID: 45) BadForceVoidObject { cnt: 0 }
+Will force return of 987022
+Failed to force-return due to java.lang.RuntimeException: JVMTI_ERROR_TYPE_MISMATCH
+	art.NonStandardExit.forceEarlyReturnLong(Native Method)
+	art.NonStandardExit.forceEarlyReturn(NonStandardExit.java)
+	art.Test1970$TestSuspender.performForceReturn(Test1970.java)
+	art.Test1970.runTestOn(Test1970.java)
+	art.Test1970.runTestOn(Test1970.java)
+	art.Test1970.runTestOn(Test1970.java)
+	art.Test1970.runTests(Test1970.java)
+	<Additional frames hidden>
+
+result for (ID: 45) BadForceVoidObject { cnt: 2 } is -1
+Test force-return of int function fails!
+NORMAL RUN: Single call with no interference on (ID: 46) BadForceIntObject { cnt: 0 }
+NORMAL RUN: result for (ID: 46) BadForceIntObject { cnt: 2 } is 1
+Single call with force-early-return on (ID: 47) BadForceIntObject { cnt: 0 }
+Will force return of 987023
+Failed to force-return due to java.lang.RuntimeException: JVMTI_ERROR_TYPE_MISMATCH
+	art.NonStandardExit.forceEarlyReturnLong(Native Method)
+	art.NonStandardExit.forceEarlyReturn(NonStandardExit.java)
+	art.Test1970$TestSuspender.performForceReturn(Test1970.java)
+	art.Test1970.runTestOn(Test1970.java)
+	art.Test1970.runTestOn(Test1970.java)
+	art.Test1970.runTestOn(Test1970.java)
+	art.Test1970.runTests(Test1970.java)
+	<Additional frames hidden>
+
+result for (ID: 47) BadForceIntObject { cnt: 2 } is 1
+Test force-return of Object function fails!
+NORMAL RUN: Single call with no interference on (ID: 48) BadForceIntObject { cnt: 0 }
+NORMAL RUN: result for (ID: 48) BadForceIntObject { cnt: 2 } is 1
+Single call with force-early-return on (ID: 49) BadForceIntObject { cnt: 0 }
+Will force return of 987024
+Failed to force-return due to java.lang.RuntimeException: JVMTI_ERROR_TYPE_MISMATCH
+	art.NonStandardExit.forceEarlyReturnLong(Native Method)
+	art.NonStandardExit.forceEarlyReturn(NonStandardExit.java)
+	art.Test1970$TestSuspender.performForceReturn(Test1970.java)
+	art.Test1970.runTestOn(Test1970.java)
+	art.Test1970.runTestOn(Test1970.java)
+	art.Test1970.runTestOn(Test1970.java)
+	art.Test1970.runTests(Test1970.java)
+	<Additional frames hidden>
+
+result for (ID: 49) BadForceIntObject { cnt: 2 } is 1
diff --git a/test/1970-force-early-return-long/force_early_return_long.cc b/test/1970-force-early-return-long/force_early_return_long.cc
new file mode 100644
index 0000000..da0c946
--- /dev/null
+++ b/test/1970-force-early-return-long/force_early_return_long.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+#include <cstdio>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+
+#include "jni.h"
+#include "jvmti.h"
+#include "scoped_local_ref.h"
+#include "scoped_utf_chars.h"
+
+// Test infrastructure
+#include "jni_binder.h"
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
+
+#include "suspend_event_helper.h"
+
+namespace art {
+namespace Test1970ForceEarlyReturnLong {
+
+extern "C" JNIEXPORT
+jlong JNICALL Java_art_Test1970_00024NativeCalledObject_calledFunction(
+    JNIEnv* env, jobject thiz) {
+  env->PushLocalFrame(4);
+  jclass klass = env->GetObjectClass(thiz);
+  jfieldID cnt = env->GetFieldID(klass, "cnt", "I");
+  env->SetIntField(thiz, cnt, env->GetIntField(thiz, cnt) + 1);
+  jlong res = static_cast<jlong>(env->GetIntField(thiz, cnt));
+  env->SetIntField(thiz, cnt, env->GetIntField(thiz, cnt) + 1);
+  void *data;
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->GetThreadLocalStorage(/* thread */ nullptr,
+                                                             reinterpret_cast<void**>(&data)))) {
+    env->PopLocalFrame(nullptr);
+    return -1;
+  }
+  if (data != nullptr) {
+    art::common_suspend_event::PerformSuspension(jvmti_env, env);
+  }
+  env->PopLocalFrame(nullptr);
+  return res;
+}
+
+extern "C" JNIEXPORT
+void JNICALL Java_art_Test1970_00024NativeCallerObject_run(
+    JNIEnv* env, jobject thiz) {
+  env->PushLocalFrame(1);
+  jclass klass = env->GetObjectClass(thiz);
+  jfieldID ret = env->GetFieldID(klass, "returnValue", "J");
+  jmethodID called = env->GetMethodID(klass, "calledFunction", "()J");
+  env->SetLongField(thiz, ret, env->CallLongMethod(thiz, called));
+  env->PopLocalFrame(nullptr);
+}
+
+}  // namespace Test1970ForceEarlyReturnLong
+}  // namespace art
+
diff --git a/test/1970-force-early-return-long/info.txt b/test/1970-force-early-return-long/info.txt
new file mode 100644
index 0000000..621d881
--- /dev/null
+++ b/test/1970-force-early-return-long/info.txt
@@ -0,0 +1,4 @@
+Test JVMTI ForceEarlyReturnLong functionality
+
+Checks that we can call the ForceEarlyReturn functions successfully and force
+returns of longs. It also checks some of the basic error modes.
diff --git a/test/1970-force-early-return-long/run b/test/1970-force-early-return-long/run
new file mode 100755
index 0000000..d16d4e6
--- /dev/null
+++ b/test/1970-force-early-return-long/run
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# On RI we need to turn class-load tests off since those events are buggy around
+# pop-frame (see b/116003018).
+ARGS=""
+if [[ "$TEST_RUNTIME" == "jvm" ]]; then
+  ARGS="--args DISABLE_CLASS_LOAD_TESTS"
+fi
+
+./default-run "$@" --jvmti $ARGS
diff --git a/test/1970-force-early-return-long/src/Main.java b/test/1970-force-early-return-long/src/Main.java
new file mode 100644
index 0000000..5a75458
--- /dev/null
+++ b/test/1970-force-early-return-long/src/Main.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+import java.util.Arrays;
+import java.util.List;
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1970.run();
+  }
+}
diff --git a/test/1970-force-early-return-long/src/art/Breakpoint.java b/test/1970-force-early-return-long/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1970-force-early-return-long/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1970-force-early-return-long/src/art/NonStandardExit.java b/test/1970-force-early-return-long/src/art/NonStandardExit.java
new file mode 120000
index 0000000..d542a3c
--- /dev/null
+++ b/test/1970-force-early-return-long/src/art/NonStandardExit.java
@@ -0,0 +1 @@
+../../../jvmti-common/NonStandardExit.java
\ No newline at end of file
diff --git a/test/1970-force-early-return-long/src/art/StackTrace.java b/test/1970-force-early-return-long/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1970-force-early-return-long/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1970-force-early-return-long/src/art/SuspendEvents.java b/test/1970-force-early-return-long/src/art/SuspendEvents.java
new file mode 120000
index 0000000..f7a5f7e
--- /dev/null
+++ b/test/1970-force-early-return-long/src/art/SuspendEvents.java
@@ -0,0 +1 @@
+../../../jvmti-common/SuspendEvents.java
\ No newline at end of file
diff --git a/test/1970-force-early-return-long/src/art/Suspension.java b/test/1970-force-early-return-long/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1970-force-early-return-long/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1970-force-early-return-long/src/art/Test1970.java b/test/1970-force-early-return-long/src/art/Test1970.java
new file mode 100644
index 0000000..976d4e9
--- /dev/null
+++ b/test/1970-force-early-return-long/src/art/Test1970.java
@@ -0,0 +1,887 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package art;
+
+import static art.SuspendEvents.setupFieldSuspendFor;
+import static art.SuspendEvents.setupSuspendBreakpointFor;
+import static art.SuspendEvents.setupSuspendExceptionEvent;
+import static art.SuspendEvents.setupSuspendMethodEvent;
+import static art.SuspendEvents.setupSuspendPopFrameEvent;
+import static art.SuspendEvents.setupSuspendSingleStepAt;
+import static art.SuspendEvents.setupTest;
+import static art.SuspendEvents.waitForSuspendHit;
+
+import java.io.*;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.concurrent.CountDownLatch;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+
+public class Test1970 {
+  // Make sure this is always high enough that it's easily distinguishable from the results the
+  // methods would normally return.
+  public static long OVERRIDE_ID = 987000;
+
+  // Returns a value to be used for the return value of the given thread.
+  public static long getOveriddenReturnValue(Thread thr) {
+    return OVERRIDE_ID++;
+  }
+
+  public static void doNothing() {}
+
+  public interface TestRunnable extends Runnable {
+    public long getReturnValue();
+  }
+
+  public static interface TestSuspender {
+    public void setupForceReturnRun(Thread thr);
+
+    public void waitForSuspend(Thread thr);
+
+    public void cleanup(Thread thr);
+
+    public default void performForceReturn(Thread thr) {
+      long ret = getOveriddenReturnValue(thr);
+      System.out.println("Will force return of " + ret);
+      NonStandardExit.forceEarlyReturn(thr, ret);
+    }
+
+    public default void setupNormalRun(Thread thr) {}
+  }
+
+  public static interface ThreadRunnable {
+    public void run(Thread thr);
+  }
+
+  public static TestSuspender makeSuspend(final ThreadRunnable setup, final ThreadRunnable clean) {
+    return new TestSuspender() {
+      public void setupForceReturnRun(Thread thr) {
+        setup.run(thr);
+      }
+
+      public void waitForSuspend(Thread thr) {
+        waitForSuspendHit(thr);
+      }
+
+      public void cleanup(Thread thr) {
+        clean.run(thr);
+      }
+    };
+  }
+
+  public void runTestOn(Supplier<TestRunnable> testObj, ThreadRunnable su, ThreadRunnable cl)
+      throws Exception {
+    runTestOn(testObj, makeSuspend(su, cl));
+  }
+
+  private static void SafePrintStackTrace(StackTraceElement st[]) {
+    System.out.println(safeDumpStackTrace(st, "\t"));
+  }
+
+  private static String safeDumpStackTrace(StackTraceElement st[], String prefix) {
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream os = new PrintStream(baos);
+    for (StackTraceElement e : st) {
+      os.println(
+          prefix
+              + e.getClassName()
+              + "."
+              + e.getMethodName()
+              + "("
+              + (e.isNativeMethod() ? "Native Method" : e.getFileName())
+              + ")");
+      if (e.getClassName().equals("art.Test1970") && e.getMethodName().equals("runTests")) {
+        os.println(prefix + "<Additional frames hidden>");
+        break;
+      }
+    }
+    os.flush();
+    return baos.toString();
+  }
+
+  static long ID_COUNTER = 0;
+
+  public TestRunnable Id(final TestRunnable tr) {
+    final long my_id = ID_COUNTER++;
+    return new TestRunnable() {
+      public void run() {
+        tr.run();
+      }
+
+      public long getReturnValue() {
+        return tr.getReturnValue();
+      }
+
+      public String toString() {
+        return "(ID: " + my_id + ") " + tr.toString();
+      }
+    };
+  }
+
+  public static long THREAD_COUNT = 0;
+
+  public Thread mkThread(Runnable r) {
+    Thread t = new Thread(r, "Test1970 target thread - " + THREAD_COUNT++);
+    t.setUncaughtExceptionHandler(
+        (thr, e) -> {
+          System.out.println(
+              "Uncaught exception in thread "
+                  + thr
+                  + " - "
+                  + e.getClass().getName()
+                  + ": "
+                  + e.getLocalizedMessage());
+          SafePrintStackTrace(e.getStackTrace());
+        });
+    return t;
+  }
+
+  final class TestConfig {
+    public final TestRunnable testObj;
+    public final TestSuspender suspender;
+
+    public TestConfig(TestRunnable obj, TestSuspender su) {
+      this.testObj = obj;
+      this.suspender = su;
+    }
+  }
+
+  public void runTestOn(Supplier<TestRunnable> testObjGen, TestSuspender su) throws Exception {
+    runTestOn(() -> new TestConfig(testObjGen.get(), su));
+  }
+
+  public void runTestOn(Supplier<TestConfig> config) throws Exception {
+    TestConfig normal_config = config.get();
+    TestRunnable normal_run = Id(normal_config.testObj);
+    try {
+      System.out.println("NORMAL RUN: Single call with no interference on " + normal_run);
+      Thread normal_thread = mkThread(normal_run);
+      normal_config.suspender.setupNormalRun(normal_thread);
+      normal_thread.start();
+      normal_thread.join();
+      System.out.println(
+          "NORMAL RUN: result for " + normal_run + " is " + normal_run.getReturnValue());
+    } catch (Exception e) {
+      System.out.println("NORMAL RUN: Ended with exception for " + normal_run + "!");
+      e.printStackTrace(System.out);
+    }
+
+    TestConfig force_return_config = config.get();
+    TestRunnable testObj = Id(force_return_config.testObj);
+    TestSuspender su = force_return_config.suspender;
+    System.out.println("Single call with force-early-return on " + testObj);
+    final CountDownLatch continue_latch = new CountDownLatch(1);
+    final CountDownLatch startup_latch = new CountDownLatch(1);
+    Runnable await =
+        () -> {
+          try {
+            startup_latch.countDown();
+            continue_latch.await();
+          } catch (Exception e) {
+            throw new Error("Failed to await latch", e);
+          }
+        };
+    Thread thr =
+        mkThread(
+            () -> {
+              await.run();
+              testObj.run();
+            });
+    thr.start();
+
+    // Wait until the other thread is started.
+    startup_latch.await();
+
+    // Setup suspension method on the thread.
+    su.setupForceReturnRun(thr);
+
+    // Let the other thread go.
+    continue_latch.countDown();
+
+    // Wait for the other thread to hit the breakpoint/watchpoint/whatever and
+    // suspend itself
+    // (without re-entering java)
+    su.waitForSuspend(thr);
+
+    // Cleanup the breakpoint/watchpoint/etc.
+    su.cleanup(thr);
+
+    try {
+      // Pop the frame.
+      su.performForceReturn(thr);
+    } catch (Exception e) {
+      System.out.println("Failed to force-return due to " + e);
+      SafePrintStackTrace(e.getStackTrace());
+    }
+
+    // Start the other thread going again.
+    Suspension.resume(thr);
+
+    // Wait for the other thread to finish.
+    thr.join();
+
+    // See how many times calledFunction was called.
+    System.out.println("result for " + testObj + " is " + testObj.getReturnValue());
+  }
+
+  public abstract static class AbstractTestObject implements TestRunnable {
+    private long resultVal = 0;
+
+    public AbstractTestObject() { }
+
+    public long getReturnValue() {
+      return resultVal;
+    }
+
+    public void run() {
+      // This function should have its return-value replaced by force-early-return.
+      resultVal = calledFunction();
+    }
+
+    public abstract long calledFunction();
+  }
+
+  public static class IntContainer {
+    private final int value;
+
+    public IntContainer(int i) {
+      value = i;
+    }
+
+    public String toString() {
+      return "IntContainer { value: " + value + " }";
+    }
+  }
+
+  public static class FieldBasedTestObject extends AbstractTestObject implements Runnable {
+    public int TARGET_FIELD;
+
+    public FieldBasedTestObject() {
+      super();
+      TARGET_FIELD = 0;
+    }
+
+    public long calledFunction() {
+      // We put a watchpoint here and force-early-return when we are at it.
+      TARGET_FIELD += 10;
+      return TARGET_FIELD;
+    }
+
+    public String toString() {
+      return "FieldBasedTestObject { TARGET_FIELD: " + TARGET_FIELD + " }";
+    }
+  }
+
+  public static class StandardTestObject extends AbstractTestObject implements Runnable {
+    public int cnt;
+
+    public StandardTestObject() {
+      super();
+      cnt = 0;
+    }
+
+    public long calledFunction() {
+      cnt++; // line +0
+      // We put a breakpoint here and force-early-return when we are at it.
+      long result = cnt; // line +2
+      cnt++; // line +3
+      return result; // line +4
+    }
+
+    public String toString() {
+      return "StandardTestObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class SynchronizedFunctionTestObject extends AbstractTestObject
+      implements Runnable {
+    public int cnt;
+
+    public SynchronizedFunctionTestObject() {
+      super();
+      cnt = 0;
+    }
+
+    public synchronized long calledFunction() {
+      cnt++; // line +0
+      // We put a breakpoint here and force-early-return when we are at it.
+      long result = cnt; // line +2
+      cnt++; // line +3
+      return result;
+    }
+
+    public String toString() {
+      return "SynchronizedFunctionTestObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class SynchronizedTestObject extends AbstractTestObject implements Runnable {
+    public final Object lock;
+    public int cnt;
+
+    public SynchronizedTestObject() {
+      super();
+      lock = new Object();
+      cnt = 0;
+    }
+
+    public long calledFunction() {
+      synchronized (lock) { // line +0
+        cnt++; // line +1
+        // We put a breakpoint here and force-early-return when we are at it.
+        long result = cnt; // line +3
+        cnt++; // line +4
+        return result; // line +5
+      }
+    }
+
+    public String toString() {
+      return "SynchronizedTestObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class ExceptionCatchTestObject extends AbstractTestObject implements Runnable {
+    public static class TestError extends Error {}
+
+    public int cnt;
+
+    public ExceptionCatchTestObject() {
+      super();
+      cnt = 0;
+    }
+
+    public long calledFunction() {
+      cnt++;
+      long result = cnt;
+      try {
+        doThrow();
+        cnt += 100;
+      } catch (TestError e) {
+        System.out.println(e.getClass().getName() + " caught in called function.");
+        cnt++;
+      }
+      return result;
+    }
+
+    public Object doThrow() {
+      throw new TestError();
+    }
+
+    public String toString() {
+      return "ExceptionCatchTestObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class ExceptionThrowFarTestObject implements TestRunnable {
+    public static class TestError extends Error {}
+
+    public int cnt;
+    public int baseCallCnt;
+    public final boolean catchInCalled;
+    public long result;
+
+    public ExceptionThrowFarTestObject(boolean catchInCalled) {
+      super();
+      cnt = 0;
+      baseCallCnt = 0;
+      this.catchInCalled = catchInCalled;
+    }
+
+    public void run() {
+      baseCallCnt++;
+      try {
+        result = callingFunction();
+      } catch (TestError e) {
+        System.out.println(e.getClass().getName() + " thrown and caught!");
+      }
+      baseCallCnt++;
+    }
+
+    public long callingFunction() {
+      return calledFunction();
+    }
+
+    public long calledFunction() {
+      cnt++;
+      if (catchInCalled) {
+        try {
+          cnt += 100;
+          throw new TestError(); // We put a watch here.
+        } catch (TestError e) {
+          System.out.println(e.getClass().getName() + " caught in same function.");
+          long result = cnt;
+          cnt += 10;
+          return result;
+        }
+      } else {
+        cnt++;
+        throw new TestError(); // We put a watch here.
+      }
+    }
+
+    public String toString() {
+      return "ExceptionThrowFarTestObject { cnt: " + cnt + ", baseCnt: " + baseCallCnt + " }";
+    }
+
+    @Override
+    public long getReturnValue() {
+      return result;
+    }
+  }
+
+  public static class ExceptionOnceObject extends AbstractTestObject {
+    public static final class TestError extends Error {}
+
+    public int cnt;
+    public final boolean throwInSub;
+
+    public ExceptionOnceObject(boolean throwInSub) {
+      super();
+      cnt = 0;
+      this.throwInSub = throwInSub;
+    }
+
+    public long calledFunction() {
+      cnt++;
+      if (cnt == 1) {
+        if (throwInSub) {
+          return doThrow();
+        } else {
+          throw new TestError();
+        }
+      }
+      return cnt++;
+    }
+
+    public long doThrow() {
+      throw new TestError();
+    }
+
+    public String toString() {
+      return "ExceptionOnceObject { cnt: " + cnt + ", throwInSub: " + throwInSub + " }";
+    }
+  }
+
+  public static class ExceptionThrowTestObject implements TestRunnable {
+    public static class TestError extends Error {}
+
+    public long getReturnValue() {
+      return result;
+    }
+
+    public int cnt;
+    public int baseCallCnt;
+    public final boolean catchInCalled;
+    public long result;
+
+    public ExceptionThrowTestObject(boolean catchInCalled) {
+      super();
+      cnt = 0;
+      baseCallCnt = 0;
+      this.catchInCalled = catchInCalled;
+    }
+
+    public void run() {
+      baseCallCnt++;
+      try {
+        result = calledFunction();
+      } catch (TestError e) {
+        System.out.println(e.getClass().getName() + " thrown and caught!");
+      }
+      baseCallCnt++;
+    }
+
+    public long calledFunction() {
+      cnt++;
+      if (catchInCalled) {
+        try {
+          cnt += 10;
+          throw new TestError(); // We put a watch here.
+        } catch (TestError e) {
+          System.out.println(e.getClass().getName() + " caught in same function.");
+          long result = cnt;
+          cnt += 100;
+          return result;
+        }
+      } else {
+        cnt += 1;
+        throw new TestError(); // We put a watch here.
+      }
+    }
+
+    public String toString() {
+      return "ExceptionThrowTestObject { cnt: " + cnt + ", baseCnt: " + baseCallCnt + " }";
+    }
+  }
+
+  public static class NativeCalledObject extends AbstractTestObject {
+    public int cnt = 0;
+
+    public native long calledFunction();
+
+    public String toString() {
+      return "NativeCalledObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class NativeCallerObject implements TestRunnable {
+    public long returnValue = -1;
+    public int cnt = 0;
+
+    public long getReturnValue() {
+      return returnValue;
+    }
+
+    public native void run();
+
+    public long calledFunction() {
+      cnt++;
+      // We will stop using a MethodExit event.
+      long res = cnt;
+      cnt++;
+      return res;
+    }
+
+    public String toString() {
+      return "NativeCallerObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class StaticMethodObject implements TestRunnable {
+    public int cnt = 0;
+    public long result = -1;
+    public long getReturnValue() {
+      return result;
+    }
+
+    public static long calledFunction(Supplier<Long> incr) {
+      long res = incr.get().longValue(); // line +0
+      // We put a breakpoint here to force the return.
+      doNothing();  // line +2
+      incr.get();   // line +3
+      return res;   // line +4
+    }
+
+    public void run() {
+      result = calledFunction(() -> (long)++cnt);
+    }
+
+    public String toString() {
+      return "StaticMethodObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class SuspendSuddenlyObject extends AbstractTestObject {
+    public volatile boolean should_spin = true;
+    public volatile boolean is_spinning = false;
+    public int cnt = 0;
+
+    public long calledFunction() {
+      cnt++;
+      do {
+        is_spinning = true;
+      } while (should_spin);
+      return cnt++;
+    }
+
+    public String toString() {
+      return "SuspendSuddenlyObject { cnt: " + cnt + ", spun: " + is_spinning + " }";
+    }
+  }
+
+  public static class BadForceVoidObject implements TestRunnable {
+    public int cnt = 0;
+    public long getReturnValue() {
+      return -1;
+    }
+    public void run() {
+      incrCnt();
+    }
+    public void incrCnt() {
+      ++cnt;  // line +0
+      // We set a breakpoint here and try to force-early-return.
+      doNothing(); // line +2
+      ++cnt;  // line +3
+    }
+    public String toString() {
+      return "BadForceVoidObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static class BadForceObjectObject implements TestRunnable {
+    public int cnt = 0;
+    public Long result = null;
+    public long getReturnValue() {
+      return result.longValue();
+    }
+    public void run() {
+      result = incrCnt();
+    }
+    public Long incrCnt() {
+      ++cnt;  // line +0
+      // We set a breakpoint here and try to force-early-return.
+      Long res = Long.valueOf(cnt);  // line +2
+      ++cnt;  // line +3
+      return res;
+    }
+    public String toString() {
+      return "BadForceIntObject { cnt: " + cnt + " }";
+    }
+  }
+  public static class BadForceIntObject implements TestRunnable {
+    public int cnt = 0;
+    public int result = 0;
+    public long getReturnValue() {
+      return result;
+    }
+    public void run() {
+      result = incrCnt();
+    }
+    public int incrCnt() {
+      ++cnt;  // line +0
+      // We set a breakpoint here and try to force-early-return.
+      int res = cnt;  // line +2
+      ++cnt;  // line +3
+      return res;
+    }
+    public String toString() {
+      return "BadForceIntObject { cnt: " + cnt + " }";
+    }
+  }
+
+  public static void run() throws Exception {
+    new Test1970().runTests();
+  }
+
+  public static void no_runTestOn(Supplier<Object> a, ThreadRunnable b, ThreadRunnable c) {}
+
+  public void runTests() throws Exception {
+    setupTest();
+
+    final Method calledFunction = StandardTestObject.class.getDeclaredMethod("calledFunction");
+    // Add a breakpoint on the second line after the start of the function
+    final int line = Breakpoint.locationToLine(calledFunction, 0) + 2;
+    final long loc = Breakpoint.lineToLocation(calledFunction, line);
+    System.out.println("Test stopped using breakpoint");
+    runTestOn(
+        StandardTestObject::new,
+        (thr) -> setupSuspendBreakpointFor(calledFunction, loc, thr),
+        SuspendEvents::clearSuspendBreakpointFor);
+
+    final Method syncFunctionCalledFunction =
+        SynchronizedFunctionTestObject.class.getDeclaredMethod("calledFunction");
+    // Add a breakpoint on the second line after the start of the function Annoyingly r8 generally
+    // has the first instruction (a monitor enter) not be marked as being on any line but javac has
+    // it marked as being on the first line of the function. Just use the second entry on the
+    // line-number table to get the breakpoint. This should be good for both.
+    final long syncFunctionLoc =
+        Breakpoint.getLineNumberTable(syncFunctionCalledFunction)[1].location;
+    System.out.println("Test stopped using breakpoint with declared synchronized function");
+    runTestOn(
+        SynchronizedFunctionTestObject::new,
+        (thr) -> setupSuspendBreakpointFor(syncFunctionCalledFunction, syncFunctionLoc, thr),
+        SuspendEvents::clearSuspendBreakpointFor);
+
+    final Method syncCalledFunction =
+        SynchronizedTestObject.class.getDeclaredMethod("calledFunction");
+    // Add a breakpoint on the second line after the start of the function
+    final int syncLine = Breakpoint.locationToLine(syncCalledFunction, 0) + 3;
+    final long syncLoc = Breakpoint.lineToLocation(syncCalledFunction, syncLine);
+    System.out.println("Test stopped using breakpoint with synchronized block");
+    runTestOn(
+        SynchronizedTestObject::new,
+        (thr) -> setupSuspendBreakpointFor(syncCalledFunction, syncLoc, thr),
+        SuspendEvents::clearSuspendBreakpointFor);
+
+    System.out.println("Test stopped on single step");
+    runTestOn(
+        StandardTestObject::new,
+        (thr) -> setupSuspendSingleStepAt(calledFunction, loc, thr),
+        SuspendEvents::clearSuspendSingleStepFor);
+
+    final Field target_field = FieldBasedTestObject.class.getDeclaredField("TARGET_FIELD");
+    System.out.println("Test stopped on field access");
+    runTestOn(
+        FieldBasedTestObject::new,
+        (thr) -> setupFieldSuspendFor(FieldBasedTestObject.class, target_field, true, thr),
+        SuspendEvents::clearFieldSuspendFor);
+
+    System.out.println("Test stopped on field modification");
+    runTestOn(
+        FieldBasedTestObject::new,
+        (thr) -> setupFieldSuspendFor(FieldBasedTestObject.class, target_field, false, thr),
+        SuspendEvents::clearFieldSuspendFor);
+
+    System.out.println("Test stopped during Method Exit of calledFunction");
+    runTestOn(
+        StandardTestObject::new,
+        (thr) -> setupSuspendMethodEvent(calledFunction, /* enter */ false, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    System.out.println("Test stopped during Method Enter of calledFunction");
+    runTestOn(
+        StandardTestObject::new,
+        (thr) -> setupSuspendMethodEvent(calledFunction, /* enter */ true, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    final Method exceptionOnceCalledMethod =
+        ExceptionOnceObject.class.getDeclaredMethod("calledFunction");
+    System.out.println("Test stopped during Method Exit due to exception thrown in same function");
+    runTestOn(
+        () -> new ExceptionOnceObject(/* throwInSub */ false),
+        (thr) -> setupSuspendMethodEvent(exceptionOnceCalledMethod, /* enter */ false, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    System.out.println("Test stopped during Method Exit due to exception thrown in subroutine");
+    runTestOn(
+        () -> new ExceptionOnceObject(/* throwInSub */ true),
+        (thr) -> setupSuspendMethodEvent(exceptionOnceCalledMethod, /* enter */ false, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    final Method exceptionThrowCalledMethod =
+        ExceptionThrowTestObject.class.getDeclaredMethod("calledFunction");
+    System.out.println(
+        "Test stopped during notifyFramePop with exception on pop of calledFunction");
+    runTestOn(
+        () -> new ExceptionThrowTestObject(false),
+        (thr) -> setupSuspendPopFrameEvent(0, exceptionThrowCalledMethod, thr),
+        SuspendEvents::clearSuspendPopFrameEvent);
+
+    final Method exceptionCatchThrowMethod =
+        ExceptionCatchTestObject.class.getDeclaredMethod("doThrow");
+    System.out.println("Test stopped during notifyFramePop with exception on pop of doThrow");
+    runTestOn(
+        ExceptionCatchTestObject::new,
+        (thr) -> setupSuspendPopFrameEvent(0, exceptionCatchThrowMethod, thr),
+        SuspendEvents::clearSuspendPopFrameEvent);
+
+    System.out.println(
+        "Test stopped during ExceptionCatch event of calledFunction "
+            + "(catch in called function, throw in called function)");
+    runTestOn(
+        () -> new ExceptionThrowTestObject(true),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowCalledMethod, /* catch */ true, thr),
+        SuspendEvents::clearSuspendExceptionEvent);
+
+    final Method exceptionCatchCalledMethod =
+        ExceptionCatchTestObject.class.getDeclaredMethod("calledFunction");
+    System.out.println(
+        "Test stopped during ExceptionCatch event of calledFunction "
+            + "(catch in called function, throw in subroutine)");
+    runTestOn(
+        ExceptionCatchTestObject::new,
+        (thr) -> setupSuspendExceptionEvent(exceptionCatchCalledMethod, /* catch */ true, thr),
+        SuspendEvents::clearSuspendExceptionEvent);
+
+    System.out.println(
+        "Test stopped during Exception event of calledFunction " + "(catch in calling function)");
+    runTestOn(
+        () -> new ExceptionThrowTestObject(false),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowCalledMethod, /* catch */ false, thr),
+        SuspendEvents::clearSuspendExceptionEvent);
+
+    System.out.println(
+        "Test stopped during Exception event of calledFunction (catch in called function)");
+    runTestOn(
+        () -> new ExceptionThrowTestObject(true),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowCalledMethod, /* catch */ false, thr),
+        SuspendEvents::clearSuspendExceptionEvent);
+
+    final Method exceptionThrowFarCalledMethod =
+        ExceptionThrowFarTestObject.class.getDeclaredMethod("calledFunction");
+    System.out.println(
+        "Test stopped during Exception event of calledFunction "
+            + "(catch in parent of calling function)");
+    runTestOn(
+        () -> new ExceptionThrowFarTestObject(false),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowFarCalledMethod, /* catch */ false, thr),
+        SuspendEvents::clearSuspendExceptionEvent);
+
+    System.out.println(
+        "Test stopped during Exception event of calledFunction " + "(catch in called function)");
+    runTestOn(
+        () -> new ExceptionThrowFarTestObject(true),
+        (thr) -> setupSuspendExceptionEvent(exceptionThrowFarCalledMethod, /* catch */ false, thr),
+        SuspendEvents::clearSuspendExceptionEvent);
+
+    System.out.println("Test stopped during random Suspend.");
+    runTestOn(() -> {
+      final SuspendSuddenlyObject sso = new SuspendSuddenlyObject();
+      return new TestConfig(sso, new TestSuspender() {
+        public void setupForceReturnRun(Thread thr) { }
+        public void setupNormalRun(Thread thr) {
+          sso.should_spin = false;
+        }
+
+        public void waitForSuspend(Thread thr) {
+          while (!sso.is_spinning) { }
+          Suspension.suspend(thr);
+        }
+
+        public void cleanup(Thread thr) { }
+      });
+    });
+
+    System.out.println("Test stopped during a native method fails");
+    runTestOn(
+        NativeCalledObject::new,
+        SuspendEvents::setupWaitForNativeCall,
+        SuspendEvents::clearWaitForNativeCall);
+
+    System.out.println("Test stopped in a method called by native succeeds");
+    final Method nativeCallerMethod = NativeCallerObject.class.getDeclaredMethod("calledFunction");
+    runTestOn(
+        NativeCallerObject::new,
+        (thr) -> setupSuspendMethodEvent(nativeCallerMethod, /* enter */ false, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    System.out.println("Test stopped in a static method");
+    final Method staticCalledMethod = StaticMethodObject.class.getDeclaredMethod("calledFunction", Supplier.class);
+    final int staticFunctionLine= Breakpoint.locationToLine(staticCalledMethod, 0) + 2;
+    final long staticFunctionLoc = Breakpoint.lineToLocation(staticCalledMethod, staticFunctionLine);
+    runTestOn(
+        StaticMethodObject::new,
+        (thr) -> setupSuspendBreakpointFor(staticCalledMethod, staticFunctionLoc, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    System.out.println("Test force-return of void function fails!");
+    final Method voidFunction = BadForceVoidObject.class.getDeclaredMethod("incrCnt");
+    final int voidLine = Breakpoint.locationToLine(voidFunction, 0) + 2;
+    final long voidLoc = Breakpoint.lineToLocation(voidFunction, voidLine);
+    runTestOn(
+        BadForceVoidObject::new,
+        (thr) -> setupSuspendBreakpointFor(voidFunction, voidLoc, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    System.out.println("Test force-return of int function fails!");
+    final Method intFunction = BadForceIntObject.class.getDeclaredMethod("incrCnt");
+    final int intLine = Breakpoint.locationToLine(intFunction, 0) + 2;
+    final long intLoc = Breakpoint.lineToLocation(intFunction, intLine);
+    runTestOn(
+        BadForceIntObject::new,
+        (thr) -> setupSuspendBreakpointFor(intFunction, intLoc, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+
+    System.out.println("Test force-return of Object function fails!");
+    final Method objFunction = BadForceObjectObject.class.getDeclaredMethod("incrCnt");
+    final int objLine = Breakpoint.locationToLine(objFunction, 0) + 2;
+    final long objLoc = Breakpoint.lineToLocation(objFunction, objLine);
+    runTestOn(
+        BadForceObjectObject::new,
+        (thr) -> setupSuspendBreakpointFor(objFunction, objLoc, thr),
+        SuspendEvents::clearSuspendMethodEvent);
+  }
+}
diff --git a/test/1971-multi-force-early-return/expected.txt b/test/1971-multi-force-early-return/expected.txt
new file mode 100644
index 0000000..2d62363
--- /dev/null
+++ b/test/1971-multi-force-early-return/expected.txt
@@ -0,0 +1,3 @@
+Thread 0: Thread: Test1971 - Thread 0 method returned: art.Test1971$NormalExit { thread: Test1971 - Thread 0, creator: Test1971 - Thread 0 }
+Thread 1: Thread: Test1971 - Thread 1 method returned: art.Test1971$ForcedExit { thread: Test1971 - Thread 1, creator: Concurrent thread force-returner - 1 }
+Thread 2: Thread: Test1971 - Thread 2 method returned: art.Test1971$ForcedExit { thread: Test1971 - Thread 2, creator: Concurrent thread force-returner - 2 }
diff --git a/test/1971-multi-force-early-return/info.txt b/test/1971-multi-force-early-return/info.txt
new file mode 100644
index 0000000..621d881
--- /dev/null
+++ b/test/1971-multi-force-early-return/info.txt
@@ -0,0 +1,4 @@
+Test JVMTI ForceEarlyReturnObject functionality
+
+Checks that we can call the ForceEarlyReturn functions successfully and force
+returns of objects. It also checks some of the basic error modes.
diff --git a/test/1971-multi-force-early-return/run b/test/1971-multi-force-early-return/run
new file mode 100755
index 0000000..d16d4e6
--- /dev/null
+++ b/test/1971-multi-force-early-return/run
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# On RI we need to turn class-load tests off since those events are buggy around
+# pop-frame (see b/116003018).
+ARGS=""
+if [[ "$TEST_RUNTIME" == "jvm" ]]; then
+  ARGS="--args DISABLE_CLASS_LOAD_TESTS"
+fi
+
+./default-run "$@" --jvmti $ARGS
diff --git a/test/1971-multi-force-early-return/src/Main.java b/test/1971-multi-force-early-return/src/Main.java
new file mode 100644
index 0000000..a2e4fd2
--- /dev/null
+++ b/test/1971-multi-force-early-return/src/Main.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+import java.util.Arrays;
+import java.util.List;
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1971.run();
+  }
+}
diff --git a/test/1971-multi-force-early-return/src/art/Breakpoint.java b/test/1971-multi-force-early-return/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/1971-multi-force-early-return/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/1971-multi-force-early-return/src/art/NonStandardExit.java b/test/1971-multi-force-early-return/src/art/NonStandardExit.java
new file mode 120000
index 0000000..d542a3c
--- /dev/null
+++ b/test/1971-multi-force-early-return/src/art/NonStandardExit.java
@@ -0,0 +1 @@
+../../../jvmti-common/NonStandardExit.java
\ No newline at end of file
diff --git a/test/1971-multi-force-early-return/src/art/StackTrace.java b/test/1971-multi-force-early-return/src/art/StackTrace.java
new file mode 120000
index 0000000..e1a08aa
--- /dev/null
+++ b/test/1971-multi-force-early-return/src/art/StackTrace.java
@@ -0,0 +1 @@
+../../../jvmti-common/StackTrace.java
\ No newline at end of file
diff --git a/test/1971-multi-force-early-return/src/art/SuspendEvents.java b/test/1971-multi-force-early-return/src/art/SuspendEvents.java
new file mode 120000
index 0000000..f7a5f7e
--- /dev/null
+++ b/test/1971-multi-force-early-return/src/art/SuspendEvents.java
@@ -0,0 +1 @@
+../../../jvmti-common/SuspendEvents.java
\ No newline at end of file
diff --git a/test/1971-multi-force-early-return/src/art/Suspension.java b/test/1971-multi-force-early-return/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/1971-multi-force-early-return/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/1971-multi-force-early-return/src/art/Test1971.java b/test/1971-multi-force-early-return/src/art/Test1971.java
new file mode 100644
index 0000000..0efbf9d
--- /dev/null
+++ b/test/1971-multi-force-early-return/src/art/Test1971.java
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package art;
+
+import java.io.*;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.concurrent.CountDownLatch;
+import java.util.function.Consumer;
+import java.util.function.IntConsumer;
+import java.util.function.Supplier;
+
+public class Test1971 {
+  public static final boolean PRINT_STACK_TRACE = false;
+  public static final int NUM_THREADS = 3;
+
+  public static class ReturnValue {
+    public final Thread target;
+    public final Thread creator;
+    public final Thread.State state;
+    public final StackTraceElement stack[];
+
+    public ReturnValue(Thread thr) {
+      target = thr;
+      creator = Thread.currentThread();
+      state = thr.getState();
+      stack = thr.getStackTrace();
+    }
+
+    public String toString() {
+      String stackTrace =
+          PRINT_STACK_TRACE
+              ?  ",\n\tstate: "
+                  + state
+                  + ",\n\tstack:\n"
+                  + safeDumpStackTrace(stack, "\t\t")
+                  + ",\n\t"
+              : "";
+      return this.getClass().getName()
+                  + " { thread: " + target.getName()
+                  + ", creator: " + creator.getName()
+                  + stackTrace + " }";
+    }
+  }
+
+  private static String safeDumpStackTrace(StackTraceElement st[], String prefix) {
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream os = new PrintStream(baos);
+    for (StackTraceElement e : st) {
+      os.println(
+          prefix
+              + e.getClassName()
+              + "."
+              + e.getMethodName()
+              + "("
+              + (e.isNativeMethod() ? "Native Method" : e.getFileName())
+              + ")");
+      if (e.getClassName().equals("art.Test1971") && e.getMethodName().equals("runTests")) {
+        os.println(prefix + "<Additional frames hidden>");
+        break;
+      }
+    }
+    os.flush();
+    return baos.toString();
+  }
+
+  public static final class ForcedExit extends ReturnValue {
+    public ForcedExit(Thread thr) {
+      super(thr);
+    }
+  }
+
+  public static final class NormalExit extends ReturnValue {
+    public NormalExit() {
+      super(Thread.currentThread());
+    }
+  }
+
+  public static void runTest(Consumer<String> con) {
+    String thread_name = Thread.currentThread().getName();
+    con.accept("Thread: " + thread_name + " method returned: " + targetMethod());
+  }
+  public static Object targetMethod() {
+    // Set a breakpoint here and perform a force-early-return
+    return new NormalExit();
+  }
+
+  public static void run() throws Exception {
+    SuspendEvents.setupTest();
+
+    final String[] results = new String[NUM_THREADS];
+    final Thread[] targets = new Thread[NUM_THREADS];
+    final CountDownLatch cdl = new CountDownLatch(1);
+    final CountDownLatch startup = new CountDownLatch(NUM_THREADS);
+    for (int i = 0; i < NUM_THREADS; i++) {
+      final int idx = i;
+      targets[i] = new Thread(() -> {
+        try {
+          startup.countDown();
+          cdl.await();
+          runTest((s) -> {
+            synchronized(results) {
+              results[idx] = s;
+            }
+          });
+        } catch (Exception e) {
+          throw new Error("Failed to run test!", e);
+        }
+      }, "Test1971 - Thread " + i);
+      targets[i].start();
+    }
+    // Wait for the targets to start.
+    startup.await();
+    final Method targetMethod = Test1971.class.getDeclaredMethod("targetMethod");
+    final long targetLoc = 0;
+    // Setup breakpoints on all targets.
+    for (Thread thr : targets) {
+      try {
+        SuspendEvents.setupSuspendBreakpointFor(targetMethod, targetLoc, thr);
+      } catch (RuntimeException e) {
+        if (e.getMessage().equals("JVMTI_ERROR_DUPLICATE")) {
+          continue;
+        } else {
+          throw e;
+        }
+      }
+    }
+    // Allow tests to continue.
+    cdl.countDown();
+    // Wait for breakpoint to be hit on all threads.
+    for (Thread thr : targets) {
+      SuspendEvents.waitForSuspendHit(thr);
+    }
+    final CountDownLatch force_return_start = new CountDownLatch(NUM_THREADS);
+    final CountDownLatch force_return_latch = new CountDownLatch(1);
+    Thread[] returners = new Thread[NUM_THREADS];
+    for (int i = 0; i < NUM_THREADS; i++) {
+      final int idx = i;
+      final Thread target = targets[i];
+      returners[i] = new Thread(() -> {
+        try {
+          force_return_start.countDown();
+          force_return_latch.await();
+          if (idx % 5 != 0) {
+            NonStandardExit.forceEarlyReturn(target, new ForcedExit(target));
+          }
+          Suspension.resume(target);
+        } catch (Exception e) {
+          throw new Error("Failed to resume!", e);
+        }
+      }, "Concurrent thread force-returner - " + i);
+      returners[i].start();
+    }
+    // Force-early-return and resume on all threads simultaneously.
+    force_return_start.await();
+    force_return_latch.countDown();
+
+    // Wait for all threads to finish.
+    for (int i = 0; i < NUM_THREADS; i++) {
+      returners[i].join();
+      targets[i].join();
+    }
+
+    // Print results
+    for (int i = 0; i < NUM_THREADS; i++) {
+      System.out.println("Thread " + i + ": " + results[i]);
+    }
+  }
+}
diff --git a/test/1972-jni-id-swap-indices/expected.txt b/test/1972-jni-id-swap-indices/expected.txt
new file mode 100644
index 0000000..a22979f
--- /dev/null
+++ b/test/1972-jni-id-swap-indices/expected.txt
@@ -0,0 +1,7 @@
+JNI_OnLoad called
+JNI Type is: SwapablePointer
+pointer ID looks like a pointer!
+JNI Type is: Indices
+index ID looks like an index!
+pointer ID remains a pointer!
+index WKC ID looks like an index!
diff --git a/test/1972-jni-id-swap-indices/info.txt b/test/1972-jni-id-swap-indices/info.txt
new file mode 100644
index 0000000..8b9c215
--- /dev/null
+++ b/test/1972-jni-id-swap-indices/info.txt
@@ -0,0 +1,3 @@
+Tests changing from SwapablePointer to indices for JniIdType
+
+Also tests that WellKnownClasses jmethodIDs are indices after swap.
\ No newline at end of file
diff --git a/test/1972-jni-id-swap-indices/jni_id.cc b/test/1972-jni-id-swap-indices/jni_id.cc
new file mode 100644
index 0000000..7de7131
--- /dev/null
+++ b/test/1972-jni-id-swap-indices/jni_id.cc
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <jni.h>
+#include <sstream>
+#include <stdio.h>
+
+#include <android-base/logging.h>
+#include <android-base/macros.h>
+
+#include "jni/java_vm_ext.h"
+#include "runtime.h"
+
+namespace art {
+
+extern "C" JNIEXPORT jlong JNICALL Java_Main_GetMethodId(JNIEnv* env,
+                                                         jclass k ATTRIBUTE_UNUSED,
+                                                         bool is_static,
+                                                         jclass target,
+                                                         jstring name,
+                                                         jstring sig) {
+  auto get_id = is_static ? env->functions->GetStaticMethodID : env->functions->GetMethodID;
+  jboolean cpy;
+  const char* cname = env->GetStringUTFChars(name, &cpy);
+  const char* csig = env->GetStringUTFChars(sig, &cpy);
+  jlong res = static_cast<jlong>(reinterpret_cast<intptr_t>(get_id(env, target, cname, csig)));
+  env->ReleaseStringUTFChars(name, cname);
+  env->ReleaseStringUTFChars(sig, csig);
+  return res;
+}
+
+extern "C" JNIEXPORT jobject JNICALL Java_Main_GetJniType(JNIEnv* env, jclass k ATTRIBUTE_UNUSED) {
+  std::ostringstream oss;
+  oss << Runtime::Current()->GetJniIdType();
+  return env->NewStringUTF(oss.str().c_str());
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_SetToPointerIds(JNIEnv* env ATTRIBUTE_UNUSED,
+                                                            jclass k ATTRIBUTE_UNUSED) {
+  Runtime::Current()->SetJniIdType(JniIdType::kPointer);
+}
+extern "C" JNIEXPORT void JNICALL Java_Main_SetToIndexIds(JNIEnv* env ATTRIBUTE_UNUSED,
+                                                          jclass k ATTRIBUTE_UNUSED) {
+  Runtime::Current()->SetJniIdType(JniIdType::kIndices);
+}
+
+}  // namespace art
diff --git a/test/1972-jni-id-swap-indices/run b/test/1972-jni-id-swap-indices/run
new file mode 100755
index 0000000..999b92a
--- /dev/null
+++ b/test/1972-jni-id-swap-indices/run
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+args=$(echo "$@" | sed  's/--runtime-option -Xopaque-jni-ids\:true//g')
+
+./default-run $args --android-runtime-option -Xopaque-jni-ids:swapable --android-runtime-option -Xauto-promote-opaque-jni-ids:false
\ No newline at end of file
diff --git a/test/1972-jni-id-swap-indices/src/Main.java b/test/1972-jni-id-swap-indices/src/Main.java
new file mode 100644
index 0000000..eaf4126
--- /dev/null
+++ b/test/1972-jni-id-swap-indices/src/Main.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.function.Consumer;
+
+public class Main {
+  public static final boolean PRINT = false;
+
+  public static class PtrCls {
+    public static void doNothingPtr() {}
+  }
+
+  public static class IdxCls {
+    public static void doNothingIdx() {}
+  }
+
+  public static void DbgPrint(String str) {
+    if (PRINT) {
+      System.out.println(str);
+    }
+  }
+
+  public static long GetId(Class<?> k, String name) {
+    return GetMethodId(true, k, name, "()V");
+  }
+
+  public static void main(String[] args) {
+    System.loadLibrary(args[0]);
+    System.out.println("JNI Type is: " + GetJniType());
+    long expect_ptr_id = GetId(PtrCls.class, "doNothingPtr");
+    DbgPrint(String.format("expected_ptr_id is 0x%x", expect_ptr_id));
+    if (expect_ptr_id % 4 != 0) {
+      throw new Error("ID " + expect_ptr_id + " is not aligned!");
+    } else {
+      System.out.println("pointer ID looks like a pointer!");
+    }
+    SetToIndexIds();
+    System.out.println("JNI Type is: " + GetJniType());
+    long expect_idx_id = GetId(IdxCls.class, "doNothingIdx");
+    DbgPrint(String.format("expected_idx_id is 0x%x", expect_idx_id));
+    if (expect_idx_id % 2 != 1) {
+      throw new Error("ID " + expect_ptr_id + " is not odd!");
+    } else {
+      System.out.println("index ID looks like an index!");
+    }
+    long again_ptr_id = GetId(PtrCls.class, "doNothingPtr");
+    if (expect_ptr_id != again_ptr_id) {
+      throw new Error(
+          "Got different id values for same method. " + expect_ptr_id + " vs " + again_ptr_id);
+    } else {
+      System.out.println("pointer ID remains a pointer!");
+    }
+    long well_known_id = GetMethodId(false, Consumer.class, "accept", "(Ljava/lang/Object;)V");
+    DbgPrint(String.format("well_known_id is 0x%x", well_known_id));
+    if (well_known_id % 2 != 1) {
+      throw new Error("WKC ID " + well_known_id + " is not odd!");
+    } else {
+      System.out.println("index WKC ID looks like an index!");
+    }
+  }
+
+  private static native String GetJniType();
+  private static native void SetToIndexIds();
+  private static native long GetMethodId(boolean is_static, Class k, String name, String sig);
+}
diff --git a/test/1973-jni-id-swap-pointer/expected.txt b/test/1973-jni-id-swap-pointer/expected.txt
new file mode 100644
index 0000000..5ea7858
--- /dev/null
+++ b/test/1973-jni-id-swap-pointer/expected.txt
@@ -0,0 +1,6 @@
+JNI_OnLoad called
+JNI Type is: SwapablePointer
+pointer ID looks like a pointer!
+JNI Type is: Pointer
+pointer2 ID looks like a pointer!
+pointer ID remains a pointer!
diff --git a/test/1973-jni-id-swap-pointer/info.txt b/test/1973-jni-id-swap-pointer/info.txt
new file mode 100644
index 0000000..42f7ef2
--- /dev/null
+++ b/test/1973-jni-id-swap-pointer/info.txt
@@ -0,0 +1 @@
+Tests changing from SwapablePointer to indices for JniIdType
\ No newline at end of file
diff --git a/test/1973-jni-id-swap-pointer/run b/test/1973-jni-id-swap-pointer/run
new file mode 100755
index 0000000..999b92a
--- /dev/null
+++ b/test/1973-jni-id-swap-pointer/run
@@ -0,0 +1,19 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+args=$(echo "$@" | sed  's/--runtime-option -Xopaque-jni-ids\:true//g')
+
+./default-run $args --android-runtime-option -Xopaque-jni-ids:swapable --android-runtime-option -Xauto-promote-opaque-jni-ids:false
\ No newline at end of file
diff --git a/test/1973-jni-id-swap-pointer/src/Main.java b/test/1973-jni-id-swap-pointer/src/Main.java
new file mode 100644
index 0000000..654cfec
--- /dev/null
+++ b/test/1973-jni-id-swap-pointer/src/Main.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static final boolean PRINT = false;
+
+  public static class PtrCls {
+    public static void doNothingPtr() {}
+  }
+
+  public static class IdxCls {
+    public static void doNothingIdx() {}
+  }
+
+  public static void DbgPrint(String str) {
+    if (PRINT) {
+      System.out.println(str);
+    }
+  }
+
+  public static long GetId(Class<?> c, String name) {
+    return GetMethodId(true, c, name, "()V");
+  }
+
+  public static void main(String[] args) {
+    System.loadLibrary(args[0]);
+    System.out.println("JNI Type is: " + GetJniType());
+    long expect_ptr_id = GetId(PtrCls.class, "doNothingPtr");
+    DbgPrint(String.format("expected_ptr_id is 0x%x", expect_ptr_id));
+    if (expect_ptr_id % 4 != 0) {
+      throw new Error("ID " + expect_ptr_id + " is not aligned!");
+    } else {
+      System.out.println("pointer ID looks like a pointer!");
+    }
+    SetToPointerIds();
+    System.out.println("JNI Type is: " + GetJniType());
+    long expect_ptr_id2 = GetId(IdxCls.class, "doNothingIdx");
+    DbgPrint(String.format("expected_ptr_id2 is 0x%x", expect_ptr_id2));
+    if (expect_ptr_id2 % 4 != 0) {
+      throw new Error("ID " + expect_ptr_id + " is not aligned!");
+    } else {
+      System.out.println("pointer2 ID looks like a pointer!");
+    }
+    long again_ptr_id = GetId(PtrCls.class, "doNothingPtr");
+    if (expect_ptr_id != again_ptr_id) {
+      throw new Error(
+          "Got different id values for same method. " + expect_ptr_id + " vs " + again_ptr_id);
+    } else {
+      System.out.println("pointer ID remains a pointer!");
+    }
+  }
+
+  private static native String GetJniType();
+  private static native void SetToPointerIds();
+  private static native long GetMethodId(boolean is_static, Class k, String name, String sig);
+}
diff --git a/test/1974-resize-array/expected.txt b/test/1974-resize-array/expected.txt
new file mode 100644
index 0000000..997b22c
--- /dev/null
+++ b/test/1974-resize-array/expected.txt
@@ -0,0 +1,84 @@
+Test instance
+val is: [1, 2, 3] resize +3
+val is: [1, 2, 3, 0, 0, 0, 0, 0]
+Same value? true
+
+Test HashMap
+val is: [1, 2, 3, 4] resize +3
+Map is: ([1, 2, 3, 4]->Other Value), ([1, 2, 3, 4]->THE VALUE), ([1, 4]->Third value), 
+val is: [1, 2, 3, 4, 0, 0, 0]
+Map is: ([1, 2, 3, 4]->Other Value), ([1, 2, 3, 4, 0, 0, 0]->THE VALUE), ([1, 4]->Third value), 
+
+Test j.l.r.WeakReference
+val is: [weak, ref] resize +3
+val is: [weak, ref, null, null, null, null, null]
+Same value? true
+
+Test instance self-ref
+val is: [<SELF REF>, A, B, C] resize +5 item 0 is [<SELF REF>, A, B, C]
+val is: [<SELF REF>, A, B, C, null, null, null, null, null]
+val is: [<SELF REF>, A, B, C, null, null, null, null, null]
+Same value? true
+Same structure? true
+Same inner-structure? true
+
+Test instance self-ref smaller
+val is: [<SELF REF>, A, B, C, null, null, null, null, null] resize -7 item 0 is [<SELF REF>, A, B, C, null, null, null, null, null]
+val is: [<SELF REF>, A]
+val is: [<SELF REF>, A]
+Same value? true
+Same structure? true
+Same inner-structure? true
+
+Test local
+val is: [2, 3, 4] resize +5
+val is: [2, 3, 4, 0, 0, 0, 0, 0]
+Same value? true
+
+Test local smaller
+val is: [1, 2, 3, 4, 5] resize -2
+val is: [1, 2, 3]
+Same value? true
+
+T1: Test local multi-thread
+T1: val is: [1, 2, 3] resize -2
+T1: val is: [1]
+T1: Same value? true
+T2: Test local multi-thread
+T2: val is: [1, 2, 3] resize -2
+T2: val is: [1]
+T2: Same value? true
+
+Test locks
+val is: [A, 2, C] resize -2
+val is: [A]
+Same value? true
+Locks seem to all work.
+
+Test jni-ref
+val is: [1, 11, 111] resize +5
+val is: [1, 11, 111, null, null, null, null, null]
+Same value? true
+
+Test weak jni-ref
+val is: [2, 22, 222] resize +5
+val is: [2, 22, 222, null, null, null, null, null]
+Same value? true
+
+Test jni local ref
+val is: [3, 32, 322]
+Resize +4
+val is: [3, 32, 322, null, null, null, null]
+Same value? true
+
+Test jvmti-tags
+val is: [[3, 33, 333]] resize +5
+val is: [[3, 33, 333, null, null, null, null, null]]
+Same value? true
+
+Test jvmti-tags with obsolete
+val is: [[4, 44, 444]] resize +5
+val is: [[4, 44, 444, null, null, null, null, null]]
+Same value? true
+Everything looks good WRT obsolete object!
+
diff --git a/test/1974-resize-array/info.txt b/test/1974-resize-array/info.txt
new file mode 100644
index 0000000..ef4fa40
--- /dev/null
+++ b/test/1974-resize-array/info.txt
@@ -0,0 +1,3 @@
+Test for change_array_size extension function.
+
+Tests that we are able to use the extension function to change the length of arrays.
diff --git a/test/1974-resize-array/resize_array.cc b/test/1974-resize-array/resize_array.cc
new file mode 100644
index 0000000..60037b8
--- /dev/null
+++ b/test/1974-resize-array/resize_array.cc
@@ -0,0 +1,268 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdio>
+#include <memory>
+#include <mutex>
+#include <string>
+#include <vector>
+
+#include "android-base/logging.h"
+#include "android-base/macros.h"
+#include "android-base/stringprintf.h"
+
+#include "jni.h"
+#include "jvmti.h"
+#include "scoped_local_ref.h"
+#include "scoped_utf_chars.h"
+
+// Test infrastructure
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
+
+namespace art {
+namespace Test1974ResizeArray {
+
+using ChangeArraySize = jvmtiError (*)(jvmtiEnv* env, jobject arr, jint size);
+
+template <typename T> static void Dealloc(T* t) {
+  jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(t));
+}
+
+template <typename T, typename... Rest> static void Dealloc(T* t, Rest... rs) {
+  Dealloc(t);
+  Dealloc(rs...);
+}
+
+static void DeallocParams(jvmtiParamInfo* params, jint n_params) {
+  for (jint i = 0; i < n_params; i++) {
+    Dealloc(params[i].name);
+  }
+}
+
+static jint FindExtensionEvent(JNIEnv* env, const std::string& name) {
+  jint n_ext;
+  jvmtiExtensionEventInfo* infos;
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetExtensionEvents(&n_ext, &infos))) {
+    return -1;
+  }
+  jint res = -1;
+  bool found = false;
+  for (jint i = 0; i < n_ext; i++) {
+    jvmtiExtensionEventInfo* cur_info = &infos[i];
+    if (strcmp(name.c_str(), cur_info->id) == 0) {
+      res = cur_info->extension_event_index;
+      found = true;
+    }
+    // Cleanup the cur_info
+    DeallocParams(cur_info->params, cur_info->param_count);
+    Dealloc(cur_info->id, cur_info->short_description, cur_info->params);
+  }
+  // Cleanup the array.
+  Dealloc(infos);
+  if (!found) {
+    ScopedLocalRef<jclass> rt_exception(env, env->FindClass("java/lang/RuntimeException"));
+    env->ThrowNew(rt_exception.get(), (name + " extensions not found").c_str());
+    return -1;
+  }
+  return res;
+}
+
+static jvmtiExtensionFunction FindExtensionMethod(JNIEnv* env, const std::string& name) {
+  jint n_ext;
+  jvmtiExtensionFunctionInfo* infos;
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->GetExtensionFunctions(&n_ext, &infos))) {
+    return nullptr;
+  }
+  jvmtiExtensionFunction res = nullptr;
+  for (jint i = 0; i < n_ext; i++) {
+    jvmtiExtensionFunctionInfo* cur_info = &infos[i];
+    if (strcmp(name.c_str(), cur_info->id) == 0) {
+      res = cur_info->func;
+    }
+    // Cleanup the cur_info
+    DeallocParams(cur_info->params, cur_info->param_count);
+    Dealloc(cur_info->id, cur_info->short_description, cur_info->params, cur_info->errors);
+  }
+  // Cleanup the array.
+  Dealloc(infos);
+  if (res == nullptr) {
+    ScopedLocalRef<jclass> rt_exception(env, env->FindClass("java/lang/RuntimeException"));
+    env->ThrowNew(rt_exception.get(), (name + " extensions not found").c_str());
+    return nullptr;
+  }
+  return res;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test1974_ResizeArray(JNIEnv* env,
+                                                                jclass klass ATTRIBUTE_UNUSED,
+                                                                jobject ref_gen,
+                                                                jint new_size) {
+  ChangeArraySize change_array_size = reinterpret_cast<ChangeArraySize>(
+      FindExtensionMethod(env, "com.android.art.heap.change_array_size"));
+  if (change_array_size == nullptr) {
+    return;
+  }
+  jmethodID getArr = env->GetMethodID(
+      env->FindClass("java/util/function/Supplier"), "get", "()Ljava/lang/Object;");
+  jobject arr = env->CallObjectMethod(ref_gen, getArr);
+  JvmtiErrorToException(env, jvmti_env, change_array_size(jvmti_env, arr, new_size));
+}
+
+extern "C" JNIEXPORT jobject JNICALL Java_art_Test1974_ReadJniRef(JNIEnv* env,
+                                                                  jclass klass ATTRIBUTE_UNUSED,
+                                                                  jlong r) {
+  return env->NewLocalRef(reinterpret_cast<jobject>(static_cast<intptr_t>(r)));
+}
+
+extern "C" JNIEXPORT jlong JNICALL
+Java_art_Test1974_GetWeakGlobalJniRef(JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject r) {
+  return static_cast<jlong>(reinterpret_cast<intptr_t>(env->NewWeakGlobalRef(r)));
+}
+
+extern "C" JNIEXPORT jlong JNICALL Java_art_Test1974_GetGlobalJniRef(JNIEnv* env,
+                                                                     jclass klass ATTRIBUTE_UNUSED,
+                                                                     jobject r) {
+  return static_cast<jlong>(reinterpret_cast<intptr_t>(env->NewGlobalRef(r)));
+}
+
+extern "C" JNIEXPORT jobjectArray JNICALL
+Java_art_Test1974_GetObjectsWithTag(JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jlong tag) {
+  jsize cnt = 0;
+  jobject* res = nullptr;
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->GetObjectsWithTags(1, &tag, &cnt, &res, nullptr))) {
+    return nullptr;
+  }
+  jobjectArray ret = env->NewObjectArray(cnt, env->FindClass("java/lang/Object"), nullptr);
+  if (ret == nullptr) {
+    return nullptr;
+  }
+  for (jsize i = 0; i < cnt; i++) {
+    env->SetObjectArrayElement(ret, i, res[i]);
+  }
+  jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(res));
+  return ret;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test1974_runNativeTest(JNIEnv* env,
+                                                                  jclass klass ATTRIBUTE_UNUSED,
+                                                                  jobjectArray arr,
+                                                                  jobject resize,
+                                                                  jobject print,
+                                                                  jobject check) {
+  jmethodID run = env->GetMethodID(env->FindClass("java/lang/Runnable"), "run", "()V");
+  jmethodID accept = env->GetMethodID(
+      env->FindClass("java/util/function/Consumer"), "accept", "(Ljava/lang/Object;)V");
+  env->CallVoidMethod(print, accept, arr);
+  env->CallVoidMethod(resize, run);
+  env->CallVoidMethod(print, accept, arr);
+  env->CallVoidMethod(check, accept, arr);
+}
+
+struct JvmtiInfo {
+  std::mutex mu_;
+  std::vector<jlong> freed_tags_;
+};
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test1974_StartCollectFrees(JNIEnv* env,
+                                                                      jclass k ATTRIBUTE_UNUSED) {
+  jvmtiEventCallbacks cb{
+    .ObjectFree =
+        [](jvmtiEnv* jvmti, jlong tag) {
+          JvmtiInfo* dat = nullptr;
+          CHECK_EQ(jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&dat)),
+                   JVMTI_ERROR_NONE);
+          std::lock_guard<std::mutex> mu(dat->mu_);
+          dat->freed_tags_.push_back(tag);
+        },
+  };
+  JvmtiInfo* info = new JvmtiInfo;
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetEnvironmentLocalStorage(info))) {
+    LOG(INFO) << "couldn't set env-local storage";
+    return;
+  }
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetEventCallbacks(&cb, sizeof(cb)))) {
+    LOG(INFO) << "couldn't set event callback";
+    return;
+  }
+  JvmtiErrorToException(
+      env,
+      jvmti_env,
+      jvmti_env->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_OBJECT_FREE, nullptr));
+}
+
+extern "C" JNIEXPORT void JNICALL
+Java_art_Test1974_StartAssignObsoleteIncrementedId(JNIEnv* env, jclass k ATTRIBUTE_UNUSED) {
+  jint id = FindExtensionEvent(env, "com.android.art.heap.obsolete_object_created");
+  if (env->ExceptionCheck()) {
+    LOG(INFO) << "Could not find extension event!";
+    return;
+  }
+  using ObsoleteEvent = void (*)(jvmtiEnv * env, jlong * obsolete, jlong * non_obsolete);
+  ObsoleteEvent oe = [](jvmtiEnv* env ATTRIBUTE_UNUSED, jlong* obsolete, jlong* non_obsolete) {
+    *non_obsolete = *obsolete;
+    *obsolete = *obsolete + 1;
+  };
+  JvmtiErrorToException(
+      env,
+      jvmti_env,
+      jvmti_env->SetExtensionEventCallback(id, reinterpret_cast<jvmtiExtensionEvent>(oe)));
+}
+
+extern "C" JNIEXPORT void JNICALL
+Java_art_Test1974_EndAssignObsoleteIncrementedId(JNIEnv* env, jclass k ATTRIBUTE_UNUSED) {
+  jint id = FindExtensionEvent(env, "com.android.art.heap.obsolete_object_created");
+  if (env->ExceptionCheck()) {
+    LOG(INFO) << "Could not find extension event!";
+    return;
+  }
+  JvmtiErrorToException(env, jvmti_env, jvmti_env->SetExtensionEventCallback(id, nullptr));
+}
+
+extern "C" JNIEXPORT jlongArray JNICALL
+Java_art_Test1974_CollectFreedTags(JNIEnv* env, jclass k ATTRIBUTE_UNUSED) {
+  if (JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti_env->SetEventNotificationMode(JVMTI_DISABLE, JVMTI_EVENT_OBJECT_FREE, nullptr))) {
+    return nullptr;
+  }
+  JvmtiInfo* info_p = nullptr;
+  if (JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti_env->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&info_p)))) {
+    return nullptr;
+  }
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetEnvironmentLocalStorage(nullptr))) {
+    return nullptr;
+  }
+  std::unique_ptr<JvmtiInfo> info(info_p);
+  ScopedLocalRef<jlongArray> arr(env, env->NewLongArray(info->freed_tags_.size()));
+  if (env->ExceptionCheck()) {
+    return nullptr;
+  }
+  env->SetLongArrayRegion(arr.get(), 0, info->freed_tags_.size(), info->freed_tags_.data());
+  if (env->ExceptionCheck()) {
+    return nullptr;
+  }
+  return arr.release();
+}
+}  // namespace Test1974ResizeArray
+}  // namespace art
diff --git a/test/1974-resize-array/run b/test/1974-resize-array/run
new file mode 100755
index 0000000..96646c8
--- /dev/null
+++ b/test/1974-resize-array/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+./default-run "$@" --jvmti
diff --git a/test/1974-resize-array/src/Main.java b/test/1974-resize-array/src/Main.java
new file mode 100644
index 0000000..3843973
--- /dev/null
+++ b/test/1974-resize-array/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1974.run();
+  }
+}
diff --git a/test/1974-resize-array/src/art/Main.java b/test/1974-resize-array/src/art/Main.java
new file mode 120000
index 0000000..84ae4ac
--- /dev/null
+++ b/test/1974-resize-array/src/art/Main.java
@@ -0,0 +1 @@
+../../../jvmti-common/Main.java
\ No newline at end of file
diff --git a/test/1974-resize-array/src/art/Test1974.java b/test/1974-resize-array/src/art/Test1974.java
new file mode 100644
index 0000000..e1a1861
--- /dev/null
+++ b/test/1974-resize-array/src/art/Test1974.java
@@ -0,0 +1,534 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+public class Test1974 {
+
+  public static final boolean DEBUG = false;
+
+  public static int[] static_field = new int[] {1, 2, 3};
+  public static Object[] static_field_ref = new String[] {"a", "b", "c"};
+
+  public static final class InstanceClass {
+    public int[] instance_field = new int[] {1, 2, 3};
+    public Object[] self_ref;
+
+    public InstanceClass() {
+      self_ref = new Object[] {null, "A", "B", "C"};
+      self_ref[0] = self_ref;
+    }
+  }
+
+  static InstanceClass theInstanceClass;
+  static InstanceClass theOtherInstanceClass;
+
+  static {
+    theInstanceClass = new InstanceClass();
+    theOtherInstanceClass = new InstanceClass();
+    theOtherInstanceClass.instance_field = theInstanceClass.instance_field;
+    theOtherInstanceClass.self_ref = theInstanceClass.self_ref;
+  }
+
+  public static void DbgPrintln(String s) {
+    if (DEBUG) {
+      System.out.println(s);
+    }
+  }
+
+  public interface ThrowRunnable extends Runnable {
+    public default void run() {
+      try {
+        throwRun();
+      } catch (Exception e) {
+        throw new Error("Exception in runner!", e);
+      }
+    }
+
+    public void throwRun() throws Exception;
+  }
+
+  public static void runAsThread(ThrowRunnable r) throws Exception {
+    Thread t = new Thread(r);
+    t.start();
+    t.join();
+    System.out.println("");
+  }
+
+  public static void runInstance() {
+    System.out.println("Test instance");
+    DbgPrintln("Pre hash: " + theInstanceClass.instance_field.hashCode());
+    System.out.println(
+        "val is: " + Arrays.toString(theInstanceClass.instance_field) + " resize +3");
+    ResizeArray(() -> theInstanceClass.instance_field, theInstanceClass.instance_field.length + 5);
+    System.out.println("val is: " + Arrays.toString(theInstanceClass.instance_field));
+    DbgPrintln("Post hash: " + theInstanceClass.instance_field.hashCode());
+    System.out.println(
+        "Same value? " + (theInstanceClass.instance_field == theOtherInstanceClass.instance_field));
+  }
+
+  public static void runHashMap() {
+    System.out.println("Test HashMap");
+    HashMap<byte[], Comparable> map = new HashMap();
+    Comparable the_value = "THE VALUE";
+    Supplier<byte[]> get_the_value =
+        () ->
+            map.entrySet().stream()
+                .filter((x) -> x.getValue().equals(the_value))
+                .findFirst()
+                .get()
+                .getKey();
+    map.put(new byte[] {1, 2, 3, 4}, the_value);
+    map.put(new byte[] {1, 2, 3, 4}, "Other Value");
+    map.put(new byte[] {1, 4}, "Third value");
+    System.out.println("val is: " + Arrays.toString(get_the_value.get()) + " resize +3");
+    System.out.print("Map is: ");
+    map.entrySet().stream()
+        .sorted((x, y) -> x.getValue().compareTo(y.getValue()))
+        .forEach(
+            (e) -> {
+              System.out.print("(" + Arrays.toString(e.getKey()) + "->" + e.getValue() + "), ");
+            });
+    System.out.println();
+    ResizeArray(get_the_value, 7);
+    System.out.println("val is: " + Arrays.toString(get_the_value.get()));
+    System.out.print("Map is: ");
+    map.entrySet().stream()
+        .sorted((x, y) -> x.getValue().compareTo(y.getValue()))
+        .forEach(
+            (e) -> {
+              System.out.print("(" + Arrays.toString(e.getKey()) + "->" + e.getValue() + "), ");
+            });
+    System.out.println();
+  }
+
+  public static void runWeakReference() {
+    System.out.println("Test j.l.r.WeakReference");
+    String[] arr = new String[] {"weak", "ref"};
+    WeakReference<String[]> wr = new WeakReference(arr);
+    DbgPrintln("Pre hash: " + wr.get().hashCode());
+    System.out.println("val is: " + Arrays.toString(wr.get()) + " resize +3");
+    ResizeArray(wr::get, wr.get().length + 5);
+    System.out.println("val is: " + Arrays.toString(wr.get()));
+    DbgPrintln("Post hash: " + wr.get().hashCode());
+    System.out.println("Same value? " + (wr.get() == arr));
+  }
+
+  public static void runInstanceSelfRef() {
+    System.out.println("Test instance self-ref");
+    DbgPrintln("Pre hash: " + Integer.toHexString(theInstanceClass.self_ref.hashCode()));
+    String pre_to_string = theInstanceClass.self_ref.toString();
+    System.out.println(
+        "val is: "
+            + Arrays.toString(theInstanceClass.self_ref).replace(pre_to_string, "<SELF REF>")
+            + " resize +5 item 0 is "
+            + Arrays.toString((Object[]) theInstanceClass.self_ref[0])
+                .replace(pre_to_string, "<SELF REF>"));
+    ResizeArray(() -> theInstanceClass.self_ref, theInstanceClass.self_ref.length + 5);
+    System.out.println(
+        "val is: "
+            + Arrays.toString(theInstanceClass.self_ref).replace(pre_to_string, "<SELF REF>"));
+    System.out.println(
+        "val is: "
+            + Arrays.toString((Object[]) theInstanceClass.self_ref[0])
+                .replace(pre_to_string, "<SELF REF>"));
+    DbgPrintln("Post hash: " + Integer.toHexString(theInstanceClass.self_ref.hashCode()));
+    System.out.println(
+        "Same value? " + (theInstanceClass.self_ref == theOtherInstanceClass.self_ref));
+    System.out.println(
+        "Same structure? " + (theInstanceClass.self_ref == theInstanceClass.self_ref[0]));
+    System.out.println(
+        "Same inner-structure? "
+            + (theInstanceClass.self_ref[0] == ((Object[]) theInstanceClass.self_ref[0])[0]));
+  }
+
+  public static void runInstanceSelfRefSmall() {
+    System.out.println("Test instance self-ref smaller");
+    DbgPrintln("Pre hash: " + Integer.toHexString(theInstanceClass.self_ref.hashCode()));
+    String pre_to_string = theInstanceClass.self_ref.toString();
+    System.out.println(
+        "val is: "
+            + Arrays.toString(theInstanceClass.self_ref).replace(pre_to_string, "<SELF REF>")
+            + " resize -7 item 0 is "
+            + Arrays.toString((Object[]) theInstanceClass.self_ref[0])
+                .replace(pre_to_string, "<SELF REF>"));
+    ResizeArray(() -> theInstanceClass.self_ref, theInstanceClass.self_ref.length - 7);
+    System.out.println(
+        "val is: "
+            + Arrays.toString(theInstanceClass.self_ref).replace(pre_to_string, "<SELF REF>"));
+    System.out.println(
+        "val is: "
+            + Arrays.toString((Object[]) theInstanceClass.self_ref[0])
+                .replace(pre_to_string, "<SELF REF>"));
+    DbgPrintln("Post hash: " + Integer.toHexString(theInstanceClass.self_ref.hashCode()));
+    System.out.println(
+        "Same value? " + (theInstanceClass.self_ref == theOtherInstanceClass.self_ref));
+    System.out.println(
+        "Same structure? " + (theInstanceClass.self_ref == theInstanceClass.self_ref[0]));
+    System.out.println(
+        "Same inner-structure? "
+            + (theInstanceClass.self_ref[0] == ((Object[]) theInstanceClass.self_ref[0])[0]));
+  }
+
+  public static void runLocal() throws Exception {
+    final int[] arr_loc = new int[] {2, 3, 4};
+    int[] arr_loc_2 = arr_loc;
+
+    System.out.println("Test local");
+    DbgPrintln("Pre hash: " + arr_loc.hashCode());
+    System.out.println("val is: " + Arrays.toString(arr_loc) + " resize +5");
+    ResizeArray(() -> arr_loc, arr_loc.length + 5);
+    System.out.println("val is: " + Arrays.toString(arr_loc));
+    DbgPrintln("Post hash: " + arr_loc.hashCode());
+    System.out.println("Same value? " + (arr_loc == arr_loc_2));
+  }
+
+  public static void runLocalSmall() throws Exception {
+    final int[] arr_loc = new int[] {1, 2, 3, 4, 5};
+    int[] arr_loc_2 = arr_loc;
+
+    System.out.println("Test local smaller");
+    DbgPrintln("Pre hash: " + arr_loc.hashCode());
+    System.out.println("val is: " + Arrays.toString(arr_loc) + " resize -2");
+    ResizeArray(() -> arr_loc, arr_loc.length - 2);
+    System.out.println("val is: " + Arrays.toString(arr_loc));
+    DbgPrintln("Post hash: " + arr_loc.hashCode());
+    System.out.println("Same value? " + (arr_loc == arr_loc_2));
+  }
+
+  public static void runMultiThreadLocal() throws Exception {
+    final CountDownLatch cdl = new CountDownLatch(1);
+    final CountDownLatch start_cdl = new CountDownLatch(2);
+    final Supplier<Object[]> getArr =
+        new Supplier<Object[]>() {
+          public final Object[] arr = new Object[] {"1", "2", "3"};
+
+          public Object[] get() {
+            return arr;
+          }
+        };
+    final ArrayList<String> msg1 = new ArrayList();
+    final ArrayList<String> msg2 = new ArrayList();
+    final Consumer<String> print1 =
+        (String s) -> {
+          msg1.add(s);
+        };
+    final Consumer<String> print2 =
+        (String s) -> {
+          msg2.add(s);
+        };
+    Function<Consumer<String>, Runnable> r =
+        (final Consumer<String> c) ->
+            () -> {
+              c.accept("Test local multi-thread");
+              Object[] arr_loc = getArr.get();
+              Object[] arr_loc_2 = getArr.get();
+
+              DbgPrintln("Pre hash: " + arr_loc.hashCode());
+              c.accept("val is: " + Arrays.toString(arr_loc) + " resize -2");
+
+              try {
+                start_cdl.countDown();
+                cdl.await();
+              } catch (Exception e) {
+                throw new Error("failed await", e);
+              }
+              c.accept("val is: " + Arrays.toString(arr_loc));
+              DbgPrintln("Post hash: " + arr_loc.hashCode());
+              c.accept("Same value? " + (arr_loc == arr_loc_2));
+            };
+    Thread t1 = new Thread(r.apply(print1));
+    Thread t2 = new Thread(r.apply(print2));
+    t1.start();
+    t2.start();
+    start_cdl.await();
+    ResizeArray(getArr, 1);
+    cdl.countDown();
+    t1.join();
+    t2.join();
+    for (String s : msg1) {
+      System.out.println("T1: " + s);
+    }
+    for (String s : msg2) {
+      System.out.println("T2: " + s);
+    }
+  }
+
+  public static void runWithLocks() throws Exception {
+    final CountDownLatch cdl = new CountDownLatch(1);
+    final CountDownLatch start_cdl = new CountDownLatch(2);
+    final CountDownLatch waiter_start_cdl = new CountDownLatch(1);
+    final Supplier<Object[]> getArr =
+        new Supplier<Object[]>() {
+          public final Object[] arr = new Object[] {"A", "2", "C"};
+
+          public Object[] get() {
+            return arr;
+          }
+        };
+    // basic order of operations noted above each line.
+    // Waiter runs to the 'wait' then t1 runs to the cdl.await, then current thread runs.
+    Runnable r =
+        () -> {
+          System.out.println("Test locks");
+          Object[] arr_loc = getArr.get();
+          Object[] arr_loc_2 = getArr.get();
+
+          DbgPrintln("Pre hash: " + arr_loc.hashCode());
+          System.out.println("val is: " + Arrays.toString(arr_loc) + " resize -2");
+
+          try {
+            // OP 1
+            waiter_start_cdl.await();
+            // OP 6
+            synchronized (arr_loc) {
+              // OP 7
+              synchronized (arr_loc_2) {
+                // OP 8
+                start_cdl.countDown();
+                // OP 9
+                cdl.await();
+                // OP 13
+              }
+            }
+          } catch (Exception e) {
+            throw new Error("failed await", e);
+          }
+          System.out.println("val is: " + Arrays.toString(arr_loc));
+          DbgPrintln("Post hash: " + arr_loc.hashCode());
+          System.out.println("Same value? " + (arr_loc == arr_loc_2));
+        };
+    Thread t1 = new Thread(r);
+    Thread waiter =
+        new Thread(
+            () -> {
+              try {
+                Object a = getArr.get();
+                // OP 2
+                synchronized (a) {
+                  // OP 3
+                  waiter_start_cdl.countDown();
+                  // OP 4
+                  start_cdl.countDown();
+                  // OP 5
+                  a.wait();
+                  // OP 15
+                }
+              } catch (Exception e) {
+                throw new Error("Failed wait!", e);
+              }
+            });
+    waiter.start();
+    t1.start();
+    // OP 10
+    start_cdl.await();
+    // OP 11
+    ResizeArray(getArr, 1);
+    // OP 12
+    cdl.countDown();
+    // OP 14
+    synchronized (getArr.get()) {
+      // Make sure thread wakes up and has the right lock.
+      getArr.get().notifyAll();
+    }
+    waiter.join();
+    t1.join();
+    // Make sure other threads can still lock it.
+    synchronized (getArr.get()) {
+    }
+    System.out.println("Locks seem to all work.");
+  }
+
+  public static void runWithJniGlobal() throws Exception {
+    Object[] arr = new Object[] {"1", "11", "111"};
+    final long globalID = GetGlobalJniRef(arr);
+    System.out.println("Test jni-ref");
+    DbgPrintln("Pre hash: " + ReadJniRef(globalID).hashCode());
+    System.out.println(
+        "val is: " + Arrays.toString((Object[]) ReadJniRef(globalID)) + " resize +5");
+    ResizeArray(() -> ReadJniRef(globalID), ((Object[]) ReadJniRef(globalID)).length + 5);
+    System.out.println("val is: " + Arrays.toString((Object[]) ReadJniRef(globalID)));
+    DbgPrintln("Post hash: " + ReadJniRef(globalID).hashCode());
+    System.out.println("Same value? " + (ReadJniRef(globalID) == arr));
+  }
+
+  public static void runWithJniWeakGlobal() throws Exception {
+    Object[] arr = new Object[] {"2", "22", "222"};
+    final long globalID = GetWeakGlobalJniRef(arr);
+    System.out.println("Test weak jni-ref");
+    DbgPrintln("Pre hash: " + ReadJniRef(globalID).hashCode());
+    System.out.println(
+        "val is: " + Arrays.toString((Object[]) ReadJniRef(globalID)) + " resize +5");
+    ResizeArray(() -> ReadJniRef(globalID), ((Object[]) ReadJniRef(globalID)).length + 5);
+    System.out.println("val is: " + Arrays.toString((Object[]) ReadJniRef(globalID)));
+    DbgPrintln("Post hash: " + ReadJniRef(globalID).hashCode());
+    System.out.println("Same value? " + (ReadJniRef(globalID) == arr));
+    if (ReadJniRef(globalID) != arr) {
+      throw new Error("Didn't update weak global!");
+    }
+  }
+
+  public static void runWithJniLocals() throws Exception {
+    final Object[] arr = new Object[] {"3", "32", "322"};
+    System.out.println("Test jni local ref");
+    Consumer<Object> checker = (o) -> System.out.println("Same value? " + (o == arr));
+    Consumer<Object> printer =
+        (o) -> System.out.println("val is: " + Arrays.toString((Object[]) o));
+    Runnable resize =
+        () -> {
+          System.out.println("Resize +4");
+          ResizeArray(() -> arr, arr.length + 4);
+        };
+    runNativeTest(arr, resize, printer, checker);
+  }
+
+  public static native void runNativeTest(
+      Object[] arr, Runnable resize, Consumer<Object> printer, Consumer<Object> checker);
+
+  public static void runWithJvmtiTags() throws Exception {
+    Object[] arr = new Object[] {"3", "33", "333"};
+    long globalID = 333_333_333l;
+    Main.setTag(arr, globalID);
+    System.out.println("Test jvmti-tags");
+    DbgPrintln("Pre hash: " + arr.hashCode());
+    System.out.println(
+        "val is: " + Arrays.deepToString(GetObjectsWithTag(globalID)) + " resize +5");
+    ResizeArray(() -> arr, arr.length + 5);
+    Object[] after_tagged_obj = GetObjectsWithTag(globalID);
+    System.out.println("val is: " + Arrays.deepToString(GetObjectsWithTag(globalID)));
+    DbgPrintln("Post hash: " + after_tagged_obj[0].hashCode());
+    System.out.println("Same value? " + (after_tagged_obj[0] == arr));
+  }
+
+  public static void runWithJvmtiTagsObsolete() throws Exception {
+    Object[] arr = new Object[] {"4", "44", "444"};
+    long globalID = 444_444_444l;
+    System.out.println("Test jvmti-tags with obsolete");
+    Main.setTag(arr, globalID);
+    StartCollectFrees();
+    StartAssignObsoleteIncrementedId();
+    DbgPrintln("Pre hash: " + arr.hashCode());
+    System.out.println(
+        "val is: " + Arrays.deepToString(GetObjectsWithTag(globalID)) + " resize +5");
+    ResizeArray(() -> arr, arr.length + 5);
+    Object[] after_tagged_obj = GetObjectsWithTag(globalID);
+    Object[] obsolete_tagged_obj = GetObjectsWithTag(globalID + 1);
+    System.out.println("val is: " + Arrays.deepToString(GetObjectsWithTag(globalID)));
+    EndAssignObsoleteIncrementedId();
+    long[] obsoletes_freed = CollectFreedTags();
+    DbgPrintln("Post hash: " + after_tagged_obj[0].hashCode());
+    System.out.println("Same value? " + (after_tagged_obj[0] == arr));
+    if (obsolete_tagged_obj.length >= 1) {
+      DbgPrintln("Found objects with obsolete tag: " + Arrays.deepToString(obsolete_tagged_obj));
+      boolean bad = false;
+      if (obsolete_tagged_obj.length != 1) {
+        System.out.println(
+            "Found obsolete tag'd objects: "
+                + Arrays.deepHashCode(obsolete_tagged_obj)
+                + " but only expected one!");
+        bad = true;
+      }
+      if (!Arrays.deepEquals(
+          Arrays.copyOf(arr, ((Object[]) obsolete_tagged_obj[0]).length),
+          (Object[]) obsolete_tagged_obj[0])) {
+        System.out.println("Obsolete array was unexpectedly different than non-obsolete one!");
+        bad = true;
+      }
+      if (!Arrays.stream(obsoletes_freed).anyMatch((l) -> l == globalID + 1)) {
+        DbgPrintln("Didn't see a free of the obsolete id");
+      }
+      if (!bad) {
+        System.out.println("Everything looks good WRT obsolete object!");
+      }
+    } else {
+      if (!Arrays.stream(obsoletes_freed).anyMatch((l) -> l == globalID + 1)) {
+        System.out.println("Didn't see a free of the obsolete id");
+      } else {
+        DbgPrintln("Saw a free of obsolete id!");
+        System.out.println("Everything looks good WRT obsolete object!");
+      }
+    }
+  }
+
+  public static void run() throws Exception {
+    // Simple
+    runAsThread(Test1974::runInstance);
+
+    // HashMap
+    runAsThread(Test1974::runHashMap);
+
+    // j.l.ref.WeakReference
+    runAsThread(Test1974::runWeakReference);
+
+    // Self-referential arrays.
+    runAsThread(Test1974::runInstanceSelfRef);
+    runAsThread(Test1974::runInstanceSelfRefSmall);
+
+    // Local variables simple
+    runAsThread(Test1974::runLocal);
+    runAsThread(Test1974::runLocalSmall);
+
+    // multiple threads local variables
+    runAsThread(Test1974::runMultiThreadLocal);
+
+    // using as monitors and waiting
+    runAsThread(Test1974::runWithLocks);
+
+    // Basic jni global refs
+    runAsThread(Test1974::runWithJniGlobal);
+
+    // Basic jni weak global refs
+    runAsThread(Test1974::runWithJniWeakGlobal);
+
+    // Basic JNI local refs
+    runAsThread(Test1974::runWithJniLocals);
+
+    // Basic jvmti tags
+    runAsThread(Test1974::runWithJvmtiTags);
+
+    // Grab obsolete reference using tags/detect free
+    runAsThread(Test1974::runWithJvmtiTagsObsolete);
+  }
+
+  // Use a supplier so that we don't have to have a local ref to the resized
+  // array if we don't want it
+  public static native <T> void ResizeArray(Supplier<T> arr, int new_size);
+
+  public static native <T> long GetGlobalJniRef(T t);
+
+  public static native <T> long GetWeakGlobalJniRef(T t);
+
+  public static native <T> T ReadJniRef(long t);
+
+  public static native Object[] GetObjectsWithTag(long tag);
+
+  public static native void StartCollectFrees();
+
+  public static native void StartAssignObsoleteIncrementedId();
+
+  public static native void EndAssignObsoleteIncrementedId();
+
+  public static native long[] CollectFreedTags();
+}
diff --git a/test/1975-hello-structural-transformation/expected.txt b/test/1975-hello-structural-transformation/expected.txt
new file mode 100644
index 0000000..07d3ac2
--- /dev/null
+++ b/test/1975-hello-structural-transformation/expected.txt
@@ -0,0 +1,98 @@
+Saving Field object (ID: 0) public static java.lang.Class art.Transform1975.CUR_CLASS for later
+Saving Field object (ID: 1) public static byte[] art.Transform1975.REDEFINED_DEX_BYTES for later
+Saving MethodHandle object (ID: 2) MethodHandle()Class for later
+Saving MethodHandle object (ID: 3) MethodHandle()byte[] for later
+Saving writable MethodHandle (ID: 4) MethodHandle(Class)void for later
+Reading fields before redefinition
+Reading with reflection.
+public static java.lang.Class art.Transform1975.CUR_CLASS = (ID: 5) class art.Transform1975
+public static byte[] art.Transform1975.REDEFINED_DEX_BYTES = (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+Reading normally in same class.
+ORIGINAL VALUE CUR_CLASS: class art.Transform1975
+ORIGINAL VALUE REDEFINED_DEX_BYTES: ZGV4CjAzNQDNGFvYlmyIt+u4bnNv+OyNVekBxlrJi6EgBwAAcAAAAHhWNBIAAAAAAAAAAFwGAAAmAAAAcAAAAAwAAAAIAQAABwAAADgBAAAEAAAAjAEAAAwAAACsAQAAAQAAAAwCAAD0BAAALAIAAGYDAABrAwAAdQMAAH0DAACIAwAAmQMAAKsDAACuAwAAsgMAAMcDAADmAwAA/QMAABAEAAAjBAAANwQAAEsEAABmBAAAegQAAJYEAACqBAAAwQQAANkEAAD6BAAABgUAABsFAAAvBQAAMgUAADYFAAA6BQAAQgUAAE8FAABfBQAAawUAAHAFAAB5BQAAhQUAAI8FAACWBQAACAAAAAkAAAAKAAAACwAAAA0AAAAOAAAADwAAABAAAAARAAAAEgAAABkAAAAbAAAABgAAAAUAAAAAAAAABwAAAAUAAABQAwAABwAAAAYAAABYAwAABwAAAAYAAABgAwAABgAAAAgAAAAAAAAAGQAAAAoAAAAAAAAAGgAAAAoAAABgAwAAAAADAAMAAAAAAAUAFgAAAAAACwAXAAAABwACACAAAAAAAAUAAQAAAAAABQACAAAAAAAFAB0AAAAAAAUAIgAAAAIABgAhAAAABAAFAAIAAAAGAAUAAgAAAAYAAgAcAAAABgADABwAAAAGAAAAIwAAAAgAAQAeAAAACQAEAB8AAAAAAAAAAQAAAAQAAAAAAAAAGAAAAEQGAAAYBgAAAAAAAAAAAAAAAAAAMgMAAAEAAAAOAAAAAQABAAEAAAA2AwAABAAAAHAQBQAAAA4AAgAAAAIAAAA6AwAADAAAAGIAAwAaAQQAbiAEABAAGgAFAGkAAQAOAAQAAAACAAAAQAMAAFEAAABiAAMAYgEAACICBgBwEAYAAgAaAxMAbiAIADIAbiAHABIAbhAJAAIADAFuIAQAEABiAAMAcQALAAAADAFiAgIAbiAKACEADAEiAgYAcBAGAAIAGgMVAG4gCAAyAG4gCAASAG4QCQACAAwBbiAEABAAYgADAGIBAQAiAgYAcBAGAAIAGgMUAG4gCAAyAG4gCAASAG4QCQACAAwBbiAEABAADgAEAA4AAwAOAAkADnhLAA0ADgEYDwEgDwEYDwAAAAABAAAACwAAAAEAAAAEAAAAAQAAAAUAAyo+OwAIPGNsaW5pdD4ABjxpbml0PgAJQ1VSX0NMQVNTAA9Eb2luZyBzb21ldGhpbmcAEEkgZGlkIHNvbWV0aGluZyEAAUwAAkxMABNMYXJ0L1RyYW5zZm9ybTE5NzU7AB1MZGFsdmlrL2Fubm90YXRpb24vU2lnbmF0dXJlOwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABFMamF2YS9sYW5nL0NsYXNzOwARTGphdmEvbGFuZy9DbGFzczwAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABlMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7ABJMamF2YS9sYW5nL1N5c3RlbTsAGkxqYXZhL3V0aWwvQmFzZTY0JEVuY29kZXI7ABJMamF2YS91dGlsL0Jhc2U2NDsAFU5FVyBWQUxVRSBDVVJfQ0xBU1M6IAAWTkVXIFZBTFVFIE5FV19TVFJJTkc6IAAfTkVXIFZBTFVFIFJFREVGSU5FRF9ERVhfQllURVM6IAAKTkVXX1NUUklORwATUkVERUZJTkVEX0RFWF9CWVRFUwASVHJhbnNmb3JtMTk3NS5qYXZhAAFWAAJWTAACW0IABmFwcGVuZAALZG9Tb21ldGhpbmcADmVuY29kZVRvU3RyaW5nAApnZXRFbmNvZGVyAANvdXQAB3ByaW50bG4ACnJlYWRGaWVsZHMACHRvU3RyaW5nAAV2YWx1ZQB2fn5EOHsiY29tcGlsYXRpb24tbW9kZSI6ImRlYnVnIiwibWluLWFwaSI
6MSwic2hhLTEiOiJhODM1MmYyNTQ4ODUzNjJjY2Q4ZDkwOWQzNTI5YzYwMDk0ZGQ4OTZlIiwidmVyc2lvbiI6IjEuNi4yMC1kZXYifQACAQEkHAIXDBcAAwAEAAAJAQkBCQCIgASsBAGBgATABAEJ2AQBCYAFAAAAAAAAAQAAAA4GAAA4BgAAAQAAAAAAAAAAAAAAAAAAADwGAAAQAAAAAAAAAAEAAAAAAAAAAQAAACYAAABwAAAAAgAAAAwAAAAIAQAAAwAAAAcAAAA4AQAABAAAAAQAAACMAQAABQAAAAwAAACsAQAABgAAAAEAAAAMAgAAASAAAAQAAAAsAgAAAyAAAAQAAAAyAwAAARAAAAMAAABQAwAAAiAAACYAAABmAwAABCAAAAEAAAAOBgAAACAAAAEAAAAYBgAAAxAAAAIAAAA4BgAABiAAAAEAAABEBgAAABAAAAEAAABcBgAA
+Reading with native.
+Field public static java.lang.Class art.Transform1975.CUR_CLASS = (ID: 5) class art.Transform1975
+Field public static byte[] art.Transform1975.REDEFINED_DEX_BYTES = (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+Reading normally in other class.
+Read CUR_CLASS field: (ID: 5) class art.Transform1975
+Read REDEFINED_DEX_BYTES field: (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+Reading using method handles.
+(ID: 7) MethodHandle()Class (public static java.lang.Class art.Transform1975.CUR_CLASS) = (ID: 5) class art.Transform1975
+(ID: 8) MethodHandle()byte[] (public static byte[] art.Transform1975.REDEFINED_DEX_BYTES) = (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+Doing modification maybe
+Not doing anything
+Reading with reflection after possible modification.
+public static java.lang.Class art.Transform1975.CUR_CLASS = (ID: 5) class art.Transform1975
+public static byte[] art.Transform1975.REDEFINED_DEX_BYTES = (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+Reading normally in same class after possible modification.
+ORIGINAL VALUE CUR_CLASS: class art.Transform1975
+ORIGINAL VALUE REDEFINED_DEX_BYTES: ZGV4CjAzNQDNGFvYlmyIt+u4bnNv+OyNVekBxlrJi6EgBwAAcAAAAHhWNBIAAAAAAAAAAFwGAAAmAAAAcAAAAAwAAAAIAQAABwAAADgBAAAEAAAAjAEAAAwAAACsAQAAAQAAAAwCAAD0BAAALAIAAGYDAABrAwAAdQMAAH0DAACIAwAAmQMAAKsDAACuAwAAsgMAAMcDAADmAwAA/QMAABAEAAAjBAAANwQAAEsEAABmBAAAegQAAJYEAACqBAAAwQQAANkEAAD6BAAABgUAABsFAAAvBQAAMgUAADYFAAA6BQAAQgUAAE8FAABfBQAAawUAAHAFAAB5BQAAhQUAAI8FAACWBQAACAAAAAkAAAAKAAAACwAAAA0AAAAOAAAADwAAABAAAAARAAAAEgAAABkAAAAbAAAABgAAAAUAAAAAAAAABwAAAAUAAABQAwAABwAAAAYAAABYAwAABwAAAAYAAABgAwAABgAAAAgAAAAAAAAAGQAAAAoAAAAAAAAAGgAAAAoAAABgAwAAAAADAAMAAAAAAAUAFgAAAAAACwAXAAAABwACACAAAAAAAAUAAQAAAAAABQACAAAAAAAFAB0AAAAAAAUAIgAAAAIABgAhAAAABAAFAAIAAAAGAAUAAgAAAAYAAgAcAAAABgADABwAAAAGAAAAIwAAAAgAAQAeAAAACQAEAB8AAAAAAAAAAQAAAAQAAAAAAAAAGAAAAEQGAAAYBgAAAAAAAAAAAAAAAAAAMgMAAAEAAAAOAAAAAQABAAEAAAA2AwAABAAAAHAQBQAAAA4AAgAAAAIAAAA6AwAADAAAAGIAAwAaAQQAbiAEABAAGgAFAGkAAQAOAAQAAAACAAAAQAMAAFEAAABiAAMAYgEAACICBgBwEAYAAgAaAxMAbiAIADIAbiAHABIAbhAJAAIADAFuIAQAEABiAAMAcQALAAAADAFiAgIAbiAKACEADAEiAgYAcBAGAAIAGgMVAG4gCAAyAG4gCAASAG4QCQACAAwBbiAEABAAYgADAGIBAQAiAgYAcBAGAAIAGgMUAG4gCAAyAG4gCAASAG4QCQACAAwBbiAEABAADgAEAA4AAwAOAAkADnhLAA0ADgEYDwEgDwEYDwAAAAABAAAACwAAAAEAAAAEAAAAAQAAAAUAAyo+OwAIPGNsaW5pdD4ABjxpbml0PgAJQ1VSX0NMQVNTAA9Eb2luZyBzb21ldGhpbmcAEEkgZGlkIHNvbWV0aGluZyEAAUwAAkxMABNMYXJ0L1RyYW5zZm9ybTE5NzU7AB1MZGFsdmlrL2Fubm90YXRpb24vU2lnbmF0dXJlOwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABFMamF2YS9sYW5nL0NsYXNzOwARTGphdmEvbGFuZy9DbGFzczwAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABlMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7ABJMamF2YS9sYW5nL1N5c3RlbTsAGkxqYXZhL3V0aWwvQmFzZTY0JEVuY29kZXI7ABJMamF2YS91dGlsL0Jhc2U2NDsAFU5FVyBWQUxVRSBDVVJfQ0xBU1M6IAAWTkVXIFZBTFVFIE5FV19TVFJJTkc6IAAfTkVXIFZBTFVFIFJFREVGSU5FRF9ERVhfQllURVM6IAAKTkVXX1NUUklORwATUkVERUZJTkVEX0RFWF9CWVRFUwASVHJhbnNmb3JtMTk3NS5qYXZhAAFWAAJWTAACW0IABmFwcGVuZAALZG9Tb21ldGhpbmcADmVuY29kZVRvU3RyaW5nAApnZXRFbmNvZGVyAANvdXQAB3ByaW50bG4ACnJlYWRGaWVsZHMACHRvU3RyaW5nAAV2YWx1ZQB2fn5EOHsiY29tcGlsYXRpb24tbW9kZSI6ImRlYnVnIiwibWluLWFwaSI
6MSwic2hhLTEiOiJhODM1MmYyNTQ4ODUzNjJjY2Q4ZDkwOWQzNTI5YzYwMDk0ZGQ4OTZlIiwidmVyc2lvbiI6IjEuNi4yMC1kZXYifQACAQEkHAIXDBcAAwAEAAAJAQkBCQCIgASsBAGBgATABAEJ2AQBCYAFAAAAAAAAAQAAAA4GAAA4BgAAAQAAAAAAAAAAAAAAAAAAADwGAAAQAAAAAAAAAAEAAAAAAAAAAQAAACYAAABwAAAAAgAAAAwAAAAIAQAAAwAAAAcAAAA4AQAABAAAAAQAAACMAQAABQAAAAwAAACsAQAABgAAAAEAAAAMAgAAASAAAAQAAAAsAgAAAyAAAAQAAAAyAwAAARAAAAMAAABQAwAAAiAAACYAAABmAwAABCAAAAEAAAAOBgAAACAAAAEAAAAYBgAAAxAAAAIAAAA4BgAABiAAAAEAAABEBgAAABAAAAEAAABcBgAA
+Reading with native after possible modification.
+Field public static java.lang.Class art.Transform1975.CUR_CLASS = (ID: 5) class art.Transform1975
+Field public static byte[] art.Transform1975.REDEFINED_DEX_BYTES = (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+Reading normally in other class after possible modification.
+Read CUR_CLASS field: (ID: 5) class art.Transform1975
+Read REDEFINED_DEX_BYTES field: (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+Reading using method handles.
+(ID: 9) MethodHandle()Class (public static java.lang.Class art.Transform1975.CUR_CLASS) = (ID: 5) class art.Transform1975
+(ID: 10) MethodHandle()byte[] (public static byte[] art.Transform1975.REDEFINED_DEX_BYTES) = (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+Reading fields after redefinition
+Reading with reflection.
+public static java.lang.Class art.Transform1975.CUR_CLASS = (ID: 5) class art.Transform1975
+public static java.lang.String art.Transform1975.NEW_STRING = (ID: 11) <NULL>
+public static byte[] art.Transform1975.REDEFINED_DEX_BYTES = (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+Reading normally in same class.
+NEW VALUE CUR_CLASS: class art.Transform1975
+NEW VALUE REDEFINED_DEX_BYTES: ZGV4CjAzNQDNGFvYlmyIt+u4bnNv+OyNVekBxlrJi6EgBwAAcAAAAHhWNBIAAAAAAAAAAFwGAAAmAAAAcAAAAAwAAAAIAQAABwAAADgBAAAEAAAAjAEAAAwAAACsAQAAAQAAAAwCAAD0BAAALAIAAGYDAABrAwAAdQMAAH0DAACIAwAAmQMAAKsDAACuAwAAsgMAAMcDAADmAwAA/QMAABAEAAAjBAAANwQAAEsEAABmBAAAegQAAJYEAACqBAAAwQQAANkEAAD6BAAABgUAABsFAAAvBQAAMgUAADYFAAA6BQAAQgUAAE8FAABfBQAAawUAAHAFAAB5BQAAhQUAAI8FAACWBQAACAAAAAkAAAAKAAAACwAAAA0AAAAOAAAADwAAABAAAAARAAAAEgAAABkAAAAbAAAABgAAAAUAAAAAAAAABwAAAAUAAABQAwAABwAAAAYAAABYAwAABwAAAAYAAABgAwAABgAAAAgAAAAAAAAAGQAAAAoAAAAAAAAAGgAAAAoAAABgAwAAAAADAAMAAAAAAAUAFgAAAAAACwAXAAAABwACACAAAAAAAAUAAQAAAAAABQACAAAAAAAFAB0AAAAAAAUAIgAAAAIABgAhAAAABAAFAAIAAAAGAAUAAgAAAAYAAgAcAAAABgADABwAAAAGAAAAIwAAAAgAAQAeAAAACQAEAB8AAAAAAAAAAQAAAAQAAAAAAAAAGAAAAEQGAAAYBgAAAAAAAAAAAAAAAAAAMgMAAAEAAAAOAAAAAQABAAEAAAA2AwAABAAAAHAQBQAAAA4AAgAAAAIAAAA6AwAADAAAAGIAAwAaAQQAbiAEABAAGgAFAGkAAQAOAAQAAAACAAAAQAMAAFEAAABiAAMAYgEAACICBgBwEAYAAgAaAxMAbiAIADIAbiAHABIAbhAJAAIADAFuIAQAEABiAAMAcQALAAAADAFiAgIAbiAKACEADAEiAgYAcBAGAAIAGgMVAG4gCAAyAG4gCAASAG4QCQACAAwBbiAEABAAYgADAGIBAQAiAgYAcBAGAAIAGgMUAG4gCAAyAG4gCAASAG4QCQACAAwBbiAEABAADgAEAA4AAwAOAAkADnhLAA0ADgEYDwEgDwEYDwAAAAABAAAACwAAAAEAAAAEAAAAAQAAAAUAAyo+OwAIPGNsaW5pdD4ABjxpbml0PgAJQ1VSX0NMQVNTAA9Eb2luZyBzb21ldGhpbmcAEEkgZGlkIHNvbWV0aGluZyEAAUwAAkxMABNMYXJ0L1RyYW5zZm9ybTE5NzU7AB1MZGFsdmlrL2Fubm90YXRpb24vU2lnbmF0dXJlOwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABFMamF2YS9sYW5nL0NsYXNzOwARTGphdmEvbGFuZy9DbGFzczwAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABlMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7ABJMamF2YS9sYW5nL1N5c3RlbTsAGkxqYXZhL3V0aWwvQmFzZTY0JEVuY29kZXI7ABJMamF2YS91dGlsL0Jhc2U2NDsAFU5FVyBWQUxVRSBDVVJfQ0xBU1M6IAAWTkVXIFZBTFVFIE5FV19TVFJJTkc6IAAfTkVXIFZBTFVFIFJFREVGSU5FRF9ERVhfQllURVM6IAAKTkVXX1NUUklORwATUkVERUZJTkVEX0RFWF9CWVRFUwASVHJhbnNmb3JtMTk3NS5qYXZhAAFWAAJWTAACW0IABmFwcGVuZAALZG9Tb21ldGhpbmcADmVuY29kZVRvU3RyaW5nAApnZXRFbmNvZGVyAANvdXQAB3ByaW50bG4ACnJlYWRGaWVsZHMACHRvU3RyaW5nAAV2YWx1ZQB2fn5EOHsiY29tcGlsYXRpb24tbW9kZSI6ImRlYnVnIiwibWluLWFwaSI6MSwi
c2hhLTEiOiJhODM1MmYyNTQ4ODUzNjJjY2Q4ZDkwOWQzNTI5YzYwMDk0ZGQ4OTZlIiwidmVyc2lvbiI6IjEuNi4yMC1kZXYifQACAQEkHAIXDBcAAwAEAAAJAQkBCQCIgASsBAGBgATABAEJ2AQBCYAFAAAAAAAAAQAAAA4GAAA4BgAAAQAAAAAAAAAAAAAAAAAAADwGAAAQAAAAAAAAAAEAAAAAAAAAAQAAACYAAABwAAAAAgAAAAwAAAAIAQAAAwAAAAcAAAA4AQAABAAAAAQAAACMAQAABQAAAAwAAACsAQAABgAAAAEAAAAMAgAAASAAAAQAAAAsAgAAAyAAAAQAAAAyAwAAARAAAAMAAABQAwAAAiAAACYAAABmAwAABCAAAAEAAAAOBgAAACAAAAEAAAAYBgAAAxAAAAIAAAA4BgAABiAAAAEAAABEBgAAABAAAAEAAABcBgAA
+NEW VALUE NEW_STRING: null
+Reading with native.
+Field public static java.lang.Class art.Transform1975.CUR_CLASS = (ID: 5) class art.Transform1975
+Field public static java.lang.String art.Transform1975.NEW_STRING = (ID: 11) <NULL>
+Field public static byte[] art.Transform1975.REDEFINED_DEX_BYTES = (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+Reading normally in other class.
+Read CUR_CLASS field: (ID: 5) class art.Transform1975
+Read REDEFINED_DEX_BYTES field: (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+Read NEW_STRING field: (ID: 11) <NULL>
+Reading using method handles.
+(ID: 12) MethodHandle()Class (public static java.lang.Class art.Transform1975.CUR_CLASS) = (ID: 5) class art.Transform1975
+(ID: 13) MethodHandle()String (public static java.lang.String art.Transform1975.NEW_STRING) = (ID: 11) <NULL>
+(ID: 14) MethodHandle()byte[] (public static byte[] art.Transform1975.REDEFINED_DEX_BYTES) = (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+Doing modification maybe
+Doing something
+Reading with reflection after possible modification.
+public static java.lang.Class art.Transform1975.CUR_CLASS = (ID: 5) class art.Transform1975
+public static java.lang.String art.Transform1975.NEW_STRING = (ID: 15) I did something!
+public static byte[] art.Transform1975.REDEFINED_DEX_BYTES = (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+Reading normally in same class after possible modification.
+NEW VALUE CUR_CLASS: class art.Transform1975
+NEW VALUE REDEFINED_DEX_BYTES: ZGV4CjAzNQDNGFvYlmyIt+u4bnNv+OyNVekBxlrJi6EgBwAAcAAAAHhWNBIAAAAAAAAAAFwGAAAmAAAAcAAAAAwAAAAIAQAABwAAADgBAAAEAAAAjAEAAAwAAACsAQAAAQAAAAwCAAD0BAAALAIAAGYDAABrAwAAdQMAAH0DAACIAwAAmQMAAKsDAACuAwAAsgMAAMcDAADmAwAA/QMAABAEAAAjBAAANwQAAEsEAABmBAAAegQAAJYEAACqBAAAwQQAANkEAAD6BAAABgUAABsFAAAvBQAAMgUAADYFAAA6BQAAQgUAAE8FAABfBQAAawUAAHAFAAB5BQAAhQUAAI8FAACWBQAACAAAAAkAAAAKAAAACwAAAA0AAAAOAAAADwAAABAAAAARAAAAEgAAABkAAAAbAAAABgAAAAUAAAAAAAAABwAAAAUAAABQAwAABwAAAAYAAABYAwAABwAAAAYAAABgAwAABgAAAAgAAAAAAAAAGQAAAAoAAAAAAAAAGgAAAAoAAABgAwAAAAADAAMAAAAAAAUAFgAAAAAACwAXAAAABwACACAAAAAAAAUAAQAAAAAABQACAAAAAAAFAB0AAAAAAAUAIgAAAAIABgAhAAAABAAFAAIAAAAGAAUAAgAAAAYAAgAcAAAABgADABwAAAAGAAAAIwAAAAgAAQAeAAAACQAEAB8AAAAAAAAAAQAAAAQAAAAAAAAAGAAAAEQGAAAYBgAAAAAAAAAAAAAAAAAAMgMAAAEAAAAOAAAAAQABAAEAAAA2AwAABAAAAHAQBQAAAA4AAgAAAAIAAAA6AwAADAAAAGIAAwAaAQQAbiAEABAAGgAFAGkAAQAOAAQAAAACAAAAQAMAAFEAAABiAAMAYgEAACICBgBwEAYAAgAaAxMAbiAIADIAbiAHABIAbhAJAAIADAFuIAQAEABiAAMAcQALAAAADAFiAgIAbiAKACEADAEiAgYAcBAGAAIAGgMVAG4gCAAyAG4gCAASAG4QCQACAAwBbiAEABAAYgADAGIBAQAiAgYAcBAGAAIAGgMUAG4gCAAyAG4gCAASAG4QCQACAAwBbiAEABAADgAEAA4AAwAOAAkADnhLAA0ADgEYDwEgDwEYDwAAAAABAAAACwAAAAEAAAAEAAAAAQAAAAUAAyo+OwAIPGNsaW5pdD4ABjxpbml0PgAJQ1VSX0NMQVNTAA9Eb2luZyBzb21ldGhpbmcAEEkgZGlkIHNvbWV0aGluZyEAAUwAAkxMABNMYXJ0L1RyYW5zZm9ybTE5NzU7AB1MZGFsdmlrL2Fubm90YXRpb24vU2lnbmF0dXJlOwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABFMamF2YS9sYW5nL0NsYXNzOwARTGphdmEvbGFuZy9DbGFzczwAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABlMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7ABJMamF2YS9sYW5nL1N5c3RlbTsAGkxqYXZhL3V0aWwvQmFzZTY0JEVuY29kZXI7ABJMamF2YS91dGlsL0Jhc2U2NDsAFU5FVyBWQUxVRSBDVVJfQ0xBU1M6IAAWTkVXIFZBTFVFIE5FV19TVFJJTkc6IAAfTkVXIFZBTFVFIFJFREVGSU5FRF9ERVhfQllURVM6IAAKTkVXX1NUUklORwATUkVERUZJTkVEX0RFWF9CWVRFUwASVHJhbnNmb3JtMTk3NS5qYXZhAAFWAAJWTAACW0IABmFwcGVuZAALZG9Tb21ldGhpbmcADmVuY29kZVRvU3RyaW5nAApnZXRFbmNvZGVyAANvdXQAB3ByaW50bG4ACnJlYWRGaWVsZHMACHRvU3RyaW5nAAV2YWx1ZQB2fn5EOHsiY29tcGlsYXRpb24tbW9kZSI6ImRlYnVnIiwibWluLWFwaSI6MSwi
c2hhLTEiOiJhODM1MmYyNTQ4ODUzNjJjY2Q4ZDkwOWQzNTI5YzYwMDk0ZGQ4OTZlIiwidmVyc2lvbiI6IjEuNi4yMC1kZXYifQACAQEkHAIXDBcAAwAEAAAJAQkBCQCIgASsBAGBgATABAEJ2AQBCYAFAAAAAAAAAQAAAA4GAAA4BgAAAQAAAAAAAAAAAAAAAAAAADwGAAAQAAAAAAAAAAEAAAAAAAAAAQAAACYAAABwAAAAAgAAAAwAAAAIAQAAAwAAAAcAAAA4AQAABAAAAAQAAACMAQAABQAAAAwAAACsAQAABgAAAAEAAAAMAgAAASAAAAQAAAAsAgAAAyAAAAQAAAAyAwAAARAAAAMAAABQAwAAAiAAACYAAABmAwAABCAAAAEAAAAOBgAAACAAAAEAAAAYBgAAAxAAAAIAAAA4BgAABiAAAAEAAABEBgAAABAAAAEAAABcBgAA
+NEW VALUE NEW_STRING: I did something!
+Reading with native after possible modification.
+Field public static java.lang.Class art.Transform1975.CUR_CLASS = (ID: 5) class art.Transform1975
+Field public static java.lang.String art.Transform1975.NEW_STRING = (ID: 15) I did something!
+Field public static byte[] art.Transform1975.REDEFINED_DEX_BYTES = (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+Reading normally in other class after possible modification.
+Read CUR_CLASS field: (ID: 5) class art.Transform1975
+Read REDEFINED_DEX_BYTES field: (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+Read NEW_STRING field: (ID: 15) I did something!
+Reading using method handles.
+(ID: 16) MethodHandle()Class (public static java.lang.Class art.Transform1975.CUR_CLASS) = (ID: 5) class art.Transform1975
+(ID: 17) MethodHandle()String (public static java.lang.String art.Transform1975.NEW_STRING) = (ID: 15) I did something!
+(ID: 18) MethodHandle()byte[] (public static byte[] art.Transform1975.REDEFINED_DEX_BYTES) = (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+reading reflectively with old reflection objects
+OLD FIELD OBJECT: public static java.lang.Class art.Transform1975.CUR_CLASS = (ID: 5) class art.Transform1975
+OLD FIELD OBJECT: public static byte[] art.Transform1975.REDEFINED_DEX_BYTES = (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+reading natively with old jfieldIDs
+Field public static java.lang.Class art.Transform1975.CUR_CLASS = (ID: 5) class art.Transform1975
+Field public static byte[] art.Transform1975.REDEFINED_DEX_BYTES = (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+reading natively with new jfieldIDs
+Reading with old method handles
+(ID: 2) MethodHandle()Class (public static java.lang.Class art.Transform1975.CUR_CLASS) = (ID: 5) class art.Transform1975
+(ID: 3) MethodHandle()byte[] (public static byte[] art.Transform1975.REDEFINED_DEX_BYTES) = (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+Reading with new method handles
+(ID: 19) MethodHandle()Class (public static java.lang.Class art.Transform1975.CUR_CLASS) = (ID: 5) class art.Transform1975
+(ID: 20) MethodHandle()String (public static java.lang.String art.Transform1975.NEW_STRING) = (ID: 15) I did something!
+(ID: 21) MethodHandle()byte[] (public static byte[] art.Transform1975.REDEFINED_DEX_BYTES) = (ID: 6) [100, 101, 120, 10, 48, 51, 53, 0, -51, 24, ...]
+Writing (ID: 22) class art.Test1975 to CUR_CLASS with old method handle
+Reading changed value
+CUR_CLASS is now (ID: 22) class art.Test1975
diff --git a/test/1975-hello-structural-transformation/info.txt b/test/1975-hello-structural-transformation/info.txt
new file mode 100644
index 0000000..218cf4e
--- /dev/null
+++ b/test/1975-hello-structural-transformation/info.txt
@@ -0,0 +1 @@
+Test adding static fields using structural class redefinition.
diff --git a/test/1975-hello-structural-transformation/run b/test/1975-hello-structural-transformation/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/1975-hello-structural-transformation/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1975-hello-structural-transformation/src/Main.java b/test/1975-hello-structural-transformation/src/Main.java
new file mode 100644
index 0000000..9cfb95b
--- /dev/null
+++ b/test/1975-hello-structural-transformation/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1975.run();
+  }
+}
diff --git a/test/1975-hello-structural-transformation/src/art/Redefinition.java b/test/1975-hello-structural-transformation/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1975-hello-structural-transformation/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1975-hello-structural-transformation/src/art/Test1975.java b/test/1975-hello-structural-transformation/src/art/Test1975.java
new file mode 100644
index 0000000..0009c03
--- /dev/null
+++ b/test/1975-hello-structural-transformation/src/art/Test1975.java
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.ref.*;
+import java.lang.reflect.*;
+import java.util.*;
+
+public class Test1975 {
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+
+  private static final boolean PRINT_NONDETERMINISTIC = false;
+
+  public static WeakHashMap<Object, Long> id_nums = new WeakHashMap<>();
+  public static long next_id = 0;
+
+  public static String printGeneric(Object o) {
+    Long id = id_nums.get(o);
+    if (id == null) {
+      id = Long.valueOf(next_id++);
+      id_nums.put(o, id);
+    }
+    if (o == null) {
+      return "(ID: " + id + ") <NULL>";
+    }
+    Class oc = o.getClass();
+    if (oc.isArray() && oc.getComponentType() == Byte.TYPE) {
+      return "(ID: "
+          + id
+          + ") "
+          + Arrays.toString(Arrays.copyOf((byte[]) o, 10)).replace(']', ',')
+          + " ...]";
+    } else {
+      return "(ID: " + id + ") " + o.toString();
+    }
+  }
+
+  // Since we are adding fields we redefine this class with the Transform1975 class to add new
+  // field-reads.
+  public static final class ReadTransformFields implements Runnable {
+    public void run() {
+      System.out.println("Read CUR_CLASS field: " + printGeneric(Transform1975.CUR_CLASS));
+      System.out.println(
+          "Read REDEFINED_DEX_BYTES field: " + printGeneric(Transform1975.REDEFINED_DEX_BYTES));
+    }
+  }
+
+  /* Base64 encoded dex file for:
+   * public static final class ReadTransformFields implements Runnable {
+   *   public void run() {
+   *     System.out.println("Read CUR_CLASS field: " + printGeneric(Transform1975.CUR_CLASS));
+   *     System.out.println("Read REDEFINED_DEX_BYTES field: " + printGeneric(Transform1975.REDEFINED_DEX_BYTES));
+   *     System.out.println("Read NEW_STRING field: " + printGeneric(Transform1975.NEW_STRING));
+   *   }
+   * }
+   */
+  private static final byte[] NEW_READ_BYTES =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQCHIfWvfkMos9E+Snhux5rSGhnDAbiVJlyYBgAAcAAAAHhWNBIAAAAAAAAAANQFAAAk"
+                  + "AAAAcAAAAA4AAAAAAQAABQAAADgBAAAEAAAAdAEAAAgAAACUAQAAAQAAANQBAACkBAAA9AEAAO4C"
+                  + "AAD2AgAAAQMAAAQDAAAIAwAALAMAADwDAABRAwAAdQMAAJUDAACsAwAAvwMAANMDAADpAwAA/QMA"
+                  + "ABgEAAAsBAAAOAQAAE0EAABlBAAAfgQAAKAEAAC1BAAAxAQAAMcEAADLBAAAzwQAANwEAADkBAAA"
+                  + "6gQAAO8EAAD9BAAABgUAAAsFAAAVBQAAHAUAAAQAAAAFAAAABgAAAAcAAAAIAAAACQAAAAoAAAAL"
+                  + "AAAADAAAAA0AAAAOAAAADwAAABcAAAAZAAAAAgAAAAkAAAAAAAAAAwAAAAkAAADgAgAAAwAAAAoA"
+                  + "AADoAgAAFwAAAAwAAAAAAAAAGAAAAAwAAADoAgAAAgAGAAEAAAACAAkAEAAAAAIADQARAAAACwAF"
+                  + "AB0AAAAAAAMAAAAAAAAAAwAgAAAAAQABAB4AAAAFAAQAHwAAAAcAAwAAAAAACgADAAAAAAAKAAIA"
+                  + "GwAAAAoAAAAhAAAAAAAAABEAAAAHAAAA2AIAABYAAADEBQAAowUAAAAAAAABAAEAAQAAAMYCAAAE"
+                  + "AAAAcBAEAAAADgAFAAEAAgAAAMoCAABVAAAAYgADAGIBAABxEAIAAQAMASICCgBwEAUAAgAaAxIA"
+                  + "biAGADIAbiAGABIAbhAHAAIADAFuIAMAEABiAAMAYgECAHEQAgABAAwBIgIKAHAQBQACABoDFABu"
+                  + "IAYAMgBuIAYAEgBuEAcAAgAMAW4gAwAQAGIAAwBiAQEAcRACAAEADAEiAgoAcBAFAAIAGgMTAG4g"
+                  + "BgAyAG4gBgASAG4QBwACAAwBbiADABAADgAEAA4ABgAOARwPARwPARwPAAABAAAACAAAAAEAAAAH"
+                  + "AAAAAQAAAAkABjxpbml0PgAJQ1VSX0NMQVNTAAFMAAJMTAAiTGFydC9UZXN0MTk3NSRSZWFkVHJh"
+                  + "bnNmb3JtRmllbGRzOwAOTGFydC9UZXN0MTk3NTsAE0xhcnQvVHJhbnNmb3JtMTk3NTsAIkxkYWx2"
+                  + "aWsvYW5ub3RhdGlvbi9FbmNsb3NpbmdDbGFzczsAHkxkYWx2aWsvYW5ub3RhdGlvbi9Jbm5lckNs"
+                  + "YXNzOwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABFMamF2YS9sYW5nL0NsYXNzOwASTGphdmEvbGFu"
+                  + "Zy9PYmplY3Q7ABRMamF2YS9sYW5nL1J1bm5hYmxlOwASTGphdmEvbGFuZy9TdHJpbmc7ABlMamF2"
+                  + "YS9sYW5nL1N0cmluZ0J1aWxkZXI7ABJMamF2YS9sYW5nL1N5c3RlbTsACk5FV19TVFJJTkcAE1JF"
+                  + "REVGSU5FRF9ERVhfQllURVMAFlJlYWQgQ1VSX0NMQVNTIGZpZWxkOiAAF1JlYWQgTkVXX1NUUklO"
+                  + "RyBmaWVsZDogACBSZWFkIFJFREVGSU5FRF9ERVhfQllURVMgZmllbGQ6IAATUmVhZFRyYW5zZm9y"
+                  + "bUZpZWxkcwANVGVzdDE5NzUuamF2YQABVgACVkwAAltCAAthY2Nlc3NGbGFncwAGYXBwZW5kAARu"
+                  + "YW1lAANvdXQADHByaW50R2VuZXJpYwAHcHJpbnRsbgADcnVuAAh0b1N0cmluZwAFdmFsdWUAdn5+"
+                  + "RDh7ImNvbXBpbGF0aW9uLW1vZGUiOiJkZWJ1ZyIsIm1pbi1hcGkiOjEsInNoYS0xIjoiYTgzNTJm"
+                  + "MjU0ODg1MzYyY2NkOGQ5MDlkMzUyOWM2MDA5NGRkODk2ZSIsInZlcnNpb24iOiIxLjYuMjAtZGV2"
+                  + "In0AAgMBIhgBAgQCGgQZHBcVAAABAQCBgAT0AwEBjAQAAAAAAAAAAgAAAJQFAACaBQAAuAUAAAAA"
+                  + "AAAAAAAAAAAAABAAAAAAAAAAAQAAAAAAAAABAAAAJAAAAHAAAAACAAAADgAAAAABAAADAAAABQAA"
+                  + "ADgBAAAEAAAABAAAAHQBAAAFAAAACAAAAJQBAAAGAAAAAQAAANQBAAABIAAAAgAAAPQBAAADIAAA"
+                  + "AgAAAMYCAAABEAAAAwAAANgCAAACIAAAJAAAAO4CAAAEIAAAAgAAAJQFAAAAIAAAAQAAAKMFAAAD"
+                  + "EAAAAgAAALQFAAAGIAAAAQAAAMQFAAAAEAAAAQAAANQFAAA=");
+
+  static void ReadFields() throws Exception {
+    Runnable r = new ReadTransformFields();
+    System.out.println("Reading with reflection.");
+    for (Field f : Transform1975.class.getFields()) {
+      System.out.println(f.toString() + " = " + printGeneric(f.get(null)));
+    }
+    System.out.println("Reading normally in same class.");
+    Transform1975.readFields();
+    System.out.println("Reading with native.");
+    readNativeFields(Transform1975.class, getNativeFields(Transform1975.class.getFields()));
+    System.out.println("Reading normally in other class.");
+    r.run();
+    System.out.println("Reading using method handles.");
+    readMethodHandles(getMethodHandles(Transform1975.class.getFields()));
+    System.out.println("Doing modification maybe");
+    Transform1975.doSomething();
+    System.out.println("Reading with reflection after possible modification.");
+    for (Field f : Transform1975.class.getFields()) {
+      System.out.println(f.toString() + " = " + printGeneric(f.get(null)));
+    }
+    System.out.println("Reading normally in same class after possible modification.");
+    Transform1975.readFields();
+    System.out.println("Reading with native after possible modification.");
+    readNativeFields(Transform1975.class, getNativeFields(Transform1975.class.getFields()));
+    System.out.println("Reading normally in other class after possible modification.");
+    r.run();
+    System.out.println("Reading using method handles.");
+    readMethodHandles(getMethodHandles(Transform1975.class.getFields()));
+  }
+
+  public static final class MethodHandleWrapper {
+    private MethodHandle mh;
+    private Field f;
+    public MethodHandleWrapper(MethodHandle mh, Field f) {
+      this.f = f;
+      this.mh = mh;
+    }
+    public MethodHandle getHandle() {
+      return mh;
+    }
+    public Field getField() {
+      return f;
+    }
+    public Object invoke() throws Throwable {
+      return mh.invoke();
+    }
+    public String toString() {
+      return mh.toString();
+    }
+  }
+
+  public static MethodHandleWrapper[] getMethodHandles(Field[] fields) throws Exception {
+    final MethodHandles.Lookup l = MethodHandles.lookup();
+    MethodHandleWrapper[] res = new MethodHandleWrapper[fields.length];
+    for (int i = 0; i < res.length; i++) {
+      res[i] = new MethodHandleWrapper(l.unreflectGetter(fields[i]), fields[i]);;
+    }
+    return res;
+  }
+
+  public static void readMethodHandles(MethodHandleWrapper[] handles) throws Exception {
+    for (MethodHandleWrapper h : handles) {
+      try {
+        System.out.println(printGeneric(h) + " (" + h.getField() + ") = " + printGeneric(h.invoke()));
+      } catch (Throwable t) {
+        if (t instanceof Exception) {
+          throw (Exception)t;
+        } else if (t instanceof Error) {
+          throw (Error)t;
+        } else {
+          throw new RuntimeException("Unexpected throwable thrown!", t);
+        }
+      }
+    }
+  }
+  public static void doTest() throws Exception {
+    // TODO It would be good to have a test of invoke-custom too but since that requires smali and
+    // internally we just store the resolved MethodHandle this should all be good enough.
+
+    // Grab Field objects from before the transformation.
+    Field[] old_fields = Transform1975.class.getFields();
+    for (Field f : old_fields) {
+      System.out.println("Saving Field object " + printGeneric(f) + " for later");
+    }
+    // Grab jfieldIDs from before the transformation.
+    long[] old_native_fields = getNativeFields(Transform1975.class.getFields());
+    // Grab MethodHandles from before the transformation.
+    MethodHandleWrapper[] handles = getMethodHandles(Transform1975.class.getFields());
+    for (MethodHandleWrapper h : handles) {
+      System.out.println("Saving MethodHandle object " + printGeneric(h) + " for later");
+    }
+    // Grab a 'setter' MethodHandle from before the redefinition.
+    Field cur_class_field = Transform1975.class.getDeclaredField("CUR_CLASS");
+    MethodHandleWrapper write_wrapper = new MethodHandleWrapper(MethodHandles.lookup().unreflectSetter(cur_class_field), cur_class_field);
+    System.out.println("Saving writable MethodHandle " + printGeneric(write_wrapper) + " for later");
+
+    // Read the fields in all possible ways.
+    System.out.println("Reading fields before redefinition");
+    ReadFields();
+    // Redefine the transform class. Also change the ReadTransformFields so we don't have to deal
+    // with annoying compilation stuff.
+    Redefinition.doCommonStructuralClassRedefinition(
+        Transform1975.class, Transform1975.REDEFINED_DEX_BYTES);
+    Redefinition.doCommonClassRedefinition(
+        ReadTransformFields.class, new byte[] {}, NEW_READ_BYTES);
+    // Read the fields in all possible ways.
+    System.out.println("Reading fields after redefinition");
+    ReadFields();
+    // Check that the old Field, jfieldID, and MethodHandle objects were updated.
+    System.out.println("reading reflectively with old reflection objects");
+    for (Field f : old_fields) {
+      System.out.println("OLD FIELD OBJECT: " + f.toString() + " = " + printGeneric(f.get(null)));
+    }
+    System.out.println("reading natively with old jfieldIDs");
+    readNativeFields(Transform1975.class, old_native_fields);
+    // Make sure the fields keep the same id.
+    System.out.println("reading natively with new jfieldIDs");
+    long[] new_fields = getNativeFields(Transform1975.class.getFields());
+    Arrays.sort(old_native_fields);
+    Arrays.sort(new_fields);
+    boolean different = new_fields.length == old_native_fields.length;
+    for (int i = 0; i < old_native_fields.length && !different; i++) {
+      different = different || new_fields[i] != old_native_fields[i];
+    }
+    if (different) {
+      System.out.println(
+          "Missing expected fields! "
+              + Arrays.toString(new_fields)
+              + " vs "
+              + Arrays.toString(old_native_fields));
+    }
+    // Make sure the old handles work.
+    System.out.println("Reading with old method handles");
+    readMethodHandles(handles);
+    System.out.println("Reading with new method handles");
+    readMethodHandles(getMethodHandles(Transform1975.class.getFields()));
+    System.out.println("Writing " + printGeneric(Test1975.class) + " to CUR_CLASS with old method handle");
+    try {
+      write_wrapper.getHandle().invokeExact(Test1975.class);
+    } catch (Throwable t) {
+      throw new RuntimeException("something threw", t);
+    }
+    System.out.println("Reading changed value");
+    System.out.println("CUR_CLASS is now " + printGeneric(Transform1975.CUR_CLASS));
+  }
+
+  private static void printNativeField(long id, Field f, Object value) {
+    System.out.println(
+        "Field" + (PRINT_NONDETERMINISTIC ? " " + id : "") + " " + f + " = " + printGeneric(value));
+  }
+
+  public static native long[] getNativeFields(Field[] fields);
+
+  public static native void readNativeFields(Class<?> field_class, long[] sfields);
+}
diff --git a/test/1975-hello-structural-transformation/src/art/Transform1975.java b/test/1975-hello-structural-transformation/src/art/Transform1975.java
new file mode 100644
index 0000000..415be85
--- /dev/null
+++ b/test/1975-hello-structural-transformation/src/art/Transform1975.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+
+public class Transform1975 {
+
+  static {
+  }
+
+  public static Class<?> CUR_CLASS = Transform1975.class;
+
+  /* Dex file for:
+   * // NB The name NEW_STRING ensures the offset for the REDEFINED_DEX_BYTES field is different.
+   * package art;
+   * public class Transform1975 {
+   *  static {}
+   *  public static Class<?> CUR_CLASS;
+   *  public static byte[] REDEFINED_DEX_BYTES;
+   *  public static String NEW_STRING;
+   *  public static void doSomething() {
+   *    System.out.println("Doing something");
+   *    new_string = "I did something!";
+   *  }
+   *  public static void readFields() {
+   *    System.out.println("NEW VALUE CUR_CLASS: " + CUR_CLASS);
+   *    System.out.println("NEW VALUE REDEFINED_DEX_BYTES: " + Base64.getEncoder().encodeToString(REDEFINED_DEX_BYTES));
+   *    System.out.println("NEW VALUE NEW_STRING: " + NEW_STRING);
+   *  }
+   * }
+   */
+  public static byte[] REDEFINED_DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQDNGFvYlmyIt+u4bnNv+OyNVekBxlrJi6EgBwAAcAAAAHhWNBIAAAAAAAAAAFwGAAAm"
+                  + "AAAAcAAAAAwAAAAIAQAABwAAADgBAAAEAAAAjAEAAAwAAACsAQAAAQAAAAwCAAD0BAAALAIAAGYD"
+                  + "AABrAwAAdQMAAH0DAACIAwAAmQMAAKsDAACuAwAAsgMAAMcDAADmAwAA/QMAABAEAAAjBAAANwQA"
+                  + "AEsEAABmBAAAegQAAJYEAACqBAAAwQQAANkEAAD6BAAABgUAABsFAAAvBQAAMgUAADYFAAA6BQAA"
+                  + "QgUAAE8FAABfBQAAawUAAHAFAAB5BQAAhQUAAI8FAACWBQAACAAAAAkAAAAKAAAACwAAAA0AAAAO"
+                  + "AAAADwAAABAAAAARAAAAEgAAABkAAAAbAAAABgAAAAUAAAAAAAAABwAAAAUAAABQAwAABwAAAAYA"
+                  + "AABYAwAABwAAAAYAAABgAwAABgAAAAgAAAAAAAAAGQAAAAoAAAAAAAAAGgAAAAoAAABgAwAAAAAD"
+                  + "AAMAAAAAAAUAFgAAAAAACwAXAAAABwACACAAAAAAAAUAAQAAAAAABQACAAAAAAAFAB0AAAAAAAUA"
+                  + "IgAAAAIABgAhAAAABAAFAAIAAAAGAAUAAgAAAAYAAgAcAAAABgADABwAAAAGAAAAIwAAAAgAAQAe"
+                  + "AAAACQAEAB8AAAAAAAAAAQAAAAQAAAAAAAAAGAAAAEQGAAAYBgAAAAAAAAAAAAAAAAAAMgMAAAEA"
+                  + "AAAOAAAAAQABAAEAAAA2AwAABAAAAHAQBQAAAA4AAgAAAAIAAAA6AwAADAAAAGIAAwAaAQQAbiAE"
+                  + "ABAAGgAFAGkAAQAOAAQAAAACAAAAQAMAAFEAAABiAAMAYgEAACICBgBwEAYAAgAaAxMAbiAIADIA"
+                  + "biAHABIAbhAJAAIADAFuIAQAEABiAAMAcQALAAAADAFiAgIAbiAKACEADAEiAgYAcBAGAAIAGgMV"
+                  + "AG4gCAAyAG4gCAASAG4QCQACAAwBbiAEABAAYgADAGIBAQAiAgYAcBAGAAIAGgMUAG4gCAAyAG4g"
+                  + "CAASAG4QCQACAAwBbiAEABAADgAEAA4AAwAOAAkADnhLAA0ADgEYDwEgDwEYDwAAAAABAAAACwAA"
+                  + "AAEAAAAEAAAAAQAAAAUAAyo+OwAIPGNsaW5pdD4ABjxpbml0PgAJQ1VSX0NMQVNTAA9Eb2luZyBz"
+                  + "b21ldGhpbmcAEEkgZGlkIHNvbWV0aGluZyEAAUwAAkxMABNMYXJ0L1RyYW5zZm9ybTE5NzU7AB1M"
+                  + "ZGFsdmlrL2Fubm90YXRpb24vU2lnbmF0dXJlOwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABFMamF2"
+                  + "YS9sYW5nL0NsYXNzOwARTGphdmEvbGFuZy9DbGFzczwAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGph"
+                  + "dmEvbGFuZy9TdHJpbmc7ABlMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7ABJMamF2YS9sYW5nL1N5"
+                  + "c3RlbTsAGkxqYXZhL3V0aWwvQmFzZTY0JEVuY29kZXI7ABJMamF2YS91dGlsL0Jhc2U2NDsAFU5F"
+                  + "VyBWQUxVRSBDVVJfQ0xBU1M6IAAWTkVXIFZBTFVFIE5FV19TVFJJTkc6IAAfTkVXIFZBTFVFIFJF"
+                  + "REVGSU5FRF9ERVhfQllURVM6IAAKTkVXX1NUUklORwATUkVERUZJTkVEX0RFWF9CWVRFUwASVHJh"
+                  + "bnNmb3JtMTk3NS5qYXZhAAFWAAJWTAACW0IABmFwcGVuZAALZG9Tb21ldGhpbmcADmVuY29kZVRv"
+                  + "U3RyaW5nAApnZXRFbmNvZGVyAANvdXQAB3ByaW50bG4ACnJlYWRGaWVsZHMACHRvU3RyaW5nAAV2"
+                  + "YWx1ZQB2fn5EOHsiY29tcGlsYXRpb24tbW9kZSI6ImRlYnVnIiwibWluLWFwaSI6MSwic2hhLTEi"
+                  + "OiJhODM1MmYyNTQ4ODUzNjJjY2Q4ZDkwOWQzNTI5YzYwMDk0ZGQ4OTZlIiwidmVyc2lvbiI6IjEu"
+                  + "Ni4yMC1kZXYifQACAQEkHAIXDBcAAwAEAAAJAQkBCQCIgASsBAGBgATABAEJ2AQBCYAFAAAAAAAA"
+                  + "AQAAAA4GAAA4BgAAAQAAAAAAAAAAAAAAAAAAADwGAAAQAAAAAAAAAAEAAAAAAAAAAQAAACYAAABw"
+                  + "AAAAAgAAAAwAAAAIAQAAAwAAAAcAAAA4AQAABAAAAAQAAACMAQAABQAAAAwAAACsAQAABgAAAAEA"
+                  + "AAAMAgAAASAAAAQAAAAsAgAAAyAAAAQAAAAyAwAAARAAAAMAAABQAwAAAiAAACYAAABmAwAABCAA"
+                  + "AAEAAAAOBgAAACAAAAEAAAAYBgAAAxAAAAIAAAA4BgAABiAAAAEAAABEBgAAABAAAAEAAABcBgAA");
+
+  public static void doSomething() {
+    System.out.println("Not doing anything");
+  }
+
+  public static void readFields() {
+    System.out.println("ORIGINAL VALUE CUR_CLASS: " + CUR_CLASS);
+    System.out.println(
+        "ORIGINAL VALUE REDEFINED_DEX_BYTES: "
+            + Base64.getEncoder().encodeToString(REDEFINED_DEX_BYTES));
+  }
+}
diff --git a/test/1975-hello-structural-transformation/structural_transform.cc b/test/1975-hello-structural-transformation/structural_transform.cc
new file mode 100644
index 0000000..4217045
--- /dev/null
+++ b/test/1975-hello-structural-transformation/structural_transform.cc
@@ -0,0 +1,77 @@
+
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdio>
+#include <memory>
+#include <mutex>
+#include <string>
+#include <vector>
+
+#include "android-base/logging.h"
+#include "android-base/macros.h"
+#include "android-base/stringprintf.h"
+#include "jni.h"
+#include "jvmti.h"
+#include "scoped_local_ref.h"
+#include "scoped_utf_chars.h"
+
+// Test infrastructure
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
+
+namespace art {
+namespace Test1975StructuralTransform {
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test1975_readNativeFields(JNIEnv* env,
+                                                                    jclass k,
+                                                                    jclass f_class,
+                                                                    jlongArray f) {
+  jint len = env->GetArrayLength(f);
+  for (jint i = 0; i < len; i++) {
+    jlong fid_val;
+    env->GetLongArrayRegion(f, i, 1, &fid_val);
+    jfieldID fid = reinterpret_cast<jfieldID>(static_cast<intptr_t>(fid_val));
+    // For this test everything is objects and static.
+    jobject val = env->GetStaticObjectField(f_class, fid);
+    env->CallStaticVoidMethod(
+        k,
+        env->GetStaticMethodID(
+            k, "printNativeField", "(JLjava/lang/reflect/Field;Ljava/lang/Object;)V"),
+        fid_val,
+        env->ToReflectedField(f_class, fid, true),
+        val);
+    env->DeleteLocalRef(val);
+  }
+}
+
+extern "C" JNIEXPORT jlongArray JNICALL Java_art_Test1975_getNativeFields(JNIEnv* env,
+                                                                          jclass,
+                                                                          jobjectArray f) {
+  jint len = env->GetArrayLength(f);
+  jlongArray arr = env->NewLongArray(len);
+  for (jint i = 0; i < len; i++) {
+    jfieldID fid = env->FromReflectedField(env->GetObjectArrayElement(f, i));
+    jlong lfid = static_cast<jlong>(reinterpret_cast<intptr_t>(fid));
+    env->SetLongArrayRegion(arr, i, 1, &lfid);
+  }
+  return arr;
+}
+
+}  // namespace Test1975StructuralTransform
+}  // namespace art
diff --git a/test/1976-hello-structural-static-methods/expected.txt b/test/1976-hello-structural-static-methods/expected.txt
new file mode 100644
index 0000000..944cc0b
--- /dev/null
+++ b/test/1976-hello-structural-static-methods/expected.txt
@@ -0,0 +1,70 @@
+Running directly
+Saying everything!
+hello
+Saying hi!
+hello
+Running reflective
+Reflectively invoking public static void art.Transform1976.sayEverything()
+hello
+Reflectively invoking public static void art.Transform1976.sayHi()
+hello
+Running jni
+Running method public static void art.Transform1976.sayEverything() using JNI.
+hello
+Running method public static void art.Transform1976.sayHi() using JNI.
+hello
+Running method handles
+Invoking MethodHandle()void (public static void art.Transform1976.sayEverything())
+hello
+Invoking MethodHandle()void (public static void art.Transform1976.sayHi())
+hello
+Running directly after redef
+Saying everything!
+Not saying hi again!
+Bye
+Saying hi!
+Not saying hi again!
+Saying bye!
+Bye
+Running reflective after redef using old j.l.r.Method
+Reflectively invoking public static void art.Transform1976.sayEverything() on old j.l.r.Method
+Not saying hi again!
+Bye
+Reflectively invoking public static void art.Transform1976.sayHi() on old j.l.r.Method
+Not saying hi again!
+Running reflective after redef using new j.l.r.Method
+Reflectively invoking public static void art.Transform1976.sayBye() on new j.l.r.Method
+Bye
+Reflectively invoking public static void art.Transform1976.sayEverything() on new j.l.r.Method
+Not saying hi again!
+Bye
+Reflectively invoking public static void art.Transform1976.sayHi() on new j.l.r.Method
+Not saying hi again!
+Running jni with old ids
+Running method public static void art.Transform1976.sayEverything() using JNI.
+Not saying hi again!
+Bye
+Running method public static void art.Transform1976.sayHi() using JNI.
+Not saying hi again!
+Running jni with new ids
+Running method public static void art.Transform1976.sayBye() using JNI.
+Bye
+Running method public static void art.Transform1976.sayEverything() using JNI.
+Not saying hi again!
+Bye
+Running method public static void art.Transform1976.sayHi() using JNI.
+Not saying hi again!
+Running method handles using old handles
+Invoking MethodHandle()void (public static void art.Transform1976.sayEverything())
+Not saying hi again!
+Bye
+Invoking MethodHandle()void (public static void art.Transform1976.sayHi())
+Not saying hi again!
+Running method handles using new handles
+Invoking MethodHandle()void (public static void art.Transform1976.sayBye())
+Bye
+Invoking MethodHandle()void (public static void art.Transform1976.sayEverything())
+Not saying hi again!
+Bye
+Invoking MethodHandle()void (public static void art.Transform1976.sayHi())
+Not saying hi again!
diff --git a/test/1976-hello-structural-static-methods/info.txt b/test/1976-hello-structural-static-methods/info.txt
new file mode 100644
index 0000000..e9f9dbb
--- /dev/null
+++ b/test/1976-hello-structural-static-methods/info.txt
@@ -0,0 +1 @@
+Test adding static methods using structural class redefinition.
diff --git a/test/1976-hello-structural-static-methods/run b/test/1976-hello-structural-static-methods/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/1976-hello-structural-static-methods/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1976-hello-structural-static-methods/src/Main.java b/test/1976-hello-structural-static-methods/src/Main.java
new file mode 100644
index 0000000..5abf28f
--- /dev/null
+++ b/test/1976-hello-structural-static-methods/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1976.run();
+  }
+}
diff --git a/test/1976-hello-structural-static-methods/src/art/Redefinition.java b/test/1976-hello-structural-static-methods/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1976-hello-structural-static-methods/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1976-hello-structural-static-methods/src/art/Test1976.java b/test/1976-hello-structural-static-methods/src/art/Test1976.java
new file mode 100644
index 0000000..169d236
--- /dev/null
+++ b/test/1976-hello-structural-static-methods/src/art/Test1976.java
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.ref.*;
+import java.lang.reflect.*;
+import java.lang.invoke.*;
+import java.util.*;
+
+public class Test1976 {
+
+  // The fact that the target is having methods added makes it annoying to test since we cannot
+  // initially call them. To work around this in a simple-ish way just use (non-structural)
+  // redefinition to change the implementation of the caller of Transform1976 after redefining the
+  // target.
+  public static final class RunTransformMethods implements Runnable {
+    public void run() {
+      System.out.println("Saying everything!");
+      Transform1976.sayEverything();
+      System.out.println("Saying hi!");
+      Transform1976.sayHi();
+    }
+  }
+
+  /* Base64 encoded dex bytes of:
+   * public static final class RunTransformMethods implements Runnable {
+   *   public void run() {
+   *    System.out.println("Saying everything!");
+   *    Transform1976.sayEverything();
+   *    System.out.println("Saying hi!");
+   *    Transform1976.sayHi();
+   *    System.out.println("Saying bye!");
+   *    Transform1976.sayBye();
+   *   }
+   * }
+   */
+  public static final byte[] RUN_DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQCv3eV8jFcpSsqMGl1ZXRk2iraZO41D0TIgBQAAcAAAAHhWNBIAAAAAAAAAAFwEAAAc"
+                  + "AAAAcAAAAAsAAADgAAAAAgAAAAwBAAABAAAAJAEAAAcAAAAsAQAAAQAAAGQBAACcAwAAhAEAAAYC"
+                  + "AAAOAgAAMgIAAEICAABXAgAAewIAAJsCAACyAgAAxgIAANwCAADwAgAABAMAABkDAAAmAwAAOgMA"
+                  + "AEYDAABVAwAAWAMAAFwDAABpAwAAbwMAAHQDAAB9AwAAggMAAIoDAACZAwAAoAMAAKcDAAABAAAA"
+                  + "AgAAAAMAAAAEAAAABQAAAAYAAAAHAAAACAAAAAkAAAAKAAAAEAAAABAAAAAKAAAAAAAAABEAAAAK"
+                  + "AAAAAAIAAAkABQAUAAAAAAAAAAAAAAAAAAAAFgAAAAIAAAAXAAAAAgAAABgAAAACAAAAGQAAAAUA"
+                  + "AQAVAAAABgAAAAAAAAAAAAAAEQAAAAYAAAD4AQAADwAAAEwEAAAuBAAAAAAAAAEAAQABAAAA6gEA"
+                  + "AAQAAABwEAYAAAAOAAMAAQACAAAA7gEAAB8AAABiAAAAGgENAG4gBQAQAHEAAwAAAGIAAAAaAQ4A"
+                  + "biAFABAAcQAEAAAAYgAAABoBDABuIAUAEABxAAIAAAAOAAYADgAIAA54PHg8eDwAAQAAAAcAAAAB"
+                  + "AAAACAAGPGluaXQ+ACJMYXJ0L1Rlc3QxOTc2JFJ1blRyYW5zZm9ybU1ldGhvZHM7AA5MYXJ0L1Rl"
+                  + "c3QxOTc2OwATTGFydC9UcmFuc2Zvcm0xOTc2OwAiTGRhbHZpay9hbm5vdGF0aW9uL0VuY2xvc2lu"
+                  + "Z0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0aW9uL0lubmVyQ2xhc3M7ABVMamF2YS9pby9QcmludFN0"
+                  + "cmVhbTsAEkxqYXZhL2xhbmcvT2JqZWN0OwAUTGphdmEvbGFuZy9SdW5uYWJsZTsAEkxqYXZhL2xh"
+                  + "bmcvU3RyaW5nOwASTGphdmEvbGFuZy9TeXN0ZW07ABNSdW5UcmFuc2Zvcm1NZXRob2RzAAtTYXlp"
+                  + "bmcgYnllIQASU2F5aW5nIGV2ZXJ5dGhpbmchAApTYXlpbmcgaGkhAA1UZXN0MTk3Ni5qYXZhAAFW"
+                  + "AAJWTAALYWNjZXNzRmxhZ3MABG5hbWUAA291dAAHcHJpbnRsbgADcnVuAAZzYXlCeWUADXNheUV2"
+                  + "ZXJ5dGhpbmcABXNheUhpAAV2YWx1ZQB2fn5EOHsiY29tcGlsYXRpb24tbW9kZSI6ImRlYnVnIiwi"
+                  + "bWluLWFwaSI6MSwic2hhLTEiOiJhODM1MmYyNTQ4ODUzNjJjY2Q4ZDkwOWQzNTI5YzYwMDk0ZGQ4"
+                  + "OTZlIiwidmVyc2lvbiI6IjEuNi4yMC1kZXYifQACAwEaGAECBAISBBkTFwsAAAEBAIGABIQDAQGc"
+                  + "AwAAAAACAAAAHwQAACUEAABABAAAAAAAAAAAAAAAAAAAEAAAAAAAAAABAAAAAAAAAAEAAAAcAAAA"
+                  + "cAAAAAIAAAALAAAA4AAAAAMAAAACAAAADAEAAAQAAAABAAAAJAEAAAUAAAAHAAAALAEAAAYAAAAB"
+                  + "AAAAZAEAAAEgAAACAAAAhAEAAAMgAAACAAAA6gEAAAEQAAACAAAA+AEAAAIgAAAcAAAABgIAAAQg"
+                  + "AAACAAAAHwQAAAAgAAABAAAALgQAAAMQAAACAAAAPAQAAAYgAAABAAAATAQAAAAQAAABAAAAXAQA"
+                  + "AA==");
+
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+
+  private static final boolean PRINT_ID_NUM = false;
+
+  public static void printRun(long id, Method m) {
+    if (PRINT_ID_NUM) {
+      System.out.println("Running method " + id + " " + m + " using JNI.");
+    } else {
+      System.out.println("Running method " + m + " using JNI.");
+    }
+  }
+
+  public static final class MethodHandleWrapper {
+    private MethodHandle mh;
+    private Method m;
+    public MethodHandleWrapper(MethodHandle mh, Method m) {
+      this.m = m;
+      this.mh = mh;
+    }
+    public MethodHandle getHandle() {
+      return mh;
+    }
+    public Method getMethod() {
+      return m;
+    }
+    public Object invoke() throws Throwable {
+      return mh.invoke();
+    }
+    public String toString() {
+      return mh.toString();
+    }
+  }
+
+  public static MethodHandleWrapper[] getMethodHandles(Method[] methods) throws Exception {
+    final MethodHandles.Lookup l = MethodHandles.lookup();
+    ArrayList<MethodHandleWrapper> res = new ArrayList<>();
+    for (Method m : methods) {
+      if (!Modifier.isStatic(m.getModifiers())) {
+        continue;
+      }
+      res.add(new MethodHandleWrapper(l.unreflect(m), m));
+    }
+    return res.toArray(new MethodHandleWrapper[0]);
+  }
+
+  public static void runMethodHandles(MethodHandleWrapper[] handles) throws Exception {
+    for (MethodHandleWrapper h : handles) {
+      try {
+        System.out.println("Invoking " + h + " (" + h.getMethod() + ")");
+        h.invoke();
+      } catch (Throwable t) {
+        if (t instanceof Exception) {
+          throw (Exception)t;
+        } else if (t instanceof Error) {
+          throw (Error)t;
+        } else {
+          throw new RuntimeException("Unexpected throwable thrown!", t);
+        }
+      }
+    }
+  }
+
+  public static void doTest() throws Exception {
+    Runnable r = new RunTransformMethods();
+    System.out.println("Running directly");
+    r.run();
+    System.out.println("Running reflective");
+    Method[] methods = Transform1976.class.getDeclaredMethods();
+    for (Method m : methods) {
+      if (Modifier.isStatic(m.getModifiers())) {
+        System.out.println("Reflectively invoking " + m);
+        m.invoke(null);
+      } else {
+        System.out.println("Not invoking non-static method " + m);
+      }
+    }
+    System.out.println("Running jni");
+    long[] mids = getMethodIds(methods);
+    callNativeMethods(Transform1976.class, mids);
+    MethodHandleWrapper[] handles = getMethodHandles(methods);
+    System.out.println("Running method handles");
+    runMethodHandles(handles);
+    Redefinition.doCommonStructuralClassRedefinition(
+        Transform1976.class, Transform1976.REDEFINED_DEX_BYTES);
+    // Change RunTransformMethods to also call the 'runBye' method. No RI support so no classfile
+    // bytes required.
+    Redefinition.doCommonClassRedefinition(RunTransformMethods.class, new byte[] {}, RUN_DEX_BYTES);
+    System.out.println("Running directly after redef");
+    r.run();
+    System.out.println("Running reflective after redef using old j.l.r.Method");
+    for (Method m : methods) {
+      if (Modifier.isStatic(m.getModifiers())) {
+        System.out.println("Reflectively invoking " + m + " on old j.l.r.Method");
+        m.invoke(null);
+      } else {
+        System.out.println("Not invoking non-static method " + m);
+      }
+    }
+    System.out.println("Running reflective after redef using new j.l.r.Method");
+    for (Method m : Transform1976.class.getDeclaredMethods()) {
+      if (Modifier.isStatic(m.getModifiers())) {
+        System.out.println("Reflectively invoking " + m + " on new j.l.r.Method");
+        m.invoke(null);
+      } else {
+        System.out.println("Not invoking non-static method " + m);
+      }
+    }
+    System.out.println("Running jni with old ids");
+    callNativeMethods(Transform1976.class, mids);
+    System.out.println("Running jni with new ids");
+    callNativeMethods(Transform1976.class, getMethodIds(Transform1976.class.getDeclaredMethods()));
+
+    System.out.println("Running method handles using old handles");
+    runMethodHandles(handles);
+    System.out.println("Running method handles using new handles");
+    runMethodHandles(getMethodHandles(Transform1976.class.getDeclaredMethods()));
+  }
+
+  public static native long[] getMethodIds(Method[] m);
+
+  public static native void callNativeMethods(Class<?> k, long[] smethods);
+}
diff --git a/test/1976-hello-structural-static-methods/src/art/Transform1976.java b/test/1976-hello-structural-static-methods/src/art/Transform1976.java
new file mode 100644
index 0000000..e347711
--- /dev/null
+++ b/test/1976-hello-structural-static-methods/src/art/Transform1976.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+
+public class Transform1976 {
+
+  static {
+  }
+
+  /* Dex file for:
+   * package art;
+   * public class Transform1976 {
+   *   static {}
+   *   public static byte[] REDEFINED_DEX_BYTES;
+   *   public static void sayEverything() {
+   *     sayHi();
+   *     sayBye();
+   *   }
+   *   public static void sayBye() {
+   *     System.out.println("Bye");
+   *   }
+   *   public static void sayHi() {
+   *     System.out.println("Not saying hi again!");
+   *   }
+   * }
+   */
+  public static byte[] REDEFINED_DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQBHoxOnl1VNY5YvAENBMpZs9rgNOtJjgZFEBAAAcAAAAHhWNBIAAAAAAAAAAJgDAAAU"
+                  + "AAAAcAAAAAcAAADAAAAAAgAAANwAAAACAAAA9AAAAAcAAAAEAQAAAQAAADwBAADoAgAAXAEAAAYC"
+                  + "AAAQAgAAGAIAAB0CAAAyAgAASQIAAF0CAABxAgAAhQIAAJsCAACwAgAAxAIAAMcCAADLAgAAzwIA"
+                  + "ANQCAADdAgAA5QIAAPQCAAD7AgAAAwAAAAQAAAAFAAAABgAAAAcAAAALAAAADQAAAAsAAAAFAAAA"
+                  + "AAAAAAwAAAAFAAAAAAIAAAAABgAJAAAABAABAA4AAAAAAAAAAAAAAAAAAAABAAAAAAAAABAAAAAA"
+                  + "AAAAEQAAAAAAAAASAAAAAQABAA8AAAACAAAAAQAAAAAAAAABAAAAAgAAAAAAAAAKAAAAAAAAAHMD"
+                  + "AAAAAAAAAAAAAAAAAADoAQAAAQAAAA4AAAABAAEAAQAAAOwBAAAEAAAAcBAGAAAADgACAAAAAgAA"
+                  + "APABAAAIAAAAYgABABoBAgBuIAUAEAAOAAAAAAAAAAAA9QEAAAcAAABxAAQAAABxAAIAAAAOAAAA"
+                  + "AgAAAAIAAAD7AQAACAAAAGIAAQAaAQgAbiAFABAADgADAA4AAgAOAAoADngABgAOPDwADQAOeAAB"
+                  + "AAAAAwAIPGNsaW5pdD4ABjxpbml0PgADQnllABNMYXJ0L1RyYW5zZm9ybTE5NzY7ABVMamF2YS9p"
+                  + "by9QcmludFN0cmVhbTsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABJM"
+                  + "amF2YS9sYW5nL1N5c3RlbTsAFE5vdCBzYXlpbmcgaGkgYWdhaW4hABNSRURFRklORURfREVYX0JZ"
+                  + "VEVTABJUcmFuc2Zvcm0xOTc2LmphdmEAAVYAAlZMAAJbQgADb3V0AAdwcmludGxuAAZzYXlCeWUA"
+                  + "DXNheUV2ZXJ5dGhpbmcABXNheUhpAHZ+fkQ4eyJjb21waWxhdGlvbi1tb2RlIjoiZGVidWciLCJt"
+                  + "aW4tYXBpIjoxLCJzaGEtMSI6ImE4MzUyZjI1NDg4NTM2MmNjZDhkOTA5ZDM1MjljNjAwOTRkZDg5"
+                  + "NmUiLCJ2ZXJzaW9uIjoiMS42LjIwLWRldiJ9AAEABQAACQCIgATcAgGBgATwAgEJiAMBCagDAQnI"
+                  + "AwAAAAAAAAAOAAAAAAAAAAEAAAAAAAAAAQAAABQAAABwAAAAAgAAAAcAAADAAAAAAwAAAAIAAADc"
+                  + "AAAABAAAAAIAAAD0AAAABQAAAAcAAAAEAQAABgAAAAEAAAA8AQAAASAAAAUAAABcAQAAAyAAAAUA"
+                  + "AADoAQAAARAAAAEAAAAAAgAAAiAAABQAAAAGAgAAACAAAAEAAABzAwAAAxAAAAEAAACUAwAAABAA"
+                  + "AAEAAACYAwAA");
+
+  public static void sayEverything() {
+    sayHi();
+  }
+
+  public static void sayHi() {
+    System.out.println("hello");
+  }
+}
diff --git a/test/1976-hello-structural-static-methods/structural_transform_methods.cc b/test/1976-hello-structural-static-methods/structural_transform_methods.cc
new file mode 100644
index 0000000..65bcae0
--- /dev/null
+++ b/test/1976-hello-structural-static-methods/structural_transform_methods.cc
@@ -0,0 +1,77 @@
+
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdio>
+#include <memory>
+#include <mutex>
+#include <string>
+#include <vector>
+
+#include "android-base/logging.h"
+#include "android-base/macros.h"
+#include "android-base/stringprintf.h"
+#include "jni.h"
+#include "jvmti.h"
+#include "scoped_local_ref.h"
+#include "scoped_utf_chars.h"
+
+// Test infrastructure
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
+
+namespace art {
+namespace Test1976StructuralTransformMethods {
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test1976_callNativeMethods(JNIEnv* env,
+                                                                      jclass k,
+                                                                      jclass m_class,
+                                                                      jlongArray m) {
+  jint len = env->GetArrayLength(m);
+  for (jint i = 0; i < len; i++) {
+    jlong mid_val;
+    env->GetLongArrayRegion(m, i, 1, &mid_val);
+    jmethodID mid = reinterpret_cast<jmethodID>(static_cast<intptr_t>(mid_val));
+    // For this test everything is objects and static.
+    env->CallStaticVoidMethod(
+        k,
+        env->GetStaticMethodID(
+            k, "printRun", "(JLjava/lang/reflect/Method;)V"),
+        mid_val,
+        env->ToReflectedMethod(m_class, mid, true));
+    env->CallStaticVoidMethod(m_class, mid);
+  }
+}
+
+extern "C" JNIEXPORT jlongArray JNICALL Java_art_Test1976_getMethodIds(JNIEnv* env,
+                                                                       jclass,
+                                                                       jobjectArray m) {
+  jint len = env->GetArrayLength(m);
+  jlongArray arr = env->NewLongArray(len);
+  for (jint i = 0; i < len; i++) {
+    env->PushLocalFrame(1);
+    jmethodID fid = env->FromReflectedMethod(env->GetObjectArrayElement(m, i));
+    jlong lmid = static_cast<jlong>(reinterpret_cast<intptr_t>(fid));
+    env->SetLongArrayRegion(arr, i, 1, &lmid);
+    env->PopLocalFrame(nullptr);
+  }
+  return arr;
+}
+
+}  // namespace Test1976StructuralTransformMethods
+}  // namespace art
diff --git a/test/1977-hello-structural-obsolescence/expected.txt b/test/1977-hello-structural-obsolescence/expected.txt
new file mode 100644
index 0000000..ae68bdb
--- /dev/null
+++ b/test/1977-hello-structural-obsolescence/expected.txt
@@ -0,0 +1,9 @@
+hello Alex
+Not doing anything here
+goodbye Alex
+hello Alex
+transforming calling function
+goodbye Alex
+Hello Alex - Transformed
+Not doing anything here
+Goodbye and good luck - Transformed
diff --git a/test/1977-hello-structural-obsolescence/info.txt b/test/1977-hello-structural-obsolescence/info.txt
new file mode 100644
index 0000000..6835227
--- /dev/null
+++ b/test/1977-hello-structural-obsolescence/info.txt
@@ -0,0 +1 @@
+Test interaction between obsolete methods and structural class redefinition.
diff --git a/test/1977-hello-structural-obsolescence/run b/test/1977-hello-structural-obsolescence/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/1977-hello-structural-obsolescence/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1977-hello-structural-obsolescence/src/Main.java b/test/1977-hello-structural-obsolescence/src/Main.java
new file mode 100644
index 0000000..4f94ed5
--- /dev/null
+++ b/test/1977-hello-structural-obsolescence/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1977.run();
+  }
+}
diff --git a/test/1977-hello-structural-obsolescence/src/art/Redefinition.java b/test/1977-hello-structural-obsolescence/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1977-hello-structural-obsolescence/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1977-hello-structural-obsolescence/src/art/Test1977.java b/test/1977-hello-structural-obsolescence/src/art/Test1977.java
new file mode 100644
index 0000000..f4cb9f1
--- /dev/null
+++ b/test/1977-hello-structural-obsolescence/src/art/Test1977.java
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Field;
+import java.util.Base64;
+
+import sun.misc.Unsafe;
+
+public class Test1977 {
+
+  // The class we will be transforming.
+  static class Transform {
+    static {
+    }
+
+    public static String sayHiName = " Alex";
+    // Called whenever we do something.
+    public static void somethingHappened() {}
+
+    public static void sayHi(Runnable r) {
+      System.out.println("hello" + sayHiName);
+      r.run();
+      somethingHappened();
+      System.out.println("goodbye" + sayHiName);
+    }
+  }
+
+  // static class Transform {
+  //   static {}
+  //   // NB Due to the ordering of fields offset of sayHiName will change.
+  //   public static String sayHiName;
+  //   public static String sayByeName;
+  //   public static void somethingHappened() {
+  //     sayByeName = " and good luck";
+  //   }
+  //   public static void doSayBye() {
+  //     System.out.println("Goodbye" + sayByeName + " - Transformed");
+  //   }
+  //   public static void doSayHi() {
+  //     System.out.println("Hello" + sayHiName + " - Transformed");
+  //   }
+  //   public static void sayHi(Runnable r) {
+  //     doSayHi();
+  //     r.run();
+  //     somethingHappened();
+  //     doSayBye();
+  //   }
+  // }
+  private static final byte[] DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQBNCReVL85UCydGe4wKq3olUYP6Lb8WIlewBgAAcAAAAHhWNBIAAAAAAAAAAOwFAAAl"
+                  + "AAAAcAAAAAsAAAAEAQAABQAAADABAAADAAAAbAEAAAwAAACEAQAAAQAAAOQBAACsBAAABAIAAEID"
+                  + "AABSAwAAYgMAAGwDAAB0AwAAfQMAAIQDAACHAwAAiwMAAKUDAAC1AwAA2QMAAPkDAAAQBAAAJAQA"
+                  + "ADoEAABOBAAAaQQAAH0EAACMBAAAlwQAAJoEAACeBAAAqwQAALMEAAC9BAAAxgQAAMwEAADRBAAA"
+                  + "2gQAAN8EAADrBAAA8gQAAP0EAAAQBQAAGgUAACEFAAAIAAAACQAAAAoAAAALAAAADAAAAA0AAAAO"
+                  + "AAAADwAAABAAAAARAAAAFAAAAAYAAAAHAAAAAAAAAAcAAAAIAAAANAMAABQAAAAKAAAAAAAAABUA"
+                  + "AAAKAAAAPAMAABUAAAAKAAAANAMAAAAABwAeAAAAAAAHACAAAAAJAAQAGwAAAAAAAgACAAAAAAAC"
+                  + "AAMAAAAAAAIAGAAAAAAAAgAZAAAAAAADAB8AAAAAAAIAIQAAAAQABAAcAAAABQACAAMAAAAGAAIA"
+                  + "HQAAAAgAAgADAAAACAABABcAAAAIAAAAIgAAAAAAAAAAAAAABQAAAAAAAAASAAAA3AUAAKgFAAAA"
+                  + "AAAAAAAAAAAAAAAOAwAAAQAAAA4AAAABAAEAAQAAABIDAAAEAAAAcBAHAAAADgAEAAAAAgAAABYD"
+                  + "AAAeAAAAYgACAGIBAAAiAggAcBAJAAIAGgMEAG4gCgAyAG4gCgASABoBAABuIAoAEgBuEAsAAgAM"
+                  + "AW4gBgAQAA4ABAAAAAIAAAAdAwAAHgAAAGIAAgBiAQEAIgIIAHAQCQACABoDBQBuIAoAMgBuIAoA"
+                  + "EgAaAQAAbiAKABIAbhALAAIADAFuIAYAEAAOAAEAAQABAAAAJAMAAA0AAABxAAMAAAByEAgAAABx"
+                  + "AAUAAABxAAIAAAAOAAAAAQAAAAAAAAAtAwAABQAAABoAAQBpAAAADgAHAA4ABgAOAA8ADgEdDwAS"
+                  + "AA4BHQ8AFQEADjw8PDwADAAOSwAAAAEAAAAHAAAAAQAAAAYADiAtIFRyYW5zZm9ybWVkAA4gYW5k"
+                  + "IGdvb2QgbHVjawAIPGNsaW5pdD4ABjxpbml0PgAHR29vZGJ5ZQAFSGVsbG8AAUwAAkxMABhMYXJ0"
+                  + "L1Rlc3QxOTc3JFRyYW5zZm9ybTsADkxhcnQvVGVzdDE5Nzc7ACJMZGFsdmlrL2Fubm90YXRpb24v"
+                  + "RW5jbG9zaW5nQ2xhc3M7AB5MZGFsdmlrL2Fubm90YXRpb24vSW5uZXJDbGFzczsAFUxqYXZhL2lv"
+                  + "L1ByaW50U3RyZWFtOwASTGphdmEvbGFuZy9PYmplY3Q7ABRMamF2YS9sYW5nL1J1bm5hYmxlOwAS"
+                  + "TGphdmEvbGFuZy9TdHJpbmc7ABlMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7ABJMamF2YS9sYW5n"
+                  + "L1N5c3RlbTsADVRlc3QxOTc3LmphdmEACVRyYW5zZm9ybQABVgACVkwAC2FjY2Vzc0ZsYWdzAAZh"
+                  + "cHBlbmQACGRvU2F5QnllAAdkb1NheUhpAARuYW1lAANvdXQAB3ByaW50bG4AA3J1bgAKc2F5Qnll"
+                  + "TmFtZQAFc2F5SGkACXNheUhpTmFtZQARc29tZXRoaW5nSGFwcGVuZWQACHRvU3RyaW5nAAV2YWx1"
+                  + "ZQB2fn5EOHsiY29tcGlsYXRpb24tbW9kZSI6ImRlYnVnIiwibWluLWFwaSI6MSwic2hhLTEiOiJh"
+                  + "ODM1MmYyNTQ4ODUzNjJjY2Q4ZDkwOWQzNTI5YzYwMDk0ZGQ4OTZlIiwidmVyc2lvbiI6IjEuNi4y"
+                  + "MC1kZXYifQACAgEjGAECAwIWBAgaFxMCAAYAAAkBCQCIgASEBAGAgASYBAEJsAQBCfwEAQnIBQEJ"
+                  + "9AUAAAAAAgAAAJkFAACfBQAA0AUAAAAAAAAAAAAAAAAAABAAAAAAAAAAAQAAAAAAAAABAAAAJQAA"
+                  + "AHAAAAACAAAACwAAAAQBAAADAAAABQAAADABAAAEAAAAAwAAAGwBAAAFAAAADAAAAIQBAAAGAAAA"
+                  + "AQAAAOQBAAABIAAABgAAAAQCAAADIAAABgAAAA4DAAABEAAAAgAAADQDAAACIAAAJQAAAEIDAAAE"
+                  + "IAAAAgAAAJkFAAAAIAAAAQAAAKgFAAADEAAAAgAAAMwFAAAGIAAAAQAAANwFAAAAEAAAAQAAAOwF"
+                  + "AAA=");
+
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+
+  public static void doTest() throws Exception {
+    Transform.sayHi(
+        () -> {
+          System.out.println("Not doing anything here");
+        });
+    Transform.sayHi(
+        () -> {
+          System.out.println("transforming calling function");
+          Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+        });
+    Transform.sayHi(
+        () -> {
+          System.out.println("Not doing anything here");
+        });
+  }
+}
diff --git a/test/1978-regular-obsolete-then-structural-obsolescence/expected.txt b/test/1978-regular-obsolete-then-structural-obsolescence/expected.txt
new file mode 100644
index 0000000..2bad9f2
--- /dev/null
+++ b/test/1978-regular-obsolete-then-structural-obsolescence/expected.txt
@@ -0,0 +1,21 @@
+hello Alex
+Not doing anything here - op1
+Running after op1 using normal definition
+how do you do Alex
+Not doing anything here - op2
+Running after op2 using normal definition
+goodbye Alex
+hello Alex
+transforming calling function - non-structural
+Running after op1 using non-structural redefinition
+how do you do Alex
+transforming calling function - structural
+Running after op2 using structural redefinition
+goodbye Alex
+Hello Alex - Transformed
+Not doing anything here - op1
+Running after op2 using structural redefinition
+How do you do this fine day - Transformed
+Not doing anything here - op2
+Running after op2 using structural redefinition
+Goodbye and good luck - Transformed
diff --git a/test/1978-regular-obsolete-then-structural-obsolescence/info.txt b/test/1978-regular-obsolete-then-structural-obsolescence/info.txt
new file mode 100644
index 0000000..6835227
--- /dev/null
+++ b/test/1978-regular-obsolete-then-structural-obsolescence/info.txt
@@ -0,0 +1 @@
+Test interaction between obsolete methods and structural class redefinition.
diff --git a/test/1978-regular-obsolete-then-structural-obsolescence/run b/test/1978-regular-obsolete-then-structural-obsolescence/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/1978-regular-obsolete-then-structural-obsolescence/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1978-regular-obsolete-then-structural-obsolescence/src/Main.java b/test/1978-regular-obsolete-then-structural-obsolescence/src/Main.java
new file mode 100644
index 0000000..7d023df
--- /dev/null
+++ b/test/1978-regular-obsolete-then-structural-obsolescence/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1978.run();
+  }
+}
diff --git a/test/1978-regular-obsolete-then-structural-obsolescence/src/art/Redefinition.java b/test/1978-regular-obsolete-then-structural-obsolescence/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1978-regular-obsolete-then-structural-obsolescence/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1978-regular-obsolete-then-structural-obsolescence/src/art/Test1978.java b/test/1978-regular-obsolete-then-structural-obsolescence/src/art/Test1978.java
new file mode 100644
index 0000000..54d9c2d
--- /dev/null
+++ b/test/1978-regular-obsolete-then-structural-obsolescence/src/art/Test1978.java
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+
+// TODO Need version where non-structural obsolete method tries to touch a structural obsolete
+// field.
+public class Test1978 {
+
+  // The class we will be transforming.
+  static class Transform {
+    static {
+    }
+
+    public static String sayHiName = " Alex";
+    // Called whenever we do something.
+    public static void somethingHappened1() {
+      System.out.println("Running after op1 using normal definition");
+    }
+
+    public static void somethingHappened2() {
+      System.out.println("Running after op2 using normal definition");
+    }
+
+    public static void sayHi(Runnable r1, Runnable r2) {
+      System.out.println("hello" + sayHiName);
+      r1.run();
+      somethingHappened1();
+      System.out.println("how do you do" + sayHiName);
+      r2.run();
+      somethingHappened2();
+      System.out.println("goodbye" + sayHiName);
+    }
+  }
+
+  // static class Transform {
+  //   static {}
+  //   public static String sayHiName;
+  //   public static void somethingHappened1() {
+  //     System.out.println("Running after op1 using non-structural redefinition");
+  //   }
+  //   public static void somethingHappened2() {
+  //     System.out.println("Running after op2 using non-structural redefinition");
+  //   }
+  //   public static void sayHi(Runnable r1, Runnable r2) {
+  //     System.out.println("TRANSFORMED_NON_STRUCTURAL hello" + sayHiName);
+  //     r1.run();
+  //     somethingHappened1();
+  //     System.out.println("TRANSFORMED_NON_STRUCTURAL how do you do" + sayHiName);
+  //     r2.run();
+  //     somethingHappened2();
+  //     System.out.println("TRANSFORMED_NON_STRUCTURAL goodbye" + sayHiName);
+  //   }
+  // }
+  private static final byte[] NON_STRUCTURAL_DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQCrRWiiuda6GysXvxv0iJ+9HUcPBCTU/UVkBwAAcAAAAHhWNBIAAAAAAAAAAKAGAAAl"
+                  + "AAAAcAAAAAsAAAAEAQAABQAAADABAAACAAAAbAEAAAsAAAB8AQAAAQAAANQBAABwBQAA9AEAAFQD"
+                  + "AABeAwAAZgMAAGkDAABtAwAAhwMAAJcDAAC7AwAA2wMAAPIDAAAGBAAAHAQAADAEAABLBAAAXwQA"
+                  + "AJQEAADJBAAA7QQAAA8FAAA5BQAASAUAAFMFAABWBQAAWgUAAF8FAABsBQAAdAUAAHoFAAB/BQAA"
+                  + "iAUAAI0FAACUBQAAnwUAALMFAADHBQAA0QUAANgFAAAEAAAABQAAAAYAAAAHAAAACAAAAAkAAAAK"
+                  + "AAAACwAAAAwAAAANAAAAFQAAAAIAAAAHAAAAAAAAAAMAAAAIAAAARAMAABUAAAAKAAAAAAAAABcA"
+                  + "AAAKAAAATAMAABYAAAAKAAAARAMAAAAABwAfAAAACQAEABsAAAAAAAIAAAAAAAAAAgABAAAAAAAD"
+                  + "AB4AAAAAAAIAIAAAAAAAAgAhAAAABAAEABwAAAAFAAIAAQAAAAYAAgAdAAAACAACAAEAAAAIAAEA"
+                  + "GQAAAAgAAAAiAAAAAAAAAAAAAAAFAAAAAAAAABMAAACQBgAAXwYAAAAAAAAAAAAAAAAAABwDAAAB"
+                  + "AAAADgAAAAEAAQABAAAAIAMAAAQAAABwEAYAAAAOAAYAAgACAAAAJAMAAFUAAABiAAEAYgEAACIC"
+                  + "CABwEAgAAgAaAxEAbiAJADIAbiAJABIAbhAKAAIADAFuIAUAEAByEAcABABxAAMAAABiBAEAYgAA"
+                  + "ACIBCABwEAgAAQAaAhIAbiAJACEAbiAJAAEAbhAKAAEADABuIAUABAByEAcABQBxAAQAAABiBAEA"
+                  + "YgUAACIACABwEAgAAAAaARAAbiAJABAAbiAJAFAAbhAKAAAADAVuIAUAVAAOAAAAAgAAAAIAAAA3"
+                  + "AwAACAAAAGIAAQAaAQ4AbiAFABAADgACAAAAAgAAADwDAAAIAAAAYgABABoBDwBuIAUAEAAOAAcA"
+                  + "DgAGAA4AEAIAAA4BGA88PAEYDzw8ARgPAAoADngADQAOeAAAAAABAAAABwAAAAIAAAAGAAYACDxj"
+                  + "bGluaXQ+AAY8aW5pdD4AAUwAAkxMABhMYXJ0L1Rlc3QxOTc4JFRyYW5zZm9ybTsADkxhcnQvVGVz"
+                  + "dDE5Nzg7ACJMZGFsdmlrL2Fubm90YXRpb24vRW5jbG9zaW5nQ2xhc3M7AB5MZGFsdmlrL2Fubm90"
+                  + "YXRpb24vSW5uZXJDbGFzczsAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwASTGphdmEvbGFuZy9PYmpl"
+                  + "Y3Q7ABRMamF2YS9sYW5nL1J1bm5hYmxlOwASTGphdmEvbGFuZy9TdHJpbmc7ABlMamF2YS9sYW5n"
+                  + "L1N0cmluZ0J1aWxkZXI7ABJMamF2YS9sYW5nL1N5c3RlbTsAM1J1bm5pbmcgYWZ0ZXIgb3AxIHVz"
+                  + "aW5nIG5vbi1zdHJ1Y3R1cmFsIHJlZGVmaW5pdGlvbgAzUnVubmluZyBhZnRlciBvcDIgdXNpbmcg"
+                  + "bm9uLXN0cnVjdHVyYWwgcmVkZWZpbml0aW9uACJUUkFOU0ZPUk1FRF9OT05fU1RSVUNUVVJBTCBn"
+                  + "b29kYnllACBUUkFOU0ZPUk1FRF9OT05fU1RSVUNUVVJBTCBoZWxsbwAoVFJBTlNGT1JNRURfTk9O"
+                  + "X1NUUlVDVFVSQUwgaG93IGRvIHlvdSBkbwANVGVzdDE5NzguamF2YQAJVHJhbnNmb3JtAAFWAAJW"
+                  + "TAADVkxMAAthY2Nlc3NGbGFncwAGYXBwZW5kAARuYW1lAANvdXQAB3ByaW50bG4AA3J1bgAFc2F5"
+                  + "SGkACXNheUhpTmFtZQASc29tZXRoaW5nSGFwcGVuZWQxABJzb21ldGhpbmdIYXBwZW5lZDIACHRv"
+                  + "U3RyaW5nAAV2YWx1ZQB2fn5EOHsiY29tcGlsYXRpb24tbW9kZSI6ImRlYnVnIiwibWluLWFwaSI6"
+                  + "MSwic2hhLTEiOiJhODM1MmYyNTQ4ODUzNjJjY2Q4ZDkwOWQzNTI5YzYwMDk0ZGQ4OTZlIiwidmVy"
+                  + "c2lvbiI6IjEuNi4yMC1kZXYifQACAgEjGAECAwIYBAgaFxQBAAUAAAkAiIAE9AMBgIAEiAQBCaAE"
+                  + "AQncBQEJ/AUAAAAAAAAAAgAAAFAGAABWBgAAhAYAAAAAAAAAAAAAAAAAABAAAAAAAAAAAQAAAAAA"
+                  + "AAABAAAAJQAAAHAAAAACAAAACwAAAAQBAAADAAAABQAAADABAAAEAAAAAgAAAGwBAAAFAAAACwAA"
+                  + "AHwBAAAGAAAAAQAAANQBAAABIAAABQAAAPQBAAADIAAABQAAABwDAAABEAAAAgAAAEQDAAACIAAA"
+                  + "JQAAAFQDAAAEIAAAAgAAAFAGAAAAIAAAAQAAAF8GAAADEAAAAgAAAIAGAAAGIAAAAQAAAJAGAAAA"
+                  + "EAAAAQAAAKAGAAA=");
+
+  // static class Transform {
+  //   static {}
+  //   // NB Due to the ordering of fields offset of sayHiName will change.
+  //   public static String sayHiName;
+  //   public static String sayByeName;
+  //   public static String sayQuery;
+  //   public static void somethingHappened1() {
+  //     System.out.println("Running after op2 using structural redefinition");
+  //     sayQuery = " this fine day";
+  //   }
+  //   public static void somethingHappened2() {
+  //     System.out.println("Running after op2 using structural redefinition");
+  //     sayByeName = " and good luck";
+  //   }
+  //   public static void doSayBye() {
+  //     System.out.println("Goodbye" + sayByeName + " - Transformed");
+  //   }
+  //   public static void doQuery() {
+  //     System.out.println("How do you do" + sayQuery + " - Transformed");
+  //   }
+  //   public static void doSayHi() {
+  //     System.out.println("Hello" + sayHiName + " - Transformed");
+  //   }
+  //   public static void sayHi(Runnable r1, Runnable r2) {
+  //     doSayHi();
+  //     r1.run();
+  //     somethingHappened1();
+  //     doQuery();
+  //     r2.run();
+  //     somethingHappened2();
+  //     doSayBye();
+  //   }
+  // }
+  private static final byte[] STRUCTURAL_DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQBcSGjP90G9cWx1TjBkAO5SCOfSe5sjsEAUCAAAcAAAAHhWNBIAAAAAAAAAAFAHAAAs"
+                  + "AAAAcAAAAAsAAAAgAQAABQAAAEwBAAAEAAAAiAEAAA4AAACoAQAAAQAAABgCAADcBQAAOAIAABwE"
+                  + "AAAsBAAAPAQAAEwEAABWBAAAXgQAAGcEAABuBAAAfQQAAIAEAACEBAAAngQAAK4EAADSBAAA8gQA"
+                  + "AAkFAAAdBQAAMwUAAEcFAABiBQAAdgUAAKcFAAC2BQAAwQUAAMQFAADIBQAAzQUAANoFAADiBQAA"
+                  + "6wUAAPUFAAD+BQAABAYAAAkGAAASBgAAFwYAACMGAAAqBgAANQYAAD8GAABTBgAAZwYAAHEGAAB4"
+                  + "BgAACgAAAAsAAAAMAAAADQAAAA4AAAAPAAAAEAAAABEAAAASAAAAEwAAABcAAAAIAAAABwAAAAAA"
+                  + "AAAJAAAACAAAAAwEAAAXAAAACgAAAAAAAAAZAAAACgAAABQEAAAYAAAACgAAAAwEAAAAAAcAIwAA"
+                  + "AAAABwAlAAAAAAAHACYAAAAJAAQAIAAAAAAAAgADAAAAAAACAAQAAAAAAAIAHAAAAAAAAgAdAAAA"
+                  + "AAACAB4AAAAAAAMAJAAAAAAAAgAnAAAAAAACACgAAAAEAAQAIQAAAAUAAgAEAAAABgACACIAAAAI"
+                  + "AAIABAAAAAgAAQAbAAAACAAAACkAAAAAAAAAAAAAAAUAAAAAAAAAFQAAAEAHAAD/BgAAAAAAAAAA"
+                  + "AAAAAAAA1AMAAAEAAAAOAAAAAQABAAEAAADYAwAABAAAAHAQCQAAAA4ABAAAAAIAAADcAwAAHgAA"
+                  + "AGIAAwBiAQIAIgIIAHAQCwACABoDBwBuIAwAMgBuIAwAEgAaAQAAbiAMABIAbhANAAIADAFuIAgA"
+                  + "EAAOAAQAAAACAAAA4wMAAB4AAABiAAMAYgEAACICCABwEAsAAgAaAwUAbiAMADIAbiAMABIAGgEA"
+                  + "AG4gDAASAG4QDQACAAwBbiAIABAADgAEAAAAAgAAAOoDAAAeAAAAYgADAGIBAQAiAggAcBALAAIA"
+                  + "GgMGAG4gDAAyAG4gDAASABoBAABuIAwAEgBuEA0AAgAMAW4gCAAQAA4AAgACAAEAAADxAwAAFgAA"
+                  + "AHEABAAAAHIQCgAAAHEABgAAAHEAAgAAAHIQCgABAHEABwAAAHEAAwAAAA4AAgAAAAIAAAD+AwAA"
+                  + "DAAAAGIAAwAaARQAbiAIABAAGgACAGkAAgAOAAIAAAACAAAABAQAAAwAAABiAAMAGgEUAG4gCAAQ"
+                  + "ABoAAQBpAAAADgAHAA4ABgAOABgADgEdDwAVAA4BHQ8AGwAOAR0PAB4CAAAOPDw8PDw8PAANAA54"
+                  + "SwARAA54SwAAAAEAAAAHAAAAAgAAAAYABgAOIC0gVHJhbnNmb3JtZWQADiBhbmQgZ29vZCBsdWNr"
+                  + "AA4gdGhpcyBmaW5lIGRheQAIPGNsaW5pdD4ABjxpbml0PgAHR29vZGJ5ZQAFSGVsbG8ADUhvdyBk"
+                  + "byB5b3UgZG8AAUwAAkxMABhMYXJ0L1Rlc3QxOTc4JFRyYW5zZm9ybTsADkxhcnQvVGVzdDE5Nzg7"
+                  + "ACJMZGFsdmlrL2Fubm90YXRpb24vRW5jbG9zaW5nQ2xhc3M7AB5MZGFsdmlrL2Fubm90YXRpb24v"
+                  + "SW5uZXJDbGFzczsAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwASTGphdmEvbGFuZy9PYmplY3Q7ABRM"
+                  + "amF2YS9sYW5nL1J1bm5hYmxlOwASTGphdmEvbGFuZy9TdHJpbmc7ABlMamF2YS9sYW5nL1N0cmlu"
+                  + "Z0J1aWxkZXI7ABJMamF2YS9sYW5nL1N5c3RlbTsAL1J1bm5pbmcgYWZ0ZXIgb3AyIHVzaW5nIHN0"
+                  + "cnVjdHVyYWwgcmVkZWZpbml0aW9uAA1UZXN0MTk3OC5qYXZhAAlUcmFuc2Zvcm0AAVYAAlZMAANW"
+                  + "TEwAC2FjY2Vzc0ZsYWdzAAZhcHBlbmQAB2RvUXVlcnkACGRvU2F5QnllAAdkb1NheUhpAARuYW1l"
+                  + "AANvdXQAB3ByaW50bG4AA3J1bgAKc2F5QnllTmFtZQAFc2F5SGkACXNheUhpTmFtZQAIc2F5UXVl"
+                  + "cnkAEnNvbWV0aGluZ0hhcHBlbmVkMQASc29tZXRoaW5nSGFwcGVuZWQyAAh0b1N0cmluZwAFdmFs"
+                  + "dWUAdn5+RDh7ImNvbXBpbGF0aW9uLW1vZGUiOiJkZWJ1ZyIsIm1pbi1hcGkiOjEsInNoYS0xIjoi"
+                  + "YTgzNTJmMjU0ODg1MzYyY2NkOGQ5MDlkMzUyOWM2MDA5NGRkODk2ZSIsInZlcnNpb24iOiIxLjYu"
+                  + "MjAtZGV2In0AAgIBKhgBAgMCGgQIHxcWAwAIAAAJAQkBCQCIgAS4BAGAgATMBAEJ5AQBCbAFAQn8"
+                  + "BQEJyAYBCYQHAQmsBwAAAAAAAAACAAAA8AYAAPYGAAA0BwAAAAAAAAAAAAAAAAAAEAAAAAAAAAAB"
+                  + "AAAAAAAAAAEAAAAsAAAAcAAAAAIAAAALAAAAIAEAAAMAAAAFAAAATAEAAAQAAAAEAAAAiAEAAAUA"
+                  + "AAAOAAAAqAEAAAYAAAABAAAAGAIAAAEgAAAIAAAAOAIAAAMgAAAIAAAA1AMAAAEQAAACAAAADAQA"
+                  + "AAIgAAAsAAAAHAQAAAQgAAACAAAA8AYAAAAgAAABAAAA/wYAAAMQAAACAAAAMAcAAAYgAAABAAAA"
+                  + "QAcAAAAQAAABAAAAUAcAAA==");
+
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+
+  public static void doTest() throws Exception {
+    Transform.sayHi(
+        () -> {
+          System.out.println("Not doing anything here - op1");
+        },
+        () -> {
+          System.out.println("Not doing anything here - op2");
+        });
+    Transform.sayHi(
+        () -> {
+          System.out.println("transforming calling function - non-structural");
+          Redefinition.doCommonClassRedefinition(
+              Transform.class, new byte[] {}, NON_STRUCTURAL_DEX_BYTES);
+        },
+        () -> {
+          System.out.println("transforming calling function - structural");
+          Redefinition.doCommonStructuralClassRedefinition(Transform.class, STRUCTURAL_DEX_BYTES);
+        });
+    Transform.sayHi(
+        () -> {
+          System.out.println("Not doing anything here - op1");
+        },
+        () -> {
+          System.out.println("Not doing anything here - op2");
+        });
+  }
+}
diff --git a/test/1979-threaded-structural-transformation/expected.txt b/test/1979-threaded-structural-transformation/expected.txt
new file mode 100644
index 0000000..ffe96bb
--- /dev/null
+++ b/test/1979-threaded-structural-transformation/expected.txt
@@ -0,0 +1,16 @@
+Hitting class class art.Test1979$Transform[FOO: value of <FOO FIELD>, BAR: value of <BAR FIELD>]
+Initial: class art.Test1979$Transform[FOO: value of <FOO FIELD>, BAR: value of <BAR FIELD>]
+Reading with reflection.
+public static java.lang.Object art.Test1979$Transform.BAR = (ID: 0) value of <BAR FIELD>
+public static java.lang.Object art.Test1979$Transform.FOO = (ID: 1) value of <FOO FIELD>
+Reading normally.
+Read BAR field: (ID: 0) value of <BAR FIELD>
+Read FOO field: (ID: 1) value of <FOO FIELD>
+Redefined: class art.Test1979$Transform[FOO: value of <FOO FIELD>, BAR: value of <BAR FIELD>, BAZ: null]
+Reading with reflection after possible modification.
+public static java.lang.Object art.Test1979$Transform.BAR = (ID: 0) value of <BAR FIELD>
+public static java.lang.Object art.Test1979$Transform.BAZ = (ID: 2) <NULL>
+public static java.lang.Object art.Test1979$Transform.FOO = (ID: 1) value of <FOO FIELD>
+Reading normally after possible modification.
+Read FOO field: (ID: 1) value of <FOO FIELD>
+Read BAR field: (ID: 0) value of <BAR FIELD>
diff --git a/test/1979-threaded-structural-transformation/info.txt b/test/1979-threaded-structural-transformation/info.txt
new file mode 100644
index 0000000..dc7623c
--- /dev/null
+++ b/test/1979-threaded-structural-transformation/info.txt
@@ -0,0 +1,2 @@
+Test that remote threads without references to the redefined class are properly deoptimized when
+structural redefinition occurs.
diff --git a/test/1979-threaded-structural-transformation/run b/test/1979-threaded-structural-transformation/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/1979-threaded-structural-transformation/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1979-threaded-structural-transformation/src/Main.java b/test/1979-threaded-structural-transformation/src/Main.java
new file mode 100644
index 0000000..36174eb
--- /dev/null
+++ b/test/1979-threaded-structural-transformation/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1979.run();
+  }
+}
diff --git a/test/1979-threaded-structural-transformation/src/art/Redefinition.java b/test/1979-threaded-structural-transformation/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1979-threaded-structural-transformation/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1979-threaded-structural-transformation/src/art/Test1979.java b/test/1979-threaded-structural-transformation/src/art/Test1979.java
new file mode 100644
index 0000000..ca4027d
--- /dev/null
+++ b/test/1979-threaded-structural-transformation/src/art/Test1979.java
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.ref.*;
+import java.lang.reflect.*;
+import java.util.*;
+import java.util.concurrent.CountDownLatch;
+import java.util.function.Supplier;
+
+public class Test1979 {
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+
+  private static final boolean PRINT_NONDETERMINISTIC = false;
+
+  public static WeakHashMap<Object, Long> id_nums = new WeakHashMap<>();
+  public static long next_id = 0;
+
+  public static String printGeneric(Object o) {
+    Long id = id_nums.get(o);
+    if (id == null) {
+      id = Long.valueOf(next_id++);
+      id_nums.put(o, id);
+    }
+    if (o == null) {
+      return "(ID: " + id + ") <NULL>";
+    }
+    Class oc = o.getClass();
+    if (oc.isArray() && oc.getComponentType() == Byte.TYPE) {
+      return "(ID: "
+          + id
+          + ") "
+          + Arrays.toString(Arrays.copyOf((byte[]) o, 10)).replace(']', ',')
+          + " ...]";
+    } else {
+      return "(ID: " + id + ") " + o.toString();
+    }
+  }
+
+  private static void doRedefinition() {
+    Redefinition.doCommonStructuralClassRedefinition(
+        Transform.class, REDEFINED_DEX_BYTES);
+  }
+
+  private static void readReflective(String msg) throws Exception {
+    System.out.println(msg);
+    for (Field f : Transform.class.getFields()) {
+      System.out.println(f.toString() + " = " + printGeneric(f.get(null)));
+    }
+  }
+
+  public static class Transform {
+    static {}
+    public static Object BAR = new Object() {
+      public String toString() {
+        return "value of <" + this.get() + ">";
+      }
+      public Object get() {
+        return "BAR FIELD";
+      }
+    };
+    public static Object FOO = new Object() {
+      public String toString() {
+        return "value of <" + this.get() + ">";
+      }
+      public Object get() {
+        return "FOO FIELD";
+      }
+    };
+    public static String staticToString() {
+      return Transform.class.toString() + "[FOO: " + FOO + ", BAR: " + BAR + "]";
+    }
+  }
+
+  /* Base64 encoded class of:
+   * public static class Transform {
+   *   static {}
+   *   // NB This is the order the fields will be laid out in memory.
+   *   public static Object BAR;
+   *   public static Object BAZ;
+   *   public static Object FOO;
+   *   public static String staticToString() {
+   *    return Transform.class.toString() + "[FOO: " + FOO + ", BAR: " + BAR + ", BAZ: " + BAZ + "]";
+   *   }
+   * }
+   */
+  private static byte[] REDEFINED_DEX_BYTES = Base64.getDecoder().decode(
+      "ZGV4CjAzNQDrznAlv8Fs6FNeDAHAxiU9uy8DUayd82ZkBQAAcAAAAHhWNBIAAAAAAAAAAKAEAAAd" +
+      "AAAAcAAAAAkAAADkAAAABAAAAAgBAAADAAAAOAEAAAkAAABQAQAAAQAAAJgBAACsAwAAuAEAAHoC" +
+      "AACDAgAAjAIAAJYCAACeAgAAowIAAKgCAACtAgAAsAIAALQCAADOAgAA3gIAAAIDAAAiAwAANQMA" +
+      "AEkDAABdAwAAeAMAAIcDAACSAwAAlQMAAJ0DAACgAwAArQMAALUDAAC7AwAAywMAANUDAADcAwAA" +
+      "CQAAAAoAAAALAAAADAAAAA0AAAAOAAAADwAAABAAAAATAAAABwAAAAYAAAAAAAAACAAAAAcAAABs" +
+      "AgAACAAAAAcAAAB0AgAAEwAAAAgAAAAAAAAAAAAFAAQAAAAAAAUABQAAAAAABQAGAAAAAAADAAIA" +
+      "AAAAAAMAAwAAAAAAAAAZAAAABAAAABoAAAAFAAMAAwAAAAcAAwADAAAABwABABcAAAAHAAIAFwAA" +
+      "AAcAAAAaAAAAAAAAAAEAAAAFAAAAAAAAABEAAACQBAAAYwQAAAAAAAAFAAAAAgAAAGgCAAA2AAAA" +
+      "HAAAAG4QAwAAAAwAYgECAGICAABiAwEAIgQHAHAQBQAEAG4gBwAEABoAFABuIAcABABuIAYAFAAa" +
+      "AAAAbiAHAAQAbiAGACQAGgABAG4gBwAEAG4gBgA0ABoAFQBuIAcABABuEAgABAAMABEAAAAAAAAA" +
+      "AABgAgAAAQAAAA4AAAABAAEAAQAAAGQCAAAEAAAAcBAEAAAADgAIAA4ABwAOAA4ADgABAAAABQAA" +
+      "AAEAAAAGAAcsIEJBUjogAAcsIEJBWjogAAg8Y2xpbml0PgAGPGluaXQ+AANCQVIAA0JBWgADRk9P" +
+      "AAFMAAJMTAAYTGFydC9UZXN0MTk3OSRUcmFuc2Zvcm07AA5MYXJ0L1Rlc3QxOTc5OwAiTGRhbHZp" +
+      "ay9hbm5vdGF0aW9uL0VuY2xvc2luZ0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0aW9uL0lubmVyQ2xh" +
+      "c3M7ABFMamF2YS9sYW5nL0NsYXNzOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0" +
+      "cmluZzsAGUxqYXZhL2xhbmcvU3RyaW5nQnVpbGRlcjsADVRlc3QxOTc5LmphdmEACVRyYW5zZm9y" +
+      "bQABVgAGW0ZPTzogAAFdAAthY2Nlc3NGbGFncwAGYXBwZW5kAARuYW1lAA5zdGF0aWNUb1N0cmlu" +
+      "ZwAIdG9TdHJpbmcABXZhbHVlAHZ+fkQ4eyJjb21waWxhdGlvbi1tb2RlIjoiZGVidWciLCJtaW4t" +
+      "YXBpIjoxLCJzaGEtMSI6ImE4MzUyZjI1NDg4NTM2MmNjZDhkOTA5ZDM1MjljNjAwOTRkZDg5NmUi" +
+      "LCJ2ZXJzaW9uIjoiMS42LjIwLWRldiJ9AAICARsYAQIDAhYECRgXEgMAAwAACQEJAQkAiIAEtAQB" +
+      "gYAEyAQBCbgDAAAAAAAAAAIAAABUBAAAWgQAAIQEAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAEAAAAA" +
+      "AAAAAQAAAB0AAABwAAAAAgAAAAkAAADkAAAAAwAAAAQAAAAIAQAABAAAAAMAAAA4AQAABQAAAAkA" +
+      "AABQAQAABgAAAAEAAACYAQAAASAAAAMAAAC4AQAAAyAAAAMAAABgAgAAARAAAAIAAABsAgAAAiAA" +
+      "AB0AAAB6AgAABCAAAAIAAABUBAAAACAAAAEAAABjBAAAAxAAAAIAAACABAAABiAAAAEAAACQBAAA" +
+      "ABAAAAEAAACgBAAA");
+
+  public interface TRunnable {
+    public void run() throws Exception;
+  }
+
+  public static void doTest() throws Exception {
+    final CountDownLatch cdl = new CountDownLatch(1);
+    final CountDownLatch continueLatch = new CountDownLatch(1);
+    // Make sure the transformed class is already loaded before we start running (and possibly
+    // compiling) the test thread.
+    System.out.println("Hitting class " + Transform.staticToString());
+    Thread t = new Thread(() -> {
+      try {
+        // We don't want to read these in the same method here to ensure that no reference to
+        // Transform is active on this thread at the time the redefinition occurs. To accomplish
+        // this just run the code in a different method, which is good enough.
+        ((TRunnable)() -> {
+          System.out.println("Initial: " + Transform.staticToString());
+          readReflective("Reading with reflection.");
+          System.out.println("Reading normally.");
+          System.out.println("Read BAR field: " + printGeneric(Transform.BAR));
+          System.out.println("Read FOO field: " + printGeneric(Transform.FOO));
+        }).run();
+        cdl.countDown();
+        continueLatch.await();
+        // Now that redefinition has occurred without this frame having any references to the
+        // Transform class we want to make sure we have the correct offsets.
+        System.out.println("Redefined: " + Transform.staticToString());
+        readReflective("Reading with reflection after possible modification.");
+        System.out.println("Reading normally after possible modification.");
+        System.out.println("Read FOO field: " + printGeneric(Transform.FOO));
+        System.out.println("Read BAR field: " + printGeneric(Transform.BAR));
+      } catch (Exception e) {
+        throw new Error(e);
+      }
+    });
+    t.start();
+    cdl.await();
+    doRedefinition();
+    continueLatch.countDown();
+    t.join();
+  }
+}
diff --git a/test/1980-obsolete-object-cleared/expected.txt b/test/1980-obsolete-object-cleared/expected.txt
new file mode 100644
index 0000000..77569ee
--- /dev/null
+++ b/test/1980-obsolete-object-cleared/expected.txt
@@ -0,0 +1,440 @@
+JNI_OnLoad called
+Reading normally.
+	Original secret number is: 42
+	Original secret array is: [1, 2, 3, 4]
+Using unsafe to access values directly from memory.
+	Original secret number is: 42
+	Original secret array is: [1, 2, 3, 4]
+Reading normally post redefinition.
+	Post-redefinition secret number is: 42
+	Post-redefinition secret array is: [1, 2, 3, 4]
+Obsolete class is: class Main$Transform
+Using unsafe to access obsolete values directly from memory.
+	Obsolete secret number is: 0
+	Obsolete secret array is: null
+
+
+Using obsolete class object!
+
+
+Calling public java.lang.Class java.lang.Class.asSubclass(java.lang.Class) with params: [[null, class java.lang.Object, (obsolete)class Main$Transform, class Main$Transform, long, class java.lang.Class]]
+public java.lang.Class java.lang.Class.asSubclass(java.lang.Class) with [null] throws java.lang.reflect.InvocationTargetException: java.lang.NullPointerException: Attempt to invoke virtual method 'boolean java.lang.Class.isAssignableFrom(java.lang.Class)' on a null object reference
+public java.lang.Class java.lang.Class.asSubclass(java.lang.Class) on (obsolete)class Main$Transform with [class java.lang.Object] = (obsolete)class Main$Transform
+public java.lang.Class java.lang.Class.asSubclass(java.lang.Class) on (obsolete)class Main$Transform with [(obsolete)class Main$Transform] = (obsolete)class Main$Transform
+public java.lang.Class java.lang.Class.asSubclass(java.lang.Class) with [class Main$Transform] throws java.lang.reflect.InvocationTargetException: java.lang.ClassCastException: class Main$Transform cannot be cast to Main$Transform
+public java.lang.Class java.lang.Class.asSubclass(java.lang.Class) with [long] throws java.lang.reflect.InvocationTargetException: java.lang.ClassCastException: class Main$Transform cannot be cast to long
+public java.lang.Class java.lang.Class.asSubclass(java.lang.Class) with [class java.lang.Class] throws java.lang.reflect.InvocationTargetException: java.lang.ClassCastException: class Main$Transform cannot be cast to java.lang.Class
+Calling public java.lang.Object java.lang.Class.cast(java.lang.Object) with params: [[null, foo, NOT_USED_STRING, class Main$Transform]]
+public java.lang.Object java.lang.Class.cast(java.lang.Object) on (obsolete)class Main$Transform with [null] = null
+public java.lang.Object java.lang.Class.cast(java.lang.Object) with [foo] throws java.lang.reflect.InvocationTargetException: java.lang.ClassCastException: Cannot cast java.lang.String to Main$Transform
+public java.lang.Object java.lang.Class.cast(java.lang.Object) with [NOT_USED_STRING] throws java.lang.reflect.InvocationTargetException: java.lang.ClassCastException: Cannot cast java.lang.String to Main$Transform
+public java.lang.Object java.lang.Class.cast(java.lang.Object) with [class Main$Transform] throws java.lang.reflect.InvocationTargetException: java.lang.ClassCastException: Cannot cast java.lang.Class to Main$Transform
+Calling public boolean java.lang.Class.desiredAssertionStatus() with params: []
+public boolean java.lang.Class.desiredAssertionStatus() on (obsolete)class Main$Transform with [] = false
+Calling public int java.lang.Class.getAccessFlags() with params: []
+public int java.lang.Class.getAccessFlags() on (obsolete)class Main$Transform with [] = 2621441
+Calling public java.lang.annotation.Annotation java.lang.Class.getAnnotation(java.lang.Class) with params: [[null, class java.lang.Object, (obsolete)class Main$Transform, class Main$Transform, long, class java.lang.Class]]
+public java.lang.annotation.Annotation java.lang.Class.getAnnotation(java.lang.Class) with [null] throws java.lang.reflect.InvocationTargetException: java.lang.NullPointerException
+public java.lang.annotation.Annotation java.lang.Class.getAnnotation(java.lang.Class) with [class java.lang.Object] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.annotation.Annotation java.lang.Class.getAnnotation(java.lang.Class) with [(obsolete)class Main$Transform] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.annotation.Annotation java.lang.Class.getAnnotation(java.lang.Class) with [class Main$Transform] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.annotation.Annotation java.lang.Class.getAnnotation(java.lang.Class) with [long] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.annotation.Annotation java.lang.Class.getAnnotation(java.lang.Class) with [class java.lang.Class] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.annotation.Annotation[] java.lang.Class.getAnnotations() with params: []
+public java.lang.annotation.Annotation[] java.lang.Class.getAnnotations() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.annotation.Annotation[] java.lang.Class.getAnnotationsByType(java.lang.Class) with params: [[null, class java.lang.Object, (obsolete)class Main$Transform, class Main$Transform, long, class java.lang.Class]]
+public java.lang.annotation.Annotation[] java.lang.Class.getAnnotationsByType(java.lang.Class) with [null] throws java.lang.reflect.InvocationTargetException: java.lang.NullPointerException: annotationClass
+public java.lang.annotation.Annotation[] java.lang.Class.getAnnotationsByType(java.lang.Class) with [class java.lang.Object] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.annotation.Annotation[] java.lang.Class.getAnnotationsByType(java.lang.Class) with [(obsolete)class Main$Transform] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.annotation.Annotation[] java.lang.Class.getAnnotationsByType(java.lang.Class) with [class Main$Transform] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.annotation.Annotation[] java.lang.Class.getAnnotationsByType(java.lang.Class) with [long] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.annotation.Annotation[] java.lang.Class.getAnnotationsByType(java.lang.Class) with [class java.lang.Class] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.String java.lang.Class.getCanonicalName() with params: []
+public java.lang.String java.lang.Class.getCanonicalName() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.ClassLoader java.lang.Class.getClassLoader() with params: []
+public java.lang.ClassLoader java.lang.Class.getClassLoader() on (obsolete)class Main$Transform with [] = dalvik.system.PathClassLoader
+Calling public java.lang.Class[] java.lang.Class.getClasses() with params: []
+public java.lang.Class[] java.lang.Class.getClasses() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.Class java.lang.Class.getComponentType() with params: []
+public java.lang.Class java.lang.Class.getComponentType() on (obsolete)class Main$Transform with [] = null
+Calling public java.lang.reflect.Constructor java.lang.Class.getConstructor(java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with params: [[new java.lang.Object[0], new java.lang.Class[0], null]]
+public java.lang.reflect.Constructor java.lang.Class.getConstructor(java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getConstructor argument 1 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Constructor java.lang.Class.getConstructor(java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [new java.lang.Class[0]] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.reflect.Constructor java.lang.Class.getConstructor(java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [null] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.reflect.Constructor[] java.lang.Class.getConstructors() throws java.lang.SecurityException with params: []
+public java.lang.reflect.Constructor[] java.lang.Class.getConstructors() throws java.lang.SecurityException with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public native java.lang.annotation.Annotation java.lang.Class.getDeclaredAnnotation(java.lang.Class) with params: [[null, class java.lang.Object, (obsolete)class Main$Transform, class Main$Transform, long, class java.lang.Class]]
+public native java.lang.annotation.Annotation java.lang.Class.getDeclaredAnnotation(java.lang.Class) with [null] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public native java.lang.annotation.Annotation java.lang.Class.getDeclaredAnnotation(java.lang.Class) with [class java.lang.Object] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public native java.lang.annotation.Annotation java.lang.Class.getDeclaredAnnotation(java.lang.Class) with [(obsolete)class Main$Transform] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public native java.lang.annotation.Annotation java.lang.Class.getDeclaredAnnotation(java.lang.Class) with [class Main$Transform] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public native java.lang.annotation.Annotation java.lang.Class.getDeclaredAnnotation(java.lang.Class) with [long] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public native java.lang.annotation.Annotation java.lang.Class.getDeclaredAnnotation(java.lang.Class) with [class java.lang.Class] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public native java.lang.annotation.Annotation[] java.lang.Class.getDeclaredAnnotations() with params: []
+public native java.lang.annotation.Annotation[] java.lang.Class.getDeclaredAnnotations() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public native java.lang.Class[] java.lang.Class.getDeclaredClasses() with params: []
+public native java.lang.Class[] java.lang.Class.getDeclaredClasses() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.reflect.Constructor java.lang.Class.getDeclaredConstructor(java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with params: [[new java.lang.Object[0], new java.lang.Class[0], null]]
+public java.lang.reflect.Constructor java.lang.Class.getDeclaredConstructor(java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getDeclaredConstructor argument 1 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Constructor java.lang.Class.getDeclaredConstructor(java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [new java.lang.Class[0]] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.reflect.Constructor java.lang.Class.getDeclaredConstructor(java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [null] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.reflect.Constructor[] java.lang.Class.getDeclaredConstructors() throws java.lang.SecurityException with params: []
+public java.lang.reflect.Constructor[] java.lang.Class.getDeclaredConstructors() throws java.lang.SecurityException with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public native java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String) throws java.lang.NoSuchFieldException with params: [[NOT_USED_STRING, foo, SECRET_ARRAY]]
+public native java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String) throws java.lang.NoSuchFieldException with [NOT_USED_STRING] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public native java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String) throws java.lang.NoSuchFieldException with [foo] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public native java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String) throws java.lang.NoSuchFieldException with [SECRET_ARRAY] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public native java.lang.reflect.Field[] java.lang.Class.getDeclaredFields() with params: []
+public native java.lang.reflect.Field[] java.lang.Class.getDeclaredFields() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public native java.lang.reflect.Field[] java.lang.Class.getDeclaredFieldsUnchecked(boolean) with params: [[true, false]]
+public native java.lang.reflect.Field[] java.lang.Class.getDeclaredFieldsUnchecked(boolean) with [true] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public native java.lang.reflect.Field[] java.lang.Class.getDeclaredFieldsUnchecked(boolean) with [false] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with params: [[NOT_USED_STRING, foo, SECRET_ARRAY], [new java.lang.Object[0], new java.lang.Class[0], null]]
+public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [NOT_USED_STRING, new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getDeclaredMethod argument 2 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [NOT_USED_STRING, new java.lang.Class[0]] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [NOT_USED_STRING, null] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [foo, new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getDeclaredMethod argument 2 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [foo, new java.lang.Class[0]] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [foo, null] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [SECRET_ARRAY, new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getDeclaredMethod argument 2 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [SECRET_ARRAY, new java.lang.Class[0]] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [SECRET_ARRAY, null] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.reflect.Method[] java.lang.Class.getDeclaredMethods() throws java.lang.SecurityException with params: []
+public java.lang.reflect.Method[] java.lang.Class.getDeclaredMethods() throws java.lang.SecurityException with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public native java.lang.reflect.Method[] java.lang.Class.getDeclaredMethodsUnchecked(boolean) with params: [[true, false]]
+public native java.lang.reflect.Method[] java.lang.Class.getDeclaredMethodsUnchecked(boolean) with [true] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public native java.lang.reflect.Method[] java.lang.Class.getDeclaredMethodsUnchecked(boolean) with [false] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public native java.lang.Class java.lang.Class.getDeclaringClass() with params: []
+public native java.lang.Class java.lang.Class.getDeclaringClass() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public native java.lang.Class java.lang.Class.getEnclosingClass() with params: []
+public native java.lang.Class java.lang.Class.getEnclosingClass() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.reflect.Constructor java.lang.Class.getEnclosingConstructor() with params: []
+public java.lang.reflect.Constructor java.lang.Class.getEnclosingConstructor() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.reflect.Method java.lang.Class.getEnclosingMethod() with params: []
+public java.lang.reflect.Method java.lang.Class.getEnclosingMethod() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.Object[] java.lang.Class.getEnumConstants() with params: []
+public java.lang.Object[] java.lang.Class.getEnumConstants() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.Object[] java.lang.Class.getEnumConstantsShared() with params: []
+public java.lang.Object[] java.lang.Class.getEnumConstantsShared() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.reflect.Field java.lang.Class.getField(java.lang.String) throws java.lang.NoSuchFieldException with params: [[NOT_USED_STRING, foo, SECRET_ARRAY]]
+public java.lang.reflect.Field java.lang.Class.getField(java.lang.String) throws java.lang.NoSuchFieldException with [NOT_USED_STRING] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.reflect.Field java.lang.Class.getField(java.lang.String) throws java.lang.NoSuchFieldException with [foo] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.reflect.Field java.lang.Class.getField(java.lang.String) throws java.lang.NoSuchFieldException with [SECRET_ARRAY] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.reflect.Field[] java.lang.Class.getFields() throws java.lang.SecurityException with params: []
+public java.lang.reflect.Field[] java.lang.Class.getFields() throws java.lang.SecurityException with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.reflect.Type[] java.lang.Class.getGenericInterfaces() with params: []
+public java.lang.reflect.Type[] java.lang.Class.getGenericInterfaces() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.reflect.Type java.lang.Class.getGenericSuperclass() with params: []
+public java.lang.reflect.Type java.lang.Class.getGenericSuperclass() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException with params: [[NOT_USED_STRING, foo, SECRET_ARRAY], [new java.lang.Object[0], new java.lang.Class[0], null]]
+public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException with [NOT_USED_STRING, new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getInstanceMethod argument 2 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException with [NOT_USED_STRING, new java.lang.Class[0]] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException with [NOT_USED_STRING, null] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException with [foo, new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getInstanceMethod argument 2 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException with [foo, new java.lang.Class[0]] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException with [foo, null] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException with [SECRET_ARRAY, new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getInstanceMethod argument 2 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException with [SECRET_ARRAY, new java.lang.Class[0]] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException with [SECRET_ARRAY, null] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.Class[] java.lang.Class.getInterfaces() with params: []
+public java.lang.Class[] java.lang.Class.getInterfaces() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with params: [[NOT_USED_STRING, foo, SECRET_ARRAY], [new java.lang.Object[0], new java.lang.Class[0], null]]
+public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [NOT_USED_STRING, new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getMethod argument 2 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [NOT_USED_STRING, new java.lang.Class[0]] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [NOT_USED_STRING, null] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [foo, new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getMethod argument 2 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [foo, new java.lang.Class[0]] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [foo, null] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [SECRET_ARRAY, new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getMethod argument 2 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [SECRET_ARRAY, new java.lang.Class[0]] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [SECRET_ARRAY, null] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.reflect.Method[] java.lang.Class.getMethods() throws java.lang.SecurityException with params: []
+public java.lang.reflect.Method[] java.lang.Class.getMethods() throws java.lang.SecurityException with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public int java.lang.Class.getModifiers() with params: []
+public int java.lang.Class.getModifiers() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.String java.lang.Class.getName() with params: []
+public java.lang.String java.lang.Class.getName() on (obsolete)class Main$Transform with [] = Main$Transform
+Calling public java.lang.Package java.lang.Class.getPackage() with params: []
+public java.lang.Package java.lang.Class.getPackage() on (obsolete)class Main$Transform with [] = null
+Calling public java.lang.String java.lang.Class.getPackageName$() with params: []
+public java.lang.String java.lang.Class.getPackageName$() on (obsolete)class Main$Transform with [] = null
+Calling public java.security.ProtectionDomain java.lang.Class.getProtectionDomain() with params: []
+public java.security.ProtectionDomain java.lang.Class.getProtectionDomain() on (obsolete)class Main$Transform with [] = null
+Calling public java.net.URL java.lang.Class.getResource(java.lang.String) with params: [[NOT_USED_STRING, foo, SECRET_ARRAY]]
+public java.net.URL java.lang.Class.getResource(java.lang.String) on (obsolete)class Main$Transform with [NOT_USED_STRING] = null
+public java.net.URL java.lang.Class.getResource(java.lang.String) on (obsolete)class Main$Transform with [foo] = null
+public java.net.URL java.lang.Class.getResource(java.lang.String) on (obsolete)class Main$Transform with [SECRET_ARRAY] = null
+Calling public java.io.InputStream java.lang.Class.getResourceAsStream(java.lang.String) with params: [[NOT_USED_STRING, foo, SECRET_ARRAY]]
+public java.io.InputStream java.lang.Class.getResourceAsStream(java.lang.String) on (obsolete)class Main$Transform with [NOT_USED_STRING] = null
+public java.io.InputStream java.lang.Class.getResourceAsStream(java.lang.String) on (obsolete)class Main$Transform with [foo] = null
+public java.io.InputStream java.lang.Class.getResourceAsStream(java.lang.String) on (obsolete)class Main$Transform with [SECRET_ARRAY] = null
+Calling public java.lang.Object[] java.lang.Class.getSigners() with params: []
+public java.lang.Object[] java.lang.Class.getSigners() on (obsolete)class Main$Transform with [] = null
+Calling public java.lang.String java.lang.Class.getSimpleName() with params: []
+public java.lang.String java.lang.Class.getSimpleName() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.Class java.lang.Class.getSuperclass() with params: []
+public java.lang.Class java.lang.Class.getSuperclass() on (obsolete)class Main$Transform with [] = class java.lang.Object
+Calling public java.lang.String java.lang.Class.getTypeName() with params: []
+public java.lang.String java.lang.Class.getTypeName() on (obsolete)class Main$Transform with [] = Main$Transform
+Calling public synchronized java.lang.reflect.TypeVariable[] java.lang.Class.getTypeParameters() with params: []
+public synchronized java.lang.reflect.TypeVariable[] java.lang.Class.getTypeParameters() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public boolean java.lang.Class.isAnnotation() with params: []
+public boolean java.lang.Class.isAnnotation() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public boolean java.lang.Class.isAnnotationPresent(java.lang.Class) with params: [[null, class java.lang.Object, (obsolete)class Main$Transform, class Main$Transform, long, class java.lang.Class]]
+public boolean java.lang.Class.isAnnotationPresent(java.lang.Class) with [null] throws java.lang.reflect.InvocationTargetException: java.lang.NullPointerException: annotationClass == null
+public boolean java.lang.Class.isAnnotationPresent(java.lang.Class) with [class java.lang.Object] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public boolean java.lang.Class.isAnnotationPresent(java.lang.Class) with [(obsolete)class Main$Transform] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public boolean java.lang.Class.isAnnotationPresent(java.lang.Class) with [class Main$Transform] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public boolean java.lang.Class.isAnnotationPresent(java.lang.Class) with [long] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public boolean java.lang.Class.isAnnotationPresent(java.lang.Class) with [class java.lang.Class] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public native boolean java.lang.Class.isAnonymousClass() with params: []
+public native boolean java.lang.Class.isAnonymousClass() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public boolean java.lang.Class.isArray() with params: []
+public boolean java.lang.Class.isArray() on (obsolete)class Main$Transform with [] = false
+Calling public boolean java.lang.Class.isAssignableFrom(java.lang.Class) with params: [[null, class java.lang.Object, (obsolete)class Main$Transform, class Main$Transform, long, class java.lang.Class]]
+public boolean java.lang.Class.isAssignableFrom(java.lang.Class) with [null] throws java.lang.reflect.InvocationTargetException: java.lang.NullPointerException: Attempt to invoke virtual method 'boolean java.lang.Class.isInterface()' on a null object reference
+public boolean java.lang.Class.isAssignableFrom(java.lang.Class) on (obsolete)class Main$Transform with [class java.lang.Object] = false
+public boolean java.lang.Class.isAssignableFrom(java.lang.Class) on (obsolete)class Main$Transform with [(obsolete)class Main$Transform] = true
+public boolean java.lang.Class.isAssignableFrom(java.lang.Class) on (obsolete)class Main$Transform with [class Main$Transform] = false
+public boolean java.lang.Class.isAssignableFrom(java.lang.Class) on (obsolete)class Main$Transform with [long] = false
+public boolean java.lang.Class.isAssignableFrom(java.lang.Class) on (obsolete)class Main$Transform with [class java.lang.Class] = false
+Calling public boolean java.lang.Class.isEnum() with params: []
+public boolean java.lang.Class.isEnum() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public boolean java.lang.Class.isFinalizable() with params: []
+public boolean java.lang.Class.isFinalizable() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public boolean java.lang.Class.isInstance(java.lang.Object) with params: [[null, foo, NOT_USED_STRING, class Main$Transform]]
+public boolean java.lang.Class.isInstance(java.lang.Object) on (obsolete)class Main$Transform with [null] = false
+public boolean java.lang.Class.isInstance(java.lang.Object) on (obsolete)class Main$Transform with [foo] = false
+public boolean java.lang.Class.isInstance(java.lang.Object) on (obsolete)class Main$Transform with [NOT_USED_STRING] = false
+public boolean java.lang.Class.isInstance(java.lang.Object) on (obsolete)class Main$Transform with [class Main$Transform] = false
+Calling public boolean java.lang.Class.isInterface() with params: []
+public boolean java.lang.Class.isInterface() on (obsolete)class Main$Transform with [] = false
+Calling public boolean java.lang.Class.isLocalClass() with params: []
+public boolean java.lang.Class.isLocalClass() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public boolean java.lang.Class.isMemberClass() with params: []
+public boolean java.lang.Class.isMemberClass() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public boolean java.lang.Class.isPrimitive() with params: []
+public boolean java.lang.Class.isPrimitive() on (obsolete)class Main$Transform with [] = false
+Calling public boolean java.lang.Class.isProxy() with params: []
+public boolean java.lang.Class.isProxy() on (obsolete)class Main$Transform with [] = false
+Calling public boolean java.lang.Class.isSynthetic() with params: []
+public boolean java.lang.Class.isSynthetic() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public native java.lang.Object java.lang.Class.newInstance() throws java.lang.InstantiationException,java.lang.IllegalAccessException with params: []
+public native java.lang.Object java.lang.Class.newInstance() throws java.lang.InstantiationException,java.lang.IllegalAccessException with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.String java.lang.Class.toGenericString() with params: []
+public java.lang.String java.lang.Class.toGenericString() with [] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+Calling public java.lang.String java.lang.Class.toString() with params: []
+public java.lang.String java.lang.Class.toString() on (obsolete)class Main$Transform with [] = class Main$Transform
+
+
+Using non-obsolete class object!
+
+
+Calling public java.lang.Class java.lang.Class.asSubclass(java.lang.Class) with params: [[null, class java.lang.Object, (obsolete)class Main$Transform, class Main$Transform, long, class java.lang.Class]]
+public java.lang.Class java.lang.Class.asSubclass(java.lang.Class) with [null] throws java.lang.reflect.InvocationTargetException: java.lang.NullPointerException: Attempt to invoke virtual method 'boolean java.lang.Class.isAssignableFrom(java.lang.Class)' on a null object reference
+public java.lang.Class java.lang.Class.asSubclass(java.lang.Class) on class Main$Transform with [class java.lang.Object] = class Main$Transform
+public java.lang.Class java.lang.Class.asSubclass(java.lang.Class) with [(obsolete)class Main$Transform] throws java.lang.reflect.InvocationTargetException: java.lang.ClassCastException: class Main$Transform cannot be cast to Main$Transform
+public java.lang.Class java.lang.Class.asSubclass(java.lang.Class) on class Main$Transform with [class Main$Transform] = class Main$Transform
+public java.lang.Class java.lang.Class.asSubclass(java.lang.Class) with [long] throws java.lang.reflect.InvocationTargetException: java.lang.ClassCastException: class Main$Transform cannot be cast to long
+public java.lang.Class java.lang.Class.asSubclass(java.lang.Class) with [class java.lang.Class] throws java.lang.reflect.InvocationTargetException: java.lang.ClassCastException: class Main$Transform cannot be cast to java.lang.Class
+Calling public java.lang.Object java.lang.Class.cast(java.lang.Object) with params: [[null, foo, NOT_USED_STRING, class Main$Transform]]
+public java.lang.Object java.lang.Class.cast(java.lang.Object) on class Main$Transform with [null] = null
+public java.lang.Object java.lang.Class.cast(java.lang.Object) with [foo] throws java.lang.reflect.InvocationTargetException: java.lang.ClassCastException: Cannot cast java.lang.String to Main$Transform
+public java.lang.Object java.lang.Class.cast(java.lang.Object) with [NOT_USED_STRING] throws java.lang.reflect.InvocationTargetException: java.lang.ClassCastException: Cannot cast java.lang.String to Main$Transform
+public java.lang.Object java.lang.Class.cast(java.lang.Object) with [class Main$Transform] throws java.lang.reflect.InvocationTargetException: java.lang.ClassCastException: Cannot cast java.lang.Class to Main$Transform
+Calling public boolean java.lang.Class.desiredAssertionStatus() with params: []
+public boolean java.lang.Class.desiredAssertionStatus() on class Main$Transform with [] = false
+Calling public int java.lang.Class.getAccessFlags() with params: []
+public int java.lang.Class.getAccessFlags() on class Main$Transform with [] = 524289
+Calling public java.lang.annotation.Annotation java.lang.Class.getAnnotation(java.lang.Class) with params: [[null, class java.lang.Object, (obsolete)class Main$Transform, class Main$Transform, long, class java.lang.Class]]
+public java.lang.annotation.Annotation java.lang.Class.getAnnotation(java.lang.Class) with [null] throws java.lang.reflect.InvocationTargetException: java.lang.NullPointerException
+public java.lang.annotation.Annotation java.lang.Class.getAnnotation(java.lang.Class) on class Main$Transform with [class java.lang.Object] = null
+public java.lang.annotation.Annotation java.lang.Class.getAnnotation(java.lang.Class) with [(obsolete)class Main$Transform] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.annotation.Annotation java.lang.Class.getAnnotation(java.lang.Class) on class Main$Transform with [class Main$Transform] = null
+public java.lang.annotation.Annotation java.lang.Class.getAnnotation(java.lang.Class) on class Main$Transform with [long] = null
+public java.lang.annotation.Annotation java.lang.Class.getAnnotation(java.lang.Class) on class Main$Transform with [class java.lang.Class] = null
+Calling public java.lang.annotation.Annotation[] java.lang.Class.getAnnotations() with params: []
+public java.lang.annotation.Annotation[] java.lang.Class.getAnnotations() on class Main$Transform with [] = []
+Calling public java.lang.annotation.Annotation[] java.lang.Class.getAnnotationsByType(java.lang.Class) with params: [[null, class java.lang.Object, (obsolete)class Main$Transform, class Main$Transform, long, class java.lang.Class]]
+public java.lang.annotation.Annotation[] java.lang.Class.getAnnotationsByType(java.lang.Class) with [null] throws java.lang.reflect.InvocationTargetException: java.lang.NullPointerException: annotationClass
+public java.lang.annotation.Annotation[] java.lang.Class.getAnnotationsByType(java.lang.Class) with [class java.lang.Object] throws java.lang.reflect.InvocationTargetException: java.lang.ClassCastException: java.lang.Object[] cannot be cast to java.lang.annotation.Annotation[]
+public java.lang.annotation.Annotation[] java.lang.Class.getAnnotationsByType(java.lang.Class) with [(obsolete)class Main$Transform] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public java.lang.annotation.Annotation[] java.lang.Class.getAnnotationsByType(java.lang.Class) with [class Main$Transform] throws java.lang.reflect.InvocationTargetException: java.lang.ClassCastException: Main$Transform[] cannot be cast to java.lang.annotation.Annotation[]
+public java.lang.annotation.Annotation[] java.lang.Class.getAnnotationsByType(java.lang.Class) with [long] throws java.lang.reflect.InvocationTargetException: java.lang.ClassCastException: long[] cannot be cast to java.lang.annotation.Annotation[]
+public java.lang.annotation.Annotation[] java.lang.Class.getAnnotationsByType(java.lang.Class) with [class java.lang.Class] throws java.lang.reflect.InvocationTargetException: java.lang.ClassCastException: java.lang.Class[] cannot be cast to java.lang.annotation.Annotation[]
+Calling public java.lang.String java.lang.Class.getCanonicalName() with params: []
+public java.lang.String java.lang.Class.getCanonicalName() on class Main$Transform with [] = Main.Transform
+Calling public java.lang.ClassLoader java.lang.Class.getClassLoader() with params: []
+public java.lang.ClassLoader java.lang.Class.getClassLoader() on class Main$Transform with [] = dalvik.system.PathClassLoader
+Calling public java.lang.Class[] java.lang.Class.getClasses() with params: []
+public java.lang.Class[] java.lang.Class.getClasses() on class Main$Transform with [] = []
+Calling public java.lang.Class java.lang.Class.getComponentType() with params: []
+public java.lang.Class java.lang.Class.getComponentType() on class Main$Transform with [] = null
+Calling public java.lang.reflect.Constructor java.lang.Class.getConstructor(java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with params: [[new java.lang.Object[0], new java.lang.Class[0], null]]
+public java.lang.reflect.Constructor java.lang.Class.getConstructor(java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getConstructor argument 1 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Constructor java.lang.Class.getConstructor(java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException on class Main$Transform with [new java.lang.Class[0]] = public Main$Transform()
+public java.lang.reflect.Constructor java.lang.Class.getConstructor(java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException on class Main$Transform with [null] = public Main$Transform()
+Calling public java.lang.reflect.Constructor[] java.lang.Class.getConstructors() throws java.lang.SecurityException with params: []
+public java.lang.reflect.Constructor[] java.lang.Class.getConstructors() throws java.lang.SecurityException on class Main$Transform with [] = [public Main$Transform()]
+Calling public native java.lang.annotation.Annotation java.lang.Class.getDeclaredAnnotation(java.lang.Class) with params: [[null, class java.lang.Object, (obsolete)class Main$Transform, class Main$Transform, long, class java.lang.Class]]
+public native java.lang.annotation.Annotation java.lang.Class.getDeclaredAnnotation(java.lang.Class) with [null] throws java.lang.reflect.InvocationTargetException: java.lang.NullPointerException: annotationClass
+public native java.lang.annotation.Annotation java.lang.Class.getDeclaredAnnotation(java.lang.Class) on class Main$Transform with [class java.lang.Object] = null
+public native java.lang.annotation.Annotation java.lang.Class.getDeclaredAnnotation(java.lang.Class) on class Main$Transform with [(obsolete)class Main$Transform] = null
+public native java.lang.annotation.Annotation java.lang.Class.getDeclaredAnnotation(java.lang.Class) on class Main$Transform with [class Main$Transform] = null
+public native java.lang.annotation.Annotation java.lang.Class.getDeclaredAnnotation(java.lang.Class) on class Main$Transform with [long] = null
+public native java.lang.annotation.Annotation java.lang.Class.getDeclaredAnnotation(java.lang.Class) on class Main$Transform with [class java.lang.Class] = null
+Calling public native java.lang.annotation.Annotation[] java.lang.Class.getDeclaredAnnotations() with params: []
+public native java.lang.annotation.Annotation[] java.lang.Class.getDeclaredAnnotations() on class Main$Transform with [] = []
+Calling public native java.lang.Class[] java.lang.Class.getDeclaredClasses() with params: []
+public native java.lang.Class[] java.lang.Class.getDeclaredClasses() on class Main$Transform with [] = []
+Calling public java.lang.reflect.Constructor java.lang.Class.getDeclaredConstructor(java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with params: [[new java.lang.Object[0], new java.lang.Class[0], null]]
+public java.lang.reflect.Constructor java.lang.Class.getDeclaredConstructor(java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getDeclaredConstructor argument 1 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Constructor java.lang.Class.getDeclaredConstructor(java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException on class Main$Transform with [new java.lang.Class[0]] = public Main$Transform()
+public java.lang.reflect.Constructor java.lang.Class.getDeclaredConstructor(java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException on class Main$Transform with [null] = public Main$Transform()
+Calling public java.lang.reflect.Constructor[] java.lang.Class.getDeclaredConstructors() throws java.lang.SecurityException with params: []
+public java.lang.reflect.Constructor[] java.lang.Class.getDeclaredConstructors() throws java.lang.SecurityException on class Main$Transform with [] = [public Main$Transform()]
+Calling public native java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String) throws java.lang.NoSuchFieldException with params: [[NOT_USED_STRING, foo, SECRET_ARRAY]]
+public native java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String) throws java.lang.NoSuchFieldException with [NOT_USED_STRING] throws java.lang.reflect.InvocationTargetException: java.lang.NoSuchFieldException: No field NOT_USED_STRING in class LMain$Transform; (declaration of 'Main$Transform' appears in <transformed-jar>)
+public native java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String) throws java.lang.NoSuchFieldException with [foo] throws java.lang.reflect.InvocationTargetException: java.lang.NoSuchFieldException: No field foo in class LMain$Transform; (declaration of 'Main$Transform' appears in <transformed-jar>)
+public native java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String) throws java.lang.NoSuchFieldException on class Main$Transform with [SECRET_ARRAY] = public static java.lang.Object Main$Transform.SECRET_ARRAY
+Calling public native java.lang.reflect.Field[] java.lang.Class.getDeclaredFields() with params: []
+public native java.lang.reflect.Field[] java.lang.Class.getDeclaredFields() on class Main$Transform with [] = [public static java.lang.Object Main$Transform.AAA_PADDING, public static java.lang.Object Main$Transform.SECRET_ARRAY, public static long Main$Transform.SECRET_NUMBER]
+Calling public native java.lang.reflect.Field[] java.lang.Class.getDeclaredFieldsUnchecked(boolean) with params: [[true, false]]
+public native java.lang.reflect.Field[] java.lang.Class.getDeclaredFieldsUnchecked(boolean) on class Main$Transform with [true] = [public static java.lang.Object Main$Transform.AAA_PADDING, public static java.lang.Object Main$Transform.SECRET_ARRAY, public static long Main$Transform.SECRET_NUMBER]
+public native java.lang.reflect.Field[] java.lang.Class.getDeclaredFieldsUnchecked(boolean) on class Main$Transform with [false] = [public static java.lang.Object Main$Transform.AAA_PADDING, public static java.lang.Object Main$Transform.SECRET_ARRAY, public static long Main$Transform.SECRET_NUMBER]
+Calling public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with params: [[NOT_USED_STRING, foo, SECRET_ARRAY], [new java.lang.Object[0], new java.lang.Class[0], null]]
+public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [NOT_USED_STRING, new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getDeclaredMethod argument 2 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [NOT_USED_STRING, new java.lang.Class[0]] throws java.lang.reflect.InvocationTargetException: java.lang.NoSuchMethodException: Main$Transform.NOT_USED_STRING []
+public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [NOT_USED_STRING, null] throws java.lang.reflect.InvocationTargetException: java.lang.NoSuchMethodException: Main$Transform.NOT_USED_STRING []
+public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [foo, new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getDeclaredMethod argument 2 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException on class Main$Transform with [foo, new java.lang.Class[0]] = public static void Main$Transform.foo()
+public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException on class Main$Transform with [foo, null] = public static void Main$Transform.foo()
+public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [SECRET_ARRAY, new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getDeclaredMethod argument 2 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [SECRET_ARRAY, new java.lang.Class[0]] throws java.lang.reflect.InvocationTargetException: java.lang.NoSuchMethodException: Main$Transform.SECRET_ARRAY []
+public java.lang.reflect.Method java.lang.Class.getDeclaredMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [SECRET_ARRAY, null] throws java.lang.reflect.InvocationTargetException: java.lang.NoSuchMethodException: Main$Transform.SECRET_ARRAY []
+Calling public java.lang.reflect.Method[] java.lang.Class.getDeclaredMethods() throws java.lang.SecurityException with params: []
+public java.lang.reflect.Method[] java.lang.Class.getDeclaredMethods() throws java.lang.SecurityException on class Main$Transform with [] = [public static void Main$Transform.bar(), public static void Main$Transform.foo()]
+Calling public native java.lang.reflect.Method[] java.lang.Class.getDeclaredMethodsUnchecked(boolean) with params: [[true, false]]
+public native java.lang.reflect.Method[] java.lang.Class.getDeclaredMethodsUnchecked(boolean) on class Main$Transform with [true] = [public static void Main$Transform.bar(), public static void Main$Transform.foo()]
+public native java.lang.reflect.Method[] java.lang.Class.getDeclaredMethodsUnchecked(boolean) on class Main$Transform with [false] = [public static void Main$Transform.bar(), public static void Main$Transform.foo()]
+Calling public native java.lang.Class java.lang.Class.getDeclaringClass() with params: []
+public native java.lang.Class java.lang.Class.getDeclaringClass() on class Main$Transform with [] = class Main
+Calling public native java.lang.Class java.lang.Class.getEnclosingClass() with params: []
+public native java.lang.Class java.lang.Class.getEnclosingClass() on class Main$Transform with [] = class Main
+Calling public java.lang.reflect.Constructor java.lang.Class.getEnclosingConstructor() with params: []
+public java.lang.reflect.Constructor java.lang.Class.getEnclosingConstructor() on class Main$Transform with [] = null
+Calling public java.lang.reflect.Method java.lang.Class.getEnclosingMethod() with params: []
+public java.lang.reflect.Method java.lang.Class.getEnclosingMethod() on class Main$Transform with [] = null
+Calling public java.lang.Object[] java.lang.Class.getEnumConstants() with params: []
+public java.lang.Object[] java.lang.Class.getEnumConstants() on class Main$Transform with [] = null
+Calling public java.lang.Object[] java.lang.Class.getEnumConstantsShared() with params: []
+public java.lang.Object[] java.lang.Class.getEnumConstantsShared() on class Main$Transform with [] = null
+Calling public java.lang.reflect.Field java.lang.Class.getField(java.lang.String) throws java.lang.NoSuchFieldException with params: [[NOT_USED_STRING, foo, SECRET_ARRAY]]
+public java.lang.reflect.Field java.lang.Class.getField(java.lang.String) throws java.lang.NoSuchFieldException with [NOT_USED_STRING] throws java.lang.reflect.InvocationTargetException: java.lang.NoSuchFieldException: NOT_USED_STRING
+public java.lang.reflect.Field java.lang.Class.getField(java.lang.String) throws java.lang.NoSuchFieldException with [foo] throws java.lang.reflect.InvocationTargetException: java.lang.NoSuchFieldException: foo
+public java.lang.reflect.Field java.lang.Class.getField(java.lang.String) throws java.lang.NoSuchFieldException on class Main$Transform with [SECRET_ARRAY] = public static java.lang.Object Main$Transform.SECRET_ARRAY
+Calling public java.lang.reflect.Field[] java.lang.Class.getFields() throws java.lang.SecurityException with params: []
+public java.lang.reflect.Field[] java.lang.Class.getFields() throws java.lang.SecurityException on class Main$Transform with [] = [public static java.lang.Object Main$Transform.AAA_PADDING, public static java.lang.Object Main$Transform.SECRET_ARRAY, public static long Main$Transform.SECRET_NUMBER]
+Calling public java.lang.reflect.Type[] java.lang.Class.getGenericInterfaces() with params: []
+public java.lang.reflect.Type[] java.lang.Class.getGenericInterfaces() on class Main$Transform with [] = []
+Calling public java.lang.reflect.Type java.lang.Class.getGenericSuperclass() with params: []
+public java.lang.reflect.Type java.lang.Class.getGenericSuperclass() on class Main$Transform with [] = class java.lang.Object
+Calling public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException with params: [[NOT_USED_STRING, foo, SECRET_ARRAY], [new java.lang.Object[0], new java.lang.Class[0], null]]
+public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException with [NOT_USED_STRING, new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getInstanceMethod argument 2 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException on class Main$Transform with [NOT_USED_STRING, new java.lang.Class[0]] = null
+public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException on class Main$Transform with [NOT_USED_STRING, null] = null
+public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException with [foo, new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getInstanceMethod argument 2 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException on class Main$Transform with [foo, new java.lang.Class[0]] = null
+public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException on class Main$Transform with [foo, null] = null
+public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException with [SECRET_ARRAY, new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getInstanceMethod argument 2 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException on class Main$Transform with [SECRET_ARRAY, new java.lang.Class[0]] = null
+public java.lang.reflect.Method java.lang.Class.getInstanceMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.IllegalAccessException on class Main$Transform with [SECRET_ARRAY, null] = null
+Calling public java.lang.Class[] java.lang.Class.getInterfaces() with params: []
+public java.lang.Class[] java.lang.Class.getInterfaces() on class Main$Transform with [] = []
+Calling public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with params: [[NOT_USED_STRING, foo, SECRET_ARRAY], [new java.lang.Object[0], new java.lang.Class[0], null]]
+public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [NOT_USED_STRING, new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getMethod argument 2 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [NOT_USED_STRING, new java.lang.Class[0]] throws java.lang.reflect.InvocationTargetException: java.lang.NoSuchMethodException: Main$Transform.NOT_USED_STRING []
+public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [NOT_USED_STRING, null] throws java.lang.reflect.InvocationTargetException: java.lang.NoSuchMethodException: Main$Transform.NOT_USED_STRING []
+public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [foo, new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getMethod argument 2 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException on class Main$Transform with [foo, new java.lang.Class[0]] = public static void Main$Transform.foo()
+public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException on class Main$Transform with [foo, null] = public static void Main$Transform.foo()
+public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [SECRET_ARRAY, new java.lang.Object[0]] throws java.lang.IllegalArgumentException: method java.lang.Class.getMethod argument 2 has type java.lang.Class[], got java.lang.Object[]: null
+public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [SECRET_ARRAY, new java.lang.Class[0]] throws java.lang.reflect.InvocationTargetException: java.lang.NoSuchMethodException: Main$Transform.SECRET_ARRAY []
+public java.lang.reflect.Method java.lang.Class.getMethod(java.lang.String,java.lang.Class[]) throws java.lang.NoSuchMethodException,java.lang.SecurityException with [SECRET_ARRAY, null] throws java.lang.reflect.InvocationTargetException: java.lang.NoSuchMethodException: Main$Transform.SECRET_ARRAY []
+Calling public java.lang.reflect.Method[] java.lang.Class.getMethods() throws java.lang.SecurityException with params: []
+public java.lang.reflect.Method[] java.lang.Class.getMethods() throws java.lang.SecurityException on class Main$Transform with [] = [public static void Main$Transform.bar(), public boolean java.lang.Object.equals(java.lang.Object), public static void Main$Transform.foo(), public final java.lang.Class java.lang.Object.getClass(), public int java.lang.Object.hashCode(), public final native void java.lang.Object.notify(), public final native void java.lang.Object.notifyAll(), public java.lang.String java.lang.Object.toString(), public final void java.lang.Object.wait() throws java.lang.InterruptedException, public final void java.lang.Object.wait(long) throws java.lang.InterruptedException, public final native void java.lang.Object.wait(long,int) throws java.lang.InterruptedException]
+Calling public int java.lang.Class.getModifiers() with params: []
+public int java.lang.Class.getModifiers() on class Main$Transform with [] = 9
+Calling public java.lang.String java.lang.Class.getName() with params: []
+public java.lang.String java.lang.Class.getName() on class Main$Transform with [] = Main$Transform
+Calling public java.lang.Package java.lang.Class.getPackage() with params: []
+public java.lang.Package java.lang.Class.getPackage() on class Main$Transform with [] = null
+Calling public java.lang.String java.lang.Class.getPackageName$() with params: []
+public java.lang.String java.lang.Class.getPackageName$() on class Main$Transform with [] = null
+Calling public java.security.ProtectionDomain java.lang.Class.getProtectionDomain() with params: []
+public java.security.ProtectionDomain java.lang.Class.getProtectionDomain() on class Main$Transform with [] = null
+Calling public java.net.URL java.lang.Class.getResource(java.lang.String) with params: [[NOT_USED_STRING, foo, SECRET_ARRAY]]
+public java.net.URL java.lang.Class.getResource(java.lang.String) on class Main$Transform with [NOT_USED_STRING] = null
+public java.net.URL java.lang.Class.getResource(java.lang.String) on class Main$Transform with [foo] = null
+public java.net.URL java.lang.Class.getResource(java.lang.String) on class Main$Transform with [SECRET_ARRAY] = null
+Calling public java.io.InputStream java.lang.Class.getResourceAsStream(java.lang.String) with params: [[NOT_USED_STRING, foo, SECRET_ARRAY]]
+public java.io.InputStream java.lang.Class.getResourceAsStream(java.lang.String) on class Main$Transform with [NOT_USED_STRING] = null
+public java.io.InputStream java.lang.Class.getResourceAsStream(java.lang.String) on class Main$Transform with [foo] = null
+public java.io.InputStream java.lang.Class.getResourceAsStream(java.lang.String) on class Main$Transform with [SECRET_ARRAY] = null
+Calling public java.lang.Object[] java.lang.Class.getSigners() with params: []
+public java.lang.Object[] java.lang.Class.getSigners() on class Main$Transform with [] = null
+Calling public java.lang.String java.lang.Class.getSimpleName() with params: []
+public java.lang.String java.lang.Class.getSimpleName() on class Main$Transform with [] = Transform
+Calling public java.lang.Class java.lang.Class.getSuperclass() with params: []
+public java.lang.Class java.lang.Class.getSuperclass() on class Main$Transform with [] = class java.lang.Object
+Calling public java.lang.String java.lang.Class.getTypeName() with params: []
+public java.lang.String java.lang.Class.getTypeName() on class Main$Transform with [] = Main$Transform
+Calling public synchronized java.lang.reflect.TypeVariable[] java.lang.Class.getTypeParameters() with params: []
+public synchronized java.lang.reflect.TypeVariable[] java.lang.Class.getTypeParameters() on class Main$Transform with [] = []
+Calling public boolean java.lang.Class.isAnnotation() with params: []
+public boolean java.lang.Class.isAnnotation() on class Main$Transform with [] = false
+Calling public boolean java.lang.Class.isAnnotationPresent(java.lang.Class) with params: [[null, class java.lang.Object, (obsolete)class Main$Transform, class Main$Transform, long, class java.lang.Class]]
+public boolean java.lang.Class.isAnnotationPresent(java.lang.Class) with [null] throws java.lang.reflect.InvocationTargetException: java.lang.NullPointerException: annotationClass == null
+public boolean java.lang.Class.isAnnotationPresent(java.lang.Class) on class Main$Transform with [class java.lang.Object] = false
+public boolean java.lang.Class.isAnnotationPresent(java.lang.Class) with [(obsolete)class Main$Transform] throws java.lang.reflect.InvocationTargetException: java.lang.RuntimeException: Obsolete Object!
+public boolean java.lang.Class.isAnnotationPresent(java.lang.Class) on class Main$Transform with [class Main$Transform] = false
+public boolean java.lang.Class.isAnnotationPresent(java.lang.Class) on class Main$Transform with [long] = false
+public boolean java.lang.Class.isAnnotationPresent(java.lang.Class) on class Main$Transform with [class java.lang.Class] = false
+Calling public native boolean java.lang.Class.isAnonymousClass() with params: []
+public native boolean java.lang.Class.isAnonymousClass() on class Main$Transform with [] = false
+Calling public boolean java.lang.Class.isArray() with params: []
+public boolean java.lang.Class.isArray() on class Main$Transform with [] = false
+Calling public boolean java.lang.Class.isAssignableFrom(java.lang.Class) with params: [[null, class java.lang.Object, (obsolete)class Main$Transform, class Main$Transform, long, class java.lang.Class]]
+public boolean java.lang.Class.isAssignableFrom(java.lang.Class) with [null] throws java.lang.reflect.InvocationTargetException: java.lang.NullPointerException: Attempt to invoke virtual method 'boolean java.lang.Class.isInterface()' on a null object reference
+public boolean java.lang.Class.isAssignableFrom(java.lang.Class) on class Main$Transform with [class java.lang.Object] = false
+public boolean java.lang.Class.isAssignableFrom(java.lang.Class) on class Main$Transform with [(obsolete)class Main$Transform] = false
+public boolean java.lang.Class.isAssignableFrom(java.lang.Class) on class Main$Transform with [class Main$Transform] = true
+public boolean java.lang.Class.isAssignableFrom(java.lang.Class) on class Main$Transform with [long] = false
+public boolean java.lang.Class.isAssignableFrom(java.lang.Class) on class Main$Transform with [class java.lang.Class] = false
+Calling public boolean java.lang.Class.isEnum() with params: []
+public boolean java.lang.Class.isEnum() on class Main$Transform with [] = false
+Calling public boolean java.lang.Class.isFinalizable() with params: []
+public boolean java.lang.Class.isFinalizable() on class Main$Transform with [] = false
+Calling public boolean java.lang.Class.isInstance(java.lang.Object) with params: [[null, foo, NOT_USED_STRING, class Main$Transform]]
+public boolean java.lang.Class.isInstance(java.lang.Object) on class Main$Transform with [null] = false
+public boolean java.lang.Class.isInstance(java.lang.Object) on class Main$Transform with [foo] = false
+public boolean java.lang.Class.isInstance(java.lang.Object) on class Main$Transform with [NOT_USED_STRING] = false
+public boolean java.lang.Class.isInstance(java.lang.Object) on class Main$Transform with [class Main$Transform] = false
+Calling public boolean java.lang.Class.isInterface() with params: []
+public boolean java.lang.Class.isInterface() on class Main$Transform with [] = false
+Calling public boolean java.lang.Class.isLocalClass() with params: []
+public boolean java.lang.Class.isLocalClass() on class Main$Transform with [] = false
+Calling public boolean java.lang.Class.isMemberClass() with params: []
+public boolean java.lang.Class.isMemberClass() on class Main$Transform with [] = true
+Calling public boolean java.lang.Class.isPrimitive() with params: []
+public boolean java.lang.Class.isPrimitive() on class Main$Transform with [] = false
+Calling public boolean java.lang.Class.isProxy() with params: []
+public boolean java.lang.Class.isProxy() on class Main$Transform with [] = false
+Calling public boolean java.lang.Class.isSynthetic() with params: []
+public boolean java.lang.Class.isSynthetic() on class Main$Transform with [] = false
+Calling public native java.lang.Object java.lang.Class.newInstance() throws java.lang.InstantiationException,java.lang.IllegalAccessException with params: []
+public native java.lang.Object java.lang.Class.newInstance() throws java.lang.InstantiationException,java.lang.IllegalAccessException on class Main$Transform with [] = Transform Instance
+Calling public java.lang.String java.lang.Class.toGenericString() with params: []
+public java.lang.String java.lang.Class.toGenericString() on class Main$Transform with [] = public static class Main$Transform
+Calling public java.lang.String java.lang.Class.toString() with params: []
+public java.lang.String java.lang.Class.toString() on class Main$Transform with [] = class Main$Transform
diff --git a/test/1980-obsolete-object-cleared/info.txt b/test/1980-obsolete-object-cleared/info.txt
new file mode 100644
index 0000000..1c5ca82
--- /dev/null
+++ b/test/1980-obsolete-object-cleared/info.txt
@@ -0,0 +1,2 @@
+Test that obsoleted classes have their fields cleared and cannot be used to obtain
+obsolete/invalid reflection objects.
diff --git a/test/1980-obsolete-object-cleared/run b/test/1980-obsolete-object-cleared/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/1980-obsolete-object-cleared/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1980-obsolete-object-cleared/src/Main.java b/test/1980-obsolete-object-cleared/src/Main.java
new file mode 100644
index 0000000..514defc
--- /dev/null
+++ b/test/1980-obsolete-object-cleared/src/Main.java
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import art.*;
+import java.lang.ref.*;
+import java.lang.reflect.*;
+import java.util.*;
+import java.util.function.Consumer;
+import sun.misc.Unsafe;
+
+public class Main {
+  public static class Transform {
+    static {
+    }
+
+    public static Object SECRET_ARRAY = new byte[] {1, 2, 3, 4};
+    public static long SECRET_NUMBER = 42;
+
+    public static void foo() {}
+  }
+
+  /* Base64 for
+   * public static class Trasform {
+   *   static {}
+   *   public static Object AAA_PADDING;
+   *   public static Object SECRET_ARRAY;
+   *   public static long SECRET_NUMBER;
+   *   public static void foo() {}
+   *   public static void bar() {}
+   * }
+   */
+  public static final byte[] REDEFINED_DEX_FILE =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQDdmsOAlizFD4Ogb6+/mfSdVzhmL8e/mRcYBAAAcAAAAHhWNBIAAAAAAAAAAGADAAAU"
+                  + "AAAAcAAAAAcAAADAAAAAAQAAANwAAAADAAAA6AAAAAUAAAAAAQAAAQAAACgBAADQAgAASAEAAKwB"
+                  + "AAC2AQAAvgEAAMsBAADOAQAA4AEAAOgBAAAMAgAALAIAAEACAABLAgAAWQIAAGgCAABzAgAAdgIA"
+                  + "AIMCAACIAgAAjQIAAJMCAACaAgAAAwAAAAQAAAAFAAAABgAAAAcAAAAIAAAADQAAAA0AAAAGAAAA"
+                  + "AAAAAAEABQACAAAAAQAFAAoAAAABAAAACwAAAAEAAAAAAAAAAQAAAAEAAAABAAAADwAAAAEAAAAQ"
+                  + "AAAABQAAAAEAAAABAAAAAQAAAAUAAAAAAAAACQAAAFADAAAhAwAAAAAAAAAAAAAAAAAAmgEAAAEA"
+                  + "AAAOAAAAAQABAAEAAACeAQAABAAAAHAQBAAAAA4AAAAAAAAAAACiAQAAAQAAAA4AAAAAAAAAAAAA"
+                  + "AKYBAAABAAAADgAHAA4ABgAOAAsADgAKAA4AAAAIPGNsaW5pdD4ABjxpbml0PgALQUFBX1BBRERJ"
+                  + "TkcAAUoAEExNYWluJFRyYW5zZm9ybTsABkxNYWluOwAiTGRhbHZpay9hbm5vdGF0aW9uL0VuY2xv"
+                  + "c2luZ0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0aW9uL0lubmVyQ2xhc3M7ABJMamF2YS9sYW5nL09i"
+                  + "amVjdDsACU1haW4uamF2YQAMU0VDUkVUX0FSUkFZAA1TRUNSRVRfTlVNQkVSAAlUcmFuc2Zvcm0A"
+                  + "AVYAC2FjY2Vzc0ZsYWdzAANiYXIAA2ZvbwAEbmFtZQAFdmFsdWUAdn5+RDh7ImNvbXBpbGF0aW9u"
+                  + "LW1vZGUiOiJkZWJ1ZyIsIm1pbi1hcGkiOjEsInNoYS0xIjoiYTgzNTJmMjU0ODg1MzYyY2NkOGQ5"
+                  + "MDlkMzUyOWM2MDA5NGRkODk2ZSIsInZlcnNpb24iOiIxLjYuMjAtZGV2In0AAgMBEhgCAgQCDgQJ"
+                  + "ERcMAwAEAAAJAQkBCQCIgATIAgGBgATcAgEJ9AIBCYgDAAAAAAACAAAAEgMAABgDAABEAwAAAAAA"
+                  + "AAAAAAAAAAAADwAAAAAAAAABAAAAAAAAAAEAAAAUAAAAcAAAAAIAAAAHAAAAwAAAAAMAAAABAAAA"
+                  + "3AAAAAQAAAADAAAA6AAAAAUAAAAFAAAAAAEAAAYAAAABAAAAKAEAAAEgAAAEAAAASAEAAAMgAAAE"
+                  + "AAAAmgEAAAIgAAAUAAAArAEAAAQgAAACAAAAEgMAAAAgAAABAAAAIQMAAAMQAAACAAAAQAMAAAYg"
+                  + "AAABAAAAUAMAAAAQAAABAAAAYAMAAA==");
+
+  private interface TConsumer<T> {
+    public void accept(T t) throws Exception;
+  }
+
+  private interface ResetIterator<T> extends Iterator<T> {
+    public void reset();
+  }
+
+  private static final class BaseResetIter implements ResetIterator<Object[]> {
+    private boolean have_next = true;
+
+    public Object[] next() {
+      if (have_next) {
+        have_next = false;
+        return new Object[0];
+      } else {
+        throw new NoSuchElementException("only one element");
+      }
+    }
+
+    public boolean hasNext() {
+      return have_next;
+    }
+
+    public void reset() {
+      have_next = true;
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    System.loadLibrary(args[0]);
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+
+    // Get the Unsafe object.
+    Field f = Unsafe.class.getDeclaredField("THE_ONE");
+    f.setAccessible(true);
+    Unsafe u = (Unsafe) f.get(null);
+
+    // Get the offsets into the original Transform class of the fields
+    long off_secret_array = genericFieldOffset(Transform.class.getDeclaredField("SECRET_ARRAY"));
+    long off_secret_number = genericFieldOffset(Transform.class.getDeclaredField("SECRET_NUMBER"));
+
+    System.out.println("Reading normally.");
+    System.out.println("\tOriginal secret number is: " + Transform.SECRET_NUMBER);
+    System.out.println("\tOriginal secret array is: " + Arrays.toString((byte[])Transform.SECRET_ARRAY));
+    System.out.println("Using unsafe to access values directly from memory.");
+    System.out.println(
+        "\tOriginal secret number is: " + u.getLong(Transform.class, off_secret_number));
+    System.out.println(
+        "\tOriginal secret array is: "
+            + Arrays.toString((byte[]) u.getObject(Transform.class, off_secret_array)));
+
+    // Redefine in a way that changes the offsets.
+    Redefinition.doCommonStructuralClassRedefinition(Transform.class, REDEFINED_DEX_FILE);
+
+    // Make sure the value is the same.
+    System.out.println("Reading normally post redefinition.");
+    System.out.println("\tPost-redefinition secret number is: " + Transform.SECRET_NUMBER);
+    System.out.println("\tPost-redefinition secret array is: " + Arrays.toString((byte[])Transform.SECRET_ARRAY));
+
+    // Get the (old) obsolete class from the ClassExt
+    Field ext_field = Class.class.getDeclaredField("extData");
+    ext_field.setAccessible(true);
+    Object ext_data = ext_field.get(Transform.class);
+    Field oc_field = ext_data.getClass().getDeclaredField("obsoleteClass");
+    oc_field.setAccessible(true);
+    Class<?> obsolete_class = (Class<?>) oc_field.get(ext_data);
+
+    // Try reading the fields directly out of memory using unsafe.
+    System.out.println("Obsolete class is: " + obsolete_class);
+    System.out.println("Using unsafe to access obsolete values directly from memory.");
+    System.out.println(
+        "\tObsolete secret number is: " + u.getLong(obsolete_class, off_secret_number));
+    System.out.println(
+        "\tObsolete secret array is: "
+            + Arrays.toString((byte[]) u.getObject(obsolete_class, off_secret_array)));
+
+    // Try calling all the public, non-static methods on the obsolete class. Make sure we cannot get
+    // j.l.r.{Method,Field} objects or instances.
+    TConsumer<Class> cc =
+        (Class c) -> {
+          for (Method m : Class.class.getDeclaredMethods()) {
+            if (Modifier.isPublic(m.getModifiers()) && !Modifier.isStatic(m.getModifiers())) {
+              Iterable<Object[]> iter = CollectParameterValues(m, obsolete_class);
+              System.out.println("Calling " + m + " with params: " + iter);
+              for (Object[] arr : iter) {
+                try {
+                  System.out.println(
+                      m
+                          + " on "
+                          + safePrint(c)
+                          + " with "
+                          + deepPrint(arr)
+                          + " = "
+                          + safePrint(m.invoke(c, arr)));
+                } catch (Throwable e) {
+                  System.out.println(
+                      m + " with " + deepPrint(arr) + " throws " + safePrint(e) + ": " + safePrint(e.getCause()));
+                }
+              }
+            }
+          }
+        };
+    System.out.println("\n\nUsing obsolete class object!\n\n");
+    cc.accept(obsolete_class);
+    System.out.println("\n\nUsing non-obsolete class object!\n\n");
+    cc.accept(Transform.class);
+  }
+
+  public static Iterable<Object[]> CollectParameterValues(Method m, Class<?> obsolete_class) throws Exception {
+    Class<?>[] types = m.getParameterTypes();
+    final Object[][] params = new Object[types.length][];
+    for (int i = 0; i < types.length; i++) {
+      if (types[i].equals(Class.class)) {
+        params[i] =
+            new Object[] {
+              null, Object.class, obsolete_class, Transform.class, Long.TYPE, Class.class
+            };
+      } else if (types[i].equals(Boolean.TYPE)) {
+        params[i] = new Object[] {Boolean.TRUE, Boolean.FALSE};
+      } else if (types[i].equals(String.class)) {
+        params[i] = new Object[] {"NOT_USED_STRING", "foo", "SECRET_ARRAY"};
+      } else if (types[i].equals(Object.class)) {
+        params[i] = new Object[] {null, "foo", "NOT_USED_STRING", Transform.class};
+      } else if (types[i].isArray()) {
+        params[i] = new Object[] {new Object[0], new Class[0], null};
+      } else {
+        throw new Exception("Unknown type " + types[i] + " at " + i + " in " + m);
+      }
+    }
+    // Build the reset-iter.
+    ResetIterator<Object[]> iter = new BaseResetIter();
+    for (int i = params.length - 1; i >= 0; i--) {
+      iter = new ComboIter(Arrays.asList(params[i]), iter);
+    }
+    final Iterator<Object[]> fiter = iter;
+    // Wrap in an iterator with a useful toString method.
+    return new Iterable<Object[]>() {
+      public Iterator<Object[]> iterator() { return fiter; }
+      public String toString() { return deepPrint(params); }
+    };
+  }
+
+  public static String deepPrint(Object[] o) {
+    return Arrays.toString(
+        Arrays.stream(o)
+            .map(
+                (x) -> {
+                  if (x == null) {
+                    return "null";
+                  } else if (x.getClass().isArray()) {
+                    if (((Object[]) x).length == 0) {
+                      return "new " + x.getClass().getComponentType().getName() + "[0]";
+                    } else {
+                      return deepPrint((Object[]) x);
+                    }
+                  } else {
+                    return safePrint(x);
+                  }
+                })
+            .toArray());
+  }
+
+  public static String safePrint(Object o) {
+    if (o instanceof ClassLoader) {
+      return o.getClass().getName();
+    } else if (o == null) {
+      return "null";
+    } else if (o instanceof Exception) {
+      String res = o.toString();
+      if (res.endsWith("-transformed)")) {
+        res = res.substring(0, res.lastIndexOf(" ")) + " <transformed-jar>)";
+      } else if (res.endsWith(".jar)")) {
+        res = res.substring(0, res.lastIndexOf(" ")) + " <original-jar>)";
+      }
+      return res;
+    } else if (o instanceof Transform) {
+      return "Transform Instance";
+    } else if (o instanceof Class && isObsoleteObject((Class) o)) {
+      return "(obsolete)" + o.toString();
+    } else if (o.getClass().isArray()) {
+      return Arrays.toString((Object[])o);
+    } else {
+      return o.toString();
+    }
+  }
+
+  private static class ComboIter implements ResetIterator<Object[]> {
+    private ResetIterator<Object[]> next;
+    private Object cur;
+    private boolean first;
+    private Iterator<Object> my_vals;
+    private Iterable<Object> my_vals_reset;
+
+    public Object[] next() {
+      if (!next.hasNext()) {
+        cur = my_vals.next();
+        first = false;
+        if (next != null) {
+          next.reset();
+        }
+      }
+      if (first) {
+        first = false;
+        cur = my_vals.next();
+      }
+      Object[] nv = next.next();
+      Object[] res = new Object[nv.length + 1];
+      res[0] = cur;
+      for (int i = 0; i < nv.length; i++) {
+        res[i + 1] = nv[i];
+      }
+      return res;
+    }
+
+    public boolean hasNext() {
+      return next.hasNext() || my_vals.hasNext();
+    }
+
+    public void reset() {
+      my_vals = my_vals_reset.iterator();
+      next.reset();
+      cur = null;
+      first = true;
+    }
+
+    public ComboIter(Iterable<Object> this_reset, ResetIterator<Object[]> next_reset) {
+      my_vals_reset = this_reset;
+      next = next_reset;
+      reset();
+    }
+  }
+
+  public static native long genericFieldOffset(Field f);
+
+  public static native boolean isObsoleteObject(Class c);
+}
diff --git a/test/1980-obsolete-object-cleared/src/art/Redefinition.java b/test/1980-obsolete-object-cleared/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1980-obsolete-object-cleared/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1981-structural-redef-private-method-handles/build b/test/1981-structural-redef-private-method-handles/build
new file mode 100755
index 0000000..c80d7ad
--- /dev/null
+++ b/test/1981-structural-redef-private-method-handles/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Make us exit on a failure
+set -e
+
+./default-build "$@" --experimental var-handles
diff --git a/test/1981-structural-redef-private-method-handles/expected.txt b/test/1981-structural-redef-private-method-handles/expected.txt
new file mode 100644
index 0000000..0c0ac6e
--- /dev/null
+++ b/test/1981-structural-redef-private-method-handles/expected.txt
@@ -0,0 +1,37 @@
+Initial: class art.Test1981$Transform[FOO: value of <FOO FIELD>, BAR: value of <BAR FIELD>]
+Reading field FOO using (ID: 0) MethodHandle()Object = (ID: 1) value of <FOO FIELD>
+Reading field FOO using (ID: 2) java.lang.invoke.FieldVarHandle()->java.lang.Object = (ID: 1) value of <FOO FIELD>
+Reading field BAR using (ID: 3) MethodHandle()Object = (ID: 4) value of <BAR FIELD>
+Reading field BAR using (ID: 5) java.lang.invoke.FieldVarHandle()->java.lang.Object = (ID: 4) value of <BAR FIELD>
+Redefining Transform class
+Post redefinition : class art.Test1981$Transform[FOO: value of <FOO FIELD>, BAR: value of <BAR FIELD>, BAZ: null]
+Reading field FOO using (ID: 0) MethodHandle()Object = (ID: 1) value of <FOO FIELD>
+Reading field FOO using (ID: 2) java.lang.invoke.FieldVarHandle()->java.lang.Object = (ID: 1) value of <FOO FIELD>
+Reading field BAR using (ID: 3) MethodHandle()Object = (ID: 4) value of <BAR FIELD>
+Reading field BAR using (ID: 5) java.lang.invoke.FieldVarHandle()->java.lang.Object = (ID: 4) value of <BAR FIELD>
+Reading new field BAZ using (ID: 6) MethodHandle()Object = (ID: 7) <NULL>
+Reading new field BAZ using (ID: 8) java.lang.invoke.FieldVarHandle()->java.lang.Object = (ID: 7) <NULL>
+Reading new field FOO using (ID: 9) MethodHandle()Object = (ID: 1) value of <FOO FIELD>
+Reading new field FOO using (ID: 10) java.lang.invoke.FieldVarHandle()->java.lang.Object = (ID: 1) value of <FOO FIELD>
+Reading new field BAR using (ID: 11) MethodHandle()Object = (ID: 4) value of <BAR FIELD>
+Reading new field BAR using (ID: 12) java.lang.invoke.FieldVarHandle()->java.lang.Object = (ID: 4) value of <BAR FIELD>
+Setting BAZ to (ID: 13) foo with new mh.
+Post set with new mh: class art.Test1981$Transform[FOO: value of <FOO FIELD>, BAR: value of <BAR FIELD>, BAZ: foo]
+Setting FOO to (ID: 14) class art.Test1981$Transform with old mh.
+Post set with old mh: class art.Test1981$Transform[FOO: class art.Test1981$Transform, BAR: value of <BAR FIELD>, BAZ: foo]
+Setting FOO to '(ID: 15) new_value object' with old varhandle.
+Post set with new varhandle: class art.Test1981$Transform[FOO: new_value object, BAR: value of <BAR FIELD>, BAZ: foo]
+Setting BAZ to 'bar' with new varhandle.
+Post set with old varhandle: class art.Test1981$Transform[FOO: new_value object, BAR: value of <BAR FIELD>, BAZ: bar]
+Using mh to call new private method.
+Post reinit with mh: class art.Test1981$Transform[FOO: new_value object, BAR: value of <BAR FIELD>, BAZ: 42]
+Reading field FOO using (ID: 0) MethodHandle()Object = (ID: 15) new_value object
+Reading field FOO using (ID: 2) java.lang.invoke.FieldVarHandle()->java.lang.Object = (ID: 15) new_value object
+Reading field BAR using (ID: 3) MethodHandle()Object = (ID: 4) value of <BAR FIELD>
+Reading field BAR using (ID: 5) java.lang.invoke.FieldVarHandle()->java.lang.Object = (ID: 4) value of <BAR FIELD>
+Reading new field BAZ using (ID: 6) MethodHandle()Object = (ID: 16) 42
+Reading new field BAZ using (ID: 8) java.lang.invoke.FieldVarHandle()->java.lang.Object = (ID: 16) 42
+Reading new field FOO using (ID: 9) MethodHandle()Object = (ID: 15) new_value object
+Reading new field FOO using (ID: 10) java.lang.invoke.FieldVarHandle()->java.lang.Object = (ID: 15) new_value object
+Reading new field BAR using (ID: 11) MethodHandle()Object = (ID: 4) value of <BAR FIELD>
+Reading new field BAR using (ID: 12) java.lang.invoke.FieldVarHandle()->java.lang.Object = (ID: 4) value of <BAR FIELD>
diff --git a/test/1981-structural-redef-private-method-handles/expected_no_mh.txt b/test/1981-structural-redef-private-method-handles/expected_no_mh.txt
new file mode 100644
index 0000000..183fcc8
--- /dev/null
+++ b/test/1981-structural-redef-private-method-handles/expected_no_mh.txt
@@ -0,0 +1,21 @@
+Initial: class art.Test1981$Transform[FOO: value of <FOO FIELD>, BAR: value of <BAR FIELD>]
+Reading field FOO using (ID: 0) MethodHandle()Object = (ID: 1) value of <FOO FIELD>
+Reading field BAR using (ID: 2) MethodHandle()Object = (ID: 3) value of <BAR FIELD>
+Redefining Transform class
+Post redefinition : class art.Test1981$Transform[FOO: value of <FOO FIELD>, BAR: value of <BAR FIELD>, BAZ: null]
+Reading field FOO using (ID: 0) MethodHandle()Object = (ID: 1) value of <FOO FIELD>
+Reading field BAR using (ID: 2) MethodHandle()Object = (ID: 3) value of <BAR FIELD>
+Reading new field BAZ using (ID: 4) MethodHandle()Object = (ID: 5) <NULL>
+Reading new field FOO using (ID: 6) MethodHandle()Object = (ID: 1) value of <FOO FIELD>
+Reading new field BAR using (ID: 7) MethodHandle()Object = (ID: 3) value of <BAR FIELD>
+Setting BAZ to (ID: 8) foo with new mh.
+Post set with new mh: class art.Test1981$Transform[FOO: value of <FOO FIELD>, BAR: value of <BAR FIELD>, BAZ: foo]
+Setting FOO to (ID: 9) class art.Test1981$Transform with old mh.
+Post set with old mh: class art.Test1981$Transform[FOO: class art.Test1981$Transform, BAR: value of <BAR FIELD>, BAZ: foo]
+Using mh to call new private method.
+Post reinit with mh: class art.Test1981$Transform[FOO: class art.Test1981$Transform, BAR: value of <BAR FIELD>, BAZ: 42]
+Reading field FOO using (ID: 0) MethodHandle()Object = (ID: 9) class art.Test1981$Transform
+Reading field BAR using (ID: 2) MethodHandle()Object = (ID: 3) value of <BAR FIELD>
+Reading new field BAZ using (ID: 4) MethodHandle()Object = (ID: 10) 42
+Reading new field FOO using (ID: 6) MethodHandle()Object = (ID: 9) class art.Test1981$Transform
+Reading new field BAR using (ID: 7) MethodHandle()Object = (ID: 3) value of <BAR FIELD>
diff --git a/test/1981-structural-redef-private-method-handles/info.txt b/test/1981-structural-redef-private-method-handles/info.txt
new file mode 100644
index 0000000..1c5ca82
--- /dev/null
+++ b/test/1981-structural-redef-private-method-handles/info.txt
@@ -0,0 +1,2 @@
+Test that obsoleted classes have their fields cleared and cannot be used to obtain
+obsolete/invalid reflection objects.
diff --git a/test/1981-structural-redef-private-method-handles/run b/test/1981-structural-redef-private-method-handles/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/1981-structural-redef-private-method-handles/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1981-structural-redef-private-method-handles/src/Main.java b/test/1981-structural-redef-private-method-handles/src/Main.java
new file mode 100644
index 0000000..7c546fd
--- /dev/null
+++ b/test/1981-structural-redef-private-method-handles/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1981.run(new art.Test1981_Varhandles());
+  }
+}
diff --git a/test/1981-structural-redef-private-method-handles/src/art/Redefinition.java b/test/1981-structural-redef-private-method-handles/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1981-structural-redef-private-method-handles/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1981-structural-redef-private-method-handles/src/art/Test1981.java b/test/1981-structural-redef-private-method-handles/src/art/Test1981.java
new file mode 100644
index 0000000..ac8a019
--- /dev/null
+++ b/test/1981-structural-redef-private-method-handles/src/art/Test1981.java
@@ -0,0 +1,347 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.invoke.*;
+import java.lang.ref.*;
+import java.lang.reflect.*;
+import java.util.*;
+
+public class Test1981 {
+  // Allow us to hide the var-handle portions when running this on CTS.
+  public interface VarHandler {
+    public boolean doVarHandleTests();
+
+    public default Object findStaticVarHandle(MethodHandles.Lookup l, Class c, String n, Class t)
+        throws Throwable {
+      return null;
+    }
+
+    public default Object get(Object vh) throws Throwable {
+      throw new Error("Illegal call!");
+    }
+
+    public default void set(Object vh, Object v) throws Throwable {
+      throw new Error("Illegal call!");
+    }
+    public default boolean instanceofVarHandle(Object v) {
+      return false;
+    }
+    public default Object getVarTypeName(Object v) {
+      throw new Error("Illegal call!");
+    }
+  }
+
+  // CTS Entrypoint.
+  public static void run() throws Exception {
+    run(() -> false);
+  }
+
+  public static void run(VarHandler varhandle_portion) throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest(varhandle_portion);
+  }
+
+  private static final boolean PRINT_NONDETERMINISTIC = false;
+
+  public static WeakHashMap<Object, Long> id_nums = new WeakHashMap<>();
+  public static long next_id = 0;
+
+  public static String printGeneric(VarHandler vh, Object o) {
+    Long id = id_nums.get(o);
+    if (id == null) {
+      id = Long.valueOf(next_id++);
+      id_nums.put(o, id);
+    }
+    if (o == null) {
+      return "(ID: " + id + ") <NULL>";
+    }
+    Class oc = o.getClass();
+    if (oc.isArray() && oc.getComponentType() == Byte.TYPE) {
+      return "(ID: "
+          + id
+          + ") "
+          + Arrays.toString(Arrays.copyOf((byte[]) o, 10)).replace(']', ',')
+          + " ...]";
+    } else if (vh.instanceofVarHandle(o)) {
+      // These don't have a good to-string. Give them one.
+      return "(ID: " + id + ") " + o.getClass().getName() + "()->" + vh.getVarTypeName(o);
+    } else {
+      return "(ID: " + id + ") " + o.toString();
+    }
+  }
+
+  public static class Transform {
+    static {
+    }
+
+    private static Object BAR =
+        new Object() {
+          public String toString() {
+            return "value of <" + this.get() + ">";
+          }
+
+          public Object get() {
+            return "BAR FIELD";
+          }
+        };
+    private static Object FOO =
+        new Object() {
+          public String toString() {
+            return "value of <" + this.get() + ">";
+          }
+
+          public Object get() {
+            return "FOO FIELD";
+          }
+        };
+
+    public static MethodHandles.Lookup getLookup() {
+      return MethodHandles.lookup();
+    }
+
+    public static String staticToString() {
+      return Transform.class.toString() + "[FOO: " + FOO + ", BAR: " + BAR + "]";
+    }
+  }
+
+  /* Base64 encoded class of:
+   * public static class Transform {
+   *   static {}
+   *   // NB This is the order the fields will be laid out in memory.
+   *   private static Object BAR;
+   *   private static Object BAZ;
+   *   private static Object FOO;
+   *   public static MethodHandles.Lookup getLookup() { return null; }
+   *   private static void reinitialize() {
+   *     BAZ = 42;
+   *   }
+   *   public static String staticToString() {
+   *    return Transform.class.toString() + "[FOO: " + FOO + ", BAR: " + BAR + ", BAZ: " + BAZ + "]";
+   *   }
+   * }
+   */
+  private static byte[] REDEFINED_DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQDY+Vd3k8SVBE6A35RavIBzYN76h51YIqVwBgAAcAAAAHhWNBIAAAAAAAAAAKwFAAAk"
+                  + "AAAAcAAAAAwAAAAAAQAABgAAADABAAADAAAAeAEAAAwAAACQAQAAAQAAAPABAABgBAAAEAIAABoD"
+                  + "AAAjAwAALAMAADYDAAA+AwAAQwMAAEgDAABNAwAAUAMAAFMDAABXAwAAWwMAAHUDAACFAwAAqQMA"
+                  + "AMkDAADcAwAA8QMAAAUEAAAZBAAANAQAAF0EAABsBAAAdwQAAHoEAACCBAAAhQQAAJIEAACaBAAA"
+                  + "pQQAAKsEAAC5BAAAyQQAANMEAADaBAAA4wQAAAcAAAALAAAADAAAAA0AAAAOAAAADwAAABAAAAAR"
+                  + "AAAAEgAAABMAAAAUAAAAFwAAAAkAAAAGAAAABAMAAAgAAAAIAAAAAAAAAAoAAAAJAAAADAMAAAoA"
+                  + "AAAJAAAAFAMAAAgAAAAKAAAAAAAAABcAAAALAAAAAAAAAAEABwAEAAAAAQAHAAUAAAABAAcABgAA"
+                  + "AAEABQACAAAAAQAFAAMAAAABAAQAHAAAAAEABQAeAAAAAQABAB8AAAAFAAEAIAAAAAYAAAAiAAAA"
+                  + "BwAFAAMAAAAJAAUAAwAAAAkAAgAbAAAACQADABsAAAAJAAEAIAAAAAEAAAABAAAABwAAAAAAAAAV"
+                  + "AAAAnAUAAGoFAAAAAAAABQAAAAIAAAD/AgAANgAAABwAAQBuEAUAAAAMAGIBAgBiAgAAYgMBACIE"
+                  + "CQBwEAgABABuIAoABAAaABgAbiAKAAQAbiAJABQAGgAAAG4gCgAEAG4gCQAkABoAAQBuIAoABABu"
+                  + "IAkANAAaABkAbiAKAAQAbhALAAQADAARAAEAAAAAAAAA9gIAAAIAAAASABEAAAAAAAAAAADuAgAA"
+                  + "AQAAAA4AAAABAAEAAQAAAPICAAAEAAAAcBAHAAAADgABAAAAAQAAAPoCAAAJAAAAEwAqAHEQBgAA"
+                  + "AAwAaQABAA4ACgAOAAkADgAPAA4AEQAOhwAUAA4AAAEAAAAAAAAAAQAAAAcAAAABAAAACAAHLCBC"
+                  + "QVI6IAAHLCBCQVo6IAAIPGNsaW5pdD4ABjxpbml0PgADQkFSAANCQVoAA0ZPTwABSQABTAACTEkA"
+                  + "AkxMABhMYXJ0L1Rlc3QxOTgxJFRyYW5zZm9ybTsADkxhcnQvVGVzdDE5ODE7ACJMZGFsdmlrL2Fu"
+                  + "bm90YXRpb24vRW5jbG9zaW5nQ2xhc3M7AB5MZGFsdmlrL2Fubm90YXRpb24vSW5uZXJDbGFzczsA"
+                  + "EUxqYXZhL2xhbmcvQ2xhc3M7ABNMamF2YS9sYW5nL0ludGVnZXI7ABJMamF2YS9sYW5nL09iamVj"
+                  + "dDsAEkxqYXZhL2xhbmcvU3RyaW5nOwAZTGphdmEvbGFuZy9TdHJpbmdCdWlsZGVyOwAnTGphdmEv"
+                  + "bGFuZy9pbnZva2UvTWV0aG9kSGFuZGxlcyRMb29rdXA7AA1UZXN0MTk4MS5qYXZhAAlUcmFuc2Zv"
+                  + "cm0AAVYABltGT086IAABXQALYWNjZXNzRmxhZ3MABmFwcGVuZAAJZ2V0TG9va3VwAARuYW1lAAxy"
+                  + "ZWluaXRpYWxpemUADnN0YXRpY1RvU3RyaW5nAAh0b1N0cmluZwAFdmFsdWUAB3ZhbHVlT2YAdn5+"
+                  + "RDh7ImNvbXBpbGF0aW9uLW1vZGUiOiJkZWJ1ZyIsIm1pbi1hcGkiOjEsInNoYS0xIjoiYTgzNTJm"
+                  + "MjU0ODg1MzYyY2NkOGQ5MDlkMzUyOWM2MDA5NGRkODk2ZSIsInZlcnNpb24iOiIxLjYuMjAtZGV2"
+                  + "In0AAgMBIRgCAgQCGgQJHRcWAwAFAAAKAQoBCgCIgASgBQGBgAS0BQEJjAUBCswFAQmQBAAAAAAC"
+                  + "AAAAWwUAAGEFAACQBQAAAAAAAAAAAAAAAAAAEAAAAAAAAAABAAAAAAAAAAEAAAAkAAAAcAAAAAIA"
+                  + "AAAMAAAAAAEAAAMAAAAGAAAAMAEAAAQAAAADAAAAeAEAAAUAAAAMAAAAkAEAAAYAAAABAAAA8AEA"
+                  + "AAEgAAAFAAAAEAIAAAMgAAAFAAAA7gIAAAEQAAADAAAABAMAAAIgAAAkAAAAGgMAAAQgAAACAAAA"
+                  + "WwUAAAAgAAABAAAAagUAAAMQAAACAAAAjAUAAAYgAAABAAAAnAUAAAAQAAABAAAArAUAAA==");
+
+  public static void doTest(VarHandler vh) throws Exception {
+    try {
+      System.out.println("Initial: " + Transform.staticToString());
+      MethodHandles.Lookup lookup = Transform.getLookup();
+      String[] names =
+          new String[] {
+            "FOO", "BAR",
+          };
+      MethodHandle[] handles =
+          new MethodHandle[] {
+            lookup.findStaticGetter(Transform.class, "FOO", Object.class),
+            lookup.findStaticGetter(Transform.class, "BAR", Object.class),
+          };
+      Object foo_handle = vh.findStaticVarHandle(lookup, Transform.class, "FOO", Object.class);
+      Object[] var_handles =
+          new Object[] {
+            foo_handle, vh.findStaticVarHandle(lookup, Transform.class, "BAR", Object.class),
+          };
+
+      for (int i = 0; i < names.length; i++) {
+        System.out.println(
+            "Reading field "
+                + names[i]
+                + " using "
+                + printGeneric(vh, handles[i])
+                + " = "
+                + printGeneric(vh, handles[i].invoke()));
+        if (vh.doVarHandleTests()) {
+          System.out.println(
+              "Reading field "
+                  + names[i]
+                  + " using "
+                  + printGeneric(vh, var_handles[i])
+                  + " = "
+                  + printGeneric(vh, vh.get(var_handles[i])));
+        }
+      }
+      MethodHandle old_field_write = lookup.findStaticSetter(Transform.class, "FOO", Object.class);
+
+      System.out.println("Redefining Transform class");
+      Redefinition.doCommonStructuralClassRedefinition(Transform.class, REDEFINED_DEX_BYTES);
+      System.out.println("Post redefinition : " + Transform.staticToString());
+
+      String[] new_names =
+          new String[] {
+            "BAZ", "FOO", "BAR",
+          };
+      MethodHandle[] new_handles =
+          new MethodHandle[] {
+            lookup.findStaticGetter(Transform.class, "BAZ", Object.class),
+            lookup.findStaticGetter(Transform.class, "FOO", Object.class),
+            lookup.findStaticGetter(Transform.class, "BAR", Object.class),
+          };
+      Object baz_handle = vh.findStaticVarHandle(lookup, Transform.class, "BAZ", Object.class);
+      Object[] new_var_handles =
+          new Object[] {
+            baz_handle,
+            vh.findStaticVarHandle(lookup, Transform.class, "FOO", Object.class),
+            vh.findStaticVarHandle(lookup, Transform.class, "BAR", Object.class),
+          };
+
+      for (int i = 0; i < names.length; i++) {
+        System.out.println(
+            "Reading field "
+                + names[i]
+                + " using "
+                + printGeneric(vh, handles[i])
+                + " = "
+                + printGeneric(vh, handles[i].invoke()));
+        if (vh.doVarHandleTests()) {
+          System.out.println(
+              "Reading field "
+                  + names[i]
+                  + " using "
+                  + printGeneric(vh, var_handles[i])
+                  + " = "
+                  + printGeneric(vh, vh.get(var_handles[i])));
+        }
+      }
+
+      for (int i = 0; i < new_names.length; i++) {
+        System.out.println(
+            "Reading new field "
+                + new_names[i]
+                + " using "
+                + printGeneric(vh, new_handles[i])
+                + " = "
+                + printGeneric(vh, new_handles[i].invoke()));
+        if (vh.doVarHandleTests()) {
+          System.out.println(
+              "Reading new field "
+                  + new_names[i]
+                  + " using "
+                  + printGeneric(vh, new_var_handles[i])
+                  + " = "
+                  + printGeneric(vh, vh.get(new_var_handles[i])));
+        }
+      }
+
+      String val = "foo";
+      System.out.println("Setting BAZ to " + printGeneric(vh, val) + " with new mh.");
+      lookup.findStaticSetter(Transform.class, "BAZ", Object.class).invoke(val);
+      System.out.println("Post set with new mh: " + Transform.staticToString());
+
+      System.out.println("Setting FOO to " + printGeneric(vh, Transform.class) + " with old mh.");
+      old_field_write.invoke(Transform.class);
+      System.out.println("Post set with old mh: " + Transform.staticToString());
+
+      Object new_val =
+          new Object() {
+            public String toString() {
+              return "new_value object";
+            }
+          };
+      if (vh.doVarHandleTests()) {
+        System.out.println("Setting FOO to '" + printGeneric(vh, new_val) + "' with old varhandle.");
+        vh.set(foo_handle, new_val);
+        System.out.println("Post set with new varhandle: " + Transform.staticToString());
+
+        System.out.println("Setting BAZ to 'bar' with new varhandle.");
+        vh.set(baz_handle, "bar");
+        System.out.println("Post set with old varhandle: " + Transform.staticToString());
+      }
+
+      System.out.println("Using mh to call new private method.");
+      MethodHandle reinit =
+          lookup.findStatic(Transform.class, "reinitialize", MethodType.methodType(Void.TYPE));
+      reinit.invoke();
+      System.out.println("Post reinit with mh: " + Transform.staticToString());
+
+      for (int i = 0; i < names.length; i++) {
+        System.out.println(
+            "Reading field "
+                + names[i]
+                + " using "
+                + printGeneric(vh, handles[i])
+                + " = "
+                + printGeneric(vh, handles[i].invoke()));
+        if (vh.doVarHandleTests()) {
+          System.out.println(
+              "Reading field "
+                  + names[i]
+                  + " using "
+                  + printGeneric(vh, var_handles[i])
+                  + " = "
+                  + printGeneric(vh, vh.get(var_handles[i])));
+        }
+      }
+      for (int i = 0; i < new_names.length; i++) {
+        System.out.println(
+            "Reading new field "
+                + new_names[i]
+                + " using "
+                + printGeneric(vh, new_handles[i])
+                + " = "
+                + printGeneric(vh, new_handles[i].invoke()));
+        if (vh.doVarHandleTests()) {
+          System.out.println(
+              "Reading new field "
+                  + new_names[i]
+                  + " using "
+                  + printGeneric(vh, new_var_handles[i])
+                  + " = "
+                  + printGeneric(vh, vh.get(new_var_handles[i])));
+        }
+      }
+    } catch (Throwable t) {
+      if (t instanceof Exception) {
+        throw (Exception) t;
+      } else if (t instanceof Error) {
+        throw (Error) t;
+      } else {
+        throw new RuntimeException("Unexpected throwable!", t);
+      }
+    }
+  }
+}
diff --git a/test/1981-structural-redef-private-method-handles/src/art/Test1981_Varhandles.java b/test/1981-structural-redef-private-method-handles/src/art/Test1981_Varhandles.java
new file mode 100644
index 0000000..dbb63e7
--- /dev/null
+++ b/test/1981-structural-redef-private-method-handles/src/art/Test1981_Varhandles.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.invoke.*;
+import java.lang.ref.*;
+import java.lang.reflect.*;
+import java.util.*;
+
+public class Test1981_Varhandles implements Test1981.VarHandler {
+  public Test1981_Varhandles() {}
+
+  public boolean doVarHandleTests() {
+    return true;
+  }
+
+  public Object findStaticVarHandle(MethodHandles.Lookup l, Class c, String n, Class t)
+      throws Throwable {
+    return l.findStaticVarHandle(c, n, t);
+  }
+
+  public Object get(Object vh) throws Throwable {
+    return ((VarHandle) vh).get();
+  }
+
+  public void set(Object vh, Object v) throws Throwable {
+    ((VarHandle) vh).set(v);
+  }
+  public boolean instanceofVarHandle(Object v) {
+    return v instanceof VarHandle;
+  }
+  public Object getVarTypeName(Object v) {
+    return ((VarHandle)v).varType().getName();
+  }
+}
diff --git a/test/1982-no-virtuals-structural-redefinition/expected.txt b/test/1982-no-virtuals-structural-redefinition/expected.txt
new file mode 100644
index 0000000..604145d
--- /dev/null
+++ b/test/1982-no-virtuals-structural-redefinition/expected.txt
@@ -0,0 +1,30 @@
+Reading with reflection.
+public static java.lang.Object art.Test1982$Transform.BAR on (ID: 0) <NULL> = (ID: 1) value of <BAR FIELD>
+public static java.lang.Object art.Test1982$Transform.FOO on (ID: 0) <NULL> = (ID: 2) value of <FOO FIELD>
+Reading with reflection on subtransform instance.
+public static java.lang.Object art.Test1982$Transform.BAR on (ID: 3) SuperTransform { id: 2, class: class art.Test1982$SubTransform } = (ID: 1) value of <BAR FIELD>
+public static java.lang.Object art.Test1982$Transform.FOO on (ID: 3) SuperTransform { id: 2, class: class art.Test1982$SubTransform } = (ID: 2) value of <FOO FIELD>
+public int art.Test1982$SuperTransform.id on (ID: 3) SuperTransform { id: 2, class: class art.Test1982$SubTransform } = (ID: 4) 2
+Reading normally.
+Read BAR field: (ID: 1) value of <BAR FIELD>
+Read FOO field: (ID: 2) value of <FOO FIELD>
+t1 is (ID: 5) SuperTransform { id: 1, class: class art.Test1982$Transform }
+t2 is (ID: 3) SuperTransform { id: 2, class: class art.Test1982$SubTransform }
+Redefined: class art.Test1982$Transform[FOO: value of <FOO FIELD>, BAR: value of <BAR FIELD>, BAZ: null]
+Reading with reflection after redefinition.
+public static java.lang.Object art.Test1982$Transform.BAR on (ID: 0) <NULL> = (ID: 1) value of <BAR FIELD>
+public static java.lang.Object art.Test1982$Transform.BAZ on (ID: 0) <NULL> = (ID: 0) <NULL>
+public static java.lang.Object art.Test1982$Transform.FOO on (ID: 0) <NULL> = (ID: 2) value of <FOO FIELD>
+Reading with reflection after redefinition on subtransform instance.
+public static java.lang.Object art.Test1982$Transform.BAR on (ID: 3) SuperTransform { id: 2, class: class art.Test1982$SubTransform } = (ID: 1) value of <BAR FIELD>
+public static java.lang.Object art.Test1982$Transform.BAZ on (ID: 3) SuperTransform { id: 2, class: class art.Test1982$SubTransform } = (ID: 0) <NULL>
+public static java.lang.Object art.Test1982$Transform.FOO on (ID: 3) SuperTransform { id: 2, class: class art.Test1982$SubTransform } = (ID: 2) value of <FOO FIELD>
+public int art.Test1982$SuperTransform.id on (ID: 3) SuperTransform { id: 2, class: class art.Test1982$SubTransform } = (ID: 4) 2
+Reading normally after possible modification.
+Read FOO field: (ID: 2) value of <FOO FIELD>
+Read BAR field: (ID: 1) value of <BAR FIELD>
+t1 is (ID: 5) SuperTransform { id: 1, class: class art.Test1982$Transform }
+t2 is (ID: 3) SuperTransform { id: 2, class: class art.Test1982$SubTransform }
+new SubTransform is (ID: 6) SuperTransform { id: 1003, class: class art.Test1982$SubTransform }
+myToString of (ID: 6) SuperTransform { id: 1003, class: class art.Test1982$SubTransform } is SubTransform (subclass of: class art.Test1982$Transform[FOO: value of <FOO FIELD>, BAR: value of <BAR FIELD>, BAZ: null]) { id: 1003 }
+Creating new transform from t1 class = (ID: 7) SuperTransform { id: 1004, class: class art.Test1982$Transform }
diff --git a/test/1982-no-virtuals-structural-redefinition/info.txt b/test/1982-no-virtuals-structural-redefinition/info.txt
new file mode 100644
index 0000000..5921e53
--- /dev/null
+++ b/test/1982-no-virtuals-structural-redefinition/info.txt
@@ -0,0 +1,2 @@
+Test that structural redefinition works on classes with super-types and subtypes with virtual
+fields and methods so long as the target does not have any.
diff --git a/test/1982-no-virtuals-structural-redefinition/run b/test/1982-no-virtuals-structural-redefinition/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/1982-no-virtuals-structural-redefinition/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1982-no-virtuals-structural-redefinition/src/Main.java b/test/1982-no-virtuals-structural-redefinition/src/Main.java
new file mode 100644
index 0000000..19c56f8
--- /dev/null
+++ b/test/1982-no-virtuals-structural-redefinition/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1982.run();
+  }
+}
diff --git a/test/1982-no-virtuals-structural-redefinition/src/art/Redefinition.java b/test/1982-no-virtuals-structural-redefinition/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1982-no-virtuals-structural-redefinition/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1982-no-virtuals-structural-redefinition/src/art/Test1982.java b/test/1982-no-virtuals-structural-redefinition/src/art/Test1982.java
new file mode 100644
index 0000000..3336c64
--- /dev/null
+++ b/test/1982-no-virtuals-structural-redefinition/src/art/Test1982.java
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.ref.*;
+import java.lang.reflect.*;
+import java.util.*;
+
+public class Test1982 {
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+
+  private static final boolean PRINT_NONDETERMINISTIC = false;
+
+  public static WeakHashMap<Object, Long> id_nums = new WeakHashMap<>();
+  public static long next_id = 0;
+
+  public static String printGeneric(Object o) {
+    Long id = id_nums.get(o);
+    if (id == null) {
+      id = Long.valueOf(next_id++);
+      id_nums.put(o, id);
+    }
+    if (o == null) {
+      return "(ID: " + id + ") <NULL>";
+    }
+    Class oc = o.getClass();
+    if (oc.isArray() && oc.getComponentType() == Byte.TYPE) {
+      return "(ID: "
+          + id
+          + ") "
+          + Arrays.toString(Arrays.copyOf((byte[]) o, 10)).replace(']', ',')
+          + " ...]";
+    } else {
+      return "(ID: " + id + ") " + o.toString();
+    }
+  }
+
+  private static void doRedefinition() {
+    Redefinition.doCommonStructuralClassRedefinition(Transform.class, REDEFINED_DEX_BYTES);
+  }
+
+  private static void readReflective(String msg, Field[] fields, Object recv) throws Exception {
+    System.out.println(msg);
+    for (Field f : fields) {
+      System.out.println(
+          f.toString() + " on " + printGeneric(recv) + " = " + printGeneric(f.get(recv)));
+    }
+  }
+
+  public static class SuperTransform {
+    public int id;
+
+    public SuperTransform(int id) {
+      this.id = id;
+    }
+
+    public String toString() {
+      return "SuperTransform { id: " + id + ", class: " + getClass() + " }";
+    }
+  }
+
+  public static class Transform extends SuperTransform {
+    static {
+    }
+
+    public static Object BAR =
+        new Object() {
+          public String toString() {
+            return "value of <" + this.get() + ">";
+          }
+
+          public Object get() {
+            return "BAR FIELD";
+          }
+        };
+    public static Object FOO =
+        new Object() {
+          public String toString() {
+            return "value of <" + this.get() + ">";
+          }
+
+          public Object get() {
+            return "FOO FIELD";
+          }
+        };
+    // This class has no virtual fields or methods. This means we can structurally redefine it
+    // without having to change the size of any instances.
+    public Transform(int id) {
+      super(id);
+    }
+
+    public static String staticToString() {
+      return Transform.class.toString() + "[FOO: " + FOO + ", BAR: " + BAR + "]";
+    }
+  }
+
+  public static class SubTransform extends Transform {
+    public SubTransform(int id) {
+      super(id);
+    }
+
+    public String myToString() {
+      return "SubTransform (subclass of: " + staticToString() + ") { id: " + id + " }";
+    }
+  }
+
+  /* Base64 encoded class of:
+   * public static class Transform extends SuperTransform {
+   *   static {}
+   *   public Transform(int id) { super(id + 1000); }
+   *   // NB This is the order the fields will be laid out in memory.
+   *   public static Object BAR;
+   *   public static Object BAZ;
+   *   public static Object FOO;
+   *   public static String staticToString() {
+   *    return Transform.class.toString() + "[FOO: " + FOO + ", BAR: " + BAR + ", BAZ: " + BAZ + "]";
+   *   }
+   * }
+   */
+  private static byte[] REDEFINED_DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQAV5GctNSI+SEKJDaJIQLEac9ClAxZUSZq4BQAAcAAAAHhWNBIAAAAAAAAAAPQEAAAg"
+                  + "AAAAcAAAAAsAAADwAAAABQAAABwBAAADAAAAWAEAAAkAAABwAQAAAQAAALgBAADgAwAA2AEAAKoC"
+                  + "AACzAgAAvAIAAMYCAADOAgAA0wIAANgCAADdAgAA4AIAAOMCAADnAgAABgMAACADAAAwAwAAVAMA"
+                  + "AHQDAACHAwAAmwMAAK8DAADKAwAA2QMAAOQDAADnAwAA6wMAAPMDAAD2AwAAAwQAAAsEAAARBAAA"
+                  + "IQQAACsEAAAyBAAABwAAAAoAAAALAAAADAAAAA0AAAAOAAAADwAAABAAAAARAAAAEgAAABUAAAAI"
+                  + "AAAACAAAAAAAAAAJAAAACQAAAJQCAAAJAAAACQAAAJwCAAAVAAAACgAAAAAAAAAWAAAACgAAAKQC"
+                  + "AAACAAcABAAAAAIABwAFAAAAAgAHAAYAAAABAAQAAwAAAAIAAwACAAAAAgAEAAMAAAACAAAAHAAA"
+                  + "AAYAAAAdAAAACQADAAMAAAAJAAEAGgAAAAkAAgAaAAAACQAAAB0AAAACAAAAAQAAAAEAAAAAAAAA"
+                  + "EwAAAOQEAAC5BAAAAAAAAAUAAAACAAAAjQIAADYAAAAcAAIAbhAEAAAADABiAQIAYgIAAGIDAQAi"
+                  + "BAkAcBAFAAQAbiAHAAQAGgAXAG4gBwAEAG4gBgAUABoAAABuIAcABABuIAYAJAAaAAEAbiAHAAQA"
+                  + "biAGADQAGgAYAG4gBwAEAG4QCAAEAAwAEQAAAAAAAAAAAIQCAAABAAAADgAAAAIAAgACAAAAiAIA"
+                  + "AAYAAADQEegDcCAAABAADgAKAA4ACwEADgARAA4AAAAAAQAAAAcAAAABAAAACAAAAAEAAAAAAAcs"
+                  + "IEJBUjogAAcsIEJBWjogAAg8Y2xpbml0PgAGPGluaXQ+AANCQVIAA0JBWgADRk9PAAFJAAFMAAJM"
+                  + "TAAdTGFydC9UZXN0MTk4MiRTdXBlclRyYW5zZm9ybTsAGExhcnQvVGVzdDE5ODIkVHJhbnNmb3Jt"
+                  + "OwAOTGFydC9UZXN0MTk4MjsAIkxkYWx2aWsvYW5ub3RhdGlvbi9FbmNsb3NpbmdDbGFzczsAHkxk"
+                  + "YWx2aWsvYW5ub3RhdGlvbi9Jbm5lckNsYXNzOwARTGphdmEvbGFuZy9DbGFzczsAEkxqYXZhL2xh"
+                  + "bmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABlMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7"
+                  + "AA1UZXN0MTk4Mi5qYXZhAAlUcmFuc2Zvcm0AAVYAAlZJAAZbRk9POiAAAV0AC2FjY2Vzc0ZsYWdz"
+                  + "AAZhcHBlbmQABG5hbWUADnN0YXRpY1RvU3RyaW5nAAh0b1N0cmluZwAFdmFsdWUAdn5+RDh7ImNv"
+                  + "bXBpbGF0aW9uLW1vZGUiOiJkZWJ1ZyIsIm1pbi1hcGkiOjEsInNoYS0xIjoiYTgzNTJmMjU0ODg1"
+                  + "MzYyY2NkOGQ5MDlkMzUyOWM2MDA5NGRkODk2ZSIsInZlcnNpb24iOiIxLjYuMjAtZGV2In0AAgQB"
+                  + "HhgDAgUCGQQJGxcUAwADAAAJAQkBCQGIgATUBAGBgAToBAEJ2AMAAAAAAAIAAACqBAAAsAQAANgE"
+                  + "AAAAAAAAAAAAAAAAAAAQAAAAAAAAAAEAAAAAAAAAAQAAACAAAABwAAAAAgAAAAsAAADwAAAAAwAA"
+                  + "AAUAAAAcAQAABAAAAAMAAABYAQAABQAAAAkAAABwAQAABgAAAAEAAAC4AQAAASAAAAMAAADYAQAA"
+                  + "AyAAAAMAAACEAgAAARAAAAMAAACUAgAAAiAAACAAAACqAgAABCAAAAIAAACqBAAAACAAAAEAAAC5"
+                  + "BAAAAxAAAAIAAADUBAAABiAAAAEAAADkBAAAABAAAAEAAAD0BAAA");
+
+  public static void doTest() throws Exception {
+    Transform t1 = new Transform(1);
+    SuperTransform t2 = new SubTransform(2);
+    readReflective("Reading with reflection.", Transform.class.getDeclaredFields(), null);
+    readReflective(
+        "Reading with reflection on subtransform instance.", SubTransform.class.getFields(), t2);
+    System.out.println("Reading normally.");
+    System.out.println("Read BAR field: " + printGeneric(Transform.BAR));
+    System.out.println("Read FOO field: " + printGeneric(Transform.FOO));
+    System.out.println("t1 is " + printGeneric(t1));
+    System.out.println("t2 is " + printGeneric(t2));
+    doRedefinition();
+    System.out.println("Redefined: " + Transform.staticToString());
+    readReflective(
+        "Reading with reflection after redefinition.", Transform.class.getDeclaredFields(), null);
+    readReflective(
+        "Reading with reflection after redefinition on subtransform instance.",
+        SubTransform.class.getFields(),
+        t2);
+    System.out.println("Reading normally after possible modification.");
+    System.out.println("Read FOO field: " + printGeneric(Transform.FOO));
+    System.out.println("Read BAR field: " + printGeneric(Transform.BAR));
+    System.out.println("t1 is " + printGeneric(t1));
+    System.out.println("t2 is " + printGeneric(t2));
+    SubTransform t3 = new SubTransform(3);
+    System.out.println("new SubTransform is " + printGeneric(t3));
+    System.out.println("myToString of " + printGeneric(t3) + " is " + t3.myToString());
+    // We verified in test 1980 that getDeclaredConstructor will throw if the class is obsolete.
+    // This therefore is a reasonable test that the t1 object's declaring class was updated.
+    System.out.println(
+        "Creating new transform from t1 class = "
+            + printGeneric(t1.getClass().getDeclaredConstructor(Integer.TYPE).newInstance(4)));
+  }
+}
diff --git a/test/1983-structural-redefinition-failures/build b/test/1983-structural-redefinition-failures/build
new file mode 100755
index 0000000..c80d7ad
--- /dev/null
+++ b/test/1983-structural-redefinition-failures/build
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Make us exit on a failure
+set -e
+
+./default-build "$@" --experimental var-handles
diff --git a/test/1983-structural-redefinition-failures/expected-cts.txt b/test/1983-structural-redefinition-failures/expected-cts.txt
new file mode 100644
index 0000000..00d039f
--- /dev/null
+++ b/test/1983-structural-redefinition-failures/expected-cts.txt
@@ -0,0 +1,30 @@
+Checking mirror'd classes
+Is Structurally modifiable class java.lang.reflect.AccessibleObject false
+Is Structurally modifiable class java.lang.invoke.CallSite false
+Is Structurally modifiable class dalvik.system.ClassExt false
+Is Structurally modifiable class java.lang.ClassLoader false
+Is Structurally modifiable class java.lang.Class false
+Is Structurally modifiable class java.lang.reflect.Constructor false
+Is Structurally modifiable class java.lang.DexCache false
+Is Structurally modifiable class dalvik.system.EmulatedStackFrame false
+Is Structurally modifiable class java.lang.reflect.Executable false
+Is Structurally modifiable class java.lang.reflect.Field false
+Is Structurally modifiable class java.lang.ref.FinalizerReference false
+Is Structurally modifiable class java.lang.invoke.MethodHandle false
+Is Structurally modifiable class java.lang.invoke.MethodHandles$Lookup false
+Is Structurally modifiable class java.lang.invoke.MethodType false
+Is Structurally modifiable class java.lang.reflect.Method false
+Is Structurally modifiable class java.lang.Object false
+Is Structurally modifiable class java.lang.reflect.Proxy false
+Is Structurally modifiable class java.lang.ref.Reference false
+Is Structurally modifiable class java.lang.StackTraceElement false
+Is Structurally modifiable class java.lang.String false
+Is Structurally modifiable class java.lang.Thread false
+Is Structurally modifiable class java.lang.Throwable false
+Is Structurally modifiable class java.lang.invoke.VarHandle false
+Is Structurally modifiable class java.lang.invoke.FieldVarHandle false
+Checking non-mirror'd classes
+Is Structurally modifiable class [Ljava.lang.Object; false
+Is Structurally modifiable class art.Test1983$NoVirtuals true
+Is Structurally modifiable class art.Test1983$WithVirtuals true
+Is Structurally modifiable class art.Test1983$SubWithVirtuals true
diff --git a/test/1983-structural-redefinition-failures/expected.txt b/test/1983-structural-redefinition-failures/expected.txt
new file mode 100644
index 0000000..faa7528
--- /dev/null
+++ b/test/1983-structural-redefinition-failures/expected.txt
@@ -0,0 +1,37 @@
+Checking mirror'd classes
+Is Structurally modifiable class java.lang.reflect.AccessibleObject false
+Is Structurally modifiable class java.lang.invoke.CallSite false
+Is Structurally modifiable class dalvik.system.ClassExt false
+Is Structurally modifiable class java.lang.ClassLoader false
+Is Structurally modifiable class java.lang.Class false
+Is Structurally modifiable class java.lang.reflect.Constructor false
+Is Structurally modifiable class java.lang.DexCache false
+Is Structurally modifiable class dalvik.system.EmulatedStackFrame false
+Is Structurally modifiable class java.lang.reflect.Executable false
+Is Structurally modifiable class java.lang.reflect.Field false
+Is Structurally modifiable class java.lang.ref.FinalizerReference false
+Is Structurally modifiable class java.lang.invoke.MethodHandle false
+Is Structurally modifiable class java.lang.invoke.MethodHandles$Lookup false
+Is Structurally modifiable class java.lang.invoke.MethodType false
+Is Structurally modifiable class java.lang.reflect.Method false
+Is Structurally modifiable class java.lang.Object false
+Is Structurally modifiable class java.lang.reflect.Proxy false
+Is Structurally modifiable class java.lang.ref.Reference false
+Is Structurally modifiable class java.lang.StackTraceElement false
+Is Structurally modifiable class java.lang.String false
+Is Structurally modifiable class java.lang.Thread false
+Is Structurally modifiable class java.lang.Throwable false
+Is Structurally modifiable class java.lang.invoke.VarHandle false
+Is Structurally modifiable class java.lang.invoke.FieldVarHandle false
+Checking non-mirror'd classes
+Is Structurally modifiable class [Ljava.lang.Object; false
+Is Structurally modifiable class art.Test1983$NoVirtuals true
+Is Structurally modifiable class art.Test1983$WithVirtuals true
+Is Structurally modifiable class art.Test1983$SubWithVirtuals true
+Checking non-mirror'd classes (non-cts)
+Is Structurally modifiable class java.util.ArrayList true
+Is Structurally modifiable class java.util.Objects true
+Is Structurally modifiable class java.util.Arrays true
+Is Structurally modifiable class java.lang.Integer true
+Is Structurally modifiable class java.lang.Number true
+Is Structurally modifiable class java.lang.invoke.MethodHandles true
diff --git a/test/1983-structural-redefinition-failures/info.txt b/test/1983-structural-redefinition-failures/info.txt
new file mode 100644
index 0000000..794f8eb
--- /dev/null
+++ b/test/1983-structural-redefinition-failures/info.txt
@@ -0,0 +1 @@
+Sanity check for isStructurallyModifiable.
diff --git a/test/1983-structural-redefinition-failures/run b/test/1983-structural-redefinition-failures/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/1983-structural-redefinition-failures/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1983-structural-redefinition-failures/src/Main.java b/test/1983-structural-redefinition-failures/src/Main.java
new file mode 100644
index 0000000..ebf35f5
--- /dev/null
+++ b/test/1983-structural-redefinition-failures/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1983.runNonCts();
+  }
+}
diff --git a/test/1983-structural-redefinition-failures/src/art/Redefinition.java b/test/1983-structural-redefinition-failures/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1983-structural-redefinition-failures/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1983-structural-redefinition-failures/src/art/Test1983.java b/test/1983-structural-redefinition-failures/src/art/Test1983.java
new file mode 100644
index 0000000..b5f7942
--- /dev/null
+++ b/test/1983-structural-redefinition-failures/src/art/Test1983.java
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.ref.*;
+import java.lang.reflect.*;
+import java.lang.invoke.*;
+import java.util.*;
+
+public class Test1983 {
+  public static void runNonCts() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+    doTestNonCts();
+  }
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+
+  public static void Check(Class[] klasses) {
+    for (Class k : klasses) {
+      try {
+        boolean res = Redefinition.isStructurallyModifiable(k);
+        System.out.println("Is Structurally modifiable " + k + " " + res);
+      } catch (Exception e) {
+        System.out.println("Got exception " + e + " during check modifiablity of " + k);
+        e.printStackTrace(System.out);
+      }
+    }
+  }
+
+  public static class WithVirtuals {
+    public Object o;
+    public void foobar() {}
+  }
+  public static class NoVirtuals extends WithVirtuals {
+    public static Object o;
+    public static void foo() {}
+  }
+  public static class SubWithVirtuals extends NoVirtuals {
+    public Object j;
+    public void bar() {}
+  }
+
+  public static void doTest() throws Exception {
+    Class[] mirrord_classes = new Class[] {
+      AccessibleObject.class,
+      CallSite.class,
+      // ClassExt is not on the compile classpath.
+      Class.forName("dalvik.system.ClassExt"),
+      ClassLoader.class,
+      Class.class,
+      Constructor.class,
+      // DexCache is not on the compile classpath
+      Class.forName("java.lang.DexCache"),
+      // EmulatedStackFrame is not on the compile classpath
+      Class.forName("dalvik.system.EmulatedStackFrame"),
+      Executable.class,
+      Field.class,
+      // @hide on CTS
+      Class.forName("java.lang.ref.FinalizerReference"),
+      MethodHandle.class,
+      MethodHandles.Lookup.class,
+      MethodType.class,
+      Method.class,
+      Object.class,
+      Proxy.class,
+      Reference.class,
+      StackTraceElement.class,
+      String.class,
+      Thread.class,
+      Throwable.class,
+      // @hide on CTS
+      Class.forName("java.lang.invoke.VarHandle"),
+      // TODO all the var handle types.
+      // @hide on CTS
+      Class.forName("java.lang.invoke.FieldVarHandle"),
+    };
+    System.out.println("Checking mirror'd classes");
+    Check(mirrord_classes);
+    // The results of some of these will change as we improve structural class redefinition. Any
+    // that are true should always remain so though.
+    Class[] non_mirrord_classes = new Class[] {
+      new Object[0].getClass(),
+      NoVirtuals.class,
+      WithVirtuals.class,
+      SubWithVirtuals.class,
+    };
+    System.out.println("Checking non-mirror'd classes");
+    Check(non_mirrord_classes);
+  }
+
+  public static void doTestNonCts() throws Exception {
+    System.out.println("Checking non-mirror'd classes (non-cts)");
+    Class[] non_mirrord_classes = new Class[] {
+      ArrayList.class,
+      Objects.class,
+      Arrays.class,
+      Integer.class,
+      Number.class,
+      MethodHandles.class,
+    };
+    Check(non_mirrord_classes);
+  }
+}
diff --git a/test/1984-structural-redefine-field-trace/expected.txt b/test/1984-structural-redefine-field-trace/expected.txt
new file mode 100644
index 0000000..6153d7e
--- /dev/null
+++ b/test/1984-structural-redefine-field-trace/expected.txt
@@ -0,0 +1,31 @@
+Dumping fields at start
+public static boolean art.Test1984$Transform.boom=false
+public static int art.Test1984$Transform.count_down=2
+public static boolean art.Test1984$Transform.tock=false
+method: public static void art.Test1984$Transform.tick()	ACCESS: public static boolean art.Test1984$Transform.tock
+method: public static void art.Test1984$Transform.tick()	ACCESS: public static boolean art.Test1984$Transform.tock
+method: public static void art.Test1984$Transform.tick()	MODIFY: public static boolean art.Test1984$Transform.tock	Set to: true
+method: public static void art.Test1984$Transform.tick()	ACCESS: public static int art.Test1984$Transform.count_down
+method: public static void art.Test1984$Transform.tick()	ACCESS: public static boolean art.Test1984$Transform.tock
+method: public static void art.Test1984$Transform.tick()	ACCESS: public static boolean art.Test1984$Transform.tock
+method: public static void art.Test1984$Transform.tick()	MODIFY: public static boolean art.Test1984$Transform.tock	Set to: false
+method: public static void art.Test1984$Transform.tick()	ACCESS: public static int art.Test1984$Transform.count_down
+method: public static void art.Test1984$Transform.tick()	MODIFY: public static int art.Test1984$Transform.count_down	Set to: 1
+method: public static void art.Test1984$Transform.tick()	ACCESS: public static int art.Test1984$Transform.count_down
+REDEFINING TRANSFORM CLASS
+method: public static void art.Test1984$Transform.tick()	ACCESS: public static boolean art.Test1984$Transform.tock
+method: public static void art.Test1984$Transform.tick()	ACCESS: public static boolean art.Test1984$Transform.tock
+method: public static void art.Test1984$Transform.tick()	MODIFY: public static boolean art.Test1984$Transform.tock	Set to: true
+method: public static void art.Test1984$Transform.tick()	ACCESS: public static int art.Test1984$Transform.count_down
+method: public static void art.Test1984$Transform.tick()	ACCESS: public static boolean art.Test1984$Transform.tock
+method: public static void art.Test1984$Transform.tick()	ACCESS: public static boolean art.Test1984$Transform.tock
+method: public static void art.Test1984$Transform.tick()	MODIFY: public static boolean art.Test1984$Transform.tock	Set to: false
+method: public static void art.Test1984$Transform.tick()	ACCESS: public static int art.Test1984$Transform.count_down
+method: public static void art.Test1984$Transform.tick()	MODIFY: public static int art.Test1984$Transform.count_down	Set to: 0
+method: public static void art.Test1984$Transform.tick()	ACCESS: public static int art.Test1984$Transform.count_down
+method: public static void art.Test1984$Transform.tick()	MODIFY: public static boolean art.Test1984$Transform.boom	Set to: true
+Dumping fields at end
+public static int art.Test1984$Transform.aaa_INITIAL=0
+public static boolean art.Test1984$Transform.boom=true
+public static int art.Test1984$Transform.count_down=0
+public static boolean art.Test1984$Transform.tock=false
diff --git a/test/1984-structural-redefine-field-trace/info.txt b/test/1984-structural-redefine-field-trace/info.txt
new file mode 100644
index 0000000..ded28c5
--- /dev/null
+++ b/test/1984-structural-redefine-field-trace/info.txt
@@ -0,0 +1 @@
+Tests field access and modification watches in JVMTI when target is structurally redefined.
diff --git a/test/1984-structural-redefine-field-trace/run b/test/1984-structural-redefine-field-trace/run
new file mode 100755
index 0000000..a36de16
--- /dev/null
+++ b/test/1984-structural-redefine-field-trace/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Ask for stack traces to be dumped to a file rather than to stdout.
+./default-run "$@" --jvmti --android-runtime-option -Xopaque-jni-ids:true
diff --git a/test/1984-structural-redefine-field-trace/src/Main.java b/test/1984-structural-redefine-field-trace/src/Main.java
new file mode 100644
index 0000000..415a85e
--- /dev/null
+++ b/test/1984-structural-redefine-field-trace/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1984.run();
+  }
+}
diff --git a/test/1984-structural-redefine-field-trace/src/art/Redefinition.java b/test/1984-structural-redefine-field-trace/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1984-structural-redefine-field-trace/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1984-structural-redefine-field-trace/src/art/Test1984.java b/test/1984-structural-redefine-field-trace/src/art/Test1984.java
new file mode 100644
index 0000000..a69d56e
--- /dev/null
+++ b/test/1984-structural-redefine-field-trace/src/art/Test1984.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Executable;
+import java.lang.reflect.Field;
+import java.util.Base64;
+
+public class Test1984 {
+  public static void notifyFieldModify(
+      Executable method, long location, Class<?> f_klass, Object target, Field f, Object value) {
+    System.out.println("method: " + method + "\tMODIFY: " + f + "\tSet to: " + value);
+  }
+
+  public static void notifyFieldAccess(
+      Executable method, long location, Class<?> f_klass, Object target, Field f) {
+    System.out.println("method: " + method + "\tACCESS: " + f);
+  }
+
+  public static class Transform {
+    public static int count_down = 2;
+    public static boolean boom = false;
+    public static boolean tock = false;
+
+    public static void tick() {
+      boolean tocked = tock;
+      tock = !tock;
+      if (tocked) {
+        count_down--;
+      }
+      if (count_down == 0) {
+        boom = true;
+      }
+    }
+  }
+
+  /* Base64 encoded dex file for.
+   * // NB The addition of aaa_INITIAL means the fields all have different offsets
+   * public static class Transform {
+   *   public static int aaa_INITIAL = 0;
+   *   public static int count_down = 2;
+   *   public static boolean boom = false;
+   *   public static boolean tock = false;
+   *   public static void tick() {
+   *     boolean tocked = tock;
+   *     tock = !tock;
+   *     if (tocked) {
+   *       count_down--;
+   *     }
+   *     if (count_down == 0) {
+   *       boom = true;
+   *     }
+   *   }
+   * }
+   */
+  public static final byte[] REDEFINED_DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQDejZufbnVbJEn1/OfB3XmJPtVbudlWkvnsAwAAcAAAAHhWNBIAAAAAAAAAADQDAAAU"
+                  + "AAAAcAAAAAgAAADAAAAAAQAAAOAAAAAEAAAA7AAAAAQAAAAMAQAAAQAAACwBAACgAgAATAEAAPAB"
+                  + "AAD6AQAAAgIAAAUCAAAfAgAALwIAAFMCAABzAgAAhwIAAJYCAAChAgAApAIAAKcCAAC0AgAAwQIA"
+                  + "AMcCAADTAgAA2QIAAN8CAADlAgAAAgAAAAMAAAAEAAAABQAAAAYAAAAHAAAACgAAAAsAAAAKAAAA"
+                  + "BgAAAAAAAAABAAAADAAAAAEABwAOAAAAAQAAAA8AAAABAAcAEgAAAAEAAAAAAAAAAQAAAAEAAAAB"
+                  + "AAAAEQAAAAUAAAABAAAAAQAAAAEAAAAFAAAAAAAAAAgAAADgAQAAFgMAAAAAAAACAAAABwMAAA0D"
+                  + "AAACAAAAAAAAAOwCAAALAAAAEgFnAQAAEiBnAAIAagEBAGoBAwAOAAAAAQABAAEAAAD0AgAABAAA"
+                  + "AHAQAwAAAA4AAwAAAAAAAAD5AgAAGwAAABIRYwIDAGMAAwA5ABQAARBqAAMAOAIIAGAAAgDYAAD/"
+                  + "ZwACAGAAAgA5AAQAagEBAA4AEgAo7gAATAEAAAAAAAAAAAAAAAAAAAg8Y2xpbml0PgAGPGluaXQ+"
+                  + "AAFJABhMYXJ0L1Rlc3QxOTg0JFRyYW5zZm9ybTsADkxhcnQvVGVzdDE5ODQ7ACJMZGFsdmlrL2Fu"
+                  + "bm90YXRpb24vRW5jbG9zaW5nQ2xhc3M7AB5MZGFsdmlrL2Fubm90YXRpb24vSW5uZXJDbGFzczsA"
+                  + "EkxqYXZhL2xhbmcvT2JqZWN0OwANVGVzdDE5ODQuamF2YQAJVHJhbnNmb3JtAAFWAAFaAAthYWFf"
+                  + "SU5JVElBTAALYWNjZXNzRmxhZ3MABGJvb20ACmNvdW50X2Rvd24ABG5hbWUABHRpY2sABHRvY2sA"
+                  + "BXZhbHVlAAgABx0tPC0ABwAHDgANAAcdLXgtaksuAnkdAAIDARMYAgIEAg0ECRAXCQQAAwAACQEJ"
+                  + "AQkBCQCIgATYAgGBgASAAwEJmAMAAA8AAAAAAAAAAQAAAAAAAAABAAAAFAAAAHAAAAACAAAACAAA"
+                  + "AMAAAAADAAAAAQAAAOAAAAAEAAAABAAAAOwAAAAFAAAABAAAAAwBAAAGAAAAAQAAACwBAAADEAAA"
+                  + "AQAAAEwBAAABIAAAAwAAAFgBAAAGIAAAAQAAAOABAAACIAAAFAAAAPABAAADIAAAAwAAAOwCAAAE"
+                  + "IAAAAgAAAAcDAAAAIAAAAQAAABYDAAAAEAAAAQAAADQDAAA=");
+
+  public static void run() throws Exception {
+    System.out.println("Dumping fields at start");
+    for (Field f : Transform.class.getDeclaredFields()) {
+      System.out.println(f.toString() + "=" + f.get(null));
+    }
+    Trace.disableTracing(Thread.currentThread());
+    Trace.enableFieldTracing(
+        Test1984.class,
+        Test1984.class.getDeclaredMethod(
+            "notifyFieldAccess",
+            Executable.class,
+            Long.TYPE,
+            Class.class,
+            Object.class,
+            Field.class),
+        Test1984.class.getDeclaredMethod(
+            "notifyFieldModify",
+            Executable.class,
+            Long.TYPE,
+            Class.class,
+            Object.class,
+            Field.class,
+            Object.class),
+        Thread.currentThread());
+    for (Field f : Transform.class.getDeclaredFields()) {
+      Trace.watchFieldAccess(f);
+      Trace.watchFieldModification(f);
+    }
+    // count_down = 2
+    Transform.tick(); // count_down = 2
+    Transform.tick(); // count_down = 1
+    System.out.println("REDEFINING TRANSFORM CLASS");
+    Redefinition.doCommonStructuralClassRedefinition(Transform.class, REDEFINED_DEX_BYTES);
+    Transform.tick(); // count_down = 1
+    Transform.tick(); // count_down = 0
+    System.out.println("Dumping fields at end");
+    for (Field f : Transform.class.getDeclaredFields()) {
+      System.out.println(f.toString() + "=" + f.get(null));
+    }
+    // Turn off tracing so we don't have to deal with print internals.
+    Trace.disableTracing(Thread.currentThread());
+  }
+}
diff --git a/test/1984-structural-redefine-field-trace/src/art/Trace.java b/test/1984-structural-redefine-field-trace/src/art/Trace.java
new file mode 120000
index 0000000..5d9b44b
--- /dev/null
+++ b/test/1984-structural-redefine-field-trace/src/art/Trace.java
@@ -0,0 +1 @@
+../../../jvmti-common/Trace.java
\ No newline at end of file
diff --git a/test/1985-structural-redefine-stack-scope/expected.txt b/test/1985-structural-redefine-stack-scope/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/1985-structural-redefine-stack-scope/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/1985-structural-redefine-stack-scope/info.txt b/test/1985-structural-redefine-stack-scope/info.txt
new file mode 100644
index 0000000..a108013
--- /dev/null
+++ b/test/1985-structural-redefine-stack-scope/info.txt
@@ -0,0 +1 @@
+Tests StackReflectiveHandleScope.
diff --git a/test/1985-structural-redefine-stack-scope/run b/test/1985-structural-redefine-stack-scope/run
new file mode 100755
index 0000000..a36de16
--- /dev/null
+++ b/test/1985-structural-redefine-stack-scope/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Ask for stack traces to be dumped to a file rather than to stdout.
+./default-run "$@" --jvmti --android-runtime-option -Xopaque-jni-ids:true
diff --git a/test/1985-structural-redefine-stack-scope/src/Main.java b/test/1985-structural-redefine-stack-scope/src/Main.java
new file mode 100644
index 0000000..cb86c3d
--- /dev/null
+++ b/test/1985-structural-redefine-stack-scope/src/Main.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import art.Redefinition;
+import java.lang.invoke.*;
+import java.lang.reflect.Field;
+import java.util.Base64;
+
+public class Main {
+  public static final class Transform {
+    static {
+    }
+
+    public static Object foo = null;
+  }
+
+  /**
+   * Base64 encoded dex bytes for:
+   * public static final class Transform {
+   *   static {}
+   *   public static Object bar = null;
+   *   public static Object foo = null;
+   * }
+   */
+  public static final byte[] DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQCjkRjcSr1RJO8FnnCjHV/8h6keJP/+P3WQAwAAcAAAAHhWNBIAAAAAAAAAANgCAAAQ"
+                  + "AAAAcAAAAAYAAACwAAAAAQAAAMgAAAACAAAA1AAAAAMAAADkAAAAAQAAAPwAAAB0AgAAHAEAAFwB"
+                  + "AABmAQAAbgEAAIABAACIAQAArAEAAMwBAADgAQAA6wEAAPYBAAD5AQAABgIAAAsCAAAQAgAAFgIA"
+                  + "AB0CAAACAAAAAwAAAAQAAAAFAAAABgAAAAkAAAAJAAAABQAAAAAAAAAAAAQACwAAAAAABAAMAAAA"
+                  + "AAAAAAAAAAAAAAAAAQAAAAQAAAABAAAAAAAAABEAAAAEAAAAAAAAAAcAAADIAgAApAIAAAAAAAAB"
+                  + "AAAAAAAAAFABAAAGAAAAEgBpAAAAaQABAA4AAQABAAEAAABVAQAABAAAAHAQAgAAAA4ABwAOPAAF"
+                  + "AA4AAAAACDxjbGluaXQ+AAY8aW5pdD4AEExNYWluJFRyYW5zZm9ybTsABkxNYWluOwAiTGRhbHZp"
+                  + "ay9hbm5vdGF0aW9uL0VuY2xvc2luZ0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0aW9uL0lubmVyQ2xh"
+                  + "c3M7ABJMamF2YS9sYW5nL09iamVjdDsACU1haW4uamF2YQAJVHJhbnNmb3JtAAFWAAthY2Nlc3NG"
+                  + "bGFncwADYmFyAANmb28ABG5hbWUABXZhbHVlAHZ+fkQ4eyJjb21waWxhdGlvbi1tb2RlIjoiZGVi"
+                  + "dWciLCJtaW4tYXBpIjoxLCJzaGEtMSI6IjI4YmNlZjUwYWM4NTk3Y2YyMmU4OTJiMWJjM2EzYjky"
+                  + "Yjc0ZTcwZTkiLCJ2ZXJzaW9uIjoiMS42LjMyLWRldiJ9AAICAQ4YAQIDAgoEGQ0XCAIAAgAACQEJ"
+                  + "AIiABJwCAYGABLgCAAAAAAIAAACVAgAAmwIAALwCAAAAAAAAAAAAAAAAAAAPAAAAAAAAAAEAAAAA"
+                  + "AAAAAQAAABAAAABwAAAAAgAAAAYAAACwAAAAAwAAAAEAAADIAAAABAAAAAIAAADUAAAABQAAAAMA"
+                  + "AADkAAAABgAAAAEAAAD8AAAAASAAAAIAAAAcAQAAAyAAAAIAAABQAQAAAiAAABAAAABcAQAABCAA"
+                  + "AAIAAACVAgAAACAAAAEAAACkAgAAAxAAAAIAAAC4AgAABiAAAAEAAADIAgAAABAAAAEAAADYAgAA");
+
+  public static void assertEquals(Object a, Object b) {
+    if (a != b) {
+      throw new Error("Expected " + b + ", got " + a);
+    }
+  }
+
+  public static void main(String[] args) throws Exception, Throwable {
+    System.loadLibrary(args[0]);
+    Field f = Transform.class.getDeclaredField("foo");
+    Transform.foo = "THIS IS A FOO VALUE";
+    assertEquals(f.get(null), Transform.foo);
+    MethodHandle j =
+        NativeFieldScopeCheck(
+            f,
+            () -> {
+              Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+            });
+    assertEquals(j.invokeExact(), Transform.foo);
+  }
+
+  // Hold the field as a ArtField, run the 'test' function, turn the ArtField into a MethodHandle
+  // directly and return that.
+  public static native MethodHandle NativeFieldScopeCheck(Field in, Runnable test);
+}
diff --git a/test/1985-structural-redefine-stack-scope/src/art/Redefinition.java b/test/1985-structural-redefine-stack-scope/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1985-structural-redefine-stack-scope/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1985-structural-redefine-stack-scope/stack_scope.cc b/test/1985-structural-redefine-stack-scope/stack_scope.cc
new file mode 100644
index 0000000..5c5215b
--- /dev/null
+++ b/test/1985-structural-redefine-stack-scope/stack_scope.cc
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdio>
+#include <memory>
+#include <mutex>
+#include <string>
+#include <vector>
+
+#include "class_linker.h"
+#include "class_root.h"
+#include "jni.h"
+#include "jni/jni_internal.h"
+#include "mirror/class.h"
+#include "mirror/method_handle_impl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-alloc-inl.h"
+#include "reflection.h"
+#include "reflective_handle.h"
+#include "reflective_handle_scope-inl.h"
+#include "runtime.h"
+#include "scoped_thread_state_change-inl.h"
+#include "thread-inl.h"
+
+namespace art {
+namespace Test1985StructuralRedefineStackScope {
+
+extern "C" JNICALL jobject JNIEXPORT Java_Main_NativeFieldScopeCheck(JNIEnv* env,
+                                                                     jclass,
+                                                                     jobject field,
+                                                                     jobject runnable) {
+  jfieldID fid = env->FromReflectedField(field);
+  jclass runnable_klass = env->FindClass("java/lang/Runnable");
+  jmethodID run = env->GetMethodID(runnable_klass, "run", "()V");
+  ScopedObjectAccess soa(Thread::Current());
+  StackHandleScope<4> hs(soa.Self());
+  StackArtFieldHandleScope<1> fhs(soa.Self());
+  StackArtFieldHandleScope<1> bhs(soa.Self());
+  ReflectiveHandle<ArtField> rf(fhs.NewHandle(jni::DecodeArtField(fid)));
+  ReflectiveHandle<ArtField> bf(bhs.NewHandle(jni::DecodeArtField(fid)));
+  ArtField* pre_ptr = rf.Get();
+  {
+    ScopedThreadSuspension sts(soa.Self(), ThreadState::kNative);
+    // Upcall to perform redefinition.
+    env->CallVoidMethod(runnable, run);
+  }
+  Handle<mirror::ObjectArray<mirror::Class>> mt_arr(
+      hs.NewHandle(mirror::ObjectArray<mirror::Class>::Alloc(
+          soa.Self(),
+          Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(),
+                                                               GetClassRoot<mirror::Class>()),
+          0)));
+  Handle<mirror::MethodType> mt(hs.NewHandle(mirror::MethodType::Create(
+      soa.Self(), hs.NewHandle(GetClassRoot<mirror::Object>()), mt_arr)));
+  Handle<mirror::MethodHandleImpl> mhi(hs.NewHandle(
+      mirror::MethodHandleImpl::Create(soa.Self(),
+                                       reinterpret_cast<uintptr_t>(rf.Get()),
+                                       (rf->IsStatic() ? mirror::MethodHandle::Kind::kStaticGet
+                                                       : mirror::MethodHandle::Kind::kInstanceGet),
+                                       mt)));
+  CHECK_EQ(rf.Get(), bf.Get()) << "rf: " << rf->PrettyField() << " bf: " << bf->PrettyField();
+  // TODO Modify this to work for when run doesn't cause a change.
+  CHECK_NE(pre_ptr, rf.Get()) << "pre_ptr: " << pre_ptr->PrettyField()
+                              << " rf: " << rf->PrettyField();
+  CHECK_EQ(fid, jni::EncodeArtField(rf));
+  return soa.AddLocalReference<jobject>(mhi.Get());
+}
+
+}  // namespace Test1985StructuralRedefineStackScope
+}  // namespace art
diff --git a/test/1986-structural-redefine-multi-thread-stack-scope/expected.txt b/test/1986-structural-redefine-multi-thread-stack-scope/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/1986-structural-redefine-multi-thread-stack-scope/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/1986-structural-redefine-multi-thread-stack-scope/info.txt b/test/1986-structural-redefine-multi-thread-stack-scope/info.txt
new file mode 100644
index 0000000..184bd72
--- /dev/null
+++ b/test/1986-structural-redefine-multi-thread-stack-scope/info.txt
@@ -0,0 +1 @@
+Tests StackReflectiveHandleScope works when there are several all in different threads.
diff --git a/test/1986-structural-redefine-multi-thread-stack-scope/run b/test/1986-structural-redefine-multi-thread-stack-scope/run
new file mode 100755
index 0000000..a36de16
--- /dev/null
+++ b/test/1986-structural-redefine-multi-thread-stack-scope/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Ask for stack traces to be dumped to a file rather than to stdout.
+./default-run "$@" --jvmti --android-runtime-option -Xopaque-jni-ids:true
diff --git a/test/1986-structural-redefine-multi-thread-stack-scope/src/Main.java b/test/1986-structural-redefine-multi-thread-stack-scope/src/Main.java
new file mode 100644
index 0000000..23b1656
--- /dev/null
+++ b/test/1986-structural-redefine-multi-thread-stack-scope/src/Main.java
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import art.Redefinition;
+import java.lang.invoke.*;
+import java.lang.reflect.Field;
+import java.util.Base64;
+import java.util.concurrent.CountDownLatch;
+
+public class Main {
+  public static final class Transform {
+    static {
+    }
+
+    public static Object foo = null;
+  }
+
+  /**
+   * Base64 encoded dex bytes for:
+   *
+   * public static final class Transform {
+   *   static {}
+   *   public static Object bar = null;
+   *   public static Object foo = null;
+   * }
+   */
+  public static final byte[] DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQCjkRjcSr1RJO8FnnCjHV/8h6keJP/+P3WQAwAAcAAAAHhWNBIAAAAAAAAAANgCAAAQ"
+                  + "AAAAcAAAAAYAAACwAAAAAQAAAMgAAAACAAAA1AAAAAMAAADkAAAAAQAAAPwAAAB0AgAAHAEAAFwB"
+                  + "AABmAQAAbgEAAIABAACIAQAArAEAAMwBAADgAQAA6wEAAPYBAAD5AQAABgIAAAsCAAAQAgAAFgIA"
+                  + "AB0CAAACAAAAAwAAAAQAAAAFAAAABgAAAAkAAAAJAAAABQAAAAAAAAAAAAQACwAAAAAABAAMAAAA"
+                  + "AAAAAAAAAAAAAAAAAQAAAAQAAAABAAAAAAAAABEAAAAEAAAAAAAAAAcAAADIAgAApAIAAAAAAAAB"
+                  + "AAAAAAAAAFABAAAGAAAAEgBpAAAAaQABAA4AAQABAAEAAABVAQAABAAAAHAQAgAAAA4ABwAOPAAF"
+                  + "AA4AAAAACDxjbGluaXQ+AAY8aW5pdD4AEExNYWluJFRyYW5zZm9ybTsABkxNYWluOwAiTGRhbHZp"
+                  + "ay9hbm5vdGF0aW9uL0VuY2xvc2luZ0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0aW9uL0lubmVyQ2xh"
+                  + "c3M7ABJMamF2YS9sYW5nL09iamVjdDsACU1haW4uamF2YQAJVHJhbnNmb3JtAAFWAAthY2Nlc3NG"
+                  + "bGFncwADYmFyAANmb28ABG5hbWUABXZhbHVlAHZ+fkQ4eyJjb21waWxhdGlvbi1tb2RlIjoiZGVi"
+                  + "dWciLCJtaW4tYXBpIjoxLCJzaGEtMSI6IjI4YmNlZjUwYWM4NTk3Y2YyMmU4OTJiMWJjM2EzYjky"
+                  + "Yjc0ZTcwZTkiLCJ2ZXJzaW9uIjoiMS42LjMyLWRldiJ9AAICAQ4YAQIDAgoEGQ0XCAIAAgAACQEJ"
+                  + "AIiABJwCAYGABLgCAAAAAAIAAACVAgAAmwIAALwCAAAAAAAAAAAAAAAAAAAPAAAAAAAAAAEAAAAA"
+                  + "AAAAAQAAABAAAABwAAAAAgAAAAYAAACwAAAAAwAAAAEAAADIAAAABAAAAAIAAADUAAAABQAAAAMA"
+                  + "AADkAAAABgAAAAEAAAD8AAAAASAAAAIAAAAcAQAAAyAAAAIAAABQAQAAAiAAABAAAABcAQAABCAA"
+                  + "AAIAAACVAgAAACAAAAEAAACkAgAAAxAAAAIAAAC4AgAABiAAAAEAAADIAgAAABAAAAEAAADYAgAA");
+
+  public static void assertEquals(Object a, Object b) {
+    if (a != b) {
+      throw new Error("Expected " + b + ", got " + a);
+    }
+  }
+
+  public static void assertAllEquals(Object[] a, Object b) {
+    boolean failed = false;
+    String msg = "";
+    for (int i = 0; i < a.length; i++) {
+      if (a[i] != b) {
+        failed = true;
+        msg += "Expected " + b + ", got a[" + i + "] (" + a[i] + "), ";
+      }
+    }
+    if (failed) {
+      throw new Error(msg);
+    }
+  }
+
+  public static void main(String[] args) throws Exception, Throwable {
+    System.loadLibrary(args[0]);
+    Field f = Transform.class.getDeclaredField("foo");
+    Transform.foo = "THIS IS A FOO VALUE";
+    assertEquals(f.get(null), Transform.foo);
+    final int num_threads = 10;
+    Object[] results = new Object[num_threads];
+    Thread[] threads = new Thread[num_threads];
+    CountDownLatch start_latch = new CountDownLatch(num_threads);
+    CountDownLatch continue_latch = new CountDownLatch(1);
+    for (int i = 0; i < num_threads; i++) {
+      final int id = i;
+      threads[id] =
+          new Thread(
+              () -> {
+                try {
+                  MethodHandle mh =
+                      NativeFieldScopeCheck(
+                          f,
+                          () -> {
+                            try {
+                              start_latch.countDown();
+                              continue_latch.await();
+                            } catch (Exception e) {
+                              throw new Error("failed!", e);
+                            }
+                          });
+                  results[id] = mh.invokeExact();
+                } catch (Throwable t) {
+                  throw new Error("Failed", t);
+                }
+              },
+              "Target thread " + id);
+      threads[id].start();
+    }
+    start_latch.await();
+    Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+    continue_latch.countDown();
+    for (Thread t : threads) {
+      t.join();
+    }
+    assertAllEquals(results, Transform.foo);
+  }
+
+  // Hold the field as a ArtField, run the 'test' function, turn the ArtField into a MethodHandle
+  // directly and return that.
+  public static native MethodHandle NativeFieldScopeCheck(Field in, Runnable test);
+}
diff --git a/test/1986-structural-redefine-multi-thread-stack-scope/src/art/Redefinition.java b/test/1986-structural-redefine-multi-thread-stack-scope/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1986-structural-redefine-multi-thread-stack-scope/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1987-structural-redefine-recursive-stack-scope/expected.txt b/test/1987-structural-redefine-recursive-stack-scope/expected.txt
new file mode 100644
index 0000000..288bed8
--- /dev/null
+++ b/test/1987-structural-redefine-recursive-stack-scope/expected.txt
@@ -0,0 +1,12 @@
+JNI_OnLoad called
+Foo value is THIS IS A FOO VALUE
+Result at depth 0: THIS IS A FOO VALUE
+Result at depth 1: THIS IS A FOO VALUE
+Result at depth 2: THIS IS A FOO VALUE
+Result at depth 3: THIS IS A FOO VALUE
+Result at depth 4: THIS IS A FOO VALUE
+Result at depth 5: THIS IS A FOO VALUE
+Result at depth 6: THIS IS A FOO VALUE
+Result at depth 7: THIS IS A FOO VALUE
+Result at depth 8: THIS IS A FOO VALUE
+Result at depth 9: THIS IS A FOO VALUE
diff --git a/test/1987-structural-redefine-recursive-stack-scope/info.txt b/test/1987-structural-redefine-recursive-stack-scope/info.txt
new file mode 100644
index 0000000..f841897
--- /dev/null
+++ b/test/1987-structural-redefine-recursive-stack-scope/info.txt
@@ -0,0 +1 @@
+Tests StackReflectiveHandleScope works when there are several all in different recursive frames.
diff --git a/test/1987-structural-redefine-recursive-stack-scope/run b/test/1987-structural-redefine-recursive-stack-scope/run
new file mode 100755
index 0000000..a36de16
--- /dev/null
+++ b/test/1987-structural-redefine-recursive-stack-scope/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Ask for stack traces to be dumped to a file rather than to stdout.
+./default-run "$@" --jvmti --android-runtime-option -Xopaque-jni-ids:true
diff --git a/test/1987-structural-redefine-recursive-stack-scope/src/Main.java b/test/1987-structural-redefine-recursive-stack-scope/src/Main.java
new file mode 100644
index 0000000..2c53057
--- /dev/null
+++ b/test/1987-structural-redefine-recursive-stack-scope/src/Main.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import art.Redefinition;
+import java.lang.invoke.*;
+import java.lang.reflect.Field;
+import java.util.Base64;
+
+public class Main {
+  public static final class Transform {
+    static {
+    }
+
+    public static Object foo = null;
+  }
+
+  /* Base64 encoded dex bytes for:
+   *
+   * public static final class Transform {
+   *   static {}
+   *   public static Object bar = null;
+   *   public static Object foo = null;
+   * }
+   */
+  public static final byte[] DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQCjkRjcSr1RJO8FnnCjHV/8h6keJP/+P3WQAwAAcAAAAHhWNBIAAAAAAAAAANgCAAAQ"
+                  + "AAAAcAAAAAYAAACwAAAAAQAAAMgAAAACAAAA1AAAAAMAAADkAAAAAQAAAPwAAAB0AgAAHAEAAFwB"
+                  + "AABmAQAAbgEAAIABAACIAQAArAEAAMwBAADgAQAA6wEAAPYBAAD5AQAABgIAAAsCAAAQAgAAFgIA"
+                  + "AB0CAAACAAAAAwAAAAQAAAAFAAAABgAAAAkAAAAJAAAABQAAAAAAAAAAAAQACwAAAAAABAAMAAAA"
+                  + "AAAAAAAAAAAAAAAAAQAAAAQAAAABAAAAAAAAABEAAAAEAAAAAAAAAAcAAADIAgAApAIAAAAAAAAB"
+                  + "AAAAAAAAAFABAAAGAAAAEgBpAAAAaQABAA4AAQABAAEAAABVAQAABAAAAHAQAgAAAA4ABwAOPAAF"
+                  + "AA4AAAAACDxjbGluaXQ+AAY8aW5pdD4AEExNYWluJFRyYW5zZm9ybTsABkxNYWluOwAiTGRhbHZp"
+                  + "ay9hbm5vdGF0aW9uL0VuY2xvc2luZ0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0aW9uL0lubmVyQ2xh"
+                  + "c3M7ABJMamF2YS9sYW5nL09iamVjdDsACU1haW4uamF2YQAJVHJhbnNmb3JtAAFWAAthY2Nlc3NG"
+                  + "bGFncwADYmFyAANmb28ABG5hbWUABXZhbHVlAHZ+fkQ4eyJjb21waWxhdGlvbi1tb2RlIjoiZGVi"
+                  + "dWciLCJtaW4tYXBpIjoxLCJzaGEtMSI6IjI4YmNlZjUwYWM4NTk3Y2YyMmU4OTJiMWJjM2EzYjky"
+                  + "Yjc0ZTcwZTkiLCJ2ZXJzaW9uIjoiMS42LjMyLWRldiJ9AAICAQ4YAQIDAgoEGQ0XCAIAAgAACQEJ"
+                  + "AIiABJwCAYGABLgCAAAAAAIAAACVAgAAmwIAALwCAAAAAAAAAAAAAAAAAAAPAAAAAAAAAAEAAAAA"
+                  + "AAAAAQAAABAAAABwAAAAAgAAAAYAAACwAAAAAwAAAAEAAADIAAAABAAAAAIAAADUAAAABQAAAAMA"
+                  + "AADkAAAABgAAAAEAAAD8AAAAASAAAAIAAAAcAQAAAyAAAAIAAABQAQAAAiAAABAAAABcAQAABCAA"
+                  + "AAIAAACVAgAAACAAAAEAAACkAgAAAxAAAAIAAAC4AgAABiAAAAEAAADIAgAAABAAAAEAAADYAgAA");
+
+  public static void main(String[] args) throws Exception, Throwable {
+    System.loadLibrary(args[0]);
+    Field f = Transform.class.getDeclaredField("foo");
+    Transform.foo = "THIS IS A FOO VALUE";
+    System.out.println("Foo value is " + f.get(null));
+    final int max_depth = 10;
+    Object[] results = new Object[max_depth];
+    Runnable res =
+        () -> {
+          Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+        };
+    for (int i = 0; i < max_depth; i++) {
+      final Runnable next = res;
+      final int id = i;
+      res =
+          () -> {
+            try {
+              results[id] = NativeFieldScopeCheck(f, next).invokeExact();
+            } catch (Throwable t) {
+              throw new Error("Failed!", t);
+            }
+          };
+    }
+    res.run();
+    for (int i = 0; i < max_depth; i++) {
+      System.out.println("Result at depth " + i + ": " + results[i]);
+    }
+  }
+
+  // Hold the field as a ArtField, run the 'test' function, turn the ArtField into a MethodHandle
+  // directly and return that.
+  public static native MethodHandle NativeFieldScopeCheck(Field in, Runnable test);
+}
diff --git a/test/1987-structural-redefine-recursive-stack-scope/src/art/Redefinition.java b/test/1987-structural-redefine-recursive-stack-scope/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1987-structural-redefine-recursive-stack-scope/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1988-multi-structural-redefine/expected.txt b/test/1988-multi-structural-redefine/expected.txt
new file mode 100644
index 0000000..00aea88
--- /dev/null
+++ b/test/1988-multi-structural-redefine/expected.txt
@@ -0,0 +1,5 @@
+hello - Transform 1
+hello - Transform 2
+Redefining both class art.Test1988$Transform1 and class art.Test1988$Transform2 to use each other.
+Transform1 says hi and Transform2 says bye!
+Transform2 says hi and Transform1 says bye!
diff --git a/test/1988-multi-structural-redefine/info.txt b/test/1988-multi-structural-redefine/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/1988-multi-structural-redefine/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/1988-multi-structural-redefine/run b/test/1988-multi-structural-redefine/run
new file mode 100755
index 0000000..a36de16
--- /dev/null
+++ b/test/1988-multi-structural-redefine/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Ask for stack traces to be dumped to a file rather than to stdout.
+./default-run "$@" --jvmti --android-runtime-option -Xopaque-jni-ids:true
diff --git a/test/1988-multi-structural-redefine/src/Main.java b/test/1988-multi-structural-redefine/src/Main.java
new file mode 100644
index 0000000..7e95671
--- /dev/null
+++ b/test/1988-multi-structural-redefine/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1988.run();
+  }
+}
diff --git a/test/1988-multi-structural-redefine/src/art/Redefinition.java b/test/1988-multi-structural-redefine/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1988-multi-structural-redefine/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1988-multi-structural-redefine/src/art/Test1988.java b/test/1988-multi-structural-redefine/src/art/Test1988.java
new file mode 100644
index 0000000..6dab4da
--- /dev/null
+++ b/test/1988-multi-structural-redefine/src/art/Test1988.java
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+
+public class Test1988 {
+  static class Transform1 {
+    public static void sayHi() {
+      System.out.println("hello - Transform 1");
+    }
+  }
+  static class Transform2 {
+    public static void sayHi() {
+      System.out.println("hello - Transform 2");
+    }
+  }
+
+  /** Base64 encoded dex file for
+   *
+   * static class Trasnform1 {
+   *   public static void sayHi() {
+   *     System.out.println("Transform1 says hi and " + Transform2.getBye());
+   *   }
+   *   public static String getBye() {
+   *     return "Transform1 says bye!";
+   *   }
+   * }
+   */
+  public static final byte[] T1_BYTES = Base64.getDecoder().decode(
+    "ZGV4CjAzNQAU4pPI4BKgrMtz7s1Ogc8in1PQhazaRWBcBQAAcAAAAHhWNBIAAAAAAAAAAJgEAAAd" +
+    "AAAAcAAAAAsAAADkAAAABAAAABABAAABAAAAQAEAAAkAAABIAQAAAQAAAJABAACsAwAAsAEAAD4C" +
+    "AABGAgAASQIAAE0CAABoAgAAgwIAAJMCAAC3AgAA1wIAAO4CAAACAwAAFgMAADEDAABFAwAAVAMA" +
+    "AGADAAB2AwAAjwMAAJIDAACWAwAAowMAAKsDAACzAwAAuQMAAL4DAADHAwAAzgMAANgDAADfAwAA" +
+    "AwAAAAQAAAAFAAAABgAAAAcAAAAIAAAACQAAAAoAAAALAAAADAAAABEAAAABAAAABwAAAAAAAAAC" +
+    "AAAACAAAADgCAAARAAAACgAAAAAAAAASAAAACgAAADgCAAAJAAUAFwAAAAAAAgAAAAAAAAAAABUA" +
+    "AAAAAAIAGQAAAAEAAAAVAAAABQADABgAAAAGAAIAAAAAAAgAAgAAAAAACAABABQAAAAIAAAAGgAA" +
+    "AAAAAAAAAAAABgAAAAAAAAANAAAAiAQAAGUEAAAAAAAAAQAAAAAAAAAqAgAAAwAAABoADwARAAAA" +
+    "AQABAAEAAAAmAgAABAAAAHAQBQAAAA4ABAAAAAIAAAAuAgAAGwAAAGIAAABxAAMAAAAMASICCABw" +
+    "EAYAAgAaAxAAbiAHADIAbiAHABIAbhAIAAIADAFuIAQAEAAOAAYADgALAA4ACAAOARoPAAAAAAEA" +
+    "AAAHAAY8aW5pdD4AAUwAAkxMABlMYXJ0L1Rlc3QxOTg4JFRyYW5zZm9ybTE7ABlMYXJ0L1Rlc3Qx" +
+    "OTg4JFRyYW5zZm9ybTI7AA5MYXJ0L1Rlc3QxOTg4OwAiTGRhbHZpay9hbm5vdGF0aW9uL0VuY2xv" +
+    "c2luZ0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0aW9uL0lubmVyQ2xhc3M7ABVMamF2YS9pby9Qcmlu" +
+    "dFN0cmVhbTsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABlMamF2YS9s" +
+    "YW5nL1N0cmluZ0J1aWxkZXI7ABJMamF2YS9sYW5nL1N5c3RlbTsADVRlc3QxOTg4LmphdmEAClRy" +
+    "YW5zZm9ybTEAFFRyYW5zZm9ybTEgc2F5cyBieWUhABdUcmFuc2Zvcm0xIHNheXMgaGkgYW5kIAAB" +
+    "VgACVkwAC2FjY2Vzc0ZsYWdzAAZhcHBlbmQABmdldEJ5ZQAEbmFtZQADb3V0AAdwcmludGxuAAVz" +
+    "YXlIaQAIdG9TdHJpbmcABXZhbHVlAHV+fkQ4eyJjb21waWxhdGlvbi1tb2RlIjoiZGVidWciLCJt" +
+    "aW4tYXBpIjoxLCJzaGEtMSI6IjY4NjQ4NTU3NTM0MDJiYmFjODk2Nzc2YjAzN2RlYmJjOTM4YzQ5" +
+    "NTMiLCJ2ZXJzaW9uIjoiMS43LjYtZGV2In0AAgMBGxgCAgQCEwQIFhcOAAADAACAgATIAwEJsAMB" +
+    "CeADAAAAAAACAAAAVgQAAFwEAAB8BAAAAAAAAAAAAAAAAAAAEAAAAAAAAAABAAAAAAAAAAEAAAAd" +
+    "AAAAcAAAAAIAAAALAAAA5AAAAAMAAAAEAAAAEAEAAAQAAAABAAAAQAEAAAUAAAAJAAAASAEAAAYA" +
+    "AAABAAAAkAEAAAEgAAADAAAAsAEAAAMgAAADAAAAJgIAAAEQAAABAAAAOAIAAAIgAAAdAAAAPgIA" +
+    "AAQgAAACAAAAVgQAAAAgAAABAAAAZQQAAAMQAAACAAAAeAQAAAYgAAABAAAAiAQAAAAQAAABAAAA" +
+    "mAQAAA==");
+
+
+  /** Base64 encoded dex file for
+   *
+   * static class Trasnform2 {
+   *   public static void sayHi() {
+   *     System.out.println("Transform2 says hi and " + Transform1.getBye());
+   *   }
+   *   public static String getBye() {
+   *     return "Transform2 says bye!";
+   *   }
+   * }
+   */
+  public static final byte[] T2_BYTES = Base64.getDecoder().decode(
+    "ZGV4CjAzNQD94cwR+R7Yw7VMom5CwuQd5mZlsV2xrVFcBQAAcAAAAHhWNBIAAAAAAAAAAJgEAAAd" +
+    "AAAAcAAAAAsAAADkAAAABAAAABABAAABAAAAQAEAAAkAAABIAQAAAQAAAJABAACsAwAAsAEAAD4C" +
+    "AABGAgAASQIAAE0CAABoAgAAgwIAAJMCAAC3AgAA1wIAAO4CAAACAwAAFgMAADEDAABFAwAAVAMA" +
+    "AGADAAB2AwAAjwMAAJIDAACWAwAAowMAAKsDAACzAwAAuQMAAL4DAADHAwAAzgMAANgDAADfAwAA" +
+    "AwAAAAQAAAAFAAAABgAAAAcAAAAIAAAACQAAAAoAAAALAAAADAAAABEAAAABAAAABwAAAAAAAAAC" +
+    "AAAACAAAADgCAAARAAAACgAAAAAAAAASAAAACgAAADgCAAAJAAUAFwAAAAAAAAAVAAAAAQACAAAA" +
+    "AAABAAAAFQAAAAEAAgAZAAAABQADABgAAAAGAAIAAAAAAAgAAgAAAAAACAABABQAAAAIAAAAGgAA" +
+    "AAEAAAAAAAAABgAAAAAAAAANAAAAiAQAAGUEAAAAAAAAAQAAAAAAAAAqAgAAAwAAABoADwARAAAA" +
+    "AQABAAEAAAAmAgAABAAAAHAQBQAAAA4ABAAAAAIAAAAuAgAAGwAAAGIAAABxAAAAAAAMASICCABw" +
+    "EAYAAgAaAxAAbiAHADIAbiAHABIAbhAIAAIADAFuIAQAEAAOAA4ADgATAA4AEAAOARoPAAAAAAEA" +
+    "AAAHAAY8aW5pdD4AAUwAAkxMABlMYXJ0L1Rlc3QxOTg4JFRyYW5zZm9ybTE7ABlMYXJ0L1Rlc3Qx" +
+    "OTg4JFRyYW5zZm9ybTI7AA5MYXJ0L1Rlc3QxOTg4OwAiTGRhbHZpay9hbm5vdGF0aW9uL0VuY2xv" +
+    "c2luZ0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0aW9uL0lubmVyQ2xhc3M7ABVMamF2YS9pby9Qcmlu" +
+    "dFN0cmVhbTsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABlMamF2YS9s" +
+    "YW5nL1N0cmluZ0J1aWxkZXI7ABJMamF2YS9sYW5nL1N5c3RlbTsADVRlc3QxOTg4LmphdmEAClRy" +
+    "YW5zZm9ybTIAFFRyYW5zZm9ybTIgc2F5cyBieWUhABdUcmFuc2Zvcm0yIHNheXMgaGkgYW5kIAAB" +
+    "VgACVkwAC2FjY2Vzc0ZsYWdzAAZhcHBlbmQABmdldEJ5ZQAEbmFtZQADb3V0AAdwcmludGxuAAVz" +
+    "YXlIaQAIdG9TdHJpbmcABXZhbHVlAHV+fkQ4eyJjb21waWxhdGlvbi1tb2RlIjoiZGVidWciLCJt" +
+    "aW4tYXBpIjoxLCJzaGEtMSI6IjY4NjQ4NTU3NTM0MDJiYmFjODk2Nzc2YjAzN2RlYmJjOTM4YzQ5" +
+    "NTMiLCJ2ZXJzaW9uIjoiMS43LjYtZGV2In0AAgMBGxgCAgQCEwQIFhcOAAADAAGAgATIAwEJsAMB" +
+    "CeADAAAAAAACAAAAVgQAAFwEAAB8BAAAAAAAAAAAAAAAAAAAEAAAAAAAAAABAAAAAAAAAAEAAAAd" +
+    "AAAAcAAAAAIAAAALAAAA5AAAAAMAAAAEAAAAEAEAAAQAAAABAAAAQAEAAAUAAAAJAAAASAEAAAYA" +
+    "AAABAAAAkAEAAAEgAAADAAAAsAEAAAMgAAADAAAAJgIAAAEQAAABAAAAOAIAAAIgAAAdAAAAPgIA" +
+    "AAQgAAACAAAAVgQAAAAgAAABAAAAZQQAAAMQAAACAAAAeAQAAAYgAAABAAAAiAQAAAAQAAABAAAA" +
+    "mAQAAA==");
+
+
+  public static void run() {
+    doTest();
+  }
+
+  public static void doTest() {
+    Transform1.sayHi();
+    Transform2.sayHi();
+    System.out.println(
+        "Redefining both " + Transform1.class + " and " + Transform2.class + " to use each other.");
+    Redefinition.doMultiStructuralClassRedefinition(
+        new Redefinition.DexOnlyClassDefinition(Transform1.class, T1_BYTES),
+        new Redefinition.DexOnlyClassDefinition(Transform2.class, T2_BYTES));
+    Transform1.sayHi();
+    Transform2.sayHi();
+  }
+}
diff --git a/test/1989-transform-bad-monitor/expected.txt b/test/1989-transform-bad-monitor/expected.txt
new file mode 100644
index 0000000..65ec72d5
--- /dev/null
+++ b/test/1989-transform-bad-monitor/expected.txt
@@ -0,0 +1,6 @@
+hello without locks
+Goodbye before unlock
+Goodbye after unlock
+Got exception of type class java.lang.IllegalMonitorStateException
+Make sure locks aren't held
+Locks are good.
diff --git a/test/1989-transform-bad-monitor/info.txt b/test/1989-transform-bad-monitor/info.txt
new file mode 100644
index 0000000..0056464
--- /dev/null
+++ b/test/1989-transform-bad-monitor/info.txt
@@ -0,0 +1,6 @@
+Tests basic functions in the jvmti plugin.
+
+b/142876078
+
+This tests that redefining a method to have unbalanced locks doesn't cause issues and the method
+is given lock-counting and not compiled.
diff --git a/test/1989-transform-bad-monitor/run b/test/1989-transform-bad-monitor/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/1989-transform-bad-monitor/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/1989-transform-bad-monitor/src/Main.java b/test/1989-transform-bad-monitor/src/Main.java
new file mode 100644
index 0000000..9c61e89
--- /dev/null
+++ b/test/1989-transform-bad-monitor/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1989.run();
+  }
+}
diff --git a/test/1989-transform-bad-monitor/src/art/Redefinition.java b/test/1989-transform-bad-monitor/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1989-transform-bad-monitor/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1989-transform-bad-monitor/src/art/Test1989.java b/test/1989-transform-bad-monitor/src/art/Test1989.java
new file mode 100644
index 0000000..fb16c22
--- /dev/null
+++ b/test/1989-transform-bad-monitor/src/art/Test1989.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+public class Test1989 {
+
+  static class Transform {
+    public void sayHi(Object l_first, Object l_second) {
+      System.out.println("hello without locks");
+    }
+  }
+
+  /**
+   * base64 encoded class/dex file for
+   * class Transform {
+   *   public void sayHi(Object l_first, Object l_second) {
+   *    monitor-enter l_first
+   *    monitor-enter l_second
+   *    System.out.println("Goodbye before unlock");
+   *    monitor-exit l_second
+   *    System.out.println("Goodbye after unlock");
+   *   }
+   * }
+   */
+  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+"yv66vgADAC0AHgEAFEdvb2RieWUgYWZ0ZXIgdW5sb2NrDAAYAB0BABBqYXZhL2xhbmcvT2JqZWN0" +
+"AQAGPGluaXQ+BwADDAAEAAkHABEBABZhcnQvVGVzdDE5ODkkVHJhbnNmb3JtAQADKClWBwAVAQAE" +
+"Q29kZQgAHAkACgACAQANVGVzdDE5ODkuamF2YQEAClNvdXJjZUZpbGUMABIAGwEAE2phdmEvaW8v" +
+"UHJpbnRTdHJlYW0BAAdwcmludGxuCgAFAAYBAAVzYXlIaQEAEGphdmEvbGFuZy9TeXN0ZW0IAAEK" +
+"AAcAEAEAA291dAcACAEAJyhMamF2YS9sYW5nL09iamVjdDtMamF2YS9sYW5nL09iamVjdDspVgEA" +
+"FShMamF2YS9sYW5nL1N0cmluZzspVgEAFUdvb2RieWUgYmVmb3JlIHVubG9jawEAFUxqYXZhL2lv" +
+"L1ByaW50U3RyZWFtOwAgABkABQAAAAAAAgAAAAQACQABAAsAAAARAAEAAQAAAAUqtwATsQAAAAAA" +
+"AQAUABoAAQALAAAAIwACAAMAAAAXK8IswrIADRIMtgAXLMOyAA0SFrYAF7EAAAAAAAEADwAAAAIA" +
+"Dg==");
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+"ZGV4CjAzNQB5oAZwVUwJoMSgbr1BNffRcXjpPMVhYzgYBAAAcAAAAHhWNBIAAAAAAAAAAFQDAAAW" +
+"AAAAcAAAAAkAAADIAAAAAwAAAOwAAAABAAAAEAEAAAQAAAAYAQAAAQAAADgBAADAAgAAWAEAAFgB" +
+"AABgAQAAdgEAAI0BAACnAQAAtwEAANsBAAD7AQAAEgIAACYCAAA6AgAATgIAAF0CAABoAgAAawIA" +
+"AG8CAAB0AgAAgQIAAIcCAACMAgAAlQIAAJwCAAADAAAABAAAAAUAAAAGAAAABwAAAAgAAAAJAAAA" +
+"CgAAAA0AAAANAAAACAAAAAAAAAAPAAAACAAAAKQCAAAOAAAACAAAAKwCAAAHAAQAEgAAAAAAAAAA" +
+"AAAAAAABABQAAAAEAAIAEwAAAAUAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAsAAADYAgAARAMAAAAA" +
+"AAAGPGluaXQ+ABRHb29kYnllIGFmdGVyIHVubG9jawAVR29vZGJ5ZSBiZWZvcmUgdW5sb2NrABhM" +
+"YXJ0L1Rlc3QxOTg5JFRyYW5zZm9ybTsADkxhcnQvVGVzdDE5ODk7ACJMZGFsdmlrL2Fubm90YXRp" +
+"b24vRW5jbG9zaW5nQ2xhc3M7AB5MZGFsdmlrL2Fubm90YXRpb24vSW5uZXJDbGFzczsAFUxqYXZh" +
+"L2lvL1ByaW50U3RyZWFtOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsA" +
+"EkxqYXZhL2xhbmcvU3lzdGVtOwANVGVzdDE5ODkuamF2YQAJVHJhbnNmb3JtAAFWAAJWTAADVkxM" +
+"AAthY2Nlc3NGbGFncwAEbmFtZQADb3V0AAdwcmludGxuAAVzYXlIaQAFdmFsdWUAAAIAAAAFAAUA" +
+"AQAAAAYAAgMCEAQIERcMAgIBFRgBAAAAAAAAAAAAAAACAAAAuwIAALICAADMAgAAAAAAAAAAAAAA" +
+"AAAABgAOAAgCAAAOHh54H3gAAAEAAQABAAAA6AIAAAQAAABwEAMAAAAOAAUAAwACAAAA7AIAABIA" +
+"AAAdAx0EYgAAABoBAgBuIAIAEAAeBGIDAAAaBAEAbiACAEMADgAAAAEBAICABPgFAQGQBgAAEAAA" +
+"AAAAAAABAAAAAAAAAAEAAAAWAAAAcAAAAAIAAAAJAAAAyAAAAAMAAAADAAAA7AAAAAQAAAABAAAA" +
+"EAEAAAUAAAAEAAAAGAEAAAYAAAABAAAAOAEAAAIgAAAWAAAAWAEAAAEQAAACAAAApAIAAAQgAAAC" +
+"AAAAsgIAAAMQAAADAAAAxAIAAAYgAAABAAAA2AIAAAMgAAACAAAA6AIAAAEgAAACAAAA+AIAAAAg" +
+"AAABAAAARAMAAAAQAAABAAAAVAMAAA==");
+
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest(new Transform());
+  }
+
+  public static void doTest(Transform t) throws Exception {
+    Object a = new Object();
+    Object b = new Object();
+    t.sayHi(a, b);
+    Redefinition.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+    try {
+      t.sayHi(a, b);
+    } catch (Throwable e) {
+      System.out.println("Got exception of type " + e.getClass());
+    }
+    System.out.println("Make sure locks aren't held");
+    Thread thr = new Thread(() -> {
+      synchronized(a) {
+        synchronized (b) {
+          System.out.println("Locks are good.");
+        }
+      }
+    });
+    thr.start();
+    thr.join();
+  }
+}
diff --git a/test/1990-structural-bad-verify/expected.txt b/test/1990-structural-bad-verify/expected.txt
new file mode 100644
index 0000000..7478dda
--- /dev/null
+++ b/test/1990-structural-bad-verify/expected.txt
@@ -0,0 +1,2 @@
+hello
+I say hello and you say goodbye!
diff --git a/test/1990-structural-bad-verify/info.txt b/test/1990-structural-bad-verify/info.txt
new file mode 100644
index 0000000..f2ecd68
--- /dev/null
+++ b/test/1990-structural-bad-verify/info.txt
@@ -0,0 +1,6 @@
+Tests basic functions in the jvmti plugin.
+
+b/142876078
+
+This tests a crash that could occur which was caused by the dex-cache being in an unexpected
+state.
diff --git a/test/1990-structural-bad-verify/run b/test/1990-structural-bad-verify/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/1990-structural-bad-verify/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1990-structural-bad-verify/src/Main.java b/test/1990-structural-bad-verify/src/Main.java
new file mode 100644
index 0000000..5622925
--- /dev/null
+++ b/test/1990-structural-bad-verify/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1990.run();
+  }
+}
diff --git a/test/1990-structural-bad-verify/src/art/Redefinition.java b/test/1990-structural-bad-verify/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1990-structural-bad-verify/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1990-structural-bad-verify/src/art/Test1990.java b/test/1990-structural-bad-verify/src/art/Test1990.java
new file mode 100644
index 0000000..90034fc
--- /dev/null
+++ b/test/1990-structural-bad-verify/src/art/Test1990.java
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+public class Test1990 {
+
+  static class Transform {
+    public static void saySomething() {
+      System.out.println("hello");
+    }
+  }
+
+  /**
+   * base64 encoded class/dex file for
+   * static class Transform {
+   *   public static void saySomething() {
+   *    System.out.println("I say hello and " + sayGoodbye());
+   *   }
+   *   public static String sayGoodbye() {
+   *    return "you say goodbye!";
+   *   }
+   * }
+   */
+  // NB The actual dex codes are as follows. This is an explanation of the error this test checks.
+  //
+  // The exact order of instructions is important. Notice the 'invoke-static sayGoodbye'
+  // (instruction 0002) dominates the rest of the block. During the first (runnable) verification
+  // step the verifier will first check and verify there are no hard-failures in this class. Next it
+  // will realize it cannot find the sayGoodbye method on the loaded & resolved Transform class.
+  // This is (correctly) recognized as a soft-verification failure but then the verifier decides the
+  // rest of the method is dead-code. This means the verifier will not perform any of the
+  // soft-failure checks on the rest of the method (since control would never reach there).
+  //
+  // Later after performing the redefinition we do a reverify. At this time we held an exclusive
+  // mutator-lock though so it cannot resolve classes and will not add anything to the dex-cache.
+  // Here we can get past instruction 0002 and successfully determine the rest of the function is
+  // fine. In the process we filled in the methods into the dex-cache but not the classes. This
+  // caused this test to crash when run through the interpreter.
+  //
+  //     #2              : (in Lart/Test1990$Transform;)
+  //       name          : 'saySomething'
+  //       type          : '()V'
+  //       access        : 0x0009 (PUBLIC STATIC)
+  //       code          -
+  //       registers     : 4
+  //       ins           : 0
+  //       outs          : 2
+  //       insns size    : 27 16-bit code units
+  // 0001d0:                                        |[0001d0] art.Test1990$Transform.saySomething:()V
+  // 0001e0: 6200 0000                              |0000: sget-object v0, Ljava/lang/System;.out:Ljava/io/PrintStream; // field@0000
+  // 0001e4: 7100 0100 0000                         |0002: invoke-static {}, Lart/Test1990$Transform;.sayGoodbye:()Ljava/lang/String; // method@0001
+  // 0001ea: 0c01                                   |0005: move-result-object v1
+  // 0001ec: 2202 0700                              |0006: new-instance v2, Ljava/lang/StringBuilder; // type@0007
+  // 0001f0: 7010 0500 0200                         |0008: invoke-direct {v2}, Ljava/lang/StringBuilder;.<init>:()V // method@0005
+  // 0001f6: 1a03 0100                              |000b: const-string v3, "I say hello and " // string@0001
+  // 0001fa: 6e20 0600 3200                         |000d: invoke-virtual {v2, v3}, Ljava/lang/StringBuilder;.append:(Ljava/lang/String;)Ljava/lang/StringBuilder; // method@0006
+  // 000200: 6e20 0600 1200                         |0010: invoke-virtual {v2, v1}, Ljava/lang/StringBuilder;.append:(Ljava/lang/String;)Ljava/lang/StringBuilder; // method@0006
+  // 000206: 6e10 0700 0200                         |0013: invoke-virtual {v2}, Ljava/lang/StringBuilder;.toString:()Ljava/lang/String; // method@0007
+  // 00020c: 0c01                                   |0016: move-result-object v1
+  // 00020e: 6e20 0300 1000                         |0017: invoke-virtual {v0, v1}, Ljava/io/PrintStream;.println:(Ljava/lang/String;)V // method@0003
+  // 000214: 0e00                                   |001a: return-void
+  //       catches       : (none)
+  //       positions     :
+  //         0x0000 line=5
+  //         0x001a line=6
+  //       locals        :
+
+  //   Virtual methods   -
+  //   source_file_idx   : 13 (Test1990.java)
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+"ZGV4CjAzNQCV0LekDslEGFglxYgCw7HSyxVegIDjERswBQAAcAAAAHhWNBIAAAAAAAAAAGwEAAAc" +
+"AAAAcAAAAAoAAADgAAAABAAAAAgBAAABAAAAOAEAAAgAAABAAQAAAQAAAIABAACQAwAAoAEAAC4C" +
+"AAA2AgAASAIAAEsCAABPAgAAaQIAAHkCAACdAgAAvQIAANQCAADoAgAA/AIAABcDAAArAwAAOgMA" +
+"AEUDAABIAwAATAMAAFkDAABhAwAAZwMAAGwDAAB1AwAAgQMAAI8DAACZAwAAoAMAALIDAAAEAAAA" +
+"BQAAAAYAAAAHAAAACAAAAAkAAAAKAAAACwAAAAwAAAAPAAAAAgAAAAYAAAAAAAAAAwAAAAcAAAAo" +
+"AgAADwAAAAkAAAAAAAAAEAAAAAkAAAAoAgAACAAEABQAAAAAAAIAAAAAAAAAAAAWAAAAAAACABcA" +
+"AAAEAAMAFQAAAAUAAgAAAAAABwACAAAAAAAHAAEAEgAAAAcAAAAYAAAAAAAAAAAAAAAFAAAAAAAA" +
+"AA0AAABcBAAAOQQAAAAAAAABAAAAAAAAABoCAAADAAAAGgAaABEAAAABAAEAAQAAABYCAAAEAAAA" +
+"cBAEAAAADgAEAAAAAgAAAB4CAAAbAAAAYgAAAHEAAQAAAAwBIgIHAHAQBQACABoDAQBuIAYAMgBu" +
+"IAYAEgBuEAcAAgAMAW4gAwAQAA4AAwAOAAgADgAFAA4BGg8AAAAAAQAAAAYABjxpbml0PgAQSSBz" +
+"YXkgaGVsbG8gYW5kIAABTAACTEwAGExhcnQvVGVzdDE5OTAkVHJhbnNmb3JtOwAOTGFydC9UZXN0" +
+"MTk5MDsAIkxkYWx2aWsvYW5ub3RhdGlvbi9FbmNsb3NpbmdDbGFzczsAHkxkYWx2aWsvYW5ub3Rh" +
+"dGlvbi9Jbm5lckNsYXNzOwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABJMamF2YS9sYW5nL09iamVj" +
+"dDsAEkxqYXZhL2xhbmcvU3RyaW5nOwAZTGphdmEvbGFuZy9TdHJpbmdCdWlsZGVyOwASTGphdmEv" +
+"bGFuZy9TeXN0ZW07AA1UZXN0MTk5MC5qYXZhAAlUcmFuc2Zvcm0AAVYAAlZMAAthY2Nlc3NGbGFn" +
+"cwAGYXBwZW5kAARuYW1lAANvdXQAB3ByaW50bG4ACnNheUdvb2RieWUADHNheVNvbWV0aGluZwAI" +
+"dG9TdHJpbmcABXZhbHVlABB5b3Ugc2F5IGdvb2RieWUhAHZ+fkQ4eyJjb21waWxhdGlvbi1tb2Rl" +
+"IjoiZGVidWciLCJtaW4tYXBpIjoxLCJzaGEtMSI6IjYwZGE0ZDY3YjM4MWM0MjQ2Nzc1N2M0OWZi" +
+"NmU1NTc1NmQ4OGEyZjMiLCJ2ZXJzaW9uIjoiMS43LjEyLWRldiJ9AAICARkYAQIDAhEECBMXDgAA" +
+"AwAAgIAEuAMBCaADAQnQAwAAAAAAAgAAACoEAAAwBAAAUAQAAAAAAAAAAAAAAAAAABAAAAAAAAAA" +
+"AQAAAAAAAAABAAAAHAAAAHAAAAACAAAACgAAAOAAAAADAAAABAAAAAgBAAAEAAAAAQAAADgBAAAF" +
+"AAAACAAAAEABAAAGAAAAAQAAAIABAAABIAAAAwAAAKABAAADIAAAAwAAABYCAAABEAAAAQAAACgC" +
+"AAACIAAAHAAAAC4CAAAEIAAAAgAAACoEAAAAIAAAAQAAADkEAAADEAAAAgAAAEwEAAAGIAAAAQAA" +
+"AFwEAAAAEAAAAQAAAGwEAAA=");
+
+
+
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest(new Transform());
+  }
+
+  public static void doTest(Transform t) throws Exception {
+    Transform.saySomething();
+    Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+    Transform.saySomething();
+  }
+}
diff --git a/test/1991-hello-structural-retransform/expected.txt b/test/1991-hello-structural-retransform/expected.txt
new file mode 100644
index 0000000..7478dda
--- /dev/null
+++ b/test/1991-hello-structural-retransform/expected.txt
@@ -0,0 +1,2 @@
+hello
+I say hello and you say goodbye!
diff --git a/test/1991-hello-structural-retransform/info.txt b/test/1991-hello-structural-retransform/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/1991-hello-structural-retransform/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/1991-hello-structural-retransform/run b/test/1991-hello-structural-retransform/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/1991-hello-structural-retransform/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1991-hello-structural-retransform/src/Main.java b/test/1991-hello-structural-retransform/src/Main.java
new file mode 100644
index 0000000..531ca4a
--- /dev/null
+++ b/test/1991-hello-structural-retransform/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1991.run();
+  }
+}
diff --git a/test/1991-hello-structural-retransform/src/art/Redefinition.java b/test/1991-hello-structural-retransform/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1991-hello-structural-retransform/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1991-hello-structural-retransform/src/art/Test1991.java b/test/1991-hello-structural-retransform/src/art/Test1991.java
new file mode 100644
index 0000000..6060c20
--- /dev/null
+++ b/test/1991-hello-structural-retransform/src/art/Test1991.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+public class Test1991 {
+
+  static class Transform {
+    public static void sayHi() {
+      System.out.println("hello");
+    }
+  }
+
+
+  /**
+   * base64 encoded class/dex file for
+   * static class Transform {
+   *   public static void sayHi() {
+   *    System.out.println("I say hello and " + sayGoodbye());
+   *   }
+   *   public static String sayGoodbye() {
+   *     return "you say goodbye!";
+   *   }
+   * }
+   */
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+    "ZGV4CjAzNQCi0OGZvVpTRbHGfNbo3bfcu60kPpJayMgoBQAAcAAAAHhWNBIAAAAAAAAAAGQEAAAc" +
+    "AAAAcAAAAAoAAADgAAAABAAAAAgBAAABAAAAOAEAAAgAAABAAQAAAQAAAIABAACIAwAAoAEAAC4C" +
+    "AAA2AgAASAIAAEsCAABPAgAAaQIAAHkCAACdAgAAvQIAANQCAADoAgAA/AIAABcDAAArAwAAOgMA" +
+    "AEUDAABIAwAATAMAAFkDAABhAwAAZwMAAGwDAAB1AwAAgQMAAIgDAACSAwAAmQMAAKsDAAAEAAAA" +
+    "BQAAAAYAAAAHAAAACAAAAAkAAAAKAAAACwAAAAwAAAAPAAAAAgAAAAYAAAAAAAAAAwAAAAcAAAAo" +
+    "AgAADwAAAAkAAAAAAAAAEAAAAAkAAAAoAgAACAAEABQAAAAAAAIAAAAAAAAAAAAWAAAAAAACABcA" +
+    "AAAEAAMAFQAAAAUAAgAAAAAABwACAAAAAAAHAAEAEgAAAAcAAAAYAAAAAAAAAAAAAAAFAAAAAAAA" +
+    "AA0AAABUBAAAMgQAAAAAAAABAAAAAAAAABoCAAADAAAAGgAaABEAAAABAAEAAQAAABYCAAAEAAAA" +
+    "cBAEAAAADgAEAAAAAgAAAB4CAAAbAAAAYgAAAHEAAQAAAAwBIgIHAHAQBQACABoDAQBuIAYAMgBu" +
+    "IAYAEgBuEAcAAgAMAW4gAwAQAA4ABgAOAAsADgAIAA4BGg8AAAAAAQAAAAYABjxpbml0PgAQSSBz" +
+    "YXkgaGVsbG8gYW5kIAABTAACTEwAGExhcnQvVGVzdDE5OTEkVHJhbnNmb3JtOwAOTGFydC9UZXN0" +
+    "MTk5MTsAIkxkYWx2aWsvYW5ub3RhdGlvbi9FbmNsb3NpbmdDbGFzczsAHkxkYWx2aWsvYW5ub3Rh" +
+    "dGlvbi9Jbm5lckNsYXNzOwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABJMamF2YS9sYW5nL09iamVj" +
+    "dDsAEkxqYXZhL2xhbmcvU3RyaW5nOwAZTGphdmEvbGFuZy9TdHJpbmdCdWlsZGVyOwASTGphdmEv" +
+    "bGFuZy9TeXN0ZW07AA1UZXN0MTk5MS5qYXZhAAlUcmFuc2Zvcm0AAVYAAlZMAAthY2Nlc3NGbGFn" +
+    "cwAGYXBwZW5kAARuYW1lAANvdXQAB3ByaW50bG4ACnNheUdvb2RieWUABXNheUhpAAh0b1N0cmlu" +
+    "ZwAFdmFsdWUAEHlvdSBzYXkgZ29vZGJ5ZSEAdn5+RDh7ImNvbXBpbGF0aW9uLW1vZGUiOiJkZWJ1" +
+    "ZyIsIm1pbi1hcGkiOjEsInNoYS0xIjoiNjBkYTRkNjdiMzgxYzQyNDY3NzU3YzQ5ZmI2ZTU1NzU2" +
+    "ZDg4YTJmMyIsInZlcnNpb24iOiIxLjcuMTItZGV2In0AAgIBGRgBAgMCEQQIExcOAAADAACAgAS4" +
+    "AwEJoAMBCdADAAAAAAIAAAAjBAAAKQQAAEgEAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAEAAAAAAAAA" +
+    "AQAAABwAAABwAAAAAgAAAAoAAADgAAAAAwAAAAQAAAAIAQAABAAAAAEAAAA4AQAABQAAAAgAAABA" +
+    "AQAABgAAAAEAAACAAQAAASAAAAMAAACgAQAAAyAAAAMAAAAWAgAAARAAAAEAAAAoAgAAAiAAABwA" +
+    "AAAuAgAABCAAAAIAAAAjBAAAACAAAAEAAAAyBAAAAxAAAAIAAABEBAAABiAAAAEAAABUBAAAABAA" +
+    "AAEAAABkBAAA");
+
+
+  public static void run() {
+    Redefinition.setTestConfiguration(Redefinition.Config.STRUCTURAL_TRANSFORM);
+    doTest();
+  }
+
+  public static void doTest() {
+    Transform.sayHi();
+    Redefinition.addCommonTransformationResult("art/Test1991$Transform", new byte[0], DEX_BYTES);
+    Redefinition.enableCommonRetransformation(true);
+    Redefinition.doCommonClassRetransformation(Transform.class);
+    Transform.sayHi();
+  }
+}
diff --git a/test/1992-retransform-no-such-field/expected.txt b/test/1992-retransform-no-such-field/expected.txt
new file mode 100644
index 0000000..53de32a
--- /dev/null
+++ b/test/1992-retransform-no-such-field/expected.txt
@@ -0,0 +1,2 @@
+This file was written in the year 2019!
+This new class was written in 2019
diff --git a/test/1992-retransform-no-such-field/info.txt b/test/1992-retransform-no-such-field/info.txt
new file mode 100644
index 0000000..875a5f6
--- /dev/null
+++ b/test/1992-retransform-no-such-field/info.txt
@@ -0,0 +1 @@
+Tests basic functions in the jvmti plugin.
diff --git a/test/1992-retransform-no-such-field/run b/test/1992-retransform-no-such-field/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/1992-retransform-no-such-field/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/1992-retransform-no-such-field/src/Main.java b/test/1992-retransform-no-such-field/src/Main.java
new file mode 100644
index 0000000..ccffb88
--- /dev/null
+++ b/test/1992-retransform-no-such-field/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1992.run();
+  }
+}
diff --git a/test/1992-retransform-no-such-field/src/art/Redefinition.java b/test/1992-retransform-no-such-field/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1992-retransform-no-such-field/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1992-retransform-no-such-field/src/art/Test1992.java b/test/1992-retransform-no-such-field/src/art/Test1992.java
new file mode 100644
index 0000000..5e1d5bb
--- /dev/null
+++ b/test/1992-retransform-no-such-field/src/art/Test1992.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+public class Test1992 {
+  public static boolean FAIL_IT = false;
+
+  static class Transform {
+    public void saySomething() {
+      System.out.println("This file was written in the year 2019!");
+    }
+  }
+
+  /**
+   * base64 encoded class/dex file for
+   * class Transform {
+   *   public void saySomething() {
+   *    if (Test1992.FAIL_IT) {
+   *      // Force verification soft-fail.
+   *      Test1992.NOT_THERE = 1;
+   *    }
+   *    System.out.println("This new class was written in " + year);
+   *   }
+   * }
+   */
+  private static final byte[] CLASS_BYTES = Base64.getDecoder().decode(
+"yv66vgAAADUAKQoACAARCQASABMJABIAFAkAFQAWCAAXCgAYABkHABoHAB0BAAY8aW5pdD4BAAMo" +
+"KVYBAARDb2RlAQAPTGluZU51bWJlclRhYmxlAQAMc2F5U29tZXRoaW5nAQANU3RhY2tNYXBUYWJs" +
+"ZQEAClNvdXJjZUZpbGUBAA1UZXN0MTk5Mi5qYXZhDAAJAAoHAB4MAB8AIAwAIQAiBwAjDAAkACUB" +
+"ACJUaGlzIG5ldyBjbGFzcyB3YXMgd3JpdHRlbiBpbiAyMDE5BwAmDAAnACgBABZhcnQvVGVzdDE5" +
+"OTIkVHJhbnNmb3JtAQAJVHJhbnNmb3JtAQAMSW5uZXJDbGFzc2VzAQAQamF2YS9sYW5nL09iamVj" +
+"dAEADGFydC9UZXN0MTk5MgEAB0ZBSUxfSVQBAAFaAQAJTk9UX1RIRVJFAQABSQEAEGphdmEvbGFu" +
+"Zy9TeXN0ZW0BAANvdXQBABVMamF2YS9pby9QcmludFN0cmVhbTsBABNqYXZhL2lvL1ByaW50U3Ry" +
+"ZWFtAQAHcHJpbnRsbgEAFShMamF2YS9sYW5nL1N0cmluZzspVgAgAAcACAAAAAAAAgAAAAkACgAB" +
+"AAsAAAAdAAEAAQAAAAUqtwABsQAAAAEADAAAAAYAAQAAAAUAAQANAAoAAQALAAAAQAACAAEAAAAT" +
+"sgACmQAHBLMAA7IABBIFtgAGsQAAAAIADAAAABIABAAAAAkABgAKAAoADAASAA0ADgAAAAMAAQoA" +
+"AgAPAAAAAgAQABwAAAAKAAEABwASABsACA==");
+
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+"ZGV4CjAzNQAWyivK2j0yR4u2YH/R8Bs3KIFv7O/Hs0WkBAAAcAAAAHhWNBIAAAAAAAAAAOADAAAZ" +
+"AAAAcAAAAAsAAADUAAAAAgAAAAABAAADAAAAGAEAAAQAAAAwAQAAAQAAAFABAAA0AwAAcAEAAMoB" +
+"AADSAQAA2wEAAN4BAAD4AQAACAIAACwCAABMAgAAYwIAAHcCAACLAgAAnwIAAKoCAAC5AgAA3QIA" +
+"AOgCAADrAgAA7wIAAPICAAD/AgAABQMAAAoDAAATAwAAIQMAACgDAAACAAAAAwAAAAQAAAAFAAAA" +
+"BgAAAAcAAAAIAAAACQAAAAoAAAAPAAAAEQAAAA8AAAAJAAAAAAAAABAAAAAJAAAAxAEAAAIACgAB" +
+"AAAAAgAAAAsAAAAIAAUAFAAAAAEAAAAAAAAAAQAAABYAAAAFAAEAFQAAAAYAAAAAAAAAAQAAAAAA" +
+"AAAGAAAAAAAAAAwAAADQAwAArwMAAAAAAAABAAEAAQAAALYBAAAEAAAAcBADAAAADgADAAEAAgAA" +
+"ALoBAAAPAAAAYwAAADgABQASEGcAAQBiAAIAGgENAG4gAgAQAA4ABQAOAAkADks9eAAAAAABAAAA" +
+"BwAGPGluaXQ+AAdGQUlMX0lUAAFJABhMYXJ0L1Rlc3QxOTkyJFRyYW5zZm9ybTsADkxhcnQvVGVz" +
+"dDE5OTI7ACJMZGFsdmlrL2Fubm90YXRpb24vRW5jbG9zaW5nQ2xhc3M7AB5MZGFsdmlrL2Fubm90" +
+"YXRpb24vSW5uZXJDbGFzczsAFUxqYXZhL2lvL1ByaW50U3RyZWFtOwASTGphdmEvbGFuZy9PYmpl" +
+"Y3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAEkxqYXZhL2xhbmcvU3lzdGVtOwAJTk9UX1RIRVJFAA1U" +
+"ZXN0MTk5Mi5qYXZhACJUaGlzIG5ldyBjbGFzcyB3YXMgd3JpdHRlbiBpbiAyMDE5AAlUcmFuc2Zv" +
+"cm0AAVYAAlZMAAFaAAthY2Nlc3NGbGFncwAEbmFtZQADb3V0AAdwcmludGxuAAxzYXlTb21ldGhp" +
+"bmcABXZhbHVlAHZ+fkQ4eyJjb21waWxhdGlvbi1tb2RlIjoiZGVidWciLCJtaW4tYXBpIjoxLCJz" +
+"aGEtMSI6IjYwZGE0ZDY3YjM4MWM0MjQ2Nzc1N2M0OWZiNmU1NTc1NmQ4OGEyZjMiLCJ2ZXJzaW9u" +
+"IjoiMS43LjEyLWRldiJ9AAIDARcYAgIEAhIECBMXDgAAAQEAgIAE8AIBAYgDAAAAAAAAAAIAAACg" +
+"AwAApgMAAMQDAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAEAAAAAAAAAAQAAABkAAABwAAAAAgAAAAsA" +
+"AADUAAAAAwAAAAIAAAAAAQAABAAAAAMAAAAYAQAABQAAAAQAAAAwAQAABgAAAAEAAABQAQAAASAA" +
+"AAIAAABwAQAAAyAAAAIAAAC2AQAAARAAAAEAAADEAQAAAiAAABkAAADKAQAABCAAAAIAAACgAwAA" +
+"ACAAAAEAAACvAwAAAxAAAAIAAADAAwAABiAAAAEAAADQAwAAABAAAAEAAADgAwAA");
+
+
+
+  public static void run() {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest(new Transform());
+  }
+
+  public static void doTest(Transform t) {
+    t.saySomething();
+    Redefinition.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+    t.saySomething();
+  }
+}
diff --git a/test/1993-fallback-non-structural/expected.txt b/test/1993-fallback-non-structural/expected.txt
new file mode 100644
index 0000000..f523e70
--- /dev/null
+++ b/test/1993-fallback-non-structural/expected.txt
@@ -0,0 +1,3 @@
+Can structurally Redefine: true
+hello
+Goodbye
diff --git a/test/1993-fallback-non-structural/info.txt b/test/1993-fallback-non-structural/info.txt
new file mode 100644
index 0000000..3b558e1
--- /dev/null
+++ b/test/1993-fallback-non-structural/info.txt
@@ -0,0 +1,4 @@
+Tests basic functions in the jvmti plugin.
+
+Tests that using the structural redefinition functions will fall back to non-structural
+redefinition when possible.
diff --git a/test/1993-fallback-non-structural/run b/test/1993-fallback-non-structural/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/1993-fallback-non-structural/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1993-fallback-non-structural/src/Main.java b/test/1993-fallback-non-structural/src/Main.java
new file mode 100644
index 0000000..61e060c
--- /dev/null
+++ b/test/1993-fallback-non-structural/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1993.run();
+  }
+}
diff --git a/test/1993-fallback-non-structural/src/art/Redefinition.java b/test/1993-fallback-non-structural/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1993-fallback-non-structural/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1993-fallback-non-structural/src/art/Test1993.java b/test/1993-fallback-non-structural/src/art/Test1993.java
new file mode 100644
index 0000000..e2a8f6e
--- /dev/null
+++ b/test/1993-fallback-non-structural/src/art/Test1993.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+import java.lang.reflect.*;
+public class Test1993 {
+
+  static class Transform {
+    public void sayHi() {
+      // Use lower 'h' to make sure the string will have a different string id
+      // than the transformation (the transformation code is the same except
+      // the actual printed String, which was making the test inacurately passing
+      // in JIT mode when loading the string from the dex cache, as the string ids
+      // of the two different strings were the same).
+      // We know the string ids will be different because lexicographically:
+      // "Goodbye" < "LTransform;" < "hello".
+      System.out.println("hello");
+    }
+  }
+
+  /**
+   * base64 encoded class/dex file for
+   * class Transform {
+   *   public void sayHi() {
+   *    System.out.println("Goodbye");
+   *   }
+   * }
+   */
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+    "ZGV4CjAzNQDxrdbiBcsn0r58mdtcdyDxVUxwbWfShNQwBAAAcAAAAHhWNBIAAAAAAAAAAGwDAAAV" +
+    "AAAAcAAAAAkAAADEAAAAAgAAAOgAAAABAAAAAAEAAAQAAAAIAQAAAQAAACgBAADoAgAASAEAAJIB" +
+    "AACaAQAAowEAAL0BAADNAQAA8QEAABECAAAoAgAAPAIAAFACAABkAgAAcwIAAH4CAACBAgAAhQIA" +
+    "AJICAACYAgAAnQIAAKYCAACtAgAAtAIAAAIAAAADAAAABAAAAAUAAAAGAAAABwAAAAgAAAAJAAAA" +
+    "DAAAAAwAAAAIAAAAAAAAAA0AAAAIAAAAjAEAAAcABAAQAAAAAAAAAAAAAAAAAAAAEgAAAAQAAQAR" +
+    "AAAABQAAAAAAAAAAAAAAAAAAAAUAAAAAAAAACgAAAFwDAAA7AwAAAAAAAAEAAQABAAAAgAEAAAQA" +
+    "AABwEAMAAAAOAAMAAQACAAAAhAEAAAgAAABiAAAAGgEBAG4gAgAQAA4AAwAOAAUADngAAAAAAQAA" +
+    "AAYABjxpbml0PgAHR29vZGJ5ZQAYTGFydC9UZXN0MTk5MyRUcmFuc2Zvcm07AA5MYXJ0L1Rlc3Qx" +
+    "OTkzOwAiTGRhbHZpay9hbm5vdGF0aW9uL0VuY2xvc2luZ0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0" +
+    "aW9uL0lubmVyQ2xhc3M7ABVMamF2YS9pby9QcmludFN0cmVhbTsAEkxqYXZhL2xhbmcvT2JqZWN0" +
+    "OwASTGphdmEvbGFuZy9TdHJpbmc7ABJMamF2YS9sYW5nL1N5c3RlbTsADVRlc3QxOTkzLmphdmEA" +
+    "CVRyYW5zZm9ybQABVgACVkwAC2FjY2Vzc0ZsYWdzAARuYW1lAANvdXQAB3ByaW50bG4ABXNheUhp" +
+    "AAV2YWx1ZQB2fn5EOHsiY29tcGlsYXRpb24tbW9kZSI6ImRlYnVnIiwibWluLWFwaSI6MSwic2hh" +
+    "LTEiOiJjZDkwMDIzOTMwZDk3M2Y1NzcxMWYxZDRmZGFhZDdhM2U0NzE0NjM3IiwidmVyc2lvbiI6" +
+    "IjEuNy4xNC1kZXYifQACAgETGAECAwIOBAgPFwsAAAEBAICABMgCAQHgAgAAAAAAAAACAAAALAMA" +
+    "ADIDAABQAwAAAAAAAAAAAAAAAAAAEAAAAAAAAAABAAAAAAAAAAEAAAAVAAAAcAAAAAIAAAAJAAAA" +
+    "xAAAAAMAAAACAAAA6AAAAAQAAAABAAAAAAEAAAUAAAAEAAAACAEAAAYAAAABAAAAKAEAAAEgAAAC" +
+    "AAAASAEAAAMgAAACAAAAgAEAAAEQAAABAAAAjAEAAAIgAAAVAAAAkgEAAAQgAAACAAAALAMAAAAg" +
+    "AAABAAAAOwMAAAMQAAACAAAATAMAAAYgAAABAAAAXAMAAAAQAAABAAAAbAMAAA==");
+
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest(new Transform());
+  }
+
+  public static void doTest(Transform t) throws Exception {
+    System.out.println("Can structurally Redefine: " +
+      Redefinition.isStructurallyModifiable(Transform.class));
+    t.sayHi();
+    Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+    t.sayHi();
+    // Check and make sure we didn't structurally redefine by looking for ClassExt.obsoleteClass
+    Field ext_data_field = Class.class.getDeclaredField("extData");
+    ext_data_field.setAccessible(true);
+    Object ext_data = ext_data_field.get(Transform.class);
+    Field obsolete_class_field = ext_data.getClass().getDeclaredField("obsoleteClass");
+    obsolete_class_field.setAccessible(true);
+    if (obsolete_class_field.get(ext_data) != null)  {
+      System.out.println("Expected no ClassExt.obsoleteClass but got " + obsolete_class_field.get(ext_data));
+    }
+  }
+}
diff --git a/test/1994-final-virtual-structural/expected.txt b/test/1994-final-virtual-structural/expected.txt
new file mode 100644
index 0000000..9b74d30
--- /dev/null
+++ b/test/1994-final-virtual-structural/expected.txt
@@ -0,0 +1,5 @@
+Hi!
+Hello world!
+Hej Verden!
+Bonjour le monde!
+こんにちは世界!
diff --git a/test/1994-final-virtual-structural/info.txt b/test/1994-final-virtual-structural/info.txt
new file mode 100644
index 0000000..606c984
--- /dev/null
+++ b/test/1994-final-virtual-structural/info.txt
@@ -0,0 +1,3 @@
+Tests basic functions in the jvmti plugin.
+
+Tests that using the structural redefinition can add new virtual methods and fields.
diff --git a/test/1994-final-virtual-structural/run b/test/1994-final-virtual-structural/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/1994-final-virtual-structural/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1994-final-virtual-structural/src/Main.java b/test/1994-final-virtual-structural/src/Main.java
new file mode 100644
index 0000000..3f0cb14
--- /dev/null
+++ b/test/1994-final-virtual-structural/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1994.run();
+  }
+}
diff --git a/test/1994-final-virtual-structural/src/art/Redefinition.java b/test/1994-final-virtual-structural/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1994-final-virtual-structural/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1994-final-virtual-structural/src/art/Test1994.java b/test/1994-final-virtual-structural/src/art/Test1994.java
new file mode 100644
index 0000000..9ae7772
--- /dev/null
+++ b/test/1994-final-virtual-structural/src/art/Test1994.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+public class Test1994 {
+
+  public static final class Transform {
+    public void sayHi() {
+      System.out.println("Hi!");
+    }
+  }
+
+  /**
+   * base64 encoded class/dex file for
+   * public static final class Transform {
+   *   public void sayHi() {
+   *     sayHiEnglish();
+   *     sayHiDanish();
+   *     sayHiFrance();
+   *     sayHiJapan();
+   *   }
+   *   public void sayHiEnglish() {
+   *     System.out.println("Hello world!");
+   *   }
+   *   public void sayHiDanish() {
+   *     System.out.println("Hej Verden!");
+   *   }
+   *   public void sayHiJapan() {
+   *     System.out.println("こんにちは世界!");
+   *   }
+   *   public void sayHiFrance() {
+   *     System.out.println("Bonjour le monde!");
+   *   }
+   * }
+   */
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+    "ZGV4CjAzNQA87tn3VIDgMrF+Md2W4r58elaMPcSfk2CMBQAAcAAAAHhWNBIAAAAAAAAAAMgEAAAc" +
+    "AAAAcAAAAAkAAADgAAAAAgAAAAQBAAABAAAAHAEAAAgAAAAkAQAAAQAAAGQBAAAIBAAAhAEAAG4C" +
+    "AAB2AgAAiQIAAJYCAACkAgAAvgIAAM4CAADyAgAAEgMAACkDAAA9AwAAUQMAAGUDAAB0AwAAfwMA" +
+    "AIIDAACGAwAAkwMAAJkDAACeAwAApwMAAK4DAAC7AwAAyQMAANYDAADiAwAA6QMAAGEEAAAEAAAA" +
+    "BQAAAAYAAAAHAAAACAAAAAkAAAAKAAAACwAAAA4AAAAOAAAACAAAAAAAAAAPAAAACAAAAGgCAAAH" +
+    "AAQAEgAAAAAAAAAAAAAAAAAAABQAAAAAAAAAFQAAAAAAAAAWAAAAAAAAABcAAAAAAAAAGAAAAAQA" +
+    "AQATAAAABQAAAAAAAAAAAAAAEQAAAAUAAAAAAAAADAAAALgEAACIBAAAAAAAAAEAAQABAAAASAIA" +
+    "AAQAAABwEAcAAAAOAAEAAQABAAAATAIAAA0AAABuEAMAAABuEAIAAABuEAQAAABuEAUAAAAOAAAA" +
+    "AwABAAIAAABUAgAACAAAAGIAAAAaAQIAbiAGABAADgADAAEAAgAAAFkCAAAIAAAAYgAAABoBAwBu" +
+    "IAYAEAAOAAMAAQACAAAAXgIAAAgAAABiAAAAGgEBAG4gBgAQAA4AAwABAAIAAABjAgAACAAAAGIA" +
+    "AAAaARsAbiAGABAADgADAA4ABQAOPDw8PAAOAA54AAsADngAFAAOeAARAA54AAEAAAAGAAY8aW5p" +
+    "dD4AEUJvbmpvdXIgbGUgbW9uZGUhAAtIZWogVmVyZGVuIQAMSGVsbG8gd29ybGQhABhMYXJ0L1Rl" +
+    "c3QxOTk0JFRyYW5zZm9ybTsADkxhcnQvVGVzdDE5OTQ7ACJMZGFsdmlrL2Fubm90YXRpb24vRW5j" +
+    "bG9zaW5nQ2xhc3M7AB5MZGFsdmlrL2Fubm90YXRpb24vSW5uZXJDbGFzczsAFUxqYXZhL2lvL1By" +
+    "aW50U3RyZWFtOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAEkxqYXZh" +
+    "L2xhbmcvU3lzdGVtOwANVGVzdDE5OTQuamF2YQAJVHJhbnNmb3JtAAFWAAJWTAALYWNjZXNzRmxh" +
+    "Z3MABG5hbWUAA291dAAHcHJpbnRsbgAFc2F5SGkAC3NheUhpRGFuaXNoAAxzYXlIaUVuZ2xpc2gA" +
+    "C3NheUhpRnJhbmNlAApzYXlIaUphcGFuAAV2YWx1ZQB2fn5EOHsiY29tcGlsYXRpb24tbW9kZSI6" +
+    "ImRlYnVnIiwibWluLWFwaSI6MSwic2hhLTEiOiJjZDkwMDIzOTMwZDk3M2Y1NzcxMWYxZDRmZGFh" +
+    "ZDdhM2U0NzE0NjM3IiwidmVyc2lvbiI6IjEuNy4xNC1kZXYifQAI44GT44KT44Gr44Gh44Gv5LiW" +
+    "55WMIQACAgEZGAECAwIQBBkRFw0AAAEFAIGABIQDAQGcAwEByAMBAegDAQGIBAEBqAQAAAAAAAAC" +
+    "AAAAeQQAAH8EAACsBAAAAAAAAAAAAAAAAAAAEAAAAAAAAAABAAAAAAAAAAEAAAAcAAAAcAAAAAIA" +
+    "AAAJAAAA4AAAAAMAAAACAAAABAEAAAQAAAABAAAAHAEAAAUAAAAIAAAAJAEAAAYAAAABAAAAZAEA" +
+    "AAEgAAAGAAAAhAEAAAMgAAAGAAAASAIAAAEQAAABAAAAaAIAAAIgAAAcAAAAbgIAAAQgAAACAAAA" +
+    "eQQAAAAgAAABAAAAiAQAAAMQAAACAAAAqAQAAAYgAAABAAAAuAQAAAAQAAABAAAAyAQAAA==");
+
+  public static void run() {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest(new Transform());
+  }
+
+  public static void doTest(Transform t) {
+    t.sayHi();
+    Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+    t.sayHi();
+  }
+}
diff --git a/test/1995-final-virtual-structural-multithread/expected.txt b/test/1995-final-virtual-structural-multithread/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/1995-final-virtual-structural-multithread/expected.txt
diff --git a/test/1995-final-virtual-structural-multithread/info.txt b/test/1995-final-virtual-structural-multithread/info.txt
new file mode 100644
index 0000000..f9b7bdd
--- /dev/null
+++ b/test/1995-final-virtual-structural-multithread/info.txt
@@ -0,0 +1,4 @@
+Tests structural redefinition with multiple threads.
+
+Tests that using the structural redefinition while concurrently using the class being redefined
+doesn't cause any unexpected problems.
diff --git a/test/1995-final-virtual-structural-multithread/run b/test/1995-final-virtual-structural-multithread/run
new file mode 100755
index 0000000..e912529
--- /dev/null
+++ b/test/1995-final-virtual-structural-multithread/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO(b/144168550) This test uses access patterns that can be replaced by
+# iget-object-quick during dex2dex compilation. This breaks the test since the
+# -quick opcode encodes the exact byte offset of fields. Since this test changes
+# the offset this causes problems.
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1995-final-virtual-structural-multithread/src/Main.java b/test/1995-final-virtual-structural-multithread/src/Main.java
new file mode 100644
index 0000000..f19358d
--- /dev/null
+++ b/test/1995-final-virtual-structural-multithread/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1995.run();
+  }
+}
diff --git a/test/1995-final-virtual-structural-multithread/src/art/Redefinition.java b/test/1995-final-virtual-structural-multithread/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1995-final-virtual-structural-multithread/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1995-final-virtual-structural-multithread/src/art/Test1995.java b/test/1995-final-virtual-structural-multithread/src/art/Test1995.java
new file mode 100644
index 0000000..7073494
--- /dev/null
+++ b/test/1995-final-virtual-structural-multithread/src/art/Test1995.java
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Field;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.concurrent.CountDownLatch;
+public class Test1995 {
+  private static final int NUM_THREADS = 20;
+  // Don't perform more than this many repeats per thread to prevent OOMEs
+  private static final int TASK_COUNT_LIMIT = 1000;
+
+  public static final class Transform {
+    public String greetingEnglish;
+    public Transform() {
+      this.greetingEnglish = "Hello";
+    }
+    public String sayHi() {
+      return greetingEnglish + " from " + Thread.currentThread().getName();
+    }
+  }
+
+  /**
+   * base64 encoded class/dex file for
+   * public static final class Transform {
+   *   public String greetingEnglish;
+   *   public String greetingFrench;
+   *   public String greetingDanish;
+   *   public String greetingJapanese;
+   *
+   *   public Transform() {
+   *     this.greetingEnglish = "Hello World";
+   *     this.greetingFrench = "Bonjour le Monde";
+   *     this.greetingDanish = "Hej Verden";
+   *     this.greetingJapanese = "こんにちは世界";
+   *   }
+   *   public String sayHi() {
+   *     return sayHiEnglish() + ", " + sayHiFrench() + ", " + sayHiDanish() + ", " + sayHiJapanese() + " from " + Thread.currentThread().getName();
+   *   }
+   *   public String sayHiEnglish() {
+   *     return greetingEnglish;
+   *   }
+   *   public String sayHiDanish() {
+   *     return greetingDanish;
+   *   }
+   *   public String sayHiJapanese() {
+   *     return greetingJapanese;
+   *   }
+   *   public String sayHiFrench() {
+   *     return greetingFrench;
+   *   }
+   * }
+   */
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+"ZGV4CjAzNQCsHrUqkb8cYgT2oYN7HlVbeOxJT/kONRvgBgAAcAAAAHhWNBIAAAAAAAAAABwGAAAl" +
+"AAAAcAAAAAkAAAAEAQAABAAAACgBAAAEAAAAWAEAAAwAAAB4AQAAAQAAANgBAADoBAAA+AEAAEoD" +
+"AABSAwAAVgMAAF4DAABwAwAAfAMAAIkDAACMAwAAkAMAAKoDAAC6AwAA3gMAAP4DAAASBAAAJgQA" +
+"AEEEAABVBAAAZAQAAG8EAAByBAAAfwQAAIcEAACWBAAAnwQAAK8EAADABAAA0AQAAOIEAADoBAAA" +
+"7wQAAPwEAAAKBQAAFwUAACYFAAAwBQAANwUAAK8FAAAIAAAACQAAAAoAAAALAAAADAAAAA0AAAAO" +
+"AAAADwAAABIAAAAGAAAABQAAAAAAAAAHAAAABgAAAEQDAAAGAAAABwAAAAAAAAASAAAACAAAAAAA" +
+"AAAAAAUAFwAAAAAABQAYAAAAAAAFABkAAAAAAAUAGgAAAAAAAwACAAAAAAAAABwAAAAAAAAAHQAA" +
+"AAAAAAAeAAAAAAAAAB8AAAAAAAAAIAAAAAQAAwACAAAABgADAAIAAAAGAAEAFAAAAAYAAAAhAAAA" +
+"BwACABUAAAAHAAAAFgAAAAAAAAARAAAABAAAAAAAAAAQAAAADAYAANUFAAAAAAAABwABAAIAAAAt" +
+"AwAAQQAAAG4QAwAGAAwAbhAEAAYADAFuEAIABgAMAm4QBQAGAAwDcQAKAAAADARuEAsABAAMBCIF" +
+"BgBwEAcABQBuIAgABQAaAAEAbiAIAAUAbiAIABUAbiAIAAUAbiAIACUAbiAIAAUAbiAIADUAGgAA" +
+"AG4gCAAFAG4gCABFAG4QCQAFAAwAEQAAAAIAAQAAAAAAMQMAAAMAAABUEAAAEQAAAAIAAQAAAAAA" +
+"NQMAAAMAAABUEAEAEQAAAAIAAQAAAAAAOQMAAAMAAABUEAIAEQAAAAIAAQAAAAAAPQMAAAMAAABU" +
+"EAMAEQAAAAIAAQABAAAAJAMAABQAAABwEAYAAQAaAAUAWxABABoAAwBbEAIAGgAEAFsQAAAaACQA" +
+"WxADAA4ACQAOPEtLS0sAEAAOABYADgATAA4AHAAOABkADgAAAAABAAAABQAGIGZyb20gAAIsIAAG" +
+"PGluaXQ+ABBCb25qb3VyIGxlIE1vbmRlAApIZWogVmVyZGVuAAtIZWxsbyBXb3JsZAABTAACTEwA" +
+"GExhcnQvVGVzdDE5OTUkVHJhbnNmb3JtOwAOTGFydC9UZXN0MTk5NTsAIkxkYWx2aWsvYW5ub3Rh" +
+"dGlvbi9FbmNsb3NpbmdDbGFzczsAHkxkYWx2aWsvYW5ub3RhdGlvbi9Jbm5lckNsYXNzOwASTGph" +
+"dmEvbGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAGUxqYXZhL2xhbmcvU3RyaW5nQnVp" +
+"bGRlcjsAEkxqYXZhL2xhbmcvVGhyZWFkOwANVGVzdDE5OTUuamF2YQAJVHJhbnNmb3JtAAFWAAth" +
+"Y2Nlc3NGbGFncwAGYXBwZW5kAA1jdXJyZW50VGhyZWFkAAdnZXROYW1lAA5ncmVldGluZ0Rhbmlz" +
+"aAAPZ3JlZXRpbmdFbmdsaXNoAA5ncmVldGluZ0ZyZW5jaAAQZ3JlZXRpbmdKYXBhbmVzZQAEbmFt" +
+"ZQAFc2F5SGkAC3NheUhpRGFuaXNoAAxzYXlIaUVuZ2xpc2gAC3NheUhpRnJlbmNoAA1zYXlIaUph" +
+"cGFuZXNlAAh0b1N0cmluZwAFdmFsdWUAdn5+RDh7ImNvbXBpbGF0aW9uLW1vZGUiOiJkZWJ1ZyIs" +
+"Im1pbi1hcGkiOjEsInNoYS0xIjoiNjBkYTRkNjdiMzgxYzQyNDY3NzU3YzQ5ZmI2ZTU1NzU2ZDg4" +
+"YTJmMyIsInZlcnNpb24iOiIxLjcuMTItZGV2In0AB+OBk+OCk+OBq+OBoeOBr+S4lueVjAACAgEi" +
+"GAECAwITBBkbFxEABAEFAAEBAQEBAQEAgYAE7AUBAfgDAQGMBQEBpAUBAbwFAQHUBQAAAAAAAgAA" +
+"AMYFAADMBQAAAAYAAAAAAAAAAAAAAAAAABAAAAAAAAAAAQAAAAAAAAABAAAAJQAAAHAAAAACAAAA" +
+"CQAAAAQBAAADAAAABAAAACgBAAAEAAAABAAAAFgBAAAFAAAADAAAAHgBAAAGAAAAAQAAANgBAAAB" +
+"IAAABgAAAPgBAAADIAAABgAAACQDAAABEAAAAQAAAEQDAAACIAAAJQAAAEoDAAAEIAAAAgAAAMYF" +
+"AAAAIAAAAQAAANUFAAADEAAAAgAAAPwFAAAGIAAAAQAAAAwGAAAAEAAAAQAAABwGAAA=");
+
+
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+
+  public static final class MyThread extends Thread {
+    public MyThread(CountDownLatch delay, int id) {
+      super("Thread: " + id);
+      this.thr_id = id;
+      this.results = new ArrayList<>(TASK_COUNT_LIMIT);
+      this.finish = false;
+      this.delay = delay;
+    }
+
+    public void run() {
+      delay.countDown();
+      while (!finish && results.size() < TASK_COUNT_LIMIT) {
+        Transform t = new Transform();
+        results.add(t.sayHi());
+      }
+    }
+
+    public void finish() throws Exception {
+      finish = true;
+      this.join();
+    }
+
+    public void Check() throws Exception {
+      for (String s : results) {
+        if (!s.equals("Hello from " + getName()) &&
+            !s.equals("Hello, null, null, null from " + getName()) &&
+            !s.equals("Hello World, Bonjour le Monde, Hej Verden, こんにちは世界 from " + getName())) {
+          System.out.println("FAIL " + thr_id + ": Unexpected result: " + s);
+        }
+      }
+    }
+
+    public ArrayList<String> results;
+    public volatile boolean finish;
+    public int thr_id;
+    public CountDownLatch delay;
+  }
+
+  public static MyThread[] startThreads(int num_threads) throws Exception {
+    CountDownLatch cdl = new CountDownLatch(num_threads);
+    MyThread[] res = new MyThread[num_threads];
+    for (int i = 0; i < num_threads; i++) {
+      res[i] = new MyThread(cdl, i);
+      res[i].start();
+    }
+    cdl.await();
+    return res;
+  }
+  public static void finishThreads(MyThread[] thrs) throws Exception {
+    for (MyThread t : thrs) {
+      t.finish();
+    }
+    for (MyThread t : thrs) {
+      t.Check();
+    }
+  }
+
+  public static void doTest() throws Exception {
+    MyThread[] threads = startThreads(NUM_THREADS);
+    Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+    finishThreads(threads);
+  }
+}
diff --git a/test/1996-final-override-virtual-structural/expected.txt b/test/1996-final-override-virtual-structural/expected.txt
new file mode 100644
index 0000000..20cd98f
--- /dev/null
+++ b/test/1996-final-override-virtual-structural/expected.txt
@@ -0,0 +1,6 @@
+Not doing anything
+super: Hi this: Hi
+Redefining calling class
+super: Hi this: SALUTATIONS
+Not doing anything
+super: Hi and then this: SALUTATIONS
diff --git a/test/1996-final-override-virtual-structural/info.txt b/test/1996-final-override-virtual-structural/info.txt
new file mode 100644
index 0000000..55adf7c
--- /dev/null
+++ b/test/1996-final-override-virtual-structural/info.txt
@@ -0,0 +1,3 @@
+Tests basic functions in the jvmti plugin.
+
+Tests that using the structural redefinition allows one to override a superclass method.
diff --git a/test/1996-final-override-virtual-structural/run b/test/1996-final-override-virtual-structural/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/1996-final-override-virtual-structural/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1996-final-override-virtual-structural/src/Main.java b/test/1996-final-override-virtual-structural/src/Main.java
new file mode 100644
index 0000000..ade69cf
--- /dev/null
+++ b/test/1996-final-override-virtual-structural/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1996.run();
+  }
+}
diff --git a/test/1996-final-override-virtual-structural/src/art/Redefinition.java b/test/1996-final-override-virtual-structural/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1996-final-override-virtual-structural/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1996-final-override-virtual-structural/src/art/Test1996.java b/test/1996-final-override-virtual-structural/src/art/Test1996.java
new file mode 100644
index 0000000..c2b1125
--- /dev/null
+++ b/test/1996-final-override-virtual-structural/src/art/Test1996.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+public class Test1996 {
+
+  public static class SuperTransform {
+    public String hiValue = "Hi";
+    public String sayHi() {
+      return this.hiValue;
+    }
+  }
+  public static final class Transform extends SuperTransform {
+    public void PostTransform() { }
+    public String sayHiTwice(Runnable run) {
+      run.run();
+      return "super: " + super.sayHi() + " this: " + sayHi();
+    }
+  }
+
+  /**
+   * base64 encoded class/dex file for
+   * public static final class Transform extends SuperTransform {
+   *   public String myGreeting;
+   *   public void PostTransform() {
+   *     myGreeting = "SALUTATIONS";
+   *   }
+   *   public String sayHiTwice(Runnable run) {
+   *     run.run();
+   *     return "super: " + super.sayHi() + " and then this: " + sayHi();
+   *   }
+   *   public String sayHi() {
+   *     return myGreeting;
+   *   }
+   * }
+   */
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+"ZGV4CjAzNQAO4Dwurw97RcUtfH7np7S5RR8gsJYOfmeABQAAcAAAAHhWNBIAAAAAAAAAALwEAAAc" +
+"AAAAcAAAAAkAAADgAAAABAAAAAQBAAABAAAANAEAAAoAAAA8AQAAAQAAAIwBAADUAwAArAEAAHYC" +
+"AACIAgAAkAIAAJMCAACXAgAAtgIAANACAADgAgAABAMAACQDAAA6AwAATgMAAGkDAAB4AwAAhQMA" +
+"AJQDAACfAwAAogMAAK8DAAC3AwAAwwMAAMkDAADOAwAA1QMAAOEDAADqAwAA9AMAAPsDAAAEAAAA" +
+"BQAAAAYAAAAHAAAACAAAAAkAAAAKAAAACwAAABAAAAACAAAABgAAAAAAAAADAAAABgAAAGgCAAAD" +
+"AAAABwAAAHACAAAQAAAACAAAAAAAAAABAAYAEwAAAAAAAwABAAAAAAAAABYAAAABAAMAAQAAAAEA" +
+"AwAMAAAAAQAAABYAAAABAAEAFwAAAAUAAwAVAAAABwADAAEAAAAHAAIAEgAAAAcAAAAZAAAAAQAA" +
+"ABEAAAAAAAAAAAAAAA4AAACsBAAAggQAAAAAAAACAAEAAAAAAFsCAAADAAAAVBAAABEAAAAFAAIA" +
+"AgAAAF8CAAAlAAAAchAGAAQAbxABAAMADARuEAQAAwAMACIBBwBwEAcAAQAaAhgAbiAIACEAbiAI" +
+"AEEAGgQAAG4gCABBAG4gCAABAG4QCQABAAwEEQQAAAEAAQABAAAAUgIAAAQAAABwEAAAAAAOAAIA" +
+"AQAAAAAAVgIAAAUAAAAaAA0AWxAAAA4ACgAOAA0ADksAFAAOABABAA48AAAAAAEAAAAFAAAAAQAA" +
+"AAYAECBhbmQgdGhlbiB0aGlzOiAABjxpbml0PgABTAACTEwAHUxhcnQvVGVzdDE5OTYkU3VwZXJU" +
+"cmFuc2Zvcm07ABhMYXJ0L1Rlc3QxOTk2JFRyYW5zZm9ybTsADkxhcnQvVGVzdDE5OTY7ACJMZGFs" +
+"dmlrL2Fubm90YXRpb24vRW5jbG9zaW5nQ2xhc3M7AB5MZGFsdmlrL2Fubm90YXRpb24vSW5uZXJD" +
+"bGFzczsAFExqYXZhL2xhbmcvUnVubmFibGU7ABJMamF2YS9sYW5nL1N0cmluZzsAGUxqYXZhL2xh" +
+"bmcvU3RyaW5nQnVpbGRlcjsADVBvc3RUcmFuc2Zvcm0AC1NBTFVUQVRJT05TAA1UZXN0MTk5Ni5q" +
+"YXZhAAlUcmFuc2Zvcm0AAVYAC2FjY2Vzc0ZsYWdzAAZhcHBlbmQACm15R3JlZXRpbmcABG5hbWUA" +
+"A3J1bgAFc2F5SGkACnNheUhpVHdpY2UAB3N1cGVyOiAACHRvU3RyaW5nAAV2YWx1ZQB2fn5EOHsi" +
+"Y29tcGlsYXRpb24tbW9kZSI6ImRlYnVnIiwibWluLWFwaSI6MSwic2hhLTEiOiI2MGRhNGQ2N2Iz" +
+"ODFjNDI0Njc3NTdjNDlmYjZlNTU3NTZkODhhMmYzIiwidmVyc2lvbiI6IjEuNy4xMi1kZXYifQAC" +
+"AwEaGAICBAIRBBkUFw8AAQEDAAECgYAEoAQDAbgEAQGsAwEBxAMAAAAAAAACAAAAcwQAAHkEAACg" +
+"BAAAAAAAAAAAAAAAAAAAEAAAAAAAAAABAAAAAAAAAAEAAAAcAAAAcAAAAAIAAAAJAAAA4AAAAAMA" +
+"AAAEAAAABAEAAAQAAAABAAAANAEAAAUAAAAKAAAAPAEAAAYAAAABAAAAjAEAAAEgAAAEAAAArAEA" +
+"AAMgAAAEAAAAUgIAAAEQAAACAAAAaAIAAAIgAAAcAAAAdgIAAAQgAAACAAAAcwQAAAAgAAABAAAA" +
+"ggQAAAMQAAACAAAAnAQAAAYgAAABAAAArAQAAAAQAAABAAAAvAQAAA==");
+
+  public static void run() {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest(new Transform());
+  }
+
+  public static void doTest(final Transform t) {
+    System.out.println(t.sayHiTwice(() -> { System.out.println("Not doing anything"); }));
+    System.out.println(t.sayHiTwice(
+      () -> {
+        System.out.println("Redefining calling class");
+        Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+        t.PostTransform();
+      }));
+    System.out.println(t.sayHiTwice(() -> { System.out.println("Not doing anything"); }));
+  }
+}
diff --git a/test/1997-structural-shadow-method/expected.txt b/test/1997-structural-shadow-method/expected.txt
new file mode 100644
index 0000000..3a8b8de
--- /dev/null
+++ b/test/1997-structural-shadow-method/expected.txt
@@ -0,0 +1,6 @@
+Hello!
+Hello!
+Hello!
+Hello World!
+Hello World!
+Hello World!
diff --git a/test/1997-structural-shadow-method/info.txt b/test/1997-structural-shadow-method/info.txt
new file mode 100644
index 0000000..71e3bfc
--- /dev/null
+++ b/test/1997-structural-shadow-method/info.txt
@@ -0,0 +1 @@
+Test structural redefinition when the method being added was resolvable previously.
diff --git a/test/1997-structural-shadow-method/run b/test/1997-structural-shadow-method/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/1997-structural-shadow-method/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1997-structural-shadow-method/src/Main.java b/test/1997-structural-shadow-method/src/Main.java
new file mode 100644
index 0000000..3c9bc85
--- /dev/null
+++ b/test/1997-structural-shadow-method/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1997.run();
+  }
+}
diff --git a/test/1997-structural-shadow-method/src/art/Redefinition.java b/test/1997-structural-shadow-method/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1997-structural-shadow-method/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1997-structural-shadow-method/src/art/Test1997.java b/test/1997-structural-shadow-method/src/art/Test1997.java
new file mode 100644
index 0000000..7309a31
--- /dev/null
+++ b/test/1997-structural-shadow-method/src/art/Test1997.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+
+public class Test1997 {
+
+  public static class SuperTransform {
+    // We will be shadowing this function.
+    public static void sayHi() {
+      System.out.println("Hello!");
+    }
+  }
+
+  // The class we will be transforming.
+  public static class Transform extends SuperTransform {
+    public static void sayHiTwice() {
+      Transform.sayHi();
+      Transform.sayHi();
+    }
+  }
+
+  // public static class Transform extends SuperTransform {
+  //   public static void sayHiTwice() {
+  //     Transform.sayHi();
+  //     Transform.sayHi();
+  //   }
+  //   public static void sayHi() {
+  //     System.out.println("Hello World!");
+  //   }
+  // }
+  private static final byte[] DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQA9wdy7Lgbrv+sD+wixborREr0maZCK5yqABAAAcAAAAHhWNBIAAAAAAAAAALwDAAAW"
+                  + "AAAAcAAAAAkAAADIAAAAAgAAAOwAAAABAAAABAEAAAUAAAAMAQAAAQAAADQBAAAsAwAAVAEAAMIB"
+                  + "AADKAQAA2AEAAPcBAAARAgAAIQIAAEUCAABlAgAAfAIAAJACAACkAgAAswIAAL4CAADBAgAAxQIA"
+                  + "ANICAADYAgAA3QIAAOYCAADtAgAA+QIAAAADAAACAAAAAwAAAAQAAAAFAAAABgAAAAcAAAAIAAAA"
+                  + "CQAAAAwAAAAMAAAACAAAAAAAAAANAAAACAAAALwBAAAHAAUAEAAAAAAAAAAAAAAAAQAAAAAAAAAB"
+                  + "AAAAEgAAAAEAAAATAAAABQABABEAAAABAAAAAQAAAAAAAAAAAAAACgAAAKwDAACHAwAAAAAAAAEA"
+                  + "AQABAAAAqgEAAAQAAABwEAAAAAAOAAIAAAACAAAArgEAAAgAAABiAAAAGgEBAG4gBAAQAA4AAAAA"
+                  + "AAAAAACzAQAABwAAAHEAAgAAAHEAAgAAAA4ADwAOABUADngAEQAOPDwAAAAAAQAAAAYABjxpbml0"
+                  + "PgAMSGVsbG8gV29ybGQhAB1MYXJ0L1Rlc3QxOTk3JFN1cGVyVHJhbnNmb3JtOwAYTGFydC9UZXN0"
+                  + "MTk5NyRUcmFuc2Zvcm07AA5MYXJ0L1Rlc3QxOTk3OwAiTGRhbHZpay9hbm5vdGF0aW9uL0VuY2xv"
+                  + "c2luZ0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0aW9uL0lubmVyQ2xhc3M7ABVMamF2YS9pby9Qcmlu"
+                  + "dFN0cmVhbTsAEkxqYXZhL2xhbmcvU3RyaW5nOwASTGphdmEvbGFuZy9TeXN0ZW07AA1UZXN0MTk5"
+                  + "Ny5qYXZhAAlUcmFuc2Zvcm0AAVYAAlZMAAthY2Nlc3NGbGFncwAEbmFtZQADb3V0AAdwcmludGxu"
+                  + "AAVzYXlIaQAKc2F5SGlUd2ljZQAFdmFsdWUAdn5+RDh7ImNvbXBpbGF0aW9uLW1vZGUiOiJkZWJ1"
+                  + "ZyIsIm1pbi1hcGkiOjEsInNoYS0xIjoiNjBkYTRkNjdiMzgxYzQyNDY3NzU3YzQ5ZmI2ZTU1NzU2"
+                  + "ZDg4YTJmMyIsInZlcnNpb24iOiIxLjcuMTItZGV2In0AAgMBFBgCAgQCDgQJDxcLAAADAAGBgATU"
+                  + "AgEJ7AIBCYwDAAAAAAAAAAIAAAB4AwAAfgMAAKADAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAEAAAAA"
+                  + "AAAAAQAAABYAAABwAAAAAgAAAAkAAADIAAAAAwAAAAIAAADsAAAABAAAAAEAAAAEAQAABQAAAAUA"
+                  + "AAAMAQAABgAAAAEAAAA0AQAAASAAAAMAAABUAQAAAyAAAAMAAACqAQAAARAAAAEAAAC8AQAAAiAA"
+                  + "ABYAAADCAQAABCAAAAIAAAB4AwAAACAAAAEAAACHAwAAAxAAAAIAAACcAwAABiAAAAEAAACsAwAA"
+                  + "ABAAAAEAAAC8AwAA");
+
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+
+  public static void doTest() throws Exception {
+    Transform.sayHiTwice();
+    Transform.sayHi();
+    Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+    Transform.sayHiTwice();
+    Transform.sayHi();
+  }
+}
diff --git a/test/1998-structural-shadow-field/expected.txt b/test/1998-structural-shadow-field/expected.txt
new file mode 100644
index 0000000..9ae530e
--- /dev/null
+++ b/test/1998-structural-shadow-field/expected.txt
@@ -0,0 +1,4 @@
+Hello
+Hello
+null
+Hello
diff --git a/test/1998-structural-shadow-field/info.txt b/test/1998-structural-shadow-field/info.txt
new file mode 100644
index 0000000..71e3bfc
--- /dev/null
+++ b/test/1998-structural-shadow-field/info.txt
@@ -0,0 +1 @@
+Test structural redefinition when the method being added was resolvable previously.
diff --git a/test/1998-structural-shadow-field/run b/test/1998-structural-shadow-field/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/1998-structural-shadow-field/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1998-structural-shadow-field/src/Main.java b/test/1998-structural-shadow-field/src/Main.java
new file mode 100644
index 0000000..f6aeca5
--- /dev/null
+++ b/test/1998-structural-shadow-field/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1998.run();
+  }
+}
diff --git a/test/1998-structural-shadow-field/src/art/Redefinition.java b/test/1998-structural-shadow-field/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1998-structural-shadow-field/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1998-structural-shadow-field/src/art/Test1998.java b/test/1998-structural-shadow-field/src/art/Test1998.java
new file mode 100644
index 0000000..3fda936
--- /dev/null
+++ b/test/1998-structural-shadow-field/src/art/Test1998.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+
+public class Test1998 {
+
+  public static class SuperTransform {
+    public static String greeting = "Hello";
+  }
+
+  // The class we will be transforming.
+  public static class Transform extends SuperTransform { }
+
+  // public static class Transform extends SuperTransform {
+  //   public static String greeting;
+  // }
+  private static final byte[] DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+"ZGV4CjAzNQCYmnoWz4BqygrZQM4zf/mJ/25+dM86MHKAAwAAcAAAAHhWNBIAAAAAAAAAAMgCAAAP" +
+"AAAAcAAAAAcAAACsAAAAAQAAAMgAAAABAAAA1AAAAAIAAADcAAAAAQAAAOwAAAB0AgAADAEAACgB" +
+"AAAwAQAATwEAAGkBAAB5AQAAnQEAAL0BAADRAQAA4AEAAOsBAADuAQAA+wEAAAUCAAALAgAAEgIA" +
+"AAEAAAACAAAAAwAAAAQAAAAFAAAABgAAAAkAAAAJAAAABgAAAAAAAAABAAUACwAAAAAAAAAAAAAA" +
+"AQAAAAAAAAABAAAAAQAAAAAAAAAAAAAABwAAALgCAACZAgAAAAAAAAEAAQABAAAAJAEAAAQAAABw" +
+"EAAAAAAOAAUADgAGPGluaXQ+AB1MYXJ0L1Rlc3QxOTk4JFN1cGVyVHJhbnNmb3JtOwAYTGFydC9U" +
+"ZXN0MTk5OCRUcmFuc2Zvcm07AA5MYXJ0L1Rlc3QxOTk4OwAiTGRhbHZpay9hbm5vdGF0aW9uL0Vu" +
+"Y2xvc2luZ0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0aW9uL0lubmVyQ2xhc3M7ABJMamF2YS9sYW5n" +
+"L1N0cmluZzsADVRlc3QxOTk4LmphdmEACVRyYW5zZm9ybQABVgALYWNjZXNzRmxhZ3MACGdyZWV0" +
+"aW5nAARuYW1lAAV2YWx1ZQB2fn5EOHsiY29tcGlsYXRpb24tbW9kZSI6ImRlYnVnIiwibWluLWFw" +
+"aSI6MSwic2hhLTEiOiI2MGRhNGQ2N2IzODFjNDI0Njc3NTdjNDlmYjZlNTU3NTZkODhhMmYzIiwi" +
+"dmVyc2lvbiI6IjEuNy4xMi1kZXYifQACAwENGAICBAIKBAkMFwgBAAEAAAkBgYAEjAIAAAAAAAAA" +
+"AgAAAIoCAACQAgAArAIAAAAAAAAAAAAAAAAAAA8AAAAAAAAAAQAAAAAAAAABAAAADwAAAHAAAAAC" +
+"AAAABwAAAKwAAAADAAAAAQAAAMgAAAAEAAAAAQAAANQAAAAFAAAAAgAAANwAAAAGAAAAAQAAAOwA" +
+"AAABIAAAAQAAAAwBAAADIAAAAQAAACQBAAACIAAADwAAACgBAAAEIAAAAgAAAIoCAAAAIAAAAQAA" +
+"AJkCAAADEAAAAgAAAKgCAAAGIAAAAQAAALgCAAAAEAAAAQAAAMgCAAA=");
+
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+
+  public static void doTest() throws Exception {
+    System.out.println(Transform.greeting);
+    System.out.println(SuperTransform.greeting);
+    Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+    System.out.println(Transform.greeting);
+    System.out.println(SuperTransform.greeting);
+  }
+}
diff --git a/test/1999-virtual-structural/expected.txt b/test/1999-virtual-structural/expected.txt
new file mode 100644
index 0000000..335b5d7
--- /dev/null
+++ b/test/1999-virtual-structural/expected.txt
@@ -0,0 +1,4 @@
+Hi(SubTransform called 1 times)
+Hi(SubTransform called 2 times)
+Hi(SubTransform called 3 times)
+Hello (Transform called 1 times)(SubTransform called 4 times)
diff --git a/test/1999-virtual-structural/info.txt b/test/1999-virtual-structural/info.txt
new file mode 100644
index 0000000..606c984
--- /dev/null
+++ b/test/1999-virtual-structural/info.txt
@@ -0,0 +1,3 @@
+Tests basic functions in the jvmti plugin.
+
+Tests that using the structural redefinition can add new virtual methods and fields.
diff --git a/test/1999-virtual-structural/run b/test/1999-virtual-structural/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/1999-virtual-structural/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/1999-virtual-structural/src/Main.java b/test/1999-virtual-structural/src/Main.java
new file mode 100644
index 0000000..86a492b
--- /dev/null
+++ b/test/1999-virtual-structural/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test1999.run();
+  }
+}
diff --git a/test/1999-virtual-structural/src/art/Redefinition.java b/test/1999-virtual-structural/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/1999-virtual-structural/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/1999-virtual-structural/src/art/Test1999.java b/test/1999-virtual-structural/src/art/Test1999.java
new file mode 100644
index 0000000..f6811a9
--- /dev/null
+++ b/test/1999-virtual-structural/src/art/Test1999.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+public class Test1999 {
+
+  public static class Transform {
+    public String getGreeting() {
+      return "Hi";
+    }
+  }
+
+  public static class SubTransform extends Transform {
+    private int count = 0;
+    public void sayHi() {
+      System.out.println(getGreeting() + "(SubTransform called " + (++count) + " times)");
+    }
+  }
+
+  /**
+   * base64 encoded class/dex file for
+   * public static class Transform {
+   *   private int count;
+   *   public String getGreeting() {
+   *     return "Hello (Transform called " + incrCount() + " times)";
+   *   }
+   *   protected int incrCount() {
+   *     return ++count;
+   *   }
+   * }
+   */
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+"ZGV4CjAzNQAwwbMpPdPdWkU+6UJnvqa7v4VBdcuq2vkMBQAAcAAAAHhWNBIAAAAAAAAAAEgEAAAa" +
+"AAAAcAAAAAkAAADYAAAABQAAAPwAAAABAAAAOAEAAAgAAABAAQAAAQAAAIABAABsAwAAoAEAADoC" +
+"AABDAgAASwIAAGUCAABoAgAAawIAAG8CAABzAgAAjQIAAJ0CAADBAgAA4QIAAPUCAAAJAwAAJAMA" +
+"ADMDAAA+AwAAQQMAAE4DAABWAwAAXQMAAGoDAAB1AwAAewMAAIUDAACMAwAAAwAAAAcAAAAIAAAA" +
+"CQAAAAoAAAALAAAADAAAAA0AAAAQAAAAAwAAAAAAAAAAAAAABAAAAAYAAAAAAAAABQAAAAcAAAAs" +
+"AgAABgAAAAcAAAA0AgAAEAAAAAgAAAAAAAAAAQAAABMAAAABAAQAAQAAAAEAAQAUAAAAAQAAABUA" +
+"AAAFAAQAAQAAAAcABAABAAAABwACABIAAAAHAAMAEgAAAAcAAQAXAAAAAQAAAAEAAAAFAAAAAAAA" +
+"AA4AAAA4BAAAEwQAAAAAAAACAAEAAAAAACgCAAAHAAAAUhAAANgAAAFZEAAADwAAAAQAAQACAAAA" +
+"JAIAABsAAABuEAIAAwAKACIBBwBwEAQAAQAaAgIAbiAGACEAbiAFAAEAGgAAAG4gBgABAG4QBwAB" +
+"AAwAEQAAAAEAAQABAAAAIAIAAAQAAABwEAMAAAAOAAMADgAGAA4ACQAOAAEAAAAAAAAAAQAAAAYA" +
+"ByB0aW1lcykABjxpbml0PgAYSGVsbG8gKFRyYW5zZm9ybSBjYWxsZWQgAAFJAAFMAAJMSQACTEwA" +
+"GExhcnQvVGVzdDE5OTkkVHJhbnNmb3JtOwAOTGFydC9UZXN0MTk5OTsAIkxkYWx2aWsvYW5ub3Rh" +
+"dGlvbi9FbmNsb3NpbmdDbGFzczsAHkxkYWx2aWsvYW5ub3RhdGlvbi9Jbm5lckNsYXNzOwASTGph" +
+"dmEvbGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAGUxqYXZhL2xhbmcvU3RyaW5nQnVp" +
+"bGRlcjsADVRlc3QxOTk5LmphdmEACVRyYW5zZm9ybQABVgALYWNjZXNzRmxhZ3MABmFwcGVuZAAF" +
+"Y291bnQAC2dldEdyZWV0aW5nAAlpbmNyQ291bnQABG5hbWUACHRvU3RyaW5nAAV2YWx1ZQB2fn5E" +
+"OHsiY29tcGlsYXRpb24tbW9kZSI6ImRlYnVnIiwibWluLWFwaSI6MSwic2hhLTEiOiI2MGRhNGQ2" +
+"N2IzODFjNDI0Njc3NTdjNDlmYjZlNTU3NTZkODhhMmYzIiwidmVyc2lvbiI6IjEuNy4xMi1kZXYi" +
+"fQACAwEYGAICBAIRBAkWFw8AAQECAAIAgYAEiAQBAcADAQSgAwAAAAAAAgAAAAQEAAAKBAAALAQA" +
+"AAAAAAAAAAAAAAAAABAAAAAAAAAAAQAAAAAAAAABAAAAGgAAAHAAAAACAAAACQAAANgAAAADAAAA" +
+"BQAAAPwAAAAEAAAAAQAAADgBAAAFAAAACAAAAEABAAAGAAAAAQAAAIABAAABIAAAAwAAAKABAAAD" +
+"IAAAAwAAACACAAABEAAAAgAAACwCAAACIAAAGgAAADoCAAAEIAAAAgAAAAQEAAAAIAAAAQAAABME" +
+"AAADEAAAAgAAACgEAAAGIAAAAQAAADgEAAAAEAAAAQAAAEgEAAA=");
+
+
+  public static void run() {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest(new SubTransform());
+  }
+
+  public static void doTest(SubTransform t) {
+    t.sayHi();
+    t.sayHi();
+    t.sayHi();
+    Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+    t.sayHi();
+  }
+}
diff --git a/test/2000-virtual-list-structural/AbstractCollection.patch b/test/2000-virtual-list-structural/AbstractCollection.patch
new file mode 100644
index 0000000..7507c7d
--- /dev/null
+++ b/test/2000-virtual-list-structural/AbstractCollection.patch
@@ -0,0 +1,16 @@
+--- ../../../libcore/ojluni/src/main/java/java/util/AbstractCollection.java	2019-05-31 10:36:26.634361294 -0700
++++ src-ex/java/util/AbstractCollection.java	2019-11-18 13:04:48.253575013 -0800
+@@ -63,7 +63,13 @@
+      * Sole constructor.  (For invocation by subclass constructors, typically
+      * implicit.)
+      */
++    public static volatile int TOTAL_COUNT;
++    public int cnt;
++
+     protected AbstractCollection() {
++      synchronized (Collection.class) {
++        cnt = ++TOTAL_COUNT;
++      }
+     }
+ 
+     // Query Operations
diff --git a/test/2000-virtual-list-structural/build b/test/2000-virtual-list-structural/build
new file mode 100755
index 0000000..87d6acc
--- /dev/null
+++ b/test/2000-virtual-list-structural/build
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop on failure.
+set -e
+
+# Deref the symlink.
+mv src-ex/java/util/AbstractCollection.java src-ex/java/util/AbstractCollection.bak
+cp src-ex/java/util/AbstractCollection.bak src-ex/java/util/AbstractCollection.java
+
+# Patch the copied version.
+patch src-ex/java/util/AbstractCollection.java AbstractCollection.patch
+
+DESUGAR=false ./default-build "$@"
+
+# restore the symlink
+rm src-ex/java/util/AbstractCollection.java
+mv src-ex/java/util/AbstractCollection.bak src-ex/java/util/AbstractCollection.java
diff --git a/test/2000-virtual-list-structural/expected.txt b/test/2000-virtual-list-structural/expected.txt
new file mode 100644
index 0000000..9d3e1b6
--- /dev/null
+++ b/test/2000-virtual-list-structural/expected.txt
@@ -0,0 +1,5 @@
+List is: [a, b, c, d]
+List is: [1, 2, 3, 4]
+List is: [1, 2, 3, 4, xyz: 0, xyz: 1, xyz: 2, xyz: 3, xyz: 4, xyz: 5, xyz: 6, xyz: 7, xyz: 8, xyz: 9, xyz: 10, xyz: 11, xyz: 12, xyz: 13, xyz: 14, xyz: 15, xyz: 16, xyz: 17, xyz: 18, xyz: 19, xyz: 20, xyz: 21, xyz: 22, xyz: 23, xyz: 24, xyz: 25, xyz: 26, xyz: 27, xyz: 28, xyz: 29, xyz: 30, xyz: 31, xyz: 32, xyz: 33, xyz: 34, xyz: 35, xyz: 36, xyz: 37, xyz: 38, xyz: 39, xyz: 40, xyz: 41, xyz: 42, xyz: 43, xyz: 44, xyz: 45, xyz: 46, xyz: 47, xyz: 48, xyz: 49, xyz: 50, xyz: 51, xyz: 52, xyz: 53, xyz: 54, xyz: 55, xyz: 56, xyz: 57, xyz: 58, xyz: 59, xyz: 60, xyz: 61, xyz: 62, xyz: 63, xyz: 64, xyz: 65, xyz: 66, xyz: 67, xyz: 68, xyz: 69, xyz: 70, xyz: 71, xyz: 72, xyz: 73, xyz: 74, xyz: 75, xyz: 76, xyz: 77, xyz: 78, xyz: 79, xyz: 80, xyz: 81, xyz: 82, xyz: 83, xyz: 84, xyz: 85, xyz: 86, xyz: 87, xyz: 88, xyz: 89, xyz: 90, xyz: 91, xyz: 92, xyz: 93, xyz: 94, xyz: 95, xyz: 96, xyz: 97, xyz: 98, xyz: 99, xyz: 100, xyz: 101, xyz: 102, xyz: 103, xyz: 104, xyz: 105, xyz: 106, xyz: 107, xyz: 108, xyz: 109, xyz: 110, xyz: 111, xyz: 112, xyz: 113, xyz: 114, xyz: 115, xyz: 116, xyz: 117, xyz: 118, xyz: 119, xyz: 120, xyz: 121, xyz: 122, xyz: 123, xyz: 124, xyz: 125, xyz: 126, xyz: 127, xyz: 128, xyz: 129, xyz: 130, xyz: 131, xyz: 132, xyz: 133, xyz: 134, xyz: 135, xyz: 136, xyz: 137, xyz: 138, xyz: 139, xyz: 140, xyz: 141, xyz: 142, xyz: 143, xyz: 144, xyz: 145, xyz: 146, xyz: 147, xyz: 148, xyz: 149, xyz: 150, xyz: 151, xyz: 152, xyz: 153, xyz: 154, xyz: 155, xyz: 156, xyz: 157, xyz: 158, xyz: 159, xyz: 160, xyz: 161, xyz: 162, xyz: 163, xyz: 164, xyz: 165, xyz: 166, xyz: 167, xyz: 168, xyz: 169, xyz: 170, xyz: 171, xyz: 172, xyz: 173, xyz: 174, xyz: 175, xyz: 176, xyz: 177, xyz: 178, xyz: 179, xyz: 180, xyz: 181, xyz: 182, xyz: 183, xyz: 184, xyz: 185, xyz: 186, xyz: 187, xyz: 188, xyz: 189, xyz: 190, xyz: 191, xyz: 192, xyz: 193, xyz: 194, xyz: 195, xyz: 196, xyz: 197, xyz: 198, xyz: 199, xyz: 200, xyz: 201, xyz: 202, xyz: 203, xyz: 204, xyz: 205, xyz: 206, xyz: 207, xyz: 
208, xyz: 209, xyz: 210, xyz: 211, xyz: 212, xyz: 213, xyz: 214, xyz: 215, xyz: 216, xyz: 217, xyz: 218, xyz: 219, xyz: 220, xyz: 221, xyz: 222, xyz: 223, xyz: 224, xyz: 225, xyz: 226, xyz: 227, xyz: 228, xyz: 229, xyz: 230, xyz: 231, xyz: 232, xyz: 233, xyz: 234, xyz: 235, xyz: 236, xyz: 237, xyz: 238, xyz: 239, xyz: 240, xyz: 241, xyz: 242, xyz: 243, xyz: 244, xyz: 245, xyz: 246, xyz: 247, xyz: 248, xyz: 249, xyz: 250, xyz: 251, xyz: 252, xyz: 253, xyz: 254, xyz: 255, xyz: 256, xyz: 257, xyz: 258, xyz: 259, xyz: 260, xyz: 261, xyz: 262, xyz: 263, xyz: 264, xyz: 265, xyz: 266, xyz: 267, xyz: 268, xyz: 269, xyz: 270, xyz: 271, xyz: 272, xyz: 273, xyz: 274, xyz: 275, xyz: 276, xyz: 277, xyz: 278, xyz: 279, xyz: 280, xyz: 281, xyz: 282, xyz: 283, xyz: 284, xyz: 285, xyz: 286, xyz: 287, xyz: 288, xyz: 289, xyz: 290, xyz: 291, xyz: 292, xyz: 293, xyz: 294, xyz: 295, xyz: 296, xyz: 297, xyz: 298, xyz: 299, xyz: 300, xyz: 301, xyz: 302, xyz: 303, xyz: 304, xyz: 305, xyz: 306, xyz: 307, xyz: 308, xyz: 309, xyz: 310, xyz: 311, xyz: 312, xyz: 313, xyz: 314, xyz: 315, xyz: 316, xyz: 317, xyz: 318, xyz: 319, xyz: 320, xyz: 321, xyz: 322, xyz: 323, xyz: 324, xyz: 325, xyz: 326, xyz: 327, xyz: 328, xyz: 329, xyz: 330, xyz: 331, xyz: 332, xyz: 333, xyz: 334, xyz: 335, xyz: 336, xyz: 337, xyz: 338, xyz: 339, xyz: 340, xyz: 341, xyz: 342, xyz: 343, xyz: 344, xyz: 345, xyz: 346, xyz: 347, xyz: 348, xyz: 349, xyz: 350, xyz: 351, xyz: 352, xyz: 353, xyz: 354, xyz: 355, xyz: 356, xyz: 357, xyz: 358, xyz: 359, xyz: 360, xyz: 361, xyz: 362, xyz: 363, xyz: 364, xyz: 365, xyz: 366, xyz: 367, xyz: 368, xyz: 369, xyz: 370, xyz: 371, xyz: 372, xyz: 373, xyz: 374, xyz: 375, xyz: 376, xyz: 377, xyz: 378, xyz: 379, xyz: 380, xyz: 381, xyz: 382, xyz: 383, xyz: 384, xyz: 385, xyz: 386, xyz: 387, xyz: 388, xyz: 389, xyz: 390, xyz: 391, xyz: 392, xyz: 393, xyz: 394, xyz: 395, xyz: 396, xyz: 397, xyz: 398, xyz: 399, xyz: 400, xyz: 401, xyz: 402, xyz: 403, xyz: 404, xyz: 405, xyz: 406, xyz: 407, xyz: 
408, xyz: 409, xyz: 410, xyz: 411, xyz: 412, xyz: 413, xyz: 414, xyz: 415, xyz: 416, xyz: 417, xyz: 418, xyz: 419, xyz: 420, xyz: 421, xyz: 422, xyz: 423, xyz: 424, xyz: 425, xyz: 426, xyz: 427, xyz: 428, xyz: 429, xyz: 430, xyz: 431, xyz: 432, xyz: 433, xyz: 434, xyz: 435, xyz: 436, xyz: 437, xyz: 438, xyz: 439, xyz: 440, xyz: 441, xyz: 442, xyz: 443, xyz: 444, xyz: 445, xyz: 446, xyz: 447, xyz: 448, xyz: 449, xyz: 450, xyz: 451, xyz: 452, xyz: 453, xyz: 454, xyz: 455, xyz: 456, xyz: 457, xyz: 458, xyz: 459, xyz: 460, xyz: 461, xyz: 462, xyz: 463, xyz: 464, xyz: 465, xyz: 466, xyz: 467, xyz: 468, xyz: 469, xyz: 470, xyz: 471, xyz: 472, xyz: 473, xyz: 474, xyz: 475, xyz: 476, xyz: 477, xyz: 478, xyz: 479, xyz: 480, xyz: 481, xyz: 482, xyz: 483, xyz: 484, xyz: 485, xyz: 486, xyz: 487, xyz: 488, xyz: 489, xyz: 490, xyz: 491, xyz: 492, xyz: 493, xyz: 494, xyz: 495, xyz: 496, xyz: 497, xyz: 498, xyz: 499, xyz: 500, xyz: 501, xyz: 502, xyz: 503, xyz: 504, xyz: 505, xyz: 506, xyz: 507, xyz: 508, xyz: 509, xyz: 510, xyz: 511, xyz: 512, xyz: 513, xyz: 514, xyz: 515, xyz: 516, xyz: 517, xyz: 518, xyz: 519, xyz: 520, xyz: 521, xyz: 522, xyz: 523, xyz: 524, xyz: 525, xyz: 526, xyz: 527, xyz: 528, xyz: 529, xyz: 530, xyz: 531, xyz: 532, xyz: 533, xyz: 534, xyz: 535, xyz: 536, xyz: 537, xyz: 538, xyz: 539, xyz: 540, xyz: 541, xyz: 542, xyz: 543, xyz: 544, xyz: 545, xyz: 546, xyz: 547, xyz: 548, xyz: 549, xyz: 550, xyz: 551, xyz: 552, xyz: 553, xyz: 554, xyz: 555, xyz: 556, xyz: 557, xyz: 558, xyz: 559, xyz: 560, xyz: 561, xyz: 562, xyz: 563, xyz: 564, xyz: 565, xyz: 566, xyz: 567, xyz: 568, xyz: 569, xyz: 570, xyz: 571, xyz: 572, xyz: 573, xyz: 574, xyz: 575, xyz: 576, xyz: 577, xyz: 578, xyz: 579, xyz: 580, xyz: 581, xyz: 582, xyz: 583, xyz: 584, xyz: 585, xyz: 586, xyz: 587, xyz: 588, xyz: 589, xyz: 590, xyz: 591, xyz: 592, xyz: 593, xyz: 594, xyz: 595, xyz: 596, xyz: 597, xyz: 598, xyz: 599, xyz: 600, xyz: 601, xyz: 602, xyz: 603, xyz: 604, xyz: 605, xyz: 606, xyz: 607, xyz: 
608, xyz: 609, xyz: 610, xyz: 611, xyz: 612, xyz: 613, xyz: 614, xyz: 615, xyz: 616, xyz: 617, xyz: 618, xyz: 619, xyz: 620, xyz: 621, xyz: 622, xyz: 623, xyz: 624, xyz: 625, xyz: 626, xyz: 627, xyz: 628, xyz: 629, xyz: 630, xyz: 631, xyz: 632, xyz: 633, xyz: 634, xyz: 635, xyz: 636, xyz: 637, xyz: 638, xyz: 639, xyz: 640, xyz: 641, xyz: 642, xyz: 643, xyz: 644, xyz: 645, xyz: 646, xyz: 647, xyz: 648, xyz: 649, xyz: 650, xyz: 651, xyz: 652, xyz: 653, xyz: 654, xyz: 655, xyz: 656, xyz: 657, xyz: 658, xyz: 659, xyz: 660, xyz: 661, xyz: 662, xyz: 663, xyz: 664, xyz: 665, xyz: 666, xyz: 667, xyz: 668, xyz: 669, xyz: 670, xyz: 671, xyz: 672, xyz: 673, xyz: 674, xyz: 675, xyz: 676, xyz: 677, xyz: 678, xyz: 679, xyz: 680, xyz: 681, xyz: 682, xyz: 683, xyz: 684, xyz: 685, xyz: 686, xyz: 687, xyz: 688, xyz: 689, xyz: 690, xyz: 691, xyz: 692, xyz: 693, xyz: 694, xyz: 695, xyz: 696, xyz: 697, xyz: 698, xyz: 699, xyz: 700, xyz: 701, xyz: 702, xyz: 703, xyz: 704, xyz: 705, xyz: 706, xyz: 707, xyz: 708, xyz: 709, xyz: 710, xyz: 711, xyz: 712, xyz: 713, xyz: 714, xyz: 715, xyz: 716, xyz: 717, xyz: 718, xyz: 719, xyz: 720, xyz: 721, xyz: 722, xyz: 723, xyz: 724, xyz: 725, xyz: 726, xyz: 727, xyz: 728, xyz: 729, xyz: 730, xyz: 731, xyz: 732, xyz: 733, xyz: 734, xyz: 735, xyz: 736, xyz: 737, xyz: 738, xyz: 739, xyz: 740, xyz: 741, xyz: 742, xyz: 743, xyz: 744, xyz: 745, xyz: 746, xyz: 747, xyz: 748, xyz: 749, xyz: 750, xyz: 751, xyz: 752, xyz: 753, xyz: 754, xyz: 755, xyz: 756, xyz: 757, xyz: 758, xyz: 759, xyz: 760, xyz: 761, xyz: 762, xyz: 763, xyz: 764, xyz: 765, xyz: 766, xyz: 767, xyz: 768, xyz: 769, xyz: 770, xyz: 771, xyz: 772, xyz: 773, xyz: 774, xyz: 775, xyz: 776, xyz: 777, xyz: 778, xyz: 779, xyz: 780, xyz: 781, xyz: 782, xyz: 783, xyz: 784, xyz: 785, xyz: 786, xyz: 787, xyz: 788, xyz: 789, xyz: 790, xyz: 791, xyz: 792, xyz: 793, xyz: 794, xyz: 795, xyz: 796, xyz: 797, xyz: 798, xyz: 799, xyz: 800, xyz: 801, xyz: 802, xyz: 803, xyz: 804, xyz: 805, xyz: 806, xyz: 807, xyz: 
808, xyz: 809, xyz: 810, xyz: 811, xyz: 812, xyz: 813, xyz: 814, xyz: 815, xyz: 816, xyz: 817, xyz: 818, xyz: 819, xyz: 820, xyz: 821, xyz: 822, xyz: 823, xyz: 824, xyz: 825, xyz: 826, xyz: 827, xyz: 828, xyz: 829, xyz: 830, xyz: 831, xyz: 832, xyz: 833, xyz: 834, xyz: 835, xyz: 836, xyz: 837, xyz: 838, xyz: 839, xyz: 840, xyz: 841, xyz: 842, xyz: 843, xyz: 844, xyz: 845, xyz: 846, xyz: 847, xyz: 848, xyz: 849, xyz: 850, xyz: 851, xyz: 852, xyz: 853, xyz: 854, xyz: 855, xyz: 856, xyz: 857, xyz: 858, xyz: 859, xyz: 860, xyz: 861, xyz: 862, xyz: 863, xyz: 864, xyz: 865, xyz: 866, xyz: 867, xyz: 868, xyz: 869, xyz: 870, xyz: 871, xyz: 872, xyz: 873, xyz: 874, xyz: 875, xyz: 876, xyz: 877, xyz: 878, xyz: 879, xyz: 880, xyz: 881, xyz: 882, xyz: 883, xyz: 884, xyz: 885, xyz: 886, xyz: 887, xyz: 888, xyz: 889, xyz: 890, xyz: 891, xyz: 892, xyz: 893, xyz: 894, xyz: 895, xyz: 896, xyz: 897, xyz: 898, xyz: 899, xyz: 900, xyz: 901, xyz: 902, xyz: 903, xyz: 904, xyz: 905, xyz: 906, xyz: 907, xyz: 908, xyz: 909, xyz: 910, xyz: 911, xyz: 912, xyz: 913, xyz: 914, xyz: 915, xyz: 916, xyz: 917, xyz: 918, xyz: 919, xyz: 920, xyz: 921, xyz: 922, xyz: 923, xyz: 924, xyz: 925, xyz: 926, xyz: 927, xyz: 928, xyz: 929, xyz: 930, xyz: 931, xyz: 932, xyz: 933, xyz: 934, xyz: 935, xyz: 936, xyz: 937, xyz: 938, xyz: 939, xyz: 940, xyz: 941, xyz: 942, xyz: 943, xyz: 944, xyz: 945, xyz: 946, xyz: 947, xyz: 948, xyz: 949, xyz: 950, xyz: 951, xyz: 952, xyz: 953, xyz: 954, xyz: 955, xyz: 956, xyz: 957, xyz: 958, xyz: 959, xyz: 960, xyz: 961, xyz: 962, xyz: 963, xyz: 964, xyz: 965, xyz: 966, xyz: 967, xyz: 968, xyz: 969, xyz: 970, xyz: 971, xyz: 972, xyz: 973, xyz: 974, xyz: 975, xyz: 976, xyz: 977, xyz: 978, xyz: 979, xyz: 980, xyz: 981, xyz: 982, xyz: 983, xyz: 984, xyz: 985, xyz: 986, xyz: 987, xyz: 988, xyz: 989, xyz: 990, xyz: 991, xyz: 992, xyz: 993, xyz: 994, xyz: 995, xyz: 996, xyz: 997, xyz: 998, xyz: 999]
+List is: [1, 2, 3, 4]
+List is: [a, b, c, d]
diff --git a/test/2000-virtual-list-structural/info.txt b/test/2000-virtual-list-structural/info.txt
new file mode 100644
index 0000000..606c984
--- /dev/null
+++ b/test/2000-virtual-list-structural/info.txt
@@ -0,0 +1,3 @@
+Tests basic functions in the jvmti plugin.
+
+Tests that using the structural redefinition can add new virtual methods and fields.
diff --git a/test/2000-virtual-list-structural/run b/test/2000-virtual-list-structural/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/2000-virtual-list-structural/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/2000-virtual-list-structural/src-ex/java/util/AbstractCollection.java b/test/2000-virtual-list-structural/src-ex/java/util/AbstractCollection.java
new file mode 120000
index 0000000..a30fbdc
--- /dev/null
+++ b/test/2000-virtual-list-structural/src-ex/java/util/AbstractCollection.java
@@ -0,0 +1 @@
+../../../../../../libcore/ojluni/src/main/java/java/util/AbstractCollection.java
\ No newline at end of file
diff --git a/test/2000-virtual-list-structural/src/Main.java b/test/2000-virtual-list-structural/src/Main.java
new file mode 100644
index 0000000..b2a42c2
--- /dev/null
+++ b/test/2000-virtual-list-structural/src/Main.java
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import art.*;
+import java.util.*;
+import java.lang.invoke.*;
+import java.io.*;
+import java.util.zip.*;
+
+public class Main {
+  public static final String TEST_NAME = "2000-virtual-list-structural";
+  public static final boolean PRINT_COUNT = false;
+  public static MethodHandles.Lookup lookup = MethodHandles.publicLookup();
+  public static MethodHandle getcnt;
+  public static MethodHandle get_total_cnt;
+  public static void GetHandles() throws Throwable {
+    getcnt = lookup.findGetter(AbstractCollection.class, "cnt", Integer.TYPE);
+    get_total_cnt = lookup.findStaticGetter(AbstractCollection.class, "TOTAL_COUNT", Integer.TYPE);
+  }
+
+  public static byte[] GetDexBytes() throws Throwable {
+    String jar_loc = System.getenv("DEX_LOCATION") + "/" + TEST_NAME + "-ex.jar";
+    try (ZipFile zip = new ZipFile(new File(jar_loc))) {
+      ZipEntry entry = zip.getEntry("classes.dex");
+      try (InputStream is = zip.getInputStream(entry)) {
+        byte[] res = new byte[(int)entry.getSize()];
+        is.read(res);
+        return res;
+      }
+    }
+  }
+  public static void PrintListAndData(AbstractCollection<String> c) throws Throwable {
+    if (PRINT_COUNT) {
+      System.out.println("List is: " + c + " count = " + getcnt.invoke(c) + " TOTAL_COUNT = " + get_total_cnt.invoke());
+    } else {
+      System.out.println("List is: " + c);
+    }
+  }
+  public static void main(String[] args) throws Throwable {
+    AbstractCollection<String> l1 = (AbstractCollection<String>)Arrays.asList("a", "b", "c", "d");
+    AbstractCollection<String> l2 = new ArrayList<>();
+    l2.add("1");
+    l2.add("2");
+    l2.add("3");
+    l2.add("4");
+    Redefinition.doCommonStructuralClassRedefinition(AbstractCollection.class, GetDexBytes());
+    GetHandles();
+    AbstractCollection<String> l3 = new HashSet<>(l2);
+    AbstractCollection<String> l4 = new LinkedList<>(l1);
+    PrintListAndData(l1);
+    PrintListAndData(l2);
+    for (int i = 0; i < 1000; i++) {
+      l2.add("xyz: " + i);
+    }
+    PrintListAndData(l2);
+    PrintListAndData(l3);
+    PrintListAndData(l4);
+    CheckLE(getcnt.invoke(l1), get_total_cnt.invoke());
+    CheckLE(getcnt.invoke(l2), get_total_cnt.invoke());
+    CheckLE(getcnt.invoke(l3), get_total_cnt.invoke());
+    CheckLE(getcnt.invoke(l4), get_total_cnt.invoke());
+    CheckEQ(getcnt.invoke(l1), 0);
+    CheckLE(getcnt.invoke(l2), 0);
+    CheckLE(getcnt.invoke(l1), getcnt.invoke(l2));
+    CheckLE(getcnt.invoke(l1), getcnt.invoke(l3));
+    CheckLE(getcnt.invoke(l1), getcnt.invoke(l4));
+    CheckLE(getcnt.invoke(l2), getcnt.invoke(l3));
+    CheckLE(getcnt.invoke(l2), getcnt.invoke(l4));
+    CheckLE(getcnt.invoke(l3), getcnt.invoke(l4));
+  }
+  public static void CheckEQ(Object a, int b) {
+    CheckEQ(((Integer)a).intValue(), b);
+  }
+  public static void CheckLE(Object a, Object b) {
+    CheckLE(((Integer)a).intValue(), ((Integer)b).intValue());
+  }
+  public static void CheckEQ(int a, int b) {
+    if (a != b) {
+      throw new Error(a + " is not equal to " + b);
+    }
+  }
+  public static void CheckLE(int a, int b) {
+    if (!(a <= b)) {
+      throw new Error(a + " is not less than or equal to " + b);
+    }
+  }
+}
diff --git a/test/2000-virtual-list-structural/src/art/Redefinition.java b/test/2000-virtual-list-structural/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/2000-virtual-list-structural/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/2001-virtual-structural-multithread/expected.txt b/test/2001-virtual-structural-multithread/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/2001-virtual-structural-multithread/expected.txt
diff --git a/test/2001-virtual-structural-multithread/info.txt b/test/2001-virtual-structural-multithread/info.txt
new file mode 100644
index 0000000..3e5291d
--- /dev/null
+++ b/test/2001-virtual-structural-multithread/info.txt
@@ -0,0 +1,4 @@
+Tests structural redefinition with multiple threads.
+
+Tests that using the structural redefinition while concurrently loading and using a subtype of
+the class being redefined doesn't cause any unexpected problems.
diff --git a/test/2001-virtual-structural-multithread/run b/test/2001-virtual-structural-multithread/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/2001-virtual-structural-multithread/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/2001-virtual-structural-multithread/src-art/Main.java b/test/2001-virtual-structural-multithread/src-art/Main.java
new file mode 100644
index 0000000..618cdcd
--- /dev/null
+++ b/test/2001-virtual-structural-multithread/src-art/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test2001.run();
+  }
+}
diff --git a/test/2001-virtual-structural-multithread/src-art/art/Redefinition.java b/test/2001-virtual-structural-multithread/src-art/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/2001-virtual-structural-multithread/src-art/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/2001-virtual-structural-multithread/src-art/art/Test2001.java b/test/2001-virtual-structural-multithread/src-art/art/Test2001.java
new file mode 100644
index 0000000..40972db
--- /dev/null
+++ b/test/2001-virtual-structural-multithread/src-art/art/Test2001.java
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import dalvik.system.InMemoryDexClassLoader;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.concurrent.CountDownLatch;
+import java.util.function.Supplier;
+
+public class Test2001 {
+  private static final int NUM_THREADS = 20;
+  // Don't perform more than this many repeats per thread to prevent OOMEs
+  private static final int TASK_COUNT_LIMIT = 1000;
+
+  public static class Transform {
+    public String greetingEnglish;
+
+    public Transform() {
+      this.greetingEnglish = "Hello";
+    }
+
+    public String sayHi() {
+      return greetingEnglish + " from " + Thread.currentThread().getName();
+    }
+  }
+
+  /**
+   * base64 encoded class/dex file for
+   * public static class Transform {
+   *   public String greetingEnglish;
+   *   public String greetingFrench;
+   *   public String greetingDanish;
+   *   public String greetingJapanese;
+   *
+   *   public Transform() {
+   *     this.greetingEnglish = "Hello World";
+   *     this.greetingFrench = "Bonjour le Monde";
+   *     this.greetingDanish = "Hej Verden";
+   *     this.greetingJapanese = "こんにちは世界";
+   *   }
+   *   public String sayHi() {
+   *     return sayHiEnglish() + ", " + sayHiFrench() + ", " + sayHiDanish() + ", " + sayHiJapanese() + " from " + Thread.currentThread().getName();
+   *   }
+   *   public String sayHiEnglish() {
+   *     return greetingEnglish;
+   *   }
+   *   public String sayHiDanish() {
+   *     return greetingDanish;
+   *   }
+   *   public String sayHiJapanese() {
+   *     return greetingJapanese;
+   *   }
+   *   public String sayHiFrench() {
+   *     return greetingFrench;
+   *   }
+   * }
+   */
+  private static final byte[] DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQCnKPY06VRa4aM/zFW0MYLmRxT/NtXxD/H4BgAAcAAAAHhWNBIAAAAAAAAAADQGAAAl"
+                  + "AAAAcAAAAAkAAAAEAQAABAAAACgBAAAEAAAAWAEAAAwAAAB4AQAAAQAAANgBAAAABQAA+AEAAEoD"
+                  + "AABSAwAAVgMAAF4DAABwAwAAfAMAAIkDAACMAwAAkAMAAKoDAAC6AwAA3gMAAP4DAAASBAAAJgQA"
+                  + "AEEEAABVBAAAZAQAAG8EAAByBAAAfwQAAIcEAACWBAAAnwQAAK8EAADABAAA0AQAAOIEAADoBAAA"
+                  + "7wQAAPwEAAAKBQAAFwUAACYFAAAwBQAANwUAAMUFAAAIAAAACQAAAAoAAAALAAAADAAAAA0AAAAO"
+                  + "AAAADwAAABIAAAAGAAAABQAAAAAAAAAHAAAABgAAAEQDAAAGAAAABwAAAAAAAAASAAAACAAAAAAA"
+                  + "AAAAAAUAFwAAAAAABQAYAAAAAAAFABkAAAAAAAUAGgAAAAAAAwACAAAAAAAAABwAAAAAAAAAHQAA"
+                  + "AAAAAAAeAAAAAAAAAB8AAAAAAAAAIAAAAAQAAwACAAAABgADAAIAAAAGAAEAFAAAAAYAAAAhAAAA"
+                  + "BwACABUAAAAHAAAAFgAAAAAAAAABAAAABAAAAAAAAAAQAAAAJAYAAOsFAAAAAAAABwABAAIAAAAt"
+                  + "AwAAQQAAAG4QAwAGAAwAbhAEAAYADAFuEAIABgAMAm4QBQAGAAwDcQAKAAAADARuEAsABAAMBCIF"
+                  + "BgBwEAcABQBuIAgABQAaAAEAbiAIAAUAbiAIABUAbiAIAAUAbiAIACUAbiAIAAUAbiAIADUAGgAA"
+                  + "AG4gCAAFAG4gCABFAG4QCQAFAAwAEQAAAAIAAQAAAAAAMQMAAAMAAABUEAAAEQAAAAIAAQAAAAAA"
+                  + "NQMAAAMAAABUEAEAEQAAAAIAAQAAAAAAOQMAAAMAAABUEAIAEQAAAAIAAQAAAAAAPQMAAAMAAABU"
+                  + "EAMAEQAAAAIAAQABAAAAJAMAABQAAABwEAYAAQAaAAUAWxABABoAAwBbEAIAGgAEAFsQAAAaACQA"
+                  + "WxADAA4ACwAOPEtLS0sAEgAOABgADgAVAA4AHgAOABsADgAAAAABAAAABQAGIGZyb20gAAIsIAAG"
+                  + "PGluaXQ+ABBCb25qb3VyIGxlIE1vbmRlAApIZWogVmVyZGVuAAtIZWxsbyBXb3JsZAABTAACTEwA"
+                  + "GExhcnQvVGVzdDIwMDEkVHJhbnNmb3JtOwAOTGFydC9UZXN0MjAwMTsAIkxkYWx2aWsvYW5ub3Rh"
+                  + "dGlvbi9FbmNsb3NpbmdDbGFzczsAHkxkYWx2aWsvYW5ub3RhdGlvbi9Jbm5lckNsYXNzOwASTGph"
+                  + "dmEvbGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAGUxqYXZhL2xhbmcvU3RyaW5nQnVp"
+                  + "bGRlcjsAEkxqYXZhL2xhbmcvVGhyZWFkOwANVGVzdDIwMDEuamF2YQAJVHJhbnNmb3JtAAFWAAth"
+                  + "Y2Nlc3NGbGFncwAGYXBwZW5kAA1jdXJyZW50VGhyZWFkAAdnZXROYW1lAA5ncmVldGluZ0Rhbmlz"
+                  + "aAAPZ3JlZXRpbmdFbmdsaXNoAA5ncmVldGluZ0ZyZW5jaAAQZ3JlZXRpbmdKYXBhbmVzZQAEbmFt"
+                  + "ZQAFc2F5SGkAC3NheUhpRGFuaXNoAAxzYXlIaUVuZ2xpc2gAC3NheUhpRnJlbmNoAA1zYXlIaUph"
+                  + "cGFuZXNlAAh0b1N0cmluZwAFdmFsdWUAiwF+fkQ4eyJjb21waWxhdGlvbi1tb2RlIjoiZGVidWci"
+                  + "LCJoYXMtY2hlY2tzdW1zIjpmYWxzZSwibWluLWFwaSI6MSwic2hhLTEiOiJmNjJiOGNlNmEwNTkw"
+                  + "MDU0ZWYzNGExYWVkZTcwYjQ2NjY4ZThiNDlmIiwidmVyc2lvbiI6IjIuMC4xLWRldiJ9AAfjgZPj"
+                  + "gpPjgavjgaHjga/kuJbnlYwAAgIBIhgBAgMCEwQJGxcRAAQBBQABAQEBAQEBAIGABOwFAQH4AwEB"
+                  + "jAUBAaQFAQG8BQEB1AUAAAAAAAAAAgAAANwFAADiBQAAGAYAAAAAAAAAAAAAAAAAABAAAAAAAAAA"
+                  + "AQAAAAAAAAABAAAAJQAAAHAAAAACAAAACQAAAAQBAAADAAAABAAAACgBAAAEAAAABAAAAFgBAAAF"
+                  + "AAAADAAAAHgBAAAGAAAAAQAAANgBAAABIAAABgAAAPgBAAADIAAABgAAACQDAAABEAAAAQAAAEQD"
+                  + "AAACIAAAJQAAAEoDAAAEIAAAAgAAANwFAAAAIAAAAQAAAOsFAAADEAAAAgAAABQGAAAGIAAAAQAA"
+                  + "ACQGAAAAEAAAAQAAADQGAAA=");
+
+  /*
+   * base64 encoded class/dex file for
+    package art;
+    import java.util.function.Supplier;
+    public class SubTransform extends art.Test2001.Transform implements Supplier<String> {
+      public SubTransform() {
+        super();
+      }
+      public String get() {
+        return "from SUBCLASS: " + super.sayHi();
+      }
+    }
+   */
+  private static final byte[] SUB_DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQBawzkIDf9khFw00md41U4vIqRuhqBTjM+0BAAAcAAAAHhWNBIAAAAAAAAAAPwDAAAV"
+                  + "AAAAcAAAAAgAAADEAAAABAAAAOQAAAAAAAAAAAAAAAgAAAAUAQAAAQAAAFQBAABAAwAAdAEAAAIC"
+                  + "AAAKAgAADgIAABECAAAVAgAAKQIAAEMCAABiAgAAdgIAAIoCAAClAgAAxAIAAOMCAAD2AgAA+QIA"
+                  + "AAEDAAASAwAAFwMAAB4DAAAoAwAALwMAAAQAAAAFAAAABgAAAAcAAAAIAAAACQAAAAoAAAANAAAA"
+                  + "AgAAAAMAAAAAAAAAAgAAAAQAAAAAAAAAAwAAAAUAAAD8AQAADQAAAAcAAAAAAAAAAAADAAAAAAAA"
+                  + "AAAAEAAAAAAAAQAQAAAAAQADAAAAAAABAAEAEQAAAAUAAwAAAAAABQACAA4AAAAFAAEAEgAAAAAA"
+                  + "AAABAAAAAQAAAPQBAAAMAAAA7AMAAMsDAAAAAAAAAgABAAEAAADpAQAABQAAAG4QAgABAAwAEQAA"
+                  + "AAQAAQACAAAA7QEAABYAAABvEAQAAwAMACIBBQBwEAUAAQAaAg8AbiAGACEAbiAGAAEAbhAHAAEA"
+                  + "DAARAAEAAQABAAAA5AEAAAQAAABwEAMAAAAOAAYADjwABAAOAAkADgAAAAABAAAABgAAAAEAAAAE"
+                  + "AAY8aW5pdD4AAj47AAFMAAJMTAASTGFydC9TdWJUcmFuc2Zvcm07ABhMYXJ0L1Rlc3QyMDAxJFRy"
+                  + "YW5zZm9ybTsAHUxkYWx2aWsvYW5ub3RhdGlvbi9TaWduYXR1cmU7ABJMamF2YS9sYW5nL09iamVj"
+                  + "dDsAEkxqYXZhL2xhbmcvU3RyaW5nOwAZTGphdmEvbGFuZy9TdHJpbmdCdWlsZGVyOwAdTGphdmEv"
+                  + "dXRpbC9mdW5jdGlvbi9TdXBwbGllcjsAHUxqYXZhL3V0aWwvZnVuY3Rpb24vU3VwcGxpZXI8ABFT"
+                  + "dWJUcmFuc2Zvcm0uamF2YQABVgAGYXBwZW5kAA9mcm9tIFNVQkNMQVNTOiAAA2dldAAFc2F5SGkA"
+                  + "CHRvU3RyaW5nAAV2YWx1ZQCLAX5+RDh7ImNvbXBpbGF0aW9uLW1vZGUiOiJkZWJ1ZyIsImhhcy1j"
+                  + "aGVja3N1bXMiOmZhbHNlLCJtaW4tYXBpIjoxLCJzaGEtMSI6ImY2MmI4Y2U2YTA1OTAwNTRlZjM0"
+                  + "YTFhZWRlNzBiNDY2NjhlOGI0OWYiLCJ2ZXJzaW9uIjoiMi4wLjEtZGV2In0AAgIBExwEFwUXCxcI"
+                  + "FwEAAAECAIGABMwDAcEg9AIBAZADAAAAAAAAAQAAAL0DAADkAwAAAAAAAAAAAAAAAAAADwAAAAAA"
+                  + "AAABAAAAAAAAAAEAAAAVAAAAcAAAAAIAAAAIAAAAxAAAAAMAAAAEAAAA5AAAAAUAAAAIAAAAFAEA"
+                  + "AAYAAAABAAAAVAEAAAEgAAADAAAAdAEAAAMgAAADAAAA5AEAAAEQAAACAAAA9AEAAAIgAAAVAAAA"
+                  + "AgIAAAQgAAABAAAAvQMAAAAgAAABAAAAywMAAAMQAAACAAAA4AMAAAYgAAABAAAA7AMAAAAQAAAB"
+                  + "AAAA/AMAAA==");
+
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+
+  public static Supplier<String> mkTransform() {
+    try {
+      return (Supplier<String>)
+          (new InMemoryDexClassLoader(
+                  ByteBuffer.wrap(SUB_DEX_BYTES), Test2001.class.getClassLoader())
+              .loadClass("art.SubTransform")
+              .newInstance());
+    } catch (Exception e) {
+      return () -> {
+        return e.toString();
+      };
+    }
+  }
+
+  public static final class MyThread extends Thread {
+    public MyThread(CountDownLatch delay, int id) {
+      super("Thread: " + id);
+      this.thr_id = id;
+      this.results = new ArrayList<>(TASK_COUNT_LIMIT);
+      this.finish = false;
+      this.delay = delay;
+    }
+
+    public void run() {
+      delay.countDown();
+      while (!finish && results.size() < TASK_COUNT_LIMIT) {
+        Supplier<String> t = mkTransform();
+        results.add(t.get());
+      }
+    }
+
+    public void finish() throws Exception {
+      finish = true;
+      this.join();
+    }
+
+    public void Check() throws Exception {
+      for (String s : results) {
+        if (!s.equals("from SUBCLASS: Hello from " + getName())
+            && !s.equals("from SUBCLASS: Hello, null, null, null from " + getName())
+            && !s.equals(
+                "from SUBCLASS: Hello World, Bonjour le Monde, Hej Verden, こんにちは世界 from "
+                    + getName())) {
+          System.out.println("FAIL " + thr_id + ": Unexpected result: " + s);
+        }
+      }
+    }
+
+    public ArrayList<String> results;
+    public volatile boolean finish;
+    public int thr_id;
+    public CountDownLatch delay;
+  }
+
+  public static MyThread[] startThreads(int num_threads) throws Exception {
+    CountDownLatch cdl = new CountDownLatch(num_threads);
+    MyThread[] res = new MyThread[num_threads];
+    for (int i = 0; i < num_threads; i++) {
+      res[i] = new MyThread(cdl, i);
+      res[i].start();
+    }
+    cdl.await();
+    return res;
+  }
+
+  public static void finishThreads(MyThread[] thrs) throws Exception {
+    for (MyThread t : thrs) {
+      t.finish();
+    }
+    for (MyThread t : thrs) {
+      t.Check();
+    }
+  }
+
+  public static void doTest() throws Exception {
+    MyThread[] threads = startThreads(NUM_THREADS);
+    Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+    finishThreads(threads);
+  }
+}
diff --git a/test/2001-virtual-structural-multithread/src/Main.java b/test/2001-virtual-structural-multithread/src/Main.java
new file mode 100644
index 0000000..89b8557
--- /dev/null
+++ b/test/2001-virtual-structural-multithread/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    System.out.println("FAIL: Test is only for art!");
+  }
+}
diff --git a/test/2002-virtual-structural-initializing/expected.txt b/test/2002-virtual-structural-initializing/expected.txt
new file mode 100644
index 0000000..c1c8a70
--- /dev/null
+++ b/test/2002-virtual-structural-initializing/expected.txt
@@ -0,0 +1 @@
+Initialized Static Hello
diff --git a/test/2002-virtual-structural-initializing/info.txt b/test/2002-virtual-structural-initializing/info.txt
new file mode 100644
index 0000000..3e5291d
--- /dev/null
+++ b/test/2002-virtual-structural-initializing/info.txt
@@ -0,0 +1,4 @@
+Tests structural redefinition with multiple threads.
+
+Tests that using the structural redefinition while concurrently loading and using a subtype of
+the class being redefined doesn't cause any unexpected problems.
diff --git a/test/2002-virtual-structural-initializing/run b/test/2002-virtual-structural-initializing/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/2002-virtual-structural-initializing/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/2002-virtual-structural-initializing/src-art/Main.java b/test/2002-virtual-structural-initializing/src-art/Main.java
new file mode 100644
index 0000000..a0aab42
--- /dev/null
+++ b/test/2002-virtual-structural-initializing/src-art/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test2002.run();
+  }
+}
diff --git a/test/2002-virtual-structural-initializing/src-art/art/Redefinition.java b/test/2002-virtual-structural-initializing/src-art/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/2002-virtual-structural-initializing/src-art/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/2002-virtual-structural-initializing/src-art/art/Test2002.java b/test/2002-virtual-structural-initializing/src-art/art/Test2002.java
new file mode 100644
index 0000000..f91e3f7
--- /dev/null
+++ b/test/2002-virtual-structural-initializing/src-art/art/Test2002.java
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import dalvik.system.InMemoryDexClassLoader;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.concurrent.CountDownLatch;
+import java.util.function.Supplier;
+
+public class Test2002 {
+  public static final CountDownLatch start_latch = new CountDownLatch(1);
+  public static final CountDownLatch finish_latch = new CountDownLatch(1);
+  public static class Transform {
+    public Transform() { }
+
+    public String sayHi() {
+      return "Hi";
+    }
+  }
+
+  /**
+   * base64 encoded class/dex file for
+   * public static class Transform {
+   *   public String greeting;
+   *
+   *   public Transform() {
+   *     greeting = "Hello";
+   *   }
+   *   public String sayHi() {
+   *     return greeting;
+   *   }
+   * }
+   */
+  private static final byte[] DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+"ZGV4CjAzNQBlpDFxr5PhCBfCyN+GZYuYQvSqtTEESU3oAwAAcAAAAHhWNBIAAAAAAAAAADADAAAS" +
+"AAAAcAAAAAcAAAC4AAAAAgAAANQAAAABAAAA7AAAAAMAAAD0AAAAAQAAAAwBAAC8AgAALAEAAHAB" +
+"AAB4AQAAfwEAAIIBAACcAQAArAEAANABAADwAQAABAIAABgCAAAnAgAAMgIAADUCAABCAgAATAIA" +
+"AFICAABZAgAAYAIAAAMAAAAEAAAABQAAAAYAAAAHAAAACAAAAAsAAAACAAAABQAAAAAAAAALAAAA" +
+"BgAAAAAAAAAAAAUADQAAAAAAAQAAAAAAAAAAAA8AAAAEAAEAAAAAAAAAAAABAAAABAAAAAAAAAAJ" +
+"AAAAIAMAAP0CAAAAAAAAAgABAAAAAABqAQAAAwAAAFQQAAARAAAAAgABAAEAAABkAQAACAAAAHAQ" +
+"AgABABoAAQBbEAAADgAGAA48SwAKAA4AAAAGPGluaXQ+AAVIZWxsbwABTAAYTGFydC9UZXN0MjAw" +
+"MiRUcmFuc2Zvcm07AA5MYXJ0L1Rlc3QyMDAyOwAiTGRhbHZpay9hbm5vdGF0aW9uL0VuY2xvc2lu" +
+"Z0NsYXNzOwAeTGRhbHZpay9hbm5vdGF0aW9uL0lubmVyQ2xhc3M7ABJMamF2YS9sYW5nL09iamVj" +
+"dDsAEkxqYXZhL2xhbmcvU3RyaW5nOwANVGVzdDIwMDIuamF2YQAJVHJhbnNmb3JtAAFWAAthY2Nl" +
+"c3NGbGFncwAIZ3JlZXRpbmcABG5hbWUABXNheUhpAAV2YWx1ZQCLAX5+RDh7ImNvbXBpbGF0aW9u" +
+"LW1vZGUiOiJkZWJ1ZyIsImhhcy1jaGVja3N1bXMiOmZhbHNlLCJtaW4tYXBpIjoxLCJzaGEtMSI6" +
+"ImY2MmI4Y2U2YTA1OTAwNTRlZjM0YTFhZWRlNzBiNDY2NjhlOGI0OWYiLCJ2ZXJzaW9uIjoiMi4w" +
+"LjEtZGV2In0AAgIBEBgBAgMCDAQJDhcKAAEBAQABAIGABMQCAQGsAgAAAAAAAAACAAAA7gIAAPQC" +
+"AAAUAwAAAAAAAAAAAAAAAAAADwAAAAAAAAABAAAAAAAAAAEAAAASAAAAcAAAAAIAAAAHAAAAuAAA" +
+"AAMAAAACAAAA1AAAAAQAAAABAAAA7AAAAAUAAAADAAAA9AAAAAYAAAABAAAADAEAAAEgAAACAAAA" +
+"LAEAAAMgAAACAAAAZAEAAAIgAAASAAAAcAEAAAQgAAACAAAA7gIAAAAgAAABAAAA/QIAAAMQAAAC" +
+"AAAAEAMAAAYgAAABAAAAIAMAAAAQAAABAAAAMAMAAA==");
+
+  /*
+   * base64 encoded class/dex file for
+    package art;
+    import java.util.function.Supplier;
+    import java.util.concurrent.CountDownLatch;
+
+    public class SubTransform extends art.Test2002.Transform implements Supplier<String> {
+      public static final String staticId;
+      static {
+        String res = null;
+        try {
+          Test2002.start_latch.countDown();
+          Test2002.finish_latch.await();
+          res = "Initialized Static";
+        } catch (Exception e) {
+          res = e.toString();
+        }
+        staticId = res;
+      }
+      public SubTransform() {
+        super();
+      }
+      public String get() {
+        return SubTransform.staticId + " " + sayHi();
+      }
+    }
+   */
+  private static final byte[] SUB_DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+"ZGV4CjAzNQB0BhXQtGTKXAGE/UzeevPgeNK7UrQJRJkoBgAAcAAAAHhWNBIAAAAAAAAAAGQFAAAf" +
+"AAAAcAAAAAsAAADsAAAABAAAABgBAAADAAAASAEAAAwAAABgAQAAAQAAAMABAABIBAAA4AEAAM4C" +
+"AADRAgAA2wIAAOMCAADnAgAA+wIAAP4CAAACAwAAFgMAADADAABAAwAAXwMAAHYDAACKAwAAngMA" +
+"ALkDAADgAwAA/wMAAB4EAAAxBAAANAQAADwEAABDBAAATgQAAFwEAABhBAAAaAQAAHUEAAB/BAAA" +
+"iQQAAJAEAAAHAAAACAAAAAkAAAAKAAAACwAAAAwAAAANAAAADgAAAA8AAAAQAAAAEwAAAAUAAAAF" +
+"AAAAAAAAAAUAAAAGAAAAAAAAAAYAAAAHAAAAyAIAABMAAAAKAAAAAAAAAAAABgAbAAAAAgAIABcA" +
+"AAACAAgAGgAAAAAAAwABAAAAAAADAAIAAAAAAAAAGAAAAAAAAQAYAAAAAAABABkAAAABAAMAAgAA" +
+"AAQAAQAcAAAABwADAAIAAAAHAAIAFAAAAAcAAQAcAAAACAADABUAAAAIAAMAFgAAAAAAAAABAAAA" +
+"AQAAAMACAAASAAAAVAUAACwFAAAAAAAAAgABAAEAAAC1AgAABQAAAG4QAwABAAwAEQAAAAQAAQAC" +
+"AAAAuQIAABsAAABiAAAAbhAEAAMADAEiAgcAcBAHAAIAbiAIAAIAGgAAAG4gCAACAG4gCAASAG4Q" +
+"CQACAAwAEQAAAAEAAAABAAEApAIAABYAAAAAAGIAAgBuEAsAAABiAAEAbhAKAAAAGgAEACgGDQBu" +
+"EAYAAAAMAGkAAAAOAAEAAAAMAAEAAQEEDgEAAQABAAAAsAIAAAQAAABwEAUAAAAOAAgADh9aWi8b" +
+"HkwtABMADjwABQAOABYADgAAAAABAAAACQAAAAEAAAAGAAEgAAg8Y2xpbml0PgAGPGluaXQ+AAI+" +
+"OwASSW5pdGlhbGl6ZWQgU3RhdGljAAFMAAJMTAASTGFydC9TdWJUcmFuc2Zvcm07ABhMYXJ0L1Rl" +
+"c3QyMDAyJFRyYW5zZm9ybTsADkxhcnQvVGVzdDIwMDI7AB1MZGFsdmlrL2Fubm90YXRpb24vU2ln" +
+"bmF0dXJlOwAVTGphdmEvbGFuZy9FeGNlcHRpb247ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZh" +
+"L2xhbmcvU3RyaW5nOwAZTGphdmEvbGFuZy9TdHJpbmdCdWlsZGVyOwAlTGphdmEvdXRpbC9jb25j" +
+"dXJyZW50L0NvdW50RG93bkxhdGNoOwAdTGphdmEvdXRpbC9mdW5jdGlvbi9TdXBwbGllcjsAHUxq" +
+"YXZhL3V0aWwvZnVuY3Rpb24vU3VwcGxpZXI8ABFTdWJUcmFuc2Zvcm0uamF2YQABVgAGYXBwZW5k" +
+"AAVhd2FpdAAJY291bnREb3duAAxmaW5pc2hfbGF0Y2gAA2dldAAFc2F5SGkAC3N0YXJ0X2xhdGNo" +
+"AAhzdGF0aWNJZAAIdG9TdHJpbmcABXZhbHVlAIsBfn5EOHsiY29tcGlsYXRpb24tbW9kZSI6ImRl" +
+"YnVnIiwiaGFzLWNoZWNrc3VtcyI6ZmFsc2UsIm1pbi1hcGkiOjEsInNoYS0xIjoiZjYyYjhjZTZh" +
+"MDU5MDA1NGVmMzRhMWFlZGU3MGI0NjY2OGU4YjQ5ZiIsInZlcnNpb24iOiIyLjAuMS1kZXYifQAC" +
+"AwEdHAQXCBcRFw0XAwEAAgIAGQCIgATEBAGBgASMBQLBIOADAQH8AwAAAAAAAQAAAB4FAABMBQAA" +
+"AAAAAAAAAAAAAAAAEAAAAAAAAAABAAAAAAAAAAEAAAAfAAAAcAAAAAIAAAALAAAA7AAAAAMAAAAE" +
+"AAAAGAEAAAQAAAADAAAASAEAAAUAAAAMAAAAYAEAAAYAAAABAAAAwAEAAAEgAAAEAAAA4AEAAAMg" +
+"AAAEAAAApAIAAAEQAAACAAAAwAIAAAIgAAAfAAAAzgIAAAQgAAABAAAAHgUAAAAgAAABAAAALAUA" +
+"AAMQAAACAAAASAUAAAYgAAABAAAAVAUAAAAQAAABAAAAZAUAAA==");
+
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+
+  public static Supplier<String> mkTransform() {
+    try {
+      return (Supplier<String>)
+          (new InMemoryDexClassLoader(
+                  ByteBuffer.wrap(SUB_DEX_BYTES), Test2002.class.getClassLoader())
+              .loadClass("art.SubTransform")
+              .newInstance());
+    } catch (Exception e) {
+      return () -> {
+        return e.toString();
+      };
+    }
+  }
+
+  public static void doTest() throws Exception {
+    Thread t = new Thread(() -> {
+      Supplier<String> s = mkTransform();
+      System.out.println(s.get());
+    });
+    t.start();
+    start_latch.await();
+    Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+    finish_latch.countDown();
+    t.join();
+  }
+}
diff --git a/test/2002-virtual-structural-initializing/src/Main.java b/test/2002-virtual-structural-initializing/src/Main.java
new file mode 100644
index 0000000..89b8557
--- /dev/null
+++ b/test/2002-virtual-structural-initializing/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    System.out.println("FAIL: Test is only for art!");
+  }
+}
diff --git a/test/2003-double-virtual-structural/expected.txt b/test/2003-double-virtual-structural/expected.txt
new file mode 100644
index 0000000..3ace4f3
--- /dev/null
+++ b/test/2003-double-virtual-structural/expected.txt
@@ -0,0 +1,6 @@
+Hi(SubTransform called 1 times)
+Hi(SubTransform called 2 times)
+Hi(SubTransform called 3 times)
+Hello(SubTransform called 4 times, Transform called 1 times)
+Hello(SubTransform called 5 times, Transform called 2 times)
+Hello(SubTransform called 6 times, Transform called 3 times)
diff --git a/test/2003-double-virtual-structural/info.txt b/test/2003-double-virtual-structural/info.txt
new file mode 100644
index 0000000..9910b99
--- /dev/null
+++ b/test/2003-double-virtual-structural/info.txt
@@ -0,0 +1,4 @@
+Tests basic functions in the jvmti plugin.
+
+Tests that using the structural redefinition can add new virtual methods and fields that are
+accessed by a subtype undergoing non-structural redefinition simultaneously.
diff --git a/test/2003-double-virtual-structural/run b/test/2003-double-virtual-structural/run
new file mode 100755
index 0000000..b59f97c
--- /dev/null
+++ b/test/2003-double-virtual-structural/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/2003-double-virtual-structural/src/Main.java b/test/2003-double-virtual-structural/src/Main.java
new file mode 100644
index 0000000..ab0145a
--- /dev/null
+++ b/test/2003-double-virtual-structural/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test2003.run();
+  }
+}
diff --git a/test/2003-double-virtual-structural/src/art/Redefinition.java b/test/2003-double-virtual-structural/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/2003-double-virtual-structural/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/2003-double-virtual-structural/src/art/Test2003.java b/test/2003-double-virtual-structural/src/art/Test2003.java
new file mode 100644
index 0000000..e8a10e0
--- /dev/null
+++ b/test/2003-double-virtual-structural/src/art/Test2003.java
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+public class Test2003 {
+
+  public static class Transform {
+    public String getGreeting() {
+      return "Hi";
+    }
+  }
+
+  public static class SubTransform extends Transform {
+    private int count = 0;
+    public void sayHi() {
+      System.out.println(getGreeting() + "(SubTransform called " + (++count) + " times)");
+    }
+  }
+  /**
+   * base64 encoded class/dex file for
+   * public static class SubTransform extends Transform {
+   *   private int count = 0;
+   *   public void sayHi() {
+   *     System.out.println(getGreeting() + "(SubTransform called " + (++count) + " times, Transform called " + getCount() + " times)");
+   *   }
+   * }
+   */
+  private static final byte[] SUBTRANSFORM_DEX_BYTES = Base64.getDecoder().decode(
+"ZGV4CjAzNQCh/FHVi1J2bdQclTBNUHHeAbrR2Sy2cWXoBQAAcAAAAHhWNBIAAAAAAAAAACQFAAAh" +
+"AAAAcAAAAAsAAAD0AAAABgAAACABAAACAAAAaAEAAAoAAAB4AQAAAQAAAMgBAAAABAAA6AEAAJ4C" +
+"AACnAgAAwgIAANkCAADhAgAA5AIAAOcCAADrAgAA7wIAAAwDAAAmAwAANgMAAFoDAAB6AwAAkQMA" +
+"AKUDAADAAwAA1AMAAOIDAADxAwAA9AMAAPgDAAAFBAAADQQAABQEAAAeBAAAKwQAADEEAAA2BAAA" +
+"PwQAAEYEAABQBAAAVwQAAAQAAAAIAAAACQAAAAoAAAALAAAADAAAAA0AAAAOAAAADwAAABAAAAAT" +
+"AAAABAAAAAAAAAAAAAAABQAAAAcAAAAAAAAABgAAAAgAAACQAgAABwAAAAgAAACYAgAAEwAAAAoA" +
+"AAAAAAAAFAAAAAoAAACYAgAAAQAAABcAAAAJAAYAGwAAAAEABAADAAAAAQAAABgAAAABAAEAGQAA" +
+"AAEABAAdAAAAAgAEAAMAAAAGAAUAHAAAAAgABAADAAAACAACABYAAAAIAAMAFgAAAAgAAQAeAAAA" +
+"AQAAAAEAAAACAAAAAAAAABIAAAAUBQAA9AQAAAAAAAACAAEAAQAAAIICAAAHAAAAcBAEAAEAEgBZ" +
+"EAAADgAAAAYAAQACAAAAhwIAADUAAABiAAEAbhACAAUADAFSUgAA2AICAVlSAABuEAEABQAKAyIE" +
+"CABwEAYABABuIAgAFAAaAQIAbiAIABQAbiAHACQAGgEBAG4gCAAUAG4gBwA0ABoBAABuIAgAFABu" +
+"EAkABAAMAW4gBQAQAA4ABQAOPAAIAA4BNA8AAAABAAAAAAAAAAEAAAAHAAcgdGltZXMpABkgdGlt" +
+"ZXMsIFRyYW5zZm9ybSBjYWxsZWQgABUoU3ViVHJhbnNmb3JtIGNhbGxlZCAABjxpbml0PgABSQAB" +
+"TAACTEkAAkxMABtMYXJ0L1Rlc3QyMDAzJFN1YlRyYW5zZm9ybTsAGExhcnQvVGVzdDIwMDMkVHJh" +
+"bnNmb3JtOwAOTGFydC9UZXN0MjAwMzsAIkxkYWx2aWsvYW5ub3RhdGlvbi9FbmNsb3NpbmdDbGFz" +
+"czsAHkxkYWx2aWsvYW5ub3RhdGlvbi9Jbm5lckNsYXNzOwAVTGphdmEvaW8vUHJpbnRTdHJlYW07" +
+"ABJMamF2YS9sYW5nL1N0cmluZzsAGUxqYXZhL2xhbmcvU3RyaW5nQnVpbGRlcjsAEkxqYXZhL2xh" +
+"bmcvU3lzdGVtOwAMU3ViVHJhbnNmb3JtAA1UZXN0MjAwMy5qYXZhAAFWAAJWTAALYWNjZXNzRmxh" +
+"Z3MABmFwcGVuZAAFY291bnQACGdldENvdW50AAtnZXRHcmVldGluZwAEbmFtZQADb3V0AAdwcmlu" +
+"dGxuAAVzYXlIaQAIdG9TdHJpbmcABXZhbHVlAIsBfn5EOHsiY29tcGlsYXRpb24tbW9kZSI6ImRl" +
+"YnVnIiwiaGFzLWNoZWNrc3VtcyI6ZmFsc2UsIm1pbi1hcGkiOjEsInNoYS0xIjoiODViZjE2Yzc1" +
+"NjUzZDQwNGE0YzNlZDQzNjA3Yzc3Yjg1YmFmMzFlZSIsInZlcnNpb24iOiIyLjAuNS1kZXYifQAC" +
+"BAEfGAMCBQIVBAkaFxEAAQEBAAIAgYAE6AMDAYgEAAAAAAIAAADlBAAA6wQAAAgFAAAAAAAAAAAA" +
+"AAAAAAAQAAAAAAAAAAEAAAAAAAAAAQAAACEAAABwAAAAAgAAAAsAAAD0AAAAAwAAAAYAAAAgAQAA" +
+"BAAAAAIAAABoAQAABQAAAAoAAAB4AQAABgAAAAEAAADIAQAAASAAAAIAAADoAQAAAyAAAAIAAACC" +
+"AgAAARAAAAIAAACQAgAAAiAAACEAAACeAgAABCAAAAIAAADlBAAAACAAAAEAAAD0BAAAAxAAAAIA" +
+"AAAEBQAABiAAAAEAAAAUBQAAABAAAAEAAAAkBQAA");
+
+  /**
+   * base64 encoded class/dex file for
+   * public static class Transform {
+   *   private int count;
+   *   public String getGreeting() {
+   *     incrCount();
+   *     return "Hello";
+   *   }
+   *   protected void incrCount() {
+   *     ++count;
+   *   }
+   *   protected int getCount() {
+   *     return count;
+   *   }
+   * }
+   */
+  private static final byte[] TRANSFORM_DEX_BYTES = Base64.getDecoder().decode(
+"ZGV4CjAzNQCAt16FlKvFzDaE6l56jUkorc7YXyrJmRpsBAAAcAAAAHhWNBIAAAAAAAAAALQDAAAV" +
+"AAAAcAAAAAgAAADEAAAAAwAAAOQAAAABAAAACAEAAAUAAAAQAQAAAQAAADgBAAAUAwAAWAEAANQB" +
+"AADcAQAA4wEAAOYBAADpAQAAAwIAABMCAAA3AgAAVwIAAGsCAAB/AgAAjgIAAJkCAACcAgAAqQIA" +
+"ALACAAC6AgAAxwIAANICAADYAgAA3wIAAAIAAAAEAAAABQAAAAYAAAAHAAAACAAAAAkAAAAMAAAA" +
+"AgAAAAAAAAAAAAAAAwAAAAYAAAAAAAAADAAAAAcAAAAAAAAAAQAAAA4AAAABAAIAAAAAAAEAAAAP" +
+"AAAAAQABABAAAAABAAIAEQAAAAUAAgAAAAAAAQAAAAEAAAAFAAAAAAAAAAoAAACkAwAAfAMAAAAA" +
+"AAACAAEAAAAAAMYBAAADAAAAUhAAAA8AAAACAAEAAQAAAMoBAAAGAAAAbhADAAEAGgABABEAAQAB" +
+"AAEAAADCAQAABAAAAHAQBAAAAA4AAgABAAAAAADPAQAABwAAAFIQAADYAAABWRAAAA4ACwAOABUA" +
+"DgAOAA48ABIADmkABjxpbml0PgAFSGVsbG8AAUkAAUwAGExhcnQvVGVzdDIwMDMkVHJhbnNmb3Jt" +
+"OwAOTGFydC9UZXN0MjAwMzsAIkxkYWx2aWsvYW5ub3RhdGlvbi9FbmNsb3NpbmdDbGFzczsAHkxk" +
+"YWx2aWsvYW5ub3RhdGlvbi9Jbm5lckNsYXNzOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9s" +
+"YW5nL1N0cmluZzsADVRlc3QyMDAzLmphdmEACVRyYW5zZm9ybQABVgALYWNjZXNzRmxhZ3MABWNv" +
+"dW50AAhnZXRDb3VudAALZ2V0R3JlZXRpbmcACWluY3JDb3VudAAEbmFtZQAFdmFsdWUAiwF+fkQ4" +
+"eyJjb21waWxhdGlvbi1tb2RlIjoiZGVidWciLCJoYXMtY2hlY2tzdW1zIjpmYWxzZSwibWluLWFw" +
+"aSI6MSwic2hhLTEiOiI4NWJmMTZjNzU2NTNkNDA0YTRjM2VkNDM2MDdjNzdiODViYWYzMWVlIiwi" +
+"dmVyc2lvbiI6IjIuMC41LWRldiJ9AAIDARMYAgIEAg0ECRIXCwABAQMAAgCBgASMAwEE2AIBAfAC" +
+"AQSkAwAAAAACAAAAbQMAAHMDAACYAwAAAAAAAAAAAAAAAAAADwAAAAAAAAABAAAAAAAAAAEAAAAV" +
+"AAAAcAAAAAIAAAAIAAAAxAAAAAMAAAADAAAA5AAAAAQAAAABAAAACAEAAAUAAAAFAAAAEAEAAAYA" +
+"AAABAAAAOAEAAAEgAAAEAAAAWAEAAAMgAAAEAAAAwgEAAAIgAAAVAAAA1AEAAAQgAAACAAAAbQMA" +
+"AAAgAAABAAAAfAMAAAMQAAACAAAAlAMAAAYgAAABAAAApAMAAAAQAAABAAAAtAMAAA==");
+
+
+  public static void run() {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest(new SubTransform());
+  }
+
+  public static void doTest(SubTransform t) {
+    t.sayHi();
+    t.sayHi();
+    t.sayHi();
+    Redefinition.doMultiStructuralClassRedefinition(
+        new Redefinition.CommonClassDefinition(SubTransform.class, null, SUBTRANSFORM_DEX_BYTES),
+        new Redefinition.CommonClassDefinition(Transform.class, null, TRANSFORM_DEX_BYTES));
+    t.sayHi();
+    t.sayHi();
+    t.sayHi();
+  }
+}
diff --git a/test/2004-double-virtual-structural-abstract/expected.txt b/test/2004-double-virtual-structural-abstract/expected.txt
new file mode 100644
index 0000000..c705270
--- /dev/null
+++ b/test/2004-double-virtual-structural-abstract/expected.txt
@@ -0,0 +1,2 @@
+Hi
+Hello Alex
diff --git a/test/2004-double-virtual-structural-abstract/info.txt b/test/2004-double-virtual-structural-abstract/info.txt
new file mode 100644
index 0000000..9910b99
--- /dev/null
+++ b/test/2004-double-virtual-structural-abstract/info.txt
@@ -0,0 +1,4 @@
+Tests basic functions in the jvmti plugin.
+
+Tests that using the structural redefinition can add new virtual methods and fields that are
+accessed by a subtype undergoing non-structural redefinition simultaneously.
diff --git a/test/2004-double-virtual-structural-abstract/run b/test/2004-double-virtual-structural-abstract/run
new file mode 100755
index 0000000..b59f97c
--- /dev/null
+++ b/test/2004-double-virtual-structural-abstract/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/2004-double-virtual-structural-abstract/src/Main.java b/test/2004-double-virtual-structural-abstract/src/Main.java
new file mode 100644
index 0000000..592a7ba
--- /dev/null
+++ b/test/2004-double-virtual-structural-abstract/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test2004.run();
+  }
+}
diff --git a/test/2004-double-virtual-structural-abstract/src/art/Redefinition.java b/test/2004-double-virtual-structural-abstract/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/2004-double-virtual-structural-abstract/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/2004-double-virtual-structural-abstract/src/art/Test2004.java b/test/2004-double-virtual-structural-abstract/src/art/Test2004.java
new file mode 100644
index 0000000..d4a8c03
--- /dev/null
+++ b/test/2004-double-virtual-structural-abstract/src/art/Test2004.java
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+public class Test2004 {
+
+  public static abstract class Transform {
+    public String getGreeting() {
+      return "Hi";
+    }
+  }
+
+  public static class SubTransform extends Transform {
+    public void sayHi() {
+      System.out.println(getGreeting());
+    }
+  }
+  /**
+   * base64 encoded class/dex file for
+   * public static class SubTransform extends Transform {
+   *   private int count = 0;
+   *   public void sayHi() {
+   *     System.out.println(getGreeting());
+   *   }
+   *   public string getName() {
+   *     return "Alex";
+   *   }
+   * }
+   */
+  private static final byte[] SUBTRANSFORM_DEX_BYTES = Base64.getDecoder().decode(
+"ZGV4CjAzNQA5zD9gOMLav7UxQjoS9LNQj2bnqGOL4VrcBAAAcAAAAHhWNBIAAAAAAAAAABgEAAAa" +
+"AAAAcAAAAAoAAADYAAAAAwAAAAABAAACAAAAJAEAAAYAAAA0AQAAAQAAAGQBAABYAwAAhAEAAPYB" +
+"AAD+AQAABAIAAAcCAAAKAgAAJwIAAEECAABRAgAAdQIAAJUCAACsAgAAwAIAANQCAADiAgAA8QIA" +
+"APQCAAD4AgAABQMAAAwDAAAZAwAAIgMAACgDAAAtAwAANgMAAD0DAABEAwAAAgAAAAQAAAAFAAAA" +
+"BgAAAAcAAAAIAAAACQAAAAoAAAALAAAADgAAAAMAAAAHAAAAAAAAAA4AAAAJAAAAAAAAAA8AAAAJ" +
+"AAAA8AEAAAEAAAARAAAACAAGABUAAAABAAEAAAAAAAEAAAASAAAAAQAAABMAAAABAAEAFwAAAAIA" +
+"AQAAAAAABgACABYAAAABAAAAAQAAAAIAAAAAAAAADQAAAAgEAADhAwAAAAAAAAIAAQAAAAAA5QEA" +
+"AAMAAAAaAAEAEQAAAAIAAQABAAAA4AEAAAcAAABwEAQAAQASAFkQAAAOAAAAAwABAAIAAADpAQAA" +
+"CgAAAGIAAQBuEAEAAgAMAW4gBQAQAA4ACgAOPAAQAA4ADQAOlgAAAAEAAAAHAAY8aW5pdD4ABEFs" +
+"ZXgAAUkAAUwAG0xhcnQvVGVzdDIwMDQkU3ViVHJhbnNmb3JtOwAYTGFydC9UZXN0MjAwNCRUcmFu" +
+"c2Zvcm07AA5MYXJ0L1Rlc3QyMDA0OwAiTGRhbHZpay9hbm5vdGF0aW9uL0VuY2xvc2luZ0NsYXNz" +
+"OwAeTGRhbHZpay9hbm5vdGF0aW9uL0lubmVyQ2xhc3M7ABVMamF2YS9pby9QcmludFN0cmVhbTsA" +
+"EkxqYXZhL2xhbmcvU3RyaW5nOwASTGphdmEvbGFuZy9TeXN0ZW07AAxTdWJUcmFuc2Zvcm0ADVRl" +
+"c3QyMDA0LmphdmEAAVYAAlZMAAthY2Nlc3NGbGFncwAFY291bnQAC2dldEdyZWV0aW5nAAdnZXRO" +
+"YW1lAARuYW1lAANvdXQAB3ByaW50bG4ABXNheUhpAAV2YWx1ZQCLAX5+RDh7ImNvbXBpbGF0aW9u" +
+"LW1vZGUiOiJkZWJ1ZyIsImhhcy1jaGVja3N1bXMiOmZhbHNlLCJtaW4tYXBpIjoxLCJzaGEtMSI6" +
+"Ijg1YmYxNmM3NTY1M2Q0MDRhNGMzZWQ0MzYwN2M3N2I4NWJhZjMxZWUiLCJ2ZXJzaW9uIjoiMi4w" +
+"LjUtZGV2In0AAgQBGBgDAgUCEAQJFBcMAAEBAgACAIGABJwDAgGEAwEBvAMAAAAAAAAAAgAAANID" +
+"AADYAwAA/AMAAAAAAAAAAAAAAAAAABAAAAAAAAAAAQAAAAAAAAABAAAAGgAAAHAAAAACAAAACgAA" +
+"ANgAAAADAAAAAwAAAAABAAAEAAAAAgAAACQBAAAFAAAABgAAADQBAAAGAAAAAQAAAGQBAAABIAAA" +
+"AwAAAIQBAAADIAAAAwAAAOABAAABEAAAAQAAAPABAAACIAAAGgAAAPYBAAAEIAAAAgAAANIDAAAA" +
+"IAAAAQAAAOEDAAADEAAAAgAAAPgDAAAGIAAAAQAAAAgEAAAAEAAAAQAAABgEAAA=");
+
+  /**
+   * base64 encoded class/dex file for
+   * public static abstract class Transform {
+   *   public String getGreeting() {
+   *     return "Hello " + getName();
+   *   }
+   *   public abstract string getName();
+   * }
+   */
+  private static final byte[] TRANSFORM_DEX_BYTES = Base64.getDecoder().decode(
+"ZGV4CjAzNQDtwEbrWZHwf9ALLXnPJ2zRU6kQs/yHTCJ4BAAAcAAAAHhWNBIAAAAAAAAAAMADAAAW" +
+"AAAAcAAAAAgAAADIAAAAAwAAAOgAAAAAAAAAAAAAAAcAAAAMAQAAAQAAAEQBAAAUAwAAZAEAAMYB" +
+"AADOAQAA1gEAANkBAADdAQAA9wEAAAcCAAArAgAASwIAAF8CAABzAgAAjgIAAJ0CAACoAgAAqwIA" +
+"ALgCAADAAgAAzQIAANYCAADcAgAA5gIAAO0CAAAEAAAABQAAAAYAAAAHAAAACAAAAAkAAAAKAAAA" +
+"DQAAAAIAAAAFAAAAAAAAAAMAAAAGAAAAwAEAAA0AAAAHAAAAAAAAAAAAAgAAAAAAAAAAABAAAAAA" +
+"AAAAEQAAAAQAAgAAAAAABgACAAAAAAAGAAEADwAAAAYAAAATAAAAAAAAAAEEAAAEAAAAAAAAAAsA" +
+"AACwAwAAiwMAAAAAAAAEAAEAAgAAALwBAAAWAAAAbhACAAMADAAiAQYAcBAEAAEAGgIBAG4gBQAh" +
+"AG4gBQABAG4QBgABAAwAEQABAAEAAQAAALgBAAAEAAAAcBADAAAADgAEAA4ABgAOAAEAAAAFAAY8" +
+"aW5pdD4ABkhlbGxvIAABTAACTEwAGExhcnQvVGVzdDIwMDQkVHJhbnNmb3JtOwAOTGFydC9UZXN0" +
+"MjAwNDsAIkxkYWx2aWsvYW5ub3RhdGlvbi9FbmNsb3NpbmdDbGFzczsAHkxkYWx2aWsvYW5ub3Rh" +
+"dGlvbi9Jbm5lckNsYXNzOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsA" +
+"GUxqYXZhL2xhbmcvU3RyaW5nQnVpbGRlcjsADVRlc3QyMDA0LmphdmEACVRyYW5zZm9ybQABVgAL" +
+"YWNjZXNzRmxhZ3MABmFwcGVuZAALZ2V0R3JlZXRpbmcAB2dldE5hbWUABG5hbWUACHRvU3RyaW5n" +
+"AAV2YWx1ZQCLAX5+RDh7ImNvbXBpbGF0aW9uLW1vZGUiOiJkZWJ1ZyIsImhhcy1jaGVja3N1bXMi" +
+"OmZhbHNlLCJtaW4tYXBpIjoxLCJzaGEtMSI6Ijg1YmYxNmM3NTY1M2Q0MDRhNGMzZWQ0MzYwN2M3" +
+"N2I4NWJhZjMxZWUiLCJ2ZXJzaW9uIjoiMi4wLjUtZGV2In0AAgIBFBgBAgMCDiQJBBIXDAAAAQIA" +
+"gYAEoAMBAeQCAYEIAAAAAAAAAAACAAAAewMAAIEDAACkAwAAAAAAAAAAAAAAAAAADwAAAAAAAAAB" +
+"AAAAAAAAAAEAAAAWAAAAcAAAAAIAAAAIAAAAyAAAAAMAAAADAAAA6AAAAAUAAAAHAAAADAEAAAYA" +
+"AAABAAAARAEAAAEgAAACAAAAZAEAAAMgAAACAAAAuAEAAAEQAAABAAAAwAEAAAIgAAAWAAAAxgEA" +
+"AAQgAAACAAAAewMAAAAgAAABAAAAiwMAAAMQAAACAAAAoAMAAAYgAAABAAAAsAMAAAAQAAABAAAA" +
+"wAMAAA==");
+
+
+  public static void run() {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest(new SubTransform());
+  }
+
+  public static void doTest(SubTransform t) {
+    t.sayHi();
+    Redefinition.doMultiStructuralClassRedefinition(
+        new Redefinition.CommonClassDefinition(SubTransform.class, null, SUBTRANSFORM_DEX_BYTES),
+        new Redefinition.CommonClassDefinition(Transform.class, null, TRANSFORM_DEX_BYTES));
+    t.sayHi();
+  }
+}
diff --git a/test/2005-pause-all-redefine-multithreaded/expected.txt b/test/2005-pause-all-redefine-multithreaded/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/2005-pause-all-redefine-multithreaded/expected.txt
diff --git a/test/2005-pause-all-redefine-multithreaded/info.txt b/test/2005-pause-all-redefine-multithreaded/info.txt
new file mode 100644
index 0000000..c300f0c
--- /dev/null
+++ b/test/2005-pause-all-redefine-multithreaded/info.txt
@@ -0,0 +1,5 @@
+Tests structural redefinition with multiple threads.
+
+Tests that using the structural redefinition while pausing all other (main thread-group) threads
+doesn't cause problems. This also tests that we can update the newly created fields while the
+other threads are suspended, thus making them look initialized.
diff --git a/test/2005-pause-all-redefine-multithreaded/pause-all.cc b/test/2005-pause-all-redefine-multithreaded/pause-all.cc
new file mode 100644
index 0000000..77df6e4
--- /dev/null
+++ b/test/2005-pause-all-redefine-multithreaded/pause-all.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+
+#include <vector>
+
+#include "android-base/logging.h"
+#include "android-base/macros.h"
+#include "jni.h"
+#include "jvmti.h"
+
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "scoped_local_ref.h"
+#include "test_env.h"
+
+namespace art {
+namespace Test2005PauseAllRedefineMultithreaded {
+
+static constexpr jlong kRedefinedObjectTag = 0xDEADBEEF;
+
+extern "C" JNIEXPORT void JNICALL
+Java_art_Test2005_UpdateFieldValuesAndResumeThreads(JNIEnv* env,
+                                                    jclass klass ATTRIBUTE_UNUSED,
+                                                    jobjectArray threads_arr,
+                                                    jclass redefined_class,
+                                                    jobjectArray new_fields,
+                                                    jstring default_val) {
+  std::vector<jthread> threads;
+  for (jint i = 0; i < env->GetArrayLength(threads_arr); i++) {
+    threads.push_back(env->GetObjectArrayElement(threads_arr, i));
+  }
+  std::vector<jfieldID> fields;
+  for (jint i = 0; i < env->GetArrayLength(new_fields); i++) {
+    fields.push_back(env->FromReflectedField(env->GetObjectArrayElement(new_fields, i)));
+  }
+  // Tag every instance of the redefined class with kRedefinedObjectTag
+  CHECK_EQ(jvmti_env->IterateOverInstancesOfClass(
+               redefined_class,
+               JVMTI_HEAP_OBJECT_EITHER,
+               [](jlong class_tag ATTRIBUTE_UNUSED,
+                  jlong size ATTRIBUTE_UNUSED,
+                  jlong* tag_ptr,
+                  void* user_data ATTRIBUTE_UNUSED) -> jvmtiIterationControl {
+                 *tag_ptr = kRedefinedObjectTag;
+                 return JVMTI_ITERATION_CONTINUE;
+               },
+               nullptr),
+           JVMTI_ERROR_NONE);
+  jobject* objs;
+  jint cnt;
+  // Get the objects.
+  CHECK_EQ(jvmti_env->GetObjectsWithTags(1, &kRedefinedObjectTag, &cnt, &objs, nullptr),
+           JVMTI_ERROR_NONE);
+  // Set every field that's null
+  for (jint i = 0; i < cnt; i++) {
+    jobject obj = objs[i];
+    for (jfieldID field : fields) {
+      if (ScopedLocalRef<jobject>(env, env->GetObjectField(obj, field)).get() == nullptr) {
+        env->SetObjectField(obj, field, default_val);
+      }
+    }
+  }
+  LOG(INFO) << "Setting " << cnt << " objects with default values";
+  if (!threads.empty()) {
+    std::vector<jvmtiError> errs(threads.size(), JVMTI_ERROR_NONE);
+    CHECK_EQ(jvmti_env->ResumeThreadList(threads.size(), threads.data(), errs.data()),
+             JVMTI_ERROR_NONE);
+  }
+  jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(objs));
+}
+
+}  // namespace Test2005PauseAllRedefineMultithreaded
+}  // namespace art
diff --git a/test/2005-pause-all-redefine-multithreaded/run b/test/2005-pause-all-redefine-multithreaded/run
new file mode 100755
index 0000000..b59f97c
--- /dev/null
+++ b/test/2005-pause-all-redefine-multithreaded/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/2005-pause-all-redefine-multithreaded/src/Main.java b/test/2005-pause-all-redefine-multithreaded/src/Main.java
new file mode 100644
index 0000000..951236a
--- /dev/null
+++ b/test/2005-pause-all-redefine-multithreaded/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test2005.run();
+  }
+}
diff --git a/test/2005-pause-all-redefine-multithreaded/src/art/Redefinition.java b/test/2005-pause-all-redefine-multithreaded/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/2005-pause-all-redefine-multithreaded/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/2005-pause-all-redefine-multithreaded/src/art/Suspension.java b/test/2005-pause-all-redefine-multithreaded/src/art/Suspension.java
new file mode 120000
index 0000000..bcef96f
--- /dev/null
+++ b/test/2005-pause-all-redefine-multithreaded/src/art/Suspension.java
@@ -0,0 +1 @@
+../../../jvmti-common/Suspension.java
\ No newline at end of file
diff --git a/test/2005-pause-all-redefine-multithreaded/src/art/Test2005.java b/test/2005-pause-all-redefine-multithreaded/src/art/Test2005.java
new file mode 100644
index 0000000..84edb73
--- /dev/null
+++ b/test/2005-pause-all-redefine-multithreaded/src/art/Test2005.java
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Field;
+import java.util.*;
+import java.util.concurrent.CountDownLatch;
+public class Test2005 {
+  private static final int NUM_THREADS = 20;
+  private static final String DEFAULT_VAL = "DEFAULT_VALUE";
+
+  public static final class Transform {
+    public String greetingEnglish;
+    public Transform() {
+      this.greetingEnglish = "Hello";
+    }
+    public String sayHi() {
+      return greetingEnglish + " from " + Thread.currentThread().getName();
+    }
+  }
+
+  /**
+   * base64 encoded class/dex file for
+   * public static final class Transform {
+   *   public String greetingEnglish;
+   *   public String greetingFrench;
+   *   public String greetingDanish;
+   *   public String greetingJapanese;
+   *
+   *   public Transform() {
+   *     this.greetingEnglish = "Hello World";
+   *     this.greetingFrench = "Bonjour le Monde";
+   *     this.greetingDanish = "Hej Verden";
+   *     this.greetingJapanese = "こんにちは世界";
+   *   }
+   *   public String sayHi() {
+   *     return sayHiEnglish() + ", " + sayHiFrench() + ", " + sayHiDanish() + ", " +
+   * sayHiJapanese() + " from " + Thread.currentThread().getName();
+   *   }
+   *   public String sayHiEnglish() {
+   *     return greetingEnglish;
+   *   }
+   *   public String sayHiDanish() {
+   *     return greetingDanish;
+   *   }
+   *   public String sayHiJapanese() {
+   *     return greetingJapanese;
+   *   }
+   *   public String sayHiFrench() {
+   *     return greetingFrench;
+   *   }
+   * }
+   */
+  private static final byte[] DEX_BYTES = Base64.getDecoder().decode(
+      "ZGV4CjAzNQAgJ1QXHJ8PAODMKTV14wyH4oKGOMK1yyL4BgAAcAAAAHhWNBIAAAAAAAAAADQGAAAl"
+      + "AAAAcAAAAAkAAAAEAQAABAAAACgBAAAEAAAAWAEAAAwAAAB4AQAAAQAAANgBAAAABQAA+AEAAEoD"
+      + "AABSAwAAVgMAAF4DAABwAwAAfAMAAIkDAACMAwAAkAMAAKoDAAC6AwAA3gMAAP4DAAASBAAAJgQA"
+      + "AEEEAABVBAAAZAQAAG8EAAByBAAAfwQAAIcEAACWBAAAnwQAAK8EAADABAAA0AQAAOIEAADoBAAA"
+      + "7wQAAPwEAAAKBQAAFwUAACYFAAAwBQAANwUAAMUFAAAIAAAACQAAAAoAAAALAAAADAAAAA0AAAAO"
+      + "AAAADwAAABIAAAAGAAAABQAAAAAAAAAHAAAABgAAAEQDAAAGAAAABwAAAAAAAAASAAAACAAAAAAA"
+      + "AAAAAAUAFwAAAAAABQAYAAAAAAAFABkAAAAAAAUAGgAAAAAAAwACAAAAAAAAABwAAAAAAAAAHQAA"
+      + "AAAAAAAeAAAAAAAAAB8AAAAAAAAAIAAAAAQAAwACAAAABgADAAIAAAAGAAEAFAAAAAYAAAAhAAAA"
+      + "BwACABUAAAAHAAAAFgAAAAAAAAARAAAABAAAAAAAAAAQAAAAJAYAAOsFAAAAAAAABwABAAIAAAAt"
+      + "AwAAQQAAAG4QAwAGAAwAbhAEAAYADAFuEAIABgAMAm4QBQAGAAwDcQAKAAAADARuEAsABAAMBCIF"
+      + "BgBwEAcABQBuIAgABQAaAAEAbiAIAAUAbiAIABUAbiAIAAUAbiAIACUAbiAIAAUAbiAIADUAGgAA"
+      + "AG4gCAAFAG4gCABFAG4QCQAFAAwAEQAAAAIAAQAAAAAAMQMAAAMAAABUEAAAEQAAAAIAAQAAAAAA"
+      + "NQMAAAMAAABUEAEAEQAAAAIAAQAAAAAAOQMAAAMAAABUEAIAEQAAAAIAAQAAAAAAPQMAAAMAAABU"
+      + "EAMAEQAAAAIAAQABAAAAJAMAABQAAABwEAYAAQAaAAUAWxABABoAAwBbEAIAGgAEAFsQAAAaACQA"
+      + "WxADAA4ACQAOPEtLS0sAEAAOABYADgATAA4AHAAOABkADgAAAAABAAAABQAGIGZyb20gAAIsIAAG"
+      + "PGluaXQ+ABBCb25qb3VyIGxlIE1vbmRlAApIZWogVmVyZGVuAAtIZWxsbyBXb3JsZAABTAACTEwA"
+      + "GExhcnQvVGVzdDIwMDUkVHJhbnNmb3JtOwAOTGFydC9UZXN0MjAwNTsAIkxkYWx2aWsvYW5ub3Rh"
+      + "dGlvbi9FbmNsb3NpbmdDbGFzczsAHkxkYWx2aWsvYW5ub3RhdGlvbi9Jbm5lckNsYXNzOwASTGph"
+      + "dmEvbGFuZy9PYmplY3Q7ABJMamF2YS9sYW5nL1N0cmluZzsAGUxqYXZhL2xhbmcvU3RyaW5nQnVp"
+      + "bGRlcjsAEkxqYXZhL2xhbmcvVGhyZWFkOwANVGVzdDIwMDUuamF2YQAJVHJhbnNmb3JtAAFWAAth"
+      + "Y2Nlc3NGbGFncwAGYXBwZW5kAA1jdXJyZW50VGhyZWFkAAdnZXROYW1lAA5ncmVldGluZ0Rhbmlz"
+      + "aAAPZ3JlZXRpbmdFbmdsaXNoAA5ncmVldGluZ0ZyZW5jaAAQZ3JlZXRpbmdKYXBhbmVzZQAEbmFt"
+      + "ZQAFc2F5SGkAC3NheUhpRGFuaXNoAAxzYXlIaUVuZ2xpc2gAC3NheUhpRnJlbmNoAA1zYXlIaUph"
+      + "cGFuZXNlAAh0b1N0cmluZwAFdmFsdWUAiwF+fkQ4eyJjb21waWxhdGlvbi1tb2RlIjoiZGVidWci"
+      + "LCJoYXMtY2hlY2tzdW1zIjpmYWxzZSwibWluLWFwaSI6MSwic2hhLTEiOiI5N2RmNmVkNzlhNzQw"
+      + "ZWVhMzM4MmNiNWRhOTIyYmI1YmJjMDg2NDMzIiwidmVyc2lvbiI6IjIuMC45LWRldiJ9AAfjgZPj"
+      + "gpPjgavjgaHjga/kuJbnlYwAAgIBIhgBAgMCEwQZGxcRAAQBBQABAQEBAQEBAIGABOwFAQH4AwEB"
+      + "jAUBAaQFAQG8BQEB1AUAAAAAAAAAAgAAANwFAADiBQAAGAYAAAAAAAAAAAAAAAAAABAAAAAAAAAA"
+      + "AQAAAAAAAAABAAAAJQAAAHAAAAACAAAACQAAAAQBAAADAAAABAAAACgBAAAEAAAABAAAAFgBAAAF"
+      + "AAAADAAAAHgBAAAGAAAAAQAAANgBAAABIAAABgAAAPgBAAADIAAABgAAACQDAAABEAAAAQAAAEQD"
+      + "AAACIAAAJQAAAEoDAAAEIAAAAgAAANwFAAAAIAAAAQAAAOsFAAADEAAAAgAAABQGAAAGIAAAAQAA"
+      + "ACQGAAAAEAAAAQAAADQGAAA=");
+
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+
+  public static final class MyThread extends Thread {
+    public MyThread(CountDownLatch delay, int id) {
+      super("Thread: " + id);
+      this.thr_id = id;
+      this.results = new HashSet<>();
+      this.finish = false;
+      this.delay = delay;
+    }
+
+    public void run() {
+      delay.countDown();
+      while (!finish) {
+        Transform t = new Transform();
+        results.add(t.sayHi());
+      }
+    }
+
+    public void finish() throws Exception {
+      finish = true;
+      this.join();
+    }
+
+    public void Check() throws Exception {
+      for (String s : results) {
+        if (!s.equals("Hello from " + getName())
+            && !s.equals("Hello, " + DEFAULT_VAL + ", " + DEFAULT_VAL + ", " + DEFAULT_VAL
+                + " from " + getName())
+            && !s.equals(
+                "Hello World, Bonjour le Monde, Hej Verden, こんにちは世界 from " + getName())) {
+          System.out.println("FAIL " + thr_id + ": Unexpected result: " + s);
+        }
+      }
+    }
+
+    public HashSet<String> results;
+    public volatile boolean finish;
+    public int thr_id;
+    public CountDownLatch delay;
+  }
+
+  public static MyThread[] startThreads(int num_threads) throws Exception {
+    CountDownLatch cdl = new CountDownLatch(num_threads);
+    MyThread[] res = new MyThread[num_threads];
+    for (int i = 0; i < num_threads; i++) {
+      res[i] = new MyThread(cdl, i);
+      res[i].start();
+    }
+    cdl.await();
+    return res;
+  }
+  public static void finishThreads(MyThread[] thrs) throws Exception {
+    for (MyThread t : thrs) {
+      t.finish();
+    }
+    for (MyThread t : thrs) {
+      t.Check();
+    }
+  }
+
+  public static void doRedefinition() throws Exception {
+    // Get the current set of fields.
+    Field[] fields = Transform.class.getDeclaredFields();
+    // Get all the threads in the 'main' thread group
+    ThreadGroup mytg = Thread.currentThread().getThreadGroup();
+    Thread[] all_threads = new Thread[mytg.activeCount()];
+    mytg.enumerate(all_threads);
+    Set<Thread> thread_set = new HashSet<>(Arrays.asList(all_threads));
+    // We don't want to suspend ourself, that would cause a deadlock.
+    thread_set.remove(Thread.currentThread());
+    // If some of the other threads finished between calling mytg.activeCount and enumerate we will
+    // have nulls. These nulls are interpreted as currentThread by SuspendThreadList so we want to
+    // get rid of them.
+    thread_set.remove(null);
+    // Suspend them.
+    Suspension.suspendList(thread_set.toArray(new Thread[0]));
+    // Actual redefine.
+    Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+    // Get the new fields.
+    Field[] new_fields = Transform.class.getDeclaredFields();
+    Set<Field> field_set = new HashSet(Arrays.asList(new_fields));
+    field_set.removeAll(Arrays.asList(fields));
+    // Initialize the new fields on the old objects and resume.
+    UpdateFieldValuesAndResumeThreads(thread_set.toArray(new Thread[0]),
+        Transform.class,
+        field_set.toArray(new Field[0]),
+        DEFAULT_VAL);
+  }
+
+  public static void doTest() throws Exception {
+    // Force the Transform class to be initialized. We are suspending the remote
+    // threads so if one of them is in the class initialization (and therefore
+    // has a monitor lock on the class object) the redefinition will deadlock
+    // waiting for the clinit to finish and the monitor to be released.
+    if (null == Class.forName("art.Test2005$Transform")) {
+      throw new Error("No class!");
+    }
+    MyThread[] threads = startThreads(NUM_THREADS);
+
+    doRedefinition();
+    finishThreads(threads);
+  }
+  public static native void UpdateFieldValuesAndResumeThreads(
+      Thread[] t, Class<?> redefined_class, Field[] new_fields, String default_val);
+}
diff --git a/test/2006-virtual-structural-finalizing/expected.txt b/test/2006-virtual-structural-finalizing/expected.txt
new file mode 100644
index 0000000..e965357
--- /dev/null
+++ b/test/2006-virtual-structural-finalizing/expected.txt
@@ -0,0 +1,3 @@
+Finalizing
+start_counter: 1
+Finish_counter: 1
diff --git a/test/2006-virtual-structural-finalizing/info.txt b/test/2006-virtual-structural-finalizing/info.txt
new file mode 100644
index 0000000..3e5291d
--- /dev/null
+++ b/test/2006-virtual-structural-finalizing/info.txt
@@ -0,0 +1,4 @@
+Tests structural redefinition with multiple threads.
+
+Tests that using the structural redefinition while concurrently loading and using a subtype of
+the class being redefined doesn't cause any unexpected problems.
diff --git a/test/2006-virtual-structural-finalizing/run b/test/2006-virtual-structural-finalizing/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/2006-virtual-structural-finalizing/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/2006-virtual-structural-finalizing/src-art/Main.java b/test/2006-virtual-structural-finalizing/src-art/Main.java
new file mode 100644
index 0000000..11f9aa7
--- /dev/null
+++ b/test/2006-virtual-structural-finalizing/src-art/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test2006.run();
+  }
+}
diff --git a/test/2006-virtual-structural-finalizing/src-art/art/Redefinition.java b/test/2006-virtual-structural-finalizing/src-art/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/2006-virtual-structural-finalizing/src-art/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/2006-virtual-structural-finalizing/src-art/art/Test2006.java b/test/2006-virtual-structural-finalizing/src-art/art/Test2006.java
new file mode 100644
index 0000000..510d13d
--- /dev/null
+++ b/test/2006-virtual-structural-finalizing/src-art/art/Test2006.java
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import dalvik.system.InMemoryDexClassLoader;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.concurrent.CountDownLatch;
+import java.util.function.Supplier;
+import java.util.concurrent.atomic.*;
+import java.lang.ref.*;
+
+public class Test2006 {
+  public static final CountDownLatch start_latch = new CountDownLatch(1);
+  public static final CountDownLatch redefine_latch = new CountDownLatch(1);
+  public static final CountDownLatch finish_latch = new CountDownLatch(1);
+  public static volatile int start_counter = 0;
+  public static volatile int finish_counter = 0;
+  public static class Transform {
+    public Transform() { }
+    protected void finalize() throws Throwable {
+      System.out.println("Finalizing");
+      start_counter++;
+      start_latch.countDown();
+      redefine_latch.await();
+      finish_counter++;
+      finish_latch.countDown();
+    }
+  }
+
+  /**
+   * base64 encoded class/dex file for
+   * public static class Transform {
+   *   public String greeting;
+   *
+   *   public Transform() {
+   *     greeting = "Hello";
+   *   }
+   *   protected void finalize() {
+   *     System.out.println("NOTHING HERE!");
+   *   }
+   * }
+   */
+  private static final byte[] DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+"ZGV4CjAzNQDtxu0Tsy2rLn9iTZHx3r+yuY0IuN+y1el4BAAAcAAAAHhWNBIAAAAAAAAAALQDAAAX" +
+"AAAAcAAAAAkAAADMAAAAAgAAAPAAAAACAAAACAEAAAQAAAAYAQAAAQAAADgBAAAgAwAAWAEAAKoB" +
+"AACyAQAAuQEAANMBAADjAQAABwIAACcCAAA+AgAAUgIAAGYCAAB6AgAAiQIAAJgCAACjAgAApgIA" +
+"AKoCAAC3AgAAwQIAAMsCAADRAgAA1gIAAN8CAADmAgAAAgAAAAMAAAAEAAAABQAAAAYAAAAHAAAA" +
+"CAAAAAkAAAANAAAADQAAAAgAAAAAAAAADgAAAAgAAACkAQAAAAAGABEAAAAHAAQAEwAAAAAAAAAA" +
+"AAAAAAAAABAAAAAEAAEAFAAAAAUAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAsAAACkAwAAhAMAAAAA" +
+"AAACAAEAAQAAAJgBAAAIAAAAcBADAAEAGgABAFsQAAAOAAMAAQACAAAAngEAAAgAAABiAAEAGgEK" +
+"AG4gAgAQAA4ABgAOPEsACgAOeAAAAQAAAAYABjxpbml0PgAFSGVsbG8AGExhcnQvVGVzdDIwMDYk" +
+"VHJhbnNmb3JtOwAOTGFydC9UZXN0MjAwNjsAIkxkYWx2aWsvYW5ub3RhdGlvbi9FbmNsb3NpbmdD" +
+"bGFzczsAHkxkYWx2aWsvYW5ub3RhdGlvbi9Jbm5lckNsYXNzOwAVTGphdmEvaW8vUHJpbnRTdHJl" +
+"YW07ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcvU3RyaW5nOwASTGphdmEvbGFuZy9T" +
+"eXN0ZW07AA1OT1RISU5HIEhFUkUhAA1UZXN0MjAwNi5qYXZhAAlUcmFuc2Zvcm0AAVYAAlZMAAth" +
+"Y2Nlc3NGbGFncwAIZmluYWxpemUACGdyZWV0aW5nAARuYW1lAANvdXQAB3ByaW50bG4ABXZhbHVl" +
+"AIwBfn5EOHsiY29tcGlsYXRpb24tbW9kZSI6ImRlYnVnIiwiaGFzLWNoZWNrc3VtcyI6ZmFsc2Us" +
+"Im1pbi1hcGkiOjEsInNoYS0xIjoiMTI5ZWU5ZjY3NTZjMzlkZjU3ZmYwNzg1ZDI1NmIyMzc3MjY0" +
+"MmI3YyIsInZlcnNpb24iOiIyLjAuMTAtZGV2In0AAgIBFRgBAgMCDwQJEhcMAAEBAQABAIGABNgC" +
+"AQT4AgAAAAACAAAAdQMAAHsDAACYAwAAAAAAAAAAAAAAAAAAEAAAAAAAAAABAAAAAAAAAAEAAAAX" +
+"AAAAcAAAAAIAAAAJAAAAzAAAAAMAAAACAAAA8AAAAAQAAAACAAAACAEAAAUAAAAEAAAAGAEAAAYA" +
+"AAABAAAAOAEAAAEgAAACAAAAWAEAAAMgAAACAAAAmAEAAAEQAAABAAAApAEAAAIgAAAXAAAAqgEA" +
+"AAQgAAACAAAAdQMAAAAgAAABAAAAhAMAAAMQAAACAAAAlAMAAAYgAAABAAAApAMAAAAQAAABAAAA" +
+"tAMAAA==");
+
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+
+  public static final class GcThread extends Thread {
+    public volatile boolean finished = false;
+    public void run() {
+      while (!finished) {
+        Runtime.getRuntime().gc();
+        System.runFinalization();
+      }
+    }
+  }
+
+  public static void doTest() throws Exception {
+    GcThread gc_thr = new GcThread();
+    gc_thr.start();
+    mktransform();
+    start_latch.await();
+    System.out.println("start_counter: " + start_counter);
+    Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+    redefine_latch.countDown();
+    finish_latch.await();
+    System.out.println("Finish_counter: " + finish_counter);
+    gc_thr.finished = true;
+    gc_thr.join();
+  }
+  public static void mktransform() throws Exception {
+    Transform tr = new Transform();
+  }
+}
diff --git a/test/2006-virtual-structural-finalizing/src/Main.java b/test/2006-virtual-structural-finalizing/src/Main.java
new file mode 100644
index 0000000..89b8557
--- /dev/null
+++ b/test/2006-virtual-structural-finalizing/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    System.out.println("FAIL: Test is only for art!");
+  }
+}
diff --git a/test/2007-virtual-structural-finalizable/expected.txt b/test/2007-virtual-structural-finalizable/expected.txt
new file mode 100644
index 0000000..781fc9a
--- /dev/null
+++ b/test/2007-virtual-structural-finalizable/expected.txt
@@ -0,0 +1,2 @@
+Finalizing
+counter: 1
diff --git a/test/2007-virtual-structural-finalizable/info.txt b/test/2007-virtual-structural-finalizable/info.txt
new file mode 100644
index 0000000..3e5291d
--- /dev/null
+++ b/test/2007-virtual-structural-finalizable/info.txt
@@ -0,0 +1,4 @@
+Tests structural redefinition with multiple threads.
+
+Tests that using the structural redefinition while concurrently loading and using a subtype of
+the class being redefined doesn't cause any unexpected problems.
diff --git a/test/2007-virtual-structural-finalizable/run b/test/2007-virtual-structural-finalizable/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/2007-virtual-structural-finalizable/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/2007-virtual-structural-finalizable/src-art/Main.java b/test/2007-virtual-structural-finalizable/src-art/Main.java
new file mode 100644
index 0000000..ab8daea
--- /dev/null
+++ b/test/2007-virtual-structural-finalizable/src-art/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test2007.run();
+  }
+}
diff --git a/test/2007-virtual-structural-finalizable/src-art/art/Redefinition.java b/test/2007-virtual-structural-finalizable/src-art/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/2007-virtual-structural-finalizable/src-art/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/2007-virtual-structural-finalizable/src-art/art/Test2007.java b/test/2007-virtual-structural-finalizable/src-art/art/Test2007.java
new file mode 100644
index 0000000..77284eb
--- /dev/null
+++ b/test/2007-virtual-structural-finalizable/src-art/art/Test2007.java
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import dalvik.system.InMemoryDexClassLoader;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.concurrent.CountDownLatch;
+import java.util.function.Supplier;
+import java.util.concurrent.atomic.*;
+import java.lang.ref.*;
+
+public class Test2007 {
+  public static final CountDownLatch finish_latch = new CountDownLatch(1);
+  public static volatile int counter = 0;
+  public static Object theObject = null;
+  public static class Transform {
+    public Transform() { }
+    protected void finalize() throws Throwable {
+      System.out.println("Should never be called!");
+      // Do nothing.
+    }
+  }
+
+  /**
+   * base64 encoded class/dex file for
+   * public static class Transform {
+   *   public String greeting;
+   *
+   *   public Transform() {
+   *     greeting = "Hello";
+   *   }
+   *   protected void finalize() throws Throwable {
+   *     System.out.println("Finalizing");
+   *     counter++;
+   *     finish_latch.countDown();
+   *   }
+   * }
+   */
+  private static final byte[] DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+"ZGV4CjAzNQCC9DECxo2lTpw7FCCSqZArgZe8ab49ywRoBQAAcAAAAHhWNBIAAAAAAAAAAKQEAAAe" +
+"AAAAcAAAAA0AAADoAAAAAgAAABwBAAAEAAAANAEAAAUAAABUAQAAAQAAAHwBAADMAwAAnAEAAAYC" +
+"AAAOAgAAGgIAACECAAAkAgAAPgIAAE4CAAByAgAAkgIAAK4CAADFAgAA2QIAAO0CAAABAwAAGAMA" +
+"AD8DAABOAwAAWQMAAFwDAABgAwAAbQMAAHgDAACBAwAAiwMAAJkDAACjAwAAqQMAAK4DAAC3AwAA" +
+"vgMAAAMAAAAEAAAABQAAAAYAAAAHAAAACAAAAAkAAAAKAAAACwAAAAwAAAANAAAADgAAABEAAAAR" +
+"AAAADAAAAAAAAAASAAAADAAAAAACAAABAAgAGAAAAAIAAAAVAAAAAgALABcAAAAJAAYAGgAAAAEA" +
+"AAAAAAAAAQAAABYAAAAGAAEAGwAAAAcAAAAAAAAACwAAABQAAAABAAAAAQAAAAcAAAAAAAAADwAA" +
+"AIwEAABkBAAAAAAAAAIAAQABAAAA8gEAAAgAAABwEAMAAQAaAAIAWxAAAA4AAwABAAIAAAD4AQAA" +
+"EwAAAGIAAwAaAQEAbiACABAAYAABANgAAAFnAAEAYgACAG4QBAAAAA4ADAAOPEsAEAAOeGlaAAAB" +
+"AAAACAAGPGluaXQ+AApGaW5hbGl6aW5nAAVIZWxsbwABSQAYTGFydC9UZXN0MjAwNyRUcmFuc2Zv" +
+"cm07AA5MYXJ0L1Rlc3QyMDA3OwAiTGRhbHZpay9hbm5vdGF0aW9uL0VuY2xvc2luZ0NsYXNzOwAe" +
+"TGRhbHZpay9hbm5vdGF0aW9uL0lubmVyQ2xhc3M7ABpMZGFsdmlrL2Fubm90YXRpb24vVGhyb3dz" +
+"OwAVTGphdmEvaW8vUHJpbnRTdHJlYW07ABJMamF2YS9sYW5nL09iamVjdDsAEkxqYXZhL2xhbmcv" +
+"U3RyaW5nOwASTGphdmEvbGFuZy9TeXN0ZW07ABVMamF2YS9sYW5nL1Rocm93YWJsZTsAJUxqYXZh" +
+"L3V0aWwvY29uY3VycmVudC9Db3VudERvd25MYXRjaDsADVRlc3QyMDA3LmphdmEACVRyYW5zZm9y" +
+"bQABVgACVkwAC2FjY2Vzc0ZsYWdzAAljb3VudERvd24AB2NvdW50ZXIACGZpbmFsaXplAAxmaW5p" +
+"c2hfbGF0Y2gACGdyZWV0aW5nAARuYW1lAANvdXQAB3ByaW50bG4ABXZhbHVlAIwBfn5EOHsiY29t" +
+"cGlsYXRpb24tbW9kZSI6ImRlYnVnIiwiaGFzLWNoZWNrc3VtcyI6ZmFsc2UsIm1pbi1hcGkiOjEs" +
+"InNoYS0xIjoiMTI5ZWU5ZjY3NTZjMzlkZjU3ZmYwNzg1ZDI1NmIyMzc3MjY0MmI3YyIsInZlcnNp" +
+"b24iOiIyLjAuMTAtZGV2In0AAgUBHBwBGAoCAwEcGAICBAITBAkZFxAAAQEBAAEAgYAEnAMBBLwD" +
+"AAAAAAEAAABNBAAAAgAAAFUEAABbBAAAgAQAAAAAAAABAAAAAAAAAAEAAAB4BAAAEAAAAAAAAAAB" +
+"AAAAAAAAAAEAAAAeAAAAcAAAAAIAAAANAAAA6AAAAAMAAAACAAAAHAEAAAQAAAAEAAAANAEAAAUA" +
+"AAAFAAAAVAEAAAYAAAABAAAAfAEAAAEgAAACAAAAnAEAAAMgAAACAAAA8gEAAAEQAAABAAAAAAIA" +
+"AAIgAAAeAAAABgIAAAQgAAADAAAATQQAAAAgAAABAAAAZAQAAAMQAAADAAAAdAQAAAYgAAABAAAA" +
+"jAQAAAAQAAABAAAApAQAAA==");
+
+
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+
+  public static final class GcThread extends Thread {
+    public volatile boolean finished = false;
+    public void run() {
+      while (!finished) {
+        Runtime.getRuntime().gc();
+        System.runFinalization();
+      }
+    }
+  }
+
+  public static void doTest() throws Exception {
+    // Try GC forever
+    GcThread gc_thr = new GcThread();
+    gc_thr.start();
+    // Make a transform
+    mktransform();
+    Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+    theObject = null;
+    finish_latch.await();
+    System.out.println("counter: " + counter);
+    // Make sure we don't have any remaining things to finalize, eg obsolete objects or something.
+    Runtime.getRuntime().gc();
+    System.runFinalization();
+    gc_thr.finished = true;
+    gc_thr.join();
+  }
+
+  // Make sure there is never a transform in the frame of doTest.
+  public static void mktransform() throws Exception {
+    theObject = new Transform();
+  }
+}
diff --git a/test/2007-virtual-structural-finalizable/src/Main.java b/test/2007-virtual-structural-finalizable/src/Main.java
new file mode 100644
index 0000000..89b8557
--- /dev/null
+++ b/test/2007-virtual-structural-finalizable/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    System.out.println("FAIL: Test is only for art!");
+  }
+}
diff --git a/test/2008-redefine-then-old-reflect-field/expected.txt b/test/2008-redefine-then-old-reflect-field/expected.txt
new file mode 100644
index 0000000..e767888
--- /dev/null
+++ b/test/2008-redefine-then-old-reflect-field/expected.txt
@@ -0,0 +1,2 @@
+PreTransform Field public java.lang.Object art.Test2008$Transform.myField = "bar"
+PostTransform Field public java.lang.Object art.Test2008$Transform.myField = "bar"
diff --git a/test/2008-redefine-then-old-reflect-field/info.txt b/test/2008-redefine-then-old-reflect-field/info.txt
new file mode 100644
index 0000000..08c2799
--- /dev/null
+++ b/test/2008-redefine-then-old-reflect-field/info.txt
@@ -0,0 +1,4 @@
+Tests that j.l.r.Field objects survive across redefinitions
+
+We had a bug where java.lang.reflect.Field objects would be invalid after the class of the Field
+they are referencing is redefined. This tests that the bug is fixed.
diff --git a/test/2008-redefine-then-old-reflect-field/run b/test/2008-redefine-then-old-reflect-field/run
new file mode 100755
index 0000000..c6e62ae
--- /dev/null
+++ b/test/2008-redefine-then-old-reflect-field/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti
diff --git a/test/2008-redefine-then-old-reflect-field/src/Main.java b/test/2008-redefine-then-old-reflect-field/src/Main.java
new file mode 100644
index 0000000..e51f0c4
--- /dev/null
+++ b/test/2008-redefine-then-old-reflect-field/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test2008.run();
+  }
+}
diff --git a/test/2008-redefine-then-old-reflect-field/src/art/Redefinition.java b/test/2008-redefine-then-old-reflect-field/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/2008-redefine-then-old-reflect-field/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/2008-redefine-then-old-reflect-field/src/art/Test2008.java b/test/2008-redefine-then-old-reflect-field/src/art/Test2008.java
new file mode 100644
index 0000000..c97f29a
--- /dev/null
+++ b/test/2008-redefine-then-old-reflect-field/src/art/Test2008.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.concurrent.CountDownLatch;
+import java.util.function.Supplier;
+import java.util.concurrent.atomic.*;
+import java.lang.reflect.*;
+
+public class Test2008 {
+  public static class Transform {
+    public Transform() { myField = "bar"; }
+    public Object myField;
+  }
+
+  /**
+   * base64 encoded class/dex file for
+   * public static class Transform {
+   *   public Transform() { myField = "foo"; };
+   *   public Object myField;
+   * }
+   */
+  private static final byte[] DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+"ZGV4CjAzNQC9mLO3NCcl4Iqwlj+DV0clWONvLK5zDAqAAwAAcAAAAHhWNBIAAAAAAAAAAMgCAAAP" +
+"AAAAcAAAAAYAAACsAAAAAQAAAMQAAAABAAAA0AAAAAIAAADYAAAAAQAAAOgAAAB4AgAACAEAACwB" +
+"AAA0AQAATgEAAF4BAACCAQAAogEAALYBAADFAQAA0AEAANMBAADgAQAA5QEAAO4BAAD0AQAA+wEA" +
+"AAEAAAACAAAAAwAAAAQAAAAFAAAACAAAAAgAAAAFAAAAAAAAAAAABAALAAAAAAAAAAAAAAAEAAAA" +
+"AAAAAAAAAAABAAAABAAAAAAAAAAGAAAAuAIAAJkCAAAAAAAAAgABAAEAAAAoAQAACAAAAHAQAQAB" +
+"ABoACgBbEAAADgAFAA4ABjxpbml0PgAYTGFydC9UZXN0MjAwOCRUcmFuc2Zvcm07AA5MYXJ0L1Rl" +
+"c3QyMDA4OwAiTGRhbHZpay9hbm5vdGF0aW9uL0VuY2xvc2luZ0NsYXNzOwAeTGRhbHZpay9hbm5v" +
+"dGF0aW9uL0lubmVyQ2xhc3M7ABJMamF2YS9sYW5nL09iamVjdDsADVRlc3QyMDA4LmphdmEACVRy" +
+"YW5zZm9ybQABVgALYWNjZXNzRmxhZ3MAA2ZvbwAHbXlGaWVsZAAEbmFtZQAFdmFsdWUAjAF+fkQ4" +
+"eyJjb21waWxhdGlvbi1tb2RlIjoiZGVidWciLCJoYXMtY2hlY2tzdW1zIjpmYWxzZSwibWluLWFw" +
+"aSI6MSwic2hhLTEiOiI2NjA0MGE0MGQzY2JmNDA1MDU0NzQ4YmY1YTllOWYyZjNmZThhMzRiIiwi" +
+"dmVyc2lvbiI6IjIuMC4xMi1kZXYifQACAgENGAECAwIJBAkMFwcAAQEAAAEAgYAEiAIAAAAAAAAA" +
+"AgAAAIoCAACQAgAArAIAAAAAAAAAAAAAAAAAAA8AAAAAAAAAAQAAAAAAAAABAAAADwAAAHAAAAAC" +
+"AAAABgAAAKwAAAADAAAAAQAAAMQAAAAEAAAAAQAAANAAAAAFAAAAAgAAANgAAAAGAAAAAQAAAOgA" +
+"AAABIAAAAQAAAAgBAAADIAAAAQAAACgBAAACIAAADwAAACwBAAAEIAAAAgAAAIoCAAAAIAAAAQAA" +
+"AJkCAAADEAAAAgAAAKgCAAAGIAAAAQAAALgCAAAAEAAAAQAAAMgCAAA=");
+  private static final byte[] CLASS_BYTES =
+      Base64.getDecoder()
+          .decode(
+"yv66vgAAADQAFwoABQAOCAAPCQAEABAHABIHABUBAAdteUZpZWxkAQASTGphdmEvbGFuZy9PYmpl" +
+"Y3Q7AQAGPGluaXQ+AQADKClWAQAEQ29kZQEAD0xpbmVOdW1iZXJUYWJsZQEAClNvdXJjZUZpbGUB" +
+"AA1UZXN0MjAwOC5qYXZhDAAIAAkBAANmb28MAAYABwcAFgEAFmFydC9UZXN0MjAwOCRUcmFuc2Zv" +
+"cm0BAAlUcmFuc2Zvcm0BAAxJbm5lckNsYXNzZXMBABBqYXZhL2xhbmcvT2JqZWN0AQAMYXJ0L1Rl" +
+"c3QyMDA4ACEABAAFAAAAAQABAAYABwAAAAEAAQAIAAkAAQAKAAAAIwACAAEAAAALKrcAASoSArUA" +
+"A7EAAAABAAsAAAAGAAEAAAAFAAIADAAAAAIADQAUAAAACgABAAQAEQATAAk=");
+
+
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+
+  public static void doTest() throws Exception {
+    Transform t = new Transform();
+    Field f = Transform.class.getDeclaredField("myField");
+    System.out.println("PreTransform Field " + f + " = \"" + f.get(t) + "\"");
+    Redefinition.doCommonClassRedefinition(Transform.class, CLASS_BYTES, DEX_BYTES);
+    System.out.println("PostTransform Field " + f + " = \"" + f.get(t) + "\"");
+  }
+}
diff --git a/test/2009-structural-local-ref/expected.txt b/test/2009-structural-local-ref/expected.txt
new file mode 100644
index 0000000..3ad1fbd
--- /dev/null
+++ b/test/2009-structural-local-ref/expected.txt
@@ -0,0 +1,8 @@
+Doing redefinition for instance field
+Result was VirtualString
+Doing redefinition for static field
+Result was StaticString
+Doing redefinition for instance method
+Result was meth
+Doing redefinition for static method
+Result was static-meth
diff --git a/test/2009-structural-local-ref/info.txt b/test/2009-structural-local-ref/info.txt
new file mode 100644
index 0000000..4c9f871
--- /dev/null
+++ b/test/2009-structural-local-ref/info.txt
@@ -0,0 +1,3 @@
+Tests structural redefinition with local-refs
+
+Tests that using the structural redefinition updates JNI local-refs.
diff --git a/test/2009-structural-local-ref/local-ref.cc b/test/2009-structural-local-ref/local-ref.cc
new file mode 100644
index 0000000..9f6ef0b
--- /dev/null
+++ b/test/2009-structural-local-ref/local-ref.cc
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+
+#include <vector>
+
+#include "android-base/logging.h"
+#include "android-base/macros.h"
+#include "jni.h"
+#include "jvmti.h"
+
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "scoped_local_ref.h"
+#include "test_env.h"
+
+namespace art {
+namespace Test2009StructuralLocalRef {
+
+extern "C" JNIEXPORT jstring JNICALL Java_art_Test2009_NativeLocalCallStatic(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject obj, jobject thnk) {
+  jclass obj_klass = env->GetObjectClass(obj);
+  jmethodID run_meth = env->GetMethodID(env->FindClass("java/lang/Runnable"), "run", "()V");
+  env->CallVoidMethod(thnk, run_meth);
+  jmethodID new_method =
+      env->GetStaticMethodID(obj_klass, "getGreetingStatic", "()Ljava/lang/String;");
+  if (env->ExceptionCheck()) {
+    return nullptr;
+  } else {
+    return reinterpret_cast<jstring>(env->CallStaticObjectMethod(obj_klass, new_method));
+  }
+}
+
+extern "C" JNIEXPORT jstring JNICALL Java_art_Test2009_NativeLocalCallVirtual(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject obj, jobject thnk) {
+  jclass obj_klass = env->GetObjectClass(obj);
+  jmethodID run_meth = env->GetMethodID(env->FindClass("java/lang/Runnable"), "run", "()V");
+  env->CallVoidMethod(thnk, run_meth);
+  jmethodID new_method = env->GetMethodID(obj_klass, "getGreeting", "()Ljava/lang/String;");
+  if (env->ExceptionCheck()) {
+    return nullptr;
+  } else {
+    return reinterpret_cast<jstring>(env->CallObjectMethod(obj, new_method));
+  }
+}
+extern "C" JNIEXPORT jstring JNICALL Java_art_Test2009_NativeLocalGetIField(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject obj, jobject thnk) {
+  jclass obj_klass = env->GetObjectClass(obj);
+  jmethodID run_meth = env->GetMethodID(env->FindClass("java/lang/Runnable"), "run", "()V");
+  env->CallVoidMethod(thnk, run_meth);
+  jfieldID new_field = env->GetFieldID(obj_klass, "greeting", "Ljava/lang/String;");
+  if (env->ExceptionCheck()) {
+    return nullptr;
+  } else {
+    env->SetObjectField(obj, new_field, env->NewStringUTF("VirtualString"));
+    return reinterpret_cast<jstring>(env->GetObjectField(obj, new_field));
+  }
+}
+extern "C" JNIEXPORT jstring JNICALL Java_art_Test2009_NativeLocalGetSField(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject obj, jobject thnk) {
+  jclass obj_klass = env->GetObjectClass(obj);
+  jmethodID run_meth = env->GetMethodID(env->FindClass("java/lang/Runnable"), "run", "()V");
+  env->CallVoidMethod(thnk, run_meth);
+  jfieldID new_field = env->GetStaticFieldID(obj_klass, "static_greeting", "Ljava/lang/String;");
+  if (env->ExceptionCheck()) {
+    return nullptr;
+  } else {
+    env->SetStaticObjectField(obj_klass, new_field, env->NewStringUTF("StaticString"));
+    return reinterpret_cast<jstring>(env->GetStaticObjectField(obj_klass, new_field));
+  }
+}
+
+}  // namespace Test2009StructuralLocalRef
+}  // namespace art
diff --git a/test/2009-structural-local-ref/run b/test/2009-structural-local-ref/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/2009-structural-local-ref/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/2009-structural-local-ref/src-art/Main.java b/test/2009-structural-local-ref/src-art/Main.java
new file mode 100644
index 0000000..6635228
--- /dev/null
+++ b/test/2009-structural-local-ref/src-art/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test2009.run();
+  }
+}
diff --git a/test/2009-structural-local-ref/src-art/art/Redefinition.java b/test/2009-structural-local-ref/src-art/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/2009-structural-local-ref/src-art/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/2009-structural-local-ref/src-art/art/Test2009.java b/test/2009-structural-local-ref/src-art/art/Test2009.java
new file mode 100644
index 0000000..7ef9a8b
--- /dev/null
+++ b/test/2009-structural-local-ref/src-art/art/Test2009.java
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import dalvik.system.InMemoryDexClassLoader;
+import java.lang.ref.*;
+import java.nio.ByteBuffer;
+import java.util.Base64;
+import java.util.concurrent.atomic.*;
+
+public class Test2009 {
+  public static class Transform {
+    public Transform() {}
+  }
+  /*
+   * base64 encoded class/dex file for
+   *
+   * package art;
+   * public class Transform {
+   *   public Transform() { }
+   * }
+   */
+  private static final byte[] DEX_BYTES_INITIAL =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQBMYVKB9B8EiEj/K5pUWVbEqHPGshupr2RkAgAAcAAAAHhWNBIAAAAAAAAAANABAAAG"
+                  + "AAAAcAAAAAMAAACIAAAAAQAAAJQAAAAAAAAAAAAAAAIAAACgAAAAAQAAALAAAACUAQAA0AAAAPAA"
+                  + "AAD4AAAACQEAAB0BAAAtAQAAMAEAAAEAAAACAAAABAAAAAQAAAACAAAAAAAAAAAAAAAAAAAAAQAA"
+                  + "AAAAAAAAAAAAAQAAAAEAAAAAAAAAAwAAAAAAAAC/AQAAAAAAAAEAAQABAAAA6AAAAAQAAABwEAEA"
+                  + "AAAOAAMADjwAAAAABjxpbml0PgAPTGFydC9UcmFuc2Zvcm07ABJMamF2YS9sYW5nL09iamVjdDsA"
+                  + "DlRyYW5zZm9ybS5qYXZhAAFWAIwBfn5EOHsiY29tcGlsYXRpb24tbW9kZSI6ImRlYnVnIiwiaGFz"
+                  + "LWNoZWNrc3VtcyI6ZmFsc2UsIm1pbi1hcGkiOjEsInNoYS0xIjoiZDFkNTFjMWNiM2U4NWFhMzBl"
+                  + "MDBhNjgyMmNjYTgzYmJlMWRmZTk0NSIsInZlcnNpb24iOiIyLjAuMTMtZGV2In0AAAABAACBgATQ"
+                  + "AQAAAAAAAAAMAAAAAAAAAAEAAAAAAAAAAQAAAAYAAABwAAAAAgAAAAMAAACIAAAAAwAAAAEAAACU"
+                  + "AAAABQAAAAIAAACgAAAABgAAAAEAAACwAAAAASAAAAEAAADQAAAAAyAAAAEAAADoAAAAAiAAAAYA"
+                  + "AADwAAAAACAAAAEAAAC/AQAAAxAAAAEAAADMAQAAABAAAAEAAADQAQAA");
+
+  /*
+   * base64 encoded class/dex file for
+   * package art;
+   * public static class Transform {
+   *   public String greeting;
+   *   public static String static_greeting;
+   *
+   *   public Transform() {
+   *     greeting = "Hello";
+   *   }
+   *   public static String getGreetingStatic() {
+   *     static_greeting = "static-meth";
+   *     return static_greeting;
+   *   }
+   *   public String getGreeting() { greeting = "meth"; return greeting; }
+   * }
+   */
+  private static final byte[] DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQB6kDahLt0Aoqc///gYs0Vgd/hpukfKc5mEAwAAcAAAAHhWNBIAAAAAAAAAAOQCAAAP"
+                  + "AAAAcAAAAAQAAACsAAAAAgAAALwAAAACAAAA1AAAAAQAAADkAAAAAQAAAAQBAABgAgAAJAEAAIwB"
+                  + "AACUAQAAmwEAAJ4BAACvAQAAwwEAANcBAADnAQAA6gEAAPcBAAAKAgAAFAIAABoCAAAnAgAAOAIA"
+                  + "AAMAAAAEAAAABQAAAAcAAAACAAAAAgAAAAAAAAAHAAAAAwAAAAAAAAAAAAIACgAAAAAAAgANAAAA"
+                  + "AAABAAAAAAAAAAAACAAAAAAAAAAJAAAAAQABAAAAAAAAAAAAAQAAAAEAAAAAAAAABgAAAAAAAADH"
+                  + "AgAAAAAAAAIAAQAAAAAAhgEAAAUAAAAaAAsAWxAAABEAAAABAAAAAAAAAIIBAAAFAAAAGgAMAGkA"
+                  + "AQARAAAAAgABAAEAAAB8AQAACAAAAHAQAwABABoAAQBbEAAADgAGAA48SwAJAA4ACgAOAAAABjxp"
+                  + "bml0PgAFSGVsbG8AAUwAD0xhcnQvVHJhbnNmb3JtOwASTGphdmEvbGFuZy9PYmplY3Q7ABJMamF2"
+                  + "YS9sYW5nL1N0cmluZzsADlRyYW5zZm9ybS5qYXZhAAFWAAtnZXRHcmVldGluZwARZ2V0R3JlZXRp"
+                  + "bmdTdGF0aWMACGdyZWV0aW5nAARtZXRoAAtzdGF0aWMtbWV0aAAPc3RhdGljX2dyZWV0aW5nAIwB"
+                  + "fn5EOHsiY29tcGlsYXRpb24tbW9kZSI6ImRlYnVnIiwiaGFzLWNoZWNrc3VtcyI6ZmFsc2UsIm1p"
+                  + "bi1hcGkiOjEsInNoYS0xIjoiZDFkNTFjMWNiM2U4NWFhMzBlMDBhNjgyMmNjYTgzYmJlMWRmZTk0"
+                  + "NSIsInZlcnNpb24iOiIyLjAuMTMtZGV2In0AAQECAQEJAAEAgYAE3AICCcACAQGkAgAAAAAAAAAN"
+                  + "AAAAAAAAAAEAAAAAAAAAAQAAAA8AAABwAAAAAgAAAAQAAACsAAAAAwAAAAIAAAC8AAAABAAAAAIA"
+                  + "AADUAAAABQAAAAQAAADkAAAABgAAAAEAAAAEAQAAASAAAAMAAAAkAQAAAyAAAAMAAAB8AQAAAiAA"
+                  + "AA8AAACMAQAAACAAAAEAAADHAgAAAxAAAAEAAADgAgAAABAAAAEAAADkAgAA");
+
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+
+  public static Class<?> MakeClass() throws Exception {
+    return new InMemoryDexClassLoader(
+            ByteBuffer.wrap(DEX_BYTES_INITIAL), Test2009.class.getClassLoader())
+        .loadClass("art.Transform");
+  }
+
+  public static void doTest() throws Exception {
+    // Make a transform
+    Class<?> ifields = MakeClass();
+    String res =
+        NativeLocalGetIField(
+            ifields.newInstance(),
+            () -> {
+              System.out.println("Doing redefinition for instance field");
+              Redefinition.doCommonStructuralClassRedefinition(ifields, DEX_BYTES);
+            });
+    System.out.println("Result was " + res);
+    Class<?> sfields = MakeClass();
+    res =
+        NativeLocalGetSField(
+            sfields.newInstance(),
+            () -> {
+              System.out.println("Doing redefinition for static field");
+              Redefinition.doCommonStructuralClassRedefinition(sfields, DEX_BYTES);
+            });
+    System.out.println("Result was " + res);
+    Class<?> imeths = MakeClass();
+    res =
+        NativeLocalCallVirtual(
+            imeths.newInstance(),
+            () -> {
+              System.out.println("Doing redefinition for instance method");
+              Redefinition.doCommonStructuralClassRedefinition(imeths, DEX_BYTES);
+            });
+    System.out.println("Result was " + res);
+    Class<?> smeths = MakeClass();
+    res =
+        NativeLocalCallStatic(
+            smeths.newInstance(),
+            () -> {
+              System.out.println("Doing redefinition for static method");
+              Redefinition.doCommonStructuralClassRedefinition(smeths, DEX_BYTES);
+            });
+    System.out.println("Result was " + res);
+  }
+
+  public static native String NativeLocalCallVirtual(Object t, Runnable thnk);
+
+  public static native String NativeLocalCallStatic(Object t, Runnable thnk);
+
+  public static native String NativeLocalGetIField(Object t, Runnable thnk);
+
+  public static native String NativeLocalGetSField(Object t, Runnable thnk);
+}
diff --git a/test/2009-structural-local-ref/src/Main.java b/test/2009-structural-local-ref/src/Main.java
new file mode 100644
index 0000000..89b8557
--- /dev/null
+++ b/test/2009-structural-local-ref/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    System.out.println("FAIL: Test is only for art!");
+  }
+}
diff --git a/test/2011-stack-walk-concurrent-instrument/expected.txt b/test/2011-stack-walk-concurrent-instrument/expected.txt
new file mode 100644
index 0000000..77a1486
--- /dev/null
+++ b/test/2011-stack-walk-concurrent-instrument/expected.txt
@@ -0,0 +1,2 @@
+JNI_OnLoad called
+Done
diff --git a/test/2011-stack-walk-concurrent-instrument/info.txt b/test/2011-stack-walk-concurrent-instrument/info.txt
new file mode 100644
index 0000000..91f0106
--- /dev/null
+++ b/test/2011-stack-walk-concurrent-instrument/info.txt
@@ -0,0 +1,3 @@
+Tests concurrently instrumenting a thread while walking a stack doesn't crash/break.
+
+Bug: 72608560
diff --git a/test/2011-stack-walk-concurrent-instrument/src/Main.java b/test/2011-stack-walk-concurrent-instrument/src/Main.java
new file mode 100644
index 0000000..8f96f93
--- /dev/null
+++ b/test/2011-stack-walk-concurrent-instrument/src/Main.java
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.concurrent.*;
+
+public class Main {
+  public Main() {
+  }
+
+  void $noinline$f(Runnable r) throws Exception {
+    $noinline$g(r);
+  }
+
+  void $noinline$g(Runnable r) {
+    $noinline$h(r);
+  }
+
+  void $noinline$h(Runnable r) {
+    r.run();
+  }
+
+  public native void resetTest();
+  public native void waitAndDeopt(Thread t);
+  public native void doSelfStackWalk();
+
+  void testConcurrent() throws Exception {
+    resetTest();
+    final Thread current = Thread.currentThread();
+    Thread t = new Thread(() -> {
+      try {
+        this.waitAndDeopt(current);
+      } catch (Exception e) {
+        throw new Error("Fail!", e);
+      }
+    });
+    t.start();
+    $noinline$f(() -> {
+      try {
+        this.doSelfStackWalk();
+      } catch (Exception e) {
+        throw new Error("Fail!", e);
+      }
+    });
+    t.join();
+  }
+
+  public static void main(String[] args) throws Exception {
+    System.loadLibrary(args[0]);
+    Main st = new Main();
+    st.testConcurrent();
+    System.out.println("Done");
+  }
+}
diff --git a/test/2011-stack-walk-concurrent-instrument/stack_walk_concurrent.cc b/test/2011-stack-walk-concurrent-instrument/stack_walk_concurrent.cc
new file mode 100644
index 0000000..a185446
--- /dev/null
+++ b/test/2011-stack-walk-concurrent-instrument/stack_walk_concurrent.cc
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <atomic>
+#include <string_view>
+
+#include "arch/context.h"
+#include "art_method-inl.h"
+#include "jni.h"
+#include "scoped_thread_state_change.h"
+#include "stack.h"
+#include "thread.h"
+
+namespace art {
+namespace StackWalkConcurrentInstrument {
+
+std::atomic<bool> instrument_waiting = false;
+std::atomic<bool> instrumented = false;
+
+// Spin lock.
+static void WaitForInstrument() REQUIRES_SHARED(Locks::mutator_lock_) {
+  ScopedThreadSuspension sts(Thread::Current(), ThreadState::kWaitingForDeoptimization);
+  instrument_waiting = true;
+  while (!instrumented) {
+  }
+}
+
+class SelfStackWalkVisitor : public StackVisitor {
+ public:
+  explicit SelfStackWalkVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
+      : StackVisitor(thread, Context::Create(), StackWalkKind::kIncludeInlinedFrames) {}
+
+  bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
+    if (GetMethod()->GetNameView() == "$noinline$f") {
+      CHECK(!found_f_);
+      found_f_ = true;
+    } else if (GetMethod()->GetNameView() == "$noinline$g") {
+      CHECK(!found_g_);
+      found_g_ = true;
+      WaitForInstrument();
+    } else if (GetMethod()->GetNameView() == "$noinline$h") {
+      CHECK(!found_h_);
+      found_h_ = true;
+    }
+    return true;
+  }
+
+  bool found_f_ = false;
+  bool found_g_ = false;
+  bool found_h_ = false;
+};
+
+extern "C" JNIEXPORT void JNICALL Java_Main_resetTest(JNIEnv*, jobject) {
+  instrument_waiting = false;
+  instrumented = false;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_doSelfStackWalk(JNIEnv*, jobject) {
+  ScopedObjectAccess soa(Thread::Current());
+  SelfStackWalkVisitor sswv(Thread::Current());
+  sswv.WalkStack();
+  CHECK(sswv.found_f_);
+  CHECK(sswv.found_g_);
+  CHECK(sswv.found_h_);
+}
+extern "C" JNIEXPORT void JNICALL Java_Main_waitAndDeopt(JNIEnv*, jobject, jobject target) {
+  while (!instrument_waiting) {
+  }
+  bool timed_out = false;
+  Thread* other = Runtime::Current()->GetThreadList()->SuspendThreadByPeer(
+      target, true, SuspendReason::kInternal, &timed_out);
+  CHECK(!timed_out);
+  CHECK(other != nullptr);
+  ScopedSuspendAll ssa(__FUNCTION__);
+  Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(other);
+  MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
+  bool updated = other->ModifySuspendCount(Thread::Current(), -1, nullptr, SuspendReason::kInternal);
+  CHECK(updated);
+  instrumented = true;
+  return;
+}
+
+}  // namespace StackWalkConcurrentInstrument
+}  // namespace art
diff --git a/test/2012-structural-redefinition-failures-jni-id/expected.txt b/test/2012-structural-redefinition-failures-jni-id/expected.txt
new file mode 100644
index 0000000..4c3dd98
--- /dev/null
+++ b/test/2012-structural-redefinition-failures-jni-id/expected.txt
@@ -0,0 +1,8 @@
+Checking classes
+Is Structurally modifiable class Main$C1 true
+Is Structurally modifiable class Main$C2 true
+Is Structurally modifiable class Main$C3 true
+Setting C2 as having pointer-ids used and checking classes
+Is Structurally modifiable class Main$C1 false
+Is Structurally modifiable class Main$C2 false
+Is Structurally modifiable class Main$C3 true
diff --git a/test/2012-structural-redefinition-failures-jni-id/info.txt b/test/2012-structural-redefinition-failures-jni-id/info.txt
new file mode 100644
index 0000000..68520bf
--- /dev/null
+++ b/test/2012-structural-redefinition-failures-jni-id/info.txt
@@ -0,0 +1,3 @@
+Sanity check for isStructurallyModifiable.
+
+Ensures that types being not-modifiable makes their supertypes not-modifiable.
diff --git a/test/2012-structural-redefinition-failures-jni-id/run b/test/2012-structural-redefinition-failures-jni-id/run
new file mode 100755
index 0000000..03e41a5
--- /dev/null
+++ b/test/2012-structural-redefinition-failures-jni-id/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/2012-structural-redefinition-failures-jni-id/set-jni-id-used.cc b/test/2012-structural-redefinition-failures-jni-id/set-jni-id-used.cc
new file mode 100644
index 0000000..4b3dac9
--- /dev/null
+++ b/test/2012-structural-redefinition-failures-jni-id/set-jni-id-used.cc
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+
+#include <vector>
+
+#include "android-base/logging.h"
+#include "android-base/macros.h"
+#include "handle_scope-inl.h"
+#include "jni.h"
+#include "jvmti.h"
+
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "mirror/class.h"
+#include "mirror/class_ext.h"
+#include "scoped_local_ref.h"
+#include "scoped_thread_state_change-inl.h"
+#include "test_env.h"
+
+namespace art {
+namespace Test2012SetJniIdUsed {
+
+extern "C" JNIEXPORT void JNICALL Java_Main_SetPointerIdsUsed(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jclass target) {
+  ScopedObjectAccess soa(env);
+  StackHandleScope<1> hs(soa.Self());
+  Handle<mirror::Class> h(hs.NewHandle(soa.Decode<mirror::Class>(target)));
+  ObjPtr<mirror::ClassExt> ext(h->EnsureExtDataPresent(h, soa.Self()));
+  CHECK(!ext.IsNull());
+  ext->SetIdsArraysForClassExtExtData(Runtime::Current()->GetJniIdManager()->GetPointerMarker());
+}
+
+}  // namespace Test2012SetJniIdUsed
+}  // namespace art
diff --git a/test/2012-structural-redefinition-failures-jni-id/src-art/Main.java b/test/2012-structural-redefinition-failures-jni-id/src-art/Main.java
new file mode 100644
index 0000000..4f39cde
--- /dev/null
+++ b/test/2012-structural-redefinition-failures-jni-id/src-art/Main.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import art.*;
+
+public class Main {
+  public static void Check(Class[] klasses) {
+    for (Class k : klasses) {
+      try {
+        boolean res = Redefinition.isStructurallyModifiable(k);
+        System.out.println("Is Structurally modifiable " + k + " " + res);
+      } catch (Exception e) {
+        System.out.println("Got exception " + e + " during check modifiablity of " + k);
+        e.printStackTrace(System.out);
+      }
+    }
+  }
+
+  public static class C1 {
+    public Object o;
+    public void foobar() {}
+  }
+  public static class C2 extends C1 {
+    public static Object o;
+    public static void foo() {}
+  }
+  public static class C3 extends C2 {
+    public Object j;
+    public void bar() {}
+  }
+
+  public static void doTest() throws Exception {
+    Class[] classes = new Class[] {
+      C1.class,
+      C2.class,
+      C3.class,
+    };
+    System.out.println("Checking classes");
+    Check(classes);
+    System.out.println("Setting C2 as having pointer-ids used and checking classes");
+    SetPointerIdsUsed(C2.class);
+    Check(classes);
+  }
+  public static native void SetPointerIdsUsed(Class<?> k);
+  public static void main(String[] args) throws Exception {
+    // Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+}
diff --git a/test/2012-structural-redefinition-failures-jni-id/src-art/art/Redefinition.java b/test/2012-structural-redefinition-failures-jni-id/src-art/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/2012-structural-redefinition-failures-jni-id/src-art/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/2012-structural-redefinition-failures-jni-id/src-art/art/Test1983.java b/test/2012-structural-redefinition-failures-jni-id/src-art/art/Test1983.java
new file mode 100644
index 0000000..0576349
--- /dev/null
+++ b/test/2012-structural-redefinition-failures-jni-id/src-art/art/Test1983.java
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.invoke.*;
+import java.lang.ref.*;
+import java.lang.reflect.*;
+import java.util.*;
+
+public class Test1983 {
+  public static void runNonCts() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+    doTestNonCts();
+  }
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+
+  public static void Check(Class[] klasses) {
+    for (Class k : klasses) {
+      try {
+        boolean res = Redefinition.isStructurallyModifiable(k);
+        System.out.println("Is Structurally modifiable " + k + " " + res);
+      } catch (Exception e) {
+        System.out.println("Got exception " + e + " during check modifiablity of " + k);
+        e.printStackTrace(System.out);
+      }
+    }
+  }
+
+  public static class WithVirtuals {
+    public Object o;
+    public void foobar() {}
+  }
+  public static class NoVirtuals extends WithVirtuals {
+    public static Object o;
+    public static void foo() {}
+  }
+  public static class SubWithVirtuals extends NoVirtuals {
+    public Object j;
+    public void bar() {}
+  }
+
+  public static void doTest() throws Exception {
+    Class[] mirrord_classes = new Class[] {
+      AccessibleObject.class,
+      CallSite.class,
+      // ClassExt is not on the compile classpath.
+      Class.forName("dalvik.system.ClassExt"),
+      ClassLoader.class,
+      Class.class,
+      Constructor.class,
+      // DexCache is not on the compile classpath
+      Class.forName("java.lang.DexCache"),
+      // EmulatedStackFrame is not on the compile classpath
+      Class.forName("dalvik.system.EmulatedStackFrame"),
+      Executable.class,
+      Field.class,
+      // @hide on CTS
+      Class.forName("java.lang.ref.FinalizerReference"),
+      MethodHandle.class,
+      MethodHandles.Lookup.class,
+      MethodType.class,
+      Method.class,
+      Object.class,
+      Proxy.class,
+      Reference.class,
+      StackTraceElement.class,
+      String.class,
+      Thread.class,
+      Throwable.class,
+      // @hide on CTS
+      Class.forName("java.lang.invoke.VarHandle"),
+      // TODO all the var handle types.
+      // @hide on CTS
+      Class.forName("java.lang.invoke.FieldVarHandle"),
+    };
+    System.out.println("Checking mirror'd classes");
+    Check(mirrord_classes);
+    // The results of some of these will change as we improve structural class redefinition. Any
+    // that are true should always remain so though.
+    Class[] non_mirrord_classes = new Class[] {
+      new Object[0].getClass(),
+      NoVirtuals.class,
+      WithVirtuals.class,
+      SubWithVirtuals.class,
+    };
+    System.out.println("Checking non-mirror'd classes");
+    Check(non_mirrord_classes);
+  }
+
+  public static void doTestNonCts() throws Exception {
+    System.out.println("Checking non-mirror'd classes (non-cts)");
+    Class[] non_mirrord_classes = new Class[] {
+      ArrayList.class,
+      Objects.class,
+      Arrays.class,
+      Integer.class,
+      Number.class,
+      MethodHandles.class,
+    };
+    Check(non_mirrord_classes);
+  }
+}
diff --git a/test/2019-constantcalculationsinking/expected.txt b/test/2019-constantcalculationsinking/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/2019-constantcalculationsinking/expected.txt
diff --git a/test/2019-constantcalculationsinking/info.txt b/test/2019-constantcalculationsinking/info.txt
new file mode 100644
index 0000000..9092d95
--- /dev/null
+++ b/test/2019-constantcalculationsinking/info.txt
@@ -0,0 +1 @@
+Tests for Constant calculation sinking
diff --git a/test/2019-constantcalculationsinking/src/Main.java b/test/2019-constantcalculationsinking/src/Main.java
new file mode 100644
index 0000000..6cef77a
--- /dev/null
+++ b/test/2019-constantcalculationsinking/src/Main.java
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ *
+ * L: Break operator doesn't break one basic block limitation for int,
+ * for some reasons (most probably not a bug), there are two basic blocks for long type,
+ * 1 sinking expected for int, 0 for long.
+ * M: no limitations on basic blocks number, 1 constant calculation sinking expected for
+ * each method
+ *
+ **/
+
+public class Main {
+
+    final int iterations = 1100;
+
+    public static void assertIntEquals(int expected, int result) {
+        if (expected != result) {
+            throw new Error("Expected: " + expected + ", found: " + result);
+        }
+    }
+
+    public static void assertLongEquals(long expected, long result) {
+        if (expected != result) {
+            throw new Error("Expected: " + expected + ", found: " + result);
+        }
+    }
+
+
+    public int testLoopAddInt() {
+        int testVar = 10000;
+        int additionalVar = 10;
+
+        outer:
+            for (int i = 0; i < iterations; i++) {
+                additionalVar += i;
+                for (int k = 0; k < iterations; k++) {
+                    additionalVar += k;
+                    testVar += 5;
+                    continue outer;
+                }
+            }
+        assertIntEquals(testVar + additionalVar, 619960);
+        return testVar + additionalVar;
+    }
+
+    public int testLoopSubInt() {
+        int testVar = 10000;
+        int additionalVar = 10;
+
+        outer:
+            for (int i = 0; i < iterations; i++) {
+                additionalVar += i;
+                for (int k = 0; k < iterations; k++) {
+                    additionalVar += k;
+                    testVar -= 5;
+                    continue outer;
+                }
+            }
+        assertIntEquals(testVar + additionalVar, 608960);
+        return testVar + additionalVar;
+    }
+
+    public long testLoopSubLong() {
+        long testVar = 10000;
+        long additionalVar = 10;
+
+        outer:
+            for (long i = 0; i < iterations; i++) {
+                additionalVar += i;
+                for (long k = 0; k < iterations; k++) {
+                    additionalVar += k;
+                    testVar -= 5;
+                    continue outer;
+                }
+            }
+        assertLongEquals(testVar + additionalVar, 608960);
+        return testVar + additionalVar;
+    }
+
+    public int testLoopMulInt(int n) {
+        int testVar = 1;
+        int additionalVar = 10;
+
+        outer:
+            for (int i = 0; i < 3; i++) {
+                additionalVar += i + n * 2;
+                for (int k = 0; k < 5; k++) {
+                    additionalVar += k + n + k % 3 - i % 2 + n % 4 - i % 5
+                                     + (k + 2) / 7 - (i - 5) / 3 + k * 3 - k / 2;
+                    testVar *= 6;
+                    continue outer;
+                }
+            }
+        assertIntEquals(testVar + additionalVar, 324);
+        return testVar + additionalVar;
+    }
+
+    public long testLoopMulLong(long n) {
+        long testVar = 1;
+        long additionalVar = 10;
+
+        outer:
+            for (long i = 0; i < 5; i++) {
+                additionalVar += i + n;
+                for (long k = 0; k < 5; k++) {
+                    additionalVar += k + n + k % 3 - i % 2 + n % 4 - i % 5
+                                     + (k + 2) / 7 - (i - 5) / 3 + k * 3 - k / 2;
+                    testVar *= 6L;
+                    continue outer;
+                }
+            }
+        assertLongEquals(testVar + additionalVar, 7897);
+        return testVar + additionalVar;
+    }
+
+    public int testLoopDivInt() {
+        int testVar = 10000;
+        int additionalVar = 10;
+
+        outer:
+            for (int i = 0; i < iterations; i++) {
+                additionalVar += i;
+                for (int k = 0; k < iterations; k++) {
+                    additionalVar += k;
+                    testVar /= 5;
+                    continue outer;
+                }
+            }
+        assertIntEquals(testVar + additionalVar, 604460);
+        return testVar + additionalVar;
+    }
+
+    public long testLoopDivLong() {
+        long testVar = 10000;
+        long additionalVar = 10;
+
+        outer:
+            for (long i = 0; i < iterations; i++) {
+                additionalVar += i;
+                for (long k = 0; k < iterations; k++) {
+                    additionalVar += k;
+                    testVar /= 5;
+                    continue outer;
+                }
+            }
+        assertLongEquals(testVar + additionalVar, 604460);
+        return testVar + additionalVar;
+    }
+
+    public int testLoopRemInt() {
+        int testVar = 10000;
+        int additionalVar = 10;
+
+        outer:
+            for (int i = 0; i < iterations; i++) {
+                additionalVar += i;
+                for (int k = 0; k < iterations; k++) {
+                    additionalVar += k;
+                    testVar %= 5;
+                    continue outer;
+                }
+            }
+        assertIntEquals(testVar + additionalVar, 604460);
+        return testVar + additionalVar;
+    }
+
+    public long testLoopRemLong() {
+        long testVar = 10000;
+        long additionalVar = 10;
+
+        outer:
+            for (long i = 0; i < iterations; i++) {
+                additionalVar += i;
+                for (long k = 0; k < iterations; k++) {
+                    additionalVar += k;
+                    testVar %= 5;
+                    continue outer;
+                }
+            }
+        assertLongEquals(testVar + additionalVar, 604460);
+        return testVar + additionalVar;
+    }
+
+    public long testLoopAddLong() {
+        long testVar = 10000;
+        long additionalVar = 10;
+
+        outer:
+            for (long i = 0; i < iterations; i++) {
+                additionalVar += i;
+                for (long k = 0; k < iterations; k++) {
+                    additionalVar += k;
+                    testVar += 5;
+                    continue outer;
+                }
+            }
+        assertLongEquals(testVar + additionalVar, 619960);
+        return testVar + additionalVar;
+    }
+
+    public static void main(String[] args) {
+        Main obj = new Main();
+        obj.testLoopAddInt();
+        obj.testLoopAddLong();
+        obj.testLoopRemLong();
+        obj.testLoopRemInt();
+        obj.testLoopDivLong();
+        obj.testLoopDivInt();
+        obj.testLoopMulLong(10);
+        obj.testLoopMulInt(10);
+        obj.testLoopSubLong();
+        obj.testLoopSubInt();
+    }
+
+}
diff --git a/test/2020-InvokeVirtual-Inlining/expected.txt b/test/2020-InvokeVirtual-Inlining/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/2020-InvokeVirtual-Inlining/expected.txt
diff --git a/test/2020-InvokeVirtual-Inlining/info.txt b/test/2020-InvokeVirtual-Inlining/info.txt
new file mode 100644
index 0000000..ad45317
--- /dev/null
+++ b/test/2020-InvokeVirtual-Inlining/info.txt
@@ -0,0 +1 @@
+Extensive Test cases for Inlining - InvokeVirtual call
diff --git a/test/2020-InvokeVirtual-Inlining/src/Main.java b/test/2020-InvokeVirtual-Inlining/src/Main.java
new file mode 100644
index 0000000..fa96c2e
--- /dev/null
+++ b/test/2020-InvokeVirtual-Inlining/src/Main.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Main {
+    final static int iterations = 10;
+
+    public static void assertIntEquals(int expected, int result) {
+        if (expected != result) {
+            throw new Error("Expected: " + expected + ", found: " + result);
+        }
+    }
+
+    public static void assertDoubleEquals(double expected, double result) {
+        if (expected != result) {
+            throw new Error("Expected: " + expected + ", found: " + result);
+        }
+    }
+
+    public static void assertFloatEquals(float expected, float result) {
+        if (expected != result) {
+            throw new Error("Expected: " + expected + ", found: " + result);
+        }
+    }
+
+    public static void assertLongEquals(long expected, long result) {
+        if (expected != result) {
+            throw new Error("Expected: " + expected + ", found: " + result);
+        }
+    }
+
+
+    public static void main(String[] args) {
+        Test test = new Test();
+        long workJ = 2;
+        long workK = 3;
+        float workJ1 = 10.0f;
+        float workK1 = 15.0f;
+        int workJ2 = 10;
+        int workK2 = 15;
+        long workJ3 = 0xFAEFFFAB;
+        long workK3 = 0xF8E9DCBA;
+
+        for (long i = 0; i < iterations; i++) {
+            workJ = test.simplemethodMul(workJ, workK) + i;
+        }
+        assertLongEquals(workJ, 132855);
+
+        for (float i = 0.0f; i < iterations; i++) {
+            workJ1 = test.simplemethodRem(workJ1, workK1) + i;
+        }
+        assertFloatEquals(workJ1, 14.0f);
+
+        workJ2--;
+
+        try {
+            throw new Exception("Test");
+        } catch (Exception e) {
+            workJ++;
+        }
+
+        for (int i = 0; i < iterations; i++) {
+            workJ2 = test.simplemethodInt(workJ2, workK2) + i;
+        }
+        assertIntEquals(workJ2, 152);
+
+        for (long i = 0; i < iterations; i++) {
+            workJ3 = test.simplemethodXor(workJ3, workK3) + i;
+        }
+        assertLongEquals(workJ3, 118891342);
+    }
+}
diff --git a/test/2020-InvokeVirtual-Inlining/src/Test.java b/test/2020-InvokeVirtual-Inlining/src/Test.java
new file mode 100644
index 0000000..9ce2566
--- /dev/null
+++ b/test/2020-InvokeVirtual-Inlining/src/Test.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Test {
+
+    public long simplemethodMul(long jj, long ii) {
+        jj = ii * jj;
+        return jj;
+    }
+
+    public float simplemethodRem(float jj, float kk) {
+        jj = kk % jj;
+        jj = jj % kk;
+        return jj;
+    }
+
+    public int simplemethodInt(int jj, int kk) {
+        jj = kk | jj;
+        return jj;
+    }
+
+    public long simplemethodXor(long jj, long kk) {
+        jj = ~kk;
+        return jj;
+    }
+
+}
diff --git a/test/2021-InvokeStatic-Inlining/expected.txt b/test/2021-InvokeStatic-Inlining/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/2021-InvokeStatic-Inlining/expected.txt
diff --git a/test/2021-InvokeStatic-Inlining/info.txt b/test/2021-InvokeStatic-Inlining/info.txt
new file mode 100644
index 0000000..c15dcc8
--- /dev/null
+++ b/test/2021-InvokeStatic-Inlining/info.txt
@@ -0,0 +1 @@
+Extensive Test cases for Inlining - InvokeStatic call
diff --git a/test/2021-InvokeStatic-Inlining/src/Main.java b/test/2021-InvokeStatic-Inlining/src/Main.java
new file mode 100644
index 0000000..3568e1c
--- /dev/null
+++ b/test/2021-InvokeStatic-Inlining/src/Main.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Main {
+    final static int iterations = 10;
+
+    public static void assertIntEquals(int expected, int result) {
+        if (expected != result) {
+            throw new Error("Expected: " + expected + ", found: " + result);
+        }
+    }
+
+    public static void assertDoubleEquals(double expected, double result) {
+        if (expected != result) {
+            throw new Error("Expected: " + expected + ", found: " + result);
+        }
+    }
+
+    public static void assertFloatEquals(float expected, float result) {
+        if (expected != result) {
+            throw new Error("Expected: " + expected + ", found: " + result);
+        }
+    }
+
+    public static void assertLongEquals(long expected, long result) {
+        if (expected != result) {
+            throw new Error("Expected: " + expected + ", found: " + result);
+        }
+    }
+
+
+    public static long simpleMethod(long jj, long kk) {
+        jj = jj >>> kk;
+        return jj;
+    }
+    public static int simpleMethod1(int jj, int kk) {
+        jj = jj << kk;
+        jj = jj << kk;
+        return jj;
+      }
+    public static float simpleMethod2(float jj, float ii) {
+        jj = ii / jj;
+        jj = jj / ii;
+        return jj;
+    }
+
+    public static void main(String[] args) {
+        long workJ = 0xFFEFAAAA;
+        long workK = 0xF8E9BBBB;
+        int workJ1 = 0xFFEF;
+        int workK1 = 0xF8E9;
+        float workJ2 = 10.0f;
+        float workK2 = 15.0f;
+
+
+
+        for (long i = 0; i < iterations; i++) {
+            workJ = simpleMethod(workJ, workK) + i;
+        }
+        assertLongEquals(workJ, 9);
+
+        for (int i = 0; i < iterations; i++) {
+            workJ1 = simpleMethod1(workJ1, workK1) + i;
+        }
+        assertIntEquals(workJ1, 2097161);
+
+        for (float i = 0.0f; i < iterations; i++) {
+            workJ2 = simpleMethod2(workJ2, workK2) + i;
+        }
+        assertFloatEquals(workJ2, 9.122855f);
+    }
+}
diff --git a/test/2022-Invariantloops/expected.txt b/test/2022-Invariantloops/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/2022-Invariantloops/expected.txt
diff --git a/test/2022-Invariantloops/info.txt b/test/2022-Invariantloops/info.txt
new file mode 100644
index 0000000..fd7589e
--- /dev/null
+++ b/test/2022-Invariantloops/info.txt
@@ -0,0 +1,2 @@
+Test a loop with invariants. In this case, since the invariants get used later in the loop, constant calculation sinking doesn't work.
+Hence loop cannot be removed.
diff --git a/test/2022-Invariantloops/src/Main.java b/test/2022-Invariantloops/src/Main.java
new file mode 100644
index 0000000..b975ef6
--- /dev/null
+++ b/test/2022-Invariantloops/src/Main.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+    public static void assertIntEquals(int expected, int result) {
+        if (expected != result) {
+            throw new Error("Expected: " + expected + ", found: " + result);
+        }
+    }
+
+    public int loop1() {
+        int used1 = 1;
+        int used2 = 2;
+        int used3 = 3;
+        int used4 = 4;
+        int invar1 = 15;
+        int invar2 = 25;
+        int invar3 = 35;
+        int invar4 = 45;
+
+        for (int i = 0; i < 10000; i++) {
+            used1 += invar1 + invar2;
+            used2 -= used1 + invar2 - invar3;
+            used3 *= used2 + invar3 * invar4;
+            used4 /= used3 + invar1 * invar2 - invar3 + invar4;
+        }
+        assertIntEquals(used1 + used2 + used3 + used4, -1999709997);
+        return used1 + used2 + used3 + used4;
+    }
+
+    public static void main(String[] args) {
+        int res = new Main().loop1();
+    }
+}
diff --git a/test/2023-InvariantLoops_typecast/expected.txt b/test/2023-InvariantLoops_typecast/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/2023-InvariantLoops_typecast/expected.txt
diff --git a/test/2023-InvariantLoops_typecast/info.txt b/test/2023-InvariantLoops_typecast/info.txt
new file mode 100644
index 0000000..adb8b3e
--- /dev/null
+++ b/test/2023-InvariantLoops_typecast/info.txt
@@ -0,0 +1,2 @@
+Test a loop with invariants, which should be hoisted.
+In this case type casts are not sunk/hoisted.
diff --git a/test/2023-InvariantLoops_typecast/src/Main.java b/test/2023-InvariantLoops_typecast/src/Main.java
new file mode 100644
index 0000000..eba1440
--- /dev/null
+++ b/test/2023-InvariantLoops_typecast/src/Main.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+    public static void assertIntEquals(int expected, int result) {
+        if (expected != result) {
+            throw new Error("Expected: " + expected + ", found: " + result);
+        }
+    }
+
+    public static void assertLongEquals(long expected, long result) {
+        if (expected != result) {
+            throw new Error("Expected: " + expected + ", found: " + result);
+        }
+    }
+
+    public byte loop1() {
+        byte used1 = 1;
+        byte used2 = 2;
+        byte used3 = 3;
+        byte used4 = 4;
+        byte invar1 = 15;
+        byte invar2 = 25;
+        byte invar3 = 35;
+        byte invar4 = 45;
+
+
+        for (byte i = 0; i < 127; i++) {
+            used1 -= (byte)(invar1 + invar2);
+            used2 *= (byte)(invar2 - invar3);
+            used3 += (byte)(invar3 * invar4);
+            used4 /= (byte)(invar1 * invar2 - invar3 + invar4);
+        }
+
+        assertIntEquals((byte)(used1 + used2 + used3 + used4), -123);
+        return (byte)(used1 + used2 + used3 + used4);
+    }
+
+    public long loop2() {
+        double used1 = 1;
+        double used2 = 2;
+        double used3 = 3;
+        double used4 = 4;
+        double invar1 = 234234234234l;
+        double invar2 = 2523423423423424l;
+        double invar3 = 35234234234234234l;
+        double invar4 = 45234234234234234l;
+
+        for (double i = 0; i < 10000; i++) {
+            used1 += invar1 + invar2;
+            used2 *= invar2 - invar3;
+            used3 -= invar3 * invar4;
+            used4 /= invar1 * invar2 - invar3 + invar4;
+        }
+        assertLongEquals(Double.doubleToLongBits(used1 + used2 + used3 + used4),
+            9218868437227405312l);
+        return Double.doubleToLongBits(used1 + used2 + used3 + used4);
+
+    }
+
+    public static void main(String[] args) {
+        byte res = new Main().loop1();
+        long res1 = new Main().loop2();
+    }
+}
diff --git a/test/2024-InvariantNegativeLoop/expected.txt b/test/2024-InvariantNegativeLoop/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/2024-InvariantNegativeLoop/expected.txt
diff --git a/test/2024-InvariantNegativeLoop/info.txt b/test/2024-InvariantNegativeLoop/info.txt
new file mode 100644
index 0000000..7d7d055
--- /dev/null
+++ b/test/2024-InvariantNegativeLoop/info.txt
@@ -0,0 +1 @@
+Test a loop with invariants.
diff --git a/test/2024-InvariantNegativeLoop/src/Main.java b/test/2024-InvariantNegativeLoop/src/Main.java
new file mode 100644
index 0000000..5e729f8
--- /dev/null
+++ b/test/2024-InvariantNegativeLoop/src/Main.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+public class Main {
+
+    public static void assertFloatEquals(float expected, float result) {
+        if (expected != result) {
+            throw new Error("Expected: " + expected + ", found: " + result);
+        }
+    }
+
+    public int loop() {
+        float used1 = 1;
+        float used2 = 2;
+        float used3 = 3;
+        float used4 = 4;
+        float invar1 = 15;
+        float invar2 = 25;
+        float invar3 = 35;
+        float invar4 = 45;
+        float i = 0.5f;
+
+        do {
+            used1 = invar1 + invar2;
+            used2 = invar2 - invar3;
+            used3 = invar3 * invar4;
+            used4 = invar1 * invar2 - invar3 + invar4;
+            i += 0.5f;
+        } while (i < 5000.25f);
+        assertFloatEquals(Float.floatToIntBits(used1 + used2 + used3 + used4), 1157152768);
+        return Float.floatToIntBits(used1 + used2 + used3 + used4);
+    }
+
+    public static void main(String[] args) {
+        int res = new Main().loop();
+    }
+}
diff --git a/test/2025-ChangedArrayValue/expected.txt b/test/2025-ChangedArrayValue/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/2025-ChangedArrayValue/expected.txt
diff --git a/test/2025-ChangedArrayValue/info.txt b/test/2025-ChangedArrayValue/info.txt
new file mode 100644
index 0000000..0e09184
--- /dev/null
+++ b/test/2025-ChangedArrayValue/info.txt
@@ -0,0 +1 @@
+Tests are written for Load Hoist Store Sink pass
diff --git a/test/2025-ChangedArrayValue/src/Main.java b/test/2025-ChangedArrayValue/src/Main.java
new file mode 100644
index 0000000..a9ecbaad
--- /dev/null
+++ b/test/2025-ChangedArrayValue/src/Main.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+    public static void assertIntEquals(int expected, int result) {
+        if (expected != result) {
+            throw new Error("Expected: " + expected + ", found: " + result);
+        }
+    }
+
+    public class A {
+        public int[] value = new int[3];
+    }
+
+    public int testLoop() {
+        A x;
+        x = new A();
+
+        int []a0 = {0x7, 0x77, 0x707};
+        int []b0 = {0x7007, 0x70007, 0x700007};
+
+        for (int i = 0; i < 10; i++) {
+            b0[0]++;
+
+            if (i % 2 == 0) {
+                a0 = b0;
+            }
+
+            x.value = a0;
+        }
+
+        assertIntEquals(x.value[0], 28689);
+        return x.value[0];
+
+    }
+
+    public static void main(String[] args) {
+        Main obj = new Main();
+        obj.testLoop();
+    }
+}
diff --git a/test/2026-DifferentMemoryLSCouples/expected.txt b/test/2026-DifferentMemoryLSCouples/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/2026-DifferentMemoryLSCouples/expected.txt
diff --git a/test/2026-DifferentMemoryLSCouples/info.txt b/test/2026-DifferentMemoryLSCouples/info.txt
new file mode 100644
index 0000000..0e09184
--- /dev/null
+++ b/test/2026-DifferentMemoryLSCouples/info.txt
@@ -0,0 +1 @@
+Tests are written for Load Hoist Store Sink pass
diff --git a/test/2026-DifferentMemoryLSCouples/src/Main.java b/test/2026-DifferentMemoryLSCouples/src/Main.java
new file mode 100644
index 0000000..f5a305e
--- /dev/null
+++ b/test/2026-DifferentMemoryLSCouples/src/Main.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+public class Main {
+    class A {
+        int fieldA;
+        int dummy;
+    }
+
+    class B {
+        int dummy;
+        int fieldB;
+    }
+    public static void assertIntEquals(int expected, int result) {
+        if (expected != result) {
+            throw new Error("Expected: " + expected + ", found: " + result);
+        }
+    }
+
+
+    public void testLoop() {
+        A inst1 = new A();
+        B inst2 = new B();
+        int iterations = 50;
+        for (int i = 0; i < iterations; i++) {
+            int a = inst1.fieldA;
+            inst1.fieldA = a + i;
+            int b = inst2.fieldB;
+            inst2.fieldB = b + 2 * i;
+        }
+        assertIntEquals(inst1.fieldA, 1225);
+        assertIntEquals(inst2.fieldB, 2450);
+    }
+
+    public static void main(String[] args) {
+        Main obj = new Main();
+        obj.testLoop();
+    }
+}
diff --git a/test/2027-TwiceTheSameMemoryCouple/expected.txt b/test/2027-TwiceTheSameMemoryCouple/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/2027-TwiceTheSameMemoryCouple/expected.txt
diff --git a/test/2027-TwiceTheSameMemoryCouple/info.txt b/test/2027-TwiceTheSameMemoryCouple/info.txt
new file mode 100644
index 0000000..0e09184
--- /dev/null
+++ b/test/2027-TwiceTheSameMemoryCouple/info.txt
@@ -0,0 +1 @@
+Tests are written for Load Hoist Store Sink pass
diff --git a/test/2027-TwiceTheSameMemoryCouple/src/Main.java b/test/2027-TwiceTheSameMemoryCouple/src/Main.java
new file mode 100644
index 0000000..1dc9887
--- /dev/null
+++ b/test/2027-TwiceTheSameMemoryCouple/src/Main.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+public class Main {
+    public static int field = 0;
+
+    public static void assertIntEquals(int expected, int result) {
+        if (expected != result) {
+            throw new Error("Expected: " + expected + ", found: " + result);
+        }
+    }
+
+    public int testLoop() {
+        int iterations = 50;
+        for (int i = 0; i < iterations; i++) {
+            int a = field;
+            field = a + i;
+            int b = field;
+            field = b + 2 * i;
+        }
+        assertIntEquals(field, 3675);
+        return field;
+    }
+
+    public static void main(String[] args) {
+        Main obj = new Main();
+        obj.testLoop();
+    }
+}
diff --git a/test/2028-MultiBackward/expected.txt b/test/2028-MultiBackward/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/2028-MultiBackward/expected.txt
diff --git a/test/2028-MultiBackward/info.txt b/test/2028-MultiBackward/info.txt
new file mode 100644
index 0000000..0e09184
--- /dev/null
+++ b/test/2028-MultiBackward/info.txt
@@ -0,0 +1 @@
+Tests are written for Load Hoist Store Sink pass
diff --git a/test/2028-MultiBackward/src/Main.java b/test/2028-MultiBackward/src/Main.java
new file mode 100644
index 0000000..c111043
--- /dev/null
+++ b/test/2028-MultiBackward/src/Main.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+public class Main {
+    public class A {
+        public int value;
+    }
+
+    public static void assertIntEquals(int expected, int result) {
+        if (expected != result) {
+            throw new Error("Expected: " + expected + ", found: " + result);
+        }
+    }
+
+    public long testLoop() {
+        A x;
+        x = new A();
+        int a0 = 0x7;
+
+        int i = 0;
+        while (i < 10000000) {
+            a0++;
+            x.value = a0;
+
+            if (i % 2 == 0) {
+                i++;
+                continue;
+            }
+            i = i + 2;
+        }
+        assertIntEquals(x.value, 5000008);
+        return x.value;
+    }
+
+    public static void main(String[] args) {
+        Main obj = new Main();
+        obj.testLoop();
+    }
+}
diff --git a/test/2029-contended-monitors/expected.txt b/test/2029-contended-monitors/expected.txt
new file mode 100644
index 0000000..1894ad7
--- /dev/null
+++ b/test/2029-contended-monitors/expected.txt
@@ -0,0 +1,10 @@
+Starting
+Atomic increments
+Hold time 2, shared lock
+Hold time 20, shared lock
+Hold time 200, shared lock
+Hold time 2000, shared lock
+Hold time 20000, shared lock
+Hold time 200000, shared lock
+Hold for 2 msecs while sleeping, shared lock
+Hold for 2 msecs while sleeping, private lock
diff --git a/test/2029-contended-monitors/info.txt b/test/2029-contended-monitors/info.txt
new file mode 100644
index 0000000..f6ccdd3
--- /dev/null
+++ b/test/2029-contended-monitors/info.txt
@@ -0,0 +1,4 @@
+Checks that monitor-protected increments at various granularities are indeed
+atomic. Also checks j.u.c. increments. Can be configured to print execution
+times for contended and uncontended monitor acquisition under different
+circumstances.
diff --git a/test/2029-contended-monitors/src/Main.java b/test/2029-contended-monitors/src/Main.java
new file mode 100644
index 0000000..78d2ae4
--- /dev/null
+++ b/test/2029-contended-monitors/src/Main.java
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.util.concurrent.atomic.AtomicInteger;
+
+public class Main {
+
+  private final boolean PRINT_TIMES = false;  // False for use as run test.
+
+  // Number of increments done by each thread.  Must be multiple of largest hold time below,
+  // times any possible thread count. Finishes much faster when used as run test.
+  private final int TOTAL_ITERS = PRINT_TIMES? 16_000_000 : 1_600_000;
+  private final int MAX_HOLD_TIME = PRINT_TIMES? 2_000_000 : 200_000;
+
+  private int counter;
+
+  private AtomicInteger atomicCounter = new AtomicInteger();
+
+  private Object lock;
+
+  private int currentThreadCount = 0;
+
+  // A function such that if we repeatedly apply it to -1, the value oscillates
+  // between -1 and 3. Thus the average value is 1.
+  // This is designed to make it hard for the compiler to predict the values in
+  // the sequence.
+  private int nextInt(int x) {
+    if (x < 0) {
+      return x * x + 2;
+    } else {
+      return x - 4;
+    }
+  }
+
+  // Increment counter by n, holding lock for time roughly proportional to n.
+  // N must be even.
+  private void holdFor(Object lock, int n) {
+    synchronized(lock) {
+      int y = -1;
+      for (int i = 0; i < n; ++i) {
+        counter += y;
+        y = nextInt(y);
+      }
+    }
+  }
+
+  private class RepeatedLockHolder implements Runnable {
+    RepeatedLockHolder(boolean shared, int n /* even */) {
+      sharedLock = shared;
+      holdTime = n;
+    }
+    @Override
+    public void run() {
+      Object myLock = sharedLock ? lock : new Object();
+      int nIters = TOTAL_ITERS / currentThreadCount / holdTime;
+      for (int i = 0; i < nIters; ++i) {
+        holdFor(myLock, holdTime);
+      }
+    }
+    private boolean sharedLock;
+    private int holdTime;
+  }
+
+  private class SleepyLockHolder implements Runnable {
+    SleepyLockHolder(boolean shared) {
+      sharedLock = shared;
+    }
+    @Override
+    public void run() {
+      Object myLock = sharedLock ? lock : new Object();
+      int nIters = TOTAL_ITERS / currentThreadCount / 10_000;
+      for (int i = 0; i < nIters; ++i) {
+        synchronized(myLock) {
+          try {
+            Thread.sleep(2);
+          } catch(InterruptedException e) {
+            throw new AssertionError("Unexpected interrupt");
+          }
+          counter += 10_000;
+        }
+      }
+    }
+    private boolean sharedLock;
+  }
+
+  // Increment atomicCounter n times, on average by 1 each time.
+  private class RepeatedIncrementer implements Runnable {
+    @Override
+    public void run() {
+      int y = -1;
+      int nIters = TOTAL_ITERS / currentThreadCount;
+      for (int i = 0; i < nIters; ++i) {
+        atomicCounter.addAndGet(y);
+        y = nextInt(y);
+      }
+    }
+  }
+
+  // Run n threads doing work. Return the elapsed time this took, in milliseconds.
+  private long runMultiple(int n, Runnable work) {
+    Thread[] threads = new Thread[n];
+    // Replace lock, so that we start with a clean, uninflated lock each time.
+    lock = new Object();
+    for (int i = 0; i < n; ++i) {
+      threads[i] = new Thread(work);
+    }
+    long startTime = System.currentTimeMillis();
+    for (int i = 0; i < n; ++i) {
+      threads[i].start();
+    }
+    for (int i = 0; i < n; ++i) {
+      try {
+        threads[i].join();
+      } catch(InterruptedException e) {
+        throw new AssertionError("Unexpected interrupt");
+      }
+    }
+    return System.currentTimeMillis() - startTime;
+  }
+
+  // Run on different numbers of threads.
+  private void runAll(Runnable work, Runnable init, Runnable checker) {
+    for (int i = 1; i <= 8; i *= 2) {
+      currentThreadCount = i;
+      init.run();
+      long time = runMultiple(i, work);
+      if (PRINT_TIMES) {
+        System.out.print(time + (i == 8 ? "\n" : "\t"));
+      }
+      checker.run();
+    }
+  }
+
+  private class CheckAtomicCounter implements Runnable {
+    @Override
+    public void run() {
+      if (atomicCounter.get() != TOTAL_ITERS) {
+        throw new AssertionError("Failed atomicCounter postcondition check for "
+            + currentThreadCount + " threads");
+      }
+    }
+  }
+
+  private class CheckCounter implements Runnable {
+    @Override
+    public void run() {
+      if (counter != TOTAL_ITERS) {
+        throw new AssertionError("Failed counter postcondition check for "
+            + currentThreadCount + " threads");
+      }
+    }
+  }
+
+  private void run() {
+    if (PRINT_TIMES) {
+      System.out.println("All times in milliseconds for 1, 2, 4 and 8 threads");
+    }
+    System.out.println("Atomic increments");
+    runAll(new RepeatedIncrementer(), () -> { atomicCounter.set(0); }, new CheckAtomicCounter());
+    for (int i = 2; i <= MAX_HOLD_TIME; i *= 10) {
+      // i * 8 (max thread count) divides TOTAL_ITERS
+      System.out.println("Hold time " + i + ", shared lock");
+      runAll(new RepeatedLockHolder(true, i), () -> { counter = 0; }, new CheckCounter());
+    }
+    if (PRINT_TIMES) {
+      for (int i = 2; i <= MAX_HOLD_TIME; i *= 1000) {
+        // i divides TOTAL_ITERS
+        System.out.println("Hold time " + i + ", private lock");
+        // Since there is no mutual exclusion final counter value is unpredictable.
+        runAll(new RepeatedLockHolder(false, i), () -> { counter = 0; }, () -> {});
+      }
+    }
+    System.out.println("Hold for 2 msecs while sleeping, shared lock");
+    runAll(new SleepyLockHolder(true), () -> { counter = 0; }, new CheckCounter());
+    System.out.println("Hold for 2 msecs while sleeping, private lock");
+    runAll(new SleepyLockHolder(false), () -> { counter = 0; }, () -> {});
+  }
+
+  public static void main(String[] args) {
+    System.out.println("Starting");
+    new Main().run();
+  }
+}
diff --git a/test/2029-spaces-in-SimpleName/build b/test/2029-spaces-in-SimpleName/build
new file mode 100755
index 0000000..9c3cc79
--- /dev/null
+++ b/test/2029-spaces-in-SimpleName/build
@@ -0,0 +1,40 @@
+#!/bin/bash
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop on failure and be verbose.
+set -e -x
+
+export ASM_JAR="${ANDROID_BUILD_TOP}/prebuilts/misc/common/asm/asm-6.0.jar"
+
+cd src
+
+# generate Java bytecode with ASM
+${JAVAC:-javac} -cp "$ASM_JAR:." SpacesInSimpleName.java
+${JAVA:-java} -cp "$ASM_JAR:." SpacesInSimpleName
+
+# compile Java bytecode to DEX bytecode
+# TODO: replace DX with D8 when it adds support for spaces in SimpleName
+# ${D8} --min-api 10000 Main.class
+$ANDROID_HOST_OUT/bin/dx --dex --output=classes.dex Main.class
+
+# move the resulting DEX file and cleanup
+mv classes.dex ../classes.dex
+rm *.class
+
+cd ..
+
+# Use API level 10000 for spaces in SimpleName
+DESUGAR=false ./default-build "$@" --api-level 10000
diff --git a/test/2029-spaces-in-SimpleName/classes.dex b/test/2029-spaces-in-SimpleName/classes.dex
new file mode 100644
index 0000000..3804ca7
--- /dev/null
+++ b/test/2029-spaces-in-SimpleName/classes.dex
Binary files differ
diff --git a/test/2029-spaces-in-SimpleName/expected.txt b/test/2029-spaces-in-SimpleName/expected.txt
new file mode 100644
index 0000000..af5626b
--- /dev/null
+++ b/test/2029-spaces-in-SimpleName/expected.txt
@@ -0,0 +1 @@
+Hello, world!
diff --git a/test/2029-spaces-in-SimpleName/info.txt b/test/2029-spaces-in-SimpleName/info.txt
new file mode 100644
index 0000000..106ebeb
--- /dev/null
+++ b/test/2029-spaces-in-SimpleName/info.txt
@@ -0,0 +1,5 @@
+Whitespace support in DEX format 040.
+
+This test uses the ASM Java bytecode generator to generate a simple class
+with a 'main' method and an unpronounceable method whose name contains all
+space characters in the Unicode 'Zs' category.
diff --git a/test/2029-spaces-in-SimpleName/src/SpacesInSimpleName.java b/test/2029-spaces-in-SimpleName/src/SpacesInSimpleName.java
new file mode 100644
index 0000000..847da5a
--- /dev/null
+++ b/test/2029-spaces-in-SimpleName/src/SpacesInSimpleName.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.*;
+
+import org.objectweb.asm.*;
+
+public class SpacesInSimpleName {
+  public static void main(String args[]) throws Exception {
+    String methodName = "method_with_spaces_"
+        + "20 "
+        + "a0\u00a0"
+        + "1680\u1680"
+        + "2000\u2000"
+        + "2001\u2001"
+        + "2002\u2002"
+        + "2003\u2003"
+        + "2004\u2004"
+        + "2005\u2005"
+        + "2006\u2006"
+        + "2007\u2007"
+        + "2008\u2008"
+        + "2009\u2009"
+        + "200a\u200a"
+        + "202f\u202f"
+        + "205f\u205f"
+        + "3000\u3000";
+
+    ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_MAXS);
+
+    cw.visit(Opcodes.V1_8, Opcodes.ACC_PUBLIC, "Main",
+      null, "java/lang/Object", null);
+
+    MethodVisitor mvMain = cw.visitMethod(Opcodes.ACC_PUBLIC + Opcodes.ACC_STATIC,
+      "main", "([Ljava/lang/String;)V", null, null);
+    mvMain.visitCode();
+    mvMain.visitFieldInsn(Opcodes.GETSTATIC, "java/lang/System", "out",
+      "Ljava/io/PrintStream;");
+    mvMain.visitLdcInsn("Hello, world!");
+    mvMain.visitMethodInsn(Opcodes.INVOKEVIRTUAL, "java/io/PrintStream",
+      "println", "(Ljava/lang/String;)V", false);
+    mvMain.visitMethodInsn(Opcodes.INVOKESTATIC, "Main", methodName, "()V", false);
+    mvMain.visitInsn(Opcodes.RETURN);
+    mvMain.visitMaxs(0, 0); // args are ignored with COMPUTE_MAXS
+    mvMain.visitEnd();
+    MethodVisitor mvSpaces = cw.visitMethod(Opcodes.ACC_PUBLIC + Opcodes.ACC_STATIC,
+      methodName, "()V", null, null);
+    mvSpaces.visitCode();
+    mvSpaces.visitInsn(Opcodes.RETURN);
+    mvSpaces.visitMaxs(0, 0); // args are ignored with COMPUTE_MAXS
+    mvSpaces.visitEnd();
+
+    cw.visitEnd();
+
+    byte[] b = cw.toByteArray();
+    OutputStream out = new FileOutputStream("Main.class");
+    out.write(b, 0, b.length);
+    out.close();
+  }
+}
diff --git a/test/2030-long-running-child/expected.txt b/test/2030-long-running-child/expected.txt
new file mode 100644
index 0000000..5180c39
--- /dev/null
+++ b/test/2030-long-running-child/expected.txt
@@ -0,0 +1,3 @@
+Main Started
+Main Finished
+Child finished
diff --git a/test/2030-long-running-child/info.txt b/test/2030-long-running-child/info.txt
new file mode 100644
index 0000000..339abf0
--- /dev/null
+++ b/test/2030-long-running-child/info.txt
@@ -0,0 +1,3 @@
+Check that a child of a main thread can run to completion even if the
+main thread terminates immediately, and the child allocates memory and
+forks additional threads.
diff --git a/test/2030-long-running-child/src/Main.java b/test/2030-long-running-child/src/Main.java
new file mode 100644
index 0000000..1d01615
--- /dev/null
+++ b/test/2030-long-running-child/src/Main.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.util.concurrent.atomic.AtomicInteger;
+
+public class Main {
+
+  private static class LazyGrandChildThread implements Runnable {
+    @Override
+    public void run() {}
+  }
+
+  private static class ChildThread implements Runnable {
+    @Override
+    public void run() {
+      // Allocate memory forcing GCs and fork children.
+      for (int i = 0; i < 100; ++i) {
+        int [][] a = new int[10][];
+        for (int j = 0; j < 10; ++j) {
+          a[j] = new int[50000 * j + 20];
+          a[j][17] = 1;
+        }
+        Thread t = new Thread(new LazyGrandChildThread());
+        t.start();
+        int sum = 0;
+        // Make it hard to optimize out the arrays.
+        for (int j = 0; j < 10; ++j) {
+          sum += a[j][16] /* = 0 */ + a[j][17] /* = 1 */;
+        }
+        if (sum != 10) {
+          System.out.println("Bad result! Was " + sum);
+        }
+        try {
+          t.join();
+        } catch (InterruptedException e) {
+          System.out.println("Interrupted by " + e);
+        }
+      }
+      System.out.println("Child finished");
+    }
+  }
+
+  public static void main(String[] args) {
+    System.out.println("Main Started");
+    new Thread(new ChildThread()).start();
+    System.out.println("Main Finished");
+  }
+}
diff --git a/test/2031-zygote-compiled-frame-deopt/expected.txt b/test/2031-zygote-compiled-frame-deopt/expected.txt
new file mode 100644
index 0000000..21a75cf
--- /dev/null
+++ b/test/2031-zygote-compiled-frame-deopt/expected.txt
@@ -0,0 +1,3 @@
+JNI_OnLoad called
+Starting up!
+This is my object!
diff --git a/test/2031-zygote-compiled-frame-deopt/info.txt b/test/2031-zygote-compiled-frame-deopt/info.txt
new file mode 100644
index 0000000..aa59e4f
--- /dev/null
+++ b/test/2031-zygote-compiled-frame-deopt/info.txt
@@ -0,0 +1,5 @@
+Regression test for b/144947842
+
+Check that we correctly identify jit-zygote compiled frames as non-debuggable.
+
+We would hit DCHECKS before (part of) the fix to this bug.
\ No newline at end of file
diff --git a/test/2031-zygote-compiled-frame-deopt/native-wait.cc b/test/2031-zygote-compiled-frame-deopt/native-wait.cc
new file mode 100644
index 0000000..bd1d224
--- /dev/null
+++ b/test/2031-zygote-compiled-frame-deopt/native-wait.cc
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <atomic>
+#include <sstream>
+
+#include "base/globals.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
+#include "jni.h"
+#include "runtime.h"
+#include "thread_list.h"
+
+namespace art {
+namespace NativeWait {
+
+std::atomic<bool> native_waiting = false;
+std::atomic<bool> native_wait = false;
+
+// Perform late debuggable switch in the same way the zygote would (clear-jit,
+// unmark-zygote, set-debuggable, deopt boot, restart jit). NB This skips
+// restarting the heap threads since that doesn't seem to be needed to trigger
+// b/144947842.
+extern "C" JNIEXPORT void JNICALL Java_art_Test2031_simulateZygoteFork(JNIEnv*, jclass) {
+  Runtime* runtime = Runtime::Current();
+  bool has_jit = runtime->GetJit() != nullptr;
+  if (has_jit) {
+    runtime->GetJit()->PreZygoteFork();
+  }
+  runtime->SetAsZygoteChild(/*is_system_server=*/false, /*is_zygote=*/false);
+  runtime->AddCompilerOption("--debuggable");
+  runtime->SetJavaDebuggable(true);
+  {
+    // Deoptimize the boot image as it may be non-debuggable.
+    ScopedSuspendAll ssa(__FUNCTION__);
+    runtime->DeoptimizeBootImage();
+  }
+
+  if (has_jit) {
+    runtime->GetJitCodeCache()->PostForkChildAction(false, false);
+    runtime->GetJit()->PostForkChildAction(false, false);
+    // We have "zygote" code that isn't really part of the BCP. Just don't collect it.
+    runtime->GetJitCodeCache()->SetGarbageCollectCode(false);
+  }
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test2031_setupJvmti(JNIEnv* env,
+                                                               jclass,
+                                                               jstring testdir) {
+  const char* td = env->GetStringUTFChars(testdir, nullptr);
+  std::string testdir_str;
+  testdir_str.resize(env->GetStringUTFLength(testdir));
+  memcpy(testdir_str.data(), td, testdir_str.size());
+  env->ReleaseStringUTFChars(testdir, td);
+  std::ostringstream oss;
+  Runtime* runtime = Runtime::Current();
+  oss << testdir_str << (kIsDebugBuild ? "libtiagentd.so" : "libtiagent.so")
+      << "=2031-zygote-compiled-frame-deopt,art";
+  LOG(INFO) << "agent " << oss.str();
+  runtime->AttachAgent(env, oss.str(), nullptr);
+}
+extern "C" JNIEXPORT void JNICALL Java_art_Test2031_waitForNativeSleep(JNIEnv*, jclass) {
+  while (!native_waiting) {
+  }
+}
+extern "C" JNIEXPORT void JNICALL Java_art_Test2031_wakeupNativeSleep(JNIEnv*, jclass) {
+  native_wait = false;
+}
+extern "C" JNIEXPORT void JNICALL Java_art_Test2031_nativeSleep(JNIEnv*, jclass) {
+  native_wait = true;
+  do {
+    native_waiting = true;
+  } while (native_wait);
+  native_waiting = false;
+}
+
+}  // namespace NativeWait
+}  // namespace art
diff --git a/test/2031-zygote-compiled-frame-deopt/run b/test/2031-zygote-compiled-frame-deopt/run
new file mode 100755
index 0000000..900099f
--- /dev/null
+++ b/test/2031-zygote-compiled-frame-deopt/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The -Xopaque-jni-ids makes sure we can do structural redefinition. The --add-libdir-argument tells
+# default-run to pass the directory where the jvmti-agent is so we can load it later. The others
+# set the process to zygote mode and setup the jit cache size. We use a larger than normal jit-size
+# to avoid having to deal with jit-gc, a complication that's not relevant to this test.
+./default-run "$@" --runtime-option -Xopaque-jni-ids:true --add-libdir-argument --runtime-option -Xzygote --runtime-option -Xjitinitialsize:64M
diff --git a/test/2031-zygote-compiled-frame-deopt/src/Main.java b/test/2031-zygote-compiled-frame-deopt/src/Main.java
new file mode 100644
index 0000000..5c2eab8
--- /dev/null
+++ b/test/2031-zygote-compiled-frame-deopt/src/Main.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    System.loadLibrary(args[0]);
+    art.Test2031.$noinline$run(args[1]);
+  }
+}
diff --git a/test/2031-zygote-compiled-frame-deopt/src/art/Redefinition.java b/test/2031-zygote-compiled-frame-deopt/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/2031-zygote-compiled-frame-deopt/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/2031-zygote-compiled-frame-deopt/src/art/Test2031.java b/test/2031-zygote-compiled-frame-deopt/src/art/Test2031.java
new file mode 100644
index 0000000..71cbebd
--- /dev/null
+++ b/test/2031-zygote-compiled-frame-deopt/src/art/Test2031.java
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.ref.*;
+import java.lang.reflect.*;
+import java.util.*;
+
+public class Test2031 {
+  public static class MyClass {
+    public void starting() {
+      System.out.println("Starting up!");
+    }
+    public String toString() {
+      return "This is my object!";
+    }
+  }
+
+  public static void $noinline$doSimulateZygoteFork() {
+    simulateZygoteFork();
+  }
+
+  public static void $noinline$run(String testdir) throws Exception {
+    $noinline$doSimulateZygoteFork();
+    final MyClass myObject = new MyClass();
+    $noinline$startTest(testdir, myObject);
+    System.out.println(myObject);
+  }
+
+  public static void $noinline$startTest(String testdir, final MyClass myObject) throws Exception {
+    Thread thr = new Thread(() -> {
+      try {
+        waitForNativeSleep();
+        setupJvmti(testdir);
+        Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+        Thread tester = new Thread(() -> {
+          try {
+            myObject.starting();
+            doTest();
+          } catch (Exception e) {
+            throw new Error("Failure!", e);
+          }
+        });
+        tester.start();
+        tester.join();
+        wakeupNativeSleep();
+      } catch (Exception e) {
+        throw new Error("Failure!", e);
+      }
+    });
+    thr.start();
+    nativeSleep();
+    thr.join();
+  }
+
+  public static native void simulateZygoteFork();
+  public static native void setupJvmti(String testdir);
+  public static native void waitForNativeSleep();
+  public static native void wakeupNativeSleep();
+  public static native void nativeSleep();
+
+  private static void doRedefinition() {
+    Redefinition.doCommonStructuralClassRedefinition(Transform.class, REDEFINED_DEX_BYTES);
+  }
+
+  public static class SuperTransform {
+    public int id;
+
+    public SuperTransform(int id) {
+      this.id = id;
+    }
+
+    public String toString() {
+      return "SuperTransform { id: " + id + ", class: " + getClass() + " }";
+    }
+  }
+
+  public static class Transform extends SuperTransform {
+    static {
+    }
+
+    public static Object BAR =
+        new Object() {
+          public String toString() {
+            return "value of <" + this.get() + ">";
+          }
+
+          public Object get() {
+            return "BAR FIELD";
+          }
+        };
+    public static Object FOO =
+        new Object() {
+          public String toString() {
+            return "value of <" + this.get() + ">";
+          }
+
+          public Object get() {
+            return "FOO FIELD";
+          }
+        };
+    // This class has no virtual fields or methods. This means we can structurally redefine it
+    // without having to change the size of any instances.
+    public Transform(int id) {
+      super(id);
+    }
+
+    public static String staticToString() {
+      return Transform.class.toString() + "[FOO: " + FOO + ", BAR: " + BAR + "]";
+    }
+  }
+
+  public static class SubTransform extends Transform {
+    public SubTransform(int id) {
+      super(id);
+    }
+
+    public String myToString() {
+      return "SubTransform (subclass of: " + staticToString() + ") { id: " + id + " }";
+    }
+  }
+
+  /* Base64 encoded class of:
+   * public static class Transform extends SuperTransform {
+   *   static {}
+   *   public Transform(int id) { super(id + 1000); }
+   *   // NB This is the order the fields will be laid out in memory.
+   *   public static Object BAR;
+   *   public static Object BAZ;
+   *   public static Object FOO;
+   *   public static String staticToString() {
+   *    return Transform.class.toString() + "[FOO: " + FOO + ", BAR: " + BAR + ", BAZ: " + BAZ + "]";
+   *   }
+   * }
+   */
+  private static byte[] REDEFINED_DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+"ZGV4CjAzNQC78lC18jI6omumTaKUcf/8pvcR4/Hx2u3QBQAAcAAAAHhWNBIAAAAAAAAAAAwFAAAg" +
+"AAAAcAAAAAsAAADwAAAABQAAABwBAAADAAAAWAEAAAkAAABwAQAAAQAAALgBAAD4AwAA2AEAAKoC" +
+"AACzAgAAvAIAAMYCAADOAgAA0wIAANgCAADdAgAA4AIAAOMCAADnAgAABgMAACADAAAwAwAAVAMA" +
+"AHQDAACHAwAAmwMAAK8DAADKAwAA2QMAAOQDAADnAwAA6wMAAPMDAAD2AwAAAwQAAAsEAAARBAAA" +
+"IQQAACsEAAAyBAAABwAAAAoAAAALAAAADAAAAA0AAAAOAAAADwAAABAAAAARAAAAEgAAABUAAAAI" +
+"AAAACAAAAAAAAAAJAAAACQAAAJQCAAAJAAAACQAAAJwCAAAVAAAACgAAAAAAAAAWAAAACgAAAKQC" +
+"AAACAAcABAAAAAIABwAFAAAAAgAHAAYAAAABAAQAAwAAAAIAAwACAAAAAgAEAAMAAAACAAAAHAAA" +
+"AAYAAAAdAAAACQADAAMAAAAJAAEAGgAAAAkAAgAaAAAACQAAAB0AAAACAAAAAQAAAAEAAAAAAAAA" +
+"EwAAAPwEAADQBAAAAAAAAAUAAAACAAAAjQIAADYAAAAcAAIAbhAEAAAADABiAQIAYgIAAGIDAQAi" +
+"BAkAcBAFAAQAbiAHAAQAGgAXAG4gBwAEAG4gBgAUABoAAABuIAcABABuIAYAJAAaAAEAbiAHAAQA" +
+"biAGADQAGgAYAG4gBwAEAG4QCAAEAAwAEQAAAAAAAAAAAIQCAAABAAAADgAAAAIAAgACAAAAiAIA" +
+"AAYAAADQEegDcCAAABAADgAPAA4AEAEADgAWAA4AAAAAAQAAAAcAAAABAAAACAAAAAEAAAAAAAcs" +
+"IEJBUjogAAcsIEJBWjogAAg8Y2xpbml0PgAGPGluaXQ+AANCQVIAA0JBWgADRk9PAAFJAAFMAAJM" +
+"TAAdTGFydC9UZXN0MjAzMSRTdXBlclRyYW5zZm9ybTsAGExhcnQvVGVzdDIwMzEkVHJhbnNmb3Jt" +
+"OwAOTGFydC9UZXN0MjAzMTsAIkxkYWx2aWsvYW5ub3RhdGlvbi9FbmNsb3NpbmdDbGFzczsAHkxk" +
+"YWx2aWsvYW5ub3RhdGlvbi9Jbm5lckNsYXNzOwARTGphdmEvbGFuZy9DbGFzczsAEkxqYXZhL2xh" +
+"bmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABlMamF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7" +
+"AA1UZXN0MjAzMS5qYXZhAAlUcmFuc2Zvcm0AAVYAAlZJAAZbRk9POiAAAV0AC2FjY2Vzc0ZsYWdz" +
+"AAZhcHBlbmQABG5hbWUADnN0YXRpY1RvU3RyaW5nAAh0b1N0cmluZwAFdmFsdWUAjAF+fkQ4eyJj" +
+"b21waWxhdGlvbi1tb2RlIjoiZGVidWciLCJoYXMtY2hlY2tzdW1zIjpmYWxzZSwibWluLWFwaSI6" +
+"MSwic2hhLTEiOiJkMWQ1MWMxY2IzZTg1YWEzMGUwMGE2ODIyY2NhODNiYmUxZGZlOTQ1IiwidmVy" +
+"c2lvbiI6IjIuMC4xMy1kZXYifQACBAEeGAMCBQIZBAkbFxQDAAMAAAkBCQEJAYiABNQEAYGABOgE" +
+"AQnYAwAAAAAAAAIAAADBBAAAxwQAAPAEAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAEAAAAAAAAAAQAA" +
+"ACAAAABwAAAAAgAAAAsAAADwAAAAAwAAAAUAAAAcAQAABAAAAAMAAABYAQAABQAAAAkAAABwAQAA" +
+"BgAAAAEAAAC4AQAAASAAAAMAAADYAQAAAyAAAAMAAACEAgAAARAAAAMAAACUAgAAAiAAACAAAACq" +
+"AgAABCAAAAIAAADBBAAAACAAAAEAAADQBAAAAxAAAAIAAADsBAAABiAAAAEAAAD8BAAAABAAAAEA" +
+"AAAMBQAA");
+
+  public static void doTest() throws Exception {
+    Transform t1 = new Transform(1);
+    SuperTransform t2 = new SubTransform(2);
+    doRedefinition();
+  }
+}
diff --git a/test/2032-default-method-private-override/expected.txt b/test/2032-default-method-private-override/expected.txt
new file mode 100644
index 0000000..349ae32
--- /dev/null
+++ b/test/2032-default-method-private-override/expected.txt
@@ -0,0 +1,6 @@
+Concrete1
+Hello
+Concrete2
+Hello
+Concrete3
+Hello
diff --git a/test/2032-default-method-private-override/info.txt b/test/2032-default-method-private-override/info.txt
new file mode 100644
index 0000000..8367a3d
--- /dev/null
+++ b/test/2032-default-method-private-override/info.txt
@@ -0,0 +1,7 @@
+Regression test for b/152199517
+
+We would incorrectly search all declared methods of a class for interface
+implementations instead of restricting ourselves to virtual methods when
+looking for overrides to a superclasses interfaces. This could cause
+exceptions and incorrect behavior as we might try to use a private or a
+static method as an interface implementation.
diff --git a/test/2032-default-method-private-override/jasmin/Concrete1.j b/test/2032-default-method-private-override/jasmin/Concrete1.j
new file mode 100644
index 0000000..fbf62eb
--- /dev/null
+++ b/test/2032-default-method-private-override/jasmin/Concrete1.j
@@ -0,0 +1,34 @@
+; Copyright (C) 2020 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Concrete1
+.super java/lang/Object
+.implements IFace
+
+.method public <init>()V
+  .limit stack 1
+  .limit locals 1
+  aload_0
+  invokespecial java/lang/Object/<init>()V
+  return
+.end method
+
+.method private static sayHi()V
+  .limit stack 2
+  .limit locals 2
+  getstatic java/lang/System/out Ljava/io/PrintStream;
+  ldc "Hello from a private method!"
+  invokevirtual java/io/PrintStream/println(Ljava/lang/String;)V
+  return
+.end method
diff --git a/test/2032-default-method-private-override/jasmin/Concrete2.j b/test/2032-default-method-private-override/jasmin/Concrete2.j
new file mode 100644
index 0000000..00eee98
--- /dev/null
+++ b/test/2032-default-method-private-override/jasmin/Concrete2.j
@@ -0,0 +1,34 @@
+; Copyright (C) 2020 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Concrete2
+; Our superclass implements the IFace interface.
+.super Concrete2Base
+
+.method public <init>()V
+  .limit stack 1
+  .limit locals 1
+  aload_0
+  invokespecial Concrete2Base/<init>()V
+  return
+.end method
+
+.method private static sayHi()V
+  .limit stack 2
+  .limit locals 2
+  getstatic java/lang/System/out Ljava/io/PrintStream;
+  ldc "Hello from a private method!"
+  invokevirtual java/io/PrintStream/println(Ljava/lang/String;)V
+  return
+.end method
diff --git a/test/2032-default-method-private-override/jasmin/Concrete3.j b/test/2032-default-method-private-override/jasmin/Concrete3.j
new file mode 100644
index 0000000..cb231e8
--- /dev/null
+++ b/test/2032-default-method-private-override/jasmin/Concrete3.j
@@ -0,0 +1,34 @@
+; Copyright (C) 2020 The Android Open Source Project
+;
+; Licensed under the Apache License, Version 2.0 (the "License");
+; you may not use this file except in compliance with the License.
+; You may obtain a copy of the License at
+;
+;      http://www.apache.org/licenses/LICENSE-2.0
+;
+; Unless required by applicable law or agreed to in writing, software
+; distributed under the License is distributed on an "AS IS" BASIS,
+; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+; See the License for the specific language governing permissions and
+; limitations under the License.
+
+.class public Concrete3
+; Our superclass implements the IFace interface.
+.super Concrete2Base
+
+.method public <init>()V
+  .limit stack 1
+  .limit locals 1
+  aload_0
+  invokespecial Concrete2Base/<init>()V
+  return
+.end method
+
+.method public static sayHi()V
+  .limit stack 2
+  .limit locals 2
+  getstatic java/lang/System/out Ljava/io/PrintStream;
+  ldc "Hello from a private method!"
+  invokevirtual java/io/PrintStream/println(Ljava/lang/String;)V
+  return
+.end method
diff --git a/test/2032-default-method-private-override/src/Concrete2Base.java b/test/2032-default-method-private-override/src/Concrete2Base.java
new file mode 100644
index 0000000..470e844
--- /dev/null
+++ b/test/2032-default-method-private-override/src/Concrete2Base.java
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Concrete2Base implements IFace { }
diff --git a/test/2032-default-method-private-override/src/IFace.java b/test/2032-default-method-private-override/src/IFace.java
new file mode 100644
index 0000000..2cec4f1
--- /dev/null
+++ b/test/2032-default-method-private-override/src/IFace.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public interface IFace {
+  public default void sayHi() {
+    System.out.println("Hello");
+  }
+}
diff --git a/test/2032-default-method-private-override/src/Main.java b/test/2032-default-method-private-override/src/Main.java
new file mode 100644
index 0000000..4e17a21
--- /dev/null
+++ b/test/2032-default-method-private-override/src/Main.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void sayHi(String klass) throws Exception {
+    try {
+      System.out.println(klass);
+      IFace iface = (IFace)Class.forName(klass).newInstance();
+      iface.sayHi();
+    } catch (Exception e) {
+      System.out.println("Exception thrown!");
+      System.out.println(e);
+    }
+  }
+  public static void main(String[] args) throws Exception {
+    sayHi("Concrete1");
+    sayHi("Concrete2");
+    sayHi("Concrete3");
+  }
+}
diff --git a/test/2035-structural-native-method/expected.txt b/test/2035-structural-native-method/expected.txt
new file mode 100644
index 0000000..c782b8f
--- /dev/null
+++ b/test/2035-structural-native-method/expected.txt
@@ -0,0 +1,3 @@
+value is 42
+value is 42
+non-native value is 1337
diff --git a/test/2035-structural-native-method/info.txt b/test/2035-structural-native-method/info.txt
new file mode 100644
index 0000000..9467187
--- /dev/null
+++ b/test/2035-structural-native-method/info.txt
@@ -0,0 +1,5 @@
+Tests structural redefinition with register-natives
+
+Regression test for b/158476592. Structural redefinition was incorrectly
+clearing the JNI bindings of native methods. This could interfere with
+classes using 'JNIEnv::RegisterNatives'.
diff --git a/test/2035-structural-native-method/run b/test/2035-structural-native-method/run
new file mode 100755
index 0000000..ff387ff
--- /dev/null
+++ b/test/2035-structural-native-method/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --jvmti --runtime-option -Xopaque-jni-ids:true
diff --git a/test/2035-structural-native-method/src-art/Main.java b/test/2035-structural-native-method/src-art/Main.java
new file mode 100644
index 0000000..4bcd725
--- /dev/null
+++ b/test/2035-structural-native-method/src-art/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    art.Test2035.run();
+  }
+}
diff --git a/test/2035-structural-native-method/src-art/art/Redefinition.java b/test/2035-structural-native-method/src-art/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/2035-structural-native-method/src-art/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/2035-structural-native-method/src-art/art/Test2035.java b/test/2035-structural-native-method/src-art/art/Test2035.java
new file mode 100644
index 0000000..b95bff6
--- /dev/null
+++ b/test/2035-structural-native-method/src-art/art/Test2035.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.Base64;
+import java.lang.reflect.Method;
+
+public class Test2035 {
+  public static class Transform {
+    public Transform() {}
+
+    public native long getValue();
+  }
+  /*
+   * base64 encoded class/dex file for
+   * Base64 generated using:
+   * % javac Test2035.java
+   * % d8 Test2035\$Transform.class
+   * % base64 classes.dex| sed 's:^:":' | sed 's:$:" +:'
+   *
+   * package art;
+   * public static class Transform {
+   *   public Transform() {
+   *   }
+   *   public native long getValue();
+   *   public long nonNativeValue() {
+   *     return 1337;
+   *   };
+   * }
+   */
+  private static final byte[] DEX_BYTES =
+      Base64.getDecoder()
+          .decode(
+              "ZGV4CjAzNQAIm/YHNPSI0ggIbrKz6Jg/IBYl2Kq0TXS8AwAAcAAAAHhWNBIAAAAAAAAAABADAAAQ"
+                  + "AAAAcAAAAAcAAACwAAAAAgAAAMwAAAAAAAAAAAAAAAQAAADkAAAAAQAAAAQBAACYAgAAJAEAAGAB"
+                  + "AABoAQAAawEAAIUBAACVAQAAuQEAANkBAADtAQAA/AEAAAcCAAAKAgAAFwIAACECAAAnAgAANwIA"
+                  + "AD4CAAABAAAAAgAAAAMAAAAEAAAABQAAAAYAAAAJAAAAAQAAAAAAAAAAAAAACQAAAAYAAAAAAAAA"
+                  + "AQABAAAAAAABAAAACwAAAAEAAAANAAAABQABAAAAAAABAAAAAQAAAAUAAAAAAAAABwAAAAADAADb"
+                  + "AgAAAAAAAAMAAQAAAAAAVAEAAAMAAAAWADkFEAAAAAEAAQABAAAAWAEAAAQAAABwEAMAAAAOAA0A"
+                  + "DgAJAA48AAAAAAY8aW5pdD4AAUoAGExhcnQvVGVzdDIwMzUkVHJhbnNmb3JtOwAOTGFydC9UZXN0"
+                  + "MjAzNTsAIkxkYWx2aWsvYW5ub3RhdGlvbi9FbmNsb3NpbmdDbGFzczsAHkxkYWx2aWsvYW5ub3Rh"
+                  + "dGlvbi9Jbm5lckNsYXNzOwASTGphdmEvbGFuZy9PYmplY3Q7AA1UZXN0MjAzNS5qYXZhAAlUcmFu"
+                  + "c2Zvcm0AAVYAC2FjY2Vzc0ZsYWdzAAhnZXRWYWx1ZQAEbmFtZQAObm9uTmF0aXZlVmFsdWUABXZh"
+                  + "bHVlAIsBfn5EOHsiY29tcGlsYXRpb24tbW9kZSI6ImRlYnVnIiwiaGFzLWNoZWNrc3VtcyI6ZmFs"
+                  + "c2UsIm1pbi1hcGkiOjEsInNoYS0xIjoiOGNkYTg3OGE1MjJiMjJkMWQ2YTljNGQ0MjY5M2Y0OTAw"
+                  + "MjJmZTQ2YiIsInZlcnNpb24iOiIyLjIuMS1kZXYifQACAwEOGAICBAIKBAkMFwgAAAECAIGABLwC"
+                  + "AYECAAEBpAIAAAAAAAAAAgAAAMwCAADSAgAA9AIAAAAAAAAAAAAAAAAAAA4AAAAAAAAAAQAAAAAA"
+                  + "AAABAAAAEAAAAHAAAAACAAAABwAAALAAAAADAAAAAgAAAMwAAAAFAAAABAAAAOQAAAAGAAAAAQAA"
+                  + "AAQBAAABIAAAAgAAACQBAAADIAAAAgAAAFQBAAACIAAAEAAAAGABAAAEIAAAAgAAAMwCAAAAIAAA"
+                  + "AQAAANsCAAADEAAAAgAAAPACAAAGIAAAAQAAAAADAAAAEAAAAQAAABADAAA=");
+
+  public static void run() throws Exception {
+    Redefinition.setTestConfiguration(Redefinition.Config.COMMON_REDEFINE);
+    doTest();
+  }
+
+  public static void doTest() throws Exception {
+    LinkClassMethods(Transform.class);
+    Transform t = new Transform();
+    System.out.println("value is " + t.getValue());
+    Redefinition.doCommonStructuralClassRedefinition(Transform.class, DEX_BYTES);
+    System.out.println("value is " + t.getValue());
+    System.out.println(
+        "non-native value is " + Transform.class.getDeclaredMethod("nonNativeValue").invoke(t));
+  }
+
+  public static native void LinkClassMethods(Class<?> k);
+}
diff --git a/test/2035-structural-native-method/src/Main.java b/test/2035-structural-native-method/src/Main.java
new file mode 100644
index 0000000..e0477b0
--- /dev/null
+++ b/test/2035-structural-native-method/src/Main.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) throws Exception {
+    System.out.println("FAIL: Test is only for art!");
+  }
+}
diff --git a/test/2035-structural-native-method/structural-native.cc b/test/2035-structural-native-method/structural-native.cc
new file mode 100644
index 0000000..bf51c8b
--- /dev/null
+++ b/test/2035-structural-native-method/structural-native.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+
+#include <vector>
+
+#include "android-base/logging.h"
+#include "android-base/macros.h"
+#include "jni.h"
+#include "jvmti.h"
+
+// Test infrastructure
+#include "jvmti_helper.h"
+#include "scoped_local_ref.h"
+#include "test_env.h"
+
+namespace art {
+namespace Test2035StructuralNativeMethod {
+
+jlong JNICALL TransformNativeMethod(JNIEnv* env ATTRIBUTE_UNUSED, jclass klass ATTRIBUTE_UNUSED) {
+  return 42;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test2035_LinkClassMethods(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jclass target) {
+  JNINativeMethod meth{"getValue", "()J", reinterpret_cast<void*>(TransformNativeMethod)};
+  env->RegisterNatives(target, &meth, 1);
+}
+
+
+}  // namespace Test2035StructuralNativeMethod
+}  // namespace art
diff --git a/test/2230-profile-save-hotness/expected.txt b/test/2230-profile-save-hotness/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/2230-profile-save-hotness/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/2230-profile-save-hotness/info.txt b/test/2230-profile-save-hotness/info.txt
new file mode 100644
index 0000000..c230391
--- /dev/null
+++ b/test/2230-profile-save-hotness/info.txt
@@ -0,0 +1 @@
+Check that profile recording works with AOT hotness counters.
diff --git a/test/2230-profile-save-hotness/run b/test/2230-profile-save-hotness/run
new file mode 100644
index 0000000..d0c49b6
--- /dev/null
+++ b/test/2230-profile-save-hotness/run
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+${RUN} \
+  -Xcompiler-option --count-hotness-in-compiled-code \
+  -Xcompiler-option --compiler-filter=speed \
+  --runtime-option -Xps-profile-aot-code \
+  --runtime-option -Xjitsaveprofilinginfo \
+  --runtime-option -Xusejit:true "${@}"
diff --git a/test/2230-profile-save-hotness/src-art/Main.java b/test/2230-profile-save-hotness/src-art/Main.java
new file mode 100644
index 0000000..97177cc
--- /dev/null
+++ b/test/2230-profile-save-hotness/src-art/Main.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import dalvik.system.VMRuntime;
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.Method;
+
+public class Main {
+  public static void $noinline$hotnessCount() {}
+
+  public static void $noinline$hotnessCountWithLoop(int count) {
+    for (int i = 0; i < count; i++) {
+      $noinline$hotnessCount();
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    System.loadLibrary(args[0]);
+    if (!isAotCompiled(Main.class, "main")) {
+      return;
+    }
+
+    File file = null;
+    try {
+      file = createTempFile();
+      String codePath = System.getenv("DEX_LOCATION") + "/2230-profile-save-hotness.jar";
+      VMRuntime.registerAppInfo(file.getPath(), new String[] {codePath});
+
+      // Test that the profile saves an app method with a profiling info.
+      $noinline$hotnessCountWithLoop(10000);
+      ensureProfileProcessing();
+      String methodName = "$noinline$hotnessCount";
+      Method appMethod = Main.class.getDeclaredMethod(methodName);
+      if (!presentInProfile(file.getPath(), appMethod)) {
+        System.out.println("App method not hot in profile " +
+                getHotnessCounter(Main.class, methodName));
+      }
+      if (getHotnessCounter(Main.class, methodName) == 0) {
+        System.out.println("Hotness should be non zero " +
+                getHotnessCounter(Main.class, methodName));
+      }
+      VMRuntime.resetJitCounters();
+      if (getHotnessCounter(Main.class, methodName) != 0) {
+        System.out.println("Hotness should be zero " + getHotnessCounter(Main.class, methodName));
+      }
+    } finally {
+      if (file != null) {
+        file.delete();
+      }
+    }
+  }
+
+  // Checks if the profiles saver has the method as hot/warm.
+  public static native boolean presentInProfile(String profile, Method method);
+  // Ensures the profile saver does its usual processing.
+  public static native void ensureProfileProcessing();
+  public static native boolean isAotCompiled(Class<?> cls, String methodName);
+  public static native int getHotnessCounter(Class<?> cls, String methodName);
+
+  private static final String TEMP_FILE_NAME_PREFIX = "dummy";
+  private static final String TEMP_FILE_NAME_SUFFIX = "-file";
+
+  private static File createTempFile() throws Exception {
+    try {
+      return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
+    } catch (IOException e) {
+      System.setProperty("java.io.tmpdir", "/data/local/tmp");
+      try {
+        return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
+      } catch (IOException e2) {
+        System.setProperty("java.io.tmpdir", "/sdcard");
+        return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
+      }
+    }
+  }
+}
diff --git a/test/442-checker-constant-folding/src/Main.java b/test/442-checker-constant-folding/src/Main.java
index 3d92943..45157ab 100644
--- a/test/442-checker-constant-folding/src/Main.java
+++ b/test/442-checker-constant-folding/src/Main.java
@@ -864,6 +864,23 @@
     return lhs & rhs;
   }
 
+  /// CHECK-START: int Main.AndSelfNegated(int) constant_folding (before)
+  /// CHECK-DAG:     <<Arg:i\d+>>     ParameterValue
+  /// CHECK-DAG:     <<Not:i\d+>>     Not [<<Arg>>]
+  /// CHECK-DAG:     <<And:i\d+>>     And [<<Not>>,<<Arg>>]
+  /// CHECK-DAG:                      Return [<<And>>]
+
+  /// CHECK-START: int Main.AndSelfNegated(int) constant_folding (after)
+  /// CHECK-DAG:     <<Const0:i\d+>>  IntConstant 0
+  /// CHECK-DAG:                      Return [<<Const0>>]
+
+  /// CHECK-START: int Main.AndSelfNegated(int) constant_folding (after)
+  /// CHECK-NOT:                      And
+
+  public static int AndSelfNegated(int arg) {
+    return arg & ~arg;
+  }
+
 
   /**
    * Exercise constant folding on logical or.
diff --git a/test/461-get-reference-vreg/get_reference_vreg_jni.cc b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
index 817a647..0636682 100644
--- a/test/461-get-reference-vreg/get_reference_vreg_jni.cc
+++ b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
@@ -17,6 +17,7 @@
 #include "arch/context.h"
 #include "art_method-inl.h"
 #include "jni.h"
+#include "oat_quick_method_header.h"
 #include "scoped_thread_state_change-inl.h"
 #include "stack.h"
 #include "thread.h"
@@ -25,6 +26,29 @@
 
 namespace {
 
+bool IsFrameCompiledAndNonDebuggable(const art::StackVisitor* sv) {
+  return sv->GetCurrentShadowFrame() == nullptr &&
+         sv->GetCurrentOatQuickMethodHeader()->IsOptimized() &&
+         !Runtime::Current()->IsJavaDebuggable();
+}
+
+void CheckOptimizedOutRegLiveness(const art::StackVisitor* sv,
+                                  ArtMethod* m,
+                                  uint32_t dex_reg,
+                                  VRegKind vreg_kind,
+                                  bool check_val = false,
+                                  uint32_t expected = 0) REQUIRES_SHARED(Locks::mutator_lock_) {
+  uint32_t value = 0;
+  if (IsFrameCompiledAndNonDebuggable(sv)) {
+    CHECK_EQ(sv->GetVReg(m, dex_reg, vreg_kind, &value), false);
+  } else {
+    CHECK(sv->GetVReg(m, dex_reg, vreg_kind, &value));
+    if (check_val) {
+      CHECK_EQ(value, expected);
+    }
+  }
+}
+
 jint FindMethodIndex(jobject this_value_jobj) {
   ScopedObjectAccess soa(Thread::Current());
   std::unique_ptr<Context> context(Context::Create());
@@ -38,21 +62,22 @@
         if (m_name.compare("$noinline$testThisWithInstanceCall") == 0) {
           found_method_index = 1;
           uint32_t value = 0;
-          CHECK(stack_visitor->GetVReg(m, 1, kReferenceVReg, &value));
-          CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value);
-          CHECK_EQ(stack_visitor->GetThisObject(), this_value);
+          if (IsFrameCompiledAndNonDebuggable(stack_visitor)) {
+            CheckOptimizedOutRegLiveness(stack_visitor, m, 1, kReferenceVReg);
+          } else {
+            CHECK(stack_visitor->GetVReg(m, 1, kReferenceVReg, &value));
+            CHECK_EQ(reinterpret_cast<mirror::Object*>(value), this_value);
+            CHECK_EQ(stack_visitor->GetThisObject(), this_value);
+          }
         } else if (m_name.compare("$noinline$testThisWithStaticCall") == 0) {
           found_method_index = 2;
-          uint32_t value = 0;
-          CHECK(stack_visitor->GetVReg(m, 1, kReferenceVReg, &value));
+          CheckOptimizedOutRegLiveness(stack_visitor, m, 1, kReferenceVReg);
         } else if (m_name.compare("$noinline$testParameter") == 0) {
           found_method_index = 3;
-          uint32_t value = 0;
-          CHECK(stack_visitor->GetVReg(m, 1, kReferenceVReg, &value));
+          CheckOptimizedOutRegLiveness(stack_visitor, m, 1, kReferenceVReg);
         } else if (m_name.compare("$noinline$testObjectInScope") == 0) {
           found_method_index = 4;
-          uint32_t value = 0;
-          CHECK(stack_visitor->GetVReg(m, 0, kReferenceVReg, &value));
+          CheckOptimizedOutRegLiveness(stack_visitor, m, 0, kReferenceVReg);
         }
 
         return true;
diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc
index 905d8e6..b1fd6b5 100644
--- a/test/466-get-live-vreg/get_live_vreg_jni.cc
+++ b/test/466-get-live-vreg/get_live_vreg_jni.cc
@@ -40,9 +40,7 @@
       found_method_ = true;
       CHECK_EQ(CodeItemDataAccessor(m->DexInstructionData()).RegistersSize(), 3u);
       CheckOptimizedOutRegLiveness(m, 1, kIntVReg, true, 42);
-
-      uint32_t value;
-      CHECK(GetVReg(m, 2, kReferenceVReg, &value));
+      CheckOptimizedOutRegLiveness(m, 2, kReferenceVReg);
     } else if (m_name.compare("$noinline$testIntervalHole") == 0) {
       found_method_ = true;
       uint32_t number_of_dex_registers =
diff --git a/test/478-checker-clinit-check-pruning/src/Main.java b/test/478-checker-clinit-check-pruning/src/Main.java
index b1bc51e..5b9ebc8 100644
--- a/test/478-checker-clinit-check-pruning/src/Main.java
+++ b/test/478-checker-clinit-check-pruning/src/Main.java
@@ -225,6 +225,19 @@
   /// CHECK-DAG:                           ClinitCheck
   /// CHECK-DAG:                           InvokeStaticOrDirect
 
+  // The following checks ensure the clinit check and load class
+  // instructions added by the builder are pruned by the
+  // PrepareForRegisterAllocation.  As the control flow graph is not
+  // dumped after (nor before) this step, we check the CFG as it is
+  // before the next pass (liveness analysis) instead.
+
+  /// CHECK-START: void Main$ClassWithClinit4Instance.invokeStaticNotInlined() liveness (before)
+  /// CHECK:                               InvokeStaticOrDirect clinit_check:implicit
+
+  /// CHECK-START: void Main$ClassWithClinit4Instance.invokeStaticNotInlined() liveness (before)
+  /// CHECK-NOT:                           LoadClass
+  /// CHECK-NOT:                           ClinitCheck
+
   static class ClassWithClinit4Instance {
     void invokeStaticNotInlined() {
       // ClinitCheck required.
diff --git a/test/496-checker-inlining-class-loader/src/Main.java b/test/496-checker-inlining-class-loader/src/Main.java
index 5deb77f..4fe4723 100644
--- a/test/496-checker-inlining-class-loader/src/Main.java
+++ b/test/496-checker-inlining-class-loader/src/Main.java
@@ -107,13 +107,13 @@
                 /* Load and initialize FirstSeenByMyClassLoader */
   /// CHECK:      LoadClass class_name:FirstSeenByMyClassLoader gen_clinit_check:true
                 /* Load and initialize System */
-  // There may be MipsComputeBaseMethodAddress here.
+  // There may be HX86ComputeBaseMethodAddress here.
   /// CHECK:      LoadClass class_name:java.lang.System
   // The ClinitCheck may (PIC) or may not (non-PIC) be merged into the LoadClass.
   // (The merging checks for environment match but HLoadClass/kBootImageAddress
   // used for non-PIC mode does not have an environment at all.)
   /// CHECK:      StaticFieldGet
-  // There may be HX86ComputeBaseMethodAddress or MipsComputeBaseMethodAddress here.
+  // There may be HX86ComputeBaseMethodAddress here.
   /// CHECK:      LoadString
   /// CHECK-NEXT: NullCheck
   /// CHECK-NEXT: InvokeVirtual
diff --git a/test/521-checker-array-set-null/src/Main.java b/test/521-checker-array-set-null/src/Main.java
index 74bb73f..f166b92 100644
--- a/test/521-checker-array-set-null/src/Main.java
+++ b/test/521-checker-array-set-null/src/Main.java
@@ -22,19 +22,19 @@
   }
 
   /// CHECK-START: void Main.testWithNull(java.lang.Object[]) disassembly (after)
-  /// CHECK-NOT:      pAputObject
+  /// CHECK:          ArraySet needs_type_check:false
   public static void testWithNull(Object[] o) {
     o[0] = null;
   }
 
   /// CHECK-START: void Main.testWithUnknown(java.lang.Object[], java.lang.Object) disassembly (after)
-  /// CHECK:          pAputObject
+  /// CHECK:          ArraySet needs_type_check:true
   public static void testWithUnknown(Object[] o, Object obj) {
     o[0] = obj;
   }
 
   /// CHECK-START: void Main.testWithSame(java.lang.Object[]) disassembly (after)
-  /// CHECK-NOT:      pAputObject
+  /// CHECK:          ArraySet needs_type_check:false
   public static void testWithSame(Object[] o) {
     o[0] = o[1];
   }
diff --git a/test/530-checker-lse-simd/expected.txt b/test/530-checker-lse-simd/expected.txt
new file mode 100644
index 0000000..03bb57b
--- /dev/null
+++ b/test/530-checker-lse-simd/expected.txt
@@ -0,0 +1,7 @@
+1
+3
+1
+2
+2
+1
+1
diff --git a/test/530-checker-lse-simd/info.txt b/test/530-checker-lse-simd/info.txt
new file mode 100644
index 0000000..61b4741
--- /dev/null
+++ b/test/530-checker-lse-simd/info.txt
@@ -0,0 +1 @@
+Checker test for load-store elimination in the presence of VecLoad and VecStore.
diff --git a/test/530-checker-lse-simd/src/Main.java b/test/530-checker-lse-simd/src/Main.java
new file mode 100644
index 0000000..12dd297
--- /dev/null
+++ b/test/530-checker-lse-simd/src/Main.java
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  // Based on Linpack.matgen
+  // Load-store elimination did not work when a function had SIMD code.
+  // In the test below loop B is vectorized.
+  // Check that a redundant ArrayGet is eliminated in loop A.
+
+  /// CHECK-START: double Main.$noinline$vecgen(double[], double[], int) load_store_elimination (before)
+  /// CHECK:      Rem
+  /// CHECK-NEXT: TypeConversion
+  /// CHECK-NEXT: Sub
+  /// CHECK-NEXT: Mul
+  /// CHECK-NEXT: ArraySet
+  /// CHECK-NEXT: ArrayGet
+  /// CHECK-NEXT: LessThanOrEqual
+  /// CHECK-NEXT: Select
+  /// CHECK-NEXT: Add
+  /// CHECK-NEXT: Goto loop:{{B\d+}}
+
+  /// CHECK-START: double Main.$noinline$vecgen(double[], double[], int) load_store_elimination (after)
+  /// CHECK:      Rem
+  /// CHECK-NEXT: TypeConversion
+  /// CHECK-NEXT: Sub
+  /// CHECK-NEXT: Mul
+  /// CHECK-NEXT: ArraySet
+  /// CHECK-NEXT: LessThanOrEqual
+  /// CHECK-NEXT: Select
+  /// CHECK-NEXT: Add
+  /// CHECK-NEXT: Goto loop:{{B\d+}}
+  static double $noinline$vecgen(double a[], double b[], int n) {
+    double norma = 0.0;
+    int init = 1325;
+    // Loop A
+    for (int i = 0; i < n; ++i) {
+      init = 3125*init % 65536;
+      a[i] = (init - 32768.0)/16384.0;
+      norma = (a[i] > norma) ? a[i] : norma; // ArrayGet should be removed by LSE.
+    }
+
+    // Loop B
+    for (int i = 0; i < n; ++i) {
+      b[i] += a[i];
+    }
+
+    return norma;
+  }
+
+
+  static void test01() {
+    double a[] = new double[1024];
+    double norma = $noinline$vecgen(a, a, a.length);
+    System.out.println((int)norma);
+    System.out.println((int)a[1023]);
+  }
+
+  // Check LSE works when a function has SIMD code.
+  //
+  /// CHECK-START: double Main.$noinline$test02(double[], int) load_store_elimination (before)
+  /// CHECK:      BoundsCheck loop:none
+  /// CHECK-NEXT: ArrayGet
+  /// CHECK-NEXT: Mul
+  /// CHECK-NEXT: ArraySet
+  /// CHECK-NEXT: ArrayGet
+  /// CHECK-NEXT: ArrayLength
+  /// CHECK-NEXT: BelowOrEqual
+  //
+  /// CHECK:      ArrayGet loop:none
+  /// CHECK-NEXT: Return
+
+  /// CHECK-START: double Main.$noinline$test02(double[], int) load_store_elimination (after)
+  /// CHECK:      BoundsCheck loop:none
+  /// CHECK-NEXT: ArrayGet
+  /// CHECK-NEXT: Mul
+  /// CHECK-NEXT: ArraySet
+  /// CHECK-NEXT: ArrayLength
+  /// CHECK-NEXT: BelowOrEqual
+  //
+  /// CHECK:      ArrayGet loop:none
+  /// CHECK-NEXT: Return
+  static double $noinline$test02(double a[], int n) {
+    double b[] = new double[n];
+    a[0] = a[0] / 2;
+
+    double norma = a[0]; // ArrayGet should be removed by LSE.
+
+    // The following loop is vectorized.
+    for (int i = 0; i < 128; ++i) {
+      b[i] += a[i];
+    }
+
+    norma = a[0];
+    return norma;
+  }
+
+  static void test02() {
+    double a[] = new double[128];
+    java.util.Arrays.fill(a, 2.0);
+    double norma = $noinline$test02(a, a.length);
+    System.out.println((int)norma);
+  }
+
+  // Check LSE works when a function has SIMD code.
+  //
+  /// CHECK-START: double Main.$noinline$test03(int) load_store_elimination (before)
+  /// CHECK:      ArrayGet loop:none
+  /// CHECK-NEXT: Return
+
+  /// CHECK-START: double Main.$noinline$test03(int) load_store_elimination (after)
+  /// CHECK-NOT:  ArrayGet loop:none
+  static double $noinline$test03(int n) {
+    double a[] = new double[n];
+    double b[] = new double[n];
+
+    a[0] = 2.0;
+
+    // The following loop is vectorized.
+    for (int i = 0; i < 128; ++i) {
+      b[i] += a[i];
+    }
+
+    a[0] = 2.0;
+    return a[0]; // ArrayGet should be removed by LSE.
+  }
+
+  static void test03() {
+    double norma = $noinline$test03(128);
+    System.out.println((int)norma);
+  }
+
+  // Check LSE eliminates VecLoad.
+  //
+  /// CHECK-START-ARM64: double[] Main.$noinline$test04(int) load_store_elimination (before)
+  /// CHECK:             VecStore
+  /// CHECK-NEXT:        VecLoad
+  /// CHECK-NEXT:        VecAdd
+  /// CHECK-NEXT:        VecStore
+  /// CHECK-NEXT:        Add
+  /// CHECK-NEXT:        Goto loop:{{B\d+}}
+
+  /// CHECK-START-ARM64: double[] Main.$noinline$test04(int) load_store_elimination (after)
+  /// CHECK:             VecStore
+  /// CHECK-NEXT:        VecAdd
+  /// CHECK-NEXT:        VecStore
+  /// CHECK-NEXT:        Add
+  /// CHECK-NEXT:        Goto loop:{{B\d+}}
+  static double[] $noinline$test04(int n) {
+    double a[] = new double[n];
+    double b[] = new double[n];
+
+    // The following loop is vectorized.
+    for (int i = 0; i < n; ++i) {
+      a[i] = 1;
+      b[i] = a[i] + a[i]; // VecLoad should be removed by LSE.
+    }
+
+    return b;
+  }
+
+  static void test04() {
+    double norma = $noinline$test04(128)[0];
+    System.out.println((int)norma);
+  }
+
+  // Check LSE eliminates VecLoad.
+  //
+  /// CHECK-START-ARM64: double[] Main.$noinline$test05(int) load_store_elimination (before)
+  /// CHECK:             VecStore
+  /// CHECK-NEXT:        VecLoad
+  /// CHECK-NEXT:        VecStore
+  /// CHECK-NEXT:        VecStore
+  /// CHECK-NEXT:        Add
+  /// CHECK-NEXT:        Goto loop:{{B\d+}}
+
+  /// CHECK-START-ARM64: double[] Main.$noinline$test05(int) load_store_elimination (after)
+  /// CHECK:             VecStore
+  /// CHECK-NEXT:        VecStore
+  /// CHECK-NEXT:        Add
+  /// CHECK-NEXT:        Goto loop:{{B\d+}}
+  static double[] $noinline$test05(int n) {
+    double a[] = new double[n];
+    double b[] = new double[n];
+
+    // The following loop is vectorized.
+    for (int i = 0; i < n; ++i) {
+      a[i] = 1;
+      b[i] = a[i];
+      a[i] = 1;
+    }
+
+    return b;
+  }
+
+  static void test05() {
+    double norma = $noinline$test05(128)[0];
+    System.out.println((int)norma);
+  }
+
+  // Check LSE eliminates VecLoad and ArrayGet in case of singletons and default values.
+  //
+  /// CHECK-START-ARM64: double[] Main.$noinline$test06(int) load_store_elimination (before)
+  /// CHECK:             BoundsCheck loop:none
+  /// CHECK-NEXT:        ArrayGet
+  /// CHECK-NEXT:        Add
+  /// CHECK-NEXT:        ArrayLength
+  //
+  /// CHECK:             VecLoad loop:{{B\d+}}
+  /// CHECK-NEXT:        VecStore
+  /// CHECK-NEXT:        VecLoad
+  /// CHECK-NEXT:        VecLoad
+  /// CHECK-NEXT:        VecAdd
+  /// CHECK-NEXT:        VecAdd
+  /// CHECK-NEXT:        VecStore
+
+  /// CHECK-START-ARM64: double[] Main.$noinline$test06(int) load_store_elimination (after)
+  /// CHECK:             BoundsCheck loop:none
+  /// CHECK-NEXT:        Add
+  /// CHECK-NEXT:        ArrayLength
+  //
+  /// CHECK:             VecLoad loop:{{B\d+}}
+  /// CHECK-NEXT:        VecStore
+  /// CHECK-NEXT:        VecAdd
+  /// CHECK-NEXT:        VecAdd
+  /// CHECK-NEXT:        VecStore
+  static double[] $noinline$test06(int n) {
+    double a[] = new double[n];
+    double b[] = new double[n];
+
+    double r = a[0] + 1.0; // ArrayGet:a[0] is eliminated and default 0.0 is used.
+    // The following loop is vectorized.
+    for (int i = 0; i < n; ++i) {
+      b[i] = a[i]; // VecLoad:a[i] is not eliminated.
+      b[i] += a[i] + r; // VecLoad:a[i] and VecLoad:b[i] are eliminated.
+    }
+
+    return b;
+  }
+
+  static void test06() {
+    double norma = $noinline$test06(128)[0];
+    System.out.println((int)norma);
+  }
+
+  public static void main(String[] args) {
+    test01();
+    test02();
+    test03();
+    test04();
+    test05();
+    test06();
+  }
+}
+
diff --git a/test/536-checker-intrinsic-optimization/smali/SmaliTests.smali b/test/536-checker-intrinsic-optimization/smali/SmaliTests.smali
index ffb1853..87aca6f 100644
--- a/test/536-checker-intrinsic-optimization/smali/SmaliTests.smali
+++ b/test/536-checker-intrinsic-optimization/smali/SmaliTests.smali
@@ -15,11 +15,11 @@
 .class public LSmaliTests;
 .super Ljava/lang/Object;
 
-## CHECK-START: char SmaliTests.stringCharAtCatch(java.lang.String, int) instruction_simplifier (before)
+## CHECK-START: char SmaliTests.$noinline$stringCharAtCatch(java.lang.String, int) instruction_simplifier (before)
 ## CHECK-DAG:  <<Char:c\d+>>     InvokeVirtual intrinsic:StringCharAt
 ## CHECK-DAG:                    Return [<<Char>>]
 
-## CHECK-START: char SmaliTests.stringCharAtCatch(java.lang.String, int) instruction_simplifier (after)
+## CHECK-START: char SmaliTests.$noinline$stringCharAtCatch(java.lang.String, int) instruction_simplifier (after)
 ## CHECK-DAG:  <<String:l\d+>>   ParameterValue
 ## CHECK-DAG:  <<Pos:i\d+>>      ParameterValue
 ## CHECK-DAG:  <<NullCk:l\d+>>   NullCheck [<<String>>]
@@ -28,23 +28,15 @@
 ## CHECK-DAG:  <<Char:c\d+>>     ArrayGet [<<NullCk>>,<<Bounds>>] is_string_char_at:true
 ## CHECK-DAG:                    Return [<<Char>>]
 
-## CHECK-START: char SmaliTests.stringCharAtCatch(java.lang.String, int) instruction_simplifier (after)
+## CHECK-START: char SmaliTests.$noinline$stringCharAtCatch(java.lang.String, int) instruction_simplifier (after)
 ## CHECK-NOT:                    InvokeVirtual intrinsic:StringCharAt
-.method public static stringCharAtCatch(Ljava/lang/String;I)C
+.method public static $noinline$stringCharAtCatch(Ljava/lang/String;I)C
     .registers 4
     .param p0, "s"    # Ljava/lang/String;
     .param p1, "pos"    # I
 
     .prologue
 
-    # if (doThrow) { throw new Error(); }
-    sget-boolean v1, LMain;->doThrow:Z
-    if-eqz v1, :doThrow_false
-    new-instance v1, Ljava/lang/Error;
-    invoke-direct {v1}, Ljava/lang/Error;-><init>()V
-    throw v1
-
-  :doThrow_false
   :try_start
     # tmp = s.charAt(pos)
     invoke-virtual {p0, p1}, Ljava/lang/String;->charAt(I)C
diff --git a/test/536-checker-intrinsic-optimization/src/Main.java b/test/536-checker-intrinsic-optimization/src/Main.java
index 83a89a6..980df70 100644
--- a/test/536-checker-intrinsic-optimization/src/Main.java
+++ b/test/536-checker-intrinsic-optimization/src/Main.java
@@ -17,7 +17,20 @@
 import java.lang.reflect.Method;
 
 public class Main {
-  public static boolean doThrow = false;
+  public static String smallString = generateString(100);
+  public static String mediumString = generateString(300);
+  public static String largeString = generateString(2000);
+
+  public static String generateString(int length) {
+    // Generate a string in the ASCII range that will
+    // use string compression.
+    StringBuilder sb = new StringBuilder();
+    for (int i = 0; i < length; i++) {
+      // Generate repeating alphabet.
+      sb.append(Character.valueOf((char)('a' + (i % 26))));
+    }
+    return sb.toString();
+  }
 
   public static void assertIntEquals(int expected, int result) {
     if (expected != result) {
@@ -37,6 +50,12 @@
     }
   }
 
+  public static void assertStringEquals(String expected, String result) {
+    if (!expected.equals(result)) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
   public static void assertStringContains(String searchTerm, String result) {
     if (result == null || !result.contains(searchTerm)) {
       throw new Error("Search term: " + searchTerm + ", not found in: " + result);
@@ -61,6 +80,30 @@
     assertCharEquals('c', $opt$noinline$stringCharAt("abc", 2));
     assertCharEquals('7', $opt$noinline$stringCharAt("0123456789", 7));
 
+    // Single character.
+    assertStringEquals("a", stringGetCharsAndBack("a"));
+    // Strings < 8 characters.
+    assertStringEquals("foobar", stringGetCharsAndBack("foobar"));
+    // Strings > 8 characters of various lengths.
+    assertStringEquals(smallString, stringGetCharsAndBack(smallString));
+    assertStringEquals(mediumString, stringGetCharsAndBack(mediumString));
+    assertStringEquals(largeString, stringGetCharsAndBack(largeString));
+
+    // Get only a substring:
+    // Substring < 8 characters.
+    assertStringEquals(smallString.substring(5, 10), stringGetCharsRange(smallString, 5, 10, 0));
+    // Substring > 8 characters.
+    assertStringEquals(smallString.substring(7, 28), stringGetCharsRange(smallString, 7, 28, 0));
+
+    // Get full string with offset in the char array.
+    assertStringEquals(smallString, stringGetCharsAndBackOffset(smallString, 17));
+
+    // Get a substring with an offset in the char array.
+    // Substring < 8 characters.
+    assertStringEquals(smallString.substring(5, 10), stringGetCharsRange(smallString, 5, 10, 17));
+    // Substring > 8 characters.
+    assertStringEquals(smallString.substring(7, 28), stringGetCharsRange(smallString, 7, 28, 17));
+
     try {
       $opt$noinline$stringCharAt("abc", -1);
       throw new Error("Should throw SIOOB.");
@@ -84,9 +127,9 @@
     }
 
     assertCharEquals('7', $opt$noinline$stringCharAtCatch("0123456789", 7));
-    assertCharEquals('7', $noinline$runSmaliTest("stringCharAtCatch", "0123456789", 7));
+    assertCharEquals('7', $noinline$runSmaliTest("$noinline$stringCharAtCatch", "0123456789", 7));
     assertCharEquals('\0', $opt$noinline$stringCharAtCatch("0123456789", 10));
-    assertCharEquals('\0', $noinline$runSmaliTest("stringCharAtCatch","0123456789", 10));
+    assertCharEquals('\0', $noinline$runSmaliTest("$noinline$stringCharAtCatch","0123456789", 10));
 
     assertIntEquals('a' + 'b' + 'c', $opt$noinline$stringSumChars("abc"));
     assertIntEquals('a' + 'b' + 'c', $opt$noinline$stringSumLeadingChars("abcdef", 3));
@@ -123,7 +166,6 @@
   /// CHECK-NOT:                    InvokeVirtual intrinsic:StringLength
 
   static public int $opt$noinline$getStringLength(String s) {
-    if (doThrow) { throw new Error(); }
     return s.length();
   }
 
@@ -143,7 +185,6 @@
   /// CHECK-NOT:                    InvokeVirtual intrinsic:StringIsEmpty
 
   static public boolean $opt$noinline$isStringEmpty(String s) {
-    if (doThrow) { throw new Error(); }
     return s.isEmpty();
   }
 
@@ -164,7 +205,6 @@
   /// CHECK-NOT:                    InvokeVirtual intrinsic:StringCharAt
 
   static public char $opt$noinline$stringCharAt(String s, int pos) {
-    if (doThrow) { throw new Error(); }
     return s.charAt(pos);
   }
 
@@ -191,7 +231,6 @@
   /// CHECK-NOT:                    InvokeVirtual intrinsic:StringCharAt
 
   static public char $opt$noinline$stringCharAtCatch(String s, int pos) {
-    if (doThrow) { throw new Error(); }
     try {
       return s.charAt(pos);
     } catch (StringIndexOutOfBoundsException ignored) {
@@ -221,7 +260,6 @@
   /// CHECK-NOT:                    BoundsCheck
 
   static public int $opt$noinline$stringSumChars(String s) {
-    if (doThrow) { throw new Error(); }
     int sum = 0;
     int len = s.length();
     for (int i = 0; i < len; ++i) {
@@ -248,7 +286,6 @@
   /// CHECK-NOT:                    BoundsCheck is_string_char_at:true
 
   static public int $opt$noinline$stringSumLeadingChars(String s, int n) {
-    if (doThrow) { throw new Error(); }
     int sum = 0;
     for (int i = 0; i < n; ++i) {
       sum += s.charAt(i);
@@ -286,7 +323,6 @@
   /// CHECK-NOT:                    BoundsCheck is_string_char_at:true
 
   static public int $opt$noinline$stringSum4LeadingChars(String s) {
-    if (doThrow) { throw new Error(); }
     int sum = s.charAt(0) + s.charAt(1) + s.charAt(2) + s.charAt(3);
     return sum;
   }
@@ -339,21 +375,6 @@
   // Terminate the scope for the CHECK-NOT search at the reference or length comparison,
   // whichever comes first.
   /// CHECK:          cmp {{w.*,}} {{w.*|#.*}}
-
-  /// CHECK-START-MIPS: boolean Main.stringArgumentNotNull(java.lang.Object) disassembly (after)
-  /// CHECK:          InvokeVirtual {{.*\.equals.*}} intrinsic:StringEquals
-  /// CHECK-NOT:      beq zero,
-  /// CHECK-NOT:      beqz
-  /// CHECK-NOT:      beqzc
-  // Terminate the scope for the CHECK-NOT search at the class field or length comparison,
-  // whichever comes first.
-  /// CHECK:          lw
-
-  /// CHECK-START-MIPS64: boolean Main.stringArgumentNotNull(java.lang.Object) disassembly (after)
-  /// CHECK:          InvokeVirtual {{.*\.equals.*}} intrinsic:StringEquals
-  /// CHECK-NOT:      beqzc
-  // Terminate the scope for the CHECK-NOT search at the reference comparison.
-  /// CHECK:          beqc
   public static boolean stringArgumentNotNull(Object obj) {
     obj.getClass();
     return "foo".equals(obj);
@@ -408,22 +429,6 @@
   /// CHECK-NOT:      ldr {{w\d+}}, [{{x\d+}}]
   /// CHECK-NOT:      ldr {{w\d+}}, [{{x\d+}}, #0]
   /// CHECK:          cmp {{w\d+}}, {{w\d+|#.*}}
-
-  // Test is brittle as it depends on the class offset being 0.
-  /// CHECK-START-MIPS: boolean Main.stringArgumentIsString() disassembly (after)
-  /// CHECK:          InvokeVirtual intrinsic:StringEquals
-  /// CHECK:          beq{{(zc)?}}
-  // Check that we don't try to compare the classes.
-  /// CHECK-NOT:      lw {{r\d+}}, +0({{r\d+}})
-  /// CHECK:          bne{{c?}}
-
-  // Test is brittle as it depends on the class offset being 0.
-  /// CHECK-START-MIPS64: boolean Main.stringArgumentIsString() disassembly (after)
-  /// CHECK:          InvokeVirtual intrinsic:StringEquals
-  /// CHECK:          beqzc
-  // Check that we don't try to compare the classes.
-  /// CHECK-NOT:      lw {{r\d+}}, +0({{r\d+}})
-  /// CHECK:          bnec
   public static boolean stringArgumentIsString() {
     return "foo".equals(myString);
   }
@@ -440,4 +445,22 @@
       throw new Error(ex);
     }
   }
+
+  public static String stringGetCharsAndBack(String src) {
+    char[] dst = new char[src.length()];
+    src.getChars(0, src.length(), dst, 0);
+    return new String(dst);
+  }
+
+  public static String stringGetCharsAndBackOffset(String src, int offset) {
+    char[] dst = new char[src.length() + offset];
+    src.getChars(0, src.length(), dst, offset);
+    return new String(dst, offset, src.length());
+  }
+
+  public static String stringGetCharsRange(String src, int srcBegin, int srcEnd, int offset) {
+    char[] dst = new char[srcEnd - srcBegin + offset];
+    src.getChars(srcBegin, srcEnd, dst, offset);
+    return new String(dst, offset, srcEnd - srcBegin);
+  }
 }
diff --git a/test/543-env-long-ref/env_long_ref.cc b/test/543-env-long-ref/env_long_ref.cc
index 1885f8d..1c30d46 100644
--- a/test/543-env-long-ref/env_long_ref.cc
+++ b/test/543-env-long-ref/env_long_ref.cc
@@ -34,6 +34,11 @@
 
         if (m_name == "testCase") {
           found = true;
+          // For optimized non-debuggable code do not expect dex register info to be present.
+          if (stack_visitor->GetCurrentShadowFrame() == nullptr &&
+              !Runtime::Current()->IsAsyncDeoptimizeable(stack_visitor->GetCurrentQuickFramePc())) {
+            return true;
+          }
           uint32_t stack_value = 0;
           CHECK(stack_visitor->GetVReg(m, 1, kReferenceVReg, &stack_value));
           CHECK_EQ(reinterpret_cast<mirror::Object*>(stack_value),
diff --git a/test/550-checker-multiply-accumulate/src/Main.java b/test/550-checker-multiply-accumulate/src/Main.java
index b76efea..55dd5b5 100644
--- a/test/550-checker-multiply-accumulate/src/Main.java
+++ b/test/550-checker-multiply-accumulate/src/Main.java
@@ -424,16 +424,16 @@
     return - (left * right);
   }
 
-  /// CHECK-START-{ARM64,MIPS64}: void Main.SimdMulAdd(int[], int[]) instruction_simplifier$after_bce (before)
+  /// CHECK-START-ARM64: void Main.SimdMulAdd(int[], int[]) instruction_simplifier$after_bce (before)
   /// CHECK-DAG:     Phi                            loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG:     VecMul                         loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:     VecAdd                         loop:<<Loop>>      outer_loop:none
 
-  /// CHECK-START-{ARM64,MIPS64}: void Main.SimdMulAdd(int[], int[]) instruction_simplifier$after_bce (after)
+  /// CHECK-START-ARM64: void Main.SimdMulAdd(int[], int[]) instruction_simplifier$after_bce (after)
   /// CHECK-DAG:     Phi                            loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG:     VecMultiplyAccumulate kind:Add loop:<<Loop>>      outer_loop:none
 
-  /// CHECK-START-{ARM64,MIPS64}: void Main.SimdMulAdd(int[], int[]) instruction_simplifier$after_bce (after)
+  /// CHECK-START-ARM64: void Main.SimdMulAdd(int[], int[]) instruction_simplifier$after_bce (after)
   /// CHECK-NOT:     VecMul
   /// CHECK-NOT:     VecAdd
 
@@ -443,34 +443,22 @@
     }
   }
 
-  /// CHECK-START-MIPS64: void Main.SimdMulAddLong(long[], long[]) instruction_simplifier$after_bce (before)
-  /// CHECK-DAG:     Phi                            loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG:     VecMul                         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:     VecAdd                         loop:<<Loop>>      outer_loop:none
-
-  /// CHECK-START-MIPS64: void Main.SimdMulAddLong(long[], long[]) instruction_simplifier$after_bce (after)
-  /// CHECK-DAG:     Phi                            loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG:     VecMultiplyAccumulate kind:Add loop:<<Loop>>      outer_loop:none
-
-  /// CHECK-START-MIPS64: void Main.SimdMulAddLong(long[], long[]) instruction_simplifier$after_bce (after)
-  /// CHECK-NOT:     VecMul
-  /// CHECK-NOT:     VecAdd
   public static void SimdMulAddLong(long[] array1, long[] array2) {
     for (int j = 0; j < 100; j++) {
       array2[j] += 12345 * array1[j];
     }
   }
 
-  /// CHECK-START-{ARM64,MIPS64}: void Main.SimdMulSub(int[], int[]) instruction_simplifier$after_bce (before)
+  /// CHECK-START-ARM64: void Main.SimdMulSub(int[], int[]) instruction_simplifier$after_bce (before)
   /// CHECK-DAG:     Phi                            loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG:     VecMul                         loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:     VecSub                         loop:<<Loop>>      outer_loop:none
 
-  /// CHECK-START-{ARM64,MIPS64}: void Main.SimdMulSub(int[], int[]) instruction_simplifier$after_bce (after)
+  /// CHECK-START-ARM64: void Main.SimdMulSub(int[], int[]) instruction_simplifier$after_bce (after)
   /// CHECK-DAG:     Phi                            loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG:     VecMultiplyAccumulate kind:Sub loop:<<Loop>>      outer_loop:none
 
-  /// CHECK-START-{ARM64,MIPS64}: void Main.SimdMulSub(int[], int[]) instruction_simplifier$after_bce (after)
+  /// CHECK-START-ARM64: void Main.SimdMulSub(int[], int[]) instruction_simplifier$after_bce (after)
   /// CHECK-NOT:     VecMul
   /// CHECK-NOT:     VecSub
 
@@ -480,30 +468,18 @@
     }
   }
 
-  /// CHECK-START-MIPS64: void Main.SimdMulSubLong(long[], long[]) instruction_simplifier$after_bce (before)
-  /// CHECK-DAG:     Phi                            loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG:     VecMul                         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:     VecSub                         loop:<<Loop>>      outer_loop:none
-
-  /// CHECK-START-MIPS64: void Main.SimdMulSubLong(long[], long[]) instruction_simplifier$after_bce (after)
-  /// CHECK-DAG:     Phi                            loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG:     VecMultiplyAccumulate kind:Sub loop:<<Loop>>      outer_loop:none
-
-  /// CHECK-START-MIPS64: void Main.SimdMulSubLong(long[], long[]) instruction_simplifier$after_bce (after)
-  /// CHECK-NOT:     VecMul
-  /// CHECK-NOT:     VecSub
   public static void SimdMulSubLong(long[] array1, long[] array2) {
     for (int j = 0; j < 100; j++) {
       array2[j] -= 12345 * array1[j];
     }
   }
 
-  /// CHECK-START-{ARM64,MIPS64}: void Main.SimdMulMultipleUses(int[], int[]) instruction_simplifier$after_bce (before)
+  /// CHECK-START-ARM64: void Main.SimdMulMultipleUses(int[], int[]) instruction_simplifier$after_bce (before)
   /// CHECK-DAG:     Phi                            loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG:     VecMul                         loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:     VecSub                         loop:<<Loop>>      outer_loop:none
 
-  /// CHECK-START-{ARM64,MIPS64}: void Main.SimdMulMultipleUses(int[], int[]) instruction_simplifier$after_bce (after)
+  /// CHECK-START-ARM64: void Main.SimdMulMultipleUses(int[], int[]) instruction_simplifier$after_bce (after)
   /// CHECK-NOT: VecMultiplyAccumulate
 
   public static void SimdMulMultipleUses(int[] array1, int[] array2) {
@@ -514,13 +490,6 @@
     }
   }
 
-  /// CHECK-START-MIPS64: void Main.SimdMulMultipleUsesLong(long[], long[]) instruction_simplifier$after_bce (before)
-  /// CHECK-DAG:     Phi                            loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG:     VecMul                         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:     VecSub                         loop:<<Loop>>      outer_loop:none
-
-  /// CHECK-START-MIPS64: void Main.SimdMulMultipleUsesLong(long[], long[]) instruction_simplifier$after_bce (after)
-  /// CHECK-NOT: VecMultiplyAccumulate
   public static void SimdMulMultipleUsesLong(long[] array1, long[] array2) {
     for (int j = 0; j < 100; j++) {
        long temp = 12345 * array1[j];
diff --git a/test/552-checker-sharpening/src/Main.java b/test/552-checker-sharpening/src/Main.java
index 0bceffd..657cc93 100644
--- a/test/552-checker-sharpening/src/Main.java
+++ b/test/552-checker-sharpening/src/Main.java
@@ -41,7 +41,7 @@
     return x;
   }
 
-  /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: int Main.testSimple(int) builder (after)
+  /// CHECK-START-{ARM,ARM64,X86,X86_64}: int Main.testSimple(int) builder (after)
   /// CHECK:                InvokeStaticOrDirect method_load_kind:BssEntry
 
   /// CHECK-START-X86: int Main.testSimple(int) pc_relative_fixups_x86 (before)
@@ -56,7 +56,7 @@
     return $noinline$foo(x);
   }
 
-  /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: int Main.testDiamond(boolean, int) builder (after)
+  /// CHECK-START-{ARM,ARM64,X86,X86_64}: int Main.testDiamond(boolean, int) builder (after)
   /// CHECK:                InvokeStaticOrDirect method_load_kind:BssEntry
   /// CHECK:                InvokeStaticOrDirect method_load_kind:BssEntry
 
@@ -73,7 +73,7 @@
 
   public static int testDiamond(boolean negate, int x) {
     // These calls should use PC-relative loads to retrieve the target method.
-    // PC-relative bases used by MIPS32R2 and X86 should be pulled before the If.
+    // PC-relative bases used by X86 should be pulled before the If.
     if (negate) {
       return $noinline$foo(-x);
     } else {
@@ -100,7 +100,7 @@
   /// CHECK:                InvokeStaticOrDirect method_load_kind:BssEntry
 
   public static int testLoop(int[] array, int x) {
-    // PC-relative bases used by MIPS32R2 and X86 should be pulled before the loop.
+    // PC-relative bases used by X86 should be pulled before the loop.
     for (int i : array) {
       x += $noinline$foo(i);
     }
@@ -118,8 +118,7 @@
   /// CHECK-NEXT:           Goto
 
   public static int testLoopWithDiamond(int[] array, boolean negate, int x) {
-    // PC-relative bases used by MIPS32R2 and X86 should be pulled before the loop
-    // but not outside the if.
+    // PC-relative bases used by X86 should be pulled before the loop but not outside the if.
     if (array != null) {
       for (int i : array) {
         if (negate) {
@@ -132,7 +131,7 @@
     return x;
   }
 
-  /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.String Main.$noinline$getBootImageString() builder (after)
+  /// CHECK-START-{ARM,ARM64,X86,X86_64}: java.lang.String Main.$noinline$getBootImageString() builder (after)
   /// CHECK:                LoadString load_kind:BootImageRelRo
 
   public static String $noinline$getBootImageString() {
@@ -142,7 +141,7 @@
     return "";
   }
 
-  /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.String Main.$noinline$getNonBootImageString() builder (after)
+  /// CHECK-START-{ARM,ARM64,X86,X86_64}: java.lang.String Main.$noinline$getNonBootImageString() builder (after)
   /// CHECK:                LoadString load_kind:BssEntry
 
   /// CHECK-START-X86: java.lang.String Main.$noinline$getNonBootImageString() pc_relative_fixups_x86 (before)
@@ -159,7 +158,7 @@
     return "non-boot-image-string";
   }
 
-  /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.Class Main.$noinline$getStringClass() builder (after)
+  /// CHECK-START-{ARM,ARM64,X86,X86_64}: java.lang.Class Main.$noinline$getStringClass() builder (after)
   /// CHECK:                LoadClass load_kind:BootImageRelRo class_name:java.lang.String
 
   public static Class<?> $noinline$getStringClass() {
@@ -169,7 +168,7 @@
     return String.class;
   }
 
-  /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.Class Main.$noinline$getOtherClass() builder (after)
+  /// CHECK-START-{ARM,ARM64,X86,X86_64}: java.lang.Class Main.$noinline$getOtherClass() builder (after)
   /// CHECK:                LoadClass load_kind:BssEntry class_name:Other
 
   /// CHECK-START-X86: java.lang.Class Main.$noinline$getOtherClass() pc_relative_fixups_x86 (before)
@@ -186,13 +185,13 @@
     return Other.class;
   }
 
-  /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.String Main.$noinline$toHexString(int) builder (after)
+  /// CHECK-START-{ARM,ARM64,X86,X86_64}: java.lang.String Main.$noinline$toHexString(int) builder (after)
   /// CHECK:                InvokeStaticOrDirect method_load_kind:BootImageRelRo
   public static String $noinline$toHexString(int value) {
     return Integer.toString(value, 16);
   }
 
-  /// CHECK-START-{ARM,ARM64,MIPS,MIPS64,X86,X86_64}: java.lang.String Main.$noinline$toHexStringIndirect(int) builder (after)
+  /// CHECK-START-{ARM,ARM64,X86,X86_64}: java.lang.String Main.$noinline$toHexStringIndirect(int) builder (after)
   /// CHECK:                InvokeStaticOrDirect method_load_kind:BssEntry
 
   /// CHECK-START-X86: java.lang.String Main.$noinline$toHexStringIndirect(int) pc_relative_fixups_x86 (before)
diff --git a/test/563-checker-fakestring/src/Main.java b/test/563-checker-fakestring/src/Main.java
index 77a108f..1ca80f0 100644
--- a/test/563-checker-fakestring/src/Main.java
+++ b/test/563-checker-fakestring/src/Main.java
@@ -22,6 +22,7 @@
   class Inner {}
 
   public static native void assertIsInterpreted();
+  public static native void ensureJitCompiled(Class<?> cls, String methodName);
 
   public static void assertEqual(String expected, String actual) {
     if (!expected.equals(actual)) {
@@ -53,6 +54,8 @@
     }
 
     {
+      // If the JIT is enabled, ensure it has compiled the method to force the deopt.
+      ensureJitCompiled(c, "deoptimizeNewInstance");
       Method m = c.getMethod("deoptimizeNewInstance", int[].class, byte[].class);
       try {
         m.invoke(null, new Object[] { new int[] { 1, 2, 3 }, testData });
@@ -114,6 +117,8 @@
       assertEqual(testString, result);
     }
     {
+      // If the JIT is enabled, ensure it has compiled the method to force the deopt.
+      ensureJitCompiled(c, "deoptimizeNewInstanceAfterLoop");
       Method m = c.getMethod(
           "deoptimizeNewInstanceAfterLoop", int[].class, byte[].class, int.class);
       try {
diff --git a/test/565-checker-condition-liveness/src/Main.java b/test/565-checker-condition-liveness/src/Main.java
index 25ec3f5..17a8613 100644
--- a/test/565-checker-condition-liveness/src/Main.java
+++ b/test/565-checker-condition-liveness/src/Main.java
@@ -55,10 +55,10 @@
 
   // X86 and X86_64 generate at use site the ArrayLength, meaning only the BoundsCheck will have environment uses.
   /// CHECK-START-{X86,X86_64}: void Main.testThrowIntoCatchBlock(int, java.lang.Object, int[]) liveness (after)
-  /// CHECK-DAG:  <<IntArg:i\d+>>   ParameterValue        env_uses:[25]
-  /// CHECK-DAG:  <<RefArg:l\d+>>   ParameterValue        env_uses:[11,25]
-  /// CHECK-DAG:  <<Array:l\d+>>    ParameterValue        env_uses:[11,25]
-  /// CHECK-DAG:  <<Const1:i\d+>>   IntConstant 1         env_uses:[25]
+  /// CHECK-DAG:  <<IntArg:i\d+>>   ParameterValue        env_uses:[25,25]
+  /// CHECK-DAG:  <<RefArg:l\d+>>   ParameterValue        env_uses:[11,25,25]
+  /// CHECK-DAG:  <<Array:l\d+>>    ParameterValue        env_uses:[11,25,25]
+  /// CHECK-DAG:  <<Const1:i\d+>>   IntConstant 1         env_uses:[25,25]
   /// CHECK-DAG:                    SuspendCheck          env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]]           liveness:10
   /// CHECK-DAG:                    NullCheck             env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:20
   /// CHECK-DAG:                    ArrayLength                                                               liveness:22
@@ -66,10 +66,10 @@
   /// CHECK-DAG:                    TryBoundary
 
   /// CHECK-START-{X86,X86_64}-DEBUGGABLE: void Main.testThrowIntoCatchBlock(int, java.lang.Object, int[]) liveness (after)
-  /// CHECK-DAG:  <<IntArg:i\d+>>   ParameterValue        env_uses:[11,25]
-  /// CHECK-DAG:  <<RefArg:l\d+>>   ParameterValue        env_uses:[11,25]
-  /// CHECK-DAG:  <<Array:l\d+>>    ParameterValue        env_uses:[11,25]
-  /// CHECK-DAG:  <<Const1:i\d+>>   IntConstant 1         env_uses:[25]
+  /// CHECK-DAG:  <<IntArg:i\d+>>   ParameterValue        env_uses:[11,25,25]
+  /// CHECK-DAG:  <<RefArg:l\d+>>   ParameterValue        env_uses:[11,25,25]
+  /// CHECK-DAG:  <<Array:l\d+>>    ParameterValue        env_uses:[11,25,25]
+  /// CHECK-DAG:  <<Const1:i\d+>>   IntConstant 1         env_uses:[25,25]
   /// CHECK-DAG:                    SuspendCheck          env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]]           liveness:10
   /// CHECK-DAG:                    NullCheck             env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:20
   /// CHECK-DAG:                    ArrayLength                                                               liveness:22
@@ -108,8 +108,8 @@
 
   /// CHECK-START-{X86,X86_64}: void Main.testBoundsCheck(int, java.lang.Object, int[]) liveness (after)
   /// CHECK-DAG:  <<IntArg:i\d+>>   ParameterValue        env_uses:[]
-  /// CHECK-DAG:  <<RefArg:l\d+>>   ParameterValue        env_uses:[11,21]
-  /// CHECK-DAG:  <<Array:l\d+>>    ParameterValue        env_uses:[11,21]
+  /// CHECK-DAG:  <<RefArg:l\d+>>   ParameterValue        env_uses:[11,21,21]
+  /// CHECK-DAG:  <<Array:l\d+>>    ParameterValue        env_uses:[11,21,21]
   /// CHECK-DAG:  <<Const1:i\d+>>   IntConstant 1         env_uses:[]
   /// CHECK-DAG:                    SuspendCheck          env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]]           liveness:10
   /// CHECK-DAG:                    NullCheck             env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:16
@@ -117,10 +117,10 @@
   /// CHECK-DAG:                    BoundsCheck           env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:20
 
   /// CHECK-START-{X86,X86_64}-DEBUGGABLE: void Main.testBoundsCheck(int, java.lang.Object, int[]) liveness (after)
-  /// CHECK-DAG:  <<IntArg:i\d+>>   ParameterValue        env_uses:[11,21]
-  /// CHECK-DAG:  <<RefArg:l\d+>>   ParameterValue        env_uses:[11,21]
-  /// CHECK-DAG:  <<Array:l\d+>>    ParameterValue        env_uses:[11,21]
-  /// CHECK-DAG:  <<Const1:i\d+>>   IntConstant 1         env_uses:[21]
+  /// CHECK-DAG:  <<IntArg:i\d+>>   ParameterValue        env_uses:[11,21,21]
+  /// CHECK-DAG:  <<RefArg:l\d+>>   ParameterValue        env_uses:[11,21,21]
+  /// CHECK-DAG:  <<Array:l\d+>>    ParameterValue        env_uses:[11,21,21]
+  /// CHECK-DAG:  <<Const1:i\d+>>   IntConstant 1         env_uses:[21,21]
   /// CHECK-DAG:                    SuspendCheck          env:[[_,<<IntArg>>,<<RefArg>>,<<Array>>]]           liveness:10
   /// CHECK-DAG:                    NullCheck             env:[[<<Const1>>,<<IntArg>>,<<RefArg>>,<<Array>>]]  liveness:16
   /// CHECK-DAG:                    ArrayLength                                                               liveness:18
diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc
index 00827cf..37d998c 100644
--- a/test/566-polymorphic-inlining/polymorphic_inline.cc
+++ b/test/566-polymorphic-inlining/polymorphic_inline.cc
@@ -46,11 +46,11 @@
       usleep(1000);
     }
     // Will either ensure it's compiled or do the compilation itself.
-    jit->CompileMethod(method, soa.Self(), /*baseline=*/ false, /*osr=*/ false);
+    jit->CompileMethod(method, soa.Self(), /*baseline=*/ false, /*osr=*/ false, /*prejit=*/ false);
   }
 
   CodeInfo info(header);
-  CHECK(info.HasInlineInfo());
+  CHECK(info.HasInlineInfo()) << method->PrettyMethod();
 }
 
 static void allocate_profiling_info(jclass cls, const char* method_name) {
@@ -66,8 +66,8 @@
     return;
   }
 
-  allocate_profiling_info(cls, "testInvokeVirtual");
-  allocate_profiling_info(cls, "testInvokeInterface");
+  allocate_profiling_info(cls, "$noinline$testInvokeVirtual");
+  allocate_profiling_info(cls, "$noinline$testInvokeInterface");
   allocate_profiling_info(cls, "$noinline$testInlineToSameTarget");
 }
 
@@ -82,9 +82,9 @@
     return;
   }
 
-  do_checks(cls, "testInvokeVirtual");
-  do_checks(cls, "testInvokeInterface");
-  do_checks(cls, "testInvokeInterface2");
+  do_checks(cls, "$noinline$testInvokeVirtual");
+  do_checks(cls, "$noinline$testInvokeInterface");
+  do_checks(cls, "$noinline$testInvokeInterface2");
   do_checks(cls, "$noinline$testInlineToSameTarget");
 }
 
diff --git a/test/566-polymorphic-inlining/src/Main.java b/test/566-polymorphic-inlining/src/Main.java
index 793b85f..f8354be 100644
--- a/test/566-polymorphic-inlining/src/Main.java
+++ b/test/566-polymorphic-inlining/src/Main.java
@@ -44,16 +44,16 @@
     // Create the profiling info eagerly to make sure they are filled.
     ensureProfilingInfo566();
 
-    // Make testInvokeVirtual and testInvokeInterface hot to get them jitted.
+    // Make $noinline$testInvokeVirtual and $noinline$testInvokeInterface hot to get them jitted.
     // We pass Main and Subclass to get polymorphic inlining based on calling
     // the same method.
-    for (int i = 0; i < 10000; ++i) {
-      testInvokeVirtual(mains[0]);
-      testInvokeVirtual(mains[1]);
-      testInvokeInterface(itfs[0]);
-      testInvokeInterface(itfs[1]);
-      testInvokeInterface2(itfs[0]);
-      testInvokeInterface2(itfs[1]);
+    for (int i = 0; i < 1000000; ++i) {
+      $noinline$testInvokeVirtual(mains[0]);
+      $noinline$testInvokeVirtual(mains[1]);
+      $noinline$testInvokeInterface(itfs[0]);
+      $noinline$testInvokeInterface(itfs[1]);
+      $noinline$testInvokeInterface2(itfs[0]);
+      $noinline$testInvokeInterface2(itfs[1]);
       $noinline$testInlineToSameTarget(mains[0]);
       $noinline$testInlineToSameTarget(mains[1]);
     }
@@ -62,23 +62,23 @@
 
     // At this point, the JIT should have compiled both methods, and inline
     // sameInvokeVirtual and sameInvokeInterface.
-    assertEquals(Main.class, testInvokeVirtual(mains[0]));
-    assertEquals(Main.class, testInvokeVirtual(mains[1]));
+    assertEquals(Main.class, $noinline$testInvokeVirtual(mains[0]));
+    assertEquals(Main.class, $noinline$testInvokeVirtual(mains[1]));
 
-    assertEquals(Itf.class, testInvokeInterface(itfs[0]));
-    assertEquals(Itf.class, testInvokeInterface(itfs[1]));
+    assertEquals(Itf.class, $noinline$testInvokeInterface(itfs[0]));
+    assertEquals(Itf.class, $noinline$testInvokeInterface(itfs[1]));
 
-    assertEquals(Itf.class, testInvokeInterface2(itfs[0]));
-    assertEquals(Itf.class, testInvokeInterface2(itfs[1]));
+    assertEquals(Itf.class, $noinline$testInvokeInterface2(itfs[0]));
+    assertEquals(Itf.class, $noinline$testInvokeInterface2(itfs[1]));
 
     // This will trigger a deoptimization of the compiled code.
-    assertEquals(OtherSubclass.class, testInvokeVirtual(mains[2]));
-    assertEquals(OtherSubclass.class, testInvokeInterface(itfs[2]));
-    assertEquals(null, testInvokeInterface2(itfs[2]));
+    assertEquals(OtherSubclass.class, $noinline$testInvokeVirtual(mains[2]));
+    assertEquals(OtherSubclass.class, $noinline$testInvokeInterface(itfs[2]));
+    assertEquals(null, $noinline$testInvokeInterface2(itfs[2]));
 
     // Run this once to make sure we execute the JITted code.
     $noinline$testInlineToSameTarget(mains[0]);
-    assertEquals(20001, counter);
+    assertEquals(2000001, counter);
   }
 
   public Class<?> sameInvokeVirtual() {
@@ -101,11 +101,11 @@
     return Itf.class;
   }
 
-  public static Class<?> testInvokeInterface(Itf i) {
+  public static Class<?> $noinline$testInvokeInterface(Itf i) {
     return i.sameInvokeInterface();
   }
 
-  public static Class<?> testInvokeInterface2(Itf i) {
+  public static Class<?> $noinline$testInvokeInterface2(Itf i) {
     // Make three interface calls that will do a ClassTableGet to ensure bogus code
     // generation of ClassTableGet will crash.
     i.sameInvokeInterface();
@@ -113,7 +113,7 @@
     return i.sameInvokeInterface3();
   }
 
-  public static Class<?> testInvokeVirtual(Main m) {
+  public static Class<?> $noinline$testInvokeVirtual(Main m) {
     return m.sameInvokeVirtual();
   }
 
diff --git a/test/567-checker-compare/src/Main.java b/test/567-checker-compare/src/Main.java
index a3ff005..25d88aa 100644
--- a/test/567-checker-compare/src/Main.java
+++ b/test/567-checker-compare/src/Main.java
@@ -18,8 +18,6 @@
 
 public class Main {
 
-  public static boolean doThrow = false;
-
   /// CHECK-START: void Main.$opt$noinline$testReplaceInputWithItself(int) builder (after)
   /// CHECK-DAG:     <<ArgX:i\d+>>   ParameterValue
   /// CHECK-DAG:     <<Zero:i\d+>>   IntConstant 0
@@ -32,8 +30,6 @@
   /// CHECK-DAG:                     GreaterThanOrEqual [<<ArgX>>,<<Zero>>]
 
   public static void $opt$noinline$testReplaceInputWithItself(int x) {
-    if (doThrow) { throw new Error(); }
-
     // The instruction simplifier first replaces Integer.compare(x, 0) with Compare HIR
     // and then merges the Compare into the GreaterThanOrEqual. This is a regression
     // test that to check that it is allowed to replace the second input of the
diff --git a/test/570-checker-osr-locals/smali/WeirdLoop.smali b/test/570-checker-osr-locals/smali/WeirdLoop.smali
new file mode 100644
index 0000000..13cb4f9
--- /dev/null
+++ b/test/570-checker-osr-locals/smali/WeirdLoop.smali
@@ -0,0 +1,39 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LWeirdLoop;
+
+.super Ljava/lang/Object;
+
+.method public static weirdLoop()I
+    .registers 3
+    invoke-static {}, LMain;->$noinline$magicValue()I
+    move-result v0
+    const-string v1, "weirdLoop"
+    invoke-static {v1}, LMain;->isInInterpreter(Ljava/lang/String;)Z
+    move-result v2
+    if-eqz v2, :end
+    goto :mid
+
+    :top
+    invoke-static {}, LMain;->$noinline$magicValue()I
+    move-result v0
+    :mid
+    invoke-static {v1}, LMain;->isInOsrCode(Ljava/lang/String;)Z
+    move-result v2
+    if-eqz v2, :top
+    :end
+    return v0
+.end method
+
diff --git a/test/570-checker-osr-locals/src/Main.java b/test/570-checker-osr-locals/src/Main.java
index f4b3ab6..c215fa0 100644
--- a/test/570-checker-osr-locals/src/Main.java
+++ b/test/570-checker-osr-locals/src/Main.java
@@ -19,6 +19,7 @@
     System.loadLibrary(args[0]);
     while (runTests(true));
     runTests(false);
+    runSmaliTest();
   }
 
   public static boolean runTests(boolean warmup) {
@@ -65,6 +66,18 @@
     return true;
   }
 
+  public static void runSmaliTest() {
+    try {
+      Class<?> c = Class.forName("WeirdLoop");
+      int result = (int) c.getDeclaredMethod("weirdLoop").invoke(null);
+      if (result != 42) {
+        throw new Error("Unexpected result: " + result);
+      }
+    } catch (Throwable t) {
+      t.printStackTrace();
+    }
+  }
+
   public static int $noinline$magicValue() {
     return 42;
   }
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
index dc0e94c..22423e2 100644
--- a/test/570-checker-osr/osr.cc
+++ b/test/570-checker-osr/osr.cc
@@ -90,7 +90,8 @@
         const OatQuickMethodHeader* header =
             Runtime::Current()->GetJit()->GetCodeCache()->LookupOsrMethodHeader(m);
         if ((header == nullptr || header != stack_visitor->GetCurrentOatQuickMethodHeader()) &&
-            stack_visitor->IsShadowFrame()) {
+            (stack_visitor->IsShadowFrame() ||
+             stack_visitor->GetCurrentOatQuickMethodHeader()->IsNterpMethodHeader())) {
           in_interpreter = true;
         }
       });
@@ -128,7 +129,8 @@
           // Sleep to yield to the compiler thread.
           usleep(1000);
           // Will either ensure it's compiled or do the compilation itself.
-          jit->CompileMethod(m, Thread::Current(), /*baseline=*/ false, /*osr=*/ true);
+          jit->CompileMethod(
+              m, Thread::Current(), /*baseline=*/ false, /*osr=*/ true, /*prejit=*/ false);
         }
       });
 }
diff --git a/test/570-checker-select/src/Main.java b/test/570-checker-select/src/Main.java
index 2dad14c..0e23b3d 100644
--- a/test/570-checker-select/src/Main.java
+++ b/test/570-checker-select/src/Main.java
@@ -16,322 +16,272 @@
 
 public class Main {
 
-  static boolean doThrow = false;
-
-  /// CHECK-START: int Main.BoolCond_IntVarVar(boolean, int, int) register (after)
+  /// CHECK-START: int Main.$noinline$BoolCond_IntVarVar(boolean, int, int) register (after)
   /// CHECK:               Select [{{i\d+}},{{i\d+}},{{z\d+}}]
 
-  /// CHECK-START-ARM64: int Main.BoolCond_IntVarVar(boolean, int, int) disassembly (after)
+  /// CHECK-START-ARM64: int Main.$noinline$BoolCond_IntVarVar(boolean, int, int) disassembly (after)
   /// CHECK:               Select
   /// CHECK-NEXT:            cmp
   /// CHECK-NEXT:            csel ne
 
-  /// CHECK-START-X86_64: int Main.BoolCond_IntVarVar(boolean, int, int) disassembly (after)
+  /// CHECK-START-X86_64: int Main.$noinline$BoolCond_IntVarVar(boolean, int, int) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
   /// CHECK:                          cmovnz/ne
 
-  /// CHECK-START-X86: int Main.BoolCond_IntVarVar(boolean, int, int) disassembly (after)
+  /// CHECK-START-X86: int Main.$noinline$BoolCond_IntVarVar(boolean, int, int) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
   /// CHECK:                          cmovnz/ne
 
-  public static int BoolCond_IntVarVar(boolean cond, int x, int y) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static int $noinline$BoolCond_IntVarVar(boolean cond, int x, int y) {
     return cond ? x : y;
   }
 
-  /// CHECK-START: int Main.BoolCond_IntVarCst(boolean, int) register (after)
+  /// CHECK-START: int Main.$noinline$BoolCond_IntVarCst(boolean, int) register (after)
   /// CHECK:               Select [{{i\d+}},{{i\d+}},{{z\d+}}]
 
-  /// CHECK-START-ARM64: int Main.BoolCond_IntVarCst(boolean, int) disassembly (after)
+  /// CHECK-START-ARM64: int Main.$noinline$BoolCond_IntVarCst(boolean, int) disassembly (after)
   /// CHECK:               Select
   /// CHECK-NEXT:            cmp
   /// CHECK-NEXT:            csinc ne
 
-  /// CHECK-START-X86_64: int Main.BoolCond_IntVarCst(boolean, int) disassembly (after)
+  /// CHECK-START-X86_64: int Main.$noinline$BoolCond_IntVarCst(boolean, int) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
   /// CHECK:                          cmovnz/ne
 
-  /// CHECK-START-X86: int Main.BoolCond_IntVarCst(boolean, int) disassembly (after)
+  /// CHECK-START-X86: int Main.$noinline$BoolCond_IntVarCst(boolean, int) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
   /// CHECK:                          cmovnz/ne
 
-  public static int BoolCond_IntVarCst(boolean cond, int x) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static int $noinline$BoolCond_IntVarCst(boolean cond, int x) {
     return cond ? x : 1;
   }
 
-  /// CHECK-START: int Main.BoolCond_IntCstVar(boolean, int) register (after)
+  /// CHECK-START: int Main.$noinline$BoolCond_IntCstVar(boolean, int) register (after)
   /// CHECK:               Select [{{i\d+}},{{i\d+}},{{z\d+}}]
 
-  /// CHECK-START-ARM64: int Main.BoolCond_IntCstVar(boolean, int) disassembly (after)
+  /// CHECK-START-ARM64: int Main.$noinline$BoolCond_IntCstVar(boolean, int) disassembly (after)
   /// CHECK:               Select
   /// CHECK-NEXT:            cmp
   /// CHECK-NEXT:            csinc eq
 
-  /// CHECK-START-X86_64: int Main.BoolCond_IntCstVar(boolean, int) disassembly (after)
+  /// CHECK-START-X86_64: int Main.$noinline$BoolCond_IntCstVar(boolean, int) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
   /// CHECK:                          cmovnz/ne
 
-  /// CHECK-START-X86: int Main.BoolCond_IntCstVar(boolean, int) disassembly (after)
+  /// CHECK-START-X86: int Main.$noinline$BoolCond_IntCstVar(boolean, int) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
   /// CHECK:                          cmovnz/ne
 
-  public static int BoolCond_IntCstVar(boolean cond, int y) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static int $noinline$BoolCond_IntCstVar(boolean cond, int y) {
     return cond ? 1 : y;
   }
 
-  /// CHECK-START: long Main.BoolCond_LongVarVar(boolean, long, long) register (after)
+  /// CHECK-START: long Main.$noinline$BoolCond_LongVarVar(boolean, long, long) register (after)
   /// CHECK:               Select [{{j\d+}},{{j\d+}},{{z\d+}}]
 
-  /// CHECK-START-ARM64: long Main.BoolCond_LongVarVar(boolean, long, long) disassembly (after)
+  /// CHECK-START-ARM64: long Main.$noinline$BoolCond_LongVarVar(boolean, long, long) disassembly (after)
   /// CHECK:               Select
   /// CHECK-NEXT:            cmp
   /// CHECK-NEXT:            csel ne
 
-  /// CHECK-START-X86_64: long Main.BoolCond_LongVarVar(boolean, long, long) disassembly (after)
+  /// CHECK-START-X86_64: long Main.$noinline$BoolCond_LongVarVar(boolean, long, long) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
   /// CHECK:                          cmovnz/neq
 
-  /// CHECK-START-X86: long Main.BoolCond_LongVarVar(boolean, long, long) disassembly (after)
+  /// CHECK-START-X86: long Main.$noinline$BoolCond_LongVarVar(boolean, long, long) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
   /// CHECK:                          cmovnz/ne
   /// CHECK-NEXT:                     cmovnz/ne
 
-  public static long BoolCond_LongVarVar(boolean cond, long x, long y) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static long $noinline$BoolCond_LongVarVar(boolean cond, long x, long y) {
     return cond ? x : y;
   }
 
-  /// CHECK-START: long Main.BoolCond_LongVarCst(boolean, long) register (after)
+  /// CHECK-START: long Main.$noinline$BoolCond_LongVarCst(boolean, long) register (after)
   /// CHECK:               Select [{{j\d+}},{{j\d+}},{{z\d+}}]
 
-  /// CHECK-START-ARM64: long Main.BoolCond_LongVarCst(boolean, long) disassembly (after)
+  /// CHECK-START-ARM64: long Main.$noinline$BoolCond_LongVarCst(boolean, long) disassembly (after)
   /// CHECK:               Select
   /// CHECK-NEXT:            cmp
   /// CHECK-NEXT:            csinc ne
 
-  /// CHECK-START-X86_64: long Main.BoolCond_LongVarCst(boolean, long) disassembly (after)
+  /// CHECK-START-X86_64: long Main.$noinline$BoolCond_LongVarCst(boolean, long) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
   /// CHECK:                          cmovnz/neq
 
-  /// CHECK-START-X86: long Main.BoolCond_LongVarCst(boolean, long) disassembly (after)
+  /// CHECK-START-X86: long Main.$noinline$BoolCond_LongVarCst(boolean, long) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
   /// CHECK:                          cmovnz/ne
   /// CHECK-NEXT:                     cmovnz/ne
 
-  public static long BoolCond_LongVarCst(boolean cond, long x) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static long $noinline$BoolCond_LongVarCst(boolean cond, long x) {
     return cond ? x : 1L;
   }
 
-  /// CHECK-START: long Main.BoolCond_LongCstVar(boolean, long) register (after)
+  /// CHECK-START: long Main.$noinline$BoolCond_LongCstVar(boolean, long) register (after)
   /// CHECK:               Select [{{j\d+}},{{j\d+}},{{z\d+}}]
 
-  /// CHECK-START-ARM64: long Main.BoolCond_LongCstVar(boolean, long) disassembly (after)
+  /// CHECK-START-ARM64: long Main.$noinline$BoolCond_LongCstVar(boolean, long) disassembly (after)
   /// CHECK:               Select
   /// CHECK-NEXT:            cmp
   /// CHECK-NEXT:            csinc eq
 
-  /// CHECK-START-X86_64: long Main.BoolCond_LongCstVar(boolean, long) disassembly (after)
+  /// CHECK-START-X86_64: long Main.$noinline$BoolCond_LongCstVar(boolean, long) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
   /// CHECK:                          cmovnz/neq
 
-  /// CHECK-START-X86: long Main.BoolCond_LongCstVar(boolean, long) disassembly (after)
+  /// CHECK-START-X86: long Main.$noinline$BoolCond_LongCstVar(boolean, long) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
   /// CHECK:                          cmovnz/ne
   /// CHECK-NEXT:                     cmovnz/ne
 
-  public static long BoolCond_LongCstVar(boolean cond, long y) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static long $noinline$BoolCond_LongCstVar(boolean cond, long y) {
     return cond ? 1L : y;
   }
 
-  /// CHECK-START: float Main.BoolCond_FloatVarVar(boolean, float, float) register (after)
+  /// CHECK-START: float Main.$noinline$BoolCond_FloatVarVar(boolean, float, float) register (after)
   /// CHECK:               Select [{{f\d+}},{{f\d+}},{{z\d+}}]
 
-  /// CHECK-START-ARM64: float Main.BoolCond_FloatVarVar(boolean, float, float) disassembly (after)
+  /// CHECK-START-ARM64: float Main.$noinline$BoolCond_FloatVarVar(boolean, float, float) disassembly (after)
   /// CHECK:               Select
   /// CHECK-NEXT:            cmp
   /// CHECK-NEXT:            fcsel ne
 
-  public static float BoolCond_FloatVarVar(boolean cond, float x, float y) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static float $noinline$BoolCond_FloatVarVar(boolean cond, float x, float y) {
     return cond ? x : y;
   }
 
-  /// CHECK-START: float Main.BoolCond_FloatVarCst(boolean, float) register (after)
+  /// CHECK-START: float Main.$noinline$BoolCond_FloatVarCst(boolean, float) register (after)
   /// CHECK:               Select [{{f\d+}},{{f\d+}},{{z\d+}}]
 
-  /// CHECK-START-ARM64: float Main.BoolCond_FloatVarCst(boolean, float) disassembly (after)
+  /// CHECK-START-ARM64: float Main.$noinline$BoolCond_FloatVarCst(boolean, float) disassembly (after)
   /// CHECK:               Select
   /// CHECK-NEXT:            cmp
   /// CHECK-NEXT:            fcsel ne
 
-  public static float BoolCond_FloatVarCst(boolean cond, float x) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static float $noinline$BoolCond_FloatVarCst(boolean cond, float x) {
     return cond ? x : 1.0f;
   }
 
-  /// CHECK-START: float Main.BoolCond_FloatCstVar(boolean, float) register (after)
+  /// CHECK-START: float Main.$noinline$BoolCond_FloatCstVar(boolean, float) register (after)
   /// CHECK:               Select [{{f\d+}},{{f\d+}},{{z\d+}}]
 
-  /// CHECK-START-ARM64: float Main.BoolCond_FloatCstVar(boolean, float) disassembly (after)
+  /// CHECK-START-ARM64: float Main.$noinline$BoolCond_FloatCstVar(boolean, float) disassembly (after)
   /// CHECK:               Select
   /// CHECK-NEXT:            cmp
   /// CHECK-NEXT:            fcsel ne
 
-  public static float BoolCond_FloatCstVar(boolean cond, float y) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static float $noinline$BoolCond_FloatCstVar(boolean cond, float y) {
     return cond ? 1.0f : y;
   }
 
-  /// CHECK-START: int Main.IntNonmatCond_IntVarVar(int, int, int, int) register (after)
+  /// CHECK-START: int Main.$noinline$IntNonmatCond_IntVarVar(int, int, int, int) register (after)
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
   /// CHECK-NEXT:                     Select [{{i\d+}},{{i\d+}},<<Cond>>]
 
-  /// CHECK-START-ARM64: int Main.IntNonmatCond_IntVarVar(int, int, int, int) disassembly (after)
+  /// CHECK-START-ARM64: int Main.$noinline$IntNonmatCond_IntVarVar(int, int, int, int) disassembly (after)
   /// CHECK:               Select
   /// CHECK-NEXT:            cmp
   /// CHECK-NEXT:            csel le
 
-  /// CHECK-START-X86_64: int Main.IntNonmatCond_IntVarVar(int, int, int, int) disassembly (after)
+  /// CHECK-START-X86_64: int Main.$noinline$IntNonmatCond_IntVarVar(int, int, int, int) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
   /// CHECK-NEXT:                     Select [{{i\d+}},{{i\d+}},<<Cond>>]
   /// CHECK:                          cmovle/ng
 
-  /// CHECK-START-X86: int Main.IntNonmatCond_IntVarVar(int, int, int, int) disassembly (after)
+  /// CHECK-START-X86: int Main.$noinline$IntNonmatCond_IntVarVar(int, int, int, int) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
   /// CHECK-NEXT:                     Select [{{i\d+}},{{i\d+}},<<Cond>>]
   /// CHECK:                          cmovle/ng
 
-  public static int IntNonmatCond_IntVarVar(int a, int b, int x, int y) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static int $noinline$IntNonmatCond_IntVarVar(int a, int b, int x, int y) {
     return a > b ? x : y;
   }
 
-  /// CHECK-START: int Main.IntMatCond_IntVarVar(int, int, int, int) register (after)
+  /// CHECK-START: int Main.$noinline$IntMatCond_IntVarVar(int, int, int, int) register (after)
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
   /// CHECK-NEXT:       <<Sel:i\d+>>  Select [{{i\d+}},{{i\d+}},{{z\d+}}]
   /// CHECK-NEXT:                     Add [<<Cond>>,<<Sel>>]
 
-  /// CHECK-START-ARM64: int Main.IntMatCond_IntVarVar(int, int, int, int) disassembly (after)
+  /// CHECK-START-ARM64: int Main.$noinline$IntMatCond_IntVarVar(int, int, int, int) disassembly (after)
   /// CHECK:               LessThanOrEqual
   /// CHECK-NEXT:            cmp
   /// CHECK-NEXT:            cset le
   /// CHECK:               Select
   /// CHECK-NEXT:            csel le
 
-  /// CHECK-START-X86_64: int Main.IntMatCond_IntVarVar(int, int, int, int) disassembly (after)
+  /// CHECK-START-X86_64: int Main.$noinline$IntMatCond_IntVarVar(int, int, int, int) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
   /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
   /// CHECK:                          cmovle/ng
 
-  /// CHECK-START-X86: int Main.IntMatCond_IntVarVar(int, int, int, int) disassembly (after)
+  /// CHECK-START-X86: int Main.$noinline$IntMatCond_IntVarVar(int, int, int, int) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
   /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
   /// CHECK:                          cmovle/ng
 
-  public static int IntMatCond_IntVarVar(int a, int b, int x, int y) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static int $noinline$IntMatCond_IntVarVar(int a, int b, int x, int y) {
     int result = (a > b ? x : y);
     return result + (a > b ? 0 : 1);
   }
 
-  /// CHECK-START: long Main.IntNonmatCond_LongVarVar(int, int, long, long) register (after)
+  /// CHECK-START: long Main.$noinline$IntNonmatCond_LongVarVar(int, int, long, long) register (after)
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
   /// CHECK-NEXT:                     Select [{{j\d+}},{{j\d+}},<<Cond>>]
 
-  /// CHECK-START-ARM64: long Main.IntNonmatCond_LongVarVar(int, int, long, long) disassembly (after)
+  /// CHECK-START-ARM64: long Main.$noinline$IntNonmatCond_LongVarVar(int, int, long, long) disassembly (after)
   /// CHECK:               Select
   /// CHECK-NEXT:            cmp
   /// CHECK-NEXT:            csel le
 
-  /// CHECK-START-X86_64: long Main.IntNonmatCond_LongVarVar(int, int, long, long) disassembly (after)
+  /// CHECK-START-X86_64: long Main.$noinline$IntNonmatCond_LongVarVar(int, int, long, long) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
   /// CHECK-NEXT:                     Select [{{j\d+}},{{j\d+}},<<Cond>>]
   /// CHECK:                          cmovle/ngq
 
-  /// CHECK-START-X86: long Main.IntNonmatCond_LongVarVar(int, int, long, long) disassembly (after)
+  /// CHECK-START-X86: long Main.$noinline$IntNonmatCond_LongVarVar(int, int, long, long) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
   /// CHECK-NEXT:                     Select [{{j\d+}},{{j\d+}},<<Cond>>]
   /// CHECK:                          cmovle/ng
   /// CHECK-NEXT:                     cmovle/ng
 
-  public static long IntNonmatCond_LongVarVar(int a, int b, long x, long y) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static long $noinline$IntNonmatCond_LongVarVar(int a, int b, long x, long y) {
     return a > b ? x : y;
   }
 
-  /// CHECK-START: long Main.IntMatCond_LongVarVar(int, int, long, long) register (after)
+  /// CHECK-START: long Main.$noinline$IntMatCond_LongVarVar(int, int, long, long) register (after)
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
   /// CHECK:            <<Sel1:j\d+>> Select [{{j\d+}},{{j\d+}},<<Cond>>]
   /// CHECK:            <<Sel2:j\d+>> Select [{{j\d+}},{{j\d+}},<<Cond>>]
   /// CHECK:                          Add [<<Sel2>>,<<Sel1>>]
 
-  /// CHECK-START-ARM64: long Main.IntMatCond_LongVarVar(int, int, long, long) disassembly (after)
+  /// CHECK-START-ARM64: long Main.$noinline$IntMatCond_LongVarVar(int, int, long, long) disassembly (after)
   /// CHECK:               LessThanOrEqual
   /// CHECK-NEXT:            cmp
   /// CHECK-NEXT:            cset le
   /// CHECK:               Select
   /// CHECK-NEXT:            csel le
 
-  /// CHECK-START-X86_64: long Main.IntMatCond_LongVarVar(int, int, long, long) disassembly (after)
+  /// CHECK-START-X86_64: long Main.$noinline$IntMatCond_LongVarVar(int, int, long, long) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
   /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
   /// CHECK:                          cmovle/ngq
   /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
   /// CHECK:                          cmovnz/neq
 
-  /// CHECK-START-X86: long Main.IntMatCond_LongVarVar(int, int, long, long) disassembly (after)
+  /// CHECK-START-X86: long Main.$noinline$IntMatCond_LongVarVar(int, int, long, long) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{i\d+}},{{i\d+}}]
   /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
   /// CHECK-NEXT:                     cmovle/ng
@@ -340,34 +290,26 @@
   /// CHECK:                          cmovnz/ne
   /// CHECK-NEXT:                     cmovnz/ne
 
-  public static long IntMatCond_LongVarVar(int a, int b, long x, long y) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static long $noinline$IntMatCond_LongVarVar(int a, int b, long x, long y) {
     long result = (a > b ? x : y);
     return result + (a > b ? 0L : 1L);
   }
 
-  /// CHECK-START: long Main.LongNonmatCond_LongVarVar(long, long, long, long) register (after)
+  /// CHECK-START: long Main.$noinline$LongNonmatCond_LongVarVar(long, long, long, long) register (after)
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{j\d+}},{{j\d+}}]
   /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
 
-  /// CHECK-START-ARM64: long Main.LongNonmatCond_LongVarVar(long, long, long, long) disassembly (after)
+  /// CHECK-START-ARM64: long Main.$noinline$LongNonmatCond_LongVarVar(long, long, long, long) disassembly (after)
   /// CHECK:               Select
   /// CHECK-NEXT:            cmp
   /// CHECK-NEXT:            csel le
 
-  /// CHECK-START-X86_64: long Main.LongNonmatCond_LongVarVar(long, long, long, long) disassembly (after)
+  /// CHECK-START-X86_64: long Main.$noinline$LongNonmatCond_LongVarVar(long, long, long, long) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{j\d+}},{{j\d+}}]
   /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
   /// CHECK:                          cmovle/ngq
 
-  public static long LongNonmatCond_LongVarVar(long a, long b, long x, long y) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static long $noinline$LongNonmatCond_LongVarVar(long a, long b, long x, long y) {
     return a > b ? x : y;
   }
 
@@ -454,205 +396,169 @@
     return a < 0 ? x : y;
   }
 
-  /// CHECK-START: long Main.LongMatCond_LongVarVar(long, long, long, long) register (after)
+  /// CHECK-START: long Main.$noinline$LongMatCond_LongVarVar(long, long, long, long) register (after)
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{j\d+}},{{j\d+}}]
   /// CHECK:            <<Sel1:j\d+>> Select [{{j\d+}},{{j\d+}},<<Cond>>]
   /// CHECK:            <<Sel2:j\d+>> Select [{{j\d+}},{{j\d+}},<<Cond>>]
   /// CHECK:                          Add [<<Sel2>>,<<Sel1>>]
 
-  /// CHECK-START-ARM64: long Main.LongMatCond_LongVarVar(long, long, long, long) disassembly (after)
+  /// CHECK-START-ARM64: long Main.$noinline$LongMatCond_LongVarVar(long, long, long, long) disassembly (after)
   /// CHECK:               LessThanOrEqual
   /// CHECK-NEXT:            cmp
   /// CHECK-NEXT:            cset le
   /// CHECK:               Select
   /// CHECK-NEXT:            csel le
 
-  /// CHECK-START-X86_64: long Main.LongMatCond_LongVarVar(long, long, long, long) disassembly (after)
+  /// CHECK-START-X86_64: long Main.$noinline$LongMatCond_LongVarVar(long, long, long, long) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{j\d+}},{{j\d+}}]
   /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
   /// CHECK:                          cmovle/ngq
   /// CHECK:                          Select [{{j\d+}},{{j\d+}},<<Cond>>]
   /// CHECK:                          cmovnz/neq
 
-  public static long LongMatCond_LongVarVar(long a, long b, long x, long y) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static long $noinline$LongMatCond_LongVarVar(long a, long b, long x, long y) {
     long result = (a > b ? x : y);
     return result + (a > b ? 0L : 1L);
   }
 
-  /// CHECK-START: int Main.FloatLtNonmatCond_IntVarVar(float, float, int, int) register (after)
+  /// CHECK-START: int Main.$noinline$FloatLtNonmatCond_IntVarVar(float, float, int, int) register (after)
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{f\d+}},{{f\d+}}]
   /// CHECK-NEXT:                     Select [{{i\d+}},{{i\d+}},<<Cond>>]
 
-  /// CHECK-START-ARM64: int Main.FloatLtNonmatCond_IntVarVar(float, float, int, int) disassembly (after)
+  /// CHECK-START-ARM64: int Main.$noinline$FloatLtNonmatCond_IntVarVar(float, float, int, int) disassembly (after)
   /// CHECK:               LessThanOrEqual
   /// CHECK:               Select
   /// CHECK-NEXT:            fcmp
   /// CHECK-NEXT:            csel le
 
-  public static int FloatLtNonmatCond_IntVarVar(float a, float b, int x, int y) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static int $noinline$FloatLtNonmatCond_IntVarVar(float a, float b, int x, int y) {
     return a > b ? x : y;
   }
 
-  /// CHECK-START: int Main.FloatGtNonmatCond_IntVarVar(float, float, int, int) register (after)
+  /// CHECK-START: int Main.$noinline$FloatGtNonmatCond_IntVarVar(float, float, int, int) register (after)
   /// CHECK:            <<Cond:z\d+>> GreaterThanOrEqual [{{f\d+}},{{f\d+}}]
   /// CHECK-NEXT:                     Select [{{i\d+}},{{i\d+}},<<Cond>>]
 
-  /// CHECK-START-ARM64: int Main.FloatGtNonmatCond_IntVarVar(float, float, int, int) disassembly (after)
+  /// CHECK-START-ARM64: int Main.$noinline$FloatGtNonmatCond_IntVarVar(float, float, int, int) disassembly (after)
   /// CHECK:               GreaterThanOrEqual
   /// CHECK:               Select
   /// CHECK-NEXT:            fcmp
   /// CHECK-NEXT:            csel hs
 
-  public static int FloatGtNonmatCond_IntVarVar(float a, float b, int x, int y) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static int $noinline$FloatGtNonmatCond_IntVarVar(float a, float b, int x, int y) {
     return a < b ? x : y;
   }
 
-  /// CHECK-START: float Main.FloatGtNonmatCond_FloatVarVar(float, float, float, float) register (after)
+  /// CHECK-START: float Main.$noinline$FloatGtNonmatCond_FloatVarVar(float, float, float, float) register (after)
   /// CHECK:            <<Cond:z\d+>> GreaterThanOrEqual [{{f\d+}},{{f\d+}}]
   /// CHECK-NEXT:                     Select [{{f\d+}},{{f\d+}},<<Cond>>]
 
-  /// CHECK-START-ARM64: float Main.FloatGtNonmatCond_FloatVarVar(float, float, float, float) disassembly (after)
+  /// CHECK-START-ARM64: float Main.$noinline$FloatGtNonmatCond_FloatVarVar(float, float, float, float) disassembly (after)
   /// CHECK:               GreaterThanOrEqual
   /// CHECK:               Select
   /// CHECK-NEXT:            fcmp
   /// CHECK-NEXT:            fcsel hs
 
-  public static float FloatGtNonmatCond_FloatVarVar(float a, float b, float x, float y) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static float $noinline$FloatGtNonmatCond_FloatVarVar(float a, float b, float x, float y) {
     return a < b ? x : y;
   }
 
-  /// CHECK-START: int Main.FloatLtMatCond_IntVarVar(float, float, int, int) register (after)
+  /// CHECK-START: int Main.$noinline$FloatLtMatCond_IntVarVar(float, float, int, int) register (after)
   /// CHECK:            <<Cond:z\d+>> LessThanOrEqual [{{f\d+}},{{f\d+}}]
   /// CHECK-NEXT:       <<Sel:i\d+>>  Select [{{i\d+}},{{i\d+}},<<Cond>>]
   /// CHECK-NEXT:                     Add [<<Cond>>,<<Sel>>]
 
-  /// CHECK-START-ARM64: int Main.FloatLtMatCond_IntVarVar(float, float, int, int) disassembly (after)
+  /// CHECK-START-ARM64: int Main.$noinline$FloatLtMatCond_IntVarVar(float, float, int, int) disassembly (after)
   /// CHECK:               LessThanOrEqual
   /// CHECK-NEXT:            fcmp
   /// CHECK-NEXT:            cset le
   /// CHECK:               Select
   /// CHECK-NEXT:            csel le
 
-  public static int FloatLtMatCond_IntVarVar(float a, float b, int x, int y) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static int $noinline$FloatLtMatCond_IntVarVar(float a, float b, int x, int y) {
     int result = (a > b ? x : y);
     return result + (a > b ? 0 : 1);
   }
 
-  /// CHECK-START: int Main.FloatGtMatCond_IntVarVar(float, float, int, int) register (after)
+  /// CHECK-START: int Main.$noinline$FloatGtMatCond_IntVarVar(float, float, int, int) register (after)
   /// CHECK:            <<Cond:z\d+>> GreaterThanOrEqual [{{f\d+}},{{f\d+}}]
   /// CHECK-NEXT:       <<Sel:i\d+>>  Select [{{i\d+}},{{i\d+}},<<Cond>>]
   /// CHECK-NEXT:                     Add [<<Cond>>,<<Sel>>]
 
-  /// CHECK-START-ARM64: int Main.FloatGtMatCond_IntVarVar(float, float, int, int) disassembly (after)
+  /// CHECK-START-ARM64: int Main.$noinline$FloatGtMatCond_IntVarVar(float, float, int, int) disassembly (after)
   /// CHECK:               GreaterThanOrEqual
   /// CHECK-NEXT:            fcmp
   /// CHECK-NEXT:            cset hs
   /// CHECK:               Select
   /// CHECK-NEXT:            csel hs
 
-  public static int FloatGtMatCond_IntVarVar(float a, float b, int x, int y) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static int $noinline$FloatGtMatCond_IntVarVar(float a, float b, int x, int y) {
     int result = (a < b ? x : y);
     return result + (a < b ? 0 : 1);
   }
 
-  /// CHECK-START: float Main.FloatGtMatCond_FloatVarVar(float, float, float, float) register (after)
+  /// CHECK-START: float Main.$noinline$FloatGtMatCond_FloatVarVar(float, float, float, float) register (after)
   /// CHECK:            <<Cond:z\d+>> GreaterThanOrEqual
   /// CHECK-NEXT:       <<Sel:f\d+>>  Select [{{f\d+}},{{f\d+}},<<Cond>>]
   /// CHECK-NEXT:                     TypeConversion [<<Cond>>]
 
-  /// CHECK-START-ARM64: float Main.FloatGtMatCond_FloatVarVar(float, float, float, float) disassembly (after)
+  /// CHECK-START-ARM64: float Main.$noinline$FloatGtMatCond_FloatVarVar(float, float, float, float) disassembly (after)
   /// CHECK:               GreaterThanOrEqual
   /// CHECK-NEXT:            fcmp
   /// CHECK-NEXT:            cset hs
   /// CHECK:               Select
   /// CHECK-NEXT:            fcsel hs
 
-  public static float FloatGtMatCond_FloatVarVar(float a, float b, float x, float y) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static float $noinline$FloatGtMatCond_FloatVarVar(float a, float b, float x, float y) {
     float result = (a < b ? x : y);
     return result + (a < b ? 0 : 1);
   }
 
-  /// CHECK-START: int Main.BoolCond_0_m1(boolean) register (after)
+  /// CHECK-START: int Main.$noinline$BoolCond_0_m1(boolean) register (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
 
-  /// CHECK-START-ARM64: int Main.BoolCond_0_m1(boolean) disassembly (after)
+  /// CHECK-START-ARM64: int Main.$noinline$BoolCond_0_m1(boolean) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
   /// CHECK-NEXT:                     cmp {{w\d+}}, #0x0 (0)
   /// CHECK-NEXT:                     csetm {{w\d+}}, eq
 
-  /// CHECK-START-X86_64: int Main.BoolCond_0_m1(boolean) disassembly (after)
+  /// CHECK-START-X86_64: int Main.$noinline$BoolCond_0_m1(boolean) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
   /// CHECK:                          cmovnz/ne
 
-  /// CHECK-START-X86: int Main.BoolCond_0_m1(boolean) disassembly (after)
+  /// CHECK-START-X86: int Main.$noinline$BoolCond_0_m1(boolean) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
   /// CHECK:                          cmovnz/ne
 
-  public static int BoolCond_0_m1(boolean cond) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static int $noinline$BoolCond_0_m1(boolean cond) {
     return cond ? 0 : -1;
   }
 
-  /// CHECK-START: int Main.BoolCond_m1_0(boolean) register (after)
+  /// CHECK-START: int Main.$noinline$BoolCond_m1_0(boolean) register (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
 
-  /// CHECK-START-ARM64: int Main.BoolCond_m1_0(boolean) disassembly (after)
+  /// CHECK-START-ARM64: int Main.$noinline$BoolCond_m1_0(boolean) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
   /// CHECK-NEXT:                     cmp {{w\d+}}, #0x0 (0)
   /// CHECK-NEXT:                     csetm {{w\d+}}, ne
 
-  /// CHECK-START-X86_64: int Main.BoolCond_m1_0(boolean) disassembly (after)
+  /// CHECK-START-X86_64: int Main.$noinline$BoolCond_m1_0(boolean) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
   /// CHECK:                          cmovnz/ne
 
-  /// CHECK-START-X86: int Main.BoolCond_m1_0(boolean) disassembly (after)
+  /// CHECK-START-X86: int Main.$noinline$BoolCond_m1_0(boolean) disassembly (after)
   /// CHECK:            <<Cond:z\d+>> ParameterValue
   /// CHECK:                          Select [{{i\d+}},{{i\d+}},<<Cond>>]
   /// CHECK:                          cmovnz/ne
 
-  public static int BoolCond_m1_0(boolean cond) {
-    if (doThrow) {
-      // Try defeating inlining.
-      throw new Error();
-    }
+  public static int $noinline$BoolCond_m1_0(boolean cond) {
     return cond ? -1 : 0;
   }
 
@@ -669,46 +575,58 @@
   }
 
   public static void main(String[] args) {
-    assertEqual(5, BoolCond_IntVarVar(true, 5, 7));
-    assertEqual(7, BoolCond_IntVarVar(false, 5, 7));
-    assertEqual(5, BoolCond_IntVarCst(true, 5));
-    assertEqual(1, BoolCond_IntVarCst(false, 5));
-    assertEqual(1, BoolCond_IntCstVar(true, 7));
-    assertEqual(7, BoolCond_IntCstVar(false, 7));
+    assertEqual(5, $noinline$BoolCond_IntVarVar(true, 5, 7));
+    assertEqual(7, $noinline$BoolCond_IntVarVar(false, 5, 7));
+    assertEqual(5, $noinline$BoolCond_IntVarCst(true, 5));
+    assertEqual(1, $noinline$BoolCond_IntVarCst(false, 5));
+    assertEqual(1, $noinline$BoolCond_IntCstVar(true, 7));
+    assertEqual(7, $noinline$BoolCond_IntCstVar(false, 7));
 
-    assertEqual(5L, BoolCond_LongVarVar(true, 5L, 7L));
-    assertEqual(7L, BoolCond_LongVarVar(false, 5L, 7L));
-    assertEqual(5L, BoolCond_LongVarCst(true, 5L));
-    assertEqual(1L, BoolCond_LongVarCst(false, 5L));
-    assertEqual(1L, BoolCond_LongCstVar(true, 7L));
-    assertEqual(7L, BoolCond_LongCstVar(false, 7L));
+    assertEqual(5L, $noinline$BoolCond_LongVarVar(true, 5L, 7L));
+    assertEqual(7L, $noinline$BoolCond_LongVarVar(false, 5L, 7L));
+    assertEqual(5L, $noinline$BoolCond_LongVarCst(true, 5L));
+    assertEqual(1L, $noinline$BoolCond_LongVarCst(false, 5L));
+    assertEqual(1L, $noinline$BoolCond_LongCstVar(true, 7L));
+    assertEqual(7L, $noinline$BoolCond_LongCstVar(false, 7L));
 
-    assertEqual(5, BoolCond_FloatVarVar(true, 5, 7));
-    assertEqual(7, BoolCond_FloatVarVar(false, 5, 7));
-    assertEqual(5, BoolCond_FloatVarCst(true, 5));
-    assertEqual(1, BoolCond_FloatVarCst(false, 5));
-    assertEqual(1, BoolCond_FloatCstVar(true, 7));
-    assertEqual(7, BoolCond_FloatCstVar(false, 7));
+    assertEqual(5, $noinline$BoolCond_FloatVarVar(true, 5, 7));
+    assertEqual(7, $noinline$BoolCond_FloatVarVar(false, 5, 7));
+    assertEqual(5, $noinline$BoolCond_FloatVarCst(true, 5));
+    assertEqual(1, $noinline$BoolCond_FloatVarCst(false, 5));
+    assertEqual(1, $noinline$BoolCond_FloatCstVar(true, 7));
+    assertEqual(7, $noinline$BoolCond_FloatCstVar(false, 7));
 
-    assertEqual(5, IntNonmatCond_IntVarVar(3, 2, 5, 7));
-    assertEqual(7, IntNonmatCond_IntVarVar(2, 3, 5, 7));
-    assertEqual(5, IntMatCond_IntVarVar(3, 2, 5, 7));
-    assertEqual(8, IntMatCond_IntVarVar(2, 3, 5, 7));
+    assertEqual(5, $noinline$IntNonmatCond_IntVarVar(3, 2, 5, 7));
+    assertEqual(7, $noinline$IntNonmatCond_IntVarVar(2, 3, 5, 7));
+    assertEqual(5, $noinline$IntMatCond_IntVarVar(3, 2, 5, 7));
+    assertEqual(8, $noinline$IntMatCond_IntVarVar(2, 3, 5, 7));
+    assertEqual(5, $noinline$IntNonmatCond_LongVarVar(3, 2, 5L, 7L));
+    assertEqual(7, $noinline$IntNonmatCond_LongVarVar(2, 3, 5L, 7L));
+    assertEqual(5, $noinline$IntMatCond_LongVarVar(3, 2, 5L, 7L));
+    assertEqual(8, $noinline$IntMatCond_LongVarVar(2, 3, 5L, 7L));
+    assertEqual(5, $noinline$LongMatCond_LongVarVar(3L, 2L, 5L, 7L));
+    assertEqual(8, $noinline$LongMatCond_LongVarVar(2L, 3L, 5L, 7L));
 
-    assertEqual(0xAAAAAAAA55555555L,
-                LongNonmatCond_LongVarVar(3L, 2L, 0xAAAAAAAA55555555L, 0x8888888877777777L));
-    assertEqual(0x8888888877777777L,
-                LongNonmatCond_LongVarVar(2L, 2L, 0xAAAAAAAA55555555L, 0x8888888877777777L));
-    assertEqual(0x8888888877777777L,
-                LongNonmatCond_LongVarVar(2L, 3L, 0xAAAAAAAA55555555L, 0x8888888877777777L));
-    assertEqual(0xAAAAAAAA55555555L, LongNonmatCond_LongVarVar(0x0000000100000000L,
-                                                               0x00000000FFFFFFFFL,
-                                                               0xAAAAAAAA55555555L,
-                                                               0x8888888877777777L));
-    assertEqual(0x8888888877777777L, LongNonmatCond_LongVarVar(0x00000000FFFFFFFFL,
-                                                               0x0000000100000000L,
-                                                               0xAAAAAAAA55555555L,
-                                                               0x8888888877777777L));
+    assertEqual(0xAAAAAAAA55555555L, $noinline$LongNonmatCond_LongVarVar(3L,
+                                                                         2L,
+                                                                         0xAAAAAAAA55555555L,
+                                                                         0x8888888877777777L));
+    assertEqual(0x8888888877777777L, $noinline$LongNonmatCond_LongVarVar(2L,
+                                                                         2L,
+                                                                         0xAAAAAAAA55555555L,
+                                                                         0x8888888877777777L));
+    assertEqual(0x8888888877777777L, $noinline$LongNonmatCond_LongVarVar(2L,
+                                                                         3L,
+                                                                         0xAAAAAAAA55555555L,
+                                                                         0x8888888877777777L));
+    assertEqual(0xAAAAAAAA55555555L, $noinline$LongNonmatCond_LongVarVar(0x0000000100000000L,
+                                                                         0x00000000FFFFFFFFL,
+                                                                         0xAAAAAAAA55555555L,
+                                                                         0x8888888877777777L));
+    assertEqual(0x8888888877777777L, $noinline$LongNonmatCond_LongVarVar(0x00000000FFFFFFFFL,
+                                                                         0x0000000100000000L,
+                                                                         0xAAAAAAAA55555555L,
+                                                                         0x8888888877777777L));
 
     assertEqual(0x8888888877777777L, $noinline$LongEqNonmatCond_LongVarVar(2L,
                                                                            3L,
@@ -759,39 +677,39 @@
     assertEqual(7L, $noinline$LongNonmatCondCst_LongVarVar7(2L, 5L, 7L));
     assertEqual(5L, $noinline$LongNonmatCondCst_LongVarVar7(-9000L, 5L, 7L));
 
-    assertEqual(5, FloatLtNonmatCond_IntVarVar(3, 2, 5, 7));
-    assertEqual(7, FloatLtNonmatCond_IntVarVar(2, 3, 5, 7));
-    assertEqual(7, FloatLtNonmatCond_IntVarVar(Float.NaN, 2, 5, 7));
-    assertEqual(7, FloatLtNonmatCond_IntVarVar(2, Float.NaN, 5, 7));
+    assertEqual(5, $noinline$FloatLtNonmatCond_IntVarVar(3, 2, 5, 7));
+    assertEqual(7, $noinline$FloatLtNonmatCond_IntVarVar(2, 3, 5, 7));
+    assertEqual(7, $noinline$FloatLtNonmatCond_IntVarVar(Float.NaN, 2, 5, 7));
+    assertEqual(7, $noinline$FloatLtNonmatCond_IntVarVar(2, Float.NaN, 5, 7));
 
-    assertEqual(5, FloatGtNonmatCond_IntVarVar(2, 3, 5, 7));
-    assertEqual(7, FloatGtNonmatCond_IntVarVar(3, 2, 5, 7));
-    assertEqual(7, FloatGtNonmatCond_IntVarVar(Float.NaN, 2, 5, 7));
-    assertEqual(7, FloatGtNonmatCond_IntVarVar(2, Float.NaN, 5, 7));
+    assertEqual(5, $noinline$FloatGtNonmatCond_IntVarVar(2, 3, 5, 7));
+    assertEqual(7, $noinline$FloatGtNonmatCond_IntVarVar(3, 2, 5, 7));
+    assertEqual(7, $noinline$FloatGtNonmatCond_IntVarVar(Float.NaN, 2, 5, 7));
+    assertEqual(7, $noinline$FloatGtNonmatCond_IntVarVar(2, Float.NaN, 5, 7));
 
-    assertEqual(5, FloatGtNonmatCond_FloatVarVar(2, 3, 5, 7));
-    assertEqual(7, FloatGtNonmatCond_FloatVarVar(3, 2, 5, 7));
-    assertEqual(7, FloatGtNonmatCond_FloatVarVar(Float.NaN, 2, 5, 7));
-    assertEqual(7, FloatGtNonmatCond_FloatVarVar(2, Float.NaN, 5, 7));
+    assertEqual(5, $noinline$FloatGtNonmatCond_FloatVarVar(2, 3, 5, 7));
+    assertEqual(7, $noinline$FloatGtNonmatCond_FloatVarVar(3, 2, 5, 7));
+    assertEqual(7, $noinline$FloatGtNonmatCond_FloatVarVar(Float.NaN, 2, 5, 7));
+    assertEqual(7, $noinline$FloatGtNonmatCond_FloatVarVar(2, Float.NaN, 5, 7));
 
-    assertEqual(5, FloatLtMatCond_IntVarVar(3, 2, 5, 7));
-    assertEqual(8, FloatLtMatCond_IntVarVar(2, 3, 5, 7));
-    assertEqual(8, FloatLtMatCond_IntVarVar(Float.NaN, 2, 5, 7));
-    assertEqual(8, FloatLtMatCond_IntVarVar(2, Float.NaN, 5, 7));
+    assertEqual(5, $noinline$FloatLtMatCond_IntVarVar(3, 2, 5, 7));
+    assertEqual(8, $noinline$FloatLtMatCond_IntVarVar(2, 3, 5, 7));
+    assertEqual(8, $noinline$FloatLtMatCond_IntVarVar(Float.NaN, 2, 5, 7));
+    assertEqual(8, $noinline$FloatLtMatCond_IntVarVar(2, Float.NaN, 5, 7));
 
-    assertEqual(5, FloatGtMatCond_IntVarVar(2, 3, 5, 7));
-    assertEqual(8, FloatGtMatCond_IntVarVar(3, 2, 5, 7));
-    assertEqual(8, FloatGtMatCond_IntVarVar(Float.NaN, 2, 5, 7));
-    assertEqual(8, FloatGtMatCond_IntVarVar(2, Float.NaN, 5, 7));
+    assertEqual(5, $noinline$FloatGtMatCond_IntVarVar(2, 3, 5, 7));
+    assertEqual(8, $noinline$FloatGtMatCond_IntVarVar(3, 2, 5, 7));
+    assertEqual(8, $noinline$FloatGtMatCond_IntVarVar(Float.NaN, 2, 5, 7));
+    assertEqual(8, $noinline$FloatGtMatCond_IntVarVar(2, Float.NaN, 5, 7));
 
-    assertEqual(5, FloatGtMatCond_FloatVarVar(2, 3, 5, 7));
-    assertEqual(8, FloatGtMatCond_FloatVarVar(3, 2, 5, 7));
-    assertEqual(8, FloatGtMatCond_FloatVarVar(Float.NaN, 2, 5, 7));
-    assertEqual(8, FloatGtMatCond_FloatVarVar(2, Float.NaN, 5, 7));
+    assertEqual(5, $noinline$FloatGtMatCond_FloatVarVar(2, 3, 5, 7));
+    assertEqual(8, $noinline$FloatGtMatCond_FloatVarVar(3, 2, 5, 7));
+    assertEqual(8, $noinline$FloatGtMatCond_FloatVarVar(Float.NaN, 2, 5, 7));
+    assertEqual(8, $noinline$FloatGtMatCond_FloatVarVar(2, Float.NaN, 5, 7));
 
-    assertEqual(0, BoolCond_0_m1(true));
-    assertEqual(-1, BoolCond_0_m1(false));
-    assertEqual(-1, BoolCond_m1_0(true));
-    assertEqual(0, BoolCond_m1_0(false));
+    assertEqual(0, $noinline$BoolCond_0_m1(true));
+    assertEqual(-1, $noinline$BoolCond_0_m1(false));
+    assertEqual(-1, $noinline$BoolCond_m1_0(true));
+    assertEqual(0, $noinline$BoolCond_m1_0(false));
   }
 }
diff --git a/test/575-checker-string-init-alias/src/Main.java b/test/575-checker-string-init-alias/src/Main.java
index 1ab3207..b55b0c5 100644
--- a/test/575-checker-string-init-alias/src/Main.java
+++ b/test/575-checker-string-init-alias/src/Main.java
@@ -23,6 +23,7 @@
   class Inner {}
 
   public static native void assertIsInterpreted();
+  public static native void ensureJitCompiled(Class<?> cls, String methodName);
 
   private static void assertEqual(String expected, String actual) {
     if (!expected.equals(actual)) {
@@ -36,6 +37,8 @@
     int[] array = new int[1];
 
     {
+      // If the JIT is enabled, ensure it has compiled the method to force the deopt.
+      ensureJitCompiled(c, "testNoAlias");
       Method m = c.getMethod("testNoAlias", int[].class, String.class);
       try {
         m.invoke(null, new Object[] { array , "foo" });
@@ -51,6 +54,8 @@
     }
 
     {
+      // If the JIT is enabled, ensure it has compiled the method to force the deopt.
+      ensureJitCompiled(c, "testAlias");
       Method m = c.getMethod("testAlias", int[].class, String.class);
       try {
         m.invoke(null, new Object[] { array, "bar" });
diff --git a/test/580-fp16/expected.txt b/test/580-fp16/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/580-fp16/expected.txt
diff --git a/test/580-fp16/info.txt b/test/580-fp16/info.txt
new file mode 100644
index 0000000..547ae22
--- /dev/null
+++ b/test/580-fp16/info.txt
@@ -0,0 +1 @@
+This test case is used to test libcore.util.FP16.
diff --git a/test/580-fp16/src-art/Main.java b/test/580-fp16/src-art/Main.java
new file mode 100644
index 0000000..14b15f8
--- /dev/null
+++ b/test/580-fp16/src-art/Main.java
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import libcore.util.FP16;
+
+public class Main {
+    public Main() {
+    }
+
+    public static int TestFP16ToFloatRawIntBits(short half) {
+        float f = FP16.toFloat(half);
+        // Since in this test class we need to check the integer representing of
+        // the actual float NaN values, the floatToRawIntBits() is used instead of
+        // floatToIntBits().
+        return Float.floatToRawIntBits(f);
+    }
+
+    public static void assertEquals(short expected, short calculated) {
+        if (expected != calculated) {
+            throw new Error("Expected: " + expected + ", Calculated: " + calculated);
+        }
+    }
+    public static void assertEquals(float expected, float calculated) {
+        if (expected != calculated) {
+            throw new Error("Expected: " + expected + ", Calculated: " + calculated);
+        }
+    }
+    static public void assertTrue(boolean condition) {
+        if (!condition) {
+            throw new Error("condition not true");
+        }
+    }
+
+    static public void assertFalse(boolean condition) {
+        if (condition) {
+            throw new Error("condition not false");
+        }
+    }
+
+    public static void testHalfToFloatToHalfConversions(){
+        // Test FP16 to float and back to Half for all possible Short values
+        for (short h = Short.MIN_VALUE; h < Short.MAX_VALUE; h++) {
+            if (FP16.isNaN(h)) {
+                // NaN inputs are tested below.
+                continue;
+            }
+            assertEquals(h, FP16.toHalf(FP16.toFloat(h)));
+        }
+    }
+
+    public static void testToHalf(){
+        // These asserts check some known values and edge cases for FP16.toHalf
+        // and have been inspired by the cts HalfTest.
+        // Zeroes, NaN and infinities
+        assertEquals(FP16.POSITIVE_ZERO, FP16.toHalf(0.0f));
+        assertEquals(FP16.NEGATIVE_ZERO, FP16.toHalf(-0.0f));
+        assertEquals(FP16.NaN, FP16.toHalf(Float.NaN));
+        assertEquals(FP16.POSITIVE_INFINITY, FP16.toHalf(Float.POSITIVE_INFINITY));
+        assertEquals(FP16.NEGATIVE_INFINITY, FP16.toHalf(Float.NEGATIVE_INFINITY));
+        // Known values
+        assertEquals((short) 0x3c01, FP16.toHalf(1.0009765625f));
+        assertEquals((short) 0xc000, FP16.toHalf(-2.0f));
+        assertEquals((short) 0x0400, FP16.toHalf(6.10352e-5f));
+        assertEquals((short) 0x7bff, FP16.toHalf(65504.0f));
+        assertEquals((short) 0x3555, FP16.toHalf(1.0f / 3.0f));
+        // Subnormals
+        assertEquals((short) 0x03ff, FP16.toHalf(6.09756e-5f));
+        assertEquals(FP16.MIN_VALUE, FP16.toHalf(5.96046e-8f));
+        assertEquals((short) 0x83ff, FP16.toHalf(-6.09756e-5f));
+        assertEquals((short) 0x8001, FP16.toHalf(-5.96046e-8f));
+        // Subnormals (flushed to +/-0)
+        assertEquals(FP16.POSITIVE_ZERO, FP16.toHalf(5.96046e-9f));
+        assertEquals(FP16.NEGATIVE_ZERO, FP16.toHalf(-5.96046e-9f));
+        // Test for values that overflow the mantissa bits into exp bits
+        assertEquals(0x1000, FP16.toHalf(Float.intBitsToFloat(0x39fff000)));
+        assertEquals(0x0400, FP16.toHalf(Float.intBitsToFloat(0x387fe000)));
+        // Floats with absolute value above +/-65519 are rounded to +/-inf
+        // when using round-to-even
+        assertEquals(0x7bff, FP16.toHalf(65519.0f));
+        assertEquals(0x7bff, FP16.toHalf(65519.9f));
+        assertEquals(FP16.POSITIVE_INFINITY, FP16.toHalf(65520.0f));
+        assertEquals(FP16.NEGATIVE_INFINITY, FP16.toHalf(-65520.0f));
+        // Check if numbers are rounded to nearest even when they
+        // cannot be accurately represented by Half
+        assertEquals(0x6800, FP16.toHalf(2049.0f));
+        assertEquals(0x6c00, FP16.toHalf(4098.0f));
+        assertEquals(0x7000, FP16.toHalf(8196.0f));
+        assertEquals(0x7400, FP16.toHalf(16392.0f));
+        assertEquals(0x7800, FP16.toHalf(32784.0f));
+
+    }
+
+    public static void testToFloat(){
+        // FP16 SNaN/QNaN inputs to float
+        // The most significant bit of mantissa:
+        //                 V
+        // 0xfc01: 1 11111 0000000001 (signaling NaN)
+        // 0xfdff: 1 11111 0111111111 (signaling NaN)
+        // 0xfe00: 1 11111 1000000000 (quiet NaN)
+        // 0xffff: 1 11111 1111111111 (quiet NaN)
+        // This test is inspired by Java implementation of android.util.Half.toFloat(),
+        // where the implementation performs SNaN->QNaN conversion.
+        assert(Float.isNaN(FP16.toFloat((short)0xfc01)));
+        assert(Float.isNaN(FP16.toFloat((short)0xfdff)));
+        assert(Float.isNaN(FP16.toFloat((short)0xfe00)));
+        assert(Float.isNaN(FP16.toFloat((short)0xffff)));
+        assertEquals(0xffc02000, TestFP16ToFloatRawIntBits((short)(0xfc01)));  // SNaN->QNaN
+        assertEquals(0xffffe000, TestFP16ToFloatRawIntBits((short)(0xfdff)));  // SNaN->QNaN
+        assertEquals(0xffc00000, TestFP16ToFloatRawIntBits((short)(0xfe00)));  // QNaN->QNaN
+        assertEquals(0xffffe000, TestFP16ToFloatRawIntBits((short)(0xffff)));  // QNaN->QNaN
+    }
+
+    public static void testFloor() {
+        // These tests have been taken from the cts HalfTest
+        assertEquals(FP16.POSITIVE_INFINITY, FP16.floor(FP16.POSITIVE_INFINITY));
+        assertEquals(FP16.NEGATIVE_INFINITY, FP16.floor(FP16.NEGATIVE_INFINITY));
+        assertEquals(FP16.POSITIVE_ZERO, FP16.floor(FP16.POSITIVE_ZERO));
+        assertEquals(FP16.NEGATIVE_ZERO, FP16.floor(FP16.NEGATIVE_ZERO));
+        assertEquals(FP16.NaN, FP16.floor(FP16.NaN));
+        assertEquals(FP16.LOWEST_VALUE, FP16.floor(FP16.LOWEST_VALUE));
+        assertEquals(FP16.POSITIVE_ZERO, FP16.floor(FP16.MIN_NORMAL));
+        assertEquals(FP16.POSITIVE_ZERO, FP16.floor((short) 0x3ff));
+        assertEquals(FP16.POSITIVE_ZERO, FP16.floor(FP16.toHalf(0.2f)));
+        assertEquals(-1.0f, FP16.toFloat(FP16.floor(FP16.toHalf(-0.2f))));
+        assertEquals(-1.0f, FP16.toFloat(FP16.floor(FP16.toHalf(-0.7f))));
+        assertEquals(FP16.POSITIVE_ZERO, FP16.floor(FP16.toHalf(0.7f)));
+        assertEquals(124.0f, FP16.toFloat(FP16.floor(FP16.toHalf(124.7f))));
+        assertEquals(-125.0f, FP16.toFloat(FP16.floor(FP16.toHalf(-124.7f))));
+        assertEquals(124.0f, FP16.toFloat(FP16.floor(FP16.toHalf(124.2f))));
+        assertEquals(-125.0f, FP16.toFloat(FP16.floor(FP16.toHalf(-124.2f))));
+        // floor for NaN values
+        assertEquals((short) 0x7e01, FP16.floor((short) 0x7c01));
+        assertEquals((short) 0x7f00, FP16.floor((short) 0x7d00));
+        assertEquals((short) 0xfe01, FP16.floor((short) 0xfc01));
+        assertEquals((short) 0xff00, FP16.floor((short) 0xfd00));
+    }
+
+    public static void testCeil() {
+        // These tests have been taken from the cts HalfTest
+        assertEquals(FP16.POSITIVE_INFINITY, FP16.ceil(FP16.POSITIVE_INFINITY));
+        assertEquals(FP16.NEGATIVE_INFINITY, FP16.ceil(FP16.NEGATIVE_INFINITY));
+        assertEquals(FP16.POSITIVE_ZERO, FP16.ceil(FP16.POSITIVE_ZERO));
+        assertEquals(FP16.NEGATIVE_ZERO, FP16.ceil(FP16.NEGATIVE_ZERO));
+        assertEquals(FP16.NaN, FP16.ceil(FP16.NaN));
+        assertEquals(FP16.LOWEST_VALUE, FP16.ceil(FP16.LOWEST_VALUE));
+        assertEquals(1.0f, FP16.toFloat(FP16.ceil(FP16.MIN_NORMAL)));
+        assertEquals(1.0f, FP16.toFloat(FP16.ceil((short) 0x3ff)));
+        assertEquals(1.0f, FP16.toFloat(FP16.ceil(FP16.toHalf(0.2f))));
+        assertEquals(FP16.NEGATIVE_ZERO, FP16.ceil(FP16.toHalf(-0.2f)));
+        assertEquals(1.0f, FP16.toFloat(FP16.ceil(FP16.toHalf(0.7f))));
+        assertEquals(FP16.NEGATIVE_ZERO, FP16.ceil(FP16.toHalf(-0.7f)));
+        assertEquals(125.0f, FP16.toFloat(FP16.ceil(FP16.toHalf(124.7f))));
+        assertEquals(-124.0f, FP16.toFloat(FP16.ceil(FP16.toHalf(-124.7f))));
+        assertEquals(125.0f, FP16.toFloat(FP16.ceil(FP16.toHalf(124.2f))));
+        assertEquals(-124.0f, FP16.toFloat(FP16.ceil(FP16.toHalf(-124.2f))));
+        // ceil for NaN values
+        assertEquals((short) 0x7e01, FP16.ceil((short) 0x7c01));
+        assertEquals((short) 0x7f00, FP16.ceil((short) 0x7d00));
+        assertEquals((short) 0xfe01, FP16.ceil((short) 0xfc01));
+        assertEquals((short) 0xff00, FP16.ceil((short) 0xfd00));
+    }
+
+    public static void testRint() {
+        assertEquals(FP16.POSITIVE_INFINITY, FP16.rint(FP16.POSITIVE_INFINITY));
+        assertEquals(FP16.NEGATIVE_INFINITY, FP16.rint(FP16.NEGATIVE_INFINITY));
+        assertEquals(FP16.POSITIVE_ZERO, FP16.rint(FP16.POSITIVE_ZERO));
+        assertEquals(FP16.NEGATIVE_ZERO, FP16.rint(FP16.NEGATIVE_ZERO));
+        assertEquals(FP16.NaN, FP16.rint(FP16.NaN));
+        assertEquals(FP16.LOWEST_VALUE, FP16.rint(FP16.LOWEST_VALUE));
+        assertEquals(FP16.POSITIVE_ZERO, FP16.rint(FP16.MIN_VALUE));
+        assertEquals(FP16.POSITIVE_ZERO, FP16.rint((short) 0x200));
+        assertEquals(FP16.POSITIVE_ZERO, FP16.rint((short) 0x3ff));
+        assertEquals(FP16.POSITIVE_ZERO, FP16.rint(FP16.toHalf(0.2f)));
+        assertEquals(FP16.NEGATIVE_ZERO, FP16.rint(FP16.toHalf(-0.2f)));
+        assertEquals(1.0f, FP16.toFloat(FP16.rint(FP16.toHalf(0.7f))));
+        assertEquals(-1.0f, FP16.toFloat(FP16.rint(FP16.toHalf(-0.7f))));
+        assertEquals(0.0f, FP16.toFloat(FP16.rint(FP16.toHalf(0.5f))));
+        assertEquals(-0.0f, FP16.toFloat(FP16.rint(FP16.toHalf(-0.5f))));
+        assertEquals(125.0f, FP16.toFloat(FP16.rint(FP16.toHalf(124.7f))));
+        assertEquals(-125.0f, FP16.toFloat(FP16.rint(FP16.toHalf(-124.7f))));
+        assertEquals(124.0f, FP16.toFloat(FP16.rint(FP16.toHalf(124.2f))));
+        assertEquals(-124.0f, FP16.toFloat(FP16.rint(FP16.toHalf(-124.2f))));
+        // rint for NaN values
+        assertEquals((short) 0x7e01, FP16.rint((short) 0x7c01));
+        assertEquals((short) 0x7f00, FP16.rint((short) 0x7d00));
+        assertEquals((short) 0xfe01, FP16.rint((short) 0xfc01));
+        assertEquals((short) 0xff00, FP16.rint((short) 0xfd00));
+
+    }
+
+    public static void testGreater() {
+        assertTrue(FP16.greater(FP16.POSITIVE_INFINITY, FP16.NEGATIVE_INFINITY));
+        assertTrue(FP16.greater(FP16.POSITIVE_INFINITY, FP16.MAX_VALUE));
+        assertFalse(FP16.greater(FP16.MAX_VALUE, FP16.POSITIVE_INFINITY));
+        assertFalse(FP16.greater(FP16.NEGATIVE_INFINITY, FP16.LOWEST_VALUE));
+        assertTrue(FP16.greater(FP16.LOWEST_VALUE, FP16.NEGATIVE_INFINITY));
+        assertFalse(FP16.greater(FP16.NEGATIVE_ZERO, FP16.POSITIVE_ZERO));
+        assertFalse(FP16.greater(FP16.POSITIVE_ZERO, FP16.NEGATIVE_ZERO));
+        assertFalse(FP16.greater(FP16.toHalf(12.3f), FP16.NaN));
+        assertFalse(FP16.greater(FP16.NaN, FP16.toHalf(12.3f)));
+        assertTrue(FP16.greater(FP16.MIN_NORMAL, FP16.MIN_VALUE));
+        assertFalse(FP16.greater(FP16.MIN_VALUE, FP16.MIN_NORMAL));
+        assertTrue(FP16.greater(FP16.toHalf(12.4f), FP16.toHalf(12.3f)));
+        assertFalse(FP16.greater(FP16.toHalf(12.3f), FP16.toHalf(12.4f)));
+        assertFalse(FP16.greater(FP16.toHalf(-12.4f), FP16.toHalf(-12.3f)));
+        assertTrue(FP16.greater(FP16.toHalf(-12.3f), FP16.toHalf(-12.4f)));
+        assertTrue(FP16.greater((short) 0x3ff, FP16.MIN_VALUE));
+
+        assertFalse(FP16.greater(FP16.toHalf(-1.0f), FP16.toHalf(0.0f)));
+        assertTrue(FP16.greater(FP16.toHalf(0.0f), FP16.toHalf(-1.0f)));
+        assertFalse(FP16.greater(FP16.toHalf(-1.0f), FP16.toHalf(-1.0f)));
+        assertFalse(FP16.greater(FP16.toHalf(-1.3f), FP16.toHalf(-1.3f)));
+        assertTrue(FP16.greater(FP16.toHalf(1.0f), FP16.toHalf(0.0f)));
+        assertFalse(FP16.greater(FP16.toHalf(0.0f), FP16.toHalf(1.0f)));
+        assertFalse(FP16.greater(FP16.toHalf(1.0f), FP16.toHalf(1.0f)));
+        assertFalse(FP16.greater(FP16.toHalf(1.3f), FP16.toHalf(1.3f)));
+        assertFalse(FP16.greater(FP16.toHalf(-0.1f), FP16.toHalf(0.0f)));
+        assertTrue(FP16.greater(FP16.toHalf(0.0f), FP16.toHalf(-0.1f)));
+        assertFalse(FP16.greater(FP16.toHalf(-0.1f), FP16.toHalf(-0.1f)));
+        assertTrue(FP16.greater(FP16.toHalf(0.1f), FP16.toHalf(0.0f)));
+        assertFalse(FP16.greater(FP16.toHalf(0.0f), FP16.toHalf(0.1f)));
+        assertFalse(FP16.greater(FP16.toHalf(0.1f), FP16.toHalf(0.1f)));
+    }
+
+    public static void testGreaterEquals() {
+        assertTrue(FP16.greaterEquals(FP16.POSITIVE_INFINITY, FP16.NEGATIVE_INFINITY));
+        assertTrue(FP16.greaterEquals(FP16.POSITIVE_INFINITY, FP16.MAX_VALUE));
+        assertFalse(FP16.greaterEquals(FP16.MAX_VALUE, FP16.POSITIVE_INFINITY));
+        assertFalse(FP16.greaterEquals(FP16.NEGATIVE_INFINITY, FP16.LOWEST_VALUE));
+        assertTrue(FP16.greaterEquals(FP16.LOWEST_VALUE, FP16.NEGATIVE_INFINITY));
+        assertTrue(FP16.greaterEquals(FP16.NEGATIVE_ZERO, FP16.POSITIVE_ZERO));
+        assertTrue(FP16.greaterEquals(FP16.POSITIVE_ZERO, FP16.NEGATIVE_ZERO));
+        assertFalse(FP16.greaterEquals(FP16.toHalf(12.3f), FP16.NaN));
+        assertFalse(FP16.greaterEquals(FP16.NaN, FP16.toHalf(12.3f)));
+        assertTrue(FP16.greaterEquals(FP16.MIN_NORMAL, FP16.MIN_VALUE));
+        assertFalse(FP16.greaterEquals(FP16.MIN_VALUE, FP16.MIN_NORMAL));
+        assertTrue(FP16.greaterEquals(FP16.toHalf(12.4f), FP16.toHalf(12.3f)));
+        assertFalse(FP16.greaterEquals(FP16.toHalf(12.3f), FP16.toHalf(12.4f)));
+        assertFalse(FP16.greaterEquals(FP16.toHalf(-12.4f), FP16.toHalf(-12.3f)));
+        assertTrue(FP16.greaterEquals(FP16.toHalf(-12.3f), FP16.toHalf(-12.4f)));
+        assertTrue(FP16.greaterEquals((short) 0x3ff, FP16.MIN_VALUE));
+        assertTrue(FP16.greaterEquals(FP16.NEGATIVE_INFINITY, FP16.NEGATIVE_INFINITY));
+        assertTrue(FP16.greaterEquals(FP16.POSITIVE_INFINITY, FP16.POSITIVE_INFINITY));
+        assertTrue(FP16.greaterEquals(FP16.toHalf(12.12356f), FP16.toHalf(12.12356f)));
+        assertTrue(FP16.greaterEquals(FP16.toHalf(-12.12356f), FP16.toHalf(-12.12356f)));
+
+        assertFalse(FP16.greaterEquals(FP16.toHalf(-1.0f), FP16.toHalf(0.0f)));
+        assertTrue(FP16.greaterEquals(FP16.toHalf(0.0f), FP16.toHalf(-1.0f)));
+        assertTrue(FP16.greaterEquals(FP16.toHalf(-1.0f), FP16.toHalf(-1.0f)));
+        assertTrue(FP16.greaterEquals(FP16.toHalf(-1.3f), FP16.toHalf(-1.3f)));
+        assertTrue(FP16.greaterEquals(FP16.toHalf(1.0f), FP16.toHalf(0.0f)));
+        assertFalse(FP16.greaterEquals(FP16.toHalf(0.0f), FP16.toHalf(1.0f)));
+        assertTrue(FP16.greaterEquals(FP16.toHalf(1.0f), FP16.toHalf(1.0f)));
+        assertTrue(FP16.greaterEquals(FP16.toHalf(1.3f), FP16.toHalf(1.3f)));
+        assertFalse(FP16.greaterEquals(FP16.toHalf(-0.1f), FP16.toHalf(0.0f)));
+        assertTrue(FP16.greaterEquals(FP16.toHalf(0.0f), FP16.toHalf(-0.1f)));
+        assertTrue(FP16.greaterEquals(FP16.toHalf(-0.1f), FP16.toHalf(-0.1f)));
+        assertTrue(FP16.greaterEquals(FP16.toHalf(0.1f), FP16.toHalf(0.0f)));
+        assertFalse(FP16.greaterEquals(FP16.toHalf(0.0f), FP16.toHalf(0.1f)));
+        assertTrue(FP16.greaterEquals(FP16.toHalf(0.1f), FP16.toHalf(0.1f)));
+    }
+
+    public static void testLess() {
+        assertTrue(FP16.less(FP16.NEGATIVE_INFINITY, FP16.POSITIVE_INFINITY));
+        assertTrue(FP16.less(FP16.MAX_VALUE, FP16.POSITIVE_INFINITY));
+        assertFalse(FP16.less(FP16.POSITIVE_INFINITY, FP16.MAX_VALUE));
+        assertFalse(FP16.less(FP16.LOWEST_VALUE, FP16.NEGATIVE_INFINITY));
+        assertTrue(FP16.less(FP16.NEGATIVE_INFINITY, FP16.LOWEST_VALUE));
+        assertFalse(FP16.less(FP16.POSITIVE_ZERO, FP16.NEGATIVE_ZERO));
+        assertFalse(FP16.less(FP16.NEGATIVE_ZERO, FP16.POSITIVE_ZERO));
+        assertFalse(FP16.less(FP16.NaN, FP16.toHalf(12.3f)));
+        assertFalse(FP16.less(FP16.toHalf(12.3f), FP16.NaN));
+        assertTrue(FP16.less(FP16.MIN_VALUE, FP16.MIN_NORMAL));
+        assertFalse(FP16.less(FP16.MIN_NORMAL, FP16.MIN_VALUE));
+        assertTrue(FP16.less(FP16.toHalf(12.3f), FP16.toHalf(12.4f)));
+        assertFalse(FP16.less(FP16.toHalf(12.4f), FP16.toHalf(12.3f)));
+        assertFalse(FP16.less(FP16.toHalf(-12.3f), FP16.toHalf(-12.4f)));
+        assertTrue(FP16.less(FP16.toHalf(-12.4f), FP16.toHalf(-12.3f)));
+        assertTrue(FP16.less(FP16.MIN_VALUE, (short) 0x3ff));
+
+        assertTrue(FP16.less(FP16.toHalf(-1.0f), FP16.toHalf(0.0f)));
+        assertFalse(FP16.less(FP16.toHalf(0.0f), FP16.toHalf(-1.0f)));
+        assertFalse(FP16.less(FP16.toHalf(-1.0f), FP16.toHalf(-1.0f)));
+        assertFalse(FP16.less(FP16.toHalf(-1.3f), FP16.toHalf(-1.3f)));
+        assertFalse(FP16.less(FP16.toHalf(1.0f), FP16.toHalf(0.0f)));
+        assertTrue(FP16.less(FP16.toHalf(0.0f), FP16.toHalf(1.0f)));
+        assertFalse(FP16.less(FP16.toHalf(1.0f), FP16.toHalf(1.0f)));
+        assertFalse(FP16.less(FP16.toHalf(1.3f), FP16.toHalf(1.3f)));
+        assertTrue(FP16.less(FP16.toHalf(-0.1f), FP16.toHalf(0.0f)));
+        assertFalse(FP16.less(FP16.toHalf(0.0f), FP16.toHalf(-0.1f)));
+        assertFalse(FP16.less(FP16.toHalf(-0.1f), FP16.toHalf(-0.1f)));
+        assertFalse(FP16.less(FP16.toHalf(0.1f), FP16.toHalf(0.0f)));
+        assertTrue(FP16.less(FP16.toHalf(0.0f), FP16.toHalf(0.1f)));
+        assertFalse(FP16.less(FP16.toHalf(0.1f), FP16.toHalf(0.1f)));
+    }
+
+    public static void testLessEquals() {
+        assertTrue(FP16.lessEquals(FP16.NEGATIVE_INFINITY, FP16.POSITIVE_INFINITY));
+        assertTrue(FP16.lessEquals(FP16.MAX_VALUE, FP16.POSITIVE_INFINITY));
+        assertFalse(FP16.lessEquals(FP16.POSITIVE_INFINITY, FP16.MAX_VALUE));
+        assertFalse(FP16.lessEquals(FP16.LOWEST_VALUE, FP16.NEGATIVE_INFINITY));
+        assertTrue(FP16.lessEquals(FP16.NEGATIVE_INFINITY, FP16.LOWEST_VALUE));
+        assertTrue(FP16.lessEquals(FP16.POSITIVE_ZERO, FP16.NEGATIVE_ZERO));
+        assertTrue(FP16.lessEquals(FP16.NEGATIVE_ZERO, FP16.POSITIVE_ZERO));
+        assertFalse(FP16.lessEquals(FP16.NaN, FP16.toHalf(12.3f)));
+        assertFalse(FP16.lessEquals(FP16.toHalf(12.3f), FP16.NaN));
+        assertTrue(FP16.lessEquals(FP16.MIN_VALUE, FP16.MIN_NORMAL));
+        assertFalse(FP16.lessEquals(FP16.MIN_NORMAL, FP16.MIN_VALUE));
+        assertTrue(FP16.lessEquals(FP16.toHalf(12.3f), FP16.toHalf(12.4f)));
+        assertFalse(FP16.lessEquals(FP16.toHalf(12.4f), FP16.toHalf(12.3f)));
+        assertFalse(FP16.lessEquals(FP16.toHalf(-12.3f), FP16.toHalf(-12.4f)));
+        assertTrue(FP16.lessEquals(FP16.toHalf(-12.4f), FP16.toHalf(-12.3f)));
+        assertTrue(FP16.lessEquals(FP16.MIN_VALUE, (short) 0x3ff));
+        assertTrue(FP16.lessEquals(FP16.NEGATIVE_INFINITY, FP16.NEGATIVE_INFINITY));
+        assertTrue(FP16.lessEquals(FP16.POSITIVE_INFINITY, FP16.POSITIVE_INFINITY));
+        assertTrue(FP16.lessEquals(FP16.toHalf(12.12356f), FP16.toHalf(12.12356f)));
+        assertTrue(FP16.lessEquals(FP16.toHalf(-12.12356f), FP16.toHalf(-12.12356f)));
+
+        assertTrue(FP16.lessEquals(FP16.toHalf(-1.0f), FP16.toHalf(0.0f)));
+        assertFalse(FP16.lessEquals(FP16.toHalf(0.0f), FP16.toHalf(-1.0f)));
+        assertTrue(FP16.lessEquals(FP16.toHalf(-1.0f), FP16.toHalf(-1.0f)));
+        assertTrue(FP16.lessEquals(FP16.toHalf(-1.3f), FP16.toHalf(-1.3f)));
+        assertFalse(FP16.lessEquals(FP16.toHalf(1.0f), FP16.toHalf(0.0f)));
+        assertTrue(FP16.lessEquals(FP16.toHalf(0.0f), FP16.toHalf(1.0f)));
+        assertTrue(FP16.lessEquals(FP16.toHalf(1.0f), FP16.toHalf(1.0f)));
+        assertTrue(FP16.lessEquals(FP16.toHalf(1.3f), FP16.toHalf(1.3f)));
+        assertTrue(FP16.lessEquals(FP16.toHalf(-0.1f), FP16.toHalf(0.0f)));
+        assertFalse(FP16.lessEquals(FP16.toHalf(0.0f), FP16.toHalf(-0.1f)));
+        assertTrue(FP16.lessEquals(FP16.toHalf(-0.1f), FP16.toHalf(-0.1f)));
+        assertFalse(FP16.lessEquals(FP16.toHalf(0.1f), FP16.toHalf(0.0f)));
+        assertTrue(FP16.lessEquals(FP16.toHalf(0.0f), FP16.toHalf(0.1f)));
+        assertTrue(FP16.lessEquals(FP16.toHalf(0.1f), FP16.toHalf(0.1f)));
+    }
+
+    public static void main(String args[]) {
+        testHalfToFloatToHalfConversions();
+        testToHalf();
+        testToFloat();
+        testFloor();
+        testCeil();
+        testRint();
+        testGreater();
+        testGreaterEquals();
+        testLessEquals();
+        testLess();
+    }
+}
diff --git a/test/593-checker-long-2-float-regression/src/Main.java b/test/593-checker-long-2-float-regression/src/Main.java
index 9c07f3d..b31cbde 100644
--- a/test/593-checker-long-2-float-regression/src/Main.java
+++ b/test/593-checker-long-2-float-regression/src/Main.java
@@ -37,10 +37,15 @@
   static float $noinline$longToFloat() {
     if (doThrow) { throw new Error(); }
     longValue = $inline$returnConst();
+    // This call prevents D8 from replacing the result of the sget instruction
+    // in line 43 by the result of the call to $inline$returnConst() in line 39.
+    $inline$preventRedundantFieldLoadEliminationInD8();
     return (float) longValue;
   }
 
   static long $inline$returnConst() {
     return 1L;
   }
+
+  static void $inline$preventRedundantFieldLoadEliminationInD8() {}
 }
diff --git a/test/595-profile-saving/expected.txt b/test/595-profile-saving/expected.txt
index 6a5618e..9e28e07 100644
--- a/test/595-profile-saving/expected.txt
+++ b/test/595-profile-saving/expected.txt
@@ -1 +1,2 @@
 JNI_OnLoad called
+IsForBootImage: true
diff --git a/test/595-profile-saving/profile-saving.cc b/test/595-profile-saving/profile-saving.cc
index b22d61e..d6ca447 100644
--- a/test/595-profile-saving/profile-saving.cc
+++ b/test/595-profile-saving/profile-saving.cc
@@ -63,5 +63,16 @@
                                                      art_method->GetDexMethodIndex()));
 }
 
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isForBootImage(JNIEnv* env,
+                                                               jclass,
+                                                               jstring filename) {
+  ScopedUtfChars filename_chars(env, filename);
+  CHECK(filename_chars.c_str() != nullptr);
+
+  ProfileCompilationInfo info;
+  info.Load(std::string(filename_chars.c_str()), /*clear_if_invalid=*/ false);
+  return info.IsForBootImage();
+}
+
 }  // namespace
 }  // namespace art
diff --git a/test/595-profile-saving/src/Main.java b/test/595-profile-saving/src/Main.java
index 18c0598..e0952e1 100644
--- a/test/595-profile-saving/src/Main.java
+++ b/test/595-profile-saving/src/Main.java
@@ -13,7 +13,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 import java.io.File;
 import java.io.IOException;
 import java.lang.reflect.Method;
@@ -42,6 +41,8 @@
         System.out.println("Class loader does not match boot class");
       }
       testAddMethodToProfile(file, bootMethod);
+
+      System.out.println("IsForBootImage: " + isForBootImage(file.getPath()));
     } finally {
       if (file != null) {
         file.delete();
@@ -66,6 +67,8 @@
   public static native void ensureProfileProcessing();
   // Checks if the profiles saver knows about the method.
   public static native boolean presentInProfile(String profile, Method method);
+  // Returns true if the profile is for the boot image.
+  public static native boolean isForBootImage(String profile);
 
   private static final String TEMP_FILE_NAME_PREFIX = "dummy";
   private static final String TEMP_FILE_NAME_SUFFIX = "-file";
diff --git a/test/597-deopt-busy-loop/expected.txt b/test/597-deopt-busy-loop/expected.txt
index f993efc..c8ab17d 100644
--- a/test/597-deopt-busy-loop/expected.txt
+++ b/test/597-deopt-busy-loop/expected.txt
@@ -1,2 +1,4 @@
 JNI_OnLoad called
-Finishing
+Simple loop finishing
+Float loop finishing
+Simd loop finishing
diff --git a/test/597-deopt-busy-loop/src/FloatLoop.java b/test/597-deopt-busy-loop/src/FloatLoop.java
new file mode 100644
index 0000000..57667a6
--- /dev/null
+++ b/test/597-deopt-busy-loop/src/FloatLoop.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This test checks that FP registers spill offset is correctly recorded in the SlowPath; by causing
+// asynchronous deoptimization in debuggable mode we observe the FP values in the interpreter.
+public class FloatLoop implements Runnable {
+    static final int numberOfThreads = 2;
+    volatile static boolean sExitFlag = false;
+    volatile static boolean sEntered = false;
+    int threadIndex;
+
+    FloatLoop(int index) {
+        threadIndex = index;
+    }
+
+    public static void main() throws Exception {
+        final Thread[] threads = new Thread[numberOfThreads];
+        for (int t = 0; t < threads.length; t++) {
+            threads[t] = new Thread(new FloatLoop(t));
+            threads[t].start();
+        }
+        for (Thread t : threads) {
+            t.join();
+        }
+
+        System.out.println("Float loop finishing");
+    }
+
+    static final float kFloatConst0 = 256.0f;
+    static final float kFloatConst1 = 128.0f;
+    static final int kArraySize = 128;
+    volatile static float floatField;
+
+    public void expectEqualToEither(float value, float expected0, float expected1) {
+        if (value != expected0 && value != expected1) {
+            throw new Error("Expected:  " + expected0 + " or "+ expected1 +
+                            ", found: " + value);
+        }
+    }
+
+    public void $noinline$busyLoop() {
+        Main.assertIsManaged();
+
+        // On Arm64:
+        // This loop is likely to be vectorized which causes the full 16-byte Q-register to be saved
+        // across slow paths.
+        int[] array = new int[kArraySize];
+        for (int i = 0; i < kArraySize; i++) {
+            array[i]++;
+        }
+
+        sEntered = true;
+        float s0 = kFloatConst0;
+        float s1 = kFloatConst1;
+        for (int i = 0; !sExitFlag; i++) {
+            if (i % 2 == 0) {
+                s0 += 2.0;
+                s1 += 2.0;
+            } else {
+                s0 -= 2.0;
+                s1 -= 2.0;
+            }
+            // SuspendCheckSlowPath must record correct stack offset for spilled FP registers.
+        }
+        Main.assertIsInterpreted();
+
+        expectEqualToEither(s0, kFloatConst0, kFloatConst0 + 2.0f);
+        expectEqualToEither(s1, kFloatConst1, kFloatConst1 + 2.0f);
+
+        floatField = s0 + s1;
+    }
+
+    public void run() {
+        if (threadIndex == 0) {
+            while (!sEntered) {
+              Thread.yield();
+            }
+            Main.deoptimizeAll();
+            sExitFlag = true;
+        } else {
+            $noinline$busyLoop();
+        }
+    }
+}
diff --git a/test/597-deopt-busy-loop/src/Main.java b/test/597-deopt-busy-loop/src/Main.java
index 46b6bbf..fc2821e 100644
--- a/test/597-deopt-busy-loop/src/Main.java
+++ b/test/597-deopt-busy-loop/src/Main.java
@@ -14,56 +14,26 @@
  * limitations under the License.
  */
 
-public class Main implements Runnable {
-    static final int numberOfThreads = 2;
-    volatile static boolean sExitFlag = false;
-    volatile static boolean sEntered = false;
-    int threadIndex;
+public class Main {
 
-    private static native void deoptimizeAll();
-    private static native void assertIsInterpreted();
-    private static native void assertIsManaged();
+    public static native void deoptimizeAll();
+    public static native void undeoptimizeAll();
+    public static native void assertIsInterpreted();
+    public static native void assertIsManaged();
     private static native void ensureJitCompiled(Class<?> cls, String methodName);
 
-    Main(int index) {
-        threadIndex = index;
-    }
-
     public static void main(String[] args) throws Exception {
         System.loadLibrary(args[0]);
 
-        final Thread[] threads = new Thread[numberOfThreads];
-        for (int t = 0; t < threads.length; t++) {
-            threads[t] = new Thread(new Main(t));
-            threads[t].start();
-        }
-        for (Thread t : threads) {
-            t.join();
-        }
-        System.out.println("Finishing");
-    }
+        ensureJitCompiled(SimpleLoop.class, "$noinline$busyLoop");
+        SimpleLoop.main();
 
-    public void $noinline$busyLoop() {
-        assertIsManaged();
-        sEntered = true;
-        for (;;) {
-            if (sExitFlag) {
-                break;
-            }
-        }
-        assertIsInterpreted();
-    }
+        undeoptimizeAll();
+        ensureJitCompiled(FloatLoop.class, "$noinline$busyLoop");
+        FloatLoop.main();
 
-    public void run() {
-        if (threadIndex == 0) {
-            while (!sEntered) {
-              Thread.yield();
-            }
-            deoptimizeAll();
-            sExitFlag = true;
-        } else {
-            ensureJitCompiled(Main.class, "$noinline$busyLoop");
-            $noinline$busyLoop();
-        }
+        undeoptimizeAll();
+        ensureJitCompiled(SimdLoop.class, "$noinline$busyLoop");
+        SimdLoop.main();
     }
 }
diff --git a/test/597-deopt-busy-loop/src/SimdLoop.java b/test/597-deopt-busy-loop/src/SimdLoop.java
new file mode 100644
index 0000000..4ad8858
--- /dev/null
+++ b/test/597-deopt-busy-loop/src/SimdLoop.java
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This test checks that values (e.g. induction variable) are properly set for SuspendCheck
+// environment in the SIMD loop; by causing asynchronous deoptimization in debuggable mode we
+// observe the values.
+public class SimdLoop implements Runnable {
+    static final int numberOfThreads = 2;
+    volatile static boolean sExitFlag = false;
+    volatile static boolean sEntered = false;
+    int threadIndex;
+
+    SimdLoop(int index) {
+        threadIndex = index;
+    }
+
+    public static void main() throws Exception {
+        final Thread[] threads = new Thread[numberOfThreads];
+        for (int t = 0; t < threads.length; t++) {
+            threads[t] = new Thread(new SimdLoop(t));
+            threads[t].start();
+        }
+        for (Thread t : threads) {
+            t.join();
+        }
+
+        System.out.println("Simd loop finishing");
+    }
+
+    static final int kArraySize = 3000000;
+
+    public void expectEqual(int value, int expected) {
+        if (value != expected) {
+            throw new Error("Expected:  " + expected + ", found: " + value);
+        }
+    }
+
+    public void $noinline$busyLoop() {
+        Main.assertIsManaged();
+
+        int[] array = new int[kArraySize];
+        sEntered = true;
+
+        // On Arm64:
+        // These loops are likely to be vectorized; when deoptimizing to interpreter the induction
+        // variable i will be set to wrong value (== 0).
+        //
+        // Copy-paste instead of nested loop is here to avoid extra loop suspend check.
+        for (int i = 0; i < kArraySize; i++) {
+            array[i]++;
+        }
+        for (int i = 0; i < kArraySize; i++) {
+            array[i]++;
+        }
+        for (int i = 0; i < kArraySize; i++) {
+            array[i]++;
+        }
+        for (int i = 0; i < kArraySize; i++) {
+            array[i]++;
+        }
+
+        // We might have managed to execute the whole loop before deoptimizeAll() happened.
+        if (sExitFlag) {
+            Main.assertIsInterpreted();
+        }
+        // Regression: the value of the induction variable might have been set to 0 when
+        // deoptimizing, causing another array[0]++ to be executed.
+        expectEqual(array[0], 4);
+    }
+
+    public void run() {
+        if (threadIndex == 0) {
+            while (!sEntered) {
+              Thread.yield();
+            }
+
+            Main.deoptimizeAll();
+            sExitFlag = true;
+        } else {
+            $noinline$busyLoop();
+        }
+    }
+}
diff --git a/test/597-deopt-busy-loop/src/SimpleLoop.java b/test/597-deopt-busy-loop/src/SimpleLoop.java
new file mode 100644
index 0000000..12298a9
--- /dev/null
+++ b/test/597-deopt-busy-loop/src/SimpleLoop.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class SimpleLoop implements Runnable {
+    static final int numberOfThreads = 2;
+    volatile static boolean sExitFlag = false;
+    volatile static boolean sEntered = false;
+    int threadIndex;
+
+    SimpleLoop(int index) {
+        threadIndex = index;
+    }
+
+    public static void main() throws Exception {
+        final Thread[] threads = new Thread[numberOfThreads];
+        for (int t = 0; t < threads.length; t++) {
+            threads[t] = new Thread(new SimpleLoop(t));
+            threads[t].start();
+        }
+        for (Thread t : threads) {
+            t.join();
+        }
+
+        System.out.println("Simple loop finishing");
+    }
+
+    public void $noinline$busyLoop() {
+        Main.assertIsManaged();
+        sEntered = true;
+        for (;;) {
+            if (sExitFlag) {
+                break;
+            }
+        }
+        Main.assertIsInterpreted();
+    }
+
+    public void run() {
+        if (threadIndex == 0) {
+            while (!sEntered) {
+              Thread.yield();
+            }
+            Main.deoptimizeAll();
+            sExitFlag = true;
+        } else {
+            $noinline$busyLoop();
+        }
+    }
+}
diff --git a/test/603-checker-instanceof/src/Main.java b/test/603-checker-instanceof/src/Main.java
index 2c97bed..1487969 100644
--- a/test/603-checker-instanceof/src/Main.java
+++ b/test/603-checker-instanceof/src/Main.java
@@ -59,7 +59,7 @@
   /// CHECK:          InstanceOf check_kind:exact_check
   /// CHECK-NOT:      {{.*gs:.*}}
 
-  /// CHECK-START-{ARM,ARM64,MIPS,MIPS64}: boolean Main.$noinline$instanceOfString(java.lang.Object) disassembly (after)
+  /// CHECK-START-{ARM,ARM64}: boolean Main.$noinline$instanceOfString(java.lang.Object) disassembly (after)
   /// CHECK:          InstanceOf check_kind:exact_check
   // For ARM and ARM64, the marking register (r8 and x20, respectively) can be used in
   // non-CC configs for any other purpose, so we'd need a config-specific checker test.
diff --git a/test/623-checker-loop-regressions/src/Main.java b/test/623-checker-loop-regressions/src/Main.java
index 4097e33..6fa38f8 100644
--- a/test/623-checker-loop-regressions/src/Main.java
+++ b/test/623-checker-loop-regressions/src/Main.java
@@ -294,9 +294,6 @@
   /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
   //
   // NOTE: should correctly deal with compressed and uncompressed cases.
-  //
-  /// CHECK-START-MIPS64: void Main.string2Bytes(char[], java.lang.String) loop_optimization (after)
-  /// CHECK-NOT: VecLoad
   private static void string2Bytes(char[] a, String b) {
     int min = Math.min(a.length, b.length());
     for (int i = 0; i < min; i++) {
@@ -357,12 +354,6 @@
   /// CHECK-DAG:               VecStore [{{l\d+}},<<Phi:i\d+>>,<<Repl>>] loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG:               VecStore [{{l\d+}},<<Phi>>,<<Repl>>]      loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-MIPS64: void Main.oneBoth(short[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<One:i\d+>>  IntConstant 1                             loop:none
-  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>]              loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},<<Phi:i\d+>>,<<Repl>>] loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},<<Phi>>,<<Repl>>]      loop:<<Loop>>      outer_loop:none
-  //
   // Bug b/37764324: integral same-length packed types can be mixed freely.
   private static void oneBoth(short[] a, char[] b) {
     for (int i = 0; i < Math.min(a.length, b.length); i++) {
@@ -411,17 +402,6 @@
   /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Add>>]              loop:<<Loop2>>      outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi2>>,<<Cnv>>]  loop:<<Loop2>>      outer_loop:none
   //
-  /// CHECK-START-MIPS64: void Main.typeConv(byte[], byte[]) loop_optimization (after)
-  /// CHECK-DAG: <<One:i\d+>>  IntConstant 1                         loop:none
-  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<One>>]          loop:none
-  /// CHECK-DAG: <<Load:d\d+>> VecLoad [{{l\d+}},<<Phi1:i\d+>>]      loop:<<Loop1:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Vadd:d\d+>> VecAdd [<<Load>>,<<Repl>>]            loop:<<Loop1>>      outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},<<Phi1>>,<<Vadd>>] loop:<<Loop1>>      outer_loop:none
-  /// CHECK-DAG: <<Get:b\d+>>  ArrayGet [{{l\d+}},<<Phi2:i\d+>>]     loop:<<Loop2:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<One>>]                 loop:<<Loop2>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Add>>]              loop:<<Loop2>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi2>>,<<Cnv>>]  loop:<<Loop2>>      outer_loop:none
-  //
   // Scalar code in cleanup loop uses correct byte type on array get and type conversion.
   private static void typeConv(byte[] a, byte[] b) {
     int len = Math.min(a.length, b.length);
diff --git a/test/624-checker-stringops/smali/Smali.smali b/test/624-checker-stringops/smali/Smali.smali
index f8b9275..3252cde 100644
--- a/test/624-checker-stringops/smali/Smali.smali
+++ b/test/624-checker-stringops/smali/Smali.smali
@@ -47,17 +47,17 @@
 ## CHECK-START: int Smali.builderLen2() instruction_simplifier (before)
 ## CHECK-DAG: <<New:l\d+>>     NewInstance
 ## CHECK-DAG: <<String1:l\d+>> LoadString
-## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>]     intrinsic:StringBuilderAppend
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>]     intrinsic:StringBuilderAppendString
 ## CHECK-DAG: <<String2:l\d+>> LoadString
-## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Append1>>,<<String2>>] intrinsic:StringBuilderAppend
+## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Append1>>,<<String2>>] intrinsic:StringBuilderAppendString
 ## CHECK-DAG:                  InvokeVirtual [<<Append2>>]             intrinsic:StringBuilderLength
 
 ## CHECK-START: int Smali.builderLen2() instruction_simplifier (after)
 ## CHECK-DAG: <<New:l\d+>>     NewInstance
 ## CHECK-DAG: <<String1:l\d+>> LoadString
-## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBuilderAppend
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBuilderAppendString
 ## CHECK-DAG: <<String2:l\d+>> LoadString
-## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<New>>,<<String2>>] intrinsic:StringBuilderAppend
+## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<New>>,<<String2>>] intrinsic:StringBuilderAppendString
 ## CHECK-DAG:                  InvokeVirtual [<<New>>]             intrinsic:StringBuilderLength
 .method public static builderLen2()I
     .registers 3
@@ -84,13 +84,13 @@
 ## CHECK-DAG: <<New:l\d+>>     NewInstance                                                           loop:none
 ## CHECK-DAG: <<String1:l\d+>> LoadString                                                            loop:<<Loop:B\d+>>
 ## CHECK-DAG: <<Null1:l\d+>>   NullCheck     [<<New>>]                                               loop:<<Loop>>
-## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<Null1>>,<<String1>>] intrinsic:StringBufferAppend    loop:<<Loop>>
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<Null1>>,<<String1>>]   intrinsic:StringBufferAppend  loop:<<Loop>>
 ## CHECK-DAG: <<String2:l\d+>> LoadString                                                            loop:<<Loop>>
 ## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Append1>>,<<String2>>] intrinsic:StringBufferAppend  loop:<<Loop>>
 ## CHECK-DAG: <<String3:l\d+>> LoadString                                                            loop:<<Loop>>
 ## CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<Append2>>,<<String3>>] intrinsic:StringBufferAppend  loop:<<Loop>>
 ## CHECK-DAG: <<Null4:l\d+>>   NullCheck     [<<New>>]                                               loop:none
-## CHECK-DAG:                  InvokeVirtual [<<Null4>>]             intrinsic:StringBufferLength    loop:none
+## CHECK-DAG:                  InvokeVirtual [<<Null4>>]               intrinsic:StringBufferLength  loop:none
 
 ## CHECK-START: int Smali.bufferLoopAppender() instruction_simplifier (after)
 ## CHECK-DAG: <<New:l\d+>>     NewInstance                                                       loop:none
@@ -138,26 +138,26 @@
 .end method
 
 ## CHECK-START: int Smali.builderLoopAppender() instruction_simplifier (before)
-## CHECK-DAG: <<New:l\d+>>     NewInstance                                                           loop:none
-## CHECK-DAG: <<String1:l\d+>> LoadString                                                            loop:<<Loop:B\d+>>
-## CHECK-DAG: <<Null1:l\d+>>   NullCheck     [<<New>>]                                               loop:<<Loop>>
-## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<Null1>>,<<String1>>]   intrinsic:StringBuilderAppend loop:<<Loop>>
-## CHECK-DAG: <<String2:l\d+>> LoadString                                                            loop:<<Loop>>
-## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Append1>>,<<String2>>] intrinsic:StringBuilderAppend loop:<<Loop>>
-## CHECK-DAG: <<String3:l\d+>> LoadString                                                            loop:<<Loop>>
-## CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<Append2>>,<<String3>>] intrinsic:StringBuilderAppend loop:<<Loop>>
-## CHECK-DAG: <<Null4:l\d+>>   NullCheck     [<<New>>]                                               loop:none
-## CHECK-DAG:                  InvokeVirtual [<<Null4>>]               intrinsic:StringBuilderLength loop:none
+## CHECK-DAG: <<New:l\d+>>     NewInstance                                                                 loop:none
+## CHECK-DAG: <<String1:l\d+>> LoadString                                                                  loop:<<Loop:B\d+>>
+## CHECK-DAG: <<Null1:l\d+>>   NullCheck     [<<New>>]                                                     loop:<<Loop>>
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<Null1>>,<<String1>>]   intrinsic:StringBuilderAppendString loop:<<Loop>>
+## CHECK-DAG: <<String2:l\d+>> LoadString                                                                  loop:<<Loop>>
+## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<Append1>>,<<String2>>] intrinsic:StringBuilderAppendString loop:<<Loop>>
+## CHECK-DAG: <<String3:l\d+>> LoadString                                                                  loop:<<Loop>>
+## CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<Append2>>,<<String3>>] intrinsic:StringBuilderAppendString loop:<<Loop>>
+## CHECK-DAG: <<Null4:l\d+>>   NullCheck     [<<New>>]                                                     loop:none
+## CHECK-DAG:                  InvokeVirtual [<<Null4>>]               intrinsic:StringBuilderLength       loop:none
 
 ## CHECK-START: int Smali.builderLoopAppender() instruction_simplifier (after)
-## CHECK-DAG: <<New:l\d+>>     NewInstance                                                       loop:none
-## CHECK-DAG: <<String1:l\d+>> LoadString                                                        loop:<<Loop:B\d+>>
-## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBuilderAppend loop:<<Loop>>
-## CHECK-DAG: <<String2:l\d+>> LoadString                                                        loop:<<Loop>>
-## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<New>>,<<String2>>] intrinsic:StringBuilderAppend loop:<<Loop>>
-## CHECK-DAG: <<String3:l\d+>> LoadString                                                        loop:<<Loop>>
-## CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<New>>,<<String3>>] intrinsic:StringBuilderAppend loop:<<Loop>>
-## CHECK-DAG:                  InvokeVirtual [<<New>>]             intrinsic:StringBuilderLength loop:none
+## CHECK-DAG: <<New:l\d+>>     NewInstance                                                             loop:none
+## CHECK-DAG: <<String1:l\d+>> LoadString                                                              loop:<<Loop:B\d+>>
+## CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBuilderAppendString loop:<<Loop>>
+## CHECK-DAG: <<String2:l\d+>> LoadString                                                              loop:<<Loop>>
+## CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<New>>,<<String2>>] intrinsic:StringBuilderAppendString loop:<<Loop>>
+## CHECK-DAG: <<String3:l\d+>> LoadString                                                              loop:<<Loop>>
+## CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<New>>,<<String3>>] intrinsic:StringBuilderAppendString loop:<<Loop>>
+## CHECK-DAG:                  InvokeVirtual [<<New>>]             intrinsic:StringBuilderLength       loop:none
 .method public static builderLoopAppender()I
     .registers 4
 
diff --git a/test/624-checker-stringops/src/Main.java b/test/624-checker-stringops/src/Main.java
index f52d81a..055a4d7 100644
--- a/test/624-checker-stringops/src/Main.java
+++ b/test/624-checker-stringops/src/Main.java
@@ -136,17 +136,17 @@
   /// CHECK-START: int Main.builderLen2() instruction_simplifier (before)
   /// CHECK-DAG: <<New:l\d+>>     NewInstance
   /// CHECK-DAG: <<String1:l\d+>> LoadString
-  /// CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>]  intrinsic:StringBuilderAppend
+  /// CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>]  intrinsic:StringBuilderAppendString
   /// CHECK-DAG: <<String2:l\d+>> LoadString
-  /// CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [{{l\d+}},<<String2>>] intrinsic:StringBuilderAppend
+  /// CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [{{l\d+}},<<String2>>] intrinsic:StringBuilderAppendString
   /// CHECK-DAG:                  InvokeVirtual [{{l\d+}}]             intrinsic:StringBuilderLength
   //
   /// CHECK-START: int Main.builderLen2() instruction_simplifier (after)
   /// CHECK-DAG: <<New:l\d+>>     NewInstance
   /// CHECK-DAG: <<String1:l\d+>> LoadString
-  /// CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBuilderAppend
+  /// CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBuilderAppendString
   /// CHECK-DAG: <<String2:l\d+>> LoadString
-  /// CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<New>>,<<String2>>] intrinsic:StringBuilderAppend
+  /// CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<New>>,<<String2>>] intrinsic:StringBuilderAppendString
   /// CHECK-DAG:                  InvokeVirtual [<<New>>]             intrinsic:StringBuilderLength
   static int builderLen2() {
     StringBuilder s = new StringBuilder();
@@ -200,25 +200,25 @@
   // Similar situation in a loop.
   //
   /// CHECK-START: int Main.builderLoopAppender() instruction_simplifier (before)
-  /// CHECK-DAG: <<New:l\d+>>     NewInstance                                                         loop:none
-  /// CHECK-DAG: <<String1:l\d+>> LoadString                                                          loop:<<Loop:B\d+>>
-  /// CHECK-DAG: <<Null1:l\d+>>   NullCheck     [<<New>>]                                             loop:<<Loop>>
-  /// CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<Null1>>,<<String1>>] intrinsic:StringBuilderAppend loop:<<Loop>>
-  /// CHECK-DAG: <<String2:l\d+>> LoadString                                                          loop:<<Loop>>
-  /// CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [{{l\d+}},<<String2>>]  intrinsic:StringBuilderAppend loop:<<Loop>>
-  /// CHECK-DAG: <<String3:l\d+>> LoadString                                                          loop:<<Loop>>
-  /// CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [{{l\d+}},<<String3>>]  intrinsic:StringBuilderAppend loop:<<Loop>>
-  /// CHECK-DAG:                  InvokeVirtual [{{l\d+}}]              intrinsic:StringBuilderLength loop:none
+  /// CHECK-DAG: <<New:l\d+>>     NewInstance                                                               loop:none
+  /// CHECK-DAG: <<String1:l\d+>> LoadString                                                                loop:<<Loop:B\d+>>
+  /// CHECK-DAG: <<Null1:l\d+>>   NullCheck     [<<New>>]                                                   loop:<<Loop>>
+  /// CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<Null1>>,<<String1>>] intrinsic:StringBuilderAppendString loop:<<Loop>>
+  /// CHECK-DAG: <<String2:l\d+>> LoadString                                                                loop:<<Loop>>
+  /// CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [{{l\d+}},<<String2>>]  intrinsic:StringBuilderAppendString loop:<<Loop>>
+  /// CHECK-DAG: <<String3:l\d+>> LoadString                                                                loop:<<Loop>>
+  /// CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [{{l\d+}},<<String3>>]  intrinsic:StringBuilderAppendString loop:<<Loop>>
+  /// CHECK-DAG:                  InvokeVirtual [{{l\d+}}]              intrinsic:StringBuilderLength       loop:none
   //
   /// CHECK-START: int Main.builderLoopAppender() instruction_simplifier (after)
-  /// CHECK-DAG: <<New:l\d+>>     NewInstance                                                       loop:none
-  /// CHECK-DAG: <<String1:l\d+>> LoadString                                                        loop:<<Loop:B\d+>>
-  /// CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBuilderAppend loop:<<Loop>>
-  /// CHECK-DAG: <<String2:l\d+>> LoadString                                                        loop:<<Loop>>
-  /// CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<New>>,<<String2>>] intrinsic:StringBuilderAppend loop:<<Loop>>
-  /// CHECK-DAG: <<String3:l\d+>> LoadString                                                        loop:<<Loop>>
-  /// CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<New>>,<<String3>>] intrinsic:StringBuilderAppend loop:<<Loop>>
-  /// CHECK-DAG:                  InvokeVirtual [<<New>>]             intrinsic:StringBuilderLength loop:none
+  /// CHECK-DAG: <<New:l\d+>>     NewInstance                                                             loop:none
+  /// CHECK-DAG: <<String1:l\d+>> LoadString                                                              loop:<<Loop:B\d+>>
+  /// CHECK-DAG: <<Append1:l\d+>> InvokeVirtual [<<New>>,<<String1>>] intrinsic:StringBuilderAppendString loop:<<Loop>>
+  /// CHECK-DAG: <<String2:l\d+>> LoadString                                                              loop:<<Loop>>
+  /// CHECK-DAG: <<Append2:l\d+>> InvokeVirtual [<<New>>,<<String2>>] intrinsic:StringBuilderAppendString loop:<<Loop>>
+  /// CHECK-DAG: <<String3:l\d+>> LoadString                                                              loop:<<Loop>>
+  /// CHECK-DAG: <<Append3:l\d+>> InvokeVirtual [<<New>>,<<String3>>] intrinsic:StringBuilderAppendString loop:<<Loop>>
+  /// CHECK-DAG:                  InvokeVirtual [<<New>>]             intrinsic:StringBuilderLength       loop:none
   static int builderLoopAppender() {
     StringBuilder b = new StringBuilder();
     for (int i = 0; i < 10; i++) {
diff --git a/test/626-checker-arm64-scratch-register/smali/Main2.smali b/test/626-checker-arm64-scratch-register/smali/Main2.smali
index 914ae6e..f37aca6 100644
--- a/test/626-checker-arm64-scratch-register/smali/Main2.smali
+++ b/test/626-checker-arm64-scratch-register/smali/Main2.smali
@@ -176,7 +176,7 @@
 #       // For the purpose of this regression test, the order of
 #       // definition of these float variable matters.  Likewise with the
 #       // order of the instructions where these variables are used below.
-#       // Reordering these lines make make the original (b/32545705)
+#       // Reordering these lines makes the original (b/32545705)
 #       // issue vanish.
 #       float f17 = b17 ? 0.0f : 1.0f;
 #       float f16 = b16 ? 0.0f : 1.0f;
diff --git a/test/638-checker-inline-cache-intrinsic/run b/test/638-checker-inline-cache-intrinsic/run
index 1540310..814181d 100644
--- a/test/638-checker-inline-cache-intrinsic/run
+++ b/test/638-checker-inline-cache-intrinsic/run
@@ -14,6 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Set threshold to 100 to math the iterations done in the test.
+# Set threshold to 1000 to match the iterations done in the test.
 # Pass --verbose-methods to only generate the CFG of these methods.
-exec ${RUN} --jit --runtime-option -Xjitthreshold:100 -Xcompiler-option --verbose-methods=inlineMonomorphic,knownReceiverType,stringEquals $@
+# The test is for JIT, but we run in "optimizing" (AOT) mode, so that the Checker
+# stanzas in test/638-checker-inline-cache-intrinsic/src/Main.java will be checked.
+# Also pass a large JIT code cache size to avoid getting the inline caches GCed.
+exec ${RUN} --jit --runtime-option -Xjitinitialsize:32M --runtime-option -Xjitthreshold:1000 -Xcompiler-option --verbose-methods=inlineMonomorphic,knownReceiverType,stringEquals $@
diff --git a/test/638-checker-inline-cache-intrinsic/src/Main.java b/test/638-checker-inline-cache-intrinsic/src/Main.java
index 4a9aba5..5334487 100644
--- a/test/638-checker-inline-cache-intrinsic/src/Main.java
+++ b/test/638-checker-inline-cache-intrinsic/src/Main.java
@@ -64,10 +64,10 @@
 
   public static void test() {
     // Warm up inline cache.
-    for (int i = 0; i < 45; i++) {
+    for (int i = 0; i < 600000; i++) {
       $noinline$inlineMonomorphic(str);
     }
-    for (int i = 0; i < 60; i++) {
+    for (int i = 0; i < 600000; i++) {
       $noinline$stringEquals(str);
     }
     ensureJitCompiled(Main.class, "$noinline$stringEquals");
diff --git a/test/638-no-line-number/expected.txt b/test/638-no-line-number/expected.txt
index 4b351f4..3b6ff96 100644
--- a/test/638-no-line-number/expected.txt
+++ b/test/638-no-line-number/expected.txt
@@ -1,5 +1,5 @@
 java.lang.Error
 	at Main.main(Unknown Source:2)
-java.lang.NullPointerException: throw with null exception
+java.lang.NullPointerException
 	at Main.doThrow(Unknown Source:0)
 	at Main.main(Unknown Source:16)
diff --git a/test/640-checker-boolean-simd/src/Main.java b/test/640-checker-boolean-simd/src/Main.java
index 7d98e68..5035ab2 100644
--- a/test/640-checker-boolean-simd/src/Main.java
+++ b/test/640-checker-boolean-simd/src/Main.java
@@ -29,7 +29,7 @@
   /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.and(boolean) loop_optimization (after)
+  /// CHECK-START-{ARM,ARM64}: void Main.and(boolean) loop_optimization (after)
   /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: VecAnd   loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
@@ -42,7 +42,7 @@
   /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.or(boolean) loop_optimization (after)
+  /// CHECK-START-{ARM,ARM64}: void Main.or(boolean) loop_optimization (after)
   /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: VecOr    loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
@@ -55,7 +55,7 @@
   /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.xor(boolean) loop_optimization (after)
+  /// CHECK-START-{ARM,ARM64}: void Main.xor(boolean) loop_optimization (after)
   /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: VecXor   loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
@@ -68,7 +68,7 @@
   /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.not() loop_optimization (after)
+  /// CHECK-START-{ARM,ARM64}: void Main.not() loop_optimization (after)
   /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG: VecNot   loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
diff --git a/test/640-checker-byte-simd/info.txt b/test/640-checker-byte-simd/info.txt
deleted file mode 100644
index c9c6d5e..0000000
--- a/test/640-checker-byte-simd/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on SIMD vectorization.
diff --git a/test/640-checker-byte-simd/src/Main.java b/test/640-checker-byte-simd/src/Main.java
deleted file mode 100644
index 6b69127..0000000
--- a/test/640-checker-byte-simd/src/Main.java
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Functional tests for SIMD vectorization.
- */
-public class Main {
-
-  static byte[] a;
-
-  //
-  // Arithmetic operations.
-  //
-
-  /// CHECK-START: void Main.add(int) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.add(int) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecAdd   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void add(int x) {
-    for (int i = 0; i < 128; i++)
-      a[i] += x;
-  }
-
-  /// CHECK-START: void Main.sub(int) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.sub(int) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecSub   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void sub(int x) {
-    for (int i = 0; i < 128; i++)
-      a[i] -= x;
-  }
-
-  /// CHECK-START: void Main.mul(int) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.mul(int) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecMul   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void mul(int x) {
-    for (int i = 0; i < 128; i++)
-      a[i] *= x;
-  }
-
-  /// CHECK-START: void Main.div(int) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.div(int) loop_optimization (after)
-  //
-  //  Not supported on any architecture.
-  //
-  static void div(int x) {
-    for (int i = 0; i < 128; i++)
-      a[i] /= x;
-  }
-
-  /// CHECK-START: void Main.neg() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.neg() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecNeg   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void neg() {
-    for (int i = 0; i < 128; i++)
-      a[i] = (byte) -a[i];
-  }
-
-  /// CHECK-START: void Main.not() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.not() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecNot   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void not() {
-    for (int i = 0; i < 128; i++)
-      a[i] = (byte) ~a[i];
-  }
-
-  /// CHECK-START: void Main.shl4() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.shl4() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecShl   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void shl4() {
-    for (int i = 0; i < 128; i++)
-      a[i] <<= 4;
-  }
-
-  /// CHECK-START: void Main.sar2() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.sar2() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecShr   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void sar2() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>= 2;
-  }
-
-  /// CHECK-START: void Main.shr2() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  // TODO: would need signess flip.
-  /// CHECK-START: void Main.shr2() loop_optimization (after)
-  /// CHECK-NOT: VecUShr
-  static void shr2() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= 2;
-  }
-
-  //
-  // Shift sanity.
-  //
-
-  static void sar31() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>= 31;
-  }
-
-  static void shr31() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= 31;
-  }
-
-  static void shr32() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= 32;  // 0, since & 31
-  }
-
-  static void shr33() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= 33;  // 1, since & 31
-  }
-
-  static void shl9() {
-    for (int i = 0; i < 128; i++)
-      a[i] <<= 9;  // yields all-zeros
-  }
-
-  //
-  // Loop bounds.
-  //
-
-  static void bounds() {
-    for (int i = 1; i < 127; i++)
-      a[i] += 11;
-  }
-
-  //
-  // Test Driver.
-  //
-
-  public static void main(String[] args) {
-    // Set up.
-    a = new byte[128];
-    for (int i = 0; i < 128; i++) {
-      a[i] = (byte) i;
-    }
-    // Arithmetic operations.
-    add(2);
-    for (int i = 0; i < 128; i++) {
-      expectEquals((byte)(i + 2), a[i], "add");
-    }
-    sub(2);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i, a[i], "sub");
-    }
-    mul(2);
-    for (int i = 0; i < 128; i++) {
-      expectEquals((byte)(i + i), a[i], "mul");
-    }
-    div(2);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(((byte)(i + i)) >> 1, a[i], "div");
-      a[i] = (byte) i;  // undo arithmetic wrap-around effects
-    }
-    neg();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(-i, a[i], "neg");
-    }
-    // Loop bounds.
-    bounds();
-    expectEquals(0, a[0], "bounds0");
-    for (int i = 1; i < 127; i++) {
-      expectEquals(11 - i, a[i], "bounds");
-    }
-    expectEquals(-127, a[127], "bounds127");
-    // Shifts.
-    for (int i = 0; i < 128; i++) {
-      a[i] = (byte) 0xff;
-    }
-    shl4();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((byte) 0xf0, a[i], "shl4");
-    }
-    sar2();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((byte) 0xfc, a[i], "sar2");
-    }
-    shr2();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((byte) 0xff, a[i], "shr2");  // sic!
-    }
-    sar31();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((byte) 0xff, a[i], "sar31");
-    }
-    shr31();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(0x01, a[i], "shr31");
-      a[i] = (byte) 0x12;  // reset
-    }
-    shr32();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((byte) 0x12, a[i], "shr32");
-    }
-    shr33();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((byte) 0x09, a[i], "shr33");
-    }
-    shl9();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((byte) 0x00, a[i], "shl9");
-      a[i] = (byte) 0xf0;  // reset
-    }
-    not();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((byte) 0x0f, a[i], "not");
-    }
-    // Done.
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result, String action) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
-    }
-  }
-}
diff --git a/test/640-checker-char-simd/expected.txt b/test/640-checker-char-simd/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/640-checker-char-simd/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/640-checker-char-simd/info.txt b/test/640-checker-char-simd/info.txt
deleted file mode 100644
index c9c6d5e..0000000
--- a/test/640-checker-char-simd/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on SIMD vectorization.
diff --git a/test/640-checker-char-simd/src/Main.java b/test/640-checker-char-simd/src/Main.java
deleted file mode 100644
index 317a666..0000000
--- a/test/640-checker-char-simd/src/Main.java
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Functional tests for SIMD vectorization.
- */
-public class Main {
-
-  static char[] a;
-
-  //
-  // Arithmetic operations.
-  //
-
-  /// CHECK-START: void Main.add(int) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.add(int) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecAdd   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void add(int x) {
-    for (int i = 0; i < 128; i++)
-      a[i] += x;
-  }
-
-  /// CHECK-START: void Main.sub(int) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.sub(int) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecSub   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void sub(int x) {
-    for (int i = 0; i < 128; i++)
-      a[i] -= x;
-  }
-
-  /// CHECK-START: void Main.mul(int) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.mul(int) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecMul   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void mul(int x) {
-    for (int i = 0; i < 128; i++)
-      a[i] *= x;
-  }
-
-  /// CHECK-START: void Main.div(int) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.div(int) loop_optimization (after)
-  /// CHECK-NOT: VecDiv
-  //
-  //  Not supported on any architecture.
-  //
-  static void div(int x) {
-    for (int i = 0; i < 128; i++)
-      a[i] /= x;
-  }
-
-  /// CHECK-START: void Main.neg() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.neg() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecNeg   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void neg() {
-    for (int i = 0; i < 128; i++)
-      a[i] = (char) -a[i];
-  }
-
-  /// CHECK-START: void Main.not() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.not() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecNot   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void not() {
-    for (int i = 0; i < 128; i++)
-      a[i] = (char) ~a[i];
-  }
-
-  /// CHECK-START: void Main.shl4() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.shl4() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecShl   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void shl4() {
-    for (int i = 0; i < 128; i++)
-      a[i] <<= 4;
-  }
-
-  /// CHECK-START: void Main.sar2() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  // TODO: would need signess flip.
-  /// CHECK-START: void Main.sar2() loop_optimization (after)
-  /// CHECK-NOT: VecShr
-  static void sar2() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>= 2;
-  }
-
-  /// CHECK-START: void Main.shr2() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.shr2() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecUShr  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void shr2() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= 2;
-  }
-
-  //
-  // Shift sanity.
-  //
-
-  static void sar31() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>= 31;
-  }
-
-  static void shr31() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= 31;
-  }
-
-  static void shr32() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= 32;  // 0, since & 31
-  }
-
-  static void shr33() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= 33;  // 1, since & 31
-  }
-
-  //
-  // Loop bounds.
-  //
-
-  static void bounds() {
-    for (int i = 1; i < 127; i++)
-      a[i] += 11;
-  }
-
-  //
-  // Test Driver.
-  //
-
-  public static void main(String[] args) {
-    // Set up.
-    a = new char[128];
-    for (int i = 0; i < 128; i++) {
-      a[i] = (char) i;
-    }
-    // Arithmetic operations.
-    add(2);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i + 2, a[i], "add");
-    }
-    sub(2);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i, a[i], "sub");
-    }
-    mul(2);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i + i, a[i], "mul");
-    }
-    div(2);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i, a[i], "div");
-    }
-    neg();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((char)-i, a[i], "neg");
-    }
-    // Loop bounds.
-    bounds();
-    expectEquals(0, a[0], "bounds0");
-    for (int i = 1; i < 127; i++) {
-      expectEquals((char)(11 - i), a[i], "bounds");
-    }
-    expectEquals((char)-127, a[127], "bounds127");
-    // Shifts.
-    for (int i = 0; i < 128; i++) {
-      a[i] = (char) 0xffff;
-    }
-    shl4();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((char) 0xfff0, a[i], "shl4");
-    }
-    sar2();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((char) 0x3ffc, a[i], "sar2");
-    }
-    shr2();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((char) 0x0fff, a[i], "shr2");
-      a[i] = (char) 0xffff;  // reset
-    }
-    sar31();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(0, a[i], "sar31");
-      a[i] = (char) 0xffff;  // reset
-    }
-    shr31();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(0, a[i], "shr31");
-      a[i] = (char) 0x1200;  // reset
-    }
-    shr32();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((char) 0x1200, a[i], "shr32");
-    }
-    shr33();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((char) 0x0900, a[i], "shr33");
-      a[i] = (char) 0xf1f0;  // reset
-    }
-    not();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((char) 0x0e0f, a[i], "not");
-    }
-    // Done.
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result, String action) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
-    }
-  }
-}
diff --git a/test/640-checker-double-simd/expected.txt b/test/640-checker-double-simd/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/640-checker-double-simd/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/640-checker-double-simd/info.txt b/test/640-checker-double-simd/info.txt
deleted file mode 100644
index c9c6d5e..0000000
--- a/test/640-checker-double-simd/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on SIMD vectorization.
diff --git a/test/640-checker-double-simd/src/Main.java b/test/640-checker-double-simd/src/Main.java
deleted file mode 100644
index 0f04f73..0000000
--- a/test/640-checker-double-simd/src/Main.java
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Functional tests for SIMD vectorization. Note that this class provides a mere
- * functional test, not a precise numerical verifier.
- */
-public class Main {
-
-  static double[] a;
-
-  //
-  // Arithmetic operations.
-  //
-
-  /// CHECK-START: void Main.add(double) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.add(double) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecAdd   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void add(double x) {
-    for (int i = 0; i < 128; i++)
-      a[i] += x;
-  }
-
-  /// CHECK-START: void Main.sub(double) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.sub(double) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecSub   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void sub(double x) {
-    for (int i = 0; i < 128; i++)
-      a[i] -= x;
-  }
-
-  /// CHECK-START: void Main.mul(double) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.mul(double) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecMul   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void mul(double x) {
-    for (int i = 0; i < 128; i++)
-      a[i] *= x;
-  }
-
-  /// CHECK-START: void Main.div(double) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.div(double) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecDiv   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void div(double x) {
-    for (int i = 0; i < 128; i++)
-      a[i] /= x;
-  }
-
-  /// CHECK-START: void Main.neg() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.neg() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecNeg   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void neg() {
-    for (int i = 0; i < 128; i++)
-      a[i] = -a[i];
-  }
-
-  /// CHECK-START: void Main.abs() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.abs() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecAbs   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void abs() {
-    for (int i = 0; i < 128; i++)
-      a[i] = Math.abs(a[i]);
-  }
-
-  /// CHECK-START: void Main.conv(long[]) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.conv(long[]) loop_optimization (after)
-  /// CHECK-NOT: VecLoad
-  /// CHECK-NOT: VecStore
-  //
-  // TODO: fill in when long2double is supported
-  static void conv(long[] b) {
-    for (int i = 0; i < 128; i++)
-      a[i] = b[i];
-  }
-
-  //
-  // Loop bounds.
-  //
-
-  static void bounds() {
-    for (int i = 1; i < 127; i++)
-      a[i] += 11;
-  }
-
-  //
-  // Test Driver.
-  //
-
-  public static void main(String[] args) {
-    // Set up.
-    a = new double[128];
-    for (int i = 0; i < 128; i++) {
-      a[i] = i;
-    }
-    // Arithmetic operations.
-    add(2.0);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i + 2, a[i], "add");
-    }
-    sub(2.0);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i, a[i], "sub");
-    }
-    mul(2.0);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i + i, a[i], "mul");
-    }
-    div(2.0);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i, a[i], "div");
-    }
-    neg();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(-i, a[i], "neg");
-    }
-    // Loop bounds.
-    bounds();
-    expectEquals(0, a[0], "bounds0");
-    for (int i = 1; i < 127; i++) {
-      expectEquals(11 - i, a[i], "bounds");
-    }
-    expectEquals(-127, a[127], "bounds127");
-    // Abs.
-    abs();
-    expectEquals(0, a[0], "abs0");
-    for (int i = 1; i <= 11; i++) {
-      expectEquals(11 - i, a[i], "abs_lo");
-    }
-    for (int i = 12; i < 127; i++) {
-      expectEquals(i - 11, a[i], "abs_hi");
-    }
-    expectEquals(127, a[127], "abs127");
-    // Conversion.
-    long[] b = new long[128];
-    for (int i = 0; i < 128; i++) {
-      b[i] = 1000 * i;
-    }
-    conv(b);
-    for (int i = 1; i < 127; i++) {
-      expectEquals(1000.0 * i, a[i], "conv");
-    }
-    // Done.
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(double expected, double result, String action) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
-    }
-  }
-}
diff --git a/test/640-checker-float-simd/expected.txt b/test/640-checker-float-simd/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/640-checker-float-simd/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/640-checker-float-simd/info.txt b/test/640-checker-float-simd/info.txt
deleted file mode 100644
index c9c6d5e..0000000
--- a/test/640-checker-float-simd/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on SIMD vectorization.
diff --git a/test/640-checker-float-simd/src/Main.java b/test/640-checker-float-simd/src/Main.java
deleted file mode 100644
index d4eef9f..0000000
--- a/test/640-checker-float-simd/src/Main.java
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Functional tests for SIMD vectorization. Note that this class provides a mere
- * functional test, not a precise numerical verifier.
- */
-public class Main {
-
-  static float[] a;
-
-  //
-  // Arithmetic operations.
-  //
-
-  /// CHECK-START: void Main.add(float) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.add(float) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecAdd   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void add(float x) {
-    for (int i = 0; i < 128; i++)
-      a[i] += x;
-  }
-
-  /// CHECK-START: void Main.sub(float) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.sub(float) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecSub   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void sub(float x) {
-    for (int i = 0; i < 128; i++)
-      a[i] -= x;
-  }
-
-  /// CHECK-START: void Main.mul(float) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.mul(float) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecMul   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void mul(float x) {
-    for (int i = 0; i < 128; i++)
-      a[i] *= x;
-  }
-
-  /// CHECK-START: void Main.div(float) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.div(float) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecDiv   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void div(float x) {
-    for (int i = 0; i < 128; i++)
-      a[i] /= x;
-  }
-
-  /// CHECK-START: void Main.neg() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.neg() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecNeg   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void neg() {
-    for (int i = 0; i < 128; i++)
-      a[i] = -a[i];
-  }
-
-  /// CHECK-START: void Main.abs() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.abs() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecAbs   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void abs() {
-    for (int i = 0; i < 128; i++)
-      a[i] = Math.abs(a[i]);
-  }
-
-  /// CHECK-START: void Main.conv(int[]) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.conv(int[]) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecCnv   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void conv(int[] b) {
-    for (int i = 0; i < 128; i++)
-      a[i] = b[i];
-  }
-
-  //
-  // Loop bounds.
-  //
-
-  static void bounds() {
-    for (int i = 1; i < 127; i++)
-      a[i] += 11;
-  }
-
-  //
-  // Test Driver.
-  //
-
-  public static void main(String[] args) {
-    // Set up.
-    a = new float[128];
-    for (int i = 0; i < 128; i++) {
-      a[i] = i;
-    }
-    // Arithmetic operations.
-    add(2.0f);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i + 2, a[i], "add");
-    }
-    sub(2.0f);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i, a[i], "sub");
-    }
-    mul(2.0f);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i + i, a[i], "mul");
-    }
-    div(2.0f);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i, a[i], "div");
-    }
-    neg();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(-i, a[i], "neg");
-    }
-    // Loop bounds.
-    bounds();
-    expectEquals(0, a[0], "bounds0");
-    for (int i = 1; i < 127; i++) {
-      expectEquals(11 - i, a[i], "bounds");
-    }
-    expectEquals(-127, a[127], "bounds127");
-    // Abs.
-    abs();
-    expectEquals(0, a[0], "abs0");
-    for (int i = 1; i <= 11; i++) {
-      expectEquals(11 - i, a[i], "abs_lo");
-    }
-    for (int i = 12; i < 127; i++) {
-      expectEquals(i - 11, a[i], "abs_hi");
-    }
-    expectEquals(127, a[127], "abs127");
-    // Conversion.
-    int[] b = new int[128];
-    for (int i = 0; i < 128; i++) {
-      b[i] = 1000 * i;
-    }
-    conv(b);
-    for (int i = 1; i < 127; i++) {
-      expectEquals(1000.0f * i, a[i], "conv");
-    }
-    // Done.
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(float expected, float result, String action) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
-    }
-  }
-}
diff --git a/test/640-checker-int-simd/expected.txt b/test/640-checker-int-simd/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/640-checker-int-simd/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/640-checker-int-simd/src/Main.java b/test/640-checker-int-simd/src/Main.java
deleted file mode 100644
index 85d8b1b..0000000
--- a/test/640-checker-int-simd/src/Main.java
+++ /dev/null
@@ -1,307 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Functional tests for SIMD vectorization.
- */
-public class Main {
-
-  static int[] a;
-
-  //
-  // Arithmetic operations.
-  //
-
-  /// CHECK-START: void Main.add(int) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.add(int) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecAdd   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void add(int x) {
-    for (int i = 0; i < 128; i++)
-      a[i] += x;
-  }
-
-  /// CHECK-START: void Main.sub(int) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.sub(int) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecSub   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void sub(int x) {
-    for (int i = 0; i < 128; i++)
-      a[i] -= x;
-  }
-
-  /// CHECK-START: void Main.mul(int) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.mul(int) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecMul   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void mul(int x) {
-    for (int i = 0; i < 128; i++)
-      a[i] *= x;
-  }
-
-  /// CHECK-START: void Main.div(int) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.div(int) loop_optimization (after)
-  /// CHECK-NOT: VecDiv
-  //
-  //  Not supported on any architecture.
-  //
-  static void div(int x) {
-    for (int i = 0; i < 128; i++)
-      a[i] /= x;
-  }
-
-  /// CHECK-START: void Main.neg() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.neg() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecNeg   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void neg() {
-    for (int i = 0; i < 128; i++)
-      a[i] = -a[i];
-  }
-
-  /// CHECK-START: void Main.not() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.not() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecNot   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void not() {
-    for (int i = 0; i < 128; i++)
-      a[i] = ~a[i];
-  }
-
-  /// CHECK-START: void Main.shl4() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.shl4() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecShl   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void shl4() {
-    for (int i = 0; i < 128; i++)
-      a[i] <<= 4;
-  }
-
-  /// CHECK-START: void Main.sar2() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.sar2() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecShr   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void sar2() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>= 2;
-  }
-
-  /// CHECK-START: void Main.shr2() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.shr2() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecUShr  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void shr2() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= 2;
-  }
-
-  //
-  // Shift sanity.
-  //
-
-  // Expose constants to optimizing compiler, but not to front-end.
-  public static int $opt$inline$IntConstant32()       { return 32; }
-  public static int $opt$inline$IntConstant33()       { return 33; }
-  public static int $opt$inline$IntConstantMinus254() { return -254; }
-
-  /// CHECK-START: void Main.shr32() instruction_simplifier$after_inlining (before)
-  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 32                        loop:none
-  /// CHECK-DAG: <<Get:i\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.shr32() instruction_simplifier$after_inlining (after)
-  /// CHECK-DAG: <<Get:i\d+>> ArrayGet                             loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG:              ArraySet [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.shr32() loop_optimization (after)
-  /// CHECK-DAG: <<Get:d\d+>> VecLoad                              loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG:              VecStore [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>>      outer_loop:none
-  static void shr32() {
-    // TODO: remove a[i] = a[i] altogether?
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= $opt$inline$IntConstant32();  // 0, since & 31
-  }
-
-  /// CHECK-START: void Main.shr33() instruction_simplifier$after_inlining (before)
-  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 33                        loop:none
-  /// CHECK-DAG: <<Get:i\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.shr33() instruction_simplifier$after_inlining (after)
-  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1                         loop:none
-  /// CHECK-DAG: <<Get:i\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.shr33() loop_optimization (after)
-  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1                         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
-  static void shr33() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= $opt$inline$IntConstant33();  // 1, since & 31
-  }
-
-  /// CHECK-START: void Main.shrMinus254() instruction_simplifier$after_inlining (before)
-  /// CHECK-DAG: <<Dist:i\d+>> IntConstant -254                      loop:none
-  /// CHECK-DAG: <<Get:i\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.shrMinus254() instruction_simplifier$after_inlining (after)
-  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 2                         loop:none
-  /// CHECK-DAG: <<Get:i\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.shrMinus254() loop_optimization (after)
-  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 2                         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
-  static void shrMinus254() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= $opt$inline$IntConstantMinus254();  // 2, since & 31
-  }
-
-  //
-  // Loop bounds.
-  //
-
-  static void bounds() {
-    for (int i = 1; i < 127; i++)
-      a[i] += 11;
-  }
-
-  //
-  // Test Driver.
-  //
-
-  public static void main(String[] args) {
-    // Set up.
-    a = new int[128];
-    for (int i = 0; i < 128; i++) {
-      a[i] = i;
-    }
-    // Arithmetic operations.
-    add(2);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i + 2, a[i], "add");
-    }
-    sub(2);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i, a[i], "sub");
-    }
-    mul(2);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i + i, a[i], "mul");
-    }
-    div(2);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i, a[i], "div");
-    }
-    neg();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(-i, a[i], "neg");
-    }
-    // Loop bounds.
-    bounds();
-    expectEquals(0, a[0], "bounds0");
-    for (int i = 1; i < 127; i++) {
-      expectEquals(11 - i, a[i], "bounds");
-    }
-    expectEquals(-127, a[127], "bounds127");
-    // Shifts.
-    for (int i = 0; i < 128; i++) {
-      a[i] = 0xffffffff;
-    }
-    shl4();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(0xfffffff0, a[i], "shl4");
-    }
-    sar2();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(0xfffffffc, a[i], "sar2");
-    }
-    shr2();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(0x3fffffff, a[i], "shr2");
-    }
-    shr32();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(0x3fffffff, a[i], "shr32");
-    }
-    shr33();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(0x1fffffff, a[i], "shr33");
-    }
-    shrMinus254();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(0x07ffffff, a[i], "shrMinus254");
-    }
-    // Bit-wise not operator.
-    not();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(0xf8000000, a[i], "not");
-    }
-    // Done.
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result, String action) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
-    }
-  }
-}
diff --git a/test/640-checker-long-simd/expected.txt b/test/640-checker-long-simd/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/640-checker-long-simd/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/640-checker-long-simd/info.txt b/test/640-checker-long-simd/info.txt
deleted file mode 100644
index c9c6d5e..0000000
--- a/test/640-checker-long-simd/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on SIMD vectorization.
diff --git a/test/640-checker-long-simd/src/Main.java b/test/640-checker-long-simd/src/Main.java
deleted file mode 100644
index bb4d0cb..0000000
--- a/test/640-checker-long-simd/src/Main.java
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Functional tests for SIMD vectorization.
- */
-public class Main {
-
-  static long[] a;
-
-  //
-  // Arithmetic operations.
-  //
-
-  /// CHECK-START: void Main.add(long) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.add(long) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecAdd   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void add(long x) {
-    for (int i = 0; i < 128; i++)
-      a[i] += x;
-  }
-
-  /// CHECK-START: void Main.sub(long) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.sub(long) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecSub   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void sub(long x) {
-    for (int i = 0; i < 128; i++)
-      a[i] -= x;
-  }
-
-  /// CHECK-START: void Main.mul(long) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  //  Not directly supported for longs.
-  //
-  /// CHECK-START-ARM64: void Main.mul(long) loop_optimization (after)
-  /// CHECK-NOT: VecMul
-  //
-  /// CHECK-START-MIPS64: void Main.mul(long) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecMul   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void mul(long x) {
-    for (int i = 0; i < 128; i++)
-      a[i] *= x;
-  }
-
-  /// CHECK-START: void Main.div(long) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.div(long) loop_optimization (after)
-  /// CHECK-NOT: VecDiv
-  //
-  //  Not supported on any architecture.
-  //
-  static void div(long x) {
-    for (int i = 0; i < 128; i++)
-      a[i] /= x;
-  }
-
-  /// CHECK-START: void Main.neg() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.neg() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecNeg   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void neg() {
-    for (int i = 0; i < 128; i++)
-      a[i] = -a[i];
-  }
-
-  /// CHECK-START: void Main.not() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.not() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecNot   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void not() {
-    for (int i = 0; i < 128; i++)
-      a[i] = ~a[i];
-  }
-
-  /// CHECK-START: void Main.shl4() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.shl4() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecShl   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void shl4() {
-    for (int i = 0; i < 128; i++)
-      a[i] <<= 4;
-  }
-
-  /// CHECK-START: void Main.sar2() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.sar2() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecShr   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void sar2() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>= 2;
-  }
-
-  /// CHECK-START: void Main.shr2() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.shr2() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecUShr  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void shr2() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= 2;
-  }
-
-  //
-  // Shift sanity.
-  //
-
-  // Expose constants to optimizing compiler, but not to front-end.
-  public static int $opt$inline$IntConstant64()       { return 64; }
-  public static int $opt$inline$IntConstant65()       { return 65; }
-  public static int $opt$inline$IntConstantMinus254() { return -254; }
-
-  /// CHECK-START: void Main.shr64() instruction_simplifier$after_inlining (before)
-  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 64                        loop:none
-  /// CHECK-DAG: <<Get:j\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.shr64() instruction_simplifier$after_inlining (after)
-  /// CHECK-DAG: <<Get:j\d+>> ArrayGet                             loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG:              ArraySet [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.shr64() loop_optimization (after)
-  /// CHECK-DAG: <<Get:d\d+>> VecLoad                              loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG:              VecStore [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>>      outer_loop:none
-  static void shr64() {
-    // TODO: remove a[i] = a[i] altogether?
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= $opt$inline$IntConstant64();  // 0, since & 63
-  }
-
-  /// CHECK-START: void Main.shr65() instruction_simplifier$after_inlining (before)
-  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 65                        loop:none
-  /// CHECK-DAG: <<Get:j\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.shr65() instruction_simplifier$after_inlining (after)
-  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1                         loop:none
-  /// CHECK-DAG: <<Get:j\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.shr65() loop_optimization (after)
-  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1                         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
-  static void shr65() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= $opt$inline$IntConstant65();  // 1, since & 63
-  }
-
-  /// CHECK-START: void Main.shrMinus254() instruction_simplifier$after_inlining (before)
-  /// CHECK-DAG: <<Dist:i\d+>> IntConstant -254                      loop:none
-  /// CHECK-DAG: <<Get:j\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.shrMinus254() instruction_simplifier$after_inlining (after)
-  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 2                         loop:none
-  /// CHECK-DAG: <<Get:j\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.shrMinus254() loop_optimization (after)
-  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 2                         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
-  static void shrMinus254() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= $opt$inline$IntConstantMinus254();  // 2, since & 63
-  }
-
-  //
-  // Loop bounds.
-  //
-
-  static void bounds() {
-    for (int i = 1; i < 127; i++)
-      a[i] += 11;
-  }
-
-  //
-  // Test Driver.
-  //
-
-  public static void main(String[] args) {
-    // Set up.
-    a = new long[128];
-    for (int i = 0; i < 128; i++) {
-      a[i] = i;
-    }
-    // Arithmetic operations.
-    add(2L);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i + 2, a[i], "add");
-    }
-    sub(2L);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i, a[i], "sub");
-    }
-    mul(2L);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i + i, a[i], "mul");
-    }
-    div(2L);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i, a[i], "div");
-    }
-    neg();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(-i, a[i], "neg");
-    }
-    // Loop bounds.
-    bounds();
-    expectEquals(0, a[0], "bounds0");
-    for (int i = 1; i < 127; i++) {
-      expectEquals(11 - i, a[i], "bounds");
-    }
-    expectEquals(-127, a[127], "bounds127");
-    // Shifts.
-    for (int i = 0; i < 128; i++) {
-      a[i] = 0xffffffffffffffffL;
-    }
-    shl4();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(0xfffffffffffffff0L, a[i], "shl4");
-    }
-    sar2();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(0xfffffffffffffffcL, a[i], "sar2");
-    }
-    shr2();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(0x3fffffffffffffffL, a[i], "shr2");
-    }
-    shr64();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(0x3fffffffffffffffL, a[i], "shr64");
-    }
-    shr65();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(0x1fffffffffffffffL, a[i], "shr65");
-    }
-    shrMinus254();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(0x07ffffffffffffffL, a[i], "shrMinus254");
-    }
-    // Bit-wise not operator.
-    not();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(0xf800000000000000L, a[i], "not");
-    }
-    // Done.
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(long expected, long result, String action) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
-    }
-  }
-}
diff --git a/test/640-checker-short-simd/expected.txt b/test/640-checker-short-simd/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/640-checker-short-simd/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/640-checker-short-simd/info.txt b/test/640-checker-short-simd/info.txt
deleted file mode 100644
index c9c6d5e..0000000
--- a/test/640-checker-short-simd/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on SIMD vectorization.
diff --git a/test/640-checker-short-simd/src/Main.java b/test/640-checker-short-simd/src/Main.java
deleted file mode 100644
index 2b4ba87..0000000
--- a/test/640-checker-short-simd/src/Main.java
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Functional tests for SIMD vectorization.
- */
-public class Main {
-
-  static short[] a;
-
-  //
-  // Arithmetic operations.
-  //
-
-  /// CHECK-START: void Main.add(int) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.add(int) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecAdd   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void add(int x) {
-    for (int i = 0; i < 128; i++)
-      a[i] += x;
-  }
-
-  /// CHECK-START: void Main.sub(int) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.sub(int) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecSub   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void sub(int x) {
-    for (int i = 0; i < 128; i++)
-      a[i] -= x;
-  }
-
-  /// CHECK-START: void Main.mul(int) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.mul(int) loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecMul   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void mul(int x) {
-    for (int i = 0; i < 128; i++)
-      a[i] *= x;
-  }
-
-  /// CHECK-START: void Main.div(int) loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.div(int) loop_optimization (after)
-  /// CHECK-NOT: VecDiv
-  //
-  //  Not supported on any architecture.
-  //
-  static void div(int x) {
-    for (int i = 0; i < 128; i++)
-      a[i] /= x;
-  }
-
-  /// CHECK-START: void Main.neg() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.neg() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecNeg   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void neg() {
-    for (int i = 0; i < 128; i++)
-      a[i] = (short) -a[i];
-  }
-
-  /// CHECK-START: void Main.not() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.not() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecNot   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void not() {
-    for (int i = 0; i < 128; i++)
-      a[i] = (short) ~a[i];
-  }
-
-  /// CHECK-START: void Main.shl4() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.shl4() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecShl   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void shl4() {
-    for (int i = 0; i < 128; i++)
-      a[i] <<= 4;
-  }
-
-  /// CHECK-START: void Main.sar2() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.sar2() loop_optimization (after)
-  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: VecShr   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
-  static void sar2() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>= 2;
-  }
-
-  /// CHECK-START: void Main.shr2() loop_optimization (before)
-  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
-  //
-  // TODO: would need signess flip.
-  /// CHECK-START: void Main.shr2() loop_optimization (after)
-  /// CHECK-NOT: VecUShr
-  static void shr2() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= 2;
-  }
-
-  //
-  // Shift sanity.
-  //
-
-  static void sar31() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>= 31;
-  }
-
-  static void shr31() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= 31;
-  }
-
-  static void shr32() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= 32;  // 0, since & 31
-  }
-
-
-  static void shr33() {
-    for (int i = 0; i < 128; i++)
-      a[i] >>>= 33;  // 1, since & 31
-  }
-
-  //
-  // Loop bounds.
-  //
-
-  static void add() {
-    for (int i = 1; i < 127; i++)
-      a[i] += 11;
-  }
-
-  //
-  // Test Driver.
-  //
-
-  public static void main(String[] args) {
-    // Set up.
-    a = new short[128];
-    for (int i = 0; i < 128; i++) {
-      a[i] = (short) i;
-    }
-    // Arithmetic operations.
-    add(2);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i + 2, a[i], "add");
-    }
-    sub(2);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i, a[i], "sub");
-    }
-    mul(2);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i + i, a[i], "mul");
-    }
-    div(2);
-    for (int i = 0; i < 128; i++) {
-      expectEquals(i, a[i], "div");
-    }
-    neg();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(-i, a[i], "neg");
-    }
-    // Loop bounds.
-    add();
-    expectEquals(0, a[0], "bounds0");
-    for (int i = 1; i < 127; i++) {
-      expectEquals(11 - i, a[i], "bounds");
-    }
-    expectEquals(-127, a[127], "bounds127");
-    // Shifts.
-    for (int i = 0; i < 128; i++) {
-      a[i] = (short) 0xffff;
-    }
-    shl4();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((short) 0xfff0, a[i], "shl4");
-    }
-    sar2();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((short) 0xfffc, a[i], "sar2");
-    }
-    shr2();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((short) 0xffff, a[i], "shr2");  // sic!
-    }
-    sar31();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((short) 0xffff, a[i], "sar31");
-    }
-    shr31();
-    for (int i = 0; i < 128; i++) {
-      expectEquals(0x0001, a[i], "shr31");
-      a[i] = (short) 0x1200;  // reset
-    }
-    shr32();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((short) 0x1200, a[i], "shr32");
-    }
-    shr33();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((short) 0x0900, a[i], "shr33");
-      a[i] = (short) 0xf0f1;  // reset
-    }
-    not();
-    for (int i = 0; i < 128; i++) {
-      expectEquals((short) 0x0f0e, a[i], "not");
-    }
-    // Done.
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result, String action) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
-    }
-  }
-}
diff --git a/test/640-checker-simd/expected.txt b/test/640-checker-simd/expected.txt
new file mode 100644
index 0000000..b7a4fcc
--- /dev/null
+++ b/test/640-checker-simd/expected.txt
@@ -0,0 +1,7 @@
+SimdByte passed
+SimdShort passed
+SimdChar passed
+SimdInt passed
+SimdLong passed
+SimdDouble passed
+SimdFloat passed
diff --git a/test/640-checker-int-simd/info.txt b/test/640-checker-simd/info.txt
similarity index 100%
rename from test/640-checker-int-simd/info.txt
rename to test/640-checker-simd/info.txt
diff --git a/test/640-checker-simd/src/Main.java b/test/640-checker-simd/src/Main.java
new file mode 100644
index 0000000..cece60c
--- /dev/null
+++ b/test/640-checker-simd/src/Main.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) {
+    SimdByte.main();
+    SimdShort.main();
+    SimdChar.main();
+    SimdInt.main();
+    SimdLong.main();
+    SimdDouble.main();
+    SimdFloat.main();
+  }
+}
diff --git a/test/640-checker-simd/src/SimdByte.java b/test/640-checker-simd/src/SimdByte.java
new file mode 100644
index 0000000..5066136
--- /dev/null
+++ b/test/640-checker-simd/src/SimdByte.java
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Functional tests for SIMD vectorization.
+ */
+public class SimdByte {
+
+  static byte[] a;
+
+  //
+  // Arithmetic operations.
+  //
+
+  /// CHECK-START: void SimdByte.add(int) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdByte.add(int) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecAdd   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void add(int x) {
+    for (int i = 0; i < 128; i++)
+      a[i] += x;
+  }
+
+  /// CHECK-START: void SimdByte.sub(int) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdByte.sub(int) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecSub   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void sub(int x) {
+    for (int i = 0; i < 128; i++)
+      a[i] -= x;
+  }
+
+  /// CHECK-START: void SimdByte.mul(int) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdByte.mul(int) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecMul   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void mul(int x) {
+    for (int i = 0; i < 128; i++)
+      a[i] *= x;
+  }
+
+  /// CHECK-START: void SimdByte.div(int) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void SimdByte.div(int) loop_optimization (after)
+  //
+  //  Not supported on any architecture.
+  //
+  static void div(int x) {
+    for (int i = 0; i < 128; i++)
+      a[i] /= x;
+  }
+
+  /// CHECK-START: void SimdByte.neg() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdByte.neg() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecNeg   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void neg() {
+    for (int i = 0; i < 128; i++)
+      a[i] = (byte) -a[i];
+  }
+
+  /// CHECK-START: void SimdByte.not() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdByte.not() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecNot   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void not() {
+    for (int i = 0; i < 128; i++)
+      a[i] = (byte) ~a[i];
+  }
+
+  /// CHECK-START: void SimdByte.shl4() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdByte.shl4() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecShl   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void shl4() {
+    for (int i = 0; i < 128; i++)
+      a[i] <<= 4;
+  }
+
+  /// CHECK-START: void SimdByte.sar2() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdByte.sar2() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecShr   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void sar2() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>= 2;
+  }
+
+  /// CHECK-START: void SimdByte.shr2() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  // TODO: would need signess flip.
+  /// CHECK-START: void SimdByte.shr2() loop_optimization (after)
+  /// CHECK-NOT: VecUShr
+  static void shr2() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= 2;
+  }
+
+  //
+  // Shift sanity.
+  //
+
+  static void sar31() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>= 31;
+  }
+
+  static void shr31() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= 31;
+  }
+
+  static void shr32() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= 32;  // 0, since & 31
+  }
+
+  static void shr33() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= 33;  // 1, since & 31
+  }
+
+  static void shl9() {
+    for (int i = 0; i < 128; i++)
+      a[i] <<= 9;  // yields all-zeros
+  }
+
+  //
+  // Loop bounds.
+  //
+
+  static void bounds() {
+    for (int i = 1; i < 127; i++)
+      a[i] += 11;
+  }
+
+  //
+  // Test Driver.
+  //
+
+  public static void main() {
+    // Set up.
+    a = new byte[128];
+    for (int i = 0; i < 128; i++) {
+      a[i] = (byte) i;
+    }
+    // Arithmetic operations.
+    add(2);
+    for (int i = 0; i < 128; i++) {
+      expectEquals((byte)(i + 2), a[i], "add");
+    }
+    sub(2);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i, a[i], "sub");
+    }
+    mul(2);
+    for (int i = 0; i < 128; i++) {
+      expectEquals((byte)(i + i), a[i], "mul");
+    }
+    div(2);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(((byte)(i + i)) >> 1, a[i], "div");
+      a[i] = (byte) i;  // undo arithmetic wrap-around effects
+    }
+    neg();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(-i, a[i], "neg");
+    }
+    // Loop bounds.
+    bounds();
+    expectEquals(0, a[0], "bounds0");
+    for (int i = 1; i < 127; i++) {
+      expectEquals(11 - i, a[i], "bounds");
+    }
+    expectEquals(-127, a[127], "bounds127");
+    // Shifts.
+    for (int i = 0; i < 128; i++) {
+      a[i] = (byte) 0xff;
+    }
+    shl4();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((byte) 0xf0, a[i], "shl4");
+    }
+    sar2();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((byte) 0xfc, a[i], "sar2");
+    }
+    shr2();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((byte) 0xff, a[i], "shr2");  // sic!
+    }
+    sar31();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((byte) 0xff, a[i], "sar31");
+    }
+    shr31();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(0x01, a[i], "shr31");
+      a[i] = (byte) 0x12;  // reset
+    }
+    shr32();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((byte) 0x12, a[i], "shr32");
+    }
+    shr33();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((byte) 0x09, a[i], "shr33");
+    }
+    shl9();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((byte) 0x00, a[i], "shl9");
+      a[i] = (byte) 0xf0;  // reset
+    }
+    not();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((byte) 0x0f, a[i], "not");
+    }
+    // Done.
+    System.out.println("SimdByte passed");
+  }
+
+  private static void expectEquals(int expected, int result, String action) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
+    }
+  }
+}
diff --git a/test/640-checker-simd/src/SimdChar.java b/test/640-checker-simd/src/SimdChar.java
new file mode 100644
index 0000000..bb06b06
--- /dev/null
+++ b/test/640-checker-simd/src/SimdChar.java
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Functional tests for SIMD vectorization.
+ */
+public class SimdChar {
+
+  static char[] a;
+
+  //
+  // Arithmetic operations.
+  //
+
+  /// CHECK-START: void SimdChar.add(int) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdChar.add(int) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecAdd   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void add(int x) {
+    for (int i = 0; i < 128; i++)
+      a[i] += x;
+  }
+
+  /// CHECK-START: void SimdChar.sub(int) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdChar.sub(int) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecSub   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void sub(int x) {
+    for (int i = 0; i < 128; i++)
+      a[i] -= x;
+  }
+
+  /// CHECK-START: void SimdChar.mul(int) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdChar.mul(int) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecMul   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void mul(int x) {
+    for (int i = 0; i < 128; i++)
+      a[i] *= x;
+  }
+
+  /// CHECK-START: void SimdChar.div(int) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void SimdChar.div(int) loop_optimization (after)
+  /// CHECK-NOT: VecDiv
+  //
+  //  Not supported on any architecture.
+  //
+  static void div(int x) {
+    for (int i = 0; i < 128; i++)
+      a[i] /= x;
+  }
+
+  /// CHECK-START: void SimdChar.neg() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdChar.neg() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecNeg   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void neg() {
+    for (int i = 0; i < 128; i++)
+      a[i] = (char) -a[i];
+  }
+
+  /// CHECK-START: void SimdChar.not() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdChar.not() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecNot   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void not() {
+    for (int i = 0; i < 128; i++)
+      a[i] = (char) ~a[i];
+  }
+
+  /// CHECK-START: void SimdChar.shl4() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdChar.shl4() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecShl   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void shl4() {
+    for (int i = 0; i < 128; i++)
+      a[i] <<= 4;
+  }
+
+  /// CHECK-START: void SimdChar.sar2() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  // TODO: would need signess flip.
+  /// CHECK-START: void SimdChar.sar2() loop_optimization (after)
+  /// CHECK-NOT: VecShr
+  static void sar2() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>= 2;
+  }
+
+  /// CHECK-START: void SimdChar.shr2() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdChar.shr2() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecUShr  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void shr2() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= 2;
+  }
+
+  //
+  // Shift sanity.
+  //
+
+  static void sar31() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>= 31;
+  }
+
+  static void shr31() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= 31;
+  }
+
+  static void shr32() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= 32;  // 0, since & 31
+  }
+
+  static void shr33() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= 33;  // 1, since & 31
+  }
+
+  //
+  // Loop bounds.
+  //
+
+  static void bounds() {
+    for (int i = 1; i < 127; i++)
+      a[i] += 11;
+  }
+
+  //
+  // Test Driver.
+  //
+
+  public static void main() {
+    // Set up.
+    a = new char[128];
+    for (int i = 0; i < 128; i++) {
+      a[i] = (char) i;
+    }
+    // Arithmetic operations.
+    add(2);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i + 2, a[i], "add");
+    }
+    sub(2);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i, a[i], "sub");
+    }
+    mul(2);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i + i, a[i], "mul");
+    }
+    div(2);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i, a[i], "div");
+    }
+    neg();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((char)-i, a[i], "neg");
+    }
+    // Loop bounds.
+    bounds();
+    expectEquals(0, a[0], "bounds0");
+    for (int i = 1; i < 127; i++) {
+      expectEquals((char)(11 - i), a[i], "bounds");
+    }
+    expectEquals((char)-127, a[127], "bounds127");
+    // Shifts.
+    for (int i = 0; i < 128; i++) {
+      a[i] = (char) 0xffff;
+    }
+    shl4();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((char) 0xfff0, a[i], "shl4");
+    }
+    sar2();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((char) 0x3ffc, a[i], "sar2");
+    }
+    shr2();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((char) 0x0fff, a[i], "shr2");
+      a[i] = (char) 0xffff;  // reset
+    }
+    sar31();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(0, a[i], "sar31");
+      a[i] = (char) 0xffff;  // reset
+    }
+    shr31();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(0, a[i], "shr31");
+      a[i] = (char) 0x1200;  // reset
+    }
+    shr32();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((char) 0x1200, a[i], "shr32");
+    }
+    shr33();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((char) 0x0900, a[i], "shr33");
+      a[i] = (char) 0xf1f0;  // reset
+    }
+    not();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((char) 0x0e0f, a[i], "not");
+    }
+    // Done.
+    System.out.println("SimdChar passed");
+  }
+
+  private static void expectEquals(int expected, int result, String action) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
+    }
+  }
+}
diff --git a/test/640-checker-simd/src/SimdDouble.java b/test/640-checker-simd/src/SimdDouble.java
new file mode 100644
index 0000000..85704bf
--- /dev/null
+++ b/test/640-checker-simd/src/SimdDouble.java
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Functional tests for SIMD vectorization. Note that this class provides a mere
+ * functional test, not a precise numerical verifier.
+ */
+public class SimdDouble {
+
+  static double[] a;
+
+  //
+  // Arithmetic operations.
+  //
+
+  /// CHECK-START: void SimdDouble.add(double) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdDouble.add(double) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecAdd   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void add(double x) {
+    for (int i = 0; i < 128; i++)
+      a[i] += x;
+  }
+
+  /// CHECK-START: void SimdDouble.sub(double) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdDouble.sub(double) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecSub   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void sub(double x) {
+    for (int i = 0; i < 128; i++)
+      a[i] -= x;
+  }
+
+  /// CHECK-START: void SimdDouble.mul(double) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdDouble.mul(double) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecMul   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void mul(double x) {
+    for (int i = 0; i < 128; i++)
+      a[i] *= x;
+  }
+
+  /// CHECK-START: void SimdDouble.div(double) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdDouble.div(double) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecDiv   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void div(double x) {
+    for (int i = 0; i < 128; i++)
+      a[i] /= x;
+  }
+
+  /// CHECK-START: void SimdDouble.neg() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdDouble.neg() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecNeg   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void neg() {
+    for (int i = 0; i < 128; i++)
+      a[i] = -a[i];
+  }
+
+  /// CHECK-START: void SimdDouble.abs() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdDouble.abs() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecAbs   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void abs() {
+    for (int i = 0; i < 128; i++)
+      a[i] = Math.abs(a[i]);
+  }
+
+  /// CHECK-START: void SimdDouble.conv(long[]) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void SimdDouble.conv(long[]) loop_optimization (after)
+  /// CHECK-NOT: VecLoad
+  /// CHECK-NOT: VecStore
+  //
+  // TODO: fill in when long2double is supported
+  static void conv(long[] b) {
+    for (int i = 0; i < 128; i++)
+      a[i] = b[i];
+  }
+
+  //
+  // Loop bounds.
+  //
+
+  static void bounds() {
+    for (int i = 1; i < 127; i++)
+      a[i] += 11;
+  }
+
+  //
+  // Test Driver.
+  //
+
+  public static void main() {
+    // Set up.
+    a = new double[128];
+    for (int i = 0; i < 128; i++) {
+      a[i] = i;
+    }
+    // Arithmetic operations.
+    add(2.0);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i + 2, a[i], "add");
+    }
+    sub(2.0);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i, a[i], "sub");
+    }
+    mul(2.0);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i + i, a[i], "mul");
+    }
+    div(2.0);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i, a[i], "div");
+    }
+    neg();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(-i, a[i], "neg");
+    }
+    // Loop bounds.
+    bounds();
+    expectEquals(0, a[0], "bounds0");
+    for (int i = 1; i < 127; i++) {
+      expectEquals(11 - i, a[i], "bounds");
+    }
+    expectEquals(-127, a[127], "bounds127");
+    // Abs.
+    abs();
+    expectEquals(0, a[0], "abs0");
+    for (int i = 1; i <= 11; i++) {
+      expectEquals(11 - i, a[i], "abs_lo");
+    }
+    for (int i = 12; i < 127; i++) {
+      expectEquals(i - 11, a[i], "abs_hi");
+    }
+    expectEquals(127, a[127], "abs127");
+    // Conversion.
+    long[] b = new long[128];
+    for (int i = 0; i < 128; i++) {
+      b[i] = 1000 * i;
+    }
+    conv(b);
+    for (int i = 1; i < 127; i++) {
+      expectEquals(1000.0 * i, a[i], "conv");
+    }
+    // Done.
+    System.out.println("SimdDouble passed");
+  }
+
+  private static void expectEquals(double expected, double result, String action) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
+    }
+  }
+}
diff --git a/test/640-checker-simd/src/SimdFloat.java b/test/640-checker-simd/src/SimdFloat.java
new file mode 100644
index 0000000..3d27dc1
--- /dev/null
+++ b/test/640-checker-simd/src/SimdFloat.java
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Functional tests for SIMD vectorization. Note that this class provides a mere
+ * functional test, not a precise numerical verifier.
+ */
+public class SimdFloat {
+
+  static float[] a;
+
+  //
+  // Arithmetic operations.
+  //
+
+  /// CHECK-START: void SimdFloat.add(float) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdFloat.add(float) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecAdd   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void add(float x) {
+    for (int i = 0; i < 128; i++)
+      a[i] += x;
+  }
+
+  /// CHECK-START: void SimdFloat.sub(float) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdFloat.sub(float) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecSub   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void sub(float x) {
+    for (int i = 0; i < 128; i++)
+      a[i] -= x;
+  }
+
+  /// CHECK-START: void SimdFloat.mul(float) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdFloat.mul(float) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecMul   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void mul(float x) {
+    for (int i = 0; i < 128; i++)
+      a[i] *= x;
+  }
+
+  /// CHECK-START: void SimdFloat.div(float) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdFloat.div(float) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecDiv   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void div(float x) {
+    for (int i = 0; i < 128; i++)
+      a[i] /= x;
+  }
+
+  /// CHECK-START: void SimdFloat.neg() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdFloat.neg() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecNeg   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void neg() {
+    for (int i = 0; i < 128; i++)
+      a[i] = -a[i];
+  }
+
+  /// CHECK-START: void SimdFloat.abs() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdFloat.abs() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecAbs   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void abs() {
+    for (int i = 0; i < 128; i++)
+      a[i] = Math.abs(a[i]);
+  }
+
+  /// CHECK-START: void SimdFloat.conv(int[]) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdFloat.conv(int[]) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecCnv   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void conv(int[] b) {
+    for (int i = 0; i < 128; i++)
+      a[i] = b[i];
+  }
+
+  //
+  // Loop bounds.
+  //
+
+  static void bounds() {
+    for (int i = 1; i < 127; i++)
+      a[i] += 11;
+  }
+
+  //
+  // Test Driver.
+  //
+
+  public static void main() {
+    // Set up.
+    a = new float[128];
+    for (int i = 0; i < 128; i++) {
+      a[i] = i;
+    }
+    // Arithmetic operations.
+    add(2.0f);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i + 2, a[i], "add");
+    }
+    sub(2.0f);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i, a[i], "sub");
+    }
+    mul(2.0f);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i + i, a[i], "mul");
+    }
+    div(2.0f);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i, a[i], "div");
+    }
+    neg();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(-i, a[i], "neg");
+    }
+    // Loop bounds.
+    bounds();
+    expectEquals(0, a[0], "bounds0");
+    for (int i = 1; i < 127; i++) {
+      expectEquals(11 - i, a[i], "bounds");
+    }
+    expectEquals(-127, a[127], "bounds127");
+    // Abs.
+    abs();
+    expectEquals(0, a[0], "abs0");
+    for (int i = 1; i <= 11; i++) {
+      expectEquals(11 - i, a[i], "abs_lo");
+    }
+    for (int i = 12; i < 127; i++) {
+      expectEquals(i - 11, a[i], "abs_hi");
+    }
+    expectEquals(127, a[127], "abs127");
+    // Conversion.
+    int[] b = new int[128];
+    for (int i = 0; i < 128; i++) {
+      b[i] = 1000 * i;
+    }
+    conv(b);
+    for (int i = 1; i < 127; i++) {
+      expectEquals(1000.0f * i, a[i], "conv");
+    }
+    // Done.
+    System.out.println("SimdFloat passed");
+  }
+
+  private static void expectEquals(float expected, float result, String action) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
+    }
+  }
+}
diff --git a/test/640-checker-simd/src/SimdInt.java b/test/640-checker-simd/src/SimdInt.java
new file mode 100644
index 0000000..0276bca
--- /dev/null
+++ b/test/640-checker-simd/src/SimdInt.java
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Functional tests for SIMD vectorization.
+ */
+public class SimdInt {
+
+  static int[] a;
+
+  //
+  // Arithmetic operations.
+  //
+
+  /// CHECK-START: void SimdInt.add(int) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdInt.add(int) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecAdd   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void add(int x) {
+    for (int i = 0; i < 128; i++)
+      a[i] += x;
+  }
+
+  /// CHECK-START: void SimdInt.sub(int) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdInt.sub(int) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecSub   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void sub(int x) {
+    for (int i = 0; i < 128; i++)
+      a[i] -= x;
+  }
+
+  /// CHECK-START: void SimdInt.mul(int) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdInt.mul(int) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecMul   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void mul(int x) {
+    for (int i = 0; i < 128; i++)
+      a[i] *= x;
+  }
+
+  /// CHECK-START: void SimdInt.div(int) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void SimdInt.div(int) loop_optimization (after)
+  /// CHECK-NOT: VecDiv
+  //
+  //  Not supported on any architecture.
+  //
+  static void div(int x) {
+    for (int i = 0; i < 128; i++)
+      a[i] /= x;
+  }
+
+  /// CHECK-START: void SimdInt.neg() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdInt.neg() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecNeg   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void neg() {
+    for (int i = 0; i < 128; i++)
+      a[i] = -a[i];
+  }
+
+  /// CHECK-START: void SimdInt.not() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdInt.not() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecNot   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void not() {
+    for (int i = 0; i < 128; i++)
+      a[i] = ~a[i];
+  }
+
+  /// CHECK-START: void SimdInt.shl4() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdInt.shl4() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecShl   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void shl4() {
+    for (int i = 0; i < 128; i++)
+      a[i] <<= 4;
+  }
+
+  /// CHECK-START: void SimdInt.sar2() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdInt.sar2() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecShr   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void sar2() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>= 2;
+  }
+
+  /// CHECK-START: void SimdInt.shr2() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdInt.shr2() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecUShr  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void shr2() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= 2;
+  }
+
+  //
+  // Shift sanity.
+  //
+
+  // Expose constants to optimizing compiler, but not to front-end.
+  public static int $opt$inline$IntConstant32()       { return 32; }
+  public static int $opt$inline$IntConstant33()       { return 33; }
+  public static int $opt$inline$IntConstantMinus254() { return -254; }
+
+  /// CHECK-START: void SimdInt.shr32() instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 32                        loop:none
+  /// CHECK-DAG: <<Get:i\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void SimdInt.shr32() instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Get:i\d+>> ArrayGet                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG:              ArraySet [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdInt.shr32() loop_optimization (after)
+  /// CHECK-DAG: <<Get:d\d+>> VecLoad                              loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG:              VecStore [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>>      outer_loop:none
+  static void shr32() {
+    // TODO: remove a[i] = a[i] altogether?
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= $opt$inline$IntConstant32();  // 0, since & 31
+  }
+
+  /// CHECK-START: void SimdInt.shr33() instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 33                        loop:none
+  /// CHECK-DAG: <<Get:i\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void SimdInt.shr33() instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1                         loop:none
+  /// CHECK-DAG: <<Get:i\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdInt.shr33() loop_optimization (after)
+  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1                         loop:none
+  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
+  static void shr33() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= $opt$inline$IntConstant33();  // 1, since & 31
+  }
+
+  /// CHECK-START: void SimdInt.shrMinus254() instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Dist:i\d+>> IntConstant -254                      loop:none
+  /// CHECK-DAG: <<Get:i\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void SimdInt.shrMinus254() instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 2                         loop:none
+  /// CHECK-DAG: <<Get:i\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdInt.shrMinus254() loop_optimization (after)
+  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 2                         loop:none
+  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
+  static void shrMinus254() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= $opt$inline$IntConstantMinus254();  // 2, since & 31
+  }
+
+  //
+  // Loop bounds.
+  //
+
+  static void bounds() {
+    for (int i = 1; i < 127; i++)
+      a[i] += 11;
+  }
+
+  //
+  // Test Driver.
+  //
+
+  public static void main() {
+    // Set up.
+    a = new int[128];
+    for (int i = 0; i < 128; i++) {
+      a[i] = i;
+    }
+    // Arithmetic operations.
+    add(2);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i + 2, a[i], "add");
+    }
+    sub(2);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i, a[i], "sub");
+    }
+    mul(2);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i + i, a[i], "mul");
+    }
+    div(2);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i, a[i], "div");
+    }
+    neg();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(-i, a[i], "neg");
+    }
+    // Loop bounds.
+    bounds();
+    expectEquals(0, a[0], "bounds0");
+    for (int i = 1; i < 127; i++) {
+      expectEquals(11 - i, a[i], "bounds");
+    }
+    expectEquals(-127, a[127], "bounds127");
+    // Shifts.
+    for (int i = 0; i < 128; i++) {
+      a[i] = 0xffffffff;
+    }
+    shl4();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(0xfffffff0, a[i], "shl4");
+    }
+    sar2();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(0xfffffffc, a[i], "sar2");
+    }
+    shr2();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(0x3fffffff, a[i], "shr2");
+    }
+    shr32();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(0x3fffffff, a[i], "shr32");
+    }
+    shr33();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(0x1fffffff, a[i], "shr33");
+    }
+    shrMinus254();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(0x07ffffff, a[i], "shrMinus254");
+    }
+    // Bit-wise not operator.
+    not();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(0xf8000000, a[i], "not");
+    }
+    // Done.
+    System.out.println("SimdInt passed");
+  }
+
+  private static void expectEquals(int expected, int result, String action) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
+    }
+  }
+}
diff --git a/test/640-checker-simd/src/SimdLong.java b/test/640-checker-simd/src/SimdLong.java
new file mode 100644
index 0000000..c914b69
--- /dev/null
+++ b/test/640-checker-simd/src/SimdLong.java
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Functional tests for SIMD vectorization.
+ */
+public class SimdLong {
+
+  static long[] a;
+
+  //
+  // Arithmetic operations.
+  //
+
+  /// CHECK-START: void SimdLong.add(long) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdLong.add(long) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecAdd   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void add(long x) {
+    for (int i = 0; i < 128; i++)
+      a[i] += x;
+  }
+
+  /// CHECK-START: void SimdLong.sub(long) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdLong.sub(long) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecSub   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void sub(long x) {
+    for (int i = 0; i < 128; i++)
+      a[i] -= x;
+  }
+
+  /// CHECK-START: void SimdLong.mul(long) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  //  Not directly supported for longs.
+  //
+  /// CHECK-START-ARM64: void SimdLong.mul(long) loop_optimization (after)
+  /// CHECK-NOT: VecMul
+  static void mul(long x) {
+    for (int i = 0; i < 128; i++)
+      a[i] *= x;
+  }
+
+  /// CHECK-START: void SimdLong.div(long) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void SimdLong.div(long) loop_optimization (after)
+  /// CHECK-NOT: VecDiv
+  //
+  //  Not supported on any architecture.
+  //
+  static void div(long x) {
+    for (int i = 0; i < 128; i++)
+      a[i] /= x;
+  }
+
+  /// CHECK-START: void SimdLong.neg() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdLong.neg() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecNeg   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void neg() {
+    for (int i = 0; i < 128; i++)
+      a[i] = -a[i];
+  }
+
+  /// CHECK-START: void SimdLong.not() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdLong.not() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecNot   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void not() {
+    for (int i = 0; i < 128; i++)
+      a[i] = ~a[i];
+  }
+
+  /// CHECK-START: void SimdLong.shl4() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdLong.shl4() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecShl   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void shl4() {
+    for (int i = 0; i < 128; i++)
+      a[i] <<= 4;
+  }
+
+  /// CHECK-START: void SimdLong.sar2() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdLong.sar2() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecShr   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void sar2() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>= 2;
+  }
+
+  /// CHECK-START: void SimdLong.shr2() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdLong.shr2() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecUShr  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void shr2() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= 2;
+  }
+
+  //
+  // Shift sanity.
+  //
+
+  // Expose constants to optimizing compiler, but not to front-end.
+  public static int $opt$inline$IntConstant64()       { return 64; }
+  public static int $opt$inline$IntConstant65()       { return 65; }
+  public static int $opt$inline$IntConstantMinus254() { return -254; }
+
+  /// CHECK-START: void SimdLong.shr64() instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 64                        loop:none
+  /// CHECK-DAG: <<Get:j\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void SimdLong.shr64() instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Get:j\d+>> ArrayGet                             loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG:              ArraySet [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdLong.shr64() loop_optimization (after)
+  /// CHECK-DAG: <<Get:d\d+>> VecLoad                              loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG:              VecStore [{{l\d+}},{{i\d+}},<<Get>>] loop:<<Loop>>      outer_loop:none
+  static void shr64() {
+    // TODO: remove a[i] = a[i] altogether?
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= $opt$inline$IntConstant64();  // 0, since & 63
+  }
+
+  /// CHECK-START: void SimdLong.shr65() instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 65                        loop:none
+  /// CHECK-DAG: <<Get:j\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void SimdLong.shr65() instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1                         loop:none
+  /// CHECK-DAG: <<Get:j\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdLong.shr65() loop_optimization (after)
+  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 1                         loop:none
+  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
+  static void shr65() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= $opt$inline$IntConstant65();  // 1, since & 63
+  }
+
+  /// CHECK-START: void SimdLong.shrMinus254() instruction_simplifier$after_inlining (before)
+  /// CHECK-DAG: <<Dist:i\d+>> IntConstant -254                      loop:none
+  /// CHECK-DAG: <<Get:j\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void SimdLong.shrMinus254() instruction_simplifier$after_inlining (after)
+  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 2                         loop:none
+  /// CHECK-DAG: <<Get:j\d+>>  ArrayGet                              loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<UShr:j\d+>> UShr [<<Get>>,<<Dist>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void SimdLong.shrMinus254() loop_optimization (after)
+  /// CHECK-DAG: <<Dist:i\d+>> IntConstant 2                         loop:none
+  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<UShr:d\d+>> VecUShr [<<Get>>,<<Dist>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<UShr>>] loop:<<Loop>>      outer_loop:none
+  static void shrMinus254() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= $opt$inline$IntConstantMinus254();  // 2, since & 63
+  }
+
+  //
+  // Loop bounds.
+  //
+
+  static void bounds() {
+    for (int i = 1; i < 127; i++)
+      a[i] += 11;
+  }
+
+  //
+  // Test Driver.
+  //
+
+  public static void main() {
+    // Set up.
+    a = new long[128];
+    for (int i = 0; i < 128; i++) {
+      a[i] = i;
+    }
+    // Arithmetic operations.
+    add(2L);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i + 2, a[i], "add");
+    }
+    sub(2L);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i, a[i], "sub");
+    }
+    mul(2L);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i + i, a[i], "mul");
+    }
+    div(2L);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i, a[i], "div");
+    }
+    neg();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(-i, a[i], "neg");
+    }
+    // Loop bounds.
+    bounds();
+    expectEquals(0, a[0], "bounds0");
+    for (int i = 1; i < 127; i++) {
+      expectEquals(11 - i, a[i], "bounds");
+    }
+    expectEquals(-127, a[127], "bounds127");
+    // Shifts.
+    for (int i = 0; i < 128; i++) {
+      a[i] = 0xffffffffffffffffL;
+    }
+    shl4();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(0xfffffffffffffff0L, a[i], "shl4");
+    }
+    sar2();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(0xfffffffffffffffcL, a[i], "sar2");
+    }
+    shr2();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(0x3fffffffffffffffL, a[i], "shr2");
+    }
+    shr64();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(0x3fffffffffffffffL, a[i], "shr64");
+    }
+    shr65();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(0x1fffffffffffffffL, a[i], "shr65");
+    }
+    shrMinus254();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(0x07ffffffffffffffL, a[i], "shrMinus254");
+    }
+    // Bit-wise not operator.
+    not();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(0xf800000000000000L, a[i], "not");
+    }
+    // Done.
+    System.out.println("SimdLong passed");
+  }
+
+  private static void expectEquals(long expected, long result, String action) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
+    }
+  }
+}
diff --git a/test/640-checker-simd/src/SimdShort.java b/test/640-checker-simd/src/SimdShort.java
new file mode 100644
index 0000000..6b184a6
--- /dev/null
+++ b/test/640-checker-simd/src/SimdShort.java
@@ -0,0 +1,264 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Functional tests for SIMD vectorization.
+ */
+public class SimdShort {
+
+  static short[] a;
+
+  //
+  // Arithmetic operations.
+  //
+
+  /// CHECK-START: void SimdShort.add(int) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdShort.add(int) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecAdd   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void add(int x) {
+    for (int i = 0; i < 128; i++)
+      a[i] += x;
+  }
+
+  /// CHECK-START: void SimdShort.sub(int) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdShort.sub(int) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecSub   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void sub(int x) {
+    for (int i = 0; i < 128; i++)
+      a[i] -= x;
+  }
+
+  /// CHECK-START: void SimdShort.mul(int) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdShort.mul(int) loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecMul   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void mul(int x) {
+    for (int i = 0; i < 128; i++)
+      a[i] *= x;
+  }
+
+  /// CHECK-START: void SimdShort.div(int) loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void SimdShort.div(int) loop_optimization (after)
+  /// CHECK-NOT: VecDiv
+  //
+  //  Not supported on any architecture.
+  //
+  static void div(int x) {
+    for (int i = 0; i < 128; i++)
+      a[i] /= x;
+  }
+
+  /// CHECK-START: void SimdShort.neg() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdShort.neg() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecNeg   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void neg() {
+    for (int i = 0; i < 128; i++)
+      a[i] = (short) -a[i];
+  }
+
+  /// CHECK-START: void SimdShort.not() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdShort.not() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecNot   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void not() {
+    for (int i = 0; i < 128; i++)
+      a[i] = (short) ~a[i];
+  }
+
+  /// CHECK-START: void SimdShort.shl4() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdShort.shl4() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecShl   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void shl4() {
+    for (int i = 0; i < 128; i++)
+      a[i] <<= 4;
+  }
+
+  /// CHECK-START: void SimdShort.sar2() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void SimdShort.sar2() loop_optimization (after)
+  /// CHECK-DAG: VecLoad  loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: VecShr   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: VecStore loop:<<Loop>>      outer_loop:none
+  static void sar2() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>= 2;
+  }
+
+  /// CHECK-START: void SimdShort.shr2() loop_optimization (before)
+  /// CHECK-DAG: ArrayGet loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: ArraySet loop:<<Loop>>      outer_loop:none
+  //
+  // TODO: would need signess flip.
+  /// CHECK-START: void SimdShort.shr2() loop_optimization (after)
+  /// CHECK-NOT: VecUShr
+  static void shr2() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= 2;
+  }
+
+  //
+  // Shift sanity.
+  //
+
+  static void sar31() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>= 31;
+  }
+
+  static void shr31() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= 31;
+  }
+
+  static void shr32() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= 32;  // 0, since & 31
+  }
+
+
+  static void shr33() {
+    for (int i = 0; i < 128; i++)
+      a[i] >>>= 33;  // 1, since & 31
+  }
+
+  //
+  // Loop bounds.
+  //
+
+  static void add() {
+    for (int i = 1; i < 127; i++)
+      a[i] += 11;
+  }
+
+  //
+  // Test Driver.
+  //
+
+  public static void main() {
+    // Set up.
+    a = new short[128];
+    for (int i = 0; i < 128; i++) {
+      a[i] = (short) i;
+    }
+    // Arithmetic operations.
+    add(2);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i + 2, a[i], "add");
+    }
+    sub(2);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i, a[i], "sub");
+    }
+    mul(2);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i + i, a[i], "mul");
+    }
+    div(2);
+    for (int i = 0; i < 128; i++) {
+      expectEquals(i, a[i], "div");
+    }
+    neg();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(-i, a[i], "neg");
+    }
+    // Loop bounds.
+    add();
+    expectEquals(0, a[0], "bounds0");
+    for (int i = 1; i < 127; i++) {
+      expectEquals(11 - i, a[i], "bounds");
+    }
+    expectEquals(-127, a[127], "bounds127");
+    // Shifts.
+    for (int i = 0; i < 128; i++) {
+      a[i] = (short) 0xffff;
+    }
+    shl4();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((short) 0xfff0, a[i], "shl4");
+    }
+    sar2();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((short) 0xfffc, a[i], "sar2");
+    }
+    shr2();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((short) 0xffff, a[i], "shr2");  // sic!
+    }
+    sar31();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((short) 0xffff, a[i], "sar31");
+    }
+    shr31();
+    for (int i = 0; i < 128; i++) {
+      expectEquals(0x0001, a[i], "shr31");
+      a[i] = (short) 0x1200;  // reset
+    }
+    shr32();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((short) 0x1200, a[i], "shr32");
+    }
+    shr33();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((short) 0x0900, a[i], "shr33");
+      a[i] = (short) 0xf0f1;  // reset
+    }
+    not();
+    for (int i = 0; i < 128; i++) {
+      expectEquals((short) 0x0f0e, a[i], "not");
+    }
+    // Done.
+    System.out.println("SimdShort passed");
+  }
+
+  private static void expectEquals(int expected, int result, String action) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result + " for " + action);
+    }
+  }
+}
diff --git a/test/645-checker-abs-simd/src/Main.java b/test/645-checker-abs-simd/src/Main.java
index 819304a..828b656 100644
--- a/test/645-checker-abs-simd/src/Main.java
+++ b/test/645-checker-abs-simd/src/Main.java
@@ -31,7 +31,7 @@
   /// CHECK-DAG: Abs       loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: ArraySet  loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.doitByte(byte[]) loop_optimization (after)
+  /// CHECK-START-{ARM,ARM64}: void Main.doitByte(byte[]) loop_optimization (after)
   /// CHECK-DAG: VecLoad   loop:<<Loop1:B\d+>> outer_loop:none
   /// CHECK-DAG: VecAbs    loop:<<Loop1>>      outer_loop:none
   /// CHECK-DAG: VecStore  loop:<<Loop1>>      outer_loop:none
@@ -65,7 +65,7 @@
   /// CHECK-DAG: Abs       loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: ArraySet  loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.doitShort(short[]) loop_optimization (after)
+  /// CHECK-START-{ARM,ARM64}: void Main.doitShort(short[]) loop_optimization (after)
   /// CHECK-DAG: VecLoad   loop:<<Loop1:B\d+>> outer_loop:none
   /// CHECK-DAG: VecAbs    loop:<<Loop1>>      outer_loop:none
   /// CHECK-DAG: VecStore  loop:<<Loop1>>      outer_loop:none
@@ -109,7 +109,7 @@
   /// CHECK-DAG: Abs       loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: ArraySet  loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.doitInt(int[]) loop_optimization (after)
+  /// CHECK-START-{ARM,ARM64}: void Main.doitInt(int[]) loop_optimization (after)
   /// CHECK-DAG: VecLoad   loop:<<Loop1:B\d+>> outer_loop:none
   /// CHECK-DAG: VecAbs    loop:<<Loop1>>      outer_loop:none
   /// CHECK-DAG: VecStore  loop:<<Loop1>>      outer_loop:none
@@ -131,7 +131,7 @@
   /// CHECK-DAG: Abs       loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: ArraySet  loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.doitLong(long[]) loop_optimization (after)
+  /// CHECK-START-ARM64: void Main.doitLong(long[]) loop_optimization (after)
   /// CHECK-DAG: VecLoad   loop:<<Loop1:B\d+>> outer_loop:none
   /// CHECK-DAG: VecAbs    loop:<<Loop1>>      outer_loop:none
   /// CHECK-DAG: VecStore  loop:<<Loop1>>      outer_loop:none
@@ -153,7 +153,7 @@
   /// CHECK-DAG: Abs       loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: ArraySet  loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.doitFloat(float[]) loop_optimization (after)
+  /// CHECK-START-ARM64: void Main.doitFloat(float[]) loop_optimization (after)
   /// CHECK-DAG: VecLoad   loop:<<Loop1:B\d+>> outer_loop:none
   /// CHECK-DAG: VecAbs    loop:<<Loop1>>      outer_loop:none
   /// CHECK-DAG: VecStore  loop:<<Loop1>>      outer_loop:none
@@ -175,7 +175,7 @@
   /// CHECK-DAG: Abs        loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: ArraySet   loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.doitDouble(double[]) loop_optimization (after)
+  /// CHECK-START-ARM64: void Main.doitDouble(double[]) loop_optimization (after)
   /// CHECK-DAG: VecLoad    loop:<<Loop1:B\d+>> outer_loop:none
   /// CHECK-DAG: VecAbs     loop:<<Loop1>>      outer_loop:none
   /// CHECK-DAG: VecStore   loop:<<Loop1>>      outer_loop:none
diff --git a/test/646-checker-hadd-alt-byte/expected.txt b/test/646-checker-hadd-alt-byte/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/646-checker-hadd-alt-byte/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/646-checker-hadd-alt-byte/info.txt b/test/646-checker-hadd-alt-byte/info.txt
deleted file mode 100644
index 46e7334..0000000
--- a/test/646-checker-hadd-alt-byte/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on halving-add SIMD vectorization.
diff --git a/test/646-checker-hadd-alt-byte/src/Main.java b/test/646-checker-hadd-alt-byte/src/Main.java
deleted file mode 100644
index 2ef340a..0000000
--- a/test/646-checker-hadd-alt-byte/src/Main.java
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tests for halving-add idiomatic vectorization.
- *
- * Alternative version expressed with logical shift right
- * in the higher precision (has no impact on idiom).
- */
-public class Main {
-
-  private static final int N = 256;
-  private static final int M = N * N + 15;
-
-  static byte[] sB1 = new byte[M];
-  static byte[] sB2 = new byte[M];
-  static byte[] sBo = new byte[M];
-
-  /// CHECK-START: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (byte) ((b1[i] + b2[i]) >>> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.halving_add_unsigned(byte[], byte[], byte[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                     loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<I255>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<I255>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (byte) (((b1[i] & 0xff) + (b2[i] & 0xff)) >>> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void rounding_halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (byte) ((b1[i] + b2[i] + 1) >>> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                     loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<I255>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<I255>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>]  packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void rounding_halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (byte) (((b1[i] & 0xff) + (b2[i] & 0xff) + 1) >>> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<I127:i\d+>> IntConstant 127                     loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get:b\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<I127>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
-  /// CHECK-DAG: <<I127:i\d+>> IntConstant 127                       loop:none
-  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>]         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_signed_constant(byte[] b1, byte[] bo) {
-    int min_length = Math.min(bo.length, b1.length);
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (byte) ((b1[i] + 0x7f) >>> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.halving_add_unsigned_constant(byte[], byte[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                     loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get:b\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And:i\d+>>  And [<<Get>>,<<I255>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And>>,<<I255>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                     loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get:a\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<I255>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
-  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                       loop:none
-  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>]         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_unsigned_constant(byte[] b1, byte[] bo) {
-    int min_length = Math.min(bo.length, b1.length);
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (byte) (((b1[i] & 0xff) + 0xff) >>> 1);
-    }
-  }
-
-  public static void main(String[] args) {
-    // Initialize cross-values to test all cases, and also
-    // set up some extra values to exercise the cleanup loop.
-    int k = 0;
-    for (int i = 0; i < N; i++) {
-      for (int j = 0; j < N; j++) {
-        sB1[k] = (byte) i;
-        sB2[k] = (byte) j;
-        k++;
-      }
-    }
-    for (int i = 0; i < 15; i++) {
-      sB1[k] = (byte) i;
-      sB2[k] = 100;
-      k++;
-    }
-    expectEquals(k, M);
-
-    // Test halving add idioms. Note that the expected result is computed
-    // with the arithmetic >> to demonstrate the computed narrower result
-    // does not depend on the wider >> or >>>.
-    halving_add_signed(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      byte e = (byte) ((sB1[i] + sB2[i]) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    halving_add_unsigned(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      byte e = (byte) (((sB1[i] & 0xff) + (sB2[i] & 0xff)) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    rounding_halving_add_signed(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      byte e = (byte) ((sB1[i] + sB2[i] + 1) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    rounding_halving_add_unsigned(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      byte e = (byte) (((sB1[i] & 0xff) + (sB2[i] & 0xff) + 1) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    halving_add_signed_constant(sB1, sBo);
-    for (int i = 0; i < M; i++) {
-      byte e = (byte) ((sB1[i] + 0x7f) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    halving_add_unsigned_constant(sB1, sBo);
-    for (int i = 0; i < M; i++) {
-      byte e = (byte) (((sB1[i] & 0xff) + 0xff) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-}
diff --git a/test/646-checker-hadd-alt-char/expected.txt b/test/646-checker-hadd-alt-char/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/646-checker-hadd-alt-char/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/646-checker-hadd-alt-char/info.txt b/test/646-checker-hadd-alt-char/info.txt
deleted file mode 100644
index 46e7334..0000000
--- a/test/646-checker-hadd-alt-char/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on halving-add SIMD vectorization.
diff --git a/test/646-checker-hadd-alt-char/src/Main.java b/test/646-checker-hadd-alt-char/src/Main.java
deleted file mode 100644
index 79904ce..0000000
--- a/test/646-checker-hadd-alt-char/src/Main.java
+++ /dev/null
@@ -1,277 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tests for halving-add idiomatic vectorization.
- *
- * Alternative version expressed with logical shift right
- * in the higher precision (has no impact on idiom).
- */
-public class Main {
-
-  private static final int N = 64 * 1024;
-  private static final int M = N + 31;
-
-  static char[] sB1 = new char[M];
-  static char[] sB2 = new char[M];
-  static char[] sBo = new char[M];
-
-  /// CHECK-START: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (char) ((b1[i] + b2[i]) >>> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.halving_add_also_unsigned(char[], char[], char[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<IMAX>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And2:i\d+>> And [<<IMAX>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  //
-  // Note: HAnd has no impact (already a zero extension).
-  //
-  private static void halving_add_also_unsigned(char[] b1, char[] b2, char[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (char) (((b1[i] & 0xffff) + (b2[i] & 0xffff)) >>> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void rounding_halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (char) ((b1[i] + b2[i] + 1) >>> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<IMAX>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And2:i\d+>> And [<<IMAX>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  //
-  // Note: HAnd has no impact (already a zero extension).
-  //
-  private static void rounding_halving_add_also_unsigned(char[] b1, char[] b2, char[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (char) (((b1[i] & 0xffff) + (b2[i] & 0xffff) + 1) >>> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get:c\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                     loop:none
-  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>]         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_unsigned_constant(char[] b1, char[] bo) {
-    int min_length = Math.min(bo.length, b1.length);
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (char) ((b1[i] + 0xffff) >>> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.halving_add_also_unsigned_constant(char[], char[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get:c\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And:i\d+>>  And [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get:c\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                     loop:none
-  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>]         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  //
-  // Note: HAnd has no impact (already a zero extension).
-  //
-  private static void halving_add_also_unsigned_constant(char[] b1, char[] bo) {
-    int min_length = Math.min(bo.length, b1.length);
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (char) (((b1[i] & 0xffff) + 0xffff) >>> 1);
-    }
-  }
-
-  public static void main(String[] args) {
-    // Some interesting values.
-    char[] interesting = {
-      (char) 0x0000,
-      (char) 0x0001,
-      (char) 0x0002,
-      (char) 0x1234,
-      (char) 0x8000,
-      (char) 0x8001,
-      (char) 0x7fff,
-      (char) 0xffff
-    };
-    // Initialize cross-values to test all cases, and also
-    // set up some extra values to exercise the cleanup loop.
-    for (int i = 0; i < M; i++) {
-      sB1[i] = (char) i;
-      sB2[i] = interesting[i & 7];
-    }
-
-    // Test halving add idioms. Note that the expected result is computed
-    // with the arithmetic >> to demonstrate the computed narrower result
-    // does not depend on the wider >> or >>>.
-    halving_add_unsigned(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      char e = (char) ((sB1[i] + sB2[i]) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    halving_add_also_unsigned(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      char e = (char) ((sB1[i] + sB2[i]) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    rounding_halving_add_unsigned(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      char e = (char) ((sB1[i] + sB2[i] + 1) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    rounding_halving_add_also_unsigned(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      char e = (char) ((sB1[i] + sB2[i] + 1) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    halving_add_unsigned_constant(sB1, sBo);
-    for (int i = 0; i < M; i++) {
-      char e = (char) ((sB1[i] + 0xffff) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    halving_add_also_unsigned_constant(sB1, sBo);
-    for (int i = 0; i < M; i++) {
-      char e = (char) ((sB1[i] + 0xffff) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-}
diff --git a/test/646-checker-hadd-alt-short/expected.txt b/test/646-checker-hadd-alt-short/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/646-checker-hadd-alt-short/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/646-checker-hadd-alt-short/info.txt b/test/646-checker-hadd-alt-short/info.txt
deleted file mode 100644
index 46e7334..0000000
--- a/test/646-checker-hadd-alt-short/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on halving-add SIMD vectorization.
diff --git a/test/646-checker-hadd-alt-short/src/Main.java b/test/646-checker-hadd-alt-short/src/Main.java
deleted file mode 100644
index 1ecb1d82..0000000
--- a/test/646-checker-hadd-alt-short/src/Main.java
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tests for halving-add idiomatic vectorization.
- *
- * Alternative version expressed with logical shift right
- * in the higher precision (has no impact on idiom).
- */
-public class Main {
-
-  private static final int N = 64 * 1024;
-  private static final int M = N + 31;
-
-  static short[] sB1 = new short[M];
-  static short[] sB2 = new short[M];
-  static short[] sBo = new short[M];
-
-  /// CHECK-START: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_signed(short[] b1, short[] b2, short[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (short) ((b1[i] + b2[i]) >>> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.halving_add_unsigned(short[], short[], short[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<UMAX>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And2:i\d+>> And [<<UMAX>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      int v1 = b1[i] & 0xffff;
-      int v2 = b2[i] & 0xffff;
-      bo[i] = (short) ((v1 + v2) >>> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void rounding_halving_add_signed(short[] b1, short[] b2, short[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (short) ((b1[i] + b2[i] + 1) >>> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.rounding_halving_add_unsigned(short[], short[], short[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<UMAX>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And2:i\d+>> And [<<UMAX>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void rounding_halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      int v1 = b1[i] & 0xffff;
-      int v2 = b2[i] & 0xffff;
-      bo[i] = (short) ((v1 + v2 + 1) >>> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get:s\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<SMAX>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767                     loop:none
-  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>]         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_signed_constant(short[] b1, short[] bo) {
-    int min_length = Math.min(bo.length, b1.length);
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (short) ((b1[i] + 0x7fff) >>> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.halving_add_unsigned_constant(short[], short[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get:s\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And:i\d+>>  And [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get:c\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                     loop:none
-  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>]         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_unsigned_constant(short[] b1, short[] bo) {
-    int min_length = Math.min(bo.length, b1.length);
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (short) (((b1[i] & 0xffff) + 0xffff) >>> 1);
-    }
-  }
-
-  public static void main(String[] args) {
-    // Some interesting values.
-    short[] interesting = {
-      (short) 0x0000,
-      (short) 0x0001,
-      (short) 0x0002,
-      (short) 0x1234,
-      (short) 0x8000,
-      (short) 0x8001,
-      (short) 0x7fff,
-      (short) 0xffff
-    };
-    // Initialize cross-values to test all cases, and also
-    // set up some extra values to exercise the cleanup loop.
-    for (int i = 0; i < M; i++) {
-      sB1[i] = (short) i;
-      sB2[i] = interesting[i & 7];
-    }
-
-    // Test halving add idioms. Note that the expected result is computed
-    // with the arithmetic >> to demonstrate the computed narrower result
-    // does not depend on the wider >> or >>>.
-    halving_add_signed(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      short e = (short) ((sB1[i] + sB2[i]) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    halving_add_unsigned(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      short e = (short) (((sB1[i] & 0xffff) + (sB2[i] & 0xffff)) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    rounding_halving_add_signed(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      short e = (short) ((sB1[i] + sB2[i] + 1) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    rounding_halving_add_unsigned(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      short e = (short) (((sB1[i] & 0xffff) + (sB2[i] & 0xffff) + 1) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    halving_add_signed_constant(sB1, sBo);
-    for (int i = 0; i < M; i++) {
-      short e = (short) ((sB1[i] + 0x7fff) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    halving_add_unsigned_constant(sB1, sBo);
-    for (int i = 0; i < M; i++) {
-      short e = (short) (((sB1[i] & 0xffff) + 0xffff) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-}
diff --git a/test/646-checker-hadd-byte/expected.txt b/test/646-checker-hadd-byte/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/646-checker-hadd-byte/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/646-checker-hadd-byte/src/Main.java b/test/646-checker-hadd-byte/src/Main.java
deleted file mode 100644
index ca22200..0000000
--- a/test/646-checker-hadd-byte/src/Main.java
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tests for halving-add idiomatic vectorization.
- */
-public class Main {
-
-  private static final int N = 256;
-  private static final int M = N * N + 15;
-
-  static byte[] sB1 = new byte[M];
-  static byte[] sB2 = new byte[M];
-  static byte[] sBo = new byte[M];
-
-  /// CHECK-START: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (byte) ((b1[i] + b2[i]) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.halving_add_unsigned(byte[], byte[], byte[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                     loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<I255>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<I255>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (byte) (((b1[i] & 0xff) + (b2[i] & 0xff)) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void rounding_halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (byte) ((b1[i] + b2[i] + 1) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                     loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<I255>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<I255>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>]  packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void rounding_halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (byte) (((b1[i] & 0xff) + (b2[i] & 0xff) + 1) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<I127:i\d+>> IntConstant 127                     loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get:b\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<I127>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
-  /// CHECK-DAG: <<I127:i\d+>> IntConstant 127                       loop:none
-  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>]         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_signed_constant(byte[] b1, byte[] bo) {
-    int min_length = Math.min(bo.length, b1.length);
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (byte) ((b1[i] + 0x7f) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.halving_add_unsigned_constant(byte[], byte[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                     loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get:b\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And:i\d+>>  And [<<Get>>,<<I255>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And>>,<<I255>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                     loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get:a\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<I255>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
-  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                       loop:none
-  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>]         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_unsigned_constant(byte[] b1, byte[] bo) {
-    int min_length = Math.min(bo.length, b1.length);
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (byte) (((b1[i] & 0xff) + 0xff) >> 1);
-    }
-  }
-
-  public static void main(String[] args) {
-    // Initialize cross-values to test all cases, and also
-    // set up some extra values to exercise the cleanup loop.
-    int k = 0;
-    for (int i = 0; i < N; i++) {
-      for (int j = 0; j < N; j++) {
-        sB1[k] = (byte) i;
-        sB2[k] = (byte) j;
-        k++;
-      }
-    }
-    for (int i = 0; i < 15; i++) {
-      sB1[k] = (byte) i;
-      sB2[k] = 100;
-      k++;
-    }
-    expectEquals(k, M);
-
-    // Test halving add idioms.
-    halving_add_signed(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      byte e = (byte) ((sB1[i] + sB2[i]) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    halving_add_unsigned(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      byte e = (byte) (((sB1[i] & 0xff) + (sB2[i] & 0xff)) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    rounding_halving_add_signed(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      byte e = (byte) ((sB1[i] + sB2[i] + 1) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    rounding_halving_add_unsigned(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      byte e = (byte) (((sB1[i] & 0xff) + (sB2[i] & 0xff) + 1) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    halving_add_signed_constant(sB1, sBo);
-    for (int i = 0; i < M; i++) {
-      byte e = (byte) ((sB1[i] + 0x7f) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    halving_add_unsigned_constant(sB1, sBo);
-    for (int i = 0; i < M; i++) {
-      byte e = (byte) (((sB1[i] & 0xff) + 0xff) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-}
diff --git a/test/646-checker-hadd-char/expected.txt b/test/646-checker-hadd-char/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/646-checker-hadd-char/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/646-checker-hadd-char/info.txt b/test/646-checker-hadd-char/info.txt
deleted file mode 100644
index 46e7334..0000000
--- a/test/646-checker-hadd-char/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on halving-add SIMD vectorization.
diff --git a/test/646-checker-hadd-char/src/Main.java b/test/646-checker-hadd-char/src/Main.java
deleted file mode 100644
index cbe6297..0000000
--- a/test/646-checker-hadd-char/src/Main.java
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tests for halving-add idiomatic vectorization.
- */
-public class Main {
-
-  private static final int N = 64 * 1024;
-  private static final int M = N + 31;
-
-  static char[] sB1 = new char[M];
-  static char[] sB2 = new char[M];
-  static char[] sBo = new char[M];
-
-  /// CHECK-START: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-ARM: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-ARM64: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-MIPS64: void Main.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (char) ((b1[i] + b2[i]) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.halving_add_also_unsigned(char[], char[], char[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<IMAX>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And2:i\d+>> And [<<IMAX>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-ARM: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-ARM64: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-MIPS64: void Main.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  //
-  // Note: HAnd has no impact (already a zero extension).
-  //
-  private static void halving_add_also_unsigned(char[] b1, char[] b2, char[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (char) (((b1[i] & 0xffff) + (b2[i] & 0xffff)) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-ARM: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-ARM64: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-MIPS64: void Main.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void rounding_halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (char) ((b1[i] + b2[i] + 1) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<IMAX>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And2:i\d+>> And [<<IMAX>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-ARM: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-ARM64: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-MIPS64: void Main.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  //
-  // Note: HAnd has no impact (already a zero extension).
-  //
-  private static void rounding_halving_add_also_unsigned(char[] b1, char[] b2, char[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (char) (((b1[i] & 0xffff) + (b2[i] & 0xffff) + 1) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get:c\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-ARM: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                     loop:none
-  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>]         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-ARM64: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                     loop:none
-  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>]         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  //
-  /// CHECK-START-MIPS64: void Main.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                     loop:none
-  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>]         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  private static void halving_add_unsigned_constant(char[] b1, char[] bo) {
-    int min_length = Math.min(bo.length, b1.length);
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (char) ((b1[i] + 0xffff) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.halving_add_also_unsigned_constant(char[], char[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get:c\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And:i\d+>>  And [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get:c\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-ARM: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                     loop:none
-  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>]         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-ARM64: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                     loop:none
-  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>]         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-MIPS64: void Main.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                     loop:none
-  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>]         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  //
-  // Note: HAnd has no impact (already a zero extension).
-  //
-  private static void halving_add_also_unsigned_constant(char[] b1, char[] bo) {
-    int min_length = Math.min(bo.length, b1.length);
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (char) (((b1[i] & 0xffff) + 0xffff) >> 1);
-    }
-  }
-
-  public static void main(String[] args) {
-    // Some interesting values.
-    char[] interesting = {
-      (char) 0x0000,
-      (char) 0x0001,
-      (char) 0x0002,
-      (char) 0x1234,
-      (char) 0x8000,
-      (char) 0x8001,
-      (char) 0x7fff,
-      (char) 0xffff
-    };
-    // Initialize cross-values to test all cases, and also
-    // set up some extra values to exercise the cleanup loop.
-    for (int i = 0; i < M; i++) {
-      sB1[i] = (char) i;
-      sB2[i] = interesting[i & 7];
-    }
-
-    // Test halving add idioms.
-    halving_add_unsigned(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      char e = (char) ((sB1[i] + sB2[i]) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    halving_add_also_unsigned(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      char e = (char) ((sB1[i] + sB2[i]) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    rounding_halving_add_unsigned(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      char e = (char) ((sB1[i] + sB2[i] + 1) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    rounding_halving_add_also_unsigned(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      char e = (char) ((sB1[i] + sB2[i] + 1) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    halving_add_unsigned_constant(sB1, sBo);
-    for (int i = 0; i < M; i++) {
-      char e = (char) ((sB1[i] + 0xffff) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    halving_add_also_unsigned_constant(sB1, sBo);
-    for (int i = 0; i < M; i++) {
-      char e = (char) ((sB1[i] + 0xffff) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-}
diff --git a/test/646-checker-hadd-short/expected.txt b/test/646-checker-hadd-short/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/646-checker-hadd-short/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/646-checker-hadd-short/info.txt b/test/646-checker-hadd-short/info.txt
deleted file mode 100644
index 46e7334..0000000
--- a/test/646-checker-hadd-short/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on halving-add SIMD vectorization.
diff --git a/test/646-checker-hadd-short/src/Main.java b/test/646-checker-hadd-short/src/Main.java
deleted file mode 100644
index d78a678..0000000
--- a/test/646-checker-hadd-short/src/Main.java
+++ /dev/null
@@ -1,442 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tests for halving-add idiomatic vectorization.
- */
-public class Main {
-
-  private static final int N = 64 * 1024;
-  private static final int M = N + 31;
-
-  static short[] sB1 = new short[M];
-  static short[] sB2 = new short[M];
-  static short[] sBo = new short[M];
-
-  private static int $inline$mone() {
-    return -1;
-  }
-
-  /// CHECK-START: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_signed(short[], short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_signed(short[] b1, short[] b2, short[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (short) ((b1[i] + b2[i]) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.halving_add_signed_alt(short[], short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<I10:i\d+>>  IntConstant 10                      loop:none
-  /// CHECK-DAG: <<M10:i\d+>>  IntConstant -10                     loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<I10>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Get2>>,<<M10>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add3:i\d+>> Add [<<Add1>>,<<Add2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add3>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_signed_alt(short[] b1, short[] b2, short[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      // Cancelling constant computations do not confuse recognition.
-      bo[i] = (short) (((b1[i] + 10) + (b2[i] - 10)) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.halving_add_unsigned(short[], short[], short[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<UMAX>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And2:i\d+>> And [<<UMAX>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      int v1 = b1[i] & 0xffff;
-      int v2 = b2[i] & 0xffff;
-      bo[i] = (short) ((v1 + v2) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void rounding_halving_add_signed(short[] b1, short[] b2, short[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (short) ((b1[i] + b2[i] + 1) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.rounding_halving_add_signed_alt(short[], short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.rounding_halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void rounding_halving_add_signed_alt(short[] b1, short[] b2, short[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      // Slightly different order in idiom does not confuse recognition.
-      bo[i] = (short) (((1 + b1[i]) + b2[i]) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.rounding_halving_add_signed_alt2(short[], short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<I10:i\d+>>  IntConstant 10                      loop:none
-  /// CHECK-DAG: <<M9:i\d+>>   IntConstant -9                      loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<I10>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Get2>>,<<M9>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add3:i\d+>> Add [<<Add1>>,<<Add2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add3>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.rounding_halving_add_signed_alt2(short[], short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void rounding_halving_add_signed_alt2(short[] b1, short[] b2, short[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      // Computations that cancel to adding 1 also do not confuse recognition.
-      bo[i] = (short) (((b1[i] + 10) + (b2[i] - 9)) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.rounding_halving_add_signed_alt3(short[], short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<M1:i\d+>>   IntConstant -1                      loop:none
-  /// CHECK-DAG: <<I9:i\d+>>   IntConstant 9                       loop:none
-  /// CHECK-DAG: <<M9:i\d+>>   IntConstant -9                      loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<I9>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Get2>>,<<M9>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add3:i\d+>> Add [<<Add1>>,<<Add2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>  Sub [<<Add3>>,<<M1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Sub>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.rounding_halving_add_signed_alt3(short[], short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void rounding_halving_add_signed_alt3(short[] b1, short[] b2, short[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      // Computations that cancel to adding 1 also do not confuse recognition.
-      bo[i] = (short) (((b1[i] + 9) + (b2[i] - 9) - $inline$mone()) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.rounding_halving_add_unsigned(short[], short[], short[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<UMAX>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And2:i\d+>> And [<<UMAX>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void rounding_halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      int v1 = b1[i] & 0xffff;
-      int v2 = b2[i] & 0xffff;
-      bo[i] = (short) ((v1 + v2 + 1) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.rounding_halving_add_unsigned_alt(short[], short[], short[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<UMAX>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And2:i\d+>> And [<<UMAX>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<And2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<And1>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.rounding_halving_add_unsigned_alt(short[], short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Get1>>,<<Add1>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.rounding_halving_add_unsigned_alt(short[], short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void rounding_halving_add_unsigned_alt(short[] b1, short[] b2, short[] bo) {
-    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
-    for (int i = 0; i < min_length; i++) {
-      // Slightly different order in idiom does not confuse recognition.
-      int v1 = b1[i] & 0xffff;
-      int v2 = b2[i] & 0xffff;
-      bo[i] = (short) (v1 + (v2 + 1) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get:s\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<SMAX>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_signed_constant(short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767                     loop:none
-  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>]         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_signed_constant(short[] b1, short[] bo) {
-    int min_length = Math.min(bo.length, b1.length);
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (short) ((b1[i] + 0x7fff) >> 1);
-    }
-  }
-
-  /// CHECK-START: void Main.halving_add_unsigned_constant(short[], short[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get:s\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<And:i\d+>>  And [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
-  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Get:c\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: void Main.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                     loop:none
-  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>]         loop:none
-  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
-  private static void halving_add_unsigned_constant(short[] b1, short[] bo) {
-    int min_length = Math.min(bo.length, b1.length);
-    for (int i = 0; i < min_length; i++) {
-      bo[i] = (short) (((b1[i] & 0xffff) + 0xffff) >> 1);
-    }
-  }
-
-  public static void main(String[] args) {
-    // Some interesting values.
-    short[] interesting = {
-      (short) 0x0000,
-      (short) 0x0001,
-      (short) 0x0002,
-      (short) 0x1234,
-      (short) 0x8000,
-      (short) 0x8001,
-      (short) 0x7fff,
-      (short) 0xffff
-    };
-    // Initialize cross-values to test all cases, and also
-    // set up some extra values to exercise the cleanup loop.
-    for (int i = 0; i < M; i++) {
-      sB1[i] = (short) i;
-      sB2[i] = interesting[i & 7];
-    }
-
-    // Test halving add idioms.
-    halving_add_signed(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      short e = (short) ((sB1[i] + sB2[i]) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    halving_add_signed_alt(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      short e = (short) ((sB1[i] + sB2[i]) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    halving_add_unsigned(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      short e = (short) (((sB1[i] & 0xffff) + (sB2[i] & 0xffff)) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    rounding_halving_add_signed(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      short e = (short) ((sB1[i] + sB2[i] + 1) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    rounding_halving_add_signed_alt(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      short e = (short) ((sB1[i] + sB2[i] + 1) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    rounding_halving_add_signed_alt2(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      short e = (short) ((sB1[i] + sB2[i] + 1) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    rounding_halving_add_signed_alt3(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      short e = (short) ((sB1[i] + sB2[i] + 1) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    rounding_halving_add_unsigned(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      short e = (short) (((sB1[i] & 0xffff) + (sB2[i] & 0xffff) + 1) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    rounding_halving_add_unsigned_alt(sB1, sB2, sBo);
-    for (int i = 0; i < M; i++) {
-      short e = (short) (((sB1[i] & 0xffff) + (sB2[i] & 0xffff) + 1) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    halving_add_signed_constant(sB1, sBo);
-    for (int i = 0; i < M; i++) {
-      short e = (short) ((sB1[i] + 0x7fff) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-    halving_add_unsigned_constant(sB1, sBo);
-    for (int i = 0; i < M; i++) {
-      short e = (short) (((sB1[i] & 0xffff) + 0xffff) >> 1);
-      expectEquals(e, sBo[i]);
-    }
-
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-}
diff --git a/test/646-checker-simd-hadd/expected.txt b/test/646-checker-simd-hadd/expected.txt
new file mode 100644
index 0000000..93b1f4f
--- /dev/null
+++ b/test/646-checker-simd-hadd/expected.txt
@@ -0,0 +1,6 @@
+HaddAltByte passed
+HaddAltShort passed
+HaddAltChar passed
+HaddByte passed
+HaddShort passed
+HaddChar passed
diff --git a/test/646-checker-hadd-byte/info.txt b/test/646-checker-simd-hadd/info.txt
similarity index 100%
rename from test/646-checker-hadd-byte/info.txt
rename to test/646-checker-simd-hadd/info.txt
diff --git a/test/646-checker-simd-hadd/src/HaddAltByte.java b/test/646-checker-simd-hadd/src/HaddAltByte.java
new file mode 100644
index 0000000..28551ee
--- /dev/null
+++ b/test/646-checker-simd-hadd/src/HaddAltByte.java
@@ -0,0 +1,266 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for halving-add idiomatic vectorization.
+ *
+ * Alternative version expressed with logical shift right
+ * in the higher precision (has no impact on idiom).
+ */
+public class HaddAltByte {
+
+  private static final int N = 256;
+  private static final int M = N * N + 15;
+
+  static byte[] sB1 = new byte[M];
+  static byte[] sB2 = new byte[M];
+  static byte[] sBo = new byte[M];
+
+  /// CHECK-START: void HaddAltByte.halving_add_signed(byte[], byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddAltByte.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (byte) ((b1[i] + b2[i]) >>> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddAltByte.halving_add_unsigned(byte[], byte[], byte[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                     loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<I255>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<I255>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void HaddAltByte.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddAltByte.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (byte) (((b1[i] & 0xff) + (b2[i] & 0xff)) >>> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddAltByte.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddAltByte.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void rounding_halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (byte) ((b1[i] + b2[i] + 1) >>> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddAltByte.rounding_halving_add_unsigned(byte[], byte[], byte[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                     loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<I255>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<I255>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void HaddAltByte.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddAltByte.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>]  packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void rounding_halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (byte) (((b1[i] & 0xff) + (b2[i] & 0xff) + 1) >>> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddAltByte.halving_add_signed_constant(byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<I127:i\d+>> IntConstant 127                     loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get:b\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<I127>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddAltByte.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<I127:i\d+>> IntConstant 127                       loop:none
+  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>]         loop:none
+  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_signed_constant(byte[] b1, byte[] bo) {
+    int min_length = Math.min(bo.length, b1.length);
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (byte) ((b1[i] + 0x7f) >>> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddAltByte.halving_add_unsigned_constant(byte[], byte[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                     loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get:b\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And:i\d+>>  And [<<Get>>,<<I255>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And>>,<<I255>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void HaddAltByte.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                     loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get:a\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<I255>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddAltByte.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                       loop:none
+  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>]         loop:none
+  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_unsigned_constant(byte[] b1, byte[] bo) {
+    int min_length = Math.min(bo.length, b1.length);
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (byte) (((b1[i] & 0xff) + 0xff) >>> 1);
+    }
+  }
+
+  public static void main() {
+    // Initialize cross-values to test all cases, and also
+    // set up some extra values to exercise the cleanup loop.
+    int k = 0;
+    for (int i = 0; i < N; i++) {
+      for (int j = 0; j < N; j++) {
+        sB1[k] = (byte) i;
+        sB2[k] = (byte) j;
+        k++;
+      }
+    }
+    for (int i = 0; i < 15; i++) {
+      sB1[k] = (byte) i;
+      sB2[k] = 100;
+      k++;
+    }
+    expectEquals(k, M);
+
+    // Test halving add idioms. Note that the expected result is computed
+    // with the arithmetic >> to demonstrate the computed narrower result
+    // does not depend on the wider >> or >>>.
+    halving_add_signed(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      byte e = (byte) ((sB1[i] + sB2[i]) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    halving_add_unsigned(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      byte e = (byte) (((sB1[i] & 0xff) + (sB2[i] & 0xff)) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    rounding_halving_add_signed(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      byte e = (byte) ((sB1[i] + sB2[i] + 1) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    rounding_halving_add_unsigned(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      byte e = (byte) (((sB1[i] & 0xff) + (sB2[i] & 0xff) + 1) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    halving_add_signed_constant(sB1, sBo);
+    for (int i = 0; i < M; i++) {
+      byte e = (byte) ((sB1[i] + 0x7f) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    halving_add_unsigned_constant(sB1, sBo);
+    for (int i = 0; i < M; i++) {
+      byte e = (byte) (((sB1[i] & 0xff) + 0xff) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+
+    System.out.println("HaddAltByte passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/646-checker-simd-hadd/src/HaddAltChar.java b/test/646-checker-simd-hadd/src/HaddAltChar.java
new file mode 100644
index 0000000..2dd812d
--- /dev/null
+++ b/test/646-checker-simd-hadd/src/HaddAltChar.java
@@ -0,0 +1,277 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for halving-add idiomatic vectorization.
+ *
+ * Alternative version expressed with logical shift right
+ * in the higher precision (has no impact on idiom).
+ */
+public class HaddAltChar {
+
+  private static final int N = 64 * 1024;
+  private static final int M = N + 31;
+
+  static char[] sB1 = new char[M];
+  static char[] sB2 = new char[M];
+  static char[] sBo = new char[M];
+
+  /// CHECK-START: void HaddAltChar.halving_add_unsigned(char[], char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddAltChar.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (char) ((b1[i] + b2[i]) >>> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddAltChar.halving_add_also_unsigned(char[], char[], char[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<IMAX>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And2:i\d+>> And [<<IMAX>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void HaddAltChar.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddAltChar.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  //
+  // Note: HAnd has no impact (already a zero extension).
+  //
+  private static void halving_add_also_unsigned(char[] b1, char[] b2, char[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (char) (((b1[i] & 0xffff) + (b2[i] & 0xffff)) >>> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddAltChar.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddAltChar.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void rounding_halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (char) ((b1[i] + b2[i] + 1) >>> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddAltChar.rounding_halving_add_also_unsigned(char[], char[], char[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<IMAX>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And2:i\d+>> And [<<IMAX>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void HaddAltChar.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddAltChar.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  //
+  // Note: HAnd has no impact (already a zero extension).
+  //
+  private static void rounding_halving_add_also_unsigned(char[] b1, char[] b2, char[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (char) (((b1[i] & 0xffff) + (b2[i] & 0xffff) + 1) >>> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddAltChar.halving_add_unsigned_constant(char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get:c\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddAltChar.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                     loop:none
+  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>]         loop:none
+  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_unsigned_constant(char[] b1, char[] bo) {
+    int min_length = Math.min(bo.length, b1.length);
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (char) ((b1[i] + 0xffff) >>> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddAltChar.halving_add_also_unsigned_constant(char[], char[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get:c\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And:i\d+>>  And [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void HaddAltChar.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get:c\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddAltChar.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                     loop:none
+  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>]         loop:none
+  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  //
+  // Note: HAnd has no impact (already a zero extension).
+  //
+  private static void halving_add_also_unsigned_constant(char[] b1, char[] bo) {
+    int min_length = Math.min(bo.length, b1.length);
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (char) (((b1[i] & 0xffff) + 0xffff) >>> 1);
+    }
+  }
+
+  public static void main() {
+    // Some interesting values.
+    char[] interesting = {
+      (char) 0x0000,
+      (char) 0x0001,
+      (char) 0x0002,
+      (char) 0x1234,
+      (char) 0x8000,
+      (char) 0x8001,
+      (char) 0x7fff,
+      (char) 0xffff
+    };
+    // Initialize cross-values to test all cases, and also
+    // set up some extra values to exercise the cleanup loop.
+    for (int i = 0; i < M; i++) {
+      sB1[i] = (char) i;
+      sB2[i] = interesting[i & 7];
+    }
+
+    // Test halving add idioms. Note that the expected result is computed
+    // with the arithmetic >> to demonstrate the computed narrower result
+    // does not depend on the wider >> or >>>.
+    halving_add_unsigned(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      char e = (char) ((sB1[i] + sB2[i]) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    halving_add_also_unsigned(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      char e = (char) ((sB1[i] + sB2[i]) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    rounding_halving_add_unsigned(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      char e = (char) ((sB1[i] + sB2[i] + 1) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    rounding_halving_add_also_unsigned(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      char e = (char) ((sB1[i] + sB2[i] + 1) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    halving_add_unsigned_constant(sB1, sBo);
+    for (int i = 0; i < M; i++) {
+      char e = (char) ((sB1[i] + 0xffff) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    halving_add_also_unsigned_constant(sB1, sBo);
+    for (int i = 0; i < M; i++) {
+      char e = (char) ((sB1[i] + 0xffff) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+
+    System.out.println("HaddAltChar passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/646-checker-simd-hadd/src/HaddAltShort.java b/test/646-checker-simd-hadd/src/HaddAltShort.java
new file mode 100644
index 0000000..d7f4e40
--- /dev/null
+++ b/test/646-checker-simd-hadd/src/HaddAltShort.java
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for halving-add idiomatic vectorization.
+ *
+ * Alternative version expressed with logical shift right
+ * in the higher precision (has no impact on idiom).
+ */
+public class HaddAltShort {
+
+  private static final int N = 64 * 1024;
+  private static final int M = N + 31;
+
+  static short[] sB1 = new short[M];
+  static short[] sB2 = new short[M];
+  static short[] sBo = new short[M];
+
+  /// CHECK-START: void HaddAltShort.halving_add_signed(short[], short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddAltShort.halving_add_signed(short[], short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_signed(short[] b1, short[] b2, short[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (short) ((b1[i] + b2[i]) >>> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddAltShort.halving_add_unsigned(short[], short[], short[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<UMAX>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And2:i\d+>> And [<<UMAX>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void HaddAltShort.halving_add_unsigned(short[], short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddAltShort.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      int v1 = b1[i] & 0xffff;
+      int v2 = b2[i] & 0xffff;
+      bo[i] = (short) ((v1 + v2) >>> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddAltShort.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddAltShort.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void rounding_halving_add_signed(short[] b1, short[] b2, short[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (short) ((b1[i] + b2[i] + 1) >>> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddAltShort.rounding_halving_add_unsigned(short[], short[], short[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<UMAX>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And2:i\d+>> And [<<UMAX>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void HaddAltShort.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add2>>,<<I1>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddAltShort.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void rounding_halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      int v1 = b1[i] & 0xffff;
+      int v2 = b2[i] & 0xffff;
+      bo[i] = (short) ((v1 + v2 + 1) >>> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddAltShort.halving_add_signed_constant(short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get:s\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<SMAX>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddAltShort.halving_add_signed_constant(short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767                     loop:none
+  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>]         loop:none
+  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_signed_constant(short[] b1, short[] bo) {
+    int min_length = Math.min(bo.length, b1.length);
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (short) ((b1[i] + 0x7fff) >>> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddAltShort.halving_add_unsigned_constant(short[], short[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get:s\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And:i\d+>>  And [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void HaddAltShort.halving_add_unsigned_constant(short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get:c\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<UShr:i\d+>> UShr [<<Add>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<UShr>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddAltShort.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                     loop:none
+  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>]         loop:none
+  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_unsigned_constant(short[] b1, short[] bo) {
+    int min_length = Math.min(bo.length, b1.length);
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (short) (((b1[i] & 0xffff) + 0xffff) >>> 1);
+    }
+  }
+
+  public static void main() {
+    // Some interesting values.
+    short[] interesting = {
+      (short) 0x0000,
+      (short) 0x0001,
+      (short) 0x0002,
+      (short) 0x1234,
+      (short) 0x8000,
+      (short) 0x8001,
+      (short) 0x7fff,
+      (short) 0xffff
+    };
+    // Initialize cross-values to test all cases, and also
+    // set up some extra values to exercise the cleanup loop.
+    for (int i = 0; i < M; i++) {
+      sB1[i] = (short) i;
+      sB2[i] = interesting[i & 7];
+    }
+
+    // Test halving add idioms. Note that the expected result is computed
+    // with the arithmetic >> to demonstrate the computed narrower result
+    // does not depend on the wider >> or >>>.
+    halving_add_signed(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      short e = (short) ((sB1[i] + sB2[i]) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    halving_add_unsigned(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      short e = (short) (((sB1[i] & 0xffff) + (sB2[i] & 0xffff)) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    rounding_halving_add_signed(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      short e = (short) ((sB1[i] + sB2[i] + 1) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    rounding_halving_add_unsigned(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      short e = (short) (((sB1[i] & 0xffff) + (sB2[i] & 0xffff) + 1) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    halving_add_signed_constant(sB1, sBo);
+    for (int i = 0; i < M; i++) {
+      short e = (short) ((sB1[i] + 0x7fff) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    halving_add_unsigned_constant(sB1, sBo);
+    for (int i = 0; i < M; i++) {
+      short e = (short) (((sB1[i] & 0xffff) + 0xffff) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+
+    System.out.println("HaddAltShort passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/646-checker-simd-hadd/src/HaddByte.java b/test/646-checker-simd-hadd/src/HaddByte.java
new file mode 100644
index 0000000..9c99390
--- /dev/null
+++ b/test/646-checker-simd-hadd/src/HaddByte.java
@@ -0,0 +1,261 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for halving-add idiomatic vectorization.
+ */
+public class HaddByte {
+
+  private static final int N = 256;
+  private static final int M = N * N + 15;
+
+  static byte[] sB1 = new byte[M];
+  static byte[] sB2 = new byte[M];
+  static byte[] sBo = new byte[M];
+
+  /// CHECK-START: void HaddByte.halving_add_signed(byte[], byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddByte.halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (byte) ((b1[i] + b2[i]) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddByte.halving_add_unsigned(byte[], byte[], byte[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                     loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<I255>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<I255>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void HaddByte.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddByte.halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (byte) (((b1[i] & 0xff) + (b2[i] & 0xff)) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddByte.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddByte.rounding_halving_add_signed(byte[], byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int8 rounded:true loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void rounding_halving_add_signed(byte[] b1, byte[] b2, byte[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (byte) ((b1[i] + b2[i] + 1) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddByte.rounding_halving_add_unsigned(byte[], byte[], byte[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                     loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:b\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<I255>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And2:i\d+>> And [<<Get2>>,<<I255>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void HaddByte.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:a\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddByte.rounding_halving_add_unsigned(byte[], byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>]  packed_type:Uint8 rounded:true loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void rounding_halving_add_unsigned(byte[] b1, byte[] b2, byte[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (byte) (((b1[i] & 0xff) + (b2[i] & 0xff) + 1) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddByte.halving_add_signed_constant(byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<I127:i\d+>> IntConstant 127                     loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get:b\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<I127>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddByte.halving_add_signed_constant(byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<I127:i\d+>> IntConstant 127                       loop:none
+  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I127>>]         loop:none
+  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int8 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_signed_constant(byte[] b1, byte[] bo) {
+    int min_length = Math.min(bo.length, b1.length);
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (byte) ((b1[i] + 0x7f) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddByte.halving_add_unsigned_constant(byte[], byte[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                     loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get:b\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And:i\d+>>  And [<<Get>>,<<I255>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And>>,<<I255>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void HaddByte.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                     loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get:a\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<I255>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:b\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddByte.halving_add_unsigned_constant(byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<I255:i\d+>> IntConstant 255                       loop:none
+  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<I255>>]         loop:none
+  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint8 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_unsigned_constant(byte[] b1, byte[] bo) {
+    int min_length = Math.min(bo.length, b1.length);
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (byte) (((b1[i] & 0xff) + 0xff) >> 1);
+    }
+  }
+
+  public static void main() {
+    // Initialize cross-values to test all cases, and also
+    // set up some extra values to exercise the cleanup loop.
+    int k = 0;
+    for (int i = 0; i < N; i++) {
+      for (int j = 0; j < N; j++) {
+        sB1[k] = (byte) i;
+        sB2[k] = (byte) j;
+        k++;
+      }
+    }
+    for (int i = 0; i < 15; i++) {
+      sB1[k] = (byte) i;
+      sB2[k] = 100;
+      k++;
+    }
+    expectEquals(k, M);
+
+    // Test halving add idioms.
+    halving_add_signed(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      byte e = (byte) ((sB1[i] + sB2[i]) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    halving_add_unsigned(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      byte e = (byte) (((sB1[i] & 0xff) + (sB2[i] & 0xff)) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    rounding_halving_add_signed(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      byte e = (byte) ((sB1[i] + sB2[i] + 1) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    rounding_halving_add_unsigned(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      byte e = (byte) (((sB1[i] & 0xff) + (sB2[i] & 0xff) + 1) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    halving_add_signed_constant(sB1, sBo);
+    for (int i = 0; i < M; i++) {
+      byte e = (byte) ((sB1[i] + 0x7f) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    halving_add_unsigned_constant(sB1, sBo);
+    for (int i = 0; i < M; i++) {
+      byte e = (byte) (((sB1[i] & 0xff) + 0xff) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+
+    System.out.println("HaddByte passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/646-checker-simd-hadd/src/HaddChar.java b/test/646-checker-simd-hadd/src/HaddChar.java
new file mode 100644
index 0000000..255863e
--- /dev/null
+++ b/test/646-checker-simd-hadd/src/HaddChar.java
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for halving-add idiomatic vectorization.
+ */
+public class HaddChar {
+
+  private static final int N = 64 * 1024;
+  private static final int M = N + 31;
+
+  static char[] sB1 = new char[M];
+  static char[] sB2 = new char[M];
+  static char[] sBo = new char[M];
+
+  /// CHECK-START: void HaddChar.halving_add_unsigned(char[], char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM: void HaddChar.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void HaddChar.halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (char) ((b1[i] + b2[i]) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddChar.halving_add_also_unsigned(char[], char[], char[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<IMAX>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And2:i\d+>> And [<<IMAX>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void HaddChar.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM: void HaddChar.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void HaddChar.halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  //
+  // Note: HAnd has no impact (already a zero extension).
+  //
+  private static void halving_add_also_unsigned(char[] b1, char[] b2, char[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (char) (((b1[i] & 0xffff) + (b2[i] & 0xffff)) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddChar.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM: void HaddChar.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void HaddChar.rounding_halving_add_unsigned(char[], char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void rounding_halving_add_unsigned(char[] b1, char[] b2, char[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (char) ((b1[i] + b2[i] + 1) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddChar.rounding_halving_add_also_unsigned(char[], char[], char[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<IMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<IMAX>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And2:i\d+>> And [<<IMAX>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void HaddChar.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM: void HaddChar.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void HaddChar.rounding_halving_add_also_unsigned(char[], char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  //
+  // Note: HAnd has no impact (already a zero extension).
+  //
+  private static void rounding_halving_add_also_unsigned(char[] b1, char[] b2, char[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (char) (((b1[i] & 0xffff) + (b2[i] & 0xffff) + 1) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddChar.halving_add_unsigned_constant(char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get:c\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM: void HaddChar.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                     loop:none
+  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>]         loop:none
+  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void HaddChar.halving_add_unsigned_constant(char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                     loop:none
+  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>]         loop:none
+  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+  private static void halving_add_unsigned_constant(char[] b1, char[] bo) {
+    int min_length = Math.min(bo.length, b1.length);
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (char) ((b1[i] + 0xffff) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddChar.halving_add_also_unsigned_constant(char[], char[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get:c\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And:i\d+>>  And [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void HaddChar.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get:c\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:c\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM: void HaddChar.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                     loop:none
+  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>]         loop:none
+  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: void HaddChar.halving_add_also_unsigned_constant(char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                     loop:none
+  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>]         loop:none
+  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  //
+  // Note: HAnd has no impact (already a zero extension).
+  //
+  private static void halving_add_also_unsigned_constant(char[] b1, char[] bo) {
+    int min_length = Math.min(bo.length, b1.length);
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (char) (((b1[i] & 0xffff) + 0xffff) >> 1);
+    }
+  }
+
+  public static void main() {
+    // Some interesting values.
+    char[] interesting = {
+      (char) 0x0000,
+      (char) 0x0001,
+      (char) 0x0002,
+      (char) 0x1234,
+      (char) 0x8000,
+      (char) 0x8001,
+      (char) 0x7fff,
+      (char) 0xffff
+    };
+    // Initialize cross-values to test all cases, and also
+    // set up some extra values to exercise the cleanup loop.
+    for (int i = 0; i < M; i++) {
+      sB1[i] = (char) i;
+      sB2[i] = interesting[i & 7];
+    }
+
+    // Test halving add idioms.
+    halving_add_unsigned(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      char e = (char) ((sB1[i] + sB2[i]) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    halving_add_also_unsigned(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      char e = (char) ((sB1[i] + sB2[i]) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    rounding_halving_add_unsigned(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      char e = (char) ((sB1[i] + sB2[i] + 1) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    rounding_halving_add_also_unsigned(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      char e = (char) ((sB1[i] + sB2[i] + 1) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    halving_add_unsigned_constant(sB1, sBo);
+    for (int i = 0; i < M; i++) {
+      char e = (char) ((sB1[i] + 0xffff) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    halving_add_also_unsigned_constant(sB1, sBo);
+    for (int i = 0; i < M; i++) {
+      char e = (char) ((sB1[i] + 0xffff) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+
+    System.out.println("HaddChar passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/646-checker-simd-hadd/src/HaddShort.java b/test/646-checker-simd-hadd/src/HaddShort.java
new file mode 100644
index 0000000..4102efd
--- /dev/null
+++ b/test/646-checker-simd-hadd/src/HaddShort.java
@@ -0,0 +1,442 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for halving-add idiomatic vectorization.
+ */
+public class HaddShort {
+
+  private static final int N = 64 * 1024;
+  private static final int M = N + 31;
+
+  static short[] sB1 = new short[M];
+  static short[] sB2 = new short[M];
+  static short[] sBo = new short[M];
+
+  private static int $inline$mone() {
+    return -1;
+  }
+
+  /// CHECK-START: void HaddShort.halving_add_signed(short[], short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddShort.halving_add_signed(short[], short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_signed(short[] b1, short[] b2, short[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (short) ((b1[i] + b2[i]) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddShort.halving_add_signed_alt(short[], short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<I10:i\d+>>  IntConstant 10                      loop:none
+  /// CHECK-DAG: <<M10:i\d+>>  IntConstant -10                     loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<I10>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Get2>>,<<M10>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add3:i\d+>> Add [<<Add1>>,<<Add2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add3>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddShort.halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_signed_alt(short[] b1, short[] b2, short[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      // Cancelling constant computations do not confuse recognition.
+      bo[i] = (short) (((b1[i] + 10) + (b2[i] - 10)) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddShort.halving_add_unsigned(short[], short[], short[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<UMAX>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And2:i\d+>> And [<<UMAX>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void HaddShort.halving_add_unsigned(short[], short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddShort.halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      int v1 = b1[i] & 0xffff;
+      int v2 = b2[i] & 0xffff;
+      bo[i] = (short) ((v1 + v2) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddShort.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddShort.rounding_halving_add_signed(short[], short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void rounding_halving_add_signed(short[] b1, short[] b2, short[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (short) ((b1[i] + b2[i] + 1) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddShort.rounding_halving_add_signed_alt(short[], short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddShort.rounding_halving_add_signed_alt(short[], short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void rounding_halving_add_signed_alt(short[] b1, short[] b2, short[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      // Slightly different order in idiom does not confuse recognition.
+      bo[i] = (short) (((1 + b1[i]) + b2[i]) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddShort.rounding_halving_add_signed_alt2(short[], short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<I10:i\d+>>  IntConstant 10                      loop:none
+  /// CHECK-DAG: <<M9:i\d+>>   IntConstant -9                      loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<I10>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Get2>>,<<M9>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add3:i\d+>> Add [<<Add1>>,<<Add2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add3>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddShort.rounding_halving_add_signed_alt2(short[], short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void rounding_halving_add_signed_alt2(short[] b1, short[] b2, short[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      // Computations that cancel to adding 1 also do not confuse recognition.
+      bo[i] = (short) (((b1[i] + 10) + (b2[i] - 9)) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddShort.rounding_halving_add_signed_alt3(short[], short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<M1:i\d+>>   IntConstant -1                      loop:none
+  /// CHECK-DAG: <<I9:i\d+>>   IntConstant 9                       loop:none
+  /// CHECK-DAG: <<M9:i\d+>>   IntConstant -9                      loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<I9>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Get2>>,<<M9>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add3:i\d+>> Add [<<Add1>>,<<Add2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>  Sub [<<Add3>>,<<M1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Sub>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddShort.rounding_halving_add_signed_alt3(short[], short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Int16 rounded:true loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void rounding_halving_add_signed_alt3(short[] b1, short[] b2, short[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      // Computations that cancel to adding 1 also do not confuse recognition.
+      bo[i] = (short) (((b1[i] + 9) + (b2[i] - 9) - $inline$mone()) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddShort.rounding_halving_add_unsigned(short[], short[], short[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<UMAX>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And2:i\d+>> And [<<UMAX>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<And1>>,<<And2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void HaddShort.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddShort.rounding_halving_add_unsigned(short[], short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void rounding_halving_add_unsigned(short[] b1, short[] b2, short[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      int v1 = b1[i] & 0xffff;
+      int v2 = b2[i] & 0xffff;
+      bo[i] = (short) ((v1 + v2 + 1) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddShort.rounding_halving_add_unsigned_alt(short[], short[], short[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And1:i\d+>> And [<<Get1>>,<<UMAX>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And2:i\d+>> And [<<UMAX>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<And2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Add1>>,<<And1>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void HaddShort.rounding_halving_add_unsigned_alt(short[], short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>> ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add1:i\d+>> Add [<<Get2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add2:i\d+>> Add [<<Get1>>,<<Add1>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add2>>,<<I1>>]               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddShort.rounding_halving_add_unsigned_alt(short[], short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Get1:d\d+>> VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get2:d\d+>> VecLoad                               loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get1>>,<<Get2>>] packed_type:Uint16 rounded:true loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void rounding_halving_add_unsigned_alt(short[] b1, short[] b2, short[] bo) {
+    int min_length = Math.min(bo.length, Math.min(b1.length, b2.length));
+    for (int i = 0; i < min_length; i++) {
+      // Slightly different order in idiom does not confuse recognition.
+      int v1 = b1[i] & 0xffff;
+      int v2 = b2[i] & 0xffff;
+      bo[i] = (short) (v1 + (v2 + 1) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddShort.halving_add_signed_constant(short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get:s\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<SMAX>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddShort.halving_add_signed_constant(short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<SMAX:i\d+>> IntConstant 32767                     loop:none
+  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<SMAX>>]         loop:none
+  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Int16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_signed_constant(short[] b1, short[] bo) {
+    int min_length = Math.min(bo.length, b1.length);
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (short) ((b1[i] + 0x7fff) >> 1);
+    }
+  }
+
+  /// CHECK-START: void HaddShort.halving_add_unsigned_constant(short[], short[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get:s\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<And:i\d+>>  And [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<And>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},{{i\d+}},<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: void HaddShort.halving_add_unsigned_constant(short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<I1:i\d+>>   IntConstant 1                       loop:none
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                   loop:none
+  /// CHECK-DAG: <<Phi:i\d+>>  Phi                                 loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Get:c\d+>>  ArrayGet                            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>  Add [<<Get>>,<<UMAX>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Shr:i\d+>>  Shr [<<Add>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv:s\d+>>  TypeConversion [<<Shr>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: void HaddShort.halving_add_unsigned_constant(short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<UMAX:i\d+>> IntConstant 65535                     loop:none
+  /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<UMAX>>]         loop:none
+  /// CHECK-DAG: <<Get:d\d+>>  VecLoad                               loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<HAdd:d\d+>> VecHalvingAdd [<<Get>>,<<Repl>>] packed_type:Uint16 rounded:false loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:               VecStore [{{l\d+}},{{i\d+}},<<HAdd>>] loop:<<Loop>>      outer_loop:none
+  private static void halving_add_unsigned_constant(short[] b1, short[] bo) {
+    int min_length = Math.min(bo.length, b1.length);
+    for (int i = 0; i < min_length; i++) {
+      bo[i] = (short) (((b1[i] & 0xffff) + 0xffff) >> 1);
+    }
+  }
+
+  public static void main() {
+    // Some interesting values.
+    short[] interesting = {
+      (short) 0x0000,
+      (short) 0x0001,
+      (short) 0x0002,
+      (short) 0x1234,
+      (short) 0x8000,
+      (short) 0x8001,
+      (short) 0x7fff,
+      (short) 0xffff
+    };
+    // Initialize cross-values to test all cases, and also
+    // set up some extra values to exercise the cleanup loop.
+    for (int i = 0; i < M; i++) {
+      sB1[i] = (short) i;
+      sB2[i] = interesting[i & 7];
+    }
+
+    // Test halving add idioms.
+    halving_add_signed(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      short e = (short) ((sB1[i] + sB2[i]) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    halving_add_signed_alt(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      short e = (short) ((sB1[i] + sB2[i]) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    halving_add_unsigned(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      short e = (short) (((sB1[i] & 0xffff) + (sB2[i] & 0xffff)) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    rounding_halving_add_signed(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      short e = (short) ((sB1[i] + sB2[i] + 1) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    rounding_halving_add_signed_alt(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      short e = (short) ((sB1[i] + sB2[i] + 1) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    rounding_halving_add_signed_alt2(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      short e = (short) ((sB1[i] + sB2[i] + 1) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    rounding_halving_add_signed_alt3(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      short e = (short) ((sB1[i] + sB2[i] + 1) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    rounding_halving_add_unsigned(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      short e = (short) (((sB1[i] & 0xffff) + (sB2[i] & 0xffff) + 1) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    rounding_halving_add_unsigned_alt(sB1, sB2, sBo);
+    for (int i = 0; i < M; i++) {
+      short e = (short) (((sB1[i] & 0xffff) + (sB2[i] & 0xffff) + 1) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    halving_add_signed_constant(sB1, sBo);
+    for (int i = 0; i < M; i++) {
+      short e = (short) ((sB1[i] + 0x7fff) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+    halving_add_unsigned_constant(sB1, sBo);
+    for (int i = 0; i < M; i++) {
+      short e = (short) (((sB1[i] & 0xffff) + 0xffff) >> 1);
+      expectEquals(e, sBo[i]);
+    }
+
+    System.out.println("HaddShort passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/646-checker-simd-hadd/src/Main.java b/test/646-checker-simd-hadd/src/Main.java
new file mode 100644
index 0000000..c5ec7a7
--- /dev/null
+++ b/test/646-checker-simd-hadd/src/Main.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) {
+    HaddAltByte.main();
+    HaddAltShort.main();
+    HaddAltChar.main();
+    HaddByte.main();
+    HaddShort.main();
+    HaddChar.main();
+  }
+}
diff --git a/test/656-checker-simd-opt/src/Main.java b/test/656-checker-simd-opt/src/Main.java
index 081e421..39a126f 100644
--- a/test/656-checker-simd-opt/src/Main.java
+++ b/test/656-checker-simd-opt/src/Main.java
@@ -102,7 +102,7 @@
   /// CHECK-DAG: <<Add2>>       Add [<<Phi2>>,<<Get>>]     loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Add1>>       Add [<<Phi1>>,<<L1>>]      loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM64,MIPS64}: long Main.longInductionReduction(long[]) loop_optimization (after)
+  /// CHECK-START-ARM64: long Main.longInductionReduction(long[]) loop_optimization (after)
   /// CHECK-DAG: <<L0:j\d+>>    LongConstant 0               loop:none
   /// CHECK-DAG: <<L1:j\d+>>    LongConstant 1               loop:none
   /// CHECK-DAG: <<L2:j\d+>>    LongConstant 2               loop:none
@@ -131,7 +131,7 @@
   /// CHECK-DAG:                ArraySet [{{l\d+}},<<Phi>>,<<Cnv>>] loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Add>>        Add [<<Phi>>,<<I1>>]                loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.intVectorLongInvariant(int[], long[]) loop_optimization (after)
+  /// CHECK-START-ARM64: void Main.intVectorLongInvariant(int[], long[]) loop_optimization (after)
   /// CHECK-DAG: <<I0:i\d+>>    IntConstant 0                       loop:none
   /// CHECK-DAG: <<I1:i\d+>>    IntConstant 1                       loop:none
   /// CHECK-DAG: <<I4:i\d+>>    IntConstant 4                       loop:none
@@ -159,7 +159,7 @@
   /// CHECK-DAG:                ArraySet [{{l\d+}},<<Phi>>,<<Cnv2>>] loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Add>>        Add [<<Phi>>,<<I1>>]                 loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.longCanBeDoneWithInt(int[], int[]) loop_optimization (after)
+  /// CHECK-START-ARM64: void Main.longCanBeDoneWithInt(int[], int[]) loop_optimization (after)
   /// CHECK-DAG: <<I0:i\d+>>    IntConstant 0                       loop:none
   /// CHECK-DAG: <<I4:i\d+>>    IntConstant 4                       loop:none
   /// CHECK-DAG: <<L1:j\d+>>    LongConstant 1                      loop:none
diff --git a/test/660-checker-sad-byte/expected.txt b/test/660-checker-sad-byte/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/660-checker-sad-byte/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/660-checker-sad-byte/info.txt b/test/660-checker-sad-byte/info.txt
deleted file mode 100644
index 0c1cbda..0000000
--- a/test/660-checker-sad-byte/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on SAD scalar operations.
diff --git a/test/660-checker-sad-byte/src/Main.java b/test/660-checker-sad-byte/src/Main.java
deleted file mode 100644
index bcd62c4..0000000
--- a/test/660-checker-sad-byte/src/Main.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tests for SAD (sum of absolute differences).
- */
-public class Main {
-
-  /// CHECK-START: int Main.sad1(byte, byte) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:i\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: int Main.sad1(byte, byte) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static int sad1(byte x, byte y) {
-    return x >= y ? x - y : y - x;
-  }
-
-  /// CHECK-START: int Main.sad2(byte, byte) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:i\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: int Main.sad2(byte, byte) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static int sad2(byte x, byte y) {
-    int diff = x - y;
-    if (diff < 0) diff = -diff;
-    return diff;
-  }
-
-  /// CHECK-START: int Main.sad3(byte, byte) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:i\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: int Main.sad3(byte, byte) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static int sad3(byte x, byte y) {
-    int diff = x - y;
-    return diff >= 0 ? diff : -diff;
-  }
-
-  /// CHECK-START: int Main.sad3Alt(byte, byte) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:i\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: int Main.sad3Alt(byte, byte) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static int sad3Alt(byte x, byte y) {
-    int diff = x - y;
-    return 0 <= diff ? diff : -diff;
-  }
-
-  /// CHECK-START: long Main.sadL1(byte, byte) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sadL1(byte, byte) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static long sadL1(byte x, byte y) {
-    long xl = x;
-    long yl = y;
-    return xl >= yl ? xl - yl : yl - xl;
-  }
-
-  /// CHECK-START: long Main.sadL2(byte, byte) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sadL2(byte, byte) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static long sadL2(byte x, byte y) {
-    long diff = x - y;
-    if (diff < 0L) diff = -diff;
-    return diff;
-  }
-
-  /// CHECK-START: long Main.sadL3(byte, byte) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sadL3(byte, byte) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static long sadL3(byte x, byte y) {
-    long diff = x - y;
-    return diff >= 0L ? diff : -diff;
-  }
-
-  /// CHECK-START: long Main.sadL3Alt(byte, byte) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sadL3Alt(byte, byte) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static long sadL3Alt(byte x, byte y) {
-    long diff = x - y;
-    return 0L <= diff ? diff : -diff;
-  }
-
-  public static void main(String[] args) {
-    // Use cross-values to test all cases.
-    int n = 256;
-    for (int i = 0; i < n; i++) {
-      for (int j = 0; j < n; j++) {
-        byte x = (byte) i;
-        byte y = (byte) j;
-        int e = Math.abs(x - y);
-        expectEquals(e, sad1(x, y));
-        expectEquals(e, sad2(x, y));
-        expectEquals(e, sad3(x, y));
-        expectEquals(e, sad3Alt(x, y));
-        expectEquals(e, sadL2(x, y));
-        expectEquals(e, sadL3(x, y));
-        expectEquals(e, sadL3Alt(x, y));
-      }
-    }
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-
-  private static void expectEquals(long expected, long result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-}
diff --git a/test/660-checker-sad-char/expected.txt b/test/660-checker-sad-char/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/660-checker-sad-char/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/660-checker-sad-char/info.txt b/test/660-checker-sad-char/info.txt
deleted file mode 100644
index 0c1cbda..0000000
--- a/test/660-checker-sad-char/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on SAD scalar operations.
diff --git a/test/660-checker-sad-char/src/Main.java b/test/660-checker-sad-char/src/Main.java
deleted file mode 100644
index 998ec33..0000000
--- a/test/660-checker-sad-char/src/Main.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tests for SAD (sum of absolute differences).
- */
-public class Main {
-
-  /// CHECK-START: int Main.sad1(char, char) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:i\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: int Main.sad1(char, char) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static int sad1(char x, char y) {
-    return x >= y ? x - y : y - x;
-  }
-
-  /// CHECK-START: int Main.sad2(char, char) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:i\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: int Main.sad2(char, char) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static int sad2(char x, char y) {
-    int diff = x - y;
-    if (diff < 0) diff = -diff;
-    return diff;
-  }
-
-  /// CHECK-START: int Main.sad3(char, char) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:i\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: int Main.sad3(char, char) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static int sad3(char x, char y) {
-    int diff = x - y;
-    return diff >= 0 ? diff : -diff;
-  }
-
-  /// CHECK-START: int Main.sad3Alt(char, char) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:i\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: int Main.sad3Alt(char, char) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static int sad3Alt(char x, char y) {
-    int diff = x - y;
-    return 0 <= diff ? diff : -diff;
-  }
-
-  /// CHECK-START: long Main.sadL1(char, char) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sadL1(char, char) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static long sadL1(char x, char y) {
-    long xl = x;
-    long yl = y;
-    return xl >= yl ? xl - yl : yl - xl;
-  }
-
-  /// CHECK-START: long Main.sadL2(char, char) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sadL2(char, char) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static long sadL2(char x, char y) {
-    long diff = x - y;
-    if (diff < 0L) diff = -diff;
-    return diff;
-  }
-
-  /// CHECK-START: long Main.sadL3(char, char) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sadL3(char, char) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static long sadL3(char x, char y) {
-    long diff = x - y;
-    return diff >= 0L ? diff : -diff;
-  }
-
-  /// CHECK-START: long Main.sadL3Alt(char, char) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sadL3Alt(char, char) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static long sadL3Alt(char x, char y) {
-    long diff = x - y;
-    return 0L <= diff ? diff : -diff;
-  }
-
-  public static void main(String[] args) {
-    // Use cross-values to test all cases.
-    char[] interesting = {
-      (char) 0x0000, (char) 0x0001, (char) 0x007f,
-      (char) 0x0080, (char) 0x0081, (char) 0x00ff,
-      (char) 0x0100, (char) 0x0101, (char) 0x017f,
-      (char) 0x0180, (char) 0x0181, (char) 0x01ff,
-      (char) 0x7f00, (char) 0x7f01, (char) 0x7f7f,
-      (char) 0x7f80, (char) 0x7f81, (char) 0x7fff,
-      (char) 0x8000, (char) 0x8001, (char) 0x807f,
-      (char) 0x8080, (char) 0x8081, (char) 0x80ff,
-      (char) 0x8100, (char) 0x8101, (char) 0x817f,
-      (char) 0x8180, (char) 0x8181, (char) 0x81ff,
-      (char) 0xff00, (char) 0xff01, (char) 0xff7f,
-      (char) 0xff80, (char) 0xff81, (char) 0xffff
-    };
-    for (int i = 0; i < interesting.length; i++) {
-      for (int j = 0; j < interesting.length; j++) {
-        char x = interesting[i];
-        char y = interesting[j];
-        int e = Math.abs(x - y);
-        expectEquals(e, sad1(x, y));
-        expectEquals(e, sad2(x, y));
-        expectEquals(e, sad3(x, y));
-        expectEquals(e, sad3Alt(x, y));
-        expectEquals(e, sadL1(x, y));
-        expectEquals(e, sadL2(x, y));
-        expectEquals(e, sadL3(x, y));
-        expectEquals(e, sadL3Alt(x, y));
-      }
-    }
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-
-  private static void expectEquals(long expected, long result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-}
diff --git a/test/660-checker-sad-int/expected.txt b/test/660-checker-sad-int/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/660-checker-sad-int/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/660-checker-sad-int/src/Main.java b/test/660-checker-sad-int/src/Main.java
deleted file mode 100644
index 09878a5..0000000
--- a/test/660-checker-sad-int/src/Main.java
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tests for SAD (sum of absolute differences).
- */
-public class Main {
-
-  /// CHECK-START: int Main.sad1(int, int) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:i\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: int Main.sad1(int, int) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Select:i\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: int Main.sad1(int, int) instruction_simplifier$after_gvn (after)
-  /// CHECK-NOT: Abs
-  //
-  // NOTE: for direct 32-bit operands, this is not an ABS.
-  static int sad1(int x, int y) {
-    return x >= y ? x - y : y - x;
-  }
-
-  /// CHECK-START: int Main.sad2(int, int) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:i\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: int Main.sad2(int, int) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static int sad2(int x, int y) {
-    int diff = x - y;
-    if (diff < 0) diff = -diff;
-    return diff;
-  }
-
-  /// CHECK-START: int Main.sad3(int, int) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:i\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: int Main.sad3(int, int) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static int sad3(int x, int y) {
-    int diff = x - y;
-    return diff >= 0 ? diff : -diff;
-  }
-
-  /// CHECK-START: int Main.sad3Alt(int, int) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:i\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: int Main.sad3Alt(int, int) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static int sad3Alt(int x, int y) {
-    int diff = x - y;
-    return 0 <= diff ? diff : -diff;
-  }
-
-  /// CHECK-START: long Main.sadL1(int, int) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sadL1(int, int) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static long sadL1(int x, int y) {
-    long xl = x;
-    long yl = y;
-    return xl >= yl ? xl - yl : yl - xl;
-  }
-
-  /// CHECK-START: long Main.sadL2(int, int) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sadL2(int, int) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static long sadL2(int x, int y) {
-    long diff = x - y;
-    if (diff < 0L) diff = -diff;
-    return diff;
-  }
-
-  /// CHECK-START: long Main.sadL3(int, int) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sadL3(int, int) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static long sadL3(int x, int y) {
-    long diff = x - y;
-    return diff >= 0L ? diff : -diff;
-  }
-
-  /// CHECK-START: long Main.sadL3Alt(int, int) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sadL3Alt(int, int) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static long sadL3Alt(int x, int y) {
-    long diff = x - y;
-    return 0L <= diff ? diff : -diff;
-  }
-
-  public static void main(String[] args) {
-    // Use cross-values for the interesting values.
-    int[] interesting = {
-      0x00000000, 0x00000001, 0x00007fff, 0x00008000, 0x00008001, 0x0000ffff,
-      0x00010000, 0x00010001, 0x00017fff, 0x00018000, 0x00018001, 0x0001ffff,
-      0x7fff0000, 0x7fff0001, 0x7fff7fff, 0x7fff8000, 0x7fff8001, 0x7fffffff,
-      0x80000000, 0x80000001, 0x80007fff, 0x80008000, 0x80008001, 0x8000ffff,
-      0x80010000, 0x80010001, 0x80017fff, 0x80018000, 0x80018001, 0x8001ffff,
-      0xffff0000, 0xffff0001, 0xffff7fff, 0xffff8000, 0xffff8001, 0xffffffff
-    };
-    for (int i = 0; i < interesting.length; i++) {
-      for (int j = 0; j < interesting.length; j++) {
-        int x = interesting[i];
-        int y = interesting[j];
-        int e1 = x >= y ? x - y : y - x;  // still select
-        expectEquals(e1, sad1(x, y));
-        int e2 = Math.abs(x - y);  // pure abs
-        expectEquals(e2, sad2(x, y));
-        expectEquals(e2, sad3(x, y));
-        expectEquals(e2, sad3Alt(x, y));
-        long eL1 = Math.abs(((long)x) - ((long)y));  // now, different, but abs
-        expectEquals(eL1, sadL1(x, y));
-        long eL2 = Math.abs((long)(x - y));  // also, different, but abs
-        expectEquals(eL2, sadL2(x, y));
-        expectEquals(eL2, sadL3(x, y));
-        expectEquals(eL2, sadL3Alt(x, y));
-      }
-    }
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-
-  private static void expectEquals(long expected, long result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-}
diff --git a/test/660-checker-sad-long/expected.txt b/test/660-checker-sad-long/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/660-checker-sad-long/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/660-checker-sad-long/info.txt b/test/660-checker-sad-long/info.txt
deleted file mode 100644
index 0c1cbda..0000000
--- a/test/660-checker-sad-long/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on SAD scalar operations.
diff --git a/test/660-checker-sad-long/src/Main.java b/test/660-checker-sad-long/src/Main.java
deleted file mode 100644
index b9eeb5f..0000000
--- a/test/660-checker-sad-long/src/Main.java
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tests for SAD (sum of absolute differences).
- */
-public class Main {
-
-  /// CHECK-START: long Main.sad1(long, long) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sad1(long, long) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sad1(long, long) instruction_simplifier$after_gvn (after)
-  /// CHECK-NOT: Abs
-  //
-  // NOTE: for direct 64-bit operands, this is not an ABS.
-  static long sad1(long x, long y) {
-    return x >= y ? x - y : y - x;
-  }
-
-  /// CHECK-START: long Main.sad2(long, long) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sad2(long, long) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static long sad2(long x, long y) {
-    long diff = x - y;
-    if (diff < 0) diff = -diff;
-    return diff;
-  }
-
-  /// CHECK-START: long Main.sad3(long, long) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sad3(long, long) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static long sad3(long x, long y) {
-    long diff = x - y;
-    return diff >= 0 ? diff : -diff;
-  }
-
-  /// CHECK-START: long Main.sad3Alt(long, long) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sad3Alt(long, long) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static long sad3Alt(long x, long y) {
-    long diff = x - y;
-    return 0 <= diff ? diff : -diff;
-  }
-
-  public static void main(String[] args) {
-    // Use cross-values for the interesting values.
-    long[] interesting = {
-      0x0000000000000000L, 0x0000000000000001L, 0x000000007fffffffL,
-      0x0000000080000000L, 0x0000000080000001L, 0x00000000ffffffffL,
-      0x0000000100000000L, 0x0000000100000001L, 0x000000017fffffffL,
-      0x0000000180000000L, 0x0000000180000001L, 0x00000001ffffffffL,
-      0x7fffffff00000000L, 0x7fffffff00000001L, 0x7fffffff7fffffffL,
-      0x7fffffff80000000L, 0x7fffffff80000001L, 0x7fffffffffffffffL,
-      0x8000000000000000L, 0x8000000000000001L, 0x800000007fffffffL,
-      0x8000000080000000L, 0x8000000080000001L, 0x80000000ffffffffL,
-      0x8000000100000000L, 0x8000000100000001L, 0x800000017fffffffL,
-      0x8000000180000000L, 0x8000000180000001L, 0x80000001ffffffffL,
-      0xffffffff00000000L, 0xffffffff00000001L, 0xffffffff7fffffffL,
-      0xffffffff80000000L, 0xffffffff80000001L, 0xffffffffffffffffL
-    };
-    for (int i = 0; i < interesting.length; i++) {
-      for (int j = 0; j < interesting.length; j++) {
-        long x = interesting[i];
-        long y = interesting[j];
-        long e1 = x >= y ? x - y : y - x;  // still select
-        expectEquals(e1, sad1(x, y));
-        long e2 = Math.abs(x - y);  // pure abs
-        expectEquals(e2, sad2(x, y));
-        expectEquals(e2, sad3(x, y));
-        expectEquals(e2, sad3Alt(x, y));
-      }
-    }
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(long expected, long result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-}
diff --git a/test/660-checker-sad-short/expected.txt b/test/660-checker-sad-short/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/660-checker-sad-short/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/660-checker-sad-short/info.txt b/test/660-checker-sad-short/info.txt
deleted file mode 100644
index 0c1cbda..0000000
--- a/test/660-checker-sad-short/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on SAD scalar operations.
diff --git a/test/660-checker-sad-short/src/Main.java b/test/660-checker-sad-short/src/Main.java
deleted file mode 100644
index 0a1a4dc..0000000
--- a/test/660-checker-sad-short/src/Main.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tests for SAD (sum of absolute differences).
- */
-public class Main {
-
-  /// CHECK-START: int Main.sad1(short, short) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:i\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: int Main.sad1(short, short) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static int sad1(short x, short y) {
-    return x >= y ? x - y : y - x;
-  }
-
-  /// CHECK-START: int Main.sad2(short, short) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:i\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: int Main.sad2(short, short) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static int sad2(short x, short y) {
-    int diff = x - y;
-    if (diff < 0) diff = -diff;
-    return diff;
-  }
-
-  /// CHECK-START: int Main.sad3(short, short) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:i\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: int Main.sad3(short, short) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static int sad3(short x, short y) {
-    int diff = x - y;
-    return diff >= 0 ? diff : -diff;
-  }
-
-  /// CHECK-START: int Main.sad3Alt(short, short) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:i\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: int Main.sad3Alt(short, short) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static int sad3Alt(short x, short y) {
-    int diff = x - y;
-    return 0 <= diff ? diff : -diff;
-  }
-
-  /// CHECK-START: long Main.sadL1(short, short) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sadL1(short, short) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static long sadL1(short x, short y) {
-    long xl = x;
-    long yl = y;
-    return xl >= yl ? xl - yl : yl - xl;
-  }
-
-  /// CHECK-START: long Main.sadL2(short, short) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sadL2(short, short) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static long sadL2(short x, short y) {
-    long diff = x - y;
-    if (diff < 0L) diff = -diff;
-    return diff;
-  }
-
-  /// CHECK-START: long Main.sadL3(short, short) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sadL3(short, short) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static long sadL3(short x, short y) {
-    long diff = x - y;
-    return diff >= 0L ? diff : -diff;
-  }
-
-  /// CHECK-START: long Main.sadL3Alt(short, short) instruction_simplifier$after_gvn (before)
-  /// CHECK-DAG: <<Select:j\d+>> Select
-  /// CHECK-DAG:                 Return [<<Select>>]
-  //
-  /// CHECK-START: long Main.sadL3Alt(short, short) instruction_simplifier$after_gvn (after)
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs
-  /// CHECK-DAG:                 Return [<<Intrin>>]
-  static long sadL3Alt(short x, short y) {
-    long diff = x - y;
-    return 0L <= diff ? diff : -diff;
-  }
-
-  public static void main(String[] args) {
-    // Use cross-values to test all cases.
-    short[] interesting = {
-      (short) 0x0000, (short) 0x0001, (short) 0x007f,
-      (short) 0x0080, (short) 0x0081, (short) 0x00ff,
-      (short) 0x0100, (short) 0x0101, (short) 0x017f,
-      (short) 0x0180, (short) 0x0181, (short) 0x01ff,
-      (short) 0x7f00, (short) 0x7f01, (short) 0x7f7f,
-      (short) 0x7f80, (short) 0x7f81, (short) 0x7fff,
-      (short) 0x8000, (short) 0x8001, (short) 0x807f,
-      (short) 0x8080, (short) 0x8081, (short) 0x80ff,
-      (short) 0x8100, (short) 0x8101, (short) 0x817f,
-      (short) 0x8180, (short) 0x8181, (short) 0x81ff,
-      (short) 0xff00, (short) 0xff01, (short) 0xff7f,
-      (short) 0xff80, (short) 0xff81, (short) 0xffff
-    };
-    for (int i = 0; i < interesting.length; i++) {
-      for (int j = 0; j < interesting.length; j++) {
-        short x = interesting[i];
-        short y = interesting[j];
-        int e = Math.abs(x - y);
-        expectEquals(e, sad1(x, y));
-        expectEquals(e, sad2(x, y));
-        expectEquals(e, sad3(x, y));
-        expectEquals(e, sad3Alt(x, y));
-        expectEquals(e, sadL1(x, y));
-        expectEquals(e, sadL2(x, y));
-        expectEquals(e, sadL3(x, y));
-        expectEquals(e, sadL3Alt(x, y));
-      }
-    }
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-
-  private static void expectEquals(long expected, long result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-}
diff --git a/test/660-checker-sad/expected.txt b/test/660-checker-sad/expected.txt
new file mode 100644
index 0000000..b73875c
--- /dev/null
+++ b/test/660-checker-sad/expected.txt
@@ -0,0 +1,5 @@
+SadByte passed
+SadShort passed
+SadChar passed
+SadInt passed
+SadLong passed
diff --git a/test/660-checker-sad-int/info.txt b/test/660-checker-sad/info.txt
similarity index 100%
rename from test/660-checker-sad-int/info.txt
rename to test/660-checker-sad/info.txt
diff --git a/test/660-checker-sad/src/Main.java b/test/660-checker-sad/src/Main.java
new file mode 100644
index 0000000..936ed74
--- /dev/null
+++ b/test/660-checker-sad/src/Main.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) {
+    SadByte.main();
+    SadShort.main();
+    SadChar.main();
+    SadInt.main();
+    SadLong.main();
+  }
+}
diff --git a/test/660-checker-sad/src/SadByte.java b/test/660-checker-sad/src/SadByte.java
new file mode 100644
index 0000000..66ff917
--- /dev/null
+++ b/test/660-checker-sad/src/SadByte.java
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for SAD (sum of absolute differences).
+ */
+public class SadByte {
+
+  /// CHECK-START: int SadByte.sad1(byte, byte) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:i\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: int SadByte.sad1(byte, byte) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static int sad1(byte x, byte y) {
+    return x >= y ? x - y : y - x;
+  }
+
+  /// CHECK-START: int SadByte.sad2(byte, byte) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:i\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: int SadByte.sad2(byte, byte) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static int sad2(byte x, byte y) {
+    int diff = x - y;
+    if (diff < 0) diff = -diff;
+    return diff;
+  }
+
+  /// CHECK-START: int SadByte.sad3(byte, byte) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:i\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: int SadByte.sad3(byte, byte) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static int sad3(byte x, byte y) {
+    int diff = x - y;
+    return diff >= 0 ? diff : -diff;
+  }
+
+  /// CHECK-START: int SadByte.sad3Alt(byte, byte) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:i\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: int SadByte.sad3Alt(byte, byte) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static int sad3Alt(byte x, byte y) {
+    int diff = x - y;
+    return 0 <= diff ? diff : -diff;
+  }
+
+  /// CHECK-START: long SadByte.sadL1(byte, byte) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadByte.sadL1(byte, byte) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static long sadL1(byte x, byte y) {
+    long xl = x;
+    long yl = y;
+    return xl >= yl ? xl - yl : yl - xl;
+  }
+
+  /// CHECK-START: long SadByte.sadL2(byte, byte) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadByte.sadL2(byte, byte) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static long sadL2(byte x, byte y) {
+    long diff = x - y;
+    if (diff < 0L) diff = -diff;
+    return diff;
+  }
+
+  /// CHECK-START: long SadByte.sadL3(byte, byte) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadByte.sadL3(byte, byte) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static long sadL3(byte x, byte y) {
+    long diff = x - y;
+    return diff >= 0L ? diff : -diff;
+  }
+
+  /// CHECK-START: long SadByte.sadL3Alt(byte, byte) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadByte.sadL3Alt(byte, byte) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static long sadL3Alt(byte x, byte y) {
+    long diff = x - y;
+    return 0L <= diff ? diff : -diff;
+  }
+
+  public static void main() {
+    // Use cross-values to test all cases.
+    int n = 256;
+    for (int i = 0; i < n; i++) {
+      for (int j = 0; j < n; j++) {
+        byte x = (byte) i;
+        byte y = (byte) j;
+        int e = Math.abs(x - y);
+        expectEquals(e, sad1(x, y));
+        expectEquals(e, sad2(x, y));
+        expectEquals(e, sad3(x, y));
+        expectEquals(e, sad3Alt(x, y));
+        expectEquals(e, sadL2(x, y));
+        expectEquals(e, sadL3(x, y));
+        expectEquals(e, sadL3Alt(x, y));
+      }
+    }
+    System.out.println("SadByte passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  private static void expectEquals(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/660-checker-sad/src/SadChar.java b/test/660-checker-sad/src/SadChar.java
new file mode 100644
index 0000000..fe9b63d
--- /dev/null
+++ b/test/660-checker-sad/src/SadChar.java
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for SAD (sum of absolute differences).
+ */
+public class SadChar {
+
+  /// CHECK-START: int SadChar.sad1(char, char) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:i\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: int SadChar.sad1(char, char) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static int sad1(char x, char y) {
+    return x >= y ? x - y : y - x;
+  }
+
+  /// CHECK-START: int SadChar.sad2(char, char) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:i\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: int SadChar.sad2(char, char) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static int sad2(char x, char y) {
+    int diff = x - y;
+    if (diff < 0) diff = -diff;
+    return diff;
+  }
+
+  /// CHECK-START: int SadChar.sad3(char, char) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:i\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: int SadChar.sad3(char, char) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static int sad3(char x, char y) {
+    int diff = x - y;
+    return diff >= 0 ? diff : -diff;
+  }
+
+  /// CHECK-START: int SadChar.sad3Alt(char, char) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:i\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: int SadChar.sad3Alt(char, char) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static int sad3Alt(char x, char y) {
+    int diff = x - y;
+    return 0 <= diff ? diff : -diff;
+  }
+
+  /// CHECK-START: long SadChar.sadL1(char, char) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadChar.sadL1(char, char) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static long sadL1(char x, char y) {
+    long xl = x;
+    long yl = y;
+    return xl >= yl ? xl - yl : yl - xl;
+  }
+
+  /// CHECK-START: long SadChar.sadL2(char, char) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadChar.sadL2(char, char) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static long sadL2(char x, char y) {
+    long diff = x - y;
+    if (diff < 0L) diff = -diff;
+    return diff;
+  }
+
+  /// CHECK-START: long SadChar.sadL3(char, char) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadChar.sadL3(char, char) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static long sadL3(char x, char y) {
+    long diff = x - y;
+    return diff >= 0L ? diff : -diff;
+  }
+
+  /// CHECK-START: long SadChar.sadL3Alt(char, char) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadChar.sadL3Alt(char, char) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static long sadL3Alt(char x, char y) {
+    long diff = x - y;
+    return 0L <= diff ? diff : -diff;
+  }
+
+  public static void main() {
+    // Use cross-values to test all cases.
+    char[] interesting = {
+      (char) 0x0000, (char) 0x0001, (char) 0x007f,
+      (char) 0x0080, (char) 0x0081, (char) 0x00ff,
+      (char) 0x0100, (char) 0x0101, (char) 0x017f,
+      (char) 0x0180, (char) 0x0181, (char) 0x01ff,
+      (char) 0x7f00, (char) 0x7f01, (char) 0x7f7f,
+      (char) 0x7f80, (char) 0x7f81, (char) 0x7fff,
+      (char) 0x8000, (char) 0x8001, (char) 0x807f,
+      (char) 0x8080, (char) 0x8081, (char) 0x80ff,
+      (char) 0x8100, (char) 0x8101, (char) 0x817f,
+      (char) 0x8180, (char) 0x8181, (char) 0x81ff,
+      (char) 0xff00, (char) 0xff01, (char) 0xff7f,
+      (char) 0xff80, (char) 0xff81, (char) 0xffff
+    };
+    for (int i = 0; i < interesting.length; i++) {
+      for (int j = 0; j < interesting.length; j++) {
+        char x = interesting[i];
+        char y = interesting[j];
+        int e = Math.abs(x - y);
+        expectEquals(e, sad1(x, y));
+        expectEquals(e, sad2(x, y));
+        expectEquals(e, sad3(x, y));
+        expectEquals(e, sad3Alt(x, y));
+        expectEquals(e, sadL1(x, y));
+        expectEquals(e, sadL2(x, y));
+        expectEquals(e, sadL3(x, y));
+        expectEquals(e, sadL3Alt(x, y));
+      }
+    }
+    System.out.println("SadChar passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  private static void expectEquals(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/660-checker-sad/src/SadInt.java b/test/660-checker-sad/src/SadInt.java
new file mode 100644
index 0000000..99db78c
--- /dev/null
+++ b/test/660-checker-sad/src/SadInt.java
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for SAD (sum of absolute differences).
+ */
+public class SadInt {
+
+  /// CHECK-START: int SadInt.sad1(int, int) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:i\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: int SadInt.sad1(int, int) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Select:i\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: int SadInt.sad1(int, int) instruction_simplifier$after_gvn (after)
+  /// CHECK-NOT: Abs
+  //
+  // NOTE: for direct 32-bit operands, this is not an ABS.
+  static int sad1(int x, int y) {
+    return x >= y ? x - y : y - x;
+  }
+
+  /// CHECK-START: int SadInt.sad2(int, int) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:i\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: int SadInt.sad2(int, int) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static int sad2(int x, int y) {
+    int diff = x - y;
+    if (diff < 0) diff = -diff;
+    return diff;
+  }
+
+  /// CHECK-START: int SadInt.sad3(int, int) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:i\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: int SadInt.sad3(int, int) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static int sad3(int x, int y) {
+    int diff = x - y;
+    return diff >= 0 ? diff : -diff;
+  }
+
+  /// CHECK-START: int SadInt.sad3Alt(int, int) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:i\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: int SadInt.sad3Alt(int, int) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static int sad3Alt(int x, int y) {
+    int diff = x - y;
+    return 0 <= diff ? diff : -diff;
+  }
+
+  /// CHECK-START: long SadInt.sadL1(int, int) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadInt.sadL1(int, int) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static long sadL1(int x, int y) {
+    long xl = x;
+    long yl = y;
+    return xl >= yl ? xl - yl : yl - xl;
+  }
+
+  /// CHECK-START: long SadInt.sadL2(int, int) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadInt.sadL2(int, int) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static long sadL2(int x, int y) {
+    long diff = x - y;
+    if (diff < 0L) diff = -diff;
+    return diff;
+  }
+
+  /// CHECK-START: long SadInt.sadL3(int, int) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadInt.sadL3(int, int) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static long sadL3(int x, int y) {
+    long diff = x - y;
+    return diff >= 0L ? diff : -diff;
+  }
+
+  /// CHECK-START: long SadInt.sadL3Alt(int, int) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadInt.sadL3Alt(int, int) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static long sadL3Alt(int x, int y) {
+    long diff = x - y;
+    return 0L <= diff ? diff : -diff;
+  }
+
+  public static void main() {
+    // Use cross-values for the interesting values.
+    int[] interesting = {
+      0x00000000, 0x00000001, 0x00007fff, 0x00008000, 0x00008001, 0x0000ffff,
+      0x00010000, 0x00010001, 0x00017fff, 0x00018000, 0x00018001, 0x0001ffff,
+      0x7fff0000, 0x7fff0001, 0x7fff7fff, 0x7fff8000, 0x7fff8001, 0x7fffffff,
+      0x80000000, 0x80000001, 0x80007fff, 0x80008000, 0x80008001, 0x8000ffff,
+      0x80010000, 0x80010001, 0x80017fff, 0x80018000, 0x80018001, 0x8001ffff,
+      0xffff0000, 0xffff0001, 0xffff7fff, 0xffff8000, 0xffff8001, 0xffffffff
+    };
+    for (int i = 0; i < interesting.length; i++) {
+      for (int j = 0; j < interesting.length; j++) {
+        int x = interesting[i];
+        int y = interesting[j];
+        int e1 = x >= y ? x - y : y - x;  // still select
+        expectEquals(e1, sad1(x, y));
+        int e2 = Math.abs(x - y);  // pure abs
+        expectEquals(e2, sad2(x, y));
+        expectEquals(e2, sad3(x, y));
+        expectEquals(e2, sad3Alt(x, y));
+        long eL1 = Math.abs(((long)x) - ((long)y));  // now, different, but abs
+        expectEquals(eL1, sadL1(x, y));
+        long eL2 = Math.abs((long)(x - y));  // also, different, but abs
+        expectEquals(eL2, sadL2(x, y));
+        expectEquals(eL2, sadL3(x, y));
+        expectEquals(eL2, sadL3Alt(x, y));
+      }
+    }
+    System.out.println("SadInt passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  private static void expectEquals(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/660-checker-sad/src/SadLong.java b/test/660-checker-sad/src/SadLong.java
new file mode 100644
index 0000000..b87464b
--- /dev/null
+++ b/test/660-checker-sad/src/SadLong.java
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for SAD (sum of absolute differences).
+ */
+public class SadLong {
+
+  /// CHECK-START: long SadLong.sad1(long, long) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadLong.sad1(long, long) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadLong.sad1(long, long) instruction_simplifier$after_gvn (after)
+  /// CHECK-NOT: Abs
+  //
+  // NOTE: for direct 64-bit operands, this is not an ABS.
+  static long sad1(long x, long y) {
+    return x >= y ? x - y : y - x;
+  }
+
+  /// CHECK-START: long SadLong.sad2(long, long) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadLong.sad2(long, long) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static long sad2(long x, long y) {
+    long diff = x - y;
+    if (diff < 0) diff = -diff;
+    return diff;
+  }
+
+  /// CHECK-START: long SadLong.sad3(long, long) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadLong.sad3(long, long) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static long sad3(long x, long y) {
+    long diff = x - y;
+    return diff >= 0 ? diff : -diff;
+  }
+
+  /// CHECK-START: long SadLong.sad3Alt(long, long) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadLong.sad3Alt(long, long) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static long sad3Alt(long x, long y) {
+    long diff = x - y;
+    return 0 <= diff ? diff : -diff;
+  }
+
+  public static void main() {
+    // Use cross-values for the interesting values.
+    long[] interesting = {
+      0x0000000000000000L, 0x0000000000000001L, 0x000000007fffffffL,
+      0x0000000080000000L, 0x0000000080000001L, 0x00000000ffffffffL,
+      0x0000000100000000L, 0x0000000100000001L, 0x000000017fffffffL,
+      0x0000000180000000L, 0x0000000180000001L, 0x00000001ffffffffL,
+      0x7fffffff00000000L, 0x7fffffff00000001L, 0x7fffffff7fffffffL,
+      0x7fffffff80000000L, 0x7fffffff80000001L, 0x7fffffffffffffffL,
+      0x8000000000000000L, 0x8000000000000001L, 0x800000007fffffffL,
+      0x8000000080000000L, 0x8000000080000001L, 0x80000000ffffffffL,
+      0x8000000100000000L, 0x8000000100000001L, 0x800000017fffffffL,
+      0x8000000180000000L, 0x8000000180000001L, 0x80000001ffffffffL,
+      0xffffffff00000000L, 0xffffffff00000001L, 0xffffffff7fffffffL,
+      0xffffffff80000000L, 0xffffffff80000001L, 0xffffffffffffffffL
+    };
+    for (int i = 0; i < interesting.length; i++) {
+      for (int j = 0; j < interesting.length; j++) {
+        long x = interesting[i];
+        long y = interesting[j];
+        long e1 = x >= y ? x - y : y - x;  // still select
+        expectEquals(e1, sad1(x, y));
+        long e2 = Math.abs(x - y);  // pure abs
+        expectEquals(e2, sad2(x, y));
+        expectEquals(e2, sad3(x, y));
+        expectEquals(e2, sad3Alt(x, y));
+      }
+    }
+    System.out.println("SadLong passed");
+  }
+
+  private static void expectEquals(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/660-checker-sad/src/SadShort.java b/test/660-checker-sad/src/SadShort.java
new file mode 100644
index 0000000..c45218a
--- /dev/null
+++ b/test/660-checker-sad/src/SadShort.java
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for SAD (sum of absolute differences).
+ */
+public class SadShort {
+
+  /// CHECK-START: int SadShort.sad1(short, short) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:i\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: int SadShort.sad1(short, short) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static int sad1(short x, short y) {
+    return x >= y ? x - y : y - x;
+  }
+
+  /// CHECK-START: int SadShort.sad2(short, short) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:i\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: int SadShort.sad2(short, short) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static int sad2(short x, short y) {
+    int diff = x - y;
+    if (diff < 0) diff = -diff;
+    return diff;
+  }
+
+  /// CHECK-START: int SadShort.sad3(short, short) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:i\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: int SadShort.sad3(short, short) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static int sad3(short x, short y) {
+    int diff = x - y;
+    return diff >= 0 ? diff : -diff;
+  }
+
+  /// CHECK-START: int SadShort.sad3Alt(short, short) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:i\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: int SadShort.sad3Alt(short, short) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static int sad3Alt(short x, short y) {
+    int diff = x - y;
+    return 0 <= diff ? diff : -diff;
+  }
+
+  /// CHECK-START: long SadShort.sadL1(short, short) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadShort.sadL1(short, short) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static long sadL1(short x, short y) {
+    long xl = x;
+    long yl = y;
+    return xl >= yl ? xl - yl : yl - xl;
+  }
+
+  /// CHECK-START: long SadShort.sadL2(short, short) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadShort.sadL2(short, short) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static long sadL2(short x, short y) {
+    long diff = x - y;
+    if (diff < 0L) diff = -diff;
+    return diff;
+  }
+
+  /// CHECK-START: long SadShort.sadL3(short, short) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadShort.sadL3(short, short) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static long sadL3(short x, short y) {
+    long diff = x - y;
+    return diff >= 0L ? diff : -diff;
+  }
+
+  /// CHECK-START: long SadShort.sadL3Alt(short, short) instruction_simplifier$after_gvn (before)
+  /// CHECK-DAG: <<Select:j\d+>> Select
+  /// CHECK-DAG:                 Return [<<Select>>]
+  //
+  /// CHECK-START: long SadShort.sadL3Alt(short, short) instruction_simplifier$after_gvn (after)
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs
+  /// CHECK-DAG:                 Return [<<Intrin>>]
+  static long sadL3Alt(short x, short y) {
+    long diff = x - y;
+    return 0L <= diff ? diff : -diff;
+  }
+
+  public static void main() {
+    // Use cross-values to test all cases.
+    short[] interesting = {
+      (short) 0x0000, (short) 0x0001, (short) 0x007f,
+      (short) 0x0080, (short) 0x0081, (short) 0x00ff,
+      (short) 0x0100, (short) 0x0101, (short) 0x017f,
+      (short) 0x0180, (short) 0x0181, (short) 0x01ff,
+      (short) 0x7f00, (short) 0x7f01, (short) 0x7f7f,
+      (short) 0x7f80, (short) 0x7f81, (short) 0x7fff,
+      (short) 0x8000, (short) 0x8001, (short) 0x807f,
+      (short) 0x8080, (short) 0x8081, (short) 0x80ff,
+      (short) 0x8100, (short) 0x8101, (short) 0x817f,
+      (short) 0x8180, (short) 0x8181, (short) 0x81ff,
+      (short) 0xff00, (short) 0xff01, (short) 0xff7f,
+      (short) 0xff80, (short) 0xff81, (short) 0xffff
+    };
+    for (int i = 0; i < interesting.length; i++) {
+      for (int j = 0; j < interesting.length; j++) {
+        short x = interesting[i];
+        short y = interesting[j];
+        int e = Math.abs(x - y);
+        expectEquals(e, sad1(x, y));
+        expectEquals(e, sad2(x, y));
+        expectEquals(e, sad3(x, y));
+        expectEquals(e, sad3Alt(x, y));
+        expectEquals(e, sadL1(x, y));
+        expectEquals(e, sadL2(x, y));
+        expectEquals(e, sadL3(x, y));
+        expectEquals(e, sadL3Alt(x, y));
+      }
+    }
+    System.out.println("SadShort passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  private static void expectEquals(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/660-checker-simd-sad-byte/expected.txt b/test/660-checker-simd-sad-byte/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/660-checker-simd-sad-byte/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/660-checker-simd-sad-byte/info.txt b/test/660-checker-simd-sad-byte/info.txt
deleted file mode 100644
index b56c119..0000000
--- a/test/660-checker-simd-sad-byte/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on SAD vectorization.
diff --git a/test/660-checker-simd-sad-byte/src/Main.java b/test/660-checker-simd-sad-byte/src/Main.java
deleted file mode 100644
index 38003d1..0000000
--- a/test/660-checker-simd-sad-byte/src/Main.java
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tests for SAD (sum of absolute differences).
- */
-public class Main {
-
-  // TODO: lower precision still coming, b/64091002
-
-  private static byte sadByte2Byte(byte[] b1, byte[] b2) {
-    int min_length = Math.min(b1.length, b2.length);
-    byte sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      sad += Math.abs(b1[i] - b2[i]);
-    }
-    return sad;
-  }
-
-  private static byte sadByte2ByteAlt(byte[] b1, byte[] b2) {
-    int min_length = Math.min(b1.length, b2.length);
-    byte sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      byte s = b1[i];
-      byte p = b2[i];
-      sad += s >= p ? s - p : p - s;
-    }
-    return sad;
-  }
-
-  private static byte sadByte2ByteAlt2(byte[] b1, byte[] b2) {
-    int min_length = Math.min(b1.length, b2.length);
-    byte sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      byte s = b1[i];
-      byte p = b2[i];
-      int x = s - p;
-      if (x < 0) x = -x;
-      sad += x;
-    }
-    return sad;
-  }
-
-  private static short sadByte2Short(byte[] b1, byte[] b2) {
-    int min_length = Math.min(b1.length, b2.length);
-    short sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      sad += Math.abs(b1[i] - b2[i]);
-    }
-    return sad;
-  }
-
-  private static short sadByte2ShortAlt(byte[] b1, byte[] b2) {
-    int min_length = Math.min(b1.length, b2.length);
-    short sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      byte s = b1[i];
-      byte p = b2[i];
-      sad += s >= p ? s - p : p - s;
-    }
-    return sad;
-  }
-
-  private static short sadByte2ShortAlt2(byte[] b1, byte[] b2) {
-    int min_length = Math.min(b1.length, b2.length);
-    short sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      byte s = b1[i];
-      byte p = b2[i];
-      int x = s - p;
-      if (x < 0) x = -x;
-      sad += x;
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadByte2Int(byte[], byte[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadByte2Int(byte[], byte[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons16:i\d+>> IntConstant 16                 loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons16>>]      loop:<<Loop>>      outer_loop:none
-  private static int sadByte2Int(byte[] b1, byte[] b2) {
-    int min_length = Math.min(b1.length, b2.length);
-    int sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      sad += Math.abs(b1[i] - b2[i]);
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadByte2IntAlt(byte[], byte[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get2>>,<<Get1>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadByte2IntAlt(byte[], byte[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons16:i\d+>> IntConstant 16                 loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load2>>,<<Load1>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons16>>]      loop:<<Loop>>      outer_loop:none
-  private static int sadByte2IntAlt(byte[] b1, byte[] b2) {
-    int min_length = Math.min(b1.length, b2.length);
-    int sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      byte s = b1[i];
-      byte p = b2[i];
-      sad += s >= p ? s - p : p - s;
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadByte2IntAlt2(byte[], byte[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadByte2IntAlt2(byte[], byte[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons16:i\d+>> IntConstant 16                 loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons16>>]      loop:<<Loop>>      outer_loop:none
-  private static int sadByte2IntAlt2(byte[] b1, byte[] b2) {
-    int min_length = Math.min(b1.length, b2.length);
-    int sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      byte s = b1[i];
-      byte p = b2[i];
-      int x = s - p;
-      if (x < 0) x = -x;
-      sad += x;
-    }
-    return sad;
-  }
-
-  /// CHECK-START: long Main.sadByte2Long(byte[], byte[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: long Main.sadByte2Long(byte[], byte[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons16:i\d+>> IntConstant 16                 loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons16>>]      loop:<<Loop>>      outer_loop:none
-  private static long sadByte2Long(byte[] b1, byte[] b2) {
-    int min_length = Math.min(b1.length, b2.length);
-    long sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      long x = b1[i];
-      long y = b2[i];
-      sad += Math.abs(x - y);
-    }
-    return sad;
-  }
-
-  /// CHECK-START: long Main.sadByte2LongAt1(byte[], byte[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: long Main.sadByte2LongAt1(byte[], byte[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons16:i\d+>> IntConstant 16                 loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons16>>]      loop:<<Loop>>      outer_loop:none
-  private static long sadByte2LongAt1(byte[] b1, byte[] b2) {
-    int min_length = Math.min(b1.length, b2.length);
-    long sad = 1;  // starts at 1
-    for (int i = 0; i < min_length; i++) {
-      long x = b1[i];
-      long y = b2[i];
-      sad += Math.abs(x - y);
-    }
-    return sad;
-  }
-
-  public static void main(String[] args) {
-    // Cross-test the two most extreme values individually.
-    byte[] b1 = { 0, -128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    byte[] b2 = { 0,  127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    expectEquals(-1, sadByte2Byte(b1, b2));
-    expectEquals(-1, sadByte2Byte(b2, b1));
-    expectEquals(-1, sadByte2ByteAlt(b1, b2));
-    expectEquals(-1, sadByte2ByteAlt(b2, b1));
-    expectEquals(-1, sadByte2ByteAlt2(b1, b2));
-    expectEquals(-1, sadByte2ByteAlt2(b2, b1));
-    expectEquals(255, sadByte2Short(b1, b2));
-    expectEquals(255, sadByte2Short(b2, b1));
-    expectEquals(255, sadByte2ShortAlt(b1, b2));
-    expectEquals(255, sadByte2ShortAlt(b2, b1));
-    expectEquals(255, sadByte2ShortAlt2(b1, b2));
-    expectEquals(255, sadByte2ShortAlt2(b2, b1));
-    expectEquals(255, sadByte2Int(b1, b2));
-    expectEquals(255, sadByte2Int(b2, b1));
-    expectEquals(255, sadByte2IntAlt(b1, b2));
-    expectEquals(255, sadByte2IntAlt(b2, b1));
-    expectEquals(255, sadByte2IntAlt2(b1, b2));
-    expectEquals(255, sadByte2IntAlt2(b2, b1));
-    expectEquals(255, sadByte2Long(b1, b2));
-    expectEquals(255L, sadByte2Long(b2, b1));
-    expectEquals(256L, sadByte2LongAt1(b1, b2));
-    expectEquals(256L, sadByte2LongAt1(b2, b1));
-
-    // Use cross-values to test all cases.
-    // One for scalar cleanup.
-    int n = 256;
-    int m = n * n + 1;
-    int k = 0;
-    b1 = new byte[m];
-    b2 = new byte[m];
-    for (int i = 0; i < n; i++) {
-      for (int j = 0; j < n; j++) {
-        b1[k] = (byte) i;
-        b2[k] = (byte) j;
-        k++;
-      }
-    }
-    b1[k] = 10;
-    b2[k] = 2;
-    expectEquals(8, sadByte2Byte(b1, b2));
-    expectEquals(8, sadByte2ByteAlt(b1, b2));
-    expectEquals(8, sadByte2ByteAlt2(b1, b2));
-    expectEquals(21768, sadByte2Short(b1, b2));
-    expectEquals(21768, sadByte2ShortAlt(b1, b2));
-    expectEquals(21768, sadByte2ShortAlt2(b1, b2));
-    expectEquals(5592328, sadByte2Int(b1, b2));
-    expectEquals(5592328, sadByte2IntAlt(b1, b2));
-    expectEquals(5592328, sadByte2IntAlt2(b1, b2));
-    expectEquals(5592328L, sadByte2Long(b1, b2));
-    expectEquals(5592329L, sadByte2LongAt1(b1, b2));
-
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-
-  private static void expectEquals(long expected, long result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-}
diff --git a/test/660-checker-simd-sad-char/expected.txt b/test/660-checker-simd-sad-char/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/660-checker-simd-sad-char/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/660-checker-simd-sad-char/info.txt b/test/660-checker-simd-sad-char/info.txt
deleted file mode 100644
index b56c119..0000000
--- a/test/660-checker-simd-sad-char/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on SAD vectorization.
diff --git a/test/660-checker-simd-sad-char/src/Main.java b/test/660-checker-simd-sad-char/src/Main.java
deleted file mode 100644
index 18ae024..0000000
--- a/test/660-checker-simd-sad-char/src/Main.java
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tests for SAD (sum of absolute differences).
- */
-public class Main {
-
-  // TODO: lower precision still coming, b/64091002
-
-  // TODO: consider unsigned SAD too, b/64091002
-
-  private static char sadChar2Char(char[] s1, char[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    char sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      sad += Math.abs(s1[i] - s2[i]);
-    }
-    return sad;
-  }
-
-  private static char sadChar2CharAlt(char[] s1, char[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    char sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      char s = s1[i];
-      char p = s2[i];
-      sad += s >= p ? s - p : p - s;
-    }
-    return sad;
-  }
-
-  private static char sadChar2CharAlt2(char[] s1, char[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    char sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      char s = s1[i];
-      char p = s2[i];
-      int x = s - p;
-      if (x < 0) x = -x;
-      sad += x;
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadChar2Int(char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: int Main.sadChar2Int(char[], char[]) loop_optimization (after)
-  /// CHECK-NOT: VecSADAccumulate
-  private static int sadChar2Int(char[] s1, char[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    int sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      sad += Math.abs(s1[i] - s2[i]);
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadChar2IntAlt(char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get2>>,<<Get1>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: int Main.sadChar2IntAlt(char[], char[]) loop_optimization (after)
-  /// CHECK-NOT: VecSADAccumulate
-  private static int sadChar2IntAlt(char[] s1, char[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    int sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      char s = s1[i];
-      char p = s2[i];
-      sad += s >= p ? s - p : p - s;
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadChar2IntAlt2(char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: int Main.sadChar2IntAlt2(char[], char[]) loop_optimization (after)
-  /// CHECK-NOT: VecSADAccumulate
-  private static int sadChar2IntAlt2(char[] s1, char[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    int sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      char s = s1[i];
-      char p = s2[i];
-      int x = s - p;
-      if (x < 0) x = -x;
-      sad += x;
-    }
-    return sad;
-  }
-
-  /// CHECK-START: long Main.sadChar2Long(char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: long Main.sadChar2Long(char[], char[]) loop_optimization (after)
-  /// CHECK-NOT: VecSADAccumulate
-  private static long sadChar2Long(char[] s1, char[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    long sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      long x = s1[i];
-      long y = s2[i];
-      sad += Math.abs(x - y);
-    }
-    return sad;
-  }
-
-  /// CHECK-START: long Main.sadChar2LongAt1(char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: long Main.sadChar2LongAt1(char[], char[]) loop_optimization (after)
-  /// CHECK-NOT: VecSADAccumulate
-  private static long sadChar2LongAt1(char[] s1, char[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    long sad = 1;  // starts at 1
-    for (int i = 0; i < min_length; i++) {
-      long x = s1[i];
-      long y = s2[i];
-      sad += Math.abs(x - y);
-    }
-    return sad;
-  }
-
-  public static void main(String[] args) {
-    // Cross-test the two most extreme values individually.
-    char[] s1 = { 0, 0x8000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    char[] s2 = { 0, 0x7fff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    expectEquals(1, sadChar2Char(s1, s2));
-    expectEquals(1, sadChar2Char(s2, s1));
-    expectEquals(1, sadChar2CharAlt(s1, s2));
-    expectEquals(1, sadChar2CharAlt(s2, s1));
-    expectEquals(1, sadChar2CharAlt2(s1, s2));
-    expectEquals(1, sadChar2CharAlt2(s2, s1));
-    expectEquals(1, sadChar2Int(s1, s2));
-    expectEquals(1, sadChar2Int(s2, s1));
-    expectEquals(1, sadChar2IntAlt(s1, s2));
-    expectEquals(1, sadChar2IntAlt(s2, s1));
-    expectEquals(1, sadChar2IntAlt2(s1, s2));
-    expectEquals(1, sadChar2IntAlt2(s2, s1));
-    expectEquals(1L, sadChar2Long(s1, s2));
-    expectEquals(1L, sadChar2Long(s2, s1));
-    expectEquals(2L, sadChar2LongAt1(s1, s2));
-    expectEquals(2L, sadChar2LongAt1(s2, s1));
-
-    // Use cross-values to test all cases.
-    char[] interesting = {
-      (char) 0x0000,
-      (char) 0x0001,
-      (char) 0x0002,
-      (char) 0x1234,
-      (char) 0x8000,
-      (char) 0x8001,
-      (char) 0x7fff,
-      (char) 0xffff
-    };
-    int n = interesting.length;
-    int m = n * n + 1;
-    s1 = new char[m];
-    s2 = new char[m];
-    int k = 0;
-    for (int i = 0; i < n; i++) {
-      for (int j = 0; j < n; j++) {
-        s1[k] = interesting[i];
-        s2[k] = interesting[j];
-        k++;
-      }
-    }
-    s1[k] = 10;
-    s2[k] = 2;
-    expectEquals(56196, sadChar2Char(s1, s2));
-    expectEquals(56196, sadChar2CharAlt(s1, s2));
-    expectEquals(56196, sadChar2CharAlt2(s1, s2));
-    expectEquals(1497988, sadChar2Int(s1, s2));
-    expectEquals(1497988, sadChar2IntAlt(s1, s2));
-    expectEquals(1497988, sadChar2IntAlt2(s1, s2));
-    expectEquals(1497988L, sadChar2Long(s1, s2));
-    expectEquals(1497989L, sadChar2LongAt1(s1, s2));
-
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-
-  private static void expectEquals(long expected, long result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-}
diff --git a/test/660-checker-simd-sad-int/expected.txt b/test/660-checker-simd-sad-int/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/660-checker-simd-sad-int/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/660-checker-simd-sad-int/src/Main.java b/test/660-checker-simd-sad-int/src/Main.java
deleted file mode 100644
index 5952c41..0000000
--- a/test/660-checker-simd-sad-int/src/Main.java
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tests for SAD (sum of absolute differences).
- */
-public class Main {
-
-  /// CHECK-START: int Main.sadInt2Int(int[], int[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: int Main.sadInt2Int(int[], int[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons:i\d+>>   IntConstant {{2|4}}                        loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [{{i\d+}}]                   loop:none
-  /// CHECK-DAG: <<Phi:d\d+>>    Phi [<<Set>>,{{d\d+}}]                     loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Ld1:d\d+>>    VecLoad [{{l\d+}},<<I:i\d+>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Ld2:d\d+>>    VecLoad [{{l\d+}},<<I>>]                   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi>>,<<Ld1>>,<<Ld2>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<I>>,<<Cons>>]                       loop:<<Loop>> outer_loop:none
-  private static int sadInt2Int(int[] x, int[] y) {
-    int min_length = Math.min(x.length, y.length);
-    int sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      sad += Math.abs(x[i] - y[i]);
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadInt2IntAlt(int[], int[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                       loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                       loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]            loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub1:i\d+>>   Sub [<<Get2>>,<<Get1>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub2:i\d+>>   Sub [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Select:i\d+>> Select [<<Sub2>>,<<Sub1>>,{{z\d+}}] loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Select>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]            loop:<<Loop>>      outer_loop:none
-  //
-  // No ABS? No SAD!
-  //
-  /// CHECK-START: int Main.sadInt2IntAlt(int[], int[]) loop_optimization (after)
-  /// CHECK-NOT: VecSADAccumulate
-  private static int sadInt2IntAlt(int[] x, int[] y) {
-    int min_length = Math.min(x.length, y.length);
-    int sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      int s = x[i];
-      int p = y[i];
-      sad += s >= p ? s - p : p - s;
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadInt2IntAlt2(int[], int[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: int Main.sadInt2IntAlt2(int[], int[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons:i\d+>>   IntConstant {{2|4}}                        loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [{{i\d+}}]                   loop:none
-  /// CHECK-DAG: <<Phi:d\d+>>    Phi [<<Set>>,{{d\d+}}]                     loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Ld1:d\d+>>    VecLoad [{{l\d+}},<<I:i\d+>>]              loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Ld2:d\d+>>    VecLoad [{{l\d+}},<<I>>]                   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi>>,<<Ld1>>,<<Ld2>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<I>>,<<Cons>>]                       loop:<<Loop>> outer_loop:none
-  private static int sadInt2IntAlt2(int[] x, int[] y) {
-    int min_length = Math.min(x.length, y.length);
-    int sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      int s = x[i];
-      int p = y[i];
-      int m = s - p;
-      if (m < 0) m = -m;
-      sad += m;
-    }
-    return sad;
-  }
-
-  /// CHECK-START: long Main.sadInt2Long(int[], int[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: long Main.sadInt2Long(int[], int[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons4:i\d+>>  IntConstant 4                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons4>>]       loop:<<Loop>>      outer_loop:none
-  private static long sadInt2Long(int[] x, int[] y) {
-    int min_length = Math.min(x.length, y.length);
-    long sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      long s = x[i];
-      long p = y[i];
-      sad += Math.abs(s - p);
-    }
-    return sad;
-  }
-
-  /// CHECK-START: long Main.sadInt2LongAt1(int[], int[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: long Main.sadInt2LongAt1(int[], int[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons4:i\d+>>  IntConstant 4                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons4>>]       loop:<<Loop>>      outer_loop:none
-  private static long sadInt2LongAt1(int[] x, int[] y) {
-    int min_length = Math.min(x.length, y.length);
-    long sad = 1;  // starts at 1
-    for (int i = 0; i < min_length; i++) {
-      long s = x[i];
-      long p = y[i];
-      sad += Math.abs(s - p);
-    }
-    return sad;
-  }
-
-  public static void main(String[] args) {
-    // Cross-test the two most extreme values individually.
-    int[] x = { 0, Integer.MAX_VALUE, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    int[] y = { 0, Integer.MIN_VALUE, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    expectEquals(1, sadInt2Int(x, y));
-    expectEquals(1, sadInt2Int(y, x));
-    expectEquals(-1, sadInt2IntAlt(x, y));
-    expectEquals(-1, sadInt2IntAlt(y, x));
-    expectEquals(1, sadInt2IntAlt2(x, y));
-    expectEquals(1, sadInt2IntAlt2(y, x));
-    expectEquals(4294967295L, sadInt2Long(x, y));
-    expectEquals(4294967295L, sadInt2Long(y, x));
-    expectEquals(4294967296L, sadInt2LongAt1(x, y));
-    expectEquals(4294967296L, sadInt2LongAt1(y, x));
-
-    // Use cross-values for the interesting values.
-    int[] interesting = {
-      0x00000000, 0x00000001, 0x00007fff, 0x00008000, 0x00008001, 0x0000ffff,
-      0x00010000, 0x00010001, 0x00017fff, 0x00018000, 0x00018001, 0x0001ffff,
-      0x7fff0000, 0x7fff0001, 0x7fff7fff, 0x7fff8000, 0x7fff8001, 0x7fffffff,
-      0x80000000, 0x80000001, 0x80007fff, 0x80008000, 0x80008001, 0x8000ffff,
-      0x80010000, 0x80010001, 0x80017fff, 0x80018000, 0x80018001, 0x8001ffff,
-      0xffff0000, 0xffff0001, 0xffff7fff, 0xffff8000, 0xffff8001, 0xffffffff
-    };
-    int n = interesting.length;
-    int m = n * n + 1;
-    x = new int[m];
-    y = new int[m];
-    int k = 0;
-    for (int i = 0; i < n; i++) {
-      for (int j = 0; j < n; j++) {
-        x[k] = interesting[i];
-        y[k] = interesting[j];
-        k++;
-      }
-    }
-    x[k] = 10;
-    y[k] = 2;
-    expectEquals(8, sadInt2Int(x, y));
-    expectEquals(-13762600, sadInt2IntAlt(x, y));
-    expectEquals(8, sadInt2IntAlt2(x, y));
-    expectEquals(2010030931928L, sadInt2Long(x, y));
-    expectEquals(2010030931929L, sadInt2LongAt1(x, y));
-
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-
-  private static void expectEquals(long expected, long result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-}
diff --git a/test/660-checker-simd-sad-long/expected.txt b/test/660-checker-simd-sad-long/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/660-checker-simd-sad-long/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/660-checker-simd-sad-long/info.txt b/test/660-checker-simd-sad-long/info.txt
deleted file mode 100644
index b56c119..0000000
--- a/test/660-checker-simd-sad-long/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on SAD vectorization.
diff --git a/test/660-checker-simd-sad-long/src/Main.java b/test/660-checker-simd-sad-long/src/Main.java
deleted file mode 100644
index 360e723..0000000
--- a/test/660-checker-simd-sad-long/src/Main.java
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tests for SAD (sum of absolute differences).
- */
-public class Main {
-
-  /// CHECK-START: long Main.sadLong2Long(long[], long[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:j\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:j\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: long Main.sadLong2Long(long[], long[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons2:i\d+>>  IntConstant 2                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons2>>]       loop:<<Loop>>      outer_loop:none
-  private static long sadLong2Long(long[] x, long[] y) {
-    int min_length = Math.min(x.length, y.length);
-    long sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      sad += Math.abs(x[i] - y[i]);
-    }
-    return sad;
-  }
-
-  /// CHECK-START: long Main.sadLong2LongAlt(long[], long[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                       loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                       loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]            loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:j\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:j\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub1:j\d+>>   Sub [<<Get2>>,<<Get1>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub2:j\d+>>   Sub [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Select:j\d+>> Select [<<Sub2>>,<<Sub1>>,{{z\d+}}] loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Select>>]           loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]            loop:<<Loop>>      outer_loop:none
-  //
-  // No ABS? No SAD!
-  //
-  /// CHECK-START: long Main.sadLong2LongAlt(long[], long[]) loop_optimization (after)
-  /// CHECK-NOT: VecSADAccumulate
-  private static long sadLong2LongAlt(long[] x, long[] y) {
-    int min_length = Math.min(x.length, y.length);
-    long sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      long s = x[i];
-      long p = y[i];
-      sad += s >= p ? s - p : p - s;
-    }
-    return sad;
-  }
-
-  /// CHECK-START: long Main.sadLong2LongAlt2(long[], long[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:j\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:j\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: long Main.sadLong2LongAlt2(long[], long[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons2:i\d+>>  IntConstant 2                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons2>>]       loop:<<Loop>>      outer_loop:none
-  private static long sadLong2LongAlt2(long[] x, long[] y) {
-    int min_length = Math.min(x.length, y.length);
-    long sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      long s = x[i];
-      long p = y[i];
-      long m = s - p;
-      if (m < 0) m = -m;
-      sad += m;
-    }
-    return sad;
-  }
-
-  /// CHECK-START: long Main.sadLong2LongAt1(long[], long[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:j\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:j\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: long Main.sadLong2LongAt1(long[], long[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons2:i\d+>>  IntConstant 2                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons2>>]       loop:<<Loop>>      outer_loop:none
-  private static long sadLong2LongAt1(long[] x, long[] y) {
-    int min_length = Math.min(x.length, y.length);
-    long sad = 1;  // starts at 1
-    for (int i = 0; i < min_length; i++) {
-      sad += Math.abs(x[i] - y[i]);
-    }
-    return sad;
-  }
-
-  public static void main(String[] args) {
-    // Cross-test the two most extreme values individually.
-    long[] x = { 0, Long.MIN_VALUE, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    long[] y = { 0, Long.MAX_VALUE, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    expectEquals(1L, sadLong2Long(x, y));
-    expectEquals(1L, sadLong2Long(y, x));
-    expectEquals(-1L, sadLong2LongAlt(x, y));
-    expectEquals(-1L, sadLong2LongAlt(y, x));
-    expectEquals(1L, sadLong2LongAlt2(x, y));
-    expectEquals(1L, sadLong2LongAlt2(y, x));
-    expectEquals(2L, sadLong2LongAt1(x, y));
-    expectEquals(2L, sadLong2LongAt1(y, x));
-
-    // Use cross-values for the interesting values.
-    long[] interesting = {
-      0x0000000000000000L, 0x0000000000000001L, 0x000000007fffffffL,
-      0x0000000080000000L, 0x0000000080000001L, 0x00000000ffffffffL,
-      0x0000000100000000L, 0x0000000100000001L, 0x000000017fffffffL,
-      0x0000000180000000L, 0x0000000180000001L, 0x00000001ffffffffL,
-      0x7fffffff00000000L, 0x7fffffff00000001L, 0x7fffffff7fffffffL,
-      0x7fffffff80000000L, 0x7fffffff80000001L, 0x7fffffffffffffffL,
-      0x8000000000000000L, 0x8000000000000001L, 0x800000007fffffffL,
-      0x8000000080000000L, 0x8000000080000001L, 0x80000000ffffffffL,
-      0x8000000100000000L, 0x8000000100000001L, 0x800000017fffffffL,
-      0x8000000180000000L, 0x8000000180000001L, 0x80000001ffffffffL,
-      0xffffffff00000000L, 0xffffffff00000001L, 0xffffffff7fffffffL,
-      0xffffffff80000000L, 0xffffffff80000001L, 0xffffffffffffffffL
-    };
-    int n = interesting.length;
-    int m = n * n + 1;
-    x = new long[m];
-    y = new long[m];
-    int k = 0;
-    for (int i = 0; i < n; i++) {
-      for (int j = 0; j < n; j++) {
-        x[k] = interesting[i];
-        y[k] = interesting[j];
-        k++;
-      }
-    }
-    x[k] = 10;
-    y[k] = 2;
-    expectEquals(8L, sadLong2Long(x, y));
-    expectEquals(-901943132200L, sadLong2LongAlt(x, y));
-    expectEquals(8L, sadLong2LongAlt2(x, y));
-    expectEquals(9L, sadLong2LongAt1(x, y));
-
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(long expected, long result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-}
diff --git a/test/660-checker-simd-sad-short/expected.txt b/test/660-checker-simd-sad-short/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/660-checker-simd-sad-short/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/660-checker-simd-sad-short/info.txt b/test/660-checker-simd-sad-short/info.txt
deleted file mode 100644
index b56c119..0000000
--- a/test/660-checker-simd-sad-short/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on SAD vectorization.
diff --git a/test/660-checker-simd-sad-short/src/Main.java b/test/660-checker-simd-sad-short/src/Main.java
deleted file mode 100644
index ff74559..0000000
--- a/test/660-checker-simd-sad-short/src/Main.java
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tests for SAD (sum of absolute differences).
- */
-public class Main {
-
-  private static int $inline$seven() {
-    return 7;
-  }
-
-  // TODO: lower precision still coming, b/64091002
-
-  private static short sadShort2Short(short[] s1, short[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    short sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      sad += Math.abs(s1[i] - s2[i]);
-    }
-    return sad;
-  }
-
-  private static short sadShort2ShortAlt(short[] s1, short[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    short sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      short s = s1[i];
-      short p = s2[i];
-      sad += s >= p ? s - p : p - s;
-    }
-    return sad;
-  }
-
-  private static short sadShort2ShortAlt2(short[] s1, short[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    short sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      short s = s1[i];
-      short p = s2[i];
-      int x = s - p;
-      if (x < 0) x = -x;
-      sad += x;
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadShort2Int(short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadShort2Int(short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static int sadShort2Int(short[] s1, short[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    int sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      sad += Math.abs(s1[i] - s2[i]);
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadShort2IntAlt(short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get2>>,<<Get1>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadShort2IntAlt(short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load2>>,<<Load1>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static int sadShort2IntAlt(short[] s1, short[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    int sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      short s = s1[i];
-      short p = s2[i];
-      sad += s >= p ? s - p : p - s;
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadShort2IntAlt2(short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadShort2IntAlt2(short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static int sadShort2IntAlt2(short[] s1, short[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    int sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      short s = s1[i];
-      short p = s2[i];
-      int x = s - p;
-      if (x < 0) x = -x;
-      sad += x;
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadShort2IntConstant1(short[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Cons:i\d+>>   IntConstant -7                 loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>    Add [<<Get1>>,<<Cons>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Add>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadShort2IntConstant1(short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Cons:i\d+>>   IntConstant 7                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<Cons>>]  loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Rep>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static int sadShort2IntConstant1(short[] s) {
-    int sad = 0;
-    for (int i = 0; i < s.length; i++) {
-      sad += Math.abs(s[i] - 7);  // s[i] + -7
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadShort2IntConstant2(short[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Cons:i\d+>>   IntConstant 7                  loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Cons>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadShort2IntConstant2(short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Cons:i\d+>>   IntConstant 7                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<Cons>>]  loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Rep>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static int sadShort2IntConstant2(short[] s) {
-    int sad = 0;
-    for (int i = 0; i < s.length; i++) {
-      sad += Math.abs(s[i] - $inline$seven());  // s[i] - 7
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadShort2IntConstant3(short[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Cons:i\d+>>   IntConstant 7                  loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>    Add [<<Get1>>,<<Cons>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Add>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadShort2IntConstant3(short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Cons:i\d+>>   IntConstant -7                 loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<Cons>>]  loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Rep>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static int sadShort2IntConstant3(short[] s) {
-    int sad = 0;
-    for (int i = 0; i < s.length; i++) {
-      sad += Math.abs(s[i] + $inline$seven());  // hidden s[i] - (-7)
-    }
-    return sad;
-  }
-
-  /// CHECK-START: long Main.sadShort2Long(short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: long Main.sadShort2Long(short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static long sadShort2Long(short[] s1, short[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    long sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      long x = s1[i];
-      long y = s2[i];
-      sad += Math.abs(x - y);
-    }
-    return sad;
-  }
-
-  /// CHECK-START: long Main.sadShort2LongAt1(short[], short[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: long Main.sadShort2LongAt1(short[], short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static long sadShort2LongAt1(short[] s1, short[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    long sad = 1;  // starts at 1
-    for (int i = 0; i < min_length; i++) {
-      long x = s1[i];
-      long y = s2[i];
-      sad += Math.abs(x - y);
-    }
-    return sad;
-  }
-
-  public static void main(String[] args) {
-    // Cross-test the two most extreme values individually.
-    short[] s1 = { 0, -32768, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    short[] s2 = { 0,  32767, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    expectEquals(-1, sadShort2Short(s1, s2));
-    expectEquals(-1, sadShort2Short(s2, s1));
-    expectEquals(-1, sadShort2ShortAlt(s1, s2));
-    expectEquals(-1, sadShort2ShortAlt(s2, s1));
-    expectEquals(-1, sadShort2ShortAlt2(s1, s2));
-    expectEquals(-1, sadShort2ShortAlt2(s2, s1));
-    expectEquals(65535, sadShort2Int(s1, s2));
-    expectEquals(65535, sadShort2Int(s2, s1));
-    expectEquals(65535, sadShort2IntAlt(s1, s2));
-    expectEquals(65535, sadShort2IntAlt(s2, s1));
-    expectEquals(65535, sadShort2IntAlt2(s1, s2));
-    expectEquals(65535, sadShort2IntAlt2(s2, s1));
-    expectEquals(32880, sadShort2IntConstant1(s1));
-    expectEquals(32880, sadShort2IntConstant2(s1));
-    expectEquals(32866, sadShort2IntConstant3(s1));
-    expectEquals(65535L, sadShort2Long(s1, s2));
-    expectEquals(65535L, sadShort2Long(s2, s1));
-    expectEquals(65536L, sadShort2LongAt1(s1, s2));
-    expectEquals(65536L, sadShort2LongAt1(s2, s1));
-
-    // Use cross-values to test all cases.
-    short[] interesting = {
-      (short) 0x0000,
-      (short) 0x0001,
-      (short) 0x0002,
-      (short) 0x1234,
-      (short) 0x8000,
-      (short) 0x8001,
-      (short) 0x7fff,
-      (short) 0xffff
-    };
-    int n = interesting.length;
-    int m = n * n + 1;
-    s1 = new short[m];
-    s2 = new short[m];
-    int k = 0;
-    for (int i = 0; i < n; i++) {
-      for (int j = 0; j < n; j++) {
-        s1[k] = interesting[i];
-        s2[k] = interesting[j];
-        k++;
-      }
-    }
-    s1[k] = 10;
-    s2[k] = 2;
-    expectEquals(-18932, sadShort2Short(s1, s2));
-    expectEquals(-18932, sadShort2ShortAlt(s1, s2));
-    expectEquals(-18932, sadShort2ShortAlt2(s1, s2));
-    expectEquals(1291788, sadShort2Int(s1, s2));
-    expectEquals(1291788, sadShort2IntAlt(s1, s2));
-    expectEquals(1291788, sadShort2IntAlt2(s1, s2));
-    expectEquals(823907, sadShort2IntConstant1(s1));
-    expectEquals(823907, sadShort2IntConstant2(s1));
-    expectEquals(823953, sadShort2IntConstant3(s1));
-    expectEquals(1291788L, sadShort2Long(s1, s2));
-    expectEquals(1291789L, sadShort2LongAt1(s1, s2));
-
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-
-  private static void expectEquals(long expected, long result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-}
diff --git a/test/660-checker-simd-sad-short2/info.txt b/test/660-checker-simd-sad-short2/info.txt
deleted file mode 100644
index b56c119..0000000
--- a/test/660-checker-simd-sad-short2/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on SAD vectorization.
diff --git a/test/660-checker-simd-sad-short2/src/Main.java b/test/660-checker-simd-sad-short2/src/Main.java
deleted file mode 100644
index 1ce0e2a..0000000
--- a/test/660-checker-simd-sad-short2/src/Main.java
+++ /dev/null
@@ -1,389 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tests for SAD (sum of absolute differences).
- *
- * Special case, char array that is first casted to short, forcing sign extension.
- */
-public class Main {
-
-  // TODO: lower precision still coming, b/64091002
-
-  private static short sadCastedChar2Short(char[] s1, char[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    short sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      sad += Math.abs(((short) s1[i]) - ((short) s2[i]));
-    }
-    return sad;
-  }
-
-  private static short sadCastedChar2ShortAlt(char[] s1, char[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    short sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      short s = (short) s1[i];
-      short p = (short) s2[i];
-      sad += s >= p ? s - p : p - s;
-    }
-    return sad;
-  }
-
-  private static short sadCastedChar2ShortAlt2(char[] s1, char[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    short sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      short s = (short) s1[i];
-      short p = (short) s2[i];
-      int x = s - p;
-      if (x < 0) x = -x;
-      sad += x;
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadCastedChar2Int(char[], char[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<BC1:i\d+>>    BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>     outer_loop:none
-  /// CHECK-DAG: <<BC2:i\d+>>    BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>     outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<BC1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<BC2>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv1:s\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv2:s\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: int Main.sadCastedChar2Int(char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadCastedChar2Int(char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static int sadCastedChar2Int(char[] s1, char[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    int sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      sad += Math.abs(((short) s1[i]) - ((short) s2[i]));
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadCastedChar2IntAlt(char[], char[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<BC1:i\d+>>    BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<BC2:i\d+>>    BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<BC1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<BC2>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv1:s\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv2:s\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub1:i\d+>>   Sub [<<Cnv2>>,<<Cnv1>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub2:i\d+>>   Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Phi3:i\d+>>   Phi [<<Sub2>>,<<Sub1>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Phi3>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: int Main.sadCastedChar2IntAlt(char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get2>>,<<Get1>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadCastedChar2IntAlt(char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load2>>,<<Load1>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static int sadCastedChar2IntAlt(char[] s1, char[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    int sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      short s = (short) s1[i];
-      short p = (short) s2[i];
-      sad += s >= p ? s - p : p - s;
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadCastedChar2IntAlt2(char[], char[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<BC1:\i\d+>>   BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<BC2:\i\d+>>   BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<BC1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<BC2>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv1:s\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv2:s\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Neg:i\d+>>    Neg [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Phi3:i\d+>>   Phi [<<Sub>>,<<Neg>>]          loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Phi3>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: int Main.sadCastedChar2IntAlt2(char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadCastedChar2IntAlt2(char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static int sadCastedChar2IntAlt2(char[] s1, char[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    int sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      short s = (short) s1[i];
-      short p = (short) s2[i];
-      int x = s - p;
-      if (x < 0) x = -x;
-      sad += x;
-    }
-    return sad;
-  }
-
-  /// CHECK-START: long Main.sadCastedChar2Long(char[], char[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<BC1:\i\d+>>   BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>     outer_loop:none
-  /// CHECK-DAG: <<BC2:\i\d+>>   BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>     outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<BC1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<BC2>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv1:s\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv2:s\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv3:j\d+>>   TypeConversion [<<Cnv1>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv4:j\d+>>   TypeConversion [<<Cnv2>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv3>>,<<Cnv4>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:j\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsLong loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: long Main.sadCastedChar2Long(char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: long Main.sadCastedChar2Long(char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static long sadCastedChar2Long(char[] s1, char[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    long sad = 0;
-    for (int i = 0; i < min_length; i++) {
-      long x = (short) s1[i];
-      long y = (short) s2[i];
-      sad += Math.abs(x - y);
-    }
-    return sad;
-  }
-
-  /// CHECK-START: long Main.sadCastedChar2LongAt1(char[], char[]) instruction_simplifier (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<BC1:\i\d+>>   BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>     outer_loop:none
-  /// CHECK-DAG: <<BC2:\i\d+>>   BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>     outer_loop:none
-  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<BC1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<BC2>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv1:s\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv2:s\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv3:j\d+>>   TypeConversion [<<Cnv1>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv4:j\d+>>   TypeConversion [<<Cnv2>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv3>>,<<Cnv4>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:j\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsLong loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START: long Main.sadCastedChar2LongAt1(char[], char[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: long Main.sadCastedChar2LongAt1(char[], char[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static long sadCastedChar2LongAt1(char[] s1, char[] s2) {
-    int min_length = Math.min(s1.length, s2.length);
-    long sad = 1;  // starts at 1
-    for (int i = 0; i < min_length; i++) {
-      long x = (short) s1[i];
-      long y = (short) s2[i];
-      sad += Math.abs(x - y);
-    }
-    return sad;
-  }
-
-  public static void main(String[] args) {
-    // Cross-test the two most extreme values individually.
-    char[] s1 = { 0, 0x8000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    char[] s2 = { 0, 0x7fff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    expectEquals(-1, sadCastedChar2Short(s1, s2));
-    expectEquals(-1, sadCastedChar2Short(s2, s1));
-    expectEquals(-1, sadCastedChar2ShortAlt(s1, s2));
-    expectEquals(-1, sadCastedChar2ShortAlt(s2, s1));
-    expectEquals(-1, sadCastedChar2ShortAlt2(s1, s2));
-    expectEquals(-1, sadCastedChar2ShortAlt2(s2, s1));
-    expectEquals(65535, sadCastedChar2Int(s1, s2));
-    expectEquals(65535, sadCastedChar2Int(s2, s1));
-    expectEquals(65535, sadCastedChar2IntAlt(s1, s2));
-    expectEquals(65535, sadCastedChar2IntAlt(s2, s1));
-    expectEquals(65535, sadCastedChar2IntAlt2(s1, s2));
-    expectEquals(65535, sadCastedChar2IntAlt2(s2, s1));
-    expectEquals(65535L, sadCastedChar2Long(s1, s2));
-    expectEquals(65535L, sadCastedChar2Long(s2, s1));
-    expectEquals(65536L, sadCastedChar2LongAt1(s1, s2));
-    expectEquals(65536L, sadCastedChar2LongAt1(s2, s1));
-
-    // Use cross-values to test all cases.
-    char[] interesting = {
-      (char) 0x0000,
-      (char) 0x0001,
-      (char) 0x0002,
-      (char) 0x1234,
-      (char) 0x8000,
-      (char) 0x8001,
-      (char) 0x7fff,
-      (char) 0xffff
-    };
-    int n = interesting.length;
-    int m = n * n + 1;
-    s1 = new char[m];
-    s2 = new char[m];
-    int k = 0;
-    for (int i = 0; i < n; i++) {
-      for (int j = 0; j < n; j++) {
-        s1[k] = interesting[i];
-        s2[k] = interesting[j];
-        k++;
-      }
-    }
-    s1[k] = 10;
-    s2[k] = 2;
-    expectEquals(-18932, sadCastedChar2Short(s1, s2));
-    expectEquals(-18932, sadCastedChar2ShortAlt(s1, s2));
-    expectEquals(-18932, sadCastedChar2ShortAlt2(s1, s2));
-    expectEquals(1291788, sadCastedChar2Int(s1, s2));
-    expectEquals(1291788, sadCastedChar2IntAlt(s1, s2));
-    expectEquals(1291788, sadCastedChar2IntAlt2(s1, s2));
-    expectEquals(1291788L, sadCastedChar2Long(s1, s2));
-    expectEquals(1291789L, sadCastedChar2LongAt1(s1, s2));
-
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-
-  private static void expectEquals(long expected, long result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-}
diff --git a/test/660-checker-simd-sad-short3/expected.txt b/test/660-checker-simd-sad-short3/expected.txt
deleted file mode 100644
index b0aad4d..0000000
--- a/test/660-checker-simd-sad-short3/expected.txt
+++ /dev/null
@@ -1 +0,0 @@
-passed
diff --git a/test/660-checker-simd-sad-short3/info.txt b/test/660-checker-simd-sad-short3/info.txt
deleted file mode 100644
index b56c119..0000000
--- a/test/660-checker-simd-sad-short3/info.txt
+++ /dev/null
@@ -1 +0,0 @@
-Functional tests on SAD vectorization.
diff --git a/test/660-checker-simd-sad-short3/src/Main.java b/test/660-checker-simd-sad-short3/src/Main.java
deleted file mode 100644
index d0892c3..0000000
--- a/test/660-checker-simd-sad-short3/src/Main.java
+++ /dev/null
@@ -1,354 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Tests for SAD (sum of absolute differences).
- *
- * Some special cases: parameters, constants, invariants, casted computations.
- */
-public class Main {
-
-  /// CHECK-START: int Main.sadShort2IntParamRight(short[], short) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Param:s\d+>>  ParameterValue                 loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get>>,<<Param>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadShort2IntParamRight(short[], short) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<Param:s\d+>>  ParameterValue                 loop:none
-  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<Param>>] loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load>>,<<Rep>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static int sadShort2IntParamRight(short[] s, short param) {
-    int sad = 0;
-    for (int i = 0; i < s.length; i++) {
-      sad += Math.abs(s[i] - param);
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadShort2IntParamLeft(short[], short) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Param:s\d+>>  ParameterValue                 loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Param>>,<<Get>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadShort2IntParamLeft(short[], short) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<Param:s\d+>>  ParameterValue                 loop:none
-  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<Param>>] loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Rep>>,<<Load>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static int sadShort2IntParamLeft(short[] s, short param) {
-    int sad = 0;
-    for (int i = 0; i < s.length; i++) {
-      sad += Math.abs(param - s[i]);
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadShort2IntConstRight(short[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<ConsI:i\d+>>  IntConstant -32767             loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>    Add [<<Get>>,<<ConsI>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Add>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadShort2IntConstRight(short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<ConsI:i\d+>>  IntConstant 32767              loop:none
-  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<ConsI>>] loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load>>,<<Rep>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static int sadShort2IntConstRight(short[] s) {
-    int sad = 0;
-    for (int i = 0; i < s.length; i++) {
-      sad += Math.abs(s[i] - 32767);
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadShort2IntConstLeft(short[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<ConsI:i\d+>>  IntConstant 32767              loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<ConsI>>,<<Get>>]        loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadShort2IntConstLeft(short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<ConsI:i\d+>>  IntConstant 32767              loop:none
-  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<ConsI>>] loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Rep>>,<<Load>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static int sadShort2IntConstLeft(short[] s) {
-    int sad = 0;
-    for (int i = 0; i < s.length; i++) {
-      sad += Math.abs(32767 - s[i]);
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadShort2IntInvariantRight(short[], int) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Conv:s\d+>>   TypeConversion [{{i\d+}}]      loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get>>,<<Conv>>]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadShort2IntInvariantRight(short[], int) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<Conv:s\d+>>   TypeConversion [{{i\d+}}]      loop:none
-  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<Conv>>]  loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load>>,<<Rep>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static int sadShort2IntInvariantRight(short[] s, int val) {
-    int sad = 0;
-    short x = (short) (val + 1);
-    for (int i = 0; i < s.length; i++) {
-      sad += Math.abs(s[i] - x);
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadShort2IntInvariantLeft(short[], int) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<Conv:s\d+>>   TypeConversion [{{i\d+}}]      loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Conv>>,<<Get>>]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadShort2IntInvariantLeft(short[], int) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<Conv:s\d+>>   TypeConversion [{{i\d+}}]      loop:none
-  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<Conv>>]  loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Rep>>,<<Load>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static int sadShort2IntInvariantLeft(short[] s, int val) {
-    int sad = 0;
-    short x = (short) (val + 1);
-    for (int i = 0; i < s.length; i++) {
-      sad += Math.abs(x - s[i]);
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadShort2IntCastedExprRight(short[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<ConsI:i\d+>>  IntConstant 110                loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>    [<<Get>>,<<ConsI>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Conv:s\d+>>   TypeConversion [<<Add>>]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get>>,<<Conv>>]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadShort2IntCastedExprRight(short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<ConsI:i\d+>>  IntConstant 110                loop:none
-  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<ConsI>>] loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:d\d+>>    VecAdd [<<Load>>,<<Rep>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load>>,<<Add>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static int sadShort2IntCastedExprRight(short[] s) {
-    int sad = 0;
-    for (int i = 0; i < s.length; i++) {
-      short x = (short) (s[i] + 110);  // narrower part sign extends
-      sad += Math.abs(s[i] - x);
-    }
-    return sad;
-  }
-
-  /// CHECK-START: int Main.sadShort2IntCastedExprLeft(short[]) loop_optimization (before)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
-  /// CHECK-DAG: <<ConsI:i\d+>>  IntConstant 110                loop:none
-  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Get:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:i\d+>>    [<<Get>>,<<ConsI>>]            loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Conv:s\d+>>   TypeConversion [<<Add>>]       loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Conv>>,<<Get>>]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
-  //
-  /// CHECK-START-{ARM64,MIPS64}: int Main.sadShort2IntCastedExprLeft(short[]) loop_optimization (after)
-  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
-  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
-  /// CHECK-DAG: <<ConsI:i\d+>>  IntConstant 110                loop:none
-  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<ConsI>>] loop:none
-  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
-  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
-  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Load:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<Add:d\d+>>    VecAdd [<<Load>>,<<Rep>>]      loop:<<Loop>>      outer_loop:none
-  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Add>>,<<Load>>] loop:<<Loop>> outer_loop:none
-  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
-  private static int sadShort2IntCastedExprLeft(short[] s) {
-    int sad = 0;
-    for (int i = 0; i < s.length; i++) {
-      short x = (short) (s[i] + 110);  // narrower part sign extends
-      sad += Math.abs(x - s[i]);
-    }
-    return sad;
-  }
-
-  public static void main(String[] args) {
-    short[] interesting = {
-      (short) 0x0000,
-      (short) 0x0001,
-      (short) 0x0002,
-      (short) 0x0003,
-      (short) 0x0004,
-      (short) 0x1234,
-      (short) 0x8000,
-      (short) 0x8001,
-      (short) 0x8002,
-      (short) 0x8003,
-      (short) 0x8004,
-      (short) 0x8004,
-      (short) 0x7000,
-      (short) 0x7fff,
-      (short) 0xf000,
-      (short) 0xffff
-    };
-    short[] s = new short[64];
-    for (int i = 0; i < 64; i++) {
-      s[i] = interesting[i % interesting.length];
-    }
-
-    expectEquals(1067200, sadShort2IntParamRight(s, (short)-1));
-    expectEquals(1067200, sadShort2IntParamRight(s, (short) 0));
-    expectEquals(1067208, sadShort2IntParamRight(s, (short) 1));
-    expectEquals(1067224, sadShort2IntParamRight(s, (short) 2));
-    expectEquals(2635416, sadShort2IntParamRight(s, (short) 0x7fff));
-    expectEquals(1558824, sadShort2IntParamRight(s, (short) 0x8000));
-
-    expectEquals(1067200, sadShort2IntParamLeft(s, (short)-1));
-    expectEquals(1067200, sadShort2IntParamLeft(s, (short) 0));
-    expectEquals(1067208, sadShort2IntParamLeft(s, (short) 1));
-    expectEquals(1067224, sadShort2IntParamLeft(s, (short) 2));
-    expectEquals(2635416, sadShort2IntParamLeft(s, (short) 0x7fff));
-    expectEquals(1558824, sadShort2IntParamLeft(s, (short) 0x8000));
-
-    expectEquals(2635416, sadShort2IntConstRight(s));
-    expectEquals(2635416, sadShort2IntConstLeft(s));
-
-    expectEquals(1067200, sadShort2IntInvariantRight(s, -2));
-    expectEquals(1067200, sadShort2IntInvariantRight(s, -1));
-    expectEquals(1067208, sadShort2IntInvariantRight(s, 0));
-    expectEquals(1067224, sadShort2IntInvariantRight(s, 1));
-    expectEquals(2635416, sadShort2IntInvariantRight(s, 0x7ffe));
-    expectEquals(1558824, sadShort2IntInvariantRight(s, 0x7fff));
-
-    expectEquals(1067200, sadShort2IntInvariantLeft(s, -2));
-    expectEquals(1067200, sadShort2IntInvariantLeft(s, -1));
-    expectEquals(1067208, sadShort2IntInvariantLeft(s, 0));
-    expectEquals(1067224, sadShort2IntInvariantLeft(s, 1));
-    expectEquals(2635416, sadShort2IntInvariantLeft(s, 0x7ffe));
-    expectEquals(1558824, sadShort2IntInvariantLeft(s, 0x7fff));
-
-    expectEquals(268304, sadShort2IntCastedExprLeft(s));
-    expectEquals(268304, sadShort2IntCastedExprRight(s));
-
-    System.out.println("passed");
-  }
-
-  private static void expectEquals(int expected, int result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-
-  private static void expectEquals(long expected, long result) {
-    if (expected != result) {
-      throw new Error("Expected: " + expected + ", found: " + result);
-    }
-  }
-}
diff --git a/test/660-checker-simd-sad/expected.txt b/test/660-checker-simd-sad/expected.txt
new file mode 100644
index 0000000..4685416
--- /dev/null
+++ b/test/660-checker-simd-sad/expected.txt
@@ -0,0 +1,7 @@
+SimdSadByte passed
+SimdSadShort passed
+SimdSadShort2 passed
+SimdSadShort3 passed
+SimdSadChar passed
+SimdSadInt passed
+SimdSadLong passed
diff --git a/test/660-checker-simd-sad-int/info.txt b/test/660-checker-simd-sad/info.txt
similarity index 100%
rename from test/660-checker-simd-sad-int/info.txt
rename to test/660-checker-simd-sad/info.txt
diff --git a/test/660-checker-simd-sad/src/Main.java b/test/660-checker-simd-sad/src/Main.java
new file mode 100644
index 0000000..48a4ec0
--- /dev/null
+++ b/test/660-checker-simd-sad/src/Main.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+  public static void main(String[] args) {
+    SimdSadByte.main();
+    SimdSadShort.main();
+    SimdSadShort2.main();
+    SimdSadShort3.main();
+    SimdSadChar.main();
+    SimdSadInt.main();
+    SimdSadLong.main();
+  }
+}
diff --git a/test/660-checker-simd-sad/src/SimdSadByte.java b/test/660-checker-simd-sad/src/SimdSadByte.java
new file mode 100644
index 0000000..11d0f21
--- /dev/null
+++ b/test/660-checker-simd-sad/src/SimdSadByte.java
@@ -0,0 +1,332 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for SAD (sum of absolute differences).
+ */
+public class SimdSadByte {
+
+  // TODO: lower precision still coming, b/64091002
+
+  private static byte sadByte2Byte(byte[] b1, byte[] b2) {
+    int min_length = Math.min(b1.length, b2.length);
+    byte sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      sad += Math.abs(b1[i] - b2[i]);
+    }
+    return sad;
+  }
+
+  private static byte sadByte2ByteAlt(byte[] b1, byte[] b2) {
+    int min_length = Math.min(b1.length, b2.length);
+    byte sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      byte s = b1[i];
+      byte p = b2[i];
+      sad += s >= p ? s - p : p - s;
+    }
+    return sad;
+  }
+
+  private static byte sadByte2ByteAlt2(byte[] b1, byte[] b2) {
+    int min_length = Math.min(b1.length, b2.length);
+    byte sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      byte s = b1[i];
+      byte p = b2[i];
+      int x = s - p;
+      if (x < 0) x = -x;
+      sad += x;
+    }
+    return sad;
+  }
+
+  private static short sadByte2Short(byte[] b1, byte[] b2) {
+    int min_length = Math.min(b1.length, b2.length);
+    short sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      sad += Math.abs(b1[i] - b2[i]);
+    }
+    return sad;
+  }
+
+  private static short sadByte2ShortAlt(byte[] b1, byte[] b2) {
+    int min_length = Math.min(b1.length, b2.length);
+    short sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      byte s = b1[i];
+      byte p = b2[i];
+      sad += s >= p ? s - p : p - s;
+    }
+    return sad;
+  }
+
+  private static short sadByte2ShortAlt2(byte[] b1, byte[] b2) {
+    int min_length = Math.min(b1.length, b2.length);
+    short sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      byte s = b1[i];
+      byte p = b2[i];
+      int x = s - p;
+      if (x < 0) x = -x;
+      sad += x;
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadByte.sadByte2Int(byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadByte.sadByte2Int(byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons16:i\d+>> IntConstant 16                 loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons16>>]      loop:<<Loop>>      outer_loop:none
+  private static int sadByte2Int(byte[] b1, byte[] b2) {
+    int min_length = Math.min(b1.length, b2.length);
+    int sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      sad += Math.abs(b1[i] - b2[i]);
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadByte.sadByte2IntAlt(byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get2>>,<<Get1>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadByte.sadByte2IntAlt(byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons16:i\d+>> IntConstant 16                 loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load2>>,<<Load1>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons16>>]      loop:<<Loop>>      outer_loop:none
+  private static int sadByte2IntAlt(byte[] b1, byte[] b2) {
+    int min_length = Math.min(b1.length, b2.length);
+    int sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      byte s = b1[i];
+      byte p = b2[i];
+      sad += s >= p ? s - p : p - s;
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadByte.sadByte2IntAlt2(byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadByte.sadByte2IntAlt2(byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons16:i\d+>> IntConstant 16                 loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons16>>]      loop:<<Loop>>      outer_loop:none
+  private static int sadByte2IntAlt2(byte[] b1, byte[] b2) {
+    int min_length = Math.min(b1.length, b2.length);
+    int sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      byte s = b1[i];
+      byte p = b2[i];
+      int x = s - p;
+      if (x < 0) x = -x;
+      sad += x;
+    }
+    return sad;
+  }
+
+  /// CHECK-START: long SimdSadByte.sadByte2Long(byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: long SimdSadByte.sadByte2Long(byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons16:i\d+>> IntConstant 16                 loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons16>>]      loop:<<Loop>>      outer_loop:none
+  private static long sadByte2Long(byte[] b1, byte[] b2) {
+    int min_length = Math.min(b1.length, b2.length);
+    long sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      long x = b1[i];
+      long y = b2[i];
+      sad += Math.abs(x - y);
+    }
+    return sad;
+  }
+
+  /// CHECK-START: long SimdSadByte.sadByte2LongAt1(byte[], byte[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:b\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: long SimdSadByte.sadByte2LongAt1(byte[], byte[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons16:i\d+>> IntConstant 16                 loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons16>>]      loop:<<Loop>>      outer_loop:none
+  private static long sadByte2LongAt1(byte[] b1, byte[] b2) {
+    int min_length = Math.min(b1.length, b2.length);
+    long sad = 1;  // starts at 1
+    for (int i = 0; i < min_length; i++) {
+      long x = b1[i];
+      long y = b2[i];
+      sad += Math.abs(x - y);
+    }
+    return sad;
+  }
+
+  public static void main() {
+    // Cross-test the two most extreme values individually.
+    byte[] b1 = { 0, -128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    byte[] b2 = { 0,  127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    expectEquals(-1, sadByte2Byte(b1, b2));
+    expectEquals(-1, sadByte2Byte(b2, b1));
+    expectEquals(-1, sadByte2ByteAlt(b1, b2));
+    expectEquals(-1, sadByte2ByteAlt(b2, b1));
+    expectEquals(-1, sadByte2ByteAlt2(b1, b2));
+    expectEquals(-1, sadByte2ByteAlt2(b2, b1));
+    expectEquals(255, sadByte2Short(b1, b2));
+    expectEquals(255, sadByte2Short(b2, b1));
+    expectEquals(255, sadByte2ShortAlt(b1, b2));
+    expectEquals(255, sadByte2ShortAlt(b2, b1));
+    expectEquals(255, sadByte2ShortAlt2(b1, b2));
+    expectEquals(255, sadByte2ShortAlt2(b2, b1));
+    expectEquals(255, sadByte2Int(b1, b2));
+    expectEquals(255, sadByte2Int(b2, b1));
+    expectEquals(255, sadByte2IntAlt(b1, b2));
+    expectEquals(255, sadByte2IntAlt(b2, b1));
+    expectEquals(255, sadByte2IntAlt2(b1, b2));
+    expectEquals(255, sadByte2IntAlt2(b2, b1));
+    expectEquals(255, sadByte2Long(b1, b2));
+    expectEquals(255L, sadByte2Long(b2, b1));
+    expectEquals(256L, sadByte2LongAt1(b1, b2));
+    expectEquals(256L, sadByte2LongAt1(b2, b1));
+
+    // Use cross-values to test all cases.
+    // One for scalar cleanup.
+    int n = 256;
+    int m = n * n + 1;
+    int k = 0;
+    b1 = new byte[m];
+    b2 = new byte[m];
+    for (int i = 0; i < n; i++) {
+      for (int j = 0; j < n; j++) {
+        b1[k] = (byte) i;
+        b2[k] = (byte) j;
+        k++;
+      }
+    }
+    b1[k] = 10;
+    b2[k] = 2;
+    expectEquals(8, sadByte2Byte(b1, b2));
+    expectEquals(8, sadByte2ByteAlt(b1, b2));
+    expectEquals(8, sadByte2ByteAlt2(b1, b2));
+    expectEquals(21768, sadByte2Short(b1, b2));
+    expectEquals(21768, sadByte2ShortAlt(b1, b2));
+    expectEquals(21768, sadByte2ShortAlt2(b1, b2));
+    expectEquals(5592328, sadByte2Int(b1, b2));
+    expectEquals(5592328, sadByte2IntAlt(b1, b2));
+    expectEquals(5592328, sadByte2IntAlt2(b1, b2));
+    expectEquals(5592328L, sadByte2Long(b1, b2));
+    expectEquals(5592329L, sadByte2LongAt1(b1, b2));
+
+    System.out.println("SimdSadByte passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  private static void expectEquals(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/660-checker-simd-sad/src/SimdSadChar.java b/test/660-checker-simd-sad/src/SimdSadChar.java
new file mode 100644
index 0000000..b847beb
--- /dev/null
+++ b/test/660-checker-simd-sad/src/SimdSadChar.java
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for SAD (sum of absolute differences).
+ */
+public class SimdSadChar {
+
+  // TODO: lower precision still coming, b/64091002
+
+  // TODO: consider unsigned SAD too, b/64091002
+
+  private static char sadChar2Char(char[] s1, char[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    char sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      sad += Math.abs(s1[i] - s2[i]);
+    }
+    return sad;
+  }
+
+  private static char sadChar2CharAlt(char[] s1, char[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    char sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      char s = s1[i];
+      char p = s2[i];
+      sad += s >= p ? s - p : p - s;
+    }
+    return sad;
+  }
+
+  private static char sadChar2CharAlt2(char[] s1, char[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    char sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      char s = s1[i];
+      char p = s2[i];
+      int x = s - p;
+      if (x < 0) x = -x;
+      sad += x;
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadChar.sadChar2Int(char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int SimdSadChar.sadChar2Int(char[], char[]) loop_optimization (after)
+  /// CHECK-NOT: VecSADAccumulate
+  private static int sadChar2Int(char[] s1, char[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    int sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      sad += Math.abs(s1[i] - s2[i]);
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadChar.sadChar2IntAlt(char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get2>>,<<Get1>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int SimdSadChar.sadChar2IntAlt(char[], char[]) loop_optimization (after)
+  /// CHECK-NOT: VecSADAccumulate
+  private static int sadChar2IntAlt(char[] s1, char[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    int sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      char s = s1[i];
+      char p = s2[i];
+      sad += s >= p ? s - p : p - s;
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadChar.sadChar2IntAlt2(char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int SimdSadChar.sadChar2IntAlt2(char[], char[]) loop_optimization (after)
+  /// CHECK-NOT: VecSADAccumulate
+  private static int sadChar2IntAlt2(char[] s1, char[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    int sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      char s = s1[i];
+      char p = s2[i];
+      int x = s - p;
+      if (x < 0) x = -x;
+      sad += x;
+    }
+    return sad;
+  }
+
+  /// CHECK-START: long SimdSadChar.sadChar2Long(char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: long SimdSadChar.sadChar2Long(char[], char[]) loop_optimization (after)
+  /// CHECK-NOT: VecSADAccumulate
+  private static long sadChar2Long(char[] s1, char[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    long sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      long x = s1[i];
+      long y = s2[i];
+      sad += Math.abs(x - y);
+    }
+    return sad;
+  }
+
+  /// CHECK-START: long SimdSadChar.sadChar2LongAt1(char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: long SimdSadChar.sadChar2LongAt1(char[], char[]) loop_optimization (after)
+  /// CHECK-NOT: VecSADAccumulate
+  private static long sadChar2LongAt1(char[] s1, char[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    long sad = 1;  // starts at 1
+    for (int i = 0; i < min_length; i++) {
+      long x = s1[i];
+      long y = s2[i];
+      sad += Math.abs(x - y);
+    }
+    return sad;
+  }
+
+  public static void main() {
+    // Cross-test the two most extreme values individually.
+    char[] s1 = { 0, 0x8000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    char[] s2 = { 0, 0x7fff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    expectEquals(1, sadChar2Char(s1, s2));
+    expectEquals(1, sadChar2Char(s2, s1));
+    expectEquals(1, sadChar2CharAlt(s1, s2));
+    expectEquals(1, sadChar2CharAlt(s2, s1));
+    expectEquals(1, sadChar2CharAlt2(s1, s2));
+    expectEquals(1, sadChar2CharAlt2(s2, s1));
+    expectEquals(1, sadChar2Int(s1, s2));
+    expectEquals(1, sadChar2Int(s2, s1));
+    expectEquals(1, sadChar2IntAlt(s1, s2));
+    expectEquals(1, sadChar2IntAlt(s2, s1));
+    expectEquals(1, sadChar2IntAlt2(s1, s2));
+    expectEquals(1, sadChar2IntAlt2(s2, s1));
+    expectEquals(1L, sadChar2Long(s1, s2));
+    expectEquals(1L, sadChar2Long(s2, s1));
+    expectEquals(2L, sadChar2LongAt1(s1, s2));
+    expectEquals(2L, sadChar2LongAt1(s2, s1));
+
+    // Use cross-values to test all cases.
+    char[] interesting = {
+      (char) 0x0000,
+      (char) 0x0001,
+      (char) 0x0002,
+      (char) 0x1234,
+      (char) 0x8000,
+      (char) 0x8001,
+      (char) 0x7fff,
+      (char) 0xffff
+    };
+    int n = interesting.length;
+    int m = n * n + 1;
+    s1 = new char[m];
+    s2 = new char[m];
+    int k = 0;
+    for (int i = 0; i < n; i++) {
+      for (int j = 0; j < n; j++) {
+        s1[k] = interesting[i];
+        s2[k] = interesting[j];
+        k++;
+      }
+    }
+    s1[k] = 10;
+    s2[k] = 2;
+    expectEquals(56196, sadChar2Char(s1, s2));
+    expectEquals(56196, sadChar2CharAlt(s1, s2));
+    expectEquals(56196, sadChar2CharAlt2(s1, s2));
+    expectEquals(1497988, sadChar2Int(s1, s2));
+    expectEquals(1497988, sadChar2IntAlt(s1, s2));
+    expectEquals(1497988, sadChar2IntAlt2(s1, s2));
+    expectEquals(1497988L, sadChar2Long(s1, s2));
+    expectEquals(1497989L, sadChar2LongAt1(s1, s2));
+
+    System.out.println("SimdSadChar passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  private static void expectEquals(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/660-checker-simd-sad/src/SimdSadInt.java b/test/660-checker-simd-sad/src/SimdSadInt.java
new file mode 100644
index 0000000..08cdf98
--- /dev/null
+++ b/test/660-checker-simd-sad/src/SimdSadInt.java
@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for SAD (sum of absolute differences).
+ */
+public class SimdSadInt {
+
+  /// CHECK-START: int SimdSadInt.sadInt2Int(int[], int[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: int SimdSadInt.sadInt2Int(int[], int[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons:i\d+>>   IntConstant {{2|4}}                        loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [{{i\d+}}]                   loop:none
+  /// CHECK-DAG: <<Phi:d\d+>>    Phi [<<Set>>,{{d\d+}}]                     loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Ld1:d\d+>>    VecLoad [{{l\d+}},<<I:i\d+>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Ld2:d\d+>>    VecLoad [{{l\d+}},<<I>>]                   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi>>,<<Ld1>>,<<Ld2>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<I>>,<<Cons>>]                       loop:<<Loop>> outer_loop:none
+  private static int sadInt2Int(int[] x, int[] y) {
+    int min_length = Math.min(x.length, y.length);
+    int sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      sad += Math.abs(x[i] - y[i]);
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadInt.sadInt2IntAlt(int[], int[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                       loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                       loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]            loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub1:i\d+>>   Sub [<<Get2>>,<<Get1>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub2:i\d+>>   Sub [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Select:i\d+>> Select [<<Sub2>>,<<Sub1>>,{{z\d+}}] loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Select>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]            loop:<<Loop>>      outer_loop:none
+  //
+  // No ABS? No SAD!
+  //
+  /// CHECK-START: int SimdSadInt.sadInt2IntAlt(int[], int[]) loop_optimization (after)
+  /// CHECK-NOT: VecSADAccumulate
+  private static int sadInt2IntAlt(int[] x, int[] y) {
+    int min_length = Math.min(x.length, y.length);
+    int sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      int s = x[i];
+      int p = y[i];
+      sad += s >= p ? s - p : p - s;
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadInt.sadInt2IntAlt2(int[], int[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-{ARM,ARM64}: int SimdSadInt.sadInt2IntAlt2(int[], int[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons:i\d+>>   IntConstant {{2|4}}                        loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [{{i\d+}}]                   loop:none
+  /// CHECK-DAG: <<Phi:d\d+>>    Phi [<<Set>>,{{d\d+}}]                     loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Ld1:d\d+>>    VecLoad [{{l\d+}},<<I:i\d+>>]              loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Ld2:d\d+>>    VecLoad [{{l\d+}},<<I>>]                   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi>>,<<Ld1>>,<<Ld2>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<I>>,<<Cons>>]                       loop:<<Loop>> outer_loop:none
+  private static int sadInt2IntAlt2(int[] x, int[] y) {
+    int min_length = Math.min(x.length, y.length);
+    int sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      int s = x[i];
+      int p = y[i];
+      int m = s - p;
+      if (m < 0) m = -m;
+      sad += m;
+    }
+    return sad;
+  }
+
+  /// CHECK-START: long SimdSadInt.sadInt2Long(int[], int[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: long SimdSadInt.sadInt2Long(int[], int[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons4:i\d+>>  IntConstant 4                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons4>>]       loop:<<Loop>>      outer_loop:none
+  private static long sadInt2Long(int[] x, int[] y) {
+    int min_length = Math.min(x.length, y.length);
+    long sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      long s = x[i];
+      long p = y[i];
+      sad += Math.abs(s - p);
+    }
+    return sad;
+  }
+
+  /// CHECK-START: long SimdSadInt.sadInt2LongAt1(int[], int[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:i\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: long SimdSadInt.sadInt2LongAt1(int[], int[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons4:i\d+>>  IntConstant 4                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons4>>]       loop:<<Loop>>      outer_loop:none
+  private static long sadInt2LongAt1(int[] x, int[] y) {
+    int min_length = Math.min(x.length, y.length);
+    long sad = 1;  // starts at 1
+    for (int i = 0; i < min_length; i++) {
+      long s = x[i];
+      long p = y[i];
+      sad += Math.abs(s - p);
+    }
+    return sad;
+  }
+
+  public static void main() {
+    // Cross-test the two most extreme values individually.
+    int[] x = { 0, Integer.MAX_VALUE, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    int[] y = { 0, Integer.MIN_VALUE, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    expectEquals(1, sadInt2Int(x, y));
+    expectEquals(1, sadInt2Int(y, x));
+    expectEquals(-1, sadInt2IntAlt(x, y));
+    expectEquals(-1, sadInt2IntAlt(y, x));
+    expectEquals(1, sadInt2IntAlt2(x, y));
+    expectEquals(1, sadInt2IntAlt2(y, x));
+    expectEquals(4294967295L, sadInt2Long(x, y));
+    expectEquals(4294967295L, sadInt2Long(y, x));
+    expectEquals(4294967296L, sadInt2LongAt1(x, y));
+    expectEquals(4294967296L, sadInt2LongAt1(y, x));
+
+    // Use cross-values for the interesting values.
+    int[] interesting = {
+      0x00000000, 0x00000001, 0x00007fff, 0x00008000, 0x00008001, 0x0000ffff,
+      0x00010000, 0x00010001, 0x00017fff, 0x00018000, 0x00018001, 0x0001ffff,
+      0x7fff0000, 0x7fff0001, 0x7fff7fff, 0x7fff8000, 0x7fff8001, 0x7fffffff,
+      0x80000000, 0x80000001, 0x80007fff, 0x80008000, 0x80008001, 0x8000ffff,
+      0x80010000, 0x80010001, 0x80017fff, 0x80018000, 0x80018001, 0x8001ffff,
+      0xffff0000, 0xffff0001, 0xffff7fff, 0xffff8000, 0xffff8001, 0xffffffff
+    };
+    int n = interesting.length;
+    int m = n * n + 1;
+    x = new int[m];
+    y = new int[m];
+    int k = 0;
+    for (int i = 0; i < n; i++) {
+      for (int j = 0; j < n; j++) {
+        x[k] = interesting[i];
+        y[k] = interesting[j];
+        k++;
+      }
+    }
+    x[k] = 10;
+    y[k] = 2;
+    expectEquals(8, sadInt2Int(x, y));
+    expectEquals(-13762600, sadInt2IntAlt(x, y));
+    expectEquals(8, sadInt2IntAlt2(x, y));
+    expectEquals(2010030931928L, sadInt2Long(x, y));
+    expectEquals(2010030931929L, sadInt2LongAt1(x, y));
+
+    System.out.println("SimdSadInt passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  private static void expectEquals(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/660-checker-simd-sad/src/SimdSadLong.java b/test/660-checker-simd-sad/src/SimdSadLong.java
new file mode 100644
index 0000000..6a05963
--- /dev/null
+++ b/test/660-checker-simd-sad/src/SimdSadLong.java
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for SAD (sum of absolute differences).
+ */
+public class SimdSadLong {
+
+  /// CHECK-START: long SimdSadLong.sadLong2Long(long[], long[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:j\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:j\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: long SimdSadLong.sadLong2Long(long[], long[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons2:i\d+>>  IntConstant 2                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons2>>]       loop:<<Loop>>      outer_loop:none
+  private static long sadLong2Long(long[] x, long[] y) {
+    int min_length = Math.min(x.length, y.length);
+    long sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      sad += Math.abs(x[i] - y[i]);
+    }
+    return sad;
+  }
+
+  /// CHECK-START: long SimdSadLong.sadLong2LongAlt(long[], long[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                       loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                       loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]            loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:j\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:j\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub1:j\d+>>   Sub [<<Get2>>,<<Get1>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub2:j\d+>>   Sub [<<Get1>>,<<Get2>>]             loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Select:j\d+>> Select [<<Sub2>>,<<Sub1>>,{{z\d+}}] loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Select>>]           loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]            loop:<<Loop>>      outer_loop:none
+  //
+  // No ABS? No SAD!
+  //
+  /// CHECK-START: long SimdSadLong.sadLong2LongAlt(long[], long[]) loop_optimization (after)
+  /// CHECK-NOT: VecSADAccumulate
+  private static long sadLong2LongAlt(long[] x, long[] y) {
+    int min_length = Math.min(x.length, y.length);
+    long sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      long s = x[i];
+      long p = y[i];
+      sad += s >= p ? s - p : p - s;
+    }
+    return sad;
+  }
+
+  /// CHECK-START: long SimdSadLong.sadLong2LongAlt2(long[], long[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:j\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:j\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: long SimdSadLong.sadLong2LongAlt2(long[], long[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons2:i\d+>>  IntConstant 2                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons2>>]       loop:<<Loop>>      outer_loop:none
+  private static long sadLong2LongAlt2(long[] x, long[] y) {
+    int min_length = Math.min(x.length, y.length);
+    long sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      long s = x[i];
+      long p = y[i];
+      long m = s - p;
+      if (m < 0) m = -m;
+      sad += m;
+    }
+    return sad;
+  }
+
+  /// CHECK-START: long SimdSadLong.sadLong2LongAt1(long[], long[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:j\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:j\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: long SimdSadLong.sadLong2LongAt1(long[], long[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons2:i\d+>>  IntConstant 2                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons2>>]       loop:<<Loop>>      outer_loop:none
+  private static long sadLong2LongAt1(long[] x, long[] y) {
+    int min_length = Math.min(x.length, y.length);
+    long sad = 1;  // starts at 1
+    for (int i = 0; i < min_length; i++) {
+      sad += Math.abs(x[i] - y[i]);
+    }
+    return sad;
+  }
+
+  public static void main() {
+    // Cross-test the two most extreme values individually.
+    long[] x = { 0, Long.MIN_VALUE, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    long[] y = { 0, Long.MAX_VALUE, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    expectEquals(1L, sadLong2Long(x, y));
+    expectEquals(1L, sadLong2Long(y, x));
+    expectEquals(-1L, sadLong2LongAlt(x, y));
+    expectEquals(-1L, sadLong2LongAlt(y, x));
+    expectEquals(1L, sadLong2LongAlt2(x, y));
+    expectEquals(1L, sadLong2LongAlt2(y, x));
+    expectEquals(2L, sadLong2LongAt1(x, y));
+    expectEquals(2L, sadLong2LongAt1(y, x));
+
+    // Use cross-values for the interesting values.
+    long[] interesting = {
+      0x0000000000000000L, 0x0000000000000001L, 0x000000007fffffffL,
+      0x0000000080000000L, 0x0000000080000001L, 0x00000000ffffffffL,
+      0x0000000100000000L, 0x0000000100000001L, 0x000000017fffffffL,
+      0x0000000180000000L, 0x0000000180000001L, 0x00000001ffffffffL,
+      0x7fffffff00000000L, 0x7fffffff00000001L, 0x7fffffff7fffffffL,
+      0x7fffffff80000000L, 0x7fffffff80000001L, 0x7fffffffffffffffL,
+      0x8000000000000000L, 0x8000000000000001L, 0x800000007fffffffL,
+      0x8000000080000000L, 0x8000000080000001L, 0x80000000ffffffffL,
+      0x8000000100000000L, 0x8000000100000001L, 0x800000017fffffffL,
+      0x8000000180000000L, 0x8000000180000001L, 0x80000001ffffffffL,
+      0xffffffff00000000L, 0xffffffff00000001L, 0xffffffff7fffffffL,
+      0xffffffff80000000L, 0xffffffff80000001L, 0xffffffffffffffffL
+    };
+    int n = interesting.length;
+    int m = n * n + 1;
+    x = new long[m];
+    y = new long[m];
+    int k = 0;
+    for (int i = 0; i < n; i++) {
+      for (int j = 0; j < n; j++) {
+        x[k] = interesting[i];
+        y[k] = interesting[j];
+        k++;
+      }
+    }
+    x[k] = 10;
+    y[k] = 2;
+    expectEquals(8L, sadLong2Long(x, y));
+    expectEquals(-901943132200L, sadLong2LongAlt(x, y));
+    expectEquals(8L, sadLong2LongAlt2(x, y));
+    expectEquals(9L, sadLong2LongAt1(x, y));
+
+    System.out.println("SimdSadLong passed");
+  }
+
+  private static void expectEquals(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/660-checker-simd-sad/src/SimdSadShort.java b/test/660-checker-simd-sad/src/SimdSadShort.java
new file mode 100644
index 0000000..b6e4a14
--- /dev/null
+++ b/test/660-checker-simd-sad/src/SimdSadShort.java
@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for SAD (sum of absolute differences).
+ */
+public class SimdSadShort {
+
+  private static int $inline$seven() {
+    return 7;
+  }
+
+  // TODO: lower precision still coming, b/64091002
+
+  private static short sadShort2Short(short[] s1, short[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    short sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      sad += Math.abs(s1[i] - s2[i]);
+    }
+    return sad;
+  }
+
+  private static short sadShort2ShortAlt(short[] s1, short[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    short sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      short s = s1[i];
+      short p = s2[i];
+      sad += s >= p ? s - p : p - s;
+    }
+    return sad;
+  }
+
+  private static short sadShort2ShortAlt2(short[] s1, short[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    short sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      short s = s1[i];
+      short p = s2[i];
+      int x = s - p;
+      if (x < 0) x = -x;
+      sad += x;
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadShort.sadShort2Int(short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadShort.sadShort2Int(short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static int sadShort2Int(short[] s1, short[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    int sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      sad += Math.abs(s1[i] - s2[i]);
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadShort.sadShort2IntAlt(short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get2>>,<<Get1>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadShort.sadShort2IntAlt(short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load2>>,<<Load1>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static int sadShort2IntAlt(short[] s1, short[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    int sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      short s = s1[i];
+      short p = s2[i];
+      sad += s >= p ? s - p : p - s;
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadShort.sadShort2IntAlt2(short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadShort.sadShort2IntAlt2(short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static int sadShort2IntAlt2(short[] s1, short[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    int sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      short s = s1[i];
+      short p = s2[i];
+      int x = s - p;
+      if (x < 0) x = -x;
+      sad += x;
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadShort.sadShort2IntConstant1(short[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Cons:i\d+>>   IntConstant -7                 loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>    Add [<<Get1>>,<<Cons>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Add>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadShort.sadShort2IntConstant1(short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Cons:i\d+>>   IntConstant 7                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<Cons>>]  loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Rep>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static int sadShort2IntConstant1(short[] s) {
+    int sad = 0;
+    for (int i = 0; i < s.length; i++) {
+      sad += Math.abs(s[i] - 7);  // s[i] + -7
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadShort.sadShort2IntConstant2(short[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Cons:i\d+>>   IntConstant 7                  loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Cons>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadShort.sadShort2IntConstant2(short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Cons:i\d+>>   IntConstant 7                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<Cons>>]  loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Rep>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static int sadShort2IntConstant2(short[] s) {
+    int sad = 0;
+    for (int i = 0; i < s.length; i++) {
+      sad += Math.abs(s[i] - $inline$seven());  // s[i] - 7
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadShort.sadShort2IntConstant3(short[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Cons:i\d+>>   IntConstant 7                  loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>    Add [<<Get1>>,<<Cons>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Add>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadShort.sadShort2IntConstant3(short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Cons:i\d+>>   IntConstant -7                 loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<Cons>>]  loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Rep>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static int sadShort2IntConstant3(short[] s) {
+    int sad = 0;
+    for (int i = 0; i < s.length; i++) {
+      sad += Math.abs(s[i] + $inline$seven());  // hidden s[i] - (-7)
+    }
+    return sad;
+  }
+
+  /// CHECK-START: long SimdSadShort.sadShort2Long(short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: long SimdSadShort.sadShort2Long(short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static long sadShort2Long(short[] s1, short[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    long sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      long x = s1[i];
+      long y = s2[i];
+      sad += Math.abs(x - y);
+    }
+    return sad;
+  }
+
+  /// CHECK-START: long SimdSadShort.sadShort2LongAt1(short[], short[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: long SimdSadShort.sadShort2LongAt1(short[], short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static long sadShort2LongAt1(short[] s1, short[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    long sad = 1;  // starts at 1
+    for (int i = 0; i < min_length; i++) {
+      long x = s1[i];
+      long y = s2[i];
+      sad += Math.abs(x - y);
+    }
+    return sad;
+  }
+
+  public static void main() {
+    // Cross-test the two most extreme values individually.
+    short[] s1 = { 0, -32768, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    short[] s2 = { 0,  32767, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    expectEquals(-1, sadShort2Short(s1, s2));
+    expectEquals(-1, sadShort2Short(s2, s1));
+    expectEquals(-1, sadShort2ShortAlt(s1, s2));
+    expectEquals(-1, sadShort2ShortAlt(s2, s1));
+    expectEquals(-1, sadShort2ShortAlt2(s1, s2));
+    expectEquals(-1, sadShort2ShortAlt2(s2, s1));
+    expectEquals(65535, sadShort2Int(s1, s2));
+    expectEquals(65535, sadShort2Int(s2, s1));
+    expectEquals(65535, sadShort2IntAlt(s1, s2));
+    expectEquals(65535, sadShort2IntAlt(s2, s1));
+    expectEquals(65535, sadShort2IntAlt2(s1, s2));
+    expectEquals(65535, sadShort2IntAlt2(s2, s1));
+    expectEquals(32880, sadShort2IntConstant1(s1));
+    expectEquals(32880, sadShort2IntConstant2(s1));
+    expectEquals(32866, sadShort2IntConstant3(s1));
+    expectEquals(65535L, sadShort2Long(s1, s2));
+    expectEquals(65535L, sadShort2Long(s2, s1));
+    expectEquals(65536L, sadShort2LongAt1(s1, s2));
+    expectEquals(65536L, sadShort2LongAt1(s2, s1));
+
+    // Use cross-values to test all cases.
+    short[] interesting = {
+      (short) 0x0000,
+      (short) 0x0001,
+      (short) 0x0002,
+      (short) 0x1234,
+      (short) 0x8000,
+      (short) 0x8001,
+      (short) 0x7fff,
+      (short) 0xffff
+    };
+    int n = interesting.length;
+    int m = n * n + 1;
+    s1 = new short[m];
+    s2 = new short[m];
+    int k = 0;
+    for (int i = 0; i < n; i++) {
+      for (int j = 0; j < n; j++) {
+        s1[k] = interesting[i];
+        s2[k] = interesting[j];
+        k++;
+      }
+    }
+    s1[k] = 10;
+    s2[k] = 2;
+    expectEquals(-18932, sadShort2Short(s1, s2));
+    expectEquals(-18932, sadShort2ShortAlt(s1, s2));
+    expectEquals(-18932, sadShort2ShortAlt2(s1, s2));
+    expectEquals(1291788, sadShort2Int(s1, s2));
+    expectEquals(1291788, sadShort2IntAlt(s1, s2));
+    expectEquals(1291788, sadShort2IntAlt2(s1, s2));
+    expectEquals(823907, sadShort2IntConstant1(s1));
+    expectEquals(823907, sadShort2IntConstant2(s1));
+    expectEquals(823953, sadShort2IntConstant3(s1));
+    expectEquals(1291788L, sadShort2Long(s1, s2));
+    expectEquals(1291789L, sadShort2LongAt1(s1, s2));
+
+    System.out.println("SimdSadShort passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  private static void expectEquals(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/660-checker-simd-sad/src/SimdSadShort2.java b/test/660-checker-simd-sad/src/SimdSadShort2.java
new file mode 100644
index 0000000..b110bb4
--- /dev/null
+++ b/test/660-checker-simd-sad/src/SimdSadShort2.java
@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for SAD (sum of absolute differences).
+ *
+ * Special case, char array that is first casted to short, forcing sign extension.
+ */
+public class SimdSadShort2 {
+
+  // TODO: lower precision still coming, b/64091002
+
+  private static short sadCastedChar2Short(char[] s1, char[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    short sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      sad += Math.abs(((short) s1[i]) - ((short) s2[i]));
+    }
+    return sad;
+  }
+
+  private static short sadCastedChar2ShortAlt(char[] s1, char[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    short sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      short s = (short) s1[i];
+      short p = (short) s2[i];
+      sad += s >= p ? s - p : p - s;
+    }
+    return sad;
+  }
+
+  private static short sadCastedChar2ShortAlt2(char[] s1, char[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    short sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      short s = (short) s1[i];
+      short p = (short) s2[i];
+      int x = s - p;
+      if (x < 0) x = -x;
+      sad += x;
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadShort2.sadCastedChar2Int(char[], char[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<BC1:i\d+>>    BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>     outer_loop:none
+  /// CHECK-DAG: <<BC2:i\d+>>    BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>     outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<BC1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<BC2>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv1:s\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv2:s\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsInt loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int SimdSadShort2.sadCastedChar2Int(char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadShort2.sadCastedChar2Int(char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static int sadCastedChar2Int(char[] s1, char[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    int sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      sad += Math.abs(((short) s1[i]) - ((short) s2[i]));
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadShort2.sadCastedChar2IntAlt(char[], char[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<BC1:i\d+>>    BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<BC2:i\d+>>    BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<BC1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<BC2>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv1:s\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv2:s\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub1:i\d+>>   Sub [<<Cnv2>>,<<Cnv1>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub2:i\d+>>   Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Phi3:i\d+>>   Phi [<<Sub2>>,<<Sub1>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Phi3>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int SimdSadShort2.sadCastedChar2IntAlt(char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get2>>,<<Get1>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadShort2.sadCastedChar2IntAlt(char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load2>>,<<Load1>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static int sadCastedChar2IntAlt(char[] s1, char[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    int sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      short s = (short) s1[i];
+      short p = (short) s2[i];
+      sad += s >= p ? s - p : p - s;
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadShort2.sadCastedChar2IntAlt2(char[], char[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<BC1:i\d+>>    BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<BC2:i\d+>>    BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<BC1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<BC2>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv1:s\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv2:s\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Neg:i\d+>>    Neg [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Phi3:i\d+>>   Phi [<<Sub>>,<<Neg>>]          loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Phi3>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: int SimdSadShort2.sadCastedChar2IntAlt2(char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get1>>,<<Get2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadShort2.sadCastedChar2IntAlt2(char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static int sadCastedChar2IntAlt2(char[] s1, char[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    int sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      short s = (short) s1[i];
+      short p = (short) s2[i];
+      int x = s - p;
+      if (x < 0) x = -x;
+      sad += x;
+    }
+    return sad;
+  }
+
+  /// CHECK-START: long SimdSadShort2.sadCastedChar2Long(char[], char[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<BC1:i\d+>>    BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>     outer_loop:none
+  /// CHECK-DAG: <<BC2:i\d+>>    BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>     outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<BC1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<BC2>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv1:s\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv2:s\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv3:j\d+>>   TypeConversion [<<Cnv1>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv4:j\d+>>   TypeConversion [<<Cnv2>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv3>>,<<Cnv4>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:j\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsLong loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: long SimdSadShort2.sadCastedChar2Long(char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: long SimdSadShort2.sadCastedChar2Long(char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 0                 loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static long sadCastedChar2Long(char[] s1, char[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    long sad = 0;
+    for (int i = 0; i < min_length; i++) {
+      long x = (short) s1[i];
+      long y = (short) s2[i];
+      sad += Math.abs(x - y);
+    }
+    return sad;
+  }
+
+  /// CHECK-START: long SimdSadShort2.sadCastedChar2LongAt1(char[], char[]) instruction_simplifier (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<BC1:i\d+>>    BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>     outer_loop:none
+  /// CHECK-DAG: <<BC2:i\d+>>    BoundsCheck [<<Phi1>>,{{i\d+}}] loop:<<Loop>>     outer_loop:none
+  /// CHECK-DAG: <<Get1:c\d+>>   ArrayGet [{{l\d+}},<<BC1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:c\d+>>   ArrayGet [{{l\d+}},<<BC2>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv1:s\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv2:s\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv3:j\d+>>   TypeConversion [<<Cnv1>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv4:j\d+>>   TypeConversion [<<Cnv2>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv3>>,<<Cnv4>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:j\d+>> InvokeStaticOrDirect [<<Sub>>] intrinsic:MathAbsLong loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START: long SimdSadShort2.sadCastedChar2LongAt1(char[], char[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:j\d+>>   Phi [<<ConsL>>,{{j\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get1:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get2:s\d+>>   ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv1:j\d+>>   TypeConversion [<<Get1>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Cnv2:j\d+>>   TypeConversion [<<Get2>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:j\d+>>    Sub [<<Cnv1>>,<<Cnv2>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:j\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: long SimdSadShort2.sadCastedChar2LongAt1(char[], char[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<ConsL:j\d+>>  LongConstant 1                 loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<ConsL>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load2:d\d+>>  VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load1>>,<<Load2>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static long sadCastedChar2LongAt1(char[] s1, char[] s2) {
+    int min_length = Math.min(s1.length, s2.length);
+    long sad = 1;  // starts at 1
+    for (int i = 0; i < min_length; i++) {
+      long x = (short) s1[i];
+      long y = (short) s2[i];
+      sad += Math.abs(x - y);
+    }
+    return sad;
+  }
+
+  public static void main() {
+    // Cross-test the two most extreme values individually.
+    char[] s1 = { 0, 0x8000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    char[] s2 = { 0, 0x7fff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    expectEquals(-1, sadCastedChar2Short(s1, s2));
+    expectEquals(-1, sadCastedChar2Short(s2, s1));
+    expectEquals(-1, sadCastedChar2ShortAlt(s1, s2));
+    expectEquals(-1, sadCastedChar2ShortAlt(s2, s1));
+    expectEquals(-1, sadCastedChar2ShortAlt2(s1, s2));
+    expectEquals(-1, sadCastedChar2ShortAlt2(s2, s1));
+    expectEquals(65535, sadCastedChar2Int(s1, s2));
+    expectEquals(65535, sadCastedChar2Int(s2, s1));
+    expectEquals(65535, sadCastedChar2IntAlt(s1, s2));
+    expectEquals(65535, sadCastedChar2IntAlt(s2, s1));
+    expectEquals(65535, sadCastedChar2IntAlt2(s1, s2));
+    expectEquals(65535, sadCastedChar2IntAlt2(s2, s1));
+    expectEquals(65535L, sadCastedChar2Long(s1, s2));
+    expectEquals(65535L, sadCastedChar2Long(s2, s1));
+    expectEquals(65536L, sadCastedChar2LongAt1(s1, s2));
+    expectEquals(65536L, sadCastedChar2LongAt1(s2, s1));
+
+    // Use cross-values to test all cases.
+    char[] interesting = {
+      (char) 0x0000,
+      (char) 0x0001,
+      (char) 0x0002,
+      (char) 0x1234,
+      (char) 0x8000,
+      (char) 0x8001,
+      (char) 0x7fff,
+      (char) 0xffff
+    };
+    int n = interesting.length;
+    int m = n * n + 1;
+    s1 = new char[m];
+    s2 = new char[m];
+    int k = 0;
+    for (int i = 0; i < n; i++) {
+      for (int j = 0; j < n; j++) {
+        s1[k] = interesting[i];
+        s2[k] = interesting[j];
+        k++;
+      }
+    }
+    s1[k] = 10;
+    s2[k] = 2;
+    expectEquals(-18932, sadCastedChar2Short(s1, s2));
+    expectEquals(-18932, sadCastedChar2ShortAlt(s1, s2));
+    expectEquals(-18932, sadCastedChar2ShortAlt2(s1, s2));
+    expectEquals(1291788, sadCastedChar2Int(s1, s2));
+    expectEquals(1291788, sadCastedChar2IntAlt(s1, s2));
+    expectEquals(1291788, sadCastedChar2IntAlt2(s1, s2));
+    expectEquals(1291788L, sadCastedChar2Long(s1, s2));
+    expectEquals(1291789L, sadCastedChar2LongAt1(s1, s2));
+
+    System.out.println("SimdSadShort2 passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  private static void expectEquals(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/660-checker-simd-sad/src/SimdSadShort3.java b/test/660-checker-simd-sad/src/SimdSadShort3.java
new file mode 100644
index 0000000..bbe35d2
--- /dev/null
+++ b/test/660-checker-simd-sad/src/SimdSadShort3.java
@@ -0,0 +1,354 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Tests for SAD (sum of absolute differences).
+ *
+ * Some special cases: parameters, constants, invariants, casted computations.
+ */
+public class SimdSadShort3 {
+
+  /// CHECK-START: int SimdSadShort3.sadShort2IntParamRight(short[], short) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Param:s\d+>>  ParameterValue                 loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get>>,<<Param>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadShort3.sadShort2IntParamRight(short[], short) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<Param:s\d+>>  ParameterValue                 loop:none
+  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<Param>>] loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load>>,<<Rep>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static int sadShort2IntParamRight(short[] s, short param) {
+    int sad = 0;
+    for (int i = 0; i < s.length; i++) {
+      sad += Math.abs(s[i] - param);
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadShort3.sadShort2IntParamLeft(short[], short) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Param:s\d+>>  ParameterValue                 loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Param>>,<<Get>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadShort3.sadShort2IntParamLeft(short[], short) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<Param:s\d+>>  ParameterValue                 loop:none
+  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<Param>>] loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Rep>>,<<Load>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static int sadShort2IntParamLeft(short[] s, short param) {
+    int sad = 0;
+    for (int i = 0; i < s.length; i++) {
+      sad += Math.abs(param - s[i]);
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadShort3.sadShort2IntConstRight(short[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<ConsI:i\d+>>  IntConstant -32767             loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>    Add [<<Get>>,<<ConsI>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Add>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadShort3.sadShort2IntConstRight(short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<ConsI:i\d+>>  IntConstant 32767              loop:none
+  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<ConsI>>] loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load>>,<<Rep>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static int sadShort2IntConstRight(short[] s) {
+    int sad = 0;
+    for (int i = 0; i < s.length; i++) {
+      sad += Math.abs(s[i] - 32767);
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadShort3.sadShort2IntConstLeft(short[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<ConsI:i\d+>>  IntConstant 32767              loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<ConsI>>,<<Get>>]        loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadShort3.sadShort2IntConstLeft(short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<ConsI:i\d+>>  IntConstant 32767              loop:none
+  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<ConsI>>] loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Rep>>,<<Load>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static int sadShort2IntConstLeft(short[] s) {
+    int sad = 0;
+    for (int i = 0; i < s.length; i++) {
+      sad += Math.abs(32767 - s[i]);
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadShort3.sadShort2IntInvariantRight(short[], int) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Conv:s\d+>>   TypeConversion [{{i\d+}}]      loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get>>,<<Conv>>]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadShort3.sadShort2IntInvariantRight(short[], int) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<Conv:s\d+>>   TypeConversion [{{i\d+}}]      loop:none
+  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<Conv>>]  loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load>>,<<Rep>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static int sadShort2IntInvariantRight(short[] s, int val) {
+    int sad = 0;
+    short x = (short) (val + 1);
+    for (int i = 0; i < s.length; i++) {
+      sad += Math.abs(s[i] - x);
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadShort3.sadShort2IntInvariantLeft(short[], int) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<Conv:s\d+>>   TypeConversion [{{i\d+}}]      loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Conv>>,<<Get>>]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadShort3.sadShort2IntInvariantLeft(short[], int) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<Conv:s\d+>>   TypeConversion [{{i\d+}}]      loop:none
+  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<Conv>>]  loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Rep>>,<<Load>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static int sadShort2IntInvariantLeft(short[] s, int val) {
+    int sad = 0;
+    short x = (short) (val + 1);
+    for (int i = 0; i < s.length; i++) {
+      sad += Math.abs(x - s[i]);
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadShort3.sadShort2IntCastedExprRight(short[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<ConsI:i\d+>>  IntConstant 110                loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>    [<<Get>>,<<ConsI>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Conv:s\d+>>   TypeConversion [<<Add>>]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Get>>,<<Conv>>]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadShort3.sadShort2IntCastedExprRight(short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<ConsI:i\d+>>  IntConstant 110                loop:none
+  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<ConsI>>] loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:d\d+>>    VecAdd [<<Load>>,<<Rep>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Load>>,<<Add>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static int sadShort2IntCastedExprRight(short[] s) {
+    int sad = 0;
+    for (int i = 0; i < s.length; i++) {
+      short x = (short) (s[i] + 110);  // narrower part sign extends
+      sad += Math.abs(s[i] - x);
+    }
+    return sad;
+  }
+
+  /// CHECK-START: int SimdSadShort3.sadShort2IntCastedExprLeft(short[]) loop_optimization (before)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons1:i\d+>>  IntConstant 1                  loop:none
+  /// CHECK-DAG: <<ConsI:i\d+>>  IntConstant 110                loop:none
+  /// CHECK-DAG: <<Phi2:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Get:s\d+>>    ArrayGet [{{l\d+}},<<Phi1>>]   loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:i\d+>>    [<<Get>>,<<ConsI>>]            loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Conv:s\d+>>   TypeConversion [<<Add>>]       loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Sub:i\d+>>    Sub [<<Conv>>,<<Get>>]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Intrin:i\d+>> Abs [<<Sub>>]                  loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi2>>,<<Intrin>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]       loop:<<Loop>>      outer_loop:none
+  //
+  /// CHECK-START-ARM64: int SimdSadShort3.sadShort2IntCastedExprLeft(short[]) loop_optimization (after)
+  /// CHECK-DAG: <<Cons0:i\d+>>  IntConstant 0                  loop:none
+  /// CHECK-DAG: <<Cons8:i\d+>>  IntConstant 8                  loop:none
+  /// CHECK-DAG: <<ConsI:i\d+>>  IntConstant 110                loop:none
+  /// CHECK-DAG: <<Rep:d\d+>>    VecReplicateScalar [<<ConsI>>] loop:none
+  /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [<<Cons0>>]      loop:none
+  /// CHECK-DAG: <<Phi1:i\d+>>   Phi [<<Cons0>>,{{i\d+}}]       loop:<<Loop:B\d+>> outer_loop:none
+  /// CHECK-DAG: <<Phi2:d\d+>>   Phi [<<Set>>,{{d\d+}}]         loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Load:d\d+>>   VecLoad [{{l\d+}},<<Phi1>>]    loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<Add:d\d+>>    VecAdd [<<Load>>,<<Rep>>]      loop:<<Loop>>      outer_loop:none
+  /// CHECK-DAG: <<SAD:d\d+>>    VecSADAccumulate [<<Phi2>>,<<Add>>,<<Load>>] loop:<<Loop>> outer_loop:none
+  /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons8>>]       loop:<<Loop>>      outer_loop:none
+  private static int sadShort2IntCastedExprLeft(short[] s) {
+    int sad = 0;
+    for (int i = 0; i < s.length; i++) {
+      short x = (short) (s[i] + 110);  // narrower part sign extends
+      sad += Math.abs(x - s[i]);
+    }
+    return sad;
+  }
+
+  public static void main() {
+    short[] interesting = {
+      (short) 0x0000,
+      (short) 0x0001,
+      (short) 0x0002,
+      (short) 0x0003,
+      (short) 0x0004,
+      (short) 0x1234,
+      (short) 0x8000,
+      (short) 0x8001,
+      (short) 0x8002,
+      (short) 0x8003,
+      (short) 0x8004,
+      (short) 0x8004,
+      (short) 0x7000,
+      (short) 0x7fff,
+      (short) 0xf000,
+      (short) 0xffff
+    };
+    short[] s = new short[64];
+    for (int i = 0; i < 64; i++) {
+      s[i] = interesting[i % interesting.length];
+    }
+
+    expectEquals(1067200, sadShort2IntParamRight(s, (short)-1));
+    expectEquals(1067200, sadShort2IntParamRight(s, (short) 0));
+    expectEquals(1067208, sadShort2IntParamRight(s, (short) 1));
+    expectEquals(1067224, sadShort2IntParamRight(s, (short) 2));
+    expectEquals(2635416, sadShort2IntParamRight(s, (short) 0x7fff));
+    expectEquals(1558824, sadShort2IntParamRight(s, (short) 0x8000));
+
+    expectEquals(1067200, sadShort2IntParamLeft(s, (short)-1));
+    expectEquals(1067200, sadShort2IntParamLeft(s, (short) 0));
+    expectEquals(1067208, sadShort2IntParamLeft(s, (short) 1));
+    expectEquals(1067224, sadShort2IntParamLeft(s, (short) 2));
+    expectEquals(2635416, sadShort2IntParamLeft(s, (short) 0x7fff));
+    expectEquals(1558824, sadShort2IntParamLeft(s, (short) 0x8000));
+
+    expectEquals(2635416, sadShort2IntConstRight(s));
+    expectEquals(2635416, sadShort2IntConstLeft(s));
+
+    expectEquals(1067200, sadShort2IntInvariantRight(s, -2));
+    expectEquals(1067200, sadShort2IntInvariantRight(s, -1));
+    expectEquals(1067208, sadShort2IntInvariantRight(s, 0));
+    expectEquals(1067224, sadShort2IntInvariantRight(s, 1));
+    expectEquals(2635416, sadShort2IntInvariantRight(s, 0x7ffe));
+    expectEquals(1558824, sadShort2IntInvariantRight(s, 0x7fff));
+
+    expectEquals(1067200, sadShort2IntInvariantLeft(s, -2));
+    expectEquals(1067200, sadShort2IntInvariantLeft(s, -1));
+    expectEquals(1067208, sadShort2IntInvariantLeft(s, 0));
+    expectEquals(1067224, sadShort2IntInvariantLeft(s, 1));
+    expectEquals(2635416, sadShort2IntInvariantLeft(s, 0x7ffe));
+    expectEquals(1558824, sadShort2IntInvariantLeft(s, 0x7fff));
+
+    expectEquals(268304, sadShort2IntCastedExprLeft(s));
+    expectEquals(268304, sadShort2IntCastedExprRight(s));
+
+    System.out.println("SimdSadShort3 passed");
+  }
+
+  private static void expectEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  private static void expectEquals(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+}
diff --git a/test/660-clinit/expected.txt b/test/660-clinit/expected.txt
index 9eb4941..ee1b479 100644
--- a/test/660-clinit/expected.txt
+++ b/test/660-clinit/expected.txt
@@ -1,4 +1,5 @@
 JNI_OnLoad called
+hello world
 A.a: 5
 A.a: 10
 B.b: 10
diff --git a/test/660-clinit/profile b/test/660-clinit/profile
index 0239f22..9eb4924 100644
--- a/test/660-clinit/profile
+++ b/test/660-clinit/profile
@@ -4,7 +4,10 @@
 LA;
 LB;
 LC;
+LE;
 LG;
 LGs;
 LObjectRef;
-
+LInvokeStatic;
+LClinitE;
+LPrint;
diff --git a/test/660-clinit/run b/test/660-clinit/run
index d24ef42..a0e79ee 100644
--- a/test/660-clinit/run
+++ b/test/660-clinit/run
@@ -14,4 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-exec ${RUN} $@ --profile
+exec ${RUN} $@ --profile -Xcompiler-option --initialize-app-image-classes=true
diff --git a/test/660-clinit/src/Main.java b/test/660-clinit/src/Main.java
index f9b068e..5fb5fe5 100644
--- a/test/660-clinit/src/Main.java
+++ b/test/660-clinit/src/Main.java
@@ -24,19 +24,28 @@
     if (!checkAppImageLoaded()) {
       System.out.println("AppImage not loaded.");
     }
+    if (!checkAppImageContains(ClInit.class)) {
+      System.out.println("ClInit class is not in app image!");
+    }
 
-    expectNotPreInit(Day.class);
-    expectNotPreInit(ClInit.class); // should pass
-    expectNotPreInit(A.class); // should pass
-    expectNotPreInit(B.class); // should fail
-    expectNotPreInit(C.class); // should fail
-    expectNotPreInit(G.class); // should fail
-    expectNotPreInit(Gs.class); // should fail
-    expectNotPreInit(Gss.class); // should fail
+    expectPreInit(ClInit.class);
+    expectPreInit(A.class);
+    expectPreInit(E.class);
+    expectNotPreInit(B.class);
+    expectNotPreInit(C.class);
+    expectNotPreInit(G.class);
+    expectNotPreInit(Gs.class);
+    expectNotPreInit(Gss.class);
+    expectPreInit(InvokeStatic.class);
+    expectNotPreInit(ClinitE.class);
 
     expectNotPreInit(Add.class);
     expectNotPreInit(Mul.class);
     expectNotPreInit(ObjectRef.class);
+    expectNotPreInit(Print.class);
+
+    Print p = new Print();
+    Gs gs = new Gs();
 
     A x = new A();
     System.out.println("A.a: " + A.a);
@@ -62,6 +71,10 @@
       System.out.println("a != 101");
     }
 
+    try {
+      ClinitE e = new ClinitE();
+    } catch (Error err) { }
+
     return;
   }
 
@@ -154,6 +167,13 @@
   }
 }
 
+class E {
+  public static final int e;
+  static {
+    e = 100;
+  }
+}
+
 class G {
   static G g;
   static int i;
@@ -182,9 +202,35 @@
   }
 }
 
+// test of INVOKE_STATIC instruction
+class InvokeStatic {
+  static int a;
+  static int b;
+  static {
+    a = Add.exec(10, 20);
+    b = Mul.exec(10, 20);
+  }
+}
+
 // non-image
 class Mul {
   static int exec(int a, int b) {
     return a * b;
   }
 }
+
+class ClinitE {
+  static {
+    if (Math.sin(3) < 0.5) {
+      // throw anyway, can't initialized
+      throw new ExceptionInInitializerError("Can't initialize this class!");
+    }
+  }
+}
+
+// fail because JNI
+class Print {
+  static {
+    System.out.println("hello world");
+  }
+}
diff --git a/test/661-checker-simd-reduc/src/Main.java b/test/661-checker-simd-reduc/src/Main.java
index 7b6f957..7c37d91 100644
--- a/test/661-checker-simd-reduc/src/Main.java
+++ b/test/661-checker-simd-reduc/src/Main.java
@@ -62,7 +62,7 @@
   /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]      loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:                 Return [<<Phi2>>]             loop:none
   //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: int Main.reductionInt(int[]) loop_optimization (after)
+  /// CHECK-START-{ARM,ARM64}: int Main.reductionInt(int[]) loop_optimization (after)
   /// CHECK-DAG: <<Cons:i\d+>>   IntConstant {{2|4}}           loop:none
   /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [{{i\d+}}]      loop:none
   /// CHECK-DAG: <<Phi:d\d+>>    Phi [<<Set>>,{{d\d+}}]        loop:<<Loop:B\d+>> outer_loop:none
@@ -71,6 +71,12 @@
   /// CHECK-DAG:                 Add [<<I>>,<<Cons>>]          loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG: <<Red:d\d+>>    VecReduce [<<Phi>>]           loop:none
   /// CHECK-DAG: <<Extr:i\d+>>   VecExtractScalar [<<Red>>]    loop:none
+
+  //  Check that full 128-bit Q-Register are saved across SuspendCheck slow path.
+  /// CHECK-START-ARM64: int Main.reductionInt(int[]) disassembly (after)
+  /// CHECK:                     SuspendCheckSlowPathARM64
+  /// CHECK:                       stur q<<RegNo:\d+>>, [sp, #<<Offset:\d+>>]
+  /// CHECK:                       ldur q<<RegNo>>, [sp, #<<Offset>>]
   private static int reductionInt(int[] x) {
     int sum = 0;
     for (int i = 0; i < x.length; i++) {
@@ -96,7 +102,7 @@
   //
   /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
   //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: int Main.reductionIntChain() loop_optimization (after)
+  /// CHECK-START-{ARM,ARM64}: int Main.reductionIntChain() loop_optimization (after)
   /// CHECK-DAG: <<Set1:d\d+>>   VecSetScalars [{{i\d+}}]       loop:none
   /// CHECK-DAG: <<Phi1:d\d+>>   Phi [<<Set1>>,{{d\d+}}]        loop:<<Loop1:B\d+>> outer_loop:none
   /// CHECK-DAG: <<Load1:d\d+>>  VecLoad [{{l\d+}},<<I1:i\d+>>] loop:<<Loop1>>      outer_loop:none
@@ -140,7 +146,7 @@
   //
   /// CHECK-EVAL: "<<Loop1>>" != "<<Loop2>>"
   //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: int Main.reductionIntToLoop(int[]) loop_optimization (after)
+  /// CHECK-START-{ARM,ARM64}: int Main.reductionIntToLoop(int[]) loop_optimization (after)
   /// CHECK-DAG: <<Cons:i\d+>>   IntConstant {{2|4}}           loop:none
   /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [{{i\d+}}]      loop:none
   /// CHECK-DAG: <<Phi:d\d+>>    Phi [<<Set>>,{{d\d+}}]        loop:<<Loop1:B\d+>> outer_loop:none
@@ -171,7 +177,7 @@
   /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]      loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:                 Return [<<Phi2>>]             loop:none
   //
-  /// CHECK-START-{ARM64,MIPS64}: long Main.reductionLong(long[]) loop_optimization (after)
+  /// CHECK-START-ARM64: long Main.reductionLong(long[]) loop_optimization (after)
   /// CHECK-DAG: <<Cons2:i\d+>>  IntConstant 2                 loop:none
   /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [{{j\d+}}]      loop:none
   /// CHECK-DAG: <<Phi:d\d+>>    Phi [<<Set>>,{{d\d+}}]        loop:<<Loop:B\d+>> outer_loop:none
@@ -223,7 +229,7 @@
   /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]      loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:                 Return [<<Phi2>>]             loop:none
   //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: int Main.reductionIntM1(int[]) loop_optimization (after)
+  /// CHECK-START-{ARM,ARM64}: int Main.reductionIntM1(int[]) loop_optimization (after)
   /// CHECK-DAG: <<Cons:i\d+>>   IntConstant {{2|4}}           loop:none
   /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [{{i\d+}}]      loop:none
   /// CHECK-DAG: <<Phi:d\d+>>    Phi [<<Set>>,{{d\d+}}]        loop:<<Loop:B\d+>> outer_loop:none
@@ -251,7 +257,7 @@
   /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]      loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:                 Return [<<Phi2>>]             loop:none
   //
-  /// CHECK-START-{ARM64,MIPS64}: long Main.reductionLongM1(long[]) loop_optimization (after)
+  /// CHECK-START-ARM64: long Main.reductionLongM1(long[]) loop_optimization (after)
   /// CHECK-DAG: <<Cons2:i\d+>>  IntConstant 2                 loop:none
   /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [{{j\d+}}]      loop:none
   /// CHECK-DAG: <<Phi:d\d+>>    Phi [<<Set>>,{{d\d+}}]        loop:<<Loop:B\d+>> outer_loop:none
@@ -302,7 +308,7 @@
   /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]      loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:                 Return [<<Phi2>>]             loop:none
   //
-  /// CHECK-START-{ARM,ARM64,MIPS64}: int Main.reductionMinusInt(int[]) loop_optimization (after)
+  /// CHECK-START-{ARM,ARM64}: int Main.reductionMinusInt(int[]) loop_optimization (after)
   /// CHECK-DAG: <<Cons:i\d+>>   IntConstant {{2|4}}           loop:none
   /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [{{i\d+}}]      loop:none
   /// CHECK-DAG: <<Phi:d\d+>>    Phi [<<Set>>,{{d\d+}}]        loop:<<Loop:B\d+>> outer_loop:none
@@ -330,7 +336,7 @@
   /// CHECK-DAG:                 Add [<<Phi1>>,<<Cons1>>]      loop:<<Loop>>      outer_loop:none
   /// CHECK-DAG:                 Return [<<Phi2>>]             loop:none
   //
-  /// CHECK-START-{ARM64,MIPS64}: long Main.reductionMinusLong(long[]) loop_optimization (after)
+  /// CHECK-START-ARM64: long Main.reductionMinusLong(long[]) loop_optimization (after)
   /// CHECK-DAG: <<Cons2:i\d+>>  IntConstant 2                 loop:none
   /// CHECK-DAG: <<Set:d\d+>>    VecSetScalars [{{j\d+}}]      loop:none
   /// CHECK-DAG: <<Phi:d\d+>>    Phi [<<Set>>,{{d\d+}}]        loop:<<Loop:B\d+>> outer_loop:none
diff --git a/test/661-oat-writer-layout/run b/test/661-oat-writer-layout/run
index f93d7b7..99e78af 100644
--- a/test/661-oat-writer-layout/run
+++ b/test/661-oat-writer-layout/run
@@ -16,5 +16,7 @@
 
 # Always use the 'profile'.
 # Note that this test only works with --compiler-filter=speed
-# -- we accomplish this by blacklisting other compiler variants.
-"${RUN}" "$@" --profile
+# -- we accomplish this by blacklisting other compiler variants
+# and we also have to pass the option explicitly as dex2oat
+# defaults to speed-profile if a profile is specified.
+"${RUN}" "$@" --profile -Xcompiler-option --compiler-filter=speed
diff --git a/test/665-checker-simd-zero/src/Main.java b/test/665-checker-simd-zero/src/Main.java
index 5c581c4..66eea64 100644
--- a/test/665-checker-simd-zero/src/Main.java
+++ b/test/665-checker-simd-zero/src/Main.java
@@ -24,7 +24,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                  loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Zero>>] loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.zeroz(boolean[]) loop_optimization (after)
+  /// CHECK-START-ARM64: void Main.zeroz(boolean[]) loop_optimization (after)
   /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0                        loop:none
   /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>]        loop:none
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                  loop:<<Loop:B\d+>> outer_loop:none
@@ -40,7 +40,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                  loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Zero>>] loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.zerob(byte[]) loop_optimization (after)
+  /// CHECK-START-ARM64: void Main.zerob(byte[]) loop_optimization (after)
   /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0                        loop:none
   /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>]        loop:none
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                  loop:<<Loop:B\d+>> outer_loop:none
@@ -56,7 +56,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                  loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Zero>>] loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.zeroc(char[]) loop_optimization (after)
+  /// CHECK-START-ARM64: void Main.zeroc(char[]) loop_optimization (after)
   /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0                        loop:none
   /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>]        loop:none
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                  loop:<<Loop:B\d+>> outer_loop:none
@@ -72,7 +72,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                  loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Zero>>] loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.zeros(short[]) loop_optimization (after)
+  /// CHECK-START-ARM64: void Main.zeros(short[]) loop_optimization (after)
   /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0                        loop:none
   /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>]        loop:none
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                  loop:<<Loop:B\d+>> outer_loop:none
@@ -88,7 +88,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                  loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Zero>>] loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.zeroi(int[]) loop_optimization (after)
+  /// CHECK-START-ARM64: void Main.zeroi(int[]) loop_optimization (after)
   /// CHECK-DAG: <<Zero:i\d+>> IntConstant 0                        loop:none
   /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>]        loop:none
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                  loop:<<Loop:B\d+>> outer_loop:none
@@ -104,7 +104,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                  loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Zero>>] loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.zerol(long[]) loop_optimization (after)
+  /// CHECK-START-ARM64: void Main.zerol(long[]) loop_optimization (after)
   /// CHECK-DAG: <<Zero:j\d+>> LongConstant 0                       loop:none
   /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>]        loop:none
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                  loop:<<Loop:B\d+>> outer_loop:none
@@ -120,7 +120,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                  loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Zero>>] loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.zerof(float[]) loop_optimization (after)
+  /// CHECK-START-ARM64: void Main.zerof(float[]) loop_optimization (after)
   /// CHECK-DAG: <<Zero:f\d+>> FloatConstant 0                      loop:none
   /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>]        loop:none
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                  loop:<<Loop:B\d+>> outer_loop:none
@@ -136,7 +136,7 @@
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                  loop:<<Loop:B\d+>> outer_loop:none
   /// CHECK-DAG:               ArraySet [{{l\d+}},<<Phi>>,<<Zero>>] loop:<<Loop>>      outer_loop:none
   //
-  /// CHECK-START-{ARM64,MIPS64}: void Main.zerod(double[]) loop_optimization (after)
+  /// CHECK-START-ARM64: void Main.zerod(double[]) loop_optimization (after)
   /// CHECK-DAG: <<Zero:d\d+>> DoubleConstant 0                     loop:none
   /// CHECK-DAG: <<Repl:d\d+>> VecReplicateScalar [<<Zero>>]        loop:none
   /// CHECK-DAG: <<Phi:i\d+>>  Phi                                  loop:<<Loop:B\d+>> outer_loop:none
diff --git a/test/667-jit-jni-stub/jit_jni_stub_test.cc b/test/667-jit-jni-stub/jit_jni_stub_test.cc
index 82e06fc..c21971f 100644
--- a/test/667-jit-jni-stub/jit_jni_stub_test.cc
+++ b/test/667-jit-jni-stub/jit_jni_stub_test.cc
@@ -31,7 +31,7 @@
   static bool isNextJitGcFull(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
     CHECK(Runtime::Current()->GetJit() != nullptr);
     jit::JitCodeCache* cache = Runtime::Current()->GetJit()->GetCodeCache();
-    MutexLock mu(self, cache->lock_);
+    MutexLock mu(self, *Locks::jit_lock_);
     return cache->ShouldDoFullCollection();
   }
 };
diff --git a/test/670-bitstring-type-check/run b/test/670-bitstring-type-check/run
new file mode 100644
index 0000000..a189dc5
--- /dev/null
+++ b/test/670-bitstring-type-check/run
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2008 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This test can take 7-11 mins, so raise the default 10 min timeout.
+export ART_TIME_OUT_MULTIPLIER=2
+
+exec ${RUN} "$@"
diff --git a/test/674-hiddenapi/hiddenapi.cc b/test/674-hiddenapi/hiddenapi.cc
index 3dc2789..742b6b3 100644
--- a/test/674-hiddenapi/hiddenapi.cc
+++ b/test/674-hiddenapi/hiddenapi.cc
@@ -27,6 +27,9 @@
 namespace art {
 namespace Test674HiddenApi {
 
+// Should be the same as dalvik.system.VMRuntime.PREVENT_META_REFLECTION_BLACKLIST_ACCESS
+static constexpr uint64_t kPreventMetaReflectionBlacklistAccess = 142365358;
+
 std::vector<std::vector<std::unique_ptr<const DexFile>>> opened_dex_files;
 
 extern "C" JNIEXPORT void JNICALL Java_Main_init(JNIEnv*, jclass) {
@@ -76,7 +79,7 @@
 
   ScopedObjectAccess soa(Thread::Current());
   for (std::unique_ptr<const DexFile>& dex_file : opened_dex_files[index]) {
-    Runtime::Current()->GetClassLinker()->AppendToBootClassPath(Thread::Current(), *dex_file.get());
+    Runtime::Current()->GetClassLinker()->AppendToBootClassPath(Thread::Current(), dex_file.get());
   }
 
   return int_index;
@@ -316,5 +319,18 @@
   return static_cast<jint>(kAccHiddenapiBits);
 }
 
+extern "C" JNIEXPORT void JNICALL Java_Reflection_setHiddenApiCheckHardening(JNIEnv*, jclass,
+    jboolean value) {
+  std::set<uint64_t> disabled_changes = Runtime::Current()->GetDisabledCompatChanges();
+  if (value == JNI_TRUE) {
+    // If hidden api check hardening is enabled, remove it from the set of disabled changes.
+    disabled_changes.erase(kPreventMetaReflectionBlacklistAccess);
+  } else {
+    // If hidden api check hardening is disabled, add it to the set of disabled changes.
+    disabled_changes.insert(kPreventMetaReflectionBlacklistAccess);
+  }
+  Runtime::Current()->SetDisabledCompatChanges(disabled_changes);
+}
+
 }  // namespace Test674HiddenApi
 }  // namespace art
diff --git a/test/674-hiddenapi/src-ex/ChildClass.java b/test/674-hiddenapi/src-ex/ChildClass.java
index f120bda..9295655 100644
--- a/test/674-hiddenapi/src-ex/ChildClass.java
+++ b/test/674-hiddenapi/src-ex/ChildClass.java
@@ -105,6 +105,13 @@
     boolean isSameBoot = (isParentInBoot == isChildInBoot);
     boolean isDebuggable = VMRuntime.getRuntime().isJavaDebuggable();
 
+    // For compat reasons, meta-reflection should still be usable by apps if hidden api check
+    // hardening is disabled (i.e. target SDK is Q or earlier). The only configuration where this
+    // workaround used to work is for ChildClass in the Application domain and ParentClass in the
+    // Platform domain, so only test that configuration with hidden api check hardening disabled.
+    boolean testHiddenApiCheckHardeningDisabled =
+        (childDomain == DexDomain.Application) && (parentDomain == DexDomain.Platform);
+
     // Run meaningful combinations of access flags.
     for (Hiddenness hiddenness : Hiddenness.values()) {
       final Behaviour expected;
@@ -138,18 +145,19 @@
           for (Class klass : new Class<?>[] { ParentClass.class, ParentInterface.class }) {
             String baseName = visibility.name() + suffix;
             checkField(klass, "field" + baseName, isStatic, visibility, expected,
-                invokesMemberCallback);
+                invokesMemberCallback, testHiddenApiCheckHardeningDisabled);
             checkMethod(klass, "method" + baseName, isStatic, visibility, expected,
-                invokesMemberCallback);
+                invokesMemberCallback, testHiddenApiCheckHardeningDisabled);
           }
 
           // Check whether one can use a class constructor.
-          checkConstructor(ParentClass.class, visibility, hiddenness, expected);
+          checkConstructor(ParentClass.class, visibility, hiddenness, expected,
+                testHiddenApiCheckHardeningDisabled);
 
           // Check whether one can use an interface default method.
           String name = "method" + visibility.name() + "Default" + hiddenness.name();
           checkMethod(ParentInterface.class, name, /*isStatic*/ false, visibility, expected,
-              invokesMemberCallback);
+              invokesMemberCallback, testHiddenApiCheckHardeningDisabled);
         }
 
         // Test whether static linking succeeds.
@@ -212,7 +220,8 @@
   }
 
   private static void checkField(Class<?> klass, String name, boolean isStatic,
-      Visibility visibility, Behaviour behaviour, boolean invokesMemberCallback) throws Exception {
+      Visibility visibility, Behaviour behaviour, boolean invokesMemberCallback,
+      boolean testHiddenApiCheckHardeningDisabled) throws Exception {
 
     boolean isPublic = (visibility == Visibility.Public);
     boolean canDiscover = (behaviour != Behaviour.Denied);
@@ -277,6 +286,23 @@
                               canDiscover);
     }
 
+    // Check for meta reflection.
+
+    // With hidden api check hardening enabled, only white and light greylisted fields should be
+    // discoverable.
+    if (Reflection.canDiscoverFieldWithMetaReflection(klass, name, true) != canDiscover) {
+      throwDiscoveryException(klass, name, false,
+          "Meta reflection with hidden api hardening enabled", canDiscover);
+    }
+
+    if (testHiddenApiCheckHardeningDisabled) {
+      // With hidden api check hardening disabled, all fields should be discoverable.
+      if (Reflection.canDiscoverFieldWithMetaReflection(klass, name, false) != true) {
+        throwDiscoveryException(klass, name, false,
+            "Meta reflection with hidden api hardening enabled", canDiscover);
+      }
+    }
+
     if (canDiscover) {
       // Test that modifiers are unaffected.
 
@@ -305,7 +331,8 @@
   }
 
   private static void checkMethod(Class<?> klass, String name, boolean isStatic,
-      Visibility visibility, Behaviour behaviour, boolean invokesMemberCallback) throws Exception {
+      Visibility visibility, Behaviour behaviour, boolean invokesMemberCallback,
+      boolean testHiddenApiCheckHardeningDisabled) throws Exception {
 
     boolean isPublic = (visibility == Visibility.Public);
     if (klass.isInterface() && !isPublic) {
@@ -353,6 +380,23 @@
                               canDiscover);
     }
 
+    // Check for meta reflection.
+
+    // With hidden api check hardening enabled, only white and light greylisted methods should be
+    // discoverable.
+    if (Reflection.canDiscoverMethodWithMetaReflection(klass, name, true) != canDiscover) {
+      throwDiscoveryException(klass, name, false,
+          "Meta reflection with hidden api hardening enabled", canDiscover);
+    }
+
+    if (testHiddenApiCheckHardeningDisabled) {
+      // With hidden api check hardening disabled, all methods should be discoverable.
+      if (Reflection.canDiscoverMethodWithMetaReflection(klass, name, false) != true) {
+        throwDiscoveryException(klass, name, false,
+            "Meta reflection with hidden api hardening enabled", canDiscover);
+      }
+    }
+
     // Finish here if we could not discover the method.
 
     if (canDiscover) {
@@ -381,7 +425,7 @@
   }
 
   private static void checkConstructor(Class<?> klass, Visibility visibility, Hiddenness hiddenness,
-      Behaviour behaviour) throws Exception {
+      Behaviour behaviour, boolean testHiddenApiCheckHardeningDisabled) throws Exception {
 
     boolean isPublic = (visibility == Visibility.Public);
     String signature = "(" + visibility.mAssociatedType.mShorty +
@@ -436,6 +480,23 @@
                               canDiscover);
     }
 
+    // Check for meta reflection.
+
+    // With hidden api check hardening enabled, only white and light greylisted constructors should
+    // be discoverable.
+    if (Reflection.canDiscoverConstructorWithMetaReflection(klass, args, true) != canDiscover) {
+      throwDiscoveryException(klass, fullName, false,
+          "Meta reflection with hidden api hardening enabled", canDiscover);
+    }
+
+    if (testHiddenApiCheckHardeningDisabled) {
+      // With hidden api check hardening disabled, all constructors should be discoverable.
+      if (Reflection.canDiscoverConstructorWithMetaReflection(klass, args, false) != true) {
+        throwDiscoveryException(klass, fullName, false,
+            "Meta reflection with hidden api hardening enabled", canDiscover);
+      }
+    }
+
     if (canDiscover) {
       // Test whether we can invoke the constructor.
 
diff --git a/test/674-hiddenapi/src-ex/Reflection.java b/test/674-hiddenapi/src-ex/Reflection.java
index 3667e91..173b9af 100644
--- a/test/674-hiddenapi/src-ex/Reflection.java
+++ b/test/674-hiddenapi/src-ex/Reflection.java
@@ -16,6 +16,7 @@
 
 import java.lang.reflect.Constructor;
 import java.lang.reflect.Field;
+import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Modifier;
 import java.util.Arrays;
@@ -186,7 +187,45 @@
     }
   }
 
+  public static boolean canDiscoverMethodWithMetaReflection(Class<?> klass, String name,
+      boolean hardeningEnabled) {
+    try {
+      setHiddenApiCheckHardening(hardeningEnabled);
+      Method metaGetDeclaredMethod =
+          Class.class.getDeclaredMethod("getDeclaredMethod", String.class, Class[].class);
+      // Assumes method without parameters.
+      return ((Method)metaGetDeclaredMethod.invoke(klass, name,  null)) != null;
+    } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException ex) {
+      return false;
+    }
+  }
+
+  public static boolean canDiscoverFieldWithMetaReflection(Class<?> klass, String name,
+      boolean hardeningEnabled) {
+    try {
+      setHiddenApiCheckHardening(hardeningEnabled);
+      Method metaGetDeclaredField =
+          Class.class.getDeclaredMethod("getDeclaredField", String.class);
+      return ((Field)metaGetDeclaredField.invoke(klass, name)) != null;
+    } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException ex) {
+      return false;
+    }
+  }
+
+  public static boolean canDiscoverConstructorWithMetaReflection(Class<?> klass, Class<?> args[],
+      boolean hardeningEnabled) {
+    try {
+      setHiddenApiCheckHardening(hardeningEnabled);
+      Method metaGetDeclaredConstructor =
+          Class.class.getDeclaredMethod("getDeclaredConstructor", Class[].class);
+      return ((Constructor<?>)metaGetDeclaredConstructor.invoke(klass, (Object)args)) != null;
+    } catch (NoSuchMethodException | InvocationTargetException | IllegalAccessException ex) {
+      return false;
+    }
+  }
+
   private static native int getHiddenApiAccessFlags();
+  private static native void setHiddenApiCheckHardening(boolean value);
 
   public static boolean canObserveFieldHiddenAccessFlags(Class<?> klass, String name)
       throws Exception {
diff --git a/test/674-hotness-compiled/src/Main.java b/test/674-hotness-compiled/src/Main.java
index 76ec927..5f0d10a 100644
--- a/test/674-hotness-compiled/src/Main.java
+++ b/test/674-hotness-compiled/src/Main.java
@@ -18,8 +18,8 @@
   public static void $noinline$hotnessCount() {
   }
 
-  public static void $noinline$hotnessCountWithLoop() {
-    for (int i = 0; i < 100; i++) {
+  public static void $noinline$hotnessCountWithLoop(int count) {
+    for (int i = 0; i < count; i++) {
       $noinline$hotnessCount();
     }
   }
@@ -35,9 +35,17 @@
       throw new Error("Expected hotness counter to be updated");
     }
 
-    $noinline$hotnessCountWithLoop();
-    if (getHotnessCounter(Main.class, "$noinline$hotnessCountWithLoop") <= counter) {
-      throw new Error("Expected hotness counter of a loop to be greater than without loop");
+    $noinline$hotnessCountWithLoop(1000);
+    int newCounter = getHotnessCounter(Main.class, "$noinline$hotnessCountWithLoop");
+    if (newCounter <= counter) {
+      throw new Error("Expected counter " + newCounter + " to be larger than " + counter);
+    }
+    counter = newCounter;
+
+    $noinline$hotnessCountWithLoop(65500);
+    newCounter = getHotnessCounter(Main.class, "$noinline$hotnessCountWithLoop");
+    if (newCounter <= counter) {
+      throw new Error("Expected counter " + newCounter + " to be larger than " + counter);
     }
   }
 
diff --git a/test/684-checker-simd-dotprod/src/Main.java b/test/684-checker-simd-dotprod/src/Main.java
index e0c8716..aa03d1e 100644
--- a/test/684-checker-simd-dotprod/src/Main.java
+++ b/test/684-checker-simd-dotprod/src/Main.java
@@ -17,6 +17,7 @@
 import other.TestByte;
 import other.TestCharShort;
 import other.TestVarious;
+import other.TestFloatDouble;
 
 /**
  * Tests for dot product idiom vectorization.
@@ -26,6 +27,7 @@
      TestByte.run();
      TestCharShort.run();
      TestVarious.run();
+     TestFloatDouble.run();
      System.out.println("passed");
   }
 }
diff --git a/test/684-checker-simd-dotprod/src/other/TestFloatDouble.java b/test/684-checker-simd-dotprod/src/other/TestFloatDouble.java
new file mode 100644
index 0000000..b155ae1
--- /dev/null
+++ b/test/684-checker-simd-dotprod/src/other/TestFloatDouble.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package other;
+
+/**
+ * Tests for dot product idiom vectorization: char and short case.
+ */
+public class TestFloatDouble {
+
+  public static final int ARRAY_SIZE = 1024;
+
+
+  /// CHECK-START-{X86_64}: float other.TestFloatDouble.testDotProdSimpleFloat(float[], float[]) loop_optimization (after)
+  /// CHECK-NOT:                 VecDotProd
+  public static final float testDotProdSimpleFloat(float[] a, float[] b) {
+    float sum = 0;
+    for (int i = 0; i < b.length; i++) {
+      sum += a[i] * b[i];
+    }
+    return sum;
+  }
+
+
+  /// CHECK-START-{X86_64}: double other.TestFloatDouble.testDotProdSimpleDouble(double[], double[]) loop_optimization (after)
+  /// CHECK-NOT:                 VecDotProd
+
+  public static final double testDotProdSimpleDouble(double[] a, double[] b) {
+    double sum = 0;
+    for (int i = 0; i < b.length; i++) {
+      sum += a[i] * b[i];
+    }
+    return sum;
+  }
+
+  private static void expectEquals(float expected, float result) {
+    if (Float.compare(expected, result) != 0) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  private static void expectEquals(double expected, double result) {
+    if (Double.compare(expected, result) != 0) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  public static void run() {
+    final float MAX_F = Float.MAX_VALUE;
+    final float MIN_F = Float.MIN_VALUE;
+    final double MAX_D = Double.MAX_VALUE;
+    final double MIN_D = Double.MIN_VALUE;
+
+    double[] a = new double[1024];
+    for (int i = 0; i != 1024; ++i) a[i] = MAX_D;
+    double[] b = new double[1024];
+    for (int i = 0; i != 1024; ++i) b[i] = ((i & 1) == 0) ? 1.0 : -1.0;
+    expectEquals(0.0, testDotProdSimpleDouble(a,b));
+
+    float[] f1_1 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3.33f, 0.125f, 3.0f, 0.25f};
+    float[] f2_1 = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6.125f, 2.25f, 1.213f, 0.5f};
+    expectEquals(24.4415f, testDotProdSimpleFloat(f1_1, f2_1));
+
+    float [] f1_2 = { 0, 0, 0, 0, 0, 0, 0, 0,
+                      0, 0, 0, 0,  0.63671875f, 0.76953125f, 0.22265625f, 1.0f};
+    float [] f2_2 = { 0, 0, 0, 0, 0, 0, 0, 0,
+                      0, 0, 0, 0, MIN_F, MAX_F, MAX_F, MIN_F };
+    expectEquals(3.376239E38f, testDotProdSimpleFloat(f1_2, f2_2));
+
+    float[] f1_3 = { 0xc0000000, 0xc015c28f, 0x411dd42c, 0, 0, 0, 0,
+                     0, 0, 0, 0, 0, 0, 0, MIN_F, MIN_F };
+    float[] f2_3 = { 0x3f4c779a, 0x408820c5, 0, 0, 0, 0, 0,
+                     0, 0, 0, 0, 0, 0x00000000, 0, MAX_F, MAX_F };
+    expectEquals(-2.30124471E18f, testDotProdSimpleFloat(f1_3, f2_3));
+  }
+
+  public static void main(String[] args) {
+    run();
+  }
+}
diff --git a/test/688-shared-library/check b/test/688-shared-library/check
index 0b6c9e4..55847cd 100644
--- a/test/688-shared-library/check
+++ b/test/688-shared-library/check
@@ -16,6 +16,7 @@
 
 # Finalizers of DexFile will complain not being able to close
 # the main dex file, as it's still open. That's OK to ignore.
-sed -e '/^E\/System/d' "$2" > "$2.tmp"
+# Oat file manager will also complain about duplicate dex files. Ignore.
+sed -e '/^E\/System/d' "$2" | sed -e '/.*oat_file_manager.*/d' > "$2.tmp"
 
 diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/689-zygote-jit-deopt/run b/test/689-zygote-jit-deopt/run
new file mode 100644
index 0000000..7b4b7eb
--- /dev/null
+++ b/test/689-zygote-jit-deopt/run
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+./default-run "$@" --zygote
diff --git a/test/689-zygote-jit-deopt/src/Main.java b/test/689-zygote-jit-deopt/src/Main.java
index 330663e..fbdc728 100644
--- a/test/689-zygote-jit-deopt/src/Main.java
+++ b/test/689-zygote-jit-deopt/src/Main.java
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+import dalvik.system.ZygoteHooks;
+
 public class Main {
   public static void main(String[] args) {
     System.loadLibrary(args[0]);
@@ -21,7 +23,10 @@
       return;
     }
     ensureJitCompiled(Object.class, "toString");
-    transitionJitFromZygote();
+    ZygoteHooks.preFork();
+    ZygoteHooks.postForkChild(
+        /*flags=*/0, /*is_system_server=*/false, /*is_zygote=*/false, /*instruction_set=*/null);
+    ZygoteHooks.postForkCommon();
     deoptimizeBootImage();
     if (hasJitCompiledEntrypoint(Object.class, "toString")) {
       throw new Error("Expected Object.toString to be deoptimized");
@@ -32,5 +37,4 @@
   private static native void ensureJitCompiled(Class<?> cls, String name);
   private static native boolean hasJitCompiledEntrypoint(Class<?> cls, String name);
   private static native void deoptimizeBootImage();
-  private static native void transitionJitFromZygote();
 }
diff --git a/test/640-checker-byte-simd/expected.txt b/test/697-checker-string-append/expected.txt
similarity index 100%
rename from test/640-checker-byte-simd/expected.txt
rename to test/697-checker-string-append/expected.txt
diff --git a/test/697-checker-string-append/info.txt b/test/697-checker-string-append/info.txt
new file mode 100644
index 0000000..cb612cb
--- /dev/null
+++ b/test/697-checker-string-append/info.txt
@@ -0,0 +1 @@
+Test for String append pattern recognition.
diff --git a/test/697-checker-string-append/src/Main.java b/test/697-checker-string-append/src/Main.java
new file mode 100644
index 0000000..c63c328
--- /dev/null
+++ b/test/697-checker-string-append/src/Main.java
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+    public static void main(String[] args) {
+        testAppendStringAndLong();
+        testAppendStringAndInt();
+        testAppendStringAndString();
+        testMiscelaneous();
+        testNoArgs();
+        testInline();
+        testEquals();
+        System.out.println("passed");
+    }
+
+    private static final String APPEND_LONG_PREFIX = "Long/";
+    private static final String[] APPEND_LONG_TEST_CASES = {
+        "Long/0",
+        "Long/1",
+        "Long/9",
+        "Long/10",
+        "Long/99",
+        "Long/100",
+        "Long/999",
+        "Long/1000",
+        "Long/9999",
+        "Long/10000",
+        "Long/99999",
+        "Long/100000",
+        "Long/999999",
+        "Long/1000000",
+        "Long/9999999",
+        "Long/10000000",
+        "Long/99999999",
+        "Long/100000000",
+        "Long/999999999",
+        "Long/1000000000",
+        "Long/9999999999",
+        "Long/10000000000",
+        "Long/99999999999",
+        "Long/100000000000",
+        "Long/999999999999",
+        "Long/1000000000000",
+        "Long/9999999999999",
+        "Long/10000000000000",
+        "Long/99999999999999",
+        "Long/100000000000000",
+        "Long/999999999999999",
+        "Long/1000000000000000",
+        "Long/9999999999999999",
+        "Long/10000000000000000",
+        "Long/99999999999999999",
+        "Long/100000000000000000",
+        "Long/999999999999999999",
+        "Long/1000000000000000000",
+        "Long/9223372036854775807",  // Long.MAX_VALUE
+        "Long/-1",
+        "Long/-9",
+        "Long/-10",
+        "Long/-99",
+        "Long/-100",
+        "Long/-999",
+        "Long/-1000",
+        "Long/-9999",
+        "Long/-10000",
+        "Long/-99999",
+        "Long/-100000",
+        "Long/-999999",
+        "Long/-1000000",
+        "Long/-9999999",
+        "Long/-10000000",
+        "Long/-99999999",
+        "Long/-100000000",
+        "Long/-999999999",
+        "Long/-1000000000",
+        "Long/-9999999999",
+        "Long/-10000000000",
+        "Long/-99999999999",
+        "Long/-100000000000",
+        "Long/-999999999999",
+        "Long/-1000000000000",
+        "Long/-9999999999999",
+        "Long/-10000000000000",
+        "Long/-99999999999999",
+        "Long/-100000000000000",
+        "Long/-999999999999999",
+        "Long/-1000000000000000",
+        "Long/-9999999999999999",
+        "Long/-10000000000000000",
+        "Long/-99999999999999999",
+        "Long/-100000000000000000",
+        "Long/-999999999999999999",
+        "Long/-1000000000000000000",
+        "Long/-9223372036854775808",  // Long.MIN_VALUE
+    };
+
+    /// CHECK-START: java.lang.String Main.$noinline$appendStringAndLong(java.lang.String, long) instruction_simplifier (before)
+    /// CHECK-NOT:              StringBuilderAppend
+
+    /// CHECK-START: java.lang.String Main.$noinline$appendStringAndLong(java.lang.String, long) instruction_simplifier (after)
+    /// CHECK:                  StringBuilderAppend
+    public static String $noinline$appendStringAndLong(String s, long l) {
+        return new StringBuilder().append(s).append(l).toString();
+    }
+
+    public static void testAppendStringAndLong() {
+        for (String expected : APPEND_LONG_TEST_CASES) {
+            long l = Long.valueOf(expected.substring(APPEND_LONG_PREFIX.length()));
+            String result = $noinline$appendStringAndLong(APPEND_LONG_PREFIX, l);
+            assertEquals(expected, result);
+        }
+    }
+
+    private static final String APPEND_INT_PREFIX = "Int/";
+    private static final String[] APPEND_INT_TEST_CASES = {
+        "Int/0",
+        "Int/1",
+        "Int/9",
+        "Int/10",
+        "Int/99",
+        "Int/100",
+        "Int/999",
+        "Int/1000",
+        "Int/9999",
+        "Int/10000",
+        "Int/99999",
+        "Int/100000",
+        "Int/999999",
+        "Int/1000000",
+        "Int/9999999",
+        "Int/10000000",
+        "Int/99999999",
+        "Int/100000000",
+        "Int/999999999",
+        "Int/1000000000",
+        "Int/2147483647",  // Integer.MAX_VALUE
+        "Int/-1",
+        "Int/-9",
+        "Int/-10",
+        "Int/-99",
+        "Int/-100",
+        "Int/-999",
+        "Int/-1000",
+        "Int/-9999",
+        "Int/-10000",
+        "Int/-99999",
+        "Int/-100000",
+        "Int/-999999",
+        "Int/-1000000",
+        "Int/-9999999",
+        "Int/-10000000",
+        "Int/-99999999",
+        "Int/-100000000",
+        "Int/-999999999",
+        "Int/-1000000000",
+        "Int/-2147483648",  // Integer.MIN_VALUE
+    };
+
+    /// CHECK-START: java.lang.String Main.$noinline$appendStringAndInt(java.lang.String, int) instruction_simplifier (before)
+    /// CHECK-NOT:              StringBuilderAppend
+
+    /// CHECK-START: java.lang.String Main.$noinline$appendStringAndInt(java.lang.String, int) instruction_simplifier (after)
+    /// CHECK:                  StringBuilderAppend
+    public static String $noinline$appendStringAndInt(String s, int i) {
+        return new StringBuilder().append(s).append(i).toString();
+    }
+
+    public static void testAppendStringAndInt() {
+        for (String expected : APPEND_INT_TEST_CASES) {
+            int i = Integer.valueOf(expected.substring(APPEND_INT_PREFIX.length()));
+            String result = $noinline$appendStringAndInt(APPEND_INT_PREFIX, i);
+            assertEquals(expected, result);
+        }
+    }
+
+    public static String $noinline$appendStringAndString(String s1, String s2) {
+        return new StringBuilder().append(s1).append(s2).toString();
+    }
+
+    public static void testAppendStringAndString() {
+        assertEquals("nullnull", $noinline$appendStringAndString(null, null));
+        assertEquals("nullTEST", $noinline$appendStringAndString(null, "TEST"));
+        assertEquals("TESTnull", $noinline$appendStringAndString("TEST", null));
+        assertEquals("abcDEFGH", $noinline$appendStringAndString("abc", "DEFGH"));
+        // Test with a non-ASCII character.
+        assertEquals("test\u0131", $noinline$appendStringAndString("test", "\u0131"));
+        assertEquals("\u0131test", $noinline$appendStringAndString("\u0131", "test"));
+        assertEquals("\u0131test\u0131", $noinline$appendStringAndString("\u0131", "test\u0131"));
+    }
+
+    /// CHECK-START: java.lang.String Main.$noinline$appendSLILC(java.lang.String, long, int, long, char) instruction_simplifier (before)
+    /// CHECK-NOT:              StringBuilderAppend
+
+    /// CHECK-START: java.lang.String Main.$noinline$appendSLILC(java.lang.String, long, int, long, char) instruction_simplifier (after)
+    /// CHECK:                  StringBuilderAppend
+    public static String $noinline$appendSLILC(String s,
+                                               long l1,
+                                               int i,
+                                               long l2,
+                                               char c) {
+        return new StringBuilder().append(s)
+                                  .append(l1)
+                                  .append(i)
+                                  .append(l2)
+                                  .append(c).toString();
+    }
+
+    public static void testMiscelaneous() {
+        assertEquals("x17-1q",
+                     $noinline$appendSLILC("x", 1L, 7, -1L, 'q'));
+        assertEquals("null17-1q",
+                     $noinline$appendSLILC(null, 1L, 7, -1L, 'q'));
+        assertEquals("x\u013117-1q",
+                     $noinline$appendSLILC("x\u0131", 1L, 7, -1L, 'q'));
+        assertEquals("x427-1q",
+                     $noinline$appendSLILC("x", 42L, 7, -1L, 'q'));
+        assertEquals("x1-42-1q",
+                     $noinline$appendSLILC("x", 1L, -42, -1L, 'q'));
+        assertEquals("x17424242q",
+                     $noinline$appendSLILC("x", 1L, 7, 424242L, 'q'));
+        assertEquals("x17-1\u0131",
+                     $noinline$appendSLILC("x", 1L, 7, -1L, '\u0131'));
+    }
+
+    public static String $inline$testInlineInner(StringBuilder sb, String s, int i) {
+        return sb.append(s).append(i).toString();
+    }
+
+    /// CHECK-START: java.lang.String Main.$noinline$testInlineOuter(java.lang.String, int) instruction_simplifier$after_inlining (before)
+    /// CHECK-NOT:              StringBuilderAppend
+
+    /// CHECK-START: java.lang.String Main.$noinline$testInlineOuter(java.lang.String, int) instruction_simplifier$after_inlining (after)
+    /// CHECK:                  StringBuilderAppend
+    public static String $noinline$testInlineOuter(String s, int i) {
+        StringBuilder sb = new StringBuilder();
+        return $inline$testInlineInner(sb, s, i);
+    }
+
+    public static void testInline() {
+        assertEquals("x42", $noinline$testInlineOuter("x", 42));
+    }
+
+    /// CHECK-START: java.lang.String Main.$noinline$appendNothing() instruction_simplifier (before)
+    /// CHECK-NOT:              StringBuilderAppend
+
+    /// CHECK-START: java.lang.String Main.$noinline$appendNothing() instruction_simplifier (after)
+    /// CHECK-NOT:              StringBuilderAppend
+    public static String $noinline$appendNothing() {
+        return new StringBuilder().toString();
+    }
+
+    public static void testNoArgs() {
+        assertEquals("", $noinline$appendNothing());
+    }
+
+    /// CHECK-START: boolean Main.$noinline$testAppendEquals(java.lang.String, int) instruction_simplifier (before)
+    /// CHECK-NOT:              StringBuilderAppend
+
+    /// CHECK-START: boolean Main.$noinline$testAppendEquals(java.lang.String, int) instruction_simplifier (after)
+    /// CHECK:                  StringBuilderAppend
+    public static boolean $noinline$testAppendEquals(String s, int i) {
+      // Regression test for b/151107293.
+      // When a string is used as both receiver and argument of String.equals(), we DCHECK()
+      // that it cannot be null. However, when replacing the call to StringBuilder.toString()
+      // with the HStringBuilderAppend(), the former reported CanBeNull() as false and
+      // therefore no explicit null checks were needed, but the replacement reported
+      // CanBeNull() as true, so when the result was used in String.equals() for both
+      // receiver and argument, the DCHECK() failed. This was fixed by overriding
+      // CanBeNull() in HStringBuilderAppend to correctly return false; the string that
+      // previously didn't require null check still does not require it.
+      String str = new StringBuilder().append(s).append(i).toString();
+      return str.equals(str);
+    }
+
+    public static void testEquals() {
+      if (!$noinline$testAppendEquals("Test", 42)) {
+        throw new Error("str.equals(str) is false");
+      }
+    }
+
+    public static void assertEquals(String expected, String actual) {
+        if (!expected.equals(actual)) {
+            throw new AssertionError("Expected: " + expected + ", actual: " + actual);
+        }
+    }
+}
diff --git a/test/660-checker-simd-sad-short2/expected.txt b/test/699-checker-string-append2/expected.txt
similarity index 100%
rename from test/660-checker-simd-sad-short2/expected.txt
rename to test/699-checker-string-append2/expected.txt
diff --git a/test/699-checker-string-append2/info.txt b/test/699-checker-string-append2/info.txt
new file mode 100644
index 0000000..1bd4a1a
--- /dev/null
+++ b/test/699-checker-string-append2/info.txt
@@ -0,0 +1 @@
+Regression tests for String append pattern recognition bugs. b/146014745
diff --git a/test/699-checker-string-append2/smali/B146014745.smali b/test/699-checker-string-append2/smali/B146014745.smali
new file mode 100644
index 0000000..0a20b41
--- /dev/null
+++ b/test/699-checker-string-append2/smali/B146014745.smali
@@ -0,0 +1,163 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LB146014745;
+.super Ljava/lang/Object;
+
+## CHECK-START: java.lang.String B146014745.$noinline$testAppend1(java.lang.String, int) instruction_simplifier (before)
+## CHECK-NOT:                  StringBuilderAppend
+
+## CHECK-START: java.lang.String B146014745.$noinline$testAppend1(java.lang.String, int) instruction_simplifier (after)
+## CHECK:                      StringBuilderAppend
+
+.method public static $noinline$testAppend1(Ljava/lang/String;I)Ljava/lang/String;
+    .registers 4
+# StringBuilder sb = new StringBuilder();
+    new-instance v0, Ljava/lang/StringBuilder;
+    invoke-direct {v0}, Ljava/lang/StringBuilder;-><init>()V
+# sb.append(s).append(i);
+    invoke-virtual {v0, p0}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+    move-result-object v1
+    invoke-virtual {v1, p1}, Ljava/lang/StringBuilder;->append(I)Ljava/lang/StringBuilder;
+# return sb.append(s).append(i).toString();
+    invoke-virtual {v0, p0}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+    move-result-object v1
+    invoke-virtual {v1, p1}, Ljava/lang/StringBuilder;->append(I)Ljava/lang/StringBuilder;
+    move-result-object v1
+    invoke-virtual {v1}, Ljava/lang/StringBuilder;->toString()Ljava/lang/String;
+    move-result-object v1
+    return-object v1
+.end method
+
+## CHECK-START: java.lang.String B146014745.$noinline$testAppend2(java.lang.String, int) instruction_simplifier (after)
+## CHECK-NOT:                  StringBuilderAppend
+
+.method public static $noinline$testAppend2(Ljava/lang/String;I)Ljava/lang/String;
+    .registers 4
+# StringBuilder sb = new StringBuilder();
+    new-instance v0, Ljava/lang/StringBuilder;
+    invoke-direct {v0}, Ljava/lang/StringBuilder;-><init>()V
+# String s2 = sb.append(s).append(i).toString();
+    invoke-virtual {v0, p0}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+    move-result-object v1
+    invoke-virtual {v1, p1}, Ljava/lang/StringBuilder;->append(I)Ljava/lang/StringBuilder;
+    move-result-object v1
+    invoke-virtual {v1}, Ljava/lang/StringBuilder;->toString()Ljava/lang/String;
+    move-result-object v1
+# return sb.append(s2).toString();
+    invoke-virtual {v0, v1}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+    move-result-object v1
+    invoke-virtual {v1}, Ljava/lang/StringBuilder;->toString()Ljava/lang/String;
+    move-result-object v1
+    return-object v1
+.end method
+
+## CHECK-START: java.lang.String B146014745.$noinline$testAppend3(java.lang.String, int) instruction_simplifier (after)
+## CHECK-NOT:                  StringBuilderAppend
+
+.method public static $noinline$testAppend3(Ljava/lang/String;I)Ljava/lang/String;
+    .registers 5
+# StringBuilder sb = new StringBuilder();
+    new-instance v0, Ljava/lang/StringBuilder;
+    invoke-direct {v0}, Ljava/lang/StringBuilder;-><init>()V
+# String s2 = sb.append(s).toString();
+    invoke-virtual {v0, p0}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+    move-result-object v2
+    invoke-virtual {v2}, Ljava/lang/StringBuilder;->toString()Ljava/lang/String;
+    move-result-object v2
+# return sb.append(i).append(s2).append(i);
+    invoke-virtual {v0, p1}, Ljava/lang/StringBuilder;->append(I)Ljava/lang/StringBuilder;
+    move-result-object v1
+    invoke-virtual {v1, v2}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+    move-result-object v1
+    invoke-virtual {v1, p1}, Ljava/lang/StringBuilder;->append(I)Ljava/lang/StringBuilder;
+# return sb.toString();
+    invoke-virtual {v0}, Ljava/lang/StringBuilder;->toString()Ljava/lang/String;
+    move-result-object v1
+    return-object v1
+.end method
+
+# The following is a jasmin version.
+# Unfortunately, this would be translated without the required move-result-object,
+# instead using the register initialized by new-instance for subsequent calls.
+
+#.class public B146014745
+#.super java/lang/Object
+
+#.method public static $noinline$testAppend1(Ljava/lang/String;I)Ljava/lang/String;
+#    .limit stack 3
+#    .limit locals 2
+#; StringBuilder sb = new StringBuilder();
+#    new java/lang/StringBuilder
+#    dup
+#    invokespecial java.lang.StringBuilder.<init>()V
+#; sb.append(s).append(i);
+#    dup
+#    aload_0
+#    invokevirtual java.lang.StringBuilder.append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+#    iload_1
+#    invokevirtual java.lang.StringBuilder.append(I)Ljava/lang/StringBuilder;
+#    pop
+#; return sb.append(s).append(i).toString();
+#    aload_0
+#    invokevirtual java.lang.StringBuilder.append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+#    iload_1
+#    invokevirtual java.lang.StringBuilder.append(I)Ljava/lang/StringBuilder;
+#    invokevirtual java.lang.StringBuilder.toString()Ljava/lang/String;
+#    areturn
+#.end method
+
+#.method public static $noinline$testAppend2(Ljava/lang/String;I)Ljava/lang/String;
+#    .limit stack 3
+#    .limit locals 2
+#; StringBuilder sb = new StringBuilder();
+#    new java/lang/StringBuilder
+#    dup
+#    invokespecial java.lang.StringBuilder.<init>()V
+#; String s2 = sb.append(s).append(i).toString();
+#    dup
+#    aload_0
+#    invokevirtual java.lang.StringBuilder.append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+#    iload_1
+#    invokevirtual java.lang.StringBuilder.append(I)Ljava/lang/StringBuilder;
+#    invokevirtual java.lang.StringBuilder.toString()Ljava/lang/String;
+#; return sb.append(s2).toString();
+#    invokevirtual java.lang.StringBuilder.append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+#    invokevirtual java.lang.StringBuilder.toString()Ljava/lang/String;
+#    areturn
+#.end method
+
+#.method public static $noinline$testAppend3(Ljava/lang/String;I)Ljava/lang/String;
+#    .limit stack 3
+#    .limit locals 3
+#; StringBuilder sb = new StringBuilder();
+#    new java/lang/StringBuilder
+#    dup
+#    invokespecial java.lang.StringBuilder.<init>()V
+#; String s2 = sb.append(s).toString();
+#    dup
+#    aload_0
+#    invokevirtual java.lang.StringBuilder.append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+#    invokevirtual java.lang.StringBuilder.toString()Ljava/lang/String;
+#    astore_2
+#; return sb.append(i).append(s2).append(i);
+#    iload_1
+#    invokevirtual java.lang.StringBuilder.append(I)Ljava/lang/StringBuilder;
+#    aload_2
+#    invokevirtual java.lang.StringBuilder.append(Ljava/lang/String;)Ljava/lang/StringBuilder;
+#    iload_1
+#    invokevirtual java.lang.StringBuilder.append(I)Ljava/lang/StringBuilder;
+#    invokevirtual java.lang.StringBuilder.toString()Ljava/lang/String;
+#    areturn
+#.end method
diff --git a/test/699-checker-string-append2/src/Main.java b/test/699-checker-string-append2/src/Main.java
new file mode 100644
index 0000000..3753af6
--- /dev/null
+++ b/test/699-checker-string-append2/src/Main.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+    public static void main(String[] args) throws Exception {
+        Class<?> c = Class.forName("B146014745");
+        Method m1 = c.getDeclaredMethod("$noinline$testAppend1", String.class, int.class);
+        String b146014745_result1 = (String) m1.invoke(null, "x", 42);
+        assertEquals("x42x42", b146014745_result1);
+        Method m2 = c.getDeclaredMethod("$noinline$testAppend2", String.class, int.class);
+        String b146014745_result2 = (String) m2.invoke(null, "x", 42);
+        assertEquals("x42x42", b146014745_result2);
+        Method m3 = c.getDeclaredMethod("$noinline$testAppend3", String.class, int.class);
+        String b146014745_result3 = (String) m3.invoke(null, "x", 42);
+        assertEquals("x42x42", b146014745_result3);
+
+        System.out.println("passed");
+    }
+
+    public static void assertEquals(String expected, String actual) {
+        if (!expected.equals(actual)) {
+            throw new AssertionError("Expected: " + expected + ", actual: " + actual);
+        }
+    }
+}
diff --git a/test/701-easy-div-rem/genMain.py b/test/701-easy-div-rem/genMain.py
index b6c769f..918f647 100644
--- a/test/701-easy-div-rem/genMain.py
+++ b/test/701-easy-div-rem/genMain.py
@@ -40,7 +40,7 @@
 
 def subst_vars(variables, text):
     '''Substitute variables in text.'''
-    for key, value in variables.iteritems():
+    for key, value in variables.items():
         text = text.replace(str(key), str(value))
     return text
 
diff --git a/test/706-checker-scheduler/src/Main.java b/test/706-checker-scheduler/src/Main.java
index af18193..5a66fbb 100644
--- a/test/706-checker-scheduler/src/Main.java
+++ b/test/706-checker-scheduler/src/Main.java
@@ -322,7 +322,7 @@
   // but has more complex chains of transforming the original references:
   // ParameterValue --> BoundType --> NullCheck --> ArrayGet.
   // ParameterValue --> BoundType --> NullCheck --> IntermediateAddress --> ArraySet.
-  // After using LSA to analyze the orginal references, the scheduler should be able
+  // After using LSA to analyze the original references, the scheduler should be able
   // to find out that 'a' and 'b' may alias, hence unable to schedule these ArraGet/Set.
 
   /// CHECK-START-ARM64: void Main.CrossOverLoop2(java.lang.Object, java.lang.Object) scheduler (before)
@@ -584,9 +584,126 @@
     }
   }
 
+  // Check that instructions having cross iteration dependencies are not
+  // reordered.
+  //
+  /// CHECK-START-{ARM,ARM64}: void Main.testCrossItersDependencies() scheduler (before)
+  /// CHECK:     <<ID1:i\d+>>  Phi [{{i\d+}},<<ID3:i\d+>>]
+  /// CHECK:     <<ID2:i\d+>>  Phi [{{i\d+}},<<ID4:i\d+>>]
+  //
+  /// CHECK:     <<ID3>>  Sub [<<ID1>>,<<ID2>>]
+  /// CHECK:     <<ID4>>  Add [<<ID2>>,{{i\d+}}]
+
+  /// CHECK-START-{ARM,ARM64}: void Main.testCrossItersDependencies() scheduler (after)
+  /// CHECK:     <<ID1:i\d+>>  Phi [{{i\d+}},<<ID3:i\d+>>]
+  /// CHECK:     <<ID2:i\d+>>  Phi [{{i\d+}},<<ID4:i\d+>>]
+  //
+  /// CHECK:     <<ID3>>  Sub [<<ID1>>,<<ID2>>]
+  /// CHECK:     <<ID4>>  Add [<<ID2>>,{{i\d+}}]
+
+  /// CHECK-START-ARM: void Main.testCrossItersDependencies() disassembly (after)
+  /// CHECK:     subs
+  /// CHECK:     add
+  /// CHECK:     adds
+  /// CHECK:     ldrh
+  /// CHECK:     cmp
+  /// CHECK:     beq
+
+  /// CHECK-START-ARM64: void Main.testCrossItersDependencies() disassembly (after)
+  /// CHECK:     sub
+  /// CHECK:     add
+  /// CHECK:     add
+  /// CHECK:     ldrh
+  /// CHECK:     cbz
+  private static void testCrossItersDependencies() {
+    int[] data = {1, 2, 3, 0};
+    int sub = 0;
+    int sum = data[0];
+    for (int i = 1; data[i] != 0; ++i) {
+      sub -= sum;
+      sum += data[i];
+    }
+    expectEquals(sub, -4);
+    expectEquals(sum, 6);
+  }
+
+  // Check instructions defining values for the next iteration don't become
+  // self-dependent in a scheduling graph which prevents valid reordering.
+  //
+  /// CHECK-START-{ARM,ARM64}: void Main.testNoSelfDependantSchedNode(int) scheduler (before)
+  /// CHECK:     IntermediateAddress
+  /// CHECK:     ArrayGet
+  /// CHECK:     LessThanOrEqual
+  /// CHECK:     Select
+  /// CHECK:     IntermediateAddress
+  /// CHECK:     ArraySet
+  /// CHECK:     Add
+
+  /// CHECK-START-{ARM,ARM64}: void Main.testNoSelfDependantSchedNode(int) scheduler (after)
+  /// CHECK:     IntermediateAddress
+  /// CHECK:     ArrayGet
+  /// CHECK:     IntermediateAddress
+  /// CHECK:     LessThanOrEqual
+  /// CHECK:     Select
+  /// CHECK:     ArraySet
+  /// CHECK:     Add
+  //
+  // Parameter n is to prevent unrolling of the main loop.
+  private static void testNoSelfDependantSchedNode(int n) {
+    final int MAX = 2;
+    int[] a = {1, 2, 3};
+    int[] b = new int[a.length];
+    n = Math.min(n, a.length);
+    for (int i = 0; i < n; ++i) {
+      int j = a[i];
+      b[i] = (j > MAX ? MAX : 0);
+    }
+    expectEquals(b[0], 0);
+    expectEquals(b[1], 0);
+    expectEquals(b[2], 2);
+  }
+
+  // In case of cross iteration dependencies when a value for the next iteration is also used on
+  // the current iteration a MOV instruction is generated anyway. In such cases setting dependency
+  // between scheduling nodes will not eliminate MOV.
+  // In the test 'i+1' is such an example.
+  // The test checks that a dependency between scheduling nodes (first ArrayGet and Add) is not
+  // setup and Add is scheduled before ArrayGet.
+  //
+  /// CHECK-START-{ARM,ARM64}: void Main.testNonPreventingSchedulingCrossItersDeps(int) scheduler (before)
+  /// CHECK:          IntermediateAddress
+  /// CHECK-NEXT:     ArrayGet
+  /// CHECK-NEXT:     Add
+  /// CHECK-NEXT:     ArrayGet
+
+  /// CHECK-START-{ARM,ARM64}: void Main.testNonPreventingSchedulingCrossItersDeps(int) scheduler (after)
+  /// CHECK:          IntermediateAddress
+  /// CHECK-NEXT:     Add
+  /// CHECK-NEXT:     ArrayGet
+  /// CHECK-NEXT:     ArrayGet
+  //
+  // Parameter n is to prevent unrolling of the main loop.
+  private static void testNonPreventingSchedulingCrossItersDeps(int n) {
+    int[] a = {1, 2, 3};
+    n = Math.min(n, a.length);
+    for (int i = 0; i < n - 1; ++i) {
+      if (a[i] < a[i + 1]) {
+        int tmp = a[i];
+        a[i] = a[i + 1];
+        a[i + 1] = tmp;
+      }
+    }
+    expectEquals(a[0], 2);
+    expectEquals(a[1], 3);
+    expectEquals(a[2], 1);
+  }
+
   public static void main(String[] args) {
     testVecSetScalars();
     testVecReplicateScalar();
+    testCrossItersDependencies();
+    testNoSelfDependantSchedNode(3);
+    testNonPreventingSchedulingCrossItersDeps(3);
     if ((arrayAccess() + intDiv(10)) != -35) {
       System.out.println("FAIL");
     }
diff --git a/test/721-osr/expected.txt b/test/721-osr/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/721-osr/expected.txt
diff --git a/test/721-osr/info.txt b/test/721-osr/info.txt
new file mode 100644
index 0000000..d43357e
--- /dev/null
+++ b/test/721-osr/info.txt
@@ -0,0 +1,3 @@
+Regression test for OSR compilation, which used to not fill the
+right dex register value in the presence of equivalent phis.
+(see b/136698025)
diff --git a/test/721-osr/src/Main.java b/test/721-osr/src/Main.java
new file mode 100644
index 0000000..2f0892c
--- /dev/null
+++ b/test/721-osr/src/Main.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class Main {
+    private enum TestType {
+        ONE,
+        TWO
+    }
+
+    private static TestType type = TestType.ONE;
+
+    public static void main(String[] args) {
+        float testFloat;
+        switch (type) {
+            case ONE: testFloat = 1000.0f; break;
+            default: testFloat = 5f; break;
+        }
+
+        // Loop enough to potentially trigger OSR.
+        List<Integer> dummyObjects = new ArrayList<Integer>(200_000);
+        for (int i = 0; i < 200_000; i++) {
+            dummyObjects.add(1024);
+        }
+
+        if (testFloat != 1000.0f) {
+          throw new Error("Expected 1000.0f, got " + testFloat);
+        }
+    }
+}
diff --git a/test/723-string-init-range/expected.txt b/test/723-string-init-range/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/723-string-init-range/expected.txt
diff --git a/test/723-string-init-range/info.txt b/test/723-string-init-range/info.txt
new file mode 100644
index 0000000..20c3704
--- /dev/null
+++ b/test/723-string-init-range/info.txt
@@ -0,0 +1 @@
+Test for calling String.<init> with invoke-direct/range.
diff --git a/test/723-string-init-range/smali/new-instance.smali b/test/723-string-init-range/smali/new-instance.smali
new file mode 100644
index 0000000..45d6329
--- /dev/null
+++ b/test/723-string-init-range/smali/new-instance.smali
@@ -0,0 +1,25 @@
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LNewInstance;
+.super Ljava/lang/Object;
+
+.method public static initRange(Ljava/lang/String;)Ljava/lang/String;
+    .registers 2
+    new-instance v0, Ljava/lang/String;
+    move-object v1, p0
+    invoke-direct/range {v0 .. v1}, Ljava/lang/String;-><init>(Ljava/lang/String;)V
+    return-object v0
+.end method
diff --git a/test/723-string-init-range/src/Main.java b/test/723-string-init-range/src/Main.java
new file mode 100644
index 0000000..97ba76d
--- /dev/null
+++ b/test/723-string-init-range/src/Main.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+
+  static String expected = "Hello";
+  public static void main(String args[]) throws Throwable {
+    Class<?> c = Class.forName("NewInstance");
+    Method m = c.getMethod("initRange", String.class);
+    String result = (String)m.invoke(null, expected);
+    if (!expected.equals(result)) {
+      throw new Error("Expected '" + expected + "', got " + result);
+    }
+  }
+}
diff --git a/test/724-invoke-super-npe/expected.txt b/test/724-invoke-super-npe/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/724-invoke-super-npe/expected.txt
diff --git a/test/724-invoke-super-npe/info.txt b/test/724-invoke-super-npe/info.txt
new file mode 100644
index 0000000..b85a214
--- /dev/null
+++ b/test/724-invoke-super-npe/info.txt
@@ -0,0 +1 @@
+Regression test for implicit null checks on invoke-super and invoke-super-range.
diff --git a/test/724-invoke-super-npe/smali/TestCase.smali b/test/724-invoke-super-npe/smali/TestCase.smali
new file mode 100644
index 0000000..9b79a92
--- /dev/null
+++ b/test/724-invoke-super-npe/smali/TestCase.smali
@@ -0,0 +1,42 @@
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class  LTestCase;
+.super  LMain;
+
+.method public constructor <init>()V
+.registers 2
+       invoke-direct {v1}, LMain;-><init>()V
+       return-void
+.end method
+
+.method public testSuperRange(LTestCase;)I
+.registers 8
+       invoke-super/range {p1}, LMain;->toInt()I
+       move-result v0
+       return v0
+.end method
+
+.method public testSuper(LTestCase;)I
+.registers 8
+       invoke-super {p1}, LMain;->toInt()I
+       move-result v0
+       return v0
+.end method
+
+.method public toInt()I
+.registers 3
+    const v0, 777
+    return v0
+.end method
diff --git a/test/724-invoke-super-npe/src/Main.java b/test/724-invoke-super-npe/src/Main.java
new file mode 100644
index 0000000..5d5567e
--- /dev/null
+++ b/test/724-invoke-super-npe/src/Main.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
+class Main {
+  public static void main(String[] args) throws Exception {
+    Class<?> cls = Class.forName("TestCase");
+    test("testSuper", cls);
+    test("testSuperRange", cls);
+  }
+
+  public static void test(String methodName, Class<?> cls) throws Exception {
+    Method m = cls.getDeclaredMethod(methodName, cls);
+    try {
+      m.invoke(cls.newInstance(), (Object)null);
+      throw new Error("Expected NullPointerException");
+    } catch (InvocationTargetException e) {
+      if (e.getCause().getClass() != NullPointerException.class) {
+        throw new Error("Expected NullPointerException, got " + e.getCause().getClass());
+      }
+    }
+  }
+
+  public int toInt() {
+    return 42;
+  }
+}
diff --git a/test/725-imt-conflict-object/expected.txt b/test/725-imt-conflict-object/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/725-imt-conflict-object/expected.txt
diff --git a/test/725-imt-conflict-object/info.txt b/test/725-imt-conflict-object/info.txt
new file mode 100644
index 0000000..db6345c
--- /dev/null
+++ b/test/725-imt-conflict-object/info.txt
@@ -0,0 +1,2 @@
+Test that invokeinterface through a j.l.Object method doesn't go
+through the IMT conflict trampoline.
diff --git a/test/725-imt-conflict-object/smali/TestCase.smali b/test/725-imt-conflict-object/smali/TestCase.smali
new file mode 100644
index 0000000..77665aa
--- /dev/null
+++ b/test/725-imt-conflict-object/smali/TestCase.smali
@@ -0,0 +1,25 @@
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTestCase;
+
+.super Ljava/lang/Object;
+
+.method public static test()Ljava/lang/String;
+   .registers 2
+   sget-object v0, LMain;->main:LItf;
+   invoke-interface {v0}, LItf;->toString()Ljava/lang/String;
+   move-result-object v0
+   return-object v0
+.end method
diff --git a/test/725-imt-conflict-object/src/Main.java b/test/725-imt-conflict-object/src/Main.java
new file mode 100644
index 0000000..58320b2
--- /dev/null
+++ b/test/725-imt-conflict-object/src/Main.java
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+interface Itf {
+  public void method0a();
+  public void method0b();
+  public void method0c();
+  public void method0d();
+  public void method0e();
+  public void method0f();
+  public void method0g();
+  public void method0h();
+  public void method0i();
+  public void method0j();
+  public void method0k();
+  public void method0l();
+  public void method0m();
+  public void method0n();
+  public void method0o();
+  public void method0p();
+  public void method0q();
+  public void method0r();
+  public void method0s();
+  public void method0t();
+  public void method0u();
+  public void method0v();
+  public void method0w();
+  public void method0x();
+  public void method0y();
+  public void method0z();
+  public void method1a();
+  public void method1b();
+  public void method1c();
+  public void method1d();
+  public void method1e();
+  public void method1f();
+  public void method1g();
+  public void method1h();
+  public void method1i();
+  public void method1j();
+  public void method1k();
+  public void method1l();
+  public void method1m();
+  public void method1n();
+  public void method1o();
+  public void method1p();
+  public void method1q();
+  public void method1r();
+  public void method1s();
+  public void method1t();
+  public void method1u();
+  public void method1v();
+  public void method1w();
+  public void method1x();
+  public void method1y();
+  public void method1z();
+  public void method2a();
+  public void method2b();
+  public void method2c();
+  public void method2d();
+  public void method2e();
+  public void method2f();
+  public void method2g();
+  public void method2h();
+  public void method2i();
+  public void method2j();
+  public void method2k();
+  public void method2l();
+  public void method2m();
+  public void method2n();
+  public void method2o();
+  public void method2p();
+  public void method2q();
+  public void method2r();
+  public void method2s();
+  public void method2t();
+  public void method2u();
+  public void method2v();
+  public void method2w();
+  public void method2x();
+  public void method2y();
+  public void method2z();
+  public void method3a();
+  public void method3b();
+  public void method3c();
+  public void method3d();
+  public void method3e();
+  public void method3f();
+  public void method3g();
+  public void method3h();
+  public void method3i();
+  public void method3j();
+  public void method3k();
+  public void method3l();
+  public void method3m();
+  public void method3n();
+  public void method3o();
+  public void method3p();
+  public void method3q();
+  public void method3r();
+  public void method3s();
+  public void method3t();
+  public void method3u();
+  public void method3v();
+  public void method3w();
+  public void method3x();
+  public void method3y();
+  public void method3z();
+}
+
+public class Main implements Itf {
+  public static Itf main;
+  public static void main(String[] args) throws Exception {
+    main = new Main();
+    Class<?> c = Class.forName("TestCase");
+    Method m = c.getMethod("test");
+    String result = (String)m.invoke(null);
+    if (!"MainInstance".equals(result)) {
+      throw new Error("Expected 'MainInstance', got '" + result + "'");
+    }
+  }
+
+  public String toString() {
+    return "MainInstance";
+  }
+
+  public void method0a() {}
+  public void method0b() {}
+  public void method0c() {}
+  public void method0d() {}
+  public void method0e() {}
+  public void method0f() {}
+  public void method0g() {}
+  public void method0h() {}
+  public void method0i() {}
+  public void method0j() {}
+  public void method0k() {}
+  public void method0l() {}
+  public void method0m() {}
+  public void method0n() {}
+  public void method0o() {}
+  public void method0p() {}
+  public void method0q() {}
+  public void method0r() {}
+  public void method0s() {}
+  public void method0t() {}
+  public void method0u() {}
+  public void method0v() {}
+  public void method0w() {}
+  public void method0x() {}
+  public void method0y() {}
+  public void method0z() {}
+  public void method1a() {}
+  public void method1b() {}
+  public void method1c() {}
+  public void method1d() {}
+  public void method1e() {}
+  public void method1f() {}
+  public void method1g() {}
+  public void method1h() {}
+  public void method1i() {}
+  public void method1j() {}
+  public void method1k() {}
+  public void method1l() {}
+  public void method1m() {}
+  public void method1n() {}
+  public void method1o() {}
+  public void method1p() {}
+  public void method1q() {}
+  public void method1r() {}
+  public void method1s() {}
+  public void method1t() {}
+  public void method1u() {}
+  public void method1v() {}
+  public void method1w() {}
+  public void method1x() {}
+  public void method1y() {}
+  public void method1z() {}
+  public void method2a() {}
+  public void method2b() {}
+  public void method2c() {}
+  public void method2d() {}
+  public void method2e() {}
+  public void method2f() {}
+  public void method2g() {}
+  public void method2h() {}
+  public void method2i() {}
+  public void method2j() {}
+  public void method2k() {}
+  public void method2l() {}
+  public void method2m() {}
+  public void method2n() {}
+  public void method2o() {}
+  public void method2p() {}
+  public void method2q() {}
+  public void method2r() {}
+  public void method2s() {}
+  public void method2t() {}
+  public void method2u() {}
+  public void method2v() {}
+  public void method2w() {}
+  public void method2x() {}
+  public void method2y() {}
+  public void method2z() {}
+  public void method3a() {}
+  public void method3b() {}
+  public void method3c() {}
+  public void method3d() {}
+  public void method3e() {}
+  public void method3f() {}
+  public void method3g() {}
+  public void method3h() {}
+  public void method3i() {}
+  public void method3j() {}
+  public void method3k() {}
+  public void method3l() {}
+  public void method3m() {}
+  public void method3n() {}
+  public void method3o() {}
+  public void method3p() {}
+  public void method3q() {}
+  public void method3r() {}
+  public void method3s() {}
+  public void method3t() {}
+  public void method3u() {}
+  public void method3v() {}
+  public void method3w() {}
+  public void method3x() {}
+  public void method3y() {}
+  public void method3z() {}
+}
diff --git a/test/800-smali/expected.txt b/test/800-smali/expected.txt
index 291de72..fbf10bb 100644
--- a/test/800-smali/expected.txt
+++ b/test/800-smali/expected.txt
@@ -74,4 +74,10 @@
 b/31313170
 ConstClassAliasing
 b/121191566
+b/122501785
+b/134061982
+b/134061982 (2)
+b/121245951
+b/121245951 (2)
+b/121245951 (3)
 Done!
diff --git a/test/800-smali/smali/b_121245951.smali b/test/800-smali/smali/b_121245951.smali
new file mode 100644
index 0000000..4faaf67
--- /dev/null
+++ b/test/800-smali/smali/b_121245951.smali
@@ -0,0 +1,26 @@
+.class public LB121245951;
+
+.super Ljava/lang/Object;
+
+.method public static run(ZLjava/lang/Object;)V
+  .registers 3
+
+  # Create an unequal lock stack.
+
+  if-eqz v1, :LfalseBranch
+
+:LtrueBranch
+  monitor-enter v2
+  monitor-enter v2
+  goto :Ljoin
+
+:LfalseBranch
+  monitor-enter v2
+  goto :Ljoin
+
+:Ljoin
+  monitor-exit v2
+
+  # Should throw here.
+  return-void
+.end method
diff --git a/test/800-smali/smali/b_121245951_2.smali b/test/800-smali/smali/b_121245951_2.smali
new file mode 100644
index 0000000..0750bb0
--- /dev/null
+++ b/test/800-smali/smali/b_121245951_2.smali
@@ -0,0 +1,30 @@
+.class public LB121245951_2;
+
+.super Ljava/lang/Object;
+
+.method public static run(ZLjava/lang/Object;)V
+  .registers 3
+
+  # Create an unequal lock stack.
+
+  if-eqz v1, :LfalseBranch
+
+:LtrueBranch
+  monitor-enter v2
+  monitor-enter v2
+  const/4 v0, 0x0
+  goto :Ljoin
+
+:LfalseBranch
+  monitor-enter v2
+  move-object v0, v2
+  goto :Ljoin
+
+:Ljoin
+  monitor-exit v2
+
+  # This should fail the class
+  add-int/lit8 v0, v0, 0x1
+
+  return-void
+.end method
diff --git a/test/800-smali/smali/b_121245951_3.smali b/test/800-smali/smali/b_121245951_3.smali
new file mode 100644
index 0000000..b6e7b1c
--- /dev/null
+++ b/test/800-smali/smali/b_121245951_3.smali
@@ -0,0 +1,33 @@
+.class public LB121245951_3;
+
+.super Ljava/lang/Object;
+
+.method public static run(Ljava/lang/Object;)V
+  .registers 3
+
+  const/4 v1, 0x1
+
+:LcatchStart
+
+  monitor-enter v2
+
+  # Possibly throwing to merge v1 into catch handler as int.
+  sget-object v2, Ljava/lang/System;->out:Ljava/io/PrintStream;
+
+  move-object v1, v2
+
+  # This should cause a runtime failure, and not merge into the
+  # catch handler.
+  return-void
+
+:LcatchEnd
+:LcatchHandler
+  move-exception v0
+  # If the lock fail at the return-void above merged into the catch
+  # handler, this will fail the class.
+  add-int/lit8 v1, v1, 0x1
+  throw v0
+
+.catchall {:LcatchStart .. :LcatchEnd} :LcatchHandler
+
+.end method
\ No newline at end of file
diff --git a/test/800-smali/smali/b_122501785.smali b/test/800-smali/smali/b_122501785.smali
new file mode 100644
index 0000000..240aad9
--- /dev/null
+++ b/test/800-smali/smali/b_122501785.smali
@@ -0,0 +1,14 @@
+.class public LB122501785;
+
+# Test that a hard + soft verifier failure in instance field access
+# correctly triggers the hard fail to protect the compiler.
+
+.super Ljava/lang/Object;
+
+.method public static run(LB122501785;Ljava/lang/Object;)V
+    .registers 4
+    const/4 v0, 0
+    const/4 v1, 1
+    iput-boolean v0, v1, Ldoes/not/Exist;->field:Z
+    return-void
+.end method
diff --git a/test/800-smali/smali/b_134061982.smali b/test/800-smali/smali/b_134061982.smali
new file mode 100644
index 0000000..c62fa8b
--- /dev/null
+++ b/test/800-smali/smali/b_134061982.smali
@@ -0,0 +1,60 @@
+.class public LB134061982;
+.super Ljava/lang/Object;
+
+
+.method public constructor <init>()V
+.registers 1
+       invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+       return-void
+.end method
+
+.method public static run(I)V
+.registers 4
+
+# Registers:
+# * v0 = 0/null
+# * v1 = "outer" catch value to operate on
+# * v2 = exception value for inner catch
+# * v3 = p0 = input for two legs.
+
+        const v0, 0
+
+        # Start with r1 == null
+        const v1, 0
+
+        if-eqz p0, :direct_leg
+        goto :indirect_leg
+
+:direct_leg
+        throw v0
+
+:indirect_leg
+        # Make r1 not-reference.
+        const v1, 1
+        throw v0
+
+:end
+        return-void
+
+:catch_inner
+        move-exception v2
+        # r2 should not be primitive, so this should hard-fail if reached.
+        add-int/lit8 v2, v2, 0x1
+        goto :end
+
+:catch_outer
+        # Just some random call.
+        invoke-virtual {v1}, Ljava/io/PrintStream;->println()V
+        goto :end
+
+# Direct leg is directly covered by catch_outer.
+.catchall {:direct_leg .. :indirect_leg} :catch_outer
+
+# Indirect leg is directly covered by catch_inner.
+# * Covered by unresolved exception class -> unreachable.
+.catch Ldoes/not/ResolveException; {:indirect_leg .. :end} :catch_inner
+
+# catch_inner is covered by catch_outer.
+.catchall {:catch_inner .. :catch_outer} :catch_outer
+
+.end method
diff --git a/test/800-smali/smali/b_134061983_2.smali b/test/800-smali/smali/b_134061983_2.smali
new file mode 100644
index 0000000..a7ad684
--- /dev/null
+++ b/test/800-smali/smali/b_134061983_2.smali
@@ -0,0 +1,61 @@
+.class public LB134061982_2;
+.super Ljava/lang/Object;
+
+
+.method public constructor <init>()V
+.registers 1
+       invoke-direct {p0}, Ljava/lang/Object;-><init>()V
+       return-void
+.end method
+
+.method public static run(I)V
+.registers 4
+
+# Registers:
+# * v0 = 0/null
+# * v1 = "outer" catch value to operate on
+# * v2 = exception value for inner catch
+# * v3 = p0 = input for two legs.
+
+        const v0, 0
+
+        # Start with r1 == null
+        const v1, 0
+
+        if-eqz p0, :direct_leg
+        goto :indirect_leg
+
+:direct_leg
+        throw v0
+
+:indirect_leg
+        # Make r1 not-reference.
+        const v1, 1
+        throw v0
+
+:end
+        return-void
+
+:catch_inner
+        move-exception v2
+        # r2 should not be primitive, so this should hard-fail if reached.
+        add-int/lit8 v2, v2, 0x1
+        goto :end
+
+:catch_outer
+        # Just some random call.
+        invoke-virtual {v1}, Ljava/io/PrintStream;->println()V
+        goto :end
+
+# Direct leg is directly covered by catch_outer.
+.catchall {:direct_leg .. :indirect_leg} :catch_outer
+
+# Indirect leg is directly covered by catch_inner.
+# * Covered by unresolved and resolved exception classes -> live.
+.catch Ldoes/not/ResolveException; {:indirect_leg .. :end} :catch_inner
+.catch Ljava/lang/ArithmeticException; {:indirect_leg .. :end} :catch_inner
+
+# catch_inner is covered by catch_outer.
+.catchall {:catch_inner .. :catch_outer} :catch_outer
+
+.end method
diff --git a/test/800-smali/src/Main.java b/test/800-smali/src/Main.java
index d7979e1..90476b3 100644
--- a/test/800-smali/src/Main.java
+++ b/test/800-smali/src/Main.java
@@ -195,6 +195,18 @@
                 null, true));
         testCases.add(new TestCase("b/121191566", "B121191566", "run", new Object[] { "a" }, null,
                 true, false));
+        testCases.add(new TestCase("b/122501785", "B122501785", "run", null, new VerifyError(),
+                0));
+        testCases.add(new TestCase("b/134061982", "B134061982", "run", new Object[] { 0 },
+                new NullPointerException(), 0));
+        testCases.add(new TestCase("b/134061982 (2)", "B134061982_2", "run", new Object[] { 0 },
+                new VerifyError(), 0));
+        testCases.add(new TestCase("b/121245951", "B121245951", "run", new Object[] { true,
+                new Object() }, new IllegalMonitorStateException(), 0));
+        testCases.add(new TestCase("b/121245951 (2)", "B121245951_2", "run", new Object[] { true,
+                new Object() }, new VerifyError(), 0));
+        testCases.add(new TestCase("b/121245951 (3)", "B121245951_3", "run", new Object[] {
+                new Object() }, new IllegalMonitorStateException(), 0));
     }
 
     public void runTests() {
diff --git a/test/900-hello-plugin/run b/test/900-hello-plugin/run
index c633f6d..a19a38c 100755
--- a/test/900-hello-plugin/run
+++ b/test/900-hello-plugin/run
@@ -18,6 +18,28 @@
 if  [[ "$@" == *"-O"* ]]; then
   plugin=libartagent.so
 fi
+
+# Adjust the agent path when running on device.
+if  [[ "$@" != *"--host"* ]]; then
+  if [[ -z "$ANDROID_BUILD_TOP" ]]; then
+    echo 'ANDROID_BUILD_TOP environment variable is empty; did you forget to run `lunch`?'
+    exit 1
+  fi
+
+  bitness_flag=--32
+  if  [[ "$@" == *"--64"* ]]; then
+    bitness_flag=--64
+  fi
+
+  # Path to native libraries installed on the device for testing purposes.
+  test_native_lib_path=$("$ANDROID_BUILD_TOP/art/test/utils/get-device-test-native-lib-path" \
+    "$bitness_flag")
+
+  # The linker configuration used for dalvikvm(64) in the ART APEX requires us
+  # to pass the full path to the agent to the runtime when running on device.
+  plugin=${test_native_lib_path}/${plugin}
+fi
+
 ./default-run "$@" --runtime-option -agentpath:${plugin}=test_900 \
                    --runtime-option -agentpath:${plugin}=test_900_round_2 \
                    --android-runtime-option -Xplugin:${plugin}
diff --git a/test/901-hello-ti-agent/src/art/Main.java b/test/901-hello-ti-agent/src/art/Main.java
deleted file mode 100644
index 8b01920..0000000
--- a/test/901-hello-ti-agent/src/art/Main.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-// Binder class so the agent's C code has something that can be bound and exposed to tests.
-// In a package to separate cleanly and work around CTS reference issues (though this class
-// should be replaced in the CTS version).
-public class Main {
-  // Load the given class with the given classloader, and bind all native methods to corresponding
-  // C methods in the agent. Will abort if any of the steps fail.
-  public static native void bindAgentJNI(String className, ClassLoader classLoader);
-  // Same as above, giving the class directly.
-  public static native void bindAgentJNIForClass(Class<?> klass);
-}
diff --git a/test/901-hello-ti-agent/src/art/Main.java b/test/901-hello-ti-agent/src/art/Main.java
new file mode 120000
index 0000000..84ae4ac
--- /dev/null
+++ b/test/901-hello-ti-agent/src/art/Main.java
@@ -0,0 +1 @@
+../../../jvmti-common/Main.java
\ No newline at end of file
diff --git a/test/902-hello-transformation/src/art/Redefinition.java b/test/902-hello-transformation/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/902-hello-transformation/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/902-hello-transformation/src/art/Redefinition.java b/test/902-hello-transformation/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/902-hello-transformation/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/903-hello-tagging/src/art/Main.java b/test/903-hello-tagging/src/art/Main.java
deleted file mode 100644
index aa5498b..0000000
--- a/test/903-hello-tagging/src/art/Main.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-// Binder class so the agent's C code has something that can be bound and exposed to tests.
-// In a package to separate cleanly and work around CTS reference issues (though this class
-// should be replaced in the CTS version).
-public class Main {
-  // Load the given class with the given classloader, and bind all native methods to corresponding
-  // C methods in the agent. Will abort if any of the steps fail.
-  public static native void bindAgentJNI(String className, ClassLoader classLoader);
-  // Same as above, giving the class directly.
-  public static native void bindAgentJNIForClass(Class<?> klass);
-
-  // Common infrastructure.
-  public static native void setTag(Object o, long tag);
-  public static native long getTag(Object o);
-}
diff --git a/test/903-hello-tagging/src/art/Main.java b/test/903-hello-tagging/src/art/Main.java
new file mode 120000
index 0000000..84ae4ac
--- /dev/null
+++ b/test/903-hello-tagging/src/art/Main.java
@@ -0,0 +1 @@
+../../../jvmti-common/Main.java
\ No newline at end of file
diff --git a/test/904-object-allocation/tracking.cc b/test/904-object-allocation/tracking.cc
index f7296b1..abb6083 100644
--- a/test/904-object-allocation/tracking.cc
+++ b/test/904-object-allocation/tracking.cc
@@ -61,7 +61,7 @@
   }
 
   T Get(JNIEnv* env) const {
-    return env->NewLocalRef(obj_);
+    return reinterpret_cast<T>(env->NewLocalRef(obj_));
   }
 
  private:
@@ -75,7 +75,9 @@
 };
 
 struct EventLog {
-  std::string msg_;
+  ScopedGlobalRef<jclass> object_klass;
+  ScopedGlobalRef<jclass> object_klass2;
+  jlong size;
   ScopedGlobalRef<jthread> thr_;
 };
 
@@ -88,15 +90,11 @@
                                     jobject object,
                                     jclass object_klass,
                                     jlong size) {
-  std::string object_klass_descriptor = GetClassName(jni_env, object_klass);
   ScopedLocalRef<jclass> object_klass2(jni_env, jni_env->GetObjectClass(object));
-  std::string object_klass_descriptor2 = GetClassName(jni_env, object_klass2.get());
-
   std::lock_guard<std::mutex> guard(gEventsMutex);
-  gEvents.push_back({android::base::StringPrintf("ObjectAllocated type %s/%s size %zu",
-                                                 object_klass_descriptor.c_str(),
-                                                 object_klass_descriptor2.c_str(),
-                                                 static_cast<size_t>(size)),
+  gEvents.push_back({ScopedGlobalRef<jclass>(jni_env, object_klass),
+                     ScopedGlobalRef<jclass>(jni_env, object_klass2.get()),
+                     size,
                      ScopedGlobalRef<jthread>(jni_env, thread)});
 }
 
@@ -135,7 +133,15 @@
       ScopedLocalRef<jthread> thr(env, ev.thr_.Get(env));
       for (jthread req_thread : thread_lst) {
         if (env->IsSameObject(req_thread, thr.get())) {
-          real_events.push_back(ev.msg_);
+          ScopedLocalRef<jclass> klass(env, ev.object_klass.Get(env));
+          ScopedLocalRef<jclass> klass2(env, ev.object_klass2.Get(env));
+          std::string object_klass_descriptor = GetClassName(env, klass.get());
+          std::string object_klass_descriptor2 = GetClassName(env, klass2.get());
+          std::string res(android::base::StringPrintf("ObjectAllocated type %s/%s size %zu",
+                                                      object_klass_descriptor.c_str(),
+                                                      object_klass_descriptor2.c_str(),
+                                                      static_cast<size_t>(ev.size)));
+          real_events.push_back(res);
           break;
         }
       }
diff --git a/test/905-object-free/src/art/Main.java b/test/905-object-free/src/art/Main.java
deleted file mode 100644
index aa5498b..0000000
--- a/test/905-object-free/src/art/Main.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-// Binder class so the agent's C code has something that can be bound and exposed to tests.
-// In a package to separate cleanly and work around CTS reference issues (though this class
-// should be replaced in the CTS version).
-public class Main {
-  // Load the given class with the given classloader, and bind all native methods to corresponding
-  // C methods in the agent. Will abort if any of the steps fail.
-  public static native void bindAgentJNI(String className, ClassLoader classLoader);
-  // Same as above, giving the class directly.
-  public static native void bindAgentJNIForClass(Class<?> klass);
-
-  // Common infrastructure.
-  public static native void setTag(Object o, long tag);
-  public static native long getTag(Object o);
-}
diff --git a/test/905-object-free/src/art/Main.java b/test/905-object-free/src/art/Main.java
new file mode 120000
index 0000000..84ae4ac
--- /dev/null
+++ b/test/905-object-free/src/art/Main.java
@@ -0,0 +1 @@
+../../../jvmti-common/Main.java
\ No newline at end of file
diff --git a/test/906-iterate-heap/src/art/Main.java b/test/906-iterate-heap/src/art/Main.java
deleted file mode 100644
index aa5498b..0000000
--- a/test/906-iterate-heap/src/art/Main.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-// Binder class so the agent's C code has something that can be bound and exposed to tests.
-// In a package to separate cleanly and work around CTS reference issues (though this class
-// should be replaced in the CTS version).
-public class Main {
-  // Load the given class with the given classloader, and bind all native methods to corresponding
-  // C methods in the agent. Will abort if any of the steps fail.
-  public static native void bindAgentJNI(String className, ClassLoader classLoader);
-  // Same as above, giving the class directly.
-  public static native void bindAgentJNIForClass(Class<?> klass);
-
-  // Common infrastructure.
-  public static native void setTag(Object o, long tag);
-  public static native long getTag(Object o);
-}
diff --git a/test/906-iterate-heap/src/art/Main.java b/test/906-iterate-heap/src/art/Main.java
new file mode 120000
index 0000000..84ae4ac
--- /dev/null
+++ b/test/906-iterate-heap/src/art/Main.java
@@ -0,0 +1 @@
+../../../jvmti-common/Main.java
\ No newline at end of file
diff --git a/test/906-iterate-heap/src/art/Test906.java b/test/906-iterate-heap/src/art/Test906.java
index 190f36f..b782c9b 100644
--- a/test/906-iterate-heap/src/art/Test906.java
+++ b/test/906-iterate-heap/src/art/Test906.java
@@ -91,17 +91,26 @@
   public static class Baz extends Bar {}
   public static class Alpha extends Bar {}
   public static class MISSING extends Baz {}
+  public interface Iface {}
+  public static class Beta implements Iface {}
+  public static class Gamma implements Iface {}
+  public static class Delta extends Beta {}
   private static void testIterateOverInstances() throws Exception {
     Object[] foos = GenTs(Foo.class);
     Object[] bars = GenTs(Bar.class);
     Object[] bazs = GenTs(Baz.class);
     Object[] alphas = GenTs(Alpha.class);
+    Object[] betas = GenTs(Beta.class);
+    Object[] gammas = GenTs(Gamma.class);
+    Object[] deltas = GenTs(Delta.class);
     checkEq(0, iterateOverInstancesCount(MISSING.class));
     checkEq(alphas.length, iterateOverInstancesCount(Alpha.class));
     checkEq(bazs.length, iterateOverInstancesCount(Baz.class));
     checkEq(bazs.length + alphas.length + bars.length, iterateOverInstancesCount(Bar.class));
     checkEq(bazs.length + alphas.length + bars.length + foos.length,
         iterateOverInstancesCount(Foo.class));
+    checkEq(betas.length + gammas.length + deltas.length,
+        iterateOverInstancesCount(Iface.class));
   }
 
   public static void doTest() throws Exception {
diff --git a/test/909-attach-agent/run b/test/909-attach-agent/run
index fd45abd..add558e 100755
--- a/test/909-attach-agent/run
+++ b/test/909-attach-agent/run
@@ -28,22 +28,55 @@
   patch -s expected.txt <interpreter-expected.patch
 fi
 
+# Provide additional runtime options when running on device.
+extra_runtime_options=
+if  [[ "$@" != *"--host"* ]]; then
+  if [[ -z "$ANDROID_BUILD_TOP" ]]; then
+    echo 'ANDROID_BUILD_TOP environment variable is empty; did you forget to run `lunch`?'
+    exit 1
+  fi
+
+  bitness_flag=--32
+  if  [[ "$@" == *"--64"* ]]; then
+    bitness_flag=--64
+  fi
+
+  # Path to native libraries installed on the device for testing purposes.
+  test_native_lib_path=$("$ANDROID_BUILD_TOP/art/test/utils/get-device-test-native-lib-path" \
+    "$bitness_flag")
+
+  # The linker configuration used for dalvikvm(64) in the ART APEX requires us
+  # to pass the full path to the agent to the runtime when running on device.
+  agent=${test_native_lib_path}/${agent}
+
+  # The above agent path is an absolute one; append the root directory to the
+  # library path so that the agent can be found via the `java.library.path`
+  # system property (see method `Main.find` in
+  # test/909-attach-agent/src-art/Main.java).
+  extra_runtime_options="--runtime-option -Djava.library.path=${test_native_lib_path}:/"
+fi
+
 export ANDROID_LOG_TAGS='*:f'
 ./default-run "$@" --android-runtime-option -Xplugin:${plugin} \
                    --android-runtime-option -Xcompiler-option \
                    --android-runtime-option --debuggable \
+                   $extra_runtime_options \
                    --args agent:${agent}=909-attach-agent
 return_status1=$?
 
 ./default-run "$@" --android-runtime-option -Xcompiler-option \
                    --android-runtime-option --debuggable \
+                   $extra_runtime_options \
                    --args agent:${agent}=909-attach-agent
 return_status2=$?
 
-./default-run "$@" --args agent:${agent}=909-attach-agent --external-log-tags
+./default-run "$@" $extra_runtime_options \
+                   --args agent:${agent}=909-attach-agent \
+                   --external-log-tags
 return_status3=$?
 
-./default-run "$@" --args agent:${agent}=909-attach-agent \
+./default-run "$@" $extra_runtime_options \
+                   --args agent:${agent}=909-attach-agent \
                    --args disallow-debugging \
                    --external-log-tags
 return_status4=$?
diff --git a/test/911-get-stack-trace/expected-cts-version.txt b/test/911-get-stack-trace/expected-cts-version.txt
new file mode 100644
index 0000000..da5da9f
--- /dev/null
+++ b/test/911-get-stack-trace/expected-cts-version.txt
@@ -0,0 +1,485 @@
+###################
+### Same thread ###
+###################
+From top
+---------
+ getStackTrace (Ljava/lang/Thread;II)[[Ljava/lang/String; -1 -2
+ print (Ljava/lang/Thread;II)V 0 38
+ printOrWait (IILart/ControlData;)V 6 41
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ doTest ()V 31 25
+ run ()V 0 31
+---------
+ print (Ljava/lang/Thread;II)V 0 38
+ printOrWait (IILart/ControlData;)V 6 41
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ doTest ()V 35 26
+ run ()V 0 31
+---------
+ getStackTrace (Ljava/lang/Thread;II)[[Ljava/lang/String; -1 -2
+ print (Ljava/lang/Thread;II)V 0 38
+ printOrWait (IILart/ControlData;)V 6 41
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+---------
+ printOrWait (IILart/ControlData;)V 6 41
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+---------
+ printOrWait (IILart/ControlData;)V 6 41
+From bottom
+---------
+ run ()V 0 31
+---------
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ doTest ()V 61 33
+ run ()V 0 31
+---------
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+
+################################
+### Other thread (suspended) ###
+################################
+From top
+---------
+ wait (JI)V -1 -2
+ wait (J)V 1 442
+ wait ()V 2 568
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 28
+---------
+ wait (J)V 1 442
+ wait ()V 2 568
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 28
+---------
+ wait (JI)V -1 -2
+ wait (J)V 1 442
+ wait ()V 2 568
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+---------
+ wait ()V 2 568
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+---------
+ wait ()V 2 568
+From bottom
+---------
+ run ()V 4 28
+---------
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 28
+---------
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+
+###########################
+### Other thread (live) ###
+###########################
+From top
+---------
+ printOrWait (IILart/ControlData;)V 45 54
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 62
+---------
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 62
+---------
+ printOrWait (IILart/ControlData;)V 45 54
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+---------
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+From bottom
+---------
+ run ()V 4 62
+---------
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 62
+---------
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+
+
+########################################
+### Other select threads (suspended) ###
+########################################
+---------
+Test911
+
+---------
+ThreadListTraces Thread 0
+
+---------
+ThreadListTraces Thread 2
+
+---------
+ThreadListTraces Thread 4
+
+---------
+ThreadListTraces Thread 6
+
+---------
+ThreadListTraces Thread 8
+
+---------
+Test911
+ getThreadListStackTraces ([Ljava/lang/Thread;I)[[Ljava/lang/Object; -1 -2
+ printList ([Ljava/lang/Thread;I)V 0 68
+ doTest ()V 110 54
+ run ()V 36 49
+
+---------
+ThreadListTraces Thread 0
+ wait (JI)V -1 -2
+ wait (J)V 1 442
+ wait ()V 2 568
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+
+---------
+ThreadListTraces Thread 2
+ wait (JI)V -1 -2
+ wait (J)V 1 442
+ wait ()V 2 568
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+
+---------
+ThreadListTraces Thread 4
+ wait (JI)V -1 -2
+ wait (J)V 1 442
+ wait ()V 2 568
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+
+---------
+ThreadListTraces Thread 6
+ wait (JI)V -1 -2
+ wait (J)V 1 442
+ wait ()V 2 568
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+
+---------
+ThreadListTraces Thread 8
+ wait (JI)V -1 -2
+ wait (J)V 1 442
+ wait ()V 2 568
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+
+---------
+Test911
+ getThreadListStackTraces ([Ljava/lang/Thread;I)[[Ljava/lang/Object; -1 -2
+ printList ([Ljava/lang/Thread;I)V 0 68
+ doTest ()V 115 56
+ run ()V 36 49
+
+---------
+ThreadListTraces Thread 0
+ wait (JI)V -1 -2
+ wait (J)V 1 442
+ wait ()V 2 568
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 37
+
+---------
+ThreadListTraces Thread 2
+ wait (JI)V -1 -2
+ wait (J)V 1 442
+ wait ()V 2 568
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 37
+
+---------
+ThreadListTraces Thread 4
+ wait (JI)V -1 -2
+ wait (J)V 1 442
+ wait ()V 2 568
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 37
+
+---------
+ThreadListTraces Thread 6
+ wait (JI)V -1 -2
+ wait (J)V 1 442
+ wait ()V 2 568
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 37
+
+---------
+ThreadListTraces Thread 8
+ wait (JI)V -1 -2
+ wait (J)V 1 442
+ wait ()V 2 568
+ printOrWait (IILart/ControlData;)V 24 47
+ baz (IIILart/ControlData;)Ljava/lang/Object; 2 32
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
+ bar (IIILart/ControlData;)J 0 26
+ foo (IIILart/ControlData;)I 0 21
+ run ()V 4 37
+
+
+###################
+### Same thread ###
+###################
+4
+JVMTI_ERROR_ILLEGAL_ARGUMENT
+[public static native java.lang.Object[] art.Frames.getFrameLocation(java.lang.Thread,int), ffffffff]
+[public static void art.Frames.doTestSameThread(), 3e]
+[public static void art.Frames.doTest() throws java.lang.Exception, 0]
+[public void art.Test911$1.run(), 2c]
+JVMTI_ERROR_NO_MORE_FRAMES
+
+################################
+### Other thread (suspended) ###
+################################
+20
+JVMTI_ERROR_ILLEGAL_ARGUMENT
+[public final native void java.lang.Object.wait(long,int) throws java.lang.InterruptedException, ffffffff]
+[public final void java.lang.Object.wait(long) throws java.lang.InterruptedException, 1]
+[public final void java.lang.Object.wait() throws java.lang.InterruptedException, 2]
+[private static void art.Recurse.printOrWait(int,int,art.ControlData), 18]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 2]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 8]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 8]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 8]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 8]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[public void art.Frames$1.run(), 4]
+JVMTI_ERROR_NO_MORE_FRAMES
+
+###########################
+### Other thread (live) ###
+###########################
+17
+JVMTI_ERROR_ILLEGAL_ARGUMENT
+[private static void art.Recurse.printOrWait(int,int,art.ControlData), 2d]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 2]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 8]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 8]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 8]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[private static java.lang.Object art.Recurse.baz(int,int,int,art.ControlData), 8]
+[private static long art.Recurse.bar(int,int,int,art.ControlData), 0]
+[public static int art.Recurse.foo(int,int,int,art.ControlData), 0]
+[public void art.Frames$2.run(), 4]
+JVMTI_ERROR_NO_MORE_FRAMES
+Done
diff --git a/test/911-get-stack-trace/expected.txt b/test/911-get-stack-trace/expected.txt
index 42e8aa7..c1d0378 100644
--- a/test/911-get-stack-trace/expected.txt
+++ b/test/911-get-stack-trace/expected.txt
@@ -22,7 +22,7 @@
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
  doTest ()V 31 25
- run ()V 0 25
+ run ()V 0 31
 ---------
  print (Ljava/lang/Thread;II)V 0 38
  printOrWait (IILart/ControlData;)V 6 41
@@ -42,7 +42,7 @@
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
  doTest ()V 35 26
- run ()V 0 25
+ run ()V 0 31
 ---------
  getStackTrace (Ljava/lang/Thread;II)[[Ljava/lang/String; -1 -2
  print (Ljava/lang/Thread;II)V 0 38
@@ -59,13 +59,13 @@
  printOrWait (IILart/ControlData;)V 6 41
 From bottom
 ---------
- run ()V 0 25
+ run ()V 0 31
 ---------
  baz (IIILart/ControlData;)Ljava/lang/Object; 8 34
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
  doTest ()V 61 33
- run ()V 0 25
+ run ()V 0 31
 ---------
  bar (IIILart/ControlData;)J 0 26
  foo (IIILart/ControlData;)I 0 21
@@ -393,7 +393,7 @@
  getAllStackTraces (I)[[Ljava/lang/Object; -1 -2
  printAll (I)V 0 75
  doTest ()V 120 59
- run ()V 24 37
+ run ()V 28 44
 
 ---------
 main
@@ -648,7 +648,7 @@
  getAllStackTraces (I)[[Ljava/lang/Object; -1 -2
  printAll (I)V 0 75
  doTest ()V 125 61
- run ()V 24 37
+ run ()V 28 44
 
 ---------
 main
@@ -680,7 +680,7 @@
  getThreadListStackTraces ([Ljava/lang/Thread;I)[[Ljava/lang/Object; -1 -2
  printList ([Ljava/lang/Thread;I)V 0 68
  doTest ()V 110 54
- run ()V 32 41
+ run ()V 36 49
 
 ---------
 ThreadListTraces Thread 0
@@ -737,7 +737,7 @@
  getThreadListStackTraces ([Ljava/lang/Thread;I)[[Ljava/lang/Object; -1 -2
  printList ([Ljava/lang/Thread;I)V 0 68
  doTest ()V 115 56
- run ()V 32 41
+ run ()V 36 49
 
 ---------
 ThreadListTraces Thread 0
@@ -863,7 +863,7 @@
 [public static native java.lang.Object[] art.Frames.getFrameLocation(java.lang.Thread,int), ffffffff]
 [public static void art.Frames.doTestSameThread(), 3e]
 [public static void art.Frames.doTest() throws java.lang.Exception, 0]
-[public void art.Test911$1.run(), 28]
+[public void art.Test911$1.run(), 2c]
 JVMTI_ERROR_NO_MORE_FRAMES
 
 ################################
diff --git a/test/911-get-stack-trace/src/Main.java b/test/911-get-stack-trace/src/Main.java
index 96705bc..706dc3d 100644
--- a/test/911-get-stack-trace/src/Main.java
+++ b/test/911-get-stack-trace/src/Main.java
@@ -16,6 +16,6 @@
 
 public class Main {
   public static void main(String[] args) throws Exception {
-    art.Test911.run();
+    art.Test911.run(true);
   }
 }
diff --git a/test/911-get-stack-trace/src/art/Test911.java b/test/911-get-stack-trace/src/art/Test911.java
index 5774546..9c22339 100644
--- a/test/911-get-stack-trace/src/art/Test911.java
+++ b/test/911-get-stack-trace/src/art/Test911.java
@@ -17,7 +17,13 @@
 package art;
 
 public class Test911 {
+  // CTS Entrypoint. We don't want to run 'AllTraces' since it breaks everytime somebody adds a new
+  // thread, which happens somewhat regularly.
   public static void run() throws Exception {
+    run(false);
+  }
+
+  public static void run(boolean known_thread_env) throws Exception {
     Thread t = new Thread("Test911") {
       @Override
       public void run() {
@@ -34,7 +40,9 @@
 
           System.out.println();
 
-          AllTraces.doTest();
+          if (known_thread_env) {
+            AllTraces.doTest();
+          }
 
           System.out.println();
 
diff --git a/test/912-classes/classes.cc b/test/912-classes/classes.cc
index 1f6954e..ff50223 100644
--- a/test/912-classes/classes.cc
+++ b/test/912-classes/classes.cc
@@ -596,5 +596,31 @@
   }
 }
 
+// Global to pass information to the ClassPrepare event.
+static jobject gRunnableGlobal = nullptr;
+extern "C" JNIEXPORT void JNICALL Java_art_Test912_runRecursiveClassPrepareEvents(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject runnable) {
+  CHECK(gRunnableGlobal == nullptr);
+  gRunnableGlobal = env->NewGlobalRef(runnable);
+  EnableEvents(
+      env,
+      true,
+      nullptr,
+      [](jvmtiEnv* jenv ATTRIBUTE_UNUSED,
+         JNIEnv* jni_env,
+         jthread thread ATTRIBUTE_UNUSED,
+         jclass klass ATTRIBUTE_UNUSED) -> void {
+        jclass runnable_class = jni_env->FindClass("java/lang/Runnable");
+        jni_env->CallVoidMethod(
+            gRunnableGlobal, jni_env->GetMethodID(runnable_class, "run", "()V"));
+      });
+  jclass runnable_class = env->FindClass("java/lang/Runnable");
+  env->CallVoidMethod(
+      runnable, env->GetMethodID(runnable_class, "run", "()V"));
+  EnableEvents(env, false, nullptr, nullptr);
+  env->DeleteGlobalRef(gRunnableGlobal);
+  gRunnableGlobal = nullptr;
+}
+
 }  // namespace Test912Classes
 }  // namespace art
diff --git a/test/912-classes/expected.txt b/test/912-classes/expected.txt
index 7ad5d60..d7699b6 100644
--- a/test/912-classes/expected.txt
+++ b/test/912-classes/expected.txt
@@ -91,3 +91,7 @@
 Prepare: L$Proxy21; on ClassEvents (cur=ClassEvents)
 Load: [Lart/Test912; on ClassEvents
 Prepare: [Lart/Test912; on ClassEvents (cur=ClassEvents)
+class-prepare event START!
+class-prepare event START!
+class-prepare event END!
+class-prepare event END!
diff --git a/test/912-classes/src-art/art/Test912.java b/test/912-classes/src-art/art/Test912.java
index 1a60185..a2e8934 100644
--- a/test/912-classes/src-art/art/Test912.java
+++ b/test/912-classes/src-art/art/Test912.java
@@ -105,6 +105,9 @@
     };
     classEventsThread.start();
     classEventsThread.join();
+
+    // b/146170757
+    TestRecursiveClassPrepareEvents();
   }
 
   private static void testClass(String className) throws Exception {
@@ -394,6 +397,32 @@
   private static native void setEqualityEventStorageClass(Class<?> c);
   private static native void enableClassLoadPrepareEqualityEvents(boolean b);
 
+  private static native void runRecursiveClassPrepareEvents(Runnable forceLoad);
+
+  private static void TestRecursiveClassPrepareEvents() {
+    final int[] called = new int[] { 0 };
+    runRecursiveClassPrepareEvents(() -> {
+      if (called[0] == 2) {
+        return;
+      } else {
+        called[0]++;
+      }
+      try {
+        System.out.println("class-prepare event START!");
+        // Load a new class in a new class-loader.
+        Class<?> class_loader_class = Class.forName("dalvik.system.InMemoryDexClassLoader");
+        Constructor<?> ctor = class_loader_class.getConstructor(ByteBuffer.class, ClassLoader.class);
+        Class<?> target = ((ClassLoader)ctor.newInstance(
+            ByteBuffer.wrap(DEX_BYTES), Test912.class.getClassLoader())).loadClass("Transform");
+        target.newInstance();
+      } catch (Exception e) { }
+      System.out.println("class-prepare event END!");
+    });
+    if (called[0] != 2) {
+      System.out.println("Failed to cause recursive Class prepare.");
+    }
+  }
+
   private static class TestForNonInit {
     public static double dummy = Math.random();  // So it can't be compile-time initialized.
   }
diff --git a/test/913-heaps/expected.txt b/test/913-heaps/expected.txt
index 1bd56d1..8fe2ba5 100644
--- a/test/913-heaps/expected.txt
+++ b/test/913-heaps/expected.txt
@@ -200,6 +200,7 @@
 ---- untagged objects
 root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestNonRoot,vreg=8,location= 31])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=4,method=runFollowReferences,vreg=3,location= 164])--> 1000@0 [size=123456780050, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
 root@root --(thread)--> 3000@0 [size=124, length=-1]
 1001@0 --(superclass)--> 1000@0 [size=123456780050, length=-1]
@@ -247,6 +248,7 @@
 root@root --(stack-local[id=1,tag=3000,depth=1,method=doFollowReferencesTestImpl,vreg=5,location= 8])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=13,location= 20])--> 1@1000 [size=16, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=2,method=doFollowReferencesTestRoot,vreg=4,location= 20])--> 1@1000 [size=16, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=4,method=runFollowReferences,vreg=3,location= 164])--> 1000@0 [size=123456780055, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
 root@root --(thread)--> 1@1000 [size=16, length=-1]
 root@root --(thread)--> 3000@0 [size=124, length=-1]
@@ -290,6 +292,7 @@
 ---
 ---- tagged classes
 root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=4,method=runFollowReferences,vreg=3,location= 181])--> 1000@0 [size=123456780060, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
 root@root --(thread)--> 3000@0 [size=124, length=-1]
 1001@0 --(superclass)--> 1000@0 [size=123456780060, length=-1]
@@ -317,6 +320,7 @@
 6@1000 --(class)--> 1000@0 [size=123456780060, length=-1]
 ---
 root@root --(jni-local[id=1,tag=3000,depth=0,method=followReferences])--> 3000@0 [size=124, length=-1]
+root@root --(stack-local[id=1,tag=3000,depth=4,method=runFollowReferences,vreg=3,location= 181])--> 1000@0 [size=123456780065, length=-1]
 root@root --(stack-local[id=1,tag=3000,depth=5,method=run,vreg=2,location= 0])--> 3000@0 [size=124, length=-1]
 root@root --(thread)--> 3000@0 [size=124, length=-1]
 1001@0 --(superclass)--> 1000@0 [size=123456780065, length=-1]
diff --git a/test/913-heaps/src/art/Main.java b/test/913-heaps/src/art/Main.java
new file mode 120000
index 0000000..84ae4ac
--- /dev/null
+++ b/test/913-heaps/src/art/Main.java
@@ -0,0 +1 @@
+../../../jvmti-common/Main.java
\ No newline at end of file
diff --git a/test/914-hello-obsolescence/src/art/Redefinition.java b/test/914-hello-obsolescence/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/914-hello-obsolescence/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/914-hello-obsolescence/src/art/Redefinition.java b/test/914-hello-obsolescence/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/914-hello-obsolescence/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/915-obsolete-2/src/art/Redefinition.java b/test/915-obsolete-2/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/915-obsolete-2/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/915-obsolete-2/src/art/Redefinition.java b/test/915-obsolete-2/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/915-obsolete-2/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/916-obsolete-jit/src/art/Redefinition.java b/test/916-obsolete-jit/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/916-obsolete-jit/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/916-obsolete-jit/src/art/Redefinition.java b/test/916-obsolete-jit/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/916-obsolete-jit/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/917-fields-transformation/src/art/Redefinition.java b/test/917-fields-transformation/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/917-fields-transformation/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/917-fields-transformation/src/art/Redefinition.java b/test/917-fields-transformation/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/917-fields-transformation/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/919-obsolete-fields/src/art/Redefinition.java b/test/919-obsolete-fields/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/919-obsolete-fields/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/919-obsolete-fields/src/art/Redefinition.java b/test/919-obsolete-fields/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/919-obsolete-fields/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/921-hello-failure/src/art/Redefinition.java b/test/921-hello-failure/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/921-hello-failure/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/921-hello-failure/src/art/Redefinition.java b/test/921-hello-failure/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/921-hello-failure/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/925-threadgroups/expected.txt b/test/925-threadgroups/expected.txt
index 7d1a259..9dfa37d 100644
--- a/test/925-threadgroups/expected.txt
+++ b/test/925-threadgroups/expected.txt
@@ -14,3 +14,8 @@
 system:
   [Thread[FinalizerDaemon,5,system], Thread[FinalizerWatchdogDaemon,5,system], Thread[HeapTaskDaemon,5,system], Thread[ReferenceQueueDaemon,5,system], Thread[Signal Catcher,5,system]]
   [java.lang.ThreadGroup[name=main,maxpri=10]]
+art.Test925$CustomThreadGroup[name=TEST GROUP,maxpri=10]
+  java.lang.ThreadGroup[name=main,maxpri=10]
+  TEST GROUP
+  10
+  false
diff --git a/test/925-threadgroups/src/art/Test925.java b/test/925-threadgroups/src/art/Test925.java
index 8d1e665..a63f4ce 100644
--- a/test/925-threadgroups/src/art/Test925.java
+++ b/test/925-threadgroups/src/art/Test925.java
@@ -22,6 +22,7 @@
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
+import java.util.concurrent.CountDownLatch;
 
 public class Test925 {
   public static void run() throws Exception {
@@ -49,6 +50,30 @@
     waitGroupChildren(rootGroup, 5 /* # daemons */, 30 /* timeout in seconds */);
 
     checkChildren(curGroup);
+
+    // Test custom groups
+    ThreadGroup testGroup = new CustomThreadGroup(curGroup, "TEST GROUP");
+    final CountDownLatch cdl = new CountDownLatch(1);
+    final CountDownLatch startup = new CountDownLatch(1);
+    Thread t2 = new Thread(testGroup, "Test Thread") {
+      public void run() {
+        startup.countDown();
+        try {
+          cdl.await();
+        } catch (Exception e) {}
+      }
+    };
+    t2.start();
+    startup.await();
+    printThreadGroupInfo(testGroup);
+    cdl.countDown();
+    t2.join();
+  }
+
+  private static final class CustomThreadGroup extends ThreadGroup {
+    public CustomThreadGroup(ThreadGroup parent, String name) {
+      super(parent, name);
+    }
   }
 
   private static void printThreadGroupInfo(ThreadGroup tg) {
@@ -99,7 +124,16 @@
     for (int i = 0; i <  timeoutS; i++) {
       Object[] data = getThreadGroupChildren(tg);
       Thread[] threads = (Thread[])data[0];
-      if (threads.length == expectedChildCount) {
+      List<Thread> lthreads = new ArrayList<>(Arrays.asList(threads));
+      Iterator<Thread> it = lthreads.iterator();
+      while (it.hasNext()) {
+        Thread t = it.next();
+        if (t.getName().startsWith("Jit thread pool worker")) {
+          it.remove();
+          break;
+        }
+      }
+      if (lthreads.size() == expectedChildCount) {
         return;
       }
       Thread.sleep(1000);
diff --git a/test/926-multi-obsolescence/src/art/Redefinition.java b/test/926-multi-obsolescence/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/926-multi-obsolescence/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/926-multi-obsolescence/src/art/Redefinition.java b/test/926-multi-obsolescence/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/926-multi-obsolescence/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/928-jni-table/info.txt b/test/928-jni-table/info.txt
index 875a5f6..a5da6eb 100644
--- a/test/928-jni-table/info.txt
+++ b/test/928-jni-table/info.txt
@@ -1 +1,5 @@
 Tests basic functions in the jvmti plugin.
+
+Tests that we can do basic things by replacing the JNIEnv vtable.
+
+TODO: We should really do more with this test.
diff --git a/test/928-jni-table/jni_table.cc b/test/928-jni-table/jni_table.cc
index 9a8b7fe..1dfe34b 100644
--- a/test/928-jni-table/jni_table.cc
+++ b/test/928-jni-table/jni_table.cc
@@ -39,6 +39,25 @@
   return gOriginalEnv->NewGlobalRef(env, o);
 }
 
+static void DoDeleteGlobalRef(JNIEnv* env, jobject o) {
+  jclass thr = env->FindClass("java/lang/Thread");
+  CHECK(thr != nullptr);
+  if (env->IsInstanceOf(o, thr)) {
+    jvmtiThreadInfo jti;
+    // b/146170834: This could cause DCHECK failures.
+    CHECK_EQ(jvmti_env->GetThreadInfo(reinterpret_cast<jthread>(o), &jti), JVMTI_ERROR_NONE);
+  }
+  gOriginalEnv->DeleteGlobalRef(env, o);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Test928_doOtherThreadTest(JNIEnv* env, jclass klass) {
+  size_t start_other_thread_count = gGlobalRefCount;
+  // Make sure it still works even on another thread.
+  jobject global = env->NewGlobalRef(klass);
+  CHECK_EQ(start_other_thread_count + 1, gGlobalRefCount);
+  env->DeleteGlobalRef(global);
+}
+
 extern "C" JNIEXPORT void JNICALL Java_art_Test928_doJNITableTest(
     JNIEnv* env, jclass klass) {
   // Get the current table, as the delegate.
@@ -55,6 +74,7 @@
   }
 
   env_override->NewGlobalRef = CountNewGlobalRef;
+  env_override->DeleteGlobalRef = DoDeleteGlobalRef;
   gGlobalRefCount = 0;
 
   // Install the override.
@@ -67,14 +87,21 @@
   CHECK_EQ(1u, gGlobalRefCount);
   env->DeleteGlobalRef(global);
 
+  // Try and create and destroy a thread.
+  env->CallStaticVoidMethod(klass, env->GetStaticMethodID(klass, "runThreadTest", "()V"));
+  // Make sure something got ref'd, in the other thread we make and then clear a global ref so that
+  // should at least be present.
+  CHECK_LT(1u, gGlobalRefCount);
+
   // Install the "original." There is no real reset.
+  size_t final_global_ref_count = gGlobalRefCount;
   jvmtiError setoverride2_result = jvmti_env->SetJNIFunctionTable(gOriginalEnv);
   if (JvmtiErrorToException(env, jvmti_env, setoverride2_result)) {
     return;
   }
 
   jobject global2 = env->NewGlobalRef(klass);
-  CHECK_EQ(1u, gGlobalRefCount);
+  CHECK_EQ(final_global_ref_count, gGlobalRefCount);
   env->DeleteGlobalRef(global2);
 
   // Try to install null. Should return NULL_POINTER error.
diff --git a/test/928-jni-table/src/art/Test928.java b/test/928-jni-table/src/art/Test928.java
index 0fbfb7e..1bea2a5 100644
--- a/test/928-jni-table/src/art/Test928.java
+++ b/test/928-jni-table/src/art/Test928.java
@@ -23,5 +23,15 @@
     System.out.println("Done");
   }
 
+  // Called by native code.
+  public static void runThreadTest() throws Exception {
+    Thread t = new Thread(() -> {
+      doOtherThreadTest();
+    });
+    t.start();
+    t.join();
+  }
+
   public static native void doJNITableTest();
+  public static native void doOtherThreadTest();
 }
diff --git a/test/930-hello-retransform/src/art/Redefinition.java b/test/930-hello-retransform/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/930-hello-retransform/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/930-hello-retransform/src/art/Redefinition.java b/test/930-hello-retransform/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/930-hello-retransform/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/932-transform-saves/src/art/Redefinition.java b/test/932-transform-saves/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/932-transform-saves/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/932-transform-saves/src/art/Redefinition.java b/test/932-transform-saves/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/932-transform-saves/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/934-load-transform/src/art/Redefinition.java b/test/934-load-transform/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/934-load-transform/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/934-load-transform/src/art/Redefinition.java b/test/934-load-transform/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/934-load-transform/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/935-non-retransformable/src/art/Redefinition.java b/test/935-non-retransformable/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/935-non-retransformable/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/935-non-retransformable/src/art/Redefinition.java b/test/935-non-retransformable/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/935-non-retransformable/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/937-hello-retransform-package/src/art/Redefinition.java b/test/937-hello-retransform-package/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/937-hello-retransform-package/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/937-hello-retransform-package/src/art/Redefinition.java b/test/937-hello-retransform-package/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/937-hello-retransform-package/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/938-load-transform-bcp/src/art/Redefinition.java b/test/938-load-transform-bcp/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/938-load-transform-bcp/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/938-load-transform-bcp/src/art/Redefinition.java b/test/938-load-transform-bcp/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/938-load-transform-bcp/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/939-hello-transformation-bcp/src/art/Redefinition.java b/test/939-hello-transformation-bcp/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/939-hello-transformation-bcp/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/939-hello-transformation-bcp/src/art/Redefinition.java b/test/939-hello-transformation-bcp/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/939-hello-transformation-bcp/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/940-recursive-obsolete/src/art/Redefinition.java b/test/940-recursive-obsolete/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/940-recursive-obsolete/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/940-recursive-obsolete/src/art/Redefinition.java b/test/940-recursive-obsolete/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/940-recursive-obsolete/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/941-recurive-obsolete-jit/src/art/Redefinition.java b/test/941-recurive-obsolete-jit/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/941-recurive-obsolete-jit/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/941-recurive-obsolete-jit/src/art/Redefinition.java b/test/941-recurive-obsolete-jit/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/941-recurive-obsolete-jit/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/942-private-recursive/src/art/Redefinition.java b/test/942-private-recursive/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/942-private-recursive/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/942-private-recursive/src/art/Redefinition.java b/test/942-private-recursive/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/942-private-recursive/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/943-private-recursive-jit/src/art/Redefinition.java b/test/943-private-recursive-jit/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/943-private-recursive-jit/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/943-private-recursive-jit/src/art/Redefinition.java b/test/943-private-recursive-jit/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/943-private-recursive-jit/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/944-transform-classloaders/src/art/Redefinition.java b/test/944-transform-classloaders/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/944-transform-classloaders/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/944-transform-classloaders/src/art/Redefinition.java b/test/944-transform-classloaders/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/944-transform-classloaders/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/945-obsolete-native/src/art/Redefinition.java b/test/945-obsolete-native/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/945-obsolete-native/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/945-obsolete-native/src/art/Redefinition.java b/test/945-obsolete-native/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/945-obsolete-native/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/946-obsolete-throw/src/art/Redefinition.java b/test/946-obsolete-throw/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/946-obsolete-throw/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/946-obsolete-throw/src/art/Redefinition.java b/test/946-obsolete-throw/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/946-obsolete-throw/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/947-reflect-method/src/art/Redefinition.java b/test/947-reflect-method/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/947-reflect-method/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/947-reflect-method/src/art/Redefinition.java b/test/947-reflect-method/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/947-reflect-method/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/948-change-annotations/src/art/Redefinition.java b/test/948-change-annotations/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/948-change-annotations/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/948-change-annotations/src/art/Redefinition.java b/test/948-change-annotations/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/948-change-annotations/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/949-in-memory-transform/src/art/Redefinition.java b/test/949-in-memory-transform/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/949-in-memory-transform/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/949-in-memory-transform/src/art/Redefinition.java b/test/949-in-memory-transform/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/949-in-memory-transform/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/950-redefine-intrinsic/src/art/Redefinition.java b/test/950-redefine-intrinsic/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/950-redefine-intrinsic/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/950-redefine-intrinsic/src/art/Redefinition.java b/test/950-redefine-intrinsic/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/950-redefine-intrinsic/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/951-threaded-obsolete/src/art/Redefinition.java b/test/951-threaded-obsolete/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/951-threaded-obsolete/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/951-threaded-obsolete/src/art/Redefinition.java b/test/951-threaded-obsolete/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/951-threaded-obsolete/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/952-invoke-custom/expected.txt b/test/952-invoke-custom/expected.txt
index 7e8ffa6..ba41787 100644
--- a/test/952-invoke-custom/expected.txt
+++ b/test/952-invoke-custom/expected.txt
@@ -11,6 +11,15 @@
 100
 -9000
 9000
+Linking _add (int,int,int,int,int,int)int
+15
+188
+5728
+Linking _multiply (Double,Double,Double,Double,Double,Double)Double
+1.0
+111.0
+12543.0
+2310.0
 TestLinkerUnrelatedBSM
 Winners 1 Votes 16
 TestInvocationKinds
diff --git a/test/952-invoke-custom/src/Main.java b/test/952-invoke-custom/src/Main.java
index d2250a9..4745a06 100644
--- a/test/952-invoke-custom/src/Main.java
+++ b/test/952-invoke-custom/src/Main.java
@@ -44,6 +44,21 @@
         TestLinkerMethodMultipleArgumentTypes.test(-1000, 10000);
     }
 
+    private static void TestLinkerMethodWithRange() throws Throwable {
+        TestLinkerMethodWithRange.test(0, 1, 2, 3, 4, 5);
+        TestLinkerMethodWithRange.test(-101, -79, 113, 9, 17, 229);
+        TestLinkerMethodWithRange.test(811, 823, 947, 967, 1087, 1093);
+
+        TestLinkerMethodWithRange.test(null, null, null, null, null, null);
+        TestLinkerMethodWithRange.test(Double.valueOf(1.0), null, Double.valueOf(3.0), null,
+                                       Double.valueOf(37.0), null);
+        TestLinkerMethodWithRange.test(null, Double.valueOf(3.0), null,
+                                       Double.valueOf(37.0), null, Double.valueOf(113.0));
+        TestLinkerMethodWithRange.test(Double.valueOf(1.0), Double.valueOf(2.0),
+                                       Double.valueOf(3.0), Double.valueOf(5.0),
+                                       Double.valueOf(7.0), Double.valueOf(11.0));
+    }
+
     private static void TestLinkerMethodMinimalArguments() throws Throwable {
         try {
             TestLinkerMethodMinimalArguments.test(
@@ -78,6 +93,7 @@
         TestUninitializedCallSite();
         TestLinkerMethodMinimalArguments();
         TestLinkerMethodMultipleArgumentTypes();
+        TestLinkerMethodWithRange();
         TestLinkerUnrelatedBSM.test();
         TestInvokeCustomWithConcurrentThreads.test();
         TestInvocationKinds.test();
diff --git a/test/952-invoke-custom/src/TestInvokeCustomWithConcurrentThreads.java b/test/952-invoke-custom/src/TestInvokeCustomWithConcurrentThreads.java
index 2ef7ff7..2414046 100644
--- a/test/952-invoke-custom/src/TestInvokeCustomWithConcurrentThreads.java
+++ b/test/952-invoke-custom/src/TestInvokeCustomWithConcurrentThreads.java
@@ -43,7 +43,7 @@
     // Array of counters for how many times each instantiated call site is called
     private static final AtomicInteger[] called = new AtomicInteger[NUMBER_OF_THREADS];
 
-    // Array of call site indicies of which call site a thread invoked
+    // Array of call site indices of which call site a thread invoked
     private static final AtomicInteger[] targetted = new AtomicInteger[NUMBER_OF_THREADS];
 
     // Synchronization barrier all threads will wait on in the bootstrap method.
diff --git a/test/952-invoke-custom/src/TestLinkerMethodWithRange.java b/test/952-invoke-custom/src/TestLinkerMethodWithRange.java
new file mode 100644
index 0000000..8114303
--- /dev/null
+++ b/test/952-invoke-custom/src/TestLinkerMethodWithRange.java
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import annotations.BootstrapMethod;
+import annotations.CalledByIndy;
+import annotations.Constant;
+import java.lang.invoke.CallSite;
+import java.lang.invoke.ConstantCallSite;
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
+
+// Tests for methods generating invoke-custom/range.
+public class TestLinkerMethodWithRange extends TestBase {
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestLinkerMethodWithRange.class,
+                    name = "primLinkerMethod",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                        int.class,
+                        int.class,
+                        int.class,
+                        int.class,
+                        int.class,
+                        float.class,
+                        double.class,
+                        String.class,
+                        Class.class,
+                        long.class
+                    }
+                ),
+        fieldOrMethodName = "_add",
+        returnType = int.class,
+        parameterTypes = {int.class, int.class, int.class, int.class, int.class, int.class},
+        constantArgumentsForBootstrapMethod = {
+            @Constant(intValue = -1),
+            @Constant(intValue = 1),
+            @Constant(intValue = (int) 'a'),
+            @Constant(intValue = 1024),
+            @Constant(intValue = 1),
+            @Constant(floatValue = 11.1f),
+            @Constant(doubleValue = 2.2),
+            @Constant(stringValue = "Hello"),
+            @Constant(classValue = TestLinkerMethodWithRange.class),
+            @Constant(longValue = 123456789L)
+        }
+    )
+
+    private static int add(int a, int b, int c, int d, int e, int f) {
+        assertNotReached();
+        return -1;
+    }
+
+    @SuppressWarnings("unused")
+    private static int _add(int a, int b, int c, int d, int e, int f) {
+        return a + b + c + d + e + f;
+    }
+
+    @SuppressWarnings("unused")
+    private static CallSite primLinkerMethod(
+            MethodHandles.Lookup caller,
+            String name,
+            MethodType methodType,
+            int v1,
+            int v2,
+            int v3,
+            int v4,
+            int v5,
+            float v6,
+            double v7,
+            String v8,
+            Class<?> v9,
+            long v10)
+            throws Throwable {
+        System.out.println("Linking " + name + " " + methodType);
+        assertEquals(-1, v1);
+        assertEquals(1, v2);
+        assertEquals('a', v3);
+        assertEquals(1024, v4);
+        assertEquals(1, v5);
+        assertEquals(11.1f, v6);
+        assertEquals(2.2, v7);
+        assertEquals("Hello", v8);
+        assertEquals(TestLinkerMethodWithRange.class, v9);
+        assertEquals(123456789L, v10);
+        MethodHandle mh_add =
+                caller.findStatic(TestLinkerMethodWithRange.class, name, methodType);
+        return new ConstantCallSite(mh_add);
+    }
+
+    public static void test(int u, int v, int w, int x, int y, int z) throws Throwable {
+        assertEquals(u + v + w + x + y + z, add(u, v, w, x, y, z));
+        System.out.println(u + v + w + x + y + z);
+    }
+
+    @CalledByIndy(
+        bootstrapMethod =
+                @BootstrapMethod(
+                    enclosingType = TestLinkerMethodWithRange.class,
+                    name = "refLinkerMethod",
+                    parameterTypes = {
+                        MethodHandles.Lookup.class,
+                        String.class,
+                        MethodType.class,
+                    }
+                ),
+        fieldOrMethodName = "_multiply",
+        returnType = Integer.class,
+        parameterTypes = {Double.class, Double.class, Double.class,
+                          Double.class, Double.class, Double.class},
+        constantArgumentsForBootstrapMethod = {}
+    )
+
+    private static Double multiply(Double a, Double b, Double c, Double d, Double e, Double f) {
+        assertNotReached();
+        return 0.0;
+    }
+
+    @SuppressWarnings("unused")
+    private static Double _multiply(Double a, Double b, Double c, Double d, Double e, Double f) {
+        Double[] values = new Double[] { a, b, c, d, e, f };
+        Double product = 1.0;
+        for (Double value : values) {
+            if (value != null) {
+                product *= value;
+            }
+        }
+        return product;
+    }
+
+    @SuppressWarnings("unused")
+    private static CallSite refLinkerMethod(
+            MethodHandles.Lookup caller, String name, MethodType methodType) throws Throwable {
+        System.out.println("Linking " + name + " " + methodType);
+        MethodHandle mh_multiply =
+                caller.findStatic(TestLinkerMethodWithRange.class, name, methodType);
+        return new ConstantCallSite(mh_multiply);
+    }
+
+    public static void test(Double u, Double v, Double w, Double x, Double y, Double z)
+            throws Throwable {
+        Double product = 1.0;
+        if (u != null) product *= u;
+        if (v != null) product *= v;
+        if (w != null) product *= w;
+        if (x != null) product *= x;
+        if (y != null) product *= y;
+        if (z != null) product *= z;
+        assertEquals(product, multiply(u, v, w, x, y, z));
+        System.out.println(product);
+    }
+}
diff --git a/test/954-invoke-polymorphic-verifier/check b/test/954-invoke-polymorphic-verifier/check
index dc5ddb7..85db5ae 100755
--- a/test/954-invoke-polymorphic-verifier/check
+++ b/test/954-invoke-polymorphic-verifier/check
@@ -14,6 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Strip out temporary file path information and indicies from output.
+# Strip out temporary file path information and indices from output.
 sed -e "s/ [(]declaration of.*//" -e "s/\[0x[0-9A-F]*\] //g" "$2" > "$2.tmp"
 diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/956-methodhandles/expected.txt b/test/956-methodhandles/expected.txt
index a8b609b..206ab20 100644
--- a/test/956-methodhandles/expected.txt
+++ b/test/956-methodhandles/expected.txt
@@ -26,3 +26,14 @@
 Don't expect Hi now
 [3, 2, 1]
 [1, 2, 3]
+Trying to call public abstract void java.util.function.Consumer.accept(java.lang.Object)
+Called accept with foo
+Trying to call public default java.util.function.Consumer java.util.function.Consumer.andThen(java.util.function.Consumer)
+Trying to call public abstract void java.util.function.Consumer.accept(java.lang.Object)
+Called accept with bar
+and then bar
+Ignoring and then
+Got hello
+Got hello there
+Called and then with hello there
+Got expected IAE when invoke-special on an abstract interface method
diff --git a/test/956-methodhandles/src/Main.java b/test/956-methodhandles/src/Main.java
index 11d6ead..e70c83b 100644
--- a/test/956-methodhandles/src/Main.java
+++ b/test/956-methodhandles/src/Main.java
@@ -24,12 +24,15 @@
 import java.lang.reflect.Field;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
 import java.nio.charset.Charset;
 import java.nio.charset.StandardCharsets;
+import java.util.AbstractList;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
-
+import java.util.function.Consumer;
 import other.Chatty;
 
 public class Main {
@@ -92,6 +95,11 @@
     }
   }
 
+  public static class I {
+    public static void someVoidMethod() {
+    }
+  }
+
   public static void main(String[] args) throws Throwable {
     testfindSpecial_invokeSuperBehaviour();
     testfindSpecial_invokeDirectBehaviour();
@@ -108,6 +116,7 @@
     testVariableArity_MethodHandles_bind();
     testRevealDirect();
     testReflectiveCalls();
+    testInterfaceSpecial();
   }
 
   public static void testfindSpecial_invokeSuperBehaviour() throws Throwable {
@@ -631,6 +640,30 @@
       fail();
     } catch (WrongMethodTypeException expected) {
     }
+
+    // Zero / null introduction
+    MethodHandle voidMH = MethodHandles.lookup().findStatic(I.class, "someVoidMethod",
+                                                            MethodType.methodType(void.class));
+    {
+      MethodHandle booleanMH = voidMH.asType(MethodType.methodType(boolean.class));
+      assertEquals(boolean.class, booleanMH.type().returnType());
+      assertEquals(false, booleanMH.invoke());
+    }
+    {
+      MethodHandle intMH = voidMH.asType(MethodType.methodType(int.class));
+      assertEquals(int.class, intMH.type().returnType());
+      assertEquals(0, intMH.invoke());
+    }
+    {
+      MethodHandle longMH = voidMH.asType(MethodType.methodType(long.class));
+      assertEquals(long.class, longMH.type().returnType());
+      assertEquals(0L, longMH.invoke());
+    }
+    {
+      MethodHandle objMH = voidMH.asType(MethodType.methodType(Object.class));
+      assertEquals(Object.class, objMH.type().returnType());
+      assertEquals(null, objMH.invoke());
+    }
   }
 
   public static void assertTrue(boolean value) {
@@ -752,6 +785,14 @@
             Object[].class, MethodType.methodType(void.class));
         fail("Unexpected success for array class type for findConstructor");
     } catch (NoSuchMethodException e) {}
+
+    // Child class constructor (b/143343351)
+    {
+        MethodHandle handle = MethodHandles.lookup().findConstructor(
+            ArrayList.class, MethodType.methodType(void.class));
+        AbstractList list = (AbstractList) handle.asType(MethodType.methodType(AbstractList.class))
+                .invokeExact();
+    }
   }
 
   public static void testStringConstructors() throws Throwable {
@@ -871,6 +912,16 @@
       fail("Unexpected string constructor result: '" + s + "'");
     }
 
+    // Child class constructor (b/143343351)
+    {
+        MethodHandle handle = MethodHandles.lookup().findConstructor(
+            String.class, MethodType.methodType(void.class));
+        CharSequence o = (CharSequence) handle.asType(MethodType.methodType(CharSequence.class))
+                .invokeExact();
+        if (!o.equals("")) {
+            fail("Unexpected child class constructor result: '" + o + "'");
+        }
+    }
     System.out.println("String constructors done.");
   }
 
@@ -1780,4 +1831,73 @@
       }
     }
   }
+
+  public static void testInterfaceSpecial() throws Throwable {
+    final Method acceptMethod = Consumer.class.getDeclaredMethod("accept", Object.class);
+    final Method andThenMethod = Consumer.class.getDeclaredMethod("andThen", Consumer.class);
+    // Proxies
+    Consumer<Object> c = (Consumer<Object>)Proxy.newProxyInstance(
+        Main.class.getClassLoader(),
+        new Class<?>[] { Consumer.class },
+        (p, m, a) -> {
+          System.out.println("Trying to call " + m);
+          if (m.equals(andThenMethod)) {
+            List<Object> args = a == null ? Collections.EMPTY_LIST : Arrays.asList(a);
+            return MethodHandles.lookup()
+                                .findSpecial(Consumer.class,
+                                             m.getName(),
+                                             MethodType.methodType(m.getReturnType(),
+                                                                   m.getParameterTypes()),
+                                             p.getClass())
+                                .bindTo(p)
+                                .invokeWithArguments(args);
+          } else if (m.equals(acceptMethod)) {
+            System.out.println("Called accept with " + a[0]);
+          }
+          return null;
+        });
+    c.accept("foo");
+    Consumer<Object> c2 = c.andThen((Object o) -> { System.out.println("and then " + o); });
+    c2.accept("bar");
+
+    // Non-proxies
+    Consumer<Object> c3 = new Consumer() {
+      public void accept(Object o) {
+        System.out.println("Got " + o);
+      }
+      @Override
+      public Consumer<Object> andThen(Consumer c) {
+        System.out.println("Ignoring and then");
+        return this;
+      }
+    };
+    Consumer<Object> c4 = c3.andThen((x) -> { throw new Error("Failed"); });
+    c4.accept("hello");
+    Consumer<Object> andthen = (Object o) -> { System.out.println("Called and then with " + o);};
+    Consumer<Object> c5 =
+        (Consumer<Object>)MethodHandles.lookup()
+                                       .findSpecial(Consumer.class,
+                                                    andThenMethod.getName(),
+                                                    MethodType.methodType(
+                                                          andThenMethod.getReturnType(),
+                                                          andThenMethod.getParameterTypes()),
+                                                    c3.getClass())
+                                       .bindTo(c3)
+                                       .invoke(andthen);
+    c5.accept("hello there");
+
+    // Failures
+    MethodHandle abstract_target =
+        MethodHandles.lookup()
+                    .findSpecial(Consumer.class,
+                                 acceptMethod.getName(),
+                                 MethodType.methodType(acceptMethod.getReturnType(),
+                                                       acceptMethod.getParameterTypes()),
+                                 c3.getClass());
+    try {
+      abstract_target.invoke(c3, "hello");
+    } catch (IllegalAccessException e) {
+      System.out.println("Got expected IAE when invoke-special on an abstract interface method");
+    }
+  }
 }
diff --git a/test/982-ok-no-retransform/src/art/Redefinition.java b/test/982-ok-no-retransform/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/982-ok-no-retransform/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/982-ok-no-retransform/src/art/Redefinition.java b/test/982-ok-no-retransform/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/982-ok-no-retransform/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/983-source-transform-verify/src/art/Redefinition.java b/test/983-source-transform-verify/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/983-source-transform-verify/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/983-source-transform-verify/src/art/Redefinition.java b/test/983-source-transform-verify/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/983-source-transform-verify/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/984-obsolete-invoke/src/art/Redefinition.java b/test/984-obsolete-invoke/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/984-obsolete-invoke/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/984-obsolete-invoke/src/art/Redefinition.java b/test/984-obsolete-invoke/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/984-obsolete-invoke/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/985-re-obsolete/src/art/Redefinition.java b/test/985-re-obsolete/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/985-re-obsolete/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/985-re-obsolete/src/art/Redefinition.java b/test/985-re-obsolete/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/985-re-obsolete/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/988-method-trace/src/art/Trace.java b/test/988-method-trace/src/art/Trace.java
deleted file mode 100644
index 8999bb1..0000000
--- a/test/988-method-trace/src/art/Trace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-
-public class Trace {
-  public static native void enableTracing(Class<?> methodClass,
-                                          Method entryMethod,
-                                          Method exitMethod,
-                                          Method fieldAccess,
-                                          Method fieldModify,
-                                          Method singleStep,
-                                          Thread thr);
-  public static native void disableTracing(Thread thr);
-
-  public static void enableFieldTracing(Class<?> methodClass,
-                                        Method fieldAccess,
-                                        Method fieldModify,
-                                        Thread thr) {
-    enableTracing(methodClass, null, null, fieldAccess, fieldModify, null, thr);
-  }
-
-  public static void enableMethodTracing(Class<?> methodClass,
-                                         Method entryMethod,
-                                         Method exitMethod,
-                                         Thread thr) {
-    enableTracing(methodClass, entryMethod, exitMethod, null, null, null, thr);
-  }
-
-  public static void enableSingleStepTracing(Class<?> methodClass,
-                                             Method singleStep,
-                                             Thread thr) {
-    enableTracing(methodClass, null, null, null, null, singleStep, thr);
-  }
-
-  public static native void watchFieldAccess(Field f);
-  public static native void watchFieldModification(Field f);
-  public static native void watchAllFieldAccesses();
-  public static native void watchAllFieldModifications();
-
-  // the names, arguments, and even line numbers of these functions are embedded in the tests so we
-  // need to add to the bottom and not modify old ones to maintain compat.
-  public static native void enableTracing2(Class<?> methodClass,
-                                           Method entryMethod,
-                                           Method exitMethod,
-                                           Method fieldAccess,
-                                           Method fieldModify,
-                                           Method singleStep,
-                                           Method ThreadStart,
-                                           Method ThreadEnd,
-                                           Thread thr);
-}
diff --git a/test/988-method-trace/src/art/Trace.java b/test/988-method-trace/src/art/Trace.java
new file mode 120000
index 0000000..5d9b44b
--- /dev/null
+++ b/test/988-method-trace/src/art/Trace.java
@@ -0,0 +1 @@
+../../../jvmti-common/Trace.java
\ No newline at end of file
diff --git a/test/989-method-trace-throw/src/art/Trace.java b/test/989-method-trace-throw/src/art/Trace.java
deleted file mode 100644
index 8999bb1..0000000
--- a/test/989-method-trace-throw/src/art/Trace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-
-public class Trace {
-  public static native void enableTracing(Class<?> methodClass,
-                                          Method entryMethod,
-                                          Method exitMethod,
-                                          Method fieldAccess,
-                                          Method fieldModify,
-                                          Method singleStep,
-                                          Thread thr);
-  public static native void disableTracing(Thread thr);
-
-  public static void enableFieldTracing(Class<?> methodClass,
-                                        Method fieldAccess,
-                                        Method fieldModify,
-                                        Thread thr) {
-    enableTracing(methodClass, null, null, fieldAccess, fieldModify, null, thr);
-  }
-
-  public static void enableMethodTracing(Class<?> methodClass,
-                                         Method entryMethod,
-                                         Method exitMethod,
-                                         Thread thr) {
-    enableTracing(methodClass, entryMethod, exitMethod, null, null, null, thr);
-  }
-
-  public static void enableSingleStepTracing(Class<?> methodClass,
-                                             Method singleStep,
-                                             Thread thr) {
-    enableTracing(methodClass, null, null, null, null, singleStep, thr);
-  }
-
-  public static native void watchFieldAccess(Field f);
-  public static native void watchFieldModification(Field f);
-  public static native void watchAllFieldAccesses();
-  public static native void watchAllFieldModifications();
-
-  // the names, arguments, and even line numbers of these functions are embedded in the tests so we
-  // need to add to the bottom and not modify old ones to maintain compat.
-  public static native void enableTracing2(Class<?> methodClass,
-                                           Method entryMethod,
-                                           Method exitMethod,
-                                           Method fieldAccess,
-                                           Method fieldModify,
-                                           Method singleStep,
-                                           Method ThreadStart,
-                                           Method ThreadEnd,
-                                           Thread thr);
-}
diff --git a/test/989-method-trace-throw/src/art/Trace.java b/test/989-method-trace-throw/src/art/Trace.java
new file mode 120000
index 0000000..5d9b44b
--- /dev/null
+++ b/test/989-method-trace-throw/src/art/Trace.java
@@ -0,0 +1 @@
+../../../jvmti-common/Trace.java
\ No newline at end of file
diff --git a/test/990-field-trace/src/art/Trace.java b/test/990-field-trace/src/art/Trace.java
deleted file mode 100644
index 8999bb1..0000000
--- a/test/990-field-trace/src/art/Trace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-
-public class Trace {
-  public static native void enableTracing(Class<?> methodClass,
-                                          Method entryMethod,
-                                          Method exitMethod,
-                                          Method fieldAccess,
-                                          Method fieldModify,
-                                          Method singleStep,
-                                          Thread thr);
-  public static native void disableTracing(Thread thr);
-
-  public static void enableFieldTracing(Class<?> methodClass,
-                                        Method fieldAccess,
-                                        Method fieldModify,
-                                        Thread thr) {
-    enableTracing(methodClass, null, null, fieldAccess, fieldModify, null, thr);
-  }
-
-  public static void enableMethodTracing(Class<?> methodClass,
-                                         Method entryMethod,
-                                         Method exitMethod,
-                                         Thread thr) {
-    enableTracing(methodClass, entryMethod, exitMethod, null, null, null, thr);
-  }
-
-  public static void enableSingleStepTracing(Class<?> methodClass,
-                                             Method singleStep,
-                                             Thread thr) {
-    enableTracing(methodClass, null, null, null, null, singleStep, thr);
-  }
-
-  public static native void watchFieldAccess(Field f);
-  public static native void watchFieldModification(Field f);
-  public static native void watchAllFieldAccesses();
-  public static native void watchAllFieldModifications();
-
-  // the names, arguments, and even line numbers of these functions are embedded in the tests so we
-  // need to add to the bottom and not modify old ones to maintain compat.
-  public static native void enableTracing2(Class<?> methodClass,
-                                           Method entryMethod,
-                                           Method exitMethod,
-                                           Method fieldAccess,
-                                           Method fieldModify,
-                                           Method singleStep,
-                                           Method ThreadStart,
-                                           Method ThreadEnd,
-                                           Thread thr);
-}
diff --git a/test/990-field-trace/src/art/Trace.java b/test/990-field-trace/src/art/Trace.java
new file mode 120000
index 0000000..5d9b44b
--- /dev/null
+++ b/test/990-field-trace/src/art/Trace.java
@@ -0,0 +1 @@
+../../../jvmti-common/Trace.java
\ No newline at end of file
diff --git a/test/991-field-trace-2/src/art/Trace.java b/test/991-field-trace-2/src/art/Trace.java
deleted file mode 100644
index 8999bb1..0000000
--- a/test/991-field-trace-2/src/art/Trace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-
-public class Trace {
-  public static native void enableTracing(Class<?> methodClass,
-                                          Method entryMethod,
-                                          Method exitMethod,
-                                          Method fieldAccess,
-                                          Method fieldModify,
-                                          Method singleStep,
-                                          Thread thr);
-  public static native void disableTracing(Thread thr);
-
-  public static void enableFieldTracing(Class<?> methodClass,
-                                        Method fieldAccess,
-                                        Method fieldModify,
-                                        Thread thr) {
-    enableTracing(methodClass, null, null, fieldAccess, fieldModify, null, thr);
-  }
-
-  public static void enableMethodTracing(Class<?> methodClass,
-                                         Method entryMethod,
-                                         Method exitMethod,
-                                         Thread thr) {
-    enableTracing(methodClass, entryMethod, exitMethod, null, null, null, thr);
-  }
-
-  public static void enableSingleStepTracing(Class<?> methodClass,
-                                             Method singleStep,
-                                             Thread thr) {
-    enableTracing(methodClass, null, null, null, null, singleStep, thr);
-  }
-
-  public static native void watchFieldAccess(Field f);
-  public static native void watchFieldModification(Field f);
-  public static native void watchAllFieldAccesses();
-  public static native void watchAllFieldModifications();
-
-  // the names, arguments, and even line numbers of these functions are embedded in the tests so we
-  // need to add to the bottom and not modify old ones to maintain compat.
-  public static native void enableTracing2(Class<?> methodClass,
-                                           Method entryMethod,
-                                           Method exitMethod,
-                                           Method fieldAccess,
-                                           Method fieldModify,
-                                           Method singleStep,
-                                           Method ThreadStart,
-                                           Method ThreadEnd,
-                                           Thread thr);
-}
diff --git a/test/991-field-trace-2/src/art/Trace.java b/test/991-field-trace-2/src/art/Trace.java
new file mode 120000
index 0000000..5d9b44b
--- /dev/null
+++ b/test/991-field-trace-2/src/art/Trace.java
@@ -0,0 +1 @@
+../../../jvmti-common/Trace.java
\ No newline at end of file
diff --git a/test/993-breakpoints/src/art/Breakpoint.java b/test/993-breakpoints/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/993-breakpoints/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/993-breakpoints/src/art/Breakpoint.java b/test/993-breakpoints/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/993-breakpoints/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/994-breakpoint-line/src/art/Breakpoint.java b/test/994-breakpoint-line/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/994-breakpoint-line/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/994-breakpoint-line/src/art/Breakpoint.java b/test/994-breakpoint-line/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/994-breakpoint-line/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/995-breakpoints-throw/src/art/Breakpoint.java b/test/995-breakpoints-throw/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/995-breakpoints-throw/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/995-breakpoints-throw/src/art/Breakpoint.java b/test/995-breakpoints-throw/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/995-breakpoints-throw/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/996-breakpoint-obsolete/src/art/Breakpoint.java b/test/996-breakpoint-obsolete/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/996-breakpoint-obsolete/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/996-breakpoint-obsolete/src/art/Breakpoint.java b/test/996-breakpoint-obsolete/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/996-breakpoint-obsolete/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/996-breakpoint-obsolete/src/art/Redefinition.java b/test/996-breakpoint-obsolete/src/art/Redefinition.java
deleted file mode 100644
index 56d2938..0000000
--- a/test/996-breakpoint-obsolete/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/996-breakpoint-obsolete/src/art/Redefinition.java b/test/996-breakpoint-obsolete/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/996-breakpoint-obsolete/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/997-single-step/src/art/Breakpoint.java b/test/997-single-step/src/art/Breakpoint.java
deleted file mode 100644
index bbb89f7..0000000
--- a/test/997-single-step/src/art/Breakpoint.java
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Executable;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.Objects;
-
-public class Breakpoint {
-  public static class Manager {
-    public static class BP {
-      public final Executable method;
-      public final long location;
-
-      public BP(Executable method) {
-        this(method, getStartLocation(method));
-      }
-
-      public BP(Executable method, long location) {
-        this.method = method;
-        this.location = location;
-      }
-
-      @Override
-      public boolean equals(Object other) {
-        return (other instanceof BP) &&
-            method.equals(((BP)other).method) &&
-            location == ((BP)other).location;
-      }
-
-      @Override
-      public String toString() {
-        return method.toString() + " @ " + getLine();
-      }
-
-      @Override
-      public int hashCode() {
-        return Objects.hash(method, location);
-      }
-
-      public int getLine() {
-        try {
-          LineNumber[] lines = getLineNumberTable(method);
-          int best = -1;
-          for (LineNumber l : lines) {
-            if (l.location > location) {
-              break;
-            } else {
-              best = l.line;
-            }
-          }
-          return best;
-        } catch (Exception e) {
-          return -1;
-        }
-      }
-    }
-
-    private Set<BP> breaks = new HashSet<>();
-
-    public void setBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.add(b)) {
-          Breakpoint.setBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void setBreakpoint(Executable method, long location) {
-      setBreakpoints(new BP(method, location));
-    }
-
-    public void clearBreakpoints(BP... bs) {
-      for (BP b : bs) {
-        if (breaks.remove(b)) {
-          Breakpoint.clearBreakpoint(b.method, b.location);
-        }
-      }
-    }
-    public void clearBreakpoint(Executable method, long location) {
-      clearBreakpoints(new BP(method, location));
-    }
-
-    public void clearAllBreakpoints() {
-      clearBreakpoints(breaks.toArray(new BP[0]));
-    }
-  }
-
-  public static void startBreakpointWatch(Class<?> methodClass,
-                                          Executable breakpointReached,
-                                          Thread thr) {
-    startBreakpointWatch(methodClass, breakpointReached, false, thr);
-  }
-
-  /**
-   * Enables the trapping of breakpoint events.
-   *
-   * If allowRecursive == true then breakpoints will be sent even if one is currently being handled.
-   */
-  public static native void startBreakpointWatch(Class<?> methodClass,
-                                                 Executable breakpointReached,
-                                                 boolean allowRecursive,
-                                                 Thread thr);
-  public static native void stopBreakpointWatch(Thread thr);
-
-  public static final class LineNumber implements Comparable<LineNumber> {
-    public final long location;
-    public final int line;
-
-    private LineNumber(long loc, int line) {
-      this.location = loc;
-      this.line = line;
-    }
-
-    public boolean equals(Object other) {
-      return other instanceof LineNumber && ((LineNumber)other).line == line &&
-          ((LineNumber)other).location == location;
-    }
-
-    public int compareTo(LineNumber other) {
-      int v = Integer.valueOf(line).compareTo(Integer.valueOf(other.line));
-      if (v != 0) {
-        return v;
-      } else {
-        return Long.valueOf(location).compareTo(Long.valueOf(other.location));
-      }
-    }
-  }
-
-  public static native void setBreakpoint(Executable m, long loc);
-  public static void setBreakpoint(Executable m, LineNumber l) {
-    setBreakpoint(m, l.location);
-  }
-
-  public static native void clearBreakpoint(Executable m, long loc);
-  public static void clearBreakpoint(Executable m, LineNumber l) {
-    clearBreakpoint(m, l.location);
-  }
-
-  private static native Object[] getLineNumberTableNative(Executable m);
-  public static LineNumber[] getLineNumberTable(Executable m) {
-    Object[] nativeTable = getLineNumberTableNative(m);
-    long[] location = (long[])(nativeTable[0]);
-    int[] lines = (int[])(nativeTable[1]);
-    if (lines.length != location.length) {
-      throw new Error("Lines and locations have different lengths!");
-    }
-    LineNumber[] out = new LineNumber[lines.length];
-    for (int i = 0; i < lines.length; i++) {
-      out[i] = new LineNumber(location[i], lines[i]);
-    }
-    return out;
-  }
-
-  public static native long getStartLocation(Executable m);
-
-  public static int locationToLine(Executable m, long location) {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      int best = -1;
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.location > location) {
-          break;
-        } else {
-          best = l.line;
-        }
-      }
-      return best;
-    } catch (Exception e) {
-      return -1;
-    }
-  }
-
-  public static long lineToLocation(Executable m, int line) throws Exception {
-    try {
-      Breakpoint.LineNumber[] lines = Breakpoint.getLineNumberTable(m);
-      for (Breakpoint.LineNumber l : lines) {
-        if (l.line == line) {
-          return l.location;
-        }
-      }
-      throw new Exception("Unable to find line " + line + " in " + m);
-    } catch (Exception e) {
-      throw new Exception("Unable to get line number info for " + m, e);
-    }
-  }
-}
-
diff --git a/test/997-single-step/src/art/Breakpoint.java b/test/997-single-step/src/art/Breakpoint.java
new file mode 120000
index 0000000..3673916
--- /dev/null
+++ b/test/997-single-step/src/art/Breakpoint.java
@@ -0,0 +1 @@
+../../../jvmti-common/Breakpoint.java
\ No newline at end of file
diff --git a/test/997-single-step/src/art/Trace.java b/test/997-single-step/src/art/Trace.java
deleted file mode 100644
index 8999bb1..0000000
--- a/test/997-single-step/src/art/Trace.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-
-public class Trace {
-  public static native void enableTracing(Class<?> methodClass,
-                                          Method entryMethod,
-                                          Method exitMethod,
-                                          Method fieldAccess,
-                                          Method fieldModify,
-                                          Method singleStep,
-                                          Thread thr);
-  public static native void disableTracing(Thread thr);
-
-  public static void enableFieldTracing(Class<?> methodClass,
-                                        Method fieldAccess,
-                                        Method fieldModify,
-                                        Thread thr) {
-    enableTracing(methodClass, null, null, fieldAccess, fieldModify, null, thr);
-  }
-
-  public static void enableMethodTracing(Class<?> methodClass,
-                                         Method entryMethod,
-                                         Method exitMethod,
-                                         Thread thr) {
-    enableTracing(methodClass, entryMethod, exitMethod, null, null, null, thr);
-  }
-
-  public static void enableSingleStepTracing(Class<?> methodClass,
-                                             Method singleStep,
-                                             Thread thr) {
-    enableTracing(methodClass, null, null, null, null, singleStep, thr);
-  }
-
-  public static native void watchFieldAccess(Field f);
-  public static native void watchFieldModification(Field f);
-  public static native void watchAllFieldAccesses();
-  public static native void watchAllFieldModifications();
-
-  // the names, arguments, and even line numbers of these functions are embedded in the tests so we
-  // need to add to the bottom and not modify old ones to maintain compat.
-  public static native void enableTracing2(Class<?> methodClass,
-                                           Method entryMethod,
-                                           Method exitMethod,
-                                           Method fieldAccess,
-                                           Method fieldModify,
-                                           Method singleStep,
-                                           Method ThreadStart,
-                                           Method ThreadEnd,
-                                           Thread thr);
-}
diff --git a/test/997-single-step/src/art/Trace.java b/test/997-single-step/src/art/Trace.java
new file mode 120000
index 0000000..5d9b44b
--- /dev/null
+++ b/test/997-single-step/src/art/Trace.java
@@ -0,0 +1 @@
+../../../jvmti-common/Trace.java
\ No newline at end of file
diff --git a/test/999-redefine-hiddenapi/src/art/Redefinition.java b/test/999-redefine-hiddenapi/src/art/Redefinition.java
deleted file mode 100644
index 1eec70b..0000000
--- a/test/999-redefine-hiddenapi/src/art/Redefinition.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package art;
-
-import java.util.ArrayList;
-// Common Redefinition functions. Placed here for use by CTS
-public class Redefinition {
-  public static final class CommonClassDefinition {
-    public final Class<?> target;
-    public final byte[] class_file_bytes;
-    public final byte[] dex_file_bytes;
-
-    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
-      this.target = target;
-      this.class_file_bytes = class_file_bytes;
-      this.dex_file_bytes = dex_file_bytes;
-    }
-  }
-
-  // A set of possible test configurations. Test should set this if they need to.
-  // This must be kept in sync with the defines in ti-agent/common_helper.cc
-  public static enum Config {
-    COMMON_REDEFINE(0),
-    COMMON_RETRANSFORM(1),
-    COMMON_TRANSFORM(2);
-
-    private final int val;
-    private Config(int val) {
-      this.val = val;
-    }
-  }
-
-  public static void setTestConfiguration(Config type) {
-    nativeSetTestConfiguration(type.val);
-  }
-
-  private static native void nativeSetTestConfiguration(int type);
-
-  // Transforms the class
-  public static native void doCommonClassRedefinition(Class<?> target,
-                                                      byte[] classfile,
-                                                      byte[] dexfile);
-
-  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
-    ArrayList<Class<?>> classes = new ArrayList<>();
-    ArrayList<byte[]> class_files = new ArrayList<>();
-    ArrayList<byte[]> dex_files = new ArrayList<>();
-
-    for (CommonClassDefinition d : defs) {
-      classes.add(d.target);
-      class_files.add(d.class_file_bytes);
-      dex_files.add(d.dex_file_bytes);
-    }
-    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
-                                   class_files.toArray(new byte[0][]),
-                                   dex_files.toArray(new byte[0][]));
-  }
-
-  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
-    for (CommonClassDefinition d : defs) {
-      addCommonTransformationResult(d.target.getCanonicalName(),
-                                    d.class_file_bytes,
-                                    d.dex_file_bytes);
-    }
-  }
-
-  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
-                                                           byte[][] classfiles,
-                                                           byte[][] dexfiles);
-  public static native void doCommonClassRetransformation(Class<?>... target);
-  public static native void setPopRetransformations(boolean pop);
-  public static native void popTransformationFor(String name);
-  public static native void enableCommonRetransformation(boolean enable);
-  public static native void addCommonTransformationResult(String target_name,
-                                                          byte[] class_bytes,
-                                                          byte[] dex_bytes);
-}
diff --git a/test/999-redefine-hiddenapi/src/art/Redefinition.java b/test/999-redefine-hiddenapi/src/art/Redefinition.java
new file mode 120000
index 0000000..81eaf31
--- /dev/null
+++ b/test/999-redefine-hiddenapi/src/art/Redefinition.java
@@ -0,0 +1 @@
+../../../jvmti-common/Redefinition.java
\ No newline at end of file
diff --git a/test/Android.bp b/test/Android.bp
index cb9f612..0f09fcc 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -14,6 +14,8 @@
 // limitations under the License.
 //
 
+// ART gtests.
+
 art_cc_defaults {
     name: "art_test_defaults",
     host_supported: true,
@@ -24,12 +26,6 @@
         android_arm64: {
             relative_install_path: "art/arm64",
         },
-        android_mips: {
-            relative_install_path: "art/mips",
-        },
-        android_mips64: {
-            relative_install_path: "art/mips64",
-        },
         android_x86: {
             relative_install_path: "art/x86",
         },
@@ -43,6 +39,9 @@
     cflags: [
         "-Wno-frame-larger-than=",
     ],
+    apex_available: [
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_defaults {
@@ -141,6 +140,9 @@
             enabled: false,
         },
     },
+    apex_available: [
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_library {
@@ -166,8 +168,67 @@
             enabled: false,
         },
     },
+    apex_available: [
+        "com.android.art.debug",
+    ],
 }
 
+// ART run-tests.
+
+art_cc_test_library {
+    name: "libarttest",
+    defaults: ["libarttest-defaults"],
+    shared_libs: [
+        "libart",
+        "libdexfile",
+        "libprofile",
+        "libartbase",
+    ],
+}
+
+art_cc_test_library {
+    name: "libarttestd",
+    defaults: [
+        "art_debug_defaults",
+        "libarttest-defaults",
+    ],
+    shared_libs: [
+        "libartd",
+        "libdexfiled",
+        "libprofiled",
+        "libartbased",
+    ],
+}
+
+art_cc_defaults {
+    name: "libnativebridgetest-defaults",
+    defaults: [
+        "art_test_defaults",
+        "art_defaults",
+    ],
+    header_libs: ["libnativebridge-headers"],
+    srcs: ["115-native-bridge/nativebridge.cc"],
+}
+
+art_cc_test_library {
+    name: "libnativebridgetest",
+    shared_libs: ["libart"],
+    defaults: [
+        "libnativebridgetest-defaults",
+    ],
+}
+
+art_cc_test_library {
+    name: "libnativebridgetestd",
+    shared_libs: ["libartd"],
+    defaults: [
+        "libnativebridgetest-defaults",
+        "art_debug_defaults",
+    ],
+}
+
+// ART JVMTI run-tests.
+
 cc_defaults {
     name: "libartagent-defaults",
     defaults: [
@@ -224,11 +285,13 @@
         "ti-agent/test_env.cc",
         "ti-agent/breakpoint_helper.cc",
         "ti-agent/common_helper.cc",
+        "ti-agent/early_return_helper.cc",
         "ti-agent/frame_pop_helper.cc",
         "ti-agent/locals_helper.cc",
         "ti-agent/monitors_helper.cc",
         "ti-agent/redefinition_helper.cc",
         "ti-agent/suspension_helper.cc",
+        "ti-agent/suspend_event_helper.cc",
         "ti-agent/stack_trace_helper.cc",
         "ti-agent/threads_helper.cc",
         "ti-agent/trace_helper.cc",
@@ -292,13 +355,24 @@
         "1951-monitor-enter-no-suspend/raw_monitor.cc",
         "1953-pop-frame/pop_frame.cc",
         "1957-error-ext/lasterror.cc",
+        // TODO Renumber
         "1962-multi-thread-events/multi_thread_events.cc",
+        "1963-add-to-dex-classloader-in-memory/add_to_loader.cc",
+        "1968-force-early-return/force_early_return.cc",
+        "1969-force-early-return-void/force_early_return_void.cc",
+        "1970-force-early-return-long/force_early_return_long.cc",
+        "1974-resize-array/resize_array.cc",
+        "1975-hello-structural-transformation/structural_transform.cc",
+        "1976-hello-structural-static-methods/structural_transform_methods.cc",
+        "2005-pause-all-redefine-multithreaded/pause-all.cc",
+        "2009-structural-local-ref/local-ref.cc",
+        "2035-structural-native-method/structural-native.cc",
     ],
     // Use NDK-compatible headers for ctstiagent.
     header_libs: [
         "libopenjdkjvmti_headers",
     ],
-    include_dirs: ["art/test/ti-agent"],
+    local_include_dirs: ["ti-agent"],
 }
 
 art_cc_defaults {
@@ -321,10 +395,13 @@
         "980-redefine-object/redef_object.cc",
         "983-source-transform-verify/source_transform_art.cc",
         "1940-ddms-ext/ddm_ext.cc",
-        "1944-sudden-exit/sudden_exit.cc",
         // "1952-pop-frame-jit/pop_frame.cc",
         "1959-redefine-object-instrument/fake_redef_object.cc",
         "1960-obsolete-jit-multithread-native/native_say_hi.cc",
+        "1964-add-to-dex-classloader-file/add_to_loader.cc",
+        "1963-add-to-dex-classloader-in-memory/check_memfd_create.cc",
+        "2012-structural-redefinition-failures-jni-id/set-jni-id-used.cc",
+        "2031-zygote-compiled-frame-deopt/native-wait.cc",
     ],
     static_libs: [
         "libz",
@@ -360,6 +437,9 @@
 cc_library_static {
     name: "libctstiagent",
     defaults: ["libtiagent-base-defaults"],
+    visibility: [
+        "//cts/hostsidetests/jvmti:__subpackages__",
+    ],
     host_supported: false,
     srcs: [
         "983-source-transform-verify/source_transform_slicer.cc",
@@ -371,18 +451,17 @@
         "libbase_ndk",
     ],
     shared_libs: [
-        "libz",  // for slicer (using adler32).
+        "libz", // for slicer (using adler32).
     ],
     sdk_version: "current",
     stl: "c++_static",
-    include_dirs: [
+    header_libs: [
+        "jni_headers",
         // This is needed to resolve the base/ header file in libdexfile. Unfortunately there are
         // many problems with how we export headers that are making doing this the 'right' way
         // difficult.
         // TODO: move those headers to art/ rather than under runtime.
-        "art/runtime",
-        // NDK headers aren't available in platform NDK builds.
-        "libnativehelper/include_jni",
+        "libart_runtime_headers_ndk",
     ],
     export_include_dirs: ["ti-agent"],
 }
@@ -400,15 +479,18 @@
     name: "libtistress-defaults",
     defaults: ["libtistress-srcs"],
     shared_libs: [
-        "libbase",
         "slicer_no_rtti",
+        "libz", // for slicer (using adler32).
     ],
 }
 
 art_cc_test_library {
     name: "libtistress",
     defaults: ["libtistress-defaults"],
-    shared_libs: ["libartbase"],
+    shared_libs: [
+        "libartbase",
+        "libz",
+    ],
 }
 
 art_cc_test_library {
@@ -417,31 +499,38 @@
         "art_debug_defaults",
         "libtistress-defaults",
     ],
-    shared_libs: ["libartbased"],
+    shared_libs: [
+        "libartbased",
+        "libz",
+    ],
 }
 
 art_cc_defaults {
-    name: "libtistress-static-defaults",
+    name: "libtistress-shared-defaults",
     defaults: [
         "libtistress-srcs",
-        "libart_static_defaults",
     ],
     static_libs: ["slicer_no_rtti"],
 }
 
 art_cc_test_library {
     name: "libtistresss",
-    defaults: ["libtistress-static-defaults"],
-    static_libs: ["libartbase"],
+    defaults: ["libtistress-shared-defaults"],
+    shared_libs: [
+        "libartbase",
+        "libz",
+    ],
 }
 
 art_cc_test_library {
     name: "libtistressds",
     defaults: [
-        "art_debug_defaults",
-        "libtistress-static-defaults",
+        "libtistress-shared-defaults",
     ],
-    static_libs: ["libartbased"],
+    shared_libs: [
+        "libartbased",
+        "libz",
+    ],
 }
 
 cc_defaults {
@@ -470,6 +559,8 @@
         "167-visit-locks/visit_locks.cc",
         "169-threadgroup-jni/jni_daemon_thread.cc",
         "172-app-image-twice/debug_print_class.cc",
+        "177-visibly-initialized-deadlock/visibly_initialized.cc",
+        "178-app-image-native-method/native_methods.cc",
         "1945-proxy-method-arguments/get_args.cc",
         "203-multi-checkpoint/multi_checkpoint.cc",
         "305-other-fault-handler/fault_handler.cc",
@@ -502,6 +593,10 @@
         "1001-app-image-regions/app_image_regions.cc",
         "1002-notify-startup/startup_interface.cc",
         "1947-breakpoint-redefine-deopt/check_deopt.cc",
+        "1972-jni-id-swap-indices/jni_id.cc",
+        "1985-structural-redefine-stack-scope/stack_scope.cc",
+        "2011-stack-walk-concurrent-instrument/stack_walk_concurrent.cc",
+        "2031-zygote-compiled-frame-deopt/native-wait.cc",
         "common/runtime_state.cc",
         "common/stack_inspect.cc",
     ],
@@ -512,58 +607,26 @@
     ],
 }
 
-art_cc_test_library {
-    name: "libarttest",
-    defaults: ["libarttest-defaults"],
-    shared_libs: [
-        "libart",
-        "libdexfile",
-        "libprofile",
-        "libartbase",
-    ],
-}
-
-art_cc_test_library {
-    name: "libarttestd",
-    defaults: [
-        "art_debug_defaults",
-        "libarttest-defaults",
-    ],
-    shared_libs: [
-        "libartd",
-        "libdexfiled",
-        "libprofiled",
-        "libartbased",
-    ],
-}
-
-art_cc_test_library {
-    name: "libnativebridgetest",
-    shared_libs: ["libart"],
-    defaults: [
-        "art_test_defaults",
-        "art_debug_defaults",
-        "art_defaults",
-    ],
-    header_libs: ["libnativebridge-headers"],
-    srcs: ["115-native-bridge/nativebridge.cc"],
-}
-
 filegroup {
     name: "art_cts_jvmti_test_library",
+    visibility: [
+        "//cts/hostsidetests/jvmti:__subpackages__",
+    ],
     srcs: [
         // shim classes. We use one that exposes the common functionality.
-        "902-hello-transformation/src/art/Redefinition.java",
-        "903-hello-tagging/src/art/Main.java",
-        "989-method-trace-throw/src/art/Trace.java",
-        "993-breakpoints/src/art/Breakpoint.java",
-        "1902-suspend/src/art/Suspension.java",
-        "1911-get-local-var-table/src/art/Locals.java",
-        "1912-get-set-local-primitive/src/art/StackTrace.java",
-        "1923-frame-pop/src/art/FramePop.java",
-        "1927-exception-event/src/art/Exceptions.java",
-        "1930-monitor-info/src/art/Monitors.java",
-        "1934-jvmti-signal-thread/src/art/Threads.java",
+        "jvmti-common/Redefinition.java",
+        "jvmti-common/Main.java",
+        "jvmti-common/Trace.java",
+        "jvmti-common/Breakpoint.java",
+        "jvmti-common/Suspension.java",
+        "jvmti-common/Locals.java",
+        "jvmti-common/StackTrace.java",
+        "jvmti-common/FramePop.java",
+        "jvmti-common/Exceptions.java",
+        "jvmti-common/Monitors.java",
+        "jvmti-common/NonStandardExit.java",
+        "jvmti-common/Threads.java",
+        "jvmti-common/SuspendEvents.java",
 
         // Actual test classes.
         "901-hello-ti-agent/src/art/Test901.java",
@@ -668,6 +731,44 @@
         "1953-pop-frame/src/art/Test1953.java",
         "1958-transform-try-jit/src/art/Test1958.java",
         "1962-multi-thread-events/src/art/Test1962.java",
+        "1963-add-to-dex-classloader-in-memory/src/art/Test1963.java",
+        "1967-get-set-local-bad-slot/src/art/Test1967.java",
+        "1968-force-early-return/src/art/Test1968.java",
+        "1969-force-early-return-void/src/art/Test1969.java",
+        "1970-force-early-return-long/src/art/Test1970.java",
+        "1971-multi-force-early-return/src/art/Test1971.java",
+        "1974-resize-array/src/art/Test1974.java",
+        "1975-hello-structural-transformation/src/art/Test1975.java",
+        "1975-hello-structural-transformation/src/art/Transform1975.java",
+        "1976-hello-structural-static-methods/src/art/Test1976.java",
+        "1976-hello-structural-static-methods/src/art/Transform1976.java",
+        "1977-hello-structural-obsolescence/src/art/Test1977.java",
+        "1978-regular-obsolete-then-structural-obsolescence/src/art/Test1978.java",
+        "1979-threaded-structural-transformation/src/art/Test1979.java",
+        "1981-structural-redef-private-method-handles/src/art/Test1981.java",
+        // TODO Requires VarHandles to be un-@hide. See b/64382372
+        // "1981-structural-redef-private-method-handles/src/art/Test1981_Varhandles.java",
+        "1982-no-virtuals-structural-redefinition/src/art/Test1982.java",
+        "1983-structural-redefinition-failures/src/art/Test1983.java",
+        "1984-structural-redefine-field-trace/src/art/Test1984.java",
+        "1988-multi-structural-redefine/src/art/Test1988.java",
+        "1989-transform-bad-monitor/src/art/Test1989.java",
+        "1990-structural-bad-verify/src/art/Test1990.java",
+        "1991-hello-structural-retransform/src/art/Test1991.java",
+        "1992-retransform-no-such-field/src/art/Test1992.java",
+        "1994-final-virtual-structural/src/art/Test1994.java",
+        "1995-final-virtual-structural-multithread/src/art/Test1995.java",
+        "1996-final-override-virtual-structural/src/art/Test1996.java",
+        "1997-structural-shadow-method/src/art/Test1997.java",
+        "1998-structural-shadow-field/src/art/Test1998.java",
+        "1999-virtual-structural/src/art/Test1999.java",
+        "2001-virtual-structural-multithread/src-art/art/Test2001.java",
+        "2002-virtual-structural-initializing/src-art/art/Test2002.java",
+        "2003-double-virtual-structural/src/art/Test2003.java",
+        "2004-double-virtual-structural-abstract/src/art/Test2004.java",
+        "2005-pause-all-redefine-multithreaded/src/art/Test2005.java",
+        "2006-virtual-structural-finalizing/src-art/art/Test2006.java",
+        "2007-virtual-structural-finalizable/src-art/art/Test2007.java",
     ],
 }
 
@@ -677,6 +778,9 @@
 // Copy+rename them them to a temporary directory and them zip them.
 java_genrule {
     name: "expected_cts_outputs",
+    visibility: [
+        "//cts/hostsidetests/jvmti:__subpackages__",
+    ],
     srcs: [
         "901-hello-ti-agent/expected.txt",
         "902-hello-transformation/expected.txt",
@@ -687,7 +791,7 @@
         "907-get-loaded-classes/expected.txt",
         "908-gc-start-finish/expected.txt",
         "910-methods/expected.txt",
-        "911-get-stack-trace/expected.txt",
+        "911-get-stack-trace/expected-cts-version.txt",
         "912-classes/expected.txt",
         "913-heaps/expected.txt",
         "914-hello-obsolescence/expected.txt",
@@ -767,6 +871,44 @@
         "1943-suspend-raw-monitor-wait/expected.txt",
         "1953-pop-frame/expected.txt",
         "1958-transform-try-jit/expected.txt",
+        "1962-multi-thread-events/expected.txt",
+        "1963-add-to-dex-classloader-in-memory/expected.txt",
+        "1967-get-set-local-bad-slot/expected.txt",
+        "1968-force-early-return/expected.txt",
+        "1969-force-early-return-void/expected.txt",
+        "1970-force-early-return-long/expected.txt",
+        "1971-multi-force-early-return/expected.txt",
+        "1974-resize-array/expected.txt",
+        "1975-hello-structural-transformation/expected.txt",
+        "1976-hello-structural-static-methods/expected.txt",
+        "1977-hello-structural-obsolescence/expected.txt",
+        "1978-regular-obsolete-then-structural-obsolescence/expected.txt",
+        "1979-threaded-structural-transformation/expected.txt",
+        // TODO Requires VarHandles to be un-@hide. See b/64382372
+        // "test/1981-structural-redef-private-method-handles/expected.txt",
+        "1981-structural-redef-private-method-handles/expected_no_mh.txt",
+        "1982-no-virtuals-structural-redefinition/expected.txt",
+        // JNI-id use can change the outcome of this test on device.
+        "1983-structural-redefinition-failures/expected-cts.txt",
+        "1984-structural-redefine-field-trace/expected.txt",
+        "1988-multi-structural-redefine/expected.txt",
+        "1989-transform-bad-monitor/expected.txt",
+        "1990-structural-bad-verify/expected.txt",
+        "1991-hello-structural-retransform/expected.txt",
+        "1992-retransform-no-such-field/expected.txt",
+        "1994-final-virtual-structural/expected.txt",
+        "1995-final-virtual-structural-multithread/expected.txt",
+        "1996-final-override-virtual-structural/expected.txt",
+        "1997-structural-shadow-method/expected.txt",
+        "1998-structural-shadow-field/expected.txt",
+        "1999-virtual-structural/expected.txt",
+        "2001-virtual-structural-multithread/expected.txt",
+        "2002-virtual-structural-initializing/expected.txt",
+        "2003-double-virtual-structural/expected.txt",
+        "2004-double-virtual-structural-abstract/expected.txt",
+        "2005-pause-all-redefine-multithreaded/expected.txt",
+        "2006-virtual-structural-finalizing/expected.txt",
+        "2007-virtual-structural-finalizable/expected.txt",
     ],
     out: ["expected_cts_outputs.jar"],
     tools: ["soong_zip"],
@@ -775,3 +917,453 @@
         "cp $$f $(genDir)/res/results.$${x%%-*}.expected.txt; done && " +
         "$(location soong_zip) -o $(out) -C $(genDir)/res -D $(genDir)/res",
 }
+
+filegroup {
+    name: "art-gtest-jars",
+    srcs: [
+        ":art-gtest-jars-AbstractMethod",
+        ":art-gtest-jars-AllFields",
+        ":art-gtest-jars-DefaultMethods",
+        ":art-gtest-jars-DexToDexDecompiler",
+        ":art-gtest-jars-ErroneousA",
+        ":art-gtest-jars-ErroneousB",
+        ":art-gtest-jars-ErroneousInit",
+        ":art-gtest-jars-Extension1",
+        ":art-gtest-jars-Extension2",
+        ":art-gtest-jars-ForClassLoaderA",
+        ":art-gtest-jars-ForClassLoaderB",
+        ":art-gtest-jars-ForClassLoaderC",
+        ":art-gtest-jars-ForClassLoaderD",
+        ":art-gtest-jars-ExceptionHandle",
+        ":art-gtest-jars-GetMethodSignature",
+        ":art-gtest-jars-HiddenApi",
+        ":art-gtest-jars-HiddenApiSignatures",
+        ":art-gtest-jars-HiddenApiStubs",
+        ":art-gtest-jars-ImageLayoutA",
+        ":art-gtest-jars-ImageLayoutB",
+        ":art-gtest-jars-IMTA",
+        ":art-gtest-jars-IMTB",
+        ":art-gtest-jars-Instrumentation",
+        ":art-gtest-jars-Interfaces",
+        ":art-gtest-jars-Lookup",
+        ":art-gtest-jars-Main",
+        ":art-gtest-jars-ManyMethods",
+        ":art-gtest-jars-MethodTypes",
+        ":art-gtest-jars-MultiDex",
+        ":art-gtest-jars-MultiDexModifiedSecondary",
+        ":art-gtest-jars-MyClass",
+        ":art-gtest-jars-MyClassNatives",
+        ":art-gtest-jars-Nested",
+        ":art-gtest-jars-NonStaticLeafMethods",
+        ":art-gtest-jars-Packages",
+        ":art-gtest-jars-ProtoCompare",
+        ":art-gtest-jars-ProtoCompare2",
+        ":art-gtest-jars-ProfileTestMultiDex",
+        ":art-gtest-jars-StaticLeafMethods",
+        ":art-gtest-jars-Statics",
+        ":art-gtest-jars-StaticsFromCode",
+        ":art-gtest-jars-StringLiterals",
+        ":art-gtest-jars-Transaction",
+        ":art-gtest-jars-XandY",
+        ":art-gtest-jars-MainEmptyUncompressed",
+        ":art-gtest-jars-MainEmptyUncompressedAligned",
+        ":art-gtest-jars-MainStripped",
+        ":art-gtest-jars-MainUncompressedAligned",
+        ":art-gtest-jars-MultiDexUncompressedAligned",
+        ":art-gtest-jars-VerifierDeps",
+        ":art-gtest-jars-VerifierDepsMulti",
+        ":art-gtest-jars-VerifySoftFailDuringClinit",
+    ],
+}
+
+java_defaults {
+    name: "art-gtest-jars-defaults",
+    installable: true,
+    dex_preopt: {
+        enabled: false,
+    },
+    sdk_version: "core_platform",
+}
+
+// The following modules are just trivial compilations (non-trivial cases are the end).
+
+java_library {
+    name: "art-gtest-jars-AbstractMethod",
+    srcs: ["AbstractMethod/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-AllFields",
+    srcs: ["AllFields/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-DefaultMethods",
+    srcs: ["DefaultMethods/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-DexToDexDecompiler",
+    srcs: ["DexToDexDecompiler/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-ErroneousA",
+    srcs: ["ErroneousA/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-ErroneousB",
+    srcs: ["ErroneousB/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-ErroneousInit",
+    srcs: ["ErroneousInit/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-Extension1",
+    srcs: ["Extension1/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-Extension2",
+    srcs: ["Extension2/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-ForClassLoaderA",
+    srcs: ["ForClassLoaderA/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-ForClassLoaderB",
+    srcs: ["ForClassLoaderB/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-ForClassLoaderC",
+    srcs: ["ForClassLoaderC/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-ForClassLoaderD",
+    srcs: ["ForClassLoaderD/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-ExceptionHandle",
+    srcs: ["ExceptionHandle/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-GetMethodSignature",
+    srcs: ["GetMethodSignature/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-HiddenApi",
+    srcs: ["HiddenApi/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-HiddenApiSignatures",
+    srcs: ["HiddenApiSignatures/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-HiddenApiStubs",
+    srcs: ["HiddenApiStubs/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-ImageLayoutA",
+    srcs: ["ImageLayoutA/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-ImageLayoutB",
+    srcs: ["ImageLayoutB/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-IMTA",
+    srcs: ["IMTA/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-IMTB",
+    srcs: ["IMTB/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-Instrumentation",
+    srcs: ["Instrumentation/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-Interfaces",
+    srcs: ["Interfaces/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-Lookup",
+    srcs: ["Lookup/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-Main",
+    srcs: ["Main/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-ManyMethods",
+    srcs: ["ManyMethods/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-MethodTypes",
+    srcs: ["MethodTypes/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-MyClass",
+    srcs: ["MyClass/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-MyClassNatives",
+    srcs: ["MyClassNatives/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-Nested",
+    srcs: ["Nested/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-NonStaticLeafMethods",
+    srcs: ["NonStaticLeafMethods/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-Packages",
+    srcs: ["Packages/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-ProtoCompare",
+    srcs: ["ProtoCompare/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-ProtoCompare2",
+    srcs: ["ProtoCompare2/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-StaticLeafMethods",
+    srcs: ["StaticLeafMethods/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-Statics",
+    srcs: ["Statics/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-StaticsFromCode",
+    srcs: ["StaticsFromCode/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-StringLiterals",
+    srcs: ["StringLiterals/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-Transaction",
+    srcs: ["Transaction/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+java_library {
+    name: "art-gtest-jars-XandY",
+    srcs: ["XandY/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+}
+
+// The following cases are non-trivial.
+
+// Uncompress classes.dex files in the jar file.
+genrule_defaults {
+    name: "art-gtest-jars-uncompress-defaults",
+    cmd: "$(location zip2zip) -i $(in) -o $(out) -0 'classes*.dex'",
+    tools: ["zip2zip"],
+}
+
+// Ensure the files are at least 4 byte aligned.
+genrule_defaults {
+    name: "art-gtest-jars-align-defaults",
+    cmd: "$(location zipalign) 4 $(in) $(out)",
+    tools: ["zipalign"],
+}
+
+// Assemble jar file from smali source.
+genrule_defaults {
+    name: "art-gtest-jars-smali-defaults",
+    cmd: "$(location smali) assemble --output $(out) $(in)",
+    tools: ["smali"],
+}
+
+// A copy of Main with the classes.dex stripped for the oat file assistant tests.
+genrule {
+    name: "art-gtest-jars-MainStripped",
+    srcs: [":art-gtest-jars-Main"],
+    cmd: "$(location zip2zip) -i $(in) -o $(out) -x 'classes*.dex'",
+    out: ["art-gtest-jars-MainStripped.jar"],
+    tools: ["zip2zip"],
+}
+
+// An empty.dex that is empty and uncompressed for the dex2oat tests.
+genrule {
+    name: "art-gtest-jars-MainEmptyUncompressed",
+    srcs: ["Main/empty.dex"],
+    cmd: "$(location soong_zip) -j -L 0 -o $(out) -f $(in)",
+    out: ["art-gtest-jars-MainEmptyUncompressed.jar"],
+    tools: ["soong_zip"],
+}
+
+// An empty.dex that is empty and uncompressed and aligned for the dex2oat tests.
+genrule {
+    name: "art-gtest-jars-MainEmptyUncompressedAligned",
+    defaults: ["art-gtest-jars-align-defaults"],
+    srcs: [":art-gtest-jars-MainEmptyUncompressed"],
+    out: ["art-gtest-jars-MainEmptyUncompressedAligned.jar"],
+}
+
+// A copy of Main with the classes.dex uncompressed for the dex2oat tests.
+genrule {
+    name: "art-gtest-jars-MainUncompressed",
+    defaults: ["art-gtest-jars-uncompress-defaults"],
+    srcs: [":art-gtest-jars-Main"],
+    out: ["art-gtest-jars-MainUncompressed.jar"],
+}
+
+// A copy of Main with the classes.dex uncompressed and aligned for the dex2oat tests.
+genrule {
+    name: "art-gtest-jars-MainUncompressedAligned",
+    defaults: ["art-gtest-jars-align-defaults"],
+    srcs: [":art-gtest-jars-MainUncompressed"],
+    out: ["art-gtest-jars-MainUncompressedAligned.jar"],
+}
+
+java_library {
+    name: "art-gtest-jars-MultiDex",
+    srcs: ["MultiDex/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+    min_sdk_version: "19",
+    dxflags: [
+        "--main-dex-list",
+        "art/test/MultiDex/main.list",
+    ],
+}
+
+// A copy of MultiDex with the classes.dex uncompressed for the OatFile tests.
+genrule {
+    name: "art-gtest-jars-MultiDexUncompressed",
+    defaults: ["art-gtest-jars-uncompress-defaults"],
+    srcs: [":art-gtest-jars-MultiDex"],
+    out: ["art-gtest-jars-MultiDexUncompressed.jar"],
+}
+
+// A copy of MultiDex with the classes.dex uncompressed and aligned for the OatFile tests.
+genrule {
+    name: "art-gtest-jars-MultiDexUncompressedAligned",
+    defaults: ["art-gtest-jars-align-defaults"],
+    srcs: [":art-gtest-jars-MultiDexUncompressed"],
+    out: ["art-gtest-jars-MultiDexUncompressedAligned.jar"],
+}
+
+java_library {
+    name: "art-gtest-jars-MultiDexModifiedSecondary",
+    srcs: ["MultiDexModifiedSecondary/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+    min_sdk_version: "19",
+    dxflags: [
+        "--main-dex-list",
+        "art/test/MultiDexModifiedSecondary/main.list",
+    ],
+}
+
+java_library {
+    name: "art-gtest-jars-ProfileTestMultiDex",
+    srcs: ["ProfileTestMultiDex/**/*.java"],
+    defaults: ["art-gtest-jars-defaults"],
+    min_sdk_version: "19",
+    dxflags: [
+        "--main-dex-list",
+        "art/test/ProfileTestMultiDex/main.list",
+    ],
+}
+
+genrule {
+    name: "art-gtest-jars-VerifierDeps",
+    defaults: ["art-gtest-jars-smali-defaults"],
+    srcs: ["VerifierDeps/**/*.smali"],
+    out: ["art-gtest-jars-VerifierDeps.jar"],
+}
+
+genrule {
+    name: "art-gtest-jars-VerifierDepsMulti",
+    defaults: ["art-gtest-jars-smali-defaults"],
+    srcs: ["VerifierDepsMulti/**/*.smali"],
+    out: ["art-gtest-jars-VerifierDepsMulti.jar"],
+}
+
+genrule {
+    name: "art-gtest-jars-VerifySoftFailDuringClinit",
+    defaults: ["art-gtest-jars-smali-defaults"],
+    srcs: ["VerifySoftFailDuringClinit/**/*.smali"],
+    out: ["art-gtest-jars-VerifySoftFailDuringClinit.jar"],
+}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index e3157ef..c4e07d3 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -19,15 +19,17 @@
 
 # Dependencies for actually running a run-test.
 TEST_ART_RUN_TEST_DEPENDENCIES := \
+  $(HOST_OUT_EXECUTABLES)/dx \
   $(HOST_OUT_EXECUTABLES)/d8 \
-  $(HOST_OUT_EXECUTABLES)/d8-compat-dx \
   $(HOST_OUT_EXECUTABLES)/hiddenapi \
   $(HOST_OUT_EXECUTABLES)/jasmin \
   $(HOST_OUT_EXECUTABLES)/smali
 
-# We need dex2oat and dalvikvm on the target as well as the core images (all images as we sync
-# only once).
-ART_TEST_TARGET_RUN_TEST_DEPENDENCIES := $(ART_TARGET_EXECUTABLES) $(TARGET_CORE_IMG_OUTS)
+# We need the ART Testing APEX (which is a superset of the Release
+# and Debug APEXes) -- which contains dex2oat, dalvikvm, their
+# dependencies and ART gtests -- on the target, as well as the core
+# images (all images as we sync only once).
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES := $(TESTING_ART_APEX) $(TARGET_CORE_IMG_OUTS)
 
 # Also need libartagent.
 ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libartagent-target libartagentd-target
@@ -42,10 +44,10 @@
 ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libarttest-target libarttestd-target
 
 # Also need libnativebridgetest.
-ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libnativebridgetest-target
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libnativebridgetest-target libnativebridgetestd-target
 
-# Also need libopenjdkjvmti.
-ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libopenjdkjvmti-target libopenjdkjvmtid-target
+# Also need signal_dumper.
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += signal_dumper-target
 
 ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += \
   $(foreach jar,$(TARGET_TEST_CORE_JARS),$(TARGET_OUT_JAVA_LIBRARIES)/$(jar).jar)
@@ -55,16 +57,18 @@
 ART_TEST_HOST_RUN_TEST_DEPENDENCIES := \
   $(ART_HOST_EXECUTABLES) \
   $(HOST_OUT_EXECUTABLES)/hprof-conv \
-  $(HOST_OUT_EXECUTABLES)/timeout_dumper \
-  $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtiagent) \
-  $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtiagentd) \
-  $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtistress) \
-  $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtistressd) \
-  $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libartagent) \
-  $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libartagentd) \
-  $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libarttest) \
-  $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libarttestd) \
-  $(OUT_DIR)/$(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libnativebridgetest) \
+  $(HOST_OUT_EXECUTABLES)/signal_dumper \
+  $(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtiagent) \
+  $(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtiagentd) \
+  $(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtistress) \
+  $(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libtistressd) \
+  $(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libartagent) \
+  $(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libartagentd) \
+  $(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libarttest) \
+  $(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libarttestd) \
+  $(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libnativebridgetest) \
+  $(ART_TEST_LIST_host_$(ART_HOST_ARCH)_libnativebridgetestd) \
+  $(ART_HOST_OUT_SHARED_LIBRARIES)/libicu_jni$(ART_HOST_SHLIB_EXTENSION) \
   $(ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
   $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$(ART_HOST_SHLIB_EXTENSION) \
   $(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$(ART_HOST_SHLIB_EXTENSION) \
@@ -74,15 +78,17 @@
 
 ifneq ($(HOST_PREFER_32_BIT),true)
 ART_TEST_HOST_RUN_TEST_DEPENDENCIES += \
-  $(OUT_DIR)/$(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libtiagent) \
-  $(OUT_DIR)/$(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libtiagentd) \
-  $(OUT_DIR)/$(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libtistress) \
-  $(OUT_DIR)/$(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libtistressd) \
-  $(OUT_DIR)/$(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libartagent) \
-  $(OUT_DIR)/$(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libartagentd) \
-  $(OUT_DIR)/$(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libarttest) \
-  $(OUT_DIR)/$(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libarttestd) \
-  $(OUT_DIR)/$(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libnativebridgetest) \
+  $(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libtiagent) \
+  $(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libtiagentd) \
+  $(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libtistress) \
+  $(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libtistressd) \
+  $(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libartagent) \
+  $(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libartagentd) \
+  $(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libarttest) \
+  $(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libarttestd) \
+  $(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libnativebridgetest) \
+  $(ART_TEST_LIST_host_$(2ND_ART_HOST_ARCH)_libnativebridgetestd) \
+  $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libicu_jni$(ART_HOST_SHLIB_EXTENSION) \
   $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libjavacore$(ART_HOST_SHLIB_EXTENSION) \
   $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdk$(ART_HOST_SHLIB_EXTENSION) \
   $(2ND_ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$(ART_HOST_SHLIB_EXTENSION) \
@@ -97,10 +103,6 @@
 # Required for jasmin and smali.
 host_prereq_rules += $(TEST_ART_RUN_TEST_DEPENDENCIES)
 
-# Sync test files to the target, depends upon all things that must be pushed
-#to the target.
-target_prereq_rules += test-art-target-sync
-
 define core-image-dependencies
   image_suffix := $(3)
   ifeq ($(3),regalloc_gc)
@@ -138,7 +140,7 @@
 
 test-art-host-run-test-dependencies : $(host_prereq_rules)
 .PHONY: test-art-host-run-test-dependencies
-test-art-target-run-test-dependencies : $(target_prereq_rules)
+test-art-target-run-test-dependencies :
 .PHONY: test-art-target-run-test-dependencies
 test-art-run-test-dependencies : test-art-host-run-test-dependencies test-art-target-run-test-dependencies
 .PHONY: test-art-run-test-dependencies
@@ -162,7 +164,6 @@
 test-art-run-test : test-art-host-run-test test-art-target-run-test
 
 host_prereq_rules :=
-target_prereq_rules :=
 core-image-dependencies :=
 define-test-art-host-or-target-run-test-group :=
 TARGET_TYPES :=
diff --git a/test/Extension1/ExtensionClass1.java b/test/Extension1/ExtensionClass1.java
new file mode 100644
index 0000000..b59643a
--- /dev/null
+++ b/test/Extension1/ExtensionClass1.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class ExtensionClass1 {
+    public static String sharedString = "SharedBootImageExtensionTestString";
+    public static String uniqueString = "UniqueExtension1String";
+}
diff --git a/test/Extension2/ExtensionClass2.java b/test/Extension2/ExtensionClass2.java
new file mode 100644
index 0000000..437341d
--- /dev/null
+++ b/test/Extension2/ExtensionClass2.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class ExtensionClass2 {
+    public static String sharedString = "SharedBootImageExtensionTestString";
+    public static String uniqueString1 = "UniqueExtension2String1";
+    public static String uniqueString2 = "UniqueExtension2String2";
+}
diff --git a/test/Interfaces/Interfaces.java b/test/Interfaces/Interfaces.java
index db60253..6290e7c 100644
--- a/test/Interfaces/Interfaces.java
+++ b/test/Interfaces/Interfaces.java
@@ -26,6 +26,8 @@
     interface K extends J {
         public void k();
     }
+    interface L extends I, J {
+    }
     class A implements I, J {
         public void i() {};
         public void j1() {};
diff --git a/test/Main/empty.dex b/test/Main/empty.dex
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/Main/empty.dex
diff --git a/test/MyClassNatives/MyClassNatives.java b/test/MyClassNatives/MyClassNatives.java
index c601e3e..3d939d6 100644
--- a/test/MyClassNatives/MyClassNatives.java
+++ b/test/MyClassNatives/MyClassNatives.java
@@ -122,6 +122,8 @@
     native void withoutImplementation();
     // Normal native
     native Object withoutImplementationRefReturn();
+    // Normal native
+    native static void staticWithoutImplementation();
 
     // Normal native
     native static void stackArgsIntsFirst(int i1, int i2, int i3, int i4, int i5, int i6, int i7,
@@ -139,10 +141,6 @@
         float f9, int i10, float f10);
 
     // Normal native
-    native static long getStackArgSignExtendedMips64(int i1, int i2, int i3, int i4, int i5, int i6,
-        int stack_arg);
-
-    // Normal native
     static native double logD(double d);
     // Normal native
     static native float logF(float f);
@@ -256,6 +254,8 @@
     native void withoutImplementation_Fast();
     @FastNative
     native Object withoutImplementationRefReturn_Fast();
+    @FastNative
+    native static void staticWithoutImplementation_Fast();
 
     @FastNative
     native static void stackArgsIntsFirst_Fast(int i1, int i2, int i3, int i4, int i5, int i6, int i7,
@@ -273,10 +273,6 @@
         float f9, int i10, float f10);
 
     @FastNative
-    native static long getStackArgSignExtendedMips64_Fast(int i1, int i2, int i3, int i4, int i5, int i6,
-        int stack_arg);
-
-    @FastNative
     static native double logD_Fast(double d);
     @FastNative
     static native float logF_Fast(float f);
@@ -301,6 +297,9 @@
     static native double fooSDD_Critical(double x, double y);
 
     @CriticalNative
+    native static void staticWithoutImplementation_Critical();
+
+    @CriticalNative
     native static void stackArgsIntsFirst_Critical(int i1, int i2, int i3, int i4, int i5, int i6, int i7,
         int i8, int i9, int i10, float f1, float f2, float f3, float f4, float f5, float f6,
         float f7, float f8, float f9, float f10);
diff --git a/test/ProfileTestMultiDex/Main.java b/test/ProfileTestMultiDex/Main.java
index a8ced54..978cb2c 100644
--- a/test/ProfileTestMultiDex/Main.java
+++ b/test/ProfileTestMultiDex/Main.java
@@ -67,3 +67,157 @@
 class SubE extends Super {
   int getValue() { return 16; };
 }
+
+// Add a class with lots of methods so we can test profile guided compilation triggers.
+class ZLotsOfMethods {
+  public void m1() {}
+  public void m2() {}
+  public void m3() {}
+  public void m4() {}
+  public void m5() {}
+  public void m6() {}
+  public void m7() {}
+  public void m8() {}
+  public void m9() {}
+  public void m10() {}
+  public void m11() {}
+  public void m12() {}
+  public void m13() {}
+  public void m14() {}
+  public void m15() {}
+  public void m16() {}
+  public void m17() {}
+  public void m18() {}
+  public void m19() {}
+  public void m20() {}
+  public void m21() {}
+  public void m22() {}
+  public void m23() {}
+  public void m24() {}
+  public void m25() {}
+  public void m26() {}
+  public void m27() {}
+  public void m28() {}
+  public void m29() {}
+  public void m30() {}
+  public void m31() {}
+  public void m32() {}
+  public void m33() {}
+  public void m34() {}
+  public void m35() {}
+  public void m36() {}
+  public void m37() {}
+  public void m38() {}
+  public void m39() {}
+  public void m40() {}
+  public void m41() {}
+  public void m42() {}
+  public void m43() {}
+  public void m44() {}
+  public void m45() {}
+  public void m46() {}
+  public void m47() {}
+  public void m48() {}
+  public void m49() {}
+  public void m50() {}
+  public void m51() {}
+  public void m52() {}
+  public void m53() {}
+  public void m54() {}
+  public void m55() {}
+  public void m56() {}
+  public void m57() {}
+  public void m58() {}
+  public void m59() {}
+  public void m60() {}
+  public void m61() {}
+  public void m62() {}
+  public void m63() {}
+  public void m64() {}
+  public void m65() {}
+  public void m66() {}
+  public void m67() {}
+  public void m68() {}
+  public void m69() {}
+  public void m70() {}
+  public void m71() {}
+  public void m72() {}
+  public void m73() {}
+  public void m74() {}
+  public void m75() {}
+  public void m76() {}
+  public void m77() {}
+  public void m78() {}
+  public void m79() {}
+  public void m80() {}
+  public void m81() {}
+  public void m82() {}
+  public void m83() {}
+  public void m84() {}
+  public void m85() {}
+  public void m86() {}
+  public void m87() {}
+  public void m88() {}
+  public void m89() {}
+  public void m90() {}
+  public void m91() {}
+  public void m92() {}
+  public void m93() {}
+  public void m94() {}
+  public void m95() {}
+  public void m96() {}
+  public void m97() {}
+  public void m98() {}
+  public void m99() {}
+  public void m100() {}
+  public void m101() {}
+  public void m102() {}
+  public void m103() {}
+  public void m104() {}
+  public void m105() {}
+  public void m106() {}
+  public void m107() {}
+  public void m108() {}
+  public void m109() {}
+  public void m110() {}
+  public void m111() {}
+  public void m112() {}
+  public void m113() {}
+  public void m114() {}
+  public void m115() {}
+  public void m116() {}
+  public void m117() {}
+  public void m118() {}
+  public void m119() {}
+  public void m120() {}
+  public void m121() {}
+  public void m122() {}
+  public void m123() {}
+  public void m124() {}
+  public void m125() {}
+  public void m126() {}
+  public void m127() {}
+  public void m128() {}
+  public void m129() {}
+  public void m130() {}
+  public void m131() {}
+  public void m132() {}
+  public void m133() {}
+  public void m134() {}
+  public void m135() {}
+  public void m136() {}
+  public void m137() {}
+  public void m138() {}
+  public void m139() {}
+  public void m140() {}
+  public void m141() {}
+  public void m142() {}
+  public void m143() {}
+  public void m144() {}
+  public void m145() {}
+  public void m146() {}
+  public void m147() {}
+  public void m148() {}
+  public void m149() {}
+  public void m150() {}
+}
diff --git a/test/ProfileTestMultiDex/Second.java b/test/ProfileTestMultiDex/Second.java
index 9f5dc66..a2bb8d4 100644
--- a/test/ProfileTestMultiDex/Second.java
+++ b/test/ProfileTestMultiDex/Second.java
@@ -38,3 +38,158 @@
     return Integer.valueOf(i);
   }
 }
+
+// Add a class with lots of methods so we can test profile guided compilation triggers.
+// Start the name with 'Z' so that the class is added at the end of the dex file.
+class ZLotsOfMethodsSecond {
+  public void m1() {}
+  public void m2() {}
+  public void m3() {}
+  public void m4() {}
+  public void m5() {}
+  public void m6() {}
+  public void m7() {}
+  public void m8() {}
+  public void m9() {}
+  public void m10() {}
+  public void m11() {}
+  public void m12() {}
+  public void m13() {}
+  public void m14() {}
+  public void m15() {}
+  public void m16() {}
+  public void m17() {}
+  public void m18() {}
+  public void m19() {}
+  public void m20() {}
+  public void m21() {}
+  public void m22() {}
+  public void m23() {}
+  public void m24() {}
+  public void m25() {}
+  public void m26() {}
+  public void m27() {}
+  public void m28() {}
+  public void m29() {}
+  public void m30() {}
+  public void m31() {}
+  public void m32() {}
+  public void m33() {}
+  public void m34() {}
+  public void m35() {}
+  public void m36() {}
+  public void m37() {}
+  public void m38() {}
+  public void m39() {}
+  public void m40() {}
+  public void m41() {}
+  public void m42() {}
+  public void m43() {}
+  public void m44() {}
+  public void m45() {}
+  public void m46() {}
+  public void m47() {}
+  public void m48() {}
+  public void m49() {}
+  public void m50() {}
+  public void m51() {}
+  public void m52() {}
+  public void m53() {}
+  public void m54() {}
+  public void m55() {}
+  public void m56() {}
+  public void m57() {}
+  public void m58() {}
+  public void m59() {}
+  public void m60() {}
+  public void m61() {}
+  public void m62() {}
+  public void m63() {}
+  public void m64() {}
+  public void m65() {}
+  public void m66() {}
+  public void m67() {}
+  public void m68() {}
+  public void m69() {}
+  public void m70() {}
+  public void m71() {}
+  public void m72() {}
+  public void m73() {}
+  public void m74() {}
+  public void m75() {}
+  public void m76() {}
+  public void m77() {}
+  public void m78() {}
+  public void m79() {}
+  public void m80() {}
+  public void m81() {}
+  public void m82() {}
+  public void m83() {}
+  public void m84() {}
+  public void m85() {}
+  public void m86() {}
+  public void m87() {}
+  public void m88() {}
+  public void m89() {}
+  public void m90() {}
+  public void m91() {}
+  public void m92() {}
+  public void m93() {}
+  public void m94() {}
+  public void m95() {}
+  public void m96() {}
+  public void m97() {}
+  public void m98() {}
+  public void m99() {}
+  public void m100() {}
+  public void m101() {}
+  public void m102() {}
+  public void m103() {}
+  public void m104() {}
+  public void m105() {}
+  public void m106() {}
+  public void m107() {}
+  public void m108() {}
+  public void m109() {}
+  public void m110() {}
+  public void m111() {}
+  public void m112() {}
+  public void m113() {}
+  public void m114() {}
+  public void m115() {}
+  public void m116() {}
+  public void m117() {}
+  public void m118() {}
+  public void m119() {}
+  public void m120() {}
+  public void m121() {}
+  public void m122() {}
+  public void m123() {}
+  public void m124() {}
+  public void m125() {}
+  public void m126() {}
+  public void m127() {}
+  public void m128() {}
+  public void m129() {}
+  public void m130() {}
+  public void m131() {}
+  public void m132() {}
+  public void m133() {}
+  public void m134() {}
+  public void m135() {}
+  public void m136() {}
+  public void m137() {}
+  public void m138() {}
+  public void m139() {}
+  public void m140() {}
+  public void m141() {}
+  public void m142() {}
+  public void m143() {}
+  public void m144() {}
+  public void m145() {}
+  public void m146() {}
+  public void m147() {}
+  public void m148() {}
+  public void m149() {}
+  public void m150() {}
+}
diff --git a/test/ProfileTestMultiDex/main.jpp b/test/ProfileTestMultiDex/main.jpp
index 5e55e96..0644072 100644
--- a/test/ProfileTestMultiDex/main.jpp
+++ b/test/ProfileTestMultiDex/main.jpp
@@ -19,3 +19,6 @@
 SubE:
   @@com.android.jack.annotations.ForceInMainDex
   class SubE
+ZLotsOfMethods:
+  @@com.android.jack.annotations.ForceInMainDex
+  class ZLotsOfMethods
diff --git a/test/ProfileTestMultiDex/main.list b/test/ProfileTestMultiDex/main.list
index ec131f0..6ca79d4 100644
--- a/test/ProfileTestMultiDex/main.list
+++ b/test/ProfileTestMultiDex/main.list
@@ -5,3 +5,4 @@
 SubB.class
 SubD.class
 SubE.class
+ZLotsOfMethods.class
diff --git a/test/README.chroot.md b/test/README.chroot.md
new file mode 100644
index 0000000..7c3fa8f
--- /dev/null
+++ b/test/README.chroot.md
@@ -0,0 +1,130 @@
+# ART Chroot-Based On-Device Testing
+
+This file documents the use of a chroot environment in on-device testing of the
+Android Runtime (ART). Using a chroot allows tests to run a standalone ART from
+a locally built source tree on a device running (almost any) system image and
+does not interfere with the Runtime installed in the device's system partition.
+
+## Introduction
+
+The Android Runtime (ART) supports testing in a chroot-based environment, by
+setting up a chroot directory in a `ART_TEST_CHROOT` directory located under
+`/data/local` (e.g. `ART_TEST_CHROOT=/data/local/art-test-chroot`) on a device,
+installing ART and all other required artifacts there, and having tests use `adb
+shell chroot $ART_TEST_CHROOT <command>` to execute commands on the device
+within this environment.
+
+This way to run tests using a "standalone ART" ("guest system") only affects
+files in the data partition (the system partition and other partitions are left
+untouched) and is as independent as possible from the Android system ("host
+system") running on the device. This has some benefits:
+
+* no need to build and flash a whole device to do ART testing (or "overwriting"
+  an existing ART by syncing the system partition);
+* the possibility to use a smaller AOSP Android manifest
+  ([`master-art`](https://android.googlesource.com/platform/manifest/+/refs/heads/master-art/default.xml))
+  to build ART and the required dependencies for testing;
+* no instability due to updating/replacing ART on the system partition (a
+  functional Android Runtime is necessary to properly boot a device);
+* the possibility to have several standalone ART instances (one per directory,
+  e.g. `/data/local/art-test-chroot1`, `/data/local/art-test-chroot2`, etc.).
+
+Note that using this chroot-based approach requires root access to the device
+(i.e. be able to run `adb root` successfully).
+
+## Quick User Guide
+
+0. Unset variables which are not used with the chroot-based approach (if they
+   were set previously):
+   ```bash
+   unset ART_TEST_ANDROID_ROOT
+   unset CUSTOM_TARGET_LINKER
+   unset ART_TEST_ANDROID_ART_ROOT
+   unset ART_TEST_ANDROID_RUNTIME_ROOT
+   unset ART_TEST_ANDROID_I18N_ROOT
+   unset ART_TEST_ANDROID_TZDATA_ROOT
+   ```
+1. Set the chroot directory in `ART_TEST_CHROOT`:
+    ```bash
+    export ART_TEST_CHROOT=/data/local/art-test-chroot
+    ```
+2. Set lunch target and ADB:
+    * With a minimal `aosp/master-art` tree:
+        ```bash
+        export SOONG_ALLOW_MISSING_DEPENDENCIES=true
+        . ./build/envsetup.sh
+        lunch armv8-eng  # or arm_krait-eng for 32-bit ARM
+        export PATH="$(pwd)/prebuilts/runtime:$PATH"
+        export ADB="$ANDROID_BUILD_TOP/prebuilts/runtime/adb"
+        ```
+    * With a full Android (AOSP) `aosp/master` tree:
+        ```bash
+        export OVERRIDE_TARGET_FLATTEN_APEX=true
+        . ./build/envsetup.sh
+        lunch aosp_arm64-eng  # or aosp_arm-eng for 32-bit ARM
+        m adb
+        ```
+3. Build ART and required dependencies:
+    ```bash
+    art/tools/buildbot-build.sh --target
+    ```
+4. Clean up the device:
+    ```bash
+    art/tools/buildbot-cleanup-device.sh
+    ```
+5. Setup the device (including setting up mount points and files in the chroot directory):
+    ```bash
+    art/tools/buildbot-setup-device.sh
+    ```
+6. Populate the chroot tree on the device (including "activating" APEX packages
+   in the chroot environment):
+    ```bash
+    art/tools/buildbot-sync.sh
+    ```
+7. Run ART gtests:
+    ```bash
+    art/tools/run-gtests.sh -j4
+    ```
+    * Note: This currently fails on test
+    `test-art-target-gtest-image_space_test{32,64}` when using the full AOSP
+    tree (b/119815008).
+        * Workaround: Run `m clean-oat-host` before the build step
+        (`art/tools/buildbot-build.sh --target`) above.
+    * Note: The `-j` option is not honored yet (b/129930445).
+    * Specific tests to run can be passed on the command line, specified by
+    their absolute paths beginning with `/apex/`.
+8. Run ART run-tests:
+    * On a 64-bit target:
+        ```bash
+        art/test/testrunner/testrunner.py --target --64
+        ```
+    * On a 32-bit target:
+        ```bash
+        art/test/testrunner/testrunner.py --target --32
+        ```
+9. Run Libcore tests:
+    * On a 64-bit target:
+        ```bash
+        art/tools/run-libcore-tests.sh --mode=device --variant=X64
+        ```
+    * On a 32-bit target:
+        ```bash
+        art/tools/run-libcore-tests.sh --mode=device --variant=X32
+        ```
+10. Run JDWP tests:
+    * On a 64-bit target:
+        ```bash
+        art/tools/run-jdwp-tests.sh --mode=device --variant=X64
+        ```
+    * On a 32-bit target:
+        ```bash
+        art/tools/run-jdwp-tests.sh --mode=device --variant=X32
+        ```
+11. Tear down device setup:
+    ```bash
+    art/tools/buildbot-teardown-device.sh
+    ```
+12. Clean up the device:
+    ```bash
+    art/tools/buildbot-cleanup-device.sh
+    ```
diff --git a/test/README.md b/test/README.md
index 350350e..d199bfe 100644
--- a/test/README.md
+++ b/test/README.md
@@ -1,24 +1,90 @@
-# VM test harness
+# ART Testing
 
-There are two suites of tests in this directory: run-tests and gtests.
+There are two suites of tests in the Android Runtime (ART):
+* _ART run-tests_: Tests of the ART runtime using Dex bytecode (mostly written
+  in Java).
+* _ART gtests_: C++ tests exercising various aspects of ART.
 
-The run-tests are identified by directories named with with a numeric
-prefix and containing an info.txt file. For most run tests, the
-sources are in the "src" subdirectory. Sources found in the "src2"
-directory are compiled separately but to the same output directory;
-this can be used to exercise "API mismatch" situations by replacing
-class files created in the first pass. The "src-ex" directory is
-built separately, and is intended for exercising class loaders.
-Resources can be stored in the "res" directory, which is distributed
-together with the executable files.
+## ART run-tests
 
-The gtests are in named directories and contain a .java source
-file.
+ART run-tests are tests exercising the runtime using Dex bytecode. They are
+written in Java and/or [Smali](https://github.com/JesusFreke/smali)
+(compiled/assembled as Dex bytecode) and sometimes native code (written as C/C++
+testing libraries). Some tests also make use of the
+[Jasmin](http://jasmin.sourceforge.net/) assembler or the
+[ASM](https://asm.ow2.io/) bytecode manipulation tool. Run-tests are
+executed on the ART runtime (`dalvikvm`), possibly preceded by a
+pre-optimization of the Dex code (using `dex2oat`).
 
-All tests in either suite can be run using the "art/test.py"
-script. Additionally, run-tests can be run individidually. All of the
-tests can be run on the build host, on a USB-attached device, or using
-the build host "reference implementation".
+The run-tests are identified by directories in this `test` directory, named with
+a numeric prefix and containing an `info.txt` file. For most run tests, the
+sources are in the `src` subdirectory. Sources found in the `src2` directory are
+compiled separately but to the same output directory; this can be used to
+exercise "API mismatch" situations by replacing class files created in the first
+pass. The `src-ex` directory is built separately, and is intended for exercising
+class loaders.  Resources can be stored in the `res` directory, which is
+distributed together with the executable files.
+
+The run-tests logic lives in the `test/run-test` Bash script. The execution of a
+run-test has three main parts: building the test, running the test, and checking
+the test's output. By default, these three steps are implemented by three Bash
+scripts located in the `test/etc` directory (`default-build`, `default-run`, and
+`default-check`). These scripts rely on environment variables set by
+`test/run-test`.
+
+The default logic for all of these steps (build, run, check) is overridden
+if the test's directory contains a Bash script named after the step
+(i.e. `build`, `run`, or `check`). Note that the default logic of the "run" step
+is actually implemented in the "JAR runner" (`test/etc/run-test-jar`), invoked
+by `test/etc/default-run`.
+
+After the execution of a run-test, the check step's default behavior
+(implemented in `test/etc/default-check`) is to compare its standard output with
+the contents of the `expected.txt` file contained in the test's directory; any
+mismatch triggers a test failure.
+
+The `test/run-test` script handles the execution of a single run-test in a given
+configuration. The Python script `test/testrunner/testrunner.py` is a convenient
+script handling the construction and execution of multiple tests in one
+configuration or more.
+
+To see the invocation options supported by `run-test` and `testrunner.py`, run
+these commands from the Android source top-level directory:
+```sh
+art/test/run-test --help
+```
+```sh
+art/test/testrunner/testrunner.py --help
+```
+
+## ART gtests
+
+ART gtests are written in C++ using the [Google
+Test](https://github.com/google/googletest) framework. These tests exercise
+various aspects of the runtime (the logic in `libart`, `libart-compiler`, etc.)
+and its binaries (`dalvikvm`, `dex2oat`, `oatdump`, etc.). Some of them are used
+as unit tests to verify a particular construct in ART. These tests may depend on
+some test Dex files and core images.
+
+ART gtests are defined in various directories within the ART project (usually in
+the same directory as the code they exercise). Their source files usually end
+with the suffix `_test.cc`. The construction logic of these tests is implemented
+in ART's build system (`Android.bp` and `Android*.mk` files). On host, these
+gtests can be run by executing `m test-art-host-gtest`. On device, the
+recommended approach is to run these tests in a chroot environment (see
+`README.chroot.md` in this directory).
+
+
+# Test execution
+
+All tests in either suite can be run using the `art/test.py`
+script. Additionally, run-tests can be run individually. All of the tests can be
+run on the build host, on a USB-attached device, or using the build host
+"reference implementation".
+
+ART also supports running target (device) tests in a chroot environment (see
+`README.chroot.md` in this directory). This is currently the recommended way to
+run tests on target (rather than using `art/test.py --target`).
 
 To see command flags run:
 
@@ -73,3 +139,11 @@
 ```sh
 $ art/test.py --target -r -t 001-HelloWorld
 ```
+
+
+# ART Continuous Integration
+
+Both ART run-tests and gtests are run continuously as part of [ART's continuous
+integration](https://ci.chromium.org/p/art/g/luci/console). In addition, two
+other test suites are run continuously on this service: Libcore tests and JDWP
+tests.
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index ba2d46e..6c76288 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -19,6 +19,7 @@
 #include <android-base/logging.h>
 #include <android-base/macros.h>
 
+#include "art_field.h"
 #include "art_method-inl.h"
 #include "base/enums.h"
 #include "common_throws.h"
@@ -29,12 +30,15 @@
 #include "jit/profiling_info.h"
 #include "jni/jni_internal.h"
 #include "mirror/class-inl.h"
+#include "mirror/class.h"
 #include "nativehelper/ScopedUtfChars.h"
+#include "oat.h"
 #include "oat_file.h"
 #include "oat_quick_method_header.h"
 #include "profile/profile_compilation_info.h"
 #include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
+#include "scoped_thread_state_change.h"
 #include "thread-current-inl.h"
 
 namespace art {
@@ -175,7 +179,8 @@
   }
   const void* actual_code = method->GetEntryPointFromQuickCompiledCodePtrSize(kRuntimePointerSize);
   bool interpreter =
-      Runtime::Current()->GetClassLinker()->ShouldUseInterpreterEntrypoint(method, actual_code);
+      Runtime::Current()->GetClassLinker()->ShouldUseInterpreterEntrypoint(method, actual_code) ||
+      (actual_code == interpreter::GetNterpEntryPoint());
   return !interpreter;
 }
 
@@ -236,14 +241,28 @@
       ThrowIllegalStateException(msg.c_str());
       return;
     }
-    // We force initialization of the declaring class to make sure the method doesn't keep
-    // the resolution stub as entrypoint.
+    // We force visible initialization of the declaring class to make sure the method
+    // doesn't keep the resolution stub as entrypoint.
     StackHandleScope<1> hs(self);
     Handle<mirror::Class> h_klass(hs.NewHandle(method->GetDeclaringClass()));
-    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_klass, true, true)) {
+    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+    if (!class_linker->EnsureInitialized(self, h_klass, true, true)) {
       self->AssertPendingException();
       return;
     }
+    if (UNLIKELY(!h_klass->IsInitialized())) {
+      // Must be initializing in this thread.
+      CHECK_EQ(h_klass->GetStatus(), ClassStatus::kInitializing);
+      CHECK_EQ(h_klass->GetClinitThreadId(), self->GetTid());
+      std::string msg(method->PrettyMethod());
+      msg += ": is not safe to jit because the class is being initialized in this thread!";
+      ThrowIllegalStateException(msg.c_str());
+      return;
+    }
+    if (!h_klass->IsVisiblyInitialized()) {
+      ScopedThreadSuspension sts(self, ThreadState::kNative);
+      class_linker->MakeInitializedClassesVisiblyInitialized(self, /*wait=*/ true);
+    }
   }
   jit::Jit* jit = GetJitIfEnabled();
   jit::JitCodeCache* code_cache = jit->GetCodeCache();
@@ -253,18 +272,22 @@
   while (true) {
     if (native && code_cache->ContainsMethod(method)) {
       break;
-    } else if (code_cache->WillExecuteJitCode(method)) {
-      break;
     } else {
       // Sleep to yield to the compiler thread.
       usleep(1000);
       ScopedObjectAccess soa(self);
-      if (!native) {
+      if (!native && jit->GetCodeCache()->CanAllocateProfilingInfo()) {
         // Make sure there is a profiling info, required by the compiler.
         ProfilingInfo::Create(self, method, /* retry_allocation */ true);
       }
-      // Will either ensure it's compiled or do the compilation itself.
-      jit->CompileMethod(method, self, /*baseline=*/ false, /*osr=*/ false);
+      // Will either ensure it's compiled or do the compilation itself. We do
+      // this before checking if we will execute JIT code to make sure the
+      // method is compiled 'optimized' and not baseline (tests expect optimized
+      // compilation).
+      jit->CompileMethod(method, self, /*baseline=*/ false, /*osr=*/ false, /*prejit=*/ false);
+      if (code_cache->WillExecuteJitCode(method)) {
+        break;
+      }
     }
   }
 }
@@ -383,17 +406,6 @@
   return (jit != nullptr) ? jit->HotMethodThreshold() : 0;
 }
 
-extern "C" JNIEXPORT void JNICALL Java_Main_transitionJitFromZygote(JNIEnv*, jclass) {
-  jit::Jit* jit = Runtime::Current()->GetJit();
-  if (jit == nullptr) {
-    return;
-  }
-  // Mimic the transition behavior a zygote fork would have.
-  jit->PreZygoteFork();
-  jit->GetCodeCache()->PostForkChildAction(/*is_system_server=*/ false, /*is_zygote=*/ false);
-  jit->PostForkChildAction(/*is_system_server=*/ false, /*is_zygote=*/ false);
-}
-
 extern "C" JNIEXPORT void JNICALL Java_Main_deoptimizeBootImage(JNIEnv*, jclass) {
   ScopedSuspendAll ssa(__FUNCTION__);
   Runtime::Current()->DeoptimizeBootImage();
@@ -407,4 +419,16 @@
   Runtime::Current()->SetTargetSdkVersion(static_cast<uint32_t>(version));
 }
 
+extern "C" JNIEXPORT jlong JNICALL Java_Main_genericFieldOffset(JNIEnv* env, jclass, jobject fld) {
+  jfieldID fid = env->FromReflectedField(fld);
+  ScopedObjectAccess soa(env);
+  ArtField* af = jni::DecodeArtField(fid);
+  return af->GetOffset().Int32Value();
+}
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isObsoleteObject(JNIEnv* env, jclass, jclass c) {
+  ScopedObjectAccess soa(env);
+  return soa.Decode<mirror::Class>(c)->IsObsoleteObject();
+}
+
 }  // namespace art
diff --git a/test/common/stack_inspect.cc b/test/common/stack_inspect.cc
index cb011a8..79c7a36 100644
--- a/test/common/stack_inspect.cc
+++ b/test/common/stack_inspect.cc
@@ -25,6 +25,7 @@
 #include "mirror/class-inl.h"
 #include "nth_caller_visitor.h"
 #include "oat_file.h"
+#include "oat_quick_method_header.h"
 #include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
 #include "stack.h"
@@ -47,7 +48,10 @@
   NthCallerVisitor caller(soa.Self(), level, false);
   caller.WalkStack();
   CHECK(caller.caller != nullptr);
-  return caller.GetCurrentShadowFrame() != nullptr ? JNI_TRUE : JNI_FALSE;
+  bool is_shadow_frame = (caller.GetCurrentShadowFrame() != nullptr);
+  bool is_nterp_frame = (caller.GetCurrentQuickFrame() != nullptr) &&
+      (caller.GetCurrentOatQuickMethodHeader()->IsNterpMethodHeader());
+  return (is_shadow_frame || is_nterp_frame) ? JNI_TRUE : JNI_FALSE;
 }
 
 // public static native boolean isInterpreted();
@@ -187,8 +191,11 @@
   jobject result = nullptr;
   StackVisitor::WalkStack(
       [&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
-        // Discard stubs and Main.getThisOfCaller.
-        if (stack_visitor->GetMethod() == nullptr || stack_visitor->GetMethod()->IsNative()) {
+        // Discard stubs and Main.getThisOfCaller and methods without vreg info.
+        if (stack_visitor->GetMethod() == nullptr ||
+            stack_visitor->GetMethod()->IsNative() ||
+            (stack_visitor->GetCurrentShadowFrame() == nullptr &&
+             !Runtime::Current()->IsAsyncDeoptimizeable(stack_visitor->GetCurrentQuickFramePc()))) {
           return true;
         }
         result = soa.AddLocalReference<jobject>(stack_visitor->GetThisObject());
diff --git a/test/dexdump/run-all-tests b/test/dexdump/run-all-tests
index e555a44..2fe4a02 100755
--- a/test/dexdump/run-all-tests
+++ b/test/dexdump/run-all-tests
@@ -38,7 +38,7 @@
 mkdir ${tmpdir}
 
 # Set up tools and commands to run
-DEXDUMP="${ANDROID_HOST_OUT}/bin/dexdump2"
+DEXDUMP="${ANDROID_HOST_OUT}/bin/dexdump"
 DEXLIST="${ANDROID_HOST_OUT}/bin/dexlist"
 
 declare -A SUFFIX_COMMAND_MAP
diff --git a/test/etc/default-build b/test/etc/default-build
index d203698..1122c01 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -270,7 +270,7 @@
   if [[ "$USE_DESUGAR" = "true" ]]; then
     local boot_class_path_list=$($ANDROID_BUILD_TOP/art/tools/bootjars.sh --$BUILD_MODE --core --path)
     for boot_class_path_element in $boot_class_path_list; do
-      d8_local_flags="$d8_local_flags --classpath $boot_class_path_element"
+      d8_local_flags="$d8_local_flags --lib $boot_class_path_element"
     done
   else
     d8_local_flags="$d8_local_flags --no-desugaring"
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index bcd35e6..101fa52 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -2,6 +2,11 @@
 #
 # Runner for an individual run-test.
 
+if [[ -z "$ANDROID_BUILD_TOP" ]]; then
+  echo 'ANDROID_BUILD_TOP environment variable is empty; did you forget to run `lunch`?'
+  exit 1
+fi
+
 msg() {
     if [ "$QUIET" = "n" ]; then
         echo "$@"
@@ -9,11 +14,13 @@
 }
 
 ANDROID_ROOT="/system"
-ANDROID_RUNTIME_ROOT="/apex/com.android.runtime"
+ANDROID_ART_ROOT="/apex/com.android.art"
+ANDROID_I18N_ROOT="/apex/com.android.i18n"
 ANDROID_TZDATA_ROOT="/apex/com.android.tzdata"
-ARCHITECTURES_32="(arm|x86|mips|none)"
-ARCHITECTURES_64="(arm64|x86_64|mips64|none)"
+ARCHITECTURES_32="(arm|x86|none)"
+ARCHITECTURES_64="(arm64|x86_64|none)"
 ARCHITECTURES_PATTERN="${ARCHITECTURES_32}"
+GET_DEVICE_ISA_BITNESS_FLAG="--32"
 BOOT_IMAGE=""
 CHROOT=
 COMPILE_FLAGS=""
@@ -23,8 +30,8 @@
 DEBUGGER_AGENT=""
 WRAP_DEBUGGER_AGENT="n"
 DEV_MODE="n"
-DEX2OAT_NDEBUG_BINARY="dex2oat"
-DEX2OAT_DEBUG_BINARY="dex2oatd"
+DEX2OAT_NDEBUG_BINARY="dex2oat32"
+DEX2OAT_DEBUG_BINARY="dex2oatd32"
 EXPERIMENTAL=""
 FALSE_BIN="false"
 FLAGS=""
@@ -45,6 +52,7 @@
 JIT="n"
 INVOKE_WITH=""
 IS_JVMTI_TEST="n"
+ADD_LIBDIR_ARGUMENTS="n"
 ISA=x86
 LIBRARY_DIRECTORY="lib"
 TEST_DIRECTORY="nativetest"
@@ -55,14 +63,11 @@
 RELOCATE="n"
 STRIP_DEX="n"
 SECONDARY_DEX=""
-TIME_OUT="gdb"  # "n" (disabled), "timeout" (use timeout), "gdb" (use gdb)
-TIMEOUT_DUMPER=timeout_dumper
-# Value in seconds
-if [ "$ART_USE_READ_BARRIER" != "false" ]; then
-  TIME_OUT_VALUE=2400  # 40 minutes.
-else
-  TIME_OUT_VALUE=1200  # 20 minutes.
-fi
+TIME_OUT="n"  # "n" (disabled), "timeout" (use timeout), "gdb" (use gdb)
+TIMEOUT_DUMPER=signal_dumper
+# Values in seconds.
+TIME_OUT_EXTRA=0
+TIME_OUT_VALUE=
 USE_GDB="n"
 USE_GDBSERVER="n"
 GDBSERVER_PORT=":5039"
@@ -91,6 +96,7 @@
 DEX2OAT_TIMEOUT="300" # 5 mins
 # The *hard* timeout where we really start trying to kill the dex2oat.
 DEX2OAT_RT_TIMEOUT="360" # 6 mins
+CREATE_RUNNER="n"
 
 # if "y", run 'sync' before dalvikvm to make sure all files from
 # build step (e.g. dex2oat) were finished writing.
@@ -126,6 +132,9 @@
         USE_JVMTI="y"
         IS_JVMTI_TEST="y"
         shift
+    elif [ "x$1" = "x--add-libdir-argument" ]; then
+        ADD_LIBDIR_ARGUMENTS="y"
+        shift
     elif [ "x$1" = "x-O" ]; then
         TEST_IS_NDEBUG="y"
         shift
@@ -138,8 +147,8 @@
         LIB="$1"
         shift
     elif [ "x$1" = "x--gc-stress" ]; then
-        # Give an extra 5 mins if we are gc-stress.
-        TIME_OUT_VALUE=$((${TIME_OUT_VALUE} + 300))
+        # Give an extra 20 mins if we are gc-stress.
+        TIME_OUT_EXTRA=$((${TIME_OUT_EXTRA} + 1200))
         shift
     elif [ "x$1" = "x--testlib" ]; then
         shift
@@ -157,12 +166,20 @@
         fi
         ARGS="${ARGS} $1"
         shift
+    elif [ "x$1" = "x--compiler-only-option" ]; then
+        shift
+        option="$1"
+        COMPILE_FLAGS="${COMPILE_FLAGS} $option"
+        shift
     elif [ "x$1" = "x-Xcompiler-option" ]; then
         shift
         option="$1"
         FLAGS="${FLAGS} -Xcompiler-option $option"
         COMPILE_FLAGS="${COMPILE_FLAGS} $option"
         shift
+    elif [ "x$1" = "x--create-runner" ]; then
+        CREATE_RUNNER="y"
+        shift
     elif [ "x$1" = "x--android-runtime-option" ]; then
         shift
         option="$1"
@@ -172,6 +189,11 @@
         shift
         option="$1"
         FLAGS="${FLAGS} $option"
+        if [ "x$option" = "x-Xmethod-trace" ]; then
+            # Method tracing can slow some tests down a lot, in particular
+            # 530-checker-lse2.
+            TIME_OUT_EXTRA=$((${TIME_OUT_EXTRA} + 1200))
+        fi
         shift
     elif [ "x$1" = "x--boot" ]; then
         shift
@@ -221,8 +243,14 @@
     elif [ "x$1" = "x--host" ]; then
         HOST="y"
         ANDROID_ROOT="${ANDROID_HOST_OUT}"
-        ANDROID_RUNTIME_ROOT="${ANDROID_HOST_OUT}/com.android.runtime"
+        ANDROID_ART_ROOT="${ANDROID_HOST_OUT}/com.android.art"
+        ANDROID_I18N_ROOT="${ANDROID_HOST_OUT}/com.android.i18n"
         ANDROID_TZDATA_ROOT="${ANDROID_HOST_OUT}/com.android.tzdata"
+        # On host, we default to using the symlink, as the PREFER_32BIT
+        # configuration is the only configuration building a 32bit version of
+        # dex2oat.
+        DEX2OAT_DEBUG_BINARY="dex2oatd"
+        DEX2OAT_NDEBUG_BINARY="dex2oat"
         shift
     elif [ "x$1" = "x--bionic" ]; then
         BIONIC="y"
@@ -276,6 +304,7 @@
         TIME_OUT="n"
         shift
     elif [ "x$1" = "x--debug" ]; then
+        USE_JVMTI="y"
         DEBUGGER="y"
         TIME_OUT="n"
         shift
@@ -292,10 +321,6 @@
         USE_GDBSERVER="y"
         DEV_MODE="y"
         TIME_OUT="n"
-        HOST="y"
-        ANDROID_ROOT="${ANDROID_HOST_OUT}"
-        ANDROID_RUNTIME_ROOT="${ANDROID_HOST_OUT}/com.android.runtime"
-        ANDROID_TZDATA_ROOT="${ANDROID_HOST_OUT}/com.android.tzdata"
         shift
     elif [ "x$1" = "x--gdb" ]; then
         USE_GDB="y"
@@ -356,9 +381,13 @@
         shift
         ANDROID_ROOT="$1"
         shift
-    elif [ "x$1" = "x--android-runtime-root" ]; then
+    elif [ "x$1" = "x--android-i18n-root" ]; then
         shift
-        ANDROID_RUNTIME_ROOT="$1"
+        ANDROID_I18N_ROOT="$1"
+        shift
+    elif [ "x$1" = "x--android-art-root" ]; then
+        shift
+        ANDROID_ART_ROOT="$1"
         shift
     elif [ "x$1" = "x--android-tzdata-root" ]; then
         shift
@@ -382,6 +411,9 @@
         LIBRARY_DIRECTORY="lib64"
         TEST_DIRECTORY="nativetest64"
         ARCHITECTURES_PATTERN="${ARCHITECTURES_64}"
+        GET_DEVICE_ISA_BITNESS_FLAG="--64"
+        DEX2OAT_NDEBUG_BINARY="dex2oat64"
+        DEX2OAT_DEBUG_BINARY="dex2oatd64"
         shift
     elif [ "x$1" = "x--experimental" ]; then
         if [ "$#" -lt 2 ]; then
@@ -428,9 +460,38 @@
     fi
 done
 
+# HACK: Force the use of `signal_dumper` on host.
+if [[ "$HOST" = "y" ]]; then
+  TIME_OUT="timeout"
+fi
+
+# If you change this, update the timeout in testrunner.py as well.
+if [ -z "$TIME_OUT_VALUE" ] ; then
+  # 10 minutes is the default.
+  TIME_OUT_VALUE=600
+
+  # For sanitized builds use a larger base.
+  # TODO: Consider sanitized target builds?
+  if [ "x$SANITIZE_HOST" != "x" ] ; then
+    TIME_OUT_VALUE=1500  # 25 minutes.
+  fi
+
+  TIME_OUT_VALUE=$((${TIME_OUT_VALUE} + ${TIME_OUT_EXTRA}))
+fi
+
+# Escape hatch for slow hosts or devices. Accept an environment variable as a timeout factor.
+if [ ! -z "$ART_TIME_OUT_MULTIPLIER" ] ; then
+  TIME_OUT_VALUE=$((${TIME_OUT_VALUE} * ${ART_TIME_OUT_MULTIPLIER}))
+fi
+
 # The DEX_LOCATION with the chroot prefix, if any.
 CHROOT_DEX_LOCATION="$CHROOT$DEX_LOCATION"
 
+# If running on device, determine the ISA of the device.
+if [ "$HOST" = "n" ]; then
+  ISA=$("$ANDROID_BUILD_TOP/art/test/utils/get-device-isa" "$GET_DEVICE_ISA_BITNESS_FLAG")
+fi
+
 if [ "$USE_JVM" = "n" ]; then
     FLAGS="${FLAGS} ${ANDROID_FLAGS}"
     # we don't want to be trying to get adbconnections since the plugin might
@@ -484,7 +545,7 @@
 
 if [ "$DEBUGGER" = "y" ]; then
   # Use this instead for ddms and connect by running 'ddms':
-  # DEBUGGER_OPTS="-agentlib:jdwp=transport=dt_android_adb,server=y,suspend=y"
+  # DEBUGGER_OPTS="-XjdwpOptions=server=y,suspend=y -XjdwpProvider:adbconnection"
   # TODO: add a separate --ddms option?
 
   PORT=12345
@@ -494,9 +555,8 @@
   fi
   msg "    jdb -attach localhost:$PORT"
   if [ "$USE_JVM" = "n" ]; then
-    # TODO We should switch over to using the jvmti agent by default.
-    # Need to tell the runtime to enable the internal jdwp implementation.
-    DEBUGGER_OPTS="-XjdwpOptions:transport=dt_socket,address=$PORT,server=y,suspend=y -XjdwpProvider:internal"
+    # Use the default libjdwp agent. Use --debug-agent to use a custom one.
+    DEBUGGER_OPTS="-agentpath:libjdwp.so=transport=dt_socket,address=$PORT,server=y,suspend=y -XjdwpProvider:internal"
   else
     DEBUGGER_OPTS="-agentlib:jdwp=transport=dt_socket,address=$PORT,server=y,suspend=y"
   fi
@@ -535,6 +595,14 @@
   fi
 fi
 
+# Add the libdir to the argv passed to the main function.
+if [ "$ADD_LIBDIR_ARGUMENTS" = "y" ]; then
+  if [[ "$HOST" = "y" ]]; then
+    ARGS="${ARGS} ${ANDROID_HOST_OUT}/${TEST_DIRECTORY}/"
+  else
+    ARGS="${ARGS} /data/${TEST_DIRECTORY}/art/${ISA}/"
+  fi
+fi
 if [ "$IS_JVMTI_TEST" = "y" ]; then
   agent=libtiagentd.so
   lib=tiagentd
@@ -547,7 +615,14 @@
   if [[ "$USE_JVM" = "y" ]]; then
     FLAGS="${FLAGS} -agentpath:${ANDROID_HOST_OUT}/nativetest64/${agent}=${TEST_NAME},jvm"
   else
-    FLAGS="${FLAGS} -agentpath:${agent}=${TEST_NAME},art"
+    if [[ "$HOST" = "y" ]]; then
+      FLAGS="${FLAGS} -agentpath:${agent}=${TEST_NAME},art"
+    else
+      # The linker configuration used for dalvikvm(64) in the ART APEX
+      # requires us to pass the full path to the agent to the runtime when
+      # running on device.
+      FLAGS="${FLAGS} -agentpath:/data/${TEST_DIRECTORY}/art/${ISA}/${agent}=${TEST_NAME},art"
+    fi
   fi
 fi
 
@@ -578,7 +653,14 @@
   if [[ "$USE_JVM" = "y" ]]; then
     FLAGS="${FLAGS} -agentpath:${ANDROID_HOST_OUT}/nativetest64/${agent}=${agent_args}"
   else
-    FLAGS="${FLAGS} -agentpath:${agent}=${agent_args}"
+    if [[ "$HOST" = "y" ]]; then
+      FLAGS="${FLAGS} -agentpath:${agent}=${agent_args}"
+    else
+      # The linker configuration used for dalvikvm(64) in the ART APEX
+      # requires us to pass the full path to the agent to the runtime when
+      # running on device.
+      FLAGS="${FLAGS} -agentpath:/data/${TEST_DIRECTORY}/art/${ISA}/${agent}=${agent_args}"
+    fi
   fi
 fi
 
@@ -594,14 +676,22 @@
   if [ "$DEV_MODE" = "y" ]; then
     echo $cmdline
   fi
-  $cmdline
+  if [ "$CREATE_RUNNER" = "y" ]; then
+    echo "#!/bin/bash" > runit.sh
+    echo "export LD_LIBRARY_PATH=\"$LD_LIBRARY_PATH\"" >> runit.sh
+    echo $cmdline "2>&1" >> runit.sh
+    chmod u+x runit.sh
+    echo "Runnable test script written to $PWD/runit.sh"
+  else
+    $cmdline
+  fi
   exit
 fi
 
 # Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
 # because that's what we use for compiling the core.art image.
 # It may contain additional modules from TEST_CORE_JARS.
-bpath_modules="core-oj core-libart okhttp bouncycastle apache-xml conscrypt"
+bpath_modules="core-oj core-libart core-icu4j okhttp bouncycastle apache-xml conscrypt"
 if [ "${HOST}" = "y" ]; then
     framework="${ANDROID_HOST_OUT}/framework"
     if [ "${ANDROID_HOST_OUT:0:${#ANDROID_BUILD_TOP}+1}" = "${ANDROID_BUILD_TOP}/" ]; then
@@ -654,7 +744,8 @@
     echo "Cannot pass both --gdb and --gdbserver at the same time!" >&2
     exit 1
   elif [ "$HOST" = "n" ]; then
-    GDB="$GDBSERVER_DEVICE $GDBSERVER_PORT"
+    # We might not have any hostname resolution if we are using a chroot.
+    GDB="$GDBSERVER_DEVICE --no-startup-with-shell 127.0.0.1$GDBSERVER_PORT"
   else
     if [ `uname` = "Darwin" ]; then
         GDB=lldb
@@ -669,10 +760,11 @@
   fi
 elif [ "$USE_GDBSERVER" = "y" ]; then
   if [ "$HOST" = "n" ]; then
-    echo "Cannot use --gdbserver in non-host configs" >&2
-    exit 1
+    # We might not have any hostname resolution if we are using a chroot.
+    GDB="$GDBSERVER_DEVICE --no-startup-with-shell 127.0.0.1$GDBSERVER_PORT"
+  else
+    GDB="$GDBSERVER_HOST $GDBSERVER_PORT"
   fi
-  GDB="$GDBSERVER_HOST $GDBSERVER_PORT"
 fi
 
 if [ "$INTERPRETER" = "y" ]; then
@@ -714,35 +806,6 @@
     FLAGS="$FLAGS -Xnorelocate"
 fi
 
-if [ "$HOST" = "n" ]; then
-  # Need to be root to query /data/dalvik-cache
-  adb root > /dev/null
-  adb wait-for-device
-  ISA=
-  ISA_adb_invocation=
-  ISA_outcome=
-  # We iterate a few times to workaround an adb issue. b/32655576
-  for i in {1..10}; do
-    ISA_adb_invocation=$(adb shell ls -F /data/dalvik-cache)
-    ISA_outcome=$?
-    ISA=$(echo $ISA_adb_invocation | grep -Ewo "${ARCHITECTURES_PATTERN}")
-    if [ x"$ISA" != "x" ]; then
-      break;
-    fi
-  done
-  if [ x"$ISA" = "x" ]; then
-    echo "Unable to determine architecture"
-    # Print a few things for helping diagnosing the problem.
-    echo "adb invocation output: $ISA_adb_invocation"
-    echo "adb invocation outcome: $ISA_outcome"
-    echo $(adb shell ls -F /data/dalvik-cache)
-    echo $(adb shell ls /data/dalvik-cache)
-    echo ${ARCHITECTURES_PATTERN}
-    echo $(adb shell ls -F /data/dalvik-cache | grep -Ewo "${ARCHITECTURES_PATTERN}")
-    exit 1
-  fi
-fi
-
 if [ "$BIONIC" = "y" ]; then
   # This is the location that soong drops linux_bionic builds. Despite being
   # called linux_bionic-x86 the build is actually amd64 (x86_64) only.
@@ -750,8 +813,8 @@
     echo "linux_bionic-x86 target doesn't seem to have been built!" >&2
     exit 1
   fi
-  # Set timeout_dumper manually so it works even with apex's
-  TIMEOUT_DUMPER=$OUT_DIR/soong/host/linux_bionic-x86/bin/timeout_dumper
+  # Set TIMEOUT_DUMPER manually so it works even with apex's
+  TIMEOUT_DUMPER=$OUT_DIR/soong/host/linux_bionic-x86/bin/signal_dumper
 fi
 
 # Prevent test from silently falling back to interpreter in no-prebuild mode. This happens
@@ -774,7 +837,20 @@
     exit 1
 fi
 
-BIN_DIR=$ANDROID_ROOT/bin
+if [ "$HOST" = "y" ]; then
+  # On host, run binaries (`dex2oat(d)`, `dalvikvm`, `profman`) from the `bin`
+  # directory under the "Android Root" (usually `out/host/linux-x86`).
+  #
+  # TODO(b/130295968): Adjust this if/when ART host artifacts are installed
+  # under the ART root (usually `out/host/linux-x86/com.android.art`).
+  ANDROID_ART_BIN_DIR=$ANDROID_ROOT/bin
+else
+  # On target, run binaries (`dex2oat(d)`, `dalvikvm`, `profman`) from the ART
+  # APEX's `bin` directory. This means the linker will observe the ART APEX
+  # linker configuration file (`/apex/com.android.art/etc/ld.config.txt`) for
+  # these binaries.
+  ANDROID_ART_BIN_DIR=$ANDROID_ART_ROOT/bin
+fi
 
 profman_cmdline="true"
 dex2oat_cmdline="true"
@@ -811,10 +887,10 @@
   fi
   setupapex_cmdline="unzip -o -u ${zip_options} ${ZIPAPEX_LOC} apex_payload.zip -d ${DEX_LOCATION}"
   installapex_cmdline="unzip -o -u ${zip_options} ${DEX_LOCATION}/apex_payload.zip -d ${DEX_LOCATION}/zipapex"
-  BIN_DIR=$DEX_LOCATION/zipapex/bin
+  ANDROID_ART_BIN_DIR=$DEX_LOCATION/zipapex/bin
 elif [ "$USE_EXTRACTED_ZIPAPEX" = "y" ]; then
   # Just symlink the zipapex binaries
-  BIN_DIR=$DEX_LOCATION/zipapex/bin
+  ANDROID_ART_BIN_DIR=$DEX_LOCATION/zipapex/bin
   # Force since some tests manually run this file twice.
   ln_options=""
   if [ "$DEV_MODE" = "y" ]; then
@@ -826,7 +902,7 @@
 # PROFILE takes precedence over RANDOM_PROFILE, since PROFILE tests require a
 # specific profile to run properly.
 if [ "$PROFILE" = "y" ] || [ "$RANDOM_PROFILE" = "y" ]; then
-  profman_cmdline="$BIN_DIR/profman  \
+  profman_cmdline="$ANDROID_ART_BIN_DIR/profman  \
     --apk=$DEX_LOCATION/$TEST_NAME.jar \
     --dex-location=$DEX_LOCATION/$TEST_NAME.jar"
   if [ -f $DEX_LOCATION/$TEST_NAME-ex.jar ]; then
@@ -845,23 +921,27 @@
   fi
 fi
 
+# Enable mini-debug-info for JIT (if JIT is used).
+FLAGS="$FLAGS -Xcompiler-option --generate-mini-debug-info"
+
 if [ "$PREBUILD" = "y" ]; then
   mkdir_locations="${mkdir_locations} ${DEX_LOCATION}/oat/$ISA"
   if [ "$APP_IMAGE" = "y" ]; then
     # Pick a base that will force the app image to get relocated.
-    app_image="--base=0x4000 --app-image-file=$DEX_LOCATION/oat/$ISA/$TEST_NAME.art --resolve-startup-const-strings=true"
+    app_image="--app-image-file=$DEX_LOCATION/oat/$ISA/$TEST_NAME.art --resolve-startup-const-strings=true"
   fi
 
   dex2oat_binary=${DEX2OAT_DEBUG_BINARY}
   if  [[ "$TEST_IS_NDEBUG" = "y" ]]; then
     dex2oat_binary=${DEX2OAT_NDEBUG_BINARY}
   fi
-  dex2oat_cmdline="$INVOKE_WITH $BIN_DIR/$dex2oat_binary \
+  dex2oat_cmdline="$INVOKE_WITH $ANDROID_ART_BIN_DIR/$dex2oat_binary \
                       $COMPILE_FLAGS \
                       --boot-image=${BOOT_IMAGE} \
                       --dex-file=$DEX_LOCATION/$TEST_NAME.jar \
                       --oat-file=$DEX_LOCATION/oat/$ISA/$TEST_NAME.odex \
                       ${app_image} \
+                      --generate-mini-debug-info \
                       --instruction-set=$ISA"
   if [ "x$INSTRUCTION_SET_FEATURES" != "x" ] ; then
     dex2oat_cmdline="${dex2oat_cmdline} --instruction-set-features=${INSTRUCTION_SET_FEATURES}"
@@ -910,10 +990,17 @@
   TMP_DIR_OPTION="-Djava.io.tmpdir=/data/local/tmp"
 fi
 
+# The build servers have an ancient version of bash so we cannot use @Q.
+if [ "$USE_GDBSERVER" == "y" ]; then
+  printf -v QUOTED_DALVIKVM_BOOT_OPT "%q" "$DALVIKVM_BOOT_OPT"
+else
+  QUOTED_DALVIKVM_BOOT_OPT="$DALVIKVM_BOOT_OPT"
+fi
+
 # We set DumpNativeStackOnSigQuit to false to avoid stressing libunwind.
 # b/27185632
 # b/24664297
-dalvikvm_cmdline="$INVOKE_WITH $GDB $BIN_DIR/$DALVIKVM \
+dalvikvm_cmdline="$INVOKE_WITH $GDB $ANDROID_ART_BIN_DIR/$DALVIKVM \
                   $GDB_ARGS \
                   $FLAGS \
                   $DEX_VERIFY \
@@ -924,7 +1011,7 @@
                   $JNI_OPTS \
                   $INT_OPTS \
                   $DEBUGGER_OPTS \
-                  $DALVIKVM_BOOT_OPT \
+                  ${QUOTED_DALVIKVM_BOOT_OPT} \
                   $TMP_DIR_OPTION \
                   -XX:DumpNativeStackOnSigQuit:false \
                   -cp $DEX_LOCATION/$TEST_NAME.jar$SECONDARY_DEX $MAIN $ARGS"
@@ -990,13 +1077,23 @@
       fi
     fi
 
-    LD_LIBRARY_PATH=/data/$TEST_DIRECTORY/art/$ISA
+    # Populate LD_LIBRARY_PATH.
+    LD_LIBRARY_PATH=
     if [ "$ANDROID_ROOT" != "/system" ]; then
       # Current default installation is dalvikvm 64bits and dex2oat 32bits,
       # so we can only use LD_LIBRARY_PATH when testing on a local
       # installation.
-      LD_LIBRARY_PATH="$ANDROID_ROOT/$LIBRARY_DIRECTORY:$LD_LIBRARY_PATH"
+      LD_LIBRARY_PATH="$ANDROID_ROOT/$LIBRARY_DIRECTORY"
     fi
+    # Needed to access libarttest(d).so and JVMTI agent libraries.
+    LD_LIBRARY_PATH="/data/$TEST_DIRECTORY/art/$ISA:$LD_LIBRARY_PATH"
+    # Needed to access the boot (core) image files.
+    LD_LIBRARY_PATH="/data/art-test/$ISA:$LD_LIBRARY_PATH"
+    # Needed to access the test's Odex files.
+    LD_LIBRARY_PATH="$DEX_LOCATION/oat/$ISA:$LD_LIBRARY_PATH"
+    # Needed to access the test's native libraries (see e.g. 674-hiddenapi,
+    # which generates `libhiddenapitest_*.so` libraries in `$DEX_LOCATION`).
+    LD_LIBRARY_PATH="$DEX_LOCATION:$LD_LIBRARY_PATH"
 
     # System libraries needed by libarttestd.so
     PUBLIC_LIBS=libc++.so:libbacktrace.so:libbase.so:libnativehelper.so
@@ -1006,6 +1103,53 @@
       PUBLIC_LIBS=$PUBLIC_LIBS:libartd.so:libdexfiled.so:libprofiled.so:libartbased.so
     fi
 
+    # Prepend directories to the path on device.
+    PREPEND_TARGET_PATH=$ANDROID_ART_BIN_DIR
+    if [ "$ANDROID_ROOT" != "/system" ]; then
+      PREPEND_TARGET_PATH="$PREPEND_TARGET_PATH:$ANDROID_ROOT/bin"
+    fi
+
+    timeout_dumper_cmd=
+
+    # Check whether signal_dumper is available.
+    if [ "$TIMEOUT_DUMPER" = signal_dumper ] ; then
+      # Chroot? Use as prefix for tests.
+      TIMEOUT_DUMPER_PATH_PREFIX=
+      if [ -n "$CHROOT" ]; then
+        TIMEOUT_DUMPER_PATH_PREFIX="$CHROOT/"
+      fi
+
+      # Testing APEX?
+      if adb shell "test -x ${TIMEOUT_DUMPER_PATH_PREFIX}/apex/com.android.art/bin/signal_dumper" ; then
+        TIMEOUT_DUMPER="/apex/com.android.art/bin/signal_dumper"
+      # Is it in /system/bin?
+      elif adb shell "test -x ${TIMEOUT_DUMPER_PATH_PREFIX}/system/bin/signal_dumper" ; then
+        TIMEOUT_DUMPER="/system/bin/signal_dumper"
+      else
+        TIMEOUT_DUMPER=
+      fi
+    else
+      TIMEOUT_DUMPER=
+    fi
+
+    if [ ! -z "$TIMEOUT_DUMPER" ] ; then
+      # Use "-l" to dump to logcat. That is convenience for the build bot crash symbolization.
+      # Use exit code 124 for toybox timeout (b/141007616).
+      timeout_dumper_cmd="${TIMEOUT_DUMPER} -l -s 15 -e 124"
+    fi
+
+    timeout_prefix=
+    if [ "$TIME_OUT" = "timeout" ]; then
+      # Add timeout command if time out is desired.
+      #
+      # Note: We first send SIGTERM (the timeout default, signal 15) to the signal dumper, which
+      #       will induce a full thread dump before killing the process. To ensure any issues in
+      #       dumping do not lead to a deadlock, we also use the "-k" option to definitely kill the
+      #       child.
+      # Note: Using "--foreground" to not propagate the signal to children, i.e., the runtime.
+      timeout_prefix="timeout --foreground -k 120s ${TIME_OUT_VALUE}s ${timeout_dumper_cmd} $cmdline"
+    fi
+
     # Create a script with the command. The command can get longer than the longest
     # allowed adb command and there is no way to get the exit status from a adb shell
     # command. Dalvik cache is cleaned before running to make subsequent executions
@@ -1016,26 +1160,30 @@
              export ANDROID_ADDITIONAL_PUBLIC_LIBRARIES=$PUBLIC_LIBS && \
              export DEX_LOCATION=$DEX_LOCATION && \
              export ANDROID_ROOT=$ANDROID_ROOT && \
-             export ANDROID_RUNTIME_ROOT=$ANDROID_RUNTIME_ROOT && \
+             export ANDROID_I18N_ROOT=$ANDROID_I18N_ROOT && \
+             export ANDROID_ART_ROOT=$ANDROID_ART_ROOT && \
              export ANDROID_TZDATA_ROOT=$ANDROID_TZDATA_ROOT && \
              export ANDROID_LOG_TAGS=$ANDROID_LOG_TAGS && \
              rm -rf ${DEX_LOCATION}/dalvik-cache/ && \
              mkdir -p ${mkdir_locations} && \
              export LD_LIBRARY_PATH=$LD_LIBRARY_PATH && \
-             export PATH=$BIN_DIR:$PATH && \
+             export PATH=$PREPEND_TARGET_PATH:\$PATH && \
              $profman_cmdline && \
              $dex2oat_cmdline && \
              $dm_cmdline && \
              $vdex_cmdline && \
              $strip_cmdline && \
              $sync_cmdline && \
-             $dalvikvm_cmdline"
+             $timeout_prefix $dalvikvm_cmdline"
 
     cmdfile=$(tempfile -p "cmd-" -s "-$TEST_NAME")
     echo "$cmdline" > $cmdfile
 
     if [ "$DEV_MODE" = "y" ]; then
       echo $cmdline
+      if [ "$USE_GDB" = "y" ] || [ "$USE_GDBSERVER" = "y" ]; then
+        echo "Forward ${GDBSERVER_PORT} to local port and connect GDB"
+      fi
     fi
 
     if [ "$QUIET" = "n" ]; then
@@ -1062,7 +1210,8 @@
 
     export ANDROID_DATA="$DEX_LOCATION"
     export ANDROID_ROOT="${ANDROID_ROOT}"
-    export ANDROID_RUNTIME_ROOT="${ANDROID_RUNTIME_ROOT}"
+    export ANDROID_I18N_ROOT="${ANDROID_I18N_ROOT}"
+    export ANDROID_ART_ROOT="${ANDROID_ART_ROOT}"
     export ANDROID_TZDATA_ROOT="${ANDROID_TZDATA_ROOT}"
     if [ "$USE_ZIPAPEX" = "y" ] || [ "$USE_EXRACTED_ZIPAPEX" = "y" ]; then
       # Put the zipapex files in front of the ld-library-path
@@ -1072,7 +1221,7 @@
       export LD_LIBRARY_PATH="${ANDROID_ROOT}/${LIBRARY_DIRECTORY}:${ANDROID_ROOT}/${TEST_DIRECTORY}"
       export DYLD_LIBRARY_PATH="${ANDROID_ROOT}/${LIBRARY_DIRECTORY}:${ANDROID_ROOT}/${TEST_DIRECTORY}"
     fi
-    export PATH="$PATH:$BIN_DIR"
+    export PATH="$PATH:$ANDROID_ART_BIN_DIR"
 
     # Temporarily disable address space layout randomization (ASLR).
     # This is needed on the host so that the linker loads core.oat at the necessary address.
@@ -1100,15 +1249,16 @@
     if [ "$TIME_OUT" = "timeout" ]; then
       # Add timeout command if time out is desired.
       #
-      # Note: We first send SIGRTMIN+2 (usually 36) to ART, which will induce a full thread dump
-      #       before abort. However, dumping threads might deadlock, so we also use the "-k"
-      #       option to definitely kill the child.
+      # Note: We first send SIGTERM (the timeout default, signal 15) to the signal dumper, which
+      #       will induce a full thread dump before killing the process. To ensure any issues in
+      #       dumping do not lead to a deadlock, we also use the "-k" option to definitely kill the
+      #       child.
       # Note: Using "--foreground" to not propagate the signal to children, i.e., the runtime.
-      cmdline="timeout --foreground -k 120s -s SIGRTMIN+2 ${TIME_OUT_VALUE}s ${TIMEOUT_DUMPER} $cmdline"
+      cmdline="timeout --foreground -k 120s ${TIME_OUT_VALUE}s ${TIMEOUT_DUMPER} -s 15 $cmdline"
     fi
 
     if [ "$DEV_MODE" = "y" ]; then
-      for var in ANDROID_PRINTF_LOG ANDROID_DATA ANDROID_ROOT LD_LIBRARY_PATH DYLD_LIBRARY_PATH PATH LD_USE_LOAD_BIAS; do
+      for var in ANDROID_PRINTF_LOG ANDROID_DATA ANDROID_ROOT ANDROID_I18N_ROOT ANDROID_TZDATA_ROOT ANDROID_ART_ROOT LD_LIBRARY_PATH DYLD_LIBRARY_PATH PATH LD_USE_LOAD_BIAS; do
         echo EXPORT $var=${!var}
       done
       echo "$(declare -f linkdirs)"
@@ -1137,6 +1287,32 @@
     $strip_cmdline || { echo "Strip failed." >&2 ; exit 3; }
     $sync_cmdline || { echo "Sync failed." >&2 ; exit 4; }
 
+    if [ "$CREATE_RUNNER" = "y" ]; then
+      echo "#!/bin/bash" > ${DEX_LOCATION}/runit.sh
+      for var in ANDROID_PRINTF_LOG ANDROID_DATA ANDROID_ROOT ANDROID_I18N_ROOT ANDROID_TZDATA_ROOT ANDROID_ART_ROOT LD_LIBRARY_PATH DYLD_LIBRARY_PATH PATH LD_USE_LOAD_BIAS; do
+        echo export $var="${!var}" >> ${DEX_LOCATION}/runit.sh
+      done
+      if [ "$DEV_MODE" = "y" ]; then
+        echo $cmdline "2>&1" >> ${DEX_LOCATION}/runit.sh
+      else
+        echo 'STDERR=$(mktemp)' >> ${DEX_LOCATION}/runit.sh
+        echo 'STDOUT=$(mktemp)' >> ${DEX_LOCATION}/runit.sh
+        echo $cmdline '2>${STDERR} >${STDOUT}' >> ${DEX_LOCATION}/runit.sh
+        echo 'if diff ${STDOUT} $ANDROID_DATA/expected.txt; then' >> ${DEX_LOCATION}/runit.sh
+        echo '  rm -f ${STDOUT} ${STDERR}' >> ${DEX_LOCATION}/runit.sh
+        echo '  exit 0' >> ${DEX_LOCATION}/runit.sh
+        echo 'else' >> ${DEX_LOCATION}/runit.sh
+        echo '  echo  STDOUT:' >> ${DEX_LOCATION}/runit.sh
+        echo '  cat ${STDOUT}' >> ${DEX_LOCATION}/runit.sh
+        echo '  echo  STDERR:' >> ${DEX_LOCATION}/runit.sh
+        echo '  cat ${STDERR}' >> ${DEX_LOCATION}/runit.sh
+        echo '  rm -f ${STDOUT} ${STDERR}' >> ${DEX_LOCATION}/runit.sh
+        echo '  exit 1' >> ${DEX_LOCATION}/runit.sh
+        echo 'fi' >> ${DEX_LOCATION}/runit.sh
+      fi
+      chmod u+x $DEX_LOCATION/runit.sh
+      echo "Runnable test script written to ${DEX_LOCATION}/runit.sh"
+    fi
     if [ "$DRY_RUN" = "y" ]; then
       exit 0
     fi
diff --git a/test/1923-frame-pop/src/art/Breakpoint.java b/test/jvmti-common/Breakpoint.java
similarity index 100%
rename from test/1923-frame-pop/src/art/Breakpoint.java
rename to test/jvmti-common/Breakpoint.java
diff --git a/test/1927-exception-event/src/art/Exceptions.java b/test/jvmti-common/Exceptions.java
similarity index 100%
rename from test/1927-exception-event/src/art/Exceptions.java
rename to test/jvmti-common/Exceptions.java
diff --git a/test/1923-frame-pop/src/art/FramePop.java b/test/jvmti-common/FramePop.java
similarity index 100%
rename from test/1923-frame-pop/src/art/FramePop.java
rename to test/jvmti-common/FramePop.java
diff --git a/test/1923-frame-pop/src/art/Locals.java b/test/jvmti-common/Locals.java
similarity index 100%
rename from test/1923-frame-pop/src/art/Locals.java
rename to test/jvmti-common/Locals.java
diff --git a/test/913-heaps/src/art/Main.java b/test/jvmti-common/Main.java
similarity index 100%
rename from test/913-heaps/src/art/Main.java
rename to test/jvmti-common/Main.java
diff --git a/test/1930-monitor-info/src/art/Monitors.java b/test/jvmti-common/Monitors.java
similarity index 100%
rename from test/1930-monitor-info/src/art/Monitors.java
rename to test/jvmti-common/Monitors.java
diff --git a/test/jvmti-common/NonStandardExit.java b/test/jvmti-common/NonStandardExit.java
new file mode 100644
index 0000000..37f699e
--- /dev/null
+++ b/test/jvmti-common/NonStandardExit.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+public class NonStandardExit {
+  public static native void popFrame(Thread thr);
+  public static native void forceEarlyReturnVoid(Thread thr);
+  public static native void forceEarlyReturnFloat(Thread thr, float f);
+  public static native void forceEarlyReturnDouble(Thread thr, double f);
+  public static native void forceEarlyReturnInt(Thread thr, int f);
+  public static native void forceEarlyReturnLong(Thread thr, long f);
+  public static native void forceEarlyReturnObject(Thread thr, Object f);
+
+  public static void forceEarlyReturn(Thread thr, Object o) {
+    if (o instanceof Number && o.getClass().getPackage().equals(Object.class.getPackage())) {
+      Number n = (Number)o;
+      if (n instanceof Integer || n instanceof Short || n instanceof Byte) {
+        forceEarlyReturnInt(thr, n.intValue());
+      } else if (n instanceof Long) {
+        forceEarlyReturnLong(thr, n.longValue());
+      } else if (n instanceof Float) {
+        forceEarlyReturnFloat(thr, n.floatValue());
+      } else if (n instanceof Double) {
+        forceEarlyReturnDouble(thr, n.doubleValue());
+      } else {
+        throw new IllegalArgumentException("Unknown number subtype: " + n.getClass() + " - " + n);
+      }
+    } else if (o instanceof Character) {
+      forceEarlyReturnInt(thr, ((Character)o).charValue());
+    } else if (o instanceof Boolean) {
+      forceEarlyReturnInt(thr, ((Boolean)o).booleanValue() ? 1 : 0);
+    } else {
+      forceEarlyReturnObject(thr, o);
+    }
+  }
+}
diff --git a/test/jvmti-common/Redefinition.java b/test/jvmti-common/Redefinition.java
new file mode 100644
index 0000000..3402fa1
--- /dev/null
+++ b/test/jvmti-common/Redefinition.java
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.util.ArrayList;
+// Common Redefinition functions. Placed here for use by CTS
+public class Redefinition {
+  public static class CommonClassDefinition {
+    public final Class<?> target;
+    public final byte[] class_file_bytes;
+    public final byte[] dex_file_bytes;
+
+    public CommonClassDefinition(Class<?> target, byte[] class_file_bytes, byte[] dex_file_bytes) {
+      this.target = target;
+      this.class_file_bytes = class_file_bytes;
+      this.dex_file_bytes = dex_file_bytes;
+    }
+  }
+
+  public static class DexOnlyClassDefinition extends CommonClassDefinition {
+    public DexOnlyClassDefinition(Class<?> target, byte[] dex_file_bytes) {
+      super(target, new byte[0], dex_file_bytes);
+    }
+  }
+
+  // A set of possible test configurations. Test should set this if they need to.
+  // This must be kept in sync with the defines in ti-agent/common_helper.cc
+  public static enum Config {
+    COMMON_REDEFINE(0),
+    COMMON_RETRANSFORM(1),
+    COMMON_TRANSFORM(2),
+    STRUCTURAL_TRANSFORM(3);
+
+    private final int val;
+    private Config(int val) {
+      this.val = val;
+    }
+  }
+
+  public static void setTestConfiguration(Config type) {
+    nativeSetTestConfiguration(type.val);
+  }
+
+  private static native void nativeSetTestConfiguration(int type);
+
+  // Transforms the class
+  public static native void doCommonClassRedefinition(Class<?> target,
+                                                      byte[] classfile,
+                                                      byte[] dexfile);
+
+  public static void doMultiClassRedefinition(CommonClassDefinition... defs) {
+    ArrayList<Class<?>> classes = new ArrayList<>();
+    ArrayList<byte[]> class_files = new ArrayList<>();
+    ArrayList<byte[]> dex_files = new ArrayList<>();
+
+    for (CommonClassDefinition d : defs) {
+      classes.add(d.target);
+      class_files.add(d.class_file_bytes);
+      dex_files.add(d.dex_file_bytes);
+    }
+    doCommonMultiClassRedefinition(classes.toArray(new Class<?>[0]),
+                                   class_files.toArray(new byte[0][]),
+                                   dex_files.toArray(new byte[0][]));
+  }
+
+  public static void addMultiTransformationResults(CommonClassDefinition... defs) {
+    for (CommonClassDefinition d : defs) {
+      addCommonTransformationResult(d.target.getCanonicalName(),
+                                    d.class_file_bytes,
+                                    d.dex_file_bytes);
+    }
+  }
+
+  public static native void doCommonMultiClassRedefinition(Class<?>[] targets,
+                                                           byte[][] classfiles,
+                                                           byte[][] dexfiles);
+  public static native void doCommonClassRetransformation(Class<?>... target);
+  public static native void setPopRetransformations(boolean pop);
+  public static native void popTransformationFor(String name);
+  public static native void enableCommonRetransformation(boolean enable);
+  public static native void addCommonTransformationResult(String target_name,
+                                                          byte[] class_bytes,
+                                                          byte[] dex_bytes);
+
+  public static native void doCommonStructuralClassRedefinition(Class<?> target, byte[] dex_file);
+  public static void doMultiStructuralClassRedefinition(CommonClassDefinition... defs) {
+    ArrayList<Class<?>> classes = new ArrayList<>();
+    ArrayList<byte[]> dex_files = new ArrayList<>();
+
+    for (CommonClassDefinition d : defs) {
+      classes.add(d.target);
+      dex_files.add(d.dex_file_bytes);
+    }
+    doCommonMultiStructuralClassRedefinition(classes.toArray(new Class<?>[0]),
+                                             dex_files.toArray(new byte[0][]));
+  }
+  public static native void doCommonMultiStructuralClassRedefinition(Class<?>[] targets,
+                                                                     byte[][] dexfiles);
+  public static native boolean isStructurallyModifiable(Class<?> target);
+}
diff --git a/test/1923-frame-pop/src/art/StackTrace.java b/test/jvmti-common/StackTrace.java
similarity index 100%
rename from test/1923-frame-pop/src/art/StackTrace.java
rename to test/jvmti-common/StackTrace.java
diff --git a/test/jvmti-common/SuspendEvents.java b/test/jvmti-common/SuspendEvents.java
new file mode 100644
index 0000000..6bf6bda
--- /dev/null
+++ b/test/jvmti-common/SuspendEvents.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package art;
+
+import java.lang.reflect.Executable;
+import java.lang.reflect.Field;
+
+/**
+ * A set of functions to request events that suspend the thread they trigger on.
+ */
+public final class SuspendEvents {
+  /**
+   * Sets up the suspension support. Must be called at the start of the test.
+   */
+  public static native void setupTest();
+
+  public static native void setupSuspendBreakpointFor(Executable meth, long loc, Thread thr);
+  public static native void clearSuspendBreakpointFor(Thread thr);
+
+  public static native void setupSuspendSingleStepAt(Executable meth, long loc, Thread thr);
+  public static native void clearSuspendSingleStepFor(Thread thr);
+
+  public static native void setupFieldSuspendFor(Class klass, Field f, boolean access, Thread thr);
+  public static native void clearFieldSuspendFor(Thread thr);
+
+  public static native void setupSuspendMethodEvent(Executable meth, boolean enter, Thread thr);
+  public static native void clearSuspendMethodEvent(Thread thr);
+
+  public static native void setupSuspendExceptionEvent(
+      Executable meth, boolean is_catch, Thread thr);
+  public static native void clearSuspendExceptionEvent(Thread thr);
+
+  public static native void setupSuspendPopFrameEvent(
+      int offset, Executable breakpointFunction, Thread thr);
+  public static native void clearSuspendPopFrameEvent(Thread thr);
+
+  public static final int EVENT_TYPE_CLASS_LOAD = 55;
+  public static final int EVENT_TYPE_CLASS_PREPARE = 56;
+  public static native void setupSuspendClassEvent(
+      int eventType, String[] interestingNames, Thread thr);
+  public static native void clearSuspendClassEvent(Thread thr);
+
+  public static native void setupWaitForNativeCall(Thread thr);
+  public static native void clearWaitForNativeCall(Thread thr);
+
+  /**
+   * Waits for the given thread to be suspended.
+   * @param thr the thread to wait for.
+   */
+  public static native void waitForSuspendHit(Thread thr);
+}
diff --git a/test/1902-suspend/src/art/Suspension.java b/test/jvmti-common/Suspension.java
similarity index 100%
rename from test/1902-suspend/src/art/Suspension.java
rename to test/jvmti-common/Suspension.java
diff --git a/test/1934-jvmti-signal-thread/src/art/Threads.java b/test/jvmti-common/Threads.java
similarity index 100%
rename from test/1934-jvmti-signal-thread/src/art/Threads.java
rename to test/jvmti-common/Threads.java
diff --git a/test/1923-frame-pop/src/art/Trace.java b/test/jvmti-common/Trace.java
similarity index 100%
rename from test/1923-frame-pop/src/art/Trace.java
rename to test/jvmti-common/Trace.java
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 9e57764..398f123 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -19,13 +19,6 @@
     },
     {
         "tests": "080-oom-fragmentation",
-        "description": ["Disable 080-oom-fragmentation for GSS GC due to lack of",
-                        "support for allocations larger than 32MB."],
-        "env_vars": {"ART_DEFAULT_GC_TYPE": "GSS"},
-        "bug": "http://b/33795328"
-    },
-    {
-        "tests": "080-oom-fragmentation",
         "description": ["Disable 080-oom-fragmentation for CC collector in debug mode",
                         "because of potential fragmentation caused by the region space's",
                         "cyclic region allocation (which is enabled in debug mode)."],
@@ -449,10 +442,16 @@
             ".*method-handle.*",
             ".*varhandle.*",
             ".*var-handle.*",
-            "716-jli-jit-samples"
+            "716-jli-jit-samples",
+            "1975-hello-structural-transformation",
+            "1976-hello-structural-static-methods",
+            "1985-structural-redefine-stack-scope",
+            "1986-structural-redefine-multi-thread-stack-scope",
+            "1987-structural-redefine-recursive-stack-scope",
+            "2000-virtual-list-structural"
         ],
         "description": [
-            "Tests for bytecodes introduced after DEX version 037 that are unsupported by",
+            "Tests for/using bytecodes introduced after DEX version 037 that are unsupported by",
             "dexter/slicer."
         ],
         "bug": "b/37272822",
@@ -509,7 +508,8 @@
             "909-attach-agent",
             "924-threads",
             "981-dedup-original-dex",
-            "1900-track-alloc"
+            "1900-track-alloc",
+            "2230-profile-save-hotness"
         ],
         "description": ["Tests that require exact knowledge of the deoptimization state, the ",
                         "number of plugins and agents, or breaks other openjdkjvmti assumptions."],
@@ -637,6 +637,12 @@
         "env_vars": {"SANITIZE_HOST": "address"}
     },
     {
+        "tests": "175-alloc-big-bignums",
+        "description": "ASAN runs out of memory due to huge allocations.",
+        "variant": "target",
+        "env_vars": {"SANITIZE_TARGET": "hwaddress"}
+    },
+    {
         "tests": "202-thread-oome",
         "description": "ASAN aborts when large thread stacks are requested.",
         "variant": "host",
@@ -703,6 +709,17 @@
         "env_vars": {"SANITIZE_TARGET": "address"}
     },
     {
+        "tests": [
+            "074-gc-thrash"
+        ],
+        "description": [
+            "Interpreter with access checks stack frames are too large and result in",
+            "StackOverFlow errors being thrown."
+        ],
+        "variant": "interp-ac & target",
+        "env_vars": {"SANITIZE_TARGET": "hwaddress"}
+    },
+    {
         "tests": "071-dexfile-map-clean",
         "description": [ "We use prebuilt zipalign on master-art-host to avoid pulling in a lot",
                          "of the framework. But a non-sanitized zipalign binary does not work with",
@@ -745,9 +762,11 @@
     },
     {
         "tests": "660-clinit",
-        "variant": "no-image | no-prebuild | jvmti-stress | redefine-stress",
+        "variant": "no-image | no-prebuild | jvmti-stress | redefine-stress | interp-ac | debuggable",
         "description": ["Tests <clinit> for app images, which --no-image, --no-prebuild, ",
-                        "and --redefine-stress do not create"]
+                        "and --redefine-stress do not create. Also avoid for ",
+                        "verify-soft-fail (interp-ac) and debuggable since they prevent ",
+                        "initialization."]
     },
     {
         "tests": ["961-default-iface-resolution-gen",
@@ -775,7 +794,7 @@
     },
     {
         "tests": ["004-StackWalk"],
-        "variant": "interp-ac | interpreter | jit | no-prebuild | no-image | trace | redefine-stress | jvmti-stress | debuggable",
+        "variant": "speed-profile | interp-ac | interpreter | jit | no-prebuild | no-image | trace | redefine-stress | jvmti-stress | debuggable",
         "description": ["Test is designed to only check --optimizing"]
     },
     {
@@ -784,6 +803,18 @@
         "description": ["Requires zip, which isn't available on device"]
     },
     {
+        "tests": [
+          "1965-get-set-local-primitive-no-tables",
+          "1966-get-set-local-objects-no-table"
+        ],
+        "variant": "jvm",
+        "bug": "133241695",
+        "description": [
+          "The RI is wildly inconsistent about how it handles Get/SetLocalVariable when classes ",
+          "lack debug info."
+        ]
+    },
+    {
         "tests": ["683-clinit-inline-static-invoke"],
         "variant": "jvm",
         "description": ["Uses android-specific boot image class."]
@@ -867,6 +898,7 @@
           "167-visit-locks",
           "168-vmstack-annotated",
           "172-app-image-twice",
+          "177-visibly-initialized-deadlock",
           "201-built-in-except-detail-messages",
           "203-multi-checkpoint",
           "304-method-tracing",
@@ -942,6 +974,7 @@
           "574-irreducible-and-constant-area",
           "575-checker-string-init-alias",
           "580-checker-string-fact-intrinsics",
+          "580-fp16",
           "585-inline-unresolved",
           "586-checker-null-array-get",
           "587-inline-class-error",
@@ -1017,6 +1050,7 @@
           "685-shifts",
           "686-get-this",
           "687-deopt",
+          "699-checker-string-append2",
           "706-checker-scheduler",
           "707-checker-invalid-profile",
           "714-invoke-custom-lambda-metafactory",
@@ -1084,11 +1118,54 @@
           "1940-ddms-ext",
           "1945-proxy-method-arguments",
           "1946-list-descriptors",
-          "1947-breakpoint-redefine-deopt"
+          "1947-breakpoint-redefine-deopt",
+          "2230-profile-save-hotness"
         ],
         "variant": "jvm",
         "bug": "b/73888836",
-        "description": ["Failing on RI. Needs further investigating."]
+        "description": ["Failing on RI. Needs further investigating. Some of these use smali."]
+    },
+    {
+      "tests": [
+                  "1974-resize-array",
+                  "1975-hello-structural-transformation",
+                  "1976-hello-structural-static-methods",
+                  "1977-hello-structural-obsolescence",
+                  "1978-regular-obsolete-then-structural-obsolescence",
+                  "1979-threaded-structural-transformation",
+                  "1980-obsolete-object-cleared",
+                  "1981-structural-redef-private-method-handles",
+                  "1982-no-virtuals-structural-redefinition",
+                  "1983-structural-redefinition-failures",
+                  "1984-structural-redefine-field-trace",
+                  "1985-structural-redefine-stack-scope",
+                  "1986-structural-redefine-multi-thread-stack-scope",
+                  "1987-structural-redefine-recursive-stack-scope",
+                  "1988-multi-structural-redefine",
+                  "1989-transform-bad-monitor",
+                  "1990-structural-bad-verify",
+                  "1991-hello-structural-retransform",
+                  "1992-retransform-no-such-field",
+                  "1993-fallback-non-structural",
+                  "1994-final-virtual-structural",
+                  "1995-final-virtual-structural-multithread",
+                  "1996-final-override-virtual-structural",
+                  "1997-structural-shadow-method",
+                  "1998-structural-shadow-field",
+                  "1999-virtual-structural",
+                  "2000-virtual-list-structural",
+                  "2001-virtual-structural-multithread",
+                  "2002-virtual-structural-initializing",
+                  "2003-double-virtual-structural",
+                  "2004-double-virtual-structural-abstract",
+                  "2005-pause-all-redefine-multithreaded",
+                  "2006-virtual-structural-finalizing",
+                  "2007-virtual-structural-finalizable"
+                ],
+        "env_vars": {"ART_USE_READ_BARRIER": "false"},
+        "description": ["Relies on the accuracy of the Heap::VisitObjects function which is broken",
+                        " when READ_BARRIER==false (I.e. On CMS collector)."],
+        "bug": "b/147207934"
     },
     {
         "tests": ["530-checker-peel-unroll",
@@ -1102,12 +1179,50 @@
                   "691-hiddenapi-proxy",
                   "692-vdex-inmem-loader",
                   "693-vdex-inmem-loader-evict",
+                  "723-string-init-range",
                   "999-redefine-hiddenapi",
                   "1000-non-moving-space-stress",
                   "1001-app-image-regions",
                   "1339-dead-reference-safe",
                   "1951-monitor-enter-no-suspend",
-                  "1957-error-ext"],
+                  "1957-error-ext",
+                  "1972-jni-id-swap-indices",
+                  "1973-jni-id-swap-pointer",
+                  "1974-resize-array",
+                  "1975-hello-structural-transformation",
+                  "1976-hello-structural-static-methods",
+                  "1977-hello-structural-obsolescence",
+                  "1978-regular-obsolete-then-structural-obsolescence",
+                  "1979-threaded-structural-transformation",
+                  "1980-obsolete-object-cleared",
+                  "1981-structural-redef-private-method-handles",
+                  "1982-no-virtuals-structural-redefinition",
+                  "1983-structural-redefinition-failures",
+                  "1984-structural-redefine-field-trace",
+                  "1985-structural-redefine-stack-scope",
+                  "1986-structural-redefine-multi-thread-stack-scope",
+                  "1987-structural-redefine-recursive-stack-scope",
+                  "1988-multi-structural-redefine",
+                  "1989-transform-bad-monitor",
+                  "1990-structural-bad-verify",
+                  "1991-hello-structural-retransform",
+                  "1992-retransform-no-such-field",
+                  "1993-fallback-non-structural",
+                  "1994-final-virtual-structural",
+                  "1995-final-virtual-structural-multithread",
+                  "1996-final-override-virtual-structural",
+                  "1997-structural-shadow-method",
+                  "1998-structural-shadow-field",
+                  "1999-virtual-structural",
+                  "2000-virtual-list-structural",
+                  "2001-virtual-structural-multithread",
+                  "2002-virtual-structural-initializing",
+                  "2003-double-virtual-structural",
+                  "2004-double-virtual-structural-abstract",
+                  "2005-pause-all-redefine-multithreaded",
+                  "2006-virtual-structural-finalizing",
+                  "2007-virtual-structural-finalizable",
+                  "2035-structural-native-method"],
         "variant": "jvm",
         "description": ["Doesn't run on RI."]
     },
@@ -1190,22 +1305,6 @@
         ]
     },
     {
-        "tests": ["135-MirandaDispatch"],
-        "variant": "interp-ac & 32 & host",
-        "env_vars": {"SANITIZE_HOST": "address"},
-        "bug": "b/112993554",
-        "description": ["Timeout with ASan and interp-ac on 32-bit host (x86)."]
-    },
-    {
-        "tests": ["980-redefine-object"],
-        "description": ["This test appears to be unusally flaky on no-image + jit. The cause of ",
-                        "this flakiness is unknown but appears to be related to the ",
-                        "jit-compilation of JNI bridges. Until more investigation is done we skip ",
-                        "this test."],
-        "bug": "http://b/73333076",
-        "variant": "no-image & jit"
-    },
-    {
         "tests": ["454-get-vreg", "457-regs"],
         "variant": "baseline",
         "description": ["Tests are expected to fail with baseline."]
@@ -1216,12 +1315,6 @@
         "description": [ "Fails to eliminate dead reference when debuggable." ]
     },
     {
-        "tests": ["708-jit-cache-churn"],
-        "variant": "jit-on-first-use",
-        "bug": "b/120112467",
-        "description": [ "Fails on Android Build hosts with uncaught std::bad_alloc." ]
-    },
-    {
         "tests": ["719-dm-verify-redefinition"],
         "variant": "jvm | speed-profile | interp-ac | target | no-prebuild",
         "description": ["Doesn't run on RI because of boot class redefintion.",
@@ -1251,10 +1344,34 @@
         "description": ["Test containing Checker assertions expecting Baker read barriers."]
     },
     {
-
-        "tests": ["570-checker-osr-locals"],
-        "variant": "jvm",
-        "bug": "b/154802847",
-        "description": ["Failing on RI. Needs further investigating."]
+        "tests": ["689-zygote-jit-deopt"],
+        "variant": "gcstress",
+        "bug": "b/137887811",
+        "description": ["Occasional timeouts."]
+    },
+    {
+        "tests": ["2031-zygote-compiled-frame-deopt"],
+        "zipapex": true,
+        "bug": "b/144947842",
+        "description": ["This test requires strong knowledge about where the libdir is",
+                        "which the zipapex runner breaks."]
+    },
+    {
+        "tests": ["909-attach-agent", "126-miranda-multidex"],
+        "zipapex": true,
+        "bug": "b/135507613",
+        "description": ["These tests run dalvikvm multiple times, this can mess up the",
+                        "zipapex runner."]
+    },
+    {
+        "tests": ["2029-contended-monitors"],
+        "variant": "interpreter | interp-ac | gcstress | trace",
+        "description": ["Slow test. Prone to timeouts."]
+    },
+    {
+        "tests": ["096-array-copy-concurrent-gc"],
+        "variant": "gcstress & debuggable & debug & host",
+        "bug": "b/149708943",
+        "description": ["Timeouts."]
     }
 ]
diff --git a/test/run-test b/test/run-test
index a2d180e..66039b7 100755
--- a/test/run-test
+++ b/test/run-test
@@ -130,7 +130,7 @@
 strace_output="strace-output.txt"
 lib="libartd.so"
 testlib="arttestd"
-run_args="--quiet"
+run_args=(--quiet)
 build_args=""
 
 quiet="no"
@@ -138,6 +138,7 @@
 prebuild_mode="yes"
 target_mode="yes"
 dev_mode="no"
+create_runner="no"
 update_mode="no"
 debug_mode="no"
 relocate="no"
@@ -160,8 +161,8 @@
 have_image="yes"
 android_root="/system"
 bisection_search="no"
+timeout=""
 suspend_timeout="500000"
-image_suffix=""
 run_optimizing="false"
 
 # To cause tests to fail fast, limit the file sizes created by dx, dex2oat and
@@ -173,11 +174,12 @@
 # particular configurations.
 file_ulimit=128000
 
+
 while true; do
     if [ "x$1" = "x--host" ]; then
         target_mode="no"
         DEX_LOCATION=$tmp_dir
-        run_args="${run_args} --host"
+        run_args+=(--host)
         shift
     elif [ "x$1" = "x--quiet" ]; then
         quiet="yes"
@@ -197,12 +199,12 @@
         runtime="jvm"
         prebuild_mode="no"
         NEED_DEX="false"
-        run_args="${run_args} --jvm"
+        run_args+=(--jvm)
         shift
     elif [ "x$1" = "x-O" ]; then
         lib="libart.so"
         testlib="arttest"
-        run_args="${run_args} -O"
+        run_args+=(-O)
         shift
     elif [ "x$1" = "x--dalvik" ]; then
         lib="libdvm.so"
@@ -218,23 +220,23 @@
         relocate="no"
         shift
     elif [ "x$1" = "x--prebuild" ]; then
-        run_args="${run_args} --prebuild"
+        run_args+=(--prebuild)
         prebuild_mode="yes"
         shift;
     elif [ "x$1" = "x--compact-dex-level" ]; then
         option="$1"
         shift
-        run_args="${run_args} $option $1"
+        run_args+=("$option" "$1")
         shift;
     elif [ "x$1" = "x--strip-dex" ]; then
-        run_args="${run_args} --strip-dex"
+        run_args+=(--strip-dex)
         shift;
     elif [ "x$1" = "x--debuggable" ]; then
-        run_args="${run_args} -Xcompiler-option --debuggable"
+        run_args+=(-Xcompiler-option --debuggable)
         debuggable="yes"
         shift;
     elif [ "x$1" = "x--no-prebuild" ]; then
-        run_args="${run_args} --no-prebuild"
+        run_args+=(--no-prebuild)
         prebuild_mode="no"
         shift;
     elif [ "x$1" = "x--gcverify" ]; then
@@ -264,12 +266,12 @@
     elif [ "x$1" = "x--image" ]; then
         shift
         image="$1"
-        run_args="${run_args} --image $image"
+        run_args+=(--image "$image")
         shift
     elif [ "x$1" = "x-Xcompiler-option" ]; then
         shift
         option="$1"
-        run_args="${run_args} -Xcompiler-option $option"
+        run_args+=(-Xcompiler-option "$option")
         shift
     elif [ "x$1" = "x--build-option" ]; then
         shift
@@ -279,78 +281,76 @@
     elif [ "x$1" = "x--runtime-option" ]; then
         shift
         option="$1"
-        run_args="${run_args} --runtime-option $option"
+        run_args+=(--runtime-option "$option")
         shift
     elif [ "x$1" = "x--gdb-arg" ]; then
         shift
         gdb_arg="$1"
-        run_args="${run_args} --gdb-arg $gdb_arg"
+        run_args+=(--gdb-arg "$gdb_arg")
         shift
     elif [ "x$1" = "x--debug" ]; then
-        run_args="${run_args} --debug"
+        run_args+=(--debug)
         shift
     elif [ "x$1" = "x--debug-wrap-agent" ]; then
-        run_args="${run_args} --debug-wrap-agent"
+        run_args+=(--debug-wrap-agent)
         shift
     elif [ "x$1" = "x--with-agent" ]; then
         shift
         option="$1"
-        run_args="${run_args} --with-agent $1"
+        run_args+=(--with-agent "$1")
         shift
     elif [ "x$1" = "x--debug-agent" ]; then
         shift
         option="$1"
-        run_args="${run_args} --debug-agent $1"
+        run_args+=(--debug-agent "$1")
         shift
     elif [ "x$1" = "x--gdb" ]; then
-        run_args="${run_args} --gdb"
+        run_args+=(--gdb)
         dev_mode="yes"
         shift
     elif [ "x$1" = "x--gdbserver-bin" ]; then
         shift
-        run_args="${run_args} --gdbserver-bin $1"
+        run_args+=(--gdbserver-bin "$1")
         shift
     elif [ "x$1" = "x--gdbserver-port" ]; then
         shift
-        run_args="${run_args} --gdbserver-port $1"
+        run_args+=(--gdbserver-port "$1")
         shift
     elif [ "x$1" = "x--gdbserver" ]; then
-        run_args="${run_args} --gdbserver"
+        run_args+=(--gdbserver)
         dev_mode="yes"
         shift
     elif [ "x$1" = "x--strace" ]; then
         strace="yes"
-        run_args="${run_args} --timeout 1800 --invoke-with strace --invoke-with -o --invoke-with $tmp_dir/$strace_output"
+        run_args+=(--invoke-with strace --invoke-with -o --invoke-with "$tmp_dir/$strace_output")
+        timeout="${timeout:-1800}"
         shift
     elif [ "x$1" = "x--zygote" ]; then
-        run_args="${run_args} --zygote"
+        run_args+=(--zygote)
         shift
     elif [ "x$1" = "x--interpreter" ]; then
-        run_args="${run_args} --interpreter"
-        image_suffix="-interpreter"
+        run_args+=(--interpreter)
         shift
     elif [ "x$1" = "x--jit" ]; then
-        run_args="${run_args} --jit"
-        image_suffix="-interpreter"
+        run_args+=(--jit)
         shift
     elif [ "x$1" = "x--baseline" ]; then
-        run_args="${run_args} --baseline"
+        run_args+=(--baseline)
         shift
     elif [ "x$1" = "x--optimizing" ]; then
         run_optimizing="true"
         shift
     elif [ "x$1" = "x--no-verify" ]; then
-        run_args="${run_args} --no-verify"
+        run_args+=(--no-verify)
         shift
     elif [ "x$1" = "x--verify-soft-fail" ]; then
-        run_args="${run_args} --verify-soft-fail"
-        image_suffix="-interp-ac"
+        run_args+=(--verify-soft-fail)
         shift
     elif [ "x$1" = "x--no-optimize" ]; then
-        run_args="${run_args} --no-optimize"
+        run_args+=(--no-optimize)
         shift
     elif [ "x$1" = "x--no-precise" ]; then
-        run_args="${run_args} --no-precise"
+        run_args+=(--no-precise)
         shift
     elif [ "x$1" = "x--invoke-with" ]; then
         shift
@@ -360,10 +360,16 @@
             usage="yes"
             break
         fi
-        run_args="${run_args} --invoke-with ${what}"
+        run_args+=(--invoke-with "${what}")
+        shift
+    elif [ "x$1" = "x--create-runner" ]; then
+        run_args+=(--create-runner --dry-run)
+        dev_mode="yes"
+        never_clean="yes"
+        create_runner="yes"
         shift
     elif [ "x$1" = "x--dev" ]; then
-        run_args="${run_args} --dev"
+        run_args+=(--dev)
         dev_mode="yes"
         shift
     elif [ "x$1" = "x--build-only" ]; then
@@ -386,7 +392,7 @@
             break
         fi
         chroot="$1"
-        run_args="${run_args} --chroot $1"
+        run_args+=(--chroot "$1")
         shift
     elif [ "x$1" = "x--android-root" ]; then
         shift
@@ -396,16 +402,25 @@
             break
         fi
         android_root="$1"
-        run_args="${run_args} --android-root $1"
+        run_args+=(--android-root "$1")
         shift
-    elif [ "x$1" = "x--android-runtime-root" ]; then
+    elif [ "x$1" = "x--android-art-root" ]; then
         shift
         if [ "x$1" = "x" ]; then
-            echo "$0 missing argument to --android-runtime-root" 1>&2
+            echo "$0 missing argument to --android-art-root" 1>&2
             usage="yes"
             break
         fi
-        run_args="${run_args} --android-runtime-root $1"
+        run_args+=(--android-art-root "$1")
+        shift
+    elif [ "x$1" = "x--android-tzdata-root" ]; then
+        shift
+        if [ "x$1" = "x" ]; then
+            echo "$0 missing argument to --android-tzdata-root" 1>&2
+            usage="yes"
+            break
+        fi
+        run_args+=(--android-tzdata-root "$1")
         shift
     elif [ "x$1" = "x--update" ]; then
         update_mode="yes"
@@ -414,12 +429,12 @@
         usage="yes"
         shift
     elif [ "x$1" = "x--64" ]; then
-        run_args="${run_args} --64"
+        run_args+=(--64)
         suffix64="64"
         shift
     elif [ "x$1" = "x--bionic" ]; then
         # soong linux_bionic builds are 64bit only.
-        run_args="${run_args} --bionic --host --64"
+        run_args+=(--bionic --host --64)
         suffix64="64"
         target_mode="no"
         DEX_LOCATION=$tmp_dir
@@ -430,7 +445,7 @@
         # TODO Should we allow the java.library.path to search the zipapex too?
         # Not needed at the moment and adding it will be complicated so for now
         # we'll ignore this.
-        run_args="${run_args} --host --runtime-extracted-zipapex $1"
+        run_args+=(--host --runtime-extracted-zipapex "$1")
         target_mode="no"
         DEX_LOCATION=$tmp_dir
         shift
@@ -439,13 +454,22 @@
         # TODO Should we allow the java.library.path to search the zipapex too?
         # Not needed at the moment and adding it will be complicated so for now
         # we'll ignore this.
-        run_args="${run_args} --host --runtime-zipapex $1"
+        run_args+=(--host --runtime-zipapex "$1")
         target_mode="no"
         DEX_LOCATION=$tmp_dir
         # apex_payload.zip is quite large we need a high enough ulimit to
         # extract it. 512mb should be good enough.
         file_ulimit=512000
         shift
+    elif [ "x$1" = "x--timeout" ]; then
+        shift
+        if [ "x$1" = "x" ]; then
+            echo "$0 missing argument to --timeout" 1>&2
+            usage="yes"
+            break
+        fi
+        timeout="$1"
+        shift
     elif [ "x$1" = "x--trace" ]; then
         trace="true"
         shift
@@ -459,32 +483,32 @@
         never_clean="yes"
         shift
     elif [ "x$1" = "x--dex2oat-swap" ]; then
-        run_args="${run_args} --dex2oat-swap"
+        run_args+=(--dex2oat-swap)
         shift
     elif [ "x$1" = "x--instruction-set-features" ]; then
         shift
-        run_args="${run_args} --instruction-set-features $1"
+        run_args+=(--instruction-set-features "$1")
         shift
     elif [ "x$1" = "x--bisection-search" ]; then
         bisection_search="yes"
         shift
     elif [ "x$1" = "x--vdex" ]; then
-        run_args="${run_args} --vdex"
+        run_args+=(--vdex)
         shift
     elif [ "x$1" = "x--dm" ]; then
-        run_args="${run_args} --dm"
+        run_args+=(--dm)
         shift
     elif [ "x$1" = "x--vdex-filter" ]; then
         shift
         filter=$1
-        run_args="${run_args} --vdex-filter $filter"
+        run_args+=(--vdex-filter "$filter")
         shift
     elif [ "x$1" = "x--random-profile" ]; then
-        run_args="${run_args} --random-profile"
+        run_args+=(--random-profile)
         shift
     elif [ "x$1" = "x--dex2oat-jobs" ]; then
         shift
-        run_args="${run_args} -Xcompiler-option -j$1"
+        run_args+=(-Xcompiler-option "-j$1")
         shift
     elif expr "x$1" : "x--" >/dev/null 2>&1; then
         echo "unknown $0 option: $1" 1>&2
@@ -533,51 +557,58 @@
 # Cannot use readlink -m, as it does not exist on Mac.
 # Fallback to nuclear option:
 noncanonical_tmp_dir=$tmp_dir
-tmp_dir="`cd $oldwd ; python -c "import os; print os.path.realpath('$tmp_dir')"`"
+tmp_dir="`cd $oldwd ; python -c "import os; import sys; sys.stdout.write(os.path.realpath('$tmp_dir'))"`"
+if [ -z $tmp_dir ] ; then
+  err_echo "Failed to resolve $tmp_dir"
+  exit 1
+fi
 mkdir -p $tmp_dir
 
 # Add thread suspend timeout flag
 if [ ! "$runtime" = "jvm" ]; then
-  run_args="${run_args} --runtime-option -XX:ThreadSuspendTimeout=$suspend_timeout"
+  run_args+=(--runtime-option "-XX:ThreadSuspendTimeout=$suspend_timeout")
 fi
 
 if [ "$basic_verify" = "true" ]; then
   # Set HspaceCompactForOOMMinIntervalMs to zero to run hspace compaction for OOM more frequently in tests.
-  run_args="${run_args} --runtime-option -Xgc:preverify --runtime-option -Xgc:postverify --runtime-option -XX:HspaceCompactForOOMMinIntervalMs=0"
+  run_args+=(--runtime-option -Xgc:preverify --runtime-option -Xgc:postverify --runtime-option -XX:HspaceCompactForOOMMinIntervalMs=0)
 fi
 if [ "$gc_verify" = "true" ]; then
-  run_args="${run_args} --runtime-option -Xgc:preverify_rosalloc --runtime-option -Xgc:postverify_rosalloc"
+  run_args+=(--runtime-option -Xgc:preverify_rosalloc --runtime-option -Xgc:postverify_rosalloc)
 fi
 if [ "$gc_stress" = "true" ]; then
-  run_args="${run_args} --gc-stress --runtime-option -Xgc:gcstress --runtime-option -Xms2m --runtime-option -Xmx16m"
+  run_args+=(--gc-stress --runtime-option -Xgc:gcstress --runtime-option -Xms2m --runtime-option -Xmx16m)
 fi
 if [ "$jvmti_redefine_stress" = "true" ]; then
-    run_args="${run_args} --no-app-image --jvmti-redefine-stress"
+    run_args+=(--no-app-image --jvmti-redefine-stress)
 fi
 if [ "$jvmti_step_stress" = "true" ]; then
-    run_args="${run_args} --no-app-image --jvmti-step-stress"
+    run_args+=(--no-app-image --jvmti-step-stress)
 fi
 if [ "$jvmti_field_stress" = "true" ]; then
-    run_args="${run_args} --no-app-image --jvmti-field-stress"
+    run_args+=(--no-app-image --jvmti-field-stress)
 fi
 if [ "$jvmti_trace_stress" = "true" ]; then
-    run_args="${run_args} --no-app-image --jvmti-trace-stress"
+    run_args+=(--no-app-image --jvmti-trace-stress)
 fi
 if [ "$trace" = "true" ]; then
-    run_args="${run_args} --runtime-option -Xmethod-trace --runtime-option -Xmethod-trace-file-size:2000000"
+    run_args+=(--runtime-option -Xmethod-trace --runtime-option -Xmethod-trace-file-size:2000000)
     if [ "$trace_stream" = "true" ]; then
         # Streaming mode uses the file size as the buffer size. So output gets really large. Drop
         # the ability to analyze the file and just write to /dev/null.
-        run_args="${run_args} --runtime-option -Xmethod-trace-file:/dev/null"
+        run_args+=(--runtime-option -Xmethod-trace-file:/dev/null)
         # Enable streaming mode.
-        run_args="${run_args} --runtime-option -Xmethod-trace-stream"
+        run_args+=(--runtime-option -Xmethod-trace-stream)
     else
-        run_args="${run_args} --runtime-option -Xmethod-trace-file:${DEX_LOCATION}/trace.bin"
+        run_args+=(--runtime-option "-Xmethod-trace-file:${DEX_LOCATION}/trace.bin")
     fi
 elif [ "$trace_stream" = "true" ]; then
     err_echo "Cannot use --stream without --trace."
     exit 1
 fi
+if [ -n "$timeout" ]; then
+    run_args+=(--timeout "$timeout")
+fi
 
 # Most interesting target architecture variables are Makefile variables, not environment variables.
 # Try to map the suffix64 flag and what we find in ${ANDROID_PRODUCT_OUT}/data/art-test to an architecture name.
@@ -596,8 +627,8 @@
             target_arch_name=x86
         fi
     else
-        grep32bit=`ls ${ANDROID_PRODUCT_OUT}/data/art-test | grep -E '^(arm|x86|mips)$'`
-        grep64bit=`ls ${ANDROID_PRODUCT_OUT}/data/art-test | grep -E '^(arm64|x86_64|mips64)$'`
+        grep32bit=`ls ${ANDROID_PRODUCT_OUT}/data/art-test | grep -E '^(arm|x86)$'`
+        grep64bit=`ls ${ANDROID_PRODUCT_OUT}/data/art-test | grep -E '^(arm64|x86_64)$'`
         if [ "x${suffix64}" = "x64" ]; then
             target_arch_name=${grep64bit}
         else
@@ -630,35 +661,35 @@
 fi
 
 if [ ! "$runtime" = "jvm" ]; then
-  run_args="${run_args} --lib $lib"
+  run_args+=(--lib "$lib")
 fi
 
 if [ "$runtime" = "dalvik" ]; then
     if [ "$target_mode" = "no" ]; then
         framework="${ANDROID_PRODUCT_OUT}/system/framework"
-        bpath="${framework}/core-libart.jar:${framework}/core-oj.jar:${framework}/conscrypt.jar:${framework}/okhttp.jar:${framework}/bouncycastle.jar:${framework}/ext.jar"
-        run_args="${run_args} --boot --runtime-option -Xbootclasspath:${bpath}"
+        bpath="${framework}/core-icu4j.jar:${framework}/core-libart.jar:${framework}/core-oj.jar:${framework}/conscrypt.jar:${framework}/okhttp.jar:${framework}/bouncycastle.jar:${framework}/ext.jar"
+        run_args+=(--boot --runtime-option "-Xbootclasspath:${bpath}")
     else
         true # defaults to using target BOOTCLASSPATH
     fi
 elif [ "$runtime" = "art" ]; then
     if [ "$target_mode" = "no" ]; then
         guess_host_arch_name
-        run_args="${run_args} --boot ${ANDROID_HOST_OUT}/framework/core${image_suffix}.art"
-        run_args="${run_args} --runtime-option -Djava.library.path=${host_lib_root}/lib${suffix64}:${host_lib_root}/nativetest${suffix64}"
+        run_args+=(--boot "${ANDROID_HOST_OUT}/framework/core.art:*")
+        run_args+=(--runtime-option "-Djava.library.path=${host_lib_root}/lib${suffix64}:${host_lib_root}/nativetest${suffix64}")
     else
         guess_target_arch_name
-        run_args="${run_args} --runtime-option -Djava.library.path=/data/nativetest${suffix64}/art/${target_arch_name}"
-        run_args="${run_args} --boot /data/art-test/core${image_suffix}.art"
+        run_args+=(--runtime-option "-Djava.library.path=/data/nativetest${suffix64}/art/${target_arch_name}")
+        run_args+=(--boot "/data/art-test/core.art:/data/art-test/*")
     fi
     if [ "$relocate" = "yes" ]; then
-      run_args="${run_args} --relocate"
+      run_args+=(--relocate)
     else
-      run_args="${run_args} --no-relocate"
+      run_args+=(--no-relocate)
     fi
 elif [ "$runtime" = "jvm" ]; then
     # TODO: Detect whether the host is 32-bit or 64-bit.
-    run_args="${run_args} --runtime-option -Djava.library.path=${ANDROID_HOST_OUT}/lib64:${ANDROID_HOST_OUT}/nativetest64"
+    run_args+=(--runtime-option "-Djava.library.path=${ANDROID_HOST_OUT}/lib64:${ANDROID_HOST_OUT}/nativetest64")
 fi
 
 if [ "$have_image" = "no" ]; then
@@ -666,7 +697,12 @@
         err_echo "--no-image is only supported on the art runtime"
         exit 1
     fi
-    run_args="${run_args} --no-image"
+    run_args+=(--no-image)
+fi
+
+if [ "$create_runner" = "yes" -a "$target_mode" = "yes" ]; then
+    err_echo "--create-runner does not function for non --host tests"
+    usage="yes"
 fi
 
 if [ "$dev_mode" = "yes" -a "$update_mode" = "yes" ]; then
@@ -718,6 +754,11 @@
         echo "  $prog [options] [test-name]           Run test normally."
         echo "  $prog --dev [options] [test-name]     Development mode" \
              "(dumps to stdout)."
+        echo "  $prog --create-runner [options] [test-name]"
+        echo "              Creates a runner script for use with other " \
+             "tools (e.g. parallel_run.py)."
+        echo "              The script will only run the test portion, and " \
+             "share oat and dex files."
         echo "  $prog --update [options] [test-name]  Update mode" \
              "(replaces expected.txt)."
         echo '  Omitting the test name or specifying "-" will use the' \
@@ -783,6 +824,7 @@
         echo "    --runtime-extracted-zipapex [dir]"
         echo "                          Use the given extracted zipapex directory to provide"
         echo "                          runtime binaries"
+        echo "    --timeout n           Test timeout in seconds"
         echo "    --trace               Run with method tracing"
         echo "    --strace              Run with syscall tracing from strace."
         echo "    --stream              Run method tracing in streaming mode (requires --trace)"
@@ -796,9 +838,15 @@
         echo "    --never-clean         Keep the test files even if the test succeeds."
         echo "    --chroot [newroot]    Run with root directory set to newroot."
         echo "    --android-root [path] The path on target for the android root. (/system by default)."
-        echo "    --android-runtime-root [path]"
-        echo "                          The path on target for the Android Runtime root."
-        echo "                          (/apex/com.android.runtime by default)."
+        echo "    --android-i18n-root [path]"
+        echo "                          The path on target for the i18n module root."
+        echo "                          (/apex/com.android.i18n by default)."
+        echo "    --android-art-root [path]"
+        echo "                          The path on target for the ART module root."
+        echo "                          (/apex/com.android.art by default)."
+        echo "    --android-tzdata-root [path]"
+        echo "                          The path on target for the Android Time Zone Data root."
+        echo "                          (/apex/com.android.tzdata by default)."
         echo "    --dex2oat-swap        Use a dex2oat swap file."
         echo "    --instruction-set-features [string]"
         echo "                          Set instruction-set-features for compilation."
@@ -862,7 +910,7 @@
 # Tests named '<number>-checker-*' will also have their CFGs verified with
 # Checker when compiled with Optimizing on host.
 if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then
-  if [ "$runtime" = "art" -a "$image_suffix" = "" -a "$run_optimizing" = "true" ]; then
+  if [ "$runtime" = "art" -a "$run_optimizing" = "true" ]; then
     # In no-prebuild or no-image mode, the compiler only quickens so disable the checker.
     if [ "$prebuild_mode" = "yes" -a "$have_image" = "yes" ]; then
       run_checker="yes"
@@ -879,13 +927,12 @@
         checker_args="$checker_args --debuggable"
       fi
 
-      run_args="${run_args} -Xcompiler-option --dump-cfg=$cfg_output_dir/$cfg_output \
-                            -Xcompiler-option -j1"
+      run_args+=(-Xcompiler-option "--dump-cfg=$cfg_output_dir/$cfg_output" -Xcompiler-option -j1)
     fi
   fi
 fi
 
-run_args="${run_args} --testlib ${testlib}"
+run_args+=(--testlib "${testlib}")
 
 if ! ulimit -f ${file_ulimit}; then
   err_echo "ulimit file size setting failed"
@@ -917,7 +964,7 @@
     echo "build exit status: $build_exit" 1>&2
     if [ "$build_exit" = '0' ]; then
         echo "${test_dir}: running..." 1>&2
-        "./${run}" $run_args "$@" 2>&1
+        "./${run}" "${run_args[@]}" "$@" 2>&1
         run_exit="$?"
 
         if [ "$run_exit" = "0" ]; then
@@ -942,7 +989,7 @@
     build_exit="$?"
     if [ "$build_exit" = '0' ]; then
         echo "${test_dir}: running..." 1>&2
-        "./${run}" $run_args "$@" >"$output" 2>&1
+        "./${run}" "${run_args[@]}" "$@" >"$output" 2>&1
         if [ "$run_checker" = "yes" ]; then
           if [ "$target_mode" = "yes" ]; then
             adb pull "$chroot/$cfg_output_dir/$cfg_output" &> /dev/null
@@ -976,7 +1023,7 @@
     build_exit="$?"
     if [ "$build_exit" = '0' ]; then
         echo "${test_dir}: running..." 1>&2
-        "./${run}" $run_args "$@" >"$output" 2>&1
+        "./${run}" "${run_args[@]}" "$@" >"$output" 2>&1
         run_exit="$?"
         if [ "$run_exit" != "0" ]; then
             err_echo "run exit status: $run_exit"
@@ -1025,7 +1072,12 @@
         echo '#################### info'
         cat "${td_info}" | sed 's/^/# /g'
         echo '#################### diffs'
-        diff --strip-trailing-cr -u "$expected" "$output" | tail -n 3000
+        if [ "$run_checker" == "yes" ]; then
+          # Checker failures dump the whole CFG, so we output the whole diff.
+          diff --strip-trailing-cr -u "$expected" "$output"
+        else
+          diff --strip-trailing-cr -u "$expected" "$output" | tail -n 3000
+        fi
         echo '####################'
         if [ "$strace" = "yes" ]; then
             echo '#################### strace output'
@@ -1059,12 +1111,12 @@
         # so that cmdline.sh forwards its arguments to dalvikvm. invoke-with is set
         # to exec in order to preserve pid when calling dalvikvm. This is required
         # for bisection search to correctly retrieve logs from device.
-        "./${run}" $run_args --runtime-option '"$@"' --invoke-with exec --dry-run "$@" &> /dev/null
+        "./${run}" "${run_args[@]}" --runtime-option '"$@"' --invoke-with exec --dry-run "$@" &> /dev/null
         adb shell chmod u+x "$chroot_dex_location/cmdline.sh"
         maybe_device_mode="--device"
         raw_cmd="$DEX_LOCATION/cmdline.sh"
       else
-        raw_cmd="$cwd/${run} --external-log-tags $run_args $@"
+        raw_cmd="$cwd/${run} --external-log-tags ${run_args[*]} $@"
       fi
       # TODO: Pass a `--chroot` option to the bisection_search.py script and use it there.
       $ANDROID_BUILD_TOP/art/tools/bisection_search/bisection_search.py \
@@ -1073,7 +1125,7 @@
         --check-script="$cwd/check" \
         --expected-output="$cwd/expected.txt" \
         --logfile="$cwd/bisection_log.txt" \
-        --timeout=300
+        --timeout=${timeout:-300}
     fi
 fi
 
diff --git a/test/testrunner/env.py b/test/testrunner/env.py
index a8360cb..6c9447d 100644
--- a/test/testrunner/env.py
+++ b/test/testrunner/env.py
@@ -90,7 +90,8 @@
 
 ART_TEST_CHROOT = _env.get('ART_TEST_CHROOT')
 ART_TEST_ANDROID_ROOT = _env.get('ART_TEST_ANDROID_ROOT')
-ART_TEST_ANDROID_RUNTIME_ROOT = _env.get('ART_TEST_ANDROID_RUNTIME_ROOT')
+ART_TEST_ANDROID_ART_ROOT = _env.get('ART_TEST_ANDROID_ART_ROOT')
+ART_TEST_ANDROID_I18N_ROOT = _env.get('ART_TEST_ANDROID_I18N_ROOT')
 ART_TEST_ANDROID_TZDATA_ROOT = _env.get('ART_TEST_ANDROID_TZDATA_ROOT')
 
 ART_TEST_WITH_STRACE = _getEnvBoolean('ART_TEST_DEBUG_GC', False)
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index 6e299bd..907f4ec 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -124,16 +124,6 @@
             'ART_USE_READ_BARRIER' : 'false'
         }
     },
-    # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
-    'art-gss-gc' : {
-        'run-test' : ['--interpreter',
-                      '--optimizing',
-                      '--jit'],
-        'env' : {
-            'ART_DEFAULT_GC_TYPE' : 'GSS',
-            'ART_USE_READ_BARRIER' : 'false'
-        }
-    },
     # TODO: Consider removing this configuration when it is no longer used by
     # any continuous testing target (b/62611253), as the SS collector overlaps
     # with the CC collector, since both move objects.
@@ -147,17 +137,6 @@
             'ART_USE_READ_BARRIER' : 'false'
         }
     },
-    # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
-    'art-gss-gc-tlab' : {
-        'run-test' : ['--interpreter',
-                      '--optimizing',
-                      '--jit'],
-        'env' : {
-            'ART_DEFAULT_GC_TYPE' : 'GSS',
-            'ART_USE_TLAB' : 'true',
-            'ART_USE_READ_BARRIER' : 'false'
-        }
-    },
     'art-tracing' : {
         'run-test' : ['--trace']
     },
@@ -229,14 +208,6 @@
             'ART_DEFAULT_COMPACT_DEX_LEVEL' : 'none'
         }
     },
-    # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
-    'art-gtest-gss-gc': {
-        'make' :  'test-art-host-gtest',
-        'env' : {
-            'ART_DEFAULT_GC_TYPE' : 'GSS',
-            'ART_USE_READ_BARRIER' : 'false'
-        }
-    },
     # TODO: Consider removing this configuration when it is no longer used by
     # any continuous testing target (b/62611253), as the SS collector overlaps
     # with the CC collector, since both move objects.
@@ -248,15 +219,6 @@
             'ART_USE_READ_BARRIER' : 'false',
         }
     },
-    # TODO: Remove this configuration (b/62611253) when the GSS collector is removed (b/73295078).
-    'art-gtest-gss-gc-tlab': {
-        'make' :  'test-art-host-gtest',
-        'env': {
-            'ART_DEFAULT_GC_TYPE' : 'GSS',
-            'ART_USE_TLAB' : 'true',
-            'ART_USE_READ_BARRIER' : 'false'
-        }
-    },
     'art-gtest-debug-gc' : {
         'make' :  'test-art-host-gtest',
         'env' : {
@@ -337,9 +299,9 @@
                      '--no-build-dependencies'],
     },
     'art-linux-bionic-x64-zipapex': {
-        'build': '{ANDROID_BUILD_TOP}/art/tools/build_linux_bionic_tests.sh {MAKE_OPTIONS} com.android.runtime.host',
+        'build': '{ANDROID_BUILD_TOP}/art/tools/build_linux_bionic_tests.sh {MAKE_OPTIONS} com.android.art.host',
         'run-test': ['--run-test-option=--bionic',
-                     "--runtime-zipapex={SOONG_OUT_DIR}/host/linux_bionic-x86/apex/com.android.runtime.host.zipapex",
+                     "--runtime-zipapex={SOONG_OUT_DIR}/host/linux_bionic-x86/apex/com.android.art.host.zipapex",
                      '--host',
                      '--64',
                      '--no-build-dependencies'],
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index 461887e..831622c 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -1,5 +1,9 @@
 #!/usr/bin/env python3
 #
+# [VPYTHON:BEGIN]
+# python_version: "3.8"
+# [VPYTHON:END]
+#
 # Copyright 2017, The Android Open Source Project
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -46,7 +50,18 @@
 """
 import argparse
 import collections
+
+# b/140161314 diagnostics.
+try:
+  import concurrent.futures
+except Exception:
+  import sys
+  sys.stdout.write("\n\n" + sys.executable + " " + sys.version + "\n\n")
+  sys.stdout.flush()
+  raise
+
 import contextlib
+import datetime
 import fnmatch
 import itertools
 import json
@@ -55,10 +70,10 @@
 import re
 import shlex
 import shutil
+import signal
 import subprocess
 import sys
 import tempfile
-import threading
 import time
 
 import env
@@ -67,7 +82,15 @@
 
 # timeout for individual tests.
 # TODO: make it adjustable per tests and for buildbots
-timeout = 3000 # 50 minutes
+#
+# Note: this needs to be larger than run-test timeouts, as long as this script
+#       does not push the value to run-test. run-test is somewhat complicated:
+#                      base: 25m  (large for ASAN)
+#        + timeout handling:  2m
+#        +   gcstress extra: 20m
+#        -----------------------
+#                            47m
+timeout = 3600 # 60 minutes
 
 # DISABLED_TEST_CONTAINER holds information about the disabled tests. It is a map
 # that has key as the test name (like 001-HelloWorld), and value as set of
@@ -79,6 +102,9 @@
 # the test name given as the argument to run.
 VARIANT_TYPE_DICT = {}
 
+# The set of all variant sets that are incompatible and will always be skipped.
+NONFUNCTIONAL_VARIANT_SETS = set()
+
 # The set contains all the variants of each time.
 TOTAL_VARIANTS_SET = set()
 
@@ -89,27 +115,15 @@
 COLOR_SKIP = '\033[93m'
 COLOR_NORMAL = '\033[0m'
 
-# The mutex object is used by the threads for exclusive access of test_count
-# to make any changes in its value.
-test_count_mutex = threading.Lock()
-
 # The set contains the list of all the possible run tests that are in art/test
 # directory.
 RUN_TEST_SET = set()
 
-# The semaphore object is used by the testrunner to limit the number of
-# threads to the user requested concurrency value.
-semaphore = threading.Semaphore(1)
-
-# The mutex object is used to provide exclusive access to a thread to print
-# its output.
-print_mutex = threading.Lock()
 failed_tests = []
 skipped_tests = []
 
 # Flags
 n_thread = -1
-test_count = 0
 total_test_count = 0
 verbose = False
 dry_run = False
@@ -121,7 +135,6 @@
 with_agent = []
 zipapex_loc = None
 run_test_option = []
-stop_testrunner = False
 dex2oat_jobs = -1   # -1 corresponds to default threads for dex2oat
 run_all_configs = False
 
@@ -139,7 +152,6 @@
   of disabled test. It also maps various variants to types.
   """
   global TOTAL_VARIANTS_SET
-  global DISABLED_TEST_CONTAINER
   # TODO: Avoid duplication of the variant names in different lists.
   VARIANT_TYPE_DICT['run'] = {'ndebug', 'debug'}
   VARIANT_TYPE_DICT['target'] = {'target', 'host', 'jvm'}
@@ -155,7 +167,11 @@
   VARIANT_TYPE_DICT['jvmti'] = {'no-jvmti', 'jvmti-stress', 'redefine-stress', 'trace-stress',
                                 'field-stress', 'step-stress'}
   VARIANT_TYPE_DICT['compiler'] = {'interp-ac', 'interpreter', 'jit', 'jit-on-first-use',
-                                   'optimizing', 'regalloc_gc', 'speed-profile', 'baseline'}
+                                   'optimizing', 'regalloc_gc',
+                                   'speed-profile', 'baseline'}
+
+  # Regalloc_GC cannot work with prebuild.
+  NONFUNCTIONAL_VARIANT_SETS.add(frozenset({'regalloc_gc', 'prebuild'}))
 
   for v_type in VARIANT_TYPE_DICT:
     TOTAL_VARIANTS_SET = TOTAL_VARIANTS_SET.union(VARIANT_TYPE_DICT.get(v_type))
@@ -164,7 +180,6 @@
   for f in os.listdir(test_dir):
     if fnmatch.fnmatch(f, '[0-9]*'):
       RUN_TEST_SET.add(f)
-  DISABLED_TEST_CONTAINER = get_disabled_test_info()
 
 
 def setup_test_env():
@@ -225,7 +240,7 @@
     _user_input_variants['address_sizes_target']['target'] = _user_input_variants['address_sizes']
 
   global n_thread
-  if n_thread is -1:
+  if n_thread == -1:
     if 'target' in _user_input_variants['target']:
       n_thread = get_default_threads('target')
     else:
@@ -236,9 +251,6 @@
   for target in _user_input_variants['target']:
     extra_arguments[target] = find_extra_device_arguments(target)
 
-  global semaphore
-  semaphore = threading.Semaphore(n_thread)
-
   if not sys.stdout.isatty():
     global COLOR_ERROR
     global COLOR_PASS
@@ -277,15 +289,7 @@
     return "UNKNOWN_TARGET"
 
 def run_tests(tests):
-  """Creates thread workers to run the tests.
-
-  The method generates command and thread worker to run the tests. Depending on
-  the user input for the number of threads to be used, the method uses a
-  semaphore object to keep a count in control for the thread workers. When a new
-  worker is created, it acquires the semaphore object, and when the number of
-  workers reaches the maximum allowed concurrency, the method wait for an
-  existing thread worker to release the semaphore object. Worker releases the
-  semaphore object when they finish printing the output.
+  """This method generates variants of the tests to be run and executes them.
 
   Args:
     tests: The set of tests to be run.
@@ -362,18 +366,10 @@
       'debuggable': [''], 'jvmti': [''],
       'cdex_level': ['']})
 
-  def start_combination(config_tuple, global_options, address_size):
+  def start_combination(executor, config_tuple, global_options, address_size):
       test, target, run, prebuild, compiler, relocate, trace, gc, \
       jni, image, debuggable, jvmti, cdex_level = config_tuple
 
-      if stop_testrunner:
-        # When ART_TEST_KEEP_GOING is set to false, then as soon as a test
-        # fails, stop_testrunner is set to True. When this happens, the method
-        # stops creating any any thread and wait for all the exising threads
-        # to end.
-        while threading.active_count() > 2:
-          time.sleep(0.1)
-          return
       # NB The order of components here should match the order of
       # components in the regex parser in parse_test_name.
       test_name = 'test-art-'
@@ -402,15 +398,18 @@
       elif target == 'jvm':
         options_test += ' --jvm'
 
-      # Honor ART_TEST_CHROOT, ART_TEST_ANDROID_ROOT, ART_TEST_ANDROID_RUNTIME_ROOT,
-      # and ART_TEST_ANDROID_TZDATA_ROOT but only for target tests.
+      # Honor ART_TEST_CHROOT, ART_TEST_ANDROID_ROOT, ART_TEST_ANDROID_ART_ROOT,
+      # ART_TEST_ANDROID_I18N_ROOT, and ART_TEST_ANDROID_TZDATA_ROOT but only
+      # for target tests.
       if target == 'target':
         if env.ART_TEST_CHROOT:
           options_test += ' --chroot ' + env.ART_TEST_CHROOT
         if env.ART_TEST_ANDROID_ROOT:
           options_test += ' --android-root ' + env.ART_TEST_ANDROID_ROOT
-        if env.ART_TEST_ANDROID_RUNTIME_ROOT:
-          options_test += ' --android-runtime-root ' + env.ART_TEST_ANDROID_RUNTIME_ROOT
+        if env.ART_TEST_ANDROID_I18N_ROOT:
+          options_test += ' --android-i18n-root ' + env.ART_TEST_ANDROID_I18N_ROOT
+        if env.ART_TEST_ANDROID_ART_ROOT:
+          options_test += ' --android-art-root ' + env.ART_TEST_ANDROID_ART_ROOT
         if env.ART_TEST_ANDROID_TZDATA_ROOT:
           options_test += ' --android-tzdata-root ' + env.ART_TEST_ANDROID_TZDATA_ROOT
 
@@ -467,7 +466,7 @@
         options_test += ' --no-image'
 
       if debuggable == 'debuggable':
-        options_test += ' --debuggable'
+        options_test += ' --debuggable --runtime-option -Xopaque-jni-ids:true'
 
       if jvmti == 'jvmti-stress':
         options_test += ' --jvmti-trace-stress --jvmti-redefine-stress --jvmti-field-stress'
@@ -498,25 +497,32 @@
 
       run_test_sh = env.ANDROID_BUILD_TOP + '/art/test/run-test'
       command = ' '.join((run_test_sh, options_test, ' '.join(extra_arguments[target]), test))
-
-      semaphore.acquire()
-      worker = threading.Thread(target=run_test, args=(command, test, variant_set, test_name))
-      worker.daemon = True
-      worker.start()
+      return executor.submit(run_test, command, test, variant_set, test_name)
 
   #  Use a context-manager to handle cleaning up the extracted zipapex if needed.
   with handle_zipapex(zipapex_loc) as zipapex_opt:
     options_all += zipapex_opt
-    for config_tuple in config:
-      target = config_tuple[1]
-      for address_size in _user_input_variants['address_sizes_target'][target]:
-        start_combination(config_tuple, options_all, address_size)
+    global n_thread
+    with concurrent.futures.ThreadPoolExecutor(max_workers=n_thread) as executor:
+      test_futures = []
+      for config_tuple in config:
+        target = config_tuple[1]
+        for address_size in _user_input_variants['address_sizes_target'][target]:
+          test_futures.append(start_combination(executor, config_tuple, options_all, address_size))
 
-    for config_tuple in uncombinated_config:
-        start_combination(config_tuple, options_all, "")  # no address size
+      for config_tuple in uncombinated_config:
+        test_futures.append(start_combination(executor, config_tuple, options_all, ""))  # no address size
 
-    while threading.active_count() > 2:
-      time.sleep(0.1)
+      tests_done = 0
+      for test_future in concurrent.futures.as_completed(test_futures):
+        (test, status, failure_info, test_time) = test_future.result()
+        tests_done += 1
+        print_test_info(tests_done, test, status, failure_info, test_time)
+        if failure_info and not env.ART_TEST_KEEP_GOING:
+          for f in test_futures:
+            f.cancel()
+          break
+      executor.shutdown(True)
 
 @contextlib.contextmanager
 def handle_zipapex(ziploc):
@@ -543,57 +549,84 @@
   passed, otherwise, put it in the list of failed test. Before actually running
   the test, it also checks if the test is placed in the list of disabled tests,
   and if yes, it skips running it, and adds the test in the list of skipped
-  tests. The method uses print_text method to actually print the output. After
-  successfully running and capturing the output for the test, it releases the
-  semaphore object.
+  tests.
 
   Args:
     command: The command to be used to invoke the script
     test: The name of the test without the variant information.
     test_variant: The set of variant for the test.
     test_name: The name of the test along with the variants.
+
+  Returns: a tuple of testname, status, optional failure info, and test time.
   """
-  global stop_testrunner
   try:
     if is_test_disabled(test, test_variant):
       test_skipped = True
+      test_time = datetime.timedelta()
     else:
       test_skipped = False
+      test_start_time = time.monotonic()
+      if verbose:
+        print_text("Starting %s at %s\n" % (test_name, test_start_time))
       if gdb:
-        proc = subprocess.Popen(command.split(), stderr=subprocess.STDOUT, universal_newlines=True)
+        proc = subprocess.Popen(command.split(), stderr=subprocess.STDOUT,
+                                universal_newlines=True, start_new_session=True)
       else:
         proc = subprocess.Popen(command.split(), stderr=subprocess.STDOUT, stdout = subprocess.PIPE,
-                                universal_newlines=True)
+                                universal_newlines=True, start_new_session=True)
       script_output = proc.communicate(timeout=timeout)[0]
       test_passed = not proc.wait()
+      test_time_seconds = time.monotonic() - test_start_time
+      test_time = datetime.timedelta(seconds=test_time_seconds)
 
     if not test_skipped:
       if test_passed:
-        print_test_info(test_name, 'PASS')
+        return (test_name, 'PASS', None, test_time)
       else:
         failed_tests.append((test_name, str(command) + "\n" + script_output))
-        if not env.ART_TEST_KEEP_GOING:
-          stop_testrunner = True
-        print_test_info(test_name, 'FAIL', ('%s\n%s') % (
-          command, script_output))
+        return (test_name, 'FAIL', ('%s\n%s') % (command, script_output), test_time)
     elif not dry_run:
-      print_test_info(test_name, 'SKIP')
       skipped_tests.append(test_name)
+      return (test_name, 'SKIP', None, test_time)
     else:
-      print_test_info(test_name, '')
+      return (test_name, 'PASS', None, test_time)
   except subprocess.TimeoutExpired as e:
+    if verbose:
+      print_text("Timeout of %s at %s\n" % (test_name, time.monotonic()))
+    test_time_seconds = time.monotonic() - test_start_time
+    test_time = datetime.timedelta(seconds=test_time_seconds)
     failed_tests.append((test_name, 'Timed out in %d seconds' % timeout))
-    print_test_info(test_name, 'TIMEOUT', 'Timed out in %d seconds\n%s' % (
-        timeout, command))
+
+    # HACK(b/142039427): Print extra backtraces on timeout.
+    if "-target-" in test_name:
+      for i in range(8):
+        proc_name = "dalvikvm" + test_name[-2:]
+        pidof = subprocess.run(["adb", "shell", "pidof", proc_name], stdout=subprocess.PIPE)
+        for pid in pidof.stdout.decode("ascii").split():
+          if i >= 4:
+            print_text("Backtrace of %s at %s\n" % (pid, time.monotonic()))
+            subprocess.run(["adb", "shell", "debuggerd", pid])
+            time.sleep(10)
+          task_dir = "/proc/%s/task" % pid
+          tids = subprocess.run(["adb", "shell", "ls", task_dir], stdout=subprocess.PIPE)
+          for tid in tids.stdout.decode("ascii").split():
+            for status in ["stat", "status"]:
+              filename = "%s/%s/%s" % (task_dir, tid, status)
+              print_text("Content of %s\n" % (filename))
+              subprocess.run(["adb", "shell", "cat", filename])
+        time.sleep(60)
+
+    # The python documentation states that it is necessary to actually kill the process.
+    os.killpg(proc.pid, signal.SIGKILL)
+    script_output = proc.communicate()
+
+    return (test_name, 'TIMEOUT', 'Timed out in %d seconds\n%s' % (timeout, command), test_time)
   except Exception as e:
     failed_tests.append((test_name, str(e)))
-    print_test_info(test_name, 'FAIL',
-    ('%s\n%s\n\n') % (command, str(e)))
-  finally:
-    semaphore.release()
+    return (test_name, 'FAIL', ('%s\n%s\n\n') % (command, str(e)), datetime.timedelta())
 
-
-def print_test_info(test_name, result, failed_test_info=""):
+def print_test_info(test_count, test_name, result, failed_test_info="",
+                    test_time=datetime.timedelta()):
   """Print the continous test information
 
   If verbose is set to True, it continuously prints test status information
@@ -608,7 +641,6 @@
   test information in either of the cases.
   """
 
-  global test_count
   info = ''
   if not verbose:
     # Without --verbose, the testrunner erases passing test info. It
@@ -617,13 +649,14 @@
     console_width = int(os.popen('stty size', 'r').read().split()[1])
     info = '\r' + ' ' * console_width + '\r'
   try:
-    print_mutex.acquire()
-    test_count += 1
     percent = (test_count * 100) / total_test_count
     progress_info = ('[ %d%% %d/%d ]') % (
       percent,
       test_count,
       total_test_count)
+    if test_time.total_seconds() != 0 and verbose:
+      info += '(%s)' % str(test_time)
+
 
     if result == 'FAIL' or result == 'TIMEOUT':
       if not verbose:
@@ -666,8 +699,6 @@
   except Exception as e:
     print_text(('%s\n%s\n') % (test_name, str(e)))
     failed_tests.append(test_name)
-  finally:
-    print_mutex.release()
 
 def verify_knownfailure_entry(entry):
   supported_field = {
@@ -676,7 +707,9 @@
       'description' : (list, str),
       'bug' : (str,),
       'variant' : (str,),
+      'devices': (list, str),
       'env_vars' : (dict,),
+      'zipapex' : (bool,),
   }
   for field in entry:
     field_type = type(entry[field])
@@ -686,7 +719,7 @@
           field,
           str(entry)))
 
-def get_disabled_test_info():
+def get_disabled_test_info(device_name):
   """Generate set of known failures.
 
   It parses the art/test/knownfailures.json file to generate the list of
@@ -708,10 +741,23 @@
       tests = [tests]
     patterns = failure.get("test_patterns", [])
     if (not isinstance(patterns, list)):
-      raise ValueError("test_patters is not a list in %s" % failure)
+      raise ValueError("test_patterns is not a list in %s" % failure)
 
     tests += [f for f in RUN_TEST_SET if any(re.match(pat, f) is not None for pat in patterns)]
     variants = parse_variants(failure.get('variant'))
+
+    # Treat a '"devices": "<foo>"' equivalent to 'target' variant if
+    # "foo" is present in "devices".
+    device_names = failure.get('devices', [])
+    if isinstance(device_names, str):
+      device_names = [device_names]
+    if len(device_names) != 0:
+      if device_name in device_names:
+        variants.add('target')
+      else:
+        # Skip adding test info as device_name is not present in "devices" entry.
+        continue
+
     env_vars = failure.get('env_vars')
 
     if check_env_vars(env_vars):
@@ -723,8 +769,23 @@
           disabled_test_info[test] = disabled_test_info[test].union(variants)
         else:
           disabled_test_info[test] = variants
+
+    zipapex_disable = failure.get("zipapex", False)
+    if zipapex_disable and zipapex_loc is not None:
+      for test in tests:
+        if test not in RUN_TEST_SET:
+          raise ValueError('%s is not a valid run-test' % (test))
+        if test in disabled_test_info:
+          disabled_test_info[test] = disabled_test_info[test].union(variants)
+        else:
+          disabled_test_info[test] = variants
+
   return disabled_test_info
 
+def gather_disabled_test_info():
+  global DISABLED_TEST_CONTAINER
+  device_name = get_device_name() if 'target' in _user_input_variants['target'] else None
+  DISABLED_TEST_CONTAINER = get_disabled_test_info(device_name)
 
 def check_env_vars(env_vars):
   """Checks if the env variables are set as required to run the test.
@@ -765,6 +826,9 @@
         break
     if variants_present:
       return True
+  for bad_combo in NONFUNCTIONAL_VARIANT_SETS:
+    if bad_combo.issubset(variant_set):
+      return True
   return False
 
 
@@ -836,7 +900,7 @@
   It supports two types of test_name:
   1) Like 001-HelloWorld. In this case, it will just verify if the test actually
   exists and if it does, it returns the testname.
-  2) Like test-art-host-run-test-debug-prebuild-interpreter-no-relocate-ntrace-cms-checkjni-picimage-ndebuggable-001-HelloWorld32
+  2) Like test-art-host-run-test-debug-prebuild-interpreter-no-relocate-ntrace-cms-checkjni-pointer-ids-picimage-ndebuggable-001-HelloWorld32
   In this case, it will parse all the variants and check if they are placed
   correctly. If yes, it will set the various VARIANT_TYPES to use the
   variants required to run the test. Again, it returns the test_name
@@ -903,13 +967,13 @@
   return target_options
 
 def get_default_threads(target):
-  if target is 'target':
+  if target == 'target':
     adb_command = 'adb shell cat /sys/devices/system/cpu/present'
     cpu_info_proc = subprocess.Popen(adb_command.split(), stdout=subprocess.PIPE)
     cpu_info = cpu_info_proc.stdout.read()
     if type(cpu_info) is bytes:
       cpu_info = cpu_info.decode('utf-8')
-    cpu_info_regex = '\d*-(\d*)'
+    cpu_info_regex = r'\d*-(\d*)'
     match = re.match(cpu_info_regex, cpu_info)
     if match:
       return int(match.group(1))
@@ -982,11 +1046,21 @@
     var_group = parser.add_argument_group(
         '{}-type Options'.format(variant_type),
         "Options that control the '{}' variants.".format(variant_type))
+    var_group.add_argument('--all-' + variant_type,
+                           action='store_true',
+                           dest='all_' + variant_type,
+                           help='Enable all variants of ' + variant_type)
     for variant in variant_set:
       flag = '--' + variant
       var_group.add_argument(flag, action='store_true', dest=variant)
 
   options = vars(parser.parse_args())
+  # Handle the --all-<type> meta-options
+  for variant_type, variant_set in VARIANT_TYPE_DICT.items():
+    if options['all_' + variant_type]:
+      for variant in variant_set:
+        options[variant] = True
+
   if options['build_target']:
     options = setup_env_for_build_target(target_config[options['build_target']],
                                          parser, options)
@@ -1034,6 +1108,7 @@
   gather_test_info()
   user_requested_tests = parse_option()
   setup_test_env()
+  gather_disabled_test_info()
   if build:
     build_targets = ''
     if 'host' in _user_input_variants['target']:
@@ -1050,28 +1125,16 @@
       if env.DIST_DIR:
         shutil.copyfile(env.SOONG_OUT_DIR + '/build.ninja', env.DIST_DIR + '/soong.ninja')
       sys.exit(1)
+
   if user_requested_tests:
-    test_runner_thread = threading.Thread(target=run_tests, args=(user_requested_tests,))
+    run_tests(user_requested_tests)
   else:
-    test_runner_thread = threading.Thread(target=run_tests, args=(RUN_TEST_SET,))
-  test_runner_thread.daemon = True
-  try:
-    test_runner_thread.start()
-    # This loops waits for all the threads to finish, unless
-    # stop_testrunner is set to True. When ART_TEST_KEEP_GOING
-    # is set to false, stop_testrunner is set to True as soon as
-    # a test fails to signal the parent thread  to stop
-    # the execution of the testrunner.
-    while threading.active_count() > 1 and not stop_testrunner:
-      time.sleep(0.1)
-    print_analysis()
-  except Exception as e:
-    print_analysis()
-    print_text(str(e))
-    sys.exit(1)
-  if failed_tests:
-    sys.exit(1)
-  sys.exit(0)
+    run_tests(RUN_TEST_SET)
+
+  print_analysis()
+
+  exit_code = 0 if len(failed_tests) == 0 else 1
+  sys.exit(exit_code)
 
 if __name__ == '__main__':
   main()
diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc
index bfd165d..28265fc 100644
--- a/test/ti-agent/common_load.cc
+++ b/test/ti-agent/common_load.cc
@@ -83,6 +83,7 @@
   { "941-recursive-obsolete-jit", common_redefine::OnLoad, nullptr },
   { "943-private-recursive-jit", common_redefine::OnLoad, nullptr },
   { "1919-vminit-thread-start-timing", Test1919VMInitThreadStart::OnLoad, nullptr },
+  { "2031-zygote-compiled-frame-deopt", nullptr, MinimalOnLoad },
 };
 
 static AgentLib* FindAgent(char* name) {
diff --git a/test/ti-agent/early_return_helper.cc b/test/ti-agent/early_return_helper.cc
new file mode 100644
index 0000000..e4aa5d0
--- /dev/null
+++ b/test/ti-agent/early_return_helper.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common_helper.h"
+
+#include "jni.h"
+#include "jvmti.h"
+
+#include "jvmti_helper.h"
+#include "scoped_local_ref.h"
+#include "test_env.h"
+
+namespace art {
+namespace common_early_return {
+
+extern "C" JNIEXPORT void JNICALL Java_art_NonStandardExit_popFrame(
+    JNIEnv* env, jclass k ATTRIBUTE_UNUSED, jthread thr) {
+  JvmtiErrorToException(env, jvmti_env, jvmti_env->PopFrame(thr));
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_NonStandardExit_forceEarlyReturnFloat(
+    JNIEnv* env, jclass k ATTRIBUTE_UNUSED, jthread thr, jfloat val) {
+  JvmtiErrorToException(env, jvmti_env, jvmti_env->ForceEarlyReturnFloat(thr, val));
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_NonStandardExit_forceEarlyReturnDouble(
+    JNIEnv* env, jclass k ATTRIBUTE_UNUSED, jthread thr, jdouble val) {
+  JvmtiErrorToException(env, jvmti_env, jvmti_env->ForceEarlyReturnDouble(thr, val));
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_NonStandardExit_forceEarlyReturnLong(
+    JNIEnv* env, jclass k ATTRIBUTE_UNUSED, jthread thr, jlong val) {
+  JvmtiErrorToException(env, jvmti_env, jvmti_env->ForceEarlyReturnLong(thr, val));
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_NonStandardExit_forceEarlyReturnInt(
+    JNIEnv* env, jclass k ATTRIBUTE_UNUSED, jthread thr, jint val) {
+  JvmtiErrorToException(env, jvmti_env, jvmti_env->ForceEarlyReturnInt(thr, val));
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_NonStandardExit_forceEarlyReturnVoid(
+    JNIEnv* env, jclass k ATTRIBUTE_UNUSED, jthread thr) {
+  JvmtiErrorToException(env, jvmti_env, jvmti_env->ForceEarlyReturnVoid(thr));
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_NonStandardExit_forceEarlyReturnObject(
+    JNIEnv* env, jclass k ATTRIBUTE_UNUSED, jthread thr, jobject val) {
+  JvmtiErrorToException(env, jvmti_env, jvmti_env->ForceEarlyReturnObject(thr, val));
+}
+
+}  // namespace common_early_return
+}  // namespace art
diff --git a/test/ti-agent/jvmti_helper.cc b/test/ti-agent/jvmti_helper.cc
index bceaa6b..22bc64a 100644
--- a/test/ti-agent/jvmti_helper.cc
+++ b/test/ti-agent/jvmti_helper.cc
@@ -15,6 +15,7 @@
  */
 
 #include "jvmti_helper.h"
+#include "jvmti.h"
 #include "test_env.h"
 
 #include <dlfcn.h>
@@ -231,4 +232,52 @@
   __builtin_unreachable();
 }
 
+void DeallocParams(jvmtiEnv* env, jvmtiParamInfo* params, jint n_params) {
+  for (jint i = 0; i < n_params; i++) {
+    Dealloc(env, params[i].name);
+  }
+}
+
+jint GetExtensionEventId(jvmtiEnv* jvmti, const std::string_view& name) {
+  jint n_ext = 0;
+  jint res = -1;
+  bool found_res = false;
+  jvmtiExtensionEventInfo* infos = nullptr;
+  CHECK_EQ(jvmti->GetExtensionEvents(&n_ext, &infos), JVMTI_ERROR_NONE);
+  for (jint i = 0; i < n_ext; i++) {
+    const jvmtiExtensionEventInfo& info = infos[i];
+    if (name == info.id) {
+      res = info.extension_event_index;
+      found_res = true;
+    }
+    DeallocParams(jvmti, info.params, info.param_count);
+    Dealloc(jvmti, info.short_description, info.id, info.params);
+  }
+  Dealloc(jvmti, infos);
+  CHECK(found_res);
+  return res;
+}
+
+void* GetExtensionFunctionVoid(JNIEnv* env, jvmtiEnv* jvmti, const std::string_view& name) {
+  jint n_ext = 0;
+  void* res = nullptr;
+  jvmtiExtensionFunctionInfo* infos = nullptr;
+  if (JvmtiErrorToException(env, jvmti, jvmti->GetExtensionFunctions(&n_ext, &infos))) {
+    return nullptr;
+  }
+  for (jint i = 0; i < n_ext; i++) {
+    const jvmtiExtensionFunctionInfo& info = infos[i];
+    if (name == info.id) {
+      res = reinterpret_cast<void*>(info.func);
+    }
+    DeallocParams(jvmti, info.params, info.param_count);
+    Dealloc(jvmti, info.short_description, info.errors, info.id, info.params);
+  }
+  Dealloc(jvmti, infos);
+  if (res == nullptr) {
+    JvmtiErrorToException(env, jvmti, JVMTI_ERROR_NOT_FOUND);
+  }
+  return res;
+}
+
 }  // namespace art
diff --git a/test/ti-agent/jvmti_helper.h b/test/ti-agent/jvmti_helper.h
index a47a402..74d594f 100644
--- a/test/ti-agent/jvmti_helper.h
+++ b/test/ti-agent/jvmti_helper.h
@@ -79,6 +79,23 @@
 // To print jvmtiError. Does not rely on GetErrorName, so is an approximation.
 std::ostream& operator<<(std::ostream& os, const jvmtiError& rhs);
 
+template <typename T> void Dealloc(jvmtiEnv* env, T* t) {
+  env->Deallocate(reinterpret_cast<unsigned char*>(t));
+}
+
+template <typename T, typename... Rest> void Dealloc(jvmtiEnv* env, T* t, Rest... rs) {
+  Dealloc(env, t);
+  Dealloc(env, rs...);
+}
+
+void* GetExtensionFunctionVoid(JNIEnv* env, jvmtiEnv* jvmti, const std::string_view& name);
+
+template<typename T> T GetExtensionFunction(JNIEnv* env, jvmtiEnv* jvmti, const std::string_view& name) {
+  return reinterpret_cast<T>(GetExtensionFunctionVoid(env, jvmti, name));
+}
+
+jint GetExtensionEventId(jvmtiEnv* jvmti, const std::string_view& name);
+
 }  // namespace art
 
 #endif  // ART_TEST_TI_AGENT_JVMTI_HELPER_H_
diff --git a/test/ti-agent/redefinition_helper.cc b/test/ti-agent/redefinition_helper.cc
index 0e4b1bd..0baa9fe 100644
--- a/test/ti-agent/redefinition_helper.cc
+++ b/test/ti-agent/redefinition_helper.cc
@@ -31,8 +31,13 @@
 
 namespace art {
 
+enum class RedefineType {
+  kNormal,
+  kStructural,
+};
+
 static void SetupCommonRedefine();
-static void SetupCommonRetransform();
+static void SetupCommonRetransform(RedefineType type);
 static void SetupCommonTransform();
 template <bool is_redefine>
 static void throwCommonRedefinitionError(jvmtiEnv* jvmti,
@@ -68,6 +73,7 @@
 #define CONFIGURATION_COMMON_REDEFINE 0
 #define CONFIGURATION_COMMON_RETRANSFORM 1
 #define CONFIGURATION_COMMON_TRANSFORM 2
+#define CONFIGURATION_STRUCTURAL_TRANSFORM 3
 
 extern "C" JNIEXPORT void JNICALL Java_art_Redefinition_nativeSetTestConfiguration(JNIEnv*,
                                                                                    jclass,
@@ -78,21 +84,54 @@
       return;
     }
     case CONFIGURATION_COMMON_RETRANSFORM: {
-      SetupCommonRetransform();
+      SetupCommonRetransform(RedefineType::kNormal);
       return;
     }
     case CONFIGURATION_COMMON_TRANSFORM: {
       SetupCommonTransform();
       return;
     }
+    case CONFIGURATION_STRUCTURAL_TRANSFORM: {
+      SetupCommonRetransform(RedefineType::kStructural);
+      return;
+    }
     default: {
       LOG(FATAL) << "Unknown test configuration: " << type;
     }
   }
 }
 
+template<RedefineType kType>
+static bool SupportsAndIsJVM() {
+  if constexpr (kType == RedefineType::kStructural) {
+    return false;
+  } else {
+    return IsJVM();
+  }
+}
+
+
 namespace common_redefine {
 
+template <RedefineType kType>
+static jvmtiError CallRedefineEntrypoint(JNIEnv* env,
+                                         jvmtiEnv* jvmti,
+                                         jint num_defs,
+                                         const jvmtiClassDefinition* defs) {
+  decltype(jvmti->functions->RedefineClasses) entrypoint = nullptr;
+  if constexpr (kType == RedefineType::kNormal) {
+    entrypoint = jvmti->functions->RedefineClasses;
+  } else {
+    entrypoint = GetExtensionFunction<decltype(entrypoint)>(
+        env, jvmti_env, "com.android.art.class.structurally_redefine_classes");
+  }
+  if (entrypoint == nullptr) {
+    LOG(INFO) << "Could not find entrypoint!";
+    return JVMTI_ERROR_NOT_AVAILABLE;
+  }
+  return entrypoint(jvmti, num_defs, defs);
+}
+
 static void throwRedefinitionError(jvmtiEnv* jvmti,
                                    JNIEnv* env,
                                    jint num_targets,
@@ -101,6 +140,7 @@
   return throwCommonRedefinitionError<true>(jvmti, env, num_targets, target, res);
 }
 
+template<RedefineType kType>
 static void DoMultiClassRedefine(jvmtiEnv* jvmti_env,
                                  JNIEnv* env,
                                  jint num_redefines,
@@ -109,31 +149,81 @@
                                  jbyteArray* dex_file_bytes) {
   std::vector<jvmtiClassDefinition> defs;
   for (jint i = 0; i < num_redefines; i++) {
-    jbyteArray desired_array = IsJVM() ? class_file_bytes[i] : dex_file_bytes[i];
+    jbyteArray desired_array = SupportsAndIsJVM<kType>() ? class_file_bytes[i] : dex_file_bytes[i];
     jint len = static_cast<jint>(env->GetArrayLength(desired_array));
     const unsigned char* redef_bytes = reinterpret_cast<const unsigned char*>(
         env->GetByteArrayElements(desired_array, nullptr));
     defs.push_back({targets[i], static_cast<jint>(len), redef_bytes});
   }
-  jvmtiError res = jvmti_env->RedefineClasses(num_redefines, defs.data());
+  jvmtiError res = CallRedefineEntrypoint<kType>(env, jvmti_env, num_redefines, defs.data());
   if (res != JVMTI_ERROR_NONE) {
     throwRedefinitionError(jvmti_env, env, num_redefines, targets, res);
   }
 }
 
+template<RedefineType kType>
 static void DoClassRedefine(jvmtiEnv* jvmti_env,
                             JNIEnv* env,
                             jclass target,
                             jbyteArray class_file_bytes,
                             jbyteArray dex_file_bytes) {
-  return DoMultiClassRedefine(jvmti_env, env, 1, &target, &class_file_bytes, &dex_file_bytes);
+  return DoMultiClassRedefine<kType>(jvmti_env, env, 1, &target, &class_file_bytes, &dex_file_bytes);
+}
+
+extern "C" JNIEXPORT jboolean JNICALL
+Java_art_Redefinition_isStructurallyModifiable(JNIEnv* env, jclass, jclass target) {
+  using ArtCanStructurallyRedefineClass =
+      jvmtiError (*)(jvmtiEnv * env, jclass k, jboolean * result);
+  ArtCanStructurallyRedefineClass can_redef = GetExtensionFunction<ArtCanStructurallyRedefineClass>(
+      env, jvmti_env, "com.android.art.class.is_structurally_modifiable_class");
+  if (can_redef == nullptr || env->ExceptionCheck()) {
+    return false;
+  }
+  jboolean result = false;
+  JvmtiErrorToException(env, jvmti_env, can_redef(jvmti_env, target, &result));
+  return result;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_Redefinition_doCommonStructuralClassRedefinition(
+    JNIEnv* env, jclass, jclass target, jbyteArray dex_file_bytes) {
+  DoClassRedefine<RedefineType::kStructural>(jvmti_env, env, target, nullptr, dex_file_bytes);
 }
 
 // Magic JNI export that classes can use for redefining classes.
 // To use classes should declare this as a native function with signature (Ljava/lang/Class;[B[B)V
 extern "C" JNIEXPORT void JNICALL Java_art_Redefinition_doCommonClassRedefinition(
     JNIEnv* env, jclass, jclass target, jbyteArray class_file_bytes, jbyteArray dex_file_bytes) {
-  DoClassRedefine(jvmti_env, env, target, class_file_bytes, dex_file_bytes);
+  DoClassRedefine<RedefineType::kNormal>(jvmti_env, env, target, class_file_bytes, dex_file_bytes);
+}
+
+// Magic JNI export that classes can use for redefining classes.
+// To use classes should declare this as a native function with signature
+// ([Ljava/lang/Class;[[B[[B)V
+extern "C" JNIEXPORT void JNICALL Java_art_Redefinition_doCommonMultiStructuralClassRedefinition(
+    JNIEnv* env,
+    jclass,
+    jobjectArray targets,
+    jobjectArray dex_file_bytes) {
+  std::vector<jclass> classes;
+  std::vector<jbyteArray> class_files;
+  std::vector<jbyteArray> dex_files;
+  jint len = env->GetArrayLength(targets);
+  if (len != env->GetArrayLength(dex_file_bytes)) {
+    env->ThrowNew(env->FindClass("java/lang/IllegalArgumentException"),
+                  "the three array arguments passed to this function have different lengths!");
+    return;
+  }
+  for (jint i = 0; i < len; i++) {
+    classes.push_back(static_cast<jclass>(env->GetObjectArrayElement(targets, i)));
+    dex_files.push_back(static_cast<jbyteArray>(env->GetObjectArrayElement(dex_file_bytes, i)));
+    class_files.push_back(nullptr);
+  }
+  return DoMultiClassRedefine<RedefineType::kStructural>(jvmti_env,
+                                                         env,
+                                                         len,
+                                                         classes.data(),
+                                                         class_files.data(),
+                                                         dex_files.data());
 }
 
 // Magic JNI export that classes can use for redefining classes.
@@ -159,12 +249,12 @@
     dex_files.push_back(static_cast<jbyteArray>(env->GetObjectArrayElement(dex_file_bytes, i)));
     class_files.push_back(static_cast<jbyteArray>(env->GetObjectArrayElement(class_file_bytes, i)));
   }
-  return DoMultiClassRedefine(jvmti_env,
-                              env,
-                              len,
-                              classes.data(),
-                              class_files.data(),
-                              dex_files.data());
+  return DoMultiClassRedefine<RedefineType::kNormal>(jvmti_env,
+                                                     env,
+                                                     len,
+                                                     classes.data(),
+                                                     class_files.data(),
+                                                     dex_files.data());
 }
 
 // Get all capabilities except those related to retransformation.
@@ -350,7 +440,7 @@
     printf("Unable to get jvmti env!\n");
     return 1;
   }
-  SetupCommonRetransform();
+  SetupCommonRetransform(RedefineType::kNormal);
   return 0;
 }
 
@@ -379,11 +469,20 @@
   jvmti_env->AddCapabilities(&caps);
 }
 
-static void SetupCommonRetransform() {
+static void SetupCommonRetransform(RedefineType type) {
   SetStandardCapabilities(jvmti_env);
-  current_callbacks.ClassFileLoadHook = common_retransform::CommonClassFileLoadHookRetransformable;
-  jvmtiError res = jvmti_env->SetEventCallbacks(&current_callbacks, sizeof(current_callbacks));
-  CHECK_EQ(res, JVMTI_ERROR_NONE);
+  if (type == RedefineType::kNormal) {
+    current_callbacks.ClassFileLoadHook =
+        common_retransform::CommonClassFileLoadHookRetransformable;
+    jvmtiError res = jvmti_env->SetEventCallbacks(&current_callbacks, sizeof(current_callbacks));
+    CHECK_EQ(res, JVMTI_ERROR_NONE);
+  } else {
+    jvmtiError res = jvmti_env->SetExtensionEventCallback(
+        GetExtensionEventId(jvmti_env, "com.android.art.class.structural_dex_file_load_hook"),
+        reinterpret_cast<jvmtiExtensionEvent>(
+            common_retransform::CommonClassFileLoadHookRetransformable));
+    CHECK_EQ(res, JVMTI_ERROR_NONE);
+  }
   common_retransform::gTransformations.clear();
 }
 
diff --git a/test/ti-agent/suspend_event_helper.cc b/test/ti-agent/suspend_event_helper.cc
new file mode 100644
index 0000000..cbc54d4
--- /dev/null
+++ b/test/ti-agent/suspend_event_helper.cc
@@ -0,0 +1,803 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "suspend_event_helper.h"
+
+#include <inttypes.h>
+
+#include <cstdio>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "android-base/logging.h"
+#include "android-base/stringprintf.h"
+
+#include "jni.h"
+#include "jvmti.h"
+#include "scoped_local_ref.h"
+#include "scoped_utf_chars.h"
+
+// Test infrastructure
+#include "jni_binder.h"
+#include "jni_helper.h"
+#include "jvmti_helper.h"
+#include "test_env.h"
+#include "ti_macros.h"
+
+namespace art {
+namespace common_suspend_event {
+
+struct TestData {
+  jlocation target_loc;
+  jmethodID target_method;
+  jclass target_klass;
+  jfieldID target_field;
+  jrawMonitorID notify_monitor;
+  jint frame_pop_offset;
+  jmethodID frame_pop_setup_method;
+  std::vector<std::string> interesting_classes;
+  bool hit_location;
+
+  TestData(jvmtiEnv* jvmti,
+           JNIEnv* env,
+           jlocation loc,
+           jobject meth,
+           jclass klass,
+           jobject field,
+           jobject setup_meth,
+           jint pop_offset,
+           const std::vector<std::string>&& interesting)
+      : target_loc(loc), target_method(meth != nullptr ? env->FromReflectedMethod(meth) : nullptr),
+        target_klass(reinterpret_cast<jclass>(env->NewGlobalRef(klass))),
+        target_field(field != nullptr ? env->FromReflectedField(field) : nullptr),
+        frame_pop_offset(pop_offset),
+        frame_pop_setup_method(setup_meth != nullptr ? env->FromReflectedMethod(setup_meth)
+                                                     : nullptr),
+        interesting_classes(interesting), hit_location(false) {
+    JvmtiErrorToException(
+        env, jvmti, jvmti->CreateRawMonitor("SuspendStopMonitor", &notify_monitor));
+  }
+
+  void PerformSuspend(jvmtiEnv* jvmti, JNIEnv* env) {
+    // Wake up the waiting thread.
+    JvmtiErrorToException(env, jvmti, jvmti->RawMonitorEnter(notify_monitor));
+    hit_location = true;
+    JvmtiErrorToException(env, jvmti, jvmti->RawMonitorNotifyAll(notify_monitor));
+    JvmtiErrorToException(env, jvmti, jvmti->RawMonitorExit(notify_monitor));
+    // Suspend ourself
+    jvmti->SuspendThread(nullptr);
+  }
+};
+
+void PerformSuspension(jvmtiEnv* jvmti, JNIEnv* env) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti->GetThreadLocalStorage(/* thread */ nullptr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL
+cbSingleStep(jvmtiEnv* jvmti, JNIEnv* env, jthread thr, jmethodID meth, jlocation loc) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti, jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (meth != data->target_method || loc != data->target_loc) {
+    return;
+  }
+  data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbExceptionCatch(jvmtiEnv* jvmti,
+                              JNIEnv* env,
+                              jthread thr,
+                              jmethodID method,
+                              jlocation location ATTRIBUTE_UNUSED,
+                              jobject exception ATTRIBUTE_UNUSED) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti, jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (method != data->target_method) {
+    return;
+  }
+  data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbException(jvmtiEnv* jvmti,
+                         JNIEnv* env,
+                         jthread thr,
+                         jmethodID method,
+                         jlocation location ATTRIBUTE_UNUSED,
+                         jobject exception ATTRIBUTE_UNUSED,
+                         jmethodID catch_method ATTRIBUTE_UNUSED,
+                         jlocation catch_location ATTRIBUTE_UNUSED) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti, jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (method != data->target_method) {
+    return;
+  }
+  data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbMethodEntry(jvmtiEnv* jvmti, JNIEnv* env, jthread thr, jmethodID method) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti, jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (method != data->target_method) {
+    return;
+  }
+  data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbMethodExit(jvmtiEnv* jvmti,
+                          JNIEnv* env,
+                          jthread thr,
+                          jmethodID method,
+                          jboolean was_popped_by_exception ATTRIBUTE_UNUSED,
+                          jvalue return_value ATTRIBUTE_UNUSED) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti, jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (method != data->target_method) {
+    return;
+  }
+  data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbFieldModification(jvmtiEnv* jvmti,
+                                 JNIEnv* env,
+                                 jthread thr,
+                                 jmethodID method ATTRIBUTE_UNUSED,
+                                 jlocation location ATTRIBUTE_UNUSED,
+                                 jclass field_klass ATTRIBUTE_UNUSED,
+                                 jobject object ATTRIBUTE_UNUSED,
+                                 jfieldID field,
+                                 char signature_type ATTRIBUTE_UNUSED,
+                                 jvalue new_value ATTRIBUTE_UNUSED) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti, jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (field != data->target_field) {
+    // TODO What to do here.
+    LOG(FATAL) << "Strange, shouldn't get here!";
+  }
+  data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbFieldAccess(jvmtiEnv* jvmti,
+                           JNIEnv* env,
+                           jthread thr,
+                           jmethodID method ATTRIBUTE_UNUSED,
+                           jlocation location ATTRIBUTE_UNUSED,
+                           jclass field_klass,
+                           jobject object ATTRIBUTE_UNUSED,
+                           jfieldID field) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti, jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (field != data->target_field || !env->IsSameObject(field_klass, data->target_klass)) {
+    // TODO What to do here.
+    LOG(FATAL) << "Strange, shouldn't get here!";
+  }
+  data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL
+cbBreakpointHit(jvmtiEnv* jvmti, JNIEnv* env, jthread thr, jmethodID method, jlocation loc) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti, jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (data->frame_pop_setup_method == method) {
+    CHECK(loc == 0) << "We should have stopped at location 0";
+    if (JvmtiErrorToException(env, jvmti, jvmti->NotifyFramePop(thr, data->frame_pop_offset))) {
+      return;
+    }
+    return;
+  }
+  if (method != data->target_method || loc != data->target_loc) {
+    // TODO What to do here.
+    LOG(FATAL) << "Strange, shouldn't get here!";
+  }
+  data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbFramePop(jvmtiEnv* jvmti,
+                        JNIEnv* env,
+                        jthread thr,
+                        jmethodID method ATTRIBUTE_UNUSED,
+                        jboolean was_popped_by_exception ATTRIBUTE_UNUSED) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti, jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  data->PerformSuspend(jvmti, env);
+}
+
+void JNICALL cbClassLoadOrPrepare(jvmtiEnv* jvmti, JNIEnv* env, jthread thr, jclass klass) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti, jvmti->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  char* name;
+  if (JvmtiErrorToException(env, jvmti, jvmti->GetClassSignature(klass, &name, nullptr))) {
+    return;
+  }
+  std::string name_str(name);
+  if (JvmtiErrorToException(
+          env, jvmti, jvmti->Deallocate(reinterpret_cast<unsigned char*>(name)))) {
+    return;
+  }
+  if (std::find(data->interesting_classes.cbegin(), data->interesting_classes.cend(), name_str) !=
+      data->interesting_classes.cend()) {
+    data->PerformSuspend(jvmti, env);
+  }
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_SuspendEvents_setupTest(JNIEnv* env,
+                                                                   jclass klass ATTRIBUTE_UNUSED) {
+  jvmtiCapabilities caps;
+  memset(&caps, 0, sizeof(caps));
+  // Most of these will already be there but might as well be complete.
+  caps.can_pop_frame = 1;
+  caps.can_force_early_return = 1;
+  caps.can_generate_single_step_events = 1;
+  caps.can_generate_breakpoint_events = 1;
+  caps.can_suspend = 1;
+  caps.can_generate_method_entry_events = 1;
+  caps.can_generate_method_exit_events = 1;
+  caps.can_generate_monitor_events = 1;
+  caps.can_generate_exception_events = 1;
+  caps.can_generate_frame_pop_events = 1;
+  caps.can_generate_field_access_events = 1;
+  caps.can_generate_field_modification_events = 1;
+  caps.can_redefine_classes = 1;
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->AddCapabilities(&caps))) {
+    return;
+  }
+  jvmtiEventCallbacks cb;
+  memset(&cb, 0, sizeof(cb));
+  // TODO Add the rest of these.
+  cb.Breakpoint = cbBreakpointHit;
+  cb.SingleStep = cbSingleStep;
+  cb.FieldAccess = cbFieldAccess;
+  cb.FieldModification = cbFieldModification;
+  cb.MethodEntry = cbMethodEntry;
+  cb.MethodExit = cbMethodExit;
+  cb.Exception = cbException;
+  cb.ExceptionCatch = cbExceptionCatch;
+  cb.FramePop = cbFramePop;
+  cb.ClassLoad = cbClassLoadOrPrepare;
+  cb.ClassPrepare = cbClassLoadOrPrepare;
+  JvmtiErrorToException(env, jvmti_env, jvmti_env->SetEventCallbacks(&cb, sizeof(cb)));
+}
+
+static bool DeleteTestData(JNIEnv* env, jthread thr, TestData* data) {
+  env->DeleteGlobalRef(data->target_klass);
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetThreadLocalStorage(thr, nullptr))) {
+    return false;
+  }
+  return JvmtiErrorToException(
+      env, jvmti_env, jvmti_env->Deallocate(reinterpret_cast<uint8_t*>(data)));
+}
+
+static TestData* SetupTestData(JNIEnv* env,
+                               jobject meth,
+                               jlocation loc,
+                               jclass target_klass,
+                               jobject field,
+                               jobject setup_meth,
+                               jint pop_offset,
+                               const std::vector<std::string>&& interesting_names) {
+  void* data_ptr;
+  TestData* data;
+  if (JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti_env->Allocate(sizeof(TestData), reinterpret_cast<uint8_t**>(&data_ptr)))) {
+    return nullptr;
+  }
+  data = new (data_ptr) TestData(jvmti_env,
+                                 env,
+                                 loc,
+                                 meth,
+                                 target_klass,
+                                 field,
+                                 setup_meth,
+                                 pop_offset,
+                                 std::move(interesting_names));
+  if (env->ExceptionCheck()) {
+    env->DeleteGlobalRef(data->target_klass);
+    jvmti_env->Deallocate(reinterpret_cast<uint8_t*>(data));
+    return nullptr;
+  }
+  return data;
+}
+
+static TestData* SetupTestData(JNIEnv* env,
+                               jobject meth,
+                               jlocation loc,
+                               jclass target_klass,
+                               jobject field,
+                               jobject setup_meth,
+                               jint pop_offset) {
+  std::vector<std::string> empty;
+  return SetupTestData(
+      env, meth, loc, target_klass, field, setup_meth, pop_offset, std::move(empty));
+}
+
+extern "C" JNIEXPORT void JNICALL
+Java_art_SuspendEvents_setupSuspendClassEvent(JNIEnv* env,
+                                              jclass klass ATTRIBUTE_UNUSED,
+                                              jint event_num,
+                                              jobjectArray interesting_names,
+                                              jthread thr) {
+  CHECK(event_num == JVMTI_EVENT_CLASS_LOAD || event_num == JVMTI_EVENT_CLASS_PREPARE);
+  std::vector<std::string> names;
+  jint cnt = env->GetArrayLength(interesting_names);
+  for (jint i = 0; i < cnt; i++) {
+    env->PushLocalFrame(1);
+    jstring name_obj = reinterpret_cast<jstring>(env->GetObjectArrayElement(interesting_names, i));
+    const char* name_chr = env->GetStringUTFChars(name_obj, nullptr);
+    names.push_back(std::string(name_chr));
+    env->ReleaseStringUTFChars(name_obj, name_chr);
+    env->PopLocalFrame(nullptr);
+  }
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data == nullptr) << "Data was not cleared!";
+  data = SetupTestData(env, nullptr, 0, nullptr, nullptr, nullptr, 0, std::move(names));
+  if (data == nullptr) {
+    return;
+  }
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetThreadLocalStorage(thr, data))) {
+    return;
+  }
+  JvmtiErrorToException(
+      env,
+      jvmti_env,
+      jvmti_env->SetEventNotificationMode(JVMTI_ENABLE, static_cast<jvmtiEvent>(event_num), thr));
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_SuspendEvents_clearSuspendClassEvent(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thr) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti_env->SetEventNotificationMode(JVMTI_DISABLE, JVMTI_EVENT_CLASS_LOAD, thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti_env->SetEventNotificationMode(JVMTI_DISABLE, JVMTI_EVENT_CLASS_PREPARE, thr))) {
+    return;
+  }
+  DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_SuspendEvents_setupSuspendSingleStepAt(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject meth, jlocation loc, jthread thr) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data == nullptr) << "Data was not cleared!";
+  data = SetupTestData(env, meth, loc, nullptr, nullptr, nullptr, 0);
+  if (data == nullptr) {
+    return;
+  }
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetThreadLocalStorage(thr, data))) {
+    return;
+  }
+  JvmtiErrorToException(
+      env,
+      jvmti_env,
+      jvmti_env->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_SINGLE_STEP, thr));
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_SuspendEvents_clearSuspendSingleStepFor(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thr) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti_env->SetEventNotificationMode(JVMTI_DISABLE, JVMTI_EVENT_SINGLE_STEP, thr))) {
+    return;
+  }
+  DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_SuspendEvents_setupSuspendPopFrameEvent(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jint offset, jobject breakpoint_func, jthread thr) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data == nullptr) << "Data was not cleared!";
+  data = SetupTestData(env, nullptr, 0, nullptr, nullptr, breakpoint_func, offset);
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetThreadLocalStorage(thr, data))) {
+    return;
+  }
+  if (JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti_env->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_FRAME_POP, thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti_env->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_BREAKPOINT, thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->SetBreakpoint(data->frame_pop_setup_method, 0))) {
+    return;
+  }
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_SuspendEvents_clearSuspendPopFrameEvent(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thr) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti_env->SetEventNotificationMode(JVMTI_DISABLE, JVMTI_EVENT_FRAME_POP, thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti_env->SetEventNotificationMode(JVMTI_DISABLE, JVMTI_EVENT_BREAKPOINT, thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->ClearBreakpoint(data->frame_pop_setup_method, 0))) {
+    return;
+  }
+  DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_SuspendEvents_setupSuspendBreakpointFor(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject meth, jlocation loc, jthread thr) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data == nullptr) << "Data was not cleared!";
+  data = SetupTestData(env, meth, loc, nullptr, nullptr, nullptr, 0);
+  if (data == nullptr) {
+    return;
+  }
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetThreadLocalStorage(thr, data))) {
+    return;
+  }
+  if (JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti_env->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_BREAKPOINT, thr))) {
+    return;
+  }
+  JvmtiErrorToException(
+      env, jvmti_env, jvmti_env->SetBreakpoint(data->target_method, data->target_loc));
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_SuspendEvents_clearSuspendBreakpointFor(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thr) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti_env->SetEventNotificationMode(JVMTI_DISABLE, JVMTI_EVENT_BREAKPOINT, thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->ClearBreakpoint(data->target_method, data->target_loc))) {
+    return;
+  }
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetThreadLocalStorage(thr, nullptr))) {
+    return;
+  }
+  DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_SuspendEvents_setupSuspendExceptionEvent(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method, jboolean is_catch, jthread thr) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data == nullptr) << "Data was not cleared!";
+  data = SetupTestData(env, method, 0, nullptr, nullptr, nullptr, 0);
+  if (data == nullptr) {
+    return;
+  }
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetThreadLocalStorage(thr, data))) {
+    return;
+  }
+  JvmtiErrorToException(
+      env,
+      jvmti_env,
+      jvmti_env->SetEventNotificationMode(
+          JVMTI_ENABLE, is_catch ? JVMTI_EVENT_EXCEPTION_CATCH : JVMTI_EVENT_EXCEPTION, thr));
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_SuspendEvents_clearSuspendExceptionEvent(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thr) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti_env->SetEventNotificationMode(JVMTI_DISABLE, JVMTI_EVENT_EXCEPTION_CATCH, thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti_env->SetEventNotificationMode(JVMTI_DISABLE, JVMTI_EVENT_EXCEPTION, thr))) {
+    return;
+  }
+  DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_SuspendEvents_setupSuspendMethodEvent(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jobject method, jboolean enter, jthread thr) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data == nullptr) << "Data was not cleared!";
+  data = SetupTestData(env, method, 0, nullptr, nullptr, nullptr, 0);
+  if (data == nullptr) {
+    return;
+  }
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetThreadLocalStorage(thr, data))) {
+    return;
+  }
+  JvmtiErrorToException(
+      env,
+      jvmti_env,
+      jvmti_env->SetEventNotificationMode(
+          JVMTI_ENABLE, enter ? JVMTI_EVENT_METHOD_ENTRY : JVMTI_EVENT_METHOD_EXIT, thr));
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_SuspendEvents_clearSuspendMethodEvent(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thr) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti_env->SetEventNotificationMode(JVMTI_DISABLE, JVMTI_EVENT_METHOD_EXIT, thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti_env->SetEventNotificationMode(JVMTI_DISABLE, JVMTI_EVENT_METHOD_ENTRY, thr))) {
+    return;
+  }
+  DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT void JNICALL
+Java_art_SuspendEvents_setupFieldSuspendFor(JNIEnv* env,
+                                            jclass klass ATTRIBUTE_UNUSED,
+                                            jclass target_klass,
+                                            jobject field,
+                                            jboolean access,
+                                            jthread thr) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data == nullptr) << "Data was not cleared!";
+  data = SetupTestData(env, nullptr, 0, target_klass, field, nullptr, 0);
+  if (data == nullptr) {
+    return;
+  }
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetThreadLocalStorage(thr, data))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(
+                                JVMTI_ENABLE,
+                                access ? JVMTI_EVENT_FIELD_ACCESS : JVMTI_EVENT_FIELD_MODIFICATION,
+                                thr))) {
+    return;
+  }
+  if (access) {
+    JvmtiErrorToException(
+        env, jvmti_env, jvmti_env->SetFieldAccessWatch(data->target_klass, data->target_field));
+  } else {
+    JvmtiErrorToException(
+        env,
+        jvmti_env,
+        jvmti_env->SetFieldModificationWatch(data->target_klass, data->target_field));
+  }
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_SuspendEvents_clearFieldSuspendFor(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thr) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti_env->SetEventNotificationMode(JVMTI_DISABLE, JVMTI_EVENT_FIELD_ACCESS, thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(env,
+                            jvmti_env,
+                            jvmti_env->SetEventNotificationMode(
+                                JVMTI_DISABLE, JVMTI_EVENT_FIELD_MODIFICATION, thr))) {
+    return;
+  }
+  if (JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti_env->ClearFieldModificationWatch(data->target_klass, data->target_field)) &&
+      JvmtiErrorToException(
+          env,
+          jvmti_env,
+          jvmti_env->ClearFieldAccessWatch(data->target_klass, data->target_field))) {
+    return;
+  } else {
+    env->ExceptionClear();
+  }
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetThreadLocalStorage(thr, nullptr))) {
+    return;
+  }
+  DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_SuspendEvents_setupWaitForNativeCall(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thr) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data == nullptr) << "Data was not cleared!";
+  data = SetupTestData(env, nullptr, 0, nullptr, nullptr, nullptr, 0);
+  if (data == nullptr) {
+    return;
+  }
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetThreadLocalStorage(thr, data))) {
+    return;
+  }
+}
+
+extern "C" JNIEXPORT void JNICALL Java_art_SuspendEvents_clearWaitForNativeCall(
+    JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thr) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->SetThreadLocalStorage(thr, nullptr))) {
+    return;
+  }
+  DeleteTestData(env, thr, data);
+}
+
+extern "C" JNIEXPORT void JNICALL
+Java_art_SuspendEvents_waitForSuspendHit(JNIEnv* env, jclass klass ATTRIBUTE_UNUSED, jthread thr) {
+  TestData* data;
+  if (JvmtiErrorToException(
+          env, jvmti_env, jvmti_env->GetThreadLocalStorage(thr, reinterpret_cast<void**>(&data)))) {
+    return;
+  }
+  CHECK(data != nullptr);
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorEnter(data->notify_monitor))) {
+    return;
+  }
+  while (!data->hit_location) {
+    if (JvmtiErrorToException(
+            env, jvmti_env, jvmti_env->RawMonitorWait(data->notify_monitor, -1))) {
+      return;
+    }
+  }
+  if (JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorExit(data->notify_monitor))) {
+    return;
+  }
+  jint state = 0;
+  while (!JvmtiErrorToException(env, jvmti_env, jvmti_env->GetThreadState(thr, &state)) &&
+         (state & JVMTI_THREAD_STATE_SUSPENDED) == 0) {
+  }
+}
+}  // namespace common_suspend_event
+}  // namespace art
diff --git a/test/ti-agent/suspend_event_helper.h b/test/ti-agent/suspend_event_helper.h
new file mode 100644
index 0000000..74740bc
--- /dev/null
+++ b/test/ti-agent/suspend_event_helper.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TEST_TI_AGENT_SUSPEND_EVENT_HELPER_H_
+#define ART_TEST_TI_AGENT_SUSPEND_EVENT_HELPER_H_
+
+#include "jni.h"
+#include "jvmti.h"
+
+namespace art {
+namespace common_suspend_event {
+
+void PerformSuspension(jvmtiEnv* jvmti, JNIEnv* env);
+
+}  // namespace common_suspend_event
+}  // namespace art
+#endif  // ART_TEST_TI_AGENT_SUSPEND_EVENT_HELPER_H_
diff --git a/test/utils/get-device-isa b/test/utils/get-device-isa
new file mode 100755
index 0000000..c9b342d
--- /dev/null
+++ b/test/utils/get-device-isa
@@ -0,0 +1,72 @@
+#! /bin/bash
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+usage() {
+  cat >&2 <<EOF
+Determine and print the 32- or 64-bit architecture of a device.
+
+Usage:
+  $0 --32    Select the 32-bit architecture
+  $0 --64    Select the 64-bit architecture
+EOF
+  exit 1
+}
+
+if [[ $# -ne 1 ]]; then
+  usage
+fi
+
+ARCHITECTURES_32="(arm|x86|none)"
+ARCHITECTURES_64="(arm64|x86_64|none)"
+
+case "$1" in
+  (--32)
+    ARCHITECTURES_PATTERN="${ARCHITECTURES_32}"
+    ;;
+  (--64)
+    ARCHITECTURES_PATTERN="${ARCHITECTURES_64}"
+    ;;
+  (*) usage;;
+esac
+
+# Need to be root to query /data/dalvik-cache
+adb root > /dev/null
+adb wait-for-device
+ISA=
+ISA_adb_invocation=
+ISA_outcome=
+# We iterate a few times to workaround an adb issue. b/32655576
+for i in {1..10}; do
+  ISA_adb_invocation=$(adb shell ls /data/dalvik-cache)
+  ISA_outcome=$?
+  ISA=$(echo $ISA_adb_invocation | grep -Ewo "${ARCHITECTURES_PATTERN}")
+  if [[ -n "$ISA" ]]; then
+    break;
+  fi
+done
+if [[ -z "$ISA" ]]; then
+  echo >&2 "Unable to determine architecture"
+  # Print a few things for helping diagnosing the problem.
+  echo >&2 "adb invocation output: $ISA_adb_invocation"
+  echo >&2 "adb invocation outcome: $ISA_outcome"
+  echo >&2 $(adb shell ls -F /data/dalvik-cache)
+  echo >&2 $(adb shell ls /data/dalvik-cache)
+  echo >&2 ${ARCHITECTURES_PATTERN}
+  echo >&2 $(adb shell ls -F /data/dalvik-cache | grep -Ewo "${ARCHITECTURES_PATTERN}")
+  exit 1
+fi
+
+echo "$ISA"
diff --git a/test/utils/get-device-test-native-lib-path b/test/utils/get-device-test-native-lib-path
new file mode 100755
index 0000000..21ea98c
--- /dev/null
+++ b/test/utils/get-device-test-native-lib-path
@@ -0,0 +1,47 @@
+#! /bin/bash
+#
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+usage() {
+  cat >&2 <<EOF
+Determine the 32- or 64-bit architecture of a device and print the path to
+native libraries installed on the device for testing purposes.
+
+Usage:
+  $0 --32    Select the 32-bit architecture
+  $0 --64    Select the 64-bit architecture
+EOF
+  exit 1
+}
+
+if [[ $# -ne 1 ]]; then
+  usage
+fi
+
+case "$1" in
+  (--32) TEST_DIRECTORY="nativetest";;
+  (--64) TEST_DIRECTORY="nativetest64";;
+  (*) usage;;
+esac
+
+if [[ -z "$ANDROID_BUILD_TOP" ]]; then
+  echo 'ANDROID_BUILD_TOP environment variable is empty; did you forget to run `lunch`?'
+  exit 1
+fi
+
+bitness_flag=$1
+ISA=$("$ANDROID_BUILD_TOP/art/test/utils/get-device-isa" "$bitness_flag")
+
+echo "/data/${TEST_DIRECTORY}/art/${ISA}"
diff --git a/tools/Android.bp b/tools/Android.bp
index a7ed9bc..2a8ff0a 100644
--- a/tools/Android.bp
+++ b/tools/Android.bp
@@ -15,16 +15,24 @@
 //
 
 python_binary_host {
-  name: "generate_operator_out",
-  srcs: [
-    "generate_operator_out.py",
-  ],
-  version: {
-    py2: {
-      enabled: true,
+    name: "generate_operator_out",
+    srcs: [
+        "generate_operator_out.py",
+    ],
+    version: {
+        py2: {
+            enabled: true,
+        },
+        py3: {
+            enabled: false,
+        },
     },
-    py3: {
-      enabled: false,
-    },
-  },
+}
+
+// Copy the art shell script to the host and target's bin directory
+sh_binary {
+    name: "art-script",
+    host_supported: true,
+    src: "art",
+    filename_from_src: true,
 }
diff --git a/tools/Android.mk b/tools/Android.mk
deleted file mode 100644
index e90f5f5..0000000
--- a/tools/Android.mk
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-LOCAL_PATH := $(call my-dir)
-
-# Copy the art shell script to the host's bin directory
-include $(CLEAR_VARS)
-LOCAL_IS_HOST_MODULE := true
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_MODULE := art-script
-LOCAL_SRC_FILES := art
-LOCAL_MODULE_STEM := art
-include $(BUILD_PREBUILT)
-
-# Copy the art shell script to the target's bin directory
-include $(CLEAR_VARS)
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_MODULE := art-script
-LOCAL_SRC_FILES := art
-LOCAL_MODULE_STEM := art
-include $(BUILD_PREBUILT)
-
-include $(LOCAL_PATH)/class2greylist/test/Android.mk
diff --git a/tools/ahat/Android.bp b/tools/ahat/Android.bp
index c77ecbf..85aca4c 100644
--- a/tools/ahat/Android.bp
+++ b/tools/ahat/Android.bp
@@ -19,9 +19,6 @@
     ],
     custom_template: "droiddoc-templates-sdk",
     args: "-stubpackages com.android.ahat:com.android.ahat.*",
-    api_tag_name: "AHAT",
-    api_filename: "ahat_api.txt",
-    removed_api_filename: "ahat_removed_api.txt",
     check_api: {
         current: {
             api_file: "etc/ahat_api.txt",
@@ -31,10 +28,34 @@
 }
 
 // --- ahat.jar ----------------
-java_library_host {
+java_binary_host {
     name: "ahat",
+    visibility: [
+        "//libcore/metrictests/memory/host",
+    ],
+    wrapper: "ahat",
     srcs: ["src/main/**/*.java"],
     manifest: "etc/ahat.mf",
     java_resources: ["etc/style.css"],
     javacflags: ["-Xdoclint:all/protected"],
 }
+
+// --- ahat-test-dump.jar --------------
+java_test_helper_library {
+    name: "ahat-test-dump",
+    srcs: ["src/test-dump/**/*.java"],
+    sdk_version: "core_platform",
+    optimize: {
+        obfuscate: true,
+        enabled: true,
+        proguard_flags_files: ["etc/test-dump.pro"],
+    },
+}
+
+// --- ahat-ri-test-dump.jar -------
+java_test_helper_library {
+    host_supported: true,
+    device_supported: false,
+    name: "ahat-ri-test-dump",
+    srcs: ["src/ri-test-dump/**/*.java"],
+}
diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk
index 0556350..160bb28 100644
--- a/tools/ahat/Android.mk
+++ b/tools/ahat/Android.mk
@@ -18,46 +18,30 @@
 
 include art/build/Android.common_path.mk
 
-# --- ahat script ----------------
-include $(CLEAR_VARS)
-LOCAL_IS_HOST_MODULE := true
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_MODULE := ahat
-LOCAL_SRC_FILES := ahat
-include $(BUILD_PREBUILT)
-
 # The ahat tests rely on running ART to generate a heap dump for test, but ART
 # doesn't run on darwin. Only build and run the tests for linux.
 # There are also issues with running under instrumentation.
 ifeq ($(HOST_OS),linux)
 ifneq ($(EMMA_INSTRUMENT),true)
-# --- ahat-test-dump.jar --------------
-include $(CLEAR_VARS)
-LOCAL_MODULE := ahat-test-dump
-LOCAL_MODULE_TAGS := tests
-LOCAL_SRC_FILES := $(call all-java-files-under, src/test-dump)
-LOCAL_PROGUARD_ENABLED := obfuscation
-LOCAL_PROGUARD_FLAG_FILES := etc/test-dump.pro
-include $(BUILD_JAVA_LIBRARY)
 
 # Determine the location of the test-dump.jar, test-dump.hprof, and proguard
-# map files. These use variables set implicitly by the include of
-# BUILD_JAVA_LIBRARY above.
-AHAT_TEST_DUMP_JAR := $(LOCAL_BUILT_MODULE)
-AHAT_TEST_DUMP_HPROF := $(intermediates.COMMON)/test-dump.hprof
-AHAT_TEST_DUMP_BASE_HPROF := $(intermediates.COMMON)/test-dump-base.hprof
-AHAT_TEST_DUMP_PROGUARD_MAP := $(intermediates.COMMON)/test-dump.map
+AHAT_TEST_DUMP_JAR := $(call intermediates-dir-for,JAVA_LIBRARIES,ahat-test-dump)/javalib.jar
+AHAT_TEST_DUMP_COMMON := $(call intermediates-dir-for,JAVA_LIBRARIES,ahat-test-dump,,COMMON)
+AHAT_TEST_DUMP_HPROF := $(AHAT_TEST_DUMP_COMMON)/test-dump.hprof
+AHAT_TEST_DUMP_BASE_HPROF := $(AHAT_TEST_DUMP_COMMON)/test-dump-base.hprof
+AHAT_TEST_DUMP_PROGUARD_MAP := $(AHAT_TEST_DUMP_COMMON)/test-dump.map
+AHAT_TEST_DUMP_PROGUARD_DICTIONARY := $(AHAT_TEST_DUMP_COMMON)/proguard_dictionary
 
 # Directories to use for ANDROID_DATA when generating the test dumps to
 # ensure we don't pollute the source tree with any artifacts from running
 # dalvikvm.
-AHAT_TEST_DUMP_ANDROID_DATA := $(intermediates.COMMON)/test-dump-android_data
-AHAT_TEST_DUMP_BASE_ANDROID_DATA := $(intermediates.COMMON)/test-dump-base-android_data
+AHAT_TEST_DUMP_ANDROID_DATA := $(AHAT_TEST_DUMP_COMMON)/test-dump-android_data
+AHAT_TEST_DUMP_BASE_ANDROID_DATA := $(AHAT_TEST_DUMP_COMMON)/test-dump-base-android_data
 
 # Generate the proguard map in the desired location by copying it from
 # wherever the build system generates it by default.
-$(AHAT_TEST_DUMP_PROGUARD_MAP): PRIVATE_AHAT_SOURCE_PROGUARD_MAP := $(proguard_dictionary)
-$(AHAT_TEST_DUMP_PROGUARD_MAP): $(proguard_dictionary)
+$(AHAT_TEST_DUMP_PROGUARD_MAP): PRIVATE_AHAT_SOURCE_PROGUARD_MAP := $(AHAT_TEST_DUMP_PROGUARD_DICTIONARY)
+$(AHAT_TEST_DUMP_PROGUARD_MAP): $(AHAT_TEST_DUMP_PROGUARD_DICTIONARY)
 	cp $(PRIVATE_AHAT_SOURCE_PROGUARD_MAP) $@
 
 ifeq (true,$(HOST_PREFER_32_BIT))
@@ -99,19 +83,11 @@
 	  $(PRIVATE_AHAT_TEST_ART) --no-compile -d $(PRIVATE_AHAT_TEST_DALVIKVM_ARG) \
 	  -cp $(PRIVATE_AHAT_TEST_DUMP_JAR) Main $@ --base
 
-# --- ahat-ri-test-dump.jar -------
-include $(CLEAR_VARS)
-LOCAL_MODULE := ahat-ri-test-dump
-LOCAL_MODULE_TAGS := tests
-LOCAL_SRC_FILES := $(call all-java-files-under, src/ri-test-dump)
-LOCAL_IS_HOST_MODULE := true
-include $(BUILD_HOST_JAVA_LIBRARY)
 
 # Determine the location of the ri-test-dump.jar and ri-test-dump.hprof.
-# These use variables set implicitly by the include of BUILD_JAVA_LIBRARY
-# above.
-AHAT_RI_TEST_DUMP_JAR := $(LOCAL_BUILT_MODULE)
-AHAT_RI_TEST_DUMP_HPROF := $(intermediates.COMMON)/ri-test-dump.hprof
+AHAT_RI_TEST_DUMP_JAR := $(call intermediates-dir-for,JAVA_LIBRARIES,ahat-ri-test-dump,HOST)/javalib.jar
+AHAT_RI_TEST_DUMP_COMMON := $(call intermediates-dir-for,JAVA_LIBRARIES,ahat-ri-test-dump,HOST,COMMON)
+AHAT_RI_TEST_DUMP_HPROF := $(AHAT_RI_TEST_DUMP_COMMON)/ri-test-dump.hprof
 
 # Run ahat-ri-test-dump.jar to generate ri-test-dump.hprof
 $(AHAT_RI_TEST_DUMP_HPROF): PRIVATE_AHAT_RI_TEST_DUMP_JAR := $(AHAT_RI_TEST_DUMP_JAR)
@@ -147,6 +123,7 @@
 # Clean up local variables.
 AHAT_TEST_JAR :=
 AHAT_TEST_DUMP_JAR :=
+AHAT_TEST_DUMP_COMMON :=
 AHAT_TEST_DUMP_HPROF :=
 AHAT_TEST_DUMP_BASE_HPROF :=
 AHAT_TEST_DUMP_PROGUARD_MAP :=
@@ -154,3 +131,6 @@
 AHAT_TEST_DUMP_ANDROID_DATA :=
 AHAT_TEST_DUMP_BASE_ANDROID_DATA :=
 
+AHAT_RI_TEST_DUMP_JAR :=
+AHAT_RI_TEST_DUMP_COMMON :=
+AHAT_RI_TEST_DUMP_HPROF :=
diff --git a/tools/ahat/README.txt b/tools/ahat/README.txt
index 12d3aa8..0474b13 100644
--- a/tools/ahat/README.txt
+++ b/tools/ahat/README.txt
@@ -52,7 +52,11 @@
  * Request to be able to sort tables by size.
 
 Release History:
- 1.7 Pending
+ 1.7 August 8, 2019
+   Annotate binder services, tokens, and proxies.
+   Add option for viewing subclass instances of a class.
+   Recognize java.lang.ref.Finalizer as a finalizer reference.
+   Minor bug fixes and API improvements.
 
  1.6 July 24, 2018
    Distinguish between soft/weak/phantom/etc references.
diff --git a/tools/ahat/etc/ahat.mf b/tools/ahat/etc/ahat.mf
index 8ce9863..f96aa64 100644
--- a/tools/ahat/etc/ahat.mf
+++ b/tools/ahat/etc/ahat.mf
@@ -1,4 +1,4 @@
 Name: ahat/
 Implementation-Title: ahat
-Implementation-Version: 1.6
+Implementation-Version: 1.7
 Main-Class: com.android.ahat.Main
diff --git a/tools/ahat/etc/ahat_api.txt b/tools/ahat/etc/ahat_api.txt
index 01e00e9..962f12b 100644
--- a/tools/ahat/etc/ahat_api.txt
+++ b/tools/ahat/etc/ahat_api.txt
@@ -300,6 +300,7 @@
     method public abstract boolean equals(java.lang.Object);
     method public static com.android.ahat.heapdump.Value getBaseline(com.android.ahat.heapdump.Value);
     method public static com.android.ahat.heapdump.Type getType(com.android.ahat.heapdump.Value);
+    method public abstract int hashCode();
     method public boolean isAhatInstance();
     method public boolean isInteger();
     method public boolean isLong();
diff --git a/tools/ahat/src/main/com/android/ahat/Main.java b/tools/ahat/src/main/com/android/ahat/Main.java
index 0c18b10..586f95e 100644
--- a/tools/ahat/src/main/com/android/ahat/Main.java
+++ b/tools/ahat/src/main/com/android/ahat/Main.java
@@ -188,9 +188,9 @@
     server.createContext("/objects", new AhatHttpHandler(new ObjectsHandler(ahat)));
     server.createContext("/site", new AhatHttpHandler(new SiteHandler(ahat)));
     server.createContext("/bitmap", new BitmapHandler(ahat));
-    server.createContext("/style.css", new StaticHandler("style.css", "text/css"));
+    server.createContext("/style.css", new StaticHandler("etc/style.css", "text/css"));
     server.setExecutor(Executors.newFixedThreadPool(1));
-    System.out.println("Server started on localhost:" + port);
+    System.out.println("Server started on http://localhost:" + port);
 
     server.start();
   }
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
index 281c977..e62fb40 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/AhatInstance.java
@@ -264,6 +264,9 @@
    * @return the immediate dominator of this instance
    */
   public AhatInstance getImmediateDominator() {
+    if (mImmediateDominator instanceof SuperRoot) {
+      return null;
+    }
     return mImmediateDominator;
   }
 
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/DiffedFieldValue.java b/tools/ahat/src/main/com/android/ahat/heapdump/DiffedFieldValue.java
index 8de337e..fcb92a9 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/DiffedFieldValue.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/DiffedFieldValue.java
@@ -114,6 +114,10 @@
     this.status = status;
   }
 
+  @Override public int hashCode() {
+    return Objects.hash(name, type, current, baseline, status);
+  }
+
   @Override
   public boolean equals(Object otherObject) {
     if (otherObject instanceof DiffedFieldValue) {
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java b/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java
index 4e7cd43..c7f7b4b 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/Parser.java
@@ -32,6 +32,7 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 
 /**
  * Provides methods for parsing heap dumps.
@@ -509,6 +510,7 @@
                       obj.initialize(data);
                       break;
                     }
+                    default: throw new AssertionError("unsupported enum member");
                   }
                   break;
                 }
@@ -736,6 +738,10 @@
       return String.format("0x%08x", mId);
     }
 
+    @Override public int hashCode() {
+      return Objects.hash(mId);
+    }
+
     @Override public boolean equals(Object other) {
       if (other instanceof DeferredInstanceValue) {
         DeferredInstanceValue value = (DeferredInstanceValue)other;
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Size.java b/tools/ahat/src/main/com/android/ahat/heapdump/Size.java
index a4593e1..b721bac 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/Size.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/Size.java
@@ -16,6 +16,8 @@
 
 package com.android.ahat.heapdump;
 
+import java.util.Objects;
+
 /**
  * Used to represent how much space an instance takes up.
  * An abstraction is introduced rather than using a long directly in order to
@@ -110,6 +112,11 @@
     return new Size(mJavaSize, mRegisteredNativeSize + size);
   }
 
+  @Override
+  public int hashCode() {
+    return Objects.hash(mJavaSize, mRegisteredNativeSize);
+  }
+
   @Override public boolean equals(Object other) {
     if (other instanceof Size) {
       Size s = (Size)other;
diff --git a/tools/ahat/src/main/com/android/ahat/heapdump/Value.java b/tools/ahat/src/main/com/android/ahat/heapdump/Value.java
index d78f95b..5e48dca 100644
--- a/tools/ahat/src/main/com/android/ahat/heapdump/Value.java
+++ b/tools/ahat/src/main/com/android/ahat/heapdump/Value.java
@@ -16,6 +16,8 @@
 
 package com.android.ahat.heapdump;
 
+import java.util.Objects;
+
 /**
  * A Java instance or primitive value from a parsed heap dump.
  * Note: To save memory, a null Value is used to represent a null Java
@@ -226,6 +228,9 @@
   }
 
   @Override
+  public abstract int hashCode();
+
+  @Override
   public abstract boolean equals(Object other);
 
   private static class BooleanValue extends Value {
@@ -245,6 +250,10 @@
       return Boolean.toString(mBool);
     }
 
+    @Override public int hashCode() {
+      return Objects.hash(mBool);
+    }
+
     @Override public boolean equals(Object other) {
       if (other instanceof BooleanValue) {
         BooleanValue value = (BooleanValue)other;
@@ -276,6 +285,10 @@
       return Byte.toString(mByte);
     }
 
+    @Override public int hashCode() {
+      return Objects.hash(mByte);
+    }
+
     @Override public boolean equals(Object other) {
       if (other instanceof ByteValue) {
         ByteValue value = (ByteValue)other;
@@ -307,6 +320,10 @@
       return Character.toString(mChar);
     }
 
+    @Override public int hashCode() {
+      return Objects.hash(mChar);
+    }
+
     @Override public boolean equals(Object other) {
       if (other instanceof CharValue) {
         CharValue value = (CharValue)other;
@@ -333,6 +350,10 @@
       return Double.toString(mDouble);
     }
 
+    @Override public int hashCode() {
+      return Objects.hash(mDouble);
+    }
+
     @Override public boolean equals(Object other) {
       if (other instanceof DoubleValue) {
         DoubleValue value = (DoubleValue)other;
@@ -359,6 +380,10 @@
       return Float.toString(mFloat);
     }
 
+    @Override public int hashCode() {
+      return Objects.hash(mFloat);
+    }
+
     @Override public boolean equals(Object other) {
       if (other instanceof FloatValue) {
         FloatValue value = (FloatValue)other;
@@ -401,6 +426,10 @@
       return InstanceValue.pack(mInstance.getBaseline());
     }
 
+    @Override public int hashCode() {
+      return Objects.hash(mInstance);
+    }
+
     @Override public boolean equals(Object other) {
       if (other instanceof InstanceValue) {
         InstanceValue value = (InstanceValue)other;
@@ -437,6 +466,10 @@
       return Integer.toString(mInt);
     }
 
+    @Override public int hashCode() {
+      return Objects.hash(mInt);
+    }
+
     @Override public boolean equals(Object other) {
       if (other instanceof IntValue) {
         IntValue value = (IntValue)other;
@@ -473,6 +506,10 @@
       return Long.toString(mLong);
     }
 
+    @Override public int hashCode() {
+      return Objects.hash(mLong);
+    }
+
     @Override public boolean equals(Object other) {
       if (other instanceof LongValue) {
         LongValue value = (LongValue)other;
@@ -499,6 +536,10 @@
       return Short.toString(mShort);
     }
 
+    @Override public int hashCode() {
+      return Objects.hash(mShort);
+    }
+
     @Override public boolean equals(Object other) {
       if (other instanceof ShortValue) {
         ShortValue value = (ShortValue)other;
diff --git a/tools/ahat/src/test/com/android/ahat/InstanceTest.java b/tools/ahat/src/test/com/android/ahat/InstanceTest.java
index af0a73b..376122b 100644
--- a/tools/ahat/src/test/com/android/ahat/InstanceTest.java
+++ b/tools/ahat/src/test/com/android/ahat/InstanceTest.java
@@ -412,6 +412,15 @@
   }
 
   @Test
+  public void isRoot() throws IOException {
+    // We expect the Main class to be a root.
+    TestDump dump = TestDump.getTestDump();
+    AhatInstance main = dump.findClass("Main");
+    assertTrue(main.isRoot());
+    assertNull(main.getImmediateDominator());
+  }
+
+  @Test
   public void isNotRoot() throws IOException {
     TestDump dump = TestDump.getTestDump();
     AhatInstance obj = dump.getDumpedAhatInstance("anObject");
diff --git a/tools/amm/AmmTest/AndroidManifest.xml b/tools/amm/AmmTest/AndroidManifest.xml
deleted file mode 100644
index 16529bc..0000000
--- a/tools/amm/AmmTest/AndroidManifest.xml
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<manifest
-  xmlns:android="http://schemas.android.com/apk/res/android"
-  package="com.android.amm.test">
-
-  <application
-    android:label="AmmTest"
-    android:debuggable="true">
-
-    <activity android:name="com.android.amm.test.MainActivity">
-      <intent-filter>
-        <action android:name="android.intent.action.MAIN" />
-        <category android:name="android.intent.category.LAUNCHER" />
-      </intent-filter>
-    </activity>
-  </application>
-</manifest>
diff --git a/tools/amm/AmmTest/aahat.png b/tools/amm/AmmTest/aahat.png
deleted file mode 100644
index 01b92f4..0000000
--- a/tools/amm/AmmTest/aahat.png
+++ /dev/null
Binary files differ
diff --git a/tools/amm/AmmTest/jni/ammtest.c b/tools/amm/AmmTest/jni/ammtest.c
deleted file mode 100644
index 9d48475..0000000
--- a/tools/amm/AmmTest/jni/ammtest.c
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "jni.h"
-
-// A large uninitialized array gets put in the .bss section:
-char uninit[3 * 4096];
-
-// A large initialized array gets put in the .data section:
-char init[2 * 4096] =
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.."
-  "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789..";
-
-// A large constant initialized array gets put in the .rodata section:
-const char cinit[1 * 4096] =
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.."
-  "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789..";
-
-JNIEXPORT jint JNICALL
-Java_com_android_amm_test_SoCodeUse_nGetANumber(JNIEnv* env, jclass cls) {
-  (void) env;
-  (void) cls;
-
-  uninit[4096] = init[123] + cinit[123];
-  return 42;
-}
-
diff --git a/tools/amm/AmmTest/src/com/android/amm/test/BitmapUse.java b/tools/amm/AmmTest/src/com/android/amm/test/BitmapUse.java
deleted file mode 100644
index d8eba2e..0000000
--- a/tools/amm/AmmTest/src/com/android/amm/test/BitmapUse.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.amm.test;
-
-import android.graphics.Bitmap;
-import android.graphics.BitmapFactory;
-
-/**
- * Exercise loading of a bitmap.
- */
-class BitmapUse {
-
-  private Bitmap mBitmap;
-
-  public BitmapUse() {
-    ClassLoader loader = BitmapUse.class.getClassLoader();
-    mBitmap = BitmapFactory.decodeStream(loader.getResourceAsStream("aahat.png"), null, null);
-  }
-}
diff --git a/tools/amm/AmmTest/src/com/android/amm/test/MainActivity.java b/tools/amm/AmmTest/src/com/android/amm/test/MainActivity.java
deleted file mode 100644
index 4577f4b..0000000
--- a/tools/amm/AmmTest/src/com/android/amm/test/MainActivity.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.amm.test;
-
-import android.app.Activity;
-import android.os.Bundle;
-import android.widget.LinearLayout;
-
-public class MainActivity extends Activity {
-
-  private BitmapUse mBitmapUse;
-  private SoCodeUse mSoCodeUse;
-  private TextureViewUse mTextureViewUse;
-  private SurfaceViewUse mSurfaceViewUse;
-  private ThreadedRendererUse mThreadedRendererUse;
-
-  @Override
-  public void onCreate(Bundle savedInstanceState) {
-    super.onCreate(savedInstanceState);
-
-    mBitmapUse = new BitmapUse();
-    mSoCodeUse = new SoCodeUse();
-
-    LinearLayout ll = new LinearLayout(this);
-    mTextureViewUse = new TextureViewUse(this, ll, 200, 500);
-    mSurfaceViewUse = new SurfaceViewUse(this, ll, 240, 250);
-    setContentView(ll);
-
-    mThreadedRendererUse = new ThreadedRendererUse(this, 122, 152);
-  }
-}
-
diff --git a/tools/amm/AmmTest/src/com/android/amm/test/SoCodeUse.java b/tools/amm/AmmTest/src/com/android/amm/test/SoCodeUse.java
deleted file mode 100644
index 9636c0f..0000000
--- a/tools/amm/AmmTest/src/com/android/amm/test/SoCodeUse.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.amm.test;
-
-class SoCodeUse {
-  private int value;
-
-  public SoCodeUse() {
-    // TODO: Figure out how to cause the native library to be unloaded when
-    // the SoCodeUse instance goes away?
-    System.loadLibrary("ammtestjni");
-    value = nGetANumber();
-  }
-
-  private static native int nGetANumber();
-}
diff --git a/tools/amm/AmmTest/src/com/android/amm/test/SurfaceViewUse.java b/tools/amm/AmmTest/src/com/android/amm/test/SurfaceViewUse.java
deleted file mode 100644
index 0c17c77..0000000
--- a/tools/amm/AmmTest/src/com/android/amm/test/SurfaceViewUse.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.amm.test;
-
-import android.content.Context;
-import android.graphics.Canvas;
-import android.view.SurfaceHolder;
-import android.view.SurfaceView;
-import android.view.ViewGroup;
-
-class SurfaceViewUse {
-
-  private SurfaceView mSurfaceView;
-
-  /**
-   * Constructs a SurfaceView object with given dimensions.
-   * The surface view is added to the given ViewGroup object, which should be
-   * included in the main display.
-   */
-  public SurfaceViewUse(Context context, ViewGroup vg, int width, int height) {
-    mSurfaceView = new SurfaceView(context);
-    vg.addView(mSurfaceView, width, height);
-    mSurfaceView.post(new CycleRunnable());
-  }
-
-  // To force as many graphics buffers as will ever be used to actually be
-  // used, we cycle the color of the surface view a handful of times right
-  // when things start up.
-  private class CycleRunnable implements Runnable {
-    private int mCycles = 0;
-    private int mRed = 255;
-    private int mGreen = 0;
-    private int mBlue = 255;
-
-    public void run() {
-      if (mCycles < 10) {
-        mCycles++;
-        updateSurfaceView();
-        mSurfaceView.post(this);
-      }
-    }
-
-    private void updateSurfaceView() {
-      SurfaceHolder holder = mSurfaceView.getHolder();
-      Canvas canvas = holder.lockHardwareCanvas();
-      if (canvas != null) {
-        canvas.drawRGB(mRed, mGreen, mBlue);
-        int tmp = mRed;
-        holder.unlockCanvasAndPost(canvas);
-        mRed = mGreen;
-        mGreen = mBlue;
-        mBlue = tmp;
-      }
-    }
-  }
-}
-
diff --git a/tools/amm/AmmTest/src/com/android/amm/test/TextureViewUse.java b/tools/amm/AmmTest/src/com/android/amm/test/TextureViewUse.java
deleted file mode 100644
index 51ffcd2..0000000
--- a/tools/amm/AmmTest/src/com/android/amm/test/TextureViewUse.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.amm.test;
-
-import android.content.Context;
-import android.graphics.Canvas;
-import android.view.TextureView;
-import android.view.ViewGroup;
-
-class TextureViewUse {
-
-  private TextureView mTextureView;
-
-  /**
-   * Constructs a TextureView object with given dimensions.
-   * The texture view is added to the given ViewGroup object, which should be
-   * included in the main display.
-   */
-  public TextureViewUse(Context context, ViewGroup vg, int width, int height) {
-    mTextureView = new TextureView(context);
-    vg.addView(mTextureView, width, height);
-    mTextureView.post(new CycleRunnable());
-  }
-
-  // To force as many graphics buffers as will ever be used to actually be
-  // used, we cycle the color of the texture view a handful of times right
-  // when things start up.
-  private class CycleRunnable implements Runnable {
-    private int mCycles = 0;
-    private int mRed = 255;
-    private int mGreen = 255;
-    private int mBlue = 0;
-
-    public void run() {
-      if (mCycles < 10) {
-        mCycles++;
-        updateTextureView();
-        mTextureView.post(this);
-      }
-    }
-
-    private void updateTextureView() {
-      Canvas canvas = mTextureView.lockCanvas();
-      if (canvas != null) {
-        canvas.drawRGB(mRed, mGreen, mBlue);
-        int tmp = mRed;
-        mTextureView.unlockCanvasAndPost(canvas);
-        mRed = mGreen;
-        mGreen = mBlue;
-        mBlue = tmp;
-      }
-    }
-  }
-}
-
diff --git a/tools/amm/AmmTest/src/com/android/amm/test/ThreadedRendererUse.java b/tools/amm/AmmTest/src/com/android/amm/test/ThreadedRendererUse.java
deleted file mode 100644
index 9c25612..0000000
--- a/tools/amm/AmmTest/src/com/android/amm/test/ThreadedRendererUse.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.amm.test;
-
-import android.content.Context;
-import android.view.WindowManager;
-import android.widget.TextView;
-
-class ThreadedRendererUse {
-
-  private TextView mTextView;
-
-  /**
-   * Cause a threaded renderer EGL allocation to be used, with given
-   * dimensions.
-   */
-  public ThreadedRendererUse(Context context, int width, int height) {
-    mTextView = new TextView(context);
-    mTextView.setText("TRU");
-    mTextView.setBackgroundColor(0xffff0000);
-
-    // Adding a view to the WindowManager (as opposed to the app's root view
-    // hierarchy) causes a ThreadedRenderer and EGL allocations under the cover.
-    // We use a TextView here to trigger the use case, but we could use any
-    // other kind of view as well.
-    WindowManager wm = context.getSystemService(WindowManager.class);
-    WindowManager.LayoutParams layout = new WindowManager.LayoutParams();
-    layout.width = width;
-    layout.height = height;
-    wm.addView(mTextView, layout);
-
-    mTextView.post(new CycleRunnable());
-  }
-
-  // To force as many graphics buffers as will ever be used to actually be
-  // used, we cycle the text of the text view a handful of times right
-  // when things start up.
-  private class CycleRunnable implements Runnable {
-    private int mCycles = 0;
-
-    public void run() {
-      if (mCycles < 10) {
-        mCycles++;
-        mTextView.setText("TRU " + mCycles);
-        mTextView.post(this);
-      }
-    }
-  }
-}
-
diff --git a/tools/amm/Android.bp b/tools/amm/Android.bp
deleted file mode 100644
index e6f6ff7..0000000
--- a/tools/amm/Android.bp
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (C) 2017 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// --- ammtestjni.so -------------
-
-cc_library_shared {
-    name: "libammtestjni",
-
-    srcs: [
-        "AmmTest/jni/ammtest.c",
-    ],
-
-    sdk_version: "current",
-}
diff --git a/tools/amm/Android.mk b/tools/amm/Android.mk
deleted file mode 100644
index fa4ca44..0000000
--- a/tools/amm/Android.mk
+++ /dev/null
@@ -1,26 +0,0 @@
-# Copyright (C) 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LOCAL_PATH := $(call my-dir)
-
-# --- AmmTest.apk --------------
-include $(CLEAR_VARS)
-LOCAL_PACKAGE_NAME := AmmTest
-LOCAL_MODULE_TAGS := samples tests
-LOCAL_SRC_FILES := $(call all-java-files-under, AmmTest/src)
-LOCAL_SDK_VERSION := current
-LOCAL_JNI_SHARED_LIBRARIES := libammtestjni
-LOCAL_JAVA_RESOURCE_FILES := $(LOCAL_PATH)/AmmTest/aahat.png
-LOCAL_MANIFEST_FILE := AmmTest/AndroidManifest.xml
-include $(BUILD_PACKAGE)
diff --git a/tools/amm/README.md b/tools/amm/README.md
deleted file mode 100644
index 17f94a8..0000000
--- a/tools/amm/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-# Actionable Memory Metric
-
-The goal of the actionable memory metric (AMM) is to provide a view of an
-application's memory use that application developers can track, understand,
-and control. AMM can be thought of as a Java heap dump augmented with models
-for non-Java allocations that app developers have some control of.
-
-There are two components of the actionable memory metric:
-1. The value of the metric.
-2. An actionable breakdown of the value of the metric.
-
-The metric is made up of a collection of separate models for different
-categories of memory use. Each model contributes to the value and actionable
-breakdown of the overall metric.
-
-See models/ for a list of models proposed for the actionable memory metric.
diff --git a/tools/amm/models/Bitmap.md b/tools/amm/models/Bitmap.md
deleted file mode 100644
index 49a0b9d..0000000
--- a/tools/amm/models/Bitmap.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Bitmap Model
-
-The value of the Bitmap model is the sum of bytes used for native pixel data
-of instances of `android.graphics.Bitmap`. It is calculated by summing for
-each instance `x` of `android.graphics.Bitmap`:
-
-    x.getAllocationByteCount()
-
-The actionable breakdown of the Bitmap model is a breakdown by
-`android.graphics.Bitmap` instance, including width, height, and ideally a
-thumbnail image of each bitmap.
-
-For example, an 800 x 600 bitmap instance using the `ARGB_8888` pixel format
-with native pixel data will be shown as an 800 x 600 bitmap instance taking up
-1875 kB.
diff --git a/tools/amm/models/DexCode.md b/tools/amm/models/DexCode.md
deleted file mode 100644
index a907280..0000000
--- a/tools/amm/models/DexCode.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Dex Code Model
-
-The value of the Dex Code model is the sum of the original uncompressed file
-sizes of all loaded dex files. It is calculated using the best approximation
-of the dex file size available to us on device. On Android O, for example,
-this can be approximated as the virtual size of the corresponding memory
-mapped `.vdex` file read from `/proc/self/maps`. Different Android platform
-versions and scenarios may require different approximations.
-
-The actionable breakdown of the dex code model is a breakdown by
-`dalvik.system.DexFile` instance. Further breakdown of individual dex files
-can be achieved using tools such as dexdump.
-
-For example, for an application `AmmTest.apk` that has a single `classes.dex` file
-that is 500 KB uncompressed, the `DexFile` instance for
-`/data/app/com.android.amm.test-_uHI4CJWpeoztbjN6Tr-Nw==/base.apk` is shown as
-Taking up 500 KB (or the best available approximation thereof).
diff --git a/tools/amm/models/Graphics.md b/tools/amm/models/Graphics.md
deleted file mode 100644
index b327961..0000000
--- a/tools/amm/models/Graphics.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Graphics Models
-
-There are three components to the graphics model, each modeling EGL memory
-use:
-1. For each `android.view.TextureView` instance:
-    2 * (4 * width * height)
-
-2. For each `android.view.Surface$HwuiContext` instance:
-    3 * (4 * width * height)
-
-3. For each initialized `android.view.ThreadedRenderer`:
-    3 * (4 * width * height)
-
-Note: 4 is the number of bytes per pixel. 2 or 3 is the maximum number of
-buffers that may be allocated.
-
-The actionable breakdown is the breakdown by `TextureView`,
-`Surface$HwuiContext` and `ThreadedRenderer` instance, with further details
-about the width and height associated with each instance.
-
-For example, an application with a single 64x256 `TextureView` instance will
-be shown as taking up 128 KB.
diff --git a/tools/amm/models/JavaHeap.md b/tools/amm/models/JavaHeap.md
deleted file mode 100644
index c34c186..0000000
--- a/tools/amm/models/JavaHeap.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# Java Heap Model
-
-The value of the Java heap model is the sum of bytes of Java objects allocated
-on the Java heap. It can be calculated using:
-
-    Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()
-
-A Java heap dump is used for an actionable breakdown of the Java heap.
diff --git a/tools/amm/models/SoCode.md b/tools/amm/models/SoCode.md
deleted file mode 100644
index 5d3184e..0000000
--- a/tools/amm/models/SoCode.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Shared Native Code Model
-
-The value of the Shared Native Code model is the sum of the virtual memory
-sizes of all loaded `.so` files. It is calculated by reading `/proc/self/maps`.
-
-The actionable breakdown of the shared native code model is a breakdown by
-library name. Unfortunately, due to technical limitations, this does not
-include information about what caused a library to be loaded, whether the
-library was loaded by the app or the platform, the library dependency graph,
-or what is causing a library to remain loaded. Individual `.so` files can be
-further broken down using tools such as `readelf`.
-
-For example, for an application `AmmTest.apk` that includes `libammtestjni.so` as a
-native library that loads 36 KB worth of memory regions, `BaseClassLoader` will
-be shown with library
-`/data/app/com.android.amm.test-_uHI4CJWpeoztbjN6Tr-Nw==/lib/arm64/libammtestjni.so`
-taking up 36 KB.
diff --git a/tools/art b/tools/art
index 9e39464..d44737c 100644
--- a/tools/art
+++ b/tools/art
@@ -199,7 +199,8 @@
     # (see run_art function)
     verbose_run ANDROID_DATA=$ANDROID_DATA                    \
           ANDROID_ROOT=$ANDROID_ROOT                          \
-          ANDROID_RUNTIME_ROOT=$ANDROID_RUNTIME_ROOT          \
+          ANDROID_I18N_ROOT=$ANDROID_I18N_ROOT                \
+          ANDROID_ART_ROOT=$ANDROID_ART_ROOT                  \
           ANDROID_TZDATA_ROOT=$ANDROID_TZDATA_ROOT            \
           LD_LIBRARY_PATH=$LD_LIBRARY_PATH                    \
           PATH=$ANDROID_ROOT/bin:$PATH                        \
@@ -275,7 +276,8 @@
   # Run dalvikvm.
   verbose_run ANDROID_DATA="$ANDROID_DATA"                  \
               ANDROID_ROOT="$ANDROID_ROOT"                  \
-              ANDROID_RUNTIME_ROOT="$ANDROID_RUNTIME_ROOT"  \
+              ANDROID_I18N_ROOT="$ANDROID_I18N_ROOT"        \
+              ANDROID_ART_ROOT="$ANDROID_ART_ROOT"          \
               ANDROID_TZDATA_ROOT="$ANDROID_TZDATA_ROOT"    \
               LD_LIBRARY_PATH="$LD_LIBRARY_PATH"            \
               PATH="$ANDROID_ROOT/bin:$PATH"                \
@@ -300,6 +302,7 @@
 ######################################
 ART_BINARY=dalvikvm
 DEX2OAT_BINARY=dex2oat
+DEX2OAT_SUFFIX=""
 DELETE_ANDROID_DATA="no"
 LAUNCH_WRAPPER=
 LIBART=libart.so
@@ -322,15 +325,17 @@
     ;;
   --32)
     ART_BINARY=dalvikvm32
+    DEX2OAT_SUFFIX=32
     ;;
   --64)
     ART_BINARY=dalvikvm64
+    DEX2OAT_SUFFIX=64
     ;;
   -d)
     ;& # Fallthrough
   --debug)
     LIBART="libartd.so"
-    DEX2OAT_BINARY=dex2oatd
+    DEX2OAT_BINARY="dex2oatd"
     # Expect that debug mode wants all checks.
     EXTRA_OPTIONS+=(-XX:SlowDebug=true)
     ;;
@@ -403,45 +408,67 @@
 PROG_DIR="$(cd "${PROG_NAME%/*}" ; pwd -P)"
 ANDROID_ROOT="$(cd $PROG_DIR/..; pwd -P)"
 
-# If ANDROID_RUNTIME_ROOT is not set, try to detect whether we are running on
+# If ANDROID_I18N_ROOT is not set, try to detect whether we are running on
 # target or host and set that environment variable to the usual default value.
-if [ -z "$ANDROID_RUNTIME_ROOT" ]; then
+if [ -z "$ANDROID_I18N_ROOT" ]; then
   # This script is used on host and target (device). However, the (expected)
-  # default value `ANDROID_RUNTIME_ROOT` is not the same on host and target:
-  # - on host, `ANDROID_RUNTIME_ROOT` is expected to be "$ANDROID_ROOT/com.android.apex";
-  # - on target, `ANDROID_RUNTIME_ROOT` is expected to be "$ANDROID_ROOT/../apex/com.android.apex".
+  # default value `ANDROID_I18N_ROOT` is not the same on host and target:
+  # - on host, `ANDROID_I18N_ROOT` is expected to be
+  #   "$ANDROID_ROOT/com.android.i18n";
+  # - on target, `ANDROID_I18N_ROOT` is expected to be
+  #   "/apex/com.android.i18n".
   #
   # We use the presence/absence of the `$ANDROID_ROOT/../apex` directory to
   # determine whether we are on target or host (this is brittle, but simple).
   if [ -d "$ANDROID_ROOT/../apex" ]; then
     # Target case.
-    #
-    # We should be setting `ANDROID_RUNTIME_ROOT` to
-    # "$ANDROID_ROOT/../apex/com.android.runtime" here. However, the Runtime APEX
-    # is not (yet) supported by the ART Buildbot setup (see b/121117762); and yet
-    # ICU code depends on `ANDROID_RUNTIME_ROOT` to find ICU .dat files.
-    #
-    # As a temporary workaround, we:
-    # - make the ART Buildbot build script (art/tools/buildbot-build.sh) also
-    #   generate the ICU .dat files in `/system/etc/icu` on device (these files
-    #   are normally only put in the Runtime APEX on device);
-    # - set `ANDROID_RUNTIME_ROOT` to `$ANDROID_ROOT` (i.e. "/system") here.
-    #
-    # TODO(b/121117762): Set `ANDROID_RUNTIME_ROOT` to
-    # "$ANDROID_ROOT/../apex/com.android.runtime" when the Runtime APEX is fully
-    # supported on the ART Buildbot and Golem.
-    ANDROID_RUNTIME_ROOT=$ANDROID_ROOT
+    ANDROID_I18N_ROOT="/apex/com.android.i18n"
   else
     # Host case.
-    ANDROID_RUNTIME_ROOT="$ANDROID_ROOT/com.android.runtime"
+    ANDROID_I18N_ROOT="$ANDROID_ROOT/com.android.i18n"
   fi
 fi
 
-# If ANDROID_TZDATA_ROOT is not set point it to somewhere safe. Android code
-# currently treats the module as optional so it does not require the path exists.
+# If ANDROID_ART_ROOT is not set, try to detect whether we are running on
+# target or host and set that environment variable to the usual default value.
+if [ -z "$ANDROID_ART_ROOT" ]; then
+  # This script is used on host and target (device). However, the (expected)
+  # default value `ANDROID_ART_ROOT` is not the same on host and target:
+  # - on host, `ANDROID_ART_ROOT` is expected to be
+  #   "$ANDROID_ROOT/com.android.art";
+  # - on target, `ANDROID_ART_ROOT` is expected to be
+  #   "/apex/com.android.art".
+  #
+  # We use the presence/absence of the `$ANDROID_ROOT/../apex` directory to
+  # determine whether we are on target or host (this is brittle, but simple).
+  if [ -d "$ANDROID_ROOT/../apex" ]; then
+    # Target case.
+    ANDROID_ART_ROOT="/apex/com.android.art"
+  else
+    # Host case.
+    ANDROID_ART_ROOT="$ANDROID_ROOT/com.android.art"
+  fi
+fi
+
+# If ANDROID_TZDATA_ROOT is not set, try to detect whether we are running on
+# target or host and set that environment variable to the usual default value.
 if [ -z "$ANDROID_TZDATA_ROOT" ]; then
-  # Safe stubbed location that we don't need to exist.
-  ANDROID_TZDATA_ROOT="$ANDROID_ROOT/com.android.tzdata_doesnotexist"
+  # This script is used on host and target (device). However, the (expected)
+  # default value `ANDROID_TZDATA_ROOT` is not the same on host and target:
+  # - on host, `ANDROID_TZDATA_ROOT` is expected to be
+  #   "$ANDROID_ROOT/com.android.tzdata";
+  # - on target, `ANDROID_TZDATA_ROOT` is expected to be
+  #   "/apex/com.android.tzdata".
+  #
+  # We use the presence/absence of the `$ANDROID_ROOT/../apex` directory to
+  # determine whether we are on target or host (this is brittle, but simple).
+  if [ -d "$ANDROID_ROOT/../apex" ]; then
+    # Target case.
+    ANDROID_TZDATA_ROOT="/apex/com.android.tzdata"
+  else
+    # Host case.
+    ANDROID_TZDATA_ROOT="$ANDROID_ROOT/com.android.tzdata"
+  fi
 fi
 
 ART_BINARY_PATH=$ANDROID_ROOT/bin/$ART_BINARY
@@ -454,7 +481,7 @@
   exit 1
 fi
 
-DEX2OAT_BINARY_PATH=$ANDROID_ROOT/bin/$DEX2OAT_BINARY
+DEX2OAT_BINARY_PATH=$ANDROID_ROOT/bin/$DEX2OAT_BINARY$DEX2OAT_SUFFIX
 
 if [ ! -x "$DEX2OAT_BINARY_PATH" ]; then
   echo "Warning: Android Compiler not found: $DEX2OAT_BINARY_PATH"
@@ -504,7 +531,7 @@
   # Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
   # because that's what we use for compiling the core.art image.
   # It may contain additional modules from TEST_CORE_JARS.
-  core_jars_list="core-oj core-libart okhttp bouncycastle apache-xml conscrypt"
+  core_jars_list="core-oj core-libart core-icu4j okhttp bouncycastle apache-xml conscrypt"
   core_jars_suffix=
   if [[ -e $ANDROID_ROOT/framework/core-oj-hostdex.jar ]]; then
     core_jars_suffix=-hostdex
diff --git a/tools/art_verifier/Android.bp b/tools/art_verifier/Android.bp
index 64ca171..72327d5 100644
--- a/tools/art_verifier/Android.bp
+++ b/tools/art_verifier/Android.bp
@@ -18,9 +18,9 @@
     name: "art_verifier-defaults",
     defaults: [
         "art_defaults",
-        "libart_static_defaults",
     ],
     host_supported: true,
+    device_supported: false,
     srcs: [
         "art_verifier.cc",
     ],
@@ -47,5 +47,17 @@
 
 art_cc_binary {
     name: "art_verifier",
-    defaults: ["art_verifier-defaults"],
+    defaults: [
+        "art_verifier-defaults",
+        "libart_static_defaults",
+    ],
+}
+
+art_cc_binary {
+    name: "art_verifierd",
+    defaults: [
+        "art_debug_defaults",
+        "art_verifier-defaults",
+        "libartd_static_defaults",
+    ],
 }
diff --git a/tools/bisection_search/bisection_search.py b/tools/bisection_search/bisection_search.py
index 250b5d1..102bbad 100755
--- a/tools/bisection_search/bisection_search.py
+++ b/tools/bisection_search/bisection_search.py
@@ -46,10 +46,8 @@
 # Passes that are never disabled during search process because disabling them
 # would compromise correctness.
 MANDATORY_PASSES = ['dex_cache_array_fixups_arm',
-                    'dex_cache_array_fixups_mips',
                     'instruction_simplifier$before_codegen',
                     'pc_relative_fixups_x86',
-                    'pc_relative_fixups_mips',
                     'x86_memory_operand_generation']
 
 # Passes that show up as optimizations in compiler verbose output but aren't
diff --git a/tools/boot-image-profile-configure-device.sh b/tools/boot-image-profile-configure-device.sh
new file mode 100755
index 0000000..081f442
--- /dev/null
+++ b/tools/boot-image-profile-configure-device.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+#
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This script configures a device for boot image profile
+#
+
+if [[ -z "$ANDROID_BUILD_TOP" ]]; then
+  echo "You must run on this after running envsetup.sh and launch target"
+  exit 1
+fi
+
+if [[ "$#" -lt 1 ]]; then
+  echo "Usage $0 <output-for-boot-zip>"
+  echo "Example: $0 boot.zip"
+  exit 1
+fi
+
+OUT_BOOT_ZIP="$1"
+
+echo "Changing dirs to the build top"
+cd "$ANDROID_BUILD_TOP"
+
+# Make dist in order to easily get the boot and system server dex files
+# This will be stored in $ANDROID_PRODUCT_OUT/boot.zip
+echo "Make dist"
+m dist
+echo "Copy boot.zip to $OUT_BOOT_ZIP"
+cp "$ANDROID_PRODUCT_OUT"/boot.zip $OUT_BOOT_ZIP
+
+echo "Setting properties and clearing existing profiles"
+# If the device needs to be rebooted, it is better to set the properties
+# via a local.prop file:
+#  1) create a local.prop file with the content
+#      dalvik.vm.profilebootclasspath=true
+#      dalvik.vm.profilesystemserver=true
+#  2) adb push local.prop /data/
+#     adb shell chmod 0750 /data/local.prop
+#     adb reboot
+
+adb root
+adb shell stop
+adb shell setprop dalvik.vm.profilebootclasspath true
+adb shell setprop dalvik.vm.profilesystemserver true
+adb shell find "/data/misc/profiles -name *.prof -exec truncate -s 0 {} \;"
+adb shell start
\ No newline at end of file
diff --git a/tools/boot-image-profile-extract-profile.sh b/tools/boot-image-profile-extract-profile.sh
new file mode 100755
index 0000000..e050e36
--- /dev/null
+++ b/tools/boot-image-profile-extract-profile.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+#
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This script extracts the boot image profile from a previously configured device,
+# which executed the critical user journeys.
+#
+
+if [[ "$#" -lt 1 ]]; then
+  echo "Usage $0 <output-profile>"
+  echo "Example: $0 android.prof"
+  exit 1
+fi
+
+OUT_PROFILE="$1"
+
+echo "Snapshoting platform profiles"
+adb shell cmd package snapshot-profile android
+adb pull /data/misc/profman/android.prof "$OUT_PROFILE"
\ No newline at end of file
diff --git a/tools/boot-image-profile-generate.sh b/tools/boot-image-profile-generate.sh
new file mode 100755
index 0000000..de4dd42
--- /dev/null
+++ b/tools/boot-image-profile-generate.sh
@@ -0,0 +1,150 @@
+#!/bin/bash
+#
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This script creates the final boot image profile (suitable to include in the platform build).
+# The input to the script are:
+#   1) the boot.zip file which contains the boot classpath and system server jars.
+#      This file can be obtained from running `m dist` or by configuring the device with
+#      the `art/tools/boot-image-profile-configure-device.sh` script.
+#   2) the preloaded classes blacklist which specifies what classes should not be preloaded
+#      in Zygote. Usually located in frameworks/base/config/preloaded-classes-blacklist
+#   3) a list of raw boot image profiles extracted from devices. An example how to do that is
+#      by running `art/tools/boot-image-profile-extract-profile.sh` script.
+#
+# It is strongly recommended that you make use of extensive critical user journeys flows in order
+# to capture the raw boot image profiles described in #3.
+#
+# NOTE: by default, the script uses default arguments for producing the boot image profiles.
+# You might want to adjust the default generation arguments based on the shape of profile
+# and based on the metrics that matter for each product.
+#
+
+if [[ -z "$ANDROID_BUILD_TOP" ]]; then
+  echo "You must run on this after running envsetup.sh and launch target"
+  exit 1
+fi
+
+if [[ "$#" -lt 4 ]]; then
+  echo "Usage $0 <output-dir> <boot.zip-location> <preloaded-blacklist-location> <profile-input1> <profile-input2> ... <profman args>"
+  echo "Without any profman args the script will use defaults."
+  echo "Example: $0 output-dir boot.zip frameworks/base/config/preloaded-classes-blacklist android1.prof android2.prof"
+  echo "         $0 output-dir boot.zip frameworks/base/config/preloaded-classes-blacklist android.prof --profman-arg --upgrade-startup-to-hot=true"
+  echo "preloaded.black-list is usually in frameworks/base/config/preloaded-classes-blacklist"
+  exit 1
+fi
+
+echo "Creating work dir"
+WORK_DIR=/tmp/android-bcp
+mkdir -p "$WORK_DIR"
+
+OUT_DIR="$1"
+BOOT_ZIP="$2"
+PRELOADED_BLACKLIST="$3"
+shift 3
+
+# Read the profile input args.
+profman_profile_input_args=()
+while [[ "$#" -ge 1 ]] && [[ ! "$1" = '--profman-arg' ]]; do
+  profman_profile_input_args+=("--profile-file=$1")
+  shift
+done
+
+# Read the profman args.
+profman_args=()
+while [[ "$#" -ge 2 ]] && [[ "$1" = '--profman-arg' ]]; do
+  profman_args+=("$2")
+  shift 2
+done
+
+OUT_BOOT_PROFILE="$OUT_DIR"/boot-image-profile.txt
+OUT_PRELOADED_CLASSES="$OUT_DIR"/preloaded-classes
+OUT_SYSTEM_SERVER="$OUT_DIR"/art-profile
+
+echo "Changing dirs to the build top"
+cd "$ANDROID_BUILD_TOP"
+
+echo "Unziping boot.zip"
+BOOT_UNZIP_DIR="$WORK_DIR"/boot-dex
+ART_JARS="$BOOT_UNZIP_DIR"/dex_artjars_input
+BOOT_JARS="$BOOT_UNZIP_DIR"/dex_bootjars_input
+SYSTEM_SERVER_JAR="$BOOT_UNZIP_DIR"/system/framework/services.jar
+
+unzip -o "$BOOT_ZIP" -d "$BOOT_UNZIP_DIR"
+
+echo "Processing boot image jar files"
+jar_args=()
+for entry in "$ART_JARS"/*
+do
+  jar_args+=("--apk=$entry")
+done
+for entry in "$BOOT_JARS"/*
+do
+  jar_args+=("--apk=$entry")
+done
+profman_args+=("${jar_args[@]}")
+
+echo "Running profman for boot image profiles"
+# NOTE:
+# You might want to adjust the default generation arguments based on the data
+# For example, to update the selection thresholds you could specify:
+#  --method-threshold=10 \
+#  --class-threshold=10 \
+#  --preloaded-class-threshold=10 \
+#  --special-package=android:1 \
+#  --special-package=com.android.systemui:1 \
+# The threshold is percentage of total aggregation, that is, a method/class is
+# included in the profile only if it's used by at least x% of the packages.
+# (from 0% - include everything to 100% - include only the items that
+# are used by all packages on device).
+# The --special-package allows you to give a priority to certain packages,
+# meaning, if a method is used by that package then the algorithm will use
+# different selection thresholds.
+# (system server is identified as the "android" package)
+profman \
+  --generate-boot-image-profile \
+  "${profman_profile_input_args[@]}" \
+  --out-profile-path="$OUT_BOOT_PROFILE" \
+  --out-preloaded-classes-path="$OUT_PRELOADED_CLASSES" \
+  --preloaded-classes-blacklist="$PRELOADED_BLACKLIST" \
+  --special-package=android:1 \
+  --special-package=com.android.systemui:1 \
+  "${profman_args[@]}"
+
+echo "Done boot image profile"
+
+echo "Running profman for system server"
+# For system server profile we want to include everything usually
+# We also don't have a preloaded-classes file for it, so we ignore the argument.
+profman \
+  --generate-boot-image-profile \
+  "${profman_profile_input_args[@]}" \
+  --out-profile-path="$OUT_SYSTEM_SERVER" \
+  --apk="$SYSTEM_SERVER_JAR" \
+  --method-threshold=0 \
+  --class-threshold=0
+
+echo "Done system server"
+
+echo ""
+echo "Boot profile methods+classes count:          $(wc -l $OUT_BOOT_PROFILE)"
+echo "Preloaded classes count:                     $(wc -l $OUT_PRELOADED_CLASSES)"
+echo "System server profile methods+classes count: $(wc -l $OUT_SYSTEM_SERVER)"
+
+CLEAN_UP="${CLEAN_UP:-true}"
+if [[ "$CLEAN_UP" = "true" ]]; then
+  rm -rf "$WORK_DIR"
+fi
\ No newline at end of file
diff --git a/tools/bootjars.sh b/tools/bootjars.sh
index 78df99c..f7abf27 100755
--- a/tools/bootjars.sh
+++ b/tools/bootjars.sh
@@ -75,7 +75,7 @@
   # Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
   # because that's what we use for compiling the core.art image.
   # It may contain additional modules from TEST_CORE_JARS.
-  core_jars_list="core-oj core-libart okhttp bouncycastle apache-xml conscrypt"
+  core_jars_list="core-oj core-libart core-icu4j okhttp bouncycastle apache-xml conscrypt"
   core_jars_suffix=
   if [[ $mode == target ]]; then
     core_jars_suffix=-testdex
diff --git a/tools/build/var_cache.py b/tools/build/var_cache.py
index 9e616fa..abfc626 100644
--- a/tools/build/var_cache.py
+++ b/tools/build/var_cache.py
@@ -35,8 +35,7 @@
 #
 # For example, this would be a valid var-cache:
 #
-# export ART_TOOLS_BUILD_VAR_CACHE="TARGET_CORE_JARS='core-oj core-libart'
-#   HOST_CORE_JARS='core-oj-hostdex core-libart-hostdex'"
+# export ART_TOOLS_BUILD_VAR_CACHE="ART_APEX_JARS='core-oj core-libart'"
 #
 # Calling into soong repeatedly is very slow; whenever it needs to be done
 # more than once, the var_cache.py or var_cache.sh script should be used instead.
@@ -119,8 +118,7 @@
   _var_cache_dict = {}
 
   # Parse $ART_TOOLS_BUILD_VAR_CACHE, e.g.
-  #   TARGET_CORE_JARS='core-oj core-libart conscrypt okhttp bouncycastle apache-xml'
-  #   HOST_CORE_JARS='core-oj-hostdex core-libart-hostdex ...'
+  #   ART_APEX_JARS='core-oj core-libart conscrypt okhttp bouncycastle apache-xml'
 
   for line in os.environ['ART_TOOLS_BUILD_VAR_CACHE'].splitlines():
     _debug(line)
diff --git a/tools/build/var_cache.sh b/tools/build/var_cache.sh
index 26e9770..70835d7 100755
--- a/tools/build/var_cache.sh
+++ b/tools/build/var_cache.sh
@@ -37,8 +37,7 @@
 #
 # For example, this would be a valid var-cache:
 #
-# export ART_TOOLS_BUILD_VAR_CACHE="TARGET_CORE_JARS='core-oj core-libart'
-#   HOST_CORE_JARS='core-oj-hostdex core-libart-hostdex'"
+# export ART_TOOLS_BUILD_VAR_CACHE="ART_APEX_JARS='core-oj core-libart'"
 #
 # Calling into soong repeatedly is very slow; whenever it needs to be done
 # more than once, the var_cache.py or var_cache.sh script should be used instead.
@@ -124,8 +123,7 @@
   fi
 
   # Parse $ART_TOOLS_BUILD_VAR_CACHE, e.g.
-  #   TARGET_CORE_JARS='core-oj core-libart conscrypt okhttp bouncycastle apache-xml'
-  #   HOST_CORE_JARS='core-oj-hostdex core-libart-hostdex ...'
+  #   ART_APEX_JARS='core-oj core-libart conscrypt okhttp bouncycastle apache-xml'
 
   local var_name
   local var_value
diff --git a/tools/build/var_list b/tools/build/var_list
index 98a5472..3f2ccfc 100644
--- a/tools/build/var_list
+++ b/tools/build/var_list
@@ -20,10 +20,9 @@
 #
 
 # javac-helper.sh
-TARGET_CORE_JARS
+ART_APEX_JARS
 PRODUCT_BOOT_JARS
 TARGET_OUT_COMMON_INTERMEDIATES
-HOST_CORE_JARS
 HOST_OUT_COMMON_INTERMEDIATES
 
 # testrunner/env.py
diff --git a/tools/build_linux_bionic.sh b/tools/build_linux_bionic.sh
index b401071..89e72b2 100755
--- a/tools/build_linux_bionic.sh
+++ b/tools/build_linux_bionic.sh
@@ -35,15 +35,9 @@
 # Soong needs a bunch of variables set and will not run if they are missing.
 # The default values of these variables is only contained in make, so use
 # nothing to create the variables then remove all the other artifacts.
-
-# TODO(b/123645297) Move hiddenapi steps to soong.
-#
-# Currently hiddenapi relies on .mk to build some of it's configuration files.
-# This prevents us from just cleaning using soong and forces us to do this
-# hacky workaround where we build the targets without linux_bionic and delete
-# the build-config files before going around again. If we fix this issue we can
-# change to only building 'nothing' instead.
-build/soong/soong_ui.bash --make-mode "$@"
+# Lunch since it seems we cannot find the build-number otherwise.
+lunch aosp_x86-eng
+build/soong/soong_ui.bash --make-mode nothing
 
 if [ $? != 0 ]; then
   exit 1
@@ -57,6 +51,7 @@
 # There is no good way to force soong to generate host-bionic builds currently
 # so this is a hacky workaround.
 tmp_soong_var=$(mktemp --tmpdir soong.variables.bak.XXXXXX)
+tmp_build_number=$(cat ${out_dir}/soong/build_number.txt)
 
 cat $out_dir/soong/soong.variables > ${tmp_soong_var}
 
@@ -81,9 +76,14 @@
 x['CrossHostArch'] = 'x86_64'
 if 'CrossHostSecondaryArch' in x:
   del x['CrossHostSecondaryArch']
+if 'DexpreoptGlobalConfig' in x:
+  del x['DexpreoptGlobalConfig']
 json.dump(x, open(sys.argv[2], mode='w'))
 END
 
 rm $tmp_soong_var
 
+# Write a new build-number
+echo ${tmp_build_number}_SOONG_ONLY_BUILD > ${out_dir}/soong/build_number.txt
+
 build/soong/soong_ui.bash --make-mode --skip-make $@
diff --git a/tools/build_linux_bionic_tests.sh b/tools/build_linux_bionic_tests.sh
index c532c90..76dabd4 100755
--- a/tools/build_linux_bionic_tests.sh
+++ b/tools/build_linux_bionic_tests.sh
@@ -80,7 +80,8 @@
   $soong_out/bin/profmand
   $soong_out/bin/hiddenapi
   $soong_out/bin/hprof-conv
-  $soong_out/bin/timeout_dumper
+  $soong_out/bin/signal_dumper
+  $soong_out/lib64/libclang_rt.ubsan_standalone-x86_64-android.so
   $(find $host_out/apex -type f | sed "s:$host_out:$soong_out:g")
   $(find $host_out/lib64 -type f | sed "s:$host_out:$soong_out:g")
   $(find $host_out/nativetest64 -type f | sed "s:$host_out:$soong_out:g"))
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index d404466..17c68f6 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#! /bin/bash
 #
 # Copyright (C) 2015 The Android Open Source Project
 #
@@ -14,6 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+set -e
+
 if [ ! -d art ]; then
   echo "Script needs to be run at the root of the android tree"
   exit 1
@@ -33,7 +35,7 @@
 fi
 
 java_libraries_dir=${out_dir}/target/common/obj/JAVA_LIBRARIES
-common_targets="vogar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests mockito-target"
+common_targets="vogar core-tests apache-harmony-jdwp-tests-hostdex jsr166-tests libartpalette-system mockito-target"
 mode="target"
 j_arg="-j$(nproc)"
 showcommands=
@@ -64,7 +66,7 @@
 extra_args="SOONG_ALLOW_MISSING_DEPENDENCIES=true TEMPORARY_DISABLE_PATH_RESTRICTIONS=true"
 
 if [[ $mode == "host" ]]; then
-  make_command="make $j_arg $extra_args $showcommands build-art-host-tests $common_targets"
+  make_command="build/soong/soong_ui.bash --make-mode $j_arg $extra_args $showcommands build-art-host-tests $common_targets"
   make_command+=" dx-tests junit-host"
   mode_suffix="-host"
 elif [[ $mode == "target" ]]; then
@@ -72,36 +74,33 @@
     echo 'ANDROID_PRODUCT_OUT environment variable is empty; did you forget to run `lunch`?'
     exit 1
   fi
-  make_command="make $j_arg $extra_args $showcommands build-art-target-tests $common_targets"
-  make_command+=" libjavacrypto-target libnetd_client-target linker toybox toolbox sh unzip"
-  make_command+=" debuggerd su"
+  make_command="build/soong/soong_ui.bash --make-mode $j_arg $extra_args $showcommands build-art-target-tests $common_targets"
+  make_command+=" libnetd_client-target toybox toolbox sh"
+  make_command+=" debuggerd su gdbserver"
   make_command+=" libstdc++ "
   make_command+=" ${ANDROID_PRODUCT_OUT#"${ANDROID_BUILD_TOP}/"}/system/etc/public.libraries.txt"
-  make_command+=" standalone-apex-files"
   if [[ -n "$ART_TEST_CHROOT" ]]; then
-    # These targets are needed for the chroot environment.
+    # Targets required to generate a linker configuration on device within the
+    # chroot environment.
+    make_command+=" linkerconfig"
+    # Additional targets needed for the chroot environment.
     make_command+=" crash_dump event-log-tags"
   fi
-  # Build the Debug Runtime APEX (which is a superset of the Release Runtime APEX).
-  make_command+=" com.android.runtime.debug"
-  # Build the bootstrap Bionic libraries (libc, libdl, libm). These are required
-  # as the "main" libc, libdl, and libm have moved to the Runtime APEX. This is
-  # a temporary change needed until both the ART Buildbot and Golem fully
-  # support the Runtime APEX.
-  #
-  # TODO(b/121117762): Remove this when the ART Buildbot and Golem have full
-  # support for the Runtime APEX.
-  make_command+=" libc.bootstrap libdl.bootstrap libm.bootstrap"
-  # Create a copy of the ICU .dat prebuilt files in /system/etc/icu on target,
-  # so that it can found even if the Runtime APEX is not available, by setting
-  # the environment variable `ART_TEST_ANDROID_RUNTIME_ROOT` to "/system" on
-  # device. This is a temporary change needed until both the ART Buildbot and
-  # Golem fully support the Runtime APEX.
-  #
-  # TODO(b/121117762): Remove this when the ART Buildbot and Golem have full
-  # support for the Runtime APEX.
-  make_command+=" icu-data-art-test"
-  mode_suffix="-target"
+  # Build the Runtime (Bionic) APEX.
+  make_command+=" com.android.runtime"
+  # Build the Testing ART APEX (which is a superset of the Release and Debug ART APEXes).
+  make_command+=" com.android.art.testing"
+  # Build the bootstrap Bionic artifacts links (linker, libc, libdl, libm).
+  # These targets create these symlinks:
+  # - from /system/bin/linker(64) to /apex/com.android.runtime/bin/linker(64); and
+  # - from /system/lib(64)/$lib to /apex/com.android.runtime/lib(64)/$lib.
+  make_command+=" linker libc.bootstrap libdl.bootstrap libdl_android.bootstrap libm.bootstrap"
+  # Build the Conscrypt APEX.
+  make_command+=" com.android.conscrypt"
+  # Build the i18n APEX.
+  make_command+=" com.android.i18n"
+  # Build the Time Zone Data APEX.
+  make_command+=" com.android.tzdata"
 fi
 
 mode_specific_libraries="libjavacoretests libjdwp libwrapagentproperties libwrapagentpropertiesd"
@@ -110,7 +109,55 @@
 done
 
 
-
 echo "Executing $make_command"
 # Disable path restrictions to enable luci builds using vpython.
-bash -c "$make_command"
+eval "$make_command"
+
+if [[ $mode == "target" ]]; then
+  # Create canonical name -> file name symlink in the symbol directory for the
+  # Testing ART APEX.
+  #
+  # This mimics the logic from `art/Android.mk`. We made the choice not to
+  # implement this in `art/Android.mk`, as the Testing ART APEX is a test artifact
+  # that should never ship with an actual product, and we try to keep it out of
+  # standard build recipes
+  #
+  # TODO(b/141004137, b/129534335): Remove this, expose the Testing ART APEX in
+  # the `art/Android.mk` build logic, and add absence checks (e.g. in
+  # `build/make/core/main.mk`) to prevent the Testing ART APEX from ending up in a
+  # system image.
+  target_out_unstripped="$ANDROID_PRODUCT_OUT/symbols"
+  link_name="$target_out_unstripped/apex/com.android.art"
+  link_command="mkdir -p $(dirname "$link_name") && ln -sf com.android.art.testing \"$link_name\""
+  echo "Executing $link_command"
+  eval "$link_command"
+  # Also provide access to symbols of binaries from the Runtime (Bionic) APEX,
+  # e.g. to support debugging in GDB.
+  find "$target_out_unstripped/apex/com.android.runtime/bin" -type f | while read target; do
+    cmd="ln -sf $target $target_out_unstripped/system/bin/$(basename $target)"
+    echo "Executing $cmd"
+    eval "$cmd"
+  done
+
+  # Temporary fix for libjavacrypto.so dependencies in libcore and jvmti tests (b/147124225).
+  conscrypt_apex="$ANDROID_PRODUCT_OUT/system/apex/com.android.conscrypt"
+  conscrypt_libs="libjavacrypto.so libcrypto.so libssl.so"
+  if [ ! -d "${conscrypt_apex}" ]; then
+    echo -e "Missing conscrypt APEX in build output: ${conscrypt_apex}"
+    exit 1
+  fi
+  for l in lib lib64; do
+    if [ ! -d "${conscrypt_apex}/$l" ]; then
+      continue
+    fi
+    for so in $conscrypt_libs; do
+      src="${conscrypt_apex}/${l}/${so}"
+      dst="$ANDROID_PRODUCT_OUT/system/${l}/${so}"
+      if [ "${src}" -nt "${dst}" ]; then
+        cmd="cp -p \"${src}\" \"${dst}\""
+        echo "Executing $cmd"
+        eval "$cmd"
+      fi
+    done
+  done
+fi
diff --git a/tools/buildbot-cleanup-device.sh b/tools/buildbot-cleanup-device.sh
new file mode 100755
index 0000000..97e494a
--- /dev/null
+++ b/tools/buildbot-cleanup-device.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+#
+# Copyright (C) 2017 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if [ -t 1 ]; then
+  # Color sequences if terminal is a tty.
+  green='\033[0;32m'
+  nc='\033[0m'
+fi
+
+# Setup as root, as device cleanup requires it.
+adb root
+adb wait-for-device
+
+if [[ -n "$ART_TEST_CHROOT" ]]; then
+  # Check that ART_TEST_CHROOT is correctly defined.
+  if [[ "x$ART_TEST_CHROOT" != x/* ]]; then
+    echo "$ART_TEST_CHROOT is not an absolute path"
+    exit 1
+  fi
+
+  if adb shell test -d "$ART_TEST_CHROOT"; then
+    echo -e "${green}Remove entire /linkerconfig directory from chroot directory${nc}"
+    adb shell rm -rf "$ART_TEST_CHROOT/linkerconfig"
+
+    echo -e "${green}Remove entire /system directory from chroot directory${nc}"
+    adb shell rm -rf "$ART_TEST_CHROOT/system"
+
+    echo -e "${green}Remove entire /data directory from chroot directory${nc}"
+    adb shell rm -rf "$ART_TEST_CHROOT/data"
+
+    echo -e "${green}Remove entire chroot directory${nc}"
+    adb shell rmdir "$ART_TEST_CHROOT" || adb shell ls -la "$ART_TEST_CHROOT"
+  fi
+else
+  adb shell rm -rf \
+    /data/local/tmp /data/art-test /data/nativetest /data/nativetest64 '/data/misc/trace/*'
+fi
diff --git a/tools/buildbot-setup-device.sh b/tools/buildbot-setup-device.sh
new file mode 100755
index 0000000..54c928a
--- /dev/null
+++ b/tools/buildbot-setup-device.sh
@@ -0,0 +1,176 @@
+#!/bin/bash
+#
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The work does by this script is (mostly) undone by tools/buildbot-teardown-device.sh.
+# Make sure to keep these files in sync.
+
+if [ -t 1 ]; then
+  # Color sequences if terminal is a tty.
+  red='\033[0;31m'
+  green='\033[0;32m'
+  yellow='\033[0;33m'
+  nc='\033[0m'
+fi
+
+if [ "$1" = --verbose ]; then
+  verbose=true
+else
+  verbose=false
+fi
+
+# Setup as root, as some actions performed here require it.
+adb root
+adb wait-for-device
+
+echo -e "${green}Date on host${nc}"
+date
+
+echo -e "${green}Date on device${nc}"
+adb shell date
+
+host_seconds_since_epoch=$(date -u +%s)
+device_seconds_since_epoch=$(adb shell date -u +%s)
+
+abs_time_difference_in_seconds=$(expr $host_seconds_since_epoch - $device_seconds_since_epoch)
+if [ $abs_time_difference_in_seconds -lt 0 ]; then
+  abs_time_difference_in_seconds=$(expr 0 - $abs_time_difference_in_seconds)
+fi
+
+seconds_per_hour=3600
+
+# Kill logd first, so that when we set the adb buffer size later in this file,
+# it is brought up again.
+echo -e "${green}Killing logd, seen leaking on fugu/N${nc}"
+adb shell pkill -9 -U logd logd && echo -e "${green}...logd killed${nc}"
+
+# Update date on device if the difference with host is more than one hour.
+if [ $abs_time_difference_in_seconds -gt $seconds_per_hour ]; then
+  echo -e "${green}Update date on device${nc}"
+  adb shell date -u @$host_seconds_since_epoch
+fi
+
+echo -e "${green}Turn off selinux${nc}"
+adb shell setenforce 0
+$verbose && adb shell getenforce
+
+echo -e "${green}Setting local loopback${nc}"
+adb shell ifconfig lo up
+$verbose && adb shell ifconfig
+
+if $verbose; then
+  echo -e "${green}List properties${nc}"
+  adb shell getprop
+
+  echo -e "${green}Uptime${nc}"
+  adb shell uptime
+
+  echo -e "${green}Battery info${nc}"
+  adb shell dumpsys battery
+fi
+
+# Fugu only handles buffer size up to 16MB.
+product_name=$(adb shell getprop ro.build.product)
+
+if [ "x$product_name" = xfugu ]; then
+  buffer_size=16MB
+else
+  buffer_size=32MB
+fi
+
+echo -e "${green}Setting adb buffer size to ${buffer_size}${nc}"
+adb logcat -G ${buffer_size}
+$verbose && adb logcat -g
+
+echo -e "${green}Removing adb spam filter${nc}"
+adb logcat -P ""
+$verbose && adb logcat -p
+
+echo -e "${green}Kill stalled dalvikvm processes${nc}"
+# 'ps' on M can sometimes hang.
+timeout 2s adb shell "ps" >/dev/null
+if [ $? = 124 ]; then
+  echo -e "${green}Rebooting device to fix 'ps'${nc}"
+  adb reboot
+  adb wait-for-device root
+else
+  processes=$(adb shell "ps" | grep dalvikvm | awk '{print $2}')
+  for i in $processes; do adb shell kill -9 $i; done
+fi
+
+# Chroot environment.
+# ===================
+
+if [[ -n "$ART_TEST_CHROOT" ]]; then
+  # Prepare the chroot dir.
+  echo -e "${green}Prepare the chroot dir in $ART_TEST_CHROOT${nc}"
+
+  # Check that ART_TEST_CHROOT is correctly defined.
+  [[ "x$ART_TEST_CHROOT" = x/* ]] || { echo "$ART_TEST_CHROOT is not an absolute path"; exit 1; }
+
+  # Create chroot.
+  adb shell mkdir -p "$ART_TEST_CHROOT"
+
+  # Provide property_contexts file(s) in chroot.
+  # This is required to have Android system properties work from the chroot.
+  # Notes:
+  # - In Android N, only '/property_contexts' is expected.
+  # - In Android O+, property_context files are expected under /system and /vendor.
+  # (See bionic/libc/bionic/system_properties.cpp or
+  # bionic/libc/system_properties/contexts_split.cpp for more information.)
+  property_context_files="/property_contexts \
+    /system/etc/selinux/plat_property_contexts \
+    /vendor/etc/selinux/nonplat_property_context \
+    /plat_property_contexts \
+    /nonplat_property_contexts"
+  for f in $property_context_files; do
+    adb shell test -f "$f" \
+      "&&" mkdir -p "$ART_TEST_CHROOT$(dirname $f)" \
+      "&&" cp -f "$f" "$ART_TEST_CHROOT$f"
+  done
+
+  # Create directories required for ART testing in chroot.
+  adb shell mkdir -p "$ART_TEST_CHROOT/tmp"
+  adb shell mkdir -p "$ART_TEST_CHROOT/data/dalvik-cache"
+  adb shell mkdir -p "$ART_TEST_CHROOT/data/local/tmp"
+
+  # Populate /etc in chroot with required files.
+  adb shell mkdir -p "$ART_TEST_CHROOT/system/etc"
+  adb shell "cd $ART_TEST_CHROOT && ln -sf system/etc etc"
+
+  # Provide /proc in chroot.
+  adb shell mkdir -p "$ART_TEST_CHROOT/proc"
+  adb shell mount | grep -q "^proc on $ART_TEST_CHROOT/proc type proc " \
+    || adb shell mount -t proc proc "$ART_TEST_CHROOT/proc"
+
+  # Provide /sys in chroot.
+  adb shell mkdir -p "$ART_TEST_CHROOT/sys"
+  adb shell mount | grep -q "^sysfs on $ART_TEST_CHROOT/sys type sysfs " \
+    || adb shell mount -t sysfs sysfs "$ART_TEST_CHROOT/sys"
+  # Provide /sys/kernel/debug in chroot.
+  adb shell mount | grep -q "^debugfs on $ART_TEST_CHROOT/sys/kernel/debug type debugfs " \
+    || adb shell mount -t debugfs debugfs "$ART_TEST_CHROOT/sys/kernel/debug"
+
+  # Provide /dev in chroot.
+  adb shell mkdir -p "$ART_TEST_CHROOT/dev"
+  adb shell mount | grep -q "^tmpfs on $ART_TEST_CHROOT/dev type tmpfs " \
+    || adb shell mount -o bind /dev "$ART_TEST_CHROOT/dev"
+
+  # Create /apex directory in chroot.
+  adb shell mkdir -p "$ART_TEST_CHROOT/apex"
+
+  # Create /linkerconfig directory in chroot.
+  adb shell mkdir -p "$ART_TEST_CHROOT/linkerconfig"
+fi
diff --git a/tools/symbolize-buildbot-crashes.sh b/tools/buildbot-symbolize-crashes.sh
similarity index 100%
rename from tools/symbolize-buildbot-crashes.sh
rename to tools/buildbot-symbolize-crashes.sh
diff --git a/tools/buildbot-sync.sh b/tools/buildbot-sync.sh
index 351eb6c..de83c50 100755
--- a/tools/buildbot-sync.sh
+++ b/tools/buildbot-sync.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#! /bin/bash
 #
 # Copyright (C) 2018 The Android Open Source Project
 #
@@ -14,24 +14,116 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# Push ART artifacts and its dependencies to a chroot directory for on-device testing.
+
+if [ -t 1 ]; then
+  # Color sequences if terminal is a tty.
+  red='\033[0;31m'
+  green='\033[0;32m'
+  yellow='\033[0;33m'
+  magenta='\033[0;35m'
+  nc='\033[0m'
+fi
+
+# Setup as root, as some actions performed here require it.
+adb root
 adb wait-for-device
 
-if [[ -z "${ANDROID_PRODUCT_OUT}" ]]; then
+if [[ -z "$ANDROID_BUILD_TOP" ]]; then
+  echo 'ANDROID_BUILD_TOP environment variable is empty; did you forget to run `lunch`?'
+  exit 1
+fi
+
+if [[ -z "$ANDROID_PRODUCT_OUT" ]]; then
   echo 'ANDROID_PRODUCT_OUT environment variable is empty; did you forget to run `lunch`?'
   exit 1
 fi
 
-if [[ -z "${ART_TEST_CHROOT}" ]]; then
-  echo 'ART_TEST_CHROOT environment variable is empty'
+if [[ -z "$ART_TEST_CHROOT" ]]; then
+  echo 'ART_TEST_CHROOT environment variable is empty; please set it before running this script.'
   exit 1
 fi
 
+if [[ "$(build/soong/soong_ui.bash --dumpvar-mode TARGET_FLATTEN_APEX)" != "true" ]]; then
+  echo -e "${red}This script only works when  APEX packages are flattened, but the build" \
+    "configuration is set up to use non-flattened APEX packages.${nc}"
+  echo -e "${magenta}You can force APEX flattening by setting the environment variable" \
+    "\`OVERRIDE_TARGET_FLATTEN_APEX\` to \"true\" before starting the build and running this" \
+    "script.${nc}"
+  exit 1
+fi
+
+
+# `/system` "partition" synchronization.
+# --------------------------------------
+
 # Sync the system directory to the chroot.
-adb push ${ANDROID_PRODUCT_OUT}/system ${ART_TEST_CHROOT}/
+echo -e "${green}Syncing system directory...${nc}"
+adb shell mkdir -p "$ART_TEST_CHROOT/system"
+adb push "$ANDROID_PRODUCT_OUT/system" "$ART_TEST_CHROOT/"
 # Overwrite the default public.libraries.txt file with a smaller one that
 # contains only the public libraries pushed to the chroot directory.
-adb push ${ANDROID_BUILD_TOP}/art/tools/public.libraries.buildbot.txt \
-  ${ART_TEST_CHROOT}/system/etc/public.libraries.txt
+adb push "$ANDROID_BUILD_TOP/art/tools/public.libraries.buildbot.txt" \
+  "$ART_TEST_CHROOT/system/etc/public.libraries.txt"
+
+
+# APEX packages activation.
+# -------------------------
+
+# Manually "activate" the flattened APEX $1 by syncing it to /apex/$2 in the
+# chroot. $2 defaults to $1.
+#
+# TODO: Handle the case of build targets using non-flatted APEX packages.
+# As a workaround, one can run `export OVERRIDE_TARGET_FLATTEN_APEX=true` before building
+# a target to have its APEX packages flattened.
+activate_apex() {
+  local src_apex=${1}
+  local dst_apex=${2:-${src_apex}}
+  echo -e "${green}Activating APEX ${src_apex} as ${dst_apex}...${nc}"
+  # We move the files from `/system/apex/${src_apex}` to `/apex/${dst_apex}` in
+  # the chroot directory, instead of simply using a symlink, as Bionic's linker
+  # relies on the real path name of a binary (e.g.
+  # `/apex/com.android.art/bin/dex2oat`) to select the linker configuration.
+  adb shell mkdir -p "$ART_TEST_CHROOT/apex"
+  adb shell rm -rf "$ART_TEST_CHROOT/apex/${dst_apex}"
+  # Use use mv instead of cp, as cp has a bug on fugu NRD90R where symbolic
+  # links get copied with odd names, eg: libcrypto.so -> /system/lib/libcrypto.soe.sort.so
+  adb shell mv "$ART_TEST_CHROOT/system/apex/${src_apex}" "$ART_TEST_CHROOT/apex/${dst_apex}" \
+    || exit 1
+}
+
+# "Activate" the required APEX modules.
+activate_apex com.android.art.testing com.android.art
+activate_apex com.android.i18n
+activate_apex com.android.runtime
+activate_apex com.android.tzdata
+activate_apex com.android.conscrypt
+
+
+# Linker configuration.
+# ---------------------
+
+# Statically linked `linkerconfig` binary.
+linkerconfig_binary="/system/bin/linkerconfig"
+# Generated linker configuration file path (since Android R).
+ld_generated_config_file_path="/linkerconfig/ld.config.txt"
+# Location of the generated linker configuration file.
+ld_generated_config_file_location=$(dirname "$ld_generated_config_file_path")
+
+# Generate linker configuration files on device.
+echo -e "${green}Generating linker configuration files on device in" \
+  "\`$ld_generated_config_file_path\`${nc}..."
+adb shell chroot "$ART_TEST_CHROOT" \
+  "$linkerconfig_binary" --target "$ld_generated_config_file_location" || exit 1
+ld_generated_config_files=$(adb shell find $ART_TEST_CHROOT/linkerconfig ! -type d | sed 's/^/  /')
+echo -e "${green}Generated linker configuration files on device:${nc}"
+echo -e "${green}$ld_generated_config_files${nc}"
+
+
+# `/data` "partition" synchronization.
+# ------------------------------------
 
 # Sync the data directory to the chroot.
-adb push ${ANDROID_PRODUCT_OUT}/data ${ART_TEST_CHROOT}/
+echo -e "${green}Syncing data directory...${nc}"
+adb shell mkdir -p "$ART_TEST_CHROOT/data"
+adb push "$ANDROID_PRODUCT_OUT/data" "$ART_TEST_CHROOT/"
diff --git a/tools/buildbot-teardown-device.sh b/tools/buildbot-teardown-device.sh
new file mode 100755
index 0000000..e067a70
--- /dev/null
+++ b/tools/buildbot-teardown-device.sh
@@ -0,0 +1,147 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script undoes (most of) the work done by tools/buildbot-setup-device.sh.
+# Make sure to keep these files in sync.
+
+if [ -t 1 ]; then
+  # Color sequences if terminal is a tty.
+  green='\033[0;32m'
+  nc='\033[0m'
+fi
+
+# Setup as root, as some actions performed here require it.
+adb root
+adb wait-for-device
+
+if [[ -n "$ART_TEST_CHROOT" ]]; then
+  # Check that ART_TEST_CHROOT is correctly defined.
+  [[ "x$ART_TEST_CHROOT" = x/* ]] || { echo "$ART_TEST_CHROOT is not an absolute path"; exit 1; }
+
+  if adb shell test -d "$ART_TEST_CHROOT"; then
+    # Display users of the chroot dir.
+
+    echo -e "${green}List open files under chroot dir $ART_TEST_CHROOT${nc}"
+    adb shell lsof | grep "$ART_TEST_CHROOT"
+
+    # for_all_chroot_process ACTION
+    # -----------------------------
+    # Execute ACTION on all processes running from binaries located
+    # under the chroot directory. ACTION is passed two arguments: the
+    # PID of the process, and a string containing the command line
+    # that started this process.
+    for_all_chroot_process() {
+      local action=$1
+      adb shell ls -ld "/proc/*/root" \
+        | sed -n -e "s,^.* \\(/proc/.*/root\\) -> $ART_TEST_CHROOT\$,\\1,p" \
+        | while read link; do
+            local dir=$(dirname "$link")
+            local pid=$(basename "$dir")
+            local cmdline=$(adb shell cat "$dir"/cmdline | tr '\000' ' ')
+            $action "$pid" "$cmdline"
+          done
+    }
+
+    # display_process PID CMDLINE
+    # ---------------------------
+    # Display information about process with given PID, that was started with CMDLINE.
+    display_process() {
+      local pid=$1
+      local cmdline=$2
+      echo "$cmdline (PID: $pid)"
+    }
+
+    echo -e "${green}List processes running from binaries under chroot dir $ART_TEST_CHROOT${nc}"
+    for_all_chroot_process display_process
+
+    # Tear down the chroot dir.
+
+    echo -e "${green}Tear down the chroot set up in $ART_TEST_CHROOT${nc}"
+
+    # remove_filesystem_from_chroot DIR-IN-CHROOT FSTYPE REMOVE-DIR-IN-CHROOT
+    # -----------------------------------------------------------------------
+    # Unmount filesystem with type FSTYPE mounted in directory DIR-IN-CHROOT
+    # under the chroot directory.
+    # Remove DIR-IN-CHROOT under the chroot if REMOVE-DIR-IN-CHROOT is
+    # true.
+    remove_filesystem_from_chroot() {
+      local dir_in_chroot=$1
+      local fstype=$2
+      local remove_dir=$3
+      local dir="$ART_TEST_CHROOT/$dir_in_chroot"
+      adb shell test -d "$dir" \
+        && adb shell mount | grep -q "^$fstype on $dir type $fstype " \
+        && if adb shell umount "$dir"; then
+             $remove_dir && adb shell rmdir "$dir"
+           else
+             echo "Files still open in $dir:"
+             adb shell lsof | grep "$dir"
+           fi
+    }
+
+    # Remove /apex from chroot.
+    adb shell rm -rf "$ART_TEST_CHROOT/apex"
+
+    # Remove /dev from chroot.
+    remove_filesystem_from_chroot dev tmpfs true
+
+    # Remove /sys/kernel/debug from chroot.
+    # The /sys/kernel/debug directory under the chroot dir cannot be
+    # deleted, as it is part of the host device's /sys filesystem.
+    remove_filesystem_from_chroot sys/kernel/debug debugfs false
+    # Remove /sys from chroot.
+    remove_filesystem_from_chroot sys sysfs true
+
+    # Remove /proc from chroot.
+    remove_filesystem_from_chroot proc proc true
+
+    # Remove /etc from chroot.
+    adb shell rm -f "$ART_TEST_CHROOT/etc"
+    adb shell rm -rf "$ART_TEST_CHROOT/system/etc"
+
+    # Remove directories used for ART testing in chroot.
+    adb shell rm -rf "$ART_TEST_CHROOT/data/local/tmp"
+    adb shell rm -rf "$ART_TEST_CHROOT/data/dalvik-cache"
+    adb shell rm -rf "$ART_TEST_CHROOT/tmp"
+
+    # Remove property_contexts file(s) from chroot.
+    property_context_files="/property_contexts \
+      /system/etc/selinux/plat_property_contexts \
+      /vendor/etc/selinux/nonplat_property_context \
+      /plat_property_contexts \
+      /nonplat_property_contexts"
+    for f in $property_context_files; do
+      adb shell rm -f "$ART_TEST_CHROOT$f"
+    done
+
+
+    # Kill processes still running in the chroot.
+
+    # kill_process PID CMDLINE
+    # ------------------------
+    # Kill process with given PID, that was started with CMDLINE.
+    kill_process() {
+      local pid=$1
+      local cmdline=$2
+      echo "Killing $cmdline (PID: $pid)"
+      adb shell kill -9 "$pid"
+    }
+
+    echo -e "${green}Kill processes still running from binaries under" \
+      "chroot dir $ART_TEST_CHROOT (if any)${nc} "
+    for_all_chroot_process kill_process
+  fi
+fi
diff --git a/tools/checker/README b/tools/checker/README
index b8dd803..8a6b128 100644
--- a/tools/checker/README
+++ b/tools/checker/README
@@ -82,4 +82,4 @@
 thereby avoiding to repeat the check lines if some, but not all architectures
 match. An example line looks like:
 
-  /// CHECK-START-{MIPS,ARM,ARM64}: int MyClass.MyMethod() constant_folding (after)
+  /// CHECK-START-{X86_64,ARM,ARM64}: int MyClass.MyMethod() constant_folding (after)
diff --git a/tools/checker/common/archs.py b/tools/checker/common/archs.py
index 178e0b5..9628c88 100644
--- a/tools/checker/common/archs.py
+++ b/tools/checker/common/archs.py
@@ -12,4 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-archs_list = ['ARM', 'ARM64', 'MIPS', 'MIPS64', 'X86', 'X86_64']
+archs_list = ['ARM', 'ARM64', 'X86', 'X86_64']
diff --git a/tools/checker/match/file.py b/tools/checker/match/file.py
index 520c4ae..3b2e67e 100644
--- a/tools/checker/match/file.py
+++ b/tools/checker/match/file.py
@@ -172,6 +172,8 @@
     # match a check group against the first output group of the same name.
     c1Pass = c1File.findPass(testCase.name)
     if c1Pass is None:
+      with file(c1File.fileName) as cfgFile:
+        Logger.log(''.join(cfgFile), Logger.Level.Error)
       Logger.fail("Test case not found in the CFG file",
                   testCase.fileName, testCase.startLineNo, testCase.name)
 
@@ -186,4 +188,6 @@
       else:
         msg = "Assertion could not be matched starting from line {}"
       msg = msg.format(lineNo)
+      with file(c1File.fileName) as cfgFile:
+        Logger.log(''.join(cfgFile), Logger.Level.Error)
       Logger.testFailed(msg, e.assertion, e.variables)
diff --git a/tools/class2greylist/Android.bp b/tools/class2greylist/Android.bp
index 1e3cdff..f54aee7 100644
--- a/tools/class2greylist/Android.bp
+++ b/tools/class2greylist/Android.bp
@@ -15,20 +15,21 @@
 //
 
 java_library_host {
-  name: "class2greylistlib",
-  srcs: ["src/**/*.java"],
-  static_libs: [
-    "commons-cli-1.2",
-    "apache-bcel",
-    "guava",
-  ],
+    name: "class2greylistlib",
+    srcs: ["src/**/*.java"],
+    static_libs: [
+        "commons-cli-1.2",
+        "apache-bcel",
+        "guava",
+        "testng",
+        "hamcrest-library",
+    ],
 }
 
 java_binary_host {
-  name: "class2greylist",
-  manifest: "src/class2greylist.mf",
-  static_libs: [
-    "class2greylistlib",
-  ],
+    name: "class2greylist",
+    manifest: "src/class2greylist.mf",
+    static_libs: [
+        "class2greylistlib",
+    ],
 }
-
diff --git a/tools/class2greylist/src/com/android/class2greylist/AlternativeNotFoundError.java b/tools/class2greylist/src/com/android/class2greylist/AlternativeNotFoundError.java
new file mode 100644
index 0000000..10b2d9a
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/AlternativeNotFoundError.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package com.android.class2greylist;
+
+public class AlternativeNotFoundError extends Exception {
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotationContext.java b/tools/class2greylist/src/com/android/class2greylist/AnnotationContext.java
index 73b74a9..f0f7446 100644
--- a/tools/class2greylist/src/com/android/class2greylist/AnnotationContext.java
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotationContext.java
@@ -20,7 +20,7 @@
 
 /**
  */
-public abstract class AnnotationContext {
+public abstract class AnnotationContext implements ErrorReporter {
 
   public final Status status;
   public final JavaClass definingClass;
@@ -42,10 +42,4 @@
    * the greylist.
    */
   public abstract String getMemberDescriptor();
-
-  /**
-   * Report an error in this context. The final error message will include
-   * the class and member names, and the source file name.
-   */
-  public abstract void reportError(String message, Object... args);
 }
diff --git a/tools/class2greylist/src/com/android/class2greylist/AnnotationPropertyWriter.java b/tools/class2greylist/src/com/android/class2greylist/AnnotationPropertyWriter.java
index aacd963..6656d3f 100644
--- a/tools/class2greylist/src/com/android/class2greylist/AnnotationPropertyWriter.java
+++ b/tools/class2greylist/src/com/android/class2greylist/AnnotationPropertyWriter.java
@@ -3,6 +3,7 @@
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
+import java.io.OutputStream;
 import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.Comparator;
@@ -25,6 +26,12 @@
         mColumns = new HashSet<>();
     }
 
+    public AnnotationPropertyWriter(OutputStream output) {
+        mOutput = new PrintStream(output);
+        mContents = new ArrayList<>();
+        mColumns = new HashSet<>();
+    }
+
     public void consume(String apiSignature, Map<String, String> annotationProperties,
             Set<String> parsedFlags) {
         // Clone properties map.
@@ -38,6 +45,13 @@
         mContents.add(contents);
     }
 
+    private static String escapeCsvColumn(String column) {
+        // Using '|' as a quote character, as in frameworks/base/tools/hiddenapi/merge_csv.py
+        // Escape '|' characters in the column, then wrap the column in '|' characters.
+        column = column.replace("|", "||");
+        return "|" + column + "|";
+    }
+
     public void close() {
         // Sort columns by name and print header row.
         List<String> columns = new ArrayList<>(mColumns);
@@ -47,6 +61,7 @@
         // Sort contents according to columns and print.
         for (Map<String, String> row : mContents) {
             mOutput.println(columns.stream().map(column -> row.getOrDefault(column, ""))
+                    .map(column -> escapeCsvColumn(column))
                     .collect(Collectors.joining(",")));
         }
 
diff --git a/tools/class2greylist/src/com/android/class2greylist/ApiComponents.java b/tools/class2greylist/src/com/android/class2greylist/ApiComponents.java
new file mode 100644
index 0000000..3da4fe8
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/ApiComponents.java
@@ -0,0 +1,327 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package com.android.class2greylist;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * Class which can parse either dex style signatures (e.g. Lfoo/bar/baz$bat;->foo()V) or javadoc
+ * links to class members (e.g. {@link #toString()} or {@link java.util.List#clear()}).
+ */
+public class ApiComponents {
+    private static final String PRIMITIVE_TYPES = "ZBCSIJFD";
+    private final PackageAndClassName mPackageAndClassName;
+    // The reference can be just to a class, in which case mMemberName should be empty.
+    private final String mMemberName;
+    // If the member being referenced is a field, this will always be empty.
+    private final String mMethodParameterTypes;
+
+    private ApiComponents(PackageAndClassName packageAndClassName, String memberName,
+            String methodParameterTypes) {
+        mPackageAndClassName = packageAndClassName;
+        mMemberName = memberName;
+        mMethodParameterTypes = methodParameterTypes;
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder()
+                .append(mPackageAndClassName.packageName)
+                .append(".")
+                .append(mPackageAndClassName.className);
+        if (!mMemberName.isEmpty()) {
+            sb.append("#").append(mMemberName).append("(").append(mMethodParameterTypes).append(
+                    ")");
+        }
+        return sb.toString();
+    }
+
+    public PackageAndClassName getPackageAndClassName() {
+        return mPackageAndClassName;
+    }
+
+    public String getMemberName() {
+        return mMemberName;
+    }
+
+    public String getMethodParameterTypes() {
+        return mMethodParameterTypes;
+    }
+
+    /**
+     * Parse a JNI class descriptor. e.g. Lfoo/bar/Baz;
+     *
+     * @param sc Cursor over string assumed to contain a JNI class descriptor.
+     * @return The fully qualified class, in 'dot notation' (e.g. foo.bar.Baz for a class named Baz
+     * in the foo.bar package). The cursor will be placed after the semicolon.
+     */
+    private static String parseJNIClassDescriptor(StringCursor sc)
+            throws SignatureSyntaxError, StringCursorOutOfBoundsException {
+        if (sc.peek() != 'L') {
+            throw new SignatureSyntaxError(
+                    "Expected JNI class descriptor to start with L, but instead got " + sc.peek(),
+                    sc);
+        }
+        // Consume the L.
+        sc.next();
+        int semiColonPos = sc.find(';');
+        if (semiColonPos == -1) {
+            throw new SignatureSyntaxError("Expected semicolon at the end of JNI class descriptor",
+                    sc);
+        }
+        String jniClassDescriptor = sc.next(semiColonPos);
+        // Consume the semicolon.
+        sc.next();
+        return jniClassDescriptor.replace("/", ".");
+    }
+
+    /**
+     * Parse a primitive JNI type
+     *
+     * @param sc Cursor over a string assumed to contain a primitive JNI type.
+     * @return String containing parsed primitive JNI type.
+     */
+    private static String parseJNIPrimitiveType(StringCursor sc)
+            throws SignatureSyntaxError, StringCursorOutOfBoundsException {
+        char c = sc.next();
+        switch (c) {
+            case 'Z':
+                return "boolean";
+            case 'B':
+                return "byte";
+            case 'C':
+                return "char";
+            case 'S':
+                return "short";
+            case 'I':
+                return "int";
+            case 'J':
+                return "long";
+            case 'F':
+                return "float";
+            case 'D':
+                return "double";
+            default:
+                throw new SignatureSyntaxError(c + " is not a primitive type!", sc);
+        }
+    }
+
+    /**
+     * Parse a JNI type; can be either a primitive or object type. Arrays are handled separately.
+     *
+     * @param sc Cursor over the string assumed to contain a JNI type.
+     * @return String containing parsed JNI type.
+     */
+    private static String parseJniTypeWithoutArrayDimensions(StringCursor sc)
+            throws SignatureSyntaxError, StringCursorOutOfBoundsException {
+        char c = sc.peek();
+        if (PRIMITIVE_TYPES.indexOf(c) != -1) {
+            return parseJNIPrimitiveType(sc);
+        } else if (c == 'L') {
+            return parseJNIClassDescriptor(sc);
+        }
+        throw new SignatureSyntaxError("Illegal token " + c + " within signature", sc);
+    }
+
+    /**
+     * Parse a JNI type.
+     *
+     * This parameter can be an array, in which case it will be preceded by a number of open square
+     * brackets (corresponding to its dimensionality)
+     *
+     * @param sc Cursor over the string assumed to contain a JNI type.
+     * @return Same as {@link #parseJniTypeWithoutArrayDimensions}, but also handle arrays.
+     */
+    private static String parseJniType(StringCursor sc)
+            throws SignatureSyntaxError, StringCursorOutOfBoundsException {
+        int arrayDimension = 0;
+        while (sc.peek() == '[') {
+            ++arrayDimension;
+            sc.next();
+        }
+        StringBuilder sb = new StringBuilder();
+        sb.append(parseJniTypeWithoutArrayDimensions(sc));
+        for (int i = 0; i < arrayDimension; ++i) {
+            sb.append("[]");
+        }
+        return sb.toString();
+    }
+
+    /**
+     * Converts the parameters of method from JNI notation to Javadoc link notation. e.g.
+     * "(IILfoo/bar/Baz;)V" turns into "int, int, foo.bar.Baz". The parentheses and return type are
+     * discarded.
+     *
+     * @param sc Cursor over the string assumed to contain a JNI method parameters.
+     * @return Comma separated list of parameter types.
+     */
+    private static String convertJNIMethodParametersToJavadoc(StringCursor sc)
+            throws SignatureSyntaxError, StringCursorOutOfBoundsException {
+        List<String> methodParameterTypes = new ArrayList<>();
+        if (sc.next() != '(') {
+            throw new IllegalArgumentException("Trying to parse method params of an invalid dex " +
+                    "signature: " + sc.getOriginalString());
+        }
+        while (sc.peek() != ')') {
+            methodParameterTypes.add(parseJniType(sc));
+        }
+        return String.join(", ", methodParameterTypes);
+    }
+
+    /**
+     * Generate ApiComponents from a dex signature.
+     *
+     * This is used to extract the necessary context for an alternative API to try to infer missing
+     * information.
+     *
+     * @param signature Dex signature.
+     * @return ApiComponents instance with populated package, class name, and parameter types if
+     * applicable.
+     */
+    public static ApiComponents fromDexSignature(String signature) throws SignatureSyntaxError {
+        StringCursor sc = new StringCursor(signature);
+        try {
+            String fullyQualifiedClass = parseJNIClassDescriptor(sc);
+
+            PackageAndClassName packageAndClassName =
+                    PackageAndClassName.splitClassName(fullyQualifiedClass);
+            if (!sc.peek(2).equals("->")) {
+                throw new SignatureSyntaxError("Expected '->'", sc);
+            }
+            // Consume "->"
+            sc.next(2);
+            String memberName = "";
+            String methodParameterTypes = "";
+            int leftParenPos = sc.find('(');
+            if (leftParenPos != -1) {
+                memberName = sc.next(leftParenPos);
+                methodParameterTypes = convertJNIMethodParametersToJavadoc(sc);
+            } else {
+                int colonPos = sc.find(':');
+                if (colonPos == -1) {
+                    throw new IllegalArgumentException("Expected : or -> beyond position "
+                            + sc.position() + " in " + signature);
+                } else {
+                    memberName = sc.next(colonPos);
+                    // Consume the ':'.
+                    sc.next();
+                    // Consume the type.
+                    parseJniType(sc);
+                }
+            }
+            return new ApiComponents(packageAndClassName, memberName, methodParameterTypes);
+        } catch (StringCursorOutOfBoundsException e) {
+            throw new SignatureSyntaxError(
+                    "Unexpectedly reached end of string while trying to parse signature ", sc);
+        }
+    }
+
+    /**
+     * Generate ApiComponents from a link tag.
+     *
+     * @param linkTag          The contents of a link tag.
+     * @param contextSignature The signature of the private API that this is an alternative for.
+     *                         Used to infer unspecified components.
+     */
+    public static ApiComponents fromLinkTag(String linkTag, String contextSignature)
+            throws JavadocLinkSyntaxError {
+        ApiComponents contextAlternative;
+        try {
+            contextAlternative = fromDexSignature(contextSignature);
+        } catch (SignatureSyntaxError e) {
+            throw new RuntimeException(
+                    "Failed to parse the context signature for public alternative!");
+        }
+        StringCursor sc = new StringCursor(linkTag);
+        try {
+
+            String memberName = "";
+            String methodParameterTypes = "";
+
+            int tagPos = sc.find('#');
+            String fullyQualifiedClassName = sc.next(tagPos);
+
+            PackageAndClassName packageAndClassName =
+                    PackageAndClassName.splitClassName(fullyQualifiedClassName);
+
+            if (packageAndClassName.packageName.isEmpty()) {
+                packageAndClassName.packageName = contextAlternative.getPackageAndClassName()
+                        .packageName;
+            }
+
+            if (packageAndClassName.className.isEmpty()) {
+                packageAndClassName.className = contextAlternative.getPackageAndClassName()
+                        .className;
+            }
+
+            if (tagPos == -1) {
+                // This suggested alternative is just a class. We can allow that.
+                return new ApiComponents(packageAndClassName, "", "");
+            } else {
+                // Consume the #.
+                sc.next();
+            }
+
+            int leftParenPos = sc.find('(');
+            memberName = sc.next(leftParenPos);
+            if (leftParenPos != -1) {
+                // Consume the '('.
+                sc.next();
+                int rightParenPos = sc.find(')');
+                if (rightParenPos == -1) {
+                    throw new JavadocLinkSyntaxError(
+                            "Linked method is missing a closing parenthesis", sc);
+                } else {
+                    methodParameterTypes = sc.next(rightParenPos);
+                }
+            }
+
+            return new ApiComponents(packageAndClassName, memberName, methodParameterTypes);
+        } catch (StringCursorOutOfBoundsException e) {
+            throw new JavadocLinkSyntaxError(
+                    "Unexpectedly reached end of string while trying to parse javadoc link", sc);
+        }
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof ApiComponents)) {
+            return false;
+        }
+        ApiComponents other = (ApiComponents) obj;
+        return mPackageAndClassName.equals(other.mPackageAndClassName) && mMemberName.equals(
+                other.mMemberName) && mMethodParameterTypes.equals(other.mMethodParameterTypes);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(mPackageAndClassName, mMemberName, mMethodParameterTypes);
+    }
+
+    /**
+     * Less restrictive comparator to use in case a link tag is missing a method's parameters.
+     * e.g. foo.bar.Baz#foo will be considered the same as foo.bar.Baz#foo(int, int) and
+     * foo.bar.Baz#foo(long, long). If the class only has one method with that name, then specifying
+     * its parameter types is optional within the link tag.
+     */
+    public boolean equalsIgnoringParam(ApiComponents other) {
+        return mPackageAndClassName.equals(other.mPackageAndClassName) &&
+                mMemberName.equals(other.mMemberName);
+    }
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/ApiResolver.java b/tools/class2greylist/src/com/android/class2greylist/ApiResolver.java
new file mode 100644
index 0000000..b120978
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/ApiResolver.java
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package com.android.class2greylist;
+
+import com.google.common.base.Strings;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+public class ApiResolver {
+    private final List<ApiComponents> mPotentialPublicAlternatives;
+    private final Set<PackageAndClassName> mPublicApiClasses;
+
+    private static final Pattern LINK_TAG_PATTERN = Pattern.compile("\\{@link ([^\\}]+)\\}");
+    private static final Pattern CODE_TAG_PATTERN = Pattern.compile("\\{@code ([^\\}]+)\\}");
+    private static final Integer MIN_SDK_REQUIRING_PUBLIC_ALTERNATIVES = 29;
+
+    public ApiResolver() {
+        mPotentialPublicAlternatives = null;
+        mPublicApiClasses = null;
+    }
+
+    public ApiResolver(Set<String> publicApis) {
+        mPotentialPublicAlternatives = publicApis.stream()
+                .map(api -> {
+                    try {
+                        return ApiComponents.fromDexSignature(api);
+                    } catch (SignatureSyntaxError e) {
+                        throw new RuntimeException("Could not parse public API signature:", e);
+                    }
+                })
+                .collect(Collectors.toList());
+        mPublicApiClasses = mPotentialPublicAlternatives.stream()
+                .map(api -> api.getPackageAndClassName())
+                .collect(Collectors.toCollection(HashSet::new));
+    }
+
+    /**
+     * Verify that all public alternatives are valid.
+     *
+     * @param publicAlternativesString String containing public alternative explanations.
+     * @param signature                Signature of the member that has the annotation.
+     */
+    public void resolvePublicAlternatives(String publicAlternativesString, String signature,
+                                          Integer maxSdkVersion)
+            throws JavadocLinkSyntaxError, AlternativeNotFoundError,
+                    RequiredAlternativeNotSpecifiedError {
+        if (Strings.isNullOrEmpty(publicAlternativesString) && maxSdkVersion != null
+                && maxSdkVersion >= MIN_SDK_REQUIRING_PUBLIC_ALTERNATIVES) {
+            throw new RequiredAlternativeNotSpecifiedError();
+        }
+        if (publicAlternativesString != null && mPotentialPublicAlternatives != null) {
+            // Grab all instances of type {@link foo}
+            Matcher matcher = LINK_TAG_PATTERN.matcher(publicAlternativesString);
+            boolean hasLinkAlternative = false;
+            // Validate all link tags
+            while (matcher.find()) {
+                hasLinkAlternative = true;
+                String alternativeString = matcher.group(1);
+                ApiComponents alternative = ApiComponents.fromLinkTag(alternativeString,
+                        signature);
+                if (alternative.getMemberName().isEmpty()) {
+                    // Provided class as alternative
+                    if (!mPublicApiClasses.contains(alternative.getPackageAndClassName())) {
+                        throw new ClassAlternativeNotFoundError(alternative);
+                    }
+                } else if (!mPotentialPublicAlternatives.contains(alternative)) {
+                    // If the link is not a public alternative, it must be because the link does
+                    // contain the method parameter types, e.g. {@link foo.bar.Baz#foo} instead of
+                    // {@link foo.bar.Baz#foo(int)}. If the method name is unique within the class,
+                    // we can handle it.
+                    if (!Strings.isNullOrEmpty(alternative.getMethodParameterTypes())) {
+                        throw new MemberAlternativeNotFoundError(alternative);
+                    }
+                    List<ApiComponents> almostMatches = mPotentialPublicAlternatives.stream()
+                            .filter(api -> api.equalsIgnoringParam(alternative))
+                            .collect(Collectors.toList());
+                    if (almostMatches.size() == 0) {
+                        throw new MemberAlternativeNotFoundError(alternative);
+                    } else if (almostMatches.size() > 1) {
+                        throw new MultipleAlternativesFoundError(alternative, almostMatches);
+                    }
+                }
+            }
+            // No {@link ...} alternatives exist; try looking for {@code ...}
+            if (!hasLinkAlternative) {
+                if (!CODE_TAG_PATTERN.matcher(publicAlternativesString).find()) {
+                    throw new NoAlternativesSpecifiedError();
+                }
+            }
+        }
+    }
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
index 01b36a4..afdd692 100644
--- a/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
+++ b/tools/class2greylist/src/com/android/class2greylist/Class2Greylist.java
@@ -16,12 +16,9 @@
 
 package com.android.class2greylist;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Splitter;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableMap.Builder;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.collect.Sets;
 import com.google.common.io.Files;
 
 import org.apache.commons.cli.CommandLine;
@@ -34,8 +31,7 @@
 
 import java.io.File;
 import java.io.IOException;
-import java.nio.charset.Charset;
-import java.util.Arrays;
+import java.nio.charset.StandardCharsets;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
@@ -48,32 +44,29 @@
  */
 public class Class2Greylist {
 
-    private static final Set<String> GREYLIST_ANNOTATIONS =
-            ImmutableSet.of(
-                    "android.annotation.UnsupportedAppUsage",
-                    "dalvik.annotation.compat.UnsupportedAppUsage");
-    private static final Set<String> WHITELIST_ANNOTATIONS = ImmutableSet.of();
+    private static final String UNSUPPORTED_APP_USAGE_ANNOTATION =
+            "android.compat.annotation.UnsupportedAppUsage";
 
-    public static final String FLAG_WHITELIST = "whitelist";
-    public static final String FLAG_GREYLIST = "greylist";
-    public static final String FLAG_BLACKLIST = "blacklist";
-    public static final String FLAG_GREYLIST_MAX_O = "greylist-max-o";
-    public static final String FLAG_GREYLIST_MAX_P = "greylist-max-p";
+    private static final String FLAG_GREYLIST = "greylist";
+    private static final String FLAG_BLACKLIST = "blacklist";
+    private static final String FLAG_GREYLIST_MAX_O = "greylist-max-o";
+    private static final String FLAG_GREYLIST_MAX_P = "greylist-max-p";
+    private static final String FLAG_GREYLIST_MAX_Q = "greylist-max-q";
 
-    public static final String FLAG_PUBLIC_API = "public-api";
+    private static final String FLAG_PUBLIC_API = "public-api";
 
     private static final Map<Integer, String> TARGET_SDK_TO_LIST_MAP;
     static {
         Map<Integer, String> map = new HashMap<>();
         map.put(null, FLAG_GREYLIST);
+        map.put(0, FLAG_BLACKLIST);
         map.put(26, FLAG_GREYLIST_MAX_O);
         map.put(28, FLAG_GREYLIST_MAX_P);
+        map.put(29, FLAG_GREYLIST_MAX_Q);
         TARGET_SDK_TO_LIST_MAP = Collections.unmodifiableMap(map);
     }
 
     private final Status mStatus;
-    private final String mCsvFlagsFile;
-    private final String mCsvMetadataFile;
     private final String[] mJarFiles;
     private final AnnotationConsumer mOutput;
     private final Set<String> mPublicApis;
@@ -162,23 +155,20 @@
 
     }
 
-    @VisibleForTesting
-    Class2Greylist(Status status, String stubApiFlagsFile, String csvFlagsFile,
+    private Class2Greylist(Status status, String stubApiFlagsFile, String csvFlagsFile,
             String csvMetadataFile, String[] jarFiles)
             throws IOException {
         mStatus = status;
-        mCsvFlagsFile = csvFlagsFile;
-        mCsvMetadataFile = csvMetadataFile;
         mJarFiles = jarFiles;
-        if (mCsvMetadataFile != null) {
-            mOutput = new AnnotationPropertyWriter(mCsvMetadataFile);
+        if (csvMetadataFile != null) {
+            mOutput = new AnnotationPropertyWriter(csvMetadataFile);
         } else {
-            mOutput = new HiddenapiFlagsWriter(mCsvFlagsFile);
+            mOutput = new HiddenapiFlagsWriter(csvFlagsFile);
         }
 
         if (stubApiFlagsFile != null) {
             mPublicApis =
-                    Files.readLines(new File(stubApiFlagsFile), Charset.forName("UTF-8")).stream()
+                    Files.readLines(new File(stubApiFlagsFile), StandardCharsets.UTF_8).stream()
                         .map(s -> Splitter.on(",").splitToList(s))
                         .filter(s -> s.contains(FLAG_PUBLIC_API))
                         .map(s -> s.get(0))
@@ -193,12 +183,12 @@
         UnsupportedAppUsageAnnotationHandler greylistAnnotationHandler =
                 new UnsupportedAppUsageAnnotationHandler(
                     mStatus, mOutput, mPublicApis, TARGET_SDK_TO_LIST_MAP);
-        GREYLIST_ANNOTATIONS
-            .forEach(a -> addRepeatedAnnotationHandlers(
+
+        addRepeatedAnnotationHandlers(
                 builder,
-                classNameToSignature(a),
-                classNameToSignature(a + "$Container"),
-                greylistAnnotationHandler));
+                classNameToSignature(UNSUPPORTED_APP_USAGE_ANNOTATION),
+                classNameToSignature(UNSUPPORTED_APP_USAGE_ANNOTATION + "$Container"),
+                greylistAnnotationHandler);
 
         CovariantReturnTypeHandler covariantReturnTypeHandler = new CovariantReturnTypeHandler(
             mOutput, mPublicApis, FLAG_PUBLIC_API);
@@ -230,7 +220,7 @@
             .put(containerAnnotationName, new RepeatedAnnotationHandler(annotationName, handler));
     }
 
-    private void main() throws IOException {
+    private void main() {
         Map<String, AnnotationHandler> handlers = createAnnotationHandlers();
         for (String jarFile : mJarFiles) {
             mStatus.debug("Processing jar file %s", jarFile);
diff --git a/tools/class2greylist/src/com/android/class2greylist/ClassAlternativeNotFoundError.java b/tools/class2greylist/src/com/android/class2greylist/ClassAlternativeNotFoundError.java
new file mode 100644
index 0000000..1f398f1
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/ClassAlternativeNotFoundError.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package com.android.class2greylist;
+
+public class ClassAlternativeNotFoundError extends AlternativeNotFoundError {
+    public final ApiComponents alternative;
+
+    ClassAlternativeNotFoundError(ApiComponents alternative) {
+        this.alternative = alternative;
+    }
+
+    @Override
+    public String toString() {
+        return "Specified class " + alternative.getPackageAndClassName() + " does not exist!";
+    }
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/ErrorReporter.java b/tools/class2greylist/src/com/android/class2greylist/ErrorReporter.java
new file mode 100644
index 0000000..24a92f0
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/ErrorReporter.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package com.android.class2greylist;
+
+public interface ErrorReporter {
+    /**
+     * Report an error in this context. The final error message will include
+     * the class and member names, and the source file name.
+     */
+    void reportError(String message, Object... args);
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/JavadocLinkSyntaxError.java b/tools/class2greylist/src/com/android/class2greylist/JavadocLinkSyntaxError.java
new file mode 100644
index 0000000..55014cb
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/JavadocLinkSyntaxError.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package com.android.class2greylist;
+
+public class JavadocLinkSyntaxError extends Exception {
+    public final String expected;
+    public final int position;
+    public final String context;
+
+    public JavadocLinkSyntaxError(String expected, StringCursor sc) {
+        super(expected + " at position " + sc.position() + " in " + sc.getOriginalString());
+        this.expected = expected;
+        this.position = sc.position();
+        this.context = sc.getOriginalString();
+    }
+}
+
diff --git a/tools/class2greylist/src/com/android/class2greylist/MemberAlternativeNotFoundError.java b/tools/class2greylist/src/com/android/class2greylist/MemberAlternativeNotFoundError.java
new file mode 100644
index 0000000..43f853e
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/MemberAlternativeNotFoundError.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package com.android.class2greylist;
+
+public class MemberAlternativeNotFoundError extends AlternativeNotFoundError {
+    public final ApiComponents alternative;
+
+    MemberAlternativeNotFoundError(ApiComponents alternative) {
+        this.alternative = alternative;
+    }
+
+    @Override
+    public String toString() {
+        return "Could not find public api " + alternative + ".";
+    }
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/MultipleAlternativesFoundError.java b/tools/class2greylist/src/com/android/class2greylist/MultipleAlternativesFoundError.java
new file mode 100644
index 0000000..a598534
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/MultipleAlternativesFoundError.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package com.android.class2greylist;
+
+import com.google.common.base.Joiner;
+
+import java.util.List;
+
+public class MultipleAlternativesFoundError extends AlternativeNotFoundError {
+    public final ApiComponents alternative;
+    public final List<ApiComponents> almostMatches;
+
+    public MultipleAlternativesFoundError(ApiComponents alternative,
+            List<ApiComponents> almostMatches) {
+        this.alternative = alternative;
+        this.almostMatches = almostMatches;
+    }
+
+    @Override
+    public String toString() {
+        return "Alternative " + alternative + " returned multiple matches: "
+                + Joiner.on(", ").join(almostMatches);
+    }
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/NoAlternativesSpecifiedError.java b/tools/class2greylist/src/com/android/class2greylist/NoAlternativesSpecifiedError.java
new file mode 100644
index 0000000..28c5003
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/NoAlternativesSpecifiedError.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package com.android.class2greylist;
+
+public class NoAlternativesSpecifiedError extends AlternativeNotFoundError {
+
+    @Override
+    public String toString() {
+        return "Hidden API has a public alternative annotation field, but no concrete "
+                + "explanations. Please provide either a reference to an SDK method using javadoc "
+                + "syntax, e.g. {@link foo.bar.Baz#bat}, or a small code snippet if the "
+                + "alternative is part of a support library or third party library, e.g. "
+                + "{@code foo.bar.Baz bat = new foo.bar.Baz(); bat.doSomething();}.\n"
+                + "If this is too restrictive for your use case, please contact compat-team@.";
+    }
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/PackageAndClassName.java b/tools/class2greylist/src/com/android/class2greylist/PackageAndClassName.java
new file mode 100644
index 0000000..709092d
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/PackageAndClassName.java
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package com.android.class2greylist;
+
+import java.util.Objects;
+
+class PackageAndClassName{
+    public String packageName;
+    public String className;
+
+    private PackageAndClassName(String packageName, String className) {
+        this.packageName = packageName;
+        this.className = className;
+    }
+
+    /**
+     * Given a potentially fully qualified class name, split it into package and class.
+     *
+     * @param fullyQualifiedClassName potentially fully qualified class name.
+     * @return A PackageAndClassName instance containing the package name (or
+     * empty if not specified) and the class name (or empty if the input string
+     * is empty).
+     */
+    public static PackageAndClassName splitClassName(String fullyQualifiedClassName) {
+        int lastDotIdx = fullyQualifiedClassName.lastIndexOf('.');
+        if (lastDotIdx == -1) {
+            return new PackageAndClassName("", fullyQualifiedClassName);
+        }
+        String packageName = fullyQualifiedClassName.substring(0, lastDotIdx);
+        String className = fullyQualifiedClassName.substring(lastDotIdx + 1);
+        return new PackageAndClassName(packageName, className);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof PackageAndClassName)) {
+            return false;
+        }
+        PackageAndClassName other = (PackageAndClassName) obj;
+        return Objects.equals(packageName, other.packageName) && Objects.equals(className,
+                other.className);
+    }
+
+    @Override
+    public String toString() {
+        return packageName + "." + className;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(packageName, className);
+    }
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/RequiredAlternativeNotSpecifiedError.java b/tools/class2greylist/src/com/android/class2greylist/RequiredAlternativeNotSpecifiedError.java
new file mode 100644
index 0000000..a65f1fe
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/RequiredAlternativeNotSpecifiedError.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package com.android.class2greylist;
+
+/**
+ * Exception to be thrown when a greylisted private api gets restricted to max-FOO (where FOO is Q
+ * or later), without providing a public API alternative.
+ */
+public class RequiredAlternativeNotSpecifiedError extends Exception {
+
+}
+
diff --git a/tools/class2greylist/src/com/android/class2greylist/SignatureSyntaxError.java b/tools/class2greylist/src/com/android/class2greylist/SignatureSyntaxError.java
new file mode 100644
index 0000000..7685caa
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/SignatureSyntaxError.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package com.android.class2greylist;
+
+public class SignatureSyntaxError extends Exception {
+    public final String expected;
+    public final int position;
+    public final String context;
+    public SignatureSyntaxError(String expected, StringCursor sc) {
+        super(expected + " at position " + sc.position() + " in " + sc.getOriginalString());
+        this.expected = expected;
+        this.position = sc.position();
+        this.context = sc.getOriginalString();
+    }
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/StringCursor.java b/tools/class2greylist/src/com/android/class2greylist/StringCursor.java
new file mode 100644
index 0000000..08e8521
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/StringCursor.java
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package com.android.class2greylist;
+
+/**
+ * Utility class to simplify parsing of signatures.
+ */
+public class StringCursor {
+
+    private final String mString;
+    private int mCursor;
+
+    public StringCursor(String str) {
+        mString = str;
+        mCursor = 0;
+    }
+
+    /**
+     * Position of cursor in string.
+     *
+     * @return Current position of cursor in string.
+     */
+    public int position() {
+        return mCursor;
+    }
+
+    /**
+     * Peek current cursor position.
+     *
+     * @return The character at the current cursor position.
+     */
+    public char peek() {
+        return mString.charAt(mCursor);
+    }
+
+    /**
+     * Peek several characters at the current cursor position without moving the cursor.
+     *
+     * @param n The number of characters to peek.
+     * @return A string with n characters from the cursor position. If n is -1, return the whole
+     *        rest of the string.
+     */
+    public String peek(int n) throws StringCursorOutOfBoundsException {
+        if (n == -1) {
+            return mString.substring(mCursor);
+        }
+        if (n < 0 || (n + mCursor) >= mString.length()) {
+            throw new StringCursorOutOfBoundsException();
+        }
+        return mString.substring(mCursor, mCursor + n);
+    }
+
+    /**
+     * Consume the character at the current cursor position and move the cursor forwards.
+     *
+     * @return The character at the current cursor position.
+     */
+    public char next() throws StringCursorOutOfBoundsException {
+        if (!hasNext()) {
+            throw new StringCursorOutOfBoundsException();
+        }
+        return mString.charAt(mCursor++);
+    }
+
+    /**
+     * Consume several characters at the current cursor position and move the cursor further along.
+     *
+     * @param n The number of characters to consume.
+     * @return A string with n characters from the cursor position. If n is -1, return the whole
+     *         rest of the string.
+     */
+    public String next(int n) throws StringCursorOutOfBoundsException {
+        if (n == -1) {
+            String restOfString = mString.substring(mCursor);
+            mCursor = mString.length();
+            return restOfString;
+        }
+        if (n < 0) {
+            throw new StringCursorOutOfBoundsException();
+        }
+        mCursor += n;
+        return mString.substring(mCursor - n, mCursor);
+    }
+
+    /**
+     * Search for the first occurrence of a character beyond the current cursor position.
+     *
+     * @param c The character to search for.
+     * @return The offset of the first occurrence of c in the string beyond the cursor position.
+     * If the character does not exist, return -1.
+     */
+    public int find(char c) {
+        int firstIndex = mString.indexOf(c, mCursor);
+        if (firstIndex == -1) {
+            return -1;
+        }
+        return firstIndex - mCursor;
+    }
+
+    /**
+     * Check if cursor has reached end of string.
+     *
+     * @return Cursor has reached end of string.
+     */
+    public boolean hasNext() {
+        return mCursor < mString.length();
+    }
+
+    @Override
+    public String toString() {
+        return mString.substring(mCursor);
+    }
+
+    public String getOriginalString() {
+        return mString;
+    }
+}
\ No newline at end of file
diff --git a/tools/class2greylist/src/com/android/class2greylist/StringCursorOutOfBoundsException.java b/tools/class2greylist/src/com/android/class2greylist/StringCursorOutOfBoundsException.java
new file mode 100644
index 0000000..caf0bd6
--- /dev/null
+++ b/tools/class2greylist/src/com/android/class2greylist/StringCursorOutOfBoundsException.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package com.android.class2greylist;
+
+public class StringCursorOutOfBoundsException extends IndexOutOfBoundsException {
+
+}
diff --git a/tools/class2greylist/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandler.java b/tools/class2greylist/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandler.java
index b45e1b3..3ce00df 100644
--- a/tools/class2greylist/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandler.java
+++ b/tools/class2greylist/src/com/android/class2greylist/UnsupportedAppUsageAnnotationHandler.java
@@ -32,12 +32,15 @@
     private static final String EXPECTED_SIGNATURE_PROPERTY = "expectedSignature";
     private static final String MAX_TARGET_SDK_PROPERTY = "maxTargetSdk";
     private static final String IMPLICIT_MEMBER_PROPERTY = "implicitMember";
+    private static final String PUBLIC_ALTERNATIVES_PROPERTY = "publicAlternatives";
 
     private final Status mStatus;
     private final Predicate<ClassMember> mClassMemberFilter;
     private final Map<Integer, String> mSdkVersionToFlagMap;
     private final AnnotationConsumer mAnnotationConsumer;
 
+    private ApiResolver mApiResolver;
+
     /**
      * Represents a member of a class file (a field or method).
      */
@@ -66,6 +69,7 @@
         this(status, annotationConsumer,
                 member -> !(member.isBridgeMethod && publicApis.contains(member.signature)),
                 sdkVersionToFlagMap);
+        mApiResolver = new ApiResolver(publicApis);
     }
 
     @VisibleForTesting
@@ -76,6 +80,7 @@
         mAnnotationConsumer = annotationConsumer;
         mClassMemberFilter = memberFilter;
         mSdkVersionToFlagMap = sdkVersionToFlagMap;
+        mApiResolver = new ApiResolver();
     }
 
     @Override
@@ -85,7 +90,7 @@
             AnnotatedMemberContext memberContext = (AnnotatedMemberContext) context;
             FieldOrMethod member = memberContext.member;
             isBridgeMethod = (member instanceof Method) &&
-                (member.getAccessFlags() & Const.ACC_BRIDGE) != 0;
+                    (member.getAccessFlags() & Const.ACC_BRIDGE) != 0;
             if (isBridgeMethod) {
                 mStatus.debug("Member is a bridge method");
             }
@@ -94,6 +99,7 @@
         String signature = context.getMemberDescriptor();
         Integer maxTargetSdk = null;
         String implicitMemberSignature = null;
+        String publicAlternativesString = null;
 
         for (ElementValuePair property : annotation.getElementValuePairs()) {
             switch (property.getNameString()) {
@@ -102,8 +108,8 @@
                     // Don't enforce for bridge methods; they're generated so won't match.
                     if (!isBridgeMethod && !signature.equals(expected)) {
                         context.reportError("Expected signature does not match generated:\n"
-                                        + "Expected:  %s\n"
-                                        + "Generated: %s", expected, signature);
+                                + "Expected:  %s\n"
+                                + "Generated: %s", expected, signature);
                         return;
                     }
                     break;
@@ -121,23 +127,27 @@
                     implicitMemberSignature = property.getValue().stringifyValue();
                     if (context instanceof AnnotatedClassContext) {
                         signature = String.format("L%s;->%s",
-                            context.getClassDescriptor(), implicitMemberSignature);
+                                context.getClassDescriptor(), implicitMemberSignature);
                     } else {
                         context.reportError(
-                            "Expected annotation with an %s property to be on a class but is on %s",
-                            IMPLICIT_MEMBER_PROPERTY,
-                            signature);
+                                "Expected annotation with an %s property to be on a class but is "
+                                        + "on %s",
+                                IMPLICIT_MEMBER_PROPERTY,
+                                signature);
                         return;
                     }
                     break;
+                case PUBLIC_ALTERNATIVES_PROPERTY:
+                    publicAlternativesString = property.getValue().stringifyValue();
+                    break;
             }
         }
 
         if (context instanceof AnnotatedClassContext && implicitMemberSignature == null) {
             context.reportError(
-                "Missing property %s on annotation on class %s",
-                IMPLICIT_MEMBER_PROPERTY,
-                signature);
+                    "Missing property %s on annotation on class %s",
+                    IMPLICIT_MEMBER_PROPERTY,
+                    signature);
             return;
         }
 
@@ -150,6 +160,18 @@
             return;
         }
 
+        try {
+            mApiResolver.resolvePublicAlternatives(publicAlternativesString, signature,
+                    maxTargetSdk);
+        } catch (JavadocLinkSyntaxError | AlternativeNotFoundError e) {
+            context.reportError(e.toString());
+        } catch (RequiredAlternativeNotSpecifiedError e) {
+            context.reportError("Signature %s moved to %s without specifying public "
+                            + "alternatives; Refer to go/unsupportedappusage-public-alternatives "
+                            + "for details.",
+                    signature, mSdkVersionToFlagMap.get(maxTargetSdk));
+        }
+
         // Consume this annotation if it matches the predicate.
         if (mClassMemberFilter.test(new ClassMember(signature, isBridgeMethod))) {
             mAnnotationConsumer.consume(signature, stringifyAnnotationProperties(annotation),
diff --git a/tools/class2greylist/test/Android.bp b/tools/class2greylist/test/Android.bp
new file mode 100644
index 0000000..5483ea0
--- /dev/null
+++ b/tools/class2greylist/test/Android.bp
@@ -0,0 +1,29 @@
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+java_test_host {
+    name: "class2greylisttest",
+
+    // Only compile source java files in this apk.
+    srcs: ["src/**/*.java"],
+
+    static_libs: [
+        "class2greylistlib",
+        "libjavac",
+        "truth-host-prebuilt",
+        "mockito-host",
+        "junit-host",
+        "objenesis",
+    ],
+}
diff --git a/tools/class2greylist/test/Android.mk b/tools/class2greylist/test/Android.mk
deleted file mode 100644
index f35e74c..0000000
--- a/tools/class2greylist/test/Android.mk
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-# Only compile source java files in this apk.
-LOCAL_SRC_FILES := $(call all-java-files-under, src)
-
-LOCAL_MODULE := class2greylisttest
-
-LOCAL_STATIC_JAVA_LIBRARIES := class2greylistlib truth-host-prebuilt mockito-host junit-host objenesis
-
-# tag this module as a cts test artifact
-LOCAL_COMPATIBILITY_SUITE := general-tests
-
-include $(BUILD_HOST_JAVA_LIBRARY)
-
-# Build the test APKs using their own makefiles
-include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/AnnotationPropertyWriterTest.java b/tools/class2greylist/test/src/com/android/class2greylist/AnnotationPropertyWriterTest.java
new file mode 100644
index 0000000..a6c7770
--- /dev/null
+++ b/tools/class2greylist/test/src/com/android/class2greylist/AnnotationPropertyWriterTest.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package com.android.class2greylist;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.common.collect.ImmutableMap;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+
+public class AnnotationPropertyWriterTest {
+
+    private ByteArrayOutputStream mByteArrayOutputStream;
+    private AnnotationPropertyWriter mAnnotationPropertyWriter;
+
+    @Before
+    public void setup() {
+        mByteArrayOutputStream = new ByteArrayOutputStream();
+        mAnnotationPropertyWriter = new AnnotationPropertyWriter(mByteArrayOutputStream);
+    }
+
+    @Test
+    public void testExportPropertiesNoEscaping() {
+        String signature = "foo";
+        Map<String, String> annotationProperties = ImmutableMap.of(
+                "prop", "val"
+        );
+        Set<String> parsedFlags = new HashSet<String>();
+        mAnnotationPropertyWriter.consume(signature, annotationProperties, parsedFlags);
+        mAnnotationPropertyWriter.close();
+
+        String output = mByteArrayOutputStream.toString();
+        String expected = "prop,signature\n"
+                + "|val|,|foo|\n";
+        assertThat(output).isEqualTo(expected);
+    }
+
+    @Test
+    public void testExportPropertiesEscapeQuotes() {
+        String signature = "foo";
+        Map<String, String> annotationProperties = ImmutableMap.of(
+                "prop", "val1 | val2 | val3"
+        );
+        Set<String> parsedFlags = new HashSet<String>();
+        mAnnotationPropertyWriter.consume(signature, annotationProperties, parsedFlags);
+        mAnnotationPropertyWriter.close();
+
+        String output = mByteArrayOutputStream.toString();
+        String expected = "prop,signature\n"
+                + "|val1 || val2 || val3|,|foo|\n";
+        assertThat(output).isEqualTo(expected);
+    }
+}
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/ApiComponentsTest.java b/tools/class2greylist/test/src/com/android/class2greylist/ApiComponentsTest.java
new file mode 100644
index 0000000..e93d1e1
--- /dev/null
+++ b/tools/class2greylist/test/src/com/android/class2greylist/ApiComponentsTest.java
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package com.android.class2greylist;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import static org.testng.Assert.assertThrows;
+
+import org.junit.Test;
+
+
+public class ApiComponentsTest extends AnnotationHandlerTestBase {
+
+    @Test
+    public void testGetApiComponentsPackageFromSignature() throws SignatureSyntaxError {
+        ApiComponents api = ApiComponents.fromDexSignature("La/b/C;->foo()V");
+        PackageAndClassName packageAndClassName = api.getPackageAndClassName();
+        assertThat(packageAndClassName.packageName).isEqualTo("a.b");
+    }
+
+    @Test
+    public void testGetApiComponentsFromSignature() throws SignatureSyntaxError {
+        ApiComponents api = ApiComponents.fromDexSignature("La/b/C;->foo(IJLfoo2/bar/Baz;)V");
+        PackageAndClassName packageAndClassName = api.getPackageAndClassName();
+        assertThat(packageAndClassName.className).isEqualTo("C");
+        assertThat(api.getMemberName()).isEqualTo("foo");
+        assertThat(api.getMethodParameterTypes()).isEqualTo("int, long, foo2.bar.Baz");
+    }
+
+    @Test
+    public void testInvalidDexSignatureInvalidClassFormat() throws SignatureSyntaxError {
+        assertThrows(SignatureSyntaxError.class, () -> {
+            ApiComponents.fromDexSignature("a/b/C;->foo()V");
+        });
+        assertThrows(SignatureSyntaxError.class, () -> {
+            ApiComponents.fromDexSignature("La/b/C->foo()V");
+        });
+    }
+
+    @Test
+    public void testInvalidDexSignatureInvalidParameterType() throws SignatureSyntaxError {
+        assertThrows(SignatureSyntaxError.class, () -> {
+            ApiComponents.fromDexSignature("a/b/C;->foo(foo)V");
+        });
+    }
+
+    @Test
+    public void testInvalidDexSignatureInvalidReturnType() throws SignatureSyntaxError {
+        assertThrows(SignatureSyntaxError.class, () -> {
+            ApiComponents.fromDexSignature("a/b/C;->foo()foo");
+        });
+    }
+
+    @Test
+    public void testInvalidDexSignatureMissingReturnType() throws SignatureSyntaxError {
+        assertThrows(SignatureSyntaxError.class, () -> {
+            ApiComponents.fromDexSignature("a/b/C;->foo(I)");
+        });
+    }
+
+    @Test
+    public void testInvalidDexSignatureMissingArrowOrColon() throws SignatureSyntaxError {
+        assertThrows(SignatureSyntaxError.class, () -> {
+            ApiComponents.fromDexSignature("La/b/C;foo()V");
+        });
+    }
+
+    @Test
+    public void testGetApiComponentsFromFieldLink() throws JavadocLinkSyntaxError {
+        ApiComponents api = ApiComponents.fromLinkTag("a.b.C#foo(int, long, foo2.bar.Baz)",
+                "La/b/C;->foo:I");
+        PackageAndClassName packageAndClassName = api.getPackageAndClassName();
+        assertThat(packageAndClassName.packageName).isEqualTo("a.b");
+        assertThat(packageAndClassName.className).isEqualTo("C");
+        assertThat(api.getMemberName()).isEqualTo("foo");
+    }
+
+    @Test
+    public void testGetApiComponentsLinkOnlyClass() throws JavadocLinkSyntaxError {
+        ApiComponents api = ApiComponents.fromLinkTag("b.c.D", "La/b/C;->foo:I");
+        PackageAndClassName packageAndClassName = api.getPackageAndClassName();
+        assertThat(packageAndClassName.packageName).isEqualTo("b.c");
+        assertThat(packageAndClassName.className).isEqualTo("D");
+        assertThat(api.getMethodParameterTypes()).isEqualTo("");
+    }
+
+    @Test
+    public void testGetApiComponentsFromLinkOnlyClassDeducePackage() throws JavadocLinkSyntaxError {
+        ApiComponents api = ApiComponents.fromLinkTag("D", "La/b/C;->foo:I");
+        PackageAndClassName packageAndClassName = api.getPackageAndClassName();
+        assertThat(packageAndClassName.packageName).isEqualTo("a.b");
+        assertThat(packageAndClassName.className).isEqualTo("D");
+        assertThat(api.getMemberName().isEmpty()).isTrue();
+        assertThat(api.getMethodParameterTypes().isEmpty()).isTrue();
+    }
+
+    @Test
+    public void testGetApiComponentsParametersFromMethodLink() throws JavadocLinkSyntaxError {
+        ApiComponents api = ApiComponents.fromLinkTag("a.b.C#foo(int, long, foo2.bar.Baz)",
+                "La/b/C;->foo:I");
+        assertThat(api.getMethodParameterTypes()).isEqualTo("int, long, foo2.bar.Baz");
+    }
+
+    @Test
+    public void testDeduceApiComponentsPackageFromLinkUsingContext() throws JavadocLinkSyntaxError {
+        ApiComponents api = ApiComponents.fromLinkTag("C#foo(int, long, foo2.bar.Baz)",
+                "La/b/C;->foo:I");
+        PackageAndClassName packageAndClassName = api.getPackageAndClassName();
+        assertThat(packageAndClassName.packageName).isEqualTo("a.b");
+    }
+
+    @Test
+    public void testDeduceApiComponentsPackageAndClassFromLinkUsingContext()
+            throws JavadocLinkSyntaxError {
+        ApiComponents api = ApiComponents.fromLinkTag("#foo(int, long, foo2.bar.Baz)",
+                "La/b/C;->foo:I");
+        PackageAndClassName packageAndClassName = api.getPackageAndClassName();
+        assertThat(packageAndClassName.packageName).isEqualTo("a.b");
+        assertThat(packageAndClassName.className).isEqualTo("C");
+    }
+
+    @Test
+    public void testInvalidLinkTagUnclosedParenthesis() throws JavadocLinkSyntaxError {
+        assertThrows(JavadocLinkSyntaxError.class, () -> {
+            ApiComponents.fromLinkTag("a.b.C#foo(int,float", "La/b/C;->foo()V");
+        });
+    }
+
+}
\ No newline at end of file
diff --git a/tools/class2greylist/test/src/com/android/class2greylist/ApiResolverTest.java b/tools/class2greylist/test/src/com/android/class2greylist/ApiResolverTest.java
new file mode 100644
index 0000000..888a9b5
--- /dev/null
+++ b/tools/class2greylist/test/src/com/android/class2greylist/ApiResolverTest.java
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License
+ */
+
+package com.android.class2greylist;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import static org.testng.Assert.expectThrows;
+import static org.testng.Assert.assertThrows;
+
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+public class ApiResolverTest extends AnnotationHandlerTestBase {
+    @Test
+    public void testFindPublicAlternativeExactly()
+            throws JavadocLinkSyntaxError, AlternativeNotFoundError,
+            RequiredAlternativeNotSpecifiedError {
+        Set<String> publicApis = Collections.unmodifiableSet(new HashSet<>(
+                Arrays.asList("La/b/C;->foo(I)V", "La/b/C;->bar(I)V")));
+        ApiResolver resolver = new ApiResolver(publicApis);
+        resolver.resolvePublicAlternatives("{@link a.b.C#foo(int)}", "Lb/c/D;->bar()V", 1);
+    }
+
+    @Test
+    public void testFindPublicAlternativeDeducedPackageName()
+            throws JavadocLinkSyntaxError, AlternativeNotFoundError,
+            RequiredAlternativeNotSpecifiedError {
+        Set<String> publicApis = Collections.unmodifiableSet(new HashSet<>(
+                Arrays.asList("La/b/C;->foo(I)V", "La/b/C;->bar(I)V")));
+        ApiResolver resolver = new ApiResolver(publicApis);
+        resolver.resolvePublicAlternatives("{@link C#foo(int)}", "La/b/D;->bar()V", 1);
+    }
+
+    @Test
+    public void testFindPublicAlternativeDeducedPackageAndClassName()
+            throws JavadocLinkSyntaxError, AlternativeNotFoundError,
+            RequiredAlternativeNotSpecifiedError {
+        Set<String> publicApis = Collections.unmodifiableSet(new HashSet<>(
+                Arrays.asList("La/b/C;->foo(I)V", "La/b/C;->bar(I)V")));
+        ApiResolver resolver = new ApiResolver(publicApis);
+        resolver.resolvePublicAlternatives("{@link #foo(int)}", "La/b/C;->bar()V", 1);
+    }
+
+    @Test
+    public void testFindPublicAlternativeDeducedParameterTypes()
+            throws JavadocLinkSyntaxError, AlternativeNotFoundError,
+            RequiredAlternativeNotSpecifiedError {
+        Set<String> publicApis = Collections.unmodifiableSet(new HashSet<>(
+                Arrays.asList("La/b/C;->foo(I)V", "La/b/C;->bar(I)V")));
+        ApiResolver resolver = new ApiResolver(publicApis);
+        resolver.resolvePublicAlternatives("{@link #foo}", "La/b/C;->bar()V", 1);
+    }
+
+    @Test
+    public void testFindPublicAlternativeFailDueToMultipleParameterTypes()
+            throws SignatureSyntaxError {
+        Set<String> publicApis = Collections.unmodifiableSet(new HashSet<>(
+                Arrays.asList("La/b/C;->foo(I)V", "La/b/C;->bar(I)I", "La/b/C;->foo(II)V")));
+        ApiResolver resolver = new ApiResolver(publicApis);
+        MultipleAlternativesFoundError e = expectThrows(MultipleAlternativesFoundError.class,
+                () -> resolver.resolvePublicAlternatives("{@link #foo}", "La/b/C;->bar()V", 1));
+        assertThat(e.almostMatches).containsExactly(
+                ApiComponents.fromDexSignature("La/b/C;->foo(I)V"),
+                ApiComponents.fromDexSignature("La/b/C;->foo(II)V")
+        );
+    }
+
+    @Test
+    public void testFindPublicAlternativeFailNoAlternative() {
+        Set<String> publicApis = Collections.unmodifiableSet(new HashSet<>(
+                Arrays.asList("La/b/C;->bar(I)V")));
+        ApiResolver resolver = new ApiResolver(publicApis);
+        assertThrows(MemberAlternativeNotFoundError.class, ()
+                -> resolver.resolvePublicAlternatives("{@link #foo(int)}", "La/b/C;->bar()V", 1));
+    }
+
+    @Test
+    public void testFindPublicAlternativeFailNoAlternativeNoParameterTypes() {
+
+        Set<String> publicApis = Collections.unmodifiableSet(new HashSet<>(
+                Arrays.asList("La/b/C;->bar(I)V")));
+        ApiResolver resolver = new ApiResolver(publicApis);
+        assertThrows(MemberAlternativeNotFoundError.class,
+                () -> resolver.resolvePublicAlternatives("{@link #foo}", "La/b/C;->bar()V", 1));
+    }
+
+    @Test
+    public void testNoPublicClassAlternatives() {
+        Set<String> publicApis = Collections.unmodifiableSet(new HashSet<>());
+        ApiResolver resolver = new ApiResolver(publicApis);
+        expectThrows(NoAlternativesSpecifiedError.class,
+                () -> resolver.resolvePublicAlternatives("Foo", "La/b/C;->bar()V", 1));
+    }
+
+    @Test
+    public void testPublicAlternativesJustPackageAndClassName()
+            throws JavadocLinkSyntaxError, AlternativeNotFoundError,
+            RequiredAlternativeNotSpecifiedError {
+        Set<String> publicApis = Collections.unmodifiableSet(new HashSet<>(
+                Arrays.asList("La/b/C;->bar(I)V")));
+        ApiResolver resolver = new ApiResolver(publicApis);
+        resolver.resolvePublicAlternatives("Foo {@link a.b.C}", "Lb/c/D;->bar()V", 1);
+    }
+
+    @Test
+    public void testPublicAlternativesJustClassName()
+            throws JavadocLinkSyntaxError, AlternativeNotFoundError,
+            RequiredAlternativeNotSpecifiedError {
+        Set<String> publicApis = Collections.unmodifiableSet(new HashSet<>(
+                Arrays.asList("La/b/C;->bar(I)V")));
+        ApiResolver resolver = new ApiResolver(publicApis);
+        resolver.resolvePublicAlternatives("Foo {@link C}", "La/b/D;->bar()V", 1);
+    }
+
+    @Test
+    public void testNoPublicAlternativesButHasExplanation()
+            throws JavadocLinkSyntaxError, AlternativeNotFoundError,
+            RequiredAlternativeNotSpecifiedError {
+        Set<String> publicApis = Collections.unmodifiableSet(new HashSet<>());
+        ApiResolver resolver = new ApiResolver(publicApis);
+        resolver.resolvePublicAlternatives("Foo {@code bar}", "La/b/C;->bar()V", 1);
+    }
+
+    @Test
+    public void testNoPublicAlternativesSpecifiedWithMaxSdk() {
+        Set<String> publicApis = Collections.unmodifiableSet(new HashSet<>());
+        ApiResolver resolver = new ApiResolver(publicApis);
+        assertThrows(RequiredAlternativeNotSpecifiedError.class,
+                () -> resolver.resolvePublicAlternatives(null, "La/b/C;->bar()V", 29));
+    }
+
+    @Test
+    public void testNoPublicAlternativesSpecifiedWithMaxLessThanQ()
+            throws JavadocLinkSyntaxError, AlternativeNotFoundError,
+            RequiredAlternativeNotSpecifiedError {
+        Set<String> publicApis = Collections.unmodifiableSet(new HashSet<>());
+        ApiResolver resolver = new ApiResolver(publicApis);
+        resolver.resolvePublicAlternatives(null, "La/b/C;->bar()V", 28);
+    }
+
+}
\ No newline at end of file
diff --git a/tools/class2greylist/test/src/com/android/javac/Javac.java b/tools/class2greylist/test/src/com/android/javac/Javac.java
deleted file mode 100644
index 94e4e49..0000000
--- a/tools/class2greylist/test/src/com/android/javac/Javac.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.android.javac;
-
-import com.google.common.io.Files;
-
-import java.util.stream.Collectors;
-import org.apache.bcel.classfile.ClassParser;
-import org.apache.bcel.classfile.JavaClass;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Locale;
-
-import javax.tools.DiagnosticCollector;
-import javax.tools.JavaCompiler;
-import javax.tools.JavaFileObject;
-import javax.tools.SimpleJavaFileObject;
-import javax.tools.StandardJavaFileManager;
-import javax.tools.StandardLocation;
-import javax.tools.ToolProvider;
-
-/**
- * Helper class for compiling snippets of Java source and providing access to the resulting class
- * files.
- */
-public class Javac {
-
-    private final JavaCompiler mJavac;
-    private final StandardJavaFileManager mFileMan;
-    private final List<JavaFileObject> mCompilationUnits;
-    private final File mClassOutDir;
-
-    public Javac() throws IOException {
-        mJavac = ToolProvider.getSystemJavaCompiler();
-        mFileMan = mJavac.getStandardFileManager(null, Locale.US, null);
-        mClassOutDir = Files.createTempDir();
-        mFileMan.setLocation(StandardLocation.CLASS_OUTPUT, Arrays.asList(mClassOutDir));
-        mFileMan.setLocation(StandardLocation.CLASS_PATH, Arrays.asList(mClassOutDir));
-        mCompilationUnits = new ArrayList<>();
-    }
-
-    private String classToFileName(String classname) {
-        return classname.replace('.', '/');
-    }
-
-    public Javac addSource(String classname, String contents) {
-        JavaFileObject java = new SimpleJavaFileObject(URI.create(
-                String.format("string:///%s.java", classToFileName(classname))),
-                JavaFileObject.Kind.SOURCE
-                ){
-            @Override
-            public CharSequence getCharContent(boolean ignoreEncodingErrors) throws IOException {
-                return contents;
-            }
-        };
-        mCompilationUnits.add(java);
-        return this;
-    }
-
-    public void compile() {
-        DiagnosticCollector<JavaFileObject> diagnosticCollector = new DiagnosticCollector<>();
-        JavaCompiler.CompilationTask task = mJavac.getTask(
-                null,
-                mFileMan,
-                diagnosticCollector,
-                null,
-                null,
-                mCompilationUnits);
-        boolean result = task.call();
-        if (!result) {
-            throw new IllegalStateException(
-                "Compilation failed:" +
-                    diagnosticCollector.getDiagnostics()
-                        .stream()
-                        .map(Object::toString)
-                        .collect(Collectors.joining("\n")));
-        }
-    }
-
-    public InputStream getClassFile(String classname) throws IOException {
-        Iterable<? extends JavaFileObject> objs = mFileMan.getJavaFileObjects(
-                new File(mClassOutDir, String.format("%s.class", classToFileName(classname))));
-        if (!objs.iterator().hasNext()) {
-            return null;
-        }
-        return objs.iterator().next().openInputStream();
-    }
-
-    public JavaClass getCompiledClass(String classname) throws IOException {
-        return new ClassParser(getClassFile(classname),
-                String.format("%s.class", classToFileName(classname))).parse();
-    }
-}
diff --git a/tools/cleanup-buildbot-device.sh b/tools/cleanup-buildbot-device.sh
deleted file mode 100755
index 694c739..0000000
--- a/tools/cleanup-buildbot-device.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-green='\033[0;32m'
-nc='\033[0m'
-
-# Setup as root, as device cleanup requires it.
-adb root
-adb wait-for-device
-
-if [[ -n "$ART_TEST_CHROOT" ]]; then
-  # Check that ART_TEST_CHROOT is correctly defined.
-  if [[ "x$ART_TEST_CHROOT" != x/* ]]; then
-    echo "$ART_TEST_CHROOT is not an absolute path"
-    exit 1
-  fi
-
-  if adb shell test -d "$ART_TEST_CHROOT"; then
-    echo -e "${green}Remove entire /system directory from chroot directory${nc}"
-    adb shell rm -rf "$ART_TEST_CHROOT/system"
-
-    echo -e "${green}Remove entire /data directory from chroot directory${nc}"
-    adb shell rm -rf "$ART_TEST_CHROOT/data"
-
-    echo -e "${green}Remove entire chroot directory${nc}"
-    adb shell rmdir "$ART_TEST_CHROOT" || adb shell ls -la "$ART_TEST_CHROOT"
-  fi
-else
-  adb shell rm -rf \
-    /data/local/tmp /data/art-test /data/nativetest /data/nativetest64 '/data/misc/trace/*'
-fi
diff --git a/tools/common/common.py b/tools/common/common.py
index 4171dfe..fc9d879 100755
--- a/tools/common/common.py
+++ b/tools/common/common.py
@@ -299,14 +299,16 @@
       os.mkdir(arch_cache_path)
     lib = 'lib64' if x64 else 'lib'
     android_root = GetEnvVariableOrError('ANDROID_HOST_OUT')
-    android_runtime_root = android_root + '/com.android.runtime'
+    android_i18n_root = android_root + '/com.android.i18n'
+    android_art_root = android_root + '/com.android.art'
     android_tzdata_root = android_root + '/com.android.tzdata'
     library_path = android_root + '/' + lib
     path = android_root + '/bin'
     self._shell_env = os.environ.copy()
     self._shell_env['ANDROID_DATA'] = self._env_path
     self._shell_env['ANDROID_ROOT'] = android_root
-    self._shell_env['ANDROID_RUNTIME_ROOT'] = android_runtime_root
+    self._shell_env['ANDROID_I18N_ROOT'] = android_i18n_root
+    self._shell_env['ANDROID_ART_ROOT'] = android_art_root
     self._shell_env['ANDROID_TZDATA_ROOT'] = android_tzdata_root
     self._shell_env['LD_LIBRARY_PATH'] = library_path
     self._shell_env['DYLD_LIBRARY_PATH'] = library_path
diff --git a/tools/compile-classes.sh b/tools/compile-classes.sh
new file mode 100755
index 0000000..86a4ff7
--- /dev/null
+++ b/tools/compile-classes.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+#
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This script compiles Java Language source files into arm64 odex files.
+#
+# Before running this script, do lunch and build for an arm64 device.
+#
+# Usage:
+#     compile-classes.sh Scratch.java
+
+set -e
+
+SCRATCH=`mktemp -d`
+DEX_FILE=classes.dex
+ODEX_FILE=classes.odex
+
+javac -d $SCRATCH $1
+d8 $SCRATCH/*.class
+
+$ANDROID_BUILD_TOP/art/tools/compile-jar.sh $DEX_FILE $ODEX_FILE arm64 \
+    --generate-debug-info \
+    --dump-cfg=classes.cfg
+
+rm -rf $SCRATCH
+
+echo
+echo "OAT file is at $ODEX_FILE"
+echo
+echo "View it with one of the following commands:"
+echo
+echo "    oatdump --oat-file=$ODEX_FILE"
+echo
+echo "    aarch64-linux-android-objdump -d $ODEX_FILE"
+echo
+echo "The CFG is dumped to output.cfg for inspection of individual compiler passes."
+echo
diff --git a/tools/compile-jar.sh b/tools/compile-jar.sh
new file mode 100755
index 0000000..5024ccc
--- /dev/null
+++ b/tools/compile-jar.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This script creates a boot image profile based on input profiles.
+#
+
+if [[ "$#" -lt 1 ]]; then
+  echo "Usage $0 <jar|apk> <output> <isa> [args]+"
+  echo "Example $0 Maps.apk maps.odex arm64"
+  exit 1
+fi
+
+FILE=$1
+shift
+OUTPUT=$1
+shift
+ISA=$1
+shift
+
+dex2oat \
+    --runtime-arg -Xms64m --runtime-arg -Xmx512m \
+    --boot-image=${OUT}/apex/com.android.art/javalib/boot.art:${OUT}/system/framework/boot-framework.art \
+    $(${ANDROID_BUILD_TOP}/art/tools/host_bcp.sh ${OUT}/system/framework/oat/${ISA}/services.odex --use-first-dir) \
+    --dex-file=${FILE} --dex-location=/system/framework/${FILE} \
+    --oat-file=${OUTPUT} \
+    --android-root=${OUT}/system --instruction-set=$ISA \
+    $@
diff --git a/tools/cpp-define-generator/Android.bp b/tools/cpp-define-generator/Android.bp
index 027f128..701735f 100644
--- a/tools/cpp-define-generator/Android.bp
+++ b/tools/cpp-define-generator/Android.bp
@@ -23,16 +23,18 @@
         "art_debug_defaults",
         "art_defaults",
     ],
-    include_dirs: [
-        "art/libartbase",
-        "art/libdexfile",
-        "art/libartbase",
-        "art/runtime",
-        "system/core/base/include",
+    header_libs: [
+        "art_libartbase_headers", // For base/bit_utils.h
+        "libart_runtime_headers_ndk",
+        "libdexfile_all_headers", // For dex/modifiers.h
     ],
     // Produce text file rather than binary.
     cflags: ["-S"],
     srcs: ["asm_defines.cc"],
+    apex_available: [
+        "com.android.art.debug",
+        "com.android.art.release",
+    ],
 }
 
 // This extracts the compile-time constants from asm_defines.s and creates the header.
@@ -44,12 +46,27 @@
     out: ["asm_defines.h"],
     tool_files: ["make_header.py"],
     cmd: "$(location make_header.py) \"$(in)\" > \"$(out)\"",
+    target: {
+        darwin: {
+            enabled: false,
+        },
+    },
+
+    apex_available: [
+        "com.android.art.debug",
+        "com.android.art.release",
+    ],
 }
 
 cc_library_headers {
     name: "cpp-define-generator-definitions",
     host_supported: true,
     export_include_dirs: ["."],
+
+    apex_available: [
+        "com.android.art.debug",
+        "com.android.art.release",
+    ],
 }
 
 python_binary_host {
diff --git a/tools/cpp-define-generator/art_field.def b/tools/cpp-define-generator/art_field.def
new file mode 100644
index 0000000..a15076f
--- /dev/null
+++ b/tools/cpp-define-generator/art_field.def
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "art_field.h"
+#endif
+
+ASM_DEFINE(ART_FIELD_OFFSET_OFFSET,
+           art::ArtField::OffsetOffset().Int32Value())
+ASM_DEFINE(ART_FIELD_DECLARING_CLASS_OFFSET,
+           art::ArtField::DeclaringClassOffset().Int32Value())
diff --git a/tools/cpp-define-generator/art_method.def b/tools/cpp-define-generator/art_method.def
index 21859dc..75fbab0 100644
--- a/tools/cpp-define-generator/art_method.def
+++ b/tools/cpp-define-generator/art_method.def
@@ -20,6 +20,8 @@
 
 ASM_DEFINE(ART_METHOD_ACCESS_FLAGS_OFFSET,
            art::ArtMethod::AccessFlagsOffset().Int32Value())
+ASM_DEFINE(ART_METHOD_IS_STATIC_FLAG,
+           art::kAccStatic)
 ASM_DEFINE(ART_METHOD_DECLARING_CLASS_OFFSET,
            art::ArtMethod::DeclaringClassOffset().Int32Value())
 ASM_DEFINE(ART_METHOD_JNI_OFFSET_32,
@@ -30,3 +32,9 @@
            art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k32).Int32Value())
 ASM_DEFINE(ART_METHOD_QUICK_CODE_OFFSET_64,
            art::ArtMethod::EntryPointFromQuickCompiledCodeOffset(art::PointerSize::k64).Int32Value())
+ASM_DEFINE(ART_METHOD_METHOD_INDEX_OFFSET,
+           art::ArtMethod::MethodIndexOffset().Int32Value())
+ASM_DEFINE(ART_METHOD_IMT_INDEX_OFFSET,
+           art::ArtMethod::ImtIndexOffset().Int32Value())
+ASM_DEFINE(ART_METHOD_HOTNESS_COUNT_OFFSET,
+           art::ArtMethod::HotnessCountOffset().Int32Value())
diff --git a/tools/cpp-define-generator/asm_defines.def b/tools/cpp-define-generator/asm_defines.def
index 7a77e8e..a64676f 100644
--- a/tools/cpp-define-generator/asm_defines.def
+++ b/tools/cpp-define-generator/asm_defines.def
@@ -19,13 +19,17 @@
 #endif
 
 #include "globals.def"
+#include "art_field.def"
 #include "art_method.def"
+#include "code_item.def"
 #include "lockword.def"
 #include "mirror_array.def"
 #include "mirror_class.def"
 #include "mirror_dex_cache.def"
 #include "mirror_object.def"
 #include "mirror_string.def"
+#include "osr.def"
+#include "profiling_info.def"
 #include "rosalloc.def"
 #include "runtime.def"
 #include "shadow_frame.def"
diff --git a/tools/cpp-define-generator/code_item.def b/tools/cpp-define-generator/code_item.def
new file mode 100644
index 0000000..01b0e85
--- /dev/null
+++ b/tools/cpp-define-generator/code_item.def
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "dex/standard_dex_file.h"
+#endif
+
+ASM_DEFINE(CODE_ITEM_REGISTERS_SIZE_OFFSET,
+           art::StandardDexFile::CodeItem::RegistersSizeOffset())
+ASM_DEFINE(CODE_ITEM_INS_SIZE_OFFSET,
+           art::StandardDexFile::CodeItem::InsSizeOffset())
+ASM_DEFINE(CODE_ITEM_OUTS_SIZE_OFFSET,
+           art::StandardDexFile::CodeItem::OutsSizeOffset())
+ASM_DEFINE(CODE_ITEM_INSNS_OFFSET,
+           art::StandardDexFile::CodeItem::InsnsOffset())
diff --git a/tools/cpp-define-generator/globals.def b/tools/cpp-define-generator/globals.def
index 10542622..6c9b2b0 100644
--- a/tools/cpp-define-generator/globals.def
+++ b/tools/cpp-define-generator/globals.def
@@ -30,6 +30,10 @@
 #include "stack.h"
 #endif
 
+ASM_DEFINE(ACCESS_FLAGS_METHOD_IS_FAST_NATIVE,
+           art::kAccFastNative)
+ASM_DEFINE(ACCESS_FLAGS_METHOD_IS_CRITICAL_NATIVE,
+           art::kAccCriticalNative)
 ASM_DEFINE(ACCESS_FLAGS_CLASS_IS_FINALIZABLE,
            art::kAccClassIsFinalizable)
 ASM_DEFINE(ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT,
@@ -70,3 +74,5 @@
            sizeof(art::StackReference<art::mirror::Object>))
 ASM_DEFINE(STD_MEMORY_ORDER_RELAXED,
            std::memory_order_relaxed)
+ASM_DEFINE(STACK_OVERFLOW_RESERVED_BYTES,
+           GetStackOverflowReservedBytes(art::kRuntimeISA))
diff --git a/tools/cpp-define-generator/make_header.py b/tools/cpp-define-generator/make_header.py
index 1b13923..f3657b1 100755
--- a/tools/cpp-define-generator/make_header.py
+++ b/tools/cpp-define-generator/make_header.py
@@ -31,7 +31,7 @@
 def convert(input):
   """Find all defines in the compiler generated assembly and convert them to #define pragmas"""
 
-  asm_define_re = re.compile(r'">>(\w+) (?:\$|#)([-0-9]+) (?:\$|#)(0|1)<<"')
+  asm_define_re = re.compile(r'">>(\w+) (?:\$|#)?([-0-9]+) (?:\$|#)?(0|1)<<"')
   asm_defines = asm_define_re.findall(input)
   if not asm_defines:
     raise RuntimeError("Failed to find any asm defines in the input")
diff --git a/tools/cpp-define-generator/mirror_class.def b/tools/cpp-define-generator/mirror_class.def
index c15ae92..6df6c41 100644
--- a/tools/cpp-define-generator/mirror_class.def
+++ b/tools/cpp-define-generator/mirror_class.def
@@ -36,3 +36,11 @@
            art::mirror::Class::StatusOffset().Int32Value())
 ASM_DEFINE(PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT,
            art::mirror::Class::kPrimitiveTypeSizeShiftShift)
+ASM_DEFINE(MIRROR_CLASS_VTABLE_OFFSET_32,
+           art::mirror::Class::EmbeddedVTableOffset(art::PointerSize::k32).Int32Value())
+ASM_DEFINE(MIRROR_CLASS_VTABLE_OFFSET_64,
+           art::mirror::Class::EmbeddedVTableOffset(art::PointerSize::k64).Int32Value())
+ASM_DEFINE(MIRROR_CLASS_IMT_PTR_OFFSET_32,
+           art::mirror::Class::ImtPtrOffset(art::PointerSize::k32).Int32Value())
+ASM_DEFINE(MIRROR_CLASS_IMT_PTR_OFFSET_64,
+           art::mirror::Class::ImtPtrOffset(art::PointerSize::k64).Int32Value())
diff --git a/tools/cpp-define-generator/mirror_object.def b/tools/cpp-define-generator/mirror_object.def
index facb037..7d7028b 100644
--- a/tools/cpp-define-generator/mirror_object.def
+++ b/tools/cpp-define-generator/mirror_object.def
@@ -24,3 +24,10 @@
            sizeof(art::mirror::Object))
 ASM_DEFINE(MIRROR_OBJECT_LOCK_WORD_OFFSET,
            art::mirror::Object::MonitorOffset().Int32Value())
+ASM_DEFINE(GRAY_BYTE_OFFSET,
+           art::mirror::Object::MonitorOffset().Int32Value() +
+               art::LockWord::kReadBarrierStateShift / art::kBitsPerByte)
+ASM_DEFINE(GRAY_BIT_POSITION,
+           art::LockWord::kReadBarrierStateShift % art::kBitsPerByte)
+ASM_DEFINE(READ_BARRIER_TEST_VALUE,
+           static_cast<int8_t>(1 << (art::LockWord::kReadBarrierStateShift % art::kBitsPerByte)))
diff --git a/tools/cpp-define-generator/osr.def b/tools/cpp-define-generator/osr.def
new file mode 100644
index 0000000..bf611fd
--- /dev/null
+++ b/tools/cpp-define-generator/osr.def
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "jit/jit.h"
+#endif
+
+ASM_DEFINE(OSR_DATA_NATIVE_PC, art::jit::OsrData::NativePcOffset().Int32Value())
+ASM_DEFINE(OSR_DATA_FRAME_SIZE, art::jit::OsrData::FrameSizeOffset().Int32Value())
+ASM_DEFINE(OSR_DATA_MEMORY, art::jit::OsrData::MemoryOffset().Int32Value())
diff --git a/tools/cpp-define-generator/profiling_info.def b/tools/cpp-define-generator/profiling_info.def
new file mode 100644
index 0000000..6d77b9d
--- /dev/null
+++ b/tools/cpp-define-generator/profiling_info.def
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "jit/profiling_info.h"
+#endif
+
+ASM_DEFINE(INLINE_CACHE_SIZE, art::InlineCache::kIndividualCacheSize);
+ASM_DEFINE(INLINE_CACHE_CLASSES_OFFSET, art::InlineCache::ClassesOffset().Int32Value());
diff --git a/tools/cpp-define-generator/thread.def b/tools/cpp-define-generator/thread.def
index 8c91dc8..4fee6df 100644
--- a/tools/cpp-define-generator/thread.def
+++ b/tools/cpp-define-generator/thread.def
@@ -15,6 +15,7 @@
  */
 
 #if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "entrypoints/quick/quick_entrypoints_enum.h"
 #include "thread.h"
 #endif
 
@@ -36,6 +37,10 @@
            art::Thread::InterpreterCacheOffset<art::kRuntimePointerSize>().Int32Value())
 ASM_DEFINE(THREAD_INTERPRETER_CACHE_SIZE_LOG2,
            art::Thread::InterpreterCacheSizeLog2())
+ASM_DEFINE(THREAD_INTERPRETER_CACHE_SIZE_MASK,
+           (sizeof(art::InterpreterCache::Entry) * (art::InterpreterCache::kSize - 1)))
+ASM_DEFINE(THREAD_INTERPRETER_CACHE_SIZE_SHIFT,
+           2)
 ASM_DEFINE(THREAD_IS_GC_MARKING_OFFSET,
            art::Thread::IsGcMarkingOffset<art::kRuntimePointerSize>().Int32Value())
 ASM_DEFINE(THREAD_LOCAL_ALLOC_STACK_END_OFFSET,
@@ -60,3 +65,11 @@
            art::Thread::UseMterpOffset<art::kRuntimePointerSize>().Int32Value())
 ASM_DEFINE(THREAD_TOP_QUICK_FRAME_OFFSET,
            art::Thread::TopOfManagedStackOffset<art::kRuntimePointerSize>().Int32Value())
+ASM_DEFINE(THREAD_ALLOC_OBJECT_ENTRYPOINT_OFFSET,
+           art::GetThreadOffset<art::kRuntimePointerSize>(art::kQuickAllocObjectInitialized)
+               .Int32Value())
+ASM_DEFINE(THREAD_ALLOC_ARRAY_ENTRYPOINT_OFFSET,
+           art::GetThreadOffset<art::kRuntimePointerSize>(art::kQuickAllocArrayResolved)
+               .Int32Value())
+ASM_DEFINE(THREAD_READ_BARRIER_MARK_REG00_OFFSET,
+           art::Thread::ReadBarrierMarkEntryPointsOffset<art::kRuntimePointerSize>(0))
diff --git a/tools/dexanalyze/Android.bp b/tools/dexanalyze/Android.bp
index a232a1b..e7ae113 100644
--- a/tools/dexanalyze/Android.bp
+++ b/tools/dexanalyze/Android.bp
@@ -37,6 +37,10 @@
         "libartbase",
         "libbase",
     ],
+    apex_available: [
+        "com.android.art.release",
+        "com.android.art.debug",
+    ],
 }
 
 art_cc_test {
diff --git a/tools/dexanalyze/dexanalyze_test.cc b/tools/dexanalyze/dexanalyze_test.cc
index c6648c0..9e6ed6d 100644
--- a/tools/dexanalyze/dexanalyze_test.cc
+++ b/tools/dexanalyze/dexanalyze_test.cc
@@ -22,7 +22,7 @@
 class DexAnalyzeTest : public CommonRuntimeTest {
  public:
   std::string GetDexAnalyzePath() {
-    return GetTestAndroidRoot() + "/bin/dexanalyze";
+    return GetArtBinDir() + "/dexanalyze";
   }
 
   void DexAnalyzeExec(const std::vector<std::string>& args, bool expect_success) {
diff --git a/tools/dexfuzz/Android.bp b/tools/dexfuzz/Android.bp
new file mode 100644
index 0000000..b095e00
--- /dev/null
+++ b/tools/dexfuzz/Android.bp
@@ -0,0 +1,29 @@
+//
+// Copyright (C) 2014 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// --- dexfuzz.jar ----------------
+java_library_host {
+    name: "dexfuzz",
+    srcs: ["src/**/*.java"],
+    manifest: "manifest.txt",
+}
+
+// --- dexfuzz script ----------------
+sh_binary_host {
+    name: "dexfuzz-script",
+    src: "dexfuzz",
+    filename_from_src: true,
+}
diff --git a/tools/dexfuzz/Android.mk b/tools/dexfuzz/Android.mk
index 06d3f62..e316fad 100644
--- a/tools/dexfuzz/Android.mk
+++ b/tools/dexfuzz/Android.mk
@@ -16,22 +16,6 @@
 
 LOCAL_PATH := $(call my-dir)
 
-# --- dexfuzz.jar ----------------
-include $(CLEAR_VARS)
-LOCAL_SRC_FILES := $(call all-java-files-under, src)
-LOCAL_JAR_MANIFEST := manifest.txt
-LOCAL_IS_HOST_MODULE := true
-LOCAL_MODULE := dexfuzz
-include $(BUILD_HOST_JAVA_LIBRARY)
-
-# --- dexfuzz script ----------------
-include $(CLEAR_VARS)
-LOCAL_IS_HOST_MODULE := true
-LOCAL_MODULE_CLASS := EXECUTABLES
-LOCAL_MODULE := dexfuzz
-LOCAL_SRC_FILES := dexfuzz
-include $(BUILD_PREBUILT)
-
 # --- dexfuzz script with core image dependencies ----------------
 .PHONY: fuzzer
-fuzzer: $(LOCAL_BUILT_MODULE) $(HOST_CORE_IMG_OUTS)
+fuzzer: dexfuzz-script $(HOST_CORE_IMG_OUTS)
diff --git a/tools/dexfuzz/README b/tools/dexfuzz/README
index fff5473..e6ec1ec 100644
--- a/tools/dexfuzz/README
+++ b/tools/dexfuzz/README
@@ -48,8 +48,6 @@
   --arm64
   --x86
   --x86_64
-  --mips
-  --mips64
 
 And also at least two of the following backends:
   --interpreter
diff --git a/tools/dexfuzz/src/dexfuzz/Options.java b/tools/dexfuzz/src/dexfuzz/Options.java
index d1d8172..5b45c89 100644
--- a/tools/dexfuzz/src/dexfuzz/Options.java
+++ b/tools/dexfuzz/src/dexfuzz/Options.java
@@ -68,8 +68,6 @@
   public static boolean useArchArm64;
   public static boolean useArchX86;
   public static boolean useArchX86_64;
-  public static boolean useArchMips;
-  public static boolean useArchMips64;
   public static boolean skipHostVerify;
   public static boolean shortTimeouts;
   public static boolean dumpOutput;
@@ -112,8 +110,6 @@
     Log.always("    --allarm             : Short for --arm --arm64");
     Log.always("    --x86                : Include x86 backends in comparisons");
     Log.always("    --x86-64             : Include x86-64 backends in comparisons");
-    Log.always("    --mips               : Include MIPS backends in comparisons");
-    Log.always("    --mips64             : Include MIPS64 backends in comparisons");
     Log.always("");
     Log.always("    --dump-output        : Dump outputs of executed programs");
     Log.always("    --dump-verify        : Dump outputs of verification");
@@ -179,10 +175,6 @@
       useArchX86 = true;
     } else if (flag.equals("x86-64")) {
       useArchX86_64 = true;
-    } else if (flag.equals("mips")) {
-      useArchMips = true;
-    } else if (flag.equals("mips64")) {
-      useArchMips64 = true;
     } else if (flag.equals("mutate-limit")) {
       mutateLimit = true;
     } else if (flag.equals("report-unique")) {
@@ -411,9 +403,7 @@
         if (!(useArchArm
             || useArchArm64
             || useArchX86
-            || useArchX86_64
-            || useArchMips
-            || useArchMips64)) {
+            || useArchX86_64)) {
           Log.error("No architecture to execute on was specified!");
           return false;
         }
@@ -425,14 +415,6 @@
         Log.error("Did you mean to specify ARM and x86?");
         return false;
       }
-      if ((useArchArm || useArchArm64) && (useArchMips || useArchMips64)) {
-        Log.error("Did you mean to specify ARM and MIPS?");
-        return false;
-      }
-      if ((useArchX86 || useArchX86_64) && (useArchMips || useArchMips64)) {
-        Log.error("Did you mean to specify x86 and MIPS?");
-        return false;
-      }
       int backends = 0;
       if (useInterpreter) {
         backends++;
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Architecture.java b/tools/dexfuzz/src/dexfuzz/executors/Architecture.java
index 051d80e..ab0c758 100644
--- a/tools/dexfuzz/src/dexfuzz/executors/Architecture.java
+++ b/tools/dexfuzz/src/dexfuzz/executors/Architecture.java
@@ -23,9 +23,7 @@
   ARM("arm"),
   ARM64("arm64"),
   X86("x86"),
-  X86_64("x86_64"),
-  MIPS("mips"),
-  MIPS64("mips64");
+  X86_64("x86_64");
 
   private String archString = "";
 
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Mips64InterpreterExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/Mips64InterpreterExecutor.java
deleted file mode 100644
index eee6111..0000000
--- a/tools/dexfuzz/src/dexfuzz/executors/Mips64InterpreterExecutor.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package dexfuzz.executors;
-
-import dexfuzz.listeners.BaseListener;
-
-public class Mips64InterpreterExecutor extends Executor {
-
-  public Mips64InterpreterExecutor(BaseListener listener, Device device) {
-    super("MIPS64 Interpreter", 30, listener, Architecture.MIPS64, device,
-        /*needsCleanCodeCache*/ false, /*isBisectable*/ false);
-  }
-
-  @Override
-  protected String constructCommand(String programName) {
-    StringBuilder commandBuilder = new StringBuilder();
-    commandBuilder.append("dalvikvm64 -Xint ");
-    commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
-    commandBuilder.append(executeClass);
-    return commandBuilder.toString();
-
-  }
-}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/Mips64OptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/Mips64OptimizingBackendExecutor.java
deleted file mode 100644
index 883ff2a..0000000
--- a/tools/dexfuzz/src/dexfuzz/executors/Mips64OptimizingBackendExecutor.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package dexfuzz.executors;
-
-import dexfuzz.listeners.BaseListener;
-
-public class Mips64OptimizingBackendExecutor extends Executor {
-
-  public Mips64OptimizingBackendExecutor(BaseListener listener, Device device) {
-    super("MIPS64 Optimizing Backend", 5, listener, Architecture.MIPS64, device,
-        /*needsCleanCodeCache*/ true, /*isBisectable*/ true);
-  }
-
-  @Override
-  protected String constructCommand(String programName) {
-    StringBuilder commandBuilder = new StringBuilder();
-    commandBuilder.append("dalvikvm64 -Xcompiler-option --compiler-backend=Optimizing ");
-    // The -Xno-dex-file-fallback option ensures that the execution does not default to
-    // interpreter if compilations fails.
-    commandBuilder.append("-Xno-dex-file-fallback ");
-    commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
-    commandBuilder.append(executeClass);
-    return commandBuilder.toString();
-  }
-}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/MipsInterpreterExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/MipsInterpreterExecutor.java
deleted file mode 100644
index 4a403db..0000000
--- a/tools/dexfuzz/src/dexfuzz/executors/MipsInterpreterExecutor.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package dexfuzz.executors;
-
-import dexfuzz.listeners.BaseListener;
-
-public class MipsInterpreterExecutor extends Executor {
-
-  public MipsInterpreterExecutor(BaseListener listener, Device device) {
-    super("MIPS Interpreter", 30, listener, Architecture.MIPS, device,
-        /*needsCleanCodeCache*/ false, /*isBisectable*/ false);
-  }
-
-  @Override
-  protected String constructCommand(String programName) {
-    StringBuilder commandBuilder = new StringBuilder();
-    commandBuilder.append("dalvikvm32 -Xint ");
-    commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
-    commandBuilder.append(executeClass);
-    return commandBuilder.toString();
-  }
-}
diff --git a/tools/dexfuzz/src/dexfuzz/executors/MipsOptimizingBackendExecutor.java b/tools/dexfuzz/src/dexfuzz/executors/MipsOptimizingBackendExecutor.java
deleted file mode 100644
index b7babdc..0000000
--- a/tools/dexfuzz/src/dexfuzz/executors/MipsOptimizingBackendExecutor.java
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package dexfuzz.executors;
-
-import dexfuzz.listeners.BaseListener;
-
-public class MipsOptimizingBackendExecutor extends Executor {
-
-  public MipsOptimizingBackendExecutor(BaseListener listener, Device device) {
-    super("MIPS Optimizing Backend", 5, listener, Architecture.MIPS, device,
-        /*needsCleanCodeCache*/ true, /*isBisectable*/ true);
-  }
-
-  @Override
-  protected String constructCommand(String programName) {
-    StringBuilder commandBuilder = new StringBuilder();
-    commandBuilder.append("dalvikvm32 -Xcompiler-option --compiler-backend=Optimizing ");
-    // The -Xno-dex-file-fallback option ensures that the execution does not default to
-    // interpreter if compilations fails.
-    commandBuilder.append("-Xno-dex-file-fallback ");
-    commandBuilder.append("-cp ").append(testLocation).append("/").append(programName).append(" ");
-    commandBuilder.append(executeClass);
-    return commandBuilder.toString();
-  }
-}
diff --git a/tools/dexfuzz/src/dexfuzz/fuzzers/Fuzzer.java b/tools/dexfuzz/src/dexfuzz/fuzzers/Fuzzer.java
index ccc426c..56e6846 100644
--- a/tools/dexfuzz/src/dexfuzz/fuzzers/Fuzzer.java
+++ b/tools/dexfuzz/src/dexfuzz/fuzzers/Fuzzer.java
@@ -26,10 +26,6 @@
 import dexfuzz.executors.ArmOptimizingBackendExecutor;
 import dexfuzz.executors.Device;
 import dexfuzz.executors.Executor;
-import dexfuzz.executors.Mips64InterpreterExecutor;
-import dexfuzz.executors.Mips64OptimizingBackendExecutor;
-import dexfuzz.executors.MipsInterpreterExecutor;
-import dexfuzz.executors.MipsOptimizingBackendExecutor;
 import dexfuzz.executors.X86InterpreterExecutor;
 import dexfuzz.executors.X86OptimizingBackendExecutor;
 import dexfuzz.executors.X86_64InterpreterExecutor;
@@ -173,16 +169,6 @@
           X86InterpreterExecutor.class);
     }
 
-    if (Options.useArchMips64) {
-      addExecutorsForArchitecture(device, Mips64OptimizingBackendExecutor.class,
-          Mips64InterpreterExecutor.class);
-    }
-
-    if (Options.useArchMips) {
-      addExecutorsForArchitecture(device, MipsOptimizingBackendExecutor.class,
-          MipsInterpreterExecutor.class);
-    }
-
     // Add the first backend as the golden executor for self-divergence tests.
     goldenExecutor = executors.get(0);
   }
diff --git a/tools/dmtracedump/tracedump.cc b/tools/dmtracedump/tracedump.cc
index 3afee6f..42527c5 100644
--- a/tools/dmtracedump/tracedump.cc
+++ b/tools/dmtracedump/tracedump.cc
@@ -2332,7 +2332,7 @@
   MethodEntry** methods1 = parseMethodEntries(d1);
   MethodEntry** methods2 = parseMethodEntries(d2);
 
-  // sort and assign the indicies
+  // sort and assign the indices
   qsort(methods1, d1->numMethods, sizeof(MethodEntry*), compareElapsedInclusive);
   for (int32_t i = 0; i < d1->numMethods; ++i) {
     methods1[i]->index = i;
diff --git a/tools/external_oj_libjdwp_art_failures.txt b/tools/external_oj_libjdwp_art_failures.txt
index 38e5a99..5783d92 100644
--- a/tools/external_oj_libjdwp_art_failures.txt
+++ b/tools/external_oj_libjdwp_art_failures.txt
@@ -31,6 +31,8 @@
            "org.apache.harmony.jpda.tests.jdwp.ClassType_SetValues002Test#testSetValues002",
            "org.apache.harmony.jpda.tests.jdwp.Events_ClassPrepare002Test#testClassPrepareCausedByDebugger",
            "org.apache.harmony.jpda.tests.jdwp.Events_ExceptionCaughtTest#testExceptionEvent_ThrowLocation_FromNative",
+           "org.apache.harmony.jpda.tests.jdwp.Events_MonitorWaitedTest#testMonitorWaitedForClassMatchFirst",
+           "org.apache.harmony.jpda.tests.jdwp.Events_MonitorWaitTest#testMonitorWaitForClassMatchFirst",
            "org.apache.harmony.jpda.tests.jdwp.ObjectReference_DisableCollectionTest#testDisableCollection_null",
            "org.apache.harmony.jpda.tests.jdwp.ObjectReference_EnableCollectionTest#testEnableCollection_invalid",
            "org.apache.harmony.jpda.tests.jdwp.ObjectReference_EnableCollectionTest#testEnableCollection_null",
diff --git a/tools/generate-boot-image-profile.sh b/tools/generate-boot-image-profile.sh
deleted file mode 100755
index 44c64d2..0000000
--- a/tools/generate-boot-image-profile.sh
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2017 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# This script creates a boot image profile based on input profiles.
-#
-
-if [[ "$#" -lt 2 ]]; then
-  echo "Usage $0 <output> <profman args> <profiles>+"
-  echo "Also outputs <output>.txt and <output>.preloaded-classes"
-  echo 'Example: generate-boot-image-profile.sh boot.prof --profman-arg --boot-image-sampled-method-threshold=1 profiles/0/*/primary.prof'
-  exit 1
-fi
-
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-TOP="$DIR/../.."
-source "${TOP}/build/envsetup.sh" >&/dev/null # import get_build_var
-
-OUT_PROFILE=$1
-shift
-
-# Read the profman args.
-profman_args=()
-while [[ "$#" -ge 2 ]] && [[ "$1" = '--profman-arg' ]]; do
-  profman_args+=("$2")
-  shift 2
-done
-
-# Remaining args are all the profiles.
-for file in "$@"; do
-  if [[ -s $file ]]; then
-    profman_args+=("--profile-file=$file")
-  fi
-done
-
-# Boot jars have hidden API access flags which do not pass dex file
-# verification. Skip it.
-jar_args=()
-boot_jars=$("$ANDROID_BUILD_TOP"/art/tools/bootjars.sh --target)
-jar_dir=$ANDROID_BUILD_TOP/$(get_build_var TARGET_OUT_JAVA_LIBRARIES)
-for file in $boot_jars; do
-  filename="$jar_dir/$file.jar"
-  jar_args+=("--apk=$filename")
-  jar_args+=("--dex-location=$filename")
-done
-profman_args+=("${jar_args[@]}")
-
-# Generate the profile.
-"$ANDROID_HOST_OUT/bin/profman" --generate-boot-image-profile "--reference-profile-file=$OUT_PROFILE" "${profman_args[@]}"
-
-# Convert it to text.
-echo Dumping profile to $OUT_PROFILE.txt
-"$ANDROID_HOST_OUT/bin/profman" --dump-classes-and-methods "--profile-file=$OUT_PROFILE" "${jar_args[@]}" > "$OUT_PROFILE.txt"
-
-# Generate preloaded classes
-# Filter only classes by using grep -v
-# Remove first and last characters L and ;
-# Replace / with . to make dot format
-grep -v "\\->" "$OUT_PROFILE.txt" | sed 's/.\(.*\)./\1/g' | tr "/" "." > "$OUT_PROFILE.preloaded-classes"
-
-# You may need to filter some classes out since creating threads is not allowed in the zygote.
-# i.e. using: grep -v -E '(android.net.ConnectivityThread\$Singleton)'
diff --git a/tools/generate_cmake_lists.py b/tools/generate_cmake_lists.py
index 6c3ce08..b19c292 100755
--- a/tools/generate_cmake_lists.py
+++ b/tools/generate_cmake_lists.py
@@ -68,7 +68,7 @@
 
   ANDROID_BUILD_TOP = get_android_build_top()
 
-  subprocess.check_output(('make -j64 -C %s') % (ANDROID_BUILD_TOP), shell=True)
+  subprocess.check_output('build/soong/soong_ui.bash --make-mode', shell=True, cwd=ANDROID_BUILD_TOP)
 
   out_art_cmakelists_dir = os.path.join(ANDROID_BUILD_TOP,
                                         'out/development/ide/clion/art')
diff --git a/tools/generate_operator_out.py b/tools/generate_operator_out.py
index 3bd62fe..921ae68 100755
--- a/tools/generate_operator_out.py
+++ b/tools/generate_operator_out.py
@@ -17,194 +17,212 @@
 """Generates default implementations of operator<< for enum types."""
 
 import codecs
-import os
 import re
-import string
 import sys
 
 
-_ENUM_START_RE = re.compile(r'\benum\b\s+(class\s+)?(\S+)\s+:?.*\{(\s+// private)?')
+_ENUM_START_RE = re.compile(
+    r'\benum\b\s+(class\s+)?(\S+)\s+:?.*\{(\s+// private)?')
 _ENUM_VALUE_RE = re.compile(r'([A-Za-z0-9_]+)(.*)')
 _ENUM_END_RE = re.compile(r'^\s*\};$')
 _ENUMS = {}
 _NAMESPACES = {}
 _ENUM_CLASSES = {}
 
+
 def Confused(filename, line_number, line):
-  sys.stderr.write('%s:%d: confused by:\n%s\n' % (filename, line_number, line))
-  raise Exception("giving up!")
-  sys.exit(1)
+    sys.stderr.write('%s:%d: confused by:\n%s\n' %
+                     (filename, line_number, line))
+    raise Exception("giving up!")
+    sys.exit(1)
 
 
 def ProcessFile(filename):
-  lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
-  in_enum = False
-  is_enum_private = False
-  is_enum_class = False
-  line_number = 0
-  
+    lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
 
-  namespaces = []
-  enclosing_classes = []
+    class EnumLines:
+        def __init__(self, ns, ec):
+            self.namespaces = ns
+            self.enclosing_classes = ec
+            self.lines = []
 
-  for raw_line in lines:
-    line_number += 1
+    def generate_enum_lines(l):
+        line_number = 0
+        enum_lines = None
+        namespaces = []
+        enclosing_classes = []
 
-    if not in_enum:
-      # Is this the start of a new enum?
-      m = _ENUM_START_RE.search(raw_line)
-      if m:
-        # Yes, so add an empty entry to _ENUMS for this enum.
-        
-        # Except when it's private
+        for raw_line in l:
+            line_number += 1
+
+            if enum_lines is None:
+                # Is this the start of a new enum?
+                m = _ENUM_START_RE.search(raw_line)
+                if m:
+                    # Yes, so create new line list.
+                    enum_lines = EnumLines(namespaces[:], enclosing_classes[:])
+                    enum_lines.lines.append((raw_line, line_number))
+                    continue
+
+                # Is this the start or end of a namespace?
+                m = re.search(r'^namespace (\S+) \{', raw_line)
+                if m:
+                    namespaces.append(m.group(1))
+                    continue
+                m = re.search(r'^\}\s+// namespace', raw_line)
+                if m:
+                    namespaces = namespaces[0:len(namespaces) - 1]
+                    continue
+
+                # Is this the start or end of an enclosing class or struct?
+                m = re.search(
+                    r'^\s*(?:class|struct)(?: MANAGED)?(?: PACKED\([0-9]\))? (\S+).* \{', raw_line)
+                if m:
+                    enclosing_classes.append(m.group(1))
+                    continue
+
+                # End of class/struct -- be careful not to match "do { ... } while" constructs by accident
+                m = re.search(r'^\s*\}(\s+)?(while)?(.+)?;', raw_line)
+                if m and not m.group(2):
+                    enclosing_classes = enclosing_classes[0:len(enclosing_classes) - 1]
+                    continue
+
+                continue
+
+            # Is this the end of the current enum?
+            m = _ENUM_END_RE.search(raw_line)
+            if m:
+                if enum_lines is None:
+                    Confused(filename, line_number, raw_line)
+                yield enum_lines
+                enum_lines = None
+                continue
+
+            # Append the line
+            enum_lines.lines.append((raw_line, line_number))
+
+    for enum_lines in generate_enum_lines(lines):
+        m = _ENUM_START_RE.search(enum_lines.lines[0][0])
         if m.group(3) is not None:
-          is_enum_private = True
-        else:
-          is_enum_private = False
-          is_enum_class = m.group(1) is not None
-          enum_name = m.group(2)
-          if len(enclosing_classes) > 0:
-            enum_name = '::'.join(enclosing_classes) + '::' + enum_name
-          _ENUMS[enum_name] = []
-          _NAMESPACES[enum_name] = '::'.join(namespaces)
-          _ENUM_CLASSES[enum_name] = is_enum_class
-        in_enum = True
-        continue
+            # Skip private enums.
+            continue
 
-      # Is this the start or end of a namespace?
-      m = re.compile(r'^namespace (\S+) \{').search(raw_line)
-      if m:
-        namespaces.append(m.group(1))
-        continue
-      m = re.compile(r'^\}\s+// namespace').search(raw_line)
-      if m:
-        namespaces = namespaces[0:len(namespaces) - 1]
-        continue
+        # Add an empty entry to _ENUMS for this enum.
+        is_enum_class = m.group(1) is not None
+        enum_name = m.group(2)
+        if len(enum_lines.enclosing_classes) > 0:
+            enum_name = '::'.join(enum_lines.enclosing_classes) + '::' + enum_name
+        _ENUMS[enum_name] = []
+        _NAMESPACES[enum_name] = '::'.join(enum_lines.namespaces)
+        _ENUM_CLASSES[enum_name] = is_enum_class
 
-      # Is this the start or end of an enclosing class or struct?
-      m = re.compile(r'^\s*(?:class|struct)(?: MANAGED)?(?: PACKED\([0-9]\))? (\S+).* \{').search(raw_line)
-      if m:
-        enclosing_classes.append(m.group(1))
-        continue
+        def generate_non_empty_line(lines):
+            for raw_line, line_number in lines:
+                # Strip // comments.
+                line = re.sub(r'//.*', '', raw_line)
+                # Strip whitespace.
+                line = line.strip()
+                # Skip blank lines.
+                if len(line) == 0:
+                    continue
 
-      # End of class/struct -- be careful not to match "do { ... } while" constructs by accident
-      m = re.compile(r'^\s*\}(\s+)?(while)?(.+)?;').search(raw_line)
-      if m and not m.group(2):
-        enclosing_classes = enclosing_classes[0:len(enclosing_classes) - 1]
-        continue
+                # The only useful thing in comments is the <<alternate text>> syntax for
+                # overriding the default enum value names. Pull that out...
+                enum_text = None
+                m_comment = re.search(r'// <<(.*?)>>', raw_line)
+                if m_comment:
+                    enum_text = m_comment.group(1)
 
-      continue
+                yield (line, enum_text, raw_line, line_number)
 
-    # Is this the end of the current enum?
-    m = _ENUM_END_RE.search(raw_line)
-    if m:
-      if not in_enum:
-        Confused(filename, line_number, raw_line)
-      in_enum = False
-      continue
+        for line, enum_text, raw_line, line_number in generate_non_empty_line(enum_lines.lines[1:]):
+            # Since we know we're in an enum type, and we're not looking at a comment
+            # or a blank line, this line should be the next enum value...
+            m = _ENUM_VALUE_RE.search(line)
+            if not m:
+                Confused(filename, line_number, raw_line)
+            enum_value = m.group(1)
 
-    if is_enum_private:
-      continue
+            # By default, we turn "kSomeValue" into "SomeValue".
+            if enum_text is None:
+                enum_text = enum_value
+                if enum_text.startswith('k'):
+                    enum_text = enum_text[1:]
 
-    # The only useful thing in comments is the <<alternate text>> syntax for
-    # overriding the default enum value names. Pull that out...
-    enum_text = None
-    m_comment = re.compile(r'// <<(.*?)>>').search(raw_line)
-    if m_comment:
-      enum_text = m_comment.group(1)
-    # ...and then strip // comments.
-    line = re.sub(r'//.*', '', raw_line)
+            # Check that we understand the line (and hopefully do not parse incorrectly), or should
+            # filter.
+            rest = m.group(2).strip()
 
-    # Strip whitespace.
-    line = line.strip()
+            # With "kSomeValue = kOtherValue," we take the original and skip later synonyms.
+            # TODO: check that the rhs is actually an existing value.
+            if rest.startswith('= k'):
+                continue
 
-    # Skip blank lines.
-    if len(line) == 0:
-      continue
+            # Remove trailing comma.
+            if rest.endswith(','):
+                rest = rest[:-1]
 
-    # Since we know we're in an enum type, and we're not looking at a comment
-    # or a blank line, this line should be the next enum value...
-    m = _ENUM_VALUE_RE.search(line)
-    if not m:
-      Confused(filename, line_number, raw_line)
-    enum_value = m.group(1)
+            # We now expect rest to be empty, or an assignment to an "expression."
+            if len(rest):
+                # We want to lose the expression "= [exp]". As we do not have a real C parser, just
+                # assume anything without a comma is valid.
+                m_exp = re.match('= [^,]+$', rest)
+                if m_exp is None:
+                    sys.stderr.write('%s\n' % (rest))
+                    Confused(filename, line_number, raw_line)
 
-    # By default, we turn "kSomeValue" into "SomeValue".
-    if enum_text == None:
-      enum_text = enum_value
-      if enum_text.startswith('k'):
-        enum_text = enum_text[1:]
+            # If the enum is scoped, we must prefix enum value with enum name (which is already prefixed
+            # by enclosing classes).
+            if is_enum_class:
+                enum_value = enum_name + '::' + enum_value
+            else:
+                if len(enum_lines.enclosing_classes) > 0:
+                    enum_value = '::'.join(enum_lines.enclosing_classes) + '::' + enum_value
 
-    # Lose literal values because we don't care; turn "= 123, // blah" into ", // blah".
-    rest = m.group(2).strip()
-    m_literal = re.compile(r'= (0x[0-9a-f]+|-?[0-9]+|\'.\')').search(rest)
-    if m_literal:
-      rest = rest[(len(m_literal.group(0))):]
+            _ENUMS[enum_name].append((enum_value, enum_text))
 
-    # With "kSomeValue = kOtherValue," we take the original and skip later synonyms.
-    # TODO: check that the rhs is actually an existing value.
-    if rest.startswith('= k'):
-      continue
-
-    # Remove any trailing comma and whitespace
-    if rest.startswith(','):
-      rest = rest[1:]
-    rest = rest.strip()
-
-    # There shouldn't be anything left.
-    if len(rest):
-      sys.stderr.write('%s\n' % (rest))
-      Confused(filename, line_number, raw_line)
-
-    # If the enum is scoped, we must prefix enum value with enum name (which is already prefixed
-    # by enclosing classes).
-    if is_enum_class:
-      enum_value = enum_name + '::' + enum_value
-    else:
-      if len(enclosing_classes) > 0:
-        enum_value = '::'.join(enclosing_classes) + '::' + enum_value
-
-    _ENUMS[enum_name].append((enum_value, enum_text))
 
 def main():
-  local_path = sys.argv[1]
-  header_files = []
-  for header_file in sys.argv[2:]:
-    header_files.append(header_file)
-    ProcessFile(header_file)
+    local_path = sys.argv[1]
+    header_files = []
+    for header_file in sys.argv[2:]:
+        header_files.append(header_file)
+        ProcessFile(header_file)
 
-  print('#include <iostream>')
-  print('')
-
-  for header_file in header_files:
-    header_file = header_file.replace(local_path + '/', '')
-    print('#include "%s"' % header_file)
-
-  print('')
-
-  for enum_name in _ENUMS:
-    print('// This was automatically generated by %s --- do not edit!' % sys.argv[0])
-
-    namespaces = _NAMESPACES[enum_name].split('::')
-    for namespace in namespaces:
-      print('namespace %s {' % namespace)
-
-    print('std::ostream& operator<<(std::ostream& os, const %s& rhs) {' % enum_name)
-    print('  switch (rhs) {')
-    for (enum_value, enum_text) in _ENUMS[enum_name]:
-      print('    case %s: os << "%s"; break;' % (enum_value, enum_text))
-    if not _ENUM_CLASSES[enum_name]:
-      print('    default: os << "%s[" << static_cast<int>(rhs) << "]"; break;' % enum_name)
-    print('  }')
-    print('  return os;')
-    print('}')
-
-    for namespace in reversed(namespaces):
-      print('}  // namespace %s' % namespace)
+    print('#include <iostream>')
     print('')
 
-  sys.exit(0)
+    for header_file in header_files:
+        header_file = header_file.replace(local_path + '/', '')
+        print('#include "%s"' % header_file)
+
+    print('')
+
+    for enum_name in _ENUMS:
+        print('// This was automatically generated by art/tools/generate_operator_out.py --- do not edit!')
+
+        namespaces = _NAMESPACES[enum_name].split('::')
+        for namespace in namespaces:
+            print('namespace %s {' % namespace)
+
+        print(
+            'std::ostream& operator<<(std::ostream& os, const %s& rhs) {' % enum_name)
+        print('  switch (rhs) {')
+        for (enum_value, enum_text) in _ENUMS[enum_name]:
+            print('    case %s: os << "%s"; break;' % (enum_value, enum_text))
+        if not _ENUM_CLASSES[enum_name]:
+            print(
+                '    default: os << "%s[" << static_cast<int>(rhs) << "]"; break;' % enum_name)
+        print('  }')
+        print('  return os;')
+        print('}')
+
+        for namespace in reversed(namespaces):
+            print('}  // namespace %s' % namespace)
+        print('')
+
+    sys.exit(0)
 
 
 if __name__ == '__main__':
-  main()
+    main()
diff --git a/tools/hiddenapi/hiddenapi.cc b/tools/hiddenapi/hiddenapi.cc
index 646e483..f6d599f 100644
--- a/tools/hiddenapi/hiddenapi.cc
+++ b/tools/hiddenapi/hiddenapi.cc
@@ -197,7 +197,7 @@
 
  private:
   inline uint32_t GetAccessFlags() const { return item_.GetAccessFlags(); }
-  inline uint32_t HasAccessFlags(uint32_t mask) const { return (GetAccessFlags() & mask) == mask; }
+  inline bool HasAccessFlags(uint32_t mask) const { return (GetAccessFlags() & mask) == mask; }
 
   inline std::string_view GetName() const {
     return IsMethod() ? item_.GetDexFile().GetMethodName(GetMethodId())
diff --git a/tools/hiddenapi/hiddenapi_test.cc b/tools/hiddenapi/hiddenapi_test.cc
index 9f65a54..41eb4db 100644
--- a/tools/hiddenapi/hiddenapi_test.cc
+++ b/tools/hiddenapi/hiddenapi_test.cc
@@ -31,10 +31,9 @@
 class HiddenApiTest : public CommonRuntimeTest {
  protected:
   std::string GetHiddenApiCmd() {
-    std::string file_path = GetTestAndroidRoot();
-    file_path += "/bin/hiddenapi";
+    std::string file_path = GetArtBinDir() + "/hiddenapi";
     if (kIsDebugBuild) {
-      file_path += "d";
+      file_path += 'd';
     }
     if (!OS::FileExists(file_path.c_str())) {
       LOG(FATAL) << "Could not find binary " << file_path;
@@ -691,6 +690,40 @@
   ASSERT_EQ(dex_file.get(), nullptr);
 }
 
+TEST_F(HiddenApiTest, InstanceFieldCorePlatformApiMatch) {
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->ifield:LBadType1;,greylist" << std::endl
+      << "LMain;->ifield:LBadType2;,greylist-max-o" << std::endl
+      << "LMain;->ifield:I,greylist,core-platform-api" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
+  ASSERT_NE(dex_file.get(), nullptr);
+  ASSERT_EQ(hiddenapi::ApiList::CorePlatformApi() |
+  hiddenapi::ApiList::Greylist(), GetIFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceFieldTestApiMatch) {
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->ifield:LBadType1;,greylist" << std::endl
+      << "LMain;->ifield:LBadType2;,greylist-max-o" << std::endl
+      << "LMain;->ifield:I,greylist,test-api" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
+  ASSERT_NE(dex_file.get(), nullptr);
+  ASSERT_EQ(hiddenapi::ApiList::TestApi()
+  | hiddenapi::ApiList::Greylist(), GetIFieldHiddenFlags(*dex_file));
+}
+
+TEST_F(HiddenApiTest, InstanceFieldUnknownFlagMatch) {
+  ScratchFile dex, flags_csv;
+  OpenStream(flags_csv)
+      << "LMain;->ifield:LBadType1;,greylist" << std::endl
+      << "LMain;->ifield:LBadType2;,greylist-max-o" << std::endl
+      << "LMain;->ifield:I,greylist,unknown-flag" << std::endl;
+  auto dex_file = RunHiddenapiEncode(flags_csv, {}, dex);
+  ASSERT_EQ(dex_file.get(), nullptr);
+}
+
 // The following tests use this class hierarchy:
 //
 //    AbstractPackageClass  PublicInterface
diff --git a/tools/host_bcp.sh b/tools/host_bcp.sh
index 65a48bf..26231cd 100755
--- a/tools/host_bcp.sh
+++ b/tools/host_bcp.sh
@@ -14,24 +14,32 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-if [[ ${#@} != 1 ]]; then
+if [[ ${#@} != 1 ]] && [[ ${#@} != 2 ]]; then
   cat <<EOF
 Usage
-  host_bcp <image> | xargs <art-host-tool> ...
+  host_bcp <image> [--use-first-dir] | xargs <art-host-tool> ...
 Extracts boot class path locations from <image> and outputs the appropriate
   --runtime-arg -Xbootclasspath:...
   --runtime-arg -Xbootclasspath-locations:...
 arguments for many ART host tools based on the \$ANDROID_PRODUCT_OUT variable
-and existing \$ANDROID_PRODUCT_OUT/apex/com.android.runtime* paths.
+and existing \$ANDROID_PRODUCT_OUT/apex/com.android.art* paths.
+If --use-first-dir is specified, the script will use the first apex dir instead
+of resulting in an error.
 EOF
   exit 1
 fi
 
 IMAGE=$1
+USE_FIRST_DIR=false
+
+if [[ $2 == "--use-first-dir" ]]; then
+  USE_FIRST_DIR=true
+fi
+
 if [[ ! -e ${IMAGE} ]]; then
-  IMAGE=${ANDROID_PRODUCT_OUT}$1
+  IMAGE=${ANDROID_PRODUCT_OUT}/$1
   if [[ ! -e ${IMAGE} ]]; then
-    echo "Neither $1 nor ${ANDROID_PRODUCT_OUT}$1 exists."
+    echo "Neither $1 nor ${ANDROID_PRODUCT_OUT}/$1 exists."
     exit 1
   fi
 fi
@@ -42,17 +50,22 @@
   exit 1
 fi
 
-RUNTIME_APEX=/apex/com.android.runtime
-RUNTIME_APEX_SELECTED=
-for d in `ls -1 -d ${ANDROID_PRODUCT_OUT}${RUNTIME_APEX}* 2>/dev/null`; do
-  if [[ "x${RUNTIME_APEX_SELECTED}" != "x" ]]; then
-    echo "Multiple Runtime apex dirs: ${RUNTIME_APEX_SELECTED}, ${d}."
+MANIFEST=/apex_manifest.pb
+ART_APEX=/apex/com.android.art
+ART_APEX_SELECTED=
+for m in `ls -1 -d ${ANDROID_PRODUCT_OUT}{,/system}${ART_APEX}*${MANIFEST} 2>/dev/null`; do
+  d=${m:0:-${#MANIFEST}}
+  if [[ "x${ART_APEX_SELECTED}" != "x" ]]; then
+    if [[ $USE_FIRST_DIR == true ]]; then
+      break
+    fi
+    echo "Multiple ART APEX dirs: ${ART_APEX_SELECTED}, ${d}."
     exit 1
   fi
-  RUNTIME_APEX_SELECTED=${d}
+  ART_APEX_SELECTED=${d}
 done
-if [[ "x${RUNTIME_APEX_SELECTED}" == "x" ]]; then
-  echo "No Runtime apex dir."
+if [[ "x${ART_APEX_SELECTED}" == "x" ]]; then
+  echo "No ART APEX dir."
   exit 1
 fi
 
@@ -62,9 +75,13 @@
 for COMPONENT in ${BCPL}; do
   HEAD=${ANDROID_PRODUCT_OUT}
   TAIL=${COMPONENT}
-  if [[ ${COMPONENT:0:${#RUNTIME_APEX}} = ${RUNTIME_APEX} ]]; then
-    HEAD=${RUNTIME_APEX_SELECTED}
-    TAIL=${COMPONENT:${#RUNTIME_APEX}}
+  if [[ ${COMPONENT:0:${#ART_APEX}} = ${ART_APEX} ]]; then
+    HEAD=${ART_APEX_SELECTED}
+    TAIL=${COMPONENT:${#ART_APEX}}
+  fi
+  if [[ ! -e $HEAD$TAIL ]]; then
+    echo "File does not exist: $HEAD$TAIL"
+    exit 1
   fi
   BCP="${BCP}:${HEAD}${TAIL}"
 done
diff --git a/tools/jvmti-agents/README.md b/tools/jvmti-agents/README.md
index 6f3e6dc..35dfc68 100644
--- a/tools/jvmti-agents/README.md
+++ b/tools/jvmti-agents/README.md
@@ -9,6 +9,7 @@
 * [libdumpjvmti](./dump-jvmti-state)
 * [libfieldnull](./field-null-percent)
 * [libjitload](./jit-load)
+* [liblistextensions](./list-extensions)
 * [libforceredefine](./simple-force-redefine)
 * [litifast](./ti-fast)
 * [libtitrace](./titrace)
diff --git a/tools/jvmti-agents/breakpoint-logger/Android.bp b/tools/jvmti-agents/breakpoint-logger/Android.bp
index 67b423a..57878cd 100644
--- a/tools/jvmti-agents/breakpoint-logger/Android.bp
+++ b/tools/jvmti-agents/breakpoint-logger/Android.bp
@@ -38,15 +38,6 @@
     header_libs: [
         "libopenjdkjvmti_headers",
     ],
-    multilib: {
-        lib32: {
-            suffix: "32",
-        },
-        lib64: {
-            suffix: "64",
-        },
-    },
-    symlink_preferred_arch: true,
 }
 
 art_cc_library {
diff --git a/tools/jvmti-agents/breakpoint-logger/README.md b/tools/jvmti-agents/breakpoint-logger/README.md
index d7ffb34..101f277 100644
--- a/tools/jvmti-agents/breakpoint-logger/README.md
+++ b/tools/jvmti-agents/breakpoint-logger/README.md
@@ -5,7 +5,7 @@
 
 # Usage
 ### Build
->    `make libbreakpointlogger`  # or 'make libbreakpointloggerd' with debugging checks enabled
+>    `m libbreakpointlogger`  # or 'm libbreakpointloggerd' with debugging checks enabled
 
 The libraries will be built for 32-bit, 64-bit, host and target. Below examples
 assume you want to use the 64-bit version.
diff --git a/tools/jvmti-agents/chain-agents/Android.bp b/tools/jvmti-agents/chain-agents/Android.bp
new file mode 100644
index 0000000..9d66069
--- /dev/null
+++ b/tools/jvmti-agents/chain-agents/Android.bp
@@ -0,0 +1,75 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Build variants {target,host} x {debug,ndebug} x {32,64}
+
+cc_defaults {
+    name: "chainagents-base-defaults",
+    srcs: ["chainagents.cc"],
+    defaults: ["art_defaults"],
+
+    // Note that this tool needs to be built for both 32-bit and 64-bit since it requires
+    // to be same ISA as what it is attached to.
+    compile_multilib: "both",
+
+    header_libs: [
+        "libopenjdkjvmti_headers",
+        "libnativehelper_header_only",
+        "jni_headers",
+    ],
+}
+
+cc_defaults {
+    name: "chainagents-defaults",
+    host_supported: true,
+    shared_libs: [
+        "libbase",
+    ],
+    defaults: ["chainagents-base-defaults"],
+}
+
+art_cc_library {
+    name: "libchainagents",
+    defaults: ["chainagents-defaults"],
+}
+
+art_cc_library {
+    name: "libchainagentsd",
+    defaults: [
+        "art_debug_defaults",
+        "chainagents-defaults",
+    ],
+}
+
+cc_defaults {
+    name: "chainagents-static-defaults",
+    host_supported: false,
+    defaults: ["chainagents-base-defaults"],
+
+    shared_libs: [
+        "liblog",
+    ],
+    static_libs: [
+        "libbase_ndk",
+    ],
+    sdk_version: "current",
+    stl: "c++_static",
+}
+
+cc_library {
+    name: "libchainagentss",
+    defaults: ["chainagents-static-defaults"],
+}
diff --git a/tools/jvmti-agents/chain-agents/README.md b/tools/jvmti-agents/chain-agents/README.md
new file mode 100644
index 0000000..c76ac28
--- /dev/null
+++ b/tools/jvmti-agents/chain-agents/README.md
@@ -0,0 +1,36 @@
+# chainagent
+
+The chainagents agent is a JVMTI agent that chain loads other agents from a file found at a
+location relative to a passed-in path. It can be used in combination with Android startup_agents
+in order to implement more complicated agent-loading rules.
+
+It will open the file `chain_agents.txt` from the directory passed in as an argument and read it
+line-by-line loading the agents (with the arguments) listed in the file.
+
+Errors in loading are logged then ignored.
+
+# Usage
+### Build
+>    `m libchainagents`
+
+The libraries will be built for 32-bit, 64-bit, host and target. Below examples
+assume you want to use the 64-bit version.
+
+### Command Line
+#### ART
+>    `art -Xplugin:$ANDROID_HOST_OUT/lib64/libopenjdkjvmti.so -agentpath:$ANDROID_HOST_OUT/lib64/libchainagents.so=/some/path/here -Xint helloworld`
+
+* `-Xplugin` and `-agentpath` need to be used, otherwise the agent will fail during init.
+* If using `libartd.so`, make sure to use the debug version of jvmti.
+
+### chain_agents.txt file format.
+
+The chain-agents file is a list of agent files and arguments to load in the same format as the
+`-agentpath` argument.
+
+#### Example chain_agents.txt file
+
+```
+/data/data/com.android.launcher3/code_cache/libtifast32.so=ClassLoad
+/data/data/com.android.launcher3/code_cache/libtifast64.so=ClassLoad
+```
diff --git a/tools/jvmti-agents/chain-agents/chainagents.cc b/tools/jvmti-agents/chain-agents/chainagents.cc
new file mode 100644
index 0000000..1242409
--- /dev/null
+++ b/tools/jvmti-agents/chain-agents/chainagents.cc
@@ -0,0 +1,136 @@
+// Copyright (C) 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <android-base/logging.h>
+#include <dlfcn.h>
+#include <jni.h>
+#include <jvmti.h>
+
+#include <atomic>
+#include <fstream>
+#include <iomanip>
+#include <iostream>
+#include <memory>
+#include <mutex>
+#include <sstream>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+namespace chainagents {
+
+static constexpr const char* kChainFile = "chain_agents.txt";
+static constexpr const char* kOnLoad = "Agent_OnLoad";
+static constexpr const char* kOnAttach = "Agent_OnAttach";
+static constexpr const char* kOnUnload = "Agent_OnUnload";
+using AgentLoadFunction = jint (*)(JavaVM*, const char*, void*);
+using AgentUnloadFunction = jint (*)(JavaVM*);
+
+// Global namespace. Shared by every usage of this wrapper unfortunately.
+// We need to keep track of them to call Agent_OnUnload.
+static std::mutex unload_mutex;
+
+struct Unloader {
+  AgentUnloadFunction unload;
+};
+static std::vector<Unloader> unload_functions;
+
+enum class StartType {
+  OnAttach,
+  OnLoad,
+};
+
+static std::pair<std::string, std::string> Split(std::string source, char delim) {
+  std::string first(source.substr(0, source.find(delim)));
+  if (source.find(delim) == std::string::npos) {
+    return std::pair(first, "");
+  } else {
+    return std::pair(first, source.substr(source.find(delim) + 1));
+  }
+}
+
+static jint Load(StartType start,
+                 JavaVM* vm,
+                 void* reserved,
+                 const std::pair<std::string, std::string>& lib_and_args,
+                 /*out*/ std::string* err) {
+  void* handle = dlopen(lib_and_args.first.c_str(), RTLD_LAZY);
+  std::ostringstream oss;
+  if (handle == nullptr) {
+    oss << "Failed to dlopen due to " << dlerror();
+    *err = oss.str();
+    return JNI_ERR;
+  }
+  AgentLoadFunction alf = reinterpret_cast<AgentLoadFunction>(
+      dlsym(handle, start == StartType::OnLoad ? kOnLoad : kOnAttach));
+  if (alf == nullptr) {
+    oss << "Failed to dlsym " << (start == StartType::OnLoad ? kOnLoad : kOnAttach) << " due to "
+        << dlerror();
+    *err = oss.str();
+    return JNI_ERR;
+  }
+  jint res = alf(vm, lib_and_args.second.c_str(), reserved);
+  if (res != JNI_OK) {
+    *err = "load function failed!";
+    return res;
+  }
+  AgentUnloadFunction auf = reinterpret_cast<AgentUnloadFunction>(dlsym(handle, kOnUnload));
+  if (auf != nullptr) {
+    unload_functions.push_back({ auf });
+  }
+  return JNI_OK;
+}
+
+static jint AgentStart(StartType start, JavaVM* vm, char* options, void* reserved) {
+  std::string input_file(options);
+  input_file = input_file + "/" + kChainFile;
+  std::ifstream input(input_file);
+  std::string line;
+  std::lock_guard<std::mutex> mu(unload_mutex);
+  while (std::getline(input, line)) {
+    std::pair<std::string, std::string> lib_and_args(Split(line, '='));
+    std::string err;
+    jint new_res = Load(start, vm, reserved, lib_and_args, &err);
+    if (new_res != JNI_OK) {
+      PLOG(WARNING) << "Failed to load library " << lib_and_args.first
+                    << " (arguments: " << lib_and_args.second << ") due to " << err;
+    }
+  }
+  return JNI_OK;
+}
+
+// Late attachment (e.g. 'am attach-agent').
+extern "C" JNIEXPORT jint JNICALL Agent_OnAttach(JavaVM* vm, char* options, void* reserved) {
+  return AgentStart(StartType::OnAttach, vm, options, reserved);
+}
+
+// Early attachment
+// (e.g. 'java
+// -agentpath:/path/to/libwrapagentproperties.so=/path/to/propfile,/path/to/wrapped.so=[ops]').
+extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* jvm, char* options, void* reserved) {
+  return AgentStart(StartType::OnLoad, jvm, options, reserved);
+}
+
+extern "C" JNIEXPORT void JNICALL Agent_OnUnload(JavaVM* jvm) {
+  std::lock_guard<std::mutex> lk(unload_mutex);
+  for (const Unloader& u : unload_functions) {
+    u.unload(jvm);
+    // Don't dlclose since some agents expect to still have code loaded after this.
+  }
+  unload_functions.clear();
+}
+
+}  // namespace chainagents
diff --git a/tools/jvmti-agents/dump-jvmti-state/Android.bp b/tools/jvmti-agents/dump-jvmti-state/Android.bp
index 5c78965..3a48941 100644
--- a/tools/jvmti-agents/dump-jvmti-state/Android.bp
+++ b/tools/jvmti-agents/dump-jvmti-state/Android.bp
@@ -31,15 +31,6 @@
     header_libs: [
         "libopenjdkjvmti_headers",
     ],
-    multilib: {
-        lib32: {
-            suffix: "32",
-        },
-        lib64: {
-            suffix: "64",
-        },
-    },
-    symlink_preferred_arch: true,
 }
 
 art_cc_library {
diff --git a/tools/jvmti-agents/dump-jvmti-state/README.md b/tools/jvmti-agents/dump-jvmti-state/README.md
index 4aabc08..c07a194 100644
--- a/tools/jvmti-agents/dump-jvmti-state/README.md
+++ b/tools/jvmti-agents/dump-jvmti-state/README.md
@@ -6,7 +6,7 @@
 
 # Usage
 ### Build
->    `make libdumpjvmti`
+>    `m libdumpjvmti`
 
 The libraries will be built for 32-bit, 64-bit, host and target. Below examples
 assume you want to use the 64-bit version.
@@ -24,4 +24,4 @@
 >
 >    `adb shell am start-activity --attach-agent /data/local/tmp/libdumpjvmti.so some.debuggable.apps/.the.app.MainActivity`
 >
->    `adb shell kill -3 $(adb shell pidof some.debuggable.apps)`
\ No newline at end of file
+>    `adb shell kill -3 $(adb shell pidof some.debuggable.apps)`
diff --git a/tools/jvmti-agents/field-counts/Android.bp b/tools/jvmti-agents/field-counts/Android.bp
new file mode 100644
index 0000000..a91af99
--- /dev/null
+++ b/tools/jvmti-agents/field-counts/Android.bp
@@ -0,0 +1,73 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Build variants {target,host} x {debug,ndebug} x {32,64}
+cc_defaults {
+    name: "fieldcount-base-defaults",
+    srcs: ["fieldcount.cc"],
+    defaults: ["art_defaults"],
+
+    // Note that this tool needs to be built for both 32-bit and 64-bit since it requires
+    // to be same ISA as what it is attached to.
+    compile_multilib: "both",
+    header_libs: [
+        "libopenjdkjvmti_headers",
+        "libnativehelper_header_only",
+        "jni_headers",
+    ],
+}
+
+cc_defaults {
+    name: "fieldcount-defaults",
+    host_supported: true,
+    shared_libs: [
+        "libbase",
+    ],
+    defaults: ["fieldcount-base-defaults"],
+}
+
+cc_defaults {
+    name: "fieldcount-static-defaults",
+    host_supported: false,
+    defaults: ["fieldcount-base-defaults"],
+
+    shared_libs: [
+        "liblog",
+    ],
+    static_libs: [
+        "libbase_ndk",
+    ],
+    sdk_version: "current",
+    stl: "c++_static",
+}
+
+cc_library {
+    name: "libfieldcounts",
+    defaults: ["fieldcount-static-defaults"],
+}
+
+art_cc_library {
+    name: "libfieldcount",
+    defaults: ["fieldcount-defaults"],
+}
+
+art_cc_library {
+    name: "libfieldcountd",
+    defaults: [
+        "art_debug_defaults",
+        "fieldcount-defaults",
+    ],
+}
diff --git a/tools/jvmti-agents/field-counts/README.md b/tools/jvmti-agents/field-counts/README.md
new file mode 100644
index 0000000..15ff9d9
--- /dev/null
+++ b/tools/jvmti-agents/field-counts/README.md
@@ -0,0 +1,64 @@
+# fieldcount
+
+fieldcount is a JVMTI agent designed to investigate the types being held by specific fields and
+how large the objects referenced by these fields are.
+
+Note that just by using the agent some fields might be written (for example fields related to
+keeping track of jfieldIDs). Users should be aware of this.
+
+# Usage
+### Build
+>    `m libfieldcount libfieldcounts`
+
+The libraries will be built for 32-bit, 64-bit, host and target. Below examples
+assume you want to use the 64-bit version.
+
+### Command Line
+
+The agent is loaded using -agentpath like normal. It takes arguments in the
+following format:
+>     `Lname/of/class;.nameOfField:Ltype/of/field;[,...]`
+
+#### ART
+```shell
+art -Xplugin:$ANDROID_HOST_OUT/lib64/libopenjdkjvmti.so '-agentpath:libfieldcount.so=Ljava/lang/Class;.extData:Ldalvik/system/ClassExt;,Ldalvik/system/ClassExt;.jmethodIDs:Ljava/lang/Object;' -cp tmp/java/helloworld.dex -Xint helloworld
+```
+
+* `-Xplugin` and `-agentpath` need to be used, otherwise the agent will fail during init.
+* If using `libartd.so`, make sure to use the debug version of jvmti.
+
+```shell
+adb shell setenforce 0
+
+adb push $ANDROID_PRODUCT_OUT/system/lib64/libfieldcounts.so /data/local/tmp/
+
+adb shell am start-activity --attach-agent '/data/local/tmp/libfieldcounts.so=Ljava/lang/Class;.extData:Ldalvik/system/ClassExt;,Ldalvik/system/ClassExt;.jmethodIDs:Ljava/lang/Object;' some.debuggable.apps/.the.app.MainActivity
+```
+
+#### RI
+>    `java '-agentpath:libfieldcount.so=Lname/of/class;.nameOfField:Ltype/of/field;' -cp tmp/helloworld/classes helloworld`
+
+### Printing the Results
+All statistics gathered during the trace are printed automatically when the
+program normally exits. In the case of Android applications, they are always
+killed, so we need to manually print the results.
+
+>    `kill -SIGQUIT $(pidof com.littleinc.orm_benchmark)`
+
+Will initiate a dump of the counts (to logcat).
+
+The dump will look something like this.
+
+```
+dalvikvm64 I 06-27 14:24:59 183155 183155 fieldcount.cc:60] listing field Ljava/lang/Class;.extData:Ldalvik/system/ClassExt;
+dalvikvm64 I 06-27 14:24:59 183155 183155 fieldcount.cc:60] listing field Ldalvik/system/ClassExt;.jmethodIDs:Ljava/lang/Object;
+Hello, world!
+dalvikvm64 I 06-27 14:24:59 183155 183155 fieldcount.cc:97] Dumping counts of fields.
+dalvikvm64 I 06-27 14:24:59 183155 183155 fieldcount.cc:98]     Field name      Type    Count   Total Size
+dalvikvm64 I 06-27 14:24:59 183155 183155 fieldcount.cc:155]    Ljava/lang/Class;.extData:Ldalvik/system/ClassExt;      <ALL TYPES>     2800    3024
+dalvikvm64 I 06-27 14:24:59 183155 183155 fieldcount.cc:161]    Ljava/lang/Class;.extData:Ldalvik/system/ClassExt;      Ldalvik/system/ClassExt;        64      3024
+dalvikvm64 I 06-27 14:24:59 183155 183155 fieldcount.cc:161]    Ljava/lang/Class;.extData:Ldalvik/system/ClassExt;      <null>  2738    0
+dalvikvm64 I 06-27 14:24:59 183155 183155 fieldcount.cc:155]    Ldalvik/system/ClassExt;.jmethodIDs:Ljava/lang/Object;  <ALL TYPES>     63      10008
+dalvikvm64 I 06-27 14:24:59 183155 183155 fieldcount.cc:161]    Ldalvik/system/ClassExt;.jmethodIDs:Ljava/lang/Object;  <null>  26      0
+dalvikvm64 I 06-27 14:24:59 183155 183155 fieldcount.cc:161]    Ldalvik/system/ClassExt;.jmethodIDs:Ljava/lang/Object;  [J      39      10008
+```
diff --git a/tools/jvmti-agents/field-counts/count-fields.py b/tools/jvmti-agents/field-counts/count-fields.py
new file mode 100755
index 0000000..3764aba
--- /dev/null
+++ b/tools/jvmti-agents/field-counts/count-fields.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Retrieves the counts of how many objects have a particular field filled with what on all running
+processes.
+
+Prints a json map from pid -> (log-tag, field-name, field-type, count, total-size).
+"""
+
+
+import adb
+import argparse
+import concurrent.futures
+import itertools
+import json
+import logging
+import os
+import os.path
+import signal
+import subprocess
+import time
+
+def main():
+  parser = argparse.ArgumentParser(description="Get counts of null fields from a device.")
+  parser.add_argument("-S", "--serial", metavar="SERIAL", type=str,
+                      required=False,
+                      default=os.environ.get("ANDROID_SERIAL", None),
+                      help="Android serial to use. Defaults to ANDROID_SERIAL")
+  parser.add_argument("-p", "--pid", required=False,
+                      default=[], action="append",
+                      help="Specific pids to check. By default checks all running dalvik processes")
+  has_out = "OUT" in os.environ
+  def_32 = os.path.join(os.environ.get("OUT", ""), "system", "lib", "libfieldcounts.so")
+  def_64 = os.path.join(os.environ.get("OUT", ""), "system", "lib64", "libfieldcounts.so")
+  has_32 = has_out and os.path.exists(def_32)
+  has_64 = has_out and os.path.exists(def_64)
+  def pushable_lib(name):
+    if os.path.isfile(name):
+      return name
+    else:
+      raise argparse.ArgumentTypeError(name + " is not a file!")
+  parser.add_argument('--lib32', type=pushable_lib,
+                      required=not has_32,
+                      action='store',
+                      default=def_32,
+                      help="Location of 32 bit agent to push")
+  parser.add_argument('--lib64', type=pushable_lib,
+                      required=not has_64,
+                      action='store',
+                      default=def_64 if has_64 else None,
+                      help="Location of 64 bit agent to push")
+  parser.add_argument("fields", nargs="+",
+                      help="fields to check")
+
+  out = parser.parse_args()
+
+  device = adb.device.get_device(out.serial)
+  print("getting root")
+  device.root()
+
+  print("Disabling selinux")
+  device.shell("setenforce 0".split())
+
+  print("Pushing libraries")
+  lib32 = device.shell("mktemp".split())[0].strip()
+  lib64 = device.shell("mktemp".split())[0].strip()
+
+  print(out.lib32 + " -> " + lib32)
+  device.push(out.lib32, lib32)
+
+  print(out.lib64 + " -> " + lib64)
+  device.push(out.lib64, lib64)
+
+  mkcmd = lambda lib: "'{}={}'".format(lib, ','.join(out.fields))
+
+  if len(out.pid) == 0:
+    print("Getting jdwp pids")
+    new_env = dict(os.environ)
+    new_env["ANDROID_SERIAL"] = device.serial
+    p = subprocess.Popen([device.adb_path, "jdwp"], env=new_env, stdout=subprocess.PIPE)
+    # ADB jdwp doesn't ever exit so just kill it after 1 second to get a list of pids.
+    with concurrent.futures.ProcessPoolExecutor() as ppe:
+      ppe.submit(kill_it, p.pid).result()
+    out.pid = p.communicate()[0].strip().split()
+    p.wait()
+    print(out.pid)
+  print("Clearing logcat")
+  device.shell("logcat -c".split())
+  final = {}
+  print("Getting info from every process dumped to logcat")
+  for p in out.pid:
+    res = check_single_process(p, device, mkcmd, lib32, lib64)
+    if res is not None:
+      final[p] = res
+  device.shell('rm {}'.format(lib32).split())
+  device.shell('rm {}'.format(lib64).split())
+  print(json.dumps(final, indent=2))
+
+def kill_it(p):
+  time.sleep(1)
+  os.kill(p, signal.SIGINT)
+
+def check_single_process(pid, device, mkcmd, bit32, bit64):
+  try:
+    # Link agent into the /data/data/<app>/code_cache directory
+    name = device.shell('cat /proc/{}/cmdline'.format(pid).split())[0].strip('\0')
+    targetdir = str('/data/data/{}/code_cache'.format(str(name).strip()))
+    print("Will place agents in {}".format(targetdir))
+    target32 = device.shell('mktemp -p {}'.format(targetdir).split())[0].strip()
+    print("{} -> {}".format(bit32, target32))
+    target64 = device.shell('mktemp -p {}'.format(targetdir).split())[0].strip()
+    print("{} -> {}".format(bit64, target64))
+    try:
+      device.shell('cp {} {}'.format(bit32, target32).split())
+      device.shell('cp {} {}'.format(bit64, target64).split())
+      device.shell('chmod 555 {}'.format(target32).split())
+      device.shell('chmod 555 {}'.format(target64).split())
+      # Just try attaching both 32 and 64 bit. Wrong one will fail silently.
+      device.shell(['am', 'attach-agent', str(pid), mkcmd(target32)])
+      device.shell(['am', 'attach-agent', str(pid), mkcmd(target64)])
+      time.sleep(0.5)
+      device.shell('kill -3 {}'.format(pid).split())
+      time.sleep(0.5)
+    finally:
+      print("Removing agent copies at {}, {}".format(target32, target64))
+      device.shell(['rm', '-f', target32])
+      device.shell(['rm', '-f', target64])
+    out = []
+    all_fields = []
+    lc_cmd = "logcat -d -b main --pid={} -e '^\\t.*\\t.*\\t[0-9]*\\t[0-9]*$'".format(pid).split(' ')
+    for l in device.shell(lc_cmd)[0].strip().split('\n'):
+      # first 4 are just date and other useless data.
+      data = l.strip().split()[5:]
+      if len(data) < 5:
+        continue
+      # If we run multiple times many copies of the agent will be attached. Just choose one of any
+      # copies for each field.
+      # data is (process, field, field-type, count, size)
+      field = (data[1], data[2])
+      if field not in all_fields:
+        out.append((str(data[0]), str(data[1]), str(data[2]), int(data[3]), int(data[4])))
+        all_fields.append(field)
+    if len(out) != 0:
+      print("pid: " + pid + " -> " + str(out))
+      return out
+    else:
+      return None
+  except adb.device.ShellError as e:
+    print("failed on pid " + repr(pid) + " because " + repr(e))
+    return None
+
+if __name__ == '__main__':
+  main()
diff --git a/tools/jvmti-agents/field-counts/fieldcount.cc b/tools/jvmti-agents/field-counts/fieldcount.cc
new file mode 100644
index 0000000..c31a973
--- /dev/null
+++ b/tools/jvmti-agents/field-counts/fieldcount.cc
@@ -0,0 +1,274 @@
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <android-base/logging.h>
+#include <nativehelper/scoped_local_ref.h>
+
+#include <atomic>
+#include <iomanip>
+#include <iostream>
+#include <istream>
+#include <jni.h>
+#include <jvmti.h>
+#include <memory>
+#include <sstream>
+#include <string.h>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+namespace fieldnull {
+
+#define CHECK_JVMTI(x) CHECK_EQ((x), JVMTI_ERROR_NONE)
+
+// Special art ti-version number. We will use this as a fallback if we cannot get a regular JVMTI
+// env.
+static constexpr jint kArtTiVersion = JVMTI_VERSION_1_2 | 0x40000000;
+
+static JavaVM* java_vm = nullptr;
+
+// Field is "Lclass/name/here;.field_name:Lfield/type/here;"
+static std::pair<jclass, jfieldID> SplitField(JNIEnv* env, const std::string& field_id) {
+  CHECK_EQ(field_id[0], 'L');
+  env->PushLocalFrame(1);
+  std::istringstream is(field_id);
+  std::string class_name;
+  std::string field_name;
+  std::string field_type;
+
+  std::getline(is, class_name, '.');
+  std::getline(is, field_name, ':');
+  std::getline(is, field_type, '\0');
+
+  jclass klass = reinterpret_cast<jclass>(
+      env->NewGlobalRef(env->FindClass(class_name.substr(1, class_name.size() - 2).c_str())));
+  CHECK(klass != nullptr) << class_name;
+  jfieldID field = env->GetFieldID(klass, field_name.c_str(), field_type.c_str());
+  CHECK(field != nullptr) << field_name;
+  LOG(INFO) << "listing field " << field_id;
+  env->PopLocalFrame(nullptr);
+  return std::make_pair(klass, field);
+}
+
+static std::vector<std::pair<jclass, jfieldID>> GetRequestedFields(JNIEnv* env,
+                                                                   const std::string& args) {
+  std::vector<std::pair<jclass, jfieldID>> res;
+  std::stringstream args_stream(args);
+  std::string item;
+  while (std::getline(args_stream, item, ',')) {
+    if (item == "") {
+      continue;
+    }
+    res.push_back(SplitField(env, item));
+  }
+  return res;
+}
+
+static jint SetupJvmtiEnv(JavaVM* vm, jvmtiEnv** jvmti) {
+  jint res = 0;
+  res = vm->GetEnv(reinterpret_cast<void**>(jvmti), JVMTI_VERSION_1_1);
+
+  if (res != JNI_OK || *jvmti == nullptr) {
+    LOG(ERROR) << "Unable to access JVMTI, error code " << res;
+    return vm->GetEnv(reinterpret_cast<void**>(jvmti), kArtTiVersion);
+  }
+  return res;
+}
+
+struct RequestList {
+  std::vector<std::pair<jclass, jfieldID>> fields_;
+};
+
+static void DataDumpRequestCb(jvmtiEnv* jvmti) {
+  JNIEnv* env = nullptr;
+  CHECK_EQ(java_vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6), JNI_OK);
+  LOG(INFO) << "Dumping counts of fields.";
+  LOG(INFO) << "\t" << "Field name"
+            << "\t" << "Type"
+            << "\t" << "Count"
+            << "\t" << "TotalSize";
+  RequestList* list;
+  CHECK_JVMTI(jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&list)));
+  for (std::pair<jclass, jfieldID>& p : list->fields_) {
+    jclass klass = p.first;
+    jfieldID field = p.second;
+    // Make sure all instances of the class are tagged with the klass ptr value. Since this is a
+    // global ref it's guaranteed to be unique.
+    CHECK_JVMTI(jvmti->IterateOverInstancesOfClass(
+        p.first,
+        // We need to do this to all objects every time since we might be looking for multiple
+        // fields in classes that are subtypes of each other.
+        JVMTI_HEAP_OBJECT_EITHER,
+        /* class_tag, size, tag_ptr, user_data*/
+        [](jlong, jlong, jlong* tag_ptr, void* klass) -> jvmtiIterationControl {
+          *tag_ptr = static_cast<jlong>(reinterpret_cast<intptr_t>(klass));
+          return JVMTI_ITERATION_CONTINUE;
+        },
+        klass));
+    jobject* obj_list;
+    jint obj_len;
+    jlong tag = static_cast<jlong>(reinterpret_cast<intptr_t>(klass));
+    CHECK_JVMTI(jvmti->GetObjectsWithTags(1, &tag, &obj_len, &obj_list, nullptr));
+
+    std::unordered_map<std::string, size_t> class_sizes;
+    std::unordered_map<std::string, size_t> class_counts;
+    size_t total_size = 0;
+    // Mark all the referenced objects with a single tag value, this way we can dedup them.
+    jlong referenced_object_tag = static_cast<jlong>(reinterpret_cast<intptr_t>(klass) + 1);
+    std::string null_class_name("<null>");
+    class_counts[null_class_name] = 0;
+    class_sizes[null_class_name] = 0;
+    for (jint i = 0; i < obj_len; i++) {
+      ScopedLocalRef<jobject> cur_thiz(env, obj_list[i]);
+      ScopedLocalRef<jobject> obj(env, env->GetObjectField(cur_thiz.get(), field));
+      std::string class_name(null_class_name);
+      if (obj == nullptr) {
+        class_counts[null_class_name]++;
+      } else {
+        CHECK_JVMTI(jvmti->SetTag(obj.get(), referenced_object_tag));
+        jlong size = 0;
+        if (obj.get() != nullptr) {
+          char* class_name_tmp;
+          ScopedLocalRef<jclass> obj_klass(env, env->GetObjectClass(obj.get()));
+          CHECK_JVMTI(jvmti->GetClassSignature(obj_klass.get(), &class_name_tmp, nullptr));
+          CHECK_JVMTI(jvmti->GetObjectSize(obj.get(), &size));
+          class_name = class_name_tmp;
+          CHECK_JVMTI(jvmti->Deallocate(reinterpret_cast<unsigned char*>(class_name_tmp)));
+        }
+        if (class_sizes.find(class_name) == class_sizes.end()) {
+          class_sizes[class_name] = 0;
+          class_counts[class_name] = 0;
+        }
+        class_counts[class_name]++;
+      }
+    }
+    jobject* ref_list;
+    jint ref_len;
+    CHECK_JVMTI(jvmti->GetObjectsWithTags(1, &referenced_object_tag, &ref_len, &ref_list, nullptr));
+    for (jint i = 0; i < ref_len; i++) {
+      ScopedLocalRef<jobject> obj(env, ref_list[i]);
+      std::string class_name(null_class_name);
+      jlong size = 0;
+      if (obj.get() != nullptr) {
+        char* class_name_tmp;
+        ScopedLocalRef<jclass> obj_klass(env, env->GetObjectClass(obj.get()));
+        CHECK_JVMTI(jvmti->GetClassSignature(obj_klass.get(), &class_name_tmp, nullptr));
+        CHECK_JVMTI(jvmti->GetObjectSize(obj.get(), &size));
+        class_name = class_name_tmp;
+        CHECK_JVMTI(jvmti->Deallocate(reinterpret_cast<unsigned char*>(class_name_tmp)));
+      }
+      total_size += static_cast<size_t>(size);
+      class_sizes[class_name] += static_cast<size_t>(size);
+    }
+
+    char* field_name;
+    char* field_sig;
+    char* field_class_name;
+    CHECK_JVMTI(jvmti->GetFieldName(klass, field, &field_name, &field_sig, nullptr));
+    CHECK_JVMTI(jvmti->GetClassSignature(klass, &field_class_name, nullptr));
+    LOG(INFO) << "\t" << field_class_name << "." << field_name << ":" << field_sig
+              << "\t" << "<ALL_TYPES>"
+              << "\t" << obj_len
+              << "\t" << total_size;
+    for (auto sz : class_sizes) {
+      size_t count = class_counts[sz.first];
+      LOG(INFO) << "\t" << field_class_name << "." << field_name << ":" << field_sig
+                << "\t" << sz.first
+                << "\t" << count
+                << "\t" << sz.second;
+    }
+    CHECK_JVMTI(jvmti->Deallocate(reinterpret_cast<unsigned char*>(field_name)));
+    CHECK_JVMTI(jvmti->Deallocate(reinterpret_cast<unsigned char*>(field_sig)));
+    CHECK_JVMTI(jvmti->Deallocate(reinterpret_cast<unsigned char*>(field_class_name)));
+  }
+}
+
+static void VMDeathCb(jvmtiEnv* jvmti, JNIEnv* env ATTRIBUTE_UNUSED) {
+  DataDumpRequestCb(jvmti);
+  RequestList* list = nullptr;
+  CHECK_JVMTI(jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&list)));
+  delete list;
+}
+
+static void CreateFieldList(jvmtiEnv* jvmti, JNIEnv* env, const std::string& args) {
+  RequestList* list = nullptr;
+  CHECK_JVMTI(jvmti->Allocate(sizeof(*list), reinterpret_cast<unsigned char**>(&list)));
+  new (list) RequestList{
+    .fields_ = GetRequestedFields(env, args),
+  };
+  CHECK_JVMTI(jvmti->SetEnvironmentLocalStorage(list));
+}
+
+static void VMInitCb(jvmtiEnv* jvmti, JNIEnv* env, jobject thr ATTRIBUTE_UNUSED) {
+  char* args = nullptr;
+  CHECK_JVMTI(jvmti->GetEnvironmentLocalStorage(reinterpret_cast<void**>(&args)));
+  CHECK_JVMTI(jvmti->SetEnvironmentLocalStorage(nullptr));
+  CreateFieldList(jvmti, env, args);
+  CHECK_JVMTI(jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_VM_DEATH, nullptr));
+  CHECK_JVMTI(
+      jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_DATA_DUMP_REQUEST, nullptr));
+  CHECK_JVMTI(jvmti->Deallocate(reinterpret_cast<unsigned char*>(args)));
+}
+
+static jint AgentStart(JavaVM* vm, char* options, bool is_onload) {
+  android::base::InitLogging(/* argv= */ nullptr);
+  java_vm = vm;
+  jvmtiEnv* jvmti = nullptr;
+  if (SetupJvmtiEnv(vm, &jvmti) != JNI_OK) {
+    LOG(ERROR) << "Could not get JVMTI env or ArtTiEnv!";
+    return JNI_ERR;
+  }
+  jvmtiCapabilities caps{
+    .can_tag_objects = 1,
+  };
+  CHECK_JVMTI(jvmti->AddCapabilities(&caps));
+  jvmtiEventCallbacks cb{
+    .VMInit = VMInitCb,
+    .VMDeath = VMDeathCb,
+    .DataDumpRequest = DataDumpRequestCb,
+  };
+  CHECK_JVMTI(jvmti->SetEventCallbacks(&cb, sizeof(cb)));
+  if (is_onload) {
+    unsigned char* ptr = nullptr;
+    CHECK_JVMTI(jvmti->Allocate(strlen(options) + 1, &ptr));
+    strcpy(reinterpret_cast<char*>(ptr), options);
+    CHECK_JVMTI(jvmti->SetEnvironmentLocalStorage(ptr));
+    CHECK_JVMTI(jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_VM_INIT, nullptr));
+  } else {
+    JNIEnv* env = nullptr;
+    CHECK_EQ(vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6), JNI_OK);
+    CreateFieldList(jvmti, env, options);
+    CHECK_JVMTI(jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_VM_DEATH, nullptr));
+    CHECK_JVMTI(
+        jvmti->SetEventNotificationMode(JVMTI_ENABLE, JVMTI_EVENT_DATA_DUMP_REQUEST, nullptr));
+  }
+  return JNI_OK;
+}
+
+// Late attachment (e.g. 'am attach-agent').
+extern "C" JNIEXPORT jint JNICALL Agent_OnAttach(JavaVM* vm,
+                                                 char* options,
+                                                 void* reserved ATTRIBUTE_UNUSED) {
+  return AgentStart(vm, options, /*is_onload=*/false);
+}
+
+// Early attachment
+extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* jvm,
+                                               char* options,
+                                               void* reserved ATTRIBUTE_UNUSED) {
+  return AgentStart(jvm, options, /*is_onload=*/true);
+}
+
+}  // namespace fieldnull
diff --git a/tools/jvmti-agents/field-null-percent/Android.bp b/tools/jvmti-agents/field-null-percent/Android.bp
index 26bb1dc..4950b7f 100644
--- a/tools/jvmti-agents/field-null-percent/Android.bp
+++ b/tools/jvmti-agents/field-null-percent/Android.bp
@@ -16,30 +16,45 @@
 
 // Build variants {target,host} x {debug,ndebug} x {32,64}
 cc_defaults {
-    name: "fieldnull-defaults",
-    host_supported: true,
+    name: "fieldnull-base-defaults",
     srcs: ["fieldnull.cc"],
     defaults: ["art_defaults"],
 
     // Note that this tool needs to be built for both 32-bit and 64-bit since it requires
     // to be same ISA as what it is attached to.
     compile_multilib: "both",
-
-    shared_libs: [
-        "libbase",
-    ],
     header_libs: [
         "libopenjdkjvmti_headers",
     ],
-    multilib: {
-        lib32: {
-            suffix: "32",
-        },
-        lib64: {
-            suffix: "64",
-        },
-    },
-    symlink_preferred_arch: true,
+}
+
+cc_defaults {
+    name: "fieldnull-defaults",
+    host_supported: true,
+    shared_libs: [
+        "libbase",
+    ],
+    defaults: ["fieldnull-base-defaults"],
+}
+
+cc_defaults {
+    name: "fieldnull-static-defaults",
+    host_supported: false,
+    defaults: ["fieldnull-base-defaults"],
+
+    shared_libs: [
+        "liblog",
+    ],
+    static_libs: [
+        "libbase_ndk",
+    ],
+    sdk_version: "current",
+    stl: "c++_static",
+}
+
+cc_library {
+    name: "libfieldnulls",
+    defaults: ["fieldnull-static-defaults"],
 }
 
 art_cc_library {
diff --git a/tools/jvmti-agents/field-null-percent/README.md b/tools/jvmti-agents/field-null-percent/README.md
index d8bc65d..f4d38c2 100644
--- a/tools/jvmti-agents/field-null-percent/README.md
+++ b/tools/jvmti-agents/field-null-percent/README.md
@@ -7,7 +7,7 @@
 
 # Usage
 ### Build
->    `make libfieldnull`
+>    `m libfieldnull libfieldnulls`
 
 The libraries will be built for 32-bit, 64-bit, host and target. Below examples
 assume you want to use the 64-bit version.
@@ -26,9 +26,9 @@
 
 >    `adb shell setenforce 0`
 >
->    `adb push $ANDROID_PRODUCT_OUT/system/lib64/libfieldnull.so /data/local/tmp/`
+>    `adb push $ANDROID_PRODUCT_OUT/system/lib64/libfieldnulls.so /data/local/tmp/`
 >
->    `adb shell am start-activity --attach-agent '/data/local/tmp/libfieldnull.so=Ljava/lang/Class;.name:Ljava/lang/String;' some.debuggable.apps/.the.app.MainActivity`
+>    `adb shell am start-activity --attach-agent '/data/local/tmp/libfieldnulls.so=Ljava/lang/Class;.name:Ljava/lang/String;' some.debuggable.apps/.the.app.MainActivity`
 
 #### RI
 >    `java '-agentpath:libfieldnull.so=Lname/of/class;.nameOfField:Ltype/of/field;' -cp tmp/helloworld/classes helloworld`
diff --git a/tools/jvmti-agents/field-null-percent/fieldnull.cc b/tools/jvmti-agents/field-null-percent/fieldnull.cc
index 8f5b389..016164f 100644
--- a/tools/jvmti-agents/field-null-percent/fieldnull.cc
+++ b/tools/jvmti-agents/field-null-percent/fieldnull.cc
@@ -178,8 +178,8 @@
   CHECK_JVMTI(jvmti->AddCapabilities(&caps));
   jvmtiEventCallbacks cb {
     .VMInit = VMInitCb,
-    .DataDumpRequest = DataDumpRequestCb,
     .VMDeath = VMDeathCb,
+    .DataDumpRequest = DataDumpRequestCb,
   };
   CHECK_JVMTI(jvmti->SetEventCallbacks(&cb, sizeof(cb)));
   if (is_onload) {
diff --git a/tools/jvmti-agents/jit-load/Android.bp b/tools/jvmti-agents/jit-load/Android.bp
index a57a408..5adf98c 100644
--- a/tools/jvmti-agents/jit-load/Android.bp
+++ b/tools/jvmti-agents/jit-load/Android.bp
@@ -40,15 +40,6 @@
     header_libs: [
         "libopenjdkjvmti_headers",
     ],
-    multilib: {
-        lib32: {
-            suffix: "32",
-        },
-        lib64: {
-            suffix: "64",
-        },
-    },
-    symlink_preferred_arch: true,
 }
 
 art_cc_library {
diff --git a/tools/jvmti-agents/jit-load/README.md b/tools/jvmti-agents/jit-load/README.md
index 8aa4513..6efc33a 100644
--- a/tools/jvmti-agents/jit-load/README.md
+++ b/tools/jvmti-agents/jit-load/README.md
@@ -5,7 +5,7 @@
 
 # Usage
 ### Build
->    `make libjitload`  # or 'make libjitloadd' with debugging checks enabled
+>    `m libjitload`  # or 'm libjitloadd' with debugging checks enabled
 
 The libraries will be built for 32-bit, 64-bit, host and target. Below examples assume you want to use the 64-bit version.
 ### Command Line
diff --git a/tools/jvmti-agents/jit-load/jitload.cc b/tools/jvmti-agents/jit-load/jitload.cc
index 7e715de..6ef7b67 100644
--- a/tools/jvmti-agents/jit-load/jitload.cc
+++ b/tools/jvmti-agents/jit-load/jitload.cc
@@ -99,9 +99,9 @@
   }
   jvmtiEventCallbacks cb {
         .VMInit = VmInitCb,
+        .VMDeath = VMDeathCb,
         .ClassPrepare = ClassPrepareJit,
         .DataDumpRequest = DataDumpRequestCb,
-        .VMDeath = VMDeathCb,
   };
   AgentOptions* ops;
   CHECK_CALL_SUCCESS(
diff --git a/tools/jvmti-agents/list-extensions/Android.bp b/tools/jvmti-agents/list-extensions/Android.bp
new file mode 100644
index 0000000..09ba5aa
--- /dev/null
+++ b/tools/jvmti-agents/list-extensions/Android.bp
@@ -0,0 +1,47 @@
+//
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Build variants {target,host} x {debug,ndebug} x {32,64}
+cc_defaults {
+    name: "listextensions-defaults",
+    host_supported: true,
+    srcs: ["list-extensions.cc"],
+    defaults: ["art_defaults"],
+
+    // Note that this tool needs to be built for both 32-bit and 64-bit since it requires
+    // to be same ISA as what it is attached to.
+    compile_multilib: "both",
+
+    shared_libs: [
+        "libbase",
+    ],
+    header_libs: [
+        "libopenjdkjvmti_headers",
+    ],
+}
+
+art_cc_library {
+    name: "liblistextensions",
+    defaults: ["listextensions-defaults"],
+}
+
+art_cc_library {
+    name: "liblistextensionsd",
+    defaults: [
+        "art_debug_defaults",
+        "listextensions-defaults",
+    ],
+}
diff --git a/tools/jvmti-agents/list-extensions/README.md b/tools/jvmti-agents/list-extensions/README.md
new file mode 100644
index 0000000..976a8c3
--- /dev/null
+++ b/tools/jvmti-agents/list-extensions/README.md
@@ -0,0 +1,56 @@
+# listextensions
+
+listextensions is a jvmti agent that will print the details of all available jvmti extension
+functions and events.
+
+# Usage
+### Build
+>    `m liblistextensions`
+
+The libraries will be built for 32-bit, 64-bit, host and target. Below examples
+assume you want to use the 64-bit version.
+
+#### ART
+>    `art -Xplugin:$ANDROID_HOST_OUT/lib64/libopenjdkjvmti.so '-agentpath:liblistextensions.so' -cp tmp/java/helloworld.dex -Xint helloworld`
+
+This will print something similar to:
+```
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:104] Found 13 extension functions
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:107] com.android.art.heap.get_object_heap_id
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:108]       desc: Retrieve the heap id of the object tagged with the given argument. An arbitrary object is chosen if multiple objects exist with the same tag.
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:109]       arguments: (count: 2)
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:112]               tag (IN, JLONG)
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:112]               heap_id (OUT, JINT)
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:114]       Errors: (count: 1)
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:118]               JVMTI_ERROR_NOT_FOUND
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:107] com.android.art.heap.get_heap_name
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:108]       desc: Retrieve the name of the heap with the given id.
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:109]       arguments: (count: 2)
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:112]               heap_id (IN, JINT)
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:112]               heap_name (ALLOC_BUF, CCHAR)
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:114]       Errors: (count: 1)
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:118]               JVMTI_ERROR_ILLEGAL_ARGUMENT
+...
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:130] Found 2 extension events
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:133] com.android.art.internal.ddm.publish_chunk
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:134]       index: 86
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:135]       desc: Called when there is new ddms information that the agent or other clients can use. The agent is given the 'type' of the ddms chunk and a 'data_size' byte-buffer in 'data'. The 'data' pointer is only valid for the duration of the publish_chunk event. The agent is responsible for interpreting the information present in the 'data' buffer. This is provided for backwards-compatibility support only. Agents should prefer to use relevant JVMTI events and functions above listening for this event.
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:136]       event arguments: (count: 4)
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:139]               jni_env (IN_PTR, JNIENV)
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:139]               type (IN, JINT)
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:139]               data_size (IN, JINT)
+dalvikvm64 I 07-30 10:47:37 154719 154719 list-extensions.cc:139]               data (IN_BUF, JBYTE)
+...
+```
+
+* `-Xplugin` and `-agentpath` need to be used, otherwise the agent will fail during init.
+* If using `libartd.so`, make sure to use the debug version of jvmti.
+
+>    `adb shell setenforce 0`
+>
+>    `adb push $ANDROID_PRODUCT_OUT/system/lib64/liblistextensions.so /data/local/tmp/`
+>
+>    `adb shell am start-activity --attach-agent /data/local/tmp/liblistextensions.so some.debuggable.apps/.the.app.MainActivity`
+
+#### RI
+>    `java -agentpath:liblistextensions.so -cp tmp/helloworld/classes helloworld`
diff --git a/tools/jvmti-agents/list-extensions/list-extensions.cc b/tools/jvmti-agents/list-extensions/list-extensions.cc
new file mode 100644
index 0000000..6d8237a
--- /dev/null
+++ b/tools/jvmti-agents/list-extensions/list-extensions.cc
@@ -0,0 +1,170 @@
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <android-base/logging.h>
+
+#include <jni.h>
+#include <jvmti.h>
+#include <string>
+
+namespace listextensions {
+
+namespace {
+
+// Special art ti-version number. We will use this as a fallback if we cannot get a regular JVMTI
+// env.
+constexpr jint kArtTiVersion = JVMTI_VERSION_1_2 | 0x40000000;
+
+template <typename T> void Dealloc(jvmtiEnv* env, T* t) {
+  env->Deallocate(reinterpret_cast<unsigned char*>(t));
+}
+
+template <typename T, typename... Rest> void Dealloc(jvmtiEnv* env, T* t, Rest... rs) {
+  Dealloc(env, t);
+  Dealloc(env, rs...);
+}
+
+void DeallocParams(jvmtiEnv* env, jvmtiParamInfo* params, jint n_params) {
+  for (jint i = 0; i < n_params; i++) {
+    Dealloc(env, params[i].name);
+  }
+}
+
+std::ostream& operator<<(std::ostream& os, const jvmtiParamInfo& param) {
+  os << param.name << " (";
+#define CASE(type, name)       \
+  case JVMTI_##type##_##name:  \
+    os << #name;               \
+    break
+  switch (param.kind) {
+    CASE(KIND, IN);
+    CASE(KIND, IN_PTR);
+    CASE(KIND, IN_BUF);
+    CASE(KIND, ALLOC_BUF);
+    CASE(KIND, ALLOC_ALLOC_BUF);
+    CASE(KIND, OUT);
+    CASE(KIND, OUT_BUF);
+  }
+  os << ", ";
+  switch (param.base_type) {
+    CASE(TYPE, JBYTE);
+    CASE(TYPE, JCHAR);
+    CASE(TYPE, JSHORT);
+    CASE(TYPE, JINT);
+    CASE(TYPE, JLONG);
+    CASE(TYPE, JFLOAT);
+    CASE(TYPE, JDOUBLE);
+    CASE(TYPE, JBOOLEAN);
+    CASE(TYPE, JOBJECT);
+    CASE(TYPE, JTHREAD);
+    CASE(TYPE, JCLASS);
+    CASE(TYPE, JVALUE);
+    CASE(TYPE, JFIELDID);
+    CASE(TYPE, JMETHODID);
+    CASE(TYPE, CCHAR);
+    CASE(TYPE, CVOID);
+    CASE(TYPE, JNIENV);
+  }
+#undef CASE
+  os << ")";
+  return os;
+}
+
+jint SetupJvmtiEnv(JavaVM* vm) {
+  jint res = 0;
+  jvmtiEnv* env = nullptr;
+  res = vm->GetEnv(reinterpret_cast<void**>(&env), JVMTI_VERSION_1_1);
+
+  if (res != JNI_OK || env == nullptr) {
+    LOG(ERROR) << "Unable to access JVMTI, error code " << res;
+    res = vm->GetEnv(reinterpret_cast<void**>(&env), kArtTiVersion);
+    if (res != JNI_OK) {
+      return res;
+    }
+  }
+
+  // Get the extensions.
+  jint n_ext = 0;
+  jvmtiExtensionFunctionInfo* infos = nullptr;
+  if (env->GetExtensionFunctions(&n_ext, &infos) != JVMTI_ERROR_NONE) {
+    return JNI_ERR;
+  }
+  LOG(INFO) << "Found " << n_ext << " extension functions";
+  for (jint i = 0; i < n_ext; i++) {
+    const jvmtiExtensionFunctionInfo& info = infos[i];
+    LOG(INFO) << info.id;
+    LOG(INFO) << "\tdesc: " << info.short_description;
+    LOG(INFO) << "\targuments: (count: " << info.param_count << ")";
+    for (jint j = 0; j < info.param_count; j++) {
+      const jvmtiParamInfo& param = info.params[j];
+      LOG(INFO) << "\t\t" << param;
+    }
+    LOG(INFO) << "\tErrors: (count: " << info.error_count << ")";
+    for (jint j = 0; j < info.error_count; j++) {
+      char* name;
+      CHECK_EQ(JVMTI_ERROR_NONE, env->GetErrorName(info.errors[j], &name));
+      LOG(INFO) << "\t\t" << name;
+      Dealloc(env, name);
+    }
+    DeallocParams(env, info.params, info.param_count);
+    Dealloc(env, info.short_description, info.id, info.errors, info.params);
+  }
+  // Cleanup the array.
+  Dealloc(env, infos);
+  jvmtiExtensionEventInfo* events = nullptr;
+  if (env->GetExtensionEvents(&n_ext, &events) != JVMTI_ERROR_NONE) {
+    return JNI_ERR;
+  }
+  LOG(INFO) << "Found " << n_ext << " extension events";
+  for (jint i = 0; i < n_ext; i++) {
+    const jvmtiExtensionEventInfo& info = events[i];
+    LOG(INFO) << info.id;
+    LOG(INFO) << "\tindex: " << info.extension_event_index;
+    LOG(INFO) << "\tdesc: " << info.short_description;
+    LOG(INFO) << "\tevent arguments: (count: " << info.param_count << ")";
+    for (jint j = 0; j < info.param_count; j++) {
+      const jvmtiParamInfo& param = info.params[j];
+      LOG(INFO) << "\t\t" << param;
+    }
+    DeallocParams(env, info.params, info.param_count);
+    Dealloc(env, info.short_description, info.id, info.params);
+  }
+  // Cleanup the array.
+  Dealloc(env, events);
+  env->DisposeEnvironment();
+  return JNI_OK;
+}
+
+jint AgentStart(JavaVM* vm, char* options ATTRIBUTE_UNUSED, void* reserved ATTRIBUTE_UNUSED) {
+  if (SetupJvmtiEnv(vm) != JNI_OK) {
+    LOG(ERROR) << "Could not get JVMTI env or ArtTiEnv!";
+    return JNI_ERR;
+  }
+  return JNI_OK;
+}
+
+}  // namespace
+
+// Late attachment (e.g. 'am attach-agent').
+extern "C" JNIEXPORT jint JNICALL Agent_OnAttach(JavaVM* vm, char* options, void* reserved) {
+  return AgentStart(vm, options, reserved);
+}
+
+// Early attachment
+extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* jvm, char* options, void* reserved) {
+  return AgentStart(jvm, options, reserved);
+}
+
+}  // namespace listextensions
diff --git a/tools/jvmti-agents/simple-force-redefine/Android.bp b/tools/jvmti-agents/simple-force-redefine/Android.bp
index 871f210..38eb9f7 100644
--- a/tools/jvmti-agents/simple-force-redefine/Android.bp
+++ b/tools/jvmti-agents/simple-force-redefine/Android.bp
@@ -26,47 +26,32 @@
     compile_multilib: "both",
 
     shared_libs: [
-      "libz",
-      "liblog",
+        "libz",
+        "liblog",
     ],
     header_libs: [
         "libopenjdkjvmti_headers",
-        // Annoyingly you aren't allowed to include even header-only non-ndk libs into an ndk build.
-        // Instead we put the directories this would bring in below in 'include_dirs'
-        // "libnativehelper_header_only",
-    ],
-    include_dirs: [
-        // NDK headers aren't available in platform NDK builds.
-        "libnativehelper/include_jni",
-        "libnativehelper/header_only_include",
+        "libnativehelper_header_only",
+        "jni_headers",
     ],
     sdk_version: "current",
     stl: "libc++_static",
     target: {
-      android: {
-        static_libs: [
-          "slicer_ndk_no_rtti",
-          "libbase_ndk",
-        ],
-      },
-      host: {
-        static_libs: [
-          "slicer_no_rtti",
-        ],
-        shared_libs: [
-          "libbase",
-        ],
-      },
-    },
-    multilib: {
-        lib32: {
-            suffix: "32",
+        android: {
+            static_libs: [
+                "slicer_ndk_no_rtti",
+                "libbase_ndk",
+            ],
         },
-        lib64: {
-            suffix: "64",
+        host: {
+            static_libs: [
+                "slicer_no_rtti",
+            ],
+            shared_libs: [
+                "libbase",
+            ],
         },
     },
-    symlink_preferred_arch: true,
 }
 
 art_cc_library {
diff --git a/tools/jvmti-agents/simple-force-redefine/README.md b/tools/jvmti-agents/simple-force-redefine/README.md
index 362c704..254eaf6 100644
--- a/tools/jvmti-agents/simple-force-redefine/README.md
+++ b/tools/jvmti-agents/simple-force-redefine/README.md
@@ -7,7 +7,7 @@
 
 # Usage
 ### Build
->    `make libforceredefine`
+>    `m libforceredefine`
 
 The libraries will be built for 32-bit, 64-bit, host and target. Below examples
 assume you want to use the 64-bit version.
@@ -30,4 +30,4 @@
 >    `adb shell am attach-agent $(adb shell pidof some.deubggable.app) /data/local/tmp/libforceredefine.so=/data/local/tmp/classlist`
 
 One can also use fifos to send classes interactively to the process. (TODO: Have the agent
-continue reading from the fifo even after it gets an EOF.)
\ No newline at end of file
+continue reading from the fifo even after it gets an EOF.)
diff --git a/tools/jvmti-agents/simple-force-redefine/forceredefine.cc b/tools/jvmti-agents/simple-force-redefine/forceredefine.cc
index 91702c2..055fb8a 100644
--- a/tools/jvmti-agents/simple-force-redefine/forceredefine.cc
+++ b/tools/jvmti-agents/simple-force-redefine/forceredefine.cc
@@ -287,8 +287,8 @@
     return JNI_ERR;
   }
   jvmtiEventCallbacks cb{
-    .ClassFileLoadHook = CbClassFileLoadHook,
     .VMInit = CbVmInit,
+    .ClassFileLoadHook = CbClassFileLoadHook,
   };
   jvmti->SetEventCallbacks(&cb, sizeof(cb));
   jvmti->SetEnvironmentLocalStorage(reinterpret_cast<void*>(ai));
diff --git a/tools/jvmti-agents/ti-alloc-sample/Android.bp b/tools/jvmti-agents/ti-alloc-sample/Android.bp
new file mode 100644
index 0000000..0dc2dd8
--- /dev/null
+++ b/tools/jvmti-agents/ti-alloc-sample/Android.bp
@@ -0,0 +1,73 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Build variants {target,host} x {debug,ndebug} x {32,64}
+cc_defaults {
+    name: "ti-alloc-sample-base-defaults",
+    srcs: ["ti_alloc_sample.cc"],
+    defaults: ["art_defaults"],
+
+    // Note that this tool needs to be built for both 32-bit and 64-bit since it requires
+    // to be same ISA as what it is attached to.
+    compile_multilib: "both",
+    header_libs: [
+        "libopenjdkjvmti_headers",
+        "libnativehelper_header_only",
+        "jni_headers",
+    ],
+}
+
+cc_defaults {
+    name: "ti-alloc-sample-defaults",
+    host_supported: true,
+    shared_libs: [
+        "libbase",
+    ],
+    defaults: ["ti-alloc-sample-base-defaults"],
+}
+
+cc_defaults {
+    name: "ti-alloc-sample-static-defaults",
+    host_supported: false,
+    defaults: ["ti-alloc-sample-base-defaults"],
+
+    shared_libs: [
+        "liblog",
+    ],
+    static_libs: [
+        "libbase_ndk",
+    ],
+    sdk_version: "current",
+    stl: "c++_static",
+}
+
+art_cc_library {
+    name: "libtiallocsamples",
+    defaults: ["ti-alloc-sample-static-defaults"],
+}
+
+art_cc_library {
+    name: "libtiallocsample",
+    defaults: ["ti-alloc-sample-defaults"],
+}
+
+art_cc_library {
+    name: "libtiallocsampled",
+    defaults: [
+        "art_debug_defaults",
+        "ti-alloc-sample-defaults",
+    ],
+}
diff --git a/tools/jvmti-agents/ti-alloc-sample/README.md b/tools/jvmti-agents/ti-alloc-sample/README.md
new file mode 100644
index 0000000..0da090a
--- /dev/null
+++ b/tools/jvmti-agents/ti-alloc-sample/README.md
@@ -0,0 +1,79 @@
+# tiallocsample
+
+tiallocsample is a JVMTI agent designed to track the call stacks of allocations
+in the heap.
+
+# Usage
+### Build
+>    `m libtiallocsample`
+
+The libraries will be built for 32-bit, 64-bit, host and target. Below examples
+assume you want to use the 64-bit version.
+
+Use `libtiallocsamples` if you wish to build a version without non-NDK dynamic dependencies.
+
+### Command Line
+
+The agent is loaded using -agentpath like normal. It takes arguments in the
+following format:
+>     `sample_rate,stack_depth_limit,log_path`
+
+* sample_rate is an integer specifying how frequently an event is reported.
+  E.g., 10 means every tenth call to new will be logged.
+* stack_depth_limit is an integer that determines the number of frames the deepest stack trace
+  can contain.  It returns just the top portion if the limit is exceeded.
+* log_path is an absolute file path specifying where the log is to be written.
+
+#### Output Format
+
+The resulting file is a sequence of object allocations, with a limited form of
+text compression.  For example a single stack frame might look like:
+
+```
++0,jthread[main], jclass[[I file: <UNKNOWN_FILE>], size[24, hex: 0x18]
++1,main([Ljava/lang/String;)V
++2,run()V
++3,invoke(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;
++4,loop()V
++5,dispatchMessage(Landroid/os/Message;)V
++6,handleMessage(Landroid/os/Message;)V
++7,onDisplayChanged(I)V
++8,getState()I
++9,updateDisplayInfoLocked()V
++10,getDisplayInfo(I)Landroid/view/DisplayInfo;
++11,createFromParcel(Landroid/os/Parcel;)Ljava/lang/Object;
++12,createFromParcel(Landroid/os/Parcel;)Landroid/view/DisplayInfo;
++13,<init>(Landroid/os/Parcel;Landroid/view/DisplayInfo$1;)V
++14,<init>(Landroid/os/Parcel;)V
++15,readFromParcel(Landroid/os/Parcel;)V
+=16,0;1;2;3;1;4;5;6;7;8;9;10;10;11;12;13;14;15
+16
+```
+
+Lines starting with a + are key, value pairs.  So, for instance, key 2 stands for
+```
+run()V
+```
+.
+
+The line starting with 0 is the thread, type, and size (TTS) of an allocation.  The
+remaining lines starting with + are stack frames (SFs), containing function signatures.
+Lines starting with = are stack traces (STs), and are again key, value pairs.  In the
+example above, an ST called 16 is the TTS plus sequence of SFs.  Any line not starting
+with + or = is a sample.  It is a reference to an ST.  Hence repeated samples are
+represented as just numbers.
+
+#### ART
+>    `art -Xplugin:$ANDROID_HOST_OUT/lib64/libopenjdkjvmti.so '-agentpath:libtiallocsample.so=100' -cp tmp/java/helloworld.dex -Xint helloworld`
+
+* `-Xplugin` and `-agentpath` need to be used, otherwise the agent will fail during init.
+* If using `libartd.so`, make sure to use the debug version of jvmti.
+
+>    `adb shell setenforce 0`
+>
+>    `adb push $ANDROID_PRODUCT_OUT/system/lib64/libtiallocsample.so /data/local/tmp/`
+>
+>    `adb shell am start-activity --attach-agent /data/local/tmp/libtiallocsample.so=100 some.debuggable.apps/.the.app.MainActivity`
+
+#### RI
+>    `java '-agentpath:libtiallocsample.so=MethodEntry' -cp tmp/helloworld/classes helloworld`
diff --git a/tools/jvmti-agents/ti-alloc-sample/mkflame.py b/tools/jvmti-agents/ti-alloc-sample/mkflame.py
new file mode 100755
index 0000000..f37aa4a
--- /dev/null
+++ b/tools/jvmti-agents/ti-alloc-sample/mkflame.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python3
+#
+# Copyright 2019, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Usage: mkflame.py <jvmti_trace_file>
+"""
+
+import argparse
+import sys
+
+class TraceCollection:
+  def __init__(self, args):
+    self.args = args
+    # A table indexed by number and containing the definition for that number.
+    self.definitions = {}
+    # The "weight" of a stack trace, either 1 for counting or the size of the allocation.
+    self.weights = {}
+    # The count for each individual allocation.
+    self.allocation_count = {}
+
+  def definition(self, index):
+    """
+    Returns the definition for "index".
+    """
+    return self.definitions[index]
+
+  def set_definition(self, index, definition):
+    """
+    Sets the definition for "index".
+    """
+    self.definitions[index] = definition
+
+  def weight(self, index):
+    """
+    Returns the weight for "index".
+    """
+    return self.weights[index]
+
+  def set_weight(self, index, weight):
+    """
+    Sets the weight for "index".
+    """
+    self.weights[index] = weight
+
+  def read_file(self, filename):
+    """
+    Reads a file into a DefinitionTable.
+    """
+    def process_definition(line):
+      """
+      Adds line to the list of definitions in table.
+      """
+      def expand_stack_trace(definition):
+        """
+        Converts a semicolon-separated list of numbers into the text stack trace.
+        """
+        def get_allocation_thread(thread_type_size):
+          """
+          Returns the thread of an allocation from the thread/type/size record.
+          """
+          THREAD_STRING = "thread["
+          THREAD_STRING_LEN = len(THREAD_STRING)
+          thread_string = thread_type_size[thread_type_size.find(THREAD_STRING) +
+                                           THREAD_STRING_LEN:]
+          return thread_string[:thread_string.find("]")]
+
+        def get_allocation_type(thread_type_size):
+          """
+          Returns the type of an allocation from the thread/type/size record.
+          """
+          TYPE_STRING = "jclass["
+          TYPE_STRING_LEN = len(TYPE_STRING)
+          type_string = thread_type_size[thread_type_size.find(TYPE_STRING) + TYPE_STRING_LEN:]
+          return type_string[:type_string.find(" ")]
+
+        def get_allocation_size(thread_type_size):
+          """
+          Returns the size of an allocation from the thread/type/size record.
+          """
+          SIZE_STRING = "size["
+          SIZE_STRING_LEN = len(SIZE_STRING)
+          size_string = thread_type_size[thread_type_size.find(SIZE_STRING) + SIZE_STRING_LEN:]
+          size_string = size_string[:size_string.find(",")]
+          return int(size_string)
+
+        def get_top_and_weight(index):
+          thread_type_size = self.definition(int(tokens[0]))
+          size = get_allocation_size(thread_type_size)
+          if self.args.type_only:
+            thread_type_size = get_allocation_type(thread_type_size)
+          elif self.args.thread_only:
+            thread_type_size = get_allocation_thread(thread_type_size)
+          return (thread_type_size, size)
+
+        tokens = definition.split(";")
+        # The first element (base) of the stack trace is the thread/type/size.
+        # Get the weight (either 1 or the number of bytes allocated).
+        (thread_type_size, weight) = get_top_and_weight(int(tokens[0]))
+        self.set_weight(index, weight)
+        # Remove the thread/type/size from the base of the stack trace.
+        del tokens[0]
+        # Build the stack trace list.
+        expanded_definition = ""
+        for i in range(len(tokens)):
+          if self.args.depth_limit > 0 and i >= self.args.depth_limit:
+            break
+          token = tokens[i]
+          # Replace semicolons by colons in the method entry signatures.
+          method = self.definition(int(token)).replace(";", ":")
+          if len(expanded_definition) > 0:
+            expanded_definition += ";"
+          expanded_definition += method
+        if not self.args.ignore_type:
+          # Add the thread/type/size as the top-most stack frame.
+          if len(expanded_definition) > 0:
+            expanded_definition += ";"
+          expanded_definition += thread_type_size.replace(";", ":")
+        if self.args.reverse_stack:
+          def_list = expanded_definition.split(";")
+          expanded_definition = ";".join(def_list[::-1])
+        return expanded_definition
+
+      # If the line contains a comma, it is of the form [+=]index,definition,
+      # where index is a string containing an integer, and definition is the
+      # value represented by the integer whenever it is used later.
+      # * Lines starting with + are either a thread/type/size record or a single
+      #   stack frame.  These are simply interned in the table.
+      # * Those starting with = are stack traces, and contain a sequence of
+      #   numbers separated by semicolon.  These are "expanded" and then interned.
+      comma_pos = line.find(",")
+      index = int(line[1:comma_pos])
+      definition = line[comma_pos+1:]
+      if line[0:1] == "=":
+        definition = expand_stack_trace(definition)
+      # Intern the definition in the table.
+      #if len(definition) == 0:
+        # Zero length samples are errors and are discarded.
+        #print("ERROR: definition for " + str(index) + " is empty")
+        #return
+      self.set_definition(index, definition)
+
+    def process_trace(index):
+      """
+      Remembers one stack trace in the list of stack traces we have seen.
+      Remembering a stack trace increments a count associated with the trace.
+      """
+      trace = self.definition(index)
+      if self.args.use_size:
+        weight = self.weight(index)
+      else:
+        weight = 1
+      if trace in self.allocation_count:
+        self.allocation_count[trace] = self.allocation_count[trace] + weight
+      else:
+        self.allocation_count[trace] = weight
+
+    # Read the file, processing each line as a definition or stack trace.
+    tracefile = open(filename, "r")
+    current_allocation_trace = ""
+    for line in tracefile:
+      line = line.rstrip("\n")
+      if line[0:1] == "=" or line[0:1] == "+":
+        # definition.
+        process_definition(line)
+      else:
+        # stack trace.
+        process_trace(int(line))
+
+  def dump_flame_graph(self):
+    """
+    Prints out a stack trace format compatible with flame graph creation utilities.
+    """
+    for definition, weight in self.allocation_count.items():
+      print(definition + " " + str(weight))
+
+def parse_options():
+  parser = argparse.ArgumentParser(description="Convert a trace to a form usable for flame graphs.")
+  parser.add_argument("filename", help="The trace file as input", type=str)
+  parser.add_argument("--use_size", help="Count by allocation size", action="store_true",
+                      default=False)
+  parser.add_argument("--ignore_type", help="Ignore type of allocation", action="store_true",
+                      default=False)
+  parser.add_argument("--reverse_stack", help="Reverse root and top of stacks", action="store_true",
+                      default=False)
+  parser.add_argument("--type_only", help="Only consider allocation type", action="store_true",
+                      default=False)
+  parser.add_argument("--thread_only", help="Only consider allocation thread", action="store_true",
+                      default=False)
+  parser.add_argument("--depth_limit", help="Limit the length of a trace", type=int, default=0)
+  args = parser.parse_args()
+  return args
+
+def main(argv):
+  args = parse_options()
+  trace_collection = TraceCollection(args)
+  trace_collection.read_file(args.filename)
+  trace_collection.dump_flame_graph()
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
diff --git a/tools/jvmti-agents/ti-alloc-sample/ti_alloc_sample.cc b/tools/jvmti-agents/ti-alloc-sample/ti_alloc_sample.cc
new file mode 100644
index 0000000..d719db5
--- /dev/null
+++ b/tools/jvmti-agents/ti-alloc-sample/ti_alloc_sample.cc
@@ -0,0 +1,461 @@
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#include <android-base/logging.h>
+
+#include <atomic>
+#include <fstream>
+#include <iostream>
+#include <istream>
+#include <iomanip>
+#include <jni.h>
+#include <jvmti.h>
+#include <limits>
+#include <map>
+#include <memory>
+#include <mutex>
+#include <string>
+#include <sstream>
+#include <vector>
+
+namespace tifast {
+
+namespace {
+
+// Special art ti-version number. We will use this as a fallback if we cannot get a regular JVMTI
+// env.
+static constexpr jint kArtTiVersion = JVMTI_VERSION_1_2 | 0x40000000;
+
+// jthread is a typedef of jobject so we use this to allow the templates to distinguish them.
+struct jthreadContainer { jthread thread; };
+// jlocation is a typedef of jlong so we use this to distinguish the less common jlong.
+struct jlongContainer { jlong val; };
+
+static void DeleteLocalRef(JNIEnv* env, jobject obj) {
+  if (obj != nullptr && env != nullptr) {
+    env->DeleteLocalRef(obj);
+  }
+}
+
+class ScopedThreadInfo {
+ public:
+  ScopedThreadInfo(jvmtiEnv* jvmtienv, JNIEnv* env, jthread thread)
+      : jvmtienv_(jvmtienv), env_(env), free_name_(false) {
+    if (thread == nullptr) {
+      info_.name = const_cast<char*>("<NULLPTR>");
+    } else if (jvmtienv->GetThreadInfo(thread, &info_) != JVMTI_ERROR_NONE) {
+      info_.name = const_cast<char*>("<UNKNOWN THREAD>");
+    } else {
+      free_name_ = true;
+    }
+  }
+
+  ~ScopedThreadInfo() {
+    if (free_name_) {
+      jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(info_.name));
+    }
+    DeleteLocalRef(env_, info_.thread_group);
+    DeleteLocalRef(env_, info_.context_class_loader);
+  }
+
+  const char* GetName() const {
+    return info_.name;
+  }
+
+ private:
+  jvmtiEnv* jvmtienv_;
+  JNIEnv* env_;
+  bool free_name_;
+  jvmtiThreadInfo info_{};
+};
+
+class ScopedClassInfo {
+ public:
+  ScopedClassInfo(jvmtiEnv* jvmtienv, jclass c) : jvmtienv_(jvmtienv), class_(c) {}
+
+  ~ScopedClassInfo() {
+    if (class_ != nullptr) {
+      jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
+      jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
+      jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(file_));
+      jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(debug_ext_));
+    }
+  }
+
+  bool Init(bool get_generic = true) {
+    if (class_ == nullptr) {
+      name_ = const_cast<char*>("<NONE>");
+      generic_ = const_cast<char*>("<NONE>");
+      return true;
+    } else {
+      jvmtiError ret1 = jvmtienv_->GetSourceFileName(class_, &file_);
+      jvmtiError ret2 = jvmtienv_->GetSourceDebugExtension(class_, &debug_ext_);
+      char** gen_ptr = &generic_;
+      if (!get_generic) {
+        generic_ = nullptr;
+        gen_ptr = nullptr;
+      }
+      return jvmtienv_->GetClassSignature(class_, &name_, gen_ptr) == JVMTI_ERROR_NONE &&
+          ret1 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
+          ret1 != JVMTI_ERROR_INVALID_CLASS &&
+          ret2 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
+          ret2 != JVMTI_ERROR_INVALID_CLASS;
+    }
+  }
+
+  jclass GetClass() const {
+    return class_;
+  }
+
+  const char* GetName() const {
+    return name_;
+  }
+
+  const char* GetGeneric() const {
+    return generic_;
+  }
+
+  const char* GetSourceDebugExtension() const {
+    if (debug_ext_ == nullptr) {
+      return "<UNKNOWN_SOURCE_DEBUG_EXTENSION>";
+    } else {
+      return debug_ext_;
+    }
+  }
+  const char* GetSourceFileName() const {
+    if (file_ == nullptr) {
+      return "<UNKNOWN_FILE>";
+    } else {
+      return file_;
+    }
+  }
+
+ private:
+  jvmtiEnv* jvmtienv_;
+  jclass class_;
+  char* name_ = nullptr;
+  char* generic_ = nullptr;
+  char* file_ = nullptr;
+  char* debug_ext_ = nullptr;
+
+  friend std::ostream& operator<<(std::ostream &os, ScopedClassInfo const& m);
+};
+
+class ScopedMethodInfo {
+ public:
+  ScopedMethodInfo(jvmtiEnv* jvmtienv, JNIEnv* env, jmethodID m)
+      : jvmtienv_(jvmtienv), env_(env), method_(m) {}
+
+  ~ScopedMethodInfo() {
+    DeleteLocalRef(env_, declaring_class_);
+    jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
+    jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(signature_));
+    jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
+  }
+
+  bool Init(bool get_generic = true) {
+    if (jvmtienv_->GetMethodDeclaringClass(method_, &declaring_class_) != JVMTI_ERROR_NONE) {
+      return false;
+    }
+    class_info_.reset(new ScopedClassInfo(jvmtienv_, declaring_class_));
+    jint nlines;
+    jvmtiLineNumberEntry* lines;
+    jvmtiError err = jvmtienv_->GetLineNumberTable(method_, &nlines, &lines);
+    if (err == JVMTI_ERROR_NONE) {
+      if (nlines > 0) {
+        first_line_ = lines[0].line_number;
+      }
+      jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(lines));
+    } else if (err != JVMTI_ERROR_ABSENT_INFORMATION &&
+               err != JVMTI_ERROR_NATIVE_METHOD) {
+      return false;
+    }
+    return class_info_->Init(get_generic) &&
+        (jvmtienv_->GetMethodName(method_, &name_, &signature_, &generic_) == JVMTI_ERROR_NONE);
+  }
+
+  const ScopedClassInfo& GetDeclaringClassInfo() const {
+    return *class_info_;
+  }
+
+  jclass GetDeclaringClass() const {
+    return declaring_class_;
+  }
+
+  const char* GetName() const {
+    return name_;
+  }
+
+  const char* GetSignature() const {
+    return signature_;
+  }
+
+  const char* GetGeneric() const {
+    return generic_;
+  }
+
+  jint GetFirstLine() const {
+    return first_line_;
+  }
+
+ private:
+  jvmtiEnv* jvmtienv_;
+  JNIEnv* env_;
+  jmethodID method_;
+  jclass declaring_class_ = nullptr;
+  std::unique_ptr<ScopedClassInfo> class_info_;
+  char* name_ = nullptr;
+  char* signature_ = nullptr;
+  char* generic_ = nullptr;
+  jint first_line_ = -1;
+};
+
+std::ostream& operator<<(std::ostream &os, ScopedClassInfo const& c) {
+  const char* generic = c.GetGeneric();
+  if (generic != nullptr) {
+    return os << c.GetName() << "<" << generic << ">" << " file: " << c.GetSourceFileName();
+  } else {
+    return os << c.GetName() << " file: " << c.GetSourceFileName();
+  }
+}
+
+class LockedStream {  // File-backed log sink.  NOTE(review): despite the name, Write() is not internally synchronized — callers serialize via their own mutex (see logVMObjectAlloc); confirm no unlocked callers.
+ public:
+  explicit LockedStream(const std::string& filepath) {  // Opens |filepath| for writing; on failure only logs, leaving the stream unusable.
+    stream_.open(filepath, std::ofstream::out);
+    if (!stream_.is_open()) {
+      LOG(ERROR) << "====== JVMTI FAILED TO OPEN LOG FILE";
+    }
+  }
+  ~LockedStream() {
+    stream_.close();
+  }
+  void Write(const std::string& str) {  // Appends |str| and flushes immediately so records survive abrupt process death.
+    stream_ << str;
+    stream_.flush();
+  }
+ private:
+  std::ofstream stream_;
+};
+
+static LockedStream* stream = nullptr;
+
+class UniqueStringTable {
+ public:
+  UniqueStringTable() = default;
+  ~UniqueStringTable() = default;
+  std::string Intern(const std::string& header, const std::string& key) {  // Maps |key| to a stable integer id; emits "<header><id>,<key>" the first time a key is seen.
+    if (map_.find(key) == map_.end()) {
+      map_[key] = next_index_;
+      // Emit definition line.  E.g., =123,string
+      stream->Write(header + std::to_string(next_index_) + "," + key + "\n");
+      ++next_index_;
+    }
+    return std::to_string(map_[key]);
+  }
+ private:
+  int32_t next_index_ = 0;  // BUG FIX: was uninitialized (default ctor left it indeterminate), so emitted ids were garbage.
+  std::map<std::string, int32_t> map_;
+};
+
+static UniqueStringTable* string_table = nullptr;
+
+// Formatter for the thread, type, and size of an allocation.
+static std::string formatAllocation(jvmtiEnv* jvmti,
+                                    JNIEnv* jni,
+                                    jthreadContainer thr,
+                                    jclass klass,
+                                    jlongContainer size) {
+  ScopedThreadInfo sti(jvmti, jni, thr.thread);
+  std::ostringstream allocation;
+  allocation << "jthread[" << sti.GetName() << "]";
+  ScopedClassInfo sci(jvmti, klass);
+  if (sci.Init(/*get_generic=*/false)) {
+    allocation << ", jclass[" << sci << "]";
+  } else {
+    allocation << ", jclass[TYPE UNKNOWN]";
+  }
+  allocation << ", size[" << size.val << ", hex: 0x" << std::hex << size.val << "]";
+  return string_table->Intern("+", allocation.str());
+}
+
+// Formatter for a method entry on a call stack.
+static std::string formatMethod(jvmtiEnv* jvmti, JNIEnv* jni, jmethodID method_id) {
+  ScopedMethodInfo smi(jvmti, jni, method_id);
+  std::string method;
+  if (smi.Init(/*get_generic=*/false)) {
+    method = std::string(smi.GetDeclaringClassInfo().GetName()) +
+        "::" + smi.GetName() + smi.GetSignature();
+  } else {
+    method = "ERROR";
+  }
+  return string_table->Intern("+", method);
+}
+
+static int sampling_rate;
+static int stack_depth_limit;
+
+static void JNICALL logVMObjectAlloc(jvmtiEnv* jvmti,
+                                     JNIEnv* jni,
+                                     jthread thread,
+                                     jobject obj ATTRIBUTE_UNUSED,
+                                     jclass klass,
+                                     jlong size) {
+  // Sample only once out of sampling_rate tries, and prevent recursive allocation tracking.
+  static thread_local int sample_countdown = sampling_rate;
+  --sample_countdown;
+  if (sample_countdown != 0) {
+    return;
+  }
+
+  // Guard accesses to string table and emission.
+  static std::mutex mutex;
+  std::lock_guard<std::mutex> lg(mutex);
+
+  std::string record =
+      formatAllocation(jvmti,
+                       jni,
+                       jthreadContainer{.thread = thread},
+                       klass,
+                       jlongContainer{.val = size});
+
+  std::unique_ptr<jvmtiFrameInfo[]> stack_frames(new jvmtiFrameInfo[stack_depth_limit]);
+  jint stack_depth;
+  jvmtiError err = jvmti->GetStackTrace(thread,
+                                        0,
+                                        stack_depth_limit,
+                                        stack_frames.get(),
+                                        &stack_depth);
+  if (err == JVMTI_ERROR_NONE) {
+    // Emit stack frames in order from deepest in the stack to most recent.
+    // This simplifies post-collection processing.
+    for (int i = stack_depth - 1; i >= 0; --i) {
+      record += ";" + formatMethod(jvmti, jni, stack_frames[i].method);
+    }
+  }
+  stream->Write(string_table->Intern("=", record) + "\n");
+
+  sample_countdown = sampling_rate;
+}
+
+static jvmtiEventCallbacks kLogCallbacks {
+  .VMObjectAlloc = logVMObjectAlloc,
+};
+
+static jint SetupJvmtiEnv(JavaVM* vm, jvmtiEnv** jvmti) {
+  jint res = vm->GetEnv(reinterpret_cast<void**>(jvmti), JVMTI_VERSION_1_1);
+  if (res != JNI_OK || *jvmti == nullptr) {
+    LOG(ERROR) << "Unable to access JVMTI, error code " << res;
+    return vm->GetEnv(reinterpret_cast<void**>(jvmti), kArtTiVersion);
+  }
+  return res;
+}
+
+}  // namespace
+
+static jvmtiError SetupCapabilities(jvmtiEnv* jvmti) {
+  jvmtiCapabilities caps{};
+  caps.can_generate_vm_object_alloc_events = 1;
+  caps.can_get_line_numbers = 1;
+  caps.can_get_source_file_name = 1;
+  caps.can_get_source_debug_extension = 1;
+  return jvmti->AddCapabilities(&caps);
+}
+
+static bool ProcessOptions(std::string options) {
+  std::string output_file_path;
+  if (options.empty()) {
+    static constexpr int kDefaultSamplingRate = 10;
+    static constexpr int kDefaultStackDepthLimit = 50;
+    static constexpr const char* kDefaultOutputFilePath = "/data/local/tmp/logstream.txt";
+
+    sampling_rate = kDefaultSamplingRate;
+    stack_depth_limit = kDefaultStackDepthLimit;
+    output_file_path = kDefaultOutputFilePath;
+  } else {
+    // options string should contain "sampling_rate,stack_depth_limit,output_file_path".
+    size_t comma_pos = options.find(',');
+    if (comma_pos == std::string::npos) {
+      return false;
+    }
+    sampling_rate = std::stoi(options.substr(0, comma_pos));
+    options = options.substr(comma_pos + 1);
+    comma_pos = options.find(',');
+    if (comma_pos == std::string::npos) {
+      return false;
+    }
+    stack_depth_limit = std::stoi(options.substr(0, comma_pos));
+    output_file_path = options.substr(comma_pos + 1);
+  }
+  LOG(INFO) << "Starting allocation tracing: sampling_rate=" << sampling_rate
+            << ", stack_depth_limit=" << stack_depth_limit
+            << ", output_file_path=" << output_file_path;
+  stream = new LockedStream(output_file_path);
+
+  return true;
+}
+
+static jint AgentStart(JavaVM* vm,
+                       char* options,
+                       void* reserved ATTRIBUTE_UNUSED) {
+  // Handle the sampling rate, depth limit, and output path, if set.
+  if (!ProcessOptions(options)) {
+    return JNI_ERR;
+  }
+
+  // Create the environment.
+  jvmtiEnv* jvmti = nullptr;
+  if (SetupJvmtiEnv(vm, &jvmti) != JNI_OK) {
+    LOG(ERROR) << "Could not get JVMTI env or ArtTiEnv!";
+    return JNI_ERR;
+  }
+
+  jvmtiError error = SetupCapabilities(jvmti);
+  if (error != JVMTI_ERROR_NONE) {
+    LOG(ERROR) << "Unable to set caps";
+    return JNI_ERR;
+  }
+
+  // Add callbacks and notification.
+  error = jvmti->SetEventCallbacks(&kLogCallbacks, static_cast<jint>(sizeof(kLogCallbacks)));
+  if (error != JVMTI_ERROR_NONE) {
+    LOG(ERROR) << "Unable to set event callbacks.";
+    return JNI_ERR;
+  }
+  error = jvmti->SetEventNotificationMode(JVMTI_ENABLE,
+                                          JVMTI_EVENT_VM_OBJECT_ALLOC,
+                                          nullptr /* all threads */);
+  if (error != JVMTI_ERROR_NONE) {
+    LOG(ERROR) << "Unable to enable event " << JVMTI_EVENT_VM_OBJECT_ALLOC;
+    return JNI_ERR;
+  }
+
+  string_table = new UniqueStringTable();
+
+  return JNI_OK;
+}
+
+// Late attachment (e.g. 'am attach-agent').
+extern "C" JNIEXPORT jint JNICALL Agent_OnAttach(JavaVM *vm, char* options, void* reserved) {
+  return AgentStart(vm, options, reserved);
+}
+
+// Early attachment
+extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* jvm, char* options, void* reserved) {
+  return AgentStart(jvm, options, reserved);
+}
+
+}  // namespace tifast
+
diff --git a/tools/jvmti-agents/ti-fast/Android.bp b/tools/jvmti-agents/ti-fast/Android.bp
index fd867c9..797654b 100644
--- a/tools/jvmti-agents/ti-fast/Android.bp
+++ b/tools/jvmti-agents/ti-fast/Android.bp
@@ -16,30 +16,47 @@
 
 // Build variants {target,host} x {debug,ndebug} x {32,64}
 cc_defaults {
-    name: "tifast-defaults",
-    host_supported: true,
+    name: "tifast-base-defaults",
     srcs: ["tifast.cc"],
     defaults: ["art_defaults"],
 
     // Note that this tool needs to be built for both 32-bit and 64-bit since it requires
     // to be same ISA as what it is attached to.
     compile_multilib: "both",
+    header_libs: [
+        "libopenjdkjvmti_headers",
+        "libnativehelper_header_only",
+        "jni_headers",
+    ],
+}
 
+cc_defaults {
+    name: "tifast-defaults",
+    host_supported: true,
     shared_libs: [
         "libbase",
     ],
-    header_libs: [
-        "libopenjdkjvmti_headers",
+    defaults: ["tifast-base-defaults"],
+}
+
+cc_defaults {
+    name: "tifast-static-defaults",
+    host_supported: false,
+    defaults: ["tifast-base-defaults"],
+
+    shared_libs: [
+        "liblog",
     ],
-    multilib: {
-        lib32: {
-            suffix: "32",
-        },
-        lib64: {
-            suffix: "64",
-        },
-    },
-    symlink_preferred_arch: true,
+    static_libs: [
+        "libbase_ndk",
+    ],
+    sdk_version: "current",
+    stl: "c++_static",
+}
+
+art_cc_library {
+    name: "libtifasts",
+    defaults: ["tifast-static-defaults"],
 }
 
 art_cc_library {
diff --git a/tools/jvmti-agents/ti-fast/README.md b/tools/jvmti-agents/ti-fast/README.md
index c8cf180..6194af9 100644
--- a/tools/jvmti-agents/ti-fast/README.md
+++ b/tools/jvmti-agents/ti-fast/README.md
@@ -6,11 +6,13 @@
 
 # Usage
 ### Build
->    `make libtifast`
+>    `m libtifast`
 
 The libraries will be built for 32-bit, 64-bit, host and target. Below examples
 assume you want to use the 64-bit version.
 
+Use `libtifasts` if you wish to build a version without non-NDK dynamic dependencies.
+
 ### Command Line
 
 The agent is loaded using -agentpath like normal. It takes arguments in the
diff --git a/tools/jvmti-agents/ti-fast/tifast.cc b/tools/jvmti-agents/ti-fast/tifast.cc
index 677823a..bb49aa1 100644
--- a/tools/jvmti-agents/ti-fast/tifast.cc
+++ b/tools/jvmti-agents/ti-fast/tifast.cc
@@ -70,40 +70,37 @@
   }
 }
 
-// Setup for all supported events. Give a macro with fun(name, event_num, args)
-#define FOR_ALL_SUPPORTED_JNI_EVENTS(fun) \
-    fun(SingleStep, EVENT(SINGLE_STEP), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth, jlocation loc), (jvmti, jni, jthreadContainer{.thread = thread}, meth, loc)) \
-    fun(MethodEntry, EVENT(METHOD_ENTRY), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth), (jvmti, jni, jthreadContainer{.thread = thread}, meth)) \
-    fun(MethodExit, EVENT(METHOD_EXIT), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth, jboolean jb, jvalue jv), (jvmti, jni, jthreadContainer{.thread = thread}, meth, jb, jv)) \
-    fun(NativeMethodBind, EVENT(NATIVE_METHOD_BIND), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth, void* v1, void** v2), (jvmti, jni, jthreadContainer{.thread = thread}, meth, v1, v2)) \
-    fun(Exception, EVENT(EXCEPTION), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth1, jlocation loc1, jobject obj, jmethodID meth2, jlocation loc2), (jvmti, jni, jthreadContainer{.thread = thread}, meth1, loc1, obj, meth2, loc2)) \
-    fun(ExceptionCatch, EVENT(EXCEPTION_CATCH), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth, jlocation loc, jobject obj), (jvmti, jni, jthreadContainer{.thread = thread}, meth, loc, obj)) \
-    fun(ThreadStart, EVENT(THREAD_START), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread), (jvmti, jni, jthreadContainer{.thread = thread})) \
-    fun(ThreadEnd, EVENT(THREAD_END), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread), (jvmti, jni, jthreadContainer{.thread = thread})) \
-    fun(ClassLoad, EVENT(CLASS_LOAD), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jclass klass), (jvmti, jni, jthreadContainer{.thread = thread}, klass) ) \
-    fun(ClassPrepare, EVENT(CLASS_PREPARE), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jclass klass), (jvmti, jni, jthreadContainer{.thread = thread}, klass)) \
-    fun(ClassFileLoadHook, EVENT(CLASS_FILE_LOAD_HOOK), (jvmtiEnv* jvmti, JNIEnv* jni, jclass klass, jobject obj1, const char* c1, jobject obj2, jint i1, const unsigned char* c2, jint* ip1, unsigned char** cp1), (jvmti, jni, klass, obj1, c1, obj2, i1, c2, ip1, cp1)) \
-    fun(MonitorContendedEnter, EVENT(MONITOR_CONTENDED_ENTER), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj), (jvmti, jni, jthreadContainer{.thread = thread}, obj)) \
-    fun(MonitorContendedEntered, EVENT(MONITOR_CONTENDED_ENTERED), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj), (jvmti, jni, jthreadContainer{.thread = thread}, obj)) \
-    fun(MonitorWait, EVENT(MONITOR_WAIT), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj, jlong l1), (jvmti, jni, jthreadContainer{.thread = thread}, obj, jlongContainer{.val = l1})) \
-    fun(MonitorWaited, EVENT(MONITOR_WAITED), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj, jboolean b1), (jvmti, jni, jthreadContainer{.thread = thread}, obj, b1)) \
-    fun(ResourceExhausted, EVENT(RESOURCE_EXHAUSTED), (jvmtiEnv* jvmti, JNIEnv* jni, jint i1, const void* cv, const char* cc), (jvmti, jni, i1, cv, cc)) \
-    fun(VMObjectAlloc, EVENT(VM_OBJECT_ALLOC), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj, jclass klass, jlong l1), (jvmti, jni, jthreadContainer{.thread = thread}, obj, klass, jlongContainer{.val = l1})) \
-    fun(VMInit, EVENT(VM_INIT), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread), (jvmti, jni, jthreadContainer{.thread = thread})) \
-    fun(VMStart, EVENT(VM_START), (jvmtiEnv* jvmti, JNIEnv* jni), (jvmti, jni)) \
-    fun(VMDeath, EVENT(VM_DEATH), (jvmtiEnv* jvmti, JNIEnv* jni), (jvmti, jni)) \
-
-#define FOR_ALL_SUPPORTED_NO_JNI_EVENTS(fun) \
-    fun(CompiledMethodLoad, EVENT(COMPILED_METHOD_LOAD), (jvmtiEnv* jvmti, jmethodID meth, jint i1, const void* cv1, jint i2, const jvmtiAddrLocationMap* alm, const void* cv2), (jvmti, meth, i1, cv1, i2, alm, cv2)) \
-    fun(CompiledMethodUnload, EVENT(COMPILED_METHOD_UNLOAD), (jvmtiEnv* jvmti, jmethodID meth, const void* cv1), (jvmti, meth, cv1)) \
-    fun(DynamicCodeGenerated, EVENT(DYNAMIC_CODE_GENERATED), (jvmtiEnv* jvmti, const char* cc, const void* cv, jint i1), (jvmti, cc, cv, i1)) \
-    fun(DataDumpRequest, EVENT(DATA_DUMP_REQUEST), (jvmtiEnv* jvmti), (jvmti)) \
-    fun(GarbageCollectionStart, EVENT(GARBAGE_COLLECTION_START), (jvmtiEnv* jvmti), (jvmti)) \
-    fun(GarbageCollectionFinish, EVENT(GARBAGE_COLLECTION_FINISH), (jvmtiEnv* jvmti), (jvmti))
+// Setup for all supported events. Give a macro with {non_}jni_fun(name, event_num, args)
+#define FOR_ALL_SUPPORTED_EVENTS_DIFFERENT(jni_fun, non_jni_fun)          \
+    jni_fun(VMInit, EVENT(VM_INIT), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread), (jvmti, jni, jthreadContainer{.thread = thread})) \
+    jni_fun(VMDeath, EVENT(VM_DEATH), (jvmtiEnv* jvmti, JNIEnv* jni), (jvmti, jni)) \
+    jni_fun(ThreadStart, EVENT(THREAD_START), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread), (jvmti, jni, jthreadContainer{.thread = thread})) \
+    jni_fun(ThreadEnd, EVENT(THREAD_END), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread), (jvmti, jni, jthreadContainer{.thread = thread})) \
+    jni_fun(ClassFileLoadHook, EVENT(CLASS_FILE_LOAD_HOOK), (jvmtiEnv* jvmti, JNIEnv* jni, jclass klass, jobject obj1, const char* c1, jobject obj2, jint i1, const unsigned char* c2, jint* ip1, unsigned char** cp1), (jvmti, jni, klass, obj1, c1, obj2, i1, c2, ip1, cp1)) \
+    jni_fun(ClassLoad, EVENT(CLASS_LOAD), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jclass klass), (jvmti, jni, jthreadContainer{.thread = thread}, klass) ) \
+    jni_fun(ClassPrepare, EVENT(CLASS_PREPARE), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jclass klass), (jvmti, jni, jthreadContainer{.thread = thread}, klass)) \
+    jni_fun(VMStart, EVENT(VM_START), (jvmtiEnv* jvmti, JNIEnv* jni), (jvmti, jni)) \
+    jni_fun(Exception, EVENT(EXCEPTION), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth1, jlocation loc1, jobject obj, jmethodID meth2, jlocation loc2), (jvmti, jni, jthreadContainer{.thread = thread}, meth1, loc1, obj, meth2, loc2)) \
+    jni_fun(ExceptionCatch, EVENT(EXCEPTION_CATCH), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth, jlocation loc, jobject obj), (jvmti, jni, jthreadContainer{.thread = thread}, meth, loc, obj)) \
+    jni_fun(SingleStep, EVENT(SINGLE_STEP), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth, jlocation loc), (jvmti, jni, jthreadContainer{.thread = thread}, meth, loc)) \
+    jni_fun(MethodEntry, EVENT(METHOD_ENTRY), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth), (jvmti, jni, jthreadContainer{.thread = thread}, meth)) \
+    jni_fun(MethodExit, EVENT(METHOD_EXIT), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth, jboolean jb, jvalue jv), (jvmti, jni, jthreadContainer{.thread = thread}, meth, jb, jv)) \
+    jni_fun(NativeMethodBind, EVENT(NATIVE_METHOD_BIND), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jmethodID meth, void* v1, void** v2), (jvmti, jni, jthreadContainer{.thread = thread}, meth, v1, v2)) \
+    non_jni_fun(CompiledMethodLoad, EVENT(COMPILED_METHOD_LOAD), (jvmtiEnv* jvmti, jmethodID meth, jint i1, const void* cv1, jint i2, const jvmtiAddrLocationMap* alm, const void* cv2), (jvmti, meth, i1, cv1, i2, alm, cv2)) \
+    non_jni_fun(CompiledMethodUnload, EVENT(COMPILED_METHOD_UNLOAD), (jvmtiEnv* jvmti, jmethodID meth, const void* cv1), (jvmti, meth, cv1)) \
+    non_jni_fun(DynamicCodeGenerated, EVENT(DYNAMIC_CODE_GENERATED), (jvmtiEnv* jvmti, const char* cc, const void* cv, jint i1), (jvmti, cc, cv, i1)) \
+    non_jni_fun(DataDumpRequest, EVENT(DATA_DUMP_REQUEST), (jvmtiEnv* jvmti), (jvmti)) \
+    jni_fun(MonitorWait, EVENT(MONITOR_WAIT), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj, jlong l1), (jvmti, jni, jthreadContainer{.thread = thread}, obj, jlongContainer{.val = l1})) \
+    jni_fun(MonitorWaited, EVENT(MONITOR_WAITED), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj, jboolean b1), (jvmti, jni, jthreadContainer{.thread = thread}, obj, b1)) \
+    jni_fun(MonitorContendedEnter, EVENT(MONITOR_CONTENDED_ENTER), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj), (jvmti, jni, jthreadContainer{.thread = thread}, obj)) \
+    jni_fun(MonitorContendedEntered, EVENT(MONITOR_CONTENDED_ENTERED), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj), (jvmti, jni, jthreadContainer{.thread = thread}, obj)) \
+    jni_fun(ResourceExhausted, EVENT(RESOURCE_EXHAUSTED), (jvmtiEnv* jvmti, JNIEnv* jni, jint i1, const void* cv, const char* cc), (jvmti, jni, i1, cv, cc)) \
+    non_jni_fun(GarbageCollectionStart, EVENT(GARBAGE_COLLECTION_START), (jvmtiEnv* jvmti), (jvmti)) \
+    non_jni_fun(GarbageCollectionFinish, EVENT(GARBAGE_COLLECTION_FINISH), (jvmtiEnv* jvmti), (jvmti)) \
+    jni_fun(VMObjectAlloc, EVENT(VM_OBJECT_ALLOC), (jvmtiEnv* jvmti, JNIEnv* jni, jthread thread, jobject obj, jclass klass, jlong l1), (jvmti, jni, jthreadContainer{.thread = thread}, obj, klass, jlongContainer{.val = l1})) \
 
 #define FOR_ALL_SUPPORTED_EVENTS(fun) \
-    FOR_ALL_SUPPORTED_JNI_EVENTS(fun) \
-    FOR_ALL_SUPPORTED_NO_JNI_EVENTS(fun)
+    FOR_ALL_SUPPORTED_EVENTS_DIFFERENT(fun, fun)
 
 static const jvmtiEvent kAllEvents[] = {
 #define GET_EVENT(a, event, b, c) event,
@@ -582,8 +579,7 @@
       LOG(INFO) << "Got event " << #name << "(" << printer.GetResult() << ")"; \
     } \
 
-FOR_ALL_SUPPORTED_JNI_EVENTS(GENERATE_LOG_FUNCTION_JNI)
-FOR_ALL_SUPPORTED_NO_JNI_EVENTS(GENERATE_LOG_FUNCTION_NO_JNI)
+FOR_ALL_SUPPORTED_EVENTS_DIFFERENT(GENERATE_LOG_FUNCTION_JNI, GENERATE_LOG_FUNCTION_NO_JNI)
 #undef GENERATE_LOG_FUNCTION
 
 static jvmtiEventCallbacks kLogCallbacks {
@@ -614,9 +610,8 @@
 #undef CHECK_NAME
 }
 
-#undef FOR_ALL_SUPPORTED_JNI_EVENTS
-#undef FOR_ALL_SUPPORTED_NO_JNI_EVENTS
 #undef FOR_ALL_SUPPORTED_EVENTS
+#undef FOR_ALL_SUPPORTED_EVENTS_DIFFERENT
 
 static std::vector<jvmtiEvent> GetAllAvailableEvents(jvmtiEnv* jvmti) {
   std::vector<jvmtiEvent> out;
diff --git a/tools/jvmti-agents/titrace/Android.bp b/tools/jvmti-agents/titrace/Android.bp
index 21f266c..21c2bf0 100644
--- a/tools/jvmti-agents/titrace/Android.bp
+++ b/tools/jvmti-agents/titrace/Android.bp
@@ -40,17 +40,8 @@
     },
     header_libs: [
         "libopenjdkjvmti_headers",
+        "libdexfile_all_headers", // for dex_instruction_list.h only
     ],
-    include_dirs: ["art/libdexfile"], // for dex_instruction_list.h only
-    multilib: {
-        lib32: {
-            suffix: "32",
-        },
-        lib64: {
-            suffix: "64",
-        },
-    },
-    symlink_preferred_arch: true,
 }
 
 art_cc_library {
diff --git a/tools/jvmti-agents/titrace/README.md b/tools/jvmti-agents/titrace/README.md
index a82025b..2bb9983 100644
--- a/tools/jvmti-agents/titrace/README.md
+++ b/tools/jvmti-agents/titrace/README.md
@@ -4,7 +4,7 @@
 
 # Usage
 ### Build
->    `make libtitrace`  # or 'make libtitraced' with debugging checks enabled
+>    `m libtitrace`  # or 'm libtitraced' with debugging checks enabled
 
 The libraries will be built for 32-bit, 64-bit, host and target. Below examples assume you want to use the 64-bit version.
 ### Command Line
diff --git a/tools/jvmti-agents/wrapagentproperties/Android.bp b/tools/jvmti-agents/wrapagentproperties/Android.bp
index 8dec847..88b1e67 100644
--- a/tools/jvmti-agents/wrapagentproperties/Android.bp
+++ b/tools/jvmti-agents/wrapagentproperties/Android.bp
@@ -38,15 +38,6 @@
     header_libs: [
         "libopenjdkjvmti_headers",
     ],
-    multilib: {
-        lib32: {
-            suffix: "32",
-        },
-        lib64: {
-            suffix: "64",
-        },
-    },
-    symlink_preferred_arch: true,
 }
 
 art_cc_library {
diff --git a/tools/jvmti-agents/wrapagentproperties/README.md b/tools/jvmti-agents/wrapagentproperties/README.md
index d968087..14894ad 100644
--- a/tools/jvmti-agents/wrapagentproperties/README.md
+++ b/tools/jvmti-agents/wrapagentproperties/README.md
@@ -5,7 +5,7 @@
 
 # Usage
 ### Build
->    `make libwrapagentproperties`  # or 'make libwrapagentpropertiesd' with debugging checks enabled
+>    `m libwrapagentproperties`  # or 'm libwrapagentpropertiesd' with debugging checks enabled
 
 The libraries will be built for 32-bit, 64-bit, host and target. Below examples
 assume you want to use the 64-bit version.
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index d65134c..336de1a 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -9,6 +9,14 @@
 
 [
 {
+  description: "Os.memfd_create() only supports bionic-based platforms.",
+  result: EXEC_FAILED,
+  modes: [host],
+  names: ["libcore.android.system.OsTest#testMemfdCreate",
+          "libcore.android.system.OsTest#testMemfdCreateErrno",
+          "libcore.android.system.OsTest#testMemfdCreateFlags"]
+},
+{
   description: "Assert.java differences between vogar and junit.",
   result: EXEC_FAILED,
   modes: [host],
@@ -105,12 +113,6 @@
   names: ["org.apache.harmony.tests.java.lang.ProcessTest#test_getErrorStream"]
 },
 {
-  description: "Test sometimes timeouts on volantis, and on most modes in debug mode",
-  result: EXEC_TIMEOUT,
-  names: ["libcore.java.lang.SystemTest#testArrayCopyConcurrentModification"],
-  bug: 19165288
-},
-{
   description: "Needs kernel updates on host/device",
   result: EXEC_FAILED,
   names: ["libcore.libcore.io.OsTest#test_socketPing"]
@@ -242,5 +244,33 @@
   names: [
     "libcore.libcore.io.OsTest#testCrossFamilyBindConnectSendto"
   ]
+},
+{
+  description: "Test fails on host with `socket failed: EACCES (Permission denied)`",
+  result: EXEC_FAILED,
+  modes: [host],
+  names: ["libcore.android.system.OsTest#test_socketPing"]
+},
+{
+  description: "Test fails on host with: `/home/dalvik-prebuild/jaxen/xml/test/tests.xml: open failed: ENOENT (No such file or directory)`",
+  result: EXEC_FAILED,
+  names: ["libcore.xml.JaxenXPathTestSuite#suite"]
+},
+{
+  description: "Test fails on host with: `/home/dalvik-prebuild/OASIS/XSLT-Conformance-TC/TESTS/catalog.xml: open failed: ENOENT (No such file or directory)`",
+  result: EXEC_FAILED,
+  names: ["libcore.xml.XsltXPathConformanceTestSuite#suite"]
+},
+{
+  description: "Fails on device",
+  result: EXEC_FAILED,
+  modes: [device_testdex],
+  bug: 145348591,
+  names: ["libcore.android.system.OsTest#test_if_nametoindex_if_indextoname"]
+},
+{
+  description: "Fails on non-Android and Android versions < R",
+  result: EXEC_FAILED,
+  names: ["libcore.android.system.OsTest#test_NetlinkSocket"]
 }
 ]
diff --git a/tools/libcore_fugu_failures.txt b/tools/libcore_fugu_failures.txt
new file mode 100644
index 0000000..11183bb
--- /dev/null
+++ b/tools/libcore_fugu_failures.txt
@@ -0,0 +1,131 @@
+/*
+ * This file contains expectations for ART's Buildbot when running on fugu devices
+ * (Nexus Player, kernel 3.10).
+ *
+ * The script that uses this file is art/tools/run-libcore-tests.sh.
+ */
+
+[
+{
+  description: "Test using memfd_create() syscall, only available from Linux 3.17.",
+  result: EXEC_FAILED,
+  bug: 146113753,
+  modes: [device_testdex],
+  names: [
+    "libcore.android.system.OsTest#testMemfdCreate",
+    "libcore.android.system.OsTest#testMemfdCreateErrno",
+    "libcore.android.system.OsTest#testMemfdCreateFlags"
+  ]
+},
+{
+  description: "Test using the getrandom() syscall, only available from Linux 3.17.",
+  result: EXEC_FAILED,
+  bug: 141230711,
+  modes: [device_testdex],
+  names: [
+    "libcore.java.math.BigIntegerTest#test_Constructor_IILjava_util_Random",
+    "libcore.java.math.BigIntegerTest#test_probablePrime",
+    "libcore.javax.crypto.CipherInputStreamTest#testDecryptCorruptGCM",
+    "libcore.javax.crypto.CipherOutputStreamTest#testDecryptCorruptGCM",
+    "libcore.libcore.timezone.TelephonyLookupTest#createInstanceWithFallback",
+    "libcore.libcore.timezone.TelephonyLookupTest#getTelephonyNetworkFinder",
+    "libcore.libcore.timezone.TelephonyLookupTest#validateCountryCodeLowerCase",
+    "libcore.libcore.timezone.TelephonyLookupTest#validateDuplicateMccMnc",
+    "libcore.libcore.timezone.TelephonyLookupTest#xmlParsing_emptyFile",
+    "libcore.libcore.timezone.TelephonyLookupTest#xmlParsing_emptyNetworksOk",
+    "libcore.libcore.timezone.TelephonyLookupTest#xmlParsing_missingCountryCodeAttribute",
+    "libcore.libcore.timezone.TelephonyLookupTest#xmlParsing_missingMccAttribute",
+    "libcore.libcore.timezone.TelephonyLookupTest#xmlParsing_missingMncAttribute",
+    "libcore.libcore.timezone.TelephonyLookupTest#xmlParsing_missingNetworks",
+    "libcore.libcore.timezone.TelephonyLookupTest#xmlParsing_truncatedInput",
+    "libcore.libcore.timezone.TelephonyLookupTest#xmlParsing_unexpectedComments",
+    "libcore.libcore.timezone.TelephonyLookupTest#xmlParsing_unexpectedElementsIgnored",
+    "libcore.libcore.timezone.TelephonyLookupTest#xmlParsing_unexpectedRootElement",
+    "libcore.libcore.timezone.TelephonyLookupTest#xmlParsing_unexpectedTextIgnored",
+    "libcore.libcore.timezone.TimeZoneFinderTest#createInstanceWithFallback",
+    "libcore.libcore.timezone.TimeZoneFinderTest#getCountryZonesFinder",
+    "libcore.libcore.timezone.TimeZoneFinderTest#getCountryZonesFinder_empty",
+    "libcore.libcore.timezone.TimeZoneFinderTest#getCountryZonesFinder_invalid",
+    "libcore.libcore.timezone.TimeZoneFinderTest#getIanaVersion",
+    "libcore.libcore.timezone.TimeZoneFinderTest#lookupCountryTimeZones_caseInsensitive",
+    "libcore.libcore.timezone.TimeZoneFinderTest#lookupCountryTimeZones_unknownCountryReturnsNull",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_badCountryDefaultBoost",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_badCountryEverUtc",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_badTimeZoneMappingNotAfter",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_badTimeZoneMappingPicker",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_countryDefaultBoost",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_emptyFile",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_missingCountryCode",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_missingCountryDefault",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_missingCountryEverUtc",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_missingCountryZones",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_missingIanaVersionAttribute",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_noCountriesOk",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_timeZoneMappingNotAfter",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_timeZoneMappingPicker",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_truncatedInput",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_unexpectedChildInTimeZoneIdThrows",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_unexpectedComments",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_unexpectedElementsIgnored",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_unexpectedRootElement",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_unexpectedTextIgnored",
+    "libcore.libcore.timezone.TimeZoneFinderTest#xmlParsing_unknownTimeZoneIdIgnored",
+    "org.apache.harmony.crypto.tests.javax.crypto.CipherInputStream1Test#test_ConstructorLjava_io_InputStreamLjavax_crypto_Cipher",
+    "org.apache.harmony.crypto.tests.javax.crypto.CipherOutputStream1Test#test_ConstructorLjava_io_OutputStreamLjavax_crypto_Cipher",
+    "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_",
+    "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_doFinalLjava_nio_ByteBufferLjava_nio_ByteBuffer",
+    "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_initWithKey",
+    "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_initWithSecureRandom",
+    "org.apache.harmony.crypto.tests.javax.crypto.CipherTest#test_updateLjava_nio_ByteBufferLjava_nio_ByteBuffer",
+    "org.apache.harmony.crypto.tests.javax.crypto.EncryptedPrivateKeyInfoTest#test_ROUNDTRIP_GetKeySpecCipher01",
+    "org.apache.harmony.crypto.tests.javax.crypto.EncryptedPrivateKeyInfoTest#test_ROUNDTRIP_GetKeySpecCipher02",
+    "org.apache.harmony.crypto.tests.javax.crypto.EncryptedPrivateKeyInfoTest#test_ROUNDTRIP_GetKeySpecKey01",
+    "org.apache.harmony.crypto.tests.javax.crypto.EncryptedPrivateKeyInfoTest#test_ROUNDTRIP_GetKeySpecKey02",
+    "org.apache.harmony.crypto.tests.javax.crypto.EncryptedPrivateKeyInfoTest#test_ROUNDTRIP_GetKeySpecKeyProvider01",
+    "org.apache.harmony.crypto.tests.javax.crypto.EncryptedPrivateKeyInfoTest#test_ROUNDTRIP_GetKeySpecKeyProvider02",
+    "org.apache.harmony.crypto.tests.javax.crypto.EncryptedPrivateKeyInfoTest#test_ROUNDTRIP_GetKeySpecKeyString01",
+    "org.apache.harmony.crypto.tests.javax.crypto.EncryptedPrivateKeyInfoTest#test_ROUNDTRIP_GetKeySpecKeyString02",
+    "org.apache.harmony.crypto.tests.javax.crypto.EncryptedPrivateKeyInfoTest#test_getAlgName",
+    "org.apache.harmony.crypto.tests.javax.crypto.ExemptionMechanismTest#test_initLjava_security_Key",
+    "org.apache.harmony.crypto.tests.javax.crypto.ExemptionMechanismTest#test_initLjava_security_KeyLjava_security_AlgorithmParameters",
+    "org.apache.harmony.crypto.tests.javax.crypto.ExemptionMechanismTest#test_initLjava_security_KeyLjava_security_spec_AlgorithmParameterSpec",
+    "org.apache.harmony.crypto.tests.javax.crypto.KeyGeneratorTest#testGenerateKey",
+    "org.apache.harmony.crypto.tests.javax.crypto.KeyGeneratorTest#test_initLjava_security_spec_AlgorithmParameterSpec",
+    "org.apache.harmony.crypto.tests.javax.crypto.SealedObjectTest#testDeserialization",
+    "org.apache.harmony.crypto.tests.javax.crypto.SealedObjectTest#testGetAlgorithm",
+    "org.apache.harmony.crypto.tests.javax.crypto.SealedObjectTest#testGetAlgorithmAfterSerialization",
+    "org.apache.harmony.crypto.tests.javax.crypto.SealedObjectTest#testGetObject1",
+    "org.apache.harmony.crypto.tests.javax.crypto.SealedObjectTest#testGetObject2",
+    "org.apache.harmony.crypto.tests.javax.crypto.SealedObjectTest#testGetObject3",
+    "org.apache.harmony.crypto.tests.javax.crypto.SealedObjectTest#testSealedObject1",
+    "org.apache.harmony.crypto.tests.javax.crypto.SecretKeyFactoryTest#test_translateKeyLjavax_crypto_SecretKey",
+    "org.apache.harmony.crypto.tests.javax.crypto.func.CipherAesTest#test_AesISO",
+    "org.apache.harmony.crypto.tests.javax.crypto.func.CipherAesTest#test_AesNoISO",
+    "org.apache.harmony.crypto.tests.javax.crypto.func.CipherAesWrapTest#test_AesWrap",
+    "org.apache.harmony.crypto.tests.javax.crypto.func.CipherDESedeTest#test_DESedeISO",
+    "org.apache.harmony.crypto.tests.javax.crypto.func.CipherDESedeTest#test_DESedeNoISO",
+    "org.apache.harmony.crypto.tests.javax.crypto.func.CipherDESedeWrapTest#test_DESedeWrap",
+    "org.apache.harmony.crypto.tests.javax.crypto.func.CipherDesTest#test_DesISO",
+    "org.apache.harmony.crypto.tests.javax.crypto.func.CipherDesTest#test_DesNoISO",
+    "org.apache.harmony.crypto.tests.javax.crypto.func.CipherPBETest#test_PBEWithMD5AndDES",
+    "org.apache.harmony.crypto.tests.javax.crypto.func.CipherPBETest#test_PBEWithSHAand3KeyTripleDES",
+    "org.apache.harmony.crypto.tests.javax.crypto.func.CipherRSATest#test_RSANoPadding",
+    "org.apache.harmony.crypto.tests.javax.crypto.func.CipherRSATest#test_RSAShortKey",
+    "org.apache.harmony.crypto.tests.javax.crypto.func.KeyGeneratorFunctionalTest#test_",
+    "org.apache.harmony.tests.java.math.BigIntegerConstructorsTest#testConstructorPrime",
+    "org.apache.harmony.tests.java.math.BigIntegerTest#test_isProbablePrimeI",
+    "org.apache.harmony.tests.java.math.OldBigIntegerTest#test_ConstructorIILjava_util_Random",
+    "org.apache.harmony.tests.java.math.OldBigIntegerTest#test_isProbablePrimeI",
+    "org.apache.harmony.tests.java.math.OldBigIntegerTest#test_nextProbablePrime",
+    "org.apache.harmony.tests.java.math.OldBigIntegerTest#test_probablePrime",
+    "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_nio_file_Path",
+    "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_nio_file_PathLjava_lang_String",
+    "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_nio_file_PathLjava_lang_String_Exception",
+    "org.apache.harmony.tests.java.util.UUIDTest#test_randomUUID",
+    "org.apache.harmony.tests.javax.security.OldSHA1PRNGSecureRandomTest#testGenerateSeedint02",
+    "org.apache.harmony.tests.javax.security.OldSHA1PRNGSecureRandomTest#testGenerateSeedint03",
+    "org.apache.harmony.tests.javax.security.OldSHA1PRNGSecureRandomTest#testNextBytesbyteArray03",
+    "org.apache.harmony.tests.javax.security.OldSHA1PRNGSecureRandomTest#testSetSeedbyteArray02"
+  ]
+}
+]
diff --git a/tools/libcore_gcstress_debug_failures.txt b/tools/libcore_gcstress_debug_failures.txt
index 4e10d91..0112644 100644
--- a/tools/libcore_gcstress_debug_failures.txt
+++ b/tools/libcore_gcstress_debug_failures.txt
@@ -40,7 +40,8 @@
           "jsr166.StampedLockTest#testReadLockInterruptibly_Interruptible",
           "jsr166.StampedLockTest#testReadLockInterruptibly",
           "jsr166.StampedLockTest#testWriteLockInterruptibly",
-          "org.apache.harmony.tests.java.lang.ProcessManagerTest#testSleep"
+          "org.apache.harmony.tests.java.lang.ProcessManagerTest#testSleep",
+          "libcore.java.lang.StringTest#testFastPathString_wellFormedUtf8Sequence"
   ]
 },
 {
diff --git a/tools/libcore_network_failures.txt b/tools/libcore_network_failures.txt
deleted file mode 100644
index 380f56b..0000000
--- a/tools/libcore_network_failures.txt
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * This file contains extra expectations for ART's buildbot regarding network tests.
- * The script that uses this file is art/tools/run-libcore-tests.sh.
- */
-
-[
-{
-  description: "Ignore failure of network-related tests on new devices running Android O",
-  result: EXEC_FAILED,
-  bug: 74725685,
-  modes: [device_testdex],
-  names: ["libcore.libcore.io.OsTest#test_byteBufferPositions_sendto_recvfrom_af_inet",
-          "libcore.libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithFtpURLConnection",
-          "libcore.libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithHttpURLConnection",
-          "libcore.libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithJarFtpURLConnection",
-          "libcore.libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithJarHttpURLConnection",
-          "libcore.libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithLoggingSocketHandler",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_40555",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_io_File",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_io_FileLjava_lang_String",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_io_InputStream",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_io_InputStreamLjava_lang_String",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_lang_Readable",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_lang_String",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_nio_channels_ReadableByteChannel",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_nio_channels_ReadableByteChannelLjava_lang_String",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_nio_file_Path",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_nio_file_PathLjava_lang_String",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_nio_file_PathLjava_lang_String_Exception",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_ConstructorLjava_nio_file_Path_Exception",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_close",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_delimiter",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_findInLine_LPattern",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_findInLine_LString",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_findInLine_LString_NPEs",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_findWithinHorizon_LPatternI",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNext",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextBigDecimal",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextBigInteger",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextBigIntegerI",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextBigIntegerI_cache",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextBoolean",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextByte",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextByteI",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextByteI_cache",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextDouble",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextFloat",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextInt",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextIntI",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextIntI_cache",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextLPattern",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextLString",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextLine",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextLine_sequence",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextLong",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextLongI",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextLongI_cache",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextShort",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextShortI",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_hasNextShortI_cache",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_ioException",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_locale",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_match",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_next",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_nextBigDecimal",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_nextBigInteger",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_nextBigIntegerI",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_nextBoolean",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_nextByte",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_nextByteI",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_nextDouble",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_nextFloat",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_nextInt",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_nextIntI",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_nextLPattern",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_nextLString",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_nextLine",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_nextLong",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_nextLongI",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_nextShort",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_nextShortI",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_radix",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_remove",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_skip_LPattern",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_skip_LString",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_toString",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_useDelimiter_LPattern",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_useDelimiter_String",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_useLocale_LLocale",
-          "org.apache.harmony.tests.java.util.ScannerTest#test_useRadix_I"]
-}
-]
diff --git a/tools/libjavac/Android.bp b/tools/libjavac/Android.bp
new file mode 100644
index 0000000..c93402e
--- /dev/null
+++ b/tools/libjavac/Android.bp
@@ -0,0 +1,27 @@
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This is a helper library for host tests that compile Java code at test time.
+java_library_host {
+    name: "libjavac",
+
+    visibility: ["//visibility:public"],
+
+    srcs: ["src/**/*.java"],
+
+    static_libs: [
+        "apache-bcel",
+        "guava",
+    ],
+}
diff --git a/tools/libjavac/src/com/android/javac/Javac.java b/tools/libjavac/src/com/android/javac/Javac.java
new file mode 100644
index 0000000..8ed7278
--- /dev/null
+++ b/tools/libjavac/src/com/android/javac/Javac.java
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.javac;
+
+import com.google.common.collect.Lists;
+import com.google.common.io.Files;
+
+import java.util.stream.Collectors;
+import org.apache.bcel.classfile.ClassParser;
+import org.apache.bcel.classfile.JavaClass;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Locale;
+
+import javax.annotation.processing.Processor;
+import javax.tools.DiagnosticCollector;
+import javax.tools.JavaCompiler;
+import javax.tools.JavaFileObject;
+import javax.tools.SimpleJavaFileObject;
+import javax.tools.StandardJavaFileManager;
+import javax.tools.StandardLocation;
+import javax.tools.ToolProvider;
+
+/**
+ * Helper class for compiling snippets of Java source and providing access to the resulting class
+ * files.
+ */
+public class Javac {
+
+    private final JavaCompiler mJavac;
+    private final StandardJavaFileManager mFileMan;
+    private final List<JavaFileObject> mCompilationUnits;
+    private final File mClassOutDir;
+
+    public Javac() throws IOException {
+        mJavac = ToolProvider.getSystemJavaCompiler();
+        mFileMan = mJavac.getStandardFileManager(null, Locale.US, null);
+        mClassOutDir = Files.createTempDir();
+        mFileMan.setLocation(StandardLocation.CLASS_OUTPUT, Arrays.asList(mClassOutDir));
+        mFileMan.setLocation(StandardLocation.CLASS_PATH, Arrays.asList(mClassOutDir));
+        mCompilationUnits = new ArrayList<>();
+    }
+
+    private String classToFileName(String classname) {
+        return classname.replace('.', '/');
+    }
+
+    public Javac addSource(String classname, String contents) {
+        JavaFileObject java = new SimpleJavaFileObject(URI.create(
+                String.format("string:///%s.java", classToFileName(classname))),
+                JavaFileObject.Kind.SOURCE
+                ){
+            @Override
+            public CharSequence getCharContent(boolean ignoreEncodingErrors) throws IOException {
+                return contents;
+            }
+        };
+        mCompilationUnits.add(java);
+        return this;
+    }
+
+    public void compile() {
+        compileWithAnnotationProcessor(null);
+    }
+
+    public void compileWithAnnotationProcessor(Processor processor) {
+        DiagnosticCollector<JavaFileObject> diagnosticCollector = new DiagnosticCollector<>();
+        JavaCompiler.CompilationTask task = mJavac.getTask(
+                null,
+                mFileMan,
+                diagnosticCollector,
+                null,
+                null,
+                mCompilationUnits);
+        if (processor != null) {
+            task.setProcessors(Lists.newArrayList(processor));
+        }
+        boolean result = task.call();
+        if (!result) {
+            throw new IllegalStateException(
+                    "Compilation failed:" +
+                            diagnosticCollector.getDiagnostics()
+                                    .stream()
+                                    .map(Object::toString)
+                                    .collect(Collectors.joining("\n")));
+        }
+    }
+
+    public InputStream getOutputFile(String filename) throws IOException {
+        Iterable<? extends JavaFileObject> objs = mFileMan.getJavaFileObjects(
+                new File(mClassOutDir, filename));
+        if (!objs.iterator().hasNext()) {
+            return null;
+        }
+        return objs.iterator().next().openInputStream();
+    }
+
+    public InputStream getClassFile(String classname) throws IOException {
+        return getOutputFile(String.format("%s.class", classToFileName(classname)));
+    }
+
+    public JavaClass getCompiledClass(String classname) throws IOException {
+        return new ClassParser(getClassFile(classname),
+                String.format("%s.class", classToFileName(classname))).parse();
+    }
+}
diff --git a/tools/luci/config/cr-buildbucket.cfg b/tools/luci/config/cr-buildbucket.cfg
index 8c0125a..89aeda2 100644
--- a/tools/luci/config/cr-buildbucket.cfg
+++ b/tools/luci/config/cr-buildbucket.cfg
@@ -33,7 +33,10 @@
     builder_defaults {
       dimensions: "pool:luci.art.ci"
       service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
-      execution_timeout_secs: 108000  # 30h
+      # We have a limited set of runners, so put the expiration time close to
+      # the time it takes to run all steps on most builders.
+      expiration_secs: 36000 # 10h
+      execution_timeout_secs: 133200  # 37h
       swarming_tags: "vpython:native-python-wrapper"
       build_numbers: YES
       # Some builders require specific hardware, so we make the assignment in bots.cfg
diff --git a/tools/luci/config/luci-milo.cfg b/tools/luci/config/luci-milo.cfg
index a4f95a2..22b4e17 100644
--- a/tools/luci/config/luci-milo.cfg
+++ b/tools/luci/config/luci-milo.cfg
@@ -1,177 +1,6 @@
 logo_url: "https://storage.googleapis.com/chrome-infra-public/logo/art-logo.png"
 
 consoles {
-  id: "main"
-  name: "ART Main Console"
-  repo_url: "https://android.googlesource.com/platform/art"
-  refs: "refs/heads/master"
-  manifest_name: "REVISION"
-
-  builders {
-    name: "buildbot/client.art/angler-armv7-debug"
-    name: "buildbucket/luci.art.ci/angler-armv7-debug"
-    category: "angler|armv7"
-    short_name: "dbg"
-  }
-  builders {
-    name: "buildbot/client.art/angler-armv7-non-gen-cc"
-    name: "buildbucket/luci.art.ci/angler-armv7-non-gen-cc"
-    category: "angler|armv7"
-    short_name: "ngen"
-  }
-  builders {
-    name: "buildbot/client.art/angler-armv7-ndebug"
-    name: "buildbucket/luci.art.ci/angler-armv7-ndebug"
-    category: "angler|armv7"
-    short_name: "ndbg"
-  }
-  builders {
-    name: "buildbot/client.art/angler-armv8-debug"
-    name: "buildbucket/luci.art.ci/angler-armv8-debug"
-    category: "angler|armv8"
-    short_name: "dbg"
-  }
-  builders {
-    name: "buildbot/client.art/angler-armv8-non-gen-cc"
-    name: "buildbucket/luci.art.ci/angler-armv8-non-gen-cc"
-    category: "angler|armv8"
-    short_name: "ngen"
-  }
-  builders {
-    name: "buildbot/client.art/angler-armv8-ndebug"
-    name: "buildbucket/luci.art.ci/angler-armv8-ndebug"
-    category: "angler|armv8"
-    short_name: "ndbg"
-  }
-  builders {
-    name: "buildbot/client.art/aosp-builder-cc"
-    name: "buildbucket/luci.art.ci/aosp-builder-cc"
-    category: "aosp"
-    short_name: "cc"
-  }
-  builders {
-    name: "buildbot/client.art/aosp-builder-cms"
-    name: "buildbucket/luci.art.ci/aosp-builder-cms"
-    category: "aosp"
-    short_name: "cms"
-  }
-  builders {
-    name: "buildbot/client.art/bullhead-armv7-gcstress-ndebug"
-    name: "buildbucket/luci.art.ci/bullhead-armv7-gcstress-ndebug"
-    category: "bullhead|armv7|gcstress"
-    short_name: "dbg"
-  }
-  builders {
-    name: "buildbot/client.art/bullhead-armv8-gcstress-debug"
-    name: "buildbucket/luci.art.ci/bullhead-armv8-gcstress-debug"
-    category: "bullhead|armv8|gcstress"
-    short_name: "dbg"
-  }
-  builders {
-    name: "buildbot/client.art/bullhead-armv8-gcstress-ndebug"
-    name: "buildbucket/luci.art.ci/bullhead-armv8-gcstress-ndebug"
-    category: "bullhead|armv8|gcstress"
-    short_name: "ndbg"
-  }
-  builders {
-    name: "buildbot/client.art/fugu-debug"
-    name: "buildbucket/luci.art.ci/fugu-debug"
-    category: "fugu"
-    short_name: "dbg"
-  }
-  builders {
-    name: "buildbot/client.art/fugu-ndebug"
-    name: "buildbucket/luci.art.ci/fugu-ndebug"
-    category: "fugu"
-    short_name: "ndbg"
-  }
-  builders {
-    name: "buildbot/client.art/host-x86-cms"
-    name: "buildbucket/luci.art.ci/host-x86-cms"
-    category: "host|x86"
-    short_name: "cms"
-  }
-  builders {
-    name: "buildbot/client.art/host-x86-debug"
-    name: "buildbucket/luci.art.ci/host-x86-debug"
-    category: "host|x86"
-    short_name: "dbg"
-  }
-  builders {
-    name: "buildbot/client.art/host-x86-ndebug"
-    name: "buildbucket/luci.art.ci/host-x86-ndebug"
-    category: "host|x86"
-    short_name: "ndbg"
-  }
-  builders {
-    name: "buildbot/client.art/host-x86-gcstress-debug"
-    name: "buildbucket/luci.art.ci/host-x86-gcstress-debug"
-    category: "host|x86"
-    short_name: "gcs"
-  }
-  builders {
-    name: "buildbot/client.art/host-x86-poison-debug"
-    name: "buildbucket/luci.art.ci/host-x86-poison-debug"
-    category: "host|x86"
-    short_name: "psn"
-  }
-  builders {
-    name: "buildbot/client.art/host-x86_64-cdex-fast"
-    name: "buildbucket/luci.art.ci/host-x86_64-cdex-fast"
-    category: "host|x64"
-    short_name: "cdx"
-  }
-  builders {
-    name: "buildbot/client.art/host-x86_64-cms"
-    name: "buildbucket/luci.art.ci/host-x86_64-cms"
-    category: "host|x64"
-    short_name: "cms"
-  }
-  builders {
-    name: "buildbot/client.art/host-x86_64-debug"
-    name: "buildbucket/luci.art.ci/host-x86_64-debug"
-    category: "host|x64"
-    short_name: "dbg"
-  }
-  builders {
-    name: "buildbot/client.art/host-x86_64-non-gen-cc"
-    name: "buildbucket/luci.art.ci/host-x86_64-non-gen-cc"
-    category: "host|x64"
-    short_name: "ngen"
-  }
-  builders {
-    name: "buildbot/client.art/host-x86_64-ndebug"
-    name: "buildbucket/luci.art.ci/host-x86_64-ndebug"
-    category: "host|x64"
-    short_name: "ndbg"
-  }
-  builders {
-    name: "buildbot/client.art/host-x86_64-poison-debug"
-    name: "buildbucket/luci.art.ci/host-x86_64-poison-debug"
-    category: "host|x64"
-    short_name: "psn"
-  }
-  builders {
-    name: "buildbot/client.art/walleye-armv7-poison-debug"
-    name: "buildbucket/luci.art.ci/walleye-armv7-poison-debug"
-    category: "walleye|armv7|poison"
-    short_name: "dbg"
-  }
-  builders {
-    name: "buildbot/client.art/walleye-armv8-poison-debug"
-    name: "buildbucket/luci.art.ci/walleye-armv8-poison-debug"
-    category: "walleye|armv8|poison"
-    short_name: "dbg"
-  }
-  builders {
-    name: "buildbot/client.art/walleye-armv8-poison-ndebug"
-    name: "buildbucket/luci.art.ci/walleye-armv8-poison-ndebug"
-    category: "walleye|armv8|poison"
-    short_name: "ndbg"
-  }
-}
-
-consoles {
   id: "luci"
   name: "ART LUCI Console"
   repo_url: "https://android.googlesource.com/platform/art"
diff --git a/tools/luci/config/luci-notify.cfg b/tools/luci/config/luci-notify.cfg
new file mode 100644
index 0000000..994cdfd
--- /dev/null
+++ b/tools/luci/config/luci-notify.cfg
@@ -0,0 +1,126 @@
+# Defines email notifications for builders.
+# See schema at
+# https://chromium.googlesource.com/infra/luci/luci-go/+/master/luci_notify/api/config/notify.proto
+#
+# Please keep this list sorted by name.
+
+notifiers {
+  name: "art-team+chromium-buildbot"
+  notifications {
+    on_change: true
+    on_success: false
+    on_failure: true
+    on_new_failure: false
+    email {
+      recipients: "art-team+chromium-buildbot@google.com"
+    }
+  }
+  builders {
+    name: "angler-armv7-debug"
+    bucket: "ci"
+  }
+  builders {
+    name: "angler-armv7-non-gen-cc"
+    bucket: "ci"
+  }
+  builders {
+    name: "angler-armv7-ndebug"
+    bucket: "ci"
+  }
+  builders {
+    name: "angler-armv8-debug"
+    bucket: "ci"
+  }
+  builders {
+    name: "angler-armv8-non-gen-cc"
+    bucket: "ci"
+  }
+  builders {
+    name: "angler-armv8-ndebug"
+    bucket: "ci"
+  }
+  builders {
+    name: "aosp-builder-cc"
+    bucket: "ci"
+  }
+  builders {
+    name: "aosp-builder-cms"
+    bucket: "ci"
+  }
+  builders {
+    name: "bullhead-armv7-gcstress-ndebug"
+    bucket: "ci"
+  }
+  builders {
+    name: "bullhead-armv8-gcstress-debug"
+    bucket: "ci"
+  }
+  builders {
+    name: "bullhead-armv8-gcstress-ndebug"
+    bucket: "ci"
+  }
+  builders {
+    name: "fugu-debug"
+    bucket: "ci"
+  }
+  builders {
+    name: "fugu-ndebug"
+    bucket: "ci"
+  }
+  builders {
+    name: "host-x86-cms"
+    bucket: "ci"
+  }
+  builders {
+    name: "host-x86-debug"
+    bucket: "ci"
+  }
+  builders {
+    name: "host-x86-gcstress-debug"
+    bucket: "ci"
+  }
+  builders {
+    name: "host-x86-ndebug"
+    bucket: "ci"
+  }
+  builders {
+    name: "host-x86-poison-debug"
+    bucket: "ci"
+  }
+  builders {
+    name: "host-x86_64-cdex-fast"
+    bucket: "ci"
+  }
+  builders {
+    name: "host-x86_64-cms"
+    bucket: "ci"
+  }
+  builders {
+    name: "host-x86_64-debug"
+    bucket: "ci"
+  }
+  builders {
+    name: "host-x86_64-non-gen-cc"
+    bucket: "ci"
+  }
+  builders {
+    name: "host-x86_64-ndebug"
+    bucket: "ci"
+  }
+  builders {
+    name: "host-x86_64-poison-debug"
+    bucket: "ci"
+  }
+  builders {
+    name: "walleye-armv7-poison-debug"
+    bucket: "ci"
+  }
+  builders {
+    name: "walleye-armv8-poison-debug"
+    bucket: "ci"
+  }
+  builders {
+    name: "walleye-armv8-poison-ndebug"
+    bucket: "ci"
+  }
+}
diff --git a/tools/luci/config/luci-notify/email-templates/default.template b/tools/luci/config/luci-notify/email-templates/default.template
new file mode 100644
index 0000000..c6e552c
--- /dev/null
+++ b/tools/luci/config/luci-notify/email-templates/default.template
@@ -0,0 +1,37 @@
+[Build Status] Builder "{{ .Build.Builder | formatBuilderID }}" {{ .Build.Status }}
+
+luci-notify detected a status change for builder "{{ .Build.Builder | formatBuilderID }}" at {{ .Build.UpdateTime | time }}.
+<br/>
+<table>
+  <tr>
+    <td>New status:</td>
+    <td><b>{{ .Build.Status }}</b></td>
+  </tr>
+  <tr>
+    <td>Previous status:</td>
+    <td>{{ .OldStatus }}</td>
+  </tr>
+  <tr>
+    <td>Builder:</td>
+    <td>{{ .Build.Builder | formatBuilderID }}</td>
+  </tr>
+  <tr>
+    <td>Created by:</td>
+    <td>{{ .Build.CreatedBy }}</td>
+  </tr>
+  <tr>
+    <td>Created at:</td>
+    <td>{{ .Build.CreateTime | time }}</td>
+  </tr>
+  <tr>
+    <td>Finished at:</td>
+    <td>{{ .Build.EndTime | time }}</td>
+  </tr>
+  <tr>
+    <td>Summary:</td>
+    <td>{{ .Build.SummaryMarkdown }}</td>
+  </tr>
+</table>
+<br/>
+Full details are available <a href="https://{{.BuildbucketHostname}}/build/{{.Build.Id}}">here</a>.
+<br/><br/>
diff --git a/tools/luci/config/luci-scheduler.cfg b/tools/luci/config/luci-scheduler.cfg
index fa30834..489ba04 100644
--- a/tools/luci/config/luci-scheduler.cfg
+++ b/tools/luci/config/luci-scheduler.cfg
@@ -53,6 +53,80 @@
   triggers: "walleye-armv8-poison-ndebug"
 }
 
+trigger {
+  id: "master-libcore-gitiles-trigger"
+  acl_sets: "default"
+  gitiles: {
+    repo: "https://android.googlesource.com/platform/libcore"
+    refs: "refs/heads/master"
+  }
+
+  triggers: "angler-armv7-debug"
+  triggers: "angler-armv7-non-gen-cc"
+  triggers: "angler-armv7-ndebug"
+  triggers: "angler-armv8-debug"
+  triggers: "angler-armv8-non-gen-cc"
+  triggers: "angler-armv8-ndebug"
+  triggers: "aosp-builder-cc"
+  triggers: "aosp-builder-cms"
+  triggers: "bullhead-armv7-gcstress-ndebug"
+  triggers: "bullhead-armv8-gcstress-debug"
+  triggers: "bullhead-armv8-gcstress-ndebug"
+  triggers: "fugu-debug"
+  triggers: "fugu-ndebug"
+  triggers: "host-x86-cms"
+  triggers: "host-x86-debug"
+  triggers: "host-x86-gcstress-debug"
+  triggers: "host-x86-ndebug"
+  triggers: "host-x86-poison-debug"
+  triggers: "host-x86_64-cdex-fast"
+  triggers: "host-x86_64-cms"
+  triggers: "host-x86_64-debug"
+  triggers: "host-x86_64-non-gen-cc"
+  triggers: "host-x86_64-ndebug"
+  triggers: "host-x86_64-poison-debug"
+  triggers: "walleye-armv7-poison-debug"
+  triggers: "walleye-armv8-poison-debug"
+  triggers: "walleye-armv8-poison-ndebug"
+}
+
+trigger {
+  id: "master-art-manifest-gitiles-trigger"
+  acl_sets: "default"
+  gitiles: {
+    repo: "https://android.googlesource.com/platform/manifest"
+    refs: "refs/heads/master-art"
+  }
+
+  triggers: "angler-armv7-debug"
+  triggers: "angler-armv7-non-gen-cc"
+  triggers: "angler-armv7-ndebug"
+  triggers: "angler-armv8-debug"
+  triggers: "angler-armv8-non-gen-cc"
+  triggers: "angler-armv8-ndebug"
+  triggers: "aosp-builder-cc"
+  triggers: "aosp-builder-cms"
+  triggers: "bullhead-armv7-gcstress-ndebug"
+  triggers: "bullhead-armv8-gcstress-debug"
+  triggers: "bullhead-armv8-gcstress-ndebug"
+  triggers: "fugu-debug"
+  triggers: "fugu-ndebug"
+  triggers: "host-x86-cms"
+  triggers: "host-x86-debug"
+  triggers: "host-x86-gcstress-debug"
+  triggers: "host-x86-ndebug"
+  triggers: "host-x86-poison-debug"
+  triggers: "host-x86_64-cdex-fast"
+  triggers: "host-x86_64-cms"
+  triggers: "host-x86_64-debug"
+  triggers: "host-x86_64-non-gen-cc"
+  triggers: "host-x86_64-ndebug"
+  triggers: "host-x86_64-poison-debug"
+  triggers: "walleye-armv7-poison-debug"
+  triggers: "walleye-armv8-poison-debug"
+  triggers: "walleye-armv8-poison-ndebug"
+}
+
 job {
   id: "angler-armv7-debug"
   acl_sets: "default"
diff --git a/tools/mount-buildbot-apexes.sh b/tools/mount-buildbot-apexes.sh
deleted file mode 100755
index 778d634..0000000
--- a/tools/mount-buildbot-apexes.sh
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2019 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Mount Android Runtime and Core Libraries APEX packages required in the chroot directory.
-# This script emulates some the actions performed by `apexd`.
-
-green='\033[0;32m'
-nc='\033[0m'
-
-# Setup as root, as some actions performed here require it.
-adb root
-adb wait-for-device
-
-# Exit early if there is no chroot.
-[[ -n "$ART_TEST_CHROOT" ]] || exit
-
-# Check that ART_TEST_CHROOT is correctly defined.
-[[ "$ART_TEST_CHROOT" = /* ]] || { echo "$ART_TEST_CHROOT is not an absolute path"; exit 1; }
-
-# Check that the "$ART_TEST_CHROOT/apex" directory exists.
-adb shell test -d "$ART_TEST_CHROOT/apex" \
-  || { echo "$ART_TEST_CHROOT/apex does not exist or is not a directory"; exit 1; }
-
-# Create a directory where we extract APEX packages' payloads (ext4 images)
-# under the chroot directory.
-apex_image_dir="/tmp/apex"
-adb shell mkdir -p "$ART_TEST_CHROOT$apex_image_dir"
-
-# activate_system_package APEX_PACKAGE APEX_NAME
-# ----------------------------------------------
-# Extract payload (ext4 image) from system APEX_PACKAGE and mount it as
-# APEX_NAME in `/apex` under the chroot directory.
-activate_system_package() {
-  local apex_package=$1
-  local apex_name=$2
-  local apex_package_path="/system/apex/$apex_package"
-  local abs_mount_point="$ART_TEST_CHROOT/apex/$apex_name"
-  local abs_image_filename="$ART_TEST_CHROOT$apex_image_dir/$apex_name.img"
-
-  # Make sure that the (absolute) path to the mounted ext4 image is less than
-  # 64 characters, which is a hard limit set in the kernel for loop device
-  # filenames (otherwise, we would get an error message from `losetup`, used
-  # by `mount` to manage the loop device).
-  [[ "${#abs_image_filename}" -ge 64 ]] \
-    && { echo "Filename $abs_image_filename is too long to be used with a loop device"; exit 1; }
-
-  echo -e "${green}Activating package $apex_package as $apex_name${nc}"
-
-  # Extract payload (ext4 image). As standard Android builds do not contain
-  # `unzip`, we use the one we built and sync'd to the chroot directory instead.
-  local payload_filename="apex_payload.img"
-  adb shell chroot "$ART_TEST_CHROOT" \
-    /system/bin/unzip -q "$apex_package_path" "$payload_filename" -d "$apex_image_dir"
-  # Rename the extracted payload to have its name match the APEX's name.
-  adb shell mv "$ART_TEST_CHROOT$apex_image_dir/$payload_filename" "$abs_image_filename"
-  # Check that the mount point is available.
-  adb shell mount | grep -q " on $abs_mount_point" && \
-    { echo "$abs_mount_point is already used as mount point"; exit 1; }
-  # Mount the ext4 image.
-  adb shell mkdir -p "$abs_mount_point"
-  adb shell mount -o loop,ro "$abs_image_filename" "$abs_mount_point"
-}
-
-# Activate the Android Runtime APEX.
-# Note: We use the Debug Runtime APEX (which is a superset of the Release Runtime APEX).
-activate_system_package com.android.runtime.debug.apex com.android.runtime
diff --git a/tools/parallel_run.py b/tools/parallel_run.py
new file mode 100755
index 0000000..0fc9ebd
--- /dev/null
+++ b/tools/parallel_run.py
@@ -0,0 +1,79 @@
+#!/usr/bin/python3
+#
+# Copyright 2019, The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Run a command using multiple cores in parallel. Stop when one exits zero and save the log from
+that run.
+"""
+
+import argparse
+import concurrent.futures
+import contextlib
+import itertools
+import os
+import os.path
+import shutil
+import subprocess
+import tempfile
+
+
+def run_one(cmd, tmpfile):
+  """Run the command and log result to tmpfile. Return both the file name and returncode."""
+  with open(tmpfile, "x") as fd:
+    return tmpfile, subprocess.run(cmd, stdout=fd).returncode
+
+def main():
+  parser = argparse.ArgumentParser(
+      description="""Run a command using multiple cores and save non-zero exit log
+
+      The cmd should print all output to stdout. Stderr is not captured."""
+  )
+  parser.add_argument("--jobs", "-j", type=int, help="max number of jobs. default 60", default=60)
+  parser.add_argument("cmd", help="command to run")
+  parser.add_argument("--out", type=str, help="where to put result", default="out_log")
+  args = parser.parse_args()
+  cnt = 0
+  found_fail = False
+  ids = itertools.count(0)
+  with tempfile.TemporaryDirectory() as td:
+    print("Temporary files in {}".format(td))
+    with concurrent.futures.ProcessPoolExecutor(args.jobs) as p:
+      fs = set()
+      while len(fs) != 0 or not found_fail:
+        if not found_fail:
+          for _, idx in zip(range(args.jobs - len(fs)), ids):
+            fs.add(p.submit(run_one, args.cmd, os.path.join(td, "run_log." + str(idx))))
+        ws = concurrent.futures.wait(fs, return_when=concurrent.futures.FIRST_COMPLETED)
+        fs = ws.not_done
+        done = list(map(lambda a: a.result(), ws.done))
+        cnt += len(done)
+        print("\r{} runs".format(cnt), end="")
+        failed = [d for d,r in done if r != 0]
+        succ = [d for d,r in done if r == 0]
+        for f in succ:
+          os.remove(f)
+        if len(failed) != 0:
+          if not found_fail:
+            found_fail = True
+            print("\rFailed at {} runs".format(cnt))
+            if len(failed) != 1:
+              for f,i in zip(failed, range(len(failed))):
+                shutil.copyfile(f, args.out+"."+str(i))
+            else:
+              shutil.copyfile(failed[0], args.out)
+
+if __name__ == '__main__':
+  main()
diff --git a/tools/public.libraries.buildbot.txt b/tools/public.libraries.buildbot.txt
index 9b171a2..e23cf2c 100644
--- a/tools/public.libraries.buildbot.txt
+++ b/tools/public.libraries.buildbot.txt
@@ -1,14 +1,6 @@
-libart.so
-libartd.so
-libartbase.so
-libartbased.so
-libdexfile.so
-libdexfiled.so
 libbacktrace.so
 libc.so
 libc++.so
 libdl.so
 libm.so
 libnativehelper.so
-libprofile.so
-libprofiled.so
diff --git a/tools/run-gtests.sh b/tools/run-gtests.sh
index 7360ddd..5a4ab3a 100755
--- a/tools/run-gtests.sh
+++ b/tools/run-gtests.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#! /bin/bash
 #
 # Copyright (C) 2019 The Android Open Source Project
 #
@@ -14,32 +14,55 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Script to run all gtests located under $ART_TEST_CHROOT/data/nativetest{64}
+if [[ $1 = -h ]]; then
+  cat <<EOF
+Script to run gtests located in the ART (Testing) APEX.
 
-ADB="${ADB:-adb}"
-all_tests=()
+If called with arguments, only those tests are run, as specified by their
+absolute paths (starting with /apex). All gtests are run otherwise.
+EOF
+  exit
+fi
+
+if [[ -z "$ART_TEST_CHROOT" ]]; then
+  echo 'ART_TEST_CHROOT environment variable is empty; please set it before running this script.'
+  exit 1
+fi
+
+adb="${ADB:-adb}"
+
+android_i18n_root=/apex/com.android.i18n
+android_art_root=/apex/com.android.art
+android_tzdata_root=/apex/com.android.tzdata
+
+if [[ $1 = -j* ]]; then
+  # TODO(b/129930445): Implement support for parallel execution.
+  shift
+fi
+
+if [ $# -gt 0 ]; then
+  tests="$@"
+else
+  # Search for executables under the `bin/art` directory of the ART APEX.
+  tests=$("$adb" shell chroot "$ART_TEST_CHROOT" \
+    find "$android_art_root/bin/art" -type f -perm /ugo+x | sort)
+fi
+
 failing_tests=()
 
-function add_tests {
-  # Search for *_test and *_tests executables, but skip e.g. libfoo_test.so.
-  all_tests+=$(${ADB} shell "test -d $ART_TEST_CHROOT/$1 && chroot $ART_TEST_CHROOT find $1 -type f -perm /ugo+x -name \*_test\* \! -name \*.so")
-}
-
-function fail {
-  failing_tests+=($1)
-}
-
-add_tests "/data/nativetest"
-add_tests "/data/nativetest64"
-
-for i in $all_tests; do
-  echo $i
-  ${ADB} shell "chroot $ART_TEST_CHROOT env LD_LIBRARY_PATH= ANDROID_ROOT='/system' ANDROID_RUNTIME_ROOT=/system ANDROID_TZDATA_ROOT='/system' $i" || fail $i
+for t in $tests; do
+  echo "$t"
+  "$adb" shell chroot "$ART_TEST_CHROOT" \
+    env ANDROID_ART_ROOT="$android_art_root" \
+        ANDROID_I18N_ROOT="$android_i18n_root" \
+        ANDROID_TZDATA_ROOT="$android_tzdata_root" \
+        $t \
+    || failing_tests+=("$t")
 done
 
 if [ -n "$failing_tests" ]; then
-  for i in "${failing_tests[@]}"; do
-    echo "Failed test: $i"
+  for t in "${failing_tests[@]}"; do
+    echo "Failed test: $t"
   done
   exit 1
 fi
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index fbd8077..9ad9d66 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -19,16 +19,6 @@
   exit 1
 fi
 
-# Prevent JDWP tests from running on the following devices running
-# Android O (they are failing because of a network-related issue), as
-# a workaround for b/74725685:
-# - FA7BN1A04406 (walleye device testing configuration aosp-poison/volantis-armv7-poison-debug)
-# - FA7BN1A04412 (walleye device testing configuration aosp-poison/volantis-armv8-poison-ndebug)
-# - FA7BN1A04433 (walleye device testing configuration aosp-poison/volantis-armv8-poison-debug)
-case "$ANDROID_SERIAL" in
-  (FA7BN1A04406|FA7BN1A04412|FA7BN1A04433) exit 0;;
-esac
-
 source build/envsetup.sh >&/dev/null # for get_build_var, setpaths
 setpaths # include platform prebuilt java, javac, etc in $PATH.
 
@@ -58,7 +48,7 @@
 # Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
 # because that's what we use for compiling the core.art image.
 # It may contain additional modules from TEST_CORE_JARS.
-BOOT_CLASSPATH_JARS="core-oj core-libart okhttp bouncycastle apache-xml conscrypt"
+BOOT_CLASSPATH_JARS="core-oj core-libart core-icu4j okhttp bouncycastle apache-xml conscrypt"
 
 vm_args=""
 art="$android_root/bin/art"
@@ -90,6 +80,12 @@
 instant_jit=false
 variant_cmdline_parameter="--variant=X32"
 dump_command="/bin/true"
+called_from_libjdwp=${RUN_JDWP_TESTS_CALLED_FROM_LIBJDWP:-false}
+run_internal_jdwp_test=false
+# Let LUCI bots do what they want.
+if test -v LUCI_CONTEXT; then
+  run_internal_jdwp_test=true
+fi
 # Timeout of JDWP test in ms.
 #
 # Note: some tests expect a timeout to check that *no* reply/event is received for a specific case.
@@ -97,6 +93,7 @@
 # continuous testing. This value can be adjusted to fit the configuration of the host machine(s).
 jdwp_test_timeout=10000
 
+skip_tests=
 gdb_target=
 has_gdb="no"
 
@@ -132,7 +129,8 @@
     shift
   elif [[ "$1" == "--mode=jvm" ]]; then
     mode="ri"
-    make_target_name="apache-harmony-jdwp-tests-host"
+    make_target_name="apache-harmony-jdwp-tests"
+    run_internal_jdwp_test=true
     art="$(which java)"
     art_debugee="$(which java)"
     # No need for extra args.
@@ -148,6 +146,19 @@
     # We don't care about jit with the RI
     use_jit=false
     shift
+  elif [[ $1 == --skip-test ]]; then
+    skip_tests="${skip_tests},${2}"
+    # remove the --skip-test
+    args=${args/$1}
+    shift
+    # remove the arg
+    args=${args/$1}
+    shift
+  elif [[ $1 == --force-run-test ]]; then
+    run_internal_jdwp_test=true
+    # remove the --force-run-test
+    args=${args/$1}
+    shift
   elif [[ $1 == --test-timeout-ms ]]; then
     # Remove the --test-timeout-ms from the arguments.
     args=${args/$1}
@@ -234,6 +245,11 @@
   fi
 done
 
+if [ ! -t 1 ] ; then
+  # Suppress color codes if not attached to a terminal
+  args="$args --no-color"
+fi
+
 if [[ $mode == "target" ]]; then
   # Honor environment variable ART_TEST_CHROOT.
   if [[ -n "$ART_TEST_CHROOT" ]]; then
@@ -247,6 +263,16 @@
   fi
 fi
 
+if [[ $called_from_libjdwp != "true" ]]; then
+  if [[ $run_internal_jdwp_test = "false" ]]; then
+    echo "Calling run_jdwp_tests.sh directly is probably not what you want. You probably want to"
+    echo "run ./art/tools/run-libjdwp-tests.sh instead in order to test the JDWP implementation"
+    echo "used by apps. If you really wish to run these tests using the deprecated internal JDWP"
+    echo "implementation pass the '--force-run-test' flag."
+    exit 1
+  fi
+fi
+
 if [[ $has_gdb = "yes" ]]; then
   if [[ $explicit_debug = "no" ]]; then
     debug="yes"
@@ -316,7 +342,7 @@
 
 if [[ ! -f $test_jar ]]; then
   echo "Before running, you must build jdwp tests and vogar:" \
-       "make ${make_target_name} vogar"
+       "m ${make_target_name} vogar"
   exit 1
 fi
 
@@ -410,6 +436,7 @@
       --vm-arg -Djpda.settings.transportAddress=127.0.0.1:55107 \
       --vm-arg -Djpda.settings.dumpProcess="$dump_command" \
       --vm-arg -Djpda.settings.debuggeeJavaPath="$art_debugee $plugin $debuggee_args" \
+      --vm-arg -Djpda.settings.badTestCases="$skip_tests" \
       --classpath "$test_jar" \
       $toolchain_args \
       $test
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 735549e..726b12d 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -14,20 +14,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# Exit on errors.
+set -e
+
 if [ ! -d libcore ]; then
   echo "Script needs to be run at the root of the android tree"
   exit 1
 fi
 
-source build/envsetup.sh >&/dev/null # for get_build_var, setpaths
-setpaths # include platform prebuilt java, javac, etc in $PATH.
-
-if [ -z "$ANDROID_PRODUCT_OUT" ] ; then
-  JAVA_LIBRARIES=out/target/common/obj/JAVA_LIBRARIES
-else
-  JAVA_LIBRARIES=${ANDROID_PRODUCT_OUT}/../../common/obj/JAVA_LIBRARIES
-fi
-
 # "Root" (actually "system") directory on device (in the case of
 # target testing).
 android_root=${ART_TEST_ANDROID_ROOT:-/system}
@@ -35,8 +29,12 @@
 function classes_jar_path {
   local var="$1"
   local suffix="jar"
-
-  echo "${JAVA_LIBRARIES}/${var}_intermediates/classes.${suffix}"
+  if [ -z "$ANDROID_PRODUCT_OUT" ] ; then
+    local java_libraries=out/target/common/obj/JAVA_LIBRARIES
+  else
+    local java_libraries=${ANDROID_PRODUCT_OUT}/../../common/obj/JAVA_LIBRARIES
+  fi
+  echo "${java_libraries}/${var}_intermediates/classes.${suffix}"
 }
 
 function cparg {
@@ -57,45 +55,70 @@
   done
 }
 
-# Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
-# because that's what we use for compiling the core.art image.
-# It may contain additional modules from TEST_CORE_JARS.
-BOOT_CLASSPATH_JARS="core-oj core-libart okhttp bouncycastle apache-xml conscrypt"
+function usage {
+  local me=$(basename "${BASH_SOURCE[0]}")
+  (
+    cat << EOF
+  Usage: ${me} --mode=<mode> [options] [-- <package_to_test> ...]
 
-DEPS="core-tests jsr166-tests mockito-target"
+  Run libcore tests using the vogar testing tool.
 
-for lib in $DEPS
-do
-  if [[ ! -f "$(classes_jar_path "$lib")" ]]; then
-    echo "${lib} is missing. Before running, you must run art/tools/buildbot-build.sh"
-    exit 1
-  fi
-done
+  Required parameters:
+    --mode=device|host|jvm Specify where tests should be run.
 
-expectations="--expectations art/tools/libcore_failures.txt"
+  Optional parameters:
+    --debug                Use debug version of ART (device|host only).
+    --dry-run              Print vogar command-line, but do not run.
+    --no-getrandom         Ignore failures from getrandom() (for kernel < 3.17).
+    --no-jit               Disable JIT (device|host only).
+    --Xgc:gcstress         Enable GC stress configuration (device|host only).
 
-emulator="no"
-if [ "$ANDROID_SERIAL" = "emulator-5554" ]; then
-  emulator="yes"
-fi
+  The script passes unrecognized options to the command-line created for vogar.
 
-# Use JIT compiling by default.
-use_jit=true
+  The script runs a hardcoded list of libcore test packages by default. The user
+  may run a subset of packages by appending '--' followed by a list of package
+  names.
+
+  Examples:
+
+    1. Run full test suite on host:
+      ${me} --mode=host
+
+    2. Run full test suite on device:
+      ${me} --mode=device
+
+    3. Run tests only from the libcore.java.lang package on device:
+      ${me} --mode=device -- libcore.java.lang
+EOF
+  ) | sed -e 's/^  //' >&2 # Strip leading whitespace from heredoc.
+}
 
 # Packages that currently work correctly with the expectation files.
-working_packages=("libcore.dalvik.system"
+working_packages=("libcore.android.system"
+                  "libcore.build"
+                  "libcore.dalvik.system"
+                  "libcore.java.awt"
                   "libcore.java.lang"
                   "libcore.java.math"
                   "libcore.java.text"
                   "libcore.java.util"
                   "libcore.javax.crypto"
+                  "libcore.javax.net"
                   "libcore.javax.security"
                   "libcore.javax.sql"
                   "libcore.javax.xml"
+                  "libcore.libcore.internal"
                   "libcore.libcore.io"
                   "libcore.libcore.net"
                   "libcore.libcore.reflect"
                   "libcore.libcore.util"
+                  "libcore.libcore.timezone"
+                  "libcore.sun.invoke"
+                  "libcore.sun.net"
+                  "libcore.sun.misc"
+                  "libcore.sun.security"
+                  "libcore.sun.util"
+                  "libcore.xml"
                   "org.apache.harmony.annotation"
                   "org.apache.harmony.crypto"
                   "org.apache.harmony.luni"
@@ -115,51 +138,113 @@
 # changes in case of failures.
 # "org.apache.harmony.security"
 
-vogar_args=$@
-gcstress=false
-debug=false
+#
+# Setup environment for running tests.
+#
+source build/envsetup.sh >&/dev/null # for get_build_var, setpaths
+setpaths # include platform prebuilt java, javac, etc in $PATH.
 
-# Don't use device mode by default.
-device_mode=false
+# Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
+# because that's what we use for compiling the core.art image.
+# It may contain additional modules from TEST_CORE_JARS.
+BOOT_CLASSPATH_JARS="core-oj core-libart core-icu4j okhttp bouncycastle apache-xml conscrypt"
 
-while true; do
-  if [[ "$1" == "--mode=device" ]]; then
-    device_mode=true
-    # Remove the --mode=device from the arguments and replace it with --mode=device_testdex
-    vogar_args=${vogar_args/$1}
-    vogar_args="$vogar_args --mode=device_testdex"
-    vogar_args="$vogar_args --vm-arg -Ximage:/data/art-test/core.art"
-    vogar_args="$vogar_args $(boot_classpath_arg /system/framework -testdex $BOOT_CLASSPATH_JARS)"
-    shift
-  elif [[ "$1" == "--mode=host" ]]; then
-    # We explicitly give a wrong path for the image, to ensure vogar
-    # will create a boot image with the default compiler. Note that
-    # giving an existing image on host does not work because of
-    # classpath/resources differences when compiling the boot image.
-    vogar_args="$vogar_args --vm-arg -Ximage:/non/existent/vogar.art"
-    shift
-  elif [[ "$1" == "--no-jit" ]]; then
-    # Remove the --no-jit from the arguments.
-    vogar_args=${vogar_args/$1}
-    use_jit=false
-    shift
-  elif [[ "$1" == "--debug" ]]; then
-    # Remove the --debug from the arguments.
-    vogar_args=${vogar_args/$1}
-    vogar_args="$vogar_args --vm-arg -XXlib:libartd.so --vm-arg -XX:SlowDebug=true"
-    debug=true
-    shift
-  elif [[ "$1" == "-Xgc:gcstress" ]]; then
-    gcstress=true
-    shift
-  elif [[ "$1" == "" ]]; then
-    break
-  else
-    shift
+DEPS="core-tests jsr166-tests mockito-target"
+
+for lib in $DEPS
+do
+  if [[ ! -f "$(classes_jar_path "$lib")" ]]; then
+    echo "${lib} is missing. Before running, you must run art/tools/buildbot-build.sh"
+    exit 1
   fi
 done
 
-if $device_mode; then
+#
+# Defaults affected by command-line parsing
+#
+
+# Use JIT compiling by default.
+use_jit=true
+
+gcstress=false
+debug=false
+dry_run=false
+
+# Run tests that use the getrandom() syscall? (Requires Linux 3.17+).
+getrandom=true
+
+# Execution mode specifies where to run tests (device|host|jvm).
+execution_mode=""
+
+# Default expectations file.
+expectations="--expectations art/tools/libcore_failures.txt"
+
+vogar_args=""
+while [ -n "$1" ]; do
+  case "$1" in
+    --mode=device)
+      # Use --mode=device_testdex not --mode=device for buildbot-build.sh.
+      # See commit 191cae33c7c24e for more details.
+      vogar_args="$vogar_args --mode=device_testdex"
+      vogar_args="$vogar_args --vm-arg -Ximage:/data/art-test/core.art"
+      vogar_args="$vogar_args $(boot_classpath_arg /system/framework -testdex $BOOT_CLASSPATH_JARS)"
+      execution_mode="device"
+      ;;
+    --mode=host)
+      # We explicitly give a wrong path for the image, to ensure vogar
+      # will create a boot image with the default compiler. Note that
+      # giving an existing image on host does not work because of
+      # classpath/resources differences when compiling the boot image.
+      vogar_args="$vogar_args $1 --vm-arg -Ximage:/non/existent/vogar.art"
+      execution_mode="host"
+      ;;
+    --mode=jvm)
+      vogar_args="$vogar_args $1"
+      execution_mode="jvm"
+      ;;
+    --no-getrandom)
+      getrandom=false
+      ;;
+    --no-jit)
+      use_jit=false
+      ;;
+    --debug)
+      vogar_args="$vogar_args --vm-arg -XXlib:libartd.so --vm-arg -XX:SlowDebug=true"
+      debug=true
+      ;;
+    -Xgc:gcstress)
+      vogar_args="$vogar_args $1"
+      gcstress=true
+      ;;
+    --dry-run)
+      dry_run=true
+      ;;
+    --)
+      shift
+      # Assume remaining elements are packages to test.
+      user_packages=("$@")
+      break
+      ;;
+    --help)
+      usage
+      exit 1
+      ;;
+    *)
+      vogar_args="$vogar_args $1"
+      ;;
+  esac
+  shift
+done
+
+if [ -z "$execution_mode" ]; then
+  usage
+  exit 1
+fi
+
+# Default timeout, gets overridden on device under gcstress.
+timeout_secs=480
+
+if [ $execution_mode = "device" ]; then
   # Honor environment variable ART_TEST_CHROOT.
   if [[ -n "$ART_TEST_CHROOT" ]]; then
     # Set Vogar's `--chroot` option.
@@ -171,46 +256,62 @@
     vogar_args="$vogar_args --device-dir=/data/local/tmp"
   fi
   vogar_args="$vogar_args --vm-command=$android_root/bin/art"
-fi
 
-# Increase the timeout, as vogar cannot set individual test
-# timeout when being asked to run packages, and some tests go above
-# the default timeout.
-if $gcstress && $debug && $device_mode; then
-  vogar_args="$vogar_args --timeout 1440"
-else
-  vogar_args="$vogar_args --timeout 480"
-fi
-
-# set the toolchain to use.
-vogar_args="$vogar_args --toolchain d8 --language CUR"
-
-# JIT settings.
-if $use_jit; then
-  vogar_args="$vogar_args --vm-arg -Xcompiler-option --vm-arg --compiler-filter=quicken"
-fi
-vogar_args="$vogar_args --vm-arg -Xusejit:$use_jit"
-
-# gcstress may lead to timeouts, so we need dedicated expectations files for it.
-if $gcstress; then
-  expectations="$expectations --expectations art/tools/libcore_gcstress_failures.txt"
-  if $debug; then
-    expectations="$expectations --expectations art/tools/libcore_gcstress_debug_failures.txt"
+  # Increase the timeout, as vogar cannot set individual test
+  # timeout when being asked to run packages, and some tests go above
+  # the default timeout.
+  if $gcstress; then
+    if $debug; then
+      timeout_secs=1440
+    else
+      timeout_secs=900
+    fi
   fi
-else
-  # We only run this package when not under gcstress as it can cause timeouts. See b/78228743.
-  working_packages+=("libcore.libcore.icu")
+fi  # $execution_mode = "device"
+
+if [ $execution_mode = "device" -o $execution_mode = "host" ]; then
+  # Add timeout to vogar command-line.
+  vogar_args="$vogar_args --timeout $timeout_secs"
+
+  # set the toolchain to use.
+  vogar_args="$vogar_args --toolchain d8 --language CUR"
+
+  # JIT settings.
+  if $use_jit; then
+    vogar_args="$vogar_args --vm-arg -Xcompiler-option --vm-arg --compiler-filter=quicken"
+  fi
+  vogar_args="$vogar_args --vm-arg -Xusejit:$use_jit"
+
+  # gcstress may lead to timeouts, so we need dedicated expectations files for it.
+  if $gcstress; then
+    expectations="$expectations --expectations art/tools/libcore_gcstress_failures.txt"
+    if $debug; then
+      expectations="$expectations --expectations art/tools/libcore_gcstress_debug_failures.txt"
+    fi
+  else
+    # We only run this package when user has not specified packages
+    # to run and not under gcstress as it can cause timeouts. See
+    # b/78228743.
+    working_packages+=("libcore.libcore.icu")
+  fi
+
+  if $getrandom; then :; else
+    # Ignore failures in tests that use the system calls not supported
+    # on fugu (Nexus Player, kernel version Linux 3.10).
+    expectations="$expectations --expectations art/tools/libcore_fugu_failures.txt"
+  fi
 fi
 
-# Disable network-related libcore tests that are failing on the following
-# devices running Android O, as a workaround for b/74725685:
-# - FA7BN1A04406 (walleye device testing configuration aosp-poison/volantis-armv7-poison-debug)
-# - FA7BN1A04412 (walleye device testing configuration aosp-poison/volantis-armv8-poison-ndebug)
-# - FA7BN1A04433 (walleye device testing configuration aosp-poison/volantis-armv8-poison-debug)
-case "$ANDROID_SERIAL" in
-  (FA7BN1A04406|FA7BN1A04412|FA7BN1A04433)
-    expectations="$expectations --expectations art/tools/libcore_network_failures.txt";;
-esac
+if [ ! -t 1 ] ; then
+  # Suppress color codes if not attached to a terminal
+  vogar_args="$vogar_args --no-color"
+fi
+
+# Override working_packages if user provided specific packages to
+# test.
+if [[ ${#user_packages[@]} != 0 ]] ; then
+  working_packages=("${user_packages[@]}")
+fi
 
 # Run the tests using vogar.
 echo "Running tests for the following test packages:"
@@ -218,4 +319,4 @@
 
 cmd="vogar $vogar_args $expectations $(cparg $DEPS) ${working_packages[@]}"
 echo "Running $cmd"
-eval $cmd
+$dry_run || eval $cmd
diff --git a/tools/run-libjdwp-tests.sh b/tools/run-libjdwp-tests.sh
index 0bea6a5..b816aab 100755
--- a/tools/run-libjdwp-tests.sh
+++ b/tools/run-libjdwp-tests.sh
@@ -24,6 +24,17 @@
   exit 2
 fi
 
+# See b/141907697. These tests all crash on both the RI and ART when using the libjdwp agent JDWP
+# implementation. To avoid them cluttering the log on the buildbot we explicitly skip them. This
+# list should not be added to.
+declare -a known_bad_tests=(
+  'org.apache.harmony.jpda.tests.jdwp.ClassType_NewInstanceTest#testNewInstance002'
+  'org.apache.harmony.jpda.tests.jdwp.ObjectReference_GetValues002Test#testGetValues002'
+  'org.apache.harmony.jpda.tests.jdwp.ObjectReference_SetValuesTest#testSetValues001'
+  'org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference_NameTest#testName001_NullObject'
+  'org.apache.harmony.jpda.tests.jdwp.ThreadGroupReference_ParentTest#testParent_NullObject'
+)
+
 declare -a args=("$@")
 debug="no"
 has_variant="no"
@@ -58,6 +69,10 @@
     verbose_level=0xFFF
     unset args[arg_idx]
     shift
+  elif [[ $1 == --no-skips ]]; then
+    declare -a known_bad_tests=()
+    unset args[arg_idx]
+    shift
   elif [[ $1 == --verbose ]]; then
     has_verbose="yes"
     shift
@@ -118,8 +133,16 @@
   env "$@"
 }
 
+for skip in "${known_bad_tests[@]}"; do
+  args+=("--skip-test" "$skip")
+done
+
+# Tell run-jdwp-tests.sh it was called from run-libjdwp-tests.sh
+export RUN_JDWP_TESTS_CALLED_FROM_LIBJDWP=true
+
 verbose_run ./art/tools/run-jdwp-tests.sh \
             "${args[@]}"                  \
             --jdwp-path "libjdwp.so"      \
             --vm-arg -Djpda.settings.debuggeeAgentExtraOptions=coredump=y \
+            --vm-arg -Djpda.settings.testSuiteType=libjdwp \
             --expectations "$expect_path"
diff --git a/tools/setup-buildbot-device.sh b/tools/setup-buildbot-device.sh
deleted file mode 100755
index 1359092..0000000
--- a/tools/setup-buildbot-device.sh
+++ /dev/null
@@ -1,181 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# The work does by this script is (mostly) undone by tools/teardown-buildbot-device.sh.
-# Make sure to keep these files in sync.
-
-green='\033[0;32m'
-nc='\033[0m'
-
-if [ "$1" = --verbose ]; then
-  verbose=true
-else
-  verbose=false
-fi
-
-# Setup as root, as some actions performed here require it.
-adb root
-adb wait-for-device
-
-echo -e "${green}Date on host${nc}"
-date
-
-echo -e "${green}Date on device${nc}"
-adb shell date
-
-host_seconds_since_epoch=$(date -u +%s)
-device_seconds_since_epoch=$(adb shell date -u +%s)
-
-abs_time_difference_in_seconds=$(expr $host_seconds_since_epoch - $device_seconds_since_epoch)
-if [ $abs_time_difference_in_seconds -lt 0 ]; then
-  abs_time_difference_in_seconds=$(expr 0 - $abs_time_difference_in_seconds)
-fi
-
-seconds_per_hour=3600
-
-# Kill logd first, so that when we set the adb buffer size later in this file,
-# it is brought up again.
-echo -e "${green}Killing logd, seen leaking on fugu/N${nc}"
-adb shell pkill -9 -U logd logd && echo -e "${green}...logd killed${nc}"
-
-# Update date on device if the difference with host is more than one hour.
-if [ $abs_time_difference_in_seconds -gt $seconds_per_hour ]; then
-  echo -e "${green}Update date on device${nc}"
-  adb shell date -u @$host_seconds_since_epoch
-fi
-
-echo -e "${green}Turn off selinux${nc}"
-adb shell setenforce 0
-$verbose && adb shell getenforce
-
-echo -e "${green}Setting local loopback${nc}"
-adb shell ifconfig lo up
-$verbose && adb shell ifconfig
-
-# Ensure netd is running, as otherwise the logcat would be spammed
-# with the following messages on devices running Android O:
-#
-#   E NetdConnector: Communications error: java.io.IOException: No such file or directory
-#   E mDnsConnector: Communications error: java.io.IOException: No such file or directory
-#
-# Netd was initially disabled as an attempt to solve issues with
-# network-related libcore and JDWP tests failing on devices running
-# Android O (MR1) (see b/74725685). These tests are currently
-# disabled. When a better solution has been found, we should remove
-# the following lines.
-echo -e "${green}Turning on netd${nc}"
-adb shell start netd
-$verbose && adb shell getprop init.svc.netd
-
-if $verbose; then
-  echo -e "${green}List properties${nc}"
-  adb shell getprop
-
-  echo -e "${green}Uptime${nc}"
-  adb shell uptime
-
-  echo -e "${green}Battery info${nc}"
-  adb shell dumpsys battery
-fi
-
-# Fugu only handles buffer size up to 16MB.
-product_name=$(adb shell getprop ro.build.product)
-
-if [ "x$product_name" = xfugu ]; then
-  buffer_size=16MB
-else
-  buffer_size=32MB
-fi
-
-echo -e "${green}Setting adb buffer size to ${buffer_size}${nc}"
-adb logcat -G ${buffer_size}
-$verbose && adb logcat -g
-
-echo -e "${green}Removing adb spam filter${nc}"
-adb logcat -P ""
-$verbose && adb logcat -p
-
-echo -e "${green}Kill stalled dalvikvm processes${nc}"
-# 'ps' on M can sometimes hang.
-timeout 2s adb shell "ps" >/dev/null
-if [ $? = 124 ]; then
-  echo -e "${green}Rebooting device to fix 'ps'${nc}"
-  adb reboot
-  adb wait-for-device root
-else
-  processes=$(adb shell "ps" | grep dalvikvm | awk '{print $2}')
-  for i in $processes; do adb shell kill -9 $i; done
-fi
-
-if [[ -n "$ART_TEST_CHROOT" ]]; then
-  # Prepare the chroot dir.
-  echo -e "${green}Prepare the chroot dir in $ART_TEST_CHROOT${nc}"
-
-  # Check that ART_TEST_CHROOT is correctly defined.
-  [[ "x$ART_TEST_CHROOT" = x/* ]] || { echo "$ART_TEST_CHROOT is not an absolute path"; exit 1; }
-
-  # Create chroot.
-  adb shell mkdir -p "$ART_TEST_CHROOT"
-
-  # Provide property_contexts file(s) in chroot.
-  # This is required to have Android system properties work from the chroot.
-  # Notes:
-  # - In Android N, only '/property_contexts' is expected.
-  # - In Android O, property_context files are expected under /system and /vendor.
-  # (See bionic/libc/bionic/system_properties.cpp for more information.)
-  property_context_files="/property_contexts \
-    /system/etc/selinux/plat_property_contexts \
-    /vendor/etc/selinux/nonplat_property_context \
-    /plat_property_contexts \
-    /nonplat_property_contexts"
-  for f in $property_context_files; do
-    adb shell test -f "$f" \
-      "&&" mkdir -p "$ART_TEST_CHROOT$(dirname $f)" \
-      "&&" cp -f "$f" "$ART_TEST_CHROOT$f"
-  done
-
-  # Create directories required for ART testing in chroot.
-  adb shell mkdir -p "$ART_TEST_CHROOT/tmp"
-  adb shell mkdir -p "$ART_TEST_CHROOT/data/dalvik-cache"
-  adb shell mkdir -p "$ART_TEST_CHROOT/data/local/tmp"
-
-  # Populate /etc in chroot with required files.
-  adb shell mkdir -p "$ART_TEST_CHROOT/system/etc"
-  adb shell "cd $ART_TEST_CHROOT && ln -s system/etc etc"
-
-  # Provide /proc in chroot.
-  adb shell mkdir -p "$ART_TEST_CHROOT/proc"
-  adb shell mount | grep -q "^proc on $ART_TEST_CHROOT/proc type proc " \
-    || adb shell mount -t proc proc "$ART_TEST_CHROOT/proc"
-
-  # Provide /sys in chroot.
-  adb shell mkdir -p "$ART_TEST_CHROOT/sys"
-  adb shell mount | grep -q "^sysfs on $ART_TEST_CHROOT/sys type sysfs " \
-    || adb shell mount -t sysfs sysfs "$ART_TEST_CHROOT/sys"
-  # Provide /sys/kernel/debug in chroot.
-  adb shell mount | grep -q "^debugfs on $ART_TEST_CHROOT/sys/kernel/debug type debugfs " \
-    || adb shell mount -t debugfs debugfs "$ART_TEST_CHROOT/sys/kernel/debug"
-
-  # Provide /dev in chroot.
-  adb shell mkdir -p "$ART_TEST_CHROOT/dev"
-  adb shell mount | grep -q "^tmpfs on $ART_TEST_CHROOT/dev type tmpfs " \
-    || adb shell mount -o bind /dev "$ART_TEST_CHROOT/dev"
-
-  # Create /apex tmpfs in chroot.
-  adb shell mkdir -p "$ART_TEST_CHROOT/apex"
-  adb shell mount | grep -q "^tmpfs on $ART_TEST_CHROOT/apex type tmpfs " \
-    || adb shell mount -t tmpfs -o nodev,noexec,nosuid tmpfs "$ART_TEST_CHROOT/apex"
-fi
diff --git a/tools/signal_dumper/Android.bp b/tools/signal_dumper/Android.bp
new file mode 100644
index 0000000..e727f9f
--- /dev/null
+++ b/tools/signal_dumper/Android.bp
@@ -0,0 +1,68 @@
+//
+// Copyright (C) 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+cc_defaults {
+    name: "signal_dumper_libbase_static_deps",
+    static_libs: ["liblog"],
+}
+
+cc_defaults {
+    name: "signal_dumper_libunwindstack_static_deps",
+    defaults: ["signal_dumper_libbase_static_deps"],
+    static_libs: [
+        "libbase",
+        "libdexfile_support_static",
+        "liblog",
+        "liblzma",
+    ],
+}
+
+cc_defaults {
+    name: "signal_dumper_libbacktrace_static_deps",
+    defaults: [
+        "signal_dumper_libbase_static_deps",
+        "signal_dumper_libunwindstack_static_deps",
+    ],
+    static_libs: [
+        "libbase",
+        "libunwindstack",
+    ],
+}
+
+art_cc_binary {
+    name: "signal_dumper",
+
+    host_supported: true,
+    target: {
+        darwin: {
+            enabled: false,
+        },
+    },
+    device_supported: true,
+
+    defaults: [
+        "art_defaults",
+        "signal_dumper_libbacktrace_static_deps",
+        "signal_dumper_libbase_static_deps",
+    ],
+
+    srcs: ["signal_dumper.cc"],
+
+    static_libs: [
+        "libbacktrace",
+        "libbase",
+    ],
+}
diff --git a/tools/signal_dumper/signal_dumper.cc b/tools/signal_dumper/signal_dumper.cc
new file mode 100644
index 0000000..e9a589e
--- /dev/null
+++ b/tools/signal_dumper/signal_dumper.cc
@@ -0,0 +1,726 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <dirent.h>
+#include <poll.h>
+#include <sys/prctl.h>
+#include <sys/ptrace.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <csignal>
+#include <cstdlib>
+#include <cstring>
+#include <iostream>
+#include <thread>
+#include <memory>
+#include <set>
+#include <string>
+
+#include <android-base/file.h>
+#include <android-base/logging.h>
+#include <android-base/macros.h>
+#include <android-base/parseint.h>
+#include <android-base/stringprintf.h>
+#include <android-base/strings.h>
+#include <android-base/unique_fd.h>
+#include <backtrace/Backtrace.h>
+#include <backtrace/BacktraceMap.h>
+
+namespace art {
+namespace {
+
+using android::base::StringPrintf;
+using android::base::unique_fd;
+
+constexpr bool kUseAddr2line = true;
+
+namespace timeout_signal {
+
+class SignalSet {
+ public:
+  SignalSet() {
+    if (sigemptyset(&set_) == -1) {
+      PLOG(FATAL) << "sigemptyset failed";
+    }
+  }
+
+  void Add(int signal) {
+    if (sigaddset(&set_, signal) == -1) {
+      PLOG(FATAL) << "sigaddset " << signal << " failed";
+    }
+  }
+
+  void Block() {
+    if (pthread_sigmask(SIG_BLOCK, &set_, nullptr) != 0) {
+      PLOG(FATAL) << "pthread_sigmask failed";
+    }
+  }
+
+  int Wait() {
+    // Sleep in sigwait() until a signal arrives. gdb causes EINTR failures.
+    int signal_number;
+    int rc = TEMP_FAILURE_RETRY(sigwait(&set_, &signal_number));
+    if (rc != 0) {
+      PLOG(FATAL) << "sigwait failed";
+    }
+    return signal_number;
+  }
+
+ private:
+  sigset_t set_;
+};
+
+}  // namespace timeout_signal
+
+namespace addr2line {
+
+constexpr const char* kAddr2linePath =
+    "/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8/bin/x86_64-linux-addr2line";
+
+std::unique_ptr<std::string> FindAddr2line() {
+  const char* env_value = getenv("ANDROID_BUILD_TOP");
+  if (env_value != nullptr) {
+    std::string path = std::string(env_value) + kAddr2linePath;
+    if (access(path.c_str(), X_OK) == 0) {
+      return std::make_unique<std::string>(path);
+    }
+  }
+
+  {
+    std::string path = std::string(".") + kAddr2linePath;
+    if (access(path.c_str(), X_OK) == 0) {
+      return std::make_unique<std::string>(path);
+    }
+  }
+
+  {
+    using android::base::Dirname;
+
+    std::string exec_dir = android::base::GetExecutableDirectory();
+    std::string derived_top = Dirname(Dirname(Dirname(Dirname(exec_dir))));
+    std::string path = derived_top + kAddr2linePath;
+    if (access(path.c_str(), X_OK) == 0) {
+      return std::make_unique<std::string>(path);
+    }
+  }
+
+  constexpr const char* kHostAddr2line = "/usr/bin/addr2line";
+  if (access(kHostAddr2line, F_OK) == 0) {
+    return std::make_unique<std::string>(kHostAddr2line);
+  }
+
+  return nullptr;
+}
+
+// The state of an open pipe to addr2line. In "server" mode, addr2line takes input on stdin
+// and prints the result to stdout. This struct keeps the state of the open connection.
+struct Addr2linePipe {
+  Addr2linePipe(int in_fd, int out_fd, const std::string& file_name, pid_t pid)
+      : in(in_fd), out(out_fd), file(file_name), child_pid(pid), odd(true) {}
+
+  ~Addr2linePipe() {
+    kill(child_pid, SIGKILL);
+  }
+
+  unique_fd in;      // The file descriptor that is connected to the output of addr2line.
+  unique_fd out;     // The file descriptor that is connected to the input of addr2line.
+
+  const std::string file;     // The file addr2line is working on, so that we know when to close
+                              // and restart.
+  const pid_t child_pid;      // The pid of the child, which we should kill when we're done.
+  bool odd;                   // Print state for indentation of lines.
+};
+
+std::unique_ptr<Addr2linePipe> Connect(const std::string& name, const char* args[]) {
+  int caller_to_addr2line[2];
+  int addr2line_to_caller[2];
+
+  if (pipe(caller_to_addr2line) == -1) {
+    return nullptr;
+  }
+  if (pipe(addr2line_to_caller) == -1) {
+    close(caller_to_addr2line[0]);
+    close(caller_to_addr2line[1]);
+    return nullptr;
+  }
+
+  pid_t pid = fork();
+  if (pid == -1) {
+    close(caller_to_addr2line[0]);
+    close(caller_to_addr2line[1]);
+    close(addr2line_to_caller[0]);
+    close(addr2line_to_caller[1]);
+    return nullptr;
+  }
+
+  if (pid == 0) {
+    dup2(caller_to_addr2line[0], STDIN_FILENO);
+    dup2(addr2line_to_caller[1], STDOUT_FILENO);
+
+    close(caller_to_addr2line[0]);
+    close(caller_to_addr2line[1]);
+    close(addr2line_to_caller[0]);
+    close(addr2line_to_caller[1]);
+
+    execv(args[0], const_cast<char* const*>(args));
+    exit(1);
+  } else {
+    close(caller_to_addr2line[0]);
+    close(addr2line_to_caller[1]);
+    return std::make_unique<Addr2linePipe>(addr2line_to_caller[0],
+                                           caller_to_addr2line[1],
+                                           name,
+                                           pid);
+  }
+}
+
+void WritePrefix(std::ostream& os, const char* prefix, bool odd) {
+  if (prefix != nullptr) {
+    os << prefix;
+  }
+  os << "  ";
+  if (!odd) {
+    os << " ";
+  }
+}
+
+void Drain(size_t expected,
+           const char* prefix,
+           std::unique_ptr<Addr2linePipe>* pipe /* inout */,
+           std::ostream& os) {
+  DCHECK(pipe != nullptr);
+  DCHECK(pipe->get() != nullptr);
+  int in = pipe->get()->in.get();
+  DCHECK_GE(in, 0);
+
+  bool prefix_written = false;
+
+  for (;;) {
+    constexpr uint32_t kWaitTimeExpectedMilli = 500;
+    constexpr uint32_t kWaitTimeUnexpectedMilli = 50;
+
+    int timeout = expected > 0 ? kWaitTimeExpectedMilli : kWaitTimeUnexpectedMilli;
+    struct pollfd read_fd{in, POLLIN, 0};
+    int retval = TEMP_FAILURE_RETRY(poll(&read_fd, 1, timeout));
+    if (retval == -1) {
+      // An error occurred.
+      pipe->reset();
+      return;
+    }
+
+    if (retval == 0) {
+      // Timeout.
+      return;
+    }
+
+    if (!(read_fd.revents & POLLIN)) {
+      // addr2line call exited.
+      pipe->reset();
+      return;
+    }
+
+    constexpr size_t kMaxBuffer = 128;  // Relatively small buffer. Should be OK as we're on an
+    // alt stack, but just to be sure...
+    char buffer[kMaxBuffer];
+    memset(buffer, 0, kMaxBuffer);
+    int bytes_read = TEMP_FAILURE_RETRY(read(in, buffer, kMaxBuffer - 1));
+    if (bytes_read <= 0) {
+      // This should not really happen...
+      pipe->reset();
+      return;
+    }
+    buffer[bytes_read] = '\0';
+
+    char* tmp = buffer;
+    while (*tmp != 0) {
+      if (!prefix_written) {
+        WritePrefix(os, prefix, (*pipe)->odd);
+        prefix_written = true;
+      }
+      char* new_line = strchr(tmp, '\n');
+      if (new_line == nullptr) {
+        os << tmp;
+
+        break;
+      } else {
+        os << std::string(tmp, new_line - tmp + 1);
+
+        tmp = new_line + 1;
+        prefix_written = false;
+        (*pipe)->odd = !(*pipe)->odd;
+
+        if (expected > 0) {
+          expected--;
+        }
+      }
+    }
+  }
+}
+
+void Addr2line(const std::string& addr2line,
+               const std::string& map_src,
+               uintptr_t offset,
+               std::ostream& os,
+               const char* prefix,
+               std::unique_ptr<Addr2linePipe>* pipe /* inout */) {
+  DCHECK(pipe != nullptr);
+
+  if (map_src == "[vdso]" || android::base::EndsWith(map_src, ".vdex")) {
+    // addr2line will not work on the vdso.
+    // vdex files are special frames injected for the interpreter
+    // so they don't have any line number information available.
+    return;
+  }
+
+  if (*pipe == nullptr || (*pipe)->file != map_src) {
+    if (*pipe != nullptr) {
+      Drain(0, prefix, pipe, os);
+    }
+    pipe->reset();  // Close early.
+
+    const char* args[] = {
+        addr2line.c_str(),
+        "--functions",
+        "--inlines",
+        "--demangle",
+        "-e",
+        map_src.c_str(),
+        nullptr
+    };
+    *pipe = Connect(map_src, args);
+  }
+
+  Addr2linePipe* pipe_ptr = pipe->get();
+  if (pipe_ptr == nullptr) {
+    // Failed...
+    return;
+  }
+
+  // Send the offset.
+  const std::string hex_offset = StringPrintf("%zx\n", offset);
+
+  if (!android::base::WriteFully(pipe_ptr->out.get(), hex_offset.data(), hex_offset.length())) {
+    // Error. :-(
+    pipe->reset();
+    return;
+  }
+
+  // Now drain (expecting two lines).
+  Drain(2U, prefix, pipe, os);
+}
+
+}  // namespace addr2line
+
+namespace ptrace {
+
+std::set<pid_t> PtraceSiblings(pid_t pid) {
+  std::set<pid_t> ret;
+  std::string task_path = android::base::StringPrintf("/proc/%d/task", pid);
+
+  std::unique_ptr<DIR, int (*)(DIR*)> d(opendir(task_path.c_str()), closedir);
+
+  // Bail early if the task directory cannot be opened.
+  if (d == nullptr) {
+    PLOG(ERROR) << "Failed to scan task folder";
+    return ret;
+  }
+
+  struct dirent* de;
+  while ((de = readdir(d.get())) != nullptr) {
+    // Ignore "." and "..".
+    if (!strcmp(de->d_name, ".") || !strcmp(de->d_name, "..")) {
+      continue;
+    }
+
+    char* end;
+    pid_t tid = strtoul(de->d_name, &end, 10);
+    if (*end) {
+      continue;
+    }
+
+    if (tid == pid) {
+      continue;
+    }
+
+    if (::ptrace(PTRACE_ATTACH, tid, 0, 0) != 0) {
+      PLOG(ERROR) << "Failed to attach to tid " << tid;
+      continue;
+    }
+
+    ret.insert(tid);
+  }
+  return ret;
+}
+
+void DumpABI(pid_t forked_pid) {
+  enum class ABI { kArm, kArm64, kX86, kX86_64 };
+#if defined(__arm__)
+  constexpr ABI kDumperABI = ABI::kArm;
+#elif defined(__aarch64__)
+  constexpr ABI kDumperABI = ABI::kArm64;
+#elif defined(__i386__)
+  constexpr ABI kDumperABI = ABI::kX86;
+#elif defined(__x86_64__)
+  constexpr ABI kDumperABI = ABI::kX86_64;
+#else
+#error Unsupported architecture
+#endif
+
+  char data[1024];  // Should be more than enough.
+  struct iovec io_vec;
+  io_vec.iov_base = &data;
+  io_vec.iov_len = 1024;
+  ABI to_print;
+  if (0 != ::ptrace(PTRACE_GETREGSET, forked_pid, /* NT_PRSTATUS */ 1, &io_vec)) {
+    LOG(ERROR) << "Could not get registers to determine abi.";
+    // Use 64-bit as default.
+    switch (kDumperABI) {
+      case ABI::kArm:
+      case ABI::kArm64:
+        to_print = ABI::kArm64;
+        break;
+      case ABI::kX86:
+      case ABI::kX86_64:
+        to_print = ABI::kX86_64;
+        break;
+      default:
+        __builtin_unreachable();
+    }
+  } else {
+    // Check the length of the data. Assume that it's the same arch as the tool.
+    switch (kDumperABI) {
+      case ABI::kArm:
+      case ABI::kArm64:
+        to_print = io_vec.iov_len == 18 * sizeof(uint32_t) ? ABI::kArm : ABI::kArm64;
+        break;
+      case ABI::kX86:
+      case ABI::kX86_64:
+        to_print = io_vec.iov_len == 17 * sizeof(uint32_t) ? ABI::kX86 : ABI::kX86_64;
+        break;
+      default:
+        __builtin_unreachable();
+    }
+  }
+  std::string abi_str;
+  switch (to_print) {
+    case ABI::kArm:
+      abi_str = "arm";
+      break;
+    case ABI::kArm64:
+      abi_str = "arm64";
+      break;
+    case ABI::kX86:
+      abi_str = "x86";
+      break;
+    case ABI::kX86_64:
+      abi_str = "x86_64";
+      break;
+  }
+  LOG(ERROR) << "ABI: '" << abi_str << "'" << std::endl;
+}
+
+}  // namespace ptrace
+
+template <typename T>
+bool WaitLoop(uint32_t max_wait_micros, const T& handler) {
+  constexpr uint32_t kWaitMicros = 10;
+  const size_t kMaxLoopCount = max_wait_micros / kWaitMicros;
+
+  for (size_t loop_count = 1; loop_count <= kMaxLoopCount; ++loop_count) {
+    bool ret;
+    if (handler(&ret)) {
+      return ret;
+    }
+    usleep(kWaitMicros);
+  }
+  return false;
+}
+
+bool WaitForMainSigStop(const std::atomic<bool>& saw_wif_stopped_for_main) {
+  auto handler = [&](bool* res) {
+    if (saw_wif_stopped_for_main) {
+      *res = true;
+      return true;
+    }
+    return false;
+  };
+  constexpr uint32_t kMaxWaitMicros = 30 * 1000 * 1000;  // 30s wait.
+  return WaitLoop(kMaxWaitMicros, handler);
+}
+
+bool WaitForSigStopped(pid_t pid, uint32_t max_wait_micros) {
+  auto handler = [&](bool* res) {
+    int status;
+    pid_t rc = TEMP_FAILURE_RETRY(waitpid(pid, &status, WNOHANG));
+    if (rc == -1) {
+      PLOG(ERROR) << "Failed to waitpid for " << pid;
+      *res = false;
+      return true;
+    }
+    if (rc == pid) {
+      if (!(WIFSTOPPED(status))) {
+        LOG(ERROR) << "Did not get expected stopped signal for " << pid;
+        *res = false;
+      } else {
+        *res = true;
+      }
+      return true;
+    }
+    return false;
+  };
+  return WaitLoop(max_wait_micros, handler);
+}
+
+#ifdef __LP64__
+constexpr bool kIs64Bit = true;
+#else
+constexpr bool kIs64Bit = false;
+#endif
+
+void DumpThread(pid_t pid,
+                pid_t tid,
+                const std::string* addr2line_path,
+                const char* prefix,
+                BacktraceMap* map) {
+  LOG(ERROR) << std::endl << "=== pid: " << pid << " tid: " << tid << " ===" << std::endl;
+
+  constexpr uint32_t kMaxWaitMicros = 1000 * 1000;  // 1s.
+  if (pid != tid && !WaitForSigStopped(tid, kMaxWaitMicros)) {
+    LOG(ERROR) << "Failed to wait for sigstop on " << tid;
+  }
+
+  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, tid, map));
+  if (backtrace == nullptr) {
+    LOG(ERROR) << prefix << "(failed to create Backtrace for thread " << tid << ")";
+    return;
+  }
+  backtrace->SetSkipFrames(false);
+  if (!backtrace->Unwind(0, nullptr)) {
+    LOG(ERROR) << prefix << "(backtrace::Unwind failed for thread " << tid
+               << ": " <<  backtrace->GetErrorString(backtrace->GetError()) << ")";
+    return;
+  }
+  if (backtrace->NumFrames() == 0) {
+    LOG(ERROR) << prefix << "(no native stack frames for thread " << tid << ")";
+    return;
+  }
+
+  std::unique_ptr<addr2line::Addr2linePipe> addr2line_state;
+
+  for (Backtrace::const_iterator it = backtrace->begin();
+      it != backtrace->end(); ++it) {
+    std::ostringstream oss;
+    oss << prefix << StringPrintf("#%02zu pc ", it->num);
+    bool try_addr2line = false;
+    if (!BacktraceMap::IsValid(it->map)) {
+      oss << StringPrintf(kIs64Bit ? "%016" PRIx64 "  ???" : "%08" PRIx64 "  ???", it->pc);
+    } else {
+      oss << StringPrintf(kIs64Bit ? "%016" PRIx64 "  " : "%08" PRIx64 "  ", it->rel_pc);
+      if (it->map.name.empty()) {
+        oss << StringPrintf("<anonymous:%" PRIx64 ">", it->map.start);
+      } else {
+        oss << it->map.name;
+      }
+      if (it->map.offset != 0) {
+        oss << StringPrintf(" (offset %" PRIx64 ")", it->map.offset);
+      }
+      oss << " (";
+      if (!it->func_name.empty()) {
+        oss << it->func_name;
+        if (it->func_offset != 0) {
+          oss << "+" << it->func_offset;
+        }
+        // Functions found using the gdb jit interface will be in an empty
+        // map that cannot be found using addr2line.
+        if (!it->map.name.empty()) {
+          try_addr2line = true;
+        }
+      } else {
+        oss << "???";
+      }
+      oss << ")";
+    }
+    LOG(ERROR) << oss.str() << std::endl;
+    if (try_addr2line && addr2line_path != nullptr) {
+      addr2line::Addr2line(*addr2line_path,
+                           it->map.name,
+                           it->rel_pc,
+                           LOG_STREAM(ERROR),
+                           prefix,
+                           &addr2line_state);
+    }
+  }
+
+  if (addr2line_state != nullptr) {
+    addr2line::Drain(0, prefix, &addr2line_state, LOG_STREAM(ERROR));
+  }
+}
+
+void DumpProcess(pid_t forked_pid, const std::atomic<bool>& saw_wif_stopped_for_main) {
+  LOG(ERROR) << "Timeout for process " << forked_pid;
+
+  CHECK_EQ(0, ::ptrace(PTRACE_ATTACH, forked_pid, 0, 0));
+  std::set<pid_t> tids = ptrace::PtraceSiblings(forked_pid);
+  tids.insert(forked_pid);
+
+  ptrace::DumpABI(forked_pid);
+
+  // Check whether we have and should use addr2line.
+  std::unique_ptr<std::string> addr2line_path;
+  if (kUseAddr2line) {
+    addr2line_path = addr2line::FindAddr2line();
+    if (addr2line_path == nullptr) {
+      LOG(ERROR) << "Did not find usable addr2line";
+    }
+  }
+
+  if (!WaitForMainSigStop(saw_wif_stopped_for_main)) {
+    LOG(ERROR) << "Did not receive SIGSTOP for pid " << forked_pid;
+  }
+
+  std::unique_ptr<BacktraceMap> backtrace_map(BacktraceMap::Create(forked_pid));
+  if (backtrace_map == nullptr) {
+    LOG(ERROR) << "Could not create BacktraceMap";
+    return;
+  }
+
+  for (pid_t tid : tids) {
+    DumpThread(forked_pid, tid, addr2line_path.get(), "  ", backtrace_map.get());
+  }
+}
+
+[[noreturn]]
+void WaitMainLoop(pid_t forked_pid, std::atomic<bool>* saw_wif_stopped_for_main) {
+  for (;;) {
+    // Consider switching to waitid to not get woken up for WIFSTOPPED.
+    int status;
+    pid_t res = TEMP_FAILURE_RETRY(waitpid(forked_pid, &status, 0));
+    if (res == -1) {
+      PLOG(FATAL) << "Failure during waitpid";
+      __builtin_unreachable();
+    }
+
+    if (WIFEXITED(status)) {
+      _exit(WEXITSTATUS(status));
+      __builtin_unreachable();
+    }
+    if (WIFSIGNALED(status)) {
+      _exit(1);
+      __builtin_unreachable();
+    }
+    if (WIFSTOPPED(status)) {
+      *saw_wif_stopped_for_main = true;
+      continue;
+    }
+    if (WIFCONTINUED(status)) {
+      continue;
+    }
+
+    LOG(FATAL) << "Unknown status " << std::hex << status;
+  }
+}
+
+[[noreturn]]
+void SetupAndWait(pid_t forked_pid, int signal, int timeout_exit_code) {
+  timeout_signal::SignalSet signals;
+  signals.Add(signal);
+  signals.Block();
+
+  std::atomic<bool> saw_wif_stopped_for_main(false);
+
+  std::thread signal_catcher([&]() {
+    signals.Block();
+    int sig = signals.Wait();
+    CHECK_EQ(sig, signal);
+
+    DumpProcess(forked_pid, saw_wif_stopped_for_main);
+
+    // Don't clean up. Just kill the child and exit.
+    kill(forked_pid, SIGKILL);
+    _exit(timeout_exit_code);
+  });
+
+  WaitMainLoop(forked_pid, &saw_wif_stopped_for_main);
+}
+
+}  // namespace
+}  // namespace art
+
+int main(int argc ATTRIBUTE_UNUSED, char** argv) {
+  android::base::InitLogging(argv);
+
+  int signal = SIGRTMIN + 2;
+  int timeout_exit_code = 1;
+
+  size_t index = 1u;
+  CHECK(argv[index] != nullptr);
+
+  bool to_logcat = false;
+#ifdef __ANDROID__
+  if (strcmp(argv[index], "-l") == 0) {
+    index++;
+    CHECK(argv[index] != nullptr);
+    to_logcat = true;
+  }
+#endif
+  if (!to_logcat) {
+    android::base::SetLogger(android::base::StderrLogger);
+  }
+
+  if (strcmp(argv[index], "-s") == 0) {
+    index++;
+    CHECK(argv[index] != nullptr);
+    uint32_t signal_uint;
+    CHECK(android::base::ParseUint(argv[index], &signal_uint)) << "Signal not a number.";
+    signal = signal_uint;
+    index++;
+    CHECK(argv[index] != nullptr);
+  }
+
+  if (strcmp(argv[index], "-e") == 0) {
+    index++;
+    CHECK(argv[index] != nullptr);
+    uint32_t timeout_exit_code_uint;
+    CHECK(android::base::ParseUint(argv[index], &timeout_exit_code_uint))
+        << "Exit code not a number.";
+    timeout_exit_code = timeout_exit_code_uint;
+    index++;
+    CHECK(argv[index] != nullptr);
+  }
+
+  pid_t orig_ppid = getpid();
+
+  pid_t pid = fork();
+  if (pid == 0) {
+    if (prctl(PR_SET_PDEATHSIG, SIGTERM) == -1) {
+      _exit(1);
+    }
+
+    if (getppid() != orig_ppid) {
+      _exit(2);
+    }
+
+    execvp(argv[index], &argv[index]);
+
+    _exit(3);
+    __builtin_unreachable();
+  }
+
+  art::SetupAndWait(pid, signal, timeout_exit_code);
+  __builtin_unreachable();
+}
diff --git a/tools/teardown-buildbot-device.sh b/tools/teardown-buildbot-device.sh
deleted file mode 100755
index 7eb5cc3..0000000
--- a/tools/teardown-buildbot-device.sh
+++ /dev/null
@@ -1,144 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script undoes (most of) the work done by tools/setup-buildbot-device.sh.
-# Make sure to keep these files in sync.
-
-green='\033[0;32m'
-nc='\033[0m'
-
-# Setup as root, as some actions performed here require it.
-adb root
-adb wait-for-device
-
-if [[ -n "$ART_TEST_CHROOT" ]]; then
-  # Check that ART_TEST_CHROOT is correctly defined.
-  [[ "x$ART_TEST_CHROOT" = x/* ]] || { echo "$ART_TEST_CHROOT is not an absolute path"; exit 1; }
-
-  if adb shell test -d "$ART_TEST_CHROOT"; then
-    # Display users of the chroot dir.
-
-    echo -e "${green}List open files under chroot dir $ART_TEST_CHROOT${nc}"
-    adb shell lsof | grep "$ART_TEST_CHROOT"
-
-    # for_all_chroot_process ACTION
-    # -----------------------------
-    # Execute ACTION on all processes running from binaries located
-    # under the chroot directory. ACTION is passed two arguments: the
-    # PID of the process, and a string containing the command line
-    # that started this process.
-    for_all_chroot_process() {
-      local action=$1
-      adb shell ls -ld "/proc/*/root" \
-        | sed -n -e "s,^.* \\(/proc/.*/root\\) -> $ART_TEST_CHROOT\$,\\1,p" \
-        | while read link; do
-            local dir=$(dirname "$link")
-            local pid=$(basename "$dir")
-            local cmdline=$(adb shell cat "$dir"/cmdline | tr '\000' ' ')
-            $action "$pid" "$cmdline"
-          done
-    }
-
-    # display_process PID CMDLINE
-    # ---------------------------
-    # Display information about process with given PID, that was started with CMDLINE.
-    display_process() {
-      local pid=$1
-      local cmdline=$2
-      echo "$cmdline (PID: $pid)"
-    }
-
-    echo -e "${green}List processes running from binaries under chroot dir $ART_TEST_CHROOT${nc}"
-    for_all_chroot_process display_process
-
-    # Tear down the chroot dir.
-
-    echo -e "${green}Tear down the chroot set up in $ART_TEST_CHROOT${nc}"
-
-    # remove_filesystem_from_chroot DIR-IN-CHROOT FSTYPE REMOVE-DIR-IN-CHROOT
-    # -----------------------------------------------------------------------
-    # Unmount filesystem with type FSTYPE mounted in directory DIR-IN-CHROOT
-    # under the chroot directory.
-    # Remove DIR-IN-CHROOT under the chroot if REMOVE-DIR-IN-CHROOT is
-    # true.
-    remove_filesystem_from_chroot() {
-      local dir_in_chroot=$1
-      local fstype=$2
-      local remove_dir=$3
-      local dir="$ART_TEST_CHROOT/$dir_in_chroot"
-      adb shell test -d "$dir" \
-        && adb shell mount | grep -q "^$fstype on $dir type $fstype " \
-        && if adb shell umount "$dir"; then
-             $remove_dir && adb shell rmdir "$dir"
-           else
-             echo "Files still open in $dir:"
-             adb shell lsof | grep "$dir"
-           fi
-    }
-
-    # Remove /apex from chroot.
-    remove_filesystem_from_chroot apex tmpfs true
-
-    # Remove /dev from chroot.
-    remove_filesystem_from_chroot dev tmpfs true
-
-    # Remove /sys/kernel/debug from chroot.
-    # The /sys/kernel/debug directory under the chroot dir cannot be
-    # deleted, as it is part of the host device's /sys filesystem.
-    remove_filesystem_from_chroot sys/kernel/debug debugfs false
-    # Remove /sys from chroot.
-    remove_filesystem_from_chroot sys sysfs true
-
-    # Remove /proc from chroot.
-    remove_filesystem_from_chroot proc proc true
-
-    # Remove /etc from chroot.
-    adb shell rm -f "$ART_TEST_CHROOT/etc"
-    adb shell rm -rf "$ART_TEST_CHROOT/system/etc"
-
-    # Remove directories used for ART testing in chroot.
-    adb shell rm -rf "$ART_TEST_CHROOT/data/local/tmp"
-    adb shell rm -rf "$ART_TEST_CHROOT/data/dalvik-cache"
-    adb shell rm -rf "$ART_TEST_CHROOT/tmp"
-
-    # Remove property_contexts file(s) from chroot.
-    property_context_files="/property_contexts \
-      /system/etc/selinux/plat_property_contexts \
-      /vendor/etc/selinux/nonplat_property_context \
-      /plat_property_contexts \
-      /nonplat_property_contexts"
-    for f in $property_context_files; do
-      adb shell rm -f "$ART_TEST_CHROOT$f"
-    done
-
-
-    # Kill processes still running in the chroot.
-
-    # kill_process PID CMDLINE
-    # ------------------------
-    # Kill process with given PID, that was started with CMDLINE.
-    kill_process() {
-      local pid=$1
-      local cmdline=$2
-      echo "Killing $cmdline (PID: $pid)"
-      adb shell kill -9 "$pid"
-    }
-
-    echo -e "${green}Kill processes still running from binaries under" \
-      "chroot dir $ART_TEST_CHROOT (if any)${nc} "
-    for_all_chroot_process kill_process
-  fi
-fi
diff --git a/tools/timeout_dumper/Android.bp b/tools/timeout_dumper/Android.bp
deleted file mode 100644
index bb813d4..0000000
--- a/tools/timeout_dumper/Android.bp
+++ /dev/null
@@ -1,44 +0,0 @@
-//
-// Copyright (C) 2018 The Android Open Source Project
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-art_cc_binary {
-    name: "timeout_dumper",
-
-    host_supported: true,
-    target: {
-        darwin: {
-            enabled: false,
-        },
-        linux_bionic: {
-            sanitize: {
-                address: false,
-            },
-        },
-    },
-    device_supported: false,
-
-    defaults: ["art_defaults"],
-
-    srcs: ["timeout_dumper.cc"],
-
-    shared_libs: [
-        "libbacktrace",
-        "libbase",
-    ],
-    sanitize: {
-        address: true,
-    },
-}
diff --git a/tools/timeout_dumper/timeout_dumper.cc b/tools/timeout_dumper/timeout_dumper.cc
deleted file mode 100644
index 08d2f4c..0000000
--- a/tools/timeout_dumper/timeout_dumper.cc
+++ /dev/null
@@ -1,707 +0,0 @@
-/*
- * Copyright (C) 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <dirent.h>
-#include <poll.h>
-#include <sys/prctl.h>
-#include <sys/ptrace.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-#include <csignal>
-#include <cstdlib>
-#include <cstring>
-#include <iostream>
-#include <thread>
-#include <memory>
-#include <set>
-#include <string>
-
-#include <android-base/file.h>
-#include <android-base/logging.h>
-#include <android-base/macros.h>
-#include <android-base/stringprintf.h>
-#include <android-base/strings.h>
-#include <android-base/unique_fd.h>
-#include <backtrace/Backtrace.h>
-#include <backtrace/BacktraceMap.h>
-
-namespace art {
-namespace {
-
-using android::base::StringPrintf;
-using android::base::unique_fd;
-
-constexpr bool kUseAddr2line = true;
-
-namespace timeout_signal {
-
-class SignalSet {
- public:
-  SignalSet() {
-    if (sigemptyset(&set_) == -1) {
-      PLOG(FATAL) << "sigemptyset failed";
-    }
-  }
-
-  void Add(int signal) {
-    if (sigaddset(&set_, signal) == -1) {
-      PLOG(FATAL) << "sigaddset " << signal << " failed";
-    }
-  }
-
-  void Block() {
-    if (pthread_sigmask(SIG_BLOCK, &set_, nullptr) != 0) {
-      PLOG(FATAL) << "pthread_sigmask failed";
-    }
-  }
-
-  int Wait() {
-    // Sleep in sigwait() until a signal arrives. gdb causes EINTR failures.
-    int signal_number;
-    int rc = TEMP_FAILURE_RETRY(sigwait(&set_, &signal_number));
-    if (rc != 0) {
-      PLOG(FATAL) << "sigwait failed";
-    }
-    return signal_number;
-  }
-
- private:
-  sigset_t set_;
-};
-
-int GetTimeoutSignal() {
-  return SIGRTMIN + 2;
-}
-
-}  // namespace timeout_signal
-
-namespace addr2line {
-
-constexpr const char* kAddr2linePath =
-    "/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8/bin/x86_64-linux-addr2line";
-
-std::unique_ptr<std::string> FindAddr2line() {
-  const char* env_value = getenv("ANDROID_BUILD_TOP");
-  if (env_value != nullptr) {
-    std::string path = std::string(env_value) + kAddr2linePath;
-    if (access(path.c_str(), X_OK) == 0) {
-      return std::make_unique<std::string>(path);
-    }
-  }
-
-  {
-    std::string path = std::string(".") + kAddr2linePath;
-    if (access(path.c_str(), X_OK) == 0) {
-      return std::make_unique<std::string>(path);
-    }
-  }
-
-  {
-    using android::base::Dirname;
-
-    std::string exec_dir = android::base::GetExecutableDirectory();
-    std::string derived_top = Dirname(Dirname(Dirname(Dirname(exec_dir))));
-    std::string path = derived_top + kAddr2linePath;
-    if (access(path.c_str(), X_OK) == 0) {
-      return std::make_unique<std::string>(path);
-    }
-  }
-
-  constexpr const char* kHostAddr2line = "/usr/bin/addr2line";
-  if (access(kHostAddr2line, F_OK) == 0) {
-    return std::make_unique<std::string>(kHostAddr2line);
-  }
-
-  return nullptr;
-}
-
-// The state of an open pipe to addr2line. In "server" mode, addr2line takes input on stdin
-// and prints the result to stdout. This struct keeps the state of the open connection.
-struct Addr2linePipe {
-  Addr2linePipe(int in_fd, int out_fd, const std::string& file_name, pid_t pid)
-      : in(in_fd), out(out_fd), file(file_name), child_pid(pid), odd(true) {}
-
-  ~Addr2linePipe() {
-    kill(child_pid, SIGKILL);
-  }
-
-  unique_fd in;      // The file descriptor that is connected to the output of addr2line.
-  unique_fd out;     // The file descriptor that is connected to the input of addr2line.
-
-  const std::string file;     // The file addr2line is working on, so that we know when to close
-                              // and restart.
-  const pid_t child_pid;      // The pid of the child, which we should kill when we're done.
-  bool odd;                   // Print state for indentation of lines.
-};
-
-std::unique_ptr<Addr2linePipe> Connect(const std::string& name, const char* args[]) {
-  int caller_to_addr2line[2];
-  int addr2line_to_caller[2];
-
-  if (pipe(caller_to_addr2line) == -1) {
-    return nullptr;
-  }
-  if (pipe(addr2line_to_caller) == -1) {
-    close(caller_to_addr2line[0]);
-    close(caller_to_addr2line[1]);
-    return nullptr;
-  }
-
-  pid_t pid = fork();
-  if (pid == -1) {
-    close(caller_to_addr2line[0]);
-    close(caller_to_addr2line[1]);
-    close(addr2line_to_caller[0]);
-    close(addr2line_to_caller[1]);
-    return nullptr;
-  }
-
-  if (pid == 0) {
-    dup2(caller_to_addr2line[0], STDIN_FILENO);
-    dup2(addr2line_to_caller[1], STDOUT_FILENO);
-
-    close(caller_to_addr2line[0]);
-    close(caller_to_addr2line[1]);
-    close(addr2line_to_caller[0]);
-    close(addr2line_to_caller[1]);
-
-    execv(args[0], const_cast<char* const*>(args));
-    exit(1);
-  } else {
-    close(caller_to_addr2line[0]);
-    close(addr2line_to_caller[1]);
-    return std::make_unique<Addr2linePipe>(addr2line_to_caller[0],
-                                           caller_to_addr2line[1],
-                                           name,
-                                           pid);
-  }
-}
-
-void WritePrefix(std::ostream& os, const char* prefix, bool odd) {
-  if (prefix != nullptr) {
-    os << prefix;
-  }
-  os << "  ";
-  if (!odd) {
-    os << " ";
-  }
-}
-
-void Drain(size_t expected,
-           const char* prefix,
-           std::unique_ptr<Addr2linePipe>* pipe /* inout */,
-           std::ostream& os) {
-  DCHECK(pipe != nullptr);
-  DCHECK(pipe->get() != nullptr);
-  int in = pipe->get()->in.get();
-  DCHECK_GE(in, 0);
-
-  bool prefix_written = false;
-
-  for (;;) {
-    constexpr uint32_t kWaitTimeExpectedMilli = 500;
-    constexpr uint32_t kWaitTimeUnexpectedMilli = 50;
-
-    int timeout = expected > 0 ? kWaitTimeExpectedMilli : kWaitTimeUnexpectedMilli;
-    struct pollfd read_fd{in, POLLIN, 0};
-    int retval = TEMP_FAILURE_RETRY(poll(&read_fd, 1, timeout));
-    if (retval == -1) {
-      // An error occurred.
-      pipe->reset();
-      return;
-    }
-
-    if (retval == 0) {
-      // Timeout.
-      return;
-    }
-
-    if (!(read_fd.revents & POLLIN)) {
-      // addr2line call exited.
-      pipe->reset();
-      return;
-    }
-
-    constexpr size_t kMaxBuffer = 128;  // Relatively small buffer. Should be OK as we're on an
-    // alt stack, but just to be sure...
-    char buffer[kMaxBuffer];
-    memset(buffer, 0, kMaxBuffer);
-    int bytes_read = TEMP_FAILURE_RETRY(read(in, buffer, kMaxBuffer - 1));
-    if (bytes_read <= 0) {
-      // This should not really happen...
-      pipe->reset();
-      return;
-    }
-    buffer[bytes_read] = '\0';
-
-    char* tmp = buffer;
-    while (*tmp != 0) {
-      if (!prefix_written) {
-        WritePrefix(os, prefix, (*pipe)->odd);
-        prefix_written = true;
-      }
-      char* new_line = strchr(tmp, '\n');
-      if (new_line == nullptr) {
-        os << tmp;
-
-        break;
-      } else {
-        os << std::string(tmp, new_line - tmp + 1);
-
-        tmp = new_line + 1;
-        prefix_written = false;
-        (*pipe)->odd = !(*pipe)->odd;
-
-        if (expected > 0) {
-          expected--;
-        }
-      }
-    }
-  }
-}
-
-void Addr2line(const std::string& addr2line,
-               const std::string& map_src,
-               uintptr_t offset,
-               std::ostream& os,
-               const char* prefix,
-               std::unique_ptr<Addr2linePipe>* pipe /* inout */) {
-  DCHECK(pipe != nullptr);
-
-  if (map_src == "[vdso]" || android::base::EndsWith(map_src, ".vdex")) {
-    // addr2line will not work on the vdso.
-    // vdex files are special frames injected for the interpreter
-    // so they don't have any line number information available.
-    return;
-  }
-
-  if (*pipe == nullptr || (*pipe)->file != map_src) {
-    if (*pipe != nullptr) {
-      Drain(0, prefix, pipe, os);
-    }
-    pipe->reset();  // Close early.
-
-    const char* args[] = {
-        addr2line.c_str(),
-        "--functions",
-        "--inlines",
-        "--demangle",
-        "-e",
-        map_src.c_str(),
-        nullptr
-    };
-    *pipe = Connect(map_src, args);
-  }
-
-  Addr2linePipe* pipe_ptr = pipe->get();
-  if (pipe_ptr == nullptr) {
-    // Failed...
-    return;
-  }
-
-  // Send the offset.
-  const std::string hex_offset = StringPrintf("%zx\n", offset);
-
-  if (!android::base::WriteFully(pipe_ptr->out.get(), hex_offset.data(), hex_offset.length())) {
-    // Error. :-(
-    pipe->reset();
-    return;
-  }
-
-  // Now drain (expecting two lines).
-  Drain(2U, prefix, pipe, os);
-}
-
-}  // namespace addr2line
-
-namespace ptrace {
-
-std::set<pid_t> PtraceSiblings(pid_t pid) {
-  std::set<pid_t> ret;
-  std::string task_path = android::base::StringPrintf("/proc/%d/task", pid);
-
-  std::unique_ptr<DIR, int (*)(DIR*)> d(opendir(task_path.c_str()), closedir);
-
-  // Bail early if the task directory cannot be opened.
-  if (d == nullptr) {
-    PLOG(ERROR) << "Failed to scan task folder";
-    return ret;
-  }
-
-  struct dirent* de;
-  while ((de = readdir(d.get())) != nullptr) {
-    // Ignore "." and "..".
-    if (!strcmp(de->d_name, ".") || !strcmp(de->d_name, "..")) {
-      continue;
-    }
-
-    char* end;
-    pid_t tid = strtoul(de->d_name, &end, 10);
-    if (*end) {
-      continue;
-    }
-
-    if (tid == pid) {
-      continue;
-    }
-
-    if (::ptrace(PTRACE_ATTACH, tid, 0, 0) != 0) {
-      PLOG(ERROR) << "Failed to attach to tid " << tid;
-      continue;
-    }
-
-    ret.insert(tid);
-  }
-  return ret;
-}
-
-void DumpABI(pid_t forked_pid) {
-  enum class ABI { kArm, kArm64, kMips, kMips64, kX86, kX86_64 };
-#if defined(__arm__)
-  constexpr ABI kDumperABI = ABI::kArm;
-#elif defined(__aarch64__)
-  constexpr ABI kDumperABI = ABI::kArm64;
-#elif defined(__mips__) && !defined(__LP64__)
-  constexpr ABI kDumperABI = ABI::kMips;
-#elif defined(__mips__) && defined(__LP64__)
-  constexpr ABI kDumperABI = ABI::kMips64;
-#elif defined(__i386__)
-  constexpr ABI kDumperABI = ABI::kX86;
-#elif defined(__x86_64__)
-  constexpr ABI kDumperABI = ABI::kX86_64;
-#else
-#error Unsupported architecture
-#endif
-
-  char data[1024];  // Should be more than enough.
-  struct iovec io_vec;
-  io_vec.iov_base = &data;
-  io_vec.iov_len = 1024;
-  ABI to_print;
-  if (0 != ::ptrace(PTRACE_GETREGSET, forked_pid, /* NT_PRSTATUS */ 1, &io_vec)) {
-    LOG(ERROR) << "Could not get registers to determine abi.";
-    // Use 64-bit as default.
-    switch (kDumperABI) {
-      case ABI::kArm:
-      case ABI::kArm64:
-        to_print = ABI::kArm64;
-        break;
-      case ABI::kMips:
-      case ABI::kMips64:
-        to_print = ABI::kMips64;
-        break;
-      case ABI::kX86:
-      case ABI::kX86_64:
-        to_print = ABI::kX86_64;
-        break;
-      default:
-        __builtin_unreachable();
-    }
-  } else {
-    // Check the length of the data. Assume that it's the same arch as the tool.
-    switch (kDumperABI) {
-      case ABI::kArm:
-      case ABI::kArm64:
-        to_print = io_vec.iov_len == 18 * sizeof(uint32_t) ? ABI::kArm : ABI::kArm64;
-        break;
-      case ABI::kMips:
-      case ABI::kMips64:
-        to_print = ABI::kMips64;  // TODO Figure out how this should work.
-        break;
-      case ABI::kX86:
-      case ABI::kX86_64:
-        to_print = io_vec.iov_len == 17 * sizeof(uint32_t) ? ABI::kX86 : ABI::kX86_64;
-        break;
-      default:
-        __builtin_unreachable();
-    }
-  }
-  std::string abi_str;
-  switch (to_print) {
-    case ABI::kArm:
-      abi_str = "arm";
-      break;
-    case ABI::kArm64:
-      abi_str = "arm64";
-      break;
-    case ABI::kMips:
-      abi_str = "mips";
-      break;
-    case ABI::kMips64:
-      abi_str = "mips64";
-      break;
-    case ABI::kX86:
-      abi_str = "x86";
-      break;
-    case ABI::kX86_64:
-      abi_str = "x86_64";
-      break;
-  }
-  std::cerr << "ABI: '" << abi_str << "'" << std::endl;
-}
-
-}  // namespace ptrace
-
-template <typename T>
-bool WaitLoop(uint32_t max_wait_micros, const T& handler) {
-  constexpr uint32_t kWaitMicros = 10;
-  const size_t kMaxLoopCount = max_wait_micros / kWaitMicros;
-
-  for (size_t loop_count = 1; loop_count <= kMaxLoopCount; ++loop_count) {
-    bool ret;
-    if (handler(&ret)) {
-      return ret;
-    }
-    usleep(kWaitMicros);
-  }
-  return false;
-}
-
-bool WaitForMainSigStop(const std::atomic<bool>& saw_wif_stopped_for_main) {
-  auto handler = [&](bool* res) {
-    if (saw_wif_stopped_for_main) {
-      *res = true;
-      return true;
-    }
-    return false;
-  };
-  constexpr uint32_t kMaxWaitMicros = 30 * 1000 * 1000;  // 30s wait.
-  return WaitLoop(kMaxWaitMicros, handler);
-}
-
-bool WaitForSigStopped(pid_t pid, uint32_t max_wait_micros) {
-  auto handler = [&](bool* res) {
-    int status;
-    pid_t rc = TEMP_FAILURE_RETRY(waitpid(pid, &status, WNOHANG));
-    if (rc == -1) {
-      PLOG(ERROR) << "Failed to waitpid for " << pid;
-      *res = false;
-      return true;
-    }
-    if (rc == pid) {
-      if (!(WIFSTOPPED(status))) {
-        LOG(ERROR) << "Did not get expected stopped signal for " << pid;
-        *res = false;
-      } else {
-        *res = true;
-      }
-      return true;
-    }
-    return false;
-  };
-  return WaitLoop(max_wait_micros, handler);
-}
-
-#ifdef __LP64__
-constexpr bool kIs64Bit = true;
-#else
-constexpr bool kIs64Bit = false;
-#endif
-
-void DumpThread(pid_t pid,
-                pid_t tid,
-                const std::string* addr2line_path,
-                const char* prefix,
-                BacktraceMap* map) {
-  // Use std::cerr to avoid the LOG prefix.
-  std::cerr << std::endl << "=== pid: " << pid << " tid: " << tid << " ===" << std::endl;
-
-  constexpr uint32_t kMaxWaitMicros = 1000 * 1000;  // 1s.
-  if (pid != tid && !WaitForSigStopped(tid, kMaxWaitMicros)) {
-    LOG(ERROR) << "Failed to wait for sigstop on " << tid;
-  }
-
-  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, tid, map));
-  if (backtrace == nullptr) {
-    LOG(ERROR) << prefix << "(failed to create Backtrace for thread " << tid << ")";
-    return;
-  }
-  backtrace->SetSkipFrames(false);
-  if (!backtrace->Unwind(0, nullptr)) {
-    LOG(ERROR) << prefix << "(backtrace::Unwind failed for thread " << tid
-               << ": " <<  backtrace->GetErrorString(backtrace->GetError()) << ")";
-    return;
-  }
-  if (backtrace->NumFrames() == 0) {
-    LOG(ERROR) << prefix << "(no native stack frames for thread " << tid << ")";
-    return;
-  }
-
-  std::unique_ptr<addr2line::Addr2linePipe> addr2line_state;
-
-  for (Backtrace::const_iterator it = backtrace->begin();
-      it != backtrace->end(); ++it) {
-    std::ostringstream oss;
-    oss << prefix << StringPrintf("#%02zu pc ", it->num);
-    bool try_addr2line = false;
-    if (!BacktraceMap::IsValid(it->map)) {
-      oss << StringPrintf(kIs64Bit ? "%016" PRIx64 "  ???" : "%08" PRIx64 "  ???", it->pc);
-    } else {
-      oss << StringPrintf(kIs64Bit ? "%016" PRIx64 "  " : "%08" PRIx64 "  ", it->rel_pc);
-      if (it->map.name.empty()) {
-        oss << StringPrintf("<anonymous:%" PRIx64 ">", it->map.start);
-      } else {
-        oss << it->map.name;
-      }
-      if (it->map.offset != 0) {
-        oss << StringPrintf(" (offset %" PRIx64 ")", it->map.offset);
-      }
-      oss << " (";
-      if (!it->func_name.empty()) {
-        oss << it->func_name;
-        if (it->func_offset != 0) {
-          oss << "+" << it->func_offset;
-        }
-        // Functions found using the gdb jit interface will be in an empty
-        // map that cannot be found using addr2line.
-        if (!it->map.name.empty()) {
-          try_addr2line = true;
-        }
-      } else {
-        oss << "???";
-      }
-      oss << ")";
-    }
-    std::cerr << oss.str() << std::endl;
-    if (try_addr2line && addr2line_path != nullptr) {
-      addr2line::Addr2line(*addr2line_path,
-                           it->map.name,
-                           it->rel_pc,
-                           std::cerr,
-                           prefix,
-                           &addr2line_state);
-    }
-  }
-
-  if (addr2line_state != nullptr) {
-    addr2line::Drain(0, prefix, &addr2line_state, std::cerr);
-  }
-}
-
-void DumpProcess(pid_t forked_pid, const std::atomic<bool>& saw_wif_stopped_for_main) {
-  LOG(ERROR) << "Timeout for process " << forked_pid;
-
-  CHECK_EQ(0, ::ptrace(PTRACE_ATTACH, forked_pid, 0, 0));
-  std::set<pid_t> tids = ptrace::PtraceSiblings(forked_pid);
-  tids.insert(forked_pid);
-
-  ptrace::DumpABI(forked_pid);
-
-  // Check whether we have and should use addr2line.
-  std::unique_ptr<std::string> addr2line_path;
-  if (kUseAddr2line) {
-    addr2line_path = addr2line::FindAddr2line();
-    if (addr2line_path == nullptr) {
-      LOG(ERROR) << "Did not find usable addr2line";
-    }
-  }
-
-  if (!WaitForMainSigStop(saw_wif_stopped_for_main)) {
-    LOG(ERROR) << "Did not receive SIGSTOP for pid " << forked_pid;
-  }
-
-  std::unique_ptr<BacktraceMap> backtrace_map(BacktraceMap::Create(forked_pid));
-  if (backtrace_map == nullptr) {
-    LOG(ERROR) << "Could not create BacktraceMap";
-    return;
-  }
-
-  for (pid_t tid : tids) {
-    DumpThread(forked_pid, tid, addr2line_path.get(), "  ", backtrace_map.get());
-  }
-}
-
-[[noreturn]]
-void WaitMainLoop(pid_t forked_pid, std::atomic<bool>* saw_wif_stopped_for_main) {
-  for (;;) {
-    // Consider switching to waitid to not get woken up for WIFSTOPPED.
-    int status;
-    pid_t res = TEMP_FAILURE_RETRY(waitpid(forked_pid, &status, 0));
-    if (res == -1) {
-      PLOG(FATAL) << "Failure during waitpid";
-      __builtin_unreachable();
-    }
-
-    if (WIFEXITED(status)) {
-      _exit(WEXITSTATUS(status));
-      __builtin_unreachable();
-    }
-    if (WIFSIGNALED(status)) {
-      _exit(1);
-      __builtin_unreachable();
-    }
-    if (WIFSTOPPED(status)) {
-      *saw_wif_stopped_for_main = true;
-      continue;
-    }
-    if (WIFCONTINUED(status)) {
-      continue;
-    }
-
-    LOG(FATAL) << "Unknown status " << std::hex << status;
-  }
-}
-
-[[noreturn]]
-void SetupAndWait(pid_t forked_pid) {
-  timeout_signal::SignalSet signals;
-  signals.Add(timeout_signal::GetTimeoutSignal());
-  signals.Block();
-
-  std::atomic<bool> saw_wif_stopped_for_main(false);
-
-  std::thread signal_catcher([&]() {
-    signals.Block();
-    int sig = signals.Wait();
-    CHECK_EQ(sig, timeout_signal::GetTimeoutSignal());
-
-    DumpProcess(forked_pid, saw_wif_stopped_for_main);
-
-    // Don't clean up. Just kill the child and exit.
-    kill(forked_pid, SIGKILL);
-    _exit(1);
-  });
-
-  WaitMainLoop(forked_pid, &saw_wif_stopped_for_main);
-}
-
-}  // namespace
-}  // namespace art
-
-int main(int argc ATTRIBUTE_UNUSED, char** argv) {
-  pid_t orig_ppid = getpid();
-
-  pid_t pid = fork();
-  if (pid == 0) {
-    if (prctl(PR_SET_PDEATHSIG, SIGTERM) == -1) {
-      _exit(1);
-    }
-
-    if (getppid() != orig_ppid) {
-      _exit(2);
-    }
-
-    execvp(argv[1], &argv[1]);
-
-    _exit(3);
-    __builtin_unreachable();
-  }
-
-  art::SetupAndWait(pid);
-  __builtin_unreachable();
-}
diff --git a/tools/tracefast-plugin/Android.bp b/tools/tracefast-plugin/Android.bp
index b7ae6c6..7cdf1dc 100644
--- a/tools/tracefast-plugin/Android.bp
+++ b/tools/tracefast-plugin/Android.bp
@@ -37,15 +37,6 @@
     header_libs: [
         "libnativehelper_header_only",
     ],
-    multilib: {
-        lib32: {
-            suffix: "32",
-        },
-        lib64: {
-            suffix: "64",
-        },
-    },
-    symlink_preferred_arch: true,
 }
 
 cc_defaults {
diff --git a/tools/tracefast-plugin/tracefast.cc b/tools/tracefast-plugin/tracefast.cc
index 98f7ea5..782b5fe 100644
--- a/tools/tracefast-plugin/tracefast.cc
+++ b/tools/tracefast-plugin/tracefast.cc
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include "android-base/macros.h"
 #include "gc/scoped_gc_critical_section.h"
 #include "instrumentation.h"
 #include "runtime.h"
@@ -52,14 +53,16 @@
                     art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                     art::ArtMethod* method ATTRIBUTE_UNUSED,
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
-                    art::Handle<art::mirror::Object> return_value ATTRIBUTE_UNUSED)
+                    art::instrumentation::OptionalFrame frame ATTRIBUTE_UNUSED,
+                    art::MutableHandle<art::mirror::Object>& return_value ATTRIBUTE_UNUSED)
       override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void MethodExited(art::Thread* thread ATTRIBUTE_UNUSED,
                     art::Handle<art::mirror::Object> this_object ATTRIBUTE_UNUSED,
                     art::ArtMethod* method ATTRIBUTE_UNUSED,
                     uint32_t dex_pc ATTRIBUTE_UNUSED,
-                    const art::JValue& return_value ATTRIBUTE_UNUSED)
+                    art::instrumentation::OptionalFrame frame ATTRIBUTE_UNUSED,
+                    art::JValue& return_value ATTRIBUTE_UNUSED)
       override REQUIRES_SHARED(art::Locks::mutator_lock_) { }
 
   void MethodUnwind(art::Thread* thread ATTRIBUTE_UNUSED,
@@ -153,10 +156,10 @@
 TraceFastPhaseCB gPhaseCallback;
 
 // The plugin initialization function.
-extern "C" bool ArtPlugin_Initialize() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+extern "C" bool ArtPlugin_Initialize() {
   art::Runtime* runtime = art::Runtime::Current();
-  art::ScopedThreadSuspension stsc(art::Thread::Current(),
-                                   art::ThreadState::kWaitingForMethodTracingStart);
+  art::ScopedThreadStateChange stsc(art::Thread::Current(),
+                                    art::ThreadState::kWaitingForMethodTracingStart);
   art::ScopedSuspendAll ssa("Add phase callback");
   runtime->GetRuntimeCallbacks()->AddRuntimePhaseCallback(&gPhaseCallback);
   return true;
diff --git a/tools/unmount-buildbot-apexes.sh b/tools/unmount-buildbot-apexes.sh
deleted file mode 100755
index 8f0ad5f..0000000
--- a/tools/unmount-buildbot-apexes.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/bash
-#
-# Copyright (C) 2019 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Unmount Android Runtime and Core Libraries APEX packages required in the chroot directory.
-# This script emulates some the actions performed by `apexd`.
-
-# This script undoes the work done by tools/mount-buildbot-apexes.sh.
-# Make sure to keep these files in sync.
-
-green='\033[0;32m'
-nc='\033[0m'
-
-# Setup as root, as some actions performed here require it.
-adb root
-adb wait-for-device
-
-# Exit early if there is no chroot.
-[[ -n "$ART_TEST_CHROOT" ]] || exit
-
-# Check that ART_TEST_CHROOT is correctly defined.
-[[ "$ART_TEST_CHROOT" = /* ]] || { echo "$ART_TEST_CHROOT is not an absolute path"; exit 1; }
-
-# Directory containing extracted APEX packages' payloads (ext4 images) under
-# the chroot directory.
-apex_image_dir="/tmp/apex"
-
-# deactivate_system_package APEX_NAME
-# -----------------------------------
-# Unmount APEX_NAME in `/apex` under the chroot directory and delete the
-# corresponding APEX package payload (ext4 image).
-deactivate_system_package() {
-  local apex_name=$1
-  local abs_image_filename="$ART_TEST_CHROOT$apex_image_dir/$apex_name.img"
-  local abs_mount_point="$ART_TEST_CHROOT/apex/$apex_name"
-
-  echo -e "${green}Deactivating package $apex_name${nc}"
-
-  # Unmount the package's payload (ext4 image).
-  if adb shell mount | grep -q "^/dev/block/loop[0-9]\+ on $abs_mount_point type ext4"; then
-    adb shell umount "$abs_mount_point"
-    adb shell rmdir "$abs_mount_point"
-    # Delete the ext4 image.
-    adb shell rm "$abs_image_filename"
-  fi
-}
-
-# Deactivate the Android Runtime APEX.
-deactivate_system_package com.android.runtime
-
-# Delete the image's directory.
-adb shell rmdir "$ART_TEST_CHROOT$apex_image_dir"
diff --git a/tools/veridex/Android.bp b/tools/veridex/Android.bp
index e309607..1640a46 100644
--- a/tools/veridex/Android.bp
+++ b/tools/veridex/Android.bp
@@ -24,7 +24,10 @@
         "resolver.cc",
         "veridex.cc",
     ],
-    cflags: ["-Wall", "-Werror"],
+    cflags: [
+        "-Wall",
+        "-Werror",
+    ],
     static_libs: [
         "libdexfile",
         "libartbase",
@@ -43,3 +46,10 @@
         },
     },
 }
+
+// Expose the appcompat.sh script for use by the build.
+sh_binary_host {
+    name: "veridex-appcompat",
+    src: "appcompat.sh",
+    filename_from_src: true,
+}
diff --git a/tools/veridex/README.md b/tools/veridex/README.md
index f85a51b..ab446c0 100644
--- a/tools/veridex/README.md
+++ b/tools/veridex/README.md
@@ -8,7 +8,7 @@
 that do not exist. It can also miss on reflection uses.
 
 To build it:
-> make appcompat
+> m appcompat
 
 To run it:
 > ./art/tools/veridex/appcompat.sh --dex-file=test.apk
diff --git a/tools/veridex/api_list_filter.h b/tools/veridex/api_list_filter.h
new file mode 100644
index 0000000..58065db
--- /dev/null
+++ b/tools/veridex/api_list_filter.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TOOLS_VERIDEX_API_LIST_FILTER_H_
+#define ART_TOOLS_VERIDEX_API_LIST_FILTER_H_
+
+#include <algorithm>
+#include <android-base/strings.h>
+
+#include "base/hiddenapi_flags.h"
+
+namespace art {
+
+class ApiListFilter {
+ public:
+  explicit ApiListFilter(const std::vector<std::string>& exclude_api_lists) {
+    std::set<hiddenapi::ApiList> exclude_set;
+    bool include_invalid_list = true;
+    for (const std::string& name : exclude_api_lists) {
+      if (name.empty()) {
+        continue;
+      }
+      if (name == "invalid") {
+        include_invalid_list = false;
+        continue;
+      }
+      hiddenapi::ApiList list = hiddenapi::ApiList::FromName(name);
+      if (!list.IsValid()) {
+        LOG(ERROR) << "Unknown ApiList::Value " << name
+                   << ". See valid values in art/libartbase/base/hiddenapi_flags.h.";
+      }
+      exclude_set.insert(list);
+    }
+
+    if (include_invalid_list) {
+      lists_.push_back(hiddenapi::ApiList());
+    }
+    for (size_t i = 0; i < hiddenapi::ApiList::kValueCount; ++i) {
+      hiddenapi::ApiList list = hiddenapi::ApiList(i);
+      if (exclude_set.find(list) == exclude_set.end()) {
+          lists_.push_back(list);
+      }
+    }
+  }
+
+  bool Matches(hiddenapi::ApiList list) const {
+    for (const auto& it : lists_) {
+      if (list.GetIntValue() == it.GetIntValue()) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+ private:
+  std::vector<hiddenapi::ApiList> lists_;
+};
+
+}  // namespace art
+
+#endif  // ART_TOOLS_VERIDEX_API_LIST_FILTER_H_
diff --git a/tools/veridex/appcompat.sh b/tools/veridex/appcompat.sh
index 99537a4..ce90c06 100755
--- a/tools/veridex/appcompat.sh
+++ b/tools/veridex/appcompat.sh
@@ -28,6 +28,7 @@
   exec ${SCRIPT_DIR}/veridex \
     --core-stubs=${SCRIPT_DIR}/system-stubs.zip:${SCRIPT_DIR}/org.apache.http.legacy-stubs.zip \
     --api-flags=${SCRIPT_DIR}/hiddenapi-flags.csv \
+    --exclude-api-lists=whitelist,invalid \
     $@
 fi
 
@@ -69,6 +70,10 @@
   extra_flags="--api-flags=$file"
 fi
 
+# If --exclude-api-lists is not passed directly, exclude whitelist APIs.
+if [[ "$@" != "*--exclude-api-lists=*" ]]; then
+  extra_flags="${extra_flags} --exclude-api-lists=whitelist,invalid"
+fi
 
 ${ANDROID_HOST_OUT}/bin/veridex \
     --core-stubs=${PACKAGING}/core_dex_intermediates/classes.dex:${PACKAGING}/oahl_dex_intermediates/classes.dex \
diff --git a/tools/veridex/class_filter.h b/tools/veridex/class_filter.h
new file mode 100644
index 0000000..aa74d53
--- /dev/null
+++ b/tools/veridex/class_filter.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_TOOLS_VERIDEX_CLASS_FILTER_H_
+#define ART_TOOLS_VERIDEX_CLASS_FILTER_H_
+
+#include <android-base/strings.h>
+
+namespace art {
+
+class ClassFilter {
+ public:
+  explicit ClassFilter(const std::vector<std::string>& prefixes) : prefixes_(prefixes) {}
+
+  bool Matches(const char* class_descriptor) const {
+    if (prefixes_.empty()) {
+      return true;
+    }
+
+    for (const std::string& filter : prefixes_) {
+      if (android::base::StartsWith(class_descriptor, filter)) {
+        return true;
+      }
+    }
+
+    return false;
+  }
+
+ private:
+  const std::vector<std::string>& prefixes_;
+};
+
+}  // namespace art
+
+#endif  // ART_TOOLS_VERIDEX_CLASS_FILTER_H_
diff --git a/tools/veridex/hidden_api.cc b/tools/veridex/hidden_api.cc
index efb01f7..71ea56b5 100644
--- a/tools/veridex/hidden_api.cc
+++ b/tools/veridex/hidden_api.cc
@@ -24,7 +24,8 @@
 
 namespace art {
 
-HiddenApi::HiddenApi(const char* filename, bool sdk_uses_only) {
+HiddenApi::HiddenApi(const char* filename, const ApiListFilter& api_list_filter)
+    : api_list_filter_(api_list_filter) {
   CHECK(filename != nullptr);
 
   std::ifstream in(filename);
@@ -37,12 +38,6 @@
     CHECK(success) << "Unknown ApiList flag: " << str;
     CHECK(membership.IsValid()) << "Invalid ApiList: " << membership;
 
-    if (sdk_uses_only != membership.Contains(hiddenapi::ApiList::Whitelist())) {
-      // Either we want only SDK uses and this is not a whitelist entry,
-      // or we want only non-SDK uses and this is a whitelist entry.
-      continue;
-    }
-
     AddSignatureToApiList(signature, membership);
     size_t pos = signature.find("->");
     if (pos != std::string::npos) {
diff --git a/tools/veridex/hidden_api.h b/tools/veridex/hidden_api.h
index e1b67a2..a830174 100644
--- a/tools/veridex/hidden_api.h
+++ b/tools/veridex/hidden_api.h
@@ -17,6 +17,8 @@
 #ifndef ART_TOOLS_VERIDEX_HIDDEN_API_H_
 #define ART_TOOLS_VERIDEX_HIDDEN_API_H_
 
+#include "api_list_filter.h"
+
 #include "base/hiddenapi_flags.h"
 #include "dex/method_reference.h"
 
@@ -28,20 +30,51 @@
 
 class DexFile;
 
+enum class SignatureSource {
+  UNKNOWN,
+  BOOT,
+  APP,
+};
+
 /**
  * Helper class for logging if a method/field is in a hidden API list.
  */
 class HiddenApi {
  public:
-  HiddenApi(const char* flags_file, bool sdk_uses_only);
+  HiddenApi(const char* flags_file, const ApiListFilter& api_list_filter);
 
   hiddenapi::ApiList GetApiList(const std::string& name) const {
     auto it = api_list_.find(name);
     return (it == api_list_.end()) ? hiddenapi::ApiList() : it->second;
   }
 
-  bool IsInAnyList(const std::string& name) const {
-    return !GetApiList(name).IsEmpty();
+  bool ShouldReport(const std::string& signature) const {
+    return api_list_filter_.Matches(GetApiList(signature));
+  }
+
+  void AddSignatureSource(const std::string &signature, SignatureSource source) {
+    const auto type = GetApiClassName(signature);
+    auto it = source_.find(type);
+    if (it == source_.end() || it->second == SignatureSource::UNKNOWN) {
+      source_[type] = source;
+    } else if (it->second != source) {
+      LOG(WARNING) << type << "is present both in boot and in app.";
+      if (source == SignatureSource::BOOT) {
+        // Runtime resolves to boot type, so it takes precedence.
+        it->second = source;
+      }
+    } else {
+      // Already exists with the same source.
+    }
+  }
+
+  SignatureSource GetSignatureSource(const std::string& signature) const {
+    auto it = source_.find(GetApiClassName(signature));
+    return (it == source_.end()) ? SignatureSource::UNKNOWN : it->second;
+  }
+
+  bool IsInBoot(const std::string& signature) const {
+    return SignatureSource::BOOT == GetSignatureSource(signature);
   }
 
   static std::string GetApiMethodName(const DexFile& dex_file, uint32_t method_index);
@@ -61,16 +94,28 @@
  private:
   void AddSignatureToApiList(const std::string& signature, hiddenapi::ApiList membership);
 
+  static std::string GetApiClassName(const std::string& signature) {
+    size_t pos = signature.find("->");
+    if (pos != std::string::npos) {
+      return signature.substr(0, pos);
+    }
+    return signature;
+  }
+
+  const ApiListFilter& api_list_filter_;
   std::map<std::string, hiddenapi::ApiList> api_list_;
+  std::map<std::string, SignatureSource> source_;
 };
 
 struct HiddenApiStats {
   uint32_t count = 0;
   uint32_t reflection_count = 0;
   uint32_t linking_count = 0;
-  uint32_t api_counts[hiddenapi::ApiList::kValueCount] = {};  // initialize all to zero
+  // Ensure enough space for kInvalid as well, and initialize all to zero
+  uint32_t api_counts[hiddenapi::ApiList::kValueSize] = {};
 };
 
+
 }  // namespace art
 
 #endif  // ART_TOOLS_VERIDEX_HIDDEN_API_H_
diff --git a/tools/veridex/hidden_api_finder.cc b/tools/veridex/hidden_api_finder.cc
index fe6d88a..e740cf4 100644
--- a/tools/veridex/hidden_api_finder.cc
+++ b/tools/veridex/hidden_api_finder.cc
@@ -32,172 +32,178 @@
 void HiddenApiFinder::CheckMethod(uint32_t method_id,
                                   VeridexResolver* resolver,
                                   MethodReference ref) {
-  // Note: we always query whether a method is in a list, as the app
+  // Note: we always query whether a method is in boot, as the app
   // might define blacklisted APIs (which won't be used at runtime).
-  std::string name = HiddenApi::GetApiMethodName(resolver->GetDexFile(), method_id);
-  if (hidden_api_.IsInAnyList(name)) {
-    method_locations_[name].push_back(ref);
-  }
+  const auto& name = HiddenApi::GetApiMethodName(resolver->GetDexFile(), method_id);
+  method_locations_[name].push_back(ref);
 }
 
 void HiddenApiFinder::CheckField(uint32_t field_id,
                                  VeridexResolver* resolver,
                                  MethodReference ref) {
-  // Note: we always query whether a field is in a list, as the app
+  // Note: we always query whether a field is in a boot, as the app
   // might define blacklisted APIs (which won't be used at runtime).
-  std::string name = HiddenApi::GetApiFieldName(resolver->GetDexFile(), field_id);
-  if (hidden_api_.IsInAnyList(name)) {
-    field_locations_[name].push_back(ref);
-  }
+  const auto& name = HiddenApi::GetApiFieldName(resolver->GetDexFile(), field_id);
+  field_locations_[name].push_back(ref);
 }
 
-void HiddenApiFinder::CollectAccesses(VeridexResolver* resolver) {
+void HiddenApiFinder::CollectAccesses(VeridexResolver* resolver,
+                                      const ClassFilter& class_filter) {
   const DexFile& dex_file = resolver->GetDexFile();
   // Look at all types referenced in this dex file. Any of these
   // types can lead to being used through reflection.
   for (uint32_t i = 0; i < dex_file.NumTypeIds(); ++i) {
     std::string name(dex_file.StringByTypeIdx(dex::TypeIndex(i)));
-    if (hidden_api_.IsInAnyList(name)) {
-      classes_.insert(name);
-    }
+    classes_.insert(name);
   }
   // Note: we collect strings constants only referenced in code items as the string table
   // contains other kind of strings (eg types).
   for (ClassAccessor accessor : dex_file.GetClasses()) {
-    for (const ClassAccessor::Method& method : accessor.GetMethods()) {
-      for (const DexInstructionPcPair& inst : method.GetInstructions()) {
-        switch (inst->Opcode()) {
-          case Instruction::CONST_STRING: {
-            dex::StringIndex string_index(inst->VRegB_21c());
-            std::string name = std::string(dex_file.StringDataByIdx(string_index));
-            // Cheap filtering on the string literal. We know it cannot be a field/method/class
-            // if it contains a space.
-            if (name.find(' ') == std::string::npos) {
-              // Class names at the Java level are of the form x.y.z, but the list encodes
-              // them of the form Lx/y/z;. Inner classes have '$' for both Java level class
-              // names in strings, and hidden API lists.
-              std::string str = HiddenApi::ToInternalName(name);
-              // Note: we can query the lists directly, as HiddenApi added classes that own
-              // private methods and fields in them.
-              // We don't add class names to the `strings_` set as we know method/field names
-              // don't have '.' or '/'. All hidden API class names have a '/'.
-              if (hidden_api_.IsInAnyList(str)) {
-                classes_.insert(str);
-              } else if (hidden_api_.IsInAnyList(name)) {
-                // Could be something passed to JNI.
-                classes_.insert(name);
-              } else {
-                // We only keep track of the location for strings, as these will be the
-                // field/method names the user is interested in.
-                strings_.insert(name);
-                reflection_locations_[name].push_back(method.GetReference());
+    if (class_filter.Matches(accessor.GetDescriptor())) {
+      for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+        for (const DexInstructionPcPair& inst : method.GetInstructions()) {
+          switch (inst->Opcode()) {
+            case Instruction::CONST_STRING: {
+              dex::StringIndex string_index(inst->VRegB_21c());
+              const auto& name = std::string(dex_file.StringDataByIdx(string_index));
+              // Cheap filtering on the string literal. We know it cannot be a field/method/class
+              // if it contains a space.
+              if (name.find(' ') == std::string::npos) {
+                // Class names at the Java level are of the form x.y.z, but the list encodes
+                // them of the form Lx/y/z;. Inner classes have '$' for both Java level class
+                // names in strings, and hidden API lists.
+                std::string str = HiddenApi::ToInternalName(name);
+                // Note: we can query the lists directly, as HiddenApi added classes that own
+                // private methods and fields in them.
+                // We don't add class names to the `strings_` set as we know method/field names
+                // don't have '.' or '/'. All hidden API class names have a '/'.
+                if (hidden_api_.IsInBoot(str)) {
+                  classes_.insert(str);
+                } else if (hidden_api_.IsInBoot(name)) {
+                  // Could be something passed to JNI.
+                  classes_.insert(name);
+                } else {
+                  // We only keep track of the location for strings, as these will be the
+                  // field/method names the user is interested in.
+                  strings_.insert(name);
+                  reflection_locations_[name].push_back(method.GetReference());
+                }
               }
+              break;
             }
-            break;
-          }
-          case Instruction::INVOKE_DIRECT:
-          case Instruction::INVOKE_INTERFACE:
-          case Instruction::INVOKE_STATIC:
-          case Instruction::INVOKE_SUPER:
-          case Instruction::INVOKE_VIRTUAL: {
-            CheckMethod(inst->VRegB_35c(), resolver, method.GetReference());
-            break;
-          }
+            case Instruction::INVOKE_DIRECT:
+            case Instruction::INVOKE_INTERFACE:
+            case Instruction::INVOKE_STATIC:
+            case Instruction::INVOKE_SUPER:
+            case Instruction::INVOKE_VIRTUAL: {
+              CheckMethod(inst->VRegB_35c(), resolver, method.GetReference());
+              break;
+            }
 
-          case Instruction::INVOKE_DIRECT_RANGE:
-          case Instruction::INVOKE_INTERFACE_RANGE:
-          case Instruction::INVOKE_STATIC_RANGE:
-          case Instruction::INVOKE_SUPER_RANGE:
-          case Instruction::INVOKE_VIRTUAL_RANGE: {
-            CheckMethod(inst->VRegB_3rc(), resolver, method.GetReference());
-            break;
-          }
+            case Instruction::INVOKE_DIRECT_RANGE:
+            case Instruction::INVOKE_INTERFACE_RANGE:
+            case Instruction::INVOKE_STATIC_RANGE:
+            case Instruction::INVOKE_SUPER_RANGE:
+            case Instruction::INVOKE_VIRTUAL_RANGE: {
+              CheckMethod(inst->VRegB_3rc(), resolver, method.GetReference());
+              break;
+            }
 
-          case Instruction::IGET:
-          case Instruction::IGET_WIDE:
-          case Instruction::IGET_OBJECT:
-          case Instruction::IGET_BOOLEAN:
-          case Instruction::IGET_BYTE:
-          case Instruction::IGET_CHAR:
-          case Instruction::IGET_SHORT: {
-            CheckField(inst->VRegC_22c(), resolver, method.GetReference());
-            break;
-          }
+            case Instruction::IGET:
+            case Instruction::IGET_WIDE:
+            case Instruction::IGET_OBJECT:
+            case Instruction::IGET_BOOLEAN:
+            case Instruction::IGET_BYTE:
+            case Instruction::IGET_CHAR:
+            case Instruction::IGET_SHORT: {
+              CheckField(inst->VRegC_22c(), resolver, method.GetReference());
+              break;
+            }
 
-          case Instruction::IPUT:
-          case Instruction::IPUT_WIDE:
-          case Instruction::IPUT_OBJECT:
-          case Instruction::IPUT_BOOLEAN:
-          case Instruction::IPUT_BYTE:
-          case Instruction::IPUT_CHAR:
-          case Instruction::IPUT_SHORT: {
-            CheckField(inst->VRegC_22c(), resolver, method.GetReference());
-            break;
-          }
+            case Instruction::IPUT:
+            case Instruction::IPUT_WIDE:
+            case Instruction::IPUT_OBJECT:
+            case Instruction::IPUT_BOOLEAN:
+            case Instruction::IPUT_BYTE:
+            case Instruction::IPUT_CHAR:
+            case Instruction::IPUT_SHORT: {
+              CheckField(inst->VRegC_22c(), resolver, method.GetReference());
+              break;
+            }
 
-          case Instruction::SGET:
-          case Instruction::SGET_WIDE:
-          case Instruction::SGET_OBJECT:
-          case Instruction::SGET_BOOLEAN:
-          case Instruction::SGET_BYTE:
-          case Instruction::SGET_CHAR:
-          case Instruction::SGET_SHORT: {
-            CheckField(inst->VRegB_21c(), resolver, method.GetReference());
-            break;
-          }
+            case Instruction::SGET:
+            case Instruction::SGET_WIDE:
+            case Instruction::SGET_OBJECT:
+            case Instruction::SGET_BOOLEAN:
+            case Instruction::SGET_BYTE:
+            case Instruction::SGET_CHAR:
+            case Instruction::SGET_SHORT: {
+              CheckField(inst->VRegB_21c(), resolver, method.GetReference());
+              break;
+            }
 
-          case Instruction::SPUT:
-          case Instruction::SPUT_WIDE:
-          case Instruction::SPUT_OBJECT:
-          case Instruction::SPUT_BOOLEAN:
-          case Instruction::SPUT_BYTE:
-          case Instruction::SPUT_CHAR:
-          case Instruction::SPUT_SHORT: {
-            CheckField(inst->VRegB_21c(), resolver, method.GetReference());
-            break;
-          }
+            case Instruction::SPUT:
+            case Instruction::SPUT_WIDE:
+            case Instruction::SPUT_OBJECT:
+            case Instruction::SPUT_BOOLEAN:
+            case Instruction::SPUT_BYTE:
+            case Instruction::SPUT_CHAR:
+            case Instruction::SPUT_SHORT: {
+              CheckField(inst->VRegB_21c(), resolver, method.GetReference());
+              break;
+            }
 
-          default:
-            break;
+            default:
+              break;
+          }
         }
       }
     }
   }
 }
 
-void HiddenApiFinder::Run(const std::vector<std::unique_ptr<VeridexResolver>>& resolvers) {
+void HiddenApiFinder::Run(const std::vector<std::unique_ptr<VeridexResolver>>& resolvers,
+                          const ClassFilter& class_filter) {
   for (const std::unique_ptr<VeridexResolver>& resolver : resolvers) {
-    CollectAccesses(resolver.get());
+    CollectAccesses(resolver.get(), class_filter);
   }
 }
 
 void HiddenApiFinder::Dump(std::ostream& os,
                            HiddenApiStats* stats,
                            bool dump_reflection) {
-  stats->linking_count = method_locations_.size() + field_locations_.size();
-
   // Dump methods from hidden APIs linked against.
   for (const std::pair<const std::string,
                        std::vector<MethodReference>>& pair : method_locations_) {
-    hiddenapi::ApiList api_list = hidden_api_.GetApiList(pair.first);
-    CHECK(api_list.IsValid());
-    stats->api_counts[api_list.GetIntValue()]++;
-    os << "#" << ++stats->count << ": Linking " << api_list << " " << pair.first << " use(s):";
-    os << std::endl;
-    HiddenApiFinder::DumpReferences(os, pair.second);
-    os << std::endl;
+    const auto& name = pair.first;
+    if (hidden_api_.GetSignatureSource(name) != SignatureSource::APP &&
+        hidden_api_.ShouldReport(name)) {
+      stats->linking_count++;
+      hiddenapi::ApiList api_list = hidden_api_.GetApiList(pair.first);
+      stats->api_counts[api_list.GetIntValue()]++;
+      os << "#" << ++stats->count << ": Linking " << api_list << " " << pair.first << " use(s):";
+      os << std::endl;
+      HiddenApiFinder::DumpReferences(os, pair.second);
+      os << std::endl;
+    }
   }
 
   // Dump fields from hidden APIs linked against.
   for (const std::pair<const std::string,
                        std::vector<MethodReference>>& pair : field_locations_) {
-    hiddenapi::ApiList api_list = hidden_api_.GetApiList(pair.first);
-    CHECK(api_list.IsValid());
-    stats->api_counts[api_list.GetIntValue()]++;
-    os << "#" << ++stats->count << ": Linking " << api_list << " " << pair.first << " use(s):";
-    os << std::endl;
-    HiddenApiFinder::DumpReferences(os, pair.second);
-    os << std::endl;
+    const auto& name = pair.first;
+    if (hidden_api_.GetSignatureSource(name) != SignatureSource::APP &&
+        hidden_api_.ShouldReport(name)) {
+      stats->linking_count++;
+      hiddenapi::ApiList api_list = hidden_api_.GetApiList(pair.first);
+      stats->api_counts[api_list.GetIntValue()]++;
+      // Note: There is a test depending on this output format,
+      // so please be careful when you modify the format. b/123662832
+      os << "#" << ++stats->count << ": Linking " << api_list << " " << pair.first << " use(s):";
+      os << std::endl;
+      HiddenApiFinder::DumpReferences(os, pair.second);
+      os << std::endl;
+    }
   }
 
   if (dump_reflection) {
@@ -205,10 +211,13 @@
     for (const std::string& cls : classes_) {
       for (const std::string& name : strings_) {
         std::string full_name = cls + "->" + name;
-        hiddenapi::ApiList api_list = hidden_api_.GetApiList(full_name);
-        if (api_list.IsValid()) {
+        if (hidden_api_.GetSignatureSource(full_name) != SignatureSource::APP &&
+            hidden_api_.ShouldReport(full_name)) {
+          hiddenapi::ApiList api_list = hidden_api_.GetApiList(full_name);
           stats->api_counts[api_list.GetIntValue()]++;
           stats->reflection_count++;
+          // Note: There is a test depending on this output format,
+          // so please be careful when you modify the format. b/123662832
           os << "#" << ++stats->count << ": Reflection " << api_list << " " << full_name
              << " potential use(s):";
           os << std::endl;
diff --git a/tools/veridex/hidden_api_finder.h b/tools/veridex/hidden_api_finder.h
index 9e10c1a..f395e89 100644
--- a/tools/veridex/hidden_api_finder.h
+++ b/tools/veridex/hidden_api_finder.h
@@ -17,6 +17,7 @@
 #ifndef ART_TOOLS_VERIDEX_HIDDEN_API_FINDER_H_
 #define ART_TOOLS_VERIDEX_HIDDEN_API_FINDER_H_
 
+#include "class_filter.h"
 #include "dex/method_reference.h"
 
 #include <iostream>
@@ -39,12 +40,13 @@
 
   // Iterate over the dex files associated with the passed resolvers to report
   // hidden API uses.
-  void Run(const std::vector<std::unique_ptr<VeridexResolver>>& app_resolvers);
+  void Run(const std::vector<std::unique_ptr<VeridexResolver>>& app_resolvers,
+           const ClassFilter& app_class_filter);
 
   void Dump(std::ostream& os, HiddenApiStats* stats, bool dump_reflection);
 
  private:
-  void CollectAccesses(VeridexResolver* resolver);
+  void CollectAccesses(VeridexResolver* resolver, const ClassFilter& class_filter);
   void CheckMethod(uint32_t method_idx, VeridexResolver* resolver, MethodReference ref);
   void CheckField(uint32_t field_idx, VeridexResolver* resolver, MethodReference ref);
   void DumpReferences(std::ostream& os, const std::vector<MethodReference>& references);
diff --git a/tools/veridex/precise_hidden_api_finder.cc b/tools/veridex/precise_hidden_api_finder.cc
index be99ed2..6f66a33 100644
--- a/tools/veridex/precise_hidden_api_finder.cc
+++ b/tools/veridex/precise_hidden_api_finder.cc
@@ -16,6 +16,7 @@
 
 #include "precise_hidden_api_finder.h"
 
+#include "class_filter.h"
 #include "dex/class_accessor-inl.h"
 #include "dex/code_item_accessors-inl.h"
 #include "dex/dex_instruction-inl.h"
@@ -32,12 +33,15 @@
 
 void PreciseHiddenApiFinder::RunInternal(
     const std::vector<std::unique_ptr<VeridexResolver>>& resolvers,
+    const ClassFilter& class_filter,
     const std::function<void(VeridexResolver*, const ClassAccessor::Method&)>& action) {
   for (const std::unique_ptr<VeridexResolver>& resolver : resolvers) {
     for (ClassAccessor accessor : resolver->GetDexFile().GetClasses()) {
-      for (const ClassAccessor::Method& method : accessor.GetMethods()) {
-        if (method.GetCodeItem() != nullptr) {
-          action(resolver.get(), method);
+      if (class_filter.Matches(accessor.GetDescriptor())) {
+        for (const ClassAccessor::Method& method : accessor.GetMethods()) {
+          if (method.GetCodeItem() != nullptr) {
+            action(resolver.get(), method);
+          }
         }
       }
     }
@@ -55,9 +59,12 @@
   }
 }
 
-void PreciseHiddenApiFinder::Run(const std::vector<std::unique_ptr<VeridexResolver>>& resolvers) {
+void PreciseHiddenApiFinder::Run(const std::vector<std::unique_ptr<VeridexResolver>>& resolvers,
+                                 const ClassFilter& class_filter) {
   // Collect reflection uses.
-  RunInternal(resolvers, [this] (VeridexResolver* resolver, const ClassAccessor::Method& method) {
+  RunInternal(resolvers,
+              class_filter,
+              [this] (VeridexResolver* resolver, const ClassAccessor::Method& method) {
     FlowAnalysisCollector collector(resolver, method);
     collector.Run();
     AddUsesAt(collector.GetUses(), method.GetReference());
@@ -73,6 +80,7 @@
     std::map<MethodReference, std::vector<ReflectAccessInfo>> current_uses
         = std::move(abstract_uses_);
     RunInternal(resolvers,
+                class_filter,
                 [this, current_uses] (VeridexResolver* resolver,
                                       const ClassAccessor::Method& method) {
       FlowAnalysisSubstitutor substitutor(resolver, method, current_uses);
@@ -91,23 +99,24 @@
       std::string cls(info.cls.ToString());
       std::string name(info.name.ToString());
       std::string full_name = cls + "->" + name;
-      if (hidden_api_.IsInAnyList(full_name)) {
-        named_uses[full_name].push_back(ref);
-      }
+      named_uses[full_name].push_back(ref);
     }
   }
 
   for (auto& it : named_uses) {
-    ++stats->reflection_count;
     const std::string& full_name = it.first;
-    hiddenapi::ApiList api_list = hidden_api_.GetApiList(full_name);
-    stats->api_counts[api_list.GetIntValue()]++;
-    os << "#" << ++stats->count << ": Reflection " << api_list << " " << full_name << " use(s):";
-    os << std::endl;
-    for (const MethodReference& ref : it.second) {
-      os << kPrefix << HiddenApi::GetApiMethodName(ref) << std::endl;
+    if (hidden_api_.GetSignatureSource(full_name) != SignatureSource::APP &&
+        hidden_api_.ShouldReport(full_name)) {
+      stats->reflection_count++;
+      hiddenapi::ApiList api_list = hidden_api_.GetApiList(full_name);
+      stats->api_counts[api_list.GetIntValue()]++;
+      os << "#" << ++stats->count << ": Reflection " << api_list << " " << full_name << " use(s):";
+      os << std::endl;
+      for (const MethodReference& ref : it.second) {
+        os << kPrefix << HiddenApi::GetApiMethodName(ref) << std::endl;
+      }
+      os << std::endl;
     }
-    os << std::endl;
   }
 }
 
diff --git a/tools/veridex/precise_hidden_api_finder.h b/tools/veridex/precise_hidden_api_finder.h
index 8c5126c..5254e84 100644
--- a/tools/veridex/precise_hidden_api_finder.h
+++ b/tools/veridex/precise_hidden_api_finder.h
@@ -17,6 +17,7 @@
 #ifndef ART_TOOLS_VERIDEX_PRECISE_HIDDEN_API_FINDER_H_
 #define ART_TOOLS_VERIDEX_PRECISE_HIDDEN_API_FINDER_H_
 
+#include "class_filter.h"
 #include "dex/method_reference.h"
 #include "flow_analysis.h"
 
@@ -40,7 +41,8 @@
 
   // Iterate over the dex files associated with the passed resolvers to report
   // hidden API uses.
-  void Run(const std::vector<std::unique_ptr<VeridexResolver>>& app_resolvers);
+  void Run(const std::vector<std::unique_ptr<VeridexResolver>>& app_resolvers,
+           const ClassFilter& app_class_filter);
 
   void Dump(std::ostream& os, HiddenApiStats* stats);
 
@@ -48,6 +50,7 @@
   // Run over all methods of all dex files, and call `action` on each.
   void RunInternal(
       const std::vector<std::unique_ptr<VeridexResolver>>& resolvers,
+      const ClassFilter& class_filter,
       const std::function<void(VeridexResolver*, const ClassAccessor::Method&)>& action);
 
   // Add uses found in method `ref`.
diff --git a/tools/veridex/veridex.cc b/tools/veridex/veridex.cc
index 3b6c7f9..ae1c33e 100644
--- a/tools/veridex/veridex.cc
+++ b/tools/veridex/veridex.cc
@@ -17,6 +17,7 @@
 #include "veridex.h"
 
 #include <android-base/file.h>
+#include <android-base/strings.h>
 
 #include "dex/dex_file.h"
 #include "dex/dex_file_loader.h"
@@ -70,15 +71,17 @@
 static const char* kFlagsOption = "--api-flags=";
 static const char* kImprecise = "--imprecise";
 static const char* kTargetSdkVersion = "--target-sdk-version=";
-static const char* kOnlyReportSdkUses = "--only-report-sdk-uses";
+static const char* kAppClassFilter = "--app-class-filter=";
+static const char* kExcludeApiListsOption = "--exclude-api-lists=";
 
 struct VeridexOptions {
   const char* dex_file = nullptr;
   const char* core_stubs = nullptr;
   const char* flags_file = nullptr;
   bool precise = true;
-  int target_sdk_version = 28; /* P */
-  bool only_report_sdk_uses = false;
+  int target_sdk_version = 29; /* Q */
+  std::vector<std::string> app_class_name_filter;
+  std::vector<std::string> exclude_api_lists;
 };
 
 static const char* Substr(const char* str, int index) {
@@ -105,8 +108,14 @@
       options->precise = false;
     } else if (StartsWith(argv[i], kTargetSdkVersion)) {
       options->target_sdk_version = atoi(Substr(argv[i], strlen(kTargetSdkVersion)));
-    } else if (strcmp(argv[i], kOnlyReportSdkUses) == 0) {
-      options->only_report_sdk_uses = true;
+    } else if (StartsWith(argv[i], kAppClassFilter)) {
+      options->app_class_name_filter = android::base::Split(
+          Substr(argv[i], strlen(kAppClassFilter)), ",");
+    } else if (StartsWith(argv[i], kExcludeApiListsOption)) {
+      options->exclude_api_lists = android::base::Split(
+          Substr(argv[i], strlen(kExcludeApiListsOption)), ",");
+    } else {
+      LOG(ERROR) << "Unknown command line argument: " << argv[i];
     }
   }
 }
@@ -165,6 +174,9 @@
 
     // Resolve classes/methods/fields defined in each dex file.
 
+    ApiListFilter api_list_filter(options.exclude_api_lists);
+    HiddenApi hidden_api(options.flags_file, api_list_filter);
+
     // Cache of types we've seen, for quick class name lookups.
     TypeMap type_map;
     // Add internally defined primitives.
@@ -183,56 +195,66 @@
 
     std::vector<std::unique_ptr<VeridexResolver>> boot_resolvers;
     Resolve(boot_dex_files, resolver_map, type_map, &boot_resolvers);
+    for (const auto &it : type_map) {
+        hidden_api.AddSignatureSource(it.first, SignatureSource::BOOT);
+    }
 
-    // Now that boot classpath has been resolved, fill classes and reflection
-    // methods.
-    VeriClass::object_ = type_map["Ljava/lang/Object;"];
-    VeriClass::class_ = type_map["Ljava/lang/Class;"];
-    VeriClass::class_loader_ = type_map["Ljava/lang/ClassLoader;"];
-    VeriClass::string_ = type_map["Ljava/lang/String;"];
-    VeriClass::throwable_ = type_map["Ljava/lang/Throwable;"];
-    VeriClass::forName_ = boot_resolvers[0]->LookupDeclaredMethodIn(
-        *VeriClass::class_, "forName", "(Ljava/lang/String;)Ljava/lang/Class;");
-    VeriClass::getField_ = boot_resolvers[0]->LookupDeclaredMethodIn(
-        *VeriClass::class_, "getField", "(Ljava/lang/String;)Ljava/lang/reflect/Field;");
-    VeriClass::getDeclaredField_ = boot_resolvers[0]->LookupDeclaredMethodIn(
-        *VeriClass::class_, "getDeclaredField", "(Ljava/lang/String;)Ljava/lang/reflect/Field;");
-    VeriClass::getMethod_ = boot_resolvers[0]->LookupDeclaredMethodIn(
-        *VeriClass::class_,
-        "getMethod",
-        "(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;");
-    VeriClass::getDeclaredMethod_ = boot_resolvers[0]->LookupDeclaredMethodIn(
-        *VeriClass::class_,
-        "getDeclaredMethod",
-        "(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;");
-    VeriClass::getClass_ = boot_resolvers[0]->LookupDeclaredMethodIn(
-        *VeriClass::object_, "getClass", "()Ljava/lang/Class;");
-    VeriClass::loadClass_ = boot_resolvers[0]->LookupDeclaredMethodIn(
-        *VeriClass::class_loader_, "loadClass", "(Ljava/lang/String;)Ljava/lang/Class;");
+    if (options.precise) {
+      // For precise mode we expect core-stubs to contain java.lang classes.
+      VeriClass::object_ = type_map["Ljava/lang/Object;"];
+      VeriClass::class_ = type_map["Ljava/lang/Class;"];
+      VeriClass::class_loader_ = type_map["Ljava/lang/ClassLoader;"];
+      VeriClass::string_ = type_map["Ljava/lang/String;"];
+      VeriClass::throwable_ = type_map["Ljava/lang/Throwable;"];
+      VeriClass::forName_ = boot_resolvers[0]->LookupDeclaredMethodIn(
+          *VeriClass::class_, "forName", "(Ljava/lang/String;)Ljava/lang/Class;");
+      VeriClass::getField_ = boot_resolvers[0]->LookupDeclaredMethodIn(
+          *VeriClass::class_, "getField", "(Ljava/lang/String;)Ljava/lang/reflect/Field;");
+      VeriClass::getDeclaredField_ = boot_resolvers[0]->LookupDeclaredMethodIn(
+          *VeriClass::class_, "getDeclaredField", "(Ljava/lang/String;)Ljava/lang/reflect/Field;");
+      VeriClass::getMethod_ = boot_resolvers[0]->LookupDeclaredMethodIn(
+          *VeriClass::class_,
+          "getMethod",
+          "(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;");
+      VeriClass::getDeclaredMethod_ = boot_resolvers[0]->LookupDeclaredMethodIn(
+          *VeriClass::class_,
+          "getDeclaredMethod",
+          "(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method;");
+      VeriClass::getClass_ = boot_resolvers[0]->LookupDeclaredMethodIn(
+          *VeriClass::object_, "getClass", "()Ljava/lang/Class;");
+      VeriClass::loadClass_ = boot_resolvers[0]->LookupDeclaredMethodIn(
+          *VeriClass::class_loader_, "loadClass", "(Ljava/lang/String;)Ljava/lang/Class;");
 
-    VeriClass* version = type_map["Landroid/os/Build$VERSION;"];
-    if (version != nullptr) {
-      VeriClass::sdkInt_ = boot_resolvers[0]->LookupFieldIn(*version, "SDK_INT", "I");
+      VeriClass* version = type_map["Landroid/os/Build$VERSION;"];
+      if (version != nullptr) {
+        VeriClass::sdkInt_ = boot_resolvers[0]->LookupFieldIn(*version, "SDK_INT", "I");
+      }
     }
 
     std::vector<std::unique_ptr<VeridexResolver>> app_resolvers;
     Resolve(app_dex_files, resolver_map, type_map, &app_resolvers);
+    for (const auto &it : type_map) {
+      if (!hidden_api.IsInBoot(it.first)) {
+        hidden_api.AddSignatureSource(it.first, SignatureSource::APP);
+      }
+    }
+
+    ClassFilter app_class_filter(options.app_class_name_filter);
 
     // Find and log uses of hidden APIs.
-    HiddenApi hidden_api(options.flags_file, options.only_report_sdk_uses);
     HiddenApiStats stats;
 
     HiddenApiFinder api_finder(hidden_api);
-    api_finder.Run(app_resolvers);
+    api_finder.Run(app_resolvers, app_class_filter);
     api_finder.Dump(std::cout, &stats, !options.precise);
 
     if (options.precise) {
       PreciseHiddenApiFinder precise_api_finder(hidden_api);
-      precise_api_finder.Run(app_resolvers);
+      precise_api_finder.Run(app_resolvers, app_class_filter);
       precise_api_finder.Dump(std::cout, &stats);
     }
 
-    DumpSummaryStats(std::cout, stats, options);
+    DumpSummaryStats(std::cout, stats, api_list_filter);
 
     if (options.precise) {
       std::cout << "To run an analysis that can give more reflection accesses, " << std::endl
@@ -245,21 +267,22 @@
  private:
   static void DumpSummaryStats(std::ostream& os,
                                const HiddenApiStats& stats,
-                               const VeridexOptions& options) {
-    static const char* kPrefix = "       ";
-    if (options.only_report_sdk_uses) {
-      os << stats.api_counts[hiddenapi::ApiList::Whitelist().GetIntValue()]
-         << " SDK API uses." << std::endl;
-    } else {
-      os << stats.count << " hidden API(s) used: "
-         << stats.linking_count << " linked against, "
-         << stats.reflection_count << " through reflection" << std::endl;
-      for (size_t i = 0; i < hiddenapi::ApiList::kValueCount; ++i) {
-        hiddenapi::ApiList api_list = hiddenapi::ApiList(i);
-        if (api_list != hiddenapi::ApiList::Whitelist()) {
-          os << kPrefix << stats.api_counts[i] << " in " << api_list << std::endl;
-        }
-      }
+                               const ApiListFilter& api_list_filter) {
+    os << stats.count << " hidden API(s) used: "
+       << stats.linking_count << " linked against, "
+       << stats.reflection_count << " through reflection" << std::endl;
+    DumpApiListStats(os, stats, hiddenapi::ApiList(), api_list_filter);
+    for (size_t i = 0; i < hiddenapi::ApiList::kValueCount; ++i) {
+      DumpApiListStats(os, stats, hiddenapi::ApiList(i), api_list_filter);
+    }
+  }
+
+  static void DumpApiListStats(std::ostream& os,
+                               const HiddenApiStats& stats,
+                               const hiddenapi::ApiList& api_list,
+                               const ApiListFilter& api_list_filter) {
+    if (api_list_filter.Matches(api_list)) {
+      os << "\t" << stats.api_counts[api_list.GetIntValue()] << " in " << api_list << std::endl;
     }
   }